query (string, 7-3.85k chars) | document (string, 11-430k chars) | metadata (dict) | negatives (sequence, 0-101 items) | negative_scores (sequence, 0-101 items) | document_score (string, 3-10 chars) | document_rank (102 classes) |
---|---|---|---|---|---|---|
Validate returns an error if key is empty | func (kv BatchKeyRotateKV) Validate() error {
if kv.Key == "" {
return errInvalidArgument
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func validateKey(key *types.MapValue) error {\n\tif key == nil {\n\t\treturn nosqlerr.NewIllegalArgument(\"Key must be non-nil\")\n\t}\n\n\tif key.Len() == 0 {\n\t\treturn nosqlerr.NewIllegalArgument(\"Key must be non-empty\")\n\t}\n\n\treturn nil\n}",
"func (e EmptyValidationError) Key() bool { return e.key }",
"func (kv BatchJobReplicateKV) Validate() error {\n\tif kv.Key == \"\" {\n\t\treturn errInvalidArgument\n\t}\n\treturn nil\n}",
"func validateKiteKey(k *protocol.Kite) error {\n\tfields := k.Query().Fields()\n\n\t// Validate fields.\n\tfor k, v := range fields {\n\t\tif v == \"\" {\n\t\t\treturn fmt.Errorf(\"Empty Kite field: %s\", k)\n\t\t}\n\t\tif strings.ContainsRune(v, '/') {\n\t\t\treturn fmt.Errorf(\"Field \\\"%s\\\" must not contain '/'\", k)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func validateKeyName(keyName string) error {\n\tif strings.TrimSpace(keyName) == \"\" {\n\t\treturn errors.New(\"empty key name\")\n\t}\n\n\treturn nil\n}",
"func validateKeyMap(km *KeyMap) error {\n\tif len(km.Yes) == 0 && len(km.No) == 0 && len(km.Submit) == 0 {\n\t\treturn fmt.Errorf(\"no submit key\")\n\t}\n\n\tif !(len(km.Yes) > 0 && len(km.No) > 0) &&\n\t\tlen(km.Toggle) == 0 &&\n\t\t!(len(km.SelectYes) > 0 && len(km.SelectNo) > 0) {\n\t\treturn fmt.Errorf(\"missing keys to select a value\")\n\t}\n\n\treturn nil\n}",
"func (e JsonToMetadata_RuleValidationError) Key() bool { return e.key }",
"func (e SimpleRequestValidationError) Key() bool { return e.key }",
"func (e JwtComponentValidationError) Key() bool { return e.key }",
"func (e MinioComponentValidationError) Key() bool { return e.key }",
"func (e EarfcnValidationError) Key() bool { return e.key }",
"func (e ArfcnValidationError) Key() bool { return e.key }",
"func (e NrcgiValidationError) Key() bool { return e.key }",
"func Validate(id string, key string) string {\n\treturn \"invalid\"\n}",
"func (e SimpleResponseValidationError) Key() bool { return e.key }",
"func (e EutracgiValidationError) Key() bool { return e.key }",
"func (e SearchRequestValidationError) Key() bool { return e.key }",
"func (e HealthCheck_CustomHealthCheckValidationError) Key() bool { return e.key }",
"func (e GetApplicationPubSubRequestValidationError) Key() bool { return e.key }",
"func (e CommonResponseValidationError) Key() bool { return e.key }",
"func (e RequestValidationError) Key() bool { return e.key }",
"func (e NrarfcnValidationError) Key() bool { return e.key }",
"func (e HelloRequestValidationError) Key() bool { return e.key }",
"func (r *Result) validate(errMsg *[]string, parentField string) {\n\tjn := resultJsonMap\n\taddErrMessage(errMsg, len(r.Key) > 0 && hasNonEmptyKV(r.Key), \"field '%s' must be non-empty and must not have empty keys or values\", parentField+\".\"+jn[\"Key\"])\n\taddErrMessage(errMsg, hasNonEmptyKV(r.Options), \"field '%s' must not have empty keys or values\", parentField+\".\"+jn[\"Options\"])\n\taddErrMessage(errMsg, regExHexadecimal.MatchString(string(r.Digest)), \"field '%s' must be hexadecimal\", parentField+\".\"+jn[\"Digest\"])\n}",
"func (e JsonToMetadata_KeyValuePairValidationError) Key() bool { return e.key }",
"func (e NrtValidationError) Key() bool { return e.key }",
"func (e HealthCheckValidationError) Key() bool { return e.key }",
"func (e HealthCheckValidationError) Key() bool { return e.key }",
"func (e HealthCheck_PayloadValidationError) Key() bool { return e.key }",
"func (keySetter *KeySetter) Validate() []string {\n\tvar errorData []string = []string{}\n\tif keySetter.Key == \"\" {\n\t\terrorData = append(errorData, \"field 'key' is required\")\n\t}\n\tif keySetter.Value == \"\" || keySetter.Value == nil {\n\t\terrorData = append(errorData, \"field 'value' is required\")\n\t}\n\tif keySetter.Expiry < 0 {\n\t\terrorData = append(errorData, \"Enter a valid numerical expiry in ms\")\n\t}\n\treturn errorData\n}",
"func (e ApplicationPubSubValidationError) Key() bool { return e.key }",
"func AssertValidKey(key []byte) {\n\tif len(key) == 0 {\n\t\tpanic(\"key is nil or empty\")\n\t}\n\tif len(key) > MaxKeyLength {\n\t\tpanic(\"key is too large\")\n\t}\n}",
"func (e HTTPRequestValidationError) Key() bool { return e.key }",
"func (e HealthCheck_HttpHealthCheckValidationError) Key() bool { return e.key }",
"func (e JwtRequirementValidationError) Key() bool { return e.key }",
"func (e LoadStatsRequestValidationError) Key() bool { return e.key }",
"func (e HealthCheck_RedisHealthCheckValidationError) Key() bool { return e.key }",
"func (e ApplicationPubSub_MessageValidationError) Key() bool { return e.key }",
"func (e ActiveHealthCheckValidationError) Key() bool { return e.key }",
"func (e GetRequestValidationError) Key() bool { return e.key }",
"func (e GetRequestValidationError) Key() bool { return e.key }",
"func (e HealthCheck_GrpcHealthCheckValidationError) Key() bool { return e.key }",
"func (e SearchResponseValidationError) Key() bool { return e.key }",
"func (e MatcherValidationError) Key() bool { return e.key }",
"func (e MaxofRanparametersValidationError) Key() bool { return e.key }",
"func (e AllowedValuesValidationError) Key() bool { return e.key }",
"func (e PublishRequestValidationError) Key() bool { return e.key }",
"func (e ActiveHealthCheckUnhealthyValidationError) Key() bool { return e.key }",
"func (e MaxEarfcnValidationError) Key() bool { return e.key }",
"func (e JwtProviderValidationError) Key() bool { return e.key }",
"func (e ApplicationPubSubFormatsValidationError) Key() bool { return e.key }",
"func (e ResponseValidationError) Key() bool { return e.key }",
"func (e MaxofRicstylesValidationError) Key() bool { return e.key }",
"func (e SXGValidationError) Key() bool { return e.key }",
"func (e ActiveHealthCheckHealthyValidationError) Key() bool { return e.key }",
"func (k Key) Validate() error {\n\n\t// check method\n\tif err := k.hasValidMethod(); err != nil {\n\t\treturn err\n\t}\n\n\t//check label\n\tif err := k.hasValidLabel(); err != nil {\n\t\treturn err\n\t}\n\n\t// check secret\n\tif err := k.hasValidSecret32(); err != nil {\n\t\treturn err\n\t}\n\n\t// check algo\n\tif err := k.hasValidAlgo(); err != nil {\n\t\treturn err\n\t}\n\n\t// check digits\n\tif err := k.hasValidDigits(); err != nil {\n\t\treturn err\n\t}\n\n\t// check period\n\tif err := k.hasValidPeriod(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (e BitStringValidationError) Key() bool { return e.key }",
"func CheckKey(key string) error {\n\tmsg := \"\"\n\tif key == \"\" {\n\t\tmsg = \"empty key.\"\n\t\tif DEBUG {\n\t\t\tfmt.Println(msg)\n\t\t}\n\t\treturn errors.New(msg)\n\t}\n\n\treturn nil\n}",
"func (e MetricValidationError) Key() bool { return e.key }",
"func (e Response_DataValidationError) Key() bool { return e.key }",
"func ValidKey(key string) bool {\n\treturn len(key) <= maxKey && keyRegex.Match([]byte(key))\n}",
"func (e HttpBodyValidationError) Key() bool { return e.key }",
"func (e MaxPlmnValidationError) Key() bool { return e.key }",
"func (e ChannelPayRequestValidationError) Key() bool { return e.key }",
"func (e JwtHeaderValidationError) Key() bool { return e.key }",
"func (e StatsMatcherValidationError) Key() bool { return e.key }",
"func (e HeaderMatchValidationError) Key() bool { return e.key }",
"func (e JsonToMetadata_MatchRulesValidationError) Key() bool { return e.key }",
"func (e Matcher_OnMatchValidationError) Key() bool { return e.key }",
"func (e RequirementRuleValidationError) Key() bool { return e.key }",
"func (e KafkaNetworkPolicyRuleValidationError) Key() bool { return e.key }",
"func (e MetricsValidationError) Key() bool { return e.key }",
"func (e GetResponseValidationError) Key() bool { return e.key }",
"func (e MessageDValidationError) Key() bool { return e.key }",
"func (e GetMetricsRequestValidationError) Key() bool { return e.key }",
"func (e MetricImplementationValidationError) Key() bool { return e.key }",
"func (e CreateResponseValidationError) Key() bool { return e.key }",
"func (e PassiveHealthCheckValidationError) Key() bool { return e.key }",
"func (e CreatMessageRequestValidationError) Key() bool { return e.key }",
"func (e LoadStatsResponseValidationError) Key() bool { return e.key }",
"func (e CreateRequestValidationError) Key() bool { return e.key }",
"func (e CreateRequestValidationError) Key() bool { return e.key }",
"func (e HelloResponseValidationError) Key() bool { return e.key }",
"func (e JsonToMetadataValidationError) Key() bool { return e.key }",
"func (e ApplicationPubSub_NATSProviderValidationError) Key() bool { return e.key }",
"func (e PassiveHealthCheckUnhealthyValidationError) Key() bool { return e.key }",
"func validateKeyID(keyID string) error {\n\tif len(keyID) == 0 {\n\t\treturn fmt.Errorf(\"keyID is empty\")\n\t}\n\tif len(keyID) > keyIDMaxSize {\n\t\treturn fmt.Errorf(\"keyID is %d bytes, which exceeds the max size of %d\", len(keyID), keyIDMaxSize)\n\t}\n\treturn nil\n}",
"func (e PublishResponseValidationError) Key() bool { return e.key }",
"func (e PassiveHealthCheckHealthyValidationError) Key() bool { return e.key }",
"func (e CalculateComplianceRequestValidationError) Key() bool { return e.key }",
"func (e RanfunctionNameValidationError) Key() bool { return e.key }",
"func (e HttpFilterValidationError) Key() bool { return e.key }",
"func (e StatsdValidationError) Key() bool { return e.key }",
"func (e MaxNrarfcnValidationError) Key() bool { return e.key }",
"func (e GcpComponentValidationError) Key() bool { return e.key }",
"func (e ResponseMapperValidationError) Key() bool { return e.key }",
"func validateMetaKey(key string) bool {\n\treturn metaKeyValidator.MatchString(key)\n}",
"func (e GetObjectSchemasRequestValidationError) Key() bool { return e.key }",
"func (e CreatMessageResponseValidationError) Key() bool { return e.key }",
"func ErrEmptyKey(codespace string) error {\n\treturn fmt.Errorf(\"parameter key is empty\")\n}"
] | [
"0.75002897",
"0.69816774",
"0.690423",
"0.6776137",
"0.6737957",
"0.662673",
"0.65838337",
"0.6549191",
"0.64973724",
"0.64879715",
"0.6456016",
"0.6449215",
"0.64477557",
"0.6446813",
"0.64176035",
"0.6412607",
"0.63904417",
"0.63821954",
"0.63796514",
"0.6375793",
"0.6375384",
"0.63646626",
"0.6359673",
"0.6358784",
"0.6351877",
"0.63501847",
"0.6349836",
"0.6349836",
"0.63375294",
"0.6334598",
"0.63333005",
"0.6325637",
"0.6320088",
"0.6316861",
"0.63156754",
"0.6313896",
"0.6311449",
"0.630972",
"0.6304981",
"0.6303446",
"0.6303446",
"0.62979025",
"0.62970525",
"0.62917036",
"0.62895995",
"0.6287905",
"0.62876356",
"0.62828016",
"0.6279854",
"0.62798244",
"0.6279299",
"0.6273517",
"0.6267332",
"0.626443",
"0.626356",
"0.6263079",
"0.6262099",
"0.6257153",
"0.6250904",
"0.62508774",
"0.6249552",
"0.6246541",
"0.6245884",
"0.62401843",
"0.62390983",
"0.62357223",
"0.62356883",
"0.6235189",
"0.6235166",
"0.6233872",
"0.62322587",
"0.62299",
"0.6228081",
"0.6225832",
"0.6225418",
"0.6222952",
"0.6221065",
"0.6220015",
"0.6218648",
"0.62179583",
"0.62122935",
"0.62122935",
"0.620567",
"0.6205645",
"0.6204482",
"0.62042814",
"0.62011033",
"0.6200594",
"0.6200583",
"0.620017",
"0.6197525",
"0.61958605",
"0.6192996",
"0.6192434",
"0.61854863",
"0.6183311",
"0.6179989",
"0.61710614",
"0.61671025",
"0.61617404"
] | 0.693187 | 2 |
Empty indicates if kv is not set | func (kv BatchKeyRotateKV) Empty() bool {
return kv.Key == "" && kv.Value == ""
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func hasNonEmptyKV(kvMap map[string]string) bool {\n\tfor k, v := range kvMap {\n\t\tif strings.TrimSpace(k) == \"\" && strings.TrimSpace(v) == \"\" {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func equalNilAndEmpty(key, value string, params Parameter) bool {\n if v, ok := params[key]; ok {\n if v == nil {\n return true\n }\n vs := fmt.Sprintf(\"%v\", v)\n return vs == \"\" ||\n vs == \"0\" ||\n (reflect.TypeOf(v).Kind() == reflect.Bool && v.(bool) == false) ||\n vs == \"[]\" ||\n vs == \"map[]\"\n } else {\n return true\n }\n}",
"func (kv BatchJobReplicateKV) Empty() bool {\n\treturn kv.Key == \"\" && kv.Value == \"\"\n}",
"func (store KeyValue) empty() {\n\tswitch store.Type {\n\tcase MEMORY:\n\tcase PERSISTENT:\n\tdefault:\n\t\tpanic(\"Unknown Type\")\n\t}\n}",
"func (b *Builder) Empty() bool { return len(b.keyHashes) == 0 }",
"func (h headerUtil) setIfEmpty(key, value string) {\n\tif v := h.Get(key); len(v) == 0 {\n\t\th.Set(key, value)\n\t}\n}",
"func (m *OrderedMap[K, V]) Empty() bool {\n\treturn m == nil || m.len == 0\n}",
"func (k Key) IsEmpty() bool {\n\treturn len(k) == 0\n}",
"func IsEmptyKey(observe byte) bool {\n\treturn observe == byte(EmptyKey)\n}",
"func IsEmpty(key string) bool {\n\treturn c.IsEmpty(key)\n}",
"func (e EmptyValidationError) Key() bool { return e.key }",
"func (k Kind) Empty() bool {\n\tswitch k {\n\tcase LParen,\n\t\tRParen,\n\t\tSemicolon,\n\t\tPlaceholder:\n\t\treturn true\n\t}\n\treturn false\n}",
"func (n KeyReference[T]) IsEmpty() bool {\n\treturn n.KeyNode == nil\n}",
"func checkGetIsEmpty(store *Store, key string, t *testing.T) {\n\t_, ok, err := store.Get(key)\n\tif err != nil {\n\t\tt.Errorf(\"Expected no error but got : %v\", err)\n\t\tpanic(err)\n\t}\n\tif ok {\n\t\tt.Fatal(\"Expected key to be absent but it was there\")\n\t}\n}",
"func IsSet(key string) bool { return viper.IsSet(key) }",
"func Empty() Optional {\n\treturn emtpy\n}",
"func Empty(t TestingT, v interface{}, extras ...interface{}) bool {\n\tif !IsEmpty(v) {\n\t\tvar acts = \"<nil>\"\n\t\tif v != nil {\n\t\t\t_, acts = toString(reflect.Zero(reflect.TypeOf(v)).Interface(), v)\n\t\t}\n\n\t\treturn Errorf(t, \"Expect to be empty\", []labeledOutput{\n\t\t\t{\n\t\t\t\tlabel: labelMessages,\n\t\t\t\tcontent: formatExtras(extras...),\n\t\t\t},\n\t\t\t{\n\t\t\t\tlabel: \"-expected\",\n\t\t\t\tcontent: fmt.Sprintf(\"(%T)()\", v),\n\t\t\t},\n\t\t\t{\n\t\t\t\tlabel: \"+received\",\n\t\t\t\tcontent: acts,\n\t\t\t},\n\t\t})\n\t}\n\n\treturn true\n}",
"func (e JsonToMetadata_KeyValuePairValidationError) Key() bool { return e.key }",
"func (index *DbIterator) clearKV() {\n\tindex.key = nil\n\tindex.value = nil\n}",
"func failIfEmpty(m map[string]interface{}) error {\n\terrs := make([]string, 0)\n\tfor k, v := range m {\n\t\tif v == \"\" || v == 0 {\n\t\t\terrs = append(errs, k)\n\t\t}\n\t}\n\tif len(errs) != 0 {\n\t\treturn errors.Errorf(\"check QueueCfg, the following fields cannot be empty: %s\", strings.Join(errs, \", \"))\n\t}\n\treturn nil\n}",
"func (t Tag) Valid() error {\n\tif t.Key == \"\" || t.Value == \"\" {\n\t\treturn &influxdb.Error{\n\t\t\tCode: influxdb.EInvalid,\n\t\t\tMsg: \"tag must contain a key and a value\",\n\t\t}\n\t}\n\treturn nil\n}",
"func EmptyMap[K comparable, V any]() *gocrest.Matcher[map[K]V] {\n\tmatcher := new(gocrest.Matcher[map[K]V])\n\tmatcher.Describe = \"empty value\"\n\tmatcher.Matches = func(actual map[K]V) bool {\n\t\treturn len(actual) == 0\n\t}\n\treturn matcher\n}",
"func Empty(t Testing, v interface{}, formatAndArgs ...interface{}) bool {\n\tif !types.IsEmpty(v) {\n\t\treturn Fail(t,\n\t\t\tpretty.Sprintf(\"Expected to be empty, but got: %# v\", v),\n\t\t\tformatAndArgs...)\n\t}\n\n\treturn true\n}",
"func (e Empty) HasDefault() bool {\n\treturn true\n}",
"func (t *Map) Empty() bool {\n\treturn t.keys.Len() == 0\n}",
"func (a *AdvancedScalingConfig[_]) IsEmpty() bool {\n\treturn a.Cooldown.IsEmpty() && a.Value == nil\n}",
"func (txt *DNSSdTxtRecord) IfNotEmpty(key, value string) bool {\n\tif value != \"\" {\n\t\ttxt.Add(key, value)\n\t\treturn true\n\t}\n\treturn false\n}",
"func (that *StrAnyMap) IsEmpty() bool {\n\treturn that.Size() == 0\n}",
"func (t *Map) NotEmpty() bool {\n\treturn t.keys.Len() > 0\n}",
"func (v Value) HasKeys() bool {\n\treturn len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0\n}",
"func (o Args) Empty() bool { return o.Len() == 0 }",
"func (t *RbTree[K, V]) Empty() bool {\n\treturn t.size == 0\n}",
"func (o *OptionalVal[T]) Clear() {\n\tvar v T\n\to.v, o.present = v, false\n}",
"func (t Tag) Valid() bool { return t.Key != nil }",
"func (c *Config) IsEmpty(key string) bool {\n\tval, err := c.get(key)\n\tif err != nil {\n\t\treturn true\n\t}\n\tif !val.IsValid() {\n\t\treturn false\n\t}\n\treturn val.IsZero()\n}",
"func (b *Builder) Empty() bool { return b.sz == 0 }",
"func (p *AssetProperties) Empty() bool {\n\tif p.RenderAs.Value != nil {\n\t\treturn false\n\t}\n\n\tif p.Vector.preserveVectorData != nil {\n\t\treturn false\n\t}\n\n\tif p.Compression.Value != nil {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (l *Llvar) IsEmpty() bool {\n\treturn len(l.Value) == 0\n}",
"func (e ExternalRubyPackageConfigV1) IsEmpty() bool {\n\treturn len(e.Except) == 0 && len(e.Override) == 0\n}",
"func (r *ScalingConfigOrT[_]) IsEmpty() bool {\n\treturn r.ScalingConfig.IsEmpty() && r.Value == nil\n}",
"func (s *pstack) pushKV(str *string, val interface{}) bool {\n\t// if val != nil && str != nil {\n\t// \tfmt.Printf(\"# push( %s, %#v )\\n\", *str, val)\n\t// } else if val != nil {\n\t// \tfmt.Printf(\"# push( %#v )\\n\", val)\n\t// }\n\tif s == nil || len(*s) == 0 {\n\t\tpanic(\"use of un-initialized parser stack\")\n\t}\n\ttos := &(*s)[len(*s)-1]\n\ttos.Values = append(tos.Values, val)\n\tif str != nil {\n\t\tif tos.Keys == nil {\n\t\t\t//panic(\"top-most stack entry should not contain keys\")\n\t\t\treturn false\n\t\t}\n\t\ttos.Keys = append(tos.Keys, *str)\n\t}\n\treturn true\n}",
"func (enc *Encoder) BoolKeyOmitEmpty(key string, v bool) {\n\tif enc.hasKeys {\n\t\tif !enc.keyExists(key) {\n\t\t\treturn\n\t\t}\n\t}\n\tif v == false {\n\t\treturn\n\t}\n\tenc.grow(5 + len(key))\n\tr := enc.getPreviousRune()\n\tif r != '{' {\n\t\tenc.writeByte(',')\n\t}\n\tenc.writeByte('\"')\n\tenc.writeStringEscape(key)\n\tenc.writeBytes(objKey)\n\tenc.buf = strconv.AppendBool(enc.buf, v)\n}",
"func (t T) Zero() bool { return t.cb == nil }",
"func (k ProcKey) Null() bool {\n\treturn k == (ProcKey{})\n}",
"func (enc *Encoder) BoolKeyNullEmpty(key string, v bool) {\n\tif enc.hasKeys {\n\t\tif !enc.keyExists(key) {\n\t\t\treturn\n\t\t}\n\t}\n\tenc.grow(5 + len(key))\n\tr := enc.getPreviousRune()\n\tif r != '{' {\n\t\tenc.writeByte(',')\n\t}\n\tenc.writeByte('\"')\n\tenc.writeStringEscape(key)\n\tenc.writeBytes(objKey)\n\tif v == false {\n\t\tenc.writeBytes(nullBytes)\n\t\treturn\n\t}\n\tenc.buf = strconv.AppendBool(enc.buf, v)\n}",
"func (e ExternalJavaPackagePrefixConfigV1) IsEmpty() bool {\n\treturn e.Default == \"\" &&\n\t\tlen(e.Except) == 0 &&\n\t\tlen(e.Override) == 0\n}",
"func (me TxsdAnimAdditionAttrsAccumulate) IsNone() bool { return me.String() == \"none\" }",
"func (this ActivityStreamsImageProperty) Empty() bool {\n\treturn this.Len() == 0\n}",
"func (hm *HashMap) Empty() bool {\n\treturn hm.Size() == 0\n}",
"func (n NodeReference[T]) IsEmpty() bool {\n\treturn n.KeyNode == nil && n.ValueNode == nil\n}",
"func NoZeroValues(i interface{}, k string) (s []string, es []error) {\n\tif reflect.ValueOf(i).Interface() == reflect.Zero(reflect.TypeOf(i)).Interface() {\n\t\tswitch reflect.TypeOf(i).Kind() {\n\t\tcase reflect.String:\n\t\t\tes = append(es, fmt.Errorf(\"%s must not be empty, got %v\", k, i))\n\t\tcase reflect.Int, reflect.Float64:\n\t\t\tes = append(es, fmt.Errorf(\"%s must not be zero, got %v\", k, i))\n\t\tdefault:\n\t\t\t// this validator should only ever be applied to TypeString, TypeInt and TypeFloat\n\t\t\tpanic(fmt.Errorf(\"can't use NoZeroValues with %T attribute %s\", i, k))\n\t\t}\n\t}\n\treturn\n}",
"func (this *MyStack) Empty() bool {\n\tif len(this.val) > 0 {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (v Variable) Missing() bool {\n\treturn v.Value() == \"\"\n}",
"func (this *MyStack) Empty() bool {\n\tif len(this.v) != 0 {\n\t\treturn false\n\t}\n\treturn true\n}",
"func isEmpty(val interface{}) bool {\n\tif val == \"\" || val == nil {\n\t\treturn true\n\t}\n\treturn false\n}",
"func NotEmpty(t *testing.T, target interface{}) {\n\tif IsEmpty(target) {\n\t\tt.Errorf(\"%v Empty: %v\", line(), target)\n\t}\n}",
"func (s *BaseSyslParserListener) EnterEmpty_tuple(ctx *Empty_tupleContext) {}",
"func (a Attribute) IsZero() bool {\n\treturn a.Key == \"\"\n}",
"func Empty(t *testing.T, target interface{}) {\n\tif !IsEmpty(target) {\n\t\tt.Errorf(\"%v Not Empty: %v\", line(), target)\n\t}\n}",
"func (args *Args) isEmpty() bool {\n\treturn len(args.items) == 0\n}",
"func IsEmpty(in *storage.Policy) bool {\n\treturn in.Bindings == nil\n}",
"func IsEmpty(v interface{}) bool {\n\tif v == nil {\n\t\treturn true\n\t}\n\tswitch t := v.(type) {\n\tcase string:\n\t\treturn t == \"\"\n\tcase []interface{}:\n\t\treturn len(t) == 0\n\tcase map[string]interface{}:\n\t\treturn len(t) == 0\n\tcase template.Context:\n\t\treturn len(t) == 0\n\t}\n\treturn false\n}",
"func (set KeySet) IsEmpty() bool {\n\treturn len(set) == 0\n}",
"func EmptyInit(value bool) EmptyAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"init\"] = value\n\t}\n}",
"func AllowEmpty(c *commitConfig) { c.allowEmpty = true }",
"func OmitEmpty(m map[string]interface{}) {\n\tfor _, key := range []string{\"parent_type\", \"parent_uuid\"} {\n\t\tif v, ok := m[key]; ok && isEmpty(v) {\n\t\t\tdelete(m, key)\n\t\t}\n\t}\n}",
"func (kv KeyValue) Valid() bool {\n\treturn kv.Key.Defined() && kv.Value.Type() != INVALID\n}",
"func Empty(obj string, a ...any) {\n\tif obj != \"\" {\n\t\tdefMsg := assertionMsg + \": string should be empty\"\n\t\tDefault().reportAssertionFault(defMsg, a...)\n\t}\n}",
"func IsEmpty(value *gjson.Result) bool {\n\tif !value.Exists() || value.Type == gjson.Null || strings.TrimSpace(value.String()) == \"\" {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (j Json) Undefined() bool {\n\treturn !j.exists\n}",
"func (e ExternalGoPackagePrefixConfigV1) IsEmpty() bool {\n\treturn e.Default == \"\" &&\n\t\tlen(e.Except) == 0 &&\n\t\tlen(e.Override) == 0\n}",
"func (o *OptionalVal[T]) Present() bool {\n\treturn o.present\n}",
"func Undefined() Val { return Val{t: bsontype.Undefined} }",
"func Zero[T any](t testing.TB, value T, msgAndArgs ...interface{}) {\n\tvar zero T\n\tif objectsAreEqual(value, zero) {\n\t\treturn\n\t}\n\tval := reflect.ValueOf(value)\n\tif (val.Kind() == reflect.Slice || val.Kind() == reflect.Map || val.Kind() == reflect.Array) && val.Len() == 0 {\n\t\treturn\n\t}\n\tt.Helper()\n\tmsg := formatMsgAndArgs(\"Expected a zero value but got:\", msgAndArgs...)\n\tt.Fatalf(\"%s\\n%s\", msg, repr.String(value, repr.Indent(\" \")))\n}",
"func (v *ConfigurationParams) IsNil() bool { return v == nil }",
"func (e ExternalCsharpNamespaceConfigV1) IsEmpty() bool {\n\treturn len(e.Except) == 0 &&\n\t\tlen(e.Override) == 0\n}",
"func HasSingleFlagNonemptyArgument(flag string, params []string) bool {\n\tfound := filterFlags(params, flag)\n\tif len(found) != 1 {\n\t\treturn false\n\t}\n\n\t_, value := splitKV(found[0], \"=\")\n\tif value == \"\" {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (m TxMarker) IsEmpty() bool {\n\tif m.Height == 0 {\n\t\treturn true\n\t}\n\tif len(m.Memo) == 0 {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (j Json) HasValue() bool {\n\treturn j != \"\"\n}",
"func (opts *ListOpts) GetAllOrEmpty() []string {\n v := *opts.values\n if v == nil {\n return make([]string, 0)\n }\n return v\n}",
"func (r RoleARN) Empty() bool {\n\treturn len(r.value) == 0\n}",
"func (clr ConfigurationListResult) IsEmpty() bool {\n return clr.Value == nil || len(*clr.Value) == 0\n }",
"func isEmpty(c yaml.Constraint) bool {\n\treturn len(c.Include) == 0 && len(c.Exclude) == 0\n}",
"func (v *DidChangeConfigurationParams) IsNil() bool { return v == nil }",
"func (lm *LevelMetadata) Empty() bool {\n\treturn lm.tree.Count() == 0\n}",
"func (ai *AppInfo) IsEmpty() bool {\n\tif ai.timestamp == 0 {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (b *AutoscalerScaleDownConfigBuilder) Empty() bool {\n\treturn b == nil || b.bitmap_ == 0\n}",
"func (prv ProductResultValue) IsEmpty() bool {\n\treturn prv.Value == nil || len(*prv.Value) == 0\n}",
"func (e ExternalOptimizeForConfigV1) IsEmpty() bool {\n\treturn e.Default == \"\" &&\n\t\tlen(e.Except) == 0 &&\n\t\tlen(e.Override) == 0\n}",
"func (m *MultiMap) Empty() bool {\n\treturn m.Size() == 0\n}",
"func (w TWarning) Empty() bool {\n\treturn len(w.desc) == 0\n}",
"func IsValueEmpty(v string) bool {\n\tif len(strings.TrimSpace(v)) == 0 {\n\t\treturn true\n\t}\n\treturn false\n}",
"func TestCloudConfigEmpty(t *testing.T) {\n\tcfg, err := NewCloudConfig([]byte{})\n\tif err != nil {\n\t\tt.Fatalf(\"Encountered unexpected error :%v\", err)\n\t}\n\n\tkeys := cfg.SSH_Authorized_Keys\n\tif len(keys) != 0 {\n\t\tt.Error(\"Parsed incorrect number of SSH keys\")\n\t}\n\n\tif cfg.Coreos.Etcd.Discovery_URL != \"\" {\n\t\tt.Error(\"Parsed incorrect value of discovery url\")\n\t}\n\n\tif cfg.Coreos.Fleet.Autostart {\n\t\tt.Error(\"Expected AutostartFleet not to be defined\")\n\t}\n\n\tif len(cfg.Write_Files) != 0 {\n\t\tt.Error(\"Expected zero Write_Files\")\n\t}\n}",
"func (e *Engine) empty() bool {\n\treturn len(e.Orderbook.Asks) == 0 && len(e.Orderbook.Bids) == 0\n}",
"func NotEmpty(t TestingT, v interface{}, extras ...interface{}) bool {\n\tif IsEmpty(v) {\n\t\tvar acts = \"<nil>\"\n\t\tif v != nil {\n\t\t\t_, acts = toString(reflect.Zero(reflect.TypeOf(v)).Interface(), v)\n\t\t}\n\n\t\treturn Errorf(t, \"Expect to be NOT empty\", []labeledOutput{\n\t\t\t{\n\t\t\t\tlabel: labelMessages,\n\t\t\t\tcontent: formatExtras(extras...),\n\t\t\t},\n\t\t\t{\n\t\t\t\tlabel: \"-expected\",\n\t\t\t\tcontent: fmt.Sprintf(\"(%T)(???)\", v),\n\t\t\t},\n\t\t\t{\n\t\t\t\tlabel: \"+received\",\n\t\t\t\tcontent: acts,\n\t\t\t},\n\t\t})\n\t}\n\n\treturn true\n}",
"func noEmptyField(target FormatResult, args ...string) string {\n\tfor _, field := range args {\n\t\tif cur, ok := target[field]; ok && cur != \"\" {\n\t\t\treturn cur\n\t\t}\n\t}\n\treturn \"\"\n}",
"func (s *StateObject) empty() bool {\n\treturn len(s.data.UTXOs) <= 0\n}",
"func (conf blah) Exists(key string) bool {\n\treturn viper.IsSet(key)\n}",
"func (v *ConfigurationItem) IsNil() bool { return v == nil }",
"func anyTagExists(tag reflect.StructTag) bool {\r\n\t_, isEnv := tag.Lookup(\"env\")\r\n\t_, isKv := tag.Lookup(\"kv\")\r\n\t_, isVal := tag.Lookup(\"val\")\r\n\treturn isEnv || isKv || isVal\r\n}"
] | [
"0.6333462",
"0.6198159",
"0.61713",
"0.6114734",
"0.59245336",
"0.5807965",
"0.5784596",
"0.55849326",
"0.55599976",
"0.5501188",
"0.5485564",
"0.54497916",
"0.54161036",
"0.5416033",
"0.537964",
"0.53667325",
"0.53588593",
"0.53375435",
"0.53161156",
"0.52891976",
"0.52875304",
"0.52686566",
"0.52599186",
"0.5244922",
"0.52338964",
"0.52275103",
"0.5226687",
"0.5223648",
"0.5218274",
"0.5214116",
"0.5211135",
"0.5208828",
"0.5197648",
"0.51869994",
"0.5161654",
"0.5146062",
"0.51389164",
"0.51258826",
"0.5110899",
"0.5107692",
"0.51045525",
"0.5103265",
"0.5084319",
"0.5076155",
"0.50717133",
"0.5067223",
"0.5066318",
"0.50661594",
"0.50650084",
"0.50456864",
"0.5026411",
"0.5014823",
"0.4984917",
"0.49829936",
"0.49824008",
"0.49818823",
"0.49816626",
"0.49669287",
"0.4964857",
"0.49580717",
"0.49540657",
"0.49519232",
"0.49477825",
"0.49477434",
"0.4940037",
"0.49391633",
"0.49382618",
"0.4934744",
"0.4928791",
"0.4922129",
"0.4920867",
"0.49157682",
"0.49146533",
"0.4913337",
"0.49107352",
"0.49104843",
"0.49051479",
"0.49019408",
"0.49012816",
"0.4898589",
"0.4892765",
"0.48915806",
"0.48887074",
"0.48860115",
"0.4875273",
"0.48751232",
"0.4872443",
"0.48717165",
"0.48709598",
"0.48682317",
"0.48668635",
"0.486578",
"0.48645556",
"0.486433",
"0.4859762",
"0.48568398",
"0.48486146",
"0.48474264",
"0.48443174",
"0.48352647"
] | 0.65100455 | 0 |
Match matches input kv with kv, value will be wildcard matched depending on the user input | func (kv BatchKeyRotateKV) Match(ikv BatchKeyRotateKV) bool {
if kv.Empty() {
return true
}
if strings.EqualFold(kv.Key, ikv.Key) {
return wildcard.Match(kv.Value, ikv.Value)
}
return false
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Match(goos, kv, key string) (value string, ok bool) {\n\tif len(kv) <= len(key) || kv[len(key)] != '=' {\n\t\treturn \"\", false\n\t}\n\n\tif goos == \"windows\" {\n\t\t// Case insensitive.\n\t\tif !strings.EqualFold(kv[:len(key)], key) {\n\t\t\treturn \"\", false\n\t\t}\n\t} else {\n\t\t// Case sensitive.\n\t\tif kv[:len(key)] != key {\n\t\t\treturn \"\", false\n\t\t}\n\t}\n\n\treturn kv[len(key)+1:], true\n}",
"func (kv BatchJobReplicateKV) Match(ikv BatchJobReplicateKV) bool {\n\tif kv.Empty() {\n\t\treturn true\n\t}\n\tif strings.EqualFold(kv.Key, ikv.Key) {\n\t\treturn wildcard.Match(kv.Value, ikv.Value)\n\t}\n\treturn false\n}",
"func match(got string, pattern *regexp.Regexp, msg string, note func(key string, value interface{})) error {\n\tif pattern.MatchString(got) {\n\t\treturn nil\n\t}\n\n\treturn errors.New(msg)\n}",
"func (f *CompiledFingerprints) matchKeyValueString(key, value string, part part) []string {\n\tvar matched bool\n\tvar technologies []string\n\n\tfor app, fingerprint := range f.Apps {\n\t\tswitch part {\n\t\tcase cookiesPart:\n\t\t\tfor data, pattern := range fingerprint.cookies {\n\t\t\t\tif data != key {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif pattern.MatchString(value) {\n\t\t\t\t\tmatched = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase headersPart:\n\t\t\tfor data, pattern := range fingerprint.headers {\n\t\t\t\tif data != key {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif pattern.MatchString(value) {\n\t\t\t\t\tmatched = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase metaPart:\n\t\t\tfor data, patterns := range fingerprint.meta {\n\t\t\t\tif data != key {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, pattern := range patterns {\n\t\t\t\t\tif pattern.MatchString(value) {\n\t\t\t\t\t\tmatched = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// If no match, continue with the next fingerprint\n\t\tif !matched {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Append the technologies as well as implied ones\n\t\ttechnologies = append(technologies, app)\n\t\tif len(fingerprint.implies) > 0 {\n\t\t\ttechnologies = append(technologies, fingerprint.implies...)\n\t\t}\n\t\tmatched = false\n\t}\n\treturn technologies\n}",
"func (m AllKeysMatcher) Match(key string, attributes map[string]interface{}, bucketingKey *string) bool {\n\treturn true\n}",
"func (f filters) matchAny(k string, v []byte) bool {\n\tfor _, filter := range f {\n\t\tif filter(k, v) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (kt KeyToken) Match(okt KeyToken) bool {\n\tif kt.Tok.IsKeyword() && kt.Key != \"\" {\n\t\treturn kt.Tok.Match(okt.Tok) && kt.Key == okt.Key\n\t}\n\treturn kt.Tok.Match(okt.Tok)\n}",
"func (kl KeyTokenList) Match(okt KeyToken) bool {\n\tfor _, kt := range kl {\n\t\tif kt.Match(okt) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func KeyMatchFunc(args ...interface{}) (interface{}, error) {\n\tname1 := args[0].(string)\n\tname2 := args[1].(string)\n\n\treturn (bool)(KeyMatch(name1, name2)), nil\n}",
"func ExtCaseInsensitiveMatch(mval interface{}, sval map[string]interface{}) bool {\n\tspecif, ok := sval[\"value\"]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tspecval, ok := specif.(string)\n\tif !ok {\n\t\treturn false\n\t}\n\n\tswitch mcast := mval.(type) {\n\tcase string:\n\t\tif strings.ToLower(specval) == strings.ToLower(mcast) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (r *Key) matcher(c *Client) func([]byte) bool {\n\treturn func(b []byte) bool {\n\t\tcr, err := unmarshalKey(b, c, r)\n\t\tif err != nil {\n\t\t\tc.Config.Logger.Warning(\"failed to unmarshal provided resource in matcher.\")\n\t\t\treturn false\n\t\t}\n\t\tnr := r.urlNormalized()\n\t\tncr := cr.urlNormalized()\n\t\tc.Config.Logger.Infof(\"looking for %v\\nin %v\", nr, ncr)\n\n\t\tif nr.Project == nil && ncr.Project == nil {\n\t\t\tc.Config.Logger.Info(\"Both Project fields null - considering equal.\")\n\t\t} else if nr.Project == nil || ncr.Project == nil {\n\t\t\tc.Config.Logger.Info(\"Only one Project field is null - considering unequal.\")\n\t\t\treturn false\n\t\t} else if *nr.Project != *ncr.Project {\n\t\t\treturn false\n\t\t}\n\t\tif nr.Name == nil && ncr.Name == nil {\n\t\t\tc.Config.Logger.Info(\"Both Name fields null - considering equal.\")\n\t\t} else if nr.Name == nil || ncr.Name == nil {\n\t\t\tc.Config.Logger.Info(\"Only one Name field is null - considering unequal.\")\n\t\t\treturn false\n\t\t} else if *nr.Name != *ncr.Name {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n}",
"func (ef *Filter) ExactMatch(key, source string) bool {\n\tfieldValues, ok := ef.filter[key]\n\t//do not filter if there is no filter set or cannot determine filter\n\tif !ok || len(fieldValues) == 0 {\n\t\treturn true\n\t}\n\t// try to match full name value to avoid O(N) regular expression matching\n\treturn fieldValues[source]\n}",
"func (s *setting) Match(exists string) (bool, bool) {\n\tfor _, o := range s.Options {\n\t\tif o == exists {\n\t\t\treturn true, false\n\t\t} else if o == exists+\":\" {\n\t\t\treturn true, true\n\t\t}\n\t}\n\treturn false, false\n}",
"func (f RabinKarp) MatchAll(p string, v []string) Matches {\n\tvar matches Matches\n\tfor _, value := range v {\n\t\tif ok, match := f(p, value); ok {\n\t\t\tmatches = append(matches, match)\n\t\t}\n\t}\n\treturn matches\n}",
"func KeyMatch(key1 string, key2 string) bool {\n\ti := strings.Index(key2, \"*\")\n\tif i == -1 {\n\t\treturn key1 == key2\n\t}\n\n\tif len(key1) > i {\n\t\treturn key1[:i] == key2[:i]\n\t}\n\treturn key1 == key2[:i]\n}",
"func (mux *Mux) match(key muxKey) Handler {\n\t// Check for exact match first.\n\tif r, ok := mux.m[key]; ok {\n\t\treturn r\n\t} else if r, ok := mux.m[muxKey{\"\", key.host, key.path}]; ok {\n\t\treturn r\n\t} else if r, ok := mux.m[muxKey{key.scheme, \"\", key.path}]; ok {\n\t\treturn r\n\t} else if r, ok := mux.m[muxKey{\"\", \"\", key.path}]; ok {\n\t\treturn r\n\t}\n\n\t// Check for longest valid match. mux.es contains all patterns\n\t// that end in / sorted from longest to shortest.\n\tfor _, e := range mux.es {\n\t\tif (e.key.scheme == \"\" || key.scheme == e.key.scheme) &&\n\t\t\t(e.key.host == \"\" || key.host == e.key.host) &&\n\t\t\tstrings.HasPrefix(key.path, e.key.path) {\n\t\t\treturn e.handler\n\t\t}\n\t}\n\treturn nil\n}",
"func Match(path string, key string) bool {\n\tif path == key {\n\t\treturn true\n\t}\n\tif !strings.Contains(path, \"*\") {\n\t\treturn false\n\t}\n\tmatch, err := filepath.Match(path, key)\n\tif err != nil {\n\t\treturn false\n\t}\n\tcountPath := strings.Count(path, \"/\")\n\tcountKey := strings.Count(key, \"/\")\n\treturn match && countPath == countKey\n}",
"func (c Provider) Match(query string) (params []string) {\n\tif sm := SourceRegex.FindStringSubmatch(query); len(sm) > 2 {\n\t\tparams = sm[1:]\n\t}\n\treturn\n}",
"func (a *WhisperAggregation) Match(metric string) *WhisperAggregationItem {\n\tfor _, s := range a.Data {\n\t\tif s.pattern.MatchString(metric) {\n\t\t\treturn s\n\t\t}\n\t}\n\treturn a.Default\n}",
"func (c Provider) Match(query string) (params []string) {\n\tif sm := SourceRegex.FindStringSubmatch(query); len(sm) > 1 {\n\t\tparams = sm[1:]\n\t}\n\treturn\n}",
"func (i Info) Matches(value string) bool {\n\tif strings.Contains(i.Name, value) {\n\t\treturn true\n\t}\n\tif strings.Contains(i.Zone, value) {\n\t\treturn true\n\t}\n\tif strings.Contains(i.AliasTarget, value) {\n\t\treturn true\n\t}\n\tfor _, v := range i.Values {\n\t\tif strings.Contains(v, value) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (r *RegexpKeys) Match(key string) (string, error) {\n\tfor _, re := range r.regexp_keys {\n\t\tif re.CompiledRegexp.Match([]byte(key)) {\n\t\t\treturn re.Name, nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"Could not match key to regex.\")\n}",
"func TestMockKv_Get(t *testing.T) {\n\tt.Run(\"exact match\", func(t *testing.T) {\n\t\tpair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/foo\"),\n\t\t\tValue: []byte(\"1\"),\n\t\t}\n\n\t\tkv := newMockKV()\n\t\tkv.values = map[string]mvccpb.KeyValue{string(pair.Key): pair}\n\t\tres, err := kv.Get(context.Background(), \"/foo\")\n\n\t\trequire.NoError(t, err)\n\t\trequire.Len(t, res.Kvs, 1)\n\t\tassert.Equal(t, []byte(\"/foo\"), res.Kvs[0].Key)\n\t})\n\n\tt.Run(\"not exact match\", func(t *testing.T) {\n\t\tpair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/foo\"),\n\t\t\tValue: []byte(\"1\"),\n\t\t}\n\n\t\tkv := newMockKV()\n\t\tkv.values = map[string]mvccpb.KeyValue{string(pair.Key): pair}\n\t\tres, err := kv.Get(context.Background(), \"/bar\")\n\n\t\trequire.NoError(t, err)\n\t\tassert.Empty(t, res.Kvs)\n\t})\n\n\tt.Run(\"prefix match\", func(t *testing.T) {\n\t\tfooPair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/foo\"),\n\t\t\tValue: []byte(\"1\"),\n\t\t}\n\t\tbazPair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/baz\"),\n\t\t\tValue: []byte(\"2\"),\n\t\t}\n\t\tfirstPair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/first\"),\n\t\t\tValue: []byte(\"3\"),\n\t\t}\n\n\t\tkv := newMockKV()\n\t\tkv.values = map[string]mvccpb.KeyValue{\n\t\t\tstring(fooPair.Key): fooPair,\n\t\t\tstring(bazPair.Key): bazPair,\n\t\t\tstring(firstPair.Key): firstPair,\n\t\t}\n\t\tres, err := kv.Get(context.Background(), \"/f\", clientv3.WithPrefix())\n\n\t\trequire.NoError(t, err)\n\t\tassert.ElementsMatch(t, []*mvccpb.KeyValue{&fooPair, &firstPair}, res.Kvs)\n\t})\n\n\tt.Run(\"empty prefix\", func(t *testing.T) {\n\t\tfooPair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/foo\"),\n\t\t\tValue: []byte(\"1\"),\n\t\t}\n\t\tbazPair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/baz\"),\n\t\t\tValue: []byte(\"2\"),\n\t\t}\n\t\tfirstPair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/first\"),\n\t\t\tValue: []byte(\"3\"),\n\t\t}\n\n\t\tkv := newMockKV()\n\t\tkv.values = map[string]mvccpb.KeyValue{\n\t\t\tstring(fooPair.Key): fooPair,\n\t\t\tstring(bazPair.Key): bazPair,\n\t\t\tstring(firstPair.Key): firstPair,\n\t\t}\n\t\tres, err := kv.Get(context.Background(), \"\", clientv3.WithPrefix())\n\n\t\trequire.NoError(t, err)\n\t\tassert.ElementsMatch(t, []*mvccpb.KeyValue{&fooPair, &bazPair, &firstPair}, res.Kvs)\n\t})\n}",
"func (m *Model) MatchesKey(msg tea.KeyMsg) bool {\n\tif !m.focused || len(m.valueLists) == 0 {\n\t\treturn false\n\t}\n\tcurList := m.valueLists[m.selectedList]\n\tswitch {\n\tcase key.Matches(msg,\n\t\tm.KeyMap.CursorUp,\n\t\tm.KeyMap.CursorDown,\n\t\tm.KeyMap.GoToStart,\n\t\tm.KeyMap.GoToEnd,\n\t\tm.KeyMap.Filter,\n\t\tm.KeyMap.ClearFilter,\n\t\tm.KeyMap.CancelWhileFiltering,\n\t\tm.KeyMap.AcceptWhileFiltering,\n\t\tm.KeyMap.PrevCompletions,\n\t\tm.KeyMap.NextCompletions,\n\t\tm.KeyMap.NextPage,\n\t\tm.KeyMap.PrevPage,\n\t\tm.KeyMap.Abort):\n\t\treturn true\n\tcase !curList.SettingFilter() &&\n\t\tkey.Matches(msg, m.KeyMap.AcceptCompletion):\n\t\treturn true\n\tcase curList.SettingFilter():\n\t\treturn true\n\t}\n\treturn false\n}",
"func (c Provider) Match(query string) (params []string) {\n\tif sm := MirrorsRegex.FindStringSubmatch(query); len(sm) > 1 {\n\t\tparams = sm[1:]\n\t}\n\treturn\n}",
"func metadataContainsValue(m interface{}, path []string, value string) bool {\n\tif len(path) == 0 {\n\t\treturn false\n\t}\n\n\tkey := strings.Title(strings.ToLower(path[0]))\n\n\tif mapData, isMap := m.(map[string]interface{}); isMap {\n\t\t// here we know its a map, but don't know the type of value, so we must check before accessing it\n\t\tv := mapData[key]\n\n\t\t// we will handle both strings and slice of strings here, so create a variable to use in both cases\n\t\ttempSlice := []string{}\n\n\t\tif sliceValue, isSliceString := v.([]string); isSliceString {\n\t\t\ttempSlice = sliceValue\n\t\t} else if stringValue, isString := v.(string); isString {\n\t\t\ttempSlice = []string{stringValue}\n\t\t}\n\n\t\tfor _, val := range tempSlice {\n\t\t\tmatch := strings.Contains(strings.ToLower(val), strings.ToLower(value))\n\n\t\t\tif match {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\t// if value is anything besides a string or slice of string, pass it to another function call with the next key in the path\n\t\treturn metadataContainsValue(v, path[1:], value)\n\t}\n\n\t// if m is not a map, it must be a slice; pass each value in it back to this function with the current key and check return values\n\tif sliceData, isSlice := m.([]interface{}); isSlice {\n\t\tfor _, elem := range sliceData {\n\t\t\tmatch := metadataContainsValue(elem, []string{key}, value)\n\n\t\t\tif match {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}",
"func (f *CompiledFingerprints) matchMapString(keyValue map[string]string, part part) []string {\n\tvar matched bool\n\tvar technologies []string\n\n\tfor app, fingerprint := range f.Apps {\n\t\tswitch part {\n\t\tcase cookiesPart:\n\t\t\tfor data, pattern := range fingerprint.cookies {\n\t\t\t\tvalue, ok := keyValue[data]\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif pattern.MatchString(value) {\n\t\t\t\t\tmatched = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase headersPart:\n\t\t\tfor data, pattern := range fingerprint.headers {\n\t\t\t\tvalue, ok := keyValue[data]\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif pattern.MatchString(value) {\n\t\t\t\t\tmatched = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase metaPart:\n\t\t\tfor data, patterns := range fingerprint.meta {\n\t\t\t\tvalue, ok := keyValue[data]\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, pattern := range patterns {\n\t\t\t\t\tif pattern.MatchString(value) {\n\t\t\t\t\t\tmatched = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// If no match, continue with the next fingerprint\n\t\tif !matched {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Append the technologies as well as implied ones\n\t\ttechnologies = append(technologies, app)\n\t\tif len(fingerprint.implies) > 0 {\n\t\t\ttechnologies = append(technologies, fingerprint.implies...)\n\t\t}\n\t\tmatched = false\n\t}\n\treturn technologies\n}",
"func (f *StringSetFilter) ItemMatch(pattern string) *StringSetFilter {\r\n\tf.AddValidator(func(paramName string, paramValue []string) *Error {\r\n\t\tre, err := regexp.Compile(pattern)\r\n\t\tif err != nil {\r\n\t\t\treturn NewError(ErrorInternalError, paramName, \"InvalidValidator\")\r\n\t\t}\r\n\t\tfor _, v := range paramValue {\r\n\t\t\tif !re.MatchString(v) {\r\n\t\t\t\treturn NewError(ErrorInvalidParam, paramName, \"ItemWrongFormat\")\r\n\t\t\t}\r\n\t\t}\r\n\t\treturn nil\r\n\t})\r\n\treturn f\r\n}",
"func (vb *Builder) Match(fieldName string, str1, str2 interface{}) {\n\tif str1 != str2 {\n\t\tvb.Append(fieldName, doesNotMatch)\n\t}\n}",
"func (a AnyArgument) Match(v driver.Value) bool {\n\treturn true\n}",
"func (a AnyArgument) Match(v driver.Value) bool {\n\treturn true\n}",
"func (t *TST) Match(p string) []string {\n\tif p == \"\" {\n\t\treturn nil\n\t}\n\t// if p has no matching, just find the string\n\tif !strings.ContainsAny(p, \"*_\") {\n\t\tif t.Find(p) {\n\t\t\treturn []string{p}\n\t\t}\n\t\treturn nil\n\t}\n\t// when matching for \"word*\" use prefix instead since it's cheaper\n\tif idx := strings.LastIndex(p, \"*\"); strings.Count(p, \"*\") == 1 && idx == len(p)-1 {\n\t\treturn t.Prefix(p[:idx])\n\t}\n\tmatches := []string{}\n\tt.root.rmatch(p, \"\", &matches)\n\tif len(matches) > 0 {\n\t\treturn matches\n\t}\n\treturn nil\n}",
"func PrefixMatch(key string) (res []interface{}) {\n\tglobalStore.RLock()\n\tdefer globalStore.RUnlock()\n\n\tfor k, v := range globalStore.store {\n\t\tif strings.HasPrefix(k, key) {\n\t\t\tres = append(res, v)\n\t\t}\n\t}\n\n\treturn\n}",
"func (s Selection) Match(msg Event) (bool, bool) {\n\tfor _, v := range s.N {\n\t\tval, ok := msg.Select(v.Key)\n\t\tif !ok {\n\t\t\treturn false, false\n\t\t}\n\t\tswitch vt := val.(type) {\n\t\tcase float64:\n\t\t\t// JSON numbers are all by spec float64 values\n\t\t\tif !v.Pattern.NumMatch(int(vt)) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tcase int:\n\t\t\t// JSON numbers are all by spec float64 values\n\t\t\tif !v.Pattern.NumMatch(vt) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tcase int64:\n\t\t\t// JSON numbers are all by spec float64 values\n\t\t\tif !v.Pattern.NumMatch(int(vt)) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tcase int32:\n\t\t\t// JSON numbers are all by spec float64 values\n\t\t\tif !v.Pattern.NumMatch(int(vt)) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tcase uint:\n\t\t\t// JSON numbers are all by spec float64 values\n\t\t\tif !v.Pattern.NumMatch(int(vt)) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tcase uint32:\n\t\t\t// JSON numbers are all by spec float64 values\n\t\t\tif !v.Pattern.NumMatch(int(vt)) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tcase uint64:\n\t\t\t// JSON numbers are all by spec float64 values\n\t\t\tif !v.Pattern.NumMatch(int(vt)) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\t}\n\t}\n\tfor _, v := range s.S {\n\t\tval, ok := msg.Select(v.Key)\n\t\tif !ok {\n\t\t\treturn false, false\n\t\t}\n\t\tswitch vt := val.(type) {\n\t\tcase string:\n\t\t\tif !v.Pattern.StringMatch(vt) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tcase float64:\n\t\t\t// TODO - tmp hack that also loses floating point accuracy\n\t\t\tif !v.Pattern.StringMatch(strconv.Itoa(int(vt))) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tdefault:\n\t\t\ts.incrementMismatchCount()\n\t\t\treturn false, true\n\t\t}\n\t}\n\treturn true, true\n}",
"func (c *TimerCond) Match(t *TimerRecord) bool {\n\tif val, ok := c.ID.Get(); ok && t.ID != val {\n\t\treturn false\n\t}\n\n\tif val, ok := c.Namespace.Get(); ok && t.Namespace != val {\n\t\treturn false\n\t}\n\n\tif val, ok := c.Key.Get(); ok {\n\t\tif c.KeyPrefix && !strings.HasPrefix(t.Key, val) {\n\t\t\treturn false\n\t\t}\n\n\t\tif !c.KeyPrefix && t.Key != val {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func (f *Filter) Match(key [KeySize]byte, data []byte) bool {\n\t// Create a filter bitstream.\n\tb := newBitReader(f.filterNData[4:])\n\n\t// Hash our search term with the same parameters as the filter.\n\tk0 := binary.LittleEndian.Uint64(key[0:8])\n\tk1 := binary.LittleEndian.Uint64(key[8:16])\n\tterm := siphash.Hash(k0, k1, data) % f.modulusNP\n\n\t// Go through the search filter and look for the desired value.\n\tvar lastValue uint64\n\tfor lastValue < term {\n\t\t// Read the difference between previous and new value from\n\t\t// bitstream.\n\t\tvalue, err := f.readFullUint64(&b)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\t// Add the previous value to it.\n\t\tvalue += lastValue\n\t\tif value == term {\n\t\t\treturn true\n\t\t}\n\n\t\tlastValue = value\n\t}\n\n\treturn false\n}",
"func RegexMatch(key1 string, key2 string) bool {\n\tres, err := regexp.MatchString(key2, key1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn res\n}",
"func RegexMatch(key1 string, key2 string) bool {\n\tres, err := regexp.MatchString(key2, key1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn res\n}",
"func sinkMatch(r *types.SinkRequest, c eventbus.ConstraintMatcher) bool {\n\tswitch c.Name() {\n\tcase \"request.remoteAddress\":\n\t\treturn c.Match(r.RemoteAddr)\n\tcase \"request.method\":\n\t\treturn c.Match(r.Method)\n\tcase \"request.path\":\n\t\t// match path, right side of \"/sink\"\n\t\treturn c.Match(r.Path)\n\tcase \"request.username\":\n\t\treturn c.Match(r.Username)\n\tcase \"request.password\":\n\t\treturn c.Match(r.Password)\n\tcase \"request.content-type\":\n\t\treturn c.Match(r.Header.Get(\"content-type\"))\n\t}\n\n\t// Dynamically check matcher name if it contains request.(get|post|header).*\n\t// and use value for matcher:\n\t//\n\t// to match \"&foo=bar\" in URL string use .where('request.get.foo', 'bar')\n\t//\n\t// It only matches first value (get, post and header can have multiple values)\n\n\tif strings.HasPrefix(c.Name(), sinkMatchRequestGet) {\n\t\treturn c.Match(r.Query.Get(c.Name()[len(sinkMatchRequestGet):]))\n\t}\n\n\tif strings.HasPrefix(c.Name(), sinkMatchRequestPost) {\n\t\treturn c.Match(r.PostForm.Get(c.Name()[len(sinkMatchRequestPost):]))\n\t}\n\n\tif strings.HasPrefix(c.Name(), sinkMatchRequestHeader) {\n\t\treturn c.Match(r.Header.Get(c.Name()[len(sinkMatchRequestHeader):]))\n\t}\n\n\treturn true\n}",
"func (a *Arg) Match(arg string) bool {\n\tswitch {\n\tcase a.ShortName != \"\" && a.ShortName == arg:\n\t\treturn true\n\tcase a.LongName != \"\" && a.LongName == arg:\n\t\treturn true\n\t}\n\treturn false\n}",
"func (f *Flow) MatchString(key string, predicate getter.StringPredicate) bool {\n\tif s, err := f.GetFieldString(key); err == nil {\n\t\treturn predicate(s)\n\t}\n\treturn false\n}",
"func (m EqualsMatcher) Match(s string) bool {\n\tfor _, term := range m.list {\n\t\tif term == s {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (set Set) Match(caps [][]string) []string {\n\tif set == nil {\n\t\treturn nil\n\t}\nanyof:\n\tfor _, andList := range caps {\n\t\tfor _, cap := range andList {\n\t\t\tif _, ok := set[cap]; !ok {\n\t\t\t\tcontinue anyof\n\t\t\t}\n\t\t}\n\t\treturn andList\n\t}\n\t// match anything\n\treturn nil\n}",
"func match(path, pattern string, vars ...interface{}) bool {\n\tregex := mustCompileCached(pattern)\n\tmatches := regex.FindStringSubmatch(path)\n\tif len(matches) <= 0 {\n\t\treturn false\n\t}\n\tfor i, match := range matches[1:] {\n\t\tswitch p := vars[i].(type) {\n\t\tcase *string:\n\t\t\t*p = match\n\t\tcase *int:\n\t\t\tn, err := strconv.Atoi(match)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t*p = n\n\t\tdefault:\n\t\t\tpanic(\"vars must be *string or *int\")\n\t\t}\n\t}\n\treturn true\n}",
"func (m ValueMatcher) Match(got reflect.Value, d data.Data, _ Region) (data.Data, bool) {\n\tif m.Type != got.Type() {\n\t\treturn d, false\n\t}\n\treturn d, m.Value == got.Interface()\n}",
"func (*Privilege) Match(toks sayori.Toks) (string, bool) {\n\talias, ok := toks.Get(0)\n\tif !ok {\n\t\treturn \"\", false\n\t}\n\talias = strings.ToLower(alias)\n\n\tfor _, validAlias := range []string{\"p\", \"priv\", \"privileged\"} {\n\t\tif alias == validAlias {\n\t\t\treturn alias, true\n\t\t}\n\t}\n\treturn \"\", false\n}",
"func (v *Value) Match(expr string) bool {\n\t// Compile the regular expression.\n\tre, err := v.script.compileRegexp(expr)\n\tif err != nil {\n\t\treturn false // Fail silently\n\t}\n\n\t// Return true if the expression matches the value, interpreted as a\n\t// string.\n\tloc := re.FindStringIndex(v.String())\n\tif loc == nil {\n\t\tv.script.RStart = 0\n\t\tv.script.RLength = -1\n\t\treturn false\n\t}\n\tv.script.RStart = loc[0] + 1\n\tv.script.RLength = loc[1] - loc[0]\n\treturn true\n}",
"func Test_CheckParam(t *testing.T) {\n\n\t//Validate mode\n\n\tat := []string{\"Alphanumeric\", \"Alpha\"}\n\tbt := []string{\"Alphanumeric\", \"Alpha\"}\n\tct := []string{\"Alphanumeric\", \"Alpha\"}\n\tdt := []string{\"Numeric\"}\n\ttarget := map[string][]string{\n\t\t\"a\": at,\n\t\t\"b\": bt,\n\t\t\"c\": ct,\n\t\t\"d\": dt,\n\t}\n\n\t//Test set 1\n\tstandardOutput := make(map[string]string)\n\n\tstandardOutput[\"c\"] = \"[Check catal#yst123 with Alphanumeric failed][Check catal#yst123 with Alpha failed]\"\n\tstandardOutput[\"d\"] = \"[Check 81927l39824 with Numeric failed]\"\n\n\ta := []string{\"apple\", \"applause\"}\n\tb := []string{\"banana\", \"balista\"}\n\tc := []string{\"catherine\", \"catal#yst123\"}\n\td := []string{\"432\", \"301\", \"81927l39824\"}\n\n\tx := map[string][]string{\n\t\t\"a\": a,\n\t\t\"b\": b,\n\t\t\"c\": c,\n\t\t\"d\": d,\n\t}\n\n\tkeys := []string{\"a\", \"b\", \"c\", \"d\"}\n\n\t//Simulate input []interface with getQueryValue().\n\t//This is the param we get when parsing the GET\n\n\tinput := getQueryValue(x, keys)\n\n\t//fmt.Println(\"input: \", x)\n\t//fmt.Println(\"filter: \", target)\n\t_, detail := CheckParam(*input, target)\n\n\tfmt.Println(\"Result: \", detail)\n\n\tassert.Equal(t, detail, standardOutput, \"The two words should be the same.\")\n\n\t//Test set 2\n\n\tstandardOutput = make(map[string]string)\n\n\tstandardOutput[\"d\"] = \"[Check monopolosomeplace.com with Email failed]\"\n\n\ta = []string{\"op.gg\", \"www.yahoo.com.tw\"}\n\tb = []string{\"banana\", \"balista\"}\n\tc = []string{\"catherine\", \"catalyst\"}\n\td = []string{\"tig4605246@gmail.com\", \"monopolosomeplace.com\"}\n\n\tx = map[string][]string{\n\t\t\"a\": a,\n\t\t\"b\": b,\n\t\t\"c\": c,\n\t\t\"d\": d,\n\t}\n\n\tkeys = []string{\"a\", \"b\", \"c\", \"d\"}\n\n\t//Simulate input []interface with getQueryValue().\n\t//This is the param we get when parsing the GET\n\n\tinput = getQueryValue(x, keys)\n\n\tdt2 := []string{\"Email\"}\n\tat2 := []string{\"DNS\"}\n\ttarget[\"a\"] = at2\n\ttarget[\"d\"] = dt2\n\t//fmt.Println(\"input: \", x)\n\t//fmt.Println(\"filter: \", target)\n\t_, detail = CheckParam(*input, target)\n\t//fmt.Println(\"Result: \",detail,\"\\nexpected: \",standardOutput)\n\tassert.Equal(t, detail, standardOutput, \"The two words should be the same.\")\n\n}",
"func (k *KeyHandler) ExactPathMatch(pathA string, pathB string) bool {\n\treturn pathA == pathB\n}",
"func (r TargetRule) matches(target map[string]string, username, hostname string) bool {\n\tfor k, v := range r {\n\t\tv = strings.ReplaceAll(v, OwnUser, username)\n\t\tv = strings.ReplaceAll(v, OwnHost, hostname)\n\n\t\tif target[k] != v {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func (m *Matcher) Match(name string, attrs []string) bool {\n\tif _, ok := m.names[name]; ok {\n\t\treturn true\n\t}\n\tfor _, g := range m.globs {\n\t\tif g.MatchString(name) {\n\t\t\treturn true\n\t\t}\n\t}\n\tfor _, e := range m.exprs {\n\t\tif e.Matches(attrs) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (rule Rule) MatchRule(input string) (api.TaggedMetric, bool) {\n\ttagSet := extractTagValues(rule.graphitePatternRegex, rule.graphitePatternTags, input)\n\tif tagSet == nil {\n\t\treturn api.TaggedMetric{}, false\n\t}\n\tinterpolatedKey, err := interpolateTags(rule.raw.MetricKeyPattern, tagSet, false)\n\tif err != nil {\n\t\treturn api.TaggedMetric{}, false\n\t}\n\t// Do not output tags appearing in both graphite metric & metric key.\n\t// for exmaple, if graphite metric is\n\t// `foo.%a%.%b%`\n\t// and metric key is\n\t// `bar.%b%`\n\t// the resulting tag set should only contain {a} after the matching\n\t// because %b% is already encoded.\n\tfor _, metricKeyTag := range rule.metricKeyTags {\n\t\tif _, containsKey := tagSet[metricKeyTag]; containsKey {\n\t\t\tdelete(tagSet, metricKeyTag)\n\t\t}\n\t}\n\treturn api.TaggedMetric{\n\t\tapi.MetricKey(interpolatedKey),\n\t\ttagSet,\n\t}, true\n}",
"func (k *Key) In(defaultVal string, candidates []string) string {\n\tval := k.String()\n\tfor _, cand := range candidates {\n\t\tif val == cand {\n\t\t\treturn val\n\t\t}\n\t}\n\treturn defaultVal\n}",
"func (m ContainsMatcher) Match(s string) bool {\n\tfor _, term := range m.list {\n\t\tif strings.Contains(s, term) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func mappingUrlKeystoValues(postback *Pbo) {\n\tmatchingIndexes := argumentPattern.FindStringIndex(postback.Url)\n\tfor matchingIndexes != nil {\n\t\tpatternMatch := argumentPattern.FindString(postback.Url)\n\t\tmatchString := patternMatch[1:(len(patternMatch) - 1)]\n\t\treplaceString, keyHasValue := postback.Data[matchString]\n\t\tif !keyHasValue {\n\t\t\treplaceString = MISMATCH_KEY_VALUE_URL\n\t\t\tpostback.Data[matchString] = MISMATCH_KEY_VALUE_URL\n\t\t}\n\t\tpostback.Url = postback.Url[:matchingIndexes[0]] + replaceString + postback.Url[matchingIndexes[1]:]\n\t\tmatchingIndexes = argumentPattern.FindStringIndex(postback.Url)\n\t}\n}",
"func apiKeyMatcher(key string) (string, bool) {\n\tswitch key {\n\tcase \"Api_key\", \"api_key\":\n\t\treturn key, true\n\tdefault:\n\t\treturn key, false\n\t}\n}",
"func TestMatchByPrefix(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tpcc := buildTestPrefixConfigMap()\n\ttestData := []struct {\n\t\tkey proto.Key\n\t\texpConfig interface{}\n\t}{\n\t\t{proto.KeyMin, config1},\n\t\t{proto.Key(\"\\x01\"), config1},\n\t\t{proto.Key(\"/db\"), config1},\n\t\t{proto.Key(\"/db1\"), config2},\n\t\t{proto.Key(\"/db1/a\"), config2},\n\t\t{proto.Key(\"/db1/table1\"), config3},\n\t\t{proto.Key(\"/db1/table\\xff\"), config3},\n\t\t{proto.Key(\"/db2\"), config1},\n\t\t{proto.Key(\"/db3\"), config4},\n\t\t{proto.Key(\"/db3\\xff\"), config4},\n\t\t{proto.Key(\"/db5\"), config1},\n\t\t{proto.Key(\"/xfe\"), config1},\n\t\t{proto.Key(\"/xff\"), config1},\n\t}\n\tfor i, test := range testData {\n\t\tpc := pcc.MatchByPrefix(test.key)\n\t\tif test.expConfig != pc.Config {\n\t\t\tt.Errorf(\"%d: expected config %v for %q; got %v\", i, test.expConfig, test.key, pc.Config)\n\t\t}\n\t}\n}",
"func (c *condition) match(v string) bool {\n\tif c.excludes(v) {\n\t\treturn false\n\t}\n\tif c.includes(v) {\n\t\treturn true\n\t}\n\tif len(c.Include) == 0 {\n\t\treturn true\n\t}\n\treturn false\n}",
"func Match(prefix string) string {\n\tfor _, enc := range defaultEncodings {\n\t\thint := enc.Match(prefix)\n\t\tif hint != \"\" {\n\t\t\treturn hint\n\t\t}\n\t}\n\treturn \"\"\n}",
"func (ps *Segment) Match(s string) (name string, capture bool, wildcard bool, matches bool) {\n\tif ps.IsWildcard {\n\t\twildcard = true\n\t\tmatches = true\n\t\treturn\n\t}\n\tif ps.IsVariable {\n\t\tname = ps.Name\n\t\tcapture = true\n\t\tmatches = true\n\t\treturn\n\t}\n\tif strings.EqualFold(s, ps.Name) {\n\t\tmatches = true\n\t\treturn\n\t}\n\treturn\n}",
"func (p *Path) Match(path string) *Match {\n\tvar match = &Match{\n\t\tValues: make(map[string]string),\n\t}\n\n\tfor _, part := range p.parts {\n\t\tif len(path) < 1 {\n\t\t\treturn nil\n\t\t}\n\n\t\tif path[0] != '/' {\n\t\t\treturn nil\n\t\t}\n\t\t// prefix /\n\t\tpath = path[1:]\n\n\t\tmatched, key, value, length := part.match(path)\n\n\t\t//log.Printf(\"%#v == %v (%d) %s\", part, matched, length, value)\n\n\t\tif !matched {\n\t\t\treturn nil\n\t\t}\n\n\t\tif key != \"\" {\n\t\t\tmatch.Values[key] = value\n\t\t}\n\t\tpath = path[length:]\n\t}\n\n\tif len(path) > 0 && path != \"/\" {\n\t\treturn nil\n\t}\n\n\treturn match\n}",
"func (k *KeyHandler) WildcardPathMatch(pathA string, pathB string) bool {\n\tsegsA := strings.Split(pathA, \"/\")\n\tsegsB := strings.Split(pathB, \"/\")\n\n\tmatch := true\n\n\tfor i, seg := range segsA {\n\t\tif i > (len(segsB) - 1) {\n\t\t\treturn false\n\t\t}\n\n\t\tif segsB[i] != seg && seg != \"*\" {\n\t\t\tmatch = false\n\t\t}\n\t}\n\n\treturn match\n}",
"func (s *Plugin) Match(ctx context.Context, request *interact.Request, condition *v1alpha1.MockAPI_Condition) (match bool, err error) {\n\tsimple := condition.GetSimple()\n\tif simple == nil {\n\t\treturn false, nil\n\t}\n\tc := core.NewContext(request)\n\tfor _, item := range simple.Items {\n\t\toperandX := core.Render(c, item.OperandX)\n\t\toperandY := core.Render(c, item.OperandY)\n\t\tmatched, err := core.Match(operandX, item.Operator, operandY)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif item.Opposite {\n\t\t\tmatched = !matched\n\t\t}\n\t\tif matched {\n\t\t\tif simple.UseOrAmongItems {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\tcontinue\n\t\t} else {\n\t\t\tif simple.UseOrAmongItems {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}\n\t}\n\treturn true, nil\n}",
"func (n *tnode) rmatch(pat, prefix string, m *[]string) {\n\tif n == nil {\n\t\treturn\n\t}\n\tc := pat[0]\n\tif c == '_' || c < n.c {\n\t\tn.lokid.rmatch(pat, prefix, m)\n\t}\n\tif c == '_' || c == n.c {\n\t\tif n.val != nil && len(pat)-1 == 0 {\n\t\t\t*m = append(*m, prefix+string(n.c))\n\t\t}\n\t\tif len(pat)-1 > 0 {\n\t\t\tn.eqkid.rmatch(pat[1:], prefix+string(n.c), m)\n\t\t}\n\t}\n\tif c == '_' || c > n.c {\n\t\tn.hikid.rmatch(pat, prefix, m)\n\t}\n}",
"func searchExact(w http.ResponseWriter, r *http.Request, db *mgo.Database, argPos int) {\n\tkey := r.FormValue(\"key\")\n\tval := r.FormValue(\"val\")\n\n\tcontext := make([]appResult, 0, 10)\n\tvar res *appResult\n\n\tc := db.C(\"machines\")\n\tvar usePath bool\n\tif key == \"apps.path\" {\n\t\tusePath = true\n\t}\n\n\terr := c.Find(bson.M{key: val}).\n\t\tSelect(bson.M{\n\t\t\"hostname\": 1,\n\t\t\"apps\": 1,\n\t\t\"_id\": 1}).\n\t\tSort(\"hostname\").\n\t\tFor(&res, func() error {\n\t\tres.Apps = filter_apps(val, res.Apps, usePath)\n\t\tcontext = append(context, *res)\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\tset.ExecuteTemplate(w, \"searchresults\", context)\n}",
"func keyExistsInArray(key string, value interface{}, log logr.Logger) (invalidType bool, keyExists bool) {\n\tswitch valuesAvailable := value.(type) {\n\tcase []interface{}:\n\t\tfor _, val := range valuesAvailable {\n\t\t\tif wildcard.Match(fmt.Sprint(val), key) || wildcard.Match(key, fmt.Sprint(val)) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\t}\n\n\tcase string:\n\t\tif wildcard.Match(valuesAvailable, key) {\n\t\t\treturn false, true\n\t\t}\n\n\t\tvar arr []string\n\t\tif err := json.Unmarshal([]byte(valuesAvailable), &arr); err != nil {\n\t\t\tlog.Error(err, \"failed to unmarshal value to JSON string array\", \"key\", key, \"value\", value)\n\t\t\treturn true, false\n\t\t}\n\n\t\tfor _, val := range arr {\n\t\t\tif key == val {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\tinvalidType = true\n\t\treturn\n\t}\n\n\treturn false, false\n}",
"func MatchPatterns(p Pattern, v string) error {\n\tswitch {\n\tcase p == \"\": // No pattern is specified.\n\t\treturn nil\n\n\tcase strings.HasPrefix(string(p), \"const:\"):\n\t\tw := string(p[len(\"const:\"):])\n\t\tif w != v {\n\t\t\treturn fmt.Errorf(\"const not matched: %q %q\", p, v)\n\t\t}\n\t\treturn nil\n\n\tcase strings.HasPrefix(string(p), \"pattern:\"):\n\t\tw := string(p[len(\"pattern:\"):])\n\t\tif matchSuffix(w, v) != nil {\n\t\t\treturn fmt.Errorf(\"pattern not matched: %q %q\", p, v)\n\t\t}\n\t\treturn nil\n\n\tcase strings.HasPrefix(string(p), \"split_pattern:\"):\n\t\tws := strings.Split(string(p[len(\"split_pattern:\"):]), \";\")\n\t\tfor _, w := range ws {\n\t\t\tif matchSuffix(w, v) == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"split_pattern not matched: %q %q\", p, v)\n\n\tdefault:\n\t\treturn fmt.Errorf(\"unkown pattern\")\n\t}\n}",
"func processFilter(keys []string, filter []string) ([]string, bool) {\n\tvar vpps []string\n\tif len(filter) > 0 {\n\t\t// Ignore all parameters but first\n\t\tvpps = strings.Split(filter[0], \",\")\n\t} else {\n\t\t// Show all if there is no filter\n\t\tvpps = keys\n\t}\n\tvar isData bool\n\t// Find at leas one match\n\tfor _, key := range keys {\n\t\tfor _, vpp := range vpps {\n\t\t\tif key == vpp {\n\t\t\t\tisData = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn vpps, isData\n}",
"func (n *Node) Match(u string) (interface{}, map[string]string, bool) {\n\tu, err := url.QueryUnescape(u)\n\n\tif err != nil {\n\t\treturn nil, nil, false\n\t}\n\n\treturn n.match(map[string]string{}, strings.Split(checkURL(u), \"/\")[1:])\n}",
"func (in InHandler) Evaluate(key, value interface{}) bool {\n\tswitch typedKey := key.(type) {\n\tcase string:\n\t\treturn in.validateValueWithStringPattern(typedKey, value)\n\tcase int, int32, int64, float32, float64, bool:\n\t\treturn in.validateValueWithStringPattern(fmt.Sprint(typedKey), value)\n\tcase []interface{}:\n\t\tvar stringSlice []string\n\t\tfor _, v := range typedKey {\n\t\t\tstringSlice = append(stringSlice, v.(string))\n\t\t}\n\t\treturn in.validateValueWithStringSetPattern(stringSlice, value)\n\tdefault:\n\t\tin.log.V(2).Info(\"Unsupported type\", \"value\", typedKey, \"type\", fmt.Sprintf(\"%T\", typedKey))\n\t\treturn false\n\t}\n}",
"func whereLabelMatches(label, pattern string, in interface{}) ([]interface{}, error) {\n\trx, err := regexp.Compile(pattern)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn whereLabel(\"whereLabelMatches\", in, label, func(value string, ok bool) bool {\n\t\treturn ok && rx.MatchString(value)\n\t})\n}",
"func matchAction(ivr string) (string, error) {\n\tinput_words := strings.Split(strings.ToLower(ivr), \" \")\n\tactions := map[string]string{\n\t\t\"on\": \"On\",\n\t\t\"off\": \"Off\",\n\t}\n\n\tfor key, value := range actions {\n\t\tif contains(input_words, key) {\n\t\t\treturn value, nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"no matching action\")\n}",
"func MatchVarsRegexp(path string, url string) (bool, bool, []string, []string) {\n\tmatch := true\n\tnext := false\n\tkeys := []string{}\n\tvalues := []string{}\n\tu := 0\n\tp := 0\n\tfor {\n\t\tif url[u] == path[p] {\n\t\t} else {\n\t\t\tif path[p] != braceStart {\n\t\t\t\tmatch = false\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tkey := \"\"\n\t\t\t\treg := \"\"\n\t\t\t\tfor {\n\t\t\t\t\tp++\n\t\t\t\t\tif path[p] == coron {\n\t\t\t\t\t\tkeys = append(keys, key)\n\t\t\t\t\t\treg, p = forwardPoint(path, p)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif path[p] == braceEnd {\n\t\t\t\t\t\tkeys = append(keys, key)\n\t\t\t\t\t\tp++\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif !(p < len(path)) {\n\t\t\t\t\t\tkeys = append(keys, key)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tkey = key + string(path[p])\n\t\t\t\t}\n\t\t\t\tvalue := \"\"\n\t\t\t\tfor {\n\t\t\t\t\tif url[u] == slash {\n\t\t\t\t\t\tvalues = append(values, value)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tvalue = value + string(url[u])\n\t\t\t\t\tu++\n\t\t\t\t\tif !(u < len(url)) {\n\t\t\t\t\t\tvalues = append(values, value)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif reg != \"\" {\n\t\t\t\t\tre := regexp.MustCompile(reg)\n\t\t\t\t\tif !re.MatchString(value) {\n\t\t\t\t\t\tmatch = false\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tu++\n\t\tp++\n\t\tif p < len(path) && u < len(url) {\n\t\t} else if !(p < len(path)) && !(u < len(url)) {\n\t\t\tbreak\n\t\t} else if !(p < len(path)) && u < len(url) {\n\t\t\tnext = true\n\t\t\tbreak\n\t\t} else {\n\t\t\tmatch = false\n\t\t\tbreak\n\t\t}\n\t}\n\treturn match, next, keys, values\n}",
"func (k *VRFKey) Match(srcIP net.IP, cidr *net.IPNet) bool {\n\treturn k.SourceIP.String() == srcIP.String() && k.DestCIDR.String() == cidr.String()\n}",
"func (p *Policy) Match(policy Policy) *Policy {\n\t// Exact or all op\n\t// *resource, resource*, *resource*\n\tif (p.Op == policy.Op || p.Op == OpAll) &&\n\t\t(p.Resource == policy.Resource || p.Resource == \"*\" ||\n\t\t\t(p.Resource[0] == '*' && strings.HasSuffix(policy.Resource, p.Resource[1:])) ||\n\t\t\t(p.Resource[len(p.Resource)-1] == '*' && strings.HasPrefix(policy.Resource, p.Resource[:len(p.Resource)-1])) ||\n\t\t\t(p.Resource[0] == '*' && p.Resource[len(p.Resource)-1] == '*' && strings.Contains(policy.Resource, p.Resource[1:len(p.Resource)-1]))) {\n\n\t\treturn p\n\t}\n\treturn nil\n}",
"func (r *Request) MatchParam(key, value string) *Request {\n\tquery := r.URLStruct.Query()\n\tquery.Set(key, value)\n\tr.URLStruct.RawQuery = query.Encode()\n\treturn r\n}",
"func (s Set) Match(value string) bool {\n\tfor i := range s {\n\t\tif s[i].Match(value) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func matchPattern(pattern, name string) (matched bool) {\n\tif pattern == \"\" {\n\t\treturn name == pattern\n\t}\n\tif pattern == \"*\" {\n\t\treturn true\n\t}\n\trName, rPattern := make([]rune, 0, len(name)), make([]rune, 0, len(pattern))\n\tfor _, r := range name {\n\t\trName = append(rName, r)\n\t}\n\tfor _, r := range pattern {\n\t\trPattern = append(rPattern, r)\n\t}\n\treturn deepMatchRune(rName, rPattern, false)\n}",
"func (s StreamID) Match(pattern string) bool {\n\treturn wildcard.MatchSimple(pattern, s.str)\n}",
"func (mg *MultiGlob) Match(input string) bool {\n\t_, matched := match(mg.node, input, false)\n\treturn matched\n}",
"func (p *PropertySet) Match(s string) []string {\n\tp.mux.RLock()\n\tdefer p.mux.RUnlock()\n\n\tproperties := make([]string, 0)\n\tfor property := range p.props {\n\t\tif strings.HasPrefix(property, s) {\n\t\t\tproperties = append(properties, property)\n\t\t}\n\t}\n\n\treturn properties\n}",
"func (cont *Container) Match(query fl.Query) bool {\n\tfor k, q := range query {\n\t\tif !cont.MatchField(k, q...) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (s *AppServerV3) MatchSearch(values []string) bool {\n\treturn MatchSearch(nil, values, nil)\n}",
"func match(r io.ReaderAt, buf []byte, key []byte, pos uint32) (bool, error) {\n\tklen := len(key)\n\tfor n := 0; n < klen; n += len(buf) {\n\t\tnleft := klen - n\n\t\tif len(buf) > nleft {\n\t\t\tbuf = buf[:nleft]\n\t\t}\n\t\tif _, err := r.ReadAt(buf, int64(pos)); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif !bytes.Equal(buf, key[n:n+len(buf)]) {\n\t\t\treturn false, nil\n\t\t}\n\t\tpos += uint32(len(buf))\n\t}\n\treturn true, nil\n}",
"func GlobMatch(patterns ...string) MatcherFunc { return GlobMatches(patterns) }",
"func (c *Counter) Match(values []string) bool {\n\ta, b := c.Values, values\n\tif len(a) == len(b) {\n\t\tb = b[:len(a)]\n\t\tfor i := range a {\n\t\t\tif a[i] != b[i] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}",
"func (r *Requirement) Matches(ls Labels) bool {\n\tswitch strings.ToLower(r.Operator) {\n\tcase strings.ToLower(Operator_equals.String()), strings.ToLower(Operator_in.String()):\n\t\tif !ls.Has(r.Key) {\n\t\t\treturn false\n\t\t}\n\t\treturn r.hasValue(ls.Get(r.Key))\n\tcase strings.ToLower(Operator_notEquals.String()), strings.ToLower(Operator_notIn.String()):\n\t\tif !ls.Has(r.Key) {\n\t\t\treturn false\n\t\t}\n\t\treturn !r.hasValue(ls.Get(r.Key))\n\tdefault:\n\t\treturn false\n\t}\n}",
"func LooksLikeAKeyString(inputStr string) (matched bool) {\n\treturn keyStringRe.MatchString(inputStr)\n}",
"func (m fieldMatcher) Matches(x interface{}) bool {\n\tval := reflect.ValueOf(x)\n\tfor i := 0; i < val.NumField(); i++ {\n\t\tfield := val.Type().Field(i)\n\t\tif field.Name == m.Key {\n\t\t\tif reflect.DeepEqual(getValue(val.Field(i)), m.Value) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func getEnvKeyValue(match string, partial bool) (string, string, error) {\n\tfor _, e := range os.Environ() {\n\t\tpair := strings.Split(e, \"=\")\n\t\tif len(pair) != 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tkey := pair[0]\n\t\tvalue := pair[1]\n\n\t\tif partial && strings.Contains(key, match) {\n\t\t\treturn key, value, nil\n\t\t}\n\n\t\tif strings.Compare(key, match) == 0 {\n\t\t\treturn key, value, nil\n\t\t}\n\t}\n\n\tmatchType := \"match\"\n\tif partial {\n\t\tmatchType = \"partial match\"\n\t}\n\n\treturn \"\", \"\", fmt.Errorf(\"Failed to find %s with %s\", matchType, match)\n}",
"func Match(rule grpc.Rule, data map[string]string, pidRuntime string) bool {\n\t// Return early if we have nothing to filter on.\n\tif len(rule.ContainerRuntimes) < 1 && len(rule.FilterEvents) < 1 {\n\t\treturn true\n\t}\n\n\tmatchedRuntime := false\n\tfor _, runtime := range rule.ContainerRuntimes {\n\t\tif pidRuntime == runtime {\n\t\t\t// Return early if we know we have nothing else to filter on.\n\t\t\tif len(rule.FilterEvents) < 1 {\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\t// Continue to the next check.\n\t\t\tmatchedRuntime = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Return early here if we never matched a runtime.\n\tif len(rule.ContainerRuntimes) > 0 && !matchedRuntime {\n\t\treturn false\n\t}\n\n\t// Return early here if we have nothing else to filter on.\n\tif len(rule.FilterEvents) < 1 {\n\t\treturn true\n\t}\n\n\tfor key, ogValue := range data {\n\t\ts, ok := rule.FilterEvents[key]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, find := range s.Values {\n\t\t\tif strings.Contains(ogValue, find) {\n\t\t\t\t// Return early since we have nothing else to filter on.\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\t// We did not match any filters.\n\treturn false\n}",
"func (a AnyPastTime) Match(v driver.Value) bool {\n\tstr, ok := v.(string)\n\tif !ok {\n\t\treturn false\n\t}\n\tt := types.Timestamp{}\n\tif err := t.Scan(str); err != nil {\n\t\treturn false\n\t}\n\treturn time.Since(t.Time()) < a.Range\n}",
"func (action Action) Match(a Action) bool {\n\treturn wildcard.Match(string(action), string(a))\n}",
"func customGrep(data string, pattern map[string]string) map[string][]string {\n\t// \"pattern name\":\"\"\n\tresult := make(map[string][]string)\n\tfor k, v := range pattern {\n\t\tresultArr := Grepping(data, v)\n\t\tif len(resultArr) > 0 {\n\t\t\tresult[k] = resultArr\n\t\t}\n\t}\n\treturn result\n}",
"func equal(key, value string, params Parameter) bool {\n switch value {\n case \"nil\", \"empty\":\n return equalNilAndEmpty(key, value, params)\n default:\n return equalValue(key, value, params)\n }\n}",
"func bestMatch(name string, t reflect.Type) string {\n\tkey := strings.ToLower(hyphens.ReplaceAllString(name, \"\"))\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tfield := t.Field(i)\n\t\tif field.Name == name {\n\t\t\treturn field.Name\n\t\t}\n\t\tj := field.Tag.Get(\"json\")\n\t\tif j != \"\" {\n\t\t\tflags := strings.Split(j, \",\")\n\t\t\tfor _, flag := range flags {\n\t\t\t\tif name == flag {\n\t\t\t\t\treturn field.Name\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tlname := strings.ToLower(hyphens.ReplaceAllString(field.Name, \"\"))\n\t\tif key == lname {\n\t\t\treturn field.Name\n\t\t}\n\t}\n\treturn \"\"\n}",
"func (d metaphoneDict) matches(metaphone string) metaphoneDict {\n\tvar l, r int\n\ti := d.phoneticLocation(metaphone)\n\tfor r = i; r < len(d) && d[r].metaphone == metaphone; r++ {\n\t}\n\tfor l = i; l >= 0 && d[l].metaphone == metaphone; l-- {\n\t}\n\tl++\n\tif r-l < fudgeDistance*2 {\n\t\tl -= fudgeDistance\n\t\tr += fudgeDistance\n\t}\n\tif l < 0 {\n\t\tl = 0\n\t}\n\tif r > len(d) {\n\t\tr = len(d)\n\t}\n\treturn d[l:r]\n}",
"func (card Card) fieldsMatchByType(name, value string) (fields []Field) {\n name = strings.ToLower(name)\n value = strings.ToLower(value)\n for _, f := range card.Fields {\n if strings.ToLower(f.Type) == name &&\n strings.Contains(strings.ToLower(f.Value), value) {\n fields = append(fields, f)\n }\n }\n\n return\n}",
"func (t attrSelector) Match(n *html.Node) bool {\n\tswitch t.operation {\n\tcase \"\":\n\t\treturn matchAttribute(n, t.key, func(string) bool { return true })\n\tcase \"=\":\n\t\treturn matchAttribute(n, t.key, func(s string) bool { return s == t.val })\n\tcase \"!=\":\n\t\treturn attributeNotEqualMatch(t.key, t.val, n)\n\tcase \"~=\":\n\t\t// matches elements where the attribute named key is a whitespace-separated list that includes val.\n\t\treturn matchAttribute(n, t.key, func(s string) bool { return matchInclude(t.val, s) })\n\tcase \"|=\":\n\t\treturn attributeDashMatch(t.key, t.val, n)\n\tcase \"^=\":\n\t\treturn attributePrefixMatch(t.key, t.val, n)\n\tcase \"$=\":\n\t\treturn attributeSuffixMatch(t.key, t.val, n)\n\tcase \"*=\":\n\t\treturn attributeSubstringMatch(t.key, t.val, n)\n\tcase \"#=\":\n\t\treturn attributeRegexMatch(t.key, t.regexp, n)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsuported operation : %s\", t.operation))\n\t}\n}",
"func whereAny(entries interface{}, key, sep string, cmp []string) (interface{}, error) {\n\treturn generalizedWhere(\"whereAny\", entries, key, func(value interface{}) bool {\n\t\tif value == nil {\n\t\t\treturn false\n\t\t} else {\n\t\t\titems := strings.Split(value.(string), sep)\n\t\t\treturn len(intersect(cmp, items)) > 0\n\t\t}\n\t})\n}"
] | [
"0.7097848",
"0.69746256",
"0.6162469",
"0.6156034",
"0.60447186",
"0.599638",
"0.5879797",
"0.5872499",
"0.5860298",
"0.58108395",
"0.57897544",
"0.57737875",
"0.5747656",
"0.5741802",
"0.56776214",
"0.5676985",
"0.5634095",
"0.56085414",
"0.5597004",
"0.5589954",
"0.557177",
"0.5567571",
"0.5556967",
"0.5525543",
"0.5478485",
"0.5461796",
"0.54478884",
"0.54216236",
"0.5407237",
"0.5377896",
"0.5377896",
"0.53481555",
"0.53255564",
"0.52947456",
"0.5289103",
"0.52722126",
"0.5267257",
"0.5267257",
"0.5257442",
"0.5232316",
"0.5210086",
"0.52078223",
"0.5205293",
"0.518691",
"0.5178901",
"0.5173906",
"0.51736754",
"0.516359",
"0.5158604",
"0.5150494",
"0.5148744",
"0.5138188",
"0.5128183",
"0.51218",
"0.51207054",
"0.51013595",
"0.5098328",
"0.5090163",
"0.50792694",
"0.50711775",
"0.50552505",
"0.50517535",
"0.5051306",
"0.50501394",
"0.5047032",
"0.5041911",
"0.5033525",
"0.5024925",
"0.50241643",
"0.50232524",
"0.5006551",
"0.49865666",
"0.49860978",
"0.49847582",
"0.49816144",
"0.4979884",
"0.497763",
"0.49593437",
"0.49398288",
"0.49331662",
"0.49311292",
"0.49137333",
"0.4902631",
"0.49009874",
"0.48973328",
"0.48787296",
"0.48780355",
"0.4869388",
"0.48691073",
"0.48640612",
"0.4858991",
"0.48544428",
"0.48533815",
"0.48524925",
"0.48512816",
"0.48487774",
"0.4848216",
"0.48462984",
"0.48416254",
"0.4836475"
] | 0.71222144 | 0 |
Validate validates input replicate retries. | func (r BatchKeyRotateRetry) Validate() error {
if r.Attempts < 0 {
return errInvalidArgument
}
if r.Delay < 0 {
return errInvalidArgument
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (r BatchReplicateRetry) Validate() error {\n\tif r.Attempts < 0 {\n\t\treturn errInvalidArgument\n\t}\n\n\tif r.Delay < 0 {\n\t\treturn errInvalidArgument\n\t}\n\n\treturn nil\n}",
"func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest, o ObjectLayer) error {\n\tif r == nil {\n\t\treturn nil\n\t}\n\n\tif r.APIVersion != batchReplJobAPIVersion {\n\t\treturn errInvalidArgument\n\t}\n\n\tif r.Source.Bucket == \"\" {\n\t\treturn errInvalidArgument\n\t}\n\n\tinfo, err := o.GetBucketInfo(ctx, r.Source.Bucket, BucketOptions{})\n\tif err != nil {\n\t\tif isErrBucketNotFound(err) {\n\t\t\treturn batchReplicationJobError{\n\t\t\t\tCode: \"NoSuchSourceBucket\",\n\t\t\t\tDescription: \"The specified source bucket does not exist\",\n\t\t\t\tHTTPStatusCode: http.StatusNotFound,\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\n\tif err := r.Source.Type.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tif r.Target.Endpoint == \"\" {\n\t\treturn errInvalidArgument\n\t}\n\n\tif r.Target.Bucket == \"\" {\n\t\treturn errInvalidArgument\n\t}\n\n\tif err := r.Target.Creds.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.Target.Type.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, tag := range r.Flags.Filter.Tags {\n\t\tif err := tag.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, meta := range r.Flags.Filter.Metadata {\n\t\tif err := meta.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := r.Flags.Retry.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tu, err := url.Parse(r.Target.Endpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcred := r.Target.Creds\n\n\tc, err := miniogo.NewCore(u.Host, &miniogo.Options{\n\t\tCreds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),\n\t\tSecure: u.Scheme == \"https\",\n\t\tTransport: getRemoteInstanceTransport,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.SetAppInfo(\"minio-\"+batchJobPrefix, r.APIVersion+\" \"+job.ID)\n\n\tvcfg, err := c.GetBucketVersioning(ctx, r.Target.Bucket)\n\tif err != nil {\n\t\tif miniogo.ToErrorResponse(err).Code == \"NoSuchBucket\" {\n\t\t\treturn batchReplicationJobError{\n\t\t\t\tCode: \"NoSuchTargetBucket\",\n\t\t\t\tDescription: \"The specified target bucket does not exist\",\n\t\t\t\tHTTPStatusCode: http.StatusNotFound,\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\n\tif info.Versioning && !vcfg.Enabled() {\n\t\treturn batchReplicationJobError{\n\t\t\tCode: \"InvalidBucketState\",\n\t\t\tDescription: fmt.Sprintf(\"The source '%s' has versioning enabled, target '%s' must have versioning enabled\",\n\t\t\t\tr.Source.Bucket, r.Target.Bucket),\n\t\t\tHTTPStatusCode: http.StatusBadRequest,\n\t\t}\n\t}\n\n\tr.clnt = c\n\treturn nil\n}",
"func (c BatchJobReplicateCredentials) Validate() error {\n\tif !auth.IsAccessKeyValid(c.AccessKey) || !auth.IsSecretKeyValid(c.SecretKey) {\n\t\treturn errInvalidArgument\n\t}\n\treturn nil\n}",
"func validateGenerateResyncInputs(auts, key, opc, rand []byte) error {\n\tif len(auts) != ExpectedAutsBytes {\n\t\treturn fmt.Errorf(\"incorrect auts size. Expected %v bytes, but got %v bytes\", ExpectedAutsBytes, len(auts))\n\t}\n\tif len(key) != ExpectedKeyBytes {\n\t\treturn fmt.Errorf(\"incorrect key size. Expected %v bytes, but got %v bytes\", ExpectedKeyBytes, len(key))\n\t}\n\tif len(opc) != ExpectedOpcBytes {\n\t\treturn fmt.Errorf(\"incorrect opc size. Expected %v bytes, but got %v bytes\", ExpectedOpcBytes, len(opc))\n\t}\n\tif len(rand) != RandChallengeBytes {\n\t\treturn fmt.Errorf(\"incorrect rand size. Expected %v bytes, but got %v bytes\", RandChallengeBytes, len(rand))\n\t}\n\treturn nil\n}",
"func canRetry(args interface{}, err error) bool {\n\t// No leader errors are always safe to retry since no state could have\n\t// been changed.\n\tif structs.IsErrNoLeader(err) {\n\t\treturn true\n\t}\n\n\t// Reads are safe to retry for stream errors, such as if a server was\n\t// being shut down.\n\tinfo, ok := args.(structs.RPCInfo)\n\tif ok && info.IsRead() && lib.IsErrEOF(err) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (kv BatchJobReplicateKV) Validate() error {\n\tif kv.Key == \"\" {\n\t\treturn errInvalidArgument\n\t}\n\treturn nil\n}",
"func (k Keeper) ValidateRepay(ctx sdk.Context, sender sdk.AccAddress, coins sdk.Coins) error {\n\tsenderAcc := k.accountKeeper.GetAccount(ctx, sender)\n\tsenderCoins := senderAcc.SpendableCoins(ctx.BlockTime())\n\n\tfor _, coin := range coins {\n\t\tif senderCoins.AmountOf(coin.Denom).LT(coin.Amount) {\n\t\t\treturn sdkerrors.Wrapf(types.ErrInsufficientBalanceForRepay, \"account can only repay up to %s%s\", senderCoins.AmountOf(coin.Denom), coin.Denom)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (b Takuzu) Validate() (bool, error) {\n\tfinished := true\n\n\tcomputeVal := func(cells []Cell) (val int) {\n\t\tfor i := 0; i < len(cells); i++ {\n\t\t\tval += cells[i].Value * 1 << uint(i)\n\t\t}\n\t\treturn\n\t}\n\n\tlineVals := make(map[int]bool)\n\tcolVals := make(map[int]bool)\n\n\tfor i := 0; i < b.Size; i++ {\n\t\tvar d []Cell\n\t\tvar full bool\n\t\tvar err error\n\n\t\t// Let's check line i\n\t\td = b.GetLine(i)\n\t\tfull, err = checkRange(d)\n\t\tif err != nil {\n\t\t\terr := err.(validationError)\n\t\t\terr.LineNumber = &i\n\t\t\treturn false, err\n\t\t}\n\t\tif full {\n\t\t\thv := computeVal(d)\n\t\t\tif lineVals[hv] {\n\t\t\t\terr := validationError{\n\t\t\t\t\tErrorType: ErrorDuplicate,\n\t\t\t\t\tLineNumber: &i,\n\t\t\t\t}\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tlineVals[hv] = true\n\t\t} else {\n\t\t\tfinished = false\n\t\t}\n\n\t\t// Let's check column i\n\t\td = b.GetColumn(i)\n\t\tfull, err = checkRange(d)\n\t\tif err != nil {\n\t\t\terr := err.(validationError)\n\t\t\terr.ColumnNumber = &i\n\t\t\treturn false, err\n\t\t}\n\t\tif full {\n\t\t\thv := computeVal(d)\n\t\t\tif colVals[hv] {\n\t\t\t\terr := validationError{\n\t\t\t\t\tErrorType: ErrorDuplicate,\n\t\t\t\t\tColumnNumber: &i,\n\t\t\t\t}\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tcolVals[hv] = true\n\t\t} else {\n\t\t\tfinished = false\n\t\t}\n\t}\n\treturn finished, nil\n}",
"func (r *Restore) Validate() error {\n\tlogrus.Trace(\"validating restore action configuration\")\n\n\t// verify bucket is provided\n\tif len(r.Bucket) == 0 {\n\t\treturn fmt.Errorf(\"no bucket provided\")\n\t}\n\n\t// verify filename is provided\n\tif len(r.Filename) == 0 {\n\t\treturn fmt.Errorf(\"no filename provided\")\n\t}\n\n\t// verify timeout is provided\n\tif r.Timeout == 0 {\n\t\treturn fmt.Errorf(\"timeout must be greater than 0\")\n\t}\n\n\treturn nil\n}",
"func (r *Retrier) RunRetry() error {\n\t// Start signal handler.\n\tsigHandler := signals.NewSignalHandler(10)\n\tgo sigHandler.Register()\n\n\tfinish := make(chan bool, 1)\n\tgo func() {\n\t\tselect {\n\t\tcase <-finish:\n\t\t\treturn\n\t\tcase <-time.After(10 * time.Second):\n\t\t\treturn\n\t\tdefault:\n\t\t\tfor {\n\t\t\t\tif sigHandler.GetState() != 0 {\n\t\t\t\t\tlogger.Critical(\"detected signal. retry failed.\")\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor i := 0; i < r.retries; i++ {\n\t\terr := r.retryable.Try()\n\t\tif err != nil {\n\t\t\tlogger.Info(\"Retryable error: %v\", err)\n\t\t\ttime.Sleep(time.Duration(r.sleepSeconds) * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tfinish <- true\n\t\treturn nil\n\t}\n\n\tfinish <- true\n\treturn fmt.Errorf(\"unable to succeed at retry after %d attempts at %d seconds\", r.retries, r.sleepSeconds)\n}",
"func (j BatchJobRequest) Validate(ctx context.Context, o ObjectLayer) error {\n\tif j.Replicate != nil {\n\t\treturn j.Replicate.Validate(ctx, j, o)\n\t}\n\treturn errInvalidArgument\n}",
"func (m *ReplicaStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (pow *ProofOfWork) Validate() bool {\n\t// we run the Run() loop one more time\n\tvar attempt big.Int\n\n\tdata := pow.InitiateData(pow.Block.Nonce)\n\thash := sha256.Sum256(data)\n\tattempt.SetBytes(hash[:])\n\n\treturn attempt.Cmp(pow.Target) == -1\n}",
"func IsRetryable(err error) bool {\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tif strings.Contains(err.Error(), \"field type conflict\") {\n\t\treturn false\n\t}\n\treturn true\n}",
"func IsRetryable(err error) bool {\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tif strings.Contains(err.Error(), \"field type conflict\") {\n\t\treturn false\n\t}\n\treturn true\n}",
"func validateBatchHandler(w http.ResponseWriter, r *http.Request) {\n\tif InstableMode {\n\t\t<-time.Tick(time.Duration(rand.Intn(MaxLag)) * time.Millisecond)\n\t\tif rand.Intn(10) == 1 {\n\t\t\thandleErr(w, http.StatusInternalServerError, \"please try again\")\n\t\t\treturn\n\t\t}\n\t}\n\tvars := mux.Vars(r)\n\tbatch, ok := vars[\"batch\"]\n\tif !ok {\n\t\thandleErr(w, http.StatusBadRequest, \"must provide batch uuid as URI segment\")\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thandleErr(w, http.StatusInternalServerError, \"unable to read post body\")\n\t\treturn\n\t}\n\n\tconn, err := redis.Dial(\"tcp\", RedisAddr)\n\tif err != nil {\n\t\tlog.Println(\"unable to connect to redis when validating batch\", err.Error())\n\t\thandleErr(w, http.StatusInternalServerError, \"unable to reach key store\")\n\t\treturn\n\t}\n\n\t/*\n\t Expected body format, n lines, each representing an ordered response based on batch\n\t ex:\n\n\t dasdfSDFd\n\t sdfHDFas\n\t invalid\n\t DGHDksdfhkdL\n\n\t*/\n\n\t// validate each line individually\n\tsubmissionValid := true\n\tlines := strings.Split(string(body), \"\\n\")\n\tfor i, line := range lines {\n\t\tif line == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tkey := fmt.Sprintf(\"%s_%d\", batch, i)\n\n\t\tresp, err := conn.Do(\"GET\", key)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error getting batch '%s' - %s\", key, err.Error())\n\t\t\thandleErr(w, http.StatusInternalServerError, \"unable to query key store\")\n\t\t\treturn\n\t\t}\n\n\t\tif resp == nil {\n\t\t\thandleErr(w, http.StatusNotFound, fmt.Sprintf(\"batch '%s' does not exist\", key))\n\t\t\treturn\n\t\t}\n\t\tdata, err := redis.String(resp, err)\n\t\tif err != nil {\n\t\t\tlog.Println(\"error getting string data \", err.Error())\n\t\t\thandleErr(w, http.StatusInternalServerError, \"unable to read data\")\n\t\t\treturn\n\t\t}\n\t\tif data != line {\n\t\t\tsubmissionValid = false\n\t\t\tw.Write([]byte(fmt.Sprintf(\"invalid submission %s_%d got %s, want %s\\n\", batch, i, line, data)))\n\t\t}\n\t}\n\n\tif submissionValid {\n\t\tw.Write([]byte(\"ok\\n\"))\n\t}\n}",
"func (t BatchJobReplicateResourceType) Validate() error {\n\tswitch t {\n\tcase BatchJobReplicateResourceMinIO:\n\tdefault:\n\t\treturn errInvalidArgument\n\t}\n\treturn nil\n}",
"func ValidateInputLength(cepRaw interface{}) observable.Observable {\n\treturn observable.Create(func(emitter *observer.Observer, disposed bool) {\n\t\tcep, _ := cepRaw.(string)\n\t\tcepLength := len(cep)\n\t\tif cepLength <= cepSize {\n\t\t\temitter.OnNext(cep)\n\t\t\temitter.OnDone()\n\t\t} else {\n\t\t\temitter.OnError(errors.New(\"Cep length is less than 8 characters\"))\n\t\t}\n\t})\n}",
"func (sv *StubbedValidator) StubSuccessValidateRestart() {\n\tsv.revalidationError = nil\n}",
"func (c *Client) ShouldRetry(rpcName string, err error) bool {\n\treturn false\n}",
"func (p *proxiedRequest) canRetry(e error) bool {\n\tswitch e := e.(type) {\n\tcase *net.OpError:\n\t\tif e.Op == \"dial\" {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tglog.V(2).Info(\"Will not retry after error: \", e)\n\treturn false\n}",
"func (f *Fuse) shouldRetry() bool {\n\tif f.retries > f.RetryThreshold {\n\t\tf.retries = 0\n\t\treturn true\n\t}\n\treturn false\n}",
"func (l *retryLoop) ShouldRetry(err error) bool {\n\tif err == zk.ErrSessionExpired || err == zk.ErrSessionMoved {\n\t\treturn true\n\t}\n\n\tif netErr, ok := err.(net.Error); ok {\n\t\treturn netErr.Timeout() || netErr.Temporary()\n\t}\n\n\treturn false\n}",
"func (cfg *HATrackerConfig) Validate() error {\n\tif cfg.UpdateTimeoutJitterMax < 0 {\n\t\treturn errNegativeUpdateTimeoutJitterMax\n\t}\n\n\tminFailureTimeout := cfg.UpdateTimeout + cfg.UpdateTimeoutJitterMax + time.Second\n\tif cfg.FailoverTimeout < minFailureTimeout {\n\t\treturn fmt.Errorf(errInvalidFailoverTimeout, cfg.FailoverTimeout, minFailureTimeout)\n\t}\n\n\treturn nil\n}",
"func canRetry(err error) bool {\n\terr = interpret(err)\n\tif temp, ok := err.(TemporaryError); ok && !temp.Temporary() {\n\t\treturn false\n\t}\n\treturn true\n}",
"func Retries(retries int, maxRepeat int, nonIdempotent bool) RetryPolicy {\n\treturn func(req *http.Request, err error, ctx RoundTripContext) bool {\n\t\tif ctx.Retries() >= retries {\n\t\t\treturn false\n\t\t}\n\t\tif ctx.Exhausted() > maxRepeat {\n\t\t\treturn false\n\t\t}\n\t\tif nonIdempotent || isIdempotent(req.Method) {\n\t\t\treturn true\n\t\t}\n\t\tif ne, ok := err.(*net.OpError); ok {\n\t\t\tif ne.Op == \"dial\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n}",
"func (m *GameflowLcdsReconnectInfoDto) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateGame(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePlayerCredentials(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (*backfillingDispatcher) IsRetryableErr(error) bool {\n\treturn true\n}",
"func ValidateCreate(client client.Client, rollout *v1alpha1.RolloutPlan, rootPath *field.Path) field.ErrorList {\n\tvar allErrs field.ErrorList\n\n\tif rollout.RolloutBatches == nil {\n\t\tallErrs = append(allErrs, field.Required(rootPath.Child(\"rolloutBatches\"), \"the rollout has to have batches\"))\n\t}\n\n\t// the rollout batch partition is either automatic or positive\n\tif rollout.BatchPartition != nil && *rollout.BatchPartition < 0 {\n\t\tallErrs = append(allErrs, field.Invalid(rootPath.Child(\"batchPartition\"), rollout.BatchPartition,\n\t\t\t\"the rollout plan has to be positive\"))\n\t}\n\n\t// NumBatches has to be the size of RolloutBatches\n\tif rollout.NumBatches != nil && len(rollout.RolloutBatches) != int(*rollout.NumBatches) {\n\t\tallErrs = append(allErrs, field.Invalid(rootPath.Child(\"numBatches\"), rollout.NumBatches,\n\t\t\t\"the num batches does not match the rollout batch size\"))\n\t}\n\n\tif rollout.RolloutStrategy != v1alpha1.IncreaseFirstRolloutStrategyType &&\n\t\trollout.RolloutStrategy != v1alpha1.DecreaseFirstRolloutStrategyType {\n\t\tallErrs = append(allErrs, field.Invalid(rootPath.Child(\"rolloutStrategy\"),\n\t\t\trollout.RolloutStrategy, \"the rolloutStrategy can only be IncreaseFirst or DecreaseFirst\"))\n\t}\n\n\t// validate the webhooks\n\tallErrs = append(allErrs, validateWebhook(rollout, rootPath)...)\n\n\t// validate the rollout batches\n\tallErrs = append(allErrs, validateRolloutBatches(rollout, rootPath)...)\n\n\t// TODO: The total number of num in the batches match the current target resource pod size\n\treturn allErrs\n}",
"func (r Describe) validation(cmd *cobra.Command, args []string) error {\n\tif err := require.MaxArgs(args, 3); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (d *downloader) shouldRetry() bool {\n\t//never restart transfer of the same file more than 10 times\n\td.TotalRetryCount++\n\tif d.TotalRetryCount > maxTotalRetryCount {\n\t\tlogg.Info(\"giving up on GET %s after %d read errors\", d.URI, maxTotalRetryCount)\n\t\treturn false\n\t}\n\n\t//if there was no error at this offset before, always retry\n\tif d.LastErrorAtBytesRead != d.BytesRead {\n\t\td.LastErrorAtBytesRead = d.BytesRead\n\t\td.LastErrorRetryCount = 0\n\t\treturn true\n\t}\n\n\t//only retry an error at the same offset for 3 times\n\td.LastErrorRetryCount++\n\tif d.LastErrorRetryCount > maxRetryCount {\n\t\tlogg.Info(\"giving up on GET %s after %d read errors at the same offset (%d)\",\n\t\t\td.URI,\n\t\t\tmaxRetryCount,\n\t\t\td.LastErrorAtBytesRead,\n\t\t)\n\t\treturn false\n\t}\n\treturn true\n}",
"func (ut *RecoveryPayload) Validate() (err error) {\n\tif ut.Token == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"token\"))\n\t}\n\tif ut.Password == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"password\"))\n\t}\n\tif utf8.RuneCountInString(ut.Password) < 10 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.password`, ut.Password, utf8.RuneCountInString(ut.Password), 10, true))\n\t}\n\treturn\n}",
"func (c *controller) reconciliationRetryDurationExceeded(operationStartTime *metav1.Time) bool {\n\tif operationStartTime == nil || time.Now().Before(operationStartTime.Time.Add(c.reconciliationRetryDuration)) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func validateOneReplicaPerNode(desc *roachpb.RangeDescriptor, chgsByNodeID changesByNodeID) error {\n\treplsByNodeID := make(map[roachpb.NodeID]int)\n\tfor _, repl := range desc.Replicas().Descriptors() {\n\t\treplsByNodeID[repl.NodeID]++\n\t}\n\n\tfor nodeID, chgs := range chgsByNodeID {\n\t\tif len(chgs) > 2 {\n\t\t\treturn errors.AssertionFailedf(\"more than 2 changes for the same node(%d): %+v\",\n\t\t\t\tnodeID, chgs)\n\t\t}\n\t\tswitch replsByNodeID[nodeID] {\n\t\tcase 0:\n\t\t\t// If there are no existing replicas on the node, a rebalance is not\n\t\t\t// possible and there must not be more than 1 change for it.\n\t\t\t//\n\t\t\t// NB: We don't care _what_ kind of change it is. If it's a removal, it\n\t\t\t// will be invalidated by `validateRemovals`.\n\t\t\tif len(chgs) > 1 {\n\t\t\t\treturn errors.AssertionFailedf(\"unexpected set of changes(%+v) for node %d, which has\"+\n\t\t\t\t\t\" no existing replicas for the range\", chgs, nodeID)\n\t\t\t}\n\t\tcase 1:\n\t\t\t// If the node has exactly one replica, then the only changes allowed on\n\t\t\t// the node are:\n\t\t\t// 1. An addition and a removal (constituting a rebalance within the node)\n\t\t\t// 2. Removal\n\t\t\tswitch n := len(chgs); n {\n\t\t\tcase 1:\n\t\t\t\t// Must be a removal unless the range only has a single replica. Ranges\n\t\t\t\t// with only one replica cannot be atomically rebalanced, and must go\n\t\t\t\t// through addition and then removal separately. See #40333.\n\t\t\t\tif !chgs[0].ChangeType.IsRemoval() && len(desc.Replicas().Descriptors()) > 1 {\n\t\t\t\t\treturn errors.AssertionFailedf(\"node %d already has a replica; only valid actions\"+\n\t\t\t\t\t\t\" are a removal or a rebalance(add/remove); got %+v\", nodeID, chgs)\n\t\t\t\t}\n\t\t\tcase 2:\n\t\t\t\t// Must be an addition then removal\n\t\t\t\tc1, c2 := chgs[0], chgs[1]\n\t\t\t\tif !(c1.ChangeType.IsAddition() && c2.ChangeType.IsRemoval()) {\n\t\t\t\t\treturn errors.AssertionFailedf(\"node %d already has a replica; only valid actions\"+\n\t\t\t\t\t\t\" are a removal or a rebalance(add/remove); got %+v\", nodeID, chgs)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"unexpected number of changes for node %d: %+v\", nodeID, chgs))\n\t\t\t}\n\t\tcase 2:\n\t\t\t// If there are 2 replicas on any given node, a removal is the only legal\n\t\t\t// thing to do.\n\t\t\tif !(len(chgs) == 1 && chgs[0].ChangeType.IsRemoval()) {\n\t\t\t\treturn errors.AssertionFailedf(\"node %d has 2 replicas, expected exactly one of them\"+\n\t\t\t\t\t\" to be removed; got %+v\", nodeID, chgs)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn errors.AssertionFailedf(\"node %d unexpectedly has more than 2 replicas: %s\",\n\t\t\t\tnodeID, desc.Replicas().Descriptors())\n\t\t}\n\t}\n\treturn nil\n}",
"func (rb *QueryDelayRetryBehavior) CanRetry(retries uint) bool {\n\treturn retries < rb.maxRetries\n}",
"func (c *Connection) checkForRetry(err error) bool {\n\treturn err == io.EOF\n}",
"func validateController(ctx context.Context, c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {\n\tcontainerImage = trimDockerRegistry(containerImage)\n\tgetPodsTemplate := \"--template={{range.items}}{{.metadata.name}} {{end}}\"\n\n\tgetContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . \"status\" \"containerStatuses\")}}{{range .status.containerStatuses}}{{if (and (eq .name \"%s\") (exists . \"state\" \"running\"))}}true{{end}}{{end}}{{end}}`, containername)\n\n\tgetImageTemplate := fmt.Sprintf(`--template={{if (exists . \"spec\" \"containers\")}}{{range .spec.containers}}{{if eq .name \"%s\"}}{{.image}}{{end}}{{end}}{{end}}`, containername)\n\n\tginkgo.By(fmt.Sprintf(\"waiting for all containers in %s pods to come up.\", testname)) //testname should be selector\nwaitLoop:\n\tfor start := time.Now(); time.Since(start) < framework.PodStartTimeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {\n\t\tgetPodsOutput := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", \"-o\", \"template\", getPodsTemplate, \"-l\", testname)\n\t\tpods := strings.Fields(getPodsOutput)\n\t\tif numPods := len(pods); numPods != replicas {\n\t\t\tginkgo.By(fmt.Sprintf(\"Replicas for %s: expected=%d actual=%d\", testname, replicas, numPods))\n\t\t\tcontinue\n\t\t}\n\t\tvar runningPods []string\n\t\tfor _, podID := range pods {\n\t\t\trunning := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", podID, \"-o\", \"template\", getContainerStateTemplate)\n\t\t\tif running != \"true\" {\n\t\t\t\tframework.Logf(\"%s is created but not running\", podID)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\tcurrentImage := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", podID, \"-o\", \"template\", getImageTemplate)\n\t\t\tcurrentImage = trimDockerRegistry(currentImage)\n\t\t\tif currentImage != containerImage {\n\t\t\t\tframework.Logf(\"%s is created but running wrong image; expected: %s, actual: %s\", podID, containerImage, currentImage)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\t// Call the generic validator function here.\n\t\t\t// This might validate for example, that (1) getting a url works and (2) url is serving correct content.\n\t\t\tif err := validator(ctx, c, podID); err != nil {\n\t\t\t\tframework.Logf(\"%s is running right image but validator function failed: %v\", podID, err)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\tframework.Logf(\"%s is verified up and running\", podID)\n\t\t\trunningPods = append(runningPods, podID)\n\t\t}\n\t\t// If we reach here, then all our checks passed.\n\t\tif len(runningPods) == replicas {\n\t\t\treturn\n\t\t}\n\t}\n\t// Reaching here means that one of more checks failed multiple times. Assuming its not a race condition, something is broken.\n\tframework.Failf(\"Timed out after %v seconds waiting for %s pods to reach valid state\", framework.PodStartTimeout.Seconds(), testname)\n}",
"func (v *FakeValidator) Validate(clients map[string]kubeclient.Interface, ing *v1beta1.Ingress) error {\n\tif err := v.serverVersionsNewEnough(clients); err != nil {\n\t\treturn err\n\t}\n\t// TODO: Might be useful to also call serviceNodePortsSame.\n\tif !v.validationSucceeds {\n\t\tv.ValidationError = fmt.Errorf(\"test-validation-err\")\n\t\treturn v.ValidationError\n\t}\n\treturn nil\n}",
"func (m *RebootInstanceRequest) Validate() error {\n\treturn m.validate(false)\n}",
"func (m *Message) Retry() {\n\tm.Trial++\n}",
"func (c *modAccountRun) validate(args []string) error {\n\tif len(args) < 2 {\n\t\treturn errors.New(\"not enough arguments\")\n\t}\n\n\tif len(args) > 2 {\n\t\treturn errors.New(\"too many arguments\")\n\t}\n\n\treturn nil\n}",
"func (c Config) Validate() error {\n\tif len(c.ClientID) == 0 {\n\t\treturn fmt.Errorf(\"error: ClientID missing\")\n\t}\n\n\tif c.VodID < 1 {\n\t\treturn fmt.Errorf(\"error: VodID missing\")\n\t}\n\n\ttimePattern := `\\d+ \\d+ \\d+`\n\ttimeRegex := regexp.MustCompile(timePattern)\n\tif c.StartTime != \"start\" && !timeRegex.MatchString(c.StartTime) {\n\t\treturn fmt.Errorf(\"error: StartTime must be 'start' or in format '%s'; got '%s'\", timePattern, c.StartTime)\n\t}\n\tif c.EndTime == \"\" && c.Length == \"\" {\n\t\treturn errors.New(\"error: must specify either EndTime or Length\")\n\t}\n\tif c.Length == \"\" && c.EndTime != \"end\" && !timeRegex.MatchString(c.EndTime) {\n\t\treturn fmt.Errorf(\"error: EndTime must be 'end' or in format '%s'; got '%s'\", timePattern, c.EndTime)\n\t}\n\tif c.EndTime == \"\" && c.Length != \"full\" && !timeRegex.MatchString(c.Length) {\n\t\treturn fmt.Errorf(\"error: Length must be 'full' or in format '%s'; got '%s'\", timePattern, c.Length)\n\t}\n\n\tqualityPattern := `\\d{3,4}p[36]0`\n\tqualityRegex := regexp.MustCompile(qualityPattern)\n\tif c.Quality != \"best\" && c.Quality != \"chunked\" && !qualityRegex.MatchString(c.Quality) {\n\t\treturn fmt.Errorf(\"error: Quality must be 'best', 'chunked', or in format '%s'; got '%s'\", qualityPattern, c.Quality)\n\t}\n\n\tif c.FilePrefix != \"\" && !isValidFilename(c.FilePrefix) {\n\t\treturn fmt.Errorf(\"error: FilePrefix contains invalid characters; got '%s'\", c.FilePrefix)\n\t}\n\n\tif c.Workers < 1 {\n\t\treturn fmt.Errorf(\"error: Worker must be an integer greater than 0; got '%d'\", c.Workers)\n\t}\n\n\treturn nil\n}",
"func Retry(interval time.Duration, maxRetries int, f ConditionFunc) error {\n\tif maxRetries <= 0 {\n\t\treturn fmt.Errorf(\"maxRetries (%d) should be > 0\", maxRetries)\n\t}\n\ttick := time.NewTicker(interval)\n\tdefer tick.Stop()\n\n\tfor i := 0; ; i++ {\n\t\tok, err := f()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif ok {\n\t\t\treturn nil\n\t\t}\n\t\tif i+1 == maxRetries {\n\t\t\tbreak\n\t\t}\n\t\t<-tick.C\n\t}\n\treturn &RetryError{maxRetries}\n}",
"func (ut *recoveryPayload) Validate() (err error) {\n\tif ut.Token == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"token\"))\n\t}\n\tif ut.Password == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"password\"))\n\t}\n\tif ut.Password != nil {\n\t\tif utf8.RuneCountInString(*ut.Password) < 10 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`request.password`, *ut.Password, utf8.RuneCountInString(*ut.Password), 10, true))\n\t\t}\n\t}\n\treturn\n}",
"func retryNTimes(targetFunc retryableFunc, numOfRetries int, delay time.Duration) error {\n\tretryNo := 0\n\tvar err error\n\tvar continueRetrying bool\n\tfor retryNo <= numOfRetries {\n\t\tcontinueRetrying, err = targetFunc()\n\t\tif !continueRetrying {\n\t\t\treturn err\n\t\t}\n\t\t//delay between retries.\n\t\tretryNo++\n\t\ttime.Sleep(delay * time.Duration(retryNo))\n\t}\n\tif err != nil {\n\t\treqErr, ok := err.(RequestError)\n\t\tif ok {\n\t\t\tif reqErr.Description == \"\" {\n\t\t\t\treqErr.Description = \"no error message received from server\"\n\t\t\t}\n\t\t\treqErr.Description = fmt.Sprintf(\"Maximum number of re-tries has been exhausted with error: %s\", reqErr.Description)\n\t\t\treturn reqErr\n\t\t}\n\t\treturn fmt.Errorf(\"maximum number of tries has been exhausted with error: %v\", err)\n\t}\n\treturn errors.New(\"maximum number of tries has been exhausted\")\n}",
"func (sbr *SnapshotRestoreArgs) Validate() bool {\n\tif sbr.SnapIdentifier == \"\" || sbr.ID == \"\" || sbr.PStore == nil || !sbr.PStore.Validate() || sbr.SourceSnapshot == \"\" || sbr.DestFile == \"\" || sbr.ProtectionDomainID == \"\" {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (gc *GarbageCollector) ShouldRetry() bool {\n\treturn false\n}",
"func (gc *GarbageCollector) ShouldRetry() bool {\n\treturn false\n}",
"func (p *PasswordService) Validate(password domain.PasswordInterface) bool {\n\t/*\n\t\tFor performance purpouses only, we gonna work with channels;\n\t\t\"Channels are a typed conduit through which you can send and receive values with the channel operator,<-.\" - https://tour.golang.org/concurrency/2\n\n\t\tWith channels, we can separate each validation in one go routine, making a multithreading validation, and share or read some data between routines.\n\n\t\tTo read all validation results, we need to use a loop with range. But while the channel are open, the loop wont finish, generating a infinite loop.\n\n\t\tTo Prevent this, we use a Semaphore Pattern in Go.\n\n\t\tThe semaphore is used for routines to notify when they complete.\n\n\t\tAnd with this, we can closes the Validation Channel with value of validations, and auto closes the channel of semaphores too.\n\t*/\n\n\t// Channels\n\tvalidationChan := make(chan bool)\n\tfinishedChan := make(chan bool)\n\n\t// Routines\n\tgo func() {\n\t\tvalidators.DigitValidator(password.GetValue(), validationChan)\n\t\tfinishedChan <- true\n\t}()\n\tgo func() {\n\t\tvalidators.DuplicatesValidator(password.GetValue(), validationChan)\n\t\tfinishedChan <- true\n\t}()\n\tgo func() {\n\t\tvalidators.LowerValidator(password.GetValue(), validationChan)\n\t\tfinishedChan <- true\n\t}()\n\tgo func() {\n\t\tvalidators.SpecialCharsValidator(password.GetValue(), validationChan)\n\t\tfinishedChan <- true\n\t}()\n\tgo func() {\n\t\tvalidators.UpperValidator(password.GetValue(), validationChan)\n\t\tfinishedChan <- true\n\t}()\n\tgo func() {\n\t\tvalidators.WhiteSpacesValidator(password.GetValue(), validationChan)\n\t\tfinishedChan <- true\n\t}()\n\tgo func() {\n\t\tvalidators.MinLenghtValidator(password.GetValue(), validationChan)\n\t\tfinishedChan <- true\n\t}()\n\n\t// Semaphore\n\tgo func() {\n\t\t<-finishedChan\n\t\t<-finishedChan\n\t\t<-finishedChan\n\t\t<-finishedChan\n\t\t<-finishedChan\n\t\t<-finishedChan\n\t\t<-finishedChan\n\t\tclose(validationChan)\n\t\tclose(finishedChan)\n\t}()\n\t// If some routine return false, we immediatly return false.\n\tfor isValid := range validationChan {\n\t\tif !isValid {\n\t\t\treturn false\n\t\t}\n\t}\n\t// If all routines return true, the password is valid and we return true.\n\treturn true\n}",
"func shouldRetry(c context.Context, a *model.Attempt, stat dm.AbnormalFinish_Status) (retry bool, err error) {\n\tif !stat.CouldRetry() {\n\t\treturn\n\t}\n\tq := model.QuestFromID(a.ID.Quest)\n\n\tif err = ds.Get(ds.WithoutTransaction(c), q); err != nil {\n\t\treturn\n\t}\n\tvar cur, max uint32\n\tswitch stat {\n\tcase dm.AbnormalFinish_FAILED:\n\t\tcur, max = a.RetryState.Failed, q.Desc.Meta.Retry.Failed\n\t\ta.RetryState.Failed++\n\tcase dm.AbnormalFinish_CRASHED:\n\t\tcur, max = a.RetryState.Crashed, q.Desc.Meta.Retry.Crashed\n\t\ta.RetryState.Crashed++\n\tcase dm.AbnormalFinish_EXPIRED:\n\t\tcur, max = a.RetryState.Expired, q.Desc.Meta.Retry.Expired\n\t\ta.RetryState.Expired++\n\tcase dm.AbnormalFinish_TIMED_OUT:\n\t\tcur, max = a.RetryState.TimedOut, q.Desc.Meta.Retry.TimedOut\n\t\ta.RetryState.TimedOut++\n\tdefault:\n\t\tpanic(fmt.Errorf(\"do not know how to retry %q\", stat))\n\t}\n\tretry = cur < max\n\treturn\n}",
"func (c *Client) ValidateApplicationClone(name, namespace string, timeout, retryInterval time.Duration) error {\n\tif err := c.initClient(); err != nil {\n\t\treturn err\n\t}\n\tt := func() (interface{}, bool, error) {\n\t\tapplicationclone, err := c.stork.StorkV1alpha1().ApplicationClones(namespace).Get(context.TODO(), name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn \"\", true, err\n\t\t}\n\n\t\tif applicationclone.Status.Status == storkv1alpha1.ApplicationCloneStatusSuccessful {\n\t\t\treturn \"\", false, nil\n\t\t}\n\t\treturn \"\", true, &errors.ErrFailedToValidateCustomSpec{\n\t\t\tName: applicationclone.Name,\n\t\t\tCause: fmt.Sprintf(\"Application Clone failed . Error: %v .Expected status: %v Actual status: %v\", err, storkv1alpha1.ApplicationCloneStatusSuccessful, applicationclone.Status.Status),\n\t\t\tType: applicationclone,\n\t\t}\n\t}\n\tif _, err := task.DoRetryWithTimeout(t, timeout, retryInterval); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func TestRetry(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\tcommand string\n\t\tattempts int\n\t\texpectedRetry int\n\t\texpectError bool\n\t}{\n\t\t{\n\t\t\tname: \"fail_with_retry1\",\n\t\t\tcommand: \"abcd\", // This is an invalid command, should result in exit code 127.\n\t\t\tattempts: 5,\n\t\t\texpectedRetry: 4,\n\t\t\texpectError: true,\n\t\t}, {\n\t\t\tname: \"fail_with_retry2\",\n\t\t\tcommand: \"efgh\", // This is an invalid command, should result in exit code 127.\n\t\t\tattempts: 10,\n\t\t\texpectedRetry: 9,\n\t\t\texpectError: true,\n\t\t}, {\n\t\t\tname: \"fail_no_retry\",\n\t\t\tcommand: \"false\", // This will produce exit code 1\n\t\t\tattempts: 10,\n\t\t\texpectedRetry: 0,\n\t\t\texpectError: true,\n\t\t}, {\n\t\t\tname: \"pass\",\n\t\t\tcommand: \"ls\", // This will produce exit code 0\n\t\t\tattempts: 50,\n\t\t\texpectedRetry: 0,\n\t\t\texpectError: false,\n\t\t},\n\t}\n\tdefer log.SetOutput(os.Stderr)\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tcmd := exec.Command(tc.command)\n\t\t\tvar buf bytes.Buffer\n\t\t\tlog.SetOutput(&buf)\n\t\t\terr := launchEmuWithRetry(tc.attempts, cmd)\n\t\t\tlog.SetOutput(os.Stderr)\n\t\t\tif tc.expectError && err != nil {\n\t\t\t\tout := strings.Split(buf.String(), \"\\n\")\n\t\t\t\tretry_cnt := 0\n\t\t\t\tfor _, line := range out {\n\t\t\t\t\tif strings.Contains(line, \"Retry launching emulator\") {\n\t\t\t\t\t\tretry_cnt++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif retry_cnt != tc.expectedRetry {\n\t\t\t\t\tt.Errorf(\"incorrect number of retries expect %d retries, got %d\", tc.expectedRetry, retry_cnt)\n\t\t\t\t}\n\t\t\t} else if tc.expectError && err == nil {\n\t\t\t\tt.Error(\"expect error to be thrown, got nil.\")\n\t\t\t} else if !tc.expectError && err != nil {\n\t\t\t\tt.Errorf(\"did not expect error to be thrown, got error %s.\", err)\n\t\t\t}\n\t\t})\n\t}\n}",
"func Retry(attempts int, sleep time.Duration, fn func() bool) bool {\n\tif err := fn(); err != true {\n\n\t\tif attempts--; attempts > 0 {\n\t\t\tlog.Warnf(\"retry func error: %s. attemps #%d after %s.\", \"failed\", attempts, sleep)\n\t\t\ttime.Sleep(60 * time.Second)\n\t\t\treturn Retry(attempts, 2*sleep, fn)\n\t\t}\n\n\t\treturn err\n\t}\n\t//IsConnected = true\n\treturn true\n}",
"func (m *ServiceInternalConnection) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateMaster(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateReplica(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func shouldRetry(status int, err error) bool {\n\t// Retry for 5xx response codes.\n\tif 500 <= status && status < 600 {\n\t\treturn true\n\t}\n\n\t// Retry on statusTooManyRequests{\n\tif status == statusTooManyRequests {\n\t\treturn true\n\t}\n\n\t// Retry on unexpected EOFs and temporary network errors.\n\tif err == io.ErrUnexpectedEOF {\n\t\treturn true\n\t}\n\tif err, ok := err.(net.Error); ok {\n\t\treturn err.Temporary()\n\t}\n\n\treturn false\n}",
"func (r Restore) Validate() error {\n\treturn validateRestore(&r).ToAggregate()\n}",
"func (e *storageExecutor) validation() error {\n\t// check input shardIDs if empty\n\tif len(e.ctx.shardIDs) == 0 {\n\t\treturn errNoShardID\n\t}\n\tnumOfShards := e.database.NumOfShards()\n\t// check engine has shard\n\tif numOfShards == 0 {\n\t\treturn errNoShardInDatabase\n\t}\n\n\treturn nil\n}",
"func validateReplicationChanges(\n\tdesc *roachpb.RangeDescriptor, chgs roachpb.ReplicationChanges,\n) error {\n\tchgsByStoreID := getChangesByStoreID(chgs)\n\tchgsByNodeID := getChangesByNodeID(chgs)\n\n\tif err := validateAdditionsPerStore(desc, chgsByStoreID); err != nil {\n\t\treturn err\n\t}\n\tif err := validateRemovals(desc, chgsByStoreID); err != nil {\n\t\treturn err\n\t}\n\tif err := validatePromotionsAndDemotions(desc, chgsByStoreID); err != nil {\n\t\treturn err\n\t}\n\tif err := validateOneReplicaPerNode(desc, chgsByNodeID); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func main() {\n\tmaxTrial := uint8(1 << 5)\n\terr := r.Retry(maxTrial, job)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}",
"func validateGenerateEutranVectorInputs(key []byte, opc []byte, sqn uint64, plmn []byte) error {\n\tif err := validateGenerateSIPAuthVectorInputs(key, opc, sqn); err != nil {\n\t\treturn err\n\t}\n\tif len(plmn) != ExpectedPlmnBytes {\n\t\treturn fmt.Errorf(\"incorrect plmn size. Expected 3 bytes, but got %v bytes\", len(plmn))\n\t}\n\treturn nil\n}",
"func IsReplicaRebuildingFailed(reusableFailedReplica *longhorn.Replica) bool {\n\treplicaRebuildFailedCondition := types.GetCondition(reusableFailedReplica.Status.Conditions, longhorn.ReplicaConditionTypeRebuildFailed)\n\n\tif replicaRebuildFailedCondition.Status != longhorn.ConditionStatusTrue {\n\t\treturn true\n\t}\n\n\tswitch replicaRebuildFailedCondition.Reason {\n\tcase longhorn.ReplicaConditionReasonRebuildFailedDisconnection, longhorn.NodeConditionReasonManagerPodDown, longhorn.NodeConditionReasonKubernetesNodeGone, longhorn.NodeConditionReasonKubernetesNodeNotReady:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}",
"func (c *config) validate() []error {\n\tvalidationErrors := make([]error, 0)\n\tif c.MinPort <= 0 || c.MaxPort <= 0 {\n\t\tvalidationErrors = append(validationErrors, errors.New(\"min Port and Max Port values are required\"))\n\t}\n\tif c.MaxPort < c.MinPort {\n\t\tvalidationErrors = append(validationErrors, errors.New(\"max Port cannot be set less that the Min Port\"))\n\t}\n\tresourceErrors := validateResource(c.SidecarCPURequest, c.SidecarCPULimit, corev1.ResourceCPU)\n\tvalidationErrors = append(validationErrors, resourceErrors...)\n\tresourceErrors = validateResource(c.SidecarMemoryRequest, c.SidecarMemoryLimit, corev1.ResourceMemory)\n\tvalidationErrors = append(validationErrors, resourceErrors...)\n\treturn validationErrors\n}",
"func (s ScheduledJob) validate() error {\n\tvar err error\n\tif err = s.ScheduledJobConfig.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err = s.Workload.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err = validateContainerDeps(validateDependenciesOpts{\n\t\tsidecarConfig: s.Sidecars,\n\t\timageConfig: s.ImageConfig.Image,\n\t\tmainContainerName: aws.StringValue(s.Name),\n\t\tlogging: s.Logging,\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"validate container dependencies: %w\", err)\n\t}\n\tif err = validateExposedPorts(validateExposedPortsOpts{\n\t\tsidecarConfig: s.Sidecars,\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"validate unique exposed ports: %w\", err)\n\t}\n\treturn nil\n}",
"func shouldRetry(ctx context.Context, err error) (bool, error) {\n\tif fserrors.ContextError(ctx, &err) {\n\t\treturn false, err\n\t}\n\tif err == nil {\n\t\treturn false, err\n\t}\n\terrString := err.Error()\n\t// First check for specific errors\n\tif strings.Contains(errString, \"insufficient_space\") {\n\t\treturn false, fserrors.FatalError(err)\n\t} else if strings.Contains(errString, \"malformed_path\") {\n\t\treturn false, fserrors.NoRetryError(err)\n\t}\n\t// Then handle any official Retry-After header from Dropbox's SDK\n\tswitch e := err.(type) {\n\tcase auth.RateLimitAPIError:\n\t\tif e.RateLimitError.RetryAfter > 0 {\n\t\t\tfs.Logf(errString, \"Too many requests or write operations. Trying again in %d seconds.\", e.RateLimitError.RetryAfter)\n\t\t\terr = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)\n\t\t}\n\t\treturn true, err\n\t}\n\t// Keep old behavior for backward compatibility\n\tif strings.Contains(errString, \"too_many_write_operations\") || strings.Contains(errString, \"too_many_requests\") || errString == \"\" {\n\t\treturn true, err\n\t}\n\treturn fserrors.ShouldRetry(err), err\n}",
"func TestRetryDoInteralFunc(t *testing.T) {\n\texec := func() (error, interface{}){\n\t\tr := number.Random(0, 100000000)\n\t\tfmt.Println(\"r is\", r)\n\t\tif r < 100000000 / 2{\n\t\t\treturn errors.New(\"xx\"), nil\n\t\t}\n\t\treturn nil, map[string]string{\"abc\":\"wocao\"}\n\t}\n\terr, res, count := RetryDoInteralTime(exec, 5, 100)\n\tfmt.Printf(\"TestRetryDoInteralFunc error is %s res is %v count is %d\", err, res, count)\n}",
"func (*FlowHandle) IsRetryableErr(error) bool {\n\t// TODO: check whether the error is retryable.\n\treturn false\n}",
"func (client *BaseClient) Retry() int {\n\treturn client.retry\n}",
"func (b *Builder) Validate() error {\n\tstepCnt := len(b.steps)\n\tif stepCnt < 2 {\n\t\treturn errStepCnt\n\t}\n\n\t// loop through and validate steps\n\tvar (\n\t\tinput reflect.Type\n\t\toutput reflect.Type\n\t\tprevOutput reflect.Type\n\t\tst stepType\n\t)\n\tfor i := range b.steps {\n\t\tst = typeOfStep(b.steps[i].step)\n\t\t// some initial validation\n\t\tif st == invalid {\n\t\t\treturn errStepType\n\t\t} else if i == 0 && st == onlyIn {\n\t\t\treturn errFirstStep\n\t\t} else if i == stepCnt-1 && st == onlyOut {\n\t\t\treturn errLastStep\n\t\t} else if 0 < i && i < stepCnt-1 && st != inOut {\n\t\t\treturn errInteriorStep\n\t\t}\n\n\t\t// set the stepRunner's type\n\t\tb.steps[i].sType = st\n\n\t\t// set variables for input/output types\n\t\tswitch st {\n\t\tcase onlyOut:\n\t\t\toutput = reflect.TypeOf(b.steps[i].step).Out(0)\n\t\tcase inOut:\n\t\t\tinput = reflect.TypeOf(b.steps[i].step).In(1)\n\t\t\toutput = reflect.TypeOf(b.steps[i].step).Out(0)\n\t\tcase onlyIn:\n\t\t\tinput = reflect.TypeOf(b.steps[i].step).In(1)\n\t\t}\n\n\t\tif i == 0 {\n\t\t\tprevOutput = output\n\t\t\tcontinue\n\t\t}\n\n\t\t// make sure types align\n\t\tif input.Kind() == reflect.Interface {\n\t\t\tif !prevOutput.Implements(input) {\n\t\t\t\treturn fmt.Errorf(typeMismatchFmt, i+1, prevOutput, input)\n\t\t\t}\n\t\t} else if prevOutput != input {\n\t\t\treturn fmt.Errorf(typeMismatchFmt, i+1, prevOutput, input)\n\t\t}\n\t\tprevOutput = output\n\t}\n\n\t// validate input channel\n\tif b.inCh != nil {\n\t\terr := validateInputChannel(b.inCh, b.steps[0])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// validate input channel\n\tif b.outCh != nil {\n\t\terr := validateOutputChannel(b.outCh, b.steps[len(b.steps)-1])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func batchValidate(ds InfluxEntries, n int, t *testing.T) {\n\tif len(ds) != n {\n\t\tt.Errorf(\"Expected %d entries; found %d\", n, len(ds))\n\t}\n\tfor i, d := range ds {\n\t\tif len(d.Columns) != len(d.Points[0]) {\n\t\t\tt.Errorf(\"Entry %d: There are %d columns and %d points\", i, len(d.Columns), len(d.Points[0]))\n\t\t}\n\t\tif len(d.Points) > 1 {\n\t\t\tt.Errorf(\"Entry %d: There are %d points arrays; there should only be 1\", i, len(d.Points))\n\t\t}\n\t}\n}",
"func (dn *DNode) replicateFailedRequest(sb *syncBufferState) error {\n\tswitch sb.clusterType {\n\tcase meta.ClusterTypeTstore:\n\t\treturn dn.replicateFailedPoints(sb)\n\tcase meta.ClusterTypeKstore:\n\t\treturn dn.replicateFailedWrite(sb)\n\tdefault:\n\t\tdn.logger.Fatalf(\"unknown cluster type: %+v in sync buffer\", sb)\n\t}\n\treturn nil\n}",
"func RetryOnErr(\n\ti time.Duration,\n\tn int,\n\tfn func() (interface{}, error),\n) (r interface{}, err error) {\n\ttry := 1\n\tfor range time.Tick(i) {\n\t\tif try > n {\n\t\t\tbreak\n\t\t}\n\n\t\tif r, err = fn(); err == nil {\n\t\t\tbreak\n\t\t}\n\n\t\ttry++\n\t}\n\treturn r, err\n}",
"func (s stdlib) RetryInterval(time.Duration) {}",
"func Retry(f Func, opts ...Option) error {\n\tif f == nil {\n\t\tpanic(\"f is nil\")\n\t}\n\n\tmaxTries := 3\n\timplies := func(error) bool { return true }\n\n\tfor _, opt := range opts {\n\t\tswitch v := opt.(type) {\n\t\tcase MaxTries:\n\t\t\tif v > 1 {\n\t\t\t\tmaxTries = int(v)\n\t\t\t} else if v >= 0 {\n\t\t\t\tmaxTries = 1\n\t\t\t}\n\t\tcase If:\n\t\t\tif v != nil {\n\t\t\t\timplies = v\n\t\t\t}\n\t\t}\n\t}\n\n\tvar err error\n\n\tfor try := 0; try < maxTries; try++ {\n\t\tif err = f(); err == nil || !implies(err) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn err\n}",
"func (o *Virtualserver) validate(dbRecord *common.DbRecord) (ok bool, err error) {\n\t////////////////////////////////////////////////////////////////////////////\n\t// Marshal data interface.\n\t////////////////////////////////////////////////////////////////////////////\n\tvar data virtualserver.Data\n\terr = shared.MarshalInterface(dbRecord.Data, &data)\n\tif err != nil {\n\t\treturn\n\t}\n\t////////////////////////////////////////////////////////////////////////////\n\t// Test required fields.\n\t////////////////////////////////////////////////////////////////////////////\n\tok = true\n\trequired := make(map[string]bool)\n\trequired[\"ProductCode\"] = false\n\trequired[\"IP\"] = false\n\trequired[\"Port\"] = false\n\trequired[\"LoadBalancerIP\"] = false\n\trequired[\"Name\"] = false\n\t////////////////////////////////////////////////////////////////////////////\n\tif data.ProductCode != 0 {\n\t\trequired[\"ProductCode\"] = true\n\t}\n\tif len(dbRecord.LoadBalancerIP) > 0 {\n\t\trequired[\"LoadBalancerIP\"] = true\n\t}\n\tif len(data.Ports) != 0 {\n\t\trequired[\"Port\"] = true\n\t}\n\tif data.IP != \"\" {\n\t\trequired[\"IP\"] = true\n\t}\n\tif data.Name != \"\" {\n\t\trequired[\"Name\"] = true\n\t}\n\tfor _, val := range required {\n\t\tif val == false {\n\t\t\tok = false\n\t\t}\n\t}\n\tif !ok {\n\t\terr = fmt.Errorf(\"missing required fields - %+v\", required)\n\t}\n\treturn\n}",
"func (h CreateEscrowHandler) validate(ctx weave.Context, db weave.KVStore, tx weave.Tx) (*CreateMsg, error) {\n\tvar msg CreateMsg\n\tif err := weave.LoadMsg(tx, &msg); err != nil {\n\t\treturn nil, errors.Wrap(err, \"load msg\")\n\t}\n\tif weave.IsExpired(ctx, msg.Timeout) {\n\t\treturn nil, errors.Wrap(errors.ErrInput, \"timeout in the past\")\n\t}\n\tif !h.auth.HasAddress(ctx, msg.Source) {\n\t\treturn nil, errors.ErrUnauthorized\n\t}\n\treturn &msg, nil\n}",
"func shouldRetry(try int, resp *http.Response) bool {\n\tif try > maxTries {\n\t\treturn false\n\t}\n\n\treturn resp.StatusCode >= 500 && resp.StatusCode < 600\n}",
"func (m *Upstream_Timeout) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif m.GetConnect() <= 0 {\n\t\treturn Upstream_TimeoutValidationError{\n\t\t\tfield: \"Connect\",\n\t\t\treason: \"value must be greater than 0\",\n\t\t}\n\t}\n\n\tif m.GetSend() <= 0 {\n\t\treturn Upstream_TimeoutValidationError{\n\t\t\tfield: \"Send\",\n\t\t\treason: \"value must be greater than 0\",\n\t\t}\n\t}\n\n\tif m.GetRead() <= 0 {\n\t\treturn Upstream_TimeoutValidationError{\n\t\t\tfield: \"Read\",\n\t\t\treason: \"value must be greater than 0\",\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (m *RebootInstanceResponse) Validate() error {\n\treturn m.validate(false)\n}",
"func Retry(main func() error, retries int, afterTryFailure func(error) error, beforeRetry func() error) error {\r\n\tvar mainErr error\r\n\r\n\tif main == nil {\r\n\t\treturn fmt.Errorf(\"the main function to try can't be nil\")\r\n\t}\r\n\r\n\tfor i := 0; i <= retries; i++ {\r\n\t\tif i != 0 && beforeRetry != nil {\r\n\t\t\terr := beforeRetry()\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn fmt.Errorf(\"retry before function: %s\", err)\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\tmainErr = main()\r\n\t\tif mainErr == nil {\r\n\t\t\tbreak\r\n\t\t} else if _, ok := mainErr.(*NoFail); ok {\r\n\t\t\ti--\r\n\t\t\tmainErr = nil\r\n\t\t\tcontinue\r\n\t\t} else if re, ok := mainErr.(*PermFail); ok {\r\n\t\t\tmainErr = re.Err\r\n\t\t\tbreak\r\n\t\t}\r\n\r\n\t\tif afterTryFailure != nil {\r\n\t\t\terr := afterTryFailure(mainErr)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn fmt.Errorf(\"retry after function: %s\", err)\r\n\t\t\t}\r\n\t\t}\r\n\t\tif i == retries {\r\n\t\t\tbreak\r\n\t\t}\r\n\t\tn, err := RandInt(-500, 1000)\r\n\t\tif err != nil {\r\n\t\t\treturn fmt.Errorf(\"retry rand: %s\", err)\r\n\t\t}\r\n\t\twait := Min(15000, i*750) + n\r\n\t\ttime.Sleep(time.Duration(wait) * time.Millisecond)\r\n\t}\r\n\r\n\treturn mainErr\r\n}",
"func (m DNSServers1) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tiDNSServers1Size := int64(len(m))\n\n\tif err := validate.MinItems(\"\", \"body\", iDNSServers1Size, 0); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func isRetry(err error) bool {\n\ttarget := &tpm2.Warning{Code: tpm2.RCRetry}\n\tif errors.As(err, target) && target.Code == tpm2.RCRetry {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (m *HealthCheck_RedisHealthCheck) Validate() error {\n\treturn m.validate(false)\n}",
"func (sv *StubbedValidator) ExpectSuccessValidateRestart() {\n\tsv.expectRevalidate = true\n\tsv.StubSuccessValidateRestart()\n}",
"func Retry(wins int, loss int, tie int, gameType int) {\r\n\r\n\tagain := \"\" //user input variable for handling response to query to Retry\r\n\r\n\tfmt.Printf(\"Try again?[y/n]\\n\")\r\n\tfmt.Scanln(&again)\r\n\tif again == \"Y\" || again == \"y\" {\r\n\t\tif gameType == 1 {\r\n\t\t\tSssWeapon(wins, loss, tie)\r\n\t\t}\r\n\t\tif gameType == 2 {\r\n\t\t\tWhatNum(playerName)\r\n\t\t}\r\n\t}\r\n\tif again == \"N\" || again == \"n\" {\r\n\t\tDriver()\r\n\t} else {\r\n\t\tfmt.Printf(\"I'm sorry I don't understand ('Y','y'/'N','n') please \")\r\n\t\tRetry(wins, loss, tie, gameType)\r\n\t}\r\n}",
"func (s SecretForDockerRegistryGeneratorV1) validate() error {\n\tif len(s.Name) == 0 {\n\t\treturn fmt.Errorf(\"name must be specified\")\n\t}\n\tif len(s.Username) == 0 {\n\t\treturn fmt.Errorf(\"username must be specified\")\n\t}\n\tif len(s.Password) == 0 {\n\t\treturn fmt.Errorf(\"password must be specified\")\n\t}\n\tif len(s.Server) == 0 {\n\t\treturn fmt.Errorf(\"server must be specified\")\n\t}\n\treturn nil\n}",
"func (c config) validate() error {\n\tif c.MinPort <= 0 || c.MaxPort <= 0 {\n\t\treturn errors.New(\"min Port and Max Port values are required\")\n\t}\n\tif c.MaxPort < c.MinPort {\n\t\treturn errors.New(\"max Port cannot be set less that the Min Port\")\n\t}\n\treturn nil\n}",
"func (ds *DomainSetting) Validate() bool {\n\treturn (ds.Timeout >= 5 && ds.Timeout <= 20) &&\n\t\t(ds.Retry >= 1 && ds.Retry <= 5) &&\n\t\t(ds.Interval >= 60 && ds.Interval <= 180)\n}",
"func LimitsTimeoutValidation(timeout *int) bool {\n\tif timeout == nil {\n\t\treturn true\n\t}\n\tif *timeout < 100 || *timeout > 300000 {\n\t\twskprint.PrintlnOpenWhiskWarning(wski18n.T(wski18n.ID_WARN_LIMITS_TIMEOUT))\n\t\treturn false\n\t}\n\treturn true\n}",
"func ValidateRestartPolicy(policy RestartPolicy) error {\n\tswitch policy.Name {\n\tcase RestartPolicyAlways, RestartPolicyUnlessStopped, RestartPolicyDisabled:\n\t\tif policy.MaximumRetryCount != 0 {\n\t\t\tmsg := \"invalid restart policy: maximum retry count can only be used with 'on-failure'\"\n\t\t\tif policy.MaximumRetryCount < 0 {\n\t\t\t\tmsg += \" and cannot be negative\"\n\t\t\t}\n\t\t\treturn &errInvalidParameter{fmt.Errorf(msg)}\n\t\t}\n\t\treturn nil\n\tcase RestartPolicyOnFailure:\n\t\tif policy.MaximumRetryCount < 0 {\n\t\t\treturn &errInvalidParameter{fmt.Errorf(\"invalid restart policy: maximum retry count cannot be negative\")}\n\t\t}\n\t\treturn nil\n\tcase \"\":\n\t\t// Versions before v25.0.0 created an empty restart-policy \"name\" as\n\t\t// default. Allow an empty name with \"any\" MaximumRetryCount for\n\t\t// backward-compatibility.\n\t\treturn nil\n\tdefault:\n\t\treturn &errInvalidParameter{fmt.Errorf(\"invalid restart policy: unknown policy '%s'; use one of '%s', '%s', '%s', or '%s'\", policy.Name, RestartPolicyDisabled, RestartPolicyAlways, RestartPolicyOnFailure, RestartPolicyUnlessStopped)}\n\t}\n}",
"func (req *RestartVbucketsRequest) Validate() bool {\n\treturn validateMapping(req.GetRestartTimestamps(), req.GetKeyspaceIds())\n}",
"func validateRollingUpdateStatefulSet(rollingUpdate *apps.RollingUpdateStatefulSetStrategy, fldPath *field.Path) field.ErrorList {\n\tvar allErrs field.ErrorList\n\tfldPathMaxUn := fldPath.Child(\"maxUnavailable\")\n\tallErrs = append(allErrs,\n\t\tapivalidation.ValidateNonnegativeField(\n\t\t\tint64(rollingUpdate.Partition),\n\t\t\tfldPath.Child(\"partition\"))...)\n\tif rollingUpdate.MaxUnavailable != nil {\n\t\tallErrs = append(allErrs, ValidatePositiveIntOrPercent(*rollingUpdate.MaxUnavailable, fldPathMaxUn)...)\n\t\tif getIntOrPercentValue(*rollingUpdate.MaxUnavailable) == 0 {\n\t\t\t// MaxUnavailable cannot be 0.\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPathMaxUn, *rollingUpdate.MaxUnavailable, \"cannot be 0\"))\n\t\t}\n\t\t// Validate that MaxUnavailable is not more than 100%.\n\t\tallErrs = append(allErrs, IsNotMoreThan100Percent(*rollingUpdate.MaxUnavailable, fldPathMaxUn)...)\n\t}\n\treturn allErrs\n}",
"func (drj *DelRepoJob) ShouldRetry() bool {\n\treturn true\n}",
"func ExceededRetries(err error) error {\n\treturn exceeded{err: err}\n}",
"func (v *validator) Validate(ctx context.Context, seed *kubermaticv1.Seed, op admissionv1.Operation) error {\n\t// We need locking to make the validation concurrency-safe\n\t// TODO: this is acceptable as request rate is low, but is it required?\n\tv.lock.Lock()\n\tdefer v.lock.Unlock()\n\n\tseeds := kubermaticv1.SeedList{}\n\terr := v.client.List(ctx, &seeds)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get seeds: %v\", err)\n\t}\n\tseedsMap := map[string]*kubermaticv1.Seed{}\n\tfor i, s := range seeds.Items {\n\t\tseedsMap[s.Name] = &seeds.Items[i]\n\t}\n\tif op == admissionv1.Delete {\n\t\t// when a namespace is deleted, a DELETE call for all seeds in the namespace\n\t\t// is issued; this request has no .Request.Name set, so this check will make\n\t\t// sure that we exit cleanly and allow deleting namespaces without seeds\n\t\tif _, exists := seedsMap[seed.Name]; !exists && op == admissionv1.Delete {\n\t\t\treturn nil\n\t\t}\n\t\t// in case of delete request the seed is empty\n\t\tseed = seedsMap[seed.Name]\n\t}\n\n\tclient, err := v.seedClientGetter(seed)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get client for seed %q: %v\", seed.Name, err)\n\t}\n\n\treturn v.validate(ctx, seed, client, seedsMap, op == admissionv1.Delete)\n}",
"func (m *ArrayConnectionPostAllOf1) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func ValidBytesForTrytes(bytes []byte) error {\n\tif len(bytes) == 0 {\n\t\treturn consts.ErrInvalidBytesLength\n\t}\n\treturn nil\n}",
"func (o *dryrunOptions) validate() error {\n\tif o.userWPAName == \"\" && o.labelSelector == \"\" && !o.allWPA {\n\t\treturn fmt.Errorf(\"the watermarkpodautoscaler name or label-selector is required\")\n\t}\n\n\treturn nil\n}",
"func Retry(attempts int, interval time.Duration, fn func() error) error {\n\t// @step: give it a go once before jumping in\n\tfor i := 0; i < attempts; i++ {\n\t\tif err := fn(); err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tif interval > 0 {\n\t\t\ttime.Sleep(interval)\n\t\t}\n\t}\n\n\treturn errors.New(\"operation failed\")\n}",
"func (compose *Compose) Validate() error {\n\tisInSlice := func(list []string, value string) bool {\n\t\tfor _, v := range list {\n\t\t\tif v == value {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\t// Version\n\tif len(compose.Version) == 0 {\n\t\treturn fmt.Errorf(\"no version specified\")\n\n\t}\n\t// Unique labels\n\tknownLabels := []string{}\n\tfor _, service := range compose.Services {\n\t\tlabel := service.Label\n\t\tif isInSlice(knownLabels, label) {\n\t\t\treturn fmt.Errorf(\"label used twice: '%s'\", label)\n\t\t}\n\t\tknownLabels = append(knownLabels, label)\n\t}\n\treturn nil\n}",
"func (m *NonCompliantResourceFailedRulesItems0) Validate(formats strfmt.Registry) error {\n\treturn nil\n}"
] | [
"0.74960744",
"0.57643306",
"0.5702115",
"0.55220336",
"0.5487586",
"0.5443635",
"0.52778167",
"0.5185343",
"0.51229566",
"0.5120975",
"0.5108453",
"0.5068902",
"0.50688136",
"0.50606054",
"0.50606054",
"0.5039195",
"0.50262135",
"0.5020048",
"0.49768957",
"0.49728695",
"0.49632707",
"0.49530736",
"0.4948551",
"0.4935774",
"0.4922753",
"0.48973602",
"0.48905396",
"0.4890012",
"0.48879737",
"0.4887169",
"0.4880128",
"0.48729196",
"0.48534906",
"0.48476365",
"0.48350215",
"0.4831187",
"0.4825481",
"0.48200208",
"0.48075107",
"0.4797763",
"0.47714037",
"0.47469297",
"0.47348082",
"0.47339606",
"0.4720916",
"0.47193715",
"0.47163847",
"0.47163847",
"0.4714645",
"0.4714511",
"0.47119313",
"0.4710042",
"0.47069317",
"0.46947944",
"0.46866718",
"0.4684684",
"0.4683963",
"0.46810505",
"0.46804968",
"0.46775728",
"0.4675185",
"0.46748078",
"0.46715865",
"0.46638396",
"0.4651231",
"0.46397874",
"0.4636153",
"0.463409",
"0.4632465",
"0.4631797",
"0.4626765",
"0.462471",
"0.46232295",
"0.46111917",
"0.4610799",
"0.46098185",
"0.46081197",
"0.46037257",
"0.45983285",
"0.4595763",
"0.4595542",
"0.45949972",
"0.45922062",
"0.45889577",
"0.45859182",
"0.45801455",
"0.45786113",
"0.45781144",
"0.45762527",
"0.4567901",
"0.4561489",
"0.45614293",
"0.45577052",
"0.45553946",
"0.45529866",
"0.4550115",
"0.4549973",
"0.45466158",
"0.45429233",
"0.45357826"
] | 0.6482321 | 1 |
Validate validates input key rotation encryption options. | func (e BatchJobKeyRotateEncryption) Validate() error {
if e.Type != sses3 && e.Type != ssekms {
return errInvalidArgument
}
spaces := strings.HasPrefix(e.Key, " ") || strings.HasSuffix(e.Key, " ")
if e.Type == ssekms && spaces {
return crypto.ErrInvalidEncryptionKeyID
}
if e.Type == ssekms && GlobalKMS != nil {
ctx := kms.Context{}
if e.Context != "" {
b, err := base64.StdEncoding.DecodeString(e.Context)
if err != nil {
return err
}
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err := json.Unmarshal(b, &ctx); err != nil {
return err
}
}
e.kmsContext = kms.Context{}
for k, v := range ctx {
e.kmsContext[k] = v
}
ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
if _, err := GlobalKMS.GenerateKey(GlobalContext, e.Key, ctx); err != nil {
return err
}
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (kv BatchKeyRotateKV) Validate() error {\n\tif kv.Key == \"\" {\n\t\treturn errInvalidArgument\n\t}\n\treturn nil\n}",
"func (e RotationValidationError) Key() bool { return e.key }",
"func (r *BatchJobKeyRotateV1) Validate(ctx context.Context, job BatchJobRequest, o ObjectLayer) error {\n\tif r == nil {\n\t\treturn nil\n\t}\n\n\tif r.APIVersion != batchKeyRotateAPIVersion {\n\t\treturn errInvalidArgument\n\t}\n\n\tif r.Bucket == \"\" {\n\t\treturn errInvalidArgument\n\t}\n\n\tif _, err := o.GetBucketInfo(ctx, r.Bucket, BucketOptions{}); err != nil {\n\t\tif isErrBucketNotFound(err) {\n\t\t\treturn batchKeyRotationJobError{\n\t\t\t\tCode: \"NoSuchSourceBucket\",\n\t\t\t\tDescription: \"The specified source bucket does not exist\",\n\t\t\t\tHTTPStatusCode: http.StatusNotFound,\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\tif GlobalKMS == nil {\n\t\treturn errKMSNotConfigured\n\t}\n\tif err := r.Encryption.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, tag := range r.Flags.Filter.Tags {\n\t\tif err := tag.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, meta := range r.Flags.Filter.Metadata {\n\t\tif err := meta.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := r.Flags.Retry.Validate(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (e aesGCMEncodedEncryptor) ConfiguredToRotate() bool {\n\treturn len(e.primaryKey) == requiredKeyLength && len(e.secondaryKey) == requiredKeyLength\n}",
"func (k Key) Validate() error {\n\n\t// check method\n\tif err := k.hasValidMethod(); err != nil {\n\t\treturn err\n\t}\n\n\t//check label\n\tif err := k.hasValidLabel(); err != nil {\n\t\treturn err\n\t}\n\n\t// check secret\n\tif err := k.hasValidSecret32(); err != nil {\n\t\treturn err\n\t}\n\n\t// check algo\n\tif err := k.hasValidAlgo(); err != nil {\n\t\treturn err\n\t}\n\n\t// check digits\n\tif err := k.hasValidDigits(); err != nil {\n\t\treturn err\n\t}\n\n\t// check period\n\tif err := k.hasValidPeriod(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (m *Rotation) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Rx\n\n\t// no validation rules for Ry\n\n\t// no validation rules for Rz\n\n\treturn nil\n}",
"func (o *Options) Verify() error {\n\tif o.GC != \"\" {\n\t\tvalid := isInArray(validGCOptions, o.GC)\n\t\tif !valid {\n\t\t\treturn fmt.Errorf(`invalid gc option '%s': valid values are %s`,\n\t\t\t\to.GC,\n\t\t\t\tstrings.Join(validGCOptions, \", \"))\n\t\t}\n\t}\n\n\tif o.Scheduler != \"\" {\n\t\tvalid := isInArray(validSchedulerOptions, o.Scheduler)\n\t\tif !valid {\n\t\t\treturn fmt.Errorf(`invalid scheduler option '%s': valid values are %s`,\n\t\t\t\to.Scheduler,\n\t\t\t\tstrings.Join(validSchedulerOptions, \", \"))\n\t\t}\n\t}\n\n\tif o.Serial != \"\" {\n\t\tvalid := isInArray(validSerialOptions, o.Serial)\n\t\tif !valid {\n\t\t\treturn fmt.Errorf(`invalid serial option '%s': valid values are %s`,\n\t\t\t\to.Serial,\n\t\t\t\tstrings.Join(validSerialOptions, \", \"))\n\t\t}\n\t}\n\n\tif o.PrintSizes != \"\" {\n\t\tvalid := isInArray(validPrintSizeOptions, o.PrintSizes)\n\t\tif !valid {\n\t\t\treturn fmt.Errorf(`invalid size option '%s': valid values are %s`,\n\t\t\t\to.PrintSizes,\n\t\t\t\tstrings.Join(validPrintSizeOptions, \", \"))\n\t\t}\n\t}\n\n\tif o.PanicStrategy != \"\" {\n\t\tvalid := isInArray(validPanicStrategyOptions, o.PanicStrategy)\n\t\tif !valid {\n\t\t\treturn fmt.Errorf(`invalid panic option '%s': valid values are %s`,\n\t\t\t\to.PanicStrategy,\n\t\t\t\tstrings.Join(validPanicStrategyOptions, \", \"))\n\t\t}\n\t}\n\n\tif o.Opt != \"\" {\n\t\tif !isInArray(validOptOptions, o.Opt) {\n\t\t\treturn fmt.Errorf(\"invalid -opt=%s: valid values are %s\", o.Opt, strings.Join(validOptOptions, \", \"))\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (o *CryptoHandlerOpts) ValidateForEncryptDecrypt() error {\n\tif o.EncProvider == ThEncryptProviderSimple && o.SimpleKey == \"\" {\n\t\treturn fmt.Errorf(\"You must supply a valid simple-key when using the simply provider. \" +\n\t\t\t\"The simple provider uses AES and so the AES key should be either 16 or 32 byte to select AES-128 or AES-256 encryption\")\n\n\t}\n\tif (o.EncProvider == ThEncryptProviderVault || o.EncProvider == ThEncryptProviderVaultCli) && o.NamedEncKey == \"\" {\n\t\treturn fmt.Errorf(\"You must supply a vault-namedkey when using the vault provider \")\n\t}\n\treturn nil\n}",
"func (a KeyAlgorithm) ValidKeySize(size int) error {\n\tswitch a {\n\tcase ECDSAKey:\n\t\tif !(size == 0 || size == 256 || size == 384 || size == 521) {\n\t\t\treturn fmt.Errorf(\"invalid ecdsa key size %d - key size must be either 256, 384 or 521\", size)\n\t\t}\n\t\treturn nil\n\tcase RSAKey:\n\t\tif !(size == 0 || (size >= minRSAKeySize && size <= maxRSAKeySize)) {\n\t\t\treturn fmt.Errorf(\"invalid rsa key size %d - key size must be between %d and %d\", size, minRSAKeySize, maxRSAKeySize)\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"invalid key algorithm\")\n}",
"func (m *LicenseKeys) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateLicenseKey(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func validateEncryptedDEK(encryptedDEK []byte) error {\n\tif len(encryptedDEK) == 0 {\n\t\treturn fmt.Errorf(\"encrypted DEK is empty\")\n\t}\n\tif len(encryptedDEK) > encryptedDEKMaxSize {\n\t\treturn fmt.Errorf(\"encrypted DEK is %d bytes, which exceeds the max size of %d\", len(encryptedDEK), encryptedDEKMaxSize)\n\t}\n\treturn nil\n}",
"func (opts resourceOptions) validate() error {\n\t// Check that the required flags did not get a flag as their value.\n\t// We can safely look for a '-' as the first char as none of the fields accepts it.\n\t// NOTE: We must do this for all the required flags first or we may output the wrong\n\t// error as flags may seem to be missing because Cobra assigned them to another flag.\n\tif strings.HasPrefix(opts.Group, \"-\") {\n\t\treturn fmt.Errorf(groupPresent)\n\t}\n\tif strings.HasPrefix(opts.Version, \"-\") {\n\t\treturn fmt.Errorf(versionPresent)\n\t}\n\tif strings.HasPrefix(opts.Kind, \"-\") {\n\t\treturn fmt.Errorf(kindPresent)\n\t}\n\n\t// We do not check here if the GVK values are empty because that would\n\t// make them mandatory and some plugins may want to set default values.\n\t// Instead, this is checked by resource.GVK.Validate()\n\n\treturn nil\n}",
"func (m *MultiClusterLicenseKeys) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateVsan(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func validateKeyLength(key string) error {\n\tdata, err := base64.StdEncoding.DecodeString(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(data) != EARKeyLength {\n\t\treturn fmt.Errorf(\"key length should be 32 it is %d\", len(data))\n\t}\n\n\treturn nil\n}",
"func (a *Service) validateEncryptionAlgorithm(encryptionAlgorithm string) error {\n\n\t// convert to Jose content type\n\tencAlgorithm := jose.ContentEncryption(encryptionAlgorithm)\n\n\tif encAlgorithm == jose.A128CBC_HS256 ||\n\t\tencAlgorithm == jose.A192CBC_HS384 ||\n\t\tencAlgorithm == jose.A256CBC_HS512 ||\n\t\tencAlgorithm == jose.A128GCM ||\n\t\tencAlgorithm == jose.A192GCM ||\n\t\tencAlgorithm == jose.A256GCM {\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"invalid encryption algorithm\")\n}",
"func (m *EncryptionAtRestConfig) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateKmsConfigUUID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOpType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (r BatchKeyRotateRetry) Validate() error {\n\tif r.Attempts < 0 {\n\t\treturn errInvalidArgument\n\t}\n\n\tif r.Delay < 0 {\n\t\treturn errInvalidArgument\n\t}\n\n\treturn nil\n}",
"func (jwk *Jwk) Validate() error {\n\n\t// If the alg parameter is set, make sure it matches the set JWK Type\n\tif len(jwk.Algorithm) > 0 {\n\t\talgKeyType := GetKeyType(jwk.Algorithm)\n\t\tif algKeyType != jwk.Type {\n\t\t\tfmt.Errorf(\"Jwk Type (kty=%v) doesn't match the algorithm key type (%v)\", jwk.Type, algKeyType)\n\t\t}\n\t}\n\tswitch jwk.Type {\n\tcase KeyTypeRSA:\n\t\tif err := jwk.validateRSAParams(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase KeyTypeEC:\n\t\tif err := jwk.validateECParams(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase KeyTypeOct:\n\t\tif err := jwk.validateOctParams(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tdefault:\n\t\treturn errors.New(\"KeyType (kty) must be EC, RSA or Oct\")\n\t}\n\n\treturn nil\n}",
"func (s SecretForTLSGeneratorV1) validate() error {\n\t// TODO: This is not strictly necessary. We can generate a self signed cert\n\t// if no key/cert is given. The only requiredment is that we either get both\n\t// or none. See test/e2e/ingress_utils for self signed cert generation.\n\tif len(s.Key) == 0 {\n\t\treturn fmt.Errorf(\"key must be specified.\")\n\t}\n\tif len(s.Cert) == 0 {\n\t\treturn fmt.Errorf(\"certificate must be specified.\")\n\t}\n\tif _, err := tls.LoadX509KeyPair(s.Cert, s.Key); err != nil {\n\t\treturn fmt.Errorf(\"failed to load key pair %v\", err)\n\t}\n\t// TODO: Add more validation.\n\t// 1. If the certificate contains intermediates, it is a valid chain.\n\t// 2. Format etc.\n\treturn nil\n}",
"func ValidateOptions(options []*commonpb.KeyValuePair) error {\n\toptionMap := funcutil.KeyValuePair2Map(options)\n\t// StartTs should be int\n\t_, ok := optionMap[StartTs]\n\tvar startTs uint64\n\tvar endTs uint64 = math.MaxInt64\n\tvar err error\n\tif ok {\n\t\tstartTs, err = strconv.ParseUint(optionMap[StartTs], 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// EndTs should be int\n\t_, ok = optionMap[EndTs]\n\tif ok {\n\t\tendTs, err = strconv.ParseUint(optionMap[EndTs], 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif startTs > endTs {\n\t\treturn errors.New(\"start_ts shouldn't be larger than end_ts\")\n\t}\n\treturn nil\n}",
"func (opt *Options) Valid() bool {\n\n\tif opt.Image == \"\" {\n\t\toutput.Error(\"An image is required\")\n\t}\n\n\tif !(opt.Gif || opt.Trigger || opt.Shake) && opt.OutName != \"\" {\n\t\tif !strings.HasSuffix(strings.ToLower(opt.OutName), \".png\") {\n\t\t\toutput.Error(\"The output file name must have the suffix of .png\")\n\t\t}\n\t}\n\n\tif (opt.Gif || opt.Trigger || opt.Shake) && opt.OutName != \"\" {\n\t\tif !strings.HasSuffix(strings.ToLower(opt.OutName), \".gif\") {\n\t\t\toutput.Error(\"The output file name must have the suffix of .gif\")\n\t\t}\n\t}\n\n\treturn true\n}",
"func validateKeyMap(km *KeyMap) error {\n\tif len(km.Yes) == 0 && len(km.No) == 0 && len(km.Submit) == 0 {\n\t\treturn fmt.Errorf(\"no submit key\")\n\t}\n\n\tif !(len(km.Yes) > 0 && len(km.No) > 0) &&\n\t\tlen(km.Toggle) == 0 &&\n\t\t!(len(km.SelectYes) > 0 && len(km.SelectNo) > 0) {\n\t\treturn fmt.Errorf(\"missing keys to select a value\")\n\t}\n\n\treturn nil\n}",
"func (o *CertificateOptions) Validate() error {\n\tif len(o.csrNames) < 1 && cmdutil.IsFilenameSliceEmpty(o.Filenames, o.Kustomize) {\n\t\treturn fmt.Errorf(\"one or more CSRs must be specified as <name> or -f <filename>\")\n\t}\n\treturn nil\n}",
"func (priv *PKCS11PrivateKeyRSA) Validate() error {\n\tpub := priv.key.PubKey.(*rsa.PublicKey)\n\tif pub.E < 2 {\n\t\treturn errMalformedRSAKey\n\t}\n\t// The software implementation actively rejects 'large' public\n\t// exponents, in order to simplify its own implementation.\n\t// Here, instead, we expect the PKCS#11 library to enforce its\n\t// own preferred constraints, whatever they might be.\n\treturn nil\n}",
"func (o *Options) CheckOptions() error {\n\tif o.ViceCrtFile == \"\" {\n\t\treturn fmt.Errorf(\"path to vice certificate not provided. Aborting\")\n\t}\n\tif o.ViceKeyFile == \"\" {\n\t\treturn fmt.Errorf(\"path to vice key not provided. Aborting\")\n\t}\n\tif o.VicePresidentConfig == \"\" {\n\t\treturn fmt.Errorf(\"path to vice config not provided. Aborting\")\n\t}\n\tif o.IntermediateCertificate == \"\" {\n\t\tLogDebug(\"Intermediate certificate not provided\")\n\t}\n\tif o.KubeConfig == \"\" {\n\t\tLogDebug(\"Path to kubeconfig not provided. Using Default\")\n\t}\n\n\tif o.MinCertValidityDays <= 0 {\n\t\tLogDebug(\"Minimum certificate validity invalid. Using default: 30 days\")\n\t\to.MinCertValidityDays = 30\n\t}\n\n\tif o.MetricPort == 0 {\n\t\to.MetricPort = 9091\n\t\tLogDebug(\"Metric port not provided. Using default port: 9091\")\n\t}\n\tif !o.IsEnableAdditionalSymantecMetrics {\n\t\tLogDebug(\"Not exposing additional Symantec metrics\")\n\t} else {\n\t\tLogDebug(\"Exposing additional Symantec metrics\")\n\t}\n\n\treturn nil\n}",
"func (keySetter *KeySetter) Validate() []string {\n\tvar errorData []string = []string{}\n\tif keySetter.Key == \"\" {\n\t\terrorData = append(errorData, \"field 'key' is required\")\n\t}\n\tif keySetter.Value == \"\" || keySetter.Value == nil {\n\t\terrorData = append(errorData, \"field 'value' is required\")\n\t}\n\tif keySetter.Expiry < 0 {\n\t\terrorData = append(errorData, \"Enter a valid numerical expiry in ms\")\n\t}\n\treturn errorData\n}",
"func (opts Options) validate() error {\n\t// AzureDNSClient is only not nil for the tests.\n\tif opts.AzureAuthentication == nil && opts.AzureDNSClient == nil {\n\t\treturn errAzureAuthenticationNil\n\t}\n\n\tif opts.KubeClient == nil {\n\t\treturn errKubeClientNil\n\t}\n\n\tif len(opts.DomainNameRoot) <= 0 {\n\t\treturn errDomainNameRootEmpty\n\t}\n\n\tif len(opts.ResourceGroupName) <= 0 {\n\t\treturn errResourceGroupNameEmpty\n\t}\n\n\tif len(opts.ResourceName) <= 0 {\n\t\treturn errResourceNameEmpty\n\t}\n\n\tif len(opts.Region) <= 0 {\n\t\treturn errRegionEmpty\n\t}\n\n\treturn nil\n}",
"func (m *AzureKeyVaultKey) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateKeyID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateScope(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSvm(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (opts *ClientOptions) Validate() error {\n\tif opts.Host == \"\" {\n\t\treturn errors.New(\"error: Host must be specified\")\n\t}\n\tif opts.Version == \"\" {\n\t\treturn errors.New(\"error: Version must be specified\")\n\t}\n\tif opts.Key == \"\" {\n\t\treturn errors.New(\"error: Key must be specified\")\n\t}\n\tif !keyRegex.MatchString(opts.Key) {\n\t\treturn errors.New(\"error: Key must be 26 hex characters long\")\n\t}\n\n\tisSupportedVersion := false\n\tfor _, v := range supportedVersions {\n\t\tif v == opts.Version {\n\t\t\tisSupportedVersion = true\n\t\t}\n\t}\n\tif !isSupportedVersion {\n\t\treturn errors.New(fmt.Sprintf(\"error: Version %s is not supported by the Content API\", opts.Version))\n\t}\n\n\tif opts.GhostPath == \"\" {\n\t\topts.GhostPath = \"ghost\"\n\t}\n\n\treturn nil\n}",
"func (i GinJwtSignAlgorithm) Valid() bool {\n\tfor _, v := range _GinJwtSignAlgorithmValues {\n\t\tif i == v {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func validateOptions(options Options) (errors []string) {\n\n\t// options.scale\n\tif options.Scale != nil {\n\t\tif options.Scale.Min != nil {\n\t\t\tif *options.Scale.Min < 0 {\n\t\t\t\terrors = append(errors, fmt.Sprintf(\"options field \\\"scale.min\\\" has invalid value set: %d, the value must be greater than \\\"0\\\"\",\n\t\t\t\t\t*options.Scale.Min))\n\t\t\t}\n\t\t}\n\n\t\tif options.Scale.Max != nil {\n\t\t\tif *options.Scale.Max < 0 {\n\t\t\t\terrors = append(errors, fmt.Sprintf(\"options field \\\"scale.max\\\" has invalid value set: %d, the value must be greater than \\\"0\\\"\",\n\t\t\t\t\t*options.Scale.Max))\n\t\t\t}\n\t\t}\n\n\t\tif options.Scale.Min != nil && options.Scale.Max != nil {\n\t\t\tif *options.Scale.Max < *options.Scale.Min {\n\t\t\t\terrors = append(errors, \"options field \\\"scale.max\\\" value must be greater or equal to \\\"scale.min\\\"\")\n\t\t\t}\n\t\t}\n\n\t\tif options.Scale.Metric != nil {\n\t\t\tif *options.Scale.Metric != \"concurrency\" && *options.Scale.Metric != \"rps\" {\n\t\t\t\terrors = append(errors, fmt.Sprintf(\"options field \\\"scale.metric\\\" has invalid value set: %s, allowed is only \\\"concurrency\\\" or \\\"rps\\\"\",\n\t\t\t\t\t*options.Scale.Metric))\n\t\t\t}\n\t\t}\n\n\t\tif options.Scale.Target != nil {\n\t\t\tif *options.Scale.Target < 0.01 {\n\t\t\t\terrors = append(errors, fmt.Sprintf(\"options field \\\"scale.target\\\" has value set to \\\"%f\\\", but it must not be less than 0.01\",\n\t\t\t\t\t*options.Scale.Target))\n\t\t\t}\n\t\t}\n\n\t\tif options.Scale.Utilization != nil {\n\t\t\tif *options.Scale.Utilization < 1 || *options.Scale.Utilization > 100 {\n\t\t\t\terrors = append(errors,\n\t\t\t\t\tfmt.Sprintf(\"options field \\\"scale.utilization\\\" has value set to \\\"%f\\\", but it must not be less than 1 or greater than 100\",\n\t\t\t\t\t\t*options.Scale.Utilization))\n\t\t\t}\n\t\t}\n\t}\n\n\t// options.resource\n\tif options.Resources != nil {\n\n\t\t// options.resource.requests\n\t\tif options.Resources.Requests != nil {\n\n\t\t\tif options.Resources.Requests.CPU != nil {\n\t\t\t\t_, err := resource.ParseQuantity(*options.Resources.Requests.CPU)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrors = append(errors, fmt.Sprintf(\"options field \\\"resources.requests.cpu\\\" has invalid value set: \\\"%s\\\"; \\\"%s\\\"\",\n\t\t\t\t\t\t*options.Resources.Requests.CPU, err.Error()))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif options.Resources.Requests.Memory != nil {\n\t\t\t\t_, err := resource.ParseQuantity(*options.Resources.Requests.Memory)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrors = append(errors, fmt.Sprintf(\"options field \\\"resources.requests.memory\\\" has invalid value set: \\\"%s\\\"; \\\"%s\\\"\",\n\t\t\t\t\t\t*options.Resources.Requests.Memory, err.Error()))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// options.resource.limits\n\t\tif options.Resources.Limits != nil {\n\n\t\t\tif options.Resources.Limits.CPU != nil {\n\t\t\t\t_, err := resource.ParseQuantity(*options.Resources.Limits.CPU)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrors = append(errors, fmt.Sprintf(\"options field \\\"resources.limits.cpu\\\" has invalid value set: \\\"%s\\\"; \\\"%s\\\"\",\n\t\t\t\t\t\t*options.Resources.Limits.CPU, err.Error()))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif options.Resources.Limits.Memory != nil {\n\t\t\t\t_, err := resource.ParseQuantity(*options.Resources.Limits.Memory)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrors = append(errors, fmt.Sprintf(\"options field \\\"resources.limits.memory\\\" has invalid value set: \\\"%s\\\"; 
\\\"%s\\\"\",\n\t\t\t\t\t\t*options.Resources.Limits.Memory, err.Error()))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif options.Resources.Limits.Concurrency != nil {\n\t\t\t\tif *options.Resources.Limits.Concurrency < 0 {\n\t\t\t\t\terrors = append(errors, fmt.Sprintf(\"options field \\\"resources.limits.concurrency\\\" has value set to \\\"%d\\\", but it must not be less than 0\",\n\t\t\t\t\t\t*options.Resources.Limits.Concurrency))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}",
"func validateOpts(opts VerifyOpts) error {\n\tcheckPub := len(opts.TrustedAKs) > 0\n\tcheckCert := len(opts.TrustedRootCerts) > 0\n\tif !checkPub && !checkCert {\n\t\treturn fmt.Errorf(\"no trust mechanism provided, either use TrustedAKs or TrustedRootCerts\")\n\t}\n\tif checkPub && checkCert {\n\t\treturn fmt.Errorf(\"multiple trust mechanisms provided, only use one of TrustedAKs or TrustedRootCerts\")\n\t}\n\treturn nil\n}",
"func validateOptions(cfg *config) (err error) {\n\t// step: read in the token if required\n\n\tif cfg.vaultAuthFile != \"\" {\n\t\tif exists, _ := fileExists(cfg.vaultAuthFile); !exists {\n\t\t\treturn fmt.Errorf(\"the token file: %s does not exists, please check\", cfg.vaultAuthFile)\n\t\t}\n\n\t\tcfg.vaultAuthOptions, err = readConfigFile(cfg.vaultAuthFile, cfg.vaultAuthFileFormat)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to read in authentication options from: %s, error: %s\", cfg.vaultAuthFile, err)\n\t\t}\n\t\tif cfg.vaultAuthOptions.VaultURL != \"\" {\n\t\t\tcfg.vaultURL = cfg.vaultAuthOptions.VaultURL\n\t\t}\n\t}\n\n\tif cfg.vaultURL == \"\" {\n\t\tcfg.vaultURL = os.Getenv(\"VAULT_ADDR\")\n\t}\n\n\tif cfg.vaultURL == \"\" {\n\t\treturn fmt.Errorf(\"VAULT_ADDR is unset\")\n\t}\n\n\t// step: validate the vault url\n\tif _, err = url.Parse(cfg.vaultURL); err != nil {\n\t\treturn fmt.Errorf(\"invalid vault url: '%s' specified\", cfg.vaultURL)\n\t}\n\n\tif cfg.vaultCaFile != \"\" {\n\t\tif exists, _ := fileExists(cfg.vaultCaFile); !exists {\n\t\t\treturn fmt.Errorf(\"the ca certificate file: %s does not exist\", cfg.vaultCaFile)\n\t\t}\n\t}\n\n\tif cfg.skipTLSVerify == true && cfg.vaultCaFile != \"\" {\n\t\treturn fmt.Errorf(\"you are skipping the tls but supplying a CA, doesn't make sense\")\n\t}\n\n\treturn nil\n}",
"func (e HealthCheck_TlsOptionsValidationError) Key() bool { return e.key }",
"func (o *ChonkOptions) Validate() error {\n\tif len(o.args) > 0 {\n\t\treturn fmt.Errorf(\"no arguments expected\")\n\t}\n\n\treturn nil\n}",
"func TestValidateRootRotationMissingOrigSig(t *testing.T) {\n\ttestValidateRootRotationMissingOrigSig(t, data.ECDSAKey, data.ECDSAx509Key)\n\tif !testing.Short() {\n\t\ttestValidateRootRotationMissingOrigSig(t, data.RSAKey, data.RSAx509Key)\n\t}\n}",
"func (key Key) Valid() bool {\n\tk := uint64(key)\n\treturn 0 <= k && k < config.maxKey // Check for 0 <= not necessary\n}",
"func validateGenerateSIPAuthVectorInputs(key []byte, opc []byte, sqn uint64) error {\n\tif len(key) != ExpectedKeyBytes {\n\t\treturn fmt.Errorf(\"incorrect key size. Expected %v bytes, but got %v bytes\", ExpectedKeyBytes, len(key))\n\t}\n\tif len(opc) != ExpectedOpcBytes {\n\t\treturn fmt.Errorf(\"incorrect opc size. Expected %v bytes, but got %v bytes\", ExpectedOpcBytes, len(opc))\n\t}\n\tif sqn > maxSqn {\n\t\treturn fmt.Errorf(\"sequence number too large, expected a number which can fit in 48 bits. Got: %v\", sqn)\n\t}\n\treturn nil\n}",
"func (opts *Options) Validate() error {\n\tif opts.LockTimeout < 0 {\n\t\treturn errors.New(\"cannot have negative lock timeout\")\n\t}\n\n\tif opts.LockTimeout == 0 {\n\t\topts.LockTimeout = amboy.LockTimeout\n\t}\n\n\tif opts.PoolSize == 0 {\n\t\topts.PoolSize = runtime.NumCPU()\n\t}\n\n\tif opts.WaitInterval == 0 {\n\t\topts.WaitInterval = 100 * time.Millisecond\n\t}\n\n\tif opts.SchemaName == \"\" {\n\t\topts.SchemaName = \"amboy\"\n\t}\n\n\treturn nil\n}",
"func (o *Options) Validate() error {\n\tif len(o.args) > 0 {\n\t\treturn errors.New(\"no argument is allowed\")\n\t}\n\tif o.useServicePrincipal && o.useUserPrincipal {\n\t\treturn errors.New(\"service principal and user principal cannot be used at the same time\")\n\t}\n\treturn nil\n}",
"func IsErrInvalidKey(err error) bool { return errors.Unwrap(err) == datastore.ErrInvalidKey }",
"func (o *options) validateFlags() error {\n\tvar err error\n\n\tif len(o.name) == 0 {\n\t\treturn &util.ExitError{Message: \"-n, --name option is required.\", Code: 1}\n\t}\n\n\to.output, err = filepath.Abs(o.output)\n\tif err != nil {\n\t\treturn &util.ExitError{Message: fmt.Sprintf(\"-o, --output option invalid: %v.\", o.output), Code: 1}\n\t}\n\n\tif util.DirExists(o.output) {\n\t\treturn &util.ExitError{Message: fmt.Sprintf(\"-o, --output already exists and is a directory: %v.\", o.output), Code: 1}\n\t}\n\n\tif o.serviceaccount && o.certificate {\n\t\treturn &util.ExitError{Message: \"-c, --certificate and -s, --serviceaccount are mutually exclusive options.\", Code: 1}\n\t}\n\n\treturn nil\n}",
"func (m *SoftwareDataEncryption) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func ValidateClientEncryptionKeyID(input interface{}, key string) (warnings []string, errors []error) {\n\tv, ok := input.(string)\n\tif !ok {\n\t\terrors = append(errors, fmt.Errorf(\"expected %q to be a string\", key))\n\t\treturn\n\t}\n\n\tif _, err := ParseClientEncryptionKeyID(v); err != nil {\n\t\terrors = append(errors, err)\n\t}\n\n\treturn\n}",
"func PrivateKeyValidate(priv *rsa.PrivateKey,) error",
"func (jwk *Jwk) validateOctParams() error {\n\tif len(jwk.KeyValue) < 1 {\n\t\treturn errors.New(\"Oct Required Param KeyValue (k) is empty\")\n\t}\n\n\treturn nil\n}",
"func ValidKey(key string) bool {\n\treturn len(key) <= maxKey && keyRegex.Match([]byte(key))\n}",
"func (o *Options) validate(args []string) error {\n\tif len(args) != 0 {\n\t\treturn errors.New(\"arguments are not supported\")\n\t}\n\treturn nil\n}",
"func validateGenerateResyncInputs(auts, key, opc, rand []byte) error {\n\tif len(auts) != ExpectedAutsBytes {\n\t\treturn fmt.Errorf(\"incorrect auts size. Expected %v bytes, but got %v bytes\", ExpectedAutsBytes, len(auts))\n\t}\n\tif len(key) != ExpectedKeyBytes {\n\t\treturn fmt.Errorf(\"incorrect key size. Expected %v bytes, but got %v bytes\", ExpectedKeyBytes, len(key))\n\t}\n\tif len(opc) != ExpectedOpcBytes {\n\t\treturn fmt.Errorf(\"incorrect opc size. Expected %v bytes, but got %v bytes\", ExpectedOpcBytes, len(opc))\n\t}\n\tif len(rand) != RandChallengeBytes {\n\t\treturn fmt.Errorf(\"incorrect rand size. Expected %v bytes, but got %v bytes\", RandChallengeBytes, len(rand))\n\t}\n\treturn nil\n}",
"func (kv KeyValue) Valid() bool {\n\treturn kv.Key.Defined() && kv.Value.Type() != INVALID\n}",
"func (e PaymentInputValidationError) Key() bool { return e.key }",
"func (o *dryrunOptions) validate() error {\n\tif o.userWPAName == \"\" && o.labelSelector == \"\" && !o.allWPA {\n\t\treturn fmt.Errorf(\"the watermarkpodautoscaler name or label-selector is required\")\n\t}\n\n\treturn nil\n}",
"func (o *Options) Validate() error {\n\n\tmsgs := make([]string, 0)\n\tif o.CookieSecret == \"\" {\n\t\tpanic(\"missing setting: cookie-secret\")\n\t}\n\n\tparseProviderInfo(o)\n\n\tvar cipher *cookie.Cipher\n\tif o.CookieRefresh != time.Duration(0) {\n\t\tvalidCookieSecretSize := false\n\t\tfor _, i := range []int{16, 24, 32} {\n\t\t\tif len(secretBytes(o.CookieSecret)) == i {\n\t\t\t\tvalidCookieSecretSize = true\n\t\t\t}\n\t\t}\n\t\tvar decoded bool\n\t\tif string(secretBytes(o.CookieSecret)) != o.CookieSecret {\n\t\t\tdecoded = true\n\t\t}\n\t\tif validCookieSecretSize == false {\n\t\t\tvar suffix string\n\t\t\tif decoded {\n\t\t\t\tsuffix = fmt.Sprintf(\" note: cookie secret was base64 decoded from %q\", o.CookieSecret)\n\t\t\t}\n\t\t\tpanic(fmt.Sprintf(\n\t\t\t\t\"cookie_secret must be 16, 24, or 32 bytes \"+\n\t\t\t\t\t\"to create an AES cipher when \"+\n\t\t\t\t\t\"pass_access_token == true or \"+\n\t\t\t\t\t\"cookie_refresh != 0, but is %d bytes.%s\",\n\t\t\t\tlen(secretBytes(o.CookieSecret)), suffix))\n\t\t} else {\n\t\t\tvar err error\n\t\t\tcipher, err = cookie.NewCipher(secretBytes(o.CookieSecret))\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"cookie-secret error: %v\", err))\n\t\t\t}\n\t\t}\n\t}\n\n\to.SessionOptions.Cipher = cipher\n\tsessionStore, err := sessions.NewSessionStore(&o.SessionOptions, &o.CookieOptions)\n\tif err != nil {\n\t\tmsgs = append(msgs, fmt.Sprintf(\"error initialising session storage: %v\", err))\n\t} else {\n\t\to.sessionStore = sessionStore\n\t}\n\n\tif o.CookieRefresh >= o.CookieExpire {\n\t\tmsgs = append(msgs, fmt.Sprintf(\n\t\t\t\"cookie_refresh (%s) must be less than \"+\n\t\t\t\t\"cookie_expire (%s)\",\n\t\t\to.CookieRefresh.String(),\n\t\t\to.CookieExpire.String()))\n\t}\n\n\t//msgs = parseSignatureKey(o, msgs)\n\tmsgs = validateCookieName(o, msgs)\n\t//msgs = setupLogger(o, msgs)\n\n\tif len(msgs) != 0 {\n\t\treturn fmt.Errorf(\"Invalid configuration:\\n %s\",\n\t\t\tstrings.Join(msgs, \"\\n \"))\n\t}\n\treturn nil\n}",
"func (v *Variant) Validate() error {\n\tok, msg := util.IsSafeKey(v.Key)\n\tif !ok {\n\t\treturn fmt.Errorf(msg)\n\t}\n\treturn nil\n}",
"func validateOptions(s string, options []string) (bool, error) {\n\tl := strings.ToLower(s)\n\tfor _, option := range options {\n\t\tif l == option {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, fmt.Errorf(\"%s is a not a valid option. Valid options are %v\", s, options)\n}",
"func (er *AuthECRecoveryRequest) Validate() error {\n\t// TODO: brush up the validiation e.g. byte string should begin with 0x or so...\n\tif len(er.SigR) != 32 {\n\t\treturn errors.New(\"sig_r's length should be 32\")\n\t}\n\tif len(er.SigS) != 32 {\n\t\treturn errors.New(\"sig_s's length should be 32\")\n\t}\n\tif len(er.Data) < 0 {\n\t\treturn errors.New(\"raw_tx should be non empty\")\n\t}\n\tif len(er.OriginalSigner) < 0 {\n\t\treturn errors.New(\"original_signer should be non empty string\")\n\t}\n\n\treturn nil\n}",
"func (c *KeyPair) Validate() error {\n\tif c.KeyID == \"\" {\n\t\treturn ErrInvalidKeyID\n\t} else if c.PrivateKey == \"\" {\n\t\treturn ErrInvalidPrivateKey\n\t} else if c.PublicKey == \"\" {\n\t\treturn ErrInvalidPublicKey\n\t} else if c.Issuer == \"\" {\n\t\treturn ErrInvalidIssuer\n\t}\n\treturn nil\n}",
"func (o DownsamplerOptions) validate() error {\n\tif o.Storage == nil {\n\t\treturn errNoStorage\n\t}\n\tif o.ClusterClient == nil {\n\t\treturn errNoClusterClient\n\t}\n\tif o.RulesKVStore == nil {\n\t\treturn errNoRulesStore\n\t}\n\tif o.ClockOptions == nil {\n\t\treturn errNoClockOptions\n\t}\n\tif o.InstrumentOptions == nil {\n\t\treturn errNoInstrumentOptions\n\t}\n\tif o.TagEncoderOptions == nil {\n\t\treturn errNoTagEncoderOptions\n\t}\n\tif o.TagDecoderOptions == nil {\n\t\treturn errNoTagDecoderOptions\n\t}\n\tif o.TagEncoderPoolOptions == nil {\n\t\treturn errNoTagEncoderPoolOptions\n\t}\n\tif o.TagDecoderPoolOptions == nil {\n\t\treturn errNoTagDecoderPoolOptions\n\t}\n\treturn nil\n}",
"func TestValidateSuccessfulRootRotation(t *testing.T) {\n\ttestValidateSuccessfulRootRotation(t, data.ECDSAKey, data.ECDSAx509Key)\n\tif !testing.Short() {\n\t\ttestValidateSuccessfulRootRotation(t, data.RSAKey, data.RSAx509Key)\n\t}\n}",
"func (o *Opts) Validate() error {\n\tvar e []string\n\tif o.Queue == 0 {\n\t\te = append(e, \"queue size must be greater than 0\")\n\t}\n\tif len(e) > 0 {\n\t\treturn fmt.Errorf(\"%s\", strings.Join(e, \"; \"))\n\t}\n\treturn nil\n}",
"func initKey() error {\n\n\t// if key is not a valid length, try to load it from config\n\tif len(key) != 16 && len(key) != 24 && len(key) != 32 {\n\n\t\tkey = []byte(viper.GetString(\"aes_key\"))\n\n\t\t// check if key from config is proper size\n\t\tif len(key) != 16 && len(key) != 24 && len(key) != 32 {\n\t\t\treturn fmt.Errorf(\"aes_key specified in config is not of correct length: %d\", len(key))\n\t\t}\n\n\t}\n\n\treturn nil\n\n}",
"func (jwk *Jwk) validateECParams() error {\n\tif jwk.X == nil {\n\t\treturn errors.New(\"EC Required Param (X) is nil\")\n\t}\n\tif jwk.Y == nil {\n\t\treturn errors.New(\"EC Required Param (Y) is nil\")\n\t}\n\tif jwk.Curve == nil {\n\t\treturn errors.New(\"EC Required Param (Crv) is nil\")\n\t}\n\treturn nil\n}",
"func (e VerifyCVVRequestValidationError) Key() bool { return e.key }",
"func TestValidateRootRotationMissingNewSig(t *testing.T) {\n\ttestValidateRootRotationMissingNewSig(t, data.ECDSAKey, data.ECDSAx509Key)\n\tif !testing.Short() {\n\t\ttestValidateRootRotationMissingNewSig(t, data.RSAKey, data.RSAx509Key)\n\t}\n}",
"func (o *Options) Validate() error {\n\n\tif len(o.OrchestratorTopologyUser) == 0 {\n\t\to.OrchestratorTopologyUser = getFromEnvOrDefault(\"ORC_TOPOLOGY_USER\", \"\")\n\t}\n\tif len(o.OrchestratorTopologyPassword) == 0 {\n\t\to.OrchestratorTopologyPassword = getFromEnvOrDefault(\"ORC_TOPOLOGY_PASSWORD\", \"\")\n\t}\n\treturn nil\n}",
"func (k *Key) Valid(allowSpecial bool, kc KeyContext) bool {\n\tif !kc.Matches(k.kc) {\n\t\treturn false\n\t}\n\tfor _, t := range k.toks {\n\t\tif t.IsIncomplete() {\n\t\t\treturn false\n\t\t}\n\t\tif !allowSpecial && t.Special() {\n\t\t\treturn false\n\t\t}\n\t\tif t.Kind == \"\" {\n\t\t\treturn false\n\t\t}\n\t\tif t.StringID != \"\" && t.IntID != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (s Keygen) Validate() error {\n\treturn validate(s)\n}",
"func (opts *Options) Validate() (err error) {\n\tif opts.ClientID == \"\" {\n\t\treturn ErrClientID\n\t}\n\tif opts.ClientSecret == \"\" {\n\t\treturn ErrClientSecret\n\t}\n\tif opts.CallbackAddress == \"\" {\n\t\treturn ErrCallbackAddress\n\t}\n\tif opts.OAuthRoot == \"\" {\n\t\topts.OAuthRoot = TranquilityOAuth\n\t}\n\tif opts.OAuthRoot != TranquilityOAuth && opts.OAuthRoot != SingularityOAuth {\n\t\treturn ErrBadOAuthAddress\n\t}\n\treturn\n}",
"func validateGenerateEutranVectorInputs(key []byte, opc []byte, sqn uint64, plmn []byte) error {\n\tif err := validateGenerateSIPAuthVectorInputs(key, opc, sqn); err != nil {\n\t\treturn err\n\t}\n\tif len(plmn) != ExpectedPlmnBytes {\n\t\treturn fmt.Errorf(\"incorrect plmn size. Expected 3 bytes, but got %v bytes\", len(plmn))\n\t}\n\treturn nil\n}",
"func isValidKeyPair(param []string) bool {\n\treturn len(param) == 2\n}",
"func (p *Pair) KeyIsValid() bool {\n\tif strings.Contains(p.Key, \"\\n\") {\n\t\treturn false\n\t}\n\tif strings.Contains(p.Key, \":\") {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (e ArfcnValidationError) Key() bool { return e.key }",
"func (o *options) Validate() error {\n\tif len(o.tmKubeconfigPath) == 0 {\n\t\treturn errors.New(\"tm-kubeconfig-path is required\")\n\t}\n\tif len(o.testrunNamePrefix) == 0 {\n\t\treturn errors.New(\"testrun-prefix is required\")\n\t}\n\tif len(o.testrunPath) == 0 {\n\t\treturn errors.New(\"file is required\")\n\t}\n\treturn nil\n}",
"func validateDiskEncryption(encryption *gcp.DiskEncryption, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif encryption == nil {\n\t\treturn allErrs\n\t}\n\n\tif encryption.KmsKeyName == nil || strings.TrimSpace(*encryption.KmsKeyName) == \"\" {\n\t\t// Currently DiskEncryption only contains CMEK fields. Hence if not nil, then kmsKeyName is a must\n\t\t// Validation logic will need to be modified when CSEK fields are possibly added to gcp.DiskEncryption in the future.\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"kmsKeyName\"), \"must be specified when configuring disk encryption\"))\n\t}\n\n\treturn allErrs\n}",
"func (e JwtAuthenticationValidationError) Key() bool { return e.key }",
"func (sa SaltAlgo) Validate() error {\n\tswitch sa {\n\tcase SaltAlgoAES128GCM,\n\t\tSaltAlgoAES196GCM,\n\t\tSaltAlgoAES256GCM:\n\t\treturn nil\n\tdefault:\n\t\treturn ErrInvalidSaltAlgo\n\t}\n}",
"func (e EFSVolumeConfiguration) validate() error {\n\tif e.IsEmpty() {\n\t\treturn nil\n\t}\n\tif !e.EmptyBYOConfig() && !e.EmptyUIDConfig() {\n\t\treturn &errFieldMutualExclusive{\n\t\t\tfirstField: \"uid/gid\",\n\t\t\tsecondField: \"id/root_dir/auth\",\n\t\t}\n\t}\n\tif e.UID != nil && e.GID == nil {\n\t\treturn &errFieldMustBeSpecified{\n\t\t\tmissingField: \"gid\",\n\t\t\tconditionalFields: []string{\"uid\"},\n\t\t}\n\t}\n\tif e.UID == nil && e.GID != nil {\n\t\treturn &errFieldMustBeSpecified{\n\t\t\tmissingField: \"uid\",\n\t\t\tconditionalFields: []string{\"gid\"},\n\t\t}\n\t}\n\tif e.UID != nil && *e.UID == 0 {\n\t\treturn fmt.Errorf(`\"uid\" must not be 0`)\n\t}\n\tif err := e.AuthConfig.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"auth\": %w`, err)\n\t}\n\tif e.AuthConfig.AccessPointID != nil {\n\t\tif (aws.StringValue(e.RootDirectory) == \"\" || aws.StringValue(e.RootDirectory) == \"/\") &&\n\t\t\t(e.AuthConfig.IAM == nil || aws.BoolValue(e.AuthConfig.IAM)) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(`\"root_dir\" must be either empty or \"/\" and \"auth.iam\" must be true when \"access_point_id\" is used`)\n\t}\n\tif e.RootDirectory != nil {\n\t\tif err := validateVolumePath(aws.StringValue(e.RootDirectory)); err != nil {\n\t\t\treturn fmt.Errorf(`validate \"root_dir\": %w`, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (o Order) Valid() error {\n\t_, ok := orderOptions[o]\n\tif !ok {\n\t\tlog.Println(\"error while validating query param: order\")\n\t\tlog.Printf(\"value: %s\", string(o))\n\t\treturn errors.New(\"invalid query param: order\")\n\t}\n\treturn nil\n}",
"func (m *License) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateExpireDate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLicenseSerial(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMaxChunkNum(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMaxClusterNum(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSignDate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSoftwareEdition(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (t *TokenSigningConfig) Validate() error {\n\tif len(t.TokenSigningKeys) == 0 {\n\t\treturn fmt.Errorf(\"TOKEN_SIGNING_KEY must have at least one element\")\n\t}\n\n\tif len(t.TokenSigningKeyIDs) == 0 {\n\t\treturn fmt.Errorf(\"TOKEN_SIGNING_KEY_ID must have at least one entry\")\n\t}\n\n\tif len(t.TokenSigningKeys) != len(t.TokenSigningKeyIDs) {\n\t\treturn fmt.Errorf(\"TOKEN_SIGNING_KEY and TOKEN_SIGNING_KEY_ID must be lists of the same length\")\n\t}\n\n\treturn nil\n}",
"func (e EarfcnValidationError) Key() bool { return e.key }",
"func (k *Key) Validate(code string) bool {\n\treturn Validate(code, k.base.Secret())\n}",
"func (m *KeyPair) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with Resource\n\tif err := m.Resource.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateGroup(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (e ChannelPayRequestValidationError) Key() bool { return e.key }",
"func (e MaxEarfcnValidationError) Key() bool { return e.key }",
"func (o *Options) validate() error {\n\tif len(o.BindAddress) == 0 {\n\t\treturn fmt.Errorf(\"missing bind address\")\n\t}\n\n\tif o.Port == 0 {\n\t\treturn fmt.Errorf(\"missing port\")\n\t}\n\n\tif len(o.ServerCertDir) == 0 {\n\t\treturn fmt.Errorf(\"missing server tls cert path\")\n\t}\n\n\treturn nil\n}",
"func (e CreatePaymentRequestValidationError) Key() bool { return e.key }",
"func (m *GPGKey) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCreated(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateEmails(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateExpires(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSubsKey(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ArchiveOptions) Validate(args []string, cxt *context.Context) error {\n\treturn nil\n}",
"func validateSymmetricKey(k []byte) bool {\n\treturn len(k) > 0 && !containsOnlyZeros(k)\n}",
"func ValidContainerConfigKey(k string) bool {\n\tswitch k {\n\tcase \"limits.cpus\":\n\t\treturn true\n\tcase \"limits.memory\":\n\t\treturn true\n\tcase \"security.privileged\":\n\t\treturn true\n\tcase \"raw.apparmor\":\n\t\treturn true\n\tcase \"raw.lxc\":\n\t\treturn true\n\tcase \"volatile.baseImage\":\n\t\treturn true\n\t}\n\n\tif _, err := ExtractInterfaceFromConfigName(k); err == nil {\n\t\treturn true\n\t}\n\n\treturn strings.HasPrefix(k, \"user.\")\n}",
"func (opt *Options) Valid() bool {\n\tif err := compile(opt.Regex, opt.Case); err != nil {\n\t\tfmt.Fprintln(Stderr, color.RedString(\"Find pattern: %s\", err.Error()))\n\t\treturn false\n\t}\n\n\tif err := compile(opt.Ignore, opt.Case); err != nil {\n\t\tfmt.Fprintln(Stderr, color.RedString(\"Ignore pattern: %s\", err.Error()))\n\t\treturn false\n\t}\n\n\tif opt.Regex == \"\" {\n\t\tfmt.Fprintln(Stderr, color.RedString(\"Find pattern cannot be empty.\"))\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func TestEncryptionRotation(t *testing.T, scenario RotationScenario) {\n\t// test data\n\tns := scenario.Namespace\n\tlabelSelector := scenario.LabelSelector\n\n\t// step 1: create the desired resource\n\te := NewE(t)\n\tclientSet := GetClients(e)\n\tscenario.CreateResourceFunc(e, GetClients(e), ns)\n\n\t// step 2: run provided encryption scenario\n\tTestEncryptionType(t, scenario.BasicScenario, scenario.EncryptionProvider)\n\n\t// step 3: take samples\n\trawEncryptedResourceWithKey1 := scenario.GetRawResourceFunc(e, clientSet, ns)\n\n\t// step 4: force key rotation and wait for migration to complete\n\tlastMigratedKeyMeta, err := GetLastKeyMeta(t, clientSet.Kube, ns, labelSelector)\n\trequire.NoError(e, err)\n\trequire.NoError(e, ForceKeyRotation(e, scenario.UnsupportedConfigFunc, fmt.Sprintf(\"test-key-rotation-%s\", rand.String(4))))\n\tWaitForNextMigratedKey(e, clientSet.Kube, lastMigratedKeyMeta, scenario.TargetGRs, ns, labelSelector)\n\tscenario.AssertFunc(e, clientSet, scenario.EncryptionProvider, ns, labelSelector)\n\n\t// step 5: verify if the provided resource was encrypted with a different key (step 2 vs step 4)\n\trawEncryptedResourceWithKey2 := scenario.GetRawResourceFunc(e, clientSet, ns)\n\tif rawEncryptedResourceWithKey1 == rawEncryptedResourceWithKey2 {\n\t\tt.Errorf(\"expected the resource to has a different content after a key rotation,\\ncontentBeforeRotation %s\\ncontentAfterRotation %s\", rawEncryptedResourceWithKey1, rawEncryptedResourceWithKey2)\n\t}\n\n\t// TODO: assert conditions - operator and encryption migration controller must report status as active not progressing, and not failing for all scenarios\n}",
"func (c Counter) Valid(options ...string) bool {\n\tv := reflect.ValueOf(c)\n\tfor i := 0; i < v.NumField(); i++ {\n\t\t// can't use \"!= 0\" check here, because such shell syntax may occur in rpm specfile:\n\t\t// case `value` in\n\t\t// *)\n\t\t// `command`\n\t\t//\t\t ;;\n\t\t// esac\n\t\t// there's no opening \"(\" here, so the parenteses counter will be negative, but it's still valid\n\t\tif ok, _ := slice.Contains(options, v.Type().Field(i).Name); !ok && v.Field(i).Int() > 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (e PaymentValidationError) Key() bool { return e.key }",
"func (s *OpencxAuctionServer) validateEncryptedOrder(order *match.EncryptedAuctionOrder) (err error) {\n\n\tvar rswPuzzle *rsw.PuzzleRSW\n\tvar ok bool\n\tif rswPuzzle, ok = order.OrderPuzzle.(*rsw.PuzzleRSW); !ok {\n\t\terr = fmt.Errorf(\"Puzzle could not be converted to RSW puzzle, invalid encrypted order\")\n\t\treturn\n\t}\n\n\tif uint64(rswPuzzle.T.Int64()) != s.t {\n\t\terr = fmt.Errorf(\"The time to solve the puzzle is not correct, invalid encrypted order\")\n\t\treturn\n\t}\n\n\treturn\n}",
"func (e JwtComponentValidationError) Key() bool { return e.key }",
"func (m Key) ValidateEnumValue() (bool, error) {\n\terrMessage := []string{}\n\tif _, ok := GetMappingKeyLifecycleStateEnum(string(m.LifecycleState)); !ok && m.LifecycleState != \"\" {\n\t\terrMessage = append(errMessage, fmt.Sprintf(\"unsupported enum value for LifecycleState: %s. Supported values are: %s.\", m.LifecycleState, strings.Join(GetKeyLifecycleStateEnumStringValues(), \",\")))\n\t}\n\n\tif _, ok := GetMappingKeyProtectionModeEnum(string(m.ProtectionMode)); !ok && m.ProtectionMode != \"\" {\n\t\terrMessage = append(errMessage, fmt.Sprintf(\"unsupported enum value for ProtectionMode: %s. Supported values are: %s.\", m.ProtectionMode, strings.Join(GetKeyProtectionModeEnumStringValues(), \",\")))\n\t}\n\tif len(errMessage) > 0 {\n\t\treturn true, fmt.Errorf(strings.Join(errMessage, \"\\n\"))\n\t}\n\treturn false, nil\n}",
"func (h *ArgonHasher) Validate(password, encoded string) (bool, error) {\n\tparams, salt, hash, err := decodeHash(encoded)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tpasswordHash := argon2.IDKey([]byte(password), salt, params.Iterations, params.Memory, params.Threads, params.KeyLength)\n\n\tif subtle.ConstantTimeCompare(hash, passwordHash) == 0 {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}",
"func (e JwtProviderValidationError) Key() bool { return e.key }"
] | [
"0.6198566",
"0.5907458",
"0.5780951",
"0.56250936",
"0.55951184",
"0.5560013",
"0.5555904",
"0.54674876",
"0.5467326",
"0.54615873",
"0.5447552",
"0.54134583",
"0.5411202",
"0.5406471",
"0.5354693",
"0.5336632",
"0.53256315",
"0.53087765",
"0.52998185",
"0.52895993",
"0.5269103",
"0.5263958",
"0.52566063",
"0.5236638",
"0.5222546",
"0.52094823",
"0.5208627",
"0.5207169",
"0.5173648",
"0.51639473",
"0.51556593",
"0.514426",
"0.5119311",
"0.5110298",
"0.5108393",
"0.51078945",
"0.5105133",
"0.5090458",
"0.50897086",
"0.508456",
"0.5083389",
"0.5077193",
"0.5070929",
"0.50695187",
"0.50419176",
"0.5040846",
"0.5036373",
"0.5022967",
"0.50222623",
"0.50164735",
"0.5011812",
"0.50089425",
"0.5001373",
"0.4996357",
"0.49933836",
"0.49861255",
"0.4975366",
"0.49718845",
"0.49599618",
"0.49571842",
"0.4947003",
"0.49347216",
"0.4933197",
"0.49320468",
"0.49298814",
"0.49286336",
"0.49269968",
"0.49259785",
"0.49169427",
"0.49144095",
"0.49137667",
"0.48993343",
"0.48857844",
"0.48852882",
"0.48806047",
"0.48767474",
"0.4873052",
"0.4872431",
"0.48711932",
"0.48580652",
"0.4853613",
"0.48508686",
"0.4848321",
"0.48445806",
"0.48412973",
"0.48363584",
"0.48348597",
"0.48310778",
"0.48247033",
"0.4820669",
"0.4815478",
"0.48105925",
"0.4800097",
"0.47980613",
"0.47972926",
"0.47965553",
"0.47960302",
"0.47915828",
"0.47900245",
"0.47872764"
] | 0.6515647 | 0 |
Notify notifies notification endpoint if configured regarding job failure or success. | func (r BatchJobKeyRotateV1) Notify(ctx context.Context, body io.Reader) error {
if r.Flags.Notify.Endpoint == "" {
return nil
}
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodPost, r.Flags.Notify.Endpoint, body)
if err != nil {
return err
}
if r.Flags.Notify.Token != "" {
req.Header.Set("Authorization", r.Flags.Notify.Token)
}
clnt := http.Client{Transport: getRemoteInstanceTransport}
resp, err := clnt.Do(req)
if err != nil {
return err
}
xhttp.DrainBody(resp.Body)
if resp.StatusCode != http.StatusOK {
return errors.New(resp.Status)
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func notify(job jobConfig, result runResult) error {\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tclient := &http.Client{\n\t\tTimeout: time.Second * 10,\n\t}\n\n\tnotifyPayload := jobNotify{\n\t\tRunID: job.ID,\n\t\tSuccess: result.Success,\n\t\tOutput: result.Output,\n\t\tLogs: result.Logs,\n\t}\n\n\tpayload, err := json.Marshal(notifyPayload)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Printf(\"Sending payload %s\\n\", payload)\n\n\treq, err := http.NewRequest(\"POST\", fmt.Sprintf(\"%s/%s\", os.Getenv(\"ZETTO_HOST\"), \"notify\"), bytes.NewBuffer(payload))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"ApiKey %s\", os.Getenv(\"ZETTO_API_KEY\")))\n\treq.Header.Add(\"X-Runner-Name\", hostname)\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif res.StatusCode < 200 || res.StatusCode >= 300 {\n\t\treturn fmt.Errorf(\"Notify error %d\", res.StatusCode)\n\t}\n\n\treturn nil\n}",
"func (p Ping) notify(pingErr error) error {\n\tenvelope_ := adapters.Envelope{\n\t\tTitle: \"actuator-failed\",\n\t\tRecipient: \"*\",\n\t}\n\t// TODO proper protocol ?\n\tpayload := fmt.Sprintf(\"endpoint=%s actuator=ping err=%s\", p.Endpoint, pingErr)\n\tif err := p.Adapter.Send(envelope_, payload); err != nil {\n\t\tp.logger.Error(\"Error sending event: %s\", err)\n\t\treturn err\n\t}\n\tp.logger.Info(\"Event '%s' dispatched\", envelope_.Title)\n\treturn pingErr\n}",
"func (r NopReporter) Notify(ctx context.Context, err error) {}",
"func (r BatchJobReplicateV1) Notify(ctx context.Context, body io.Reader) error {\n\tif r.Flags.Notify.Endpoint == \"\" {\n\t\treturn nil\n\t}\n\n\tctx, cancel := context.WithTimeout(ctx, 10*time.Second)\n\tdefer cancel()\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, r.Flags.Notify.Endpoint, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif r.Flags.Notify.Token != \"\" {\n\t\treq.Header.Set(\"Authorization\", r.Flags.Notify.Token)\n\t}\n\n\tclnt := http.Client{Transport: getRemoteInstanceTransport}\n\tresp, err := clnt.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\txhttp.DrainBody(resp.Body)\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn errors.New(resp.Status)\n\t}\n\n\treturn nil\n}",
"func (n Notifier) Notify(status int) error {\n\tif n.webHook == \"\" {\n\t\treturn nil\n\t}\n\tstatusStr := \"\"\n\tif status == PROCESS_STARTED {\n\t\tstatusStr = \"\\\"starting\\\"\"\n\t} else if status == PROCESS_RUNNING {\n\t\tstatusStr = \"\\\"up\\\"\"\n\t} else {\n\t\tstatusStr = \"\\\"crashed\\\"\"\n\t}\n\tbody := `{\n\t\t\t\t\t\t\t\"ps\":\n\t\t\t\t\t\t\t\t{ \"status\":` + statusStr + `}\n\t\t\t\t\t\t}`\n\n\treq, err := http.NewRequest(\"PUT\", n.webHook, bytes.NewBufferString(body))\n\tif err != nil {\n\t\treturn errors.New(\"Error in Notify : Failed to construct the HTTP request\" + err.Error())\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tclient := &http.Client{}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn errors.New(\"Error in Notify : Was not able to trigger the hook!\\n\" + err.Error())\n\t}\n\tdefer resp.Body.Close()\n\n\treturn nil\n}",
"func (ctx *Context) Notification(rw http.ResponseWriter, r *http.Request) {\n\tvar payload Payload\n\tif err := json.Decode(&payload, r.Body); err != nil {\n\t\tlogrus.Error(fmt.Errorf(\"failed decode to type: %w\", err))\n\t\twriteErrorResponse(rw)\n\t\treturn\n\t}\n\tif !isRunStatusApplied(payload.Notifications) {\n\t\twriteDefaultResponse(&payload, rw)\n\t\treturn\n\t}\n\tdownloadURLResponse, ok := callTerraform(rw, ctx, payload)\n\tif !ok {\n\t\treturn\n\t}\n\twriteNotificationResponse(downloadURLResponse, rw)\n}",
"func sendNotifications(j *Job) {\n\tnotify := j.config.Notify\n\tif notify == nil {\n\t\treturn\n\t}\n\tif notify.Mode == notifyError && j.success == true {\n\t\treturn\n\t}\n\n\tvar message string\n\tif j.success {\n\t\tmessage = fmt.Sprintf(\"Job %q has finished. Duration: %v\", j.config.Name, j.duration)\n\t} else {\n\t\tmessage = fmt.Sprintf(\"Job %q has failed with status code: %v. Duration: %v\", j.config.Name, j.exitStatus, j.duration)\n\t}\n\n\tlog.Printf(\"[%s] sending notifications\\n\", j.config.Name)\n\tdefer log.Printf(\"[%s] done sending notifications\\n\", j.config.Name)\n\n\twg := &sync.WaitGroup{}\n\n\tif webhook := notify.Webhook; webhook != nil {\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tform := url.Values{}\n\t\t\tform.Add(\"job_name\", j.config.Name)\n\t\t\tform.Add(\"duration\", fmt.Sprintf(\"%v\", j.duration))\n\t\t\tform.Add(\"started_at\", fmt.Sprintf(\"%v\", j.startedAt))\n\t\t\tform.Add(\"success\", fmt.Sprintf(\"%v\", j.success))\n\t\t\tform.Add(\"exit_status\", fmt.Sprintf(\"%v\", j.exitStatus))\n\t\t\tform.Add(\"message\", message)\n\n\t\t\tresp, err := http.PostForm(webhook.URL, form)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"[%s] failed to send webhook: %v\\n\", j.config.Name, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresp.Body.Close()\n\n\t\t\tlog.Printf(\"[%s] sent notification to webhook %v\\n\", j.config.Name, webhook.URL)\n\t\t}()\n\t}\n\n\tif slack := notify.Slack; slack != nil {\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tpayload := map[string]string{\n\t\t\t\t\"text\": message,\n\t\t\t\t\"username\": slack.User,\n\t\t\t\t\"channel\": slack.Channel,\n\t\t\t}\n\t\t\tbody, err := json.Marshal(payload)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"[%s] json error: %v\\n\", j.config.Name, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tresp, err := http.Post(slack.URL, \"application/json\", bytes.NewReader(body))\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"[%s] failed to send slack: %v\\n\", j.config.Name, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresp.Body.Close()\n\n\t\t\tlog.Printf(\"[%s] sent notification to slack %v\\n\", j.config.Name, slack.Channel)\n\t\t}()\n\t}\n\n\twg.Wait()\n}",
"func notify(ctx context.Context, report string) error {\n\t_, err := sendRequest(ctx, \"POST\", notifyAddr, report)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func Notify(err interface{}, req *http.Request) error {\n\tif Airbrake != nil {\n\t\treturn Airbrake.Notify(err, req)\n\t}\n\tlog.Printf(\"[AIRBRAKE] %v\", err)\n\treturn nil\n}",
"func (n *SMTPNotifier) Notify(work *model.WorkRequest) error {\n\t// Get email body\n\tpayload := work.GetLogContent(n.PrefixFilter)\n\tif strings.TrimSpace(payload) == \"\" {\n\t\t// Nothing to notify, abort\n\t\treturn nil\n\t}\n\n\t// Buidl subject\n\tvar subject string\n\tif work.Status == model.Success {\n\t\tsubject = fmt.Sprintf(\"Webhook %s#%d SUCCESS.\", work.Name, work.ID)\n\t} else {\n\t\tsubject = fmt.Sprintf(\"Webhook %s#%d FAILED.\", work.Name, work.ID)\n\t}\n\n\t// Connect to the remote SMTP server.\n\tc, err := smtp.Dial(n.Host)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Set the sender and recipient first\n\tif err := c.Mail(n.From); err != nil {\n\t\treturn err\n\t}\n\tif err := c.Rcpt(n.To); err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\n\t// Send the email body.\n\twc, err := c.Data()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = fmt.Fprintf(wc, \"Subject: %s\\r\\n\\r\\n%s\\r\\n\\r\\n\", subject, payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = wc.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Info.Printf(\"job %s#%d notification sent to %s\\n\", work.Name, work.ID, n.To)\n\n\t// Send the QUIT command and close the connection.\n\treturn c.Quit()\n}",
"func (n *IFTTTNotifier) Notify(msg string) error {\n\n\treq := &utility.HTTPRequest{\n\t\tURL: fmt.Sprintf(\"https://maker.ifttt.com/trigger/%s/with/key/%s\", EventName, n.Key),\n\t}\n\n\tresp, _, err := n.httpClient.DoRequest(utility.HTTPMethodGET, req, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode < 200 || resp.StatusCode >= 300 {\n\t\treturn errors.New(fmt.Sprintf(\"Unexpected status code %d in IFTTTNotifier\", resp.StatusCode))\n\t}\n\n\treturn nil\n}",
"func (d *JobManager) watchJobExecution(request *restful.Request, response *restful.Response) {\n\n}",
"func (u *Slack) OnFailure(s *types.Service) error {\n\tif u.Enabled {\n\t\tmessage := slackMessage{\n\t\t\tService: s,\n\t\t\tTime: time.Now().Unix(),\n\t\t}\n\t\tSendSlack(FAILING_TEMPLATE, message)\n\t}\n\treturn nil\n}",
"func (wen *workErrNotifier) Fail(err error) {\n\twen.mutex.Lock()\n\tdefer wen.mutex.Unlock()\n\tif wen.err != nil {\n\t\treturn\n\t}\n\twen.err = err\n\tclose(wen.exitC)\n}",
"func (oo *OmciCC) NotifyAboutOnuConfigFailure(ctx context.Context, errID string, meClassID me.ClassID, meEntityID uint16,\n\tmeName string, meResult me.Results) {\n\tvar description string\n\tif !oo.confFailMeAlreadyHandled(meClassID) {\n\t\tswitch errID {\n\t\tcase OnuConfigFailureResponseErr:\n\t\t\tdescription = OnuConfigFailureResponseErrDesc + meResult.String() +\n\t\t\t\t\", OMCI ME: \" + meName + \" / instance: \" + fmt.Sprintf(\"%d\", meEntityID) + \" (only first instance reported)\"\n\t\tcase OnuConfigFailureTimeout:\n\t\t\tdescription = OnuConfigFailureTimeoutDesc + meName + \" / instance: \" + fmt.Sprintf(\"%d\", meEntityID) +\n\t\t\t\t\" (only first instance reported)\"\n\t\tdefault:\n\t\t\tlogger.Warnw(ctx, \"method called with undefined errID\", log.Fields{\"errID\": errID, \"device-id\": oo.deviceID})\n\t\t\treturn\n\t\t}\n\t\too.pOnuDeviceEntry.SendOnuDeviceEvent(ctx, errID, description)\n\t\too.appendConfFailMe(meClassID)\n\t}\n}",
"func (notifier *JenkinsNotifier) Notify() error {\n\tif notifier.JenkinsProject.Name == \"\" || notifier.JenkinsProject.Token == \"\" {\n\t\treturn errors.New(\"Jenkins Project config is not correct.\")\n\t}\n\n\turl := notifier.notifyUrl()\n\treq, err := http.NewRequest(\"POST\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tusername, apiToken := notifier.UserName, notifier.UserApiToken\n\tif notifier.JenkinsProject.HasJenkinsConfig() {\n\t\tusername, apiToken = notifier.JenkinsProject.Username, notifier.JenkinsProject.UserApiToken\n\t}\n\treq.SetBasicAuth(username, apiToken)\n\tresp, err := http.DefaultClient.Do(req)\n\tif err == nil && resp.StatusCode == http.StatusOK {\n\t\tdefer resp.Body.Close()\n\t\tlogs.Info(\"Notified to project \", notifier.JenkinsProject.Name)\n\t\treturn nil\n\t} else {\n\t\tif err == nil {\n\t\t\treturn errors.New(\"Notify Status is \" + resp.Status)\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n}",
"func (on *OpsGenieNotifier) Notify(evalContext *alerting.EvalContext) error {\n\tvar err error\n\tswitch evalContext.Rule.State {\n\tcase models.AlertStateOK:\n\t\tif on.AutoClose {\n\t\t\terr = on.closeAlert(evalContext)\n\t\t}\n\tcase models.AlertStateAlerting:\n\t\terr = on.createAlert(evalContext)\n\t}\n\treturn err\n}",
"func (h *State) JobEmail(ctx *router.Context) {\n\tif err := ctx.Context.Err(); err != nil {\n\t\thttp.Error(ctx.Writer, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tnow := clock.Now(ctx.Context)\n\tconfigs, err := h.configStore(ctx.Context).RotaConfig(ctx.Context, \"\")\n\tif err != nil {\n\t\thttp.Error(ctx.Writer, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfor _, cfg := range configs {\n\t\tif err := h.notifyEmail(ctx, cfg, now); err != nil {\n\t\t\tlogging.Warningf(ctx.Context, \"notifyEmail(ctx, _,%v) for rota: %q failed: %v\", now, cfg.Config.Name, err)\n\t\t}\n\t}\n}",
"func (n *Notifier) Notify(err interface{}) error {\n\t_, sendErr := n.Client.SendNotice(NewNotice(err, nil))\n\treturn ex.New(sendErr)\n}",
"func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) {\n\tkey, err := notify.ExtractGroupKey(ctx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tvar (\n\t\talerts = types.Alerts(as...)\n\t\tdata = notify.GetTemplateData(ctx, n.tmpl, as, n.logger)\n\t\teventType = pagerDutyEventTrigger\n\t)\n\tif alerts.Status() == model.AlertResolved {\n\t\teventType = pagerDutyEventResolve\n\t}\n\n\tlevel.Debug(n.logger).Log(\"incident\", key, \"eventType\", eventType)\n\n\tdetails := make(map[string]string, len(n.conf.Details))\n\tfor k, v := range n.conf.Details {\n\t\tdetail, err := n.tmpl.ExecuteTextString(v, data)\n\t\tif err != nil {\n\t\t\treturn false, errors.Wrapf(err, \"%q: failed to template %q\", k, v)\n\t\t}\n\t\tdetails[k] = detail\n\t}\n\n\tif n.apiV1 != \"\" {\n\t\treturn n.notifyV1(ctx, eventType, key, data, details, as...)\n\t}\n\treturn n.notifyV2(ctx, eventType, key, data, details, as...)\n}",
"func (r NopReporter) AutoNotify(ctx context.Context) {}",
"func notifyStatus(curStatus WorkerStatus, tid int) {\n\targs := CallArgs{}\n\targs.CurrentStatus = curStatus\n\targs.TaskID = tid\n\treply := CallReply{}\n\tcall(\"Coordinator.Response\", &args, &reply)\n}",
"func (notifier *Notifier) Notify(notification Notification) {\n\n}",
"func (email *Email) MarkFail(client *redis.Client) {\n\tfmt.Println(\"Email Sent\")\n}",
"func (f *Sink) ForceFailure(endpoint *Endpoint) {\n\tf.moveFailed(endpoint)\n\tendpoint.forceFailure()\n}",
"func (mas MetricAlerts) Notify() error {\n\tclient, err := metrics.NewClient()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create Prometheus client: %v\", err)\n\t}\n\n\tfor _, ma := range mas {\n\t\tlog.Printf(\"Checking %s\", ma.Name)\n\t\tif err := ma.Check(client); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func(checker *ParkedDomainChecker) SendParkedNotification(business connectors.BusinessMetadata) error {\n notificationString := fmt.Sprintf(\"Found parked domain for business %s at URI %s\",\n business.BusinessName, business.BusinessURI)\n // generate new notification\n notification := notifications.ChangeNotification{\n BusinessId: business.BusinessId,\n BusinessName: business.BusinessName,\n EventTimestamp: time.Now(),\n Notification: notificationString,\n NotificationHash: generateNotificationHash(business.BusinessId),\n Metadata: map[string]interface{}{\n \"source\": \"parked-domain-checker\",\n },\n }\n\n // create new API accessor and send notification to API\n accessor := apis.NewNotificationsApiAccessorFromConfig(checker.NotificationsAPIConfig)\n // send notification for business change\n if _, err := accessor.CreateNotification(notification); err != nil {\n log.Error(fmt.Errorf(\"unable to send new notification: %+v\", err))\n return err\n }\n return nil\n}",
"func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) {\n\tkey, err := notify.ExtractGroupKey(ctx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\t//n.logger.Info(key)\n\t//data := notify.GetTemplateData(ctx, n.tmpl, as, n.logger)\n\n\t//tmpl := notify.TmplText(n.tmpl, data, &err)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\t//n.logger.Info(tmpl(n.conf.Message))\n\n\ttitle := fmt.Sprintf(\"容器告警\")\n\t//text := n.genMarkdown(as)\n\tmsg := n.genMarkdown(title,as)\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(msg); err != nil {\n\t\treturn false, err\n\t}\n\n\tv := n.sign()\n\treq, err := http.NewRequest(http.MethodPost, fmt.Sprintf(\"%s?%s\", yachURL, v.Encode()), &buf)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\tresp, err := n.client.Do(req.WithContext(ctx))\n\tif err != nil {\n\t\treturn true, notify.RedactURL(err)\n\t}\n\tdefer notify.Drain(resp)\n\n\tif resp.StatusCode != 200 {\n\t\treturn true, fmt.Errorf(\"unexpected status code %v\", resp.StatusCode)\n\t}\n\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tn.logger.WithFields(logrus.Fields{\"response\": string(respBody), \"iincident\": key}).WithError(err).Error()\n\t\treturn true, err\n\t}\n\tyachResponse := YachResponse{}\n\terr = json.Unmarshal(respBody, &yachResponse)\n\tif yachResponse.Code != 200 {\n\n\t}\n\tn.logger.WithFields(logrus.Fields{\"response\": string(respBody), \"iincident\": key}).Debug()\n\tdefer notify.Drain(resp)\n\n\treturn true, nil\n}",
"func (g *NotifyService) Notify(body string) (exit int, err error) {\n\tcfg := g.client.Config\n\tparser := g.client.Config.Parser\n\ttemplate := g.client.Config.Template\n\n\tresult := parser.Parse(body)\n\tif result.Error != nil {\n\t\treturn result.ExitCode, result.Error\n\t}\n\tif result.Result == \"\" {\n\t\treturn result.ExitCode, result.Error\n\t}\n\n\ttemplate.SetValue(terraform.CommonTemplate{\n\t\tTitle: cfg.MR.Title,\n\t\tMessage: cfg.MR.Message,\n\t\tResult: result.Result,\n\t\tBody: body,\n\t\tLink: cfg.CI,\n\t})\n\tbody, err = template.Execute()\n\tif err != nil {\n\t\treturn result.ExitCode, err\n\t}\n\n\tvalue := template.GetValue()\n\n\tif cfg.MR.IsNumber() {\n\t\tg.client.Comment.DeleteDuplicates(value.Title)\n\t}\n\n\t_, isApply := parser.(*terraform.ApplyParser)\n\tif !cfg.MR.IsNumber() && isApply {\n\t\tcommits, err := g.client.Commits.List(cfg.MR.Revision)\n\t\tif err != nil {\n\t\t\treturn result.ExitCode, err\n\t\t}\n\t\tlastRevision, _ := g.client.Commits.lastOne(commits, cfg.MR.Revision)\n\t\tcfg.MR.Revision = lastRevision\n\t}\n\n\treturn result.ExitCode, g.client.Comment.Post(body, PostOptions{\n\t\tNumber: cfg.MR.Number,\n\t\tRevision: cfg.MR.Revision,\n\t})\n}",
"func (k *k8sService) notify(e types.K8sPodEvent) error {\n\tvar err error\n\tk.Lock()\n\tdefer k.Unlock()\n\tfor _, o := range k.observers {\n\t\tlog.Infof(\"Calling observer: with k8sPodEvent: %v Name: %s Status: %v\", e.Type, e.Pod.Name, e.Pod.Status)\n\t\ter := o.OnNotifyK8sPodEvent(e)\n\t\tif err == nil && er != nil {\n\t\t\terr = er\n\t\t}\n\t}\n\treturn err\n}",
"func handleResponseNotification(task *task.MessageTask, response *libcoap.Pdu, env *task.Env){\n handleNotification(env, task, response)\n}",
"func Notify(client *gophercloud.ServiceClient, id string) (r NotifyResult) {\n\tresp, err := client.Post(notifyURL(client, id), nil, nil, &gophercloud.RequestOpts{\n\t\tOkCodes: []int{204},\n\t})\n\t_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)\n\treturn\n}",
"func (notifier *ApprovalStatusNotifier) Notify(p *ProductReview, approved bool, msg string) error {\n\ts := \"Hello, this is \" + notifier.Sender + \" from Foo Incorporated.\\n\"\n\tif approved {\n\t\ts += \"Thank you for your review. It has been approved and will be on our site shortly!\\n\"\n\t} else {\n\t\ts += \"Your review has been denied due to not meeting our corporate policies regarding language.\"\n\t\ts += \"Please see our policies listed here: foo.inc/guidelines/community-practices.html\\n\"\n\t}\n\ts += msg\n\tlog.Println(\"Notifying client:\", s)\n\treturn nil\n}",
"func (p *DiscoveryProtocol) onNotify() {\n\tlog.Println(\" pending requests: \", p.pendingReq)\n\tfor req := range p.pendingReq {\n\t\tif !p.requestExpired(req) {\n\t\t\tlog.Println(\"Request not expired, trying to send response\")\n\t\t\tif p.createSendResponse(req) {\n\t\t\t\tdelete(p.pendingReq, req)\n\t\t\t}\n\t\t}\n\t}\n}",
"func (c *jobMessage) watchFatal() {\n\n}",
"func (s *remoteSealer) notifyWork() {\n\twork := s.currentWork\n\tblob, _ := json.Marshal(work)\n\ts.reqWG.Add(len(s.notifyURLs))\n\tfor _, url := range s.notifyURLs {\n\t\tgo s.sendNotification(s.notifyCtx, url, blob, work)\n\t}\n}",
"func (svc *Service) Fail(ctx context.Context, id, claimID uuid.UUID, reason string) error {\n\tsvc.tasksFailed.Inc()\n\treturn svc.taskGateway.MarkAsFailed(ctx, id, claimID, reason)\n}",
"func (a *admin) notify() {\n\tfmt.Printf(\"Sending admin email to %s<%s>\\n\",\n\t\ta.name,\n\t\ta.email)\n}",
"func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) {\n\tkey, err := notify.ExtractGroupKey(ctx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tlevel.Debug(n.logger).Log(\"incident\", key)\n\tdata := notify.GetTemplateData(ctx, n.tmpl, as, n.logger)\n\n\ttmpl := notify.TmplText(n.tmpl, data, &err)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\t// if err != nil {\n\t// \treturn false, fmt.Errorf(\"templating error: %s\", err)\n\t// }\n\n\trequest := dysmsapi.CreateSendSmsRequest()\n\trequest.Scheme = \"https\"\n\n\trequest.PhoneNumbers = tmpl(n.conf.ToUsers)\n\trequest.SignName = \"优路教育\"\n\trequest.TemplateCode = \"SMS_192370717\"\n\n\talert01 := data.Alerts[0]\n\n\tvar resultParam bytes.Buffer\n\tresultParam.WriteString(\"微服务 \")\n\tresultParam.WriteString(alert01.Labels[\"serverity\"])\n\tresultParam.WriteString(\" 于 \")\n\tresultParam.WriteString(alert01.StartsAt.Format(\"2006-01-02 15:04:05\"))\n\tresultParam.WriteString(\" 时发生了 \")\n\tresultParam.WriteString(alert01.Labels[\"alertname\"])\n\tresultParam.WriteString(\" 事件,具体错误: \")\n\tresultParam.WriteString(alert01.Annotations.Values()[0])\n\n\tfmt.Println(resultParam.String())\n\n\tresultParamJson := `{\"data\": \"` + resultParam.String() + `\"}`\n\n\trequest.TemplateParam = resultParamJson\n\n\tresp, err := n.client.SendSms(request)\n\n\tfmt.Println(resp)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif resp.Code == \"OK\" {\n\t\treturn true, nil\n\t}\n\n\treturn false, errors.New(resp.Message)\n}",
"func (a *AutoRollNotifier) SendNewFailure(ctx context.Context, id, url string) {\n\ta.send(ctx, &tmplVars{\n\t\tIssueID: id,\n\t\tIssueURL: url,\n\t}, subjectTmplNewFailure, bodyTmplNewFailure, notifier.SEVERITY_WARNING, MSG_TYPE_NEW_FAILURE, nil)\n}",
"func notifyHandler(w http.ResponseWriter, r *http.Request) error {\n\tc := appengine.NewContext(r)\n\tt := &taskqueue.Task{\n\t\tPath: \"/processnotification\",\n\t\tMethod: \"POST\",\n\t\tHeader: r.Header,\n\t}\n\tpayload, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to read request body: %s\", err)\n\t}\n\tt.Payload = payload\n\t// Insert a new Task in the default Task Queue.\n\tif _, err = taskqueue.Add(c, t, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"Failed to add new task: %s\", err)\n\t}\n\treturn nil\n}",
"func (a *admin) notify() {\n\tfmt.Printf(\"Sending admin email to %s %s\\n\", a.name, a.email)\n}",
"func (j *LockedJob) MarkSuccess() error { return j.success() }",
"func (q *Q) Fail() {\n\tif q.Status != PROGRESS {\n\t\treturn\n\t}\n\tq.Status = FAILURE\n}",
"func (c *Controller) fail(j weles.JobID, msg string) {\n\t// errors logged in the SetStatusAndInfo.\n\t_ = c.jobs.SetStatusAndInfo(j, weles.JobStatusFAILED, msg) // nolint:gosec\n\tc.dryader.CancelJob(j)\n\tc.boruter.Release(j)\n}",
"func (manager *Manager) sendFailedEvent(msg string, projectUpdateID string) {\n\tevent := &automate_event.EventMsg{\n\t\tEventID: createEventUUID(),\n\t\tType: &automate_event.EventType{Name: automate_event_type.ProjectRulesUpdateFailed},\n\t\tPublished: ptypes.TimestampNow(),\n\t\tProducer: &automate_event.Producer{\n\t\t\tID: event_ids.ComplianceInspecReportProducerID,\n\t\t},\n\t\tData: &_struct.Struct{\n\t\t\tFields: map[string]*_struct.Value{\n\t\t\t\tproject_update_tags.ProjectUpdateIDTag: {\n\t\t\t\t\tKind: &_struct.Value_StringValue{\n\t\t\t\t\t\tStringValue: projectUpdateID,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"message\": {\n\t\t\t\t\tKind: &_struct.Value_StringValue{\n\t\t\t\t\t\tStringValue: msg,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tpubReq := &automate_event.PublishRequest{Msg: event}\n\t_, err := manager.eventServiceClient.Publish(context.Background(), pubReq)\n\tif err != nil {\n\t\tlogrus.Warnf(\"Publishing status event %v\", err)\n\t}\n}",
"func (c Client) Notify(ctx context.Context, ee logger.ErrorEvent) {\n\tc.Client.CaptureEvent(errEvent(ctx, ee), nil, raven.NewScope())\n\tc.Client.Flush(c.Config.FlushTimeoutOrDefault()) // goose this a bit\n}",
"func (a *App) NotifyWithMail(body string, statusCode int) {\n\tfrom := a.HealthcheckNotifier.MailAddressFrom\n\tserver := a.HealthcheckNotifier.SMTPServer\n\tto := a.MailAddressToDown\n\tsubject := \"[DOWN] \" + a.Name\n\tif statusCode == 200 {\n\t\tto = a.MailAddressToUp\n\t\tsubject = \"[UP] \" + a.Name\n\t}\n\tif server == \"\" || from == \"\" || len(to) == 0 {\n\t\treturn\n\t}\n\n\tmsg := \"From: \" + from + \"\\r\\n\" +\n\t\t\"To: \" + toLine(to) + \"\\r\\n\" +\n\t\t\"Subject: \" + subject + \"\\r\\n\\r\\n\" +\n\t\tbody + \"\\r\\n\"\n\n\terr := smtp.SendMail(server, nil, from, to, []byte(msg))\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n}",
"func (m *MockHealthReporter) OK(msg string) {\n\tm.Lock()\n\tdefer m.Unlock()\n\tif m.notify == nil {\n\t\treturn\n\t}\n\tm.notify <- Update{Event: \"OK\"}\n}",
"func (_m *Notifier) Status(message string) {\n\t_m.Called(message)\n}",
"func (e *executor) OnFailure(err error) {\n\tlog(e.id).Errorf(\"execution failed: %v\", err)\n}",
"func (req *BaseRequest) Notify(err error) {\n\treq.Done <- err\n}",
"func (s *sesService) Notify(params interface{}) error {\n\tsesParams := params.(*SESNotifyInput)\n\n\tinput := &ses.SendEmailInput{\n\t\tDestination: &ses.Destination{\n\t\t\tToAddresses: aws.StringSlice(sesParams.To),\n\t\t},\n\t\tMessage: &ses.Message{\n\t\t\tBody: &ses.Body{\n\t\t\t\tText: &ses.Content{\n\t\t\t\t\tData: &sesParams.Message,\n\t\t\t\t\tCharset: aws.String(\"utf-8\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tSubject: &ses.Content{\n\t\t\t\tData: &sesParams.Subject,\n\t\t\t\tCharset: aws.String(\"utf-8\"),\n\t\t\t},\n\t\t},\n\t\tSource: aws.String(sesParams.From),\n\t}\n\n\tresp, err := s.client.SendEmail(input)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif sesParams.Debug {\n\t\tlog.Printf(\"message sent on aws ses: %v\", resp.MessageId)\n\t}\n\n\treturn nil\n}",
"func (prov *Provisioner) notifyActivation(resourceId string) error {\n\tconn, cerr := persistence.DefaultSession()\n\tif cerr != nil {\n\t\tlog.Errorf(\"[res %s] Error in getting connection :%v\", resourceId, cerr)\n\t\treturn cerr\n\t}\n\t//find the asset request for this notification\n\tar, err := conn.Find(bson.M{\"resourceid\": resourceId})\n\tdefer conn.Close()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ar.Status == persistence.RequestFulfilled {\n\t\tlog.Errorf(\"[res %s] Resource already fullfilled\", resourceId)\n\t\treturn fmt.Errorf(\"Resource is %s already full filled\", resourceId)\n\t}\n\tconn.Update(ar)\n\n\tif err = prov.activateVertexResource(resourceId); err != nil {\n\t\t//TODO This needs to be fixed, what is the correct status\n\t\tar.Status = persistence.RequestRetry\n\t\tconn.Update(ar)\n\t\treturn err\n\t}\n\n\tlog.Debugf(\"[res %s] Successfully activated resource\", resourceId)\n\tar.Status = persistence.RequestFulfilled\n\tif ar.Remediation {\n\t\tar.Remediation = false\n\t}\n\tar.Remediation = false\n\tconn.Update(ar)\n\treturn nil\n}",
"func (d *DeadmansSwitch) Notify(summary, detail string) {\n\tif err := d.notifier(summary, detail); err != nil {\n\t\tfailedNotifications.Inc()\n\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err.Error())\n\t}\n}",
"func sendNotification(n notifier) {\n\tn.notify()\n}",
"func sendNotification(n notifier) {\n\tn.notify()\n}",
"func sendNotification(n notifier) {\n\tn.notify()\n}",
"func sendNotification(n notifier) {\n\tn.notify()\n}",
"func (t *Timer) healthcheckfail(writer http.ResponseWriter, request *http.Request) {\n\tif t.isFinished {\n\t\twriter.WriteHeader(400)\n\t} else {\n\t\twriter.WriteHeader(200)\n\t}\n}",
"func (cp *singleConnectionPool) OnFailure(c *Connection) error { return nil }",
"func OnFail(onFail FailFunc) {\n\tlock_.Lock()\n\tonFail_ = onFail\n\tlock_.Unlock()\n}",
"func (m TaskManager) HandleNotification(c context.Context, ctl task.Controller, msg *pubsub.PubsubMessage) error {\n\treturn errors.New(\"not implemented\")\n}",
"func (m TaskManager) HandleNotification(c context.Context, ctl task.Controller, msg *pubsub.PubsubMessage) error {\n\treturn errors.New(\"not implemented\")\n}",
"func (p *Note) Notification(mt, msg, buid string, out interface{}) error {\n\tctx, cancel := context.WithTimeout(context.Background(), p.Timeout)\n\tdefer cancel()\n\treturn p.client.Do(p.note(ctx, mt, msg, buid), out)\n}",
"func (c *Controller) succeed(j weles.JobID) {\n\t// errors logged in the SetStatusAndInfo.\n\t_ = c.jobs.SetStatusAndInfo(j, weles.JobStatusCOMPLETED, \"\") // nolint:gosec\n\tc.boruter.Release(j)\n}",
"func (p *promise) notifySuccess(handler SuccessHandler, result interface{}) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Printf(\"success handler panic'd: %s\", r)\n\t\t}\n\t}()\n\n\thandler(result)\n}",
"func (i *DeleteOrUpdateInvTask) StatusUpdate(_ *taskrunner.TaskContext, _ object.ObjMetadata) {}",
"func (r *reporter) Fail() {\n\tatomic.StoreInt32(&r.failed, 1)\n}",
"func Retry(o Operation, b Backoff) error { return RetryNotify(o, b, nil) }",
"func ReportFailure(errorMessages []string) error {\n\twriteLog(\"DEBUG: Reporting FAILURE\")\n\n\t// make a new report without errors\n\tnewReport := status.NewReport(errorMessages)\n\n\t// send it\n\treturn sendReport(newReport)\n}",
"func (h *State) notifyEmail(ctx *router.Context, cfg *rotang.Configuration, t time.Time) error {\n\tif !cfg.Config.Email.Enabled || !cfg.Config.Enabled || cfg.Config.External {\n\t\tmsg := \"config not Enabled\"\n\t\tif cfg.Config.Enabled {\n\t\t\tmsg = \"e-mail notifications disabled\"\n\t\t}\n\t\tlogging.Infof(ctx.Context, \"notifyEmail: %q not considered due to %s\", cfg.Config.Name, msg)\n\t\treturn nil\n\t}\n\texpTime := t.Add(time.Duration(cfg.Config.Email.DaysBeforeNotify) * fullDay).UTC()\n\tshifts, err := h.shiftStore(ctx.Context).ShiftsFromTo(ctx.Context, cfg.Config.Name, expTime, expTime.Add(fullDay))\n\tif err != nil {\n\t\tif status.Code(err) == codes.NotFound {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tfor _, s := range shifts {\n\t\tlogging.Debugf(ctx.Context, \"notifyEmail: %q considering shift: %v with expTime: %v\", cfg.Config.Name, s, expTime)\n\t\t// startAfterExpiry checks that the shift StartTime is Equal or After the expiry time.\n\t\tstartAfterExpiry := s.StartTime.After(expTime) || s.StartTime.Equal(expTime)\n\t\t// startInsideDay handles sending only one mail per shift.\n\t\tstartInsideDay := s.StartTime.Before(expTime.Add(fullDay))\n\t\t// notifyZero DatesBeforeNotify 0, then just check we're in the same day as ShiftStart.\n\t\tnotifyZero := t.Equal(expTime)\n\t\tif (notifyZero || startAfterExpiry) && startInsideDay {\n\t\t\tlogging.Debugf(ctx.Context, \"notifyEmail: %q matched shift: %v with expTime: %v\", cfg.Config.Name, s, expTime)\n\t\t\tfor _, m := range s.OnCall {\n\t\t\t\tif err := h.sendMail(ctx, cfg, &s, m.Email); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlogging.Infof(ctx.Context, \"notifyEmail: mail sent out to: %q, rota: %q\", m.Email, cfg.Config.Name)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (r *PytorchJobReconciler) updateGeneralJobStatus(pytorchJob *training.PyTorchJob,\n\treplicaSpecs map[v1.ReplicaType]*v1.ReplicaSpec, jobStatus *v1.JobStatus, restart bool) error {\n\tlog.Info(\"Updating status\", \"PytorchJob name\", pytorchJob.Name, \"restart\", restart)\n\n\t// Set job status start time since this job has acknowledged by controller.\n\tif jobStatus.StartTime == nil {\n\t\tnow := metav1.Now()\n\t\tjobStatus.StartTime = &now\n\t}\n\n\tpreviousRestarting := commonutil.IsRestarting(*jobStatus)\n\tpreviousFailed := commonutil.IsFailed(*jobStatus)\n\tallWorkersSucceed := false\n\tworkerRep, workerFound := replicaSpecs[training.PyTorchReplicaTypeWorker]\n\tif workerFound {\n\t\tsucceed := int32(0)\n\t\tif jobStatus.ReplicaStatuses[training.PyTorchReplicaTypeWorker] != nil {\n\t\t\tsucceed = jobStatus.ReplicaStatuses[training.PyTorchReplicaTypeWorker].Succeeded\n\t\t}\n\t\tallWorkersSucceed = *workerRep.Replicas == succeed\n\t}\n\n\tfor rtype, spec := range replicaSpecs {\n\t\treplicas := *spec.Replicas\n\t\t// If rtype in replica status not found, there must be a mistyped/invalid rtype in job spec,\n\t\t// and it has not been reconciled in previous processes, discard it.\n\t\tstatus, ok := jobStatus.ReplicaStatuses[rtype]\n\t\tif !ok {\n\t\t\tlog.Info(\"skipping invalid replica type\", \"rtype\", rtype)\n\t\t\tcontinue\n\t\t}\n\t\texpected := replicas - status.Succeeded\n\t\trunning := status.Active\n\t\tfailed := status.Failed\n\n\t\tlog.Info(\"Update pytorch job status\", \"PyTorchJob\", pytorchJob.Name,\n\t\t\t\"ReplicaType\", rtype, \"expected\", expected, \"running\", running, \"failed\", failed)\n\n\t\tif job_controller.ContainsReplicaType(replicaSpecs, training.PyTorchReplicaTypeMaster, v1.JobReplicaTypeAIMaster) {\n\t\t\tif rtype == training.PyTorchReplicaTypeMaster || rtype == v1.JobReplicaTypeAIMaster {\n\t\t\t\tif running > 0 {\n\t\t\t\t\tmsg := fmt.Sprintf(\"PyTorchJob %s is running.\", pytorchJob.Name)\n\t\t\t\t\terr := commonutil.UpdateJobConditions(jobStatus, v1.JobRunning, commonutil.JobRunningReason, msg)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Info(\"Append job condition\", \" error:\", err)\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Conditions for marking job as succeeded:\n\t\t\t\t// 1. master exit successfully with success policy is none.\n\t\t\t\t// 2. if success policy is AllWorkers, then wait util all workers succeed.\n\t\t\t\t// 3. 
aimaster is enabled and it exits successfully.\n\t\t\t\tsucceed := replicas > 0 && expected == 0\n\t\t\t\tif rtype != v1.JobReplicaTypeAIMaster && workerFound {\n\t\t\t\t\tsucceed = succeed && allWorkersSucceed\n\t\t\t\t}\n\t\t\t\tif succeed {\n\t\t\t\t\tmsg := fmt.Sprintf(\"PyTorchJob %s is successfully completed.\", pytorchJob.Name)\n\t\t\t\t\tr.recorder.Event(pytorchJob, corev1.EventTypeNormal, commonutil.JobSucceededReason, msg)\n\t\t\t\t\tif jobStatus.CompletionTime == nil {\n\t\t\t\t\t\tnow := metav1.Now()\n\t\t\t\t\t\tjobStatus.CompletionTime = &now\n\t\t\t\t\t}\n\t\t\t\t\terr := commonutil.UpdateJobConditions(jobStatus, v1.JobSucceeded, commonutil.JobSucceededReason, msg)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Info(\"Append job condition\", \"error:\", err)\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tr.ctrl.Metrics.SuccessInc()\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Info(\"Invalid config: Job must contain master replica spec\")\n\t\t\treturn errors.New(\"invalid config: Job must contain master replica spec\")\n\t\t}\n\n\t\tif failed > 0 {\n\t\t\tif restart && rtype != v1.JobReplicaTypeAIMaster {\n\t\t\t\tmsg := fmt.Sprintf(\"PyTorchJob %s is restarting because %d %s replica(s) failed.\", pytorchJob.Name, failed, rtype)\n\t\t\t\tr.recorder.Event(pytorchJob, corev1.EventTypeWarning, commonutil.JobRestartingReason, msg)\n\t\t\t\terr := commonutil.UpdateJobConditions(jobStatus, v1.JobRestarting, commonutil.JobRestartingReason, msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Info(\"Append job condition\", \"error:\", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !previousRestarting {\n\t\t\t\t\tr.ctrl.Metrics.FailureInc()\n\t\t\t\t\tr.ctrl.Metrics.RestartInc()\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tmsg := fmt.Sprintf(\"PyTorchJob %s is failed because %d %s replica(s) failed.\", pytorchJob.Name, failed, rtype)\n\t\t\t\tr.recorder.Event(pytorchJob, corev1.EventTypeNormal, commonutil.JobFailedReason, msg)\n\t\t\t\tif jobStatus.CompletionTime == nil {\n\t\t\t\t\tnow := metav1.Now()\n\t\t\t\t\tjobStatus.CompletionTime = &now\n\t\t\t\t}\n\t\t\t\terr := commonutil.UpdateJobConditions(jobStatus, v1.JobFailed, commonutil.JobFailedReason, msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Info(\"Append job condition\", \"error: \", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !previousFailed {\n\t\t\t\t\tr.ctrl.Metrics.FailureInc()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) {\n\tkey, err := notify.ExtractGroupKey(ctx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tlevel.Debug(n.logger).Log(\"incident\", key)\n\tdata := notify.GetTemplateData(ctx, n.tmpl, as, n.logger)\n\n\ttmpl := notify.TmplText(n.tmpl, data, &err)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tcontent := tmpl(n.conf.Message)\n\n\t// If the dingtalk chatbot required keywords security authenticate. add the keywords to the content.\n\tif n.conf.Keywords != nil && len(n.conf.Keywords) > 0 {\n\t\tkeywords := \"\\n\\n[Keywords] \"\n\t\tfor _, k := range n.conf.Keywords {\n\t\t\tkeywords = fmt.Sprintf(\"%s%s, \", keywords, k)\n\t\t}\n\n\t\tkeywords = strings.TrimSuffix(keywords, \", \")\n\t\tcontent = fmt.Sprintf(\"%s%s\", content, keywords)\n\t}\n\n\tmsg := &dingtalkMessage{\n\t\tType: \"text\",\n\t\tText: dingtalkMessageContent{\n\t\t\tContent: content,\n\t\t},\n\t}\n\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"templating error: %s\", err)\n\t}\n\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(msg); err != nil {\n\t\treturn false, err\n\t}\n\n\twebhook, err := url.Parse(n.conf.Webhook.String())\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tpostMessageURL := config.URL{\n\t\tURL: webhook,\n\t}\n\n\t// If the dingtalk chatbot required signature security authenticate,\n\t// add signature and timestamp to the url.\n\tif len(n.conf.Secret) > 0 {\n\t\ttimestamp, sign, err := calcSign(string(n.conf.Secret))\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tq := postMessageURL.Query()\n\t\tq.Set(\"timestamp\", timestamp)\n\t\tq.Set(\"sign\", sign)\n\t\tpostMessageURL.RawQuery = q.Encode()\n\t}\n\n\tresp, err := notify.PostJSON(ctx, n.client, postMessageURL.String(), &buf)\n\tif err != nil {\n\t\treturn true, notify.RedactURL(err)\n\t}\n\tdefer notify.Drain(resp)\n\n\tif resp.StatusCode != 200 {\n\t\treturn true, fmt.Errorf(\"unexpected status code %v\", resp.StatusCode)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\tlevel.Debug(n.logger).Log(\"response\", string(body), \"incident\", key)\n\n\tvar dingResp response\n\tif err := json.Unmarshal(body, &dingResp); err != nil {\n\t\treturn true, err\n\t}\n\n\tif dingResp.Code == 0 {\n\t\treturn false, nil\n\t}\n\n\t// Exceed the active call frequency limit.\n\tif dingResp.Status != 0 {\n\t\treturn false, errors.New(dingResp.Punish)\n\t}\n\n\treturn false, errors.New(dingResp.Message)\n}",
"func (me TNotifyWorkersFailureCode) IsHardFailure() bool { return me.String() == \"HardFailure\" }",
"func Notify (err error, rawData ...interface{}){\n\tbugsnag.Notify(err, rawData)\n}",
"func (f NotifyHandlerFunc) ServeNotify(req Request, n Notifier) {\n\tf(req, n)\n}",
"func (schedule *Schedule) waitForWorkerToSendInfo(worker *types.Worker) error {\r\n\tfor worker.PID == -1 {\r\n\t\ttime.Sleep(10 * time.Millisecond)\r\n\t}\r\n\tfmt.Println(\"PID has been updated\")\r\n\tif worker.PID == -2 {\r\n\t\treturn errors.New(\"ERROR: Worker could not be started\")\r\n\t}\r\n\tfor worker.Address == \"\" {\r\n\t\ttime.Sleep(10 * time.Millisecond)\r\n\t}\r\n\tfmt.Println(\"NetAddress has been updated\")\r\n\treturn nil\r\n}",
"func RetryNotify(f func() error, b BackOff, notify func(err error, wait time.Duration)) error {\n\tvar err error\n\tvar next time.Duration\n\n\tb.Reset()\n\tfor {\n\t\tif err = f(); err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tif next = b.NextBackOff(); next == Stop {\n\t\t\treturn err\n\t\t}\n\n\t\tif notify != nil {\n\t\t\tnotify(err, next)\n\t\t}\n\n\t\ttime.Sleep(next)\n\t}\n}",
"func notifyStopping() {\n}",
"func (t *Tracker) deployFailureHandler(kcd *customv1.KCD, deployments *appsv1.DeploymentList) {\n\tfor _, item := range deployments.Items {\n\t\tdeployment := item\n\t\tset := labels.Set(deployment.Spec.Selector.MatchLabels)\n\t\tpods, err := t.podClient.List(metav1.ListOptions{LabelSelector: set.AsSelector().String()})\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Unable to grab pod logs for deployment: \" + deployment.Name)\n\t\t}\n\t\tfor _, pod := range pods.Items {\n\t\t\tlog.Tracef(\"Got pod: %v in\", pod.Name)\n\t\t\tpodReady := false\n\t\t\tvar podMessage, podReason string\n\t\t\tfor _, condition := range pod.Status.Conditions {\n\t\t\t\tif condition.Type == \"Ready\" && condition.Status == \"True\" {\n\t\t\t\t\tpodReady = true\n\t\t\t\t\tpodMessage = condition.Message\n\t\t\t\t\tpodReason = condition.Reason\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !podReady {\n\t\t\t\tfor _, container := range pod.Spec.Containers {\n\t\t\t\t\tlogs, err := t.getContainerLog(pod.Name, container.Name)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Warn(err)\n\t\t\t\t\t}\n\t\t\t\t\tdeployMessage := DeployMessage{\n\t\t\t\t\t\tType: \"deployFailedLogs\",\n\t\t\t\t\t\tVersion: \"v1alpha2\",\n\t\t\t\t\t\tBody: FailedPodLogData{\n\t\t\t\t\t\t\tt.clusterName,\n\t\t\t\t\t\t\ttime.Now().UTC(),\n\t\t\t\t\t\t\tdeployment,\n\t\t\t\t\t\t\tt.version,\n\t\t\t\t\t\t\tpod.Name,\n\t\t\t\t\t\t\tcontainer.Name,\n\t\t\t\t\t\t\tlogs,\n\t\t\t\t\t\t\tpodReason,\n\t\t\t\t\t\t\tpodMessage,\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tt.enqueue(t.informerQueues[\"kcd\"], deployMessage)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\treturn\n}",
"func (z *Zone) Notify() (*NotifyResult, error) {\n\tnotifyResult := &NotifyResult{}\n\tmyError := new(Error)\n\tnotifySling := z.PowerDNSHandle.makeSling()\n\tresp, err := notifySling.New().Put(strings.TrimRight(z.URL, \".\")+\"/notify\").Receive(notifyResult, myError)\n\n\tif err == nil && resp.StatusCode >= 400 {\n\t\tmyError.Message = strings.Join([]string{resp.Status, myError.Message}, \" \")\n\t\treturn &NotifyResult{}, myError\n\t}\n\n\treturn notifyResult, err\n}",
"func (p *promise) notify() {\n\tif p.IsSuccess() {\n\t\tres := p.Result()\n\n\t\thandlers := p.copySuccessHandlers()\n\t\tfor _, handler := range handlers {\n\t\t\tp.notifySuccess(handler, res)\n\t\t}\n\t} else {\n\t\terr := p.Error()\n\n\t\t// invoke the catch handlers, even if err == ErrPromiseCanceled\n\t\thandlers := p.copyCatchHandlers()\n\t\tfor _, handler := range handlers {\n\t\t\tp.notifyCatch(handler, err)\n\t\t}\n\n\t\t// if canceled, invoke cancel handlers\n\t\tif err == ErrPromiseCanceled {\n\t\t\thandlers := p.copyCanceledHandlers()\n\t\t\tfor _, handler := range handlers {\n\t\t\t\tp.notifyCanceled(handler)\n\t\t\t}\n\t\t}\n\t}\n\n\thandlers := p.copyAlwaysHandlers()\n\tfor _, handler := range handlers {\n\t\tp.notifyAlways(handler)\n\t}\n}",
"func (JobFailureHandlerConfig) validate() error {\n\treturn nil\n}",
"func (sn *SlackMessage) Notify(slackURL string) error {\n\tbp, err := json.Marshal(sn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = http.Post(slackURL, \"application/json\", bytes.NewBuffer(bp))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (b *MemoryBackend) Notify(_ Feature, payload Payload) error {\n\tnotice, ok := payload.(*Notice)\n\tif !ok {\n\t\treturn fmt.Errorf(\"memory backend does not support payload of type %q\", reflect.TypeOf(payload))\n\t}\n\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tb.Notices = append(b.Notices, notice)\n\n\treturn nil\n}",
"func (n *WebNotifier) Notify(topic string, message []byte) error {\n\tvar allErrs error\n\n\tfor _, notifier := range n.notifiers {\n\t\terr := notifier.Notify(topic, message)\n\t\tallErrs = appendError(allErrs, err)\n\t}\n\n\treturn allErrs\n}",
"func (o *PatchEndpointIDConfigFailed) IsSuccess() bool {\n\treturn false\n}",
"func (c *Client) NotifyWhenUploaded(ctx context.Context, params *NotifyWhenUploadedInput, optFns ...func(*Options)) (*NotifyWhenUploadedOutput, error) {\n\tif params == nil {\n\t\tparams = &NotifyWhenUploadedInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"NotifyWhenUploaded\", params, optFns, addOperationNotifyWhenUploadedMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*NotifyWhenUploadedOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}",
"func sendReport(s status.Report) error {\n\n\twriteLog(\"DEBUG: Sending report with error length of:\", len(s.Errors))\n\twriteLog(\"DEBUG: Sending report with ok state of:\", s.OK)\n\n\t// marshal the request body\n\tb, err := json.Marshal(s)\n\tif err != nil {\n\t\twriteLog(\"ERROR: Failed to marshal status JSON:\", err)\n\t\treturn fmt.Errorf(\"error mashaling status report json: %w\", err)\n\t}\n\n\t// fetch the server url\n\turl, err := getKuberhealthyURL()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to fetch the kuberhealthy url: %w\", err)\n\t}\n\twriteLog(\"INFO: Using kuberhealthy reporting URL:\", url)\n\n\t// send to the server\n\t// TODO - retry logic? Maybe we want this to be sensitive on a failure...\n\tresp, err := http.Post(url, \"application/json\", bytes.NewBuffer(b))\n\tif err != nil {\n\t\twriteLog(\"ERROR: got an error sending POST to kuberhealthy:\", err.Error())\n\t\treturn fmt.Errorf(\"bad POST request to kuberhealthy status reporting url: %w\", err)\n\t}\n\n\t// make sure we got a 200 and consider it an error otherwise\n\tif resp.StatusCode != http.StatusOK {\n\t\twriteLog(\"ERROR: got a bad status code from kuberhealthy:\", resp.StatusCode, resp.Status)\n\t\treturn fmt.Errorf(\"bad status code from kuberhealthy status reporting url: [%d] %s \", resp.StatusCode, resp.Status)\n\t}\n\twriteLog(\"INFO: Got a good http return status code from kuberhealthy URL:\", url)\n\n\treturn err\n}",
"func (r *ReportTaskRequest) SetSuccess(success bool) {\n r.Success = &success\n}",
"func jobCompleted(job JobType) bool {\n\t// Call numerxData server to check the status of this job\n\t// return true if we get:\n\t// \t\t[“step”=”metaindexstatus”, “status”=”success”]\n\t//\tor [“step”=“eventindexstatus”, “status” = “success”]\n\t/*\n\t\t[\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"metaindexstatus\",\"Status\":\"success\",\"Timestamp\":1465589455508,\"Notes\":\"\"},\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"parsedmeta\",\"Status\":\"success\",\"Timestamp\":1465588843502,\"Notes\":\"\"},\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"rawmeta\",\"Status\":\"success\",\"Timestamp\":1465588543502,\"Notes\":\"\"}\n\t\t]\n\t*/\n\t// uri string, resource string, params map[string]string\n\tvar params map[string]string = make(map[string]string)\n\tparams[\"id\"] = job.JobId\n\trequest, err := fileUploadStatusRequest(baseUrl, \"/status\", params)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn false // let the caller func to handle retries\n\t}\n\n\tif verbose {\n\t\tlog.Println(\"RQ URL: \", request.URL)\n\t\tlog.Println(\"RQ Headers: \", request.Header)\n\t\tlog.Println(\"RQ Body: \", request)\n\t}\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(request)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn false // let the caller func to handle retries\n\t} else {\n\t\t/* JSON\n\t\t[\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"metaindexstatus\",\"Status\":\"success\",\"Timestamp\":1465589455508,\"Notes\":\"\"},\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"parsedmeta\",\"Status\":\"success\",\"Timestamp\":1465588843502,\"Notes\":\"\"},\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"rawmeta\",\"Status\":\"success\",\"Timestamp\":1465588543502,\"Notes\":\"\"}\n\t\t]\n\t\t*/\n\t\tdefer resp.Body.Close()\n\n\t\tvar bodyContent []byte\n\t\tif verbose {\n\t\t\tlog.Println(\"Status RS Status: \", resp.StatusCode)\n\t\t\tlog.Println(\"Status RS Headers: \", resp.Header)\n\t\t}\n\n\t\tbodyContent, err := ioutil.ReadAll(resp.Body)\n\n\t\tif verbose {\n\t\t\tlog.Println(\"Status RS Content: error? 
:\", err)\n\t\t\tlog.Println(\"Status RS Content: body: bytes: \", bodyContent)\n\t\t\tlog.Println(\"Status RS Content: body: string: \", string(bodyContent))\n\t\t}\n\t\tif resp.StatusCode == 200 {\n\t\t\t// Check the step's status\n\t\t\tstatus, err := getStatusResponse(bodyContent)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error %v while checking status for %v, file: %v \\n\", err, job.JobId, job.Filename)\n\t\t\t\treturn false // let the caller func to handle retries\n\t\t\t} else {\n\t\t\t\tswitch requestType {\n\t\t\t\tcase RQ_Viewership:\n\t\t\t\t\tfor _, entry := range status {\n\t\t\t\t\t\tswitch entry.Step {\n\t\t\t\t\t\tcase string(IndexEventData): // \"eventindexstatus\":\n\t\t\t\t\t\t\tswitch entry.Status {\n\t\t\t\t\t\t\tcase string(Success): // \"success\":\n\t\t\t\t\t\t\t\tif verbose {\n\t\t\t\t\t\t\t\t\tlog.Printf(\"Complete for: %s, file: %s\\n\", job.JobId, job.Filename)\n\t\t\t\t\t\t\t\t\tlog.Println(\"Current state: \", status)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\tcase string(Failure): // \"failure\":\n\t\t\t\t\t\t\t\tfailedJobsChan <- job\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcase string(ParsedEventData), string(RawEventData):\n\t\t\t\t\t\t\tif entry.Status == string(Failure) {\n\t\t\t\t\t\t\t\tfailedJobsChan <- job\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tlog.Printf(\"Not yet: %s, file: %s\\n\", job.JobId, job.Filename)\n\t\t\t\t\t\tlog.Println(\"Current state: \", status)\n\t\t\t\t\t}\n\n\t\t\t\t//\t(actually the new struct with file-name, id, and retry-number)\n\t\t\t\tcase RQ_MetaBilling, RQ_MetaProgram, RQ_MetaChanMap, RQ_MetaEventMap:\n\t\t\t\t\tfor _, entry := range status {\n\t\t\t\t\t\tswitch entry.Step {\n\t\t\t\t\t\tcase string(IndexMetaData): // \"metaindexstatus\":\n\t\t\t\t\t\t\tswitch entry.Status {\n\t\t\t\t\t\t\tcase string(Success): // \"success\":\n\t\t\t\t\t\t\t\tif verbose {\n\t\t\t\t\t\t\t\t\tlog.Printf(\"Complete for: %s, file: %s\\n\", job.JobId, job.Filename)\n\t\t\t\t\t\t\t\t\tlog.Println(\"Current state: \", status)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\tcase string(Failure): // \"failure\":\n\t\t\t\t\t\t\t\tfailedJobsChan <- job\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcase string(ParsedMetaData), string(RawMetaData):\n\t\t\t\t\t\t\tif entry.Status == string(Failure) {\n\t\t\t\t\t\t\t\tfailedJobsChan <- job\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tlog.Printf(\"Not yet: %s, file: %s\\n\", job.JobId, job.Filename)\n\t\t\t\t\t\tlog.Println(\"Current state: \", status)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Println(\"Error Status %v while checking status for %v, file: %s \\n\", err, job.JobId, job.Filename)\n\t\t\tfailedJobsChan <- job\n\t\t\tif verbose {\n\t\t\t\tlog.Println(\"Error Status %v while checking status for %v, file: %s \\n\", err, job.JobId, job.Filename)\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (o *PatchEndpointIDConfigTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func processNotify(data *etcd.Response, stop chan bool, ts *TestSuit) bool {\n\tif data == nil {\n\t\tts.failcount++\n\t\treturn true\n\t}\n\tif data.Action == EtcdActionUpdate {\n\t\tnow := time.Now().UnixNano()\n\t\t// first update notification\n\t\tif ts.start == 0 {\n\t\t\tts.start = now\n\t\t\treturn false\n\t\t}\n\t\t// omit the first second\n\t\tif !ts.running && (now-ts.start) > 1e9 {\n\t\t\tts.running = true\n\t\t\tts.start = now\n\t\t}\n\t\tts.count++\n\t\tif lstart, err := strconv.ParseInt(data.Node.Value, 10, 0); err != nil {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\tlatency := now - lstart\n\t\t\tts.latencySum += latency\n\t\t}\n\t}\n\treturn false\n}",
"func (w *Watches) Notify(ctx context.Context, name string, events, cookie uint32, et EventType, unlinked bool) {\n\tvar hasExpired bool\n\tw.mu.RLock()\n\tfor _, watch := range w.ws {\n\t\tif unlinked && watch.ExcludeUnlinked() && et == PathEvent {\n\t\t\tcontinue\n\t\t}\n\t\tif watch.Notify(name, events, cookie) {\n\t\t\thasExpired = true\n\t\t}\n\t}\n\tw.mu.RUnlock()\n\n\tif hasExpired {\n\t\tw.cleanupExpiredWatches(ctx)\n\t}\n}",
"func (h *Health) notify() {\n\tfor _, subscriber := range h.subscribers {\n\t\tif subscriber == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tselect {\n\t\tcase subscriber <- struct{}{}:\n\t\tdefault:\n\t\t}\n\t}\n}",
"func (u user) notify() {\n\tfmt.Printf(\"Sending User Email to %s<%s>\\n\",\n\t\tu.name,\n\t\tu.email)\n}",
"func (n *Notifier) SendIndividualNotification(channelMemberID int64) {\n\tchm, err := n.db.SelectChannelMember(channelMemberID)\n\tif err != nil {\n\t\tlogrus.Errorf(\"SelectChannelMember failed: %v\", err)\n\t\treturn\n\t}\n\tchannel, err := n.db.SelectChannel(chm.ChannelID)\n\tif err != nil {\n\t\tlogrus.Errorf(\"notifier: SelectChannel failed: %v\\n\", err)\n\t\treturn\n\t}\n\tsubmittedStandup := n.db.SubmittedStandupToday(chm.UserID, chm.ChannelID)\n\tif submittedStandup {\n\t\treturn\n\t}\n\trepeats := 0\n\tnotify := func() error {\n\t\tsubmittedStandup := n.db.SubmittedStandupToday(chm.UserID, chm.ChannelID)\n\t\tif repeats < n.conf.ReminderRepeatsMax && !submittedStandup {\n\t\t\tn.s.SendMessage(channel.ChannelID, fmt.Sprintf(n.conf.Translate.IndividualStandupersLate, chm.UserID), nil)\n\t\t\trepeats++\n\t\t\terr := errors.New(\"Continue backoff\")\n\t\t\treturn err\n\t\t}\n\t\tif !submittedStandup {\n\t\t\terr := n.s.SendUserMessage(chm.UserID, fmt.Sprintf(n.conf.Translate.NotifyDirectMessage, chm.UserID, channel.ChannelID, channel.ChannelName))\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"notifier: s.SendMessage failed: %v\\n\", err)\n\t\t\t}\n\t\t}\n\t\tlogrus.Infof(\"User %v submitted standup!\", chm.UserID)\n\t\treturn nil\n\t}\n\tb := backoff.NewConstantBackOff(time.Duration(n.conf.NotifierInterval) * time.Minute)\n\terr = backoff.Retry(notify, b)\n\tif err != nil {\n\t\tlogrus.Errorf(\"notifier: backoff.Retry failed: %v\\n\", err)\n\t}\n}",
"func (_m *MockJournal) Notify(_a0 Notifiee, _a1 Index) {\n\t_m.Called(_a0, _a1)\n}",
"func (mp *MonitorPool) Notify(n *Notification) {\n\tif timeout, ok := mp.timeouts[n.Code]; ok {\n\t\tsession := mp.session(n.Key)\n\t\tmonitor := session.monitor(n.Code, timeout)\n\t\tmonitor.pulse()\n\t} else {\n\t\tlog.Println(fmt.Sprintf(\"presence: no configuration for notification code: %s\", n.Code))\n\t}\n}"
] | [
"0.67180604",
"0.655542",
"0.6183939",
"0.6133323",
"0.60512966",
"0.58008385",
"0.5772567",
"0.5686183",
"0.5661964",
"0.565898",
"0.5417281",
"0.541208",
"0.5409103",
"0.5391522",
"0.5385122",
"0.537624",
"0.5346147",
"0.5321421",
"0.53012604",
"0.52782583",
"0.5277206",
"0.5262294",
"0.5246023",
"0.52454054",
"0.51944774",
"0.5171704",
"0.5159223",
"0.51322174",
"0.51181823",
"0.51058257",
"0.5101343",
"0.5098025",
"0.5053575",
"0.50251293",
"0.5024767",
"0.50188136",
"0.4991418",
"0.49906796",
"0.4989778",
"0.49448475",
"0.49334955",
"0.49108994",
"0.49106622",
"0.48954955",
"0.4893447",
"0.48703563",
"0.4864561",
"0.48582372",
"0.4857781",
"0.48404944",
"0.48402837",
"0.48227197",
"0.4804151",
"0.47927186",
"0.4792659",
"0.4789066",
"0.4789066",
"0.4789066",
"0.4789066",
"0.47887814",
"0.47858366",
"0.4761655",
"0.47541445",
"0.47541445",
"0.47514787",
"0.47497052",
"0.47457793",
"0.47265992",
"0.47122175",
"0.4711315",
"0.4708735",
"0.47004074",
"0.46990633",
"0.46983156",
"0.46970484",
"0.46966273",
"0.4695265",
"0.46844292",
"0.46575552",
"0.46559653",
"0.46545398",
"0.46526322",
"0.464861",
"0.46448195",
"0.46435857",
"0.4624312",
"0.46203792",
"0.4613326",
"0.46049196",
"0.46045843",
"0.46019128",
"0.45991382",
"0.45959237",
"0.45937228",
"0.45909086",
"0.4589331",
"0.45843378",
"0.45826653",
"0.4577657",
"0.4575748"
] | 0.5461978 | 10 |
KeyRotate rotates encryption key of an object | func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, objInfo ObjectInfo) error {
srcBucket := r.Bucket
srcObject := objInfo.Name
if objInfo.DeleteMarker || !objInfo.VersionPurgeStatus.Empty() {
return nil
}
sseKMS := crypto.S3KMS.IsEncrypted(objInfo.UserDefined)
sseS3 := crypto.S3.IsEncrypted(objInfo.UserDefined)
if !sseKMS && !sseS3 { // neither sse-s3 nor sse-kms disallowed
return errInvalidEncryptionParameters
}
if sseKMS && r.Encryption.Type == sses3 { // previously encrypted with sse-kms, now sse-s3 disallowed
return errInvalidEncryptionParameters
}
versioned := globalBucketVersioningSys.PrefixEnabled(srcBucket, srcObject)
versionSuspended := globalBucketVersioningSys.PrefixSuspended(srcBucket, srcObject)
lock := api.NewNSLock(r.Bucket, objInfo.Name)
lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
if err != nil {
return err
}
ctx = lkctx.Context()
defer lock.Unlock(lkctx)
opts := ObjectOptions{
VersionID: objInfo.VersionID,
Versioned: versioned,
VersionSuspended: versionSuspended,
NoLock: true,
}
obj, err := api.GetObjectInfo(ctx, r.Bucket, objInfo.Name, opts)
if err != nil {
return err
}
oi := obj.Clone()
var (
newKeyID string
newKeyContext kms.Context
)
encMetadata := make(map[string]string)
for k, v := range oi.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
encMetadata[k] = v
}
}
if (sseKMS || sseS3) && r.Encryption.Type == ssekms {
if err = r.Encryption.Validate(); err != nil {
return err
}
newKeyID = strings.TrimPrefix(r.Encryption.Key, crypto.ARNPrefix)
newKeyContext = r.Encryption.kmsContext
}
if err = rotateKey(ctx, []byte{}, newKeyID, []byte{}, r.Bucket, oi.Name, encMetadata, newKeyContext); err != nil {
return err
}
// Since we are rotating the keys, make sure to update the metadata.
oi.metadataOnly = true
oi.keyRotation = true
for k, v := range encMetadata {
oi.UserDefined[k] = v
}
if _, err := api.CopyObject(ctx, r.Bucket, oi.Name, r.Bucket, oi.Name, oi, ObjectOptions{
VersionID: oi.VersionID,
}, ObjectOptions{
VersionID: oi.VersionID,
NoLock: true,
}); err != nil {
return err
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func rotateKey(oldKey []byte, newKey []byte, metadata map[string]string) error {\n\tdelete(metadata, SSECustomerKey) // make sure we do not save the key by accident\n\n\tif metadata[ServerSideEncryptionSealAlgorithm] != SSESealAlgorithmDareSha256 { // currently DARE-SHA256 is the only option\n\t\treturn errObjectTampered\n\t}\n\tiv, err := base64.StdEncoding.DecodeString(metadata[ServerSideEncryptionIV])\n\tif err != nil || len(iv) != SSEIVSize {\n\t\treturn errObjectTampered\n\t}\n\tsealedKey, err := base64.StdEncoding.DecodeString(metadata[ServerSideEncryptionSealedKey])\n\tif err != nil || len(sealedKey) != 64 {\n\t\treturn errObjectTampered\n\t}\n\n\tsha := sha256.New() // derive key encryption key\n\tsha.Write(oldKey)\n\tsha.Write(iv)\n\tkeyEncryptionKey := sha.Sum(nil)\n\n\tobjectEncryptionKey := bytes.NewBuffer(nil) // decrypt object encryption key\n\tn, err := sio.Decrypt(objectEncryptionKey, bytes.NewReader(sealedKey), sio.Config{\n\t\tKey: keyEncryptionKey,\n\t})\n\tif n != 32 || err != nil { // Either the provided key does not match or the object was tampered.\n\t\tif subtle.ConstantTimeCompare(oldKey, newKey) == 1 {\n\t\t\treturn errInvalidSSEParameters // AWS returns special error for equal but invalid keys.\n\t\t}\n\t\treturn errSSEKeyMismatch // To provide strict AWS S3 compatibility we return: access denied.\n\t}\n\tif subtle.ConstantTimeCompare(oldKey, newKey) == 1 {\n\t\treturn nil // we don't need to rotate keys if newKey == oldKey\n\t}\n\n\tnonce := make([]byte, 32) // generate random values for key derivation\n\tif _, err = io.ReadFull(rand.Reader, nonce); err != nil {\n\t\treturn err\n\t}\n\n\tniv := sha256.Sum256(nonce[:]) // derive key encryption key\n\tsha = sha256.New()\n\tsha.Write(newKey)\n\tsha.Write(niv[:])\n\tkeyEncryptionKey = sha.Sum(nil)\n\n\tsealedKeyW := bytes.NewBuffer(nil) // sealedKey := 16 byte header + 32 byte payload + 16 byte tag\n\tn, err = sio.Encrypt(sealedKeyW, bytes.NewReader(objectEncryptionKey.Bytes()), sio.Config{\n\t\tKey: keyEncryptionKey,\n\t})\n\tif n != 64 || err != nil {\n\t\treturn errors.New(\"failed to seal object encryption key\") // if this happens there's a bug in the code (may panic ?)\n\t}\n\n\tmetadata[ServerSideEncryptionIV] = base64.StdEncoding.EncodeToString(niv[:])\n\tmetadata[ServerSideEncryptionSealAlgorithm] = SSESealAlgorithmDareSha256\n\tmetadata[ServerSideEncryptionSealedKey] = base64.StdEncoding.EncodeToString(sealedKeyW.Bytes())\n\treturn nil\n}",
"func (o OfflineNotaryRepository) RotateKey(data.RoleName, bool, []string) error {\n\treturn storage.ErrOffline{}\n}",
"func (e EmptyTargetsNotaryRepository) RotateKey(data.RoleName, bool, []string) error {\n\treturn nil\n}",
"func (s HTTPStore) RotateKey(role data.RoleName) ([]byte, error) {\n\turl, err := s.buildKeyURL(role)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq, err := http.NewRequest(\"POST\", url.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := s.roundTrip.RoundTrip(req)\n\tif err != nil {\n\t\treturn nil, NetworkError{Wrapped: err}\n\t}\n\tdefer resp.Body.Close()\n\tif err := translateStatusToError(resp, role.String()+\" key\"); err != nil {\n\t\treturn nil, err\n\t}\n\tb := io.LimitReader(resp.Body, MaxKeySize)\n\tbody, err := ioutil.ReadAll(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn body, nil\n}",
"func (k keyCredential) Rotate(tx transaction.Transaction) (*msgraph.KeyCredential, *crypto.Jwk, error) {\n\tkeysInUse, err := k.filterRevokedKeys(tx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tkeyCredential, jwk, err := k.new(tx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tkeysInUse = append(keysInUse, *keyCredential)\n\n\tapp := util.EmptyApplication().Keys(keysInUse).Build()\n\tif err := k.Application().Patch(tx.Ctx, tx.Instance.GetObjectId(), app); err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"updating application with keycredential: %w\", err)\n\t}\n\n\treturn keyCredential, jwk, nil\n}",
"func (u UninitializedNotaryRepository) RotateKey(data.RoleName, bool, []string) error {\n\treturn client.ErrRepositoryNotExist{}\n}",
"func (e aesGCMEncodedEncryptor) RotateEncryption(ciphertext string) (string, error) {\n\tif !e.ConfiguredToRotate() {\n\t\treturn \"\", &EncryptionError{errors.New(\"key rotation not configured\")}\n\t}\n\n\tplaintext, err := e.Decrypt(ciphertext)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn e.Encrypt(plaintext)\n}",
"func TestEncryptionRotation(t *testing.T, scenario RotationScenario) {\n\t// test data\n\tns := scenario.Namespace\n\tlabelSelector := scenario.LabelSelector\n\n\t// step 1: create the desired resource\n\te := NewE(t)\n\tclientSet := GetClients(e)\n\tscenario.CreateResourceFunc(e, GetClients(e), ns)\n\n\t// step 2: run provided encryption scenario\n\tTestEncryptionType(t, scenario.BasicScenario, scenario.EncryptionProvider)\n\n\t// step 3: take samples\n\trawEncryptedResourceWithKey1 := scenario.GetRawResourceFunc(e, clientSet, ns)\n\n\t// step 4: force key rotation and wait for migration to complete\n\tlastMigratedKeyMeta, err := GetLastKeyMeta(t, clientSet.Kube, ns, labelSelector)\n\trequire.NoError(e, err)\n\trequire.NoError(e, ForceKeyRotation(e, scenario.UnsupportedConfigFunc, fmt.Sprintf(\"test-key-rotation-%s\", rand.String(4))))\n\tWaitForNextMigratedKey(e, clientSet.Kube, lastMigratedKeyMeta, scenario.TargetGRs, ns, labelSelector)\n\tscenario.AssertFunc(e, clientSet, scenario.EncryptionProvider, ns, labelSelector)\n\n\t// step 5: verify if the provided resource was encrypted with a different key (step 2 vs step 4)\n\trawEncryptedResourceWithKey2 := scenario.GetRawResourceFunc(e, clientSet, ns)\n\tif rawEncryptedResourceWithKey1 == rawEncryptedResourceWithKey2 {\n\t\tt.Errorf(\"expected the resource to has a different content after a key rotation,\\ncontentBeforeRotation %s\\ncontentAfterRotation %s\", rawEncryptedResourceWithKey1, rawEncryptedResourceWithKey2)\n\t}\n\n\t// TODO: assert conditions - operator and encryption migration controller must report status as active not progressing, and not failing for all scenarios\n}",
"func (g *Generator) rekey() error {\n\tfor i := keySize / g.cipher.BlockSize(); i > 0; i-- {\n\t\tg.readBlock(g.key[g.cipher.BlockSize()*i:])\n\t}\n\n\treturn g.updateCipher()\n}",
"func RotateAccessKeys() string {\n\tsess := session.Must(session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t\tProfile: \"default\",\n\t}))\n\tclient := iam.New(sess)\n\tdeleteCurrentIamKey(client)\n\tnewKeyOutput, err := client.CreateAccessKey(&iam.CreateAccessKeyInput{})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcfg := readCredentialsFile()\n\tfmt.Println(\"new IAM key is \", *newKeyOutput.AccessKey.AccessKeyId)\n\tcfg.Section(\"default_original\").Key(\"aws_access_key_id\").SetValue(*newKeyOutput.AccessKey.AccessKeyId)\n\tcfg.Section(\"default_original\").Key(\"aws_secret_access_key\").SetValue(*newKeyOutput.AccessKey.SecretAccessKey)\n\tlocation := writeCredentialsFile(cfg)\n\n\treturn location\n}",
"func k8sRotate(t *testing.T, dir string) {\n\tk8sUpdate(t, dir, rotatedHubbleServerCertificate, rotatedHubbleServerPrivkey, rotatedHubbleServerCA)\n}",
"func RotateEncryptionKeys(dbp zesty.DBProvider) (err error) {\n\tdefer errors.DeferredAnnotatef(&err, \"Failed to rotate encrypted callbacks to new key\")\n\n\tvar last string\n\tfor {\n\t\tvar lastID *string\n\t\tif last != \"\" {\n\t\t\tlastID = &last\n\t\t}\n\t\t// load all callbacks\n\t\tcallbacks, err := listCallbacks(dbp, utask.MaxPageSize, lastID, false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(callbacks) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tlast = callbacks[len(callbacks)-1].PublicID\n\n\t\tfor _, c := range callbacks {\n\t\t\tsp, err := dbp.TxSavepoint()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// load callback locked\n\t\t\tcb, err := loadFromPublicID(dbp, c.PublicID, true)\n\t\t\tif err != nil {\n\t\t\t\tdbp.RollbackTo(sp)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// update callback (encrypt)\n\t\t\tif err := cb.update(dbp); err != nil {\n\t\t\t\tdbp.RollbackTo(sp)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// commit\n\t\t\tif err := dbp.Commit(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func RotateEncryption(ciphertext string) (string, error) {\n\treturn defaultEncryptor.RotateEncryption(ciphertext)\n}",
"func (b *backend) pathConfigRotate() *framework.Path {\n return &framework.Path{\n\tPattern: fmt.Sprintf(\"config/rotate/?$\"),\n\tHelpSynopsis: \"Use the existing key to generate a set a new key\",\n\tHelpDescription: \"Use this endpoint to use the current key to generate a new key, and use that\",\n\n\tFields: map[string]*framework.FieldSchema{\n\t \"key_name\": &framework.FieldSchema{\n\t\tType: framework.TypeString,\n\t\tDescription: \"The name for the newly generated key.\",\n\t },\n\t},\n\n\tCallbacks: map[logical.Operation]framework.OperationFunc{\n\t logical.UpdateOperation: b.pathRotateKey,\n\t},\n }\n}",
"func (rtg *RTGProtocol) GenRotationKey(share *RTGShare, crp []*ring.Poly, rotKey *rlwe.SwitchingKey) {\n\tfor i := 0; i < rtg.beta; i++ {\n\t\trtg.ringQP.Copy(share.Value[i], rotKey.Value[i][0])\n\t\trtg.ringQP.Copy(crp[i], rotKey.Value[i][1])\n\t}\n}",
"func testKeyRotation(t *testing.T, dbStore keyRotator, newValidAlias string) data.PrivateKey {\n\ttestKey, err := utils.GenerateECDSAKey(rand.Reader)\n\trequire.NoError(t, err)\n\n\t// Test writing new key in database/cache\n\terr = dbStore.AddKey(trustmanager.KeyInfo{Role: data.CanonicalTimestampRole, Gun: \"gun/ignored\"}, testKey)\n\trequire.NoError(t, err)\n\n\t// Try rotating the key to a valid alias\n\terr = dbStore.RotateKeyPassphrase(testKey.ID(), newValidAlias)\n\trequire.NoError(t, err)\n\n\t// Try rotating the key to an invalid alias\n\terr = dbStore.RotateKeyPassphrase(testKey.ID(), \"invalidAlias\")\n\trequire.Error(t, err, \"there should be no password for invalidAlias so rotation should fail\")\n\n\treturn testKey\n}",
"func (c *Clac) Rot() error {\n\treturn c.rot(true)\n}",
"func TestSwizzlerRotateKeyBaseRole(t *testing.T) {\n\tf, origMeta := createNewSwizzler(t)\n\n\ttheRole := data.CanonicalSnapshotRole\n\tcs := signed.NewEd25519()\n\tpubKey, err := cs.Create(theRole, f.Gun, data.ED25519Key)\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, f.RotateKey(theRole, pubKey))\n\n\tfor role, metaBytes := range origMeta {\n\t\tnewMeta, err := f.MetadataCache.GetSized(role.String(), store.NoSizeLimit)\n\t\trequire.NoError(t, err)\n\n\t\tif role != data.CanonicalRootRole {\n\t\t\trequire.True(t, bytes.Equal(metaBytes, newMeta), \"bytes have changed for role %s\", role)\n\t\t} else {\n\t\t\trequire.False(t, bytes.Equal(metaBytes, newMeta))\n\t\t\torigSigned, newSigned := &data.SignedRoot{}, &data.SignedRoot{}\n\t\t\trequire.NoError(t, json.Unmarshal(metaBytes, origSigned))\n\t\t\trequire.NoError(t, json.Unmarshal(newMeta, newSigned))\n\t\t\trequire.NotEqual(t, []string{pubKey.ID()}, origSigned.Signed.Roles[theRole].KeyIDs)\n\t\t\trequire.Equal(t, []string{pubKey.ID()}, newSigned.Signed.Roles[theRole].KeyIDs)\n\t\t\t_, ok := origSigned.Signed.Keys[pubKey.ID()]\n\t\t\trequire.False(t, ok)\n\t\t\t_, ok = newSigned.Signed.Keys[pubKey.ID()]\n\t\t\trequire.True(t, ok)\n\t\t}\n\t}\n}",
"func rotate(s string, rot int) string {\n rot %= 26\n b := []byte(s)\n for i, c := range b {\n c |= 0x20\n if 'a' <= c && c <= 'z' {\n b[i] = alphabet[(int(('z'-'a'+1)+(c-'a'))+rot)%26]\n }\n }\n return string(b)\n}",
"func (r *Rover) rotate(com string) *Rover {\n\tr.rotators[r.Dir](com)\n\treturn r\n}",
"func (mt Mytoken) Rotate() *Mytoken {\n\trotated := mt\n\trotated.SeqNo++\n\tif rotated.Rotation.Lifetime > 0 {\n\t\trotated.ExpiresAt = unixtime.InSeconds(int64(rotated.Rotation.Lifetime))\n\t}\n\trotated.IssuedAt = unixtime.Now()\n\trotated.NotBefore = rotated.IssuedAt\n\trotated.jwt = \"\"\n\treturn &rotated\n}",
"func TestRotatePlainTextToEncrypted(t *testing.T) {\n\tdir, err := os.MkdirTemp(\"\", \"badger-test\")\n\trequire.NoError(t, err)\n\tdefer os.RemoveAll(dir)\n\n\t// Open DB without encryption.\n\topts := badger.DefaultOptions(dir)\n\tdb, err := badger.Open(opts)\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, db.Update(func(txn *badger.Txn) error {\n\t\treturn txn.Set([]byte(\"foo\"), []byte(\"bar\"))\n\t}))\n\n\trequire.NoError(t, db.Close())\n\n\t// Create an encryption key.\n\tkey := make([]byte, 32)\n\ty.Check2(rand.Read(key))\n\tfp, err := os.CreateTemp(\"\", \"*.key\")\n\trequire.NoError(t, err)\n\t_, err = fp.Write(key)\n\trequire.NoError(t, err)\n\tdefer fp.Close()\n\n\toldKeyPath = \"\"\n\tnewKeyPath = fp.Name()\n\tsstDir = dir\n\n\t// Enable encryption. newKeyPath is encrypted.\n\trequire.Nil(t, doRotate(nil, []string{}))\n\n\t// Try opening DB without the key.\n\topts.BlockCacheSize = 1 << 20\n\t_, err = badger.Open(opts)\n\trequire.EqualError(t, err, badger.ErrEncryptionKeyMismatch.Error())\n\n\t// Check whether db opens with the new key.\n\topts.EncryptionKey = key\n\tdb, err = badger.Open(opts)\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, db.View(func(txn *badger.Txn) error {\n\t\tiopt := badger.DefaultIteratorOptions\n\t\tit := txn.NewIterator(iopt)\n\t\tdefer it.Close()\n\t\tcount := 0\n\t\tfor it.Rewind(); it.Valid(); it.Next() {\n\t\t\tcount++\n\t\t}\n\t\trequire.Equal(t, 1, count)\n\t\treturn nil\n\t}))\n\trequire.NoError(t, db.Close())\n}",
"func rotate(arr []byte, k int) []byte {\n\tn := len(arr)\n\tdst := make([]byte, n)\n\tfor i := 0; i < n; i++ {\n\t\tdst[i] = arr[(i+k)%n]\n\t}\n\treturn dst\n}",
"func (mt Mytoken) Rotate() *Mytoken { // skipcq: CRT-P0003\n\trotated := mt\n\trotated.SeqNo++\n\tif rotated.Rotation.Lifetime > 0 {\n\t\trotated.ExpiresAt = unixtime.InSeconds(int64(rotated.Rotation.Lifetime))\n\t}\n\trotated.IssuedAt = unixtime.Now()\n\trotated.NotBefore = rotated.IssuedAt\n\trotated.jwt = \"\"\n\treturn &rotated\n}",
"func (c Chords) Rotate(k int) Chords {\n\tlength := len(c)\n\tif k < 0 || length == 0 {\n\t\treturn c\n\t}\n\n\tr := k % length\n\treturn append(c[k:], c[:r]...)\n}",
"func (e RotationValidationError) Key() bool { return e.key }",
"func (w *Writer) Rotate() {\n\tw.rot <- true\n}",
"func (ag *Agent) Rotate(v float32) {\n\tag.R += v\n}",
"func (tx *Tx) RotateSessionKey() (*Configuration, error) {\n\tconfig := &Configuration{\n\t\tSessionAuthKey: securecookie.GenerateRandomKey(32),\n\t\tSessionCryptKey: securecookie.GenerateRandomKey(32),\n\t}\n\tconfig, _, err := tx.UpdateConfiguration(config)\n\treturn config, err\n}",
"func TestEncryptionRotation(t *testing.T) {\n\tlibrary.TestEncryptionRotation(t, library.RotationScenario{\n\t\tBasicScenario: library.BasicScenario{\n\t\t\tNamespace: operatorclient.GlobalMachineSpecifiedConfigNamespace,\n\t\t\tLabelSelector: \"encryption.apiserver.operator.openshift.io/component\" + \"=\" + operatorclient.TargetNamespace,\n\t\t\tEncryptionConfigSecretName: fmt.Sprintf(\"encryption-config-%s\", operatorclient.TargetNamespace),\n\t\t\tEncryptionConfigSecretNamespace: operatorclient.GlobalMachineSpecifiedConfigNamespace,\n\t\t\tOperatorNamespace: operatorclient.OperatorNamespace,\n\t\t\tTargetGRs: operatorencryption.DefaultTargetGRs,\n\t\t\tAssertFunc: operatorencryption.AssertSecretsAndConfigMaps,\n\t\t},\n\t\tCreateResourceFunc: operatorencryption.CreateAndStoreSecretOfLife,\n\t\tGetRawResourceFunc: operatorencryption.GetRawSecretOfLife,\n\t\tUnsupportedConfigFunc: func(raw []byte) error {\n\t\t\toperatorClient := operatorencryption.GetOperator(t)\n\t\t\tapiServerOperator, err := operatorClient.Get(context.TODO(), \"cluster\", metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tapiServerOperator.Spec.UnsupportedConfigOverrides.Raw = raw\n\t\t\t_, err = operatorClient.Update(context.TODO(), apiServerOperator, metav1.UpdateOptions{})\n\t\t\treturn err\n\t\t},\n\t\tEncryptionProvider: configv1.EncryptionType(*provider),\n\t})\n}",
"func Rotate(v1, v2 Vect) Vect {\n\treturn Vect{v1.X*v2.X - v1.Y*v2.Y, v1.X*v2.Y + v1.Y*v2.X}\n}",
"func RotationalCipher(s string, key int) string {\n\tc := make([]byte, len(s))\n\tvar a int\n\tfor k, v := range s {\n\t\t// lower case letters\n\t\tif v <= 'z' && v >= 'a' {\n\t\t\ta = 'a'\n\t\t\t// upper case letters\n\t\t} else if v <= 'Z' && v >= 'A' {\n\t\t\ta = 'A'\n\t\t\t//punctuations\n\t\t} else {\n\t\t\tc[k] = s[k]\n\t\t\tcontinue\n\t\t}\n\t\tc[k] = byte(a + ((int(v)-a)+key)%26)\n\n\t}\n\treturn string(c)\n}",
"func (n Notes) Rotate(k int) Notes {\n\tif k < 0 {\n\t\tpanic(\"invalid rotation\")\n\t}\n\n\tr := k % len(n)\n\treturn append(n[k:], n[:r]...)\n}",
"func (journal *txJournal) rotate(all map[common.Address]common.Transactions) error {\n\treturn nil\n}",
"func (self *rsaKeyHolder) initEncryptionKey() {\n\tlog.Print(\"Generating JWE encryption key\")\n\tself.mux.Lock()\n\tdefer self.mux.Unlock()\n\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tself.key = privateKey\n}",
"func (text *TEXT) Rotate(r ...float64) *TEXT {\n\ttext.rotate = r\n\treturn text\n}",
"func (c *Clac) RotR() error {\n\treturn c.rotR(true)\n}",
"func RepeatingKey(plaintext []byte, key []byte) []byte {\n\tciphertext := make([]byte, len(plaintext))\n\tkeyLength := len(key)\n\tfor i, thisByte := range plaintext {\n\t\tciphertext[i] = thisByte ^ key[i%keyLength]\n\t}\n\treturn ciphertext\n}",
"func (item *Item) cipherKey(skey []byte) []byte {\n\tif item.Password == \"\" {\n\t\treturn skey\n\t}\n\tn := len(skey)\n\tk := make([]byte, n)\n\tp := []byte(item.Password)\n\t// key = (byte of password) + (bytes of default key)\n\tfor i := range k {\n\t\tif i < len(p) {\n\t\t\tk[i] = p[i]\n\t\t} else {\n\t\t\tk[i] = skey[i]\n\t\t}\n\t}\n\treturn k\n}",
"func (c2d *C2DMatrix) Rotate(rot float64) {\n\tvar mat Matrix\n\n\tvar Sin float64 = math.Sin(rot)\n\tvar Cos float64 = math.Cos(rot)\n\n\tmat.m11 = Cos\n\tmat.m12 = Sin\n\tmat.m13 = 0\n\n\tmat.m21 = -Sin\n\tmat.m22 = Cos\n\tmat.m23 = 0\n\n\tmat.m31 = 0\n\tmat.m32 = 0\n\tmat.m33 = 1\n\n\t//and multiply\n\tc2d.MatrixMultiply(mat)\n}",
"func (s *CreateJobOutput) SetRotate(v string) *CreateJobOutput {\n\ts.Rotate = &v\n\treturn s\n}",
"func (o BucketObjectCustomerEncryptionOutput) EncryptionKey() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BucketObjectCustomerEncryption) string { return v.EncryptionKey }).(pulumi.StringOutput)\n}",
"func (k *Filesystem) Encrypt(ctx context.Context, keyID string, plaintext []byte, aad []byte) ([]byte, error) {\n\tk.mu.RLock()\n\tdefer k.mu.RUnlock()\n\n\t// Find the most recent DEK - that's what we'll use for encryption\n\tpth := filepath.Join(k.root, keyID)\n\tinfos, err := os.ReadDir(pth)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to list keys: %w\", err)\n\t}\n\tif len(infos) < 1 {\n\t\treturn nil, fmt.Errorf(\"there are no key versions\")\n\t}\n\tvar latest fs.DirEntry\n\tfor _, info := range infos {\n\t\tif info.Name() == \"metadata\" {\n\t\t\tcontinue\n\t\t}\n\t\tif latest == nil {\n\t\t\tlatest = info\n\t\t\tcontinue\n\t\t}\n\t\tif info.Name() > latest.Name() {\n\t\t\tlatest = info\n\t\t}\n\t}\n\tif latest == nil {\n\t\treturn nil, fmt.Errorf(\"key %q does not exist\", keyID)\n\t}\n\n\tlatestPath := filepath.Join(pth, latest.Name())\n\tdek, err := os.ReadFile(latestPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read encryption key: %w\", err)\n\t}\n\n\tblock, err := aes.NewCipher(dek)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"bad cipher block: %w\", err)\n\t}\n\taesgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to wrap cipher block: %w\", err)\n\t}\n\tnonce := make([]byte, aesgcm.NonceSize())\n\tif _, err := io.ReadFull(rand.Reader, nonce); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to generate nonce: %w\", err)\n\t}\n\tciphertext := aesgcm.Seal(nonce, nonce, plaintext, aad)\n\n\t// Append the keyID to the ciphertext so we know which key to use to decrypt.\n\tid := []byte(latest.Name() + \":\")\n\tciphertext = append(id, ciphertext...)\n\n\treturn ciphertext, nil\n}",
"func (e aesGCMEncodedEncryptor) ConfiguredToRotate() bool {\n\treturn len(e.primaryKey) == requiredKeyLength && len(e.secondaryKey) == requiredKeyLength\n}",
"func rotateText(inputText string, rot int) string {\n rot %= 26\n rotatedText := []byte(inputText)\n\n for index, byteValue := range rotatedText {\n if byteValue >= 'a' && byteValue <= 'z' {\n rotatedText[index] = lowerCaseAlphabet[(int((26+(byteValue-'a')))+rot)%26]\n } else if byteValue >= 'A' && byteValue <= 'Z' {\n rotatedText[index] = upperCaseAlphabet[(int((26+(byteValue-'A')))+rot)%26]\n }\n }\n return string(rotatedText)\n}",
"func (lf *File) Rotate() {\n\tlf.ReOpen()\n}",
"func (ce *ColumnEncryptionProperties) Key() string { return ce.key }",
"func (self *rsaKeyHolder) recreate(obj runtime.Object) {\n\tsecret := obj.(*v1.Secret)\n\tlog.Printf(\"Synchronized secret %s has been deleted. Recreating.\", secret.Name)\n\tif err := self.synchronizer.Create(self.getEncryptionKeyHolder()); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func (l *FileWriter) rotate() error {\n\tif err := l.close(); err != nil {\n\t\treturn err\n\t}\n\tif err := l.openNew(); err != nil {\n\t\treturn err\n\t}\n\tl.mill()\n\treturn nil\n}",
"func (backend *JwtBackend) rotateSecret(storage logical.Storage, roleID string, secretID string, TTL int) (*secretStorageEntry, error) {\n\tif roleID == \"\" {\n\t\treturn nil, fmt.Errorf(\"Secrets Role ID is not specified\")\n\t}\n\n\tif secretID == \"\" {\n\t\treturn nil, fmt.Errorf(\"Secrets ID is not specified\")\n\t}\n\n\tsecretKey, _ := uuid.NewUUID()\n\tsalt, _ := backend.Salt()\n\tkey := salt.GetHMAC(secretKey.String())\n\n\tsecretEntry, err := backend.getSecretEntry(storage, roleID, secretID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsecretEntry.Key = key\n\tsecretEntry.CreationTime = time.Now().UTC()\n\tsecretEntry.Expiration = time.Now().Add(time.Duration(TTL) * time.Second).UTC()\n\n\tif err := backend.setSecretEntry(storage, secretEntry); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn secretEntry, nil\n}",
"func rotorsIncr(_key [3]int, _rotors [3]int) [3]int {\n\tvar notch = [5]int{16, 4, 21, 9, 25}\n\tif _key[1] == notch[_rotors[1]] {\n\t\t_key[0] = (_key[0] + 1) % 26\n\t\t_key[1] = (_key[1] + 1) % 26\n\t}\n\tif _key[2] == notch[_rotors[2]] {\n\t\t_key[1] = (_key[1] + 1) % 26\n\t}\n\t_key[2] = (_key[2] + 1) % 26\n\treturn _key\n}",
"func (cd *ColumnDecryptionProperties) Key() string { return cd.key }",
"func (s *JobOutput) SetRotate(v string) *JobOutput {\n\ts.Rotate = &v\n\treturn s\n}",
"func (q Quat) Rotate(other Quat) Quat {\n\treturn Quat{\n\t\t(other.W * q.W) - (other.X * q.X) - (other.Y * q.Y) - (other.Z * q.Z),\n\t\t(other.X * q.W) + (other.W * q.X) - (other.Z * q.Y) + (other.Y * q.Z),\n\t\t(other.Y * q.W) + (other.Z * q.X) + (other.W * q.Y) - (other.X * q.Z),\n\t\t(other.Z * q.W) - (other.Y * q.X) + (other.X * q.Y) + (other.W * q.Z),\n\t}\n}",
"func (ce *ColumnEncryptionProperties) WipeOutEncryptionKey() { ce.key = \"\" }",
"func (obj *key) Key() rsa.PublicKey {\n\treturn obj.ky\n}",
"func ConfiguredToRotate() bool {\n\treturn defaultEncryptor.ConfiguredToRotate()\n}",
"func (obj *key) Encrypt(msg []byte) ([]byte, error) {\n\th := sha256.New()\n\treturn rsa.EncryptOAEP(h, rand.Reader, &obj.ky, msg, []byte(\"\"))\n}",
"func (c *MysqlConn) Rotate(operation Operation) OpOutput {\n\tsp, err := GetMysqlOpQuery(operation)\n\tif err != nil {\n\t\treturn OpOutput{\n\t\t\tResult: nil,\n\t\t\tErr: err,\n\t\t}\n\t}\n\n\trows, err := c.c.Query(sp)\n\tif err != nil {\n\t\treturn OpOutput{Result: nil, Err: err}\n\t}\n\n\tvar key string\n\tvar value string\n\tresult := make(map[string]string)\n\tfor rows.Next() {\n\t\terr = rows.Scan(&key, &value)\n\t\tif err != nil {\n\t\t\treturn OpOutput{nil, err}\n\t\t}\n\t\tresult[key] = value\n\t}\n\n\treturn OpOutput{result, nil}\n}",
"func (p *siprng) rekey() {\n\tvar k [16]byte\n\tif _, err := io.ReadFull(rand.Reader, k[:]); err != nil {\n\t\tpanic(err.Error())\n\t}\n\tp.k0 = binary.LittleEndian.Uint64(k[0:8])\n\tp.k1 = binary.LittleEndian.Uint64(k[8:16])\n\tp.ctr = 1\n}",
"func Vrotate(v1, v2 Vect) Vect {\n\treturn goVect(C.cpvrotate(v1.c(), v2.c()))\n}",
"func (decryptor *PgDecryptor) ReadSymmetricKeyRotated(privateKeys []*keys.PrivateKey, reader io.Reader) ([]byte, []byte, error) {\n\tsymmetricKey, rawData, err := decryptor.binaryDecryptor.ReadSymmetricKeyRotated(privateKeys, reader)\n\tif err != nil {\n\t\treturn symmetricKey, rawData, err\n\t}\n\treturn symmetricKey, rawData, nil\n}",
"func (client JobClient) RenewKey(ctx context.Context, resourceGroupName string, accountName string, jobName string) (result JobSasTokenDescription, err error) {\n if tracing.IsEnabled() {\n ctx = tracing.StartSpan(ctx, fqdn + \"/JobClient.RenewKey\")\n defer func() {\n sc := -1\n if result.Response.Response != nil {\n sc = result.Response.Response.StatusCode\n }\n tracing.EndSpan(ctx, sc, err)\n }()\n }\n req, err := client.RenewKeyPreparer(ctx, resourceGroupName, accountName, jobName)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.JobClient\", \"RenewKey\", nil , \"Failure preparing request\")\n return\n }\n\n resp, err := client.RenewKeySender(req)\n if err != nil {\n result.Response = autorest.Response{Response: resp}\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.JobClient\", \"RenewKey\", resp, \"Failure sending request\")\n return\n }\n\n result, err = client.RenewKeyResponder(resp)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.JobClient\", \"RenewKey\", resp, \"Failure responding to request\")\n }\n\n return\n}",
"func (e *Entity) encryptionKey(now time.Time) (Key, bool) {\n\tcandidateSubkey := -1\n\n\t// Iterate the keys to find the newest key\n\tvar maxTime time.Time\n\tfor i, subkey := range e.Subkeys {\n\t\tif subkey.Sig.FlagsValid &&\n\t\t\tsubkey.Sig.FlagEncryptCommunications &&\n\t\t\tsubkey.PublicKey.PubKeyAlgo.CanEncrypt() &&\n\t\t\t!subkey.Sig.KeyExpired(now) &&\n\t\t\t(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {\n\t\t\tcandidateSubkey = i\n\t\t\tmaxTime = subkey.Sig.CreationTime\n\t\t}\n\t}\n\n\tif candidateSubkey != -1 {\n\t\tsubkey := e.Subkeys[candidateSubkey]\n\t\treturn Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true\n\t}\n\n\t// If we don't have any candidate subkeys for encryption and\n\t// the primary key doesn't have any usage metadata then we\n\t// assume that the primary key is ok. Or, if the primary key is\n\t// marked as ok to encrypt to, then we can obviously use it.\n\ti := e.primaryIdentity()\n\tif !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications &&\n\t\te.PrimaryKey.PubKeyAlgo.CanEncrypt() &&\n\t\t!i.SelfSignature.KeyExpired(now) {\n\t\treturn Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true\n\t}\n\n\t// This Entity appears to be signing only.\n\treturn Key{}, false\n}",
"func (transform *Transform) Rotate(angle float32, axis mgl32.Vec3) {\n\ttransform.objMatrix = mgl32.HomogRotate3D(angle, axis)\n}",
"func (canvas *Canvas) Rotate(theta float32) {\n\ts, c := math.Sin(float64(theta)), math.Cos(float64(theta))\n\twriteCommand(canvas.contents, \"cm\", c, s, -s, c, 0, 0)\n}",
"func (b *Bucket) RotateFileEncryptionKeysForPrefix(pre string) error {\n\tif b.Version == 0 {\n\t\treturn nil\n\t}\n\n\tfor p, md := range b.Metadata {\n\t\tif strings.HasPrefix(p, pre) {\n\t\t\tif md.Key != \"\" {\n\t\t\t\tkey, err := dcrypto.NewKey()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tmd.SetFileEncryptionKey(key)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (key *Key) MarshalJSON(passphrase string) ([]byte, error) {\n\tsalt, err := crypto.RandomEntropy(32)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdk, err := scrypt.Key([]byte(passphrase), salt, scryptN, scryptR, scryptP, scryptKeyLen)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tiv, err := crypto.RandomEntropy(aes.BlockSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tenckey := dk[:16]\n\n\tprivateKeyBytes, err := key.KeyPair.Private.Bytes()\n\tprivateKeyBytes = privateKeyBytes[4:]\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taesBlock, err := aes.NewCipher(enckey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstream := cipher.NewCTR(aesBlock, iv)\n\tcipherText := make([]byte, len(privateKeyBytes))\n\tstream.XORKeyStream(cipherText, privateKeyBytes)\n\n\tmac := crypto.Keccak256(dk[16:32], cipherText)\n\tcipherParamsJSON := cipherparamsJSON{\n\t\tIV: hex.EncodeToString(iv),\n\t}\n\n\tsp := ScryptParams{\n\t\tN: scryptN,\n\t\tR: scryptR,\n\t\tP: scryptP,\n\t\tDKeyLength: scryptKeyLen,\n\t\tSalt: hex.EncodeToString(salt),\n\t}\n\n\tkeyjson := cryptoJSON{\n\t\tCipher: ksCipher,\n\t\tCipherText: hex.EncodeToString(cipherText),\n\t\tCipherParams: cipherParamsJSON,\n\t\tKDF: nameKDF,\n\t\tKDFParams: sp,\n\t\tMAC: hex.EncodeToString(mac),\n\t}\n\n\tencjson := encryptedKeyJSON{\n\t\tAddress: key.KeyPair.Address,\n\t\tCrypto: keyjson,\n\t\tID: key.ID.String(),\n\t\tVersion: ksVersion,\n\t}\n\tdata, err := json.MarshalIndent(&encjson, \"\", \" \")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}",
"func Rekey(dbname string, oldPassphrase, newPassphrase []byte, newIter int) error {\n\treturn encdb.Rekey(dbname, oldPassphrase, newPassphrase, newIter)\n}",
"func (w *RotateWriter) rotate() (err error) {\n\t// Close existing file if open.\n\tif w.fp != nil {\n\t\terr = w.fp.Close()\n\t\tw.fp = nil\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Rename dest file if it already exists.\n\t_, err = os.Stat(w.filename)\n\tif err == nil {\n\t\trot := w.filename + \".\" + time.Now().Format(TimeFmt)\n\t\terr = os.Rename(w.filename, rot)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif w.Compress {\n\t\t\terr = w.compress(rot) // TODO: async\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Clean up old.\n\tw.drain()\n\n\t// Create new.\n\treturn w.open()\n}",
"func (s *Surface) Rotate(radians float64) {\n\ts.Ctx.Call(\"rotate\", 2*math.Pi-radians)\n}",
"func (t *transform) Rotate(rotate mgl32.Vec3) {\n\tt.dataLock.Lock()\n\tdefer t.dataLock.Unlock()\n\n\tt.rotation = t.rotation.Add(rotate)\n\ttotal := t.rotation\n\trotX := mgl32.HomogRotate3DX(total.X())\n\trotY := mgl32.HomogRotate3DY(total.Y())\n\trotZ := mgl32.HomogRotate3DZ(total.Z())\n\trotMatrix := rotZ.Mul4(rotY).Mul4(rotX)\n\ttrans := t.translation\n\tt.modelView = mgl32.Ident4().Mul4(mgl32.Translate3D(trans.X(), trans.Y(), trans.Z())).Mul4(rotMatrix)\n}",
"func (c Cardinal) RotateCW() Cardinal {\r\n\treturn (c + 1) % 4\r\n}",
"func Cipher(msg string, key string) string {\n\tvar ciphered string \n\tvar keylen int = len(key)\n\tif (keylen == 0) { return msg } // No key provided\n\tfor i := 0; i < len(msg); i++ {\n\t\tvar keyIndex int = i % keylen // Calculate the key index e.g. (i=10, keylen=4, keyIndex 2), (i=11, keylen=4, keyIndex=3)\n\t\tciphered += string(msg[i] ^ key[keyIndex])\n\t}\n\treturn ciphered\n}",
"func EncryptionKey() []byte {\n\treturn store.EncryptionKey\n}",
"func (this *Transformable) Rotate(angle float32) {\n\tC.sfTransformable_rotate(this.cptr, C.float(angle))\n}",
"func (w *FileLogWriter) Rotate() {\n\tw.rot <- true\n}",
"func AsJWK(key interface{}) (*jose.JsonWebKey, error) {\n\tJWK := jose.JsonWebKey{\n\t\tKey: key,\n\t\tAlgorithm: string(jose.RSA1_5),\n\t}\n\tthumbprint, err := JWK.Thumbprint(crypto.SHA256)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tJWK.KeyID = base64.URLEncoding.EncodeToString(thumbprint)\n\treturn &JWK, nil\n}",
"func (a *AuthTime) keyString() string {\n\treturn fmt.Sprintf(\"%d;%d\", a.KeyStartTime.Unix(), a.KeyEndTime.Unix())\n}",
"func (self *TileSprite) SetPreviousRotationA(member int) {\n self.Object.Set(\"previousRotation\", member)\n}",
"func (self *TileSprite) PreviousRotation() int{\n return self.Object.Get(\"previousRotation\").Int()\n}",
"func GenReencryptHash(runtime *config.ControlRuntime, keyName string) (string, error) {\n\n\tkeys, err := GetEncryptionKeys(runtime)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tnewKey := apiserverconfigv1.Key{\n\t\tName: keyName,\n\t\tSecret: \"12345\",\n\t}\n\tkeys = append(keys, newKey)\n\tb, err := json.Marshal(keys)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thash := sha256.Sum256(b)\n\treturn hex.EncodeToString(hash[:]), nil\n}",
"func (c *canvasRenderer) Rotate(angle sprec.Angle) {\n\tc.currentLayer.Transform = sprec.Mat4Prod(\n\t\tc.currentLayer.Transform,\n\t\tsprec.RotationMat4(angle, 0.0, 0.0, 1.0),\n\t)\n}",
"func (key twofishKey) Key() []byte {\n\treturn key[:]\n}",
"func (m *Manager) RotateSSH(name string, gOpt operator.Options, skipConfirm bool) error {\n\tmetadata, err := m.meta(name)\n\tif err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) &&\n\t\t!errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) {\n\t\treturn err\n\t}\n\n\ttopo := metadata.GetTopology()\n\tbase := metadata.GetBaseMeta()\n\tif !skipConfirm {\n\t\tif err := tui.PromptForConfirmOrAbortError(\n\t\t\t\"This operation will rotate ssh keys for user '%s' .\\nDo you want to continue? [y/N]:\",\n\t\t\tbase.User); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar rotateSSHTasks []*task.StepDisplay // tasks which are used to initialize environment\n\tuniqueHosts, _ := getMonitorHosts(topo)\n\tfor host, hostInfo := range uniqueHosts {\n\t\tt, err := m.sshTaskBuilder(name, topo, base.User, gOpt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tt = t.RotateSSH(host, base.User, m.specManager.Path(name, \"ssh\", \"new.pub\"))\n\n\t\trotateSSHTasks = append(rotateSSHTasks, t.BuildAsStep(fmt.Sprintf(\" - Rotate ssh key on %s:%d\", host, hostInfo.ssh)))\n\t}\n\n\tbuilder := task.NewBuilder(m.logger).\n\t\tStep(\"+ Generate new SSH keys\",\n\t\t\ttask.NewBuilder(m.logger).\n\t\t\t\tSSHKeyGen(m.specManager.Path(name, \"ssh\", \"new\")).\n\t\t\t\tBuild(),\n\t\t\tm.logger).\n\t\tParallelStep(\"+ rotate ssh keys of target host environments\", false, rotateSSHTasks...).\n\t\tStep(\"+ overwrite old SSH keys\",\n\t\t\ttask.NewBuilder(m.logger).\n\t\t\t\tFunc(\"rename\", func(ctx context.Context) error {\n\t\t\t\t\terr := os.Rename(m.specManager.Path(name, \"ssh\", \"new.pub\"), m.specManager.Path(name, \"ssh\", \"id_rsa.pub\"))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\terr = os.Rename(m.specManager.Path(name, \"ssh\", \"new\"), m.specManager.Path(name, \"ssh\", \"id_rsa\"))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t}).\n\t\t\t\tBuild(),\n\t\t\tm.logger)\n\n\tctx := ctxt.New(\n\t\tcontext.Background(),\n\t\tgOpt.Concurrency,\n\t\tm.logger,\n\t)\n\tif err := builder.Build().Execute(ctx); err != nil {\n\t\tif errorx.Cast(err) != nil {\n\t\t\t// FIXME: Map possible task errors and give suggestions.\n\t\t\treturn err\n\t\t}\n\t\treturn err\n\t}\n\tm.logger.Infof(\"ssh keys are successfully updated\")\n\treturn nil\n}",
"func (eval *evaluator) RotateHoisted(ctIn *Ciphertext, rotations []int, ctOut map[int]*Ciphertext) {\n\tlevelQ := ctIn.Level()\n\teval.DecomposeNTT(levelQ, eval.params.PCount()-1, eval.params.PCount(), ctIn.Value[1], eval.PoolDecompQP)\n\tfor _, i := range rotations {\n\t\tif i == 0 {\n\t\t\tctOut[i].Copy(ctIn)\n\t\t} else {\n\t\t\teval.PermuteNTTHoisted(levelQ, ctIn.Value[0], ctIn.Value[1], eval.PoolDecompQP, i, ctOut[i].Value[0], ctOut[i].Value[1])\n\t\t}\n\t}\n}",
"func (self *Graphics) SetPreviousRotationA(member int) {\n self.Object.Set(\"previousRotation\", member)\n}",
"func (c *Cipher) ReKey(key, nonce []byte) error {\n\tc.Reset()\n\treturn c.doReKey(key, nonce)\n}",
"func (c Cardinal) RotateCCW() Cardinal {\r\n\treturn (c + 3) % 4\r\n}",
"func rc4K(key []byte, ciphertext []byte) ([]byte, error) {\n\tcipher, err := rc4P.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := make([]byte, len(ciphertext))\n\tcipher.XORKeyStream(result, ciphertext)\n\treturn result, nil\n}",
"func encryptionKey(e *openpgp.Entity, now time.Time) (openpgp.Key, bool) {\n\tcandidateSubkey := -1\n\n\t// Iterate the keys to find the newest key\n\tvar maxTime time.Time\n\tfor i, subkey := range e.Subkeys {\n\t\tif subkey.Sig.FlagsValid &&\n\t\t\tsubkey.Sig.FlagEncryptCommunications &&\n\t\t\tsubkey.PublicKey.PubKeyAlgo.CanEncrypt() &&\n\t\t\t!subkey.PublicKey.KeyExpired(subkey.Sig, now) &&\n\t\t\t(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {\n\t\t\tcandidateSubkey = i\n\t\t\tmaxTime = subkey.Sig.CreationTime\n\t\t}\n\t}\n\n\tif candidateSubkey != -1 {\n\t\tsubkey := e.Subkeys[candidateSubkey]\n\t\treturn *entitySubkeyToKey(e, &subkey), true\n\t}\n\n\t// If we don't have any candidate subkeys for encryption and\n\t// the primary key doesn't have any usage metadata then we\n\t// assume that the primary key is ok. Or, if the primary key is\n\t// marked as ok to encrypt to, then we can obviously use it.\n\ti := primaryIdentity(e)\n\tif !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && e.PrimaryKey.PubKeyAlgo.CanEncrypt() && !i.SelfSignature.SigExpired(now) {\n\t\treturn *entityToKey(e, i), true\n\t}\n\n\t// This Entity appears to be signing only.\n\treturn openpgp.Key{}, false\n}",
"func (j *JWT) KeyFunc(token *gojwt.Token) (interface{}, error) {\n\tif _, ok := token.Method.(*gojwt.SigningMethodHMAC); ok {\n\t\treturn []byte(j.Secret), nil\n\t} else if _, ok := token.Method.(*gojwt.SigningMethodRSA); ok {\n\t\treturn j.KeyFuncRS256(token)\n\t}\n\treturn nil, fmt.Errorf(\"unexpected signing method: %v\", token.Header[\"alg\"])\n}",
"func (self *Graphics) PreviousRotation() int{\n return self.Object.Get(\"previousRotation\").Int()\n}",
"func (self *botStats) rotate(date flap.EpochTime, rdd flap.Days, t db.Table) {\n\ti:= len(self.Rows) -1\n\tself.Rows[i].Date = date\n\tself.Rows[i].Entries +=1\n\tif self.Rows[i].Entries == int(rdd) {\n\t\tself.newRow()\n\t}\n\tself.save(t,i)\n}",
"func (r *ImageRef) Rotate(angle Angle) error {\n\twidth := r.Width()\n\n\tif r.Pages() > 1 && (angle == Angle90 || angle == Angle270) {\n\t\tif angle == Angle270 {\n\t\t\tif err := r.Flip(DirectionHorizontal); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif err := r.Grid(r.GetPageHeight(), r.Pages(), 1); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif angle == Angle270 {\n\t\t\tif err := r.Flip(DirectionHorizontal); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tout, err := vipsRotate(r.image, angle)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.setImage(out)\n\n\tif r.Pages() > 1 && (angle == Angle90 || angle == Angle270) {\n\t\tif err := r.SetPageHeight(width); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (k *DecryptionKey) Marshal() ([]byte, error) {\n\treturn ic.MarshalPrivateKey(k.sk)\n}",
"func (r *RGBARotator) Rotate180() {\n\tsrc := r.Img\n\tsrcB := src.Bounds()\n\tsrcWidth := srcB.Dx()\n\tsrcHeight := srcB.Dy()\n\n\tdst := image.NewRGBA(image.Rect(0, 0, srcWidth, srcHeight))\n\n\tvar x, y, srcIdx, dstIdx int64\n\tmaxX, maxY := int64(srcWidth), int64(srcHeight)\n\tsrcStride, dstStride := int64(src.Stride), int64(dst.Stride)\n\tsrcPix := src.Pix\n\tdstPix := dst.Pix\n\tfor y = 0; y < maxY; y++ {\n\t\tfor x = 0; x < maxX; x++ {\n\t\t\tsrcIdx = y*srcStride + (x << 2)\n\t\t\tdstIdx = (maxY-1-y)*dstStride + ((maxX - 1 - x) << 2)\n\t\t\tcopy(dstPix[dstIdx:dstIdx+4], srcPix[srcIdx:srcIdx+4])\n\t\t}\n\t}\n\n\tr.Img = dst\n}",
"func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {\n\tauthArray := []byte(auth)\n\tsalt := GetEntropyCSPRNG(32)\n\tderivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tencryptKey := derivedKey[:16]\n\tkeyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tiv := GetEntropyCSPRNG(aes.BlockSize) // 16\n\tcipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmac := NewHash(derivedKey[16:32], cipherText)\n\n\tscryptParamsJSON := make(map[string]interface{}, 5)\n\tscryptParamsJSON[\"n\"] = scryptN\n\tscryptParamsJSON[\"r\"] = scryptR\n\tscryptParamsJSON[\"p\"] = scryptP\n\tscryptParamsJSON[\"dklen\"] = scryptDKLen\n\tscryptParamsJSON[\"salt\"] = hex.EncodeToString(salt)\n\n\tcipherParamsJSON := cipherparamsJSON{\n\t\tIV: hex.EncodeToString(iv),\n\t}\n\n\tcryptoStruct := cryptoJSON{\n\t\tCipher: \"aes-128-ctr\",\n\t\tCipherText: hex.EncodeToString(cipherText),\n\t\tCipherParams: cipherParamsJSON,\n\t\tKDF: keyHeaderKDF,\n\t\tKDFParams: scryptParamsJSON,\n\t\tMAC: hex.EncodeToString(mac.Bytes()),\n\t}\n\n\tencryptedKeyJSONV3 := encryptedKeyJSONV3{\n\t\tkey.Address,\n\t\tcryptoStruct,\n\t\tkey.Id.String(),\n\t\tversion,\n\t}\n\treturn json.Marshal(encryptedKeyJSONV3)\n}",
"func decrypt(_message string, _rotors [3]int, _ref int, _key [3]int) string {\n\tvar builder strings.Builder\n\n\tfor _, char := range _message {\n\t\t_key = rotorsIncr(_key, _rotors)\n\t\tvar rd = (byte(rotors[_rotors[2]][(byte(char)-65+byte(_key[2])+26)%26]) - 65 + 26 - byte(_key[2])) % 26\n\t\tvar rm = (byte(rotors[_rotors[1]][(rd+byte(_key[1])+26)%26]) - 65 + 26 - byte(_key[1])) % 26\n\t\tvar rg = (byte(rotors[_rotors[0]][(rm+byte(_key[0])+26)%26]) - 65 + 26 - byte(_key[0])) % 26\n\t\tvar r = byte(rotors[_ref][rg] - 65)\n\n\t\tvar rg2 = (byte(rotorsInv[_rotors[0]][(r+byte(_key[0])+26)%26]) - 65 + 26 - byte(_key[0])) % 26\n\t\tvar rm2 = (byte(rotorsInv[_rotors[1]][(rg2+byte(_key[1])+26)%26]) - 65 + 26 - byte(_key[1])) % 26\n\t\tvar rd2 = (byte(rotorsInv[_rotors[2]][(rm2+byte(_key[2])+26)%26]) - 65 + 26 - byte(_key[2])) % 26\n\t\tbuilder.WriteRune(rune(rd2 + 65))\n\t}\n\n\treturn builder.String()\n}",
"func (t *Tree) leftRotate(x *Node) {\n\ty := x.right\n\tx.right = y.left\n\tif y.left != nil {\n\t\ty.left.p = x\n\t}\n\tt.transplant(x, y)\n\ty.left = x\n\tx.p = y\n}"
] | [
"0.68230635",
"0.66989833",
"0.66093475",
"0.647618",
"0.6370345",
"0.6170176",
"0.60946894",
"0.6072444",
"0.5827766",
"0.5811087",
"0.5599854",
"0.5597232",
"0.55714077",
"0.55631316",
"0.55575067",
"0.55345094",
"0.55277187",
"0.549797",
"0.5486713",
"0.545797",
"0.5429036",
"0.5378346",
"0.53783274",
"0.5357436",
"0.5314408",
"0.5300428",
"0.52864677",
"0.5284503",
"0.52474976",
"0.5246306",
"0.52344793",
"0.5229894",
"0.5223779",
"0.52181166",
"0.51930183",
"0.5153825",
"0.5145179",
"0.513208",
"0.5121423",
"0.5106245",
"0.50927675",
"0.50799125",
"0.50498104",
"0.5041327",
"0.5033124",
"0.5025203",
"0.5017004",
"0.5011344",
"0.49986583",
"0.4994486",
"0.4992916",
"0.49871346",
"0.49832487",
"0.4982121",
"0.49747705",
"0.496962",
"0.49681312",
"0.4954282",
"0.49541104",
"0.49461126",
"0.49408337",
"0.49377286",
"0.4936801",
"0.49272928",
"0.49159196",
"0.4915233",
"0.4912421",
"0.4906594",
"0.49009353",
"0.48956466",
"0.48855308",
"0.48813957",
"0.48791447",
"0.48776087",
"0.4877485",
"0.4873049",
"0.48706427",
"0.48697004",
"0.48628548",
"0.48496026",
"0.48395374",
"0.48369423",
"0.48344013",
"0.4831111",
"0.48297057",
"0.482435",
"0.48176935",
"0.48158333",
"0.4797376",
"0.4797112",
"0.4790722",
"0.4784462",
"0.4779278",
"0.47781667",
"0.47714648",
"0.47611937",
"0.47581863",
"0.475749",
"0.47543073",
"0.47535002"
] | 0.7712946 | 0 |
Start the batch key rotation job, resumes if there was a pending job via "job.ID" | func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
ri := &batchJobInfo{
JobID: job.ID,
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.load(ctx, api, job); err != nil {
return err
}
globalBatchJobsMetrics.save(job.ID, ri)
lastObject := ri.Object
delay := job.KeyRotate.Flags.Retry.Delay
if delay == 0 {
delay = batchKeyRotateJobDefaultRetryDelay
}
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
skip := func(info FileInfo) (ok bool) {
if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan {
// skip all objects that are newer than specified older duration
return false
}
if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan {
// skip all objects that are older than specified newer duration
return false
}
if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(info.ModTime) {
// skip all objects that are created before the specified time.
return false
}
if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(info.ModTime) {
// skip all objects that are created after the specified time.
return false
}
if len(r.Flags.Filter.Tags) > 0 {
// Only parse object tags if tags filter is specified.
tagMap := map[string]string{}
tagStr := info.Metadata[xhttp.AmzObjectTagging]
if len(tagStr) != 0 {
t, err := tags.ParseObjectTags(tagStr)
if err != nil {
return false
}
tagMap = t.ToMap()
}
for _, kv := range r.Flags.Filter.Tags {
for t, v := range tagMap {
if kv.Match(BatchKeyRotateKV{Key: t, Value: v}) {
return true
}
}
}
// None of the provided tags filter match skip the object
return false
}
if len(r.Flags.Filter.Metadata) > 0 {
for _, kv := range r.Flags.Filter.Metadata {
for k, v := range info.Metadata {
if !strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") && !isStandardHeader(k) {
continue
}
// We only need to match x-amz-meta or standardHeaders
if kv.Match(BatchKeyRotateKV{Key: k, Value: v}) {
return true
}
}
}
// None of the provided metadata filters match skip the object.
return false
}
if r.Flags.Filter.KMSKeyID != "" {
if v, ok := info.Metadata[xhttp.AmzServerSideEncryptionKmsID]; ok && strings.TrimPrefix(v, crypto.ARNPrefix) != r.Flags.Filter.KMSKeyID {
return false
}
}
return true
}
workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_KEYROTATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
if err != nil {
return err
}
wk, err := workers.New(workerSize)
if err != nil {
// invalid worker size.
return err
}
retryAttempts := ri.RetryAttempts
ctx, cancel := context.WithCancel(ctx)
results := make(chan ObjectInfo, 100)
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, ObjectOptions{
WalkMarker: lastObject,
WalkFilter: skip,
}); err != nil {
cancel()
// Do not need to retry if we can't list objects on source.
return err
}
for result := range results {
result := result
sseKMS := crypto.S3KMS.IsEncrypted(result.UserDefined)
sseS3 := crypto.S3.IsEncrypted(result.UserDefined)
if !sseKMS && !sseS3 { // neither sse-s3 nor sse-kms disallowed
continue
}
wk.Take()
go func() {
defer wk.Give()
for attempts := 1; attempts <= retryAttempts; attempts++ {
attempts := attempts
stopFn := globalBatchJobsMetrics.trace(batchKeyRotationMetricObject, job.ID, attempts, result)
success := true
if err := r.KeyRotate(ctx, api, result); err != nil {
stopFn(err)
logger.LogIf(ctx, err)
success = false
} else {
stopFn(nil)
}
ri.trackCurrentBucketObject(r.Bucket, result, success)
ri.RetryAttempts = attempts
globalBatchJobsMetrics.save(job.ID, ri)
				// persist in-memory state to disk after every 10 seconds.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
if success {
break
}
}
}()
}
wk.Wait()
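	// All workers are done; record the final job state, persist it, and send the completion notification.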
ri.Complete = ri.ObjectsFailed == 0
ri.Failed = ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
buf, _ := json.Marshal(ri)
if err := r.Notify(ctx, bytes.NewReader(buf)); err != nil {
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
}
cancel()
if ri.Failed {
ri.ObjectsFailed = 0
ri.Bucket = ""
ri.Object = ""
ri.Objects = 0
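		// Back off with random jitter before returning.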
time.Sleep(delay + time.Duration(rnd.Float64()*float64(delay)))
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request) {\n\tctx := newContext(r, w, \"StartBatchJob\")\n\n\tdefer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))\n\n\tobjectAPI, creds := validateAdminReq(ctx, w, r, iampolicy.StartBatchJobAction)\n\tif objectAPI == nil {\n\t\treturn\n\t}\n\n\tbuf, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\twriteErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)\n\t\treturn\n\t}\n\n\tuser := creds.AccessKey\n\tif creds.ParentUser != \"\" {\n\t\tuser = creds.ParentUser\n\t}\n\n\tjob := &BatchJobRequest{}\n\tif err = yaml.Unmarshal(buf, job); err != nil {\n\t\twriteErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)\n\t\treturn\n\t}\n\n\tjob.ID = shortuuid.New()\n\tjob.User = user\n\tjob.Started = time.Now()\n\n\tif err := job.save(ctx, objectAPI); err != nil {\n\t\twriteErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)\n\t\treturn\n\t}\n\n\tif err = globalBatchJobPool.queueJob(job); err != nil {\n\t\twriteErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)\n\t\treturn\n\t}\n\n\tbuf, err = json.Marshal(&madmin.BatchJobResult{\n\t\tID: job.ID,\n\t\tType: job.Type(),\n\t\tStarted: job.Started,\n\t\tUser: job.User,\n\t})\n\tif err != nil {\n\t\twriteErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)\n\t\treturn\n\t}\n\n\twriteSuccessResponseJSON(w, buf)\n}",
"func StartJob(\n\tctx context.Context,\n\tconn sqlexec.SQLExecutor,\n\tjobID int64,\n) error {\n\tctx = util.WithInternalSourceType(ctx, kv.InternalLoadData)\n\t_, err := conn.ExecuteInternal(ctx,\n\t\t`UPDATE mysql.load_data_jobs\n\t\tSET start_time = CURRENT_TIMESTAMP(6), update_time = CURRENT_TIMESTAMP(6)\n\t\tWHERE job_id = %? AND start_time IS NULL;`,\n\t\tjobID)\n\treturn err\n}",
"func (m *Manager) Start(ID string) error {\n\te, ok := m.Entries[ID]\n\tif !ok {\n\t\treturn ErrorInvalidJobID\n\t}\n\te.Start(m.ctx)\n\treturn nil\n}",
"func (c *client) startNewJob(ctx context.Context, opts launcher.LaunchOptions, jobInterface v12.JobInterface, ns string, safeName string, safeSha string) ([]runtime.Object, error) {\n\tlog.Logger().Infof(\"about to create a new job for name %s and sha %s\", safeName, safeSha)\n\n\t// lets see if we are using a version stream to store the git operator configuration\n\tfolder := filepath.Join(opts.Dir, \"versionStream\", \"git-operator\")\n\texists, err := files.DirExists(folder)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to check if folder exists %s\", folder)\n\t}\n\tif !exists {\n\t\t// lets try the original location\n\t\tfolder = filepath.Join(opts.Dir, \".jx\", \"git-operator\")\n\t}\n\n\tjobFileName := \"job.yaml\"\n\n\tfileNamePath := filepath.Join(opts.Dir, \".jx\", \"git-operator\", \"filename.txt\")\n\texists, err = files.FileExists(fileNamePath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to check for file %s\", fileNamePath)\n\t}\n\tif exists {\n\t\tdata, err := ioutil.ReadFile(fileNamePath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to load file %s\", fileNamePath)\n\t\t}\n\t\tjobFileName = strings.TrimSpace(string(data))\n\t\tif jobFileName == \"\" {\n\t\t\treturn nil, errors.Errorf(\"the job name file %s is empty\", fileNamePath)\n\t\t}\n\t}\n\n\tfileName := filepath.Join(folder, jobFileName)\n\texists, err = files.FileExists(fileName)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to find file %s in repository %s\", fileName, safeName)\n\t}\n\tif !exists {\n\t\treturn nil, errors.Errorf(\"repository %s does not have a Job file: %s\", safeName, fileName)\n\t}\n\n\tresource := &v1.Job{}\n\terr = yamls.LoadFile(fileName, resource)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to load Job file %s in repository %s\", fileName, safeName)\n\t}\n\n\tif !opts.NoResourceApply {\n\t\t// now lets check if there is a resources dir\n\t\tresourcesDir := filepath.Join(folder, \"resources\")\n\t\texists, err = files.DirExists(resourcesDir)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to check if resources directory %s exists in repository %s\", resourcesDir, safeName)\n\t\t}\n\t\tif exists {\n\t\t\tabsDir, err := filepath.Abs(resourcesDir)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to get absolute resources dir %s\", resourcesDir)\n\t\t\t}\n\n\t\t\tcmd := &cmdrunner.Command{\n\t\t\t\tName: \"kubectl\",\n\t\t\t\tArgs: []string{\"apply\", \"-f\", absDir},\n\t\t\t}\n\t\t\tlog.Logger().Infof(\"running command: %s\", cmd.CLI())\n\t\t\t_, err = c.runner(cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to apply resources in dir %s\", absDir)\n\t\t\t}\n\t\t}\n\t}\n\n\t// lets try use a maximum of 31 characters and a minimum of 10 for the sha\n\tnamePrefix := trimLength(safeName, 20)\n\n\tid := uuid.New().String()\n\tresourceName := namePrefix + \"-\" + id\n\n\tresource.Name = resourceName\n\n\tif resource.Labels == nil {\n\t\tresource.Labels = map[string]string{}\n\t}\n\tresource.Labels[constants.DefaultSelectorKey] = constants.DefaultSelectorValue\n\tresource.Labels[launcher.RepositoryLabelKey] = safeName\n\tresource.Labels[launcher.CommitShaLabelKey] = safeSha\n\n\tr2, err := jobInterface.Create(ctx, resource, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create Job %s in namespace %s\", resourceName, ns)\n\t}\n\tlog.Logger().Infof(\"created Job %s in namespace %s\", resourceName, 
ns)\n\treturn []runtime.Object{r2}, nil\n}",
"func (w *Worker) Start(lg *logrus.Logger) {\n\tlg.Infof(\"starting worker %v \\n\", w.ID)\n\tgo func() {\n\t\tappFS := afero.NewOsFs()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase jid := <-w.WorkQueue:\n\t\t\t\t// Receive a work request.\n\t\t\t\tlg.WithFields(logrus.Fields{\n\t\t\t\t\t\"WID\": w.ID,\n\t\t\t\t\t\"JID\": jid.JobID,\n\t\t\t\t}).Debug(\"received work request\")\n\t\t\t\twork, err := w.js.GetJobByID(jid.JobID)\n\t\t\t\tlg.WithFields(logrus.Fields{\n\t\t\t\t\t\"WID\": w.ID,\n\t\t\t\t\t\"JID\": jid.JobID,\n\t\t\t\t\t\"job\": work,\n\t\t\t\t}).Debug(\"got job\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tlg.WithFields(logrus.Fields{\n\t\t\t\t\t\t\"error\": err,\n\t\t\t\t\t\t\"JID\": jid.JobID,\n\t\t\t\t\t\t\"job\": work,\n\t\t\t\t\t}).Error(\"error getting job, aborting...\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif work.Status == \"CANCELLED\" {\n\t\t\t\t\tlg.WithFields(logrus.Fields{\n\t\t\t\t\t\t\"WID\": w.ID,\n\t\t\t\t\t\t\"JID\": jid.JobID,\n\t\t\t\t\t}).Debug(\"job detected as cancelled\")\n\t\t\t\t\tw.UpdateQueue <- &JobUpdate{\n\t\t\t\t\t\tJob: work,\n\t\t\t\t\t\tmsg: \"cancelled job\",\n\t\t\t\t\t\terr: err,\n\t\t\t\t\t\tShouldUpdate: false,\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\trs := runner.RSettings{\n\t\t\t\t\tRpath: work.Rscript.RPath,\n\t\t\t\t\tEnvVars: work.Rscript.Renv,\n\t\t\t\t}\n\t\t\t\tes := runner.ExecSettings{\n\t\t\t\t\tWorkDir: work.Rscript.WorkDir,\n\t\t\t\t\tRfile: work.Rscript.RscriptPath,\n\t\t\t\t}\n\t\t\t\twork.Status = \"RUNNING\"\n\t\t\t\twork.RunDetails.StartTime = time.Now().UTC()\n\t\t\t\tlg.WithFields(logrus.Fields{\n\t\t\t\t\t\"WID\": w.ID,\n\t\t\t\t\t\"JID\": jid.JobID,\n\t\t\t\t\t\"job\": work,\n\t\t\t\t}).Debug(\"starting Rscript\")\n\t\t\t\tw.UpdateQueue <- &JobUpdate{\n\t\t\t\t\tJob: work,\n\t\t\t\t\tmsg: \"starting job\",\n\t\t\t\t\tShouldUpdate: true,\n\t\t\t\t}\n\t\t\t\tresult, err, exitCode := runner.RunRscript(appFS, rs, es, lg)\n\t\t\t\twork.RunDetails.EndTime = time.Now().UTC()\n\t\t\t\tif err != nil {\n\t\t\t\t\twork.RunDetails.Error = err.Error()\n\t\t\t\t}\n\t\t\t\tlg.WithFields(logrus.Fields{\n\t\t\t\t\t\"WID\": w.ID,\n\t\t\t\t\t\"JID\": work.ID,\n\t\t\t\t\t\"Duration\": work.RunDetails.EndTime.Sub(work.RunDetails.StartTime),\n\t\t\t\t}).Debug(\"completed job\")\n\t\t\t\twork.Result.Output = result\n\t\t\t\twork.Result.ExitCode = int32(exitCode)\n\t\t\t\tif exitCode == 0 {\n\t\t\t\t\twork.Status = \"COMPLETED\"\n\t\t\t\t} else {\n\t\t\t\t\twork.Status = \"ERROR\"\n\t\t\t\t}\n\t\t\t\tw.UpdateQueue <- &JobUpdate{\n\t\t\t\t\tJob: work,\n\t\t\t\t\tmsg: \"completed job\",\n\t\t\t\t\terr: err,\n\t\t\t\t\tShouldUpdate: true,\n\t\t\t\t}\n\n\t\t\tcase <-w.Quit:\n\t\t\t\t// We have been asked to stop.\n\t\t\t\tlg.Printf(\"worker%d stopping\\n\", w.ID)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}",
"func (r *Runner) Start(job Job) error {\n\tselect {\n\tcase r.jobs <- job:\n\t\treturn nil\n\tdefault:\n\t\treturn errors.New(\"jobs queue overflowed\")\n\t}\n}",
"func Enqueue(job *models.Job) {\n\n\tsession, err := mgo.Dial(os.Getenv(\"MONGODB_URI\"))\n\tif err != nil {\n\t\tlogInternalError(\"DB SESSION\", err)\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tfailed(session, job, fmt.Errorf(\"%v\", err))\n\t\t}\n\t\tsession.Close()\n\t}()\n\n\tc := models.Jobs(session)\n\n\tif err = c.UpdateId(job.ID, bson.M{\n\t\t\"$set\": bson.M{\n\t\t\t\"status\": models.Running,\n\t\t\t\"started_at\": time.Now(),\n\t\t},\n\t}); err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\n\tif err = c.FindId(job.ID).One(job); err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\n\tmachine, err := fetchMachineConfig()\n\tif err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\tif len(job.Workflow) == 0 {\n\t\tfailed(session, job, fmt.Errorf(\"No any workflow specified\"))\n\t\treturn\n\t}\n\timg := job.Workflow[0]\n\n\tenv := []string{\n\t\tfmt.Sprintf(\"REFERENCE=%s\", \"GRCh37.fa\"),\n\t}\n\tfor key, input := range job.Resource.Inputs {\n\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", key, input))\n\t}\n\tfor key, param := range job.Parameters {\n\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", key, param))\n\t}\n\n\t// Ensure outputs directory exsits.\n\tos.MkdirAll(filepath.Join(job.Resource.URL, \"out\"), os.ModePerm)\n\n\targ := daap.Args{\n\t\tMachine: machine,\n\t\tMounts: []daap.Mount{\n\t\t\t// Mount inputs and outpus directory.\n\t\t\tdaap.Volume(job.Resource.URL, \"/var/data\"),\n\t\t},\n\t\tEnv: env,\n\t}\n\n\tprocess := daap.NewProcess(img, arg)\n\n\tctx := context.Background()\n\tif err = process.Run(ctx); err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\n\tout, err := ioutil.ReadAll(process.Stdout)\n\tif err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\tserr, err := ioutil.ReadAll(process.Stderr)\n\tif err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\tapplog, err := ioutil.ReadAll(process.Log)\n\tif err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\n\terr = models.Jobs(session).UpdateId(job.ID, bson.M{\n\t\t\"$set\": bson.M{\n\t\t\t\"stdout\": string(out),\n\t\t\t\"stderr\": string(serr),\n\t\t\t\"applog\": string(applog),\n\t\t},\n\t})\n\tif err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\n\t// TODO: Use \"Salamander\"\n\tresults, err := detectResultFiles(job)\n\tif err != nil {\n\t\tfailed(session, job, err)\n\t\treturn\n\t}\n\n\tif err := c.UpdateId(job.ID, bson.M{\n\t\t\"$set\": bson.M{\n\t\t\t\"status\": models.Completed,\n\t\t\t\"results\": results,\n\t\t\t\"finished_at\": time.Now(),\n\t\t},\n\t}); err != nil {\n\t\tfailed(session, job, err)\n\t}\n\n}",
"func (j *AuroraJob) JobKey() *aurora.JobKey {\n\treturn j.jobConfig.Key\n}",
"func (self *JobPipeline) Start() {\n\tself.host, _ = os.Hostname()\n\tself.id = fmt.Sprintf(\"%s|%s\", self.TaskName, self.host)\n\tself.refresh_interval = time.Second\n\tself.last_update = time.Now().Add(-1 * self.refresh_interval)\n\tself.queue = []*Document{}\n\tself.running_jobs = map[string]bool{}\n\n\tlog.Printf(\"Starting %s\", self.TaskName)\n\tself.workers = make([]*Worker, self.NumWorkers)\n\tfor i := 0; i < self.NumWorkers; i++ {\n\t\tself.workers[i] = &Worker{Client: self.Client, Task: self.Task}\n\t\tself.workers[i].Start()\n\t}\n}",
"func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {\n\tri := &batchJobInfo{\n\t\tJobID: job.ID,\n\t\tJobType: string(job.Type()),\n\t\tStartTime: job.Started,\n\t}\n\tif err := ri.load(ctx, api, job); err != nil {\n\t\treturn err\n\t}\n\tglobalBatchJobsMetrics.save(job.ID, ri)\n\tlastObject := ri.Object\n\n\tdelay := job.Replicate.Flags.Retry.Delay\n\tif delay == 0 {\n\t\tdelay = batchReplJobDefaultRetryDelay\n\t}\n\trnd := rand.New(rand.NewSource(time.Now().UnixNano()))\n\n\tskip := func(info FileInfo) (ok bool) {\n\t\tif r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan {\n\t\t\t// skip all objects that are newer than specified older duration\n\t\t\treturn false\n\t\t}\n\n\t\tif r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan {\n\t\t\t// skip all objects that are older than specified newer duration\n\t\t\treturn false\n\t\t}\n\n\t\tif !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(info.ModTime) {\n\t\t\t// skip all objects that are created before the specified time.\n\t\t\treturn false\n\t\t}\n\n\t\tif !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(info.ModTime) {\n\t\t\t// skip all objects that are created after the specified time.\n\t\t\treturn false\n\t\t}\n\n\t\tif len(r.Flags.Filter.Tags) > 0 {\n\t\t\t// Only parse object tags if tags filter is specified.\n\t\t\ttagMap := map[string]string{}\n\t\t\ttagStr := info.Metadata[xhttp.AmzObjectTagging]\n\t\t\tif len(tagStr) != 0 {\n\t\t\t\tt, err := tags.ParseObjectTags(tagStr)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\ttagMap = t.ToMap()\n\t\t\t}\n\n\t\t\tfor _, kv := range r.Flags.Filter.Tags {\n\t\t\t\tfor t, v := range tagMap {\n\t\t\t\t\tif kv.Match(BatchJobReplicateKV{Key: t, Value: v}) {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// None of the provided tags filter match skip the object\n\t\t\treturn false\n\t\t}\n\n\t\tif len(r.Flags.Filter.Metadata) > 0 {\n\t\t\tfor _, kv := range r.Flags.Filter.Metadata {\n\t\t\t\tfor k, v := range info.Metadata {\n\t\t\t\t\tif !strings.HasPrefix(strings.ToLower(k), \"x-amz-meta-\") && !isStandardHeader(k) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\t// We only need to match x-amz-meta or standardHeaders\n\t\t\t\t\tif kv.Match(BatchJobReplicateKV{Key: k, Value: v}) {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// None of the provided metadata filters match skip the object.\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}\n\n\tu, err := url.Parse(r.Target.Endpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcred := r.Target.Creds\n\n\tc, err := miniogo.NewCore(u.Host, &miniogo.Options{\n\t\tCreds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),\n\t\tSecure: u.Scheme == \"https\",\n\t\tTransport: getRemoteInstanceTransport,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.SetAppInfo(\"minio-\"+batchJobPrefix, r.APIVersion+\" \"+job.ID)\n\n\tworkerSize, err := strconv.Atoi(env.Get(\"_MINIO_BATCH_REPLICATION_WORKERS\", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twk, err := workers.New(workerSize)\n\tif err != nil {\n\t\t// invalid worker size.\n\t\treturn err\n\t}\n\n\tretryAttempts := ri.RetryAttempts\n\tretry := false\n\tfor attempts := 1; attempts <= retryAttempts; attempts++ {\n\t\tattempts := attempts\n\n\t\tctx, cancel := context.WithCancel(ctx)\n\n\t\tresults := 
make(chan ObjectInfo, 100)\n\t\tif err := api.Walk(ctx, r.Source.Bucket, r.Source.Prefix, results, ObjectOptions{\n\t\t\tWalkMarker: lastObject,\n\t\t\tWalkFilter: skip,\n\t\t}); err != nil {\n\t\t\tcancel()\n\t\t\t// Do not need to retry if we can't list objects on source.\n\t\t\treturn err\n\t\t}\n\n\t\tfor result := range results {\n\t\t\tresult := result\n\t\t\twk.Take()\n\t\t\tgo func() {\n\t\t\t\tdefer wk.Give()\n\n\t\t\t\tstopFn := globalBatchJobsMetrics.trace(batchReplicationMetricObject, job.ID, attempts, result)\n\t\t\t\tsuccess := true\n\t\t\t\tif err := r.ReplicateToTarget(ctx, api, c, result, retry); err != nil {\n\t\t\t\t\tif miniogo.ToErrorResponse(err).Code == \"PreconditionFailed\" {\n\t\t\t\t\t\t// pre-condition failed means we already have the object copied over.\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t// object must be deleted concurrently, allow these failures but do not count them\n\t\t\t\t\tif isErrVersionNotFound(err) || isErrObjectNotFound(err) {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tstopFn(err)\n\t\t\t\t\tlogger.LogIf(ctx, err)\n\t\t\t\t\tsuccess = false\n\t\t\t\t} else {\n\t\t\t\t\tstopFn(nil)\n\t\t\t\t}\n\t\t\t\tri.trackCurrentBucketObject(r.Source.Bucket, result, success)\n\t\t\t\tglobalBatchJobsMetrics.save(job.ID, ri)\n\t\t\t\t// persist in-memory state to disk after every 10secs.\n\t\t\t\tlogger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job.Location))\n\t\t\t}()\n\t\t}\n\t\twk.Wait()\n\n\t\tri.RetryAttempts = attempts\n\t\tri.Complete = ri.ObjectsFailed == 0\n\t\tri.Failed = ri.ObjectsFailed > 0\n\n\t\tglobalBatchJobsMetrics.save(job.ID, ri)\n\t\t// persist in-memory state to disk.\n\t\tlogger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job.Location))\n\n\t\tbuf, _ := json.Marshal(ri)\n\t\tif err := r.Notify(ctx, bytes.NewReader(buf)); err != nil {\n\t\t\tlogger.LogIf(ctx, fmt.Errorf(\"Unable to notify %v\", err))\n\t\t}\n\n\t\tcancel()\n\t\tif ri.Failed {\n\t\t\tri.ObjectsFailed = 0\n\t\t\tri.Bucket = \"\"\n\t\t\tri.Object = \"\"\n\t\t\tri.Objects = 0\n\t\t\tri.BytesFailed = 0\n\t\t\tri.BytesTransferred = 0\n\t\t\tretry = true // indicate we are retrying..\n\t\t\ttime.Sleep(delay + time.Duration(rnd.Float64()*float64(delay)))\n\t\t\tcontinue\n\t\t}\n\n\t\tbreak\n\t}\n\n\treturn nil\n}",
"func (r *restoreRunner) Start() error {\n\n\tr.Logger.Infof(\"Received restore job %v in namespace %v\", r.restore.Name, r.restore.Namespace)\n\n\tif r.restore.Spec.Backend == nil {\n\t\tr.Logger.Infof(\"Restore %v doesn't have a backend configured, skipping...\", r.restore.Name)\n\t\treturn nil\n\t}\n\n\trestoreJob := newRestoreJob(r.restore, r.config)\n\n\tgo r.watchState(restoreJob)\n\n\t_, err := r.K8sCli.Batch().Jobs(r.restore.Namespace).Create(restoreJob)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.restore.Status.Started = true\n\n\terr = r.updateStatus()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Cannot get baas object: %v\", err)\n\t}\n\n\treturn nil\n}",
"func (j *Job) Start() {\n\tj.status.Set(StatusReady)\n}",
"func (fc *FederatedController) syncFLJob(key string) (bool, error) {\n\tstartTime := time.Now()\n\tdefer func() {\n\t\tklog.V(4).Infof(\"Finished syncing federatedlearning job %q (%v)\", key, time.Since(startTime))\n\t}()\n\n\tns, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif len(ns) == 0 || len(name) == 0 {\n\t\treturn false, fmt.Errorf(\"invalid federatedlearning job key %q: either namespace or name is missing\", key)\n\t}\n\tsharedFLJob, err := fc.jobLister.FederatedLearningJobs(ns).Get(name)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tklog.V(4).Infof(\"FLJob has been deleted: %v\", key)\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, err\n\t}\n\tflJob := *sharedFLJob\n\t// set kind for flJob in case that the kind is None\n\tflJob.SetGroupVersionKind(neptunev1.SchemeGroupVersion.WithKind(\"FederatedLearningJob\"))\n\t// if flJob was finished previously, we don't want to redo the termination\n\tif IsFLJobFinished(&flJob) {\n\t\treturn true, nil\n\t}\n\tselector, _ := GenerateSelector(&flJob)\n\tpods, err := fc.podStore.Pods(flJob.Namespace).List(selector)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tactivePods := k8scontroller.FilterActivePods(pods)\n\tactive := int32(len(activePods))\n\tsucceeded, failed := getStatus(pods)\n\tconditions := len(flJob.Status.Conditions)\n\t// flJob first start\n\tif flJob.Status.StartTime == nil {\n\t\tnow := metav1.Now()\n\t\tflJob.Status.StartTime = &now\n\t}\n\n\tvar manageJobErr error\n\tjobFailed := false\n\tvar failureReason string\n\tvar failureMessage string\n\tphase := flJob.Status.Phase\n\n\tif failed > 0 {\n\t\tjobFailed = true\n\t\tfailureReason = \"workerFailed\"\n\t\tfailureMessage = \"the worker of FLJob failed\"\n\t}\n\n\tif jobFailed {\n\t\tflJob.Status.Conditions = append(flJob.Status.Conditions, NewFLJobCondition(neptunev1.FLJobCondFailed, failureReason, failureMessage))\n\t\tflJob.Status.Phase = neptunev1.FLJobFailed\n\t\tfc.recorder.Event(&flJob, v1.EventTypeWarning, failureReason, failureMessage)\n\t} else {\n\t\t// in the First time, we create the pods\n\t\tif len(pods) == 0 {\n\t\t\tactive, manageJobErr = fc.createPod(&flJob)\n\t\t}\n\t\tcomplete := false\n\t\tif succeeded > 0 && active == 0 {\n\t\t\tcomplete = true\n\t\t}\n\t\tif complete {\n\t\t\tflJob.Status.Conditions = append(flJob.Status.Conditions, NewFLJobCondition(neptunev1.FLJobCondComplete, \"\", \"\"))\n\t\t\tnow := metav1.Now()\n\t\t\tflJob.Status.CompletionTime = &now\n\t\t\tfc.recorder.Event(&flJob, v1.EventTypeNormal, \"Completed\", \"FLJob completed\")\n\t\t\tflJob.Status.Phase = neptunev1.FLJobSucceeded\n\t\t} else {\n\t\t\tflJob.Status.Phase = neptunev1.FLJobRunning\n\t\t}\n\t}\n\n\tforget := false\n\t// Check if the number of jobs succeeded increased since the last check. 
If yes \"forget\" should be true\n\t// This logic is linked to the issue: https://github.com/kubernetes/kubernetes/issues/56853 that aims to\n\t// improve the FLJob backoff policy when parallelism > 1 and few FLJobs failed but others succeed.\n\t// In this case, we should clear the backoff delay.\n\tif flJob.Status.Succeeded < succeeded {\n\t\tforget = true\n\t}\n\n\t// no need to update the flJob if the status hasn't changed since last time\n\tif flJob.Status.Active != active || flJob.Status.Succeeded != succeeded || flJob.Status.Failed != failed || len(flJob.Status.Conditions) != conditions || flJob.Status.Phase != phase {\n\t\tflJob.Status.Active = active\n\t\tflJob.Status.Succeeded = succeeded\n\t\tflJob.Status.Failed = failed\n\n\t\tif jobFailed && !IsFLJobFinished(&flJob) {\n\t\t\t// returning an error will re-enqueue FLJob after the backoff period\n\t\t\treturn forget, fmt.Errorf(\"failed pod(s) detected for flJob key %q\", key)\n\t\t}\n\n\t\tforget = true\n\t}\n\n\treturn forget, manageJobErr\n}",
"func (tc *testContext) runJob(name string, command []string) (string, error) {\n\t// Create a job which runs the provided command via SSH\n\tkeyMountDir := \"/private-key\"\n\tkeyMode := int32(0600)\n\tjob := &batch.Job{\n\t\tObjectMeta: meta.ObjectMeta{\n\t\t\tGenerateName: name + \"-job-\",\n\t\t},\n\t\tSpec: batch.JobSpec{\n\t\t\tTemplate: core.PodTemplateSpec{\n\t\t\t\tSpec: core.PodSpec{\n\t\t\t\t\tOS: &core.PodOS{Name: core.Linux},\n\t\t\t\t\tHostNetwork: true,\n\t\t\t\t\tRestartPolicy: core.RestartPolicyNever,\n\t\t\t\t\tServiceAccountName: tc.workloadNamespace,\n\t\t\t\t\tContainers: []core.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\tImage: tc.toolsImage,\n\t\t\t\t\t\t\tImagePullPolicy: core.PullIfNotPresent,\n\t\t\t\t\t\t\tCommand: command,\n\t\t\t\t\t\t\tVolumeMounts: []core.VolumeMount{{\n\t\t\t\t\t\t\t\tName: \"private-key\",\n\t\t\t\t\t\t\t\tMountPath: keyMountDir,\n\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVolumes: []core.Volume{{Name: \"private-key\", VolumeSource: core.VolumeSource{\n\t\t\t\t\t\tSecret: &core.SecretVolumeSource{\n\t\t\t\t\t\t\tSecretName: secrets.PrivateKeySecret,\n\t\t\t\t\t\t\tDefaultMode: &keyMode,\n\t\t\t\t\t\t},\n\t\t\t\t\t}}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tjobsClient := tc.client.K8s.BatchV1().Jobs(tc.workloadNamespace)\n\tjob, err := jobsClient.Create(context.TODO(), job, meta.CreateOptions{})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error creating job: %w\", err)\n\t}\n\n\t// Wait for the job to complete then gather and return the pod output\n\tif err = tc.waitUntilJobSucceeds(job.GetName()); err != nil {\n\t\treturn \"\", fmt.Errorf(\"error waiting for job to succeed: %w\", err)\n\t}\n\tlabelSelector := \"job-name=\" + job.Name\n\tlogs, err := tc.getLogs(labelSelector)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error getting logs from job pod: %w\", err)\n\t}\n\treturn logs, nil\n}",
"func (j *Job) begin() (Executor, error) {\n\tj.mu.Lock()\n\tdefer j.mu.Unlock()\n\n\tif j.st > monitor.PENDING {\n\t\treturn nil, JobBegunError{}\n\t}\n\n\tj.st = monitor.ACTIVE\n\n\tgo j.finish()\n\n\treturn j.exec, nil\n}",
"func (c *Controller) syncMXJob(key string) (bool, error) {\n\tstartTime := time.Now()\n\tdefer func() {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"job\": key,\n\t\t}).Infof(\"Finished syncing job %q (%v)\", key, time.Since(startTime))\n\t}()\n\n\tns, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif len(ns) == 0 || len(name) == 0 {\n\t\treturn false, fmt.Errorf(\"invalid job key %q: either namespace or name is missing\", key)\n\t}\n\n\tmxJob, err := c.MXJobLister.MXJobs(ns).Get(name)\n\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"job\": key,\n\t\t\t}).Infof(\"Job has been deleted: %v\", key)\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, err\n\t}\n\n\t// Create a new TrainingJob if there is no TrainingJob stored for it in the jobs map or if the UID's don't match.\n\t// The UID's won't match in the event we deleted the job and then recreated the job with the same name.\n\tif cJob, ok := c.jobs[key]; !ok || cJob.UID() != mxJob.UID {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"job\": key,\n\t\t}).Infof(\"Creating new job %v\", key)\n\t\tnc, err := trainer.NewJob(c.KubeClient, c.MXJobClient, c.recorder, mxJob, &c.config)\n\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"job\": key,\n\t\t\t}).Errorf(\"There was a problem creating NewJob %v; Error: %v\", key, err)\n\t\t\treturn false, err\n\t\t}\n\t\tc.jobs[key] = nc\n\t} else {\n\t\t// Replace the MXJob stored inside TrainingJob with the latest job.\n\t\t// We need to do this to pull in the latest changes to the spec/status.\n\t\tc.jobs[key].Update(mxJob)\n\t}\n\n\tnc := c.jobs[key]\n\n\tif err := nc.Reconcile(&c.config, c.enableGangScheduling); err != nil {\n\t\treturn false, err\n\t}\n\n\tmxJob, err = c.MXJobClient.KubeflowV1alpha1().MXJobs(mxJob.ObjectMeta.Namespace).Get(mxJob.ObjectMeta.Name, metav1.GetOptions{})\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif mxJob.Status.Phase == mxv1alpha1.MXJobPhaseCleanUp {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n\n}",
"func (a *Scheduler) Run(job *jobs.Job) error {\n\tlogrus.Debugf(\"Processing job: %+v\", job)\n\n\t// If the compliance database says the thing is already running,\n\t// we'll try to insert it into cereal once to make sure it is correct\n\t// Otherwise, compliance has completed and we're waiting for cereal\n\t// to agree\n\tshouldRetry := true\n\tif job.Status == types.StatusRunning {\n\t\tshouldRetry = false\n\t\tlogrus.Warnf(\"job %q (%q) already running\", job.Id, job.Name)\n\t}\n\t// If the job has a recurrence, we update the job schedule\n\tif job.Recurrence != \"\" {\n\t\t// Ensure recurrence rule can be parsed\n\t\t_, err := rrule.StrToRRule(job.Recurrence)\n\t\tif err != nil {\n\t\t\treturn &errorutils.InvalidError{Msg: fmt.Sprintf(\"failed to schedule job %q (%q) invalid job recurrence rule: %v\",\n\t\t\t\tjob.Id, job.Name, err)}\n\t\t}\n\t\terr = a.scanner.UpdateParentJobSchedule(job.Id, job.JobCount, job.Recurrence, job.ScheduledTime)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"error updating status for job %s (%s) : %s\", job.Name, job.Id, err.Error())\n\t\t}\n\t\treturn nil\n\t}\n\n\terr := a.pushWorkflow(job, shouldRetry)\n\tif err != nil {\n\t\tstrErr := fmt.Sprintf(\"Unable to add jobs to inspec agent: %s\", err.Error())\n\t\tlogrus.Error(strErr)\n\t\treturn errors.New(strErr)\n\t}\n\treturn nil\n}",
"func (w Worker) Start() {\n go func() {\n for {\n // register the current worker into the worker queue.\n w.WorkerPool <- w.JobChannel\n select {\n case job := <-w.JobChannel:\n // we have received a work request.\n if err, payLoad := job.JobProcessor(job.JobPayload); err == nil {\n if job.NextParallelJobs != nil {\n for _, nextJob := range job.NextParallelJobs {\n nextJob.JobPayload = payLoad // set the payload since it might have been modified\n JobQueue <- *nextJob\n }\n }\n } else {\n log.Println(\"err not nil: \", err.Error())\n SendFailureEmail(\"custom-sender@email.com\", job, err)\n }\n case <-w.quitChannel:\n // we have received a signal to stop\n return\n }\n }\n }()\n}",
"func (d *Data)nextJobId() int {\n d.jobId++\n return d.jobId\n}",
"func jobKey(j *pps.Job) string {\n\treturn fmt.Sprintf(\"%s@%s\", j.Pipeline, j.Id)\n}",
"func (p *MasterWorker) StartGen(key string) {\n\tif p.ValidWork(key) {\n\t\tp.workers[key].StartGen()\n\t}\n}",
"func (ctl *taskController) JobID() string {\n\treturn ctl.saved.JobKey.StringID()\n}",
"func (runner *TestRunner) ContinueBatchExecution (batchResultId string) error {\n\tbatchStatusPending := BATCH_STATUS_CODE_PENDING\n\n\tvar batchResult BatchResult\n\terr := runner.rtdbServer.GetObject(batchResultId, &batchResult)\n\tif err != nil { return err }\n\n\tif batchResult.Status != BATCH_STATUS_CODE_CANCELED && batchResult.Status != BATCH_STATUS_CODE_INTERRUPTED {\n\t\treturn fmt.Errorf(\"[runner] WARNING: trying to continue batch '%s' with status '%s'\", batchResultId, batchResult.Status)\n\t}\n\n\topSet := rtdb.NewOpSet()\n\topSet.Call(typeActiveBatchResultList, \"activeBatchResultList\", \"Append\", batchResultId)\n\topSet.Call(typeBatchResult, batchResultId, \"SetStatus\", batchStatusPending)\n\terr = runner.rtdbServer.ExecuteOpSet(opSet)\n\tif err != nil { panic(err) }\n\n\trunner.queueControl <- batchExecQueueControlEnqueue {\n\t\tbatchResultId:\tbatchResultId,\n\t\tqueueId:\t\tbatchQueueId(batchResult.ExecParams),\n\t}\n\n\treturn nil\n}",
"func start_jobs(done chan<- doneStatus, jobs <-chan job) {\n num_processed := 0\n for job := range jobs {\n success := job.run()\n if success {\n num_processed++\n }\n }\n done <- doneStatus{num_processed}\n}",
"func start_jobs(done chan<- doneStatus, jobs <-chan job) {\n num_processed := 0\n for job := range jobs {\n success := job.run()\n if success {\n num_processed++\n }\n }\n done <- doneStatus{num_processed}\n}",
"func (c *Client) RunJob(ctx context.Context, jobID string) error {\n\t_, err := c.API.SendRequestWithBytes(ctx, \"POST\", \"/bulk/\"+jobID+\"/run\", nil)\n\treturn err\n}",
"func (m *jobManager) launchJob(job *Job) error {\n\tif len(job.Credentials) > 0 && len(job.PasswordSnippet) > 0 {\n\t\treturn nil\n\t}\n\n\tnamespace := fmt.Sprintf(\"ci-ln-%s\", namespaceSafeHash(job.Name))\n\t// launch a prow job, tied back to this cluster user\n\tpj, err := prow.JobForConfig(m.prowConfigLoader, job.JobName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttargetPodName, err := findTargetName(pj.Spec.PodSpec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpj.ObjectMeta = metav1.ObjectMeta{\n\t\tName: job.Name,\n\t\tNamespace: m.prowNamespace,\n\t\tAnnotations: map[string]string{\n\t\t\t\"ci-chat-bot.openshift.io/mode\": job.Mode,\n\t\t\t\"ci-chat-bot.openshift.io/user\": job.RequestedBy,\n\t\t\t\"ci-chat-bot.openshift.io/channel\": job.RequestedChannel,\n\t\t\t\"ci-chat-bot.openshift.io/ns\": namespace,\n\t\t\t\"ci-chat-bot.openshift.io/releaseImage\": job.InstallImage,\n\t\t\t\"ci-chat-bot.openshift.io/upgradeImage\": job.UpgradeImage,\n\n\t\t\t\"prow.k8s.io/job\": pj.Spec.Job,\n\t\t},\n\t\tLabels: map[string]string{\n\t\t\t\"ci-chat-bot.openshift.io/launch\": \"true\",\n\n\t\t\t\"prow.k8s.io/type\": string(pj.Spec.Type),\n\t\t\t\"prow.k8s.io/job\": pj.Spec.Job,\n\t\t},\n\t}\n\n\t// register annotations the release controller can use to assess the success\n\t// of this job if it is upgrading between two edges\n\tif len(job.InstallVersion) > 0 && len(job.UpgradeVersion) > 0 {\n\t\tpj.Labels[\"release.openshift.io/verify\"] = \"true\"\n\t\tpj.Annotations[\"release.openshift.io/from-tag\"] = job.InstallVersion\n\t\tpj.Annotations[\"release.openshift.io/tag\"] = job.UpgradeVersion\n\t}\n\n\timage := job.InstallImage\n\tvar initialImage string\n\tif len(job.UpgradeImage) > 0 {\n\t\tinitialImage = image\n\t\timage = job.UpgradeImage\n\t}\n\tprow.OverrideJobEnvironment(&pj.Spec, image, initialImage, namespace)\n\t_, err = m.prowClient.Namespace(m.prowNamespace).Create(prow.ObjectToUnstructured(pj), metav1.CreateOptions{})\n\tif err != nil {\n\t\tif !errors.IsAlreadyExists(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Printf(\"prow job %s launched to target namespace %s\", job.Name, namespace)\n\terr = wait.PollImmediate(10*time.Second, 15*time.Minute, func() (bool, error) {\n\t\tuns, err := m.prowClient.Namespace(m.prowNamespace).Get(job.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tvar pj prowapiv1.ProwJob\n\t\tif err := prow.UnstructuredToObject(uns, &pj); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(pj.Status.URL) > 0 {\n\t\t\tjob.URL = pj.Status.URL\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"did not retrieve job url due to an error: %v\", err)\n\t}\n\n\tif job.Mode != \"launch\" {\n\t\treturn nil\n\t}\n\n\tseen := false\n\terr = wait.PollImmediate(5*time.Second, 15*time.Minute, func() (bool, error) {\n\t\tpod, err := m.coreClient.Core().Pods(m.prowNamespace).Get(job.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tif !errors.IsNotFound(err) {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tif seen {\n\t\t\t\treturn false, fmt.Errorf(\"pod was deleted\")\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}\n\t\tseen = true\n\t\tif pod.Status.Phase == \"Succeeded\" || pod.Status.Phase == \"Failed\" {\n\t\t\treturn false, fmt.Errorf(\"pod has already exited\")\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to check launch status: %v\", err)\n\t}\n\n\tlog.Printf(\"waiting for setup container in pod %s/%s to complete\", namespace, 
targetPodName)\n\n\tseen = false\n\tvar lastErr error\n\terr = wait.PollImmediate(5*time.Second, 45*time.Minute, func() (bool, error) {\n\t\tpod, err := m.coreClient.Core().Pods(namespace).Get(targetPodName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\t// pod could not be created or we may not have permission yet\n\t\t\tif !errors.IsNotFound(err) && !errors.IsForbidden(err) {\n\t\t\t\tlastErr = err\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tif seen {\n\t\t\t\treturn false, fmt.Errorf(\"pod was deleted\")\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}\n\t\tseen = true\n\t\tif pod.Status.Phase == \"Succeeded\" || pod.Status.Phase == \"Failed\" {\n\t\t\treturn false, fmt.Errorf(\"pod has already exited\")\n\t\t}\n\t\tok, err := containerSuccessful(pod, \"setup\")\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn ok, nil\n\t})\n\tif err != nil {\n\t\tif lastErr != nil && err == wait.ErrWaitTimeout {\n\t\t\terr = lastErr\n\t\t}\n\t\treturn fmt.Errorf(\"pod never became available: %v\", err)\n\t}\n\n\tlog.Printf(\"trying to grab the kubeconfig from launched pod\")\n\n\tvar kubeconfig string\n\terr = wait.PollImmediate(30*time.Second, 10*time.Minute, func() (bool, error) {\n\t\tcontents, err := commandContents(m.coreClient.Core(), m.coreConfig, namespace, targetPodName, \"test\", []string{\"cat\", \"/tmp/admin.kubeconfig\"})\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), \"container not found\") {\n\t\t\t\t// periodically check whether the still exists and is not succeeded or failed\n\t\t\t\tpod, err := m.coreClient.Core().Pods(namespace).Get(targetPodName, metav1.GetOptions{})\n\t\t\t\tif errors.IsNotFound(err) || (pod != nil && (pod.Status.Phase == \"Succeeded\" || pod.Status.Phase == \"Failed\")) {\n\t\t\t\t\treturn false, fmt.Errorf(\"pod cannot be found or has been deleted, assume cluster won't come up\")\n\t\t\t\t}\n\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\tlog.Printf(\"Unable to retrieve config contents: %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tkubeconfig = contents\n\t\treturn len(contents) > 0, nil\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not retrieve kubeconfig from pod: %v\", err)\n\t}\n\n\tjob.Credentials = kubeconfig\n\n\t// once the cluster is reachable, we're ok to send credentials\n\t// TODO: better criteria?\n\tvar waitErr error\n\tif err := waitForClusterReachable(kubeconfig); err != nil {\n\t\tlog.Printf(\"error: unable to wait for the cluster to start: %v\", err)\n\t\tjob.Credentials = \"\"\n\t\twaitErr = fmt.Errorf(\"cluster did not become reachable: %v\", err)\n\t}\n\n\tlines := int64(2)\n\tlogs, err := m.coreClient.Core().Pods(namespace).GetLogs(targetPodName, &corev1.PodLogOptions{Container: \"setup\", TailLines: &lines}).DoRaw()\n\tif err != nil {\n\t\tlog.Printf(\"error: unable to get setup logs\")\n\t}\n\tjob.PasswordSnippet = reFixLines.ReplaceAllString(string(logs), \"$1\")\n\n\t// clear the channel notification in case we crash so we don't attempt to redeliver\n\tpatch := []byte(`{\"metadata\":{\"annotations\":{\"ci-chat-bot.openshift.io/channel\":\"\"}}}`)\n\tif _, err := m.prowClient.Namespace(m.prowNamespace).Patch(job.Name, types.MergePatchType, patch, metav1.UpdateOptions{}); err != nil {\n\t\tlog.Printf(\"error: unable to clear channel annotation from prow job: %v\", err)\n\t}\n\n\treturn waitErr\n}",
"func (*listener) JobID() models.JobID {\n\treturn models.NilJobID\n}",
"func (con lt) LaunchJob(job lava.JobStruct, opt JobOptions) (int, error) {\n\t// Fill data\n\n\terr := updateTimeouts(&job, opt)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tjob.Priority = opt.Priority\n\tjob.Visibility = opt.Visibility\n\n\tyaml, err := yaml.Marshal(&job)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\t// Some basic checks before submission\n\toutput, err := con.JobsValidate(string(yaml))\n\tif err != nil {\n\t\tlog.Printf(\"Invalid YAML job definition: %s, %s\\n\", output, err.Error())\n\t\treturn -1, err\n\t}\n\n\tjobID, err := con.JobsSubmitStringWithRetry(string(yaml))\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\treturn jobID, nil\n}",
"func TestStartJob(t *testing.T) {\n\tc := &kc{}\n\tbr := BuildRequest{\n\t\tOrg: \"owner\",\n\t\tRepo: \"kube\",\n\t\tBaseRef: \"master\",\n\t\tBaseSHA: \"abc\",\n\t\tPulls: []Pull{\n\t\t\t{\n\t\t\t\tNumber: 5,\n\t\t\t\tAuthor: \"a\",\n\t\t\t\tSHA: \"123\",\n\t\t\t},\n\t\t},\n\t}\n\tif _, err := startJob(c, \"job-name\", \"Context\", br); err != nil {\n\t\tt.Fatalf(\"Didn't expect error starting job: %v\", err)\n\t}\n\tlabels := c.job.Metadata.Labels\n\tif labels[\"jenkins-job-name\"] != \"job-name\" {\n\t\tt.Errorf(\"Jenkins job name label incorrect: %s\", labels[\"jenkins-job-name\"])\n\t}\n\tif labels[\"owner\"] != \"owner\" {\n\t\tt.Errorf(\"Owner label incorrect: %s\", labels[\"owner\"])\n\t}\n\tif labels[\"repo\"] != \"kube\" {\n\t\tt.Errorf(\"Repo label incorrect: %s\", labels[\"kube\"])\n\t}\n\tif labels[\"pr\"] != \"5\" {\n\t\tt.Errorf(\"PR label incorrect: %s\", labels[\"pr\"])\n\t}\n}",
"func (g *GardenerAPI) NextJob(ctx context.Context) (*tracker.JobWithTarget, error) {\n\treturn g.jobs.Next(ctx)\n}",
"func (a LocalApi) RunJob(jobId string) (Log, error) {\n\tlog := newLog(jobId)\n\n\tpipeline, err := buildPipeline(a.path, jobId, log)\n\tif err != nil {\n\t\treturn Log{}, err\n\t}\n\n\tgo func() {\n\t\tpipeline.Run(a.path)\n\t}()\n\n\treturn log, nil\n}",
"func (lsn *listenerV2) JobID() int32 {\n\treturn lsn.job.ID\n}",
"func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {\n\treturn c.JobFromProject(ctx, c.projectID, id, c.Location)\n}",
"func StartAndCheckJobBatchSuccess(testName string) {\n\tnspMetrics.AddStartAndCheckJobBatchSuccess()\n\tmetrics.AddTestOne(testName, nspMetrics.Success)\n\tmetrics.AddTestZero(testName, nspMetrics.Errors)\n\tlogger.Infof(\"Test %s: SUCCESS\", testName)\n}",
"func (w *worker) start() {\n\tgo func() {\n\t\tfor {\n\t\t\tw.WorkerQueue <- w.Job\n\n\t\t\tselect {\n\t\t\tcase job := <-w.Job:\n\t\t\t\tlog.Printf(\"worker %d: %s\", w.ID, job.User.Login)\n\t\t\t\tjob.User.run()\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}",
"func (mpi *mempoolImpl) startBatchTimer(reason string) {\n\t// stop old timer\n\tmpi.stopBatchTimer(StopReason3)\n\tmpi.logger.Debugf(\"Start batch timer, reason: %s\", reason)\n\ttimestamp := time.Now().UnixNano()\n\tkey := strconv.FormatInt(timestamp, 10)\n\tmpi.batchTimerMgr.isActive.Set(key, true)\n\n\ttime.AfterFunc(mpi.batchTimerMgr.timeout, func() {\n\t\tif mpi.batchTimerMgr.isActive.Has(key) {\n\t\t\tmpi.batchTimerMgr.timeoutEventC <- true\n\t\t}\n\t})\n}",
"func (q *Queue) ResumePendingJobs(ctx context.Context) error {\n\tjobs, err := q.repo.GetJobsForRetry(ctx)\n\tif err != nil {\n\t\tlogger.Log.Error(\"Error getting jobs from DB %v\", zap.Error(err))\n\t\treturn err\n\t}\n\tlogger.Log.Debug(\"Total Resume jobs \", zap.Any(\"jobs\", len(jobs)))\n\tfor i, j := range jobs {\n\t\tif j.RetryCount.Int32 < int32(q.retries) {\n\t\t\tjob := *(job.FromRepoJob(&jobs[i]))\n\t\t\t// metaData := make(map[string]string)\n\t\t\tmd := metadata.MD{}\n\t\t\tif j.MetaData != nil {\n\t\t\t\terr := json.Unmarshal(j.MetaData, &md)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Log.Error(\"Error unmarshling meta data %s\", zap.Error(err))\n\t\t\t\t}\n\t\t\t}\n\t\t\t// for k, v := range metaData {\n\t\t\t// \tmd = metadata.Pairs(k, v)\n\t\t\t// }\n\t\t\tctx = metadata.NewOutgoingContext(ctx, md)\n\t\t\tctx = metadata.NewIncomingContext(ctx, md)\n\t\t\tq.PushJob(ctx, job, job.Type.String)\n\t\t} else {\n\t\t\tlogger.Log.Error(\"Error already retires execeeded for \", zap.Int32(\"jobID\", j.JobID))\n\t\t\terr = q.repo.UpdateJobStatusCompleted(ctx, dbgen.UpdateJobStatusCompletedParams{JobID: j.JobID, Status: \"FAILED\", EndTime: sql.NullTime{Time: time.Now(), Valid: true}})\n\t\t\tif err != nil {\n\t\t\t\tlogger.Log.Error(\"Error update status to failed for job: %s\", zap.Error(err))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func StartWorker(c *configuration.DumperConfiguration) {\n\n\tclient := datakube.NewDatakubeProtobufClient(c.Server, &http.Client{})\n\n\t// Attach the headers to a context\n\tctx := context.Background()\n\n\tticker := time.NewTicker(time.Second * time.Duration(c.Interval))\n\tfor range ticker.C {\n\t\tjobs, err := client.ListJobs(ctx, &datakube.ListJobsRequest{Status: types.STATUS_QUEUED})\n\n\t\tif err != nil {\n\t\t\tlog.Error(\"Errror getting jobs \", err.Error())\n\t\t\tos.Exit(15)\n\t\t}\n\n\t\tfor _, job := range jobs.Jobs {\n\n\t\t\tjob.State = types.STATUS_IN_PROGRESS\n\t\t\tupdateRequest := datakube.UpdateJobRequest{\n\t\t\t\tJob: job,\n\t\t\t}\n\t\t\tclient.UpdateJob(ctx, &updateRequest)\n\n\t\t\tadapter, err := adapter.CreateNewAdapter(job.Target.Credentials.Host, job.Target.Credentials.Port, job.Target.Credentials.Database, job.Target.Credentials.User, job.Target.Credentials.Password, job.Target.Type)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Cant execute job for target %s with error => %s\", job.Target, err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tres := Run(job.Target.Name, adapter)\n\n\t\t\tif res.Success == false {\n\t\t\t\tlog.Debug(\"Something failed in job \", job.Id)\n\t\t\t\tjob.State = types.STATUS_ERROR\n\t\t\t\tupdateRequest := datakube.UpdateJobRequest{\n\t\t\t\t\tJob: job,\n\t\t\t\t\tMessage: res.ErrorMsg,\n\t\t\t\t}\n\t\t\t\tclient.UpdateJob(ctx, &updateRequest)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdata, err := ioutil.ReadFile(res.TemporaryFile)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Error reading temporary file to send %s\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treq := datakube.SaveDumpFileRequest{\n\t\t\t\tTargetname: res.TargetName,\n\t\t\t\tData: data,\n\t\t\t\tJobId: job.Id,\n\t\t\t}\n\n\t\t\tsaveresult, err := client.SaveDumpFileForJob(ctx, &req)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Error sending file to server %s\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif saveresult.Success != true {\n\t\t\t\tlog.Debugf(\"Transfered dump to Server not successful\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Debugf(\"Transfered dump to Server successfuly - acknowling\")\n\t\t}\n\t}\n}",
"func (e *engineImpl) startInvocation(c context.Context, jobID string, invocationNonce int64,\n\ttriggeredBy identity.Identity, retryCount int) error {\n\n\tc = logging.SetField(c, \"JobID\", jobID)\n\tc = logging.SetField(c, \"InvNonce\", invocationNonce)\n\tc = logging.SetField(c, \"Attempt\", retryCount)\n\n\t// Create new Invocation entity in StatusStarting state and associated it with\n\t// CronJob entity.\n\t//\n\t// Task queue guarantees not to execute same task concurrently (i.e. retry\n\t// happens only if previous attempt finished already).\n\t// There are 3 possibilities here:\n\t// 1) It is a first attempt. In that case we generate new Invocation in\n\t// state STARTING and update CronJob with a reference to it.\n\t// 2) It is a retry and previous attempt is still starting (indicated by\n\t// IsExpectingInvocation returning true). Assume it failed to start\n\t// and launch a new one. Mark old one as obsolete.\n\t// 3) It is a retry and previous attempt has already started (in this case\n\t// cron job is in RUNNING state and IsExpectingInvocation returns\n\t// false). Assume this retry was unnecessary and skip it.\n\tvar inv Invocation\n\tvar skip bool\n\terr := e.txn(c, jobID, func(c context.Context, job *CronJob, isNew bool) error {\n\t\tds := datastore.Get(c)\n\t\tif isNew {\n\t\t\tlogging.Errorf(c, \"Queued job is unexpectedly gone\")\n\t\t\tskip = true\n\t\t\treturn errSkipPut\n\t\t}\n\t\tif !job.State.IsExpectingInvocation(invocationNonce) {\n\t\t\tlogging.Errorf(c, \"No longer need to start invocation with nonce %d\", invocationNonce)\n\t\t\tskip = true\n\t\t\treturn nil\n\t\t}\n\t\tjobKey := ds.KeyForObj(job)\n\t\tinvID, err := generateInvocationID(c, jobKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Put new invocation entity, generate its ID.\n\t\tinv = Invocation{\n\t\t\tID: invID,\n\t\t\tJobKey: jobKey,\n\t\t\tStarted: clock.Now(c).UTC(),\n\t\t\tInvocationNonce: invocationNonce,\n\t\t\tTriggeredBy: triggeredBy,\n\t\t\tRevision: job.Revision,\n\t\t\tRevisionURL: job.RevisionURL,\n\t\t\tTask: job.Task,\n\t\t\tRetryCount: int64(retryCount),\n\t\t\tStatus: task.StatusStarting,\n\t\t}\n\t\tinv.debugLog(c, \"Invocation initiated (attempt %d)\", retryCount+1)\n\t\tif triggeredBy != \"\" {\n\t\t\tinv.debugLog(c, \"Manually triggered by %s\", triggeredBy)\n\t\t}\n\t\tif err := ds.Put(&inv); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Move previous invocation (if any) to failed state. It has failed to\n\t\t// start.\n\t\tif job.State.InvocationID != 0 {\n\t\t\tprev := Invocation{\n\t\t\t\tID: job.State.InvocationID,\n\t\t\t\tJobKey: jobKey,\n\t\t\t}\n\t\t\terr := ds.Get(&prev)\n\t\t\tif err != nil && err != datastore.ErrNoSuchEntity {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err == nil && !prev.Status.Final() {\n\t\t\t\tprev.debugLog(c, \"New invocation is running (%d), marking this one as failed.\", inv.ID)\n\t\t\t\tprev.Status = task.StatusFailed\n\t\t\t\tprev.Finished = clock.Now(c).UTC()\n\t\t\t\tprev.MutationsCount++\n\t\t\t\tif err := ds.Put(&prev); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Store the reference to the new invocation ID.\n\t\treturn e.rollSM(c, job, func(sm *StateMachine) error {\n\t\t\treturn sm.OnInvocationStarting(invocationNonce, inv.ID)\n\t\t})\n\t})\n\tif err != nil || skip {\n\t\treturn err\n\t}\n\tc = logging.SetField(c, \"InvID\", inv.ID)\n\n\t// Now we have a new Invocation entity in the datastore in StatusStarting\n\t// state. 
Grab corresponding TaskManager and launch task through it, keeping\n\t// track of the progress in created Invocation entity.\n\tctl, err := e.controllerForInvocation(c, &inv)\n\tif err != nil {\n\t\t// Note: controllerForInvocation returns both ctl and err on errors, with\n\t\t// ctl not fully initialized (but good enough for what's done below).\n\t\tctl.DebugLog(\"Failed to initialize task controller - %s\", err)\n\t\tctl.State().Status = task.StatusFailed\n\t\treturn ctl.Save()\n\t}\n\n\t// Ask manager to start the task. If it returns no errors, it should also move\n\t// invocation out of StatusStarting state (a failure to do so is an error). If\n\t// it returns an error, invocation is forcefully moved to StatusFailed state.\n\t// In either case, invocation never ends up in StatusStarting state.\n\terr = ctl.manager.LaunchTask(c, ctl)\n\tretryInvocation := false\n\tif ctl.State().Status == task.StatusStarting {\n\t\tctl.State().Status = task.StatusFailed\n\t\tif err != nil {\n\t\t\tretryInvocation = errors.IsTransient(err)\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"LaunchTask didn't move invocation out of StatusStarting\")\n\t\t\tretryInvocation = false\n\t\t}\n\t}\n\n\t// If asked to retry the invocation, do not touch CronJob entity when saving\n\t// the current (failed) invocation. That way CronJob stays in \"QUEUED\" state\n\t// (indicating it's queued for a new invocation).\n\tif saveErr := ctl.saveImpl(!retryInvocation); saveErr != nil {\n\t\tlogging.Errorf(ctl.ctx, \"Failed to save invocation state - %s\", saveErr)\n\t\tif err == nil {\n\t\t\terr = saveErr\n\t\t}\n\t}\n\treturn err\n}",
"func (s *Scheduler) Cancel(id uint) error {\n\tresp, err := s.client.Get(context.TODO(), common.QueuePrefix, clientv3.WithPrefix())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range resp.Kvs {\n\t\tkey, val, job := string(resp.Kvs[i].Key), resp.Kvs[i].Value, &common.Job{}\n\t\tif err := json.Unmarshal(val, &job); err == nil {\n\t\t\tif job.ID == id {\n\t\t\t\ts.logger.Debugf(\"removing job %d from queue...\", job.ID)\n\t\t\t\tif _, err := s.client.Delete(context.TODO(), key); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tjob.StartTime = lib.TimeNow()\n\t\t\t\tjob.EndTime = lib.TimeNow()\n\t\t\t\tjob.Status = common.StatusFailing\n\t\t\t\tif err := s.save(job); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t} else {\n\t\t\ts.logger.Errorf(\"error unmarshaling job: %v\", err)\n\t\t}\n\t}\n\ts.mu.Lock()\n\tif job, ok := s.pending[id]; ok {\n\t\ts.mu.Unlock()\n\t\ts.logger.Debugf(\"stopping job %d...\", id)\n\t\tjob.EndTime = lib.TimeNow()\n\t\tjob.Status = common.StatusFailing\n\t\tkey := path.Join(common.StopPrefix, fmt.Sprintf(\"%d\", job.ID))\n\t\tval, err := json.Marshal(&job)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := s.client.Put(context.TODO(), key, string(val)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\ts.mu.Unlock()\n\treturn nil\n}",
"func (job *AnalyzeJob) Start() {\n\tnow := time.Now()\n\tjob.Mutex.Lock()\n\tjob.State = running\n\tjob.StartTime = now\n\tjob.updateTime = now\n\tjob.Mutex.Unlock()\n}",
"func (gr *gcsReporter) reportStartedJob(ctx context.Context, log *logrus.Entry, pj *prowv1.ProwJob) error {\n\tbucketName, dir, err := util.GetJobDestination(gr.cfg, pj)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get job destination: %w\", err)\n\t}\n\n\tif gr.dryRun {\n\t\tlog.WithFields(logrus.Fields{\"bucketName\": bucketName, \"dir\": dir}).Debug(\"Would upload started.json\")\n\t\treturn nil\n\t}\n\n\t// Best-effort read of existing started.json; it's overwritten only if it's uploaded\n\t// by crier and there is something new (clone record).\n\tvar existingStarted metadata.Started\n\tvar existing bool\n\tstartedFilePath, err := providers.StoragePath(bucketName, path.Join(dir, prowv1.StartedStatusFile))\n\tif err != nil {\n\t\t// Started.json storage path is invalid, so this function will\n\t\t// eventually fail.\n\t\treturn fmt.Errorf(\"failed to resolve started.json path: %v\", err)\n\t}\n\n\tcontent, err := io.ReadContent(ctx, log, gr.opener, startedFilePath)\n\tif err != nil {\n\t\tif !io.IsNotExist(err) {\n\t\t\tlog.WithError(err).Warn(\"Failed to read started.json.\")\n\t\t}\n\t} else {\n\t\terr = json.Unmarshal(content, &existingStarted)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Warn(\"Failed to unmarshal started.json.\")\n\t\t} else {\n\t\t\texisting = true\n\t\t}\n\t}\n\n\tif existing && (existingStarted.Metadata == nil || existingStarted.Metadata[\"uploader\"] != \"crier\") {\n\t\t// Uploaded by prowjob itself, skip reporting\n\t\tlog.Debug(\"Uploaded by pod-utils, skipping\")\n\t\treturn nil\n\t}\n\n\tstaticRevision := downwardapi.GetRevisionFromRefs(pj.Spec.Refs, pj.Spec.ExtraRefs)\n\tif pj.Spec.Refs == nil || (existingStarted.RepoCommit != \"\" && existingStarted.RepoCommit != staticRevision) {\n\t\t// RepoCommit could only be \"\", BaseRef, or the final resolved SHA,\n\t\t// which shouldn't change for a given presubmit job. Avoid query GCS is\n\t\t// this is already done.\n\t\tlog.Debug(\"RepoCommit already resolved before, skipping\")\n\t\treturn nil\n\t}\n\n\t// Try to read clone records\n\tcloneRecord := make([]clone.Record, 0)\n\tcloneRecordFilePath, err := providers.StoragePath(bucketName, path.Join(dir, prowv1.CloneRecordFile))\n\tif err != nil {\n\t\t// This is user config error\n\t\tlog.WithError(err).Debug(\"Failed to resolve clone-records.json path.\")\n\t} else {\n\t\tcloneRecordBytes, err := io.ReadContent(ctx, log, gr.opener, cloneRecordFilePath)\n\t\tif err != nil {\n\t\t\tif !io.IsNotExist(err) {\n\t\t\t\tlog.WithError(err).Warn(\"Failed to read clone records.\")\n\t\t\t}\n\t\t} else {\n\t\t\tif err := json.Unmarshal(cloneRecordBytes, &cloneRecord); err != nil {\n\t\t\t\tlog.WithError(err).Warn(\"Failed to unmarshal clone records.\")\n\t\t\t}\n\t\t}\n\t}\n\ts := downwardapi.PjToStarted(pj, cloneRecord)\n\ts.Metadata = metadata.Metadata{\"uploader\": \"crier\"}\n\n\toutput, err := json.MarshalIndent(s, \"\", \"\\t\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal started metadata: %w\", err)\n\t}\n\n\t// Overwrite if it was uploaded by crier(existing) and there might be\n\t// something new.\n\t// Add a new var for better readability.\n\toverwrite := existing\n\toverwriteOpt := io.WriterOptions{PreconditionDoesNotExist: utilpointer.Bool(!overwrite)}\n\treturn io.WriteContent(ctx, log, gr.opener, startedFilePath, output, overwriteOpt)\n}",
"func (j *Job) start() {\n\tgo func() {\n\t\tfor {\n\t\t\t// Check if runImmediately is set on the first run\n\t\t\tif j.firstRun && j.runImmediately {\n\t\t\t\tj.fn()\n\t\t\t}\n\t\t\tj.firstRun = false\n\n\t\t\t// Sleep for the predetermined time.\n\t\t\ttime.Sleep(j.delay)\n\n\t\t\tselect {\n\t\t\t// Check for the 'stop' signal.\n\t\t\tcase <-j.stop:\n\t\t\t\treturn\n\n\t\t\t// Execute the function.\n\t\t\tdefault:\n\t\t\t\tj.fn()\n\t\t\t}\n\t\t}\n\t}()\n}",
"func (m *Manager) loadAndStartJobsLocked() {\n\tuuids, err := m.ts.GetWorkflowNames(m.ctx)\n\tif err != nil {\n\t\tlog.Errorf(\"GetWorkflowNames failed to find existing workflows: %v\", err)\n\t\treturn\n\t}\n\n\tfor _, uuid := range uuids {\n\t\t// Load workflows from the topo server, only look at\n\t\t// 'Running' ones.\n\t\twi, err := m.ts.GetWorkflow(m.ctx, uuid)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to load workflow %v, will not start it: %v\", uuid, err)\n\t\t\tcontinue\n\t\t}\n\n\t\trw, err := m.instantiateWorkflow(wi.Workflow)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to instantiate workflow %v from factory %v, will not start it: %v\", uuid, wi.FactoryName, err)\n\t\t\tcontinue\n\t\t}\n\t\trw.wi = wi\n\n\t\tif rw.wi.State == workflowpb.WorkflowState_Running {\n\t\t\tm.runWorkflow(rw)\n\t\t}\n\t}\n}",
"func (_m *CIPDClient) BeginBatch(ctx context.Context) {\n\t_m.Called(ctx)\n}",
"func (js *JobSession) RunJob(jt drmaa2interface.JobTemplate) (drmaa2interface.Job, error) {\n\tid, err := js.tracker[0].AddJob(jt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newJob(id, js.name, jt, js.tracker[0]), nil\n}",
"func Job() {\n\tlogger.Log.Debug(\"cron job started...\")\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlogger.Log.Debug(\"Panic recovered from cron job\", zap.Any(\"recover\", r))\n\t\t}\n\t}()\n\tcronCtx, err := createSharedContext(AuthAPI)\n\tif err != nil {\n\t\tlogger.Log.Debug(\"couldnt fetch token, will try next time when cron will execute\", zap.Any(\"error\", err))\n\t\treturn\n\t}\n\tif cronCtx != nil {\n\t\t*cronCtx, err = grpc.AddClaimsInContext(*cronCtx, VerifyKey)\n\t\tfileScopeMapping := make(map[string][]string)\n\t\t//Read Dir , if found create the job\n\t\tfiles, er := ioutil.ReadDir(SourceDir)\n\t\tif er != nil {\n\t\t\tlogger.Log.Debug(\"Failed to read the dirctory/files\", zap.Any(\"directory\", SourceDir), zap.Error(er))\n\t\t\treturn\n\t\t}\n\t\tfor _, fileInfo := range files {\n\t\t\ttemp := strings.Split(fileInfo.Name(), constants.SCOPE_DELIMETER)\n\t\t\tif len(temp) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t//data[\"TST\"]= []{\"f1.csv\",\"f2.csv\",\"f3.csv\"}, map is because if multiple files come\n\t\t\tfileScopeMapping[temp[0]] = append(fileScopeMapping[temp[0]], fileInfo.Name())\n\t\t}\n\n\t\tfor scope, files := range fileScopeMapping {\n\t\t\tresp, err := Obj.NotifyUpload(*cronCtx, &v1.NotifyUploadRequest{\n\t\t\t\tScope: scope,\n\t\t\t\tType: \"data\",\n\t\t\t\tUploadedBy: \"Nifi\",\n\t\t\t\tFiles: files})\n\t\t\tif err != nil || (resp != nil && !resp.Success) {\n\t\t\t\tlogger.Log.Debug(\"failed to upload the transformed files\", zap.Error(err))\n\t\t\t}\n\t\t}\n\t}\n}",
"func (job *Job) startPipeline(w http.ResponseWriter, r *http.Request, jobstore *JobStore) {\n\tstartTime := time.Now()\n\tfmt.Fprintf(w, \"Executing Job: %s\\n\", job.name)\n\ttime.Sleep(1 * time.Second)\n\tfmt.Fprintf(w, \"Finished executing Job: %s in duration: %s \\n\", job.name, time.Since(startTime))\n\tjobstore.addJobs(Job{job.name, job.scm, time.Since(startTime), time.Now().UTC()})\n}",
"func (t *Task) Job() bool {\n\t//task已经关闭申请失败\n\tif t.close.Load() {\n\t\treturn false\n\t}\n\t//Parent要求关闭\n\tselect {\n\tcase <-t.Ctx.Done():\n\t\tt.Die()\n\t\treturn false\n\tdefault:\n\t}\n\t//申请任务\n\tt.jobs <- 1\n\tt.wait.Add(1)\n\treturn true\n}",
"func (m *PrintersItemJobsPrintJobItemRequestBuilder) Start()(*PrintersItemJobsItemStartRequestBuilder) {\n return NewPrintersItemJobsItemStartRequestBuilderInternal(m.BaseRequestBuilder.PathParameters, m.BaseRequestBuilder.RequestAdapter)\n}",
"func (j *Job) run() {\n\tdefer close(j.done)\n\n\tvar waitInterval time.Duration\n\n\tfor {\n\t\tselect {\n\t\tcase <-j.stop:\n\t\t\treturn\n\t\tcase <-time.After(waitInterval):\n\t\t}\n\n\t\tfunc() {\n\t\t\t// Acquire the corresponding job lock and hold it throughout execution.\n\t\t\tj.mutex.Lock()\n\t\t\tdefer j.mutex.Unlock()\n\n\t\t\tmetadata, err := j.readMetadata()\n\t\t\tif err != nil {\n\t\t\t\tj.pluginAPI.LogError(\"failed to read job metadata\", \"err\", err, \"key\", j.key)\n\t\t\t\twaitInterval = nextWaitInterval(waitInterval, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Is it time to run the job?\n\t\t\twaitInterval = j.nextWaitInterval(time.Now(), metadata)\n\t\t\tif waitInterval > 0 {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Run the job\n\t\t\tj.callback()\n\n\t\t\tmetadata.LastFinished = time.Now()\n\n\t\t\terr = j.saveMetadata(metadata)\n\t\t\tif err != nil {\n\t\t\t\tj.pluginAPI.LogError(\"failed to write job data\", \"err\", err, \"key\", j.key)\n\t\t\t}\n\n\t\t\twaitInterval = j.nextWaitInterval(time.Now(), metadata)\n\t\t}()\n\t}\n}",
"func (c Context) JobID() string {\n\treturn c.Current().JobID\n}",
"func (j Job) Run() {\n\tj.running = true\n\tj.startedAt = time.Now()\n\tlog.Printf(\"[%s] job started\\n\", j.config.Name)\n\n\tswitch j.config.RunMode {\n\tcase nativeMode:\n\t\trunNative(&j)\n\tcase dockerMode:\n\t\trunDocker(&j)\n\t}\n\n\tj.running = false\n\tj.duration = time.Since(j.startedAt)\n\tlog.Printf(\n\t\t\"[%s] job finished with %d. success: %v, duration: %v,\\n\",\n\t\tj.config.Name,\n\t\tj.exitStatus,\n\t\tj.success,\n\t\tj.duration,\n\t)\n\n\tsendNotifications(&j)\n}",
"func (t *DRMAATracker) JobControl(jobID, action string) error {\n\tif t == nil || t.session == nil {\n\t\treturn fmt.Errorf(\"no active job session\")\n\t}\n\tswitch action {\n\tcase \"suspend\":\n\t\treturn t.session.SuspendJob(jobID)\n\tcase \"resume\":\n\t\treturn t.session.ResumeJob(jobID)\n\tcase \"hold\":\n\t\treturn t.session.HoldJob(jobID)\n\tcase \"release\":\n\t\treturn t.session.ReleaseJob(jobID)\n\tcase \"terminate\":\n\t\treturn t.session.TerminateJob(jobID)\n\t}\n\treturn fmt.Errorf(\"internal: unknown job state change request: %s\", action)\n}",
"func (zp *ZPackIns) StartBatch(dt int64) {\n\tzp.startBatch(dt)\n}",
"func (env *Env) JobController(ctx context.Context) (chan<- JobRequest, <-chan error) {\n\t// the caller will own the inJobStream channel and must close it\n\tinJobStream := make(chan JobRequest)\n\t// we own the errc channel. make it buffered so we can write 1 error\n\t// without blocking.\n\terrc := make(chan error, 1)\n\n\t// rc is the response channel for all Job status messages.\n\trc := make(chan JobUpdate)\n\n\t// n is the WaitGroup used to synchronize agent completion.\n\t// Each runJob goroutine adds 1 to n when it starts.\n\tvar n sync.WaitGroup\n\t// Here the JobController itself also adds 1 to n, and this 1 is\n\t// Done()'d when the JobController's context gets cancelled,\n\t// signalling the termination of the JobController.\n\tn.Add(1)\n\n\t// start a separate goroutine to wait on the waitgroup until all agents\n\t// AND the JobController are done, and then close the response channel\n\tgo func() {\n\t\tn.Wait()\n\t\tclose(rc)\n\t}()\n\n\t// now we start a goroutine to listen to channels and wait for\n\t// things to happen\n\tgo func() {\n\t\t// note that this could introduce a race condition IF the JobController\n\t\t// were to receive a cancel signal from context, and decremented n to\n\t\t// zero, AND then a new Job were started, which would try to reuse\n\t\t// the zeroed waitgroup. To avoid this, we set exiting to true before\n\t\t// calling n.Done(), and after exiting is true we don't create any\n\t\t// new Jobs.\n\t\texiting := false\n\n\t\t// note that this should not need to be synchronized. only this\n\t\t// goroutine should be checking and updating the job submitted map,\n\t\t// and this goroutine is only being run once.\n\t\tjobSubmitted := map[uint32]bool{}\n\n\t\tfor !exiting {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\t// the JobController has been cancelled and should shut down\n\t\t\t\texiting = true\n\t\t\t\tn.Done()\n\t\t\tcase jr := <-inJobStream:\n\t\t\t\t// the caller has submitted a new JobRequest\n\t\t\t\t// check whether it's already been submitted once\n\t\t\t\tif _, ok := jobSubmitted[jr.JobID]; ok {\n\t\t\t\t\tlog.Printf(\"===> got Job %d as a repeat; dropping\", jr.JobID)\n\t\t\t\t} else {\n\t\t\t\t\t// mark it as submitted\n\t\t\t\t\tjobSubmitted[jr.JobID] = true\n\t\t\t\t\t// and create the job\n\t\t\t\t\tenv.startNewJob(ctx, &jr, &n, rc, errc)\n\t\t\t\t}\n\t\t\tcase ju := <-rc:\n\t\t\t\t// an agent has sent a JobUpdate\n\t\t\t\tenv.updateJobDB(&ju)\n\t\t\t}\n\t\t}\n\n\t\t// FIXME as we are exiting, do we first need to drain any remaining\n\t\t// FIXME updates from rc?\n\t}()\n\n\t// finally we return the channels so that the caller can kick things off\n\treturn inJobStream, errc\n}",
"func (r *ReconcileSentry) jobForSentryCreateUser() *batchv1.Job {\n\tname := \"sentry-createuser\"\n\trestartPolicy := corev1.RestartPolicyNever\n\tone := int32(1)\n\tzero := int32(0)\n\topts := templateOpts{\n\t\tName: name,\n\t\tArgs: []string{\n\t\t\t\"createuser\",\n\t\t\t\"--no-input\",\n\t\t\t\"--superuser\",\n\t\t\t\"--email\",\n\t\t\t\"$(SENTRY_SU_EMAIL)\",\n\t\t\t\"--password\",\n\t\t\t\"$(SENTRY_SU_PASSWORD)\",\n\t\t},\n\t\tExtraEnv: []corev1.EnvVar{\n\t\t\t{\n\t\t\t\tName: \"SENTRY_SU_EMAIL\",\n\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\tName: r.sentry.Spec.SentrySecret,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tKey: r.sentry.Spec.SentrySuperUserEmailKey,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"SENTRY_SU_PASSWORD\",\n\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\tName: r.sentry.Spec.SentrySecret,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tKey: r.sentry.Spec.SentrySuperUserPasswordKey,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tRestartPolicy: &restartPolicy,\n\t}\n\tjobSpec := r.getCommonPodTemplate(opts)\n\tjob := &batchv1.Job{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: r.sentry.Namespace,\n\t\t},\n\t\tSpec: batchv1.JobSpec{\n\t\t\tTemplate: jobSpec,\n\t\t\tParallelism: &one,\n\t\t\tCompletions: &one,\n\t\t\tBackoffLimit: &zero,\n\t\t},\n\t}\n\n\tcontrollerutil.SetControllerReference(r.sentry, job, r.scheme)\n\treturn job\n}",
"func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {\n\tjob, err := c.service.getJob(ctx, c.projectID, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tjob.c = c\n\treturn job, nil\n}",
"func (c *TestController) lookupJob(id TestID) *control.Job {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\treturn c.running[id]\n}",
"func BatchJobForCockroachDB(r *ReconcileCockroachDB, m *dbv1alpha1.CockroachDB) interface{} {\n\n\treqLogger := log.WithValues(\"CockroachDB.Meta.Name\", m.ObjectMeta.Name, \"CockroachDB.Meta.Namespace\", m.ObjectMeta.Namespace)\n\treqLogger.Info(\"Reconciling CockroachDB Batch Job\")\n\n\tls := labelsForCockroachDB(m.Name)\n\n\tdep := &batchv1.Job{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"batch/v1\",\n\t\t\tKind: \"Job\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: m.Name,\n\t\t\tNamespace: m.Namespace,\n\t\t\tLabels: ls,\n\t\t},\n\t\tSpec: batchv1.JobSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tServiceAccountName: m.Name,\n\t\t\t\t\tInitContainers: []corev1.Container{{\n\t\t\t\t\t\tName: \"init-certs\",\n\t\t\t\t\t\tImage: \"smartmachine/cockroach-k8s-request-cert:0.3\",\n\t\t\t\t\t\tImagePullPolicy: corev1.PullIfNotPresent,\n\t\t\t\t\t\tCommand: []string{\n\t\t\t\t\t\t\t\"/bin/ash\",\n\t\t\t\t\t\t\t\"-ecx\",\n\t\t\t\t\t\t\t\"/request-cert -namespace=${POD_NAMESPACE} -certs-dir=/cockroach-certs -type=client \" +\n\t\t\t\t\t\t\t\t\"-user=root -symlink-ca-from=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt \" +\n\t\t\t\t\t\t\t\t\"-cluster=\" + m.Name,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tEnv: []corev1.EnvVar{{\n\t\t\t\t\t\t\tName: \"POD_NAMESPACE\",\n\t\t\t\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\t\t\t\tFieldRef: &corev1.ObjectFieldSelector{\n\t\t\t\t\t\t\t\t\tFieldPath: \"metadata.namespace\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{{\n\t\t\t\t\t\t\tName: \"client-certs\",\n\t\t\t\t\t\t\tMountPath: \"/cockroach-certs\",\n\t\t\t\t\t\t}},\n\t\t\t\t\t}},\n\t\t\t\t\tContainers: []corev1.Container{{\n\t\t\t\t\t\tName: \"cluster-init\",\n\t\t\t\t\t\tImage: m.Spec.Cluster.Image,\n\t\t\t\t\t\tImagePullPolicy: corev1.PullIfNotPresent,\n\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{{\n\t\t\t\t\t\t\tName: \"client-certs\",\n\t\t\t\t\t\t\tMountPath: \"/cockroach-certs\",\n\t\t\t\t\t\t}},\n\t\t\t\t\t\tCommand: []string{\n\t\t\t\t\t\t\t\"/cockroach/cockroach\",\n\t\t\t\t\t\t\t\"init\",\n\t\t\t\t\t\t\t\"--certs-dir=/cockroach-certs\",\n\t\t\t\t\t\t\t\"--host=\" + m.Name + \"-0.\" + m.Name,\n\t\t\t\t\t\t},\n\t\t\t\t\t}},\n\t\t\t\t\tRestartPolicy: corev1.RestartPolicyNever,\n\t\t\t\t\tVolumes: []corev1.Volume{{\n\t\t\t\t\t\tName: \"client-certs\",\n\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\tEmptyDir: &corev1.EmptyDirVolumeSource{},\n\t\t\t\t\t\t},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Set CockroachDB instance as the owner and controller\n\terr := controllerutil.SetControllerReference(m, dep, r.scheme)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Failed to set Controller Reference\", \"m\", m, \"dep\", dep, \"r.scheme\", r.scheme)\n\t}\n\treturn dep\n}",
"func TestBatch(t *testing.T) {\n\tpre := config.Presubmit{\n\t\tName: \"pr-some-job\",\n\t\tAgent: \"jenkins\",\n\t\tContext: \"Some Job Context\",\n\t}\n\tfc := &fkc{\n\t\tprowjobs: []kube.ProwJob{pjutil.NewProwJob(pjutil.BatchSpec(pre, kube.Refs{\n\t\t\tOrg: \"o\",\n\t\t\tRepo: \"r\",\n\t\t\tBaseRef: \"master\",\n\t\t\tBaseSHA: \"123\",\n\t\t\tPulls: []kube.Pull{\n\t\t\t\t{\n\t\t\t\t\tNumber: 1,\n\t\t\t\t\tSHA: \"abc\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNumber: 2,\n\t\t\t\t\tSHA: \"qwe\",\n\t\t\t\t},\n\t\t\t},\n\t\t}))},\n\t}\n\tjc := &fjc{}\n\tc := Controller{\n\t\tkc: fc,\n\t\tjc: jc,\n\t\tca: newFakeConfigAgent(t),\n\t\tpendingJobs: make(map[string]int),\n\t\tlock: sync.RWMutex{},\n\t}\n\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on first sync: %v\", err)\n\t}\n\tif fc.prowjobs[0].Status.State != kube.PendingState {\n\t\tt.Fatalf(\"Wrong state: %v\", fc.prowjobs[0].Status.State)\n\t}\n\tif !fc.prowjobs[0].Status.JenkinsEnqueued {\n\t\tt.Fatal(\"Wrong enqueued.\")\n\t}\n\tjc.enqueued = true\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on second sync: %v\", err)\n\t}\n\tif !fc.prowjobs[0].Status.JenkinsEnqueued {\n\t\tt.Fatal(\"Wrong enqueued steady state.\")\n\t}\n\tjc.enqueued = false\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on third sync: %v\", err)\n\t}\n\tif fc.prowjobs[0].Status.JenkinsEnqueued {\n\t\tt.Fatal(\"Wrong enqueued after leaving queue.\")\n\t}\n\tjc.status = Status{Building: true}\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on fourth sync: %v\", err)\n\t}\n\tif fc.prowjobs[0].Status.State != kube.PendingState {\n\t\tt.Fatalf(\"Wrong state: %v\", fc.prowjobs[0].Status.State)\n\t}\n\tjc.status = Status{\n\t\tBuilding: false,\n\t\tNumber: 42,\n\t}\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on fifth sync: %v\", err)\n\t}\n\tif fc.prowjobs[0].Status.PodName != \"pr-some-job-42\" {\n\t\tt.Fatalf(\"Wrong PodName: %s\", fc.prowjobs[0].Status.PodName)\n\t}\n\tif fc.prowjobs[0].Status.State != kube.FailureState {\n\t\tt.Fatalf(\"Wrong state: %v\", fc.prowjobs[0].Status.State)\n\t}\n\n\t// This is what the SQ reads.\n\tif fc.prowjobs[0].Spec.Context != \"Some Job Context\" {\n\t\tt.Fatalf(\"Wrong context: %v\", fc.prowjobs[0].Spec.Context)\n\t}\n}",
"func (j *Job) Run() {\n\tleftRunningTimes := j.times.Add(-1)\n\tif leftRunningTimes < 0 {\n\t\tj.status.Set(StatusClosed)\n\t\treturn\n\t}\n\t// This means it does not limit the running times.\n\t// I know it's ugly, but it is surely high performance for running times limit.\n\tif leftRunningTimes < 2000000000 && leftRunningTimes > 1000000000 {\n\t\tj.times.Set(math.MaxInt32)\n\t}\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tif err != panicExit {\n\t\t\t\t\tpanic(err)\n\t\t\t\t} else {\n\t\t\t\t\tj.Close()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif j.Status() == StatusRunning {\n\t\t\t\tj.SetStatus(StatusReady)\n\t\t\t}\n\t\t}()\n\t\tj.job()\n\t}()\n}",
"func (d *Data)AddJob() int {\n id := d.nextJobId()\n mutex.Lock()\n d.hashMap[id] = \"\"\n d.inProgressCount++\n mutex.Unlock()\n return id\n}",
"func (c *client) Launch(opts launcher.LaunchOptions) ([]runtime.Object, error) {\n\tctx := context.Background()\n\tns := opts.Repository.Namespace\n\tif ns == \"\" {\n\t\tns = c.ns\n\t}\n\tsafeName := naming.ToValidValue(opts.Repository.Name)\n\tsafeSha := naming.ToValidValue(opts.GitSHA)\n\tselector := fmt.Sprintf(\"%s,%s=%s\", c.selector, launcher.RepositoryLabelKey, safeName)\n\tjobInterface := c.kubeClient.BatchV1().Jobs(ns)\n\tlist, err := jobInterface.List(ctx, metav1.ListOptions{\n\t\tLabelSelector: selector,\n\t})\n\tif err != nil && apierrors.IsNotFound(err) {\n\t\terr = nil\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to find Jobs in namespace %s with selector %s\", ns, selector)\n\t}\n\n\tvar jobsForSha []v1.Job\n\tvar activeJobs []v1.Job\n\tfor _, r := range list.Items {\n\t\tlog.Logger().Infof(\"found Job %s\", r.Name)\n\n\t\tif r.Labels[launcher.CommitShaLabelKey] == safeSha && r.Labels[launcher.RerunLabelKey] != \"true\" {\n\t\t\tjobsForSha = append(jobsForSha, r)\n\t\t}\n\n\t\t// is the job active\n\t\tif IsJobActive(r) {\n\t\t\tactiveJobs = append(activeJobs, r)\n\t\t}\n\t}\n\n\tif len(jobsForSha) == 0 {\n\t\tif len(activeJobs) > 0 {\n\t\t\tlog.Logger().Infof(\"not creating a Job in namespace %s for repo %s sha %s yet as there is an active job %s\", ns, safeName, safeSha, activeJobs[0].Name)\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn c.startNewJob(ctx, opts, jobInterface, ns, safeName, safeSha)\n\t}\n\treturn nil, nil\n}",
"func StartAndCheckJobBatch(cfg config.Config, suiteName string) error {\n\tappEnvs := []string{\"egressrulestopublicdns\", \"allowradix\"}\n\tlogger = log.WithFields(log.Fields{\"Suite\": suiteName})\n\n\tfor _, appEnv := range appEnvs {\n\t\tbaseUrl := cfg.GetNetworkPolicyCanaryUrl(appEnv)\n\t\tpassword := cfg.GetNetworkPolicyCanaryPassword()\n\t\tif err := startJobBatch(baseUrl, password, appEnv); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (s *schedule) start() {\n\tif s.running {\n\t\treturn\n\t}\n\n\ts.running = true\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-s.ticker.C:\n\t\t\t\tcallJobFuncWithParams(s.jobFunc, s.jobParams)\n\t\t\tcase <-s.stopCh:\n\t\t\t\ts.ticker.Stop()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}",
"func (t *traverser) runJobs() {\n\tt.logger.Info(\"runJobs call\")\n\tdefer t.logger.Info(\"runJobs return\")\n\tdefer close(t.pendingChan)\n\n\t// Run all jobs that come in on runJobChan. The loop exits when runJobChan\n\t// is closed in the runningReaper goroutine in Run().\n\tfor job := range t.runJobChan {\n\t\t// Don't run the job if traverser stopped or shutting down. In this case,\n\t\t// drain runJobChan to prevent runningReaper from blocking (the chan is\n\t\t// unbuffered). As long as we do not add job to runner repo, or do anything\n\t\t// to the job, it's like the job never ran; it stays pending and tries=0.\n\t\t//\n\t\t// Must check before running goroutine because Run() closes runJobChan\n\t\t// when the runningReaper is done. Then this loop will end and close\n\t\t// pendingChan which stopRunningJobs blocks on. Since this check happens\n\t\t// in loop not goroutine, a closed pendingChan means it's been checked\n\t\t// for all jobs and either the job did not run or it did with pending+1\n\t\t// because the loop won't finish until running all code before the goroutine\n\t\t// is launched.\n\t\tselect {\n\t\tcase <-t.stopChan:\n\t\t\tlog.Infof(\"not running job %s: traverser stopped or shutting down\", job.Id)\n\t\t\tcontinue\n\t\tdefault:\n\t\t}\n\n\t\t// Signal to stopRunningJobs that there's +1 goroutine that's going\n\t\t// to add itself to runnerRepo\n\t\tatomic.AddInt64(&t.pending, 1)\n\n\t\t// Explicitly pass the job into the func, or all goroutines would share\n\t\t// the same loop \"job\" variable.\n\t\tgo func(job proto.Job) {\n\t\t\tjLogger := t.logger.WithFields(log.Fields{\"job_id\": job.Id, \"sequence_id\": job.SequenceId, \"sequence_try\": t.chain.SequenceTries(job.Id)})\n\n\t\t\t// If this is sequence start job (which currently means sequenceId == job.Id),\n\t\t\t// wait for duration of SequenceRetryWait, then increment sequence try count.\n\t\t\tif t.chain.IsSequenceStartJob(job.Id) {\n\t\t\t\tif t.chain.SequenceTries(job.Id) != 0 {\n\t\t\t\t\tjLogger.Infof(fmt.Sprintf(\"waiting %s before retrying sequence\", job.SequenceRetryWait))\n\t\t\t\t\tretryWait, _ := time.ParseDuration(job.SequenceRetryWait) // checked that this parses in RM\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-time.After(retryWait): // wait before retry\n\t\t\t\t\tcase <-t.stopChan:\n\t\t\t\t\t\tjLogger.Infof(\"traverser was stopped - exiting sequence retry wait early and not running job\")\n\t\t\t\t\t\tatomic.AddInt64(&t.pending, -1)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tt.chain.IncrementSequenceTries(job.Id, 1)\n\t\t\t\tjLogger.Infof(\"sequence try %d\", t.chain.SequenceTries(job.Id))\n\t\t\t}\n\n\t\t\t// Always send the finished job to doneJobChan to be reaped. If the\n\t\t\t// reaper isn't reaping any more jobs (if this job took too long to\n\t\t\t// finish after being stopped), sending to doneJobChan won't be\n\t\t\t// possible - timeout after a while so we don't leak this goroutine.\n\t\t\tdefer func() {\n\t\t\t\tselect {\n\t\t\t\tcase t.doneJobChan <- job: // reap the done job\n\t\t\t\tcase <-time.After(t.sendTimeout):\n\t\t\t\t\tjLogger.Warnf(\"timed out sending job to doneJobChan\")\n\t\t\t\t}\n\t\t\t\t// Remove the job's runner from the repo (if it was ever added)\n\t\t\t\t// AFTER sending it to doneJobChan. 
This avoids a race condition\n\t\t\t\t// when the stopped + suspended reapers check if the runnerRepo\n\t\t\t\t// is empty.\n\t\t\t\tt.runnerRepo.Remove(job.Id)\n\t\t\t}()\n\n\t\t\t// Job tries for current sequence try and total tries for all seq tries.\n\t\t\t// For new chains, these are zero. For suspended/resumed chains they can\n\t\t\t// be > 0 which is why we pass them to the job runner: to resume for the\n\t\t\t// last counts.\n\t\t\tcurTries, totalTries := t.chain.JobTries(job.Id)\n\n\t\t\trunner, err := t.rf.Make(job, t.chain.RequestId(), curTries, totalTries)\n\t\t\tif err != nil {\n\t\t\t\t// Problem creating the job runner - treat job as failed.\n\t\t\t\t// Send a JobLog to the RM so that it knows this job failed.\n\t\t\t\tatomic.AddInt64(&t.pending, -1)\n\t\t\t\tjob.State = proto.STATE_FAIL\n\t\t\t\terr = fmt.Errorf(\"problem creating job runner: %s\", err)\n\t\t\t\tt.sendJL(job, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// --------------------------------------------------------------\n\n\t\t\t// Add the runner to the repo. Runners in the repo are used\n\t\t\t// by the Status, Stop, and shutdown methods on the traverser.\n\t\t\t// Then decrement pending to signal to stopRunningJobs that\n\t\t\t// there's one less goroutine it nees to wait for.\n\t\t\tt.runnerRepo.Set(job.Id, runner)\n\t\t\tatomic.AddInt64(&t.pending, -1)\n\n\t\t\t// Run the job. This is a blocking operation that could take a long time.\n\t\t\tjLogger.Infof(\"running job\")\n\t\t\tt.chain.SetJobState(job.Id, proto.STATE_RUNNING)\n\t\t\tret := runner.Run(job.Data)\n\t\t\tjLogger.Infof(\"job done: state=%s (%d)\", proto.StateName[ret.FinalState], ret.FinalState)\n\n\t\t\t// We don't pass the Chain to the job runner, so it can't call this\n\t\t\t// itself. Instead, it returns how many tries it did, and we set it.\n\t\t\tt.chain.IncrementJobTries(job.Id, int(ret.Tries))\n\n\t\t\t// Set job final state because this job is about to be reaped on\n\t\t\t// the doneJobChan, sent in this goroutine's defer func at top ^.\n\t\t\tjob.State = ret.FinalState\n\t\t}(job)\n\t}\n}",
"func (cu *CurlJob) Key() int {\n\treturn HashCode(cu.description)\n}",
"func (c *Controller) RetryJob(jobID string) error {\n\tif utils.IsEmptyStr(jobID) {\n\t\treturn errors.New(\"empty job ID\")\n\t}\n\n\treturn c.backendPool.RetryJob(jobID)\n}",
"func (scw *JobFuncWrapper) ensureNooneElseRunning(job *que.Job, tx *pgx.Tx, key string) (bool, error) {\n\tvar lastCompleted time.Time\n\tvar nextScheduled time.Time\n\terr := tx.QueryRow(\"SELECT last_completed, next_scheduled FROM cron_metadata WHERE id = $1 FOR UPDATE\", key).Scan(&lastCompleted, &nextScheduled)\n\tif err != nil {\n\t\tif err == pgx.ErrNoRows {\n\t\t\t_, err = tx.Exec(\"INSERT INTO cron_metadata (id) VALUES ($1)\", key)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\treturn false, ErrImmediateReschedule\n\t\t}\n\t\treturn false, err\n\t}\n\n\tif time.Now().Before(nextScheduled) {\n\t\tvar futureJobs int\n\t\t// make sure we don't regard ourself as a future job. Sometimes clock skew makes us think we can't run yet.\n\t\terr = tx.QueryRow(\"SELECT count(*) FROM que_jobs WHERE job_class = $1 AND args::jsonb = $2::jsonb AND run_at >= $3 AND job_id != $4\", job.Type, job.Args, nextScheduled, job.ID).Scan(&futureJobs)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tif futureJobs > 0 {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn false, scw.QC.EnqueueInTx(&que.Job{\n\t\t\tType: job.Type,\n\t\t\tArgs: job.Args,\n\t\t\tRunAt: nextScheduled,\n\t\t}, tx)\n\t}\n\n\t// Continue\n\treturn true, nil\n}",
"func newJobID(tm Time) (Job, error) {\n\tk, err := ksuid.NewRandomWithTime(tm)\n\tif err != nil {\n\t\treturn Job{}, err\n\t}\n\treturn Job(k), nil\n}",
"func NextJob(ctx context.Context, p string) (j *Job, err error) {\n\tt := utils.FromTaskContext(ctx)\n\n\tit := contexts.DB.NewIterator(\n\t\tutil.BytesPrefix(constants.FormatJobKey(t, \"\")), nil)\n\n\tfor ok := it.Seek(constants.FormatJobKey(t, p)); ok; ok = it.Next() {\n\t\tk := it.Key()\n\n\t\t// Check if the same key first, and go further.\n\t\tif bytes.Compare(k, constants.FormatJobKey(t, p)) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t// If k doesn't has job prefix, there are no job any more.\n\t\tif !bytes.HasPrefix(k, constants.FormatJobKey(t, \"\")) {\n\t\t\tbreak\n\t\t}\n\n\t\tj = &Job{}\n\t\tv := it.Value()\n\t\terr = msgpack.Unmarshal(v, j)\n\t\tif err != nil {\n\t\t\tlogrus.Panicf(\"Msgpack unmarshal failed for %v.\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tit.Release()\n\terr = it.Error()\n\treturn\n}",
"func GenJobId() gopter.Gen {\n\treturn func(genParams *gopter.GenParameters) *gopter.GenResult {\n\t\tconst chars = \"abcdefghijklmnopqrstuvwxyz0123456789\"\n\t\tlength := (genParams.Rng.Intn(20) + 1)\n\t\tresult := make([]byte, length)\n\t\tfor i := 0; i < length; i++ {\n\t\t\tresult[i] = chars[genParams.Rng.Intn(len(chars))]\n\t\t}\n\n\t\treturn gopter.NewGenResult(string(result), gopter.NoShrinker)\n\t}\n}",
"func EnqueueJob(ctx context.Context, workerBaseStore *basestore.Store, job *Job) (id int, err error) {\n\tid, _, err = basestore.ScanFirstInt(workerBaseStore.Query(\n\t\tctx,\n\t\tsqlf.Sprintf(\n\t\t\tenqueueJobFmtStr,\n\t\t\tjob.SeriesID,\n\t\t\tjob.SearchQuery,\n\t\t\tjob.RecordTime,\n\t\t\tjob.State,\n\t\t\tjob.ProcessAfter,\n\t\t\tjob.Cost,\n\t\t\tjob.Priority,\n\t\t),\n\t))\n\treturn\n}",
"func waitForJobTurn(jobBody map[string]interface{}, qName string) (res bool) {\n\t// Create REDIS pool.\n\tvar pool = queue.NewPool()\n\tbodyBytes, _ := json.Marshal(jobBody)\n\tres, err := queue.WaitAndRunJob(pool, qName, bodyBytes)\n\tif err != nil {\n\t\tlog.Errorf(\"[HANDLER] Error while waiting for job to run...\")\n\t\treturn false\n\t}\n\treturn res\n}",
"func (js *JobServ) NewJobId() int64 {\n\tid := js.NextJobId\n\tjs.NextJobId++\n\treturn id\n}",
"func (d Dispatcher) Job(id string) (string, error) {\n\tj, err := d.GetBC().FindJob(id)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tjBytes, err := helpers.Serialize(j)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(jBytes), nil\n}",
"func (j *JobRunner) Run() error {\n\tif j.job.Stdin == nil {\n\t\tvar err error\n\t\tj.job.Stdin, err = os.Open(os.DevNull)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := j.client.Message(j.job.Name, j.job); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (con lt) JobsSubmitWithRetry(job *lava.JobStruct) (id int, err error) {\n\tvar ids []int\n\tid = -1\n\tids, err = con.c.JobsSubmit(job)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"JobsSubmit returned error: %v\\n\", err)\n\t\treturn\n\t}\n\tif len(ids) == 0 {\n\t\terr = fmt.Errorf(\"Got Invalid jobIDs\")\n\t\treturn\n\t}\n\tid = ids[0]\n\treturn\n}",
"func (w *worker) launchTryjobs(ctx context.Context, tryjobs []*tryjob.Tryjob) ([]*tryjob.Tryjob, error) {\n\ttoBeLaunched := make([]*tryjob.Tryjob, len(tryjobs))\n\tfor i, tj := range tryjobs {\n\t\tswitch {\n\t\tcase tj.Status != tryjob.Status_PENDING:\n\t\t\tpanic(fmt.Errorf(\"expected PENDING status for tryjob %d; got %s\", tj.ID, tj.Status))\n\t\tcase tj.ExternalID != \"\":\n\t\t\tpanic(fmt.Errorf(\"expected empty external ID for tryjob %d; got %s\", tj.ID, tj.ExternalID))\n\t\tdefault:\n\t\t\ttoBeLaunched[i] = tj\n\t\t}\n\t}\n\tclsInOrder, err := submit.ComputeOrder(w.cls)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlaunchFailures := make(map[*tryjob.Tryjob]error)\n\t_ = retry.Retry(clock.Tag(ctx, launchRetryClockTag), retryFactory, func() error {\n\t\tvar hasFatal bool\n\t\tlaunchFailures, hasFatal = w.tryLaunchTryjobsOnce(ctx, toBeLaunched, clsInOrder)\n\t\tswitch {\n\t\tcase len(launchFailures) == 0:\n\t\t\treturn nil\n\t\tcase hasFatal: // stop the retry\n\t\t\treturn nil\n\t\tdefault: // all failures can be retried\n\t\t\ttoBeLaunched = toBeLaunched[:0] // reuse existing slice\n\t\t\tfor tj := range launchFailures {\n\t\t\t\ttoBeLaunched = append(toBeLaunched, tj)\n\t\t\t}\n\t\t\treturn errors.New(\"please retry\") // returns an arbitrary error to retry\n\t\t}\n\t}, func(error, time.Duration) {\n\t\tvar sb strings.Builder\n\t\tsb.WriteString(\"retrying following tryjobs:\")\n\t\tfor tj, err := range launchFailures {\n\t\t\tsb.WriteString(\"\\n * \")\n\t\t\tsb.WriteString(strconv.FormatInt(int64(tj.ID), 10))\n\t\t\tsb.WriteString(\": \")\n\t\t\tsb.WriteString(err.Error())\n\t\t}\n\t\tlogging.Warningf(ctx, sb.String())\n\t})\n\n\tlaunchFailureLogs := make([]*tryjob.ExecutionLogEntry_TryjobLaunchFailed, 0, len(launchFailures))\n\tfor _, tj := range tryjobs {\n\t\tif err, ok := launchFailures[tj]; ok {\n\t\t\ttj.Status = tryjob.Status_UNTRIGGERED\n\t\t\tswitch grpcStatus, ok := status.FromError(errors.Unwrap(err)); {\n\t\t\tcase !ok:\n\t\t\t\t// Log the error detail but don't leak the internal error.\n\t\t\t\tlogging.Errorf(ctx, \"unexpected internal error when launching tryjob: %s\", err)\n\t\t\t\ttj.UntriggeredReason = \"unexpected internal error\"\n\t\t\tdefault:\n\t\t\t\ttj.UntriggeredReason = fmt.Sprintf(\"received %s from %s\", grpcStatus.Code(), w.backend.Kind())\n\t\t\t\tif msg := grpcStatus.Message(); msg != \"\" {\n\t\t\t\t\ttj.UntriggeredReason += \". message: \" + msg\n\t\t\t\t}\n\t\t\t}\n\t\t\tlaunchFailureLogs = append(launchFailureLogs, &tryjob.ExecutionLogEntry_TryjobLaunchFailed{\n\t\t\t\tDefinition: tj.Definition,\n\t\t\t\tReason: tj.UntriggeredReason,\n\t\t\t})\n\t\t}\n\t\ttj.EVersion += 1\n\t\ttj.EntityUpdateTime = datastore.RoundTime(clock.Now(ctx).UTC())\n\t}\n\tif len(launchFailureLogs) > 0 {\n\t\tw.logEntries = append(w.logEntries, &tryjob.ExecutionLogEntry{\n\t\t\tTime: timestamppb.New(clock.Now(ctx).UTC()),\n\t\t\tKind: &tryjob.ExecutionLogEntry_TryjobsLaunchFailed_{\n\t\t\t\tTryjobsLaunchFailed: &tryjob.ExecutionLogEntry_TryjobsLaunchFailed{\n\t\t\t\t\tTryjobs: launchFailureLogs,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\treturn w.saveLaunchedTryjobs(ctx, tryjobs)\n}",
"func (w *Processor) start() {\n\tfor {\n\t\tselect {\n\t\tcase job, ok := <-w.jobQueue:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.limiter <- empty{}\n\n\t\t\t// Spawn a worker goroutine.\n\t\t\tgo func() {\n\t\t\t\tif err := job.Run(); err != nil {\n\t\t\t\t\tw.jobErrorHandler(err)\n\t\t\t\t}\n\t\t\t\t<-w.limiter\n\t\t\t}()\n\t\tcase <-w.stop:\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (v *johndictTasker) Run() error {\n\t// Grab a Lock\n\tv.mux.Lock()\n\tdefer v.mux.Unlock()\n\n\t// Check for the status of this job\n\tif common.IsDone(v.job.Status) {\n\t\tlog.WithField(\"Status\", v.job.Status).Debug(\"Unable to start johndict job\")\n\t\treturn errors.New(\"Job has already finished.\")\n\t}\n\n\t// Check if this job is running\n\tif common.IsRunning(v.job.Status) {\n\t\tlog.WithField(\"Status\", v.job.Status).Debug(\"Johndict job is already running.\")\n\t\treturn nil\n\t}\n\n\t// Set commands for first start or restoring\n\tif common.IsNew(v.job.Status) {\n\t\tv.cmd = *exec.Command(config.BinPath, v.args...)\n\t} else {\n\t\trestoreArgs := []string{\"--restore=\" + v.job.UUID}\n\t\tv.cmd = *exec.Command(config.BinPath, restoreArgs...)\n\t}\n\n\tv.cmd.Dir = v.wd\n\n\tlog.WithFields(log.Fields{\n\t\t\"status\": v.job.Status,\n\t\t\"dir\": v.cmd.Dir,\n\t}).Debug(\"Setup exec.command\")\n\n\t// Assign the Stderr, Stdout, and Stdin pipes\n\tvar pipeError error\n\tv.stderrPipe, pipeError = v.cmd.StderrPipe()\n\tif pipeError != nil {\n\t\treturn pipeError\n\t}\n\tv.stdoutPipe, pipeError = v.cmd.StdoutPipe()\n\tif pipeError != nil {\n\t\treturn pipeError\n\t}\n\tv.stdinPipe, pipeError = v.cmd.StdinPipe()\n\tif pipeError != nil {\n\t\treturn pipeError\n\t}\n\n\tv.stderr = bytes.NewBuffer([]byte(\"\"))\n\tv.stdout = bytes.NewBuffer([]byte(\"\"))\n\n\t// Start goroutine to copy data from stderr and stdout pipe\n\tgo func() {\n\t\tfor {\n\t\t\tio.Copy(v.stderr, v.stderrPipe)\n\t\t}\n\t}()\n\tgo func() {\n\t\tfor {\n\t\t\tio.Copy(v.stdout, v.stdoutPipe)\n\t\t}\n\t}()\n\n\t// Start the command\n\tlog.WithField(\"Arguments\", v.cmd.Args).Debug(\"Running the command\")\n\n\terr := v.cmd.Start()\n\tif err != nil {\n\t\tv.job.Status = common.STATUS_FAILED\n\t\tlog.WithField(\"Start Error\", err.Error())\n\t\treturn err\n\t}\n\n\tv.job.StartTime = time.Now()\n\tv.job.Status = common.STATUS_RUNNING\n\n\t// Goroutine to change status once the external executable quits\n\tgo func() {\n\t\tv.cmd.Wait()\n\n\t\t// The exec command has finished running\n\t\tv.mux.Lock()\n\t\tv.job.Status = common.STATUS_DONE\n\t\tv.job.Progress = 100.00\n\t\tv.doneWaitChan <- struct{}{}\n\t\tv.mux.Unlock()\n\t}()\n\n\treturn nil\n}",
"func (c *OrganizationsApiproductsRateplansListCall) StartKey(startKey string) *OrganizationsApiproductsRateplansListCall {\n\tc.urlParams_.Set(\"startKey\", startKey)\n\treturn c\n}",
"func (w *Worker) SubmitJob(namespace, id, command string, args []string, opts ...SubmitJobOption) (*Job, error) {\n\t// Lock shutdown for life of the submission\n\tw.shutdownLock.RLock()\n\tdefer w.shutdownLock.RUnlock()\n\tif w.shutdown {\n\t\treturn nil, ErrShutdown\n\t}\n\t// Make unique ID if not there\n\tif id == \"\" {\n\t\tid = uuid.New().String()\n\t}\n\t// Put nil in the map to confirm ID not in use and hold ID spot\n\tw.jobsLock.Lock()\n\t_, exists := w.jobs[namespace][id]\n\tif !exists {\n\t\tif w.jobs[namespace] == nil {\n\t\t\tw.jobs[namespace] = map[string]*Job{}\n\t\t}\n\t\tw.jobs[namespace][id] = nil\n\t}\n\tw.jobsLock.Unlock()\n\tif exists {\n\t\treturn nil, ErrIDAlreadyExists\n\t}\n\t// Remove ID from job map on failure\n\tsuccess := false\n\tdefer func() {\n\t\tif !success {\n\t\t\tw.jobsLock.Lock()\n\t\t\tdefer w.jobsLock.Unlock()\n\t\t\tdelete(w.jobs[namespace], id)\n\t\t}\n\t}()\n\t// Create job with options\n\tjob := newJob(namespace, id, command, args...)\n\tfor _, opt := range opts {\n\t\topt(job)\n\t}\n\tif !w.hasLimits && job.RootFS != \"\" {\n\t\treturn nil, fmt.Errorf(\"cannot set root FS on non-limited worker\")\n\t}\n\t// Attempt to start job\n\tif err := w.runner.start(job); err != nil {\n\t\treturn nil, fmt.Errorf(\"starting job: %w\", err)\n\t}\n\t// Add to map and return\n\tw.jobsLock.Lock()\n\tw.jobs[namespace][id] = job\n\tw.jobsLock.Unlock()\n\tsuccess = true\n\treturn job, nil\n}",
"func HookOnJobStart(hooks []string, j common.Job) {\n\tlog.WithField(\"id\", j.UUID).Debug(\"Executing hooks against job start.\")\n\n\tdata := copyJobToHookJob(j)\n\n\thooksRun(hooks, data)\n\n}",
"func (shim *JobDirectClient) ResumeJob(ctx context.Context, in *ExtendQuery, opts ...grpc.CallOption) (Job_ResumeJobClient, error) {\n md, _ := metadata.FromOutgoingContext(ctx)\n ictx := metadata.NewIncomingContext(ctx, md)\n\n\tw := &directJobResumeJob{ictx, make(chan *QueryResult, 100), in, nil}\n if shim.streamServerInt != nil {\n go func() {\n defer w.close()\n info := grpc.StreamServerInfo{\n FullMethod: \"/gripql.Job/ResumeJob\",\n IsServerStream: true,\n }\n w.e = shim.streamServerInt(shim.server, w, &info, _Job_ResumeJob_Handler)\n } ()\n return w, nil\n }\n\tgo func() {\n defer w.close()\n\t\tw.e = shim.server.ResumeJob(in, w)\n\t}()\n\treturn w, nil\n}",
"func (c *controller) CreateJob(namespace string, job Job) (*apibatchv1.Job, error) {\n\tj := job.Build()\n\treturn c.k8sBatchClient.Jobs(namespace).Create(j)\n}",
"func (fc *FederatedController) Start() error {\n\tworkers := 1\n\tstopCh := messageContext.Done()\n\n\tgo func() {\n\t\tdefer utilruntime.HandleCrash()\n\t\tdefer fc.queue.ShutDown()\n\t\tklog.Infof(\"Starting federatedlearning job controller\")\n\t\tdefer klog.Infof(\"Shutting down federatedlearning job controller\")\n\n\t\tif !cache.WaitForNamedCacheSync(\"federatedlearning job\", stopCh, fc.podStoreSynced, fc.jobStoreSynced) {\n\t\t\tklog.Errorf(\"failed to wait for caches to sync\")\n\n\t\t\treturn\n\t\t}\n\n\t\tklog.Infof(\"Starting federatedlearning job workers\")\n\t\tfor i := 0; i < workers; i++ {\n\t\t\tgo wait.Until(fc.worker, time.Second, stopCh)\n\t\t}\n\n\t\t<-stopCh\n\t}()\n\treturn nil\n}",
"func (c *Client) ControlJob(jobID string, action string) {\n\turl := fmt.Sprintf(\"http://%s/api/v1/jobs/%s\", c.options.ServerAddr, jobID)\n\tresp, err := http.Post(url,\n\t\t\"application/json\",\n\t\tstrings.NewReader(fmt.Sprintf(\"action=%s\", action)),\n\t)\n\tif err != nil {\n\t\tlogrus.WithError(err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n}",
"func (c *Client) StartSimulationJobBatch(ctx context.Context, params *StartSimulationJobBatchInput, optFns ...func(*Options)) (*StartSimulationJobBatchOutput, error) {\n\tif params == nil {\n\t\tparams = &StartSimulationJobBatchInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"StartSimulationJobBatch\", params, optFns, addOperationStartSimulationJobBatchMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*StartSimulationJobBatchOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}",
"func (b *batch) trigger() {\n\tb.start.Do(b.run)\n}",
"func (b *batch) trigger() {\n\tb.start.Do(b.run)\n}",
"func NewKeyRingImportJob(ctx *pulumi.Context,\n\tname string, args *KeyRingImportJobArgs, opts ...pulumi.ResourceOption) (*KeyRingImportJob, error) {\n\tif args == nil || args.ImportJobId == nil {\n\t\treturn nil, errors.New(\"missing required argument 'ImportJobId'\")\n\t}\n\tif args == nil || args.ImportMethod == nil {\n\t\treturn nil, errors.New(\"missing required argument 'ImportMethod'\")\n\t}\n\tif args == nil || args.KeyRing == nil {\n\t\treturn nil, errors.New(\"missing required argument 'KeyRing'\")\n\t}\n\tif args == nil || args.ProtectionLevel == nil {\n\t\treturn nil, errors.New(\"missing required argument 'ProtectionLevel'\")\n\t}\n\tif args == nil {\n\t\targs = &KeyRingImportJobArgs{}\n\t}\n\tvar resource KeyRingImportJob\n\terr := ctx.RegisterResource(\"gcp:kms/keyRingImportJob:KeyRingImportJob\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (r Runner) Job(job string, opts State) (JobRun, error) {\n\tf, ok := r.jobRunProviders[job]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"job %q not added\", job)\n\t}\n\n\tif opts.Options == nil {\n\t\topts.Options = map[string]interface{}{}\n\t}\n\n\tif opts.Parameters == nil {\n\t\topts.Parameters = map[string]interface{}{}\n\t}\n\n\tjr := f(opts)\n\n\treturn jr, nil\n}",
"func (client JobClient) RenewKeyPreparer(ctx context.Context, resourceGroupName string, accountName string, jobName string) (*http.Request, error) {\n pathParameters := map[string]interface{} {\n \"accountName\": accountName,\n \"jobName\": jobName,\n \"resourceGroupName\": resourceGroupName,\n \"subscriptionId\": autorest.Encode(\"path\",client.SubscriptionID),\n }\n\n const APIVersion = \"2020-12-01-preview\"\n queryParameters := map[string]interface{} {\n \"api-version\": APIVersion,\n }\n\n preparer := autorest.CreatePreparer(\nautorest.AsPost(),\nautorest.WithBaseURL(client.BaseURI),\nautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.AISupercomputer/accounts/{accountName}/jobs/{jobName}/renewkey\",pathParameters),\nautorest.WithQueryParameters(queryParameters))\n return preparer.Prepare((&http.Request{}).WithContext(ctx))\n }",
"func (t *traverser) runJobs() {\n\t// Run all jobs that come in on runJobChan. The loop exits when runJobChan\n\t// is closed after the running reaper finishes.\n\tfor job := range t.runJobChan {\n\t\t// Explicitly pass the job into the func, or all goroutines would share\n\t\t// the same loop \"job\" variable.\n\t\tgo func(job proto.Job) {\n\t\t\tjLogger := t.logger.WithFields(log.Fields{\"job_id\": job.Id, \"sequence_id\": job.SequenceId, \"sequence_try\": t.chain.SequenceTries(job.Id)})\n\n\t\t\t// Always send the finished job to doneJobChan to be reaped. If the\n\t\t\t// reaper isn't reaping any more jobs (if this job took too long to\n\t\t\t// finish after being stopped), sending to doneJobChan won't be\n\t\t\t// possible - timeout after a while so we don't leak this goroutine.\n\t\t\tdefer func() {\n\t\t\t\tselect {\n\t\t\t\tcase t.doneJobChan <- job:\n\t\t\t\tcase <-time.After(t.sendTimeout):\n\t\t\t\t\tjLogger.Warnf(\"timed out sending job to doneJobChan\")\n\t\t\t\t}\n\t\t\t\t// Remove the job's runner from the repo (if it was ever added)\n\t\t\t\t// AFTER sending it to doneJobChan. This avoids a race condition\n\t\t\t\t// when the stopped + suspended reapers check if the runnerRepo\n\t\t\t\t// is empty.\n\t\t\t\tt.runnerRepo.Remove(job.Id)\n\t\t\t}()\n\n\t\t\t// Retrieve job and sequence try info from the chain for the Runner.\n\t\t\tsequenceTries := t.chain.SequenceTries(job.Id) // used in job logs\n\t\t\ttotalJobTries := t.chain.TotalTries(job.Id) // used in job logs\n\t\t\t// When resuming a stopped job, only try the job\n\t\t\t// [allowed tries - tries before being stopped] times, so the total\n\t\t\t// number of times the job is tried (during this sequence try) stays\n\t\t\t// correct. The job's last try (the try it was stopped on) doesn't\n\t\t\t// count, so subtract 1 if it was tried at least once before\n\t\t\t// being stopped.\n\t\t\ttriesBeforeStopped := uint(0)\n\t\t\tif job.State == proto.STATE_STOPPED {\n\t\t\t\ttriesBeforeStopped = t.chain.LatestRunTries(job.Id)\n\t\t\t\tif triesBeforeStopped > 0 {\n\t\t\t\t\ttriesBeforeStopped--\n\t\t\t\t}\n\t\t\t}\n\n\t\t\trunner, err := t.rf.Make(job, t.chain.RequestId(), totalJobTries, triesBeforeStopped, sequenceTries)\n\t\t\tif err != nil {\n\t\t\t\t// Problem creating the job runner - treat job as failed.\n\t\t\t\t// Send a JobLog to the RM so that it knows this job failed.\n\t\t\t\tjob.State = proto.STATE_FAIL\n\t\t\t\terr = fmt.Errorf(\"problem creating job runner: %s\", err)\n\t\t\t\tt.sendJL(job, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Add the runner to the repo. Runners in the repo are used\n\t\t\t// by the Status, Stop, and shutdown methods on the traverser.\n\t\t\tt.runnerRepo.Set(job.Id, runner)\n\n\t\t\t// Bail out if Stop was called or traverser shut down. It is\n\t\t\t// important that this check happens AFTER the runner is added to\n\t\t\t// the repo. Otherwise if Stop gets called after this check but\n\t\t\t// before the runner is added to the repo, there will be nothing to\n\t\t\t// stop the job from running.\n\t\t\t//\n\t\t\t// We don't lock stopMux around this check and runner.Run. 
It's okay if\n\t\t\t// there's a small chance for the runner to be run after the traverser\n\t\t\t// gets stopped or shut down - it'll just return after trying the job\n\t\t\t// once.\n\t\t\tif t.stopped {\n\t\t\t\tjob.State = proto.STATE_STOPPED\n\n\t\t\t\t// Send a JL to the RM so that it knows this job was stopped.\n\t\t\t\t// Add 1 to the total job tries, since this is used for keeping\n\t\t\t\t// job logs unique.\n\t\t\t\tt.chain.AddJobTries(job.Id, 1)\n\t\t\t\terr = fmt.Errorf(\"not starting job because traverser has already been stopped\")\n\t\t\t\tt.sendJL(job, err)\n\t\t\t\treturn\n\t\t\t} else if t.suspended {\n\t\t\t\tjob.State = proto.STATE_STOPPED\n\t\t\t\t// Don't send a JL because this job will be resumed later,\n\t\t\t\t// and don't include this try in the total # of tries (only\n\t\t\t\t// set job tries for the latest run).\n\t\t\t\tt.chain.SetLatestRunJobTries(job.Id, 1)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Run the job. This is a blocking operation that could take a long time.\n\t\t\tjLogger.Infof(\"running job\")\n\t\t\tt.chain.SetJobState(job.Id, proto.STATE_RUNNING)\n\t\t\tret := runner.Run(job.Data)\n\n\t\t\tt.chain.AddJobTries(job.Id, ret.Tries)\n\t\t\tjob.State = ret.FinalState\n\t\t}(job)\n\t}\n}",
"func (b *Batch) Do(key Key, hooks ...HookFunc) {\n\tb.reqs <- &batchRequest{\n\t\tkey: key,\n\t\thooks: hooks,\n\t}\n}",
"func (p *AuroraSchedulerManagerClient) StartCronJob(ctx context.Context, job *JobKey) (r *Response, err error) {\n var _args138 AuroraSchedulerManagerStartCronJobArgs\n _args138.Job = job\n var _result139 AuroraSchedulerManagerStartCronJobResult\n if err = p.Client_().Call(ctx, \"startCronJob\", &_args138, &_result139); err != nil {\n return\n }\n return _result139.GetSuccess(), nil\n}",
"func handleJob() {\n\tfor {\n\t\tworker := <-freeWorkerList\n\t\tmodels.MoveFromListByValue(\"FreeWorkerList\", worker, 0)\n\t\tjob := <-unhandleJobList\n\t\tjobStr, err := json.Marshal(job)\n\t\tif err != nil {\n\t\t\tlog.Println(\"[ErrorInfo]\", err)\n\t\t}\n\n\t\tmodels.MoveFromListByValue(\"DockerJobList\", string(jobStr), 0)\n\t\tbusyWorkerList = append(busyWorkerList, worker)\n\t\tmodels.PushMsgToList(\"BusyWorkerList\", worker)\n\n\t\tgo BuildDockerImageStartByHTTPReq(worker, job.Name, tarDockerFile(job.DockerFile), job.Tag)\n\t}\n}"
] | [
"0.63678306",
"0.60703164",
"0.5710661",
"0.56723154",
"0.5664913",
"0.5619767",
"0.55071634",
"0.5507015",
"0.5505586",
"0.54581165",
"0.54401153",
"0.54312783",
"0.5407354",
"0.5398783",
"0.5394899",
"0.53920925",
"0.539137",
"0.538903",
"0.5365163",
"0.53522587",
"0.53367233",
"0.5324912",
"0.5312212",
"0.5282328",
"0.5282328",
"0.52508646",
"0.52506346",
"0.5246734",
"0.5228505",
"0.52233773",
"0.5207536",
"0.5205856",
"0.51406556",
"0.5125192",
"0.5122959",
"0.51214045",
"0.51050234",
"0.5102281",
"0.5095149",
"0.5087506",
"0.50863653",
"0.5085368",
"0.50786716",
"0.50784045",
"0.50684774",
"0.5044486",
"0.50333023",
"0.50175744",
"0.5015522",
"0.5011378",
"0.5009624",
"0.50083274",
"0.50044554",
"0.49892783",
"0.49833164",
"0.4974591",
"0.4942834",
"0.4941852",
"0.49407968",
"0.49398655",
"0.49344182",
"0.49300414",
"0.49264807",
"0.49196726",
"0.49151525",
"0.49096033",
"0.4892489",
"0.48920387",
"0.4889582",
"0.48881903",
"0.48824978",
"0.48819798",
"0.48751563",
"0.48734808",
"0.48712912",
"0.48646453",
"0.48554233",
"0.48545575",
"0.48514366",
"0.48498496",
"0.48425534",
"0.48377123",
"0.48232794",
"0.4815202",
"0.48147082",
"0.48010436",
"0.4795941",
"0.47953808",
"0.47938958",
"0.47919586",
"0.47917154",
"0.47846454",
"0.47846454",
"0.47818145",
"0.47814423",
"0.47805995",
"0.47763693",
"0.4775328",
"0.477023",
"0.4770163"
] | 0.608413 | 1 |
Validate validates the job definition input | func (r *BatchJobKeyRotateV1) Validate(ctx context.Context, job BatchJobRequest, o ObjectLayer) error {
if r == nil {
return nil
}
if r.APIVersion != batchKeyRotateAPIVersion {
return errInvalidArgument
}
if r.Bucket == "" {
return errInvalidArgument
}
if _, err := o.GetBucketInfo(ctx, r.Bucket, BucketOptions{}); err != nil {
if isErrBucketNotFound(err) {
return batchKeyRotationJobError{
Code: "NoSuchSourceBucket",
Description: "The specified source bucket does not exist",
HTTPStatusCode: http.StatusNotFound,
}
}
return err
}
if GlobalKMS == nil {
return errKMSNotConfigured
}
if err := r.Encryption.Validate(); err != nil {
return err
}
for _, tag := range r.Flags.Filter.Tags {
if err := tag.Validate(); err != nil {
return err
}
}
for _, meta := range r.Flags.Filter.Metadata {
if err := meta.Validate(); err != nil {
return err
}
}
if err := r.Flags.Retry.Validate(); err != nil {
return err
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (s ScheduledJob) validate() error {\n\tvar err error\n\tif err = s.ScheduledJobConfig.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err = s.Workload.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err = validateContainerDeps(validateDependenciesOpts{\n\t\tsidecarConfig: s.Sidecars,\n\t\timageConfig: s.ImageConfig.Image,\n\t\tmainContainerName: aws.StringValue(s.Name),\n\t\tlogging: s.Logging,\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"validate container dependencies: %w\", err)\n\t}\n\tif err = validateExposedPorts(validateExposedPortsOpts{\n\t\tsidecarConfig: s.Sidecars,\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"validate unique exposed ports: %w\", err)\n\t}\n\treturn nil\n}",
"func (p *DefaultJobParser) Validate(job *paddlev1.TrainingJob) error {\n\n\tvar frameWork *paddlev1.Framework = nil\n\n\tif job.Spec.FrameWork != nil {\n\t\tframeWork = job.Spec.FrameWork\n\t}\n\t// Fill in default values\n\t// FIXME: Need to test. What is the value if specified \"omitempty\"\n\tif job.Spec.Port == 0 {\n\t\tjob.Spec.Port = 7164\n\t}\n\tif job.Spec.PortsNum == 0 {\n\t\tjob.Spec.PortsNum = 1\n\t}\n\tif job.Spec.PortsNumForSparse == 0 {\n\t\tjob.Spec.PortsNumForSparse = 1\n\t}\n\tif job.Spec.Image == \"\" {\n\t\tjob.Spec.Image = \"paddlepaddle/paddlecloud-job\"\n\t}\n\tif job.Spec.Passes == 0 {\n\t\tjob.Spec.Passes = 1\n\t}\n\t// only one trainer instance for local job\n\tif frameWork == nil && job.Spec.LocalJob ||\n\t\tframeWork != nil && frameWork.Type == paddlev1.Local {\n\t\tjob.Spec.Trainer.MaxInstance = 1\n\t\tjob.Spec.Trainer.MinInstance = 1\n\t}\n\n\t//if !job.Spec.FaultTolerant && job.Elastic() {\n\t//\treturn errors.New(\"max-instances should equal to min-instances when fault_tolerant is disabled\")\n\t//}\n\t// TODO: add validations.\n\n\treturn nil\n}",
"func (p *Job) Validate() error {\n\treturn nil\n\t//return validation.ValidateStruct(p,\n\t//validation.Field(&p.Theme, validation.Required, validation.In(\"default\", \"dark\")),\n\t//)\n}",
"func (m *Job) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCreatedAt(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateID(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOwner(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTags(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUpdatedAt(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (j *Job) Validate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.Validate(\n\t\t&validators.StringIsPresent{Field: j.Name, Name: \"Name\"},\n\t\t&validators.StringIsPresent{Field: j.Description, Name: \"Description\"},\n\t\t&validators.StringIsPresent{Field: j.Salary, Name: \"Salary\"},\n\t), nil\n}",
"func (j *JobConfig) validate() error {\n\tif j.Name == \"\" {\n\t\treturn errors.New(\"job must have a name\")\n\t}\n\n\tif j.Spec == \"\" {\n\t\treturn errors.New(\"spec is required\")\n\t}\n\n\tif j.Command == \"\" {\n\t\treturn errors.New(\"command is required\")\n\t}\n\n\t// Configure shell when multi-line scripts\n\tif j.Shell == \"\" && len(strings.Split(j.Command, \"\\n\")) > 1 {\n\t\tj.Shell = defaultShell\n\t}\n\n\tif _, err := cron.Parse(j.Spec); err != nil {\n\t\treturn fmt.Errorf(\"invalid cron spec: %v\", err)\n\t}\n\n\tif val := j.TimeoutString; val != \"\" {\n\t\tdur, err := time.ParseDuration(val)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid timeout: %v\", err)\n\t\t}\n\t\tj.Timeout = dur\n\t}\n\n\tif j.Docker != nil {\n\t\tj.RunMode = dockerMode\n\t} else {\n\t\tj.RunMode = nativeMode\n\t}\n\n\t// Notify on errors only by default\n\tif j.Notify != nil && j.Notify.Mode == \"\" {\n\t\tj.Notify.Mode = notifyError\n\t}\n\n\treturn nil\n}",
"func (m *Job) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateJob(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateProgress(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *FortifyJob) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateInvokingUserName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateJobState(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateJobType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *JobJob) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateFilament(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateFile(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *JobJobFile) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (j Job) Validate() *RhoError {\n\t// Command is required.\n\tif j.Command == \"\" {\n\t\treturn &RhoError{\n\t\t\tCode: CodeMissingCommand,\n\t\t\tMessage: \"All jobs must specify a command to execute.\",\n\t\t\tHint: `Specify a command to execute as a \"cmd\" element in your job.`,\n\t\t}\n\t}\n\n\t// ResultSource\n\tif j.ResultSource != \"stdout\" && !strings.HasPrefix(j.ResultSource, \"file:\") {\n\t\treturn &RhoError{\n\t\t\tCode: CodeInvalidResultSource,\n\t\t\tMessage: fmt.Sprintf(\"Invalid result source [%s]\", j.ResultSource),\n\t\t\tHint: `The \"result_source\" must be either \"stdout\" or \"file:{path}\".`,\n\t\t}\n\t}\n\n\t// ResultType\n\tif _, ok := validResultType[j.ResultType]; !ok {\n\t\taccepted := make([]string, 0, len(validResultType))\n\t\tfor tp := range validResultType {\n\t\t\taccepted = append(accepted, tp)\n\t\t}\n\n\t\treturn &RhoError{\n\t\t\tCode: CodeInvalidResultType,\n\t\t\tMessage: fmt.Sprintf(\"Invalid result type [%s]\", j.ResultType),\n\t\t\tHint: fmt.Sprintf(`The \"result_type\" must be one of the following: %s`, strings.Join(accepted, \", \")),\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (m *JobsJob) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateActions(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateContextMetaFilter(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDataSourceFilter(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHooks(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIdmFilter(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMergeAction(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNodeEventFilter(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateParameters(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateResourcesDependencies(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSchedule(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTasks(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUserEventFilter(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *WebhookJob) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *JobProgress) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *V1RayJob) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateHead(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateWorkers(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (s ScheduledJobConfig) validate() error {\n\tvar err error\n\tif err = s.ImageConfig.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"image\": %w`, err)\n\t}\n\tif err = s.ImageOverride.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err = s.TaskConfig.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err = s.Logging.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"logging\": %w`, err)\n\t}\n\tfor k, v := range s.Sidecars {\n\t\tif err = v.validate(); err != nil {\n\t\t\treturn fmt.Errorf(`validate \"sidecars[%s]\": %w`, k, err)\n\t\t}\n\t}\n\tif err = s.Network.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"network\": %w`, err)\n\t}\n\tif err = s.On.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"on\": %w`, err)\n\t}\n\tif err = s.JobFailureHandlerConfig.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err = s.PublishConfig.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"publish\": %w`, err)\n\t}\n\tfor ind, taskDefOverride := range s.TaskDefOverrides {\n\t\tif err = taskDefOverride.validate(); err != nil {\n\t\t\treturn fmt.Errorf(`validate \"taskdef_overrides[%d]\": %w`, ind, err)\n\t\t}\n\t}\n\tif s.TaskConfig.IsWindows() {\n\t\tif err = validateWindows(validateWindowsOpts{\n\t\t\tefsVolumes: s.Storage.Volumes,\n\t\t\treadOnlyFS: s.Storage.ReadonlyRootFS,\n\t\t}); err != nil {\n\t\t\treturn fmt.Errorf(`validate Windows: %w`, err)\n\t\t}\n\t}\n\tif s.TaskConfig.IsARM() {\n\t\tif err = validateARM(validateARMOpts{\n\t\t\tSpot: s.Count.AdvancedCount.Spot,\n\t\t\tSpotFrom: s.Count.AdvancedCount.Range.RangeConfig.SpotFrom,\n\t\t}); err != nil {\n\t\t\treturn fmt.Errorf(\"validate ARM: %w\", err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *JobJobFilament) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (a *EmployeeArguments) Validate() error {\n\tif a.Work != nil {\n\t\treturn a.Work.Validate()\n\t}\n\n\treturn nil\n}",
"func (a *WorkArguments) Validate() error {\n\tif a.DepartmentName == \"\" || a.AppointmentName == \"\" {\n\t\treturn server.EmptyArgumentsHTTPError\n\t}\n\n\treturn nil\n}",
"func (o *initJobOpts) Validate() error {\n\t// If this app is pending creation, we'll skip validation.\n\tif !o.wsPendingCreation {\n\t\tif err := validateWorkspaceApp(o.wsAppName, o.appName, o.store); err != nil {\n\t\t\treturn err\n\t\t}\n\t\to.appName = o.wsAppName\n\t}\n\tif o.dockerfilePath != \"\" && o.image != \"\" {\n\t\treturn fmt.Errorf(\"--%s and --%s cannot be specified together\", dockerFileFlag, imageFlag)\n\t}\n\tif o.dockerfilePath != \"\" {\n\t\tif _, err := o.fs.Stat(o.dockerfilePath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif o.timeout != \"\" {\n\t\tif err := validateTimeout(o.timeout); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif o.retries < 0 {\n\t\treturn errors.New(\"number of retries must be non-negative\")\n\t}\n\treturn nil\n}",
"func (con lt) JobsValidate(jobYaml string) (msg string, err error) {\n\n\tjobErrors, err := con.c.JobsValidate(jobYaml, false)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor k, v := range jobErrors {\n\t\tmsg += fmt.Sprintf(\"Error: %s %v\\n\", k, v)\n\t}\n\n\treturn\n}",
"func (c JobTriggerConfig) validate() error {\n\tif c.Schedule == nil {\n\t\treturn &errFieldMustBeSpecified{\n\t\t\tmissingField: \"schedule\",\n\t\t}\n\t}\n\treturn nil\n}",
"func (r Describe) validation(cmd *cobra.Command, args []string) error {\n\tif err := require.MaxArgs(args, 3); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (JobFailureHandlerConfig) validate() error {\n\treturn nil\n}",
"func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest, o ObjectLayer) error {\n\tif r == nil {\n\t\treturn nil\n\t}\n\n\tif r.APIVersion != batchReplJobAPIVersion {\n\t\treturn errInvalidArgument\n\t}\n\n\tif r.Source.Bucket == \"\" {\n\t\treturn errInvalidArgument\n\t}\n\n\tinfo, err := o.GetBucketInfo(ctx, r.Source.Bucket, BucketOptions{})\n\tif err != nil {\n\t\tif isErrBucketNotFound(err) {\n\t\t\treturn batchReplicationJobError{\n\t\t\t\tCode: \"NoSuchSourceBucket\",\n\t\t\t\tDescription: \"The specified source bucket does not exist\",\n\t\t\t\tHTTPStatusCode: http.StatusNotFound,\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\n\tif err := r.Source.Type.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tif r.Target.Endpoint == \"\" {\n\t\treturn errInvalidArgument\n\t}\n\n\tif r.Target.Bucket == \"\" {\n\t\treturn errInvalidArgument\n\t}\n\n\tif err := r.Target.Creds.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.Target.Type.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, tag := range r.Flags.Filter.Tags {\n\t\tif err := tag.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, meta := range r.Flags.Filter.Metadata {\n\t\tif err := meta.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := r.Flags.Retry.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tu, err := url.Parse(r.Target.Endpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcred := r.Target.Creds\n\n\tc, err := miniogo.NewCore(u.Host, &miniogo.Options{\n\t\tCreds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),\n\t\tSecure: u.Scheme == \"https\",\n\t\tTransport: getRemoteInstanceTransport,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.SetAppInfo(\"minio-\"+batchJobPrefix, r.APIVersion+\" \"+job.ID)\n\n\tvcfg, err := c.GetBucketVersioning(ctx, r.Target.Bucket)\n\tif err != nil {\n\t\tif miniogo.ToErrorResponse(err).Code == \"NoSuchBucket\" {\n\t\t\treturn batchReplicationJobError{\n\t\t\t\tCode: \"NoSuchTargetBucket\",\n\t\t\t\tDescription: \"The specified target bucket does not exist\",\n\t\t\t\tHTTPStatusCode: http.StatusNotFound,\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\n\tif info.Versioning && !vcfg.Enabled() {\n\t\treturn batchReplicationJobError{\n\t\t\tCode: \"InvalidBucketState\",\n\t\t\tDescription: fmt.Sprintf(\"The source '%s' has versioning enabled, target '%s' must have versioning enabled\",\n\t\t\t\tr.Source.Bucket, r.Target.Bucket),\n\t\t\tHTTPStatusCode: http.StatusBadRequest,\n\t\t}\n\t}\n\n\tr.clnt = c\n\treturn nil\n}",
"func validateArgoWorkflowTrigger(trigger *v1alpha1.ArgoWorkflowTrigger) error {\n\tif trigger == nil {\n\t\treturn errors.New(\"k8s trigger for can't be nil\")\n\t}\n\tif trigger.Source == nil {\n\t\treturn errors.New(\"k8s trigger for does not contain an absolute action\")\n\t}\n\tif trigger.GroupVersionResource.Size() == 0 {\n\t\treturn errors.New(\"must provide group, version and resource for the resource\")\n\t}\n\tswitch trigger.Operation {\n\tcase v1alpha1.Submit, v1alpha1.Suspend, v1alpha1.Retry, v1alpha1.Resume, v1alpha1.Resubmit:\n\tdefault:\n\t\treturn errors.Errorf(\"unknown operation type %s\", string(trigger.Operation))\n\t}\n\tif trigger.Parameters != nil {\n\t\tfor i, parameter := range trigger.Parameters {\n\t\t\tif err := validateTriggerParameter(¶meter); err != nil {\n\t\t\t\treturn errors.Errorf(\"resource parameter index: %d. err: %+v\", i, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (r *Job) ValidateCreate() error {\n\tjoblog.Info(\"validate create\", \"name\", r.Name)\n\tclient := jobmgr.GetClient()\n\tjobs := JobList{}\n\tif err := client.List(context.Background(), &jobs); err != nil {\n\t\treturn err\n\t}\n\tfor _, job := range jobs.Items {\n\t\tif job.Spec.Name == r.Spec.Name {\n\t\t\treturn ErrJobExists\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *JobJob) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateFilament(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateFile(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (e *storageExecutor) validation() error {\n\t// check input shardIDs if empty\n\tif len(e.ctx.shardIDs) == 0 {\n\t\treturn errNoShardID\n\t}\n\tnumOfShards := e.database.NumOfShards()\n\t// check engine has shard\n\tif numOfShards == 0 {\n\t\treturn errNoShardInDatabase\n\t}\n\n\treturn nil\n}",
"func ValidateSpec(ts string) (Type, error) {\n\tvar jb Job\n\t// Note we can't use:\n\t// toml.NewDecoder(bytes.NewReader([]byte(ts))).Strict(true).Decode(&jb)\n\t// to error in the case of unrecognized keys because all the keys in the toml are at\n\t// the top level and so decoding for the job will have undecodable keys meant for the job\n\t// type specific struct and vice versa. Should we upgrade the schema,\n\t// we put the type specific config in its own subtree e.g.\n\t// \tschemaVersion=1\n\t// name=\"test\"\n\t// [vrf_spec]\n\t// publicKey=\"0x...\"\n\t// and then we could use it.\n\ttree, err := toml.Load(ts)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\terr = tree.Unmarshal(&jb)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif _, ok := jobTypes[jb.Type]; !ok {\n\t\treturn \"\", ErrInvalidJobType\n\t}\n\tif jb.Type.SchemaVersion() != jb.SchemaVersion {\n\t\treturn \"\", ErrInvalidSchemaVersion\n\t}\n\tif jb.Type.RequiresPipelineSpec() && (jb.Pipeline.Source == \"\") {\n\t\treturn \"\", ErrNoPipelineSpec\n\t}\n\tif jb.Pipeline.RequiresPreInsert() && !jb.Type.SupportsAsync() {\n\t\treturn \"\", errors.Errorf(\"async=true tasks are not supported for %v\", jb.Type)\n\t}\n\n\tif strings.Contains(ts, \"<{}>\") {\n\t\treturn \"\", errors.Errorf(\"'<{}>' syntax is not supported. Please use \\\"{}\\\" instead\")\n\t}\n\n\treturn jb.Type, nil\n}",
"func (r *RunCommand) Validate() error {\n\tif r.buildName == \"\" {\n\t\treturn fmt.Errorf(\"name is not informed\")\n\t}\n\treturn nil\n}",
"func (m *Job) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateJob(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateProgress(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *TaskOrchestrationJob) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with TaskOrchestrationItem\n\tif err := m.TaskOrchestrationItem.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDemands(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateExecuteAs(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateInstanceID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTasks(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *Merge) validate(ctx context.Context) {\n\tif m.table == nil {\n\t\tpanic(moerr.NewInternalError(ctx, \"merge task missing input 'table'\"))\n\t}\n\tif m.fs == nil {\n\t\tpanic(moerr.NewInternalError(ctx, \"merge task missing input 'FileService'\"))\n\t}\n}",
"func (h NLBHealthCheckArgs) validate() error {\n\tif h.isEmpty() {\n\t\treturn nil\n\t}\n\treturn nil\n}",
"func (m *JobJobFile) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *BuildDefinition) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with BuildDefinitionReference\n\tif err := m.BuildDefinitionReference.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDemands(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOptions(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateProcess(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateProcessParameters(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateProperties(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRepository(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRetentionRules(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTriggers(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVariableGroups(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVariables(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *Options) validate(args []string) error {\n\tif len(args) != 0 {\n\t\treturn errors.New(\"arguments are not supported\")\n\t}\n\treturn nil\n}",
"func (x *MultipleCommand) Validate(args []string) error {\n\tif x.ConfigFileName == config.InputFileName {\n\t\treturn errors.New(\"cannot receive config file and input file from same source\")\n\t}\n\n\treturn nil\n}",
"func (v Validator) Validate(ctx context.Context, def *Definition, config map[string]any) ValidateResult {\n\tvar result ValidateResult\n\n\tconfigJSON, err := json.Marshal(config)\n\tif err != nil {\n\t\tresult.errors = append(result.errors, err)\n\t\treturn result\n\t}\n\n\tcommandExistsFunc := v.commandExists\n\tif commandExistsFunc == nil {\n\t\tcommandExistsFunc = commandExists\n\t}\n\n\t// validate that the required commands exist\n\tif def.Requirements != nil {\n\t\tfor _, command := range def.Requirements {\n\t\t\tif commandExistsFunc(command) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresult.errors = append(result.errors, fmt.Errorf(\"%q %w\", command, ErrCommandNotInPATH))\n\t\t}\n\t}\n\n\t// validate that the config matches the json schema we have\n\tif def.Configuration != nil {\n\t\tvalErrors, err := def.Configuration.ValidateBytes(ctx, configJSON)\n\t\tif err != nil {\n\t\t\tresult.errors = append(result.errors, err)\n\t\t}\n\t\tfor _, err := range valErrors {\n\t\t\tresult.errors = append(result.errors, err)\n\t\t}\n\t}\n\n\treturn result\n}",
"func (w Workload) validate() error {\n\tif w.Name == nil {\n\t\treturn &errFieldMustBeSpecified{\n\t\t\tmissingField: \"name\",\n\t\t}\n\t}\n\treturn nil\n}",
"func (cfg fromCFN) validate() error {\n\tif cfg.isEmpty() {\n\t\treturn nil\n\t}\n\tif len(aws.StringValue(cfg.Name)) == 0 {\n\t\treturn errors.New(\"name cannot be an empty string\")\n\t}\n\treturn nil\n}",
"func (j BatchJobRequest) Validate(ctx context.Context, o ObjectLayer) error {\n\tif j.Replicate != nil {\n\t\treturn j.Replicate.Validate(ctx, j, o)\n\t}\n\treturn errInvalidArgument\n}",
"func (b BuildArgsOrString) validate() error {\n\tif b.isEmpty() {\n\t\treturn nil\n\t}\n\tif !b.BuildArgs.isEmpty() {\n\t\treturn b.BuildArgs.validate()\n\t}\n\treturn nil\n}",
"func (c Config) Validate() error {\n\tif len(c.ClientID) == 0 {\n\t\treturn fmt.Errorf(\"error: ClientID missing\")\n\t}\n\n\tif c.VodID < 1 {\n\t\treturn fmt.Errorf(\"error: VodID missing\")\n\t}\n\n\ttimePattern := `\\d+ \\d+ \\d+`\n\ttimeRegex := regexp.MustCompile(timePattern)\n\tif c.StartTime != \"start\" && !timeRegex.MatchString(c.StartTime) {\n\t\treturn fmt.Errorf(\"error: StartTime must be 'start' or in format '%s'; got '%s'\", timePattern, c.StartTime)\n\t}\n\tif c.EndTime == \"\" && c.Length == \"\" {\n\t\treturn errors.New(\"error: must specify either EndTime or Length\")\n\t}\n\tif c.Length == \"\" && c.EndTime != \"end\" && !timeRegex.MatchString(c.EndTime) {\n\t\treturn fmt.Errorf(\"error: EndTime must be 'end' or in format '%s'; got '%s'\", timePattern, c.EndTime)\n\t}\n\tif c.EndTime == \"\" && c.Length != \"full\" && !timeRegex.MatchString(c.Length) {\n\t\treturn fmt.Errorf(\"error: Length must be 'full' or in format '%s'; got '%s'\", timePattern, c.Length)\n\t}\n\n\tqualityPattern := `\\d{3,4}p[36]0`\n\tqualityRegex := regexp.MustCompile(qualityPattern)\n\tif c.Quality != \"best\" && c.Quality != \"chunked\" && !qualityRegex.MatchString(c.Quality) {\n\t\treturn fmt.Errorf(\"error: Quality must be 'best', 'chunked', or in format '%s'; got '%s'\", qualityPattern, c.Quality)\n\t}\n\n\tif c.FilePrefix != \"\" && !isValidFilename(c.FilePrefix) {\n\t\treturn fmt.Errorf(\"error: FilePrefix contains invalid characters; got '%s'\", c.FilePrefix)\n\t}\n\n\tif c.Workers < 1 {\n\t\treturn fmt.Errorf(\"error: Worker must be an integer greater than 0; got '%d'\", c.Workers)\n\t}\n\n\treturn nil\n}",
"func (r *Job) ValidateDelete() error {\n\treturn nil\n}",
"func ValidateJenkinsJobConfig(input string) error {\n\t// Job repository should not be longer than 512 characters\n\tif len(input) > 512 {\n\t\treturn errors.New(\"Should not be longer than 512 characters. \")\n\t}\n\t// Regex regex to validate repository\n\tvar regex = regexp.MustCompile(configuration.GetConfiguration().Jenkins.JobDSL.RepoValidatePattern)\n\tif !regex.Match([]byte(input)) {\n\t\treturn errors.New(\"Wrong repository name! \")\n\t}\n\n\treturn nil\n}",
"func (a *dataConfig) Validate(args []string) error {\n\tif len(args) != 1 {\n\t\treturn fmt.Errorf(\"name must be specified once\")\n\t}\n\ta.Name = args[0]\n\tif len(a.EnvFileSource) == 0 && len(a.FileSources) == 0 && len(a.LiteralSources) == 0 {\n\t\treturn fmt.Errorf(\"at least from-env-file, or from-file or from-literal must be set\")\n\t}\n\tif len(a.EnvFileSource) > 0 && (len(a.FileSources) > 0 || len(a.LiteralSources) > 0) {\n\t\treturn fmt.Errorf(\"from-env-file cannot be combined with from-file or from-literal\")\n\t}\n\t// TODO: Should we check if the path exists? if it's valid, if it's within the same (sub-)directory?\n\treturn nil\n}",
"func Validate(identifier string, td *tmv1beta1.TestDefinition) error {\n\tif td.GetName() == \"\" {\n\t\treturn fmt.Errorf(\"Invalid TestDefinition (%s): metadata.name : Required value: name has to be defined\", identifier)\n\t}\n\n\tif err := ValidateName(identifier, td.GetName()); err != nil {\n\t\treturn err\n\t}\n\n\tif len(td.Spec.Command) == 0 {\n\t\treturn fmt.Errorf(\"Invalid TestDefinition (%s) Name: \\\"%s\\\": spec.command : Required value: command has to be defined\", identifier, td.GetName())\n\t}\n\tif td.Spec.Owner == \"\" || !isEmailValid(td.Spec.Owner) {\n\t\treturn fmt.Errorf(\"Invalid TestDefinition (%s) Owner: \\\"%s\\\": spec.owner : Required value: valid email has to be defined\", identifier, td.Spec.Owner)\n\t}\n\tif len(td.Spec.RecipientsOnFailure) != 0 && !isEmailListValid(td.Spec.RecipientsOnFailure) {\n\t\treturn fmt.Errorf(\"Invalid TestDefinition (%s) ReceipientsOnFailure: \\\"%s\\\": spec.notifyOnFailure : Required value: valid email has to be defined\", identifier, td.Spec.RecipientsOnFailure)\n\t}\n\n\tfor i, label := range td.Spec.Labels {\n\t\tidentifier := fmt.Sprintf(\"Invalid TestDefinition (%s): spec.labels[%d]\", identifier, i)\n\t\tif err := ValidateLabelName(identifier, label); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (j *JobJob) Valid() bool {\n\treturn j.TargetJob != nil\n}",
"func (wec *WorkExperienceCreate) check() error {\n\tif _, ok := wec.mutation.CreatedAt(); !ok {\n\t\treturn &ValidationError{Name: \"created_at\", err: errors.New(`ent: missing required field \"created_at\"`)}\n\t}\n\tif _, ok := wec.mutation.UpdatedAt(); !ok {\n\t\treturn &ValidationError{Name: \"updated_at\", err: errors.New(`ent: missing required field \"updated_at\"`)}\n\t}\n\tif _, ok := wec.mutation.Title(); !ok {\n\t\treturn &ValidationError{Name: \"title\", err: errors.New(`ent: missing required field \"title\"`)}\n\t}\n\tif v, ok := wec.mutation.Title(); ok {\n\t\tif err := workexperience.TitleValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"title\", err: fmt.Errorf(`ent: validator failed for field \"title\": %w`, err)}\n\t\t}\n\t}\n\tif _, ok := wec.mutation.Location(); !ok {\n\t\treturn &ValidationError{Name: \"location\", err: errors.New(`ent: missing required field \"location\"`)}\n\t}\n\tif v, ok := wec.mutation.Location(); ok {\n\t\tif err := workexperience.LocationValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"location\", err: fmt.Errorf(`ent: validator failed for field \"location\": %w`, err)}\n\t\t}\n\t}\n\tif _, ok := wec.mutation.StartDate(); !ok {\n\t\treturn &ValidationError{Name: \"start_date\", err: errors.New(`ent: missing required field \"start_date\"`)}\n\t}\n\tif _, ok := wec.mutation.Description(); !ok {\n\t\treturn &ValidationError{Name: \"description\", err: errors.New(`ent: missing required field \"description\"`)}\n\t}\n\tif v, ok := wec.mutation.Description(); ok {\n\t\tif err := workexperience.DescriptionValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"description\", err: fmt.Errorf(`ent: validator failed for field \"description\": %w`, err)}\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *V1RayJob) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateHead(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateWorkers(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (r *OptimJob) ValidateCreate() error {\n\toptimjoblog.Info(\"validate create\", \"name\", r.Name)\n\n\t// TODO(user): fill in your validation logic upon object creation.\n\treturn r.validateOptimJob()\n\n}",
"func (m *JobsJob) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateActions(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateContextMetaFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateDataSourceFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateHooks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateIdmFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMergeAction(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNodeEventFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateParameters(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateResourcesDependencies(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSchedule(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateTasks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateUserEventFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *WorkflowExecutionStepStatus) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *ReportDefinition) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateParameters(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (kv BatchJobReplicateKV) Validate() error {\n\tif kv.Key == \"\" {\n\t\treturn errInvalidArgument\n\t}\n\treturn nil\n}",
"func (m *BuildSetup) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (c *jsiiProxy_CfnJobTemplate) Validate() *[]*string {\n\tvar returns *[]*string\n\n\t_jsii_.Invoke(\n\t\tc,\n\t\t\"validate\",\n\t\tnil, // no parameters\n\t\t&returns,\n\t)\n\n\treturn returns\n}",
"func validate(c *cli.Context) error {\n\t// load configuration\n\tif len(c.String(\"org\")) == 0 {\n\t\terr := c.Set(\"org\", c.String(\"org\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to set context: %w\", err)\n\t\t}\n\t}\n\n\tif len(c.String(\"repo\")) == 0 {\n\t\terr := c.Set(\"repo\", c.String(\"repo\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to set context: %w\", err)\n\t\t}\n\t}\n\n\t// validate the user input in the command\n\tif len(c.String(\"org\")) == 0 {\n\t\treturn util.InvalidCommand(\"org\")\n\t}\n\n\tif len(c.String(\"repo\")) == 0 {\n\t\treturn util.InvalidCommand(\"repo\")\n\t}\n\n\treturn nil\n}",
"func (f *ForkParams) Validate() error {\n\treturn nil\n}",
"func (fg *GitLabBuildTriggerFieldGroup) Validate(opts shared.Options) []shared.ValidationError {\n\n\tfgName := \"GitLabBuildTrigger\"\n\n\t// Make empty errors\n\terrors := []shared.ValidationError{}\n\n\t// If build support is off, return false\n\tif fg.FeatureBuildSupport == false {\n\t\treturn errors\n\t}\n\n\t// If github trigger is off\n\tif fg.FeatureGitlabBuild == false {\n\t\treturn errors\n\t}\n\n\t// Check for config\n\tif fg.GitlabTriggerConfig == nil {\n\t\tnewError := shared.ValidationError{\n\t\t\tTags: []string{\"GITLAB_TRIGGER_CONFIG\"},\n\t\t\tFieldGroup: fgName,\n\t\t\tMessage: \"GITLAB_TRIGGER_CONFIG is required for GitLabBuildTrigger\",\n\t\t}\n\t\terrors = append(errors, newError)\n\t\treturn errors\n\t}\n\n\t// Check for endpoint\n\tif fg.GitlabTriggerConfig.GitlabEndpoint == \"\" {\n\t\tnewError := shared.ValidationError{\n\t\t\tTags: []string{\"GITLAB_TRIGGER_CONFIG.GITLAB_ENDPOINT\"},\n\t\t\tFieldGroup: fgName,\n\t\t\tMessage: \"GITLAB_TRIGGER_CONFIG.GITLAB_ENDPOINT is required for GitLabBuildTrigger\",\n\t\t}\n\t\terrors = append(errors, newError)\n\t}\n\n\t// Check for endpoint\n\tif !strings.HasPrefix(fg.GitlabTriggerConfig.GitlabEndpoint, \"http://\") && !strings.HasPrefix(fg.GitlabTriggerConfig.GitlabEndpoint, \"https://\") {\n\t\tnewError := shared.ValidationError{\n\t\t\tTags: []string{\"GITLAB_TRIGGER_CONFIG.GITLAB_ENDPOINT\"},\n\t\t\tFieldGroup: fgName,\n\t\t\tMessage: \"GITLAB_TRIGGER_CONFIG.GITLAB_ENDPOINT must be a url\",\n\t\t}\n\t\terrors = append(errors, newError)\n\t}\n\n\t// Check for client id\n\tif fg.GitlabTriggerConfig.ClientId == \"\" {\n\t\tnewError := shared.ValidationError{\n\t\t\tTags: []string{\"GITLAB_TRIGGER_CONFIG.CLIENT_ID\"},\n\t\t\tFieldGroup: fgName,\n\t\t\tMessage: \"GITLAB_TRIGGER_CONFIG.CLIENT_ID is required for GitLabBuildTrigger\",\n\t\t}\n\t\terrors = append(errors, newError)\n\t}\n\n\t// Check for endpoint\n\tif fg.GitlabTriggerConfig.ClientSecret == \"\" {\n\t\tnewError := shared.ValidationError{\n\t\t\tTags: []string{\"GITLAB_TRIGGER_CONFIG.CLIENT_SECRET\"},\n\t\t\tFieldGroup: fgName,\n\t\t\tMessage: \"GITLAB_TRIGGER_CONFIG.CLIENT_SECRET is required for GitLabBuildTrigger\",\n\t\t}\n\t\terrors = append(errors, newError)\n\t}\n\n\t// Check OAuth endpoint\n\tsuccess := shared.ValidateGitLabOAuth(fg.GitlabTriggerConfig.ClientId, fg.GitlabTriggerConfig.ClientSecret, fg.GitlabTriggerConfig.GitlabEndpoint)\n\tif !success {\n\t\tnewError := shared.ValidationError{\n\t\t\tTags: []string{\"GITLAB_TRIGGER_CONFIG.CLIENT_ID\", \"GITLAB_TRIGGER_CONFIG.CLIENT_SECRET\"},\n\t\t\tFieldGroup: fgName,\n\t\t\tMessage: \"Could not verify GitLab OAuth credentials\",\n\t\t}\n\t\terrors = append(errors, newError)\n\t}\n\n\treturn errors\n}",
"func (c Cron) Validate() error {\n\tswitch {\n\tcase c.Spec.Branch == \"\":\n\t\treturn errors.New(\"yaml: invalid cron branch\")\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func (w WorkerService) validate() error {\n\tvar err error\n\tif err = w.WorkerServiceConfig.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err = w.Workload.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err = validateContainerDeps(validateDependenciesOpts{\n\t\tsidecarConfig: w.Sidecars,\n\t\timageConfig: w.ImageConfig.Image,\n\t\tmainContainerName: aws.StringValue(w.Name),\n\t\tlogging: w.Logging,\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"validate container dependencies: %w\", err)\n\t}\n\tif err = validateExposedPorts(validateExposedPortsOpts{\n\t\tsidecarConfig: w.Sidecars,\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"validate unique exposed ports: %w\", err)\n\t}\n\treturn nil\n}",
"func (m *EnvironmentDeploymentExecutionRecord) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateDefinition(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateFinishTime(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOwner(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePlanID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateQueueTime(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateScopeID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateServiceOwner(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStartTime(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (qs QueueScaling) validate() error {\n\tif qs.IsEmpty() {\n\t\treturn nil\n\t}\n\tif qs.AcceptableLatency == nil && qs.AvgProcessingTime != nil {\n\t\treturn &errFieldMustBeSpecified{\n\t\t\tmissingField: \"acceptable_latency\",\n\t\t\tconditionalFields: []string{\"msg_processing_time\"},\n\t\t}\n\t}\n\tif qs.AvgProcessingTime == nil && qs.AcceptableLatency != nil {\n\t\treturn &errFieldMustBeSpecified{\n\t\t\tmissingField: \"msg_processing_time\",\n\t\t\tconditionalFields: []string{\"acceptable_latency\"},\n\t\t}\n\t}\n\tlatency, process := *qs.AcceptableLatency, *qs.AvgProcessingTime\n\tif latency == 0 {\n\t\treturn errors.New(`\"acceptable_latency\" cannot be 0`)\n\t}\n\tif process == 0 {\n\t\treturn errors.New(`\"msg_processing_time\" cannot be 0`)\n\t}\n\tif process > latency {\n\t\treturn errors.New(`\"msg_processing_time\" cannot be longer than \"acceptable_latency\"`)\n\t}\n\treturn qs.Cooldown.validate()\n}",
"func (b *Step) Validate() error {\n\n\treturn nil\n}",
"func (gc *GarbageCollector) Validate(params job.Parameters) error {\n\treturn nil\n}",
"func (gc *GarbageCollector) Validate(params job.Parameters) error {\n\treturn nil\n}",
"func (b *InstanceBuild) Validate() []error {\n\terrors := []error{}\n\tif b.Site == \"\" {\n\t\terrors = append(errors, errEmptySite)\n\t}\n\tif b.Site != \"org\" && b.Site != \"com\" {\n\t\terrors = append(errors, errInvalidSite)\n\t}\n\tif b.Env == \"\" {\n\t\terrors = append(errors, errEmptyEnv)\n\t}\n\tif b.Env != \"prod\" && b.Env != \"staging\" && b.Env != \"test\" {\n\t\terrors = append(errors, errInvalidEnv)\n\t}\n\tif b.Queue == \"\" {\n\t\terrors = append(errors, errEmptyQueue)\n\t}\n\tif b.InstanceType == \"\" {\n\t\terrors = append(errors, errEmptyInstanceType)\n\t}\n\tif b.State != \"pending\" && b.State != \"started\" && b.State != \"finished\" {\n\t\terrors = append(errors, errInvalidState)\n\t}\n\tif b.Count < 1 {\n\t\terrors = append(errors, errInvalidInstanceCount)\n\t}\n\n\treturn errors\n}",
"func (m *CreateStatusOption) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (b *batData) validate() error {\n\tif !checkSize(b.HighestCellVoltage, voltFactor, 2) {\n\t\treturn fmt.Errorf(\"Invalid Highest Cell Voltage: %v\", b.HighestCellVoltage)\n\t}\n\n\tif !checkSize(b.LowestCellVoltage, voltFactor, 2) {\n\t\treturn fmt.Errorf(\"Invalid Lowest Cell Voltage: %v\", b.LowestCellVoltage)\n\t}\n\n\tif !checkSize(b.SysMaxTemperature, tempFactor, 2) {\n\t\treturn fmt.Errorf(\"Invalid System Max Temperature: %v\", b.SysMaxTemperature)\n\t}\n\n\tif !checkSize(b.SysAvgTemperature, tempFactor, 2) {\n\t\treturn fmt.Errorf(\"Invalid System Average Temperature: %v\", b.SysAvgTemperature)\n\t}\n\n\tif !checkSize(b.SysMinTemperature, tempFactor, 2) {\n\t\treturn fmt.Errorf(\"Invalid System Min Temperature: %v\", b.SysMinTemperature)\n\t}\n\n\treturn nil\n}",
"func (DockerBuildArgs) validate() error {\n\treturn nil\n}",
"func (etc *ExportTaskCreate) check() error {\n\tif _, ok := etc.mutation.GetType(); !ok {\n\t\treturn &ValidationError{Name: \"type\", err: errors.New(\"ent: missing required field \\\"type\\\"\")}\n\t}\n\tif v, ok := etc.mutation.GetType(); ok {\n\t\tif err := exporttask.TypeValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"type\", err: fmt.Errorf(\"ent: validator failed for field \\\"type\\\": %w\", err)}\n\t\t}\n\t}\n\tif _, ok := etc.mutation.Status(); !ok {\n\t\treturn &ValidationError{Name: \"status\", err: errors.New(\"ent: missing required field \\\"status\\\"\")}\n\t}\n\tif v, ok := etc.mutation.Status(); ok {\n\t\tif err := exporttask.StatusValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"status\", err: fmt.Errorf(\"ent: validator failed for field \\\"status\\\": %w\", err)}\n\t\t}\n\t}\n\tif _, ok := etc.mutation.Progress(); !ok {\n\t\treturn &ValidationError{Name: \"progress\", err: errors.New(\"ent: missing required field \\\"progress\\\"\")}\n\t}\n\tif v, ok := etc.mutation.Progress(); ok {\n\t\tif err := exporttask.ProgressValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"progress\", err: fmt.Errorf(\"ent: validator failed for field \\\"progress\\\": %w\", err)}\n\t\t}\n\t}\n\tif _, ok := etc.mutation.Filters(); !ok {\n\t\treturn &ValidationError{Name: \"filters\", err: errors.New(\"ent: missing required field \\\"filters\\\"\")}\n\t}\n\treturn nil\n}",
"func (m *Task) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validatePeriod(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (t *Task) Validate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.Validate(\n\t\t&validators.StringIsPresent{Field: t.Description, Name: \"Description\"},\n\t\t&validators.StringIsPresent{Field: t.Status, Name: \"Status\"},\n\t\t&validators.StringIsPresent{Field: t.RequesterName, Name: \"RequesterName\"},\n\t\t&validators.StringIsPresent{Field: t.ExecutorName, Name: \"ExecutorName\"},\n\t), nil\n}",
"func (cc *CompanyCreate) check() error {\n\tif _, ok := cc.mutation.CreatedAt(); !ok {\n\t\treturn &ValidationError{Name: \"created_at\", err: errors.New(`ent: missing required field \"created_at\"`)}\n\t}\n\tif _, ok := cc.mutation.UpdatedAt(); !ok {\n\t\treturn &ValidationError{Name: \"updated_at\", err: errors.New(`ent: missing required field \"updated_at\"`)}\n\t}\n\tif _, ok := cc.mutation.Name(); !ok {\n\t\treturn &ValidationError{Name: \"name\", err: errors.New(`ent: missing required field \"name\"`)}\n\t}\n\tif v, ok := cc.mutation.Name(); ok {\n\t\tif err := company.NameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"name\", err: fmt.Errorf(`ent: validator failed for field \"name\": %w`, err)}\n\t\t}\n\t}\n\tif _, ok := cc.mutation.Overview(); !ok {\n\t\treturn &ValidationError{Name: \"overview\", err: errors.New(`ent: missing required field \"overview\"`)}\n\t}\n\tif v, ok := cc.mutation.Overview(); ok {\n\t\tif err := company.OverviewValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"overview\", err: fmt.Errorf(`ent: validator failed for field \"overview\": %w`, err)}\n\t\t}\n\t}\n\tif _, ok := cc.mutation.Website(); !ok {\n\t\treturn &ValidationError{Name: \"website\", err: errors.New(`ent: missing required field \"website\"`)}\n\t}\n\tif v, ok := cc.mutation.Website(); ok {\n\t\tif err := company.WebsiteValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"website\", err: fmt.Errorf(`ent: validator failed for field \"website\": %w`, err)}\n\t\t}\n\t}\n\tif _, ok := cc.mutation.LogoURL(); !ok {\n\t\treturn &ValidationError{Name: \"logo_url\", err: errors.New(`ent: missing required field \"logo_url\"`)}\n\t}\n\tif v, ok := cc.mutation.LogoURL(); ok {\n\t\tif err := company.LogoURLValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"logo_url\", err: fmt.Errorf(`ent: validator failed for field \"logo_url\": %w`, err)}\n\t\t}\n\t}\n\tif _, ok := cc.mutation.Size(); !ok {\n\t\treturn &ValidationError{Name: \"size\", err: errors.New(`ent: missing required field \"size\"`)}\n\t}\n\tif v, ok := cc.mutation.Size(); ok {\n\t\tif err := company.SizeValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"size\", err: fmt.Errorf(`ent: validator failed for field \"size\": %w`, err)}\n\t\t}\n\t}\n\tif _, ok := cc.mutation.FoundedAt(); !ok {\n\t\treturn &ValidationError{Name: \"founded_at\", err: errors.New(`ent: missing required field \"founded_at\"`)}\n\t}\n\tif v, ok := cc.mutation.FoundedAt(); ok {\n\t\tif err := company.FoundedAtValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"founded_at\", err: fmt.Errorf(`ent: validator failed for field \"founded_at\": %w`, err)}\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *TaskAllOf1) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateReason(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (c *modAccountRun) validate(args []string) error {\n\tif len(args) < 2 {\n\t\treturn errors.New(\"not enough arguments\")\n\t}\n\n\tif len(args) > 2 {\n\t\treturn errors.New(\"too many arguments\")\n\t}\n\n\treturn nil\n}",
"func validateJob(r *http.Request, j *job.Job) (bool, error) {\n\tctx := r.Context()\n\ttoken := \"\"\n\tif ctx.Value(job.AccessTokenKey) != nil {\n\t\ttoken = ctx.Value(job.AccessTokenKey).(string)\n\t}\n\t// Calculate a response timeout\n\ttimeout := j.ResponseTimeout()\n\n\tctx = context.Background()\n\tif timeout > 0 {\n\t\tvar cncl func()\n\t\tctx, cncl = context.WithTimeout(ctx, timeout)\n\t\tdefer cncl()\n\t}\n\t// Get the actual url and body we're going to be using,\n\t// including any necessary templating.\n\turl, err := j.TryTemplatize(j.RemoteProperties.Url)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif strings.HasSuffix(url, \"/\") {\n\t\turl += \"validate\"\n\t} else {\n\t\turl += \"/validate\"\n\t}\n\tbody, err := j.TryTemplatize(j.RemoteProperties.Body)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// Normalize the method passed by the user\n\tmethod := strings.ToUpper(http.MethodPost)\n\tbodyBuffer := bytes.NewBufferString(body)\n\treq, err := http.NewRequest(method, url, bodyBuffer)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// Set default or user's passed headers\n\tj.SetHeaders(req, token)\n\n\theaders := viper.GetStringSlice(\"remote.headers\")\n\tfor _, header := range headers {\n\t\tvalue := r.Header.Get(header)\n\t\tif value != \"\" {\n\t\t\treq.Header.Set(header, value)\n\t\t}\n\t}\n\n\t// Do the request\n\tres, err := http.DefaultClient.Do(req.WithContext(ctx))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tdefer res.Body.Close()\n\tif res.StatusCode == http.StatusOK {\n\t\tvar result bool\n\t\terr := json.NewDecoder(res.Body).Decode(&result)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"validate for job %s did not return a boolean value\", j.Name)\n\t\t\treturn false, err\n\t\t}\n\t\treturn result, nil\n\t} else {\n\t\treturn false, errors.New(res.Status)\n\t}\n}",
"func (m *DataHistoryManager) runValidationJob(job *DataHistoryJob, exch exchange.IBotExchange) error {\n\tif !m.IsRunning() {\n\t\treturn ErrSubSystemNotStarted\n\t}\n\tvar intervalsProcessed int64\n\tvar jobIntervals, intervalsToCheck []time.Time\n\tintervalLength := job.Interval.Duration() * time.Duration(job.RequestSizeLimit)\n\tfor i := job.StartDate; i.Before(job.EndDate); i = i.Add(intervalLength) {\n\t\tjobIntervals = append(jobIntervals, i)\n\t}\n\tnextIntervalToProcess := job.StartDate\ntimesToFetch:\n\tfor t, results := range job.Results {\n\t\ttt := time.Unix(t, 0)\n\t\tif len(results) < int(job.MaxRetryAttempts) {\n\t\t\tfor x := range results {\n\t\t\t\tif results[x].Status == dataHistoryStatusComplete {\n\t\t\t\t\tcontinue timesToFetch\n\t\t\t\t}\n\t\t\t}\n\t\t\tintervalsToCheck = append(intervalsToCheck, tt)\n\t\t} else {\n\t\t\tfor x := range results {\n\t\t\t\tresults[x].Status = dataHistoryIntervalIssuesFound\n\t\t\t}\n\t\t\tjob.Results[t] = results\n\t\t}\n\t\tif tt.After(nextIntervalToProcess) {\n\t\t\tnextIntervalToProcess = tt.Add(intervalLength)\n\t\t}\n\t}\n\tfor i := nextIntervalToProcess; i.Before(job.EndDate); i = i.Add(intervalLength) {\n\t\tintervalsToCheck = append(intervalsToCheck, i)\n\t}\n\n\tfor i := range intervalsToCheck {\n\t\tif intervalsProcessed >= job.RunBatchLimit {\n\t\t\tbreak\n\t\t}\n\t\tif err := common.StartEndTimeCheck(intervalsToCheck[i], job.EndDate); err != nil {\n\t\t\tbreak\n\t\t}\n\t\trequestEnd := intervalsToCheck[i].Add(intervalLength)\n\t\tif requestEnd.After(job.EndDate) {\n\t\t\trequestEnd = job.EndDate\n\t\t}\n\t\tif m.verbose {\n\t\t\tlog.Debugf(log.DataHistory, \"running data history job %v start: %s end: %s interval: %s datatype: %s\",\n\t\t\t\tjob.Nickname,\n\t\t\t\tintervalsToCheck[i],\n\t\t\t\trequestEnd,\n\t\t\t\tjob.Interval,\n\t\t\t\tjob.DataType)\n\t\t}\n\t\tintervalsProcessed++\n\t\tresult, err := m.validateCandles(job, exch, intervalsToCheck[i], requestEnd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlookup := job.Results[result.IntervalStartDate.Unix()]\n\t\tlookup = append(lookup, *result)\n\t\tjob.Results[result.IntervalStartDate.Unix()] = lookup\n\t}\n\n\tcompleted := true // nolint:ifshort,nolintlint // false positive and triggers only on Windows\n\tallResultsSuccessful := true\n\tallResultsFailed := true\ncompletionCheck:\n\tfor i := range jobIntervals {\n\t\tresults, ok := job.Results[jobIntervals[i].Unix()]\n\t\tif !ok {\n\t\t\tcompleted = false\n\t\t\tbreak\n\t\t}\n\tresults:\n\t\tfor j := range results {\n\t\t\tswitch results[j].Status {\n\t\t\tcase dataHistoryIntervalIssuesFound:\n\t\t\t\tallResultsSuccessful = false\n\t\t\t\tbreak results\n\t\t\tcase dataHistoryStatusComplete:\n\t\t\t\tallResultsFailed = false\n\t\t\t\tbreak results\n\t\t\tdefault:\n\t\t\t\tcompleted = false\n\t\t\t\tbreak completionCheck\n\t\t\t}\n\t\t}\n\t}\n\tif completed {\n\t\terr := m.completeJob(job, allResultsSuccessful, allResultsFailed)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (a *Arguments) Validate() error {\n\tif a.Person == nil && a.Employee == nil {\n\t\treturn server.EmptyArgumentsHTTPError\n\t}\n\n\tif a.Person != nil && a.Employee.PersonID != nil {\n\t\treturn server.NewHTTPError(http.StatusBadRequest, \"person_id and person can not be specified together\")\n\t}\n\n\tif a.Person != nil {\n\t\tif err := a.Person.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif a.Employee != nil {\n\t\tif err := a.Employee.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (o *Opts) Validate() error {\n\tvar e []string\n\tif o.Queue == 0 {\n\t\te = append(e, \"queue size must be greater than 0\")\n\t}\n\tif len(e) > 0 {\n\t\treturn fmt.Errorf(\"%s\", strings.Join(e, \"; \"))\n\t}\n\treturn nil\n}",
"func (c *slotsCmd) Validate() error {\n\tif len(c.args) > 1 {\n\t\treturn fmt.Errorf(\"maximum 1 service name can be given, got %d\", len(c.args))\n\t}\n\n\treturn nil\n}",
"func (o *Options) Validate() error {\n\tif o.NumWorkers == 0 {\n\t\treturn errors.New(\"number of workers cannot be zero\")\n\t}\n\n\tif o.ProwJobNamespace == \"\" {\n\t\treturn errors.New(\"namespace containing ProwJobs not configured\")\n\t}\n\n\treturn o.Options.Validate()\n}",
"func (m *CreationTask) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCreationTime(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePersonalization(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTicketData(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func Validate(rule grpc.Rule) error {\n\t// Check the rule name.\n\tif len(rule.Name) < 1 {\n\t\treturn errors.New(\"rule name cannot be empty\")\n\t}\n\n\t// Check the tracer name.\n\tif len(rule.Tracer) < 1 {\n\t\treturn errors.New(\"rule tracer cannot be empty\")\n\t}\n\n\t// Check the container runtimes against the valid container runtimes.\n\tfor _, runtime := range rule.ContainerRuntimes {\n\t\tif !proc.IsValidContainerRuntime(runtime) {\n\t\t\treturn fmt.Errorf(\"[%s]: %s is not a valid container runtime\", rule.Name, runtime)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (m *BatchStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCreated(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateJobStatuses(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (q SQSQueue) validate() error {\n\tif q.IsEmpty() {\n\t\treturn nil\n\t}\n\tif err := q.DeadLetter.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"dead_letter\": %w`, err)\n\t}\n\treturn q.FIFO.validate()\n}",
"func (r *Job) ValidateUpdate(old runtime.Object) error {\n\tjoblog.Info(\"validate update\", \"name\", r.Name)\n\toldJob, ok := old.(*Job)\n\tif !ok {\n\t\treturn fmt.Errorf(\"can't validate job update\")\n\t}\n\tclient := jobmgr.GetClient()\n\tjobs := JobList{}\n\tif err := client.List(context.Background(), &jobs); err != nil {\n\t\treturn err\n\t}\n\tfor _, job := range jobs.Items {\n\t\tif job.Spec.Name == oldJob.Spec.Name {\n\t\t\treturn ErrJobExists\n\t\t}\n\t}\n\treturn nil\n}",
"func (id BuildID) Validate() error {\n\tswitch {\n\tcase id.Master == \"\":\n\t\treturn errors.New(\"master is unspecified\", grpcutil.InvalidArgumentTag)\n\tcase id.Builder == \"\":\n\t\treturn errors.New(\"builder is unspecified\", grpcutil.InvalidArgumentTag)\n\tcase id.Number < 0:\n\t\treturn errors.New(\"number must be >= 0\", grpcutil.InvalidArgumentTag)\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func (mt *Status) Validate() (err error) {\n\tif mt.Commit == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"commit\"))\n\t}\n\tif mt.BuildTime == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"buildTime\"))\n\t}\n\tif mt.StartTime == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"startTime\"))\n\t}\n\tif mt.DatabaseStatus == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"databaseStatus\"))\n\t}\n\tif mt.ConfigurationStatus == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"configurationStatus\"))\n\t}\n\treturn\n}",
"func (m *CreateRestoreJobRequest) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateDelivery(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (opts *Options) Validate() error {\n\tif opts.LockTimeout < 0 {\n\t\treturn errors.New(\"cannot have negative lock timeout\")\n\t}\n\n\tif opts.LockTimeout == 0 {\n\t\topts.LockTimeout = amboy.LockTimeout\n\t}\n\n\tif opts.PoolSize == 0 {\n\t\topts.PoolSize = runtime.NumCPU()\n\t}\n\n\tif opts.WaitInterval == 0 {\n\t\topts.WaitInterval = 100 * time.Millisecond\n\t}\n\n\tif opts.SchemaName == \"\" {\n\t\topts.SchemaName = \"amboy\"\n\t}\n\n\treturn nil\n}",
"func (rc *RuleCreate) check() error {\n\tif _, ok := rc.mutation.CreateTime(); !ok {\n\t\treturn &ValidationError{Name: \"create_time\", err: errors.New(\"ent: missing required field \\\"create_time\\\"\")}\n\t}\n\tif _, ok := rc.mutation.UpdateTime(); !ok {\n\t\treturn &ValidationError{Name: \"update_time\", err: errors.New(\"ent: missing required field \\\"update_time\\\"\")}\n\t}\n\tif _, ok := rc.mutation.Name(); !ok {\n\t\treturn &ValidationError{Name: \"name\", err: errors.New(\"ent: missing required field \\\"name\\\"\")}\n\t}\n\tif v, ok := rc.mutation.Name(); ok {\n\t\tif err := rule.NameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"name\", err: fmt.Errorf(\"ent: validator failed for field \\\"name\\\": %w\", err)}\n\t\t}\n\t}\n\tif _, ok := rc.mutation.GracePeriod(); !ok {\n\t\treturn &ValidationError{Name: \"gracePeriod\", err: errors.New(\"ent: missing required field \\\"gracePeriod\\\"\")}\n\t}\n\tif _, ok := rc.mutation.StartDateTime(); !ok {\n\t\treturn &ValidationError{Name: \"startDateTime\", err: errors.New(\"ent: missing required field \\\"startDateTime\\\"\")}\n\t}\n\tif _, ok := rc.mutation.EndDateTime(); !ok {\n\t\treturn &ValidationError{Name: \"endDateTime\", err: errors.New(\"ent: missing required field \\\"endDateTime\\\"\")}\n\t}\n\tif _, ok := rc.mutation.Status(); !ok {\n\t\treturn &ValidationError{Name: \"status\", err: errors.New(\"ent: missing required field \\\"status\\\"\")}\n\t}\n\treturn nil\n}",
"func (m *ScheduledJobDestination) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *BuildInfo) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAux(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateErrorDetail(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateProgressDetail(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func Validate() error {\n\tmissingFields := []string{}\n\n\tfor name, value := range map[string]string{\n\t\t\"AppVersion\": AppVersion, \"VCSRef\": VCSRef, \"BuildVersion\": BuildVersion, \"Date\": Date,\n\t} {\n\t\tif value == \"\" {\n\t\t\tmissingFields = append(missingFields, name)\n\t\t}\n\t}\n\n\tif len(missingFields) == 0 {\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"missing build flags\")\n}",
"func _PlotSpreadsheetValidation(_ctx context.Context, _input *PlotSpreadsheetInput, _output *PlotSpreadsheetOutput) {\n\n}",
"func (h HTTPHealthCheckArgs) validate() error {\n\treturn nil\n}"
] | [
"0.71353847",
"0.70872307",
"0.7057234",
"0.70403004",
"0.7038734",
"0.6985453",
"0.6956082",
"0.6955184",
"0.6812868",
"0.6765313",
"0.6757547",
"0.675049",
"0.6660855",
"0.6626627",
"0.65732706",
"0.6557419",
"0.65209156",
"0.6478754",
"0.6438617",
"0.631663",
"0.6313686",
"0.6257895",
"0.60594356",
"0.6037357",
"0.6032301",
"0.60245115",
"0.6021099",
"0.6018997",
"0.59721506",
"0.5971661",
"0.5955307",
"0.5941134",
"0.5934184",
"0.5900576",
"0.5898574",
"0.5896115",
"0.5882209",
"0.58722574",
"0.58598197",
"0.58549875",
"0.58480775",
"0.5844391",
"0.58176017",
"0.58092934",
"0.58069885",
"0.58031785",
"0.5793654",
"0.57896906",
"0.5784547",
"0.5779131",
"0.57778925",
"0.5775462",
"0.57739997",
"0.5757356",
"0.5743237",
"0.57317704",
"0.57261497",
"0.57204807",
"0.57022154",
"0.57000893",
"0.56990474",
"0.56926966",
"0.5672864",
"0.56681585",
"0.5659105",
"0.5657829",
"0.5656415",
"0.56561536",
"0.56561536",
"0.5650555",
"0.56379",
"0.56293195",
"0.5621551",
"0.56206614",
"0.56164044",
"0.56137794",
"0.5603219",
"0.56016856",
"0.55878884",
"0.55781037",
"0.5573737",
"0.5563018",
"0.55610555",
"0.5558584",
"0.5551945",
"0.5534946",
"0.55335677",
"0.5528886",
"0.5519247",
"0.5516305",
"0.5516034",
"0.5506022",
"0.55059093",
"0.55006766",
"0.55006313",
"0.54988927",
"0.54972607",
"0.54881513",
"0.5486589",
"0.54865783"
] | 0.58256733 | 42 |
toGA is a utility method to return the baseInstance data as a GA Instance object | func (bi *baseInstance) toGA() *ga.Instance {
inst := &ga.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*ga.NetworkInterface{{}}}
if bi.aliasRange != "" {
inst.NetworkInterfaces[0].AliasIpRanges = []*ga.AliasIpRange{
{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},
}
}
return inst
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (bi *baseInstance) toBeta() *beta.Instance {\n\tinst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},\n\t\t}\n\t}\n\treturn inst\n}",
"func (e *GT) Base() *GT {\n\tif e.p == nil {\n\t\te.p = &gfP12{}\n\t}\n\te.p.Set(gfP12Gen)\n\treturn e\n}",
"func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\treturn func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\t\tm.Lock.Lock()\n\t\tdefer m.Lock.Unlock()\n\n\t\tif _, found := m.Objects[*key]; !found {\n\t\t\tm.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()}\n\t\t}\n\t\treturn false, nil, nil\n\t}\n}",
"func (a *abaImpl) AsGPA() gpa.GPA {\n\treturn a.asGPA\n}",
"func CreateGqlDefFromInstance(name string, data interface{}) *graphql.Object {\n\tdataType := reflect.TypeOf(data)\n\treturn CreateGqlDefFromType(name, dataType)\n}",
"func ToGObject(p unsafe.Pointer) *C.GObject {\n\treturn (*C.GObject)(p)\n}",
"func SomeGraphToJSONable(\n\tinstance *SomeGraph) (\n\ttarget map[string]interface{}, err error) {\n\n\tif instance == nil {\n\t\tpanic(\"unexpected nil instance\")\n\t}\n\n\ttarget = make(map[string]interface{})\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ttarget = nil\n\t\t}\n\t}()\n\t////\n\t// Serialize instance registry of SomeClass\n\t////\n\n\tif len(instance.SomeClasses) > 0 {\n\t\ttargetSomeClasses := make(map[string]interface{})\n\t\tfor id := range instance.SomeClasses {\n\t\t\tsomeClassInstance := instance.SomeClasses[id]\n\n\t\t\tif id != someClassInstance.ID {\n\t\t\t\terr = fmt.Errorf(\n\t\t\t\t\t\"expected the instance of SomeClass to have the ID %s according to the registry, but got: %s\",\n\t\t\t\t\tid, someClassInstance.ID)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttargetSomeClasses[id] = SomeClassToJSONable(\n\t\t\t\tsomeClassInstance)\n\t\t}\n\n\t\ttarget[\"some_classes\"] = targetSomeClasses\n\t}\n\n\t////\n\t// Serialize instance registry of OtherClass\n\t////\n\n\tif len(instance.OtherClasses) > 0 {\n\t\ttargetOtherClasses := make(map[string]interface{})\n\t\tfor id := range instance.OtherClasses {\n\t\t\totherClassInstance := instance.OtherClasses[id]\n\n\t\t\tif id != otherClassInstance.ID {\n\t\t\t\terr = fmt.Errorf(\n\t\t\t\t\t\"expected the instance of OtherClass to have the ID %s according to the registry, but got: %s\",\n\t\t\t\t\tid, otherClassInstance.ID)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttargetOtherClasses[id] = OtherClassToJSONable(\n\t\t\t\totherClassInstance)\n\t\t}\n\n\t\ttarget[\"other_classes\"] = targetOtherClasses\n\t}\n\n\treturn\n}",
"func (v *Variant) ToGVariant() *C.GVariant {\n\tif v == nil {\n\t\treturn nil\n\t}\n\treturn v.native()\n}",
"func NewGraph(base Base) {\n\n}",
"func (s StatsGraph) construct() StatsGraphClass { return &s }",
"func (bi *BridgerInfo) pushInstance(ins *zstypes.InsCacheInfo) (uint64, uint64, error) {\n\n\tmeta := dtypes.StFileMeta{\n\t\tAccount: ins.AName,\n\t\tFiletype: \"prometheus\",\n\t\tIid: ins.IName,\n\t\tIsCompressed: true,\n\t\tDTs: ins.DTs,\n\t\tOLabels: ins.OLabels,\n\t}\n\n\tif config.GlCfg.BrdigerNotifyStIngest {\n\t\tfor atomic.LoadInt32(&bi.stingestWorkerReady) == 0 {\n\t\t\tzlog.Info(\"Waiting for stingest req_q worker to be ready\")\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t}\n\t}\n\n\tdmepoch := time.Now().UnixNano() / 1000\n\tdts := strconv.FormatInt(dmepoch, 10)\n\ttmpDir := path.Join(config.GlCfg.StatsReqsDir, ins.AName, strings.Replace(ins.IName, \":\", \"_\", 1), dts)\n\terr := os.MkdirAll(tmpDir, 0755)\n\tif err != nil {\n\t\tzlog.Error(\"Failed to create directory %s: %s\", tmpDir, err)\n\t\treturn uint64(0), uint64(0), err\n\t}\n\ttmpMFpath := filepath.Join(tmpDir, dts+\".json\")\n\ttmpDFpath := filepath.Join(tmpDir, dts+\".data.gz\")\n\n\tclblsMap, ntstamps, nbytes, err := bi.writeStatsFile(tmpDFpath, ins)\n\tif err != nil {\n\t\tzlog.Error(\"Failed to write to file %s, err %s\\n\", tmpDFpath, err)\n\t\treturn ntstamps, nbytes, err\n\t}\n\n\tif len(clblsMap) > 0 {\n\t\tmeta.Kvs = make([]dtypes.KvPair, 0, len(clblsMap))\n\t\tfor k, v := range clblsMap {\n\t\t\tmeta.Kvs = append(meta.Kvs, dtypes.KvPair{N: k, V: v})\n\t\t}\n\t}\n\tmjson, err := json.Marshal(meta)\n\tif err != nil {\n\t\tzlog.Error(\"Failed to marshal file metadata %v: %s\", meta, err)\n\t\treturn ntstamps, nbytes, err\n\t}\n\terr = ioutil.WriteFile(tmpMFpath, mjson, 0644)\n\tif err != nil {\n\t\tzlog.Error(\"Failed to write to file %s, err %s\\n\", tmpMFpath, err)\n\t\treturn ntstamps, nbytes, err\n\t}\n\n\t// If configured, send to the next stage for further processing.\n\tif config.GlCfg.BrdigerNotifyStIngest {\n\t\treq := stingestpb.StatsIngestRequest{\n\t\t\tAccount: ins.AName,\n\t\t\tDFpath: tmpDFpath,\n\t\t\tMFpath: tmpMFpath,\n\t\t\tInstanceId: ins.IName,\n\t\t\tType: meta.Filetype,\n\t\t\tDTs: ins.DTs,\n\t\t}\n\n\t\tsReq := &stiReq{tmpDir: tmpDir, req: &req}\n\t\tbi.stiReqCh <- sReq\n\t}\n\n\treturn ntstamps, nbytes, nil\n}",
"func newDataInstance(repo datastore.Repo, t *testing.T, name dvid.DataString) *Data {\n\tconfig := dvid.NewConfig()\n\tconfig.SetVersioned(true)\n\tdataservice, err := repo.NewData(labelsT, name, config)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to create labels64 instance %q: %s\\n\", name, err.Error())\n\t}\n\tlabels, ok := dataservice.(*Data)\n\tif !ok {\n\t\tt.Errorf(\"Can't cast labels data service into Data\\n\")\n\t}\n\treturn labels\n}",
"func (track *Track) ToDb() interface{} {\n\treturn track.Id\n}",
"func OtherClassToJSONable(\n\tinstance *OtherClass) (\n\ttarget map[string]interface{}) {\n\n\tif instance == nil {\n\t\tpanic(\"unexpected nil instance\")\n\t}\n\n\ttarget = make(map[string]interface{})\n\n\t////\n\t// Serialize ReferenceSome\n\t////\n\n\ttarget[\"reference_some\"] = instance.ReferenceSome.ID\n\n\t////\n\t// Serialize ArrayOfSomes\n\t////\n\n\tcount0 := len(instance.ArrayOfSomes)\n\tslice0 := instance.ArrayOfSomes\n\ttarget0 := make([]interface{}, count0)\n\tfor i0 := 0; i0 < count0; i0++ {\n\t\ttarget0[i0] = slice0[i0].ID\n\t}\n\ttarget[\"array_of_somes\"] = target0\n\n\t////\n\t// Serialize MapOfSomes\n\t////\n\n\ttarget1 := make(map[string]interface{})\n\tmap1 := instance.MapOfSomes\n\tfor k1, v1 := range map1 {\n\t\ttarget1[k1] = v1.ID\n\t}\n\ttarget[\"map_of_somes\"] = target1\n\n\treturn\n}",
"func newdbBasePostgres() dbBaser {\n\tb := new(dbBasePostgres)\n\tb.ins = b\n\treturn b\n}",
"func toRecord(cache airtabledb.DB, src Feature, dst interface{}) {\n\tdV := reflect.ValueOf(dst).Elem().FieldByName(\"Fields\")\n\tsV := reflect.ValueOf(src)\n\tcopyFields(cache, sV, dV)\n}",
"func GAEResource(ctx context.Context) (*MonitoredResource, error) {\n\t// appengine.IsAppEngine is confusingly false as we're using a custom\n\t// container and building without the appenginevm build constraint.\n\t// Check metadata.OnGCE instead.\n\tif !metadata.OnGCE() {\n\t\treturn nil, fmt.Errorf(\"not running on appengine\")\n\t}\n\tprojID, err := metadata.ProjectID()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn (*MonitoredResource)(&mrpb.MonitoredResource{\n\t\tType: \"gae_instance\",\n\t\tLabels: map[string]string{\n\t\t\t\"project_id\": projID,\n\t\t\t\"module_id\": appengine.ModuleName(ctx),\n\t\t\t\"version_id\": appengine.VersionID(ctx),\n\t\t\t\"instance_id\": appengine.InstanceID(),\n\t\t\t\"location\": appengine.Datacenter(ctx),\n\t\t},\n\t}), nil\n}",
"func (e *EncryptedChatRequested) GetGA() (value []byte) {\n\treturn e.GA\n}",
"func (self *Graphics) Data() interface{}{\n return self.Object.Get(\"data\")\n}",
"func Generational(MakeGenome GenomeMaker) GA {\n\tvar ga = GA{\n\t\tMakeGenome: MakeGenome,\n\t\tTopology: Topology{\n\t\t\tNPopulations: 2,\n\t\t\tNIndividuals: 50,\n\t\t},\n\t\tModel: ModGenerational{\n\t\t\tSelector: SelTournament{\n\t\t\t\tNParticipants: 3,\n\t\t\t},\n\t\t\tMutRate: 0.5,\n\t\t},\n\t}\n\tga.Initialize()\n\treturn ga\n}",
"func (c *converter) ToEntity(in model.AutomaticScenarioAssignment) Entity {\n\treturn Entity{\n\t\tTenantID: in.Tenant,\n\t\tScenario: in.ScenarioName,\n\t\tTargetTenantID: in.TargetTenantID,\n\t}\n}",
"func (m *AzureManager) GetAsgForInstance(instance *azureRef) (cloudprovider.NodeGroup, error) {\n\treturn m.asgCache.FindForInstance(instance, m.config.VMType)\n}",
"func dataToSg(name string, d *schema.ResourceData) go_thunder.ServiceGroup {\n\t//\tlogger := util.GetLoggerInstance()\n\tvar s go_thunder.ServiceGroup\n\n\tvar sInstance go_thunder.ServiceGroupInstance\n\n\tsInstance.ConnRate = d.Get(\"conn_rate\").(int)\n\tsInstance.ResetOnServerSelectionFail = d.Get(\"reset_on_server_selection_fail\").(int)\n\tsInstance.HealthCheckDisable = d.Get(\"health_check_disable\").(int)\n\tsInstance.Protocol = d.Get(\"protocol\").(string)\n\tsInstance.TrafficReplicationMirrorIPRepl = d.Get(\"traffic_replication_mirror_ip_repl\").(int)\n\tsInstance.ResetPriorityAffinity = d.Get(\"reset_priority_affinity\").(int)\n\tsInstance.MinActiveMember = d.Get(\"min_active_member\").(int)\n\tsInstance.StatsDataAction = d.Get(\"stats_data_action\").(string)\n\tsInstance.TrafficReplicationMirrorDaRepl = d.Get(\"traffic_replication_mirror_da_repl\").(int)\n\tsInstance.TemplatePolicyShared = d.Get(\"template_policy_shared\").(string)\n\tsInstance.RptExtServer = d.Get(\"rpt_ext_server\").(int)\n\tsInstance.TemplatePort = d.Get(\"template_port\").(string)\n\tsInstance.ConnRateGracePeriod = d.Get(\"conn_rate_grace_period\").(int)\n\tsInstance.L4SessionUsageDuration = d.Get(\"l4_session_usage\").(int)\n\tsInstance.UUID = d.Get(\"uuid\").(string)\n\tsInstance.BackupServerEventLog = d.Get(\"backup_server_event_log\").(int)\n\tsInstance.LcMethod = d.Get(\"lc_method\").(string)\n\tsInstance.PseudoRoundRobin = d.Get(\"pseudo_round_robin\").(int)\n\tsInstance.SharedPartitionPolicyTemplate = d.Get(\"shared_partition_policy_template\").(int)\n\tsInstance.L4SessionUsageRevertRate = d.Get(\"l4_session_usage_revert_rate\").(int)\n\tsInstance.SharedPartitionSvcgrpHealthCheck = d.Get(\"shared_partition_svcgrp_health_check\").(int)\n\tsInstance.TemplateServer = d.Get(\"template_server\").(string)\n\tsInstance.SvcgrpHealthCheckShared = d.Get(\"svcgrp_health_check_shared\").(string)\n\tsInstance.TrafficReplicationMirror = d.Get(\"traffic_replication_mirror\").(int)\n\tsInstance.L4SessionRevertDuration = d.Get(\"l4_session_revert_duration\").(int)\n\tsInstance.TrafficReplicationMirrorSaDaRepl = d.Get(\"traffic_replication_mirror_sa_da_repl\").(int)\n\tsInstance.LbMethod = d.Get(\"lb_method\").(string)\n\tsInstance.StatelessAutoSwitch = d.Get(\"stateless_auto_switch\").(int)\n\tsInstance.MinActiveMemberAction = d.Get(\"min_active_member_action\").(string)\n\tsInstance.L4SessionUsage = d.Get(\"l4_session_usage\").(int)\n\tsInstance.ExtendedStats = d.Get(\"extended_stats\").(int)\n\tsInstance.ConnRateRevertDuration = d.Get(\"conn_rate_revert_duration\").(int)\n\tsInstance.StrictSelect = d.Get(\"strict_select\").(int)\n\tsInstance.Name = d.Get(\"name\").(string)\n\tsInstance.TrafficReplicationMirrorSaRepl = d.Get(\"traffic_replication_mirror_sa_repl\").(int)\n\tsInstance.ReportDelay = d.Get(\"report_delay\").(int)\n\tsInstance.ConnRateLog = d.Get(\"conn_rate_log\").(int)\n\tsInstance.L4SessionUsageLog = d.Get(\"l4_session_usage_log\").(int)\n\tsInstance.ConnRateDuration = d.Get(\"conn_rate_duration\").(int)\n\tsInstance.StatelessLbMethod = d.Get(\"stateless_lb_method\").(string)\n\tsInstance.TemplatePolicy = d.Get(\"template_policy\").(string)\n\tsInstance.StatelessLbMethod2 = d.Get(\"stateless_lb_method2\").(string)\n\tsInstance.UserTag = d.Get(\"user_tag\").(string)\n\tsInstance.SampleRspTime = d.Get(\"sample_rsp_time\").(int)\n\tsInstance.TopFastest = d.Get(\"top_fastest\").(int)\n\tsInstance.ConnRevertRate = d.Get(\"conn_revert_rate\").(int)\n\tsInstance.L4SessionUsageGracePeriod = 
d.Get(\"l4_session_usage_grace_period\").(int)\n\tsInstance.PriorityAffinity = d.Get(\"priority_affinity\").(int)\n\tsInstance.TopSlowest = d.Get(\"top_slowest\").(int)\n\tsInstance.HealthCheck = d.Get(\"health_check\").(string)\n\n\tpriorityCount := d.Get(\"priorities.#\").(int)\n\tsInstance.Priority = make([]go_thunder.Priorities, 0, priorityCount)\n\tfor i := 0; i < priorityCount; i++ {\n\t\tvar pr go_thunder.Priorities\n\t\tprefix := fmt.Sprintf(\"priorities.%d\", i)\n\t\tpr.Priority = d.Get(prefix + \".priority\").(int)\n\t\tpr.PriorityAction = d.Get(prefix + \".priority_action\").(string)\n\n\t\tsInstance.Priority = append(sInstance.Priority, pr)\n\t}\n\n\tsamplingCount := d.Get(\"sampling_enable.#\").(int)\n\tsInstance.Counters1 = make([]go_thunder.SamplingEnable, 0, samplingCount)\n\tfor i := 0; i < samplingCount; i++ {\n\t\tvar sm go_thunder.SamplingEnable\n\t\tprefix := fmt.Sprintf(\"sampling_enable.%d\", i)\n\t\tsm.Counters1 = d.Get(prefix + \".counters1\").(string)\n\n\t\tsInstance.Counters1 = append(sInstance.Counters1, sm)\n\t}\n\n\t//NEED TO FIGURE OUT IF VALUE IS PROVIDED IN TF FILE OR DEFAULT IS BEING USED\n\t//\tvar as Reset\n\t//\tas.AutoSwitch = d.Get(\"reset.0.auto_switch\").(int)\n\t//\tlogger.Println(\"[INFO] Auto switch is- \", d.Get(\"reset.0.auto_switch\").(int))\n\t//\tsInstance.AutoSwitch = as\n\n\tmemberCount := d.Get(\"member_list.#\").(int)\n\tsInstance.Host = make([]go_thunder.MemberList, 0, memberCount)\n\tfor i := 0; i < memberCount; i++ {\n\t\tvar ml go_thunder.MemberList\n\t\tprefix := fmt.Sprintf(\"member_list.%d\", i)\n\t\tml.FqdnName = d.Get(prefix + \".fqdn_name\").(string)\n\t\tml.Host = d.Get(prefix + \".host\").(string)\n\t\tml.MemberPriority = d.Get(prefix + \".member_priority\").(int)\n\t\tml.MemberState = d.Get(prefix + \".member_state\").(string)\n\t\tml.MemberStatsDataDisable = d.Get(prefix + \".member_stats_data_disable\").(int)\n\t\tml.MemberTemplate = d.Get(prefix + \".member_template\").(string)\n\t\tml.Name = d.Get(prefix + \".name\").(string)\n\t\tml.Port = d.Get(prefix + \".port\").(int)\n\t\tml.ResolveAs = d.Get(prefix + \".resolve_as\").(string)\n\t\tml.ServerIpv6Addr = d.Get(prefix + \".server_ipv6_addr\").(string)\n\t\tml.UUID = d.Get(prefix + \".uuid\").(string)\n\t\tml.UserTag = d.Get(prefix + \".user_tag\").(string)\n\n\t\tsampleCount := d.Get(prefix + \".sampling_enable.#\").(int)\n\t\tml.Counters1 = make([]go_thunder.SamplingEnable, sampleCount, sampleCount)\n\n\t\tfor x := 0; x < sampleCount; x++ {\n\t\t\tvar s go_thunder.SamplingEnable\n\t\t\tmapEntity(d.Get(fmt.Sprintf(\"%s.sampling_enable.%d\", prefix, x)).(map[string]interface{}), &s)\n\t\t\tml.Counters1[x] = s\n\t\t}\n\n\t\tsInstance.Host = append(sInstance.Host, ml)\n\t}\n\n\ts.Name = sInstance\n\n\treturn s\n}",
"func (ga *GenesisAccount) ToAccount() auth.Account {\n\tbacc := &auth.BaseAccount{\n\t\tAddress: ga.Address,\n\t\tCoins: ga.Coins.Sort(),\n\t\tAccountNumber: ga.AccountNumber,\n\t\tSequence: ga.Sequence,\n\t}\n\n\tif !ga.OriginalVesting.IsZero() {\n\t\tbaseVestingAcc := &auth.BaseVestingAccount{\n\t\t\tBaseAccount: bacc,\n\t\t\tOriginalVesting: ga.OriginalVesting,\n\t\t\tDelegatedFree: ga.DelegatedFree,\n\t\t\tDelegatedVesting: ga.DelegatedVesting,\n\t\t\tEndTime: ga.EndTime,\n\t\t}\n\n\t\tif ga.StartTime != 0 && ga.EndTime != 0 {\n\t\t\treturn &auth.ContinuousVestingAccount{\n\t\t\t\tBaseVestingAccount: baseVestingAcc,\n\t\t\t\tStartTime: ga.StartTime,\n\t\t\t}\n\t\t} else if ga.EndTime != 0 {\n\t\t\treturn &auth.DelayedVestingAccount{\n\t\t\t\tBaseVestingAccount: baseVestingAcc,\n\t\t\t}\n\t\t} else {\n\t\t\tpanic(fmt.Sprintf(\"invalid genesis vesting account: %+v\", ga))\n\t\t}\n\t}\n\n\treturn bacc\n}",
"func (ga *GA) Init(pool_size uint, data_size uint, fit_func FitnessFunc, p GA_Params) {\n\n rand.Seed(time.Now().UTC().UnixNano())\n data_bytes := (data_size + 7) / 8\n ga.Population = make(GenePool, pool_size)\n // for _, ind := range ga.Population {\n for i := range ga.Population {\n\t// var ind *Individual\n\tind := new(Individual)\n\tind.Data = make([]byte, data_bytes)\n\trandom_word := rand.Uint32()\n\tfor j := range ind.Data {\n\t if (j % 4) == 0 {\n\t\trandom_word = rand.Uint32()\n\t }\n\t ind.Data[j] = byte(random_word & 0xff)\n\t random_word >>= 8\n\t}\n\tga.Population[i] = ind\n }\n ga.Params = p\n ga.data_size = data_size\n ga.Generation = 0\n ga.fit_func = fit_func\n ga.MeasureAndSort()\n ga.Stats_best = make([]float64, 0, 1024)\n ga.Stats_avg = make([]float64, 0, 1024)\n ga.Stats_best = append(ga.Stats_best, ga.Population[0].Fitness)\n ga.Stats_avg = append(ga.Stats_avg, ga.AvgFitness())\n}",
"func GetDataBase() *gorm.DB {\n\treturn db\n}",
"func (g UGaugeSnapshot) Snapshot() UGauge { return g }",
"func (c *converter) ToGraphQL(in model.AutomaticScenarioAssignment, targetTenantExternalID string) graphql.AutomaticScenarioAssignment {\n\treturn graphql.AutomaticScenarioAssignment{\n\t\tScenarioName: in.ScenarioName,\n\t\tSelector: &graphql.Label{\n\t\t\tKey: SubaccountIDKey,\n\t\t\tValue: targetTenantExternalID,\n\t\t},\n\t}\n}",
"func (self *Tween) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}",
"func GetInstance() *CoinBase {\n\treturn ins\n}",
"func ToGob(src interface{}) ([]byte, error) {\n\treturn NewGobber().To(src)\n}",
"func (self *GameObjectCreator) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}",
"func (profile *Profile) ToDb() interface{} {\n\treturn profile.Id\n}",
"func instancesToProto(insts registry.Instances) []*instances.Instance {\n\tret := make([]*instances.Instance, 0)\n\tfor _, inst := range insts {\n\t\tprotoInst := &instances.Instance{\n\t\t\tInstanceId: proto.String(inst.Id),\n\t\t\tHostname: proto.String(inst.Hostname),\n\t\t\tMachineClass: proto.String(inst.MachineClass),\n\t\t\tServiceName: proto.String(inst.Name),\n\t\t\tServiceDescription: proto.String(inst.Description),\n\t\t\tServiceVersion: proto.Uint64(inst.Version),\n\t\t\tAzName: proto.String(inst.AzName),\n\t\t\tSubTopic: make([]string, 0),\n\t\t}\n\t\tfor _, ep := range inst.Endpoints {\n\t\t\tif ep.Subscribe != \"\" {\n\t\t\t\tprotoInst.SubTopic = append(protoInst.SubTopic, ep.Subscribe)\n\t\t\t}\n\t\t}\n\t\tret = append(ret, protoInst)\n\t}\n\treturn ret\n}",
"func VFromDB(gid GoogleID) (*VAgent, time.Time, error) {\n\ta := VAgent{\n\t\tGid: gid,\n\t}\n\tvar fetched string\n\tvar t time.Time\n\tvar vlevel, vpoints, distance sql.NullInt64\n\tvar telegram, cellid sql.NullString\n\tvar startlat, startlon sql.NullFloat64\n\n\terr := db.QueryRow(\"SELECT enlid, vlevel, vpoints, agent, level, quarantine, active, blacklisted, verified, flagged, banned, cellid, telegram, startlat, startlon, distance, fetched FROM v WHERE gid = ?\", gid).Scan(&a.EnlID, &vlevel, &vpoints, &a.Agent, &a.Level, &a.Quarantine, &a.Active, &a.Blacklisted, &a.Verified, &a.Flagged, &a.Banned, &cellid, &telegram, &startlat, &startlon, &distance, &fetched)\n\tif err != nil && err != sql.ErrNoRows {\n\t\tlog.Error(err)\n\t\treturn &a, t, err\n\t}\n\n\tif err == sql.ErrNoRows {\n\t\treturn &a, t, nil\n\t}\n\n\tif fetched == \"\" {\n\t\treturn &a, t, nil\n\t}\n\n\tif vlevel.Valid {\n\t\ta.Vlevel = vlevel.Int64\n\t}\n\tif vpoints.Valid {\n\t\ta.Vpoints = vpoints.Int64\n\t}\n\tif telegram.Valid {\n\t\ta.Telegram = telegram.String\n\t}\n\tif cellid.Valid {\n\t\ta.CellID = cellid.String\n\t}\n\tif startlat.Valid {\n\t\ta.StartLat = startlat.Float64\n\t}\n\tif startlon.Valid {\n\t\ta.StartLon = startlon.Float64\n\t}\n\tif distance.Valid {\n\t\ta.Distance = distance.Int64\n\t}\n\n\tt, err = time.ParseInLocation(\"2006-01-02 15:04:05\", fetched, time.UTC)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t// return &a, t, err\n\t}\n\t// log.Debugw(\"VFromDB\", \"gid\", gid, \"fetched\", fetched, \"data\", a)\n\treturn &a, t, nil\n}",
"func toGame(data interface{}, isResponse bool) *Game {\n\tif data == nil {\n\t\treturn nil\n\t}\n\n\tif isResponse {\n\t\tdest := gameResponse{}\n\n\t\tif recast(data, &dest) == nil {\n\t\t\treturn &dest.Data\n\t\t}\n\t} else {\n\t\tdest := Game{}\n\n\t\tif recast(data, &dest) == nil {\n\t\t\treturn &dest\n\t\t}\n\t}\n\n\treturn nil\n}",
"func gceInfo(inst *instance) error {\n\tvar err error\n\tinst.zone, err = metadata.Zone()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinst.name, err = metadata.InstanceName()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinst.hostname, err = metadata.Hostname()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinst.project, err = metadata.ProjectID()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func SomeClassToJSONable(\n\tinstance *SomeClass) (\n\ttarget map[string]interface{}) {\n\n\tif instance == nil {\n\t\tpanic(\"unexpected nil instance\")\n\t}\n\n\ttarget = make(map[string]interface{})\n\n\t////\n\t// Serialize ReferenceOther\n\t////\n\n\ttarget[\"reference_other\"] = instance.ReferenceOther.ID\n\n\t////\n\t// Serialize ArrayOfOthers\n\t////\n\n\tcount0 := len(instance.ArrayOfOthers)\n\tslice0 := instance.ArrayOfOthers\n\ttarget0 := make([]interface{}, count0)\n\tfor i0 := 0; i0 < count0; i0++ {\n\t\ttarget0[i0] = slice0[i0].ID\n\t}\n\ttarget[\"array_of_others\"] = target0\n\n\t////\n\t// Serialize MapOfOthers\n\t////\n\n\ttarget1 := make(map[string]interface{})\n\tmap1 := instance.MapOfOthers\n\tfor k1, v1 := range map1 {\n\t\ttarget1[k1] = v1.ID\n\t}\n\ttarget[\"map_of_others\"] = target1\n\n\treturn\n}",
"func newBase() *base {\n\treturn &base{shared.NewUUID(), time.Now().UTC(), time.Now().UTC(), false/*, shared.NewUUID()*/}\n}",
"func ProtoToInstance(p *sqlpb.SqlInstance) *sql.Instance {\n\tobj := &sql.Instance{\n\t\tBackendType: ProtoToSqlInstanceBackendTypeEnum(p.GetBackendType()),\n\t\tConnectionName: dcl.StringOrNil(p.ConnectionName),\n\t\tDatabaseVersion: ProtoToSqlInstanceDatabaseVersionEnum(p.GetDatabaseVersion()),\n\t\tEtag: dcl.StringOrNil(p.Etag),\n\t\tGceZone: dcl.StringOrNil(p.GceZone),\n\t\tInstanceType: ProtoToSqlInstanceInstanceTypeEnum(p.GetInstanceType()),\n\t\tMasterInstanceName: dcl.StringOrNil(p.MasterInstanceName),\n\t\tMaxDiskSize: ProtoToSqlInstanceMaxDiskSize(p.GetMaxDiskSize()),\n\t\tName: dcl.StringOrNil(p.Name),\n\t\tProject: dcl.StringOrNil(p.Project),\n\t\tRegion: dcl.StringOrNil(p.Region),\n\t\tRootPassword: dcl.StringOrNil(p.RootPassword),\n\t\tCurrentDiskSize: ProtoToSqlInstanceCurrentDiskSize(p.GetCurrentDiskSize()),\n\t\tDiskEncryptionConfiguration: ProtoToSqlInstanceDiskEncryptionConfiguration(p.GetDiskEncryptionConfiguration()),\n\t\tFailoverReplica: ProtoToSqlInstanceFailoverReplica(p.GetFailoverReplica()),\n\t\tMasterInstance: ProtoToSqlInstanceMasterInstance(p.GetMasterInstance()),\n\t\tReplicaConfiguration: ProtoToSqlInstanceReplicaConfiguration(p.GetReplicaConfiguration()),\n\t\tScheduledMaintenance: ProtoToSqlInstanceScheduledMaintenance(p.GetScheduledMaintenance()),\n\t\tSettings: ProtoToSqlInstanceSettings(p.GetSettings()),\n\t}\n\tfor _, r := range p.GetIpAddresses() {\n\t\tobj.IPAddresses = append(obj.IPAddresses, *ProtoToSqlInstanceIPAddresses(r))\n\t}\n\treturn obj\n}",
"func (self *Graphics) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}",
"func newdbBaseClickHouse() dbBaser {\n\tb := new(dbBaseClickHouse)\n\tb.ins = b\n\treturn b\n}",
"func (obj *transaction) Graphbase() graphbases.Transaction {\n\treturn obj.graphbase\n}",
"func (s StatsGraphAsync) construct() StatsGraphClass { return &s }",
"func ToDomain(gymData *Gym) gyms.Domain {\n\treturn gyms.Domain{\n\t\tID: gymData.ID,\n\t\tName: gymData.Name,\n\t\tAddress: gymData.Address,\n\t\tCreatedAt: gymData.CreatedAt,\n\t\tUpdatedAt: gymData.UpdatedAt,\n\t\tDeletedAt: gymData.DeletedAt,\n\t}\n}",
"func Instance() *gorm.DB {\n return database\n}",
"func (g *Group) TOML() interface{} {\n\tgtoml := &GroupTOML{\n\t\tThreshold: g.Threshold,\n\t}\n\tgtoml.Nodes = make([]*NodeTOML, g.Len())\n\tfor i, n := range g.Nodes {\n\t\tgtoml.Nodes[i] = n.TOML().(*NodeTOML)\n\t}\n\n\tif g.PublicKey != nil {\n\t\tgtoml.PublicKey = g.PublicKey.TOML().(*DistPublicTOML)\n\t}\n\n\tgtoml.ID = g.ID\n\tgtoml.SchemeID = g.Scheme.Name\n\tgtoml.Period = g.Period.String()\n\tgtoml.CatchupPeriod = g.CatchupPeriod.String()\n\tgtoml.GenesisTime = g.GenesisTime\n\tif g.TransitionTime != 0 {\n\t\tgtoml.TransitionTime = g.TransitionTime\n\t}\n\tgtoml.GenesisSeed = hex.EncodeToString(g.GetGenesisSeed())\n\treturn gtoml\n}",
"func toGoBGPParameters(obj Protection, protectionID int64) []db_models.GoBgpParameter {\n\tresult := make([]db_models.GoBgpParameter, 0)\n t, _ := obj.(*RTBH)\n\tfor _, target := range t.RtbhTargets() {\n\t\tresult = append(result, db_models.GoBgpParameter{\n\t\t\tProtectionId: protectionID,\n\t\t\tTargetAddress: target})\n\t}\n\n\treturn result\n}",
"func populateInstance(instance *spotcluster.Instance,\n\tdroplet provider.InstanceConfig) {\n\tinstance.Spec.InstanceName = droplet.Name\n\tinstance.Spec.RemoteAddress = func() string {\n\t\tif droplet.ExteralIP != \"\" {\n\t\t\treturn droplet.ExteralIP + \":22\"\n\t\t}\n\t\treturn \"\"\n\t}()\n\tinstance.Spec.ExternalIP = droplet.ExteralIP\n\tinstance.Spec.InternalIP = droplet.InternalIP\n\tinstance.Spec.InstanceAvailable = true\n\tinstance.Spec.InstanceReady = droplet.IsRunning\n\tinstance.Spec.NodeAvailable = false\n\tinstance.Finalizers = func() []string {\n\t\treturn []string{controller.InstanceProtectionFinalizer}\n\t}()\n\tinstance.Labels[controller.LabelInstanceID] = droplet.ID\n}",
"func (c Config) toInterface() interface{} {\n\treturn c.RgwStorage\n}",
"func InstanceToProto(resource *sql.Instance) *sqlpb.SqlInstance {\n\tp := &sqlpb.SqlInstance{\n\t\tBackendType: SqlInstanceBackendTypeEnumToProto(resource.BackendType),\n\t\tConnectionName: dcl.ValueOrEmptyString(resource.ConnectionName),\n\t\tDatabaseVersion: SqlInstanceDatabaseVersionEnumToProto(resource.DatabaseVersion),\n\t\tEtag: dcl.ValueOrEmptyString(resource.Etag),\n\t\tGceZone: dcl.ValueOrEmptyString(resource.GceZone),\n\t\tInstanceType: SqlInstanceInstanceTypeEnumToProto(resource.InstanceType),\n\t\tMasterInstanceName: dcl.ValueOrEmptyString(resource.MasterInstanceName),\n\t\tMaxDiskSize: SqlInstanceMaxDiskSizeToProto(resource.MaxDiskSize),\n\t\tName: dcl.ValueOrEmptyString(resource.Name),\n\t\tProject: dcl.ValueOrEmptyString(resource.Project),\n\t\tRegion: dcl.ValueOrEmptyString(resource.Region),\n\t\tRootPassword: dcl.ValueOrEmptyString(resource.RootPassword),\n\t\tCurrentDiskSize: SqlInstanceCurrentDiskSizeToProto(resource.CurrentDiskSize),\n\t\tDiskEncryptionConfiguration: SqlInstanceDiskEncryptionConfigurationToProto(resource.DiskEncryptionConfiguration),\n\t\tFailoverReplica: SqlInstanceFailoverReplicaToProto(resource.FailoverReplica),\n\t\tMasterInstance: SqlInstanceMasterInstanceToProto(resource.MasterInstance),\n\t\tReplicaConfiguration: SqlInstanceReplicaConfigurationToProto(resource.ReplicaConfiguration),\n\t\tScheduledMaintenance: SqlInstanceScheduledMaintenanceToProto(resource.ScheduledMaintenance),\n\t\tSettings: SqlInstanceSettingsToProto(resource.Settings),\n\t}\n\tfor _, r := range resource.IPAddresses {\n\t\tp.IpAddresses = append(p.IpAddresses, SqlInstanceIPAddressesToProto(&r))\n\t}\n\n\treturn p\n}",
"func dataToLogging(name string, d *schema.ResourceData) go_thunder.Logging {\n\tvar s go_thunder.Logging\n\n\tvar sInstance go_thunder.LoggingInstance\n\n\tsInstance.PoolShared = d.Get(\"pool_shared\").(string)\n\tsInstance.Name = d.Get(\"name\").(string)\n\tsInstance.Format = d.Get(\"format\").(string)\n\tsInstance.Auto = d.Get(\"auto\").(string)\n\tsInstance.KeepEnd = d.Get(\"keep_end\").(int)\n\tsInstance.LocalLogging = d.Get(\"local_logging\").(int)\n\tsInstance.Mask = d.Get(\"mask\").(string)\n\tsInstance.TemplateTCPProxyShared = d.Get(\"template_tcp_proxy_shared\").(string)\n\tsInstance.SharedPartitionTCPProxyTemplate = d.Get(\"shared_partition_tcp_proxy_template\").(int)\n\tsInstance.KeepStart = d.Get(\"keep_start\").(int)\n\tsInstance.ServiceGroup = d.Get(\"service_group\").(string)\n\tsInstance.PcreMask = d.Get(\"pcre_mask\").(string)\n\tsInstance.UserTag = d.Get(\"user_tag\").(string)\n\tsInstance.TCPProxy = d.Get(\"tcp_proxy\").(string)\n\tsInstance.SharedPartitionPool = d.Get(\"shared_partition_pool\").(int)\n\tsInstance.Pool = d.Get(\"pool\").(string)\n\n\ts.Name = sInstance\n\n\treturn s\n}",
"func (self *SinglePad) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}",
"func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n C.glowDrawElementsInstancedBaseVertexBaseInstance(gpDrawElementsInstancedBaseVertexBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex), (C.GLuint)(baseinstance))\n}",
"func (reg *Registry) ToReal(logger logr.Logger) (globalregistry.Registry, error) {\n\treturn globalregistry.New(logger, reg)\n}",
"func (g FunctionalUGauge) Snapshot() UGauge { return UGaugeSnapshot(g.Value()) }",
"func (self *PhysicsP2) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}",
"func VToDB(a *VAgent) error {\n\tif a.Agent == \"\" {\n\t\treturn nil\n\t}\n\n\tif len(a.Agent) > 15 {\n\t\tlog.Infow(\"bad agent name from V\", \"gid\", a.Gid, \"name\", a.Agent)\n\t}\n\n\t// telegram, startlat, startlon, distance, fetched are not set on the \"trust\" API call.\n\t// use ON DUPLICATE so as to not overwrite apikey or telegram\n\t// TODO: prune fields we will never use or that V never sends\n\t_, err := db.Exec(\"INSERT INTO v (enlid, gid, vlevel, vpoints, agent, level, quarantine, active, blacklisted, verified, flagged, banned, cellid, startlat, startlon, distance, fetched) VALUES (?,?,?,?,LEFT(?,15),?,?,?,?,?,?,?,?,?,?,?,UTC_TIMESTAMP()) ON DUPLICATE KEY UPDATE agent=LEFT(?, 15), quarantine=?, blacklisted=?, verified=?, flagged=?, banned=?, fetched=UTC_TIMESTAMP()\",\n\t\ta.EnlID, a.Gid, a.Vlevel, a.Vpoints, a.Agent, a.Level, a.Quarantine, a.Active, a.Blacklisted, a.Verified, a.Flagged, a.Banned, a.CellID, a.StartLat, a.StartLon, a.Distance,\n\t\ta.Agent, a.Quarantine, a.Blacklisted, a.Verified, a.Flagged, a.Banned)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\tif a.TelegramID != 0 {\n\t\texisting, err := a.Gid.TelegramID()\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn err\n\t\t}\n\t\tif existing == 0 {\n\t\t\terr := a.Gid.SetTelegramID(TelegramID(a.TelegramID), a.Telegram)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (i *Interface) Instance() interface{} {\n\treturn i.base.instance\n}",
"func (app *adapter) ToGenesis(js []byte) (Genesis, error) {\n\tins := new(genesis)\n\terr := json.Unmarshal(js, ins)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ins, nil\n}",
"func GetInstance() *gorm.DB {\n\treturn DB\n}",
"func (v *Vehicle) AsProto() *gtfsrt.FeedEntity {\n\tlat32 := float32(v.Lat)\n\tlon32 := float32(v.Lon)\n\tbearing32 := float32(v.Bearing)\n\ttstamp := uint64(v.TimeObj.Unix())\n\n\treturn >fsrt.FeedEntity{\n\t\tId: &v.ID,\n\t\tVehicle: >fsrt.VehiclePosition{\n\t\t\tTrip: >fsrt.TripDescriptor{TripId: &v.Trip},\n\t\t\tVehicle: >fsrt.VehicleDescriptor{Id: &v.ID, Label: &v.SideNumber},\n\t\t\tPosition: >fsrt.Position{\n\t\t\t\tLatitude: &lat32,\n\t\t\t\tLongitude: &lon32,\n\t\t\t\tBearing: &bearing32,\n\t\t\t},\n\t\t\tTimestamp: &tstamp,\n\t\t},\n\t}\n}",
"func (info PGInfo) ToPGOption() *pg.Options {\n\treturn &pg.Options{\n\t\tApplicationName: \"unit testing\",\n\t\tDatabase: info.Database,\n\t\tUser: info.User,\n\t\tPassword: info.Password,\n\t\tAddr: fmt.Sprintf(\"127.0.0.1:%d\", info.Port),\n\t}\n}",
"func (gs *GameSpec) toParams() (gsp GameSpecParams) {\n\tgsp = GameSpecParams{\n\t\tTeaser: gs.Description,\n\t\tPace: gs.Pace,\n\t\tNbTurn: gs.Turns,\n\t\tNbAntPerPlayer: gs.AntsPerPlayer,\n\t\tNbPlayer: gs.MaxPlayers,\n\t\tMinimalNbPlayer: gs.MinPlayers,\n\t\tInitialEnergy: gs.InitialEnergy,\n\t\tInitialAcid: gs.InitialAcid,\n\t}\n\n\t// the API requires that the `users` field contain either \"all\" for a\n\t// public game or a comma-separated list of usernames if it's private.\n\tif gs.Public {\n\t\tgsp.Users = \"all\"\n\t} else {\n\t\tgsp.Users = strings.Join(gs.Players, \",\")\n\t}\n\n\treturn\n}",
"func GenerateGBfromproto(record *bioproto.Genbank) string {\n\tvar stringbuffer bytes.Buffer\n\n\tstringbuffer.WriteString(generateHeaderString(record))\n\tstringbuffer.WriteString(\"FEATURES Location/Qualifiers\\n\")\n\tstringbuffer.WriteString(generateQualifierString(record))\n\tif record.FEATURES != nil {\n\n\t}\n\tif record.CONTIG != \"\" {\n\t\tstringbuffer.WriteString(\"CONTIG \" + record.CONTIG + \"\\n\")\n\t}\n\tstringbuffer.WriteString(\"//\\n\")\n\treturn stringbuffer.String()\n}",
"func (g *Generation) RunGenerationStatistics() (result GenerationResult) {\n\n\tcorrelation := stat.Correlation(g.AntagonistAvgFitnessValuesOfEveryIndividual,\n\t\tg.ProtagonistAvgFitnessOfEveryIndividual, nil)\n\tcovariance := stat.Covariance(g.AntagonistAvgFitnessValuesOfEveryIndividual,\n\t\tg.ProtagonistAvgFitnessOfEveryIndividual, nil)\n\n\tantMean, antStd := stat.MeanStdDev(g.AntagonistAvgFitnessValuesOfEveryIndividual, nil)\n\tproMean, proStd := stat.MeanStdDev(g.ProtagonistAvgFitnessOfEveryIndividual, nil)\n\n\tantVar := stat.Variance(g.AntagonistAvgFitnessValuesOfEveryIndividual, nil)\n\tproVar := stat.Variance(g.ProtagonistAvgFitnessOfEveryIndividual, nil)\n\n\tresult.AllAntagonistAverageFitness = antMean\n\tresult.AntagonistStdDev = antStd\n\tresult.AntagonistVariance = antVar\n\tresult.AllProtagonistAverageFitness = proMean\n\tresult.ProtagonistStdDev = proStd\n\tresult.ProtagonistVariance = proVar\n\tresult.Correlation = correlation\n\tresult.Covariance = covariance\n\tresult.AntagonistAvgAge = g.AntagonistsAvgAge\n\tresult.AntagonistAvgBirthGen = g.AntagonistsAvgBirthGen\n\tresult.ProtagonistAvgAge = g.ProtagonistsAvgAge\n\tresult.ProtagonistAvgBirthGen = g.ProtagonistsAvgBirthGen\n\n\tresult.BestAntagonist = g.BestAntagonist()\n\tresult.BestProtagonist = g.BestProtagonist()\n\n\t//statsString := result.ToString()\n\n\t//g.Parameters.LoggingChan <- evolog.Logger{Timestamp: time.Now(), Type: evolog.LoggerGeneration, Message: statsString}\n\n\treturn result\n}",
"func GetG() *G {\n\treturn (*G)(getg())\n}",
"func NewBgpConfiguration()(*BgpConfiguration) {\n m := &BgpConfiguration{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}",
"func (self *PhysicsP2) ToJSON() interface{}{\n return self.Object.Call(\"toJSON\")\n}",
"func ToGeo(h H3Index) GeoCoord {\n\tg := C.GeoCoord{}\n\tC.h3ToGeo(h, &g)\n\treturn geoCoordFromC(g)\n}",
"func ToGeo(h H3Index) GeoCoord {\n\tg := C.GeoCoord{}\n\tC.h3ToGeo(h, &g)\n\treturn geoCoordFromC(g)\n}",
"func SomeGraphToJSONable(\n\tinstance *SomeGraph) (\n\ttarget map[string]interface{}, err error) {\n\n\tif instance == nil {\n\t\tpanic(\"unexpected nil instance\")\n\t}\n\n\ttarget = make(map[string]interface{})\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ttarget = nil\n\t\t}\n\t}()\n\t////\n\t// Serialize SomeProperty\n\t////\n\n\ttarget[\"some_property\"] = WithOptionalToJSONable(\n\t\t&instance.SomeProperty)\n\n\treturn\n}",
"func TestGetData(t *testing.T) {\n\n\tgaTemp := new(GAData)\n\n\t// initialise GAData object\n\tgaTemp.Init()\n\n\ttestRequest := GaRequest{\"ga:23949588\",\n\t\t\"2014-01-01\",\n\t\t\"2014-01-02\",\n\t\t\"ga:visits\",\n\t\t\"ga:day\",\n\t\t\"\",\n\t\t\"\",\n\t\t\"\",\n\t\t100,\n\t\t5}\n\n\tresult := gaTemp.GetData(1, &testRequest)\n\tlog.Println(result)\n}",
"func toPgOptions(config DbConfig) *pg.Options {\r\n\treturn &pg.Options{\r\n\t\tAddr: fmt.Sprintf(\"%s:%s\", config.Host, config.Port),\r\n\t\tUser: config.UserName,\r\n\t\tPassword: config.UserPassword,\r\n\t\tDatabase: config.DbName,\r\n\t\tApplicationName: AppName,\r\n\t\tReadTimeout: ReadTimeout,\r\n\t\tWriteTimeout: WriteTimeout,\r\n\t\tPoolSize: PoolSize,\r\n\t\tMinIdleConns: MinIdleConns,\r\n\t}\r\n}",
"func (_m *gqlAssetConverter) ToGQL(in *v1beta1.Asset) (*gqlschema.Asset, error) {\n\tvar r0 *gqlschema.Asset\n\tvar r1 error\n\tr1 = _m.err\n\n\treturn r0, r1\n}",
"func (client AccessGovernanceCPClient) getGovernanceInstance(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/governanceInstances/{governanceInstanceId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetGovernanceInstanceResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/access-governance-cp/20220518/GovernanceInstance/GetGovernanceInstance\"\n\t\terr = common.PostProcessServiceError(err, \"AccessGovernanceCP\", \"GetGovernanceInstance\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func logInstance(authRequest *authorization.HandleAuthorizationRequest) {\n\tinstance := authRequest.Instance\n\n\tif false {\n\t\tlog.Println(\"sourceAddress:\", instance.Subject.Properties[\"sourceAddress\"].GetStringValue())\n\t\tlog.Println(\"sourceName:\", instance.Subject.Properties[\"sourceName\"].GetStringValue())\n\t\tlog.Println(\"sourceUid:\", instance.Subject.Properties[\"sourceUid\"].GetStringValue())\n\t\tlog.Println(\"sourceNamespace:\", instance.Subject.Properties[\"sourceNamespace\"].GetStringValue())\n\t\tlog.Println(\"sourceVersion:\", instance.Subject.Properties[\"sourceVersion\"].GetStringValue())\n\t\tlog.Println(\"sourcePrincipal:\", instance.Subject.Properties[\"sourcePrincipal\"].GetStringValue())\n\t\tlog.Println(\"sourceOwner:\", instance.Subject.Properties[\"sourceOwnern\"].GetStringValue())\n\t\tlog.Println(\"sourceWorkloadUid:\", instance.Subject.Properties[\"sourceWorkloadUid\"].GetStringValue())\n\t\tlog.Println(\"sourceWorkloadName:\", instance.Subject.Properties[\"sourceWorkloadName\"].GetStringValue())\n\t\tlog.Println(\"sourceWorkloadNamespace:\", instance.Subject.Properties[\"sourceWorkloadNamespace\"].GetStringValue())\n\n\t\tlog.Println(\"instance.Action.Namespace:\", instance.Action.Namespace)\n\t\tlog.Println(\"instance.Action.Service:\", instance.Action.Service)\n\t\tlog.Println(\"instance.Action.Method:\", instance.Action.Method)\n\t\tlog.Println(\"instance.Action.Path:\", instance.Action.Path)\n\n\t\tlog.Println(\"protocol:\", instance.Action.Properties[\"protocol\"].GetStringValue())\n\t\tlog.Println(\"destinationAddress:\", instance.Action.Properties[\"destinationAddress\"].GetStringValue())\n\t\tlog.Println(\"destinationName:\", instance.Action.Properties[\"destinationName\"].GetStringValue())\n\t\tlog.Println(\"destinationUid:\", instance.Action.Properties[\"destinationUid\"].GetStringValue())\n\t\tlog.Println(\"destinationNamespace:\", instance.Action.Properties[\"destinationNamespace\"].GetStringValue())\n\t\tlog.Println(\"destinationVersion:\", instance.Action.Properties[\"destinationVersion\"].GetStringValue())\n\n\t\tlog.Println(\"destinationWorkloadUid:\", instance.Action.Properties[\"destinationWorkloadUid\"].GetStringValue())\n\t\tlog.Println(\"destinationWorkloadName:\", instance.Action.Properties[\"destinationWorkloadName\"].GetStringValue())\n\t\tlog.Println(\"destinationWorkloadNamespace:\", instance.Action.Properties[\"destinationWorkloadNamespace\"].GetStringValue())\n\t}\n\tlog.Println(\"-------------------------------------------\")\n\tlog.Println(instance)\n\tlog.Println(\"-------------------------------------------\")\n}",
"func (pn *paxosNode) getInstance(key string) *paxosKeyData {\n\tpxi, ok := pn.instances[key]\n\tif !ok {\n\t\tpxi = &paxosKeyData{\n\t\t\tMyn: 0,\n\t\t\tNa: -1,\n\t\t\tNh: 0,\n\t\t\tVa: nil,\n\t\t\tmu: &sync.RWMutex{},\n\t\t\tCommittedVal: nil,\n\t\t\tstoreLock: &sync.RWMutex{},\n\t\t\tproposeLock: &sync.RWMutex{},\n\t\t}\n\t\tpn.instances[key] = pxi\n\t}\n\treturn pxi\n}",
"func GetAPIGeneralInformation() *GeneralInformation {\n timeNow := time.Now()\n timeZone, _ := timeNow.Zone()\n timeFormat := \"2006-01-02 15:04:05\"\n return &GeneralInformation{\n ServerTime: ServerTime{\n Exact: timeNow.Unix(),\n Nice: timeNow.Format(timeFormat),\n Timezone: timeZone,\n },\n }\n}",
"func (app *adapter) ToJSON(genesis Genesis) ([]byte, error) {\n\treturn json.Marshal(genesis)\n}",
"func (gene *Gene) Copy() *Gene {\n\treturn &Gene{\n\t\tgene.A,\n\t\tgene.B,\n\t\tgene.C,\n\t\tgene.F,\n\t\tgene.Format,\n\t}\n}",
"func GetInstance() *gorm.DB {\n\tonce.Do(func() {\n\t\t// refer https://github.com/go-sql-driver/mysql#dsn-data-source-name for details\n\t\tuser := viper.GetString(\"database.user\")\n\t\tpassword := viper.GetString(\"database.password\")\n\t\thost := viper.GetString(\"database.host\")\n\t\tport := viper.GetString(\"database.port\")\n\t\tdbname := viper.GetString(\"database.dbname\")\n\n\t\tdsn := fmt.Sprintf(\"%s:%s@tcp(%s:%s)/%s\", user, password, host, port, dbname)\n\t\tdb, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})\n\t\tdba = db\n\t\tif err != nil {\n\t\t\tlog.Panic().Msgf(\"Error connecting to the database at %s:%s/%s\", host, port, dbname)\n\t\t}\n\t\tsqlDB, err := dba.DB()\n\t\tif err != nil {\n\t\t\tlog.Panic().Msgf(\"Error getting GORM DB definition\")\n\t\t}\n\t\tsqlDB.SetMaxIdleConns(10)\n\t\tsqlDB.SetMaxOpenConns(100)\n\n\t\tlog.Info().Msgf(\"Successfully established connection to %s:%s/%s\", host, port, dbname)\n\t})\n\treturn dba\n}",
"func DrawArraysInstancedBaseInstance(mode uint32, first int32, count int32, instancecount int32, baseinstance uint32) {\n C.glowDrawArraysInstancedBaseInstance(gpDrawArraysInstancedBaseInstance, (C.GLenum)(mode), (C.GLint)(first), (C.GLsizei)(count), (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}",
"func (self *TileSprite) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}",
"func (bil *baseInstanceList) getOrCreateBaseInstance(key *meta.Key) *baseInstance {\n\tbil.lock.Lock()\n\tdefer bil.lock.Unlock()\n\n\tinst, found := bil.instances[*key]\n\tif !found {\n\t\tinst = &baseInstance{name: key.Name, zone: key.Zone}\n\t\tif bil.allocateCIDR {\n\t\t\tnextRange, _ := bil.cidrSet.AllocateNext()\n\t\t\tinst.aliasRange = nextRange.String()\n\t\t}\n\t\tbil.instances[*key] = inst\n\t}\n\treturn inst\n}",
"func getInstance() *KeyGen {\n\tonce.Do(\n\t\tfunc() {\n\t\t\tkeygen = new(KeyGen)\n\t\t\tkeygen.random = rand.New(rand.NewSource(time.Now().Unix()))\n\n\t\t})\n\treturn keygen\n}",
"func (s *Superhero) ToMap() map[string]interface{} {\n\tdata := map[string]interface{}{\n\t\t\"id\": s.ID,\n\t\t\"affiliation_id\": s.AffiliationID,\n\t\t\"name\": s.Name,\n\t\t\"life\": s.Life,\n\t\t\"energy\": s.Energy,\n\t\t\"powers\": make([]interface{}, len(s.Powers)),\n\t}\n\n\tfor i, p := range s.Powers {\n\t\tdata[\"powers\"].([]interface{})[i] = p.ToMap()\n\t}\n\n\treturn data\n}",
"func GetInstance() Proxy {\n\tonce.Do(func() {\n\t\tinstance = &proxy{\n\t\t\tproxy: &apiconfigv1.Proxy{},\n\t\t\tlock: sync.Mutex{},\n\t\t}\n\t})\n\treturn instance\n}",
"func (p *Plugin) As(entity string) *PluginDB {\n\n\tmethod, host, err := run.GetEndpoint(p.Meta.DataDir, p.Meta.Config.GetAPI())\n\tif err != nil {\n\t\tlogrus.Panicf(\"Got an error parsing config API %v at %s\", err, dbutil.MiniStack(0))\n\t}\n\n\tc := http.Client{\n\t\tTimeout: time.Duration(5 * time.Second),\n\t}\n\tif method == \"unix\" {\n\t\tc.Transport = &http.Transport{\n\t\t\tDialContext: func(_ context.Context, _, _ string) (net.Conn, error) {\n\t\t\t\treturn net.Dial(\"unix\", host)\n\t\t\t},\n\t\t}\n\t}\n\n\treturn &PluginDB{\n\t\tP: p,\n\t\thost: host,\n\t\tclient: c,\n\t\tEntity: entity,\n\t}\n}",
"func (v *Variant) ToVariant() *Variant {\n\treturn v\n}",
"func (client AccessGovernanceCPClient) createGovernanceInstance(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/governanceInstances\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateGovernanceInstanceResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/access-governance-cp/20220518/GovernanceInstance/CreateGovernanceInstance\"\n\t\terr = common.PostProcessServiceError(err, \"AccessGovernanceCP\", \"CreateGovernanceInstance\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func GetPromoType() *graphql.Object{\n if promoType == nil{\n promoType = graphql.NewObject(graphql.ObjectConfig{\n Name: \"promoType\",\n Fields: graphql.Fields{\n \"id\" : &graphql.Field{\n Type:graphql.Int,\n },\n \"name\": &graphql.Field{\n Type:graphql.String,\n },\n \"promoPrice\": &graphql.Field{\n Type:graphql.Int,\n },\n \"promoCode\": &graphql.Field{\n Type: graphql.String,\n },\n \"availableUntil\": &graphql.Field{\n Type:graphql.DateTime,\n },\n \"platform\": &graphql.Field{\n Type:graphql.String,\n },\n \"promoFor\": &graphql.Field{\n Type:graphql.String,\n },\n \"description\": &graphql.Field{\n Type:graphql.String,\n },\n \"image\": &graphql.Field{\n Type:graphql.String,\n },\n\n },\n })\n }\n return promoType\n}",
"func GetGaID() string {\n\t//Change This to Your Google Analytics ID\n\tconst gaID = \"UA-51746203-1\"\n\treturn gaID\n}",
"func (a *API) GetDataFor7Days() (GA, error) {\n\tclient, err := a.GetOAuthClient()\n\tif err != nil {\n\t\treturn GA{}, err\n\t}\n\n\tanalyticsService, err := analytics.New(client)\n\tif err != nil {\n\t\treturn GA{}, err\n\t}\n\n\tgaData := analyticsService.Data.Ga.Get(a.ViewID, \"7daysAgo\", \"yesterday\",\n\t\t\"ga:users, ga:impressions, ga:adClicks, ga:organicSearches\")\n\tgaData.Dimensions(\"ga:day\")\n\tgaData.SamplingLevel(\"HIGHER_PRECISION\")\n\tgaData.Output(\"json\")\n\n\td, err := gaData.Do()\n\tif err != nil {\n\t\treturn GA{}, err\n\t}\n\n\tbytes, err := d.MarshalJSON()\n\tif err != nil {\n\t\treturn GA{}, err\n\t}\n\n\tgaDatas := []GAData{}\n\n\tfor _, value := range d.Rows {\n\t\tgaData := GAData{}\n\t\tgaData.Date = value[0]\n\t\tgaData.Users = value[1]\n\t\tgaData.AdImpressions = value[2]\n\t\tgaData.AdClicks = value[3]\n\t\tgaData.OrganicSearches = value[4]\n\t\tgaDatas = append(gaDatas, gaData)\n\t}\n\n\tga := GA{}\n\tga.GADatas = gaDatas\n\tga.JSON = string(bytes)\n\tga.URL = a.URL\n\n\treturn ga, nil\n}",
"func (rt *resourceTracking) GetAppInstance(un *unstructured.Unstructured, key string, trackingMethod v1alpha1.TrackingMethod) *AppInstanceValue {\n\tswitch trackingMethod {\n\tcase TrackingMethodAnnotation, TrackingMethodAnnotationAndLabel:\n\t\treturn rt.getAppInstanceValue(un, key, trackingMethod)\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func (self *TileSprite) Data() interface{}{\n return self.Object.Get(\"data\")\n}",
"func (l *LogEntry) addInstance(i *guardduty.InstanceDetails) {\n\tl.InstanceAz = aws.StringValue(i.AvailabilityZone)\n\tl.InstanceDesc = aws.StringValue(i.ImageDescription)\n\tl.InstanceImageId = aws.StringValue(i.ImageId)\n\tl.InstanceId = aws.StringValue(i.InstanceId)\n\tl.InstanceState = aws.StringValue(i.InstanceState)\n\tl.InstanceType = aws.StringValue(i.InstanceType)\n\tl.InstanceLaunchTime = aws.StringValue(i.LaunchTime)\n\tl.InstanceTags = make(map[string]string)\n\tfor _, t := range i.Tags {\n\t\tl.InstanceTags[aws.StringValue(t.Key)] = aws.StringValue(t.Value)\n\t}\n\t// build list of public, private IP's and subnets:\n\tl.InstanceSg = make(map[string]string)\n\tprv := make(map[string]bool)\n\tpub := make(map[string]bool)\n\tsub := make(map[string]bool)\n\tfor _, in := range i.NetworkInterfaces {\n\t\tprv[aws.StringValue(in.PrivateIpAddress)] = true\n\t\tfor _, ip := range in.PrivateIpAddresses {\n\t\t\tprv[aws.StringValue(ip.PrivateIpAddress)] = true\n\t\t}\n\t\tpub[aws.StringValue(in.PublicIp)] = true\n\t\tsub[aws.StringValue(in.SubnetId)] = true\n\t\tl.InstanceVpc = aws.StringValue(in.VpcId)\n\t\tfor _, sg := range in.SecurityGroups {\n\t\t\tl.InstanceSg[aws.StringValue(sg.GroupId)] = aws.StringValue(sg.GroupName)\n\t\t}\n\t}\n\tfor k := range prv {\n\t\tl.InstancePrivateIp = append(l.InstancePrivateIp, k)\n\t}\n\tfor k := range pub {\n\t\tl.InstancePublicIp = append(l.InstancePublicIp, k)\n\t}\n\tfor k := range sub {\n\t\tl.InstanceSubnet = append(l.InstanceSubnet, k)\n\t}\n}",
"func (r GetInstanceMetricDataRequest) Send(ctx context.Context) (*GetInstanceMetricDataResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &GetInstanceMetricDataResponse{\n\t\tGetInstanceMetricDataOutput: r.Request.Data.(*GetInstanceMetricDataOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}",
"func (e *Engine) GT() *GT {\n\treturn NewGT()\n}",
"func QuotaInstanceGT(v int) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.GT(s.C(FieldQuotaInstance), v))\n\t})\n}"
] | [
"0.58498",
"0.55195946",
"0.5480924",
"0.5227962",
"0.50450355",
"0.4820873",
"0.48025364",
"0.47862035",
"0.4779465",
"0.47661734",
"0.47625732",
"0.4697985",
"0.46975428",
"0.46894577",
"0.46771544",
"0.46434072",
"0.46340284",
"0.46209815",
"0.46189263",
"0.46185815",
"0.46124786",
"0.4596707",
"0.4586061",
"0.45847437",
"0.45761085",
"0.45670113",
"0.45549384",
"0.45388564",
"0.45150253",
"0.44999447",
"0.4492385",
"0.44681507",
"0.44614404",
"0.44425994",
"0.44345447",
"0.44322133",
"0.4427989",
"0.442248",
"0.44191808",
"0.44168016",
"0.44137347",
"0.44083557",
"0.4404175",
"0.44040695",
"0.43985415",
"0.4381498",
"0.43733117",
"0.4373088",
"0.43536532",
"0.43507817",
"0.4347556",
"0.43418857",
"0.43347573",
"0.43254834",
"0.43233737",
"0.43210745",
"0.43190953",
"0.4318587",
"0.43184066",
"0.431807",
"0.43176547",
"0.43174812",
"0.43155617",
"0.4315423",
"0.43029207",
"0.42935088",
"0.42932442",
"0.42913967",
"0.4284365",
"0.42817423",
"0.42817423",
"0.42795593",
"0.4279244",
"0.4270787",
"0.4267292",
"0.42612037",
"0.42603227",
"0.42529383",
"0.42514777",
"0.42492622",
"0.42407325",
"0.42388916",
"0.42315164",
"0.4230544",
"0.42213994",
"0.42186654",
"0.4209816",
"0.4200521",
"0.41979453",
"0.4197203",
"0.41915935",
"0.4185646",
"0.41816333",
"0.41811565",
"0.41781718",
"0.41745213",
"0.41724864",
"0.41668177",
"0.41650268",
"0.4163206"
] | 0.8056762 | 0 |
toGA is a utility method to return the baseInstance data as a beta Instance object | func (bi *baseInstance) toBeta() *beta.Instance {
inst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}}
if bi.aliasRange != "" {
inst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{
{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},
}
}
return inst
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (bi *baseInstance) toGA() *ga.Instance {\n\tinst := &ga.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*ga.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*ga.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},\n\t\t}\n\t}\n\treturn inst\n}",
"func (e *GT) Base() *GT {\n\tif e.p == nil {\n\t\te.p = &gfP12{}\n\t}\n\te.p.Set(gfP12Gen)\n\treturn e\n}",
"func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\treturn func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\t\tm.Lock.Lock()\n\t\tdefer m.Lock.Unlock()\n\n\t\tif _, found := m.Objects[*key]; !found {\n\t\t\tm.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()}\n\t\t}\n\t\treturn false, nil, nil\n\t}\n}",
"func newDataInstance(repo datastore.Repo, t *testing.T, name dvid.DataString) *Data {\n\tconfig := dvid.NewConfig()\n\tconfig.SetVersioned(true)\n\tdataservice, err := repo.NewData(labelsT, name, config)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to create labels64 instance %q: %s\\n\", name, err.Error())\n\t}\n\tlabels, ok := dataservice.(*Data)\n\tif !ok {\n\t\tt.Errorf(\"Can't cast labels data service into Data\\n\")\n\t}\n\treturn labels\n}",
"func InstanceToProto(resource *sql.Instance) *sqlpb.SqlInstance {\n\tp := &sqlpb.SqlInstance{\n\t\tBackendType: SqlInstanceBackendTypeEnumToProto(resource.BackendType),\n\t\tConnectionName: dcl.ValueOrEmptyString(resource.ConnectionName),\n\t\tDatabaseVersion: SqlInstanceDatabaseVersionEnumToProto(resource.DatabaseVersion),\n\t\tEtag: dcl.ValueOrEmptyString(resource.Etag),\n\t\tGceZone: dcl.ValueOrEmptyString(resource.GceZone),\n\t\tInstanceType: SqlInstanceInstanceTypeEnumToProto(resource.InstanceType),\n\t\tMasterInstanceName: dcl.ValueOrEmptyString(resource.MasterInstanceName),\n\t\tMaxDiskSize: SqlInstanceMaxDiskSizeToProto(resource.MaxDiskSize),\n\t\tName: dcl.ValueOrEmptyString(resource.Name),\n\t\tProject: dcl.ValueOrEmptyString(resource.Project),\n\t\tRegion: dcl.ValueOrEmptyString(resource.Region),\n\t\tRootPassword: dcl.ValueOrEmptyString(resource.RootPassword),\n\t\tCurrentDiskSize: SqlInstanceCurrentDiskSizeToProto(resource.CurrentDiskSize),\n\t\tDiskEncryptionConfiguration: SqlInstanceDiskEncryptionConfigurationToProto(resource.DiskEncryptionConfiguration),\n\t\tFailoverReplica: SqlInstanceFailoverReplicaToProto(resource.FailoverReplica),\n\t\tMasterInstance: SqlInstanceMasterInstanceToProto(resource.MasterInstance),\n\t\tReplicaConfiguration: SqlInstanceReplicaConfigurationToProto(resource.ReplicaConfiguration),\n\t\tScheduledMaintenance: SqlInstanceScheduledMaintenanceToProto(resource.ScheduledMaintenance),\n\t\tSettings: SqlInstanceSettingsToProto(resource.Settings),\n\t}\n\tfor _, r := range resource.IPAddresses {\n\t\tp.IpAddresses = append(p.IpAddresses, SqlInstanceIPAddressesToProto(&r))\n\t}\n\n\treturn p\n}",
"func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n C.glowDrawElementsInstancedBaseVertexBaseInstance(gpDrawElementsInstancedBaseVertexBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex), (C.GLuint)(baseinstance))\n}",
"func instancesToProto(insts registry.Instances) []*instances.Instance {\n\tret := make([]*instances.Instance, 0)\n\tfor _, inst := range insts {\n\t\tprotoInst := &instances.Instance{\n\t\t\tInstanceId: proto.String(inst.Id),\n\t\t\tHostname: proto.String(inst.Hostname),\n\t\t\tMachineClass: proto.String(inst.MachineClass),\n\t\t\tServiceName: proto.String(inst.Name),\n\t\t\tServiceDescription: proto.String(inst.Description),\n\t\t\tServiceVersion: proto.Uint64(inst.Version),\n\t\t\tAzName: proto.String(inst.AzName),\n\t\t\tSubTopic: make([]string, 0),\n\t\t}\n\t\tfor _, ep := range inst.Endpoints {\n\t\t\tif ep.Subscribe != \"\" {\n\t\t\t\tprotoInst.SubTopic = append(protoInst.SubTopic, ep.Subscribe)\n\t\t\t}\n\t\t}\n\t\tret = append(ret, protoInst)\n\t}\n\treturn ret\n}",
"func (a *abaImpl) AsGPA() gpa.GPA {\n\treturn a.asGPA\n}",
"func DrawArraysInstancedBaseInstance(mode uint32, first int32, count int32, instancecount int32, baseinstance uint32) {\n C.glowDrawArraysInstancedBaseInstance(gpDrawArraysInstancedBaseInstance, (C.GLenum)(mode), (C.GLint)(first), (C.GLsizei)(count), (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}",
"func CreateGqlDefFromInstance(name string, data interface{}) *graphql.Object {\n\tdataType := reflect.TypeOf(data)\n\treturn CreateGqlDefFromType(name, dataType)\n}",
"func ProtoToInstanceTemplate(p *betapb.ComputeBetaInstanceTemplate) *beta.InstanceTemplate {\n\tobj := &beta.InstanceTemplate{\n\t\tCreationTimestamp: dcl.StringOrNil(p.GetCreationTimestamp()),\n\t\tDescription: dcl.StringOrNil(p.Description),\n\t\tId: dcl.Int64OrNil(p.Id),\n\t\tSelfLink: dcl.StringOrNil(p.SelfLink),\n\t\tName: dcl.StringOrNil(p.Name),\n\t\tProperties: ProtoToComputeBetaInstanceTemplateProperties(p.GetProperties()),\n\t\tProject: dcl.StringOrNil(p.Project),\n\t}\n\treturn obj\n}",
"func gceInfo(inst *instance) error {\n\tvar err error\n\tinst.zone, err = metadata.Zone()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinst.name, err = metadata.InstanceName()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinst.hostname, err = metadata.Hostname()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinst.project, err = metadata.ProjectID()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (i *Interface) Instance() interface{} {\n\treturn i.base.instance\n}",
"func ProtoToInstance(p *sqlpb.SqlInstance) *sql.Instance {\n\tobj := &sql.Instance{\n\t\tBackendType: ProtoToSqlInstanceBackendTypeEnum(p.GetBackendType()),\n\t\tConnectionName: dcl.StringOrNil(p.ConnectionName),\n\t\tDatabaseVersion: ProtoToSqlInstanceDatabaseVersionEnum(p.GetDatabaseVersion()),\n\t\tEtag: dcl.StringOrNil(p.Etag),\n\t\tGceZone: dcl.StringOrNil(p.GceZone),\n\t\tInstanceType: ProtoToSqlInstanceInstanceTypeEnum(p.GetInstanceType()),\n\t\tMasterInstanceName: dcl.StringOrNil(p.MasterInstanceName),\n\t\tMaxDiskSize: ProtoToSqlInstanceMaxDiskSize(p.GetMaxDiskSize()),\n\t\tName: dcl.StringOrNil(p.Name),\n\t\tProject: dcl.StringOrNil(p.Project),\n\t\tRegion: dcl.StringOrNil(p.Region),\n\t\tRootPassword: dcl.StringOrNil(p.RootPassword),\n\t\tCurrentDiskSize: ProtoToSqlInstanceCurrentDiskSize(p.GetCurrentDiskSize()),\n\t\tDiskEncryptionConfiguration: ProtoToSqlInstanceDiskEncryptionConfiguration(p.GetDiskEncryptionConfiguration()),\n\t\tFailoverReplica: ProtoToSqlInstanceFailoverReplica(p.GetFailoverReplica()),\n\t\tMasterInstance: ProtoToSqlInstanceMasterInstance(p.GetMasterInstance()),\n\t\tReplicaConfiguration: ProtoToSqlInstanceReplicaConfiguration(p.GetReplicaConfiguration()),\n\t\tScheduledMaintenance: ProtoToSqlInstanceScheduledMaintenance(p.GetScheduledMaintenance()),\n\t\tSettings: ProtoToSqlInstanceSettings(p.GetSettings()),\n\t}\n\tfor _, r := range p.GetIpAddresses() {\n\t\tobj.IPAddresses = append(obj.IPAddresses, *ProtoToSqlInstanceIPAddresses(r))\n\t}\n\treturn obj\n}",
"func expectedNewInstance(jobID, datasetID string) *dataset.NewInstance {\n\tnewInstance := &dataset.NewInstance{\n\t\tLinks: &dataset.Links{\n\t\t\tDataset: dataset.Link{\n\t\t\t\tURL: \"http://localhost:22000/datasets/\" + datasetID,\n\t\t\t\tID: datasetID,\n\t\t\t},\n\t\t\tJob: dataset.Link{\n\t\t\t\tURL: \"http://import-api/jobs/\" + jobID,\n\t\t\t\tID: jobID,\n\t\t\t},\n\t\t},\n\t\tDimensions: []dataset.CodeList{},\n\t\tImportTasks: &dataset.InstanceImportTasks{\n\t\t\tImportObservations: &dataset.ImportObservationsTask{\n\t\t\t\tState: dataset.StateCreated.String(),\n\t\t\t},\n\t\t\tBuildHierarchyTasks: []*dataset.BuildHierarchyTask{},\n\t\t\tBuildSearchIndexTasks: []*dataset.BuildSearchIndexTask{},\n\t\t},\n\t\tType: \"cantabular_blob\",\n\t}\n\tif datasetID == \"dataset1\" {\n\t\tnewInstance.Dimensions = []dataset.CodeList{{ID: \"codelist11\"}, {ID: \"codelist12\"}}\n\t\tnewInstance.LowestGeography = \"lowest_geo\"\n\t} else if datasetID == \"dataset2\" {\n\t\tnewInstance.Dimensions = []dataset.CodeList{{ID: \"codelist21\"}, {ID: \"codelist22\"}, {ID: \"codelist23\"}}\n\t}\n\treturn newInstance\n}",
"func (self *Graphics) Data() interface{}{\n return self.Object.Get(\"data\")\n}",
"func InstanceTemplateToProto(resource *beta.InstanceTemplate) *betapb.ComputeBetaInstanceTemplate {\n\tp := &betapb.ComputeBetaInstanceTemplate{\n\t\tCreationTimestamp: dcl.ValueOrEmptyString(resource.CreationTimestamp),\n\t\tDescription: dcl.ValueOrEmptyString(resource.Description),\n\t\tId: dcl.ValueOrEmptyInt64(resource.Id),\n\t\tSelfLink: dcl.ValueOrEmptyString(resource.SelfLink),\n\t\tName: dcl.ValueOrEmptyString(resource.Name),\n\t\tProperties: ComputeBetaInstanceTemplatePropertiesToProto(resource.Properties),\n\t\tProject: dcl.ValueOrEmptyString(resource.Project),\n\t}\n\n\treturn p\n}",
"func GetInstanceBD() *connection {\n\treturn instance\n}",
"func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n\tC.glowDrawElementsInstancedBaseVertexBaseInstance(gpDrawElementsInstancedBaseVertexBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex), (C.GLuint)(baseinstance))\n}",
"func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n\tC.glowDrawElementsInstancedBaseVertexBaseInstance(gpDrawElementsInstancedBaseVertexBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex), (C.GLuint)(baseinstance))\n}",
"func (ga *GenesisAccount) ToAccount() auth.Account {\n\tbacc := &auth.BaseAccount{\n\t\tAddress: ga.Address,\n\t\tCoins: ga.Coins.Sort(),\n\t\tAccountNumber: ga.AccountNumber,\n\t\tSequence: ga.Sequence,\n\t}\n\n\tif !ga.OriginalVesting.IsZero() {\n\t\tbaseVestingAcc := &auth.BaseVestingAccount{\n\t\t\tBaseAccount: bacc,\n\t\t\tOriginalVesting: ga.OriginalVesting,\n\t\t\tDelegatedFree: ga.DelegatedFree,\n\t\t\tDelegatedVesting: ga.DelegatedVesting,\n\t\t\tEndTime: ga.EndTime,\n\t\t}\n\n\t\tif ga.StartTime != 0 && ga.EndTime != 0 {\n\t\t\treturn &auth.ContinuousVestingAccount{\n\t\t\t\tBaseVestingAccount: baseVestingAcc,\n\t\t\t\tStartTime: ga.StartTime,\n\t\t\t}\n\t\t} else if ga.EndTime != 0 {\n\t\t\treturn &auth.DelayedVestingAccount{\n\t\t\t\tBaseVestingAccount: baseVestingAcc,\n\t\t\t}\n\t\t} else {\n\t\t\tpanic(fmt.Sprintf(\"invalid genesis vesting account: %+v\", ga))\n\t\t}\n\t}\n\n\treturn bacc\n}",
"func populateInstance(instance *spotcluster.Instance,\n\tdroplet provider.InstanceConfig) {\n\tinstance.Spec.InstanceName = droplet.Name\n\tinstance.Spec.RemoteAddress = func() string {\n\t\tif droplet.ExteralIP != \"\" {\n\t\t\treturn droplet.ExteralIP + \":22\"\n\t\t}\n\t\treturn \"\"\n\t}()\n\tinstance.Spec.ExternalIP = droplet.ExteralIP\n\tinstance.Spec.InternalIP = droplet.InternalIP\n\tinstance.Spec.InstanceAvailable = true\n\tinstance.Spec.InstanceReady = droplet.IsRunning\n\tinstance.Spec.NodeAvailable = false\n\tinstance.Finalizers = func() []string {\n\t\treturn []string{controller.InstanceProtectionFinalizer}\n\t}()\n\tinstance.Labels[controller.LabelInstanceID] = droplet.ID\n}",
"func GetInstance() *CoinBase {\n\treturn ins\n}",
"func newInstance(x *runtime.Runtime, p *build.Instance, v *adt.Vertex) *Instance {\n\t// TODO: associate root source with structLit.\n\tinst := &Instance{\n\t\troot: v,\n\t\tinst: p,\n\t}\n\tif p != nil {\n\t\tinst.ImportPath = p.ImportPath\n\t\tinst.Dir = p.Dir\n\t\tinst.PkgName = p.PkgName\n\t\tinst.DisplayName = p.ImportPath\n\t\tif p.Err != nil {\n\t\t\tinst.setListOrError(p.Err)\n\t\t}\n\t}\n\n\tx.AddInst(p.ImportPath, v, p)\n\tx.SetBuildData(p, inst)\n\tinst.index = x\n\treturn inst\n}",
"func GenerateGBfromproto(record *bioproto.Genbank) string {\n\tvar stringbuffer bytes.Buffer\n\n\tstringbuffer.WriteString(generateHeaderString(record))\n\tstringbuffer.WriteString(\"FEATURES Location/Qualifiers\\n\")\n\tstringbuffer.WriteString(generateQualifierString(record))\n\tif record.FEATURES != nil {\n\n\t}\n\tif record.CONTIG != \"\" {\n\t\tstringbuffer.WriteString(\"CONTIG \" + record.CONTIG + \"\\n\")\n\t}\n\tstringbuffer.WriteString(\"//\\n\")\n\treturn stringbuffer.String()\n}",
"func Instance(name string) *Baa {\n\tif name == \"\" {\n\t\tname = defaultAppName\n\t}\n\tif appInstances[name] == nil {\n\t\tappInstances[name] = New()\n\t\tappInstances[name].name = name\n\t}\n\treturn appInstances[name]\n}",
"func (*Instance) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_instances_proto_rawDescGZIP(), []int{1}\n}",
"func dataToSg(name string, d *schema.ResourceData) go_thunder.ServiceGroup {\n\t//\tlogger := util.GetLoggerInstance()\n\tvar s go_thunder.ServiceGroup\n\n\tvar sInstance go_thunder.ServiceGroupInstance\n\n\tsInstance.ConnRate = d.Get(\"conn_rate\").(int)\n\tsInstance.ResetOnServerSelectionFail = d.Get(\"reset_on_server_selection_fail\").(int)\n\tsInstance.HealthCheckDisable = d.Get(\"health_check_disable\").(int)\n\tsInstance.Protocol = d.Get(\"protocol\").(string)\n\tsInstance.TrafficReplicationMirrorIPRepl = d.Get(\"traffic_replication_mirror_ip_repl\").(int)\n\tsInstance.ResetPriorityAffinity = d.Get(\"reset_priority_affinity\").(int)\n\tsInstance.MinActiveMember = d.Get(\"min_active_member\").(int)\n\tsInstance.StatsDataAction = d.Get(\"stats_data_action\").(string)\n\tsInstance.TrafficReplicationMirrorDaRepl = d.Get(\"traffic_replication_mirror_da_repl\").(int)\n\tsInstance.TemplatePolicyShared = d.Get(\"template_policy_shared\").(string)\n\tsInstance.RptExtServer = d.Get(\"rpt_ext_server\").(int)\n\tsInstance.TemplatePort = d.Get(\"template_port\").(string)\n\tsInstance.ConnRateGracePeriod = d.Get(\"conn_rate_grace_period\").(int)\n\tsInstance.L4SessionUsageDuration = d.Get(\"l4_session_usage\").(int)\n\tsInstance.UUID = d.Get(\"uuid\").(string)\n\tsInstance.BackupServerEventLog = d.Get(\"backup_server_event_log\").(int)\n\tsInstance.LcMethod = d.Get(\"lc_method\").(string)\n\tsInstance.PseudoRoundRobin = d.Get(\"pseudo_round_robin\").(int)\n\tsInstance.SharedPartitionPolicyTemplate = d.Get(\"shared_partition_policy_template\").(int)\n\tsInstance.L4SessionUsageRevertRate = d.Get(\"l4_session_usage_revert_rate\").(int)\n\tsInstance.SharedPartitionSvcgrpHealthCheck = d.Get(\"shared_partition_svcgrp_health_check\").(int)\n\tsInstance.TemplateServer = d.Get(\"template_server\").(string)\n\tsInstance.SvcgrpHealthCheckShared = d.Get(\"svcgrp_health_check_shared\").(string)\n\tsInstance.TrafficReplicationMirror = d.Get(\"traffic_replication_mirror\").(int)\n\tsInstance.L4SessionRevertDuration = d.Get(\"l4_session_revert_duration\").(int)\n\tsInstance.TrafficReplicationMirrorSaDaRepl = d.Get(\"traffic_replication_mirror_sa_da_repl\").(int)\n\tsInstance.LbMethod = d.Get(\"lb_method\").(string)\n\tsInstance.StatelessAutoSwitch = d.Get(\"stateless_auto_switch\").(int)\n\tsInstance.MinActiveMemberAction = d.Get(\"min_active_member_action\").(string)\n\tsInstance.L4SessionUsage = d.Get(\"l4_session_usage\").(int)\n\tsInstance.ExtendedStats = d.Get(\"extended_stats\").(int)\n\tsInstance.ConnRateRevertDuration = d.Get(\"conn_rate_revert_duration\").(int)\n\tsInstance.StrictSelect = d.Get(\"strict_select\").(int)\n\tsInstance.Name = d.Get(\"name\").(string)\n\tsInstance.TrafficReplicationMirrorSaRepl = d.Get(\"traffic_replication_mirror_sa_repl\").(int)\n\tsInstance.ReportDelay = d.Get(\"report_delay\").(int)\n\tsInstance.ConnRateLog = d.Get(\"conn_rate_log\").(int)\n\tsInstance.L4SessionUsageLog = d.Get(\"l4_session_usage_log\").(int)\n\tsInstance.ConnRateDuration = d.Get(\"conn_rate_duration\").(int)\n\tsInstance.StatelessLbMethod = d.Get(\"stateless_lb_method\").(string)\n\tsInstance.TemplatePolicy = d.Get(\"template_policy\").(string)\n\tsInstance.StatelessLbMethod2 = d.Get(\"stateless_lb_method2\").(string)\n\tsInstance.UserTag = d.Get(\"user_tag\").(string)\n\tsInstance.SampleRspTime = d.Get(\"sample_rsp_time\").(int)\n\tsInstance.TopFastest = d.Get(\"top_fastest\").(int)\n\tsInstance.ConnRevertRate = d.Get(\"conn_revert_rate\").(int)\n\tsInstance.L4SessionUsageGracePeriod = 
d.Get(\"l4_session_usage_grace_period\").(int)\n\tsInstance.PriorityAffinity = d.Get(\"priority_affinity\").(int)\n\tsInstance.TopSlowest = d.Get(\"top_slowest\").(int)\n\tsInstance.HealthCheck = d.Get(\"health_check\").(string)\n\n\tpriorityCount := d.Get(\"priorities.#\").(int)\n\tsInstance.Priority = make([]go_thunder.Priorities, 0, priorityCount)\n\tfor i := 0; i < priorityCount; i++ {\n\t\tvar pr go_thunder.Priorities\n\t\tprefix := fmt.Sprintf(\"priorities.%d\", i)\n\t\tpr.Priority = d.Get(prefix + \".priority\").(int)\n\t\tpr.PriorityAction = d.Get(prefix + \".priority_action\").(string)\n\n\t\tsInstance.Priority = append(sInstance.Priority, pr)\n\t}\n\n\tsamplingCount := d.Get(\"sampling_enable.#\").(int)\n\tsInstance.Counters1 = make([]go_thunder.SamplingEnable, 0, samplingCount)\n\tfor i := 0; i < samplingCount; i++ {\n\t\tvar sm go_thunder.SamplingEnable\n\t\tprefix := fmt.Sprintf(\"sampling_enable.%d\", i)\n\t\tsm.Counters1 = d.Get(prefix + \".counters1\").(string)\n\n\t\tsInstance.Counters1 = append(sInstance.Counters1, sm)\n\t}\n\n\t//NEED TO FIGURE OUT IF VALUE IS PROVIDED IN TF FILE OR DEFAULT IS BEING USED\n\t//\tvar as Reset\n\t//\tas.AutoSwitch = d.Get(\"reset.0.auto_switch\").(int)\n\t//\tlogger.Println(\"[INFO] Auto switch is- \", d.Get(\"reset.0.auto_switch\").(int))\n\t//\tsInstance.AutoSwitch = as\n\n\tmemberCount := d.Get(\"member_list.#\").(int)\n\tsInstance.Host = make([]go_thunder.MemberList, 0, memberCount)\n\tfor i := 0; i < memberCount; i++ {\n\t\tvar ml go_thunder.MemberList\n\t\tprefix := fmt.Sprintf(\"member_list.%d\", i)\n\t\tml.FqdnName = d.Get(prefix + \".fqdn_name\").(string)\n\t\tml.Host = d.Get(prefix + \".host\").(string)\n\t\tml.MemberPriority = d.Get(prefix + \".member_priority\").(int)\n\t\tml.MemberState = d.Get(prefix + \".member_state\").(string)\n\t\tml.MemberStatsDataDisable = d.Get(prefix + \".member_stats_data_disable\").(int)\n\t\tml.MemberTemplate = d.Get(prefix + \".member_template\").(string)\n\t\tml.Name = d.Get(prefix + \".name\").(string)\n\t\tml.Port = d.Get(prefix + \".port\").(int)\n\t\tml.ResolveAs = d.Get(prefix + \".resolve_as\").(string)\n\t\tml.ServerIpv6Addr = d.Get(prefix + \".server_ipv6_addr\").(string)\n\t\tml.UUID = d.Get(prefix + \".uuid\").(string)\n\t\tml.UserTag = d.Get(prefix + \".user_tag\").(string)\n\n\t\tsampleCount := d.Get(prefix + \".sampling_enable.#\").(int)\n\t\tml.Counters1 = make([]go_thunder.SamplingEnable, sampleCount, sampleCount)\n\n\t\tfor x := 0; x < sampleCount; x++ {\n\t\t\tvar s go_thunder.SamplingEnable\n\t\t\tmapEntity(d.Get(fmt.Sprintf(\"%s.sampling_enable.%d\", prefix, x)).(map[string]interface{}), &s)\n\t\t\tml.Counters1[x] = s\n\t\t}\n\n\t\tsInstance.Host = append(sInstance.Host, ml)\n\t}\n\n\ts.Name = sInstance\n\n\treturn s\n}",
"func toGoBGPParameters(obj Protection, protectionID int64) []db_models.GoBgpParameter {\n\tresult := make([]db_models.GoBgpParameter, 0)\n t, _ := obj.(*RTBH)\n\tfor _, target := range t.RtbhTargets() {\n\t\tresult = append(result, db_models.GoBgpParameter{\n\t\t\tProtectionId: protectionID,\n\t\t\tTargetAddress: target})\n\t}\n\n\treturn result\n}",
"func newdbBasePostgres() dbBaser {\n\tb := new(dbBasePostgres)\n\tb.ins = b\n\treturn b\n}",
"func DrawElementsInstancedBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, baseinstance uint32) {\n C.glowDrawElementsInstancedBaseInstance(gpDrawElementsInstancedBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}",
"func (bi *BridgerInfo) pushInstance(ins *zstypes.InsCacheInfo) (uint64, uint64, error) {\n\n\tmeta := dtypes.StFileMeta{\n\t\tAccount: ins.AName,\n\t\tFiletype: \"prometheus\",\n\t\tIid: ins.IName,\n\t\tIsCompressed: true,\n\t\tDTs: ins.DTs,\n\t\tOLabels: ins.OLabels,\n\t}\n\n\tif config.GlCfg.BrdigerNotifyStIngest {\n\t\tfor atomic.LoadInt32(&bi.stingestWorkerReady) == 0 {\n\t\t\tzlog.Info(\"Waiting for stingest req_q worker to be ready\")\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t}\n\t}\n\n\tdmepoch := time.Now().UnixNano() / 1000\n\tdts := strconv.FormatInt(dmepoch, 10)\n\ttmpDir := path.Join(config.GlCfg.StatsReqsDir, ins.AName, strings.Replace(ins.IName, \":\", \"_\", 1), dts)\n\terr := os.MkdirAll(tmpDir, 0755)\n\tif err != nil {\n\t\tzlog.Error(\"Failed to create directory %s: %s\", tmpDir, err)\n\t\treturn uint64(0), uint64(0), err\n\t}\n\ttmpMFpath := filepath.Join(tmpDir, dts+\".json\")\n\ttmpDFpath := filepath.Join(tmpDir, dts+\".data.gz\")\n\n\tclblsMap, ntstamps, nbytes, err := bi.writeStatsFile(tmpDFpath, ins)\n\tif err != nil {\n\t\tzlog.Error(\"Failed to write to file %s, err %s\\n\", tmpDFpath, err)\n\t\treturn ntstamps, nbytes, err\n\t}\n\n\tif len(clblsMap) > 0 {\n\t\tmeta.Kvs = make([]dtypes.KvPair, 0, len(clblsMap))\n\t\tfor k, v := range clblsMap {\n\t\t\tmeta.Kvs = append(meta.Kvs, dtypes.KvPair{N: k, V: v})\n\t\t}\n\t}\n\tmjson, err := json.Marshal(meta)\n\tif err != nil {\n\t\tzlog.Error(\"Failed to marshal file metadata %v: %s\", meta, err)\n\t\treturn ntstamps, nbytes, err\n\t}\n\terr = ioutil.WriteFile(tmpMFpath, mjson, 0644)\n\tif err != nil {\n\t\tzlog.Error(\"Failed to write to file %s, err %s\\n\", tmpMFpath, err)\n\t\treturn ntstamps, nbytes, err\n\t}\n\n\t// If configured, send to the next stage for further processing.\n\tif config.GlCfg.BrdigerNotifyStIngest {\n\t\treq := stingestpb.StatsIngestRequest{\n\t\t\tAccount: ins.AName,\n\t\t\tDFpath: tmpDFpath,\n\t\t\tMFpath: tmpMFpath,\n\t\t\tInstanceId: ins.IName,\n\t\t\tType: meta.Filetype,\n\t\t\tDTs: ins.DTs,\n\t\t}\n\n\t\tsReq := &stiReq{tmpDir: tmpDir, req: &req}\n\t\tbi.stiReqCh <- sReq\n\t}\n\n\treturn ntstamps, nbytes, nil\n}",
"func toInstance(syncInstance *pb.SyncInstance) (instance *scpb.MicroServiceInstance) {\n\tinstance = &scpb.MicroServiceInstance{}\n\tif syncInstance.PluginName == PluginName && len(syncInstance.Expansions) > 0 {\n\t\tmatches := pb.Expansions(syncInstance.Expansions).Find(expansionDatasource, map[string]string{})\n\t\tif len(matches) > 0 {\n\t\t\terr := proto.Unmarshal(matches[0].Bytes, instance)\n\t\t\tif err == nil {\n\t\t\t\tinstance.InstanceId = syncInstance.InstanceId\n\t\t\t\tinstance.ServiceId = syncInstance.ServiceId\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Errorf(err, \"proto unmarshal %s instance, instanceID = %s, kind = %v, content = %v failed\",\n\t\t\t\tPluginName, instance.InstanceId, matches[0].Kind, matches[0].Bytes)\n\n\t\t}\n\t}\n\tinstance.InstanceId = syncInstance.InstanceId\n\tinstance.ServiceId = syncInstance.ServiceId\n\tinstance.Endpoints = make([]string, 0, len(syncInstance.Endpoints))\n\tinstance.HostName = syncInstance.HostName\n\tinstance.Version = syncInstance.Version\n\tinstance.Status = pb.SyncInstance_Status_name[int32(syncInstance.Status)]\n\n\tfor _, ep := range syncInstance.Endpoints {\n\t\taddr, err := url.Parse(ep)\n\t\tif err != nil {\n\t\t\tlog.Errorf(err, \"parse sc instance endpoint failed: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tendpoint := \"\"\n\t\tswitch addr.Scheme {\n\t\tcase \"http\":\n\t\t\tendpoint = strings.Replace(ep, \"http://\", \"rest://\", 1)\n\t\tcase \"https\":\n\t\t\tendpoint = strings.Replace(ep, \"https://\", \"rest://\", 1) + \"?sslEnabled=true\"\n\t\tcase \"rest\", \"highway\":\n\t\t\tendpoint = ep\n\t\t}\n\t\tinstance.Endpoints = append(instance.Endpoints, endpoint)\n\t}\n\n\tif syncInstance.HealthCheck != nil && syncInstance.HealthCheck.Mode != pb.HealthCheck_UNKNOWN {\n\t\tinstance.HealthCheck = &scpb.HealthCheck{\n\t\t\tMode: pb.HealthCheck_Modes_name[int32(syncInstance.HealthCheck.Mode)],\n\t\t\tPort: syncInstance.HealthCheck.Port,\n\t\t\tInterval: syncInstance.HealthCheck.Interval,\n\t\t\tTimes: syncInstance.HealthCheck.Times,\n\t\t\tUrl: syncInstance.HealthCheck.Url,\n\t\t}\n\t}\n\treturn\n}",
"func (self *Tween) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}",
"func testInstance() Instance {\n\treturn Instance{\n\t\tCreated: true,\n\t\tImageID: \"ami-7172b611\",\n\t\tInstanceID: \"i-1234567890abcdef0\",\n\t\tInstanceType: \"t2.nano\",\n\t\tSubnetID: \"subnet-1234567890abcdef0\",\n\t\tKeyPairName: \"bastion-test\",\n\t\tSecurityGroupID: \"sg-1234567890abcdef0\",\n\t\tPublicIPAddress: \"8.8.8.8\",\n\t\tPrivateIPAddress: \"10.0.0.1\",\n\t\tSSHUser: \"ec2-user\",\n\t}\n}",
"func (v *Vehicle) AsProto() *gtfsrt.FeedEntity {\n\tlat32 := float32(v.Lat)\n\tlon32 := float32(v.Lon)\n\tbearing32 := float32(v.Bearing)\n\ttstamp := uint64(v.TimeObj.Unix())\n\n\treturn >fsrt.FeedEntity{\n\t\tId: &v.ID,\n\t\tVehicle: >fsrt.VehiclePosition{\n\t\t\tTrip: >fsrt.TripDescriptor{TripId: &v.Trip},\n\t\t\tVehicle: >fsrt.VehicleDescriptor{Id: &v.ID, Label: &v.SideNumber},\n\t\t\tPosition: >fsrt.Position{\n\t\t\t\tLatitude: &lat32,\n\t\t\t\tLongitude: &lon32,\n\t\t\t\tBearing: &bearing32,\n\t\t\t},\n\t\t\tTimestamp: &tstamp,\n\t\t},\n\t}\n}",
"func (self *Graphics) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}",
"func (bil *baseInstanceList) newBetaGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {\n\treturn func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {\n\t\tm.Lock.Lock()\n\t\tdefer m.Lock.Unlock()\n\n\t\tif _, found := m.Objects[*key]; !found {\n\t\t\tm.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toBeta()}\n\t\t}\n\t\treturn false, nil, nil\n\t}\n}",
"func Instance() *gorm.DB {\n return database\n}",
"func NewBgpConfiguration()(*BgpConfiguration) {\n m := &BgpConfiguration{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}",
"func ClouddeployAlphaTargetGkeToProto(o *alpha.TargetGke) *alphapb.ClouddeployAlphaTargetGke {\n\tif o == nil {\n\t\treturn nil\n\t}\n\tp := &alphapb.ClouddeployAlphaTargetGke{}\n\tp.SetCluster(dcl.ValueOrEmptyString(o.Cluster))\n\tp.SetInternalIp(dcl.ValueOrEmptyBool(o.InternalIP))\n\treturn p\n}",
"func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n\tsyscall.Syscall9(gpDrawElementsInstancedBaseVertexBaseInstance, 7, uintptr(mode), uintptr(count), uintptr(xtype), uintptr(indices), uintptr(instancecount), uintptr(basevertex), uintptr(baseinstance), 0, 0)\n}",
"func (l *LogEntry) addInstance(i *guardduty.InstanceDetails) {\n\tl.InstanceAz = aws.StringValue(i.AvailabilityZone)\n\tl.InstanceDesc = aws.StringValue(i.ImageDescription)\n\tl.InstanceImageId = aws.StringValue(i.ImageId)\n\tl.InstanceId = aws.StringValue(i.InstanceId)\n\tl.InstanceState = aws.StringValue(i.InstanceState)\n\tl.InstanceType = aws.StringValue(i.InstanceType)\n\tl.InstanceLaunchTime = aws.StringValue(i.LaunchTime)\n\tl.InstanceTags = make(map[string]string)\n\tfor _, t := range i.Tags {\n\t\tl.InstanceTags[aws.StringValue(t.Key)] = aws.StringValue(t.Value)\n\t}\n\t// build list of public, private IP's and subnets:\n\tl.InstanceSg = make(map[string]string)\n\tprv := make(map[string]bool)\n\tpub := make(map[string]bool)\n\tsub := make(map[string]bool)\n\tfor _, in := range i.NetworkInterfaces {\n\t\tprv[aws.StringValue(in.PrivateIpAddress)] = true\n\t\tfor _, ip := range in.PrivateIpAddresses {\n\t\t\tprv[aws.StringValue(ip.PrivateIpAddress)] = true\n\t\t}\n\t\tpub[aws.StringValue(in.PublicIp)] = true\n\t\tsub[aws.StringValue(in.SubnetId)] = true\n\t\tl.InstanceVpc = aws.StringValue(in.VpcId)\n\t\tfor _, sg := range in.SecurityGroups {\n\t\t\tl.InstanceSg[aws.StringValue(sg.GroupId)] = aws.StringValue(sg.GroupName)\n\t\t}\n\t}\n\tfor k := range prv {\n\t\tl.InstancePrivateIp = append(l.InstancePrivateIp, k)\n\t}\n\tfor k := range pub {\n\t\tl.InstancePublicIp = append(l.InstancePublicIp, k)\n\t}\n\tfor k := range sub {\n\t\tl.InstanceSubnet = append(l.InstanceSubnet, k)\n\t}\n}",
"func GAEResource(ctx context.Context) (*MonitoredResource, error) {\n\t// appengine.IsAppEngine is confusingly false as we're using a custom\n\t// container and building without the appenginevm build constraint.\n\t// Check metadata.OnGCE instead.\n\tif !metadata.OnGCE() {\n\t\treturn nil, fmt.Errorf(\"not running on appengine\")\n\t}\n\tprojID, err := metadata.ProjectID()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn (*MonitoredResource)(&mrpb.MonitoredResource{\n\t\tType: \"gae_instance\",\n\t\tLabels: map[string]string{\n\t\t\t\"project_id\": projID,\n\t\t\t\"module_id\": appengine.ModuleName(ctx),\n\t\t\t\"version_id\": appengine.VersionID(ctx),\n\t\t\t\"instance_id\": appengine.InstanceID(),\n\t\t\t\"location\": appengine.Datacenter(ctx),\n\t\t},\n\t}), nil\n}",
"func NewGraph(base Base) {\n\n}",
"func (c *TestClient) CreateInstanceBeta(project, zone string, i *computeBeta.Instance) error {\n\tif c.CreateInstanceBetaFn != nil {\n\t\treturn c.CreateInstanceBetaFn(project, zone, i)\n\t}\n\treturn c.client.CreateInstanceBeta(project, zone, i)\n}",
"func toRecord(cache airtabledb.DB, src Feature, dst interface{}) {\n\tdV := reflect.ValueOf(dst).Elem().FieldByName(\"Fields\")\n\tsV := reflect.ValueOf(src)\n\tcopyFields(cache, sV, dV)\n}",
"func (c *InstanceManagerClient) InstanceGet(backendStoreDriver longhorn.BackendStoreDriverType, name, kind string) (*longhorn.InstanceProcess, error) {\n\tif err := CheckInstanceManagerCompatibility(c.apiMinVersion, c.apiVersion); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c.GetAPIVersion() < 4 {\n\t\t/* Fall back to the old way of getting process */\n\t\tprocess, err := c.processManagerGrpcClient.ProcessGet(name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn parseProcess(imapi.RPCToProcess(process)), nil\n\t}\n\n\tinstance, err := c.instanceServiceGrpcClient.InstanceGet(string(backendStoreDriver), name, kind)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseInstance(instance), nil\n}",
"func (bil *baseInstanceList) getOrCreateBaseInstance(key *meta.Key) *baseInstance {\n\tbil.lock.Lock()\n\tdefer bil.lock.Unlock()\n\n\tinst, found := bil.instances[*key]\n\tif !found {\n\t\tinst = &baseInstance{name: key.Name, zone: key.Zone}\n\t\tif bil.allocateCIDR {\n\t\t\tnextRange, _ := bil.cidrSet.AllocateNext()\n\t\t\tinst.aliasRange = nextRange.String()\n\t\t}\n\t\tbil.instances[*key] = inst\n\t}\n\treturn inst\n}",
"func (pn *paxosNode) getInstance(key string) *paxosKeyData {\n\tpxi, ok := pn.instances[key]\n\tif !ok {\n\t\tpxi = &paxosKeyData{\n\t\t\tMyn: 0,\n\t\t\tNa: -1,\n\t\t\tNh: 0,\n\t\t\tVa: nil,\n\t\t\tmu: &sync.RWMutex{},\n\t\t\tCommittedVal: nil,\n\t\t\tstoreLock: &sync.RWMutex{},\n\t\t\tproposeLock: &sync.RWMutex{},\n\t\t}\n\t\tpn.instances[key] = pxi\n\t}\n\treturn pxi\n}",
"func VFromDB(gid GoogleID) (*VAgent, time.Time, error) {\n\ta := VAgent{\n\t\tGid: gid,\n\t}\n\tvar fetched string\n\tvar t time.Time\n\tvar vlevel, vpoints, distance sql.NullInt64\n\tvar telegram, cellid sql.NullString\n\tvar startlat, startlon sql.NullFloat64\n\n\terr := db.QueryRow(\"SELECT enlid, vlevel, vpoints, agent, level, quarantine, active, blacklisted, verified, flagged, banned, cellid, telegram, startlat, startlon, distance, fetched FROM v WHERE gid = ?\", gid).Scan(&a.EnlID, &vlevel, &vpoints, &a.Agent, &a.Level, &a.Quarantine, &a.Active, &a.Blacklisted, &a.Verified, &a.Flagged, &a.Banned, &cellid, &telegram, &startlat, &startlon, &distance, &fetched)\n\tif err != nil && err != sql.ErrNoRows {\n\t\tlog.Error(err)\n\t\treturn &a, t, err\n\t}\n\n\tif err == sql.ErrNoRows {\n\t\treturn &a, t, nil\n\t}\n\n\tif fetched == \"\" {\n\t\treturn &a, t, nil\n\t}\n\n\tif vlevel.Valid {\n\t\ta.Vlevel = vlevel.Int64\n\t}\n\tif vpoints.Valid {\n\t\ta.Vpoints = vpoints.Int64\n\t}\n\tif telegram.Valid {\n\t\ta.Telegram = telegram.String\n\t}\n\tif cellid.Valid {\n\t\ta.CellID = cellid.String\n\t}\n\tif startlat.Valid {\n\t\ta.StartLat = startlat.Float64\n\t}\n\tif startlon.Valid {\n\t\ta.StartLon = startlon.Float64\n\t}\n\tif distance.Valid {\n\t\ta.Distance = distance.Int64\n\t}\n\n\tt, err = time.ParseInLocation(\"2006-01-02 15:04:05\", fetched, time.UTC)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t// return &a, t, err\n\t}\n\t// log.Debugw(\"VFromDB\", \"gid\", gid, \"fetched\", fetched, \"data\", a)\n\treturn &a, t, nil\n}",
"func ToGObject(p unsafe.Pointer) *C.GObject {\n\treturn (*C.GObject)(p)\n}",
"func ComputeBetaInstanceTemplatePropertiesToProto(o *beta.InstanceTemplateProperties) *betapb.ComputeBetaInstanceTemplateProperties {\n\tif o == nil {\n\t\treturn nil\n\t}\n\tp := &betapb.ComputeBetaInstanceTemplateProperties{\n\t\tCanIpForward: dcl.ValueOrEmptyBool(o.CanIPForward),\n\t\tDescription: dcl.ValueOrEmptyString(o.Description),\n\t\tMachineType: dcl.ValueOrEmptyString(o.MachineType),\n\t\tMinCpuPlatform: dcl.ValueOrEmptyString(o.MinCpuPlatform),\n\t\tReservationAffinity: ComputeBetaInstanceTemplatePropertiesReservationAffinityToProto(o.ReservationAffinity),\n\t\tShieldedInstanceConfig: ComputeBetaInstanceTemplatePropertiesShieldedInstanceConfigToProto(o.ShieldedInstanceConfig),\n\t\tScheduling: ComputeBetaInstanceTemplatePropertiesSchedulingToProto(o.Scheduling),\n\t}\n\tfor _, r := range o.Disks {\n\t\tp.Disks = append(p.Disks, ComputeBetaInstanceTemplatePropertiesDisksToProto(&r))\n\t}\n\tp.Labels = make(map[string]string)\n\tfor k, r := range o.Labels {\n\t\tp.Labels[k] = r\n\t}\n\tp.Metadata = make(map[string]string)\n\tfor k, r := range o.Metadata {\n\t\tp.Metadata[k] = r\n\t}\n\tfor _, r := range o.GuestAccelerators {\n\t\tp.GuestAccelerators = append(p.GuestAccelerators, ComputeBetaInstanceTemplatePropertiesGuestAcceleratorsToProto(&r))\n\t}\n\tfor _, r := range o.NetworkInterfaces {\n\t\tp.NetworkInterfaces = append(p.NetworkInterfaces, ComputeBetaInstanceTemplatePropertiesNetworkInterfacesToProto(&r))\n\t}\n\tfor _, r := range o.ServiceAccounts {\n\t\tp.ServiceAccounts = append(p.ServiceAccounts, ComputeBetaInstanceTemplatePropertiesServiceAccountsToProto(&r))\n\t}\n\tfor _, r := range o.Tags {\n\t\tp.Tags = append(p.Tags, r)\n\t}\n\treturn p\n}",
"func newSubInstance(parent *BaseInstance, name string) *BaseInstance {\n\tsi := parent.module.subinstance\n\tif si == nil {\n\t\tsi = make(map[string][]*BaseInstance)\n\t\tparent.module.subinstance = si\n\t}\n\n\tbi := &BaseInstance{\n\t\tname: name,\n\t\tinput: parent.input,\n\t\tinput2: parent.input2,\n\t\trules: newRules(),\n\t\tenabled: false,\n\t\tsubinstance: true,\n\t\tinstance: parent.instance,\n\t\tmodule: parent.module,\n\t}\n\n\tsi[parent.name] = append(si[parent.name], bi)\n\treturn bi\n}",
"func (self *SinglePad) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}",
"func newSubInstance(parent *BaseInstance, name string) *BaseInstance {\n\tbi := &BaseInstance{\n\t\tname: name,\n\t\tinput: parent.input,\n\t\tinput2: parent.input2,\n\t\trules: newRules(),\n\t\tenabled: false,\n\t\tinstance: parent.instance,\n\t}\n\treturn bi\n}",
"func (client *Client) DescribeGtmInstance(request *DescribeGtmInstanceRequest) (response *DescribeGtmInstanceResponse, err error) {\n\tresponse = CreateDescribeGtmInstanceResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func (self *PhysicsP2) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}",
"func (self *GameObjectCreator) BitmapData() *BitmapData{\n return &BitmapData{self.Object.Call(\"bitmapData\")}\n}",
"func (m *AzureManager) GetAsgForInstance(instance *azureRef) (cloudprovider.NodeGroup, error) {\n\treturn m.asgCache.FindForInstance(instance, m.config.VMType)\n}",
"func Merge(inst ...*Instance) *Instance {\n\tv := &adt.Vertex{}\n\n\ti := inst[0]\n\tctx := newContext(i.index)\n\n\t// TODO: interesting test: use actual unification and then on K8s corpus.\n\n\tfor _, i := range inst {\n\t\tw := i.Value()\n\t\tv.AddConjunct(adt.MakeRootConjunct(nil, w.v.ToDataAll(ctx)))\n\t}\n\tv.Finalize(ctx)\n\n\tp := addInst(i.index, &Instance{\n\t\troot: v,\n\t})\n\treturn p\n}",
"func (self *GameObjectCreator) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}",
"func DataprocAlphaClusterConfigWorkerConfigInstanceReferencesToProto(o *alpha.ClusterConfigWorkerConfigInstanceReferences) *alphapb.DataprocAlphaClusterConfigWorkerConfigInstanceReferences {\n\tif o == nil {\n\t\treturn nil\n\t}\n\tp := &alphapb.DataprocAlphaClusterConfigWorkerConfigInstanceReferences{}\n\tp.SetInstanceName(dcl.ValueOrEmptyString(o.InstanceName))\n\tp.SetInstanceId(dcl.ValueOrEmptyString(o.InstanceId))\n\tp.SetPublicKey(dcl.ValueOrEmptyString(o.PublicKey))\n\tp.SetPublicEciesKey(dcl.ValueOrEmptyString(o.PublicEciesKey))\n\treturn p\n}",
"func ProtoToComputeBetaInstanceTemplateProperties(p *betapb.ComputeBetaInstanceTemplateProperties) *beta.InstanceTemplateProperties {\n\tif p == nil {\n\t\treturn nil\n\t}\n\tobj := &beta.InstanceTemplateProperties{\n\t\tCanIPForward: dcl.Bool(p.CanIpForward),\n\t\tDescription: dcl.StringOrNil(p.Description),\n\t\tMachineType: dcl.StringOrNil(p.MachineType),\n\t\tMinCpuPlatform: dcl.StringOrNil(p.MinCpuPlatform),\n\t\tReservationAffinity: ProtoToComputeBetaInstanceTemplatePropertiesReservationAffinity(p.GetReservationAffinity()),\n\t\tShieldedInstanceConfig: ProtoToComputeBetaInstanceTemplatePropertiesShieldedInstanceConfig(p.GetShieldedInstanceConfig()),\n\t\tScheduling: ProtoToComputeBetaInstanceTemplatePropertiesScheduling(p.GetScheduling()),\n\t}\n\tfor _, r := range p.GetDisks() {\n\t\tobj.Disks = append(obj.Disks, *ProtoToComputeBetaInstanceTemplatePropertiesDisks(r))\n\t}\n\tfor _, r := range p.GetGuestAccelerators() {\n\t\tobj.GuestAccelerators = append(obj.GuestAccelerators, *ProtoToComputeBetaInstanceTemplatePropertiesGuestAccelerators(r))\n\t}\n\tfor _, r := range p.GetNetworkInterfaces() {\n\t\tobj.NetworkInterfaces = append(obj.NetworkInterfaces, *ProtoToComputeBetaInstanceTemplatePropertiesNetworkInterfaces(r))\n\t}\n\tfor _, r := range p.GetServiceAccounts() {\n\t\tobj.ServiceAccounts = append(obj.ServiceAccounts, *ProtoToComputeBetaInstanceTemplatePropertiesServiceAccounts(r))\n\t}\n\tfor _, r := range p.GetTags() {\n\t\tobj.Tags = append(obj.Tags, r)\n\t}\n\treturn obj\n}",
"func (s StatsGraph) construct() StatsGraphClass { return &s }",
"func DrawArraysInstancedBaseInstance(mode uint32, first int32, count int32, instancecount int32, baseinstance uint32) {\n\tC.glowDrawArraysInstancedBaseInstance(gpDrawArraysInstancedBaseInstance, (C.GLenum)(mode), (C.GLint)(first), (C.GLsizei)(count), (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}",
"func DrawArraysInstancedBaseInstance(mode uint32, first int32, count int32, instancecount int32, baseinstance uint32) {\n\tC.glowDrawArraysInstancedBaseInstance(gpDrawArraysInstancedBaseInstance, (C.GLenum)(mode), (C.GLint)(first), (C.GLsizei)(count), (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}",
"func ComputeBetaInstanceTemplatePropertiesShieldedInstanceConfigToProto(o *beta.InstanceTemplatePropertiesShieldedInstanceConfig) *betapb.ComputeBetaInstanceTemplatePropertiesShieldedInstanceConfig {\n\tif o == nil {\n\t\treturn nil\n\t}\n\tp := &betapb.ComputeBetaInstanceTemplatePropertiesShieldedInstanceConfig{\n\t\tEnableSecureBoot: dcl.ValueOrEmptyBool(o.EnableSecureBoot),\n\t\tEnableVtpm: dcl.ValueOrEmptyBool(o.EnableVtpm),\n\t\tEnableIntegrityMonitoring: dcl.ValueOrEmptyBool(o.EnableIntegrityMonitoring),\n\t}\n\treturn p\n}",
"func newBase() *base {\n\treturn &base{shared.NewUUID(), time.Now().UTC(), time.Now().UTC(), false/*, shared.NewUUID()*/}\n}",
"func (client AccessGovernanceCPClient) getGovernanceInstance(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/governanceInstances/{governanceInstanceId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetGovernanceInstanceResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/access-governance-cp/20220518/GovernanceInstance/GetGovernanceInstance\"\n\t\terr = common.PostProcessServiceError(err, \"AccessGovernanceCP\", \"GetGovernanceInstance\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func GetInstance() *gorm.DB {\n\treturn DB\n}",
"func (i *Instance) Clone() *Instance {\n\tclone := &Instance{}\n\tclone.TargetValue, clone.FeatureValues = i.TargetValue, make(map[string]Feature, len(i.FeatureValues))\n\tfor k, v := range i.FeatureValues {\n\t\tclone.FeatureValues[k] = v\n\t}\n\treturn clone\n}",
"func newdbBaseClickHouse() dbBaser {\n\tb := new(dbBaseClickHouse)\n\tb.ins = b\n\treturn b\n}",
"func (g UGaugeSnapshot) Snapshot() UGauge { return g }",
"func GetInstance(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *InstanceState, opts ...pulumi.ResourceOption) (*Instance, error) {\n\tvar resource Instance\n\terr := ctx.ReadResource(\"gcp:alloydb/instance:Instance\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func toHackInstance(hw tinkv1.Hardware) (hack.Instance, error) {\n\tmarshalled, err := json.Marshal(hw.Spec)\n\tif err != nil {\n\t\treturn hack.Instance{}, err\n\t}\n\n\tvar i hack.Instance\n\tif err := json.Unmarshal(marshalled, &i); err != nil {\n\t\treturn hack.Instance{}, err\n\t}\n\n\treturn i, nil\n}",
"func DataprocAlphaClusterConfigMasterConfigInstanceReferencesToProto(o *alpha.ClusterConfigMasterConfigInstanceReferences) *alphapb.DataprocAlphaClusterConfigMasterConfigInstanceReferences {\n\tif o == nil {\n\t\treturn nil\n\t}\n\tp := &alphapb.DataprocAlphaClusterConfigMasterConfigInstanceReferences{}\n\tp.SetInstanceName(dcl.ValueOrEmptyString(o.InstanceName))\n\tp.SetInstanceId(dcl.ValueOrEmptyString(o.InstanceId))\n\tp.SetPublicKey(dcl.ValueOrEmptyString(o.PublicKey))\n\tp.SetPublicEciesKey(dcl.ValueOrEmptyString(o.PublicEciesKey))\n\treturn p\n}",
"func (b Factory) Bg() Logger {\n\treturn logger(b)\n}",
"func ProtoToClouddeployAlphaTargetGke(p *alphapb.ClouddeployAlphaTargetGke) *alpha.TargetGke {\n\tif p == nil {\n\t\treturn nil\n\t}\n\tobj := &alpha.TargetGke{\n\t\tCluster: dcl.StringOrNil(p.GetCluster()),\n\t\tInternalIP: dcl.Bool(p.GetInternalIp()),\n\t}\n\treturn obj\n}",
"func Instance() *unityBridge {\n\treturn instance\n}",
"func (*Instances) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_instances_proto_rawDescGZIP(), []int{3}\n}",
"func generateTokenFromInstance(inst instance.Instance, options auth.Options) (string, error) {\n\ttokenWithExpiry, err := inst.GenerateAccessToken(options)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to generate token: %s\", err.Error())\n\t}\n\n\treturn tokenWithExpiry.Token, nil\n}",
"func GetInstance() *gorm.DB {\n\tonce.Do(func() {\n\t\t// refer https://github.com/go-sql-driver/mysql#dsn-data-source-name for details\n\t\tuser := viper.GetString(\"database.user\")\n\t\tpassword := viper.GetString(\"database.password\")\n\t\thost := viper.GetString(\"database.host\")\n\t\tport := viper.GetString(\"database.port\")\n\t\tdbname := viper.GetString(\"database.dbname\")\n\n\t\tdsn := fmt.Sprintf(\"%s:%s@tcp(%s:%s)/%s\", user, password, host, port, dbname)\n\t\tdb, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})\n\t\tdba = db\n\t\tif err != nil {\n\t\t\tlog.Panic().Msgf(\"Error connecting to the database at %s:%s/%s\", host, port, dbname)\n\t\t}\n\t\tsqlDB, err := dba.DB()\n\t\tif err != nil {\n\t\t\tlog.Panic().Msgf(\"Error getting GORM DB definition\")\n\t\t}\n\t\tsqlDB.SetMaxIdleConns(10)\n\t\tsqlDB.SetMaxOpenConns(100)\n\n\t\tlog.Info().Msgf(\"Successfully established connection to %s:%s/%s\", host, port, dbname)\n\t})\n\treturn dba\n}",
"func (gs *GameSpec) toParams() (gsp GameSpecParams) {\n\tgsp = GameSpecParams{\n\t\tTeaser: gs.Description,\n\t\tPace: gs.Pace,\n\t\tNbTurn: gs.Turns,\n\t\tNbAntPerPlayer: gs.AntsPerPlayer,\n\t\tNbPlayer: gs.MaxPlayers,\n\t\tMinimalNbPlayer: gs.MinPlayers,\n\t\tInitialEnergy: gs.InitialEnergy,\n\t\tInitialAcid: gs.InitialAcid,\n\t}\n\n\t// the API requires that the `users` field contain either \"all\" for a\n\t// public game or a comma-separated list of usernames if it's private.\n\tif gs.Public {\n\t\tgsp.Users = \"all\"\n\t} else {\n\t\tgsp.Users = strings.Join(gs.Players, \",\")\n\t}\n\n\treturn\n}",
"func (e *EncryptedChatRequested) GetGA() (value []byte) {\n\treturn e.GA\n}",
"func newpcpInstanceMetric(vals Instances, indom *PCPInstanceDomain, desc *pcpMetricDesc) (*pcpInstanceMetric, error) {\n\tif len(vals) != indom.InstanceCount() {\n\t\treturn nil, errors.New(\"values for all instances in the instance domain only should be passed\")\n\t}\n\n\tmvals := make(map[string]*instanceValue)\n\n\tfor name := range indom.instances {\n\t\tval, present := vals[name]\n\t\tif !present {\n\t\t\treturn nil, errors.Errorf(\"Instance %v not initialized\", name)\n\t\t}\n\n\t\tif !desc.t.IsCompatible(val) {\n\t\t\treturn nil, errors.Errorf(\"value %v is incompatible with type %v for Instance %v\", val, desc.t, name)\n\t\t}\n\n\t\tval = desc.t.resolve(val)\n\t\tmvals[name] = newinstanceValue(val)\n\t}\n\n\treturn &pcpInstanceMetric{desc, indom, mvals}, nil\n}",
"func labelsForInstance(name string) map[string]string {\n\treturn map[string]string{\"app\": \"instance\", \"instance_cr\": name}\n}",
"func (m *PeriodORM) ToPB(ctx context.Context) (Period, error) {\n\tto := Period{}\n\tvar err error\n\tif prehook, ok := interface{}(m).(PeriodWithBeforeToPB); ok {\n\t\tif err = prehook.BeforeToPB(ctx, &to); err != nil {\n\t\t\treturn to, err\n\t\t}\n\t}\n\tto.Id = m.Id\n\tto.Period = m.Period\n\tif m.CreatedAt != nil {\n\t\tto.CreatedAt = timestamppb.New(*m.CreatedAt)\n\t}\n\tif m.UpdatedAt != nil {\n\t\tto.UpdatedAt = timestamppb.New(*m.UpdatedAt)\n\t}\n\tif posthook, ok := interface{}(m).(PeriodWithAfterToPB); ok {\n\t\terr = posthook.AfterToPB(ctx, &to)\n\t}\n\treturn to, err\n}",
"func GetBaseRuntime() *otto.Otto {\n\treturn baseRuntime.Copy()\n}",
"func (g *Group) TOML() interface{} {\n\tgtoml := &GroupTOML{\n\t\tThreshold: g.Threshold,\n\t}\n\tgtoml.Nodes = make([]*NodeTOML, g.Len())\n\tfor i, n := range g.Nodes {\n\t\tgtoml.Nodes[i] = n.TOML().(*NodeTOML)\n\t}\n\n\tif g.PublicKey != nil {\n\t\tgtoml.PublicKey = g.PublicKey.TOML().(*DistPublicTOML)\n\t}\n\n\tgtoml.ID = g.ID\n\tgtoml.SchemeID = g.Scheme.Name\n\tgtoml.Period = g.Period.String()\n\tgtoml.CatchupPeriod = g.CatchupPeriod.String()\n\tgtoml.GenesisTime = g.GenesisTime\n\tif g.TransitionTime != 0 {\n\t\tgtoml.TransitionTime = g.TransitionTime\n\t}\n\tgtoml.GenesisSeed = hex.EncodeToString(g.GetGenesisSeed())\n\treturn gtoml\n}",
"func ToGob(src interface{}) ([]byte, error) {\n\treturn NewGobber().To(src)\n}",
"func (rt *resourceTracking) GetAppInstance(un *unstructured.Unstructured, key string, trackingMethod v1alpha1.TrackingMethod) *AppInstanceValue {\n\tswitch trackingMethod {\n\tcase TrackingMethodAnnotation, TrackingMethodAnnotationAndLabel:\n\t\treturn rt.getAppInstanceValue(un, key, trackingMethod)\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func DataprocAlphaClusterConfigGceClusterConfigShieldedInstanceConfigToProto(o *alpha.ClusterConfigGceClusterConfigShieldedInstanceConfig) *alphapb.DataprocAlphaClusterConfigGceClusterConfigShieldedInstanceConfig {\n\tif o == nil {\n\t\treturn nil\n\t}\n\tp := &alphapb.DataprocAlphaClusterConfigGceClusterConfigShieldedInstanceConfig{}\n\tp.SetEnableSecureBoot(dcl.ValueOrEmptyBool(o.EnableSecureBoot))\n\tp.SetEnableVtpm(dcl.ValueOrEmptyBool(o.EnableVtpm))\n\tp.SetEnableIntegrityMonitoring(dcl.ValueOrEmptyBool(o.EnableIntegrityMonitoring))\n\treturn p\n}",
"func VToDB(a *VAgent) error {\n\tif a.Agent == \"\" {\n\t\treturn nil\n\t}\n\n\tif len(a.Agent) > 15 {\n\t\tlog.Infow(\"bad agent name from V\", \"gid\", a.Gid, \"name\", a.Agent)\n\t}\n\n\t// telegram, startlat, startlon, distance, fetched are not set on the \"trust\" API call.\n\t// use ON DUPLICATE so as to not overwrite apikey or telegram\n\t// TODO: prune fields we will never use or that V never sends\n\t_, err := db.Exec(\"INSERT INTO v (enlid, gid, vlevel, vpoints, agent, level, quarantine, active, blacklisted, verified, flagged, banned, cellid, startlat, startlon, distance, fetched) VALUES (?,?,?,?,LEFT(?,15),?,?,?,?,?,?,?,?,?,?,?,UTC_TIMESTAMP()) ON DUPLICATE KEY UPDATE agent=LEFT(?, 15), quarantine=?, blacklisted=?, verified=?, flagged=?, banned=?, fetched=UTC_TIMESTAMP()\",\n\t\ta.EnlID, a.Gid, a.Vlevel, a.Vpoints, a.Agent, a.Level, a.Quarantine, a.Active, a.Blacklisted, a.Verified, a.Flagged, a.Banned, a.CellID, a.StartLat, a.StartLon, a.Distance,\n\t\ta.Agent, a.Quarantine, a.Blacklisted, a.Verified, a.Flagged, a.Banned)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\tif a.TelegramID != 0 {\n\t\texisting, err := a.Gid.TelegramID()\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn err\n\t\t}\n\t\tif existing == 0 {\n\t\t\terr := a.Gid.SetTelegramID(TelegramID(a.TelegramID), a.Telegram)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func flattenInstance(c *Client, i interface{}) *Instance {\n\tm, ok := i.(map[string]interface{})\n\tif !ok {\n\t\treturn nil\n\t}\n\tif len(m) == 0 {\n\t\treturn nil\n\t}\n\n\tr := &Instance{}\n\tr.Name = dcl.FlattenString(m[\"name\"])\n\tr.Description = dcl.FlattenString(m[\"description\"])\n\tr.State = flattenInstanceStateEnum(m[\"state\"])\n\tr.StatusMessage = dcl.FlattenString(m[\"statusMessage\"])\n\tr.CreateTime = dcl.FlattenString(m[\"createTime\"])\n\tr.Tier = flattenInstanceTierEnum(m[\"tier\"])\n\tr.Labels = dcl.FlattenKeyValuePairs(m[\"labels\"])\n\tr.FileShares = flattenInstanceFileSharesSlice(c, m[\"fileShares\"])\n\tr.Networks = flattenInstanceNetworksSlice(c, m[\"networks\"])\n\tr.Etag = dcl.FlattenString(m[\"etag\"])\n\tr.Project = dcl.FlattenString(m[\"project\"])\n\tr.Location = dcl.FlattenString(m[\"location\"])\n\n\treturn r\n}",
"func (ga *GA) Init(pool_size uint, data_size uint, fit_func FitnessFunc, p GA_Params) {\n\n rand.Seed(time.Now().UTC().UnixNano())\n data_bytes := (data_size + 7) / 8\n ga.Population = make(GenePool, pool_size)\n // for _, ind := range ga.Population {\n for i := range ga.Population {\n\t// var ind *Individual\n\tind := new(Individual)\n\tind.Data = make([]byte, data_bytes)\n\trandom_word := rand.Uint32()\n\tfor j := range ind.Data {\n\t if (j % 4) == 0 {\n\t\trandom_word = rand.Uint32()\n\t }\n\t ind.Data[j] = byte(random_word & 0xff)\n\t random_word >>= 8\n\t}\n\tga.Population[i] = ind\n }\n ga.Params = p\n ga.data_size = data_size\n ga.Generation = 0\n ga.fit_func = fit_func\n ga.MeasureAndSort()\n ga.Stats_best = make([]float64, 0, 1024)\n ga.Stats_avg = make([]float64, 0, 1024)\n ga.Stats_best = append(ga.Stats_best, ga.Population[0].Fitness)\n ga.Stats_avg = append(ga.Stats_avg, ga.AvgFitness())\n}",
"func (_RandomBeacon *RandomBeaconCaller) GasParameters(opts *bind.CallOpts) (struct {\n\tDkgResultSubmissionGas *big.Int\n\tDkgResultApprovalGasOffset *big.Int\n\tNotifyOperatorInactivityGasOffset *big.Int\n\tRelayEntrySubmissionGasOffset *big.Int\n}, error) {\n\tvar out []interface{}\n\terr := _RandomBeacon.contract.Call(opts, &out, \"gasParameters\")\n\n\toutstruct := new(struct {\n\t\tDkgResultSubmissionGas *big.Int\n\t\tDkgResultApprovalGasOffset *big.Int\n\t\tNotifyOperatorInactivityGasOffset *big.Int\n\t\tRelayEntrySubmissionGasOffset *big.Int\n\t})\n\tif err != nil {\n\t\treturn *outstruct, err\n\t}\n\n\toutstruct.DkgResultSubmissionGas = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\toutstruct.DkgResultApprovalGasOffset = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)\n\toutstruct.NotifyOperatorInactivityGasOffset = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int)\n\toutstruct.RelayEntrySubmissionGasOffset = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int)\n\n\treturn *outstruct, err\n\n}",
"func newEntityExtracter() *entityExtracter {\n\tvar mapping map[string]int\n\tvar weights []float64\n\tvar labels []string\n\n\tdec := getAsset(\"Maxent\", \"mapping.gob\")\n\tcheckError(dec.Decode(&mapping))\n\n\tdec = getAsset(\"Maxent\", \"weights.gob\")\n\tcheckError(dec.Decode(&weights))\n\n\tdec = getAsset(\"Maxent\", \"labels.gob\")\n\tcheckError(dec.Decode(&labels))\n\n\treturn &entityExtracter{model: newMaxentClassifier(weights, mapping, labels)}\n}",
"func (self *TileSprite) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}",
"func (e *Engine) GT() *GT {\n\treturn NewGT()\n}"
] | [
"0.772265",
"0.55584383",
"0.54472685",
"0.5152903",
"0.5063716",
"0.4974295",
"0.49704218",
"0.496328",
"0.48645544",
"0.48514754",
"0.4848639",
"0.48257947",
"0.48195985",
"0.48057404",
"0.47782093",
"0.47607097",
"0.4754396",
"0.47254223",
"0.47182488",
"0.47182488",
"0.47175556",
"0.47143835",
"0.4706615",
"0.46943486",
"0.46898332",
"0.46787372",
"0.4673936",
"0.46727243",
"0.46590754",
"0.4642674",
"0.46359175",
"0.46355006",
"0.46343967",
"0.46132025",
"0.46108896",
"0.4608085",
"0.46054563",
"0.4604067",
"0.4604",
"0.4602524",
"0.46013567",
"0.4593346",
"0.45892853",
"0.45785785",
"0.457152",
"0.45594332",
"0.45583498",
"0.45503414",
"0.45470965",
"0.45434934",
"0.45320162",
"0.4528699",
"0.4522804",
"0.4520935",
"0.45208448",
"0.45141104",
"0.45070627",
"0.45066828",
"0.45026946",
"0.44927084",
"0.4489687",
"0.44872212",
"0.44786364",
"0.4472765",
"0.44668967",
"0.44548112",
"0.44548112",
"0.4454096",
"0.44415414",
"0.44414285",
"0.44315708",
"0.4426655",
"0.44262448",
"0.4421045",
"0.4420678",
"0.44178444",
"0.4417379",
"0.44147655",
"0.44134632",
"0.44098952",
"0.44069982",
"0.44051006",
"0.43993643",
"0.4398606",
"0.43932548",
"0.4393126",
"0.43883976",
"0.43871233",
"0.43831864",
"0.43777254",
"0.43772364",
"0.43771017",
"0.43752432",
"0.43722686",
"0.43653962",
"0.43651286",
"0.4363492",
"0.43603826",
"0.43574873",
"0.43568492"
] | 0.688688 | 1 |
newBaseInstanceList is the baseInstanceList constructor | func newBaseInstanceList(allocateCIDR bool, clusterCIDR *net.IPNet, subnetMaskSize int) *baseInstanceList {
cidrSet, _ := cidrset.NewCIDRSet(clusterCIDR, subnetMaskSize)
return &baseInstanceList{
allocateCIDR: allocateCIDR,
clusterCIDR: clusterCIDR,
subnetMaskSize: subnetMaskSize,
cidrSet: cidrSet,
instances: make(map[meta.Key]*baseInstance),
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func newBase() *base {\n\treturn &base{shared.NewUUID(), time.Now().UTC(), time.Now().UTC(), false/*, shared.NewUUID()*/}\n}",
"func newSubInstance(parent *BaseInstance, name string) *BaseInstance {\n\tsi := parent.module.subinstance\n\tif si == nil {\n\t\tsi = make(map[string][]*BaseInstance)\n\t\tparent.module.subinstance = si\n\t}\n\n\tbi := &BaseInstance{\n\t\tname: name,\n\t\tinput: parent.input,\n\t\tinput2: parent.input2,\n\t\trules: newRules(),\n\t\tenabled: false,\n\t\tsubinstance: true,\n\t\tinstance: parent.instance,\n\t\tmodule: parent.module,\n\t}\n\n\tsi[parent.name] = append(si[parent.name], bi)\n\treturn bi\n}",
"func newList(ctx TransactionContextInterface) *list {\n\t stateList := new(ledgerapi.StateList)\n\t stateList.Ctx = ctx\n\t stateList.Class = \"Asset\"\n\t stateList.Deserialize = func(bytes []byte, state ledgerapi.StateInterface) error {\n\t\t return Deserialize(bytes, state.(*Asset))\n\t }\n \n\t list := new(list)\n\t list.stateList = stateList\n \n\t return list\n }",
"func newBaseRuntime(erp *ECALRuntimeProvider, node *parser.ASTNode) *baseRuntime {\n\tinstanceCounter++\n\treturn &baseRuntime{fmt.Sprint(instanceCounter), erp, node, false}\n}",
"func New() *List { return new(List).Init() }",
"func newSubInstance(parent *BaseInstance, name string) *BaseInstance {\n\tbi := &BaseInstance{\n\t\tname: name,\n\t\tinput: parent.input,\n\t\tinput2: parent.input2,\n\t\trules: newRules(),\n\t\tenabled: false,\n\t\tinstance: parent.instance,\n\t}\n\treturn bi\n}",
"func newIDList(p *idElementPool) *idList {\n\tl := &idList{Pool: p}\n\treturn l.Init()\n}",
"func newList(data interface{}) *List {\n\tnewL := new(List)\n\tnewL.Insert(data)\n\treturn newL\n}",
"func ListBase(base uint32) {\n\tsyscall.Syscall(gpListBase, 1, uintptr(base), 0, 0)\n}",
"func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n C.glowDrawElementsInstancedBaseVertexBaseInstance(gpDrawElementsInstancedBaseVertexBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex), (C.GLuint)(baseinstance))\n}",
"func newList(vert bool, width, height float32) *List {\n\n\tli := new(List)\n\tli.initialize(vert, width, height)\n\treturn li\n}",
"func newList(rowType reflect.Type) []*Info {\n\tvar list columnList\n\tvar state = stateT{}\n\tlist.addFields(rowType, state)\n\treturn list\n}",
"func ListBase(base uint32) {\n\tC.glowListBase(gpListBase, (C.GLuint)(base))\n}",
"func newList() *List {\n\tl := &List{\n\t\tch: make(chan sh.QData),\n\t}\n\treturn l\n}",
"func NewList() *List {\n newObj := &List {\n counters : make(map[string]Counter),\n }\n\n return newObj\n}",
"func ListBase(base uint32) {\n C.glowListBase(gpListBase, (C.GLuint)(base))\n}",
"func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n\tC.glowDrawElementsInstancedBaseVertexBaseInstance(gpDrawElementsInstancedBaseVertexBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex), (C.GLuint)(baseinstance))\n}",
"func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n\tC.glowDrawElementsInstancedBaseVertexBaseInstance(gpDrawElementsInstancedBaseVertexBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex), (C.GLuint)(baseinstance))\n}",
"func (s *BasevhdlListener) EnterInstantiation_list(ctx *Instantiation_listContext) {}",
"func New() *List {\n return &List{size:0}\n}",
"func DrawElementsInstancedBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, baseinstance uint32) {\n C.glowDrawElementsInstancedBaseInstance(gpDrawElementsInstancedBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}",
"func NewBase() Base {\r\n\treturn Base{\r\n\t\tActive: \"\",\r\n\t\tTitle: \"Lemonade Stand Supply\",\r\n\t}\r\n}",
"func newListProcessor(ctx context.Context, dynamicClient dynamic.Interface, workerFn workerFunc) *listProcessor {\n\treturn &listProcessor{\n\t\tconcurrency: defaultConcurrency,\n\t\tworkerFn: workerFn,\n\t\tdynamicClient: dynamicClient,\n\t\tctx: ctx,\n\t}\n}",
"func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n\tsyscall.Syscall9(gpDrawElementsInstancedBaseVertexBaseInstance, 7, uintptr(mode), uintptr(count), uintptr(xtype), uintptr(indices), uintptr(instancecount), uintptr(basevertex), uintptr(baseinstance), 0, 0)\n}",
"func newListFormulaArg(l []formulaArg) formulaArg {\n\treturn formulaArg{Type: ArgList, List: l}\n}",
"func (bil *baseInstanceList) newMockCloud() cloud.Cloud {\n\tc := cloud.NewMockGCE(nil)\n\n\t// insert hooks to lazy create a instance when needed\n\tc.MockInstances.GetHook = bil.newGAGetHook()\n\tc.MockBetaInstances.GetHook = bil.newBetaGetHook()\n\n\treturn c\n}",
"func newProcBase(name, bin, serviceAddr string, loggers []Logger) *procBase {\n\tlog.Infof(\"%s has addr %s\", name, serviceAddr)\n\treturn &procBase{\n\t\tname: name,\n\t\tbin: bin,\n\t\tserviceAddr: serviceAddr,\n\t\tloggers: loggers,\n\t}\n}",
"func New(values ...uint16) (l *List) {\n\tl = &List{} // init the ptr\n\tfor _, value := range values {\n\t\tl.Insert(value)\n\t}\n\treturn l\n}",
"func NewList(list uint32, mode uint32) {\n\tsyscall.Syscall(gpNewList, 2, uintptr(list), uintptr(mode), 0)\n}",
"func baseListConvert(list baseIList) baseIList { return baseList(list.AsArray()) }",
"func NewList()(*List) {\n m := &List{\n BaseItem: *NewBaseItem(),\n }\n odataTypeValue := \"#microsoft.graph.list\";\n m.SetOdataType(&odataTypeValue);\n return m\n}",
"func newPodList(podsNumber int) *corev1.PodList {\n\tpods := []corev1.Pod{}\n\tfor i := 0; i < podsNumber; i++ {\n\t\tpods = append(pods, *newPod(fmt.Sprintf(\"test-pod%d\", i)))\n\t}\n\treturn &corev1.PodList{Items: pods}\n}",
"func DrawElementsInstancedBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, baseinstance uint32) {\n\tsyscall.Syscall6(gpDrawElementsInstancedBaseInstance, 6, uintptr(mode), uintptr(count), uintptr(xtype), uintptr(indices), uintptr(instancecount), uintptr(baseinstance))\n}",
"func newInstances(pod *Pod, prov provider.DataCenter, cfg *config.Instances) (*instances, error) {\n\tlog.Debug(\"Initializing Instances\")\n\n\ti := &instances{\n\t\tResources: resource.NewResources(),\n\t\tpod: pod,\n\t\tinstances: map[string]resource.Instance{},\n\t}\n\n\t// The reference to the network resource.\n\tnet := pod.Cluster().Compute().DataCenter().Network()\n\n\t// The availability zones available to these instances.\n\tavailabilityZones := net.AvailabilityZones()\n\n\t// The subnet group associated with these instances.\n\tsubnetGroup := net.SubnetGroups().Find(pod.SubnetGroup())\n\tif subnetGroup == nil {\n\t\treturn nil, fmt.Errorf(\"Cannot find subnet group %s configured for pod %s\", pod.SubnetGroup(), pod.Name())\n\t}\n\n\t// The keypair to be used with these instances.\n\tkeypair := pod.Cluster().Compute().KeyPair()\n\n\tn := 0\n\tfor _, conf := range *cfg {\n\t\t// Ensure the instance is uniquely named.\n\t\tif i.Find(conf.Name()) != nil {\n\t\t\treturn nil, fmt.Errorf(\"Instance name %q must be unique but is used multiple times\", conf.Name())\n\t\t}\n\n\t\t// The availability zone for this instance. Chosing via round robin. Always starting at 0.\n\t\taz := availabilityZones[n%len(availabilityZones)]\n\n\t\t// Get the subnet associated with the AZ.\n\t\tsubnetName := pod.SubnetGroup() + \"-\" + az\n\t\tsubnet := subnetGroup.Find(subnetName)\n\t\tif subnet == nil {\n\t\t\treturn nil, fmt.Errorf(\"Cannot find subnet %s configured for instance %s\", subnetName, conf.Name())\n\t\t}\n\n\t\tinstance, err := newInstance(pod, subnet, keypair, prov, conf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ti.instances[instance.Name()] = instance\n\t\ti.Append(instance)\n\n\t\tn++\n\t}\n\treturn i, nil\n}",
"func NewCustom_List(s *capnp.Segment, sz int32) (Custom_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 24, PointerCount: 1}, sz)\n\treturn Custom_List{l}, err\n}",
"func DrawArraysInstancedBaseInstance(mode uint32, first int32, count int32, instancecount int32, baseinstance uint32) {\n C.glowDrawArraysInstancedBaseInstance(gpDrawArraysInstancedBaseInstance, (C.GLenum)(mode), (C.GLint)(first), (C.GLsizei)(count), (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}",
"func DrawElementsInstancedBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, baseinstance uint32) {\n\tC.glowDrawElementsInstancedBaseInstance(gpDrawElementsInstancedBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}",
"func DrawElementsInstancedBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, baseinstance uint32) {\n\tC.glowDrawElementsInstancedBaseInstance(gpDrawElementsInstancedBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}",
"func newMemberlist(conf *Config) (*Memberlist, error) {\n\tif conf.ProtocolVersion < ProtocolVersionMin {\n\t\treturn nil, fmt.Errorf(\"Protocol version '%d' too low. Must be in range: [%d, %d]\",\n\t\t\tconf.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)\n\t} else if conf.ProtocolVersion > ProtocolVersionMax {\n\t\treturn nil, fmt.Errorf(\"Protocol version '%d' too high. Must be in range: [%d, %d]\",\n\t\t\tconf.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)\n\t}\n\n\tif len(conf.SecretKey) > 0 {\n\t\tif conf.Keyring == nil {\n\t\t\tkeyring, err := NewKeyring(nil, conf.SecretKey)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tconf.Keyring = keyring\n\t\t} else {\n\t\t\tif err := conf.Keyring.AddKey(conf.SecretKey); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif err := conf.Keyring.UseKey(conf.SecretKey); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tif conf.LogOutput != nil && conf.Logger != nil {\n\t\treturn nil, fmt.Errorf(\"Cannot specify both LogOutput and Logger. Please choose a single log configuration setting.\")\n\t}\n\n\tlogDest := conf.LogOutput\n\tif logDest == nil {\n\t\tlogDest = os.Stderr\n\t}\n\n\tlogger := conf.Logger\n\tif logger == nil {\n\t\tlogger = log.New(logDest, \"\", log.LstdFlags)\n\t}\n\n\t// Set up a network transport by default if a custom one wasn't given\n\t// by the config.\n\ttransport := conf.Transport\n\tif transport == nil {\n\t\tnc := &NetTransportConfig{\n\t\t\tBindAddrs: []string{conf.BindAddr},\n\t\t\tBindPort: conf.BindPort,\n\t\t\tLogger: logger,\n\t\t\tMetricLabels: conf.MetricLabels,\n\t\t}\n\n\t\t// See comment below for details about the retry in here.\n\t\tmakeNetRetry := func(limit int) (*NetTransport, error) {\n\t\t\tvar err error\n\t\t\tfor try := 0; try < limit; try++ {\n\t\t\t\tvar nt *NetTransport\n\t\t\t\tif nt, err = NewNetTransport(nc); err == nil {\n\t\t\t\t\treturn nt, nil\n\t\t\t\t}\n\t\t\t\tif strings.Contains(err.Error(), \"address already in use\") {\n\t\t\t\t\tlogger.Printf(\"[DEBUG] memberlist: Got bind error: %v\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil, fmt.Errorf(\"failed to obtain an address: %v\", err)\n\t\t}\n\n\t\t// The dynamic bind port operation is inherently racy because\n\t\t// even though we are using the kernel to find a port for us, we\n\t\t// are attempting to bind multiple protocols (and potentially\n\t\t// multiple addresses) with the same port number. 
We build in a\n\t\t// few retries here since this often gets transient errors in\n\t\t// busy unit tests.\n\t\tlimit := 1\n\t\tif conf.BindPort == 0 {\n\t\t\tlimit = 10\n\t\t}\n\n\t\tnt, err := makeNetRetry(limit)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Could not set up network transport: %v\", err)\n\t\t}\n\t\tif conf.BindPort == 0 {\n\t\t\tport := nt.GetAutoBindPort()\n\t\t\tconf.BindPort = port\n\t\t\tconf.AdvertisePort = port\n\t\t\tlogger.Printf(\"[DEBUG] memberlist: Using dynamic bind port %d\", port)\n\t\t}\n\t\ttransport = nt\n\t}\n\n\tnodeAwareTransport, ok := transport.(NodeAwareTransport)\n\tif !ok {\n\t\tlogger.Printf(\"[DEBUG] memberlist: configured Transport is not a NodeAwareTransport and some features may not work as desired\")\n\t\tnodeAwareTransport = &shimNodeAwareTransport{transport}\n\t}\n\n\tif len(conf.Label) > LabelMaxSize {\n\t\treturn nil, fmt.Errorf(\"could not use %q as a label: too long\", conf.Label)\n\t}\n\n\tif conf.Label != \"\" {\n\t\tnodeAwareTransport = &labelWrappedTransport{\n\t\t\tlabel: conf.Label,\n\t\t\tNodeAwareTransport: nodeAwareTransport,\n\t\t}\n\t}\n\n\tm := &Memberlist{\n\t\tconfig: conf,\n\t\tshutdownCh: make(chan struct{}),\n\t\tleaveBroadcast: make(chan struct{}, 1),\n\t\ttransport: nodeAwareTransport,\n\t\thandoffCh: make(chan struct{}, 1),\n\t\thighPriorityMsgQueue: list.New(),\n\t\tlowPriorityMsgQueue: list.New(),\n\t\tnodeMap: make(map[string]*nodeState),\n\t\tnodeTimers: make(map[string]*suspicion),\n\t\tawareness: newAwareness(conf.AwarenessMaxMultiplier, conf.MetricLabels),\n\t\tackHandlers: make(map[uint32]*ackHandler),\n\t\tbroadcasts: &TransmitLimitedQueue{RetransmitMult: conf.RetransmitMult},\n\t\tlogger: logger,\n\t\tmetricLabels: conf.MetricLabels,\n\t}\n\tm.broadcasts.NumNodes = func() int {\n\t\treturn m.estNumNodes()\n\t}\n\n\t// Get the final advertise address from the transport, which may need\n\t// to see which address we bound to. We'll refresh this each time we\n\t// send out an alive message.\n\tif _, _, err := m.refreshAdvertise(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo m.streamListen()\n\tgo m.packetListen()\n\tgo m.packetHandler()\n\treturn m, nil\n}",
"func newToDoList() toDoList {\n\treturn toDoList{}\n}",
"func newObjectList() *ObjectList {\n\treturn &ObjectList{\n\t\tObjectIDs: make([]int, 0, 200),\n\t}\n}",
"func NewList(vs ...Value) Value {\n\treturn StrictPrepend(vs, EmptyList)\n}",
"func NewList(list uint32, mode uint32) {\n C.glowNewList(gpNewList, (C.GLuint)(list), (C.GLenum)(mode))\n}",
"func NewList(client *secretsapi.Client, p listPrimeable) *List {\n\treturn &List{\n\t\tsecretsClient: client,\n\t\tout: p.Output(),\n\t\tproj: p.Project(),\n\t}\n}",
"func NewList(e Type, i *debug.Information) List {\n\treturn List{e, i}\n}",
"func NewList() List {\n\treturn List{}\n}",
"func newListMetrics() *listMetrics {\n\treturn new(listMetrics)\n}",
"func (bil *baseInstanceList) getOrCreateBaseInstance(key *meta.Key) *baseInstance {\n\tbil.lock.Lock()\n\tdefer bil.lock.Unlock()\n\n\tinst, found := bil.instances[*key]\n\tif !found {\n\t\tinst = &baseInstance{name: key.Name, zone: key.Zone}\n\t\tif bil.allocateCIDR {\n\t\t\tnextRange, _ := bil.cidrSet.AllocateNext()\n\t\t\tinst.aliasRange = nextRange.String()\n\t\t}\n\t\tbil.instances[*key] = inst\n\t}\n\treturn inst\n}",
"func New(l interface{}) list.Interface {\n\tif reflect.TypeOf(l).Kind() != reflect.Slice {\n\t\tpanic(fmt.Errorf(\"Param must be a slice\"))\n\t}\n\n\ts := reflect.ValueOf(l)\n\titems := make([]interface{}, s.Len())\n\n\tfor i := 0; i < len(items); i++ {\n\t\titems[i] = s.Index(i).Interface()\n\t}\n\n\treturn &randList{\n\t\tlist: items,\n\t\tperm: rand.Perm(len(items)),\n\t\toffset: 0,\n\t}\n}",
"func createList(arg string) []string {\n\tvar retObject = []string{arg}\n\treturn retObject\n}",
"func NewList(parent sparta.Widget, name string, rect image.Rectangle) *List {\n\tl := &List{\n\t\tname: name,\n\t\tparent: parent,\n\t\tgeometry: rect,\n\t\tback: backColor,\n\t\tfore: foreColor,\n\t\ttarget: parent,\n\t}\n\tsparta.NewWindow(l)\n\tl.scroll = NewScroll(l, \"list\"+name+\"Scroll\", 0, 0, Vertical, image.Rect(rect.Dx()-10, 0, rect.Dx(), rect.Dy()))\n\treturn l\n}",
"func New(vals ...interface{}) *List {\n\thead := list.New()\n\tfor _, v := range vals {\n\t\thead.PushBack(v)\n\t}\n\treturn &List{head}\n}",
"func newListFromUIDs(uids []string) *CSPList {\n\treturn NewListBuilder().WithUIDs(uids...).List()\n}",
"func newReassemblyList(epoch int, capacity int, s ingressSender,\n\tframesDiscarded metrics.Counter) *reassemblyList {\n\n\tlist := &reassemblyList{\n\t\tepoch: epoch,\n\t\tcapacity: capacity,\n\t\tsnd: s,\n\t\tmarkedForDeletion: false,\n\t\tentries: list.New(),\n\t\tbuf: bytes.NewBuffer(make([]byte, 0, frameBufCap)),\n\t}\n\tif framesDiscarded != nil {\n\t\tlist.tooOld = framesDiscarded.With(\"reason\", \"too_old\")\n\t\tlist.duplicate = framesDiscarded.With(\"reason\", \"duplicate\")\n\t\tlist.evicted = framesDiscarded.With(\"reason\", \"evicted\")\n\t\tlist.invalid = framesDiscarded.With(\"reason\", \"invalid\")\n\t}\n\treturn list\n}",
"func newErrorList() *errorList {\n\treturn &errorList{\n\t\tlist: make([]string, 0, 16),\n\t}\n}",
"func newBaseCount() baseCount {\n\treturn baseCount{words: make(map[word]int)}\n}",
"func NewList(g ...Getter) *List {\n\tlist := &List{\n\t\tlist: g,\n\t}\n\tlist.GetProxy = NewGetProxy(list) // self\n\treturn list\n}",
"func newBGPFilterList() *api.BGPFilterList {\n\treturn &api.BGPFilterList{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: api.KindBGPFilterList,\n\t\t\tAPIVersion: api.GroupVersionCurrent,\n\t\t},\n\t}\n}",
"func DrawArraysInstancedBaseInstance(mode uint32, first int32, count int32, instancecount int32, baseinstance uint32) {\n\tsyscall.Syscall6(gpDrawArraysInstancedBaseInstance, 5, uintptr(mode), uintptr(first), uintptr(count), uintptr(instancecount), uintptr(baseinstance), 0)\n}",
"func InitializeList(uninitializedList *List, itemSize uint64) {\n uninitializedList.itemSize = itemSize;\n uninitializedList.capacity = 16; //Allocate 16 items by default\n uninitializedList.baseAddress = Alloc(uninitializedList.capacity * uninitializedList.itemSize);\n uninitializedList.itemCount = 0; //Reset item count (to zero)\n}",
"func NewBasePool() BasePool {\n\treturn BasePool{\n\t\tlastTuneTs: *atomicutil.NewTime(time.Now()),\n\t}\n}",
"func DrawArraysInstancedBaseInstance(mode uint32, first int32, count int32, instancecount int32, baseinstance uint32) {\n\tC.glowDrawArraysInstancedBaseInstance(gpDrawArraysInstancedBaseInstance, (C.GLenum)(mode), (C.GLint)(first), (C.GLsizei)(count), (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}",
"func DrawArraysInstancedBaseInstance(mode uint32, first int32, count int32, instancecount int32, baseinstance uint32) {\n\tC.glowDrawArraysInstancedBaseInstance(gpDrawArraysInstancedBaseInstance, (C.GLenum)(mode), (C.GLint)(first), (C.GLsizei)(count), (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}",
"func (m *MockLoadBalancerServiceIface) NewListLoadBalancerRuleInstancesParams(id string) *ListLoadBalancerRuleInstancesParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewListLoadBalancerRuleInstancesParams\", id)\n\tret0, _ := ret[0].(*ListLoadBalancerRuleInstancesParams)\n\treturn ret0\n}",
"func New(values ...interface{}) *List {\n\tlist := &List{}\n\tif len(values) > 0 {\n\t\tlist.Add(values...)\n\t}\n\treturn list\n}",
"func New(values ...interface{}) *List {\n\tlist := &List{}\n\tif len(values) > 0 {\n\t\tlist.Add(values)\n\t}\n\treturn list\n}",
"func NewList(args ...interface{}) *List {\n\tl := &List{}\n\tfor _, v := range args {\n\t\tl.PushBack(v)\n\t}\n\treturn l\n}",
"func DrawElementsInstancedBaseVertex(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32) {\n C.glowDrawElementsInstancedBaseVertex(gpDrawElementsInstancedBaseVertex, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex))\n}",
"func NewGraph(base Base) {\n\n}",
"func (pr *pluginRegistry) InstanceList() []*Instance {\n\tpr.mut.Lock()\n\tdefer pr.mut.Unlock()\n\n\t// this gets called in the router for every message that comes in, so it\n\t// might come to pass that this will perform poorly, but for now with a\n\t// relatively small number of instances we'll take the copy hit in exchange\n\t// for not having to think about concurrent access to the list\n\tout := make([]*Instance, len(pr.instances))\n\tcopy(out, pr.instances) // intentional shallow copy\n\treturn out\n}",
"func newBaseClient() *baseClient {\n\treturn &baseClient{\n\t\thttpClient: http.DefaultClient,\n\t\tmethod: \"GET\",\n\t\theader: make(http.Header),\n\t}\n}",
"func newFiltBase(n int, mu float64, w []float64) (AdaptiveFilter, error) {\n\tvar err error\n\tp := new(filtBase)\n\tp.kind = \"Base filter\"\n\tp.n = n\n\tp.muMin = 0\n\tp.muMax = 1000\n\tp.mu, err = p.checkFloatParam(mu, p.muMin, p.muMax, \"mu\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = p.initWeights(w, n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}",
"func New(elems ...interface{}) List {\n\tl := Mzero()\n\tfor _, elem := range elems {\n\t\tl = Cons(elem, l)\n\t}\n\treturn Reverse(l)\n}",
"func newListFromUIDNode(UIDNodeMap map[string]string) *CSPList {\n\treturn NewListBuilder().WithUIDNode(UIDNodeMap).List()\n}",
"func newRpcServices(c *RpccontrollerV1Client, namespace string) *rpcServices {\n\treturn &rpcServices{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}",
"func (s CHF) NewCustom(n int32) (Custom_List, error) {\n\tl, err := NewCustom_List(s.Struct.Segment(), n)\n\tif err != nil {\n\t\treturn Custom_List{}, err\n\t}\n\terr = s.Struct.SetPtr(9, l.List.ToPtr())\n\treturn l, err\n}",
"func newCoverageList(name string) *CoverageList {\n\treturn &CoverageList{\n\t\tCoverage: &Coverage{Name: name},\n\t\tGroup: []Coverage{},\n\t}\n}",
"func New(maxlevel int, cmpFn CompareFn) *List {\n\treturn NewCustom(maxlevel, DefaultProbability, cmpFn, time.Now().Unix())\n}",
"func newVaultLists(c *VaultV1alpha1Client, namespace string) *vaultLists {\n\treturn &vaultLists{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}",
"func NewObj_List(s *capnp.Segment, sz int32) (Obj_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz)\n\treturn Obj_List{l}, err\n}",
"func getInstanceList(nodeNames sets.String) *compute.InstanceGroupsListInstances {\n\tinstanceNames := nodeNames.List()\n\tcomputeInstances := []*compute.InstanceWithNamedPorts{}\n\tfor _, name := range instanceNames {\n\t\tinstanceLink := getInstanceUrl(name)\n\t\tcomputeInstances = append(\n\t\t\tcomputeInstances, &compute.InstanceWithNamedPorts{\n\t\t\t\tInstance: instanceLink})\n\t}\n\treturn &compute.InstanceGroupsListInstances{\n\t\tItems: computeInstances,\n\t}\n}",
"func newTestList() LinkedList {\n\treturn newList(16384, 8192, func(baseIdx int, data []int) {\n\t\tfor i := range data {\n\t\t\tdata[i] = baseIdx + i\n\t\t}\n\t})\n}",
"func NewList(args ...interface{}) *List {\n\tl := List{}\n\tfor _, data := range args {\n\t\tl.PushBack(data)\n\t}\n\treturn &l\n}",
"func NewList(list uint32, mode uint32) {\n\tC.glowNewList(gpNewList, (C.GLuint)(list), (C.GLenum)(mode))\n}",
"func NewList(vs ...Value) List {\n\treturn List{&vs}\n}",
"func initList(myId id) []id {\n\tvar MembershipList []id\n\tMembershipList = append(MembershipList, myId)\n\treturn MembershipList\n}",
"func Constructor() MyHashSet {\n\treturn MyHashSet{make([]list.List, base)}\n\n}",
"func NewListCommand(parent common.Registerer, globals *config.Data) *ListCommand {\n\tvar c ListCommand\n\tc.Globals = globals\n\tc.manifest.File.SetOutput(c.Globals.Output)\n\tc.manifest.File.Read(manifest.Filename)\n\tc.CmdClause = parent.Command(\"list\", \"List Syslog endpoints on a Fastly service version\")\n\tc.CmdClause.Flag(\"service-id\", \"Service ID\").Short('s').StringVar(&c.manifest.Flag.ServiceID)\n\tc.CmdClause.Flag(\"version\", \"Number of service version\").Required().IntVar(&c.Input.ServiceVersion)\n\treturn &c\n}",
"func NewList() *List {\n\tl := List{\n\t\tpostings: make(map[uint64]*Posting),\n\t}\n\treturn &l\n}",
"func NewList() List {\n\tl := List{}\n\tl.Set = make(map[string]int)\n\treturn l\n}",
"func newBaseRunner(collector *resourceStatusCollector) *baseRunner {\n\treturn &baseRunner{\n\t\tcollector: collector,\n\t}\n}",
"func NewListOpts(validator ValidatorFctType) ListOpts {\n var values []string\n return *NewListOptsRef(&values, validator)\n}",
"func NewList(initial []W) UpdatableList {\n\tul := &updatableList{}\n\tul.Update(initial)\n\treturn ul\n}",
"func newModule(base mb.BaseModule) (mb.Module, error) {\n\t// Validate that at least one host has been specified.\n\tconfig := struct {\n\t\tHosts []string `config:\"hosts\" validate:\"nonzero,required\"`\n\t}{}\n\tif err := base.UnpackConfig(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &base, nil\n}",
"func initPeerList() {\n\t// peerList\n\tpeerList = make([]Peer, MaxPeers)\n\n\t// populate peerList with dead peers\n\tfor i := 0; i < MaxPeers; i++ {\n\t\tpeerList[i] = Peer{expirationTimer: 0}\n\t}\n}",
"func NewList() List {\n\treturn make(List, 0)\n}",
"func (p PageListOrderedItemBlocks) construct() PageListOrderedItemClass { return &p }",
"func newInstance(x *runtime.Runtime, p *build.Instance, v *adt.Vertex) *Instance {\n\t// TODO: associate root source with structLit.\n\tinst := &Instance{\n\t\troot: v,\n\t\tinst: p,\n\t}\n\tif p != nil {\n\t\tinst.ImportPath = p.ImportPath\n\t\tinst.Dir = p.Dir\n\t\tinst.PkgName = p.PkgName\n\t\tinst.DisplayName = p.ImportPath\n\t\tif p.Err != nil {\n\t\t\tinst.setListOrError(p.Err)\n\t\t}\n\t}\n\n\tx.AddInst(p.ImportPath, v, p)\n\tx.SetBuildData(p, inst)\n\tinst.index = x\n\treturn inst\n}",
"func NewList(slice []Unit) List {\n\treturn unitlist{slice}\n}",
"func (e *exprHelper) NewList(elems ...ast.Expr) ast.Expr {\n\treturn e.exprFactory.NewList(e.nextMacroID(), elems, []int32{})\n}"
] | [
"0.6095147",
"0.6051629",
"0.60512936",
"0.60462177",
"0.6043791",
"0.5972696",
"0.58985627",
"0.5896269",
"0.58636135",
"0.5861767",
"0.57537234",
"0.573758",
"0.5711063",
"0.5693298",
"0.5691532",
"0.5683488",
"0.5661154",
"0.5661154",
"0.5643412",
"0.56272656",
"0.5625379",
"0.5610555",
"0.5580606",
"0.5561794",
"0.55542386",
"0.5538938",
"0.55370694",
"0.5524617",
"0.5523634",
"0.5518314",
"0.5477948",
"0.5472562",
"0.53989154",
"0.53810954",
"0.5375317",
"0.53735423",
"0.53653544",
"0.53653544",
"0.5331648",
"0.5327969",
"0.53155744",
"0.5299514",
"0.52828664",
"0.52628875",
"0.52416325",
"0.5241076",
"0.52374166",
"0.5234415",
"0.51587874",
"0.5158299",
"0.51518995",
"0.5135716",
"0.51280975",
"0.51187694",
"0.510828",
"0.5108234",
"0.51073116",
"0.5105616",
"0.5099046",
"0.50945735",
"0.50900227",
"0.5082316",
"0.5082316",
"0.5077379",
"0.505801",
"0.5057991",
"0.50555605",
"0.50524056",
"0.50481474",
"0.50480324",
"0.50468075",
"0.5046412",
"0.50434965",
"0.5038662",
"0.503643",
"0.5029013",
"0.50274116",
"0.50236416",
"0.5023445",
"0.5017092",
"0.50078213",
"0.49991798",
"0.49977162",
"0.4996381",
"0.49730483",
"0.49658477",
"0.496181",
"0.49548334",
"0.49285877",
"0.4925838",
"0.4920352",
"0.4896176",
"0.48828945",
"0.48783025",
"0.48701972",
"0.48685795",
"0.48639762",
"0.4862713",
"0.48625156",
"0.48582953"
] | 0.8217321 | 0 |
getOrCreateBaseInstance lazily creates a new base instance, assigning if allocateCIDR is true | func (bil *baseInstanceList) getOrCreateBaseInstance(key *meta.Key) *baseInstance {
bil.lock.Lock()
defer bil.lock.Unlock()
inst, found := bil.instances[*key]
if !found {
inst = &baseInstance{name: key.Name, zone: key.Zone}
if bil.allocateCIDR {
nextRange, _ := bil.cidrSet.AllocateNext()
inst.aliasRange = nextRange.String()
}
bil.instances[*key] = inst
}
return inst
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func newBaseInstanceList(allocateCIDR bool, clusterCIDR *net.IPNet, subnetMaskSize int) *baseInstanceList {\n\tcidrSet, _ := cidrset.NewCIDRSet(clusterCIDR, subnetMaskSize)\n\treturn &baseInstanceList{\n\t\tallocateCIDR: allocateCIDR,\n\t\tclusterCIDR: clusterCIDR,\n\t\tsubnetMaskSize: subnetMaskSize,\n\t\tcidrSet: cidrSet,\n\t\tinstances: make(map[meta.Key]*baseInstance),\n\t}\n}",
"func newBase() *base {\n\treturn &base{shared.NewUUID(), time.Now().UTC(), time.Now().UTC(), false/*, shared.NewUUID()*/}\n}",
"func newProcBase(name, bin, serviceAddr string, loggers []Logger) *procBase {\n\tlog.Infof(\"%s has addr %s\", name, serviceAddr)\n\treturn &procBase{\n\t\tname: name,\n\t\tbin: bin,\n\t\tserviceAddr: serviceAddr,\n\t\tloggers: loggers,\n\t}\n}",
"func (instance *Network) Create(ctx context.Context, req abstract.NetworkRequest) (xerr fail.Error) {\n\tdefer fail.OnPanic(&xerr)\n\n\tif instance == nil || instance.IsNull() {\n\t\treturn fail.InvalidInstanceError()\n\t}\n\tif ctx == nil {\n\t\treturn fail.InvalidParameterCannotBeNilError(\"ctx\")\n\t}\n\n\ttask, xerr := concurrency.TaskFromContext(ctx)\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\tif task.Aborted() {\n\t\treturn fail.AbortedError(nil, \"aborted\")\n\t}\n\n\ttracer := debug.NewTracer(task, true, \"('%s', '%s')\", req.Name, req.CIDR).WithStopwatch().Entering()\n\tdefer tracer.Exiting()\n\n\tinstance.lock.Lock()\n\tdefer instance.lock.Unlock()\n\n\t// Check if subnet already exists and is managed by SafeScale\n\tsvc := instance.GetService()\n\tif existing, xerr := LoadNetwork(svc, req.Name); xerr == nil {\n\t\texisting.Released()\n\t\treturn fail.DuplicateError(\"Network '%s' already exists\", req.Name)\n\t}\n\n\tif task.Aborted() {\n\t\treturn fail.AbortedError(nil, \"aborted\")\n\t}\n\n\t// Verify if the subnet already exist and in this case is not managed by SafeScale\n\t_, xerr = svc.InspectNetworkByName(req.Name)\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\tswitch xerr.(type) {\n\t\tcase *fail.ErrNotFound:\n\t\t\t// continue\n\t\tdefault:\n\t\t\treturn xerr\n\t\t}\n\t} else {\n\t\treturn fail.DuplicateError(\"Network '%s' already exists (not managed by SafeScale)\", req.Name)\n\t}\n\n\t// Verify the CIDR is not routable\n\tif req.CIDR != \"\" {\n\t\troutable, xerr := netretry.IsCIDRRoutable(req.CIDR)\n\t\txerr = debug.InjectPlannedFail(xerr)\n\t\tif xerr != nil {\n\t\t\treturn fail.Wrap(xerr, \"failed to determine if CIDR is not routable\")\n\t\t}\n\n\t\tif routable {\n\t\t\treturn fail.InvalidRequestError(\"cannot create such a Networking, CIDR must not be routable; please choose an appropriate CIDR (RFC1918)\")\n\t\t}\n\t}\n\n\tif task.Aborted() {\n\t\treturn fail.AbortedError(nil, \"aborted\")\n\t}\n\n\t// Create the Network\n\tlogrus.Debugf(\"Creating Network '%s' with CIDR '%s'...\", req.Name, req.CIDR)\n\tan, xerr := svc.CreateNetwork(req)\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\tdefer func() {\n\t\tif xerr != nil && !req.KeepOnFailure {\n\t\t\tderr := svc.DeleteNetwork(an.ID)\n\t\t\tderr = debug.InjectPlannedFail(derr)\n\t\t\tif derr != nil {\n\t\t\t\t_ = xerr.AddConsequence(fail.Wrap(derr, \"cleaning up on failure, failed to delete Network\"))\n\t\t\t}\n\t\t}\n\t}()\n\n\tif task.Aborted() {\n\t\treturn fail.AbortedError(nil, \"aborted\")\n\t}\n\n\t// Write subnet object metadata\n\t// logrus.Debugf(\"Saving subnet metadata '%s' ...\", subnet.GetName)\n\treturn instance.carry(an)\n}",
"func (r *Reconciler) create() error {\n\tif err := validateMachine(*r.machine); err != nil {\n\t\treturn fmt.Errorf(\"%v: failed validating machine provider spec: %w\", r.machine.GetName(), err)\n\t}\n\n\tif ipam.HasStaticIPConfiguration(r.providerSpec) {\n\t\tif !r.staticIPFeatureGateEnabled {\n\t\t\treturn fmt.Errorf(\"%v: static IP/IPAM configuration is only available with the VSphereStaticIPs feature gate\", r.machine.GetName())\n\t\t}\n\n\t\toutstandingClaims, err := ipam.HasOutstandingIPAddressClaims(\n\t\t\tr.Context,\n\t\t\tr.client,\n\t\t\tr.machine,\n\t\t\tr.providerSpec.Network.Devices,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcondition := metav1.Condition{\n\t\t\tType: string(machinev1.IPAddressClaimedCondition),\n\t\t\tReason: machinev1.WaitingForIPAddressReason,\n\t\t\tMessage: \"All IP address claims are bound\",\n\t\t\tStatus: metav1.ConditionFalse,\n\t\t}\n\t\tif outstandingClaims > 0 {\n\t\t\tcondition.Message = fmt.Sprintf(\"Waiting on %d IP address claims to be bound\", outstandingClaims)\n\t\t\tcondition.Status = metav1.ConditionTrue\n\t\t\tklog.Infof(\"Waiting for IPAddressClaims associated with machine %s to be bound\", r.machine.Name)\n\t\t}\n\t\tif err := setProviderStatus(\"\", condition, r.machineScope, nil); err != nil {\n\t\t\treturn fmt.Errorf(\"could not set provider status: %w\", err)\n\t\t}\n\t}\n\n\t// We only clone the VM template if we have no taskRef.\n\tif r.providerStatus.TaskRef == \"\" {\n\t\tif !r.machineScope.session.IsVC() {\n\t\t\treturn fmt.Errorf(\"%v: not connected to a vCenter\", r.machine.GetName())\n\t\t}\n\t\tklog.Infof(\"%v: cloning\", r.machine.GetName())\n\t\ttask, err := clone(r.machineScope)\n\t\tif err != nil {\n\t\t\tmetrics.RegisterFailedInstanceCreate(&metrics.MachineLabels{\n\t\t\t\tName: r.machine.Name,\n\t\t\t\tNamespace: r.machine.Namespace,\n\t\t\t\tReason: \"Clone task finished with error\",\n\t\t\t})\n\t\t\tconditionFailed := conditionFailed()\n\t\t\tconditionFailed.Message = err.Error()\n\t\t\tstatusError := setProviderStatus(task, conditionFailed, r.machineScope, nil)\n\t\t\tif statusError != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to set provider status: %w\", err)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\treturn setProviderStatus(task, conditionSuccess(), r.machineScope, nil)\n\t}\n\n\tmoTask, err := r.session.GetTask(r.Context, r.providerStatus.TaskRef)\n\tif err != nil {\n\t\tmetrics.RegisterFailedInstanceCreate(&metrics.MachineLabels{\n\t\t\tName: r.machine.Name,\n\t\t\tNamespace: r.machine.Namespace,\n\t\t\tReason: \"GetTask finished with error\",\n\t\t})\n\t\treturn err\n\t}\n\n\tif moTask == nil {\n\t\t// Possible eventual consistency problem from vsphere\n\t\t// TODO: change error message here to indicate this might be expected.\n\t\treturn fmt.Errorf(\"unexpected moTask nil\")\n\t}\n\n\tif taskIsFinished, err := taskIsFinished(moTask); err != nil {\n\t\tif taskIsFinished {\n\t\t\tmetrics.RegisterFailedInstanceCreate(&metrics.MachineLabels{\n\t\t\t\tName: r.machine.Name,\n\t\t\t\tNamespace: r.machine.Namespace,\n\t\t\t\tReason: \"Task finished with error\",\n\t\t\t})\n\t\t\tconditionFailed := conditionFailed()\n\t\t\tconditionFailed.Message = err.Error()\n\t\t\tstatusError := setProviderStatus(moTask.Reference().Value, conditionFailed, r.machineScope, nil)\n\t\t\tif statusError != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to set provider status: %w\", statusError)\n\t\t\t}\n\t\t\treturn machinecontroller.CreateMachine(err.Error())\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to check task 
status: %w\", err)\n\t\t}\n\t} else if !taskIsFinished {\n\t\treturn fmt.Errorf(\"%v task %v has not finished\", moTask.Info.DescriptionId, moTask.Reference().Value)\n\t}\n\n\t// if clone task finished successfully, power on the vm\n\tif moTask.Info.DescriptionId == cloneVmTaskDescriptionId {\n\t\tklog.Infof(\"Powering on cloned machine: %v\", r.machine.Name)\n\t\ttask, err := powerOn(r.machineScope)\n\t\tif err != nil {\n\t\t\tmetrics.RegisterFailedInstanceCreate(&metrics.MachineLabels{\n\t\t\t\tName: r.machine.Name,\n\t\t\t\tNamespace: r.machine.Namespace,\n\t\t\t\tReason: \"PowerOn task finished with error\",\n\t\t\t})\n\t\t\tconditionFailed := conditionFailed()\n\t\t\tconditionFailed.Message = err.Error()\n\t\t\tstatusError := setProviderStatus(task, conditionFailed, r.machineScope, nil)\n\t\t\tif statusError != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to set provider status: %w\", err)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\treturn setProviderStatus(task, conditionSuccess(), r.machineScope, nil)\n\t}\n\n\t// If taskIsFinished then next reconcile should result in update.\n\treturn nil\n}",
"func appNumOnUNetBaseCreate(baseID uuid.UUID) *types.Bitmap {\n\tif appNumOnUNetBaseGet(baseID) == nil {\n\t\tlog.Functionf(\"appNumOnUNetBaseCreate (%s)\", baseID.String())\n\t\tappNumBase[baseID.String()] = new(types.Bitmap)\n\t}\n\treturn appNumOnUNetBaseGet(baseID)\n}",
"func newInstance(moduleName, name string, priv interface{}) (*BaseInstance, error) {\n\tm, found := modules[moduleName]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"No such module: %s\", moduleName)\n\t}\n\n\tif _, exists := m.instance[name]; exists {\n\t\treturn nil, fmt.Errorf(\"%s already exists in %s\", name, moduleName)\n\t}\n\n\tbi := &BaseInstance{name: name, module: m, subinstance: false}\n\n\tringName := fmt.Sprintf(\"input-%s\", name)\n\tbi.input = dpdk.RingCreate(ringName, m.ringParam.Count, m.ringParam.SocketId, dpdk.RING_F_SC_DEQ)\n\tif bi.input == nil {\n\t\treturn nil, fmt.Errorf(\"Input ring creation faild for %s.\\n\", name)\n\t}\n\n\tif m.ringParam.SecondaryInput {\n\t\tringName := fmt.Sprintf(\"input2-%s\", name)\n\t\tbi.input2 = dpdk.RingCreate(ringName, m.ringParam.Count, m.ringParam.SocketId, dpdk.RING_F_SC_DEQ)\n\t\tif bi.input2 == nil {\n\t\t\treturn nil, fmt.Errorf(\"Second input ring creation failed for %s\", name)\n\t\t}\n\t}\n\n\tbi.rules = newRules()\n\n\tif m.moduleType == TypeInterface || m.moduleType == TypeRIF {\n\t\tbi.counter = NewCounter()\n\t}\n\n\tinstance, err := m.factory(bi, priv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Creating module '%s' with name '%s' failed: %v\\n\", moduleName, name, err)\n\t}\n\tbi.instance = instance\n\n\t// Set rule observer, if the module complies to RulesNotify.\n\tif rn, ok := instance.(RulesNotify); ok {\n\t\tbi.rules.setRulesNotify(rn)\n\t}\n\n\tm.instance[name] = bi\n\n\treturn bi, nil\n}",
"func newBaseRuntime(erp *ECALRuntimeProvider, node *parser.ASTNode) *baseRuntime {\n\tinstanceCounter++\n\treturn &baseRuntime{fmt.Sprint(instanceCounter), erp, node, false}\n}",
"func newInstance(moduleName, name string, priv interface{}) (*BaseInstance, error) {\n\tfactory, found := instanceFactories[moduleName]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"Module '%s' doesn't exist.\\n\", moduleName)\n\t}\n\n\trp, ok := ringParams[moduleName]\n\tif !ok {\n\t\trp = defaultRingParam\n\t}\n\n\tbi := &BaseInstance{name: name}\n\n\tringName := fmt.Sprintf(\"input-%s\", name)\n\tbi.input = dpdk.RingCreate(ringName, rp.Count, rp.SocketId, dpdk.RING_F_SC_DEQ)\n\tif bi.input == nil {\n\t\treturn nil, fmt.Errorf(\"Input ring creation faild for %s.\\n\", name)\n\t}\n\n\tif rp.SecondaryInput {\n\t\tringName := fmt.Sprintf(\"input2-%s\", name)\n\t\tbi.input2 = dpdk.RingCreate(ringName, rp.Count, rp.SocketId, dpdk.RING_F_SC_DEQ)\n\t\tif bi.input2 == nil {\n\t\t\treturn nil, fmt.Errorf(\"Second input ring creation failed for %s\", name)\n\t\t}\n\t}\n\n\tbi.rules = newRules()\n\n\tinstance, err := factory(bi, priv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Creating module '%s' with name '%s' failed: %v\\n\", moduleName, name, err)\n\t}\n\tbi.instance = instance\n\n\treturn bi, nil\n}",
"func NewBase(opt Opts) Dialer {\n\trv := &base{\n\t\tnetDialer: net.Dialer{\n\t\t\tTimeout: opt.GetTimeout(),\n\t\t\tControl: reuseport.Control,\n\t\t},\n\t\ttlsConfigs: cache.New(TLSConfigCacheSize,\n\t\t\tTLSConfigTTL,\n\t\t\tcache.NoopEvictCallback),\n\t\ttlsSkipVerify: opt.GetTLSSkipVerify(),\n\t}\n\n\treturn rv\n}",
"func MakeBase(name, key, owner string, defaultValue interface{}, lifetime Lifetime, expose bool) Base {\n\treturn Base{\n\t\tname: name,\n\t\tkey: key,\n\t\towner: owner,\n\t\tdefaultValue: defaultValue,\n\t\tlifetime: lifetime,\n\t\texpose: expose,\n\t}\n}",
"func NewBasePool() BasePool {\n\treturn BasePool{\n\t\tlastTuneTs: *atomicutil.NewTime(time.Now()),\n\t}\n}",
"func newCache(nbClient libovsdbclient.Client) (*LBCache, error) {\n\t// first, list all load balancers\n\tlbs, err := listLBs(nbClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := LBCache{}\n\tc.existing = make(map[string]*CachedLB, len(lbs))\n\n\tfor i := range lbs {\n\t\tc.existing[lbs[i].UUID] = &lbs[i]\n\t}\n\n\tps := func(item *nbdb.LogicalSwitch) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\tswitches, err := libovsdbops.FindLogicalSwitchesWithPredicate(nbClient, ps)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ls := range switches {\n\t\tfor _, lbuuid := range ls.LoadBalancer {\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Switches.Insert(ls.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\tpr := func(item *nbdb.LogicalRouter) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\trouters, err := libovsdbops.FindLogicalRoutersWithPredicate(nbClient, pr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, router := range routers {\n\t\tfor _, lbuuid := range router.LoadBalancer {\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Routers.Insert(router.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Get non-empty LB groups\n\tpg := func(item *nbdb.LoadBalancerGroup) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\tgroups, err := libovsdbops.FindLoadBalancerGroupsWithPredicate(nbClient, pg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, group := range groups {\n\t\tfor _, lbuuid := range group.LoadBalancer {\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Groups.Insert(group.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &c, nil\n}",
"func newPrimary() *proxy {\n\tvar (\n\t\tp = &proxy{}\n\t\ttracker = mock.NewStatsTracker()\n\t\tsmap = newSmap()\n\t)\n\n\tp.owner.smap = newSmapOwner(cmn.GCO.Get())\n\tp.si = meta.NewSnode(\"primary\", apc.Proxy, meta.NetInfo{}, meta.NetInfo{}, meta.NetInfo{})\n\n\tsmap.addProxy(p.si)\n\tsmap.Primary = p.si\n\tp.owner.smap.put(smap)\n\n\tconfig := cmn.GCO.BeginUpdate()\n\tconfig.ConfigDir = \"/tmp/ais-tests\"\n\tconfig.Periodic.RetrySyncTime = cos.Duration(time.Millisecond * 100)\n\tconfig.Keepalive.Proxy.Name = \"heartbeat\"\n\tconfig.Keepalive.Proxy.Interval = cos.Duration(3 * time.Second)\n\tconfig.Timeout.CplaneOperation = cos.Duration(2 * time.Second)\n\tconfig.Timeout.MaxKeepalive = cos.Duration(4 * time.Second)\n\tconfig.Client.Timeout = cos.Duration(10 * time.Second)\n\tconfig.Client.TimeoutLong = cos.Duration(10 * time.Second)\n\tconfig.Cksum.Type = cos.ChecksumXXHash\n\tcmn.GCO.CommitUpdate(config)\n\tcmn.GCO.SetInitialGconfPath(\"/tmp/ais-tests/ais.config\")\n\n\tp.client.data = &http.Client{}\n\tp.client.control = &http.Client{}\n\tp.keepalive = newPalive(p, tracker, atomic.NewBool(true))\n\n\to := newBMDOwnerPrx(config)\n\to.put(newBucketMD())\n\tp.owner.bmd = o\n\n\te := newEtlMDOwnerPrx(config)\n\te.put(newEtlMD())\n\tp.owner.etl = e\n\n\tp.gmm = memsys.PageMM()\n\treturn p\n}",
"func newInstances(pod *Pod, prov provider.DataCenter, cfg *config.Instances) (*instances, error) {\n\tlog.Debug(\"Initializing Instances\")\n\n\ti := &instances{\n\t\tResources: resource.NewResources(),\n\t\tpod: pod,\n\t\tinstances: map[string]resource.Instance{},\n\t}\n\n\t// The reference to the network resource.\n\tnet := pod.Cluster().Compute().DataCenter().Network()\n\n\t// The availability zones available to these instances.\n\tavailabilityZones := net.AvailabilityZones()\n\n\t// The subnet group associated with these instances.\n\tsubnetGroup := net.SubnetGroups().Find(pod.SubnetGroup())\n\tif subnetGroup == nil {\n\t\treturn nil, fmt.Errorf(\"Cannot find subnet group %s configured for pod %s\", pod.SubnetGroup(), pod.Name())\n\t}\n\n\t// The keypair to be used with these instances.\n\tkeypair := pod.Cluster().Compute().KeyPair()\n\n\tn := 0\n\tfor _, conf := range *cfg {\n\t\t// Ensure the instance is uniquely named.\n\t\tif i.Find(conf.Name()) != nil {\n\t\t\treturn nil, fmt.Errorf(\"Instance name %q must be unique but is used multiple times\", conf.Name())\n\t\t}\n\n\t\t// The availability zone for this instance. Chosing via round robin. Always starting at 0.\n\t\taz := availabilityZones[n%len(availabilityZones)]\n\n\t\t// Get the subnet associated with the AZ.\n\t\tsubnetName := pod.SubnetGroup() + \"-\" + az\n\t\tsubnet := subnetGroup.Find(subnetName)\n\t\tif subnet == nil {\n\t\t\treturn nil, fmt.Errorf(\"Cannot find subnet %s configured for instance %s\", subnetName, conf.Name())\n\t\t}\n\n\t\tinstance, err := newInstance(pod, subnet, keypair, prov, conf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ti.instances[instance.Name()] = instance\n\t\ti.Append(instance)\n\n\t\tn++\n\t}\n\treturn i, nil\n}",
"func (b *BridgeNetworkDriver) Create(name string, subnet string) (*Network, error) {\n\t// 取到网段字符串中的网关ip地址和网络的ip段\n\tip, IPRange, _ := net.ParseCIDR(subnet)\n\tIPRange.IP = ip\n\n\tn := &Network{\n\t\tName: name,\n\t\tIPRange: IPRange,\n\t\tDriver: b.Name(),\n\t}\n\n\terr := b.initBridge(n)\n\treturn n, err\n}",
"func (p *pool) AllocateBlock(ctx context.Context, nodeName, requestUID string) (*coilv2.AddressBlock, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tnextIndex, ok := p.allocated.NextClear(0)\n\tif !ok {\n\t\tnextIndex = p.allocated.Len()\n\t}\n\n\tap := &coilv2.AddressPool{}\n\terr := p.client.Get(ctx, client.ObjectKey{Name: p.name}, ap)\n\tif err != nil {\n\t\tp.log.Error(err, \"failed to get AddressPool\")\n\t\treturn nil, err\n\t}\n\tif ap.DeletionTimestamp != nil {\n\t\tp.log.Info(\"unable to curve out a block because pool is under deletion\")\n\t\treturn nil, ErrNoBlock\n\t}\n\n\tvar currentIndex uint\n\tfor _, ss := range ap.Spec.Subnets {\n\t\tvar ones, bits int\n\t\tif ss.IPv4 != nil {\n\t\t\t_, n, _ := net.ParseCIDR(*ss.IPv4) // ss was validated\n\t\t\tones, bits = n.Mask.Size()\n\t\t} else {\n\t\t\t_, n, _ := net.ParseCIDR(*ss.IPv6) // ss was validated\n\t\t\tones, bits = n.Mask.Size()\n\t\t}\n\t\tsize := uint(1) << (bits - ones - int(ap.Spec.BlockSizeBits))\n\t\tif nextIndex >= (currentIndex + size) {\n\t\t\tcurrentIndex += size\n\t\t\tcontinue\n\t\t}\n\n\t\tipv4, ipv6 := ss.GetBlock(nextIndex-currentIndex, int(ap.Spec.BlockSizeBits))\n\n\t\tr := &coilv2.AddressBlock{}\n\t\tr.Name = fmt.Sprintf(\"%s-%d\", p.name, nextIndex)\n\t\tif err := controllerutil.SetControllerReference(ap, r, p.scheme); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr.Labels = map[string]string{\n\t\t\tconstants.LabelPool: p.name,\n\t\t\tconstants.LabelNode: nodeName,\n\t\t\tconstants.LabelRequest: requestUID,\n\t\t}\n\t\tcontrollerutil.AddFinalizer(r, constants.FinCoil)\n\t\tr.Index = int32(nextIndex)\n\t\tif ipv4 != nil {\n\t\t\ts := ipv4.String()\n\t\t\tr.IPv4 = &s\n\t\t}\n\t\tif ipv6 != nil {\n\t\t\ts := ipv6.String()\n\t\t\tr.IPv6 = &s\n\t\t}\n\t\tif err := p.client.Create(ctx, r); err != nil {\n\t\t\tp.log.Error(err, \"failed to create AddressBlock\", \"index\", nextIndex, \"node\", nodeName)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tp.log.Info(\"created AddressBlock\", \"index\", nextIndex, \"node\", nodeName)\n\t\tp.allocated.Set(nextIndex)\n\t\tp.allocatedBlocks.Inc()\n\t\treturn r, nil\n\t}\n\n\tp.log.Error(ErrNoBlock, \"no available blocks\")\n\treturn nil, ErrNoBlock\n}",
"func TestAllocOnInit(t *testing.T) {\n\tinitDone := make(chan struct{})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"10.0.10.123\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-b\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceBUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.124\",\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"10.0.10.124\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"No service updates expected\")\n\n\t\treturn false\n\t}, 100*time.Millisecond)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tawait.Block()\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.123\")) {\n\t\tt.Fatal(\"Expected the imported IP to be allocated\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.124\")) {\n\t\tt.Fatal(\"Expected the imported IP to be allocated\")\n\t}\n}",
"func (na *cnmNetworkAllocator) allocateVIP(vip *api.Endpoint_VirtualIP) error {\n\tvar opts map[string]string\n\tlocalNet := na.getNetwork(vip.NetworkID)\n\tif localNet == nil {\n\t\treturn errors.New(\"networkallocator: could not find local network state\")\n\t}\n\n\tif localNet.isNodeLocal {\n\t\treturn nil\n\t}\n\n\t// If this IP is already allocated in memory we don't need to\n\t// do anything.\n\tif _, ok := localNet.endpoints[vip.Addr]; ok {\n\t\treturn nil\n\t}\n\n\tipam, _, _, err := na.resolveIPAM(localNet.nw)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to resolve IPAM while allocating\")\n\t}\n\n\tvar addr net.IP\n\tif vip.Addr != \"\" {\n\t\tvar err error\n\n\t\taddr, _, err = net.ParseCIDR(vip.Addr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif localNet.nw.IPAM != nil && localNet.nw.IPAM.Driver != nil {\n\t\t// set ipam allocation method to serial\n\t\topts = setIPAMSerialAlloc(localNet.nw.IPAM.Driver.Options)\n\t}\n\n\tfor _, poolID := range localNet.pools {\n\t\tip, _, err := ipam.RequestAddress(poolID, addr, opts)\n\t\tif err != nil && err != ipamapi.ErrNoAvailableIPs && err != ipamapi.ErrIPOutOfRange {\n\t\t\treturn errors.Wrap(err, \"could not allocate VIP from IPAM\")\n\t\t}\n\n\t\t// If we got an address then we are done.\n\t\tif err == nil {\n\t\t\tipStr := ip.String()\n\t\t\tlocalNet.endpoints[ipStr] = poolID\n\t\t\tvip.Addr = ipStr\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn errors.New(\"could not find an available IP while allocating VIP\")\n}",
"func (b *BaseImpl) New(n Base) Base {\n\treturn n\n}",
"func lookupOrAllocateIPv4(\n\tctx *zedrouterContext,\n\tstatus *types.NetworkInstanceStatus,\n\tmac net.HardwareAddr) (string, error) {\n\n\tlog.Infof(\"lookupOrAllocateIPv4(%s-%s): mac:%s\\n\",\n\t\tstatus.DisplayName, status.Key(), mac.String())\n\t// Lookup to see if it exists\n\tif ip, ok := status.IPAssignments[mac.String()]; ok {\n\t\tlog.Infof(\"found Ip addr ( %s) for mac(%s)\\n\",\n\t\t\tip.String(), mac.String())\n\t\treturn ip.String(), nil\n\t}\n\n\tlog.Infof(\"bridgeName %s Subnet %v range %v-%v\\n\",\n\t\tstatus.BridgeName, status.Subnet,\n\t\tstatus.DhcpRange.Start, status.DhcpRange.End)\n\n\tif status.DhcpRange.Start == nil {\n\t\tif status.Type == types.NetworkInstanceTypeSwitch {\n\t\t\tlog.Infof(\"%s-%s switch means no bridgeIpAddr\",\n\t\t\t\tstatus.DisplayName, status.Key())\n\t\t\treturn \"\", nil\n\t\t}\n\t\tlog.Fatalf(\"%s-%s: nil DhcpRange.Start\",\n\t\t\tstatus.DisplayName, status.Key())\n\t}\n\n\t// Starting guess based on number allocated\n\tallocated := uint(len(status.IPAssignments))\n\ta := addToIP(status.DhcpRange.Start, allocated)\n\tfor status.DhcpRange.End == nil ||\n\t\tbytes.Compare(a, status.DhcpRange.End) < 0 {\n\n\t\tlog.Infof(\"lookupOrAllocateIPv4(%s) testing %s\\n\",\n\t\t\tmac.String(), a.String())\n\t\tif status.IsIpAssigned(a) {\n\t\t\ta = addToIP(a, 1)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Infof(\"lookupOrAllocateIPv4(%s) found free %s\\n\",\n\t\t\tmac.String(), a.String())\n\t\tstatus.IPAssignments[mac.String()] = a\n\t\t// Publish the allocation\n\t\tpublishNetworkInstanceStatus(ctx, status)\n\t\treturn a.String(), nil\n\t}\n\terrStr := fmt.Sprintf(\"lookupOrAllocateIPv4(%s) no free address in DhcpRange\",\n\t\tstatus.Key())\n\treturn \"\", errors.New(errStr)\n}",
"func createSingleHostNetworking(ctx context.Context, svc iaas.Service, singleHostRequest abstract.HostRequest) (_ resources.Subnet, _ func() fail.Error, ferr fail.Error) {\n\t// Build network name\n\tcfg, xerr := svc.GetConfigurationOptions(ctx)\n\tif xerr != nil {\n\t\treturn nil, nil, xerr\n\t}\n\n\tbucketName := cfg.GetString(\"MetadataBucketName\")\n\tif bucketName == \"\" {\n\t\treturn nil, nil, fail.InconsistentError(\"missing service configuration option 'MetadataBucketName'\")\n\t}\n\n\t// Trim and TrimPrefix don't do the same thing\n\tnetworkName := fmt.Sprintf(\"sfnet-%s\", strings.TrimPrefix(bucketName, objectstorage.BucketNamePrefix+\"-\"))\n\n\t// Create network if needed\n\tnetworkInstance, xerr := LoadNetwork(ctx, svc, networkName)\n\tif xerr != nil {\n\t\tswitch xerr.(type) {\n\t\tcase *fail.ErrNotFound:\n\t\t\tnetworkInstance, xerr = NewNetwork(svc)\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, xerr\n\t\t\t}\n\n\t\t\trequest := abstract.NetworkRequest{\n\t\t\t\tName: networkName,\n\t\t\t\tCIDR: abstract.SingleHostNetworkCIDR,\n\t\t\t\tKeepOnFailure: true,\n\t\t\t}\n\t\t\txerr = networkInstance.Create(ctx, &request, nil)\n\t\t\tif xerr != nil {\n\t\t\t\t// handle a particular case of *fail.ErrDuplicate...\n\t\t\t\tswitch cerr := xerr.(type) {\n\t\t\t\tcase *fail.ErrDuplicate:\n\t\t\t\t\tvalue, found := cerr.Annotation(\"managed\")\n\t\t\t\t\tif found && value != nil {\n\t\t\t\t\t\tmanaged, ok := value.(bool)\n\t\t\t\t\t\tif ok && !managed {\n\t\t\t\t\t\t\treturn nil, nil, xerr\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\t// ... otherwise, try to get Network that is created by another goroutine\n\t\t\t\tswitch xerr.(type) {\n\t\t\t\tcase *fail.ErrDuplicate, *fail.ErrNotAvailable:\n\t\t\t\t\t// If these errors occurred, another goroutine is running to create the same Network, so wait for it\n\t\t\t\t\tnetworkInstance, xerr = LoadNetwork(ctx, svc, networkName)\n\t\t\t\t\tif xerr != nil {\n\t\t\t\t\t\treturn nil, nil, xerr\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, nil, xerr\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, nil, xerr\n\t\t}\n\t}\n\n\tnid, err := networkInstance.GetID()\n\tif err != nil {\n\t\treturn nil, nil, fail.ConvertError(err)\n\t}\n\n\t// Check if Subnet exists\n\tvar (\n\t\tsubnetRequest abstract.SubnetRequest\n\t\tcidrIndex uint\n\t)\n\tsubnetInstance, xerr := LoadSubnet(ctx, svc, nid, singleHostRequest.ResourceName)\n\tif xerr != nil {\n\t\tswitch xerr.(type) {\n\t\tcase *fail.ErrNotFound:\n\t\t\tsubnetInstance, xerr = NewSubnet(svc)\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, xerr\n\t\t\t}\n\n\t\t\tvar (\n\t\t\t\tsubnetCIDR string\n\t\t\t)\n\n\t\t\tsubnetCIDR, cidrIndex, xerr = ReserveCIDRForSingleHost(ctx, networkInstance)\n\t\t\txerr = debug.InjectPlannedFail(xerr)\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, xerr\n\t\t\t}\n\n\t\t\tvar dnsServers []string\n\t\t\topts, xerr := svc.GetConfigurationOptions(ctx)\n\t\t\txerr = debug.InjectPlannedFail(xerr)\n\t\t\tif xerr != nil {\n\t\t\t\tswitch xerr.(type) {\n\t\t\t\tcase *fail.ErrNotFound:\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, nil, xerr\n\t\t\t\t}\n\t\t\t} else if servers := strings.TrimSpace(opts.GetString(\"DNSServers\")); servers != \"\" {\n\t\t\t\tdnsServers = strings.Split(servers, \",\")\n\t\t\t}\n\n\t\t\tsubnetRequest.Name = singleHostRequest.ResourceName\n\t\t\tsubnetRequest.NetworkID, err = networkInstance.GetID()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, fail.ConvertError(err)\n\t\t\t}\n\t\t\tsubnetRequest.IPVersion = 
ipversion.IPv4\n\t\t\tsubnetRequest.CIDR = subnetCIDR\n\t\t\tsubnetRequest.DNSServers = dnsServers\n\t\t\tsubnetRequest.HA = false\n\n\t\t\txerr = subnetInstance.CreateSubnetWithoutGateway(ctx, subnetRequest)\n\t\t\txerr = debug.InjectPlannedFail(xerr)\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, xerr\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tferr = debug.InjectPlannedFail(ferr)\n\t\t\t\tif ferr != nil && !singleHostRequest.KeepOnFailure {\n\t\t\t\t\tderr := subnetInstance.Delete(cleanupContextFrom(ctx))\n\t\t\t\t\tif derr != nil {\n\t\t\t\t\t\t_ = ferr.AddConsequence(\n\t\t\t\t\t\t\tfail.Wrap(\n\t\t\t\t\t\t\t\tderr, \"cleaning up on failure, failed to delete Subnet '%s'\",\n\t\t\t\t\t\t\t\tsingleHostRequest.ResourceName,\n\t\t\t\t\t\t\t),\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t// Sets the CIDR index in instance metadata\n\t\t\txerr = subnetInstance.Alter(ctx, func(clonable data.Clonable, _ *serialize.JSONProperties) fail.Error {\n\t\t\t\tas, ok := clonable.(*abstract.Subnet)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fail.InconsistentError(\n\t\t\t\t\t\t\"'*abstract.Subnet' expected, '%s' provided\", reflect.TypeOf(clonable).String(),\n\t\t\t\t\t)\n\t\t\t\t}\n\n\t\t\t\tas.SingleHostCIDRIndex = cidrIndex\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, xerr\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, nil, xerr\n\t\t}\n\t} else {\n\t\treturn nil, nil, fail.DuplicateError(\"there is already a Subnet named '%s'\", singleHostRequest.ResourceName)\n\t}\n\n\tundoFunc := func() fail.Error {\n\t\tvar errs []error\n\t\tif !singleHostRequest.KeepOnFailure {\n\t\t\tderr := subnetInstance.Delete(cleanupContextFrom(ctx))\n\t\t\tif derr != nil {\n\t\t\t\terrs = append(\n\t\t\t\t\terrs, fail.Wrap(\n\t\t\t\t\t\tderr, \"cleaning up on failure, failed to delete Subnet '%s'\", singleHostRequest.ResourceName,\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t}\n\t\t\tderr = FreeCIDRForSingleHost(cleanupContextFrom(ctx), networkInstance, cidrIndex)\n\t\t\tif derr != nil {\n\t\t\t\terrs = append(\n\t\t\t\t\terrs, fail.Wrap(\n\t\t\t\t\t\tderr, \"cleaning up on failure, failed to free CIDR slot in Network '%s'\",\n\t\t\t\t\t\tnetworkInstance.GetName(),\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t\tif len(errs) > 0 {\n\t\t\treturn fail.NewErrorList(errs)\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn subnetInstance, undoFunc, nil\n}",
"func newSubInstance(parent *BaseInstance, name string) *BaseInstance {\n\tbi := &BaseInstance{\n\t\tname: name,\n\t\tinput: parent.input,\n\t\tinput2: parent.input2,\n\t\trules: newRules(),\n\t\tenabled: false,\n\t\tinstance: parent.instance,\n\t}\n\treturn bi\n}",
"func (s *Service) CreateOrUpdate(ctx context.Context, spec azure.Spec) error {\n\tnicSpec, ok := spec.(*Spec)\n\tif !ok {\n\t\treturn errors.New(\"invalid network interface specification\")\n\t}\n\n\tnicConfig := &network.InterfaceIPConfigurationPropertiesFormat{}\n\n\tsubnetInterface, err := subnets.NewService(s.Scope).Get(ctx, &subnets.Spec{Name: nicSpec.SubnetName, VnetName: nicSpec.VnetName})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsubnet, ok := subnetInterface.(network.Subnet)\n\tif !ok {\n\t\treturn errors.New(\"subnet get returned invalid network interface\")\n\t}\n\n\tnicConfig.Subnet = &network.Subnet{ID: subnet.ID}\n\tnicConfig.PrivateIPAllocationMethod = network.Dynamic\n\tif nicSpec.StaticIPAddress != \"\" {\n\t\tnicConfig.PrivateIPAllocationMethod = network.Static\n\t\tnicConfig.PrivateIPAddress = to.StringPtr(nicSpec.StaticIPAddress)\n\t}\n\n\tbackendAddressPools := []network.BackendAddressPool{}\n\tif nicSpec.PublicLoadBalancerName != \"\" {\n\t\tlbInterface, lberr := publicloadbalancers.NewService(s.Scope).Get(ctx, &publicloadbalancers.Spec{Name: nicSpec.PublicLoadBalancerName})\n\t\tif lberr != nil {\n\t\t\treturn lberr\n\t\t}\n\n\t\tlb, ok := lbInterface.(network.LoadBalancer)\n\t\tif !ok {\n\t\t\treturn errors.New(\"public load balancer get returned invalid network interface\")\n\t\t}\n\n\t\tbackendAddressPools = append(backendAddressPools,\n\t\t\tnetwork.BackendAddressPool{\n\t\t\t\tID: (*lb.BackendAddressPools)[0].ID,\n\t\t\t})\n\t\tnicConfig.LoadBalancerInboundNatRules = &[]network.InboundNatRule{\n\t\t\t{\n\t\t\t\tID: (*lb.InboundNatRules)[nicSpec.NatRule].ID,\n\t\t\t},\n\t\t}\n\t}\n\tif nicSpec.InternalLoadBalancerName != \"\" {\n\t\tinternallbInterface, ilberr := internalloadbalancers.NewService(s.Scope).Get(ctx, &internalloadbalancers.Spec{Name: nicSpec.InternalLoadBalancerName})\n\t\tif ilberr != nil {\n\t\t\treturn ilberr\n\t\t}\n\n\t\tinternallb, ok := internallbInterface.(network.LoadBalancer)\n\t\tif !ok {\n\t\t\treturn errors.New(\"internal load balancer get returned invalid network interface\")\n\t\t}\n\t\tbackendAddressPools = append(backendAddressPools,\n\t\t\tnetwork.BackendAddressPool{\n\t\t\t\tID: (*internallb.BackendAddressPools)[0].ID,\n\t\t\t})\n\t}\n\tnicConfig.LoadBalancerBackendAddressPools = &backendAddressPools\n\n\tf, err := s.Client.CreateOrUpdate(ctx,\n\t\ts.Scope.ClusterConfig.ResourceGroup,\n\t\tnicSpec.Name,\n\t\tnetwork.Interface{\n\t\t\tLocation: to.StringPtr(s.Scope.ClusterConfig.Location),\n\t\t\tInterfacePropertiesFormat: &network.InterfacePropertiesFormat{\n\t\t\t\tIPConfigurations: &[]network.InterfaceIPConfiguration{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"pipConfig\"),\n\t\t\t\t\t\tInterfaceIPConfigurationPropertiesFormat: nicConfig,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to create network interface %s in resource group %s\", nicSpec.Name, s.Scope.ClusterConfig.ResourceGroup)\n\t}\n\n\terr = f.WaitForCompletionRef(ctx, s.Client.Client)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot create, future response\")\n\t}\n\n\t_, err = f.Result(s.Client)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"result error\")\n\t}\n\tklog.V(2).Infof(\"successfully created network interface %s\", nicSpec.Name)\n\treturn err\n}",
"func (s *Pool) ReserveForInstance(insId uint64) (*GroupInstance, error) {\n\tgins, exists := s.getActive(insId)\n\tif !exists {\n\t\treturn nil, fmt.Errorf(\"instance %d not found\", insId)\n\t}\n\n\tif IN_DEPLOYMENT_MIGRATION {\n\t\treturn gins, nil\n\t} else {\n\t\treturn s.ReserveForGroup(gins.group, gins.idx)\n\t}\n}",
"func (d *V8interceptor) Base() *BaseRefCounted {\n\treturn (*BaseRefCounted)(&d.base)\n}",
"func TestReallocOnInit(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\t// Initially request only an IPv4\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"192.168.1.12\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP == \"192.168.1.12\" {\n\t\t\tt.Error(\"Expected ingress IP to not be the initial, bad IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected second condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected second condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service to be updated\")\n\t}\n}",
"func NewIdentityProviderBase()(*IdentityProviderBase) {\n m := &IdentityProviderBase{\n Entity: *NewEntity(),\n }\n return m\n}",
"func initPool() {\n\tpool = &sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\tfmt.Println(\"Returning new A\")\n\t\t\treturn new(A)\n\t\t},\n\t}\n}",
"func (hd *Datapath) CreateNatPool(np *netproto.NatPool, vrf *netproto.Vrf) error {\n\t// This will ensure that only one datapath config will be active at a time. This is a temporary restriction\n\t// to ensure that HAL will use a single config thread , this will be removed prior to FCS to allow parallel configs to go through.\n\t// TODO Remove Global Locking\n\thd.Lock()\n\tdefer hd.Unlock()\n\tvrfKey := &halproto.VrfKeyHandle{\n\t\tKeyOrHandle: &halproto.VrfKeyHandle_VrfId{\n\t\t\tVrfId: vrf.Status.VrfID,\n\t\t},\n\t}\n\n\tipRange := strings.Split(np.Spec.IPRange, \"-\")\n\tif len(ipRange) != 2 {\n\t\treturn fmt.Errorf(\"could not parse IP Range from the NAT Pool IPRange. {%v}\", np.Spec.IPRange)\n\t}\n\n\tstartIP := net.ParseIP(strings.TrimSpace(ipRange[0]))\n\tif len(startIP) == 0 {\n\t\treturn fmt.Errorf(\"could not parse IP from {%v}\", startIP)\n\t}\n\tendIP := net.ParseIP(strings.TrimSpace(ipRange[1]))\n\tif len(endIP) == 0 {\n\t\treturn fmt.Errorf(\"could not parse IP from {%v}\", endIP)\n\t}\n\n\tlowIP := halproto.IPAddress{\n\t\tIpAf: halproto.IPAddressFamily_IP_AF_INET,\n\t\tV4OrV6: &halproto.IPAddress_V4Addr{\n\t\t\tV4Addr: ipv4Touint32(startIP),\n\t\t},\n\t}\n\n\thighIP := halproto.IPAddress{\n\t\tIpAf: halproto.IPAddressFamily_IP_AF_INET,\n\t\tV4OrV6: &halproto.IPAddress_V4Addr{\n\t\t\tV4Addr: ipv4Touint32(endIP),\n\t\t},\n\t}\n\n\taddrRange := &halproto.Address_Range{\n\t\tRange: &halproto.AddressRange{\n\t\t\tRange: &halproto.AddressRange_Ipv4Range{\n\t\t\t\tIpv4Range: &halproto.IPRange{\n\t\t\t\t\tLowIpaddr: &lowIP,\n\t\t\t\t\tHighIpaddr: &highIP,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tnatPoolReqMsg := &halproto.NatPoolRequestMsg{\n\t\tRequest: []*halproto.NatPoolSpec{\n\t\t\t{\n\t\t\t\tKeyOrHandle: &halproto.NatPoolKeyHandle{\n\t\t\t\t\tKeyOrHandle: &halproto.NatPoolKeyHandle_PoolKey{\n\t\t\t\t\t\tPoolKey: &halproto.NatPoolKey{\n\t\t\t\t\t\t\tVrfKh: vrfKey,\n\t\t\t\t\t\t\tPoolId: np.Status.NatPoolID,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAddress: []*halproto.Address{\n\t\t\t\t\t{\n\t\t\t\t\t\tAddress: addrRange,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif hd.Kind == \"hal\" {\n\t\tresp, err := hd.Hal.Natclient.NatPoolCreate(context.Background(), natPoolReqMsg)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error creating nat pool. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tif !(resp.Response[0].ApiStatus == halproto.ApiStatus_API_STATUS_OK || resp.Response[0].ApiStatus == halproto.ApiStatus_API_STATUS_EXISTS_ALREADY) {\n\t\t\tlog.Errorf(\"HAL returned non OK status. %v\", resp.Response[0].ApiStatus.String())\n\t\t\treturn fmt.Errorf(\"HAL returned non OK status. %v\", resp.Response[0].ApiStatus.String())\n\t\t}\n\t} else {\n\t\t_, err := hd.Hal.Natclient.NatPoolCreate(context.Background(), natPoolReqMsg)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error creating nat pool. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (v *Global) tryApplyBase(base *Global) bool {\n\tif !v.addressBase.IsEmpty() {\n\t\treturn false\n\t}\n\n\tif !base.IsSelfScope() {\n\t\tswitch base.GetScope() {\n\t\tcase LocalDomainMember, GlobalDomainMember:\n\t\t\tbreak\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\tv.addressBase = base.addressLocal\n\treturn true\n}",
"func newCache(size int, withECS, optimistic bool) (c *cache) {\n\tc = &cache{\n\t\titemsLock: &sync.RWMutex{},\n\t\titemsWithSubnetLock: &sync.RWMutex{},\n\t\titems: createCache(size),\n\t\toptimistic: optimistic,\n\t}\n\n\tif withECS {\n\t\tc.itemsWithSubnet = createCache(size)\n\t}\n\n\treturn c\n}",
"func newPeerBase(origCfg *Config, inbound bool) *Peer {\n\t// Default to the max supported protocol version if not specified by the\n\t// caller.\n\tcfg := *origCfg // Copy to avoid mutating caller.\n\tif cfg.ProtocolVersion == 0 {\n\t\tcfg.ProtocolVersion = MaxProtocolVersion\n\t}\n\n\t// Set the chain parameters to testnet if the caller did not specify any.\n\tif cfg.ChainParams == nil {\n\t\tcfg.ChainParams = &chaincfg.TestNet3Params\n\t}\n\n\t// Set the trickle interval if a non-positive value is specified.\n\tif cfg.TrickleInterval <= 0 {\n\t\tcfg.TrickleInterval = DefaultTrickleInterval\n\t}\n\n\tp := Peer{\n\t\tinbound: inbound,\n\t\twireEncoding: wire.BaseEncoding,\n\t\tknownInventory: lru.NewCache(maxKnownInventory),\n\t\tstallControl: make(chan stallControlMsg, 1), // nonblocking sync\n\t\toutputQueue: make(chan outMsg, outputBufferSize),\n\t\tsendQueue: make(chan outMsg, 1), // nonblocking sync\n\t\tsendDoneQueue: make(chan struct{}, 1), // nonblocking sync\n\t\toutputInvChan: make(chan *wire.InvVect, outputBufferSize),\n\t\tinQuit: make(chan struct{}),\n\t\tqueueQuit: make(chan struct{}),\n\t\toutQuit: make(chan struct{}),\n\t\tquit: make(chan struct{}),\n\t\tcfg: cfg, // Copy so caller can't mutate.\n\t\tservices: cfg.Services,\n\t\tprotocolVersion: cfg.ProtocolVersion,\n\t}\n\treturn &p\n}",
"func (p *connPool) new() (*conn, error) {\n\tif p.rl.Limit() {\n\t\terr := fmt.Errorf(\n\t\t\t\"redis: you open connections too fast (last_error=%q)\",\n\t\t\tp.loadLastErr(),\n\t\t)\n\t\treturn nil, err\n\t}\n\n\tcn, err := p.dialer()\n\tif err != nil {\n\t\tp.storeLastErr(err.Error())\n\t\treturn nil, err\n\t}\n\n\treturn cn, nil\n}",
"func (s *Service) CreateOrUpdate(ctx context.Context, spec azure.Spec) error {\n\tinternalLBSpec, ok := spec.(*Spec)\n\tif !ok {\n\t\treturn errors.New(\"invalid internal load balancer specification\")\n\t}\n\tklog.V(2).Infof(\"creating internal load balancer %s\", internalLBSpec.Name)\n\tprobeName := \"tcpHTTPSProbe\"\n\tfrontEndIPConfigName := \"controlplane-internal-lbFrontEnd\"\n\tbackEndAddressPoolName := \"controlplane-internal-backEndPool\"\n\tidPrefix := fmt.Sprintf(\"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers\", s.Scope.SubscriptionID, s.Scope.ClusterConfig.ResourceGroup)\n\tlbName := internalLBSpec.Name\n\n\tklog.V(2).Infof(\"getting subnet %s\", internalLBSpec.SubnetName)\n\tsubnetInterface, err := subnets.NewService(s.Scope).Get(ctx, &subnets.Spec{Name: internalLBSpec.SubnetName, VnetName: internalLBSpec.VnetName})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsubnet, ok := subnetInterface.(network.Subnet)\n\tif !ok {\n\t\treturn errors.New(\"subnet Get returned invalid interface\")\n\t}\n\tklog.V(2).Infof(\"successfully got subnet %s\", internalLBSpec.SubnetName)\n\n\tfuture, err := s.Client.CreateOrUpdate(ctx,\n\t\ts.Scope.ClusterConfig.ResourceGroup,\n\t\tlbName,\n\t\tnetwork.LoadBalancer{\n\t\t\tSku: &network.LoadBalancerSku{Name: network.LoadBalancerSkuNameStandard},\n\t\t\tLocation: to.StringPtr(s.Scope.ClusterConfig.Location),\n\t\t\tLoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{\n\t\t\t\tFrontendIPConfigurations: &[]network.FrontendIPConfiguration{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &frontEndIPConfigName,\n\t\t\t\t\t\tFrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{\n\t\t\t\t\t\t\tPrivateIPAllocationMethod: network.Static,\n\t\t\t\t\t\t\tSubnet: &subnet,\n\t\t\t\t\t\t\tPrivateIPAddress: to.StringPtr(internalLBSpec.IPAddress),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBackendAddressPools: &[]network.BackendAddressPool{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &backEndAddressPoolName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProbes: &[]network.Probe{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &probeName,\n\t\t\t\t\t\tProbePropertiesFormat: &network.ProbePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.ProbeProtocolTCP,\n\t\t\t\t\t\t\tPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tIntervalInSeconds: to.Int32Ptr(15),\n\t\t\t\t\t\t\tNumberOfProbes: to.Int32Ptr(4),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLoadBalancingRules: &[]network.LoadBalancingRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"LBRuleHTTPS\"),\n\t\t\t\t\t\tLoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tEnableFloatingIP: to.BoolPtr(false),\n\t\t\t\t\t\t\tLoadDistribution: network.LoadDistributionDefault,\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tBackendAddressPool: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/backendAddressPools/%s\", idPrefix, lbName, backEndAddressPoolName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tProbe: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/probes/%s\", idPrefix, lbName, 
probeName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot create load balancer\")\n\t}\n\n\terr = future.WaitForCompletionRef(ctx, s.Client.Client)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot get internal load balancer create or update future response\")\n\t}\n\n\t_, err = future.Result(s.Client)\n\tklog.V(2).Infof(\"successfully created internal load balancer %s\", internalLBSpec.Name)\n\treturn err\n}",
"func newSubInstance(parent *BaseInstance, name string) *BaseInstance {\n\tsi := parent.module.subinstance\n\tif si == nil {\n\t\tsi = make(map[string][]*BaseInstance)\n\t\tparent.module.subinstance = si\n\t}\n\n\tbi := &BaseInstance{\n\t\tname: name,\n\t\tinput: parent.input,\n\t\tinput2: parent.input2,\n\t\trules: newRules(),\n\t\tenabled: false,\n\t\tsubinstance: true,\n\t\tinstance: parent.instance,\n\t\tmodule: parent.module,\n\t}\n\n\tsi[parent.name] = append(si[parent.name], bi)\n\treturn bi\n}",
"func (d *ResourceHandler) Base() *BaseRefCounted {\n\treturn (*BaseRefCounted)(&d.base)\n}",
"func New(ringWeight int) LoadBalancer {\n\t// TODO: Implement this!\n\tnewLB := new(loadBalancer)\n\tnewLB.sortedNames = make([]MMENode, 0)\n\tnewLB.weight = ringWeight\n\tnewLB.hashRing = NewRing()\n\tif 7 == 2 {\n\t\tfmt.Println(ringWeight)\n\t}\n\treturn newLB\n}",
"func NewPooledWrapper(ctx context.Context, base wrapping.Wrapper) (*PooledWrapper, error) {\n\tbaseKeyId, err := base.KeyId(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// For safety, no real reason this should happen\n\tif baseKeyId == BaseEncryptor {\n\t\treturn nil, fmt.Errorf(\"base wrapper cannot have a key ID of built-in base encryptor\")\n\t}\n\n\tret := &PooledWrapper{\n\t\twrappers: make(map[string]wrapping.Wrapper, 3),\n\t}\n\tret.wrappers[BaseEncryptor] = base\n\tret.wrappers[baseKeyId] = base\n\treturn ret, nil\n}",
"func newCockroachDBFromConfig(ctx context.Context, instanceConfig *config.InstanceConfig) (*pgxpool.Pool, error) {\n\tsingletonPoolMutex.Lock()\n\tdefer singletonPoolMutex.Unlock()\n\n\tif singletonPool != nil {\n\t\treturn singletonPool, nil\n\t}\n\n\tcfg, err := pgxpool.ParseConfig(instanceConfig.DataStoreConfig.ConnectionString)\n\tif err != nil {\n\t\treturn nil, skerr.Wrapf(err, \"Failed to parse database config: %q\", instanceConfig.DataStoreConfig.ConnectionString)\n\t}\n\n\tsklog.Infof(\"%#v\", *cfg)\n\tcfg.MaxConns = maxPoolConnections\n\tcfg.ConnConfig.Logger = pgxLogAdaptor{}\n\tsingletonPool, err = pgxpool.ConnectConfig(ctx, cfg)\n\tif err != nil {\n\t\treturn nil, skerr.Wrap(err)\n\t}\n\n\t// Confirm the database has the right schema.\n\texpectedSchema, err := expectedschema.Load()\n\tif err != nil {\n\t\treturn nil, skerr.Wrap(err)\n\t}\n\n\tactual, err := schema.GetDescription(singletonPool, sql.Tables{})\n\tif err != nil {\n\t\treturn nil, skerr.Wrap(err)\n\t}\n\tif diff := assertdeep.Diff(expectedSchema, *actual); diff != \"\" {\n\t\treturn nil, skerr.Fmt(\"Schema needs to be updated: %s.\", diff)\n\t}\n\n\treturn singletonPool, err\n}",
"func InitRoundRobin(name string, endpoints []string) {\n\tif len(lb) == 0 {\n\t\tlb = make(map[string]*roundrobin.Balancer)\n\t}\n\n\tlb[name] = roundrobin.New(endpoints)\n}",
"func (face *FaceBase) InitFaceBase(id FaceId, sizeofPriv int, socket dpdk.NumaSocket) error {\n\tface.id = id\n\n\tif socket == dpdk.NUMA_SOCKET_ANY {\n\t\tif lc := dpdk.GetCurrentLCore(); lc.IsValid() {\n\t\t\tsocket = lc.GetNumaSocket()\n\t\t} else {\n\t\t\tsocket = 0\n\t\t}\n\t}\n\n\tfaceC := face.getPtr()\n\t*faceC = C.Face{}\n\tfaceC.id = C.FaceId(face.id)\n\tfaceC.state = C.FACESTA_UP\n\tfaceC.numaSocket = C.int(socket)\n\n\tsizeofImpl := int(C.sizeof_FaceImpl) + sizeofPriv\n\tfaceC.impl = (*C.FaceImpl)(dpdk.ZmallocAligned(\"FaceImpl\", sizeofImpl, 1, socket))\n\n\treturn nil\n\n}",
"func TestCNContainer_Base(t *testing.T) {\n\tassert := assert.New(t)\n\n\tcn, err := cnTestInit()\n\trequire.Nil(t, err)\n\n\tdockerPlugin, err := startDockerPlugin(t)\n\trequire.Nil(t, err)\n\n\t//From YAML on instance init\n\t//Two VNICs on the same tenant subnet\n\tmac, _ := net.ParseMAC(\"CA:FE:00:01:02:03\")\n\tmac2, _ := net.ParseMAC(\"CA:FE:00:02:02:03\")\n\t_, tnet, _ := net.ParseCIDR(\"192.168.111.0/24\")\n\ttip := net.ParseIP(\"192.168.111.100\")\n\ttip2 := net.ParseIP(\"192.168.111.102\")\n\tcip := net.ParseIP(\"192.168.200.200\")\n\n\tvnicCfg := &VnicConfig{\n\t\tVnicRole: TenantContainer,\n\t\tVnicIP: tip,\n\t\tConcIP: cip,\n\t\tVnicMAC: mac,\n\t\tSubnet: *tnet,\n\t\tSubnetKey: 0xF,\n\t\tVnicID: \"vuuid\",\n\t\tInstanceID: \"iuuid\",\n\t\tTenantID: \"tuuid\",\n\t\tSubnetID: \"suuid\",\n\t\tConcID: \"cnciuuid\",\n\t}\n\n\tvnicCfg2 := &VnicConfig{\n\t\tVnicRole: TenantContainer,\n\t\tVnicIP: tip2,\n\t\tConcIP: cip,\n\t\tVnicMAC: mac2,\n\t\tSubnet: *tnet,\n\t\tSubnetKey: 0xF,\n\t\tVnicID: \"vuuid2\",\n\t\tInstanceID: \"iuuid2\",\n\t\tTenantID: \"tuuid\",\n\t\tSubnetID: \"suuid\",\n\t\tConcID: \"cnciuuid\",\n\t}\n\n\tvar subnetID, iface string //Used to check that they match\n\n\t// Create a VNIC: Should create bridge and tunnels\n\tif vnic, ssntpEvent, cInfo, err := cn.CreateVnic(vnicCfg); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\t// expected SSNTP Event\n\t\tif assert.NotNil(ssntpEvent) {\n\t\t\tassert.Equal(ssntpEvent.Event, SsntpTunAdd)\n\t\t}\n\t\t// expected Container Event\n\t\tif assert.NotNil(cInfo) {\n\t\t\tassert.Equal(cInfo.CNContainerEvent, ContainerNetworkAdd)\n\t\t\tassert.NotEqual(cInfo.SubnetID, \"\")\n\t\t\tassert.NotEqual(cInfo.Subnet.String(), \"\")\n\t\t\tassert.NotEqual(cInfo.Gateway.String(), \"\")\n\t\t\tassert.NotEqual(cInfo.Bridge, \"\")\n\t\t}\n\t\tassert.Nil(validSsntpEvent(ssntpEvent, vnicCfg))\n\n\t\t//Cache the first subnet ID we see. 
All subsequent should have the same\n\t\tsubnetID = cInfo.SubnetID\n\t\tiface = vnic.InterfaceName()\n\t\tassert.NotEqual(iface, \"\")\n\n\t\t//Launcher will attach to this name and send out the event\n\t\t//Launcher will also create the logical docker network\n\t\tdebugPrint(t, \"VNIC created =\", vnic.LinkName, ssntpEvent, cInfo)\n\t\tassert.Nil(linkDump(t))\n\n\t\t//Now kick off the docker commands\n\t\tassert.Nil(dockerNetCreate(cInfo.Subnet, cInfo.Gateway, cInfo.Bridge, cInfo.SubnetID))\n\t\tassert.Nil(dockerNetInfo(cInfo.SubnetID))\n\t\tassert.Nil(dockerRunVerify(vnicCfg.VnicIP.String(), vnicCfg.VnicIP, vnicCfg.VnicMAC, cInfo.SubnetID))\n\t\tassert.Nil(dockerContainerDelete(vnicCfg.VnicIP.String()))\n\t}\n\n\t//Duplicate VNIC creation\n\tif vnic, ssntpEvent, cInfo, err := cn.CreateVnic(vnicCfg); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tassert.Nil(ssntpEvent, \"ERROR: DUP unexpected event\")\n\t\tif assert.NotNil(cInfo) {\n\t\t\tassert.Equal(cInfo.SubnetID, subnetID)\n\t\t\tassert.Equal(cInfo.CNContainerEvent, ContainerNetworkInfo)\n\t\t\tassert.Equal(iface, vnic.InterfaceName())\n\t\t}\n\t}\n\n\t//Second VNIC creation - Should succeed\n\tif vnic, ssntpEvent, cInfo, err := cn.CreateVnic(vnicCfg2); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tassert.Nil(ssntpEvent)\n\t\tif assert.NotNil(cInfo) {\n\t\t\tassert.Equal(cInfo.SubnetID, subnetID)\n\t\t\tassert.Equal(cInfo.CNContainerEvent, ContainerNetworkInfo)\n\t\t}\n\t\tiface = vnic.InterfaceName()\n\t\tassert.NotEqual(iface, \"\")\n\t\tassert.Nil(dockerRunVerify(vnicCfg2.VnicIP.String(), vnicCfg2.VnicIP,\n\t\t\tvnicCfg2.VnicMAC, cInfo.SubnetID))\n\t\tassert.Nil(dockerContainerDelete(vnicCfg2.VnicIP.String()))\n\t}\n\n\t//Duplicate VNIC creation\n\tif vnic, ssntpEvent, cInfo, err := cn.CreateVnic(vnicCfg2); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tassert.Nil(ssntpEvent)\n\t\tif assert.NotNil(cInfo) {\n\t\t\tassert.Equal(cInfo.SubnetID, subnetID)\n\t\t\tassert.Equal(cInfo.CNContainerEvent, ContainerNetworkInfo)\n\t\t\tassert.Equal(iface, vnic.InterfaceName())\n\t\t}\n\t}\n\n\t//Destroy the first one\n\tif ssntpEvent, cInfo, err := cn.DestroyVnic(vnicCfg); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tassert.Nil(ssntpEvent)\n\t\tassert.Nil(cInfo)\n\t}\n\n\t//Destroy it again\n\tif ssntpEvent, cInfo, err := cn.DestroyVnic(vnicCfg); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tassert.Nil(ssntpEvent)\n\t\tassert.Nil(cInfo)\n\t}\n\n\t// Try and destroy - should work - cInfo should be reported\n\tif ssntpEvent, cInfo, err := cn.DestroyVnic(vnicCfg2); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tassert.NotNil(ssntpEvent)\n\t\tif assert.NotNil(cInfo) {\n\t\t\tassert.Equal(cInfo.SubnetID, subnetID)\n\t\t\tassert.Equal(cInfo.CNContainerEvent, ContainerNetworkDel)\n\t\t}\n\t}\n\n\t//Has to be called after the VNIC has been deleted\n\tassert.Nil(dockerNetDelete(subnetID))\n\tassert.Nil(dockerNetList())\n\n\t//Destroy it again\n\tif ssntpEvent, cInfo, err := cn.DestroyVnic(vnicCfg2); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tassert.Nil(ssntpEvent)\n\t\tassert.Nil(cInfo)\n\t}\n\n\tassert.Nil(stopDockerPlugin(dockerPlugin))\n}",
"func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tgceNew, err := gce.New(\"\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &ReconcileTargetPool{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tgce: gceNew,\n\t\treconcileResult: reconcile.Result{\n\t\t\tRequeueAfter: time.Duration(5 * time.Second),\n\t\t},\n\t\tk8sObject: &computev1.TargetPool{},\n\t}\n}",
"func newPool(cfg *config) (*miningPool, error) {\n\tp := new(miningPool)\n\tp.cfg = cfg\n\tdcrdRPCCfg := &rpcclient.ConnConfig{\n\t\tHost: cfg.DcrdRPCHost,\n\t\tEndpoint: \"ws\",\n\t\tUser: cfg.RPCUser,\n\t\tPass: cfg.RPCPass,\n\t\tCertificates: cfg.dcrdRPCCerts,\n\t}\n\tp.ctx, p.cancel = context.WithCancel(context.Background())\n\tpowLimit := cfg.net.PowLimit\n\tpowLimitF, _ := new(big.Float).SetInt(powLimit).Float64()\n\titerations := math.Pow(2, 256-math.Floor(math.Log2(powLimitF)))\n\taddPort := func(ports map[string]uint32, key string, entry uint32) error {\n\t\tvar match bool\n\t\tvar miner string\n\t\tfor m, port := range ports {\n\t\t\tif port == entry {\n\t\t\t\tmatch = true\n\t\t\t\tminer = m\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif match {\n\t\t\treturn fmt.Errorf(\"%s and %s share port %d\", key, miner, entry)\n\t\t}\n\t\tports[key] = entry\n\t\treturn nil\n\t}\n\n\t// Ensure provided miner ports are unique.\n\tminerPorts := make(map[string]uint32)\n\t_ = addPort(minerPorts, pool.CPU, cfg.CPUPort)\n\terr := addPort(minerPorts, pool.InnosiliconD9, cfg.D9Port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = addPort(minerPorts, pool.AntminerDR3, cfg.DR3Port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = addPort(minerPorts, pool.AntminerDR5, cfg.DR5Port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = addPort(minerPorts, pool.WhatsminerD1, cfg.D1Port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = addPort(minerPorts, pool.ObeliskDCR1, cfg.DCR1Port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb, err := pool.InitDB(cfg.DBFile, cfg.SoloPool)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thcfg := &pool.HubConfig{\n\t\tDB: db,\n\t\tActiveNet: cfg.net.Params,\n\t\tPoolFee: cfg.PoolFee,\n\t\tMaxGenTime: cfg.MaxGenTime,\n\t\tPaymentMethod: cfg.PaymentMethod,\n\t\tLastNPeriod: cfg.LastNPeriod,\n\t\tWalletPass: cfg.WalletPass,\n\t\tPoolFeeAddrs: cfg.poolFeeAddrs,\n\t\tSoloPool: cfg.SoloPool,\n\t\tNonceIterations: iterations,\n\t\tMinerPorts: minerPorts,\n\t\tMaxConnectionsPerHost: cfg.MaxConnectionsPerHost,\n\t\tWalletAccount: cfg.WalletAccount,\n\t}\n\tp.hub, err = pool.NewHub(p.cancel, hcfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Establish a connection to the mining node.\n\tntfnHandlers := p.hub.CreateNotificationHandlers()\n\tnodeConn, err := rpcclient.New(dcrdRPCCfg, ntfnHandlers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := nodeConn.NotifyWork(p.ctx); err != nil {\n\t\tnodeConn.Shutdown()\n\t\treturn nil, err\n\t}\n\tif err := nodeConn.NotifyBlocks(p.ctx); err != nil {\n\t\tnodeConn.Shutdown()\n\t\treturn nil, err\n\t}\n\n\tp.hub.SetNodeConnection(nodeConn)\n\n\t// Establish a connection to the wallet if the pool is mining as a\n\t// publicly available mining pool.\n\tif !cfg.SoloPool {\n\t\tcreds, err := credentials.\n\t\t\tNewClientTLSFromFile(cfg.WalletRPCCert, \"localhost\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tgrpc, err := grpc.Dial(cfg.WalletGRPCHost,\n\t\t\tgrpc.WithTransportCredentials(creds))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Perform a Balance request to check connectivity and account\n\t\t// existence.\n\t\twalletConn := walletrpc.NewWalletServiceClient(grpc)\n\t\treq := &walletrpc.BalanceRequest{\n\t\t\tAccountNumber: cfg.WalletAccount,\n\t\t\tRequiredConfirmations: 1,\n\t\t}\n\t\t_, err = walletConn.Balance(p.ctx, req)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tp.hub.SetWalletConnection(walletConn, grpc.Close)\n\n\t\tconfNotifs, err := 
walletConn.ConfirmationNotifications(p.ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tp.hub.SetTxConfNotifClient(confNotifs)\n\t}\n\n\terr = p.hub.FetchWork(p.ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = p.hub.Listen()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcsrfSecret, err := p.hub.CSRFSecret()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgcfg := &gui.Config{\n\t\tSoloPool: cfg.SoloPool,\n\t\tGUIDir: cfg.GUIDir,\n\t\tAdminPass: cfg.AdminPass,\n\t\tGUIPort: cfg.GUIPort,\n\t\tUseLEHTTPS: cfg.UseLEHTTPS,\n\t\tDomain: cfg.Domain,\n\t\tTLSCertFile: cfg.TLSCert,\n\t\tTLSKeyFile: cfg.TLSKey,\n\t\tActiveNet: cfg.net.Params,\n\t\tPaymentMethod: cfg.PaymentMethod,\n\t\tDesignation: cfg.Designation,\n\t\tPoolFee: cfg.PoolFee,\n\t\tCSRFSecret: csrfSecret,\n\t\tMinerPorts: minerPorts,\n\t\tWithinLimit: p.hub.WithinLimit,\n\t\tFetchLastWorkHeight: p.hub.FetchLastWorkHeight,\n\t\tFetchLastPaymentHeight: p.hub.FetchLastPaymentHeight,\n\t\tFetchMinedWork: p.hub.FetchMinedWork,\n\t\tFetchWorkQuotas: p.hub.FetchWorkQuotas,\n\t\tBackupDB: p.hub.BackupDB,\n\t\tFetchClients: p.hub.FetchClients,\n\t\tAccountExists: p.hub.AccountExists,\n\t\tFetchArchivedPayments: p.hub.FetchArchivedPayments,\n\t\tFetchPendingPayments: p.hub.FetchPendingPayments,\n\t\tFetchCacheChannel: p.hub.FetchCacheChannel,\n\t}\n\tp.gui, err = gui.NewGUI(gcfg)\n\tif err != nil {\n\t\tp.hub.CloseListeners()\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}",
"func (m *MicroService) getLoadBalancedInstance() (*Instance, int, error) {\r\n\tinstCount := len(m.Instances)\r\n\tif instCount == 0 {\r\n\t\treturn nil, -1, ErrServiceNoInstance\r\n\t}\r\n\r\n\tif len(m.BlackList) == instCount {\r\n\t\treturn nil, -1, ErrAllInstancesDown\r\n\t}\r\n\r\n\tinstances := make([]*Instance, instCount)\r\n\tcopy(instances, m.Instances)\r\n\r\n\tvar idx int\r\n\tvar err error\r\n\tfor {\r\n\t\tswitch m.Strategy {\r\n\t\tcase RoundRobin:\r\n\t\t\tidx, err = getRoundRobinInstIdx(instances, m.LastUsedIdx.Get())\r\n\t\tcase LeastConnected:\r\n\t\t\tidx = getLeastConInstIdx(instances)\r\n\t\tcase Random:\r\n\t\t\tidx = getRandomInstIdx(instances)\r\n\t\tdefault:\r\n\t\t\treturn nil, -1, NewError(ErrInvalidStrategyCode, \"Unexpected strategy \" + string(m.Strategy))\r\n\t\t}\r\n\r\n\t\tif err != nil {\r\n\t\t\treturn nil, -1, err\r\n\t\t}\r\n\r\n\t\tif m.isBlacklisted(idx) {\r\n\t\t\tinstances[idx] = nil\r\n\t\t} else {\r\n\t\t\tm.LastUsedIdx.Set(idx)\r\n\t\t\treturn instances[idx], idx, nil\r\n\t\t}\r\n\t}\r\n}",
"func New(network, addr string, size int) (*Pool, error) {\n\treturn NewCustom(network, size, SingleAddrFunc(addr), redis.Dial)\n}",
"func bindBaseFactory(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := ParsedABI(K_BaseFactory)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil\n}",
"func newPool(addr string) (*pool, error) {\n\tp := pool{redis.Pool{\n\t\tMaxActive: 100,\n\t\tWait: true,\n\t\tMaxIdle: 10,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) { return redis.Dial(\"tcp\", addr) },\n\t}}\n\n\t// Test connection\n\tconn := p.Get()\n\tdefer conn.Close()\n\n\t_, err := conn.Do(\"PING\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &p, nil\n}",
"func (t *strideTable[T]) getOrCreateChild(addr uint8) *strideTable[T] {\n\tidx := hostIndex(addr)\n\tif t.entries[idx].child == nil {\n\t\tt.entries[idx].child = new(strideTable[T])\n\t\tt.refs++\n\t}\n\treturn t.entries[idx].child\n}",
"func (n NetworkTypeWiFi) construct() NetworkTypeClass { return &n }",
"func NewKeybase(validatorMoniker, mnemonic, password string) (keyring.Keyring, keyring.Info, error) {\n\tkr := keyring.NewInMemory()\n\thdpath := *hd.NewFundraiserParams(0, sdk.CoinType, 0)\n\tinfo, err := kr.NewAccount(validatorMoniker, mnemonic, password, hdpath.String(), hd.Secp256k1)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn kr, info, nil\n}",
"func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) {\n\tif baseLayerIdentity == nil {\n\t\treturn nil, InvalidPolicyFormatError(\"baseLayerIdentity not specified\")\n\t}\n\treturn &prSignedBaseLayer{\n\t\tprCommon: prCommon{Type: prTypeSignedBaseLayer},\n\t\tBaseLayerIdentity: baseLayerIdentity,\n\t}, nil\n}",
"func NewBase(path string, hashName string) (*Base, error) {\n\tfor _, p := range []string{\"blobs/\" + hashName, \"state\", \"tmp\"} {\n\t\tif err := os.MkdirAll(filepath.Join(path, p), 0755); err != nil && !os.IsExist(err) {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn &Base{Path: path, HashName: hashName, Hash: cryptomap.DetermineHash(hashName)}, nil\n}",
"func newBaseRunner(collector *resourceStatusCollector) *baseRunner {\n\treturn &baseRunner{\n\t\tcollector: collector,\n\t}\n}",
"func (p *Periph) LoadBASE(n int) uint32 {\n\treturn p.base[n].Load()\n}",
"func createPerNodePhysicalVIPs(isIPv6 bool, protocol v1.Protocol, sourcePort int32, targetIPs []string, targetPort int32) error {\n\tklog.V(5).Infof(\"Creating Node VIPs - %s, %d, [%v], %d\", protocol, sourcePort, targetIPs, targetPort)\n\t// Each gateway has a separate load-balancer for N/S traffic\n\tgatewayRouters, _, err := gateway.GetOvnGateways()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, gatewayRouter := range gatewayRouters {\n\t\tgatewayLB, err := gateway.GetGatewayLoadBalancer(gatewayRouter, protocol)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Gateway router %s does not have load balancer (%v)\",\n\t\t\t\tgatewayRouter, err)\n\t\t\tcontinue\n\t\t}\n\t\tphysicalIPs, err := gateway.GetGatewayPhysicalIPs(gatewayRouter)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Gateway router %s does not have physical ip (%v)\", gatewayRouter, err)\n\t\t\tcontinue\n\t\t}\n\t\t// Filter only phyiscal IPs of the same family\n\t\tphysicalIPs, err = util.MatchAllIPStringFamily(isIPv6, physicalIPs)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to find node physical IPs, for gateway: %s, error: %v\", gatewayRouter, err)\n\t\t\treturn err\n\t\t}\n\n\t\t// If self ip is in target list, we need to use special IP to allow hairpin back to host\n\t\tnewTargets := util.UpdateIPsSlice(targetIPs, physicalIPs, []string{types.V4HostMasqueradeIP, types.V6HostMasqueradeIP})\n\n\t\terr = loadbalancer.CreateLoadBalancerVIPs(gatewayLB, physicalIPs, sourcePort, newTargets, targetPort)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to create VIP in load balancer %s - %v\", gatewayLB, err)\n\t\t\treturn err\n\t\t}\n\n\t\tif config.Gateway.Mode == config.GatewayModeShared {\n\t\t\tworkerNode := util.GetWorkerFromGatewayRouter(gatewayRouter)\n\t\t\tworkerLB, err := loadbalancer.GetWorkerLoadBalancer(workerNode, protocol)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"Worker switch %s does not have load balancer (%v)\", workerNode, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = loadbalancer.CreateLoadBalancerVIPs(workerLB, physicalIPs, sourcePort, targetIPs, targetPort)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"Failed to create VIP in load balancer %s - %v\", workerLB, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func ensureNetwork(name string) error {\n\t// TODO: the network might already exist and not have ipv6 ... :|\n\t// discussion: https://github.com/kubernetes-sigs/kind/pull/1508#discussion_r414594198\n\texists, err := checkIfNetworkExists(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// network already exists, we're good\n\tif exists {\n\t\treturn nil\n\t}\n\n\t// Generate unique subnet per network based on the name\n\t// obtained from the ULA fc00::/8 range\n\t// Make N attempts with \"probing\" in case we happen to collide\n\tsubnet := generateULASubnetFromName(name, 0)\n\terr = createNetwork(name, subnet)\n\tif err == nil {\n\t\t// Success!\n\t\treturn nil\n\t}\n\n\t// On the first try check if ipv6 fails entirely on this machine\n\t// https://github.com/kubernetes-sigs/kind/issues/1544\n\t// Otherwise if it's not a pool overlap error, fail\n\t// If it is, make more attempts below\n\tif isIPv6UnavailableError(err) {\n\t\t// only one attempt, IPAM is automatic in ipv4 only\n\t\treturn createNetwork(name, \"\")\n\t} else if isPoolOverlapError(err) {\n\t\t// unknown error ...\n\t\treturn err\n\t}\n\n\t// keep trying for ipv6 subnets\n\tconst maxAttempts = 5\n\tfor attempt := int32(1); attempt < maxAttempts; attempt++ {\n\t\tsubnet := generateULASubnetFromName(name, attempt)\n\t\terr = createNetwork(name, subnet)\n\t\tif err == nil {\n\t\t\t// success!\n\t\t\treturn nil\n\t\t} else if !isPoolOverlapError(err) {\n\t\t\t// unknown error ...\n\t\t\treturn err\n\t\t}\n\t}\n\treturn errors.New(\"exhausted attempts trying to find a non-overlapping subnet\")\n}",
"func NewBase(name string) *Base {\n\treturn &Base{name}\n}",
"func (n NetworkTypeOther) construct() NetworkTypeClass { return &n }",
"func (bi *baseInstance) toGA() *ga.Instance {\n\tinst := &ga.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*ga.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*ga.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},\n\t\t}\n\t}\n\treturn inst\n}",
"func (e *GT) Base() *GT {\n\tif e.p == nil {\n\t\te.p = &gfP12{}\n\t}\n\te.p.Set(gfP12Gen)\n\treturn e\n}",
"func createDefaultVxlanIPPool(ctx context.Context, client client.Interface, cidr *cnet.IPNet, blockSize int, isNATOutgoingEnabled, checkVxlan bool) error {\n\tvar poolName string\n\tswitch cidr.Version() {\n\tcase 4:\n\t\tpoolName = defaultIpv4PoolName\n\tcase 6:\n\t\tpoolName = defaultIpv6PoolName\n\tdefault:\n\t\treturn fmt.Errorf(\"Unknown IP version for CIDR: %s\", cidr.String())\n\n\t}\n\tpool := &api.IPPool{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: poolName,\n\t\t},\n\t\tSpec: api.IPPoolSpec{\n\t\t\tCIDR: cidr.String(),\n\t\t\tBlockSize: blockSize,\n\t\t\tNATOutgoing: isNATOutgoingEnabled,\n\t\t\tIPIPMode: api.IPIPModeNever,\n\t\t\tVXLANMode: api.VXLANModeAlways,\n\t\t},\n\t}\n\n\tlog.Infof(\"Ensure default IPv%d pool (cidr %s, blockSize %d, nat %t, vxlanMode %s).\", cidr.Version(), cidr.String(), blockSize, isNATOutgoingEnabled, api.VXLANModeAlways)\n\n\tvar defaultPool *api.IPPool\n\tvar err error\n\tcreatePool := true\n\tif !checkVxlan {\n\t\t// Canal will always create a default ippool with vxlan disabled.\n\t\tdefaultPool, err = client.IPPools().Get(ctx, poolName, options.GetOptions{})\n\t\tif err == nil {\n\t\t\tif defaultPool.Spec.VXLANMode != api.VXLANModeAlways {\n\t\t\t\t// ippool is created by Canal. Delete it\n\t\t\t\t_, err := client.IPPools().Delete(ctx, poolName, options.DeleteOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithError(err).Errorf(\"Failed to delete existing default IPv%d IP pool\", cidr.Version())\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// We have a default pool and vxlan mode is enabled.\n\t\t\t\tcreatePool = false\n\t\t\t}\n\t\t} else {\n\t\t\tif _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {\n\t\t\t\tlog.WithError(err).Errorf(\"Failed to get default IPv%d pool for Canal\", cidr.Version())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.WithError(err).Warnf(\"Default IPv%d pool for Canal not exists\", cidr.Version())\n\t\t}\n\t}\n\n\tif createPool {\n\t\t// Create the pool.\n\t\t// Validate if pool already exists.\n\t\t_, err = client.IPPools().Create(ctx, pool, options.SetOptions{})\n\t\tif err == nil {\n\t\t\tlog.Infof(\"Created default IPv%d pool.\", cidr.Version())\n\t\t\treturn nil\n\t\t}\n\n\t\tif _, ok := err.(cerrors.ErrorResourceAlreadyExists); !ok {\n\t\t\tlog.WithError(err).Errorf(\"Failed to create default IPv%d pool (%s)\", cidr.Version(), cidr.String())\n\t\t\treturn err\n\t\t}\n\n\t\t// Default pool exists.\n\t\tdefaultPool, err = client.IPPools().Get(ctx, poolName, options.GetOptions{})\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorf(\"Failed to get existing default IPv%d IP pool\", cidr.Version())\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Check CIDR/blockSize/NATOutgoing for existing pool.\n\tif defaultPool.Spec.CIDR != cidr.String() ||\n\t\tdefaultPool.Spec.BlockSize != blockSize ||\n\t\tdefaultPool.Spec.NATOutgoing != isNATOutgoingEnabled ||\n\t\tdefaultPool.Spec.VXLANMode != api.VXLANModeAlways {\n\t\tmsg := fmt.Sprintf(\"current [cidr:%s, blocksize:%d, nat:%t, vxlanMode %s], expected [cidr:%s, blocksize:%d, nat:%t, vxlanMode %s]\",\n\t\t\tdefaultPool.Spec.CIDR, defaultPool.Spec.BlockSize, defaultPool.Spec.NATOutgoing, defaultPool.Spec.VXLANMode,\n\t\t\tcidr.String(), blockSize, isNATOutgoingEnabled, api.VXLANModeAlways)\n\t\tlog.Errorf(\"Failed to validate existing default IPv%d IP pool (cidr/blocksize/nat/vxlanMode) %+v\", cidr.Version(), defaultPool.Spec)\n\t\treturn cerrors.ErrorValidation{\n\t\t\tErroredFields: []cerrors.ErroredField{{\n\t\t\t\tName: \"pool.Spec\",\n\t\t\t\tReason: 
msg,\n\t\t\t}},\n\t\t}\n\t}\n\n\tlog.Infof(\"Use current default IPv%d pool.\", cidr.Version())\n\treturn nil\n}",
"func newVirtualNetworkClient(subID string, authorizer auth.Authorizer) (*client, error) {\n\tc, err := wssdcloudclient.GetVirtualNetworkClient(&subID, authorizer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &client{c}, nil\n}",
"func init() {\n\t_drv = &Drv{}\n\t_drv.locations = make(map[string]*time.Location)\n\t_drv.openEnvs = newEnvList()\n\n\t// init general pools\n\t_drv.listPool = newPool(func() interface{} { return list.New() })\n\t_drv.envPool = newPool(func() interface{} { return &Env{openSrvs: newSrvList(), openCons: newConList()} })\n\t_drv.conPool = newPool(func() interface{} { return &Con{} })\n\t_drv.srvPool = newPool(func() interface{} { return &Srv{openSess: newSesList()} })\n\t_drv.sesPool = newPool(func() interface{} { return &Ses{openStmts: newStmtList(), openTxs: newTxList()} })\n\t_drv.stmtPool = newPool(func() interface{} { return &Stmt{openRsets: newRsetList()} })\n\t_drv.txPool = newPool(func() interface{} { return &Tx{} })\n\t_drv.rsetPool = newPool(func() interface{} { return &Rset{genByPool: true} })\n\n\t// init bind pools\n\t_drv.bndPools = make([]*sync.Pool, bndIdxNil+1)\n\t_drv.bndPools[bndIdxInt64] = newPool(func() interface{} { return &bndInt64{} })\n\t_drv.bndPools[bndIdxInt32] = newPool(func() interface{} { return &bndInt32{} })\n\t_drv.bndPools[bndIdxInt16] = newPool(func() interface{} { return &bndInt16{} })\n\t_drv.bndPools[bndIdxInt8] = newPool(func() interface{} { return &bndInt8{} })\n\t_drv.bndPools[bndIdxUint64] = newPool(func() interface{} { return &bndUint64{} })\n\t_drv.bndPools[bndIdxUint32] = newPool(func() interface{} { return &bndUint32{} })\n\t_drv.bndPools[bndIdxUint16] = newPool(func() interface{} { return &bndUint16{} })\n\t_drv.bndPools[bndIdxUint8] = newPool(func() interface{} { return &bndUint8{} })\n\t_drv.bndPools[bndIdxFloat64] = newPool(func() interface{} { return &bndFloat64{} })\n\t_drv.bndPools[bndIdxFloat32] = newPool(func() interface{} { return &bndFloat32{} })\n\t_drv.bndPools[bndIdxNumString] = newPool(func() interface{} { return &bndNumString{} })\n\t_drv.bndPools[bndIdxOCINum] = newPool(func() interface{} { return &bndOCINum{} })\n\t_drv.bndPools[bndIdxInt64Ptr] = newPool(func() interface{} { return &bndInt64Ptr{} })\n\t_drv.bndPools[bndIdxInt32Ptr] = newPool(func() interface{} { return &bndInt32Ptr{} })\n\t_drv.bndPools[bndIdxInt16Ptr] = newPool(func() interface{} { return &bndInt16Ptr{} })\n\t_drv.bndPools[bndIdxInt8Ptr] = newPool(func() interface{} { return &bndInt8Ptr{} })\n\t_drv.bndPools[bndIdxUint64Ptr] = newPool(func() interface{} { return &bndUint64Ptr{} })\n\t_drv.bndPools[bndIdxUint32Ptr] = newPool(func() interface{} { return &bndUint32Ptr{} })\n\t_drv.bndPools[bndIdxUint16Ptr] = newPool(func() interface{} { return &bndUint16Ptr{} })\n\t_drv.bndPools[bndIdxUint8Ptr] = newPool(func() interface{} { return &bndUint8Ptr{} })\n\t_drv.bndPools[bndIdxFloat64Ptr] = newPool(func() interface{} { return &bndFloat64Ptr{} })\n\t_drv.bndPools[bndIdxFloat32Ptr] = newPool(func() interface{} { return &bndFloat32Ptr{} })\n\t_drv.bndPools[bndIdxNumStringPtr] = newPool(func() interface{} { return &bndNumStringPtr{} })\n\t_drv.bndPools[bndIdxOCINumPtr] = newPool(func() interface{} { return &bndOCINumPtr{} })\n\t_drv.bndPools[bndIdxInt64Slice] = newPool(func() interface{} { return &bndInt64Slice{} })\n\t_drv.bndPools[bndIdxInt32Slice] = newPool(func() interface{} { return &bndInt32Slice{} })\n\t_drv.bndPools[bndIdxInt16Slice] = newPool(func() interface{} { return &bndInt16Slice{} })\n\t_drv.bndPools[bndIdxInt8Slice] = newPool(func() interface{} { return &bndInt8Slice{} })\n\t_drv.bndPools[bndIdxUint64Slice] = newPool(func() interface{} { return &bndUint64Slice{} })\n\t_drv.bndPools[bndIdxUint32Slice] = newPool(func() interface{} { return &bndUint32Slice{} 
})\n\t_drv.bndPools[bndIdxUint16Slice] = newPool(func() interface{} { return &bndUint16Slice{} })\n\t_drv.bndPools[bndIdxUint8Slice] = newPool(func() interface{} { return &bndUint8Slice{} })\n\t_drv.bndPools[bndIdxFloat64Slice] = newPool(func() interface{} { return &bndFloat64Slice{} })\n\t_drv.bndPools[bndIdxFloat32Slice] = newPool(func() interface{} { return &bndFloat32Slice{} })\n\t_drv.bndPools[bndIdxNumStringSlice] = newPool(func() interface{} { return &bndNumStringSlice{} })\n\t_drv.bndPools[bndIdxOCINumSlice] = newPool(func() interface{} { return &bndOCINumSlice{} })\n\t_drv.bndPools[bndIdxTime] = newPool(func() interface{} { return &bndTime{} })\n\t_drv.bndPools[bndIdxTimePtr] = newPool(func() interface{} { return &bndTimePtr{} })\n\t_drv.bndPools[bndIdxTimeSlice] = newPool(func() interface{} { return &bndTimeSlice{} })\n\t_drv.bndPools[bndIdxDate] = newPool(func() interface{} { return &bndDate{} })\n\t_drv.bndPools[bndIdxDatePtr] = newPool(func() interface{} { return &bndDatePtr{} })\n\t_drv.bndPools[bndIdxDateSlice] = newPool(func() interface{} { return &bndDateSlice{} })\n\t_drv.bndPools[bndIdxString] = newPool(func() interface{} { return &bndString{} })\n\t_drv.bndPools[bndIdxStringPtr] = newPool(func() interface{} { return &bndStringPtr{} })\n\t_drv.bndPools[bndIdxStringSlice] = newPool(func() interface{} { return &bndStringSlice{} })\n\t_drv.bndPools[bndIdxBool] = newPool(func() interface{} { return &bndBool{} })\n\t_drv.bndPools[bndIdxBoolPtr] = newPool(func() interface{} { return &bndBoolPtr{} })\n\t_drv.bndPools[bndIdxBoolSlice] = newPool(func() interface{} { return &bndBoolSlice{} })\n\t_drv.bndPools[bndIdxBin] = newPool(func() interface{} { return &bndBin{} })\n\t_drv.bndPools[bndIdxBinSlice] = newPool(func() interface{} { return &bndBinSlice{} })\n\t_drv.bndPools[bndIdxLob] = newPool(func() interface{} { return &bndLob{} })\n\t_drv.bndPools[bndIdxLobPtr] = newPool(func() interface{} { return &bndLobPtr{} })\n\t_drv.bndPools[bndIdxLobSlice] = newPool(func() interface{} { return &bndLobSlice{} })\n\t_drv.bndPools[bndIdxIntervalYM] = newPool(func() interface{} { return &bndIntervalYM{} })\n\t_drv.bndPools[bndIdxIntervalYMSlice] = newPool(func() interface{} { return &bndIntervalYMSlice{} })\n\t_drv.bndPools[bndIdxIntervalDS] = newPool(func() interface{} { return &bndIntervalDS{} })\n\t_drv.bndPools[bndIdxIntervalDSSlice] = newPool(func() interface{} { return &bndIntervalDSSlice{} })\n\t_drv.bndPools[bndIdxRset] = newPool(func() interface{} { return &bndRset{} })\n\t_drv.bndPools[bndIdxBfile] = newPool(func() interface{} { return &bndBfile{} })\n\t_drv.bndPools[bndIdxNil] = newPool(func() interface{} { return &bndNil{} })\n\n\t// init def pools\n\t_drv.defPools = make([]*sync.Pool, defIdxRset+1)\n\t_drv.defPools[defIdxInt64] = newPool(func() interface{} { return &defInt64{} })\n\t_drv.defPools[defIdxInt32] = newPool(func() interface{} { return &defInt32{} })\n\t_drv.defPools[defIdxInt16] = newPool(func() interface{} { return &defInt16{} })\n\t_drv.defPools[defIdxInt8] = newPool(func() interface{} { return &defInt8{} })\n\t_drv.defPools[defIdxUint64] = newPool(func() interface{} { return &defUint64{} })\n\t_drv.defPools[defIdxUint32] = newPool(func() interface{} { return &defUint32{} })\n\t_drv.defPools[defIdxUint16] = newPool(func() interface{} { return &defUint16{} })\n\t_drv.defPools[defIdxUint8] = newPool(func() interface{} { return &defUint8{} })\n\t_drv.defPools[defIdxFloat64] = newPool(func() interface{} { return &defFloat64{} })\n\t_drv.defPools[defIdxFloat32] = 
newPool(func() interface{} { return &defFloat32{} })\n\t_drv.defPools[defIdxOCINum] = newPool(func() interface{} { return &defOCINum{} })\n\t_drv.defPools[defIdxTime] = newPool(func() interface{} { return &defTime{} })\n\t_drv.defPools[defIdxDate] = newPool(func() interface{} { return &defDate{} })\n\t_drv.defPools[defIdxString] = newPool(func() interface{} { return &defString{} })\n\t_drv.defPools[defIdxNumString] = newPool(func() interface{} { return &defNumString{} })\n\t_drv.defPools[defIdxOCINum] = newPool(func() interface{} { return &defOCINum{} })\n\t_drv.defPools[defIdxBool] = newPool(func() interface{} { return &defBool{} })\n\t_drv.defPools[defIdxLob] = newPool(func() interface{} { return &defLob{} })\n\t_drv.defPools[defIdxRaw] = newPool(func() interface{} { return &defRaw{} })\n\t_drv.defPools[defIdxLongRaw] = newPool(func() interface{} { return &defLongRaw{} })\n\t_drv.defPools[defIdxBfile] = newPool(func() interface{} { return &defBfile{} })\n\t_drv.defPools[defIdxIntervalYM] = newPool(func() interface{} { return &defIntervalYM{} })\n\t_drv.defPools[defIdxIntervalDS] = newPool(func() interface{} { return &defIntervalDS{} })\n\t_drv.defPools[defIdxRowid] = newPool(func() interface{} { return &defRowid{} })\n\t_drv.defPools[defIdxRset] = newPool(func() interface{} { return &defRset{} })\n\n\tvar err error\n\tif _drv.sqlPkgEnv, err = OpenEnv(); err != nil {\n\t\tpanic(fmt.Sprintf(\"OpenEnv: %v\", err))\n\t}\n\t_drv.sqlPkgEnv.isPkgEnv = true\n\t// database/sql/driver expects binaryFloat to return float64 (not the Rset default of float32)\n\tcfg := _drv.sqlPkgEnv.Cfg()\n\tcfg.RsetCfg.binaryFloat = F64\n\t_drv.sqlPkgEnv.SetCfg(cfg)\n\tsql.Register(Name, _drv)\n}",
"func LoadExistingBase(id string, dateCreatedUTC time.Time, dateUpdatedUTC time.Time, deleted bool) (*base, *shared.CustomError) {\n\tif uuid.FromStringOrNil(id) == uuid.Nil {\n\t\treturn nil, shared.NewCustomError(errInvalidGuid, shared.ErrorTypeSystem)\n\t}\n\n\tif dateCreatedUTC.After(time.Now().UTC()) {\n\t\treturn nil, shared.NewCustomError(errFutureCreated, shared.ErrorTypeSystem)\n\t}\n\n\tif dateUpdatedUTC.After(time.Now().UTC()) {\n\t\treturn nil, shared.NewCustomError(errFutureUpdated, shared.ErrorTypeSystem)\n\t}\n\n\tif dateUpdatedUTC.Before(dateCreatedUTC) {\n\t\treturn nil, shared.NewCustomError(errBeforeCreated, shared.ErrorTypeSystem)\n\t}\n\n\tif deleted != true && deleted != false {\n\t\treturn nil, shared.NewCustomError(errInvalidBool, shared.ErrorTypeSystem)\n\t}\n\treturn &base{\n\t\tIDx: id,\n\t\tDateCreatedUTCx: dateCreatedUTC,\n\t\tDateUpdatedUTCx: dateUpdatedUTC,\n\t\tDeletedx: deleted,\n\t\t//tracerID: eTag,\n\t}, nil\n}",
"func NewWithBaseURI(baseURI string, ) BaseClient {\n return BaseClient{\n Client: autorest.NewClientWithUserAgent(UserAgent()),\n BaseURI: baseURI,\n }\n}",
"func (n NetworkTypeMobileRoaming) construct() NetworkTypeClass { return &n }",
"func Create(cfg *mgrconfig.Config, debug bool) (*Pool, error) {\n\ttyp, ok := vmimpl.Types[cfg.Type]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unknown instance type '%v'\", cfg.Type)\n\t}\n\tenv := &vmimpl.Env{\n\t\tName: cfg.Name,\n\t\tOS: cfg.TargetOS,\n\t\tArch: cfg.TargetVMArch,\n\t\tWorkdir: cfg.Workdir,\n\t\tImage: cfg.Image,\n\t\tSSHKey: cfg.SSHKey,\n\t\tSSHUser: cfg.SSHUser,\n\t\tDebug: debug,\n\t\tConfig: cfg.VM,\n\t}\n\timpl, err := typ.Ctor(env)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Pool{\n\t\timpl: impl,\n\t\tworkdir: env.Workdir,\n\t}, nil\n}",
"func (alloc *RuntimePortAllocator) createAndRestorePortAllocator() (err error) {\n\talloc.pa, err = portallocator.NewPortAllocatorCustom(*alloc.pr, func(max int, rangeSpec string) (allocator.Interface, error) {\n\t\treturn allocator.NewAllocationMap(max, rangeSpec), nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tports, err := alloc.getReservedPorts(alloc.client)\n\tif err != nil {\n\t\treturn err\n\t}\n\talloc.log.Info(\"Found reserved ports\", \"ports\", ports)\n\n\tfor _, port := range ports {\n\t\tif err = alloc.pa.Allocate(port); err != nil {\n\t\t\talloc.log.Error(err, \"can't allocate reserved ports\", \"port\", port)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func init() {\n\tpools = make([]*sync.Pool, len(bucketSize))\n\tfor i, v := range bucketSize {\n\t\t// to use new variable inside the New function\n\t\tv1 := v\n\t\tpools[i] = &sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn make([]byte, v1)\n\t\t\t},\n\t\t}\n\t}\n}",
"func ReserveCIDRForSingleHost(ctx context.Context, networkInstance resources.Network) (_ string, _ uint, ferr fail.Error) {\n\tvar index uint\n\txerr := networkInstance.Alter(ctx, func(clonable data.Clonable, props *serialize.JSONProperties) fail.Error {\n\t\treturn props.Alter(networkproperty.SingleHostsV1, func(clonable data.Clonable) fail.Error {\n\t\t\tnshV1, ok := clonable.(*propertiesv1.NetworkSingleHosts)\n\t\t\tif !ok {\n\t\t\t\treturn fail.InconsistentError(\n\t\t\t\t\t\"'*propertiesv1.NetworkSingleHosts' expected, '%s' provided\",\n\t\t\t\t\treflect.TypeOf(clonable).String(),\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tindex = nshV1.ReserveSlot()\n\t\t\treturn nil\n\t\t})\n\t})\n\tif xerr != nil {\n\t\treturn \"\", 0, xerr\n\t}\n\n\tdefer func() {\n\t\tferr = debug.InjectPlannedFail(ferr)\n\t\tif ferr != nil {\n\t\t\tderr := FreeCIDRForSingleHost(cleanupContextFrom(ctx), networkInstance, index)\n\t\t\tif derr != nil {\n\t\t\t\t_ = ferr.AddConsequence(fail.Wrap(derr, \"cleaning up on failure, failed to free CIDR slot '%d' in Network '%s'\", index, networkInstance.GetName()))\n\t\t\t}\n\t\t}\n\t}()\n\n\t_, networkNet, err := net.ParseCIDR(abstract.SingleHostNetworkCIDR)\n\terr = debug.InjectPlannedError(err)\n\tif err != nil {\n\t\treturn \"\", 0, fail.Wrap(err, \"failed to convert CIDR to net.IPNet\")\n\t}\n\n\tresult, xerr := netretry.NthIncludedSubnet(*networkNet, propertiesv1.SingleHostsCIDRMaskAddition, index)\n\tif xerr != nil {\n\t\treturn \"\", 0, xerr\n\t}\n\treturn result.String(), index, nil\n}",
"func Create(self *IpAddress) (*Memberlist, error) {\n\n\tserviceUrl := os.Getenv(\"GCP_SERVICE_URL\")\n\tif serviceUrl == \"\" {\n\t\treturn nil, fmt.Errorf(\"GCP_SERVICE_URL environment variable unset or missing\")\n\t}\n\n\tid, _ := uuid.NewV4()\n\n\tipAddresses := make(map[string]*IpAddress)\n\tipAddresses[id.String()] = self\n\n\treturn &Memberlist{\n\t\tServiceUrl: serviceUrl,\n\t\tUuid: id.String(),\n\t\tSelf: self,\n\t}, nil\n}",
"func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n C.glowDrawElementsInstancedBaseVertexBaseInstance(gpDrawElementsInstancedBaseVertexBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex), (C.GLuint)(baseinstance))\n}",
"func (a *PodAllocator) Init() error {\n\tvar err error\n\tif util.DoesNetworkRequireTunnelIDs(a.netInfo) {\n\t\ta.idAllocator, err = id.NewIDAllocator(a.netInfo.GetNetworkName(), types.MaxLogicalPortTunnelKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Reserve the id 0. We don't want to assign this id to any of the pods.\n\t\terr = a.idAllocator.ReserveID(\"zero\", 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif util.DoesNetworkRequireIPAM(a.netInfo) {\n\t\tsubnets := a.netInfo.Subnets()\n\t\tipNets := make([]*net.IPNet, 0, len(subnets))\n\t\tfor _, subnet := range subnets {\n\t\t\tipNets = append(ipNets, subnet.CIDR)\n\t\t}\n\n\t\treturn a.ipAllocator.AddOrUpdateSubnet(a.netInfo.GetNetworkName(), ipNets, a.netInfo.ExcludeSubnets()...)\n\t}\n\n\treturn nil\n}",
"func newPool(addr string, num int) *TCPPool{\n\tif(num < 1){\n\t\tfmt.Println(\"Connection pool requires atleast one connection\")\n\t\treturn nil\n\t}\n\ttcpPool := &TCPPool{}\n\tfor i := 0; i < num; i++{\n\t\tconn, err := createConnection(addr);\n\t\tif err != nil{\n\t\t\tfmt.Println(\"Cannot create connection \", err)\n\t\t\treturn nil\n\t\t}\n\t\ttcpPool.putConnection(conn)\t\n\t}\n\treturn tcpPool\n}",
"func New(userName string, subuidSrc, subgidSrc io.Reader) (intf.SubidAlloc, error) {\n\n\tfilter := func(entry user.SubID) bool {\n\t\treturn entry.Name == userName\n\t}\n\n\t// read subuid range(s) for userName\n\tuidRanges, err := user.ParseSubIDFilter(subuidSrc, filter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(uidRanges) == 0 {\n\t\treturn nil, fmt.Errorf(\"could not find subuid info for user %s\", userName)\n\t}\n\n\t// read subgid range(s) for userName\n\tgidRanges, err := user.ParseSubIDFilter(subgidSrc, filter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(gidRanges) == 0 {\n\t\treturn nil, fmt.Errorf(\"could not find subgid info for user %s\", userName)\n\t}\n\n\t// we need at least one common subuid and subgid range\n\tcommonRanges := getCommonRanges(uidRanges, gidRanges)\n\tif len(commonRanges) == 0 {\n\t\treturn nil, fmt.Errorf(\"could not find matching subuid and subgids range for user %s\", userName)\n\t}\n\n\tsub := &subidAlloc{}\n\n\t// find a common range that is large enough for the allocation size\n\tfoundRange := false\n\tfor _, subid := range commonRanges {\n\t\tif subid.Count >= int64(allocBlkSize) {\n\t\t\tfoundRange = true\n\t\t\tsub.idRange = subid\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !foundRange {\n\t\treturn nil, fmt.Errorf(\"did not find a large enough subuid range for user %s (need %v)\", userName, allocBlkSize)\n\t}\n\n\treturn sub, nil\n}",
"func (ms *memoryStore) GetWithBase(base string) (*NameSpace, error) {\n\tms.RLock()\n\tdefer ms.RUnlock()\n\tns, ok := ms.base2prefix[base]\n\tif !ok {\n\t\treturn nil, ErrNameSpaceNotFound\n\t}\n\treturn ns, nil\n}",
"func (m *InstanceManager) Ensure(ctx context.Context, obj runtime.Object) (bool, error) {\n\tinstance, err := convertInstance(obj)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tregion := scw.Region(instance.Spec.Region)\n\n\t// if instanceID is empty, we need to create the instance\n\tif instance.Spec.InstanceID == \"\" {\n\t\treturn false, m.createInstance(ctx, instance)\n\t}\n\n\trdbInstanceResp, err := m.API.GetInstance(&rdb.GetInstanceRequest{\n\t\tRegion: region,\n\t\tInstanceID: instance.Spec.InstanceID,\n\t})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tneedReturn, err := m.updateInstance(instance, rdbInstanceResp)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif needReturn {\n\t\treturn false, nil\n\t}\n\n\tneedReturn, err = m.upgradeInstance(instance, rdbInstanceResp)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif needReturn {\n\t\treturn false, nil\n\t}\n\n\tif instance.Spec.ACL != nil {\n\t\terr = m.updateACLs(ctx, instance, rdbInstanceResp)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\tif rdbInstanceResp.Endpoint != nil {\n\t\tinstance.Status.Endpoint.IP = rdbInstanceResp.Endpoint.IP.String()\n\t\tinstance.Status.Endpoint.Port = int32(rdbInstanceResp.Endpoint.Port)\n\t}\n\n\treturn rdbInstanceResp.Status == rdb.InstanceStatusReady, nil\n}",
"func (pool *ComplexPool) New() (Proxy, error) {\n\tlength := pool.SizeUnused()\n\n\tif length == 0 {\n\t\tif !pool.Config.ReloadWhenEmpty {\n\t\t\treturn Proxy{}, fmt.Errorf(\"prox (%p): cannot select proxy, no unused proxies left in pool\", pool)\n\t\t}\n\n\t\terr := pool.Load()\n\t\tif err != nil {\n\t\t\treturn Proxy{}, fmt.Errorf(\"prox (%p): cannot select unused proxy, error occurred while reloading pool: %v\", pool, err)\n\t\t}\n\n\t\tlength = pool.SizeUnused()\n\t\tif length == 0 {\n\t\t\treturn Proxy{}, fmt.Errorf(\"prox (%p): cannot select proxy, no unused proxies even after reload\", pool)\n\t\t}\n\t}\n\n\trawProxy := pool.Unused.Random()\n\tpool.Unused.Remove(rawProxy)\n\n\treturn *CastProxy(rawProxy), nil\n}",
"func (pool *servicePool) malloc(t reflect.Type) interface{} {\n\t// 判断此 领域服务类型是否存在 pool\n\tsyncpool, ok := pool.pool[t]\n\tif !ok {\n\t\treturn nil\n\t}\n\t// Get 其实是在 BindService 时注入的 生成 service 对象的函数\n\tnewService := syncpool.Get()\n\tif newService == nil {\n\t\tpanic(fmt.Sprintf(\"[Freedom] BindService: func return to empty, %v\", t))\n\t}\n\treturn newService\n}",
"func (rf *Factory) Create(address string) (types.Backend, error) {\n\t// No need to add prints in this function.\n\t// Make sure caller of this takes care of printing error\n\tlogrus.Infof(\"Connecting to remote: %s\", address)\n\n\tcontrolAddress, dataAddress, _, err := util.ParseAddresses(address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &Remote{\n\t\tName: address,\n\t\treplicaURL: fmt.Sprintf(\"http://%s/v1/replicas/1\", controlAddress),\n\t\tpingURL: fmt.Sprintf(\"http://%s/ping\", controlAddress),\n\t\thttpClient: &http.Client{\n\t\t\tTimeout: timeout,\n\t\t},\n\t\t// We don't want sender to wait for receiver, because receiver may\n\t\t// has been already notified\n\t\tcloseChan: make(chan struct{}, 5),\n\t\tmonitorChan: make(types.MonitorChannel, 5),\n\t}\n\n\treplica, err := r.info()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif replica.State != \"closed\" {\n\t\treturn nil, fmt.Errorf(\"Replica must be closed, Can not add in state: %s\", replica.State)\n\t}\n\n\tconn, err := net.Dial(\"tcp\", dataAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tremote := rpc.NewClient(conn, r.closeChan)\n\tr.IOs = remote\n\n\tif err := r.open(); err != nil {\n\t\tlogrus.Errorf(\"Failed to open replica, error: %v\", err)\n\t\tremote.Close()\n\t\treturn nil, err\n\t}\n\n\tgo r.monitorPing(remote)\n\n\treturn r, nil\n}",
"func (db *DB) allocate(txid txid, count int) (*page, error) {\n\t// Allocate a temporary buffer for the page.\n\tvar buf []byte\n\tif count == 1 {\n\t\tbuf = db.pagePool.Get().([]byte)\n\t} else {\n\t\tbuf = make([]byte, count*db.pageSize)\n\t}\n\tp := (*page)(unsafe.Pointer(&buf[0]))\n\tp.overflow = uint32(count - 1)\n\n\t// Use pages from the freelist if they are available.\n\tif p.id = db.freelist.allocate(txid, count); p.id != 0 {\n\t\treturn p, nil\n\t}\n\n\t// Resize mmap() if we're at the end.\n\tp.id = db.rwtx.meta.pgid\n\tvar minsz = int((p.id+pgid(count))+1) * db.pageSize\n\tif minsz >= db.datasz {\n\t\tif err := db.mmap(minsz); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"mmap allocate error: %s\", err)\n\t\t}\n\t}\n\n\t// Move the page id high water mark.\n\tdb.rwtx.meta.pgid += pgid(count)\n\n\treturn p, nil\n}",
"func newBaseConn(conn net.Conn) *BaseConn {\n\n\tb := new(BaseConn)\n\tb.conn = conn\n\n\tb.disconnected = make(chan struct{})\n\tb.send = make(chan *baseproto.Message, 5)\n\tb.stop = make(chan struct{})\n\tb.receivedCapabilities = make(chan *baseproto.Message, 1)\n\tb.received = make(chan *baseproto.Message, 5)\n\n\tb.Received = b.received\n\tb.Disconnected = b.disconnected\n\n\tgo b.readLoop()\n\tgo b.writeLoop()\n\n\treturn b\n}",
"func HandleInstanceCreate(w rest.ResponseWriter, r *rest.Request) {\n\t// get ima\n\tima := Ima{}\n\terr := r.DecodeJsonPayload(&ima)\n\tif err != nil {\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif ima.Ima == \"\" {\n\t\trest.Error(w, \"ima required\", 400)\n\t\treturn\n\t}\n\tif ima.Mem == 0 {\n\t\trest.Error(w, \"memory required\", 400)\n\t\treturn\n\t}\n\tif ima.Cpu == 0 {\n\t\trest.Error(w, \"cpu required\", 400)\n\t\treturn\n\t}\n\n\t// start the instance\n\tos := getImaOs(ima.Ima)\n\tswitch os {\n\tcase \"freebsd\":\n\t\t// clone ima to instance\n\t\tinstanceid := allocateInstanceId()\n\t\tcloneIma(ima.Ima, instanceid)\n\n\t\t// create network interface and bring up\n\t\ttap := allocateTap()\n\t\tif tap == \"\" {\n\t\t\treturn\n\t\t}\n\t\tsaveTap(tap, instanceid)\n\t\tbridge := findBridge()\n\t\taddTapToBridge(tap, bridge)\n\t\tbridgeUp(bridge)\n\n\t\tnmdm := \"/dev/nmdm-\" + instanceid + \"-A\"\n\t\tsaveCpu(ima.Cpu, instanceid)\n\t\tsaveMem(ima.Mem, instanceid)\n\t\tgo startFreeBSDVM(nmdm, ima.Cpu, ima.Mem, tap, instanceid)\n\t\tw.WriteJson(&instanceid)\n\tcase \"linux\":\n\t\t// clone ima to instance\n\t\tinstanceid := allocateInstanceId()\n\t\tcloneIma(ima.Ima, instanceid)\n\n\t\t// create network interface and bring up\n\t\ttap := allocateTap()\n\t\tif tap == \"\" {\n\t\t\treturn\n\t\t}\n\t\tsaveTap(tap, instanceid)\n\t\tbridge := findBridge()\n\t\taddTapToBridge(tap, bridge)\n\t\tbridgeUp(bridge)\n\n\t\t//nmdm := \"/dev/nmdm-\" + instanceid + \"-A\"\n\t\tsaveCpu(ima.Cpu, instanceid)\n\t\tsaveMem(ima.Mem, instanceid)\n\t\tbhyveDestroy(instanceid)\n\t\tnmdm := \"/dev/nmdm-\" + instanceid + \"-A\"\n\t\tgo startLinuxVM(nmdm, ima.Cpu, ima.Mem, tap, instanceid)\n\t\tw.WriteJson(&instanceid)\n\tdefault:\n\t\trest.Error(w, \"unknown OS\", 400)\n\t}\n}",
"func newClient(addr string, max int, discardClientTimeout time.Duration, fn connectRPCFn) (Client, error) {\n\n\trpcClientFactory := func() (interface{}, error) {\n\t\treturn fn(addr)\n\t}\n\trpcPool, err := pool.NewPool(max, rpcClientFactory)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trc := &reconnectingClient{addr: addr, pool: rpcPool, discardClientTimeout: discardClientTimeout}\n\treturn rc, nil\n}",
"func newBaseClient() *baseClient {\n\treturn &baseClient{\n\t\thttpClient: http.DefaultClient,\n\t\tmethod: \"GET\",\n\t\theader: make(http.Header),\n\t}\n}",
"func NewCustom(network string, size int, af AddrFunc, df DialFunc) (*Pool, error) {\n\tp := Pool{\n\t\tpool: make(chan *redis.Client, size),\n\t\tspare: make(chan string, size),\n\t\tdf: df,\n\t\tstopCh: make(chan bool),\n\t\tnetwork: network,\n\t}\n\n\tclient, err := df(network, af(0))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.pool <- client\n\n\tfor i := 1; i < size; i++ {\n\t\tp.spare <- af(i)\n\t}\n\n\t// set up a go-routine which will periodically ping connections in the pool.\n\t// if the pool is idle every connection will be hit once every 10 seconds.\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(10 * time.Second / time.Duration(size - len(p.spare))):\n\t\t\t\tp.ping()\n\t\t\tcase <-p.stopCh:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn &p, err\n}",
"func (bi *baseInstance) toBeta() *beta.Instance {\n\tinst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},\n\t\t}\n\t}\n\treturn inst\n}",
"func (face *FaceBase) InitFaceBase(id FaceId, sizeofPriv int, socket eal.NumaSocket) error {\n\tface.id = id\n\n\tif socket.IsAny() {\n\t\tif lc := eal.GetCurrentLCore(); lc.IsValid() {\n\t\t\tsocket = lc.GetNumaSocket()\n\t\t} else {\n\t\t\tsocket = eal.NumaSocketFromID(0) // TODO what if socket 0 is unavailable?\n\t\t}\n\t}\n\n\tfaceC := face.getPtr()\n\t*faceC = C.Face{}\n\tfaceC.id = C.FaceId(face.id)\n\tfaceC.state = C.FACESTA_UP\n\tfaceC.numaSocket = C.int(socket.ID())\n\n\tsizeofImpl := int(C.sizeof_FaceImpl) + sizeofPriv\n\tfaceC.impl = (*C.FaceImpl)(eal.ZmallocAligned(\"FaceImpl\", sizeofImpl, 1, socket))\n\n\treturn nil\n\n}",
"func CreateBaseConfigWithReadiness() *dynamic.Configuration {\n\treturn &dynamic.Configuration{\n\t\tHTTP: &dynamic.HTTPConfiguration{\n\t\t\tRouters: map[string]*dynamic.Router{\n\t\t\t\t\"readiness\": {\n\t\t\t\t\tRule: \"Path(`/ping`)\",\n\t\t\t\t\tEntryPoints: []string{\"readiness\"},\n\t\t\t\t\tService: \"readiness\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tServices: map[string]*dynamic.Service{\n\t\t\t\t\"readiness\": {\n\t\t\t\t\tLoadBalancer: &dynamic.ServersLoadBalancer{\n\t\t\t\t\t\tPassHostHeader: Bool(true),\n\t\t\t\t\t\tServers: []dynamic.Server{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tURL: \"http://127.0.0.1:8080\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tMiddlewares: map[string]*dynamic.Middleware{},\n\t\t},\n\t\tTCP: &dynamic.TCPConfiguration{\n\t\t\tRouters: map[string]*dynamic.TCPRouter{},\n\t\t\tServices: map[string]*dynamic.TCPService{},\n\t\t},\n\t}\n}",
"func (c BaseConfig) GetBaseConfig() BaseConfig { return c }",
"func (c *AntreaIPAMController) preallocateIPPoolForStatefulSet(ss *appsv1.StatefulSet) error {\n\tklog.InfoS(\"Processing create notification\", \"Namespace\", ss.Namespace, \"StatefulSet\", ss.Name)\n\n\tipPools := c.getIPPoolsForStatefulSet(ss)\n\n\tif ipPools == nil {\n\t\t// nothing to preallocate\n\t\treturn nil\n\t}\n\n\tif len(ipPools) > 1 {\n\t\treturn fmt.Errorf(\"annotation of multiple IP Pools is not supported\")\n\t}\n\n\t// Only one pool is supported for now. Dual stack support coming in future.\n\tipPoolName := ipPools[0]\n\tallocator, err := poolallocator.NewIPPoolAllocator(ipPoolName, c.crdClient, c.ipPoolLister)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to find IP Pool %s: %s\", ipPoolName, err)\n\t}\n\n\tsize := int(*ss.Spec.Replicas)\n\t// Note that AllocateStatefulSet would not preallocate IPs if this StatefulSet is already present\n\t// in the pool. This safeguards us from double allocation in case agent allocated IP by the time\n\t// controller task is executed. Note also that StatefulSet resize will not be handled.\n\tif size > 0 {\n\t\terr = allocator.AllocateStatefulSet(ss.Namespace, ss.Name, size)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to preallocate continuous IP space of size %d from Pool %s: %s\", size, ipPoolName, err)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (self *RegisObjManager) LoadPlayerBaseObj(id PLAYER_ID) *RedisPlayerBaseObj {\n\tvalue, ok := self.Load(id)\n\tif ok {\n\t\treturn value.(*RedisPlayerBaseObj)\n\t}\n\treturn nil\n}",
"func (a *APILoadBalancers) New() (types.Resource, error) {\n\tif err := a.Validate(); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to validate API Load balancers configuration: %w\", err)\n\t}\n\n\tcc := &container.Containers{\n\t\tPreviousState: a.State,\n\t\tDesiredState: make(container.ContainersState),\n\t}\n\n\tfor i, lb := range a.APILoadBalancers {\n\t\tlb := lb\n\t\ta.propagateInstance(&lb)\n\n\t\tlbx, _ := lb.New()\n\t\tlbxHcc, _ := lbx.ToHostConfiguredContainer()\n\n\t\tcc.DesiredState[strconv.Itoa(i)] = lbxHcc\n\t}\n\n\tc, _ := cc.New()\n\n\treturn &apiLoadBalancers{\n\t\tcontainers: c,\n\t}, nil\n}",
"func newNetwork(cfg *config.Network, c *ec2.EC2) (*network, error) {\n\tlog.Debug(\"Initializing AWS Network\")\n\tn := &network{\n\t\tResources: resource.NewResources(),\n\t\tNetwork: cfg,\n\t\tec2: c,\n\t}\n\n\tvpc, err := newVpc(c, n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn.vpc = vpc\n\tn.Append(vpc)\n\n\trouteTables, err := newRouteTables(c, n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn.routeTables = routeTables\n\tn.Append(routeTables)\n\n\tinternetGateway, err := newInternetGateway(c, n, \"public\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn.internetGateway = internetGateway\n\tn.Append(internetGateway)\n\n\t// Load the vpc since it is needed for the caches.\n\terr = n.vpc.Load()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tn.subnetCache, err = newSubnetCache(n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn.secgroupCache, err = newSecurityGroupCache(n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn n, nil\n}",
"func newMaglevLoadBalancer(info types.ClusterInfo, set types.HostSet) types.LoadBalancer {\n\tnames := []string{}\n\tfor _, host := range set.Hosts() {\n\t\tnames = append(names, host.AddressString())\n\t}\n\tmgv := &maglevLoadBalancer{\n\t\thosts: set,\n\t}\n\n\tnameCount := len(names)\n\t// if host count > BigM, maglev table building will cross array boundary\n\t// maglev lb will not work in this scenario\n\tif nameCount >= maglev.BigM {\n\t\tlog.DefaultLogger.Errorf(\"[lb][maglev] host count too large, expect <= %d, get %d\",\n\t\t\tmaglev.BigM, nameCount)\n\t\treturn mgv\n\t}\n\tif nameCount == 0 {\n\t\treturn mgv\n\t}\n\n\tmaglevM := maglev.SmallM\n\t// according to test, 30000 host with testing 1e8 times, hash distribution begins to go wrong,\n\t// max=4855, mean=3333.3333333333335, peak-to-mean=1.4565\n\t// so use BigM when host >= 30000\n\tlimit := 30000\n\tif nameCount >= limit {\n\t\tlog.DefaultLogger.Infof(\"[lb][maglev] host count %d >= %d, using maglev.BigM\", nameCount, limit)\n\t\tmaglevM = maglev.BigM\n\t}\n\n\tmgv.maglev = maglev.New(names, uint64(maglevM))\n\treturn mgv\n}",
"func (c *Repair) doRunOnce() error {\n\t// TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read,\n\t// or if they are executed against different leaders,\n\t// the ordering guarantee required to ensure no IP is allocated twice is violated.\n\t// ListServices must return a ResourceVersion higher than the etcd index Get triggers,\n\t// and the release code must not release services that have had IPs allocated but not yet been created\n\t// See #8295\n\n\t// If etcd server is not running we should wait for some time and fail only then. This is particularly\n\t// important when we start apiserver and etcd at the same time.\n\tsnapshotByFamily := make(map[v1.IPFamily]*api.RangeAllocation)\n\tstoredByFamily := make(map[v1.IPFamily]ipallocator.Interface)\n\n\terr := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {\n\t\tfor family, allocator := range c.allocatorByFamily {\n\t\t\t// get snapshot if it is not there\n\t\t\tif _, ok := snapshotByFamily[family]; !ok {\n\t\t\t\tsnapshot, err := allocator.Get()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\n\t\t\t\tsnapshotByFamily[family] = snapshot\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t})\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to refresh the service IP block: %v\", err)\n\t}\n\n\t// ensure that ranges are assigned\n\tfor family, snapshot := range snapshotByFamily {\n\t\tif snapshot.Range == \"\" {\n\t\t\tsnapshot.Range = c.networkByFamily[family].String()\n\t\t}\n\t}\n\n\t// Create an allocator because it is easy to use.\n\tfor family, snapshot := range snapshotByFamily {\n\t\tstored, err := ipallocator.NewFromSnapshot(snapshot)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to rebuild allocator from snapshots for family:%v with error:%v\", family, err)\n\t\t}\n\n\t\tstoredByFamily[family] = stored\n\t}\n\n\trebuiltByFamily := make(map[v1.IPFamily]*ipallocator.Range)\n\n\tfor family, network := range c.networkByFamily {\n\t\trebuilt, err := ipallocator.NewInMemory(network)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to create CIDR range for family %v: %v\", family, err)\n\t\t}\n\n\t\trebuiltByFamily[family] = rebuilt\n\t}\n\t// We explicitly send no resource version, since the resource version\n\t// of 'snapshot' is from a different collection, it's not comparable to\n\t// the service collection. 
The caching layer keeps per-collection RVs,\n\t// and this is proper, since in theory the collections could be hosted\n\t// in separate etcd (or even non-etcd) instances.\n\tlist, err := c.serviceClient.Services(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to refresh the service IP block: %v\", err)\n\t}\n\n\tgetFamilyByIP := func(ip net.IP) v1.IPFamily {\n\t\tif netutils.IsIPv6(ip) {\n\t\t\treturn v1.IPv6Protocol\n\t\t}\n\t\treturn v1.IPv4Protocol\n\t}\n\n\t// Check every Service's ClusterIP, and rebuild the state as we think it should be.\n\tfor _, svc := range list.Items {\n\t\tif !helper.IsServiceIPSet(&svc) {\n\t\t\t// didn't need a cluster IP\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, ip := range svc.Spec.ClusterIPs {\n\t\t\tip := netutils.ParseIPSloppy(ip)\n\t\t\tif ip == nil {\n\t\t\t\t// cluster IP is corrupt\n\t\t\t\tc.recorder.Eventf(&svc, nil, v1.EventTypeWarning, \"ClusterIPNotValid\", \"ClusterIPValidation\", \"Cluster IP %s is not a valid IP; please recreate service\", ip)\n\t\t\t\truntime.HandleError(fmt.Errorf(\"the cluster IP %s for service %s/%s is not a valid IP; please recreate\", ip, svc.Name, svc.Namespace))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfamily := getFamilyByIP(ip)\n\t\t\tif _, ok := rebuiltByFamily[family]; !ok {\n\t\t\t\t// this service is using an IPFamily no longer configured on cluster\n\t\t\t\tc.recorder.Eventf(&svc, nil, v1.EventTypeWarning, \"ClusterIPNotValid\", \"ClusterIPValidation\", \"Cluster IP %s(%s) is of ip family that is no longer configured on cluster; please recreate service\", ip, family)\n\t\t\t\truntime.HandleError(fmt.Errorf(\"the cluster IP %s(%s) for service %s/%s is of ip family that is no longer configured on cluster; please recreate\", ip, family, svc.Name, svc.Namespace))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// mark it as in-use\n\t\t\tactualAlloc := rebuiltByFamily[family]\n\t\t\tswitch err := actualAlloc.Allocate(ip); err {\n\t\t\tcase nil:\n\t\t\t\tactualStored := storedByFamily[family]\n\t\t\t\tif actualStored.Has(ip) {\n\t\t\t\t\t// remove it from the old set, so we can find leaks\n\t\t\t\t\tactualStored.Release(ip)\n\t\t\t\t} else {\n\t\t\t\t\t// cluster IP doesn't seem to be allocated\n\t\t\t\t\tc.recorder.Eventf(&svc, nil, v1.EventTypeWarning, \"ClusterIPNotAllocated\", \"ClusterIPAllocation\", \"Cluster IP [%v]:%s is not allocated; repairing\", family, ip)\n\t\t\t\t\truntime.HandleError(fmt.Errorf(\"the cluster IP [%v]:%s for service %s/%s is not allocated; repairing\", family, ip, svc.Name, svc.Namespace))\n\t\t\t\t}\n\t\t\t\tdelete(c.leaksByFamily[family], ip.String()) // it is used, so it can't be leaked\n\t\t\tcase ipallocator.ErrAllocated:\n\t\t\t\t// cluster IP is duplicate\n\t\t\t\tc.recorder.Eventf(&svc, nil, v1.EventTypeWarning, \"ClusterIPAlreadyAllocated\", \"ClusterIPAllocation\", \"Cluster IP [%v]:%s was assigned to multiple services; please recreate service\", family, ip)\n\t\t\t\truntime.HandleError(fmt.Errorf(\"the cluster IP [%v]:%s for service %s/%s was assigned to multiple services; please recreate\", family, ip, svc.Name, svc.Namespace))\n\t\t\tcase err.(*ipallocator.ErrNotInRange):\n\t\t\t\t// cluster IP is out of range\n\t\t\t\tc.recorder.Eventf(&svc, nil, v1.EventTypeWarning, \"ClusterIPOutOfRange\", \"ClusterIPAllocation\", \"Cluster IP [%v]:%s is not within the service CIDR %s; please recreate service\", family, ip, c.networkByFamily[family])\n\t\t\t\truntime.HandleError(fmt.Errorf(\"the cluster IP [%v]:%s for service %s/%s is not 
within the service CIDR %s; please recreate\", family, ip, svc.Name, svc.Namespace, c.networkByFamily[family]))\n\t\t\tcase ipallocator.ErrFull:\n\t\t\t\t// somehow we are out of IPs\n\t\t\t\tcidr := actualAlloc.CIDR()\n\t\t\t\tc.recorder.Eventf(&svc, nil, v1.EventTypeWarning, \"ServiceCIDRFull\", \"ClusterIPAllocation\", \"Service CIDR %v is full; you must widen the CIDR in order to create new services for Cluster IP [%v]:%s\", cidr, family, ip)\n\t\t\t\treturn fmt.Errorf(\"the service CIDR %v is full; you must widen the CIDR in order to create new services for Cluster IP [%v]:%s\", cidr, family, ip)\n\t\t\tdefault:\n\t\t\t\tc.recorder.Eventf(&svc, nil, v1.EventTypeWarning, \"UnknownError\", \"ClusterIPAllocation\", \"Unable to allocate cluster IP [%v]:%s due to an unknown error\", family, ip)\n\t\t\t\treturn fmt.Errorf(\"unable to allocate cluster IP [%v]:%s for service %s/%s due to an unknown error, exiting: %v\", family, ip, svc.Name, svc.Namespace, err)\n\t\t\t}\n\t\t}\n\t}\n\n\t// leak check\n\tfor family, leaks := range c.leaksByFamily {\n\t\tc.checkLeaked(leaks, storedByFamily[family], rebuiltByFamily[family])\n\t}\n\n\t// save logic\n\t// Blast the rebuilt state into storage.\n\tfor family, rebuilt := range rebuiltByFamily {\n\t\terr = c.saveSnapShot(rebuilt, c.allocatorByFamily[family], snapshotByFamily[family])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (na *cnmNetworkAllocator) Allocate(n *api.Network) error {\n\tif _, ok := na.networks[n.ID]; ok {\n\t\treturn fmt.Errorf(\"network %s already allocated\", n.ID)\n\t}\n\n\td, err := na.resolveDriver(n)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnw := &network{\n\t\tnw: n,\n\t\tendpoints: make(map[string]string),\n\t\tisNodeLocal: d.capability.DataScope == scope.Local,\n\t}\n\n\t// No swarm-level allocation can be provided by the network driver for\n\t// node-local networks. Only thing needed is populating the driver's name\n\t// in the driver's state.\n\tif nw.isNodeLocal {\n\t\tn.DriverState = &api.Driver{\n\t\t\tName: d.name,\n\t\t}\n\t\t// In order to support backward compatibility with older daemon\n\t\t// versions which assumes the network attachment to contains\n\t\t// non nil IPAM attribute, passing an empty object\n\t\tn.IPAM = &api.IPAMOptions{Driver: &api.Driver{}}\n\t} else {\n\t\tnw.pools, err = na.allocatePools(n)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed allocating pools and gateway IP for network %s\", n.ID)\n\t\t}\n\n\t\tif err := na.allocateDriverState(n); err != nil {\n\t\t\tna.freePools(n, nw.pools)\n\t\t\treturn errors.Wrapf(err, \"failed while allocating driver state for network %s\", n.ID)\n\t\t}\n\t}\n\n\tna.networks[n.ID] = nw\n\n\treturn nil\n}",
"func (o BuildSpecRuntimePtrOutput) Base() BuildSpecRuntimeBasePtrOutput {\n\treturn o.ApplyT(func(v *BuildSpecRuntime) *BuildSpecRuntimeBase {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Base\n\t}).(BuildSpecRuntimeBasePtrOutput)\n}"
] | [
"0.5949005",
"0.5725722",
"0.5369504",
"0.5214787",
"0.5149179",
"0.5123685",
"0.5117184",
"0.5099963",
"0.5049843",
"0.5029948",
"0.50290453",
"0.50138724",
"0.49412763",
"0.49271756",
"0.48621428",
"0.48538187",
"0.47791496",
"0.47764036",
"0.47500402",
"0.47404003",
"0.47392026",
"0.47286868",
"0.47158033",
"0.47081983",
"0.4691098",
"0.46894893",
"0.4674756",
"0.46728227",
"0.466863",
"0.4660493",
"0.46567085",
"0.46546787",
"0.46466398",
"0.46423286",
"0.46196347",
"0.4616663",
"0.4585225",
"0.45729405",
"0.45725137",
"0.45479596",
"0.4547838",
"0.4546467",
"0.45397297",
"0.45248595",
"0.45191827",
"0.45146802",
"0.45107323",
"0.45061502",
"0.4505096",
"0.4499987",
"0.44824037",
"0.44774488",
"0.44768718",
"0.4467408",
"0.446287",
"0.4462736",
"0.44576836",
"0.4448222",
"0.44353896",
"0.44297665",
"0.44271377",
"0.44262126",
"0.44209427",
"0.44085407",
"0.4397695",
"0.43925774",
"0.4392097",
"0.43910056",
"0.43904477",
"0.43850523",
"0.43801013",
"0.43771797",
"0.43745402",
"0.43695778",
"0.4366509",
"0.43612984",
"0.43545148",
"0.43533444",
"0.4347718",
"0.43467346",
"0.43460947",
"0.4345409",
"0.43446916",
"0.43419752",
"0.43319753",
"0.43311697",
"0.43300846",
"0.432516",
"0.43236843",
"0.43221587",
"0.43209547",
"0.43176502",
"0.43176174",
"0.4317361",
"0.4317",
"0.4316545",
"0.43143174",
"0.43054664",
"0.43016604",
"0.4300207"
] | 0.72381115 | 0 |
newGAGetHook creates a new closure with the current baseInstanceList to be used as a MockInstances.GetHook | func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {
return func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {
m.Lock.Lock()
defer m.Lock.Unlock()
if _, found := m.Objects[*key]; !found {
m.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()}
}
return false, nil, nil
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (bil *baseInstanceList) newBetaGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {\n\treturn func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {\n\t\tm.Lock.Lock()\n\t\tdefer m.Lock.Unlock()\n\n\t\tif _, found := m.Objects[*key]; !found {\n\t\t\tm.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toBeta()}\n\t\t}\n\t\treturn false, nil, nil\n\t}\n}",
"func (bil *baseInstanceList) newMockCloud() cloud.Cloud {\n\tc := cloud.NewMockGCE(nil)\n\n\t// insert hooks to lazy create a instance when needed\n\tc.MockInstances.GetHook = bil.newGAGetHook()\n\tc.MockBetaInstances.GetHook = bil.newBetaGetHook()\n\n\treturn c\n}",
"func (f *AutoIndexingServiceGetIndexesFunc) PushHook(hook func(context.Context, shared.GetIndexesOptions) ([]types.Index, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *AutoIndexingServiceGetListTagsFunc) PushHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *ExtensionStoreGetFeaturedExtensionsFunc) PushHook(hook func(context.Context) ([]*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *AutoIndexingServiceGetIndexByIDFunc) PushHook(hook func(context.Context, int) (types.Index, bool, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *AutoIndexingServiceGetIndexesByIDsFunc) PushHook(hook func(context.Context, ...int) ([]types.Index, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *AutoIndexingServiceGetUnsafeDBFunc) PushHook(hook func() database.DB) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func newGoGetter() *goGetter {\n\treturn &goGetter{}\n}",
"func NewHookLister(indexer cache.Indexer) HookLister {\n\treturn &hookLister{indexer: indexer}\n}",
"func (f *AutoIndexingServiceNumRepositoriesWithCodeIntelligenceFunc) PushHook(hook func(context.Context) (int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *UploadServiceGetListTagsFunc) PushHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *ReleaseStoreGetLatestFunc) PushHook(hook func(context.Context, int32, string, bool) (*stores.Release, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *DBStoreGetUploadsFunc) PushHook(hook func(context.Context, dbstore.GetUploadsOptions) ([]dbstore.Upload, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *DBStoreGetUploadsFunc) PushHook(hook func(context.Context, dbstore.GetUploadsOptions) ([]dbstore.Upload, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (ch *CloudwatchHook) GetHook() (func(zapcore.Entry) error, error) {\n\n\tvar cloudwatchWriter = func(e zapcore.Entry) error {\n\t\tif !ch.isAcceptedLevel(e.Level) {\n\t\t\treturn nil\n\t\t}\n\n\t\tevent := &cloudwatchlogs.InputLogEvent{\n\t\t\tMessage: aws.String(fmt.Sprintf(\"[%s] %s\", e.LoggerName, e.Message)),\n\t\t\tTimestamp: aws.Int64(int64(time.Nanosecond) * time.Now().UnixNano() / int64(time.Millisecond)),\n\t\t}\n\t\tparams := &cloudwatchlogs.PutLogEventsInput{\n\t\t\tLogEvents: []*cloudwatchlogs.InputLogEvent{event},\n\t\t\tLogGroupName: aws.String(ch.GroupName),\n\t\t\tLogStreamName: aws.String(ch.StreamName),\n\t\t\tSequenceToken: ch.nextSequenceToken,\n\t\t}\n\n\t\tif ch.Async {\n\t\t\tgo ch.sendEvent(params)\n\t\t\treturn nil\n\t\t}\n\n\t\treturn ch.sendEvent(params)\n\t}\n\n\tch.svc = cloudwatchlogs.New(session.New(ch.AWSConfig))\n\n\tlgresp, err := ch.svc.DescribeLogGroups(&cloudwatchlogs.DescribeLogGroupsInput{LogGroupNamePrefix: aws.String(ch.GroupName), Limit: aws.Int64(1)})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(lgresp.LogGroups) < 1 {\n\t\t// we need to create this log group\n\t\t_, err := ch.svc.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{LogGroupName: aws.String(ch.GroupName)})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tresp, err := ch.svc.DescribeLogStreams(&cloudwatchlogs.DescribeLogStreamsInput{\n\t\tLogGroupName: aws.String(ch.GroupName), // Required\n\t\tLogStreamNamePrefix: aws.String(ch.StreamName),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// grab the next sequence token\n\tif len(resp.LogStreams) > 0 {\n\t\tch.nextSequenceToken = resp.LogStreams[0].UploadSequenceToken\n\t\treturn cloudwatchWriter, nil\n\t}\n\n\t// create stream if it doesn't exist. the next sequence token will be null\n\t_, err = ch.svc.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{\n\t\tLogGroupName: aws.String(ch.GroupName),\n\t\tLogStreamName: aws.String(ch.StreamName),\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cloudwatchWriter, nil\n}",
"func (f *ReleaseStoreGetLatestBatchFunc) PushHook(hook func(context.Context, []int32, string, bool) ([]*stores.Release, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *ReleaseStoreGetArtifactsFunc) PushHook(hook func(context.Context, int64) ([]byte, []byte, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *UploadServiceGetUploadsFunc) PushHook(hook func(context.Context, shared1.GetUploadsOptions) ([]types.Upload, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *AutoIndexingServiceRepositoryIDsWithConfigurationFunc) PushHook(hook func(context.Context, int, int) ([]shared.RepositoryWithAvailableIndexers, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *DBStoreGetConfigurationPoliciesFunc) PushHook(hook func(context.Context, dbstore.GetConfigurationPoliciesOptions) ([]dbstore.ConfigurationPolicy, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *UploadServiceGetUploadsByIDsFunc) PushHook(hook func(context.Context, ...int) ([]types.Upload, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (e *Exporter) NewGaugesFuncWithMultiLabels(name, help string, labels []string, f func() map[string]int64) *stats.GaugesFuncWithMultiLabels {\n\tif e.name == \"\" || name == \"\" {\n\t\tv := stats.NewGaugesFuncWithMultiLabels(name, help, labels, f)\n\t\taddUnnamedExport(name, v)\n\t\treturn v\n\t}\n\tlvar := stats.NewGaugesFuncWithMultiLabels(\"\", help, labels, f)\n\t_ = e.createCountsTracker(name, help, labels, lvar, replaceOnDup, typeGauge)\n\treturn lvar\n}",
"func (f *ExtensionStoreGetByIDFunc) PushHook(hook func(context.Context, int32) (*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *ExtensionStoreGetByUUIDFunc) PushHook(hook func(context.Context, string) (*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *ExtensionStoreGetByExtensionIDFunc) PushHook(hook func(context.Context, string) (*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func New(serviceName string, environment string) LoggerWrapper {\n\tlogStore = &loggerWrapper{logrus.New().WithField(\"service\", serviceName).WithField(\"environment\", environment)}\n\tif environment == \"production\" {\n\t\tlogStore.SetFormat(&logrus.JSONFormatter{})\n\t}\n\n\t// fmt.Println(\"Adding hook\")\n\t// hook := logrusly.NewLogglyHook(\"71000042-f956-4c7e-987d-8694a20695a8\", \"https://logs-01.loggly.com/bulk/\", logrus.InfoLevel, serviceName)\n\t// logStore.Logger.Hooks.Add(hook)\n\treturn logStore\n}",
"func (f *DBStoreSelectRepositoriesForRetentionScanFunc) PushHook(hook func(context.Context, time.Duration, int) ([]int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *ExtensionStoreListFunc) PushHook(hook func(context.Context, stores.ExtensionsListOptions) ([]*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *PolicyServiceGetRetentionPolicyOverviewFunc) PushHook(hook func(context.Context, types.Upload, bool, int, int64, string, time.Time) ([]types.RetentionPolicyMatchCandidate, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *ResolverGetUploadsByIDsFunc) PushHook(hook func(context.Context, ...int) ([]dbstore.Upload, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (this *service) GRPCHook() reflect.Value {\n\treturn reflect.ValueOf(pb.RegisterMiHomeServer)\n}",
"func (f *ResolverGetIndexesByIDsFunc) PushHook(hook func(context.Context, ...int) ([]dbstore.Index, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func NewGet(g Getter) *Get {\n\treturn &Get{g}\n}",
"func newMockGetDataFunc(mockStore *mock.NodeStore) func(addr Address) (data []byte, err error) {\n\treturn func(addr Address) (data []byte, err error) {\n\t\tdata, err = mockStore.Get(addr)\n\t\tif err == mock.ErrNotFound {\n\t\t\t// preserve ErrChunkNotFound error\n\t\t\terr = ErrChunkNotFound\n\t\t}\n\t\treturn data, err\n\t}\n}",
"func ClosureNew(f interface{}) *C.GClosure {\n\tclosure := C._g_closure_new()\n\tclosures.Lock()\n\tclosures.m[closure] = reflect.ValueOf(f)\n\tclosures.Unlock()\n\treturn closure\n}",
"func (f *ExtensionStoreCreateFunc) PushHook(hook func(context.Context, int32, int32, string) (int32, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (k *kubelet) getHooks() *container.Hooks {\n\treturn &container.Hooks{\n\t\tPostStart: k.postStartHook(),\n\t}\n}",
"func NewLifeHook(e *Engine) Hook {\n\treturn Hook{\n\t\tOnStart: OnStart(e),\n\t\tOnStop: OnStop(e),\n\t}\n}",
"func newGoFactory() *GOFactory {\n\tgologger.SLogger.Println(\"Init Game Object Factory Singleton\")\n\tfOnce.Do(func() {\n\t\tgofactory = &GOFactory{\n\t\t\tGoCreator: make(map[string]ICreator),\n\t\t}\n\t})\n\treturn gofactory\n}",
"func (f *ReleaseStoreCreateFunc) PushHook(hook func(context.Context, *stores.Release) (int64, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func getHook(data *domain.Data, repo config.Repository) (config.Hook, bool, error) {\n\tfor _, hook := range repo.Hooks {\n\t\tf, err := matchHook(data, hook)\n\t\tif err != nil {\n\t\t\treturn config.Hook{}, false, err\n\t\t}\n\t\tif f {\n\t\t\treturn hook, true, nil\n\t\t}\n\t}\n\treturn config.Hook{}, false, nil\n}",
"func GetBindHook() BindHook {\n\treturn bindHook\n}",
"func NewHook(peerID int, token string) *VkHook {\n\thook := &VkHook{\n\t\tPeerID: peerID,\n\t\tVK: api.NewVK(token),\n\t\tUseLevels: DefaultLevels,\n\t}\n\n\treturn hook\n}",
"func (f *Function) M__get__(instance, owner Object) (Object, error) {\n\tif instance != None {\n\t\treturn NewBoundMethod(instance, f), nil\n\t}\n\treturn f, nil\n}",
"func (f *ResolverGetIndexByIDFunc) PushHook(hook func(context.Context, int) (dbstore.Index, bool, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func RegisterNewGroupHook(fn func(*Group)) {\n\tif newGroupHook != nil {\n\t\tpanic(\"RegisterNewGroupHook called more than once\")\n\t}\n\tnewGroupHook = fn\n}",
"func newAlfredWatcher() *alfredWatcher {\n w, _ := inotify.NewWatcher()\n aw := &alfredWatcher{\n watcher: w,\n list: make(map[string]uint32),\n }\n return aw\n}",
"func (a *App) GetHook(name string) (*Hook, error) {\n\tsp, err := a.GetSnapshot().FastForward()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn getHook(a, name, sp)\n}",
"func newListeners() *listeners { return &listeners{m: make(map[string]nl.Listener, 64)} }",
"func (ng *AlertNG) GetHooks() *api.Hooks {\n\treturn ng.api.Hooks\n}",
"func New() logrus.Hook {\n\treturn &normalCallerHook{}\n}",
"func (bi *baseInstance) toGA() *ga.Instance {\n\tinst := &ga.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*ga.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*ga.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},\n\t\t}\n\t}\n\treturn inst\n}",
"func New(config Config) zapcore.WriteSyncer {\n\treturn &gelf{Config: config}\n}",
"func (f *DBStoreDeleteOldIndexesFunc) PushHook(hook func(context.Context, time.Duration, time.Time) (int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *ExtensionStoreGetPublisherFunc) PushHook(hook func(context.Context, string) (*stores.Publisher, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *ExtensionStoreGetFeaturedExtensionsFunc) PushReturn(r0 []*stores.Extension, r1 error) {\n\tf.PushHook(func(context.Context) ([]*stores.Extension, error) {\n\t\treturn r0, r1\n\t})\n}",
"func newLfsHook(writer *rotatelogs.RotateLogs, tf *moduleFormatter) logrus.Hook {\n\tlfsHook := lfshook.NewHook(lfshook.WriterMap{\n\t\tlogrus.DebugLevel: writer,\n\t\tlogrus.InfoLevel: writer,\n\t\tlogrus.WarnLevel: writer,\n\t\tlogrus.ErrorLevel: writer,\n\t\tlogrus.FatalLevel: writer,\n\t\tlogrus.PanicLevel: writer,\n\t}, tf)\n\n\treturn lfsHook\n}",
"func newGauge(namespace, subsystem, name string, labelNames []string, client *statsd.Statter, isPrometheusEnabled bool) *Gauge {\n\topts := prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: subsystem,\n\t\tName: name,\n\t}\n\tvec := prometheus.NewGaugeVec(opts, labelNames)\n\tif isPrometheusEnabled {\n\t\tprometheus.MustRegister(vec)\n\t}\n\n\treturn &Gauge{\n\t\twatcher: vec,\n\t\tlabels: labelNames,\n\t\tclient: client,\n\t\tprefix: strings.Join([]string{namespace, subsystem, name}, \".\"),\n\t}\n}",
"func NewHook(client *Client, levels []logrus.Level) *Hook {\n\n\treturn &Hook{client, levels}\n}",
"func generateListGetter(buf *bytes.Buffer, method *generatedGoListMethod) error {\n\treturn goListGetterTemplate.Execute(buf, method)\n}",
"func (s *hookLister) Hooks(namespace string) HookNamespaceLister {\n\treturn hookNamespaceLister{indexer: s.indexer, namespace: namespace}\n}",
"func (f *JobRunFunc) PushHook(hook func(context.Context, database.DB, streaming.Sender) (*search.Alert, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func newGrpcListener(ghandler *GrpcHandler) net.Listener {\n\tl := &grpcListener{\n\t\tGrpcHandler: ghandler,\n\t}\n\tl.listenerCtx, l.listenerCtxCancel = context.WithCancel(ghandler.ctx)\n\treturn l\n}",
"func New(executor GetExecutor, lc logger.LoggingClient) *get {\n\treturn &get{\n\t\texecutor: executor,\n\t\tloggingClient: lc,\n\t}\n}",
"func newLdbCacheIter(snap *dbCacheSnapshot, slice *util.Range) *ldbCacheIter {\n\titer := snap.pendingKeys.Iterator(slice.Start, slice.Limit)\n\treturn &ldbCacheIter{Iterator: iter}\n}",
"func getHandleHookChange(\n\thooks []types.Hook, \n\trules []types.Rule, \n\texits []types.Exit,\n\tcallEvent func(string),\n\texitFunc func(code int),\n) func(string, string) {\n\thookChangeMap := generateHookChangeMap(rules, hooks)\n\thookChange := func(laststate string, newstate string) {\n\t\tif hookChangeMap[laststate] !=nil {\n\t\t\tlabels, hasMapping := hookChangeMap[laststate][newstate]\n\t\t\tif hasMapping {\n\t\t\t\tfor _, label := range(labels){\n\t\t\t\t\tcallEvent(label)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, exit := range(exits){\n\t\t\tif exit.State == newstate {\n\t\t\t\texitFunc(exit.Exitcode)\n\t\t\t}\n\t\t}\n\t}\n\n\n\treturn hookChange\n}",
"func instrumentGet(inner func()) {\n\tTotalRequests.Add(1)\n\tPendingRequests.Add(1)\n\tdefer PendingRequests.Add(-1)\n\n\tstart := time.Now()\n\n\tinner()\n\n\t// Capture the histogram over 18 geometric buckets \n\tdelta := time.Since(start)\n\tswitch {\n\tcase delta < time.Millisecond:\n\t\tLatencies.Add(\"0ms\", 1)\n\tcase delta > 32768*time.Millisecond:\n\t\tLatencies.Add(\">32s\", 1)\n\tdefault:\n\t\tfor i := time.Millisecond; i < 32768*time.Millisecond; i *= 2 {\n\t\t\tif delta >= i && delta < i*2 {\n\t\t\t\tLatencies.Add(i.String(), 1)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}",
"func (f *ExtensionStoreCountFunc) PushHook(hook func(context.Context, stores.ExtensionsListOptions) (int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func mockedGranter(kubeutil *kube.Kube, app *v1.RadixRegistration, namespace string, serviceAccount *corev1.ServiceAccount) error {\n\treturn nil\n}",
"func NewGetGeneric[T constraints.Integer](t mockConstructorTestingTNewGetGeneric) *GetGeneric[T] {\n\tmock := &GetGeneric[T]{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func (f *ExtensionStoreListPublishersFunc) PushHook(hook func(context.Context, stores.PublishersListOptions) ([]*stores.Publisher, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *DBStoreDirtyRepositoriesFunc) PushHook(hook func(context.Context) (map[int]int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *DBStoreDirtyRepositoriesFunc) PushHook(hook func(context.Context) (map[int]int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *UploadServiceGetAuditLogsForUploadFunc) PushHook(hook func(context.Context, int) ([]types.UploadLog, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *DBStoreSoftDeleteOldUploadsFunc) PushHook(hook func(context.Context, time.Duration, time.Time) (int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *DBStoreDeleteIndexesWithoutRepositoryFunc) PushHook(hook func(context.Context, time.Time) (map[int]int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func NewHook(b BoltHook) (*BoltHook, error) {\n\tboltDB, err := bolt.Open(b.DBLoc, 0600, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &BoltHook{\n\t\tDBLoc: b.DBLoc,\n\t\tBucket: b.Bucket,\n\t\tFormatter: b.Formatter,\n\t\tdb: boltDB,\n\t}, nil\n}",
"func GetNew(target *core.ServiceInstance) (Client, error) {\n\tpool := getPool(target.Kind)\n\treturn connect(pool, target)\n}",
"func newSnapshotCache() cache.SnapshotCache {\n\treturn cache.NewSnapshotCache(false, tbnProxyNodeHash{}, consoleLogger{})\n}",
"func (f *AutoIndexingServiceGetListTagsFunc) PushReturn(r0 []*gitdomain.Tag, r1 error) {\n\tf.PushHook(func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error) {\n\t\treturn r0, r1\n\t})\n}",
"func NewCustom(fieldName string, fullPath bool) logrus.Hook {\n\treturn &customCallerHook{fieldName: fieldName, fullPath: fullPath}\n}",
"func (f *ExtensionStoreUpdateFunc) PushHook(hook func(context.Context, int32, *string) error) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (gatewayContext *GatewayContext) newGatewayWatch(name string) *cache.ListWatch {\n\tx := gatewayContext.gatewayClient.ArgoprojV1alpha1().RESTClient()\n\tresource := \"gateways\"\n\tfieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf(\"metadata.name=%s\", name))\n\n\tlistFunc := func(options metav1.ListOptions) (runtime.Object, error) {\n\t\toptions.FieldSelector = fieldSelector.String()\n\t\treq := x.Get().\n\t\t\tNamespace(gatewayContext.namespace).\n\t\t\tResource(resource).\n\t\t\tVersionedParams(&options, metav1.ParameterCodec)\n\t\treturn req.Do().Get()\n\t}\n\twatchFunc := func(options metav1.ListOptions) (watch.Interface, error) {\n\t\toptions.Watch = true\n\t\toptions.FieldSelector = fieldSelector.String()\n\t\treq := x.Get().\n\t\t\tNamespace(gatewayContext.namespace).\n\t\t\tResource(resource).\n\t\t\tVersionedParams(&options, metav1.ParameterCodec)\n\t\treturn req.Watch()\n\t}\n\treturn &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}\n}",
"func (f *UploadServiceGetCommitGraphMetadataFunc) PushHook(hook func(context.Context, int) (bool, *time.Time, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (mrb *MrbState) ProcNewGofunc(f interface{}) RProc {\n\tproc, _ := mrb.ProcNewGofuncWithEnv(f)\n\treturn proc\n}",
"func (f *AutoIndexingServiceGetListTagsFunc) SetDefaultHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.defaultHook = hook\n}",
"func (f *DBStoreUpdateUploadRetentionFunc) PushHook(hook func(context.Context, []int, []int) error) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func newLabo(s *goquery.Selection, l *Labo) *Labo {\n\tfor _, fn := range laboFn {\n\t\tfn(s, l)\n\t}\n\treturn l\n}",
"func NewGaWidget(keyfile string, viewID string) (*gaWidget, error) {\n\tan, err := platform.NewAnalyticsClient(keyfile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &gaWidget{\n\t\tanalytics: an,\n\t\tviewID: viewID,\n\t}, nil\n}",
"func newInfluxDB(config *Config) (hook *InfluxDBHook, err error) {\n if config == nil {\n config = &Config{}\n }\n\n config.defaults()\n\n var client = newInfluxDBClient(config)\n\n // Make sure that we can connect to InfluxDB\n isReady, err := client.Ready(context.Background()) // if this takes more than 5 seconds then influxdb is probably down\n if err != nil || !isReady {\n return nil, fmt.Errorf(\"NewInfluxDB: Error connecting to InfluxDB, %v\", err)\n }\n\n hook = &InfluxDBHook{\n client: client,\n database: config.Database,\n measurement: config.Measurement,\n tagList: config.Tags,\n precision: config.Precision,\n syslog: config.Syslog,\n facility: config.Facility,\n facilityCode: config.FacilityCode,\n appName: config.AppName,\n version: config.Version,\n minLevel: config.MinLevel,\n org: config.Org,\n bucket: config.Bucket,\n ch: ringchan.NewRingChan(10, config.MaxBufferLog),\n }\n go hook.process()\n return hook, nil\n}",
"func NewHookOptions(options map[string]interface{}) HookOptions {\n\tvar mapper models.Mapper\n\tif mapperOptions, ok := options[\"mapper\"]; ok {\n\t\tif m, ok := mapperOptions.(map[string]interface{}); ok {\n\t\t\tmapper = models.ParseMapper(m)\n\t\t}\n\t}\n\tvar name string\n\tif nameOption, ok := options[\"name\"]; ok {\n\t\tname = nameOption.(string)\n\t} else {\n\t\t//TODO: use hook-index as name\n\t\tname = \"default\"\n\t}\n\treturn HookOptions{\n\t\tName: name,\n\t\tMapper: mapper,\n\t}\n}",
"func NewMockHook(ctrl *gomock.Controller) *MockHook {\n\tmock := &MockHook{ctrl: ctrl}\n\tmock.recorder = &MockHookMockRecorder{mock}\n\treturn mock\n}",
"func (f *ExtensionStoreCountPublishersFunc) PushHook(hook func(context.Context, stores.PublishersListOptions) (int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func GetRpcServiceFb(key string) Factory {\n\treturn rpcServiceFactoryBuilder[key]\n}",
"func (v *version) Hooks() HookInformer {\n\treturn &hookInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}\n}",
"func (f *ResolverIndexConfigurationFunc) PushHook(hook func(context.Context, int) ([]byte, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *DBStoreHandleFunc) PushHook(hook func() *basestore.TransactableHandle) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func NewForTest(t *testing.T) (context.Context, *pgxpool.Pool, *testutils.GitBuilder, []string, provider.Provider, *config.InstanceConfig) {\n\tctx := cipd_git.UseGitFinder(context.Background())\n\tctx, cancel := context.WithCancel(ctx)\n\n\t// Create a git repo for testing purposes.\n\tgb := testutils.GitInit(t, ctx)\n\thashes := []string{}\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(2*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"bar.txt\", StartTime.Add(3*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(4*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(5*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"bar.txt\", StartTime.Add(6*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(7*time.Minute)))\n\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(8*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(9*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(10*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(11*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(12*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(13*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(14*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(15*time.Minute)))\n\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(16*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(17*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(18*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(19*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(20*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(21*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(22*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(23*time.Minute)))\n\n\t// Init our sql database.\n\tdb := sqltest.NewCockroachDBForTests(t, \"dbgit\")\n\n\t// Get tmp dir to use for repo checkout.\n\ttmpDir, err := ioutil.TempDir(\"\", \"git\")\n\trequire.NoError(t, err)\n\n\t// Create the cleanup function.\n\tt.Cleanup(func() {\n\t\tcancel()\n\t\terr = os.RemoveAll(tmpDir)\n\t\tassert.NoError(t, err)\n\t\tgb.Cleanup()\n\t})\n\n\tinstanceConfig := &config.InstanceConfig{\n\t\tGitRepoConfig: config.GitRepoConfig{\n\t\t\tURL: gb.Dir(),\n\t\t\tDir: filepath.Join(tmpDir, \"checkout\"),\n\t\t},\n\t}\n\tgp, err := git_checkout.New(ctx, instanceConfig)\n\trequire.NoError(t, err)\n\treturn ctx, db, gb, hashes, gp, instanceConfig\n}",
"func (f *DBStoreHandleFunc) PushHook(hook func() basestore.TransactableHandle) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}"
] | [
"0.73362684",
"0.55606365",
"0.5346862",
"0.53349245",
"0.51018983",
"0.50766736",
"0.50756764",
"0.5031727",
"0.49362725",
"0.49105883",
"0.48886675",
"0.48711956",
"0.48659244",
"0.4857758",
"0.4857758",
"0.4845478",
"0.4821866",
"0.47960016",
"0.47906336",
"0.4775277",
"0.47730014",
"0.47523877",
"0.47479406",
"0.472726",
"0.47114184",
"0.46798778",
"0.46782318",
"0.4665728",
"0.4643404",
"0.46396217",
"0.46335372",
"0.4630124",
"0.46241173",
"0.46231842",
"0.46189186",
"0.46035084",
"0.4603419",
"0.4599399",
"0.45930314",
"0.45843828",
"0.4575383",
"0.45700067",
"0.45693627",
"0.45690772",
"0.45450547",
"0.45424673",
"0.4541671",
"0.45341465",
"0.45293367",
"0.45238426",
"0.45215097",
"0.4521429",
"0.4516111",
"0.44893545",
"0.44876432",
"0.44865716",
"0.44820508",
"0.44794202",
"0.4473823",
"0.44678614",
"0.4451733",
"0.44343713",
"0.44329304",
"0.44183034",
"0.44150752",
"0.44136",
"0.44015387",
"0.43969283",
"0.43952924",
"0.4385314",
"0.43804273",
"0.43759438",
"0.43713394",
"0.43713394",
"0.4349196",
"0.43485594",
"0.43373233",
"0.43318668",
"0.43300954",
"0.4328112",
"0.4323547",
"0.4322696",
"0.43063924",
"0.43027085",
"0.42972922",
"0.4294898",
"0.42808393",
"0.42671457",
"0.42611027",
"0.4260794",
"0.42551446",
"0.42520428",
"0.42402652",
"0.4232919",
"0.4228743",
"0.4224091",
"0.4221166",
"0.42158407",
"0.42133886",
"0.42028248"
] | 0.88796777 | 0 |
newBetaGetHook creates a new closure with the current baseInstanceList to be used as a MockBetaInstances.GetHook | func (bil *baseInstanceList) newBetaGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {
return func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {
m.Lock.Lock()
defer m.Lock.Unlock()
if _, found := m.Objects[*key]; !found {
m.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toBeta()}
}
return false, nil, nil
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\treturn func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\t\tm.Lock.Lock()\n\t\tdefer m.Lock.Unlock()\n\n\t\tif _, found := m.Objects[*key]; !found {\n\t\t\tm.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()}\n\t\t}\n\t\treturn false, nil, nil\n\t}\n}",
"func (bil *baseInstanceList) newMockCloud() cloud.Cloud {\n\tc := cloud.NewMockGCE(nil)\n\n\t// insert hooks to lazy create a instance when needed\n\tc.MockInstances.GetHook = bil.newGAGetHook()\n\tc.MockBetaInstances.GetHook = bil.newBetaGetHook()\n\n\treturn c\n}",
"func NewHookLister(indexer cache.Indexer) HookLister {\n\treturn &hookLister{indexer: indexer}\n}",
"func (bi *baseInstance) toBeta() *beta.Instance {\n\tinst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},\n\t\t}\n\t}\n\treturn inst\n}",
"func (t *T) Beta(name string, f interface{}) bool {\n\tt.Helper()\n\treturn t.invokeFeature(feature.Beta, name, f)\n}",
"func (f *AutoIndexingServiceGetListTagsFunc) PushHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func NewLifeHook(e *Engine) Hook {\n\treturn Hook{\n\t\tOnStart: OnStart(e),\n\t\tOnStop: OnStop(e),\n\t}\n}",
"func NewHook(b BoltHook) (*BoltHook, error) {\n\tboltDB, err := bolt.Open(b.DBLoc, 0600, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &BoltHook{\n\t\tDBLoc: b.DBLoc,\n\t\tBucket: b.Bucket,\n\t\tFormatter: b.Formatter,\n\t\tdb: boltDB,\n\t}, nil\n}",
"func (f *ReleaseStoreGetLatestBatchFunc) PushHook(hook func(context.Context, []int32, string, bool) ([]*stores.Release, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func NewHook(peerID int, token string) *VkHook {\n\thook := &VkHook{\n\t\tPeerID: peerID,\n\t\tVK: api.NewVK(token),\n\t\tUseLevels: DefaultLevels,\n\t}\n\n\treturn hook\n}",
"func (f *AutoIndexingServiceGetIndexesFunc) PushHook(hook func(context.Context, shared.GetIndexesOptions) ([]types.Index, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (ch *CloudwatchHook) GetHook() (func(zapcore.Entry) error, error) {\n\n\tvar cloudwatchWriter = func(e zapcore.Entry) error {\n\t\tif !ch.isAcceptedLevel(e.Level) {\n\t\t\treturn nil\n\t\t}\n\n\t\tevent := &cloudwatchlogs.InputLogEvent{\n\t\t\tMessage: aws.String(fmt.Sprintf(\"[%s] %s\", e.LoggerName, e.Message)),\n\t\t\tTimestamp: aws.Int64(int64(time.Nanosecond) * time.Now().UnixNano() / int64(time.Millisecond)),\n\t\t}\n\t\tparams := &cloudwatchlogs.PutLogEventsInput{\n\t\t\tLogEvents: []*cloudwatchlogs.InputLogEvent{event},\n\t\t\tLogGroupName: aws.String(ch.GroupName),\n\t\t\tLogStreamName: aws.String(ch.StreamName),\n\t\t\tSequenceToken: ch.nextSequenceToken,\n\t\t}\n\n\t\tif ch.Async {\n\t\t\tgo ch.sendEvent(params)\n\t\t\treturn nil\n\t\t}\n\n\t\treturn ch.sendEvent(params)\n\t}\n\n\tch.svc = cloudwatchlogs.New(session.New(ch.AWSConfig))\n\n\tlgresp, err := ch.svc.DescribeLogGroups(&cloudwatchlogs.DescribeLogGroupsInput{LogGroupNamePrefix: aws.String(ch.GroupName), Limit: aws.Int64(1)})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(lgresp.LogGroups) < 1 {\n\t\t// we need to create this log group\n\t\t_, err := ch.svc.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{LogGroupName: aws.String(ch.GroupName)})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tresp, err := ch.svc.DescribeLogStreams(&cloudwatchlogs.DescribeLogStreamsInput{\n\t\tLogGroupName: aws.String(ch.GroupName), // Required\n\t\tLogStreamNamePrefix: aws.String(ch.StreamName),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// grab the next sequence token\n\tif len(resp.LogStreams) > 0 {\n\t\tch.nextSequenceToken = resp.LogStreams[0].UploadSequenceToken\n\t\treturn cloudwatchWriter, nil\n\t}\n\n\t// create stream if it doesn't exist. the next sequence token will be null\n\t_, err = ch.svc.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{\n\t\tLogGroupName: aws.String(ch.GroupName),\n\t\tLogStreamName: aws.String(ch.StreamName),\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cloudwatchWriter, nil\n}",
"func getHook(data *domain.Data, repo config.Repository) (config.Hook, bool, error) {\n\tfor _, hook := range repo.Hooks {\n\t\tf, err := matchHook(data, hook)\n\t\tif err != nil {\n\t\t\treturn config.Hook{}, false, err\n\t\t}\n\t\tif f {\n\t\t\treturn hook, true, nil\n\t\t}\n\t}\n\treturn config.Hook{}, false, nil\n}",
"func GetBindHook() BindHook {\n\treturn bindHook\n}",
"func NewHook(client *Client, levels []logrus.Level) *Hook {\n\n\treturn &Hook{client, levels}\n}",
"func newLabo(s *goquery.Selection, l *Labo) *Labo {\n\tfor _, fn := range laboFn {\n\t\tfn(s, l)\n\t}\n\treturn l\n}",
"func getHandleHookChange(\n\thooks []types.Hook, \n\trules []types.Rule, \n\texits []types.Exit,\n\tcallEvent func(string),\n\texitFunc func(code int),\n) func(string, string) {\n\thookChangeMap := generateHookChangeMap(rules, hooks)\n\thookChange := func(laststate string, newstate string) {\n\t\tif hookChangeMap[laststate] !=nil {\n\t\t\tlabels, hasMapping := hookChangeMap[laststate][newstate]\n\t\t\tif hasMapping {\n\t\t\t\tfor _, label := range(labels){\n\t\t\t\t\tcallEvent(label)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, exit := range(exits){\n\t\t\tif exit.State == newstate {\n\t\t\t\texitFunc(exit.Exitcode)\n\t\t\t}\n\t\t}\n\t}\n\n\n\treturn hookChange\n}",
"func NewBehatGetList(variables templateUtils.TemplateVariables) Template {\n\trawTemplate, err := template.New(\"behat_get_list\").Parse(BehatGetListTemplate)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn New(resource.New(geography.BehatDir+string(variables.Entity)+\"/crud/\", \"get_list.feature\"),\n\t\trawTemplate, variables)\n}",
"func NewChangelistLandedUpdater(t testing.TB) *ChangelistLandedUpdater {\n\tmock := &ChangelistLandedUpdater{}\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func (k *kubelet) getHooks() *container.Hooks {\n\treturn &container.Hooks{\n\t\tPostStart: k.postStartHook(),\n\t}\n}",
"func defaultNewExpBundler(uploader func(interface{}), delayThreshold time.Duration, countThreshold int) expBundler {\n\tbndler := bundler.NewBundler((*RowData)(nil), uploader)\n\n\t// Set options for bundler if they are provided by users.\n\tif 0 < delayThreshold {\n\t\tbndler.DelayThreshold = delayThreshold\n\t}\n\tif 0 < countThreshold {\n\t\tbndler.BundleCountThreshold = countThreshold\n\t}\n\n\treturn bndler\n}",
"func newAlfredWatcher() *alfredWatcher {\n w, _ := inotify.NewWatcher()\n aw := &alfredWatcher{\n watcher: w,\n list: make(map[string]uint32),\n }\n return aw\n}",
"func getHook(w http.ResponseWriter, req *http.Request) {\n\t// Get the JSON and put it into a hook struct\n\tdecoder := json.NewDecoder(req.Body)\n\tvar h github.PostReceiveHook\n\terr := decoder.Decode(&h)\n\n\tif err != nil {\n\t\tfmt.Println(\"ERROR: \", err.Error())\n\t\tfmt.Fprint(w, \"No JSON... what? (\"+err.Error()+\")\")\n\n\t\treturn\n\t}\n\n\t// If there is a branch, run the commands\n\tif len(h.Branch()) > 0 {\n\t\trunCommands(h.Branch())\n\t}\n\n\tfmt.Fprint(w, \"OK: \"+h.Ref)\n}",
"func (f *ExtensionStoreGetFeaturedExtensionsFunc) PushHook(hook func(context.Context) ([]*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *ReleaseStoreGetLatestFunc) PushHook(hook func(context.Context, int32, string, bool) (*stores.Release, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (t TestDescription) Beta() TestDescription {\n\treturn t.newLabel(\"BETA\")\n}",
"func NewBlueprint(uri string, cache time.Duration) (api.Meter, error) {\n\tlog := util.NewLogger(\"foo\")\n\n\tm := &Blueprint{\n\t\tHelper: request.NewHelper(log),\n\t\tcache: cache,\n\t}\n\n\treturn m, nil\n}",
"func newBaseInstanceList(allocateCIDR bool, clusterCIDR *net.IPNet, subnetMaskSize int) *baseInstanceList {\n\tcidrSet, _ := cidrset.NewCIDRSet(clusterCIDR, subnetMaskSize)\n\treturn &baseInstanceList{\n\t\tallocateCIDR: allocateCIDR,\n\t\tclusterCIDR: clusterCIDR,\n\t\tsubnetMaskSize: subnetMaskSize,\n\t\tcidrSet: cidrSet,\n\t\tinstances: make(map[meta.Key]*baseInstance),\n\t}\n}",
"func (f *UploadServiceGetListTagsFunc) PushHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *AutoIndexingServiceGetUnsafeDBFunc) PushHook(hook func() database.DB) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func newLfsHook(writer *rotatelogs.RotateLogs, tf *moduleFormatter) logrus.Hook {\n\tlfsHook := lfshook.NewHook(lfshook.WriterMap{\n\t\tlogrus.DebugLevel: writer,\n\t\tlogrus.InfoLevel: writer,\n\t\tlogrus.WarnLevel: writer,\n\t\tlogrus.ErrorLevel: writer,\n\t\tlogrus.FatalLevel: writer,\n\t\tlogrus.PanicLevel: writer,\n\t}, tf)\n\n\treturn lfsHook\n}",
"func (f *AutoIndexingServiceGetIndexByIDFunc) PushHook(hook func(context.Context, int) (types.Index, bool, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *AutoIndexingServiceGetIndexesByIDsFunc) PushHook(hook func(context.Context, ...int) ([]types.Index, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (a *alphaMock) GetList(ctx context.Context, in *alpha.GetListRequest, opts ...grpc.CallOption) (*alpha.List, error) {\n\t// TODO(#2716): Implement me!\n\treturn nil, errors.Errorf(\"Unimplemented -- GetList coming soon\")\n}",
"func NewHook(token string, env string, r ...RollrusInitializer) *Hook {\n\th := &Hook{\n\t\tClient: roll.New(token, env),\n\t\ttriggers: defaultTriggerLevels,\n\t\tclosed: make(chan struct{}),\n\t\tentries: channel.NewBuffer(defaultBufferSize),\n\t\tonce: new(sync.Once),\n\t\tpool: make(chan chan job, defaultNumWorkers),\n\t\tnumWorkers: defaultNumWorkers,\n\t\twg: new(sync.WaitGroup),\n\t}\n\n\tfor _, init := range r {\n\t\tinit(h)\n\t}\n\n\tfor i := 0; i < h.numWorkers; i++ {\n\t\th.wg.Add(1)\n\t\tworker := newWorker(h.pool, h.closed, h.wg)\n\t\tworker.Work()\n\t}\n\n\tgo h.dispatch()\n\n\treturn h\n}",
"func newListenerBuilder(meshCatalog catalog.MeshCataloger, svcIdentity identity.ServiceIdentity, cfg configurator.Configurator, statsHeaders map[string]string) *listenerBuilder {\n\treturn &listenerBuilder{\n\t\tmeshCatalog: meshCatalog,\n\t\tserviceIdentity: svcIdentity,\n\t\tcfg: cfg,\n\t\tstatsHeaders: statsHeaders,\n\t}\n}",
"func NewBetaProposer(alpha, beta float64) *BetaProposer {\n\tp, _ := prob.NewBeta(alpha, beta)\n\treturn &BetaProposer{p}\n}",
"func (mock *HarborRepositoryInterfaceMock) AddFeatureLifecycleCalls() []struct {\n\tCtx context.Context\n\tEnabled func() bool\n\tName string\n\tLifecycle v3.HarborRepositoryLifecycle\n} {\n\tvar calls []struct {\n\t\tCtx context.Context\n\t\tEnabled func() bool\n\t\tName string\n\t\tLifecycle v3.HarborRepositoryLifecycle\n\t}\n\tlockHarborRepositoryInterfaceMockAddFeatureLifecycle.RLock()\n\tcalls = mock.calls.AddFeatureLifecycle\n\tlockHarborRepositoryInterfaceMockAddFeatureLifecycle.RUnlock()\n\treturn calls\n}",
"func (a *App) GetHook(name string) (*Hook, error) {\n\tsp, err := a.GetSnapshot().FastForward()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn getHook(a, name, sp)\n}",
"func (v *version) Hooks() HookInformer {\n\treturn &hookInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}\n}",
"func (f *Function) M__get__(instance, owner Object) (Object, error) {\n\tif instance != None {\n\t\treturn NewBoundMethod(instance, f), nil\n\t}\n\treturn f, nil\n}",
"func (f *AutoIndexingServiceNumRepositoriesWithCodeIntelligenceFunc) PushHook(hook func(context.Context) (int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func TestGetAllBackendServer(t *testing.T) {\n\tcloud, _, resp, err := beforeTestBlb()\n\tif err != nil {\n\t\tt.Errorf(\"beforeTestBlb err, err: %v\", err)\n\t}\n\tctx := context.Background()\n\t// bs is nil\n\tlb := &blb.LoadBalancer{\n\t\tBlbId: resp.LoadBalancerId,\n\t}\n\tbs, err := cloud.getAllBackendServer(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 0 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n\t// add bs\n\tbsAdd := []blb.BackendServer{\n\t\t{\n\t\t\tInstanceId: \"1\",\n\t\t},\n\t\t{\n\t\t\tInstanceId: \"2\",\n\t\t},\n\t}\n\targs := blb.AddBackendServersArgs{\n\t\tLoadBalancerId: lb.BlbId,\n\t\tBackendServerList: bsAdd,\n\t}\n\terr = cloud.clientSet.BLBClient.AddBackendServers(ctx, &args, &bce.SignOption{\n\t\tCustomSignFunc: CCEServiceSign,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"AddBackendServers err, err: %v\", err)\n\t}\n\t// get bs\n\tbs, err = cloud.getAllBackendServer(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 2 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n}",
"func newAssert(t *testing.T) func(args ...interface{}) *rpcAssert {\n\treturn func(args ...interface{}) *rpcAssert {\n\t\treturn &rpcAssert{\n\t\t\tt: t,\n\t\t\targs: args,\n\t\t}\n\t}\n}",
"func getActivatedVersioners(pluginsToActivate []string, args plugins.PluginArgs, versioners *[]*plugins.ActivatedVersioner) func() {\n\tvar err error\n\tvm, err := plugins.NewVersionerManager(getPluginArgs())\n\tif err != nil {\n\t\tlogAndExit(app.ErrorLogger(log.StandardLogger(), err), \"Failed to create lifecycle plugin manager\")\n\t}\n\n\t*versioners, err = vm.ActivatePlugins(pluginsToActivate)\n\tif err != nil {\n\t\tlogAndExit(app.ErrorLogger(log.StandardLogger(), err), \"Failed to activate lifecycle plugins\")\n\t}\n\n\treturn vm.Close\n}",
"func (*llcFactory) New(args *xreg.XactArgs) xreg.BucketEntry {\n\treturn &llcFactory{t: args.T, uuid: args.UUID}\n}",
"func NewMockHook(ctrl *gomock.Controller) *MockHook {\n\tmock := &MockHook{ctrl: ctrl}\n\tmock.recorder = &MockHookMockRecorder{mock}\n\treturn mock\n}",
"func getNewAPI(anonymous bool) API {\n\t// ignore errors for now\n\td, err := xl.New()\n\tfatalIf(err.Trace(), \"Instantiating xl failed.\", nil)\n\n\treturn API{\n\t\tOP: make(chan APIOperation),\n\t\tXL: d,\n\t\tAnonymous: anonymous,\n\t}\n}",
"func newBackingServices(c *Client, namespace string) *backingservices {\n\treturn &backingservices{\n\t\tr: c,\n\t\tns: namespace,\n\t}\n}",
"func (s *hookLister) Hooks(namespace string) HookNamespaceLister {\n\treturn hookNamespaceLister{indexer: s.indexer, namespace: namespace}\n}",
"func (f *AutoIndexingServiceGetListTagsFunc) SetDefaultHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.defaultHook = hook\n}",
"func getSwarmingRpcsBotList(ctx context.Context, c *Client, call *swarming_api.BotsListCall) (*swarming_api.SwarmingRpcsBotList, error) {\n\tvar tl *swarming_api.SwarmingRpcsBotList\n\tf := func() error {\n\t\tvar err error\n\t\ttl, err = call.Context(ctx).Do()\n\t\treturn err\n\t}\n\terr := callWithRetries(ctx, f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tl, nil\n}",
"func NewGetBalanceCallback(t mockConstructorTestingTNewGetBalanceCallback) *GetBalanceCallback {\n\tmock := &GetBalanceCallback{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func newCache(nbClient libovsdbclient.Client) (*LBCache, error) {\n\t// first, list all load balancers\n\tlbs, err := listLBs(nbClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := LBCache{}\n\tc.existing = make(map[string]*CachedLB, len(lbs))\n\n\tfor i := range lbs {\n\t\tc.existing[lbs[i].UUID] = &lbs[i]\n\t}\n\n\tps := func(item *nbdb.LogicalSwitch) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\tswitches, err := libovsdbops.FindLogicalSwitchesWithPredicate(nbClient, ps)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ls := range switches {\n\t\tfor _, lbuuid := range ls.LoadBalancer {\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Switches.Insert(ls.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\tpr := func(item *nbdb.LogicalRouter) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\trouters, err := libovsdbops.FindLogicalRoutersWithPredicate(nbClient, pr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, router := range routers {\n\t\tfor _, lbuuid := range router.LoadBalancer {\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Routers.Insert(router.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Get non-empty LB groups\n\tpg := func(item *nbdb.LoadBalancerGroup) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\tgroups, err := libovsdbops.FindLoadBalancerGroupsWithPredicate(nbClient, pg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, group := range groups {\n\t\tfor _, lbuuid := range group.LoadBalancer {\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Groups.Insert(group.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &c, nil\n}",
"func newLdbCacheIter(snap *dbCacheSnapshot, slice *util.Range) *ldbCacheIter {\n\titer := snap.pendingKeys.Iterator(slice.Start, slice.Limit)\n\treturn &ldbCacheIter{Iterator: iter}\n}",
"func (f *AutoIndexingServiceGetListTagsFunc) History() []AutoIndexingServiceGetListTagsFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]AutoIndexingServiceGetListTagsFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func NewBleveBackend() BackendFactory {\n\treturn newBleveBackend\n}",
"func (d *dataUpdateTracker) newBloomFilter() bloomFilter {\n\treturn bloomFilter{bloom.NewWithEstimates(dataUpdateTrackerEstItems, dataUpdateTrackerFP)}\n}",
"func newMockGetDataFunc(mockStore *mock.NodeStore) func(addr Address) (data []byte, err error) {\n\treturn func(addr Address) (data []byte, err error) {\n\t\tdata, err = mockStore.Get(addr)\n\t\tif err == mock.ErrNotFound {\n\t\t\t// preserve ErrChunkNotFound error\n\t\t\terr = ErrChunkNotFound\n\t\t}\n\t\treturn data, err\n\t}\n}",
"func (f *ExtensionStoreGetFeaturedExtensionsFunc) History() []ExtensionStoreGetFeaturedExtensionsFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]ExtensionStoreGetFeaturedExtensionsFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}",
"func NewBoltHook(options ...HookOption) log.Hook {\n\n\tdefaultOptions := &HookOptions{\n\t\tIDGenerator: NewSatoru(),\n\t\tDbpath: \"log.db\",\n\t\tFileMode: 0600,\n\t\tBoltOptions: nil,\n\t}\n\n\tfor _, option := range options {\n\t\toption(defaultOptions)\n\t}\n\n\tboltDb, err := bolt.Open(defaultOptions.Dbpath, defaultOptions.FileMode, defaultOptions.BoltOptions)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn &boltHook{boltDb, defaultOptions.IDGenerator}\n}",
"func (bc *BlueprintController) GetBlueprint(bi usecase.BlueprintInteractor) func(*gin.Context) {\n\tb, err := bi.GetBlueprint()\n\n\tif err != nil {\n\t\treturn func(c *gin.Context) {\n\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\t\"error\": \"error occur\",\n\t\t\t})\n\t\t}\n\t}\n\n\treturn func(c *gin.Context) {\n\t\tkey := \"j9uzyqp6cyzq\"\n\t\tsecret := \"5y485r8nq9jre4fk6anpu59sqdcpq8xdkuqbd5jxqpvw455gek3aw27ysx4uq7tz\"\n\n\t\tclient, _ := stream.NewClient(key, secret)\n\n\t\tnotifFeed := client.NotificationFeed(\"agency\", \"125\")\n\n\t\tactor, _ := json.Marshal(map[string]interface{}{\n\t\t\t\"url\": \"http://example.org/martin\",\n\t\t\t\"objectType\": \"person\",\n\t\t\t\"id\": \"tag:example.org,2011:martin\",\n\t\t\t\"image\": map[string]interface{}{\n\t\t\t\t\"url\": \"http://example.org/martin/image\",\n\t\t\t\t\"width\": 250,\n\t\t\t\t\"height\": 250,\n\t\t\t},\n\t\t\t\"displayName\": \"Martin Smith\",\n\t\t})\n\t\tobject, _ := json.Marshal(map[string]string{\n\t\t\t\"url\": \"http://example.org/blog/2011/02/entry\",\n\t\t\t\"id\": \"tag:example.org,2011:abc123/xyz\",\n\t\t})\n\t\ttarget, _ := json.Marshal(map[string]string{\n\t\t\t\"url\": \"http://example.org/blog/\",\n\t\t\t\"objectType\": \"blog\",\n\t\t\t\"id\": \"tag:example.org,2011:abc123\",\n\t\t\t\"displayName\": \"Martin's Blog\",\n\t\t})\n\n\t\tresp, err := notifFeed.AddActivity(stream.Activity{\n\t\t\tActor: string(actor),\n\t\t\tVerb: \"post\",\n\t\t\tObject: string(object),\n\t\t\tTarget: string(target),\n\t\t})\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tlog.Printf(\"%v\", resp)\n\n\t\tc.JSON(http.StatusOK, b)\n\t}\n}",
"func NewHook(handler HookFunc, kind HookKind) *Hook {\n\thook := &Hook{\n\t\tHandler: handler,\n\t\tKind: kind,\n\t}\n\n\treturn hook\n}",
"func (ct *ctrlerCtx) diffBucket(apicl apiclient.Services) {\n\topts := api.ListWatchOptions{}\n\n\t// get a list of all objects from API server\n\tobjlist, err := apicl.ObjstoreV1().Bucket().List(context.Background(), &opts)\n\tif err != nil {\n\t\tct.logger.Errorf(\"Error getting a list of objects. Err: %v\", err)\n\t\treturn\n\t}\n\n\tct.logger.Infof(\"diffBucket(): BucketList returned %d objects\", len(objlist))\n\n\t// build an object map\n\tobjmap := make(map[string]*objstore.Bucket)\n\tfor _, obj := range objlist {\n\t\tobjmap[obj.GetKey()] = obj\n\t}\n\n\tlist, err := ct.Bucket().List(context.Background(), &opts)\n\tif err != nil && !strings.Contains(err.Error(), \"not found in local cache\") {\n\t\tct.logger.Infof(\"Failed to get a list of objects. Err: %s\", err)\n\t\treturn\n\t}\n\n\t// if an object is in our local cache and not in API server, trigger delete for it\n\tfor _, obj := range list {\n\t\t_, ok := objmap[obj.GetKey()]\n\t\tif !ok {\n\t\t\tct.logger.Infof(\"diffBucket(): Deleting existing object %#v since its not in apiserver\", obj.GetKey())\n\t\t\tevt := kvstore.WatchEvent{\n\t\t\t\tType: kvstore.Deleted,\n\t\t\t\tKey: obj.GetKey(),\n\t\t\t\tObject: &obj.Bucket,\n\t\t\t}\n\t\t\tct.handleBucketEvent(&evt)\n\t\t}\n\t}\n\n\t// trigger create event for all others\n\tfor _, obj := range objlist {\n\t\tct.logger.Infof(\"diffBucket(): Adding object %#v\", obj.GetKey())\n\t\tevt := kvstore.WatchEvent{\n\t\t\tType: kvstore.Created,\n\t\t\tKey: obj.GetKey(),\n\t\t\tObject: obj,\n\t\t}\n\t\tct.handleBucketEvent(&evt)\n\t}\n}",
"func (f *ReleaseStoreGetArtifactsFunc) PushHook(hook func(context.Context, int64) ([]byte, []byte, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func generateListGetter(buf *bytes.Buffer, method *generatedGoListMethod) error {\n\treturn goListGetterTemplate.Execute(buf, method)\n}",
"func (mock *MultiClusterAppInterfaceMock) AddFeatureLifecycleCalls() []struct {\n\tCtx context.Context\n\tEnabled func() bool\n\tName string\n\tLifecycle v31.MultiClusterAppLifecycle\n} {\n\tvar calls []struct {\n\t\tCtx context.Context\n\t\tEnabled func() bool\n\t\tName string\n\t\tLifecycle v31.MultiClusterAppLifecycle\n\t}\n\tlockMultiClusterAppInterfaceMockAddFeatureLifecycle.RLock()\n\tcalls = mock.calls.AddFeatureLifecycle\n\tlockMultiClusterAppInterfaceMockAddFeatureLifecycle.RUnlock()\n\treturn calls\n}",
"func New(maxlevel int, cmpFn CompareFn) *List {\n\treturn NewCustom(maxlevel, DefaultProbability, cmpFn, time.Now().Unix())\n}",
"func BetaDiff(matches []Match, beta float64) []float64 {\n\tvar diffs []float64\n\tplayers := make(map[string]*glicko2.Player)\n\tparams := glicko2.Parameters{\n\t\tInitialDeviation: 27,\n\t\tInitialVolatility: .06,\n\t}\n\tfor _, match := range matches {\n\t\t// Add players as we discover them.\n\t\tp1, ok := players[match.P1name]\n\t\tif !ok {\n\t\t\tparams.InitialRating = match.P1skill\n\t\t\tplayers[match.P1name] = glicko2.NewPlayer(params)\n\t\t\tp1 = players[match.P1name]\n\t\t}\n\t\tp2, ok := players[match.P2name]\n\t\tif !ok {\n\t\t\tparams.InitialRating = match.P2skill\n\t\t\tplayers[match.P2name] = glicko2.NewPlayer(params)\n\t\t\tp2 = players[match.P2name]\n\t\t}\n\n\t\texpected := Pwin(p1, p2, beta)\n\t\tactual := float64(\n\t\t\tfloat64(match.P1got) / float64(match.P1got+match.P2got))\n\t\tdiff := math.Abs(expected - actual)\n\t\tdiffs = append(diffs, diff)\n\t}\n\n\treturn diffs\n}",
"func newListenerCfg(config *Config, rpcCfg RPCConfig) *listenerCfg {\n\treturn &listenerCfg{\n\t\tgrpcListener: func() (net.Listener, error) {\n\t\t\t// If a custom RPC listener is set, we will listen on\n\t\t\t// it instead of the regular tcp socket.\n\t\t\tif rpcCfg.RPCListener != nil {\n\t\t\t\treturn rpcCfg.RPCListener, nil\n\t\t\t}\n\n\t\t\treturn net.Listen(\"tcp\", config.RPCListen)\n\t\t},\n\t\trestListener: func() (net.Listener, error) {\n\t\t\t// If a custom RPC listener is set, we disable REST.\n\t\t\tif rpcCfg.RPCListener != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\n\t\t\treturn net.Listen(\"tcp\", config.RESTListen)\n\t\t},\n\t\tgetLnd: func(network lndclient.Network, cfg *lndConfig) (\n\t\t\t*lndclient.GrpcLndServices, error) {\n\n\t\t\tsvcCfg := &lndclient.LndServicesConfig{\n\t\t\t\tLndAddress: cfg.Host,\n\t\t\t\tNetwork: network,\n\t\t\t\tMacaroonDir: cfg.MacaroonDir,\n\t\t\t\tTLSPath: cfg.TLSPath,\n\t\t\t\tCheckVersion: LoopMinRequiredLndVersion,\n\t\t\t}\n\n\t\t\t// If a custom lnd connection is specified we use that\n\t\t\t// directly.\n\t\t\tif rpcCfg.LndConn != nil {\n\t\t\t\tsvcCfg.Dialer = func(context.Context, string) (\n\t\t\t\t\tnet.Conn, error) {\n\t\t\t\t\treturn rpcCfg.LndConn, nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn lndclient.NewLndServices(svcCfg)\n\t\t},\n\t}\n}",
"func getBeta(fAlpha, fBeta float64) float64 {\n\tvar fA, fB float64\n\tif fAlpha > fBeta {\n\t\tfA = fAlpha\n\t\tfB = fBeta\n\t} else {\n\t\tfA = fBeta\n\t\tfB = fAlpha\n\t}\n\tconst maxGammaArgument = 171.624376956302\n\tif fA+fB < maxGammaArgument {\n\t\treturn math.Gamma(fA) / math.Gamma(fA+fB) * math.Gamma(fB)\n\t}\n\tfg := 6.024680040776729583740234375\n\tfgm := fg - 0.5\n\tfLanczos := getLanczosSum(fA)\n\tfLanczos /= getLanczosSum(fA + fB)\n\tfLanczos *= getLanczosSum(fB)\n\tfABgm := fA + fB + fgm\n\tfLanczos *= math.Sqrt((fABgm / (fA + fgm)) / (fB + fgm))\n\tfTempA := fB / (fA + fgm)\n\tfTempB := fA / (fB + fgm)\n\tfResult := math.Exp(-fA*math.Log1p(fTempA) - fB*math.Log1p(fTempB) - fgm)\n\tfResult *= fLanczos\n\treturn fResult\n}",
"func (o *InlineObject885) GetBeta() AnyOfobject {\n\tif o == nil || o.Beta == nil {\n\t\tvar ret AnyOfobject\n\t\treturn ret\n\t}\n\treturn *o.Beta\n}",
"func NewHookOptions(options map[string]interface{}) HookOptions {\n\tvar mapper models.Mapper\n\tif mapperOptions, ok := options[\"mapper\"]; ok {\n\t\tif m, ok := mapperOptions.(map[string]interface{}); ok {\n\t\t\tmapper = models.ParseMapper(m)\n\t\t}\n\t}\n\tvar name string\n\tif nameOption, ok := options[\"name\"]; ok {\n\t\tname = nameOption.(string)\n\t} else {\n\t\t//TODO: use hook-index as name\n\t\tname = \"default\"\n\t}\n\treturn HookOptions{\n\t\tName: name,\n\t\tMapper: mapper,\n\t}\n}",
"func (c *ClubBranchClient) Hooks() []Hook {\n\treturn c.hooks.ClubBranch\n}",
"func NewSlowLogHook(threshold time.Duration) HookFunc {\n\treturn func(ctx context.Context, call hrpc.Call, customName string) func(err error) {\n\t\tstart := time.Now()\n\t\treturn func(error) {\n\t\t\tduration := time.Since(start)\n\t\t\tif duration < threshold {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Warn(\"hbase slow log: %s %s %s time: %s\", customName, call.Table(), call.Key(), duration)\n\t\t}\n\t}\n}",
"func (*llcFactory) PreRenewHook(_ xreg.BucketEntry) (bool, error) { return true, nil }",
"func testHook() *Hook {\n\treturn &Hook{\n\t\tID: sql.NullInt64{Int64: 1, Valid: true},\n\t\tRepoID: sql.NullInt64{Int64: 1, Valid: true},\n\t\tBuildID: sql.NullInt64{Int64: 1, Valid: true},\n\t\tNumber: sql.NullInt32{Int32: 1, Valid: true},\n\t\tSourceID: sql.NullString{String: \"c8da1302-07d6-11ea-882f-4893bca275b8\", Valid: true},\n\t\tCreated: sql.NullInt64{Int64: time.Now().UTC().Unix(), Valid: true},\n\t\tHost: sql.NullString{String: \"github.com\", Valid: true},\n\t\tEvent: sql.NullString{String: \"push\", Valid: true},\n\t\tEventAction: sql.NullString{String: \"\", Valid: false},\n\t\tBranch: sql.NullString{String: \"master\", Valid: true},\n\t\tError: sql.NullString{String: \"\", Valid: false},\n\t\tStatus: sql.NullString{String: \"success\", Valid: true},\n\t\tLink: sql.NullString{String: \"https://github.com/github/octocat/settings/hooks/1\", Valid: true},\n\t\tWebhookID: sql.NullInt64{Int64: 123456, Valid: true},\n\t}\n}",
"func (mock *HarborRepositoryInterfaceMock) AddClusterScopedFeatureLifecycleCalls() []struct {\n\tCtx context.Context\n\tEnabled func() bool\n\tName string\n\tClusterName string\n\tLifecycle v3.HarborRepositoryLifecycle\n} {\n\tvar calls []struct {\n\t\tCtx context.Context\n\t\tEnabled func() bool\n\t\tName string\n\t\tClusterName string\n\t\tLifecycle v3.HarborRepositoryLifecycle\n\t}\n\tlockHarborRepositoryInterfaceMockAddClusterScopedFeatureLifecycle.RLock()\n\tcalls = mock.calls.AddClusterScopedFeatureLifecycle\n\tlockHarborRepositoryInterfaceMockAddClusterScopedFeatureLifecycle.RUnlock()\n\treturn calls\n}",
"func (*TestingKnobs) ModuleTestingKnobs() {}",
"func (f *ExtensionStoreListFunc) PushHook(hook func(context.Context, stores.ExtensionsListOptions) ([]*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (f *ReleaseStoreGetLatestBatchFunc) SetDefaultHook(hook func(context.Context, []int32, string, bool) ([]*stores.Release, error)) {\n\tf.defaultHook = hook\n}",
"func newBlockRetrievalWorker(bg blockGetter, q *blockRetrievalQueue) *blockRetrievalWorker {\n\tbrw := &blockRetrievalWorker{\n\t\tblockGetter: bg,\n\t\tstopCh: make(chan struct{}),\n\t\tqueue: q,\n\t}\n\tgo brw.run()\n\treturn brw\n}",
"func (m *MockPoolRegistry) GetNewHostBrickAllocations(ctxt context.Context, hostname string) <-chan registry.BrickAllocation {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetNewHostBrickAllocations\", ctxt, hostname)\n\tret0, _ := ret[0].(<-chan registry.BrickAllocation)\n\treturn ret0\n}",
"func (o *InlineObject885) SetBeta(v AnyOfobject) {\n\to.Beta = &v\n}",
"func initHistlist(ed editor, ev *eval.Evaler, getCmds func() ([]string, error), lsMode *listing.Mode, lsBinding *bindingMap) eval.Ns {\n\tbinding := emptyBindingMap\n\tmode := histlist.Mode{\n\t\tMode: lsMode,\n\t\tKeyHandler: keyHandlerFromBindings(ed, ev, &binding, lsBinding),\n\t}\n\tns := eval.Ns{}.\n\t\tAddGoFn(\"<edit:histlist>\", \"start\", func() {\n\t\t\tstartHistlist(ed, getCmds, &mode)\n\t\t})\n\treturn ns\n}",
"func (c *ClientWithResponses) BetaTestersGetInstanceWithResponse(ctx context.Context, id string, params *BetaTestersGetInstanceParams, reqEditors ...RequestEditorFn) (*BetaTestersGetInstanceResponse, error) {\n\trsp, err := c.BetaTestersGetInstance(ctx, id, params, reqEditors...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseBetaTestersGetInstanceResponse(rsp)\n}",
"func (f *AutoIndexingServiceRepositoryIDsWithConfigurationFunc) PushHook(hook func(context.Context, int, int) ([]shared.RepositoryWithAvailableIndexers, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func newListeners() *listeners { return &listeners{m: make(map[string]nl.Listener, 64)} }",
"func (f *DBStoreGetConfigurationPoliciesFunc) PushHook(hook func(context.Context, dbstore.GetConfigurationPoliciesOptions) ([]dbstore.ConfigurationPolicy, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func New() logrus.Hook {\n\treturn &normalCallerHook{}\n}",
"func (m *MockPool) Get() MutableList {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Get\")\n\tret0, _ := ret[0].(MutableList)\n\treturn ret0\n}",
"func NewGet(g Getter) *Get {\n\treturn &Get{g}\n}",
"func newBinder(chart *chart.Chart, cmIface v1.ConfigMapInterface) (mode.Binder, error) {\n\t// parse the values file for steward-specific config map info\n\tcmNames, err := getStewardConfigMapInfo(chart.Values)\n\tif err != nil {\n\t\tlogger.Errorf(\"getting steward config map info (%s)\", err)\n\t\treturn nil, err\n\t}\n\tlogger.Debugf(\"got config map names for helm chart %s\", cmNames)\n\treturn binder{\n\t\tcmNames: cmNames,\n\t\tcmIface: cmIface,\n\t}, nil\n}",
"func generateGetOrCreateList(buf *bytes.Buffer, method *generatedGoListMethod) error {\n\treturn goGetOrCreateListTemplate.Execute(buf, method)\n}",
"func (c *TestClient) CreateInstanceBeta(project, zone string, i *computeBeta.Instance) error {\n\tif c.CreateInstanceBetaFn != nil {\n\t\treturn c.CreateInstanceBetaFn(project, zone, i)\n\t}\n\treturn c.client.CreateInstanceBeta(project, zone, i)\n}",
"func GetMutableBagForTesting(v map[string]interface{}) *MutableBag {\n\tm := GetMutableBag(nil)\n\tm.values = v\n\treturn m\n}",
"func (f *PolicyServiceGetRetentionPolicyOverviewFunc) PushHook(hook func(context.Context, types.Upload, bool, int, int64, string, time.Time) ([]types.RetentionPolicyMatchCandidate, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}",
"func (c *BillClient) Hooks() []Hook {\n\treturn c.hooks.Bill\n}",
"func (c *BillClient) Hooks() []Hook {\n\treturn c.hooks.Bill\n}",
"func (c *BillClient) Hooks() []Hook {\n\treturn c.hooks.Bill\n}"
] | [
"0.75942796",
"0.5408831",
"0.5109261",
"0.49638084",
"0.4917815",
"0.4800882",
"0.47378483",
"0.46847326",
"0.46703503",
"0.46522126",
"0.46307093",
"0.45831934",
"0.45636097",
"0.45611542",
"0.45577788",
"0.45528764",
"0.45515147",
"0.4545838",
"0.4534998",
"0.4518134",
"0.44979674",
"0.4494156",
"0.44940612",
"0.44720718",
"0.4467337",
"0.44011977",
"0.43894717",
"0.43653357",
"0.43468305",
"0.43416816",
"0.43360996",
"0.43265915",
"0.43110937",
"0.4306581",
"0.4289749",
"0.4282926",
"0.427915",
"0.42504057",
"0.42370427",
"0.42334604",
"0.4231189",
"0.42269793",
"0.421497",
"0.42106593",
"0.42046946",
"0.4203805",
"0.41849655",
"0.41782472",
"0.41781804",
"0.4177024",
"0.41654238",
"0.41649538",
"0.41560593",
"0.41554624",
"0.41546685",
"0.415336",
"0.4132749",
"0.4131572",
"0.41230866",
"0.41198125",
"0.4119241",
"0.4113467",
"0.41127306",
"0.41097945",
"0.4106927",
"0.41059798",
"0.4094409",
"0.40905142",
"0.409001",
"0.40887478",
"0.4084991",
"0.4083814",
"0.40782285",
"0.4076583",
"0.4075371",
"0.40753412",
"0.40749973",
"0.4074948",
"0.40716353",
"0.40679857",
"0.40677813",
"0.4067077",
"0.40657103",
"0.4065607",
"0.40633082",
"0.4061706",
"0.406094",
"0.40560326",
"0.40547928",
"0.40527216",
"0.40362617",
"0.40340793",
"0.40319988",
"0.4027003",
"0.4021774",
"0.40187457",
"0.40181366",
"0.40139788",
"0.40139788",
"0.40139788"
] | 0.87772727 | 0 |
newMockCloud returns a mock GCE instance with the appropriate handler hooks | func (bil *baseInstanceList) newMockCloud() cloud.Cloud {
c := cloud.NewMockGCE(nil)
	// insert hooks to lazily create an instance when needed
c.MockInstances.GetHook = bil.newGAGetHook()
c.MockBetaInstances.GetHook = bil.newBetaGetHook()
return c
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NewCloudMock() *CloudMock {\n\taddress, grpcServer, mockTrace := startMockServer()\n\n\tconn, err := grpc.Dial(address, grpc.WithInsecure())\n\tif err != nil {\n\t\tlog.Fatalf(\"did not connect: %s\", err)\n\t}\n\n\ttraceClient := cloudtrace.NewTraceServiceClient(conn)\n\tmetricClient := monitoring.NewMetricServiceClient(conn)\n\treturn &CloudMock{\n\t\tconn,\n\t\tgrpcServer,\n\t\tmockTrace,\n\t\ttraceClient,\n\t\tmetricClient,\n\t}\n}",
"func NewMockCloud(ctrl *gomock.Controller) *MockCloud {\n\tmock := &MockCloud{ctrl: ctrl}\n\tmock.recorder = &MockCloudMockRecorder{mock}\n\treturn mock\n}",
"func NewFakeGCECloud(vals TestClusterValues) *Cloud {\n\tservice, err := compute.NewService(context.Background(), option.WithoutAuthentication())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tgce := &Cloud{\n\t\tregion: vals.Region,\n\t\tservice: service,\n\t\tmanagedZones: []string{vals.ZoneName},\n\t\tprojectID: vals.ProjectID,\n\t\tnetworkProjectID: vals.ProjectID,\n\t\tClusterID: fakeClusterID(vals.ClusterID),\n\t\tonXPN: vals.OnXPN,\n\t\tmetricsCollector: newLoadBalancerMetrics(),\n\t\tprojectsBasePath: getProjectsBasePath(service.BasePath),\n\t}\n\tc := cloud.NewMockGCE(&gceProjectRouter{gce})\n\tgce.c = c\n\treturn gce\n}",
"func newK8SCloud(opts Options) (CloudProvider, error) {\n\n\tif opts.Name == \"\" {\n\t\treturn nil, errors.New(\"K8SCloud: Invalid cloud name\")\n\t}\n\tif opts.Host == \"\" {\n\t\treturn nil, errors.New(\"K8SCloud: Invalid cloud host\")\n\t}\n\tif opts.K8SNamespace == \"\" {\n\t\topts.K8SNamespace = apiv1.NamespaceDefault\n\t}\n\n\tcloud := &K8SCloud{\n\t\tname: opts.Name,\n\t\thost: opts.Host,\n\t\tbearerToken: opts.K8SBearerToken,\n\t\tnamespace: opts.K8SNamespace,\n\t\tinsecure: opts.Insecure,\n\t}\n\tconfig := &rest.Config{\n\t\tHost: opts.Host,\n\t\tBearerToken: opts.K8SBearerToken,\n\t\tTLSClientConfig: rest.TLSClientConfig{\n\t\t\tInsecure: opts.Insecure,\n\t\t},\n\t}\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcloud.client = clientset\n\treturn cloud, nil\n}",
"func NewK8sClient(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *K8sClient {\n\tmock := &K8sClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewCloud(cfg CloudConfig, metricsRegisterer prometheus.Registerer) (Cloud, error) {\n\tmetadataSess := session.Must(session.NewSession(aws.NewConfig()))\n\tmetadata := services.NewEC2Metadata(metadataSess)\n\tif len(cfg.Region) == 0 {\n\t\tregion, err := metadata.Region()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to introspect region from EC2Metadata, specify --aws-region instead if EC2Metadata is unavailable\")\n\t\t}\n\t\tcfg.Region = region\n\t}\n\n\tif len(cfg.VpcID) == 0 {\n\t\tvpcId, err := metadata.VpcID()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to introspect vpcID from EC2Metadata, specify --aws-vpc-id instead if EC2Metadata is unavailable\")\n\t\t}\n\t\tcfg.VpcID = vpcId\n\t}\n\n\tawsCFG := aws.NewConfig().WithRegion(cfg.Region).WithSTSRegionalEndpoint(endpoints.RegionalSTSEndpoint).WithMaxRetries(cfg.MaxRetries)\n\tsess := session.Must(session.NewSession(awsCFG))\n\tinjectUserAgent(&sess.Handlers)\n\n\tif cfg.ThrottleConfig != nil {\n\t\tthrottler := throttle.NewThrottler(cfg.ThrottleConfig)\n\t\tthrottler.InjectHandlers(&sess.Handlers)\n\t}\n\tif metricsRegisterer != nil {\n\t\tmetricsCollector, err := metrics.NewCollector(metricsRegisterer)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to initialize sdk metrics collector\")\n\t\t}\n\t\tmetricsCollector.InjectHandlers(&sess.Handlers)\n\t}\n\n\treturn &defaultCloud{\n\t\tcfg: cfg,\n\t\tec2: services.NewEC2(sess),\n\t\telbv2: services.NewELBV2(sess),\n\t\tacm: services.NewACM(sess),\n\t\twafv2: services.NewWAFv2(sess),\n\t\twafRegional: services.NewWAFRegional(sess, cfg.Region),\n\t\tshield: services.NewShield(sess),\n\t\trgt: services.NewRGT(sess),\n\t}, nil\n}",
"func fakeGcp() (*compute.Service, error) {\n\tclient := &http.Client{}\n\thttpmock.ActivateNonDefault(client)\n\treturn compute.NewService(context.Background(), option.WithoutAuthentication(), option.WithHTTPClient(client))\n}",
"func NewMockAzureCloud(location string) *MockAzureCloud {\n\treturn &MockAzureCloud{\n\t\tLocation: location,\n\t\tResourceGroupsClient: &MockResourceGroupsClient{\n\t\t\tRGs: map[string]resources.Group{},\n\t\t},\n\t\tVirtualNetworksClient: &MockVirtualNetworksClient{\n\t\t\tVNets: map[string]network.VirtualNetwork{},\n\t\t},\n\t\tSubnetsClient: &MockSubnetsClient{\n\t\t\tSubnets: map[string]network.Subnet{},\n\t\t},\n\t\tRouteTablesClient: &MockRouteTablesClient{\n\t\t\tRTs: map[string]network.RouteTable{},\n\t\t},\n\t\tNetworkSecurityGroupsClient: &MockNetworkSecurityGroupsClient{\n\t\t\tNSGs: map[string]network.SecurityGroup{},\n\t\t},\n\t\tApplicationSecurityGroupsClient: &MockApplicationSecurityGroupsClient{\n\t\t\tASGs: map[string]network.ApplicationSecurityGroup{},\n\t\t},\n\t\tVMScaleSetsClient: &MockVMScaleSetsClient{\n\t\t\tVMSSes: map[string]compute.VirtualMachineScaleSet{},\n\t\t},\n\t\tVMScaleSetVMsClient: &MockVMScaleSetVMsClient{\n\t\t\tVMs: map[string]compute.VirtualMachineScaleSetVM{},\n\t\t},\n\t\tDisksClient: &MockDisksClient{\n\t\t\tDisks: map[string]compute.Disk{},\n\t\t},\n\t\tRoleAssignmentsClient: &MockRoleAssignmentsClient{\n\t\t\tRAs: map[string]authz.RoleAssignment{},\n\t\t},\n\t\tNetworkInterfacesClient: &MockNetworkInterfacesClient{\n\t\t\tNIs: map[string]network.Interface{},\n\t\t},\n\t\tLoadBalancersClient: &MockLoadBalancersClient{\n\t\t\tLBs: map[string]network.LoadBalancer{},\n\t\t},\n\t\tPublicIPAddressesClient: &MockPublicIPAddressesClient{\n\t\t\tPubIPs: map[string]network.PublicIPAddress{},\n\t\t},\n\t\tNatGatewaysClient: &MockNatGatewaysClient{\n\t\t\tNGWs: map[string]network.NatGateway{},\n\t\t},\n\t}\n}",
"func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {\n\taz, err := NewCloudWithoutFeatureGates(configReader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taz.ipv6DualStackEnabled = true\n\n\treturn az, nil\n}",
"func NewGCEClient() *gce.Cloud {\n\tvar configReader func() io.Reader\n\tif flags.F.ConfigFilePath != \"\" {\n\t\tklog.Infof(\"Reading config from path %q\", flags.F.ConfigFilePath)\n\t\tconfig, err := os.Open(flags.F.ConfigFilePath)\n\t\tif err != nil {\n\t\t\tklog.Fatalf(\"%v\", err)\n\t\t}\n\t\tdefer config.Close()\n\n\t\tallConfig, err := io.ReadAll(config)\n\t\tif err != nil {\n\t\t\tklog.Fatalf(\"Error while reading config (%q): %v\", flags.F.ConfigFilePath, err)\n\t\t}\n\t\tklog.V(4).Infof(\"Cloudprovider config file contains: %q\", string(allConfig))\n\n\t\tconfigReader = generateConfigReaderFunc(allConfig)\n\t} else {\n\t\tklog.V(2).Infof(\"No cloudprovider config file provided, using default values.\")\n\t\tconfigReader = func() io.Reader { return nil }\n\t}\n\n\t// Creating the cloud interface involves resolving the metadata server to get\n\t// an oauth token. If this fails, the token provider assumes it's not on GCE.\n\t// No errors are thrown. So we need to keep retrying till it works because\n\t// we know we're on GCE.\n\tfor {\n\t\tprovider, err := cloudprovider.GetCloudProvider(\"gce\", configReader())\n\t\tif err == nil {\n\t\t\tcloud := provider.(*gce.Cloud)\n\t\t\t// Configure GCE rate limiting\n\t\t\trl, err := ratelimit.NewGCERateLimiter(flags.F.GCERateLimit.Values(), flags.F.GCEOperationPollInterval)\n\t\t\tif err != nil {\n\t\t\t\tklog.Fatalf(\"Error configuring rate limiting: %v\", err)\n\t\t\t}\n\t\t\tcloud.SetRateLimiter(rl)\n\t\t\t// If this controller is scheduled on a node without compute/rw\n\t\t\t// it won't be allowed to list backends. We can assume that the\n\t\t\t// user has no need for Ingress in this case. If they grant\n\t\t\t// permissions to the node they will have to restart the controller\n\t\t\t// manually to re-create the client.\n\t\t\t// TODO: why do we bail with success out if there is a permission error???\n\t\t\tif _, err = cloud.ListGlobalBackendServices(); err == nil || utils.IsHTTPErrorCode(err, http.StatusForbidden) {\n\t\t\t\treturn cloud\n\t\t\t}\n\t\t\tklog.Warningf(\"Failed to list backend services, retrying: %v\", err)\n\t\t} else {\n\t\t\tklog.Warningf(\"Failed to get cloud provider, retrying: %v\", err)\n\t\t}\n\t\ttime.Sleep(cloudClientRetryInterval)\n\t}\n}",
"func newHTTPCloud(config io.Reader) (*httpCloud, error) {\n\tif config != nil {\n\t\tvar cfg Config\n\t\tif err := gcfg.ReadInto(&cfg, config); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Couldn't read config: %v\", err)\n\t\t}\n\n\t\tinstancesURL := cfg.Global.InstancesURL\n\t\t// Validate URL\n\t\t_, err := url.ParseRequestURI(instancesURL)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Can't parse the instances-url provided: %s\", err)\n\t\t}\n\t\t// Handle Trailing slashes\n\t\tinstancesURL = strings.TrimRight(instancesURL, \"/\")\n\n\t\tschedulerExtensionURL := cfg.Global.SchedulerExtensionURL\n\t\t// Validate URL\n\t\t_, err = url.ParseRequestURI(schedulerExtensionURL)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Can't parse the scheduler-extension-url provided: %s\", err)\n\t\t}\n\t\t// Handle Trailing slashes\n\t\tschedulerExtensionURL = strings.TrimRight(schedulerExtensionURL, \"/\")\n\n\t\treturn &httpCloud{\n\t\t\tinstancesURL: instancesURL,\n\t\t\tinstancesSupported: cfg.Global.InstancesSupported,\n\t\t\ttcpLoadBalancerSupported: cfg.Global.TcpLoadBalancerSupported,\n\t\t\tzonesSupported: cfg.Global.ZonesSupported,\n\t\t\tclustersSupported: cfg.Global.ClustersSupported,\n\t\t\tschedulerExtensionURL: schedulerExtensionURL,\n\t\t\tschedulerExtensionSupported: cfg.Global.SchedulerExtensionSupported,\n\t\t}, nil\n\t}\n\treturn nil, fmt.Errorf(\"Config file is empty or is not provided\")\n}",
"func newClient() (*storage.Client, error) {\n\tctx := context.Background()\n\n\tbyteKey, err := gcloud.GetDecodedKey()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get gcp key, err: %w\", err)\n\t}\n\tclient, err := storage.NewClient(ctx, option.WithCredentialsJSON(byteKey))\n\tif err != nil {\n\t\tlog.Println(\"failed to login with GCP_KEY, trying with default application credentials...\")\n\t\tclient, err = storage.NewClient(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to open Google Cloud Storage client: %w\", err)\n\t\t}\n\t}\n\n\treturn client, nil\n}",
"func NewGCSUploader(t testing.TB) *GCSUploader {\n\tmock := &GCSUploader{}\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func New(d diag.Sink, cloudURL string, project *workspace.Project, insecure bool) (Backend, error) {\n\tcloudURL = ValueOrDefaultURL(cloudURL)\n\taccount, err := workspace.GetAccount(cloudURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting stored credentials: %w\", err)\n\t}\n\tapiToken := account.AccessToken\n\n\tclient := client.NewClient(cloudURL, apiToken, insecure, d)\n\tcapabilities := detectCapabilities(d, client)\n\n\treturn &cloudBackend{\n\t\td: d,\n\t\turl: cloudURL,\n\t\tclient: client,\n\t\tcapabilities: capabilities,\n\t\tcurrentProject: project,\n\t}, nil\n}",
"func NewCloudCommunications()(*CloudCommunications) {\n m := &CloudCommunications{\n Entity: *NewEntity(),\n }\n return m\n}",
"func newCloudlyckeClient() *http.Client {\n\treturn &http.Client{}\n}",
"func configureCloud(res http.ResponseWriter, req *http.Request) (gcs *gcsPhotos) {\n\tctx := appengine.NewContext(req)\n\tclient, err := storage.NewClient(ctx)\n\tif err != nil {\n\t\tlog.Errorf(ctx, \"ERROR handler NewClient: \", err)\n\t\treturn\n\t}\n\tdefer client.Close()\n\t\n\tgcs = &gcsPhotos{\n\t\tctx: ctx,\n\t\tres: res,\n\t\tclient: client,\n\t\tbucket: client.Bucket(gcsBucket),\n\t}\n\treturn\n}",
"func New() (*mock, error) {\n\treturn &mock{\n\t\tConfigService: ConfigService{},\n\t\tContainerService: ContainerService{},\n\t\tDistributionService: DistributionService{},\n\t\tImageService: ImageService{},\n\t\tNetworkService: NetworkService{},\n\t\tNodeService: NodeService{},\n\t\tPluginService: PluginService{},\n\t\tSecretService: SecretService{},\n\t\tServiceService: ServiceService{},\n\t\tSystemService: SystemService{},\n\t\tSwarmService: SwarmService{},\n\t\tVolumeService: VolumeService{},\n\t\tVersion: Version,\n\t}, nil\n}",
"func newCloudConnection(config io.Reader) (cloudprovider.Interface, error) {\n\tklog.V(4).Infof(\"newCloudConnection called with %+v\", config)\n\tif config != nil {\n\t\tklog.Warningf(\"supplied config is not read by this version. Using environment\")\n\t}\n\tnewCloud := &cloud{}\n\t_, err := newCloud.cloudClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newCloud, nil\n}",
"func NewCloudStorage(devservers []string,\n\ttlwServer, dutName, dutServer, buildArtifactsURL, swarmingTaskID, buildBucketID string) *CloudStorage {\n\treturn &CloudStorage{\n\t\tnewClient: func(ctx context.Context) (devserver.Client, error) {\n\t\t\treturn newClientForURLs(ctx, devservers, tlwServer, dutName, dutServer, swarmingTaskID, buildBucketID)\n\t\t},\n\t\tbuildArtifactsURL: buildArtifactsURL,\n\t}\n}",
"func TestGetCloudProvider(t *testing.T) {\n\tfakeCredFile := \"fake-cred-file.json\"\n\tfakeKubeConfig := \"fake-kube-config\"\n\temptyKubeConfig := \"empty-kube-config\"\n\tfakeContent := `\napiVersion: v1\nclusters:\n- cluster:\n server: https://localhost:8080\n name: foo-cluster\ncontexts:\n- context:\n cluster: foo-cluster\n user: foo-user\n namespace: bar\n name: foo-context\ncurrent-context: foo-context\nkind: Config\nusers:\n- name: foo-user\n user:\n exec:\n apiVersion: client.authentication.k8s.io/v1alpha1\n args:\n - arg-1\n - arg-2\n command: foo-command\n`\n\n\terr := createTestFile(emptyKubeConfig)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer func() {\n\t\tif err := os.Remove(emptyKubeConfig); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\ttests := []struct {\n\t\tdesc string\n\t\tcreateFakeCredFile bool\n\t\tcreateFakeKubeConfig bool\n\t\tkubeconfig string\n\t\tnodeID string\n\t\tuserAgent string\n\t\tallowEmptyCloudConfig bool\n\t\texpectedErr error\n\t}{\n\t\t{\n\t\t\tdesc: \"out of cluster, no kubeconfig, no credential file\",\n\t\t\tkubeconfig: \"\",\n\t\t\tnodeID: \"\",\n\t\t\tallowEmptyCloudConfig: true,\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tdesc: \"[failure][disallowEmptyCloudConfig] out of cluster, no kubeconfig, no credential file\",\n\t\t\tkubeconfig: \"\",\n\t\t\tnodeID: \"\",\n\t\t\tallowEmptyCloudConfig: false,\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tdesc: \"[failure] out of cluster & in cluster, specify a non-exist kubeconfig, no credential file\",\n\t\t\tkubeconfig: \"/tmp/non-exist.json\",\n\t\t\tnodeID: \"\",\n\t\t\tallowEmptyCloudConfig: true,\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tdesc: \"[failure] out of cluster & in cluster, specify a empty kubeconfig, no credential file\",\n\t\t\tkubeconfig: emptyKubeConfig,\n\t\t\tnodeID: \"\",\n\t\t\tallowEmptyCloudConfig: true,\n\t\t\texpectedErr: fmt.Errorf(\"failed to get KubeClient: invalid configuration: no configuration has been provided, try setting KUBERNETES_MASTER environment variable\"),\n\t\t},\n\t\t{\n\t\t\tdesc: \"[failure] out of cluster & in cluster, specify a fake kubeconfig, no credential file\",\n\t\t\tcreateFakeKubeConfig: true,\n\t\t\tkubeconfig: fakeKubeConfig,\n\t\t\tnodeID: \"\",\n\t\t\tallowEmptyCloudConfig: true,\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tdesc: \"[success] out of cluster & in cluster, no kubeconfig, a fake credential file\",\n\t\t\tcreateFakeCredFile: true,\n\t\t\tkubeconfig: \"\",\n\t\t\tnodeID: \"\",\n\t\t\tuserAgent: \"useragent\",\n\t\t\tallowEmptyCloudConfig: true,\n\t\t\texpectedErr: nil,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tif test.createFakeKubeConfig {\n\t\t\tif err := createTestFile(fakeKubeConfig); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t\tdefer func() {\n\t\t\t\tif err := os.Remove(fakeKubeConfig); err != nil {\n\t\t\t\t\tt.Error(err)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tif err := os.WriteFile(fakeKubeConfig, []byte(fakeContent), 0666); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\t\tif test.createFakeCredFile {\n\t\t\tif err := createTestFile(fakeCredFile); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t\tdefer func() {\n\t\t\t\tif err := os.Remove(fakeCredFile); err != nil {\n\t\t\t\t\tt.Error(err)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\toriginalCredFile, ok := os.LookupEnv(DefaultAzureCredentialFileEnv)\n\t\t\tif ok {\n\t\t\t\tdefer os.Setenv(DefaultAzureCredentialFileEnv, originalCredFile)\n\t\t\t} else {\n\t\t\t\tdefer 
os.Unsetenv(DefaultAzureCredentialFileEnv)\n\t\t\t}\n\t\t\tos.Setenv(DefaultAzureCredentialFileEnv, fakeCredFile)\n\t\t}\n\t\tcloud, err := getCloudProvider(test.kubeconfig, test.nodeID, \"\", \"\", test.userAgent, test.allowEmptyCloudConfig, 25.0, 50)\n\t\tif !reflect.DeepEqual(err, test.expectedErr) && test.expectedErr != nil && !strings.Contains(err.Error(), test.expectedErr.Error()) {\n\t\t\tt.Errorf(\"desc: %s,\\n input: %q, GetCloudProvider err: %v, expectedErr: %v\", test.desc, test.kubeconfig, err, test.expectedErr)\n\t\t}\n\t\tif cloud == nil {\n\t\t\tt.Errorf(\"return value of getCloudProvider should not be nil even there is error\")\n\t\t} else {\n\t\t\tassert.Equal(t, cloud.Environment.StorageEndpointSuffix, storage.DefaultBaseURL)\n\t\t\tassert.Equal(t, cloud.UserAgent, test.userAgent)\n\t\t}\n\t}\n}",
"func New(ip string, user string, name string) *Cloud {\n\treturn &Cloud{\n\t\tIP: ip,\n\t\tUser: user,\n\t\tName: name,\n\t\tType: types.CloudTypeDocker,\n\t}\n}",
"func NewMockBackend(conf config.Config) (*MockBackend, error) {\n\t// Set up a GCE scheduler backend that has a mock client\n\t// so that it doesn't actually communicate with GCE.\n\n\tgceWrapper := new(gcemock.Wrapper)\n\tgceClient := &gceClient{\n\t\twrapper: gceWrapper,\n\t\tproject: conf.Backends.GCE.Project,\n\t\tzone: conf.Backends.GCE.Zone,\n\t}\n\n\tschedClient, err := scheduler.NewClient(conf.Worker)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &MockBackend{\n\t\tBackend: &Backend{\n\t\t\tconf: conf,\n\t\t\tclient: schedClient,\n\t\t\tgce: gceClient,\n\t\t},\n\t\tWrapper: gceWrapper,\n\t}, nil\n}",
"func NewClient(t mockConstructorTestingTNewClient) *Client {\n\tmock := &Client{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewClient(t mockConstructorTestingTNewClient) *Client {\n\tmock := &Client{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewClient(t mockConstructorTestingTNewClient) *Client {\n\tmock := &Client{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewClient(t mockConstructorTestingTNewClient) *Client {\n\tmock := &Client{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewStorage(t mockConstructorTestingTNewStorage) *Storage {\n\tmock := &Storage{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewStorage(t mockConstructorTestingTNewStorage) *Storage {\n\tmock := &Storage{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func newMockKvCapabilityVerifier(t mockConstructorTestingTnewMockKvCapabilityVerifier) *mockKvCapabilityVerifier {\n\tmock := &mockKvCapabilityVerifier{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewClient(hCloudToken string) *Client {\n\treturn &Client{\n\t\thCloudToken: hCloudToken,\n\t\thttpClient: &http.Client{Timeout: 3 * time.Second},\n\t}\n}",
"func test_cloud() {\n\tfmt.Println(\"Testing the clouded background...\")\n\tbg := initBackground()\n\tbg = insertCloud(bg)\n\trender(bg)\n}",
"func NewMock(opts ...ClientOpt) (*client, error) {\n\t// create new Docker runtime client\n\tc, err := New(opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create Docker client from the mock client\n\t//\n\t// https://pkg.go.dev/github.com/go-vela/mock/docker#New\n\t_docker, err := mock.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// set the Docker client in the runtime client\n\tc.Docker = _docker\n\n\treturn c, nil\n}",
"func New(username, password string) (up *UpCloud, err error) {\n\tvar u UpCloud\n\n\tu.req = requester.New(&http.Client{}, Hostname)\n\n\t// Set username\n\tu.username = username\n\t// Set password\n\tu.password = password\n\t// Assign pointer reference\n\tup = &u\n\treturn\n}",
"func NewGCP() (*GCP, error) {\n\n\tctx := context.Background()\n\tclient, err := monitoring.NewMetricClient(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"NewMetricClient: %v\", err)\n\t}\n\n\treturn &GCP{\n\t\tctx: ctx,\n\t\tclient: client,\n\t}, nil\n}",
"func FakeNewStorage() *fakeStorage {\n\treturn &fakeStorage{}\n}",
"func newGoogleStorageStore(config *GoogleStorageStoreConfig) (*googleStorageStore, error) {\n\tif config.Bucket == \"\" {\n\t\treturn nil, errors.New(\"bucket required\")\n\t}\n\n\tvar opts []option.ClientOption\n\tvar noAuth bool\n\tcredsPath := getGoogleCredsPath()\n\tif credsPath == \"\" {\n\t\tnoAuth = true\n\t\topts = append(opts, option.WithoutAuthentication())\n\t} else {\n\t\topts = append(opts, option.WithCredentialsFile(credsPath), option.WithScopes(storage.ScopeFullControl))\n\t}\n\n\tvar httpTransport http.Transport\n\tvar err error\n\tctx := context.WithValue(context.Background(), oauth2.HTTPClient, &http.Client{Transport: &httpTransport})\n\tgcpTransport, err := gcphttp.NewTransport(ctx, &httpTransport, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thttpClient := &http.Client{Transport: gcpTransport}\n\tclientOpt := option.WithHTTPClient(httpClient)\n\tclient, err := storage.NewClient(context.Background(), clientOpt)\n\tif err != nil {\n\t\thttpTransport.CloseIdleConnections()\n\t\tif noAuth {\n\t\t\treturn nil, errors.Wrap(err, 0)\n\t\t}\n\t\thttpClient.Transport, err = gcphttp.NewTransport(ctx, &httpTransport, option.WithoutAuthentication())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tclient, err = storage.NewClient(context.Background(), clientOpt)\n\t\tif err != nil {\n\t\t\thttpTransport.CloseIdleConnections()\n\t\t\treturn nil, errors.Wrap(err, 0)\n\t\t}\n\t}\n\n\treturn &googleStorageStore{\n\t\tclient: client,\n\t\tbucket: client.Bucket(config.Bucket),\n\t\thttpTransport: &httpTransport,\n\t}, nil\n}",
"func newFactory() func(config *client.Config) (client.Client, *probe.Error) {\n\tclientCache := make(map[uint32]minio.CloudStorageAPI)\n\tmutex := &sync.Mutex{}\n\n\t// Return New function.\n\treturn func(config *client.Config) (client.Client, *probe.Error) {\n\t\tu := client.NewURL(config.HostURL)\n\t\ttransport := http.DefaultTransport\n\t\tif config.Debug == true {\n\t\t\tif config.Signature == \"S3v4\" {\n\t\t\t\ttransport = httptracer.GetNewTraceTransport(NewTraceV4(), http.DefaultTransport)\n\t\t\t}\n\t\t\tif config.Signature == \"S3v2\" {\n\t\t\t\ttransport = httptracer.GetNewTraceTransport(NewTraceV2(), http.DefaultTransport)\n\t\t\t}\n\t\t}\n\n\t\t// New S3 configuration.\n\t\ts3Conf := minio.Config{\n\t\t\tAccessKeyID: config.AccessKey,\n\t\t\tSecretAccessKey: config.SecretKey,\n\t\t\tTransport: transport,\n\t\t\tEndpoint: u.Scheme + u.SchemeSeparator + u.Host,\n\t\t\tSignature: func() minio.SignatureType {\n\t\t\t\tif config.Signature == \"S3v2\" {\n\t\t\t\t\treturn minio.SignatureV2\n\t\t\t\t}\n\t\t\t\treturn minio.SignatureV4\n\t\t\t}(),\n\t\t}\n\n\t\ts3Conf.SetUserAgent(config.AppName, config.AppVersion, config.AppComments...)\n\n\t\t// Generate a hash out of s3Conf.\n\t\tconfHash := fnv.New32a()\n\t\tconfHash.Write([]byte(s3Conf.Endpoint + s3Conf.AccessKeyID + s3Conf.SecretAccessKey))\n\t\tconfSum := confHash.Sum32()\n\n\t\t// Lookup previous cache by hash.\n\t\tmutex.Lock()\n\t\tdefer mutex.Unlock()\n\t\tvar api minio.CloudStorageAPI\n\t\tfound := false\n\t\tif api, found = clientCache[confSum]; !found {\n\t\t\t// Not found. Instantiate a new minio client.\n\t\t\tvar e error\n\t\t\tapi, e = minio.New(s3Conf)\n\t\t\tif e != nil {\n\t\t\t\treturn nil, probe.NewError(e)\n\t\t\t}\n\t\t\t// Cache the new minio client with hash of config as key.\n\t\t\tclientCache[confSum] = api\n\t\t}\n\n\t\ts3Clnt := &s3Client{\n\t\t\tmu: new(sync.Mutex),\n\t\t\tapi: api,\n\t\t\thostURL: u,\n\t\t\tvirtualStyle: isVirtualHostStyle(u.Host),\n\t\t}\n\t\treturn s3Clnt, nil\n\t}\n}",
"func NewForTesting() buckets.Provider {\n\treturn newWithOptions(option.WithoutAuthentication())\n}",
"func newWithOptions(options ...option.ClientOption) buckets.Provider {\n\tctx := context.Background()\n\tclient, err := storage.NewClient(ctx, options...)\n\tif err != nil {\n\t\tlogs.Panicf(\"Failed to get GCS client: %v\", err)\n\t}\n\n\treturn &bucketProvider{client}\n}",
"func NewMock() *Mock {\n\tc := &Mock{\n\t\tFakeIncoming: func() chan []byte {\n\t\t\treturn make(chan []byte, 2)\n\t\t},\n\t\tFakeName: func() string {\n\t\t\treturn \"TestClient\"\n\t\t},\n\t\tFakeGame: func() string {\n\t\t\treturn \"test\"\n\t\t},\n\t\tFakeClose: func() {\n\t\t\t// Do nothing\n\t\t},\n\t\tFakeStopTimer: func() {\n\t\t\t// Do nothing\n\t\t},\n\t\tFakeRoom: func() interfaces.Room {\n\t\t\treturn nil\n\t\t},\n\t\tFakeSetRoom: func(interfaces.Room) {\n\n\t\t},\n\t}\n\n\tc.FakeWritePump = func() {\n\t\tfor range c.Incoming() {\n\t\t\t// Do nothing\n\t\t}\n\t}\n\n\tc.FakeSetName = func(string) interfaces.Client {\n\t\treturn c\n\t}\n\treturn c\n}",
"func newCloudflareClient(ctx *cli.Context) *cloudflareClient {\n\ttoken := ctx.String(cloudflareTokenFlag.Name)\n\tif token == \"\" {\n\t\texit(fmt.Errorf(\"need cloudflare API token to proceed\"))\n\t}\n\tapi, err := cloudflare.NewWithAPIToken(token)\n\tif err != nil {\n\t\texit(fmt.Errorf(\"can't create Cloudflare client: %v\", err))\n\t}\n\treturn &cloudflareClient{\n\t\tAPI: api,\n\t\tzoneID: ctx.String(cloudflareZoneIDFlag.Name),\n\t}\n}",
"func NewFakeDocker() *FakeDocker {\n dockerClient := &FakeDocker{}\n dockerClient.Containers = make(map[string]*docker.Container)\n return dockerClient\n}",
"func NewMockGCS(ctrl *gomock.Controller) *MockGCS {\n\tmock := &MockGCS{ctrl: ctrl}\n\tmock.recorder = &MockGCSMockRecorder{mock}\n\treturn mock\n}",
"func NewCloudTestConfig() (result *CloudTestConfig) {\n\tresult = &CloudTestConfig{}\n\tresult.Statistics.Enabled = true\n\tresult.Statistics.Interval = 60\n\treturn result\n}",
"func NewMock(serverHost string) (*MockClient, error) {\n\treturn &MockClient{}, nil\n}",
"func (r *ResourceManager) Cloud() *CloudServiceClient {\n\treturn &CloudServiceClient{getConn: r.getConn}\n}",
"func NewMock(path string, nodes uint, replicas uint, vbuckets uint, specs ...BucketSpec) (m *Mock, err error) {\n\tvar lsn *net.TCPListener\n\tchAccept := make(chan bool)\n\tm = &Mock{}\n\n\tdefer func() {\n\t\tclose(chAccept)\n\t\tif lsn != nil {\n\t\t\tif err := lsn.Close(); err != nil {\n\t\t\t\tlog.Printf(\"Failed to close listener: %v\", err)\n\t\t\t}\n\t\t}\n\t\texc := recover()\n\n\t\tif exc == nil {\n\t\t\t// No errors, everything is OK\n\t\t\treturn\n\t\t}\n\n\t\t// Close mock on error, destroying resources\n\t\tm.Close()\n\t\tif mExc, ok := exc.(mockError); !ok {\n\t\t\tpanic(mExc)\n\t\t} else {\n\t\t\tm = nil\n\t\t\terr = mExc\n\t\t}\n\t}()\n\n\tif lsn, err = net.ListenTCP(\"tcp\", &net.TCPAddr{Port: 0}); err != nil {\n\t\tthrowMockError(\"Couldn't set up listening socket\", err)\n\t}\n\t_, ctlPort, err := net.SplitHostPort(lsn.Addr().String())\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to split host and port: %v\", err)\n\t}\n\tlog.Printf(\"Listening for control connection at %s\\n\", ctlPort)\n\n\tgo func() {\n\t\tvar err error\n\n\t\tdefer func() {\n\t\t\tchAccept <- false\n\t\t}()\n\t\tif m.conn, err = lsn.Accept(); err != nil {\n\t\t\tthrowMockError(\"Couldn't accept incoming control connection from mock\", err)\n\t\t\treturn\n\t\t}\n\t}()\n\n\tif len(specs) == 0 {\n\t\tspecs = []BucketSpec{{Name: \"default\", Type: BCouchbase}}\n\t}\n\n\toptions := []string{\n\t\t\"-jar\", path, \"--harakiri-monitor\", \"localhost:\" + ctlPort, \"--port\", \"0\",\n\t\t\"--replicas\", strconv.Itoa(int(replicas)),\n\t\t\"--vbuckets\", strconv.Itoa(int(vbuckets)),\n\t\t\"--nodes\", strconv.Itoa(int(nodes)),\n\t\t\"--buckets\", m.buildSpecStrings(specs),\n\t}\n\n\tlog.Printf(\"Invoking java %s\", strings.Join(options, \" \"))\n\tm.cmd = exec.Command(\"java\", options...)\n\n\tm.cmd.Stdout = os.Stdout\n\tm.cmd.Stderr = os.Stderr\n\n\tif err = m.cmd.Start(); err != nil {\n\t\tm.cmd = nil\n\t\tthrowMockError(\"Couldn't start command\", err)\n\t}\n\n\tselect {\n\tcase <-chAccept:\n\t\tbreak\n\n\tcase <-time.After(mockInitTimeout):\n\t\tthrowMockError(\"Timed out waiting for initialization\", errors.New(\"timeout\"))\n\t}\n\n\tm.rw = bufio.NewReadWriter(bufio.NewReader(m.conn), bufio.NewWriter(m.conn))\n\n\t// Read the port buffer, which is delimited by a NUL byte\n\tif portBytes, err := m.rw.ReadBytes(0); err != nil {\n\t\tthrowMockError(\"Couldn't get port information\", err)\n\t} else {\n\t\tportBytes = portBytes[:len(portBytes)-1]\n\t\tif entryPort, err := strconv.Atoi(string(portBytes)); err != nil {\n\t\t\tthrowMockError(\"Incorrectly formatted port from mock\", err)\n\t\t} else {\n\t\t\tm.EntryPort = uint16(entryPort)\n\t\t}\n\t}\n\n\tlog.Printf(\"Mock HTTP port at %d\\n\", m.EntryPort)\n\treturn\n}",
"func newKeyServerWithMocking(user upspin.UserName, ref string, data []byte) (*server, *storagetest.ExpectDownloadCapturePut) {\n\tmockGCP := &storagetest.ExpectDownloadCapturePut{\n\t\tRef: []string{ref},\n\t\tData: [][]byte{data},\n\t\tPutContents: make([][]byte, 0, 1),\n\t\tPutRef: make([]string, 0, 1),\n\t}\n\ts := &server{\n\t\tstorage: mockGCP,\n\t\tuser: user,\n\t\tlookupTXT: mockLookupTXT,\n\t\tlogger: &noopLogger{},\n\t\tcache: cache.NewLRU(10),\n\t\tnegCache: cache.NewLRU(10),\n\t}\n\treturn s, mockGCP\n}",
"func init() {\n\tcloudprovider.RegisterCloudProvider(providerName, newCloudConnection)\n}",
"func (cloud *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {\n\tcloud.kubeClient = clientBuilder.ClientOrDie(\"tencentcloud-cloud-provider\")\n\tcredential := common.NewCredential(\n\t\t//os.Getenv(\"TENCENTCLOUD_SECRET_ID\"),\n\t\t//os.Getenv(\"TENCENTCLOUD_SECRET_KEY\"),\n\t\tcloud.txConfig.SecretId,\n\t\tcloud.txConfig.SecretKey,\n\t)\n\t// 非必要步骤\n\t// 实例化一个客户端配置对象,可以指定超时时间等配置\n\tcpf := profile.NewClientProfile()\n\t// SDK有默认的超时时间,非必要请不要进行调整。\n\t// 如有需要请在代码中查阅以获取最新的默认值。\n\tcpf.HttpProfile.ReqTimeout = 10\n\tcvmClient, err := cvm.NewClient(credential, cloud.txConfig.Region, cpf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcloud.cvm = cvmClient\n\tcvmV3Client, err := cvm.NewClient(credential, cloud.txConfig.Region, cpf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcloud.cvmV3 = cvmV3Client\n\ttkeClient, err := tke.NewClient(credential, cloud.txConfig.Region, cpf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcloud.tke = tkeClient\n\tclbClient, err := clb.NewClient(credential, cloud.txConfig.Region, cpf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcloud.clb = clbClient\n\treturn\n}",
"func newMemClient(t *testing.T) drive.Client {\n\tc, err := drive.NewClient(drive.Config{Provider: \"memory\"})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn c\n}",
"func NewForge(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *Forge {\n\tmock := &Forge{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func (m *MockCandidatePropertyGetter) Cloudprovider() *models.SCloudprovider {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Cloudprovider\")\n\tret0, _ := ret[0].(*models.SCloudprovider)\n\treturn ret0\n}",
"func Mock(codec codec.Codec) (*Client, io.Closer, error) {\n\tdir, err := ioutil.TempDir(\"\", \"etcd\")\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcfg := embed.NewConfig()\n\tcfg.Logger = \"zap\"\n\tcfg.Dir = dir\n\tlpurl, _ := url.Parse(\"http://localhost:0\")\n\tlcurl, _ := url.Parse(\"http://localhost:0\")\n\tcfg.LPUrls = []url.URL{*lpurl}\n\tcfg.LCUrls = []url.URL{*lcurl}\n\n\tetcd, err := embed.StartEtcd(cfg)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tselect {\n\tcase <-etcd.Server.ReadyNotify():\n\tcase <-time.After(etcdStartTimeout):\n\t\tetcd.Server.Stop() // trigger a shutdown\n\t\treturn nil, nil, fmt.Errorf(\"server took too long to start\")\n\t}\n\n\tcloser := CloserFunc(func() error {\n\t\tetcd.Server.Stop()\n\t\treturn nil\n\t})\n\n\tvar config Config\n\tflagext.DefaultValues(&config)\n\n\tclient := &Client{\n\t\tcfg: config,\n\t\tcodec: codec,\n\t\tcli: v3client.New(etcd.Server),\n\t}\n\n\treturn client, closer, nil\n}",
"func newClient(auth azure.Authorizer) *azureClient {\n\treturn &azureClient{\n\t\tscalesetvms: newVirtualMachineScaleSetVMsClient(auth.SubscriptionID(), auth.BaseURI(), auth.Authorizer()),\n\t}\n}",
"func Mock(objects ...runtime.Object) KubernetesClientLambda {\n\tfakePool, fakeClient := NewFakes(objects...)\n\treturn &kubernetesClientLambdaImpl{\n\t\tclientPool: fakePool,\n\t\tinformerFactory: informers.NewSharedInformerFactory(fakeClient, 0),\n\t}\n}",
"func NewMockContiv() *MockContiv {\n\tci := containeridx.NewConfigIndex(logrus.DefaultLogger(), \"title\", nil)\n\treturn &MockContiv{\n\t\tpodIf: make(map[podmodel.ID]string),\n\t\tpodAppNs: make(map[podmodel.ID]uint32),\n\t\tcontainerIndex: ci,\n\t\tserviceLocalEndpointWeight: 1,\n\t}\n}",
"func New() *Mock {\n\treturn &Mock{\n\t\tm: mockMap{},\n\t\toldTransport: http.DefaultTransport,\n\t}\n}",
"func newClient(conf config) (*storage.Client, error) {\n\tdb, err := storage.NewDBClient(conf.MongoURI, conf.DBName, conf.MongoMICol, conf.MongoAgCol)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating DB client: %q\", err)\n\t}\n\tdb.Collection(conf.MongoMICol)\n\tbc := storage.NewCloudClient(conf.SwiftUsername, conf.SwiftAPIKey, conf.SwiftAuthURL, conf.SwiftDomain, conf.SwiftContainer)\n\tclient, err := storage.NewClient(db, bc)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating storage.client: %q\", err)\n\t}\n\treturn client, nil\n}",
"func New() (*Client, error) {\n\tstorageClient, err := newClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := &Client{\n\t\tClient: *storageClient,\n\t}\n\tclient.SetRetryer()\n\n\treturn client, nil\n}",
"func TestNewCache(t *testing.T) {\n\n\t// Test Data\n\tk8sNamespace := \"TestK8SNamespace\"\n\n\t// Create A Context With Test Logger & K8S Client\n\tctx := logging.WithLogger(context.TODO(), logtesting.TestLogger(t))\n\tctx = context.WithValue(ctx, injectionclient.Key{}, fake.NewSimpleClientset())\n\n\t// Perform The Test\n\tcache := NewCache(ctx, k8sNamespace)\n\n\t// Verify The Results\n\tassert.NotNil(t, cache)\n}",
"func FakeNew() (*Client, *FakeClientset) {\n\treturn FakeNewWithIngressSupports(false, true)\n}",
"func New() Client {\n\treturn &client{\n\t\tControllerParams: nil,\n\t\tCloud: nil,\n\t\tServiceEngineGroup: nil,\n\t\tNetwork: nil,\n\t}\n}",
"func (g *FakeClientFactory) New(context.Context, client.Reader, string, string) (capb.ConfigAgentClient, controllers.ConnCloseFunc, error) {\n\tif g.Caclient == nil {\n\t\tg.Reset()\n\t}\n\treturn g.Caclient, emptyConnCloseFunc, nil\n}",
"func NewClient() (cloudops.Ops, error) {\n\tvar i = new(instance)\n\tvar err error\n\tif metadata.OnGCE() {\n\t\terr = gceInfo(i)\n\t} else if ok := IsDevMode(); ok {\n\t\terr = gceInfoFromEnv(i)\n\t} else {\n\t\treturn nil, fmt.Errorf(\"instance is not running on GCE\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error fetching instance info. Err: %v\", err)\n\t}\n\n\tc, err := google.DefaultClient(context.Background(), compute.ComputeScope)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to authenticate with google api. Err: %v\", err)\n\t}\n\n\tservice, err := compute.New(c)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create Compute service: %v\", err)\n\t}\n\n\treturn &gceOps{\n\t\tCompute: unsupported.NewUnsupportedCompute(),\n\t\tinst: i,\n\t\tservice: service,\n\t}, nil\n}",
"func NewDeviceClient(t mockConstructorTestingTNewDeviceClient) *DeviceClient {\n\tmock := &DeviceClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewFake(force bool) (m starlark.HasAttrs, closeFn func(), err error) {\n\t// Create a fake API store with some endpoints pre-populated\n\tcm := corev1.ConfigMap{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"v1\",\n\t\t\tKind: \"ConfigMap\",\n\t\t},\n\t\tData: map[string]string{\n\t\t\t\"client-ca-file\": \"contents\",\n\t\t},\n\t}\n\tcmData, err := apiruntime.Encode(unstructured.UnstructuredJSONScheme, &cm)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfm := map[string][]byte{\n\t\t\"/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\": cmData,\n\t}\n\n\ts := httptest.NewTLSServer(&fakeKube{m: fm})\n\n\tu, err := url.Parse(s.URL)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\th := \"https://\" + u.Host\n\ttlsConfig := rest.TLSClientConfig{\n\t\tInsecure: true,\n\t}\n\trConf := &rest.Config{Host: h, TLSClientConfig: tlsConfig}\n\n\tt, err := rest.TransportFor(rConf)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tk := New(\n\t\th,\n\t\tfakeDiscovery(),\n\t\tdynamic.NewForConfigOrDie(rConf),\n\t\t&http.Client{Transport: t},\n\t\tfalse, /* dryRun */\n\t\tforce,\n\t\tfalse, /* diff */\n\t\tnil, /* diffFilters */\n\t)\n\n\treturn newFakeModule(k.(*kubePackage)), s.Close, nil\n}",
"func newVirtualMachineClient(subID string, authorizer auth.Authorizer) (*client, error) {\n\tc, err := wssdcloudclient.GetVirtualMachineClient(&subID, authorizer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &client{c}, nil\n}",
"func CreateCloudCredential(provider, name string, uid, orgID string) {\n\tStep(fmt.Sprintf(\"Create cloud credential [%s] in org [%s]\", name, orgID), func() {\n\t\tlogrus.Printf(\"Create credential name %s for org %s provider %s\", name, orgID, provider)\n\t\tbackupDriver := Inst().Backup\n\t\tswitch provider {\n\t\tcase drivers.ProviderAws:\n\t\t\tlogrus.Infof(\"Create creds for aws\")\n\t\t\tid := os.Getenv(\"AWS_ACCESS_KEY_ID\")\n\t\t\texpect(id).NotTo(equal(\"\"),\n\t\t\t\t\"AWS_ACCESS_KEY_ID Environment variable should not be empty\")\n\n\t\t\tsecret := os.Getenv(\"AWS_SECRET_ACCESS_KEY\")\n\t\t\texpect(secret).NotTo(equal(\"\"),\n\t\t\t\t\"AWS_SECRET_ACCESS_KEY Environment variable should not be empty\")\n\n\t\t\tcredCreateRequest := &api.CloudCredentialCreateRequest{\n\t\t\t\tCreateMetadata: &api.CreateMetadata{\n\t\t\t\t\tName: name,\n\t\t\t\t\tUid: uid,\n\t\t\t\t\tOrgId: orgID,\n\t\t\t\t},\n\t\t\t\tCloudCredential: &api.CloudCredentialInfo{\n\t\t\t\t\tType: api.CloudCredentialInfo_AWS,\n\t\t\t\t\tConfig: &api.CloudCredentialInfo_AwsConfig{\n\t\t\t\t\t\tAwsConfig: &api.AWSConfig{\n\t\t\t\t\t\t\tAccessKey: id,\n\t\t\t\t\t\t\tSecretKey: secret,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\t//ctx, err := backup.GetPxCentralAdminCtx()\n\t\t\tctx, err := backup.GetAdminCtxFromSecret()\n\t\t\texpect(err).NotTo(haveOccurred(),\n\t\t\t\tfmt.Sprintf(\"Failed to fetch px-central-admin ctx: [%v]\",\n\t\t\t\t\terr))\n\t\t\t_, err = backupDriver.CreateCloudCredential(ctx, credCreateRequest)\n\t\t\tif err != nil && strings.Contains(err.Error(), \"already exists\") {\n\t\t\t\treturn\n\t\t\t}\n\t\t\texpect(err).NotTo(haveOccurred(),\n\t\t\t\tfmt.Sprintf(\"Failed to create cloud credential [%s] in org [%s]\", name, orgID))\n\t\t// TODO: validate CreateCloudCredentialResponse also\n\t\tcase drivers.ProviderAzure:\n\t\t\tlogrus.Infof(\"Create creds for azure\")\n\t\t\ttenantID, clientID, clientSecret, subscriptionID, accountName, accountKey := GetAzureCredsFromEnv()\n\t\t\tcredCreateRequest := &api.CloudCredentialCreateRequest{\n\t\t\t\tCreateMetadata: &api.CreateMetadata{\n\t\t\t\t\tName: name,\n\t\t\t\t\tUid: uid,\n\t\t\t\t\tOrgId: orgID,\n\t\t\t\t},\n\t\t\t\tCloudCredential: &api.CloudCredentialInfo{\n\t\t\t\t\tType: api.CloudCredentialInfo_Azure,\n\t\t\t\t\tConfig: &api.CloudCredentialInfo_AzureConfig{\n\t\t\t\t\t\tAzureConfig: &api.AzureConfig{\n\t\t\t\t\t\t\tTenantId: tenantID,\n\t\t\t\t\t\t\tClientId: clientID,\n\t\t\t\t\t\t\tClientSecret: clientSecret,\n\t\t\t\t\t\t\tAccountName: accountName,\n\t\t\t\t\t\t\tAccountKey: accountKey,\n\t\t\t\t\t\t\tSubscriptionId: subscriptionID,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\t//ctx, err := backup.GetPxCentralAdminCtx()\n\t\t\tctx, err := backup.GetAdminCtxFromSecret()\n\t\t\texpect(err).NotTo(haveOccurred(),\n\t\t\t\tfmt.Sprintf(\"Failed to fetch px-central-admin ctx: [%v]\",\n\t\t\t\t\terr))\n\t\t\t_, err = backupDriver.CreateCloudCredential(ctx, credCreateRequest)\n\t\t\tif err != nil && strings.Contains(err.Error(), \"already exists\") {\n\t\t\t\treturn\n\t\t\t}\n\t\t\texpect(err).NotTo(haveOccurred(),\n\t\t\t\tfmt.Sprintf(\"Failed to create cloud credential [%s] in org [%s]\", name, orgID))\n\t\t\t// TODO: validate CreateCloudCredentialResponse also\n\t\t}\n\t})\n}",
"func New(mockenv *common.MockEnvironment, storage storage.Storage) *MockService {\n\ts := &MockService{\n\t\tkube: mockenv.GetKubeClient(),\n\t\tstorage: storage,\n\t\tprojects: mockenv.GetProjects(),\n\t}\n\ts.v1 = &SecretsV1{MockService: s}\n\treturn s\n}",
"func Create(rw *RequestWrapper) (*clm.GKECluster, error) {\n\tgkeOps, err := rw.acquire()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif rw.Request.SaveMetaData {\n\t\t// At this point we should have a cluster ready to run test. Need to save\n\t\t// metadata so that following flow can understand the context of cluster, as\n\t\t// well as for Prow usage later\n\t\twriteMetaData(gkeOps.Cluster, gkeOps.Project)\n\t}\n\n\t// set up kube config points to cluster\n\tclusterAuthCmd := fmt.Sprintf(\n\t\t\"gcloud beta container clusters get-credentials %s --region %s --project %s\",\n\t\tgkeOps.Cluster.Name, gkeOps.Cluster.Location, gkeOps.Project)\n\tif out, err := cmd.RunCommand(clusterAuthCmd); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed connecting to cluster: %q, %w\", out, err)\n\t}\n\tif out, err := cmd.RunCommand(\"gcloud config set project \" + gkeOps.Project); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed setting project: %q, %w\", out, err)\n\t}\n\n\treturn gkeOps, nil\n}",
"func NewCloudProvider(dc *kubermaticv1.Datacenter, secretKeyGetter provider.SecretKeySelectorValueFunc) (*AmazonEC2, error) {\n\tif dc.Spec.AWS == nil {\n\t\treturn nil, errors.New(\"datacenter is not an AWS datacenter\")\n\t}\n\treturn &AmazonEC2{\n\t\tdc: dc.Spec.AWS,\n\t\tsecretKeySelector: secretKeyGetter,\n\t}, nil\n}",
"func newKVClient(ctx context.Context, storeType, address string, timeout time.Duration) (kvstore.Client, error) {\n\tlogger.Infow(ctx, \"kv-store-type\", log.Fields{\"store\": storeType})\n\tswitch storeType {\n\tcase \"etcd\":\n\t\treturn kvstore.NewEtcdClient(ctx, address, timeout, log.FatalLevel)\n\t}\n\treturn nil, errors.New(\"unsupported-kv-store\")\n}",
"func NewhttpClientMock(valid bool) *HTTPClientMock {\n\treturn &HTTPClientMock{\n\t\tapiKeyPublic: \"apiKeyPublic\",\n\t\tapiKeyPrivate: \"apiKeyPrivate\",\n\t\tclient: http.DefaultClient,\n\t\tvalidCreds: valid,\n\t\tfx: fixtures.New(),\n\t\tCallFunc: func() (int, int, error) {\n\t\t\tif valid == true {\n\t\t\t\treturn 1, 1, nil\n\t\t\t}\n\t\t\treturn 0, 0, errors.New(\"Unexpected error: Unexpected server response code: 401: EOF\")\n\t\t},\n\t\tSendMailV31Func: func(req *http.Request) (*http.Response, error) {\n\t\t\treturn nil, errors.New(\"mock send mail function not implemented yet\")\n\t\t},\n\t}\n}",
"func NewMock() Client {\n\treturn &mockClient{}\n}",
"func NewCloudTunnel(address string) CloudTunnel {\n\ttunnel := &cloudTunnel{\n\t\taddress: address,\n\t\tclusterNameCheck: defaultClusterNameChecker,\n\t\tnotifyClientClosed: func(*config.ClusterRegistry) { return },\n\t\tafterConnectHook: defaultAfterConnectHook,\n\t}\n\n\ttunnel.receiveMessageHandler = func(client string, msg []byte) error {\n\t\treturn nil\n\t}\n\treturn tunnel\n}",
"func newCloudFormationTemplates(c *ServiceoperatorV1alpha1Client, namespace string) *cloudFormationTemplates {\n\treturn &cloudFormationTemplates{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}",
"func NewK8SCloudInCluster(opts Options) (CloudProvider, error) {\n\tconfig, err := rest.InClusterConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnamespace, err := ioutil.ReadFile(\"/var/run/secrets/kubernetes.io/serviceaccount/\" + apiv1.ServiceAccountNamespaceKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcloud := &K8SCloud{\n\t\tname: opts.Name,\n\t\thost: config.Host,\n\t\tbearerToken: config.BearerToken,\n\t\tnamespace: string(namespace),\n\t\tinsecure: opts.Insecure,\n\t}\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcloud.client = clientset\n\treturn cloud, nil\n}",
"func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client {\n\treturn &http.Client{\n\t\tTransport: transportFunc(doer),\n\t}\n}",
"func newGoogleStorageClient(config stow.Config) (*storage.Service, error) {\n\tjson, _ := config.Config(ConfigJSON)\n\tvar httpClient *http.Client\n\tscopes := []string{storage.DevstorageReadWriteScope}\n\tif s, ok := config.Config(ConfigScopes); ok && s != \"\" {\n\t\tscopes = strings.Split(s, \",\")\n\t}\n\tif json != \"\" {\n\t\tjwtConf, err := google.JWTConfigFromJSON([]byte(json), scopes...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thttpClient = jwtConf.Client(context.Background())\n\n\t} else {\n\t\tcreds, err := google.FindDefaultCredentials(context.Background(), strings.Join(scopes, \",\"))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thttpClient = oauth2.NewClient(context.Background(), creds.TokenSource)\n\t}\n\tservice, err := storage.New(httpClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn service, nil\n}",
"func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) {\n\ts := initSettings(opts...)\n\to := s.clientOption\n\n\tvar creds *google.Credentials\n\t// In general, it is recommended to use raw.NewService instead of htransport.NewClient\n\t// since raw.NewService configures the correct default endpoints when initializing the\n\t// internal http client. However, in our case, \"NewRangeReader\" in reader.go needs to\n\t// access the http client directly to make requests, so we create the client manually\n\t// here so it can be re-used by both reader.go and raw.NewService. This means we need to\n\t// manually configure the default endpoint options on the http client. Furthermore, we\n\t// need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints.\n\tif host := os.Getenv(\"STORAGE_EMULATOR_HOST\"); host == \"\" {\n\t\t// Prepend default options to avoid overriding options passed by the user.\n\t\to = append([]option.ClientOption{option.WithScopes(ScopeFullControl, \"https://www.googleapis.com/auth/cloud-platform\"), option.WithUserAgent(userAgent)}, o...)\n\n\t\to = append(o, internaloption.WithDefaultEndpoint(\"https://storage.googleapis.com/storage/v1/\"))\n\t\to = append(o, internaloption.WithDefaultMTLSEndpoint(\"https://storage.mtls.googleapis.com/storage/v1/\"))\n\n\t\t// Don't error out here. The user may have passed in their own HTTP\n\t\t// client which does not auth with ADC or other common conventions.\n\t\tc, err := transport.Creds(ctx, o...)\n\t\tif err == nil {\n\t\t\tcreds = c\n\t\t\to = append(o, internaloption.WithCredentials(creds))\n\t\t}\n\t} else {\n\t\tvar hostURL *url.URL\n\n\t\tif strings.Contains(host, \"://\") {\n\t\t\th, err := url.Parse(host)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\thostURL = h\n\t\t} else {\n\t\t\t// Add scheme for user if not supplied in STORAGE_EMULATOR_HOST\n\t\t\t// URL is only parsed correctly if it has a scheme, so we build it ourselves\n\t\t\thostURL = &url.URL{Scheme: \"http\", Host: host}\n\t\t}\n\n\t\thostURL.Path = \"storage/v1/\"\n\t\tendpoint := hostURL.String()\n\n\t\t// Append the emulator host as default endpoint for the user\n\t\to = append([]option.ClientOption{option.WithoutAuthentication()}, o...)\n\n\t\to = append(o, internaloption.WithDefaultEndpoint(endpoint))\n\t\to = append(o, internaloption.WithDefaultMTLSEndpoint(endpoint))\n\t}\n\ts.clientOption = o\n\n\t// htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.\n\thc, ep, err := htransport.NewClient(ctx, s.clientOption...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"dialing: %v\", err)\n\t}\n\t// RawService should be created with the chosen endpoint to take account of user override.\n\trawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"storage client: %v\", err)\n\t}\n\t// Update readHost and scheme with the chosen endpoint.\n\tu, err := url.Parse(ep)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"supplied endpoint %q is not valid: %v\", ep, err)\n\t}\n\n\treturn &httpStorageClient{\n\t\tcreds: creds,\n\t\thc: hc,\n\t\treadHost: u.Host,\n\t\traw: rawService,\n\t\tscheme: u.Scheme,\n\t\tsettings: s,\n\t}, nil\n}",
"func New(conf *GCEConfig) (*GceImages, error) {\n\tvar err error\n\tif conf.ProjectID == \"\" {\n\t\treturn nil, errors.New(\"ProjectID is not set. Please check your configuration.\")\n\t}\n\n\t// increase the timeout. Also we need to pass the client with the context itself\n\ttimeout := time.Second * 30\n\tctx := context.WithValue(oauth2.NoContext, oauth2.HTTPClient, &http.Client{\n\t\tTransport: &http.Transport{TLSHandshakeTimeout: timeout},\n\t\tTimeout: timeout,\n\t})\n\n\tvar client *http.Client\n\n\t// allowed scopes\n\tscopes := []string{compute.ComputeScope}\n\n\t// Recommended way is explicit passing of credentials json which can be\n\t// downloaded from console.developers.google under APIs & Auth/Credentials\n\t// section\n\tif conf.AccountFile != \"\" {\n\t\t// expand shell meta character\n\t\tpath, err := homedir.Expand(conf.AccountFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tjsonContent, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tjtwConfig, err := google.JWTConfigFromJSON(jsonContent, scopes...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tclient = jtwConfig.Client(ctx)\n\t} else {\n\t\t// Look for application default credentials, for more details, see:\n\t\t// https://developers.google.com/accounts/docs/application-default-credentials\n\t\tclient, err = google.DefaultClient(ctx, scopes...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tsvc, err := compute.New(client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &GceImages{\n\t\tsvc: compute.NewImagesService(svc),\n\t\tconfig: conf,\n\t}, nil\n}",
"func newFakeClient() client.Client {\n\treturn fakeclient.NewFakeClient()\n}",
"func newFakeReconciler(initObjects ...runtime.Object) *ReconcileMachineRemediation {\n\tfakeClient := fake.NewFakeClient(initObjects...)\n\tremediator := &FakeRemedatior{}\n\treturn &ReconcileMachineRemediation{\n\t\tclient: fakeClient,\n\t\tremediator: remediator,\n\t\tnamespace: consts.NamespaceOpenshiftMachineAPI,\n\t}\n}",
"func NewMock() *Mock {\n\treturn &Mock{VolumesMock: &VolumesServiceMock{}}\n}",
"func NewAuth(users map[string]string) policies.AuthServiceClient {\n\treturn &authServiceMock{users}\n}",
"func getCloudCostClient() dbclient.CloudCostClient {\n\tvar cloudCost dbclient.CloudCostClient\n\n\tif strings.EqualFold(*cloud, \"azure\") {\n\t\tlog.Println(\"Initializing Azure client...\")\n\t\tazureClient := initAzureClient()\n\t\tcloudCost = &azureCloudCost{UsageExplorer: &azureClient}\n\t} else if strings.EqualFold(*cloud, \"aws\") {\n\t\tlog.Println(\"Initializing AWS client...\")\n\t\tawsClient := initAwsClient()\n\t\tcloudCost = &awsCloudCost{Client: &awsClient}\n\t} else {\n\t\tlog.Fatalf(\"Cloud provider \\\"%v\\\" is not supported\", *cloud)\n\t}\n\treturn cloudCost\n}",
"func NewClient(t string) *gophercloud.ServiceClient {\n\tvar err error\n\tao, region, err := authMethod()\n\tif err != nil {\n\t\tfmt.Printf(\"Error retrieving authentication credentials: %s\\n\", err)\n\t}\n\tif ao.IdentityEndpoint == \"\" {\n\t\tao.IdentityEndpoint = rackspace.RackspaceUSIdentity\n\t}\n\tpc, err := rackspace.AuthenticatedClient(ao)\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating ProviderClient: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tvar sc *gophercloud.ServiceClient\n\tswitch t {\n\tcase \"compute\":\n\t\tsc, err = rackspace.NewComputeV2(pc, gophercloud.EndpointOpts{\n\t\t\tRegion: region,\n\t\t})\n\t\tbreak\n\tcase \"blockstorage\":\n\t\tsc, err = rackspace.NewBlockStorageV1(pc, gophercloud.EndpointOpts{\n\t\t\tRegion: region,\n\t\t})\n\t\tbreak\n\tcase \"networking\":\n\t\tsc, err = rackspace.NewNetworkV2(pc, gophercloud.EndpointOpts{\n\t\t\tRegion: region,\n\t\t})\n\t\tbreak\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating ServiceClient (%s): %s\\n\", err, t)\n\t\tos.Exit(1)\n\t}\n\t// sc.UserAgent.Prepend(\"rack/\" + util.Version)\n\treturn sc\n}",
"func New(options Options) (TKGClient, error) { //nolint:gocritic\n\tvar err error\n\n\t// configure log options for tkg library\n\tconfigureLogging(options.LogOptions)\n\n\tif options.ConfigDir == \"\" {\n\t\treturn nil, errors.New(\"config directory cannot be empty. Please provide config directory when creating tkgctl client\")\n\t}\n\n\tif options.ProviderGetter == nil {\n\t\toptions.ProviderGetter = getDefaultProviderGetter()\n\t}\n\n\tif options.CustomizerOptions.RegionManagerFactory == nil {\n\t\toptions.CustomizerOptions = types.CustomizerOptions{\n\t\t\tRegionManagerFactory: region.NewFactory(),\n\t\t}\n\t}\n\tappConfig := types.AppConfig{\n\t\tTKGConfigDir: options.ConfigDir,\n\t\tProviderGetter: options.ProviderGetter,\n\t\tCustomizerOptions: options.CustomizerOptions,\n\t\tTKGSettingsFile: options.SettingsFile,\n\t}\n\n\terr = ensureTKGConfigFile(options.ConfigDir, options.ProviderGetter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tallClients, err := clientcreator.CreateAllClients(appConfig, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar clusterKubeConfig *types.ClusterKubeConfig\n\tif options.KubeConfig != \"\" {\n\t\tclusterKubeConfig = &types.ClusterKubeConfig{\n\t\t\tFile: options.KubeConfig,\n\t\t\tContext: options.KubeContext,\n\t\t}\n\t}\n\n\ttkgClient, err := client.New(client.Options{\n\t\tClusterCtlClient: allClients.ClusterCtlClient,\n\t\tReaderWriterConfigClient: allClients.ConfigClient,\n\t\tRegionManager: allClients.RegionManager,\n\t\tTKGConfigDir: options.ConfigDir,\n\t\tTimeout: constants.DefaultOperationTimeout,\n\t\tFeaturesClient: allClients.FeaturesClient,\n\t\tTKGConfigProvidersClient: allClients.TKGConfigProvidersClient,\n\t\tTKGBomClient: allClients.TKGBomClient,\n\t\tTKGConfigUpdater: allClients.TKGConfigUpdaterClient,\n\t\tTKGPathsClient: allClients.TKGConfigPathsClient,\n\t\tClusterKubeConfig: clusterKubeConfig,\n\t\tClusterClientFactory: clusterclient.NewClusterClientFactory(),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// ensure BoM and Providers prerequisite files are extracted if missing\n\terr = ensureBoMandProvidersPrerequisite(options.ConfigDir, allClients.TKGConfigUpdaterClient, options.ForceUpdateTKGCompatibilityImage)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to ensure prerequisites\")\n\t}\n\t// Set default BOM name to the config variables to use during template generation\n\tdefaultBoMFileName, err := allClients.TKGBomClient.GetDefaultBoMFileName()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to get default BOM file name\")\n\t}\n\tallClients.ConfigClient.TKGConfigReaderWriter().Set(constants.ConfigVariableDefaultBomFile, defaultBoMFileName)\n\n\treturn &tkgctl{\n\t\tconfigDir: options.ConfigDir,\n\t\tkubeconfig: options.KubeConfig,\n\t\tkubecontext: options.KubeContext,\n\t\tappConfig: appConfig,\n\t\ttkgBomClient: allClients.TKGBomClient,\n\t\ttkgConfigUpdaterClient: allClients.TKGConfigUpdaterClient,\n\t\ttkgConfigProvidersClient: allClients.TKGConfigProvidersClient,\n\t\ttkgConfigPathsClient: allClients.TKGConfigPathsClient,\n\t\ttkgClient: tkgClient,\n\t\tproviderGetter: options.ProviderGetter,\n\t\ttkgConfigReaderWriter: allClients.ConfigClient.TKGConfigReaderWriter(),\n\t}, nil\n}",
"func NewGitClient(t mockConstructorTestingTNewGitClient) *GitClient {\n\tmock := &GitClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func (c *Client) CloudCreateInstance(projectID, name, pubkeyID, flavorID, imageID, region string) (instance *types.CloudInstance, err error) {\n\tinstanceReq := types.CloudInstance{\n\t\tName: name,\n\t\tSSHKeyID: pubkeyID,\n\t\tFlavorID: flavorID,\n\t\tImageID: imageID,\n\t\tRegion: region,\n\t}\n\terr = c.Post(queryEscape(\"/cloud/project/%s/instance\", projectID), instanceReq, &instance)\n\treturn instance, err\n}",
"func NewCloudStore() CloudStore {\n\treturn NewStow()\n}",
"func (g *FakeDatabaseClientFactory) New(context.Context, client.Reader, string, string) (dbdpb.DatabaseDaemonClient, func() error, error) {\n\tif g.Dbclient == nil {\n\t\tg.Reset()\n\t}\n\treturn g.Dbclient, func() error { return nil }, nil\n}",
"func New(ctx context.Context, bucket string) (fs.Interface, error) {\n\tclient, err := storage.NewClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &gcs{\n\t\tbucket: client.Bucket(bucket),\n\t}, nil\n}",
"func NewGCPClient(keys, projectName string) (*GCPClient, error) {\n\tlog.Debugf(\"Connecting to GCP\")\n\tctx := context.Background()\n\tvar client *GCPClient\n\tif projectName == \"\" {\n\t\treturn nil, fmt.Errorf(\"the project name is not specified\")\n\t}\n\tif keys != \"\" {\n\t\tlog.Debugf(\"Using Keys %s\", keys)\n\t\tf, err := os.Open(keys)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tjsonKey, err := io.ReadAll(f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tconfig, err := google.JWTConfigFromJSON(jsonKey,\n\t\t\tstorage.DevstorageReadWriteScope,\n\t\t\tcompute.ComputeScope,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tclient = &GCPClient{\n\t\t\tclient: config.Client(ctx),\n\t\t\tprojectName: projectName,\n\t\t}\n\t} else {\n\t\tlog.Debugf(\"Using Application Default credentials\")\n\t\tgc, err := google.DefaultClient(\n\t\t\tctx,\n\t\t\tstorage.DevstorageReadWriteScope,\n\t\t\tcompute.ComputeScope,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclient = &GCPClient{\n\t\t\tclient: gc,\n\t\t\tprojectName: projectName,\n\t\t}\n\t}\n\n\tvar err error\n\tclient.compute, err = compute.NewService(ctx, option.WithHTTPClient(client.client))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient.storage, err = storage.NewService(ctx, option.WithHTTPClient(client.client))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Debugf(\"Generating SSH Keypair\")\n\tclient.privKey, err = rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn client, nil\n}",
"func (f *FactoryFake) New(address string) (client.Interface, error) {\n\tc, _ := f.Clients[address]\n\treturn c, nil\n}",
"func NewMock(t *testing.T) *MockT { return &MockT{t: t} }",
"func providerFactory(_ io.Reader) (cloudprovider.Interface, error) {\n\tlog := klogr.NewWithOptions(klogr.WithFormat(klogr.FormatKlog))\n\tc, err := loadConfig(envconfig.OsLookuper())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapiUrl := katapult.DefaultURL\n\tif c.APIHost != \"\" {\n\t\tlog.Info(\"default API base URL overrided\",\n\t\t\t\"url\", c.APIHost)\n\t\tapiUrl, err = url.Parse(c.APIHost)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse provided api url: %w\", err)\n\t\t}\n\t}\n\n\trm, err := katapult.New(\n\t\tkatapult.WithAPIKey(c.APIKey),\n\t\tkatapult.WithBaseURL(apiUrl),\n\t\tkatapult.WithUserAgent(\"kce-ccm\"), // TODO: Add version.\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := core.New(rm)\n\n\treturn &provider{\n\t\tlog: log,\n\t\tkatapult: client,\n\t\tconfig: *c,\n\t\tloadBalancer: &loadBalancerManager{\n\t\t\tlog: log,\n\t\t\tconfig: *c,\n\t\t\tloadBalancerController: client.LoadBalancers,\n\t\t\tloadBalancerRuleController: client.LoadBalancerRules,\n\t\t},\n\t}, nil\n}",
"func setupGCP(ctx *context.Context, bucket string) (*blob.Bucket, error) {\n\t// DefaultCredentials assumes a user has logged in with gcloud.\n\t// See here for more information:\n\t// https://cloud.google.com/docs/authentication/getting-started\n\tcreds, err := gcp.DefaultCredentials(*ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc, err := gcp.NewHTTPClient(gcp.DefaultTransport(), gcp.CredentialsTokenSource(creds))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// The bucket name must be globally unique.\n\treturn gcsblob.OpenBucket(*ctx, bucket, c, nil)\n}"
] | [
"0.74995255",
"0.69299114",
"0.69029814",
"0.6815023",
"0.6562258",
"0.6515134",
"0.6346468",
"0.62458694",
"0.6240434",
"0.6203039",
"0.61380965",
"0.60854924",
"0.6003493",
"0.59790623",
"0.5966205",
"0.59641105",
"0.5882635",
"0.5866135",
"0.5820343",
"0.58198625",
"0.5764206",
"0.572279",
"0.56885946",
"0.5664913",
"0.5664913",
"0.5664913",
"0.5664913",
"0.56582886",
"0.56582886",
"0.56340057",
"0.5618201",
"0.56142944",
"0.5610625",
"0.5609929",
"0.5604749",
"0.5575401",
"0.5526576",
"0.55188113",
"0.55094385",
"0.55057156",
"0.54962784",
"0.54961073",
"0.5476079",
"0.5468519",
"0.54669565",
"0.5450706",
"0.5449571",
"0.54478395",
"0.5443971",
"0.5436088",
"0.54295254",
"0.5408054",
"0.5394611",
"0.53891116",
"0.53833276",
"0.53734654",
"0.53722453",
"0.53700125",
"0.53413844",
"0.5333573",
"0.5327173",
"0.5326814",
"0.53231794",
"0.53171074",
"0.5314388",
"0.5309397",
"0.53089",
"0.5307246",
"0.530722",
"0.53021663",
"0.52999413",
"0.5295033",
"0.5287255",
"0.52791244",
"0.52790326",
"0.526684",
"0.5249596",
"0.52437633",
"0.52396643",
"0.52368903",
"0.5236126",
"0.52004826",
"0.5197418",
"0.51960266",
"0.51959544",
"0.51945835",
"0.518601",
"0.5183194",
"0.51780796",
"0.51771873",
"0.5175839",
"0.51680297",
"0.5167862",
"0.51677996",
"0.5165721",
"0.5165062",
"0.5164794",
"0.51638633",
"0.515727",
"0.51536"
] | 0.7895326 | 0 |
Asset loads and returns the asset for the given name. It returns an error if the asset could not be found or could not be loaded. | func Asset(name string) ([]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Asset(name string) ([]byte, error) {\n cannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n if f, ok := _bindata[cannonicalName]; ok {\n a, err := f()\n if err != nil {\n return nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n }\n return a.bytes, nil\n }\n return nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func (model *GrogModel) GetAsset(name string) (*Asset, error) {\n\tvar foundAsset *Asset\n\tvar mimeType string\n\tvar content = make([]byte, 0)\n\tvar serveExternal int64\n\tvar rendered int64\n\tvar added int64\n\tvar modified int64\n\tvar err error\n\n\trow := model.db.DB.QueryRow(`select mimeType, content, serve_external, rendered,\n\t\tadded, modified from Assets where name = ?`, name)\n\tif row.Scan(&mimeType, &content, &serveExternal, &rendered, &added, &modified) != sql.ErrNoRows {\n\t\tfoundAsset = model.NewAsset(name, mimeType)\n\t\tfoundAsset.Content = content\n\t\tif serveExternal == 1 {\n\t\t\tfoundAsset.ServeExternal = true\n\t\t} else {\n\t\t\tfoundAsset.ServeExternal = false\n\t\t}\n\n\t\tif rendered == 1 {\n\t\t\tfoundAsset.Rendered = true\n\t\t} else {\n\t\t\tfoundAsset.Rendered = false\n\t\t}\n\n\t\tfoundAsset.Added.Set(time.Unix(added, 0))\n\t\tfoundAsset.Modified.Set(time.Unix(modified, 0))\n\t} else {\n\t\terr = fmt.Errorf(\"No asset with name %s\", name)\n\t}\n\n\treturn foundAsset, err\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}"
] | [
"0.7346324",
"0.7267003",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.7103171",
"0.70684695",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174",
"0.70660174"
] | 0.7093733 | 71 |
AssetString returns the asset contents as a string (instead of a []byte). | func AssetString(name string) (string, error) {
data, err := Asset(name)
return string(data), err
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (s GetAssetOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s GetAssetOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func String() string {\n\tbuffer := bytes.Buffer{}\n\tfor _, name := range AssetNames() {\n\t\tbytes := MustAsset(name)\n\t\tbuffer.Write(bytes)\n\n\t\tif len(bytes) > 0 && bytes[len(bytes)-1] != '\\n' {\n\t\t\tbuffer.WriteByte('\\n')\n\t\t}\n\t}\n\treturn buffer.String()\n}",
"func (a Asset) String() (result string, err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tmsg := fmt.Sprintf(\"failed to build URL: %v\", r)\n\t\t\ta.logger.Error(msg)\n\t\t\tresult = \"\"\n\t\t\terr = errors.New(msg)\n\t\t}\n\t}()\n\n\tassetURL := joinUrl([]interface{}{a.distribution(), a.assetType(), a.signature(), a.Transformation, a.version(), a.source()})\n\tquery := a.query()\n\n\treturn joinNonEmpty([]interface{}{assetURL, query}, \"?\"), nil\n}",
"func (s RedshiftDataShareAsset) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s S3SnapshotAsset) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s GetAssetInput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s GetAssetInput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s SendApiAssetOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s ApiGatewayApiAsset) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s DeleteAssetOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s DeleteAssetOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s S3DataAccessAsset) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s UpdateAssetOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func Asset(name string) ([]byte, error) {\n\treturn _escFSByte(false, \"/\"+name)\n}",
"func Asset() string {\n\treturn \"eJzcWUtvHLkRvutXFJRDEmDUsuWsEcwhiCMH2UEiW8japyDQVpM1PcywyV6SrZnJrw/Ifr/mvRtj52C4u8nv+1hVrCpSN3ewpt0cYkJ3A+CEkzSHvxRPnCwzInNCqzn86QYA4FErh0JZYDpNtQrzYClIcgv4ikJiLAmEApQS6JWUA7fLyEY3UA6b3wSgO1CYUkEc+f+Gt6Oc/vdlRWEC6CW4FQWFYElxoZLwQuoEUrIWE7IRLFqjwjRhayhLzgv035lWS5HkBj0dLIWkmX/vP6KDV5S5nwm5JR4whfOPSrs2WJgCK21dyVSO/6IDVUfHzH8Lr370jz/WODqseFpXNDRaxXjYcLU2tGDI5UYRh3gXqHRGnkYlYHfWUQpawWYl2KoR3rKdyZUSKhlR40RK/9XqCDXVyJ9TzSsZK7Q6LKYcWIVVCOfg/ISUl0Ic3ErYIpSjbuje/tkvxTpMs9sS1Mf6HDi6yg6GfsqFIT4HZ/Lq5VKbFF1nHG0xzfzW+5AnuXXw8N6t4OHN2/czePswf/fd/Lt30bt3D8dZN0iCTRHIVG5Dv0EMMW04bNA26+stymFi97N8MLFwBs0ujC2sxdCnghDvGZnCUah4eHAGlUXmGn8UduoRF9mhY0cd/4dYtdeKh5fiy5p2G234fqF1rsotmWZP+QRVkPUUkDHadAQkRufZfpK/+klVBmQFo49f5Fz4sShBqKX2O5uhDfkr8NioCoYyK1aAlZoymdXvK02Otq71ckJWI63EiQYETPMhutQqOQXdgwyhPdYAuuuzo9CLMClLFJM6502NevSPkBn9Kjj5ZTrk6HC8bD2VX2FpdFog1VOt91WTgpDzlzDgpYL0IxlZq81kFfNDozArqmD7G5vYgd37qVXeugojeNbWCh+4oSZZQEMecAYJoxloA1wkwqHUjFBFk9qEsg4VoxdxYOssyoGw+FhJ8kUEUmQrofpbd4zhcGWqOdp1/TiWcsBLK85qO7uHKCUu8nQ/+1MBEULsNPKyzRFSuN1Lq+TVCnJ7R2jd3Vt2IJG2gCBURNFUO2ELOcI2ZW5PyIXcWHu1llJ+udseH3rlFK/lb1onkoqdNs1uKDlYav8ZxhxaX7nRuWbrsH/Knf6xeh4BL76Bdeh8+pWSmK/ZYZsX3/yetStt3EtRAeawRGm901CxlTYV3129y2+6Sblaci0LRuvDVB4vawKZSPDLcuJXJX7KqQEEwceyek2XjpWPkxjbcRHgqu60FOAbiTgX0oFW+6S0ksGZSh5rTo+1j0tiTNIO2Dq9BOzvJw5oWQRLFDx10PpgbkL2++JpBGThm4FWoPoqN0g9TWz69wcjs+Q+LS4v98n35bFi6I0rRXqRIEaCHA1bCUfM5eYKa+jAwe8oSiLY/vH9y/s/zABNOoMsYzNIRWZ/P5SibZRJdL6lv0zJ5x+gAio1MFJO2xnkca5cPoONUFxvJkR0TzznayhxRjmWmAq5u5iigCkXaYiv0M2AUyxQzWBpiGLL961WZAMJnVd72P8hrPMJbfF8h5wbspbskCBFdtkiK5oVGr5BQw3ZDHKbo5Q7ePrw2NZQ5ZF1HpNR5Mg22eTv7XcjtM33ug3u9rQNKLRzyf6y2Ew6mIA6ouGkNJRpfoXy0LJApnmR20ap8ktTU4vpWXP4uvg4JPL/2gzZ9RbVIA7J/Ansqhb0iBMmPLa4HkdUoEGK2ZAJldIu3H9dja4FOc55zYalxcs6vcs+2iu0bKO8BW6ZYeS2dYq+lVt+O55VaJuRESkph/Jf/y5kyy2HVPNc0kQmKMDhlBsU3xGZtLjqRMVDLy+sE8wCxjp3HvO3FpA58dpqPg9eoLAsH5iyLeeAIR+fv/psagRrB2CfrDOlYs57Fzcwdb1yQEIlI+D5w5MlphW3Q/b6jCLsWuhLlv1R2DUsPp+79N7GgT1BDO2DqtHanWaYLysKyx1SnuWG6k423jVV7EglwWSFj8I5dNo9Qi0vck44PJSegRgtcdAKVs5ldn5/nwi3yuOI6fRebtm93PL7WOr4PkXryNxzze4NWXeHmYhS/pu3b5q91LNh39Uwlq36VhpfzRH2+/xKBqVs5atgxqjvoRFRbWHlNcLg+x6nHyHO/75ohxJUnsZkfFtXMk2bohFlnc4y6of9zyGqZDpGlPNTfwFJtPV5fNxQzUVSqs3wTHHCpngKCNfP1WdEcimllQtODeKzXLMvdx2hGmrXpS39kxIzwvX/QeETbqf11dttg32Xne3MHzaYjab1cknftCe9Ib55d06LrNsJchtt1pekh08FxLn5QShHZtk9y0Gvq+nbr+5qbsmt3tyeFnaV3pp4usERKta56heXswN+oZhOfbourX5W2E+FxC8T9k3tEdVixjHr6Ee2psEiDwq+RNAYZX3JlruruvRz7hL9a3Gprhbzrbi0FtSnrKUU92aXpK/nAuLc9FUqGJjr3OPop3rxNXLdDmvV/fPQzf8CAAD//5Xhb2c=\"\n}",
"func (s AssetDetails) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s AssetEntry) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (p *Path) SourceAssetString() string {\n\tif p.sourceAssetString == \"\" {\n\t\tp.sourceAssetString = p.SourceAsset.String()\n\t}\n\treturn p.sourceAssetString\n}",
"func (s LakeFormationDataPermissionAsset) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s SendApiAssetInput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func Asset() string {\n\treturn \"eJzsXd1z2ziSf89f0eV92KRKVmJnkkxcl5lz7OzGc3HiGyUzW3V1xQXBlogxCDAAKNmztf/7FT5IkRIp0rb8kav4YSqSiO5ff6DR3QA4j3bhHC8PIEZiHgEYZjgewFv/KUFNFcsNk+IAfnoEAHAkhSFMaKAyy6Rw42DKkCcayJwwTmKOwAQQzgHnKAyYyxz1+BGExw4eOUK7IEiGnvHY/tN928rT/n1O0Q0AOQWTokMIGkXCxMx9weUMMtSazFCP4aT2lBvGdEVKo7EA7e9UiimbFYpYdjBlHEf2e/sjMTAnvLAjodCYOJrM2I9CmjoxNwRSqU3gFJ7/LB2rBo6R/c199U/78Z8VHekk7sY1XldaybFfcRU2okGhKZTABOJLx0rmaNmIGehLbTADKWCRMpougdd0pwohmJi1oDEswz+lGICmfPI20cxRaSZFP5jwYOlWzp2d8WcoLBRMwKRMe1ceN1135z+tKNqQLN8JRK2vH0BCTKkHhV8LpjA5AKOK8supVBkxjefwgmS5nXqHxazQBvZfmhT2n+29HMHe/sHzFwcvno+fP98fpl0HCRbekTFMQztBFFKpElgQvZRvRShDZnozl0MVM6OIunTPem1RYkOB8/cclTcUEYn7YBQRmlCztIfX0wpjHx0aepTxH0jLueY/RP6Xc7xcSJVsBlrFqkKjWs4pG6A8sxUEqJRUDQAzJYt8M5N3dlAZAannaP2XJAmzzxIOTEylndmUaBe/HB89Lp0hRMWSYIkmBLPq+xKTwQtT+7ID1hJaoDNeY0Blsk6dSzG7CnVLZJ20pbVGummzQdSDm4Q1inBG9HKROgwfW6i4n0qjTB3MLCeGxYwzcwkLZlJ4Nb6oL0p22F/gyK9q1n913SNLwWwkXQ28XroSnP3LiUnbgnTlaC6YrETMjWSqZ0ucXBYJ5EpS1LryvwYTah8Z50rOWYKqh0mGhoxbRjSJMaENERTHLBlOrxwUhUEdJAcotI3omm797xmhKRM4rjniAKphVFSNahIN+Y3zoWiA5WqU24eumcpGuCspN4xp163C2TLmDiAWng8udizpOaoeH/PxDlU/6MSRG6+NWCc1wBPWiK27wZJPZqPfdYi6kWXwcSpaBp9qArq54rSYEEPaw9Fp+BWmSmaeUjVU25VimQCRJIncA1FJcmmBzhy6a/bW0gqkPbnDx1py3UQ4hjOpNbPLpsuINRCFluAIZhRHIBUkbMYM4ZIiEeNObKuRoBPLSXgQTo5LSDaOQjmr+zn058UVj3pVMYzLWpio6dnsjzNMWJFt5n7qSThfvBrzriBUISj0LhJtdvdoTxpXIwQuH2fLXJtpD4fpZZK9weXqMagGJfyyezHc9cIQi+XvUs44+pnWzb0R5DoY/Oqe6ZMvTHQfBpYz/bj83EI8xEhtbLpAJedIbcXgprn/zc5ZnUplIp9/HsCUcG2NRgRNpSr57VazfCUnKUWuYEFrdtqVRbZEaLheRvZFsK8FLgkCS9pyymbwvBHHul84cmVtHADYMiYuGDcgxSYotWBwTSRHFU9LaxMvTmLkeo1bo5KBzdVMD5YTpwnPp3Ja68xLl33vP7UQObGlSM1RbY69FnqWvmm/7/XMwPtqfnlzm7wPmfW6Nbbk6T5AtDg5UTRlBqkp1BZkaJCDxziejeHix5fRyx9GQFQ2gjynI8hYrp9scDz2J65LHUvJkYiBflVbhZgGUnPqNbZSj3NOzFSq7GYK+DSBklAQnaIwUo+giAthihEsmEjkok12qbcwsz+tNpxcOuCh7JwSahH+Y6edu409N3S1TxMXwRLXI1CZ7/cF7ns/Hu89e93Bu9nguj73QKeVx5RkjF/emIUnE6RSmKTEjCDBmBExgqlCjHWyyc4sX4PQ+GoD9w9MG7uCnJztkiRRqDXqdQYZoTcTsmSTEpUsiMIlsxEUuiCcX8Lp4VEdQ4jb50WMSqDBWl/jv+rftXBd/l6VHc0aYkkU6rF7cxqyHNQb8Bug4UphP5fJFiZtTQO5TPxa0sqquOlSUON0JhP4cnK8zsj+V+eEbk+oJcV1ZjLB7WrQUuxQ4dBkZhgjTw0ykq9zIkJI46Lf1tjVSLbz3GaCWONLG7niJrZbSJFb+Tb6FrmW01pquHOGMuc4kVMDp2gUozESs9Ox2Udym5S4OKMY9a3UGgEUc6akyFAY3RErSvYwvKfeFTtslbWmrTq1Hl0dcl4H72Xyu5O151a5Q6MGywgT0Yq7bDJdDyT3F9LNsg/hIVq1BX5+I4cq9Ls1bcBInkc1cFGh+BUAlttR66M88APY8ft0YKvQ//mrs+nYmmNcY/rX/x279tDOCo1C8chgZtM8PIA18XchYyIKacgB7OyNn42frdKwf474AYzHT0mePz1nMRHkL08TotNYEpU8xVevf3wev3y2+wqfv97d28PXu2R/P97di39IXr3+YZ++ev7jzxF58/hrgery4DEnYlaQGR7wgqLAkf+6Q7iDnX/9K+wH//vfO3998mQFoIttNTEbj688K3MUEWfiPGIiooVSKExkSNzYImzad4Hxd/u6v71nuy+nL154+76exnu7dPpD/Cx+Fb/cf/78W7VvQLKybdZn182B5XNoLdZCXognb2y8GC0wHuWKjt22sUZYMM4hRutr7UEmL1Qu9RYj37GHo4vY90HfgP38+Ozk8MkITt6O4Nd3H0dgFKHYjkijmqPacjw+89kvTGiKScFRBTbh/AZDOJt8+fiPERQubvNLYAkKw6bMLpQFNyznCLoc7RdNd9whz5+imHdG8O2J8BvhLLEkOaO+pKQ2yXsDVI9gqkeQqhGwdARcjIAqUJgr1FaIcJimPtR+Do7DnDDtAqCYb0+Ad8vEwkO3Oqy8N8H5CIw2I/hqS8lcJdZLxAhyVCNIMgnj8RgN7dT0VrEeySxmwutKTi15d/IBxXxZ/FUK1kDg+G0rLiMl19F6XX9TeD4AefKrx128Stv1tMCYyxmjtwfpd4z5TDJaobJWJpbxRmC1fuMWwLgW3CJFhSueHvYF2iEg4SbdvkbeO7orUfv3YIiArW22F6hhFz6dj+B3YifCkWKGUcJH8DfCOCYj+DRHxSVJ7L+P5UK0R3gd2We2L5eluuJzcEQ4LTip9kvC7o+LvUdnX9yYcbewH9gsNfDYUf4PeDZ+8WQEp27nDR4/G7+An6D66ccnI3jPZml4+if3VfuCYogp9Pblt3SrrQxD1AxNrzW/5CN4ywk9l4UZwbsLyovKeiM4apwQdCeK/LEWT70r8mnVHvlWy6ghspXF4dkEDvMcJn6VfPy5uMBEPhlYM9b/2kovWF/uqSzEagCArgNMA4Xxfx+LLEblpt/k8Oxs8utvUO4o67bMtgErkQtxX9hClskvwaLohko5s8noXaA8cqw0zBmBXybvgUoh/KaTkVUM6ARqMUQhAkcKvxao7wZ2pdzAtFoHgMRyjkDmqNxOJMuwG345ODLSkNWia9uQK6QujBLOKyH0evysIXQAybwNgYenKeGYRFMuSZva
B8E8DPoS63BzVBXSDa5QWj9HFWmkPSrdBuZf6wg1Uin8TmlTtcMR35WOVQduJDQd4hOEGjbHKKfdk6wHbtlpyFFRbJ2rgwQ688OtTLUpWQ9zIVFw87KUe4NFQtxzJ5EjEktlMLmTcOIZLy3jDkubVOECCPwiufFL9xBn8hJHNvBE2d1NXBfoylPsNnZTmeUcDQ5SfInerpGM4gNCbzOxgApSIhJufcnWb0PcidB0teMAG3OpwcCPLGnIiCAzdOUvEyHBCl2I4/a0sTt7gsa5qUyqy0izPzE6jztDKfTNgsHi1ERyrN3VmPVgCo/P4yfrgWkD9HYHgqFOdCUBSmeirYI0omuLIJ1e9LXA4pa8aNVd4L8tr2of55re05N/w1a9ZhlDnZ5ali1oS7jCmntnvtGShtVW35xo7W4QKVnM/B0mJ81mYRLM15oLTQm2q+AK+XJ9dShbyu86TBaPdRFHD1P1urxKNylieG/jO6ohug9CZeQick9H/cbYpkyn5MKj9E7gAowuYr9CoerFnl/VILfkSS2GOLuyIfIHZoi8yxCdId7QthB+8wAfei1a0nMsd7JbI+SQsE651BgtCNsc1JkwOKtOIF8HtYfuMYd2gDudwQQcffg0eRf9fnjy2ffKNvuGyyLvF/Dnk9PheFEbEnOmU1ztH94h4neTz4dvP5xM3r87HoJ5yoRT8d79If7byUen4r2r4N2/f7z7bXhr2ypb78BWOwS30XPV2so3DrlBZzjr1uywuqfslAd+Vp8rGx/djYsKZMpmbUvEVhCeITnfBA80ExSt6ZU7DFrdbOyE/Yfk3fq8wfJwWvMCu/K6PoPvx5Ylr658V8xsWbxs4F97GVH41Zb5jHBbopH5LMraCg241RLN1ftMQMY4Z2UCYiRUwJxCQppifyB5Xu7vu051u0nqUuo8SvDBCprg7qqsOpdCY+gOr4vbV1j54Q9KTtc/WEpiZIDpOjzO28sezuY0QhrCq9bUfQloQXSas9wQbfdYt+FXmrt6EUaXuKWgruN451V83SyhD1q9/IOFHcyRd1JkJq3tDQBVRKeuPaeqsDVn5hKY1t9UZ8DOx+XphtK4uKqbuo1HtVJqQ9GxcY2Jcim7d0y2vdBM/BIJZ1Jy76EVBmtDpgGFfwdHzZxbW4XCAu2vyQ5w8i0lhJ/dLK5nB67ktSpQSJJLJ2yhe3LYEjwTUaHxQaB3E5UorPWGmFiX5Hs5fHXUHvr3cvh+EH8vh++1HB4YMFIbPW9t/VqWcJ8dIx/y6qtaSvwb5lJj8mqFvm5k8X1MjmLW08jcVlhP6+cu8vCyvCrPCGE+V0wqm035puy6MeoEmbYLuDvF5tYMV+caVMKuH/4CbUWfiMS/eWuT3mqHDz2+qn19fxpqgL6Bmv4otAmvLCiE6wkQzpvUN6cDhogkvrzDPKChDj/5KgUENGAnY4/4flyVOghpQCAmfgfed9CDTt2haFhIde6Pa9rHE9RsJtzBUKIrttab/C1XI6+KB907Fz0qy8EddHEM3NWqTKrm7x5szyJ2gbQwOO7fQ9+icfyk6zSRU8Ug1CzhbVvPtwJ6CZd5w69j9lPGT5NEukupBphwR14rQ1cWEgloU9Dz8ps+6wfCTCSMEoO6olR5qM/Rbb3N6DkUOQhceK90DsLsU4rN+2rNVM5mNoTd6YRdEyZGt2IhdxfCq/MsoNyJaSEXV5gt7lJQqMdjOzUpJ6qcmNYEZGpQNd5Zamcoy1AWBtyZZM5DoV9OXGtxiHHqZx1pO2jTlFAjn+6awp3CzJCmRDCdhftKhJ5P7T/YFATaapqoy54iy+K+75haHSqtrONuCdkq5gVkTOixzaoylAJrz8YkAXeN7Wc4fgtc0nMmZj9vlFaGc/+Rwj/c8du7kLml9VGyh6RAfw6r8pZJ6lzqiOSE2gXWasLNPX8KSmlIydy6NQo7UWnaFZr93zFOScENvIGXL148f/nNNIdqx0ZMNa39ZGG6OqLWE4Dc/ZHI5dsbZWm/zXBlMcJ9leb1hiXwcXWd4U11OYWJ2eb7KcMqg7ztCNzNC4KzQqfwURo2La+/7doioaxwbthaWGAc+SbFHbVfy121ZQBaCqPdRdW+NMf17paF3YPA3bhKQBzEqnW2uSh2zhaR8nTcHUnjfbxRHtsYVxep0fPtSeHmm65wbBf5Mkw5rhoer00Q/QSm/nBt7ZDbAuPQjVvK0jmVNdLCVla3W+FPApebzmEm5oSzJLJURUSMwSzvKVa3awnvxOD4Q8nfOlAlarnHOwLOzrGsy23dudmzyrvqNrHAJLID7lKwCv/j7HJXIeHZEwdaN5vQAg2/BI9xgHf9Mc9uxbF++e200Smq0Hfv+w9zsPKgP82LttuYTQm2mZMcnX2BD7YI9xfWuX+ZVCVXwNWTXDtfu2fsrjvggDz9NBmE12XIZD6L9rLWs/W3A3riJ2Z52bW6W1bKwIk2sGfLAVuyw2H5P93gl6BtMmwraNnmv42EMI+mCrtu9cC2U/9wdcft+P52ahPEvDwxb2H0pq8BbfdpIxgSfcp7SPGlwa4QNlikt5bIMIGG5a42PnNsXTRuHpfqMUnnSO1CDfnynUIl8+uGJ8L5GC+Q+qs893ENw/ew3KspWIbVZZIgmH+h0+pZis1+15QpIxd3JtN7Nktt9X87MvVdnIWtruTh2mTottVOwxV5u0A9+wE6f3iO5vpXOq/kuJJJmhI9EDe7sUQPy8lWxOmDTx+qi9HrGoQ+VBe7gUQP0cXoMBdjcaRQm4fpZmeTk7//Xsoxqp0at5nCr+8mnz8wbVAwMTvyvQuprma8dekfkEvemfQPz32vIXqvqKHr8A16+sQj34K513Xwrfj79nXwjXl9pwJ6Bc75A80iNsq7BUs3Bf9WPH1Lgn9j7t3r14/aZM3V6pboVl4Xtny15v+rl4aFWxnL13K5Hkwla9sL3zphP9A3SHXi/f5KlDt4Jcqj/wsAAP//sC922w==\"\n}",
"func (s DeleteAssetInput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s DeleteAssetInput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}",
"func (s AssociateAssetsOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func Asset(name string) ([]byte, error) {\n cannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n if f, ok := _bindata[cannonicalName]; ok {\n a, err := f()\n if err != nil {\n return nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n }\n return a.bytes, nil\n }\n return nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}"
] | [
"0.74635357",
"0.74635357",
"0.74050194",
"0.71176565",
"0.69487256",
"0.68454397",
"0.68166065",
"0.68166065",
"0.678136",
"0.66660875",
"0.662912",
"0.662912",
"0.65534294",
"0.64794993",
"0.643966",
"0.63937074",
"0.6383043",
"0.63824075",
"0.62887394",
"0.6259392",
"0.62369424",
"0.61089474",
"0.6066645",
"0.6066645",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.6061399",
"0.60188866",
"0.5992328",
"0.598204",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174",
"0.59819174"
] | 0.0 | -1 |
MustAsset is like Asset but panics when Asset would return an error. It simplifies safe initialization of global variables. | func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}",
"func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}"
] | [
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818",
"0.7499818"
] | 0.0 | -1 |
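A usage sketch for the MustAsset record above, not taken from any dataset record: it shows the panic-on-missing behaviour being used to initialize a package-level variable. The asset name "config/app.yaml" and the package layout are assumptions for illustration only; the snippet presumes the generated bindata functions live in the same main package.

package main

import "fmt"

// configData is resolved once at program start-up; MustAsset panics if the
// named asset was not embedded, so a typo in the name fails fast instead of
// surfacing an error later. The asset name is a hypothetical example.
var configData = MustAsset("config/app.yaml")

func main() {
	fmt.Printf("embedded config: %d bytes\n", len(configData))
}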
MustAssetString is like AssetString but panics when Asset would return an error. It simplifies safe initialization of global variables. | func MustAssetString(name string) string {
return string(MustAsset(name))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}",
"func AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}"
] | [
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313",
"0.6235313"
] | 0.7863556 | 49 |
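A sketch of one common pairing for MustAssetString, added as an illustration rather than taken from the record: eager template parsing at init time via template.Must. The template path, the template name, and the package layout are assumptions, and the snippet presumes the generated accessors are in the same package.

package main

import (
	"os"
	"text/template"
)

// indexTmpl is built during package initialization: MustAssetString panics if
// the asset is missing and template.Must panics if it fails to parse, so a
// broken embedded template is caught before main runs. The path is hypothetical.
var indexTmpl = template.Must(template.New("index").Parse(MustAssetString("templates/index.tmpl")))

func main() {
	_ = indexTmpl.Execute(os.Stdout, map[string]string{"Title": "hello"})
}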
AssetInfo loads and returns the asset info for the given name. It returns an error if the asset could not be found or could not be loaded. | func AssetInfo(name string) (os.FileInfo, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}",
"func AssetInfo(name string) (os.FileInfo, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}"
] | [
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118",
"0.8059118"
] | 0.81341106 | 52 |
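A hedged usage sketch for AssetInfo, not part of the record above: it reads the synthetic os.FileInfo that the generated code stores next to the embedded bytes. The asset name is a made-up example and the snippet assumes the generated functions are in the same package.

package main

import (
	"fmt"
	"log"
)

func main() {
	// "static/logo.png" is a hypothetical embedded asset name.
	info, err := AssetInfo("static/logo.png")
	if err != nil {
		log.Fatal(err)
	}
	// Name, Size and ModTime reflect the file metadata captured at generation time.
	fmt.Printf("%s: %d bytes, modified %s\n", info.Name(), info.Size(), info.ModTime())
}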
AssetDigest returns the digest of the file with the given name. It returns an error if the asset could not be found or the digest could not be loaded. | func AssetDigest(name string) ([sha256.Size]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
}
return a.digest, nil
}
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (a *Asset) Hash(hashType string) (string, error) {\n\tvar hashEncodedLen int\n\tvar hash string\n\n\t// We read the actual asset content\n\tbytes, err := os.ReadFile(a.path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(bytes) == 0 {\n\t\treturn \"\", fmt.Errorf(\"Empty asset file at %s\", a.path)\n\t}\n\n\t// Build the asset hash and convert it to a string.\n\t// We only support SHA512 for now.\n\tswitch hashType {\n\tcase annotations.SHA512:\n\t\thashComputed := sha512.Sum512(bytes)\n\t\thashEncodedLen = hex.EncodedLen(len(hashComputed))\n\t\thashEncoded := make([]byte, hashEncodedLen)\n\t\thex.Encode(hashEncoded, hashComputed[:])\n\t\thash = string(hashEncoded[:])\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"Invalid hash type %s\", hashType)\n\t}\n\n\ta.computedHash = hash\n\n\treturn hash, nil\n}",
"func (a *Asset) Hash() string {\n\treturn fmt.Sprintf(\"%x\", sha256.Sum256(a.Data))\n}",
"func (r *Registry) ManifestDigest(reponame, tag string) (string, error) {\n\turl := fmt.Sprintf(\"%s/v2/%s/manifests/%s\", r.URL, reponame, tag)\n\n\tlog.Debugf(\"HEAD %s\", url)\n\n\treq, err := http.NewRequest(http.MethodHead, url, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treq.Header.Add(\"Accept\", \"application/vnd.docker.distribution.manifest.v2+json\")\n\tresp, err := r.Client.Do(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn \"\", fmt.Errorf(\"Request failed: %s\", resp.Status)\n\t}\n\n\tdigest := resp.Header.Get(\"Docker-Content-Digest\")\n\tif digest == \"\" {\n\t\treturn \"\", errors.New(\"No digest in response\")\n\t}\n\n\tlog.Debugf(\"Found digest %s\", digest)\n\treturn digest, nil\n}",
"func (o *ImageImportManifest) GetDigest() string {\n\tif o == nil || o.Digest == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Digest\n}",
"func DigestFile(filename string) (string, error) {\n\tb, err := DigestFileBytes(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hex.EncodeToString(b), nil\n}",
"func Asset(name string) ([]byte, error) {\n cannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n if f, ok := _bindata[cannonicalName]; ok {\n a, err := f()\n if err != nil {\n return nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n }\n return a.bytes, nil\n }\n return nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, &os.PathError{Op: \"open\", Path: name, Err: os.ErrNotExist}\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}",
"func Asset(name string) ([]byte, error) {\n\tcannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[cannonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}"
] | [
"0.56964684",
"0.5555062",
"0.5486972",
"0.53367144",
"0.53113675",
"0.5301951",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.525409",
"0.52473897",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037",
"0.5245037"
] | 0.7696742 | 49 |
Digests returns a map of all known files and their checksums. | func Digests() (map[string][sha256.Size]byte, error) {
mp := make(map[string][sha256.Size]byte, len(_bindata))
for name := range _bindata {
a, err := _bindata[name]()
if err != nil {
return nil, err
}
mp[name] = a.digest
}
return mp, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func filesDigest(fs afero.Fs, paths []string) []*PackageFilesDigest {\n\treturn mapPaths(fs, paths, pathToOperator)\n}",
"func Md5All(root string) (map[string][md5.Size]byte, error) {\n\tdone := make(chan struct{})\n\tdefer close(done)\n\n\tpaths, errc := walkFiles(done, root)\n\n\t// Starts a fixed number of goroutines to read and digest files\n\tc := make(chan result)\n\tvar wg sync.WaitGroup\n\twg.Add(numDigesters)\n\tfor i := 0; i < numDigesters; i++ {\n\t\tgo func() {\n\t\t\tdigester(done, paths, c)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(c)\n\t}()\n\t// End of pipeline\n\n\tm := make(map[string][md5.Size]byte)\n\tfor r := range c {\n\t\tif r.err != nil {\n\t\t\treturn nil, r.err\n\t\t}\n\t\tm[r.path] = r.sum\n\t}\n\t// Check whether the walk failed\n\tif err := <-errc; err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}",
"func MD5All(root string) (map[string][md5.Size]byte, error) {\n\t// MD5All closes the done channel when it returns; it may do so before\n\t// receiving all the values from c and errc\n\tparallelism := contour.GetInt(\"parallel\")\n\n\tdone := make(chan struct{})\n\tdefer close(done)\n\n\tpaths, errc := walkFiles(done, root)\n\n\t// Start the goroutines for digestion\n\tc := make(chan MD5Result)\n\tvar wg sync.WaitGroup\n\twg.Add(parallelism)\n\tfor i := 0; i < parallelism; i++ {\n\t\tgo func() {\n\t\t\tMD5Digester(done, paths, c)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(c)\n\t}()\n\n\t// range through the results and add to the map\n\tm := make(map[string][md5.Size]byte)\n\tfor r := range c {\n\t\tif r.err != nil {\n\t\t\treturn nil, r.err\n\t\t}\n\t\tm[r.path] = r.sum\n\t}\n\tif err := <-errc; err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}",
"func ChecksumFilesFS(fs fs.FileSystem, groups ...[]File) {\n\tstart := time.Now()\n\n\ttotalFiles := 0\n\ttotalBytes := util.ByteCount(0)\n\tfor _, files := range groups {\n\t\tfor _, f := range files {\n\t\t\tif (f.IsRegular() || f.IsSymlink()) && !f.HasChecksum() {\n\t\t\t\ttotalFiles += 1\n\t\t\t\ttotalBytes += util.ByteCount(f.Size)\n\t\t\t}\n\t\t}\n\t}\n\n\tif totalFiles == 0 {\n\t\treturn // no files to hash\n\t}\n\n\tlog.Printf(\"check: calculating hashes for %d files (%s).\\n\", totalFiles,\n\t\ttotalBytes)\n\n\tdoneFiles := 0\n\tdoneBytes := util.ByteCount(0)\n\n\ttimer := util.NewTimer(1800, func() {\n\t\tprogress := doneBytes / totalBytes * 100\n\t\tlog.Printf(\"check: busy. %d files (%s, %.1f%%) hashed.\\n\", doneFiles,\n\t\t\tdoneBytes, progress)\n\t})\n\n\tfor i, files := range groups {\n\t\tfor j, f := range files {\n\t\t\tvar hash [sha1.Size]byte\n\t\t\tvar length int\n\t\t\tif f.IsRegular() {\n\t\t\t\thash, length = checksumFile(fs, f.Path())\n\t\t\t} else if f.IsSymlink() {\n\t\t\t\thash, length = checksumSymlink(fs, f.Path())\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t//log.Printf(\"check: read %q\\n\", f.Path())\n\t\t\tgroups[i][j].SHA1 = hash\n\t\t\tdoneFiles += 1\n\t\t\tdoneBytes += util.ByteCount(length)\n\t\t}\n\t}\n\n\ttimer.Stop()\n\n\telapsed := time.Since(start)\n\tlog.Printf(\"check: done. %d files (%s) hashed. took %s.\\n\", doneFiles,\n\t\tdoneBytes, elapsed)\n}",
"func MD5All(root string) (map[string][md5.Size]byte, error) {\n\t// MD5All closes the done channel when it returns; it may do so before\n\t// receiving all the values from c and errc.\n\tdone := make(chan struct{}) // HLdone\n\tdefer close(done) // HLdone\n\n\tc, errc := sumFiles(done, root) // HLdone\n\n\tm := make(map[string][md5.Size]byte)\n\tfor r := range c { // HLrange\n\t\tif r.err != nil {\n\t\t\treturn nil, r.err\n\t\t}\n\t\tm[r.path] = r.sum\n\t}\n\tif err := <-errc; err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}",
"func HashFiles(files []string) string {\n\thasher := md5.New()\n\n\tfor i := 0; i < len(files); i++ {\n\t\tfp, err := os.Open(files[i])\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor {\n\t\t\tbuffer := make([]byte, 8192, 8192)\n\t\t\tbytes_read, err := fp.Read(buffer)\n\t\t\tif bytes_read > 0 {\n\t\t\t\thasher.Write(buffer)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\ts := make([]byte, 0)\n\n\treturn hex.EncodeToString(hasher.Sum(s))\n\n}",
"func GetFilesMetadataHash(paths []string) (string, error) {\n\thashData := make([]string, 0, len(paths))\n\n\tfor _, path := range paths {\n\t\tmeta, err := os.Stat(path)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"couldn't stat '%s': %v\", path, err)\n\t\t}\n\n\t\thashData = append(hashData, meta.Name()+strconv.FormatInt(meta.Size(), 10)+meta.ModTime().String())\n\t}\n\n\t// sort the strings so that the same paths in different order still generate the same hash\n\tsort.Strings(hashData)\n\n\th := sha256.Sum256([]byte(strings.Join(hashData, \"\")))\n\treturn base64.StdEncoding.EncodeToString(h[:]), nil\n}",
"func (ss *Sources) hashAll(path string) (hash.Hash, error) {\n\th := sha1.New()\n\terr := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\t_, _ = io.WriteString(h, path)\n\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tr, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func(r *os.File) {\n\t\t\terr := r.Close()\n\t\t\tif err != nil {\n\t\t\t\tss.Log.Error(err, \"hashAll\")\n\t\t\t}\n\t\t}(r)\n\t\t_, err = io.Copy(h, r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn h, err\n}",
"func (md *Metadata) Digest() metadata.Digest {\n\tsshKeys := make(map[string][]ssh.Key)\n\n\tfor usr, rawKeys := range md.PublicKeys {\n\t\tkeys := strings.Split(rawKeys, \"\\n\")\n\n\t\tfor _, key := range keys {\n\t\t\tif key != \"\" {\n\t\t\t\tsshKeys[usr] = append(sshKeys[usr], ssh.Key(key))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn metadata.Digest{\n\t\tHostname: md.Hostname,\n\t\tSSHKeys: sshKeys,\n\t}\n}",
"func (r *HashRing) Checksums() (checksums map[string]uint32) {\n\tr.RLock()\n\t// even though the map is immutable the pointer to it is not so it requires\n\t// a readlock\n\tchecksums = r.checksums\n\tr.RUnlock()\n\treturn\n}",
"func hashDir(hc hashConstructor, dirname string) ([]byte, error) {\n\t// ReadDir returns the entries sorted by filename\n\tdirEntries, err := os.ReadDir(dirname)\n\tif err != nil {\n\t\treturn nil, errs.FileError(err, dirname)\n\t}\n\tst, err := os.Stat(dirname)\n\tif err != nil {\n\t\treturn nil, errs.FileError(err, dirname)\n\t}\n\n\tvar sum []byte\n\tmode := make([]byte, 4)\n\n\t// calculate sum of contents and mode\n\th := hc()\n\tbinary.LittleEndian.PutUint32(mode, uint32(st.Mode()))\n\th.Write(mode)\n\tfor _, dirEntry := range dirEntries {\n\t\tfi, err := dirEntry.Info()\n\t\tif err != nil {\n\t\t\treturn nil, errs.FileError(err, dirEntry.Name())\n\t\t}\n\t\tname := path.Join(dirname, fi.Name())\n\t\tswitch {\n\t\tcase fi.IsDir():\n\t\t\tsum, err = hashDir(hc, name)\n\t\tcase fi.Mode()&os.ModeSymlink != 0:\n\t\t\tbinary.LittleEndian.PutUint32(mode, uint32(fi.Mode()))\n\t\t\th.Write(mode)\n\t\t\tsum, err = hashSymlink(hc, name)\n\t\tdefault:\n\t\t\tbinary.LittleEndian.PutUint32(mode, uint32(fi.Mode()))\n\t\t\th.Write(mode)\n\t\t\tsum, err = hashFile(hc(), name)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\th.Write(sum)\n\t}\n\n\treturn h.Sum(nil), nil\n}",
"func (f *Fs) Hashes() hash.Set {\n\thashes := hash.Set(hash.None)\n\tif f.hasOCMD5 {\n\t\thashes.Add(hash.MD5)\n\t}\n\tif f.hasOCSHA1 || f.hasMESHA1 {\n\t\thashes.Add(hash.SHA1)\n\t}\n\treturn hashes\n}",
"func ChecksumFiles(groups ...[]File) {\n\tChecksumFilesFS(DefaultFileSystem, groups...)\n}",
"func getFilesFromRelease(p string, r io.Reader) ([]*FileInfo, Paragraph, error) {\n\tdir := path.Dir(p)\n\n\td, err := NewParser(r).Read()\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"NewParser(r).Read()\")\n\t}\n\n\tmd5sums := d[\"MD5Sum\"]\n\tsha1sums := d[\"SHA1\"]\n\tsha256sums := d[\"SHA256\"]\n\n\tif len(md5sums) == 0 && len(sha1sums) == 0 && len(sha256sums) == 0 {\n\t\treturn nil, d, nil\n\t}\n\n\tm := make(map[string]*FileInfo)\n\n\tfor _, l := range md5sums {\n\t\tp, size, csum, err := parseChecksum(l)\n\t\tp = path.Join(dir, path.Clean(p))\n\t\tif err != nil {\n\t\t\treturn nil, nil, errors.Wrap(err, \"parseChecksum for md5sums\")\n\t\t}\n\n\t\tfi := &FileInfo{\n\t\t\tpath: p,\n\t\t\tsize: size,\n\t\t\tmd5sum: csum,\n\t\t}\n\t\tm[p] = fi\n\t}\n\n\tfor _, l := range sha1sums {\n\t\tp, size, csum, err := parseChecksum(l)\n\t\tp = path.Join(dir, path.Clean(p))\n\t\tif err != nil {\n\t\t\treturn nil, nil, errors.Wrap(err, \"parseChecksum for sha1sums\")\n\t\t}\n\n\t\tfi, ok := m[p]\n\t\tif ok {\n\t\t\tfi.sha1sum = csum\n\t\t} else {\n\t\t\tfi := &FileInfo{\n\t\t\t\tpath: p,\n\t\t\t\tsize: size,\n\t\t\t\tsha1sum: csum,\n\t\t\t}\n\t\t\tm[p] = fi\n\t\t}\n\t}\n\n\tfor _, l := range sha256sums {\n\t\tp, size, csum, err := parseChecksum(l)\n\t\tp = path.Join(dir, path.Clean(p))\n\t\tif err != nil {\n\t\t\treturn nil, nil, errors.Wrap(err, \"parseChecksum for sha256sums\")\n\t\t}\n\n\t\tfi, ok := m[p]\n\t\tif ok {\n\t\t\tfi.sha256sum = csum\n\t\t} else {\n\t\t\tfi := &FileInfo{\n\t\t\t\tpath: p,\n\t\t\t\tsize: size,\n\t\t\t\tsha256sum: csum,\n\t\t\t}\n\t\t\tm[p] = fi\n\t\t}\n\t}\n\n\t// WORKAROUND: some (e.g. dell) repositories have invalid Release\n\t// that contains wrong checksum for Release itself. Ignore them.\n\tdelete(m, path.Join(dir, \"Release\"))\n\tdelete(m, path.Join(dir, \"Release.gpg\"))\n\tdelete(m, path.Join(dir, \"InRelease\"))\n\n\tl := make([]*FileInfo, 0, len(m))\n\tfor _, fi := range m {\n\t\tl = append(l, fi)\n\t}\n\treturn l, d, nil\n}",
"func (i *AppxDigest) digestFile(f *zipslicer.File, doPageHash bool) error {\n\tvar peWriters []io.WriteCloser\n\tvar peResults []<-chan peDigestResult\n\tvar sink io.Writer\n\tif strings.HasSuffix(f.Name, \".exe\") || strings.HasSuffix(f.Name, \".dll\") {\n\t\t// DigestPE wants a Reader so make a pipe for each one and sink data into the pipes\n\t\tpeWriters, peResults = setupPeDigests(f.Name, i.Hash, doPageHash)\n\t\tdefer func() {\n\t\t\tfor _, w := range peWriters {\n\t\t\t\tw.Close()\n\t\t\t}\n\t\t}()\n\t\tmw := make([]io.Writer, len(peWriters))\n\t\tfor i, w := range peWriters {\n\t\t\tmw[i] = w\n\t\t}\n\t\tsink = io.MultiWriter(mw...)\n\t}\n\tif err := i.blockMap.AddFile(f, i.axpc, sink); err != nil {\n\t\treturn err\n\t}\n\tif peWriters != nil {\n\t\tfor _, w := range peWriters {\n\t\t\tw.Close()\n\t\t}\n\t\tfor _, ch := range peResults {\n\t\t\tresult := <-ch\n\t\t\tif result.err != nil {\n\t\t\t\treturn result.err\n\t\t\t}\n\t\t\ti.peDigests = append(i.peDigests, result.digest)\n\t\t}\n\t}\n\treturn nil\n}",
"func (h *MultiHasher) Sums() (hashMD5, hashSHA1, hashSHA256, hashSHA512 []byte) {\n\thashMD5 = h.md5.Sum(nil)\n\thashSHA1 = h.sha1.Sum(nil)\n\thashSHA256 = h.sha256.Sum(nil)\n\thashSHA512 = h.sha512.Sum(nil)\n\treturn hashMD5, hashSHA1, hashSHA256, hashSHA512\n}",
"func (hasher *FileHasher) HashFile(path string) (map[HashType]Digest, error) {\n\tinfo, err := os.Stat(path)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to stat file %v\", path)\n\t}\n\n\t// Throttle reading and hashing rate.\n\tif len(hasher.config.HashTypes) > 0 {\n\t\terr = hasher.throttle(info.Size())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to hash file %v\", path)\n\t\t}\n\t}\n\n\tvar hashes []hash.Hash\n\tfor _, hashType := range hasher.config.HashTypes {\n\t\th, valid := validHashes[hashType]\n\t\tif !valid {\n\t\t\treturn nil, errors.Errorf(\"unknown hash type '%v'\", hashType)\n\t\t}\n\n\t\thashes = append(hashes, h())\n\t}\n\n\tif len(hashes) > 0 {\n\t\tf, err := file.ReadOpen(path)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to open file for hashing\")\n\t\t}\n\t\tdefer f.Close()\n\n\t\thashWriter := multiWriter(hashes)\n\t\tif _, err := io.Copy(hashWriter, f); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to calculate file hashes\")\n\t\t}\n\n\t\tnameToHash := make(map[HashType]Digest, len(hashes))\n\t\tfor i, h := range hashes {\n\t\t\tnameToHash[hasher.config.HashTypes[i]] = h.Sum(nil)\n\t\t}\n\n\t\treturn nameToHash, nil\n\t}\n\n\treturn nil, nil\n}",
"func HashFiles(fs []*common.File, hashAlgorithm string) error {\n\tvar commonErr error\n\tsfl := &common.SafeFileList{List: fs}\n\n\t// End workers channel handler\n\twg := new(sync.WaitGroup)\n\twg.Add(runtime.NumCPU())\n\tdone := make(chan bool)\n\tgo func() {\n\t\twg.Wait()\n\t\tdone<-true\n\t}()\n\n\t// Make control channels\n\tpauseChans, pause := createMultipleChans(runtime.NumCPU())\n\tresumeChans, resume := createMultipleChans(runtime.NumCPU())\n\tstopChans, stop := createMultipleChans(runtime.NumCPU())\n\n\tfor i:=0; i<runtime.NumCPU(); i++ {\n\t\tgo func(pauseChan, resumeChan, stopChan chan bool) {\n\t\t\tdefer wg.Done()\n\t\t\th := hash.NewHasher(hashAlgorithm, 128*1024)\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-pauseChan:\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-stopChan:\n\t\t\t\t\t\treturn\n\t\t\t\t\tcase <-resumeChan:\n\t\t\t\t\t}\n\t\t\t\tcase <-stopChan:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t\tif commonErr != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tf := sfl.Next()\n\t\t\t\tif f == nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\toutput.NewPartialStep(\"Hash file \" + f.AbsPath)\n\t\t\t\tif err := h.HashFile(f); err != nil {\n\t\t\t\t\tcommonErr = err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(pauseChans[i], resumeChans[i], stopChans[i])\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-input.Pause:\n\t\t\tpause()\n\t\tcase <-input.Resume:\n\t\t\tresume()\n\t\tcase <-input.Stop:\n\t\t\tstop()\n\t\t\t<-done\n\t\t\treturn output.ErrProcessStopped\n\t\tcase <-done:\n\t\t\treturn commonErr\n\t\t}\n\t}\n}",
"func hash(fn string) (res string) {\n\th := sha256.New()\n\n\tfi, err := os.Stat(fn)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tf, err := os.Open(fn)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer f.Close()\n\n\tif fi.IsDir() {\n\t\tns, err := f.Readdirnames(0)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tfor _, e := range ns {\n\t\t\th.Write([]byte(e))\n\t\t}\n\t} else {\n\t\tif _, err := io.Copy(h, f); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn string(h.Sum(nil))\n}",
"func MD5All(roots []string) (map[string][md5.Size]byte, error) {\n\t// MD5All closes the done channel when it returns; it may do so before\n\t// receiving all the values from c and errc.\n\tnumBackend := runtime.NumCPU()\n\n\t// These channels link the different stages together:\n\tinputChan := make(chan interface{}, numBackend*2)\n\tfileChan := make(chan interface{}, numBackend*2)\n\toutputChan := make(chan interface{})\n\tquitChan := make(chan struct{})\n\n\terrChan := make(chan error)\n\n\t// Control number of spawned goroutines here:\n\tfirstWg := sync.WaitGroup{}\n\tsecondWg := sync.WaitGroup{}\n\tlastWg := sync.WaitGroup{}\n\n\t// These define how the stages connect to each stage\n\t// Note that the input for pipe2 is the output of pipe1\n\tpipe1 := Pipes{\n\t\tInput: inputChan,\n\t\tOutput: fileChan,\n\t\tErr: errChan,\n\t\tQuit: quitChan,\n\t\tDone: &firstWg,\n\t}\n\tpipe2 := Pipes{\n\t\tInput: fileChan,\n\t\tOutput: outputChan,\n\t\tErr: errChan,\n\t\tQuit: quitChan,\n\t\tDone: &secondWg,\n\t}\n\t// More concisely, the last stage:\n\tpipe3 := Pipes{outputChan, nil, errChan, quitChan, &lastWg}\n\n\tfor i := 0; i < numBackend; i++ {\n\t\t// 1st stage\n\t\tfirstWg.Add(1)\n\t\tgo walkFiles(pipe1)\n\t}\n\tfor i := 0; i < numBackend; i++ {\n\t\t// 2nd stage\n\t\tsecondWg.Add(1)\n\t\tgo digester(pipe2)\n\t}\n\n\t// We don't really stream the results, just collect them in\n\t// final stage in one map:\n\tm := make(map[string][md5.Size]byte)\n\n\t// Final stage, the \"reduce\" of this pipeline. One goroutine only\n\tlastWg.Add(1) // Only one goroutine!\n\tgo func(p Pipes) {\n\t\tdefer p.Done.Done()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase r := <-p.Input:\n\t\t\t\tif r == nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif r.(result).err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tm[r.(result).path] = r.(result).sum\n\t\t\tcase <-p.Quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(pipe3)\n\n\t// All set up, so send the input down the pipes we set up:\n\tfor _, root := range roots {\n\t\tinputChan <- root\n\t}\n\n\t// This is the test part to force all goroutines to gracefully quit\n\t// after 3 seconds: this simulates a deadline:\n\tgo func() {\n\t\ttime.Sleep(time.Second * 3)\n\t\tfor i := 0; i < numBackend*2+1; i++ {\n\t\t\tquitChan <- struct{}{}\n\t\t}\n\t}()\n\n\t// Now we're done input:\n\tclose(inputChan)\n\t// Wait for first pipes to clear:\n\tfirstWg.Wait()\n\tclose(fileChan)\n\t// Wait for 2nd stage pipes to clear:\n\tsecondWg.Wait()\n\tclose(outputChan)\n\t// Wait for final pipe to clear:\n\tlastWg.Wait()\n\n\treturn m, nil\n}",
"func NewDigests(appliedRuleset *rulesets.AppliedRulesetSummary, statuses []scanner.ScanStatus) ([]Digest, error) {\n\tds := make([]Digest, 0)\n\terrs := make([]string, 0, 0)\n\n\tfor i := range statuses {\n\t\ts := statuses[i]\n\n\t\tvar e *scans.Evaluation\n\t\tif appliedRuleset != nil && appliedRuleset.RuleEvaluationSummary != nil {\n\t\t\tfor i := range appliedRuleset.RuleEvaluationSummary.Ruleresults {\n\t\t\t\tif appliedRuleset.RuleEvaluationSummary.Ruleresults[i].ID == s.ID {\n\t\t\t\t\te = &appliedRuleset.RuleEvaluationSummary.Ruleresults[i]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\td, err := _newDigests(&s, e)\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Sprintf(\"failed to make digest(s) from scan: %v\", err.Error()))\n\t\t\tcontinue\n\t\t}\n\n\t\tds = append(ds, d...)\n\t}\n\n\tsort.Slice(ds, func(i, j int) bool { return ds[i].Index < ds[j].Index })\n\n\tif len(errs) > 0 {\n\t\treturn ds, fmt.Errorf(\"failed to make some digests: %v\", strings.Join(errs, \"; \"))\n\t}\n\n\treturn ds, nil\n}",
"func (t *File) Hash() [32]byte {\n\tvar out [32]byte\n\th := sha256.New()\n\tbinary.Write(h, binary.LittleEndian, uint32(len(t.Name)))\n\th.Write([]byte(t.Name))\n\th.Write(t.MetafileHash)\n\tcopy(out[:], h.Sum(nil))\n\treturn out\n}",
"func calculateSHA256s(path string) ([]checksumSHA256, error) {\n\tvar checksums []checksumSHA256\n\tpath = fmt.Sprintf(\"%s%c\", filepath.Clean(path), filepath.Separator)\n\tcalculateSHA256 := func(filepath string, f os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif f.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tfile, err := os.Open(filepath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer file.Close()\n\n\t\thash := sha256.New()\n\t\tif _, err = io.Copy(hash, file); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tchecksums = append(checksums, checksumSHA256{\n\t\t\tfilename: strings.TrimPrefix(filepath, path),\n\t\t\tchecksum: hash.Sum(nil),\n\t\t})\n\n\t\treturn nil\n\t}\n\tif err := filepath.Walk(path, calculateSHA256); err != nil {\n\t\treturn nil, err\n\t}\n\treturn checksums, nil\n}",
"func recursiveHash(dir string, fout string) {\n\tf, err := os.Create(fout)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\tdefer f.Close()\n\tfilepath.Walk(dir, func(path string, file os.FileInfo, _ error) error {\n\t\tif !file.IsDir() {\n\t\t\tfileData, err := ioutil.ReadFile(path)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t}\n\t\t\trel, err := filepath.Rel(dir, path)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t}\n\t\t\tfmt.Printf(\"%x *%s\\n\", md5.Sum(fileData), rel)\n\t\t\tvalue := md5.Sum(fileData)\n\n\t\t\ts := hex.EncodeToString(value[:]) + \" *\" + rel + \"\\n\"\n\t\t\tf.WriteString(s)\n\t\t}\n\t\treturn nil\n\t})\n\tf.Sync()\n}",
"func hashedDigestsForSSH(sshPK ssh.PublicKey, v23PK, purpose, message []byte) ([]byte, security.Hash, error) {\n\thashName, hasher := hashForSSHKey(sshPK)\n\tsum, err := digest(hasher, v23PK, message, purpose)\n\tif err != nil {\n\t\treturn nil, hashName, err\n\t}\n\thasher.Reset()\n\thasher.Write(sum)\n\treturn hasher.Sum(nil), hashName, nil\n}",
"func getFilesFromSources(p string, r io.Reader) ([]*FileInfo, Paragraph, error) {\n\tvar l []*FileInfo\n\tparser := NewParser(r)\n\n\tfor {\n\t\td, err := parser.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, nil, errors.Wrap(err, \"parser.Read\")\n\t\t}\n\n\t\tdir, ok := d[\"Directory\"]\n\t\tif !ok {\n\t\t\treturn nil, nil, errors.New(\"no Directory in \" + p)\n\t\t}\n\t\tfiles, ok := d[\"Files\"]\n\t\tif !ok {\n\t\t\treturn nil, nil, errors.New(\"no Files in \" + p)\n\t\t}\n\n\t\tm := make(map[string]*FileInfo)\n\t\tfor _, l := range files {\n\t\t\tfname, size, csum, err := parseChecksum(l)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, errors.Wrap(err, \"parseChecksum for Files\")\n\t\t\t}\n\n\t\t\tfpath := path.Clean(path.Join(dir[0], fname))\n\t\t\tfi := &FileInfo{\n\t\t\t\tpath: fpath,\n\t\t\t\tsize: size,\n\t\t\t\tmd5sum: csum,\n\t\t\t}\n\t\t\tm[fpath] = fi\n\t\t}\n\n\t\tfor _, l := range d[\"Checksums-Sha1\"] {\n\t\t\tfname, _, csum, err := parseChecksum(l)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, errors.Wrap(err, \"parseChecksum for Checksums-Sha1\")\n\t\t\t}\n\n\t\t\tfpath := path.Clean(path.Join(dir[0], fname))\n\t\t\tfi, ok := m[fpath]\n\t\t\tif !ok {\n\t\t\t\treturn nil, nil, errors.New(\"mismatch between Files and Checksums-Sha1 in \" + p)\n\t\t\t}\n\t\t\tfi.sha1sum = csum\n\t\t}\n\n\t\tfor _, l := range d[\"Checksums-Sha256\"] {\n\t\t\tfname, _, csum, err := parseChecksum(l)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, errors.Wrap(err, \"parseChecksum for Checksums-Sha256\")\n\t\t\t}\n\n\t\t\tfpath := path.Clean(path.Join(dir[0], fname))\n\t\t\tfi, ok := m[fpath]\n\t\t\tif !ok {\n\t\t\t\treturn nil, nil, errors.New(\"mismatch between Files and Checksums-Sha256 in \" + p)\n\t\t\t}\n\t\t\tfi.sha256sum = csum\n\t\t}\n\n\t\tfor _, fi := range m {\n\t\t\tl = append(l, fi)\n\t\t}\n\t}\n\n\treturn l, nil, nil\n}",
"func Sum(m types.Metadata) (string, error) {\n\t// Open the file.\n\tfile, err := os.Open(m.File())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Defer closing it.\n\tdefer file.Close()\n\n\t// Make a new hash object.\n\thash := sha256.New()\n\n\t// Copy the file to the hasher object.\n\t_, err = io.Copy(hash, file)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Calculate the hash and return it.\n\treturn hex.EncodeToString(hash.Sum(nil)), err\n}",
"func (fs *Searcher) combineChecksums(checksums [][]byte) ([]byte, error) {\n\tif len(checksums) == 1 {\n\t\treturn checksums[0], nil\n\t}\n\n\tvar total []byte\n\tfor _, chksum := range checksums {\n\t\ttotal = append(total, chksum...)\n\t}\n\n\tnewChecksum := crc32.ChecksumIEEE(total)\n\tbuf := bytes.NewBuffer(make([]byte, 0, 4))\n\terr := binary.Write(buf, binary.LittleEndian, &newChecksum)\n\treturn buf.Bytes(), err\n}",
"func HashFile(path string) (hashes FileInfo, err error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\treader := bufio.NewReader(f)\n\n\tvar writers []io.Writer\n\n\thsha1 := newHasher(sha1.New(), &hashes.Sha1)\n\tdefer hsha1.Close()\n\twriters = append(writers, hsha1)\n\thsha256 := newHasher(sha256.New(), &hashes.Sha256)\n\tdefer hsha256.Close()\n\twriters = append(writers, hsha256)\n\thmd5 := newHasher(md5.New(), &hashes.Md5)\n\tdefer hmd5.Close()\n\twriters = append(writers, hmd5)\n\n\tif len(writers) == 0 {\n\t\treturn\n\t}\n\n\tw := io.MultiWriter(writers...)\n\n\t_, err = io.Copy(w, reader)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}",
"func calcHashSum(s string) ([]byte, error) {\n\t// Open the file.\n\tfile, err := os.Open(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\t// Get a new hash value.\n\th := md5.New()\n\n\t// Read line by line and add to the hash sum.\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tif err := scanner.Err(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tio.WriteString(h, scanner.Text())\n\t}\n\n\treturn h.Sum(nil), nil\n}",
"func digestsForSSH(sshPK ssh.PublicKey, v23PK, purpose, message []byte) ([]byte, security.Hash, error) {\n\thashName, hasher := hashForSSHKey(sshPK)\n\tsum, err := digest(hasher, v23PK, message, purpose)\n\tif err != nil {\n\t\treturn nil, hashName, err\n\t}\n\treturn sum, hashName, nil\n}",
"func (e *Entity) ReadChecksums(filePath string) error {\n\tif e.Checksum == nil {\n\t\te.Checksum = map[string]string{}\n\t}\n\tfile, err := os.Open(filePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"opening file for reading: \"+filePath+\" :%w\", err)\n\t}\n\tdefer file.Close()\n\t// TODO: Make this line like the others once this PR is\n\t// included in a k-sigs/release-util release:\n\t// https://github.com/kubernetes-sigs/release-utils/pull/16\n\ts1, err := hash.ForFile(filePath, sha1.New())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting sha1 sum for file: %w\", err)\n\t}\n\ts256, err := hash.SHA256ForFile(filePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting file checksums: %w\", err)\n\t}\n\ts512, err := hash.SHA512ForFile(filePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting file checksums: %w\", err)\n\t}\n\n\te.Checksum = map[string]string{\n\t\t\"SHA1\": s1,\n\t\t\"SHA256\": s256,\n\t\t\"SHA512\": s512,\n\t}\n\treturn nil\n}",
"func GetHashSum() []byte {\n\tpaths := []string{\n\t\tconfigPath,\n\t\t\"/etc/scrt/service-template/sqlUser\",\n\t\t\"/etc/scrt/service-template/sqlPassword\",\n\t}\n\th := sha256.New()\n\n\tfor _, p := range paths {\n\t\tf, err := os.Open(p)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tdefer f.Close()\n\t\tif _, err = io.Copy(h, f); err != nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn h.Sum(nil)\n}",
"func GetAllEncryptedSums(f *shade.File) (encryptedSums [][]byte, err error) {\n\tif f == nil {\n\t\treturn nil, errors.New(\"provide a file pointer to Get an encrypted chunk\")\n\t}\n\tencryptedSums = make([][]byte, len(f.Chunks))\n\tfor i, chunk := range f.Chunks {\n\t\tif chunk.Nonce == nil {\n\t\t\treturn nil, fmt.Errorf(\"no Nonce in Chunk %d: %x\", i, chunk.Sha256)\n\t\t}\n\t\tencryptedSums[i], err = encryptUnsafe(chunk.Sha256, f.AesKey, chunk.Nonce)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn encryptedSums, nil\n}",
"func (b *Bag) GenerateChecksums() error {\n\tvar err error\n\tvar realroot string\n\n\trealroot, err = filepath.Abs(b.root)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to determine bag's absolute root path from %q: %s\", b.root, err)\n\t}\n\tb.root = realroot\n\n\tvar dataPath = filepath.Join(b.root, \"data\")\n\tif !fileutil.IsDir(dataPath) {\n\t\treturn fmt.Errorf(\"%q is not a directory\", dataPath)\n\t}\n\n\tb.Checksums = nil\n\terr = filepath.Walk(dataPath, func(path string, info os.FileInfo, err error) error {\n\t\tif info.Mode().IsRegular() {\n\t\t\tvar chksum, err = b.getsum(path)\n\t\t\tif err == nil {\n\t\t\t\tb.Checksums = append(b.Checksums, chksum)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn err\n}",
"func (m MMR) Digest() Path {\n\tvar path Path\n\tfor _, p := range m.peaks {\n\t\tpath = append(path, PathEntry{op: PUSHPOS, pos: p})\n\t}\n\treturn path\n}",
"func commitsByRepo() (map[string][]string, error) {\n\tinfos, err := os.ReadDir(indexDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcommitsByRepo := map[string][]string{}\n\tfor _, info := range infos {\n\t\tif matches := indexFilenamePattern.FindStringSubmatch(info.Name()); len(matches) > 0 {\n\t\t\tcommitsByRepo[matches[1]] = append(commitsByRepo[matches[1]], matches[2])\n\t\t}\n\t}\n\n\treturn commitsByRepo, nil\n}",
"func (s *FileCollector) Collect() error {\n\tfor _, inputVal := range s.Paths {\n\t\t// inputVal is of type sda:/path/to/file\n\t\terr := HashFile(inputVal)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"File Collector: input=%s, err = %v\", inputVal, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func copyFilegroupHashes(state *core.BuildState, target *core.BuildTarget) {\n\toutDir := target.OutDir()\n\tlocalSources := target.AllSourceLocalPaths(state.Graph)\n\tfor i, source := range target.AllSourceFullPaths(state.Graph) {\n\t\tif out := filepath.Join(outDir, localSources[i]); out != source {\n\t\t\tstate.PathHasher.CopyHash(source, out)\n\t\t}\n\t}\n}",
"func (f *Fs) Hashes() hash.Set {\n\treturn hash.Set(hash.MD5)\n}",
"func WriteCacheChecksums(suffix string, cacheDir string, paths []string) error {\n\tcheckBeforeFilename := filepath.Join(cacheDir, \"md5sums_\"+suffix)\n\tcheckBefore, err := os.OpenFile(checkBeforeFilename, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer checkBefore.Close()\n\n\thashes := []string{}\n\tfor _, path := range paths {\n\t\terr = filepath.Walk(path, func(p string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\thash, err := hashFile(p)\n\t\t\thashes = append(hashes, fmt.Sprintf(\"%s %s\", hash, p))\n\t\t\treturn err\n\t\t})\n\t}\n\n\tsort.Strings(hashes)\n\n\tw := bufio.NewWriter(checkBefore)\n\tfor _, line := range hashes {\n\t\tfmt.Fprintln(w, line)\n\t}\n\treturn w.Flush()\n}",
"func printHashes(results <-chan FileInfo, hashType string) (int64, int64, []error) {\n\tvar totalFileSize int64\n\tvar numFiles int64\n\tvar errors []error\n\tfor info := range results {\n\t\tif info.Error != nil {\n\t\t\terrors = append(errors, info.Error)\n\t\t\tcontinue\n\t\t}\n\t\tnumFiles++\n\t\ttotalFileSize += int64(info.Size)\n\t\tfmt.Printf(\"%s,\\\"%s\\\",%s\\n\", hashType, info.Path, info.Hash)\n\t}\n\treturn totalFileSize, numFiles, errors\n}",
"func FindAllFiles(rootdir string, mask []string) (files map[string]string, err error) {\n\tdirs := make(map[string]string)\n\tfiles = make(map[string]string)\n\n\tdirs, err = FindFiles(rootdir, []string{\"*\"})\n\tif err != nil {\n\t\tlog.Fatalf(\"FindAllFiles: FindAllFiles error: %v\", err)\n\t}\n\n\tfor k := range dirs {\n\t\tf, err := FindFiles(k, []string{\"*.*\"})\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"FindAllFiles: FindAllFiles error: %v\", err)\n\t\t}\n\n\t\tfor kk, vv := range f {\n\t\t\tfiles[kk] = vv\n\t\t}\n\t}\n\treturn files, err\n}",
"func (d *GitDir) Refs() (map[string]core.Hash, error) {\n\tvar err error\n\n\td.refs = make(map[string]core.Hash)\n\n\tif err = d.addRefsFromPackedRefs(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = d.addRefsFromRefDir(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn d.refs, err\n}",
"func hashFile(file io.Reader) []byte {\n\thash := suite.Hash()\n\tbuflen := 1024 * 1024\n\tbuf := make([]byte, buflen)\n\tread := buflen\n\tfor read == buflen {\n\t\tvar err error\n\t\tread, err = file.Read(buf)\n\t\tif err != nil && err != io.EOF {\n\t\t\tdbg.Fatal(\"Error while reading bytes\")\n\t\t}\n\t\thash.Write(buf)\n\t}\n\treturn hash.Sum(nil)\n}",
"func Hashcalc(filename string) (Hashval, error) {\n\tr, err := GetHashForFile(filename)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn internHash(r), nil\n}",
"func DirHash(path string) (finalHash string, err error) {\n\thash := md5.New()\n\n\terr = filepath.Walk(path, func(path string, info os.FileInfo, err error) (err2 error) {\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tio.WriteString(hash, path)\n\n\t\tfmt.Fprintf(hash, \"%v\", info.IsDir())\n\t\tfmt.Fprintf(hash, \"%v\", info.ModTime())\n\t\tfmt.Fprintf(hash, \"%v\", info.Mode())\n\t\tfmt.Fprintf(hash, \"%v\", info.Name())\n\t\tfmt.Fprintf(hash, \"%v\", info.Size())\n\t\treturn\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfinalHash = fmt.Sprintf(\"%x\", hash.Sum(nil))\n\treturn\n}"
] | [
"0.6962257",
"0.6857267",
"0.6500901",
"0.64340717",
"0.6351849",
"0.6168927",
"0.6060193",
"0.6027875",
"0.5841485",
"0.58326197",
"0.5764696",
"0.57436156",
"0.5716916",
"0.5619292",
"0.5611238",
"0.56094915",
"0.55823195",
"0.5540355",
"0.5539558",
"0.55263436",
"0.54708636",
"0.5469622",
"0.5447435",
"0.54164386",
"0.53611475",
"0.53385925",
"0.52997947",
"0.5298761",
"0.5273952",
"0.52257097",
"0.5205912",
"0.51886225",
"0.5159227",
"0.5146557",
"0.5141931",
"0.51346743",
"0.5129186",
"0.5126566",
"0.5126346",
"0.5122437",
"0.51164675",
"0.5114642",
"0.51122075",
"0.5109352",
"0.5106089",
"0.5061447",
"0.50579965"
] | 0.69082654 | 50 |
AssetNames returns the names of the assets. | func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [] | [] | 0.0 | -1 |
RestoreAsset restores an asset under the given directory. | func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, path.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, path.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, path.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, path.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}"
] | [
"0.7935779",
"0.7935779",
"0.7935779",
"0.7935779",
"0.79253083",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187",
"0.7924187"
] | 0.0 | -1 |
RestoreAssets restores an asset under the given directory recursively. | func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func RestoreAssets(dir, name string) error {\n\tchildren, err := AssetDir(name)\n\tif err != nil { // File\n\t\treturn RestoreAsset(dir, name)\n\t} else { // Dir\n\t\tfor _, child := range children {\n\t\t\terr = RestoreAssets(dir, path.Join(name, child))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func RestoreAssets(dir, name string) error {\n\tchildren, err := AssetDir(name)\n\tif err != nil { // File\n\t\treturn RestoreAsset(dir, name)\n\t} else { // Dir\n\t\tfor _, child := range children {\n\t\t\terr = RestoreAssets(dir, path.Join(name, child))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func RestoreAssets(dir, name string) error {\n\tchildren, err := AssetDir(name)\n\tif err != nil { // File\n\t\treturn RestoreAsset(dir, name)\n\t} else { // Dir\n\t\tfor _, child := range children {\n\t\t\terr = RestoreAssets(dir, path.Join(name, child))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func RestoreAssets(dir, name string) error {\n\tchildren, err := AssetDir(name)\n\tif err != nil { // File\n\t\treturn RestoreAsset(dir, name)\n\t} else { // Dir\n\t\tfor _, child := range children {\n\t\t\terr = RestoreAssets(dir, path.Join(name, child))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}"
] | [
"0.78780913",
"0.78780913",
"0.78780913",
"0.78780913"
] | 0.0 | -1 |
GetTask returns a new task for the action | func GetTask(name, action string, conf *config.MountConfig) (iface.Task, error) {
switch action {
case "", "create":
return NewCreateTask(name, conf), nil
case "remove", "rm":
return NewRemoveTask(name, conf), nil
default:
return nil, fmt.Errorf("Invalid mount action %q for task %q", action, name)
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func GetTask(r *http.Request) *task.Task {\n\tif rv := r.Context().Value(model.ApiTaskKey); rv != nil {\n\t\tif t, ok := rv.(*task.Task); ok {\n\t\t\treturn t\n\t\t}\n\t}\n\treturn nil\n}",
"func (*FakeReconcilerClient) GetTask(string) (swarm.Task, error) {\n\treturn swarm.Task{}, FakeUnimplemented\n}",
"func (a *agent) GetTask(ctx context.Context, msg *api.AgentID) (*api.Task, error) {\n\tvar task *api.Task = new(api.Task)\n\tselect {\n\tcase task, ok := <-a.work[msg.GetAgentID()]:\n\t\tif ok {\n\t\t\treturn task, nil\n\t\t}\n\t\treturn task, errors.New(\"channel closed\")\n\tdefault:\n\t\treturn task, nil\n\t}\n}",
"func (ds *DNSSuite) GetTask() *boomer.Task {\n\tvar fn func()\n\n\tswitch ds.Type {\n\tcase dns.TypeA:\n\t\tfn = ds.doA\n\t}\n\n\treturn &boomer.Task{\n\t\tName: \"dns\",\n\t\tOnStart: func() {},\n\t\tOnStop: func() {},\n\t\tFn: fn,\n\t}\n}",
"func GetTask(tid, user_token string) (*Task, error) {\n\t// declarations\n\tvar start_time, end_time pq.NullTime\n\tvar exit_status sql.NullInt64\n\tvar output sql.NullString\n\n\t// initialize Task\n\ttask := Task{}\n\t// get task information\n\tif err := db.QueryRow(\"SELECT * FROM tasks WHERE tasks.id=$1\", tid).\n\t\tScan(&task.Id, &task.Gid, &start_time, &end_time, &task.Status,\n\t\t&exit_status, &output, &task.Patch); err != nil {\n\t\treturn nil, err\n\t}\n\t// set remaining fields\n\tif start_time.Valid {\n\t\ttask.Start_time = &start_time.Time\n\t}\n\tif end_time.Valid {\n\t\ttask.End_time = &end_time.Time\n\t}\n\tif exit_status.Valid {\n\t\ttask.Exit_status = exit_status.Int64\n\t}\n\tif output.Valid {\n\t\ttask.Output = output.String\n\t}\n\n\tgroup_task, _ := getGroupTask(task.Gid)\n\ttask.User = group_task.user\n\ttask.Project = group_task.project\n\ttask.Bot = group_task.bot\n\n\treturn &task, nil\n}",
"func (d *dispatcher) Get(state string) *Task {\n\ttask, ok := d.Tasks[state]\n\tif !ok {\n\t\treturn &Task{\n\t\t\tHandler: NotFoundHandler,\n\t\t}\n\t}\n\treturn task\n}",
"func GetTask(c common.Client, uri string) (*Task, error) {\n\tvar task Task\n\treturn &task, task.Get(c, uri, &task)\n}",
"func (v1 *V1) GetTask(w http.ResponseWriter, r *http.Request) {\n\ttaskID := chi.URLParam(r, \"taskID\")\n\tshouldDeleteTask := false\n\tdeleteParam := r.URL.Query().Get(\"delete\")\n\tif deleteParam == \"1\" {\n\t\tshouldDeleteTask = true\n\t}\n\n\ttask := v1.metaCrawlSvc.TaskByID(taskID)\n\tif task == nil {\n\t\tv1.responseErrorJSON(w, \"task not found\", 404)\n\t\treturn\n\t}\n\n\ttaskStatus := task.Status()\n\tswitch taskStatus {\n\tcase metacrawl.TaskInProgress:\n\t\tv1.responseJSON(w, \"task in progress\", 204)\n\t\treturn\n\tcase metacrawl.TaskCompleted:\n\t\tif shouldDeleteTask {\n\t\t\tv1.metaCrawlSvc.DeleteTaskByID(taskID)\n\t\t}\n\n\t\tv1.responseCSV(w, taskID, task.Render(), 200)\n\t}\n}",
"func (_Contract *ContractCallerSession) GetTask(i *big.Int) (struct {\n\tActive bool\n\tAssignment *big.Int\n\tProposalID *big.Int\n}, error) {\n\treturn _Contract.Contract.GetTask(&_Contract.CallOpts, i)\n}",
"func (t *Tasker) Get() *Task {\n\ttask := t.tasks.Pop()\n\tif task == nil {\n\t\treturn nil\n\t}\n\n\ttt := task.(*Task)\n\tlog.Printf(\"tasker get task: %v\\n\", tt)\n\treturn tt\n}",
"func (m *Master) GetTask(req *GetTaskReq, rsp *GetTaskRsp) error {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\tfor k := range m.todoMapTask {\n\t\tif m.todoMapTask[k] == 0 {\n\t\t\trsp.Status = \"Task\"\n\t\t\trsp.Filename = m.files[k]\n\t\t\trsp.NReduce = m.nReduce\n\t\t\trsp.TaskID = k\n\t\t\tm.todoMapTask[k] = time.Now().Unix()\n\t\t\treturn nil\n\t\t}\n\t}\n\tif len(m.todoMapTask) != 0 {\n\t\trsp.Status = \"Wait\"\n\t\treturn nil\n\t}\n\n\tfor k := range m.todoReduceTask {\n\t\tif m.todoReduceTask[k] == 0 {\n\t\t\trsp.Status = \"Task\"\n\t\t\trsp.NReduce = m.nReduce\n\t\t\trsp.NMap = len(m.files)\n\t\t\trsp.TaskID = k\n\t\t\tm.todoReduceTask[k] = time.Now().Unix()\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif len(m.todoReduceTask) != 0 {\n\t\trsp.Status = \"Wait\"\n\t\treturn nil\n\t} else {\n\t\trsp.Status = \"Exit\"\n\t\treturn nil\n\t}\n\n\treturn nil\n}",
"func (ts *TaskService) Get(reqdata *TaskGetRequest) (*TaskGetResponse, *http.Response, error) {\n\n\tu := fmt.Sprintf(\"tasks/%s\", reqdata.UUID)\n\n\tu, err := addOptions(u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := ts.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar result *TaskGetResponse\n\tresp, err := ts.client.Do(req, &result)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn result, resp, nil\n}",
"func (cl *RedisClient) GetTask() (*RedisTask, error) {\n\tval, err := cl.client.Keys(\"tasks:*\").Result()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar result *RedisTask\n\tvar key string\n\ttxf := func(tx *redis.Tx) error {\n\t\tresult = nil\n\t\tstate, err := tx.HGet(key, \"state\").Result()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif state == \"new\" {\n\t\t\tinputfile, err := cl.client.HGet(key, \"inputfile\").Result()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\toutfile, err := cl.client.HGet(key, \"outfile\").Result()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresult = &RedisTask{}\n\t\t\tresult.InputFile = inputfile\n\t\t\tresult.OutFile = outfile\n\t\t\tresult.TaskName = key\n\t\t\t_, err = tx.HSet(key, \"state\", \"holded\").Result()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tfor _, key = range val {\n\t\terr := cl.client.Watch(txf, key)\n\t\tif err == redis.TxFailedErr {\n\t\t\treturn nil, err\n\t\t}\n\t\tif result != nil {\n\t\t\treturn result, nil\n\t\t}\n\t}\n\n\treturn nil, nil\n}",
"func (s *state) GetTask(exID string) (*mesos.Task, error) {\n\t// Check if task is in Launched Tasks list\n\tfor _, t := range s.st.GetTasks.LaunchedTasks {\n\t\tif s.isMatchingTask(&t, exID) {\n\t\t\treturn &t, nil\n\t\t}\n\t}\n\n\t// Check if task is in Queued Tasks list\n\tfor _, t := range s.st.GetTasks.QueuedTasks {\n\t\tif s.isMatchingTask(&t, exID) {\n\t\t\treturn &t, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"unable to find task matching executor id %s\", exID)\n}",
"func (_Contract *ContractSession) GetTask(i *big.Int) (struct {\n\tActive bool\n\tAssignment *big.Int\n\tProposalID *big.Int\n}, error) {\n\treturn _Contract.Contract.GetTask(&_Contract.CallOpts, i)\n}",
"func (ctrl *TaskController) GetTask(w http.ResponseWriter, r *http.Request) {\n\ttaskId := ParamAsString(\"id\", r)\n\tlogrus.Println(\"task : \", taskId)\n\n\ttask, err := ctrl.taskDao.Get(taskId)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t\tSendJSONError(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tlogrus.Println(\"task : \", task)\n\tSendJSONOk(w, task)\n}",
"func (_Contract *ContractCaller) GetTask(opts *bind.CallOpts, i *big.Int) (struct {\n\tActive bool\n\tAssignment *big.Int\n\tProposalID *big.Int\n}, error) {\n\tvar out []interface{}\n\terr := _Contract.contract.Call(opts, &out, \"getTask\", i)\n\n\toutstruct := new(struct {\n\t\tActive bool\n\t\tAssignment *big.Int\n\t\tProposalID *big.Int\n\t})\n\tif err != nil {\n\t\treturn *outstruct, err\n\t}\n\n\toutstruct.Active = *abi.ConvertType(out[0], new(bool)).(*bool)\n\toutstruct.Assignment = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)\n\toutstruct.ProposalID = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int)\n\n\treturn *outstruct, err\n\n}",
"func (svc *Service) Get(ctx context.Context, id uuid.UUID) (*domain.Task, error) {\n\tsvc.taskRequestPolled.Inc()\n\treturn svc.taskGateway.FindByID(ctx, id)\n}",
"func (t *TaskData) GetTask() Task {\n\treturn Task{\n\t\tLocation: t.Location,\n\t\tDestination: t.Destination,\n\t\tAppID: t.AppID,\n\t\tRequestTime: t.RequestTime,\n\t}\n}",
"func GetTask(db *sql.DB, taskuuid string) (*Task, error) {\n\tdbLogger.Debug(\"GetTask...\")\n\tvar task = new(Task)\n\tvar err error\n\tvar stmt *sql.Stmt\n\n\tif err := db.Ping(); err != nil {\n\t\tdbLogger.Fatal(ERROR_DB_NOT_CONNECTED)\n\t\treturn nil, errors.New(ERROR_DB_NOT_CONNECTED)\n\t}\n\n\tstmt, err = db.Prepare(\"SELECT rowid, taskuuid, useruuid, keyword, bc_txuuid, type, state, payload FROM task WHERE taskuuid = ? and deleted = 0\")\n\tif err != nil {\n\t\tdbLogger.Errorf(\"Failed preparing statement: %v\", err)\n\t\treturn nil, fmt.Errorf(ERROR_DB_PREPARED + \": %v\", err)\n\t}\n\tdefer stmt.Close()\n\n\tif err := stmt.QueryRow(taskuuid).Scan(&task.RowID, &task.TaskUUID, &task.UserUUID, &task.Keyword, &task.BC_txuuid, &task.Type, &task.State, &task.Payload); err != nil {\n\t\tdbLogger.Errorf(\"Failed getting task by taskuuid %s: %v\", taskuuid, err)\n\t\treturn nil, fmt.Errorf(ERROR_DB_QUERY + \": %v\", err)\n\t}\n\tdbLogger.Debugf(\"Get task by taskuuid %s: \\n%#v\", taskuuid, *task)\n\n\treturn task, nil\n}",
"func getTask(c *cli.Context) (string, error) {\n\tif c.NArg() == 0 {\n\t\treturn \"\", fmt.Errorf(\"no task specified\")\n\t}\n\n\treturn c.Args()[0], nil\n}",
"func GetTask() *Task {\n\treturn taskPool.Get().(*Task)\n}",
"func (s *Storage) GetTask(id uint) (*todopb.TaskResponse, error) {\n\trow := s.db.QueryRow(\"SELECT * FROM tasks WHERE id=$1\", id)\n\n\ttask, err := scan(row)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn task, nil\n}",
"func (db *DynamoDB) GetTask(ctx context.Context, req *tes.GetTaskRequest) (*tes.Task, error) {\n\tvar task *tes.Task\n\tvar response *dynamodb.GetItemOutput\n\tvar err error\n\n\tswitch req.View {\n\tcase tes.TaskView_MINIMAL:\n\t\tresponse, err = db.getMinimalView(ctx, req.Id)\n\tcase tes.TaskView_BASIC:\n\t\tresponse, err = db.getBasicView(ctx, req.Id)\n\tcase tes.TaskView_FULL:\n\t\tresponse, err = db.getFullView(ctx, req.Id)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif response.Item == nil {\n\t\treturn nil, grpc.Errorf(codes.NotFound, fmt.Sprintf(\"%v: taskID: %s\", errNotFound.Error(), req.Id))\n\t}\n\n\terr = dynamodbattribute.UnmarshalMap(response.Item, &task)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to DynamoDB unmarshal Task, %v\", err)\n\t}\n\n\treturn task, nil\n}",
"func (db *DynamoDB) GetTask(ctx context.Context, req *tes.GetTaskRequest) (*tes.Task, error) {\n\tvar task *tes.Task\n\tvar response *dynamodb.GetItemOutput\n\tvar err error\n\n\tswitch req.View {\n\tcase tes.TaskView_MINIMAL:\n\t\tresponse, err = db.getMinimalView(ctx, req.Id)\n\tcase tes.TaskView_BASIC:\n\t\tresponse, err = db.getBasicView(ctx, req.Id)\n\tcase tes.TaskView_FULL:\n\t\tresponse, err = db.getFullView(ctx, req.Id)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif response.Item == nil {\n\t\treturn nil, tes.ErrNotFound\n\t}\n\n\terr = dynamodbattribute.UnmarshalMap(response.Item, &task)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to DynamoDB unmarshal Task, %v\", err)\n\t}\n\n\treturn task, nil\n}",
"func (domain *Domain) GetTask(name string) (*Task, error) {\n\t// determine task\n\tdomain.TasksX.RLock()\n\ttask, ok := domain.Tasks[name]\n\tdomain.TasksX.RUnlock()\n\n\tif !ok {\n\t\treturn nil, errors.New(\"task not found\")\n\t}\n\n\t// success\n\treturn task, nil\n}",
"func GetTask(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *TaskState, opts ...pulumi.ResourceOption) (*Task, error) {\n\tvar resource Task\n\terr := ctx.ReadResource(\"google-native:cloudtasks/v2:Task\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func GetTask(id bson.ObjectId) (*Task, error) {\n\ttask := Task{}\n\terr := sess.DB(\"\").C(taskC).FindId(id).One(&task)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &task, nil\n}",
"func (a *Client) GetTask(params *GetTaskParams) (*GetTaskOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetTaskParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getTask\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/TaskService/Tasks/{identifier}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetTaskReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetTaskOK), nil\n\n}",
"func (p *taskController) GetTask(c echo.Context) error {\n\tid, err := strconv.Atoi(c.Param(\"id\"))\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, \"Task ID must be int\")\n\t}\n\tctx := c.Request().Context()\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\ttask, err := p.TaskUseCase.GetTask(ctx, id)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusNotFound, \"Task does not exist.\")\n\t}\n\treturn c.JSON(http.StatusOK, task)\n}",
"func GetTask(id int) (Task, error) {\n\tvar t Task\n\tvar jsonTask []byte\n\n\terr := db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket(taskBucket)\n\t\tjsonTask = b.Get(itob(id))\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn t, err\n\t}\n\n\terr = t.ReadFromJSON(jsonTask)\n\tif err != nil {\n\t\treturn t, err\n\t}\n\n\treturn t, nil\n\n}",
"func (dtm *DfgetTaskManager) Get(ctx context.Context, clientID, taskID string) (dfgetTask *types.DfGetTask, err error) {\n\treturn dtm.getDfgetTask(clientID, taskID)\n}",
"func newTask() task {\n\treturn task{}\n}",
"func GetTask(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *TaskState, opts ...pulumi.ResourceOption) (*Task, error) {\n\tvar resource Task\n\terr := ctx.ReadResource(\"aws:datasync/task:Task\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (service *Service) Task(id string) *Task {\n\treturn service.Worker.Task(id).(*Task)\n}",
"func GetEventTask(etid int64) (*EventTask, error) {\n\tvar hook_id sql.NullInt64\n\ttask := EventTask{}\n\n\tif err := db.QueryRow(\"SELECT * FROM event_tasks WHERE id=$1\", etid).\n\t\tScan(&task.Id, &task.Name, &task.Status, &task.Event,\n\t\t&task.Token, &hook_id); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif hook_id.Valid {\n\t\ttask.HookId = hook_id.Int64\n\t}\n\n\tgroup_task, err := getGroupTask(task.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttask.User = group_task.user\n\ttask.Project = group_task.project\n\ttask.Bot = group_task.bot\n\n\treturn &task, nil\n}",
"func (cm *Docker) GetTask(id string) (*entity.Task, bool) {\n\tcm.lock.Lock()\n\tt, ok := cm.tasks[id]\n\tcm.lock.Unlock()\n\treturn t, ok\n}",
"func (s *Service) GetTask(taskKey string) (*Task, error) {\n\tfor _, task := range s.Tasks {\n\t\tif task.Key == taskKey {\n\t\t\ttask.serviceName = s.Name\n\t\t\treturn task, nil\n\t\t}\n\t}\n\treturn nil, &TaskNotFoundError{\n\t\tTaskKey: taskKey,\n\t\tServiceName: s.Name,\n\t}\n}",
"func (d *DeploymentRequest) GetTask() string {\n\tif d == nil || d.Task == nil {\n\t\treturn \"\"\n\t}\n\treturn *d.Task\n}",
"func GetTask(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) (bool, bool, string, int) {\n\n\t// declare an argument structure.\n\targs := GetTaskArgs{}\n\n\t// fill in the argument(s).\n\n\t// declare a reply structure.\n\treply := GetTaskReply{}\n\n\t// send the RPC request, wait for the reply.\n\tcallSuccess := call(\"Master.GetTask\", &args, &reply)\n\ttaskSuccess := false\n\tif (callSuccess) {\n\t\ttaskSuccess = DoTask(reply, mapf, reducef);\n\t}\n\n\treturn callSuccess, taskSuccess, reply.TaskType, reply.TaskNumber;\n}",
"func (c *Client) GetTask(ctx context.Context, in *todopb.TaskQuery, opts ...grpc.CallOption) (*todopb.TaskResponse, error) {\n\treturn c.client.GetTask(ctx, in, opts...)\n}",
"func (m *Master) GetTask(_ *ExampleArgs, reply *GetTaskReply) error {\n\tswitch m.masterState {\n\tcase newMaster:\n\t\tfor i, task := range m.mapTask {\n\t\t\tif task.State == initialState {\n\t\t\t\treply.Task.Type_ = task.Type_\n\t\t\t\treply.Task.Filename = task.Filename\n\t\t\t\treply.Task.Id = task.Id\n\t\t\t\treply.Task.NReduce = task.NReduce\n\t\t\t\treply.Flag = 0\n\t\t\t\treply.Task.State = task.State\n\n\t\t\t\tm.mapTask[i].State = inProgress\n\t\t\t\tm.mapTask[i].Time = time.Now()\n\t\t\t\t//reply.Task.State=m.mapTask[i].State\n\n\t\t\t\treturn nil\n\t\t\t} else if task.State == inProgress && time.Now().Sub(m.mapTask[i].Time) > time.Duration(5)*time.Second {\n\t\t\t\treply.Task.Type_ = task.Type_\n\t\t\t\treply.Task.Filename = task.Filename\n\t\t\t\treply.Task.Id = task.Id\n\t\t\t\treply.Task.NReduce = task.NReduce\n\t\t\t\treply.Task.State = task.State\n\t\t\t\treply.Flag = 0\n\n\t\t\t\tm.mapTask[i].State = inProgress\n\t\t\t\tm.mapTask[i].Time = time.Now()\n\t\t\t\t//reply.Task.State=m.mapTask[i].State\n\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treply.Flag = 1 // map not finished but in progress\n\tcase completeMap:\n\t\tfor i, task := range m.reduceTask {\n\t\t\tif task.State == initialState {\n\t\t\t\treply.Task.Type_ = task.Type_\n\t\t\t\treply.Task.Filename = task.Filename\n\t\t\t\treply.Task.Id = task.Id\n\t\t\t\treply.Task.NReduce = task.NReduce\n\t\t\t\treply.Flag = 0\n\t\t\t\treply.Task.Files = task.Files\n\t\t\t\treply.Task.State = task.State\n\n\t\t\t\tm.reduceTask[i].State = inProgress\n\t\t\t\tm.reduceTask[i].Time = time.Now()\n\t\t\t\t//reply.Task.State=m.mapTask[i].State\n\n\t\t\t\treturn nil\n\t\t\t} else if task.State == inProgress && time.Now().Sub(m.reduceTask[i].Time) > time.Duration(5)*time.Second {\n\t\t\t\treply.Task.Type_ = task.Type_\n\t\t\t\treply.Task.Filename = task.Filename\n\t\t\t\treply.Task.Id = task.Id\n\t\t\t\treply.Task.NReduce = task.NReduce\n\t\t\t\treply.Flag = 0\n\t\t\t\treply.Task.Files = task.Files\n\t\t\t\treply.Task.State = task.State\n\n\t\t\t\tm.reduceTask[i].State = inProgress\n\t\t\t\tm.reduceTask[i].Time = time.Now()\n\t\t\t\t//reply.Task.State=m.mapTask[i].State\n\n\t\t\t\treturn nil\n\n\t\t\t}\n\t\t}\n\t\treply.Flag = 1 // reduce not finished but in progress\n\tcase completeReduce:\n\t\treply.Flag = 2 // all task have been finished\n\n\t}\n\n\treturn nil\n}",
"func (m *Master) GetTask(args *GetTaskArgs, reply *GetTaskReply) error {\n\n\tif !m.Done() {\n\t\t//fmt.Println(m.isAllMapCompleted())\n\t\tif !m.isAllMapCompleted() {\n\t\t\tm.Mux.Lock()\n\t\t\tfor i := 0; i < m.M; i += 1 {\n\t\t\t\tif m.IsIdleMaps[i] == 0 {\n\t\t\t\t\tm.IsIdleMaps[i] = 1\n\t\t\t\t\treply.TaskId = i\n\t\t\t\t\treply.TaskType = 1\n\t\t\t\t\tinput := []string{m.MapTasks[i]}\n\t\t\t\t\treply.Input = input\n\t\t\t\t\treply.R = m.R\n\t\t\t\t\tm.MapTasksTime[i] = time.Now().Unix()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tm.Mux.Unlock()\n\t\t} else {\n\t\t\tm.Mux.Lock()\n\t\t\tfor i := 0; i < m.R; i += 1 {\n\t\t\t\tif m.IsIdleReduces[i] == 0 {\n\t\t\t\t\tm.IsIdleReduces[i] = 1\n\t\t\t\t\treply.TaskId = i\n\t\t\t\t\treply.TaskType = 2\n\t\t\t\t\treply.Input = m.ReduceTasks[i]\n\t\t\t\t\tm.ReduceTasksTime[i] = time.Now().Unix()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tm.Mux.Unlock()\n\t\t}\n\t}\n\treturn nil\n}",
"func (s *StubTodoStore) GetTask(projectID, taskName string) model.Task {\n\tfor _, t := range s.Tasks {\n\t\tif t.Name == taskName && t.ProjectID == projectID {\n\t\t\treturn wrapStubTask(taskName)\n\t\t}\n\t}\n\treturn model.Task{}\n}",
"func (r Ref) GetTask() string {\n\tstart := strings.LastIndex(string(r), \":\")\n\ttask := string(r)[start+1:]\n\n\tif strings.HasPrefix(task, \"//\") {\n\t\t// there is no task because task cannot start with '//'\n\t\treturn \"\"\n\t}\n\treturn task\n}",
"func GetTask(id int) (Task, error) {\n\tpath := fmt.Sprintf(\"tasks/%d\", id)\n\tres, err := makeRequest(http.MethodGet, path, nil)\n\tif err != nil {\n\t\treturn Task{}, err\n\t}\n\n\treturn decodeTask(res.Body)\n}",
"func (g *getRunTask) Get() (runtask *v1alpha1.RunTask, err error) {\n\tvar allStrategies []runTaskGetterFn\n\tif g.currentStrategy != nil {\n\t\tallStrategies = append(allStrategies, g.currentStrategy)\n\t}\n\n\tif len(g.oldStrategies) != 0 {\n\t\tallStrategies = append(allStrategies, g.oldStrategies...)\n\t}\n\n\tif len(allStrategies) == 0 {\n\t\terr = fmt.Errorf(\"no strategies to get runtask: failed to get runtask '%s'\", g.taskName)\n\t\treturn\n\t}\n\n\tfor _, s := range allStrategies {\n\t\truntask, err = s(g.getRunTaskSpec)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\n\t\terr = errors.Wrap(err, fmt.Sprintf(\"failed to get runtask '%s'\", g.taskName))\n\t\tklog.Warningf(\"%s\", err)\n\t}\n\n\t// at this point, we have a real error we can not recover from\n\terr = fmt.Errorf(\"exhausted all strategies to get runtask: failed to get runtask '%s'\", g.taskName)\n\treturn\n}",
"func (t *TaskStore) getTask(id int64) *Task {\n\tif id <= 0 {\n\t\t// Invalid ID.\n\t\treturn nil\n\t}\n\tif _, ok := t.delTasks[id]; ok {\n\t\t// Already deleted in the temporary cache.\n\t\treturn nil\n\t}\n\tif t, ok := t.tmpTasks[id]; ok {\n\t\t// Sitting in cache.\n\t\treturn t\n\t}\n\tif t, ok := t.tasks[id]; ok {\n\t\t// Sitting in the main index.\n\t\treturn t\n\t}\n\treturn nil\n}",
"func (cm *Docker) MustGetTask(id string) *entity.Task {\n\tn, ok := cm.GetTask(id)\n\tif !ok {\n\t\tcollector := collector.NewDocker(cm.client, id)\n\t\tn = entity.NewTask(id, collector)\n\t\tcm.lock.Lock()\n\t\tcm.tasks[id] = n\n\t\tcm.lock.Unlock()\n\t}\n\treturn n\n}",
"func (e *Executor) GetTask(name string) (task Task, ok bool) {\n\te.lock.RLock()\n\tfor i := len(e.tasks) - 1; i >= 0; i-- {\n\t\tif e.tasks[i].Job.Name() == name {\n\t\t\ttask = e.tasks[i].Task()\n\t\t\tok = true\n\t\t\tbreak\n\t\t}\n\t}\n\te.lock.RUnlock()\n\treturn\n}",
"func GetOriginTask(ctx context.Context, task *swarmingAPI.SwarmingRpcsTaskResult, swarmSvc *swarmingAPI.Service) (*swarmingAPI.SwarmingRpcsTaskResult, error) {\n\t// If the task was deduped, then the invocation associated with it is just the one associated\n\t// to the task from which it was deduped.\n\tfor task.DedupedFrom != \"\" {\n\t\tvar err error\n\t\tif task, err = GetSwarmingTask(ctx, task.DedupedFrom, swarmSvc); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn task, nil\n}",
"func (s *RefreshImpl) Task(ctx context.Refresh) *taskDTO.PostTaskRequest {\n\tdto := taskDTO.PostTaskRequest{}\n\tdto.MessageID = constvalue.RefreshTaskID\n\tdto.Name = \"Refresh Enclosure\"\n\tdto.Description = \"Refresh enclosure's settings and component.\"\n\tdto.CreatedByName = \"Enclosure Service\"\n\tdto.CreatedByURI = \"/promise/v1/enclosure\"\n\tdto.TargetName = ctx.GetEnclosure().Name\n\tdto.TargetURI = base.ToEnclosureURI(ctx.GetEnclosure().ID)\n\tfor _, v := range s.sub {\n\t\tstep := taskDTO.PostTaskStepRequest{}\n\t\tstep.MessageID = v.MessageID()\n\t\tstep.Name = v.Name()\n\t\tstep.Description = v.Description()\n\t\tstep.ExpectedExecutionMs = v.ExpectedExecutionMs()\n\t\tdto.TaskSteps = append(dto.TaskSteps, step)\n\t}\n\treturn &dto\n}",
"func GetOneTimeTask(otid int64) (*OneTimeTask, error) {\n\ttask := OneTimeTask{}\n\n\tif err := db.QueryRow(\"SELECT * FROM onetime_tasks WHERE id=$1\", otid).\n\t\tScan(&task.Id, &task.Name, &task.Status, &task.Exec_time); err != nil {\n\t\treturn nil, err\n\t}\n\n\tgroup_task, err := getGroupTask(task.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttask.User = group_task.user\n\ttask.Project = group_task.project\n\ttask.Bot = group_task.bot\n\n\treturn &task, nil\n}",
"func (m *Master) GetTask(args ExampleArgs, reply *Task) error {\n\tif len(m.completedTasks[0]) < m.M {\n\t\tselect {\n\t\tcase reply1 := <-m.idleTasks[0]:\n\t\t\treply.Type = reply1.Type\n\t\t\treply.Filename = reply1.Filename\n\t\t\treply.NReduce = reply1.NReduce\n\t\t\treply.TaskNum = reply1.TaskNum\n\t\t\treply1.StartTime = time.Now().UnixNano()\n\t\t\treply.StartTime = reply1.StartTime\n\t\t\tm.mu.Lock()\n\t\t\tm.inProgress[0][reply1] = true\n\t\t\tm.mu.Unlock()\n\t\t\tgo m.waitForTask(reply1)\n\t\tdefault:\n\t\t\treply.EmptyIdle = true\n\t\t}\n\t} else if len(m.completedTasks[1]) < m.R {\n\t\tselect {\n\t\tcase reply1 := <-m.idleTasks[1]:\n\t\t\treply.Type = reply1.Type\n\t\t\treply.Filename = reply1.Filename\n\t\t\treply.NReduce = reply1.NReduce\n\t\t\treply.TaskNum = reply1.TaskNum\n\t\t\treply1.StartTime = time.Now().UnixNano()\n\t\t\treply.StartTime = reply1.StartTime\n\t\t\tm.mu.Lock()\n\t\t\tm.inProgress[1][reply1] = true\n\t\t\tm.mu.Unlock()\n\t\t\tgo m.waitForTask(reply1)\n\t\tdefault:\n\t\t\treply.EmptyIdle = true\n\t\t}\n\t} else {\n\t\treply.Type = \"done\"\n\t}\n\treturn nil\n}",
"func (service *Service) Task(id string) *Task {\n\tif t := service.Worker.Task(id); t != nil {\n\t\treturn t.(*Task)\n\t}\n\treturn nil\n}",
"func (c *Coordinator) GetTask(args *GetArgs, reply *GetReply) error {\n\tfmt.Printf(\"Worker %v requesting for task\\n\", args.WorkerId)\n\t// If map task available, send map task\n\n\tc.mapLock.Lock()\n\tfor id, _ := range c.availableMapTasks {\n\t\tfmt.Printf(\"Map Task %v given to worker %v\\n\", id, args.WorkerId)\n\n\t\t// Populate reply\n\t\treply.TaskType = 0\n\t\treply.TaskNum = id\n\t\treply.Filename = c.mapTasks[id].filename\n\t\treply.Partitions = len(c.reduceTasks)\n\n\t\t// Fill in maptask details\n\t\tc.mapTasks[id].worker = args.WorkerId\n\n\t\t// Remove from available\n\t\tdelete(c.availableMapTasks, id)\n\n\t\t// Run waiting thread\n\t\tgo waitCheck(c, 0, id)\n\n\t\tc.mapLock.Unlock()\n\n\t\treturn nil\n\t}\n\tc.mapLock.Unlock()\n\n\t// All map tasks not finished yet\n\tif c.mapDoneTasks != len(c.mapTasks) {\n\t\tfmt.Printf(\"No tasks available for worker %v\\n\", args.WorkerId)\n\t\treply.TaskType = 2\n\t\treturn nil\n\t}\n\n\tc.reduceLock.Lock()\n\t// If all map tasks over and reduce task available send reduce task\n\tfor id, _ := range c.availableReduceTasks {\n\t\tfmt.Printf(\"Reduce Task %v given to worker %v\\n\", id, args.WorkerId)\n\n\t\t// Populate reply\n\t\treply.TaskType = 1\n\t\treply.TaskNum = id\n\t\treply.Partitions = len(c.mapTasks)\n\n\t\t// Fill in reduce details\n\t\tc.reduceTasks[id].worker = args.WorkerId\n\n\t\t// Remove from available\n\t\tdelete(c.availableReduceTasks, id)\n\n\t\t// Run waiting thread\n\t\tgo waitCheck(c, 1, id)\n\n\t\tc.reduceLock.Unlock()\n\n\t\treturn nil\n\t}\n\tc.reduceLock.Unlock()\n\n\tif c.reduceDoneTasks != len(c.reduceTasks) {\n\t\t// No task available right now\n\t\tfmt.Printf(\"No tasks available for worker %v\\n\", args.WorkerId)\n\t\treply.TaskType = 2\n\t\treturn nil\n\t} else {\n\t\t// No task available right now\n\t\tfmt.Printf(\"All tasks completed, quiting worker %v\\n\", args.WorkerId)\n\t\treply.TaskType = 3\n\t\treturn nil\n\t}\n}",
"func (tasks *Tasks) GetTask(name string) (task *Task, err error) {\n\tfor _, task := range tasks.Tasks {\n\t\tif task.Name == name {\n\t\t\treturn task, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"No Task with name %s\", name)\n}",
"func (s Step) Task(ctx context.Context) Task {\n\treturn GetModelContext(ctx).Nodes.MustLoadTask(s.TaskID)\n}",
"func (c *jxTasks) Get(name string, options v1.GetOptions) (result *v1alpha1.JxTask, err error) {\n\tresult = &v1alpha1.JxTask{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"jxtasks\").\n\t\tName(name).\n\t\tVersionedParams(&options, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}",
"func (c *iperfTasks) Get(name string, options v1.GetOptions) (result *alpha1.IperfTask, err error) {\n\tresult = &alpha1.IperfTask{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"iperftasks\").\n\t\tName(name).\n\t\tVersionedParams(&options, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}",
"func GetInstantTask(itid int64) (*InstantTask, error) {\n\ttask := InstantTask{}\n\n\tif err := db.QueryRow(\"SELECT * FROM instant_tasks WHERE id=$1\", itid).\n\t\tScan(&task.Id); err != nil {\n\t\treturn nil, err\n\t}\n\n\tgroup_task, err := getGroupTask(task.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttask.User = group_task.user\n\ttask.Project = group_task.project\n\ttask.Bot = group_task.bot\n\n\treturn &task, nil\n}",
"func (w *worker) requestTask() Task {\n\n\targs := TaskArgs{}\n\targs.WorkerId = w.id\n\treply := TaskReply{}\n\n\tif ok := call(\"Master.GetOneTask\", &args, &reply); !ok {\n\t\tlog.Println(\"Failed to get the Task\")\n\n\t\t// could not find the master process\n\t\t// possible improvements:\n\t\t//\t\t1. Add a retry with a delay, it could be due to network issue\n\t\t//\t\t2. Send graceful termination from `master` to all the registered\n\t\t//\t\t `worker`(s) when all the tasks are completed\n\t\tos.Exit(1)\n\t}\n\tlog.Printf(\"Worker Task: %+v\\n\", reply.Task)\n\treturn *reply.Task\n}",
"func (d *Deployment) GetTask() string {\n\tif d == nil || d.Task == nil {\n\t\treturn \"\"\n\t}\n\treturn *d.Task\n}",
"func (i *TaskRegisterUpdater) StartTask(ctx context.Context, action string, age time.Duration) (models.Task, error) {\n\n\treturn i.repository.GetTask(ctx, action, age)\n}",
"func NewTask() *Task {\n\treturn &Task{}\n}",
"func GetTaskConfig(name, action string, conf *config.ComposeConfig) (types.TaskConfig, error) {\n\tact, err := getAction(action, name, conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn types.NewTaskConfig(act.name, conf, act.deps, NewTask(act.Run, act.Stop)), nil\n}",
"func GetSingleTask(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\n\tif params[\"id\"] == \"\" {\n\t\thttp.Error(w, http.StatusText(400), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttaskID := bson.ObjectIdHex(params[\"id\"])\n\n\ttask, err := repository.GetSingleTask(taskID)\n\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(404), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tjson.NewEncoder(w).Encode(task)\n}",
"func (c *clustermgrClient) GetMigrateTask(ctx context.Context, taskType proto.TaskType, key string) (task *proto.MigrateTask, err error) {\n\tval, err := c.client.GetKV(ctx, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(val.Value, &task)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif task.TaskType != taskType {\n\t\treturn nil, errcode.ErrIllegalTaskType\n\t}\n\treturn\n}",
"func getOrCreateMigrationTask(kind, name, openebsNamespace string, r Migrator,\n\tclient openebsclientset.Interface) (*v1Alpha1API.MigrationTask, error) {\n\tvar mtaskObj *v1Alpha1API.MigrationTask\n\tvar err error\n\tmtaskObj = buildMigrationTask(kind, name, r)\n\t// the below logic first tries to fetch the CR if not found\n\t// then creates a new CR\n\tmtaskObj1, err1 := client.OpenebsV1alpha1().\n\t\tMigrationTasks(openebsNamespace).\n\t\tGet(context.TODO(), mtaskObj.Name, metav1.GetOptions{})\n\tif err1 != nil {\n\t\tif k8serror.IsNotFound(err1) {\n\t\t\tmtaskObj, err = client.OpenebsV1alpha1().\n\t\t\t\tMigrationTasks(openebsNamespace).Create(context.TODO(),\n\t\t\t\tmtaskObj, metav1.CreateOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil, err1\n\t\t}\n\t} else {\n\t\tmtaskObj = mtaskObj1\n\t}\n\n\tif mtaskObj.Status.StartTime.IsZero() {\n\t\tmtaskObj.Status.Phase = v1Alpha1API.MigrateStarted\n\t\tmtaskObj.Status.StartTime = metav1.Now()\n\t}\n\n\tmtaskObj.Status.MigrationDetailedStatuses = []v1Alpha1API.MigrationDetailedStatuses{}\n\tmtaskObj, err = client.OpenebsV1alpha1().\n\t\tMigrationTasks(openebsNamespace).\n\t\tUpdate(context.TODO(), mtaskObj, metav1.UpdateOptions{})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to update migratetask\")\n\t}\n\treturn mtaskObj, nil\n}",
"func forwardGetTask(restAPIaddress string, guid string) (string, error) {\n\turl := \"http://\" + restAPIaddress + \"/api/v1.0/task/\" + guid\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\tsessionData, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(sessionData), err\n}",
"func (context Context) CreateTask(task Task) (result Task, err error) {\n\n\t// Validate that the job exists and is running.\n\tvar job Job\n\tjob, err = context.GetJobByID(task.Job)\n\tif err != nil && err != ErrNotFound {\n\t\terr = errors.Wrap(err, \"error while trying to access the referenced job\")\n\t\treturn\n\t} else if err == ErrNotFound || job.Status != JobRunning {\n\t\terr = errors.Wrapf(ErrBadInput,\n\t\t\t\"the referenced objective \\\"%s\\\" does not exist or is running\", task.Job)\n\t}\n\n\t// Validate that the models exist and are active.\n\tvar found bool\n\tfor i := range job.Models {\n\t\tif task.Model == job.Models[i] {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif found == false {\n\t\terr = errors.Wrapf(ErrBadInput,\n\t\t\t\"the referenced model \\\"%s\\\" does not appear in the models list of the parent job \\\"%s\\\"\",\n\t\t\ttask.Model, job.ID)\n\t}\n\n\t// Give default values to some fields. Copy some from the job.\n\ttask.ObjectID = bson.NewObjectId()\n\ttask.User = job.User\n\ttask.Dataset = job.Dataset\n\ttask.Objective = job.Objective\n\ttask.AltObjectives = job.AltObjectives\n\ttask.CreationTime = time.Now()\n\ttask.Status = TaskScheduled\n\ttask.Stage = TaskStageBegin\n\ttask.StageTimes = TaskStageIntervals{}\n\ttask.StageDurations = TaskStageDurations{}\n\ttask.RunningDuration = 0\n\ttask.Quality = 0.0\n\ttask.AltQualities = make([]float64, len(task.AltObjectives))\n\n\t// Get next ID.\n\tc := context.Session.DB(context.DBName).C(\"tasks\")\n\tquery := bson.M{\"job\": bson.M{\"$eq\": task.Job}}\n\tvar resultSize int\n\tresultSize, err = c.Find(query).Count()\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"mongo find failed\")\n\t\treturn\n\t}\n\ttask.ID = fmt.Sprintf(\"%s/%010d\", task.Job.Hex(), resultSize+1)\n\n\terr = c.Insert(task)\n\tif err != nil {\n\t\tlastError := err.(*mgo.LastError)\n\t\tif lastError.Code == 11000 {\n\t\t\terr = ErrIdentifierTaken\n\t\t\treturn\n\t\t}\n\t\terr = errors.Wrap(err, \"mongo insert failed\")\n\t\treturn\n\t}\n\n\treturn task, nil\n\n}",
"func (worker *Worker) Task(taskID string) Task {\n\tworker.Lock()\n\tdefer worker.Unlock()\n\n\tfor _, task := range worker.tasks {\n\t\tif task.ID() == taskID {\n\t\t\treturn task\n\t\t}\n\t}\n\treturn nil\n}",
"func (this *TodoList) GetTask(pID string) (rFound *Task) {\n\tif this != nil {\n\t\tfor _, zTask := range this.Tasks {\n\t\t\tif zTask.Id == pID {\n\t\t\t\treturn zTask\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}",
"func (s *Service) GetTask(filter filters.Args) ([]swarm.Task, error) {\n\ttasks, err := s.DockerClient.TaskList(context.Background(), types.TaskListOptions{Filters: filter})\n\n\tif err != nil {\n\t\treturn []swarm.Task{}, err\n\t}\n\n\treturn tasks, nil\n}",
"func (tp *ThreadPool) getTask() Task {\n\tvar returnIdleTask = true\n\n\t// Check if tasks should be stopped\n\n\ttp.workerMapLock.Lock()\n\tif tp.workerKill > 0 {\n\t\ttp.workerKill--\n\t\ttp.workerMapLock.Unlock()\n\t\treturn nil\n\n\t} else if tp.workerKill == -1 {\n\n\t\t// Check for special worker kill value which is used when workers should\n\t\t// be killed when no more tasks are available.\n\n\t\treturnIdleTask = false\n\t}\n\ttp.workerMapLock.Unlock()\n\n\t// Check if there is a task available\n\n\ttp.queueLock.Lock()\n\ttask := tp.queue.Pop()\n\ttp.queueLock.Unlock()\n\n\tif task != nil {\n\t\treturn task\n\t}\n\n\ttp.RegulationLock.Lock()\n\n\t// Reset too many flag\n\n\tif tp.tooManyTriggered && tp.TooManyThreshold > tp.queue.Size() {\n\t\ttp.tooManyTriggered = false\n\t}\n\n\t// Check too few\n\n\tif !tp.tooFewTriggered && tp.TooFewThreshold >= tp.queue.Size() {\n\t\ttp.tooFewTriggered = true\n\t\ttp.TooFewCallback()\n\t}\n\n\ttp.RegulationLock.Unlock()\n\n\tif returnIdleTask {\n\n\t\t// No new task available return idle task\n\n\t\treturn &idleTask{tp}\n\t}\n\n\treturn nil\n}",
"func getTask(w http.ResponseWriter, r *http.Request){\n\tvars := mux.Vars(r)\n\ttaskID, err :=strconv.Atoi(vars[\"id\"])\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Invalid ID\")\n\t\treturn\n\t}\n\n\t//Se busca entre las tasks el ID solicitado y luego se muestra en forma de JSON\n\tfor _, task := range tasks {\n\t\tif task.ID == taskID {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tjson.NewEncoder(w).Encode(task)\n\t\t}\n\t}\n}",
"func NewTask(action Work) Task {\n\treturn &task{\n\t\taction: action,\n\t\tdone: make(signal, 1),\n\t\tcancel: make(signal, 1),\n\t}\n}",
"func (s *Scavenger) newTask(info *p.TaskListInfo) executor.Task {\n\treturn &executorTask{\n\t\ttaskListInfo: *info,\n\t\tscvg: s,\n\t}\n}",
"func (s *InMemoryTasksStore) Get(id uuid.UUID) *task.Task {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.tasks[id.String()]\n}",
"func getTaskByID(taskID string) Task {\n\tresp, err := http.Get(URL + \"/tasks/\" + taskID)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tvar tasksArray []Task\n\te := json.NewDecoder(resp.Body).Decode(&tasksArray)\n\tif e != nil {\n\t\tfmt.Println(e)\n\t}\n\n\tif len(tasksArray) == 0 {\n\t\tfmt.Println(`\n\t\tTask not found!\n\t\t`)\n\t\tos.Exit(0)\n\t}\n\n\treturn tasksArray[0]\n\n}",
"func (ctl *taskController) Task() proto.Message {\n\treturn ctl.task\n}",
"func GetTaskController(w http.ResponseWriter, r *http.Request) {\n\t_task := new(Task)\n\tvars := mux.Vars(r)\n\n\ttaskID, err := strconv.Atoi(vars[\"id\"])\n\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttask, err := _task.GetTask(taskID)\n\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tresponse, _ := json.Marshal(task)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(response)\n}",
"func (dtw dispatchTaskWrapper) Task() queues.Task {\n return dtw.t\n}",
"func (m *Master) RequestTask(args *RequestTaskArgs, reply *RequestTaskReply) error {\n\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\tswitch m.state {\n\tcase Initializing:\n\t\treply.WorkerNextState = Idle\n\tcase MapPhase:\n\t\tfor i, task := range m.mapTasks {\n\t\t\tif task.State == UnScheduled {\n\t\t\t\t//schedule unassigned task\n\t\t\t\ttask.State = InProgress\n\t\t\t\treply.Task = task\n\t\t\t\treply.WorkerNextState = WorkAssigned\n\n\t\t\t\tm.mapTasks[i].State = InProgress\n\t\t\t\tm.mapTasks[i].TimeStamp = time.Now()\n\t\t\t\treturn nil\n\t\t\t} else if task.State == InProgress && time.Now().Sub(task.TimeStamp) > 10*time.Second {\n\t\t\t\t//reassign tasks due to timeout\n\t\t\t\treply.Task = task\n\t\t\t\treply.WorkerNextState = WorkAssigned\n\t\t\t\t//update TimeStamp\n\t\t\t\tm.mapTasks[i].TimeStamp = time.Now()\n\n\t\t\t\treturn nil\n\t\t\t} else if task.State == Done {\n\t\t\t\t//ignore the task\n\t\t\t\t//TODO: array for task is not efficient, maybe change to map?\n\t\t\t}\n\t\t}\n\t\t//no more mapWork, wait for other tasks\n\t\treply.WorkerNextState = Idle\n\n\tcase ReducePhase:\n\t\tfor i, task := range m.reduceTasks {\n\t\t\tif task.State == UnScheduled {\n\t\t\t\t//schedule unassigned task\n\t\t\t\ttask.State = InProgress\n\t\t\t\treply.Task = task\n\t\t\t\treply.WorkerNextState = WorkAssigned\n\n\t\t\t\tm.reduceTasks[i].State = InProgress\n\t\t\t\tm.reduceTasks[i].TimeStamp = time.Now()\n\n\t\t\t\treturn nil\n\t\t\t} else if task.State == InProgress && time.Now().Sub(task.TimeStamp) > 10*time.Second {\n\t\t\t\t//reassign tasks due to timeout\n\t\t\t\treply.Task = task\n\t\t\t\treply.WorkerNextState = WorkAssigned\n\t\t\t\t//update TimeStamp\n\t\t\t\tm.reduceTasks[i].TimeStamp = time.Now()\n\t\t\t\treturn nil\n\t\t\t} else if task.State == Done {\n\t\t\t\t//ignore the task\n\t\t\t\t//TODO: array for task is not efficient, maybe change to map?\n\t\t\t}\n\t\t}\n\t\t//no more reduceWork, wait for other tasks\n\t\treply.WorkerNextState = Idle\n\tdefault:\n\t\t//master gonna be teared down, shut down worker\n\t\t//or something weng wrong\n\t\treply.WorkerNextState = NoMoreWork\n\t}\n\n\treturn nil\n}",
"func (t Task) Task() string {\n\treturn t.task\n}",
"func getTaskByName(app string, c *gin.Context) (*_5xxDBTask, error) {\n\tdb, err := utils.GetDBFromContext(c)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Unable to access database\")\n\t}\n\n\ttask := _5xxDBTask{}\n\n\terr = db.Get(&task, \"SELECT * FROM _5xx_tasks WHERE app=$1\", app)\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn nil, sql.ErrNoRows\n\t\t}\n\t\treturn nil, errors.New(\"Unable to access database\")\n\t}\n\treturn &task, nil\n}",
"func (t *Task) ToHTTPTask() *http.Task {\n\tauthType := v2alpha2.BearerAuthType\n\tauthtype := &authType\n\tsecret := &t.With.Secret\n\n\tref := DefaultRef\n\tif t.With.Ref != nil {\n\t\tref = *t.With.Ref\n\t}\n\n\t// compose body of POST request\n\tbody := \"\"\n\tbody += \"{\"\n\tbody += \"\\\"ref\\\": \\\"\" + ref + \"\\\",\"\n\tbody += \"\\\"inputs\\\": {\"\n\tnumWFInputs := len(t.With.WFInputs)\n\tfor i := 0; i < numWFInputs; i++ {\n\t\tbody += \"\\\"\" + t.With.WFInputs[i].Name + \"\\\": \\\"\" + t.With.WFInputs[i].Value + \"\\\"\"\n\t\tif i+1 < numWFInputs {\n\t\t\tbody += \",\"\n\t\t}\n\t}\n\tbody += \"}\"\n\tbody += \"}\"\n\n\ttSpec := &http.Task{\n\t\tTaskMeta: core.TaskMeta{\n\t\t\tTask: core.StringPointer(TaskName),\n\t\t},\n\t\tWith: http.Inputs{\n\t\t\tURL: \"https://api.github.com/repos/\" + t.With.Repository + \"/actions/workflows/\" + t.With.Workflow + \"/dispatches\",\n\t\t\tAuthType: authtype,\n\t\t\tSecret: secret,\n\t\t\tHeaders: []v2alpha2.NamedValue{{\n\t\t\t\tName: \"Accept\",\n\t\t\t\tValue: \"application/vnd.github.v3+json\",\n\t\t\t}},\n\t\t\tBody: &body,\n\t\t\tIgnoreFailure: t.With.IgnoreFailure,\n\t\t},\n\t}\n\n\tif t.With.IgnoreFailure != nil {\n\t\ttSpec.With.IgnoreFailure = t.With.IgnoreFailure\n\t}\n\n\tlog.Info(\"Dispatching GitHub workflow: \", tSpec.With.URL)\n\tlog.Info(*tSpec.With.Body)\n\n\treturn tSpec\n}",
"func (server *Server) GetRegisteredTask(name string) interface{} {\n\treturn server.registeredTasks[name]\n}",
"func NewGetTaskOK() *GetTaskOK {\n\treturn &GetTaskOK{}\n}",
"func (dtm *DfgetTaskManager) getDfgetTask(clientID, taskID string) (*types.DfGetTask, error) {\n\tkey, err := generateKey(clientID, taskID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tv, err := dtm.dfgetTaskStore.Get(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif dfgetTask, ok := v.(*types.DfGetTask); ok {\n\t\treturn dfgetTask, nil\n\t}\n\treturn nil, errors.Wrapf(errorType.ErrConvertFailed, \"clientID: %s, taskID: %s: %v\", clientID, taskID, v)\n}",
"func (c *C2Default) GetTasking() interface{} {\n\turl := fmt.Sprintf(\"%sapi/v%s/agent_message\", c.BaseURL, ApiVersion)\n\t//request := structs.Msg{}\n\trequest := structs.TaskRequestMessage{}\n\trequest.Action = \"get_tasking\"\n\trequest.TaskingSize = -1\n\n\traw, err := json.Marshal(request)\n\n\tif err != nil {\n\t\t//log.Printf(\"Error unmarshalling: %s\", err.Error())\n\t}\n\n\trawTask := c.htmlGetData(url, raw)\n\n\ttask := structs.TaskRequestMessageResponse{}\n\terr = json.Unmarshal(rawTask, &task)\n\n\tif err != nil {\n\t\t//log.Printf(\"Error unmarshalling task data: %s\", err.Error())\n\t}\n\n\treturn task\n}",
"func (r *DeviceAppManagementTaskRequest) Get(ctx context.Context) (resObj *DeviceAppManagementTask, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}",
"func (s *K8sSvc) GetServiceTask(ctx context.Context, cluster string, service string, containerInstanceID string) (serviceTaskID string, err error) {\n\treturn \"\", common.ErrNotSupported\n}",
"func (t *TaskList) Get(name string) Task {\n\tt.RLock()\n\tdefer t.RUnlock()\n\treturn t.taskSet[name]\n}",
"func (c *CreateKubernetesTaskRepo) GetLastTask(eid string, providerName string) (*model.CreateKubernetesTask, error) {\n\tvar old model.CreateKubernetesTask\n\tif err := c.DB.Where(\"eid = ? and provider_name=?\", eid, providerName).Order(\"created_at desc\").Limit(1).Take(&old).Error; err != nil {\n\t\treturn nil, err\n\t}\n\treturn &old, nil\n}",
"func (r *Redis) GetTask(taskID string) (*Task, error) {\n\tconn := r.pool.Get()\n\tdefer conn.Close()\n\n\ttaskFields, err := redis.Values(conn.Do(\"HGETALL\", \"sync_tasks#\"+taskID))\n\tnoticeError(err)\n\tif err != nil {\n\t\tif err == redis.ErrNil {\n\t\t\treturn nil, ErrTaskNotFound\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\tif len(taskFields) == 0 {\n\t\treturn nil, ErrTaskNotFound\n\t}\n\n\ttask := &Task{}\n\terr = redis.ScanStruct(taskFields, task)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error deserializing task entity [%s]: %v\", taskID, err)\n\t}\n\n\treturn task, nil\n}",
"func (client *Client) GetTranscodeTask(request *GetTranscodeTaskRequest) (_result *GetTranscodeTaskResponse, _err error) {\n\truntime := &util.RuntimeOptions{}\n\t_result = &GetTranscodeTaskResponse{}\n\t_body, _err := client.GetTranscodeTaskWithOptions(request, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_result = _body\n\treturn _result, _err\n}",
"func (ctx *Context) getOrAddTask(a *Application, pod *v1.Pod) *Task {\n\t// using pod UID as taskId\n\t//如果已经存在app中,则直接返回它\n\tif task, err := a.GetTask(string(pod.UID)); err == nil {\n\t\treturn task\n\t}\n\t//不存在,则创建一个新的task\n\tnewTask := createTaskFromPod(a, ctx.kubeClient, ctx.schedulerApi, pod)\n\ta.AddTask(newTask)\n\treturn newTask\n}",
"func (r *TaskRepository) GetTask(id int64) (*api.Task, error) {\n\tvar task api.Task\n\tr.DB.First(&task, id)\n\treturn &task, nil\n}",
"func (o *TaskRequest) GetTask() TaskTask {\n\tif o == nil || o.Task == nil {\n\t\tvar ret TaskTask\n\t\treturn ret\n\t}\n\treturn *o.Task\n}"
] | [
"0.7213508",
"0.71484256",
"0.7080093",
"0.7022939",
"0.70011353",
"0.68791914",
"0.6873889",
"0.68579173",
"0.68332416",
"0.6799068",
"0.67832404",
"0.6742814",
"0.6718434",
"0.670646",
"0.6698478",
"0.6695807",
"0.6687799",
"0.665799",
"0.6657192",
"0.6641929",
"0.6606461",
"0.65781647",
"0.6544192",
"0.65326095",
"0.65316254",
"0.65070736",
"0.6493138",
"0.6484437",
"0.6480699",
"0.647932",
"0.6457312",
"0.64545876",
"0.6448082",
"0.6434571",
"0.6399549",
"0.6378033",
"0.6369145",
"0.6365156",
"0.63484883",
"0.63438624",
"0.63365966",
"0.6335778",
"0.63135964",
"0.6313303",
"0.6305743",
"0.62376636",
"0.622401",
"0.61992776",
"0.61863536",
"0.6182286",
"0.6175694",
"0.615826",
"0.6156162",
"0.6154125",
"0.6143831",
"0.6131809",
"0.6122542",
"0.6092725",
"0.6085765",
"0.6081627",
"0.6074413",
"0.60706574",
"0.60629815",
"0.6054584",
"0.6053985",
"0.60478526",
"0.60306215",
"0.6026293",
"0.60196686",
"0.60177004",
"0.60176617",
"0.60136324",
"0.5994884",
"0.5971288",
"0.5968961",
"0.5950695",
"0.59204066",
"0.5918559",
"0.59020805",
"0.5885462",
"0.58825994",
"0.58822006",
"0.58777946",
"0.5860541",
"0.5854294",
"0.5850355",
"0.58493155",
"0.58489895",
"0.5848823",
"0.5844829",
"0.58448094",
"0.58403033",
"0.5823561",
"0.582266",
"0.5821775",
"0.5814983",
"0.5803273",
"0.5797133",
"0.5779612",
"0.57767105"
] | 0.7655484 | 0 |
NewCommonTime returns a simple 4/4 meter at the specified tempo | func NewCommonTime(bpm float64) *Meter {
return &Meter{
BeatsPerMinute: bpm,
BeatsPerBar: 4,
BeatValue: notes.Quarter,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (t Time) Nanosecond() int {}",
"func New(h, m int) Clock {\n\tminutes := h*60 + m\n\tminutes %= 1440\n\tif minutes < 0 {\n\t\tminutes += 1440\n\t}\n\n\treturn Clock{minutes}\n}",
"func (t Time) Minute() int {}",
"func newFakeTime() {\n\tfakeCurrentTime = fakeTime().Add(time.Hour * 24 * 2)\n}",
"func New(hour, minute int) Clock {\n\tminute += hour * 60\n\tminute %= 24 * 60\n\tif minute < 0 {\n\t\tminute += 24 * 60\n\t}\n\treturn Clock{minute}\n}",
"func New(hour, minute int) Clock {\n\ttotalMinutes := (hour*60 + minute) % 1440\n\tif totalMinutes < 0 {\n\t\ttotalMinutes = totalMinutes + 1440\n\t}\n\n\tm := totalMinutes % 60\n\th := totalMinutes / 60 % 24\n\n\treturn Clock{hour: h, minute: m}\n}",
"func NanoTime() int64",
"func (t Time) Clock() (hour, min, sec int) {}",
"func New(h, m int) Clock {\n\tm = (H*h + m) % D\n\tif m < 0 {\n\t\tm += D\n\t}\n\treturn Clock(m)\n}",
"func newTime(year int, month time.Month, day int, hourMinSec ...int) time.Time {\n\tvar hour, min, sec int\n\n\tswitch len(hourMinSec) {\n\tcase 0:\n\t\t// nothing\n\tcase 3:\n\t\tsec = hourMinSec[2]\n\t\tfallthrough\n\tcase 2:\n\t\tmin = hourMinSec[1]\n\t\tfallthrough\n\tcase 1:\n\t\thour = hourMinSec[0]\n\tdefault:\n\t\tpanic(\"too many arguments\")\n\t}\n\n\treturn time.Date(year, month, day, hour, min, sec, 0, time.UTC)\n}",
"func new_time() time.Duration {\n\treturn time.Duration((rand.Intn(300) + 150)) * time.Millisecond\n}",
"func NewMinute(t time.Time, lsw, dut1 int) (Minute, error) {\n\tt = t.UTC() // Don't care about local times\n\tmin := Minute{\n\t\tTime: t,\n\t\tlsw: lsw == 1,\n\t\tdut1: dut1,\n\t}\n\tbits := min.bits[:]\n\n\tmarkers := []int{9, 19, 29, 39, 49, 59} // P1-P6\n\tbits[0] = bitNone // Minute mark\n\tfor _, v := range markers {\n\t\tbits[v] = bitMarker\n\t}\n\n\tmidnight := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)\n\tendOfDay := midnight.AddDate(0, 0, 1)\n\n\tdst1 := 0 // DST status at 00:00Z today\n\tif isDST(midnight) {\n\t\tdst1 = 1\n\t}\n\tdst2 := 0 // DST status at 24:00Z today\n\tif isDST(endOfDay) {\n\t\tdst2 = 1\n\t}\n\n\tyear1s := t.Year() % 10\n\tyear10s := t.Year()%100 - year1s\n\n\tminute1s := t.Minute() % 10\n\tminute10s := t.Minute()%100 - minute1s\n\n\thour1s := t.Hour() % 10\n\thour10s := t.Hour()%100 - hour1s\n\n\tdayOfYear1s := t.YearDay() % 10\n\tdayOfYear10s := t.YearDay()%100 - dayOfYear1s\n\tdayOfYear100s := t.YearDay()%1000 - dayOfYear1s - dayOfYear10s\n\n\tdut1Sign, dut1Magnitude := 1, dut1 // dut1Sign is positive\n\tif dut1 < 0 {\n\t\tdut1Sign = 0\n\t\tdut1Magnitude *= -1\n\t}\n\tif dut1Magnitude > 7 {\n\t\tdut1Magnitude = 7 // Only 3 bits for this value.\n\t}\n\n\terr := minuteEncoder.encode(bits, []int{\n\t\t0, 0, dst1, lsw, year1s, 0, 0,\n\t\tminute1s, minute10s, 0, 0,\n\t\thour1s, hour10s, 0, 0,\n\t\tdayOfYear1s, dayOfYear10s, 0,\n\t\tdayOfYear100s, 0, 0,\n\t\tdut1Sign, year10s, dst2, dut1Magnitude, 0,\n\t})\n\tif err != nil {\n\t\treturn min, errors.Wrapf(err, \"Cannot encode minute %s\", t.Format(\"15:04\"))\n\t}\n\n\tmin.lastSecond = lastSecond(t, min.lsw)\n\n\treturn min, nil\n}",
"func nanotime() int64",
"func nanotime() int64",
"func nanotime() int64",
"func nanotime() int64",
"func NewGTime() GTime {\n\treturn GTime{From: \"now-24h\", To: \"now\"}\n}",
"func (am *AutogitManager) commonTime(ctx context.Context) time.Time {\n\toffset, haveOffset := am.config.MDServer().OffsetFromServerTime()\n\tif !haveOffset {\n\t\tam.log.CDebugf(ctx, \"No offset, cannot use common time; \"+\n\t\t\t\"falling back to local time\")\n\t\treturn am.config.Clock().Now()\n\t}\n\treturn am.config.Clock().Now().Add(-offset)\n}",
"func New(hours, minutes int) Time {\n\th := (hours + minutes/60) % 24\n\tm := minutes % 60\n\n\tfor m < 0 {\n\t\tm += 60\n\t\th--\n\t}\n\n\tfor h < 0 {\n\t\th += 24\n\t}\n\n\treturn Time{\n\t\tminutes: h*60 + m,\n\t}\n}",
"func New(h, m int) Clock {\n\tclock := &Clock{hour: h, minute: m}\n\tclock.normalize()\n\treturn *clock\n}",
"func New(hours, minutes int) Clock {\n\treturn Clock(((hours*MinutesInAnHour+minutes)%MinutesInADay + MinutesInADay) % MinutesInADay)\n}",
"func (*Root) ModTime() time.Time { return time.Time{} }",
"func (tso TimeWebsmsShortOne) Time() time.Time { return time.Time(tso) }",
"func New(hours int, minutes int) Clock {\n\tminutes = hours*minutesPerHour + minutes\n\tminutes %= minutesPerDay\n\tif minutes < 0 {\n\t\tminutes += minutesPerDay\n\t}\n\n\treturn Clock{minutes / minutesPerHour, minutes % minutesPerHour}\n}",
"func New(h, m int) Clock {\n\tm = h*60 + m\n\tm %= 60 * 24\n\tif m < 0 {\n\t\treturn Clock{m + 60*24}\n\t}\n\treturn Clock{m}\n}",
"func GetSignalTime(timeUnit int32, refDate time.Time) time.Time {\n\tvar t time.Time\n\tswitch timeUnit {\n\tcase SignalTimeUnit_NOW:\n\t\t{\n\t\t\treturn refDate.UTC().Truncate(time.Hour * 24)\n\t\t}\n\tcase SignalTimeUnit_MONTH:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -30)\n\t\t}\n\tcase SignalTimeUnit_BIMONTH:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -60)\n\t\t}\n\tcase SignalTimeUnit_QUARTER:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -90)\n\t\t}\n\tcase SignalTimeUnit_HALFYEAR:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -180)\n\t\t}\n\tcase SignalTimeUnit_THIRDQUARTER:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -270)\n\t\t}\n\tcase SignalTimeUnit_YEAR:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -365)\n\t\t}\n\t}\n\n\treturn t.Truncate(time.Hour * 24)\n}",
"func (f File) CTime() (uint32, uint32) {\n\treturn 0, 0\n}",
"func New(h int, m int) Clock {\n\t// Normalize time.\n\tconst minutesInADay = 24 * 60\n\tminutes := (h*60 + m) % minutesInADay\n\tif minutes < 0 {\n\t\tminutes += minutesInADay\n\t}\n\treturn Clock{\n\t\tminutes: minutes,\n\t}\n}",
"func New(h int, m int) Clock {\n\tc := (h*60 + m) % minutesInDay\n\n\tfor c < 0 {\n\t\tc += minutesInDay\n\t}\n\n\treturn Clock(c)\n}",
"func AddGigasecond(input time.Time) time.Time {\n return input.Add( time.Second * 1e9 )\n}",
"func New(hour, minute int) Clock {\n\th, m := normalize(hour, minute)\n\treturn Clock{h, m}\n}",
"func New(hour, minute int) Clock {\n\thour, minute = normalize(hour, minute)\n\treturn Clock{hour, minute}\n}",
"func createCDateTime(goTime time.Time) (string, string) {\n\td := goTime.Format(mqDateFormat) // These magic values tell Go how to parse/format between Times and strings\n\tt := goTime.Format(mqTimeFormat)\n\tt = t[:6] + t[7:] // Strip the '.'\n\treturn d, t\n}",
"func New(hour, minute int) Clock {\n\tadjustedHour, adjustedMinute := convertHelper(hour, minute)\n\tc := Clock{adjustedHour, adjustedMinute}\n\treturn c\n}",
"func New(hour, minute int) Clock {\n\tvar c Clock\n\treturn c.Add(hour*60 + minute)\n}",
"func timeInWords(h int32, m int32) string {\n if m == 0 {\n return numberToString(h) + \" o' clock\"\n }\n\n var next_h int32 = h + 1\n if h == 12 {\n next_h = 1\n }\n\n var min_str = \"minutes\" \n if m == 1 {\n min_str = \"minute\"\n }\n\n if m % 15 == 0 {\n switch m {\n case 15:\n return \"quarter past \" + numberToString(h)\n case 30:\n return \"half past \" + numberToString(h)\n case 45:\n return \"quarter to \" + numberToString(next_h)\n }\n } else if m > 30 {\n return numberToString(60 - m) + \" \" + min_str + \" to \" + numberToString(next_h)\n }\n \n return numberToString(m) + \" \" + min_str + \" past \" + numberToString(h)\n}",
"func New(hour, minute int) Clock {\n\t// create a clock with 0 minutes and then add what was passed as arguments\n\treturn Clock{0}.Add((hour * 60) + minute)\n}",
"func Fixed(t time.Time) Clock {\n\treturn ClockFunc(func() time.Time {\n\t\treturn t\n\t})\n}",
"func (l *CircularTimes) UpdateTime() {\n\n // Time\n t := -1\n if l.Wtd != nil && !l.Wtd.IsZero() {\n t = l.Wtd.Get()\n } else if l.Wta != nil && !l.Wta.IsZero() {\n t = l.Wta.Get()\n } else if l.Wtp != nil && !l.Wtp.IsZero() {\n t = l.Wtp.Get()\n } else if l.Ptd != nil && !l.Ptd.IsZero() {\n // Should not happen, we should have a working time\n t = l.Ptd.Get() * 60\n } else if l.Pta != nil && !l.Pta.IsZero() {\n // Should not happen, we should have a working time\n t = l.Pta.Get() * 60\n }\n l.Time.Set(t)\n\n // PublicTime\n t = -1\n if l.Ptd != nil {\n t = l.Ptd.Get() * 60\n } else if l.Pta != nil {\n t = l.Pta.Get() * 60\n } else if l.Wtd != nil && !l.Wtd.IsZero() {\n t = l.Wtd.Get()\n } else if l.Wta != nil && !l.Wta.IsZero() {\n t = l.Wta.Get()\n } else if l.Wtp != nil && !l.Wtp.IsZero() {\n t = l.Wtp.Get()\n }\n l.PublicTime.Set(t)\n\n}",
"func NewTime(t time.Time) Time {\n\treturn Time{\n\t\tTime: t,\n\t}\n}",
"func Time(hour, minute int) Clock {\n\ttotal := hour*MinInHour + minute\n\n\tfor total >= MinsInDay || total < 0 {\n\t\tif total >= MinsInDay {\n\t\t\ttotal -= MinsInDay\n\t\t} else if total < 0 {\n\t\t\ttotal += MinsInDay\n\t\t}\n\t}\n\n\treturn Clock{total}\n}",
"func Time(hour, minute int) Clock {\n\ttime := (hour*60 + minute) % (60 * 24)\n\tif time < 0 {\n\t\ttime += 60 * 24\n\t}\n\treturn Clock(time)\n}",
"func New(hour, minute int) Clock {\n\treturn Clock(modulus(hour*60+minute, minutesInDay))\n}",
"func newClock() clock {\n\treturn make(clock)\n}",
"func NewTime(t time.Time) Time {\n\treturn Time(t.Format(timeLayout))\n}",
"func Time(hour, minute int) Clock {\n\treturn Clock(hour * minutesPerHour).Add(minute)\n}",
"func New(h int, m int) Clock {\n\tm, addHours := normalizeMinutes(m)\n\th = normalizeHours(h + addHours)\n\treturn Clock{h, m}\n}",
"func CurrentNanosecond() int64 {\n\treturn CurrentMicrosecond() * 1e3\n}",
"func New(hour, minute int) Clock {\n\treturn Clock(0).Add(hour*60 + minute)\n}",
"func problem5() {\n\ttotalDistance := 4500.0 // meters\n\ttestSpeed := 23.0 // m/s\n\t//t = d/s\n\ttime1 := totalDistance / testSpeed //in seconds\n\ttime1Minutes := time1 / 60\n\tfmt.Println(\"Time in Seconds:\", time1)\n\tfmt.Println(\"Time in Minutes:\", time1Minutes)\n\n}",
"func GetTime() TimeInfo {\n\n\tt := time.Now()\n\td := TimeInfo{\n\t\tYear: LeftPadInt(4, t.Year()),\n\t\tMonth: LeftPadInt(2, int(t.Month())),\n\t\tDay: LeftPadInt(2, t.Day()),\n\t\tHour: LeftPadInt(2, t.Hour()),\n\t\tMinute: LeftPadInt(2, t.Minute()),\n\t\tSecond: LeftPadInt(2, t.Second()),\n\t\tTime: &t,\n\t}\n\n\treturn d\n\n}",
"func hour(n int) float64 { return float64(n * 60 * 60) }",
"func Nanotime() int64 {\n\treturn nanotime()\n}",
"func Nanotime() int64 {\n\treturn nanotime()\n}",
"func NewTime(t time.Time) *Time {\n\treturn &Time{\n\t\ttime: t,\n\t}\n}",
"func NewTime(t time.Time) *Time {\n\treturn &Time{Time: t}\n}",
"func StandardFor(t time.Time) times.Object {\n\treturn Standard(t.Second(), t.Minute(), t.Hour(), t.Day())\n}",
"func NewTime(t time.Time) Time {\n\treturn Time{t.UTC()}\n}",
"func (f Fahrenheit) Celcious() Celcious {\n\treturn Celcious((f - 32) / 1.8)\n}",
"func temps_process(new_client *client.Client, new_haid *coiffeur.Coiffeur) float32 {\n\tworkingTime := 0.0\n\tif new_client.Sexe == \"homme\" {\n\t\tworkingTime = new_haid.StatCoupeHomme * tempsCoupeHomme\n\t} else {\n\t\tworkingTime = new_haid.StatCoupeFemme * tempsCoupeFemme\n\t}\n\n\tif new_client.Shampoo {\n\t\tworkingTime += rand.Float64() * tempsShampoo\n\t}\n\treturn float32(workingTime)\n}",
"func Date(year int, month Month, day, hour, min, sec, nsec int, loc *Location) Time {}",
"func NewTime(sec uint32, nsec uint32) Time {\n\tsec, nsec = normalizeTemporal(int64(sec), int64(nsec))\n\treturn Time{temporal{sec, nsec}}\n}",
"func StrTime13() string {\n\treturn strconv.FormatInt(time.Now().UnixNano()/1e6, 10) //\"1576395361\"\n}",
"func NewReal() Clock {\n\treturn &realClock{}\n}",
"func GenTime() string {\n\tcTime := time.Now()\n\tt := fmt.Sprint(cTime)\n\n\treturn t\n}",
"func (k Kelvin) Celcious() Celcious {\n\treturn Celcious(k - 273.15)\n}",
"func handleMidnight(t time.Time) time.Time {\n if t.Hour() == 0 {\n t = t.Add(time.Minute)\n }\n return t\n}",
"func AddGigasecond(t time.Time) time.Time {\n\n\ttt, _ := time.Parse(\"2006-01-02T15:04:05\", t.Add(giga*time.Second).Format(fmtDT))\n\treturn tt\n}",
"func New() *Time {\n\treturn &Time{}\n}",
"func GetAccurate(sec int) times.Object {\n\treturn object.AccurateZero().SetSecond(sec)\n}",
"func Spring(year int, precision time.Duration) Time {\n\tt1 := Date(year, 3, 15, 0, 0, 0, 0, time.Local)\n\tt2 := Time{Time: t1.AddDate(0, 0, 15)}\n\ttt := Time{}\n\tfor t2.Sub(t1.Time) > precision {\n\t\ttt.Time = t1.Add(t2.Sub(t1.Time) / 2)\n\t\tif sun.App(tt).Dec > 0 {\n\t\t\tt2.Time = tt.Time\n\t\t} else {\n\t\t\tt1.Time = tt.Time\n\t\t}\n\t}\n\treturn tt\n}",
"func (s SpaceUnit) Time(t TimeUnit, dimension int8) MetricUnit {\n\treturn (&metricUnit{uint32(s)}).Time(t, dimension)\n}",
"func NewTime(t time.Time) *Time {\n\treturn &Time{t}\n}",
"func (depre deprecatedTimeZones) Mexico() mexicoDeprecatedTimeZones {return \"\" }",
"func (o *Object) ModTime(ctx context.Context) time.Time {\n\treturn o.modTime\n}",
"func ExampleNewFromTime() {\n\ttimer, _ := time.Parse(\"2006-01-02 15:04:05\", \"2018-08-08 08:08:08\")\n\tnTime := gtime.NewFromTime(timer)\n\n\tfmt.Println(nTime)\n\n\t// Output:\n\t// 2018-08-08 08:08:08\n}",
"func milliTime() (r []byte) {\n\tbuf := new(bytes.Buffer)\n\tt := time.Now().UnixNano()\n\tm := t / 1e6\n\tbinary.Write(buf, binary.BigEndian, m)\n\treturn buf.Bytes()[2:]\n}",
"func milliTime() (r []byte) {\n\tbuf := new(bytes.Buffer)\n\tt := time.Now().UnixNano()\n\tm := t / 1e6\n\tbinary.Write(buf, binary.BigEndian, m)\n\treturn buf.Bytes()[2:]\n}",
"func (c CountUnit) Time(t TimeUnit, dimension int8) MetricUnit {\n\treturn (&metricUnit{uint32(c)}).Time(t, dimension)\n}",
"func GetPhysical(t time.Time) int64 {\n\treturn t.UnixNano() / int64(time.Millisecond)\n}",
"func newClockInternal(co ClockOpts, rtClock tstime.Clock) *Clock {\n\tif !co.FollowRealTime && rtClock != nil {\n\t\tpanic(\"rtClock can only be set with FollowRealTime enabled\")\n\t}\n\n\tif co.FollowRealTime && rtClock == nil {\n\t\trtClock = new(tstime.StdClock)\n\t}\n\n\tc := &Clock{\n\t\tstart: co.Start,\n\t\trealTimeClock: rtClock,\n\t\tstep: co.Step,\n\t\ttimerChannelSize: co.TimerChannelSize,\n\t}\n\tc.init() // init now to capture the current time when co.Start.IsZero()\n\treturn c\n}",
"func (europ europeTimeZones) Copenhagen() string {return \"Europe/Copenhagen\" }",
"func (mes *MarkerEncodingScheme) TimeUnit() Marker { return mes.timeUnit }",
"func NewTime(t time.Time) *Value {\n\treturn &Value{t, Timestamp}\n}",
"func New(t time.Time) *Clock {\n\treturn NewWithStep(t, 0)\n}",
"func ExampleNewClock() {\n\t// Initialize a new clock, using the local\n\t// physical clock.\n\tc := NewClock(UnixNano, time.Nanosecond)\n\t// Update the state of the hybrid clock.\n\ts := c.Now()\n\ttime.Sleep(50 * time.Nanosecond)\n\tt := Timestamp{WallTime: UnixNano()}\n\t// The sanity checks below will usually never be triggered.\n\n\tif s.Less(t) || !t.Less(s) {\n\t\tlog.Fatalf(context.Background(), \"The later timestamp is smaller than the earlier one\")\n\t}\n\n\tif t.WallTime-s.WallTime > 0 {\n\t\tlog.Fatalf(context.Background(), \"HLC timestamp %d deviates from physical clock %d\", s, t)\n\t}\n\n\tif s.Logical > 0 {\n\t\tlog.Fatalf(context.Background(), \"Trivial timestamp has logical component\")\n\t}\n\n\tfmt.Printf(\"The Unix Epoch is now approximately %dns old.\\n\", t.WallTime)\n}",
"func NewClock(hour, minute int) Clock {\n\treturn Clock(time.Duration(hour)*time.Hour + time.Duration(minute)*time.Minute)\n}",
"func NewTime(i interface{}, features map[string]interface{}) (Entry, error) {\n\tif i == nil {\n\t\treturn Time(0), nil\n\t}\n\tn, ok := i.(float64)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"failed to unpack int from: %#v\", i)\n\t}\n\treturn Time(n), nil\n}",
"func TickFromTime(t time.Time) Tick {\n\treturn Tick(util.TimeToSecs(t) / 4)\n}",
"func (s MeshService) GenesisTime(ctx context.Context, in *pb.GenesisTimeRequest) (*pb.GenesisTimeResponse, error) {\n\tlog.Info(\"GRPC MeshService.GenesisTime\")\n\treturn &pb.GenesisTimeResponse{Unixtime: &pb.SimpleInt{\n\t\tValue: uint64(s.GenTime.GetGenesisTime().Unix()),\n\t}}, nil\n}",
"func convertMillToTime(originalTime int64) time.Time {\n\ti := time.Unix(0, originalTime*int64(time.Millisecond))\n\treturn i\n}",
"func Now() Time {\n\treturn Time(time.Now().UnixNano() / 1000)\n}",
"func timeToOxygen(d *droid) int {\n\td.moveToPoint(d.oxygenPosition)\n\n\t// Reset initial location to oxygen point\n\tmovementMapRoot := movementMap{location: &point{x: d.location.x, y: d.location.y}, distance: 0}\n\td.rootPath = &movementMapRoot\n\td.flatPath = []*movementMap{&movementMapRoot}\n\n\ttimeToFill := 0\n\ttryMoves := d.getAllMovesWithDistance(timeToFill)\n\n\tfor len(tryMoves) != 0 {\n\t\tfor _, tryMove := range tryMoves {\n\t\t\t// First, navigate to the location of the move chosen\n\t\t\td.moveToPoint(tryMove.location)\n\t\t\t// Take a step in every direction (will populate map of movements)\n\t\t\td.moveEveryDirection()\n\t\t}\n\t\ttimeToFill++\n\t\ttryMoves = d.getAllMovesWithDistance(timeToFill)\n\t}\n\n\treturn timeToFill - 2\n}",
"func (t Time) Tick() uint {\n\treturn 60\n}",
"func (realClocker) Now() time.Time { return time.Now() }",
"func CurrHour() time.Time {\n\tnow := time.Now()\n\treturn time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())\n}",
"func (milTz militaryTimeZones) Delta() string {return \"Etc/GMT+4\" }",
"func (c stubClocker) Now() time.Time { return c.t }",
"func NewTime(t time.Time) *Time {\n\ttt := Time{}\n\ttt.v.Store(t)\n\treturn &tt\n}",
"func adjTime(context interface{}, value string) (time.Time, error) {\n\n\t// The default value is in seconds unless overridden.\n\t// #time:0 Current date/time\n\t// #time:-3600 3600 seconds in the past\n\t// #time:3m\t\t3 minutes in the future.\n\n\t// Possible duration types.\n\t// \"ns\": int64(Nanosecond),\n\t// \"us\": int64(Microsecond),\n\t// \"ms\": int64(Millisecond),\n\t// \"s\": int64(Second),\n\t// \"m\": int64(Minute),\n\t// \"h\": int64(Hour),\n\n\t// Do we have a single value?\n\tif len(value) == 1 {\n\t\tval, err := strconv.Atoi(value[0:1])\n\t\tif err != nil {\n\t\t\treturn time.Time{}.UTC(), fmt.Errorf(\"Invalid duration : %q\", value[0:1])\n\t\t}\n\n\t\tif val == 0 {\n\t\t\treturn time.Now().UTC(), nil\n\t\t}\n\n\t\treturn time.Now().Add(time.Duration(val) * time.Second).UTC(), nil\n\t}\n\n\t// Do we have a duration type and where does the\n\t// actual duration value end\n\tvar typ string\n\tvar end int\n\n\t// The end byte position for the last character in the string.\n\tePos := len(value) - 1\n\n\t// Look at the very last character.\n\tt := value[ePos:]\n\tswitch t {\n\n\t// Is this a minute or hour? [3m]\n\tcase \"m\", \"h\":\n\t\ttyp = t\n\t\tend = ePos // Position of last chr in value.\n\n\t// Is this a second or other duration? [3s or 3us]\n\tcase \"s\":\n\t\ttyp = t // s for 3s\n\t\tend = ePos // 3 for 3s\n\n\t\t// Is this smaller than a second? [ns, us, ms]\n\t\tif len(value) > 2 {\n\t\t\tt := value[ePos-1 : ePos]\n\t\t\tswitch t {\n\t\t\tcase \"n\", \"u\", \"m\":\n\t\t\t\ttyp = value[ePos-1:] // us for 3us\n\t\t\t\tend = ePos - 1 // 3 for 3us\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\ttyp = \"s\" // s for 3600\n\t\tend = ePos + 1 // 0 for 3600\n\t}\n\n\t// Check if we are to negative the value.\n\tvar start int\n\tif value[0] == '-' {\n\t\tstart = 1\n\t}\n\n\t// Check the remaining bytes is an integer value.\n\tval, err := strconv.Atoi(value[start:end])\n\tif err != nil {\n\t\treturn time.Time{}.UTC(), fmt.Errorf(\"Invalid duration : %q\", value[start:end])\n\t}\n\n\t// Do we have to negate the value?\n\tif start == 1 {\n\t\tval *= -1\n\t}\n\n\t// Calcuate the time value.\n\tswitch typ {\n\tcase \"ns\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Nanosecond).UTC(), nil\n\tcase \"us\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Microsecond).UTC(), nil\n\tcase \"ms\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Millisecond).UTC(), nil\n\tcase \"m\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Minute).UTC(), nil\n\tcase \"h\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Hour).UTC(), nil\n\tdefault:\n\t\treturn time.Now().Add(time.Duration(val) * time.Second).UTC(), nil\n\t}\n}"
] | [
"0.53669107",
"0.5306059",
"0.52969766",
"0.5279843",
"0.5251734",
"0.52410793",
"0.5239546",
"0.522227",
"0.51717865",
"0.5143131",
"0.51333964",
"0.5087262",
"0.5053068",
"0.5053068",
"0.5053068",
"0.5053068",
"0.50448024",
"0.50265586",
"0.50222766",
"0.50056636",
"0.49707845",
"0.49684703",
"0.4964671",
"0.49409607",
"0.49315923",
"0.4911224",
"0.4877709",
"0.4875053",
"0.486171",
"0.48587024",
"0.48536325",
"0.4848453",
"0.48385954",
"0.48116523",
"0.48071134",
"0.47826606",
"0.47813046",
"0.4776355",
"0.47694555",
"0.47623715",
"0.47479144",
"0.47307074",
"0.47302487",
"0.47160497",
"0.46889627",
"0.4684202",
"0.467669",
"0.46567786",
"0.46328306",
"0.46206626",
"0.46042988",
"0.46015978",
"0.4554699",
"0.4554699",
"0.455442",
"0.45540676",
"0.45424038",
"0.45315662",
"0.45262954",
"0.4525928",
"0.45241952",
"0.45168906",
"0.4513972",
"0.4507577",
"0.4504285",
"0.45036522",
"0.45028454",
"0.44814166",
"0.4480495",
"0.44776925",
"0.44738042",
"0.44698575",
"0.4466969",
"0.4461854",
"0.4460444",
"0.44553235",
"0.44521213",
"0.44521213",
"0.44493854",
"0.44445044",
"0.44378114",
"0.4437549",
"0.44356084",
"0.44322562",
"0.44194165",
"0.44143656",
"0.4413477",
"0.44114894",
"0.44000232",
"0.4398604",
"0.4398382",
"0.43983102",
"0.43934336",
"0.43913808",
"0.43872175",
"0.43825617",
"0.43811643",
"0.43809226",
"0.43807405",
"0.43801227"
] | 0.6473046 | 0 |
New returns a new meter with the specified parameters | func New(bpm, beatsPerBar float64, beatValue notes.Duration) *Meter {
return &Meter{
BeatsPerMinute: bpm,
BeatsPerBar: beatsPerBar,
BeatValue: beatValue,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NewMeter(name string, options ...Option) Meter {\n\treturn newMeter(name, options...)\n}",
"func NewMeter(client Client, name string, tagOptions ...TagOption) (*Meter, error) {\n\tif err := validateMetricName(name); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Meter{\n\t\tclient: client,\n\t\tname: name,\n\t\ttags: GetTags(tagOptions...),\n\t}, nil\n}",
"func NewMeter(name string, snapshotInterval time.Duration) *Meter {\n\tm := Meter{}\n\tm.name = name\n\tm.printInterval = snapshotInterval\n\tm.Reset()\n\treturn &m\n}",
"func NewMeter(name string) metics.Meter {\n\tif !Enabled {\n\t\treturn new(metics.NilMeter)\n\t}\n\treturn metics.GetOrRegisterMeter(name, metics.DefaultRegistry)\n}",
"func New(p Params) (*Worker, error) {\n\tif p.SampleDir == \"\" {\n\t\treturn nil, fmt.Errorf(\"no sample directory set\")\n\t}\n\tif p.MeterAddr == \"\" {\n\t\treturn nil, fmt.Errorf(\"no meter address set\")\n\t}\n\tif p.Now == nil {\n\t\tp.Now = time.Now\n\t}\n\tif p.Interval == 0 {\n\t\tp.Interval = DefaultInterval\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tw := &Worker{\n\t\tp: p,\n\t\tctx: ctx,\n\t\tclose: cancel,\n\t}\n\tw.wg.Add(1)\n\tgo func() {\n\t\tif err := w.run(); err != nil {\n\t\t\tlog.Printf(\"sample worker for meter at %q failed: %v\", w.p.MeterAddr, err)\n\t\t}\n\t}()\n\treturn w, nil\n}",
"func New(name string, rate float64, tags ...string) Metric {\n\treturn Metric{name, rate, tags}\n}",
"func NewMeter(options ...meterOption) *ProgressMeter {\n\tm := &ProgressMeter{\n\t\tlogger: &progressLogger{},\n\t\tstartTime: time.Now(),\n\t\tfileIndex: make(map[string]int64),\n\t\tfileIndexMutex: &sync.Mutex{},\n\t\tfinished: make(chan interface{}),\n\t}\n\n\tfor _, opt := range options {\n\t\topt(m)\n\t}\n\n\treturn m\n}",
"func New() MME {\n\t// TODO: Implement this!\n\toperationCosts = make(map[rpcs.Operation]int)\n\toperationCosts[rpcs.SMS] = -1\n\toperationCosts[rpcs.Call] = -5\n\toperationCosts[rpcs.Load] = 10\n\tm := new(mme)\n\tm.state = make(map[uint64]rpcs.MMEState)\n\tm.stateMutex = new(sync.Mutex)\n\treturn m\n}",
"func CreateMeter(numChannels uint32, names []string) (Meter, error) {\n\tnumNames := len(names)\n\tnumNames32 := uint32(numNames)\n\n\t/*\n\t * Check if number of channel names matches number of channels.\n\t */\n\tif numChannels != numNames32 {\n\t\treturn nil, fmt.Errorf(\"Failed to create channel meter. Requested channel meter for %d channels, but provided %d channel names.\", numChannels, numNames)\n\t} else {\n\t\tchannelMeters := make([]*channelMeterStruct, numChannels)\n\n\t\t/*\n\t\t * Create the channel meters.\n\t\t */\n\t\tfor i := range channelMeters {\n\t\t\tname := names[i]\n\n\t\t\t/*\n\t\t\t * Create a new channel meter.\n\t\t\t */\n\t\t\tchannelMeter := &channelMeterStruct{\n\t\t\t\tchannelName: name,\n\t\t\t\tenabled: false,\n\t\t\t\tcurrentValue: 0.0,\n\t\t\t\tpeakValue: 0.0,\n\t\t\t\tsampleCounter: 0,\n\t\t\t}\n\n\t\t\tchannelMeters[i] = channelMeter\n\t\t}\n\n\t\t/*\n\t\t * Create a new level meter.\n\t\t */\n\t\tmeter := meterStruct{\n\t\t\tchannelMeters: channelMeters,\n\t\t\tenabled: false,\n\t\t}\n\n\t\treturn &meter, nil\n\t}\n\n}",
"func New(opts ...Option) *Metric {\n\tvar options Options\n\tfor _, opt := range opts {\n\t\topt(&options)\n\t}\n\tm := &Metric{\n\t\tOptions: options,\n\t\thistograms: make(map[string]metrics.Histogram),\n\t\tkeyLabels: make(map[string]map[string]string),\n\t}\n\tgo m.watch()\n\treturn m\n}",
"func New(h, m int) Clock {\n\tclock := &Clock{hour: h, minute: m}\n\tclock.normalize()\n\treturn *clock\n}",
"func New(h, m int) Clock {\n\tm = (H*h + m) % D\n\tif m < 0 {\n\t\tm += D\n\t}\n\treturn Clock(m)\n}",
"func (c *Configuration) NewMeter(name string, options ...Option) (metric.Meter, error) {\n\tif !c.Enabled {\n\t\treturn metric.NoopProvider{}.Meter(name), nil\n\t}\n\n\tif c.AgentEndpoint == \"\" {\n\t\treturn metric.Meter{}, fmt.Errorf(\"missing agent address, please set environment variable %s\", envAgentEndpoint)\n\t}\n\n\topts := applyOptions(options...)\n\texporter := sotlp.SingletonExporter()\n\tif exporter == nil {\n\t\texp, err := otlp.NewExporter(otlp.WithInsecure(),\n\t\t\totlp.WithAddress(c.AgentEndpoint),\n\t\t\totlp.WithReconnectionPeriod(time.Minute),\n\t\t\totlp.WithGRPCDialOption(grpc.WithTimeout(5*time.Second)))\n\t\tif err != nil {\n\t\t\treturn metric.Meter{}, fmt.Errorf(\"failed to create the collector exporter: %w\", err)\n\t\t}\n\t\texporter = exp\n\t\tsotlp.SetExporter(exporter)\n\t\topts.Logger.With(zap.String(\"agentEndpoint\", c.AgentEndpoint)).Info(\"success to otlp agent\")\n\t}\n\t// exporter.Stop()\n\n\tif meterPusher == nil {\n\t\tmeterPusher = push.New(\n\t\t\tbasic.New(\n\t\t\t\tsimple.NewWithExactDistribution(),\n\t\t\t\texporter,\n\t\t\t),\n\t\t\texporter,\n\t\t\tpush.WithPeriod(30*time.Second),\n\t\t\t//push.WithTimeout(10*time.Second),\n\t\t)\n\t\tmeterProvider = meterPusher.Provider()\n\t\tmeterPusher.Start()\n\t\topts.Logger.With(zap.String(\"agentEndpoint\", c.AgentEndpoint)).Info(\"success to create metric pusher and start to push metric\")\n\t}\n\n\treturn meterProvider.Meter(name), nil\n}",
"func New(name errors.Op) *Metric {\n\treturn &Metric{\n\t\tName: name,\n\t}\n}",
"func New(hour, minute int) Clock {\n\t// create a clock with 0 minutes and then add what was passed as arguments\n\treturn Clock{0}.Add((hour * 60) + minute)\n}",
"func New(h, m int) Clock {\n\tminutes := h*60 + m\n\tminutes %= 1440\n\tif minutes < 0 {\n\t\tminutes += 1440\n\t}\n\n\treturn Clock{minutes}\n}",
"func (m *Manager) Meter(delay time.Duration) *Meter {\n\treturn &Meter{\n\t\tm: m,\n\t\tdelay: delay,\n\t\tnext: time.Now(),\n\t}\n}",
"func New() MME {\n\tvar m MME = new(mme)\n\treturn m\n}",
"func New(title string, x, y, width, height int) Device { return newDevice(title, x, y, width, height) }",
"func NewMeasurement(name string) Measurement {\n\tattrs := make(map[string]interface{})\n\treturn Measurement{\n\t\tName: name,\n\t\tAttributes: attrs,\n\t}\n}",
"func NewMeasurement(name string) *Measurement {\n\treturn &Measurement{\n\t\tname: name,\n\t\tfields: make(map[string]*Field),\n\t\tseries: make(map[uint32]*Series),\n\t}\n}",
"func New(\n\tname string,\n\ttags map[string]string,\n\tmeta map[string]string,\n\tfields map[string]interface{},\n\ttm time.Time,\n) (CCMetric, error) {\n\tm := &ccMetric{\n\t\tname: name,\n\t\ttags: make(map[string]string, len(tags)),\n\t\tmeta: make(map[string]string, len(meta)),\n\t\tfields: make(map[string]interface{}, len(fields)),\n\t\ttm: tm,\n\t}\n\n\t// deep copy tags, meta data tags and fields\n\tfor k, v := range tags {\n\t\tm.tags[k] = v\n\t}\n\tfor k, v := range meta {\n\t\tm.meta[k] = v\n\t}\n\tfor k, v := range fields {\n\t\tv := convertField(v)\n\t\tif v == nil {\n\t\t\tcontinue\n\t\t}\n\t\tm.fields[k] = v\n\t}\n\n\treturn m, nil\n}",
"func New(h int, m int) Clock {\n\t// Normalize time.\n\tconst minutesInADay = 24 * 60\n\tminutes := (h*60 + m) % minutesInADay\n\tif minutes < 0 {\n\t\tminutes += minutesInADay\n\t}\n\treturn Clock{\n\t\tminutes: minutes,\n\t}\n}",
"func New(hours, minutes int) Clock {\n\treturn Clock(((hours*MinutesInAnHour+minutes)%MinutesInADay + MinutesInADay) % MinutesInADay)\n}",
"func New(monster int) *Monster {\n\treturn &Monster{\n\t\tid: monster,\n\t\tInfo: monsterData[monster],\n\t\tDisplaced: Empty{},\n\t}\n}",
"func NewPingMeter() (pm *PingMeter) {\n\n\treturn\n}",
"func New(n int) MMR {\n\tpeaks, heights := peaksAndHeights(n)\n\treturn MMR{\n\t\tpeaks: peaks,\n\t\theights: heights,\n\t}\n}",
"func New(h int, m int) Clock {\n\tm, addHours := normalizeMinutes(m)\n\th = normalizeHours(h + addHours)\n\treturn Clock{h, m}\n}",
"func Meter(props *MeterProps, children ...Element) *MeterElem {\n\trProps := &_MeterProps{\n\t\tBasicHTMLElement: newBasicHTMLElement(),\n\t}\n\n\tif props != nil {\n\t\tprops.assign(rProps)\n\t}\n\n\treturn &MeterElem{\n\t\tElement: createElement(\"meter\", rProps, children...),\n\t}\n}",
"func NewMetric(rtype string) Metric {\n\treturn Metric{\n\t\tType: rtype,\n\t\tCurrent: map[string]int{},\n\t\tOwners: map[string]int{},\n\t}\n}",
"func New(hours int, minutes int) Clock {\n\tminutes = hours*minutesPerHour + minutes\n\tminutes %= minutesPerDay\n\tif minutes < 0 {\n\t\tminutes += minutesPerDay\n\t}\n\n\treturn Clock{minutes / minutesPerHour, minutes % minutesPerHour}\n}",
"func NewMeterProvider() *MeterProvider {\n\treturn &MeterProvider{}\n}",
"func New(h, m int) Clock {\n\tm = h*60 + m\n\tm %= 60 * 24\n\tif m < 0 {\n\t\treturn Clock{m + 60*24}\n\t}\n\treturn Clock{m}\n}",
"func NewMeasurement(name string) Measurement {\n\treturn Measurement{\n\t\tname: name,\n\t\ttagSet: map[string]string{},\n\t\tfieldSet: map[string]interface{}{},\n\t\ttimestamp: time.Now(),\n\t}\n}",
"func NewMetrics(component string, sampleRate float64, client metrics.Client) BaseMetrics {\n\treturn BaseMetrics{\n\t\tcomponent: component,\n\t\trate: sampleRate,\n\t\tmetrics: client,\n\t\tmetMap: map[string]string{\n\t\t\t\"latency\": \"comp.\" + component + \".requests.latency\",\n\t\t\t\"request\": \"comp.\" + component + \".requests.%d\",\n\t\t\t\"mLatency\": \"comp.\" + component + \".requests.%s.latency\",\n\t\t\t\"mRequest\": \"comp.\" + component + \".requests.%s.%d\",\n\t\t},\n\t}\n}",
"func New(hour, minute int) Clock {\n\th, m := normalize(hour, minute)\n\treturn Clock{h, m}\n}",
"func (bar *Progress) New(start, total int) {\n\tbar.cur = start\n\tbar.total = total\n\tif bar.graph == \"\" {\n\t\tbar.graph = \"█\"\n\t}\n\tbar.percent = bar.getPercent()\n}",
"func New(m int64, c string) *Money {\n\treturn &Money{m, c}\n}",
"func New() (*T) {\n\n\tme := T{\n\t\tcount: 0,\n\t\tdatum: \"\",\n\t}\n\n\treturn &me\n}",
"func New(hour, minute int) Clock {\n\tminute += hour * 60\n\tminute %= 24 * 60\n\tif minute < 0 {\n\t\tminute += 24 * 60\n\t}\n\treturn Clock{minute}\n}",
"func New(hour, minute int) Clock {\n\thour, minute = normalize(hour, minute)\n\treturn Clock{hour, minute}\n}",
"func NewMeasurement(name string, idx *DatabaseIndex) *Measurement {\n\treturn &Measurement{\n\t\tName: name,\n\t\tfieldNames: make(map[string]struct{}),\n\t\tindex: idx,\n\n\t\tseries: make(map[string]*Series),\n\t\tseriesByID: make(map[uint64]*Series),\n\t\tseriesByTagKeyValue: make(map[string]map[string]seriesIDs),\n\t\tseriesIDs: make(seriesIDs, 0),\n\t}\n}",
"func New(ver, mcls, mtype uint8, params ...*params.Param) *Generic {\n\tg := &Generic{\n\t\tHeader: &Header{\n\t\t\tVersion: ver,\n\t\t\tReserved: 0,\n\t\t\tClass: mcls,\n\t\t\tType: mtype,\n\t\t},\n\t\tParams: params,\n\t}\n\tg.SetLength()\n\n\treturn g\n}",
"func NewProgressMeter(estFiles int, estBytes int64, dryRun bool) *ProgressMeter {\n\tlogger, err := newProgressLogger()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error creating progress logger: %s\\n\", err)\n\t}\n\n\treturn &ProgressMeter{\n\t\tlogger: logger,\n\t\tstartTime: time.Now(),\n\t\tfileIndex: make(map[string]int64),\n\t\tfinished: make(chan interface{}),\n\t\testimatedFiles: estFiles,\n\t\testimatedBytes: estBytes,\n\t\tdryRun: dryRun,\n\t}\n}",
"func New(hour, minute int) Clock {\n\ttotalMinutes := (hour*60 + minute) % 1440\n\tif totalMinutes < 0 {\n\t\ttotalMinutes = totalMinutes + 1440\n\t}\n\n\tm := totalMinutes % 60\n\th := totalMinutes / 60 % 24\n\n\treturn Clock{hour: h, minute: m}\n}",
"func New() *Metrics {\n\tm := &Metrics{\n\t\tBuildInfo: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: Namespace,\n\t\t\tSubsystem: Subsystem,\n\t\t\tName: \"build_info\",\n\t\t\tHelp: \"Build information\",\n\t\t}, []string{\"version\"}),\n\t}\n\n\t_ = prometheus.Register(m.BuildInfo)\n\t// TODO: implement metrics\n\treturn m\n}",
"func New(config Config, errCh chan<- error) *Librato {\n\tu, _ := url.Parse(apiEndpoint)\n\tu.User = url.UserPassword(config.Email, config.APIKey)\n\tu.Path = \"/v1/metrics\"\n\n\t// determine queue size\n\tqueueSize := 600\n\tif config.QueueSize > 0 {\n\t\tqueueSize = config.QueueSize\n\t}\n\n\t// start the publisher\n\tp := &publisher{\n\t\tmetricsURL: u,\n\t\tqueueSize: queueSize,\n\t\tmeasures: make(chan interface{}, queueSize),\n\t\tshutdown: make(chan chan struct{}),\n\t\terrors: errCh,\n\t}\n\tgo p.run(time.Second * 1)\n\n\treturn &Librato{publisher: p}\n}",
"func NewMeasurement(database, name string) Measurement {\n\tm := Measurement{\n\t\tDatabase: database,\n\t\tName: name,\n\t\tTimestamp: time.Now(),\n\t\tValues: make(map[string]string, 0),\n\t\tTags: make(map[string]string, 0),\n\t}\n\treturn m\n}",
"func (constructor *Constructor) New(resource string, specs *specs.ParameterMap) (codec.Manager, error) {\n\tif specs == nil {\n\t\treturn nil, ErrUndefinedSpecs{}\n\t}\n\n\treturn &Manager{\n\t\tresource: resource,\n\t\tspecs: specs.Property,\n\t}, nil\n}",
"func New(hour, minute int) Clock {\n\tvar c Clock\n\treturn c.Add(hour*60 + minute)\n}",
"func NewMetric(name string, fields []MetricField, tags []MetricTag) (Metric, error) {\n\tif err := ValidateMetricName(name, \"metric\"); err != nil {\n\t\treturn Metric{}, err\n\t}\n\n\tif len(fields) == 0 {\n\t\treturn Metric{}, errors.New(\"one or more metric fields are required\")\n\t}\n\n\tfor _, field := range fields {\n\t\tif err := ValidateMetricName(field.Name, \"field\"); err != nil {\n\t\t\treturn Metric{}, err\n\t\t}\n\t}\n\n\tif len(tags) > 0 {\n\t\tfor _, tag := range tags {\n\t\t\tif err := ValidateMetricName(tag.Name, \"tag\"); err != nil {\n\t\t\t\treturn Metric{}, err\n\t\t\t}\n\t\t}\n\t}\n\n\tmetric := Metric{\n\t\tVersionable: common.NewVersionable(),\n\t\tName: name,\n\t\tFields: fields,\n\t\tTimestamp: time.Now().UnixNano(),\n\t\tTags: tags,\n\t}\n\n\treturn metric, nil\n}",
"func New() *M {\n\tc := &M{}\n\tc.Component()\n\tc.items = make([]*js.Object, 0)\n\treturn c\n}",
"func New(hour, minute int) Clock {\n\treturn Clock(0).Add(hour*60 + minute)\n}",
"func New() Battery {\n\treturn Battery{\n\t\tConf: configuration{\n\t\t\tPrintTemplate: \"{{ .Status }} {{ .Power }} {{ .Time }}\",\n\t\t},\n\t}\n}",
"func New(hour, minute int) Clock {\n\tadjustedHour, adjustedMinute := convertHelper(hour, minute)\n\tc := Clock{adjustedHour, adjustedMinute}\n\treturn c\n}",
"func (x *fastReflection_DenomUnit) New() protoreflect.Message {\n\treturn new(fastReflection_DenomUnit)\n}",
"func New() handler.Handler {\n\treturn &ceilometerMetricHandler{\n\t\tceilo: ceilometer.New(),\n\t}\n}",
"func NewMining(minter sdk.AccAddress, tally int64) Mining {\n\treturn Mining{\n\t\tMinter: minter,\n\t\tLastTime: 0,\n\t\tTally: tally,\n\t}\n}",
"func New(issueser getIssueser, metricser metricser, queries map[string]string) *Monitoring {\n\tlastActiveIssues := make(map[string]map[string]model.Issue)\n\tfor queryName := range queries {\n\t\tlastActiveIssues[queryName] = make(map[string]model.Issue)\n\t}\n\n\treturn &Monitoring{\n\t\tissueser: issueser,\n\t\tmetricser: metricser,\n\t\tlastActiveIssues: lastActiveIssues,\n\t\tqueries: queries,\n\t}\n}",
"func New() *SystemMetrics {\n\treturn &SystemMetrics{}\n}",
"func (m *podMetrics) New() runtime.Object {\n\treturn &metrics.PodMetrics{}\n}",
"func NewIskratelMsan() *IskratelMsan {\r\n var t = &IskratelMsan{}\r\n\r\n return t\r\n}",
"func newMonitor(ui cli.Ui, client *api.Client, length int) *monitor {\n\tmon := &monitor{\n\t\tui: &cli.PrefixedUi{\n\t\t\tInfoPrefix: \"==> \",\n\t\t\tOutputPrefix: \" \",\n\t\t\tErrorPrefix: \"==> \",\n\t\t\tUi: ui,\n\t\t},\n\t\tclient: client,\n\t\tstate: newEvalState(),\n\t\tlength: length,\n\t}\n\treturn mon\n}",
"func New() *Mediator {\n\tconfig := cfg.New()\n\taddress := fmt.Sprintf(\"%s:%s\", config.RPCHost, config.RPCPort)\n\tpool := pools.NewResourcePool(func() (pools.Resource, error) {\n\t\tconn, err := grpc.Dial(address, grpc.WithInsecure())\n\t\tclient := pb.NewDailyBonusClient(conn)\n\t\treturn &ResourceConn{\n\t\t\tconn,\n\t\t\tclient,\n\t\t}, err\n\t}, config.RPCConnectionPool.InitialCapacity, config.RPCConnectionPool.MaxCapacity, config.RPCConnectionPool.IdleTimeout)\n\treturn &Mediator{\n\t\tclientPool: pool,\n\t\tconfig: &config,\n\t\tpoolMutex: &sync.Mutex{},\n\t}\n}",
"func New(label string, units UnitOfMeasurement, value string) *PerfData {\n\tif value != \"\" && !valueCheck.MatchString(value) {\n\t\tpanic(\"invalid value\")\n\t}\n\tr := &PerfData{}\n\tr.Label = label\n\tr.units = units\n\tif value == \"\" {\n\t\tr.value = \"U\"\n\t} else {\n\t\tr.value = value\n\t}\n\treturn r\n}",
"func New() *Metrics {\n\treturn &Metrics{\n\t\tSectionCounts: make(map[string]int),\n\t}\n}",
"func New(r *chi.Mux, log *logging.Logger, m servermetrics.Metrics) *API {\n\tapi := &API{\n\t\tmetrics: m,\n\t\tstartedAt: time.Now(),\n\t\tminuteDecValues: make(map[*dmsg.SessionCommon]uint64),\n\t\tminuteEncValues: make(map[*dmsg.SessionCommon]uint64),\n\t\tsecondDecValues: make(map[*dmsg.SessionCommon]uint64),\n\t\tsecondEncValues: make(map[*dmsg.SessionCommon]uint64),\n\t\trouter: r,\n\t}\n\tr.Use(httputil.SetLoggerMiddleware(log))\n\tr.Get(\"/health\", api.health)\n\treturn api\n}",
"func NewMetric(id string, name string, uri string) *Metric {\n\tthis := Metric{}\n\tthis.Id = id\n\tthis.Name = name\n\tthis.Uri = uri\n\treturn &this\n}",
"func newMetrics() *Metrics {\n\treturn newMetricsFrom(DefaultMetricsOpts)\n}",
"func New(hours, minutes int) Time {\n\th := (hours + minutes/60) % 24\n\tm := minutes % 60\n\n\tfor m < 0 {\n\t\tm += 60\n\t\th--\n\t}\n\n\tfor h < 0 {\n\t\th += 24\n\t}\n\n\treturn Time{\n\t\tminutes: h*60 + m,\n\t}\n}",
"func New(r, c int) M {\n\tvals := make([]Frac, r*c)\n\tfor i := range vals {\n\t\tvals[i] = NewScalarFrac(0)\n\t}\n\n\treturn M{r: r, c: c, values: vals}\n}",
"func Meter(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"meter\", Attributes: attrs, Children: children}\n}",
"func newMetrics() *metrics {\n\treturn new(metrics)\n}",
"func New() Info {\n\treturn &hardwareInfo{}\n}",
"func New(tracker tracker.Tracker, opts Opts) Bar {\n\topts.ensureDefaults()\n\tunits := united.UnitsNone\n\tif tracker.ByteAmount() != nil {\n\t\tunits = united.UnitsBytes\n\t}\n\n\tb := &bar{\n\t\ttracker: tracker,\n\t\topts: opts,\n\t\ttheme: state.GetTheme(),\n\t\tunits: units,\n\t\tscale: 1.0,\n\n\t\tfinished: false,\n\t\tfinishChan: make(chan struct{}),\n\t}\n\ttracker.OnFinish(b.finish)\n\tgo b.writer()\n\treturn b\n}",
"func New(fetcherConfig *config.MetainfoFetcherConfig) (fetcher *MetainfoFetcher, err error) {\n\tclientConfig := torrent.Config{}\n\t// Well, it seems this is the right way to convert speed -> rate.Limiter\n\t// https://github.com/anacrolix/torrent/blob/master/cmd/torrent/main.go\n\tif fetcherConfig.UploadRateLimiter != -1 {\n\t\tclientConfig.UploadRateLimiter = rate.NewLimiter(rate.Limit(fetcherConfig.UploadRateLimiter*1024), 256<<10)\n\t}\n\tif fetcherConfig.DownloadRateLimiter != -1 {\n\t\tclientConfig.DownloadRateLimiter = rate.NewLimiter(rate.Limit(fetcherConfig.DownloadRateLimiter*1024), 1<<20)\n\t}\n\n\tclient, err := torrent.NewClient(&clientConfig)\n\n\tfetcher = &MetainfoFetcher{\n\t\ttorrentClient: client,\n\t\tresults: make(chan Result, fetcherConfig.QueueSize),\n\t\tqueueSize: fetcherConfig.QueueSize,\n\t\ttimeout: fetcherConfig.Timeout,\n\t\tmaxDays: fetcherConfig.MaxDays,\n\t\tnewTorrentsOnly: fetcherConfig.FetchNewTorrentsOnly,\n\t\tbaseFailCooldown: fetcherConfig.BaseFailCooldown,\n\t\tmaxFailCooldown: fetcherConfig.MaxFailCooldown,\n\t\tdone: make(chan int, 1),\n\t\tfailedOperations: make(map[uint]time.Time),\n\t\tnumFails: make(map[uint]int),\n\t\twakeUp: time.NewTicker(time.Second * time.Duration(fetcherConfig.WakeUpInterval)),\n\t}\n\n\treturn\n}",
"func New(h int, m int) Clock {\n\tc := (h*60 + m) % minutesInDay\n\n\tfor c < 0 {\n\t\tc += minutesInDay\n\t}\n\n\treturn Clock(c)\n}",
"func New(hour, minute int) Clock {\n\treturn Clock(modulus(hour*60+minute, minutesInDay))\n}",
"func New(configFile string) (Bench, error) {\n\tb := Bench{}\n\tvar err error\n\tif b.config, err = config(configFile); err != nil {\n\t\treturn b, err\n\t}\n\treturn b, nil\n}",
"func NewMetal(albedo Color, roughness float64) Metal {\n\treturn Metal{Albedo: albedo, Rough: roughness}\n}",
"func New() *Manager {\n\treturn &Manager{\n\t\tdevices: make(map[string]Modem),\n\t\thandleAdd: func(m Modem){_ = m},\n\t\thandleRemove: func(m Modem){_ = m},\n\t\thandleUpdate: func(m Modem){_ = m},\n\t}\n}",
"func New(counter metrics.Counter, latency metrics.Histogram, logger log.Logger) Logger {\n\treturn Logger{\n\t\tcallUpdate: make(chan interface{}),\n\t\tcallError: make(chan error),\n\t\trequestCount: counter,\n\t\trequestLatency: latency,\n\t\tlogger: logger,\n\t}\n}",
"func New(base mb.BaseMetricSet) (mb.MetricSet, error) {\n\n\tconfig := struct{}{}\n\n\tif err := base.Module().UnpackConfig(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &MetricSet{\n\t\tBaseMetricSet: base,\n\t}, nil\n}",
"func New(name string) *Module {\n\tm := &Module{\n\t\tbatteryName: name,\n\t\tscheduler: timing.NewScheduler(),\n\t}\n\tl.Label(m, name)\n\tl.Register(m, \"scheduler\", \"format\")\n\tm.format.Set(format{})\n\tm.RefreshInterval(3 * time.Second)\n\t// Construct a simple template that's just the available battery percent.\n\tm.OutputTemplate(outputs.TextTemplate(`BATT {{.RemainingPct}}%`))\n\treturn m\n}",
"func New(hours int, minutes int) Clock {\n\tt := time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC)\n\tt = t.Add(time.Hour * time.Duration(hours))\n\tt = t.Add(time.Minute * time.Duration(minutes))\n\n\treturn Clock{t.Hour(), t.Minute()}\n}",
"func New(t time.Time) *Clock {\n\treturn NewWithStep(t, 0)\n}",
"func New(c Config) (Monitor, error) {\n\tm := &monitor{\n\t\tblankThreshold: blankThreshold,\n\t\ttip: []string{\"\"},\n\t\tpath: c.Path,\n\t\tscanner: c.Scanner,\n\t\tsorter: c.Sorter,\n\t}\n\n\tif err := m.sync(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m, nil\n}",
"func New(expire, maid int) (*Cache){\n if expire==0 {\n expire = defaultExpiringDuration\n }\n if maid==0 {\n maid = defaultMaidDuration\n }\n\n expireDuration, _ := time.ParseDuration(fmt.Sprintf(\"%dm\", expire))\n maidDuration, _ := time.ParseDuration(fmt.Sprintf(\"%dm\", maid))\n\n //Make sure that no one is calling New at the same time.\n //Lock and Unlock the same mutex and set the old cache as invalid.\n cache.cacheMutex.Lock()\n cache.isValid = false\n cache.cacheMutex.Unlock()\n\n //Create the new cache\n cache = &Cache{\n cache: map[string]value{},\n expire: expireDuration,\n maid: maidDuration,\n isValid: false}\n\n go callMaid(cache)\n\n //Set cache as valid before returning\n cache.isValid = true\n return cache\n}",
"func New(base mb.BaseMetricSet) (mb.MetricSet, error) {\n\tvar config Config\n\n\tif err := base.Module().UnpackConfig(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &MetricSet{\n\t\tBaseMetricSet: base,\n\t\tcfg: config,\n\t}, nil\n}",
"func newmetric(name string, kind metricKind, tags []string, common bool) *metric {\n\treturn &metric{\n\t\tname: name,\n\t\tkind: kind,\n\t\ttags: append([]string{}, tags...),\n\t\tcommon: common,\n\t}\n}",
"func New(bus drivers.I2C) Device {\n\treturn Device{\n\t\tbus: bus,\n\t\tpowerCtl: powerCtl{\n\t\t\tmeasure: 1,\n\t\t},\n\t\tdataFormat: dataFormat{\n\t\t\tsensorRange: RANGE_2G,\n\t\t},\n\t\tbwRate: bwRate{\n\t\t\tlowPower: 1,\n\t\t\trate: RATE_100HZ,\n\t\t},\n\t\tAddress: AddressLow,\n\t}\n}",
"func New(minLat, maxLat, minLong, maxLong float64) Client {\n\treturn Client{\n\t\tminLat: minLat,\n\t\tmaxLat: maxLat,\n\t\tminLong: minLong,\n\t\tmaxLong: maxLong,\n\t}\n}",
"func Metal(index int32) Device {\n return Device{KDLMetal, index}\n}",
"func (s *Service) New(ctx context.Context, params *light.NewParams) (*light.Scan, error) {\n\t_, span := trace.StartSpan(ctx, \"hue.lights.new\")\n\tdefer span.End()\n\n\tctx = context.WithValue(ctx, hue.UserKey{}, params.GetUser())\n\tctx = context.WithValue(ctx, hue.HostKey{}, params.GetHost())\n\n\tres, err := s.hue.NewLights(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tscan, ok := res.(*light.Scan)\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"failed to convert '%T' to *lights.Scan\", res)\n\t}\n\n\treturn scan, nil\n}",
"func New(settings ...Option) *Info {\n\ti := &Info{}\n\ti.Set(settings...)\n\treturn i\n}",
"func New(base mb.BaseMetricSet) (mb.MetricSet, error) {\n\n\tconfig := struct{}{}\n\tif err := base.Module().UnpackConfig(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &MetricSet{\n\t\tBaseMetricSet: base,\n\t\tJmxClient: psoft.GetPsoftJMXClient(),\n\t}, nil\n}",
"func NewSmart(pickupConfidence float64, callConfidence float64,\n aloneConfidence float64,\n pickupRuns int, pickupDeterminizations int,\n callRuns int, callDeterminizations int,\n playRuns int, playDeterminizations int,\n aloneRuns int, aloneDeterminizations int) (*SmartPlayer) {\n\n return &SmartPlayer{\n pickupConfidence,\n callConfidence,\n aloneConfidence,\n pickupRuns,\n pickupDeterminizations,\n callRuns,\n callDeterminizations,\n playRuns,\n playDeterminizations,\n aloneRuns,\n aloneDeterminizations,\n }\n}",
"func NewMetric(name string, prog string, kind Kind, keys ...string) *Metric {\n\tm := &Metric{Name: name, Program: prog, Kind: kind,\n\t\tKeys: make([]string, len(keys), len(keys)),\n\t\tLabelValues: make([]*LabelValue, 0)}\n\tcopy(m.Keys, keys)\n\treturn m\n}",
"func New() *Prober {\n\treturn newForTest(time.Now, newRealTicker)\n}",
"func NewMetric(asset, method string, backend MetricsBackend) Metric {\n\tm := Metric{}\n\n\tm.backend = backend\n\tm.methodName = method\n\tm.startTime = time.Now()\n\tm.asset = asset\n\n\tm.backend.AddMethod(m.asset, method)\n\treturn m\n}"
] | [
"0.6899709",
"0.6802081",
"0.6737444",
"0.66954213",
"0.6548257",
"0.6525749",
"0.6503611",
"0.64407635",
"0.63265437",
"0.612432",
"0.5977774",
"0.59515846",
"0.59335434",
"0.5911891",
"0.5897539",
"0.5890825",
"0.5890561",
"0.58305466",
"0.58248305",
"0.5815958",
"0.5770586",
"0.5769807",
"0.5768072",
"0.57628417",
"0.5751877",
"0.5741924",
"0.57267696",
"0.5711325",
"0.5702508",
"0.5695079",
"0.5694533",
"0.5691486",
"0.56862295",
"0.5681234",
"0.56414366",
"0.56233436",
"0.56108934",
"0.5602679",
"0.55888283",
"0.55871874",
"0.5577002",
"0.5574096",
"0.5560242",
"0.55547506",
"0.55357397",
"0.5534261",
"0.55338657",
"0.5526467",
"0.5522441",
"0.5486933",
"0.5467335",
"0.5460902",
"0.5454856",
"0.54514146",
"0.5448433",
"0.54425776",
"0.5439266",
"0.5431807",
"0.5430335",
"0.5419925",
"0.5405198",
"0.5404749",
"0.53934294",
"0.5386821",
"0.5385955",
"0.53760993",
"0.53674394",
"0.53632045",
"0.5347166",
"0.5338815",
"0.53322494",
"0.53208613",
"0.53118837",
"0.529858",
"0.52953136",
"0.52929896",
"0.5283002",
"0.5278523",
"0.52778476",
"0.527088",
"0.52660936",
"0.5265931",
"0.5257226",
"0.5246451",
"0.5240478",
"0.52385074",
"0.5237661",
"0.5234075",
"0.5232164",
"0.5220792",
"0.52165073",
"0.521261",
"0.52124745",
"0.52095324",
"0.5205043",
"0.5201212",
"0.51975924",
"0.5193682",
"0.51857173",
"0.5179317"
] | 0.71873426 | 0 |
NoteToTime converts a notes.Duration to a time.Duration based on the meter | func (m Meter) NoteToTime(noteVal notes.Duration) time.Duration {
return time.Duration((float64(noteVal/m.BeatValue) / m.BeatsPerMinute) * float64(time.Minute))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func minutesToDuration(n uint8) Duration {\n\treturn Duration(time.Duration(n) * time.Minute)\n}",
"func (e *Exact) convertToDuration() time.Duration {\n\tif isValidUnitOfTime(e.Unit) {\n\t\treturn convertTimeToDuration(e.Quantity, e.Unit)\n\t}\n\tpanic(\"'unit' is not a valid unit of time\")\n}",
"func (r Rest) Duration(measure time.Duration) time.Duration {\n\tif Duration(r) == None {\n\t\treturn 0\n\t}\n\t//the fraction of the measure the note takes\n\tfraq := 1. / math.Pow(2., float64(r))\n\n\treturn time.Duration(float64(measure) * fraq)\n}",
"func (m Meter) NoteToFreq(noteVal notes.Duration) float64 {\n\tduration := m.NoteToTime(noteVal)\n\treturn 1 / float64(duration.Seconds())\n}",
"func ToDuration(value interface{}) (time.Duration, error) {\n\tvalue = indirect(value)\n\n\tvar s string\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn 0, nil\n\tcase time.Duration:\n\t\treturn v, nil\n\tcase int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8, float32, float64:\n\t\ti, _ := ToInt64(value)\n\t\treturn time.Duration(i), nil\n\tcase string:\n\t\ts = v\n\tcase []byte:\n\t\ts = string(v)\n\tcase fmt.Stringer:\n\t\ts = v.String()\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"unable to cast %#v of type %T to Duration\", v, v)\n\t}\n\n\tif strings.ContainsAny(s, \"nsuµmh\") {\n\t\treturn time.ParseDuration(s)\n\t}\n\treturn time.ParseDuration(s + \"ns\")\n}",
"func adjTime(context interface{}, value string) (time.Time, error) {\n\n\t// The default value is in seconds unless overridden.\n\t// #time:0 Current date/time\n\t// #time:-3600 3600 seconds in the past\n\t// #time:3m\t\t3 minutes in the future.\n\n\t// Possible duration types.\n\t// \"ns\": int64(Nanosecond),\n\t// \"us\": int64(Microsecond),\n\t// \"ms\": int64(Millisecond),\n\t// \"s\": int64(Second),\n\t// \"m\": int64(Minute),\n\t// \"h\": int64(Hour),\n\n\t// Do we have a single value?\n\tif len(value) == 1 {\n\t\tval, err := strconv.Atoi(value[0:1])\n\t\tif err != nil {\n\t\t\treturn time.Time{}.UTC(), fmt.Errorf(\"Invalid duration : %q\", value[0:1])\n\t\t}\n\n\t\tif val == 0 {\n\t\t\treturn time.Now().UTC(), nil\n\t\t}\n\n\t\treturn time.Now().Add(time.Duration(val) * time.Second).UTC(), nil\n\t}\n\n\t// Do we have a duration type and where does the\n\t// actual duration value end\n\tvar typ string\n\tvar end int\n\n\t// The end byte position for the last character in the string.\n\tePos := len(value) - 1\n\n\t// Look at the very last character.\n\tt := value[ePos:]\n\tswitch t {\n\n\t// Is this a minute or hour? [3m]\n\tcase \"m\", \"h\":\n\t\ttyp = t\n\t\tend = ePos // Position of last chr in value.\n\n\t// Is this a second or other duration? [3s or 3us]\n\tcase \"s\":\n\t\ttyp = t // s for 3s\n\t\tend = ePos // 3 for 3s\n\n\t\t// Is this smaller than a second? [ns, us, ms]\n\t\tif len(value) > 2 {\n\t\t\tt := value[ePos-1 : ePos]\n\t\t\tswitch t {\n\t\t\tcase \"n\", \"u\", \"m\":\n\t\t\t\ttyp = value[ePos-1:] // us for 3us\n\t\t\t\tend = ePos - 1 // 3 for 3us\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\ttyp = \"s\" // s for 3600\n\t\tend = ePos + 1 // 0 for 3600\n\t}\n\n\t// Check if we are to negative the value.\n\tvar start int\n\tif value[0] == '-' {\n\t\tstart = 1\n\t}\n\n\t// Check the remaining bytes is an integer value.\n\tval, err := strconv.Atoi(value[start:end])\n\tif err != nil {\n\t\treturn time.Time{}.UTC(), fmt.Errorf(\"Invalid duration : %q\", value[start:end])\n\t}\n\n\t// Do we have to negate the value?\n\tif start == 1 {\n\t\tval *= -1\n\t}\n\n\t// Calcuate the time value.\n\tswitch typ {\n\tcase \"ns\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Nanosecond).UTC(), nil\n\tcase \"us\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Microsecond).UTC(), nil\n\tcase \"ms\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Millisecond).UTC(), nil\n\tcase \"m\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Minute).UTC(), nil\n\tcase \"h\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Hour).UTC(), nil\n\tdefault:\n\t\treturn time.Now().Add(time.Duration(val) * time.Second).UTC(), nil\n\t}\n}",
"func convertTime(time uint64, stream_uot, target_uot UnitOfTime) uint64 {\n\tunitmultiplier := map[UnitOfTime]uint64{\n\t\tUOT_NS: 1000000000,\n\t\tUOT_US: 1000000,\n\t\tUOT_MS: 1000,\n\t\tUOT_S: 1}\n\treturn time / unitmultiplier[stream_uot] * unitmultiplier[target_uot]\n}",
"func (pomo *Pomo) GetDuration() string {\n\n\t// if pomo is off do not output anything\n\tif pomo.Status == OFF {\n\t\treturn \"\"\n\t}\n\n\t// if pomo run out of time that was set\n\t// make a blinking animation and send ntification\n\tif pomo.Time < 0 {\n\n\t\t// if user not notified\n\t\tif !pomo.Notified {\n\n\t\t\t// notify the user\n\t\t\tgo notifyUser(NOTIFICATION_MESSAGE)\n\n\t\t\tpomo.Notified = true\n\t\t}\n\n\t\t// emoji_id is a number between 0 and 1\n\t\temoji_id := (pomo.Time.Milliseconds() / 1000 % 2) * (-1)\n\n\t\treturn fmt.Sprintf(\"%s%s\\n\", pomo.Blink[emoji_id], pomo.Time)\n\t}\n\n\treturn fmt.Sprintf(\"%s%s\\n\", pomo.Emoji, pomo.Time)\n}",
"func (e PrecisionTiming) durationToMs(x time.Duration) float64 {\n\treturn float64(x) / float64(time.Millisecond)\n}",
"func (q MetricTicks) Duration(tempoBPM uint32, deltaTicks uint32) time.Duration {\n\tif q == 0 {\n\t\tq = defaultMetric\n\t}\n\t// (60000 / T) * (d / R) = D[ms]\n\t//\tdurQnMilli := 60000 / float64(tempoBPM)\n\t//\t_4thticks := float64(deltaTicks) / float64(uint16(q))\n\tres := 60000000000 * float64(deltaTicks) / (float64(tempoBPM) * float64(uint16(q)))\n\t//fmt.Printf(\"what: %vns\\n\", res)\n\treturn time.Duration(int64(math.Round(res)))\n\t//\treturn time.Duration(roundFloat(durQnMilli*_4thticks, 0)) * time.Millisecond\n}",
"func msToDuration(ms int64) time.Duration {\n\treturn time.Duration(ms * int64(time.Millisecond))\n}",
"func durToMsec(dur time.Duration) string {\n\treturn fmt.Sprintf(\"%dms\", dur/time.Millisecond)\n}",
"func (d *Delay) TimeDuration() time.Duration {\n\treturn time.Duration(d.Duration*1000) * time.Millisecond\n}",
"func (n *Note) measure() {\n\tvar samples int\n\tlength := wholeNote + (wholeNote / 100 * 4 * (4 - n.tempo)) // 4% per tempo unit\n\tswitch n.duration {\n\tcase 'W':\n\t\tsamples = length\n\tcase 'H':\n\t\tsamples = length / 2\n\tcase 'Q':\n\t\tsamples = length / 4\n\tcase 'E':\n\t\tsamples = length / 8\n\tcase 'S':\n\t\tsamples = length / 16\n\tcase 'T':\n\t\tsamples = length / 32\n\tcase 'I':\n\t\tsamples = length / 64\n\t}\n\n\tif samples > 0 {\n\t\t// Apply dot measure\n\t\tif n.dotted {\n\t\t\tsamples += samples / 2\n\t\t}\n\t}\n\n\tn.samples = samples\n}",
"func (d Duration) TimeDuration() time.Duration {\n\treturn time.Duration(int64(d) / Millisecond * int64(time.Millisecond))\n}",
"func ToDuration(i interface{}) (d time.Duration, err error) {\n\ti = indirect(i)\n\n\tswitch s := i.(type) {\n\tcase time.Duration:\n\t\treturn s, nil\n\tcase int64:\n\t\td = time.Duration(s)\n\t\treturn\n\tcase float64:\n\t\td = time.Duration(s)\n\t\treturn\n\tcase string:\n\t\td, err = time.ParseDuration(s)\n\t\treturn\n\tdefault:\n\t\terr = fmt.Errorf(\"unable to cast %#v to Duration\\n\", i)\n\t\treturn\n\t}\n}",
"func ToTime(t uint64) time.Time {\n\tseconds := (t & 0xFFFFFFFF00000000) >> 32\n\tfractional := float64(t&0x00000000FFFFFFFF) / float64(0xFFFFFFFF)\n\td := time.Duration(seconds)*time.Second + time.Duration(fractional*1e9)*time.Nanosecond\n\n\treturn time.Unix(0, 0).Add(-2208988800 * time.Second).Add(d)\n}",
"func (ts Timespec) ToDuration() time.Duration {\n\treturn time.Duration(ts.ToNsecCapped())\n}",
"func toMilliseconds(duration time.Duration) float64 {\n\tif duration < time.Microsecond*10 {\n\t\treturn 0\n\t}\n\n\tms := float64(duration) / float64(time.Millisecond)\n\t// Round time to 0.02 precision\n\treturn math.Round(ms*100) / 100\n}",
"func DurationToTimeMillisField(duration time.Duration) zapcore.Field {\n\treturn zap.Float32(\"grpc.time_ms\", durationToMilliseconds(duration))\n}",
"func msToTime(t int64) time.Time {\n\treturn time.Unix(t/int64(1000), (t%int64(1000))*int64(1000000))\n}",
"func secondsToDuration(seconds float64) time.Duration {\n\tttl := seconds * float64(time.Second)\n\treturn time.Duration(ttl)\n}",
"func secondsToDuration(seconds float64) time.Duration {\n\tttl := seconds * float64(time.Second)\n\treturn time.Duration(ttl)\n}",
"func convertMillToTime(originalTime int64) time.Time {\n\ti := time.Unix(0, originalTime*int64(time.Millisecond))\n\treturn i\n}",
"func (gdb *Gdb) getTimeDuration(duration int) int64 {\n\treturn time.Now().Add(time.Duration(duration)*time.Second).Unix() + 8*3600\n}",
"func timeToPtr(t time.Duration) *time.Duration {\n\treturn &t\n}",
"func tempoToPulseInterval(t Bpm) time.Duration {\n\n\treturn time.Duration((uSecInMin/(t/10.00))/ppqn) * time.Microsecond\n}",
"func NoteAtTime(t, sr int, note Note) float64 {\n\tsum := 0.0\n\tmultiplier := (2.0 * math.Pi) / float64(sr)\n\tfor i := 0; i < len(note.Frequency); i++ {\n\t\tsum += math.Sin((multiplier * (note.Frequency[i] * note.Octave)) * float64(t))\n\t}\n\treturn sum\n}",
"func ToDurationE(i interface{}) (time.Duration, error) {\n\treturn cast.ToDurationE(i)\n}",
"func ToDuration(i interface{}) time.Duration {\n\treturn cast.ToDuration(i)\n}",
"func (r *Range) convertToDuration() time.Duration {\n\tvar pick int64\n\tif r.High >= r.Low {\n\t\tpick = rand.Int63n(r.High+1-r.Low) + r.Low\n\t\tif isValidUnitOfTime(r.Unit) {\n\t\t\treturn convertTimeToDuration(pick, r.Unit)\n\t\t}\n\t\tpanic(\"'unit' is not a valid unit of time\")\n\t}\n\tpanic(\"'high' cannot be less than 'low'\")\n}",
"func (d *Duration) GetTimeDuration() time.Duration {\n\treturn time.Duration(d.Year * yToNano + d.Month * monthToNano +\n\t\td.Day * dToNano + d.Hour * hToNano + d.Minute * mToNano + \n\t\td.Second * sToNano)\n}",
"func (i *InputInlineQueryResultVoiceNote) GetVoiceNoteDuration() (value int32) {\n\tif i == nil {\n\t\treturn\n\t}\n\treturn i.VoiceNoteDuration\n}",
"func MicroSecToDuration(msec int) time.Duration {\n\treturn time.Duration(msec) * time.Microsecond\n}",
"func getDuration(seconds int) time.Duration {\n\treturn time.Duration(seconds) * time.Second\n}",
"func (tv Timeval) ToDuration() time.Duration {\n\treturn time.Duration(tv.ToNsecCapped())\n}",
"func (e2 *PicoSecondTimeStamp) Duration(e1 *PicoSecondTimeStamp) *PicoSecondDuration {\n\tresult := &PicoSecondDuration{\n\t\tEpoch: int32(e2.Epoch - e1.Epoch),\n\t\tPicoSeconds: int64(e2.PicoSeconds - e1.PicoSeconds),\n\t}\n\n\tif result.PicoSeconds < 0 && result.Epoch > 0 {\n\t\tresult.Epoch = result.Epoch - 1\n\t\tresult.PicoSeconds = result.PicoSeconds + 1000000000000\n\t}\n\treturn result\n}",
"func (p *PodStatusInformation) ConvertTime(tlocal *time.Location) {\n\n\tp.FinishedAt = p.FinishedAt.In(tlocal)\n\tp.StartedAt = p.StartedAt.In(tlocal)\n\n}",
"func hoursToDuration(n uint8) Duration {\n\treturn Duration(time.Duration(n) * time.Hour)\n}",
"func (t timeFlag) Duration() time.Duration {\n\treturn time.Duration(t)\n}",
"func durtoTV(d time.Duration) (int64, int64) {\n\tsec := int64(d / nanoPerSec)\n\tmicro := int64((int64(d) - sec*nanoPerSec) / 1000)\n\n\treturn sec, micro\n}",
"func timeToMillis(t time.Time) float64 {\n\treturn float64(t.UnixNano() / 1000000)\n}",
"func (decoder *Decoder) ByteOffsetToDur(offset int32) time.Duration {\n\treturn time.Duration(offset/decoder.byteRate) * time.Second\n}",
"func durationHook(from, to reflect.Type, data reflect.Value) (reflect.Value, error) {\n\tif from.Kind() != reflect.String || to != _typeOfDuration {\n\t\treturn data, nil\n\t}\n\n\td, err := time.ParseDuration(data.String())\n\treturn reflect.ValueOf(d), err\n}",
"func halfHoursToDuration(n uint8) Duration {\n\treturn Duration(time.Duration(n) * 30 * time.Minute)\n}",
"func (m *RedundantAssignmentAlertConfiguration) GetDuration()(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration) {\n val, err := m.GetBackingStore().Get(\"duration\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration)\n }\n return nil\n}",
"func (t ntpTime) Duration() time.Duration {\n\tsec := (t >> 32) * nanoPerSec\n\tfrac := (t & 0xffffffff) * nanoPerSec >> 32\n\treturn time.Duration(sec + frac)\n}",
"func (t *Track) Duration() float64 {\n\treturn float64(t.duration) / float64(t.globalTimescale)\n}",
"func (d Duration) TimeDuration() time.Duration {\n\tdl := ast.DurationLiteral(d)\n\tdd, _ := ast.DurationFrom(&dl, time.Time{})\n\treturn dd\n}",
"func (d Duration) TimeDuration() time.Duration {\n\tdl := ast.DurationLiteral(d)\n\tdd, _ := ast.DurationFrom(&dl, time.Time{})\n\treturn dd\n}",
"func durationFromMvhdAtom(mvhdStart int64, mvhdLength int64, file *os.File) (int, error) {\n\tbuffer := make([]byte, 8)\n\t_, err := file.ReadAt(buffer, mvhdStart+20) // The timescale field starts at the 21st byte of the mvhd atom\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// The timescale is bytes 21-24.\n\t// The duration is bytes 25-28\n\ttimescale := convertBytesToInt(buffer[0:4]) // This is in number of units per second\n\tdurationInTimeScale := convertBytesToInt(buffer[4:])\n\treturn int(durationInTimeScale) / int(timescale), nil\n}",
"func Duration(i interface{}) time.Duration {\n\t// It's already this type.\n\tif v, ok := i.(time.Duration); ok {\n\t\treturn v\n\t}\n\ts := String(i)\n\tif !utils.IsNumeric(s) {\n\t\td, _ := time.ParseDuration(s)\n\t\treturn d\n\t}\n\treturn time.Duration(Int64(i))\n}",
"func ClockTimeDuration(a ClockTime, b ClockTime) ClockTime {\n\ta_I := ClockTimetoI(a)\n\tb_I := ClockTimetoI(b)\n\tduration := a_I - b_I // if a < b then we assume that there's one day difference\n\tif duration < 0 {\n\t\treturn ItoClockTime(1440 - b_I + a_I)\n\t}\n\treturn ItoClockTime(duration)\n}",
"func (d *Duration) Time() (time.Duration, error) {\n\tp, err := period.Parse(string(*d))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn p.DurationApprox(), nil\n}",
"func (t Tie) Duration(measure time.Duration) time.Duration {\n\tout := time.Duration(0)\n\tfor _, p := range t {\n\t\tout += p.Duration(measure)\n\t}\n\treturn out\n}",
"func (t ntpTimeShort) Duration() time.Duration {\n\tsec := uint64(t>>16) * nanoPerSec\n\tfrac := uint64(t&0xffff) * nanoPerSec\n\tnsec := frac >> 16\n\tif uint16(frac) >= 0x8000 {\n\t\tnsec++\n\t}\n\treturn time.Duration(sec + nsec)\n}",
"func (t *timer) Measure() time.Duration {\n\treturn t.stopTime.Sub(t.startTime)\n}",
"func DurationToTimespec(dur time.Duration) Timespec {\n\treturn NsecToTimespec(dur.Nanoseconds())\n}",
"func ToFloat(d time.Duration) (seconds float64) {\n\treturn float64(d) / float64(time.Second)\n}",
"func msToTime(ms int64) time.Time {\n\treturn time.Unix(0, ms*int64(time.Millisecond))\n}",
"func (t HighresTimestamp) Duration() time.Duration {\n\treturn time.Duration(uint64(t) * uint64(tbinfo.numer) / uint64(tbinfo.denom)))\n}",
"func ReadTime() time.Duration {\n\tif drvDMA.timerMemory == nil {\n\t\treturn 0\n\t}\n\tv := uint64(drvDMA.timerMemory.counterHigh)<<32 | uint64(drvDMA.timerMemory.counterLow)\n\tif v == 0 {\n\t\t// BUG(maruel): Implement using AVS_CNT0_REG on A64.\n\t\treturn 0\n\t}\n\t// BUG(maruel): Assumes that counterCtrl & timerPLL6 is not set.\n\tconst tick = time.Microsecond / 24\n\treturn time.Duration(v) * tick\n}",
"func (t ntpTime) Duration() time.Duration {\n\tsec := (t >> 32) * nanoPerSec\n\tfrac := (t & 0xffffffff) * nanoPerSec\n\tnsec := frac >> 32\n\tif uint32(frac) >= 0x80000000 {\n\t\tnsec++\n\t}\n\treturn time.Duration(sec + nsec)\n}",
"func getNoteTicker(secondsPerNote float64) *time.Ticker {\n\tnoteDurationString := fmt.Sprintf(\"%.5f\", secondsPerNote) + \"s\"\n\tnoteDuration, _ := time.ParseDuration(noteDurationString)\n\n\treturn time.NewTicker(noteDuration)\n}",
"func ToMillis(t time.Time) int64 {\n\treturn t.UnixNano() / 1e6\n}",
"func calculateDuration(r *record) time.Duration {\n\tdateFormat := \"2006-01-0215:04:05\"\n\n\tstart, err := time.Parse(dateFormat, r.date+r.startTime)\n\tcheckErr(err)\n\n\tend, err := time.Parse(dateFormat, r.date+r.endTime)\n\tcheckErr(err)\n\n\tpause, err := time.ParseDuration(r.pause)\n\tcheckErr(err)\n\n\treturn end.Sub(start) - pause\n}",
"func (this *StrToMillis) Type() value.Type { return value.NUMBER }",
"func (item Item) GetDuration(name string) time.Duration {\n\tduration, _ := time.ParseDuration(\"0h0m0s\")\n\n\tswitch item[name].(type) {\n\tcase time.Duration:\n\t\tduration = item[name].(time.Duration)\n\tcase string:\n\t\tvar matched bool\n\t\tvar re *regexp.Regexp\n\t\tvalue := item[name].(string)\n\n\t\tmatched, _ = regexp.MatchString(`^\\d{2}:\\d{2}:\\d{2}$`, value)\n\n\t\tif matched {\n\t\t\tre, _ = regexp.Compile(`^(\\d{2}):(\\d{2}):(\\d{2})$`)\n\t\t\tall := re.FindAllStringSubmatch(value, -1)\n\n\t\t\tformatted := fmt.Sprintf(\"%sh%sm%ss\", all[0][1], all[0][2], all[0][3])\n\t\t\tduration, _ = time.ParseDuration(formatted)\n\t\t}\n\t}\n\treturn duration\n}",
"func MakeDuration(target string, def int) time.Duration {\n\tif !elapso.MatchString(target) {\n\t\treturn time.Duration(def)\n\t}\n\n\tmatchs := elapso.FindAllStringSubmatch(target, -1)\n\n\tif len(matchs) <= 0 {\n\t\treturn time.Duration(def)\n\t}\n\n\tmatch := matchs[0]\n\n\tif len(match) < 3 {\n\t\treturn time.Duration(def)\n\t}\n\n\tdur := time.Duration(ConvertToInt(match[1], def))\n\n\tmtype := match[2]\n\n\tswitch mtype {\n\tcase \"s\":\n\t\treturn dur * time.Second\n\tcase \"mcs\":\n\t\treturn dur * time.Microsecond\n\tcase \"ns\":\n\t\treturn dur * time.Nanosecond\n\tcase \"ms\":\n\t\treturn dur * time.Millisecond\n\tcase \"m\":\n\t\treturn dur * time.Minute\n\tcase \"h\":\n\t\treturn dur * time.Hour\n\tdefault:\n\t\treturn time.Duration(dur) * time.Second\n\t}\n}",
"func FootToMeters(f Foot) Meter { return Meter(f * 3) }",
"func timeFromJournalInt(t int64) time.Time {\n\tsecs := t / 1000000\n\tms := t % 1000000\n\treturn time.Unix(secs, ms).UTC()\n}",
"func Ms(duration time.Duration) float64 {\n\treturn float64(duration / time.Millisecond)\n}",
"func (t *Time) Diff(from Time) Duration {\n\tsec, nsec := normalizeTemporal(int64(t.Sec)-int64(from.Sec),\n\t\tint64(t.NSec)-int64(from.NSec))\n\treturn Duration{temporal{sec, nsec}}\n}",
"func (tv Timeval) ToTime() time.Time {\n\treturn time.Unix(tv.Sec, tv.Usec*1e3)\n}",
"func durationToMilliseconds(d time.Duration) (uint64, error) {\n\tif d < 0 {\n\t\treturn 0, fmt.Errorf(\"report period cannot be negative: %v\", d)\n\t}\n\n\treturn uint64(d / time.Millisecond), nil\n}",
"func (s Broker) TimingDuration(name string, duration time.Duration) {\n\ttimeMillis := int(duration.Nanoseconds() / 1000000)\n\ts.Timing(name, timeMillis)\n}",
"func (d *Duration) UnmarshalTOML(b []byte) error {\n\tvar err error\n\tb = bytes.Trim(b, `'`)\n\n\t// see if we can directly convert it\n\td.Duration, err = time.ParseDuration(string(b))\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\t// Parse string duration, ie, \"1s\"\n\tif uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 {\n\t\td.Duration, err = time.ParseDuration(uq)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// First try parsing as integer seconds\n\tsI, err := strconv.ParseInt(string(b), 10, 64)\n\tif err == nil {\n\t\td.Duration = time.Second * time.Duration(sI)\n\t\treturn nil\n\t}\n\t// Second try parsing as float seconds\n\tsF, err := strconv.ParseFloat(string(b), 64)\n\tif err == nil {\n\t\td.Duration = time.Second * time.Duration(sF)\n\t\treturn nil\n\t}\n\n\treturn nil\n}",
"func durationToWord(in Interval) string {\n\tswitch in {\n\tcase FifteenSecond:\n\t\treturn \"fifteensecond\"\n\tcase OneMin:\n\t\treturn \"onemin\"\n\tcase ThreeMin:\n\t\treturn \"threemin\"\n\tcase FiveMin:\n\t\treturn \"fivemin\"\n\tcase TenMin:\n\t\treturn \"tenmin\"\n\tcase FifteenMin:\n\t\treturn \"fifteenmin\"\n\tcase ThirtyMin:\n\t\treturn \"thirtymin\"\n\tcase OneHour:\n\t\treturn \"onehour\"\n\tcase TwoHour:\n\t\treturn \"twohour\"\n\tcase FourHour:\n\t\treturn \"fourhour\"\n\tcase SixHour:\n\t\treturn \"sixhour\"\n\tcase EightHour:\n\t\treturn \"eighthour\"\n\tcase TwelveHour:\n\t\treturn \"twelvehour\"\n\tcase OneDay:\n\t\treturn \"oneday\"\n\tcase ThreeDay:\n\t\treturn \"threeday\"\n\tcase FifteenDay:\n\t\treturn \"fifteenday\"\n\tcase OneWeek:\n\t\treturn \"oneweek\"\n\tcase TwoWeek:\n\t\treturn \"twoweek\"\n\tcase OneMonth:\n\t\treturn \"onemonth\"\n\tcase OneYear:\n\t\treturn \"oneyear\"\n\tdefault:\n\t\treturn \"notfound\"\n\t}\n}",
"func (tj *TensorFlowJob) Duration() time.Duration {\n\tjob := tj.tfjob\n\n\tif job.Status.StartTime == nil ||\n\t\tjob.Status.StartTime.IsZero() {\n\t\treturn 0\n\t}\n\n\tif !job.Status.CompletionTime.IsZero() {\n\t\treturn job.Status.CompletionTime.Time.Sub(job.Status.StartTime.Time)\n\t}\n\n\tif tj.GetStatus() == \"FAILED\" {\n\t\tcond := getPodLatestCondition(tj.chiefPod)\n\t\tif !cond.LastTransitionTime.IsZero() {\n\t\t\treturn cond.LastTransitionTime.Time.Sub(job.Status.StartTime.Time)\n\t\t} else {\n\t\t\tlog.Debugf(\"the latest condition's time is zero of pod %s\", tj.chiefPod.Name)\n\t\t}\n\t}\n\n\treturn metav1.Now().Sub(job.Status.StartTime.Time)\n}",
"func ToTimeE(i interface{}) (time.Time, error) {\n\treturn cast.ToTimeE(i)\n}",
"func SecondsToTime(n int64) Time {\n\treturn Time(n * 1e6)\n}",
"func DurationToTimeval(dur time.Duration) Timeval {\n\treturn NsecToTimeval(dur.Nanoseconds())\n}",
"func convertToSeconds(hours, minutes, seconds, microseconds string) {\n\thoursInSeconds, _ := strconv.Atoi(hours)\n\tminutesInSeconds, _ := strconv.Atoi(minutes)\n\tformattedSeconds, _ := strconv.Atoi(seconds)\n\tformattedSeconds = formattedSeconds + (hoursInSeconds * 3600) + (minutesInSeconds * 60)\n\n\tvar buffer bytes.Buffer\n\n\tbuffer.WriteString(strconv.Itoa(formattedSeconds))\n\tbuffer.WriteString(\".\")\n\tbuffer.WriteString(microseconds)\n\n\tfmt.Println(\"BarDuration: \" + buffer.String())\n}",
"func stampToTime(quadPart C.LONGLONG) time.Time {\n\tft := windows.Filetime{\n\t\tHighDateTime: uint32(quadPart >> 32),\n\t\tLowDateTime: uint32(quadPart & math.MaxUint32),\n\t}\n\treturn time.Unix(0, ft.Nanoseconds())\n}",
"func toTimeSeconds(value string) (int64, error) {\n\t//is serial format?\n\tserial, err := strconv.ParseFloat(value, 64)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\treturn int64(serial * 86400), nil\n}",
"func timeDiff(t1 time.Time, t2 time.Time, duration int) string {\n\tif t1.After(t2) {\n\t\ttimeList := diff(t1, t2)\n\t\treturn timeDiffHelper(timeList) + \"AGO\"\n\t}\n\n\ttimeList := diff(t2, t1)\n\tif duration == 1 {\n\t\treturn timeDiffHelper(timeList)\n\t}\n\treturn \"IN \" + timeDiffHelper(timeList)\n\n}",
"func (d ISODuration) ToDuration() (time.Duration, error) {\r\n\treturn d.duration.ToDuration()\r\n}",
"func (br *BandwidthMeter) Duration() (duration time.Duration) {\n duration = br.lastRead.Sub(br.start)\n return\n}",
"func to_ms(nano int64) int64 {\n\treturn nano / int64(time.Millisecond)\n}",
"func (ts Timespec) ToTime() time.Time {\n\treturn time.Unix(ts.Sec, ts.Nsec)\n}",
"func prettyDuration(t int64) string {\n\tif t > 1000000000 {\n\t\treturn fmt.Sprintf(\"%.2fs\", float64(t)/float64(1000000000))\n\t}\n\treturn fmt.Sprintf(\"%.2fms\", float64(t)/float64(1000000))\n}",
"func parseIntoDuration(str string) (time.Duration, error) {\n\tvar d time.Duration\n\t/**\n\t * important! When editing this regex, make sure that you specify the \"or\"s as\n\t * whole -> subset instead of subset -> whole, that is \"second|sec|s\" instead of\n\t * \"s|sec|second\". Otherwise, you will find yourself matching \"s\", but with a tailing\n\t * \"econd\"\n\t**/\n\tre := regexp.MustCompile(\"([-+][0-9]+)(hour|hr|h|minute|min|m|second|sec|s|days|day|d)\")\n\tres := re.FindAllStringSubmatch(str, -1)\n\tif len(res) != 1 {\n\t\treturn d, errors.New(\"Invalid timespec: \" + str)\n\t}\n\n\t// handle amount\n\ti, err := strconv.ParseInt(res[0][1], 10, 64)\n\tif err != nil {\n\t\treturn d, err\n\t}\n\td = time.Duration(i)\n\n\t// handle units\n\tswitch res[0][2] {\n\tcase \"h\", \"hr\", \"hour\":\n\t\td *= time.Hour\n\tcase \"m\", \"min\", \"minute\":\n\t\td *= time.Minute\n\tcase \"s\", \"sec\", \"second\":\n\t\td *= time.Second\n\tcase \"d\", \"days\", \"day\":\n\t\td *= 24 * time.Hour\n\tdefault:\n\t\treturn d, errors.New(\"Timespec needs valid units:\" + str)\n\t}\n\n\treturn d, nil\n}",
"func (d Duration) StringUsingUnits(unit units.Unit) string {\n\treturn d.convert(units.Second, unit).toString()\n}",
"func (note Note) AtTime(t, sr int) float64 {\n\treturn NoteAtTime(t, sr, note)\n}",
"func DurationInWords(d time.Duration) string {\n\n\tif d >= time.Second && d <= (time.Second*4) {\n\t\treturn fmt.Sprintf(lssthnd, 5, \"seconds\")\n\t} else if d >= (time.Second*5) && d < (time.Second*10) {\n\t\treturn fmt.Sprintf(lssthnd, 10, \"seconds\")\n\t} else if d >= (time.Second*10) && d < (time.Second*20) {\n\t\treturn fmt.Sprintf(lssthnd, 20, \"seconds\")\n\t} else if d >= (time.Second*20) && d < (time.Second*40) {\n\t\treturn \"half a minute\"\n\t} else if d >= (time.Second*40) && d < (time.Second*60) {\n\t\treturn fmt.Sprintf(lssthns, \"minute\")\n\t} else if d >= (time.Second*60) && d < time.Minute+(time.Second*30) {\n\t\treturn \"1 minute\"\n\t} else if d >= time.Minute+(time.Second*30) && d < (time.Minute*44)+(time.Second*30) {\n\t\treturn fmt.Sprintf(\"%d minutes\", (d / time.Minute))\n\t} else if d >= (time.Minute*44)+(time.Second*30) && d < (time.Minute*89)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, d/time.Hour, \"hour\")\n\t} else if d >= (time.Minute*89)+(time.Second*30) && d < (time.Hour*29)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, (d / time.Hour), \"hours\")\n\t} else if d >= (time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (time.Hour*41)+(time.Minute*59)+(time.Second*30) {\n\t\treturn \"1 day\"\n\t} else if d >= (time.Hour*41)+(time.Minute*59)+(time.Second*30) && d < (day*29)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(\"%d days\", d/(time.Hour*24))\n\t} else if d >= (day*29)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (day*59)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, 1, \"month\")\n\t} else if d >= (day*59)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (year) {\n\t\treturn fmt.Sprintf(aboutnd, d/month+1, \"months\")\n\t} else if d >= year && d < year+(3*month) {\n\t\treturn fmt.Sprintf(aboutnd, 1, \"year\")\n\t} else if d >= year+(3*month) && d < year+(9*month) {\n\t\treturn \"over 1 year\"\n\t} else if d >= year+(9*month) && d < (year*2) {\n\t\treturn \"almost 2 years\"\n\t} else {\n\t\treturn fmt.Sprintf(aboutnd, d/year, \"years\")\n\t}\n}",
"func (mes *MarkerEncodingScheme) TimeUnit() Marker { return mes.timeUnit }",
"func GetSignalTime(timeUnit int32, refDate time.Time) time.Time {\n\tvar t time.Time\n\tswitch timeUnit {\n\tcase SignalTimeUnit_NOW:\n\t\t{\n\t\t\treturn refDate.UTC().Truncate(time.Hour * 24)\n\t\t}\n\tcase SignalTimeUnit_MONTH:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -30)\n\t\t}\n\tcase SignalTimeUnit_BIMONTH:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -60)\n\t\t}\n\tcase SignalTimeUnit_QUARTER:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -90)\n\t\t}\n\tcase SignalTimeUnit_HALFYEAR:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -180)\n\t\t}\n\tcase SignalTimeUnit_THIRDQUARTER:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -270)\n\t\t}\n\tcase SignalTimeUnit_YEAR:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -365)\n\t\t}\n\t}\n\n\treturn t.Truncate(time.Hour * 24)\n}",
"func (r Rest) TickDuration(quarter uint16) uint16 {\n\tif Duration(r) == None {\n\t\treturn 0\n\t}\n\t//the fraction of the measure the note takes\n\tfraq := 1. / math.Pow(2., float64(r))\n\n\treturn uint16(float64(4*quarter) * fraq)\n}",
"func (p *FrameLease) TimeToLive() time.Duration {\n\tv := binary.BigEndian.Uint32(p.body.Bytes())\n\treturn time.Millisecond * time.Duration(v)\n}",
"func StringToTimeDurationHookFunc() DecodeHookFunc {\n\treturn func(\n\t\tf reflect.Type,\n\t\tt reflect.Type,\n\t\tdata interface{}) (interface{}, error) {\n\t\tif f.Kind() != reflect.String {\n\t\t\treturn data, nil\n\t\t}\n\t\tif t != reflect.TypeOf(time.Duration(5)) {\n\t\t\treturn data, nil\n\t\t}\n\n\t\t// Convert it by parsing\n\t\treturn time.ParseDuration(data.(string))\n\t}\n}"
] | [
"0.6077799",
"0.5801322",
"0.5798061",
"0.5757803",
"0.5740865",
"0.57225984",
"0.5601669",
"0.5556431",
"0.54757607",
"0.5466047",
"0.54560196",
"0.54490495",
"0.5432458",
"0.5383502",
"0.53819966",
"0.5361781",
"0.53517246",
"0.53364056",
"0.5326453",
"0.5320531",
"0.5315199",
"0.5305441",
"0.5305441",
"0.5302505",
"0.5270126",
"0.52627414",
"0.524501",
"0.5244543",
"0.52388316",
"0.5228539",
"0.5218616",
"0.521388",
"0.51982343",
"0.51920253",
"0.51789004",
"0.5165287",
"0.5159179",
"0.5153292",
"0.5131149",
"0.51276755",
"0.5105739",
"0.5093638",
"0.50809175",
"0.5064148",
"0.50355005",
"0.50234497",
"0.501787",
"0.5013835",
"0.50025123",
"0.50025123",
"0.4991857",
"0.49719313",
"0.497044",
"0.49627566",
"0.4951521",
"0.4949176",
"0.49286336",
"0.49201325",
"0.49190268",
"0.4912904",
"0.49082246",
"0.49065349",
"0.4897654",
"0.489457",
"0.48943564",
"0.4893668",
"0.48843744",
"0.4879211",
"0.48695427",
"0.48614013",
"0.48591074",
"0.48539004",
"0.48528993",
"0.4851961",
"0.48487324",
"0.4843809",
"0.48289412",
"0.48275736",
"0.48221448",
"0.48167086",
"0.4815702",
"0.48103678",
"0.48057002",
"0.47917658",
"0.47831908",
"0.47743097",
"0.4769937",
"0.47650608",
"0.47537443",
"0.4748513",
"0.47459233",
"0.47332752",
"0.47317386",
"0.47299075",
"0.47254565",
"0.47192535",
"0.47164682",
"0.47150022",
"0.47093403",
"0.47052515"
] | 0.8580701 | 0 |
NoteToFreq converts a notes.Duration into a frequency with period equal to that note length | func (m Meter) NoteToFreq(noteVal notes.Duration) float64 {
duration := m.NoteToTime(noteVal)
return 1 / float64(duration.Seconds())
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func toFreq(s semi, tonic freq) freq {\n\treturn tonic * freq(math.Pow(root12, float64(s)))\n}",
"func (c *Config) FrequencyDur() time.Duration {\n\tconfigLock.RLock()\n\tdefer configLock.RUnlock()\n\n\tif c.Frequency == 0 {\n\t\treturn callhomeCycleDefault\n\t}\n\n\treturn c.Frequency\n}",
"func (m Meter) NoteToTime(noteVal notes.Duration) time.Duration {\n\treturn time.Duration((float64(noteVal/m.BeatValue) / m.BeatsPerMinute) * float64(time.Minute))\n}",
"func (m *TermsExpiration) GetFrequency()(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration) {\n return m.frequency\n}",
"func NewNote(vol float64, len time.Duration, freq ...float64) *Note {\n\treturn &Note{\n\t\tVolume: vol,\n\t\tFrequency: freq,\n\t\tOctave: 1.0,\n\t\tLength: len,\n\t}\n}",
"func (c *Config) GetFrequency() time.Duration {\n\tif c.FrequencyInMS == 0 {\n\t\treturn time.Second\n\t}\n\n\treturn time.Duration(c.FrequencyInMS) * time.Millisecond\n}",
"func (n *Note) measure() {\n\tvar samples int\n\tlength := wholeNote + (wholeNote / 100 * 4 * (4 - n.tempo)) // 4% per tempo unit\n\tswitch n.duration {\n\tcase 'W':\n\t\tsamples = length\n\tcase 'H':\n\t\tsamples = length / 2\n\tcase 'Q':\n\t\tsamples = length / 4\n\tcase 'E':\n\t\tsamples = length / 8\n\tcase 'S':\n\t\tsamples = length / 16\n\tcase 'T':\n\t\tsamples = length / 32\n\tcase 'I':\n\t\tsamples = length / 64\n\t}\n\n\tif samples > 0 {\n\t\t// Apply dot measure\n\t\tif n.dotted {\n\t\t\tsamples += samples / 2\n\t\t}\n\t}\n\n\tn.samples = samples\n}",
"func frequencyFromSemitone(semitone int) float32 {\n\t// A4 is 440 Hz, 12 semitones per octave\n\treturn float32(440 * math.Pow(2, float64(semitone-69)/12))\n}",
"func (o AnomalySubscriptionOutput) Frequency() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *AnomalySubscription) pulumi.StringOutput { return v.Frequency }).(pulumi.StringOutput)\n}",
"func (o KubernetesClusterMaintenanceWindowAutoUpgradePtrOutput) Frequency() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *KubernetesClusterMaintenanceWindowAutoUpgrade) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Frequency\n\t}).(pulumi.StringPtrOutput)\n}",
"func Freq() float32 {\n\treturn global.Freq()\n}",
"func (j *ScheduledJob) Frequency() (time.Duration, error) {\n\tif !j.HasRecurringSchedule() {\n\t\treturn 0, errors.Newf(\n\t\t\t\"schedule %d is not periodic\", j.rec.ScheduleID)\n\t}\n\texpr, err := cronexpr.Parse(j.rec.ScheduleExpr)\n\tif err != nil {\n\t\treturn 0, errors.Wrapf(err,\n\t\t\t\"parsing schedule expression: %q; it must be a valid cron expression\",\n\t\t\tj.rec.ScheduleExpr)\n\t}\n\tnext := expr.Next(j.env.Now())\n\tnextNext := expr.Next(next)\n\treturn nextNext.Sub(next), nil\n}",
"func ConvertNanosecondsToHz(val float64) float64 {\n\treturn val / 1e7\n}",
"func NoteAtTime(t, sr int, note Note) float64 {\n\tsum := 0.0\n\tmultiplier := (2.0 * math.Pi) / float64(sr)\n\tfor i := 0; i < len(note.Frequency); i++ {\n\t\tsum += math.Sin((multiplier * (note.Frequency[i] * note.Octave)) * float64(t))\n\t}\n\treturn sum\n}",
"func (o KubernetesClusterMaintenanceWindowAutoUpgradeOutput) Frequency() pulumi.StringOutput {\n\treturn o.ApplyT(func(v KubernetesClusterMaintenanceWindowAutoUpgrade) string { return v.Frequency }).(pulumi.StringOutput)\n}",
"func tempoToPulseInterval(t Bpm) time.Duration {\n\n\treturn time.Duration((uSecInMin/(t/10.00))/ppqn) * time.Microsecond\n}",
"func (o ElastigroupScheduledTaskOutput) Frequency() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ElastigroupScheduledTask) *string { return v.Frequency }).(pulumi.StringPtrOutput)\n}",
"func SanitizeFrequency(frequency float64) uint64 {\n\t// 868.1 to 868100000 - but we will lose the decimals\n\tif frequency < 1000.0 {\n\t\tfrequency = frequency * 1000000\n\t}\n\n\t// 868400000000000 to 868400000\n\tif frequency > 1000000000 {\n\t\tfrequency = frequency / 1000000\n\t}\n\n\t// 869099976 to 869100000\n\tfrequency = math.Round(frequency/1000) * 1000\n\tfrequencyInt := uint64(frequency)\n\n\treturn frequencyInt\n}",
"func (o BeanstalkScheduledTaskOutput) Frequency() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BeanstalkScheduledTask) *string { return v.Frequency }).(pulumi.StringPtrOutput)\n}",
"func (o ScheduledAuditOutput) Frequency() ScheduledAuditFrequencyOutput {\n\treturn o.ApplyT(func(v *ScheduledAudit) ScheduledAuditFrequencyOutput { return v.Frequency }).(ScheduledAuditFrequencyOutput)\n}",
"func getNoteTicker(secondsPerNote float64) *time.Ticker {\n\tnoteDurationString := fmt.Sprintf(\"%.5f\", secondsPerNote) + \"s\"\n\tnoteDuration, _ := time.ParseDuration(noteDurationString)\n\n\treturn time.NewTicker(noteDuration)\n}",
"func (note Note) ToData(index, sr int) float64 {\n\tfreqLen := len(note.Frequency)\n\tvol := note.Volume\n\tif freqLen > 1 {\n\t\tvol = vol / float64(freqLen)\n\t}\n\treturn vol * note.AtTime(index, sr)\n}",
"func AutocorrelateFrequency(waveform []float64, sampleRate float64) float64 {\n\n\tsearchSize := len(waveform) / 2\n\n\ttolerance := 0.001\n\trms := 0.0\n\trmsMin := 0.008\n\n\tprevAssessedStrings := assessedStringsInLastFrame\n\n\tfor _, amplitude := range waveform {\n\t\trms += amplitude * amplitude\n\t}\n\n\trms = math.Sqrt(rms / float64(len(waveform)))\n\n\tif rms < rmsMin {\n\t\treturn 0\n\t}\n\n\ttime := (time.Now().UnixNano() / 1000000)\n\n\tif rms > lastRms+rmsThreshold {\n\t\tassessStringsUntilTime = time + 250\n\t}\n\n\tif time < assessStringsUntilTime {\n\t\tassessedStringsInLastFrame = true\n\n\t\tfor i, note := range notes {\n\t\t\toffset := int(math.Round(sampleRate / note.frequency))\n\t\t\tdifference := 0.0\n\n\t\t\tif !prevAssessedStrings {\n\t\t\t\tdifferences[i] = 0\n\t\t\t}\n\n\t\t\tfor j := 0; j < searchSize; j++ {\n\t\t\t\tcurrentAmp := waveform[j]\n\t\t\t\toffsetAmp := waveform[j+offset]\n\t\t\t\tdifference += math.Abs(currentAmp - offsetAmp)\n\t\t\t}\n\n\t\t\tdifference /= float64(searchSize)\n\n\t\t\tdifferences[i] += difference * float64(offset)\n\t\t}\n\t} else {\n\t\tassessedStringsInLastFrame = false\n\t}\n\n\tif !assessedStringsInLastFrame && prevAssessedStrings {\n\t\tlastMinDifference = argmin(differences)\n\t}\n\n\tassumedString := notes[lastMinDifference]\n\tsearchRange := 10\n\tactualFrequency := int(math.Round(sampleRate / assumedString.frequency))\n\tsearchStart := actualFrequency - searchRange\n\tsearchEnd := actualFrequency + searchRange\n\tsmallestDifference := math.Inf(1)\n\n\tfor i := searchStart; i < searchEnd; i++ {\n\t\tdifference := 0.0\n\n\t\tfor j := 0; j < searchSize; j++ {\n\t\t\tcurrentAmp := waveform[j]\n\t\t\toffsetAmp := waveform[j+i]\n\t\t\tdifference += math.Abs(currentAmp - offsetAmp)\n\t\t}\n\n\t\tdifference /= float64(searchSize)\n\n\t\tif difference < smallestDifference {\n\t\t\tsmallestDifference = difference\n\t\t\tactualFrequency = i\n\t\t}\n\n\t\tif difference < tolerance {\n\t\t\tactualFrequency = i\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\tlastRms = rms\n\n\treturn sampleRate / float64(actualFrequency)\n}",
"func durationToWord(in Interval) string {\n\tswitch in {\n\tcase FifteenSecond:\n\t\treturn \"fifteensecond\"\n\tcase OneMin:\n\t\treturn \"onemin\"\n\tcase ThreeMin:\n\t\treturn \"threemin\"\n\tcase FiveMin:\n\t\treturn \"fivemin\"\n\tcase TenMin:\n\t\treturn \"tenmin\"\n\tcase FifteenMin:\n\t\treturn \"fifteenmin\"\n\tcase ThirtyMin:\n\t\treturn \"thirtymin\"\n\tcase OneHour:\n\t\treturn \"onehour\"\n\tcase TwoHour:\n\t\treturn \"twohour\"\n\tcase FourHour:\n\t\treturn \"fourhour\"\n\tcase SixHour:\n\t\treturn \"sixhour\"\n\tcase EightHour:\n\t\treturn \"eighthour\"\n\tcase TwelveHour:\n\t\treturn \"twelvehour\"\n\tcase OneDay:\n\t\treturn \"oneday\"\n\tcase ThreeDay:\n\t\treturn \"threeday\"\n\tcase FifteenDay:\n\t\treturn \"fifteenday\"\n\tcase OneWeek:\n\t\treturn \"oneweek\"\n\tcase TwoWeek:\n\t\treturn \"twoweek\"\n\tcase OneMonth:\n\t\treturn \"onemonth\"\n\tcase OneYear:\n\t\treturn \"oneyear\"\n\tdefault:\n\t\treturn \"notfound\"\n\t}\n}",
"func (c *Context) GetTimerFrequency() uint64 {\n\treturn uint64(C.glfwGetTimerFrequency())\n}",
"func (c *Context) GetTimerFrequency() uint64 {\n\treturn uint64(C.glfwGetTimerFrequency())\n}",
"func (o KubernetesClusterMaintenanceWindowNodeOsPtrOutput) Frequency() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *KubernetesClusterMaintenanceWindowNodeOs) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Frequency\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o KubernetesClusterMaintenanceWindowNodeOsOutput) Frequency() pulumi.StringOutput {\n\treturn o.ApplyT(func(v KubernetesClusterMaintenanceWindowNodeOs) string { return v.Frequency }).(pulumi.StringOutput)\n}",
"func ToFloat(d time.Duration) (seconds float64) {\n\treturn float64(d) / float64(time.Second)\n}",
"func (s *Sound) Length() time.Duration {\n\treturn time.Duration(s.snd.Get(\"duration\").Float()) * time.Second\n}",
"func (m *TermsExpiration) SetFrequency(value *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration)() {\n m.frequency = value\n}",
"func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}",
"func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}",
"func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}",
"func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}",
"func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}",
"func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}",
"func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}",
"func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}",
"func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}",
"func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}",
"func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}",
"func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}",
"func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}",
"func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}",
"func (d *DFT) ToFreqRange() map[int]float64 {\n\tif d == nil {\n\t\treturn nil\n\t}\n\toutput := make(map[int]float64, len(d.Coefs)/2)\n\tfor i := 0; i < len(d.Coefs)/2; i++ {\n\t\tf := (i * d.SampleRate) / (len(d.Coefs))\n\t\t// calculate the magnitude\n\t\toutput[f] = math.Log10(math.Sqrt(math.Pow(real(d.Coefs[i]), 2) + math.Pow(imag(d.Coefs[i]), 2)))\n\t}\n\treturn output\n}",
"func (o InventorySchedulePtrOutput) Frequency() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *InventorySchedule) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Frequency\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o AnomalySubscriptionOutput) Frequency() AnomalySubscriptionFrequencyOutput {\n\treturn o.ApplyT(func(v *AnomalySubscription) AnomalySubscriptionFrequencyOutput { return v.Frequency }).(AnomalySubscriptionFrequencyOutput)\n}",
"func fseconds(d time.Duration) float64 { return float64(d) / float64(time.Second) }",
"func (e PrecisionTiming) durationToMs(x time.Duration) float64 {\n\treturn float64(x) / float64(time.Millisecond)\n}",
"func WithFrequency(f int32) Option {\n\treturn func(d *Daemon) {\n\t\td.Frequency = f\n\t\td.frequency = time.Duration(time.Duration(f) * time.Second)\n\t}\n}",
"func (c *Context) GetFreqCorrection() (freq int) {\n\treturn int(C.rtlsdr_get_freq_correction((*C.rtlsdr_dev_t)(c.dev)))\n}",
"func (r Rest) TickDuration(quarter uint16) uint16 {\n\tif Duration(r) == None {\n\t\treturn 0\n\t}\n\t//the fraction of the measure the note takes\n\tfraq := 1. / math.Pow(2., float64(r))\n\n\treturn uint16(float64(4*quarter) * fraq)\n}",
"func Frequency(frequency uint32) PmMetricsOption {\n\treturn func(args *PmMetrics) {\n\t\targs.frequency = frequency\n\t}\n}",
"func (r *FTW) FreqTuningWord() uint32 {\n\treturn binary.BigEndian.Uint32(r[:])\n}",
"func Frequency(s string) FreqMap {\n m := FreqMap{}\n for _, r := range s {\n m[r]++\n }\n return m\n}",
"func (r Rest) Duration(measure time.Duration) time.Duration {\n\tif Duration(r) == None {\n\t\treturn 0\n\t}\n\t//the fraction of the measure the note takes\n\tfraq := 1. / math.Pow(2., float64(r))\n\n\treturn time.Duration(float64(measure) * fraq)\n}",
"func (o InventoryScheduleOutput) Frequency() pulumi.StringOutput {\n\treturn o.ApplyT(func(v InventorySchedule) string { return v.Frequency }).(pulumi.StringOutput)\n}",
"func (s *ReplicationJob) SetFrequency(v int64) *ReplicationJob {\n\ts.Frequency = &v\n\treturn s\n}",
"func (m *Mixer) Frequency() uint {\n\treturn uint(C.al_get_mixer_frequency((*C.ALLEGRO_MIXER)(m)))\n}",
"func durationToMilliseconds(d time.Duration) (uint64, error) {\n\tif d < 0 {\n\t\treturn 0, fmt.Errorf(\"report period cannot be negative: %v\", d)\n\t}\n\n\treturn uint64(d / time.Millisecond), nil\n}",
"func (s *CreateReplicationJobInput) SetFrequency(v int64) *CreateReplicationJobInput {\n\ts.Frequency = &v\n\treturn s\n}",
"func (i *InputInlineQueryResultVoiceNote) GetVoiceNoteDuration() (value int32) {\n\tif i == nil {\n\t\treturn\n\t}\n\treturn i.VoiceNoteDuration\n}",
"func (f *Fs) Precision() time.Duration {\n\treturn time.Second\n}",
"func (f *Fs) Precision() time.Duration {\n\treturn time.Second\n}",
"func NewFrequency(v float64, s string) Frequency {\n\treturn Frequency(v) * frequency[s]\n}",
"func (r *rPIO) UpdatePollFreq(d time.Duration) error {\n\tif !r.open {\n\t\treturn fmt.Errorf(\"polling has not yet started\")\n\t}\n\n\t// Update the poller frequency\n\tr.poller.newPollFreq <- d\n\n\treturn nil\n}",
"func (r *Radio) Frequency() uint32 {\n\treturn r.freq\n}",
"func toMilliseconds(duration time.Duration) float64 {\n\tif duration < time.Microsecond*10 {\n\t\treturn 0\n\t}\n\n\tms := float64(duration) / float64(time.Millisecond)\n\t// Round time to 0.02 precision\n\treturn math.Round(ms*100) / 100\n}",
"func (o VolumeGroupSapHanaVolumeDataProtectionReplicationPtrOutput) ReplicationFrequency() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *VolumeGroupSapHanaVolumeDataProtectionReplication) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.ReplicationFrequency\n\t}).(pulumi.StringPtrOutput)\n}",
"func (mh *MidiLogger) HandleNote(n gm.Note) {\n\tif n.Vel == 0 {\n\t\tn.On = false\n\t}\n\tnotes := mh.keys[int(n.Ch)]\n\n\t// Update our note maps maps\n\tif n.On {\n\t\tnotes[n.Note] = n.Vel\n\t} else {\n\t\tnotes[n.Note] = 0\n\t}\n\tfmt.Printf(\"%s - %d.%d (%v)\\n\", n, mh.sixteenthsElapsed, mh.beatPosition, time.Since(mh.startTime))\n}",
"func (sound L8) Duration() float32 {\n\tif sound.SampleRate <= 0 {\n\t\treturn 0.0\n\t}\n\treturn float32(len(sound.Samples)) / sound.SampleRate\n}",
"func (r *RAM) FreqTuningWord() uint32 {\n\treturn binary.BigEndian.Uint32(r[0:4])\n}",
"func (l DNA8List) BaseFreq() (b [4]float64) {\n\tvar n [7]int\n\tfor _, s := range l {\n\t\tfor _, b := range s {\n\t\t\tn[b&6]++\n\t\t}\n\t}\n\tc := 1 / float64(n[0]+n[2]+n[4]+n[6])\n\tfor i := range b {\n\t\tb[i] = float64(n[i*2]) * c\n\t}\n\treturn\n}",
"func (fn *formulaFuncs) duration(settlement, maturity, coupon, yld, frequency, basis formulaArg) formulaArg {\n\tfrac := yearFrac(settlement.Number, maturity.Number, int(basis.Number))\n\tif frac.Type != ArgNumber {\n\t\treturn frac\n\t}\n\targumments := list.New().Init()\n\targumments.PushBack(settlement)\n\targumments.PushBack(maturity)\n\targumments.PushBack(frequency)\n\targumments.PushBack(basis)\n\tcoups := fn.COUPNUM(argumments)\n\tduration := 0.0\n\tp := 0.0\n\tcoupon.Number *= 100 / frequency.Number\n\tyld.Number /= frequency.Number\n\tyld.Number++\n\tdiff := frac.Number*frequency.Number - coups.Number\n\tfor t := 1.0; t < coups.Number; t++ {\n\t\ttDiff := t + diff\n\t\tadd := coupon.Number / math.Pow(yld.Number, tDiff)\n\t\tp += add\n\t\tduration += tDiff * add\n\t}\n\tadd := (coupon.Number + 100) / math.Pow(yld.Number, coups.Number+diff)\n\tp += add\n\tduration += (coups.Number + diff) * add\n\tduration /= p\n\tduration /= frequency.Number\n\treturn newNumberFormulaArg(duration)\n}",
"func (s *UpdateReplicationJobInput) SetFrequency(v int64) *UpdateReplicationJobInput {\n\ts.Frequency = &v\n\treturn s\n}",
"func (s *ServerReplicationParameters) SetFrequency(v int64) *ServerReplicationParameters {\n\ts.Frequency = &v\n\treturn s\n}",
"func (d TimeDivision) TicksPerQuarterNote() uint16 {\n\tif (d & 0x8000) != 0 {\n\t\treturn 0\n\t}\n\treturn uint16(d)\n}",
"func SilentNote(length time.Duration) *Note {\n\treturn &Note{\n\t\tVolume: 0.0,\n\t\tFrequency: []float64{0.0},\n\t\tOctave: 1.0,\n\t\tLength: length,\n\t}\n}",
"func microsecondsPerPulse(bpm float32) time.Duration {\n\treturn time.Duration((float32(Minute) * float32(Microsecond)) / (float32(Ppqn) * bpm))\n}",
"func (g *Keyboard) Add(freq float64) {\n\tg.l.Lock()\n\tdefer g.l.Unlock()\n\n\tbaseT := float64(g.totalSamples) / float64(g.sr)\n\tdurT := float64(g.dur) / float64(time.Second)\n\n\t// todo (bs): this fixed gain is pretty clumsy. It acts as something of a\n\t// safeguard to ensure that multiple notes can be played at the same time\n\t// without overwhelming the volume. I'd kinda guess this should be more\n\t// adaptive based on the number of concurrent notes - e.g. make a mapping\n\t// like: 1 note -> 0.4 gain; 2 notes -> 0.35 gain each; 3 notes -> 0.28 gain\n\t// each; 4 notes -> 0.25 gain each\n\t//\n\t// and have further notes have a fixed fraction of 1. This would need some\n\t// good internal smarts about how to downscale past gains for existing notes;\n\t// I'd say it'd require some better struct-based functions to make variability\n\t// easier to manage.\n\t// w := AmplifyWave(Gain(0.4), PianoNote(g.dur, freq))\n\n\tw := AmplifyWave(\n\t\tGain(0.4),\n\t\tPianoNote(g.dur, freq),\n\t)\n\n\tg.waves = append(g.waves, func(t float64) (float64, bool) {\n\t\tif t > baseT+durT {\n\t\t\treturn 0, true\n\t\t}\n\t\treturn w(t - baseT), false\n\t})\n}",
"func (t *Track) Duration() float64 {\n\treturn float64(t.duration) / float64(t.globalTimescale)\n}",
"func wordFreq(filename string, word string)(int, time.Duration){\n\tstartTime := time.Now()\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn -1000, time.Since(startTime)\n\t}\n\tdefer file.Close()\n\n\tscanner := bufio.NewScanner(file)\n\twc := 0\n\tfor scanner.Scan() {\n\t\twords := strings.Fields(scanner.Text())\n\t\tfor _, lword := range words {\n\t\t\tif lword == word || lword == word+\",\" || lword == word+\".\"{\n\t\t\t\twc += 1\n\t\t\t}\n\t\t}\n\t}\n\n\tdur := time.Since(startTime)\n\treturn wc, dur\n}",
"func (p *Posting) Frequency() uint64 {\n\treturn p.freq\n}",
"func tickspersecond() int64",
"func fmtDuration(d time.Duration) string {\n\treturn fmt.Sprintf(\"%.2fs\", d.Seconds())\n}",
"func (decoder *Decoder) ByteOffsetToDur(offset int32) time.Duration {\n\treturn time.Duration(offset/decoder.byteRate) * time.Second\n}",
"func (q MetricTicks) FractionalDuration(fractionalBPM float64, deltaTicks uint32) time.Duration {\n\tif q == 0 {\n\t\tq = defaultMetric\n\t}\n\t// (60000 / T) * (d / R) = D[ms]\n\t//\tdurQnMilli := 60000 / float64(tempoBPM)\n\t//\t_4thticks := float64(deltaTicks) / float64(uint16(q))\n\tres := 60000000000 * float64(deltaTicks) / (fractionalBPM * float64(uint16(q)))\n\t//fmt.Printf(\"what: %vns\\n\", res)\n\treturn time.Duration(int64(math.Round(res)))\n\t//\treturn time.Duration(roundFloat(durQnMilli*_4thticks, 0)) * time.Millisecond\n}",
"func (pm *PmMetrics) UpdateFrequency(frequency uint32) {\n\tpm.frequency = frequency\n}",
"func (r *TTNRandom) Freq() float32 {\n\treturn freqs[r.Interface.Intn(len(freqs))]\n}",
"func (w *wavData) writeNote(note string, time float32, amplitude float32, channels []int, blend bool, reset bool, relativeDuration int) {\n\tvar (\n\t\tnumChannels = w.numChannels\n\t\tsampleRate = w.sampleRate\n\n\t\t// to prevent sound artifacts\n\t\tfadeSeconds float32 = 0.001\n\n\t\t// calculating properties of given note\n\t\tsemitone, _ = semitoneFromNote(note)\n\t\tfrequency = float32(frequencyFromSemitone(semitone)) * math.Pi * 2 / float32(sampleRate)\n\n\t\t// amount of blocks to be written\n\t\tblocksOut = int(math.Round(float64(sampleRate) * float64(time)))\n\t\t// reduces sound artifacts by fading at last fadeSeconds\n\t\tnonZero = float32(blocksOut) - float32(sampleRate)*fadeSeconds\n\t\t// fade interval in samples\n\t\tfade = float32(sampleRate)*fadeSeconds + 1\n\n\t\t// index of start and stop samples\n\t\tstart = int(w.pointer)\n\t\tstop = len(w.data)\n\n\t\t// k = cached index of data\n\t\t// d = sample data value\n\t\tk int\n\t\td float32\n\t)\n\n\t// by default write to all channels\n\tif len(channels) == 0 {\n\t\tfor i := 0; i < int(numChannels); i++ {\n\t\t\tchannels = append(channels, i)\n\t\t}\n\t}\n\n\tskipChannels := make([]bool, numChannels)\n\tfor i := 0; i < len(skipChannels); i++ {\n\t\tskipChannels[i] = channels[i] == -1\n\t}\n\n\t// update existing data\n\tfor i := 0; i < blocksOut; i++ {\n\t\t// iterate through specified channels\n\t\tfor j := 0; j < len(channels); j++ {\n\t\t\tk = start + i*int(numChannels) + channels[j]\n\t\t\td = 0\n\n\t\t\tif frequency > 0 {\n\t\t\t\td = amplitude * float32(math.Sin(float64(frequency)*float64(i)))\n\t\t\t\tif float32(i) < fade {\n\t\t\t\t\td *= float32(i) / fade\n\t\t\t\t} else if float32(i) > nonZero {\n\t\t\t\t\td *= float32(blocksOut-i+1) / fade\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif blend {\n\t\t\t\tw.data[k] = d + w.data[k]\n\t\t\t} else {\n\t\t\t\tw.data[k] = d\n\t\t\t}\n\t\t}\n\t}\n\n\tend := maxInt(start+blocksOut*int(numChannels), stop) * (w.bitsPerSample >> 3)\n\tw.chunkSize = uint32(end + len(w.header) - 8)\n\tw.subChunk2Size = uint32(end)\n\n\tbinary.LittleEndian.PutUint32(w.header[4:8], w.chunkSize)\n\tbinary.LittleEndian.PutUint32(w.header[40:44], w.subChunk2Size)\n\n\tif !reset {\n\t\tw.pointer = uint(start + blocksOut*int(numChannels))\n\t}\n}",
"func updatePollFrequency(url string) int8 {\n\tvar (\n\t\tlastChange sql.NullString\n\t)\n\n\terr := db.QueryRow(\"SELECT last_change FROM podcasts WHERE feed_url = $1;\", url).Scan(&lastChange)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\t// Setup time for comparison\n\tt := time.Now()\n\n\t// no last change date, set default time\n\tif !lastChange.Valid {\n\t\treturn 4\n\t}\n\n\t// Parse lastChange into time\n\tlastChangeTime, err := time.Parse(time.RFC3339, lastChange.String)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\t// get the difference from now to lastChange\n\tdiff := t.Sub(lastChangeTime).Hours()\n\n\tif diff > 730 {\n\t\treturn 48\n\t} else if diff > 168 {\n\t\treturn 24\n\t} else if diff > 48 {\n\t\treturn 16\n\t} else if diff > 24 {\n\t\treturn 8\n\t} else {\n\t\treturn 4\n\t}\n}",
"func (p *Pitch) FrequencyInHertz(concertPitch float64) float64 {\n\treturn math.Pow(2, float64(A4().GetDistanceTo(p))/12.0) * concertPitch\n}",
"func (q *IntervalQueryRuleFuzzy) PrefixLength(prefixLength int) *IntervalQueryRuleFuzzy {\n\tq.prefixLength = &prefixLength\n\treturn q\n}",
"func (p *Periph) StoreFREQUENCY(f Freq) {\n\tp.frequency.Store(uint32(f))\n}",
"func (f *Fs) Precision() time.Duration {\n\treturn f.precision\n}",
"func GetTimeFrequency(lines []string) (timeFrequence map[string]int) {\n\ttimeFrequence = map[string]int{\n\t\t\"Morning\": 0,\n\t\t\"Noon\": 0,\n\t\t\"Afternoon\": 0,\n\t\t\"Evening\": 0,\n\t\t\"Night\": 0,\n\t}\n\n\tfor _, line := range lines {\n\t\thourTime := getHourTime(parseHour(line))\n\t\tif _, ok := timeFrequence[hourTime]; ok {\n\t\t\ttimeFrequence[hourTime]++\n\t\t}\n\t}\n\n\treturn\n}",
"func (t *ToneGenerator) Tone(freq, seconds float64, vol int32) []int32 {\n\tvar synthArray = make([]int32, int(seconds*t.sampleRate))\n\tdelta := freq * t.step\n\n\tfor i := 0; i < len(synthArray); i++ {\n\t\tsynthArray[i] = int32(t.wave(float64(i)*delta) * float64(vol))\n\n\t}\n\treturn synthArray\n}",
"func countRuneFreq(work [][]rune, acc *FreqMap, mu *sync.Mutex, wg *sync.WaitGroup) {\n\tfreq := FreqMap{} // this worker's count\n\n\tfor _, runes := range work {\n\t\tfor _, r := range runes {\n\t\t\tfreq[r]++\n\t\t}\n\t}\n\n\t// atomic update\n\tmu.Lock()\n\tfor r, count := range freq {\n\t\t(*acc)[r] += count\n\t}\n\tmu.Unlock()\n\n\twg.Done()\n}",
"func (o *PayPeriodDetails) GetPayFrequency() string {\n\tif o == nil || o.PayFrequency.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.PayFrequency.Get()\n}"
] | [
"0.6109282",
"0.56692827",
"0.5592371",
"0.54737693",
"0.5341398",
"0.5270148",
"0.52006036",
"0.5196711",
"0.50826305",
"0.5026439",
"0.5008344",
"0.50019354",
"0.49847287",
"0.4954853",
"0.4947973",
"0.4922762",
"0.49062866",
"0.48975572",
"0.48922068",
"0.48828414",
"0.48826703",
"0.48503333",
"0.4807786",
"0.4796074",
"0.47953132",
"0.47953132",
"0.47924343",
"0.4768234",
"0.47653648",
"0.4731927",
"0.4721248",
"0.4715538",
"0.4715538",
"0.4715538",
"0.4715538",
"0.4715538",
"0.4715538",
"0.4715538",
"0.4715538",
"0.4715538",
"0.4715538",
"0.4715538",
"0.4715538",
"0.4715538",
"0.4715538",
"0.4714661",
"0.46773726",
"0.46529272",
"0.46274608",
"0.46207258",
"0.462014",
"0.4612961",
"0.4601178",
"0.45749488",
"0.45575896",
"0.45520902",
"0.45391828",
"0.45371342",
"0.4528572",
"0.45119452",
"0.45013675",
"0.44932637",
"0.4485349",
"0.44812185",
"0.44812185",
"0.44774956",
"0.44760126",
"0.4468291",
"0.44388062",
"0.44294864",
"0.44292873",
"0.4422195",
"0.4419397",
"0.4413722",
"0.44132966",
"0.4409625",
"0.44007844",
"0.439679",
"0.43836528",
"0.43821883",
"0.4358468",
"0.4357613",
"0.43272364",
"0.4326961",
"0.4309018",
"0.4304459",
"0.42959937",
"0.42715394",
"0.42690802",
"0.42679313",
"0.42483518",
"0.42415392",
"0.4227476",
"0.42173022",
"0.42141944",
"0.41979897",
"0.41942605",
"0.41896075",
"0.41890633",
"0.41696638"
] | 0.82673764 | 0 |
A tuple is a finite ordered list of elements. It is a data structure that groups data. Tuples are typically immutable sequential collections; an element has related fields of different data types, and the only way to modify a tuple is to change its fields. Operators such as + and * can be applied to tuples. A database record is referred to as a tuple. In the following example, the power series of an integer is calculated: after importing the fmt package, powerSeries takes the integer a and returns the square of a and the cube of a as a tuple. | func powerSeries(a int) (int, int) {
return a * a, a * a * a
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Multiply(t Tuplelike, n float64) Tuplelike {\n\tresult := []float64{}\n\n\tfor _, value := range t.Values() {\n\t\tresult = append(result, value*n)\n\t}\n\n\treturn Tuple(result)\n}",
"func Tuple(argv []TermT) TermT {\n\tcount := C.uint32_t(len(argv))\n\t//iam: FIXME need to unify the yices errors and the go errors...\n\tif count == 0 {\n\t\treturn NullTerm\n\t}\n\treturn TermT(C.yices_tuple(count, (*C.term_t)(&argv[0])))\n}",
"func MakeTuple(v []interface{}) (skylark.Tuple, error) {\n\tvals := make([]skylark.Value, len(v))\n\tfor i := range v {\n\t\tval, err := ToValue(v[i])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvals[i] = val\n\t}\n\treturn skylark.Tuple(vals), nil\n}",
"func TupleUpdate(tuple TermT, index uint32, value TermT) TermT {\n\treturn TermT(C.yices_tuple_update(C.term_t(tuple), C.uint32_t(index), C.term_t(value)))\n}",
"func execNewTuple(arity int, p *gop.Context) {\n\targs := p.GetArgs(arity)\n\tconv := func(args []interface{}) []*types.Var {\n\t\tret := make([]*types.Var, len(args))\n\t\tfor i, arg := range args {\n\t\t\tret[i] = arg.(*types.Var)\n\t\t}\n\t\treturn ret\n\t}\n\tret := types.NewTuple(conv(args[0:])...)\n\tp.Ret(arity, ret)\n}",
"func NewTuple(n int) *Tuple {\n\tt := &Tuple{}\n\tt.data = make([]interface{}, n)\n\treturn t\n}",
"func (t Tuple) Values() []float64 {\n\treturn t\n}",
"func versionTuple(ver string) starlark.Tuple {\n\tvar major, minor, rev int\n\t_, err := fmt.Sscanf(ver, \"%d.%d.%d\", &major, &minor, &rev)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn starlark.Tuple{\n\t\tstarlark.MakeInt(major),\n\t\tstarlark.MakeInt(minor),\n\t\tstarlark.MakeInt(rev),\n\t}\n}",
"func SequenceTuple(v Object) (Tuple, error) {\n\tswitch x := v.(type) {\n\tcase Tuple:\n\t\treturn x, nil\n\tcase *List:\n\t\treturn Tuple(x.Items).Copy(), nil\n\tdefault:\n\t\tt := Tuple{}\n\t\terr := Iterate(v, func(item Object) bool {\n\t\t\tt = append(t, item)\n\t\t\treturn false\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn t, nil\n\t}\n}",
"func (sym *symtab) buildTuple(tuple *types.Tuple, varnm string, methvar string) (string, error) {\n\tsz := tuple.Len()\n\tif sz == 0 {\n\t\treturn \"\", fmt.Errorf(\"buildTuple: no elements\")\n\t}\n\t// TODO: https://www.reddit.com/r/Python/comments/3618cd/calling_back_python_instance_methods_from_c/\n\t// could not get this to work across threads for methods -- and furthermore the basic version with\n\t// CallObject works fine within the same thread, so all this extra work seems unnecessary.\n\t//\n\t// bstr := fmt.Sprintf(\"var %s *C.PyObject\\n\", varnm)\n\t// bstr += fmt.Sprintf(\"_pyargstidx := 0\\n\")\n\t// bstr += fmt.Sprintf(\"_pyargidx := C.long(0)\\n\")\n\t// if methvar != \"\" {\n\t// \tbstr += fmt.Sprintf(\"if C.gopy_method_check(%s) != 0 {\\n\", methvar)\n\t// \tbstr += fmt.Sprintf(\"\\tC.gopy_incref(%s)\\n\", methvar)\n\t// \tbstr += fmt.Sprintf(\"\\t%s = C.PyTuple_New(%d)\\n\", varnm, sz+1)\n\t// \tbstr += fmt.Sprintf(\"\\tC.PyTuple_SetItem(%s, 0, C.PyMethod_Self(%s))\\n\", varnm, methvar)\n\t// \tbstr += fmt.Sprintf(\"\\t_pyargstidx = 1\\n\")\n\t// \tbstr += fmt.Sprintf(\"\\t%[1]s = C.PyMethod_Function(%[1]s)\\n\", methvar)\n\t// \tbstr += fmt.Sprintf(\"} else {\\n\")\n\t// \tbstr += fmt.Sprintf(\"\\t%s = C.PyTuple_New(%d)\\n\", varnm, sz)\n\t// \tbstr += fmt.Sprintf(\"}\\n\")\n\t// }\n\n\t// TODO: more efficient to use strings.Builder here..\n\tbstr := fmt.Sprintf(\"%s := C.PyTuple_New(%d)\\n\", varnm, sz)\n\tfor i := 0; i < sz; i++ {\n\t\tv := tuple.At(i)\n\t\ttyp := v.Type()\n\t\tanm := pySafeArg(v.Name(), i)\n\t\tvsym := sym.symtype(typ)\n\t\tif vsym == nil {\n\t\t\terr := sym.addType(v, typ)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tvsym = sym.symtype(typ)\n\t\t\tif vsym == nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"buildTuple: type still not found: %s\", typ.String())\n\t\t\t}\n\t\t}\n\t\t// bstr += fmt.Sprintf(\"_pyargidx = C.long(_pyargstidx + %d)\\n\", i)\n\n\t\tbt, isb := typ.Underlying().(*types.Basic)\n\t\tswitch {\n\t\tcase vsym.goname == \"interface{}\":\n\t\t\tbstr += fmt.Sprintf(\"C.PyTuple_SetItem(%s, %d, C.gopy_build_string(%s(%s)%s))\\n\", varnm, i, vsym.go2py, anm, vsym.go2pyParenEx)\n\t\tcase vsym.hasHandle(): // note: assuming int64 handles\n\t\t\tbstr += fmt.Sprintf(\"C.PyTuple_SetItem(%s, %d, C.gopy_build_int64(C.int64_t(%s(%s)%s)))\\n\", varnm, i, vsym.go2py, anm, vsym.go2pyParenEx)\n\t\tcase isb:\n\t\t\tbk := bt.Kind()\n\t\t\tswitch {\n\t\t\tcase types.Int <= bk && bk <= types.Int64:\n\t\t\t\tbstr += fmt.Sprintf(\"C.PyTuple_SetItem(%s, %d, C.gopy_build_int64(C.int64_t(%s)))\\n\", varnm, i, anm)\n\t\t\tcase types.Uint <= bk && bk <= types.Uintptr:\n\t\t\t\tbstr += fmt.Sprintf(\"C.PyTuple_SetItem(%s, %d, C.gopy_build_uint64(C.uint64_t(%s)))\\n\", varnm, i, anm)\n\t\t\tcase types.Float32 <= bk && bk <= types.Float64:\n\t\t\t\tbstr += fmt.Sprintf(\"C.PyTuple_SetItem(%s, %d, C.gopy_build_float64(C.double(%s)))\\n\", varnm, i, anm)\n\t\t\tcase bk == types.String:\n\t\t\t\tbstr += fmt.Sprintf(\"C.PyTuple_SetItem(%s, %d, C.gopy_build_string(C.CString(%s)))\\n\", varnm, i, anm)\n\t\t\tcase bk == types.Bool:\n\t\t\t\tbstr += fmt.Sprintf(\"C.PyTuple_SetItem(%s, %d, C.gopy_build_bool(C.uint8_t(boolGoToPy(%s))))\\n\", varnm, i, anm)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn \"\", fmt.Errorf(\"buildTuple: type not handled: %s\", typ.String())\n\t\t}\n\t}\n\treturn bstr, nil\n}",
"func DefineTuple(types []*IHType) *HTuple {\n\treturn &HTuple{types: types}\n}",
"func TupleType(tau []TypeT) TypeT {\n\ttauLen := len(tau)\n\t//iam: FIXME need to unify the yices errors and the go errors...\n\tif tauLen == 0 {\n\t\treturn NullType\n\t}\n\treturn TypeT(C.yices_tuple_type(C.uint32_t(tauLen), (*C.type_t)(&tau[0])))\n}",
"func Point(x, y, z float64) Tuple {\n\treturn Tuple{x, y, z, 1}\n}",
"func encodeTuple(t *tree.DTuple, appendTo []byte, colID uint32, scratch []byte) ([]byte, error) {\n\tappendTo = encoding.EncodeValueTag(appendTo, colID, encoding.Tuple)\n\tappendTo = encoding.EncodeNonsortingUvarint(appendTo, uint64(len(t.D)))\n\n\tvar err error\n\tfor _, dd := range t.D {\n\t\tappendTo, err = EncodeTableValue(appendTo, descpb.ColumnID(encoding.NoColumnID), dd, scratch)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn appendTo, nil\n}",
"func (t *Tuple) Add(o *Tuple) *Tuple {\n\treturn &Tuple{\n\t\tt.x + o.x,\n\t\tt.y + o.y,\n\t\tt.z + o.z,\n\t\tt.w + o.w,\n\t}\n}",
"func (t *Tuple) Reflect(normal *Tuple) *Tuple {\n\treturn t.Sub(normal.Mul(2).Mul(t.Dot(normal)))\n}",
"func (e *ObservableEditableBuffer) RuneTuple(q int) OffsetTuple {\n\treturn e.f.RuneTuple(q)\n}",
"func (t Tuple) Mul(scalar float64) Tuple {\n\treturn Tuple{t.X * scalar, t.Y * scalar, t.Z * scalar, t.W * scalar}\n}",
"func execmTupleAt(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tret := args[0].(*types.Tuple).At(args[1].(int))\n\tp.Ret(2, ret)\n}",
"func (t *Tuple) String() string {\n\tstart := \"(\"\n\tif t.IsPoint() {\n\t\tstart = \"p\" + start\n\t} else {\n\t\tstart = \"v\" + start\n\t}\n\treturn start + floatToString(t.x, 3) + \",\" + floatToString(t.y, 3) + \",\" + floatToString(t.z, 3) + \")\"\n}",
"func (req *UpsertRequest) Tuple(tuple Tuple) *UpsertRequest {\n\treq.tuple = tuple\n\treturn req\n}",
"func Point(x, y, z float64) *Tuple {\n\treturn &Tuple{x, y, z, 1.0}\n}",
"func (t *Tuple) Mul(scalar float64) *Tuple {\n\treturn &Tuple{\n\t\tt.x * scalar,\n\t\tt.y * scalar,\n\t\tt.z * scalar,\n\t\tt.w * scalar,\n\t}\n\n}",
"func CloneValTuple(n ValTuple) ValTuple {\n\tres := make(ValTuple, 0, len(n))\n\tfor _, x := range n {\n\t\tres = append(res, CloneExpr(x))\n\t}\n\treturn res\n}",
"func TupleType3(tau1 TypeT, tau2 TypeT, tau3 TypeT) TypeT {\n\tcarr := []C.type_t{C.type_t(tau1), C.type_t(tau2), C.type_t(tau3)}\n\treturn TypeT(C.yices_tuple_type(C.uint32_t(3), (*C.type_t)(&carr[0])))\n}",
"func NewTupleFromSlice(slice []interface{}) *Tuple {\n\tt := &Tuple{}\n\tt.data = slice\n\treturn t\n}",
"func Add(t, other Tuplelike) Tuplelike {\n\tresult := []float64{}\n\n\tfor idx, value := range t.Values() {\n\t\tresult = append(result, value+other.At(idx))\n\t}\n\n\treturn Tuple(result)\n}",
"func CloneTuple(t Tuple) Tuple {\n\tnewTuple := make(Tuple, len(t))\n\tfor i := range t {\n\t\tv := reflect.New(reflect.TypeOf(t[i]).Elem())\n\t\tv.Elem().Set(reflect.ValueOf(t[i]).Elem())\n\t\tnewTuple[i] = v.Interface()\n\t}\n\treturn newTuple\n}",
"func Tuples(a []int, fn func([]int)) {\n\tp := make([]int, 2)\n\tfor i := 0; i < len(a); i++ {\n\t\tfor j := i + 1; j < len(a); j++ {\n\t\t\tp[0], p[1] = a[i], a[j]\n\t\t\tfn(p)\n\t\t}\n\t}\n}",
"func (t *Tuple) Div(scalar float64) *Tuple {\n\treturn &Tuple{\n\t\tt.x / scalar,\n\t\tt.y / scalar,\n\t\tt.z / scalar,\n\t\tt.w / scalar,\n\t}\n\n}",
"func New(args ...float64) Tuple {\n\treturn args\n}",
"func VariadicTupleType(types []interface{}) dgo.TupleType {\n\tn := len(types)\n\tif n == 0 {\n\t\tpanic(errors.New(`a variadic tuple must have at least one element`))\n\t}\n\treturn newTupleType(types, true)\n}",
"func CreateTuple(types ...Type) Type {\n\treturn tupleType(types)\n}",
"func UnpackTuple(args Tuple, kwargs StringDict, name string, min int, max int, results ...*Object) error {\n\tif len(kwargs) != 0 {\n\t\treturn ExceptionNewf(TypeError, \"%s() does not take keyword arguments\", name)\n\t}\n\n\t// Check number of arguments\n\terr := checkNumberOfArgs(name, len(args), len(results), min, max)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Copy the results in\n\tfor i := range args {\n\t\t*results[i] = args[i]\n\t}\n\treturn nil\n}",
"func (node ValTuple) formatFast(buf *TrackedBuffer) {\n\tbuf.WriteByte('(')\n\tExprs(node).formatFast(buf)\n\tbuf.WriteByte(')')\n}",
"func TupleType1(tau1 TypeT) TypeT {\n\tcarr := []C.type_t{C.type_t(tau1)}\n\treturn TypeT(C.yices_tuple_type(C.uint32_t(1), (*C.type_t)(&carr[0])))\n}",
"func (node ValTuple) Format(buf *TrackedBuffer) {\n\tbuf.astPrintf(node, \"(%v)\", Exprs(node))\n}",
"func (item Item) GetTuple(name string) sugar.Tuple {\n\ttuple := sugar.Tuple{}\n\n\tswitch item[name].(type) {\n\tcase map[string]interface{}:\n\t\tfor k, _ := range item[name].(map[string]interface{}) {\n\t\t\ttuple[k] = item[name].(map[string]interface{})[k]\n\t\t}\n\tcase sugar.Tuple:\n\t\ttuple = item[name].(sugar.Tuple)\n\t}\n\n\treturn tuple\n}",
"func (e *ObservableEditableBuffer) ByteTuple(q int) OffsetTuple {\n\treturn e.f.ByteTuple(q)\n}",
"func (s *seriesValueGenerator) Values() DTuple {\n\treturn DTuple{NewDInt(DInt(s.value))}\n}",
"func ValExpandTuple(model ModelT, yval *YvalT, child []YvalT) int32 {\n\treturn int32(C.yices_val_expand_tuple(ymodel(model), (*C.yval_t)(yval), (*C.yval_t)(&child[0])))\n}",
"func (this *Tuple) Data() []interface{} {\n\treturn this.data\n}",
"func TupleType(types []interface{}) dgo.TupleType {\n\treturn newTupleType(types, false)\n}",
"func (a Tuple) String() string {\n\tvar buf bytes.Buffer\n\tbuf.WriteRune('[')\n\tfor i := range a {\n\t\tbuf.WriteString(a[i].String())\n\t\tif i < len(a)-1 {\n\t\t\tbuf.WriteRune(' ')\n\t\t}\n\t}\n\tbuf.WriteRune(']')\n\treturn buf.String()\n}",
"func (ts *tupleSet) add(tuple seed.Tuple) {\n\tif len(tuple) != ts.numberOfColumns {\n\t\tfatal(ts.collectionName, \"expected\", ts.numberOfColumns, \"columns for\", tuple)\n\t}\n\n\tkey, err := json.Marshal(tuple[:ts.keyEnds])\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tts.tuples[string(key)] = tuple\n}",
"func Vector(x, y, z float64) Tuple {\n\treturn Tuple{x, y, z, 0}\n}",
"func (t Tuple) Add(o Tuple) Tuple {\n\tif t.IsPoint() && o.IsPoint() {\n\t\tpanic(\"cannot add 2 point tuples\")\n\t}\n\treturn Tuple{t.X + o.X, t.Y + o.Y, t.Z + o.Z, t.W + o.W}\n}",
"func (sym *symtab) processTuple(tuple *types.Tuple) error {\n\tif tuple == nil {\n\t\treturn nil\n\t}\n\tfor i := 0; i < tuple.Len(); i++ {\n\t\tivar := tuple.At(i)\n\t\tityp := ivar.Type()\n\t\tisym := sym.symtype(ityp)\n\t\tif isym != nil {\n\t\t\tcontinue\n\t\t}\n\t\terr := sym.addType(ivar, ityp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (t Tuple) Pack() []byte {\n\tbuf := new(bytes.Buffer)\n\n\tfor i, e := range t {\n\t\tswitch e := e.(type) {\n\t\tcase nil:\n\t\t\tbuf.WriteByte(0x00)\n\t\tcase int64:\n\t\t\tencodeInt(buf, e)\n\t\tcase uint32:\n\t\t\tencodeInt(buf, int64(e))\n\t\tcase uint64:\n\t\t\tencodeInt(buf, int64(e))\n\t\tcase int:\n\t\t\tencodeInt(buf, int64(e))\n\t\tcase byte:\n\t\t\tencodeInt(buf, int64(e))\n\t\tcase []byte:\n\t\t\tencodeBytes(buf, 0x01, e)\n\t\tcase lex.KeyConvertible:\n\t\t\tencodeBytes(buf, 0x01, []byte(e.LexKey()))\n\t\tcase string:\n\t\t\tencodeBytes(buf, 0x02, []byte(e))\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unencodable element at index %d (%v, type %T)\", i, t[i], t[i]))\n\t\t}\n\t}\n\n\treturn buf.Bytes()\n}",
"func Vector(x, y, z float64) *Tuple {\n\treturn &Tuple{x, y, z, 0.0}\n}",
"func (w *Writer) WriteTuple(tuple ...interface{}) {\n\tsetWriterRef(w, nil, nil)\n\tcount := len(tuple)\n\tif count == 0 {\n\t\twriteEmptyList(w)\n\t\treturn\n\t}\n\twriteListHeader(w, count)\n\tfor _, v := range tuple {\n\t\tw.Serialize(v)\n\t}\n\twriteListFooter(w)\n}",
"func vals() (int, int) {\n\treturn 3, 7\n}",
"func vals() (int, int) {\n\treturn 3, 7\n}",
"func vals() (int, int) {\n\treturn 3, 7\n}",
"func (bbox *Bbox) AsTuple() []float64 {\r\n\treturn []float64{bbox.LonMin, bbox.LatMin, bbox.LonMax, bbox.LatMax}\r\n}",
"func (this *Tuple) String() string {\n\treturn fmt.Sprintf(\"%v\", this.data)\n}",
"func (t Tuple) Div(scalar float64) Tuple {\n\tif scalar == 0 {\n\t\tpanic(\"cannot divide tuple by 0\")\n\t}\n\treturn Tuple{t.X / scalar, t.Y / scalar, t.Z / scalar, t.W / scalar}\n}",
"func MultipleValue() (int64, int64) {\n\tlog.Println(\"From multiplereturn function\")\n\treturn 33, 55\n}",
"func TermIsTuple(t TermT) bool {\n\treturn C.yices_term_is_tuple(C.term_t(t)) == C.int32_t(1)\n}",
"func (m *MyTuple) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateP0(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateP1(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMyTupleItems(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func Negate(t Tuplelike) Tuplelike {\n\tresult := []float64{}\n\n\tfor _, value := range t.Values() {\n\t\tresult = append(result, -value)\n\t}\n\n\treturn Tuple(result)\n}",
"func (*Tuple) Descriptor() ([]byte, []int) {\n\treturn file_proto_ssql_proto_rawDescGZIP(), []int{6}\n}",
"func NewTupleFromItems(items ...interface{}) *Tuple {\n\tt := NewTuple(len(items))\n\tfor i, item := range items {\n\t\tt.Set(i, item)\n\t}\n\treturn t\n}",
"func vals() (int, int) {\n\treturn 3, 7 // Return results\n}",
"func ValTupleArity(model ModelT, val *YvalT) uint32 {\n\treturn uint32(C.yices_val_tuple_arity(ymodel(model), (*C.yval_t)(val)))\n}",
"func (s *BaselimboListener) EnterTuple_type(ctx *Tuple_typeContext) {}",
"func (t *Tuple) Sub(o *Tuple) *Tuple {\n\treturn &Tuple{\n\t\tt.x - o.x,\n\t\tt.y - o.y,\n\t\tt.z - o.z,\n\t\tt.w - o.w,\n\t}\n\n}",
"func TupleQueryArgs(argCount int) (query string) {\n\tif argCount == 0 {\n\t\treturn\n\t}\n\tquery = strings.Repeat(\"?,\", argCount)\n\tquery = query[0 : len(query)-1]\n\treturn \"(\" + query + \")\"\n}",
"func TupleType2(tau1 TypeT, tau2 TypeT) TypeT {\n\tcarr := []C.type_t{C.type_t(tau1), C.type_t(tau2)}\n\treturn TypeT(C.yices_tuple_type(C.uint32_t(2), (*C.type_t)(&carr[0])))\n}",
"func DeserializeTuple(d []byte) *SPOTuple {\n\n\ttuple := &SPOTuple{}\n\n\terr := proto.Unmarshal(d, tuple)\n\tif err != nil {\n\t\tlog.Println(\"tuple-deserialize: protobuf decoding error: \", err)\n\t}\n\n\treturn tuple\n\n}",
"func Triple(arg1 TermT, arg2 TermT, arg3 TermT) TermT {\n\treturn TermT(C.yices_triple(C.term_t(arg1), C.term_t(arg2), C.term_t(arg3)))\n}",
"func (expr *TupleLiteralExpr) String() string {\n\tvar buf bytes.Buffer\n\n\tif len(expr.Members) != 0 {\n\t\tbuf.WriteString(\"{\")\n\t\tfor i, col := range expr.Members {\n\t\t\tif i != 0 {\n\t\t\t\tbuf.WriteString(\", \")\n\t\t\t}\n\t\t\tbuf.WriteString(col.String())\n\t\t}\n\t\tbuf.WriteString(\"}\")\n\t}\n\n\treturn buf.String()\n}",
"func add(x int, y int) (p,q int) {\n a := x + y\n b := 34 * 34\n return a,b\n}",
"func EqualsValTuple(a, b ValTuple) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif !EqualsExpr(a[i], b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (s *TupleIncrementalEmitterSource) EmitTuples(n int) {\n\ts.EmitTuplesNB(n)\n\ts.WaitForEmission()\n}",
"func (t Tuple2[A, B]) Unpack() (A, B) {\n\treturn t.A, t.B\n}",
"func learnMultiple(x, y int) (sum, prod int) {\n\t// two values returned\n\treturn x + y, x * y\n\n}",
"func Pair(children ...Element) *CompoundElement { return newCE(\"Pair\", children) }",
"func PyTuple_GetItem(o *PyObject, pos int) *PyObject {\n\treturn c2go(C.__PyTuple_GetItem(go2c(o), C.Py_ssize_t(pos)))\n}",
"func (this *Tuple) Get(n int) interface{} {\n\titem := this.data[this.Offset(n)]\n\treturn item\n}",
"func (s RegexMatchTuple) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func decodeTuple(a *DatumAlloc, tupTyp *types.T, b []byte) (tree.Datum, []byte, error) {\n\tb, _, _, err := encoding.DecodeNonsortingUvarint(b)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresult := tree.DTuple{\n\t\tD: a.NewDatums(len(tupTyp.TupleContents())),\n\t}\n\n\tvar datum tree.Datum\n\tfor i := range tupTyp.TupleContents() {\n\t\tdatum, b, err = DecodeTableValue(a, tupTyp.TupleContents()[i], b)\n\t\tif err != nil {\n\t\t\treturn nil, b, err\n\t\t}\n\t\tresult.D[i] = datum\n\t}\n\treturn a.NewDTuple(result), b, nil\n}",
"func (t Tuple3[A, B, C]) Unpack() (A, B, C) {\n\treturn t.A, t.B, t.C\n}",
"func SelectFeatures(tuples []base.Tuple, features []int) []base.Tuple {\n if (len(features) <= 0) {\n return tuples;\n }\n\n var tupleType reflect.Type = reflect.TypeOf(tuples[0]);\n\n var rtn []base.Tuple = make([]base.Tuple, len(tuples));\n for tupleIndex, tuple := range(tuples) {\n var data []interface{} = make([]interface{}, len(features));\n for featurePosition, featureIndex := range(features) {\n data[featurePosition] = tuple.GetData(featureIndex);\n }\n\n rtn[tupleIndex] = base.NewTypedTuple(tupleType, data, tuple.GetClass());\n }\n\n return rtn;\n}",
"func CloneColTuple(in ColTuple) ColTuple {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tswitch in := in.(type) {\n\tcase ListArg:\n\t\treturn CloneListArg(in)\n\tcase *Subquery:\n\t\treturn CloneRefOfSubquery(in)\n\tcase ValTuple:\n\t\treturn CloneValTuple(in)\n\tdefault:\n\t\t// this should never happen\n\t\treturn nil\n\t}\n}",
"func Divide(t Tuplelike, n float64) Tuplelike {\n\treturn Multiply(t, 1/n)\n}",
"func (t Tuple9[A, B, C, D, E, F, G, H, I]) Unpack() (A, B, C, D, E, F, G, H, I) {\n\treturn t.A, t.B, t.C, t.D, t.E, t.F, t.G, t.H, t.I\n}",
"func (td TupleDesc) Format(tup Tuple) string {\n\tif tup == nil || tup.Count() == 0 {\n\t\treturn \"( )\"\n\t}\n\n\tvar sb strings.Builder\n\tsb.WriteString(\"( \")\n\n\tseenOne := false\n\tfor i := range td.Types {\n\t\tif seenOne {\n\t\t\tsb.WriteString(\", \")\n\t\t}\n\t\tseenOne = true\n\t\tsb.WriteString(td.FormatValue(i, tup.GetField(i)))\n\t}\n\tsb.WriteString(\" )\")\n\treturn sb.String()\n}",
"func TMath(value interface{}) (string, string) {\n\t// dive into the data structure, outside in\n\t// the tree dump of dptree is of great help\n\t//\n\t// have a look at\n\t// https://github.com/mitchellh/mapstructure\n\t// this could work if we're able to define the math part\n\t// in a proper go struct. This appears to be difficult\n\t// or even impossible as the math data structure uses interface{}\n\t// collections to its full extent:\n\t// the \"c\" list has a map and a string value\n\t// the latter map has a list and a string value\n\t// maybe we could help things a little by adding a\n\t//\n\t// GetString(\"c\", \"0\", \"c\")\n\t//\n\t// all the info is essentially there \"c\" and \"t\" refer to map fields\n\t// every thing else is to be converted to an int which refers to a\n\t// slice\n\t//\n\t// to convert back to an interface{} which is understood by pandoc\n\t// is completely else. The scenario here is that we change a\n\t// value in math field. It must be possible to use a copy of this\n\t// math field to do a\n\t//\n\t// PutString(\"c\", \"0\", \"c\", \"newvalue\")\n\t//\n\t// The other scenario is that one element is replaced by something\n\t// completely else, for instance, a code block for an image\n\ts1 := value.(map[string]interface{})\n\ts2 := s1[\"c\"].([]interface{})\n\ts3 := s2[0].(map[string]interface{})\n\tt := s3[\"t\"].(string)\n\tc := s2[1].(string)\n\n\treturn t, c\n}",
"func (s SqlInjectionMatchTuple) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (t Tuple7[A, B, C, D, E, F, G]) Unpack() (A, B, C, D, E, F, G) {\n\treturn t.A, t.B, t.C, t.D, t.E, t.F, t.G\n}",
"func NewPoint(x, y, z float64) *Tuple {\n\treturn &Tuple{x, y, z, 1.0}\n}",
"func (s *seriesValueGenerator) ColumnTypes() TTuple { return TTuple{TypeInt} }",
"func (s ByteMatchTuple) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func returnMulti() (int, string) {\n\treturn 42, \"foobar\"\n}",
"func (t Tuple) At(idx int) float64 {\n\treturn t[idx]\n}",
"func NewPoint(x, y, z float64) Tuple {\n\treturn Tuple{\n\t\tX: x,\n\t\tY: y,\n\t\tZ: z,\n\t\tW: pointW,\n\t}\n}",
"func (b *BlockingForwardBox) EmitTuples(n int) {\n\tb.m.Lock()\n\tdefer b.m.Unlock()\n\tb.cnt += n\n\tb.c.Broadcast()\n}",
"func Subtract(t, other Tuplelike) Tuplelike {\n\tresult := []float64{}\n\n\tfor idx, value := range t.Values() {\n\t\tresult = append(result, value-other.At(idx))\n\t}\n\n\treturn Tuple(result)\n}",
"func (b *AppendOnlyBufferedBatch) AppendTuples(batch coldata.Batch, startIdx, endIdx int) {\n\tfor _, colIdx := range b.colsToStore {\n\t\tb.colVecs[colIdx].Append(\n\t\t\tcoldata.SliceArgs{\n\t\t\t\tSrc: batch.ColVec(colIdx),\n\t\t\t\tSel: batch.Selection(),\n\t\t\t\tDestIdx: b.length,\n\t\t\t\tSrcStartIdx: startIdx,\n\t\t\t\tSrcEndIdx: endIdx,\n\t\t\t},\n\t\t)\n\t}\n\tb.length += endIdx - startIdx\n}"
] | [
"0.64886904",
"0.6247497",
"0.61603564",
"0.610934",
"0.6033286",
"0.5953124",
"0.5881533",
"0.58625937",
"0.5833882",
"0.5820546",
"0.57736194",
"0.57283825",
"0.56730825",
"0.56594837",
"0.55717015",
"0.55475885",
"0.55184346",
"0.54951537",
"0.54623514",
"0.54509795",
"0.54370296",
"0.5426124",
"0.5409086",
"0.5365197",
"0.5364557",
"0.5357154",
"0.53441274",
"0.53399014",
"0.53333634",
"0.5304976",
"0.5304798",
"0.52922744",
"0.52916414",
"0.5273533",
"0.5239635",
"0.52378005",
"0.52323973",
"0.5225026",
"0.5219407",
"0.52088267",
"0.5195355",
"0.5189202",
"0.51716787",
"0.51545966",
"0.511954",
"0.5116192",
"0.50839454",
"0.5056672",
"0.5051693",
"0.5047577",
"0.50389534",
"0.50356597",
"0.50356597",
"0.50356597",
"0.5031929",
"0.50315523",
"0.49453443",
"0.49351048",
"0.4930047",
"0.49157333",
"0.4915219",
"0.49148822",
"0.49092984",
"0.4894404",
"0.48854727",
"0.48835236",
"0.48649317",
"0.48356712",
"0.48315734",
"0.48046303",
"0.48017928",
"0.47747588",
"0.47709143",
"0.47698504",
"0.47621053",
"0.475651",
"0.47472468",
"0.4719404",
"0.4710267",
"0.47080818",
"0.470562",
"0.47037098",
"0.47010455",
"0.46960595",
"0.46958214",
"0.46938753",
"0.46934932",
"0.4689788",
"0.46623755",
"0.46570715",
"0.4647029",
"0.46405742",
"0.46327248",
"0.46155918",
"0.46142778",
"0.46066016",
"0.460152",
"0.45890436",
"0.45852387",
"0.4582674"
] | 0.5276199 | 33 |
GenerateJWTToken generates a JWT token containing the username, signed with the given secret key | func GenerateJWTToken(userName, jwtAccSecretKey string) (string, error) {
claims := jwt.MapClaims{
"username": userName,
"ExpiresAt": jwt.TimeFunc().Add(1 * time.Minute).Unix(),
"IssuedAt": jwt.TimeFunc().Unix(),
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
return token.SignedString([]byte(jwtAccSecretKey))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func GenerateJWTToken(username string) (string, error) {\n\t// Create token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"username\": username,\n\t\t\"exp\": time.Now().Add(time.Minute * 5).Unix(),\n\t})\n\n\tt, err := token.SignedString([]byte(jwtsecret))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn t, err\n}",
"func GenerateToken(username, dept_id string) (string, error) {\n\tnowTime := time.Now()\n\texpireTime := nowTime.Add(330 * 24 * time.Hour)\n\n\tclaims := CustomClaims{\n\t\tusername,\n\t\tdept_id,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expireTime.Unix(),\n\t\t\tIssuer: \"dingtalk\",\n\t\t},\n\t}\n\n\ttokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken, err := tokenClaims.SignedString(jwtSecret)\n\n\treturn token, err\n}",
"func GenerateJWT(t models.User) (string, error) {\n\n\tJWTSECRET := config.Get(\"JWTSECRET\")\n\tsecret := []byte(JWTSECRET)\n\n\tpayload := jwt.MapClaims{\n\t\t\"_id\": t.ID.Hex(),\n\t\t\"email\": t.Email,\n\t\t\"name\": t.Name,\n\t\t\"lastname\": t.Lastname,\n\t\t\"bio\": t.Biography,\n\t\t\"website\": t.Website,\n\t\t\"birthDate\": t.BirthDate,\n\t\t\"location\": t.Location,\n\n\t\t\"exp\": time.Now().Add(24 * time.Hour).Unix(),\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, payload)\n\ttokenStr, err := token.SignedString(secret)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttokenStr = \"Bearer \" + tokenStr\n\n\treturn tokenStr, nil\n}",
"func GenerateJWTToken(info *TokenInfo, expiresAt int64) (string, error) {\n\tinfo.ExpiresAt = expiresAt\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, info)\n\tencryptedToken, err := token.SignedString([]byte(secretKey))\n\tif err != nil {\n\t\treturn \"\", errors.Customize(500, \"failed to sign on token\", err)\n\t}\n\treturn encryptedToken, nil\n}",
"func GenerateJWT(t models.User) (string, error) {\n\tmySecret := []byte(\"learningaboutgobybuildingatwittercloneusingmongodb\")\n\n\tpayload := jwt.MapClaims{\n\t\t\"email\": t.Email,\n\t\t\"name\": t.Name,\n\t\t\"Last\": t.Last,\n\t\t\"bod\": t.Bod,\n\t\t\"location\": t.Location,\n\t\t\"website\": t.Website,\n\t\t\"biography\": t.Biography,\n\t\t\"_id\": t.ID.Hex(),\n\t\t\"exp\": time.Now().Add(time.Hour * 24).Unix(),\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, payload)\n\ttokenStr, err := token.SignedString(mySecret)\n\tif err != nil {\n\t\treturn tokenStr, err\n\t}\n\treturn tokenStr, nil\n}",
"func GenerateJWT(username string, session *r.Session) string {\n\tvar jwt string\n\tdb := os.Getenv(\"DB\")\n\ttokenTable := os.Getenv(\"TOKENTABLE\")\n\tsalt := randStringBytes(32)\n\tu64 := b64.URLEncoding.EncodeToString([]byte(username))\n\ts64 := b64.URLEncoding.EncodeToString([]byte(salt))\n\thash := computeHMAC(u64 + \".\" + s64)\n\th := u64 + \".\" + s64 + \".\" + b64.URLEncoding.EncodeToString([]byte(hash))\n\t// Write to token table\n\tif !CheckUserExists(username, tokenTable, session) {\n\t\tauth := AuthToken{username, h}\n\t\t// fmt.Println(auth)\n\t\tr.DB(db).Table(tokenTable).Insert(auth).Run(session)\n\t\tjwt = h\n\t}\n\n\treturn jwt\n}",
"func GenerateToken(username string, isAdmin bool, expires int, signingKey []byte) (string, error) {\n\tiat := time.Now()\n\texpirationTime := iat.Add(time.Duration(expires) * time.Second)\n\t// Create the JWT claims, which includes the username and expiry time\n\tclaims := &CustomClaims{\n\t\tUsername: username,\n\t\tIsAdmin: isAdmin,\n\t\tIssuedAt: iat.Unix(),\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\t// In JWT, the expiry time is expressed as unix milliseconds\n\t\t\tExpiresAt: expirationTime.Unix(),\n\t\t},\n\t}\n\n\t// Declare the token with the algorithm used for signing, and the claims\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\t// Create the JWT string.\n\treturn token.SignedString(signingKey)\n}",
"func (engine ssoEngineImpl) generateJWTToken(authenticatedUser *authenticatedUser) (*common.CustomClaims, string, error) {\n\n\t// Build the claims\n\tclaims := &common.CustomClaims{\n\t\tUser: authenticatedUser.UserName,\n\t\tRoles: authenticatedUser.Roles,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Unix() + engine.tokenSecondsToLive,\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\tIssuer: \"EasySSO Server\",\n\t\t},\n\t}\n\t// Build the token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS512, claims)\n\n\t// Convert the token to a string\n\ttokenString, err := token.SignedString(engine.privateKey)\n\tif err != nil {\n\t\tlog.Error(\"Unable to sign generated token\", err)\n\t\treturn nil, \"\", err\n\t}\n\treturn claims, tokenString, nil\n}",
"func GenerateJWT() (string, error) {\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"authorized\"] = true\n\tclaims[\"user\"] = \"heheh\"\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * 30).Unix()\n\ttokenString, err := token.SignedString(secretKey)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn tokenString, nil\n}",
"func GenerateJWT() (string, error) {\n\tlog.Printf(\"Generating new JWT\")\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tclaims[\"authorized\"] = true\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * 30).Unix()\n\n\ttokenString, err := token.SignedString(signingKey)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}",
"func GenerateJWT(t models.User) (string, error) {\n\tcode := []byte(\"lmSeryiJuasJuas\")\n\tpayload := jwt.MapClaims{\n\t\t\"_id\": t.ID.Hex(),\n\t\t\"email\": t.Email,\n\t\t\"name\": t.Name,\n\t\t\"lastName\": t.LastName,\n\t\t\"birthday\": t.Birthday,\n\t\t\"biography\": t.Biography,\n\t\t\"location\": t.Location,\n\t\t\"webSite\": t.WebSite,\n\t\t\"exp\": time.Now().Add(time.Hour * 24).Unix(),\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, payload)\n\ttokenStr, err := token.SignedString(code)\n\n\tif err != nil {\n\t\treturn tokenStr, err\n\t}\n\treturn tokenStr, nil\n}",
"func GenerateJWT() (string, error) {\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tclaims[\"authorized\"] = true\n\tclaims[\"user\"] = \"Wyllis Monteiro\"\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * 30).Unix()\n\n\ttokenString, err := token.SignedString(MySigningKey)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}",
"func (m *manager) GenerateToken(userID string, username string, roles []string) (string, error) {\n nowTime := time.Now()\n expireTime := nowTime.Add(m.expireTime * time.Second)\n\n claims := Token{\n UserID: userID,\n Name: m.hashService.Make(username),\n Roles: roles,\n StandardClaims: &jwt.StandardClaims{\n ExpiresAt: expireTime.Unix(),\n Issuer: m.issuer,\n Audience: m.audience,\n },\n }\n\n tokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n token, err := tokenClaims.SignedString(m.jwtSecret)\n\n return token, err\n}",
"func GenerateToken(user string) (string, error) {\n\tvar err error\n\tsecret := \"secret\"\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"username\": user,\n\t\t\"iss\": strconv.FormatInt(GetCurrentTimeMillis(), 10),\n\t})\n\ttokenString, err := token.SignedString([]byte(secret))\n\n\treturn tokenString, err\n}",
"func generateJwtToken(login, fgp string, api *UserAPIHandler) (string, error) {\n\tvar claims models.TokenClaims\n\n\t// set required claims\n\tclaims.ExpiresAt = time.Now().Add(1 * time.Hour).Unix()\n\tclaims.Fingerprint = fgp\n\tif IsUserAdmin(login, api.admins) {\n\t\tclaims.Role = roleAdmin\n\t} else {\n\t\tclaims.Role = roleUser\n\t}\n\n\t// generate and sign the token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString(api.jwtSecret)\n}",
"func GenerateJWT(user models.User) (resp LoginResponse, err error) {\n\tclaims := jwt.MapClaims{}\n\n\t// set our claims\n\tclaims[\"User\"] = user\n\tclaims[\"Name\"] = user.Name\n\n\t// set the expire time\n\n\tclaims[\"exp\"] = time.Now().Add(time.Hour * 24 * 30 * 12).Unix() //24 hours inn a day, in 30 days * 12 months = 1 year in milliseconds\n\n\t// create a signer for rsa 256\n\tt := jwt.NewWithClaims(jwt.GetSigningMethod(\"RS256\"), claims)\n\n\tpub, err := jwt.ParseRSAPrivateKeyFromPEM(config.GetConf().Encryption.Private)\n\tif err != nil {\n\t\treturn\n\t}\n\ttokenString, err := t.SignedString(pub)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresp = LoginResponse{\n\t\tUser: user,\n\t\tMessage: \"Token succesfully generated\",\n\t\tToken: tokenString,\n\t}\n\n\treturn\n\n}",
"func GenerateJWT(userID string) (string, error) {\n\tsigningKey := []byte(\"havealookatbath\")\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tclaims[\"authorised\"] = true\n\tclaims[\"user_id\"] = userID\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * 30).UnixNano()\n\n\ttokenString, err := token.SignedString(signingKey)\n\tif err != nil {\n\t\tfmt.Printf(err.Error())\n\t\treturn \"\", err\n\t}\n\treturn tokenString, nil\n}",
"func GenerateToken(secret []byte, aud, sub string) (string, error) {\n\n\ttok := jwt.NewWithClaims(jwt.SigningMethodHS256, &jwt.RegisteredClaims{\n\t\tIssuer: TokenIssuer,\n\t\tAudience: []string{aud},\n\t\tSubject: sub,\n\t\tIssuedAt: jwt.NewNumericDate(time.Now()),\n\t\tNotBefore: jwt.NewNumericDate(time.Now().Add(-15 * time.Minute)),\n\t})\n\n\treturn tok.SignedString(secret)\n}",
"func GenerateToken(key []byte, userID int64, credential string) (string, error) {\n\n\t//new token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t// Claims\n\tclaims := make(jwt.MapClaims)\n\tclaims[\"user_id\"] = userID\n\tclaims[\"credential\"] = credential\n\tclaims[\"exp\"] = time.Now().Add(time.Hour*720).UnixNano() / int64(time.Millisecond)\n\ttoken.Claims = claims\n\n\t// Sign and get as a string\n\ttokenString, err := token.SignedString(key)\n\treturn tokenString, err\n}",
"func GenerateToken(userID uint) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"userID\": userID,\n\t})\n\n\ttokenStr, err := token.SignedString([]byte(secret))\n\n\treturn tokenStr, err\n}",
"func GenerateToken(c *gin.Context, user *models.UserResource) string {\n\tclaims := jwt.NewWithClaims(jwt.SigningMethodHS256, &jwt.StandardClaims{\n\t\tIssuer: user.ID,\n\t\tExpiresAt: jwt.NewTime(float64(time.Now().Add(24 * time.Hour).UnixNano())),\n\t})\n\n\ttoken, err := claims.SignedString([]byte(SecretKey))\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"error\": \"Unable to authonticate\"})\n\t\treturn \"\"\n\t}\n\tc.SetCookie(\n\t\t\"jwt\", token, int(time.Now().Add(24*time.Hour).UnixNano()), \"/\", \"localhost\", false, true,\n\t)\n\treturn token\n}",
"func GenerateJWT(user models.User) (string, error) {\n\tscopes, err := executedao.PermissionDAO.GetScopes(user.Profile.ID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\n\tclaims := models.AppClaims{\n\t\tUser: user,\n\t\tScopes: scopes,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 8).Unix(),\n\t\t\tIssuer: \"Contabilidad por Alexys\",\n\t\t},\n\t}\n\tlog.Printf(\"Creando un token a: %s\\n\", user.Username)\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tlog.Println(\"Firmando el token...\")\n\tss, err := token.SignedString(signKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ss, nil\n}",
"func GenerateJWT() (string, error) {\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"authorized\"] = true\n\tclaims[\"user\"] = \"Niroop\"\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * 30).Unix()\n\ttokenString, err := token.SignedString(mySigningKey)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}",
"func GenerateJWT() (string, error) {\n\n\tkey := os.Getenv(\"JWT_SECRETKEY\")\n\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tclaims[\"authorized\"] = true\n\tclaims[\"user\"] = \"Client\"\n\n\ttokenString, err := token.SignedString([]byte(key))\n\n\tif err != nil {\n\t\tlog.Warning.Println(err)\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}",
"func GenerateJWT(user *models.User) string {\n\tclaims := models.Claim{\n\t\tUser: *user,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 10).Unix(),\n\t\t\tIssuer: \"marold97@outlook.com\",\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tresult, err := token.SignedString(PrivateKey)\n\tif err != nil {\n\t\tlog.Println(\"No se ha podido firmar el token: \", err)\n\t}\n\n\treturn result\n}",
"func generateJWT(u Model) (string, error) {\n\tvar token string\n\tc := Claim{\n\t\tUsuario: u,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\t// Tiempo de expiración del token: 1 semana\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 24 * 1).Unix(),\n\t\t\tIssuer: \"Cursos EDteam\",\n\t\t},\n\t}\n\n\tt := jwt.NewWithClaims(jwt.SigningMethodRS256, c)\n\ttoken, err := t.SignedString(SignKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}",
"func GenToken(id uint) string {\n\tjwt_token := jwt.New(jwt.GetSigningMethod(\"HS256\"))\n\t// Set some claims\n\tjwt_token.Claims = jwt.MapClaims{\n\t\t\"id\": id,\n\t\t\"exp\": time.Now().Add(time.Hour * 24).Unix(),\n\t}\n\t// Sign and get the complete encoded token as a string\n\ttoken, _ := jwt_token.SignedString([]byte(NBSecretPassword))\n\treturn token\n}",
"func GenerateToken(secretKey string, validDays int) string {\n\n\tif validDays < 1 {\n\t\tvalidDays = 1\n\t}\n\n\tclaims := sjwt.New() // Issuer of the token\n\tclaims.SetIssuer(\"goUpload\")\n\t/*\n\t\tclaims.SetTokenID() // UUID generated\n\t\tclaims.SetSubject(\"Bearer Token\") // Subject of the token\n\t\tclaims.SetAudience([]string{\"Prometeus\"}) // Audience the toke is for\n\t\tclaims.SetIssuedAt(time.Now()) // IssuedAt in time, value is set in unix\n\t*/\n\tclaims.SetNotBeforeAt(time.Now()) // Token valid now\n\tclaims.SetExpiresAt(time.Now().Add(time.Hour * 24 * time.Duration(validDays))) // Token expires in 24 hours\n\tjwt := claims.Generate([]byte(secretKey))\n\treturn jwt\n}",
"func createJwtToken(u user.User) (string, error) {\n\t// Set custom claims\n\tclaims := &middleware.LoginCustomClaims{\n\t\tu.Username,\n\t\tfalse,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 72).Unix(),\n\t\t},\n\t}\n\n\t// Create token with claims\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t// Generate encoded token and send it as response.\n\tkey := viper.GetString(\"auth.signkey\")\n\tt, err := token.SignedString([]byte(key))\n\treturn t, err\n\n}",
"func (j *Jwt) GenerateToken() string {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"exp\": json.Number(strconv.FormatInt(time.Now().AddDate(0, 0, 1).Unix(), 10)),\n\t\t\"iat\": json.Number(strconv.FormatInt(time.Now().Unix(), 10)),\n\t\t\"uid\": j.UID,\n\t\t\"name\": j.Name,\n\t\t\"username\": j.Username,\n\t})\n\n\ttokenStr, err := token.SignedString(JWTSecret)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn tokenStr\n}",
"func GenerateJWT(u models.User) (string, error) {\n\tmyKey := []byte(\"LaClaveSecretaQueUsoParaCrearUnJWT\")\n\n\tpayload := jwt.MapClaims{\n\t\t\"email\": u.Email,\n\t\t\"name\": u.Name,\n\t\t\"lastname\": u.Lastname,\n\t\t\"birthday\": u.Birthday,\n\t\t\"location\": u.Location,\n\t\t\"biography\": u.Biography,\n\t\t\"website\": u.Website,\n\t\t\"_id\": u.ID.Hex(),\n\t\t\"exp\": time.Now().Add(24 * time.Hour).Unix(),\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, payload)\n\n\ttokenStr, err := token.SignedString(myKey)\n\n\tif err != nil {\n\t\treturn tokenStr, err\n\t}\n\n\treturn tokenStr, nil\n}",
"func GenerateJWT(initialToken string, validDuration int) (string, error) {\n\n\tloginKey := []byte(initialToken)\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tclaims[\"authorized\"] = true\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * time.Duration(validDuration))\n\n\tjwtToken, jwtErr := token.SignedString(loginKey)\n\n\tif jwtErr != nil {\n\t\tlog.Println(\"Error creating jwt Token : \", jwtErr)\n\t\treturn \"\", jwtErr\n\t}\n\n\treturn jwtToken, nil\n}",
"func (s service) generateJWT(identity Identity) (string, error) {\n\treturn jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"id\": identity.ID(),\n\t\t\"isAdmin\": identity.IsAdmin(),\n\t\t\"exp\": time.Now().Add(time.Duration(s.tokenExpiration) * time.Hour).Unix(),\n\t}).SignedString([]byte(s.signingKey))\n}",
"func (t *Jwt) GenerateToken(userID uint, expiredAt time.Duration) (accessToken string, err error) {\n\texp := time.Now().Add(expiredAt)\n\t// jwt token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{\"exp\": exp.Unix(), \"userID\": userID})\n\t// sign the jwt token\n\taccessToken, err = token.SignedString(t.PrivateKey)\n\tif err != nil {\n\t\t// todo: log error\n\t}\n\treturn\n}",
"func GenerateJWT(id, role string, signinigKey []byte) (access string, err error) {\n\tvar (\n\t\taccessToken *jwt.Token\n\t\tclaims jwt.MapClaims\n\t)\n\taccessToken = jwt.New(jwt.SigningMethodHS256)\n\n\tclaims = accessToken.Claims.(jwt.MapClaims)\n\tclaims[\"iss\"] = \"user\"\n\tclaims[\"sub\"] = id\n\tclaims[\"role\"] = role\n\tclaims[\"iat\"] = time.Now().Unix()\n\n\taccessTokenString, err := accessToken.SignedString(signinigKey)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"access_token generating error: %s\", err)\n\t\treturn\n\t}\n\n\treturn accessTokenString, nil\n}",
"func GenerateJWT(name, role string) (string, error) {\n\t// Create the Claims\n\tclaims := AppClaims{\n\t\tUserName: name,\n\t\tRole: role,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Minute * 20).Unix(),\n\t\t\tIssuer: \"admin\",\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tss, err := token.SignedString(signKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ss, nil\n}",
"func GenerateToken(jwtSecret string, claims InvoicesClaims) string {\n\thmacSampleSecret := []byte(jwtSecret)\n\n\ttype Claims struct {\n\t\tInvoicesClaims\n\t\tjwt.StandardClaims\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, Claims{\n\t\tInvoicesClaims{\n\t\t\tGetInvoices: true,\n\t\t\tGetInvoice: true,\n\t\t\tCreateInvoice: true,\n\t\t},\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: getExpiry(),\n\t\t},\n\t})\n\n\ttokenString, err := token.SignedString(hmacSampleSecret)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn tokenString\n}",
"func GenerateJWT(user string) *jwtgo.Token {\n\n\ttoken := jwtgo.New(jwtgo.SigningMethodRS512)\n\tin10m := time.Now().Add(time.Duration(10) * time.Minute).Unix()\n\ttoken.Claims = jwtgo.MapClaims{\n\t\t\"iss\": \"Issuer\", // who creates the token and signs it\n\t\t\"aud\": \"Audience\", // to whom the token is intended to be sent\n\t\t\"exp\": in10m, // time when the token will expire (10 minutes from now)\n\t\t\"jti\": uuid.Must(uuid.NewV4()).String(), // a unique identifier for the token\n\t\t\"iat\": time.Now().Unix(), // when the token was issued/created (now)\n\t\t\"nbf\": 2, // time before which the token is not yet valid (2 minutes ago)\n\t\t\"sub\": \"subject\", // the subject/principal is whom the token is about\n\t\t\"scopes\": \"api:access\", // token scope - not a standard claim\n\t\t\"user\": user, // username\n\t}\n\treturn token\n}",
"func GenerateJWT(user interface{}) (string, error) {\n\t// 4380 hours = 6 months\n\texpireToken := time.Now().Add(time.Hour * 4380).Unix()\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, &model.User{\n\t\tId: user.(model.User).Id,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expireToken,\n\t\t},\n\t})\n\tsignedToken, err := token.SignedString(server.JwtSecret)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn signedToken, nil\n}",
"func (s service) generateJWT(identity Identity) (string, error) {\n\treturn jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"id\": identity.GetID(),\n\t\t\"name\": identity.GetName(),\n\t\t\"exp\": time.Now().Add(time.Duration(s.tokenExpiration) * time.Hour).Unix(),\n\t}).SignedString([]byte(s.signingKey))\n}",
"func GenerateJWT(user models.User) (string, error) {\n\n\tmyKey := []byte(\"MastersdelDesarrollo_grupodeFacebook\")\n\n\tpayload := jwt.MapClaims{\n\t\t\"email\": user.Email,\n\t\t\"nombre\": user.UserName,\n\t\t\"userid\": user.UserID,\n\t\t\"phone\": user.PhoneID,\n\t\t\"Password\": user.Password,\n\t\t\"_id\": user.ID.Hex(),\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, payload)\n\ttokenStr, err := token.SignedString(myKey)\n\tif err != nil {\n\t\treturn tokenStr, err\n\t}\n\treturn tokenStr, nil\n}",
"func (a *Service) GenerateJweToken(customClaims map[string]interface{}) (string, *time.Time, *error_utils.ApiError) {\n\n\tenc, err := jose.NewEncrypter(\n\t\tjose.ContentEncryption(a.encryptionAlgorithm),\n\t\tjose.Recipient{Algorithm: jose.DIRECT, Key: a.encryptionKey},\n\t\t(&jose.EncrypterOptions{}).WithType(\"JWT\"),\n\t)\n\tif err != nil {\n\t\treturn \"\", nil, error_utils.NewInternalServerError(err.Error())\n\t}\n\n\texpire := a.timeFunc().UTC().Add(a.timeout)\n\n\tclaims := map[string]interface{} { }\n\tclaims[\"exp\"] = expire.Unix()\n\tclaims[\"orig_iat\"] = a.timeFunc().Unix()\n\tclaims[\"iss\"] = a.issuer\n\n\tif customClaims != nil {\n\t\tfor key, value := range customClaims {\n\t\t\tclaims[key] = value\n\t\t}\n\t}\n\n\ttoken, err := jwt.Encrypted(enc).Claims(claims).CompactSerialize()\n\tif err != nil {\n\t\treturn \"\", nil, error_utils.NewInternalServerError(err.Error())\n\t}\n\n\treturn token, &expire, nil\n}",
"func generateUserToken(identity *Identity) *jwt.Token {\n\ttoken := jwt.New(jwt.SigningMethodRS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"jti\"] = uuid.NewV4().String()\n\tiat := time.Now().Unix()\n\tclaims[\"exp\"] = 0\n\tclaims[\"iat\"] = iat\n\tclaims[\"typ\"] = \"Bearer\"\n\tclaims[\"preferred_username\"] = identity.Username\n\tclaims[\"sub\"] = identity.ID.String()\n\tclaims[\"email\"] = identity.Email\n\n\ttoken.Header[\"kid\"] = \"test-key\"\n\n\treturn token\n}",
"func (op *AuthOperations) HandleJWTGenerate(w http.ResponseWriter, r *http.Request) {\n\tvar input jwt.General\n\t//fid := r.Header.Get(\"x-fid\")\n\tiid := r.Header.Get(\"x-iid\")\n\terr := json.NewDecoder(r.Body).Decode(&input)\n\tif err != nil {\n\t\tLOGGER.Warningf(\"Error while validating token body : %v\", err)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tLOGGER.Debugf(\"%s, %s\", iid, input.JTI)\n\n\tvar token jwt.Info\n\tinfoCollection, ctx := op.session.GetSpecificCollection(AuthDBName, JWTInfoCollection)\n\terr = infoCollection.FindOne(ctx,\n\t\tbson.M{\n\t\t\t\"institution\": iid,\n\t\t\t\"jti\": input.JTI,\n\t\t}).Decode(&token)\n\tif err != nil {\n\t\tLOGGER.Errorf(\"Error getting JWT info from query: %s\", err.Error())\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tLOGGER.Debugf(\"%+v\", token)\n\n\t// if token exists\n\tif &token == nil {\n\t\tLOGGER.Errorf(\"Token info not found\")\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, errors.New(\"token info not found\"))\n\t\treturn\n\t}\n\n\t// only generate if stage is currently approved\n\tif token.Stage != jwt.Approved {\n\t\tLOGGER.Errorf(\"Token is not currently approved\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"token is not currently approved\"))\n\t\treturn\n\t}\n\n\temail := r.Header.Get(\"email\")\n\t// check to make sure the authenticated user is the same user who requested the token\n\tif email == \"\" || email != token.CreatedBy {\n\t\tLOGGER.Errorf(\"User who requested the token must be the same user to generate the token\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"user who requested the token must be the same user to generate the token\"))\n\t\treturn\n\t}\n\n\t// ensure that the approved request includes a jti\n\tif token.JTI != input.JTI {\n\t\tLOGGER.Errorf(\"Unknown token id\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"unknown token id\"))\n\t\treturn\n\t}\n\n\t// update token info\n\ttoken.Stage = jwt.Ready\n\n\t// set default expiration time\n\t//initExp := \"15m\" //os.Getenv(\"initial_mins\") + \"m\"\n\t//if initExp == \"\" {\n\t//\tinitExp = \"1h\"\n\t//}\n\n\t// generate the token with payload and claims\n\t// initialize to expire in n1 hrs and not before n2 seconds from now\n\t//encodedToken := jwt.GenerateToken(payload, initExp, \"0s\")\n\ttokenSecret := stringutil.RandStringRunes(64, false)\n\n\tkeyID := primitive.NewObjectIDFromTimestamp(time.Now())\n\tjwtSecure := jwt.IJWTSecure{\n\t\tID: keyID,\n\t\tSecret: tokenSecret,\n\t\tJTI: input.JTI,\n\t\tNumber: 0,\n\t}\n\n\tsecureCollection, secureCtx := op.session.GetSpecificCollection(AuthDBName, JWTSecureCollection)\n\t_, err = secureCollection.InsertOne(secureCtx, jwtSecure)\n\tif err != nil {\n\t\tLOGGER.Errorf(\"Insert JWT secure failed: %+v\", err)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\t// convert the interface type ID to string\n\tLOGGER.Debugf(\"New generate ID: %s\" , keyID.Hex())\n\n\tcount := 0\n\t// define payload\n\tpayload := jwt.CreateClaims(token, count, iid, keyID.Hex())\n\tpayload.ExpiresAt = time.Now().Add(time.Minute * 60).Unix()\n\tpayload.NotBefore = time.Now().Unix()\n\n\tencodedToken, _ := jwt.CreateAndSign(payload, tokenSecret, keyID.Hex())\n\n\t// save updated token info\n\tupdateResult, updateInfoErr := infoCollection.UpdateOne(ctx, bson.M{\"institution\": iid, \"jti\": input.JTI}, bson.M{\"$set\": &token})\n\tif updateInfoErr != nil || 
updateResult.MatchedCount < 1{\n\t\tLOGGER.Errorf(\"Error update token info: %+v\", updateInfoErr)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tLOGGER.Debugf(\"Successfully generate JWT token\")\n\tjwt.ResponseSuccess(w, encodedToken)\n\treturn\n}",
"func GenerateToken(info Jwt) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"id\": info.ID,\n\t\t\"email\": info.Email,\n\t\t\"name\": info.Name,\n\t\t\"nbf\": time.Date(2015, 10, 10, 12, 0, 0, 0, time.UTC).Unix(),\n\t})\n\n\t// Sign and get the complete encoded token as a string using the secret\n\treturn token.SignedString(secret)\n}",
"func createJwToken(user models.User) (string, error) {\n\n\tjwtExpired, _ := strconv.ParseInt(os.Getenv(\"JWT_EXPIRED_MINUTES\"), 10, 64)\n\n\tclaims := models.JwtClaims{\n\t\tName: user.Name,\n\t\tEmail: user.Email,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tId: strconv.Itoa(user.ID),\n\t\t\tExpiresAt: time.Now().Add(time.Duration(jwtExpired) * time.Minute).Unix(),\n\t\t},\n\t}\n\n\trawToken := jwt.NewWithClaims(jwt.SigningMethodHS512, claims)\n\n\ttoken, err := rawToken.SignedString([]byte(os.Getenv(\"JWT_SECRET\")))\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}",
"func GenerateToken(m *models.User) (*AuthToken, error) {\n\tnowTime := time.Now()\n\texpireTime := nowTime.Add(24 * time.Hour)\n\n\tclaims := userStdClaims{\n\t\tUser: m,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expireTime.Unix(),\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\tIssuer: \"gin-server-api\",\n\t\t},\n\t}\n\n\ttokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken, err := tokenClaims.SignedString(jwtSecret)\n\n\tauthToken := &AuthToken{Token: token, ExpiresAt: expireTime.Format(\"2006-01-02 15:04:05\")}\n\treturn authToken, err\n}",
"func (a *Authenticator) generateJwt(p *Profile) ([]byte, error) {\n\t// Create the token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\t// Set some claims\n\t// TODO: complete the claims.\n\ttoken.Claims[\"exp\"] = time.Now().Add(time.Hour * 72).Unix()\n\t// User is valid. Create a jwt response.\n\ttoken.Claims[\"kid\"] = 0\n\ttoken.Claims[\"userid\"] = p.UserName\n\ttoken.Claims[\"scopes\"] = p.Scopes\n\tts, err := token.SignedString(a.secret)\n\t// Sign and get the complete encoded token as a string\n\n\tts, err = token.SignedString(a.secret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []byte(ts), nil\n}",
"func GenerateToken(payload interface{}) string {\n\ttokenContent := jwt.MapClaims{\n\t\t\"payload\": payload,\n\t\t\"exp\": time.Now().Add(time.Second * TokenExpiredTime).Unix(),\n\t}\n\tjwtToken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tokenContent)\n\ttoken, err := jwtToken.SignedString([]byte(\"TokenPassword\"))\n\tif err != nil {\n\t\tlogger.Error(\"Failed to generate token: \", err)\n\t\treturn \"\"\n\t}\n\n\treturn token\n}",
"func (this *Token) CreateJWTToken(typeUser string, user interface{}) string {\n\n\t// Create new JWT token for the newly registered account\n\tvar id uint64\n\tswitch typeUser {\n\tcase \"user_buyers\":\n\t\tid = user.(*UserBuyers).ID\n\t}\n\n\ttk := &Token{UserId: id, UserType: typeUser, UserDetail: user}\n\ttoken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tk)\n\ttokenString, _ := token.SignedString([]byte(os.Getenv(\"TOKEN_PASSWORD\")))\n\n\treturn tokenString\n}",
"func CreateToken(userId uint64, secret_name string) (string, error) {\n\n //Retrieve secret value from secrets manager\n\tsecret, err := getSecretValue(secret_name);\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n atClaims := jwt.MapClaims{}\n atClaims[\"authorized\"] = true\n atClaims[\"user_id\"] = userId\n atClaims[\"exp\"] = time.Now().Add(time.Minute * 15).Unix()\n at := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)\n\ttoken, err := at.SignedString([]byte(secret))\n if err != nil {\n return \"\", err\n }\n\tlog.Println(\"Token is successfully created\")\n return token, nil\n}",
"func (j *JWTUtil) CreateToken(userID uint) (string, error) {\n\n\tclaims := jwt.MapClaims{}\n\n\tvar duration time.Duration\n\tdurationStr := os.Getenv(\"JWT_LIFESPAN_MINUTES\")\n\tif durationStr == \"\" {\n\t\tduration = DefaultTokenLifeSpan\n\t} else {\n\t\td, _ := strconv.ParseInt(durationStr, 10, 64)\n\t\tduration = time.Duration(d) * time.Minute\n\t}\n\n\tclaims[USER_ID] = userID\n\tclaims[\"authorized\"] = true\n\tclaims[\"exp\"] = time.Now().Add(duration).Unix()\n\tjwtToken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsecret := os.Getenv(\"JWT_SECRET\")\n\tif secret == \"\" {\n\t\treturn \"\", errors.New(\"missing jwt token secret\")\n\t}\n\ttoken, err := jwtToken.SignedString([]byte(secret))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}",
"func GenerateJWT(privateKey []byte, fields map[string]interface{}) (string, error) {\n\n\ttype CustomClaims struct {\n\t\tUserId int64 `json:\"user_id,omitempty\"`\n\t\tjwt.StandardClaims\n\t}\n\n\tclaims := &CustomClaims{}\n\n\tfor k, v := range fields {\n\t\tswitch k {\n\t\tcase \"aud\":\n\t\t\tclaims.Audience = cast.ToString(v)\n\t\tcase \"sub\":\n\t\t\tclaims.Subject = cast.ToString(v)\n\t\tcase \"iss\":\n\t\t\tclaims.Issuer = cast.ToString(v)\n\t\tcase \"id\":\n\t\t\tclaims.Id = cast.ToString(v)\n\t\tcase \"exp\":\n\t\t\tclaims.ExpiresAt = cast.ToInt64(v)\n\t\tcase \"user_id\":\n\t\t\tclaims.UserId = cast.ToInt64(v)\n\t\tcase \"nbf\":\n\t\t\tclaims.NotBefore = cast.ToInt64(v)\n\t\tcase \"iat\":\n\t\t\tclaims.IssuedAt = time.Now().Unix()\n\t\t}\n\t}\n\n\tvar (\n\t\tsPrivateKey interface{}\n\t\terr error\n\t)\n\n\tsPrivateKey, err = jwt.ParseRSAPrivateKeyFromPEM(privateKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tss, err := token.SignedString(sPrivateKey)\n\n\tif err != nil {\n\n\t\treturn \"\", err\n\t}\n\treturn ss, nil\n}",
"func (user *User) GenerateToken() {\n\n\tvalue, _ := strconv.Atoi(os.Getenv(\"token_exp\"))\n\n\t//Create new JWT token for the newly registered account\n\ttk := &Token{UserID: uint(user.ID), ExpirationTime: time.Now().Add(time.Duration(value) * time.Second).Unix()}\n\n\ttoken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tk)\n\ttokenString, _ := token.SignedString([]byte(os.Getenv(\"token_password\")))\n\tuser.Token = tokenString\n\n}",
"func GenerateJWTforUser(user models.User) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"userId\": user.UserID,\n\t\t\"username\": user.Username,\n\t\t\"isAdmin\": user.IsAdmin,\n\t\t\"expires\": strconv.FormatInt(time.Now().Add(time.Minute*time.Duration(1)).Unix(), 10),\n\t})\n\treturn token.SignedString([]byte(os.Getenv(\"JWT_SECRET\")))\n}",
"func GenerateJWT(currentUserEmail string) (string, error) {\n\t// Create the Claims\n\tclaims := AppClaims{\n\t\tcurrentUserEmail,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Minute * 60).Unix(),\n\t\t\tIssuer: \"admin\",\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tss, err := token.SignedString(signKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ss, nil\n}",
"func CreateToken(user model.User, jwtKey string) (string, error) {\n\n\texpireToken := time.Now().Add(time.Hour * 48).Unix()\n\n\t// Set-up claims\n\tclaims := model.TokenClaims{\n\t\tID: user.ID,\n\t\tUsername: user.Username,\n\t\tName: user.Name,\n\t\tEmail: user.Email,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expireToken,\n\t\t\tIssuer: \"smartdashboard-backend-auth\",\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\ttokenString, err := token.SignedString([]byte(jwtKey))\n\n\treturn tokenString, err\n}",
"func GenerateToken(payload PayLoad, expireTime int64) (string, error) {\n\n\tclaims := Claims{\n\t\tpayload.ID,\n\t\tpayload.Account,\n\t\tEncodeMD5(payload.Password),\n\t\tpayload.Scope,\n\t\tpayload.IsSuper,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expireTime,\n\t\t\tIssuer: \"liaoliao\",\n\t\t},\n\t}\n\n\ttokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken, err := tokenClaims.SignedString(jwtSecret)\n\n\treturn token, err\n}",
"func GenerateToken(id int, account string, role string) (token string, err error) {\n nowTime := time.Now()\n expireTime := nowTime.Add(3 * time.Hour) // token發放後多久過期\n\n claims := Claims{\n ID: id,\n Account: account,\n Role: role,\n StandardClaims: jwt.StandardClaims{\n ExpiresAt: expireTime.Unix(),\n IssuedAt: nowTime.Unix(),\n Issuer: \"go-gin-cli\",\n },\n }\n\n tokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n token, err = tokenClaims.SignedString(jwtSecret)\n if err != nil {\n log.Println(err)\n return\n }\n\n return\n}",
"func (h *Helper) generateToken(tokentype int, expiresInSec time.Duration, id, role, username, email, picturepath string, createdAt, modifiedAt int64) (string, error) {\n\t// Create the Claims\n\tclaims := AppClaims{\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tAudience: helper.TokenAudience,\n\t\t\tSubject: id,\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\t//1Day\n\t\t\tExpiresAt: time.Now().Add(expiresInSec).Unix(),\n\t\t\tIssuer: helper.TokenIssuer,\n\t\t},\n\t\tRole: role,\n\t}\n\tswitch tokentype {\n\tcase ID_TOKEN:\n\t\tclaims.Type = \"id_token\"\n\t\tclaims.User = &TokenUser{username, email, picturepath, createdAt, modifiedAt}\n\tcase REFRESH_TOKEN:\n\t\tclaims.Type = \"refresh\"\n\tcase ACCESS_TOKEN:\n\t\tclaims.Type = \"bearer\"\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tss, err := token.SignedString(h.signKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ss, nil\n}",
"func (j *JWT) GenerateToken(user models.User) (string, error) {\n\texpirationTime := time.Now().Add(7 * 24 * time.Hour)\n\tclaims := &requset.CustomClaims{\n\t\tTelephone: user.Telephone,\n\t\tUserName: user.Username,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expirationTime.Unix(),\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\tIssuer: \"y\",\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString(j.JwtSecret)\n}",
"func (t *jwtMgr) createJWTToken(user *auth.User, privateClaims map[string]interface{}) (string, time.Time, error) {\n\tcurrTime := time.Now()\n\texp := currTime.Add(t.expiration)\n\tif user == nil || user.Name == \"\" {\n\t\tt.logger.Errorf(\"User information is required to create a JWT token\")\n\t\treturn \"\", exp, ErrMissingUserInfo\n\t}\n\t// standard jwt claims like sub, iss, exp\n\tclaims := jwt.Claims{\n\t\tSubject: user.Name,\n\t\tIssuer: issuerClaimValue,\n\t\tExpiry: jwt.NewNumericDate(exp),\n\t\tIssuedAt: jwt.NewNumericDate(currTime),\n\t}\n\t// venice custom claims\n\tif privateClaims == nil {\n\t\tprivateClaims = make(map[string]interface{})\n\t}\n\tprivateClaims[TenantClaim] = user.GetTenant()\n\tprivateClaims[RolesClaim] = user.Status.GetRoles()\n\t// create signed JWT\n\ttoken, err := jwt.Signed(t.signer).Claims(claims).Claims(privateClaims).CompactSerialize()\n\tif err != nil {\n\t\tt.logger.Errorf(\"Unable to create JWT token: Err: %v\", err)\n\t\treturn \"\", exp, err\n\t}\n\treturn token, exp, err\n}",
"func createJWT(userID int) (string, int64, error) {\n\t// expirationTime := time.Now().Add(7 * 24 * 60 * time.Minute)\n\t// expirationTime := time.Now().Add(1 * time.Minute)\n\texpirationTime := time.Now().Add(24 * 60 * time.Minute)\n\tclaims := &Claims{\n\t\tUserID: userID,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expirationTime.Unix(),\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttokenString, err := token.SignedString(jwtSecretKey)\n\tif err == nil {\n\t\treturn tokenString, expirationTime.Unix(), nil\n\t}\n\treturn \"\", 0, err\n}",
"func createJWT(secret map[string]interface{}, scope string, pkey pkeyInterface) (string, error) {\n\t// A valid JWT has an \"iat\" timestamp and an \"exp\" timestamp. Get the current\n\t// time to create these timestamps.\n\tnow := int(time.Now().Unix())\n\n\t// Construct the JWT header, which contains the private key id in the service\n\t// account secret.\n\theader := map[string]string{\n\t\t\"typ\": \"JWT\",\n\t\t\"alg\": \"RS256\",\n\t\t\"kid\": toString(secret[\"private_key_id\"]),\n\t}\n\n\t// Construct the JWT payload.\n\tpayload := map[string]string{\n\t\t\"aud\": toString(secret[\"token_uri\"]),\n\t\t\"scope\": scope,\n\t\t\"iat\": strconv.Itoa(now),\n\t\t\"exp\": strconv.Itoa(now + 3600),\n\t\t\"iss\": toString(secret[\"client_email\"]),\n\t}\n\n\t// Convert header and payload to base64-encoded JSON.\n\theaderB64, err := mapToJsonBase64(header)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpayloadB64, err := mapToJsonBase64(payload)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// The first two segments of the JWT are signed. The signature is the third\n\t// segment.\n\tsegments := headerB64 + \".\" + payloadB64\n\n\t// sign the hash, instead of the actual segments.\n\thashed := sha256.Sum256([]byte(segments))\n\tsignedBytes, err := pkey.Sign(rand.Reader, hashed[:], crypto.SignerOpts(sha256Opts{}))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Generate the final JWT as\n\t// base64(header) + \".\" + base64(payload) + \".\" + base64(signature)\n\treturn segments + \".\" + base64Encode(signedBytes), nil\n}",
"func GenerateToken() (string, int64, error) {\n\tnow := time.Now()\n\tnow = now.UTC()\n\n\texpiration := now.Add(TokenLifespan).Unix()\n\tclaims := &jwt.StandardClaims{\n\t\tIssuer: \"auth.evanmoncuso.com\",\n\t\tAudience: \"*\",\n\t\tExpiresAt: expiration,\n\t\tIssuedAt: now.Unix(),\n\t}\n\n\ttoken := jwt.NewWithClaims(SigningMethod, claims)\n\ttokenString, err := token.SignedString(TokenSecret)\n\n\treturn tokenString, expiration, err\n}",
"func CreateToken(userId primitive.ObjectID) (tokenString string, err error) {\n\n\t// Get config file\n\tconfig, err := ConfigHelper.GetConfig()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\ttype MyCustomClaims struct {\n\t\tUserId primitive.ObjectID `json:\"userId\"`\n\t\tjwt.StandardClaims\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, MyCustomClaims{\n\t\tuserId,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Unix() + (config.JwtExpHours * 3600),\n\t\t},\n\t})\n\n\t// Sign and get the complete encoded token as a string using the secret\n\ttokenString, err = token.SignedString([]byte(config.JwtSecret))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\treturn\n}",
"func GenerateJwtToken(email string) (SignedToken, error) {\n\tclaims := &jwtClaim{\n\t\tEmail: email,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\tIssuer: os.Getenv(\"JWT_ISSUER\"),\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsignedToken, err := token.SignedString([]byte(os.Getenv(\"JWT_KEY\")))\n\n\treturn SignedToken{Token: signedToken}, err\n}",
"func createJwt(payload *JWTUser) (string, error) {\n\t// if the Expires isn't set, we need to set it to the expiration from the config\n\t// the only time it may be set is during test\n\t// generally, if you find yourself setting this by hand, you're doing it wrong\n\tif payload.Expires == \"\" {\n\t\tpayload.Expires = time.Now().Add(Config.TokenExpiresMinutes).Format(\"2006-01-02T15:04:05Z\")\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"user\": payload,\n\t})\n\ttokenString, err := token.SignedString([]byte(Config.TokenSalt))\n\n\treturn tokenString, err\n}",
"func GeneroJWT(t models.Usuario)(string,error){\n\tmiClave:=[]byte(\"MastersDelDesarrollo\")\n\tpayload:=jwt.MapClaims{\n\t\t\"email\":t.Email,\n\t\t\"nombres\":t.Nombre,\n\t\t\"apellidos\":t.Apellidos,\n\t\t\"fecha_nacimiento\":t.FechaNacimiento,\n\t\t\"biografia\":t.Biografia,\n\t\t\"ubicacion\":t.Ubicacion,\n\t\t\"sitioweb\":t.SitioWeb,\n\t\t\"_id\":t.ID.Hex(),\n\t\t\"exp\":time.Now().Add(time.Hour*24).Unix(),\n\t}\n\n\ttoken:=jwt.NewWithClaims(jwt.SigningMethodHS256,payload)\n\ttokenStr,err:=token.SignedString(miClave)\n\tif err!=nil{\n\t\treturn tokenStr,err\n\t}\n\treturn tokenStr,nil\n}",
"func CreateJWTToken(email string, provider string, providerID string, group string) (string, error) {\n\tclaims := Claims{\n\t\tEmail: email,\n\t\tProvider: provider,\n\t\tGroup: group,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 24).Unix(),\n\t\t\tId: providerID,\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttokenString, err := token.SignedString(jwtSecret)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn tokenString, nil\n}",
"func Generate(payload map[string]interface{}, privateKey *rsa.PrivateKey) []byte {\n\tpayload[\"date\"] = time.Now().UTC().Format(\"02-01-2006\")\n\n\ttoken, err := jws.NewJWT(payload, crypto.SigningMethodRS512).Serialize(privateKey)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn token\n}",
"func GenerateToken(c *gin.Context) {\n\tcurrentUser := GetCurrentUser(c.Request)\n\tif currentUser == nil {\n\t\terr := c.AbortWithError(http.StatusUnauthorized, fmt.Errorf(\"Invalid session\"))\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t\treturn\n\t}\n\n\ttokenID := uuid.NewV4().String()\n\n\t// Create the Claims\n\tclaims := &ScopedClaims{\n\t\tjwt.StandardClaims{\n\t\t\tIssuer: auth0ApiIssuer,\n\t\t\tAudience: auth0ApiAudiences[0],\n\t\t\tIssuedAt: time.Now().UnixNano(),\n\t\t\tExpiresAt: time.Now().UnixNano() * 2,\n\t\t\tSubject: strconv.Itoa(int(currentUser.ID)),\n\t\t\tId: tokenID,\n\t\t},\n\t\t\"api:invoke\",\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsignedToken, err := token.SignedString(signingKey)\n\n\tif err != nil {\n\t\terr = c.AbortWithError(http.StatusInternalServerError, fmt.Errorf(\"Failed to sign token: %s\", err))\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t} else {\n\t\terr = tokenStore.Store(strconv.Itoa(int(currentUser.ID)), tokenID)\n\t\tif err != nil {\n\t\t\terr = c.AbortWithError(http.StatusInternalServerError, fmt.Errorf(\"Failed to store token: %s\", err))\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t\t} else {\n\t\t\tc.JSON(http.StatusOK, gin.H{\"token\": signedToken})\n\t\t}\n\t}\n}",
"func (r *Repository) CreateJWT(u *User) (string, error) {\n\tclaims := r.GetClaims(u)\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t// Sign and get the complete encoded token as a string using the secret\n\ttokenString, err := token.SignedString([]byte(r.secretForToken))\n\treturn tokenString, err\n}",
"func (c *Claim) GenerateJWT(signingString string) (string, error) {\n\t// set the expiration time to 15'\n\tc.ExpiresAt = time.Now().Add(time.Minute * 15).Unix()\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, c)\n\treturn token.SignedString([]byte(signingString))\n}",
"func CreateToken(username string) (string, error) {\n\n\tclaims := jwt.MapClaims{\n\t\t\"user_id\": username,\n\t\t\"exp\": time.Now().Add(12 * time.Hour).Unix(),\n\t\t\"authorized\": true,\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\treturn token.SignedString([]byte(SECRET))\n}",
"func createJWT(SID string) (string, error) {\n\t// Create custom claims value\n\tclaims := MyCustomClaims{\n\t\tSID,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expire,\n\t\t},\n\t}\n\t// Create a jwt tokenizer\n\ttokenizer := jwt.NewWithClaims(jwt.SigningMethodHS512, &claims)\n\t// create a token and sign it with your key\n\tss, err := tokenizer.SignedString(key)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error in SignedString while signing: %w\", err)\n\t}\n\treturn ss, nil\n}",
"func GenerateAuthToken(claims *JWTClaims, expiry time.Duration, jwtKey []byte) (string, time.Time, error) {\n\tissuedTime := time.Now()\n\texpirationTime := issuedTime.Add(expiry)\n\tclaims.StandardClaims = jwt.StandardClaims{\n\t\t// In JWT, the expiry time is expressed as unix milliseconds\n\t\tExpiresAt: expirationTime.Unix(),\n\t\t// Can be used to blacklist in the future. Needs to hold state\n\t\t// in that case :/\n\t\tId: uuid.NewV4().String(),\n\t\tIssuedAt: issuedTime.Unix(),\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tres, err := token.SignedString(jwtKey)\n\treturn res, expirationTime, err\n}",
"func (s *Setup) GenerateToken(info *model.Auth) (string, error) {\n\tcfg, err := config.Load()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsecret := []byte(cfg.JWTSecret)\n\n\tvar claims model.AuthClaims\n\n\tclaims.ID = info.ID\n\tclaims.Name = info.Name\n\tclaims.Email = info.Email\n\tclaims.StandardClaims = jwt.StandardClaims{\n\t\tExpiresAt: time.Now().Add(time.Hour * 2).Unix(),\n\t\tIssuer: cfg.AppName,\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\tsignedString, err := token.SignedString(secret)\n\tif err != nil {\n\t\treturn \"\", errors.New(errGeneratingToken)\n\t}\n\n\treturn signedString, nil\n}",
"func (handler *AuthHandler) GenerateToken(w http.ResponseWriter, r *http.Request) {\n\ttokenString, err := GenerateJWT()\n\tif err != nil {\n\t\tfmt.Println(\"error occured while generating the token string\")\n\t}\n\n\tfmt.Fprintf(w, tokenString)\n}",
"func GenerateToken(payload map[string]interface{}) (string, error) {\n\treturn GenerateCustomToken(payload, defaultSecret, defaultExpireTime)\n}",
"func JWTCreate(userID int, expiredAt int64) string {\n\tclaims := UserClaims{\n\t\tuserID,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expiredAt,\n\t\t\tIssuer: \"proton\",\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsignedToken, _ := token.SignedString(mySigningKey)\n\treturn signedToken\n}",
"func GenerateNewAccessToken(u *domain.User) (string, error) {\n\t// Set secret key from .env file.\n\tsecret := os.Getenv(\"JWT_SECRET_KEY\")\n\n\t// Set expires minutes count for secret key from .env file.\n\tminutesCount, _ := strconv.Atoi(os.Getenv(\"JWT_SECRET_KEY_EXPIRE_MINUTES\"))\n\n\t// Create token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t// Set claims\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"id\"] = u.ID\n\tclaims[\"email\"] = u.Email\n\tclaims[\"username\"] = u.Username\n\tclaims[\"full_name\"] = u.FullName\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * time.Duration(minutesCount)).Unix()\n\n\t// Generate encoded token and send it as response.\n\tt, err := token.SignedString([]byte(secret))\n\tif err != nil {\n\t\t// Return error, it JWT token generation failed.\n\t\treturn \"\", err\n\t}\n\n\treturn t, nil\n}",
"func createToken(user *models.User) string {\n\tvar store models.Store\n\tvar storeID uint\n\n\tif user.HaveStore == true {\n\t\tif config.DB.First(&store, \"user_id = ?\", user.ID).RecordNotFound() {\n\t\t\tstoreID = 0\n\t\t}\n\t\tstoreID = store.ID\n\t} else {\n\t\tstoreID = 0\n\t}\n\t// to send time expire, issue at (iat)\n\tjwtToken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"user_id\": user.ID,\n\t\t\"user_role\": user.Role,\n\t\t\"user_store\": user.HaveStore,\n\t\t\"store_id\": storeID,\n\t\t\"exp\": time.Now().AddDate(0, 0, 7).Unix(),\n\t\t\"iat\": time.Now().Unix(),\n\t})\n\n\t// Sign and get the complete encoded token as a string using the secret\n\ttokenString, err := jwtToken.SignedString([]byte(os.Getenv(\"JWT_SECRET\")))\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn tokenString\n}",
"func generateAndSaveJWT(secrets secret.Interface, cfg ServerConfig) error {\n\tclaims := jg.MapClaims{\n\t\t\"iss\": fmt.Sprintf(\"kube-arangodb/%s\", cfg.ServerName),\n\t\t\"iat\": time.Now().Unix(),\n\t}\n\terr := k8sutil.CreateJWTFromSecret(context.Background(), secrets, secrets, cfg.JWTSecretName, cfg.JWTKeySecretName, claims, nil)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn err\n}",
"func CreateToken(id, username string) (string, error) {\n\tvar err error\n\tatClaims := jwt.MapClaims{}\n\tatClaims[\"authorized\"] = true\n\tatClaims[\"ID\"] = id\n\tatClaims[\"username\"] = username\n\tatClaims[\"exp\"] = time.Now().Add(time.Hour * 23).Unix()\n\tat := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)\n\ttoken, err := at.SignedString([]byte(os.Getenv(\"jwtsecret\")))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn token, nil\n}",
"func GenerateOAuthToken(isSignUp bool, timeZone string, typeVal string, host string) (string, error) {\n\t//compute the expiration\n\texpiration := time.Now().Unix() + JWTOAuthExpirationSec\n\n\t//create the claims\n\tclaims := &OAuthClaims{\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expiration,\n\t\t},\n\t\tIsSignUp: isSignUp,\n\t\tTimeZone: timeZone,\n\t\tType: typeVal,\n\t\tHost: host,\n\t}\n\n\t//create the token\n\talgorithm := jwt.GetSigningMethod(JWTSigningAlgorithm)\n\ttoken := jwt.NewWithClaims(algorithm, claims)\n\n\t//create the signed string\n\ttokenStr, err := token.SignedString([]byte(GetJWTKey()))\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"sign oauth token\")\n\t}\n\treturn tokenStr, nil\n}",
"func GetJWT(claims *UserAuthClaims, keyID string) string {\n\thmacSecret := os.Getenv(env.HmacSecret)\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken.Header[\"kid\"] = keyID\n\n\ttokenString, err := token.SignedString([]byte(hmacSecret))\n\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn \"\"\n\t}\n\n\treturn tokenString\n}",
"func GenerateToken(uuid string, name string, email string, role string) (string, error) {\n\texpirationTime := time.Now().Add(24 * time.Hour)\n\tclaims := &Claims{\n\t\tUUID: uuid,\n\t\tName: name,\n\t\tEmail: email,\n\t\tRole: role,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expirationTime.Unix(),\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttokenString, err := token.SignedString(jwtKey)\n\tif err != nil {\n\t\treturn err.Error(), err\n\t}\n\treturn tokenString, nil\n}",
"func prepareJWTToken(config *Config) (string, error) {\n\tpubBytes, err := x509.MarshalPKIXPublicKey(config.PrivateKey.Public())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thash := sha256.Sum256(pubBytes)\n\n\taccountName := strings.ToUpper(config.Account)\n\tuserName := strings.ToUpper(config.User)\n\n\tissueAtTime := time.Now().UTC()\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{\n\t\t\"iss\": fmt.Sprintf(\"%s.%s.%s\", accountName, userName, \"SHA256:\"+base64.StdEncoding.EncodeToString(hash[:])),\n\t\t\"sub\": fmt.Sprintf(\"%s.%s\", accountName, userName),\n\t\t\"iat\": issueAtTime.Unix(),\n\t\t\"nbf\": time.Date(2015, 10, 10, 12, 0, 0, 0, time.UTC).Unix(),\n\t\t\"exp\": issueAtTime.Add(config.JWTExpireTimeout).Unix(),\n\t})\n\n\ttokenString, err := token.SignedString(config.PrivateKey)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, err\n}",
"func mustAuthCreateJWTToken() string {\n\tauthOptions.jwtSecretFile = mustExpand(authOptions.jwtSecretFile)\n\n\tif authOptions.jwtSecretFile == \"\" {\n\t\tlog.Fatal().Msg(\"A JWT secret file is required. Set --auth.jwt-secret option.\")\n\t}\n\tcontent, err := ioutil.ReadFile(authOptions.jwtSecretFile)\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msgf(\"Failed to read JWT secret file '%s'\", authOptions.jwtSecretFile)\n\t}\n\tjwtSecret := strings.TrimSpace(string(content))\n\ttoken, err := service.CreateJwtToken(jwtSecret, authOptions.user)\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msg(\"Failed to create JWT token\")\n\t}\n\treturn token\n}",
"func generateAuthToken(u *db.UserModel) (*types.AuthorizedUser, error) {\n\tc := make(chan *types.TokenOutput)\n\n\te := time.Now().Add(time.Hour * 72).Unix()\n\n\tclaims := &types.JwtUserClaims{\n\t\tCurrentUser: types.CurrentUser{Name: u.Username, Email: u.Email, Id: u.ID},\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: e,\n\t\t},\n\t}\n\n\tt := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\ts, err := t.SignedString([]byte(config.JWT_SECRET))\n\n\tif err != nil {\n\t\treturn nil, errors.New(utils.StatusMessage(500))\n\t}\n\n\tgo tokenModel.Create(\n\t\t&types.Token{UserId: u.ID, Token: s, Expiration: e},\n\t\tc,\n\t)\n\n\tif r := <-c; r.Err != nil {\n\t\treturn nil, errors.New(utils.StatusMessage(500))\n\t}\n\n\treturn &types.AuthorizedUser{Token: s}, nil\n}",
"func (t *TokenClaims) GenerateToken(key []byte) (string, error) {\n\treturn jwt.\n\t\tNewWithClaims(jwt.SigningMethodHS256, t).\n\t\tSignedString(key)\n}",
"func (u *User) CreateJWTToken() error {\n\tdb.First(&u.UserGroup, u.UserGroupID)\n\tnow := time.Now()\n\tclaims := JWTClaims{\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tIssuedAt: now.Unix(),\n\t\t\tId: createJWTID(u.ID),\n\t\t\tIssuer: \"pm\",\n\t\t\tExpiresAt: now.AddDate(0, 0, 365).Unix(),\n\t\t\tSubject: u.Email,\n\t\t},\n\t\tUserID: u.ID,\n\t\tName: u.Name,\n\t\tRole: u.UserGroup.Name,\n\t}\n\trawToken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tvar err error\n\tu.JWTToken, err = rawToken.SignedString([]byte(config.Settings.JWTSecret))\n\treturn err\n}",
"func getJWT(msg string) (string, error) {\n\n\t// create a new claim\n\tclaims := myClaims{\n\t\tEmail: msg,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\t// expires in 5 minutes from now\n\t\t\tExpiresAt: time.Now().Add(5 * time.Minute).Unix(),\n\t\t},\n\t}\n\n\t// create token from newly created claims\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, &claims)\n\n\t// sign the token\n\tss, err := token.SignedString([]byte(key))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error getting signed string from token\")\n\t}\n\n\treturn ss, nil\n}",
"func (manager *JWTManager) Generate(user *User) (string, error) {\n\tclaims := UserClaims{\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(manager.tokenDuration).Unix(),\n\t\t},\n\t\tUsername: user.Username,\n\t\tRole: user.Role,\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString([]byte(manager.secretKey))\n}",
"func CreateToken(username, secret string) (string, time.Time, error) {\n\texpTime := time.Now().Add(tokenTTL * time.Hour)\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"username\": username,\n\t\t\"exp\": expTime.Unix(),\n\t})\n\n\tstoken, err := token.SignedString([]byte(secret))\n\treturn stoken, expTime, err\n}",
"func BuildJWT(us models.User) (string, error) {\n\n\tpayload := jwt.MapClaims{\n\t\t\"email\": us.Email,\n\t\t\"name\": us.Name,\n\t\t\"lastName\": us.LastName,\n\t\t\"birthDate\": us.BirthDate,\n\t\t\"_id\": us.ID.Hex(),\n\t\t\"exp\": time.Now().Add(time.Hour * 24).Unix(),\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, payload)\n\n\tjwtKey, err := token.SignedString(Pass)\n\tif err != nil {\n\t\treturn jwtKey, err\n\t}\n\n\treturn jwtKey, nil\n}",
"func NewJWT(claims map[string]interface{}, validFor time.Duration) (string, error) {\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\tfor k, v := range claims {\n\t\ttoken.Claims[k] = v\n\t}\n\n\ttoken.Claims[\"exp\"] = time.Now().UTC().Add(validFor).Unix()\n\treturn token.SignedString([]byte(JWTPrivateKey))\n}",
"func (a *API) createJWT(claims jwt.MapClaims) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\ttokenString, err := token.SignedString([]byte(a.config.SigningSecret))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}",
"func CreateToken(user *models.User, ExpiresAt int64) (string, error) {\n\n\tclaims := &models.Claims{\n\t\tID: user.ID,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: ExpiresAt,\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\treturn token.SignedString([]byte(\"pingouin123\"))\n}"
] | [
"0.8215574",
"0.78082126",
"0.7724785",
"0.77192235",
"0.771866",
"0.7673677",
"0.7650788",
"0.7650347",
"0.76278704",
"0.7624205",
"0.761858",
"0.76141596",
"0.76023495",
"0.7587422",
"0.7586905",
"0.7568152",
"0.75659156",
"0.75551575",
"0.75548047",
"0.7543786",
"0.7528981",
"0.7524678",
"0.75244164",
"0.7514457",
"0.74895495",
"0.7487728",
"0.7440466",
"0.74403876",
"0.744",
"0.741801",
"0.74047357",
"0.7359958",
"0.73590213",
"0.73575294",
"0.73181087",
"0.7317995",
"0.731551",
"0.7313919",
"0.7283807",
"0.72584724",
"0.7251446",
"0.72204876",
"0.7205572",
"0.71996665",
"0.7191368",
"0.7173905",
"0.7172686",
"0.7099483",
"0.7090258",
"0.706544",
"0.70422524",
"0.7029094",
"0.7023916",
"0.700178",
"0.6981462",
"0.69594556",
"0.6958624",
"0.69478303",
"0.694667",
"0.69274664",
"0.69220674",
"0.69162625",
"0.68926543",
"0.6889208",
"0.6885338",
"0.6852373",
"0.682971",
"0.68182045",
"0.6801709",
"0.6790563",
"0.6789386",
"0.6770377",
"0.67510265",
"0.6739653",
"0.6734999",
"0.67264044",
"0.6725783",
"0.6712088",
"0.66923213",
"0.66867864",
"0.6686479",
"0.6670591",
"0.6638893",
"0.6629424",
"0.6628068",
"0.66222537",
"0.66173935",
"0.66005933",
"0.66004825",
"0.65875965",
"0.65852576",
"0.65739745",
"0.6561735",
"0.65458506",
"0.65446305",
"0.6540049",
"0.6531577",
"0.65255296",
"0.651029",
"0.65089244"
] | 0.8048552 | 1 |
ValidateJWTToken validates a JWT token with the given key | func ValidateJWTToken(jwtKey, tokenString string) (*Claims, error) {
clms := &Claims{}
tkn, err := jwt.ParseWithClaims(tokenString, clms, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
return []byte(jwtKey), nil
})
if err != nil {
if err == jwt.ErrSignatureInvalid {
return nil, errors.WithMessage(err, "Invalid Signature")
}
return nil, errors.WithMessage(err, "Access Denied-Please check the access token")
}
if !tkn.Valid {
return nil, errors.WithMessage(err, "Invalid Token")
}
return clms, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func ValidateJWT(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tvar tokenString string\n\t\ttokenString, err := getTokenFromAuthorizationHeader(c.Request())\n\t\tif err != nil {\n\t\t\ttokenString, err = getTokenFromURLParams(c.Request())\n\t\t\tif err != nil {\n\t\t\t\treturn c.JSON(http.StatusUnauthorized, \"no se encontró el token\")\n\t\t\t}\n\t\t}\n\n\t\tverifyFunction := func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn VerifyKey, nil\n\t\t}\n\n\t\ttexto := \"\"\n\t\ttoken, err := jwt.ParseWithClaims(tokenString, &Claim{}, verifyFunction)\n\t\tif err != nil {\n\t\t\tswitch err.(type) {\n\t\t\tcase *jwt.ValidationError:\n\t\t\t\tvErr := err.(*jwt.ValidationError)\n\t\t\t\tswitch vErr.Errors {\n\t\t\t\tcase jwt.ValidationErrorExpired:\n\t\t\t\t\ttexto = \"Su token ha expirado, por favor vuelva a ingresar\"\n\t\t\t\tdefault:\n\t\t\t\t\ttexto = \"Error de validación del token\"\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\ttexto = \"Error al procesar el token\"\n\t\t\t}\n\n\t\t\treturn c.JSON(http.StatusBadRequest, texto)\n\t\t}\n\n\t\tif !token.Valid {\n\t\t\treturn c.JSON(http.StatusBadRequest, \"token no valido\")\n\t\t}\n\n\t\temail := token.Claims.(*Claim).Usuario.Email\n\n\t\tc.Set(\"email\", email)\n\n\t\treturn next(c)\n\t}\n}",
"func ValidateToken(tokenStr string, jwk map[string]JWKKey) (*jwt.Token, error) {\n\t// @note 2. Decode the token string into JWT format.\n\ttoken, err := jwt.Parse(tokenStr, func(token *jwt.Token) (interface{}, error) {\n\t\t// Methods both of Cognito User Pools and Google are RS256\n\t\tif _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\t\t// @note 5. Get the kid from the JWT token header and retrieve the corresponding JSON Web Key that was stored\n\t\tif kid, ok := token.Header[\"kid\"]; ok {\n\t\t\tif kidStr, ok := kid.(string); ok {\n\t\t\t\tkey := jwk[kidStr]\n\t\t\t\t// @note 6. Verify the signature of the decoded JWT token.\n\t\t\t\trsaPublicKey := convertKey(key.E, key.N)\n\t\t\t\treturn rsaPublicKey, nil\n\t\t\t}\n\t\t}\n\t\t// Does not get RSA public key\n\t\treturn \"\", nil\n\t})\n\tif err != nil {\n\t\treturn token, err\n\t}\n\n\tclaims := token.Claims.(jwt.MapClaims)\n\tiss, ok := claims[\"iss\"]\n\tif !ok {\n\t\treturn token, fmt.Errorf(\"token does not contain issuer\")\n\t}\n\n\tissStr := iss.(string)\n\tif strings.Contains(issStr, \"cognito-idp\") {\n\t\terr = validateAWSJWTClaims(claims)\n\t\tif err != nil {\n\t\t\treturn token, err\n\t\t}\n\t} else if strings.Contains(issStr, \"accounts.google.com\") {\n\t\terr = validateGoogleJWTClaims(claims)\n\t\tif err != nil {\n\t\t\treturn token, err\n\t\t}\n\t}\n\n\tif token.Valid {\n\t\treturn token, nil\n\t}\n\n\treturn token, err\n}",
"func (t *jwtMgr) validateJWTToken(token string) (*jwt.Claims, map[string]interface{}, bool, error) {\n\ttok, err := jwt.ParseSigned(token)\n\tif err != nil {\n\t\tt.logger.Errorf(\"Unable to parse JWT token: Err: %v\", err)\n\t\treturn nil, nil, false, err\n\t}\n\t// there should be only one signature\n\tif len(tok.Headers) != 1 {\n\t\tt.logger.Errorf(\"Multiple signatures present in JWT\")\n\t\treturn nil, nil, false, ErrInvalidSignature\n\t}\n\t// signature algorithm type should be HS512\n\tif jose.SignatureAlgorithm(tok.Headers[0].Algorithm) != signatureAlgorithm {\n\t\tt.logger.Errorf(\"Incorrect signature algorithm type\")\n\t\treturn nil, nil, false, ErrInvalidSignature\n\t}\n\t// standard jwt claims like sub, iss, exp\n\tstandardClaims := jwt.Claims{}\n\t// venice custom claims\n\tprivateClaims := make(map[string]interface{})\n\tif err := tok.Claims(t.secret, &standardClaims, &privateClaims); err != nil {\n\t\tt.logger.Errorf(\"Unable to parse claims in JWT token: Err: %v\", err)\n\t\treturn nil, nil, false, err\n\t}\n\t// check if token is not expired and has correct issuer\n\tif err := standardClaims.Validate(jwt.Expected{Issuer: issuerClaimValue, Time: time.Now()}); err != nil {\n\t\treturn nil, nil, false, err\n\t}\n\n\treturn &standardClaims, privateClaims, true, nil\n}",
"func (a *AuthService) ValidateJWT(tokenString string, customClaims *builder.CustomClaims) (*jwt.Token, error) {\n\ttoken, err := jwt.ParseWithClaims(tokenString, customClaims, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\t\treturn []byte(*a.signedSecret), nil\n\t})\n\treturn token, err\n}",
"func checkJWT(jwtToken string, exporterConfig config.Config) error {\n\ttoken, err := jwt.Parse(jwtToken, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\treturn []byte(exporterConfig.BearerAuth.SigningKey), nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"not authorized\")\n}",
"func ValidateToken(tokenString string, w http.ResponseWriter) (Claims, error) {\n\tclaims := Claims{}\n\tjwtKey := []byte(config.Configuration.TokenPrivateKey)\n\n\t// The token string is parsed, decoded and stored into the given Claims struct\n\ttoken, err := jwt.ParseWithClaims(tokenString, &claims,\n\t\tfunc(token *jwt.Token) (interface{}, error) {\n\t\t\treturn jwtKey, nil\n\t\t})\n\n\t// Check if the token has expired according to the expiry time fixed during the sign in\n\tif !token.Valid {\n\t\terr = ExpiredToken\n\t\tMakeErrorResponse(w, http.StatusUnauthorized, err.Error())\n\t\tlog.Println(err.Error())\n\t\treturn claims, err\n\t}\n\n\t// Check if the token has been signed with the private key of the api gateway\n\tif err != nil {\n\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\t// If the token is expired or has not been signed according to the api gateway key, an Unauthorization code\n\t\t\t// is returned in both cases, but a different message is provided to the client.\n\t\t\tMakeErrorResponse(w, http.StatusUnauthorized, \"Wrong credentials\")\n\t\t\tlog.Println(\"Wrong credentials\")\n\t\t\treturn claims, err\n\t\t}\n\n\t\tMakeErrorResponse(w, http.StatusBadRequest, \"Malformed token\")\n\t\tlog.Println(\"Malformed token\")\n\t\treturn claims, err\n\t}\n\n\treturn claims, nil\n\n}",
"func (s *service) ValidateToken(ctx context.Context, t string) (jwt.Token, error) {\n\n\tset, err := s.getKeySet(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to fetch JWK Set: %w\", err)\n\t}\n\n\ttoken, err := jwt.ParseString(\n\t\tt,\n\t\tjwt.WithKeySet(set),\n\t\tjwt.WithAudience(s.config.Audience),\n\t\tjwt.WithIssuer(s.config.Issuer),\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse token: %w\", err)\n\t}\n\n\treturn token, nil\n\n}",
"func (a *Service) ValidateJweToken(token string) (map[string]interface{}, *error_utils.ApiError) {\n\n\t// parse token string\n\tclaims, err := a.parseTokenString(token)\n\tif err != nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(err.Error())\n\t}\n\n\t// validate dates\n\tif claims[\"orig_iat\"] == nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Orig Iat is missing\")\n\t}\n\n\t// try convert to float64\n\tif _, ok := claims[\"orig_iat\"].(float64); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Orig Iat must be float64 format\")\n\t}\n\n\t// get value and validate\n\torigIat := int64(claims[\"orig_iat\"].(float64))\n\tif origIat < a.timeFunc().Add(-a.maxRefresh).Unix() {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Token is expired\")\n\t}\n\n\t// check if exp exists in map\n\tif claims[\"exp\"] == nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Exp is missing\")\n\t}\n\n\t// try convert to float 64\n\tif _, ok := claims[\"exp\"].(float64); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Exp must be float64 format\")\n\t}\n\n\t// get value and validate\n\texp := int64(claims[\"exp\"].(float64))\n\tif exp < a.timeFunc().Unix(){\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Token is expired\")\n\t}\n\t// validate dates\n\n\t// validate issuer\n\t// check if iss exists in map\n\tif claims[\"iss\"] == nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Iss is missing\")\n\t}\n\n\t// try convert to string\n\tif _, ok := claims[\"iss\"].(string); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Iss must be string format\")\n\t}\n\n\t// get value and validate\n\tissuer := claims[\"iss\"]\n\tif issuer != a.issuer{\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Invalid issuer\")\n\t}\n\t// validate issuer\n\n\treturn claims, nil\n}",
"func ValidateToken(tokenString string, secretSignKey []byte) (string, error) {\n\ttoken, err := jwt.ParseWithClaims(tokenString, &Claims{}, func(token *jwt.Token) (interface{}, error) {\n\t\treturn secretSignKey, nil\n\t})\n\n\tif claims, ok := token.Claims.(*Claims); ok && token.Valid {\n\t\t// fmt.Printf(\"%v %v\", claims.Email, claims.StandardClaims.ExpiresAt)\n\t\treturn claims.Email, nil\n\t}\n\treturn \"\", err\n}",
"func ValidateToken(tokenString string, lookup jwt.Keyfunc) (*TokenClaims, error) {\n\t// Parse takes the token string and a function for looking up the key.\n\ttoken, err := jwt.ParseWithClaims(tokenString, &TokenClaims{}, lookup)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Verify signing algorithm and token\n\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok || !token.Valid {\n\t\treturn nil, ErrInvalidToken\n\t}\n\n\t// Verify the claims and token.\n\tif claim, ok := token.Claims.(*TokenClaims); ok {\n\t\treturn claim, nil\n\t}\n\treturn nil, ErrInvalidToken\n}",
"func CheckJWT(tokenStr string) error {\n\t_, err := parseJWT(tokenStr)\n\treturn err\n}",
"func (s *jwtService) ValidateToken(tokenString string) (*jwt.Token, error) {\n\ttryGetSecretKey := func(token *jwt.Token) (interface{}, error) {\n\t\t// check token signing method\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\t\treturn []byte(s.secretKey), nil\n\t}\n\n\treturn jwt.Parse(tokenString, tryGetSecretKey)\n}",
"func ValidateJWT(next http.HandlerFunc) http.HandlerFunc{\n\treturn func(writer http.ResponseWriter, request *http.Request) {\n\t\tclaims, err := Utils.ProcessToken(request.Header.Get(\"Authorization\"))\n\t\tif err != nil {\n\t\t\thttp.Error(writer, \"Error in JWT \" + err.Error(), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\t_, exist, _ := Repositories.ExistUser(claims.Email)\n\t\tif exist==true {\n\t\t\tEmail = claims.Email\n\t\t\tIdUser = claims.Id.Hex()\n\t\t}\n\t\tnext.ServeHTTP(writer, request)\n\t}\n}",
"func ValidateToken(jwtToken string) (map[string]interface{}, error) {\n\tcleanJWT := strings.Replace(jwtToken, \"Bearer \", \"\", -1)\n\ttokenData := jwt.MapClaims{}\n\ttoken, err := jwt.ParseWithClaims(cleanJWT, tokenData, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(\"TokenPassword\"), nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !token.Valid {\n\t\treturn nil, jwt.ErrInvalidKey\n\t}\n\n\tvar data map[string]interface{}\n\tcopier.Copy(&data, tokenData[\"payload\"])\n\treturn data, nil\n}",
"func ValidateJWT(jwt string, session *r.Session) string {\n\tvar auth interface{}\n\ttokenTable := os.Getenv(\"TOKENTABLE\")\n\tdb := os.Getenv(\"DB\")\n\tvar username string\n\tif jwt != \"\" {\n\t\tparts := strings.Split(jwt, \".\")\n\t\tif len(parts) == 3 {\n\t\t\tu, _ := b64.URLEncoding.DecodeString(parts[0])\n\t\t\t// s, _ := b64.URLEncoding.DecodeString(parts[1])\n\t\t\th, _ := b64.URLEncoding.DecodeString(parts[2])\n\t\t\thash := computeHMAC(parts[0] + \".\" + parts[1])\n\t\t\tif hash == string(h) {\n\t\t\t\tif CheckUserExists(string(u), tokenTable, session) {\n\t\t\t\t\tusername = string(u)\n\t\t\t\t\tcur, _ := r.DB(db).Table(tokenTable).GetAllByIndex(\"token\", jwt).Run(session)\n\t\t\t\t\tcur.One(&auth)\n\t\t\t\t\tif auth != nil {\n\t\t\t\t\t\t// Token exists in table, ie valid token\n\t\t\t\t\t\t// Delete the currently used token from tokentable\n\t\t\t\t\t\tr.DB(db).Table(tokenTable).GetAllByIndex(\"username\", username).Delete().Run(session)\n\t\t\t\t\t\treturn username\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}",
"func CheckJwtToken(tokenString string) (Token, error) {\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(configurations.Configuration.Security.JWTSecret), nil\n\t})\n\tif err != nil {\n\t\treturn Token{Authorized: false}, err\n\t}\n\n\tclaims, ok := token.Claims.(jwt.MapClaims)\n\tif !ok || !token.Valid {\n\t\treturn Token{Authorized: false}, fmt.Errorf(\"invalid Token\")\n\t}\n\n\texps := claims[\"exp\"].(float64)\n\tif int64(exps) < time.Now().Unix() {\n\t\treturn Token{Authorized: false}, fmt.Errorf(\"expired token\")\n\t}\n\n\treturn Token{\n\t\tAuthorized: true,\n\t\tID: claims[\"id\"].(string),\n\t\tInstitution: claims[\"institution\"].(string),\n\t\tPermission: claims[\"permission\"].(string),\n\t}, nil\n}",
"func ValidateToken(secretKey string, token string) error {\n\n\tif !sjwt.Verify(token, []byte(secretKey)) {\n\t\treturn errors.New(\"Token isn't valid\")\n\t}\n\n\tclaims, err := sjwt.Parse(token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tissuer, err := claims.GetIssuer()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif issuer != \"goUpload\" {\n\t\treturn errors.New(\"No valid Issuer \")\n\t}\n\n\t// Validate will check(if set) Expiration At and Not Before At dates\n\terr = claims.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n\n}",
"func ValidateToken(myToken string) (bool, string) {\n\ttoken, err := jwt.ParseWithClaims(myToken, &CustomClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(jwtKey), nil\n\t})\n\n\tif err != nil {\n\t\treturn false, \"\"\n\t}\n\n\tclaims := token.Claims.(*CustomClaims)\n\treturn token.Valid, claims.Username\n}",
"func ValidateToken(r *http.Request) error {\n\ttoken, err := VerifyToken(r)\n\tif err != nil {\n\t return err\n\t}\n\tif _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {\n\t return err\n\t}\n\treturn nil\n}",
"func ValidateToken(authClient umAPI.UserManagementApiClient) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\ttoken := c.MustGet(\"encodedToken\").(string)\n\t\tparsedToken, err := authClient.ValidateJWT(context.Background(), &umAPI.JWTRequest{\n\t\t\tToken: token,\n\t\t})\n\t\tif err != nil {\n\t\t\tst := status.Convert(err)\n\t\t\tlogger.Error.Println(st.Message())\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": \"error during token validation\"})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t\tc.Set(\"validatedToken\", parsedToken)\n\t\tc.Next()\n\t}\n}",
"func (m *middlewareStruct) CheckJWTToken(c *gin.Context) {\n\tbearToken := c.GetHeader(\"Authorization\")\n\n\tstrArr := strings.Split(bearToken, \" \")\n\tif len(strArr) == 2 {\n\n\t\ttoken, err := m.service.VerifyToken(strArr[1], os.Getenv(\"ACCESS_SECRET\"))\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{\"error\": err.Error()})\n\t\t\treturn\n\t\t}\n\n\t\tclaims, _ := token.Claims.(jwt.MapClaims)\n\n\t\tc.Set(\"user_id\", claims[\"user_id\"])\n\n\t\treturn\n\n\t}\n\n\tc.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{\"error\": \"Token inválido\"})\n\treturn\n}",
"func (j *JWTUtil) ValidateToken(tokenString string) (bool, error) {\n\t// parse the token string\n\t_, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\t// validate the signing method\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"invalid signing method: %s\", token.Method.Alg())\n\t\t}\n\n\t\treturn []byte(os.Getenv(\"JWT_SECRET\")), nil\n\t})\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}",
"func ValidateToken(token string) (bool, UserSignature) {\n\tsignature := UserSignature{}\n\tdecode, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {\n\t\treturn jwtKey, nil\n\t})\n\n\tclaims := decode.Claims.(jwt.MapClaims)\n\tif err != nil {\n\t\treturn false, signature\n\t}\n\tif !decode.Valid {\n\t\treturn false, signature\n\t}\n\tsignature.UUID = claims[\"uuid\"].(string)\n\tsignature.Role = claims[\"role\"].(string)\n\treturn true, signature\n}",
"func (m *JWTManager) ValidateToken(tokenString string) (*model.Token, error) {\n\ttoken, err := jwt.ParseWithClaims(tokenString, &jwt.StandardClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\tswitch token.Method {\n\t\tcase jwt.SigningMethodHS256:\n\t\t\treturn m.OP.PrivateKey, nil\n\t\tcase jwt.SigningMethodRS256:\n\t\t\treturn m.OP.PublicKey, nil\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"JWT Token is not Valid\")\n\t\t}\n\t})\n\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase *jwt.ValidationError:\n\t\t\tvErr := err.(*jwt.ValidationError)\n\n\t\t\tswitch vErr.Errors {\n\t\t\tcase jwt.ValidationErrorExpired:\n\t\t\t\treturn nil, errors.New(\"Token Expired, get a new one\")\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"[INFO][Auth Middleware] %s\", vErr.Error())\n\t\t\t\treturn nil, errors.New(\"JWT Token ValidationError\")\n\t\t\t}\n\t\t}\n\t\treturn nil, errors.New(\"JWT Token Error Parsing the token or empty token\")\n\t}\n\tclaims, ok := token.Claims.(*jwt.StandardClaims)\n\tif !ok || !token.Valid {\n\t\treturn nil, errors.New(\"JWT Token is not Valid\")\n\t}\n\n\tif userID, err := strconv.Atoi(claims.Subject); err == nil {\n\t\treturn &model.Token{UserID: int64(userID)}, nil\n\t}\n\n\tv := tokenFormat{}\n\n\terr = json.NewDecoder(strings.NewReader(claims.Subject)).Decode(&v)\n\tif err != nil {\n\t\tlog.Printf(\"[INFO][Auth Middleware] TokenManager was not able to decode the Subject: %s\", claims.Subject)\n\t\treturn nil, errors.New(\"JWT token has a unknown subject format\")\n\t}\n\n\tt := model.Token{\n\t\tUserID: v.UserID,\n\t\tPermissions: make(map[string]bool),\n\t\tToken: tokenString,\n\t}\n\tif v.Permissions != nil {\n\t\tfor _, p := range *v.Permissions {\n\t\t\tt.Permissions[p] = true\n\t\t}\n\t}\n\n\treturn &t, err\n}",
"func ValidateAuthTokenWithJWT(\n\tctx types.Context,\n\tconfig *types.AuthConfig,\n\tencJWT string) (*types.AuthToken, error) {\n\n\tlf := map[string]interface{}{\"encJWT\": encJWT}\n\tctx.WithFields(lf).Debug(\"validating jwt\")\n\n\tjwt, err := jws.ParseJWT([]byte(encJWT))\n\tif err != nil {\n\t\tctx.WithFields(lf).WithError(err).Error(\"error parsing jwt\")\n\t\treturn nil, &types.ErrSecTokInvalid{InvalidToken: true, InnerError: err}\n\t}\n\n\tsm := parseSigningMethod(config.Alg)\n\tlf[\"signingMethod\"] = sm.Alg()\n\tctx.WithFields(lf).Debug(\"parsed jwt signing method\")\n\n\tif err := jwt.Validate(config.Key, sm); err != nil {\n\t\tctx.WithFields(lf).WithError(err).Error(\"error validating jwt\")\n\t\treturn nil, &types.ErrSecTokInvalid{InvalidSig: true, InnerError: err}\n\t}\n\n\tctx.WithFields(lf).Debug(\"validated jwt signature\")\n\n\tif len(jwt.Claims()) == 0 {\n\t\tctx.WithFields(lf).Error(\"jwt missing claims\")\n\t\treturn nil, &types.ErrSecTokInvalid{}\n\t}\n\n\tvar (\n\t\tsub string\n\t\tiat time.Time\n\t\texp time.Time\n\t\tnbf time.Time\n\t\tok bool\n\t)\n\n\tif sub, ok = jwt.Claims().Subject(); !ok {\n\t\tctx.WithFields(lf).Error(\"jwt missing sub claim\")\n\t\treturn nil, &types.ErrSecTokInvalid{MissingClaim: \"sub\"}\n\t}\n\n\tif iat, ok = jwt.Claims().IssuedAt(); !ok {\n\t\tctx.WithFields(lf).Error(\"jwt missing iat claim\")\n\t\treturn nil, &types.ErrSecTokInvalid{MissingClaim: \"iat\"}\n\t}\n\n\tif exp, ok = jwt.Claims().Expiration(); !ok {\n\t\tctx.WithFields(lf).Error(\"jwt missing exp claim\")\n\t\treturn nil, &types.ErrSecTokInvalid{MissingClaim: \"exp\"}\n\t}\n\n\tif nbf, ok = jwt.Claims().NotBefore(); !ok {\n\t\tctx.WithFields(lf).Error(\"jwt missing nbf claim\")\n\t\treturn nil, &types.ErrSecTokInvalid{MissingClaim: \"nbf\"}\n\t}\n\n\ttok := &types.AuthToken{\n\t\tSubject: sub,\n\t\tIssuedAt: iat.UTC().Unix(),\n\t\tExpires: exp.UTC().Unix(),\n\t\tNotBefore: nbf.UTC().Unix(),\n\t}\n\n\tlf[\"sub\"] = tok.Subject\n\tlf[\"iat\"] = tok.IssuedAt\n\tlf[\"exp\"] = tok.Expires\n\tlf[\"nbf\"] = tok.NotBefore\n\n\tif err := validateAuthTokenAllowed(ctx, config, lf, tok); err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx.WithFields(lf).Info(\"validated security token\")\n\treturn tok, nil\n}",
"func ValidateToken(tokenString string) (bool, error) {\n\t_, err := ParseToken(tokenString)\n\n\t// I'm not interested in getting any of the information off the jwt, just verification and expiration\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}",
"func ValidateToken(tokenString string) (string, error) {\n\tsecret := []byte(\"kalle4ever\")\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\treturn secret, nil\n\t})\n\n\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\treturn claims[\"username\"].(string), nil\n\t}\n\treturn \"\", err\n}",
"func validateJWT(agentJWT string, key []byte) (uuid.UUID, error) {\n\tvar agentID uuid.UUID\n\tif core.Debug {\n\t\tmessage(\"debug\", \"Entering into http2.ValidateJWT\")\n\t\tmessage(\"debug\", fmt.Sprintf(\"Input JWT: %v\", agentJWT))\n\t}\n\n\tclaims := jwt.Claims{}\n\n\t// Parse to make sure it is a valid JWT\n\tnestedToken, err := jwt.ParseSignedAndEncrypted(agentJWT)\n\tif err != nil {\n\t\treturn agentID, fmt.Errorf(\"there was an error parsing the JWT:\\r\\n%s\", err.Error())\n\t}\n\n\t// Decrypt JWT\n\ttoken, errToken := nestedToken.Decrypt(key)\n\tif errToken != nil {\n\t\treturn agentID, fmt.Errorf(\"there was an error decrypting the JWT:\\r\\n%s\", errToken.Error())\n\t}\n\n\t// Deserialize the claims and validate the signature\n\terrClaims := token.Claims(key, &claims)\n\tif errClaims != nil {\n\t\treturn agentID, fmt.Errorf(\"there was an deserializing the JWT claims:\\r\\n%s\", errClaims.Error())\n\t}\n\n\tagentID = uuid.FromStringOrNil(claims.ID)\n\n\tAgentWaitTime, errWait := agents.GetAgentFieldValue(agentID, \"WaitTime\")\n\t// An error will be returned during OPAQUE registration & authentication\n\tif errWait != nil {\n\t\tif core.Debug {\n\t\t\tmessage(\"debug\", fmt.Sprintf(\"there was an error getting the agent's wait time:\\r\\n%s\", errWait.Error()))\n\t\t}\n\t}\n\tif AgentWaitTime == \"\" {\n\t\tAgentWaitTime = \"10s\"\n\t}\n\n\tWaitTime, errParse := time.ParseDuration(AgentWaitTime)\n\tif errParse != nil {\n\t\treturn agentID, fmt.Errorf(\"there was an error parsing the agent's wait time into a duration:\\r\\n%s\", errParse.Error())\n\t}\n\t// Validate claims; Default Leeway is 1 minute; Set it to 1x the agent's WaitTime setting\n\terrValidate := claims.ValidateWithLeeway(jwt.Expected{\n\t\tTime: time.Now(),\n\t}, WaitTime)\n\n\tif errValidate != nil {\n\t\tif core.Verbose {\n\t\t\tmessage(\"warn\", fmt.Sprintf(\"The JWT claims were not valid for %s\", agentID))\n\t\t\tmessage(\"note\", fmt.Sprintf(\"JWT Claim Expiry: %s\", claims.Expiry.Time()))\n\t\t\tmessage(\"note\", fmt.Sprintf(\"JWT Claim Issued: %s\", claims.IssuedAt.Time()))\n\t\t}\n\t\treturn agentID, errValidate\n\t}\n\tif core.Debug {\n\t\tmessage(\"debug\", fmt.Sprintf(\"agentID: %s\", agentID.String()))\n\t\tmessage(\"debug\", \"Leaving http2.ValidateJWT without error\")\n\t}\n\t// TODO I need to validate other things like token age/expiry\n\treturn agentID, nil\n}",
"func CheckJWTToken(auth string) (*Claims, error) {\n\tif !strings.HasPrefix(auth, \"JWT \") {\n\t\treturn &Claims{}, errors.New(\"tokenstring should contains 'JWT'\")\n\t}\n\ttoken := strings.Split(auth, \"JWT \")[1]\n\ttokenClaims, err := jwt.ParseWithClaims(token, &Claims{}, func(token *jwt.Token) (i interface{}, err error) {\n\t\treturn jwtSecret, nil\n\t})\n\tif err != nil {\n\t\tmessage := \"\"\n\t\tif ve, ok := err.(*jwt.ValidationError); ok {\n\t\t\tif ve.Errors&jwt.ValidationErrorMalformed != 0 {\n\t\t\t\tmessage = \"token is malformed\"\n\t\t\t} else if ve.Errors&jwt.ValidationErrorUnverifiable != 0 {\n\t\t\t\tmessage = \"token could not be verified because of signing problems\"\n\t\t\t} else if ve.Errors&jwt.ValidationErrorSignatureInvalid != 0 {\n\t\t\t\tmessage = \"signature validation failed\"\n\t\t\t} else if ve.Errors&jwt.ValidationErrorExpired != 0 {\n\t\t\t\tmessage = \"token is expired\"\n\t\t\t} else if ve.Errors&jwt.ValidationErrorNotValidYet != 0 {\n\t\t\t\tmessage = \"token is not yet valid before sometime\"\n\t\t\t} else {\n\t\t\t\tmessage = \"can not handle this token\"\n\t\t\t}\n\t\t}\n\t\treturn &Claims{}, errors.New(message)\n\t}\n\tif claim, ok := tokenClaims.Claims.(*Claims); ok && tokenClaims.Valid {\n\t\treturn claim, nil\n\t}\n\treturn &Claims{}, errors.New(\"token is not valid\")\n}",
"func (svc *basicAuthNService) ValidateToken(tokenString, kid string) (dto.CustomClaim, error) {\n\tclaim := dto.CustomClaim{}\n\n\tkf := func(token *stdjwt.Token) (interface{}, error) {\n\t\tkeyID := token.Header[\"kid\"].(string)\n\t\tif keyID != kid {\n\t\t\treturn claim, stdjwt.ErrInvalidKeyType\n\t\t}\n\t\treturn []byte(svcconf.C.Auth.SecretKey), nil\n\t}\n\n\ttoken, err := stdjwt.ParseWithClaims(tokenString, &claim, kf)\n\n\t// check if signature is valid\n\tif err != nil {\n\t\treturn claim, err\n\t}\n\tif token.Valid {\n\t\treturn claim, nil\n\t}\n\treturn claim, kitjwt.ErrTokenInvalid\n}",
"func (db DB) ValidateAuthJWT(token string) bool {\n\ts := db.session.Copy()\n\tdefer s.Close()\n\tcount, err := s.DB(app).C(\"auth_token\").Find(bson.M{\"bearer_token\": token}).Count()\n\tif count == 0 || err != nil {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (j *Jwt) ValidateToken(tokenStr string) bool {\n\ttoken := j.GetTokenMust(tokenStr)\n\n\tif !token.Valid {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}",
"func (r VerifyJWTRequest) Validate() error {\n\treturn validation.ValidateStruct(&r,\n\t\tvalidation.Field(&r.Jwt, validation.Required),\n\t)\n}",
"func validateJWT(w http.ResponseWriter, req *http.Request) {\n\tif klog.V(3).Enabled() {\n\t\tklog.Infof(\"request received from: %v, headers: %v\", req.RemoteAddr, req.Header)\n\t}\n\tiapJWT := req.Header.Get(\"X-Goog-IAP-JWT-Assertion\")\n\tif iapJWT == \"\" {\n\t\tklog.V(1).Infof(\"X-Goog-IAP-JWT-Assertion header not found\")\n\t\thttp.Error(w, \"\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tif audience == \"\" {\n\t\tklog.V(1).ErrorS(fmt.Errorf(\"token cannot be validated, empty audience, check for previous errors\"), \"\")\n\t\thttp.Error(w, \"\", http.StatusForbidden)\n\t\treturn\n\t}\n\n\tif issuer == \"\" {\n\t\tklog.V(1).ErrorS(fmt.Errorf(\"token cannot be validated, empty issuer, check for previous errors\"), \"\")\n\t\thttp.Error(w, \"\", http.StatusForbidden)\n\t\treturn\n\t}\n\n\tctx := context.Background()\n\t// we pass empty as audience here because we will validate it later\n\tpayload, err := jwtValidator.Validate(ctx, iapJWT, \"\")\n\tklog.V(3).Infof(\"payload received: %+v\", payload)\n\tif err != nil {\n\t\tklog.V(1).ErrorS(err, \"error validating jwt token\")\n\t\thttp.Error(w, \"\", http.StatusForbidden)\n\t\treturn\n\t}\n\t// empty payload should not be possible\n\tif payload == nil {\n\t\tklog.V(1).ErrorS(nil, \"null payload received\")\n\t\thttp.Error(w, \"\", http.StatusForbidden)\n\t\treturn\n\t}\n\t// validate the audience\n\tif audience != payload.Audience {\n\t\tklog.V(1).ErrorS(nil, \"error validating jwt token, invalid audience, expected %s, got %s\", audience, payload.Audience)\n\t\thttp.Error(w, \"\", http.StatusForbidden)\n\t\treturn\n\t}\n\t// validate the issuer\n\tif issuer != payload.Issuer {\n\t\tklog.V(1).ErrorS(nil, \"error validating jwt token, invalid issuer, expected %s, got %s\", issuer, payload.Issuer)\n\t\thttp.Error(w, \"\", http.StatusForbidden)\n\t\treturn\n\t}\n\t// validate expired - this may be redundant - but we check it anyway\n\tif payload.Expires == 0 || payload.Expires+30 < time.Now().Unix() {\n\t\tklog.V(1).ErrorS(nil, \"error validating jwt token, expired\")\n\t\thttp.Error(w, \"\", http.StatusForbidden)\n\t\treturn\n\t}\n\n\t// validate IssuedAt - should not be in the future\n\tif payload.IssuedAt == 0 || payload.IssuedAt-30 > time.Now().Unix() {\n\t\tklog.V(1).ErrorS(nil, \"error validating jwt token, emitted in the future\")\n\t\thttp.Error(w, \"\", http.StatusForbidden)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}",
"func (s *SMJWT) Validate(tokenStr string, customClaims ...jwt.Claims) (*jwt.Token, error) {\n\tif s.publicKey == nil {\n\t\treturn nil, ErrNoPublicKeySpecified\n\t}\n\n\tvar claimsType jwt.Claims\n\tif len(customClaims) > 0 {\n\t\tclaimsType = customClaims[0]\n\t} else {\n\t\tclaimsType = &jwt.StandardClaims{}\n\t}\n\n\ttoken, err := jwt.ParseWithClaims(tokenStr, claimsType, func(token *jwt.Token) (i interface{}, e error) {\n\t\treturn s.publicKey, nil\n\t})\n\n\t// a-ok!\n\tif err == nil && token.Valid {\n\t\treturn token, nil\n\t}\n\n\tif ve, ok := err.(*jwt.ValidationError); ok && ve.Errors&jwt.ValidationErrorExpired > 0 {\n\t\treturn nil, ErrExpired\n\t}\n\n\treturn nil, err\n}",
"func JWTVerify (jwtToken string) (error) {\n\t\n\tparts := strings.Split(jwtToken, \".\")\n\tif len(parts) != 2 {\n\t\treturn errors.New(\"Incorrect JWT format\")\n\t}\n\n\thasher := hmac.New(sha256.New, []byte(config.Config.Server.SecretSalt))\n\thasher.Write([]byte(parts[0]))\n\n\tsignature, err := DecodeSegment(parts[1])\n\tif err != nil {\n\t\treturn errors.New(\"Can't Decode Claim\")\n\t}\n\n\tif !hmac.Equal(signature, hasher.Sum(nil)) {\n\t\treturn errors.New(\"Signature is incorrect\")\n\t}\n\treturn nil\n}",
"func Validate(token string) (bool, string) {\n\tvar claims Claims\n\n\tt, err := jwt.ParseWithClaims(token, &claims, jwtKeyFunc)\n\tif err != nil {\n\t\treturn false, \"\"\n\t}\n\n\tif time.Now().After(time.Unix(claims.ExpirationUTC, 0)) {\n\t\treturn false, \"\"\n\t}\n\n\treturn t.Valid, claims.UserID\n}",
"func CheckTokenValid(myToken string) error {\n\tlog.Println(myToken)\n\ttoken, err := jwt.Parse(myToken, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\t//secret is a []byte containing your secret, e.g. []byte(\"my_secret_key\")\n\t\treturn secret, nil\n\t})\n\n\tif err == nil && token.Valid {\n\t\treturn nil\n\t}\n\treturn err\n}",
"func (t *Token) Validate() error {\n\tif t.JWT == nil {\n\t\treturn ErrEmptyToken\n\t}\n\tif !t.new && !t.JWT.Valid {\n\t\treturn ErrTokenInvalid\n\t}\n\n\treturn nil\n}",
"func (jwtAuth *JWTAuth) TokenValid(r *http.Request) error {\n\ttokenStr := jwtAuth.ExtractToken(r)\n\ttoken, err := verifyToken(tokenStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (gen *securityGenerator) ValidateJwt(token string) (*Claims, error) {\n\tt, err := jwt.ParseWithClaims(token, &Claims{}, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(gen.config.JwtSalt), nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !t.Valid {\n\t\treturn nil, ErrInvalidToken\n\t}\n\n\tclaims, ok := t.Claims.(*Claims)\n\tif !ok {\n\t\treturn nil, ErrUnexpectedJwtError\n\t}\n\n\treturn claims, nil\n}",
"func (m *manager) Validate(r *http.Request) error {\n\ttokenString, err := getToken(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"unexpected signing method was used in JWT token making it invalid: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\treturn m.secret, nil\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s:%v\", \"invalid JWT token\", err)\n\t}\n\n\tif token == nil {\n\t\treturn fmt.Errorf(\"%s\", \"invalid JWT token\")\n\t}\n\n\tif !token.Valid {\n\t\treturn fmt.Errorf(\"%s\", \"invalid JWT token\")\n\t}\n\n\tfor i := range m.options {\n\t\topt, ok := m.options[i].(*option)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"error in type assertion in jwt token\")\n\t\t}\n\n\t\tswitch opt.optionType {\n\t\tcase optLifeSpan: // do nothing, this option is for the client side\n\t\tcase optEnforceExpiry: // if enforce is set, claims must have expiry\n\t\t\tclaims, ok := token.Claims.(jwt.MapClaims)\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"error in type assertion in jwt claims\")\n\t\t\t}\n\n\t\t\tif _, ok := claims[exp]; !ok {\n\t\t\t\treturn fmt.Errorf(\"all claims must have expiry in their claims\")\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"invalid option type\")\n\t\t}\n\t}\n\n\treturn nil\n}",
"func Validate(page http.HandlerFunc) http.HandlerFunc {\n\treturn http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {\n\t\tvar tokenJson string\n\t\ttokenHeader, ok := req.Header[\"Authorization\"]\n\t\tif ok && len(tokenHeader) >= 1 {\n\t\t\ttokenJson = strings.TrimPrefix(tokenHeader[0], \"Bearer \")\n\t\t}\n\n\t\tif tokenJson == \"\" {\n\t\t\thttp.Error(res, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tvar tokenArray map[string]string\n\t\tif err := json.Unmarshal([]byte(tokenJson), &tokenArray); err != nil {\n\t\t\tpanic(database.ErrorResponse{Error: err.Error(), StackTrace: string(debug.Stack())})\n\t\t}\n\n\t\tparsedToken, err := jwt.ParseWithClaims(tokenArray[\"token\"], &database.JwtData{}, func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte(database.JsonKey), nil\n\t\t})\n\t\tif err != nil {\n\t\t\thttp.Error(res, err.Error(), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tif jwtData, ok := parsedToken.Claims.(*database.JwtData); ok && parsedToken.Valid {\n\t\t\tctx := context.WithValue(req.Context(), database.MyKey, *jwtData)\n\t\t\tpage(res, req.WithContext(ctx))\n\t\t} else {\n\t\t\thttp.Error(res, err.Error(), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t})\n}",
"func ValidateToken(token string) interface{} {\n\tfmt.Println(\"Starting token validation...\")\n\tclaims := jwt.MapClaims{}\n\t_, err := jwt.ParseWithClaims(token, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(Secret), nil\n\t})\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn map[string]string{\"User\": \"\", \"Role\": \"\"}\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(\"ERROR: \", err)\n\t\treturn map[string]string{\"User\": \"\", \"Role\": \"\"}\n\t}\n\n\tfmt.Println(claims)\n\n\tudatos := make(map[string]string)\n\tudatos[\"User\"] = claims[\"user\"].(string)\n\tudatos[\"Role\"] = claims[\"role\"].(string)\n\n\treturn udatos\n}",
"func ValidateToken(signedtoken string) (bool, *jwt.Token, error) {\n\n\ttoken, err := parseToken(signedtoken) //Retrive decoded token\n\n\tif err != nil || !token.Valid {\n\t\tfmt.Println(err.Error())\n\t\treturn false, token, err\n\t}\n\n\treturn true, token, err\n}",
"func (t *jwtMgr) ValidateToken(token string) (map[string]interface{}, bool, error) {\n\tif !validateTokenFormat(token) {\n\t\treturn nil, false, ErrInvalidTokenFormat\n\t}\n\tstandardClaims, privateClaims, ok, err := t.validateJWTToken(fmt.Sprintf(\"%s.%s\", t.header, token))\n\tif !ok {\n\t\treturn nil, ok, err\n\t}\n\t// collect all claims in a map\n\tprivateClaims[SubClaim] = standardClaims.Subject\n\tprivateClaims[IssuerClaim] = standardClaims.Issuer\n\tprivateClaims[IssuedAtClaim] = standardClaims.IssuedAt\n\tprivateClaims[ExpClaim] = standardClaims.Expiry\n\n\treturn privateClaims, true, nil\n}",
"func (v *verifierPrivate) ValidateTokenAndGetClaims(tokenString string, customClaims interface{}) (*Token, error) {\n\n\t// let us check if the verifier is already expired. If it is just return verifier expired error\n\t// The caller has to re-initialize the verifier.\n\ttoken := Token{}\n\ttoken.standardClaims = &jwt.StandardClaims{}\n\tparsedToken, err := jwt.ParseWithClaims(tokenString, token.standardClaims, func(token *jwt.Token) (interface{}, error) {\n\n\t\tif keyIDValue, keyIDExists := token.Header[\"kid\"]; keyIDExists {\n\n\t\t\tkeyIDString, ok := keyIDValue.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"kid (key id) in jwt header is not a string : %v\", keyIDValue)\n\t\t\t}\n\n\t\t\tif matchPubKey, found := v.pubKeyMap[keyIDString]; !found {\n\t\t\t\treturn nil, &MatchingCertNotFoundError{keyIDString}\n\t\t\t} else {\n\t\t\t\t// if the certificate just expired.. we need to return appropriate error\n\t\t\t\t// so that the caller can deal with it appropriately\n\t\t\t\tnow := time.Now()\n\t\t\t\tif now.After(matchPubKey.expTime) {\n\t\t\t\t\treturn nil, &MatchingCertJustExpired{keyIDString}\n\t\t\t\t}\n\t\t\t\t// if the verifier expired, we need to use a new instance of the verifier\n\t\t\t\tif time.Now().After(v.expiration) {\n\t\t\t\t\treturn nil, &VerifierExpiredError{v.expiration}\n\t\t\t\t}\n\t\t\t\treturn matchPubKey.pubKey, nil\n\t\t\t}\n\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"kid (key id) field missing in token. field is mandatory\")\n\t\t}\n\t})\n\n\tif err != nil {\n\t\tif jwtErr, ok := err.(*jwt.ValidationError); ok {\n\t\t\tswitch e := jwtErr.Inner.(type) {\n\t\t\tcase *MatchingCertNotFoundError, *VerifierExpiredError, *MatchingCertJustExpired:\n\t\t\t\treturn nil, e\n\t\t\t}\n\t\t\treturn nil, jwtErr\n\t\t}\n\t\treturn nil, err\n\t}\n\ttoken.jwtToken = parsedToken\n\t// so far we have only got the standardClaims parsed. We need to now fill the customClaims\n\n\tparts := strings.Split(tokenString, \".\")\n\t// no need check for the number of segments since the previous ParseWithClaims has already done this check.\n\t// therefor the following is redundant. If we change the implementation, will need to revisit\n\t//if len(parts) != 3 {\n\t//\treturn nil, \"jwt token to be parsed seems to be in \"\n\t//}\n\n\t// parse Claims\n\tvar claimBytes []byte\n\n\tif claimBytes, err = jwt.DecodeSegment(parts[1]); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not decode claims part of the jwt token\")\n\t}\n\tdec := json.NewDecoder(bytes.NewBuffer(claimBytes))\n\terr = dec.Decode(customClaims)\n\ttoken.customClaims = customClaims\n\n\treturn &token, nil\n}",
"func IsJWTValid(token string) bool {\n\tverifiedToken, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\t\treturn []byte(os.Getenv(\"JWT_SECRET\")), nil\n\t})\n\tif err != nil {\n\t\treturn false\n\t}\n\tif claims, ok := verifiedToken.Claims.(jwt.MapClaims); ok && verifiedToken.Valid {\n\t\tparsedExpires, err := strconv.ParseInt(fmt.Sprintf(\"%v\", claims[\"expires\"]), 10, 64)\n\t\tif err == nil {\n\t\t\treturn parsedExpires > time.Now().Unix()\n\t\t}\n\t}\n\treturn false\n}",
"func (jp JWTProvider) Validate(tokenString string) bool {\n\tts := strings.Replace(tokenString, \"Bearer \", \"\", -1)\n\ttoken, err := jwt.Parse(ts, jp.verify)\n\tif err != nil {\n\t\tlogrus.Errorln(\"Error at token verification \", err)\n\t\treturn false\n\t}\n\treturn token.Valid\n}",
"func validateSignatureAgainstKey(token *jwt.Token, tokenParts []string, key interface{}) error {\n\t// jwt.SigningMethod.Verify requires signing string and signature as separate inputs\n\treturn token.Method.Verify(strings.Join(tokenParts[0:2], \".\"), token.Signature, key)\n}",
"func (s *userService) ValidateToken(ctx context.Context, token *pb.Token) (tokenOut *pb.Token, err error) {\n\tt, err := jwt.Parse(token.Token, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(config.Cfg.Jwt.Key), nil\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\tif !t.Valid {\n\t\terr = errors.New(\"token invalid\")\n\t\treturn\n\t}\n\tclaims := t.Claims.(jwt.MapClaims)\n\tif claims == nil {\n\t\terr = errors.New(\"token invalid\")\n\t}\n\tvar user pb.User\n\tuser.PrettyId = claims[\"id\"].(string)\n\tuser.Name = claims[\"name\"].(string)\n\tresult, err := s.dao.GetUserTokenFromRedis(config.Cfg.Redis.TokenKey + user.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\tif result != t.Raw {\n\t\terr = errors.New(\"token invalid\")\n\t\treturn\n\t}\n\ttokenOut = &pb.Token{\n\t\tValid: true,\n\t}\n\treturn\n}",
"func (f *Janusgraph) ValidateToken(ctx context.Context, UUID strfmt.UUID, keyResponse *models.KeyGetResponse) (token string, err error) {\n\tq := gremlin.G.V().HasLabel(KEY_VERTEX_LABEL).HasString(\"uuid\", string(UUID))\n\n\tresult, err := f.client.Execute(q)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvertices, err := result.Vertices()\n\n\t// We got something that are not vertices.\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// No key is found\n\tif len(vertices) == 0 {\n\t\treturn \"\", errors.New(connutils.StaticKeyNotFound)\n\t}\n\n\tif len(vertices) != 1 {\n\t\treturn \"\", fmt.Errorf(\"More than one key with UUID '%v' found!\", UUID)\n\t}\n\n\tvertex := vertices[0]\n\tfillKeyResponseFromVertex(&vertex, keyResponse)\n\ttokenToReturn, err := base64.StdEncoding.DecodeString(vertex.AssertPropertyValue(PROP_KEY_TOKEN).AssertString())\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// If success return nil, otherwise return the error\n\treturn string(tokenToReturn), nil\n}",
"func IsTokenValid(token string, tokenExpireDurationDiff time.Duration) bool {\n\tif token == \"\" {\n\t\treturn false\n\t}\n\n\tparser := jwt.NewParser(jwt.WithLeeway(tokenExpireDurationDiff))\n\n\tvar claims jwt.RegisteredClaims\n\n\t_, _, err := parser.ParseUnverified(token, &claims)\n\treturn err == nil\n}",
"func (h *Helper) TokenValidWithToken(tokenString string) (bool, *jwt.Token) {\n\ttoken, err := jwt.ParseWithClaims(tokenString, &AppClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\t// since we only use the one private key to sign the tokens,\n\t\t// we also only use its public counter part to verify\n\t\treturn h.verifyKey, nil\n\t})\n\tif err != nil {\n\t\treturn false, nil\n\t}\n\treturn token.Valid, token\n}",
"func (k *JSONWebKeySet) VerifyJWT(jwt string) (body []byte, err error) {\n\tchunks := strings.Split(jwt, \".\")\n\tif len(chunks) != 3 {\n\t\treturn nil, fmt.Errorf(\"bad JWT - expected 3 components separated by '.'\")\n\t}\n\n\t// Check the header, grab the corresponding public key.\n\tvar hdr struct {\n\t\tAlg string `json:\"alg\"`\n\t\tKid string `json:\"kid\"`\n\t}\n\tif err := unmarshalB64JSON(chunks[0], &hdr); err != nil {\n\t\treturn nil, fmt.Errorf(\"bad JWT header - %s\", err)\n\t}\n\tif hdr.Alg != \"RS256\" {\n\t\treturn nil, fmt.Errorf(\"bad JWT - only RS256 alg is supported, not %q\", hdr.Alg)\n\t}\n\tif hdr.Kid == \"\" {\n\t\treturn nil, fmt.Errorf(\"bad JWT - missing the signing key ID in the header\")\n\t}\n\tpub, ok := k.keys[hdr.Kid]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"can't verify JWT - unknown signing key %q\", hdr.Kid)\n\t}\n\n\t// Decode the signature.\n\tsig, err := base64.RawURLEncoding.DecodeString(chunks[2])\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"bad JWT - can't base64 decode the signature - %s\", err)\n\t}\n\n\t// Check the signature. The signed string is \"b64(header).b64(body)\".\n\thasher := sha256.New()\n\thasher.Write([]byte(chunks[0]))\n\thasher.Write([]byte{'.'})\n\thasher.Write([]byte(chunks[1]))\n\tif err := rsa.VerifyPKCS1v15(&pub, crypto.SHA256, hasher.Sum(nil), sig); err != nil {\n\t\treturn nil, fmt.Errorf(\"bad JWT - bad signature\")\n\t}\n\n\t// Decode the body. There should be no errors here generally, the encoded body\n\t// is signed and the signature was already verified.\n\tbody, err = base64.RawURLEncoding.DecodeString(chunks[1])\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"bad JWT - can't base64 decode the body - %s\", err)\n\t}\n\treturn body, nil\n}",
"func (a *Authenticator) ValidateToken(jwt string) (string, error) {\n\tvalidatedToken, err := jwkkeys.ValidateGoogleClaims(a.cachedKeys, jwt, a.audience, jwkkeys.GoogleIssuers)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn validatedToken.GoogleClaims.Email, nil\n}",
"func (m *JWTMiddleware) CheckJWT(ctx context.Context) error {\n\tif !m.Config.EnableAuthOnOptions {\n\t\tif ctx.Method() == iris.MethodOptions {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Use the specified token extractor to extract a token from the request\n\ttoken, err := m.Config.Extractor(ctx)\n\n\t// If debugging is turned on, log the outcome\n\tif err != nil {\n\t\tlogf(ctx, \"Error extracting JWT: %v\", err)\n\t\treturn err\n\t}\n\n\tlogf(ctx, \"Token extracted: %s\", token)\n\n\t// If the token is empty...\n\tif token == \"\" {\n\t\t// Check if it was required\n\t\tif m.Config.CredentialsOptional {\n\t\t\tlogf(ctx, \"No credentials found (CredentialsOptional=true)\")\n\t\t\t// No error, just no token (and that is ok given that CredentialsOptional is true)\n\t\t\treturn nil\n\t\t}\n\n\t\t// If we get here, the required token is missing\n\t\tlogf(ctx, \"Error: No credentials found (CredentialsOptional=false)\")\n\t\treturn ErrTokenMissing\n\t}\n\n\t// Now parse the token\n\n\tparsedToken, err := jwtParser.Parse(token, m.Config.ValidationKeyGetter)\n\t// Check if there was an error in parsing...\n\tif err != nil {\n\t\tlogf(ctx, \"Error parsing token: %v\", err)\n\t\treturn err\n\t}\n\n\tif m.Config.SigningMethod != nil && m.Config.SigningMethod.Alg() != parsedToken.Header[\"alg\"] {\n\t\terr := fmt.Errorf(\"Expected %s signing method but token specified %s\",\n\t\t\tm.Config.SigningMethod.Alg(),\n\t\t\tparsedToken.Header[\"alg\"])\n\t\tlogf(ctx, \"Error validating token algorithm: %v\", err)\n\t\treturn err\n\t}\n\n\t// Check if the parsed token is valid...\n\tif !parsedToken.Valid {\n\t\tlogf(ctx, \"Token is invalid\")\n\t\tm.Config.ErrorHandler(ctx, ErrTokenInvalid)\n\t\treturn ErrTokenInvalid\n\t}\n\n\tif m.Config.Expiration {\n\t\tif claims, ok := parsedToken.Claims.(jwt.MapClaims); ok {\n\t\t\tif expired := claims.VerifyExpiresAt(time.Now().Unix(), true); !expired {\n\t\t\t\tlogf(ctx, \"Token is expired\")\n\t\t\t\treturn ErrTokenExpired\n\t\t\t}\n\t\t}\n\t}\n\n\tlogf(ctx, \"JWT: %v\", parsedToken)\n\n\t// If we get here, everything worked and we can set the\n\t// user property in context.\n\tctx.Values().Set(m.Config.ContextKey, parsedToken)\n\n\treturn nil\n}",
"func ValidateToken(next http.HandlerFunc) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tsession := GetSession(w, req, cookieName)\n\t\taccessToken, setbool := session.Values[\"access_token\"].(string)\n\t\tif setbool == true && accessToken == \"\" {\n\t\t\tRedirectLogin(w, req)\n\t\t\t//return\n\t\t} else if setbool == false {\n\t\t\tRedirectLogin(w, req)\n\t\t} else {\n\t\t\tvar p jwt.Parser\n\t\t\ttoken, _, _ := p.ParseUnverified(accessToken, &jwt.StandardClaims{})\n\t\t\tif err := token.Claims.Valid(); err != nil {\n\t\t\t\t//invalid\n\t\t\t\tRedirectLogin(w, req)\n\t\t\t\t//return\n\t\t\t} else {\n\t\t\t\t//valid\n\t\t\t\tnext(w, req)\n\t\t\t\t//return\n\t\t\t}\n\t\t}\n\t\t//RedirectLogin(w, r)\n\t\treturn\n\t})\n}",
"func (tokenController TokenController) ValidateTokenHandler(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\n\ttoken, err := request.ParseFromRequest(r, request.AuthorizationHeaderExtractor,\n\t\tfunc(token *jwt.Token) (interface{}, error) {\n\t\t\treturn tokenController.mySigningKey, nil\n\t\t})\n\n\tif err == nil {\n\t\tif token.Valid {\n\t\t\tnext(w, r)\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tfmt.Fprint(w, \"Token is not valid\")\n\t\t}\n\t} else {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tfmt.Fprint(w, \"Unauthorized access to this resource\")\n\t}\n}",
"func JWTValidator(certificates map[string]CertificateList) jwt.Keyfunc {\n\treturn func(token *jwt.Token) (interface{}, error) {\n\n\t\tvar certificateList CertificateList\n\t\tvar kid string\n\t\tvar ok bool\n\n\t\tif _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {\n\t\t\treturn nil, fmt.Errorf(\"unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\tif kid, ok = token.Header[\"kid\"].(string); !ok {\n\t\t\treturn nil, fmt.Errorf(\"field 'kid' is of invalid type %T, should be string\", token.Header[\"kid\"])\n\t\t}\n\n\t\tif certificateList, ok = certificates[kid]; !ok {\n\t\t\treturn nil, fmt.Errorf(\"kid '%s' not found in certificate list\", kid)\n\t\t}\n\n\t\tfor _, certificate := range certificateList {\n\t\t\treturn certificate.PublicKey, nil\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"no certificate candidates for kid '%s'\", kid)\n\t}\n}",
"func ValidateJwtMiddlewear(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\tauthToken := r.Header.Get(\"Authorization\")\n\t\tsplitToekn := strings.Split(authToken, \"Bearer\")\n\t\tif len(splitToekn) != 2 {\n\t\t\tutils.RespondError(w, 401, \"Invalid Token\")\n\t\t\treturn\n\t\t}\n\t\ttokenSring := strings.TrimSpace(splitToekn[1])\n\n\t\ttoken, err := jwt.ParseWithClaims(tokenSring, &CustomClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte([]byte(os.Getenv(\"JWT_SECRATE\"))), nil\n\t\t})\n\n\t\tif claims, ok := token.Claims.(*CustomClaims); ok && token.Valid {\n\t\t\t//todo: add logic to check token xpired or not\n\t\t\tfmt.Printf(\"%v %v\", claims.Token, claims.StandardClaims.ExpiresAt)\n\t\t} else {\n\t\t\t//todo : add some changes for catching error\n\t\t\tfmt.Println(err)\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t})\n}",
"func TokenValid(tokenString string) error {\n\ttoken, err := VerifyToken(tokenString)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func ValidateToken(bearerHeader string) (User, error) {\n\n\t// format the token string\n\ttokenString := strings.Split(bearerHeader, \" \")[1]\n\n\tvar user User\n\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\t// Don't forget to validate the alg is what you expect:\n\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\treturn []byte(\"secretkey\"), nil\n\t})\n\n\tif err != nil {\n\n\t\tfmt.Println(err)\n\t\treturn user, err\n\t}\n\n\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\t// convert the interface to the map[string]interface{}\n\t\ts := claims[\"user\"].(map[string]interface{})\n\n\t\t// create a user of User type\n\t\t// convert the s[\"userID\"] interface to string\n\t\tuser := User{s[\"userID\"].(string), s[\"name\"].(string)}\n\n\t\treturn user, nil\n\n\t}\n\n\treturn user, errors.New(\"Something went wrong\")\n\n}",
"func (mt *Mytoken) Valid() error {\n\tstandardClaims := jwt.StandardClaims{\n\t\tAudience: mt.Audience,\n\t\tExpiresAt: int64(mt.ExpiresAt),\n\t\tId: mt.ID.String(),\n\t\tIssuedAt: int64(mt.IssuedAt),\n\t\tIssuer: mt.Issuer,\n\t\tNotBefore: int64(mt.NotBefore),\n\t\tSubject: mt.Subject,\n\t}\n\tif err := errors.WithStack(standardClaims.Valid()); err != nil {\n\t\treturn err\n\t}\n\tif ok := standardClaims.VerifyIssuer(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid issuer\")\n\t}\n\tif ok := standardClaims.VerifyAudience(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid Audience\")\n\t}\n\tif ok := mt.verifyID(); !ok {\n\t\treturn errors.New(\"invalid id\")\n\t}\n\tif ok := mt.verifySubject(); !ok {\n\t\treturn errors.New(\"invalid subject\")\n\t}\n\treturn nil\n}",
"func (mt *Mytoken) Valid() error {\n\tstandardClaims := jwt.StandardClaims{\n\t\tAudience: mt.Audience,\n\t\tExpiresAt: int64(mt.ExpiresAt),\n\t\tId: mt.ID.String(),\n\t\tIssuedAt: int64(mt.IssuedAt),\n\t\tIssuer: mt.Issuer,\n\t\tNotBefore: int64(mt.NotBefore),\n\t\tSubject: mt.Subject,\n\t}\n\tif err := errors.WithStack(standardClaims.Valid()); err != nil {\n\t\treturn err\n\t}\n\tif ok := standardClaims.VerifyIssuer(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid issuer\")\n\t}\n\tif ok := standardClaims.VerifyAudience(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid Audience\")\n\t}\n\tif ok := mt.verifyID(); !ok {\n\t\treturn errors.New(\"invalid id\")\n\t}\n\tif ok := mt.verifySubject(); !ok {\n\t\treturn errors.New(\"invalid subject\")\n\t}\n\treturn nil\n}",
"func verifyToken(tokenString string) (*jwt.Token, error) {\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\t\treturn []byte(os.Getenv(\"JWT_SECRET\")), nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn token, nil\n}",
"func validatetoken(dao DAO) echo.MiddlewareFunc {\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) error {\n\n\t\t\tcook, err := c.Cookie(\"jwt\")\n\t\t\tif err != nil {\n\t\t\t\tif err == http.ErrNoCookie {\n\t\t\t\t\t// If the cookie is not set, return an unauthorized status\n\t\t\t\t\treturn c.String(http.StatusUnauthorized, \"You are not authorized\")\n\t\t\t\t}\n\t\t\t\t// For any other type of error, return a bad request status\n\t\t\t\treturn c.String(http.StatusBadRequest, \"Bad Request A\")\n\t\t\t}\n\n\t\t\t// Get the JWT string from the cookie\n\t\t\ttknStr := cook.Value\n\n\t\t\t// Initialize a new instance of `Claims`\n\t\t\tclaims := &m.Claims{}\n\n\t\t\t// Parse the JWT string and store the result in `claims`.\n\t\t\t// Note that we are passing the key in this method as well. This method will return an error\n\t\t\t// if the token is invalid (if it has expired according to the expiry time we set on sign in),\n\t\t\t// or if the signature does not match\n\t\t\ttkn, err := jwt.ParseWithClaims(tknStr, claims, func(token *jwt.Token) (interface{}, error) {\n\t\t\t\treturn jwtKey, nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\t\t\treturn c.String(http.StatusUnauthorized, fmt.Sprintf(\"You are Not Authorized %v:%s:%v\", err, err.Error(), jwt.ErrSignatureInvalid))\n\t\t\t\t}\n\t\t\t\treturn c.String(http.StatusBadRequest, fmt.Sprintf(\"Bad Request B %v:%s:%v\", err, err.Error(), jwt.ErrSignatureInvalid))\n\t\t\t}\n\t\t\tif !tkn.Valid {\n\t\t\t\treturn c.String(http.StatusUnauthorized, \"You are not authorized\")\n\t\t\t}\n\n\t\t\tprofilesexist, err := dao.DoesProfileExist(claims.ProfileId)\n\t\t\tif err != nil || !profilesexist {\n\t\t\t\treturn c.String(http.StatusUnauthorized, \"You are not authorized: 10101\")\n\t\t\t}\n\n\t\t\t// We ensure that a new token is not issued until enough time has elapsed\n\t\t\t// In this case, a new token will only be issued if the old token is within\n\t\t\t// 30 seconds of expiry. otherwise.. leave everything be\n\t\t\tif time.Unix(claims.ExpiresAt, 0).Sub(time.Now()) < 30*time.Second {\n\n\t\t\t\t// Now, create a new token for the current use, with a renewed expiration time\n\t\t\t\texpirationTime := time.Now().Add(5 * time.Minute)\n\t\t\t\tclaims.ExpiresAt = expirationTime.Unix()\n\t\t\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\t\t\t\ttokenString, err := token.SignedString(jwtKey)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn c.String(http.StatusInternalServerError, \"Crazy ass internal error\")\n\t\t\t\t}\n\t\t\t\tc.SetCookie(&http.Cookie{\n\t\t\t\t\tName: \"jwt\",\n\t\t\t\t\tValue: tokenString,\n\t\t\t\t\tExpires: expirationTime,\n\t\t\t\t})\n\n\t\t\t}\n\t\t\treturn next(c)\n\t\t}\n\t}\n}",
"func validateToken(token string) error {\n\treturn nil\n}",
"func checkJWT(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"content-type\", \"application/json\")\n\t\terr := auth.TokenValid(r)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(`{\"status\":\"error\",\"error\":true,\"msg\":%s}`, \"Unathorized\"), 401)\n\t\t\treturn\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\n\t})\n}",
"func (a *authSvc) ValidateToken(authHeader interface{}) (interface{}, error) {\n\t// validate an Authorization header token is present in the request\n\tif authHeader == nil {\n\t\treturn nil, errors.New(\"no valid Authorization token in request\")\n\t}\n\theader := authHeader.(string)\n\tif header == \"\" {\n\t\treturn nil, errors.New(\"no valid Authorization token in request\")\n\t}\n\t// validate that it is a Bearer token\n\tif !strings.HasPrefix(header, bearerTokenKey) {\n\t\treturn nil, errors.New(\"authorization token is not valid Bearer token\")\n\t}\n\tt := strings.Replace(header, bearerTokenKey, \"\", -1)\n\t// parse the header token\n\ttoken, err := jwt.Parse(t, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"there was an parsing the given token. please validate the token is for this service\")\n\t\t}\n\t\treturn a.authSecret, nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// validate token and get claims\n\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\tvar decodedToken map[string]string\n\t\terr = mapstructure.Decode(claims, &decodedToken)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn decodedToken[\"email\"], nil\n\t}\n\treturn nil, errors.New(\"invalid authorization token\") // token is not valid, return error\n}",
"func ValidateAuthToken(tokenString string) bool {\n\t// Parse takes the token string and a function for looking up the key. The latter is especially\n\t// useful if you use multiple keys for your application. The standard is to use 'kid' in the\n\t// head of the token to identify which key to use, but the parsed token (head and claims) is provided\n\t// to the callback, providing flexibility.\n\ttoken, err := jwt.Parse(tokenString, getSecretKey)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error parsing token: \", err)\n\t}\n\n\tclaims, ok := token.Claims.(jwt.MapClaims)\n\n\tif !ok || !token.Valid {\n\t\tfmt.Println(\"The claims: \", claims)\n\t\tfmt.Println(\"Token is not valid:\", err)\n\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func ValidateToken(pathHandler server.HandlerType) server.HandlerType {\n\treturn func(res http.ResponseWriter, req *http.Request) {\n\t\tlog.Printf(\"ValidateToken Received request: %v\", req)\n\t\tprovidedToken := req.Header.Get(tokenRequestHeader)\n\t\tif providedToken == \"\" {\n\t\t\tlog.Println(\"Token required; No token provided.\")\n\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\treturn\n\t\t}\n\n\t\tif actualToken, ok := generatedTokens[providedToken]; ok {\n\t\t\taccessTime := time.Now()\n\t\t\tduration := accessTime.Sub(actualToken.CreatedAt)\n\t\t\tif int(duration.Seconds()) >= actualToken.TTL {\n\t\t\t\tlog.Println(\"Token has expired\")\n\t\t\t\tdelete(generatedTokens, providedToken)\n\t\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Println(\"Token validated!\")\n\t\t\tpathHandler(res, req)\n\t\t} else {\n\t\t\tlog.Printf(\"Invalid token provided: %v\", providedToken)\n\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\treturn\n\t\t}\n\t}\n}",
"func CheckToken(tokenString string) (err error){\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token)(interface{}, error){\n\t\t// validate alg is what I expected\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\t// Secret is a []byte containing your secret, e.g. []byte(\"my_secret_key\")\n\t\treturn Secret, nil\n\t})\n\tif token.Valid {\n\t\tlog4go.Info(\"Succeeded in parsing the token\")\n\t} else if ve, ok := err.(*jwt.ValidationError); ok {\n\t\tif ve.Errors&jwt.ValidationErrorMalformed != 0 {\n\t\t\tlog4go.Error(\"This is not even a token\")\n\t\t} else if ve.Errors&(jwt.ValidationErrorExpired|jwt.ValidationErrorNotValidYet) != 0 {\n\t\t\t// Token is either expired or not active yet\n\t\t\tlog4go.Error(\"Token has expired or is inactive\")\n\t\t\treturn err\n\t\t} else {\n\t\t\tlog4go.Error(\"Couldn't handle this token\")\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlog4go.Error(\"Couldn't handle this token\")\n\t\treturn err\n\t}\n\treturn nil\n}",
"func JWTVerify(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\tvar tokenHeader = r.Header.Get(\"Token\") //Grab the token from the header\n\n\t\ttokenHeader = strings.TrimSpace(tokenHeader)\n\n\t\tif tokenHeader == \"\" {\n\t\t\t//Token is missing, returns with error code 403 Unauthorized\n\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\t\tclaims := &models.Claims{}\n\n\t\ttoken, err := jwt.ParseWithClaims(tokenHeader, claims, func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte(os.Getenv(\"tokenSigningKey\")), nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tif !token.Valid {\n\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\n\t\tctx := context.WithValue(r.Context(), models.UserKey, claims.UserID)\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}",
"func ContextHasValidatedJWT(ctx context.Context, token string) bool {\n\tjwt, ok := GetJWTPayload(ctx)\n\tif ok &&\n\t\tjwt.Validated &&\n\t\tjwt.Token == token {\n\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func ValidateToken() middleware.Middleware {\n\treturn func(hf http.HandlerFunc) http.HandlerFunc {\n\t\treturn func(rw http.ResponseWriter, r *http.Request) {\n\t\t\trw.Header().Set(\"Content-type\", \"application/json\")\n\n\t\t\tbearerToken := strings.TrimSpace(r.Header.Get(\"Authorization\"))\n\t\t\tif bearerToken == \"\" {\n\t\t\t\thttputils.DispatchHTTPError(rw, \"It was not possible to get the token from the headers\", http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttokenParts := strings.Split(bearerToken, \" \")\n\t\t\tif tokenParts[0] != \"Bearer\" {\n\t\t\t\thttputils.DispatchHTTPError(rw, \"It should be Bearer authentication\", http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tuser, err := token.GetUserFromToken(tokenParts[1])\n\t\t\tif err != nil {\n\t\t\t\thttpError := usererrormapper.MapUserErrorToHTTPError(err)\n\t\t\t\thttputils.DispatchHTTPError(rw, httpError.Message, httpError.StatusCode)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnewHTTPContext := context.WithValue(r.Context(), \"user\", user)\n\t\t\thf(rw, r.WithContext(newHTTPContext))\n\t\t}\n\t}\n}",
"func IsJwtToken(token string) (map[string]interface{}, bool) {\n\tvar (\n\t\terr error\n\t\tdec jwt1decoded\n\t)\n\n\t// decode the token\n\tif dec, err = jwt1NewDecoded(token); err != nil {\n\t\t// may want to log some error here so we have visibility\n\t\t// intentionally simplifying return type to bool for ease\n\t\t// of use in API. Caller should only do `if auth.Passes(str) {}`\n\t\treturn nil, false\n\t}\n\n\t// base64 decode payload\n\tvar payload []byte\n\tif payload, err = jwtDecodeString(dec.payload); err != nil {\n\t\treturn nil, false\n\t}\n\tdst := map[string]interface{}{}\n\tif err = jwtDecodeJson(payload, &dst); err != nil {\n\t\treturn nil, false\n\t}\n\tif signed, err := dec.sign(); err != nil || signed.token() != token {\n\t\treturn nil, false\n\t}\n\treturn dst, true\n}",
"func ValidateJWTSVID(ctx context.Context, token, audience string, options ...ClientOption) (*jwtsvid.SVID, error) {\n\tc, err := New(ctx, options...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer c.Close()\n\treturn c.ValidateJWTSVID(ctx, token, audience)\n}",
"func CheckJWT() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tlg.Info(\"[CheckJWT]\")\n\n\t\tvar err error\n\n\t\tIsAuth := c.Request.Header.Get(\"Authorization\")\n\t\tif IsAuth != \"\" {\n\t\t\taAry := strings.Split(IsAuth, \" \")\n\t\t\tif len(aAry) != 2 {\n\t\t\t\terr = errors.New(\"Authorization header is invalid\")\n\t\t\t} else {\n\t\t\t\tif aAry[0] != \"Bearer\" {\n\t\t\t\t\terr = errors.New(\"Authorization header is invalid\")\n\t\t\t\t} else {\n\t\t\t\t\ttoken := aAry[1]\n\t\t\t\t\terr = jwt.JudgeJWT(token)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\terr = errors.New(\"authorization header was missed\")\n\t\t}\n\n\t\tif err != nil {\n\t\t\tc.AbortWithError(400, err)\n\t\t\treturn\n\t\t}\n\n\t\tc.Next()\n\t}\n}",
"func checkToken(w http.ResponseWriter, r *http.Request) bool {\n\tc, err := r.Cookie(\"token\")\n\tif err != nil {\n\t\tif err == http.ErrNoCookie {\n\t\t\t// If the cookie is not set, return an unauthorized status\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn false\n\t\t}\n\t\t// For any other type of error, return a bad request status\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn false\n\t}\n\n\t// Get the JWT string from the cookie\n\ttknStr := c.Value\n\n\t// Initialize a new instance of `Claims`\n\tclaims := &s.Claims{}\n\n\ttkn, err := jwt.ParseWithClaims(tknStr, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn JWTKey, nil\n\t})\n\tif err != nil {\n\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn false\n\t\t}\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn false\n\t}\n\tif !tkn.Valid {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn false\n\t}\n\treturn true\n}",
"func TokenValid(r *http.Request) error {\n\ttoken, err := VerifyToken(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func TestCheckJWT(t *testing.T) {\n\tbaseId := 1234\n\tsecret := NewToken()\n\tdata := map[string]interface{}{\n\t\t\"id\": baseId,\n\t}\n\tfor i := 0; i < 10000; i++ {\n\t\t_, token, err := NewJWT(secret, data)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Token creation should not return an error: %s\", err)\n\t\t}\n\t\t_, testedToken, err := CheckJWT(token, \"id\", func(id int) (token Token, err error) {\n\t\t\tif id != baseId {\n\t\t\t\tt.Errorf(\"Expected id %d but got %d\", baseId, id)\n\t\t\t}\n\t\t\treturn secret, nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Token checking should not return an error: %s\", err)\n\t\t}\n\t\tif !testedToken.Valid {\n\t\t\tt.Error(\"Token is should be valid\")\n\t\t}\n\t}\n}",
"func valid(authorization []string, key []byte) bool {\n\tif len(authorization) < 1 {\n\t\treturn false\n\t}\n\n\tjkey, _ := jwt.ParseRSAPublicKeyFromPEM(key)\n\n\ttoken, err := jwt.Parse(authorization[0], func(token *jwt.Token) (interface{}, error) {\n\t\t// Don't forget to validate the alg is what you expect:\n\t\tif _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {\n\t\t\treturn nil, fmt.Errorf(\"unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\t// hmacSampleSecret is a []byte containing your secret, e.g. []byte(\"my_secret_key\")\n\t\treturn jkey, nil\n\t})\n\n\tif err != nil {\n\t\tlog.Printf(\"error validating token:%s\", err)\n\t\treturn false\n\t}\n\n\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\tfmt.Println(claims[\"some\"], claims[\"nbf\"])\n\t} else {\n\t\tlog.Printf(\"error validating token:%s\", err)\n\t}\n\n\treturn true\n}",
"func CheckJwt(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\terr := tokenMiddleware.CheckJWT(w, r)\n\t\tif err != nil {\n\t\t\t//if theres a token validation error then return and dont execute the next handler\n\t\t\treturn\n\t\t} else {\n\t\t\t//token is fine, move to next handler\n\t\t\tnext.ServeHTTP(w, r)\n\t\t}\n\t}\n}",
"func getTokenKey(token *jwt.Token) (interface{}, error) {\n\treturn []byte(GetJWTKey()), nil\n}",
"func parseToken(tokenString string) (*jwt.Token, error) {\n\n\t// Parse takes the token string and a function for looking up the key.\n\t// The latter is especially useful if you use multiple keys for your application.\n\t// The standard is to use 'kid' in the head of the token to identify which key to use,\n\t// but the parsed token (head and claims) is provided to the callback, providing flexibility.\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\t// Parse methods use this callback function to supply\n\t\t// the key for verification. The function receives the parsed,\n\t\t// but unverified Token. This allows you to use properties in the\n\t\t// Header of the token (such as `kid`) to identify which key to use.\n\n\t\t// What method is used in this token?\n\n\t\tif _, ok := token.Method.(*jwt.SigningMethodRSA); ok {\n\t\t\t// This method is supported\n\n\t\t\t// Return RSA Public Key\n\t\t\treturn jwtRSAPublicKey, nil\n\t\t}\n\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); ok {\n\t\t\t// This method IS NOT SUPPORTED\n\t\t}\n\n\t\tif _, ok := token.Method.(*jwt.SigningMethodECDSA); ok {\n\t\t\t// This method IS NOT SUPPORTED\n\t\t}\n\n\t\treturn nil, status.Errorf(codes.Unauthenticated, \"unexpected signing method: %v\", token.Header[\"alg\"])\n\t})\n\tif err != nil {\n\t\tlog.Errorf(\"jwt.Parse() FAILED with error %v\", err)\n\t\treturn nil, errInvalidToken\n\t}\n\tif !token.Valid {\n\t\tlog.Errorf(\"jwt.Parse() FAILED with !token.Valid\")\n\t\treturn nil, errInvalidToken\n\t}\n\n\t// Ensure token has some payload\n\terr = verifyClaims(token)\n\tif err != nil {\n\t\tlog.Errorf(\"jwt.Parse() FAILED with error %v\", err)\n\t\treturn nil, err\n\t}\n\n\t// Main part - token's payload\n\tclaims := getClaims(token)\n\tlog.Infof(\"Claims:\")\n\tfor name, value := range claims {\n\t\tlog.Infof(\"%s: %v\", name, value)\n\t}\n\n\treturn token, nil\n}",
"func VerifyJWT(signedJWT *afgjwt.JSONWebToken, leeway time.Duration) error {\n\tvar claims jwt.Claims\n\n\td, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{\n\t\tResult: &claims,\n\t\tTagName: \"json\",\n\t\tSquash: true,\n\t\tWeaklyTypedInput: true,\n\t\tDecodeHook: utils.JSONNumberToJwtNumericDate(),\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"mapstruct verifyJWT. error: %w\", err)\n\t}\n\n\tif err = d.Decode(signedJWT.Payload); err != nil {\n\t\treturn fmt.Errorf(\"mapstruct verifyJWT decode. error: %w\", err)\n\t}\n\n\t// Validate checks claims in a token against expected values.\n\t// It is validated using the expected.Time, or time.Now if not provided\n\texpected := jwt.Expected{}\n\n\terr = claims.ValidateWithLeeway(expected, leeway)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid JWT time values: %w\", err)\n\t}\n\n\treturn nil\n}",
"func (v validator) Validate(tokenString string) (*token.Session, error) {\n\tclaims := &claims{}\n\ttok, err := jwt.ParseWithClaims(tokenString, claims, func(tok *jwt.Token) (interface{}, error) {\n\t\treturn v.secret, nil\n\t})\n\n\tif err != nil {\n\t\tlog.Printf(\"Error parsing jwt: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tif !tok.Valid {\n\t\tlog.Printf(\"Token is considered invalid\")\n\t\treturn nil, fmt.Errorf(\"Token is considered invalid\")\n\t}\n\n\t// fill out the session to be used for the lifetime of the request\n\tsession := token.Session{}\n\tsession.Email = claims.Email\n\tsession.AccountID = claims.AccountID\n\tsession.ProfileID = claims.ProfileID\n\tsession.RestaurantID = claims.RestaurantID\n\n\treturn &session, nil\n}",
"func (u *User) ValidateToken(ctx context.Context, inToken *pb.Token, outToken *pb.Token) error {\n\t_ = ctx\n\tts := TokenService{}\n\tclaims, err := ts.Decode(inToken.Token)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif claims == nil {\n\t\treturn fmt.Errorf(glErr.AuthNilClaim(serviceName))\n\t}\n\tif claims.User.Id == 0 || claims.Issuer != ClaimIssuer {\n\t\t// fmt.Printf(\"claim User %v\", claims.User)\n\t\treturn fmt.Errorf(glErr.AuthInvalidClaim(serviceName))\n\t}\n\t// fmt.Printf(\"Claim User %v\", claims.User)\n\t// TODO: Check that userid is a valid user in db\n\n\toutToken.Token = inToken.Token\n\toutToken.Valid = true\n\toutToken.EUid = base64.StdEncoding.EncodeToString([]byte(strconv.FormatInt(claims.User.Id, 10)))\n\n\treturn nil\n\n}",
"func VerifyToken(tokenStr string, secret_name string) (string, error) {\n\t var result = \"\"\n\t //Retrieve secret value from secrets manager\n\t secret, err := getSecretValue(secret_name);\n\t verifyToken, err := jwt.Parse(tokenStr, func(token *jwt.Token) (interface{}, error) {\n\t\t return[]byte(secret), nil\n\t })\n\t if err == nil && verifyToken.Valid{\n\t\t result = \"Valid\"\n\t } else {\n\t\t result = \"Invalid\"\n\t }\n\t log.Println(\"VerifyToken result =\", result)\n\n\t return result, err\n}",
"func JWTValidator(next http.HandlerFunc) http.HandlerFunc {\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// Disable check if call is internal from another Dapr service (localhost) or running on dev machine\n\t\tfwdHost := r.Header.Get(\"X-Forwarded-Host\")\n\t\tif strings.Contains(fwdHost, \"localhost\") || r.Host == \"example.com\" {\n\t\t\tlog.Printf(\"### Auth (%s): Bypassing validation for host: %s %s\\n\", r.URL, fwdHost, r.Host)\n\t\t\tnext(w, r)\n\t\t\treturn\n\t\t}\n\n\t\t// Disable check if client id is not set, this is running in demo / unsecured mode\n\t\tclientID := env.GetEnvString(\"AUTH_CLIENT_ID\", \"\")\n\t\tif len(clientID) == 0 {\n\t\t\tlog.Printf(\"### Auth (%s): No validation as AUTH_CLIENT_ID is not set\\n\", r.URL)\n\t\t\tnext(w, r)\n\t\t\treturn\n\t\t}\n\n\t\t// Get auth header & bearer scheme\n\t\tauthHeader := r.Header.Get(\"Authorization\")\n\t\tif len(authHeader) == 0 {\n\t\t\tw.WriteHeader(401)\n\t\t\treturn\n\t\t}\n\t\tauthParts := strings.Split(authHeader, \" \")\n\t\tif len(authParts) != 2 {\n\t\t\tw.WriteHeader(401)\n\t\t\treturn\n\t\t}\n\t\tif strings.ToLower(authParts[0]) != \"bearer\" {\n\t\t\tw.WriteHeader(401)\n\t\t\treturn\n\t\t}\n\t\ttokenString := authParts[1]\n\n\t\t// Decode the token, using getKeyFromJWKS to get the key\n\t\ttoken, err := jwt.Parse(tokenString, getKeyFromJWKS)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(401)\n\t\t\treturn\n\t\t}\n\n\t\t// Now validate the decoded claims\n\t\tclaims := token.Claims.(jwt.MapClaims)\n\t\tif claims[\"scp\"] != appScopeName {\n\t\t\tw.WriteHeader(401)\n\t\t\treturn\n\t\t}\n\t\tif claims[\"aud\"] != clientID {\n\t\t\tw.WriteHeader(401)\n\t\t\treturn\n\t\t}\n\n\t\t// Otherwise, we're all good!\n\t\tlog.Printf(\"### Auth (%s): token passed validation! [scp:%s] [aud:%s]\\n\", r.URL, claims[\"scp\"], claims[\"aud\"])\n\t\tnext(w, r)\n\t}\n}",
"func (c cognitoClient) ParseAndVerifyJWT(ctx context.Context, token string) (*jwt.Token, error) {\n\tlogger := log.With(c.logger, \"method\", \"ParseAndVerifyJWT\")\n\n\tt, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {\n\t\t// fmt.Printf(\"Token: %v\\n\", t)\n\n\t\t// Looking up the key id will return an array of just one key\n\t\tkeys := c.wellKnownJWKs.LookupKeyID(token.Header[\"kid\"].(string))\n\t\tif len(keys) == 0 {\n\t\t\treturn nil, errors.New(\"Could not find matching `kid` in well known tokens\")\n\t\t}\n\n\t\t// Build the public RSA key\n\t\tkey, err := keys[0].Materialize()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Failed to create public key\")\n\t\t}\n\n\t\trsaPublicKey := key.(*rsa.PublicKey)\n\t\treturn rsaPublicKey, nil\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Invalid token\")\n\t}\n\n\tif t.Valid != true {\n\t\treturn nil, errors.New(\"Token is not valid\")\n\t}\n\n\tlogger.Log(\"Token is valid\")\n\treturn t, nil\n}",
"func ValidateToken(token string) (string, error) {\n username, exists := Sessions[token];\n if (!exists) {\n return \"\", apierrors.TokenValidationError{apierrors.TOKEN_VALIDATION_NO_TOKEN};\n }\n\n return username, nil;\n}",
"func parseJWT(signedToken string) (string, error) {\n\t// check signature - this is weird!! you don't need an instance just a type of MyCustomClaims\n\t// this firstly uses the token(in the callback function) and then verifies it in the same step.\n\tt, err := jwt.ParseWithClaims(signedToken, &MyCustomClaims{}, func(t *jwt.Token) (interface{}, error) {\n\t\t// according to jwt advisory you need to check if your signing method remained the same in callback.\n\t\t// the signing method are carried inside the unverified token. The Method field of the token type carries Alg() from\n\t\t// the SigningMethod used.\n\t\tif t.Method.Alg() != jwt.SigningMethodHS512.Alg() {\n\t\t\treturn nil, fmt.Errorf(\"error in parseJWT while parsing token \")\n\t\t}\n\t\treturn key, nil\n\t})\n\t// Is the token valid? It is populated when you Parse/Verify a token - only checks if the claims has not expired\n\tif err == nil && t.Valid { //there was no error and the token is valid\n\t\t// need to assert VerifiedToken of *MyCustomeClaims type!! You know what you passed in when you created it.\n\t\t// Claims type interface with valid method only\n\t\tclaims := t.Claims.(*MyCustomClaims)\n\t\treturn claims.SID, nil\n\t}\n\t// important to check the error first nill pointer value see running video\n\treturn \"\", errors.New(\"error while verifying token\")\n\n}",
"func VerifyToken(tokenStr string) (*Claims, error) {\n\tclaims := &Claims{}\n\n\tnow := time.Now()\n\n\texpVal := jwt.ExpirationTimeValidator(now)\n\taudVal := jwt.AudienceValidator(cfg.Audience)\n\n\tvalPayload := jwt.ValidatePayload(&claims.Payload, expVal, audVal)\n\n\tif _, err := jwt.Verify([]byte(tokenStr), cfg.Algorithm, claims, valPayload); err != nil {\n\t\tswitch err {\n\t\tcase jwt.ErrExpValidation:\n\t\t\treturn claims, ErrExpiredToken\n\t\tcase jwt.ErrAudValidation:\n\t\t\treturn claims, ErrInvalidAudience\n\t\tdefault:\n\t\t\treturn claims, err\n\t\t}\n\t}\n\n\treturn claims, nil\n}",
"func ValidateMiddleware(next http.Handler) http.Handler {\n\tfn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar token *jwt.Token\n\t\ttoken, err := request.ParseFromRequestWithClaims(r, request.OAuth2Extractor, &authentication.Claim{}, func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn authentication.PublicKey, nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tresponse.HTTPError(w, r, http.StatusUnauthorized, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tif !token.Valid {\n\t\t\tresponse.HTTPError(w, r, http.StatusUnauthorized, \"Invalid Token\")\n\t\t\treturn\n\t\t}\n\t\tid := token.Claims.(*authentication.Claim).ID\n\t\tctx := context.WithValue(r.Context(), primitive.ObjectID{}, id)\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\n\t})\n\treturn fn\n\n}",
"func ParseAndValidate(token []byte, publicKey *rsa.PublicKey) (map[string]interface{}, error) {\n\tjwt, err := jws.ParseJWT(token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Validate token\n\tif err = jwt.Validate(publicKey, crypto.SigningMethodRS512); err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid token: %s\", err)\n\t}\n\n\ttoday := time.Now().UTC()\n\tyesterday := today.Add(-24 * time.Hour)\n\ttomorrow := today.Add(24 * time.Hour)\n\n\tconst dateLayout = \"02-01-2006\" // 'd-m-Y' in PHP\n\ttodayStr := today.Format(dateLayout)\n\tyesterdayStr := yesterday.Format(dateLayout)\n\ttomorrowStr := tomorrow.Format(dateLayout)\n\n\tjwtDate := jwt.Claims().Get(\"date\")\n\tif jwtDate != yesterdayStr && jwtDate != todayStr && jwtDate != tomorrowStr {\n\t\treturn nil, errors.New(\"the token has expired\")\n\t}\n\n\treturn jwt.Claims(), nil\n}",
"func (s *ExtendedJWT) verifyRFC9068Token(ctx context.Context, rawToken string) (*ExtendedJWTClaims, error) {\n\tparsedToken, err := jwt.ParseSigned(rawToken)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse JWT: %w\", err)\n\t}\n\n\tif len(parsedToken.Headers) != 1 {\n\t\treturn nil, fmt.Errorf(\"only one header supported, got %d\", len(parsedToken.Headers))\n\t}\n\n\tparsedHeader := parsedToken.Headers[0]\n\n\ttypeHeader := parsedHeader.ExtraHeaders[\"typ\"]\n\tif typeHeader == nil {\n\t\treturn nil, fmt.Errorf(\"missing 'typ' field from the header\")\n\t}\n\n\tjwtType := strings.ToLower(typeHeader.(string))\n\tif jwtType != rfc9068ShortMediaType && jwtType != rfc9068MediaType {\n\t\treturn nil, fmt.Errorf(\"invalid JWT type: %s\", jwtType)\n\t}\n\n\tif !slices.Contains(acceptedSigningMethods, parsedHeader.Algorithm) {\n\t\treturn nil, fmt.Errorf(\"invalid algorithm: %s. Accepted algorithms: %s\", parsedHeader.Algorithm, strings.Join(acceptedSigningMethods, \", \"))\n\t}\n\n\tvar claims ExtendedJWTClaims\n\terr = parsedToken.Claims(s.signingKeys.GetServerPublicKey(), &claims)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to verify the signature: %w\", err)\n\t}\n\n\tif claims.Expiry == nil {\n\t\treturn nil, fmt.Errorf(\"missing 'exp' claim\")\n\t}\n\n\tif claims.ID == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing 'jti' claim\")\n\t}\n\n\tif claims.Subject == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing 'sub' claim\")\n\t}\n\n\tif claims.IssuedAt == nil {\n\t\treturn nil, fmt.Errorf(\"missing 'iat' claim\")\n\t}\n\n\terr = claims.ValidateWithLeeway(jwt.Expected{\n\t\tIssuer: s.cfg.ExtendedJWTExpectIssuer,\n\t\tAudience: jwt.Audience{s.cfg.ExtendedJWTExpectAudience},\n\t\tTime: timeNow(),\n\t}, 0)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to validate JWT: %w\", err)\n\t}\n\n\tif err := s.validateClientIdClaim(ctx, claims); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &claims, nil\n}",
"func CheckTheValidityOfTheToken(token string) (newToken string, err error) {\n\n err = checkInit()\n if err != nil {\n return\n }\n\n err = createError(011)\n\n if v, ok := tokens[token]; ok {\n var expires = v.(map[string]interface{})[\"expires\"].(time.Time)\n var userID = v.(map[string]interface{})[\"id\"].(string)\n\n if expires.Sub(time.Now().Local()) < 0 {\n return\n }\n\n newToken = setToken(userID, token)\n\n err = nil\n\n } else {\n return\n }\n\n return\n}",
"func ValidateAuthToken(tokenStr string) (bool, *uuid.UUID, error) {\n\t//initialize the claims\n\tclaims := &AuthClaims{}\n\n\t//parse the JWT and load the claims\n\ttoken, err := jwt.ParseWithClaims(tokenStr, claims, getTokenKey)\n\tif err != nil {\n\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\treturn false, nil, nil\n\t\t}\n\t\treturn false, nil, err\n\t}\n\n\t//verify the signing algorithm\n\tif token.Method.Alg() != JWTSigningAlgorithm {\n\t\treturn false, nil, fmt.Errorf(\"invalid signing algorthm: %s\", token.Method.Alg())\n\t}\n\n\t//check if the token is valid\n\tif !token.Valid {\n\t\treturn false, nil, nil\n\t}\n\n\t//extract the user id\n\tuserIDStr := claims.UserID\n\tuserID := uuid.FromStringOrNil(userIDStr)\n\tif userID == uuid.Nil {\n\t\treturn false, nil, nil\n\t}\n\treturn true, &userID, nil\n}"
] | [
"0.7699664",
"0.75941306",
"0.7513526",
"0.7442866",
"0.7334197",
"0.7312587",
"0.729662",
"0.7211861",
"0.7191269",
"0.7175674",
"0.7171228",
"0.7161451",
"0.7157099",
"0.7146733",
"0.71361905",
"0.7088462",
"0.708682",
"0.70732176",
"0.7066534",
"0.7061922",
"0.7060058",
"0.6997607",
"0.69910485",
"0.69762754",
"0.6974597",
"0.6965757",
"0.6953478",
"0.694564",
"0.6937268",
"0.6930263",
"0.69114166",
"0.68887347",
"0.6884213",
"0.6877258",
"0.6874198",
"0.6864905",
"0.68453515",
"0.6824732",
"0.6799422",
"0.67988265",
"0.6785065",
"0.6762235",
"0.6733013",
"0.67260283",
"0.6706007",
"0.6701945",
"0.6697797",
"0.66836363",
"0.66820365",
"0.6634552",
"0.6602813",
"0.65875804",
"0.65801734",
"0.6566125",
"0.6563217",
"0.6511841",
"0.65101767",
"0.64994574",
"0.6497112",
"0.64560616",
"0.64460915",
"0.64397705",
"0.6412647",
"0.64009583",
"0.64009583",
"0.63983476",
"0.63982874",
"0.6376784",
"0.6372223",
"0.6350631",
"0.6350006",
"0.63476676",
"0.63043046",
"0.62931377",
"0.62904865",
"0.6282721",
"0.626288",
"0.6261844",
"0.6258684",
"0.6255772",
"0.6243182",
"0.6232441",
"0.6230282",
"0.6220616",
"0.6215572",
"0.619832",
"0.61902374",
"0.6151728",
"0.6135933",
"0.6129958",
"0.6107376",
"0.610469",
"0.60947365",
"0.6087853",
"0.6086538",
"0.6082594",
"0.60791606",
"0.6066183",
"0.6049922",
"0.6049654"
] | 0.7437225 | 4 |
WriteCloserWithContext converts ContextCloser to io.Closer, whenever new Close method will be called, the ctx will be passed to it | func WriteCloserWithContext(ctx context.Context, closer WriteContextCloser) io.WriteCloser {
return &closerWithContext{
WriteContextCloser: closer,
ctx: ctx,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *closerWithContext) Close() error {\n\treturn c.WriteContextCloser.Close(c.ctx)\n}",
"func (c *Context) Close() error {\n\treturn c.writer.Close()\n}",
"func (fw *FileWriter) CloseWithContext(ctx context.Context, opts ...FlushRowGroupOption) error {\n\tif fw.schemaWriter.rowGroupNumRecords() > 0 {\n\t\tif err := fw.FlushRowGroup(opts...); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tkv := make([]*parquet.KeyValue, 0, len(fw.kvStore))\n\tfor i := range fw.kvStore {\n\t\tv := fw.kvStore[i]\n\t\taddr := &v\n\t\tif v == \"\" {\n\t\t\taddr = nil\n\t\t}\n\t\tkv = append(kv, &parquet.KeyValue{\n\t\t\tKey: i,\n\t\t\tValue: addr,\n\t\t})\n\t}\n\tmeta := &parquet.FileMetaData{\n\t\tVersion: fw.version,\n\t\tSchema: fw.schemaWriter.getSchemaArray(),\n\t\tNumRows: fw.totalNumRecords,\n\t\tRowGroups: fw.rowGroups,\n\t\tKeyValueMetadata: kv,\n\t\tCreatedBy: &fw.createdBy,\n\t\tColumnOrders: nil,\n\t}\n\n\tpos := fw.w.Pos()\n\tif err := writeThrift(ctx, meta, fw.w); err != nil {\n\t\treturn err\n\t}\n\n\tln := int32(fw.w.Pos() - pos)\n\tif err := binary.Write(fw.w, binary.LittleEndian, &ln); err != nil {\n\t\treturn err\n\t}\n\n\tif err := writeFull(fw.w, magic); err != nil {\n\t\treturn err\n\t}\n\n\treturn fw.bw.Flush()\n}",
"func WithWriterContext(ctx context.Context) FileWriterOption {\n\treturn func(fw *FileWriter) {\n\t\tfw.ctx = ctx\n\t}\n}",
"func WithContext(response http.ResponseWriter, request *http.Request, ctx context.Context) (http.ResponseWriter, *http.Request) {\n\tif ca, ok := response.(ContextAware); ok {\n\t\tca.SetContext(ctx)\n\t\treturn response, request.WithContext(ctx)\n\t}\n\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\n\treturn &contextAwareResponseWriter{response, ctx}, request.WithContext(ctx)\n}",
"func DelayedCtxCloser(ctx context.Context, delay time.Duration) context.Context {\n\tdelayedCtx, cancel := context.WithCancel(context.Background())\n\n\tgo func() {\n\t\t<-ctx.Done()\n\t\ttime.Sleep(delay)\n\t\tcancel()\n\t}()\n\n\treturn delayedCtx\n}",
"func CloseContext(ctx *ContextT) {\n\tC.yices_free_context(yctx(*ctx))\n\tctx.raw = 0\n}",
"func (o *WriteOptions) Context() context.Context {\n\tif o != nil && o.ctx != nil {\n\t\treturn o.ctx\n\t}\n\treturn context.Background()\n}",
"func (lc *Closer) Ctx() context.Context {\n\treturn lc.ctx\n}",
"func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions {\n\to2 := new(WriteOptions)\n\tif o != nil {\n\t\t*o2 = *o\n\t}\n\to2.ctx = ctx\n\treturn o2\n}",
"func WriteStructWithContext(ctx context.Context, p thrift.TProtocol, value thrift.TStruct, name string, field int16) error {\n\tif err := p.WriteFieldBegin(ctx, name, thrift.STRUCT, field); err != nil {\n\t\treturn thrift.PrependError(\"write field begin error: \", err)\n\t}\n\tif err := value.Write(ctx, p); err != nil {\n\t\treturn thrift.PrependError(\"field write error: \", err)\n\t}\n\tif err := p.WriteFieldEnd(ctx); err != nil {\n\t\treturn thrift.PrependError(\"write field end error: \", err)\n\t}\n\treturn nil\n}",
"func PipeWithContext(\n\tctx context.Context,\n\tsamplesPerSecond uint,\n\tformat SampleFormat,\n) (PipeReader, PipeWriter) {\n\tctx, cancel := context.WithCancel(ctx)\n\tp := &pipe{\n\t\tcontext: ctx,\n\t\tcancel: cancel,\n\t\tformat: format,\n\t\tsamplesPerSecond: samplesPerSecond,\n\t\tsamplesCh: make(chan Samples),\n\t\treadSamplesCh: make(chan int),\n\t}\n\treturn p, p\n}",
"func ctx(out io.Writer, debug bool) context.Context {\n\tif !debug {\n\t\treturn orascontext.Background()\n\t}\n\tctx := orascontext.WithLoggerFromWriter(context.Background(), out)\n\torascontext.GetLogger(ctx).Logger.SetLevel(logrus.DebugLevel)\n\treturn ctx\n}",
"func (ctx *ResourceContext) SafeClose() {\n}",
"func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {\n\tvar (\n\t\tctx context.Context\n\t\tcancel context.CancelFunc\n\t)\n\n\tif c.exportTimeout > 0 {\n\t\tctx, cancel = context.WithTimeout(parent, c.exportTimeout)\n\t} else {\n\t\tctx, cancel = context.WithCancel(parent)\n\t}\n\n\tif c.metadata.Len() > 0 {\n\t\tctx = metadata.NewOutgoingContext(ctx, c.metadata)\n\t}\n\n\treturn ctx, cancel\n}",
"func WrapCancel(cancel context.CancelFunc) io.Closer {\n\treturn Wrap(func() error {\n\t\tcancel()\n\t\treturn nil\n\t})\n}",
"func (m *MQTT) WriteWithContext(ctx context.Context, msg *message.Batch) error {\n\treturn m.Write(msg)\n}",
"func runWithContext(fun func(ctx context.Context) error) (context.CancelFunc, chan error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdone := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(done)\n\t\tdone <- fun(ctx)\n\t}()\n\n\treturn cancel, done\n}",
"func (h ContextHandlerFunc) ServeHTTPWithContext(ctx context.Context, w http.ResponseWriter, req *http.Request) {\n\th(ctx, w, req)\n}",
"func (a *AzureBlobStorage) WriteWithContext(_ context.Context, msg *message.Batch) error {\n\treturn IterateBatchedSend(msg, func(i int, p *message.Part) error {\n\t\tc := a.client.GetContainerReference(a.container.String(i, msg))\n\t\tb := c.GetBlobReference(a.path.String(i, msg))\n\t\tif err := a.uploadBlob(b, a.blobType.String(i, msg), p.Get()); err != nil {\n\t\t\tif containerNotFound(err) {\n\t\t\t\tif cerr := a.createContainer(c, a.accessLevel.String(i, msg)); cerr != nil {\n\t\t\t\t\ta.log.Debugf(\"error creating container: %v.\", cerr)\n\t\t\t\t\treturn cerr\n\t\t\t\t}\n\t\t\t\terr = a.uploadBlob(b, a.blobType.String(i, msg), p.Get())\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.log.Debugf(\"error retrying to upload blob: %v.\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}",
"func (g *Glue) newContext(w http.ResponseWriter, r *http.Request) *Context {\n\tctx := &Context{inj.New(), g, newResponseWriter(w)}\n\n\tctx.Register(r)\n\tctx.Register(ctx.rw)\n\t// register our ResponseWriter as an http.ResponseWriter as well for\n\t// net/http HandlerFunc compatibility\n\tctx.RegisterAs(ctx.rw, (*http.ResponseWriter)(nil))\n\t// register this instance with itself\n\tctx.Register(*ctx)\n\treturn ctx\n}",
"func WithContext(ctx context.Context) Option {\n\treturn func(o *Registry) { o.ctx = ctx }\n}",
"func (zw *ZerologWriter) WithContext(ctx context.Context) ZerologWriter {\n\treturn ZerologWriter{w: zw.w.WithContext(ctx)}\n}",
"func (zw *ZerologWriter) WithContext(ctx context.Context) ZerologWriter {\n\treturn ZerologWriter{w: zw.w.WithContext(ctx)}\n}",
"func archiveContext(ctx context.Context, root string, writer io.Writer) (err error) {\n\n\t// Create a buffered writer.\n\tbufWriter := bufio.NewWriter(writer)\n\tdefer bufWriter.Flush()\n\n\t// Create a zipped writer on the bufferd writer.\n\tzipWriter, err := gzip.NewWriterLevel(bufWriter, gzip.BestCompression)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer zipWriter.Close()\n\n\t// Create a tarball writer on the zipped writer.\n\ttarWriter := tar.NewWriter(zipWriter)\n\tdefer tarWriter.Close()\n\n\t// Create a tarball.\n\tsources, err := ioutil.ReadDir(root)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, info := range sources {\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\n\t\tdefault:\n\t\t\t// Write a file header.\n\t\t\theader, err := tar.FileInfoHeader(info, info.Name())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttarWriter.WriteHeader(header)\n\n\t\t\t// Write the body.\n\t\t\tif err = copyFile(filepath.Join(root, info.Name()), tarWriter); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n\n}",
"func (h *Handler) WithContext(ctx context.Context) {\n\tgo func() {\n\t\t<-ctx.Done()\n\t\th.terminating = true\n\t}()\n}",
"func (c MethodsCollection) WithContext() pWithContext {\n\treturn pWithContext{\n\t\tMethod: c.MustGet(\"WithContext\"),\n\t}\n}",
"func SerializeCtx(ctx context.Context, opts ...SerializeOpts) ([]byte, error) {\n\tbuf := new(bytes.Buffer)\n\te := gob.NewEncoder(buf)\n\n\ts := contextData{\n\t\tValues: make(map[interface{}]interface{}),\n\t\tHasCancel: false,\n\t\tDeadline: time.Time{},\n\t}\n\n\tserialized := buildMap(ctx, s)\n\n\t// if options were passed\n\tif len(opts) > 0 {\n\t\t// override cancel/deadline\n\t\tif !opts[0].RetainCancel {\n\t\t\tserialized.HasCancel = false\n\t\t}\n\t\tif !opts[0].RetainDeadline {\n\t\t\tserialized.HasDeadline = false\n\t\t}\n\t\t// ignore functions to allow serialization to pass\n\t\tif opts[0].IgnoreFunctions {\n\t\t\tfor key, val := range serialized.Values {\n\t\t\t\tif reflect.TypeOf(key).Kind() == reflect.Func || reflect.TypeOf(val).Kind() == reflect.Func {\n\t\t\t\t\tdelete(serialized.Values, key)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Encoding the map\n\terr := e.Encode(serialized)\n\treturn buf.Bytes(), err\n}",
"func WithCloser(closeableObject io.Closer, action func()) {\n\tdefer func() {\n\t\terr := closeableObject.Close()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"failed to close %T: %s\", closeableObject, err)\n\t\t}\n\t}()\n\taction()\n}",
"func CopyWithContext(ctx context.Context, dst *Writer, src Stream) error {\n\tif err := src.Open(); err != nil {\n\t\treturn err\n\t}\n\tvar err error\n\tfor ctx.Err() == nil {\n\t\tvar pair Pair\n\t\tpair, err = src.Read()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif pair.Key == nil {\n\t\t\tbreak\n\t\t}\n\t\terr = dst.Write(pair)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ctx.Err()\n}",
"func NewTickerWithContext(ctx context.Context, d time.Duration) *Ticker {\n\tt := NewTicker(d)\n\tif ctx.Done() != nil {\n\t\tgo func() {\n\t\t\t<-ctx.Done()\n\t\t\tt.Stop()\n\t\t}()\n\t}\n\treturn t\n}",
"func DoWithContext(ctx context.Context, do func(ctx context.Context) error, fallback func(err error)) (err error) {\n\terrorChannel := make(chan error)\n\tvar contextHasBeenDone = false\n\tgo func() {\n\t\terr := do(ctx)\n\t\tif contextHasBeenDone {\n\t\t\tif fallback != nil {\n\t\t\t\tfallback(err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\terrorChannel <- err\n\t}()\n\tselect {\n\tcase err = <-errorChannel:\n\t\treturn err\n\tcase <-ctx.Done():\n\t\tcontextHasBeenDone = true\n\t\treturn ctx.Err()\n\t}\n}",
"func With(ctx context.Context, kvs ...interface{}) context.Context {\n\tl := fromCtx(ctx)\n\tl = l.With(kvs...)\n\treturn toCtx(ctx, l)\n}",
"func (c *ProjectsLocationsInsightsWriteInsightCall) Context(ctx context.Context) *ProjectsLocationsInsightsWriteInsightCall {\n\tc.ctx_ = ctx\n\treturn c\n}",
"func (r *StreamingRuntime) ExecWithContext(\n\tctx context.Context,\n\tcontainerID string,\n\tcmd []string,\n\tin io.Reader,\n\tout, errw io.WriteCloser,\n\ttty bool,\n\tresize <-chan remotecommand.TerminalSize,\n\ttimeout time.Duration,\n) error {\n\tcontainer, err := libdocker.CheckContainerStatus(r.Client, containerID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn r.ExecHandler.ExecInContainer(\n\t\tctx,\n\t\tr.Client,\n\t\tcontainer,\n\t\tcmd,\n\t\tin,\n\t\tout,\n\t\terrw,\n\t\ttty,\n\t\tresize,\n\t\ttimeout,\n\t)\n}",
"func WithCtx(c *Controller) func(http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t*r = *r.WithContext(context.WithValue(r.Context(), helpers.RenderCtxKey, c))\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}",
"func ContextResponseWriter(ctx context.Context) (rw ResponseWriter) {\n\tif d, ok := ctx.Value(contextHandlerDetailsKey).(*handlerDetails); ok {\n\t\trw, _ = d.rw.(ResponseWriter)\n\t}\n\treturn\n}",
"func (rw *RWLock) LockWithCtx(ctx context.Context) error {\n\trw.lock.Lock()\n\tif !rw.init() {\n\t\trw.lock.Unlock()\n\t\treturn errors.ClosedState\n\t}\n\n\trw.writers++\n\tif rw.writers == 1 {\n\t\tif atomic.AddInt32(&rw.readers, -rwLockMaxReaders) == -rwLockMaxReaders+1 {\n\t\t\t// happy to lock\n\t\t\trw.state = stateLocked\n\t\t\trw.lock.Unlock()\n\t\t\treturn nil\n\t\t}\n\t}\n\tif rw.state == stateInit {\n\t\trw.state = stateWaiting\n\t}\n\trw.lock.Unlock()\n\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\n\tselect {\n\tcase <-rw.clsCh:\n\t\trw.cancelWriter()\n\t\treturn errors.ClosedState\n\tcase <-ctx.Done():\n\t\trw.cancelWriter()\n\t\treturn ctx.Err()\n\tcase <-rw.wrCh:\n\t\trw.lock.Lock()\n\t\trw.state = stateLocked\n\t\trw.lock.Unlock()\n\t}\n\n\treturn nil\n}",
"func (e *Encoder) EncodeContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) error {\n\trctx := encoder.TakeRuntimeContext()\n\trctx.Option.Flag = 0\n\trctx.Option.Flag |= encoder.ContextOption\n\trctx.Option.Context = ctx\n\n\terr := e.encodeWithOption(rctx, v, optFuncs...)\n\n\tencoder.ReleaseRuntimeContext(rctx)\n\treturn err\n}",
"func closeCtx(k *http.Request) {\n\tpk := privateKey(k)\n\tif _, has := internalCtx.Get(pk); has {\n\t\tinternalCtx.Remove(pk)\n\t}\n}",
"func CreticalfWithContext(ctx context.Context, format string, args ...interface{}) {\n\tif hub := sentry.GetHubFromContext(ctx); hub != nil {\n\t\tcreticaldeps(hub.CaptureMessage, 3, format, args...)\n\t\treturn\n\t}\n\n\tcreticaldeps(sentry.CaptureMessage, 3, format, args...)\n}",
"func (q *ChannelQueue) FlushWithContext(ctx context.Context) error {\n\tlog.Trace(\"ChannelQueue: %d Flush\", q.qid)\n\tpaused, _ := q.IsPausedIsResumed()\n\tfor {\n\t\tselect {\n\t\tcase <-paused:\n\t\t\treturn nil\n\t\tcase data, ok := <-q.dataChan:\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif unhandled := q.handle(data); unhandled != nil {\n\t\t\t\tlog.Error(\"Unhandled Data whilst flushing queue %d\", q.qid)\n\t\t\t}\n\t\t\tatomic.AddInt64(&q.numInQueue, -1)\n\t\tcase <-q.baseCtx.Done():\n\t\t\treturn q.baseCtx.Err()\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t}\n}",
"func (wss *WrappedServerStream) Context() context.Context {\n\treturn wss.context\n}",
"func WriteBinaryWithContext(ctx context.Context, p thrift.TProtocol, value []byte, name string, field int16) error {\n\tif err := p.WriteFieldBegin(ctx, name, thrift.STRING, field); err != nil {\n\t\treturn thrift.PrependError(\"write field begin error: \", err)\n\t}\n\tif err := p.WriteBinary(ctx, value); err != nil {\n\t\treturn thrift.PrependError(\"field write error: \", err)\n\t}\n\tif err := p.WriteFieldEnd(ctx); err != nil {\n\t\treturn thrift.PrependError(\"write field end error: \", err)\n\t}\n\treturn nil\n}",
"func Contexter() func(next http.Handler) http.Handler {\n\trnd := templates.HTMLRenderer()\n\tcsrfOpts := CsrfOptions{\n\t\tSecret: setting.SecretKey,\n\t\tCookie: setting.CSRFCookieName,\n\t\tSetCookie: true,\n\t\tSecure: setting.SessionConfig.Secure,\n\t\tCookieHTTPOnly: setting.CSRFCookieHTTPOnly,\n\t\tHeader: \"X-Csrf-Token\",\n\t\tCookieDomain: setting.SessionConfig.Domain,\n\t\tCookiePath: setting.SessionConfig.CookiePath,\n\t\tSameSite: setting.SessionConfig.SameSite,\n\t}\n\tif !setting.IsProd {\n\t\tCsrfTokenRegenerationInterval = 5 * time.Second // in dev, re-generate the tokens more aggressively for debug purpose\n\t}\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {\n\t\t\tctx := Context{\n\t\t\t\tResp: NewResponse(resp),\n\t\t\t\tCache: mc.GetCache(),\n\t\t\t\tLocale: middleware.Locale(resp, req),\n\t\t\t\tLink: setting.AppSubURL + strings.TrimSuffix(req.URL.EscapedPath(), \"/\"),\n\t\t\t\tRender: rnd,\n\t\t\t\tSession: session.GetSession(req),\n\t\t\t\tRepo: &Repository{\n\t\t\t\t\tPullRequest: &PullRequest{},\n\t\t\t\t},\n\t\t\t\tOrg: &Organization{},\n\t\t\t\tData: middleware.GetContextData(req.Context()),\n\t\t\t}\n\t\t\tdefer ctx.Close()\n\n\t\t\tctx.Data.MergeFrom(middleware.CommonTemplateContextData())\n\t\t\tctx.Data[\"Context\"] = &ctx\n\t\t\tctx.Data[\"CurrentURL\"] = setting.AppSubURL + req.URL.RequestURI()\n\t\t\tctx.Data[\"Link\"] = ctx.Link\n\t\t\tctx.Data[\"locale\"] = ctx.Locale\n\n\t\t\t// PageData is passed by reference, and it will be rendered to `window.config.pageData` in `head.tmpl` for JavaScript modules\n\t\t\tctx.PageData = map[string]any{}\n\t\t\tctx.Data[\"PageData\"] = ctx.PageData\n\n\t\t\tctx.Req = WithContext(req, &ctx)\n\t\t\tctx.Csrf = PrepareCSRFProtector(csrfOpts, &ctx)\n\n\t\t\t// Get the last flash message from cookie\n\t\t\tlastFlashCookie := middleware.GetSiteCookie(ctx.Req, CookieNameFlash)\n\t\t\tif vals, _ := url.ParseQuery(lastFlashCookie); len(vals) > 0 {\n\t\t\t\t// store last Flash message into the template data, to render it\n\t\t\t\tctx.Data[\"Flash\"] = &middleware.Flash{\n\t\t\t\t\tDataStore: &ctx,\n\t\t\t\t\tValues: vals,\n\t\t\t\t\tErrorMsg: vals.Get(\"error\"),\n\t\t\t\t\tSuccessMsg: vals.Get(\"success\"),\n\t\t\t\t\tInfoMsg: vals.Get(\"info\"),\n\t\t\t\t\tWarningMsg: vals.Get(\"warning\"),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// prepare an empty Flash message for current request\n\t\t\tctx.Flash = &middleware.Flash{DataStore: &ctx, Values: url.Values{}}\n\t\t\tctx.Resp.Before(func(resp ResponseWriter) {\n\t\t\t\tif val := ctx.Flash.Encode(); val != \"\" {\n\t\t\t\t\tmiddleware.SetSiteCookie(ctx.Resp, CookieNameFlash, val, 0)\n\t\t\t\t} else if lastFlashCookie != \"\" {\n\t\t\t\t\tmiddleware.SetSiteCookie(ctx.Resp, CookieNameFlash, \"\", -1)\n\t\t\t\t}\n\t\t\t})\n\n\t\t\t// If request sends files, parse them here otherwise the Query() can't be parsed and the CsrfToken will be invalid.\n\t\t\tif ctx.Req.Method == \"POST\" && strings.Contains(ctx.Req.Header.Get(\"Content-Type\"), \"multipart/form-data\") {\n\t\t\t\tif err := ctx.Req.ParseMultipartForm(setting.Attachment.MaxSize << 20); err != nil && !strings.Contains(err.Error(), \"EOF\") { // 32MB max size\n\t\t\t\t\tctx.ServerError(\"ParseMultipartForm\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\thttpcache.SetCacheControlInHeader(ctx.Resp.Header(), 0, \"no-transform\")\n\t\t\tctx.Resp.Header().Set(`X-Frame-Options`, setting.CORSConfig.XFrameOptions)\n\n\t\t\tctx.Data[\"CsrfToken\"] = 
ctx.Csrf.GetToken()\n\t\t\tctx.Data[\"CsrfTokenHtml\"] = template.HTML(`<input type=\"hidden\" name=\"_csrf\" value=\"` + ctx.Data[\"CsrfToken\"].(string) + `\">`)\n\n\t\t\t// FIXME: do we really always need these setting? There should be someway to have to avoid having to always set these\n\t\t\tctx.Data[\"DisableMigrations\"] = setting.Repository.DisableMigrations\n\t\t\tctx.Data[\"DisableStars\"] = setting.Repository.DisableStars\n\t\t\tctx.Data[\"EnableActions\"] = setting.Actions.Enabled\n\n\t\t\tctx.Data[\"ManifestData\"] = setting.ManifestData\n\n\t\t\tctx.Data[\"UnitWikiGlobalDisabled\"] = unit.TypeWiki.UnitGlobalDisabled()\n\t\t\tctx.Data[\"UnitIssuesGlobalDisabled\"] = unit.TypeIssues.UnitGlobalDisabled()\n\t\t\tctx.Data[\"UnitPullsGlobalDisabled\"] = unit.TypePullRequests.UnitGlobalDisabled()\n\t\t\tctx.Data[\"UnitProjectsGlobalDisabled\"] = unit.TypeProjects.UnitGlobalDisabled()\n\t\t\tctx.Data[\"UnitActionsGlobalDisabled\"] = unit.TypeActions.UnitGlobalDisabled()\n\n\t\t\tctx.Data[\"AllLangs\"] = translation.AllLangs()\n\n\t\t\tnext.ServeHTTP(ctx.Resp, ctx.Req)\n\t\t})\n\t}\n}",
"func WriteI32WithContext(ctx context.Context, p thrift.TProtocol, value int32, name string, field int16) error {\n\tif err := p.WriteFieldBegin(ctx, name, thrift.I32, field); err != nil {\n\t\treturn thrift.PrependError(\"write field begin error: \", err)\n\t}\n\tif err := p.WriteI32(ctx, value); err != nil {\n\t\treturn thrift.PrependError(\"field write error: \", err)\n\t}\n\tif err := p.WriteFieldEnd(ctx); err != nil {\n\t\treturn thrift.PrependError(\"write field end error: \", err)\n\t}\n\treturn nil\n}",
"func (w *wrappedServerStream) Context() context.Context {\n\treturn w.WrappedContext\n}",
"func WithContext(ctx context.Context, fnctx Context) context.Context {\n\treturn context.WithValue(ctx, ctxKey, fnctx)\n}",
"func (x Go) WithContext(ctx context.Context, f func(context.Context)) error {\n\t_ctx := ctx\n\tvar _cancel context.CancelFunc\n\tvar started, funcDone chan struct{}\n\tif x.ensureStarted {\n\t\tstarted = make(chan struct{})\n\t}\n\tif x.timeout != 0 {\n\t\tif x.timeout > 0 {\n\t\t\t_ctx, _cancel = context.WithCancel(ctx)\n\t\t\tdefer _cancel()\n\t\t}\n\t\tfuncDone = make(chan struct{})\n\t}\n\tif x.wg != nil {\n\t\tx.wg.Add(1)\n\t}\n\n\tgo func() {\n\t\tif started != nil {\n\t\t\tclose(started)\n\t\t}\n\t\tif x.wg != nil {\n\t\t\tdefer x.wg.Done()\n\t\t}\n\t\tif funcDone != nil {\n\t\t\tdefer close(funcDone)\n\t\t}\n\t\tif x.recoverFunc != nil {\n\t\t\tdefer func() {\n\t\t\t\tif e := recover(); e != nil {\n\t\t\t\t\tx.recoverFunc(e)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\tif x.before != nil {\n\t\t\tx.before()\n\t\t}\n\t\tif x.after != nil && x.deferAfter {\n\t\t\tdefer x.after()\n\t\t}\n\n\t\tf(_ctx)\n\n\t\tif x.after != nil && !x.deferAfter {\n\t\t\tx.after()\n\t\t}\n\t}()\n\n\tif started != nil {\n\t\t<-started\n\t}\n\tif funcDone != nil {\n\t\tif x.timeout > 0 {\n\t\t\ttm := time.NewTimer(x.timeout)\n\t\t\tdefer func() {\n\t\t\t\tif !tm.Stop() {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-tm.C:\n\t\t\t\t\tdefault:\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\tselect {\n\t\t\tcase <-funcDone:\n\t\t\tcase <-tm.C:\n\t\t\t\treturn ErrTimeout\n\t\t\t}\n\t\t} else if x.timeout < 0 {\n\t\t\t<-funcDone\n\t\t}\n\t}\n\n\treturn nil\n}",
"func WriteI16WithContext(ctx context.Context, p thrift.TProtocol, value int16, name string, field int16) error {\n\tif err := p.WriteFieldBegin(ctx, name, thrift.I16, field); err != nil {\n\t\treturn thrift.PrependError(\"write field begin error: \", err)\n\t}\n\tif err := p.WriteI16(ctx, value); err != nil {\n\t\treturn thrift.PrependError(\"field write error: \", err)\n\t}\n\tif err := p.WriteFieldEnd(ctx); err != nil {\n\t\treturn thrift.PrependError(\"write field end error: \", err)\n\t}\n\treturn nil\n}",
"func (s *DeterminationWithContext) Close() error {\n\treturn s.close(s.trigger)\n}",
"func (tx *WriteTx) RunWithContext(ctx context.Context) error {\n\tif tx.err != nil {\n\t\treturn tx.err\n\t}\n\tinput, err := tx.input()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = retry(ctx, func() error {\n\t\tout, err := tx.db.client.TransactWriteItemsWithContext(ctx, input)\n\t\tif tx.cc != nil && out != nil {\n\t\t\tfor _, cc := range out.ConsumedCapacity {\n\t\t\t\taddConsumedCapacity(tx.cc, cc)\n\t\t\t}\n\t\t}\n\t\treturn err\n\t})\n\treturn err\n}",
"func (k *KeepAliveConn) KeepAliveContext(ctx context.Context) {\n\tk.Lock()\n\tdefer k.Unlock()\n\n\tif k.cancel != nil || k.isClosed {\n\t\treturn\n\t}\n\n\tderivedCtx, cancel := context.WithCancel(ctx)\n\tk.cancel = cancel\n\n\tgo k.readContext(derivedCtx)\n\tgo k.writeContext(derivedCtx)\n\tgo k.keepAliveContext(derivedCtx)\n\n}",
"func ctxForOp() (context.Context, func()) {\n\treturn context.WithTimeout(context.Background(), timeoutOp)\n}",
"func sendWithContext(ctx context.Context, httpClient *http.Client, url string, body io.Reader, opt *Options) (*http.Response, error) {\n\tv, _ := query.Values(opt)\n\n\t// fmt.Print(v.Encode()) will output: \"city=0&mr=1&pb=4&pro=0&yys=0\"\n\tAPIEndpoint := fmt.Sprintf(\"%s&%s\", url, v.Encode())\n\tfmt.Println(APIEndpoint)\n\t// Change NewRequest to NewRequestWithContext and pass context it\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, APIEndpoint, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// http.DefaultClient\n\tres, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res, nil\n}",
"func (s *Stream) WithContext(ctx context.Context) *Stream {\n\ts.ctx = ctx\n\treturn s\n}",
"func MetaWithContext(ctx context.Context, newMeta map[string]interface{}) context.Context {\n\tprevMeta := MetaFromContext(ctx)\n\n\tif prevMeta == nil {\n\t\tprevMeta = make(map[string]interface{})\n\t}\n\n\tfor k, v := range newMeta {\n\t\tprevMeta[k] = v\n\t}\n\n\treturn context.WithValue(ctx, MetaCtxKey, prevMeta)\n}",
"func AsContext(d Doner) context.Context {\n\tc, cancel := context.WithCancel(context.Background())\n\tDefer(d, cancel)\n\treturn c\n}",
"func (el *ZapEventLogger) SerializeContext(ctx context.Context) ([]byte, error) {\n\tgTracer := opentrace.GlobalTracer()\n\tb := make([]byte, 0)\n\tcarrier := bytes.NewBuffer(b)\n\tspan := opentrace.SpanFromContext(ctx)\n\tif err := gTracer.Inject(span.Context(), opentrace.Binary, carrier); err != nil {\n\t\treturn nil, err\n\t}\n\treturn carrier.Bytes(), nil\n}",
"func (c *minecraftConn) newContext(parent context.Context) (ctx context.Context, cancel func()) {\n\tctx, cancel = context.WithCancel(parent)\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\tcase <-c.closed:\n\t\t\tcancel()\n\t\t}\n\t}()\n\treturn ctx, cancel\n}",
"func WithContext(ctx context.Context) (Interface, context.Context) {\n\tctx, cancel := context.WithCancel(ctx)\n\treturn (&contextBreaker{ctx, cancel}).trigger(), ctx\n}",
"func WithContext(context string) StructuredLogger {\n\treturn factory.WithContext(context)\n}",
"func (op *Operation) With(ctx context.Context, err *error, args Args) (context.Context, FinishFunc) {\n\tctx, _, endObservation := op.WithAndLogger(ctx, err, args)\n\treturn ctx, endObservation\n}",
"func (op *Operation) With(ctx context.Context, err *error, args Args) (context.Context, FinishFunc) {\n\tctx, _, endObservation := op.WithAndLogger(ctx, err, args)\n\treturn ctx, endObservation\n}",
"func (blk *Block) DrawWithContext(d Drawable, ctx DrawContext) error {\n\tblocks, _, err := d.GeneratePageBlocks(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(blocks) != 1 {\n\t\treturn errors.New(\"too many output blocks\")\n\t}\n\n\tfor _, newBlock := range blocks {\n\t\tif err := blk.mergeBlocks(newBlock); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (obj *ConfigWriter) AddServantWithContext(imp impConfigWriterWithContext, objStr string) {\n\ttars.AddServantWithContext(obj, imp, objStr)\n}",
"func WithContext(ctx context.Context, db interfaces.DB) interfaces.DB {\n\t// the cases on if/else are actually transactions: noop\n\tif sdb, ok := db.(*DB); ok {\n\t\tif sdb.tx != nil {\n\t\t\treturn db\n\t\t}\n\t} else if _, ok := db.(interface{ Rollback() error }); ok {\n\t\treturn db\n\t}\n\n\treturn &DB{\n\t\tinner: db.WithContext(ctx),\n\t}\n}",
"func WithContext(ctx context.Context, opts ...TreeOption) context.Context {\n\tchosenName := fmt.Sprintf(\"tree-%d\", rand.Uint64())\n\tbaseOpts := append([]TreeOption{\n\t\toversight.WithRestartStrategy(oversight.OneForAll()),\n\t\toversight.NeverHalt(),\n\t}, opts...)\n\ttree := oversight.New(baseOpts...)\n\n\tmu.Lock()\n\ttrees[chosenName] = tree\n\tmu.Unlock()\n\n\twrapped := context.WithValue(ctx, treeName, chosenName)\n\tgo tree.Start(wrapped)\n\treturn wrapped\n}",
"func (manager *transportManager) cancelCtxCloseTransport() {\n\t// Grab the notification subscriber lock so new subscribers will not get added\n\t// without seeing the context cancel.\n\tmanager.notificationSubscriberLock.Lock()\n\n\t// Cancel the context the tryReconnect this closure will cause exits.\n\tmanager.cancelFunc()\n\n\t// Release the notification lock. Not doing so before we grab the livesOnce lock\n\t// can result in a deadlock if a redial is in process (since the redial needs to\n\t// grab the subscribers lock to notify them).\n\tmanager.notificationSubscriberLock.Unlock()\n\n\t// Take control of the connection lock to ensure all in-process operations have\n\t// completed.\n\tmanager.transportLock.Lock()\n\tdefer manager.transportLock.Unlock()\n\n\t// Close the current connection on exit\n\tdefer manager.transport.underlyingTransport().Close()\n}",
"func WriteDoubleWithContext(ctx context.Context, p thrift.TProtocol, value float64, name string, field int16) error {\n\tif err := p.WriteFieldBegin(ctx, name, thrift.DOUBLE, field); err != nil {\n\t\treturn thrift.PrependError(\"write field begin error: \", err)\n\t}\n\tif err := p.WriteDouble(ctx, value); err != nil {\n\t\treturn thrift.PrependError(\"field write error: \", err)\n\t}\n\tif err := p.WriteFieldEnd(ctx); err != nil {\n\t\treturn thrift.PrependError(\"write field end error: \", err)\n\t}\n\treturn nil\n}",
"func (f HandlerFunc) Handle(ctx context.Context, c WriteCloser) {\n\tf(ctx, c)\n}",
"func (_obj *DataService) CreateApplyWithContext(tarsCtx context.Context, wx_id string, club_id string, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(wx_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_string(club_id, 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"createApply\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func genContext(r *http.Request) (context.Context, context.CancelFunc) {\n\twriteTimeout := r.Context().Value(http.ServerContextKey).(*http.Server).WriteTimeout\n\treturn context.WithTimeout(context.Background(), writeTimeout*80/100)\n}",
"func withContext(borrower ContextBorrower, worker Worker) Worker {\n\n\treturn func(t *T, _ Context) {\n\n\t\tif t.Failed() {\n\t\t\treturn\n\t\t}\n\n\t\tctx, release, err := borrower.Borrow()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s\", err)\n\t\t\tt.FailNow()\n\t\t}\n\n\t\tdefer release()\n\t\tworkerRunner(nil, worker, t, ctx)\n\t}\n}",
"func DoCtx(ctx context.Context, req *http.Request, resp interface{}) (*http.Response, error) {\n\tr := req.Clone(ctx)\n\n\treturn Do(r, resp)\n}",
"func Context(canc Canceller) context.Context {\n\treturn ctxWrap{Canceller: canc}\n}",
"func ForWithContext(c context.Context, begin int, end int, f ForLoop) {\n\tlength := end - begin\n\n\tif length > 0 {\n\t\tctx, cacnel := context.WithCancel(c)\n\t\tgo doLoop(cacnel, begin, end, f)\n\t\t<-ctx.Done()\n\t}\n}",
"func (c *ConnUDP) WriteWithContext(ctx context.Context, udpCtx *ConnUDPContext, buffer []byte) error {\n\tif udpCtx == nil {\n\t\treturn fmt.Errorf(\"cannot write with context: invalid udpCtx\")\n\t}\n\tif udpCtx.raddr.IP.IsMulticast() {\n\t\treturn c.writeMulticastWithContext(ctx, udpCtx, buffer)\n\t}\n\n\twritten := 0\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tfor written < len(buffer) {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t}\n\t\terr := c.connection.SetWriteDeadline(time.Now().Add(c.heartBeat))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot set write deadline for udp connection: %v\", err)\n\t\t}\n\t\tn, err := WriteToSessionUDP(c.connection, udpCtx, buffer[written:])\n\t\tif err != nil {\n\t\t\tif isTemporary(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"cannot write to udp connection: %v\", err)\n\t\t}\n\t\twritten += n\n\t}\n\n\treturn nil\n}",
"func (config *DialConfig) DialContext(ctx context.Context, path string) (net.Conn, error) {\n\tvar err error\n\tvar h windows.Handle\n\th, err = tryDialPipe(ctx, &path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.ExpectedOwner != nil {\n\t\tsd, err := windows.GetSecurityInfo(h, windows.SE_FILE_OBJECT, windows.OWNER_SECURITY_INFORMATION)\n\t\tif err != nil {\n\t\t\twindows.Close(h)\n\t\t\treturn nil, err\n\t\t}\n\t\trealOwner, _, err := sd.Owner()\n\t\tif err != nil {\n\t\t\twindows.Close(h)\n\t\t\treturn nil, err\n\t\t}\n\t\tif !realOwner.Equals(config.ExpectedOwner) {\n\t\t\twindows.Close(h)\n\t\t\treturn nil, windows.ERROR_ACCESS_DENIED\n\t\t}\n\t}\n\n\tvar flags uint32\n\terr = windows.GetNamedPipeInfo(h, &flags, nil, nil, nil)\n\tif err != nil {\n\t\twindows.Close(h)\n\t\treturn nil, err\n\t}\n\n\tf, err := makeFile(h)\n\tif err != nil {\n\t\twindows.Close(h)\n\t\treturn nil, err\n\t}\n\n\t// If the pipe is in message mode, return a message byte pipe, which\n\t// supports CloseWrite.\n\tif flags&windows.PIPE_TYPE_MESSAGE != 0 {\n\t\treturn &messageBytePipe{\n\t\t\tpipe: pipe{file: f, path: path},\n\t\t}, nil\n\t}\n\treturn &pipe{file: f, path: path}, nil\n}",
"func (e *Encoder) EncodeContext(ctx context.Context, v interface{}) error {\n\tnode, err := e.EncodeToNodeContext(ctx, v)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to encode to node\")\n\t}\n\tif err := e.setCommentByCommentMap(node); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to set comment by comment map\")\n\t}\n\tif !e.written {\n\t\te.written = true\n\t} else {\n\t\t// write document separator\n\t\te.writer.Write([]byte(\"---\\n\"))\n\t}\n\tvar p printer.Printer\n\te.writer.Write(p.PrintNode(node))\n\treturn nil\n}",
"func (cw compressingWriter) Close() error {\n\tz := cw.WriteCloser.(*gzip.Writer)\n\terr := z.Flush()\n\tcw.p.Put(z)\n\treturn err\n}",
"func (cdp *Client) Context(ctx context.Context) *Client {\n\tctx, cancel := context.WithCancel(ctx)\n\tcdp.ctx = ctx\n\tcdp.ctxCancel = cancel\n\treturn cdp\n}",
"func WithCancel(ctx Context) (Context, context.CancelFunc) {\n\tstdCtx, cancel := context.WithCancel(ctx.StdContext())\n\treturn withStdCancel(ctx, stdCtx), cancel\n}",
"func wrapContext(ctx context.Context, adapter Adapter) contextWrapper {\n\treturn contextWrapper{\n\t\tctx: context.WithValue(ctx, ctxKey, adapter),\n\t\tadapter: adapter,\n\t}\n}",
"func WriteByteWithContext(ctx context.Context, p thrift.TProtocol, value int8, name string, field int16) error {\n\tif err := p.WriteFieldBegin(ctx, name, thrift.BYTE, field); err != nil {\n\t\treturn thrift.PrependError(\"write field begin error: \", err)\n\t}\n\tif err := p.WriteByte(ctx, value); err != nil {\n\t\treturn thrift.PrependError(\"field write error: \", err)\n\t}\n\tif err := p.WriteFieldEnd(ctx); err != nil {\n\t\treturn thrift.PrependError(\"write field end error: \", err)\n\t}\n\treturn nil\n}",
"func (ctx *Context) Close() error {\n\tif ctx == nil {\n\t\treturn nil\n\t}\n\n\treturn ctx.yum.Close()\n}",
"func (ctx *Context) Close() error {\n\tvar err error\n\tif ctx.Req != nil && ctx.Req.MultipartForm != nil {\n\t\terr = ctx.Req.MultipartForm.RemoveAll() // remove the temp files buffered to tmp directory\n\t}\n\t// TODO: close opened repo, and more\n\treturn err\n}",
"func (_obj *Apichannels) Channels_joinChannelWithContext(tarsCtx context.Context, params *TLchannels_joinChannel, _opt ...map[string]string) (ret Updates, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"channels_joinChannel\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func (m *Macross) ReleaseContext(c *Context) {\n\tc.Response.Header.SetServer(\"Macross\")\n\tm.pool.Put(c)\n}",
"func WithContext(ctx context.Context) OptFn {\n\treturn func(o *Opt) {\n\t\to.ctx = ctx\n\t}\n}",
"func withInterrupt(parent context.Context) (_ context.Context, stop func()) {\n\tsig := make(chan os.Signal, 1)\n\tsignal.Notify(sig, interruptSignals()...)\n\tctx, cancel := context.WithCancel(parent)\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-sig:\n\t\t\tcancel()\n\t\tcase <-done:\n\t\t}\n\t}()\n\treturn ctx, func() {\n\t\tcancel()\n\t\tsignal.Stop(sig)\n\t\tclose(done)\n\t}\n}",
"func SetContext(response http.ResponseWriter, ctx context.Context) http.ResponseWriter {\n\tif ca, ok := response.(ContextAware); ok {\n\t\tca.SetContext(ctx)\n\t\treturn response\n\t}\n\n\tif ctx == nil {\n\t\tpanic(\"nil context\")\n\t}\n\n\treturn &contextAwareResponseWriter{response, ctx}\n}",
"func (req *UpsertObjectRequest) Context(ctx context.Context) *UpsertObjectRequest {\n\treq.impl = req.impl.Context(ctx)\n\n\treturn req\n}",
"func (_obj *Apichannels) Channels_createChannelWithContext(tarsCtx context.Context, params *TLchannels_createChannel, _opt ...map[string]string) (ret Updates, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"channels_createChannel\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func (_obj *Apichannels) Channels_getAdminLogWithContext(tarsCtx context.Context, params *TLchannels_getAdminLog, _opt ...map[string]string) (ret Channels_AdminLogResults, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"channels_getAdminLog\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func Context(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// user service\n\t\tu := user.New(cognitoID, cognitoSecret)\n\t\tcontext.Set(r, \"userService\", u)\n\n\t\t// session helper\n\t\ts := session.New()\n\t\tcontext.Set(r, \"session\", s)\n\n\t\tvar netTransport = &http.Transport{\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t}).Dial,\n\t\t\tTLSHandshakeTimeout: 5 * time.Second,\n\t\t}\n\n\t\t// support timeout and net transport.\n\t\tc := &http.Client{\n\t\t\tTimeout: time.Second * 10,\n\t\t\tTransport: netTransport,\n\t\t}\n\n\t\t// http client\n\t\tcontext.Set(r, \"client\", c)\n\n\t\tp := post.New(dynamoTablePosts, dynamoEndpoint, nil)\n\t\tcontext.Set(r, \"postService\", p)\n\n\t\tl := like.New(dynamoTableLikes, dynamoEndpoint, nil)\n\t\tcontext.Set(r, \"likeService\", l)\n\n\t\th.ServeHTTP(w, r)\n\t})\n}",
"func NewContextWith(data map[string]interface{}) *Context {\n\tc := &Context{\n\t\tContext: context.Background(),\n\t\tdata: data,\n\t\touter: nil,\n\t\tmoot: &sync.Mutex{},\n\t}\n\n\tfor k, v := range Helpers.helpers {\n\t\tif !c.Has(k) {\n\t\t\tc.Set(k, v)\n\t\t}\n\t}\n\n\treturn c\n}",
"func CrtlfWithContext(ctx context.Context, format string, args ...interface{}) {\n\tif hub := sentry.GetHubFromContext(ctx); hub != nil {\n\t\tcreticaldeps(hub.CaptureMessage, 3, format, args...)\n\t\treturn\n\t}\n\n\tcreticaldeps(sentry.CaptureMessage, 3, format, args...)\n}",
"func (cs *ContextualServerStream) Context() context.Context {\n\treturn cs.Ctx\n}",
"func CancelWhenClosed(parent context.Context, w http.ResponseWriter) (context.Context, func()) {\n\tctx, cancel := context.WithCancel(parent)\n\n\tclose := w.(http.CloseNotifier).CloseNotify()\n\n\t// listen for the connection to close, trigger cancelation\n\tgo func() {\n\t\tselect {\n\t\tcase <-close:\n\t\t\tcancel()\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}()\n\n\treturn ctx, cancel\n}"
] | [
"0.7478201",
"0.59653234",
"0.59053546",
"0.5853121",
"0.5791737",
"0.57658553",
"0.57110214",
"0.5644903",
"0.5617057",
"0.55500585",
"0.54606736",
"0.5370619",
"0.52994496",
"0.5261682",
"0.5238762",
"0.5207119",
"0.52057135",
"0.5195372",
"0.51947",
"0.5191003",
"0.5137905",
"0.5120517",
"0.51183856",
"0.51183856",
"0.5107839",
"0.5091963",
"0.5086669",
"0.5077432",
"0.50702924",
"0.5052297",
"0.5046513",
"0.5024339",
"0.5022383",
"0.50117266",
"0.49957973",
"0.49881437",
"0.4986711",
"0.49835154",
"0.49749213",
"0.49740964",
"0.4971424",
"0.4967874",
"0.49621794",
"0.4952077",
"0.4936446",
"0.49345484",
"0.4930686",
"0.49250188",
"0.49213982",
"0.49011016",
"0.4895321",
"0.48948592",
"0.48907092",
"0.48896906",
"0.48891193",
"0.4868775",
"0.4866342",
"0.48525837",
"0.4848692",
"0.48461318",
"0.48359478",
"0.48349595",
"0.48284096",
"0.48284096",
"0.48269457",
"0.48261982",
"0.48213896",
"0.4821054",
"0.48166162",
"0.48147142",
"0.47924927",
"0.47902372",
"0.47721875",
"0.47642645",
"0.47596124",
"0.4758727",
"0.47571492",
"0.47510484",
"0.47469887",
"0.47348237",
"0.47222126",
"0.47116843",
"0.47116798",
"0.46986398",
"0.46961066",
"0.46952438",
"0.4694454",
"0.46823046",
"0.46797955",
"0.46720335",
"0.46670365",
"0.46628755",
"0.46591228",
"0.46515846",
"0.46474284",
"0.4643623",
"0.46408328",
"0.46376604",
"0.46329018",
"0.46250698"
] | 0.7699851 | 0 |
Close closes all resources and returns the result | func (c *closerWithContext) Close() error {
return c.WriteContextCloser.Close(c.ctx)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c FinalOutput) Close() {}",
"func (cr *callResult) Close() error { return nil }",
"func (r *result) Close() error {\n\treturn r.reader.Close()\n}",
"func (r *Result) Close() error {\n\tif r == nil {\n\t\treturn nil\n\t}\n\treturn r.close(true)\n}",
"func (fr *FakeResult) Close() {\n}",
"func (i *Iterator) Close() {}",
"func (r *body) Close() error { return nil }",
"func (e *HTTPExecuter) Close() {}",
"func (r *rows) Close() error {\n\treturn nil\n}",
"func Close() {\n\tlog4go.Debug(\"resources destroy, pid:%v\", os.Getpid())\n\tfor name, r := range resources {\n\t\terr := r.Close()\n\t\tif err != nil {\n\t\t\tlog4go.Error(\"resources[%s] destroy failed:%s\", name, err.Error())\n\t\t} else {\n\t\t\tlog4go.Info(\"resources[%s] destroy finish\", name)\n\t\t}\n\t}\n}",
"func (iter *Iterator) Close() error { return iter.impl.Close() }",
"func (i *ResolvedProductIter) Close() {}",
"func (s IOStreams) Close() error {\n\t// TODO\n\treturn nil\n}",
"func (pr *newPartialResult) Close() error {\n\treturn nil\n}",
"func (qm MergeQuerier) Close() error {\n\treturn nil\n}",
"func (directory) Close() error { return nil }",
"func (r *Resource) Close() {\n\tr.Close()\n}",
"func (sr *shardResult) Close() {\n\tfor _, series := range sr.blocks {\n\t\tseries.Blocks.Close()\n\t}\n}",
"func (si *ScanIterator) Close() {\n\t// Cleanup\n}",
"func (it *KVResultsIter) Close() error {\n\tit.it.Close()\n\treturn nil\n}",
"func (c *resultsConn) Close() error {\n\treturn c.c.Close()\n}",
"func (c *cachestub) Close() {}",
"func (q Query) Close() error {\n\treturn q.result.Close()\n}",
"func Close() error {\n\treturn cleanup.close()\n}",
"func (f *fetcher) Close() error {\n\treturn f.conn.Close()\n}",
"func (it *BaseContentSpaceCreateLibraryIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (rd *RemoteDB) Close() {\n}",
"func Close() {\n\t//Nothing to do\n}",
"func (it *BaseLibraryContentObjectCreatedIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (ms *memoryStorer) Close() error {\n\treturn nil\n}",
"func (r RowsImpl) Close() error {\n\treturn r.R.Close()\n}",
"func (e *AnalyzeExec) Close() error {\n\treturn nil\n}",
"func (r *DiscoveryResolver) Close() {\n}",
"func (rows *Rows) Close() error {\n\treturn nil\n}",
"func Close() {\n}",
"func (fn Closer) Close() error {\n\treturn fn()\n}",
"func (it *CakevaultHarvestIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (rs *PollResultSet) Close() error {\n\treturn rs.ResultSet.Close()\n}",
"func (ec *Encrypter) Close() {}",
"func (c *cur) Close() error {\n\tc.logger.Debug(\"Close().\")\n\terr := c.it.Close()\n\tc.it = nil\n\treturn err\n}",
"func Close() {\n\timpl.Close()\n}",
"func (ctx *ResourceContext) SafeClose() {\n}",
"func (it *AggregatorV2V3InterfaceNewRoundIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (iter *PlanIterator) Close() error {\n\treturn iter.src.Close()\n}",
"func (it *IOrakuruCoreFulfilledIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (c *client) Close() error { return c.c.Close() }",
"func (it *BaseContentContentObjectCreateIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (r *analyticsDeferredResultHandle) Close() error {\n\tr.rows.Close()\n\treturn r.err\n}",
"func (k *Khaiii) Close() {\n\tif k.firstWord != nil {\n\t\tk.FreeAnalyzeResult()\n\t}\n\tC.khaiii_close(k.handle)\n}",
"func (it *HarbergerScriptResultIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (r *rows) Close() {\n\tr.rows.Close()\n}",
"func (it *BREMFactoryBREMICOCreatedIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (it *RandomBeaconDkgMaliciousResultSlashedIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (it *RandomBeaconDkgResultApprovedIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (it *BaseContentSpaceCreateContentIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (it *BaseContentSpaceSetFactoryIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func Close() {\n\tpool.Close()\n}",
"func (it *ContentRunCreateIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (c jsonc) Close() error { return c.wc.Close() }",
"func (*Item) Close() error { return nil }",
"func (it *AggregatorNewRoundIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (s *Iterator) Close() {\n\ts.i.Close()\n}",
"func (it *Iterator) Close() {\n\tit.iitr.Close()\n}",
"func (nopCloser) Close() error { return nil }",
"func (h *DBHandle) Close() error {\n\treturn h.res.Close()\n}",
"func (it *HarbergerMetaURIIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (it *RandomBeaconDkgResultChallengedIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (c *Cache) Close() {\n\tc.fh.Close()\n}",
"func (it *RandomBeaconDkgMaliciousResultSlashingFailedIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (h *HTTPTester) Close() error {\n\treturn nil\n}",
"func (trans *Transcoder) Close() (err error) {\n\tfor _, stream := range trans.streams {\n\t\tif stream.aenc != nil {\n\t\t\tstream.aenc.Close()\n\t\t\tstream.aenc = nil\n\t\t}\n\t\tif stream.adec != nil {\n\t\t\tstream.adec.Close()\n\t\t\tstream.adec = nil\n\t\t}\n\t}\n\ttrans.streams = nil\n\treturn\n}",
"func (p *bytesViewer) Close() error { return nil }",
"func (it *BaseContentSetStatusCodeIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (self *Transcoder) Close() (err error) {\n\tfor _, stream := range self.streams {\n\t\tif stream.aenc != nil {\n\t\t\tstream.aenc.Close()\n\t\t\tstream.aenc = nil\n\t\t}\n\t\tif stream.adec != nil {\n\t\t\tstream.adec.Close()\n\t\t\tstream.adec = nil\n\t\t}\n\t}\n\tself.streams = nil\n\treturn\n}",
"func (c *Conn) Close() error { return nil }",
"func (self *FileBaseDataStore) Close() {}",
"func (it *SmartchefOwnershipTransferredIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (rs *PollOptionResultSet) Close() error {\n\treturn rs.ResultSet.Close()\n}",
"func (it *BaseLibraryApproveContentIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (r *Reader) Close() error {\n\tvar err error\n\tfor i, n := 0, r.NumR(); i < n; i++ {\n\t\tvar _err error\n\t\tterm := termReader(r.R(i))\n\t\tif term != nil {\n\t\t\t_err = term()\n\t\t}\n\t\tif err == nil && _err != nil {\n\t\t\terr = _err\n\t\t}\n\t}\n\treturn err\n}",
"func (it *RegistryIssuanceSingleIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (r *reader) Close() error {\n\tif r.lz4Stream != nil {\n\t\tC.LZ4_freeStreamDecode(r.lz4Stream)\n\t\tr.lz4Stream = nil\n\t}\n\n\tC.free(r.left)\n\tC.free(r.right)\n\treturn nil\n}",
"func (it *BaseContentReturnCustomHookIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (it *SimpleMultiSigExecuteIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (r *filter) Close() error {\n\treturn r.input.Close()\n}",
"func (it *BaseContentGetAccessChargeIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (b *Batch) Close() {\n}",
"func (d *downloader) Close() error {\n\tif d.Reader != nil {\n\t\treturn d.Reader.Close()\n\t}\n\treturn nil\n}",
"func (it *RandomBeaconDkgResultSubmittedIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (it *RegistryURIIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (*azblobObjectReader) Close() error {\n\treturn nil\n}",
"func (neverCache) Close() error {\n\treturn nil\n}",
"func (it *CrowdsaleOwnershipTransferredIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (it *Iter) Close() {\n\t// (todo) > handle error\n\tit.i.Close()\n}",
"func (e *IndexReaderExecutor) Close() error {\n\terr := closeAll(e.result, e.partialResult)\n\te.result = nil\n\te.partialResult = nil\n\treturn errors.Trace(err)\n}",
"func (r *reader) Close() error {\n\treturn r.body.Close()\n}",
"func (e *NestedLoopJoinExec) Close() error {\n\te.resultRows = nil\n\te.innerRows = nil\n\treturn e.BigExec.Close()\n}",
"func (r *recorders) close() {\n\tr.Lock()\n\tdefer r.Unlock()\n\n\tfor _, re := range r.list {\n\t\tre.Close()\n\t}\n}",
"func (it *BaseLibraryVersionDeleteIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (mio *Mio) Close() error {\n if mio.obj == nil {\n return errors.New(\"object is not opened\")\n }\n C.m0_obj_fini(mio.obj)\n C.free(unsafe.Pointer(mio.obj))\n mio.obj = nil\n\n return nil\n}",
"func (r ResourceConn) Close() {\n\tr.Conn.Close()\n}"
] | [
"0.68667895",
"0.6778104",
"0.67122173",
"0.6589011",
"0.6545102",
"0.6529692",
"0.6523978",
"0.6461263",
"0.63559073",
"0.6354894",
"0.63269347",
"0.63106054",
"0.6306415",
"0.63030994",
"0.627391",
"0.6237731",
"0.61970115",
"0.6179155",
"0.616494",
"0.61487216",
"0.61434025",
"0.6120729",
"0.61121964",
"0.6061707",
"0.60172725",
"0.6001971",
"0.5974533",
"0.5972011",
"0.595983",
"0.5958085",
"0.5957267",
"0.5956933",
"0.5950135",
"0.5943595",
"0.59308493",
"0.59264",
"0.59260577",
"0.592261",
"0.5916945",
"0.59051216",
"0.5904272",
"0.58947366",
"0.589369",
"0.5891852",
"0.5883829",
"0.5872726",
"0.5872317",
"0.58720994",
"0.58699787",
"0.58608043",
"0.58595324",
"0.5858244",
"0.5838286",
"0.5835777",
"0.5833016",
"0.58315355",
"0.5831236",
"0.5829613",
"0.5829042",
"0.5819083",
"0.58185375",
"0.58167297",
"0.58141583",
"0.58135915",
"0.58120894",
"0.5807715",
"0.5805347",
"0.5802617",
"0.57928926",
"0.579287",
"0.5792228",
"0.57832533",
"0.5777395",
"0.577714",
"0.5762714",
"0.5760198",
"0.57596123",
"0.5758977",
"0.57470137",
"0.5746289",
"0.5742921",
"0.57405776",
"0.57391876",
"0.5737442",
"0.573563",
"0.57352614",
"0.5731621",
"0.5726211",
"0.57197464",
"0.57193285",
"0.5717681",
"0.5716076",
"0.5711017",
"0.5705238",
"0.57049453",
"0.57029426",
"0.56893635",
"0.5687688",
"0.5685339",
"0.5684745",
"0.5681564"
] | 0.0 | -1 |
NilCloser returns closer if it's not nil, otherwise returns a nop closer | func NilCloser(r io.Closer) io.Closer {
if r == nil {
return &nilCloser{}
}
return r
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (nopCloser) Close() error { return nil }",
"func (n *nilCloser) Close() error {\n\t// works even if n is nil\n\treturn nil\n}",
"func NopCloser() error { return nil }",
"func NoCloser(in io.Reader) io.Reader {\n\tif in == nil {\n\t\treturn in\n\t}\n\t// if in doesn't implement io.Closer, just return it\n\tif _, canClose := in.(io.Closer); !canClose {\n\t\treturn in\n\t}\n\treturn noClose{in: in}\n}",
"func NopCloser() io.Closer {\r\n\treturn &nopCloser{}\r\n}",
"func NopCloser(bio Biome) BiomeCloser {\n\treturn nopCloser{bio}\n}",
"func NopReadSeekerCloser(r io.ReadSeeker) ReadSeekerCloser {\n\treturn readSeekerCloser{r, func() error { return nil }}\n}",
"func Wrap(closeFunc func() error) io.Closer {\n\treturn simpleCloser{\n\t\tcloseFunc: closeFunc,\n\t}\n}",
"func NopCloser(std agent.ExtendedAgent) Agent {\n\treturn nopCloser{std}\n}",
"func NoCloseReader(r io.Reader) io.Reader {\n _, ok := r.(io.Closer)\n if ok {\n return readerWrapper{r}\n }\n return r\n}",
"func NopWriteCloser(r io.Writer) io.WriteCloser {\n\treturn nopWriteCloser{r}\n}",
"func NopWriteCloser(w io.Writer) io.WriteCloser {\n\treturn &nopWriteCloser{w}\n}",
"func NopCloser(r xml.TokenReader) TokenReadCloser {\n\treturn nopCloser{r}\n}",
"func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {\n\treturn exported.NopCloser(rs)\n}",
"func NopWriteCloser(w io.Writer) io.WriteCloser {\n\treturn errWriteCloser{Writer: w, CloseErr: nil}\n}",
"func (b *bufCloser) Close() error { return nil }",
"func (g *Group) popCloser() (closer func() error) {\n\tg.mutex.Lock()\n\tdefer g.mutex.Unlock()\n\ti := len(g.closers) - 1\n\tif i >= 0 {\n\t\tcloser = g.closers[i]\n\t\tg.closers = g.closers[:i]\n\t}\n\treturn\n}",
"func closeIgnore(closer io.Closer) {\n\t_ = closer.Close()\n}",
"func NewCloser() *Closer {\n\treturn &Closer{\n\t\tm: &sync.Mutex{},\n\t\to: &sync.Once{},\n\t}\n}",
"func TestExactReadCloserShort(t *testing.T) {\n\tbuf := bytes.NewBuffer(make([]byte, 5))\n\trc := NewExactReadCloser(&readerNilCloser{buf}, 10)\n\tif _, err := rc.Read(make([]byte, 10)); err != nil {\n\t\tt.Fatalf(\"Read expected nil err, got %v\", err)\n\t}\n\tif err := rc.Close(); err != ErrShortRead {\n\t\tt.Fatalf(\"Close expected %v, got %v\", ErrShortRead, err)\n\t}\n}",
"func NewCloser(initial int) *Closer {\n\tret := &Closer{}\n\tret.ctx, ret.cancel = context.WithCancel(context.Background())\n\tret.waiting.Add(initial)\n\treturn ret\n}",
"func TryClose(maybeClosers ...interface{}) {\n\tfor _, maybeCloser := range maybeClosers {\n\t\tif closer, ok := maybeCloser.(io.Closer); ok {\n\t\t\t_ = closer.Close()\n\t\t}\n\t}\n}",
"func mustClose(c io.Closer) {\n\tif err := c.Close(); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func SafeClose(c io.Closer) {\n\tif c != nil {\n\t\terr := c.Close()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n}",
"func (*writeCloser) Close() error {\n\treturn nil\n}",
"func ReadCloserClose(rc *zip.ReadCloser,) error",
"func (mock WriteCloser) fakeZeroClose() error {\n\tvar (\n\t\tr0 error\n\t)\n\treturn r0\n}",
"func (v *ReadCloserValue) Freeze() {}",
"func (c *carver) newWriterCloser(fp string) (io.WriteCloser, error) {\n\tif c.dryRun {\n\t\treturn noopCloser{w: io.Discard}, nil\n\t}\n\tif c.w != nil {\n\t\treturn noopCloser{w: c.w}, nil\n\t}\n\treturn os.Create(fp)\n}",
"func (f *FakeWriteCloser) Close() error {\n\treturn nil\n}",
"func (dr *NullReader) Close() error {\n\tif dr.r == nil {\n\t\treturn fmt.Errorf(\"not started\")\n\t}\n\treturn dr.r.Close()\n}",
"func ensureReaderClosed(r io.ReadCloser) error {\n\t_, err := io.Copy(ioutil.Discard, r)\n\tif closeErr := r.Close(); closeErr != nil && err == nil {\n\t\terr = closeErr\n\t}\n\treturn err\n}",
"func WriteCloserDaisy(inp <-chan io.WriteCloser, tube WriteCloserTube) (out <-chan io.WriteCloser) {\n\tcha := make(chan io.WriteCloser)\n\tgo tube(inp, cha)\n\treturn cha\n}",
"func NoCloseRows(r Rows) Rows {\n _, ok := r.(io.Closer)\n if ok {\n return rowsWrapper{r}\n }\n return r\n}",
"func NewReadSeekerCloser(t mockConstructorTestingTNewReadSeekerCloser) *ReadSeekerCloser {\n\tmock := &ReadSeekerCloser{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func (p *IdlePool) Get() io.Closer {\n\tp.Lock()\n\tdefer p.Unlock()\n\tfor i, c := range p.elems {\n\t\tif c == nil {\n\t\t\tcontinue\n\t\t}\n\t\tp.elems[i] = nil\n\t\treturn c\n\t}\n\treturn nil\n}",
"func TestExactReadCloserExpectEOF(t *testing.T) {\n\tbuf := bytes.NewBuffer(make([]byte, 10))\n\trc := NewExactReadCloser(&readerNilCloser{buf}, 1)\n\tif _, err := rc.Read(make([]byte, 10)); err != ErrExpectEOF {\n\t\tt.Fatalf(\"expected %v, got %v\", ErrExpectEOF, err)\n\t}\n}",
"func (o *ODirectReader) Close() error {\n\tif o.bufp != nil {\n\t\tif o.SmallFile {\n\t\t\tODirectPoolSmall.Put(o.bufp)\n\t\t} else {\n\t\t\tODirectPoolLarge.Put(o.bufp)\n\t\t}\n\t\to.bufp = nil\n\t\to.buf = nil\n\t}\n\to.err = errors.New(\"internal error: ODirectReader Read after Close\")\n\treturn o.File.Close()\n}",
"func (s *Stopper) AddCloser(c Closer) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif s.refuseRLocked() {\n\t\tc.Close()\n\t\treturn\n\t}\n\ts.mu.closers = append(s.mu.closers, c)\n}",
"func (s *SeekerWrapper) Close() error { return s.s.Close() }",
"func (c *refCountedCloser) Close(ctx context.Context) error {\n\tremaining := c.refCount.Add(-1)\n\n\tif remaining != 0 {\n\t\treturn nil\n\t}\n\n\tif c.closed.Load() {\n\t\tpanic(\"already closed\")\n\t}\n\n\tc.closed.Store(true)\n\n\tvar errors []error\n\n\tfor _, closer := range c.closers {\n\t\terrors = append(errors, closer(ctx))\n\t}\n\n\t//nolint:wrapcheck\n\treturn multierr.Combine(errors...)\n}",
"func newNullableTicker(d time.Duration) (<-chan time.Time, func()) {\n\tif d > 0 {\n\t\tt := time.NewTicker(d)\n\t\treturn t.C, t.Stop\n\t}\n\treturn nil, func() {}\n}",
"func CloseTheCloser(c io.Closer) {\n\t_ = c.Close()\n}",
"func ensureReaderClosed(stream io.ReadCloser) {\n\tif stream == nil {\n\t\treturn\n\t}\n\tio.Copy(ioutil.Discard, stream)\n\tstream.Close()\n}",
"func (n *NoOP) Close() {}",
"func checkClose(c io.Closer, err *error) {\n\tcerr := c.Close()\n\tif *err == nil {\n\t\t*err = cerr\n\t}\n}",
"func (closer *Closer) CloseChannel() chan struct{} {\n\treturn closer.channel\n}",
"func (hcwc hcWriteCloser) Close() error {\n\treturn nil\n}",
"func (_e *ReadSeekerCloser_Expecter) Close() *ReadSeekerCloser_Close_Call {\n\treturn &ReadSeekerCloser_Close_Call{Call: _e.mock.On(\"Close\")}\n}",
"func (mock WriteCloser) Close() error {\n\tmethodName := \"Close\" // nolint: goconst\n\tif mock.impl.Close != nil {\n\t\treturn mock.impl.Close()\n\t}\n\tif mock.callbackNotImplemented != nil {\n\t\tmock.callbackNotImplemented(mock.t, mock.name, methodName)\n\t} else {\n\t\tgomic.DefaultCallbackNotImplemented(mock.t, mock.name, methodName)\n\t}\n\treturn mock.fakeZeroClose()\n}",
"func WrapCancel(cancel context.CancelFunc) io.Closer {\n\treturn Wrap(func() error {\n\t\tcancel()\n\t\treturn nil\n\t})\n}",
"func Close(closer io.Closer, log log.Logger) {\n\tif err := closer.Close(); err != nil {\n\t\tlog.Crit(\"Failed to Close Object: %#v\\n Error: %s \", err.Error())\n\t}\n}",
"func ReadEOFCloser(r io.Reader) io.Reader {\n\treturn readEOFCloser{\n\t\tReader: r,\n\t}\n}",
"func WithCloser(closer io.Closer) OptionFunc {\n\treturn func(c *Config) error {\n\t\tc.closers = append(c.closers, closer)\n\t\treturn nil\n\t}\n}",
"func CloseAndIgnore(c io.Closer) {\n\t_ = c.Close()\n}",
"func NopCloserWithSize(r io.Reader) io.ReadCloser {\n\treturn nopCloserWithObjectSize{r}\n}",
"func (d *Decoder) IOReadCloser() io.ReadCloser {\n\treturn closeWrapper{d: d}\n}",
"func CloseQuietly(v io.Closer) {\n\t_ = v.Close()\n}",
"func Close(o io.Closer) {\n\t_ = o.Close()\n}",
"func NewWriteCloser(t mockConstructorTestingTNewWriteCloser) *WriteCloser {\n\tmock := &WriteCloser{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func (closer *Closer) Close() {\n\tclose(closer.channel)\n}",
"func WithCloser(f func() error) Option {\n\treturn func(e *environment) {\n\t\tif f == nil {\n\t\t\te.closer = NopCloser\n\t\t} else {\n\t\t\te.closer = f\n\t\t}\n\t}\n}",
"func (g *Group) AddCloser(closer func() error) {\n\tg.mutex.Lock()\n\tdefer g.mutex.Unlock()\n\tg.closers = append(g.closers, closer)\n}",
"func (d *dht) getCloser(k types.PublicKey) []dhtEntry {\n\tresults := append([]dhtEntry{}, d.sorted...)\n\tsort.SliceStable(results, func(i, j int) bool {\n\t\treturn util.DHTOrdered(d.r.public, results[i].PublicKey(), results[j].PublicKey())\n\t})\n\treturn results\n}",
"func New() Closer {\n\treturn &closer{ch: make(chan struct{})}\n}",
"func LoggedCloser(closer io.Closer) {\n\tif err := closer.Close(); err != nil {\n\t\tlogger.Error(\"error while closing: %s\", err)\n\t}\n}",
"func (s *Stopper) AddCloser(c Closer) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tselect {\n\tcase <-s.stopper:\n\t\t// Close immediately.\n\t\tc.Close()\n\tdefault:\n\t\ts.mu.closers = append(s.mu.closers, c)\n\t}\n}",
"func NewCloserInit(route wire.Route, ctx common.Context, socket core.DataSocket) func(machine.WorkerSocket, []interface{}) {\n\tlogger := common.FormatLogger(ctx.Logger(), route)\n\n\treturn func(worker machine.WorkerSocket, args []interface{}) {\n\t\tlogger.Info(\"Closing init\")\n\t\tif err := closeInit(route, ctx, socket.Rx(), socket.Tx()); err != nil {\n\t\t\tlogger.Error(err.Error())\n\t\t\tworker.Fail(err)\n\t\t}\n\n\t\tworker.Terminate()\n\t}\n}",
"func (mrc *MockReadCloser) Close() error {\n\tmrc.closed = true\n\treturn nil\n}",
"func WriteCloserDaisyChain(inp <-chan io.WriteCloser, tubes ...WriteCloserTube) (out <-chan io.WriteCloser) {\n\tcha := inp\n\tfor i := range tubes {\n\t\tcha = WriteCloserDaisy(cha, tubes[i])\n\t}\n\treturn cha\n}",
"func drain(r io.ReadCloser) {\n\tgo func() {\n\t\t// Panicking here does not put data in\n\t\t// an inconsistent state.\n\t\tdefer func() {\n\t\t\t_ = recover()\n\t\t}()\n\n\t\t_, _ = io.Copy(io.Discard, r)\n\t\tr.Close()\n\t}()\n}",
"func (r *mockedReader) Close() error {\n\tr.c = true\n\treturn nil\n}",
"func (c *NOOPConnection) Close() {\n}",
"func Closed(c io.Closer) {\n\tif c != nil {\n\t\t_ = c.Close()\n\t}\n}",
"func (r *Reader) Close() error {\n\tif closer, ok := r.Reader.(io.Closer); ok {\n\t\treturn closer.Close()\n\t}\n\treturn nil\n}",
"func NewMockCloser(ctrl *gomock.Controller) *MockCloser {\n\tmock := &MockCloser{ctrl: ctrl}\n\tmock.recorder = &MockCloserMockRecorder{mock}\n\treturn mock\n}",
"func (r *ThrottledReadCloser) Close() error {\n\tr.pool.mu.Lock()\n\tdefer r.pool.mu.Unlock()\n\tdelete(r.pool.connections, r.id)\n\tr.pool.updateBufferSize()\n\treturn r.origReadCloser.Close()\n}",
"func (f CloserFn) Close() {\n\tf()\n}",
"func (f CloserFn) Close() {\n\tf()\n}",
"func (this *reader) Close() (err error) {\n\tif ioCloser, ok := this.ioReader.(io.Closer); ok {\n\t\terr = ioCloser.Close()\n\t}\n\treturn\n}",
"func NewWriteCloser(t *testing.T, cb gomic.CallbackNotImplemented) *WriteCloser {\n\treturn &WriteCloser{\n\t\tt: t, name: \"WriteCloser\", callbackNotImplemented: cb}\n}",
"func (f *FileBlob) ReadCloser() (io.ReadCloser, error) {\n\tif f.blob == nil {\n\t\treturn nil, fmt.Errorf(\"underlying blob ([]byte) is nil\")\n\t}\n\treturn blob.NewBufferedReadCloser(f.blob), nil\n}",
"func (r *Remoter) Close() error {\n\tr.closer = nil\n\tif r.clt != nil {\n\t\treturn r.clt.Close()\n\t}\n\treturn nil\n}",
"func NewReadSeekerCloser(r io.ReadSeeker, c CloseFunc) ReadSeekerCloser {\n\treturn readSeekerCloser{r, c}\n}",
"func (_e *WriteCloser_Expecter) Close() *WriteCloser_Close_Call {\n\treturn &WriteCloser_Close_Call{Call: _e.mock.On(\"Close\")}\n}",
"func (c *Closer) Close() error {\n\tc.CloseAll()\n\treturn nil\n}",
"func NewReadCloser(rd io.ReadCloser) io.ReadCloser {\n\tif rd == nil {\n\t\treturn nil\n\t}\n\n\tret, err := NewReadCloserSize(rd, DefaultBuffers, DefaultBufferSize)\n\n\t// Should not be possible to trigger from other packages.\n\tif err != nil {\n\t\tpanic(\"unexpected error:\" + err.Error())\n\t}\n\treturn ret\n}",
"func Close(closer io.Closer) {\n\tdefer func() {\n\t\tif p := recover(); p != nil {\n\t\t\tlog.Panic(p)\n\t\t}\n\t}()\n\tif err := closer.Close(); err != nil {\n\t\tlog.Error(err)\n\t}\n}",
"func (a *reader) Close() (err error) {\n\tselect {\n\tcase <-a.exited:\n\tcase a.exit <- struct{}{}:\n\t\t<-a.exited\n\t}\n\tif a.closer != nil {\n\t\t// Only call once\n\t\tc := a.closer\n\t\ta.closer = nil\n\t\treturn c.Close()\n\t}\n\ta.err = errors.New(\"readahead: read after Close\")\n\treturn nil\n}",
"func (er *EventReader[T]) Close() {\n\tif closer, ok := er.reader.(io.Closer); ok {\n\t\tcloser.Close()\n\t}\n}",
"func IgnoreClose(cr io.Closer) {\n\terr := cr.Close()\n\tIgnoreError(err)\n}",
"func Cl(c ...Arg) *Closer {\n\treturn &Closer{c}\n}",
"func Close(obj interface{}) {\n\tif obj == nil {\n\t\treturn\n\t}\n\tif c, ok := obj.(Closer); ok {\n\t\tc.Close()\n\t}\n}",
"func (c *Closer) Close() (err error) {\n\tc.o.Do(func() {\n\t\t// Get close funcs\n\t\tc.m.Lock()\n\t\tfs := append([]CloseFunc{}, c.fs...)\n\t\tc.m.Unlock()\n\n\t\t// Loop through closers\n\t\tvar errs []error\n\t\tfor _, f := range fs {\n\t\t\tif errC := f(); errC != nil {\n\t\t\t\terrs = append(errs, errC)\n\t\t\t}\n\t\t}\n\n\t\t// Process errors\n\t\tif len(errs) == 1 {\n\t\t\terr = errs[0]\n\t\t} else if len(errs) > 1 {\n\t\t\terr = astierror.NewMultiple(errs)\n\t\t}\n\t})\n\treturn\n}",
"func (r *ThrottledWriteCloser) Close() error {\n\tr.pool.mu.Lock()\n\tdefer r.pool.mu.Unlock()\n\tdelete(r.pool.connections, r.id)\n\tr.pool.updateBufferSize()\n\treturn r.origWriteCloser.Close()\n}",
"func MultiCloser(closers ...io.Closer) io.Closer {\n\treturn &multiCloser{\n\t\tclosers: closers,\n\t}\n}",
"func (w *RWWrapper) Close() {\n\tif w.gz != nil {\n\t\tw.gz.Close()\n\t}\n}",
"func NewReadCloser(r io.Reader, c CloseFunc) io.ReadCloser {\n\treturn readCloser{r, c}\n}",
"func (f *FakeReadCloser) Close() error {\n\tf.CloseCalled = true\n\treturn f.CloseError\n}",
"func newFuncCloser(fn func() error) *funcCloser {\n\treturn &funcCloser{\n\t\tfn: fn,\n\t}\n}"
] | [
"0.7222569",
"0.69184256",
"0.68131655",
"0.65612435",
"0.65169245",
"0.608959",
"0.59361756",
"0.59183574",
"0.5906258",
"0.5872431",
"0.57893544",
"0.57882726",
"0.5740676",
"0.5678546",
"0.56239897",
"0.5589913",
"0.55773497",
"0.5547098",
"0.54516846",
"0.54490966",
"0.54453087",
"0.53478074",
"0.53134245",
"0.5238412",
"0.5234843",
"0.5225153",
"0.5168599",
"0.5156914",
"0.5156862",
"0.50839907",
"0.50511545",
"0.501426",
"0.4993796",
"0.49704328",
"0.49326342",
"0.4929904",
"0.49295527",
"0.49192756",
"0.4908235",
"0.49029765",
"0.4890587",
"0.48879722",
"0.4883035",
"0.48813483",
"0.48772123",
"0.48691502",
"0.48554662",
"0.48538432",
"0.48369205",
"0.47948813",
"0.4793564",
"0.478939",
"0.4774067",
"0.47676587",
"0.47608978",
"0.4748314",
"0.47325483",
"0.4731918",
"0.47242805",
"0.46782437",
"0.465498",
"0.46545625",
"0.46501246",
"0.4629458",
"0.46081457",
"0.46073243",
"0.46062264",
"0.45943567",
"0.45672476",
"0.4563887",
"0.4562749",
"0.45618916",
"0.4560796",
"0.45587486",
"0.4551951",
"0.45517477",
"0.4542441",
"0.4527389",
"0.4527389",
"0.45238248",
"0.4498847",
"0.44986692",
"0.44848832",
"0.44810283",
"0.44794363",
"0.44773722",
"0.44711778",
"0.44709802",
"0.44664547",
"0.44648725",
"0.44577488",
"0.44521204",
"0.4443298",
"0.4436248",
"0.44332758",
"0.44323808",
"0.44323087",
"0.4432212",
"0.44299203",
"0.44264048"
] | 0.7534535 | 0 |
NopWriteCloser returns a WriteCloser with a noop Close method wrapping the provided Writer w | func NopWriteCloser(r io.Writer) io.WriteCloser {
return nopWriteCloser{r}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NopWriteCloser(w io.Writer) io.WriteCloser {\n\treturn &nopWriteCloser{w}\n}",
"func NopWriteCloser(w io.Writer) io.WriteCloser {\n\treturn errWriteCloser{Writer: w, CloseErr: nil}\n}",
"func NopCloser() error { return nil }",
"func NopCloser() io.Closer {\r\n\treturn &nopCloser{}\r\n}",
"func NopFlusher(w Writer) WriteFlusher {\n\treturn nopFlusher{w}\n}",
"func NopCloser(std agent.ExtendedAgent) Agent {\n\treturn nopCloser{std}\n}",
"func NewWriteCloser(t *testing.T, cb gomic.CallbackNotImplemented) *WriteCloser {\n\treturn &WriteCloser{\n\t\tt: t, name: \"WriteCloser\", callbackNotImplemented: cb}\n}",
"func NopCloser(r xml.TokenReader) TokenReadCloser {\n\treturn nopCloser{r}\n}",
"func NewWriteCloser(t mockConstructorTestingTNewWriteCloser) *WriteCloser {\n\tmock := &WriteCloser{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NopCloser(bio Biome) BiomeCloser {\n\treturn nopCloser{bio}\n}",
"func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {\n\treturn exported.NopCloser(rs)\n}",
"func NewMockWriteCloser(t *testing.T) *MockWriteCloser {\n\treturn &MockWriteCloser{\n\t\tb: bytes.Buffer{},\n\t\tclosed: false,\n\t\tt: t,\n\t}\n}",
"func wrapWriter(w http.ResponseWriter) writerProxy {\n\tvar _, cn = w.(http.CloseNotifier) // nolint\n\tvar _, fl = w.(http.Flusher)\n\tvar _, hj = w.(http.Hijacker)\n\tvar _, rf = w.(io.ReaderFrom)\n\n\tvar bw = basicWriter{ResponseWriter: w}\n\tif cn && fl && hj && rf {\n\t\treturn &fancyWriter{&bw}\n\t}\n\tif fl {\n\t\treturn &flushWriter{&bw}\n\t}\n\treturn &bw\n}",
"func (c *carver) newWriterCloser(fp string) (io.WriteCloser, error) {\n\tif c.dryRun {\n\t\treturn noopCloser{w: io.Discard}, nil\n\t}\n\tif c.w != nil {\n\t\treturn noopCloser{w: c.w}, nil\n\t}\n\treturn os.Create(fp)\n}",
"func NopReadSeekerCloser(r io.ReadSeeker) ReadSeekerCloser {\n\treturn readSeekerCloser{r, func() error { return nil }}\n}",
"func (nopCloser) Close() error { return nil }",
"func (f *FakeWriteCloser) Close() error {\n\treturn nil\n}",
"func newFlushWriter(w io.Writer) flushWriter {\n\tfw := flushWriter{writer: w}\n\tif f, ok := w.(http.Flusher); ok {\n\t\tfw.flusher = f\n\t}\n\n\treturn fw\n}",
"func NoCloser(in io.Reader) io.Reader {\n\tif in == nil {\n\t\treturn in\n\t}\n\t// if in doesn't implement io.Closer, just return it\n\tif _, canClose := in.(io.Closer); !canClose {\n\t\treturn in\n\t}\n\treturn noClose{in: in}\n}",
"func NewWrappedWriter(writer store.Writer, onFinalize FinalizeFunc) *WrappedWriter {\n\treturn &WrappedWriter{writer: writer, onFinalize: onFinalize}\n}",
"func (*writeCloser) Close() error {\n\treturn nil\n}",
"func newLockingWriteCloser(wc io.WriteCloser) io.WriteCloser {\n\treturn &lockingWriteCloser{WriteCloser: wc}\n}",
"func NewWrappedWriter(w io.Writer) (*WrappedWriter, error) {\n\treturn &WrappedWriter{wrapped: w, bw: bufio.NewWriterSize(w, 1024*1024)}, nil\n}",
"func Wrap(closeFunc func() error) io.Closer {\n\treturn simpleCloser{\n\t\tcloseFunc: closeFunc,\n\t}\n}",
"func (rwc *noPIReadWriteCloser) Write(p []byte) (n int, err error) {\n\tcopy(rwc.wBuffer[4:], p)\n\tn, err = rwc.ReadWriteCloser.Write(rwc.wBuffer[:len(p)+4])\n\treturn n - 4, err\n}",
"func (w *Writer) Close() error {}",
"func ToWriteCloser(w io.WriteCloser) Dest {\n\treturn func() (io.WriteCloser, error) {\n\t\treturn w, nil\n\t}\n}",
"func NoCloseReader(r io.Reader) io.Reader {\n _, ok := r.(io.Closer)\n if ok {\n return readerWrapper{r}\n }\n return r\n}",
"func (w *Writer) Bypass() io.Writer {\n\treturn &bypass{writer: w}\n}",
"func NewLimitedWriter(w io.WriteCloser, options ...LimitedWriterOption) io.WriteCloser {\n\tfor _, o := range options {\n\t\tw = o(w)\n\t}\n\n\treturn NewSyncedWriteCloser(w)\n}",
"func wrapWriter(w http.ResponseWriter) writerProxy {\n\tbw := basicWriter{ResponseWriter: w}\n\treturn &bw\n}",
"func (nw noopWriter) Write(b []byte) (int, error) {\n\treturn len(b), nil\n}",
"func NopCloserWithSize(r io.Reader) io.ReadCloser {\n\treturn nopCloserWithObjectSize{r}\n}",
"func NewWriter(w io.Writer) io.WriteCloser {\n\treturn NewWriterSizeLevel(w, -1, DefaultCompression)\n}",
"func (w *DiscardWriter) Close() error {\n\tw.Writer = nil\n\treturn nil\n}",
"func NewWriteCloser(store Store, key string, ttl int) io.WriteCloser {\n\treturn &writerCloser{\n\t\tstore: store,\n\t\tkey: key,\n\t\tttl: ttl,\n\t}\n}",
"func (hcwc hcWriteCloser) Close() error {\n\treturn nil\n}",
"func NewChunkedWriter(w io.Writer) io.WriteCloser",
"func Base(w io.Writer) io.Writer {\n\tif d, ok := w.(decorator); ok {\n\t\treturn coalesceWriters(d.Base(), w)\n\t}\n\treturn w\n}",
"func newResponseWriterNoBody(w http.ResponseWriter) *responseWriterNoBody {\n\treturn &responseWriterNoBody{w}\n}",
"func Must(w io.WriteCloser, err error) io.WriteCloser {\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"could not create revolving log writer, %v\", err))\n\t}\n\treturn w\n}",
"func NilCloser(r io.Closer) io.Closer {\n\tif r == nil {\n\t\treturn &nilCloser{}\n\t}\n\treturn r\n}",
"func Underlying(w io.Writer) io.Writer {\n\tif u, ok := w.(WrappedWriter); ok {\n\t\treturn Underlying(u.Underlying())\n\t}\n\treturn w\n}",
"func (log *Logger) Wrap(pipe func(wc io.WriteCloser) io.WriteCloser) {\n\tlog.Lock()\n\tdefer log.Unlock()\n\twc := log.output\n\tif log.fallback == nil {\n\t\tlog.fallback = wc\n\t\twc = writeNoCloser{wc}\n\t}\n\tlog.output = pipe(wc)\n}",
"func (w *writerWrapper) Unwrap() http.ResponseWriter {\n\treturn w.ResponseWriter\n}",
"func NewWriter(w io.Writer) *Writer {\n\tvar bw Writer\n\tbw.Reset(w)\n\treturn &bw\n}",
"func WriterClose(w *zip.Writer,) error",
"func NopOutput() io.Writer {\r\n\treturn &nopOutput{}\r\n}",
"func (s *Status) MaybeWrapWriter(w io.Writer) io.Writer {\n\tif IsTerminal(s.writer) && IsTerminal(w) {\n\t\treturn s.WrapWriter(w)\n\t}\n\treturn w\n}",
"func NewWriter(base io.Writer, level int) (io.WriteCloser, error) {\n\tw, err := gzip.NewWriterLevel(base, level)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn streamstore.NewIOCoppler(w, base), nil\n}",
"func NewReadWriteCloser(r stdio.ReadCloser, w stdio.WriteCloser) stdio.ReadWriteCloser {\n\treturn &rwc{r, w}\n}",
"func (b *basicWriter) Unwrap() http.ResponseWriter {\n\treturn b.ResponseWriter\n}",
"func (mock WriteCloser) Write(p []byte) (n int, err error) {\n\tmethodName := \"Write\" // nolint: goconst\n\tif mock.impl.Write != nil {\n\t\treturn mock.impl.Write(p)\n\t}\n\tif mock.callbackNotImplemented != nil {\n\t\tmock.callbackNotImplemented(mock.t, mock.name, methodName)\n\t} else {\n\t\tgomic.DefaultCallbackNotImplemented(mock.t, mock.name, methodName)\n\t}\n\treturn mock.fakeZeroWrite(p)\n}",
"func NewFailingWriteCloser(wc io.WriteCloser) *FailingWriteCloser {\n\treturn &FailingWriteCloser{WriteCloser: wc}\n}",
"func NewDiscardWriter() *DiscardWriter {\n\treturn &DiscardWriter{Writer: ioutil.Discard}\n}",
"func (mock WriteCloser) fakeZeroClose() error {\n\tvar (\n\t\tr0 error\n\t)\n\treturn r0\n}",
"func (n *NoOP) Close() {}",
"func New(prefix string) (io.WriteCloser, error) {\n\treturn nil, fmt.Errorf(\"not implemented on windows\")\n}",
"func NewWriter(t mockConstructorTestingTNewWriter) *Writer {\n\tmock := &Writer{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func (b *Writer) Reset(w io.Writer)",
"func NewCountedWriter(w io.WriteCloser) *CountedWriter {\n\treturn &CountedWriter{w: w}\n}",
"func (w *WrappedWriter) Write(data []byte) error {\n\tif len(data) == 0 {\n\t\treturn nil\n\t}\n\n\t_, err := w.bw.Write(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn := len(data)\n\t// Increment file position pointer\n\tw.n += int64(n)\n\n\treturn nil\n}",
"func New(w io.Writer) Writer {\n\treturn &writer{w}\n}",
"func WrapWriter(w http.ResponseWriter) ResponseWriterWrapper {\n\treturn &writerWrapper{ResponseWriter: w}\n}",
"func NewWriter(w io.Writer) *zip.Writer",
"func NoCloseStream(s Stream) Stream {\n return noCloseStream{s}\n}",
"func closeIgnore(closer io.Closer) {\n\t_ = closer.Close()\n}",
"func NewDummy() *Writer {\n\treturn &Writer{\n\t\tfp: nil,\n\t\tw: newDummyWriter(),\n\t}\n}",
"func (n NoOp) Apply(io.Reader, io.Writer) error {\n\treturn nil\n}",
"func (mwc *MockWriteCloser) Close() error {\n\tmwc.closed = true\n\treturn nil\n}",
"func (pb *Bar) NewProxyWriter(r io.Writer) *Writer {\n\tpb.Set(Bytes, true)\n\treturn &Writer{r, pb}\n}",
"func (mock WriteCloser) Close() error {\n\tmethodName := \"Close\" // nolint: goconst\n\tif mock.impl.Close != nil {\n\t\treturn mock.impl.Close()\n\t}\n\tif mock.callbackNotImplemented != nil {\n\t\tmock.callbackNotImplemented(mock.t, mock.name, methodName)\n\t} else {\n\t\tgomic.DefaultCallbackNotImplemented(mock.t, mock.name, methodName)\n\t}\n\treturn mock.fakeZeroClose()\n}",
"func (n *nilCloser) Close() error {\n\t// works even if n is nil\n\treturn nil\n}",
"func dummyBytesReader(p []byte) io.Reader {\n\treturn ioutil.NopCloser(bytes.NewReader(p))\n}",
"func (p *ioThrottlerPool) NewThrottledWriteCloser(writer io.WriteCloser, r rate.Limit, b int, id string) *ThrottledWriteCloser {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tthrottler := ioThrottler{\n\t\tlimiter: rate.NewLimiter(r, b),\n\t}\n\tp.connections[id] = &throttler\n\tp.updateBufferSize()\n\treturn &ThrottledWriteCloser{\n\t\torigWriteCloser: writer,\n\t\tid: id,\n\t\tpool: p,\n\t}\n\n}",
"func IsNop(w io.Writer) bool {\r\n\tif isN, ok := w.(interface {\r\n\t\tIsNop() bool\r\n\t}); ok {\r\n\t\treturn isN.IsNop()\r\n\t}\r\n\treturn false\r\n}",
"func WriteCloserDaisy(inp <-chan io.WriteCloser, tube WriteCloserTube) (out <-chan io.WriteCloser) {\n\tcha := make(chan io.WriteCloser)\n\tgo tube(inp, cha)\n\treturn cha\n}",
"func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter {\n\tvar (\n\t\thj, i0 = t.writer.(http.Hijacker)\n\t\tcn, i1 = t.writer.(http.CloseNotifier)\n\t\tpu, i2 = t.writer.(http.Pusher)\n\t\tfl, i3 = t.writer.(http.Flusher)\n\t\trf, i4 = t.writer.(io.ReaderFrom)\n\t)\n\n\tswitch {\n\tcase !i0 && !i1 && !i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t}{t}\n\tcase !i0 && !i1 && !i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\tio.ReaderFrom\n\t\t}{t, rf}\n\tcase !i0 && !i1 && !i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Flusher\n\t\t}{t, fl}\n\tcase !i0 && !i1 && !i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, fl, rf}\n\tcase !i0 && !i1 && i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Pusher\n\t\t}{t, pu}\n\tcase !i0 && !i1 && i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t}{t, pu, rf}\n\tcase !i0 && !i1 && i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t}{t, pu, fl}\n\tcase !i0 && !i1 && i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, pu, fl, rf}\n\tcase !i0 && i1 && !i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t}{t, cn}\n\tcase !i0 && i1 && !i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\tio.ReaderFrom\n\t\t}{t, cn, rf}\n\tcase !i0 && i1 && !i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Flusher\n\t\t}{t, cn, fl}\n\tcase !i0 && i1 && !i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, cn, fl, rf}\n\tcase !i0 && i1 && i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t}{t, cn, pu}\n\tcase !i0 && i1 && i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t}{t, cn, pu, rf}\n\tcase !i0 && i1 && i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t}{t, cn, pu, fl}\n\tcase !i0 && i1 && i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, cn, pu, fl, rf}\n\tcase i0 && !i1 && !i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t}{t, hj}\n\tcase i0 && !i1 && !i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, rf}\n\tcase i0 && !i1 && !i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Flusher\n\t\t}{t, hj, fl}\n\tcase i0 && !i1 && !i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, fl, rf}\n\tcase i0 && !i1 && i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Pusher\n\t\t}{t, hj, pu}\n\tcase i0 && !i1 && i2 && !i3 && i4:\n\t\treturn struct 
{\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, pu, rf}\n\tcase i0 && !i1 && i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t}{t, hj, pu, fl}\n\tcase i0 && !i1 && i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, pu, fl, rf}\n\tcase i0 && i1 && !i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t}{t, hj, cn}\n\tcase i0 && i1 && !i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, cn, rf}\n\tcase i0 && i1 && !i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Flusher\n\t\t}{t, hj, cn, fl}\n\tcase i0 && i1 && !i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, cn, fl, rf}\n\tcase i0 && i1 && i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t}{t, hj, cn, pu}\n\tcase i0 && i1 && i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, cn, pu, rf}\n\tcase i0 && i1 && i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t}{t, hj, cn, pu, fl}\n\tcase i0 && i1 && i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, cn, pu, fl, rf}\n\tdefault:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t}{t}\n\t}\n}",
"func WriterFlush(w *zip.Writer,) error",
"func NewWriter(bw BitWriteCloser, eof Symbol, weights []int) SymbolWriteCloser {\n\treturn newWriter(bw, eof, weights)\n}",
"func (w *Writer) Close() error {\n\treturn nil\n}",
"func (w *writer) Close() error {\n\tif !w.upload {\n\t\tif w.pr != nil {\n\t\t\tdefer w.pr.Close()\n\t\t}\n\t\tif w.pw == nil {\n\t\t\t// We never got any bytes written. We'll write an http.NoBody.\n\t\t\tw.open(nil, false)\n\t\t} else if err := w.pw.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t<-w.donec\n\treturn w.err\n}",
"func (w *writer) Close() error {\n\tif !w.upload {\n\t\tif w.pr != nil {\n\t\t\tdefer w.pr.Close()\n\t\t}\n\t\tif w.pw == nil {\n\t\t\t// We never got any bytes written. We'll write an http.NoBody.\n\t\t\tw.open(nil, false)\n\t\t} else if err := w.pw.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t<-w.donec\n\treturn w.err\n}",
"func New(w io.Writer) *Writer {\n\treturn &Writer{\n\t\tout: w,\n\t}\n}",
"func (_e *WriteCloser_Expecter) Close() *WriteCloser_Close_Call {\n\treturn &WriteCloser_Close_Call{Call: _e.mock.On(\"Close\")}\n}",
"func (rw *responseWriter) Unwrap() http.ResponseWriter {\n\treturn rw.ResponseWriter\n}",
"func New(w io.Writer) *Writer {\n\treturn &Writer{\n\t\tw: w,\n\t}\n}",
"func (mock WriteCloser) fakeZeroWrite(p []byte) (n int, err error) {\n\treturn n, err\n}",
"func (m *Nitro) NewWriter() *Writer {\n\tw := m.newWriter()\n\tw.next = m.wlist\n\tm.wlist = w\n\tw.dwrCtx.Init()\n\n\tm.shutdownWg1.Add(1)\n\tgo m.collectionWorker(w)\n\tif m.useMemoryMgmt {\n\t\tm.shutdownWg2.Add(1)\n\t\tgo m.freeWorker(w)\n\t}\n\n\treturn w\n}",
"func NewWriter() *Writer {\n\treturn &Writer{buf: bytes.NewBuffer(nil)}\n}",
"func (w *ChunkWriter) Close() error {\n\tif w.buffer == nil {\n\t\treturn nil\n\t}\n\n\tw.c = NewChunk(w.buffer.Bytes())\n\tw.buffer = nil\n\treturn nil\n}",
"func NewWriter(limit int) *Wrap {\n\treturn &Wrap{\n\t\tLimit: limit,\n\t\tNewline: defaultNewline,\n\t\tKeepNewlines: true,\n\t\t// Keep whitespaces following a forceful line break. If disabled,\n\t\t// leading whitespaces in a line are only kept if the line break\n\t\t// was not forceful, meaning a line break that was already present\n\t\t// in the input\n\t\tPreserveSpace: false,\n\t\tTabWidth: defaultTabWidth,\n\n\t\tbuf: &bytes.Buffer{},\n\t}\n}",
"func New() *Writer {\n\ttermWidth, _ = getTermSize()\n\tif termWidth != 0 {\n\t\toverFlowHandled = true\n\t}\n\n\treturn &Writer{\n\t\tOut: Out,\n\t\tRefreshInterval: RefreshInterval,\n\n\t\tmtx: &sync.Mutex{},\n\t}\n}",
"func (w responseWriterNoBody) Write(data []byte) (int, error) {\n\treturn 0, nil\n}",
"func NewWrappedResponseWriter(w http.ResponseWriter) *WrappedResponseWriter {\n\tgw := gzip.NewWriter(w)\n\treturn &WrappedResponseWriter{w, gw}\n}",
"func (w *Writer) Close() error {\n\tif w.w == nil {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\terrz error\n\t\terrc error\n\t)\n\n\terrz = w.wz.Close()\n\tif w.wc != nil {\n\t\twc := w.wc\n\t\tw.wc = nil\n\t\terrc = wc.Close()\n\t}\n\n\tw.w = nil\n\tw.wz = nil\n\n\tif errz != nil {\n\t\treturn fmt.Errorf(\"npz: could not close npz archive: %w\", errz)\n\t}\n\n\tif errc != nil {\n\t\treturn fmt.Errorf(\"npz: could not close npz file: %w\", errc)\n\t}\n\n\treturn nil\n}",
"func (_e *WriteCloser_Expecter) Write(p interface{}) *WriteCloser_Write_Call {\n\treturn &WriteCloser_Write_Call{Call: _e.mock.On(\"Write\", p)}\n}",
"func NewWriter(w io.Writer, opts WriterOpts) (deprecated.LegacyPackedWriter, error) {\n\twr := &writer{opts: opts}\n\tsubopts := deprecated.LegacyPackedWriterOpts{\n\t\tMarshal: deprecated.MarshalFunc(opts.Marshal),\n\t\tIndex: opts.Index,\n\t\tMaxItems: opts.MaxItems,\n\t\tMaxBytes: opts.MaxBytes,\n\t\tFlushed: opts.Flushed,\n\t}\n\n\tcompress := false\n\tswitch opts.FlateLevel {\n\tcase flate.BestSpeed,\n\t\tflate.BestCompression,\n\t\tflate.DefaultCompression,\n\t\tflate.HuffmanOnly:\n\t\tcompress = true\n\t}\n\n\tif compress {\n\t\twr.compressor = NewFlateTransform(opts.FlateLevel)\n\t\tsubopts.Transform = wr.compressor.CompressTransform\n\t}\n\twr.LegacyPackedWriter = deprecated.NewLegacyPackedWriter(w, subopts)\n\treturn wr, nil\n}",
"func (w *Writer) WriteNull() {\n\tif w.err != nil {\n\t\treturn\n\t}\n\tw.b = AppendNull(w.b)\n}",
"func newBufferedWriter(w io.Writer) *snappy.Writer {\n\trawBufWriter := bufWriterPool.Get()\n\tif rawBufWriter == nil {\n\t\treturn snappy.NewBufferedWriter(w)\n\t}\n\tbufW, ok := rawBufWriter.(*snappy.Writer)\n\tif !ok {\n\t\treturn snappy.NewBufferedWriter(w)\n\t}\n\tbufW.Reset(w)\n\treturn bufW\n}"
] | [
"0.83569413",
"0.8011272",
"0.71590877",
"0.6870779",
"0.67140406",
"0.67096657",
"0.6687032",
"0.6371078",
"0.6286475",
"0.6217874",
"0.6208649",
"0.61860895",
"0.61359453",
"0.61247253",
"0.607347",
"0.6037165",
"0.5976911",
"0.5891818",
"0.58669317",
"0.57316923",
"0.57210654",
"0.5682104",
"0.563397",
"0.5629736",
"0.5625744",
"0.557823",
"0.557412",
"0.55650455",
"0.5563851",
"0.5560972",
"0.55571365",
"0.551571",
"0.5502931",
"0.5491729",
"0.5465291",
"0.5447252",
"0.54405767",
"0.5438453",
"0.5425513",
"0.53875154",
"0.5362165",
"0.53545207",
"0.53502595",
"0.534587",
"0.5317975",
"0.53112465",
"0.52884346",
"0.52672035",
"0.5255381",
"0.5211145",
"0.52089447",
"0.520102",
"0.5185364",
"0.5164889",
"0.51586646",
"0.51447415",
"0.51335126",
"0.51251715",
"0.50978523",
"0.509738",
"0.50793195",
"0.50586474",
"0.50542516",
"0.50520915",
"0.5035403",
"0.5029016",
"0.5024436",
"0.5006709",
"0.50047827",
"0.5000567",
"0.49806482",
"0.49799782",
"0.4976809",
"0.49650797",
"0.4964724",
"0.49459618",
"0.49390182",
"0.4932785",
"0.49304035",
"0.49073264",
"0.48975888",
"0.48895088",
"0.48895088",
"0.48813412",
"0.48811096",
"0.48781016",
"0.4876857",
"0.48465377",
"0.48442262",
"0.48441097",
"0.48290163",
"0.4818307",
"0.48135456",
"0.48111305",
"0.4806843",
"0.4800899",
"0.47868156",
"0.4776983",
"0.47737506",
"0.4772327"
] | 0.82828856 | 1 |
NewTracer returns a new tracer | func NewTracer(description string) *Tracer {
return &Tracer{Started: time.Now().UTC(), Description: description}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NewTracer(name string) *Tracer {\n\tname = fmt.Sprintf(namePattern, name)\n\treturn &Tracer{\n\t\tname: name,\n\t}\n}",
"func NewTracer(parent *Logger, prefix string) *Logger {\n\treturn &Logger{parent: parent, prefix: prefix, tracer: true}\n}",
"func NewTracer(name string, w io.Writer, m Memory) Memory {\n\treturn &tracer{m: m, w: w, s: name}\n}",
"func NewTracer(cli CLI) (*Tracer, error) {\n\ttracer := &Tracer{\n\t\tcli: cli,\n\t\tnumPackets: defaultNumPackets,\n\t}\n\treturn tracer, nil\n}",
"func NewTracer(_ *config.Config) (*Tracer, error) {\n\treturn nil, ebpf.ErrNotImplemented\n}",
"func NewTracer(cfg TracerConfig) opentracing.Tracer {\n\tvar tracer opentracing.Tracer\n\tswitch cfg.Provider {\n\tcase Zipkin:\n\t\tlogrus.Error(\"No implements yet.\")\n\t\t// fmt.Sprintf(\"http://%s:%s/api/v1/spans\",cfg.Host, cfg.Port)\n\t\tbreak\n\tcase Jaeger:\n\t\ttracer = newJaegerTracer(cfg)\n\t\tbreak\n\tdefault:\n\t\tlogrus.Errorf(\"unsported provider %s, use opentracing.GlobalTracer()\", cfg.Provider)\n\t\ttracer = opentracing.GlobalTracer()\n\t}\n\treturn tracer\n}",
"func New(w io.Writer) Tracer{\n\treturn &tracer{out:w}\n}",
"func NewTracer(\n\tserviceName string,\n\tdispatcher Dispatcher,\n\toptions ...TracerOption,\n) (opentracing.Tracer, io.Closer) {\n\ttracer := &Tracer{\n\t\tserviceName: serviceName,\n\t\tdispatcher: dispatcher,\n\t\tuseDualSpanMode: false,\n\t}\n\ttracer.propagators = make(map[interface{}]Propagator)\n\ttracer.propagators[opentracing.TextMap] = NewDefaultTextMapPropagator()\n\ttracer.propagators[opentracing.HTTPHeaders] = NewTextMapPropagator(PropagatorOpts{}, URLCodex{})\n\tfor _, option := range options {\n\t\toption(tracer)\n\t}\n\n\tif tracer.timeNow == nil {\n\t\ttracer.timeNow = time.Now\n\t}\n\n\tif tracer.logger == nil {\n\t\ttracer.logger = NullLogger{}\n\t}\n\n\tif tracer.idGenerator == nil {\n\t\ttracer.idGenerator = func() string {\n\t\t\t_uuid, err := uuid.NewUUID()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\treturn _uuid.String()\n\t\t}\n\t}\n\n\tdispatcher.SetLogger(tracer.logger)\n\treturn tracer, tracer\n}",
"func New(w io.Writer) Tracer {\n\treturn &tracer{out: w}\n}",
"func New(w io.Writer) Tracer {\n\treturn &tracer{out: w}\n}",
"func New(w io.Writer) Tracer {\n\treturn &tracer{out: w}\n}",
"func NewTracer() Tracer {\n\treturn &nullTracer{}\n}",
"func NewTracer(\n\tserviceName string,\n\tsampler Sampler,\n\treporter Reporter,\n\toptions ...TracerOption,\n) (opentracing.Tracer, io.Closer) {\n\tt := &Tracer{\n\t\tserviceName: serviceName,\n\t\tsampler: samplerV1toV2(sampler),\n\t\treporter: reporter,\n\t\tinjectors: make(map[interface{}]Injector),\n\t\textractors: make(map[interface{}]Extractor),\n\t\tmetrics: *NewNullMetrics(),\n\t\tspanAllocator: simpleSpanAllocator{},\n\t}\n\n\tfor _, option := range options {\n\t\toption(t)\n\t}\n\n\t// register default injectors/extractors unless they are already provided via options\n\ttextPropagator := NewTextMapPropagator(getDefaultHeadersConfig(), t.metrics)\n\tt.addCodec(opentracing.TextMap, textPropagator, textPropagator)\n\n\thttpHeaderPropagator := NewHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics)\n\tt.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)\n\n\tbinaryPropagator := NewBinaryPropagator(t)\n\tt.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator)\n\n\t// TODO remove after TChannel supports OpenTracing\n\tinteropPropagator := &jaegerTraceContextPropagator{tracer: t}\n\tt.addCodec(SpanContextFormat, interopPropagator, interopPropagator)\n\n\tzipkinPropagator := &zipkinPropagator{tracer: t}\n\tt.addCodec(ZipkinSpanFormat, zipkinPropagator, zipkinPropagator)\n\n\tif t.baggageRestrictionManager != nil {\n\t\tt.baggageSetter = newBaggageSetter(t.baggageRestrictionManager, &t.metrics)\n\t} else {\n\t\tt.baggageSetter = newBaggageSetter(baggage.NewDefaultRestrictionManager(0), &t.metrics)\n\t}\n\tif t.debugThrottler == nil {\n\t\tt.debugThrottler = throttler.DefaultThrottler{}\n\t}\n\n\tif t.randomNumber == nil {\n\t\tseedGenerator := utils.NewRand(time.Now().UnixNano())\n\t\tpool := sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn rand.NewSource(seedGenerator.Int63())\n\t\t\t},\n\t\t}\n\n\t\tt.randomNumber = func() uint64 {\n\t\t\tgenerator := pool.Get().(rand.Source)\n\t\t\tnumber := uint64(generator.Int63())\n\t\t\tpool.Put(generator)\n\t\t\treturn number\n\t\t}\n\t}\n\tif t.timeNow == nil {\n\t\tt.timeNow = time.Now\n\t}\n\tif t.logger == nil {\n\t\tt.logger = log.NullLogger\n\t}\n\t// Set tracer-level tags\n\tt.tags = append(t.tags, Tag{key: JaegerClientVersionTagKey, value: JaegerClientVersion})\n\tif hostname, err := os.Hostname(); err == nil {\n\t\tt.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname})\n\t}\n\tif ipval, ok := t.getTag(TracerIPTagKey); ok {\n\t\tipv4, err := utils.ParseIPToUint32(ipval.(string))\n\t\tif err != nil {\n\t\t\tt.hostIPv4 = 0\n\t\t\tt.logger.Error(\"Unable to convert the externally provided ip to uint32: \" + err.Error())\n\t\t} else {\n\t\t\tt.hostIPv4 = ipv4\n\t\t}\n\t} else if ip, err := utils.HostIP(); err == nil {\n\t\tt.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()})\n\t\tt.hostIPv4 = utils.PackIPAsUint32(ip)\n\t} else {\n\t\tt.logger.Error(\"Unable to determine this host's IP address: \" + err.Error())\n\t}\n\n\tif t.options.gen128Bit {\n\t\tif t.options.highTraceIDGenerator == nil {\n\t\t\tt.options.highTraceIDGenerator = t.randomNumber\n\t\t}\n\t} else if t.options.highTraceIDGenerator != nil {\n\t\tt.logger.Error(\"Overriding high trace ID generator but not generating \" +\n\t\t\t\"128 bit trace IDs, consider enabling the \\\"Gen128Bit\\\" option\")\n\t}\n\tif t.options.maxTagValueLength == 0 {\n\t\tt.options.maxTagValueLength = DefaultMaxTagValueLength\n\t}\n\tt.process = Process{\n\t\tService: serviceName,\n\t\tUUID: 
strconv.FormatUint(t.randomNumber(), 16),\n\t\tTags: t.tags,\n\t}\n\tif throttler, ok := t.debugThrottler.(ProcessSetter); ok {\n\t\tthrottler.SetProcess(t.process)\n\t}\n\n\treturn t, t\n}",
"func New(recorders []basictracer.SpanRecorder) opentracing.Tracer {\n\treturn basictracer.New(NewRecorder(recorders))\n}",
"func NewTracer(ctx context.Context, yamlConfig []byte) (opentracing.Tracer, io.Closer, error) {\n\tconfig := Config{}\n\tif err := yaml.Unmarshal(yamlConfig, &config); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\toptions := lightstep.Options{\n\t\tAccessToken: config.AccessToken,\n\t\tCollector: config.Collector,\n\t}\n\tlighstepTracer, err := lightstep.CreateTracer(options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tt := &Tracer{\n\t\tlighstepTracer,\n\t\tctx,\n\t}\n\treturn t, t, nil\n}",
"func NewTracer(serviceName, host string) (opentracing.Tracer, io.Closer, error) {\n\tjcfg := jaegerconfig.Configuration{\n\t\tSampler: &jaegerconfig.SamplerConfig{\n\t\t\tType: \"const\",\n\t\t\tParam: 1,\n\t\t},\n\t\tReporter: &jaegerconfig.ReporterConfig{\n\t\t\tLogSpans: false,\n\t\t\tBufferFlushInterval: 1 * time.Second,\n\t\t\tLocalAgentHostPort: host,\n\t\t},\n\t\tServiceName: serviceName,\n\t}\n\n\ttracer, closer, err := jcfg.NewTracer()\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"new tracer error: %v\", err)\n\t}\n\n\treturn tracer, closer, nil\n}",
"func NewTracer(cfg Config) (otrace.Tracer, io.Closer, error) {\n\tconf := cfg.New()\n\ttracer, closer, err := conf.New(\n\t\tcfg.App,\n\t\tconfig.Logger(jaeger.StdLogger),\n\t)\n\n\tif err != nil {\n\t\treturn nil, nil, errCreateTracer\n\t}\n\n\treturn tracer, closer, nil\n}",
"func New() graphql.Tracer {\n\treturn tracer{Tracer: gqlopencensus.New()}\n}",
"func New() graphql.Tracer {\n\treturn tracer{Tracer: gqlopencensus.New()}\n}",
"func NewTracer(serviceName, zipkinURL string) (io.Closer, error) {\n\t// Send the tracing in Zipkin format (even if we are using Jaeger as backend).\n\ttransport, err := zipkin.NewHTTPTransport(\"http://\" + zipkinURL + \":9411/api/v1/spans\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not init Jaeger Zipkin HTTP transport: %w\", err)\n\t}\n\n\t// Zipkin shares span ID between client and server spans; it must be enabled via the following option.\n\tzipkinPropagator := zk.NewZipkinB3HTTPHeaderPropagator()\n\n\ttracer, closer := jaeger.NewTracer(\n\t\tserviceName,\n\t\tjaeger.NewConstSampler(true), // Trace everything for now.\n\t\tjaeger.NewRemoteReporter(transport),\n\t\tjaeger.TracerOptions.Injector(opentracing.HTTPHeaders, zipkinPropagator),\n\t\tjaeger.TracerOptions.Extractor(opentracing.HTTPHeaders, zipkinPropagator),\n\t\tjaeger.TracerOptions.ZipkinSharedRPCSpan(true),\n\t\tjaeger.TracerOptions.Gen128Bit(true),\n\t)\n\topentracing.SetGlobalTracer(tracer)\n\n\treturn closer, nil\n}",
"func NewTracer(opts Options) ot.Tracer {\n\toptions := basictracer.DefaultOptions()\n\toptions.ShouldSample = func(_ uint64) bool { return true }\n\n\tif opts.UseGRPC {\n\t\tr := NewRecorder(opts)\n\t\tif r == nil {\n\t\t\treturn ot.NoopTracer{}\n\t\t}\n\t\toptions.Recorder = r\n\t} else {\n\t\topts.setDefaults()\n\t\t// convert opts to thrift_rpc.Options\n\t\tthriftOpts := thrift_rpc.Options{\n\t\t\tAccessToken: opts.AccessToken,\n\t\t\tCollector: thrift_rpc.Endpoint{opts.Collector.Host, opts.Collector.Port, opts.Collector.Plaintext},\n\t\t\tTags: opts.Tags,\n\t\t\tLightStepAPI: thrift_rpc.Endpoint{opts.LightStepAPI.Host, opts.LightStepAPI.Port, opts.LightStepAPI.Plaintext},\n\t\t\tMaxBufferedSpans: opts.MaxBufferedSpans,\n\t\t\tReportingPeriod: opts.ReportingPeriod,\n\t\t\tReportTimeout: opts.ReportTimeout,\n\t\t\tDropSpanLogs: opts.DropSpanLogs,\n\t\t\tMaxLogsPerSpan: opts.MaxLogsPerSpan,\n\t\t\tVerbose: opts.Verbose,\n\t\t\tMaxLogMessageLen: opts.MaxLogValueLen,\n\t\t}\n\t\tr := thrift_rpc.NewRecorder(thriftOpts)\n\t\tif r == nil {\n\t\t\treturn ot.NoopTracer{}\n\t\t}\n\t\toptions.Recorder = r\n\t}\n\toptions.DropAllLogs = opts.DropSpanLogs\n\toptions.MaxLogsPerSpan = opts.MaxLogsPerSpan\n\treturn basictracer.NewWithOptions(options)\n}",
"func NewTracer(task concurrency.Task, enable bool, msg ...interface{}) Tracer {\n\tt := tracer{\n\t\tenabled: enable,\n\t}\n\tif task != nil {\n\t\tt.taskSig = task.Signature()\n\t}\n\n\tmessage := strprocess.FormatStrings(msg...)\n\tif message == \"\" {\n\t\tmessage = \"()\"\n\t}\n\tt.callerParams = strings.TrimSpace(message)\n\n\t// Build the message to trace\n\t// VPL: my version\n\t// if pc, file, line, ok := runtime.Caller(1); ok {\n\t//\tif f := runtime.FuncForPC(pc); f != nil {\n\t//\t\tt.funcName = f.Name()\n\t//\t\tfilename := strings.Replace(file, debug.sourceFilePartToRemove(), \"\", 1)\n\t//\t\tt.inOutMessage = fmt.Sprintf(\"%s %s%s [%s:%d]\", t.taskSig, filepath.Base(t.funcName), message, filename, line)\n\t//\t}\n\t// }\n\t// VPL: la version d'Oscar\n\tif pc, file, _, ok := runtime.Caller(1); ok {\n\t\tt.fileName = callstack.SourceFilePathUpdater()(file)\n\t\tif f := runtime.FuncForPC(pc); f != nil {\n\t\t\tt.funcName = filepath.Base(f.Name())\n\t\t}\n\t}\n\tif t.funcName == \"\" {\n\t\tt.funcName = unknownFunction\n\t}\n\tif t.fileName == \"\" {\n\t\tt.funcName = unknownFile\n\t}\n\n\treturn &t\n}",
"func New(opts ...opentelemetry.Option) (opentracing.Tracer, io.Closer, error) {\n\toptions := opentelemetry.DefaultOptions()\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\n\tlogger.Debug(\"Creating a new Jaeger tracer\")\n\n\t// Prepare a Jaeger config using our options:\n\tjaegerConfig := config.Configuration{\n\t\tServiceName: options.ServiceName,\n\t\tSampler: &config.SamplerConfig{\n\t\t\tType: \"const\", // No adaptive sampling or external lookups\n\t\t\tParam: options.SamplingRate,\n\t\t},\n\t\tReporter: &config.ReporterConfig{\n\t\t\tLocalAgentHostPort: options.TraceReporterAddress,\n\t\t},\n\t}\n\n\t// Prepare a new Jaeger tracer from this config:\n\ttracer, closer, err := jaegerConfig.NewTracer()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn tracer, closer, nil\n}",
"func New(conf Config) *AppTracer {\n\treturn &AppTracer{\n\t\tConfig: conf,\n\t}\n}",
"func (t *Tracer) newSpan() *Span {\n\treturn t.spanAllocator.Get()\n}",
"func NewTrace(name string) *Trace {\n\treturn &Trace{\n\t\tTraceID: uuid.New(),\n\t\tSpanID: rand.Int63(),\n\t\tSpanName: name,\n\t}\n}",
"func NewMockTracer(ctrl *gomock.Controller) *MockTracer {\n\tmock := &MockTracer{ctrl: ctrl}\n\tmock.recorder = &MockTracerMockRecorder{mock}\n\treturn mock\n}",
"func NewMockTracer(ctrl *gomock.Controller) *MockTracer {\n\tmock := &MockTracer{ctrl: ctrl}\n\tmock.recorder = &MockTracerMockRecorder{mock}\n\treturn mock\n}",
"func NewMockTracer(ctrl *gomock.Controller) *MockTracer {\n\tmock := &MockTracer{ctrl: ctrl}\n\tmock.recorder = &MockTracerMockRecorder{mock}\n\treturn mock\n}",
"func NewTrace(localAddr string, logPath string) *Trace {\n\tt := &Trace{\n\t\tstopCh: make(chan struct{}),\n\t\tmsgCh: make(chan []byte, 1000),\n\t\tlocalAddr: localAddr,\n\t\tlogPath: logPath,\n\t\tforceLog: false,\n\t}\n\treturn t\n}",
"func AddNewTracer(name string) *Tracer {\n\tsrc := NewTracer(name)\n\tif err := gwr.AddGenericDataSource(src); err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn src\n}",
"func New(conf Config, opts ...func(Type)) (Type, error) {\n\tif c, ok := Constructors[conf.Type]; ok {\n\t\treturn c.constructor(conf, opts...)\n\t}\n\treturn nil, ErrInvalidTracerType\n}",
"func newTraceV4() httptracer.HTTPTracer {\n\treturn traceV4{}\n}",
"func newTraceV2() httptracer.HTTPTracer {\n\treturn traceV2{}\n}",
"func NewSpan(tracer *Tracing, name string) Span {\n\treturn newSpanWithStart(tracer, name, time.Now())\n}",
"func (i *DI) MakeTracer() opentracing.Tracer {\n\tif cacheTracer != nil {\n\t\treturn *cacheTracer\n\t}\n\t// cacheTracer global is set in MakeTracerCloser\n\t_, _ = i.MakeTracerCloser()\n\treturn *cacheTracer\n}",
"func newOpentelemetryTracerProvider(address string, customAttributes ...attribute.KeyValue) (*tracesdk.TracerProvider, error) {\n\t// Same as Grafana core\n\tclient := otlptracegrpc.NewClient(otlptracegrpc.WithEndpoint(address), otlptracegrpc.WithInsecure())\n\texp, err := otlptrace.New(context.Background(), client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn initTracerProvider(exp, customAttributes...)\n}",
"func (t *Tracer) Start() *Tracer {\n\tlog.Debugf(\"Tracer started %v.\", t.Description)\n\treturn t\n}",
"func (t *HadeTraceService) NewTrace() *contract.TraceContext {\n\tvar traceID, spanID string\n\tif t.traceIDGenerator != nil {\n\t\ttraceID = t.traceIDGenerator.NewID()\n\t} else {\n\t\ttraceID = t.idService.NewID()\n\t}\n\n\tif t.spanIDGenerator != nil {\n\t\tspanID = t.spanIDGenerator.NewID()\n\t} else {\n\t\tspanID = t.idService.NewID()\n\t}\n\ttc := &contract.TraceContext{\n\t\tTraceID: traceID,\n\t\tParentID: \"\",\n\t\tSpanID: spanID,\n\t\tCspanID: \"\",\n\t\tAnnotation: map[string]string{},\n\t}\n\treturn tc\n}",
"func New(name string, impl []Wrapper, opts ...TraceOption) *Trace {\n\tt := &Trace{impl: impl, event: newSpanEvent(name)}\n\tfor _, opt := range opts {\n\t\topt(t)\n\t}\n\tfor _, i := range t.impl {\n\t\ti.Setup(name)\n\t}\n\treturn t\n}",
"func (p *Provider) Tracer(instrumentationName string, opts ...oteltrace.TracerOption) oteltrace.Tracer {\n\topts = append(opts, oteltrace.WithInstrumentationVersion(teleport.Version))\n\n\treturn p.provider.Tracer(instrumentationName, opts...)\n}",
"func NewWithTracer(tracer opentracing.Tracer) tracing.StartSpan {\n\treturn func(serviceId string, operationId string, protocol tracing.WireProtocol, r *http.Request) (context.Context, tracing.ServerSpan) {\n\t\tspanName := serviceId + OperationDelimiter + operationId\n\t\twireContext, err := tracer.Extract(\n\t\t\topentracing.HTTPHeaders,\n\t\t\topentracing.HTTPHeadersCarrier(r.Header))\n\t\tif err != nil {\n\t\t\tlog.Debug(\"Failed to extract opentracing headers\")\n\t\t}\n\n\t\t// Create the span referring to the RPC client if available.\n\t\t// If wireContext == nil, a root span will be created.\n\t\t// ext.RPCServerOption() sets tag span.kind=server\n\t\tserverSpan := tracer.StartSpan(\n\t\t\tspanName,\n\t\t\text.RPCServerOption(wireContext))\n\n\t\t//Set tag for the component\n\t\tserverSpan.SetTag(\"component\", ComponentTag)\n\n\t\t//Set headers, if header is not present the tag will be set to \"\"\n\t\tserverSpan.SetTag(\"http.user_agent\", r.Header.Get(\"User-Agent\"))\n\t\tserverSpan.SetTag(\"peer.address\", peerAddress(r))\n\t\tserverSpan.SetTag(\"wire.protocol\", protocol)\n\t\tserverSpan.SetTag(\"is_internal\", isInternal(r))\n\n\t\tspan := NewServerSpan(serverSpan)\n\n\t\tctx := opentracing.ContextWithSpan(r.Context(), serverSpan)\n\n\t\treturn ctx, span\n\t}\n}",
"func NewPrometheusTracer(name string, rate int, vec *prometheus.CounterVec) *Tracer {\n\tconst (\n\t\tnewconn = \"new\"\n\t\treusedconn = \"reused\"\n\t)\n\n\thooks := &httptrace.ClientTrace{\n\t\tGotConn: func(con httptrace.GotConnInfo) {\n\t\t\tif con.Reused {\n\t\t\t\tvec.WithLabelValues(name, reusedconn).Inc()\n\t\t\t} else {\n\t\t\t\tvec.WithLabelValues(name, newconn).Inc()\n\t\t\t}\n\t\t},\n\t}\n\n\treturn &Tracer{SampleRate: rate, trace: hooks}\n}",
"func NewCompositeTracer(references cref.IReferences) *CompositeTracer {\n\tc := &CompositeTracer{}\n\tif references != nil {\n\t\tc.SetReferences(references)\n\t}\n\treturn c\n}",
"func NewPTracer(store Store) PTracer {\n\tt := PTracer{\n\t\tops: make(chan func()),\n\t\tstopped: make(chan stopped),\n\t\tquit: make(chan struct{}),\n\t\tchildAttached: make(chan struct{}),\n\n\t\tthreads: make(map[int]*thread),\n\t\tprocesses: make(map[int]*process),\n\t\tstore: store,\n\t}\n\tgo t.waitLoop()\n\tgo t.loop()\n\treturn t\n}",
"func NewContext(ctx context.Context, t *Tracer) context.Context {\n\treturn context.WithValue(ctx, tracerKey, t)\n}",
"func NewChromeTracer(chrome *godet.RemoteDebugger, size *ScreenSize, screenshotsStoragePath string) *ChromeTracer {\n\treturn &ChromeTracer{\n\t\tinstance: chrome,\n\t\tsize: size,\n\t\tscreenshotsStoragePath: screenshotsStoragePath,\n\t}\n}",
"func NewSpan(tracer opentracing.Tracer, operationName string, opts ...opentracing.StartSpanOption) gin.HandlerFunc {\n\treturn func(ctx *gin.Context) {\n\t\tspan := tracer.StartSpan(operationName, opts...)\n\t\tctx.Set(spanContextKey, span)\n\t\tdefer span.Finish()\n\n\t\tctx.Next()\n\t}\n}",
"func NewServiceTracer(serviceName string, agentAddress string, metricsType MetricsType) *ServiceTracer {\n\tzlogger, _ := zap.NewDevelopment(zap.AddStacktrace(zapcore.FatalLevel))\n\tzapLogger := zlogger.With(zap.String(\"service\", serviceName))\n\tlogger := NewLogFactory(zapLogger)\n\tmetricsFactory := NewMetrics(metricsType)\n\ttracer := NewTracer(serviceName, agentAddress, metricsFactory, logger)\n\topentracing.SetGlobalTracer(tracer)\n\treturn &ServiceTracer{tracer: tracer, logger: logger, serviceName: serviceName}\n}",
"func NewDasTracer() DasTracer {\n\treturn DasTracer{\n\t\tURI: DefaultDasURI,\n\t}\n}",
"func NewRecordingTracer() *RecordingTracer {\n\tvar result RecordingTracer\n\ttracer, err := apm.NewTracerOptions(apm.TracerOptions{\n\t\tTransport: &result.RecorderTransport,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tresult.Tracer = tracer\n\treturn &result\n}",
"func New(x trace.Trace, y *log.Logger) *Object {\n\tceph = cephInterface.New(x, y)\n\tt = x\n\treturn &Object{ y }\n}",
"func New(ctx context.Context, method string, kvps ...interface{}) (*SpanLogger, context.Context) {\n\treturn spanlogger.New(ctx, util_log.Logger, method, resolver, kvps...)\n}",
"func (m *SpanManager) NewTrace(span_name string) *Span {\n\tm.mtx.Lock()\n\ttrace_fraction := m.trace_fraction\n\ttrace_debug := m.trace_debug\n\tm.mtx.Unlock()\n\tif Rng.Float64() >= trace_fraction {\n\t\treturn NewDisabledTrace()\n\t}\n\treturn m.NewSampledTrace(span_name, trace_debug)\n}",
"func NewTrace(s *logic.S, ws ...z.Lit) *Trace {\n\treturn NewTraceLen(s, s.Len(), ws...)\n}",
"func newTraceExporter(logger *zap.Logger, cfg configmodels.Exporter) (component.TracesExporter, error) {\n\n\tl := &logServiceTraceSender{\n\t\tlogger: logger,\n\t}\n\n\tvar err error\n\tif l.client, err = NewLogServiceClient(cfg.(*Config), logger); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn exporterhelper.NewTraceExporter(\n\t\tcfg,\n\t\tlogger,\n\t\tl.pushTraceData)\n}",
"func (t *Trace) NewSpan(name string) platform.Span {\n\ts := NewSpan(name).(*Span)\n\ts.logger = t.logger\n\treturn s\n}",
"func newOpenTelemetryWrapper(\n\tctx context.Context,\n\tspanName string,\n) (openTelemetryWrapper, error) {\n\tif spanName == \"\" {\n\t\tspanName = defaultSpanName\n\t}\n\n\tot := openTelemetryWrapper{\n\t\tspanName: spanName,\n\t}\n\n\tversion, _ := caddy.Version()\n\tres, err := ot.newResource(webEngineName, version)\n\tif err != nil {\n\t\treturn ot, fmt.Errorf(\"creating resource error: %w\", err)\n\t}\n\n\ttraceExporter, err := otlptracegrpc.New(ctx)\n\tif err != nil {\n\t\treturn ot, fmt.Errorf(\"creating trace exporter error: %w\", err)\n\t}\n\n\tot.propagators = autoprop.NewTextMapPropagator()\n\n\ttracerProvider := globalTracerProvider.getTracerProvider(\n\t\tsdktrace.WithBatcher(traceExporter),\n\t\tsdktrace.WithResource(res),\n\t)\n\n\tot.handler = otelhttp.NewHandler(http.HandlerFunc(ot.serveHTTP),\n\t\tot.spanName,\n\t\totelhttp.WithTracerProvider(tracerProvider),\n\t\totelhttp.WithPropagators(ot.propagators),\n\t\totelhttp.WithSpanNameFormatter(ot.spanNameFormatter),\n\t)\n\n\treturn ot, nil\n}",
"func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span {\n\t// If told explicitly to make this a new root use a zero value SpanContext\n\t// as a parent which contains an invalid trace ID and is not remote.\n\tvar psc trace.SpanContext\n\tif config.NewRoot() {\n\t\tctx = trace.ContextWithSpanContext(ctx, psc)\n\t} else {\n\t\tpsc = trace.SpanContextFromContext(ctx)\n\t}\n\n\t// If there is a valid parent trace ID, use it to ensure the continuity of\n\t// the trace. Always generate a new span ID so other components can rely\n\t// on a unique span ID, even if the Span is non-recording.\n\tvar tid trace.TraceID\n\tvar sid trace.SpanID\n\tif !psc.TraceID().IsValid() {\n\t\ttid, sid = tr.provider.idGenerator.NewIDs(ctx)\n\t} else {\n\t\ttid = psc.TraceID()\n\t\tsid = tr.provider.idGenerator.NewSpanID(ctx, tid)\n\t}\n\n\tsamplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{\n\t\tParentContext: ctx,\n\t\tTraceID: tid,\n\t\tName: name,\n\t\tKind: config.SpanKind(),\n\t\tAttributes: config.Attributes(),\n\t\tLinks: config.Links(),\n\t})\n\n\tscc := trace.SpanContextConfig{\n\t\tTraceID: tid,\n\t\tSpanID: sid,\n\t\tTraceState: samplingResult.Tracestate,\n\t}\n\tif isSampled(samplingResult) {\n\t\tscc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled\n\t} else {\n\t\tscc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled\n\t}\n\tsc := trace.NewSpanContext(scc)\n\n\tif !isRecording(samplingResult) {\n\t\treturn tr.newNonRecordingSpan(sc)\n\t}\n\treturn tr.newRecordingSpan(psc, sc, name, samplingResult, config)\n}",
"func NewRayTracer(view *Camera, options *RayTracerOptions) *RayTracer {\n\n\t// compute half-dimensions and angles:\n\thalfWidth, halfHeight := entry(view.width)/TWO, entry(view.height)/TWO\n\ttanY := tan(view.fovY / TWO)\n\ttanX := tanY * (halfWidth / halfHeight)\n\n\t// compute eye-basis vectors:\n\tbW := view.pos.minus(&view.lookAt).direction()\n\tbU := view.up.cross(bW).direction()\n\tbV := bW.cross(bU)\n\n\treturn &RayTracer{\n\t\tview.width, view.height,\n\t\thalfWidth, halfHeight, tanX, tanY,\n\t\t*bU, *bV, *bW, view.pos,\n\t\toptions,\n\t}\n}",
"func New(msg string) error {\n return &withTrace{\n msg: msg,\n stack: trace(),\n }\n}",
"func (i *DI) MakeTracerCloser() (opentracing.Tracer, io.Closer) {\n\tif cacheTracer != nil {\n\t\treturn *cacheTracer, cacheTraceCloser\n\t}\n\tcfg := i.MakeTraceConfig()\n\tjLogger := jaegerlog.StdLogger\n\tjMetricsFactory := metrics.NullFactory\n\n\t// Initialize tracer with a logger and a metrics factory\n\ttracer, closer, err := cfg.NewTracer(\n\t\tjaegercfg.Logger(jLogger),\n\t\tjaegercfg.Metrics(jMetricsFactory),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcacheTracer := &tracer\n\tcacheTraceCloser := closer\n\treturn *cacheTracer, cacheTraceCloser\n}",
"func NewTraces(consume ConsumeTracesFunc, options ...Option) (Traces, error) {\n\tif consume == nil {\n\t\treturn nil, errNilFunc\n\t}\n\treturn &baseTraces{\n\t\tbaseImpl: newBaseImpl(options...),\n\t\tConsumeTracesFunc: consume,\n\t}, nil\n}",
"func NewTrace(lfn string, site string, ts int64, jobtype string, wnname string) Trace {\n\ttrc := Trace{}\n\ttrc.Account = \"fwjr\"\n\ttrc.ClientState = \"DONE\"\n\ttrc.Filename = lfn\n\ttrc.DID = \"cms:\" + fmt.Sprintf(\"%v\", trc.Filename)\n\ttrc.EventType = \"get\"\n\ttrc.EventVersion = \"API_1.21.6\"\n\ttrc.FileReadts = ts\n\ttrc.Jobtype = jobtype\n\ttrc.RemoteSite = site\n\ttrc.Scope = \"cms\"\n\ttrc.Timestamp = trc.FileReadts\n\ttrc.TraceTimeentryUnix = trc.FileReadts\n\ttrc.Usrdn = \"/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=yuyi/CN=639751/CN=Yuyi Guo/CN=706639693\"\n\ttrc.Wnname = wnname\n\treturn trc\n}",
"func (st *ServiceTracer) OpenTracer() opentracing.Tracer {\n\treturn st.tracer\n}",
"func NewTraceProvider(ctx context.Context, cfg Config) (*Provider, error) {\n\tif err := cfg.CheckAndSetDefaults(); err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\texporter, err := NewExporter(ctx, cfg)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tattrs := []attribute.KeyValue{\n\t\t// the service name used to display traces in backends\n\t\tsemconv.ServiceNameKey.String(cfg.Service),\n\t\tattribute.String(VersionKey, teleport.Version),\n\t}\n\tattrs = append(attrs, cfg.Attributes...)\n\n\tres, err := resource.New(ctx,\n\t\tresource.WithFromEnv(),\n\t\tresource.WithProcessPID(),\n\t\tresource.WithProcessExecutableName(),\n\t\tresource.WithProcessExecutablePath(),\n\t\tresource.WithProcessRuntimeName(),\n\t\tresource.WithProcessRuntimeVersion(),\n\t\tresource.WithProcessRuntimeDescription(),\n\t\tresource.WithTelemetrySDK(),\n\t\tresource.WithHost(),\n\t\tresource.WithAttributes(attrs...),\n\t)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\t// set global propagator, the default is no-op.\n\totel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\n\t// override the global logging handled with one that uses the\n\t// configured logger instead\n\totel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {\n\t\tcfg.Logger.WithError(err).Warnf(\"failed to export traces.\")\n\t}))\n\n\t// set global provider to our provider wrapper to have all tracers use the common TracerOptions\n\tprovider := &Provider{\n\t\tprovider: sdktrace.NewTracerProvider(\n\t\t\tsdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(cfg.SamplingRate))),\n\t\t\tsdktrace.WithResource(res),\n\t\t\tsdktrace.WithSpanProcessor(sdktrace.NewBatchSpanProcessor(exporter)),\n\t\t),\n\t}\n\totel.SetTracerProvider(provider)\n\n\treturn provider, nil\n}",
"func (t *TraceWrapper) newSpan(name string) *SpanWrapper {\n\tctx := t.ctx\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\n\ts, ok := ctx.Value(spanKey{}).(*SpanWrapper)\n\tif !ok {\n\t\ts = t.generateSpan(name)\n\t\tctx = context.WithValue(ctx, spanKey{}, s)\n\t\ts.ctx, t.ctx = ctx, ctx\n\t}\n\treturn s\n}",
"func GetNewTraceTransport(trace HTTPTracer, transport http.RoundTripper) RoundTripTrace {\n\treturn RoundTripTrace{Trace: trace,\n\t\tTransport: transport}\n}",
"func newTraceExporter(config *Config, transportChannel transportChannel, logger *zap.Logger) (exporter.TraceExporter, error) {\n\n\texporter := &traceExporter{\n\t\tconfig: config,\n\t\ttransportChannel: transportChannel,\n\t\tlogger: logger,\n\t}\n\n\texp, err := exporterhelper.NewTraceExporter(\n\t\tconfig,\n\t\texporter.pushTraceData,\n\t\texporterhelper.WithTracing(true),\n\t\texporterhelper.WithMetrics(true))\n\n\treturn exp, err\n}",
"func (ts *TeeSpan) Tracer() opentracing.Tracer {\n\treturn ts.tracer\n}",
"func New(ctx context.Context) *Manager {\n\tout := &Manager{\n\t\tsync.Mutex{},\n\t\tmake(map[id.ID]tracer.Tracer),\n\t}\n\tbind.GetRegistry(ctx).Listen(bind.NewDeviceListener(out.createTracer, out.destroyTracer))\n\treturn out\n}",
"func (b *Builder) Build() (*Tracer, error) {\n\tif err := b.err; err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar options []Option\n\tif b.loggers != nil {\n\t\tswitch len(b.loggers) {\n\t\tcase 0:\n\t\t\t// intentionally left blank\n\t\tcase 1:\n\t\t\toptions = append(options, WithLogger(b.loggers[0]))\n\t\tdefault:\n\t\t\toptions = append(options, WithLogger(MultiLogger(b.loggers...)))\n\t\t}\n\t}\n\tif b.traceClient != nil {\n\t\toptions = append(options, WithTraceClient(b.traceClient))\n\t}\n\tif b.errorClient != nil {\n\t\toptions = append(options, WithErrorClient(b.errorClient))\n\t}\n\tif b.baggage != nil {\n\t\toptions = append(options, WithBaggage(b.baggage))\n\t}\n\n\treturn New(options...), nil\n}",
"func T() tracing.Trace {\n\treturn gtrace.EngineTracer\n}",
"func NewSpan() Span {\n\treturn newSpan(&otlptrace.Span{})\n}",
"func initTracer(jaegerURL string) (func(), error) {\n\tif jaegerURL == \"\" {\n\t\treturn func() {\n\t\t\ttrace.NewNoopTracerProvider()\n\t\t}, nil\n\t}\n\n\t// create and install Jaeger export pipeline\n\treturn jaeger.InstallNewPipeline(\n\t\tjaeger.WithCollectorEndpoint(jaegerURL),\n\t\tjaeger.WithProcess(jaeger.Process{\n\t\t\tServiceName: \"kms\",\n\t\t}),\n\t\tjaeger.WithSDK(&sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}),\n\t)\n}",
"func NewContext(ctx context.Context, tr Tracer) context.Context {\n\treturn trace.NewContext(ctx, tr)\n}",
"func buildTracerProvider(ctx context.Context, tracingCfg config.TracingConfig) (trace.TracerProvider, func() error, error) {\n\tclient, err := getClient(tracingCfg)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\texp, err := otlptrace.New(ctx, client)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Create a resource describing the service and the runtime.\n\tres, err := resource.New(\n\t\tctx,\n\t\tresource.WithSchemaURL(semconv.SchemaURL),\n\t\tresource.WithAttributes(\n\t\t\tsemconv.ServiceNameKey.String(serviceName),\n\t\t\tsemconv.ServiceVersionKey.String(version.Version),\n\t\t),\n\t\tresource.WithProcessRuntimeDescription(),\n\t\tresource.WithTelemetrySDK(),\n\t)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\ttp := tracesdk.NewTracerProvider(\n\t\ttracesdk.WithBatcher(exp),\n\t\ttracesdk.WithSampler(tracesdk.ParentBased(\n\t\t\ttracesdk.TraceIDRatioBased(tracingCfg.SamplingFraction),\n\t\t)),\n\t\ttracesdk.WithResource(res),\n\t)\n\n\treturn tp, func() error {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\t\tdefer cancel()\n\t\terr := tp.Shutdown(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, nil\n}",
"func New(config *Config, log *zap.Logger) (exporter.TraceExporter, error) {\n\thttpClient := &http.Client{}\n\toptions := []elastic.ClientOptionFunc{\n\t\telastic.SetURL(config.Servers...),\n\t\telastic.SetBasicAuth(config.Username, config.Password),\n\t\telastic.SetSniff(config.Sniffer),\n\t\telastic.SetHttpClient(httpClient),\n\t}\n\tif config.TokenFile != \"\" {\n\t\ttoken, err := loadToken(config.TokenFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thttpClient.Transport = &tokenAuthTransport{\n\t\t\ttoken: token,\n\t\t\twrapped: &http.Transport{},\n\t\t}\n\t}\n\n\tesRawClient, err := elastic.NewClient(options...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create Elasticsearch client for %s, %v\", config.Servers, err)\n\t}\n\tbulk, err := esRawClient.BulkProcessor().\n\t\tBulkActions(config.bulkActions).\n\t\tBulkSize(config.bulkSize).\n\t\tWorkers(config.bulkWorkers).\n\t\tFlushInterval(config.bulkFlushInterval).\n\t\tDo(context.Background())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tversion := config.Version\n\tif version == 0 {\n\t\tversion, err = getVersion(esRawClient, config.Servers[0])\n\t}\n\tvar tags []string\n\tif config.TagsAsFields.AllAsFields && config.TagsAsFields.File != \"\" {\n\t\ttags, err = loadTagsFromFile(config.TagsAsFields.File)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to load tags file: %v\", err)\n\t\t}\n\t}\n\n\tw := esSpanStore.NewSpanWriter(esSpanStore.SpanWriterParams{\n\t\tLogger: log,\n\t\tMetricsFactory: metrics.NullFactory,\n\t\tClient: eswrapper.WrapESClient(esRawClient, bulk, version),\n\t\tIndexPrefix: config.IndexPrefix,\n\t\tUseReadWriteAliases: config.UseWriteAlias,\n\t\tAllTagsAsFields: config.TagsAsFields.AllAsFields,\n\t\tTagKeysAsFields: tags,\n\t\tTagDotReplacement: config.TagsAsFields.DotReplacement,\n\t})\n\n\tif config.CreateTemplates {\n\t\tspanMapping, serviceMapping := es.GetMappings(int64(config.Shards), int64(config.Shards), version)\n\t\terr := w.CreateTemplates(spanMapping, serviceMapping)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tstorage := jexporter.Storage{\n\t\tWriter: w,\n\t}\n\treturn exporterhelper.NewTraceExporter(\n\t\tconfig,\n\t\tstorage.Store,\n\t\texporterhelper.WithShutdown(func() error {\n\t\t\treturn w.Close()\n\t\t}))\n}",
"func (s *Service) Tracer() opentracing.Tracer {\n\treturn s.tracer\n}",
"func initTracer(serviceName string, agentUrl string, collectorUrl string) (io.Closer, error) {\n\tvar cfg jaegercfg.Configuration\n\tif agentUrl != \"\" {\n\t\tcfg = jaegercfg.Configuration{\n\t\t\tSampler: &jaegercfg.SamplerConfig{\n\t\t\t\tType: jaeger.SamplerTypeConst,\n\t\t\t\tParam: 1,\n\t\t\t},\n\t\t\tReporter: &jaegercfg.ReporterConfig{\n\t\t\t\tLocalAgentHostPort: agentUrl,\n\t\t\t\tLogSpans: true,\n\t\t\t},\n\t\t}\n\t} else if collectorUrl != \"\" {\n\t\tcfg = jaegercfg.Configuration{\n\t\t\tSampler: &jaegercfg.SamplerConfig{\n\t\t\t\tType: jaeger.SamplerTypeConst,\n\t\t\t\tParam: 1,\n\t\t\t},\n\t\t\tReporter: &jaegercfg.ReporterConfig{\n\t\t\t\tCollectorEndpoint: collectorUrl,\n\t\t\t\tLogSpans: true,\n\t\t\t},\n\t\t}\n\t} else {\n\t\tcfg = jaegercfg.Configuration{}\n\t}\n\n\tcloser, err := cfg.InitGlobalTracer(\n\t\tserviceName,\n\t\tjaegercfg.Logger(jaegerlog.StdLogger),\n\t\tjaegercfg.Metrics(jaegermetrics.NullFactory),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn closer, nil\n}",
"func initTracer(serviceName string, agentUrl string, collectorUrl string) (io.Closer, error) {\n\tvar cfg jaegercfg.Configuration\n\tif agentUrl != \"\" {\n\t\tcfg = jaegercfg.Configuration{\n\t\t\tSampler: &jaegercfg.SamplerConfig{\n\t\t\t\tType: jaeger.SamplerTypeConst,\n\t\t\t\tParam: 1,\n\t\t\t},\n\t\t\tReporter: &jaegercfg.ReporterConfig{\n\t\t\t\tLocalAgentHostPort: agentUrl,\n\t\t\t\tLogSpans: true,\n\t\t\t},\n\t\t}\n\t} else if collectorUrl != \"\" {\n\t\tcfg = jaegercfg.Configuration{\n\t\t\tSampler: &jaegercfg.SamplerConfig{\n\t\t\t\tType: jaeger.SamplerTypeConst,\n\t\t\t\tParam: 1,\n\t\t\t},\n\t\t\tReporter: &jaegercfg.ReporterConfig{\n\t\t\t\tCollectorEndpoint: collectorUrl,\n\t\t\t\tLogSpans: true,\n\t\t\t},\n\t\t}\n\t} else {\n\t\tcfg = jaegercfg.Configuration{}\n\t}\n\n\tcloser, err := cfg.InitGlobalTracer(\n\t\tserviceName,\n\t\tjaegercfg.Logger(jaegerlog.StdLogger),\n\t\tjaegercfg.Metrics(jaegermetrics.NullFactory),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn closer, nil\n}",
"func newOpentelemetryTracerProviderStore() *opentelemetryTracerProviderStore {\n\texps := []sdktrace.SpanExporter{}\n\treturn &opentelemetryTracerProviderStore{exps, nil, nil}\n}",
"func newNoOpTracerProvider() TracerProvider {\n\treturn &noopTracerProvider{TracerProvider: trace.NewNoopTracerProvider()}\n}",
"func newTracesExporter(params exporter.CreateSettings, cfg component.Config) (*traceExporterImp, error) {\n\texporterFactory := otlpexporter.NewFactory()\n\n\tlb, err := newLoadBalancer(params, cfg, func(ctx context.Context, endpoint string) (component.Component, error) {\n\t\toCfg := buildExporterConfig(cfg.(*Config), endpoint)\n\t\treturn exporterFactory.CreateTracesExporter(ctx, params, &oCfg)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttraceExporter := traceExporterImp{loadBalancer: lb, routingKey: traceIDRouting}\n\n\tswitch cfg.(*Config).RoutingKey {\n\tcase \"service\":\n\t\ttraceExporter.routingKey = svcRouting\n\tcase \"traceID\", \"\":\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported routing_key: %s\", cfg.(*Config).RoutingKey)\n\t}\n\treturn &traceExporter, nil\n}",
"func TravelNew(id ID) *Travel {\n\ta := IntransitiveActivityNew(id, TravelType)\n\to := Travel(*a)\n\treturn &o\n}",
"func (sb ServiceBase) Tracer() trace.Tracer {\n\treturn sb.options.Tracer\n}",
"func NewTrace(err error, skip int) error {\n\tswitch err.(type) {\n\tcase Restackable:\n\t\treturn err.(Restackable).NewTrace(skip + 1)\n\t}\n\treturn &detailedError{\n\t\ts: err.Error(),\n\t\tstack: stackTrace(skip + 1),\n\t}\n}",
"func initTracing(cfg domain.TracingConfig) (opentracing.Tracer, io.Closer) {\n\ttracingCfg := jaegerconf.Configuration{\n\t\tServiceName: cfg.ServiceName,\n\t\tSampler: &jaegerconf.SamplerConfig{\n\t\t\tType: jaeger.SamplerTypeConst,\n\t\t\tParam: 1,\n\t\t},\n\t\tReporter: &jaegerconf.ReporterConfig{\n\t\t\tLogSpans: true,\n\t\t},\n\t}\n\ttracer, closer, err := tracingCfg.NewTracer()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\topentracing.SetGlobalTracer(tracer)\n\treturn tracer, closer\n}",
"func NewTraceExporter(logger *zap.Logger) exporter.TraceExporter {\n\treturn &loggingExporter{logger: logger}\n}",
"func newFakeTracerProviderStore() *fakeTracerProviderStore {\n\texps := []sdktrace.SpanExporter{}\n\treturn &fakeTracerProviderStore{exps, nil, nil}\n}",
"func initTracer() func() {\n\t// Create and install Jaeger export pipeline.\n\tflush, err := jaeger.InstallNewPipeline(\n\t\tjaeger.WithCollectorEndpoint(\"http://localhost:14268/api/traces\"),\n\t\tjaeger.WithProcess(jaeger.Process{\n\t\t\tServiceName: \"server\",\n\t\t\tTags: []label.KeyValue{\n\t\t\t\tlabel.String(\"exporter\", \"jaeger\"),\n\t\t\t},\n\t\t}),\n\t\tjaeger.WithSDK(&sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}),\n\t)\n\totel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn flush\n}",
"func newExporter(w io.Writer) (trace.SpanExporter, error) {\n\treturn stdouttrace.New(\n\t\tstdouttrace.WithWriter(w),\n\t\t// Use human-readable output.\n\t\tstdouttrace.WithPrettyPrint(),\n\t\t// Do not print timestamps for the demo.\n\t\tstdouttrace.WithoutTimestamps(),\n\t)\n}",
"func New(svc service.TictacService, logger log.Logger, otTracer stdopentracing.Tracer, zipkinTracer *stdzipkin.Tracer) (ep Endpoints) {\n\tvar ticEndpoint endpoint.Endpoint\n\t{\n\t\tmethod := \"tic\"\n\t\tticEndpoint = MakeTicEndpoint(svc)\n\t\tticEndpoint = opentracing.TraceServer(otTracer, method)(ticEndpoint)\n\t\tif zipkinTracer != nil {\n\t\t\tticEndpoint = zipkin.TraceEndpoint(zipkinTracer, method)(ticEndpoint)\n\t\t}\n\t\tticEndpoint = LoggingMiddleware(log.With(logger, \"method\", method))(ticEndpoint)\n\t\tep.TicEndpoint = ticEndpoint\n\t}\n\n\tvar tacEndpoint endpoint.Endpoint\n\t{\n\t\tmethod := \"tac\"\n\t\ttacEndpoint = MakeTacEndpoint(svc)\n\t\ttacEndpoint = opentracing.TraceServer(otTracer, method)(tacEndpoint)\n\t\tif zipkinTracer != nil {\n\t\t\ttacEndpoint = zipkin.TraceEndpoint(zipkinTracer, method)(tacEndpoint)\n\t\t}\n\t\ttacEndpoint = LoggingMiddleware(log.With(logger, \"method\", method))(tacEndpoint)\n\t\tep.TacEndpoint = tacEndpoint\n\t}\n\n\treturn ep\n}",
"func NewDefault() tracing.StartSpan {\n\treturn NewWithTracer(opentracing.GlobalTracer())\n}",
"func NewTraceClient(c HTTPClient, l Level) *TraceClient {\n\treturn &TraceClient{c: c, l: l}\n}",
"func NewTracingService(s Service) Service {\n\treturn &tracingService{service: s}\n}",
"func InitTracer(tracing env.Tracing, id string) (io.Closer, error) {\n\ttracer, trCloser, err := tracing.NewTracer(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\topentracing.SetGlobalTracer(tracer)\n\treturn trCloser, nil\n}",
"func (e *detailedError) NewTrace(skip int) error {\n\tcp := new(detailedError)\n\t*cp = *e\n\tcp.stack = stackTrace(skip + 1)\n\tcp.original = e.Original()\n\treturn cp\n}",
"func newTracingMiddleware(tracer opentracing.Tracer) linkManagerMiddleware {\n\treturn func(next om.LinkManager) om.LinkManager {\n\t\treturn tracingMiddleware{next, tracer}\n\t}\n}",
"func NewTransport(base *http.Transport) http.RoundTripper {\n\tif tracer != nil {\n\t\treturn tracer.NewTransport(base)\n\t}\n\treturn nil\n}"
] | [
"0.8209218",
"0.8077655",
"0.79836166",
"0.79239917",
"0.7877002",
"0.78683406",
"0.7835157",
"0.779283",
"0.7783801",
"0.7783801",
"0.7783801",
"0.7775605",
"0.7605881",
"0.76013285",
"0.7595682",
"0.7517959",
"0.7459979",
"0.7405851",
"0.7405851",
"0.7370349",
"0.7234734",
"0.7110442",
"0.6861783",
"0.68284714",
"0.6741338",
"0.66998273",
"0.6696342",
"0.6696342",
"0.6696342",
"0.6627697",
"0.66244566",
"0.65541816",
"0.64701295",
"0.64561486",
"0.64405733",
"0.6439176",
"0.64130574",
"0.6285231",
"0.6277894",
"0.622787",
"0.62226814",
"0.6173185",
"0.61520845",
"0.6120611",
"0.61195815",
"0.6115027",
"0.6111991",
"0.6091892",
"0.6086266",
"0.60798055",
"0.60447294",
"0.6025822",
"0.6023593",
"0.60011387",
"0.5974719",
"0.59408116",
"0.59400487",
"0.5924038",
"0.58784264",
"0.585358",
"0.5846802",
"0.5815383",
"0.5804058",
"0.580377",
"0.5766544",
"0.57646626",
"0.57630575",
"0.5748743",
"0.5743974",
"0.57421386",
"0.57421064",
"0.57368267",
"0.57320774",
"0.57276297",
"0.5659969",
"0.5625857",
"0.56240165",
"0.56091636",
"0.56091166",
"0.5596877",
"0.5596877",
"0.5594606",
"0.5590107",
"0.5583315",
"0.5582584",
"0.5559158",
"0.55540293",
"0.5533872",
"0.5532233",
"0.5489945",
"0.5476027",
"0.5465838",
"0.54539937",
"0.5452661",
"0.54479384",
"0.5439142",
"0.5426805",
"0.54265904",
"0.54242617",
"0.542331"
] | 0.8393048 | 0 |
Start logs start of the trace | func (t *Tracer) Start() *Tracer {
log.Debugf("Tracer started %v.", t.Description)
return t
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func LogStart(version string, env string) {\n\tlog.Println(\"Start\")\n\tlog.Printf(\"Version: %s\", version)\n\tlog.Printf(\"Environment: %s\", env)\n\tlog.Printf(\"Go version: %s\", runtime.Version())\n\tlog.Printf(\"Go max procs: %d\", runtime.GOMAXPROCS(0))\n}",
"func init() {\n\tlog.SetPrefix(\"TRACE: \")\n\tlog.SetFlags(log.Ldate | log.Lmicroseconds | log.Llongfile)\n}",
"func (t *Trace) StartTrace() *Trace {\n\tif t.Started() {\n\t\treturn t\n\t}\n\tnewT := *t\n\tnewT.Start = time.Now()\n\treturn &newT\n}",
"func (m *Main) PrintStart() {\n\tfmt.Println(\"---\")\n\tlog.Infof(\"testing %s\", m.Name)\n\tfmt.Println(\"\")\n}",
"func (el *ZapEventLogger) Start(ctx context.Context, operationName string) context.Context {\n\tspan, ctx := opentrace.StartSpanFromContext(ctx, operationName)\n\tspan.SetTag(\"system\", el.system)\n\treturn ctx\n}",
"func Startf(format string, args ...interface{}) { logRaw(LevelStart, 2, format, args...) }",
"func (op *Operation) startTrace(ctx context.Context) (*trace.Trace, context.Context) {\n\tif op.context.Tracer == nil {\n\t\treturn nil, ctx\n\t}\n\n\ttr, ctx := op.context.Tracer.New(ctx, op.kebabName, \"\")\n\treturn tr, ctx\n}",
"func (w *TraceWriter) Start() {\n\tw.BaseWriter.Start()\n\tgo func() {\n\t\tdefer watchdog.LogOnPanic()\n\t\tw.Run()\n\t}()\n}",
"func LogScenarioStart(s *godog.Scenario) {\n\tlog.Print(scenarioString(true, s))\n}",
"func (s *Step) Start() {\n\tfmt.Fprintln(writer, color.CyanString(fmt.Sprintf(\"%s %s %s %s\", start, time.Now().Format(time.RFC3339), s.test, s.name)))\n}",
"func start(){\n\t\t\t\t\n\t\t\t\tdebug.Send(\"exec-run\")\t\t\t\t\n\t\t\t\tdebug.Send(\"interpreter-exec\",\"console\",\"record\")\n\t\t\t\t\n\t\t\t\t\n\n\t}",
"func StartLogTimer(name string) {\n\tlogEvent(name, START_SYMBOL)\n}",
"func (op *AddonOperator) logTaskStart(logEntry *log.Entry, tsk sh_task.Task) {\n\t// Prevent excess messages for highly frequent tasks.\n\tif tsk.GetType() == task.GlobalHookWaitKubernetesSynchronization {\n\t\treturn\n\t}\n\tif tsk.GetType() == task.ModuleRun {\n\t\thm := task.HookMetadataAccessor(tsk)\n\t\tmodule := op.ModuleManager.GetModule(hm.ModuleName)\n\t\tif module.State.Phase == module_manager.WaitForSynchronization {\n\t\t\treturn\n\t\t}\n\t}\n\n\tlogger := logEntry.\n\t\tWithField(\"task.flow\", \"start\").\n\t\tWithFields(utils.LabelsToLogFields(tsk.GetLogLabels()))\n\tif triggeredBy, ok := tsk.GetProp(\"triggered-by\").(log.Fields); ok {\n\t\tlogger = logger.WithFields(triggeredBy)\n\t}\n\n\tlogger.Infof(taskDescriptionForTaskFlowLog(tsk, \"start\", op.taskPhase(tsk), \"\"))\n}",
"func (l Log) start(path string) (Log, error) {\n\tl.StartTime = time.Now()\n\tl.Status = \"Started\"\n\treturn l, l.save(path)\n}",
"func (obj *ChannelMessageTracer) Start() {\n\t//logger.Log(fmt.Sprint(\"Entering ChannelMessageTracer:Start ...\"))\n\tif !obj.isRunning {\n\t\tobj.isRunning = true\n\t\t// Start reading and processing messages from the wire\n\t\tobj.extractAndTraceMessage()\n\t}\n\t//logger.Log(fmt.Sprint(\"Returning ChannelMessageTracer:Start ...\"))\n}",
"func (o *influxDBLogger) start() error {\n\treturn o.tick()\n}",
"func (kl *Klog) Begin(ctx context.Context, f string) {\n\tif kl.logLevelThreshold > 1 {\n\t\treturn\n\t}\n\tklog.InfoDepth(getLogDepth(ctx), fmt.Sprintf(\"Entry: %s\", f))\n}",
"func logStartOfRequest(\n\tr *stdhttp.Request,\n\textraHeaders []string,\n) {\n\tfields := log.F{}\n\tfor _, header := range extraHeaders {\n\t\t// Strips \"-\" characters and lowercases new logrus.Fields keys to be uniform with the other keys in the logger.\n\t\t// Simplifies querying extended fields.\n\t\tvar headerkey = strings.ToLower(strings.ReplaceAll(header, \"-\", \"\"))\n\t\tfields[headerkey] = r.Header.Get(header)\n\t}\n\tfields[\"subsys\"] = \"http\"\n\tfields[\"path\"] = r.URL.String()\n\tfields[\"method\"] = r.Method\n\tfields[\"ip\"] = r.RemoteAddr\n\tfields[\"host\"] = r.Host\n\tfields[\"useragent\"] = r.Header.Get(\"User-Agent\")\n\tl := log.Ctx(r.Context()).WithFields(fields)\n\n\tl.Info(\"starting request\")\n}",
"func (l *LogAdapter) Start() error {\n\treturn nil\n}",
"func (l *Logger) Starting(url string) {\n\tl.m.Lock()\n\tdefer l.m.Unlock()\n\tl.Log = append(l.Log, fmt.Sprintf(\"start %s\", url))\n}",
"func start(state GameState) {\n\tlog.Printf(\"%s START\\n\", state.Game.ID)\n}",
"func start(state GameState) {\n\tlog.Printf(\"%s START\\n\", state.Game.ID)\n}",
"func (l *InstrumenterLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error {\n\tl.lastCaptureTime = runtimeNano()\n\treturn nil\n}",
"func (t *Tracer) BeginTrace(nodes ...string) error {\n\tif len(nodes) == 0 {\n\t\treturn fmt.Errorf(\"no nodes to trace\")\n\t}\n\n\tnumPackets := t.numPackets\n\n\tcmds := []string{\n\t\t\"clear trace\",\n\t}\n\tfor _, node := range nodes {\n\t\tcmds = append(cmds, fmt.Sprintf(\"trace add %s %d\", node, numPackets))\n\t}\n\tout, err := t.cli.RunCli(strings.Join(cmds, \"\\n\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"trace command failed: %w\\n%s\", err, out)\n\t}\n\n\tt.toRetrieve = numPackets * len(nodes)\n\n\treturn nil\n}",
"func Start(opts ...StartOption) {\n\tif internal.Testing {\n\t\treturn // mock tracer active\n\t}\n\tt := newTracer(opts...)\n\tif !t.config.enabled {\n\t\treturn\n\t}\n\tinternal.SetGlobalTracer(t)\n\tif t.config.logStartup {\n\t\tlogStartup(t)\n\t}\n}",
"func StartTrace(traceDir, identifier string) error {\n\t// Lock the trace lock so that only one profiler is running at a\n\t// time.\n\ttraceLock.Lock()\n\tif traceActive {\n\t\ttraceLock.Unlock()\n\t\treturn errors.New(\"cannot start trace, it is already running\")\n\t}\n\ttraceActive = true\n\ttraceLock.Unlock()\n\n\t// Start trace into the trace dir, using the identifer. The timestamp\n\t// of the start time of the trace will be included in the filename.\n\ttraceFile, err := os.Create(filepath.Join(traceDir, \"trace-\"+identifier+\"-\"+time.Now().Format(time.RFC3339Nano)+\".trace\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn trace.Start(traceFile)\n}",
"func (*resourceTraceProcessor) Start(ctx context.Context, host component.Host) error {\n\treturn nil\n}",
"func (a *LogAgent) Start(persister operator.Persister) (err error) {\n\ta.startOnce.Do(func() {\n\t\terr = a.pipeline.Start(persister)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t})\n\treturn\n}",
"func (s *Server) StartTrace(ctx thrift.Context, request *tracetest.StartTraceRequest) (*tracetest.TraceResponse, error) {\n\treturn nil, errCannotStartInTChannel\n}",
"func (er *BufferedExchangeReporter) Start() {\n\n}",
"func StartCommand() {\n\tlog.Info(\"Do something\")\n\n\tlog.Debug(\"Debug\")\n\tlog.Info(\"Info\")\n\tlog.Warn(\"Warn\")\n\tlog.Error(\"Error\")\n\tlog.Fatal(\"Fatal\")\n}",
"func (s *Basegff3Listener) EnterStart(ctx *StartContext) {}",
"func (hPtr *HandlerT) StartGoTrace(file string) error {\n\thPtr.mu.Lock()\n\tdefer hPtr.mu.Unlock()\n\tif hPtr.traceW != nil {\n\t\treturn errors.New(\"trace already in progress\")\n\t}\n\tf, err := os.Create(expandHome(file))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := trace.Start(f); err != nil {\n\t\tf.Close()\n\t\treturn err\n\t}\n\thPtr.traceW = f\n\thPtr.traceFile = file\n\tbgmlogs.Info(\"Go tracing started\", \"dump\", hPtr.traceFile)\n\treturn nil\n}",
"func (u *UTrace) Start() error {\n\t// ensure that at least one function pattern was provided\n\tif u.options.FuncPattern == nil && u.options.KernelFuncPattern == nil {\n\t\treturn NoPatternProvidedErr\n\t}\n\n\tif err := u.start(); err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.Infof(\"Tracing started on %d symbols ... (Ctrl + C to stop)\", len(u.matchingFuncCache))\n\treturn nil\n}",
"func main() {\n\tfmt.Println(Debug, LogLevel, startUpTime)\n}",
"func (sr *SpanRecorder) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) {\n\tsr.startedMu.Lock()\n\tdefer sr.startedMu.Unlock()\n\tsr.started = append(sr.started, s)\n}",
"func (s *EventStore) StartTracing() {\n\ts.traceMu.Lock()\n\tdefer s.traceMu.Unlock()\n\n\ts.tracing = true\n}",
"func (t *Tracer) ConnectStart(network, addr string) {\n\t// If using dual-stack dialing, it's possible to get this\n\t// multiple times, so the atomic compareAndSwap ensures\n\t// that only the first call's time is recorded\n\tatomic.CompareAndSwapInt64(&t.connectStart, 0, now())\n}",
"func (t *TestRun) Start() {\n\tlog.Println(\"================\")\n\tlog.Println(\" Starting test \")\n\tlog.Println(\"================\")\n\tlog.Printf(\"concurrency level [%d]\", t.ConcurrencyLevel)\n\tlog.Printf(\"iterations [%d]\", t.Iterations)\n\tlog.Printf(\"writeRate [%f]\", t.WriteRate)\n\tt.StartedAt = time.Now()\n\tlog.Println(\"TESTRUN Starting...\")\n}",
"func (req *ServerHTTPRequest) start() {\n\tif req.started {\n\t\t/* coverage ignore next line */\n\t\treq.contextLogger.Error(req.Context(),\n\t\t\t\"Cannot start ServerHTTPRequest twice\",\n\t\t\tzap.String(\"path\", req.URL.Path),\n\t\t)\n\t\t/* coverage ignore next line */\n\t\treturn\n\t}\n\treq.started = true\n\treq.startTime = time.Now()\n\n\t// emit request count\n\treq.scope.Counter(endpointRequest).Inc(1)\n\n\tif req.tracer != nil {\n\t\topName := fmt.Sprintf(\"%s.%s\", req.EndpointName, req.HandlerName)\n\t\turlTag := opentracing.Tag{Key: \"URL\", Value: req.URL}\n\t\tMethodTag := opentracing.Tag{Key: \"Method\", Value: req.Method}\n\t\tcarrier := opentracing.HTTPHeadersCarrier(req.httpRequest.Header)\n\t\tspanContext, err := req.tracer.Extract(opentracing.HTTPHeaders, carrier)\n\t\tvar span opentracing.Span\n\t\tif err != nil {\n\t\t\tif err != opentracing.ErrSpanContextNotFound {\n\t\t\t\t/* coverage ignore next line */\n\t\t\t\treq.contextLogger.WarnZ(req.Context(), \"Error Extracting Trace Headers\", zap.Error(err))\n\t\t\t}\n\t\t\tspan = req.tracer.StartSpan(opName, urlTag, MethodTag)\n\t\t} else {\n\t\t\tspan = req.tracer.StartSpan(opName, urlTag, MethodTag, ext.RPCServerOption(spanContext))\n\t\t}\n\t\treq.span = span\n\t}\n\treq.setupLogFields()\n}",
"func (l *loggingT) startLogstash() {\n\tl.logstashChan = make(chan string, 100)\n\tgo l.handleLogstashMessages()\n}",
"func trace(msg string) func() {\n\tstart := time.Now()\n\tlog.Printf(\"enter %s\", msg)\n\treturn func() {\n\t\tlog.Printf(\"exit %s (%s)\", msg, time.Since(start))\n\t}\n}",
"func (s *ServerlessTraceAgent) Start(enabled bool, loadConfig Load, lambdaSpanChan chan<- *pb.Span, coldStartSpanId uint64) {\n\tif enabled {\n\t\t// Set the serverless config option which will be used to determine if\n\t\t// hostname should be resolved. Skipping hostname resolution saves >1s\n\t\t// in load time between gRPC calls and agent commands.\n\t\tddConfig.Datadog.Set(\"serverless.enabled\", true)\n\n\t\ttc, confErr := loadConfig.Load()\n\t\tif confErr != nil {\n\t\t\tlog.Errorf(\"Unable to load trace agent config: %s\", confErr)\n\t\t} else {\n\t\t\tcontext, cancel := context.WithCancel(context.Background())\n\t\t\ttc.Hostname = \"\"\n\t\t\ttc.SynchronousFlushing = true\n\t\t\ts.ta = agent.NewAgent(context, tc, telemetry.NewNoopCollector())\n\t\t\ts.spanModifier = &spanModifier{\n\t\t\t\tcoldStartSpanId: coldStartSpanId,\n\t\t\t\tlambdaSpanChan: lambdaSpanChan,\n\t\t\t}\n\n\t\t\ts.ta.ModifySpan = s.spanModifier.ModifySpan\n\t\t\ts.ta.DiscardSpan = filterSpanFromLambdaLibraryOrRuntime\n\t\t\ts.cancel = cancel\n\t\t\tgo s.ta.Run()\n\t\t}\n\t}\n}",
"func (t *Tracer) TLSHandshakeStart() {\n\tatomic.CompareAndSwapInt64(&t.tlsHandshakeStart, 0, now())\n}",
"func start() {\n\taddDefaultSector()\n}",
"func StartTest(testName string) {\n\tlog.Println(\"\")\n\tlog.Println(\"\")\n\tpc, file, line, _ := runtime.Caller(1)\n\n\tfullPCName := runtime.FuncForPC(pc).Name()\n\tlastIndexOfPc := strings.LastIndex(fullPCName, \"/\") + 1\n\tjustPcName := fullPCName[lastIndexOfPc:len(fullPCName)]\n\n\tlastIndexOfFile := strings.LastIndex(file, \"/\") + 1\n\tjustFileName := file[lastIndexOfFile:len(file)]\n\n\t//log.Printf(\"INFO [%s:%d] [%s] %v\", justFileName, line, justPcName, msg)\n\tlog.Printf(\"***START [%s:%d] [%s] %v\", justFileName, line, justPcName, testName)\n\n\t//log.Printf(\"***START \" + testName + \" [%s:%d] [%s] %v\", justFileName, line, justPcName, msg))\n\tlog.Println(\"\")\n}",
"func Trace() {\n\tfn, _, _, _ := runtime.Caller(1)\n\tname := runtime.FuncForPC(fn).Name()\n\tname = filepath.Base(name)\n\tlog(traceType, name+\"()\")\n}",
"func (s *Stopwatch) Start() {\n\ts.t = time.Now()\n}",
"func (l *Logger) Trace(name, file string, line int) {\r\n\tl.timeReset()\r\n\tdefer l.timeLog(name)\r\n}",
"func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\t// Your code here (2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif rf.killed() || rf.state != LEADER {\n\t\treturn len(rf.log), rf.currentTerm, false\n\t}\n\n\trf.log = append(rf.log, Log{rf.currentTerm, command})\n\trf.persist()\n\n\treturn len(rf.log) - 1, rf.currentTerm, true\n}",
"func (t *Trace) Started() bool {\n\treturn !t.Start.IsZero()\n}",
"func (s *SafeTestingTBOnStart) Start(t testing.TB) error {\n\ts.SetTestingTB(t)\n\treturn nil\n}",
"func (c *EntityStats) OnStart() {\n\tengosdl.Logger.Trace().Str(\"component\", \"entity-stats\").Str(\"entity-stats\", c.GetName()).Msg(\"OnStart\")\n\tc.Component.OnStart()\n}",
"func (s *BaseDiceListener) EnterStart(ctx *StartContext) {}",
"func StartSpan(name string) {\n\tif tracer == nil {\n\t\treturn\n\t}\n\ttracer.StartSpan(name)\n}",
"func (ssr *SpanRecorder) OnStart(span *Span) {\n\tssr.startedMu.Lock()\n\tdefer ssr.startedMu.Unlock()\n\tssr.started = append(ssr.started, span)\n}",
"func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\tindex := -1\n\tterm := -1\n\tisLeader := false\n\n\t// Your code here (2B).\n\trf.lock(\"Start\")\n\t// term, isLeader = rf.GetState()\n\tisLeader = rf.state == LEADER\n\tterm = rf.currentTerm\n\tif isLeader {\n\t\t// rf.lock(\"Start\")\n\t\tindex = len(rf.log)\n\t\trf.log = append(rf.log, LogEntry{LogTerm: term, Command: command})\n\n\t\trf.matchIndex[rf.me] = index\n\t\trf.nextIndex[rf.me] = index + 1\n\n\t\trf.persist()\n\t\t// rf.unlock(\"Start\")\n\t\tDPrintf(\"Leader[%v]提交Start日志,它的LogTerm是[%v]和LogCommand[%v]\\n\", rf.me,\n\t\t\tterm, command)\n\t}\n\trf.unlock(\"Start\")\n\treturn index, term, isLeader\n}",
"func main() {\n\t// 创建trace文件\n\tf, err := os.Create(\"trace.out\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\n\t// 启动trace goroutine\n\terr = trace.Start(f)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer trace.Stop()\n\n\tc := make(chan int, 1)\n\tc <- 1\n\n\t// main\n\tfmt.Println(\"Hello trace\")\n}",
"func (r *Reporter) Start(_ context.Context) error {\n\treturn nil\n}",
"func (c *CompositeTracer) BeginTrace(correlationId string, component string, operation string) *TraceTiming {\n\treturn NewTraceTiming(correlationId, component, operation, c)\n}",
"func (s *StopWatch) Start() {\n if !s.running {\n s.start = time.Now()\n s.running = true\n }\n}",
"func Start(cfg Config) error {\n\texporter, err := jaeger.NewExporter(jaeger.Options{\n\t\tCollectorEndpoint: cfg.ReporterURI,\n\t\tAgentEndpoint: cfg.LocalEndpoint,\n\t\tProcess: jaeger.Process{\n\t\t\tServiceName: cfg.ServiceName,\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttrace.RegisterExporter(exporter)\n\ttrace.ApplyConfig(trace.Config{\n\t\tDefaultSampler: trace.ProbabilitySampler(cfg.Probability),\n\t})\n\n\treturn nil\n}",
"func (e *humioTracesExporter) start(_ context.Context, host component.Host) error {\n\tclient, err := e.getClient(e.cfg, e.settings, host)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.client = client\n\n\treturn nil\n}",
"func (s *BaseGShellListener) EnterStart(ctx *StartContext) {}",
"func (l *LogAnalyticsInput) Start() error {\n\tl.Handler = l.handleBatchedEvents\n\treturn l.StartConsumers(context.Background())\n}",
"func resetStart() {\n\tstart = time.Now()\n}",
"func (nc *NodeCaller) Start() {\n\tnc.caller.Start()\n}",
"func (r *Handler) Started(id uint64) {\n\tr.id = id\n\tlog.V(1).Info(\n\t\t\"event: started.\",\n\t\t\"id\",\n\t\tr.id)\n}",
"func (br *BandwidthMeter) Start() {\n br.start = time.Now().UTC()\n}",
"func (l Mylog) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) {\n\telapsed := time.Now().Sub(begin)\n\ts, _ := json.Marshal(&ctx)\n\tl.Info(ctx, string(s))\n\tif err != nil {\n\t\tsql, rows := fc()\n\t\tl.ServiceLog.Error(ctx, utils.FileWithLineNum(), err, float64(elapsed.Nanoseconds())/1e6, rows, sql)\n\t} else {\n\t\tsql, rows := fc()\n\t\tl.ServiceLog.Info(utils.FileWithLineNum(), float64(elapsed.Nanoseconds())/1e6, rows, sql)\n\t}\n}",
"func (tm *TeamMonitoring) Start() {\n\tgocron.Every(1).Day().At(tm.Config.ReportTime).Do(tm.RevealRooks)\n}",
"func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\tindex := -1\n\tterm := -1\n\tisLeader := false\n\n\t// Your code here (2B).\n\trf.mu.Lock()\n\tif !rf.killed() && rf.getState() == Leader {\n\t\tisLeader = true\n\t\tlastEntry := rf.getLastLog()\n\t\tindex = lastEntry.Index + 1\n\t\tterm = rf.currentTerm\n\t\tnewEntry := LogEntry{\n\t\t\tTerm: term,\n\t\t\tIndex: index,\n\t\t\tCommand: command,\n\t\t}\n\t\t//DPrintf(\"peer=%v start command=%+v\", rf.me, newEntry)\n\t\trf.logEntries = append(rf.logEntries, newEntry)\n\t}\n\trf.mu.Unlock()\n\treturn index, term, isLeader\n}",
"func (m *MockThresholdNotifier) Start() {\n\tm.Called()\n}",
"func (st *buildStatus) start() {\n\tsetStatus(st.builderRev, st)\n\tgo func() {\n\t\terr := st.build()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(st, \"\\n\\nError: %v\\n\", err)\n\t\t\tlog.Println(st.builderRev, \"failed:\", err)\n\t\t}\n\t\tst.setDone(err == nil)\n\t\tst.buildRecord().put()\n\t\tmarkDone(st.builderRev)\n\t}()\n}",
"func (i *instrumentor) tracingBegin(ctx context.Context, eventJSON []byte) (context.Context, trace.Span) {\n\t// Add trace id to context\n\tmc := i.configuration.EventToCarrier(eventJSON)\n\tctx = i.configuration.Propagator.Extract(ctx, mc)\n\n\tvar span trace.Span\n\tspanName := os.Getenv(\"AWS_LAMBDA_FUNCTION_NAME\")\n\n\tvar attributes []attribute.KeyValue\n\tlc, ok := lambdacontext.FromContext(ctx)\n\tif !ok {\n\t\terrorLogger.Println(\"failed to load lambda context from context, ensure tracing enabled in Lambda\")\n\t}\n\tif lc != nil {\n\t\tctxRequestID := lc.AwsRequestID\n\t\tattributes = append(attributes, semconv.FaaSExecution(ctxRequestID))\n\n\t\t// Some resource attrs added as span attrs because lambda\n\t\t// resource detectors are created before a lambda\n\t\t// invocation and therefore lack lambdacontext.\n\t\t// Create these attrs upon first invocation\n\t\tif len(i.resAttrs) == 0 {\n\t\t\tctxFunctionArn := lc.InvokedFunctionArn\n\t\t\tattributes = append(attributes, semconv.FaaSID(ctxFunctionArn))\n\t\t\tarnParts := strings.Split(ctxFunctionArn, \":\")\n\t\t\tif len(arnParts) >= 5 {\n\t\t\t\tattributes = append(attributes, semconv.CloudAccountID(arnParts[4]))\n\t\t\t}\n\t\t}\n\t\tattributes = append(attributes, i.resAttrs...)\n\t}\n\n\tctx, span = i.tracer.Start(ctx, spanName, trace.WithSpanKind(trace.SpanKindServer), trace.WithAttributes(attributes...))\n\n\treturn ctx, span\n}",
"func Start(text string) {\n\tfile, issue := os.OpenFile(\"log/access.log\", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\tif issue != nil {\n\t\tlog.Printf(\"Error opening file: %v\", issue)\n\t\treturn\n\t}\n\n\tlog.SetOutput(file)\n\tlog.Println(text)\n\terr := file.Close()\n\tErrorHandler(err)\n\treturn\n}",
"func Trace(msg string) func() {\n\tstart := time.Now()\n\tlog.Printf(\"enter %s\", msg)\n\treturn func() { log.Printf(\"exit %s (%s)\", msg, time.Since(start)) }\n}",
"func (a *Agent) start() {\n\ta.initAPI()\n\tnb := 0\n\tfor {\n\t\ta.updateStreams()\n\t\tnb++\n\t\tif nb == 10 {\n\t\t\tlog.Printf(\"Sent %d logs and %d metrics on the last %d seconds\\n\", a.nbLogs, a.nbMetrics, nb*conf.period)\n\t\t\tnb = 0\n\t\t\ta.nbLogs = 0\n\t\t\ta.nbMetrics = 0\n\t\t}\n\t\ttime.Sleep(time.Duration(conf.period) * time.Second)\n\t}\n}",
"func (b *Basic) Start(target Target, rw RecordWriter, filter Filter, formatter Formatter, maxQueued int) {\n\tif filter == nil {\n\t\tfilter = &StdFilter{Lvl: Fatal}\n\t}\n\tif formatter == nil {\n\t\tformatter = &DefaultFormatter{}\n\t}\n\n\tb.target = target\n\tb.filter = filter\n\tb.formatter = formatter\n\tb.in = make(chan *LogRec, maxQueued)\n\tb.done = make(chan struct{}, 1)\n\tb.w = rw\n\tgo b.start()\n\n\tif b.hasMetrics() {\n\t\tgo b.startMetricsUpdater()\n\t}\n}",
"func (_m *MockHistoryEngine) Start() {\n\t_m.Called()\n}",
"func (z *ZapLogger) Trace(args ...interface{}) {\n\tz.Debug(args)\n}",
"func (zw *zerologWrapper) Trace(ctx context.Context, format string, args ...interface{}) {\n\tnewEntry(zw, false, zw.cfg.staticFields).Trace(ctx, format, args...)\n}",
"func (t *Tracer) StartTransaction(name, transactionType string) *Transaction {\n\ttx := t.newTransaction(name, transactionType)\n\ttx.Timestamp = time.Now()\n\treturn tx\n}",
"func (l *LoggerInstance) StartMessage(msg string) {\n\tl.DefaultContext().Info().Msg(fmt.Sprintf(\"%s: %s\", msg, \"START\"))\n}",
"func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.serverState != Leader {\n\t\treturn -1, rf.currentTerm, false\n\t}\n\tentryIndex := rf.lastIncludedIndex + len(rf.log)\n\tentry := LogEntry{\n\t\tTerm: rf.currentTerm,\n\t\tCommand: command}\n\trf.log = append(rf.log, entry)\n\trf.nextIndex[rf.me] = entryIndex + 1\n\trf.matchIndex[rf.me] = entryIndex\n\treturn entryIndex, rf.currentTerm, rf.serverState == Leader\n}",
"func StartTime() {\n\tstart = time.Now()\n}",
"func (el *ZapEventLogger) EventBegin(ctx context.Context, event string, metadata ...Loggable) *EventInProgress {\n\tctx = el.Start(ctx, event)\n\n\tfor _, m := range metadata {\n\t\tfor l, v := range m.Loggable() {\n\t\t\tel.LogKV(ctx, l, v)\n\t\t}\n\t}\n\n\teip := &EventInProgress{}\n\teip.doneFunc = func(additional []Loggable) {\n\t\t// anything added during the operation\n\t\t// e.g. deprecated methods event.Append(...) or event.SetError(...)\n\t\tfor _, m := range eip.loggables {\n\t\t\tfor l, v := range m.Loggable() {\n\t\t\t\tel.LogKV(ctx, l, v)\n\t\t\t}\n\t\t}\n\t\tel.Finish(ctx)\n\t}\n\treturn eip\n}",
"func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\tterm := -1\n\tisLeader := true\n\n\t// Your code here (2B).\n\tterm, isLeader = rf.GetState()\n\n\tif !isLeader {\n\t\treturn -1, term, isLeader\n\t}\n\n\trf.Lock()\n\tdefer rf.Unlock()\n\tnextIndex := func() int {\n\t\tif len(rf.log) > 0 {\n\t\t\treturn rf.log[len(rf.log)-1].Index + 1\n\t\t}\n\t\treturn Max(1, rf.lastSnapshotIndex+1)\n\t}()\n\n\tentry := LogEntry{Index: nextIndex, Term: rf.currentTerm, Command: command}\n\trf.log = append(rf.log, entry)\n\t//RaftInfo(\"New entry appended to leader's log: %s\", rf, entry)\n\n\treturn nextIndex, term, isLeader\n}",
"func TestTrace(t *testing.T) {\n\tvar data = []byte(`Log this!`)\n\tapolog.Trace(data)\n}",
"func (i *CmdLine) PrintStart() {\n\tfmt.Printf(\"Ok, es geht los!\")\n}",
"func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\t// Your code here (2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tindex := -1\n\tterm := rf.currentTerm\n\tisLeader := rf.state == Leader\n\n\tif isLeader {\n\t\tindex = rf.getLastLogIdx() + 1\n\t\tnewLog := Log{\n\t\t\trf.currentTerm,\n\t\t\tcommand,\n\t\t}\n\t\trf.logs = append(rf.logs, newLog)\n\t\trf.persist()\n\t\trf.broadcastHeartbeat()\n\t}\n\treturn index, term, isLeader\n}",
"func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\n\t//Check if I am the leader\n\t//if false --> return\n\t//If true -->\n\t// 1. Add to my log\n\t// 2. Send heart beat/Append entries to other peers\n\t//Check your own last log index and 1 to it.\n\t//Let other peers know that this is the log index for new entry.\n\n\t// we need to modify the heart beat mechanism such that it sends entries if any.\n\tindex := -1\n\tterm := -1\n\t//Otherwise prepare the log entry from the given command.\n\t// Your code here (2B).\n\t///////\n\tterm, isLeader :=rf.GetState()\n\tif isLeader == false {\n\t\treturn index,term,isLeader\n\t}\n\tterm = rf.currentTerm\n\tindex = rf.commitIndex\n\trf.sendAppendLogEntries(command)\n\treturn index, term, isLeader\n}",
"func (l *Logger) Trace(message string, args ...interface{}) { l.Log(Trace, message, args...) }",
"func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) {\n\tconfig := trace.NewSpanStartConfig(options...)\n\n\tif ctx == nil {\n\t\t// Prevent trace.ContextWithSpan from panicking.\n\t\tctx = context.Background()\n\t}\n\n\t// For local spans created by this SDK, track child span count.\n\tif p := trace.SpanFromContext(ctx); p != nil {\n\t\tif sdkSpan, ok := p.(*recordingSpan); ok {\n\t\t\tsdkSpan.addChild()\n\t\t}\n\t}\n\n\ts := tr.newSpan(ctx, name, &config)\n\tif rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {\n\t\tsps, _ := tr.provider.spanProcessors.Load().(spanProcessorStates)\n\t\tfor _, sp := range sps {\n\t\t\tsp.sp.OnStart(ctx, rw)\n\t\t}\n\t}\n\tif rtt, ok := s.(runtimeTracer); ok {\n\t\tctx = rtt.runtimeTrace(ctx)\n\t}\n\n\treturn trace.ContextWithSpan(ctx, s), s\n}",
"func (t *TXNTrigger) Start() error {\n\treturn nil\n}",
"func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tindex := -1\n\tterm := rf.currentTerm\n\tisLeader := rf.state == Leader\n\n\t// Your code here (2B).\n\tif isLeader {\n\n\t\tDPrintf(\"i am leader %v, and i send command %v\", rf.me, command)\n\n\t\tindex = rf.getLastLogIndex() + 1\n\t\tnewLog := Log{\n\t\t\tTerm: rf.currentTerm,\n\t\t\tCommand: command,\n\t\t}\n\t\trf.log = append(rf.log, newLog)\n\t\trf.persist()\n\t\t//fmt.Println(\"i am leader,\", rf.me)\n\t}\n\treturn index, term, isLeader\n}",
"func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\tindex := -1\n\tterm := -1\n\tisLeader := true\n\n\t// Your code here (2B).\n\trf.lock(\"Start\")\n\tdefer rf.unlock(\"Start\")\n\tisLeader = rf.state == Leader\n\tterm = rf.currentTerm\n\tif isLeader {\n\t\trf.logs = append(rf.logs, LogEntry{\n\t\t\tTerm: rf.currentTerm,\n\t\t\tIndex: rf.getAbsoluteLogIndex(len(rf.logs)),\n\t\t\tCommand: command,\n\t\t})\n\t\tindex = rf.getAbsoluteLogIndex(len(rf.logs) - 1)\n\t\trf.matchIndex[rf.me] = index\n\t\trf.nextIndex[rf.me] = index + 1\n\t\trf.persist()\n\t\tDPrintf(\"Server %v start an command to be appended to Raft's log, log's last term is %v, index is %v, log length is %v\",\n\t\t\trf.me, rf.logs[len(rf.logs) - 1].Term, rf.logs[len(rf.logs) - 1].Index, len(rf.logs))\n\t}\n\n\treturn index, term, isLeader\n}",
"func (z *ZapLogWrapper) Trace(args ...interface{}) {\n\tz.l.Trace(args...)\n}",
"func makeStartLine(formatter logFormatter, format string, args ...interface{}) *buffer {\n\tentry := makeUnstructuredEntry(\n\t\tcontext.Background(),\n\t\tseverity.UNKNOWN, /* header - ignored */\n\t\t0, /* header - ignored */\n\t\t2, /* depth */\n\t\ttrue, /* redactable */\n\t\tformat,\n\t\targs...)\n\tentry.header = true\n\tentry.tags = configTagsBuffer\n\treturn formatter.formatEntry(entry)\n}",
"func Trace(args ...interface{}) {\n\tLogger.Trace(args...)\n}"
] | [
"0.70481753",
"0.67946726",
"0.6792258",
"0.6780392",
"0.6672557",
"0.65980446",
"0.6561377",
"0.65486723",
"0.64368075",
"0.6423788",
"0.6393507",
"0.638214",
"0.6349291",
"0.62269604",
"0.6202975",
"0.62012595",
"0.6176968",
"0.6169051",
"0.61496973",
"0.61337566",
"0.61204714",
"0.61204714",
"0.60625494",
"0.6060585",
"0.6009836",
"0.6003557",
"0.600163",
"0.6001094",
"0.599879",
"0.59903127",
"0.59855646",
"0.5977496",
"0.5958968",
"0.59095216",
"0.59033036",
"0.59023947",
"0.5887653",
"0.5860836",
"0.5855761",
"0.58540213",
"0.5793039",
"0.5792958",
"0.57912564",
"0.5788376",
"0.57654136",
"0.5751919",
"0.57446206",
"0.573996",
"0.5732638",
"0.57229567",
"0.5721436",
"0.5720462",
"0.57181686",
"0.57072794",
"0.5704987",
"0.5692197",
"0.5689255",
"0.5687258",
"0.56822634",
"0.56818295",
"0.5679025",
"0.5676684",
"0.5670229",
"0.5659731",
"0.5658896",
"0.5653229",
"0.5648343",
"0.5633026",
"0.5622191",
"0.5620036",
"0.55896455",
"0.5587277",
"0.55858845",
"0.55842304",
"0.5583626",
"0.55827653",
"0.5580925",
"0.55783224",
"0.55766654",
"0.55684924",
"0.5567867",
"0.55639803",
"0.55493253",
"0.55478305",
"0.55436105",
"0.55343527",
"0.5530628",
"0.5514673",
"0.55106175",
"0.55037427",
"0.55023116",
"0.54969025",
"0.54792595",
"0.54698825",
"0.54680353",
"0.5467483",
"0.54621345",
"0.54606247",
"0.5454885",
"0.54503447"
] | 0.6492113 | 8 |
Stop logs the stop of the trace | func (t *Tracer) Stop() *Tracer {
log.Debugf("Tracer completed %v in %v.", t.Description, time.Since(t.Started))
return t
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (t *Tracer) Stop() {}",
"func Stop() {\n\tinternal.SetGlobalTracer(&internal.NoopTracer{})\n\tlog.Flush()\n}",
"func (o *influxDBLogger) stop() error {\n\treturn nil\n}",
"func (t *Tracer) Stop() {\n\tt.close()\n}",
"func (t *Tracer) Stop() {\n\tt.close()\n}",
"func (t *tracer) Stop() {\n\tt.stopOnce.Do(func() {\n\t\tclose(t.stop)\n\t\tt.config.statsd.Incr(\"datadog.tracer.stopped\", nil, 1)\n\t})\n\tt.stats.Stop()\n\tt.wg.Wait()\n\tt.traceWriter.stop()\n\tt.config.statsd.Close()\n\tappsec.Stop()\n}",
"func (lt *Logtailer) Stop() {\n\tclose(lt.shutdown)\n}",
"func (rf *Raft) Stop() {\n\trf.logger.SetOutput(ioutil.Discard)\n}",
"func (s *ServerlessTraceAgent) Stop() {\n\tif s.cancel != nil {\n\t\ts.cancel()\n\t}\n}",
"func (zl *ZapLogger) Stop() error {\n\treturn zl.logger.Sync()\n}",
"func (l *LogFlow) Stop() {\n\tl.output.StopOutput()\n\tif l.State != Running {\n\t\tl.log.Println(\"LogFlow not running.\")\n\t\treturn\n\t}\n\tgo func() {\n\t\tl.quittimer <- true\n\t}()\n}",
"func (container *ContainerLog) Stop() {\n\tif container.LogCopier != nil {\n\t\tcontainer.LogCopier.Close()\n\t}\n\tcontainer.since = time.Now()\n\tvar containerLogStop = true\n\tcontainer.stoped = &containerLogStop\n\tlogrus.Debugf(\"kato logger stop for container %s\", container.Name)\n}",
"func (er *BufferedExchangeReporter) Stop() {\n\n}",
"func (l *loggingT) StopLogstash() {\n\tl.logstashStop <- true\n}",
"func (r *RunCommand) stop() {\n\tr.logTail.Stop()\n\tr.pw.Stop()\n}",
"func (h *LogzioHook) Stop() {\n\th.s.Stop()\n}",
"func (r *Reporter) Stop() {\n\tr.elapsed = time.Since(r.startAt)\n\tr.Report()\n\tos.Exit(0)\n}",
"func (a *LogAgent) Stop() (err error) {\n\ta.stopOnce.Do(func() {\n\t\terr = a.pipeline.Stop()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t})\n\treturn\n}",
"func (w *TraceWriter) Stop() {\n\tclose(w.exit)\n\tw.exitWG.Wait()\n\tw.BaseWriter.Stop()\n}",
"func (this *Reporter) Stop() {\n\tthis.Status = REPORT_STATUS_STOP\n}",
"func (obj *ChannelMessageTracer) Stop() {\n\t//logger.Log(fmt.Sprint(\"Entering ChannelMessageTracer:Stop ...\"))\n\tif obj.isRunning {\n\t\t// Finish / Flush any remaining processing\n\t\ttraceFileWithSuffix := fmt.Sprintf(\"%s.%d\", obj.traceFileName, obj.currentSuffix)\n\t\tflag, _, _ := exists(traceFileWithSuffix)\n\t\tif flag {\n\t\t\t_ = obj.traceFile.Sync() // Flush\n\t\t\t_ = obj.traceFile.Close() // Close FD\n\t\t\tobj.traceFile = nil\n\t\t}\n\t\tobj.isRunning = false\n\t}\n\t//logger.Log(fmt.Sprint(\"Returning ChannelMessageTracer:Stop ...\"))\n}",
"func (il *InstanceLogger) Stop() {\n\t// Closes the client and flushes the buffer to Stackdriver\n\tif il.client != nil {\n\t\til.client.Close()\n\t\til.client = nil\n\t} else if il.cancelFunc != nil {\n\t\til.cancelFunc()\n\t\til.cancelFunc = nil\n\t}\n}",
"func (e *EvtFailureDetector) Stop() {\n\te.stop <- struct{}{}\n}",
"func (f *FakeOutput) Stop() error { return nil }",
"func (tm *JournalTargetManager) Stop() {}",
"func (rs *RpcServer) Stop() {\n\tlogx.Close()\n}",
"func (e *Server) Stop() {\n\tlogx.Close()\n}",
"func (t *Tracker) Stop() {\n\tt.Finish = time.Now()\n\tt.Duration = time.Since(t.Run)\n}",
"func (tm *ServiceTracerouteManager) Stop() {\n\ttm.StopChan <- true\n}",
"func (s *EBPFSocketInfoEnhancer) Stop() {\n\ts.tracer.Stop()\n}",
"func (m *Manager) Stop() {\n\tdefer close(m.done)\n\n\tif m.shutdownFunc == nil {\n\t\treturn\n\t}\n\n\tif err := m.shutdownFunc(); err != nil {\n\t\tlevel.Error(m.logger).Log(\"msg\", \"failed to shut down the tracer provider\", \"err\", err)\n\t}\n\n\tlevel.Info(m.logger).Log(\"msg\", \"Tracing manager stopped\")\n}",
"func (p *noop) Stop() {}",
"func (t *TimerSnapshot) Stop() {}",
"func Stop(objects []*inject.Object, log Logger) error {\n\tlevels, err := levels(objects)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, level := range levels {\n\t\tfor _, o := range level {\n\t\t\tif stopperO, ok := o.Value.(Stopper); ok {\n\t\t\t\tif log != nil {\n\t\t\t\t\tlog.Debugf(\"stopping %s\", o)\n\t\t\t\t}\n\t\t\t\tif err := stopperO.Stop(); err != nil {\n\t\t\t\t\tif log != nil {\n\t\t\t\t\t\tlog.Errorf(\"error stopping %s: %s\", o, err)\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif closerO, ok := o.Value.(Closer); ok {\n\t\t\t\tif log != nil {\n\t\t\t\t\tlog.Debugf(\"closing %s\", o)\n\t\t\t\t}\n\t\t\t\tif err := closerO.Close(); err != nil {\n\t\t\t\t\tif log != nil {\n\t\t\t\t\t\tlog.Errorf(\"error closing %s: %s\", o, err)\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (fd *failureDetector) Stop() {\n\tfd.stop <- struct{}{}\n}",
"func (h *LinkerdInfo) Stop() {\n\th.log.Info(\"shutting down\")\n\tclose(h.stopCh)\n}",
"func (l *Logs) End() {\n\t// endTime := time.Now()\n\telapsed := time.Since(l.startTime)\n\tl.TraceF(\"%s took %+v\", l.funcName, elapsed)\n}",
"func (hsp HistoryServicePrecacher) Stop() { hsp.pc.Stop() }",
"func (r *Reporter) Stop(_ context.Context) error {\n\treturn nil\n}",
"func (c *haTracker) stop() {\n\tif c.cfg.EnableHATracker {\n\t\tc.cancel()\n\t\t<-c.done\n\t}\n}",
"func (w *StatsWriter) Stop() {\n\tw.stop <- struct{}{}\n\t<-w.stop\n\tstopSenders(w.senders)\n}",
"func (r *reaper) stop() {\n\tr.stopCh <- struct{}{}\n}",
"func (tc *TrafficCapture) Stop() {\n\ttc.Lock()\n\tdefer tc.Unlock()\n\n\ttc.writer.StopCapture()\n}",
"func (s *maxEPSSampler) Stop() {\n\ts.reportDone <- true\n\t<-s.reportDone\n\n\ts.rateCounter.Stop()\n}",
"func (margelet *Margelet) Stop() {\n\tmargelet.running = false\n}",
"func (l *Ledger) Stop(ctx context.Context) error {\n\treturn nil\n}",
"func (t *Biologist) Stop() {\n\tt.stopAnalysis()\n}",
"func Stop() {\n\tstopRunning <- true\n\n}",
"func Stop() {\n\ts.Stop()\n}",
"func (sl *ReceiverLoop) stop() {\n\tsl.cancel()\n\t<-sl.stopped\n}",
"func (s *syslogWatcher) Stop() {\n\ts.tomb.Stop()\n}",
"func (t *Timer) Stop() {\n\tmetrics.MeasureSince(strings.Split(t.path, \".\"), t.start)\n}",
"func (d *D) stop() {\n\tclose(d.stopCh)\n}",
"func (s *Sampler) Stop() {\n\ts.Backend.Stop()\n\tclose(s.exit)\n}",
"func (l *Logger) Stop() {\n\tclose(l.in)\n\tl.wg.Wait()\n}",
"func (s *StaticSpinner) Stop(msg string) {\n\ts.active = false\n\tfmt.Fprintln(s.w, msg)\n}",
"func (ns *EsIndexer) Stop() {\n\n}",
"func (s *Driver) Stop(force bool) error {\n\t// Then Logging Client might not be initialized\n\tif s.lc != nil {\n\t\ts.lc.Debug(fmt.Sprintf(\"Driver.Stop called: force=%v\", force))\n\t}\n\treturn nil\n}",
"func (l *LogAnalyticsInput) Stop() error {\n\treturn l.StopConsumers()\n}",
"func (_m *TimeTicker) Stop() {\n\t_m.Called()\n}",
"func (_m *MessageMain) Stop() {\n\t_m.Called()\n}",
"func (w *eventCollector) stop(t *testing.T) Events {\n\treturn w.stopWait(t, time.Second)\n}",
"func (f *framework) stop() {\n\tclose(f.epochChan)\n}",
"func (app *frame) Stop() {\n\tapp.isStopped = true\n}",
"func (manager *BarWriter) Stop() {\n\tmanager.stopChan <- struct{}{}\n}",
"func (b *BTCVanity) Stop() {\n\tb.stop <- true\n}",
"func (b *BTCVanity) Stop() {\n\tb.stop <- true\n}",
"func (s *SimpleDriver) Stop(force bool) error {\n\t// Then Logging Client might not be initialized\n\tif s.lc != nil {\n\t\ts.lc.Debug(fmt.Sprintf(\"SimpleDriver.Stop called: force=%v\", force))\n\t}\n\treturn nil\n}",
"func (f *Flame) Stop() {\n\tf.stop <- struct{}{}\n}",
"func (au Auditor) StopAudit() {\n\tclose(transChan)\n}",
"func (pt *panicTimer) stop() {\n\tif pt.t != nil {\n\t\tpt.t.Stop()\n\t\tpt.t = nil\n\t}\n}",
"func (s *ReporterImpl) Stop() {\n\ts.queue.Dispose()\n}",
"func stopUreadaheadTracing(ctx context.Context, cmd *testexec.Cmd) error {\n\tif cmd.ProcessState != nil {\n\t\t// Already stopped. Do nothing.\n\t\treturn nil\n\t}\n\n\ttesting.ContextLog(ctx, \"Sending interrupt signal to ureadahead tracing process\")\n\tif err := cmd.Process.Signal(os.Interrupt); err != nil {\n\t\treturn errors.Wrap(err, \"failed to send interrupt signal to ureadahead tracing\")\n\t}\n\n\tif err := cmd.Wait(); err != nil {\n\t\treturn errors.Wrap(err, \"failed to wait ureadahead tracing done\")\n\t}\n\n\treturn nil\n}",
"func (z *ZapLogWrapper) Trace(args ...interface{}) {\n\tz.l.Trace(args...)\n}",
"func (bt *Metricbeat) Stop() {\n\tclose(bt.done)\n}",
"func (s *schedule) stop() {\n\tif !s.running {\n\t\treturn\n\t}\n\ts.running = false\n\ts.stopCh <- struct{}{}\n}",
"func (rf *Raft) stop(timer *time.Timer) {\n\tif !timer.Stop() && len(timer.C) != 0 {\n\t\t<-timer.C\n\t}\n}",
"func (batchStats *BatchStatsReporter) Stop() {\n\tclose(batchStats.stopChan)\n}",
"func (tw *TimeWheel) Stop() {\n\ttw.stopFlag <- struct{}{}\n}",
"func (b *Batch) Stop() {\n\tb.cancelFunc()\n}",
"func (t *StandardTimer) Stop() {\n\tt.meter.Stop()\n}",
"func (r *RecordStream) Stop() {\n\tif r.state == running {\n\t\tr.c.c.Request(&proto.CorkRecordStream{StreamIndex: r.index, Corked: true}, nil)\n\t\tr.state = idle\n\t}\n}",
"func (s *samplerBackendRateCounter) Stop() {\n\tclose(s.exit)\n\t<-s.stopped\n}",
"func Stop() {\n\tinstance.stop()\n}",
"func (a *Recorder) Stop() error {\n\n\tif !a.IsRunning() {\n\t\treturn fmt.Errorf(\"Cannot stop recorder: Not running\")\n\t}\n\n\ta.shutdown()\n\treturn nil\n}",
"func (_m *MockCompactionPlanContext) stop() {\n\t_m.Called()\n}",
"func (tr *TaskRunner) stop() error {\n\tif tr.logger.IsTrace() {\n\t\tstart := time.Now()\n\t\ttr.logger.Trace(\"running stop hooks\", \"start\", start)\n\t\tdefer func() {\n\t\t\tend := time.Now()\n\t\t\ttr.logger.Trace(\"finished stop hooks\", \"end\", end, \"duration\", end.Sub(start))\n\t\t}()\n\t}\n\n\tvar merr multierror.Error\n\tfor _, hook := range tr.runnerHooks {\n\t\tpost, ok := hook.(interfaces.TaskStopHook)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tname := post.Name()\n\t\tvar start time.Time\n\t\tif tr.logger.IsTrace() {\n\t\t\tstart = time.Now()\n\t\t\ttr.logger.Trace(\"running stop hook\", \"name\", name, \"start\", start)\n\t\t}\n\n\t\treq := interfaces.TaskStopRequest{}\n\n\t\torigHookState := tr.hookState(name)\n\t\tif origHookState != nil {\n\t\t\t// Give the hook data provided by prestart\n\t\t\treq.ExistingState = origHookState.Data\n\t\t}\n\n\t\tvar resp interfaces.TaskStopResponse\n\t\tif err := post.Stop(tr.killCtx, &req, &resp); err != nil {\n\t\t\ttr.emitHookError(err, name)\n\t\t\tmerr.Errors = append(merr.Errors, fmt.Errorf(\"stop hook %q failed: %v\", name, err))\n\t\t}\n\n\t\t// Stop hooks cannot alter state and must be idempotent, so\n\t\t// unlike prestart there's no state to persist here.\n\n\t\tif tr.logger.IsTrace() {\n\t\t\tend := time.Now()\n\t\t\ttr.logger.Trace(\"finished stop hook\", \"name\", name, \"end\", end, \"duration\", end.Sub(start))\n\t\t}\n\t}\n\n\treturn merr.ErrorOrNil()\n}",
"func (a *appsec) stop() {\n\ta.unregisterWAF()\n\ta.limiter.Stop()\n}",
"func (dt *discoveryTool) stop() {\n\tclose(dt.done)\n\n\t//Shutdown timer\n\ttimer := time.NewTimer(time.Second * 3)\n\tdefer timer.Stop()\nL:\n\tfor { //Unblock go routine by reading from dt.dataChan\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\tbreak L\n\t\tcase <-dt.dataChan:\n\t\t}\n\t}\n\n\tdt.wg.Wait()\n}",
"func Stop() {\n\t// /bin/dbus-send --system --dest=org.ganesha.nfsd --type=method_call /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.shutdown\n}",
"func (_m *MarkerConsumer) Stop() {\n\t_m.Called()\n}",
"func (s *Server) Stop(log *logrus.Entry) {\n\ts.gracefulServer.Shutdown(context.Background())\n}",
"func (e *Exporter) Stop() {\n\te.statsExporter.stop()\n\te.traceExporter.stop()\n}",
"func (NilTimer) Stop() {}",
"func (w *Watcher) Stop() { w.streamer.Stop() }",
"func (app *App) Stop() {}",
"func (p *Bar) Stop() {\n\tp.Output.Write([]byte(\"\\n\"))\n}",
"func (i2c I2C) stop() {\n\t// Send stop condition.\n\tavr.TWCR.Set(avr.TWCR_TWEN | avr.TWCR_TWINT | avr.TWCR_TWSTO)\n\n\t// Wait for stop condition to be executed on bus.\n\tfor !avr.TWCR.HasBits(avr.TWCR_TWSTO) {\n\t}\n}",
"func (c *Check) Stop() { close(c.stopCh) }",
"func (c *collector) Stop() {\n\tclose(c.stop)\n}"
] | [
"0.79674745",
"0.71204233",
"0.69399786",
"0.69265217",
"0.69265217",
"0.6831262",
"0.68158007",
"0.6786613",
"0.6785409",
"0.6773118",
"0.67459726",
"0.6679901",
"0.6633648",
"0.6599372",
"0.6574594",
"0.65598506",
"0.65119165",
"0.6508982",
"0.6487301",
"0.6478834",
"0.6415724",
"0.63562024",
"0.6325252",
"0.6318995",
"0.6309667",
"0.6305349",
"0.62550336",
"0.6213924",
"0.61543995",
"0.6113866",
"0.6105435",
"0.61041653",
"0.60884523",
"0.60831505",
"0.6069174",
"0.6053666",
"0.60459113",
"0.60364074",
"0.6030196",
"0.60241854",
"0.6020847",
"0.6008955",
"0.5998701",
"0.5998676",
"0.59767175",
"0.5968566",
"0.59666115",
"0.5964502",
"0.5962842",
"0.595654",
"0.59542423",
"0.5952726",
"0.59411645",
"0.5931897",
"0.5927446",
"0.59273994",
"0.5917067",
"0.5906212",
"0.5900666",
"0.5900583",
"0.5891241",
"0.58882546",
"0.58727014",
"0.5872442",
"0.58714086",
"0.5849802",
"0.5849802",
"0.5847647",
"0.58427465",
"0.58419853",
"0.58399856",
"0.583392",
"0.5833267",
"0.5822804",
"0.58128935",
"0.5811789",
"0.58095294",
"0.58006126",
"0.5770425",
"0.576629",
"0.57623804",
"0.5759299",
"0.57525635",
"0.57410866",
"0.57394713",
"0.5738204",
"0.5738194",
"0.5735979",
"0.5734859",
"0.57176346",
"0.570788",
"0.5704595",
"0.5703307",
"0.5703033",
"0.56953835",
"0.568952",
"0.5689185",
"0.5688237",
"0.5686755",
"0.56838447"
] | 0.6180919 | 28 |
ThisFunction returns the calling function name | func ThisFunction() string {
var pc [32]uintptr
runtime.Callers(2, pc[:])
return runtime.FuncForPC(pc[0]).Name()
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func CurrentFunctionName() string {\n\tpc, _, _, _ := runtime.Caller(1)\n\tfuncname := path.Base(runtime.FuncForPC(pc).Name())\n\treturn funcname\n}",
"func funcName() string {\n\tpc, _, _, _ := runtime.Caller(1)\n\treturn runtime.FuncForPC(pc).Name()\n}",
"func ThisFunc() *runtime.Func {\n\tpc, _, _, _ := runtime.Caller(1)\n\treturn runtime.FuncForPC(pc)\n}",
"func myCaller() string {\n\t// Skip GetCallerFunctionName and the function to get the caller of\n\treturn getFrame(2).Function\n}",
"func funcName() string {\n\tpc, _, _, _ := runtime.Caller(1)\n\tcompleteFuncname := runtime.FuncForPC(pc).Name()\n\treturn strings.Split(completeFuncname, \".\")[len(strings.Split(completeFuncname, \".\"))-1]\n}",
"func callerName() (caller string) {\n\tpc, _, _, ok := runtime.Caller(2) // 0: function-self, 1: parent function caller\n\tif !ok {\n\t\tcaller = \"#\"\n\t} else {\n\t\tpath := runtime.FuncForPC(pc).Name()\n\t\titems := strings.Split(path, \".\")\n\t\tcaller = items[len(items)-1]\n\t\tif len(caller) == 0 {\n\t\t\tcaller = path\n\t\t}\n\t}\n\treturn caller\n}",
"func (logProxy *loggerProxy)getCallerName() string{\n pc := make([]uintptr, 1)\n //Skipping the functions that are part of loggerProxy to get right caller.\t\n runtime.Callers(4, pc)\n f := runtime.FuncForPC(pc[0])\n return f.Name()\n}",
"func getCallerFuncName() (callerFuncName string) {\n\tpc, _, _, _ := runtime.Caller(2)\n\tdetails := runtime.FuncForPC(pc)\n\treturn details.Name()\n}",
"func (m Function) Name() string {\n\treturn m.name\n}",
"func GetMyCaller() string {\n\t_, filename, _, _ := runtime.Caller(1)\n\treturn filename\n}",
"func GetCallingFunction() string {\n\tfpcs := make([]uintptr, 1)\n\n\tn := runtime.Callers(3, fpcs)\n\tif n == 0 {\n\t\treturn \"n/a\"\n\t}\n\n\tfun := runtime.FuncForPC(fpcs[0] - 1)\n\tif fun == nil {\n\t\treturn \"n/a\"\n\t}\n\n\tnameParts := strings.Split(fun.Name(), \".\")\n\n\treturn nameParts[len(nameParts)-1]\n}",
"func GetFuncName() string {\n\tpc, _, _, _ := runtime.Caller(1)\n\treturn runtime.FuncForPC(pc).Name()\n}",
"func GetFunctionName() string {\n\tpc, _, _, _ := runtime.Caller(1)\n\tfullName := runtime.FuncForPC(pc).Name()\n\tparts := strings.Split(fullName, \".\")\n\treturn parts[len(parts)-1]\n}",
"func funcName() string {\n\t// Skip 2 levels to get the caller.\n\tpc, _, _, ok := runtime.Caller(depth)\n\tif !ok {\n\t\tfmt.Println(\"MSG: NO CALLER\")\n\t\treturn \"\"\n\t}\n\n\t// get the function caller.\n\tcaller := runtime.FuncForPC(pc)\n\tif caller == nil {\n\t\tfmt.Println(\"MSG CALLER WAS NIL\")\n\t}\n\n\t// remove extra file path characters.\n\tr := regexp.MustCompile(`[^\\/]+$`)\n\treturn fmt.Sprintf(\"%s\", r.FindString(caller.Name()))\n}",
"func getFunctionName(fn interface{}) string {\n\tname := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()\n\n\t// Method above returns functions in the form : main.foo\n\tparts := strings.Split(name, \".\")\n\treturn parts[len(parts)-1]\n}",
"func callerSource() string {\n\tpc, file, line, success := runtime.Caller(2)\n\tif !success {\n\t\tfile = \"<unknown>\"\n\t\tline = 0\n\t}\n\tfile = path.Base(file)\n\tname := runtime.FuncForPC(pc).Name()\n\tname = strings.TrimPrefix(name, \"github.com/minio/minio/cmd.\")\n\treturn fmt.Sprintf(\"[%s:%d:%s()]\", file, line, name)\n}",
"func _getFuncName() string {\n var ptrVal = _getFuncPtrVal()\n\n // Look up the function object\n var fun = runtime.FuncForPC(ptrVal)\n\n // returns: _/home/user/src/gohavenet/src.TestProcMgr_False\n var funcNamePath = fun.Name()\n\n // Split on the slash and return just the func name\n var pathElems = strings.Split(funcNamePath, \"/\")\n var index = len(pathElems) - 1\n if index < 0 {\n index = 0\n }\n\n return pathElems[index]\n}",
"func getFunctionName(i interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()\n}",
"func This() string {\n\treturn this\n}",
"func _caller(n int) string {\n\tif pc, _, _, ok := runtime.Caller(n); ok {\n\t\tfns := strings.Split(runtime.FuncForPC(pc).Name(), \"/\")\n\t\treturn fns[len(fns)-1]\n\t}\n\n\treturn \"unknown\"\n}",
"func (p Person) FirstNameCaller() string {\n\treturn p.firstName\n}",
"func funcName(fn interface{}) string {\n\tfnV := reflect.ValueOf(fn)\n\tif fnV.Kind() != reflect.Func {\n\t\treturn \"n/a\"\n\t}\n\n\tfunction := runtime.FuncForPC(fnV.Pointer()).Name()\n\treturn fmt.Sprintf(\"%s()\", function)\n}",
"func funcName(f interface{}) string {\n\tfi := ess.GetFunctionInfo(f)\n\treturn fi.Name\n}",
"func FuncName() string {\n\tp := make([]uintptr, 1)\n\truntime.Callers(2, p)\n\tfullname := runtime.FuncForPC(p[0]).Name()\n\n\t_, name := path.Split(fullname)\n\treturn name\n}",
"func (s Stack) FirstFunction() string {\n\treturn s.firstFunction\n}",
"func (f *Function) Name() string {\n\treturn f.name\n}",
"func getFunctionName(fn interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf((fn)).Pointer()).Name()\n}",
"func CallerName() string {\n\tvar callerName string\n\tpc, fileName, line, _ := runtime.Caller(1)\n\n\t// get function name\n\tfuncNameFull := runtime.FuncForPC(pc).Name()\n\tfuncNameEnd := filepath.Ext(funcNameFull)\n\tfuncName := strings.TrimPrefix(funcNameEnd, \".\")\n\n\t// get file name\n\tsuffix := \".go\"\n\t_, nf := filepath.Split(fileName)\n\tif strings.HasSuffix(nf, \".go\") {\n\t\tfileName = strings.TrimSuffix(nf, suffix)\n\t\tcallerName = fileName + suffix + \":\" + strconv.Itoa(line) + \" \" + funcName\n\t}\n\treturn callerName\n}",
"func Name(ctx context.Context) string {\n\tf, ok := ctx.Value(stateKey).(*Func)\n\tif !ok {\n\t\treturn \"<Undefined>\"\n\t}\n\tname := runtime.FuncForPC(reflect.ValueOf(*f).Pointer()).Name()\n\treturn strings.TrimRight(nameRe.FindStringSubmatch(name)[1], \")\")\n}",
"func SprintFnThis(js string) string {\n\treturn fmt.Sprintf(`function() { return (%s).apply(this, arguments) }`, js)\n}",
"func (f *Function) Name() string {\n\treturn \"\"\n}",
"func funcName(f interface{}) string {\n\tname := gort.FuncForPC(reflect.ValueOf(f).Pointer()).Name()\n\ti := strings.LastIndex(name, \".\")\n\treturn name[i+1:]\n}",
"func GetFunctionName(i interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()\n}",
"func (oe *OraErr) FunName() string { return oe.funName }",
"func (l *littr) GetFuncName(i int) string {\n\treturn l.code[i+5 : i+strings.Index(l.code[i:], \"(\")]\n}",
"func functionName(i func(int, int) (int, error)) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()\n}",
"func Callername() string {\n\tpc := make([]uintptr, 10) // at least 1 entry needed\n\truntime.Callers(3, pc)\n\tf := runtime.FuncForPC(pc[0])\n\treturn shortFuncname(f.Name())\n}",
"func (fn *Function) BaseName() string {\n\tinst := fn.instRange()\n\tif i := strings.LastIndex(fn.Name[inst[1]:], \".\"); i != -1 {\n\t\treturn fn.Name[inst[1]+i+1:]\n\t} else if i := strings.LastIndex(fn.Name[:inst[0]], \".\"); i != -1 {\n\t\treturn fn.Name[i+1:]\n\t}\n\treturn fn.Name\n}",
"func Funcname() string {\n\tpc := make([]uintptr, 10) // at least 1 entry needed\n\truntime.Callers(2, pc)\n\tf := runtime.FuncForPC(pc[0])\n\treturn shortFuncname(f.Name())\n}",
"func GetCallersName(depth int, includeLine bool) string {\n\tpc, file, line, ok := runtime.Caller(depth + 1)\n\tif !ok {\n\t\treturn \"???\"\n\t}\n\n\tfnname := \"\"\n\tif fn := runtime.FuncForPC(pc); fn != nil {\n\t\tfnname = lastComponent(fn.Name())\n\t}\n\n\tif !includeLine {\n\t\treturn fnname\n\t}\n\n\treturn fmt.Sprintf(\"%s() at %s:%d\", fnname, lastComponent(file), line)\n}",
"func (f frame) name() string {\n\tfn := runtime.FuncForPC(f.pc())\n\tif fn == nil {\n\t\treturn \"unknown\"\n\t}\n\treturn fn.Name()\n}",
"func nameOfFunction(f interface{}) string {\n\tfun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())\n\ttokenized := strings.Split(fun.Name(), \".\")\n\tlast := tokenized[len(tokenized)-1]\n\tlast = strings.TrimSuffix(last, \")·fm\") // < Go 1.5\n\tlast = strings.TrimSuffix(last, \")-fm\") // Go 1.5\n\tlast = strings.TrimSuffix(last, \"·fm\") // < Go 1.5\n\tlast = strings.TrimSuffix(last, \"-fm\") // Go 1.5\n\tif last == \"func1\" { // this could mean conflicts in API docs\n\t\tval := atomic.AddInt32(&anonymousFuncCount, 1)\n\t\tlast = \"func\" + fmt.Sprintf(\"%d\", val)\n\t\tatomic.StoreInt32(&anonymousFuncCount, val)\n\t}\n\treturn last\n}",
"func MyCallerFileLine() string {\n\n\t// we get the callers as uintptrs - but we just need 1\n\tfpcs := make([]uintptr, 1)\n\n\t// skip 3 levels to get to the caller of whoever called Caller()\n\tn := runtime.Callers(3, fpcs)\n\tif n == 0 {\n\t\treturn \"n/a\" // proper error her would be better\n\t}\n\n\t// get the info of the actual function that's in the pointer\n\tfun := runtime.FuncForPC(fpcs[0] - 1)\n\tif fun == nil {\n\t\treturn \"n/a\"\n\t}\n\n\t// return its name\n\tfilename, line := fun.FileLine(fpcs[0] - 1)\n\tfilename = filepath.Base(filename)\n\treturn fmt.Sprintf(\"%v:%v\", filename, line)\n}",
"func (f *Function) Name() string {\n\tcstr := C.EnvGetDeffunctionName(f.env.env, f.fptr)\n\treturn C.GoString(cstr)\n}",
"func funcName(skip int) (name string) {\n\tif pc, _, lineNo, ok := runtime.Caller(skip); ok {\n\t\tif v, ok := fm.Load(pc); ok {\n\t\t\tname = v.(string)\n\t\t} else {\n\t\t\tname = runtime.FuncForPC(pc).Name() + \":\" + strconv.FormatInt(int64(lineNo), 10)\n\t\t\tfm.Store(pc, name)\n\t\t}\n\t}\n\treturn\n}",
"func fn(labels ...string) string {\n\tfunction, _, _, _ := runtime.Caller(1)\n\n\tlongname := runtime.FuncForPC(function).Name()\n\n\tnameparts := strings.Split(longname, \".\")\n\tshortname := nameparts[len(nameparts)-1]\n\n\tif labels == nil {\n\t\treturn fmt.Sprintf(\"[%s()]\", shortname)\n\t}\n\n\treturn fmt.Sprintf(\"[%s():%s]\", shortname, strings.Join(labels, \":\"))\n}",
"func (l *Lifecycle) RunningHookCaller() string {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\treturn l.runningHook.callerFrame.Function\n}",
"func GetCallersName(depth int) string {\n\tpc, file, line, ok := runtime.Caller(depth + 1)\n\tif !ok {\n\t\treturn \"???\"\n\t}\n\n\tfnname := \"\"\n\tif fn := runtime.FuncForPC(pc); fn != nil {\n\t\tfnname = fn.Name()\n\t}\n\n\treturn fmt.Sprintf(\"%s() at %s:%d\", lastComponent(fnname), lastComponent(file), line)\n}",
"func CallerFuncNameString() string {\n\tfuncName, err := CallerFuncName()\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn funcName\n}",
"func (m Method) Name() string {\n\treturn m.function.name\n}",
"func getCaller(skip int, shortFileName bool) string {\n\tvar b strings.Builder\n\t_, file, no, ok := runtime.Caller(skip)\n\tif ok {\n\t\tif shortFileName {\n\t\t\tif lastSlashIndex := strings.LastIndex(file, \"/\"); lastSlashIndex != -1 {\n\t\t\t\tfile = file[lastSlashIndex+1:]\n\t\t\t}\n\t\t}\n\t\tfmt.Fprintf(&b, \"%s:%d\", file, no)\n\t}\n\treturn b.String()\n}",
"func getCallerSourceLocation() string {\n\t_, file, line, ok := runtime.Caller(2)\n\tresult := \"unknown:unknown\"\n\tif ok {\n\t\tresult = fmt.Sprintf(\"%s:%d\", file, line)\n\t}\n\treturn result\n}",
"func CallerFuncName() (string, error) {\n\tfpcs := make([]uintptr, 1)\n\tn := runtime.Callers(3, fpcs)\n\tif n == 0 {\n\t\treturn \"\", errors.New(\"Error after runtime.Callers(), n == 0\")\n\t}\n\tf := runtime.FuncForPC(fpcs[0] - 1)\n\tif f == nil {\n\t\treturn \"\", errors.New(\"Error after runtime.FuncForPC(): fun == nil\")\n\t}\n\treturn f.Name(), nil\n}",
"func this() *runtime.Func {\n pc := make([]uintptr, 10) // at least 1 entry needed\n runtime.Callers(2, pc)\n f:= runtime.FuncForPC(pc[1])\n return f\n}",
"func (p *FuncInfo) Name() string {\n\treturn p.name\n}",
"func (f Frame) name() string {\n\tfn := runtime.FuncForPC(f.pc())\n\tif fn == nil {\n\t\treturn \"unknown\"\n\t}\n\treturn fn.Name()\n}",
"func procName(shortName bool, level int) (name string, line int) {\n\tpc, _, line, _ := runtime.Caller(level)\n\tname = runtime.FuncForPC(pc).Name()\n\tif shortName {\n\t\tname = name[strings.Index(name, \".\")+1:]\n\t}\n\treturn name, line\n}",
"func (fn *Function) Inspect() string {\n\treturn fn.Name\n}",
"func (f Function) GetName() string {\n\treturn f.ident.String()\n}",
"func LogFunctionName() {\n\tpc, _, _, _ := runtime.Caller(1)\n\tfuncname := path.Base(runtime.FuncForPC(pc).Name())\n\tlogRaw(LevelDebug, 2, \"Function %s.\", funcname)\n}",
"func FuncName(frame *govulncheck.StackFrame) string {\n\tswitch {\n\tcase frame.Receiver != \"\":\n\t\treturn fmt.Sprintf(\"%s.%s\", strings.TrimPrefix(frame.Receiver, \"*\"), frame.Function)\n\tcase frame.Package != \"\":\n\t\treturn fmt.Sprintf(\"%s.%s\", frame.Package, frame.Function)\n\tdefault:\n\t\treturn frame.Function\n\t}\n}",
"func FuncName(i interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()\n}",
"func (o FunctionOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Function) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func getFullyQualifiedFunctionName(fn interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()\n}",
"func (f Frame) Func() string {\n\treturn f.tr.getStringDefault(f.fn)\n}",
"func fullCaller(skip int) (file string, line int, fnc string, ok bool) {\n\tvar pc uintptr\n\tpc, file, line, ok = runtime.Caller(skip + 1)\n\tif !ok {\n\t\treturn\n\t}\n\n\tf := runtime.FuncForPC(pc)\n\tfnc = f.Name()\n\treturn\n}",
"func CallerFuncNameWithOffset(i int) (string, error) {\n\tfpcs := make([]uintptr, 1)\n\tn := runtime.Callers(3+i, fpcs)\n\tif n == 0 {\n\t\treturn \"\", errors.New(\"Error after runtime.Callers(), n == 0\")\n\t}\n\tf := runtime.FuncForPC(fpcs[0] - 1)\n\tif f == nil {\n\t\treturn \"\", errors.New(\"Error after runtime.FuncForPC(): fun == nil\")\n\t}\n\treturn f.Name(), nil\n}",
"func me() string {\n\t_, file, line, ok := runtime.Caller(1)\n\tif !ok {\n\t\treturn \"???\"\n\t}\n\n\treturn path.Base(file) + \":\" + strconv.Itoa(line)\n}",
"func GetFuncName(depth int, a ...interface{}) string {\n\tpc := make([]uintptr, 10)\n\tn := runtime.Callers(2, pc)\n\tframes := runtime.CallersFrames(pc[:n])\n\tframe, more := frames.Next()\n\tfor more && depth > 0 {\n\t\tframe, more = frames.Next()\n\t\tdepth--\n\t}\n\tname := strings.TrimPrefix(frame.Function, \"main.\")\n\t// Using a switch to prevent calling strings.Join for small (common) use cases. Saves a little mem and processing.\n\tswitch len(a) {\n\tcase 0:\n\t\t// do nothing\n\tcase 1:\n\t\tname += fmt.Sprintf(\": %v\", a[0])\n\tcase 2:\n\t\tname += fmt.Sprintf(\": %v, %v\", a[0], a[1])\n\tcase 3:\n\t\tname += fmt.Sprintf(\": %v, %v, %v\", a[0], a[1], a[2])\n\tdefault:\n\t\targs := make([]string, len(a))\n\t\tfor i, arg := range a {\n\t\t\targs[i] = fmt.Sprintf(\"%v\", arg)\n\t\t}\n\t\tname += fmt.Sprintf(\": %s\", strings.Join(args, \", \"))\n\t}\n\treturn name\n}",
"func CallerName(skip int) string {\n\tif pc, _, _, ok := runtime.Caller(skip); ok {\n\t\tsplit := strings.Split(runtime.FuncForPC(pc).Name(), \".\")\n\t\treturn split[len(split)-1]\n\t}\n\treturn \"\"\n}",
"func caller(depth int) (string, string, int) {\n\tpc, src, line, ok := runtime.Caller(depth + 1)\n\tif !ok {\n\t\treturn \"\", \"\", 0\n\t}\n\treturn runtime.FuncForPC(pc).Name(), src, line\n}",
"func (pc *HTTPProxyClient) GetFunctionName(r *http.Request) string {\n\tvars := mux.Vars(r)\n\treturn vars[\"name\"]\n}",
"func (o AzureFunctionOutputDataSourceOutput) FunctionName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AzureFunctionOutputDataSource) *string { return v.FunctionName }).(pulumi.StringPtrOutput)\n}",
"func (f LetFunction) Name() string {\n\treturn f.name\n}",
"func (fn *Func) FuncName() string {\n\treturn fn.fnName\n}",
"func (s *Instruction) FuncName() string {\n\tif name, ok := protoNameToFuncName[s.Protobuf.TypeName]; ok {\n\t\treturn name\n\t}\n\treturn \"?\"\n}",
"func (o FunctionOutput) FunctionId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Function) pulumi.StringPtrOutput { return v.FunctionId }).(pulumi.StringPtrOutput)\n}",
"func (t *Test) Name() string {\n\treturn t.callable.Name()\n}",
"func GetCaller(skipped int, long bool) string {\n\tif _, file, line, ok := runtime.Caller(skipped + KnownCallerDepth); ok {\n\t\tif base := filepath.Base(file); long {\n\t\t\t// Only the parent directory is added.\n\t\t\treturn filepath.Join(filepath.Base(filepath.Dir(file)), base) + \":\" + strconv.Itoa(line)\n\t\t} else {\n\t\t\treturn base + \":\" + strconv.Itoa(line)\n\t\t}\n\t}\n\treturn \"???:0\"\n}",
"func callerName(skip int) (pkgPath, funcName string, ok bool) {\n\tvar pc [1]uintptr\n\tn := runtime.Callers(skip+1, pc[:])\n\tif n != 1 {\n\t\treturn \"\", \"\", false\n\t}\n\n\tf := runtime.FuncForPC(pc[0]).Name()\n\ts := pkgPathRe.FindStringSubmatch(f)\n\tif len(s) < 3 {\n\t\tpanic(fmt.Errorf(\"failed to extract package path and function name from %q\", f))\n\t}\n\n\treturn s[1], s[2], true\n}",
"func (v *Function) GetName() (o string) {\n\tif v != nil {\n\t\to = v.Name\n\t}\n\treturn\n}",
"func (s UserSet) Function() string {\n\tres, _ := s.RecordCollection.Get(models.NewFieldName(\"Function\", \"function\")).(string)\n\treturn res\n}",
"func getFuncName(e *ast.CallExpr) (string, error) {\n\tfID, ok := e.Fun.(*ast.Ident)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"Syntax error: unexpected call type: %T\", e.Fun)\n\t}\n\treturn fID.Name, nil\n}",
"func GetFuncName(f interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()\n}",
"func (o AzureFunctionOutputDataSourceResponseOutput) FunctionName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AzureFunctionOutputDataSourceResponse) *string { return v.FunctionName }).(pulumi.StringPtrOutput)\n}",
"func (rl *RotateLogs) CurrentFileName() string {\n\trl.mutex.RLock()\n\tdefer rl.mutex.RUnlock()\n\treturn rl.curFn\n}",
"func (b *MainRoutineBuilder) Function(functionName string) string {\n\treturn b.functions[functionName]\n}",
"func (cx *CurCtx) FuncName() string {\n\tif nm, isMeth := cx.FuncDeclName(); !isMeth {\n\t\treturn nm\n\t}\n\treturn \"\"\n}",
"func getCallingStack() string {\n\tvar fileAndLine string\n\t_, file, line, ok := runtime.Caller(3)\n\tif ok {\n\t\tfiles := strings.Split(file, \"/\")\n\t\tfile = files[len(files)-1]\n\t\tfileAndLine = fmt.Sprintf(\"%s:%d :\", file, line)\n\t\treturn fileAndLine\n\t}\n\treturn \"\"\n}",
"func GetCaller(offset int) (file string, line int) {\n\tfpcs := make([]uintptr, 1)\n\n\tn := runtime.Callers(offset, fpcs)\n\tif n == 0 {\n\t\treturn \"n/a\", -1\n\t}\n\n\tfun := runtime.FuncForPC(fpcs[0] - 1)\n\tif fun == nil {\n\t\treturn \"n/a\", -1\n\t}\n\n\treturn fun.FileLine(fpcs[0] - 1)\n}",
"func FuncName(f interface{}) string {\n\tsplitFuncName := strings.Split(FuncPathAndName(f), \".\")\n\treturn splitFuncName[len(splitFuncName)-1]\n}",
"func (p *PropertyGenerator) getFnName(i int) string {\n\tif len(p.Kinds) == 1 {\n\t\treturn getMethod\n\t}\n\treturn fmt.Sprintf(\"%s%s\", getMethod, p.kindCamelName(i))\n}",
"func (t *LineTable) funcName(off uint32) string {\n\tif s, ok := t.funcNames[off]; ok {\n\t\treturn s\n\t}\n\ti := bytes.IndexByte(t.funcnametab[off:], 0)\n\ts := string(t.funcnametab[off : off+uint32(i)])\n\tt.funcNames[off] = s\n\treturn s\n}",
"func getCallerPosition() string {\n\t_, file, line, ok := runtime.Caller(2)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\tfile = path.Base(file)\n\treturn fmt.Sprintf(\"%s%s:%d%s\", colorWhere, file, line, colorClear)\n}",
"func FILE() string {\n\t_, file, _, _ := runtime.Caller(1)\n\treturn file\n}",
"func (f nullFunc) name() name {\n\treturn null\n}",
"func callerInfo(skip int) string {\n\t_, file, line, _ := runtime.Caller(skip)\n\treturn fmt.Sprintf(\"%v:%v\", file, line)\n}",
"func GetFunctionName(function interface{}) (string, error) {\n\tvalue := reflect.ValueOf(function)\n\tkind := value.Kind()\n\tif kind != reflect.Func {\n\t\treturn \"\", fmt.Errorf(\"Kind is not a func: %v\", kind)\n\t}\n\n\tf := runtime.FuncForPC(value.Pointer())\n\tif f == nil {\n\t\treturn \"\", fmt.Errorf(\"Pointer to func is nil\")\n\t}\n\n\tfName := regexp.MustCompile(`^.*[/\\\\]`).ReplaceAllString(f.Name(), \"\")\n\n\treturn fName, nil\n}",
"func Self() string {\n\treturn naiveSelf()\n}",
"func getFuncName(p uintptr) string {\n\tfnc := runtime.FuncForPC(p)\n\tif fnc == nil {\n\t\treturn \"<unknown>\"\n\t}\n\tname := fnc.Name() // E.g., \"long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm\"\n\tif strings.HasSuffix(name, \")-fm\") || strings.HasSuffix(name, \")·fm\") {\n\t\t// Strip the package name from method name.\n\t\tname = strings.TrimSuffix(name, \")-fm\")\n\t\tname = strings.TrimSuffix(name, \")·fm\")\n\t\tif i := strings.LastIndexByte(name, '('); i >= 0 {\n\t\t\tmethodName := name[i+1:] // E.g., \"long/path/name/mypkg.myfunc\"\n\t\t\tif j := strings.LastIndexByte(methodName, '.'); j >= 0 {\n\t\t\t\tmethodName = methodName[j+1:] // E.g., \"myfunc\"\n\t\t\t}\n\t\t\tname = name[:i] + methodName // E.g., \"long/path/name/mypkg.(mytype).\" + \"myfunc\"\n\t\t}\n\t}\n\tif i := strings.LastIndexByte(name, '/'); i >= 0 {\n\t\t// Strip the package name.\n\t\tname = name[i+1:] // E.g., \"mypkg.(mytype).myfunc\"\n\t}\n\treturn name\n}"
] | [
"0.7722278",
"0.766978",
"0.76039034",
"0.7582178",
"0.7449456",
"0.7403062",
"0.70837003",
"0.7075239",
"0.6886995",
"0.68866515",
"0.68251276",
"0.68018013",
"0.6781213",
"0.6740152",
"0.67332137",
"0.66841245",
"0.66416866",
"0.66173244",
"0.6587148",
"0.65851456",
"0.65746367",
"0.655157",
"0.6547077",
"0.654094",
"0.6538025",
"0.6535191",
"0.65030086",
"0.650037",
"0.64811057",
"0.64791167",
"0.6466215",
"0.64526254",
"0.64453894",
"0.642857",
"0.6424168",
"0.64218324",
"0.6420794",
"0.6403818",
"0.64018",
"0.63949794",
"0.6378275",
"0.63375944",
"0.6336376",
"0.6335996",
"0.6320839",
"0.6319601",
"0.63175285",
"0.6315676",
"0.63125503",
"0.62573636",
"0.6246019",
"0.6228357",
"0.62165916",
"0.620909",
"0.62025785",
"0.61857206",
"0.6171201",
"0.6166255",
"0.6162876",
"0.61572444",
"0.6144164",
"0.6137589",
"0.61373246",
"0.61144495",
"0.6093531",
"0.6091013",
"0.60780674",
"0.60756326",
"0.6069065",
"0.60644674",
"0.60640156",
"0.6049926",
"0.60498554",
"0.60364896",
"0.6032044",
"0.59832954",
"0.59667856",
"0.59665155",
"0.59608686",
"0.5954241",
"0.5941301",
"0.5936304",
"0.5928551",
"0.5920028",
"0.5910365",
"0.5899655",
"0.58984053",
"0.58970374",
"0.5876163",
"0.584755",
"0.5839996",
"0.5833327",
"0.58190584",
"0.5801952",
"0.57942253",
"0.5794094",
"0.57742673",
"0.57723045",
"0.57662433",
"0.57641876"
] | 0.84559995 | 0 |
Value returns the value of the string | func (s *SyncString) Value() string {
s.Lock()
defer s.Unlock()
return s.string
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (s *String) GetValue() string {\n\treturn s.value\n}",
"func (sval *ScalarValue) Value() string {\n\tswitch {\n\tcase strings.HasPrefix(sval.Raw, `\"\"\"`):\n\t\treturn parseBlockString(sval.Raw)\n\tcase strings.HasPrefix(sval.Raw, `\"`):\n\t\treturn parseString(sval.Raw)\n\tdefault:\n\t\treturn sval.Raw\n\t}\n}",
"func (t Type) Value() string {\n\tstr := string(t)\n\tv, ok := builtin[str]\n\tif !ok {\n\t\treturn gocase.To(strcase.ToCamel(str))\n\t}\n\n\treturn v\n}",
"func (s DnaString) GetValue() string {\n\treturn s.Value\n}",
"func (s *StringChecksum) Value() string {\n\treturn s.value\n}",
"func (p stringProperty) Value() (string, error) {\n\treturn p.value, nil\n}",
"func (c *StringChanger) Value() (string, error) {\n\tif c.err != nil {\n\t\treturn \"\", c.err\n\t}\n\tif c.node.content.value() == nil {\n\t\treturn \"\", nil\n\t}\n\treturn c.node.content.value().(string), nil\n}",
"func (s String) Value() (driver.Value, error) {\n\tif s == \"\" {\n\t\treturn nil, nil\n\t}\n\treturn string(s), nil\n}",
"func (s Stash) Value() string {\n\tvals := utils.MapKeys(s.payload)\n\tif len(vals) < 1 {\n\t\treturn \"\"\n\t}\n\n\treturn expand(fmt.Sprintf(\"%v\", vals[0]))\n}",
"func (t *Token) Value() string {\n\treturn t.strBuilder.String()\n}",
"func (this *NowStr) Value() value.Value {\n\treturn nil\n}",
"func (b *baseSemanticUTF8String) Value() interface{} {\n\treturn b.V\n}",
"func (f Formal) Value() string {\n\treturn string(f)\n}",
"func (self Param) Value() string { return self.value }",
"func (d *Description) Value() string {\n\tif d == nil {\n\t\treturn \"\"\n\t}\n\tif strings.HasPrefix(d.Raw, `\"\"\"`) {\n\t\treturn parseBlockString(d.Raw)\n\t}\n\treturn parseString(d.Raw)\n}",
"func StringValue(s string) Value { return Value{Typ: '$', Str: []byte(s)} }",
"func (l *LangPackString) GetValue() (value string) {\n\tif l == nil {\n\t\treturn\n\t}\n\treturn l.Value\n}",
"func (o unicodeVersion) GetValue() interface{} {\n\treturn string(o)\n}",
"func (s *StringSymbol) GetValue() string {\n\treturn s.StringData.GetValue()\n}",
"func (p *Property) ValueString() string {\n\treturn p.vstr\n}",
"func (m Model) Value() string {\n\treturn string(m.value)\n}",
"func (s *StringSetting) Value() interface{} {\n\treturn *s.StringValue\n}",
"func (f *Title) Value() string {\n\ts := decode.UTF16(f.data)\n\treturn trim.Nil(s)\n}",
"func (x StrState) Value() (driver.Value, error) {\n\treturn x.String(), nil\n}",
"func (code Code) Value() (value driver.Value, err error) {\n\tif code == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tif err = code.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn code.String(), nil\n}",
"func (o GetTxtRecordRecordOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetTxtRecordRecord) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (o GetTxtRecordRecordOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetTxtRecordRecord) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (o HealthCheckTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HealthCheckTag) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (stringEntry *String) GetValue() interface{} {\n\treturn stringEntry.trueValue\n}",
"func (o DomainTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DomainTag) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (o ThingTypeTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ThingTypeTag) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (akv StringKeyValue) Value() string {\n\treturn akv.orig.Value\n}",
"func (nt NullString) Value() (driver.Value, error) {\n\tif !nt.Valid {\n\t\treturn nil, nil\n\t}\n\treturn nt.String, nil\n}",
"func (i *StringIterator) Value() Object {\n\treturn &Char{Value: i.v[i.i-1]}\n}",
"func (id PlannerID) Value() string { return id.value }",
"func (o MetadataFilterLabelMatchOutput) Value() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v MetadataFilterLabelMatch) *string { return v.Value }).(pulumi.StringPtrOutput)\n}",
"func (w *Word) Val() string {\n\treturn w.origStr[w.cptr.begin : w.cptr.begin+w.cptr.length]\n}",
"func (r *RegexpObject) Value() interface{} {\n\treturn r.regexp.String()\n}",
"func (o AppSecretOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AppSecret) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (o GetAppSecretOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppSecret) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (l *Label) Value() string {\n\treturn l.value\n}",
"func (o EnvironmentDaprComponentSecretOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v EnvironmentDaprComponentSecret) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (n NullString) Value() (driver.Value, error) {\n\tif n == \"\" {\n\t\treturn nil, nil\n\t}\n\treturn string(n), nil\n}",
"func (node *SimpleNode) Value() string {\n\tif node == nil {\n\t\treturn \"\"\n\t}\n\n\treturn node.value\n}",
"func (u UnsafeString) Value() (driver.Value, error) {\n\tpanic(\"UnsafeStrings and its constants NOW, DEFAULT ... are disabled when EnableInterpolation==false\")\n}",
"func (o EnvironmentDaprComponentMetadataOutput) Value() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v EnvironmentDaprComponentMetadata) *string { return v.Value }).(pulumi.StringPtrOutput)\n}",
"func (o TaintOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v Taint) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (o SignalingChannelTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SignalingChannelTag) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (s *Scalar) String() string { return s.Value }",
"func (s State) Value() (driver.Value, error) {\n\treturn string(s), nil\n}",
"func (o StreamTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v StreamTag) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (this *ClockStr) Value() value.Value {\n\treturn nil\n}",
"func Value(value string) *SimpleElement { return newSEString(\"value\", value) }",
"func (v *Value) String() string {\n\treturn v.val\n}",
"func (o DnsRecordOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DnsRecord) pulumi.StringOutput { return v.Value }).(pulumi.StringOutput)\n}",
"func (b baseValue) String() string {\n\treturn b.value\n}",
"func (cfg *Config) Value(name string) string {\n\tv, _ := cfg.findLast(name)\n\treturn string(v)\n}",
"func (n *ResourceName) Value() (driver.Value, error) {\n\tif n == nil {\n\t\treturn nil, nil\n\t}\n\n\treturn n.String(), nil\n}",
"func (ns NullString) Value() (driver.Value, error) {\n\tif !ns.Valid {\n\t\treturn nil, nil\n\t}\n\treturn ns.String, nil\n}",
"func (ns NullString) Value() (driver.Value, error) {\n\tif !ns.Valid {\n\t\treturn nil, nil\n\t}\n\treturn ns.String, nil\n}",
"func (ns NullString) Value() (driver.Value, error) {\n\tif !ns.Valid {\n\t\treturn nil, nil\n\t}\n\n\treturn ns.String, nil\n}",
"func (s *String) Inspect() string { return s.Value }",
"func (o MetadataFilterLabelMatchResponseOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v MetadataFilterLabelMatchResponse) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (m *StringMapStringSetting) Value() interface{} {\n\treturn *m.StringMapStringValue\n}",
"func (o CaaRecordRecordOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CaaRecordRecord) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (o LookupGroupVariableResultOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupGroupVariableResult) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (x NullStrState) Value() (driver.Value, error) {\n\tif !x.Valid {\n\t\treturn nil, nil\n\t}\n\treturn x.StrState.String(), nil\n}",
"func (c Cycle) Value() (driver.Value, error) {\n\ts := c.String()\n\tif s == \"\" {\n\t\treturn nil, nil\n\t}\n\n\treturn s, nil\n}",
"func (c Cryptstring) Value() (driver.Value, error) {\n\tok, err := IsBcrypt(string(c))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ok {\n\t\treturn string(c), nil\n\t}\n\thash, err := bcrypt.GenerateFromPassword([]byte(c), bcrypt.DefaultCost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn string(hash), nil\n}",
"func (d Driver) ValueString(c int) string {\n\treturn `$` + strconv.Itoa(c)\n}",
"func (s Version) Value() (driver.Value, error) {\n\treturn s.String(), nil\n}",
"func (fa formulaArg) Value() (value string) {\n\tswitch fa.Type {\n\tcase ArgNumber:\n\t\tif fa.Boolean {\n\t\t\tif fa.Number == 0 {\n\t\t\t\treturn \"FALSE\"\n\t\t\t}\n\t\t\treturn \"TRUE\"\n\t\t}\n\t\treturn fmt.Sprintf(\"%g\", fa.Number)\n\tcase ArgString:\n\t\treturn fa.String\n\tcase ArgError:\n\t\treturn fa.Error\n\t}\n\treturn\n}",
"func (t Token) Value() (driver.Value, error) {\n\treturn string(t.Bytes()), nil\n}",
"func (d *Downloader) getValue(line string) string {\n\tsplitLine := strings.Split(line, \" = \")\n\treturn (splitLine[len(splitLine)-1])\n}",
"func (ip IPv4) Value() string {\n\treturn ip.value\n}",
"func (o PatientIdResponseOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v PatientIdResponse) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (c Casing) Value() string {\n\treturn c.getCasingValue()\n}",
"func (o GetCAARecordRecordOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetCAARecordRecord) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (o DiagnosticBackendRequestDataMaskingHeaderOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticBackendRequestDataMaskingHeader) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (o PatientIdOutput) Value() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v PatientId) *string { return v.Value }).(pulumi.StringPtrOutput)\n}",
"func (l *Label) Value(g *Graph) string {\n Assert(nilLabel, l != nil)\n Assert(nilGraph, g != nil)\n Assert(nilTextStore, g.textStore != nil)\n \n t, _ := g.textStore.find(l.value) // TODO don't ignore error\n return t.Value()\n}",
"func (n Number) Value() (driver.Value, error) {\n\treturn string(n), nil\n}",
"func (o TokenPasswordPassword1Output) Value() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TokenPasswordPassword1) *string { return v.Value }).(pulumi.StringPtrOutput)\n}",
"func (s *String) Get() string {\n\treturn string(s.Value)\n}",
"func (number Number) Value() (value driver.Value, err error) {\n\tif number == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tif err = number.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn number.String(), nil\n}",
"func (o *Output) GetValue() string {\n\tif !o.ShowValue || o.Value == nil {\n\t\treturn \"\"\n\t}\n\tmarshaled, err := json.MarshalIndent(o.Value, \"\", \" \")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvalue := string(marshaled)\n\tif value == `null` {\n\t\treturn \"\" // types.Nil\n\t}\n\treturn value // everything else\n}",
"func (o ApplicationPasswordOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ApplicationPassword) pulumi.StringOutput { return v.Value }).(pulumi.StringOutput)\n}",
"func (cv PipVersion) Value() string {\n\treturn cv.value\n}",
"func (c *KeyStringValueChanger) Value() (string, error) {\n\tif c.err != nil {\n\t\treturn \"\", c.err\n\t}\n\tif c.node.content.value() == nil {\n\t\treturn \"\", nil\n\t}\n\treturn c.node.content.value().(string), nil\n}",
"func getValue(valueField string, as *args.Store) (result *string) {\n\t// No input? No result!\n\tif !utils.IsSet(valueField) {\n\t\treturn nil\n\t}\n\n\t// check whether a parameter reference was provided, i.e. something like \"param:<name>\"\n\tparamName := regexParamValue.FindStringSubmatch(valueField)\n\tif len(paramName) > 0 {\n\t\tutils.Assert(len(paramName) == 2, \"Should contain the matching text plus a single capturing group\")\n\n\t\targValue, exists := as.Get(paramName[1])\n\t\tif exists {\n\t\t\treturn &argValue\n\t\t}\n\t\treturn nil\n\t}\n\n\t// else assume that provided value was a static text\n\treturn &valueField\n}",
"func (options *Options) ValueStr(name string) string {\n\treturn Str(options.Value(name))\n}",
"func (p Policy) Value() (string, error) {\n\tif data, err := json.Marshal(p); err != nil {\n\t\treturn \"\", err\n\t} else {\n\t\treturn string(data), nil\n\t}\n}",
"func (o DiagnosticBackendResponseDataMaskingHeaderOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticBackendResponseDataMaskingHeader) string { return v.Value }).(pulumi.StringOutput)\n}",
"func (args *Args) Value(label string) string {\n if val, ok := (*args)[label]; ok {\n return val\n }\n return \"\"\n}",
"func (obj *Value) GetString() string {\n\treturn obj.Candy().Guify(\"g_value_get_string\", obj).String()\n}",
"func (d Driver) ValueString(i int) string {\n\treturn `@p` + strconv.Itoa(i)\n}",
"func (o GoogleCloudRetailV2alphaConditionQueryTermOutput) Value() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v GoogleCloudRetailV2alphaConditionQueryTerm) *string { return v.Value }).(pulumi.StringPtrOutput)\n}",
"func (f *TagField) Value() string {\n\treturn f.value\n}",
"func (me *WsPacket) StrValue() string {\n\n\tswitch me.Type {\n\tcase \"string\":\n\t\treturn me.RawValue.(string)\n\n\tcase \"double\":\n\t\treturn fmt.Sprintf(\"%.f\", me.RawValue)\n\n\tcase \"float\":\n\t\treturn strconv.FormatFloat(me.RawValue.(float64), 'f', 20, 64)\n\t}\n\n\treturn \"#### OOOPS ##########\"\n}",
"func (ns NullString) Value() (driver.Value, error) {\n\tif ns.IsNull() {\n\t\treturn nil, nil\n\t}\n\treturn ns.Text, nil\n}"
] | [
"0.75110775",
"0.74706155",
"0.74118876",
"0.73986423",
"0.73799694",
"0.7359382",
"0.72732645",
"0.72452945",
"0.7236553",
"0.7220005",
"0.72173685",
"0.71555114",
"0.71239245",
"0.71044296",
"0.7084027",
"0.70409364",
"0.70191246",
"0.70084894",
"0.6993719",
"0.6992071",
"0.69840467",
"0.6920377",
"0.69069105",
"0.69067216",
"0.69066304",
"0.68772084",
"0.68772084",
"0.68441606",
"0.68291074",
"0.682101",
"0.68174046",
"0.6817233",
"0.68052727",
"0.67977595",
"0.67834234",
"0.6782086",
"0.6777485",
"0.67674136",
"0.67642134",
"0.6761256",
"0.6758575",
"0.67584395",
"0.6730781",
"0.67291766",
"0.6726927",
"0.67194694",
"0.6716395",
"0.67154795",
"0.67026234",
"0.66989714",
"0.6687285",
"0.66705215",
"0.66695905",
"0.66684335",
"0.66615105",
"0.6660145",
"0.6649287",
"0.6648158",
"0.6646867",
"0.6646867",
"0.66453725",
"0.664089",
"0.66389096",
"0.66181207",
"0.6608312",
"0.6604247",
"0.6603157",
"0.659913",
"0.6588552",
"0.6585355",
"0.65736717",
"0.65716964",
"0.65711004",
"0.6567717",
"0.6559617",
"0.65582526",
"0.6551005",
"0.65482885",
"0.6544317",
"0.6538258",
"0.653616",
"0.6534154",
"0.65325195",
"0.65318674",
"0.6531563",
"0.6525541",
"0.65237063",
"0.65223706",
"0.6521619",
"0.65211076",
"0.6521076",
"0.65146524",
"0.6512948",
"0.6507393",
"0.65003383",
"0.6499528",
"0.6487254",
"0.64858425",
"0.64815754",
"0.6480747"
] | 0.75706613 | 0 |
Set sets the value of the string | func (s *SyncString) Set(v string) {
s.Lock()
defer s.Unlock()
s.string = v
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (sf *String) Set(x string) error {\n\tsf.Value = x\n\tsf.set = true\n\treturn nil\n}",
"func (s *String) Set(str string) {\n\ts.Value = []byte(str)\n\ts.Length = int32(len(s.Value))\n}",
"func (l *settableString) Set(s string) error {\n\tl.s = s\n\tl.isSet = true\n\treturn nil\n}",
"func (a *Mpflt) SetString(as string)",
"func (me *TKerningValue) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (s *NullString) Set(value string) {\n\ts.s.Valid = true\n\ts.s.String = value\n}",
"func (s *DnaString) SetValue(str string) {\n\ts.Value = str\n}",
"func (me *TSAFPTCNCode) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TSpacingValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TClipValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TLanguageCodeType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TLengthType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TScriptType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TClipPathValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (s *StringSymbol) SetValue(value interface{}) {\n\ts.StringData.SetValue(value)\n}",
"func (s *StringValue) Set(val string) error {\n\t*s = StringValue(val)\n\treturn nil\n}",
"func (me *TSAFPTUNNumber) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (x *StrState) Set(val string) error {\n\tv, err := ParseStrState(val)\n\t*x = v\n\treturn err\n}",
"func (ns *NullString) Set(value string) {\n\n\tif ns == nil {\n\t\t*ns = NullString{}\n\t}\n\n\tns.Text = value\n\tns.IsNotNull = true\n\n}",
"func (me *TSAFPTHashControl) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (suc *StringUpperCaser) SetString(input string) {\n\tsuc.WriteString(input)\n}",
"func (v *value) SetString(value string) bool {\n\ttmp := C.CString(value)\n\treturn (bool)(C.setStringValue(C.uint32(v.cRef.homeId), C.uint64(v.cRef.valueId.id), tmp))\n}",
"func (me *TxsdShow) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TxsdShow) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TStrokeMiterLimitValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func SetString(key string, val string) error {\n\terr := SetStringWithExpire(key, val, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (c StrConv) Set(target interface{}, str string) (ok bool) {\n\tv := reflect.Indirect(reflect.ValueOf(target))\n\treturn c.SetValue(v, str)\n}",
"func (me *TMaskValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TBaselineShiftValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (instance *Instance) SetString(fieldName, value string) error {\n\tfieldNameCStr := C.CString(fieldName)\n\tdefer C.free(unsafe.Pointer(fieldNameCStr))\n\n\tvalueCStr := C.CString(value)\n\tdefer C.free(unsafe.Pointer(valueCStr))\n\n\tretcode := int(C.RTI_Connector_set_string_into_samples(unsafe.Pointer(instance.output.connector.native), instance.output.nameCStr, fieldNameCStr, valueCStr))\n\treturn checkRetcode(retcode)\n}",
"func (e *Eth) SetString(s string, base int) (*Eth, bool) {\n\tw, ok := e.ToInt().SetString(s, base)\n\treturn (*Eth)(w), ok\n}",
"func (me *TPathDataType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TPostalCodePT) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TLanguageCodesType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TextDecorationValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (s *String) SetValue(value interface{}) {\n\tswitch value.(type) {\n\tcase *String:\n\t\ts.value = value.(*String).value\n\tcase String:\n\t\ts.value = value.(String).value\n\tcase string:\n\t\ts.value = value.(string)\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Invalid data type for assignment to string : %T\", value))\n\t}\n\n\ts.value = strings.Trim(s.value, \"\\\"\")\n}",
"func (me *TCursorValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (fs *FakeSession) SetStr(oid string, value string) *FakeSession {\n\treturn fs.SetByte(oid, []byte(value))\n}",
"func (me *TxsdTextPathTypeSpacing) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (i *InMemory) SetString(key, value string) {\n\ti.lock.Lock()\n\tdefer i.lock.Unlock()\n\n\ti.data[key] = value\n}",
"func (se *SimpleElement) SetString(value string) {\n\tse.value = value\n}",
"func (me *TMarkerValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TartIdType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (jf *JFile) SetString(JSONpath, value string) error {\n\t_, parentNode, err := jf.rootnode.GetNodes(JSONpath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm, ok := parentNode.CheckMap()\n\tif !ok {\n\t\treturn errors.New(\"Parent is not a map: \" + JSONpath)\n\t}\n\n\t// Set the string\n\tm[lastpart(JSONpath)] = value\n\n\tnewdata, err := jf.rootnode.PrettyJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn jf.Write(newdata)\n}",
"func (z *E12) SetString(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11 string) *E12 {\n\tz.C0.SetString(s0, s1, s2, s3, s4, s5)\n\tz.C1.SetString(s6, s7, s8, s9, s10, s11)\n\treturn z\n}",
"func (obj *Value) SetString(v string) {\n\tobj.Candy().Guify(\"g_value_set_string\", obj, v)\n}",
"func (me *TNumberOrPercentageType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (z *Int) SetString(s string, base int) (*Int, bool) {}",
"func (s *SecretString) Set(newValue string) error {\n\t*s = SecretString(newValue)\n\treturn nil\n}",
"func (me *TxsdActuate) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TxsdActuate) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (recv *Value) SetString(vString string) {\n\tc_v_string := C.CString(vString)\n\tdefer C.free(unsafe.Pointer(c_v_string))\n\n\tC.g_value_set_string((*C.GValue)(recv.native), c_v_string)\n\n\treturn\n}",
"func (me *TLengthsType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TxsdTextPathTypeMethod) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TPointsType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (s *StringPointerValue) Set(str string) error {\n\tif str == \"\" {\n\t\treturn nil\n\t}\n\ts.stringPtr = &str\n\treturn nil\n}",
"func (self Text) SetString(s string) {\n\tC.sfText_setString(self.Cref, C.CString(s))\n}",
"func (me *TNumbersType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TSAFPTtextTypeMax40Car) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TEnableBackgroundValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *THITStatus) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TColorType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TNotificationTransport) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TcoordinatesType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (v *T) Set(s string) error {\n\tintV, err := strconv.ParseUint(s, 10, 32)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*v = T(intV)\n\treturn nil\n}",
"func SetString(ctx *system.Context, gconfig *models.Guild, name, defaultVal string, value *string) {\n\tif ctx.Args.After() != \"\" {\n\t\tif ctx.Args.After() == flagDefault {\n\t\t\t*value = defaultVal\n\t\t} else {\n\t\t\t*value = ctx.Args.After()\n\t\t}\n\n\t\terr := ctx.System.DB.SaveGuild(ctx.Msg.GuildID, gconfig)\n\t\tif err != nil {\n\t\t\tctx.ReplyError(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tctx.ReplyNotify(fmt.Sprintf(\"%s: `%s`\", name, *value))\n}",
"func (me *TOpacityValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TSAFPTGLAccountID) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (w *BodyPart) Set(s string) {\n\tw.Reset()\n\tw.WriteString(s)\n}",
"func (v *Value2) Set(s string) error {\n\tz, err := Parse2(s)\n\tif err == nil {\n\t\t*v = Value2(z)\n\t}\n\treturn err\n}",
"func (me *TStrokeDashOffsetValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (m *Metadata) SetStr(value, v string) error {\n\tif err := validStr(value); err != nil {\n\t\treturn err\n\t}\n\tm.mu.Lock()\n\tm.valuesStr[value] = v\n\tm.mu.Unlock()\n\treturn nil\n}",
"func (me *TFilterValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TSAFPTtextTypeMandatoryMax90Car) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TContentTypeType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TpubStatus) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TSAFPTtextTypeMandatoryMax35Car) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TFontFamilyValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (c *StringChanger) SetValue(v string) (string, error) {\n\tif c.err != nil {\n\t\treturn \"\", c.err\n\t}\n\toldValue := c.node.content.value().(string)\n\tnewValue := justValue{v}\n\tif !c.node.isAllowed(newValue, true) {\n\t\treturn \"\", failure.New(\"setting duplicate string value is not allowed\")\n\t}\n\tc.node.content = newValue\n\treturn oldValue, nil\n}",
"func (me *TCoordinateType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TGlyphOrientationHorizontalValueType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TSAFPTtextTypeMandatoryMax50Car) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TNumberOptionalNumberType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (z *E6) SetString(s1, s2, s3, s4, s5, s6 string) *E6 {\n\tz.B0.SetString(s1, s2)\n\tz.B1.SetString(s3, s4)\n\tz.B2.SetString(s5, s6)\n\treturn z\n}",
"func (v *Value) SetString(val string) {\n\tcstr := C.CString(val)\n\tdefer C.free(unsafe.Pointer(cstr))\n\tC.g_value_set_string(v.Native(), (*C.gchar)(cstr))\n}",
"func (v *Value10) Set(s string) error {\n\tz, err := Parse10(s)\n\tif err == nil {\n\t\t*v = Value10(z)\n\t}\n\treturn err\n}",
"func (me *TStyleSheetType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (e *stringElement) Set(value interface{}) error {\n\te.valid = true\n\tif value == nil {\n\t\te.valid = false\n\t\treturn nil\n\t}\n\tswitch value.(type) {\n\tcase string:\n\t\tif value.(string) == Nil {\n\t\t\te.valid = false\n\t\t} else {\n\t\t\te.e = string(value.(string))\n\t\t}\n\tcase int:\n\t\te.e = strconv.Itoa(value.(int))\n\tcase int64:\n\t\te.e = strconv.FormatInt(value.(int64), 10)\n\tcase uint64:\n\t\te.e = strconv.FormatUint(value.(uint64), 10)\n\tcase float32:\n\t\te.e = strconv.FormatFloat(float64(value.(float32)), 'f', 6, 64)\n\tcase float64:\n\t\te.e = strconv.FormatFloat(value.(float64), 'f', 6, 64)\n\tcase bool:\n\t\tb := value.(bool)\n\t\tif b {\n\t\t\te.e = \"true\"\n\t\t} else {\n\t\t\te.e = \"false\"\n\t\t}\n\tcase NaNElement:\n\t\te.e = \"NaN\"\n\tcase Element:\n\t\tif value.(Element).IsValid() {\n\t\t\tv, err := value.(Element).String()\n\t\t\tif err != nil {\n\t\t\t\te.valid = false\n\t\t\t\treturn err\n\t\t\t}\n\t\t\te.e = v\n\t\t} else {\n\t\t\te.valid = false\n\t\t\treturn nil\n\t\t}\n\tdefault:\n\t\te.valid = false\n\t\treturn fmt.Errorf(\"Unsupported type '%T' conversion to a string\", value)\n\t}\n\treturn nil\n}",
"func (s *StringReference) SetValue(v string) *StringReference {\n\ts.Value = &v\n\treturn s\n}",
"func (me *TSAFPTtextTypeMandatoryMax100Car) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (akv StringKeyValue) SetValue(v string) {\n\takv.orig.Value = v\n}",
"func (me *TSAFTPTPaymentType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TxsdType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TxsdType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TSAFPTtextTypeMandatoryMax60Car) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (me *TCoordinatesType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (b *baseValue) Set(s string) error {\n\tif err := b.validationFunc(s); err != nil {\n\t\treturn err\n\t}\n\tb.value = s\n\treturn nil\n}",
"func (me *TRequesterStatistic) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (s *StringEnum) Set(arg string) error {\n\tif _, ok := s.choices[s.choiceMapper(arg)]; !ok {\n\t\tmsg := \"%w (valid choices: %v\"\n\t\tif s.caseInsensitive {\n\t\t\tmsg += \" [case insensitive]\"\n\t\t}\n\t\tmsg += \")\"\n\t\treturn fmt.Errorf(msg, ErrInvalidChoice, s.choiceNames)\n\t}\n\n\ts.val = arg\n\n\treturn nil\n}",
"func (me *TrefreshModeEnumType) Set(s string) { (*xsdt.String)(me).Set(s) }"
] | [
"0.79293394",
"0.7827096",
"0.7804932",
"0.7693435",
"0.7640353",
"0.75797755",
"0.7420005",
"0.73857605",
"0.7376449",
"0.73744327",
"0.73223305",
"0.73069286",
"0.7282563",
"0.72566503",
"0.72496474",
"0.7207861",
"0.7196015",
"0.7186725",
"0.7183804",
"0.7176873",
"0.7136401",
"0.71319276",
"0.71314406",
"0.71314406",
"0.70950884",
"0.7093039",
"0.7077168",
"0.70551854",
"0.7044822",
"0.7043966",
"0.70206547",
"0.6992466",
"0.6975414",
"0.6968857",
"0.6967085",
"0.69643325",
"0.69613945",
"0.6957366",
"0.6954184",
"0.69496083",
"0.6927424",
"0.69250524",
"0.6908622",
"0.6896327",
"0.6879186",
"0.68774223",
"0.6875752",
"0.6867806",
"0.686373",
"0.68586034",
"0.68586034",
"0.6843352",
"0.68422997",
"0.68303514",
"0.68284476",
"0.67940915",
"0.67882097",
"0.6786732",
"0.67747456",
"0.6773278",
"0.6769133",
"0.6766666",
"0.6765001",
"0.676047",
"0.6756955",
"0.67535686",
"0.6742564",
"0.67291737",
"0.67227554",
"0.6703056",
"0.67013013",
"0.669507",
"0.6691918",
"0.6682398",
"0.6674913",
"0.6664246",
"0.6659027",
"0.66542065",
"0.6637",
"0.6636734",
"0.6635784",
"0.6630135",
"0.6626378",
"0.6619087",
"0.6613403",
"0.66132283",
"0.6612523",
"0.6600089",
"0.65998006",
"0.6591382",
"0.658995",
"0.6588962",
"0.65856665",
"0.65856665",
"0.6585153",
"0.6581883",
"0.65813637",
"0.6581306",
"0.6580848",
"0.6579858"
] | 0.7578262 | 6 |
ClickableURL fixes address in url to make sure it's clickable, e.g. it replaces "undefined" address like 0.0.0.0 used in network listeners format with loopback 127.0.0.1 | func ClickableURL(in string) string {
out, err := url.Parse(in)
if err != nil {
return in
}
host, port, err := net.SplitHostPort(out.Host)
if err != nil {
return in
}
ip := net.ParseIP(host)
// if address is not an IP, unspecified, e.g. all interfaces 0.0.0.0 or multicast,
// replace with localhost that is clickable
if len(ip) == 0 || ip.IsUnspecified() || ip.IsMulticast() {
out.Host = fmt.Sprintf("127.0.0.1:%v", port)
return out.String()
}
return out.String()
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func SanitizeURL(in string) string {\n\treturn sanitizeURLWithFlags(in, purell.FlagsSafe|purell.FlagRemoveTrailingSlash|purell.FlagRemoveDotSegments|purell.FlagRemoveDuplicateSlashes|purell.FlagRemoveUnnecessaryHostDots|purell.FlagRemoveEmptyPortSeparator)\n}",
"func fixImgurLink(link string) string {\n\toriginalurl, err := url.Parse(link)\n\n\tif err != nil || originalurl.Host != \"imgur.com\" {\n\t\treturn link\n\t}\n\n\treturn fmt.Sprintf(\"http://i.imgur.com%s.gif\", originalurl.Path)\n}",
"func sanitizeUrl(href string, domain string) (url.URL, bool){\n\tif strings.Trim(href, \" \") == \"\"{\n\t\treturn url.URL{}, false\n\t}\n\n\tu, err := url.Parse(href)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn url.URL{}, false\n\t}\n\n\tif u.Host == \"\"{\n\t\tu.Host = domain\n\t} else if u.Host != domain || u.Path == \"/\" || u.Path == \"\"{\n\t\treturn url.URL{}, false\n\t}\n\n\tif u.Scheme == \"\"{\n\t\tu.Scheme = \"https\"\n\t}\n\n\t// Ignore alien schemas [ mailto, ftp, etc ]\n\tif !strings.Contains(u.Scheme, \"http\") {\n\t\treturn url.URL{}, false\n\t}\n\n\t// TODO: Check URL is accessible\n\n\treturn *u, true\n}",
"func (bot *Bot) handleURLsListener(message events.EventMessage) {\n\n\t// Find all URLs in the message.\n\tlinks := xurls.Strict().FindAllString(message.Message, -1)\n\t// Remove multiple same links from one message.\n\tlinks = utils.RemoveDuplicates(links)\n\tfor i := range links {\n\t\t// Validate the url.\n\t\tbot.Log.Infof(\"Got link %s\", links[i])\n\t\tlink := utils.StandardizeURL(links[i])\n\t\tbot.Log.Debugf(\"Standardized to: %s\", link)\n\n\t\t// Try to get the body of the page.\n\t\terr, finalLink, body := bot.GetPageBody(link, map[string]string{})\n\t\tif err != nil {\n\t\t\tbot.Log.Warningf(\"Could't fetch the body: %s\", err)\n\t\t}\n\n\t\t// Update link if needed.\n\t\tif finalLink != \"\" {\n\t\t\tlink = finalLink\n\t\t}\n\n\t\t// Iterate over meta tags to get the description\n\t\tdescription := \"\"\n\t\tmetas := metaRe.FindAllStringSubmatch(string(body), -1)\n\t\tfor i := range metas {\n\t\t\tif len(metas[i]) > 1 {\n\t\t\t\tisDesc := descRe.FindString(metas[i][0])\n\t\t\t\tif isDesc != \"\" && (len(metas[i][1]) > len(description)) {\n\t\t\t\t\tdescription = utils.CleanString(metas[i][1], true)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Get the title\n\t\ttitle := \"\"\n\t\tmatch := titleRe.FindStringSubmatch(string(body))\n\t\tif len(match) > 1 {\n\t\t\ttitle = utils.CleanString(match[1], true)\n\t\t}\n\n\t\t// Insert URL into the db.\n\t\tbot.Log.Debugf(\"Storing URL info for: %s\", link)\n\t\tif _, err := bot.Db.Exec(`INSERT INTO urls(transport, channel, nick, link, quote, title) VALUES(?, ?, ?, ?, ?, ?)`,\n\t\t\tmessage.TransportName, message.Channel, message.Nick, link, message.Message, title); err != nil {\n\t\t\tbot.Log.Warningf(\"Can't add url to database: %s\", err)\n\t\t}\n\n\t\t// Trigger url found message.\n\t\tbot.EventDispatcher.Trigger(events.EventMessage{\n\t\t\tmessage.TransportName,\n\t\t\tmessage.TransportFormatting,\n\t\t\tevents.EventURLFound,\n\t\t\tmessage.Nick,\n\t\t\tmessage.UserId,\n\t\t\tmessage.Channel,\n\t\t\tlink,\n\t\t\tmessage.Context,\n\t\t\tmessage.AtBot,\n\t\t})\n\n\t\tlinkKey := link + message.Channel\n\t\t// If we can't announce yet, skip this link.\n\t\tif time.Since(bot.lastURLAnnouncedTime[linkKey]) < bot.Config.UrlAnnounceIntervalMinutes*time.Minute {\n\t\t\tcontinue\n\t\t}\n\t\tif lines, exists := bot.lastURLAnnouncedLinesPassed[linkKey]; exists && lines < bot.Config.UrlAnnounceIntervalLines {\n\t\t\tcontinue\n\t\t}\n\n\t\t// On mattermost we can skip all link info display.\n\t\tif message.TransportName == \"mattermost\" {\n\t\t\treturn\n\t\t}\n\n\t\t// Announce the title, save the description.\n\t\tif title != \"\" {\n\t\t\tif description != \"\" {\n\t\t\t\tbot.SendNotice(&message, title+\" …\")\n\t\t\t} else {\n\t\t\t\tbot.SendNotice(&message, title)\n\t\t\t}\n\t\t\tbot.lastURLAnnouncedTime[linkKey] = time.Now()\n\t\t\tbot.lastURLAnnouncedLinesPassed[linkKey] = 0\n\t\t\t// Keep the long info for later.\n\t\t\tbot.AddMoreInfo(message.TransportName, message.Channel, description)\n\t\t}\n\t}\n}",
"func ToURL(s string) string {\n\ts = strings.Trim(s, \" \")\n\ts = strings.ReplaceAll(s, \" \", \"%20\")\n\treturn s\n}",
"func (s *sanitizer) sanitizeLink(l string) string {\n\tvar p *url.URL\n\tvar err error\n\tif strings.TrimSpace(l) == \"\" {\n\t\treturn \"\"\n\t}\n\tif isInternalLink(l) {\n\t\treturn l\n\t}\n\tif s.ForceHrefLink {\n\t\treturn s.forceHttpScheme(l)\n\t}\n\tp, err = url.Parse(l)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tif s.uriSchemesMap[p.Scheme] {\n\t\treturn \"\"\n\t}\n\treturn p.String()\n}",
"func fixURL(href, base string) string {\n\turi, err := url.Parse(href)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tbaseURL, err := url.Parse(base)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\turi = baseURL.ResolveReference(uri)\n\treturn uri.String()\n}",
"func Link(url, text string) string {\n\treturn Osc + \"8;;\" + url + Bel + text + Osc + \"8;;\" + Bel\n}",
"func canonicalAddress(url *url.URL) string {\n\thost := url.Hostname()\n\tport := url.Port()\n\tif port == \"\" {\n\t\tport = defaultPorts[url.Scheme]\n\t}\n\treturn fmt.Sprintf(\"%s:%s\", host, port)\n}",
"func makeAbsoluteHref(baseURL string, href string) string {\n\tif strings.HasPrefix(href, \"http\") {\n\t\treturn href\n\t} else {\n\t\treturn baseURL + href\n\t}\n}",
"func URL(e *Context) error {\n\ttarget := extractBaseTarget(e.DOM.HeadNode)\n\n\tfor n := e.DOM.RootNode; n != nil; n = htmlnode.Next(n) {\n\t\t// Skip text nodes and anything inside mustache templates\n\t\tif n.Type == html.TextNode || htmlnode.IsDescendantOf(n, atom.Template) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO(b/112417267): Handle amp-img rewriting.\n\t\tif strings.EqualFold(n.Data, \"amp-img\") {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Make attributes with URLs portable on any tag\n\t\trewritePortableURLs(n, e.BaseURL, anyTagAttrs)\n\n\t\tswitch n.DataAtom {\n\t\tcase atom.Form:\n\t\t\t// Make attributes with URLs absolute on <form> tag.\n\t\t\trewriteAbsoluteURLs(n, e.BaseURL, formTagAttrs)\n\t\tcase atom.Img:\n\t\t\t// Make attributes with URLs portable on <img> tag.\n\t\t\trewritePortableURLs(n, e.BaseURL, imgTagAttrs)\n\t\tdefault:\n\t\t\tswitch n.Data {\n\t\t\tcase \"amp-install-serviceworker\":\n\t\t\t\t// Make attributes with URLs portable on <amp-install-serviceworker> tag.\n\t\t\t\trewritePortableURLs(n, e.BaseURL, ampInstallServiceWorkerTagAttrs)\n\t\t\tcase amphtml.AMPStory:\n\t\t\t\t// Make attributes with URLs portable on <amp-story> tag.\n\t\t\t\trewritePortableURLs(n, e.BaseURL, ampStoryTagAttrs)\n\t\t\tcase \"amp-story-page\":\n\t\t\t\t// Make attributes with URLs portable on <amp-story-page> tag.\n\t\t\t\trewritePortableURLs(n, e.BaseURL, ampStoryPageTagAttrs)\n\t\t\t}\n\t\t}\n\n\t\t// Tags with href attribute.\n\t\tif href, ok := htmlnode.FindAttribute(n, \"\", \"href\"); ok {\n\t\t\t// Remove the base tag href with the following rationale:\n\t\t\t//\n\t\t\t// 1) The <base href> can be harmful. When handling things like image\n\t\t\t// source sets which are re-hosted and served from\n\t\t\t// https://cdn.ampproject.org, paths starting with \"/\" are rewritten\n\t\t\t// into the stored html document with the intent that \"/\" is relative\n\t\t\t// to the root of cdn.ampproject.org. If a base href were present, it\n\t\t\t// would change the meaning of the relative links.\n\t\t\t//\n\t\t\t// 2) Other hrefs are absolutified in the document relative to the base\n\t\t\t// href. Thus, it is not necessary to maintain the base href for\n\t\t\t// browser URL resolution.\n\t\t\tswitch n.DataAtom {\n\t\t\tcase atom.Base:\n\t\t\t\thtmlnode.RemoveAttribute(n, href)\n\t\t\t\tif len(n.Attr) == 0 {\n\t\t\t\t\thtmlnode.RemoveNode(&n)\n\t\t\t\t}\n\t\t\tcase atom.Link:\n\t\t\t\tif v, ok := htmlnode.GetAttributeVal(n, \"rel\"); ok && v == \"canonical\" {\n\t\t\t\t\t// If the origin doc is self-canonical, it should be an absolute URL\n\t\t\t\t\t// and not portable (which would result in canonical = \"#\").\n\t\t\t\t\t// Maintain the original canonical, and absolutify it. See b/36102624\n\t\t\t\t\thtmlnode.SetAttribute(n, \"\", \"href\", amphtml.RewriteAbsoluteURL(e.BaseURL, href.Val))\n\t\t\t\t} else {\n\t\t\t\t\thtmlnode.SetAttribute(n, \"\", \"href\", amphtml.RewritePortableURL(e.BaseURL, href.Val))\n\t\t\t\t}\n\t\t\tcase atom.A:\n\t\t\t\tportableHref := amphtml.RewritePortableURL(e.BaseURL, href.Val)\n\t\t\t\t// Set a default target\n\t\t\t\t// 1. If the href is not a fragment AND\n\t\t\t\t// 2. 
If there is no target OR If there is a target and it is not an allowed target\n\t\t\t\tif !strings.HasPrefix(portableHref, \"#\") {\n\t\t\t\t\tif v, ok := htmlnode.GetAttributeVal(n, \"target\"); !ok || (ok && !isAllowedTarget(v)) {\n\t\t\t\t\t\thtmlnode.SetAttribute(n, \"\", \"target\", target)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\thtmlnode.SetAttribute(n, \"\", \"href\", portableHref)\n\t\t\tdefault:\n\t\t\t\t// Make a PortableUrl for any remaining tags with href.\n\t\t\t\thtmlnode.SetAttribute(n, \"\", \"href\", amphtml.RewritePortableURL(e.BaseURL, href.Val))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (s *htmlState) checkURL(raw string) {\n\tif s.ignore&issueURL != 0 {\n\t\treturn\n\t}\n\n\tif strings.HasPrefix(raw, \"mailto:\") {\n\t\tif strings.Index(raw, \"@\") == -1 {\n\t\t\ts.err(fmt.Errorf(\"not an email address\"))\n\t\t}\n\t\treturn\n\t}\n\n\tu, err := url.Parse(raw)\n\tif err != nil {\n\t\ts.err(fmt.Errorf(\"bad URL '%s': %s\", raw, err.Error()))\n\t\treturn\n\t}\n\tif u.Opaque != \"\" {\n\t\ts.err(fmt.Errorf(\"bad URL part '%s'\", u.Opaque))\n\t\treturn\n\t}\n\n\tif strings.Index(raw, \" \") != -1 {\n\t\ts.err(fmt.Errorf(\"unencoded space in URL\"))\n\t}\n}",
"func fixUrl(url string) string {\n\turlParts := strings.SplitN(url, \"/\", 2)\n\tif len(urlParts) < 2 {\n\t\treturn \"\"\n\t}\n\n\treturn urlParts[0] + \":\" + urlParts[1]\n}",
"func (t *TestRuntime) AddrToURL(addr string) (string, error) {\n\tif strings.HasPrefix(addr, \":\") {\n\t\taddr = \"localhost\" + addr\n\t}\n\n\tif !strings.Contains(addr, \"://\") {\n\t\tscheme := \"http://\"\n\t\tif t.Params.Certificate != nil {\n\t\t\tscheme = \"https://\"\n\t\t}\n\t\taddr = scheme + addr\n\t}\n\n\tparsed, err := url.Parse(addr)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse listening address of server: %s\", err)\n\t}\n\n\treturn parsed.String(), nil\n}",
"func MakeURL(addr string) string {\n\tprotocol := \"http\"\n\tif config.Config.Env.IsProduction() {\n\t\tprotocol = \"https\"\n\t}\n\n\tif strings.HasPrefix(addr, \":\") {\n\t\treturn fmt.Sprintf(\"%s://127.0.0.1%s\", protocol, addr)\n\t}\n\n\tif !strings.HasPrefix(addr, \"http://\") && !strings.HasPrefix(addr, \"https://\") {\n\t\treturn fmt.Sprintf(\"%s://%s\", protocol, addr)\n\t}\n\n\treturn addr\n}",
"func formatUrl(source string) string {\n\tif match, _ := regexp.MatchString(\"https?:\\\\/\\\\/\", source); match {\n\t\treturn source\n\t}\n\n\treturn \"https://\" + source\n}",
"func HTMLURL(v string) predicate.User {\n\treturn predicate.User(sql.FieldEQ(FieldHTMLURL, v))\n}",
"func URL(route string, opts ...string) (s string) {\n\tsize := len(opts)\n\tif size >= 1 {\n\t\tif strings.Contains(route, \"$1\") {\n\t\t\troute = strings.Replace(route, \"$1\", opts[0], 1)\n\t\t}\n\t\tif size >= 2 && strings.Contains(route, \"$2\") {\n\t\t\troute = strings.Replace(route, \"$2\", opts[1], 1)\n\t\t}\n\t}\n\ts = fmt.Sprintf(\"%s%s\", Host, route)\n\treturn\n}",
"func rawUrl(htmlUrl string) string {\n\tdomain := strings.Replace(htmlUrl, \"https://github.com/\", \"https://raw.githubusercontent.com/\", -1)\n\treturn strings.Replace(domain, \"/blob/\", \"/\", -1)\n}",
"func SimpleURLChecks(t *testing.T, scheme string, host string, port uint16) mapval.Validator {\n\n\thostPort := host\n\tif port != 0 {\n\t\thostPort = fmt.Sprintf(\"%s:%d\", host, port)\n\t}\n\n\tu, err := url.Parse(fmt.Sprintf(\"%s://%s\", scheme, hostPort))\n\trequire.NoError(t, err)\n\n\treturn mapval.MustCompile(mapval.Map{\n\t\t\"url\": wrappers.URLFields(u),\n\t})\n}",
"func Link(t string, u string, a ...string) got.HTML {\n\tattributes := \"\"\n\tif len(a) > 0 {\n\t\tattributes = strings.Join(a, \" \")\n\t}\n\treturn got.HTML(fmt.Sprintf(\"<a href=\\\"%s\\\" %s>%s</a>\", Escape(u), Escape(attributes), Escape(t)))\n}",
"func fixURL(href, base string) (string, error) {\n\turi, err := url.Parse(href)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbaseURL, err := url.Parse(base)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\turi = baseURL.ResolveReference(uri)\n\n\treturn uri.String(), err\n}",
"func filterAddress(link, domain string) string {\n\tresolved := resolveReference(link)\n\tif strings.HasPrefix(link, \"/\") {\n\t\tresolved = \"http://\" + domain + resolved\n\t\treturn resolved\n\t}\n\treturn resolved\n}",
"func linkify(s string) template.HTML {\n\toutput := \"\"\n\ti := 0\n\tmatches := urlregexp.FindAllStringIndex(s, -1)\n\tfor _, idxs := range matches {\n\t\tstart, end := idxs[0], idxs[1]\n\t\toutput += html.EscapeString(s[i:start])\n\t\toutput += linkreplace(s[start:end])\n\t\ti = end\n\t}\n\toutput += html.EscapeString(s[i:])\n\treturn template.HTML(output)\n}",
"func HTMLURL(v string) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldEQ(FieldHTMLURL, v))\n}",
"func MakeAnchorRequestUrl(api_parts ...string) (string, error) {\n\tvar full_url bytes.Buffer\n\n\tanchorIP := os.Getenv(\"ANCHOR_ADDRESS\")\n\tif len(anchorIP) == 0 {\n\t\tlogger.Logging(logger.ERROR, \"No anchor address environment\")\n\t\treturn \"\", errors.NotFound{\"No anchor address environment\"}\n\t}\n\n\tipTest := net.ParseIP(anchorIP)\n\tif ipTest == nil {\n\t\tlogger.Logging(logger.ERROR, \"Anchor address's validation check failed\")\n\t\treturn \"\", errors.InvalidParam{\"Anchor address's validation check failed\"}\n\t}\n\n\tanchorProxy := os.Getenv(\"ANCHOR_REVERSE_PROXY\")\n\tif len(anchorProxy) == 0 || anchorProxy == \"false\" {\n\t\tfull_url.WriteString(\"http://\" + anchorIP + \":\" + DEFAULT_ANCHOR_PORT + url.Base())\n\t} else if anchorProxy == \"true\" {\n\t\tfull_url.WriteString(\"http://\" + anchorIP + \":\" + UNSECURED_ANCHOR_PORT_WITH_REVERSE_PROXY + url.PharosAnchor() + url.Base())\n\t} else {\n\t\tlogger.Logging(logger.ERROR, \"Invalid value for ANCHOR_REVERSE_PROXY\")\n\t\treturn \"\", errors.InvalidParam{\"Invalid value for ANCHOR_REVERSE_PROXY\"}\n\t}\n\n\tfor _, api_part := range api_parts {\n\t\tfull_url.WriteString(api_part)\n\t}\n\n\tlogger.Logging(logger.DEBUG, full_url.String())\n\treturn full_url.String(), nil\n}",
"func (c *Client) URL(path string, a ...interface{}) string {\n\tu, _ := urlx.Parse(c.addr)\n\n\tu.Path = fmt.Sprintf(path, a...)\n\n\treturn u.String()\n}",
"func (o GroupBadgeOutput) RenderedLinkUrl() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *GroupBadge) pulumi.StringOutput { return v.RenderedLinkUrl }).(pulumi.StringOutput)\n}",
"func TargetHref(value string) *SimpleElement { return newSEString(\"targetHref\", value) }",
"func URL(opts ...options.OptionFunc) string {\n\treturn singleFakeData(URLTag, func() interface{} {\n\t\topt := options.BuildOptions(opts)\n\t\ti := Internet{fakerOption: *opt}\n\t\tu, err := i.url()\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\treturn u\n\t}, opts...).(string)\n}",
"func clean_url(cand string) string {\n // TODO: url pattern should be refined\n r, _ := regexp.Compile(\"^((http[s]?|ftp)://)?(www\\\\.)?(?P<body>[a-z]+\\\\.[a-z]+)$\")\n if r.MatchString(cand) {\n r2 := r.FindAllStringSubmatch(cand, -1)\n return r2[0][len(r2[0]) - 1]\n }\n return \"\"\n}",
"func Href(value string) *SimpleElement { return newSEString(\"href\", value) }",
"func (c *Client) BanURL(channels []string, host string, value []string) error {\n\treturn c.Do(channels, Request{Command: \"ban.url\", Host: host, Value: value})\n}",
"func replaceAHrefs(orig_url url.URL, n *html.Node) {\n\tif n.Type == html.ElementNode && n.Data == \"a\" {\n\t\tfor i, a := range n.Attr {\n\t\t\tif a.Key == \"href\" {\n\t\t\t\ta.Val = createProxyableUrl(orig_url, a.Val)\n\t\t\t}\n\t\t\tn.Attr[i] = a\n\t\t}\n\t}\n\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\treplaceAHrefs(orig_url, c)\n\t}\n}",
"func (tu *TwitterURL) IsLinkable() {}",
"func sanitizeSkylinks(links []string) []string {\n\tvar result []string\n\n\tfor _, link := range links {\n\t\ttrimmed := strings.TrimPrefix(link, \"sia://\")\n\t\tresult = append(result, trimmed)\n\t}\n\n\treturn result\n}",
"func href2url(docUrl string, href string) string {\n\tif strings.HasPrefix(href, \"http://\") || strings.HasPrefix(href, \"https://\") {\n\t\treturn href\n\t}\n\tif strings.HasPrefix(href, \"/\") {\n\t\tr := regexp.MustCompile(`https?://[^/]+`)\n\t\tbaseUrl := r.FindAllString(docUrl, -1)[0]\n\t\treturn baseUrl + href\n\t}\n\treturn docUrl + \"/\" + href\n}",
"func href(vuln *osv.Entry) string {\n\tfor _, affected := range vuln.Affected {\n\t\tif url := affected.DatabaseSpecific.URL; url != \"\" {\n\t\t\treturn url\n\t\t}\n\t}\n\tfor _, r := range vuln.References {\n\t\tif r.Type == \"WEB\" {\n\t\t\treturn r.URL\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"https://pkg.go.dev/vuln/%s\", vuln.ID)\n}",
"func (u *GithubGistUpsert) UpdateHTMLURL() *GithubGistUpsert {\n\tu.SetExcluded(githubgist.FieldHTMLURL)\n\treturn u\n}",
"func (b *Builder) TextURL(s, url string) *Builder {\n\treturn b.appendMessage(s, func(offset, limit int) tg.MessageEntityClass {\n\t\treturn &tg.MessageEntityTextUrl{Offset: offset, Length: limit, URL: url}\n\t})\n}",
"func (g GetenvValue) SafeURL() string {\n\tif g.value[len(g.value)-1] == '/' {\n\t\treturn g.value[:len(g.value)-1]\n\t}\n\n\treturn g.value\n}",
"func (k *Keyboard) AddURLButton(text, uri string) *Keyboard {\n\treturn k.addInlineButton(text, uri, \"url\")\n}",
"func (t *Team) AbsoluteURL(path string) string {\n\treturn fmt.Sprintf(\"%s%s\", t.teamConfig.HTTPURL, path)\n}",
"func urlify(rawInput string) string {\n\tencoded := strings.TrimSpace(rawInput)\n\treturn strings.ReplaceAll(encoded, \" \", \"%20\")\n}",
"func redactURLString(raw string) string {\n\tif !strings.ContainsRune(raw, '@') {\n\t\treturn raw\n\t}\n\tu, err := url.Parse(raw)\n\tif err != nil {\n\t\treturn raw\n\t}\n\treturn u.Redacted()\n}",
"func URLize(path string) string {\n\n\tpath = strings.Replace(strings.TrimSpace(path), \" \", \"-\", -1)\n\tpath = strings.ToLower(path)\n\tpath = UnicodeSanitize(path)\n\treturn path\n}",
"func (o GroupBadgeOutput) LinkUrl() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *GroupBadge) pulumi.StringOutput { return v.LinkUrl }).(pulumi.StringOutput)\n}",
"func ToURL(path string) string {\n\treturn filepath.Clean(path)\n}",
"func (seg *Segmenter) CutUrl(str string, num ...bool) []string {\n\tif len(num) <= 0 {\n\t\t// seg.Num = true\n\t\tstr = SplitNums(str)\n\t}\n\ts := seg.Cut(str)\n\treturn seg.TrimSymbol(s)\n}",
"func absoluteURL(link, baselink string) (string, error) {\n\t// scheme relative links, eg <script src=\"//example.com/script.js\">\n\tif len(link) > 1 && link[0:2] == \"//\" {\n\t\tbase, err := url.Parse(baselink)\n\t\tif err != nil {\n\t\t\treturn link, err\n\t\t}\n\t\tlink = base.Scheme + \":\" + link\n\t}\n\n\tu, err := url.Parse(link)\n\tif err != nil {\n\t\treturn link, err\n\t}\n\n\t// remove hashes\n\tu.Fragment = \"\"\n\n\tbase, err := url.Parse(baselink)\n\tif err != nil {\n\t\treturn link, err\n\t}\n\n\t// set global variable\n\tif baseDomain == \"\" {\n\t\tbaseDomain = base.Host\n\t}\n\n\tresult := base.ResolveReference(u)\n\n\t// ensure link is HTTP(S)\n\tif result.Scheme != \"http\" && result.Scheme != \"https\" {\n\t\treturn link, fmt.Errorf(\"Invalid URL: %s\", result.String())\n\t}\n\n\treturn result.String(), nil\n}",
"func (u *GithubGistUpsertOne) UpdateHTMLURL() *GithubGistUpsertOne {\n\treturn u.Update(func(s *GithubGistUpsert) {\n\t\ts.UpdateHTMLURL()\n\t})\n}",
"func (bT BitTorrent) URL() (string, error) {\n\tbaseURL, err := url.Parse(bT.Announce)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif bT.PeerID == \"\" {\n\t\tbT.GenPeerID()\n\t\tinfoHash, err := bT.Info.InfoHash()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tbT.InfoHash = fmt.Sprintf(\"%s\", infoHash)\n\t}\n\n\tparameters := url.Values{\n\t\t\"info_hash\": []string{bT.InfoHash},\n\t\t\"peer_id\": []string{bT.PeerID},\n\t\t\"port\": []string{bT.Port},\n\t\t\"uploaded\": []string{\"0\"},\n\t\t\"downloaded\": []string{\"0\"},\n\t\t\"compact\": []string{\"1\"},\n\t\t\"left\": []string{fmt.Sprintf(\"%v\", bT.Info.Length)},\n\t}\n\tbaseURL.RawQuery = parameters.Encode()\n\treturn baseURL.String(), nil\n}",
"func htmlLinkFormatter(url, text string) string {\n\treturn fmt.Sprintf(`<a href=\"%s\">%s</a>`, html.EscapeString(url), html.EscapeString(text))\n}",
"func (p *Proxy) LinkHtml(ctx context.Context,\n\tlabel string,\n\tactionValue string,\n\tattributes html5tag.Attributes,\n) string {\n\tif attributes == nil {\n\t\tattributes = html5tag.NewAttributes()\n\t}\n\tattributes.Set(\"onclick\", \"return false;\") // make sure we do not follow the link if javascript is on.\n\tvar href string\n\tif attributes.Has(\"href\") {\n\t\thref = attributes.Get(\"href\")\n\t} else {\n\t\thref = page.GetContext(ctx).HttpContext.URL.RequestURI() // for non-javascript compatibility\n\t\tif offset := strings.Index(href, page.HtmlVarAction); offset >= 0 {\n\t\t\thref = href[:offset-1] // remove the variables we placed here ourselves\n\t\t}\n\t}\n\n\t// These next two lines allow the proxy to work even when javascript is off.\n\tav := page.HtmlVarAction + \"=\" + p.ID() + \"_\" + actionValue\n\tav += \"&\" + page.HtmlVarPagestate + \"=\" + crypt.SessionEncryptUrlValue(ctx, p.Page().StateID())\n\n\tif !strings.ContainsRune(href, '?') {\n\t\thref += \"?\" + av\n\t} else {\n\t\thref += \"&\" + av\n\t}\n\tattributes.Set(\"href\", href)\n\treturn p.TagHtml(label, actionValue, attributes, \"a\", false)\n}",
"func (b *PagesClearCacheBuilder) URL(v string) *PagesClearCacheBuilder {\n\tb.Params[\"url\"] = v\n\treturn b\n}",
"func cleanLink(base, link string) (string, error) {\n\tlink = RegexAnchors.ReplaceAllString(link, \"\")\n\tif len(link) == 0 {\n\t\treturn \"\", ErrInvalidLink\n\t}\n\tlinkURL, err := url.Parse(link)\n\tif err != nil {\n\t\treturn \"\", ErrInvalidLink\n\t}\n\n\tif validScheme(linkURL.Scheme) {\n\t\treturn link, nil\n\t}\n\n\tbaseURL, err := url.Parse(base)\n\tif err != nil {\n\t\treturn \"\", ErrInvalidLink\n\t} else if len(baseURL.Host) == 0 {\n\t\treturn \"\", ErrInvalidLink\n\t}\n\n\tif link[0] == '/' || link[len(link)-1] == '/' {\n\t\tlink = strings.Trim(link, \"/\")\n\t}\n\n\treturn strings.Join([]string{baseURL.Scheme, \"://\", baseURL.Host, \"/\", link}, \"\"), nil\n}",
"func IsValidURL(address string) bool {\n\tif IsEmptyStr(address) {\n\t\treturn false\n\t}\n\n\treturn govalidator.IsURL(address)\n}",
"func DownloadableURL(original string) (string, error) {\n\tif runtime.GOOS == \"windows\" {\n\t\t// If the distance to the first \":\" is just one character, assume\n\t\t// we're dealing with a drive letter and thus a file path.\n\t\tidx := strings.Index(original, \":\")\n\t\tif idx == 1 {\n\t\t\toriginal = \"file:///\" + original\n\t\t}\n\t}\n\n\turl, err := url.Parse(original)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif url.Scheme == \"\" {\n\t\turl.Scheme = \"file\"\n\t}\n\n\tif url.Scheme == \"file\" {\n\t\t// Windows file handling is all sorts of tricky...\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\t// If the path is using Windows-style slashes, URL parses\n\t\t\t// it into the host field.\n\t\t\tif url.Path == \"\" && strings.Contains(url.Host, `\\`) {\n\t\t\t\turl.Path = url.Host\n\t\t\t\turl.Host = \"\"\n\t\t\t}\n\n\t\t\t// For Windows absolute file paths, remove leading / prior to processing\n\t\t\t// since net/url turns \"C:/\" into \"/C:/\"\n\t\t\tif len(url.Path) > 0 && url.Path[0] == '/' {\n\t\t\t\turl.Path = url.Path[1:len(url.Path)]\n\t\t\t}\n\t\t}\n\n\t\t// Only do the filepath transformations if the file appears\n\t\t// to actually exist.\n\t\tif _, err := os.Stat(url.Path); err == nil {\n\t\t\turl.Path, err = filepath.Abs(url.Path)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\turl.Path, err = filepath.EvalSymlinks(url.Path)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\turl.Path = filepath.Clean(url.Path)\n\t\t}\n\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\t// Also replace all backslashes with forwardslashes since Windows\n\t\t\t// users are likely to do this but the URL should actually only\n\t\t\t// contain forward slashes.\n\t\t\turl.Path = strings.Replace(url.Path, `\\`, `/`, -1)\n\t\t}\n\t}\n\n\t// Make sure it is lowercased\n\turl.Scheme = strings.ToLower(url.Scheme)\n\n\t// This is to work around issue #5927. This can safely be removed once\n\t// we distribute with a version of Go that fixes that bug.\n\t//\n\t// See: https://code.google.com/p/go/issues/detail?id=5927\n\tif url.Path != \"\" && url.Path[0] != '/' {\n\t\turl.Path = \"/\" + url.Path\n\t}\n\n\t// Verify that the scheme is something we support in our common downloader.\n\tsupported := []string{\"file\", \"http\", \"https\"}\n\tfound := false\n\tfor _, s := range supported {\n\t\tif url.Scheme == s {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !found {\n\t\treturn \"\", fmt.Errorf(\"Unsupported URL scheme: %s\", url.Scheme)\n\t}\n\n\treturn url.String(), nil\n}",
"func (i Internet) URL() string {\n\turl := i.Faker.RandomStringElement(urlFormats)\n\n\t// {{domain}}\n\turl = strings.Replace(url, \"{{domain}}\", i.Domain(), 1)\n\n\t// {{slug}}\n\turl = strings.Replace(url, \"{{slug}}\", i.Slug(), 1)\n\n\treturn url\n}",
"func (u *User) HTMLURL() string {\n\treturn conf.Server.ExternalURL + u.Name\n}",
"func (b *Builder) URL(s string) *Builder {\n\treturn b.appendMessage(s, func(offset, limit int) tg.MessageEntityClass {\n\t\treturn &tg.MessageEntityUrl{Offset: offset, Length: limit}\n\t})\n}",
"func effectiveURL(a *analysis.Analyzer, diag analysis.Diagnostic) string {\n\tu := diag.URL\n\tif u == \"\" && diag.Category != \"\" {\n\t\tu = \"#\" + diag.Category\n\t}\n\tif base, err := urlpkg.Parse(a.URL); err == nil {\n\t\tif rel, err := urlpkg.Parse(u); err == nil {\n\t\t\tu = base.ResolveReference(rel).String()\n\t\t}\n\t}\n\treturn u\n}",
"func (o ReleaseLinkOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ReleaseLink) pulumi.StringOutput { return v.Url }).(pulumi.StringOutput)\n}",
"func ShortenURL(urlvar string, b *bitly.Client) string {\n\n\tshortURL, err := b.Links.Shorten(urlvar)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\treturn shortURL.URL\n}",
"func NormalizeURL(addr string) (*url.URL, error) {\n\taddr = strings.TrimSpace(addr)\n\tu, err := url.Parse(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif u.Opaque != \"\" {\n\t\tu.Host = net.JoinHostPort(u.Scheme, u.Opaque)\n\t\tu.Opaque = \"\"\n\t} else if u.Path != \"\" && !strings.Contains(u.Path, \":\") {\n\t\tu.Host = net.JoinHostPort(u.Path, strconv.Itoa(config.DefaultServerPort))\n\t\tu.Path = \"\"\n\t} else if u.Scheme == \"\" {\n\t\tu.Host = u.Path\n\t\tu.Path = \"\"\n\t}\n\tif u.Scheme != \"https\" {\n\t\tu.Scheme = \"http\"\n\t}\n\t_, port, err := net.SplitHostPort(u.Host)\n\tif err != nil {\n\t\t_, port, err = net.SplitHostPort(u.Host + \":\" + strconv.Itoa(config.DefaultServerPort))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif port != \"\" {\n\t\t_, err = strconv.Atoi(port)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn u, nil\n}",
"func FormatURL(url string) string {\n\turl = strings.TrimSpace(url)\n\tif strings.Contains(url, \"\\\\\") {\n\t\turl = strings.ReplaceAll(url, \"\\\\\", \"/\")\n\t}\n\turl = strings.TrimRight(url, \"#/?\")\n\treturn url\n}",
"func (u *GithubGistUpsertBulk) UpdateHTMLURL() *GithubGistUpsertBulk {\n\treturn u.Update(func(s *GithubGistUpsert) {\n\t\ts.UpdateHTMLURL()\n\t})\n}",
"func urlify(s string) string {\n\tvar r strings.Builder\n\n\tfor i := 0; i < len(s); i++ {\n\t\tif string(s[i]) == \" \" {\n\t\t\tr.WriteString(\"%20\")\n\t\t} else {\n\t\t\tr.WriteString(string(s[i]))\n\t\t}\n\t}\n\treturn r.String()\n}",
"func (o DomainOutput) CheckUrl() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Domain) pulumi.StringPtrOutput { return v.CheckUrl }).(pulumi.StringPtrOutput)\n}",
"func (r *Attestor) urlNormalized() *Attestor {\n\tnormalized := deepcopy.Copy(*r).(Attestor)\n\tnormalized.Name = dcl.SelfLinkToName(r.Name)\n\tnormalized.Description = dcl.SelfLinkToName(r.Description)\n\tnormalized.Project = dcl.SelfLinkToName(r.Project)\n\treturn &normalized\n}",
"func (h *Host) ToURL() string {\n\treturn fmt.Sprintf(\"%s:%d\", h.Address, h.Port)\n}",
"func URL(url string) string {\n\tscheme, host, _, path, query := unpackURL(url)\n\t// log.S(\"url\", url).S(\"host\", host).Debug(fmt.Sprintf(\"should discover: %v\", shouldDiscoverHost(host)))\n\tif !shouldDiscoverHost(host) {\n\t\treturn url\n\t}\n\tsrvs, err := Services(host)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn url\n\t}\n\t// log.I(\"len_srvs\", len(srvs)).Debug(\"service entries\")\n\tif len(srvs) == 0 {\n\t\treturn url\n\t}\n\tsrv := srvs[rand.Intn(len(srvs))]\n\treturn packURL(scheme, srv.String(), \"\", path, query)\n}",
"func FormatWebhookURL(url string, event events.Event) string {\n\treturn strings.ReplaceAll(url, WebhookURLEvent, string(event))\n}",
"func StandardizeURL(url string) string {\n\tlink := url\n\tvar schema, domain, path string\n\n\t// Try to get the schema\n\tslice := strings.SplitN(url, \"://\", 2)\n\tif len(slice) == 2 && len(slice[0]) < 10 { // schema exists\n\t\tschema = slice[0] + \"://\"\n\t\tlink = slice[1]\n\t} else {\n\t\tschema = \"http://\"\n\t}\n\n\t// Get the domain\n\tslice = strings.SplitN(link, \"/\", 2)\n\tif len(slice) == 2 {\n\t\tdomain = slice[0]\n\t\tpath = \"/\" + slice[1]\n\t} else {\n\t\tdomain = slice[0]\n\t\tpath = \"/\"\n\t}\n\n\tdomain, _ = idna.ToASCII(domain)\n\tlink = schema + domain + path\n\n\treturn link\n}",
"func verifyURL(myUrl string) string {\n\tu, _ := url.Parse(myUrl)\n\n\tif u.Scheme != \"\" {\n\t\treturn myUrl\n\t}\n\treturn \"http://\" + myUrl\n}",
"func mutateURL(url string) string {\n\treturn strings.Replace(url, \"/j/\", \"/wc/join/\", 1)\n}",
"func (m *WorkforceIntegration) SetUrl(value *string)() {\n m.url = value\n}",
"func EscapeURL(s string) string {\n\treturn got.URLQueryEscaper(s)\n}",
"func (x XKCDStrip) URL() string {\n\treturn fmt.Sprintf(\"https://xkcd.com/%d\", x.ID)\n}",
"func (r *Rietveld) Url(issueID int64) string {\n\tif issueID == 0 {\n\t\treturn r.url\n\t}\n\treturn fmt.Sprintf(\"%s/%d\", r.url, issueID)\n}",
"func NewURL(userID uint, address string, threshold int) (*URL, error) {\n\turl := new(URL)\n\turl.UserId = userID\n\turl.Threshold = threshold\n\turl.FailedTimes = 0\n\n\tisValid := govalidator.IsURL(address)\n\tif !strings.HasPrefix(\"http://\", address) {\n\t\taddress = \"http://\" + address\n\t}\n\tif isValid {\n\t\t//valid URL address\n\t\turl.Address = address\n\t\treturn url, nil\n\t}\n\treturn nil, errors.New(\"not a valid URL address\")\n}",
"func URL(data ValidationData) error {\n\tv, err := helper.ToString(data.Value)\n\tif err != nil {\n\t\treturn ErrInvalid{\n\t\t\tValidationData: data,\n\t\t\tFailure: \"is not a string\",\n\t\t\tMessage: data.Message,\n\t\t}\n\t}\n\n\tparsed, err := url.Parse(v)\n\tif err != nil {\n\t\treturn ErrInvalid{\n\t\t\tValidationData: data,\n\t\t\tFailure: \"is not a valid URL\",\n\t\t\tMessage: data.Message,\n\t\t}\n\t}\n\n\tif parsed.Scheme != \"http\" && parsed.Scheme != \"https\" {\n\t\treturn ErrInvalid{\n\t\t\tValidationData: data,\n\t\t\tFailure: fmt.Sprintf(\"has an invalid scheme '%s'\", parsed.Scheme),\n\t\t\tMessage: data.Message,\n\t\t}\n\t}\n\n\tif parsed.Host == \"\" || strings.IndexRune(parsed.Host, '\\\\') > 0 {\n\t\treturn ErrInvalid{\n\t\t\tValidationData: data,\n\t\t\tFailure: fmt.Sprintf(\"has an invalid host ('%s')\", parsed.Host),\n\t\t\tMessage: data.Message,\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (o *AddOn) HREF() string {\n\tif o != nil && o.bitmap_&4 != 0 {\n\t\treturn o.href\n\t}\n\treturn \"\"\n}",
"func (r *Router) URL(name string, vars ...string) string {\n\tif route, ok := r.Router.NamedRoutes[name]; ok {\n\t\tu, err := r.Router.matcher.Build(route, vars...)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn u\n\t}\n\treturn \"\"\n}",
"func BitlyURLShorten(urlStr string) string {\n\tapiKey := os.Getenv(\"BITLY_TOKEN\")\n\tb := bitly.New(apiKey)\n\tshortURL, err := b.Links.Shorten(urlStr)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to sign request\", err)\n\t} else {\n\t\tlog.Debug(\"The bitly URL is\", shortURL)\n\t}\n\treturn shortURL.URL\n}",
"func MakeUrl(address string) string {\n\n params := url.Values{}\n params.Add(\"near\", address)\n\n uri := fmt.Sprintf(\"%s?%s\", URL, params.Encode())\n\n return uri\n}",
"func (r *Attestor) urlNormalized() *Attestor {\n\tnormalized := dcl.Copy(*r).(Attestor)\n\tnormalized.Name = dcl.SelfLinkToName(r.Name)\n\tnormalized.Description = dcl.SelfLinkToName(r.Description)\n\tnormalized.Project = dcl.SelfLinkToName(r.Project)\n\treturn &normalized\n}",
"func (b *ServiceClusterBuilder) HREF(value string) *ServiceClusterBuilder {\n\tb.href = value\n\tb.bitmap_ |= 4\n\treturn b\n}",
"func (lm LinksManager) CleanLinkParams(url *url.URL) bool {\n\t// we try to clean all URLs, not specific ones\n\treturn true\n}",
"func (b *AddonInstallationBuilder) HREF(value string) *AddonInstallationBuilder {\n\tb.href = value\n\tb.bitmap_ |= 4\n\treturn b\n}",
"func AppendURL(url_text string, append_text []string) (url string, err error) {\n\tappend_len := len(append_text)\n\tif append_len <= 0 {\n\t\treturn url_text, nil\n\t}\n\tlast_rune, width := utf8.DecodeLastRuneInString(url_text)\n\tif last_rune == utf8.RuneError && width == 1 {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"'%s' is not a valid utf8 string\", url_text))\n\t}\n\tif append_len == 1 && last_rune == '/' {\n\t\turl = url_text + append_text[0]\n\t\treturn url, nil\n\n\t} else if append_len > 1 && last_rune == '/' {\n\t\turl = strings.Join(append([]string{url_text + append_text[0]}, append_text[1:]...), \"/\")\n\t\treturn url, nil\n\t}\n\turl = strings.Join(append([]string{url_text}, append_text...), \"/\")\n\treturn url, nil\n}",
"func URL(s string) got.URL {\n\treturn got.URL(s)\n}",
"func (b *FollowUpBuilder) Url(value string) *FollowUpBuilder {\n\tb.url = value\n\tb.bitmap_ |= 16384\n\treturn b\n}",
"func (m *BookingBusiness) SetPublicUrl(value *string)() {\n err := m.GetBackingStore().Set(\"publicUrl\", value)\n if err != nil {\n panic(err)\n }\n}",
"func generateURLIssue(h string) string {\n\tconst (\n\t\ttitle = \"Move your ass\"\n\t\turlFormat = \"https://github.com/sjeandeaux/nexus-cli/issues/new?title=%s&body=%s\"\n\t\tbodyFormat = \"Could you add the hash %q lazy man?\\n%s\"\n\t)\n\tescapedTitle := url.QueryEscape(title)\n\tbody := fmt.Sprintf(bodyFormat, h, information.Print())\n\tescapedBody := url.QueryEscape(body)\n\turlIssue := fmt.Sprintf(urlFormat, escapedTitle, escapedBody)\n\treturn urlIssue\n}",
"func buildURL(url string, x, y, z int) string {\n\turl = strings.Replace(url, \"{x}\", strconv.Itoa(x), 1)\n\turl = strings.Replace(url, \"{y}\", strconv.Itoa(y), 1)\n\turl = strings.Replace(url, \"{z}\", strconv.Itoa(z), 1)\n\treturn url\n}",
"func isURL(v string) bool {\n\tvalGen := pflagValueFuncMap[urlFlag]\n\treturn valGen().Set(v) == nil\n}",
"func URL(s *httptest.Server) string {\n\treturn strings.Replace(s.URL, \"http\", \"ws\", 1)\n}",
"func BuildURL(route string) string {\n\tprefix := os.Getenv(\"AWS_LAMBDA_RUNTIME_API\")\n\tif len(prefix) == 0 {\n\t\treturn fmt.Sprintf(\"http://localhost:9001%s\", route)\n\t}\n\treturn fmt.Sprintf(\"http://%s%s\", prefix, route)\n}",
"func IsURLValid(value string) bool {\n\tcheck := value != \"\" && !strings.Contains(value, \".gif\") && !strings.Contains(value, \"logo\") && !strings.Contains(value, \"mobilebanner\")\n\n\tif check {\n\t\treturn strings.HasPrefix(value, \"http\") || strings.HasPrefix(value, \"https\")\n\t}\n\n\treturn check\n}"
] | [
"0.52412796",
"0.5147074",
"0.50879025",
"0.5021607",
"0.49071804",
"0.48897803",
"0.48651856",
"0.4834832",
"0.48083636",
"0.48071218",
"0.4795265",
"0.47837985",
"0.47502625",
"0.47394225",
"0.4733341",
"0.47310492",
"0.47305945",
"0.47228217",
"0.4715572",
"0.47154",
"0.47066662",
"0.47008896",
"0.46928576",
"0.46910858",
"0.46733606",
"0.4666813",
"0.46651357",
"0.46502602",
"0.46433145",
"0.46423548",
"0.46404752",
"0.46353295",
"0.4630457",
"0.4620223",
"0.46176836",
"0.46131787",
"0.46093136",
"0.4607997",
"0.46017453",
"0.45999104",
"0.45905638",
"0.45857587",
"0.45853943",
"0.45840877",
"0.45714846",
"0.4569403",
"0.4567161",
"0.45542407",
"0.45467266",
"0.454488",
"0.45406902",
"0.45028764",
"0.44873312",
"0.44799915",
"0.44783986",
"0.44740924",
"0.4462845",
"0.44611317",
"0.44609594",
"0.44572997",
"0.44550335",
"0.4453449",
"0.44501913",
"0.4448752",
"0.44452807",
"0.443646",
"0.4436013",
"0.44356793",
"0.44345385",
"0.44342208",
"0.44265288",
"0.44148684",
"0.4410505",
"0.44078612",
"0.44063443",
"0.4393336",
"0.43865782",
"0.43777984",
"0.4377606",
"0.43702507",
"0.43610692",
"0.43514523",
"0.43448067",
"0.43427974",
"0.43374193",
"0.4336337",
"0.43351358",
"0.43321317",
"0.43301448",
"0.43292397",
"0.4327983",
"0.43262455",
"0.43260548",
"0.43253723",
"0.43247643",
"0.43230867",
"0.4323012",
"0.43193543",
"0.43164158",
"0.4309948"
] | 0.802518 | 0 |
AsBool converts string to bool, in case of the value is empty or unknown, defaults to false | func AsBool(v string) bool {
if v == "" {
return false
}
out, _ := apiutils.ParseBool(v)
return out
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (s *Value) asBool() (bool, error) {\n\t// A missing value is considered false\n\tif s == nil {\n\t\treturn false, nil\n\t}\n\tswitch s.Name {\n\tcase \"true\":\n\t\treturn true, nil\n\tcase \"false\":\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"invalid boolean: %s\", s.Name)\n\t}\n}",
"func (v *Value) AsBool(dv bool) bool {\n\tif v.IsUndefined() {\n\t\treturn dv\n\t}\n\tswitch tv := v.raw.(type) {\n\tcase string:\n\t\tb, err := strconv.ParseBool(tv)\n\t\tif err != nil {\n\t\t\treturn dv\n\t\t}\n\t\treturn b\n\tcase int:\n\t\treturn tv == 1\n\tcase float64:\n\t\treturn tv == 1.0\n\tcase bool:\n\t\treturn tv\n\tcase time.Time:\n\t\treturn tv.UnixNano() > 0\n\tcase time.Duration:\n\t\treturn tv.Nanoseconds() > 0\n\t}\n\treturn dv\n}",
"func (val stringValue) toBool() boolValue {\n\tif val.null {\n\t\treturn boolValue{false, true}\n\t}\n\treturn boolValue{true, false}\n}",
"func Bool(value interface{}) bool {\r\n\ts := String(value)\r\n\tb, _ := strconv.ParseBool(s)\r\n\treturn b\r\n}",
"func parseBool(asString string) (bool, error) {\n\tswitch asString {\n\tcase \"true\":\n\t\treturn true, nil\n\tcase \"false\":\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"could not parse %q as a bool\", asString)\n\t}\n}",
"func (s *Str) Bool() bool {\n\tval, err := strconv.ParseBool(s.val)\n\tif err != nil {\n\t\ts.err = err\n\t}\n\treturn val\n}",
"func ParseBool(str string) (bool, error) {}",
"func StringToBool(s string, def bool) bool {\n\tv, err := strconv.ParseBool(s)\n\tif err != nil {\n\t\tlog.Warnf(\"failed to parse bool value: %s\", s)\n\t\treturn def\n\t}\n\treturn v\n}",
"func (me StringData) toBoolean() bool {\n\tif b, err := strconv.ParseBool(me.val); err != nil {\n\t\treturn b\n\t}\n\treturn false\n}",
"func Bool(val string) error {\n\tif strings.EqualFold(val, \"true\") || strings.EqualFold(val, \"false\") {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"invalid bool value '%s', can be only 'true' or 'false'\", val)\n}",
"func ToBool(value interface{}) (bool, error) {\n\tvalue = indirect(value)\n\n\tvar s string\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn false, nil\n\tcase bool:\n\t\treturn v, nil\n\tcase []byte:\n\t\tif len(v) == 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\ts = string(v)\n\tcase string:\n\t\ts = v\n\tcase fmt.Stringer:\n\t\ts = v.String()\n\tdefault:\n\t\treturn !IsZero(v), nil\n\t}\n\n\tswitch s {\n\tcase \"t\", \"T\", \"1\", \"on\", \"On\", \"ON\", \"true\", \"True\", \"TRUE\", \"yes\", \"Yes\", \"YES\":\n\t\treturn true, nil\n\tcase \"f\", \"F\", \"0\", \"off\", \"Off\", \"OFF\", \"false\", \"False\", \"FALSE\", \"no\", \"No\", \"NO\", \"\":\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"unrecognized bool string: %s\", s)\n\t}\n}",
"func AsBool(v interface{}) bool {\n\tif v == nil {\n\t\treturn false\n\t}\n\tr := reflect.ValueOf(v)\n\tv = AsValueRef(r).Interface()\n\tswitch v.(type) {\n\tcase int:\n\t\treturn v.(int) > 0\n\tcase int8:\n\t\treturn v.(int8) > 0\n\tcase int16:\n\t\treturn v.(int16) > 0\n\tcase int32:\n\t\treturn v.(int32) > 0\n\tcase int64:\n\t\treturn v.(int64) > 0\n\tcase uint:\n\t\treturn v.(uint) > 0\n\tcase uint8:\n\t\treturn v.(uint8) > 0\n\tcase uint16:\n\t\treturn v.(uint16) > 0\n\tcase uint32:\n\t\treturn v.(uint32) > 0\n\tcase uint64:\n\t\treturn v.(uint64) > 0\n\tcase float32:\n\t\treturn v.(float32) > 0\n\tcase float64:\n\t\treturn v.(float64) > 0\n\tcase []uint8:\n\t\tb, err := strconv.ParseBool(string(v.([]uint8)))\n\t\tif err == nil {\n\t\t\treturn b\n\t\t} else {\n\t\t\treturn len(v.([]uint8)) != 0\n\t\t}\n\tcase string:\n\t\tb, err := strconv.ParseBool(v.(string))\n\t\tif err == nil {\n\t\t\treturn b\n\t\t} else {\n\t\t\treturn len(v.(string)) != 0\n\t\t}\n\tcase bool:\n\t\treturn v.(bool)\n\tcase error:\n\t\treturn false\n\tdefault:\n\t\t// check nil and empty value\n\t\tswitch r.Kind() {\n\t\tcase reflect.Array:\n\t\t\treturn r.Len() != 0\n\t\tcase reflect.Map, reflect.Slice:\n\t\t\treturn !(r.IsNil() || r.Len() == 0)\n\t\tcase reflect.Interface, reflect.Ptr, reflect.Chan, reflect.Func:\n\t\t\treturn !r.IsNil()\n\t\t}\n\t\treturn !reflect.DeepEqual(v, reflect.Zero(r.Type()).Interface())\n\t}\n}",
"func ToBoolean(str string) (bool, error) {\n\tres, err := strconv.ParseBool(str)\n\tif err != nil {\n\t\tres = false\n\t}\n\treturn res, err\n}",
"func ToBoolean(str string) (bool, error) {\n\tres, err := strconv.ParseBool(str)\n\tif err != nil {\n\t\tres = false\n\t}\n\treturn res, err\n}",
"func (s VerbatimString) ToBool() (bool, error) { return _verbatimString(s).ToBool() }",
"func ToBool(v interface{}, def bool) bool {\r\n\tif b, ok := v.(bool); ok {\r\n\t\treturn b\r\n\t}\r\n\tif i, ok := v.(int); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif i, ok := v.(float64); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif i, ok := v.(float32); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif ss, ok := v.([]string); ok {\r\n\t\tv = ss[0]\r\n\t}\r\n\tif s, ok := v.(string); ok {\r\n\t\tif s == \"on\" {\r\n\t\t\treturn true\r\n\t\t}\r\n\t\tif s == \"off\" || s == \"\" {\r\n\t\t\treturn false\r\n\t\t}\r\n\t\tif b, err := strconv.ParseBool(s); err == nil {\r\n\t\t\treturn b\r\n\t\t}\r\n\t}\r\n\r\n\treturn def\r\n\r\n}",
"func StrToBool(s string) bool {\n\tif s == \"да\" {\n\t\treturn true\n\t}\n\n\tif v, err := strconv.ParseBool(s); err == nil {\n\t\treturn v\n\t}\n\n\treturn false\n}",
"func StrToBool(s string) (bool, error) {\n\tclean := strings.TrimSpace(s)\n\n\tif regexp.MustCompile(`(?i)^(1|yes|true|y|t)$`).MatchString(clean) {\n\t\treturn true, nil\n\t}\n\n\tif regexp.MustCompile(`(?i)^(0|no|false|n|f)$`).MatchString(clean) {\n\t\treturn false, nil\n\t}\n\n\treturn false, fmt.Errorf(\"cannot convert string value '%s' into a boolean\", clean)\n}",
"func TextToBool(value string) (result bool) {\n\tvalue = strings.ToLower(value)\n\tswitch value {\n\tcase \"yes\":\n\t\tresult = true\n\tcase \"true\":\n\t\tresult = true\n\tcase \"1\":\n\t\tresult = true\n\tdefault:\n\t\tresult = false\n\t}\n\treturn\n}",
"func atob(str string) (value bool, err error) {\n\tv, err := strconv.ParseBool(str)\n\tif err == nil {\n\t\treturn v, nil\n\t}\n\n\tswitch str {\n\tcase \"y\", \"Y\", \"yes\", \"YES\", \"Yes\":\n\t\treturn true, nil\n\tcase \"n\", \"N\", \"no\", \"NO\", \"No\":\n\t\treturn false, nil\n\t}\n\n\t// Check extra characters, if any.\n\tboolExtra, ok := ExtraBoolString[str]\n\tif ok {\n\t\treturn boolExtra, nil\n\t}\n\n\treturn false, err // Return error of 'strconv.Atob'\n}",
"func ParseBool(operand string) (value bool, err error) { return strconv.ParseBool(operand) }",
"func Str2Bool(v interface{}) (t bool) {\n\tvar i = 0\n\n\tswitch v.(type) {\n\tcase string:\n\t\ti, _ = strconv.Atoi(v.(string))\n\n\tcase int:\n\t\ti = v.(int)\n\n\tcase bool:\n\t\tif v.(bool) == true {\n\t\t\ti = 1\n\t\t} else {\n\t\t\ti = 0\n\t\t}\n\t}\n\n\tif i > 0 {\n\t\tt = true\n\t}\n\n\treturn\n}",
"func StringToBool(str String) Bool {\n\tv := &stringToBool{from: str}\n\tstr.AddListener(v)\n\treturn v\n}",
"func Bool(i interface{}) bool {\n\tif i == nil {\n\t\treturn false\n\t}\n\tif v, ok := i.(bool); ok {\n\t\treturn v\n\t}\n\tif s := String(i); s != \"\" && s != \"0\" && s != \"false\" && s != \"off\" {\n\t\treturn true\n\t}\n\treturn false\n}",
"func boolValue(s string) bool {\n\tswitch s {\n\tcase \"yes\", \"true\":\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func typeConvertBool(i interface{}) bool {\n\tif i == nil {\n\t\treturn false\n\t}\n\tif v, ok := i.(bool); ok {\n\t\treturn v\n\t}\n\tif s := typeConvertString(i); s != \"\" && s != \"0\" && s != \"false\" && s != \"off\" {\n\t\treturn true\n\t}\n\treturn false\n}",
"func BoolConverter(str string, target reflect.Value) (ok bool) {\n\tb, err := strconv.ParseBool(str)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttarget.SetBool(b)\n\treturn true\n}",
"func ToBool(value interface{}) bool {\n\tswitch value := value.(type) {\n\tcase bool:\n\t\treturn value\n\tcase *bool:\n\t\treturn *value\n\tcase string:\n\t\tswitch value {\n\t\tcase \"\", \"false\":\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\tcase *string:\n\t\treturn ToBool(*value)\n\tcase float64:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase *float64:\n\t\treturn ToBool(*value)\n\tcase float32:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase *float32:\n\t\treturn ToBool(*value)\n\tcase int:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase *int:\n\t\treturn ToBool(*value)\n\t}\n\treturn false\n}",
"func parseBoolFromString(content string, aggErr *AggregateError) bool {\n result, err := strconv.ParseBool(content)\n if err != nil {\n aggErr.Append(err)\n }\n return result\n}",
"func Bool(v interface{}) *bool {\n\tswitch v.(type) {\n\tcase bool:\n\t\tval := v.(bool)\n\t\treturn &val\n\tcase int, uint, int32, int16, int8, int64, uint32, uint16, uint8, uint64, float32, float64:\n\t\tval, err := strconv.Atoi(fmt.Sprintf(\"%v\", v))\n\t\tif err != nil {\n\t\t\texception.Err(err, 500).Ctx(M{\"v\": v}).Throw()\n\t\t}\n\t\tres := false\n\t\tif val != 0 {\n\t\t\tres = true\n\t\t}\n\t\treturn &res\n\tdefault:\n\t\tval := fmt.Sprintf(\"%v\", v)\n\t\tres := false\n\t\tif val != \"\" {\n\t\t\tres = true\n\t\t}\n\t\treturn &res\n\t}\n}",
"func OkToBool(ok string) bool {\n\tif ok == \"ok\" {\n\t\treturn true\n\t}\n\treturn false\n}",
"func ParseBool(val interface{}) (value bool, err error) {\n\tif val != nil {\n\t\tswitch v := val.(type) {\n\t\tcase bool:\n\t\t\treturn v, nil\n\t\tcase string:\n\t\t\tswitch v {\n\t\t\tcase \"1\", \"t\", \"T\", \"true\", \"TRUE\", \"True\", \"YES\", \"yes\", \"Yes\", \"Y\", \"y\", \"ON\", \"on\", \"On\":\n\t\t\t\treturn true, nil\n\t\t\tcase \"0\", \"f\", \"F\", \"false\", \"FALSE\", \"False\", \"NO\", \"no\", \"No\", \"N\", \"n\", \"OFF\", \"off\", \"Off\":\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\tcase int8, int32, int64:\n\t\t\tstrV := fmt.Sprintf(\"%s\", v)\n\t\t\tif strV == \"1\" {\n\t\t\t\treturn true, nil\n\t\t\t} else if strV == \"0\" {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\tcase float64:\n\t\t\tif v == 1 {\n\t\t\t\treturn true, nil\n\t\t\t} else if v == 0 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn false, fmt.Errorf(\"parsing %q: invalid syntax\", val)\n\t}\n\treturn false, fmt.Errorf(\"parsing <nil>: invalid syntax\")\n}",
"func ToBool(value interface{}) (bool, bool) {\n\tvar data bool\n\tswitch converted := value.(type) {\n\tcase string:\n\t\tvar err error\n\t\tdata, err = strconv.ParseBool(strings.ToLower(converted))\n\t\tif err != nil {\n\t\t\treturn false, false\n\t\t}\n\tcase float64, float32, int:\n\t\tdata = converted != 0\n\tcase bool:\n\t\tdata = converted\n\tdefault:\n\t\treturn false, false\n\t}\n\treturn data, true\n}",
"func getBoolVal(input string) bool {\n\tinput = strings.ToLower(input)\n\tif input == \"yes\" || input == \"true\" {\n\t\treturn true\n\t}\n\treturn false\n}",
"func toBool(val interface{}) bool {\n\tif val == nil || val == false {\n\t\treturn false\n\t}\n\treturn true\n}",
"func flagToBool(f string) bool {\n\tif f == \"true\" {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}",
"func Bool(name string, defaultValue bool) bool {\n\tif strVal, ok := os.LookupEnv(name); ok {\n\t\tif res, err := strconv.ParseBool(strVal); err == nil {\n\t\t\treturn res\n\t\t}\n\t}\n\n\treturn defaultValue\n}",
"func Bool(v interface{}, defaults ...bool) (b bool) {\n\tswitch tv := v.(type) {\n\tcase nil:\n\t\tif 1 < len(defaults) {\n\t\t\tb = defaults[1]\n\t\t}\n\tcase bool:\n\t\tb = tv\n\tcase string:\n\t\tvar err error\n\t\tif 1 < len(defaults) {\n\t\t\tb = defaults[1]\n\t\t} else if b, err = strconv.ParseBool(tv); err != nil {\n\t\t\tif 0 < len(defaults) {\n\t\t\t\tb = defaults[0]\n\t\t\t}\n\t\t}\n\tcase gen.Bool:\n\t\tb = bool(tv)\n\tcase gen.String:\n\t\tvar err error\n\t\tif 1 < len(defaults) {\n\t\t\tb = defaults[1]\n\t\t} else if b, err = strconv.ParseBool(string(tv)); err != nil {\n\t\t\tif 0 < len(defaults) {\n\t\t\t\tb = defaults[0]\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tif 0 < len(defaults) {\n\t\t\tb = defaults[0]\n\t\t}\n\t}\n\treturn\n}",
"func (v AnnotationValue) AsBool() bool {\n\treturn v.Value.(bool)\n}",
"func ToNullableBoolean(value interface{}) *bool {\n\tif value == nil {\n\t\treturn nil\n\t}\n\n\tvar v string\n\n\tswitch value.(type) {\n\tcase bool:\n\t\tr := value.(bool)\n\t\treturn &r\n\n\tcase string:\n\t\tv = strings.ToLower(value.(string))\n\n\tcase time.Duration:\n\t\td := value.(time.Duration)\n\t\tr := d.Nanoseconds() > 0\n\t\treturn &r\n\n\tdefault:\n\t\tv = strings.ToLower(fmt.Sprint(value))\n\t}\n\n\tif v == \"1\" || v == \"true\" || v == \"t\" || v == \"yes\" || v == \"y\" {\n\t\tr := true\n\t\treturn &r\n\t}\n\n\tif v == \"0\" || v == \"false\" || v == \"f\" || v == \"no\" || v == \"n\" {\n\t\tr := false\n\t\treturn &r\n\t}\n\n\treturn nil\n}",
"func (v Value) AsBool() bool {\n\treturn v.iface.(bool)\n}",
"func (c *JSONElement) AsBool() bool {\n\tvalue := false\n\n\tif v, err := c.Bool(); err == nil {\n\t\tvalue = v\n\t} else if v, err := c.Json.String(); err == nil {\n\t\tif v == \"true\" || v == \"1\" {\n\t\t\tvalue = true\n\t\t}\n\t} else if v, err := c.Json.Int(); err == nil {\n\t\tif v == 1 {\n\t\t\tvalue = true\n\t\t}\n\t}\n\n\treturn value\n}",
"func Bool(key string, def bool) bool {\n\tif s := String(key, \"\"); s != \"\" {\n\t\tif d, err := strconv.ParseBool(s); err == nil {\n\t\t\treturn d\n\t\t} else {\n\t\t\tLog(key, err)\n\t\t}\n\t}\n\treturn def\n}",
"func (s *String) Bool() bool {\n\tif len(s.s) == 0 {\n\t\tpanic(\"Empty string not allowed (should be == None)\")\n\t}\n\treturn true\n}",
"func parseBool(str string) (value bool, err error) {\n\tswitch str {\n\tcase \"1\", \"t\", \"T\", \"true\", \"TRUE\", \"True\", \"YES\", \"yes\", \"Yes\", \"ON\", \"on\", \"On\":\n\t\treturn true, nil\n\tcase \"0\", \"f\", \"F\", \"false\", \"FALSE\", \"False\", \"NO\", \"no\", \"No\", \"OFF\", \"off\", \"Off\":\n\t\treturn false, nil\n\t}\n\treturn false, fmt.Errorf(\"parsing \\\"%s\\\": invalid syntax\", str)\n}",
"func isTrue(s string) bool {\n\tv, _ := strconv.ParseBool(s)\n\treturn v\n}",
"func (f *flag) Bool() bool {\n\tvalue, err := strconv.ParseBool(f.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn value\n}",
"func (p Parser) Bool(ctx context.Context) (*bool, error) {\n\tvalue, err := p.Source.String(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif value == nil {\n\t\treturn nil, nil\n\t}\n\n\tparsed, err := stringutil.ParseBool(*value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &parsed, nil\n}",
"func ConvertToBool(value string) bool {\n\tboolValue, err := strconv.ParseBool(value)\n\tif err != nil {\n\t\tutilsDiagnostics.ConvertToBoolErr(err, value)\n\t\treturn false\n\t}\n\treturn boolValue\n}",
"func IsBool(val any) bool {\n\tif _, ok := val.(bool); ok {\n\t\treturn true\n\t}\n\n\tif typVal, ok := val.(string); ok {\n\t\t_, err := strutil.ToBool(typVal)\n\t\treturn err == nil\n\t}\n\treturn false\n}",
"func StringBool(b bool) string {\n\tconst true = \"true\"\n\tconst false = \"false\"\n\tif b {\n\t\treturn true\n\t}\n\treturn false\n}",
"func Bool(name string) bool {\n\treturn strings.EqualFold(String(name), \"true\")\n}",
"func ExampleBool() {\n\n\t// Bool conversion from other bool values will be returned without\n\t// modification.\n\tfmt.Println(conv.Bool(true))\n\tfmt.Println(conv.Bool(false))\n\n\t// Bool conversion from strings consider the following values true:\n\t// \"t\", \"T\", \"true\", \"True\", \"TRUE\",\n\t// \t \"y\", \"Y\", \"yes\", \"Yes\", \"YES\", \"1\"\n\t//\n\t// It considers the following values false:\n\t// \"f\", \"F\", \"false\", \"False\", \"FALSE\",\n\t// \"n\", \"N\", \"no\", \"No\", \"NO\", \"0\"\n\tfmt.Println(conv.Bool(\"T\"))\n\tfmt.Println(conv.Bool(\"False\"))\n\n\t// Bool conversion from other supported types will return true unless it is\n\t// the zero value for the given type.\n\tfmt.Println(conv.Bool(int64(123)))\n\tfmt.Println(conv.Bool(int64(0)))\n\tfmt.Println(conv.Bool(time.Duration(123)))\n\tfmt.Println(conv.Bool(time.Duration(0)))\n\tfmt.Println(conv.Bool(time.Now()))\n\tfmt.Println(conv.Bool(time.Time{}))\n\n\t// All other types will return false.\n\tfmt.Println(conv.Bool(struct{ string }{\"\"}))\n\n\t// Output:\n\t// true <nil>\n\t// false <nil>\n\t// true <nil>\n\t// false <nil>\n\t// true <nil>\n\t// false <nil>\n\t// true <nil>\n\t// false <nil>\n\t// true <nil>\n\t// false <nil>\n\t// false cannot convert struct { string }{string:\"\"} (type struct { string }) to bool\n}",
"func BoolToBool(bool_ bool) bool {\n\treturn bool_\n}",
"func ParseToBool(data string) (bool, error) {\n\treturn strconv.ParseBool(data)\n}",
"func CastToBool(i interface{}) (bool, error) {\n\tstr := CastOrEmpty(i)\n\treturn strconv.ParseBool(str)\n}",
"func (ref *UIElement) ValueAsBool() (bool, error) {\n\treturn ref.BoolAttr(ValueAttribute)\n}",
"func (nvp *NameValues) Bool(name string) (bool, bool) {\n\tvalue, _ := nvp.String(name)\n\treturn (value == \"true\" || value == \"yes\" || value == \"1\" || value == \"-1\" || value == \"on\"), true\n}",
"func ToBool(v interface{}) bool {\n\tswitch v.(type) {\n\tcase bool:\n\t\treturn v.(bool)\n\tcase string:\n\t\tif strings.ToUpper(v.(string)) == \"FALSE\" {\n\t\t\treturn false\n\t\t}\n\t\tif strings.ToUpper(v.(string)) == \"TRUE\" {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase int:\n\t\treturn v.(int) > 0\n\tcase int64:\n\t\treturn v.(int64) > 0\n\tcase float64:\n\t\treturn v.(float64) > 0\n\tdefault:\n\t\treturn false\n\t}\n}",
"func (f *Form) Bool(param string, defaultValue bool) bool {\n\tvals, ok := f.values[param]\n\tif !ok {\n\t\treturn defaultValue\n\t}\n\tparamVal, err := strconv.ParseBool(vals[0])\n\tif err != nil {\n\t\tf.err = err\n\t\treturn defaultValue\n\t}\n\treturn paramVal\n}",
"func GetBool(v interface{}) bool {\n\tswitch result := v.(type) {\n\tcase bool:\n\t\treturn result\n\tdefault:\n\t\tif d := GetString(v); d != \"\" {\n\t\t\tvalue, _ := strconv.ParseBool(d)\n\t\t\treturn value\n\t\t}\n\t}\n\treturn false\n}",
"func normalizeBool(value string) string {\n\tif contains(strings.ToLower(value), truthy) {\n\t\treturn \"true\"\n\t}\n\treturn \"false\"\n}",
"func ParseBool(s string) (bool, error) {\n\tswitch s {\n\tdefault:\n\t\tb, err := strconv.ParseBool(s)\n\t\tif err != nil {\n\t\t\treturn b, errz.Err(err)\n\t\t}\n\t\treturn b, nil\n\tcase \"1\", \"yes\", \"Yes\", \"YES\", \"y\", \"Y\":\n\t\treturn true, nil\n\tcase \"0\", \"no\", \"No\", \"NO\", \"n\", \"N\":\n\t\treturn false, nil\n\t}\n}",
"func Bool(b bool) string {\n\treturn strconv.FormatBool(b)\n}",
"func ParseBool(str string) bool {\n\tb, _ := strconv.ParseBool(str)\n\treturn b\n}",
"func getEnvAsBool(name string, defaultVal bool) bool {\n\tvalStr := getEnv(name, \"\")\n\tif val, err := strconv.ParseBool(valStr); err == nil {\n\t\treturn val\n\t}\n\n\treturn defaultVal\n}",
"func getEnvAsBool(name string, defaultVal bool) bool {\n\tvalStr := getEnv(name, \"\")\n\tif val, err := strconv.ParseBool(valStr); err == nil {\n\t\treturn val\n\t}\n\n\treturn defaultVal\n}",
"func TestToBool(t *testing.T) {\n\t// conversion 0 to false\n\tresult := evaluator.ToBool(0)\n\tassert.False(t, result)\n\n\t// conversion 1 to true\n\tresult = evaluator.ToBool(1)\n\tassert.True(t, result)\n}",
"func (data *Data) Bool(s ...string) bool {\n\treturn data.Interface(s...).(bool)\n}",
"func ToBoolean(value interface{}) bool {\n\treturn ToBooleanWithDefault(value, false)\n}",
"func parseBool(content []byte, aggErr *AggregateError) bool {\n result, err := strconv.ParseBool(string(content))\n if err != nil {\n aggErr.Append(err)\n }\n return result\n}",
"func (setGroup *SettingGroup) GetValueAsBool(id string) (bool, error) {\n\t//Get the value\n\tvalue, err := setGroup.GetValueAsString(id)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t//Now convert to bool\n\tvalueBool, err := strconv.ParseBool(value)\n\treturn valueBool, nil\n\n}",
"func AssertBool(s string) bool {\n\tb, err := strconv.ParseBool(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn b\n}",
"func AsBool() Option {\n\treturn func(c *conf.Config) {\n\t\tc.Expect = reflect.Bool\n\t}\n}",
"func BoolStrict(name string, defaultValue bool) (bool, error) {\n\tif strVal, ok := os.LookupEnv(name); ok {\n\t\tres, err := strconv.ParseBool(strVal)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\treturn res, nil\n\t}\n\n\treturn defaultValue, nil\n}",
"func ConvBool(p payload.Safe) (bool, error) {\n\ts, ok := p.(bwrap)\n\tif !ok {\n\t\treturn false, errors.New(\"payload is not a string\")\n\t}\n\n\treturn bool(s), nil\n}",
"func ParseBool(str string) (bool, error) {\n\tif str == \"on\" {\n\t\treturn true, nil\n\t}\n\tif str == \"off\" {\n\t\treturn false, nil\n\t}\n\treturn strconv.ParseBool(str)\n}",
"func TestBool(tst *testing.T) {\n\n\t// Test bool\n\tb, err := StringToBool(\"true\")\n\tbrtesting.AssertEqual(tst, err, nil, \"StringToBool failed\")\n\tbrtesting.AssertEqual(tst, b, true, \"StringToBool failed\")\n\tb, err = StringToBool(\"True\")\n\tbrtesting.AssertEqual(tst, err, nil, \"StringToBool failed\")\n\tbrtesting.AssertEqual(tst, b, true, \"StringToBool failed\")\n\tb, err = StringToBool(\"TruE\")\n\tbrtesting.AssertEqual(tst, err, nil, \"StringToBool failed\")\n\tbrtesting.AssertEqual(tst, b, true, \"StringToBool failed\")\n\tb, err = StringToBool(\"false\")\n\tbrtesting.AssertEqual(tst, err, nil, \"StringToBool failed\")\n\tbrtesting.AssertEqual(tst, b, false, \"StringToBool failed\")\n\tb, err = StringToBool(\"go-bedrock\")\n\tbrtesting.AssertNotEqual(tst, err, nil, \"StringToBool failed\")\n}",
"func (fa formulaArg) ToBool() formulaArg {\n\tvar b bool\n\tvar err error\n\tswitch fa.Type {\n\tcase ArgString:\n\t\tb, err = strconv.ParseBool(fa.String)\n\t\tif err != nil {\n\t\t\treturn newErrorFormulaArg(formulaErrorVALUE, err.Error())\n\t\t}\n\tcase ArgNumber:\n\t\tif fa.Boolean && fa.Number == 1 {\n\t\t\tb = true\n\t\t}\n\t}\n\treturn newBoolFormulaArg(b)\n}",
"func Bool(name string) (bool, error) {\n\tv, err := getenv(name)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn strconv.ParseBool(v)\n}",
"func formatBool(v bool) string {\n\tif v {\n\t\treturn \"true\"\n\t}\n\treturn \"false\"\n}",
"func BoolStr(b bool, s *string) {\n\tif b {\n\t\t*s = \"true\"\n\t} else {\n\t\t*s = \"false\"\n\t}\n}",
"func (a Attributes) GetAsBoolWithDefault(key string, defaultValue bool) bool {\n\tswitch v := a[key].(type) {\n\tcase bool:\n\t\treturn v\n\tcase string:\n\t\tif result, err := strconv.ParseBool(v); err == nil {\n\t\t\treturn result\n\t\t}\n\t}\n\treturn defaultValue\n}",
"func stringifyBool(b bool) string {\n\tif b {\n\t\treturn \"True\"\n\t}\n\treturn \"False\"\n}",
"func (formatter) fBool(v *types.RecordValue) *types.RecordValue {\n\tif v.Value != strBoolTrue {\n\t\tv.Value = \"\"\n\t}\n\n\treturn v\n}",
"func ParseBoolean(s string) Boolean {\n\tif s == \"\" {\n\t\treturn NullBoolean()\n\t}\n\n\treturn NewBoolean(s == \"true\")\n}",
"func (r *Response) Bool() (bool, error) {\n\treturn strconv.ParseBool(r.String())\n}",
"func ParseBool(str string) (val bool, isBool bool) {\n\t// Note: Not using strconv.ParseBool because I want it a bit looser (any casing) and to allow yes/no/off/on values.\n\tlstr := strings.ToLower(strings.TrimSpace(str))\n\tswitch lstr {\n\tcase \"false\", \"f\", \"0\", \"no\", \"n\", \"off\":\n\t\tisBool = true\n\tcase \"true\", \"t\", \"1\", \"yes\", \"y\", \"on\":\n\t\tval = true\n\t\tisBool = true\n\t}\n\treturn\n}",
"func CastBool(val interface{}) (bool, bool) {\n\tswitch val.(type) {\n\tcase bool:\n\t\treturn val.(bool), true\n\tcase int:\n\t\treturn val.(int) != 0, true\n\tcase int8:\n\t\treturn val.(int8) != 0, true\n\tcase int16:\n\t\treturn val.(int16) != 0, true\n\tcase int32:\n\t\treturn val.(int32) != 0, true\n\tcase int64:\n\t\treturn val.(int64) != 0, true\n\tcase uint:\n\t\treturn val.(uint) != 0, true\n\tcase uint8:\n\t\treturn val.(uint8) != 0, true\n\tcase uint16:\n\t\treturn val.(uint16) != 0, true\n\tcase uint32:\n\t\treturn val.(uint32) != 0, true\n\tcase uint64:\n\t\treturn val.(uint64) != 0, true\n\tcase float32:\n\t\treturn val.(float32) != 0, true\n\tcase float64:\n\t\treturn val.(float64) != 0, true\n\tcase string:\n\t\tif bval, err := strconv.ParseBool(val.(string)); err != nil {\n\t\t\tif fval, ok := CastFloat(val.(string)); ok {\n\t\t\t\treturn fval != 0, true\n\t\t\t}\n\t\t\treturn false, false\n\t\t} else {\n\t\t\treturn bval, true\n\t\t}\n\t}\n\treturn false, false\n}",
"func IsBool(val interface{}) bool {\n\tif _, ok := val.(bool); ok {\n\t\treturn true\n\t}\n\n\tif typVal, ok := val.(string); ok {\n\t\t_, err := ToBool(typVal)\n\t\treturn err == nil\n\t}\n\treturn false\n}",
"func (v Value) Bool(defaults ...bool) bool {\n\t// Return the first default if the raw is undefined\n\tif v.raw == nil {\n\t\t// Make sure there's at least one thing in the list\n\t\tdefaults = append(defaults, false)\n\t\treturn defaults[0]\n\t}\n\n\tswitch t := v.raw.(type) {\n\tcase string:\n\t\tb, err := strconv.ParseBool(t)\n\t\tif err != nil {\n\t\t\tslog.Panicf(\"failed to parse bool: %v\", err)\n\t\t}\n\t\treturn b\n\n\tcase bool:\n\t\treturn t\n\n\tdefault:\n\t\tslog.Panicf(\"%v is of unsupported type %v\", t, reflect.TypeOf(t).String())\n\t}\n\n\treturn false\n}",
"func ConvertToBool(value interface{}) (bool, bool) {\n\tif v, ok := value.(bool); ok {\n\t\treturn v, ok\n\t}\n\n\t// try converting \"true\" \"false\"\n\tif v, ok := value.(string); ok {\n\t\tif strings.ToLower(v) == \"true\" {\n\t\t\treturn true, true\n\t\t} else if strings.ToLower(v) == \"false\" {\n\t\t\treturn false, true\n\t\t}\n\t}\n\n\t// try convert as number\n\tv, ok := ConvertToInt8(value)\n\tif ok {\n\t\tif v == 1 {\n\t\t\treturn true, true\n\t\t} else if v == 0 {\n\t\t\treturn false, true\n\t\t}\n\t}\n\n\treturn false, false\n}",
"func (me TdtypeType) IsBoolean() bool { return me.String() == \"boolean\" }",
"func StringToBoolWithFormat(str String, format string) Bool {\n\tif format == \"%t\" { // Same as not using custom format.\n\t\treturn StringToBool(str)\n\t}\n\n\tv := &stringToBool{from: str, format: format}\n\tstr.AddListener(v)\n\treturn v\n}",
"func (d Driver) BoolString(v bool) string {\n\treturn strconv.FormatBool(v)\n}",
"func FakeBool(v interface{}) bool {\n\tswitch r := v.(type) {\n\tcase float64:\n\t\treturn r != 0\n\tcase string:\n\t\treturn r != \"\"\n\tcase bool:\n\t\treturn r\n\tcase nil:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}",
"func (f *FlagSet) Bool(name string) bool {\n\tvalue := f.String(name)\n\tif value != \"\" {\n\t\tval, err := strconv.ParseBool(value)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\treturn val\n\t}\n\treturn false\n}",
"func (res Response) AsBool() (bool, error) {\n\treturn res.Bits.AsBool(), res.Error\n}",
"func YesNo2Bool(val string) bool {\n\tif val != \"\" {\n\t\tif strings.ToLower(val) == \"yes\" {\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\treturn false\n\t}\n}",
"func ToBool(i interface{}) (bool, error) {\n\ti = indirect(i)\n\n\tswitch b := i.(type) {\n\tcase bool:\n\t\treturn b, nil\n\tcase nil:\n\t\treturn false, nil\n\tcase int:\n\t\tif i.(int) != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\tcase string:\n\t\treturn strconv.ParseBool(i.(string))\n\tdefault:\n\t\treturn false, fmt.Errorf(\"unable to cast %#v to bool\", i)\n\t}\n}"
] | [
"0.76321614",
"0.7480224",
"0.7370424",
"0.7344179",
"0.729552",
"0.7221894",
"0.7180088",
"0.7171699",
"0.71345377",
"0.7087665",
"0.70560706",
"0.70353085",
"0.7016963",
"0.7016963",
"0.69989306",
"0.6966437",
"0.6965041",
"0.6953353",
"0.69252783",
"0.6895328",
"0.68600845",
"0.68598616",
"0.68428993",
"0.68333215",
"0.6791774",
"0.67740434",
"0.67525893",
"0.6751817",
"0.67463815",
"0.6715709",
"0.67108727",
"0.6699126",
"0.66969556",
"0.6679258",
"0.6666134",
"0.6650232",
"0.66473275",
"0.6643164",
"0.66400635",
"0.6638218",
"0.66367465",
"0.663148",
"0.6614024",
"0.6608295",
"0.6589852",
"0.65792376",
"0.65463316",
"0.6543714",
"0.653524",
"0.65288687",
"0.65236133",
"0.6522243",
"0.65146446",
"0.65097064",
"0.6509268",
"0.64971936",
"0.6490878",
"0.6472862",
"0.64467645",
"0.6445549",
"0.64440507",
"0.64408934",
"0.64179784",
"0.64118516",
"0.64024806",
"0.63837755",
"0.63837755",
"0.6379381",
"0.63720506",
"0.6360922",
"0.6358412",
"0.6349097",
"0.63476634",
"0.63462204",
"0.6343388",
"0.6341356",
"0.6327867",
"0.6320456",
"0.6311369",
"0.6308425",
"0.6304961",
"0.63037133",
"0.63029945",
"0.6300882",
"0.62964827",
"0.6294834",
"0.6294422",
"0.62896305",
"0.6289209",
"0.6287016",
"0.6275975",
"0.6272531",
"0.6253642",
"0.62410384",
"0.6236598",
"0.6233495",
"0.6226951",
"0.6220002",
"0.621444",
"0.6207518"
] | 0.8361102 | 0 |
ParseAdvertiseAddr validates an advertise address and makes sure it is not an unreachable or multicast address; it returns the address split into host and port, where port may be empty if not specified | func ParseAdvertiseAddr(advertiseIP string) (string, string, error) {
advertiseIP = strings.TrimSpace(advertiseIP)
host := advertiseIP
port := ""
if len(net.ParseIP(host)) == 0 && strings.Contains(advertiseIP, ":") {
var err error
host, port, err = net.SplitHostPort(advertiseIP)
if err != nil {
return "", "", trace.BadParameter("failed to parse address %q", advertiseIP)
}
if _, err := strconv.Atoi(port); err != nil {
return "", "", trace.BadParameter("bad port %q, expected integer", port)
}
if host == "" {
return "", "", trace.BadParameter("missing host parameter")
}
}
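	// If the host is an IP, reject values other nodes cannot reach: unspecified (0.0.0.0, ::) and multicast addresses.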
ip := net.ParseIP(host)
if len(ip) != 0 {
if ip.IsUnspecified() || ip.IsMulticast() {
return "", "", trace.BadParameter("unreachable advertise IP: %v", advertiseIP)
}
}
return host, port, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func parseAdvertiseAddr(advAddr string, port int) (string, int) {\n\treturn advAddr, port\n\n\t// bug: if use domain, always return empty host\n\t/*m, e := regexp.Match(ipv4Pattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\treturn advAddr, port\n\t}\n\n\tm, e1 := regexp.Match(ipv4WithPortPattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e1 != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\t// 1 5\n\t\tregxp := regexp.MustCompile(ipv4WithPortPattern)\n\t\tadAddr := regxp.ReplaceAllString(advAddr, \"${1}\")\n\t\tadPort, _ := strconv.Atoi(regxp.ReplaceAllString(advAddr, \"${5}\"))\n\t\treturn adAddr, adPort\n\t}\n\treturn \"\", port*/\n}",
"func parseAdvertiseAddr(advAddr string, port int) (string, int) {\n\tm, e := regexp.Match(ipv4Pattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\treturn advAddr, port\n\t}\n\n\tm, e1 := regexp.Match(ipv4WithPortPattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e1 != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\t// 1 5\n\t\tregxp := regexp.MustCompile(ipv4WithPortPattern)\n\t\tadAddr := regxp.ReplaceAllString(advAddr, \"${1}\")\n\t\tadPort, _ := strconv.Atoi(regxp.ReplaceAllString(advAddr, \"${5}\"))\n\t\treturn adAddr, adPort\n\t}\n\treturn \"\", port\n}",
"func calculateAdvertiseAddress(bindAddr, advertiseAddr string) (net.IP, error) {\n\tif advertiseAddr != \"\" {\n\t\tip := net.ParseIP(advertiseAddr)\n\t\tif ip == nil {\n\t\t\treturn nil, errors.Errorf(\"failed to parse advertise addr '%s'\", advertiseAddr)\n\t\t}\n\t\tif ip4 := ip.To4(); ip4 != nil {\n\t\t\tip = ip4\n\t\t}\n\t\treturn ip, nil\n\t}\n\n\tif isAny(bindAddr) {\n\t\tprivateIP, err := getPrivateAddress()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to get private IP\")\n\t\t}\n\t\tif privateIP == \"\" {\n\t\t\treturn nil, errors.New(\"no private IP found, explicit advertise addr not provided\")\n\t\t}\n\t\tip := net.ParseIP(privateIP)\n\t\tif ip == nil {\n\t\t\treturn nil, errors.Errorf(\"failed to parse private IP '%s'\", privateIP)\n\t\t}\n\t\treturn ip, nil\n\t}\n\n\tip := net.ParseIP(bindAddr)\n\tif ip == nil {\n\t\treturn nil, errors.Errorf(\"failed to parse bind addr '%s'\", bindAddr)\n\t}\n\treturn ip, nil\n}",
"func parseHost(addr string) string {\n\tvar (\n\t\thost, port string\n\t\tdefaultAssigned bool\n\t)\n\n\tv := strings.Split(addr, \":\")\n\n\tswitch len(v) {\n\tcase 2:\n\t\thost = v[0]\n\t\tport = v[1]\n\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif port == \"\" {\n\t\t\tport = _DEFAULT_PORT\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif defaultAssigned == false {\n\t\t\treturn addr // addr is already in required format\n\t\t}\n\t\tbreak\n\n\tcase 1:\n\t\thost = v[0]\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t}\n\t\tport = _DEFAULT_PORT\n\tcase 0:\n\t\tfallthrough\n\tdefault:\n\t\thost = _DEFAULT_HOST\n\t\tport = _DEFAULT_PORT\n\t\tbreak\n\t}\n\treturn strings.Join([]string{host, port}, \":\")\n}",
"func ParseAddress(address string) (*Address, errors.TracerError) {\n\taddr := &Address{}\n\tif ValidateIPv6Address(address) {\n\t\tclean, testPort := cleanIPv6(address)\n\t\thasPort := false\n\t\tport := 0\n\t\tif testPort > 0 {\n\t\t\thasPort = true\n\t\t\tport = testPort\n\t\t}\n\t\treturn &Address{Host: clean, Port: port, IsIPv6: true, HasPort: hasPort}, nil\n\t}\n\tcolons := strings.Count(address, \":\")\n\tif colons > 1 {\n\t\treturn nil, errors.New(\"Invalid address: too many colons '%s'\", address)\n\t} else if colons == 0 {\n\t\treturn &Address{Host: address, HasPort: false}, nil\n\t}\n\tsplit := strings.Split(address, \":\")\n\taddr.Host = split[0]\n\tport, err := strconv.Atoi(split[1])\n\tif err != nil {\n\t\treturn nil, errors.New(\"address '%s' is invalid: could not parse port data, %s\", address, err)\n\t}\n\tif port <= 0 || port > math.MaxUint16 {\n\t\treturn nil, errors.New(\"port '%d' is not a valid port number, must be uint16\", port)\n\t}\n\taddr.Port = port\n\taddr.HasPort = true\n\treturn addr, nil\n}",
"func CalculateAdvertiseIP(bindHost, advertiseHost string, resolver Resolver, logger log.Logger) (net.IP, error) {\n\t// Prefer advertise host, if it's given.\n\tif advertiseHost != \"\" {\n\t\t// Best case: parse a plain IP.\n\t\tif ip := net.ParseIP(advertiseHost); ip != nil {\n\t\t\tif ip4 := ip.To4(); ip4 != nil {\n\t\t\t\tip = ip4\n\t\t\t}\n\t\t\treturn ip, nil\n\t\t}\n\n\t\t// Otherwise, try to resolve it as if it's a hostname.\n\t\tips, err := resolver.LookupIPAddr(context.Background(), advertiseHost)\n\t\tif err == nil && len(ips) == 1 {\n\t\t\tif ip4 := ips[0].IP.To4(); ip4 != nil {\n\t\t\t\tips[0].IP = ip4\n\t\t\t}\n\t\t\treturn ips[0].IP, nil\n\t\t}\n\n\t\t// Didn't work, fall back to the bind host.\n\t\tif err == nil && len(ips) != 1 {\n\t\t\terr = fmt.Errorf(\"advertise host '%s' resolved to %d IPs\", advertiseHost, len(ips))\n\t\t}\n\t\tlevel.Warn(logger).Log(\"err\", err, \"msg\", \"falling back to bind host\")\n\t}\n\n\t// If bind host is all-zeroes, try to get a private IP.\n\tif bindHost == \"0.0.0.0\" {\n\t\tprivateIP, err := sockaddr.GetPrivateIP()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to deduce private IP from all-zeroes bind address\")\n\t\t}\n\t\tif privateIP == \"\" {\n\t\t\treturn nil, errors.Wrap(err, \"no private IP found, and explicit advertise address not provided\")\n\t\t}\n\t\tip := net.ParseIP(privateIP)\n\t\tif ip == nil {\n\t\t\treturn nil, errors.Errorf(\"failed to parse private IP '%s'\", privateIP)\n\t\t}\n\t\treturn ip, nil\n\t}\n\n\t// Otherwise, try to parse the bind host as an IP.\n\tif ip := net.ParseIP(bindHost); ip != nil {\n\t\treturn ip, nil\n\t}\n\n\t// And finally, try to resolve the bind host.\n\tips, err := resolver.LookupIPAddr(context.Background(), bindHost)\n\tif err == nil && len(ips) == 1 {\n\t\tif ip4 := ips[0].IP.To4(); ip4 != nil {\n\t\t\tips[0].IP = ip4\n\t\t}\n\t\treturn ips[0].IP, nil\n\t}\n\n\t// Didn't work. This time it's fatal.\n\tif err == nil && len(ips) != 1 {\n\t\terr = fmt.Errorf(\"bind host '%s' resolved to %d IPs\", bindHost, len(ips))\n\t}\n\treturn nil, errors.Wrap(err, \"bind host failed to resolve\")\n}",
"func (c *OneConnection) ParseAddr(pl []byte) {\n\tb := bytes.NewBuffer(pl)\n\tcnt, _ := btc.ReadVLen(b)\n\tfor i := 0; i < int(cnt); i++ {\n\t\tvar buf [30]byte\n\t\tn, e := b.Read(buf[:])\n\t\tif n != len(buf) || e != nil {\n\t\t\tcommon.CountSafe(\"AddrError\")\n\t\t\tc.DoS(\"AddrError\")\n\t\t\t//println(\"ParseAddr:\", n, e)\n\t\t\tbreak\n\t\t}\n\t\ta := peersdb.NewPeer(buf[:])\n\t\tif !sys.ValidIp4(a.Ip4[:]) {\n\t\t\tcommon.CountSafe(\"AddrInvalid\")\n\t\t\t/*if c.Misbehave(\"AddrLocal\", 1) {\n\t\t\t\tbreak\n\t\t\t}*/\n\t\t\t//print(c.PeerAddr.Ip(), \" \", c.Node.Agent, \" \", c.Node.Version, \" addr local \", a.String(), \"\\n> \")\n\t\t} else if time.Unix(int64(a.Time), 0).Before(time.Now().Add(time.Hour)) {\n\t\t\tif time.Now().Before(time.Unix(int64(a.Time), 0).Add(peersdb.ExpirePeerAfter)) {\n\t\t\t\tk := qdb.KeyType(a.UniqID())\n\t\t\t\tv := peersdb.PeerDB.Get(k)\n\t\t\t\tif v != nil {\n\t\t\t\t\ta.Banned = peersdb.NewPeer(v[:]).Banned\n\t\t\t\t}\n\t\t\t\ta.Time = uint32(time.Now().Add(-5 * time.Minute).Unix()) // add new peers as not just alive\n\t\t\t\tif a.Time > uint32(time.Now().Unix()) {\n\t\t\t\t\tprintln(\"wtf\", a.Time, time.Now().Unix())\n\t\t\t\t}\n\t\t\t\tpeersdb.PeerDB.Put(k, a.Bytes())\n\t\t\t} else {\n\t\t\t\tcommon.CountSafe(\"AddrStale\")\n\t\t\t}\n\t\t} else {\n\t\t\tif c.Misbehave(\"AddrFuture\", 50) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}",
"func ParseAddress(address string) (string, string) {\n\tsplit := strings.Split(address, \":\")\n\tip := split[0]\n\tport := split[1]\n\n\treturn ip, port\n}",
"func parseListeningAddress(ctx *context.T, laddress string) (network string, address string, p flow.Protocol, err error) {\n\tparts := strings.SplitN(laddress, \"/\", 2)\n\tif len(parts) != 2 {\n\t\treturn \"\", \"\", nil, ErrorfInvalidAddress(ctx, \"invalid vine address %v, address must be of the form 'network/address/tag'\", laddress)\n\t}\n\tp, _ = flow.RegisteredProtocol(parts[0])\n\tif p == nil {\n\t\treturn \"\", \"\", nil, ErrorfNoRegisteredProtocol(ctx, \"no registered protocol: %v\", parts[0])\n\t}\n\treturn parts[0], parts[1], p, nil\n}",
"func parseAddr(addr string) (string, string) {\n\tparsed := strings.SplitN(addr, \":\", 2)\n\treturn parsed[0], parsed[1]\n}",
"func ParseAddress(addr string) Address {\n\t// Handle IPv6 address in form as \"[2001:4860:0:2001::68]\"\n\tlenAddr := len(addr)\n\tif lenAddr > 0 && addr[0] == '[' && addr[lenAddr-1] == ']' {\n\t\taddr = addr[1 : lenAddr-1]\n\t}\n\taddr = strings.TrimSpace(addr)\n\n\tip := net.ParseIP(addr)\n\tif ip != nil {\n\t\treturn IPAddress(ip)\n\t}\n\treturn DomainAddress(addr)\n}",
"func ParseAddr(s string) (Addr, error) {\n\tcomma := strings.IndexByte(s, ',')\n\tif comma < 0 {\n\t\treturn Addr{}, serrors.New(\"invalid address: expected comma\", \"value\", s)\n\t}\n\tia, err := ParseIA(s[0:comma])\n\tif err != nil {\n\t\treturn Addr{}, err\n\t}\n\th, err := ParseHost(s[comma+1:])\n\tif err != nil {\n\t\treturn Addr{}, err\n\t}\n\treturn Addr{IA: ia, Host: h}, nil\n}",
"func extractAddress(str string) string {\n\tvar addr string\n\n\tswitch {\n\tcase strings.Contains(str, `]`):\n\t\t// IPv6 address [2001:db8::1%lo0]:48467\n\t\taddr = strings.Split(str, `]`)[0]\n\t\taddr = strings.Split(addr, `%`)[0]\n\t\taddr = strings.TrimLeft(addr, `[`)\n\tdefault:\n\t\t// IPv4 address 192.0.2.1:48467\n\t\taddr = strings.Split(str, `:`)[0]\n\t}\n\treturn addr\n}",
"func extractAddress(str string) string {\n\tvar addr string\n\n\tswitch {\n\tcase strings.Contains(str, `]`):\n\t\t// IPv6 address [2001:db8::1%lo0]:48467\n\t\taddr = strings.Split(str, `]`)[0]\n\t\taddr = strings.Split(addr, `%`)[0]\n\t\taddr = strings.TrimLeft(addr, `[`)\n\tdefault:\n\t\t// IPv4 address 192.0.2.1:48467\n\t\taddr = strings.Split(str, `:`)[0]\n\t}\n\treturn addr\n}",
"func AdvertiseHost(listen string) string {\n\tif listen == \"0.0.0.0\" {\n\t\taddrs, err := net.InterfaceAddrs()\n\t\tif err != nil || len(addrs) == 0 {\n\t\t\treturn \"localhost\"\n\t\t}\n\n\t\tfor _, addr := range addrs {\n\t\t\tif ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() && ip.IP.To4() != nil {\n\t\t\t\treturn ip.IP.To4().String()\n\t\t\t}\n\t\t}\n\t\treturn \"localhost\"\n\t}\n\n\treturn listen\n}",
"func parseAddr(text string) (*net.TCPAddr, error) {\n\tif text[0] == ':' {\n\t\ttext = \"0.0.0.0\" + text\n\t}\n\n\taddr := strings.Replace(text, \"public\", address.External().String(), 1)\n\treturn net.ResolveTCPAddr(\"tcp\", addr)\n}",
"func SplitAddr(b []byte) Addr {\n\taddrLen := 1\n\tif len(b) < addrLen {\n\t\treturn nil\n\t}\n\n\tswitch b[0] {\n\tcase AtypDomainName:\n\t\tif len(b) < 2 {\n\t\t\treturn nil\n\t\t}\n\t\taddrLen = 1 + 1 + int(b[1]) + 2\n\tcase AtypIPv4:\n\t\taddrLen = 1 + net.IPv4len + 2\n\tcase AtypIPv6:\n\t\taddrLen = 1 + net.IPv6len + 2\n\tdefault:\n\t\treturn nil\n\n\t}\n\n\tif len(b) < addrLen {\n\t\treturn nil\n\t}\n\n\treturn b[:addrLen]\n}",
"func parseEPRTtoAddr(line string) (string, string, error) {\n\taddr := strings.Split(line, \"|\")\n\n\tif len(addr) != 5 {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t}\n\n\tnetProtocol := addr[1]\n\tIP := addr[2]\n\n\t// check port is valid\n\tport := addr[3]\n\tif integerPort, err := strconv.Atoi(port); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t} else if integerPort <= 0 || integerPort > 65535 {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t}\n\n\tswitch netProtocol {\n\tcase \"1\", \"2\":\n\t\t// use protocol 1 means IPv4. 2 means IPv6\n\t\t// net.ParseIP for validate IP\n\t\tif net.ParseIP(IP) == nil {\n\t\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t\t}\n\t\tbreak\n\tdefault:\n\t\t// wrong network protocol\n\t\treturn \"\", \"\", fmt.Errorf(\"unknown network protocol\")\n\t}\n\n\treturn IP, port, nil\n}",
"func ParseCliAddr(ctx *cli.Context) (string, string) {\n\treturn ctx.GlobalString(\"address\"), ctx.GlobalString(\"port\")\n}",
"func (b *Backend) ParseAddress(addr string) (err error) {\n\tif b.Addr, err = url.Parse(addr); err != nil {\n\t\treturn err\n\t}\n\n\tif b.Addr.Scheme == \"\" {\n\t\tb.Addr.Scheme = \"http\"\n\t}\n\n\thttps := b.Addr.Scheme == \"https\"\n\tb.Host = b.Addr.Host\n\n\tif b.Addr.Port() == \"\" {\n\t\tif https {\n\t\t\tb.Host += \":443\"\n\t\t} else {\n\t\t\tb.Host += \":80\"\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (_BaseAccessWallet *BaseAccessWalletFilterer) ParseDbgAddress(log types.Log) (*BaseAccessWalletDbgAddress, error) {\n\tevent := new(BaseAccessWalletDbgAddress)\n\tif err := _BaseAccessWallet.contract.UnpackLog(event, \"dbgAddress\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func (p *AddressParser) Parse(address string) (*Address, error)",
"func ParseAddress(s string) (Address, error) {\n\n\tvar family uint8\n\tvar sn uint64\n\tvar crcStr string\n\tcnt, err := fmt.Sscanf(s, \"%x.%x.%s\", &family, &sn, &crcStr)\n\n\tif (nil != err) || (3 != cnt) || (sn != (0xffffffffffff & sn)) {\n\t\treturn 0, errors.New(\"onewire: invalid address \" + s)\n\t}\n\ta := sn<<8 | (uint64(family) << 56)\n\n\tbuf := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(buf, sn<<8|(uint64(family)<<56))\n\n\tcrc := RevCrc8(buf[1:])\n\n\tif \"--\" != crcStr {\n\t\tvar c uint8\n\t\tcnt, err = fmt.Sscanf(crcStr, \"%x\", &c)\n\t\tif c != crc {\n\t\t\treturn 0, errors.New(\"onewire: invalid crc \" + s)\n\t\t}\n\t}\n\n\ta |= 0xff & uint64(crc)\n\n\treturn Address(a), nil\n}",
"func parseInetAddr(af int, b []byte) (Addr, error) {\n\tswitch af {\n\tcase syscall.AF_INET:\n\t\tif len(b) < sizeofSockaddrInet {\n\t\t\treturn nil, errInvalidAddr\n\t\t}\n\t\ta := &Inet4Addr{}\n\t\tcopy(a.IP[:], b[4:8])\n\t\treturn a, nil\n\tcase syscall.AF_INET6:\n\t\tif len(b) < sizeofSockaddrInet6 {\n\t\t\treturn nil, errInvalidAddr\n\t\t}\n\t\ta := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))}\n\t\tcopy(a.IP[:], b[8:24])\n\t\tif a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) {\n\t\t\t// KAME based IPv6 protocol stack usually\n\t\t\t// embeds the interface index in the\n\t\t\t// interface-local or link-local address as\n\t\t\t// the kernel-internal form.\n\t\t\tid := int(bigEndian.Uint16(a.IP[2:4]))\n\t\t\tif id != 0 {\n\t\t\t\ta.ZoneID = id\n\t\t\t\ta.IP[2], a.IP[3] = 0, 0\n\t\t\t}\n\t\t}\n\t\treturn a, nil\n\tdefault:\n\t\treturn nil, errInvalidAddr\n\t}\n}",
"func parseEnsAPIAddress(s string) (tld, endpoint string, addr common.Address) {\n\tisAllLetterString := func(s string) bool {\n\t\tfor _, r := range s {\n\t\t\tif !unicode.IsLetter(r) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\tendpoint = s\n\tif i := strings.Index(endpoint, \":\"); i > 0 {\n\t\tif isAllLetterString(endpoint[:i]) && len(endpoint) > i+2 && endpoint[i+1:i+3] != \"//\" {\n\t\t\ttld = endpoint[:i]\n\t\t\tendpoint = endpoint[i+1:]\n\t\t}\n\t}\n\tif i := strings.Index(endpoint, \"@\"); i > 0 {\n\t\taddr = common.HexToAddress(endpoint[:i])\n\t\tendpoint = endpoint[i+1:]\n\t}\n\treturn\n}",
"func ParseAddress(addr string) (*Address, error) {\n\taddr = strings.ToUpper(addr)\n\tl := len(addr)\n\tif l < 50 {\n\t\treturn nil, InvalidAccountAddrError{reason: \"length\"}\n\t}\n\ti := l - 50 // start index of hex\n\n\tidh, err := hex.DecodeString(addr[i:])\n\tif err != nil {\n\t\treturn nil, InvalidAccountAddrError{reason: \"hex\"}\n\t}\n\n\t_addr := &Address{}\n\t_addr.Code = addr[0:i]\n\t_addr.Type = AccountType(idh[0])\n\t_addr.Hash = idh[1:]\n\n\tif err = _addr.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn _addr, nil\n}",
"func (_TokenVesting *TokenVestingCaller) ParseAddr(opts *bind.CallOpts, data []byte) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _TokenVesting.contract.Call(opts, out, \"parseAddr\", data)\n\treturn *ret0, err\n}",
"func parseBindAddr(s string) (address net.Addr, err error) {\n\tconst maxUnixLen = 106\n\n\t// '@' prefix specifies a Linux abstract domain socket.\n\tif runtime.GOOS == \"linux\" && strings.HasPrefix(s, \"@\") {\n\t\tif len(s) > maxUnixLen {\n\t\t\treturn nil, fmt.Errorf(\"sock file length must be less than %d characters\", maxUnixLen)\n\t\t}\n\t\treturn &net.UnixAddr{Name: s, Net: \"unix\"}, nil\n\t}\n\n\tif strings.Contains(s, \"/\") {\n\t\tif !filepath.IsAbs(s) {\n\t\t\treturn nil, errors.New(\"sock file must be an absolute path\")\n\t\t} else if len(s) > maxUnixLen {\n\t\t\treturn nil, fmt.Errorf(\"sock file length must be less than %d characters\", maxUnixLen)\n\t\t}\n\t\treturn &net.UnixAddr{Name: s, Net: \"unix\"}, nil\n\t}\n\n\t// For TCP, the supplied address string, s, is one of a port, a :port, or a host:port.\n\tip, port := net.IPv4(127, 0, 0, 1), 0\n\n\tif strings.Contains(s, \":\") {\n\t\thost, portString, err := net.SplitHostPort(s)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid addr %q - must be provided as host:port\", s)\n\t\t}\n\t\tif host != \"\" {\n\t\t\tip = net.ParseIP(host)\n\t\t}\n\n\t\tport, err = strconv.Atoi(portString)\n\t} else {\n\t\tport, err = strconv.Atoi(s)\n\t}\n\n\tif err != nil || port < 1 || port > 65534 {\n\t\treturn nil, fmt.Errorf(\"invalid port %d - must be between 1 and 65534\", port)\n\t}\n\treturn &net.TCPAddr{IP: ip, Port: port}, nil\n}",
"func parseDialingAddress(ctx *context.T, vaddress string) (network string, address string, tag string, p flow.Protocol, err error) {\n\tparts := strings.SplitN(vaddress, \"/\", 3)\n\tif len(parts) != 3 {\n\t\treturn \"\", \"\", \"\", nil, ErrorfInvalidAddress(ctx, \"invalid vine address %v, address must be of the form 'network/address/tag'\", vaddress)\n\t}\n\tp, _ = flow.RegisteredProtocol(parts[0])\n\tif p == nil {\n\t\treturn \"\", \"\", \"\", nil, ErrorfNoRegisteredProtocol(ctx, \"no registered protocol: %v\", parts[0])\n\t}\n\treturn parts[0], parts[1], parts[2], p, nil\n}",
"func splitAddr(v string) (network, addr string, err error) {\n\tep := strings.Split(v, \"://\")\n\tif len(ep) != 2 {\n\t\terr = errInvalidAddress\n\t\treturn network, addr, err\n\t}\n\tnetwork = ep[0]\n\n\ttrans, ok := drivers.get(network)\n\tif !ok {\n\t\terr = fmt.Errorf(\"zmq4: unknown transport %q\", network)\n\t\treturn network, addr, err\n\t}\n\n\taddr, err = trans.Addr(ep[1])\n\treturn network, addr, err\n}",
"func ValidateAddress(ipPort string, allowLocalhost bool) bool {\n\tipPort = whitespaceFilter.ReplaceAllString(ipPort, \"\")\n\tpts := strings.Split(ipPort, \":\")\n\tif len(pts) != 2 {\n\t\treturn false\n\t}\n\n\tip := net.ParseIP(pts[0])\n\tif ip == nil {\n\t\treturn false\n\t} else if ip.IsLoopback() {\n\t\tif !allowLocalhost {\n\t\t\treturn false\n\t\t}\n\t} else if !ip.IsGlobalUnicast() {\n\t\treturn false\n\t}\n\n\tport, err := strconv.ParseUint(pts[1], 10, 16)\n\tif err != nil || port < 1024 {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func ParseDevAddr(input string) (addr DevAddr, err error) {\n\tbytes, err := ParseHEX(input, 4)\n\tif err != nil {\n\t\treturn\n\t}\n\tcopy(addr[:], bytes)\n\treturn\n}",
"func ValidateAddress(address string) error {\n\t// TODO: this list is not extensive and needs to be changed once we allow DNS\n\t// names for external metrics endpoints\n\tconst invalidChars = `abcdefghijklmnopqrstuvwxyz/\\ `\n\n\taddress = strings.ToLower(address)\n\tif strings.ContainsAny(address, invalidChars) {\n\t\treturn errors.New(\"invalid character detected (required format: <IP>:<PORT>)\")\n\t}\n\n\t// \tcheck if port if specified\n\tif !strings.Contains(address, \":\") {\n\t\treturn errors.New(\"no port specified\")\n\t}\n\n\th, p, err := net.SplitHostPort(address)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif h == \"\" {\n\t\treturn errors.New(\"no IP listen address specified\")\n\t}\n\n\tif p == \"\" {\n\t\treturn errors.New(\"no port specified\")\n\t}\n\n\treturn nil\n}",
"func SplitAddress(addr string) (string, int) {\n\ts := strings.Split(addr, \":\")\n\thostname := s[0]\n\tport, _ := strconv.Atoi(s[1])\n\treturn hostname, port\n}",
"func (_TokenVesting *TokenVestingSession) ParseAddr(data []byte) (common.Address, error) {\n\treturn _TokenVesting.Contract.ParseAddr(&_TokenVesting.CallOpts, data)\n}",
"func ParseAddress(address string) (*mail.Address, error)",
"func parseAddress(address string) (scheme, host, port string, err error) {\n\tif address == \"\" {\n\t\treturn\n\t}\n\tif strings.Contains(address, \"://\") {\n\t\turl, err := url.Parse(address)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\t\tscheme, address = url.Scheme, url.Host\n\t}\n\tif strings.Contains(address, \":\") {\n\t\thost, port, err = net.SplitHostPort(address)\n\t\tif err != nil {\n\t\t\thost = address\n\t\t\terr = nil\n\t\t}\n\t} else {\n\t\thost = address\n\t}\n\tif port == \"\" {\n\t\tswitch scheme {\n\t\tcase \"http\", \"ws\":\n\t\t\tport = \"80\"\n\t\tcase \"https\", \"wss\":\n\t\t\tport = \"443\"\n\t\t}\n\t}\n\treturn\n}",
"func normalizeNetworkAddress(a, defaultHost, defaultPort string) (string, error) {\n\tif strings.Contains(a, \"://\") {\n\t\treturn a, fmt.Errorf(\"Address %s contains a protocol identifier, which is not allowed\", a)\n\t}\n\tif a == \"\" {\n\t\treturn defaultHost + \":\" + defaultPort, nil\n\t}\n\thost, port, err := net.SplitHostPort(a)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"missing port in address\") {\n\t\t\tnormalized := a + \":\" + defaultPort\n\t\t\thost, port, err = net.SplitHostPort(normalized)\n\t\t\tif err != nil {\n\t\t\t\treturn a, fmt.Errorf(\"Unable to address %s after port resolution: %v\", normalized, err)\n\t\t\t}\n\t\t} else {\n\t\t\treturn a, fmt.Errorf(\"Unable to normalize address %s: %v\", a, err)\n\t\t}\n\t}\n\tif host == \"\" {\n\t\thost = defaultHost\n\t}\n\tif port == \"\" {\n\t\tport = defaultPort\n\t}\n\treturn host + \":\" + port, nil\n}",
"func normalizeNetworkAddress(a, defaultHost, defaultPort string) (string, error) {\n\tif strings.Contains(a, \"://\") {\n\t\treturn a, fmt.Errorf(\"Address %s contains a protocol identifier, which is not allowed\", a)\n\t}\n\tif a == \"\" {\n\t\treturn defaultHost + \":\" + defaultPort, nil\n\t}\n\thost, port, err := net.SplitHostPort(a)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"missing port in address\") {\n\t\t\tnormalized := a + \":\" + defaultPort\n\t\t\thost, port, err = net.SplitHostPort(normalized)\n\t\t\tif err != nil {\n\t\t\t\treturn a, fmt.Errorf(\"Unable to address %s after port resolution: %v\", normalized, err)\n\t\t\t}\n\t\t} else {\n\t\t\treturn a, fmt.Errorf(\"Unable to normalize address %s: %v\", a, err)\n\t\t}\n\t}\n\tif host == \"\" {\n\t\thost = defaultHost\n\t}\n\tif port == \"\" {\n\t\tport = defaultPort\n\t}\n\treturn host + \":\" + port, nil\n}",
"func ParseAddress(address string) (common.Address, error) {\n\tif common.IsHexAddress(address) {\n\t\treturn common.HexToAddress(address), nil\n\t}\n\treturn common.Address{}, fmt.Errorf(\"invalid address: %v\", address)\n}",
"func buildAddress(address string, zone string, l Listener) string {\n\taddr, _, err := net.SplitHostPort(address)\n\tif err != nil {\n\t\taddr = address\n\t}\n\tif addr == \"\" {\n\t\taddr = \"localhost\"\n\t}\n\n\tport := l.GetPort()\n\tif port > 0 && port != 80 && port != 443 {\n\t\taddr += \":\" + strconv.Itoa(port)\n\t}\n\n\treturn withZonePrefix(addr, zone)\n}",
"func (_BaseAccessControlGroup *BaseAccessControlGroupFilterer) ParseDbgAddress(log types.Log) (*BaseAccessControlGroupDbgAddress, error) {\n\tevent := new(BaseAccessControlGroupDbgAddress)\n\tif err := _BaseAccessControlGroup.contract.UnpackLog(event, \"dbgAddress\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func AddressParserParse(p *mail.AddressParser, address string) (*mail.Address, error)",
"func parseAddresses(addrs []string) (iaddrs []iaddr.IPFSAddr, err error) {\n\tiaddrs = make([]iaddr.IPFSAddr, len(addrs))\n\tfor i, saddr := range addrs {\n\t\tiaddrs[i], err = iaddr.ParseString(saddr)\n\t\tif err != nil {\n\t\t\treturn nil, cmds.ClientError(\"invalid peer address: \" + err.Error())\n\t\t}\n\t}\n\treturn\n}",
"func ParseAddress(address string) (string, int) {\n\tmatch, err := gregex.MatchString(`^(.+):(\\d+)$`, address)\n\tif err == nil {\n\t\ti, _ := strconv.Atoi(match[2])\n\t\treturn match[1], i\n\t}\n\treturn \"\", 0\n}",
"func (_TokenVesting *TokenVestingCallerSession) ParseAddr(data []byte) (common.Address, error) {\n\treturn _TokenVesting.Contract.ParseAddr(&_TokenVesting.CallOpts, data)\n}",
"func Extract(addr string) (string, error) {\n\t// if addr specified then its returned\n\tif len(addr) > 0 && (addr != \"0.0.0.0\" && addr != \"[::]\" && addr != \"::\") {\n\t\treturn addr, nil\n\t}\n\n\tifaces, err := net.Interfaces()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to get interfaces! Err: %v\", err)\n\t}\n\n\t//nolint:prealloc\n\tvar addrs []net.Addr\n\tvar loAddrs []net.Addr\n\tfor _, iface := range ifaces {\n\t\tifaceAddrs, err := iface.Addrs()\n\t\tif err != nil {\n\t\t\t// ignore error, interface can dissapear from system\n\t\t\tcontinue\n\t\t}\n\t\tif iface.Flags&net.FlagLoopback != 0 {\n\t\t\tloAddrs = append(loAddrs, ifaceAddrs...)\n\t\t\tcontinue\n\t\t}\n\t\taddrs = append(addrs, ifaceAddrs...)\n\t}\n\taddrs = append(addrs, loAddrs...)\n\tfmt.Println(\"addrs\", addrs)\n\tvar ipAddr []byte\n\tvar publicIP []byte\n\n\tfor _, rawAddr := range addrs {\n\t\tvar ip net.IP\n\t\tswitch addr := rawAddr.(type) {\n\t\tcase *net.IPAddr:\n\t\t\tip = addr.IP\n\t\tcase *net.IPNet:\n\t\t\tip = addr.IP\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t\tif !isPrivateIP(ip.String()) {\n\t\t\tpublicIP = ip\n\t\t\tcontinue\n\t\t}\n\n\t\tipAddr = ip\n\t\tbreak\n\t}\n\n\t// return private ip\n\tif ipAddr != nil {\n\t\treturn net.IP(ipAddr).String(), nil\n\t}\n\n\t// return public or virtual ip\n\tif publicIP != nil {\n\t\treturn net.IP(publicIP).String(), nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"No IP address found, and explicit IP not provided\")\n}",
"func ParseAddress(tp string) error {\n\t// check source\n\tif tp == conf.TypeDump || tp == conf.TypeSync || tp == conf.TypeRump {\n\t\tif err := parseAddress(tp, conf.Options.SourceAddress, conf.Options.SourceType, true); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(conf.Options.SourceAddressList) == 0 {\n\t\t\treturn fmt.Errorf(\"source address shouldn't be empty when type in {dump, sync, rump}\")\n\t\t}\n\t}\n\n\t// check target\n\tif tp == conf.TypeRestore || tp == conf.TypeSync || tp == conf.TypeRump {\n\t\tif err := parseAddress(tp, conf.Options.TargetAddress, conf.Options.TargetType, false); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(conf.Options.TargetAddressList) == 0 {\n\t\t\treturn fmt.Errorf(\"target address shouldn't be empty when type in {restore, sync, rump}\")\n\t\t}\n\t}\n\n\treturn nil\n}",
"func ParseAddrPort(s string) (Addr, uint16, error) {\n\thost, port, err := net.SplitHostPort(s)\n\tif err != nil {\n\t\treturn Addr{}, 0, serrors.WrapStr(\"invalid address: split host:port\", err, \"addr\", s)\n\t}\n\ta, err := ParseAddr(host)\n\tif err != nil {\n\t\treturn Addr{}, 0, serrors.WrapStr(\"invalid address: host invalid\", err, \"host\", host)\n\t}\n\tp, err := strconv.ParseUint(port, 10, 16)\n\tif err != nil {\n\t\treturn Addr{}, 0, serrors.WrapStr(\"invalid address: port invalid\", err, \"port\", port)\n\t}\n\treturn a, uint16(p), nil\n}",
"func readAddr(r io.Reader, b []byte) (Addr, error) {\n\tif len(b) < MaxAddrLen {\n\t\treturn nil, io.ErrShortBuffer\n\t}\n\t_, err := io.ReadFull(r, b[:1]) // read 1st byte for address type\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch b[0] {\n\tcase AtypIPv4:\n\t\t_, err = io.ReadFull(r, b[1:1+net.IPv4len+2])\n\t\treturn b[:1+net.IPv4len+2], err\n\tcase AtypDomainName:\n\t\t_, err = io.ReadFull(r, b[1:2]) // read 2nd byte for domain length\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t_, err = io.ReadFull(r, b[2:2+int(b[1])+2])\n\t\treturn b[:1+1+int(b[1])+2], err\n\tcase AtypIPv6:\n\t\t_, err = io.ReadFull(r, b[1:1+net.IPv6len+2])\n\t\treturn b[:1+net.IPv6len+2], err\n\t}\n\n\treturn nil, ErrAddressNotSupported\n}",
"func validChallengeAddr(a string) bool {\n\t// TODO: flesh this out. parse a, make configurable, support\n\t// IPv6. Good enough for now.\n\treturn strings.HasPrefix(a, \"10.\") || strings.HasPrefix(a, \"192.168.\")\n}",
"func (ic *ifConfigurator) advertiseContainerAddr(containerNetNS string, containerIfaceName string, result *current.Result) error {\n\tif err := nsIsNSorErr(containerNetNS); err != nil {\n\t\treturn fmt.Errorf(\"%s is not a valid network namespace: %v\", containerNetNS, err)\n\t}\n\tif len(result.IPs) == 0 {\n\t\tklog.Warningf(\"Expected at least one IP address in CNI result, skip sending Gratuitous ARP\")\n\t\treturn nil\n\t}\n\t// Sending Gratuitous ARP is a best-effort action and is unlikely to fail as we have ensured the netns is valid.\n\tgo nsWithNetNSPath(containerNetNS, func(_ ns.NetNS) error {\n\t\tiface, err := netInterfaceByName(containerIfaceName)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to find container interface %s in ns %s: %v\", containerIfaceName, containerNetNS, err)\n\t\t\treturn nil\n\t\t}\n\t\tvar targetIPv4, targetIPv6 net.IP\n\t\tfor _, ipc := range result.IPs {\n\t\t\tif ipc.Address.IP.To4() != nil {\n\t\t\t\ttargetIPv4 = ipc.Address.IP\n\t\t\t} else {\n\t\t\t\ttargetIPv6 = ipc.Address.IP\n\t\t\t}\n\t\t}\n\t\tif targetIPv4 == nil && targetIPv6 == nil {\n\t\t\tklog.V(2).Infof(\"No IPv4 and IPv6 address found for container interface %s in ns %s, skip sending Gratuitous ARP/NDP\", containerIfaceName, containerNetNS)\n\t\t\treturn nil\n\t\t}\n\t\tticker := time.NewTicker(50 * time.Millisecond)\n\t\tdefer ticker.Stop()\n\t\tcount := 0\n\t\tfor {\n\t\t\t// Send gratuitous ARP/NDP to network in case of stale mappings for this IP address\n\t\t\t// (e.g. if a previous - deleted - Pod was using the same IP).\n\t\t\tif targetIPv4 != nil {\n\t\t\t\tif err := arpingGratuitousARPOverIface(targetIPv4, iface); err != nil {\n\t\t\t\t\tklog.Warningf(\"Failed to send gratuitous ARP #%d: %v\", count, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif targetIPv6 != nil {\n\t\t\t\tif err := ndpGratuitousNDPOverIface(targetIPv6, iface); err != nil {\n\t\t\t\t\tklog.Warningf(\"Failed to send gratuitous NDP #%d: %v\", count, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tcount++\n\t\t\tif count == 3 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t<-ticker.C\n\t\t}\n\t\treturn nil\n\t})\n\treturn nil\n}",
"func normalizeAddress(addr, defaultPort string) string {\n\t_, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn net.JoinHostPort(addr, defaultPort)\n\t}\n\treturn addr\n}",
"func (_AccessIndexor *AccessIndexorFilterer) ParseDbgAddress(log types.Log) (*AccessIndexorDbgAddress, error) {\n\tevent := new(AccessIndexorDbgAddress)\n\tif err := _AccessIndexor.contract.UnpackLog(event, \"dbgAddress\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func parseIp(args []string) {\n\tname := args[0]\n\taddr := net.ParseIP(name)\n\tif addr == nil {\n\t\tfmt.Println(\"Invalid address\")\n\t} else {\n\t\tfmt.Println(\"The address is\", addr.String())\n\t}\n}",
"func MustParseAddr(s string) Addr {\n\ta, err := ParseAddr(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn a\n}",
"func unpackAddr(value nlgo.Binary, af Af) (net.IP, error) {\n\tbuf := ([]byte)(value)\n\tsize := 0\n\n\tswitch af {\n\tcase syscall.AF_INET:\n\t\tsize = 4\n\tcase syscall.AF_INET6:\n\t\tsize = 16\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"ipvs: unknown af=%d addr=%v\", af, buf)\n\t}\n\n\tif size > len(buf) {\n\t\treturn nil, fmt.Errorf(\"ipvs: short af=%d addr=%v\", af, buf)\n\t}\n\n\treturn (net.IP)(buf[:size]), nil\n}",
"func ParseAddress(addr string) (proto string, path string, err error) {\n\tm := netAddrRx.FindStringSubmatch(addr)\n\tif m == nil {\n\t\treturn \"\", \"\", goof.WithField(\"address\", addr, \"invalid address\")\n\t}\n\treturn m[1], m[2], nil\n}",
"func ipAddressFromAnnotation(svc *Service, cloud *gce.Cloud, ipVersion string) (string, error) {\n\tannotationVal, ok := svc.v[StaticL4AddressesAnnotationKey]\n\tif !ok {\n\t\treturn \"\", nil\n\t}\n\n\taddressNames := strings.Split(annotationVal, \",\")\n\n\t// Truncated to 2 values (this is technically maximum, 1 IPv4 and 1 IPv6 address)\n\t// to not make too many API calls.\n\tif len(addressNames) > maxNumberOfAddresses {\n\t\taddressNames = addressNames[:maxNumberOfAddresses]\n\t}\n\n\tfor _, addressName := range addressNames {\n\t\ttrimmedAddressName := strings.TrimSpace(addressName)\n\t\tcloudAddress, err := cloud.GetRegionAddress(trimmedAddressName, cloud.Region())\n\t\tif err != nil {\n\t\t\tif isNotFoundError(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn \"\", err\n\t\t}\n\t\tif cloudAddress.IpVersion == \"\" {\n\t\t\tcloudAddress.IpVersion = IPv4Version\n\t\t}\n\n\t\tif cloudAddress.IpVersion == ipVersion {\n\t\t\treturn cloudAddress.Address, nil\n\t\t}\n\t}\n\treturn \"\", nil\n}",
"func initAddr(addr string) (string, error) {\n\thost, port, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif host == \"\" {\n\t\thost = \"127.0.0.1\"\n\t}\n\treturn net.JoinHostPort(host, port), nil\n}",
"func ParseAddress(address string) (*ParsedAddress, error) {\n\taddressParts := &ParsedAddress{}\n\taddressList := strings.Split(address, \"/\")\n\tif len(addressList) != 3 {\n\t\treturn addressParts, logThenErrorf(\"invalid address string %s\", address)\n\t}\n\n\taddressParts = &ParsedAddress{\n\t\tLocationSegment: addressList[0],\n\t\tNetworkSegment: addressList[1],\n\t\tViewSegment: addressList[2],\n\t}\n\n\treturn addressParts, nil\n}",
"func ParseAddress(addr interface{}) (a Address, err error) {\n\t// handle the allowed types\n\tswitch addrVal := addr.(type) {\n\tcase string: // simple string value\n\t\tif addrVal == \"\" {\n\t\t\terr = errors.New(\"Recipient.Address may not be empty\")\n\t\t} else {\n\t\t\ta.Email = addrVal\n\t\t}\n\n\tcase Address:\n\t\ta = addr.(Address)\n\n\tcase map[string]interface{}:\n\t\t// auto-parsed nested json object\n\t\tfor k, v := range addrVal {\n\t\t\tswitch vVal := v.(type) {\n\t\t\tcase string:\n\t\t\t\tif strings.EqualFold(k, \"name\") {\n\t\t\t\t\ta.Name = vVal\n\t\t\t\t} else if strings.EqualFold(k, \"email\") {\n\t\t\t\t\ta.Email = vVal\n\t\t\t\t} else if strings.EqualFold(k, \"header_to\") {\n\t\t\t\t\ta.HeaderTo = vVal\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"strings are required for all Recipient.Address values\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\tcase map[string]string:\n\t\t// user-provided json literal (convenience)\n\t\tfor k, v := range addrVal {\n\t\t\tif strings.EqualFold(k, \"name\") {\n\t\t\t\ta.Name = v\n\t\t\t} else if strings.EqualFold(k, \"email\") {\n\t\t\t\ta.Email = v\n\t\t\t} else if strings.EqualFold(k, \"header_to\") {\n\t\t\t\ta.HeaderTo = v\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\terr = errors.Errorf(\"unsupported Recipient.Address value type [%T]\", addrVal)\n\t}\n\n\treturn\n}",
"func parseLineToAddr(line string) (string, string, error) {\n\taddr := strings.Split(line, \",\")\n\n\tif len(addr) != 6 {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t}\n\n\t// Get IP string from line\n\tip := strings.Join(addr[0:4], \".\")\n\n\t// get port number from line\n\tport1, _ := strconv.Atoi(addr[4])\n\tport2, _ := strconv.Atoi(addr[5])\n\n\tport := (port1 << 8) + port2\n\n\t// check IP and Port is valid\n\tif net.ParseIP(ip) == nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t}\n\n\tif port <= 0 || port > 65535 {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t}\n\n\treturn ip, strconv.Itoa(port), nil\n}",
"func parseAddrMsg(msg *netlink.Message) (*AddrEntry, bool, error) {\n\tvar ifamsg iproute2.IfAddrMsg\n\tif err := ifamsg.UnmarshalBinary(msg.Data); err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tvar e AddrEntry\n\te.init()\n\te.Family = int(ifamsg.Family)\n\te.PrefixLen = int(ifamsg.Prefixlen)\n\te.Flags = AddrFlag(ifamsg.Flags)\n\te.Scope = AddrScope(ifamsg.Scope)\n\te.Ifindex = int(ifamsg.Index)\n\n\tad, err := netlink.NewAttributeDecoder(msg.Data[iproute2.SizeofIfAddrMsg:])\n\tif err != nil {\n\t\treturn &e, false, err\n\t}\n\n\tfor ad.Next() {\n\t\tswitch ad.Type() {\n\t\tcase unix.IFA_ADDRESS:\n\t\t\te.InterfaceAddr = net.IP(ad.Bytes())\n\t\tcase unix.IFA_LOCAL:\n\t\t\te.LocalAddr = net.IP(ad.Bytes())\n\t\tcase unix.IFA_LABEL:\n\t\t\te.Label = ad.String()\n\t\tcase unix.IFA_BROADCAST:\n\t\t\te.BroadcastAddr = net.IP(ad.Bytes())\n\t\tcase unix.IFA_ANYCAST:\n\t\t\te.AnycastAddr = net.IP(ad.Bytes())\n\t\tcase unix.IFA_CACHEINFO:\n\t\t\te.AddrInfo = new(iproute2.IfaCacheinfo)\n\t\t\t_ = e.AddrInfo.UnmarshalBinary(ad.Bytes())\n\t\tcase unix.IFA_MULTICAST:\n\t\t\te.MulticastAddr = net.IP(ad.Bytes())\n\t\tcase unix.IFA_FLAGS:\n\t\t\te.AddrFlags = AddrFlag(ad.Uint32())\n\t\t}\n\t}\n\terr = ad.Err()\n\treturn &e, err == nil, err\n}",
"func addrToHost(addr string) string {\n\treturn strings.Split(addr, \":\")[0]\n}",
"func extractIPAndPortFromAddresses(addresses []string) (string, string) {\n\tfor _, addr := range addresses {\n\t\taddrParts := strings.SplitN(addr, \"://\", 2)\n\t\tif len(addrParts) != 2 {\n\t\t\tlogrus.Errorf(\"invalid listening address %s: must be in format [protocol]://[address]\", addr)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch addrParts[0] {\n\t\tcase \"tcp\":\n\t\t\thost, port, err := net.SplitHostPort(addrParts[1])\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"failed to split host and port from address: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn host, port\n\t\tcase \"unix\":\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tlogrus.Errorf(\"only unix socket or tcp address is support\")\n\t\t}\n\t}\n\treturn \"\", \"\"\n}",
"func ResolveAddr(addr string) string {\n\tvar scheme string\n\thost, port, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\turi, err := url.Parse(addr)\n\t\tif err != nil {\n\t\t\treturn \"0.0.0.0:\" + strconv.Itoa(FreePort())\n\t\t}\n\n\t\tif strings.Contains(uri.Host, \":\") {\n\t\t\tsub := strings.Index(uri.Host, \":\")\n\t\t\turi.Host = uri.Host[0:sub]\n\t\t}\n\n\t\tscheme = uri.Scheme\n\t\thost = uri.Host\n\t\tport = uri.Port()\n\t}\n\n\tif host == \"\" {\n\t\thost = \"0.0.0.0\"\n\t}\n\n\tif port == \"\" || zeros.MatchString(port) {\n\t\tport = strconv.Itoa(FreePort())\n\t}\n\n\tif scheme == \"\" {\n\t\treturn host + \":\" + port\n\t}\n\n\treturn scheme + \"://\" + host + \":\" + port\n}",
"func parseAddress(mailAddress string) (address *mail.Address, err error) {\n\tstrimmed := strings.TrimSpace(mailAddress)\n\n\tif address, err = mail.ParseAddress(strimmed); err == nil {\n\t\treturn address, nil\n\t}\n\n\tlog.Printf(\"[mail] parseAddress: %s\\n\", err)\n\treturn nil, err\n}",
"func ParseHost(s string) (*Host, error) {\n\tisValidHost := func(host string) bool {\n\t\tif host == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif ip := net.ParseIP(host); ip != nil {\n\t\t\treturn true\n\t\t}\n\n\t\t// host is not a valid IPv4 or IPv6 address\n\t\t// host may be a hostname\n\t\t// refer https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names\n\t\t// why checks are done like below\n\t\tif len(host) < 1 || len(host) > 253 {\n\t\t\treturn false\n\t\t}\n\n\t\tfor _, label := range strings.Split(host, \".\") {\n\t\t\tif len(label) < 1 || len(label) > 63 {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tif !hostLabelRegexp.MatchString(label) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tvar port Port\n\tvar isPortSet bool\n\thost, portStr, err := net.SplitHostPort(s)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"missing port in address\") {\n\t\t\treturn nil, err\n\t\t}\n\t\thost = s\n\t\tportStr = \"\"\n\t} else {\n\t\tif port, err = ParsePort(portStr); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tisPortSet = true\n\t}\n\n\tif host != \"\" {\n\t\thost, err = trimIPv6(host)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// IPv6 requires a link-local address on every network interface.\n\t// `%interface` should be preserved.\n\ttrimmedHost := host\n\n\tif i := strings.LastIndex(trimmedHost, \"%\"); i > -1 {\n\t\t// `%interface` can be skipped for validity check though.\n\t\ttrimmedHost = trimmedHost[:i]\n\t}\n\n\tif !isValidHost(trimmedHost) {\n\t\treturn nil, errors.New(\"invalid hostname\")\n\t}\n\n\treturn &Host{\n\t\tName: host,\n\t\tPort: port,\n\t\tIsPortSet: isPortSet,\n\t}, nil\n}",
"func DecodeAddr(address []byte) string {\n\tvar stringAddr string\n\tvar ip []byte\n\tvar port []byte\n\n\tip = address[:4]\n\tport = address[4:]\n\n\t// Decode IP\n\tfor index, octet := range ip {\n\t\tstringAddr = stringAddr + strconv.Itoa(int(octet))\n\t\tif index != 3 {\n\t\t\tstringAddr += \".\"\n\t\t}\n\t}\n\tstringAddr += \":\"\n\n\t// Decode Port\n\tb := make([]byte, 8)\n\tfor i := 0; i < 6; i++ {\n\t\tb[i] = byte(0)\n\t}\n\tb[6] = port[0]\n\tb[7] = port[1]\n\tp := binary.BigEndian.Uint64(b)\n\tstringAddr += strconv.FormatUint(p, 10)\n\t//fmt.Println(\"Complete IP:\", stringAddr)\n\treturn stringAddr\n}",
"func ParseAddress(s string) (Address, error) {\n\tvar addr Address\n\terr := addr.parse(s)\n\treturn addr, err\n}",
"func ParseFromIPAddr(ipNet *net.IPNet) (*IPv4Address, *IPv6Address, error) {\n\tif ipNet == nil {\n\t\treturn nil, nil, fmt.Errorf(\"Nil address: %v\", ipNet)\n\t}\n\n\tif v4Addr := ipNet.IP.To4(); v4Addr != nil {\n\t\tcidr, _ := ipNet.Mask.Size()\n\t\tret := NewIPv4AddressFromBytes(v4Addr, uint(cidr))\n\t\treturn &ret, nil, nil\n\t}\n\tif v6Addr := ipNet.IP.To16(); v6Addr != nil {\n\t\tcidr, _ := ipNet.Mask.Size()\n\t\tret := NewIPv6Address(v6Addr, uint(cidr))\n\t\treturn nil, &ret, nil\n\t}\n\n\treturn nil, nil, fmt.Errorf(\"couldn't parse either v4 or v6 address: %v\", ipNet)\n}",
"func IPFromAddr(addr string) string {\n\t// check for IPv6\n\tif strings.Count(addr, \":\") > 1 {\n\t\tip := addr\n\t\t// assume also has suffix if it has prefix for IPv6\n\t\tif strings.HasPrefix(addr, \"[\") {\n\t\t\tip = ip[1:strings.Index(ip, \"]\")]\n\t\t}\n\t\t// otherwise assume only IP present and check for zone\n\t\tzoneIdx := strings.LastIndex(ip, \"%\")\n\t\tif zoneIdx != -1 {\n\t\t\tip = ip[:zoneIdx]\n\t\t}\n\t\treturn ip\n\t}\n\n\t// IPv4\n\tval := strings.LastIndex(addr, \":\")\n\tif val == -1 {\n\t\treturn addr // assume valid IP without port\n\t}\n\tip := addr[:val]\n\treturn ip\n}",
"func DecodeAddress(b []byte) (net.IP, []byte, error) {\n\tif len(b) < 6 {\n\t\treturn nil, nil, errors.New(\"too short\")\n\t}\n\n\t// IPv4\n\tif b[0] == 4 && b[1] == 4 {\n\t\treturn net.IP(b[2:6]), b[6:], nil\n\t}\n\n\t// IPv6\n\tif len(b) < 18 {\n\t\treturn nil, nil, errors.New(\"too short\")\n\t}\n\tif b[0] == 6 && b[1] == 16 {\n\t\treturn net.IP(b[2:18]), b[18:], nil\n\t}\n\n\treturn nil, nil, errors.New(\"unrecognized format\")\n}",
"func GetAddr(addr string) string {\n\tif addr == \"\" {\n\t\tif real, err := GetMainIP(); err == nil {\n\t\t\treturn real + \":0\"\n\t\t}\n\t}\n\n\tip, port, err := net.SplitHostPort(addr)\n\tif err == nil && (ip == \"\" || ip == \"0.0.0.0\") {\n\t\tif realIP, err := GetMainIP(); err == nil {\n\t\t\treturn net.JoinHostPort(realIP, port)\n\t\t}\n\t}\n\n\treturn addr\n}",
"func ParseAddrPort(str string) (ip net.IP, port uint16, err error) {\n\t// See func net.SplitHostPort(hostport string) (host, port string, err error)\n\tpair := strings.Split(str, \":\")\n\tif len(pair) == 2 {\n\t\tip = net.ParseIP(pair[0])\n\t\tif ip != nil {\n\t\t\tvar v uint64\n\t\t\tv, err = strconv.ParseUint(pair[1], 10, 16)\n\t\t\tif err == nil {\n\t\t\t\tport = uint16(v)\n\t\t\t} else {\n\t\t\t\terr = errf(\"\\\"%s\\\" is invalid port specifier\", pair[1])\n\t\t\t}\n\t\t} else {\n\t\t\terr = errf(\"\\\"%s\\\" not a valid IP address\", pair[0])\n\t\t}\n\t} else {\n\t\terr = errf(\"\\\"%s\\\" is missing port specifier\", str)\n\t}\n\treturn\n}",
"func ParseAddress(address string) Address {\n\tif !TrackPositions {\n\t\treturn 0\n\t}\n\taddr, _ := strconv.ParseUint(address, 0, 64)\n\n\treturn Address(addr)\n}",
"func parseListeners(addrs []string) ([]string, []string, error) {\n\tipv4ListenAddrs := make([]string, 0, len(addrs)*2)\n\tipv6ListenAddrs := make([]string, 0, len(addrs)*2)\n\tfor _, addr := range addrs {\n\t\thost, _, err := net.SplitHostPort(addr)\n\t\tif err != nil {\n\t\t\t// Shouldn't happen due to already being normalized.\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\t// Empty host or host of * on plan9 is both IPv4 and IPv6.\n\t\tif host == \"\" || (host == \"*\" && runtime.GOOS == \"plan9\") {\n\t\t\tipv4ListenAddrs = append(ipv4ListenAddrs, addr)\n\t\t\tipv6ListenAddrs = append(ipv6ListenAddrs, addr)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Parse the IP.\n\t\tip := net.ParseIP(host)\n\t\tif ip == nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"'%s' is not a valid IP \"+\n\t\t\t\t\"address\", host)\n\t\t}\n\n\t\t// To4 returns nil when the IP is not an IPv4 address, so use\n\t\t// this determine the address type.\n\t\tif ip.To4() == nil {\n\t\t\tipv6ListenAddrs = append(ipv6ListenAddrs, addr)\n\t\t} else {\n\t\t\tipv4ListenAddrs = append(ipv4ListenAddrs, addr)\n\t\t}\n\t}\n\treturn ipv4ListenAddrs, ipv6ListenAddrs, nil\n}",
"func ExtractAddressFromReverse(reverseName string) string {\n\tsearch := \"\"\n\n\tf := reverse\n\n\tswitch {\n\tcase strings.HasSuffix(reverseName, IP4arpa):\n\t\tsearch = strings.TrimSuffix(reverseName, IP4arpa)\n\tcase strings.HasSuffix(reverseName, IP6arpa):\n\t\tsearch = strings.TrimSuffix(reverseName, IP6arpa)\n\t\tf = reverse6\n\tdefault:\n\t\treturn \"\"\n\t}\n\n\t// Reverse the segments and then combine them.\n\treturn f(strings.Split(search, \".\"))\n}",
"func SplitHostPort(hostport string) (host, port string, err error) {\n\tj, k := 0, 0\n\n\t// The port starts after the last colon.\n\ti := last(hostport, ':')\n\tif i < 0 {\n\t\tgoto missingPort\n\t}\n\n\tif hostport[0] == '[' {\n\t\t// Expect the first ']' just before the last ':'.\n\t\tend := byteIndex(hostport, ']')\n\t\tif end < 0 {\n\t\t\terr = &AddrError{\"missing ']' in address\", hostport}\n\t\t\treturn\n\t\t}\n\t\tswitch end + 1 {\n\t\tcase len(hostport):\n\t\t\t// There can't be a ':' behind the ']' now.\n\t\t\tgoto missingPort\n\t\tcase i:\n\t\t\t// The expected result.\n\t\tdefault:\n\t\t\t// Either ']' isn't followed by a colon, or it is\n\t\t\t// followed by a colon that is not the last one.\n\t\t\tif hostport[end+1] == ':' {\n\t\t\t\tgoto tooManyColons\n\t\t\t}\n\t\t\tgoto missingPort\n\t\t}\n\t\thost = hostport[1:end]\n\t\tj, k = 1, end+1 // there can't be a '[' resp. ']' before these positions\n\t} else {\n\t\thost = hostport[:i]\n\t\tif byteIndex(host, ':') >= 0 {\n\t\t\tgoto tooManyColons\n\t\t}\n\t\tif byteIndex(host, '%') >= 0 {\n\t\t\tgoto missingBrackets\n\t\t}\n\t}\n\tif byteIndex(hostport[j:], '[') >= 0 {\n\t\terr = &AddrError{\"unexpected '[' in address\", hostport}\n\t\treturn\n\t}\n\tif byteIndex(hostport[k:], ']') >= 0 {\n\t\terr = &AddrError{\"unexpected ']' in address\", hostport}\n\t\treturn\n\t}\n\n\tport = hostport[i+1:]\n\treturn\n\nmissingPort:\n\terr = &AddrError{\"missing port in address\", hostport}\n\treturn\n\ntooManyColons:\n\terr = &AddrError{\"too many colons in address\", hostport}\n\treturn\n\nmissingBrackets:\n\terr = &AddrError{\"missing brackets in address\", hostport}\n\treturn\n}",
"func ResolveAddress(lookupIPFunc lookup.LookupIPFunc, dataplane *core_mesh.DataplaneResource) (*core_mesh.DataplaneResource, error) {\n\tvar ips, aips []net.IP\n\tvar err error\n\tvar update_ip, update_aip bool = false, false\n\tif ips, err = lookupIPFunc(dataplane.Spec.Networking.Address); err != nil {\n\t\treturn nil, err\n\t}\n\tif len(ips) == 0 {\n\t\treturn nil, errors.Errorf(\"can't resolve address %v\", dataplane.Spec.Networking.Address)\n\t}\n\tif dataplane.Spec.Networking.Address != ips[0].String() {\n\t\tupdate_ip = true\n\t}\n\tif dataplane.Spec.Networking.AdvertisedAddress != \"\" {\n\t\tif aips, err = lookupIPFunc(dataplane.Spec.Networking.AdvertisedAddress); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(aips) == 0 {\n\t\t\treturn nil, errors.Errorf(\"can't resolve address %v\", dataplane.Spec.Networking.AdvertisedAddress)\n\t\t}\n\t\tif dataplane.Spec.Networking.AdvertisedAddress != aips[0].String() {\n\t\t\tupdate_aip = true\n\t\t}\n\t}\n\n\tif update_ip || update_aip { // only if we resolve any address, in most cases this is IP not a hostname\n\t\tdpSpec := proto.Clone(dataplane.Spec).(*mesh_proto.Dataplane)\n\t\tif update_ip {\n\t\t\tdpSpec.Networking.Address = ips[0].String()\n\t\t}\n\t\tif update_aip {\n\t\t\tdpSpec.Networking.AdvertisedAddress = aips[0].String()\n\t\t}\n\t\treturn &core_mesh.DataplaneResource{\n\t\t\tMeta: dataplane.Meta,\n\t\t\tSpec: dpSpec,\n\t\t}, nil\n\t}\n\treturn dataplane, nil\n}",
"func ParseOptIAAddress(data []byte) (*OptIAAddress, error) {\n\tvar opt OptIAAddress\n\tbuf := uio.NewBigEndianBuffer(data)\n\topt.IPv6Addr = net.IP(buf.CopyN(net.IPv6len))\n\topt.PreferredLifetime = buf.Read32()\n\topt.ValidLifetime = buf.Read32()\n\tif err := opt.Options.FromBytes(buf.ReadAll()); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &opt, buf.FinError()\n}",
"func predictAddr(t *netutil.IPTracker) (net.IP, int) {\n\tep := t.PredictEndpoint()\n\tif ep == \"\" {\n\t\treturn nil, 0\n\t}\n\tipString, portString, _ := net.SplitHostPort(ep)\n\tip := net.ParseIP(ipString)\n\tport, err := strconv.ParseInt(portString, 10, 16)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ip, int(port)\n}",
"func Extract(addr string) (string, error) {\n\t// if addr specified then its returned\n\tif len(addr) > 0 && (addr != \"0.0.0.0\" && addr != \"[::]\" && addr != \"::\") {\n\t\treturn addr, nil\n\t}\n\n\treturn LocalIP, nil\n}",
"func parseListenString(s string) (string, string, error) {\n\tif s == \"\" {\n\t\treturn \"\", \"\", fmt.Errorf(\"patroni configuration option 'restapi.listen' not found\")\n\t}\n\n\tif s == \"::\" {\n\t\treturn \"[::1]\", \"8008\", nil\n\t}\n\n\tparts := strings.Split(s, \":\")\n\n\tvar addr, port string\n\tvar ip net.IP\n\n\tif len(parts) != 1 {\n\t\tip = net.ParseIP(strings.Join(parts[0:len(parts)-1], \":\"))\n\t\tport = parts[len(parts)-1]\n\t} else {\n\t\tip = net.ParseIP(parts[0])\n\t\tport = \"8008\"\n\t}\n\n\t// Convert 'unspecified' address to loopback. Wraps IPv6 addresses into square brackets (required for net/http).\n\tif ip.Equal(net.IPv4zero) {\n\t\taddr = \"127.0.0.1\"\n\t} else if ip.Equal(net.IPv6unspecified) {\n\t\taddr = fmt.Sprintf(\"[%s]\", net.IPv6loopback.String())\n\t} else {\n\t\tif ip.To4() != nil {\n\t\t\taddr = ip.String()\n\t\t} else {\n\t\t\taddr = fmt.Sprintf(\"[%s]\", ip.String())\n\t\t}\n\t}\n\n\treturn addr, port, nil\n}",
"func NormalizeAddr(addr string) (string, error) {\n\tu, err := ParseAddr(addr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thost, _, err := net.SplitHostPort(u.Host)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse host-port pair: %v\", err)\n\t} else if host == \"\" {\n\t\treturn \"\", fmt.Errorf(\"no hostname in address: %q\", addr)\n\t}\n\treturn u.String(), nil\n}",
"func getDefaultAddr(addr string, n int) string {\n\ta := strings.Split(addr, \":\")\n\tport, _ := strconv.Atoi(a[len(a)-1])\n\ta[len(a)-1] = strconv.Itoa(port + n)\n\treturn strings.Join(a, \":\")\n}",
"func ParseAddr(addr string) (*url.URL, error) {\n\tu, err := url.Parse(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif u.Scheme != SchemaADC && u.Scheme != SchemaADCS {\n\t\treturn u, fmt.Errorf(\"unsupported protocol: %q\", u.Scheme)\n\t}\n\tu.Path = strings.TrimRight(u.Path, \"/\")\n\treturn u, nil\n}",
"func ParseIPHostFromString(addrStr string) (string, uint16, error) {\n\tip, port, err := net.SplitHostPort(addrStr)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tportUint, err := strconv.ParseUint(port, 10, 16)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\n\treturn ip, uint16(portUint), nil\n}",
"func decodePeerAddress(chunk string) string {\n\tip := net.IPv4(chunk[0], chunk[1], chunk[2], chunk[3])\n\tremotePort := 256*int(chunk[4]) + int(chunk[5]) // Port is given in network encoding.\n\treturn fmt.Sprintf(\"%s:%d\", ip.String(), remotePort)\n}",
"func (n *Net) ResolveUDPAddr(network, address string) (*net.UDPAddr, error) {\n\ta := &net.UDPAddr{\n\t\tPort: 0,\n\t\tIP: net.IPv4(127, 0, 0, 1),\n\t}\n\thost, port, err := net.SplitHostPort(address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif a.IP = net.ParseIP(host); a.IP == nil {\n\t\t// Probably we should use virtual DNS here.\n\t\treturn nil, errors.New(\"bad IP\")\n\t}\n\tif a.Port, err = strconv.Atoi(port); err != nil {\n\t\treturn nil, err\n\t}\n\treturn a, nil\n}",
"func ResolveUDPAddr(network, address string) (*UDPAddr, error) {\n\t// TODO: make sure network is 'udp'\n\t// separate domain from port, if any\n\tr := strings.Split(address, \":\")\n\taddr, err := ActiveDevice.GetDNS(r[0])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tip := IP(addr)\n\tif len(r) > 1 {\n\t\tport, e := strconv.Atoi(r[1])\n\t\tif e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\treturn &UDPAddr{IP: ip, Port: port}, nil\n\t}\n\n\treturn &UDPAddr{IP: ip}, nil\n}",
"func (_Contract *ContractFilterer) ParseAddrChanged(log types.Log) (*ContractAddrChanged, error) {\n\tevent := new(ContractAddrChanged)\n\tif err := _Contract.contract.UnpackLog(event, \"AddrChanged\", log); err != nil {\n\t\treturn nil, err\n\t}\n\treturn event, nil\n}",
"func (s *ProvisionedServer) Address() string { return s.Server.AdvertiseIP }",
"func extractIPv4(ptr string) string {\n\ts := strings.Replace(ptr, \".in-addr.arpa\", \"\", 1)\n\twords := strings.Split(s, \".\")\n\tfor i, j := 0, len(words)-1; i < j; i, j = i+1, j-1 {\n\t\twords[i], words[j] = words[j], words[i]\n\t}\n\treturn strings.Join(words, \".\")\n}",
"func IsAddress(a string) bool {\n\tif len(a) > 0 && a[:3] == string(binary.PrefixAccountPubkey) {\n\t\treturn true\n\t}\n\treturn false\n}",
"func ListenAddresses(value string) ([]string, error) {\n\taddresses := make([]string, 0)\n\n\tif value == \"\" {\n\t\treturn addresses, nil\n\t}\n\n\tlocalHost, localPort, err := net.SplitHostPort(value)\n\tif err != nil {\n\t\tlocalHost = value\n\t\tlocalPort = DefaultPort\n\t}\n\n\tif localHost == \"0.0.0.0\" || localHost == \"::\" || localHost == \"[::]\" {\n\t\tifaces, err := net.Interfaces()\n\t\tif err != nil {\n\t\t\treturn addresses, err\n\t\t}\n\n\t\tfor _, i := range ifaces {\n\t\t\taddrs, err := i.Addrs()\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, addr := range addrs {\n\t\t\t\tvar ip net.IP\n\t\t\t\tswitch v := addr.(type) {\n\t\t\t\tcase *net.IPNet:\n\t\t\t\t\tip = v.IP\n\t\t\t\tcase *net.IPAddr:\n\t\t\t\t\tip = v.IP\n\t\t\t\t}\n\n\t\t\t\tif !ip.IsGlobalUnicast() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif ip.To4() == nil {\n\t\t\t\t\tif localHost == \"0.0.0.0\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\taddresses = append(addresses, fmt.Sprintf(\"[%s]:%s\", ip, localPort))\n\t\t\t\t} else {\n\t\t\t\t\taddresses = append(addresses, fmt.Sprintf(\"%s:%s\", ip, localPort))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif strings.Contains(localHost, \":\") {\n\t\t\taddresses = append(addresses, fmt.Sprintf(\"[%s]:%s\", localHost, localPort))\n\t\t} else {\n\t\t\taddresses = append(addresses, fmt.Sprintf(\"%s:%s\", localHost, localPort))\n\t\t}\n\t}\n\n\treturn addresses, nil\n}",
"func parseOptClientLinkLayerAddress(data []byte) (*optClientLinkLayerAddress, error) {\n\tvar opt optClientLinkLayerAddress\n\tbuf := uio.NewBigEndianBuffer(data)\n\topt.LinkLayerType = iana.HWType(buf.Read16())\n\topt.LinkLayerAddress = buf.ReadAll()\n\treturn &opt, buf.FinError()\n}",
"func (s *Setting) CheckAddress(adr string, hasPort, isEmptyHost bool) error {\n\tif strings.HasSuffix(adr, \".onion\") {\n\t\tif s.UseTor {\n\t\t\treturn nil\n\t\t}\n\t\treturn ErrTorAddress\n\t}\n\th, p, err2 := net.SplitHostPort(adr)\n\tif err2 != nil && hasPort {\n\t\treturn err2\n\t}\n\tif err2 == nil && !hasPort {\n\t\treturn errors.New(\"should not have port number\")\n\t}\n\tif hasPort {\n\t\tpo, err := strconv.Atoi(p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif po > 0xffff || po <= 0 {\n\t\t\treturn errors.New(\"illegal port number\")\n\t\t}\n\t\tadr = h\n\t}\n\tif adr == \"\" {\n\t\tif !isEmptyHost {\n\t\t\treturn errors.New(\"empty host name\")\n\t\t}\n\t\treturn nil\n\t}\n\t_, err2 = net.LookupIP(adr)\n\treturn err2\n}",
"func parseIPAndPort(input string) (string, int, error) {\n\tseparator := strings.LastIndex(input, \":\")\n\tif separator == -1 {\n\t\treturn \"\", 0, errors.New(\"cannot parse IP and port correctly\")\n\t}\n\tIPStr := input[0:separator]\n\tif IPStr[0] == '[' {\n\t\tIPStr = IPStr[1 : len(IPStr)-1]\n\t}\n\tfor _, prefix := range localIPv4 {\n\t\tif strings.HasPrefix(IPStr, prefix) {\n\t\t\treturn \"\", 0, errors.New(\"ignore this IP address\")\n\t\t}\n\t}\n\toutputIP := net.ParseIP(IPStr)\n\tif outputIP == nil {\n\t\treturn \"\", 0, errors.New(\"invalid IP address\")\n\t}\n\n\tport, err := strconv.Atoi(input[separator+1:])\n\tif err != nil {\n\t\treturn \"\", 0, errors.New(\"invalid IP port\")\n\t}\n\treturn IPStr, port, nil\n}"
] | [
"0.8340167",
"0.8205273",
"0.7002564",
"0.638477",
"0.6307578",
"0.6307132",
"0.6303563",
"0.6131761",
"0.61101985",
"0.6086822",
"0.6052451",
"0.60465807",
"0.5911784",
"0.5911784",
"0.59074295",
"0.5907354",
"0.58481467",
"0.575326",
"0.57085407",
"0.5657779",
"0.5657514",
"0.5645455",
"0.56334203",
"0.5629601",
"0.56295305",
"0.5621209",
"0.5616896",
"0.5603567",
"0.5593392",
"0.5580358",
"0.5573355",
"0.55725026",
"0.55687046",
"0.5561967",
"0.5528769",
"0.5520014",
"0.5518512",
"0.55170566",
"0.55170566",
"0.5516679",
"0.5497167",
"0.54890287",
"0.5478747",
"0.54338837",
"0.5412715",
"0.5403231",
"0.5402765",
"0.540043",
"0.5383037",
"0.5381386",
"0.5378193",
"0.535518",
"0.5346018",
"0.534566",
"0.53361344",
"0.5333118",
"0.5331835",
"0.53231114",
"0.530948",
"0.53014886",
"0.5299297",
"0.52983034",
"0.5288783",
"0.5283077",
"0.5273748",
"0.5261342",
"0.52536565",
"0.52411807",
"0.52387226",
"0.5223436",
"0.5222616",
"0.5220266",
"0.5214675",
"0.5208065",
"0.5201532",
"0.5185989",
"0.5181148",
"0.5180541",
"0.5179498",
"0.51750064",
"0.5174316",
"0.5173106",
"0.51727796",
"0.5162102",
"0.5150736",
"0.5142128",
"0.51311284",
"0.5126323",
"0.512302",
"0.512183",
"0.51162857",
"0.51034033",
"0.5102507",
"0.5093374",
"0.5089154",
"0.50860536",
"0.5082816",
"0.5062692",
"0.5058135",
"0.5057238"
] | 0.84405524 | 0 |
StringsSliceFromSet returns a sorted strings slice from set | func StringsSliceFromSet(in map[string]struct{}) []string {
if in == nil {
return nil
}
out := make([]string, 0, len(in))
for key := range in {
out = append(out, key)
}
sort.Strings(out)
return out
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func StrSliceSet(slice []string) []string {\n\tset := make([]string, 0)\n\ttempMap := make(map[string]bool, len(slice))\n\tfor _, v := range slice {\n\t\tif !tempMap[v] {\n\t\t\tset = append(set, v)\n\t\t\ttempMap[v] = true\n\t\t}\n\t}\n\n\treturn set\n}",
"func (set StringSet) ToSlice() []string {\n\tif n := set.Len(); n > 0 {\n\t\tresult := make([]string, 0, n)\n\t\tfor str := range set {\n\t\t\tresult = append(result, str)\n\t\t}\n\t\treturn result\n\t}\n\treturn nil\n}",
"func (s *Set) StringSlice() []string {\n\tslice := make([]string, 0, s.Size())\n\n\ts.mutex.Lock()\n\tfor k := range s.m {\n\t\tslice = append(slice, k.(string))\n\t}\n\ts.mutex.Unlock()\n\n\treturn slice\n}",
"func (s StringSet) ToSlice() []string {\n\tret := make([]string, len(s))\n\tidx := 0\n\tfor v := range s {\n\t\tret[idx] = v\n\t\tidx++\n\t}\n\tsort.Strings(ret)\n\treturn ret\n}",
"func (s StringSet) Slice() []string {\n\tt := make([]string, 0, len(s))\n\tfor k := range s {\n\t\tt = append(t, k)\n\t}\n\treturn t\n}",
"func SetToSlice(set map[string]struct{}) []string {\n\tdata := make([]string, 0, len(set))\n\tfor key := range set {\n\t\tdata = append(data, key)\n\t}\n\treturn data\n}",
"func ToStringSlice(set mapset.Set) []string {\n\tif set == nil {\n\t\treturn nil\n\t}\n\tslice := set.ToSlice()\n\tresult := make([]string, len(slice))\n\tfor i, item := range slice {\n\t\tresult[i] = item.(string)\n\t}\n\treturn result\n}",
"func (s StringSet) ToSlice() []string {\n\tslice := []string{}\n\tfor value := range s {\n\t\tslice = append(slice, value)\n\t}\n\treturn slice\n}",
"func NewStringSetFromSlice(start []string) StringSet {\n\tret := make(StringSet)\n\tfor _, s := range start {\n\t\tret.Add(s)\n\t}\n\treturn ret\n}",
"func StringSliceToSet(slice []string) String {\n\tset := make(String, len(slice))\n\tfor _, s := range slice {\n\t\tset.Add(s)\n\t}\n\treturn set\n}",
"func (queryParametersBag) uniqueStringsSlice(in []string) []string {\n\tkeys := make(map[string]bool)\n\tout := make([]string, 0)\n\n\tfor _, entry := range in {\n\t\tif _, ok := keys[entry]; !ok {\n\t\t\tkeys[entry] = true\n\t\t\tout = append(out, entry)\n\t\t}\n\t}\n\n\treturn out\n}",
"func (s StringSet) AsSlice() []string {\n\tresult := make([]string, len(s), len(s))\n\ti := 0\n\tfor v := range s {\n\t\tresult[i] = v\n\t\ti++\n\t}\n\treturn result\n}",
"func StringSet(s sets.String) zapcore.ObjectMarshalerFunc {\n\treturn func(enc zapcore.ObjectEncoder) error {\n\t\tenc.AddString(\"keys\", strings.Join(s.UnsortedList(), \",\"))\n\t\treturn nil\n\t}\n}",
"func copyAndSortStringSlice(s []string) []string {\n\tsc := make([]string, 0, len(s))\n\tsc = append(sc, s...)\n\n\tsort.Strings(sc)\n\treturn sc\n}",
"func UniqueStringSlice(ins ...[]string) []string {\n\tkeys := make(map[string]bool)\n\tlist := []string{}\n\tfor _, in := range ins {\n\t\tfor _, k := range in {\n\t\t\tif _, value := keys[k]; !value {\n\t\t\t\tkeys[k] = true\n\t\t\t\tlist = append(list, k)\n\t\t\t}\n\t\t}\n\t}\n\treturn list\n}",
"func (s Set) Slice() []string {\n\tvar i uint64\n\n\tk := make([]string, len(s))\n\n\tfor key := range s {\n\t\tk[i] = key\n\t\ti++\n\t}\n\n\treturn k\n}",
"func (ss Set) Slice() []string {\n\tslc := make([]string, 0, len(ss))\n\tfor k := range ss {\n\t\tslc = append(slc, k)\n\t}\n\n\treturn slc\n}",
"func Strings(data []string) []string {\n\tsort.Strings(data)\n\tn := Uniq(sort.StringSlice(data))\n\treturn data[:n]\n}",
"func StringSet() *StringSetFilter {\r\n\tf := new(StringSetFilter)\r\n\tf.strcase = STRING_RAWCASE\r\n\tf.delimiter = \",\"\r\n\tf.minCount = 0\r\n\tf.maxCount = types.MaxInt\r\n\treturn f\r\n}",
"func (s Set) Slice() []string {\n\ttoReturn := make([]string, s.Len())\n\ti := 0\n\tfor st := range s {\n\t\ttoReturn[i] = st\n\t\ti++\n\t}\n\treturn toReturn\n}",
"func StringSliceSubset(a []string, b []string) error {\n\taset := make(map[string]bool)\n\tfor _, v := range a {\n\t\taset[v] = true\n\t}\n\n\tfor _, v := range b {\n\t\t_, ok := aset[v]\n\t\tif !ok {\n\t\t\treturn trace.BadParameter(\"%v not in set\", v)\n\t\t}\n\n\t}\n\treturn nil\n}",
"func (tickerSet TickerSet) ToSlice() []string {\n\ttickerSlice := make([]string, 0)\n\tfor ticker, _ := range tickerSet {\n\t\ttickerSlice = append(tickerSlice, ticker)\n\t}\n\treturn tickerSlice\n}",
"func (c *Context) StringSet(strings ...string) *AST {\n\tset := &AST{\n\t\trawCtx: c.raw,\n\t\trawAST: C.Z3_mk_empty_set(\n\t\t\tc.raw,\n\t\t\tc.StringSort().rawSort,\n\t\t),\n\t}\n\tfor _, content := range strings {\n\t\tC.Z3_mk_set_add(\n\t\t\tc.raw,\n\t\t\tset.rawAST,\n\t\t\tc.Str(content).rawAST,\n\t\t)\n\t}\n\treturn set\n}",
"func NewStringSet() *Set {\n\treturn NewCustomSet(func(l, r interface{}) bool {\n\t\treturn l.(string) < r.(string)\n\t})\n}",
"func (p StringSlice) Sort() { Sort(p) }",
"func (cl *CommandLineInterface) StringSliceFlagOnFlagSet(flagSet *pflag.FlagSet, name string, shorthand *string, defaultValue []string, description string) {\n\tif defaultValue == nil {\n\t\tcl.nilDefaults[name] = true\n\t\tdefaultValue = []string{}\n\t}\n\tif shorthand != nil {\n\t\tcl.Flags[name] = flagSet.StringSliceP(name, string(*shorthand), defaultValue, description)\n\t\treturn\n\t}\n\tcl.Flags[name] = flagSet.StringSlice(name, defaultValue, description)\n}",
"func (m refCountedUrlSet) getAsStringSlice() []string {\n\ta := make([]string, 0, len(m))\n\tfor u := range m {\n\t\ta = append(a, u)\n\t}\n\treturn a\n}",
"func (s *Set) Strings() []string {\n\titems := make([]string, 0, s.Size())\n\tfor k := range s.m {\n\t\titems = append(items, k)\n\t}\n\treturn items\n}",
"func Strings(a []string) { Sort(StringSlice(a)) }",
"func NewStringSet(ss []string) StringSet {\n\tres := StringSet{}\n\tfor _, s := range ss {\n\t\tres[s] = struct{}{}\n\t}\n\treturn res\n}",
"func StringSlicesUnion(one, two []string) []string {\n\tvar union []string\n\tunion = append(union, one...)\n\tunion = append(union, two...)\n\treturn OnlyUnique(union)\n}",
"func (f *FlagSet) StringSlice(name string) []string {\n\ts := f.Lookup(name)\n\tif s != nil {\n\t\treturn (s.Value.(*stringSlice)).Value()\n\t}\n\treturn nil\n}",
"func filterSet(autocompletions sets.String, sub string, ignoreCase bool, inclusionFunc func(string, string) bool) sets.String {\n\tif sub == \"\" {\n\t\treturn autocompletions\n\t}\n\tif ignoreCase {\n\t\tsub = strings.ToLower(sub)\n\t}\n\tret := sets.NewString()\n\tfor _, item := range autocompletions.List() {\n\t\tif ignoreCase {\n\t\t\titem = strings.ToLower(item)\n\t\t}\n\t\tif inclusionFunc(item, sub) {\n\t\t\tret.Insert(item)\n\t\t}\n\t}\n\treturn ret\n}",
"func expandSetToStrings(strings []interface{}) []string {\n\texpandedStrings := make([]string, len(strings))\n\tfor i, v := range strings {\n\t\texpandedStrings[i] = v.(string)\n\t}\n\n\treturn expandedStrings\n}",
"func NewStringSet() StringSet {\n\treturn make(StringSet)\n}",
"func (s String) Sorted() []string {\n\tslice := s.ToSlice()\n\tsort.Strings(slice)\n\treturn slice\n}",
"func (s StringSlice) StringSlice() []string {\n\tss := []string{}\n\tfor _, tmp := range s {\n\t\tif len(tmp) <= 2 {\n\t\t\tcontinue\n\t\t}\n\t\tss = append(ss, tmp)\n\t}\n\treturn ss\n}",
"func (this *HandlerBase) getStringSlice(s string) []string {\n\ta := this.get(s)\n\tif m, ok := a.([]interface{}); ok {\n\t\tsl := make([]string, len(m))\n\t\tfor i, v := range m {\n\t\t\tif s, ok := v.(string); ok {\n\t\t\t\tsl[i] = string(s)\n\t\t\t}\n\t\t}\n\t\treturn sl\n\t}\n\treturn nil\n}",
"func (s *Set) Slice() []string {\n\tn := len(s.m)\n\tif n == 0 {\n\t\treturn nil\n\t}\n\tarr := make([]string, 0, n)\n\tfor val := range s.m {\n\t\tarr = append(arr, val)\n\t}\n\treturn arr\n}",
"func NewFromSlice(sl []string) Set {\n\ttoReturn := New()\n\tfor _, s := range sl {\n\t\ttoReturn.Add(s)\n\t}\n\treturn toReturn\n}",
"func toList(set map[string]struct{}) []string {\n\tlist := make([]string, 0, len(set))\n\tfor item, _ := range set {\n\t\tlist = append(list, item)\n\t}\n\tsort.Strings(list)\n\treturn list\n}",
"func (v *InvoiceCollection) stringSlice(vsizes, dsizes []int) (\n\tvdata [][]string, rvsizes []int,\n\tddata [][][]string, rdsizes []int) {\n\trvsizes, rdsizes = vsizes, dsizes\n\tvdata = make([][]string, len(*v))\n\tddata = make([][][]string, len(*v))\n\tfor i, p := range *v {\n\t\tvdata[i] = p.stringSlice(i + 1)\n\t\tfor k, f := range vdata[i] {\n\t\t\t_, _, n := util.CountChars(f)\n\t\t\trvsizes[k] = util.Imax(rvsizes[k], n)\n\t\t}\n\n\t\tddata[i] = make([][]string, len(p.Details))\n\t\tfor j, d := range p.Details {\n\t\t\tddata[i][j] = d.stringSlice(j + 1)\n\t\t\tfor k, f := range ddata[i][j] {\n\t\t\t\t_, _, n := util.CountChars(f)\n\t\t\t\trdsizes[k] = util.Imax(rdsizes[k], n)\n\t\t\t}\n\t\t}\n\t}\n\treturn vdata, rvsizes, ddata, rdsizes\n}",
"func (set KeySet) ToSlice() []Key {\n\tkeys := []Key{}\n\n\tfor key := range set {\n\t\tkeys = append(keys, key)\n\t}\n\n\treturn keys\n}",
"func FirstUniqueStrings(list []string) []string {\n\tk := 0\nouter:\n\tfor i := 0; i < len(list); i++ {\n\t\tfor j := 0; j < k; j++ {\n\t\t\tif list[i] == list[j] {\n\t\t\t\tcontinue outer\n\t\t\t}\n\t\t}\n\t\tlist[k] = list[i]\n\t\tk++\n\t}\n\treturn list[:k]\n}",
"func MergeStringSlices(a []string, b []string) []string {\n\tset := sets.NewString(a...)\n\tset.Insert(b...)\n\treturn set.UnsortedList()\n}",
"func TopicSetFromSlice(s []string) TopicSet {\n\tvar ts = TopicSet{}\n\tfor _, t := range s {\n\t\tts[t] = nil\n\t}\n\treturn ts\n}",
"func (set UInt64Set) Slice(sorted bool) UInt64Slice {\n\tslice := NewUInt64Slice(0, len(set))\n\tfor x := range set {\n\t\tslice = append(slice, x)\n\t}\n\tif sorted {\n\t\tslice.Sort()\n\t}\n\treturn slice\n}",
"func (s StringSet) GetAll() []string {\n\tres := []string{}\n\tfor item := range s {\n\t\tres = append(res, item)\n\t}\n\treturn res\n}",
"func RemoveDuplicatedStrings(slice []string) []string {\n\tresult := []string{}\n\n\tcheck := make(map[string]bool)\n\tfor _, element := range slice {\n\t\tcheck[element] = true\n\t}\n\n\tfor key := range check {\n\t\tresult = append(result, key)\n\t}\n\n\treturn result\n}",
"func NewStringSlice(n ...string) *Slice { return NewSlice(sort.StringSlice(n)) }",
"func NewStringSet() StringSet {\n\treturn &stringSetImpl{\n\t\tentries: make(map[string]bool),\n\t}\n}",
"func StringSliceExtractUnique(strSlice []string) (result []string) {\n\tif strSlice == nil {\n\t\treturn []string{}\n\t} else if len(strSlice) <= 1 {\n\t\treturn strSlice\n\t} else {\n\t\tfor _, v := range strSlice {\n\t\t\tif !StringSliceContains(&result, v) {\n\t\t\t\tresult = append(result, v)\n\t\t\t}\n\t\t}\n\n\t\treturn result\n\t}\n}",
"func getSetValues(s set) []string {\n\tvar retVal []string\n\tfor k := range s {\n\t\tretVal = append(retVal, k)\n\t}\n\treturn retVal\n}",
"func (set *SetString) Slice() SliceString {\n\tset.lock.Lock()\n\tkeys := make(SliceString, len(set.cache))\n\ti := 0\n\tfor k := range set.cache {\n\t\tkeys[i] = k\n\t}\n\tset.lock.Unlock()\n\treturn keys\n}",
"func (s S) SetSlice(key, value string, before, after int) (slice []string, err error) {\n\tvar vv SortedString\n\terr = s.ReadModify(key, &vv, func(_ interface{}) (r bool) {\n\t\tslice = vv.Slice(value, before, after)\n\t\treturn\n\t})\n\treturn\n}",
"func getstrings(fullpath string) mapset.Set {\n\tfileStrings, _ := exec.Command(\"strings\", fullpath).CombinedOutput()\n\tstrArray := strings.Split(string(fileStrings), \"\\n\")\n\tstringSet := mapset.NewSet()\n\n\tfor _, s := range strArray {\n\t\tstringSet.Add(s)\n\t}\n\n\treturn stringSet\n}",
"func NewStringSet(values ...string) StringSet {\n\ts := make(StringSet, len(values))\n\tfor _, v := range values {\n\t\ts.Add(v)\n\t}\n\treturn s\n}",
"func (s *Set) SortedSlice() []uint32 {\n\tss := s.Slice()\n\tsort.Slice(ss, func(i, j int) bool {\n\t\treturn ss[i] < ss[j]\n\t})\n\treturn ss\n}",
"func StringMapToSlice(in map[string]string) []string {\n\tret := []string{}\n\n\tfor _, val := range in {\n\t\tret = append(ret, val)\n\t}\n\n\tsort.Strings(ret)\n\n\treturn ret\n\n}",
"func StringSlice(src []*string) []string {\n\tdst := make([]string, len(src))\n\tfor i := 0; i < len(src); i++ {\n\t\tif src[i] != nil {\n\t\t\tdst[i] = *(src[i])\n\t\t}\n\t}\n\treturn dst\n}",
"func Strings(s []string) int {\n\treturn Sort(sort.StringSlice(s))\n}",
"func NewStringSet() *StringSet {\n\treturn &StringSet{\n\t\tmembers: make(map[string]struct{}),\n\t}\n}",
"func SortedList(set map[string]bool) []string {\n\tvar ret []string\n\tfor s := range set {\n\t\tret = append(ret, s)\n\t}\n\tsort.Strings(ret)\n\treturn ret\n}",
"func (s StringSet) Keys() []string {\n\tret := make([]string, 0, len(s))\n\tfor v := range s {\n\t\tret = append(ret, v)\n\t}\n\treturn ret\n}",
"func NewStringSet(initItems ...string) StringSet {\n\tnewSet := StringSet{}\n\tfor _, item := range initItems {\n\t\tnewSet.Add(item)\n\t}\n\treturn newSet\n}",
"func expandStringSet(configured *schema.Set) []*string {\n\treturn expandStringList(configured.List())\n}",
"func (s stringSet) list() []string {\n\tl := make([]string, 0, len(s))\n\tfor k := range s {\n\t\tl = append(l, k)\n\t}\n\tsort.Strings(l)\n\treturn l\n}",
"func stringSetDifference(aa, bb []string) []string {\n\trr := []string{}\n\nLoopStrings:\n\tfor _, a := range aa {\n\t\tfor _, b := range bb {\n\t\t\tif reflect.DeepEqual(a, b) {\n\t\t\t\tcontinue LoopStrings\n\t\t\t}\n\t\t}\n\n\t\trr = append(rr, a)\n\t}\n\n\treturn rr\n}",
"func NewStringSet(elems ...string) StringSet {\n\tres := StringSet{Set: make(map[string]bool)}\n\tfor _, elem := range elems {\n\t\tres.Set[elem] = true\n\t}\n\treturn res\n}",
"func GetSortedKeySlice(m map[string]string) []string {\n\tkeys := make([]string, 0)\n\tfor key := range m {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Sort(sort.StringSlice(keys))\n\treturn keys\n}",
"func OfStrings(strings ...string) (s StringSet) {\n\treturn Strings(strings)\n}",
"func NewStringSet() *StringSet {\n\treturn &StringSet{make(map[string]bool), make([]string, 0), false}\n}",
"func RemoveFromSlice(slice []string, values ...string) []string {\n\toutput := make([]string, 0, len(slice))\n\n\tremove := make(map[string]bool)\n\tfor _, value := range values {\n\t\tremove[value] = true\n\t}\n\n\tfor _, s := range slice {\n\t\t_, ok := remove[s]\n\t\tif ok {\n\t\t\tcontinue\n\t\t}\n\t\toutput = append(output, s)\n\t}\n\n\treturn output\n}",
"func SortedUniqueStrings(list []string) []string {\n\tunique := FirstUniqueStrings(list)\n\tsort.Strings(unique)\n\treturn unique\n}",
"func (c StringArrayCollection) Slice(keys ...int) Collection {\n\tvar d = make([]string, len(c.value))\n\tcopy(d, c.value)\n\tif len(keys) == 1 {\n\t\treturn StringArrayCollection{\n\t\t\tvalue: d[keys[0]:],\n\t\t}\n\t} else {\n\t\treturn StringArrayCollection{\n\t\t\tvalue: d[keys[0] : keys[0]+keys[1]],\n\t\t}\n\t}\n}",
"func StringSliceMap(ss []string, fn func(string) string) []string {\n\tss2 := make([]string, len(ss))\n\tfor i, v := range ss {\n\t\tss2[i] = fn(v)\n\t}\n\n\treturn ss2\n}",
"func (ss *StringSet) ElementsSorted() []string {\n\telements := ss.Elements()\n\tsort.Slice(elements, func(i, j int) bool { return elements[i] < elements[j] })\n\treturn elements\n}",
"func (opts *Opts) StringSlice() (sl []string) {\n\topts.updateOptions()\n\tfor option, ok := range opts.options {\n\t\tif ok {\n\t\t\tsl = append(sl, option)\n\t\t}\n\t}\n\treturn\n}",
"func (set *StringMap) SetToArray() []string {\n\tset.RWlock.RLock()\n\tdefer set.RWlock.RUnlock()\n\tkeys := make([]string, 0)\n\tfor k := range set.set {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}",
"func (this *DynMap) GetStringSlice(key string) ([]string, bool) {\n\tlst, ok := this.Get(key)\n\tif !ok {\n\t\treturn nil, false\n\t}\n\tswitch v := lst.(type) {\n\tcase []string:\n\t\treturn v, true\n\tcase []interface{}:\n\t\tretlist := make([]string, 0)\n\t\tfor _, tmp := range v {\n\t\t\tin := ToString(tmp)\n\t\t\tretlist = append(retlist, in)\n\t\t}\n\t\treturn retlist, true\n\t}\n\treturn nil, false\n}",
"func stringSliceIntersect(s, t []string) []string {\n\tvar res []string\n\tm := make(map[string]bool, len(s))\n\tfor _, x := range s {\n\t\tm[x] = true\n\t}\n\tfor _, y := range t {\n\t\tif m[y] {\n\t\t\tres = append(res, y)\n\t\t}\n\t}\n\treturn res\n}",
"func Union(sets ...StringSet) StringSet {\n\ts := make(StringSet)\n\tfor _, t := range sets {\n\t\tfor k := range t {\n\t\t\ts.Add(k)\n\t\t}\n\t}\n\treturn s\n}",
"func diffStrings(src ...[]string) []string {\n\tfirst := make(map[string]struct{})\n\tunique := make(map[string]struct{})\n\n\tfor i, srci := range src {\n\t\tfor _, v := range srci {\n\t\t\tif i == 0 {\n\t\t\t\tfirst[v] = struct{}{}\n\t\t\t} else {\n\t\t\t\tif _, ok := first[v]; !ok {\n\t\t\t\t\tunique[v] = struct{}{}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tkeys := make([]string, len(unique))\n\ti := 0\n\n\tfor k := range unique {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\n\treturn keys\n}",
"func uniqueStrings(s []string) []string {\n\tunique := make(map[string]bool, len(s))\n\tus := make([]string, len(unique))\n\tfor _, elem := range s {\n\t\tif len(elem) != 0 {\n\t\t\tif !unique[elem] {\n\t\t\t\tus = append(us, elem)\n\t\t\t\tunique[elem] = true\n\t\t\t}\n\t\t}\n\t}\n\treturn us\n}",
"func removeDuplicates(stringSlices ...[]string) []string {\n\tuniqueMap := map[string]bool{}\n\n\tfor _, stringSlice := range stringSlices {\n\t\tfor _, str := range stringSlice {\n\t\t\tuniqueMap[str] = true\n\t\t}\n\t}\n\n\t// Create a slice with the capacity of unique items\n\t// This capacity make appending flow much more efficient\n\tresult := make([]string, 0, len(uniqueMap))\n\n\tfor key := range uniqueMap {\n\t\tresult = append(result, key)\n\t}\n\n\treturn result\n}",
"func StringSlice(s []string, err error) []string {\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\treturn s\n}",
"func NewStringSet(lists ...[]string) StringSet {\n\tret := make(map[string]bool)\n\tfor _, list := range lists {\n\t\tfor _, entry := range list {\n\t\t\tret[entry] = true\n\t\t}\n\t}\n\treturn ret\n}",
"func Strings(s []string, caseInsensitive bool) {\n\tif caseInsensitive {\n\t\tsort.Sort(stringSlice(s))\n\t} else {\n\t\tsort.Strings(s)\n\t}\n}",
"func Strings(values []string) StringSet {\n\ts := make(StringSet)\n\tfor _, str := range values {\n\t\ts[str] = struct{}{}\n\t}\n\treturn s\n}",
"func DedupeSortSlice(s []string, modifier func(string) string) []string {\n\to := DedupeSlice(s, modifier)\n\tsort.Strings(o)\n\treturn o\n}",
"func ShardStrings(s []string, shardSize int) [][]string {\n\tif len(s) == 0 {\n\t\treturn nil\n\t}\n\tret := make([][]string, 0, (len(s)+shardSize-1)/shardSize)\n\tfor len(s) > shardSize {\n\t\tret = append(ret, s[0:shardSize])\n\t\ts = s[shardSize:]\n\t}\n\tif len(s) > 0 {\n\t\tret = append(ret, s)\n\t}\n\treturn ret\n}",
"func sortSliceString(dataSlice sliceStringInterface, keyOrder, sortDirection string) sliceStringInterface {\n\tindex := make(PairList, len(dataSlice))\n\ti := 0\n\tfor k, v := range dataSlice {\n\t\tindex[i] = Pair{v[keyOrder].(string), k}\n\t\ti++\n\t}\n\tsort.Sort(index)\n\n\torderedDataSlice := make(sliceStringInterface, len(dataSlice))\n\tif sortDirection == \"asc\" {\n\t\tfor k, v := range index {\n\t\t\torderedDataSlice[k] = dataSlice[v.Value]\n\t\t}\n\t} else {\n\t\tfor k, v := range index {\n\t\t\torderedDataSlice[len(dataSlice)-k-1] = dataSlice[v.Value]\n\t\t}\n\t}\n\treturn orderedDataSlice\n}",
"func ExampleIntSet_Slice() {\n\ts1 := gset.NewIntSet()\n\ts1.Add([]int{1, 2, 3, 4}...)\n\tfmt.Println(s1.Slice())\n\n\t// May Output:\n\t// [1, 2, 3, 4]\n}",
"func (s String) ToSlice() []string {\n\tres := make([]string, 0)\n\tfor k := range s {\n\t\tres = append(res, k)\n\t}\n\treturn res\n}",
"func removeStringFromSlice(str string, slice []string) []string {\n\tfor i, v := range slice {\n\t\tif v == str {\n\t\t\t//append the subslice of all elements after this one, to the sublice of all elements before this one\n\t\t\treturn append(slice[:i], slice[i+1:]...)\n\t\t}\n\t}\n\n\t//if the string was not present, just return the slice back\n\treturn slice\n}",
"func UniqueStrings(s []string) []string {\n\tkeys := make(map[string]bool, len(s))\n\tlist := []string{}\n\n\tfor _, entry := range s {\n\t\tif _, value := keys[entry]; !value {\n\t\t\tkeys[entry] = true\n\t\t\tlist = append(list, entry)\n\t\t}\n\t}\n\n\treturn list\n}",
"func DecodeStringSet(blob []byte) (*StringSet, error) {\n\tss := NewStringSet()\n\tbuf := bytes.NewBuffer(blob)\n\tdec := gob.NewDecoder(buf)\n\terr := dec.Decode(&ss.members)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ss, nil\n}",
"func SelectPrefixInStringSlice(prefix string, items []string) []string {\n\n\tl := len(prefix)\n\n\tvar results []string\n\n\t// iterate through the slice of items\n\tfor _, item := range items {\n\n\t\t// check the item length is geater than or equal to the prefix length\n\t\t// this ensures no out of bounds memory errors will occur\n\t\tif len(item) >= l {\n\t\t\tif prefix == item[:l] {\n\t\t\t\tresults = append(results, item)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn results\n}",
"func IntersectionSlice(one []string, two []string) []string {\n\tm1 := make(map[string]struct{})\n\tfor _, e := range one {\n\t\tif e == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tm1[e] = struct{}{}\n\t}\n\tm2 := make(map[string]struct{})\n\tfor _, e := range two {\n\t\tif e == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tm2[e] = struct{}{}\n\t}\n\tfor key := range m1 {\n\t\tif _, ok := m2[key]; !ok {\n\t\t\tdelete(m1, key)\n\t\t}\n\t}\n\ts := make([]string, 0, len(m1))\n\tfor key := range m1 {\n\t\ts = append(s, key)\n\t}\n\tsort.Strings(s)\n\treturn s\n}",
"func OnlyUnique(slice []string) []string {\n\tuniqMap := make(map[string]struct{})\n\tfor _, v := range slice {\n\t\tuniqMap[v] = struct{}{}\n\t}\n\n\tuniqSlice := make([]string, 0, len(uniqMap))\n\n\tkeys := make([]string, 0, len(uniqMap))\n\tfor k := range uniqMap {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tuniqSlice = append(uniqSlice, keys...)\n\n\treturn uniqSlice\n}"
] | [
"0.7395667",
"0.70148724",
"0.6999877",
"0.69945043",
"0.69452065",
"0.6828592",
"0.6691718",
"0.669042",
"0.6537828",
"0.64243567",
"0.641947",
"0.63832146",
"0.6322088",
"0.62684745",
"0.62402517",
"0.62099415",
"0.6133305",
"0.60791206",
"0.6033735",
"0.6019684",
"0.60185736",
"0.6000332",
"0.5990574",
"0.5971549",
"0.5957457",
"0.5927781",
"0.5909564",
"0.5882885",
"0.58540154",
"0.5821667",
"0.58215857",
"0.57886046",
"0.5785382",
"0.5780324",
"0.5772196",
"0.5761199",
"0.57473755",
"0.571585",
"0.5706161",
"0.56939733",
"0.5679153",
"0.5668431",
"0.56633186",
"0.56620383",
"0.5657056",
"0.5646248",
"0.56129694",
"0.56099904",
"0.5598388",
"0.55948895",
"0.5593206",
"0.558793",
"0.55861187",
"0.55823",
"0.5577969",
"0.5571112",
"0.55604225",
"0.55514336",
"0.55507195",
"0.5532341",
"0.5526158",
"0.5518267",
"0.5512108",
"0.5504869",
"0.5491781",
"0.54910356",
"0.54904777",
"0.5470335",
"0.5466717",
"0.5466551",
"0.54528874",
"0.545049",
"0.5433288",
"0.5423347",
"0.54194474",
"0.5417994",
"0.5414889",
"0.5401876",
"0.5395807",
"0.5393886",
"0.53859645",
"0.5382767",
"0.5381976",
"0.537445",
"0.5373625",
"0.53725606",
"0.5371492",
"0.5369686",
"0.535884",
"0.5351194",
"0.53490186",
"0.53469795",
"0.5341149",
"0.5338002",
"0.5318319",
"0.53142923",
"0.5313247",
"0.53093255",
"0.53051925",
"0.5303803"
] | 0.81835073 | 0 |
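A minimal usage sketch for the StringsSliceFromSet document in the row above (the wrapper function, the fmt import, and the sample map below are illustrative assumptions, not part of the dataset row):

func exampleStringsSliceFromSet() {
	// Go map iteration order is randomized; StringsSliceFromSet (defined in the row above)
	// returns the keys as a sorted, deterministic slice.
	set := map[string]struct{}{"charlie": {}, "alpha": {}, "bravo": {}}
	fmt.Println(StringsSliceFromSet(set)) // [alpha bravo charlie]
	fmt.Println(StringsSliceFromSet(nil)) // [] (nil input yields a nil slice)
}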
ParseOnOff parses whether value is "on" or "off", parameterName is passed for error reporting purposes, defaultValue is returned when no value is set | func ParseOnOff(parameterName, val string, defaultValue bool) (bool, error) {
switch val {
case teleport.On:
return true, nil
case teleport.Off:
return false, nil
case "":
return defaultValue, nil
default:
return false, trace.BadParameter("bad %q parameter value: %q, supported values are on or off", parameterName, val)
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (dps *domainParser) di2OnOff() {\n\tdps.defaultValue = dps.onOffDefaultValue\n\tdps.customParseID = dps.onOffCustomParseID\n\tdps.checkEndedCorrect = dps.onOffCheckEndedCorrect\n\tdps.appendQP = dps.onOffAppendQp\n}",
"func ParseBool(str string) (bool, error) {\n\tif str == \"on\" {\n\t\treturn true, nil\n\t}\n\tif str == \"off\" {\n\t\treturn false, nil\n\t}\n\treturn strconv.ParseBool(str)\n}",
"func (f flagBool) Parse(value string) interface{} {\n\tswitch value {\n\tcase \"1\", \"t\", \"T\", \"true\", \"TRUE\", \"True\", \"y\", \"Y\", \"yes\", \"YES\", \"Yes\":\n\t\treturn true\n\t}\n\treturn false\n}",
"func (f flagString) Parse(value string) interface{} {\n\treturn value\n}",
"func ParseBoolP(cmd *cobra.Command, name string) (*bool, error) {\n\tflagRaw, err := cmd.Flags().GetString(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar flagVal *bool\n\tss, err := strconv.ParseBool(flagRaw)\n\tif err != nil && flagRaw != \"\" {\n\t\treturn nil, err\n\t}\n\n\tif flagRaw != \"\" && err == nil {\n\t\treturn &ss, nil\n\t}\n\n\treturn flagVal, nil\n}",
"func (f *Form) Bool(param string, defaultValue bool) bool {\n\tvals, ok := f.values[param]\n\tif !ok {\n\t\treturn defaultValue\n\t}\n\tparamVal, err := strconv.ParseBool(vals[0])\n\tif err != nil {\n\t\tf.err = err\n\t\treturn defaultValue\n\t}\n\treturn paramVal\n}",
"func NamedBoolDefault(name string, def bool) func(http.ResponseWriter, url.Values, martini.Context) {\n\treturn func(w http.ResponseWriter, query url.Values, m martini.Context) {\n\t\tvalue_string := query.Get(name)\n\t\tvalue, err := strconv.ParseBool(value_string)\n\n\t\tif \"\" == value_string {\n\t\t\tm.Map(NamedBoolParameter(def))\n\t\t\treturn\n\t\t}\n\n\t\tif nil != err {\n\t\t\thttp.Error(w, fmt.Sprintf(\"\\\"%s\\\" is not a boolean\"), 422)\n\t\t}\n\n\t\tm.Map(NamedBoolParameter(value))\n\t}\n}",
"func ParseBool(operand string) (value bool, err error) { return strconv.ParseBool(operand) }",
"func (dps *domainParser) onOffDefaultValue() (tmpIDs []string, queryPieceIDs map[string]bool) {\n\ttmpIDs = []string{onoff_default_id}\n\tqueryPieceIDs = map[string]bool{onoff_default_id: true}\n\treturn\n}",
"func ParseBool(str string) (bool, error) {}",
"func ToBool(v interface{}, def bool) bool {\r\n\tif b, ok := v.(bool); ok {\r\n\t\treturn b\r\n\t}\r\n\tif i, ok := v.(int); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif i, ok := v.(float64); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif i, ok := v.(float32); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif ss, ok := v.([]string); ok {\r\n\t\tv = ss[0]\r\n\t}\r\n\tif s, ok := v.(string); ok {\r\n\t\tif s == \"on\" {\r\n\t\t\treturn true\r\n\t\t}\r\n\t\tif s == \"off\" || s == \"\" {\r\n\t\t\treturn false\r\n\t\t}\r\n\t\tif b, err := strconv.ParseBool(s); err == nil {\r\n\t\t\treturn b\r\n\t\t}\r\n\t}\r\n\r\n\treturn def\r\n\r\n}",
"func parseBool(str string) (value bool, err error) {\n\tswitch str {\n\tcase \"1\", \"t\", \"T\", \"true\", \"TRUE\", \"True\", \"YES\", \"yes\", \"Yes\", \"ON\", \"on\", \"On\":\n\t\treturn true, nil\n\tcase \"0\", \"f\", \"F\", \"false\", \"FALSE\", \"False\", \"NO\", \"no\", \"No\", \"OFF\", \"off\", \"Off\":\n\t\treturn false, nil\n\t}\n\treturn false, fmt.Errorf(\"parsing \\\"%s\\\": invalid syntax\", str)\n}",
"func ParseBool(q url.Values, name string) (bool, bool, error) {\n\tstringVal := q.Get(name)\n\tif stringVal == \"\" {\n\t\treturn false, false, nil\n\t}\n\n\tval, err := strconv.ParseBool(stringVal)\n\tif err != nil {\n\t\treturn false, false, trace.BadParameter(\n\t\t\t\"'%v': expected 'true' or 'false', got %v\", name, stringVal)\n\t}\n\treturn val, true, nil\n}",
"func parseBoolean(s *scanner) (bool, error) {\n\tif s.eof() || s.data[s.off] != '?' {\n\t\treturn false, &UnmarshalError{s.off, ErrInvalidBooleanFormat}\n\t}\n\ts.off++\n\n\tif s.eof() {\n\t\treturn false, &UnmarshalError{s.off, ErrInvalidBooleanFormat}\n\t}\n\n\tswitch s.data[s.off] {\n\tcase '0':\n\t\ts.off++\n\n\t\treturn false, nil\n\tcase '1':\n\t\ts.off++\n\n\t\treturn true, nil\n\t}\n\n\treturn false, &UnmarshalError{s.off, ErrInvalidBooleanFormat}\n}",
"func AutotypeValue(input interface{}) interface{} {\n\tif strValue, ok := input.(string); ok {\n\t\tif intVal, err := strconv.ParseInt(strValue, 10, 64); err == nil {\n\t\t\treturn intVal\n\t\t} else if floatVal, err := strconv.ParseFloat(strValue, 64); err == nil {\n\t\t\treturn floatVal\n\t\t} else if strValue == \"true\" {\n\t\t\treturn true\n\t\t} else if strValue == \"false\" {\n\t\t\treturn false\n\t\t} else if strValue == \"null\" {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn input\n}",
"func ParseBool(str string) (val bool, isBool bool) {\n\t// Note: Not using strconv.ParseBool because I want it a bit looser (any casing) and to allow yes/no/off/on values.\n\tlstr := strings.ToLower(strings.TrimSpace(str))\n\tswitch lstr {\n\tcase \"false\", \"f\", \"0\", \"no\", \"n\", \"off\":\n\t\tisBool = true\n\tcase \"true\", \"t\", \"1\", \"yes\", \"y\", \"on\":\n\t\tval = true\n\t\tisBool = true\n\t}\n\treturn\n}",
"func OptionalURLParamBool(request *http.Request, name string) (null.Bool, IResponse) {\n\tvalueStr := chi.URLParam(request, name)\n\tif valueStr == \"\" {\n\t\treturn null.Bool{}, nil\n\t}\n\n\tvalue, err := strconv.ParseBool(valueStr)\n\tif err != nil {\n\t\treturn null.Bool{}, BadRequest(request, \"Invalid url param %s (value: '%s'): %s\", name, valueStr, err)\n\t}\n\n\treturn null.BoolFrom(value), nil\n}",
"func ParseFlagBool(args []string) (bool, int, error) {\n\tif strings.ContainsAny(args[0], \"= \") {\n\t\tparts := strings.SplitN(args[0], \"=\", 2)\n\t\tif len(parts) == 1 {\n\t\t\tparts = strings.SplitN(args[0], \" \", 2)\n\t\t}\n\t\tif len(parts) == 2 {\n\t\t\tval, isBool := ParseBool(parts[1])\n\t\t\tif !isBool {\n\t\t\t\treturn false, 0, fmt.Errorf(\"invalid %s bool value: [%s]\", parts[0], parts[1])\n\t\t\t}\n\t\t\treturn val, 0, nil\n\t\t}\n\t\treturn false, 0, fmt.Errorf(\"unable to split flag and value from string: [%s]\", args[0])\n\t}\n\tif len(args) > 1 {\n\t\tval, isBool := ParseBool(args[1])\n\t\tif isBool {\n\t\t\treturn val, 1, nil\n\t\t}\n\t}\n\treturn true, 0, nil\n}",
"func ParseBool(val interface{}) (value bool, err error) {\n\tif val != nil {\n\t\tswitch v := val.(type) {\n\t\tcase bool:\n\t\t\treturn v, nil\n\t\tcase string:\n\t\t\tswitch v {\n\t\t\tcase \"1\", \"t\", \"T\", \"true\", \"TRUE\", \"True\", \"YES\", \"yes\", \"Yes\", \"Y\", \"y\", \"ON\", \"on\", \"On\":\n\t\t\t\treturn true, nil\n\t\t\tcase \"0\", \"f\", \"F\", \"false\", \"FALSE\", \"False\", \"NO\", \"no\", \"No\", \"N\", \"n\", \"OFF\", \"off\", \"Off\":\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\tcase int8, int32, int64:\n\t\t\tstrV := fmt.Sprintf(\"%s\", v)\n\t\t\tif strV == \"1\" {\n\t\t\t\treturn true, nil\n\t\t\t} else if strV == \"0\" {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\tcase float64:\n\t\t\tif v == 1 {\n\t\t\t\treturn true, nil\n\t\t\t} else if v == 0 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn false, fmt.Errorf(\"parsing %q: invalid syntax\", val)\n\t}\n\treturn false, fmt.Errorf(\"parsing <nil>: invalid syntax\")\n}",
"func URLParamBool(request *http.Request, name string) (bool, IResponse) {\n\tvalueStr := chi.URLParam(request, name)\n\tvalue, err := strconv.ParseBool(valueStr)\n\tif err != nil {\n\t\treturn false, BadRequest(request, \"Invalid url param %s (value: '%s'): %s\", name, valueStr, err)\n\t}\n\n\treturn value, nil\n}",
"func ParseBooleanDefaultFalse(s string) Boolean {\n\tif s == \"\" {\n\t\treturn NewBoolean(false)\n\t}\n\n\treturn NewBoolean(s == \"true\")\n}",
"func ValBool(k string, d bool, p map[string]string) (v bool, err error) {\n\n\tbStr, ok := p[k]\n\tif !ok {\n\t\tv = d\n\t\treturn\n\t}\n\n\ttErr := fmt.Errorf(\"invalid value for the parameter %s\", k)\n\tb, err := strconv.ParseBool(bStr)\n\tif err != nil {\n\t\terr = tErr\n\t\treturn\n\t}\n\n\tv = b\n\n\treturn\n}",
"func Bool(name string, defaultValue bool) bool {\n\tif strVal, ok := os.LookupEnv(name); ok {\n\t\tif res, err := strconv.ParseBool(strVal); err == nil {\n\t\t\treturn res\n\t\t}\n\t}\n\n\treturn defaultValue\n}",
"func ParseBooleanDefaultTrue(s string) Boolean {\n\tif s == \"\" {\n\t\treturn NewBoolean(true)\n\t}\n\n\treturn NewBoolean(s == \"true\")\n}",
"func onOffParseFile(path string) (*parseResult, error) {\n\tif pr, ok := hasOnOffParseDone[path]; ok {\n\t\treturn pr, nil\n\t}\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\treader := bufio.NewReader(f)\n\tdps := newDomainParser()\n\tdps.di2OnOff()\n\tpr, err := dps.parse(reader)\n\thasOnOffParseDone[path] = pr\n\treturn pr, err\n}",
"func boolHandler(set func(bool) error, get func() bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := mux.Vars(r)\n\n\t\tval, err := strconv.ParseBool(vars[\"value\"])\n\t\tif err != nil {\n\t\t\tjsonError(w, http.StatusBadRequest, err)\n\t\t\treturn\n\t\t}\n\n\t\terr = set(val)\n\t\tif err != nil {\n\t\t\tjsonError(w, http.StatusNotAcceptable, err)\n\t\t\treturn\n\t\t}\n\n\t\tjsonResult(w, get())\n\t}\n}",
"func getBoolParamFromURL(r *http.Request, key string) (bool, error) {\n\tval, err := hchi.GetStringFromURL(r, key)\n\tif err != nil {\n\t\treturn false, errors.Wrapf(err, \"loading %s from URL\", key)\n\t}\n\n\tif val == \"true\" {\n\t\treturn true, nil\n\t}\n\tif val == \"false\" || val == \"\" {\n\t\treturn false, nil\n\t}\n\n\treturn false, problem.MakeInvalidFieldProblem(key, errors.New(\"invalid bool value\"))\n}",
"func Parse(config interface{}) { Define(config); flag.Parse() }",
"func (this *OptionBool) Parse(arg string) error {\n argint, err := strconv.Atoi(arg)\n if err != nil {\n return err\n }\n\n //this.opt_storage = argint!=0;\n var storage *bool\n storage = this.opt_storage.(*bool)\n\n *storage = argint != 0\n\n return nil\n}",
"func isFlagged(tagValue *string) bool {\n\tif tagValue == nil {\n\t\treturn false\n\t}\n\tb, err := strconv.ParseBool(*tagValue)\n\tif err == nil {\n\t\treturn b\n\t}\n\treturn false\n}",
"func Bool(v interface{}, defaults ...bool) (b bool) {\n\tswitch tv := v.(type) {\n\tcase nil:\n\t\tif 1 < len(defaults) {\n\t\t\tb = defaults[1]\n\t\t}\n\tcase bool:\n\t\tb = tv\n\tcase string:\n\t\tvar err error\n\t\tif 1 < len(defaults) {\n\t\t\tb = defaults[1]\n\t\t} else if b, err = strconv.ParseBool(tv); err != nil {\n\t\t\tif 0 < len(defaults) {\n\t\t\t\tb = defaults[0]\n\t\t\t}\n\t\t}\n\tcase gen.Bool:\n\t\tb = bool(tv)\n\tcase gen.String:\n\t\tvar err error\n\t\tif 1 < len(defaults) {\n\t\t\tb = defaults[1]\n\t\t} else if b, err = strconv.ParseBool(string(tv)); err != nil {\n\t\t\tif 0 < len(defaults) {\n\t\t\t\tb = defaults[0]\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tif 0 < len(defaults) {\n\t\t\tb = defaults[0]\n\t\t}\n\t}\n\treturn\n}",
"func QueryBoolParam(r *http.Request, param string, defaultValue bool) bool {\n\tvalue := r.URL.Query().Get(param)\n\tif value == \"\" {\n\t\treturn defaultValue\n\t}\n\n\tval, err := strconv.ParseBool(value)\n\n\tif err != nil {\n\t\treturn defaultValue\n\t}\n\n\treturn val\n}",
"func parseBool(asString string) (bool, error) {\n\tswitch asString {\n\tcase \"true\":\n\t\treturn true, nil\n\tcase \"false\":\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"could not parse %q as a bool\", asString)\n\t}\n}",
"func (a *Args) IsOff(s string) bool {\n\treturn !a.IsOn(s)\n}",
"func Bool(v string) (bool, bool, error) {\n\tswitch os.Getenv(v) {\n\tcase \"true\":\n\t\treturn true, true, nil\n\tcase \"false\":\n\t\treturn false, true, nil\n\tcase \"\":\n\t\treturn false, false, nil\n\tdefault:\n\t\treturn false, false, fmt.Errorf(\"%s must be 'true' or 'false'\", v)\n\t}\n}",
"func parseOrDefault(req *http.Request, requestParam string, defaultValue int) int {\n\tresult := defaultValue\n\tparamAsInt, ok := req.URL.Query()[requestParam]\n\tif !ok || len(paramAsInt[0]) < 1 {\n\t\tlog.Printf(\"no parameter provided in request --> defaulting to %d \\n\", defaultValue)\n\t} else {\n\t\tintValue, err := strconv.Atoi(paramAsInt[0])\n\t\tif err != nil {\n\t\t\tlog.Printf(\"something went wrong parsing the parameter --> defaulting to %d \\n\", defaultValue)\n\t\t\tlog.Println(err)\n\t\t} else {\n\t\t\tresult = intValue\n\t\t}\n\t}\n\treturn result\n}",
"func (e Entry) BoolDefault(def bool) (bool, error) {\n\tv := e.ValueRaw\n\tif v == nil {\n\t\treturn def, errFindParse.Format(\"bool\", e.Key)\n\t}\n\n\tif vBoolean, ok := v.(bool); ok {\n\t\treturn vBoolean, nil\n\t}\n\n\tif vString, ok := v.(string); ok {\n\t\tb, err := strconv.ParseBool(vString)\n\t\tif err != nil {\n\t\t\treturn def, err\n\t\t}\n\t\treturn b, nil\n\t}\n\n\tif vInt, ok := v.(int); ok {\n\t\tif vInt == 1 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t}\n\n\treturn def, errFindParse.Format(\"bool\", e.Key)\n}",
"func ParseQueryBool(param string, request *http.Request, params imageserver.Params) error {\n\ts := request.URL.Query().Get(param)\n\tif s == \"\" {\n\t\treturn nil\n\t}\n\tb, err := strconv.ParseBool(s)\n\tif err != nil {\n\t\treturn newParseTypeParamError(param, \"bool\", err)\n\t}\n\tparams.Set(param, b)\n\treturn nil\n}",
"func assignValueBool(params map[string]interface{}, name string, out *bool) error {\n\tif raw, ok := params[name]; ok {\n\t\tval, ok := raw.(bool)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Expecting %s to be a boolean\", name)\n\t\t}\n\t\t*out = val\n\t\tdelete(params, name)\n\t}\n\treturn nil\n}",
"func parseModeFlag() {\n\tswitch modeStr {\n\tcase \"iteration\":\n\t\tmode = coloring.IterationCount\n\tcase \"modulo\":\n\t\tmode = coloring.Modulo\n\tcase \"vector\":\n\t\tmode = coloring.VectorField\n\tcase \"orbit\":\n\t\tmode = coloring.OrbitLength\n\tdefault:\n\t\tlogrus.Fatalln(\"invalid coloring function:\", modeStr)\n\t}\n}",
"func (k *Parameters) ParseFlags(defaultEndpoint string, printVersionInfoFun func(), storeParamsMap map[string]string) {\n\tvar params []*StrParamDetail\n\tk.ConfigFile = StrParamDetail{Name: \"config-file\", ShortName: \"k\", Usage: \"Configuration file.\"}\n\tparams = append(params, &k.ConfigFile)\n\tk.Endpoint = StrParamDetail{Name: \"endpoint\", DefaultValue: defaultEndpoint, Usage: \"Server config: Endpoint the server listen and serve.\"}\n\tparams = append(params, &k.Endpoint)\n\tk.Insecure = StrParamDetail{Name: \"insecure\", DefaultValue: strconv.FormatBool(DefaultInsecure), Usage: \"Server config: Disable transport security.\"}\n\tparams = append(params, &k.Insecure)\n\tk.EnableAuthz = StrParamDetail{Name: \"enable-authz\", DefaultValue: strconv.FormatBool(DefaultEnableAuthz), Usage: \"Server config: Enable authorization check.\"}\n\tparams = append(params, &k.EnableAuthz)\n\tk.CertPath = StrParamDetail{Name: \"cert\", Usage: \"Server config: Server certifice file path.\"}\n\tparams = append(params, &k.CertPath)\n\tk.KeyPath = StrParamDetail{Name: \"key\", Usage: \"Server config: Server key file path.\"}\n\tparams = append(params, &k.KeyPath)\n\tk.ClientCertPath = StrParamDetail{Name: \"client-cert\", ShortName: \"c\", Usage: \"Server config: Client certifice file path.\"}\n\tparams = append(params, &k.ClientCertPath)\n\tk.ForceClientCert = StrParamDetail{Name: \"force-client-cert\", ShortName: \"f\", Usage: \"Server config: Force Client certification.\"}\n\tparams = append(params, &k.ForceClientCert)\n\n\tk.StoreType = StrParamDetail{Name: \"store-type\", DefaultValue: DefaultStoreType, Usage: \"Store config: Policy store type, etcd or file.\"}\n\tparams = append(params, &k.StoreType)\n\tk.StoreWatchEnabled = StrParamDetail{Name: \"enable-watch\", DefaultValue: strconv.FormatBool(DefaultStoreWatchEnabled), Usage: \"Evaluator config: Whether enable watch store changes.\"}\n\tparams = append(params, &k.StoreWatchEnabled)\n\n\t// Log configurations\n\tk.LogConf.LogLevel = StrParamDetail{Name: \"log-level\", Usage: \"Log config: log level, available levels are panic, fatal, error, warn, info and debug.\"}\n\tparams = append(params, &k.LogConf.LogLevel)\n\tk.LogConf.LogFormatter = StrParamDetail{Name: \"log-formatter\", Usage: \"Log config: log formatter, available values are text and json.\"}\n\tparams = append(params, &k.LogConf.LogFormatter)\n\tk.LogConf.LogReportCaller = StrParamDetail{Name: \"log-reportcaller\", DefaultValue: strconv.FormatBool(false), Usage: \"Log config: if the caller(file, line and function) is included in the log entry.\"}\n\tparams = append(params, &k.LogConf.LogReportCaller)\n\tk.LogConf.LogFileName = StrParamDetail{Name: \"log-filename\", Usage: \"Log config: log file name.\"}\n\tparams = append(params, &k.LogConf.LogFileName)\n\tk.LogConf.LogMaxSize = StrParamDetail{Name: \"log-maxsize\", Usage: \"Log config: maximum size in megabytes of the log file before it gets rotated.\"}\n\tparams = append(params, &k.LogConf.LogMaxSize)\n\tk.LogConf.LogCompress = StrParamDetail{Name: \"log-compress\", Usage: \"Log config: if the rotated log files should be compressed.\"}\n\tparams = append(params, &k.LogConf.LogCompress)\n\tk.LogConf.LogMaxBackups = StrParamDetail{Name: \"log-maxbackups\", Usage: \"Log config: maximum number of old log files to retain.\"}\n\tparams = append(params, &k.LogConf.LogMaxBackups)\n\tk.LogConf.LogMaxAge = StrParamDetail{Name: \"log-maxage\", Usage: \"Log config: maximum number of days to retain old log files.\"}\n\tparams = append(params, 
&k.LogConf.LogMaxAge)\n\tk.LogConf.LogLocalTime = StrParamDetail{Name: \"log-localtime\", Usage: \"Log config: if local time is used for formatting the timestamps in backup files.\"}\n\tparams = append(params, &k.LogConf.LogLocalTime)\n\n\t// Audit Log configurations\n\tk.AuditLogConf.LogLevel = StrParamDetail{Name: \"auditlog-level\", DefaultValue: DefaultAuditLogLevel, Usage: \"Audit Log config: log level, available levels are panic, fatal, error, warn, info and debug.\"}\n\tparams = append(params, &k.AuditLogConf.LogLevel)\n\tk.AuditLogConf.LogFormatter = StrParamDetail{Name: \"auditlog-formatter\", DefaultValue: DefaultAuditLogFormatter, Usage: \"Audit Log config: log formatter, available values are text and json.\"}\n\tparams = append(params, &k.AuditLogConf.LogFormatter)\n\tk.AuditLogConf.LogReportCaller = StrParamDetail{Name: \"auditlog-reportcaller\", DefaultValue: strconv.FormatBool(false), Usage: \"Audit Log config: if the caller(file, line and function) is included in the log entry.\"}\n\tparams = append(params, &k.AuditLogConf.LogReportCaller)\n\tk.AuditLogConf.LogFileName = StrParamDetail{Name: \"auditlog-filename\", DefaultValue: DefaultAuditLogFilename, Usage: \"Audit Log config: log file name.\"}\n\tparams = append(params, &k.AuditLogConf.LogFileName)\n\tk.AuditLogConf.LogMaxSize = StrParamDetail{Name: \"auditlog-maxsize\", DefaultValue: DefaultAuditLogMaxSize, Usage: \"Audit Log config: maximum size in megabytes of the log file before it gets rotated.\"}\n\tparams = append(params, &k.AuditLogConf.LogMaxSize)\n\tk.AuditLogConf.LogCompress = StrParamDetail{Name: \"auditlog-compress\", DefaultValue: \"false\", Usage: \"Audit Log config: if the rotated log files should be compressed.\"}\n\tparams = append(params, &k.AuditLogConf.LogCompress)\n\tk.AuditLogConf.LogMaxBackups = StrParamDetail{Name: \"auditlog-maxbackups\", DefaultValue: DefaultAuditLogMaxBackups, Usage: \"Audit Log config: maximum number of old log files to retain.\"}\n\tparams = append(params, &k.AuditLogConf.LogMaxBackups)\n\tk.AuditLogConf.LogMaxAge = StrParamDetail{Name: \"auditlog-maxage\", DefaultValue: DefaultAuditLogMaxAge, Usage: \"Audit Log config: maximum number of days to retain old log files.\"}\n\tparams = append(params, &k.AuditLogConf.LogMaxAge)\n\tk.AuditLogConf.LogLocalTime = StrParamDetail{Name: \"auditlog-localtime\", DefaultValue: \"false\", Usage: \"Audit Log config: if local time is used for formatting the timestamps in backup files.\"}\n\tparams = append(params, &k.AuditLogConf.LogLocalTime)\n\n\tk.AsserterConf.AsserterEndpoint = StrParamDetail{Name: \"asserter-endpoint\", Usage: \"Assertion server endpoint.\"}\n\tparams = append(params, &k.AsserterConf.AsserterEndpoint)\n\tk.AsserterConf.AsserterClientKeyPath = StrParamDetail{Name: \"asserter-client-key\", Usage: \"Assertion service client key file.\"}\n\tparams = append(params, &k.AsserterConf.AsserterClientKeyPath)\n\tk.AsserterConf.AsserterClientCertPath = StrParamDetail{Name: \"asserter-client-cert\", Usage: \"Assertion service client cert file.\"}\n\tparams = append(params, &k.AsserterConf.AsserterClientCertPath)\n\tk.AsserterConf.AsserterCaPath = StrParamDetail{Name: \"asserter-ca-cert\", Usage: \"Assertion service CA cert file.\"}\n\tparams = append(params, &k.AsserterConf.AsserterCaPath)\n\tk.AsserterConf.AsserterClientTimeout = StrParamDetail{Name: \"asserter-client-timeout\", DefaultValue: DefaultAsserterClientTimeout, Usage: \"Assertion service client http timeout value.\"}\n\tparams = append(params, 
&k.AsserterConf.AsserterClientTimeout)\n\n\tpflag.BoolVarP(&k.Version, \"version\", \"\", false, \"print version information\")\n\n\tfor _, paramDetail := range params {\n\t\tpflag.StringVarP(&(paramDetail.Value), paramDetail.Name, paramDetail.ShortName, paramDetail.DefaultValue, paramDetail.Usage)\n\t}\n\tpflag.Parse()\n\n\tif k.Version {\n\t\tprintVersionInfoFun()\n\t\tos.Exit(0)\n\t}\n\n\tif len(k.ConfigFile.Value) == 0 {\n\t\tenvVarName := FlagToEnv(k.ConfigFile.Name)\n\t\tval := os.Getenv(envVarName)\n\t\tif len(val) != 0 {\n\t\t\tk.ConfigFile.Value = val\n\t\t}\n\t}\n\n\tvar conf *cfg.Config\n\tif k.ConfigFile.Value != \"\" {\n\t\tvar err error\n\t\tconf, err = cfg.ReadConfig(k.ConfigFile.Value)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Fail to parse config file %s, error is %v. \\n\", k.ConfigFile.Value, err)\n\t\t\tk.usage()\n\t\t}\n\t} else {\n\t\tconf = nil\n\t}\n\n\tpflag.VisitAll(func(f *pflag.Flag) {\n\t\tkey := FlagToEnv(f.Name)\n\t\tif !f.Changed {\n\t\t\t//if not set from command line, search it from environment variable\n\t\t\tval := os.Getenv(key)\n\t\t\tif val != \"\" {\n\t\t\t\tf.Value.Set(val)\n\t\t\t} else {\n\t\t\t\t//if not set from environment variable, search it from config file\n\t\t\t\tswitch f.Name {\n\t\t\t\tcase k.Endpoint.Name:\n\t\t\t\t\tif conf != nil && conf.ServerConfig != nil && len(conf.ServerConfig.Endpoint) != 0 {\n\t\t\t\t\t\tf.Value.Set(conf.ServerConfig.Endpoint)\n\t\t\t\t\t}\n\t\t\t\tcase k.Insecure.Name:\n\t\t\t\t\tif conf != nil && conf.ServerConfig != nil && len(conf.ServerConfig.Insecure) != 0 {\n\t\t\t\t\t\tf.Value.Set(conf.ServerConfig.Insecure)\n\t\t\t\t\t}\n\t\t\t\tcase k.EnableAuthz.Name:\n\t\t\t\t\tif conf != nil && conf.ServerConfig != nil && len(conf.ServerConfig.EnableAuthz) != 0 {\n\t\t\t\t\t\tf.Value.Set(conf.ServerConfig.EnableAuthz)\n\t\t\t\t\t}\n\t\t\t\tcase k.KeyPath.Name:\n\t\t\t\t\tif conf != nil && conf.ServerConfig != nil && len(conf.ServerConfig.KeyPath) != 0 {\n\t\t\t\t\t\tf.Value.Set(conf.ServerConfig.KeyPath)\n\t\t\t\t\t}\n\t\t\t\tcase k.CertPath.Name:\n\t\t\t\t\tif conf != nil && conf.ServerConfig != nil && len(conf.ServerConfig.CertPath) != 0 {\n\t\t\t\t\t\tf.Value.Set(conf.ServerConfig.CertPath)\n\t\t\t\t\t}\n\t\t\t\tcase k.ForceClientCert.Name:\n\t\t\t\t\tif conf != nil && conf.ServerConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.ServerConfig.ForceClientCert))\n\t\t\t\t\t}\n\t\t\t\tcase k.ClientCertPath.Name:\n\t\t\t\t\tif conf != nil && conf.ServerConfig != nil && len(conf.ServerConfig.ClientCertPath) != 0 {\n\t\t\t\t\t\tf.Value.Set(conf.ServerConfig.ClientCertPath)\n\t\t\t\t\t}\n\t\t\t\tcase k.StoreType.Name:\n\t\t\t\t\tif conf != nil && conf.StoreConfig != nil && len(conf.StoreConfig.StoreType) != 0 {\n\t\t\t\t\t\tf.Value.Set(conf.StoreConfig.StoreType)\n\t\t\t\t\t}\n\t\t\t\tcase k.StoreWatchEnabled.Name:\n\t\t\t\t\tif conf != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.EnableWatch))\n\t\t\t\t\t}\n\t\t\t\t// Log configurations\n\t\t\t\tcase k.LogConf.LogLevel.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.LogConfig.Level)\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogFormatter.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.LogConfig.Formatter)\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogReportCaller.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.LogConfig.SetReportCaller))\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogFileName.Name:\n\t\t\t\t\tif 
conf != nil && conf.LogConfig != nil && conf.LogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.LogConfig.RotationConfig.Filename)\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogMaxSize.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil && conf.LogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.Itoa(conf.LogConfig.RotationConfig.MaxSize))\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogMaxAge.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil && conf.LogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.Itoa(conf.LogConfig.RotationConfig.MaxAge))\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogMaxBackups.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil && conf.LogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.Itoa(conf.LogConfig.RotationConfig.MaxBackups))\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogCompress.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil && conf.LogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.LogConfig.RotationConfig.Compress))\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogLocalTime.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil && conf.LogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.LogConfig.RotationConfig.LocalTime))\n\t\t\t\t\t}\n\t\t\t\t// Audit Log configurations\n\t\t\t\tcase k.AuditLogConf.LogLevel.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.AuditLogConfig.Level)\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogFormatter.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.AuditLogConfig.Formatter)\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogReportCaller.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.AuditLogConfig.SetReportCaller))\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogFileName.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil && conf.AuditLogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.AuditLogConfig.RotationConfig.Filename)\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogMaxSize.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil && conf.AuditLogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.Itoa(conf.AuditLogConfig.RotationConfig.MaxSize))\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogMaxAge.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil && conf.AuditLogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.Itoa(conf.AuditLogConfig.RotationConfig.MaxAge))\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogMaxBackups.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil && conf.AuditLogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.Itoa(conf.AuditLogConfig.RotationConfig.MaxBackups))\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogCompress.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil && conf.AuditLogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.AuditLogConfig.RotationConfig.Compress))\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogLocalTime.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil && conf.AuditLogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.AuditLogConfig.RotationConfig.LocalTime))\n\t\t\t\t\t}\n\t\t\t\t\t// Asserter webhook configurations\n\t\t\t\tcase k.AsserterConf.AsserterEndpoint.Name:\n\t\t\t\t\tif conf != nil && conf.AsserterWebhookConfig != nil 
{\n\t\t\t\t\t\tf.Value.Set(conf.AsserterWebhookConfig.Endpoint)\n\t\t\t\t\t}\n\t\t\t\tcase k.AsserterConf.AsserterCaPath.Name:\n\t\t\t\t\tif conf != nil && conf.AsserterWebhookConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.AsserterWebhookConfig.CACert)\n\t\t\t\t\t}\n\t\t\t\tcase k.AsserterConf.AsserterClientCertPath.Name:\n\t\t\t\t\tif conf != nil && conf.AsserterWebhookConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.AsserterWebhookConfig.ClientCert)\n\t\t\t\t\t}\n\t\t\t\tcase k.AsserterConf.AsserterClientKeyPath.Name:\n\t\t\t\t\tif conf != nil && conf.AsserterWebhookConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.AsserterWebhookConfig.ClientKey)\n\t\t\t\t\t}\n\t\t\t\tcase k.AsserterConf.AsserterClientTimeout.Name:\n\t\t\t\t\tif conf != nil && conf.AsserterWebhookConfig != nil {\n\t\t\t\t\t\tf.Value.Set(string(conf.AsserterWebhookConfig.HTTPTimeout))\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t//\n\t\t\t\t}\n\n\t\t\t\tkey, ok := storeParamsMap[f.Name]\n\t\t\t\tif ok {\n\t\t\t\t\tif conf != nil && conf.StoreConfig != nil && conf.StoreConfig.StoreProps != nil {\n\t\t\t\t\t\tif value, ok := conf.StoreConfig.StoreProps[key]; ok {\n\t\t\t\t\t\t\tswitch x := value.(type) {\n\t\t\t\t\t\t\tcase bool:\n\t\t\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(value.(bool)))\n\t\t\t\t\t\t\tcase int:\n\t\t\t\t\t\t\t\tf.Value.Set(strconv.Itoa(value.(int)))\n\t\t\t\t\t\t\tcase string:\n\t\t\t\t\t\t\t\tf.Value.Set(value.(string))\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tfmt.Printf(\"Unsupported type: %T\\n\", x)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t})\n\n\tfmt.Printf(\"parameters:%v\\n\", k)\n}",
"func (_Mevsky *MevskyFilterer) ParseTurnedOff(log types.Log) (*MevskyTurnedOff, error) {\n\tevent := new(MevskyTurnedOff)\n\tif err := _Mevsky.contract.UnpackLog(event, \"TurnedOff\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func (BooleanLiteral) paramValueNode() {}",
"func getBoolVal(input string) bool {\n\tinput = strings.ToLower(input)\n\tif input == \"yes\" || input == \"true\" {\n\t\treturn true\n\t}\n\treturn false\n}",
"func Bool(key string, def bool) bool {\n\tif s := String(key, \"\"); s != \"\" {\n\t\tif d, err := strconv.ParseBool(s); err == nil {\n\t\t\treturn d\n\t\t} else {\n\t\t\tLog(key, err)\n\t\t}\n\t}\n\treturn def\n}",
"func LoggingOnOffHandler(w http.ResponseWriter, r *http.Request) {\n\tInfo(\"Toggling Debug and Trace Logs\")\n\n\tresponseString := \"Changing Logging: \"\n\n\t// toggle trace\n\ttrace := r.URL.Query().Get(\"trace\")\n\ttrace = strings.ToLower(trace)\n\tif trace == \"on\" {\n\t\tInfo(\"Toggling Trace Logs on\")\n\t\tresponseString += \"Trace: on \"\n\t\tTraceLogger = log.New(os.Stdout, Tag+\" TRACE: \", loggerFlags)\n\t} else if trace == \"off\" {\n\t\tInfo(\"Toggling Trace Logs off\")\n\t\tresponseString += \"Trace: off \"\n\t\tTraceLogger = log.New(ioutil.Discard, Tag+\" TRACE: \", loggerFlags)\n\t}\n\n\t// toggle debug\n\tdebug := r.URL.Query().Get(\"debug\")\n\tdebug = strings.ToLower(debug)\n\tif debug == \"on\" {\n\t\tInfo(\"Toggling Debug Logs on\")\n\t\tresponseString += \"Debug: on \"\n\t\tDebugLogger = log.New(os.Stdout, Tag+\" DEBUG: \", loggerFlags)\n\t} else if debug == \"off\" {\n\t\tInfo(\"Toggling Debug Logs off \")\n\t\tresponseString += \"Debug: off \"\n\t\tDebugLogger = log.New(ioutil.Discard, Tag+\" DEBUG: \", loggerFlags)\n\t}\n\n\tw.Write([]byte(responseString))\n}",
"func (f *flag) Bool() bool {\n\tvalue, err := strconv.ParseBool(f.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn value\n}",
"func ParseBool(s string) (bool, error) {\n\tswitch s {\n\tdefault:\n\t\tb, err := strconv.ParseBool(s)\n\t\tif err != nil {\n\t\t\treturn b, errz.Err(err)\n\t\t}\n\t\treturn b, nil\n\tcase \"1\", \"yes\", \"Yes\", \"YES\", \"y\", \"Y\":\n\t\treturn true, nil\n\tcase \"0\", \"no\", \"No\", \"NO\", \"n\", \"N\":\n\t\treturn false, nil\n\t}\n}",
"func ParseBoolean(s string) Boolean {\n\tif s == \"\" {\n\t\treturn NullBoolean()\n\t}\n\n\treturn NewBoolean(s == \"true\")\n}",
"func getBoolValue(i *ini.File, section, key string, vdefault bool) bool {\n\treturn i.Section(section).Key(key).MustBool(vdefault)\n}",
"func getIsOn(internal *DeviceUpdateMessage) bool {\n\ton, ok := internal.State[enums.PropOn]\n\tif !ok {\n\t\treturn getIsOnDeviceSpecific(internal)\n\t}\n\n\treturn on.(bool)\n}",
"func (fOpenMode FileOpenMode) ParseString(\n valueString string,\n caseSensitive bool) (FileOpenMode, error) {\n\n ePrefix := \"FileOpenMode.ParseString() \"\n\n fOpenMode.checkInitializeMaps(false)\n\n result := FileOpenMode(0)\n\n lenValueStr := len(valueString)\n\n if strings.HasSuffix(valueString, \"()\") {\n valueString = valueString[0 : lenValueStr-2]\n lenValueStr -= 2\n }\n\n if lenValueStr < 3 {\n return result,\n fmt.Errorf(ePrefix+\n \"Input parameter 'valueString' is INVALID! valueString='%v' \", valueString)\n }\n\n var ok bool\n var idx int\n\n if caseSensitive {\n\n if !strings.HasPrefix(valueString, \"Mode\") {\n valueString = \"Mode\" + valueString\n }\n\n idx, ok = mFileOpenModeStringToInt[valueString]\n\n if !ok {\n return FileOpenMode(0),\n fmt.Errorf(ePrefix+\n \"'valueString' did NOT MATCH a FileOpenMode. valueString='%v' \", valueString)\n }\n\n result = FileOpenMode(idx)\n\n } else {\n\n valueString = strings.ToLower(valueString)\n\n if !strings.HasPrefix(valueString, \"mode\") {\n valueString = \"mode\" + valueString\n }\n\n idx, ok = mFileOpenModeLwrCaseStringToInt[valueString]\n\n if !ok {\n return FileOpenMode(0),\n fmt.Errorf(ePrefix+\n \"'valueString' did NOT MATCH a FileOpenMode. valueString='%v' \", valueString)\n }\n\n result =\n FileOpenMode(idx)\n }\n\n return result, nil\n}",
"func ParseFlag(input string, flags []*Flag) (*Flag, string, error) {\n\tkeyvalue := strings.SplitN(input, \"=\", 2)\n\tkey := keyvalue[0]\n\tvalue := \"\"\n\tif len(keyvalue) == 2 {\n\t\tvalue = keyvalue[1]\n\t}\n\tif len(key) > 2 && key[1] != '-' {\n\t\treturn ParseFlag(key[:2]+\"=\"+key[2:], flags)\n\t}\n\tfor _, flag := range flags {\n\t\tif (flag.Char != \"\" && key == \"-\"+flag.Char) || key == \"--\"+flag.Name {\n\t\t\tif flag.HasValue {\n\t\t\t\tif value == \"\" {\n\t\t\t\t\treturn nil, \"\", errors.New(flag.String() + \" needs a value\")\n\t\t\t\t}\n\t\t\t\treturn flag, value, nil\n\t\t\t}\n\t\t\tif value != \"\" {\n\t\t\t\treturn nil, \"\", errors.New(flag.String() + \" does not take a value\")\n\t\t\t}\n\t\t\treturn flag, \"\", nil\n\t\t}\n\t}\n\treturn nil, \"\", nil\n}",
"func (o BoolObj) Parse() ([][]string, error) {\n\treturn [][]string{\n\t\t{string(*o.Prefix)},\n\t\t{strconv.FormatBool(o.Val)},\n\t}, nil\n}",
"func flagToBool(f string) bool {\n\tif f == \"true\" {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}",
"func (ctx *serverRequestContextImpl) GetBoolQueryParm(name string) (bool, error) {\n\tvar err error\n\n\tvalue := false\n\tparam := ctx.req.URL.Query().Get(name)\n\tif param != \"\" {\n\t\tvalue, err = strconv.ParseBool(strings.ToLower(param))\n\t\tif err != nil {\n\t\t\treturn false, caerrors.NewHTTPErr(400, caerrors.ErrUpdateConfigRemoveAff, \"Failed to correctly parse value of '%s' query parameter: %s\", name, err)\n\t\t}\n\t}\n\n\treturn value, nil\n}",
"func GetBool(name string) bool {\n\t//params, err := url.ParseQuery(r.URL.RawQuery)\n\t//if err != nil {\n\t//\treturn false\n\t//}\n\n\t//value, ok := params[name]\n\t//if !ok {\n\t//\treturn false\n\t//}\n\n\tstrValue := strings.Join([]string{\"\", \"\"}, \"\")\n\tif strValue == \"\" {\n\t\treturn true\n\t}\n\n\tboolValue, err := strconv.ParseBool(strValue)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn boolValue\n}",
"func (p *Parser) parseBoolean() asti.ExpressionI {\n\treturn &ast.Boolean{Token: p.curToken, Value: p.curTokenIs(tokentype.TRUE)}\n}",
"func BoolValue(t bool) Value {\n\tif t {\n\t\treturn Value{Typ: ':', IntegerV: 1}\n\t}\n\treturn Value{Typ: ':', IntegerV: 0}\n}",
"func (v Value) Bool(defaults ...bool) bool {\n\t// Return the first default if the raw is undefined\n\tif v.raw == nil {\n\t\t// Make sure there's at least one thing in the list\n\t\tdefaults = append(defaults, false)\n\t\treturn defaults[0]\n\t}\n\n\tswitch t := v.raw.(type) {\n\tcase string:\n\t\tb, err := strconv.ParseBool(t)\n\t\tif err != nil {\n\t\t\tslog.Panicf(\"failed to parse bool: %v\", err)\n\t\t}\n\t\treturn b\n\n\tcase bool:\n\t\treturn t\n\n\tdefault:\n\t\tslog.Panicf(\"%v is of unsupported type %v\", t, reflect.TypeOf(t).String())\n\t}\n\n\treturn false\n}",
"func internalNewOptionalBoolValue(p *optionalBool) pflag.Value {\n\tp.present = false\n\treturn (*optionalBoolValue)(p)\n}",
"func validateBoolParam(ctx *HttpContext, param *HttpParam) {\n\n\tparam.Raw = retrieveParamValue(ctx, param).(string)\n\n\tif len(param.Raw) == 0 && param.Required {\n\t\tappendInvalidErrorCode(ctx, param)\n\t\treturn\n\t}\n\n\tif len(param.Raw) == 0 { return }\n\n\tif val, err := strconv.ParseBool(param.Raw); err != nil {\n\t\tappendInvalidErrorCode(ctx, param)\n\t} else {\n\t\tparam.setPresentValue(val)\n\t}\n}",
"func OptionalQueryParamBool(request *http.Request, name string) (null.Bool, IResponse) {\n\tvalueStr := request.URL.Query().Get(name)\n\tif valueStr == \"\" {\n\t\treturn null.Bool{}, nil\n\t}\n\n\tvalue, err := strconv.ParseBool(valueStr)\n\tif err != nil {\n\t\treturn null.Bool{}, BadRequest(request, \"Invalid query param %s (value: '%s'): %s\", name, valueStr, err)\n\t}\n\n\treturn null.BoolFrom(value), nil\n}",
"func (m *DeviceHealthScriptBooleanParameter) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.DeviceHealthScriptParameter.GetFieldDeserializers()\n res[\"defaultValue\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetBoolValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDefaultValue(val)\n }\n return nil\n }\n return res\n}",
"func boolValue(s string) bool {\n\tswitch s {\n\tcase \"yes\", \"true\":\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func FlagParse() (err error) {\n\tConfigFlagsRegister()\n\tflag.Parse()\n\treturn ConfigFlagsProcess()\n}",
"func (w waf) Parse(ing *extensions.Ingress) (interface{}, error) {\n\ts, err := parser.GetStringAnnotation(wafAnn, ing)\n\tif err != nil {\n\t\treturn Config{}, nil\n\t}\n\tif !wafAnnRegex.MatchString(s) {\n\t\tglog.Warningf(\"ignoring invalid WAF option '%v' on %v/%v\", s, ing.Namespace, ing.Name)\n\t\treturn Config{}, nil\n\t}\n\treturn Config{\n\t\tMode: s,\n\t}, nil\n}",
"func (parser *Parser) parsePredefined(resolvedInput string, targetType reflect.Type) (interface{}, error) {\n\tvar result interface{}\n\tvar err error\n\tswitch targetType.Kind() {\n\tcase reflect.Bool:\n\t\tresult, err = strconv.ParseBool(resolvedInput)\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\tresult, err = strconv.ParseInt(resolvedInput, 0, targetType.Bits())\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\tresult, err = strconv.ParseUint(resolvedInput, 0, targetType.Bits())\n\tcase reflect.Float32, reflect.Float64:\n\t\tresult, err = strconv.ParseFloat(resolvedInput, targetType.Bits())\n\tcase reflect.String:\n\t\tresult, err = resolvedInput, nil\n\tdefault:\n\t\tresult, err = nil, errors.New(\"\")\n\t}\n\tif err == nil {\n\t\treturn reflect.ValueOf(result).Convert(targetType).Interface(), nil\n\t}\n\treturn nil, toErrorf(\"Could not convert '%v' to type '%v'\", resolvedInput, targetType.String())\n}",
"func parseBoolEx(repr string) (value bool, err error) {\n\tif value, err = strconv.ParseBool(repr); err != nil {\n\t\tswitch repr {\n\t\tcase \"y\", \"yes\", \"YES\", \"Yes\":\n\t\t\treturn true, nil\n\t\tcase \"n\", \"no\", \"NO\", \"No\":\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\treturn\n}",
"func parseAutoRemoveParam(req *http.Request) (bool, error) {\n\tc := req.URL.Query().Get(\"autoremove\")\n\tvar autoRemove bool\n\tvar err error\n\n\tif c != \"\" {\n\t\tautoRemove, err = strconv.ParseBool(c)\n\t\tif err != nil {\n\t\t\treturn autoRemove, errAutoRemoveNotBool\n\t\t}\n\t}\n\n\treturn autoRemove, nil\n}",
"func (c *Validator) GetBool(key string, def ...bool) (bool, error) {\n\tstrv := c.Input.Query(key)\n\tif len(strv) == 0 && len(def) > 0 {\n\t\treturn def[0], nil\n\t}\n\treturn strconv.ParseBool(strv)\n}",
"func BoolConverter(str string, target reflect.Value) (ok bool) {\n\tb, err := strconv.ParseBool(str)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttarget.SetBool(b)\n\treturn true\n}",
"func flagValue(flags []string, name string) string {\n\tisBool := booleanFlag(name)\n\tfor i, arg := range flags {\n\t\tif val := strings.TrimPrefix(arg, name+\"=\"); val != arg {\n\t\t\t// -name=value\n\t\t\treturn val\n\t\t}\n\t\tif arg == name { // -name ...\n\t\t\tif isBool {\n\t\t\t\t// -name, equivalent to -name=true\n\t\t\t\treturn \"true\"\n\t\t\t}\n\t\t\tif i+1 < len(flags) {\n\t\t\t\t// -name value\n\t\t\t\treturn flags[i+1]\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}",
"func (formatter) fBool(v *types.RecordValue) *types.RecordValue {\n\tif v.Value != strBoolTrue {\n\t\tv.Value = \"\"\n\t}\n\n\treturn v\n}",
"func parseBoolFromString(content string, aggErr *AggregateError) bool {\n result, err := strconv.ParseBool(content)\n if err != nil {\n aggErr.Append(err)\n }\n return result\n}",
"func ParseBool(str string) bool {\n\tb, _ := strconv.ParseBool(str)\n\treturn b\n}",
"func QueryParamBool(request *http.Request, name string) (bool, IResponse) {\n\tvalueStr := request.URL.Query().Get(name)\n\tvalue, err := strconv.ParseBool(valueStr)\n\tif err != nil {\n\t\treturn false, BadRequest(request, \"Invalid query param %s (value: '%s'): %s\", name, valueStr, err)\n\t}\n\n\treturn value, nil\n}",
"func StringToBool(s string, def bool) bool {\n\tv, err := strconv.ParseBool(s)\n\tif err != nil {\n\t\tlog.Warnf(\"failed to parse bool value: %s\", s)\n\t\treturn def\n\t}\n\treturn v\n}",
"func ParseFlagString(args []string) (string, int, error) {\n\tif strings.ContainsAny(args[0], \"= \") {\n\t\tparts := strings.SplitN(args[0], \"=\", 2)\n\t\tif len(parts) == 1 {\n\t\t\tparts = strings.SplitN(args[0], \" \", 2)\n\t\t}\n\t\tif len(parts) == 2 {\n\t\t\tfor _, c := range []string{`'`, `\"`} {\n\t\t\t\tif parts[1][:1] == c && parts[1][len(parts[1])-1:] == c {\n\t\t\t\t\treturn parts[1][1 : len(parts[1])-1], 0, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn parts[1], 0, nil\n\t\t}\n\t\treturn \"\", 0, fmt.Errorf(\"unable to split flag and value from string: [%s]\", args[0])\n\t}\n\tif len(args) > 1 {\n\t\treturn args[1], 1, nil\n\t}\n\treturn \"\", 0, fmt.Errorf(\"no value provided after %s flag\", args[0])\n}",
"func NewSchemaChangerModeFromString(val string) (_ NewSchemaChangerMode, ok bool) {\n\tswitch strings.ToUpper(val) {\n\tcase \"OFF\":\n\t\treturn UseNewSchemaChangerOff, true\n\tcase \"ON\":\n\t\treturn UseNewSchemaChangerOn, true\n\tcase \"UNSAFE_ALWAYS\":\n\t\treturn UseNewSchemaChangerUnsafeAlways, true\n\tdefault:\n\t\treturn 0, false\n\t}\n}",
"func (m *DeviceHealthScriptBooleanParameter) GetDefaultValue()(*bool) {\n val, err := m.GetBackingStore().Get(\"defaultValue\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}",
"func flagValue(flags []string, name string) string {\n\tfor i, arg := range flags {\n\t\tif val := strings.TrimPrefix(arg, name+\"=\"); val != arg {\n\t\t\t// -name=value\n\t\t\treturn val\n\t\t}\n\t\tif arg == name {\n\t\t\tif i+1 < len(flags) {\n\t\t\t\tif val := flags[i+1]; !strings.HasPrefix(val, \"-\") {\n\t\t\t\t\t// -name value\n\t\t\t\t\treturn flags[i+1]\n\t\t\t\t}\n\t\t\t}\n\t\t\t// -name, equivalent to -name=true\n\t\t\treturn \"true\"\n\t\t}\n\t}\n\treturn \"\"\n}",
"func ParseValue(in string) (string, []string) {\n\top, field := parseOp(in)\n\treturn op, parseDelimited(field, ListDelimiter)\n}",
"func (c Controller) GetBool(key string, def ...bool) bool {\n\tif v := string(c.QueryArgs().Peek(key)); v != \"\" {\n\t\ttmp, _ := strconv.ParseBool(v)\n\t\treturn tmp\n\t}\n\tif len(def) > 0 {\n\t\treturn def[0]\n\t}\n\treturn false\n}",
"func (_Mevsky *MevskyFilterer) ParseTurnedOn(log types.Log) (*MevskyTurnedOn, error) {\n\tevent := new(MevskyTurnedOn)\n\tif err := _Mevsky.contract.UnpackLog(event, \"TurnedOn\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func Bool(v interface{}) *bool {\n\tswitch v.(type) {\n\tcase bool:\n\t\tval := v.(bool)\n\t\treturn &val\n\tcase int, uint, int32, int16, int8, int64, uint32, uint16, uint8, uint64, float32, float64:\n\t\tval, err := strconv.Atoi(fmt.Sprintf(\"%v\", v))\n\t\tif err != nil {\n\t\t\texception.Err(err, 500).Ctx(M{\"v\": v}).Throw()\n\t\t}\n\t\tres := false\n\t\tif val != 0 {\n\t\t\tres = true\n\t\t}\n\t\treturn &res\n\tdefault:\n\t\tval := fmt.Sprintf(\"%v\", v)\n\t\tres := false\n\t\tif val != \"\" {\n\t\t\tres = true\n\t\t}\n\t\treturn &res\n\t}\n}",
"func (c *Configure) ReadBoolValue(key string, def bool) bool {\n\tv := c.Get(key)\n\tif v == \"\" {\n\t\treturn def\n\t}\n\tvalue, err := strconv.ParseBool(v )\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn value\n\n}",
"func GetBool(v interface{}) bool {\n\tswitch result := v.(type) {\n\tcase bool:\n\t\treturn result\n\tdefault:\n\t\tif d := GetString(v); d != \"\" {\n\t\t\tvalue, _ := strconv.ParseBool(d)\n\t\t\treturn value\n\t\t}\n\t}\n\treturn false\n}",
"func MakeBoolOrDefault(in *bool, defaultValue bool) *google_protobuf.BoolValue {\n\tif in == nil {\n\t\treturn &google_protobuf.BoolValue{\n\t\t\tValue: defaultValue,\n\t\t}\n\t}\n\n\treturn &google_protobuf.BoolValue{\n\t\tValue: *in,\n\t}\n}",
"func GetDefaultBool(in bool) bool {\n\treturn in\n}",
"func ParseBool(key string) (bool, error) {\n\tval := os.Getenv(key)\n\n\tif val == \"\" {\n\t\treturn false, notFoundError(key, \"ParseBool\")\n\t}\n\n\tparsedVal, err := strconv.ParseBool(val)\n\tif err != nil {\n\t\treturn false, &EnvError{\"ParseBool\", key, err}\n\t}\n\n\treturn parsedVal, err\n}",
"func ParseOp(opString string) (Op, error) {\n\tvar (\n\t\tempty Op\n\t\top Op\n\t)\n\toperationMatch := operationPattern.FindStringSubmatch(opString)\n\tif len(operationMatch) != 2 {\n\t\treturn empty, errors.New(\"operation should surrounded by {}\")\n\t}\n\n\topIndexMatch := opIndexPattern.FindStringSubmatch(operationMatch[1])\n\tif len(opIndexMatch) == 2 {\n\t\topIndex, err := strconv.Atoi(strings.Trim(opIndexMatch[1], \" \"))\n\t\tif err != nil {\n\t\t\treturn empty, err\n\t\t}\n\t\top.Index = IntOptional{opIndex}\n\t}\n\n\topTimeMatch := opTimePattern.FindStringSubmatch(operationMatch[1])\n\tif len(opTimeMatch) == 2 {\n\t\topTime, err := strconv.Atoi(strings.Trim(opTimeMatch[1], \" \"))\n\t\tif err != nil {\n\t\t\treturn empty, err\n\t\t}\n\t\top.Time = time.Unix(0, int64(opTime))\n\t}\n\n\topProcessMatch := opProcessPattern.FindStringSubmatch(operationMatch[1])\n\tif len(opProcessMatch) == 2 {\n\t\tif opProcessMatch[1] == \":nemesis\" {\n\t\t\top.Process.Set(NemesisProcessMagicNumber)\n\t\t} else {\n\t\t\topProcess, err := strconv.Atoi(strings.Trim(opProcessMatch[1], \" \"))\n\t\t\tif err != nil {\n\t\t\t\treturn empty, err\n\t\t\t}\n\t\t\top.Process.Set(opProcess)\n\t\t}\n\t}\n\n\topTypeMatch := opTypePattern.FindStringSubmatch(operationMatch[1])\n\tif len(opTypeMatch) != 2 {\n\t\treturn empty, errors.New(\"operation should have :type field\")\n\t}\n\tswitch opTypeMatch[1] {\n\tcase \":invoke\":\n\t\top.Type = OpTypeInvoke\n\tcase \":ok\":\n\t\top.Type = OpTypeOk\n\tcase \":fail\":\n\t\top.Type = OpTypeFail\n\tcase \":info\":\n\t\top.Type = OpTypeInfo\n\tdefault:\n\t\treturn empty, errors.Errorf(\"invalid type, %s\", opTypeMatch[1])\n\t}\n\n\topValueMatch := opValuePattern.FindStringSubmatch(operationMatch[1])\n\t// can values be empty?\n\tif len(opValueMatch) == 2 {\n\t\tmopContent := strings.Trim(opValueMatch[1], \" \")\n\t\tif mopContent != \"\" {\n\t\t\tmopMatches := mopPattern.FindAllStringSubmatch(mopContent, -1)\n\t\t\tfor _, mopMatch := range mopMatches {\n\t\t\t\tif len(mopMatch) != 5 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tkey := strings.Trim(mopMatch[3], \" \")\n\t\t\t\tvar value MopValueType\n\t\t\t\tmopValueMatches := mopValuePattern.FindStringSubmatch(mopMatch[4])\n\t\t\t\tif len(mopValueMatches) == 2 {\n\t\t\t\t\tvalues := []int{}\n\t\t\t\t\ttrimVal := strings.Trim(mopValueMatches[1], \"[\")\n\t\t\t\t\ttrimVal = strings.Trim(trimVal, \"]\")\n\t\t\t\t\tif trimVal != \"\" {\n\t\t\t\t\t\tfor _, valStr := range strings.Split(trimVal, \" \") {\n\t\t\t\t\t\t\tval, err := strconv.Atoi(valStr)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn empty, err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tvalues = append(values, val)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tvalue = values\n\t\t\t\t} else {\n\t\t\t\t\ttrimVal := strings.Trim(mopMatch[4], \" \")\n\t\t\t\t\tif trimVal == \"nil\" {\n\t\t\t\t\t\tvalue = nil\n\t\t\t\t\t} else {\n\t\t\t\t\t\tval, err := strconv.Atoi(trimVal)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn empty, err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tvalue = val\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tvar mop Mop\n\t\t\t\tswitch mopMatch[2] {\n\t\t\t\tcase \"append\":\n\t\t\t\t\tmop = Append(key, value.(int))\n\t\t\t\tcase \"r\":\n\t\t\t\t\tif value != nil {\n\t\t\t\t\t\tmop = Read(key, value.([]int))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tmop = Read(key, nil)\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tpanic(\"unreachable\")\n\t\t\t\t}\n\t\t\t\tif op.Value == nil {\n\t\t\t\t\tdestArray := make([]Mop, 0)\n\t\t\t\t\top.Value = &destArray\n\t\t\t\t}\n\t\t\t\t*op.Value = 
append(*op.Value, mop)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn op, nil\n}",
"func (fm *FieldModelFlagsSimple) GetValue(value *FlagsSimple) error {\n return fm.GetValueDefault(value, FlagsSimple(0))\n}",
"func updateBoolFromFlag(cmd *cobra.Command, v *bool, key string) {\n\tif cmd.Flags().Changed(key) {\n\t\t*v = viper.GetBool(key)\n\t}\n}",
"func ParseStatus(state string) Status {\n\tswitch strings.ToUpper(state) {\n\tcase \"OUTAGE\":\n\t\treturn OUTAGE\n\tcase \"MAJOR\":\n\t\treturn MAJOR\n\tcase \"MINOR\":\n\t\treturn MINOR\n\tcase \"OK\":\n\t\treturn OK\n\tdefault:\n\t\treturn OUTAGE\n\t}\n}",
"func (f *Flag) defaultIsZeroValue() bool {\n\tswitch f.Value.(type) {\n\tcase boolFlag:\n\t\treturn f.DefValue == \"false\"\n\tcase *durationValue:\n\t\t// Beginning in Go 1.7, duration zero values are \"0s\"\n\t\treturn f.DefValue == \"0\" || f.DefValue == \"0s\"\n\tcase *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:\n\t\treturn f.DefValue == \"0\"\n\tcase *stringValue:\n\t\treturn f.DefValue == \"\"\n\tcase *ipValue, *ipMaskValue, *ipNetValue:\n\t\treturn f.DefValue == \"<nil>\"\n\tcase *intSliceValue, *stringSliceValue, *stringArrayValue:\n\t\treturn f.DefValue == \"[]\"\n\tdefault:\n\t\tswitch f.Value.String() {\n\t\tcase \"false\":\n\t\t\treturn true\n\t\tcase \"<nil>\":\n\t\t\treturn true\n\t\tcase \"\":\n\t\t\treturn true\n\t\tcase \"0\":\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n}",
"func parseSortFlag(flagName string, flags *pflag.FlagSet) v1.SortOrder {\n\tvalue := \"\"\n\tif v, err := flags.GetString(flagName); err == nil {\n\t\tvalue = strings.ToLower(v)\n\t}\n\n\tswitch value {\n\tcase \"asc\":\n\t\treturn v1.SortOrder_Asc\n\tcase \"desc\":\n\t\treturn v1.SortOrder_Desc\n\tdefault:\n\t\treturn v1.SortOrder_Undefined\n\t}\n}",
"func (me TviewRefreshModeEnumType) IsOnRequest() bool { return me == \"onRequest\" }",
"func Parse(m FlagMap) {\n\tflag.Parse()\n\tflag.VisitAll(func(f *flag.Flag) {\n\t\tmapping := Flag{}\n\t\tif s, ok := m[f.Name]; ok {\n\t\t\tif len(s.Name) > 0 {\n\t\t\t\tmapping.Name = s.Name\n\t\t\t}\n\t\t\tif s.Filter != nil {\n\t\t\t\tmapping.Filter = s.Filter\n\t\t\t}\n\t\t}\n\t\tif len(mapping.Name) == 0 {\n\t\t\tmapping.Name = strings.ToUpper(strings.Replace(f.Name, \"-\", \"_\", -1))\n\t\t}\n\t\tif mapping.Filter == nil {\n\t\t\tmapping.Filter = func(s string) string { return s }\n\t\t}\n\t\tif v := os.Getenv(mapping.Name); len(v) > 0 {\n\t\t\tf.Value.Set(mapping.Filter(v))\n\t\t}\n\t})\n}",
"func (fOpenType FileOpenType) ParseString(\n valueString string,\n caseSensitive bool) (FileOpenType, error) {\n\n ePrefix := \"FileOpenType.ParseString() \"\n\n fOpenType.checkInitializeMaps(false)\n\n result := FileOpenType(0)\n\n lenValueStr := len(valueString)\n\n if strings.HasSuffix(valueString, \"()\") {\n valueString = valueString[0 : lenValueStr-2]\n lenValueStr -= 2\n }\n\n if lenValueStr < 3 {\n return result,\n fmt.Errorf(ePrefix+\n \"Input parameter 'valueString' is INVALID! valueString='%v' \", valueString)\n }\n\n var ok bool\n var idx int\n\n if caseSensitive {\n\n if !strings.HasPrefix(valueString, \"Type\") {\n valueString = \"Type\" + valueString\n }\n\n idx, ok = mFileOpenTypeStringToInt[valueString]\n\n if !ok {\n return FileOpenType(0),\n fmt.Errorf(ePrefix+\n \"'valueString' did NOT MATCH a FileOpenType. valueString='%v' \", valueString)\n }\n\n result = FileOpenType(idx)\n\n } else {\n\n valueString = strings.ToLower(valueString)\n\n if !strings.HasPrefix(valueString, \"type\") {\n valueString = \"type\" + valueString\n }\n\n idx, ok = mFileOpenTypeLwrCaseStringToInt[valueString]\n\n if !ok {\n return FileOpenType(0),\n fmt.Errorf(ePrefix+\n \"'valueString' did NOT MATCH a FileOpenType. valueString='%v' \", valueString)\n }\n\n result =\n FileOpenType(idx)\n }\n\n return result, nil\n}"
] | [
"0.56482726",
"0.5562257",
"0.5544399",
"0.5209864",
"0.5193872",
"0.5183491",
"0.51610637",
"0.51105624",
"0.5067777",
"0.50530106",
"0.5005197",
"0.4955115",
"0.49473256",
"0.49443442",
"0.4923302",
"0.49199948",
"0.49097046",
"0.48919663",
"0.48528236",
"0.4831881",
"0.47686684",
"0.47660202",
"0.4759802",
"0.475832",
"0.4756866",
"0.46989515",
"0.4688759",
"0.46885583",
"0.46858177",
"0.46536285",
"0.46514457",
"0.46509397",
"0.46480742",
"0.464771",
"0.46458536",
"0.4642464",
"0.46230674",
"0.46128467",
"0.46044528",
"0.46011853",
"0.4594015",
"0.45860073",
"0.4547251",
"0.45416772",
"0.4538139",
"0.449131",
"0.4484553",
"0.44539034",
"0.44317263",
"0.44245106",
"0.4420204",
"0.44089636",
"0.43926036",
"0.43854716",
"0.4379263",
"0.43739718",
"0.4371565",
"0.43544528",
"0.43489546",
"0.4347758",
"0.43392053",
"0.43382442",
"0.43281335",
"0.43276453",
"0.43273976",
"0.43257824",
"0.431898",
"0.43188292",
"0.4302453",
"0.43015116",
"0.42956987",
"0.42925814",
"0.42779908",
"0.42687303",
"0.4268698",
"0.42648664",
"0.42645562",
"0.42643517",
"0.42630795",
"0.42544964",
"0.4250056",
"0.42471611",
"0.42425978",
"0.4242403",
"0.42295763",
"0.42226705",
"0.42206922",
"0.42103982",
"0.4194116",
"0.41865808",
"0.41810435",
"0.4179507",
"0.41784546",
"0.41782928",
"0.4166169",
"0.41441712",
"0.41425732",
"0.41412678",
"0.4136381",
"0.41333908"
] | 0.87817115 | 0 |
IsGroupMember returns whether the currently logged-in user is a member of a group | func IsGroupMember(gid int) (bool, error) {
groups, err := os.Getgroups()
if err != nil {
return false, trace.ConvertSystemError(err)
}
for _, group := range groups {
if group == gid {
return true, nil
}
}
return false, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (s *GroupService) isGroupMember(groupId, userId string) (bool, error) {\n\tvar condition = map[string]interface{}{\n\t\t\"groupId\": groupId,\n\t\t\"userId\": userId,\n\t}\n\tmemberProfile, err := groupRepo.FindOneMember(condition)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn memberProfile == nil, nil\n}",
"func (g *Group) IsMember(userID uint) bool {\n\tfor _, u := range g.Users {\n\t\tif u.UserID == userID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func IsMember(claims jwtgo.Claims, groups []string, scopes []string) bool {\n\tmapClaims, err := MapClaims(claims)\n\tif err != nil {\n\t\treturn false\n\t}\n\t// O(n^2) loop\n\tfor _, userGroup := range GetGroups(mapClaims, scopes) {\n\t\tfor _, group := range groups {\n\t\t\tif userGroup == group {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func (mv *MembershipValidator) IsInGroup(\n\tpublicKey *operator.PublicKey,\n) bool {\n\taddress, err := mv.signing.PublicKeyToAddress(publicKey)\n\tif err != nil {\n\t\tmv.logger.Errorf(\"cannot convert public key to chain address: [%v]\", err)\n\t\treturn false\n\t}\n\n\t_, isInGroup := mv.members[address.String()]\n\treturn isInGroup\n}",
"func (m *Manager) IsMember(globalID, username string) (ismember bool, err error) {\n\tmatches, err := m.collection.Find(bson.M{\"globalid\": globalID, \"members\": username}).Count()\n\tismember = (matches > 0)\n\treturn\n}",
"func (c *client) IsMember(org, user string) (bool, error) {\n\tc.log(\"IsMember\", org, user)\n\tif org == user {\n\t\t// Make it possible to run a couple of plugins on personal repos.\n\t\treturn true, nil\n\t}\n\tcode, err := c.request(&request{\n\t\tmethod: http.MethodGet,\n\t\tpath: fmt.Sprintf(\"/orgs/%s/members/%s\", org, user),\n\t\torg: org,\n\t\texitCodes: []int{204, 404, 302},\n\t}, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif code == 204 {\n\t\treturn true, nil\n\t} else if code == 404 {\n\t\treturn false, nil\n\t} else if code == 302 {\n\t\treturn false, fmt.Errorf(\"requester is not %s org member\", org)\n\t}\n\t// Should be unreachable.\n\treturn false, fmt.Errorf(\"unexpected status: %d\", code)\n}",
"func (fc *fakeClient) IsMember(org, user string) (bool, error) {\n\tfor _, m := range fc.orgMembers[org] {\n\t\tif m == user {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}",
"func (c *Client) IsMember(org, user string) (bool, error) {\n\tresp, err := c.request(http.MethodGet, fmt.Sprintf(\"%s/orgs/%s/members/%s\", c.base, org, user), nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == 204 {\n\t\treturn true, nil\n\t} else if resp.StatusCode == 404 {\n\t\treturn false, nil\n\t} else if resp.StatusCode == 302 {\n\t\treturn false, fmt.Errorf(\"requester is not %s org member\", org)\n\t}\n\treturn false, fmt.Errorf(\"unexpected status: %s\", resp.Status)\n}",
"func (m *Member) IsMember() bool { return m.Role == MemberRoleMember }",
"func (u *UserInfoLDAPSource) IsgroupmemberorNot(groupname string, username string) (bool, string, error) {\n\n\tAllUsersinGroup, GroupmanagedbyValue, err := u.GetusersofaGroup(groupname)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn false, GroupmanagedbyValue, err\n\t}\n\tfor _, entry := range AllUsersinGroup {\n\t\tif entry == username {\n\t\t\treturn true, GroupmanagedbyValue, nil\n\t\t}\n\t}\n\treturn false, GroupmanagedbyValue, nil\n}",
"func (c *Settings) IsMember(teams []*Team) bool {\n\tfor _, team := range teams {\n\t\tif c.Orgs[team.Login] {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (g *Group) IsMyGroup(u *User) bool {\n\n\tif g.IsAdmin(u) {\n\t\treturn true\n\t}\n\n\tfor _, user := range g.Users {\n\t\tif user == u.Username {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (i *Installation) IsInGroup() bool {\n\treturn i.GroupID != nil\n}",
"func (s *SyncStorage) IsMember(ns string, group string, member interface{}) (bool, error) {\n\tretVal, err := s.getDbBackend(ns).SIsMember(getNsPrefix(ns)+group, member)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn retVal, err\n}",
"func (ctx *TestContext) UserIsAMemberOfTheGroup(user, group string) error {\n\terr := ctx.ThereIsAUserWith(getParameterString(map[string]string{\n\t\t\"group_id\": user,\n\t\t\"user\": user,\n\t}))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ctx.GroupIsAChildOfTheGroup(user, group)\n}",
"func (htGroup *HTGroup) IsUserInGroup(user string, group string) bool {\n\tgroups := htGroup.GetUserGroups(user)\n\treturn containsGroup(groups, group)\n}",
"func UserInGroup(u *user.User, g *Group) (bool, error) {\n\treturn userInGroup(u, g)\n}",
"func (ctx *TestContext) UserIsAManagerOfTheGroupAndCanWatchItsMembers(user, group string) error {\n\treturn ctx.UserIsAManagerOfTheGroupWith(getParameterString(map[string]string{\n\t\t\"id\": group,\n\t\t\"user_id\": user,\n\t\t\"name\": group,\n\t\t\"can_watch_members\": strTrue,\n\t}))\n}",
"func (app *App) GroupContainsMember(ctx context.Context, groupName, uid string) (bool, error) {\n\tif uid == \"\" {\n\t\treturn false, constants.ErrEmptyUID\n\t}\n\treturn app.groups.GroupContainsMember(ctx, groupName, uid)\n}",
"func (c *EtcdGroupService) GroupContainsMember(ctx context.Context, groupName, uid string) (bool, error) {\n\tctxT, cancel := context.WithTimeout(ctx, transactionTimeout)\n\tdefer cancel()\n\tetcdRes, err := clientInstance.Txn(ctxT).\n\t\tIf(clientv3.Compare(clientv3.CreateRevision(groupKey(groupName)), \">\", 0)).\n\t\tThen(clientv3.OpGet(memberKey(groupName, uid), clientv3.WithCountOnly())).\n\t\tCommit()\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !etcdRes.Succeeded {\n\t\treturn false, constants.ErrGroupNotFound\n\t}\n\treturn etcdRes.Responses[0].GetResponseRange().GetCount() > 0, nil\n}",
"func (u *User) HasGroup(group string) bool {\r\n\tfor _, g := range u.Groups {\r\n\t\tif g == group {\r\n\t\t\treturn true\r\n\t\t}\r\n\t}\r\n\treturn false\r\n}",
"func (b *Handler) IsMember(pubKeyBLS []byte, round uint64, step uint8, maxSize int) bool {\n\treturn b.Committee(round, step, maxSize).IsMember(pubKeyBLS)\n}",
"func isMember(role string) bool {\n\t// Possible values are \"COLLABORATOR\", \"CONTRIBUTOR\", \"FIRST_TIMER\", \"FIRST_TIME_CONTRIBUTOR\", \"MEMBER\", \"OWNER\", or \"NONE\".\n\tswitch role {\n\tcase \"COLLABORATOR\", \"MEMBER\", \"OWNER\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}",
"func (c *Chat) IsGroup() bool {\n\treturn c.Type == \"group\"\n}",
"func (a *Account) IsMember(userID uuid.UUID) bool {\n\tfor _, value := range a.AccountUser {\n\t\tif value.ID == userID {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (set Set) IsMember(ctx context.Context, member string) (bool, error) {\n\treq := newRequest(\"*3\\r\\n$9\\r\\nSISMEMBER\\r\\n$\")\n\treq.addString2(set.name, member)\n\tres, err := set.c.cmdInt(ctx, req)\n\treturn res == 1, err\n}",
"func (m *Message) IsGroup() bool {\n\treturn m.From.ID != m.Chat.ID\n}",
"func (g *slimGrouping) IsIn(m types.Member, grp types.Group) (bool, error) {\n\tgroups, err := g.GroupsOf(m)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t_, ok := groups[grp]\n\treturn ok, nil\n}",
"func (_AuthContract *AuthContractCaller) IsMember(opts *bind.CallOpts, arg0 common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _AuthContract.contract.Call(opts, out, \"isMember\", arg0)\n\treturn *ret0, err\n}",
"func (_AuthContract *AuthContractCallerSession) IsMember(arg0 common.Address) (bool, error) {\n\treturn _AuthContract.Contract.IsMember(&_AuthContract.CallOpts, arg0)\n}",
"func (c *Channel) IsMember(cl *Client) bool {\n\tfor _, v := range c.Members {\n\t\tif v == cl {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (s *Server) userHasGroupByName(phone string, grpNameOrAlias string) bool {\n\n\tfor _, v := range s.groups {\n\t\tif v.AdminPhone == phone {\n\t\t\tif v.Name == grpNameOrAlias || v.Alias == grpNameOrAlias {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn false\n}",
"func (t *TeamsService) IsUserMember(teamID, userID int) (bool, *simpleresty.Response, error) {\n\tisMember := false\n\turlStr := t.client.http.RequestURL(\"/team/%d/user/%d\", teamID, userID)\n\n\t// Set the correct authentication header\n\tt.client.setAuthTokenHeader(t.client.accountAccessToken)\n\n\t// Execute the request\n\tresponse, getErr := t.client.http.Get(urlStr, nil, nil)\n\tif getErr != nil {\n\t\treturn false, response, getErr\n\t}\n\n\t// Per API documentation, the response returns a 200 if user belongs to the team\n\tif response.StatusCode == 200 {\n\t\tisMember = true\n\t}\n\n\treturn isMember, response, nil\n}",
"func (o *Permissao) GetIsGroup() bool {\n\tif o == nil || o.IsGroup == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.IsGroup\n}",
"func (_AuthContract *AuthContractSession) IsMember(arg0 common.Address) (bool, error) {\n\treturn _AuthContract.Contract.IsMember(&_AuthContract.CallOpts, arg0)\n}",
"func VerifyUserInGroup(token string, groupName string) bool {\n\tvar payload string\n\tpayload = strings.Split(token, \".\")[1]\n\tsDec, err := base64.RawStdEncoding.DecodeString(payload)\n\tif err != nil {\n\t\tfmt.Println(\"payload:\", payload)\n\t\tfmt.Println(\"error:\", err)\n\t\treturn false\n\t}\n\tvar result map[string]interface{}\n\tjson.Unmarshal([]byte(sDec), &result)\n\n\tif groups, ok := result[\"cognito:groups\"]; ok {\n\t\tfor _, v := range groups.([]interface{}) {\n\t\t\tif groupName == v.(string) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func (_ConsortiumManagement *ConsortiumManagementCallerSession) IsMember(addr common.Address) (bool, error) {\n\treturn _ConsortiumManagement.Contract.IsMember(&_ConsortiumManagement.CallOpts, addr)\n}",
"func (o *Permissao) HasIsGroup() bool {\n\tif o != nil && o.IsGroup != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (b *Handler) IsMember(pubKeyBLS []byte, round uint64, step uint8) bool {\n\treturn b.Handler.IsMember(pubKeyBLS, round, step, config.ConsensusSelectionCommitteeSize)\n}",
"func (_ConsortiumManagement *ConsortiumManagementCaller) IsMember(opts *bind.CallOpts, addr common.Address) (bool, error) {\n\tvar out []interface{}\n\terr := _ConsortiumManagement.contract.Call(opts, &out, \"isMember\", addr)\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}",
"func (u *UserRecord) HasGroup(group string) bool {\n\tfor _, g := range u.Groups {\n\t\tif g == group {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (ctx *TestContext) IAmAManagerOfTheGroupAndCanWatchItsMembers(group string) error {\n\treturn ctx.UserIsAManagerOfTheGroupWith(getParameterString(map[string]string{\n\t\t\"id\": group,\n\t\t\"user_id\": ctx.user,\n\t\t\"name\": group,\n\t\t\"can_watch_members\": strTrue,\n\t}))\n}",
"func (lc *LdapConfig) CheckGroupMembership(username, group string) (bool, error) {\n\tre := regexp.MustCompile(\"CN=([a-zA-Z0-9_-]+?),\")\n\n\terr := lc.ldapsConnect()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tsearchRequest := ldap.NewSearchRequest(\n\t\tlc.BaseDN,\n\t\tldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,\n\t\tfmt.Sprintf(\"(&(objectClass=group)(CN=%s))\", group),\n\t\t[]string{\"member\"},\n\t\tnil,\n\t)\n\n\ts, err := lc.Conn.Search(searchRequest)\n\tif err != nil {\n\t\tlog.Printf(\"Group search failed: %v\", err)\n\t\treturn false, err\n\t}\n\n\tif len(s.Entries) != 1 {\n\t\treturn false, fmt.Errorf(\"Group '%s' does not exist or too many results\", group)\n\t}\n\n\tfor _, entry := range s.Entries {\n\t\tmemberDNs := entry.GetAttributeValues(\"member\")\n\t\tif len(memberDNs) == 0 {\n\t\t\treturn false, fmt.Errorf(\"Group '%s' does not have any members\", group)\n\t\t}\n\t\tfor _, memberDN := range memberDNs {\n\t\t\tmember := re.FindStringSubmatch(memberDN)\n\t\t\tif strings.ToLower(username) == strings.ToLower(member[1]) {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false, fmt.Errorf(\"User '%s' is not member of group '%s'\", username, group)\n}",
"func (g ScimGroup) HasMember(memberID string) bool {\n\tfor _, member := range g.Members {\n\t\tif member.Value == memberID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (_ConsortiumManagement *ConsortiumManagementSession) IsMember(addr common.Address) (bool, error) {\n\treturn _ConsortiumManagement.Contract.IsMember(&_ConsortiumManagement.CallOpts, addr)\n}",
"func (g *Group) HasMembers() bool {\n\treturn len(g.Allow) != 0 || len(g.Disallow) != 0 || g.CrawlDelay != \"\"\n}",
"func IsMember(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tvar (\n\t\t\tres = response.Echo{C: c}\n\t\t)\n\t\tuser := c.Get(\"user\").(*jwt.Token)\n\t\tclaims := user.Claims.(jwt.MapClaims)\n\t\trole := claims[\"role\"].(bool)\n\t\tif !role {\n\t\t\tres.Response(http.StatusUnauthorized, \"Unauthorized\", nil)\n\t\t\treturn echo.ErrUnauthorized\n\t\t}\n\t\tres.Response(http.StatusOK, \"\", nil)\n\t\treturn next(c)\n\t}\n}",
"func (g *Group) Match(o Owner) bool {\n\tif g.Equal(o) {\n\t\treturn true\n\t}\n\tif g.GetOwner().Match(o) {\n\t\treturn true\n\t}\n\tfor _, m := range g.GetPerm(\"%member%\") {\n\t\tif m.Match(o) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (u *User) IsPublicMember(orgId int64) bool {\n\treturn IsPublicMembership(orgId, u.ID)\n}",
"func (s *Session) IsMemberAdmin() bool {\n\tif s.data.IsGenesis {\n\t\treturn true\n\t}\n\t// TODO\n\treturn false\n}",
"func IsGroupIsUserGroupOrWhitelisted(name string, whitelist ...string) bool {\n\t// check whitelist of groups\n\tfor _, el := range whitelist {\n\t\tif el == name {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tgroup, err := user.LookupGroup(name)\n\tif err != nil {\n\t\treturn false // fail on lookup error\n\t}\n\n\tgid, err := strconv.ParseUint(group.Gid, 10, 32)\n\tif err != nil {\n\t\treturn false // fail on parse error\n\t}\n\n\tminGID, maxGiD := ReadUserGIDRange(LoginDefsPath)\n\n\tif gid < minGID {\n\t\treturn false // group not in lower range\n\t}\n\n\tif gid > maxGiD {\n\t\treturn false // group not in upper range\n\t}\n\n\treturn true\n}",
"func (g *Group) IsAdmin(u *User) bool {\n\n\tif u.IsAdmin() {\n\t\treturn true\n\t}\n\n\tfor _, admin := range g.Admins {\n\t\tif admin == u.Username {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (u *User) IsUser() bool {\n\treturn u.UserGroupID == USER\n}",
"func (m *Member) IsGuest() bool { return m.Role == MemberRoleGuest }",
"func (g *Godis) SIsMember(key, member string) bool {\n\treturn g.cmdInt(\"SISMEMBER\", key, member) == 1\n}",
"func (u *User) In(g *Group) (bool, error) {\n\treturn userInGroup(u, g)\n}",
"func UserInGroup(e *Engine, userId int64, groupId int64) (bool, error) {\n\tres, _, err := e.RawSelect(Filter(\"autoscope_user_groups\", map[string]interface{}{\n\t\t\"user_id\": userId,\n\t\t\"group_id\": groupId,\n\t}))\n\tif err != nil { return false, err }\n\treturn res.Next(), nil\n}",
"func (ctx *TestContext) IsAMemberOfTheGroup(childGroupName, parentGroupName string) {\n\tctx.addGroupGroup(parentGroupName, childGroupName)\n}",
"func IsAdmin(username, groupName string) (bool, error) {\n\tgroup, err := GetGroup(groupName)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"getGroup(%s) err: %s\", groupName, err)\n\t}\n\n\taccount, err := GetAccount(username)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"getAccount(%s) err: %s\", username, err)\n\t}\n\n\tselector := Selector{\n\t\t\"sourceId\": group.Id,\n\t\t\"sourceName\": \"JGroup\",\n\t\t\"targetId\": account.Id,\n\t\t\"targetName\": \"JAccount\",\n\t\t\"as\": \"admin\",\n\t}\n\n\tcount, err := RelationshipCount(selector)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"checkAdminRelationship err: %s\", err)\n\t}\n\n\treturn count == 1, nil\n}",
"func (a *UserGroupAuthZBasic) CanGetGroup(ctx context.Context, curUser model.User, gid int) error {\n\treturn nil\n}",
"func (c *collection) hasGroup(g string) bool {\n\treturn c.has(c.groups, g)\n}",
"func (r *marathonClient) HasGroup(name string) (bool, error) {\n\turi := fmt.Sprintf(\"%s/%s\", marathonAPIGroups, trimRootPath(name))\n\terr := r.apiCall(\"GET\", uri, \"\", nil)\n\tif err != nil {\n\t\tif apiErr, ok := err.(*APIError); ok && apiErr.ErrCode == ErrCodeNotFound {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\treturn true, nil\n}",
"func (c Customer) ContainsGroup(groupName string) bool {\n\tfor _, g := range c.Groups {\n\t\tif g == groupName {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (d UserData) HasGroups() bool {\n\treturn d.ModelData.Has(models.NewFieldName(\"Groups\", \"group_ids\"))\n}",
"func (m *Group) GetMemberOf()([]DirectoryObjectable) {\n return m.memberOf\n}",
"func (w *Widget) IsMemberChannel(channelID string) bool {\n\tfor activeID := range w.activeChannels {\n\t\tif channelID == activeID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (p *Plugin) HasGroupExpression(user *security.User, resourceTenant string, groupExpression grp.GroupExpression) bool {\n\n\t// no resource tenant is not ok, there can be no default on this layer\n\tif resourceTenant == \"\" {\n\t\treturn false\n\t}\n\n\t// what we have now is the slice of groups that the user has\n\t// (including \"on behalf\", with concrete cluster-tenant or wildcard \"all\")\n\t// \"on behalf\"-groups do not have cluster-tenant because it is already evaluated for the concrete tenant to act\n\n\tfor i := range user.Groups {\n\t\tgrpCtx, err := p.grpr.ParseGroupName(string(user.Groups[i]))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// check if group maches for any of the tenants\n\t\tif resourceTenant == grp.Any {\n\t\t\tif groupExpression.Matches(*grpCtx) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// resource belongs to own tenant\n\t\tif strings.EqualFold(user.Tenant, resourceTenant) && grpCtx.OnBehalfTenant == \"\" {\n\t\t\tif groupExpression.Matches(*grpCtx) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// resource belongs to other tenant, access \"on behalf\": if group is for resource-tenant or for \"all\" then check\n\t\tif strings.EqualFold(grpCtx.OnBehalfTenant, resourceTenant) || grpCtx.OnBehalfTenant == grp.All {\n\t\t\tif groupExpression.Matches(*grpCtx) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn false\n\n}",
"func (i UserGroupAccess) IsAUserGroupAccess() bool {\n\tfor _, v := range _UserGroupAccessValues {\n\t\tif i == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func HasGroup(r *http.Request, searchGroups ...string) bool {\n\tgroupMap := r.Context().Value(GroupCtxKey).(map[string]bool)\n\n\tfor _, searchGroup := range searchGroups {\n\t\tif _, ok := groupMap[searchGroup]; ok {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (ctx *TestContext) ThereIsAGroup(group string) error {\n\treturn ctx.ThereIsAGroupWith(getParameterString(map[string]string{\n\t\t\"id\": group,\n\t\t\"name\": group,\n\t}))\n}",
"func MemberHasPermission(s *discordgo.Session, guildID string, userID string, permission int64) (bool, error) {\n\tmember, err := s.State.Member(guildID, userID)\n\tif err != nil {\n\t\tif member, err = s.GuildMember(guildID, userID); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\tg, err := s.Guild(guildID)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif g.OwnerID == userID {\n\t\treturn true, nil\n\t}\n\t// Iterate through the role IDs stored in member.Roles\n\t// to check permissions\n\tfor _, roleID := range member.Roles {\n\t\trole, err := s.State.Role(guildID, roleID)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif role.Permissions&permission != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}",
"func (s UserSet) HasGroup(groupID string) bool {\n\tres := s.Collection().Call(\"HasGroup\", groupID)\n\tresTyped, _ := res.(bool)\n\treturn resTyped\n}",
"func isAllowedUser(request admissionctl.Request) bool {\n\tif utils.SliceContains(request.UserInfo.Username, allowedUsers) {\n\t\treturn true\n\t}\n\n\tfor _, group := range sreAdminGroups {\n\t\tif utils.SliceContains(group, request.UserInfo.Groups) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (ctx *TestContext) ICanWatchGroup(groupName string) error {\n\treturn ctx.UserIsAManagerOfTheGroupWith(getParameterString(map[string]string{\n\t\t\"id\": groupName,\n\t\t\"user_id\": ctx.user,\n\t\t\"name\": groupName,\n\t\t\"can_watch_members\": strTrue,\n\t}))\n}",
"func (m *User) GetMemberOf()([]DirectoryObjectable) {\n return m.memberOf\n}",
"func IsGroupExist(uid int64, name string) (bool, error) {\n\tif len(name) == 0 {\n\t\treturn false, nil\n\t}\n\treturn x.Where(\"id!=?\", uid).Get(&Group{Name: name})\n}",
"func (o *Permissao) SetIsGroup(v bool) {\n\to.IsGroup = &v\n}",
"func (o *User) GetMemberOfOk() ([]Group, bool) {\n\tif o == nil || o.MemberOf == nil {\n\t\treturn nil, false\n\t}\n\treturn o.MemberOf, true\n}",
"func isInACL(c context.Context, acl *api.PrefixMetadata_ACL) (bool, error) {\n\tcaller := string(auth.CurrentIdentity(c)) // e.g. \"user:abc@example.com\"\n\n\tvar groups []string\n\tfor _, p := range acl.Principals {\n\t\tif p == caller {\n\t\t\treturn true, nil // the caller was specified in ACLs explicitly\n\t\t}\n\t\tif s := strings.SplitN(p, \":\", 2); len(s) == 2 && s[0] == \"group\" {\n\t\t\tgroups = append(groups, s[1])\n\t\t}\n\t}\n\n\tyes, err := auth.IsMember(c, groups...)\n\tif err != nil {\n\t\treturn false, errors.Annotate(err, \"failed to check group memberships when checking ACLs\").Err()\n\t}\n\treturn yes, nil\n}",
"func MemberHasPermission(s *discordgo.Session, guildID string, userID string, perm int) bool {\n\t// Get the guild member\n\tm, err := s.State.Member(guildID, userID)\n\tif err != nil {\n\t\tif m, err = s.GuildMember(guildID, userID); err != nil {\n\t\t\treturn false\n\t\t}\n\t}\n\t// Iterate through all roles to check permissions\n\tfor _, roleID := range m.Roles {\n\t\t// Get the role\n\t\trole, err := s.State.Role(guildID, roleID)\n\t\t// Make sure the role exists\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\t// Check if the role's permissions contains the sought after permission\n\t\tif role.Permissions&perm != 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (e PolicyEntity) IsGroup() bool {\n\treturn e.Type == entityTypeGroup\n}",
"func (sid *SteamID) IsGroupChat() bool {\n\treturn !!(sid.Type == TypeChat && (sid.Instance&chatInstanceFlagClan) != 0)\n}",
"func (s *Session) IsServiceMember() bool {\n\treturn s.ServiceMemberID != uuid.Nil\n}",
"func (o *User) HasMemberOf() bool {\n\tif o != nil && o.MemberOf != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *User) HasMemberOf() bool {\n\tif o != nil && o.MemberOf != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func IsLoggedIn(r *http.Request) (bool, error) {\n\tsession, err := getSession(r)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tt := session.Values[\"accessToken\"]\n\tif t == nil {\n\t\treturn false, nil\n\t}\n\tstoredToken, ok := t.(string)\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"bad type of %q value in session: %v\", \"accessToken\", err)\n\t}\n\tgp := session.Values[\"gplusID\"]\n\tif t == nil {\n\t\treturn false, nil\n\t}\n\tgplusId, ok := gp.(string)\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"bad type of %q value in session: %v\", \"gplusID\", err)\n\t}\n\treturn storedToken != \"\" && isAllowed(gplusId), nil\n}",
"func MemberHasPermission(s *discordgo.Session, guildID string, userID string, permission int) (bool, error) {\n\tif permission <= 0 {\n\t\treturn true, nil\n\t}\n\n\t// https://github.com/bwmarrin/discordgo/wiki/FAQ#determining-if-a-role-has-a-permission\n\tmember, err := s.State.Member(guildID, userID)\n\tif err != nil {\n\t\tif member, err = s.GuildMember(guildID, userID); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\t// Iterate through the role IDs stored in member.Roles\n\t// to check permissions\n\tfor _, roleID := range member.Roles {\n\t\trole, err := s.State.Role(guildID, roleID)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif role.Permissions&permission != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}",
"func (ctx *TestContext) IAmAManagerOfTheGroup(group string) error {\n\treturn ctx.UserIsAManagerOfTheGroupWith(getParameterString(map[string]string{\n\t\t\"id\": group,\n\t\t\"user_id\": ctx.user,\n\t\t\"name\": group,\n\t\t\"can_watch_members\": \"false\",\n\t}))\n}",
"func (d *DB) IsAdmin(uuid string) (bool, error) {\n\tcnt := 0\n\tr := d.db.QueryRow(\"SELECT COUNT(*) FROM teammember INNER JOIN username ON teamuuid = uuid WHERE useruuid = $1 AND username = $2\", uuid, teamNameAdmin)\n\terr := r.Scan(&cnt)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn cnt == 1, nil\n}",
"func (u *User) IsAdmin() bool {\n\treturn u.UserGroupID == ADMIN\n}",
"func (q *QueryGVR) containsGroup(groups []*metav1.APIGroup, group string) bool {\n\tfor _, grp := range groups {\n\t\tif strings.EqualFold(grp.Name, group) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (app *App) GroupMembers(ctx context.Context, groupName string) ([]string, error) {\n\treturn app.groups.GroupMembers(ctx, groupName)\n}",
"func (db *MongoDBRooms) HasMember(memberID, roomID string) (bool, error) {\n\n\thasMember := false\n\n\tsession, err := mgo.Dial(db.HOST.URI)\n\tif err != nil {\n\t\treturn hasMember, errors.New(\"error dialing the database\")\n\t}\n\tdefer session.Close()\n\n\tvar room RoomStruct\n\n\t// search query for room\n\tfind := bson.M{\"_id\": bson.ObjectIdHex(roomID)}\n\terr = session.DB(db.HOST.NAME).C(db.COLLECTION).Find(find).One(&room)\n\n\tfor _, _memberID := range room.MemberIDs {\n\t\tif _memberID == memberID {\n\t\t\thasMember = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn hasMember, errors.New(\"error finding the document\")\n\t}\n\treturn hasMember, nil\n}",
"func (dag *DdgAdminGroup) Exists() bool { //ddg_admin_group\n\treturn dag._exists\n}",
"func (p *G1Affine) IsInSubGroup() bool {\n\tvar _p G1Jac\n\t_p.FromAffine(p)\n\treturn _p.IsInSubGroup()\n}",
"func (c Chat) IsGroupChat() bool {\n\treturn c.Type != \"private\"\n}",
"func (o *Permissao) GetIsGroupOk() (*bool, bool) {\n\tif o == nil || o.IsGroup == nil {\n\t\treturn nil, false\n\t}\n\treturn o.IsGroup, true\n}",
"func HaveIAskedMember(s *discordgo.Session, member string) bool {\n\tc, err := s.UserChannelCreate(member)\n\tif err != nil {\n\t\treturn false\n\t}\n\tmessages, err := s.ChannelMessages(c.ID, 10, \"\", \"\", \"\") // reading 10 messages to overcome possible user-sent messages\n\tif err != nil {\n\t\treturn false\n\t}\n\tfor _, message := range messages {\n\t\tif message.Author.Bot {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (c *client) TeamHasMember(org string, teamID int, memberLogin string) (bool, error) {\n\tdurationLogger := c.log(\"TeamHasMember\", teamID, memberLogin)\n\tdefer durationLogger()\n\n\tprojectMaintainers, err := c.ListTeamMembers(org, teamID, RoleAll)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tfor _, person := range projectMaintainers {\n\t\tif NormLogin(person.Login) == NormLogin(memberLogin) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}",
"func (ctx *TestContext) UserIsAManagerOfTheGroupWith(parameters string) error {\n\terr := ctx.ThereIsAGroupWith(parameters)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// We create a parent group of which the user is the manager.\n\tgroup := ctx.getParameterMap(parameters)\n\n\tcanWatchMembers := \"0\"\n\tcanGrantGroupAccess := \"0\"\n\twatchedGroupName := group[\"user_id\"] + \" manages \" + referenceToName(group[\"name\"])\n\n\tif group[\"can_watch_members\"] == strTrue {\n\t\tcanWatchMembers = \"1\"\n\t\twatchedGroupName += \" with can_watch_members\"\n\t}\n\tif group[\"can_grant_group_access\"] == strTrue {\n\t\tcanGrantGroupAccess = \"1\"\n\t\twatchedGroupName += \" with can_grant_group_access\"\n\t}\n\n\terr = ctx.ThereIsAGroupWith(getParameterString(map[string]string{\n\t\t\"id\": watchedGroupName,\n\t\t\"name\": watchedGroupName,\n\t}))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx.IsAMemberOfTheGroup(group[\"id\"], watchedGroupName)\n\n\tctx.addGroupManager(group[\"user_id\"], watchedGroupName, canWatchMembers, canGrantGroupAccess)\n\n\treturn nil\n}"
] | [
"0.76126474",
"0.7386575",
"0.73728687",
"0.7257297",
"0.7106678",
"0.6925676",
"0.6812919",
"0.67271173",
"0.67248964",
"0.66902804",
"0.6681328",
"0.6652703",
"0.6634036",
"0.6619776",
"0.657318",
"0.64903814",
"0.64772445",
"0.64528537",
"0.63925785",
"0.6369088",
"0.6359759",
"0.63573694",
"0.63542205",
"0.6330751",
"0.62784445",
"0.62735087",
"0.6266872",
"0.624544",
"0.62349355",
"0.6209475",
"0.620026",
"0.61901104",
"0.61734724",
"0.61572963",
"0.6152183",
"0.6151992",
"0.6113642",
"0.60998946",
"0.60814357",
"0.6066931",
"0.605734",
"0.60565954",
"0.6027011",
"0.6017587",
"0.598068",
"0.5940163",
"0.5925166",
"0.59237367",
"0.5874775",
"0.58684903",
"0.5844566",
"0.5839029",
"0.582963",
"0.58260256",
"0.57648593",
"0.57309186",
"0.5712062",
"0.5699605",
"0.5683727",
"0.5671324",
"0.563709",
"0.55956984",
"0.557315",
"0.5566756",
"0.55630046",
"0.55621207",
"0.5552294",
"0.55197036",
"0.55150825",
"0.5512918",
"0.54987144",
"0.5485967",
"0.5485232",
"0.5459601",
"0.5451298",
"0.5437292",
"0.542229",
"0.5408021",
"0.5398786",
"0.5390968",
"0.5381086",
"0.53694266",
"0.5366776",
"0.5361372",
"0.5361372",
"0.53600496",
"0.5348922",
"0.5348099",
"0.53216136",
"0.5321024",
"0.529885",
"0.5284517",
"0.528294",
"0.528091",
"0.52618986",
"0.52573484",
"0.52573097",
"0.5247586",
"0.5223666",
"0.52061695"
] | 0.7899438 | 0 |
DNSName extracts DNS name from host:port string. | func DNSName(hostport string) (string, error) {
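	// strip any port from the host:port input first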
host, err := Host(hostport)
if err != nil {
return "", trace.Wrap(err)
}
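	// a value that parses as an IP address is not a DNS name, reject it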
if ip := net.ParseIP(host); len(ip) != 0 {
return "", trace.BadParameter("%v is an IP address", host)
}
return host, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func GetHostName(hostAddr string) string {\n\treturn strings.Split(hostAddr, base.UrlPortNumberDelimiter)[0]\n}",
"func GetHostname(addr string) string {\n\treturn strings.Split(addr, \":\")[0]\n}",
"func hostname(hostport string) (string, error) {\n\thost, _, err := net.SplitHostPort(hostport)\n\treturn host, err\n}",
"func ExtractHostName(urlStr string) (HostNames, error) {\n\thn := &HostNames{\n\t\tURL: \"\",\n\t\tHostName: \"\",\n\t}\n\n\tu, err := url.Parse(urlStr)\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn *hn, err\n\t}\n\n\tisSchema, err := IsSchema(urlStr)\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn *hn, err\n\t}\n\n\tif u.Hostname() != \"\" && true == isSchema {\n\t\thn.URL = u.Scheme + \"://\" + u.Hostname()\n\t\thn.HostName = u.Hostname()\n\t}\n\n\treturn *hn, nil\n}",
"func DnsDecoder(urlStr string) (*string, *string, error) {\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\thostTmp := u.Host\n\tIP := Dns(u.Host)\n\tif IP != nil {\n\t\tu.Host = IP.String()\n\t\turlStr = u.String()\n\t\treturn &urlStr, &hostTmp, nil\n\t}\n\treturn nil, nil, fmt.Errorf(\"dnsDecoder fail\")\n}",
"func parseHost(addr string) string {\n\tvar (\n\t\thost, port string\n\t\tdefaultAssigned bool\n\t)\n\n\tv := strings.Split(addr, \":\")\n\n\tswitch len(v) {\n\tcase 2:\n\t\thost = v[0]\n\t\tport = v[1]\n\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif port == \"\" {\n\t\t\tport = _DEFAULT_PORT\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif defaultAssigned == false {\n\t\t\treturn addr // addr is already in required format\n\t\t}\n\t\tbreak\n\n\tcase 1:\n\t\thost = v[0]\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t}\n\t\tport = _DEFAULT_PORT\n\tcase 0:\n\t\tfallthrough\n\tdefault:\n\t\thost = _DEFAULT_HOST\n\t\tport = _DEFAULT_PORT\n\t\tbreak\n\t}\n\treturn strings.Join([]string{host, port}, \":\")\n}",
"func addrToHost(addr string) string {\n\treturn strings.Split(addr, \":\")[0]\n}",
"func HostNameandPort(node string) (host, port string, ipv6 bool, err error) {\n\ttokens := []string{}\n\n\t// Set _IPv6 based on input address\n\tipv6, err = IsIPv6(node)\n\n\tif err != nil {\n\t\treturn \"\", \"\", false, err\n\t}\n\n\terr = nil\n\t// For IPv6\n\tif ipv6 {\n\t\t// Then the url should be of the form [::1]:8091\n\t\ttokens = strings.Split(node, \"]:\")\n\t\thost = strings.Replace(tokens[0], \"[\", \"\", 1)\n\n\t} else {\n\t\t// For IPv4\n\t\ttokens = strings.Split(node, \":\")\n\t\thost = tokens[0]\n\t}\n\n\tif len(tokens) == 2 {\n\t\tport = tokens[1]\n\t} else {\n\t\tport = \"\"\n\t}\n\n\treturn\n}",
"func parseHostPort(str string) (string, string) {\n\tvar (\n\t\thost string\n\t\tport string\n\n\t\ti = strings.Index(str, \":\")\n\t)\n\tif i == -1 {\n\t\treturn str, \"\"\n\t}\n\n\thost = str[:i]\n\tport = str[i+1:]\n\n\treturn host, port\n}",
"func Hostname() (string, error)",
"func hostnameInSNI(name string) string {\n\thost := name\n\tif len(host) > 0 && host[0] == '[' && host[len(host)-1] == ']' {\n\t\thost = host[1 : len(host)-1]\n\t}\n\tif i := strings.LastIndex(host, \"%\"); i > 0 {\n\t\thost = host[:i]\n\t}\n\tif net.ParseIP(host) != nil {\n\t\treturn \"\"\n\t}\n\tfor len(name) > 0 && name[len(name)-1] == '.' {\n\t\tname = name[:len(name)-1]\n\t}\n\treturn name\n}",
"func getHostNameAndPort(hostInfo string) (string, int, error) {\n\thost := strings.SplitN(hostInfo, \":\", -1)\n\tif len(host) != 2 {\n\t\treturn \"\", 0, fmt.Errorf(\"expected hostname:port, got %s\", host)\n\t}\n\n\tport, err := strconv.Atoi(host[1])\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"invalid port number, got %s\", host[1])\n\t}\n\n\treturn host[0], port, nil\n}",
"func ExtractHost(address string) string {\n\thost, _, _ := net.SplitHostPort(address)\n\tif host == \"\" {\n\t\treturn \"localhost\"\n\t}\n\treturn host\n}",
"func GetHostAddr(hostName string, port uint16) string {\n\treturn hostName + base.UrlPortNumberDelimiter + strconv.FormatInt(int64(port), base.ParseIntBase)\n}",
"func (internet Internet) DomainName(v reflect.Value) (interface{}, error) {\n\treturn internet.domainName()\n}",
"func ParseHost(s string) (*Host, error) {\n\tisValidHost := func(host string) bool {\n\t\tif host == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif ip := net.ParseIP(host); ip != nil {\n\t\t\treturn true\n\t\t}\n\n\t\t// host is not a valid IPv4 or IPv6 address\n\t\t// host may be a hostname\n\t\t// refer https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names\n\t\t// why checks are done like below\n\t\tif len(host) < 1 || len(host) > 253 {\n\t\t\treturn false\n\t\t}\n\n\t\tfor _, label := range strings.Split(host, \".\") {\n\t\t\tif len(label) < 1 || len(label) > 63 {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tif !hostLabelRegexp.MatchString(label) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tvar port Port\n\tvar isPortSet bool\n\thost, portStr, err := net.SplitHostPort(s)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"missing port in address\") {\n\t\t\treturn nil, err\n\t\t}\n\t\thost = s\n\t\tportStr = \"\"\n\t} else {\n\t\tif port, err = ParsePort(portStr); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tisPortSet = true\n\t}\n\n\tif host != \"\" {\n\t\thost, err = trimIPv6(host)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// IPv6 requires a link-local address on every network interface.\n\t// `%interface` should be preserved.\n\ttrimmedHost := host\n\n\tif i := strings.LastIndex(trimmedHost, \"%\"); i > -1 {\n\t\t// `%interface` can be skipped for validity check though.\n\t\ttrimmedHost = trimmedHost[:i]\n\t}\n\n\tif !isValidHost(trimmedHost) {\n\t\treturn nil, errors.New(\"invalid hostname\")\n\t}\n\n\treturn &Host{\n\t\tName: host,\n\t\tPort: port,\n\t\tIsPortSet: isPortSet,\n\t}, nil\n}",
"func ParseDSN(name string) apmsql.DSNInfo {\n\tif pos := strings.IndexRune(name, '?'); pos >= 0 {\n\t\tname = name[:pos]\n\t}\n\treturn apmsql.DSNInfo{\n\t\tDatabase: name,\n\t}\n}",
"func parseHost(host string) string {\n\trealHost, _, _ := net.SplitHostPort(host)\n\tif realHost != \"\" {\n\t\treturn realHost\n\t}\n\treturn host\n}",
"func parseHostname(hostname string) (string, error) {\n\t// TODO does the hostname even need to be parsed?\n\treturn hostname, nil\n}",
"func parseAddr(addr string) (string, string) {\n\tparsed := strings.SplitN(addr, \":\", 2)\n\treturn parsed[0], parsed[1]\n}",
"func HostWithoutPort(s string) string {\n\tif strings.Contains(s, \":\") {\n\t\treturn strings.Split(s, \":\")[0]\n\t}\n\treturn s\n}",
"func (p *Printer) Hostname(ip, port string, ns, pod, svc string, names []string) (host string) {\n\thost = ip\n\tif p.opts.enableIPTranslation {\n\t\tif pod != \"\" {\n\t\t\t// path.Join omits the slash if ns is empty\n\t\t\thost = path.Join(ns, pod)\n\t\t} else if svc != \"\" {\n\t\t\thost = path.Join(ns, svc)\n\t\t} else if len(names) != 0 {\n\t\t\thost = strings.Join(names, \",\")\n\t\t}\n\t}\n\n\tif port != \"\" && port != \"0\" {\n\t\treturn net.JoinHostPort(host, port)\n\t}\n\n\treturn host\n}",
"func DnsDomain(s string) string {\n\tl := strings.Split(s, \"/\")\n\t// start with 1, to strip /skydns\n\tfor i, j := 1, len(l)-1; i < j; i, j = i+1, j-1 {\n\t\tl[i], l[j] = l[j], l[i]\n\t}\n\treturn dns.Fqdn(strings.Join(l[2:len(l)-1], \".\"))\n}",
"func Fqdn(hostname string) string {\n\taddrs, err := net.LookupIP(hostname)\n\tif err != nil {\n\t\treturn hostname\n\t}\n\n\tfor _, addr := range addrs {\n\t\tif ipv4 := addr.To4(); ipv4 != nil {\n\t\t\tip, err := ipv4.MarshalText()\n\t\t\tif err != nil {\n\t\t\t\treturn hostname\n\t\t\t}\n\t\t\thosts, err := net.LookupAddr(string(ip))\n\t\t\tif err != nil || len(hosts) == 0 {\n\t\t\t\treturn hostname\n\t\t\t}\n\t\t\treturn hosts[0]\n\t\t}\n\t}\n\treturn hostname\n}",
"func buildHostName(subDomainPrefix string, subDomainSuffix string, subDomain string, domain string) string {\n\treturn joinNonEmpty([]interface{}{joinNonEmpty([]interface{}{subDomainPrefix, subDomain, subDomainSuffix}, \"-\"), domain}, \".\")\n}",
"func GetNameServer() string {\n\t// run command: netsh interface ip show dnsservers\n\tif out, _, err := util.RunAndWait(exec.Command(\"netsh\",\n\t\t\"interface\",\n\t\t\"ip\",\n\t\t\"show\",\n\t\t\"dnsservers\",\n\t)); err != nil {\n\t\tlog.Error().Msgf(\"Failed to get dns server\")\n\t\treturn \"\"\n\t} else {\n\t\tr, _ := regexp.Compile(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\\\\.[0-9]+\")\n\t\treturn r.FindString(out)\n\t}\n}",
"func SplitHostPort(hostport string) (host, port string, err error) {\n\tj, k := 0, 0\n\n\t// The port starts after the last colon.\n\ti := last(hostport, ':')\n\tif i < 0 {\n\t\tgoto missingPort\n\t}\n\n\tif hostport[0] == '[' {\n\t\t// Expect the first ']' just before the last ':'.\n\t\tend := byteIndex(hostport, ']')\n\t\tif end < 0 {\n\t\t\terr = &AddrError{\"missing ']' in address\", hostport}\n\t\t\treturn\n\t\t}\n\t\tswitch end + 1 {\n\t\tcase len(hostport):\n\t\t\t// There can't be a ':' behind the ']' now.\n\t\t\tgoto missingPort\n\t\tcase i:\n\t\t\t// The expected result.\n\t\tdefault:\n\t\t\t// Either ']' isn't followed by a colon, or it is\n\t\t\t// followed by a colon that is not the last one.\n\t\t\tif hostport[end+1] == ':' {\n\t\t\t\tgoto tooManyColons\n\t\t\t}\n\t\t\tgoto missingPort\n\t\t}\n\t\thost = hostport[1:end]\n\t\tj, k = 1, end+1 // there can't be a '[' resp. ']' before these positions\n\t} else {\n\t\thost = hostport[:i]\n\t\tif byteIndex(host, ':') >= 0 {\n\t\t\tgoto tooManyColons\n\t\t}\n\t\tif byteIndex(host, '%') >= 0 {\n\t\t\tgoto missingBrackets\n\t\t}\n\t}\n\tif byteIndex(hostport[j:], '[') >= 0 {\n\t\terr = &AddrError{\"unexpected '[' in address\", hostport}\n\t\treturn\n\t}\n\tif byteIndex(hostport[k:], ']') >= 0 {\n\t\terr = &AddrError{\"unexpected ']' in address\", hostport}\n\t\treturn\n\t}\n\n\tport = hostport[i+1:]\n\treturn\n\nmissingPort:\n\terr = &AddrError{\"missing port in address\", hostport}\n\treturn\n\ntooManyColons:\n\terr = &AddrError{\"too many colons in address\", hostport}\n\treturn\n\nmissingBrackets:\n\terr = &AddrError{\"missing brackets in address\", hostport}\n\treturn\n}",
"func DomainName(opts ...options.OptionFunc) string {\n\treturn singleFakeData(DomainNameTag, func() interface{} {\n\t\topt := options.BuildOptions(opts)\n\t\ti := Internet{fakerOption: *opt}\n\t\td, err := i.domainName()\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\treturn d\n\t}, opts...).(string)\n}",
"func HostPort(urlStr string) (string, error) {\n\t// TODO: rename this function to URLHostPort instead, like\n\t// ListenHostPort below.\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not parse %q as a url: %v\", urlStr, err)\n\t}\n\tif u.Scheme == \"\" {\n\t\treturn \"\", fmt.Errorf(\"url %q has no scheme\", urlStr)\n\t}\n\thostPort := u.Host\n\tif hostPort == \"\" || strings.HasPrefix(hostPort, \":\") {\n\t\treturn \"\", fmt.Errorf(\"url %q has no host\", urlStr)\n\t}\n\tidx := strings.Index(hostPort, \"]\")\n\tif idx == -1 {\n\t\tidx = 0\n\t}\n\tif !strings.Contains(hostPort[idx:], \":\") {\n\t\tif u.Scheme == \"https\" {\n\t\t\thostPort += \":443\"\n\t\t} else {\n\t\t\thostPort += \":80\"\n\t\t}\n\t}\n\treturn hostPort, nil\n}",
"func DNSResolve(host string) string {\n\taddress, err := net.ResolveIPAddr(\"ip\", host)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn address.String()\n}",
"func getHost(host_url string) (host string, err error) {\n\tu, err := url.Parse(host_url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.Split(u.Host, \":\")[0], nil\n}",
"func getHostName(staxName string, nodeName string) string {\n\treturn fmt.Sprintf(\"%s-%s\", staxName, nodeName)\n}",
"func LookupDNSHostCNAME(domain string) string {\n\tif nsRecord, err := net.LookupCNAME(domain); err == nil {\n\t\treturn nsRecord\n\t}\n\treturn \"\"\n}",
"func main() {\n\tflag.StringVar(&addr, \"addr\", \"127.0.0.1\", \"host address to lookup\")\n\tflag.Parse()\n\n\tname, err := net.LookupAddr(addr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Println(name)\n}",
"func Host(hostname string) (string, error) {\n\tif hostname == \"\" {\n\t\treturn \"\", trace.BadParameter(\"missing parameter hostname\")\n\t}\n\t// if this is IPv4 or V6, return as is\n\tif ip := net.ParseIP(hostname); len(ip) != 0 {\n\t\treturn hostname, nil\n\t}\n\t// has no indication of port, return, note that\n\t// it will not break ipv6 as it always has at least one colon\n\tif !strings.Contains(hostname, \":\") {\n\t\treturn hostname, nil\n\t}\n\thost, _, err := SplitHostPort(hostname)\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\treturn host, nil\n}",
"func (o DnsDomainOutput) DomainName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DnsDomain) pulumi.StringOutput { return v.DomainName }).(pulumi.StringOutput)\n}",
"func withoutPort(addr string) string {\n\tif h, _, err := net.SplitHostPort(addr); err == nil {\n\t\treturn h\n\t}\n\treturn addr\n}",
"func findMDA(host string) (string, error) {\n\tresults, err := net.LookupMX(host)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(results) == 0 {\n\t\treturn \"\", errors.New(\"No MX records found\")\n\t}\n\n\t// todo: support for multiple MX records\n\th := results[0].Host\n\treturn h[:len(h)-1] + \":25\", nil\n}",
"func Resolve(q string) (ip net.IP, port uint16, target string, err error) {\n c := new(dns.Client)\n m := new(dns.Msg)\n m.SetQuestion(dns.Fqdn(q), dns.TypeSRV)\n m.RecursionDesired = true\n\n dns_server := \"127.0.0.1:8600\"\n if len(os.Args) > 1 {\n dns_server = os.Args[1]\n }\n fmt.Printf(\"Using dns server: %v\\n\", dns_server)\n\n r, _, err := c.Exchange(m, dns_server)\n if r == nil {\n log.Fatalf(\"error: %s\\n\", err.Error())\n }\n\n if r.Rcode != dns.RcodeSuccess {\n log.Fatalf(\"dns lookup failed\\n\")\n }\n\n for _, srv := range r.Answer {\n port = srv.(*dns.SRV).Port\n target = srv.(*dns.SRV).Target\n\n fmt.Printf(\"%v %v\\n\", port, target)\n\n for _, a := range r.Extra {\n if target != a.(*dns.A).Hdr.Name {\n continue\n }\n ip = a.(*dns.A).A\n fmt.Printf(\"%v %v\\n\", target, ip)\n return\n }\n }\n\n log.Fatalf(\"no DNS record found\\n\")\n return\n}",
"func extractAddress(str string) string {\n\tvar addr string\n\n\tswitch {\n\tcase strings.Contains(str, `]`):\n\t\t// IPv6 address [2001:db8::1%lo0]:48467\n\t\taddr = strings.Split(str, `]`)[0]\n\t\taddr = strings.Split(addr, `%`)[0]\n\t\taddr = strings.TrimLeft(addr, `[`)\n\tdefault:\n\t\t// IPv4 address 192.0.2.1:48467\n\t\taddr = strings.Split(str, `:`)[0]\n\t}\n\treturn addr\n}",
"func extractAddress(str string) string {\n\tvar addr string\n\n\tswitch {\n\tcase strings.Contains(str, `]`):\n\t\t// IPv6 address [2001:db8::1%lo0]:48467\n\t\taddr = strings.Split(str, `]`)[0]\n\t\taddr = strings.Split(addr, `%`)[0]\n\t\taddr = strings.TrimLeft(addr, `[`)\n\tdefault:\n\t\t// IPv4 address 192.0.2.1:48467\n\t\taddr = strings.Split(str, `:`)[0]\n\t}\n\treturn addr\n}",
"func getFqdnHostname(osHost string) (string, error) {\n\tips, err := lookupIp(osHost)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, ip := range ips {\n\t\thosts, err := lookupAddr(ip.String())\n\t\tif err != nil || len(hosts) == 0 {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif hosts[0] == \"localhost\" {\n\t\t\tcontinue\n\t\t}\n\t\ttrace.Hostname(\"found FQDN hosts: %s\", strings.Join(hosts, \", \"))\n\t\treturn strings.TrimSuffix(hosts[0], \".\"), nil\n\t}\n\treturn \"\", errors.New(\"can't lookup FQDN\")\n}",
"func (o TCPHealthCheckOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TCPHealthCheck) *string { return v.PortName }).(pulumi.StringPtrOutput)\n}",
"func DomainName() string {\n\treturn fmt.Sprintf(\"%s.%s\",\n\t\tAlpha(\"\", 14),\n\t\tItem(\"net\", \"com\", \"org\", \"io\", \"gov\"))\n}",
"func ParseAddrPort(str string) (ip net.IP, port uint16, err error) {\n\t// See func net.SplitHostPort(hostport string) (host, port string, err error)\n\tpair := strings.Split(str, \":\")\n\tif len(pair) == 2 {\n\t\tip = net.ParseIP(pair[0])\n\t\tif ip != nil {\n\t\t\tvar v uint64\n\t\t\tv, err = strconv.ParseUint(pair[1], 10, 16)\n\t\t\tif err == nil {\n\t\t\t\tport = uint16(v)\n\t\t\t} else {\n\t\t\t\terr = errf(\"\\\"%s\\\" is invalid port specifier\", pair[1])\n\t\t\t}\n\t\t} else {\n\t\t\terr = errf(\"\\\"%s\\\" not a valid IP address\", pair[0])\n\t\t}\n\t} else {\n\t\terr = errf(\"\\\"%s\\\" is missing port specifier\", str)\n\t}\n\treturn\n}",
"func ValidateNameserverIpAndPort(nameServer string) (string, string, error) {\n\tif ip := net.ParseIP(nameServer); ip != nil {\n\t\treturn ip.String(), \"53\", nil\n\t}\n\n\thost, port, err := net.SplitHostPort(nameServer)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tif ip := net.ParseIP(host); ip == nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"bad IP address: %q\", host)\n\t}\n\tif p, err := strconv.Atoi(port); err != nil || p < 1 || p > 65535 {\n\t\treturn \"\", \"\", fmt.Errorf(\"bad port number: %q\", port)\n\t}\n\treturn host, port, nil\n}",
"func HostPort(addr string, port interface{}) string {\n\thost := addr\n\tif strings.Count(addr, \":\") > 0 {\n\t\thost = fmt.Sprintf(\"[%s]\", addr)\n\t}\n\t// TODO check for NATS case\n\tif v, ok := port.(string); ok {\n\t\tif v == \"\" {\n\t\t\treturn fmt.Sprintf(\"%s\", host)\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"%s:%v\", host, port)\n}",
"func GetHost(URLString string) string {\n\tu, err := url.Parse(URLString)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn u.Hostname()\n}",
"func getsockname(fd int, rsa *unix.RawSockaddrAny, addrlen *socklen) (err error)",
"func callboxHostName(dut *dut.DUT) (string, error) {\n\tdutHost := dut.HostName()\n\tif host, _, err := net.SplitHostPort(dutHost); err == nil {\n\t\tdutHost = host\n\t}\n\n\tdutHost = strings.TrimSuffix(dutHost, \".cros\")\n\tif dutHost == \"localhost\" {\n\t\treturn \"\", errors.Errorf(\"unable to parse hostname from: %q, localhost not supported\", dutHost)\n\t}\n\n\tif ip := net.ParseIP(dutHost); ip != nil {\n\t\treturn \"\", errors.Errorf(\"unable to parse hostname from: %q, ip:port format not supported\", dutHost)\n\t}\n\n\thostname := strings.Split(dutHost, \"-\")\n\tif len(hostname) < 2 {\n\t\treturn \"\", errors.Errorf(\"unable to parse hostname from: %q, unknown name format\", dutHost)\n\t}\n\n\t// CallboxManager expects callbox hostnames to end in .cros\n\thostname = hostname[0 : len(hostname)-1]\n\treturn fmt.Sprintf(\"%s.cros\", strings.Join(hostname, \"-\")), nil\n}",
"func (o TCPHealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TCPHealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}",
"func LooselyGetHost(hostport string) string {\n\thoststart, hostend := 0, 0\n\tif len(hostport) >= 1 && hostport[0] == '[' {\n\t\thoststart = 1\n\t\thostend = strings.IndexByte(hostport, ']')\n\t} else {\n\t\thostend = strings.IndexByte(hostport, ':')\n\t}\n\tif hostend < 0 {\n\t\thostend = len(hostport)\n\t}\n\treturn hostport[hoststart:hostend]\n}",
"func SplitHostPort(hostport string) (host, port string, err error) {\n\taddrErr := func(addr, why string) (host, port string, err error) {\n\t\treturn \"\", \"\", &net.AddrError{Err: why, Addr: addr}\n\t}\n\n\thoststart, hostend := 0, 0\n\tportstart := len(hostport)\n\tif len(hostport) >= 1 && hostport[0] == '[' {\n\t\thoststart = 1\n\t\thostend = strings.IndexByte(hostport, ']')\n\t\tif hostend < 0 {\n\t\t\treturn addrErr(hostport, \"missing ']' in address\")\n\t\t}\n\t\tportstart = hostend + 1\n\t} else {\n\t\thostend = strings.IndexByte(hostport, ':')\n\t\tif hostend < 0 {\n\t\t\thostend = len(hostport)\n\t\t}\n\t\tportstart = hostend\n\t}\n\tif portstart < len(hostport) {\n\t\tif hostport[portstart] != ':' {\n\t\t\treturn addrErr(hostport, \"invalid character at the end of address, expecting ':'\")\n\t\t}\n\t\tportstart += 1\n\t}\n\n\tport = hostport[portstart:]\n\thost = hostport[hoststart:hostend]\n\n\tif strings.IndexByte(port, ':') >= 0 {\n\t\treturn addrErr(hostport, \"too many colons in suspected port number\")\n\t}\n\tif strings.IndexByte(port, ']') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected ']' in port\")\n\t}\n\tif strings.IndexByte(port, '[') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected '[' in port\")\n\t}\n\tif strings.IndexByte(host, '[') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected '[' in host\")\n\t}\n\tif strings.IndexByte(host, ']') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected ']' in host\")\n\t}\n\n\treturn host, port, nil\n}",
"func (sd *ServiceDiscovery) ResolveName(name string) (string, string) {\n\tname = stripDomain(name)\n\tif strings.HasSuffix(name, sd.Domain) {\n\t\tname = name[0 : len(name)-len(sd.Domain)-1]\n\t}\n\tseparator := fmt.Sprintf(\".%s\", serviceSuffix)\n\tvar service string\n\tvar dc string\n\tif strings.Contains(name, separator) {\n\t\tcols := strings.Split(name, separator)\n\t\tservice, dc = cols[0], stripDomain(cols[1])\n\t\tif len(dc) < 1 {\n\t\t\tdc = sd.Dc\n\t\t}\n\t} else {\n\t\tservice, dc = name, sd.Dc\n\t}\n\treturn service, dc\n}",
"func fixHostPort(address string, defaultPort int) (fixed string, err error) {\n\t// If the address is wrapped in brackets, append a port if necessary.\n\tif address[0] == '[' {\n\t\tend := strings.IndexByte(address, ']')\n\t\tswitch {\n\t\tcase end < 0:\n\t\t\treturn \"\", errors.New(\"missing ']' in address\")\n\t\tcase end+1 == len(address):\n\t\t\treturn fmt.Sprintf(\"%s:%d\", address, defaultPort), nil\n\t\tcase address[end+1] == ':':\n\t\t\treturn address, nil\n\t\tdefault:\n\t\t\treturn \"\", errors.New(\"unexpected character following ']' in address\")\n\t\t}\n\t}\n\n\t// No colons? Must be a port-less IPv4 or domain address.\n\tlast := strings.LastIndexByte(address, ':')\n\tif last < 0 {\n\t\treturn fmt.Sprintf(\"%s:%d\", address, defaultPort), nil\n\t}\n\n\t// Exactly one colon? A port have been included along with an IPv4 or\n\t// domain address. (IPv6 addresses are guaranteed to have more than one\n\t// colon.)\n\tprev := strings.LastIndexByte(address[:last], ':')\n\tif prev < 0 {\n\t\treturn address, nil\n\t}\n\n\t// Two or more colons means we must have an IPv6 address without a port.\n\treturn fmt.Sprintf(\"[%s]:%d\", address, defaultPort), nil\n}",
"func Dns(host string) *net.IP {\n\tfor _, dnsServer := range appConfig.Dnsservers {\n\t\tIP := dnss(host, dnsServer+\":53\")\n\t\tif IP != nil {\n\t\t\treturn IP\n\t\t}\n\t}\n\treturn nil\n}",
"func (d *Device) GetDNS(domain string) (string, error) {\n\td.Set(TCPDNSLookup, \"\\\"\"+domain+\"\\\"\")\n\tresp, err := d.Response(1000)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !strings.Contains(string(resp), \":\") {\n\t\treturn \"\", errors.New(\"GetDNS error:\" + string(resp))\n\t}\n\tr := strings.Split(string(resp), \":\")\n\tif len(r) != 2 {\n\t\treturn \"\", errors.New(\"Invalid domain lookup result\")\n\t}\n\tres := strings.Split(r[1], \"\\r\\n\")\n\treturn strings.Trim(res[0], `\"`), nil\n}",
"func NormalizeAddr(addr string) (string, error) {\n\tu, err := ParseAddr(addr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thost, _, err := net.SplitHostPort(u.Host)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse host-port pair: %v\", err)\n\t} else if host == \"\" {\n\t\treturn \"\", fmt.Errorf(\"no hostname in address: %q\", addr)\n\t}\n\treturn u.String(), nil\n}",
"func prepareMatchName(matchName string) string {\n\treturn dns.FQDN(matchName)\n}",
"func (c *Conn) GetHostName(property string) (string, error) {\n\tp, err := c.object.GetProperty(dbusInterface + \".\" + property)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tv, b := p.Value().(string)\n\tif !b {\n\t\treturn \"\", fmt.Errorf(\"Empty value received: %s\", property)\n\t}\n\n\treturn v, nil\n}",
"func GetHostname(host ...string) string {\n\tif len(host) > 0 && strings.TrimSpace(host[0]) != \"\" {\n\t\tif h, ok := hostHostList[host[0]]; ok {\n\t\t\treturn h\n\t\t}\n\t\tstr := GetCmdStr(\"host %s\", host[0])\n\t\th := \"\"\n\t\tif strings.Contains(str, \"has address\") {\n\t\t\th = GetCmdStr(\"host %s|awk '{print $1}'\", host[0])\n\t\t\thostHostList[host[0]] = h\n\t\t} else if strings.Contains(str, \"domain name pointer\") {\n\t\t\th = strings.TrimSuffix(GetCmdStr(\"host %s|awk '{print $5}'\", host[0]), \".\")\n\t\t\thostHostList[host[0]] = h\n\t\t}\n\t\treturn h\n\t}\n\tif chostname == \"\" {\n\t\th, err := os.Hostname()\n\t\tif err != nil {\n\t\t\tchostname = GetCmdStr(\"hostname\")\n\t\t} else {\n\t\t\tchostname = h\n\t\t}\n\t}\n\treturn chostname\n}",
"func parseAdvertiseAddr(advAddr string, port int) (string, int) {\n\treturn advAddr, port\n\n\t// bug: if use domain, always return empty host\n\t/*m, e := regexp.Match(ipv4Pattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\treturn advAddr, port\n\t}\n\n\tm, e1 := regexp.Match(ipv4WithPortPattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e1 != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\t// 1 5\n\t\tregxp := regexp.MustCompile(ipv4WithPortPattern)\n\t\tadAddr := regxp.ReplaceAllString(advAddr, \"${1}\")\n\t\tadPort, _ := strconv.Atoi(regxp.ReplaceAllString(advAddr, \"${5}\"))\n\t\treturn adAddr, adPort\n\t}\n\treturn \"\", port*/\n}",
"func splitHost(host string) string {\n\treturn strings.Split(host, \":\")[0]\n}",
"func DNSName(str string) bool {\n\tif str == \"\" || len(strings.Replace(str, \".\", \"\", -1)) > 255 {\n\t\t// constraints already violated\n\t\treturn false\n\t}\n\treturn rxDNSName.MatchString(str)\n}",
"func getAddrString(host string, port string) string {\n\treturn fmt.Sprintf(\"%s:%s\", host, port)\n}",
"func dbnameOfDSN(dsn string) (string, string) {\n\tvar dbname string\n\ti := strings.LastIndex(dsn, \"/\")\n\tif i >= 0 {\n\t\tdbname = dsn[i+1:] // save the database name\n\t\tj := strings.Index(dbname, \"?\")\n\t\tif j >= 0 {\n\t\t\tdbname = dbname[:j]\n\t\t}\n\t\tdsn = dsn[:i+1] // stomp on the database name in conf. Requires trailing '/'.\n\t}\n\n\treturn dbname, dsn\n}",
"func HostPort(urlStr string) (string, error) {\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not parse %q as a url: %v\", urlStr, err)\n\t}\n\tif u.Scheme == \"\" {\n\t\treturn \"\", fmt.Errorf(\"url %q has no scheme\", urlStr)\n\t}\n\thostPort := u.Host\n\tif hostPort == \"\" || strings.HasPrefix(hostPort, \":\") {\n\t\treturn \"\", fmt.Errorf(\"url %q has no host\", urlStr)\n\t}\n\tidx := strings.Index(hostPort, \"]\")\n\tif idx == -1 {\n\t\tidx = 0\n\t}\n\tif !strings.Contains(hostPort[idx:], \":\") {\n\t\tif u.Scheme == \"https\" {\n\t\t\thostPort += \":443\"\n\t\t} else {\n\t\t\thostPort += \":80\"\n\t\t}\n\t}\n\treturn hostPort, nil\n}",
"func (d DNSSeed) String() string {\n\treturn d.Host\n}",
"func Resolve(addr string) (string, error) {\n\tip, port, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tips, err := net.LookupHost(ip)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn net.JoinHostPort(ips[0], port), nil\n}",
"func dnsDecodeString(raw string) ([]byte, error) {\n\tpad := 8 - (len(raw) % 8)\n\tnb := []byte(raw)\n\tif pad != 8 {\n\t\tnb = make([]byte, len(raw)+pad)\n\t\tcopy(nb, raw)\n\t\tfor index := 0; index < pad; index++ {\n\t\t\tnb[len(raw)+index] = '='\n\t\t}\n\t}\n\treturn sliverBase32.DecodeString(string(nb))\n}",
"func parseBindAddr(s string) (address net.Addr, err error) {\n\tconst maxUnixLen = 106\n\n\t// '@' prefix specifies a Linux abstract domain socket.\n\tif runtime.GOOS == \"linux\" && strings.HasPrefix(s, \"@\") {\n\t\tif len(s) > maxUnixLen {\n\t\t\treturn nil, fmt.Errorf(\"sock file length must be less than %d characters\", maxUnixLen)\n\t\t}\n\t\treturn &net.UnixAddr{Name: s, Net: \"unix\"}, nil\n\t}\n\n\tif strings.Contains(s, \"/\") {\n\t\tif !filepath.IsAbs(s) {\n\t\t\treturn nil, errors.New(\"sock file must be an absolute path\")\n\t\t} else if len(s) > maxUnixLen {\n\t\t\treturn nil, fmt.Errorf(\"sock file length must be less than %d characters\", maxUnixLen)\n\t\t}\n\t\treturn &net.UnixAddr{Name: s, Net: \"unix\"}, nil\n\t}\n\n\t// For TCP, the supplied address string, s, is one of a port, a :port, or a host:port.\n\tip, port := net.IPv4(127, 0, 0, 1), 0\n\n\tif strings.Contains(s, \":\") {\n\t\thost, portString, err := net.SplitHostPort(s)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid addr %q - must be provided as host:port\", s)\n\t\t}\n\t\tif host != \"\" {\n\t\t\tip = net.ParseIP(host)\n\t\t}\n\n\t\tport, err = strconv.Atoi(portString)\n\t} else {\n\t\tport, err = strconv.Atoi(s)\n\t}\n\n\tif err != nil || port < 1 || port > 65534 {\n\t\treturn nil, fmt.Errorf(\"invalid port %d - must be between 1 and 65534\", port)\n\t}\n\treturn &net.TCPAddr{IP: ip, Port: port}, nil\n}",
"func (o ZoneSoaRecordOutput) HostName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ZoneSoaRecord) *string { return v.HostName }).(pulumi.StringPtrOutput)\n}",
"func (o ZoneSoaRecordOutput) HostName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ZoneSoaRecord) *string { return v.HostName }).(pulumi.StringPtrOutput)\n}",
"func (d DNS64) Name() string { return \"dns64\" }",
"func getHost(line string) (string, error) {\n\tsplit := strings.Split(line, \" \")\n\tif len(split) == 1 {\n\t\t// plain domain list format\n\t\treturn split[0], nil\n\t} else if len(split) == 2 {\n\t\t// hosts file format\n\t\treturn split[1], nil\n\t} else {\n\t\treturn \"\", errParseHosts\n\t}\n}",
"func (o HTTP2HealthCheckOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheck) *string { return v.PortName }).(pulumi.StringPtrOutput)\n}",
"func (o SSLHealthCheckOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v SSLHealthCheck) *string { return v.PortName }).(pulumi.StringPtrOutput)\n}",
"func GetHost(loc string, www, decode bool) (string, error) {\n\tparsed, err := url.Parse(loc)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thost, _, err := net.SplitHostPort(parsed.Host)\n\tif err != nil {\n\t\thost = parsed.Host\n\t}\n\tif www {\n\t\tre := regexp.MustCompile(`^www\\.`)\n\t\thost = re.ReplaceAllString(host, \"\")\n\t}\n\n\tif decode {\n\t\treturn idna.ToASCII(host)\n\t}\n\n\treturn host, nil\n}",
"func (o TCPHealthCheckPtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TCPHealthCheck) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PortName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o HTTPSHealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTPSHealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}",
"func Parse(s string) (Name, error) {\n\tformattedName := strings.Trim(strings.ToLower(s), \".\")\n\n\tif strings.HasPrefix(formattedName, \"*.\") {\n\t\tformattedName = strings.Replace(formattedName, \"*.\", \"\", 1)\n\t}\n\tif strings.HasPrefix(formattedName, \"@.\") {\n\t\tformattedName = strings.Replace(formattedName, \"@.\", \"\", 1)\n\t}\n\n\tif len(formattedName) == 0 {\n\t\treturn Name{}, fmt.Errorf(\"domain name is empty\")\n\t}\n\n\tvar err error\n\tformattedName, err = idna.ToASCII(formattedName)\n\tif err != nil {\n\t\treturn Name{}, fmt.Errorf(\"domain name %s is invalid: %w\", s, err)\n\t}\n\n\tif err = Validate(formattedName); err != nil {\n\t\treturn Name{}, fmt.Errorf(\"domain name %s is invalid: %w\", s, err)\n\t}\n\n\trule := publicsuffix.DefaultList.Find(formattedName, publicsuffix.DefaultFindOptions)\n\tif rule == nil {\n\t\treturn Name{}, fmt.Errorf(\"domain name %s is invalid: no rule found\", s)\n\t}\n\n\tcategory := eTLDUndefined\n\tif rule.Private {\n\t\tcategory = eTLDPrivate\n\t} else if len(rule.Value) > 0 {\n\t\t// empty value indicates the default rule\n\t\tcategory = eTLDICANN\n\t}\n\n\tdecomposedName := rule.Decompose(formattedName)\n\tif decomposedName[1] == \"\" {\n\t\t// no TLD found, which means it's already a TLD\n\t\treturn Name{\n\t\t\tlabels: []string{formattedName},\n\t\t\tcategory: category,\n\t\t}, nil\n\t}\n\n\tlabelsNoTDL := strings.TrimSuffix(formattedName, decomposedName[1])\n\tlabelsNoTDL = strings.TrimSuffix(labelsNoTDL, \".\")\n\n\tif len(labelsNoTDL) == 0 {\n\t\treturn Name{\n\t\t\tlabels: []string{decomposedName[1]},\n\t\t\tcategory: category,\n\t\t}, nil\n\t}\n\n\treturn Name{\n\t\tlabels: append(strings.Split(labelsNoTDL, \".\"), decomposedName[1]),\n\t\tcategory: category,\n\t}, nil\n}",
"func stripPort(s string) string {\n\tix := strings.IndexRune(s, ':')\n\tif ix == -1 {\n\t\treturn s\n\t}\n\treturn s[:ix]\n}",
"func (n Node) Hostname() string {\n\tparts := strings.Split(n.Name, \"@\")\n\tif len(parts) >= 2 {\n\t\treturn parts[1]\n\t}\n\treturn \"\"\n}",
"func (o TCPHealthCheckResponsePtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TCPHealthCheckResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.PortName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o HTTPHealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTPHealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}",
"func (s *Server) DSN(suffix string) string {\n\treturn fmt.Sprintf(\"root@tcp(127.0.0.1:%d)/%s\", s.Port, suffix)\n}",
"func shortHostname(hostname string) string {\n\tif i := strings.Index(hostname, \".\"); i >= 0 {\n\t\treturn hostname[:i]\n\t}\n\treturn hostname\n}",
"func shortHostname(hostname string) string {\n\tif i := strings.Index(hostname, \".\"); i >= 0 {\n\t\treturn hostname[:i]\n\t}\n\treturn hostname\n}",
"func extractIPv4(ptr string) string {\n\ts := strings.Replace(ptr, \".in-addr.arpa\", \"\", 1)\n\twords := strings.Split(s, \".\")\n\tfor i, j := 0, len(words)-1; i < j; i, j = i+1, j-1 {\n\t\twords[i], words[j] = words[j], words[i]\n\t}\n\treturn strings.Join(words, \".\")\n}",
"func RemovePort(addressWithPort string) string {\n\thost, _, err := net.SplitHostPort(addressWithPort)\n\tif err != nil {\n\t\tklog.Errorf(\"Split host and port for a service name has an error:%v\\n\", err)\n\t\t// returning the original address instead if the address has a incorrect format\n\t\treturn addressWithPort\n\t}\n\treturn host\n}",
"func DecodeAddr(address []byte) string {\n\tvar stringAddr string\n\tvar ip []byte\n\tvar port []byte\n\n\tip = address[:4]\n\tport = address[4:]\n\n\t// Decode IP\n\tfor index, octet := range ip {\n\t\tstringAddr = stringAddr + strconv.Itoa(int(octet))\n\t\tif index != 3 {\n\t\t\tstringAddr += \".\"\n\t\t}\n\t}\n\tstringAddr += \":\"\n\n\t// Decode Port\n\tb := make([]byte, 8)\n\tfor i := 0; i < 6; i++ {\n\t\tb[i] = byte(0)\n\t}\n\tb[6] = port[0]\n\tb[7] = port[1]\n\tp := binary.BigEndian.Uint64(b)\n\tstringAddr += strconv.FormatUint(p, 10)\n\t//fmt.Println(\"Complete IP:\", stringAddr)\n\treturn stringAddr\n}",
"func lookupDomainName(domainName string) string {\n\tif du, ok := domainUuid[domainName]; ok {\n\t\treturn du\n\t}\n\treturn \"\"\n}",
"func SplitAddress(addr string) (string, int) {\n\ts := strings.Split(addr, \":\")\n\thostname := s[0]\n\tport, _ := strconv.Atoi(s[1])\n\treturn hostname, port\n}",
"func (o HTTP2HealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}",
"func splitHostPort(addr string) (string, int, error) {\n\thost, sPort, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\tlog.Errorf(\"Could not split network address: %v\", err)\n\t\treturn \"\", 0, errors.Wrap(err)\n\t}\n\tport, err := strconv.Atoi(sPort)\n\tif err != nil {\n\t\tlog.Errorf(\"No port number found %v\", err)\n\t\treturn \"\", 0, errors.Wrap(err)\n\t}\n\treturn host, port, nil\n}",
"func Get() (host string, domain string, full string, err error) {\n\thost, err = os.Hostname()\n\tif err != nil {\n\t\treturn\n\t}\n\thost = removeTrailingDot(host)\n\thost, domain = split2(host, '.')\n\tif domain != \"\" {\n\t\tfull = host + \".\" + domain\n\t} else {\n\t\tfull, err = resolveNetFullname(host)\n\t\tif err == nil {\n\t\t\tfull = removeTrailingDot(full)\n\t\t\thost, domain = split2(full, '.')\n\t\t}\n\t}\n\treturn\n}",
"func (o SSLHealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SSLHealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}",
"func splitDockerDomain(name string) (domain, remainder string) {\n\ti := strings.IndexRune(name, '/')\n\tif i == -1 || (!strings.ContainsAny(name[:i], \".:\") && name[:i] != \"localhost\") {\n\t\tdomain, remainder = defaultDomain, name\n\t} else {\n\t\tdomain, remainder = name[:i], name[i+1:]\n\t}\n\tif domain == legacyDefaultDomain {\n\t\tdomain = defaultDomain\n\t}\n\tif domain == defaultDomain && !strings.ContainsRune(remainder, '/') {\n\t\tremainder = officialRepoName + \"/\" + remainder\n\t}\n\treturn\n}",
"func splitDockerDomain(name string) (domain, remainder string) {\n\ti := strings.IndexRune(name, '/')\n\tif i == -1 || (!strings.ContainsAny(name[:i], \".:\") && name[:i] != \"localhost\") {\n\t\tdomain, remainder = defaultDomain, name\n\t} else {\n\t\tdomain, remainder = name[:i], name[i+1:]\n\t}\n\tif domain == legacyDefaultDomain {\n\t\tdomain = defaultDomain\n\t}\n\tif domain == defaultDomain && !strings.ContainsRune(remainder, '/') {\n\t\tremainder = officialRepoName + \"/\" + remainder\n\t}\n\treturn\n}",
"func hostnameForService(svc string) string {\n\n\tparts := strings.Split(svc, \"/\")\n\tif len(parts) < 2 {\n\t\treturn parts[0]\n\t}\n\tif len(parts) > 2 {\n\t\tlog.Printf(\"Malformated service identifier [%s] - Hostname will be truncated\", svc)\n\t}\n\treturn fmt.Sprintf(\"%s.%s\", parts[1], parts[0])\n\n}"
] | [
"0.6603416",
"0.6248293",
"0.61732805",
"0.61518425",
"0.60280794",
"0.59298724",
"0.5926241",
"0.5838026",
"0.5804168",
"0.57999206",
"0.57859087",
"0.5661244",
"0.56175256",
"0.559655",
"0.5595708",
"0.5587074",
"0.5566612",
"0.55407655",
"0.55281454",
"0.5504938",
"0.55014837",
"0.54476506",
"0.5445435",
"0.5429305",
"0.541024",
"0.54022217",
"0.5393858",
"0.53937256",
"0.5377149",
"0.5371813",
"0.53676784",
"0.5345948",
"0.5339239",
"0.53294593",
"0.53161824",
"0.5315227",
"0.52965254",
"0.52807206",
"0.52790046",
"0.5272594",
"0.5272594",
"0.5267359",
"0.5263078",
"0.5256207",
"0.52482736",
"0.52449256",
"0.5244048",
"0.5240452",
"0.5234649",
"0.52339226",
"0.5228818",
"0.5220929",
"0.5220192",
"0.52177763",
"0.5207422",
"0.5205988",
"0.520409",
"0.5202219",
"0.5199763",
"0.51988316",
"0.51981926",
"0.51903987",
"0.51881224",
"0.5181254",
"0.51797265",
"0.5179401",
"0.5174353",
"0.5173946",
"0.51570493",
"0.5151287",
"0.51480293",
"0.5134729",
"0.5134729",
"0.51208085",
"0.51129586",
"0.5112419",
"0.5111724",
"0.51080287",
"0.51045626",
"0.51002204",
"0.50960535",
"0.5087783",
"0.50862175",
"0.50801736",
"0.5079264",
"0.5076809",
"0.50730866",
"0.50730866",
"0.50698197",
"0.5068049",
"0.5067723",
"0.5063858",
"0.5056818",
"0.50492495",
"0.504386",
"0.50406754",
"0.5038285",
"0.5035944",
"0.5035944",
"0.5033655"
] | 0.7793767 | 0 |
Host extracts host from host:port string | func Host(hostname string) (string, error) {
if hostname == "" {
return "", trace.BadParameter("missing parameter hostname")
}
// if this is IPv4 or V6, return as is
if ip := net.ParseIP(hostname); len(ip) != 0 {
return hostname, nil
}
// has no indication of port, return, note that
// it will not break ipv6 as it always has at least one colon
if !strings.Contains(hostname, ":") {
return hostname, nil
}
host, _, err := SplitHostPort(hostname)
if err != nil {
return "", trace.Wrap(err)
}
return host, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func parseHost(host string) string {\n\trealHost, _, _ := net.SplitHostPort(host)\n\tif realHost != \"\" {\n\t\treturn realHost\n\t}\n\treturn host\n}",
"func parseHostPort(str string) (string, string) {\n\tvar (\n\t\thost string\n\t\tport string\n\n\t\ti = strings.Index(str, \":\")\n\t)\n\tif i == -1 {\n\t\treturn str, \"\"\n\t}\n\n\thost = str[:i]\n\tport = str[i+1:]\n\n\treturn host, port\n}",
"func splitHost(host string) string {\n\treturn strings.Split(host, \":\")[0]\n}",
"func parseHost(addr string) string {\n\tvar (\n\t\thost, port string\n\t\tdefaultAssigned bool\n\t)\n\n\tv := strings.Split(addr, \":\")\n\n\tswitch len(v) {\n\tcase 2:\n\t\thost = v[0]\n\t\tport = v[1]\n\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif port == \"\" {\n\t\t\tport = _DEFAULT_PORT\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif defaultAssigned == false {\n\t\t\treturn addr // addr is already in required format\n\t\t}\n\t\tbreak\n\n\tcase 1:\n\t\thost = v[0]\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t}\n\t\tport = _DEFAULT_PORT\n\tcase 0:\n\t\tfallthrough\n\tdefault:\n\t\thost = _DEFAULT_HOST\n\t\tport = _DEFAULT_PORT\n\t\tbreak\n\t}\n\treturn strings.Join([]string{host, port}, \":\")\n}",
"func HostPort(urlStr string) (string, error) {\n\t// TODO: rename this function to URLHostPort instead, like\n\t// ListenHostPort below.\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not parse %q as a url: %v\", urlStr, err)\n\t}\n\tif u.Scheme == \"\" {\n\t\treturn \"\", fmt.Errorf(\"url %q has no scheme\", urlStr)\n\t}\n\thostPort := u.Host\n\tif hostPort == \"\" || strings.HasPrefix(hostPort, \":\") {\n\t\treturn \"\", fmt.Errorf(\"url %q has no host\", urlStr)\n\t}\n\tidx := strings.Index(hostPort, \"]\")\n\tif idx == -1 {\n\t\tidx = 0\n\t}\n\tif !strings.Contains(hostPort[idx:], \":\") {\n\t\tif u.Scheme == \"https\" {\n\t\t\thostPort += \":443\"\n\t\t} else {\n\t\t\thostPort += \":80\"\n\t\t}\n\t}\n\treturn hostPort, nil\n}",
"func addrToHost(addr string) string {\n\treturn strings.Split(addr, \":\")[0]\n}",
"func (p Plugin) hostPort(host string) (string, string) {\n\t// Split the host string by colon (\":\") to get the host and port\n\thosts := strings.Split(host, \":\")\n\t// Get the default port from the Plugin's Config field\n\tport := p.Config.Port\n\t// If the host string contains a port (i.e. it has more than one element after splitting), set the port to that value\n\tif len(hosts) > 1 {\n\t\thost = hosts[0]\n\t\tport = hosts[1]\n\t}\n\n\t// Return the host and port as separate strings\n\treturn host, port\n}",
"func getHost(host_url string) (host string, err error) {\n\tu, err := url.Parse(host_url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.Split(u.Host, \":\")[0], nil\n}",
"func ParseHost(s string) (*Host, error) {\n\tisValidHost := func(host string) bool {\n\t\tif host == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif ip := net.ParseIP(host); ip != nil {\n\t\t\treturn true\n\t\t}\n\n\t\t// host is not a valid IPv4 or IPv6 address\n\t\t// host may be a hostname\n\t\t// refer https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names\n\t\t// why checks are done like below\n\t\tif len(host) < 1 || len(host) > 253 {\n\t\t\treturn false\n\t\t}\n\n\t\tfor _, label := range strings.Split(host, \".\") {\n\t\t\tif len(label) < 1 || len(label) > 63 {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tif !hostLabelRegexp.MatchString(label) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tvar port Port\n\tvar isPortSet bool\n\thost, portStr, err := net.SplitHostPort(s)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"missing port in address\") {\n\t\t\treturn nil, err\n\t\t}\n\t\thost = s\n\t\tportStr = \"\"\n\t} else {\n\t\tif port, err = ParsePort(portStr); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tisPortSet = true\n\t}\n\n\tif host != \"\" {\n\t\thost, err = trimIPv6(host)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// IPv6 requires a link-local address on every network interface.\n\t// `%interface` should be preserved.\n\ttrimmedHost := host\n\n\tif i := strings.LastIndex(trimmedHost, \"%\"); i > -1 {\n\t\t// `%interface` can be skipped for validity check though.\n\t\ttrimmedHost = trimmedHost[:i]\n\t}\n\n\tif !isValidHost(trimmedHost) {\n\t\treturn nil, errors.New(\"invalid hostname\")\n\t}\n\n\treturn &Host{\n\t\tName: host,\n\t\tPort: port,\n\t\tIsPortSet: isPortSet,\n\t}, nil\n}",
"func ExtractHost(address string) string {\n\thost, _, _ := net.SplitHostPort(address)\n\tif host == \"\" {\n\t\treturn \"localhost\"\n\t}\n\treturn host\n}",
"func hostname(hostport string) (string, error) {\n\thost, _, err := net.SplitHostPort(hostport)\n\treturn host, err\n}",
"func HostWithoutPort(s string) string {\n\tif strings.Contains(s, \":\") {\n\t\treturn strings.Split(s, \":\")[0]\n\t}\n\treturn s\n}",
"func HostNameandPort(node string) (host, port string, ipv6 bool, err error) {\n\ttokens := []string{}\n\n\t// Set _IPv6 based on input address\n\tipv6, err = IsIPv6(node)\n\n\tif err != nil {\n\t\treturn \"\", \"\", false, err\n\t}\n\n\terr = nil\n\t// For IPv6\n\tif ipv6 {\n\t\t// Then the url should be of the form [::1]:8091\n\t\ttokens = strings.Split(node, \"]:\")\n\t\thost = strings.Replace(tokens[0], \"[\", \"\", 1)\n\n\t} else {\n\t\t// For IPv4\n\t\ttokens = strings.Split(node, \":\")\n\t\thost = tokens[0]\n\t}\n\n\tif len(tokens) == 2 {\n\t\tport = tokens[1]\n\t} else {\n\t\tport = \"\"\n\t}\n\n\treturn\n}",
"func HostPort(urlStr string) (string, error) {\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not parse %q as a url: %v\", urlStr, err)\n\t}\n\tif u.Scheme == \"\" {\n\t\treturn \"\", fmt.Errorf(\"url %q has no scheme\", urlStr)\n\t}\n\thostPort := u.Host\n\tif hostPort == \"\" || strings.HasPrefix(hostPort, \":\") {\n\t\treturn \"\", fmt.Errorf(\"url %q has no host\", urlStr)\n\t}\n\tidx := strings.Index(hostPort, \"]\")\n\tif idx == -1 {\n\t\tidx = 0\n\t}\n\tif !strings.Contains(hostPort[idx:], \":\") {\n\t\tif u.Scheme == \"https\" {\n\t\t\thostPort += \":443\"\n\t\t} else {\n\t\t\thostPort += \":80\"\n\t\t}\n\t}\n\treturn hostPort, nil\n}",
"func LooselyGetHost(hostport string) string {\n\thoststart, hostend := 0, 0\n\tif len(hostport) >= 1 && hostport[0] == '[' {\n\t\thoststart = 1\n\t\thostend = strings.IndexByte(hostport, ']')\n\t} else {\n\t\thostend = strings.IndexByte(hostport, ':')\n\t}\n\tif hostend < 0 {\n\t\thostend = len(hostport)\n\t}\n\treturn hostport[hoststart:hostend]\n}",
"func getHostNameAndPort(hostInfo string) (string, int, error) {\n\thost := strings.SplitN(hostInfo, \":\", -1)\n\tif len(host) != 2 {\n\t\treturn \"\", 0, fmt.Errorf(\"expected hostname:port, got %s\", host)\n\t}\n\n\tport, err := strconv.Atoi(host[1])\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"invalid port number, got %s\", host[1])\n\t}\n\n\treturn host[0], port, nil\n}",
"func ParseHost(host string) (string, string, string, error) {\n\tprotoAddrParts := strings.SplitN(host, \"://\", 2)\n\tif len(protoAddrParts) == 1 {\n\t\treturn \"\", \"\", \"\", fmt.Errorf(\"unable to parse docker host `%s`\", host)\n\t}\n\n\tvar basePath string\n\tproto, addr := protoAddrParts[0], protoAddrParts[1]\n\tif proto == \"tcp\" {\n\t\tparsed, err := url.Parse(\"tcp://\" + addr)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\t\taddr = parsed.Host\n\t\tbasePath = parsed.Path\n\t}\n\treturn proto, addr, basePath, nil\n}",
"func ParseHost(host string) (string, string, string, error) {\n\tprotoAddrParts := strings.SplitN(host, \"://\", 2)\n\tif len(protoAddrParts) == 1 {\n\t\treturn \"\", \"\", \"\", fmt.Errorf(\"unable to parse storm host `%s`\", host)\n\t}\n\n\tvar basePath string\n\tproto, addr := protoAddrParts[0], protoAddrParts[1]\n\tif proto == \"tcp\" {\n\t\tparsed, err := url.Parse(\"tcp://\" + addr)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\t\taddr = parsed.Host\n\t\tbasePath = parsed.Path\n\t}\n\treturn proto, addr, basePath, nil\n}",
"func getHost(r *http.Request) string {\n\tif r.URL.IsAbs() {\n\t\thost := r.Host\n\t\t// Slice off any port information.\n\t\tif i := strings.Index(host, \":\"); i != -1 {\n\t\t\thost = host[:i]\n\t\t}\n\t\treturn host\n\t}\n\treturn r.URL.Host\n}",
"func extractHost(cfg *config.Config, r *http.Request) string {\n\tif cfg.Host != \"\" {\n\t\treturn cfg.Host\n\t}\n\n\treturn fmt.Sprintf(\"http://%s\", r.Host)\n}",
"func SplitHostPort(hostport string) (host, port string, err error) {\n\taddrErr := func(addr, why string) (host, port string, err error) {\n\t\treturn \"\", \"\", &net.AddrError{Err: why, Addr: addr}\n\t}\n\n\thoststart, hostend := 0, 0\n\tportstart := len(hostport)\n\tif len(hostport) >= 1 && hostport[0] == '[' {\n\t\thoststart = 1\n\t\thostend = strings.IndexByte(hostport, ']')\n\t\tif hostend < 0 {\n\t\t\treturn addrErr(hostport, \"missing ']' in address\")\n\t\t}\n\t\tportstart = hostend + 1\n\t} else {\n\t\thostend = strings.IndexByte(hostport, ':')\n\t\tif hostend < 0 {\n\t\t\thostend = len(hostport)\n\t\t}\n\t\tportstart = hostend\n\t}\n\tif portstart < len(hostport) {\n\t\tif hostport[portstart] != ':' {\n\t\t\treturn addrErr(hostport, \"invalid character at the end of address, expecting ':'\")\n\t\t}\n\t\tportstart += 1\n\t}\n\n\tport = hostport[portstart:]\n\thost = hostport[hoststart:hostend]\n\n\tif strings.IndexByte(port, ':') >= 0 {\n\t\treturn addrErr(hostport, \"too many colons in suspected port number\")\n\t}\n\tif strings.IndexByte(port, ']') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected ']' in port\")\n\t}\n\tif strings.IndexByte(port, '[') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected '[' in port\")\n\t}\n\tif strings.IndexByte(host, '[') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected '[' in host\")\n\t}\n\tif strings.IndexByte(host, ']') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected ']' in host\")\n\t}\n\n\treturn host, port, nil\n}",
"func stripHostPort(h string) string {\n\t// If no port on host, return unchanged\n\tif !strings.Contains(h, \":\") {\n\t\treturn h\n\t}\n\thost, _, err := net.SplitHostPort(h)\n\tif err != nil {\n\t\treturn h // on error, return unchanged\n\t}\n\treturn host\n}",
"func ParseFriendlyHost(source string) (host *FriendlyHost, rest string) {\n\tparts := strings.SplitN(source, \"/\", 2)\n\n\tif hostRe.MatchString(parts[0]) {\n\t\thost = &FriendlyHost{Raw: parts[0]}\n\t\tif len(parts) == 2 {\n\t\t\trest = parts[1]\n\t\t}\n\t\treturn\n\t}\n\n\t// No match, return whole string as rest along with nil host\n\trest = source\n\treturn\n}",
"func (b *Bogus) HostPort() (string, string) {\n\th, p, _ := net.SplitHostPort(b.server.URL[7:])\n\treturn h, p\n}",
"func (s *serverCGI) extractHost(method string) (string, string, int) {\n\treg := regexp.MustCompile(\"^\" + method + `/([^\\+]*)(\\+.*)`)\n\tm := reg.FindStringSubmatch(s.path())\n\tif m == nil {\n\t\tlog.Println(\"illegal url\")\n\t\treturn \"\", \"\", 0\n\t}\n\tpath := m[2]\n\thost, portstr, err := net.SplitHostPort(m[1])\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\", \"\", 0\n\t}\n\tport, err := strconv.Atoi(portstr)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\", \"\", 0\n\t}\n\treturn host, path, port\n}",
"func parsehostpath(hostpath string) (host string, path string) {\n\tsplits := strings.Split(hostpath, \"/\")\n\thost = splits[0]\n\tpath = strings.Join(splits[1:len(splits)], \"/\")\n\tpath = \"/\" + path\n\treturn\n}",
"func getHost(httplink string) string {\n\tu, err := url.Parse(httplink)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn u.Host\n}",
"func GetHostName(hostAddr string) string {\n\treturn strings.Split(hostAddr, base.UrlPortNumberDelimiter)[0]\n}",
"func SplitHostPort(hostport string) (host, port string, err error) {\n\tj, k := 0, 0\n\n\t// The port starts after the last colon.\n\ti := last(hostport, ':')\n\tif i < 0 {\n\t\tgoto missingPort\n\t}\n\n\tif hostport[0] == '[' {\n\t\t// Expect the first ']' just before the last ':'.\n\t\tend := byteIndex(hostport, ']')\n\t\tif end < 0 {\n\t\t\terr = &AddrError{\"missing ']' in address\", hostport}\n\t\t\treturn\n\t\t}\n\t\tswitch end + 1 {\n\t\tcase len(hostport):\n\t\t\t// There can't be a ':' behind the ']' now.\n\t\t\tgoto missingPort\n\t\tcase i:\n\t\t\t// The expected result.\n\t\tdefault:\n\t\t\t// Either ']' isn't followed by a colon, or it is\n\t\t\t// followed by a colon that is not the last one.\n\t\t\tif hostport[end+1] == ':' {\n\t\t\t\tgoto tooManyColons\n\t\t\t}\n\t\t\tgoto missingPort\n\t\t}\n\t\thost = hostport[1:end]\n\t\tj, k = 1, end+1 // there can't be a '[' resp. ']' before these positions\n\t} else {\n\t\thost = hostport[:i]\n\t\tif byteIndex(host, ':') >= 0 {\n\t\t\tgoto tooManyColons\n\t\t}\n\t\tif byteIndex(host, '%') >= 0 {\n\t\t\tgoto missingBrackets\n\t\t}\n\t}\n\tif byteIndex(hostport[j:], '[') >= 0 {\n\t\terr = &AddrError{\"unexpected '[' in address\", hostport}\n\t\treturn\n\t}\n\tif byteIndex(hostport[k:], ']') >= 0 {\n\t\terr = &AddrError{\"unexpected ']' in address\", hostport}\n\t\treturn\n\t}\n\n\tport = hostport[i+1:]\n\treturn\n\nmissingPort:\n\terr = &AddrError{\"missing port in address\", hostport}\n\treturn\n\ntooManyColons:\n\terr = &AddrError{\"too many colons in address\", hostport}\n\treturn\n\nmissingBrackets:\n\terr = &AddrError{\"missing brackets in address\", hostport}\n\treturn\n}",
"func (h *HostInfo) ParseHostWithPort(host string, isHTTPS bool) {\n\thasPortFuncByte := func(host string) bool {\n\t\treturn strings.LastIndexByte(host, ':') >\n\t\t\tstrings.LastIndexByte(host, ']')\n\t}\n\tif len(host) == 0 {\n\t\treturn\n\t}\n\n\t// separate domain and port\n\tif !hasPortFuncByte(host) {\n\t\th.domain = host\n\t\tif isHTTPS {\n\t\t\th.port = \"443\"\n\t\t} else {\n\t\t\th.port = \"80\"\n\t\t}\n\t} else {\n\t\tvar err error\n\t\th.domain, h.port, err = net.SplitHostPort(host)\n\t\tif err != nil {\n\t\t\th.reset()\n\t\t\treturn\n\t\t}\n\t}\n\tif len(h.domain) == 0 {\n\t\treturn\n\t}\n\n\t// determine whether the given domain is already an IP Address\n\tip := net.ParseIP(h.domain)\n\tif ip != nil {\n\t\th.ip = ip\n\t}\n\n\t// host and target with port\n\th.hostWithPort = h.domain + \":\" + h.port\n\th.targetWithPort = h.hostWithPort\n}",
"func Hostname() (string, error)",
"func stripHostPort(host string) string {\n\t// If no port on host, return unchanged\n\tif strings.IndexByte(host, ':') == -1 {\n\t\treturn host\n\t}\n\n\th, _, err := net.SplitHostPort(host)\n\tif err != nil {\n\t\treturn host // on error, return unchanged\n\t}\n\n\treturn h\n}",
"func parseHostAndPath(s string) (string, string) {\n\ttoks := strings.SplitN(s, \":\", 2)\n\n\tif len(toks) > 1 {\n\t\treturn toks[0], toks[1]\n\t}\n\n\treturn s, \"./\"\n}",
"func HostPort(addr string, port interface{}) string {\n\thost := addr\n\tif strings.Count(addr, \":\") > 0 {\n\t\thost = fmt.Sprintf(\"[%s]\", addr)\n\t}\n\t// TODO check for NATS case\n\tif v, ok := port.(string); ok {\n\t\tif v == \"\" {\n\t\t\treturn fmt.Sprintf(\"%s\", host)\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"%s:%v\", host, port)\n}",
"func getHost(line string) (string, error) {\n\tsplit := strings.Split(line, \" \")\n\tif len(split) == 1 {\n\t\t// plain domain list format\n\t\treturn split[0], nil\n\t} else if len(split) == 2 {\n\t\t// hosts file format\n\t\treturn split[1], nil\n\t} else {\n\t\treturn \"\", errParseHosts\n\t}\n}",
"func GetHostAddr(hostName string, port uint16) string {\n\treturn hostName + base.UrlPortNumberDelimiter + strconv.FormatInt(int64(port), base.ParseIntBase)\n}",
"func ExtractHostName(urlStr string) (HostNames, error) {\n\thn := &HostNames{\n\t\tURL: \"\",\n\t\tHostName: \"\",\n\t}\n\n\tu, err := url.Parse(urlStr)\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn *hn, err\n\t}\n\n\tisSchema, err := IsSchema(urlStr)\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn *hn, err\n\t}\n\n\tif u.Hostname() != \"\" && true == isSchema {\n\t\thn.URL = u.Scheme + \"://\" + u.Hostname()\n\t\thn.HostName = u.Hostname()\n\t}\n\n\treturn *hn, nil\n}",
"func (u URL) Host() string {\n\thost, _, err := net.SplitHostPort(u.URL.Host)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn host\n}",
"func splitHostPort(hostport string) (host string, port int) {\n\tport = -1\n\n\tif strings.HasPrefix(hostport, \"[\") {\n\t\taddrEnd := strings.LastIndex(hostport, \"]\")\n\t\tif addrEnd < 0 {\n\t\t\t// Invalid hostport.\n\t\t\treturn\n\t\t}\n\t\tif i := strings.LastIndex(hostport[addrEnd:], \":\"); i < 0 {\n\t\t\thost = hostport[1:addrEnd]\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif i := strings.LastIndex(hostport, \":\"); i < 0 {\n\t\t\thost = hostport\n\t\t\treturn\n\t\t}\n\t}\n\n\thost, pStr, err := net.SplitHostPort(hostport)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tp, err := strconv.ParseUint(pStr, 10, 16)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn host, int(p)\n}",
"func RemovePortFromHost(host string) string {\n\tif !containIPv6Addr(host) {\n\t\treturn strings.Split(host, \":\")[0]\n\t}\n\tif containPortIPv6(host) {\n\t\thost = host[:strings.LastIndexByte(host, ':')]\n\t}\n\treturn strings.Trim(host, \"[]\")\n}",
"func GetHost(URLString string) string {\n\tu, err := url.Parse(URLString)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn u.Hostname()\n}",
"func parseHost(host string) string {\n\tif isVar(host) {\n\t\tif strings.Contains(host, \"*\") {\n\t\t\thost = strings.Replace(host, \"*\", `[\\S]*`, -1)\n\t\t}\n\t\thost = strings.Replace(host, \".\", `\\.`, -1)\n\t\thost = \"^\" + host + \"$\"\n\t}\n\treturn host\n}",
"func Host(k string) string {\n\tkey := strings.TrimPrefix(k, \"/\")\n\tkey = strings.TrimSuffix(key, \"/\")\n\tkeys := strings.Split(key, \"/\")\n\thost := \"\"\n\tfor i, w := range keys {\n\t\tif i < (len(keys) - 1) {\n\t\t\thost += w + \"/\"\n\t\t} else {\n\t\t\tsplitKeys := strings.Split(strings.TrimSuffix(host, \"/\"), \"/\")\n\t\t\tif splitKeys[len(splitKeys)-1] != \"hosts\" {\n\t\t\t\thost += \"/hosts\"\n\t\t\t}\n\t\t}\n\t}\n\thost = strings.TrimSuffix(host, \"/\")\n\treturn host\n}",
"func GetHostURL(hostName string, port uint16) string {\n\treturn fmt.Sprintf(\"%v:%v\", hostName, port)\n}",
"func GetHostname(addr string) string {\n\treturn strings.Split(addr, \":\")[0]\n}",
"func containerAddrForHost(u url.URL) (string, *url.URL, error) {\n\thostname := u.Host\n\tif strings.Contains(u.Host, \":\") {\n\t\tvar err error\n\t\thostname, _, err = net.SplitHostPort(hostname)\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\t}\n\n\tif hostname == \"localhost\" {\n\t\tcontainerHostname, err := dockerutil.ContainerHost()\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\t\thostname = containerHostname\n\t\tu.Host = strings.Replace(u.Host, \"localhost\", containerHostname, 1)\n\t}\n\n\treturn hostname, &u, nil\n}",
"func (c *tomlConfig) Host() string {\n\tparts := strings.Split(c.Server.HTTPAddress, \":\")\n\thost := c.Server.Host\n\tif len(parts) > 1 {\n\t\thost = host + \":\" + parts[len(parts)-1]\n\t}\n\treturn host\n}",
"func GetHost(loc string, www, decode bool) (string, error) {\n\tparsed, err := url.Parse(loc)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thost, _, err := net.SplitHostPort(parsed.Host)\n\tif err != nil {\n\t\thost = parsed.Host\n\t}\n\tif www {\n\t\tre := regexp.MustCompile(`^www\\.`)\n\t\thost = re.ReplaceAllString(host, \"\")\n\t}\n\n\tif decode {\n\t\treturn idna.ToASCII(host)\n\t}\n\n\treturn host, nil\n}",
"func parseHostPort(hostPort string, defaultPort int) (host string, port int, err error) {\n\tif hostPort != \"\" {\n\t\thost, sPort, err := net.SplitHostPort(hostPort)\n\t\tif ae, ok := err.(*net.AddrError); ok && strings.Contains(ae.Err, \"missing port\") {\n\t\t\t// try appending the current port\n\t\t\thost, sPort, err = net.SplitHostPort(fmt.Sprintf(\"%s:%d\", hostPort, defaultPort))\n\t\t}\n\t\tif err != nil {\n\t\t\treturn \"\", -1, err\n\t\t}\n\t\tport, err = strconv.Atoi(strings.TrimSpace(sPort))\n\t\tif err != nil {\n\t\t\treturn \"\", -1, err\n\t\t}\n\t\tif port == 0 || port == -1 {\n\t\t\tport = defaultPort\n\t\t}\n\t\treturn strings.TrimSpace(host), port, nil\n\t}\n\treturn \"\", -1, errors.New(\"no hostport specified\")\n}",
"func splitHostPort(addr string) (string, int, error) {\n\thost, sPort, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\tlog.Errorf(\"Could not split network address: %v\", err)\n\t\treturn \"\", 0, errors.Wrap(err)\n\t}\n\tport, err := strconv.Atoi(sPort)\n\tif err != nil {\n\t\tlog.Errorf(\"No port number found %v\", err)\n\t\treturn \"\", 0, errors.Wrap(err)\n\t}\n\treturn host, port, nil\n}",
"func canonicalHost(host string) string {\n\t// If no port is present in the server, we assume port 2424\n\t_, _, err := net.SplitHostPort(host)\n\tif err != nil {\n\t\thost += \":2424\"\n\t}\n\n\t// if no protocol is given, assume https://\n\tif !strings.Contains(host, \"://\") {\n\t\thost = \"https://\" + host\n\t}\n\n\treturn host\n}",
"func withoutPort(addr string) string {\n\tif h, _, err := net.SplitHostPort(addr); err == nil {\n\t\treturn h\n\t}\n\treturn addr\n}",
"func host(u *url.URL) string {\n\tvar buf bytes.Buffer\n\tif u.Scheme != \"\" {\n\t\tbuf.WriteString(u.Scheme)\n\t\tbuf.WriteByte(':')\n\t}\n\tif u.Opaque != \"\" {\n\t\tbuf.WriteString(u.Opaque)\n\t} else {\n\t\tif u.Scheme != \"\" || u.Host != \"\" || u.User != nil {\n\t\t\tbuf.WriteString(\"//\")\n\t\t\tif ui := u.User; ui != nil {\n\t\t\t\tbuf.WriteString(ui.String())\n\t\t\t\tbuf.WriteByte('@')\n\t\t\t}\n\t\t\tif h := u.Host; h != \"\" {\n\t\t\t\tbuf.WriteString(h)\n\t\t\t}\n\t\t}\n\t}\n\treturn buf.String()\n}",
"func ServiceAndPort(host string) (string, string) {\n\tsp := strings.Split(host, \":\")\n\tif len(sp) <= 1 {\n\t\treturn host, \"0\"\n\t}\n\tss := strings.Split(sp[0], \".\")\n\tif len(ss) <= 1 {\n\t\treturn sp[0], sp[1]\n\t}\n\treturn ss[0], sp[1]\n}",
"func ExtractPort(host string) int {\n\t_, port, _ := net.SplitHostPort(host)\n\tif port == \"\" {\n\t\treturn 80\n\t}\n\tportInt, _ := strconv.Atoi(port)\n\treturn portInt\n}",
"func SplitHostPort(hostPort string, defaultPort int) (string, int) {\n\thostPort = strings.Replace(hostPort, \"[::]\", \"0.0.0.0\", -1)\n\tparts := strings.Split(hostPort, \":\")\n\tupstreamHost := parts[0]\n\tupstreamPort := defaultPort\n\tif len(parts) > 1 {\n\t\tvar err error\n\t\tupstreamPort, err = strconv.Atoi(parts[1])\n\t\tif err != nil {\n\t\t\tupstreamPort = defaultPort\n\t\t\tfmt.Printf(\"Error converting port to int for %s : %s\", upstreamHost, err)\n\t\t}\n\t}\n\treturn upstreamHost, upstreamPort\n}",
"func looksLikeHostport(s string) bool {\n\tif !strings.Contains(s, \":\") {\n\t\treturn false\n\t} else {\n\t\thostport := strings.SplitN(s, \":\", 2)\n\t\tif _, err := regexp.MatchString(\"[0-9]+\", hostport[1]); err == nil {\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n}",
"func stripPort(hostportURL *url.URL) string {\n\tvar hostport string\n\tif hostportURL.Host != \"\" {\n\t\thostport = hostportURL.Host\n\t} else {\n\t\thostport = hostportURL.String()\n\t}\n\n\tcolon := strings.IndexByte(hostport, ':')\n\tif colon == -1 {\n\t\treturn hostport\n\t}\n\tif i := strings.IndexByte(hostport, ']'); i != -1 {\n\t\treturn strings.TrimPrefix(hostport[:i], \"[\")\n\t}\n\treturn hostport[:colon]\n}",
"func HostOnly(addr string) string {\n\thost, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn addr\n\t} else {\n\t\treturn host\n\t}\n}",
"func (c *NetConv) Host(address string) []attribute.KeyValue {\n\th, p := splitHostPort(address)\n\tvar n int\n\tif h != \"\" {\n\t\tn++\n\t\tif p > 0 {\n\t\t\tn++\n\t\t}\n\t}\n\n\tif n == 0 {\n\t\treturn nil\n\t}\n\n\tattrs := make([]attribute.KeyValue, 0, n)\n\tattrs = append(attrs, c.HostName(h))\n\tif p > 0 {\n\t\tattrs = append(attrs, c.HostPort(int(p)))\n\t}\n\treturn attrs\n}",
"func hosten(dom string) string {\n\tdom = strings.TrimSpace(dom)\n\tvar domain string\n\tif strings.HasPrefix(dom, \"http:\") ||\n\t\tstrings.HasPrefix(dom, \"https:\") {\n\t\tdmt, err := url.Parse(dom)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdomain = dmt.Host\n\t} else {\n\t\tdomain = dom\n\t}\n\treturn domain\n}",
"func Host(h string) {\n\thost = h\n}",
"func fetchFromURL(rawURL string) (host, port string, err error) {\n\tparsed, err := url.Parse(rawURL)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn parsed.Hostname(), parsed.Port(), nil\n}",
"func (_this *URL) Host() string {\n\tvar ret string\n\tvalue := _this.Value_JS.Get(\"host\")\n\tret = (value).String()\n\treturn ret\n}",
"func getPortFromURL(defaultValue, data string) string {\n\tval := defaultValue\n\tfirstColon := strings.Index(data, \":\")\n\tif firstColon != -1 {\n\t\tsecondColon := strings.Index(data[firstColon+1:], \":\")\n\t\tnextForwardSlash := strings.Index(data[firstColon+secondColon+1:], \"/\")\n\t\tendOfLine := strings.Index(data[firstColon+secondColon+1:], \"\\n\")\n\t\tif nextForwardSlash != -1 {\n\t\t\tendOfLine = nextForwardSlash\n\t\t}\n\t\tif endOfLine == -1 {\n\t\t\tval = data[firstColon+secondColon:]\n\t\t} else {\n\t\t\tval = data[firstColon+secondColon : firstColon+secondColon+endOfLine]\n\t\t}\n\n\t}\n\treturn val\n}",
"func parseHostname(hostname string) (string, error) {\n\t// TODO does the hostname even need to be parsed?\n\treturn hostname, nil\n}",
"func (h *WLSHandler) getHost(r *http.Request) string {\n\tif r.Host == \"\" {\n\t\tklog.Warning(\"the request does not contain a host header\")\n\t\treturn \"\"\n\t}\n\thostPieces := strings.Split(r.Host, \":\")\n\treturn hostPieces[0]\n}",
"func SplitHostPort(hostname string) (string, string, error) {\n\thost, port, err := net.SplitHostPort(hostname)\n\tif err != nil {\n\t\treturn \"\", \"\", trace.Wrap(err)\n\t}\n\tif host == \"\" {\n\t\treturn \"\", \"\", trace.BadParameter(\"empty hostname\")\n\t}\n\treturn host, port, nil\n}",
"func splitHostPortGraceful(hostPort string) (host, port string, err error) {\n\thost, port, err = net.SplitHostPort(hostPort)\n\tif err != nil {\n\t\tif addrErr, ok := err.(*net.AddrError); ok && addrErr.Err == \"missing port in address\" {\n\t\t\treturn hostPort, port, nil\n\t\t} else {\n\t\t\tif log.DefaultLogger.GetLogLevel() >= log.DEBUG {\n\t\t\t\tmsg := fmt.Sprintf(\"host invalid : %s, error: %v\", hostPort, err)\n\t\t\t\tlog.DefaultLogger.Debugf(RouterLogFormat, \"routers\", \"SplitHostPortGraceful\", msg)\n\t\t\t}\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\treturn host, port, nil\n}",
"func parseAddr(addr string) (string, string) {\n\tparsed := strings.SplitN(addr, \":\", 2)\n\treturn parsed[0], parsed[1]\n}",
"func (o HTTP2HealthCheckOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheck) *string { return v.Host }).(pulumi.StringPtrOutput)\n}",
"func ensureHostport(scheme string, host string) string {\n\tif strings.Index(host, \":\") > 0 {\n\t\treturn host\n\t}\n\tif scheme == \"https\" {\n\t\treturn host + \":443\"\n\t}\n\tif scheme == \"http\" {\n\t\treturn host + \":80\"\n\t}\n\tlog.Println(\"wtt not supported\", scheme, host)\n\treturn host\n}",
"func stripPort(rawurl string) string {\n\tu, err := url.Parse(rawurl)\n\tif err != nil {\n\t\treturn rawurl\n\t}\n\tu.Host = u.Hostname()\n\treturn u.String()\n}",
"func (node *Node) Host() string {\n\treturn node.IP().String() + fmt.Sprintf(\":%d\", node.Port())\n}",
"func (r *Bitbucket) GetHost() string {\n\turi, _ := url.Parse(r.URL)\n\treturn uri.Host\n}",
"func formatHost(host string) string {\n\tswitch h := strings.ToLower(host); h {\n\tcase \"github.com\":\n\t\treturn \"GitHub\"\n\tcase \"gitlab.com\":\n\t\treturn \"GitLab\"\n\tcase \"bitbucket.org\":\n\t\treturn \"Bitbucket\"\n\tdefault:\n\t\treturn host\n\t}\n}",
"func extractHostsFromLabel(label string) []string {\n\tlabel = strings.Replace(label, \"Host:\", \"\", -1)\n\tlabel = strings.Replace(label, \",\", \" \", -1)\n\n\treturn strings.Split(label, \" \")\n}",
"func SplitAddress(addr string) (string, int) {\n\ts := strings.Split(addr, \":\")\n\thostname := s[0]\n\tport, _ := strconv.Atoi(s[1])\n\treturn hostname, port\n}",
"func extractIPAndPortFromAddresses(addresses []string) (string, string) {\n\tfor _, addr := range addresses {\n\t\taddrParts := strings.SplitN(addr, \"://\", 2)\n\t\tif len(addrParts) != 2 {\n\t\t\tlogrus.Errorf(\"invalid listening address %s: must be in format [protocol]://[address]\", addr)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch addrParts[0] {\n\t\tcase \"tcp\":\n\t\t\thost, port, err := net.SplitHostPort(addrParts[1])\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"failed to split host and port from address: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn host, port\n\t\tcase \"unix\":\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tlogrus.Errorf(\"only unix socket or tcp address is support\")\n\t\t}\n\t}\n\treturn \"\", \"\"\n}",
"func parseIPv6Host(input string, start int) (*utils.NetAddr, int, error) {\n\thostStr := input[start:]\n\t// if there is only one ':' in the entire input, the host isn't\n\t// an IPv6 address\n\tif strings.Count(hostStr, \":\") == 1 {\n\t\treturn nil, 0, trace.BadParameter(\"%q has an invalid host, host cannot contain '[' unless it is an IPv6 address\", input)\n\t}\n\t// if there's no closing ']', this isn't a valid IPv6 address\n\trbraceIdx := strings.Index(hostStr, \"]\")\n\tif rbraceIdx == -1 {\n\t\treturn nil, 0, trace.BadParameter(\"%q has an invalid host, host cannot contain '[' or ':' unless it is an IPv6 address\", input)\n\t}\n\t// if there's nothing after ']' then the path is missing\n\tif len(hostStr) <= rbraceIdx+2 {\n\t\treturn nil, 0, trace.BadParameter(\"%q is missing a path, use form [user@]host:[path]\", input)\n\t}\n\n\tmaybeAddr := hostStr[:rbraceIdx+1]\n\thost, err := utils.ParseAddr(maybeAddr)\n\tif err != nil {\n\t\treturn nil, 0, trace.Wrap(err)\n\t}\n\n\t// the host ends after the login + the IPv6 address\n\t// (including the trailing ']') and a ':'\n\treturn host, start + rbraceIdx + 1 + 1, nil\n}",
"func stripPort(s string) string {\n\tix := strings.IndexRune(s, ':')\n\tif ix == -1 {\n\t\treturn s\n\t}\n\treturn s[:ix]\n}",
"func (URI) CleanHost(host string) string {\n\tmatches := hostPattern.FindAllStringSubmatch(host, -1)\n\tif len(matches) != 1 {\n\t\tpanic(errInvalidHost)\n\t}\n\tkeys := matches[0][1:]\n\tif keys[0] == \"\" {\n\t\tkeys[0] = \"http://\"\n\t}\n\treturn strings.Join(keys, \"\")\n}",
"func (o HTTP2HealthCheckPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *HTTP2HealthCheck) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o UrlMapTestOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v UrlMapTest) *string { return v.Host }).(pulumi.StringPtrOutput)\n}",
"func ParsePuntToHostKey(key string) (l3Proto L3Protocol, l4Proto L4Protocol, port uint32, isPuntToHostKey bool) {\n\tif strings.HasPrefix(key, PrefixToHost) {\n\t\tkeySuffix := strings.TrimPrefix(key, PrefixToHost)\n\t\tpuntComps := strings.Split(keySuffix, \"/\")\n\t\tif len(puntComps) == 6 {\n\t\t\tl3Proto := L3Protocol_value[puntComps[1]]\n\t\t\tl4Proto := L4Protocol_value[puntComps[3]]\n\t\t\tkeyPort, err := strconv.Atoi(puntComps[5])\n\t\t\tif err != nil {\n\t\t\t\t// Keep port at zero value\n\t\t\t}\n\t\t\treturn L3Protocol(l3Proto), L4Protocol(l4Proto), uint32(keyPort), true\n\t\t}\n\t}\n\treturn L3Protocol_UNDEFINED_L3, L4Protocol_UNDEFINED_L4, 0, false\n}",
"func HostToService(host string, externalApex string) (string, error) {\n\t// First, prepend a \".\" to root domain\n\texternalApex = \".\" + strings.ToLower(externalApex)\n\n\t// For safety, set host to lowercase\n\thost = strings.ToLower(host)\n\n\t// Host may contain a port, so chop it off\n\tcolonIndex := strings.Index(host, \":\")\n\tif colonIndex > -1 {\n\t\thost = host[:colonIndex]\n\t}\n\n\t// Check that host ends with subdomain\n\tif len(host) <= len(externalApex) {\n\t\treturn \"\", fmt.Errorf(\"Host is less than root domain length\")\n\t}\n\n\tsubdomainLength := len(host) - len(externalApex)\n\tif host[subdomainLength:] != externalApex {\n\t\treturn \"\", fmt.Errorf(\"Does not contain root domain\")\n\t}\n\n\t// Return subdomain\n\treturn host[:subdomainLength], nil\n}",
"func (cm *ConnectMethod) tlsHost() string {\n\th := cm.targetAddr\n\tif HasPort(h) {\n\t\th = h[:strings.LastIndex(h, \":\")]\n\t}\n\treturn h\n}",
"func cleanHost(in string) string {\n\tif i := strings.IndexAny(in, \" /\"); i != -1 {\n\t\treturn in[:i]\n\t}\n\treturn in\n}",
"func withPort(host string) string {\n\tif _, _, err := net.SplitHostPort(host); err != nil {\n\t\treturn net.JoinHostPort(host, \"443\")\n\t}\n\treturn host\n}",
"func withPort(host string) string {\n\tif _, _, err := net.SplitHostPort(host); err != nil {\n\t\treturn net.JoinHostPort(host, \"443\")\n\t}\n\treturn host\n}",
"func (m MetaData) GetHost() string {\n\tif len(m) == 0 {\n\t\treturn \"\"\n\t}\n\tfor _, k := range []string{\"x-forwarded-host\", \"host\"} {\n\t\tif v := m[k]; v != \"\" {\n\t\t\treturn v\n\t\t}\n\t}\n\treturn \"\"\n}",
"func (c *Database) parsePostgreSQLHostPort(info string) (host, port string) {\n\thost, port = \"127.0.0.1\", \"5432\"\n\tif strings.Contains(info, \":\") && !strings.HasSuffix(info, \"]\") {\n\t\tidx := strings.LastIndex(info, \":\")\n\t\thost = info[:idx]\n\t\tport = info[idx+1:]\n\t} else if len(info) > 0 {\n\t\thost = info\n\t}\n\treturn host, port\n}",
"func (o BuildStrategySpecBuildStepsStartupProbeHttpGetPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildStrategySpecBuildStepsStartupProbeHttpGet) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o HTTPHealthCheckResponsePtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *HTTPHealthCheckResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func (uri *URI) Host() []byte {\n\treturn uri.host\n}",
"func parseAddress(address string) (scheme, host, port string, err error) {\n\tif address == \"\" {\n\t\treturn\n\t}\n\tif strings.Contains(address, \"://\") {\n\t\turl, err := url.Parse(address)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\t\tscheme, address = url.Scheme, url.Host\n\t}\n\tif strings.Contains(address, \":\") {\n\t\thost, port, err = net.SplitHostPort(address)\n\t\tif err != nil {\n\t\t\thost = address\n\t\t\terr = nil\n\t\t}\n\t} else {\n\t\thost = address\n\t}\n\tif port == \"\" {\n\t\tswitch scheme {\n\t\tcase \"http\", \"ws\":\n\t\t\tport = \"80\"\n\t\tcase \"https\", \"wss\":\n\t\t\tport = \"443\"\n\t\t}\n\t}\n\treturn\n}",
"func (o HTTPHealthCheckResponseOutput) Host() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTPHealthCheckResponse) string { return v.Host }).(pulumi.StringOutput)\n}",
"func SplitHostPort(addr string) (string, int, error) {\n\thost, sPort, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\tlog.Errorf(\"Could not split network address: %v\", err)\n\t\treturn \"\", 0, err\n\t}\n\tport, err := strconv.Atoi(sPort)\n\tif err != nil {\n\t\tlog.Errorf(\"No port number found %v\", err)\n\t\treturn \"\", 0, err\n\t}\n\treturn host, port, nil\n}",
"func GetHostname(host ...string) string {\n\tif len(host) > 0 && strings.TrimSpace(host[0]) != \"\" {\n\t\tif h, ok := hostHostList[host[0]]; ok {\n\t\t\treturn h\n\t\t}\n\t\tstr := GetCmdStr(\"host %s\", host[0])\n\t\th := \"\"\n\t\tif strings.Contains(str, \"has address\") {\n\t\t\th = GetCmdStr(\"host %s|awk '{print $1}'\", host[0])\n\t\t\thostHostList[host[0]] = h\n\t\t} else if strings.Contains(str, \"domain name pointer\") {\n\t\t\th = strings.TrimSuffix(GetCmdStr(\"host %s|awk '{print $5}'\", host[0]), \".\")\n\t\t\thostHostList[host[0]] = h\n\t\t}\n\t\treturn h\n\t}\n\tif chostname == \"\" {\n\t\th, err := os.Hostname()\n\t\tif err != nil {\n\t\t\tchostname = GetCmdStr(\"hostname\")\n\t\t} else {\n\t\t\tchostname = h\n\t\t}\n\t}\n\treturn chostname\n}",
"func stripPort(hostport string) string {\n\tcolon := strings.IndexByte(hostport, ':')\n\tif colon == -1 {\n\t\treturn hostport\n\t}\n\tif i := strings.IndexByte(hostport, ']'); i != -1 {\n\t\treturn strings.TrimPrefix(hostport[:i], \"[\")\n\t}\n\treturn hostport[:colon]\n}"
] | [
"0.7416197",
"0.7274514",
"0.7265152",
"0.72162014",
"0.7179156",
"0.7138976",
"0.7102227",
"0.70657533",
"0.70626915",
"0.7054715",
"0.70489645",
"0.7039764",
"0.7039558",
"0.70273805",
"0.67160875",
"0.6699287",
"0.6681174",
"0.6674688",
"0.65914196",
"0.65827954",
"0.6579051",
"0.657206",
"0.65704936",
"0.65457827",
"0.6542215",
"0.6538912",
"0.65053904",
"0.6502312",
"0.64896137",
"0.64835423",
"0.64400834",
"0.6428798",
"0.6420954",
"0.635817",
"0.63449895",
"0.633002",
"0.6269045",
"0.62580323",
"0.62335116",
"0.6222525",
"0.6215798",
"0.61303127",
"0.6103238",
"0.610069",
"0.60849667",
"0.60741967",
"0.605795",
"0.60348874",
"0.6034204",
"0.6029574",
"0.6029382",
"0.6019672",
"0.6010973",
"0.6008752",
"0.5996543",
"0.5984937",
"0.59797066",
"0.5973244",
"0.5968699",
"0.59541965",
"0.59477776",
"0.5946965",
"0.59122086",
"0.59071845",
"0.59071386",
"0.58908814",
"0.58510053",
"0.5846389",
"0.58456826",
"0.5830331",
"0.58170474",
"0.58133566",
"0.5812513",
"0.5804863",
"0.5803298",
"0.580297",
"0.5802646",
"0.5780379",
"0.5772581",
"0.57689923",
"0.57613546",
"0.5751499",
"0.5750577",
"0.57429713",
"0.57336915",
"0.57336575",
"0.5726875",
"0.572446",
"0.57204014",
"0.57204014",
"0.5719942",
"0.57193077",
"0.5711607",
"0.57078373",
"0.56979215",
"0.5696179",
"0.5695008",
"0.5691933",
"0.5691338",
"0.5682566"
] | 0.7172603 | 5 |
SplitHostPort splits host and port and checks that host is not empty | func SplitHostPort(hostname string) (string, string, error) {
host, port, err := net.SplitHostPort(hostname)
if err != nil {
return "", "", trace.Wrap(err)
}
if host == "" {
return "", "", trace.BadParameter("empty hostname")
}
return host, port, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func SplitHostPort(hostport string) (host, port string, err error) {\n\taddrErr := func(addr, why string) (host, port string, err error) {\n\t\treturn \"\", \"\", &net.AddrError{Err: why, Addr: addr}\n\t}\n\n\thoststart, hostend := 0, 0\n\tportstart := len(hostport)\n\tif len(hostport) >= 1 && hostport[0] == '[' {\n\t\thoststart = 1\n\t\thostend = strings.IndexByte(hostport, ']')\n\t\tif hostend < 0 {\n\t\t\treturn addrErr(hostport, \"missing ']' in address\")\n\t\t}\n\t\tportstart = hostend + 1\n\t} else {\n\t\thostend = strings.IndexByte(hostport, ':')\n\t\tif hostend < 0 {\n\t\t\thostend = len(hostport)\n\t\t}\n\t\tportstart = hostend\n\t}\n\tif portstart < len(hostport) {\n\t\tif hostport[portstart] != ':' {\n\t\t\treturn addrErr(hostport, \"invalid character at the end of address, expecting ':'\")\n\t\t}\n\t\tportstart += 1\n\t}\n\n\tport = hostport[portstart:]\n\thost = hostport[hoststart:hostend]\n\n\tif strings.IndexByte(port, ':') >= 0 {\n\t\treturn addrErr(hostport, \"too many colons in suspected port number\")\n\t}\n\tif strings.IndexByte(port, ']') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected ']' in port\")\n\t}\n\tif strings.IndexByte(port, '[') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected '[' in port\")\n\t}\n\tif strings.IndexByte(host, '[') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected '[' in host\")\n\t}\n\tif strings.IndexByte(host, ']') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected ']' in host\")\n\t}\n\n\treturn host, port, nil\n}",
"func SplitHostPort(hostport string) (host, port string, err error) {\n\tj, k := 0, 0\n\n\t// The port starts after the last colon.\n\ti := last(hostport, ':')\n\tif i < 0 {\n\t\tgoto missingPort\n\t}\n\n\tif hostport[0] == '[' {\n\t\t// Expect the first ']' just before the last ':'.\n\t\tend := byteIndex(hostport, ']')\n\t\tif end < 0 {\n\t\t\terr = &AddrError{\"missing ']' in address\", hostport}\n\t\t\treturn\n\t\t}\n\t\tswitch end + 1 {\n\t\tcase len(hostport):\n\t\t\t// There can't be a ':' behind the ']' now.\n\t\t\tgoto missingPort\n\t\tcase i:\n\t\t\t// The expected result.\n\t\tdefault:\n\t\t\t// Either ']' isn't followed by a colon, or it is\n\t\t\t// followed by a colon that is not the last one.\n\t\t\tif hostport[end+1] == ':' {\n\t\t\t\tgoto tooManyColons\n\t\t\t}\n\t\t\tgoto missingPort\n\t\t}\n\t\thost = hostport[1:end]\n\t\tj, k = 1, end+1 // there can't be a '[' resp. ']' before these positions\n\t} else {\n\t\thost = hostport[:i]\n\t\tif byteIndex(host, ':') >= 0 {\n\t\t\tgoto tooManyColons\n\t\t}\n\t\tif byteIndex(host, '%') >= 0 {\n\t\t\tgoto missingBrackets\n\t\t}\n\t}\n\tif byteIndex(hostport[j:], '[') >= 0 {\n\t\terr = &AddrError{\"unexpected '[' in address\", hostport}\n\t\treturn\n\t}\n\tif byteIndex(hostport[k:], ']') >= 0 {\n\t\terr = &AddrError{\"unexpected ']' in address\", hostport}\n\t\treturn\n\t}\n\n\tport = hostport[i+1:]\n\treturn\n\nmissingPort:\n\terr = &AddrError{\"missing port in address\", hostport}\n\treturn\n\ntooManyColons:\n\terr = &AddrError{\"too many colons in address\", hostport}\n\treturn\n\nmissingBrackets:\n\terr = &AddrError{\"missing brackets in address\", hostport}\n\treturn\n}",
"func splitHostPortGraceful(hostPort string) (host, port string, err error) {\n\thost, port, err = net.SplitHostPort(hostPort)\n\tif err != nil {\n\t\tif addrErr, ok := err.(*net.AddrError); ok && addrErr.Err == \"missing port in address\" {\n\t\t\treturn hostPort, port, nil\n\t\t} else {\n\t\t\tif log.DefaultLogger.GetLogLevel() >= log.DEBUG {\n\t\t\t\tmsg := fmt.Sprintf(\"host invalid : %s, error: %v\", hostPort, err)\n\t\t\t\tlog.DefaultLogger.Debugf(RouterLogFormat, \"routers\", \"SplitHostPortGraceful\", msg)\n\t\t\t}\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\treturn host, port, nil\n}",
"func SplitHostPort(s string) (hostport []string, err error) {\n\thost, port, err := net.SplitHostPort(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []string{host, port}, nil\n}",
"func SplitHostPort(hostPort string, defaultPort int) (string, int) {\n\thostPort = strings.Replace(hostPort, \"[::]\", \"0.0.0.0\", -1)\n\tparts := strings.Split(hostPort, \":\")\n\tupstreamHost := parts[0]\n\tupstreamPort := defaultPort\n\tif len(parts) > 1 {\n\t\tvar err error\n\t\tupstreamPort, err = strconv.Atoi(parts[1])\n\t\tif err != nil {\n\t\t\tupstreamPort = defaultPort\n\t\t\tfmt.Printf(\"Error converting port to int for %s : %s\", upstreamHost, err)\n\t\t}\n\t}\n\treturn upstreamHost, upstreamPort\n}",
"func splitConnection(hostPort string) (bool, string, int, error) {\n\thost, port, err := net.SplitHostPort(hostPort)\n\tif nil != err {\n\t\treturn false, \"\", 0, fault.ErrInvalidIPAddress\n\t}\n\n\tIP := net.ParseIP(strings.Trim(host, \" \"))\n\tif nil == IP {\n\t\treturn false, \"\", 0, fault.ErrInvalidIPAddress\n\t}\n\n\tnumericPort, err := strconv.Atoi(strings.Trim(port, \" \"))\n\tif nil != err {\n\t\treturn false, \"\", 0, err\n\t}\n\tif numericPort < 1 || numericPort > 65535 {\n\t\treturn false, \"\", 0, fault.ErrInvalidPortNumber\n\t}\n\n\tif nil != IP.To4() {\n\t\treturn false, IP.String(), numericPort, nil\n\t}\n\treturn true, \"[\" + IP.String() + \"]\", numericPort, nil\n}",
"func SplitHostPort(addr string) (string, int, error) {\n\thost, sPort, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\tlog.Errorf(\"Could not split network address: %v\", err)\n\t\treturn \"\", 0, err\n\t}\n\tport, err := strconv.Atoi(sPort)\n\tif err != nil {\n\t\tlog.Errorf(\"No port number found %v\", err)\n\t\treturn \"\", 0, err\n\t}\n\treturn host, port, nil\n}",
"func splitHostPort(hostport string) (host string, port int) {\n\tport = -1\n\n\tif strings.HasPrefix(hostport, \"[\") {\n\t\taddrEnd := strings.LastIndex(hostport, \"]\")\n\t\tif addrEnd < 0 {\n\t\t\t// Invalid hostport.\n\t\t\treturn\n\t\t}\n\t\tif i := strings.LastIndex(hostport[addrEnd:], \":\"); i < 0 {\n\t\t\thost = hostport[1:addrEnd]\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif i := strings.LastIndex(hostport, \":\"); i < 0 {\n\t\t\thost = hostport\n\t\t\treturn\n\t\t}\n\t}\n\n\thost, pStr, err := net.SplitHostPort(hostport)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tp, err := strconv.ParseUint(pStr, 10, 16)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn host, int(p)\n}",
"func splitHostPort(addr string) (string, int, error) {\n\thost, sPort, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\tlog.Errorf(\"Could not split network address: %v\", err)\n\t\treturn \"\", 0, errors.Wrap(err)\n\t}\n\tport, err := strconv.Atoi(sPort)\n\tif err != nil {\n\t\tlog.Errorf(\"No port number found %v\", err)\n\t\treturn \"\", 0, errors.Wrap(err)\n\t}\n\treturn host, port, nil\n}",
"func splitHost(host string) string {\n\treturn strings.Split(host, \":\")[0]\n}",
"func parseHostPort(str string) (string, string) {\n\tvar (\n\t\thost string\n\t\tport string\n\n\t\ti = strings.Index(str, \":\")\n\t)\n\tif i == -1 {\n\t\treturn str, \"\"\n\t}\n\n\thost = str[:i]\n\tport = str[i+1:]\n\n\treturn host, port\n}",
"func HostPort(urlStr string) (string, error) {\n\t// TODO: rename this function to URLHostPort instead, like\n\t// ListenHostPort below.\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not parse %q as a url: %v\", urlStr, err)\n\t}\n\tif u.Scheme == \"\" {\n\t\treturn \"\", fmt.Errorf(\"url %q has no scheme\", urlStr)\n\t}\n\thostPort := u.Host\n\tif hostPort == \"\" || strings.HasPrefix(hostPort, \":\") {\n\t\treturn \"\", fmt.Errorf(\"url %q has no host\", urlStr)\n\t}\n\tidx := strings.Index(hostPort, \"]\")\n\tif idx == -1 {\n\t\tidx = 0\n\t}\n\tif !strings.Contains(hostPort[idx:], \":\") {\n\t\tif u.Scheme == \"https\" {\n\t\t\thostPort += \":443\"\n\t\t} else {\n\t\t\thostPort += \":80\"\n\t\t}\n\t}\n\treturn hostPort, nil\n}",
"func HostPort(urlStr string) (string, error) {\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not parse %q as a url: %v\", urlStr, err)\n\t}\n\tif u.Scheme == \"\" {\n\t\treturn \"\", fmt.Errorf(\"url %q has no scheme\", urlStr)\n\t}\n\thostPort := u.Host\n\tif hostPort == \"\" || strings.HasPrefix(hostPort, \":\") {\n\t\treturn \"\", fmt.Errorf(\"url %q has no host\", urlStr)\n\t}\n\tidx := strings.Index(hostPort, \"]\")\n\tif idx == -1 {\n\t\tidx = 0\n\t}\n\tif !strings.Contains(hostPort[idx:], \":\") {\n\t\tif u.Scheme == \"https\" {\n\t\t\thostPort += \":443\"\n\t\t} else {\n\t\t\thostPort += \":80\"\n\t\t}\n\t}\n\treturn hostPort, nil\n}",
"func stripHostPort(h string) string {\n\t// If no port on host, return unchanged\n\tif !strings.Contains(h, \":\") {\n\t\treturn h\n\t}\n\thost, _, err := net.SplitHostPort(h)\n\tif err != nil {\n\t\treturn h // on error, return unchanged\n\t}\n\treturn host\n}",
"func parseHost(addr string) string {\n\tvar (\n\t\thost, port string\n\t\tdefaultAssigned bool\n\t)\n\n\tv := strings.Split(addr, \":\")\n\n\tswitch len(v) {\n\tcase 2:\n\t\thost = v[0]\n\t\tport = v[1]\n\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif port == \"\" {\n\t\t\tport = _DEFAULT_PORT\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif defaultAssigned == false {\n\t\t\treturn addr // addr is already in required format\n\t\t}\n\t\tbreak\n\n\tcase 1:\n\t\thost = v[0]\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t}\n\t\tport = _DEFAULT_PORT\n\tcase 0:\n\t\tfallthrough\n\tdefault:\n\t\thost = _DEFAULT_HOST\n\t\tport = _DEFAULT_PORT\n\t\tbreak\n\t}\n\treturn strings.Join([]string{host, port}, \":\")\n}",
"func (p Plugin) hostPort(host string) (string, string) {\n\t// Split the host string by colon (\":\") to get the host and port\n\thosts := strings.Split(host, \":\")\n\t// Get the default port from the Plugin's Config field\n\tport := p.Config.Port\n\t// If the host string contains a port (i.e. it has more than one element after splitting), set the port to that value\n\tif len(hosts) > 1 {\n\t\thost = hosts[0]\n\t\tport = hosts[1]\n\t}\n\n\t// Return the host and port as separate strings\n\treturn host, port\n}",
"func stripHostPort(host string) string {\n\t// If no port on host, return unchanged\n\tif strings.IndexByte(host, ':') == -1 {\n\t\treturn host\n\t}\n\n\th, _, err := net.SplitHostPort(host)\n\tif err != nil {\n\t\treturn host // on error, return unchanged\n\t}\n\n\treturn h\n}",
"func parseHost(host string) string {\n\trealHost, _, _ := net.SplitHostPort(host)\n\tif realHost != \"\" {\n\t\treturn realHost\n\t}\n\treturn host\n}",
"func SplitAddress(addr string) (string, int) {\n\ts := strings.Split(addr, \":\")\n\thostname := s[0]\n\tport, _ := strconv.Atoi(s[1])\n\treturn hostname, port\n}",
"func looksLikeHostport(s string) bool {\n\tif !strings.Contains(s, \":\") {\n\t\treturn false\n\t} else {\n\t\thostport := strings.SplitN(s, \":\", 2)\n\t\tif _, err := regexp.MatchString(\"[0-9]+\", hostport[1]); err == nil {\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n}",
"func ensureHostPort(addr string, defaultPort string) string {\n\thost, port, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn net.JoinHostPort(addr, defaultPort)\n\t}\n\tif host == \"\" {\n\t\thost, err = os.Hostname()\n\t\tif err != nil {\n\t\t\thost = \"127.0.0.1\"\n\t\t}\n\t}\n\tif port == \"\" {\n\t\tport = defaultPort\n\t}\n\n\treturn net.JoinHostPort(host, port)\n}",
"func parseHostPort(hostPort string, defaultPort int) (host string, port int, err error) {\n\tif hostPort != \"\" {\n\t\thost, sPort, err := net.SplitHostPort(hostPort)\n\t\tif ae, ok := err.(*net.AddrError); ok && strings.Contains(ae.Err, \"missing port\") {\n\t\t\t// try appending the current port\n\t\t\thost, sPort, err = net.SplitHostPort(fmt.Sprintf(\"%s:%d\", hostPort, defaultPort))\n\t\t}\n\t\tif err != nil {\n\t\t\treturn \"\", -1, err\n\t\t}\n\t\tport, err = strconv.Atoi(strings.TrimSpace(sPort))\n\t\tif err != nil {\n\t\t\treturn \"\", -1, err\n\t\t}\n\t\tif port == 0 || port == -1 {\n\t\t\tport = defaultPort\n\t\t}\n\t\treturn strings.TrimSpace(host), port, nil\n\t}\n\treturn \"\", -1, errors.New(\"no hostport specified\")\n}",
"func (b *Bogus) HostPort() (string, string) {\n\th, p, _ := net.SplitHostPort(b.server.URL[7:])\n\treturn h, p\n}",
"func getHostNameAndPort(hostInfo string) (string, int, error) {\n\thost := strings.SplitN(hostInfo, \":\", -1)\n\tif len(host) != 2 {\n\t\treturn \"\", 0, fmt.Errorf(\"expected hostname:port, got %s\", host)\n\t}\n\n\tport, err := strconv.Atoi(host[1])\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"invalid port number, got %s\", host[1])\n\t}\n\n\treturn host[0], port, nil\n}",
"func HostNameandPort(node string) (host, port string, ipv6 bool, err error) {\n\ttokens := []string{}\n\n\t// Set _IPv6 based on input address\n\tipv6, err = IsIPv6(node)\n\n\tif err != nil {\n\t\treturn \"\", \"\", false, err\n\t}\n\n\terr = nil\n\t// For IPv6\n\tif ipv6 {\n\t\t// Then the url should be of the form [::1]:8091\n\t\ttokens = strings.Split(node, \"]:\")\n\t\thost = strings.Replace(tokens[0], \"[\", \"\", 1)\n\n\t} else {\n\t\t// For IPv4\n\t\ttokens = strings.Split(node, \":\")\n\t\thost = tokens[0]\n\t}\n\n\tif len(tokens) == 2 {\n\t\tport = tokens[1]\n\t} else {\n\t\tport = \"\"\n\t}\n\n\treturn\n}",
"func withoutPort(addr string) string {\n\tif h, _, err := net.SplitHostPort(addr); err == nil {\n\t\treturn h\n\t}\n\treturn addr\n}",
"func hasPort(s string) bool { return strings.LastIndex(s, \":\") > strings.LastIndex(s, \"]\") }",
"func hasPort(s string) bool { return strings.LastIndex(s, \":\") > strings.LastIndex(s, \"]\") }",
"func hasPort(s string) bool { return strings.LastIndex(s, \":\") > strings.LastIndex(s, \"]\") }",
"func HostOnly(addr string) string {\n\thost, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn addr\n\t} else {\n\t\treturn host\n\t}\n}",
"func isHostnamePort(fl FieldLevel) bool {\n\tval := fl.Field().String()\n\thost, port, err := net.SplitHostPort(val)\n\tif err != nil {\n\t\treturn false\n\t}\n\t// Port must be a iny <= 65535.\n\tif portNum, err := strconv.ParseInt(\n\t\tport, 10, 32,\n\t); err != nil || portNum > 65535 || portNum < 1 {\n\t\treturn false\n\t}\n\n\t// If host is specified, it should match a DNS name\n\tif host != \"\" {\n\t\treturn hostnameRegexRFC1123.MatchString(host)\n\t}\n\treturn true\n}",
"func (h *HostInfo) ParseHostWithPort(host string, isHTTPS bool) {\n\thasPortFuncByte := func(host string) bool {\n\t\treturn strings.LastIndexByte(host, ':') >\n\t\t\tstrings.LastIndexByte(host, ']')\n\t}\n\tif len(host) == 0 {\n\t\treturn\n\t}\n\n\t// separate domain and port\n\tif !hasPortFuncByte(host) {\n\t\th.domain = host\n\t\tif isHTTPS {\n\t\t\th.port = \"443\"\n\t\t} else {\n\t\t\th.port = \"80\"\n\t\t}\n\t} else {\n\t\tvar err error\n\t\th.domain, h.port, err = net.SplitHostPort(host)\n\t\tif err != nil {\n\t\t\th.reset()\n\t\t\treturn\n\t\t}\n\t}\n\tif len(h.domain) == 0 {\n\t\treturn\n\t}\n\n\t// determine whether the given domain is already an IP Address\n\tip := net.ParseIP(h.domain)\n\tif ip != nil {\n\t\th.ip = ip\n\t}\n\n\t// host and target with port\n\th.hostWithPort = h.domain + \":\" + h.port\n\th.targetWithPort = h.hostWithPort\n}",
"func parseHostPort(param []byte) (ip net.IP, port int) {\n\tvar h1, h2, h3, h4, p1, p2 int\n\tfmt.Fscanf(bytes.NewReader(param), \"%d,%d,%d,%d,%d,%d\", &h1, &h2, &h3, &h4, &p1, &p2)\n\tport = p1<<8 + p2\n\tip = net.IP{byte(h1), byte(h2), byte(h3), byte(h4)}\n\treturn\n}",
"func ParseHost(s string) (*Host, error) {\n\tisValidHost := func(host string) bool {\n\t\tif host == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif ip := net.ParseIP(host); ip != nil {\n\t\t\treturn true\n\t\t}\n\n\t\t// host is not a valid IPv4 or IPv6 address\n\t\t// host may be a hostname\n\t\t// refer https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names\n\t\t// why checks are done like below\n\t\tif len(host) < 1 || len(host) > 253 {\n\t\t\treturn false\n\t\t}\n\n\t\tfor _, label := range strings.Split(host, \".\") {\n\t\t\tif len(label) < 1 || len(label) > 63 {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tif !hostLabelRegexp.MatchString(label) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tvar port Port\n\tvar isPortSet bool\n\thost, portStr, err := net.SplitHostPort(s)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"missing port in address\") {\n\t\t\treturn nil, err\n\t\t}\n\t\thost = s\n\t\tportStr = \"\"\n\t} else {\n\t\tif port, err = ParsePort(portStr); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tisPortSet = true\n\t}\n\n\tif host != \"\" {\n\t\thost, err = trimIPv6(host)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// IPv6 requires a link-local address on every network interface.\n\t// `%interface` should be preserved.\n\ttrimmedHost := host\n\n\tif i := strings.LastIndex(trimmedHost, \"%\"); i > -1 {\n\t\t// `%interface` can be skipped for validity check though.\n\t\ttrimmedHost = trimmedHost[:i]\n\t}\n\n\tif !isValidHost(trimmedHost) {\n\t\treturn nil, errors.New(\"invalid hostname\")\n\t}\n\n\treturn &Host{\n\t\tName: host,\n\t\tPort: port,\n\t\tIsPortSet: isPortSet,\n\t}, nil\n}",
"func SplitHostPortInt32(vip string) (string, int32, error) {\n\tip, portRaw, err := net.SplitHostPort(vip)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tport, err := strconv.ParseInt(portRaw, 10, 32)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\treturn ip, int32(port), nil\n}",
"func ServiceAndPort(host string) (string, string) {\n\tsp := strings.Split(host, \":\")\n\tif len(sp) <= 1 {\n\t\treturn host, \"0\"\n\t}\n\tss := strings.Split(sp[0], \".\")\n\tif len(ss) <= 1 {\n\t\treturn sp[0], sp[1]\n\t}\n\treturn ss[0], sp[1]\n}",
"func ListenHostPort(listenAddr string) (string, error) {\n\thp := listenAddr\n\tif strings.HasPrefix(hp, \":\") {\n\t\thp = \"localhost\" + hp\n\t} else if strings.HasPrefix(hp, \"0.0.0.0:\") {\n\t\thp = \"localhost:\" + hp[len(\"0.0.0.0:\"):]\n\t}\n\tif _, _, err := net.SplitHostPort(hp); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hp, nil\n}",
"func HostWithoutPort(s string) string {\n\tif strings.Contains(s, \":\") {\n\t\treturn strings.Split(s, \":\")[0]\n\t}\n\treturn s\n}",
"func hostname(hostport string) (string, error) {\n\thost, _, err := net.SplitHostPort(hostport)\n\treturn host, err\n}",
"func ValidateNameserverIpAndPort(nameServer string) (string, string, error) {\n\tif ip := net.ParseIP(nameServer); ip != nil {\n\t\treturn ip.String(), \"53\", nil\n\t}\n\n\thost, port, err := net.SplitHostPort(nameServer)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tif ip := net.ParseIP(host); ip == nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"bad IP address: %q\", host)\n\t}\n\tif p, err := strconv.Atoi(port); err != nil || p < 1 || p > 65535 {\n\t\treturn \"\", \"\", fmt.Errorf(\"bad port number: %q\", port)\n\t}\n\treturn host, port, nil\n}",
"func TestIsPort(t *testing.T) {\n\n\tif HasPort(\"blah.not.port:\") {\n\t\tt.Fatal(\"Failed to parse port when : at end\")\n\t}\n\n\tif !HasPort(\"host:1\") {\n\t\tt.Fatal(\"Failed to parse with port = 1\")\n\t}\n\n\tif HasPort(\"https://example.com\") {\n\t\tt.Fatal(\"Failed when scheme is specified\")\n\t}\n}",
"func validatePortHosts(conf *config.Gateway, configuredPort int) (ports, hosts, error) {\n\tportMap := make(ports)\n\thostMap := make(hosts)\n\tisHostsMandatory := len(conf.Server) > 1\n\n\tfor _, srv := range conf.Server {\n\t\tif isHostsMandatory && len(srv.Hosts) == 0 {\n\t\t\treturn nil, nil, fmt.Errorf(\"hosts attribute is mandatory for multiple servers: %q\", srv.Name)\n\t\t}\n\n\t\tsrvPortMap := make(ports)\n\t\tfor _, host := range srv.Hosts {\n\t\t\tif !reValidFormat.MatchString(host) {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"host format is invalid: %q\", host)\n\t\t\t}\n\n\t\t\tho, po, err := splitWildcardHostPort(host, configuredPort)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tif _, ok := srvPortMap[po]; !ok {\n\t\t\t\tsrvPortMap[po] = make(hosts)\n\t\t\t}\n\n\t\t\tsrvPortMap[po][ho] = true\n\n\t\t\thostMap[fmt.Sprintf(\"%s:%d\", ho, po)] = true\n\t\t}\n\n\t\t// srvPortMap contains all unique host port combinations for\n\t\t// the current server and should not exist multiple times.\n\t\tfor po, ho := range srvPortMap {\n\t\t\tif _, ok := portMap[po]; !ok {\n\t\t\t\tportMap[po] = make(hosts)\n\t\t\t}\n\n\t\t\tfor h := range ho {\n\t\t\t\tif _, ok := portMap[po][h]; ok {\n\t\t\t\t\treturn nil, nil, fmt.Errorf(\"conflict: host %q already defined for port: %d\", h, po)\n\t\t\t\t}\n\n\t\t\t\tportMap[po][h] = true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn portMap, hostMap, nil\n}",
"func hasPort(s string) bool {\n\treturn strings.LastIndex(s, \":\") > strings.LastIndex(s, \"]\")\n}",
"func hasPort(s string) bool {\n\treturn strings.LastIndex(s, \":\") > strings.LastIndex(s, \"]\")\n}",
"func ensureHostport(scheme string, host string) string {\n\tif strings.Index(host, \":\") > 0 {\n\t\treturn host\n\t}\n\tif scheme == \"https\" {\n\t\treturn host + \":443\"\n\t}\n\tif scheme == \"http\" {\n\t\treturn host + \":80\"\n\t}\n\tlog.Println(\"wtt not supported\", scheme, host)\n\treturn host\n}",
"func ParseHost(host string) (string, string, string, error) {\n\tprotoAddrParts := strings.SplitN(host, \"://\", 2)\n\tif len(protoAddrParts) == 1 {\n\t\treturn \"\", \"\", \"\", fmt.Errorf(\"unable to parse docker host `%s`\", host)\n\t}\n\n\tvar basePath string\n\tproto, addr := protoAddrParts[0], protoAddrParts[1]\n\tif proto == \"tcp\" {\n\t\tparsed, err := url.Parse(\"tcp://\" + addr)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\t\taddr = parsed.Host\n\t\tbasePath = parsed.Path\n\t}\n\treturn proto, addr, basePath, nil\n}",
"func parseIPAndPort(input string) (string, int, error) {\n\tseparator := strings.LastIndex(input, \":\")\n\tif separator == -1 {\n\t\treturn \"\", 0, errors.New(\"cannot parse IP and port correctly\")\n\t}\n\tIPStr := input[0:separator]\n\tif IPStr[0] == '[' {\n\t\tIPStr = IPStr[1 : len(IPStr)-1]\n\t}\n\tfor _, prefix := range localIPv4 {\n\t\tif strings.HasPrefix(IPStr, prefix) {\n\t\t\treturn \"\", 0, errors.New(\"ignore this IP address\")\n\t\t}\n\t}\n\toutputIP := net.ParseIP(IPStr)\n\tif outputIP == nil {\n\t\treturn \"\", 0, errors.New(\"invalid IP address\")\n\t}\n\n\tport, err := strconv.Atoi(input[separator+1:])\n\tif err != nil {\n\t\treturn \"\", 0, errors.New(\"invalid IP port\")\n\t}\n\treturn IPStr, port, nil\n}",
"func NormalizeHostPort(inputPort string, addr *net.TCPAddr) string {\n\turlHostPort := addr.String()\n\tif strings.HasPrefix(inputPort, \":\") || !strings.Contains(inputPort, \":\") {\n\t\turlHostPort = fmt.Sprintf(\"localhost:%d\", addr.Port)\n\t}\n\treturn urlHostPort\n}",
"func extractIPAndPortFromAddresses(addresses []string) (string, string) {\n\tfor _, addr := range addresses {\n\t\taddrParts := strings.SplitN(addr, \"://\", 2)\n\t\tif len(addrParts) != 2 {\n\t\t\tlogrus.Errorf(\"invalid listening address %s: must be in format [protocol]://[address]\", addr)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch addrParts[0] {\n\t\tcase \"tcp\":\n\t\t\thost, port, err := net.SplitHostPort(addrParts[1])\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"failed to split host and port from address: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn host, port\n\t\tcase \"unix\":\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tlogrus.Errorf(\"only unix socket or tcp address is support\")\n\t\t}\n\t}\n\treturn \"\", \"\"\n}",
"func stripPort(hostportURL *url.URL) string {\n\tvar hostport string\n\tif hostportURL.Host != \"\" {\n\t\thostport = hostportURL.Host\n\t} else {\n\t\thostport = hostportURL.String()\n\t}\n\n\tcolon := strings.IndexByte(hostport, ':')\n\tif colon == -1 {\n\t\treturn hostport\n\t}\n\tif i := strings.IndexByte(hostport, ']'); i != -1 {\n\t\treturn strings.TrimPrefix(hostport[:i], \"[\")\n\t}\n\treturn hostport[:colon]\n}",
"func DialString(str string) bool {\n\n\tif h, p, err := net.SplitHostPort(str); err == nil && h != \"\" && p != \"\" && (DNSName(h) || IP(h)) && Port(p) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func parsehostpath(hostpath string) (host string, path string) {\n\tsplits := strings.Split(hostpath, \"/\")\n\thost = splits[0]\n\tpath = strings.Join(splits[1:len(splits)], \"/\")\n\tpath = \"/\" + path\n\treturn\n}",
"func ExtractPort(host string) int {\n\t_, port, _ := net.SplitHostPort(host)\n\tif port == \"\" {\n\t\treturn 80\n\t}\n\tportInt, _ := strconv.Atoi(port)\n\treturn portInt\n}",
"func checkServerPort(s string) bool {\n\tif _, err := strconv.ParseInt(s, 10, 0); err == nil {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (c *Database) parsePostgreSQLHostPort(info string) (host, port string) {\n\thost, port = \"127.0.0.1\", \"5432\"\n\tif strings.Contains(info, \":\") && !strings.HasSuffix(info, \"]\") {\n\t\tidx := strings.LastIndex(info, \":\")\n\t\thost = info[:idx]\n\t\tport = info[idx+1:]\n\t} else if len(info) > 0 {\n\t\thost = info\n\t}\n\treturn host, port\n}",
"func shouldDiscoverHost(name string) bool {\n\tparts := strings.Split(name, \".\")\n\tif len(parts) == 1 {\n\t\tif parts[0] == \"localhost\" {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\treturn parts[len(parts)-1] == domain\n}",
"func Host(hostname string) (string, error) {\n\tif hostname == \"\" {\n\t\treturn \"\", trace.BadParameter(\"missing parameter hostname\")\n\t}\n\t// if this is IPv4 or V6, return as is\n\tif ip := net.ParseIP(hostname); len(ip) != 0 {\n\t\treturn hostname, nil\n\t}\n\t// has no indication of port, return, note that\n\t// it will not break ipv6 as it always has at least one colon\n\tif !strings.Contains(hostname, \":\") {\n\t\treturn hostname, nil\n\t}\n\thost, _, err := SplitHostPort(hostname)\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\treturn host, nil\n}",
"func ParseHost(host string) (string, string, string, error) {\n\tprotoAddrParts := strings.SplitN(host, \"://\", 2)\n\tif len(protoAddrParts) == 1 {\n\t\treturn \"\", \"\", \"\", fmt.Errorf(\"unable to parse storm host `%s`\", host)\n\t}\n\n\tvar basePath string\n\tproto, addr := protoAddrParts[0], protoAddrParts[1]\n\tif proto == \"tcp\" {\n\t\tparsed, err := url.Parse(\"tcp://\" + addr)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\t\taddr = parsed.Host\n\t\tbasePath = parsed.Path\n\t}\n\treturn proto, addr, basePath, nil\n}",
"func OptionallyJoinHostPort(host string, port int) string {\n\tis_ipv6 := strings.IndexByte(host, ':') >= 0\n\thas_port := port > 0\n\tif is_ipv6 {\n\t\thost = \"[\" + host + \"]\"\n\t}\n\tif has_port {\n\t\thost += \":\" + strconv.Itoa(port)\n\t}\n\treturn host\n}",
"func parseSchemeAndPort(services []byte, scheme, port string) (string, string) {\n\tre := regexp.MustCompile(`([a-zA-Z0-9-]+)\\s+(\\d+)`) // for configs/services\n\t// re groups: 0. full match - [ftp 21]\n\t// 1. service - [ftp] 21\n\t// 2. port - ftp [21]\n\n\tif isVar(scheme) && !isVar(port) {\n\t\t// set corresponding port from configs/services\n\t\tscanner := bufio.NewScanner(strings.NewReader(string(services[:])))\n\t\tfor scanner.Scan() {\n\t\t\tmatch := re.FindStringSubmatch(scanner.Text())\n\t\t\tif scheme == match[1] {\n\t\t\t\tport = \"^\" + match[2] + \"$\"\n\t\t\t}\n\t\t}\n\t} else if !isVar(scheme) && !isVar(port) {\n\t\t// set port to 80, 443\n\t\tport = \"^(80|443)$\"\n\t} else if isVar(scheme) && isVar(port) {\n\t\t// set whatever port + service port\n\t\tif scheme == \"http\" {\n\t\t\tport = \"^(80|\" + port + \")$\"\n\t\t} else if scheme == \"https\" {\n\t\t\tport = \"^(443|\" + port + \")$\"\n\t\t} else {\n\t\t\tport = \"^\" + port + \"$\"\n\t\t}\n\t} else if isVar(port) {\n\t\tport = \"^\" + port + \"$\"\n\t}\n\n\t// set \"Any\" when not http(s)\n\tif scheme != \"http\" && scheme != \"https\" {\n\t\tscheme = \"Any\"\n\t}\n\n\treturn scheme, port\n}",
"func LoopbackHostPort(bindAddress string) (string, string, error) {\n\thost, port, err := net.SplitHostPort(bindAddress)\n\tif err != nil {\n\t\t// should never happen\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid server bind address: %q\", bindAddress)\n\t}\n\n\t// Value is expected to be an IP or DNS name, not \"0.0.0.0\".\n\tif host == \"0.0.0.0\" {\n\t\t// compare MaybeDefaultWithSelfSignedCerts which adds \"localhost\" to the cert as alternateDNS\n\t\thost = \"localhost\"\n\t}\n\treturn host, port, nil\n}",
"func ExtractHost(address string) string {\n\thost, _, _ := net.SplitHostPort(address)\n\tif host == \"\" {\n\t\treturn \"localhost\"\n\t}\n\treturn host\n}",
"func HostPort(addr string, port interface{}) string {\n\thost := addr\n\tif strings.Count(addr, \":\") > 0 {\n\t\thost = fmt.Sprintf(\"[%s]\", addr)\n\t}\n\t// TODO check for NATS case\n\tif v, ok := port.(string); ok {\n\t\tif v == \"\" {\n\t\t\treturn fmt.Sprintf(\"%s\", host)\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"%s:%v\", host, port)\n}",
"func parseHostAndPath(s string) (string, string) {\n\ttoks := strings.SplitN(s, \":\", 2)\n\n\tif len(toks) > 1 {\n\t\treturn toks[0], toks[1]\n\t}\n\n\treturn s, \"./\"\n}",
"func splitAddr(v string) (network, addr string, err error) {\n\tep := strings.Split(v, \"://\")\n\tif len(ep) != 2 {\n\t\terr = errInvalidAddress\n\t\treturn network, addr, err\n\t}\n\tnetwork = ep[0]\n\n\ttrans, ok := drivers.get(network)\n\tif !ok {\n\t\terr = fmt.Errorf(\"zmq4: unknown transport %q\", network)\n\t\treturn network, addr, err\n\t}\n\n\taddr, err = trans.Addr(ep[1])\n\treturn network, addr, err\n}",
"func isHost(id string) bool {\n\tif id == \"/\" {\n\t\t// it's a host\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func isValidHostURL(hostURL string) bool {\n\tif strings.TrimSpace(hostURL) == \"\" {\n\t\treturn false\n\t}\n\turl := client.NewURL(hostURL)\n\tif url.Scheme != \"https\" && url.Scheme != \"http\" {\n\t\treturn false\n\t}\n\tif url.Path != \"\" && url.Path != \"/\" {\n\t\treturn false\n\t}\n\treturn true\n}",
"func getServerIPorName(env string) string {\n\n\thostEnv := os.Getenv(env) // SERVER_ADDRESS or SERVICE_ADDRESS\n\n\tif hostEnv == \"\" {\n\t\treturn getPublicIP()\n\t}\n\n\t// \"1.2.3.4\" or \"localhost\"\n\tif !strings.Contains(hostEnv, \":\") {\n\t\treturn hostEnv\n\t}\n\n\tstrs := strings.Split(hostEnv, \":\")\n\tfmt.Println(len(strs))\n\tif strs[0] == \"\" { // \":31024\"\n\t\treturn getPublicIP()\n\t} else { // \"1.2.3.4:31024\" or \"localhost:31024\"\n\t\treturn strs[0]\n\t}\n}",
"func extractHostsFromLabel(label string) []string {\n\tlabel = strings.Replace(label, \"Host:\", \"\", -1)\n\tlabel = strings.Replace(label, \",\", \" \", -1)\n\n\treturn strings.Split(label, \" \")\n}",
"func brokerEntrySeemsValid(broker string) bool {\n\tif !strings.Contains(broker, \":\") {\n\t\treturn false\n\t}\n\n\tparts := strings.Split(broker, \":\")\n\tif len(parts) > 2 {\n\t\treturn false\n\t}\n\n\thost := parts[0]\n\tport := parts[1]\n\n\tif _, err := strconv.ParseUint(port, 10, 16); err != nil {\n\t\treturn false\n\t}\n\n\t// Valid hostnames may contain only the ASCII letters 'a' through 'z' (in a\n\t// case-insensitive manner), the digits '0' through '9', and the hyphen. IP\n\t// v4 addresses are represented in dot-decimal notation, which consists of\n\t// four decimal numbers, each ranging from 0 to 255, separated by dots,\n\t// e.g., 172.16.254.1\n\t// The following regular expression:\n\t// 1. allows just a-z (case-insensitive), 0-9, and the dot and hyphen characters\n\t// 2. does not allow leading trailing dots or hyphens\n\tre, _ := regexp.Compile(\"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9.-]*[a-zA-Z0-9])$\")\n\tmatched := re.FindString(host)\n\treturn len(matched) == len(host)\n}",
"func getHost(host_url string) (host string, err error) {\n\tu, err := url.Parse(host_url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.Split(u.Host, \":\")[0], nil\n}",
"func (p *process) HostPort(containerPort int) (int, error) {\n\treturn containerPort, nil\n}",
"func isValidHost(h string) bool {\n\tif strings.Contains(h, \"global-sidecar\") ||\n\t\tstrings.Contains(h, \":\") ||\n\t\tstrings.Contains(h, \"unknown\") {\n\t\treturn false\n\t}\n\treturn true\n}",
"func GetHostPort() (string, string, error) {\n\taddress, err := GetAddress()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\taddressSlice := strings.Split(address, \":\")\n\n\thost := addressSlice[0]\n\tport := addressSlice[1]\n\treturn host, port, nil\n}",
"func withPort(host string) string {\n\tif _, _, err := net.SplitHostPort(host); err != nil {\n\t\treturn net.JoinHostPort(host, \"443\")\n\t}\n\treturn host\n}",
"func withPort(host string) string {\n\tif _, _, err := net.SplitHostPort(host); err != nil {\n\t\treturn net.JoinHostPort(host, \"443\")\n\t}\n\treturn host\n}",
"func IsHost(str string) bool {\n\treturn IsIP(str) || IsDNSName(str)\n}",
"func addrToHost(addr string) string {\n\treturn strings.Split(addr, \":\")[0]\n}",
"func TestConfig_Parse_Host(t *testing.T) {\n\tif c, err := ParseConfig(`host = \"local\"`); err != nil {\n\t\tt.Fatal(err)\n\t} else if c.Host != \"local\" {\n\t\tt.Fatalf(\"unexpected host: %s\", c.Host)\n\t}\n}",
"func GetHostPort(uri string) (string, int, error) {\n\tconst zeroPort int = 0\n\tu, err := url.Parse(uri)\n\tif err != nil {\n\t\treturn \"\", zeroPort, err\n\t}\n\thost, port, err := net.SplitHostPort(u.Host)\n\tif err != nil {\n\t\treturn \"\", zeroPort, err\n\t}\n\n\tportInt, err := strconv.Atoi(port)\n\tif err != nil {\n\t\tportInt = zeroPort\n\t}\n\treturn host, portInt, nil\n}",
"func JoinHostPort(host, port string) string {\n\t// If host has colons or a percent sign, have to bracket it.\n\tif byteIndex(host, ':') >= 0 || byteIndex(host, '%') >= 0 {\n\t\treturn \"[\" + host + \"]:\" + port\n\t}\n\treturn host + \":\" + port\n}",
"func resolveIdpHostAndPort() (string, string) {\n\tidpHost := os.Getenv(\"IDP_HOST\")\n\tif len(idpHost) == 0 {\n\t\tglog.Error(\"Error: IDP_HOST environment variable is empty\")\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\tisPort := os.Getenv(\"IDP_PORT\")\n\tif len(isPort) == 0 {\n\t\tglog.Error(\"Error: IDP_PORT environment variable is empty\")\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\treturn idpHost, isPort\n}",
"func stripPort(hostport string) string {\n\tcolon := strings.IndexByte(hostport, ':')\n\tif colon == -1 {\n\t\treturn hostport\n\t}\n\tif i := strings.IndexByte(hostport, ']'); i != -1 {\n\t\treturn strings.TrimPrefix(hostport[:i], \"[\")\n\t}\n\treturn hostport[:colon]\n}",
"func ExtractHostName(urlStr string) (HostNames, error) {\n\thn := &HostNames{\n\t\tURL: \"\",\n\t\tHostName: \"\",\n\t}\n\n\tu, err := url.Parse(urlStr)\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn *hn, err\n\t}\n\n\tisSchema, err := IsSchema(urlStr)\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn *hn, err\n\t}\n\n\tif u.Hostname() != \"\" && true == isSchema {\n\t\thn.URL = u.Scheme + \"://\" + u.Hostname()\n\t\thn.HostName = u.Hostname()\n\t}\n\n\treturn *hn, nil\n}",
"func fakeHostPorts(fromHost, toHost, fromPort, toPort int) []string {\n\tvar hostports []string\n\tfor h := fromHost; h <= toHost; h++ {\n\t\tfor p := fromPort; p <= toPort; p++ {\n\t\t\thostports = append(hostports, fmt.Sprintf(\"192.0.2.%v:%v\", h, p))\n\t\t}\n\t}\n\treturn hostports\n}",
"func (s *serverCGI) extractHost(method string) (string, string, int) {\n\treg := regexp.MustCompile(\"^\" + method + `/([^\\+]*)(\\+.*)`)\n\tm := reg.FindStringSubmatch(s.path())\n\tif m == nil {\n\t\tlog.Println(\"illegal url\")\n\t\treturn \"\", \"\", 0\n\t}\n\tpath := m[2]\n\thost, portstr, err := net.SplitHostPort(m[1])\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\", \"\", 0\n\t}\n\tport, err := strconv.Atoi(portstr)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\", \"\", 0\n\t}\n\treturn host, path, port\n}",
"func GetHostName(hostAddr string) string {\n\treturn strings.Split(hostAddr, base.UrlPortNumberDelimiter)[0]\n}",
"func getSplitURL(r *http.Request, expectedNrSplits int) ([]string, CustError) {\n\tparts := strings.Split(r.URL.Path, \"/\")\n\n\t// Missing a field/part of URL\n\tif len(parts) != expectedNrSplits {\n\t\treturn nil, CustError{http.StatusBadRequest, errorStr[6]}\n\t}\n\n\t// Nothing bad happened\n\treturn parts, CustError{0, errorStr[0]}\n}",
"func isHost(mode string) bool {\n\treturn mode == \"host\"\n}",
"func validOptionalPort(port string) bool {\n\tif port == \"\" {\n\t\treturn true\n\t}\n\tif port[0] != ':' {\n\t\treturn false\n\t}\n\tfor _, b := range port[1:] {\n\t\tif b < '0' || b > '9' {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func ValidateHost(s string) (bool, error) {\n\thost := net.ParseIP(s)\n\tif host != nil {\n\t\treturn true, nil\n\t}\n\thostname, _ := net.LookupHost(s)\n\tif len(hostname) > 0 {\n\t\treturn true, nil\n\t}\n\treturn false, fmt.Errorf(\"'%s' does not seem to be a valid IP or Hostname\", s)\n}",
"func splitDockerDomain(name string) (domain, remainder string) {\n\ti := strings.IndexRune(name, '/')\n\tif i == -1 || (!strings.ContainsAny(name[:i], \".:\") && name[:i] != \"localhost\") {\n\t\tdomain, remainder = defaultDomain, name\n\t} else {\n\t\tdomain, remainder = name[:i], name[i+1:]\n\t}\n\tif domain == legacyDefaultDomain {\n\t\tdomain = defaultDomain\n\t}\n\tif domain == defaultDomain && !strings.ContainsRune(remainder, '/') {\n\t\tremainder = officialRepoName + \"/\" + remainder\n\t}\n\treturn\n}",
"func splitDockerDomain(name string) (domain, remainder string) {\n\ti := strings.IndexRune(name, '/')\n\tif i == -1 || (!strings.ContainsAny(name[:i], \".:\") && name[:i] != \"localhost\") {\n\t\tdomain, remainder = defaultDomain, name\n\t} else {\n\t\tdomain, remainder = name[:i], name[i+1:]\n\t}\n\tif domain == legacyDefaultDomain {\n\t\tdomain = defaultDomain\n\t}\n\tif domain == defaultDomain && !strings.ContainsRune(remainder, '/') {\n\t\tremainder = officialRepoName + \"/\" + remainder\n\t}\n\treturn\n}",
"func parseAddress(address string) (scheme, host, port string, err error) {\n\tif address == \"\" {\n\t\treturn\n\t}\n\tif strings.Contains(address, \"://\") {\n\t\turl, err := url.Parse(address)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\t\tscheme, address = url.Scheme, url.Host\n\t}\n\tif strings.Contains(address, \":\") {\n\t\thost, port, err = net.SplitHostPort(address)\n\t\tif err != nil {\n\t\t\thost = address\n\t\t\terr = nil\n\t\t}\n\t} else {\n\t\thost = address\n\t}\n\tif port == \"\" {\n\t\tswitch scheme {\n\t\tcase \"http\", \"ws\":\n\t\t\tport = \"80\"\n\t\tcase \"https\", \"wss\":\n\t\t\tport = \"443\"\n\t\t}\n\t}\n\treturn\n}",
"func Hostname() (string, error)",
"func getHost(r *http.Request) string {\n\tif r.URL.IsAbs() {\n\t\thost := r.Host\n\t\t// Slice off any port information.\n\t\tif i := strings.Index(host, \":\"); i != -1 {\n\t\t\thost = host[:i]\n\t\t}\n\t\treturn host\n\t}\n\treturn r.URL.Host\n}",
"func JoinHostPort(host, port cue.Value) (string, error) {\n\tvar err error\n\thostStr := \"\"\n\tswitch host.Kind() {\n\tcase cue.ListKind:\n\t\tipdata := netGetIP(host)\n\t\tif len(ipdata) != 4 && len(ipdata) != 16 {\n\t\t\terr = fmt.Errorf(\"invalid host %s\", host)\n\t\t}\n\t\thostStr = ipdata.String()\n\tcase cue.BytesKind:\n\t\tvar b []byte\n\t\tb, err = host.Bytes()\n\t\thostStr = string(b)\n\tdefault:\n\t\thostStr, err = host.String()\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tportStr := \"\"\n\tswitch port.Kind() {\n\tcase cue.StringKind:\n\t\tportStr, err = port.String()\n\tcase cue.BytesKind:\n\t\tvar b []byte\n\t\tb, err = port.Bytes()\n\t\tportStr = string(b)\n\tdefault:\n\t\tvar i int64\n\t\ti, err = port.Int64()\n\t\tportStr = strconv.Itoa(int(i))\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn net.JoinHostPort(hostStr, portStr), nil\n}",
"func cleanHost(in string) string {\n\tif i := strings.IndexAny(in, \" /\"); i != -1 {\n\t\treturn in[:i]\n\t}\n\treturn in\n}",
"func RemovePortFromHost(host string) string {\n\tif !containIPv6Addr(host) {\n\t\treturn strings.Split(host, \":\")[0]\n\t}\n\tif containPortIPv6(host) {\n\t\thost = host[:strings.LastIndexByte(host, ':')]\n\t}\n\treturn strings.Trim(host, \"[]\")\n}",
"func LooselyGetHost(hostport string) string {\n\thoststart, hostend := 0, 0\n\tif len(hostport) >= 1 && hostport[0] == '[' {\n\t\thoststart = 1\n\t\thostend = strings.IndexByte(hostport, ']')\n\t} else {\n\t\thostend = strings.IndexByte(hostport, ':')\n\t}\n\tif hostend < 0 {\n\t\thostend = len(hostport)\n\t}\n\treturn hostport[hoststart:hostend]\n}"
] | [
"0.7563116",
"0.74220127",
"0.73175776",
"0.72000736",
"0.717764",
"0.70948535",
"0.68692344",
"0.6828243",
"0.6814598",
"0.66328686",
"0.66094387",
"0.6568446",
"0.6309145",
"0.62275636",
"0.62050116",
"0.6202007",
"0.6161781",
"0.6160127",
"0.61394835",
"0.61028975",
"0.6073383",
"0.6072585",
"0.6062605",
"0.6047155",
"0.60305023",
"0.60069615",
"0.5955481",
"0.5955481",
"0.5955481",
"0.58925855",
"0.58822894",
"0.5881661",
"0.58410674",
"0.58287895",
"0.58242476",
"0.5775377",
"0.57691866",
"0.573835",
"0.56891465",
"0.56796306",
"0.5679189",
"0.5645065",
"0.562279",
"0.562279",
"0.5612393",
"0.55672324",
"0.5484635",
"0.54668224",
"0.544885",
"0.54466796",
"0.54421026",
"0.54246837",
"0.5412363",
"0.54029703",
"0.5371724",
"0.5370882",
"0.53443485",
"0.5338707",
"0.53359133",
"0.5323588",
"0.5316517",
"0.53130084",
"0.53116894",
"0.5296496",
"0.5295246",
"0.52807516",
"0.5273332",
"0.5248248",
"0.5229894",
"0.52201754",
"0.5214348",
"0.5211086",
"0.52040535",
"0.51836425",
"0.51728725",
"0.51728725",
"0.5164821",
"0.5164495",
"0.5163796",
"0.5158458",
"0.51420045",
"0.5136638",
"0.5133657",
"0.5112636",
"0.51067346",
"0.5103487",
"0.5102199",
"0.50962895",
"0.50953543",
"0.50944597",
"0.5082951",
"0.5080084",
"0.5080084",
"0.5075617",
"0.50704014",
"0.50539875",
"0.5029774",
"0.50226986",
"0.5022636",
"0.50005955"
] | 0.71889913 | 4 |
IsValidHostname checks if a string represents a valid hostname. | func IsValidHostname(hostname string) bool {
for _, label := range strings.Split(hostname, ".") {
if len(validation.IsDNS1035Label(label)) > 0 {
return false
}
}
return true
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (p *Parser) validateHostname(host string) bool {\n\t// Lazy regexp init\n\tif p.hostValidate == nil {\n\t\tconst r string = `(?i)^((-?)(xn--|_)?[a-z0-9-_]{0,61}[a-z0-9-_]\\.)*(xn--)?([a-z0-9][a-z0-9\\-]{0,60}|[a-z0-9-]{1,30}\\.[a-z]{2,})$`\n\t\t// @link: https://stackoverflow.com/a/26987741\n\t\tp.hostValidate = regexp.MustCompile(r)\n\t}\n\n\treturn p.hostValidate.Match([]byte(host))\n}",
"func IsValidHostname(hostname string, fqdn bool) bool {\n\tif !fqdn {\n\t\treturn validHostNameRegexp.Match([]byte(hostname)) || validIPv6Regexp.Match([]byte(hostname))\n\t} else {\n\t\treturn validFQDNRegexp.Match([]byte(hostname))\n\t}\n}",
"func ValidateHostname(hostname string) error {\n\tif len(hostname) > 255 {\n\t\treturn fmt.Errorf(\"length exceeds 255 bytes\")\n\t}\n\tif !hostnameRegexp.MatchString(hostname) {\n\t\treturn fmt.Errorf(\"hostname does not match regex %q\", hostnameRegexp)\n\t}\n\tfor _, s := range strings.Split(hostname, \".\") {\n\t\tif len(s) > 63 {\n\t\t\treturn fmt.Errorf(\"segment %q exceeds 63 bytes\", s)\n\t\t}\n\t}\n\treturn nil\n}",
"func IsHostname(toCheckHostname string) bool {\n\treturn RegexDomain.MatchString(toCheckHostname)\n}",
"func ValidateHost(s string) (bool, error) {\n\thost := net.ParseIP(s)\n\tif host != nil {\n\t\treturn true, nil\n\t}\n\thostname, _ := net.LookupHost(s)\n\tif len(hostname) > 0 {\n\t\treturn true, nil\n\t}\n\treturn false, fmt.Errorf(\"'%s' does not seem to be a valid IP or Hostname\", s)\n}",
"func IsHostname(str string) bool {\n\tif !rxHostname.MatchString(str) {\n\t\treturn false\n\t}\n\n\t// the sum of all label octets and label lengths is limited to 255.\n\tif len(str) > 255 {\n\t\treturn false\n\t}\n\n\t// Each node has a label, which is zero to 63 octets in length\n\tparts := strings.Split(str, \".\")\n\tvalid := true\n\tfor _, p := range parts {\n\t\tif len(p) > 63 {\n\t\t\tvalid = false\n\t\t}\n\t}\n\treturn valid\n}",
"func (p *Parser) validateHostname(hostname string) (string, error) {\n\tif len(hostname) == 0 {\n\t\tif p.conf.Hostname != \"\" {\n\t\t\treturn p.conf.Hostname, nil\n\t\t}\n\n\t\treturn \"\", fmt.Errorf(\"hostname is a required field\")\n\t}\n\n\tu, err := url.ParseRequestURI(hostname)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"malformed hostname provided\")\n\t}\n\n\t// TODO: not sure how much validation we want to do - this currently will allow schemes\n\t// that are not http or https\n\n\t// Reformat the url to ensure there is not extra text like a trailing slash.\n\t// NOTE: the prevents people from doing things like http://localhost:3000/v1\n\t// Not sure if we want to allow that or not.\n\treturn fmt.Sprintf(\"%v://%v\", u.Scheme, u.Host), nil\n}",
"func IsIPHostname(input string) bool {\n\tvalidIP := net.ParseIP(input)\n\tif validIP == nil {\n\t\tmatched, err := regexp.MatchString(HostnameRegex, input)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\treturn matched\n\t}\n\n\treturn true\n}",
"func IsHost(str string) bool {\n\treturn IsIP(str) || IsDNSName(str)\n}",
"func isValidDomainName(d string) bool {\n\t// Reject localhost.localdomain\n\tif d == \"localhost.localdomain\" {\n\t\treturn false\n\t}\n\n\t// Run the dns package's check (extremely liberal)\n\tif _, ok := dns.IsDomainName(d); !ok {\n\t\treturn false\n\t}\n\n\t// Check for a valid length\n\tif len(d) < 4 || len(d) > 255 {\n\t\treturn false\n\t}\n\n\t// Check that it has a dot, and the dot is reasonably positioned\n\tif i := strings.IndexByte(d, '.'); i < 1 || i >= len(d)-2 {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func ValidateHost(host string, allowNonCompliant string, hostPath *field.Path) field.ErrorList {\n\tresult := field.ErrorList{}\n\n\tif allowNonCompliant == \"true\" {\n\t\terrs := kvalidation.IsDNS1123Subdomain(host)\n\t\tif len(errs) != 0 {\n\t\t\tresult = append(result, field.Invalid(hostPath, host, fmt.Sprintf(\"host must conform to DNS naming conventions: %v\", errs)))\n\t\t}\n\t} else {\n\t\terrs := kvalidation.IsFullyQualifiedDomainName(hostPath, host)\n\t\tif len(errs) != 0 {\n\t\t\tresult = append(result, field.Invalid(hostPath, host, fmt.Sprintf(\"host must conform to DNS 1123 naming conventions: %v\", errs)))\n\t\t}\n\t}\n\treturn result\n}",
"func isValidHost(targetUrl string) bool {\n\tif !stringsutil.HasPrefixAny(targetUrl, \"http:\", \"https:\") {\n\t\treturn true\n\t}\n\tif networkPolicy == nil {\n\t\treturn true\n\t}\n\turlx, err := urlutil.Parse(targetUrl)\n\tif err != nil {\n\t\t// not a valid url\n\t\treturn false\n\t}\n\ttargetUrl = urlx.Hostname()\n\t_, ok := networkPolicy.ValidateHost(targetUrl)\n\treturn ok\n}",
"func (o *V0037Node) SetHostname(v string) {\n\to.Hostname = &v\n}",
"func (r *Route) IsHostnameMatching(h string) bool {\n\treturn r.hostMatch.MatchString(h)\n}",
"func (o *NSQProducer) SetHostname(v string) {\n\to.Hostname = &v\n}",
"func (o *SmartstackBackend) SetHostname(v string) {\n\to.Hostname = &v\n}",
"func ParseHost(s string) (*Host, error) {\n\tisValidHost := func(host string) bool {\n\t\tif host == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif ip := net.ParseIP(host); ip != nil {\n\t\t\treturn true\n\t\t}\n\n\t\t// host is not a valid IPv4 or IPv6 address\n\t\t// host may be a hostname\n\t\t// refer https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names\n\t\t// why checks are done like below\n\t\tif len(host) < 1 || len(host) > 253 {\n\t\t\treturn false\n\t\t}\n\n\t\tfor _, label := range strings.Split(host, \".\") {\n\t\t\tif len(label) < 1 || len(label) > 63 {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tif !hostLabelRegexp.MatchString(label) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tvar port Port\n\tvar isPortSet bool\n\thost, portStr, err := net.SplitHostPort(s)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"missing port in address\") {\n\t\t\treturn nil, err\n\t\t}\n\t\thost = s\n\t\tportStr = \"\"\n\t} else {\n\t\tif port, err = ParsePort(portStr); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tisPortSet = true\n\t}\n\n\tif host != \"\" {\n\t\thost, err = trimIPv6(host)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// IPv6 requires a link-local address on every network interface.\n\t// `%interface` should be preserved.\n\ttrimmedHost := host\n\n\tif i := strings.LastIndex(trimmedHost, \"%\"); i > -1 {\n\t\t// `%interface` can be skipped for validity check though.\n\t\ttrimmedHost = trimmedHost[:i]\n\t}\n\n\tif !isValidHost(trimmedHost) {\n\t\treturn nil, errors.New(\"invalid hostname\")\n\t}\n\n\treturn &Host{\n\t\tName: host,\n\t\tPort: port,\n\t\tIsPortSet: isPortSet,\n\t}, nil\n}",
"func parseHostname(hostname string) (string, error) {\n\t// TODO does the hostname even need to be parsed?\n\treturn hostname, nil\n}",
"func (s *VcenterClient) SetHostname(v string) *VcenterClient {\n\ts.Hostname = &v\n\treturn s\n}",
"func isValidHostURL(hostURL string) bool {\n\tif strings.TrimSpace(hostURL) == \"\" {\n\t\treturn false\n\t}\n\turl := client.NewURL(hostURL)\n\tif url.Scheme != \"https\" && url.Scheme != \"http\" {\n\t\treturn false\n\t}\n\tif url.Path != \"\" && url.Path != \"/\" {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (o *V0037Node) HasHostname() bool {\n\tif o != nil && o.Hostname != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *EventAttributes) SetHostname(v string) {\n\to.Hostname = &v\n}",
"func domainCheck(host string) bool {\n\tcheck, err := regexp.MatchString(\".[a-z]+$\", host)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn check\n}",
"func (s *RequestDomainControllersStruct) SetHostname(v string) *RequestDomainControllersStruct {\n\ts.Hostname = &v\n\treturn s\n}",
"func (o *ApplianceDeviceClaimAllOf) SetHostname(v string) {\n\to.Hostname = &v\n}",
"func IsDomainName(s string) bool {\n\t// The root domain name is valid. See golang.org/issue/45715.\n\tif s == \".\" {\n\t\treturn true\n\t}\n\n\t// See RFC 1035, RFC 3696.\n\t// Presentation format has dots before every label except the first, and the\n\t// terminal empty label is optional here because we assume fully-qualified\n\t// (absolute) input. We must therefore reserve space for the first and last\n\t// labels' length octets in wire format, where they are necessary and the\n\t// maximum total length is 255.\n\t// So our _effective_ maximum is 253, but 254 is not rejected if the last\n\t// character is a dot.\n\tl := len(s)\n\tif l == 0 || l > 254 || l == 254 && s[l-1] != '.' {\n\t\treturn false\n\t}\n\n\tlast := byte('.')\n\tnonNumeric := false // true once we've seen a letter or hyphen\n\tpartlen := 0\n\tfor i := 0; i < len(s); i++ {\n\t\tc := s[i]\n\t\tswitch {\n\t\tdefault:\n\t\t\treturn false\n\t\tcase 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_':\n\t\t\tnonNumeric = true\n\t\t\tpartlen++\n\t\tcase '0' <= c && c <= '9':\n\t\t\t// fine\n\t\t\tpartlen++\n\t\tcase c == '-':\n\t\t\t// Byte before dash cannot be dot.\n\t\t\tif last == '.' {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tpartlen++\n\t\t\tnonNumeric = true\n\t\tcase c == '.':\n\t\t\t// Byte before dot cannot be dot, dash.\n\t\t\tif last == '.' || last == '-' {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif partlen > 63 || partlen == 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tpartlen = 0\n\t\t}\n\t\tlast = c\n\t}\n\tif last == '-' || partlen > 63 {\n\t\treturn false\n\t}\n\n\treturn nonNumeric\n}",
"func (s *ContainerDefinition) SetHostname(v string) *ContainerDefinition {\n\ts.Hostname = &v\n\treturn s\n}",
"func (s *HostEntry) SetHostname(v string) *HostEntry {\n\ts.Hostname = &v\n\treturn s\n}",
"func (o *ShowSystem) SetHostname(v string) {\n\to.Hostname = &v\n}",
"func ValidateDomainName(name string) error {\n\tif len(name) > maxHostNameLen {\n\t\treturn errors.New(\"validate domain name length failed\")\n\t}\n\treturn ValidateRegexp(name, DomainPattern, \"validate domain name failed\")\n}",
"func ValidName(str string) bool {\n\tvar nameRegex = regexp.MustCompile(`^[a-zA-Z0-9\\-._]{0,80}$`)\n\treturn nameRegex.MatchString(str)\n}",
"func (s *IdentificationHints) SetHostname(v string) *IdentificationHints {\n\ts.Hostname = &v\n\treturn s\n}",
"func (o *NodeUpdate) SetHostname(v string) {\n\to.Hostname = v\n}",
"func (s *DomainController) SetHostname(v string) *DomainController {\n\ts.Hostname = &v\n\treturn s\n}",
"func isValidHost(h string) bool {\n\tif strings.Contains(h, \"global-sidecar\") ||\n\t\tstrings.Contains(h, \":\") ||\n\t\tstrings.Contains(h, \"unknown\") {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (_this *URL) SetHostname(value string) {\n\tinput := value\n\t_this.Value_JS.Set(\"hostname\", input)\n}",
"func isHostnamePort(fl FieldLevel) bool {\n\tval := fl.Field().String()\n\thost, port, err := net.SplitHostPort(val)\n\tif err != nil {\n\t\treturn false\n\t}\n\t// Port must be a iny <= 65535.\n\tif portNum, err := strconv.ParseInt(\n\t\tport, 10, 32,\n\t); err != nil || portNum > 65535 || portNum < 1 {\n\t\treturn false\n\t}\n\n\t// If host is specified, it should match a DNS name\n\tif host != \"\" {\n\t\treturn hostnameRegexRFC1123.MatchString(host)\n\t}\n\treturn true\n}",
"func validDNSLabel(s string) bool {\n\tif len(s) > 63 {\n\t\treturn false\n\t}\n\n\tfor i, r := range s {\n\t\tif i == 0 || i == len(s)-1 {\n\t\t\tif (r < 'a' || r > 'z') && (r < '0' || r > '9') {\n\t\t\t\treturn false\n\t\t\t}\n\t\t} else {\n\t\t\tif (r < 'a' || r > 'z') && (r < '0' || r > '9') && (r != '-') {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true\n}",
"func ConnVerifyHostname(c *tls.Conn, host string) error",
"func SetHostname(h string) {\n\thostname = h\n}",
"func (d *Docker) checkHostname(env []string) bool {\n\tfor _, e := range env {\n\t\tps := strings.SplitN(e, \"=\", 2)\n\t\tif len(ps) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tif ps[0] == \"ERU_NODE_NAME\" && ps[1] == d.config.HostName {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (m *socatManager) ValidHost(host string) bool {\n\t_, err := getSource(host)\n\treturn err == nil\n}",
"func (o *SmartstackBackend) HasHostname() bool {\n\tif o != nil && o.Hostname != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func IsValidUsername(s string) bool {\n\tif len(s) < UsernameMinLength || len(s) > UsernameMaxLength {\n\t\treturn false\n\t}\n\tif !regexp.MustCompile(`^[\\p{L}0-9\\.\\-_]+$`).MatchString(s) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func Host(hostname string) (string, error) {\n\tif hostname == \"\" {\n\t\treturn \"\", trace.BadParameter(\"missing parameter hostname\")\n\t}\n\t// if this is IPv4 or V6, return as is\n\tif ip := net.ParseIP(hostname); len(ip) != 0 {\n\t\treturn hostname, nil\n\t}\n\t// has no indication of port, return, note that\n\t// it will not break ipv6 as it always has at least one colon\n\tif !strings.Contains(hostname, \":\") {\n\t\treturn hostname, nil\n\t}\n\thost, _, err := SplitHostPort(hostname)\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\treturn host, nil\n}",
"func isDomainName(s string) bool {\n\t// See RFC 1035, RFC 3696.\n\t// Presentation format has dots before every label except the first, and the\n\t// terminal empty label is optional here because we assume fully-qualified\n\t// (absolute) input. We must therefore reserve space for the first and last\n\t// labels' length octets in wire format, where they are necessary and the\n\t// maximum total length is 255.\n\t// So our _effective_ maximum is 253, but 254 is not rejected if the last\n\t// character is a dot.\n\tl := len(s)\n\tif l == 0 || l > 254 || l == 254 && s[l-1] != '.' {\n\t\treturn false\n\t}\n\n\tlast := byte('.')\n\tnonNumeric := false // true once we've seen a letter or hyphen\n\tpartlen := 0\n\tfor i := 0; i < len(s); i++ {\n\t\tc := s[i]\n\t\tswitch {\n\t\tdefault:\n\t\t\treturn false\n\t\tcase 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_':\n\t\t\tnonNumeric = true\n\t\t\tpartlen++\n\t\tcase '0' <= c && c <= '9':\n\t\t\t// fine\n\t\t\tpartlen++\n\t\tcase c == '-':\n\t\t\t// Byte before dash cannot be dot.\n\t\t\tif last == '.' {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tpartlen++\n\t\t\tnonNumeric = true\n\t\tcase c == '.':\n\t\t\t// Byte before dot cannot be dot, dash.\n\t\t\tif last == '.' || last == '-' {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif partlen > 63 || partlen == 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tpartlen = 0\n\t\t}\n\t\tlast = c\n\t}\n\tif last == '-' || partlen > 63 {\n\t\treturn false\n\t}\n\n\treturn nonNumeric\n}",
"func (c *Conn) VerifyHostname(host string) error {\n\tcert, err := c.PeerCertificate()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn cert.VerifyHostname(host)\n}",
"func (m *SiteCollection) SetHostname(value *string)() {\n m.hostname = value\n}",
"func (h *Hostname) DeepCopy() *Hostname {\n\tif h == nil {\n\t\treturn nil\n\t}\n\tout := new(Hostname)\n\th.DeepCopyInto(out)\n\treturn out\n}",
"func IsPlainHostName(host string) bool {\n\treturn strings.Index(host, \".\") == -1\n}",
"func IsValidUrl(str string) bool {\n\tu, err := url.Parse(str)\n\treturn err == nil && u.Scheme != \"\" && u.Host != \"\"\n}",
"func (o *ShowSystem) HasHostname() bool {\n\tif o != nil && !IsNil(o.Hostname) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (f *FastURL) SetHostname(hostname string) {\n\tf.hostname = append(f.hostname[:0], hostname...)\n}",
"func hostnameMatchesWildcardHostname(hostname, wildcardHostname string) bool {\n\tif !strings.HasSuffix(hostname, strings.TrimPrefix(wildcardHostname, allHosts)) {\n\t\treturn false\n\t}\n\n\twildcardMatch := strings.TrimSuffix(hostname, strings.TrimPrefix(wildcardHostname, allHosts))\n\treturn len(wildcardMatch) > 0\n}",
"func HasHost(hostname string) bool {\n\t_, exists := GetHost(strings.ToLower(hostname))\n\treturn exists\n}",
"func IsValidResourceName(name string) error {\n\tif len(name) <= 2 {\n\t\treturn fmt.Errorf(\"name is too short. Must be at least 3 characters long\")\n\t}\n\tif !govalidator.Matches(name, \"^[a-z0-9][a-zA-Z0-9_-]+$\") {\n\t\tif name != \"\" && (name[0] == '_' || name[0] == '-') {\n\t\t\treturn fmt.Errorf(\"invalid identifier; identifier cannot start with _ or - character\")\n\t\t}\n\t\treturn fmt.Errorf(\"invalid identifier; only alphanumeric, _, and - characters are allowed\")\n\t}\n\tif len(name) > MaxResourceNameLength {\n\t\treturn fmt.Errorf(\"name is too long. Maximum character length is %d\", MaxResourceNameLength)\n\t}\n\treturn nil\n}",
"func IsValidName(s string) bool {\n\tregex := regexp.MustCompile(`^[0-9A-Za-z_]*$`)\n\treturn regex.MatchString(s)\n}",
"func validDelegateServerName(n string) bool {\n\tif n == \"\" {\n\t\treturn false\n\t}\n\tif !strings.Contains(n, \".\") {\n\t\treturn false\n\t}\n\tif strings.Contains(n, \":\") {\n\t\t// Contains port or is IPv6 literal.\n\t\treturn false\n\t}\n\tif net.ParseIP(n) != nil {\n\t\t// No IPs.\n\t\treturn false\n\t}\n\tif \"x://\"+n != (&url.URL{Scheme: \"x\", Host: n}).String() {\n\t\t// name must have contained invalid characters and caused escaping.\n\t\treturn false\n\t}\n\treturn true\n}",
"func (o *ServiceBrokerOpenstacksHostsGetParams) SetHostname(hostname string) {\n\to.Hostname = hostname\n}",
"func isValidURL(str string) bool {\n\tu, err := url.Parse(str)\n\treturn err == nil && u.Scheme != \"\" && u.Host != \"\"\n}",
"func ValidateHost(host string) (err error) {\n\tverResp, err := http.Get(host + \"/api/server/version\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif verResp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"❌ Invalid host, unable to obtain server version, status code %d\", verResp.StatusCode)\n\t}\n\treturn nil\n}",
"func IsFQDN(fqdn, hostname string) bool {\n\treturn len(fqdn) > len(hostname) && strings.HasPrefix(fqdn, hostname)\n}",
"func IsValidUsername(s string) bool {\n\tif s == \"\" {\n\t\treturn false\n\t}\n\n\tif len(s) > MaxUsernameLen {\n\t\treturn false\n\t}\n\n\tfor i, r := range s {\n\t\tif r >= 'a' && r <= 'z' {\n\t\t\tcontinue\n\t\t}\n\n\t\tif i > 0 && r >= '0' && r <= '9' {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (o *NSQProducer) HasHostname() bool {\n\tif o != nil && o.Hostname != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func validateEmailHost(address string) int {\n\tif(!validateEmailAddress(address)){}\n\t\treturn 0\n\t}",
"func matchHostname(image, hostname string) bool {\n\tref, err := reference.ParseNamed(image)\n\tif err != nil {\n\t\treturn false\n\t}\n\thName, _ := reference.SplitHostname(ref)\n\treturn hName == hostname\n}",
"func IsDefaultHostname(hostname string) bool {\n\treturn isDefaultHostname(hostname, config.Datadog.GetBool(\"ec2_use_windows_prefix_detection\"))\n}",
"func (h *handler) ValidateDNSName(clusterName, baseDNSDomainName string) error {\n\tappsDomainNameSuffix := fmt.Sprintf(appsDomainNameFormat, clusterName, baseDNSDomainName)\n\tapiErrorCode, err := validations.ValidateDomainNameFormat(baseDNSDomainName)\n\tif err != nil {\n\t\treturn common.NewApiError(apiErrorCode, err)\n\t}\n\tif len(appsDomainNameSuffix) > dnsDomainPrefixMaxLen {\n\t\treturn errors.Errorf(\"Combination of cluster name and base DNS domain too long\")\n\t}\n\tfor _, label := range strings.Split(appsDomainNameSuffix, \".\") {\n\t\tif len(label) > dnsDomainLabelLen {\n\t\t\treturn errors.Errorf(\"DNS label '%s' is longer than 63 bytes\", label)\n\t\t}\n\t}\n\treturn nil\n}",
"func NewAcceptInvalidHostname(CABundleFile, CABundleDir string, httpTimeout time.Duration, hostname string) (*http.Client, error) {\n\treturn _new(CABundleFile, CABundleDir, httpTimeout, hostname)\n}",
"func (o *EventAttributes) HasHostname() bool {\n\treturn o != nil && o.Hostname != nil\n}",
"func IsValidUsername(username string) bool {\n\treturn usernameRegex.MatchString(username)\n}",
"func (c *hostNameFormatConfig) IsValid(name string) bool {\n\tfor _, validator := range c.validators {\n\t\tif !validator.IsValid(name) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func IsDNSName(str string) bool {\n\tif str == \"\" || len(strings.Replace(str, \".\", \"\", -1)) > 255 {\n\t\t// constraints already violated\n\t\treturn false\n\t}\n\treturn !IsIP(str) && rxDNSName.MatchString(str)\n}",
"func IsValidEmailAddress(str string) bool {\n\t// non-ascii\n\treNotASCII := regexp.MustCompile(`[^a-z0-9\\@\\.\\-\\+\\_]`)\n\n\treturn !reNotASCII.MatchString(str)\n}",
"func ValidateHost(host string, config *Config) error {\n\t_, ok := config.Topology[host]\n\tif !ok {\n\t\treturn fmt.Errorf(\"unable to find info for host %q in config file\", host)\n\t}\n\treturn nil\n}",
"func validRemoteName(name string) bool {\n\tmatcher := regexp.MustCompile(\"^[[:alpha:]]+$\")\n\tif !matcher.MatchString(name) {\n\t\tlog.Printf(\"%s is not a valid name for a remote!\")\n\t\treturn false\n\t}\n\treturn true\n}",
"func IsValidUsername(username string) bool {\n\treturn usernamePattern.MatchString(username)\n}",
"func (o *ApplianceDeviceClaimAllOf) HasHostname() bool {\n\tif o != nil && o.Hostname != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (h *Handlers) ValidateURL(input string) bool {\n\tu, err := url.Parse(input)\n\n\tfmt.Println(err, u.Scheme, u.Host)\n\tif err != nil || u.Scheme == \"\" || !strings.Contains(u.Host, \".\") {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (h *FriendlyHost) Valid() bool {\n\treturn svchost.IsValid(h.Raw)\n}",
"func isLocalhost(hostname string) bool {\n\treturn hostname == \"localhost\" ||\n\t\thostname == \"127.0.0.1\" ||\n\t\thostname == \"::1\"\n}",
"func (d *Domain) Validate(v validate.Validator) validate.Error {\n\t//func IsDomain(p []byte) (res validate.Result) {\n\t// Domain rules:\n\t// - 255 character total length max\n\t// - 63 character label max\n\t// - 127 sub-domains\n\t// - Characters a-z, A-Z, 0-9, and -\n\t// - Labels may not start or end with -\n\t// - TLD may not be all numeric\n\n\t// Check for max length.\n\t// NOTE: Invalid unicode will count as a 1 byte rune, but we'll catch that\n\t// later.\n\n\tp := d.domain\n\t// If a max length was specified, use it\n\tif d.checks[\"maxlength\"] != nil &&\n\t\tutf8.RuneCount(p) > d.checks[\"maxlength\"].(int) {\n\t\treturn ErrDomainLength\n\n\t} else if utf8.RuneCount(p) > 255 {\n\t\treturn ErrDomainLength\n\t}\n\n\t// First we split by label\n\tdomain := bytes.Split(p, []byte(\".\"))\n\t// 127 sub-domains max (not including TLD)\n\tif len(domain) > 128 {\n\t\treturn ErrDomainLength\n\t}\n\n\tif d.checks[\"minsubs\"] != nil && len(domain) < d.checks[\"minsubs\"].(int)+1 {\n\t\treturn ErrDomainLength\n\t}\n\tif d.checks[\"maxsubs\"] != nil && len(domain) > d.checks[\"maxsubs\"].(int)+1 {\n\t\treturn ErrDomainLength\n\t}\n\t// Check each domain for valid characters\n\tfor _, subDomain := range domain {\n\t\tlength := len(subDomain)\n\t\t// Check for a domain with two periods next to eachother.\n\t\tif length < 1 {\n\t\t\treturn ErrFormatting\n\t\t}\n\n\t\t// Check 63 character max.\n\t\tif length > 63 {\n\t\t\treturn ErrDomainLength\n\t\t}\n\n\t\t// Check that label doesn't start or end with hyphen.\n\t\tr, size := utf8.DecodeRune(subDomain)\n\t\tif r == utf8.RuneError && size == 1 {\n\t\t\t// Invalid rune\n\t\t\treturn validate.ErrInvalidUTF8\n\t\t}\n\n\t\tif r == '-' {\n\t\t\treturn ErrFormatting\n\t\t}\n\n\t\tr, size = utf8.DecodeLastRune(subDomain)\n\t\tif r == utf8.RuneError && size == 1 {\n\t\t\t// Invalid rune\n\t\t\treturn validate.ErrInvalidUTF8\n\t\t}\n\n\t\tif r == '-' {\n\t\t\treturn ErrFormatting\n\t\t}\n\n\t\t// Now we check each rune individually to make sure its valid unicode\n\t\t// and an acceptable character.\n\t\tfor i := 0; i < length; {\n\t\t\tif subDomain[i] < utf8.RuneSelf {\n\t\t\t\t// Check if it's a valid domain character\n\t\t\t\tif !unicode.Is(domainTable, rune(subDomain[i])) {\n\t\t\t\t\treturn ErrFormatting\n\t\t\t\t}\n\t\t\t\ti++\n\t\t\t} else {\n\t\t\t\tr, size := utf8.DecodeRune(subDomain[i:])\n\t\t\t\tif size == 1 {\n\t\t\t\t\t// All valid runes of size 1 (those\n\t\t\t\t\t// below RuneSelf) were handled above.\n\t\t\t\t\t// This must be a RuneError.\n\t\t\t\t\treturn validate.ErrInvalidUTF8\n\t\t\t\t}\n\t\t\t\t// Check if it's a valid domain character\n\t\t\t\tif !unicode.Is(domainTable, r) {\n\t\t\t\t\treturn ErrFormatting\n\t\t\t\t}\n\t\t\t\ti += size\n\t\t\t}\n\t\t}\n\t}\n\n\t// We have all valid unicode characters, now make sure the TLD is real.\n\t// TODO(inhies): Add check for an all numeric TLD.\n\tdomainTLD := domain[len(domain)-1]\n\tif tld.Valid(domainTLD) {\n\t\treturn nil\n\t}\n\n\t// Not sure how we got here, but lets return false just in case.\n\treturn ErrUnknown\n}",
"func NormalizeHost(host string) (string, error) {\n\tvar buf bytes.Buffer\n\n\t// hosts longer than 253 characters are illegal\n\tif len(host) > 253 {\n\t\treturn \"\", fmt.Errorf(\"hostname is too long, should contain less than 253 characters\")\n\t}\n\n\tfor _, r := range host {\n\t\tswitch r {\n\t\t// has null rune just toss the whole thing\n\t\tcase '\\x00':\n\t\t\treturn \"\", fmt.Errorf(\"hostname cannot contain null character\")\n\t\t// drop these characters entirely\n\t\tcase '\\n', '\\r', '\\t':\n\t\t\tcontinue\n\t\t// replace characters that are generally used for xss with '-'\n\t\tcase '>', '<':\n\t\t\tbuf.WriteByte('-')\n\t\tdefault:\n\t\t\tbuf.WriteRune(r)\n\t\t}\n\t}\n\n\treturn buf.String(), nil\n}",
"func hasDomain(hostname string, whitelist []string) bool {\n\tfor _, domain := range whitelist {\n\t\t// @step: we need to remove whitelist\n\t\tdomain = strings.Replace(domain, \" \", \"\", -1)\n\t\twildcard := strings.HasPrefix(domain, \"*.\")\n\t\tswitch wildcard {\n\t\tcase true:\n\t\t\t// a quick hacky check to ensure the you don't have subdomains\n\t\t\tsize := len(strings.Split(domain, \".\"))\n\t\t\thostSize := len(strings.Split(hostname, \".\"))\n\t\t\tif size != hostSize {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tdomain = strings.TrimPrefix(domain, \"*\")\n\t\t\tif strings.HasSuffix(hostname, domain) {\n\t\t\t\treturn true\n\t\t\t}\n\t\tdefault:\n\t\t\t// @check there is an exact match between hostname and whitelist\n\t\t\tif hostname == domain {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}",
"func IsDomain(s string) bool {\n\treturn !IsIP(s) && \"localhost\" != s\n}",
"func isValidString(str string) bool {\n\tif len(strings.TrimSpace(str)) <= 2 { //if there is nothing in string other than \"\", then it is invalid string\n\t\treturn false\n\t}\n\tvar invalidSubstrings = []string{\"/\", \"\\\\\", \"{\", \"}\", \"http\", \"https\", \".com\", \"#\", \"%\", \"identifier\"} //these strings are not allowed in a string to be put in constant or translated\n\tfor _, subStr := range invalidSubstrings {\n\t\tif strings.Contains(strings.ToLower(str), subStr) { //if lowerCased(str) contains invalid substring, then str is invalid\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func ValidateAddress(address string) error {\n\t// TODO: this list is not extensive and needs to be changed once we allow DNS\n\t// names for external metrics endpoints\n\tconst invalidChars = `abcdefghijklmnopqrstuvwxyz/\\ `\n\n\taddress = strings.ToLower(address)\n\tif strings.ContainsAny(address, invalidChars) {\n\t\treturn errors.New(\"invalid character detected (required format: <IP>:<PORT>)\")\n\t}\n\n\t// \tcheck if port if specified\n\tif !strings.Contains(address, \":\") {\n\t\treturn errors.New(\"no port specified\")\n\t}\n\n\th, p, err := net.SplitHostPort(address)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif h == \"\" {\n\t\treturn errors.New(\"no IP listen address specified\")\n\t}\n\n\tif p == \"\" {\n\t\treturn errors.New(\"no port specified\")\n\t}\n\n\treturn nil\n}",
"func (o *ServiceCheck) SetHostName(v string) {\n\to.HostName = v\n}",
"func looksLikeHostport(s string) bool {\n\tif !strings.Contains(s, \":\") {\n\t\treturn false\n\t} else {\n\t\thostport := strings.SplitN(s, \":\", 2)\n\t\tif _, err := regexp.MatchString(\"[0-9]+\", hostport[1]); err == nil {\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n}",
"func (c *Connection) validateHost() error {\n\tif check := checkIPConnection(c.Config.Host, c.Config.Port); check {\n\t\treturn nil\n\t}\n\n\tips, err := resolveDNS(c.Config.Host)\n\tif err != nil {\n\t\tlog.Println(\"failed to resolve host: \", err)\n\n\t\treturn err\n\t}\n\n\tfor _, ip := range ips {\n\t\tif check := checkIPConnection(ip.String(), c.Config.Port); check {\n\t\t\tc.Config.Host = ip.String()\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}",
"func shouldDiscoverHost(name string) bool {\n\tparts := strings.Split(name, \".\")\n\tif len(parts) == 1 {\n\t\tif parts[0] == \"localhost\" {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\treturn parts[len(parts)-1] == domain\n}",
"func Valid(s string) bool { return Convert(s) == s && s != \"\" }",
"func IsDNS(str string) bool {\n\tif str == \"\" || len(strings.Replace(str, \".\", \"\", -1)) > 255 {\n\t\t// constraints already violated\n\t\treturn false\n\t}\n\treturn !IsIP(str) && rxDNSName.MatchString(str)\n}",
"func hostname(hostport string) (string, error) {\n\thost, _, err := net.SplitHostPort(hostport)\n\treturn host, err\n}",
"func IsValidHash(obj string) bool {\n\treturn hashPtn.MatchString(obj)\n}",
"func ValidHash(s string) bool {\n\treturn len(s) == sha1.Size*2\n}",
"func (v URL) IsValid() bool {\n\tif v == \"\" || len(v) >= 2083 || len(v) <= 3 || strings.HasPrefix(v.String(), \".\") {\n\t\treturn false\n\t}\n\tu, err := netURL.Parse(v.String())\n\tif err != nil {\n\t\treturn false\n\t}\n\tif strings.HasPrefix(u.Host, \".\") {\n\t\treturn false\n\t}\n\tif u.Host == \"\" && (u.Path != \"\" && !strings.Contains(u.Path, \".\")) {\n\t\treturn false\n\t}\n\ttmp := strings.Split(u.Host, \":\")\n\tif tmp[0] == \"localhost\" {\n\t\treturn true\n\t}\n\treturn rxURL.Match([]byte(v.String()))\n}",
"func ValidateSlug(fl v.FieldLevel) bool {\n\tm, _ := regexp.MatchString(\"^[a-z0-9]+[a-z0-9-]+[a-z0-9]+$\", fl.Field().String())\n\treturn m\n}",
"func IsValidUsername(s string) bool {\n\tif s == \"\" {\n\t\treturn false\n\t}\n\n\t// match twitter names like @name_123\n\tr, err := regexp.Compile(`^[@](\\w){1,15}$`)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn r.MatchString(s)\n}",
"func isValidUsername(s string) bool {\n\tif utf8.RuneCountInString(s) > 30 {\n\t\treturn false\n\t}\n\t// ^[a-zA-Z0-9\\uac00-\\ud7a3]+$\n\tr, err := regexp.Compile(\"^[a-zA-Z0-9\\uac00-\\ud7a3._]+$\")\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn r.MatchString(s)\n}"
] | [
"0.79797286",
"0.77808625",
"0.77077204",
"0.7385047",
"0.73775864",
"0.7297476",
"0.6890047",
"0.66355765",
"0.6396525",
"0.6324849",
"0.62274617",
"0.61423105",
"0.6126824",
"0.60815984",
"0.6032941",
"0.59857845",
"0.59436375",
"0.59223866",
"0.58482933",
"0.58423096",
"0.58288103",
"0.582368",
"0.58229095",
"0.5821159",
"0.58173025",
"0.58049995",
"0.5792281",
"0.5787452",
"0.57802045",
"0.5768933",
"0.57658106",
"0.57631314",
"0.57553357",
"0.57376987",
"0.57321244",
"0.57238925",
"0.57139903",
"0.56819415",
"0.5662152",
"0.5649858",
"0.5637927",
"0.56249946",
"0.5616692",
"0.55701864",
"0.556967",
"0.5545634",
"0.5541715",
"0.55280787",
"0.55229425",
"0.550699",
"0.54956996",
"0.54944384",
"0.54870945",
"0.5451081",
"0.54190326",
"0.54149306",
"0.5400104",
"0.538117",
"0.53709614",
"0.534355",
"0.5342492",
"0.5337626",
"0.5328957",
"0.53249454",
"0.531104",
"0.5297125",
"0.5286174",
"0.5266876",
"0.52601933",
"0.52424824",
"0.5224549",
"0.5221379",
"0.52122545",
"0.52095795",
"0.52007073",
"0.5199903",
"0.5199095",
"0.518816",
"0.5182497",
"0.51744366",
"0.51692826",
"0.51506317",
"0.51294464",
"0.5117648",
"0.5103788",
"0.5101505",
"0.5088344",
"0.5083895",
"0.50830483",
"0.5082867",
"0.50741696",
"0.5071562",
"0.5066744",
"0.5060422",
"0.5041471",
"0.50405544",
"0.5040195",
"0.5039829",
"0.5035196",
"0.5019625"
] | 0.7682102 | 3 |
ReadPath reads file contents | func ReadPath(path string) ([]byte, error) {
if path == "" {
return nil, trace.NotFound("empty path")
}
s, err := filepath.Abs(path)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
abs, err := filepath.EvalSymlinks(s)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
bytes, err := os.ReadFile(abs)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
return bytes, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func ReadFilePath(path string) ([]byte, error) {\n\tfileData, err := os.Open(path)\n\tdefer fileData.Close()\n\tdata, err := ioutil.ReadAll(fileData)\n\treturn data, err\n}",
"func ReadPath(ctx context.Context, readBucket ReadBucket, path string) (_ []byte, retErr error) {\n\treadObject, err := readBucket.Get(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tif err := readObject.Close(); err != nil && retErr == nil {\n\t\t\tretErr = err\n\t\t}\n\t}()\n\treturn ioutil.ReadAll(readObject)\n}",
"func Read(path string) (string, error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\n\tfd, err := ioutil.ReadAll(f)\n\treturn string(fd), err\n}",
"func ReadAll(path string) (string, error) {\n\tf, err := os.Open(path)\n\tdefer f.Close()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcontents, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(contents), nil\n}",
"func (self PathReader) Read(path string) (io.Reader, error) {\n bufBytes := make([]byte, 0)\n wa := NewWriteAtBuffer(bufBytes[:])\n err := readPath(self, path, wa)\n if err != nil {\n return nil, errors.Wrap(err, \"Error while reading path\")\n }\n return bytes.NewReader(wa.Bytes()), nil\n}",
"func Read(t *testing.T, paths ...string) []byte {\n\tt.Helper()\n\n\tpath := filepath.Join(paths...)\n\tfile, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tt.Fatalf(\"cannot read %v: %v\", path, err)\n\t}\n\treturn file\n}",
"func (f FileManager) Read(path string) ([]byte, error) {\n\tb, err := ioutil.ReadFile(path)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, err\n\t}\n\n\treturn b, err\n}",
"func readUserFriendlyFilePath(path string) ([]byte, error) {\n\tpath, err := homedir.Expand(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to retrieve key path: %v\", err)\n\t}\n\treturn ioutil.ReadFile(path)\n}",
"func readFile(path string) ([]byte, error) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\treturn ioutil.ReadAll(file)\n}",
"func readFile(path string) ([]byte, error) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\treturn ioutil.ReadAll(file)\n}",
"func (e *Echo) Read(path string) fs.FileReader {\n\treturn fs.Read(e.Eval(path))\n}",
"func (this *File) readPath(path string) error {\n\tthis.log.Debug2(\"<persistence.File>ReadPath{ path=%v }\", strconv.Quote(path))\n\n\t// Append home directory if relative path\n\tif filepath.IsAbs(path) == false {\n\t\tif homedir, err := os.UserHomeDir(); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tpath = filepath.Join(homedir, path)\n\t\t}\n\t}\n\n\t// Set path\n\tthis.path = path\n\n\t// Append filename\n\tif stat, err := os.Stat(this.path); err == nil && stat.IsDir() {\n\t\t// append default filename\n\t\tthis.path = filepath.Join(this.path, this.filename_default)\n\t}\n\n\t// Read file\n\tif stat, err := os.Stat(this.path); err == nil && stat.Mode().IsRegular() {\n\t\tif err := this.readPath_(this.path); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t} else if os.IsNotExist(err) {\n\t\t// Create file\n\t\tif fh, err := os.Create(this.path); err != nil {\n\t\t\treturn err\n\t\t} else if err := fh.Close(); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tthis.SetModified()\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\treturn err\n\t}\n}",
"func (P *Parser) readFile(filepath string) (string, error) {\n\tif P.fs == nil {\n\t\tb, err := ioutil.ReadFile(filepath)\n\t\treturn string(b), err\n\t}\n\n\t// some filepath checks\n\tif filepath == \"\" {\n\t\treturn \"\", fmt.Errorf(\"readFile: filepath empty\")\n\t}\n\tif filepath[0:1] != \"/\" {\n\t\tfilepath = \"/\" + filepath\n\t}\n\n\t// Open the file\n\tF, err := P.fs.Open(filepath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Read contents\n\tb, err := ioutil.ReadAll(F)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(b), nil\n}",
"func (realFS) ReadFile(name string) ([]byte, error) { return ioutil.ReadFile(name) }",
"func Read(path string) ([]byte, error) {\n\tr, err := os.OpenFile(path, os.O_RDONLY, 0644)\n\tdefer r.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ioutil.ReadAll(r)\n}",
"func ReadContents(path string) []byte {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\tlog.Fatalf(\"os.Open() failed with '%s'\\n\", err)\n\t}\n\tdefer f.Close()\n\n\td, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\tlog.Fatalf(\"ioutil.ReadAll() failed with '%s'\\n\", err)\n\t}\n\treturn d\n}",
"func ReadContent(path string) (string, error) {\n\t_, err := os.Stat(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn \"\", fmt.Errorf(\"file [%s] does not exist\", path)\n\t\t}\n\t\treturn \"\", err\n\t}\n\tcontent, err := os.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(content), nil\n}",
"func ReadingFiles(path string, fileName string) string {\n\tfile, err := os.Open(fmt.Sprint(path, fileName))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tCreateFileDayError(fmt.Sprint(\"O arquivo '\", fileName, \"' não está mais na pasta!\"))\n\t}\n\tdefer file.Close()\n\n\tb, err := ioutil.ReadAll(file)\n\treturn string(b)\n}",
"func (o OSFS) ReadFile(path string) ([]byte, error) {\n\treturn ioutil.ReadFile(path)\n}",
"func readFile(path string) ([]byte, error) {\n\tp, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}",
"func ReadFile(path string) []byte {\n\tfile, err := physfs.Open(path)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlength, err := file.Length()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tbuffer := make([]byte, length)\n\tfile.Read(buffer)\n\n\treturn buffer\n}",
"func ReadFile(path string) ([]byte, error) {\n\tfileContent, err := ioutil.ReadFile(filepath.Join(path))\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, PathNotFoundMsg, path)\n\t}\n\ts := string(fileContent)\n\ts = strings.Replace(s, \"\\r\\n\", \"\\r\", -1)\n\tfileContent = []byte(s)\n\treturn fileContent, nil\n}",
"func ReadFile(t *testing.T, path string) []byte {\n\texpandedPath := ExpandFilepath(t, path)\n\n\treadData, err := ioutil.ReadFile(expandedPath)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to read in testdata from %#v: %v\", expandedPath, err)\n\t}\n\treturn readData\n}",
"func ReadEntireFile(path string) string {\n\tf, err := ioutil.ReadFile(path)\n\tcommon.CheckError(err)\n\treturn string(f)\n}",
"func readFile(path string) []byte {\n\t_l := metrics.StartLogDiff(\"read-file\")\n\n\t// Do it\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tmetrics.StopLogDiff(\"read-file\", _l)\n\treturn data\n}",
"func ReadFile(fullpath string) []byte {\n\tcontent, err := os.ReadFile(fullpath)\n\tif err != nil {\n\t\tpanic(\"failed to read from \" + fullpath + \": \" + err.Error())\n\t}\n\treturn content\n}",
"func (file *File) read() ([]byte, error) {\n\treturn os.ReadFile(file.Path())\n}",
"func readFile(t *testing.T, path string) string {\n\tt.Helper()\n\tcontent, err := ioutil.ReadFile(path)\n\tassert.NilError(t, err, \"missing '\"+path+\"' file\")\n\treturn strings.Replace(string(content), \"\\r\", \"\", -1)\n}",
"func ReadFile(path string) (string, error) {\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(data), nil\n}",
"func ReadMountedFileContent(path string) string {\n\tif _, err := os.Stat(path); e.Is(err, os.ErrNotExist) {\n\t\tklog.Exit(\"Path \", path, \" does not exist, exiting\")\n\t}\n\tcontent, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tklog.Exit(\"Failed to open path \", path)\n\t}\n\treturn string(content)\n}",
"func ReadArchiverPath(id int) string {\n\treturn fmt.Sprintf(\"/archive/%v\", id)\n}",
"func (r RootFileSource) ReadTestFile(filePath string) ([]byte, error) {\n\tvar fullPath string\n\tif path.IsAbs(filePath) {\n\t\tfullPath = filePath\n\t} else {\n\t\tfullPath = filepath.Join(r.Root, filePath)\n\t}\n\tdata, err := ioutil.ReadFile(fullPath)\n\tif os.IsNotExist(err) {\n\t\t// Not an error (yet), some other provider may have the file.\n\t\treturn nil, nil\n\t}\n\treturn data, err\n}",
"func readFile (path string) string {\n\tbytesStrFile, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tfmt.Print(err)\n\t}\n\treturn string(bytesStrFile)\n}",
"func ReadFile(path string) string {\n\tvar buff, err = os.ReadFile(path)\n\tif IsError(err, `ReadFile: %s`, path) {\n\t\treturn ``\n\t}\n\treturn string(buff)\n}",
"func readFile(path string) []byte {\n\tvar fileBytes []byte\n\tvar err error\n\n\tlogger := log.WithField(\"path\", path)\n\tlogger.Infof(\"Reading file bytes\")\n\n\tif !FileExists(path) {\n\t\tlogger.Fatal(\"Can't find file\")\n\t}\n\tif fileBytes, err = ioutil.ReadFile(path); err != nil {\n\t\tlogger.Fatalf(\"Error on read file: '%s'\", err)\n\t}\n\treturn fileBytes\n}",
"func ReadFile(relativePath string) ([]byte, error) {\n\tabsPath, absPathErr := filepath.Abs(relativePath)\n\tif absPathErr != nil {\n\t\treturn nil, absPathErr\n\t}\n\n\tfileContents, readFileErr := ioutil.ReadFile(absPath)\n\tif readFileErr != nil {\n\t\treturn nil, readFileErr\n\t}\n\n\treturn fileContents, nil\n}",
"func readFile(fs http.FileSystem, path string) ([]byte, error) {\n\tf, err := fs.Open(path)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"opening file %s\", path)\n\t}\n\tdefer f.Close()\n\tb, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"reading file content %s\", path)\n\t}\n\treturn b, nil\n}",
"func GetFileContents(path, fileName string) string {\n\tpath, err := filepath.Abs(createFullPath(path, fileName))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdata, err := ioutil.ReadFile(path)\n\treturn string(data)\n}",
"func ReadFileToString(path string) (string, error) {\n\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\n\tscanner := bufio.NewScanner(f)\n\tscanner.Scan()\n\n\treturn scanner.Text(), nil\n}",
"func readFile(pathToFile string) string {\n\tf, err := ioutil.ReadFile(pathToFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdata := string(f)\n\treturn data\n}",
"func ReadFile(relativePath string) ([]byte, error) {\n\tpath, err := filepath.Abs(relativePath)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\treturn ioutil.ReadFile(path)\n}",
"func ReadFile(path string) ([]byte, error) {\n\tdata, err := openData(path)\n\tif err == os.ErrNotExist {\n\t\treturn os.ReadFile(path)\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}",
"func ReadContents(filepath string) (string, error) {\n\tbytes, err := ioutil.ReadFile(filepath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn trimByteOrderMark(string(bytes)), nil\n}",
"func (fsOnDisk) ReadFile(name string) ([]byte, error) {\n\tcontent, err := os.ReadFile(name)\n\treturn content, errors.Wrap(err)\n}",
"func (repo Repository) GetByFullPath(fullpath string) (data []byte, err error) {\n\tfullPath := path.Join(repo.StorageDir, fullpath)\n\tdata, err = ioutil.ReadFile(fullPath)\n\n\treturn\n}",
"func ReadFromFile(name string) []byte {\n\tdata, err := ioutil.ReadFile(name)\n\tif err !=nil{\n\t\tpanic(0)\n\t}\n\treturn data\n}",
"func Read(file string) (string, error) {\n\terr := Exists(file)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcontent, err := ioutil.ReadFile(realPath(file))\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(content), nil\n}",
"func ReadFileFromPath(fpath string) (*dbrpc.FileInfo, []byte, error) {\n\tif fpath == \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"path must be provided\")\n\t}\n\tabspath, err := filepath.Abs(fpath)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfile, err := os.Open(abspath)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer file.Close()\n\tbuffer := make([]byte, 512)\n\t_, err = file.Read(buffer)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfinfo := sharedReadFrom(abspath, buffer)\n\n\tb, err := imgEncode(fpath, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn finfo, b, nil\n}",
"func ReadFile(fileName string) (string, error) {\n\n\tfileName = keptnutils.ExpandTilde(fileName)\n\tif _, err := os.Stat(fileName); os.IsNotExist(err) {\n\t\treturn \"\", fmt.Errorf(\"Cannot find file %s\", fileName)\n\t}\n\tdata, err := ioutil.ReadFile(fileName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(data), nil\n}",
"func ReadFile(path string, doGzip bool) (string, error){\n body, err := ReadFileByte(path, doGzip)\n return string(body), err\n}",
"func ReadFile(path string) (model.FileData, error) {\n\tb, err := ioutil.ReadFile(path) // just pass the file name\n\tif err != nil {\n\t\tfmt.Print(err)\n\t\treturn model.FileData{}, err\n\t}\n\n\tfileName := filepath.Base(path)\n\n\tfileData := model.FileData{\n\t\tName: fileName,\n\t\tData: b,\n\t}\n\n\treturn fileData, nil\n}",
"func LookPath(file string) (string, error) {}",
"func readfile(filename string) (data string, err error) {\n\tvar abspath string\n\tabspath, err = filepath.Abs(filename)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar bytes []byte\n\tbytes, err = ioutil.ReadFile(abspath)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdata = string(bytes)\n\treturn\n}",
"func ReadFile(fileName string) string{\r\n\tcontent, err := ioutil.ReadFile(fileName)\r\n\r\n\t\tif err != nil {\r\n\t\t\tlog.Fatal(err)\r\n\t\t}\r\n\r\n\t\treturn string(content)\r\n}",
"func (l Local) ReadFile(filepath string) ([]byte, error) {\n\treturn ioutil.ReadFile(filepath)\n}",
"func ReadFile(path string) ([]string, error) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\tvar input []string\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tinput = append(input, scanner.Text())\n\t}\n\treturn input, scanner.Err()\n}",
"func readFile(path string) []string {\n\tfile, err := os.OpenFile(path, os.O_RDONLY, 0666)\n\tif err != nil {\n\t\tfmt.Println(\"Open file failed.\")\n\t\tpanic(err)\n\t}\n\tdefer file.Close()\n\n\tvar input []string\n\ti := 0\n\n\tbuf := bufio.NewReader(file)\n\tfor {\n\t\ta, _, c := buf.ReadLine()\n\t\tif c == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tinput = append(input, string(a))\n\t\ti += 1\n\t}\n\treturn input\n}",
"func FileReadAll(path string) ([]byte, error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tc, err := ioutil.ReadAll(f)\n\tif err != nil && err != io.EOF {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}",
"func ReadFile() {\n\tbody, _ := ioutil.ReadFile(\"tmp\")\n\tvar text = string(body)\n\tprintln(text)\n}",
"func ReadPath(path string) (plan *Plan, err error) {\n\tplan = new(Plan)\n\terr = mjson.Read(plan, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn plan, nil\n}",
"func readKey(key string, path string) (string, error) {\n\tdata, err := ioutil.ReadFile(filepath.Join(path, key))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(data), nil\n}",
"func ReadCatalogue(filePath string) []string {\n\tifltab := C.longlong(250)\n\tcPath := C.CString(filePath)\n\n\t// Add functionality for selection criteria here\n\tcDSSPaths := C.CString(\"/*/*/*/*/*/*/\")\n\tcField := C.int(1)\n\n\tC.zopen(&ifltab, cPath)\n\tdefer C.zclose(&ifltab)\n\n\tcatStruct := C.zstructCatalogNew()\n\tdefer C.zstructFree(unsafe.Pointer(catStruct))\n\n\tnPaths := C.zcatalog(&ifltab, cDSSPaths, catStruct, cField)\n\n\treturn GoStrings(nPaths, catStruct.pathnameList)\n}",
"func read(r string) string {\n\tif _, err := os.Stat(r); err == nil {\n\t\tb, e := os.ReadFile(r)\n\t\tif e != nil {\n\t\t\tlog.Fatalf(\"Unable to read data from file: %s, error: %s\", r, e)\n\t\t}\n\t\treturn strings.Replace(string(b), \"\\n\", \"\", -1)\n\t}\n\treturn r\n}",
"func ReadFile(path string) ([]byte, error) {\n\tf, err := FS.OpenFile(CTX, path, os.O_RDONLY, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := bytes.NewBuffer(make([]byte, 0, bytes.MinRead))\n\n\t// If the buffer overflows, we will get bytes.ErrTooLarge.\n\t// Return that as an error. Any other panic remains.\n\tdefer func() {\n\t\te := recover()\n\t\tif e == nil {\n\t\t\treturn\n\t\t}\n\t\tif panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {\n\t\t\terr = panicErr\n\t\t} else {\n\t\t\tpanic(e)\n\t\t}\n\t}()\n\t_, err = buf.ReadFrom(f)\n\treturn buf.Bytes(), err\n}",
"func ReadFile(path string) ([]byte, error) {\n\tf, err := FS.OpenFile(CTX, path, os.O_RDONLY, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := bytes.NewBuffer(make([]byte, 0, bytes.MinRead))\n\n\t// If the buffer overflows, we will get bytes.ErrTooLarge.\n\t// Return that as an error. Any other panic remains.\n\tdefer func() {\n\t\te := recover()\n\t\tif e == nil {\n\t\t\treturn\n\t\t}\n\t\tif panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {\n\t\t\terr = panicErr\n\t\t} else {\n\t\t\tpanic(e)\n\t\t}\n\t}()\n\t_, err = buf.ReadFrom(f)\n\treturn buf.Bytes(), err\n}",
"func ReadFile(path string) ([]byte, error) {\n\tf, err := FS.OpenFile(CTX, path, os.O_RDONLY, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := bytes.NewBuffer(make([]byte, 0, bytes.MinRead))\n\n\t// If the buffer overflows, we will get bytes.ErrTooLarge.\n\t// Return that as an error. Any other panic remains.\n\tdefer func() {\n\t\te := recover()\n\t\tif e == nil {\n\t\t\treturn\n\t\t}\n\t\tif panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {\n\t\t\terr = panicErr\n\t\t} else {\n\t\t\tpanic(e)\n\t\t}\n\t}()\n\t_, err = buf.ReadFrom(f)\n\treturn buf.Bytes(), err\n}",
"func ReadFile(path string) ([]byte, error) {\n\tf, err := FS.OpenFile(CTX, path, os.O_RDONLY, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := bytes.NewBuffer(make([]byte, 0, bytes.MinRead))\n\n\t// If the buffer overflows, we will get bytes.ErrTooLarge.\n\t// Return that as an error. Any other panic remains.\n\tdefer func() {\n\t\te := recover()\n\t\tif e == nil {\n\t\t\treturn\n\t\t}\n\t\tif panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {\n\t\t\terr = panicErr\n\t\t} else {\n\t\t\tpanic(e)\n\t\t}\n\t}()\n\t_, err = buf.ReadFrom(f)\n\treturn buf.Bytes(), err\n}",
"func ReadFile(path string) ([]byte, error) {\n\tf, err := FS.OpenFile(CTX, path, os.O_RDONLY, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := bytes.NewBuffer(make([]byte, 0, bytes.MinRead))\n\n\t// If the buffer overflows, we will get bytes.ErrTooLarge.\n\t// Return that as an error. Any other panic remains.\n\tdefer func() {\n\t\te := recover()\n\t\tif e == nil {\n\t\t\treturn\n\t\t}\n\t\tif panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {\n\t\t\terr = panicErr\n\t\t} else {\n\t\t\tpanic(e)\n\t\t}\n\t}()\n\t_, err = buf.ReadFrom(f)\n\treturn buf.Bytes(), err\n}",
"func ReadAllText(path string) (string, error) {\n\tb, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(b), nil\n}",
"func ReadAllText(path string) (string, error) {\n\tb, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(b), nil\n}",
"func read(path string) ([]io.Reader, error) {\n\tif path == \"-\" {\n\t\treturn []io.Reader{os.Stdin}, nil\n\t}\n\n\tif url, ok := toURL(path); ok {\n\t\tif strings.ToLower(url.Scheme) != \"https\" {\n\t\t\treturn nil, fmt.Errorf(\"only HTTPS URLs are allowed\")\n\t\t}\n\t\tresp, err := http.Get(url.String())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\treturn nil, fmt.Errorf(\"unable to read URL %q, server reported %s, status code=%d\", path, resp.Status, resp.StatusCode)\n\t\t}\n\n\t\t// Save to a buffer, so that response can be closed here\n\t\tbuf := new(bytes.Buffer)\n\t\t_, err = buf.ReadFrom(resp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn []io.Reader{buf}, nil\n\t}\n\n\treturn walk(path)\n}",
"func ReadTextFile(filePath string) (string, error) {\n data, err := ioutil.ReadFile(filePath)\n return string(data), err\n}",
"func (conf *Configuration) Read(path string) error {\n\tif _, err := toml.DecodeFile(path, conf); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func readFile(filePath string) (os.FileInfo, []byte, error) {\n\tfile, err := os.Open(filePath)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfileContents, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer func() {\n\t\t_ = file.Close()\n\t}()\n\n\tfi, err := file.Stat()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn fi, fileContents, nil\n}",
"func ReadFileToString(filePath string) (string, error) {\n\tif fileExists(filePath) {\n\t\tdata, err := ioutil.ReadFile(filePath)\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn string(data), nil\n\t} else {\n\t\treturn \"\", nil\n\t}\n}",
"func ConfigReadFile(path string) (string, []byte, error) {\n\tif strings.HasPrefix(path, \"file://\") {\n\t\tpath = path[7:]\n\t}\n\n\tpos := strings.LastIndexByte(path, '.')\n\tif pos == -1 {\n\t\tpos += len(path)\n\t}\n\n\tdata, err := ioutil.ReadFile(path)\n\tlast := strings.LastIndex(path, \".\") + 1\n\tif last == 0 {\n\t\treturn \"\", nil, fmt.Errorf(\"read file config, type is null\")\n\t}\n\treturn path[pos+1:], data, err\n}",
"func _file_readString(call otto.FunctionCall) otto.Value {\n\tfilepath, _ := call.Argument(0).ToString()\n\n\tdata, err := ioutil.ReadFile(filepath)\n\tif err != nil {\n\t\tjsThrow(call, err)\n\t}\n\tv, _ := otto.ToValue(string(data))\n\treturn v\n}",
"func (h *Server) readPath(location string) (string, string, error) {\n\tp := strings.Trim(path.Clean(location), \"/\")\n\tparts := strings.Split(p, \"/\")\n\n\tif parts[0] != \"logs\" && parts[0] != \"async-logs\" {\n\t\treturn \"\", \"\", fmt.Errorf(\"%s is not a valid action, only logs and async-logs are allowed\", parts[0])\n\t}\n\n\tswitch len(parts) {\n\tcase 2:\n\t\treturn strings.ToLower(parts[1]), DefaultType, nil\n\tcase 3:\n\t\treturn strings.ToLower(parts[1]), parts[2], nil\n\tdefault:\n\t\treturn \"\", \"\", fmt.Errorf(\"path %s must be in the form /logs/<index>/[type]\", location)\n\t}\n}",
"func ReadFile(filePath string) string {\n\tbytes, err := ioutil.ReadFile(filePath)\n\tif err != nil {\n\t\tlogger.Info(err.Error())\n\t}\n\n\treturn string(bytes)\n}",
"func readLines(path string) (lines []string, err error) {\n\tvar (\n\t\tfile *os.File\n\t\tpart []byte\n\t\tprefix bool\n\t)\n\tif file, err = os.Open(path); err != nil {\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\treader := bufio.NewReader(file)\n\tbuffer := bytes.NewBuffer(make([]byte, 0))\n\tfor {\n\t\tif part, prefix, err = reader.ReadLine(); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tbuffer.Write(part)\n\t\tif !prefix {\n\t\t\tlines = append(lines, buffer.String())\n\t\t\tbuffer.Reset()\n\t\t}\n\t}\n\tif err == io.EOF {\n\t\terr = nil\n\t}\n\treturn\n}",
"func ReadFile(filePath string) []string {\n\tfile, err := os.Open(filePath)\n\tvar content []string\n\n\tif err != nil {\n\t\ter(err)\n\t\treturn nil\n\t}\n\tdefer file.Close()\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tcontent = append(content, scanner.Text())\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\ter(err)\n\t\treturn nil\n\t}\n\n\treturn content\n}",
"func ReadFileContent(path string) (string, error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\tvar data []byte\n\tbuf := make([]byte, 2000)\n\tfor {\n\t\tl, err := f.Read(buf)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tdata = append(data, buf[:l]...)\n\t}\n\treturn string(data), nil\n}",
"func (l *FileConfig) Get(path string) ([]byte, error) {\n\t// path = strings.TrimPrefix(path, l.root)\n\treturn readFile(filepath.Join(l.root, path))\n}",
"func readFile(filePath string) string {\n\tcontents, _ := ioutil.ReadFile(filePath)\n\tvar s string = string(contents)\n\treturn s\n}",
"func readLines(path string) (lines []string, err error) {\n\tvar (\n\t\tfile *os.File\n\t\tpart []byte\n\t\tprefix bool\n\t)\n\tif file, err = os.Open(path); err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\t_ = file.Close()\n\t}()\n\n\treader := bufio.NewReader(file)\n\tbuffer := bytes.NewBuffer(make([]byte, 0))\n\tfor {\n\t\tif part, prefix, err = reader.ReadLine(); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tbuffer.Write(part)\n\t\tif !prefix {\n\t\t\tlines = append(lines, buffer.String())\n\t\t\tbuffer.Reset()\n\t\t}\n\t}\n\tif err == io.EOF {\n\t\terr = nil\n\t}\n\treturn\n}",
"func Read(filepath string, original *[]byte) error {\n\tvar err error\n\n\tif filepath != \"\" {\n\t\t*original, err = ioutil.ReadFile(filepath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (d *Driver) Read(path string) ([]byte, error) {\n\tdata, _, err := d.conn.Get(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}",
"func (fs FileSystem) ReadFile(filename string) ([]byte, error) {\n\tfs.testDriver()\n\tfilename = filepath.FromSlash(filename)\n\treturn fs.drv.ReadFile(filename)\n}",
"func Read(filename string) []byte {\n\tcontent, _ := ioutil.ReadFile(filepath.Join(DataDir, filename))\n\treturn content\n}",
"func (fs *FS) ReadFile(fpath string) (string, error) {\n\tfp, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer fp.Close()\n\tdata, err := ioutil.ReadAll(fp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(data), nil\n}",
"func readLines(path string) (lines []string, err error) {\n\tvar (\n\t\tfile *os.File\n\t\tpart []byte\n\t\tprefix bool\n\t)\n\tif file, err = os.Open(path); err != nil {\n\t\treturn\n\t}\n\treader := bufio.NewReader(file)\n\tbuffer := bytes.NewBuffer(make([]byte, 1024))\n\tfor {\n\t\tif part, prefix, err = reader.ReadLine(); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tbuffer.Write(part)\n\t\tif !prefix {\n\t\t\tlines = append(lines, buffer.String())\n\t\t\tbuffer.Reset()\n\t\t}\n\t}\n\tif err == io.EOF {\n\t\terr = nil\n\t}\n\treturn\n}",
"func ReadFromFile(cnfPath string) ([]byte, error) {\n\tfile, err := os.Open(cnfPath)\n\n\t// Config file not found\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Open file error: %s\", err)\n\t}\n\n\t// Config file found, let's try to read it\n\tdata := make([]byte, 1000)\n\tcount, err := file.Read(data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Read from file error: %s\", err)\n\t}\n\n\treturn data[:count], nil\n}",
"func readFile(file_path string) string {\n content, err := ioutil.ReadFile(file_path)\n if err != nil {\n os.Stderr.WriteString(err.Error() + \"\\n\")\n return \"\"\n }\n return string(content)\n}",
"func (e *EndToEndTest) ReadFile(repo string, volume string, filename string) (string, error) {\n\tmountpoint, err := e.GetVolumePath(repo, volume)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpath := fmt.Sprintf(\"%s/%s\", mountpoint, filename)\n\tout, err := exec.Command(\"docker\", \"exec\", e.GetContainer(\"server\"), \"cat\", path).Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(out), nil\n}",
"func (fileReader *Reader) Read() ([][]byte, error) {\n\tfiles, err := ioutil.ReadDir(fileReader.rootPath)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresults := make([][]byte, 0)\n\n\tfor _, file := range files {\n\t\tfilename := file.Name()\n\n\t\tif !strings.HasSuffix(filename, \".json\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tfilePath := path.Join(fileReader.rootPath, filename)\n\t\tdata, err := ioutil.ReadFile(filePath)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, data)\n\t}\n\treturn results, nil\n}",
"func (f *FileStore) Read(key string) ([]byte, error) {\n\tif err := ValidateKey(key); err != nil {\n\t\treturn nil, err\n\t}\n\tbytes, err := f.filesystem.ReadFile(f.getPathByKey(key))\n\tif os.IsNotExist(err) {\n\t\treturn bytes, ErrKeyNotFound\n\t}\n\treturn bytes, err\n}",
"func (r *FileRepository) ReadFileSlice(path string) ([]string, error) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"fail to open file: %s\", path)\n\t}\n\tdefer file.Close()\n\tdata := make([]string, 0)\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tdata = append(data, scanner.Text())\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"fail to scan file\")\n\t}\n\treturn data, nil\n}",
"func (u *volumeUtil) ReadDir(fullPath string) ([]string, error) {\n\tdir, err := os.Open(fullPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer dir.Close()\n\n\tfiles, err := dir.Readdirnames(-1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn files, nil\n}",
"func (fs *Fs) ReadFile(name string) ([]byte, error) {\n\treturn os.ReadFile(filepath.Clean(name))\n}",
"func (c *Chunk) Reader(rootPath *string) (file *os.File, err error) {\n\tvar path string\n\tif path, err = c.Path(rootPath); err != nil {\n\t\treturn\n\t}\n\treturn os.Open(path)\n}"
] | [
"0.69383174",
"0.68287534",
"0.6815623",
"0.6785166",
"0.6754481",
"0.6678411",
"0.6626459",
"0.6582085",
"0.65602815",
"0.65602815",
"0.6546036",
"0.6472949",
"0.6459003",
"0.6405429",
"0.6402996",
"0.63998675",
"0.63586974",
"0.62960666",
"0.62625414",
"0.625736",
"0.62287873",
"0.6211927",
"0.6181701",
"0.61741656",
"0.6168871",
"0.6139509",
"0.61187285",
"0.6109042",
"0.6108838",
"0.6108473",
"0.6088347",
"0.60821694",
"0.60756713",
"0.60704386",
"0.60660386",
"0.60485345",
"0.6043139",
"0.6033872",
"0.60095793",
"0.5986765",
"0.59719396",
"0.5954573",
"0.59440464",
"0.59349495",
"0.5927802",
"0.59264576",
"0.59263486",
"0.5924747",
"0.59246266",
"0.5917067",
"0.5905789",
"0.5897691",
"0.58878255",
"0.58857906",
"0.5875473",
"0.5856716",
"0.58538216",
"0.5847156",
"0.584173",
"0.5840266",
"0.5817804",
"0.5800771",
"0.5800496",
"0.5791946",
"0.5791946",
"0.5791946",
"0.5791946",
"0.5791946",
"0.57821417",
"0.57821417",
"0.57719713",
"0.57692164",
"0.5761067",
"0.5747723",
"0.57422954",
"0.57413435",
"0.57253504",
"0.5708974",
"0.5708451",
"0.5704019",
"0.569826",
"0.56894815",
"0.5685807",
"0.5685094",
"0.56841844",
"0.5683815",
"0.56828684",
"0.568216",
"0.56804067",
"0.5677442",
"0.5676102",
"0.5665772",
"0.5660974",
"0.5654798",
"0.5650616",
"0.56437457",
"0.56434643",
"0.5641714",
"0.56404734",
"0.5636047"
] | 0.6771913 | 4 |
MultiCloser implements io.Closer, it sequentially calls Close() on each object | func MultiCloser(closers ...io.Closer) io.Closer {
return &multiCloser{
closers: closers,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (mc *MultiCloser) Close() error {\n\tmc.mu.Lock()\n\tdefer mc.mu.Unlock()\n\tresult := &multierror.Error{ErrorFormat: utils.SingleLineErrorFormatter}\n\n\tfor _, closer := range mc.closers {\n\t\tif err := closer.Close(); err != nil {\n\t\t\tresult = multierror.Append(result, err)\n\t\t}\n\t}\n\n\tmc.closers = []io.Closer{}\n\treturn result.ErrorOrNil()\n}",
"func (m *IOClosers) Close() (err error) {\n\tfor _, c := range m.closers {\n\t\tif err = c.Close(); err != nil {\n\t\t\tlogger.Errorf(\"Error closing write strream: %s\", err.Error())\n\t\t}\n\t}\n\treturn\n}",
"func (mw *MultiWriter) Close() error {\n\tmw.Lock()\n\tdefer mw.Unlock()\n\n\tvar err error\n\n\tfor _, out := range mw.outputs {\n\t\tif e1 := out.Close(); e1 != nil {\n\t\t\terr = e1\n\t\t}\n\t}\n\tmw.outputs = nil\n\tmw.closed = true\n\treturn err\n}",
"func (mw *multiWriter) Close() error {\n\tmw.Lock()\n\tfor _, w := range mw.writers {\n\t\tw.Close()\n\t}\n\tmw.writers = nil\n\tmw.Unlock()\n\treturn nil\n}",
"func closeAll(closers ...xclose.Closer) error {\n\tmultiErr := xerrors.NewMultiError()\n\tfor _, closer := range closers {\n\t\tif err := closer.Close(); err != nil {\n\t\t\tmultiErr = multiErr.Add(err)\n\t\t}\n\t}\n\treturn multiErr.FinalError()\n}",
"func closeMultipleSrvs(srvs []*httptest.Server) {\n\tfor _, srv := range srvs {\n\t\tsrv.Close()\n\t}\n}",
"func (bc *BatchCloser) Close() error {\n\tvar errs errorsbp.Batch\n\tfor _, closer := range bc.closers {\n\t\terrs.AddPrefix(fmt.Sprintf(\"%#v\", closer), closer.Close())\n\t}\n\treturn errs.Compile()\n}",
"func (d *Death) closeObjects(closer closer, done chan<- closer) {\n\terr := closer.C.Close()\n\tif err != nil {\n\t\td.log.Error(err)\n\t\tcloser.Err = err\n\t}\n\tdone <- closer\n}",
"func (c *Closer) CloseAll() {\n\tfor i := len(c.closers) - 1; i >= 0; i-- {\n\t\tClose(c.closers[i])\n\t}\n}",
"func (p *AsyncPipeline) Close() error {\n\tvar firstErr error\n\tfor _, op := range p.ops {\n\t\terr := op.Close()\n\t\tif firstErr == nil {\n\t\t\tfirstErr = err\n\t\t}\n\t}\n\treturn firstErr\n}",
"func (c *Client) Close() error {\n\tfor _, wgc := range c.cs {\n\t\tif err := wgc.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (m *MultiConnPool) Close() {\n\tfor _, p := range m.Pools {\n\t\tp.Close()\n\t}\n}",
"func (w *MultiWriter) Close() error {\n\terrs := new(multierror.Error)\n\tfor _, w := range w.writers {\n\t\terrs = multierror.Append(w.Close())\n\t}\n\treturn errs.ErrorOrNil()\n}",
"func (i *Iterator) Close() {}",
"func (e *BaseExecutor) Close() error {\n\tvar firstErr error\n\tfor _, src := range e.children {\n\t\tif err := src.Close(); err != nil && firstErr == nil {\n\t\t\tfirstErr = err\n\t\t}\n\t}\n\treturn firstErr\n}",
"func (c *OneClient) Close() error {\n\tmultierror := errors.NewMultiError(nil)\n\tc.mu.RLock()\n\tfor _, v := range c.xclients {\n\t\terr := v.Close()\n\t\tif err != nil {\n\t\t\tmultierror.Append(err)\n\t\t}\n\t}\n\tc.mu.RUnlock()\n\n\tif len(multierror.Errors) == 0 {\n\t\treturn nil\n\t}\n\treturn multierror\n}",
"func (mlog *MultiLogger) Close() {\n\tmlog.Lock()\n\tmlog.isClosed = true\n\tclose(mlog.qerr)\n\tclose(mlog.qout)\n\t<-mlog.flushq\n\t<-mlog.flushq\n\tmlog.Unlock()\n}",
"func (c *Consumer) closeAll() {\n\tclose(c.messages)\n\tclose(c.errors)\n\tc.zoo.Close()\n\tc.consumer.Close()\n\tif c.ownClient {\n\t\tc.client.Close()\n\t}\n}",
"func (c *Closer) Close() (err error) {\n\tc.o.Do(func() {\n\t\t// Get close funcs\n\t\tc.m.Lock()\n\t\tfs := append([]CloseFunc{}, c.fs...)\n\t\tc.m.Unlock()\n\n\t\t// Loop through closers\n\t\tvar errs []error\n\t\tfor _, f := range fs {\n\t\t\tif errC := f(); errC != nil {\n\t\t\t\terrs = append(errs, errC)\n\t\t\t}\n\t\t}\n\n\t\t// Process errors\n\t\tif len(errs) == 1 {\n\t\t\terr = errs[0]\n\t\t} else if len(errs) > 1 {\n\t\t\terr = astierror.NewMultiple(errs)\n\t\t}\n\t})\n\treturn\n}",
"func (c *RPCClient) Close() {\n\tfor _, conn := range c.pool {\n\t\tconn.Close()\n\t}\n}",
"func (cr *ChainReader) Close() error {\n\tfor _, reader := range cr.readers {\n\t\terr := reader.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (p *Pipeline) Close() {\n\tfor _, pool := range p.pools {\n\t\tclose(pool.terminate)\n\t\tpool.done.Wait()\n\t\tpool.factory.Destroy()\n\t}\n}",
"func CloseAll() {\n\tkrw.reader.Lock()\n\tdefer krw.reader.Unlock()\n\t// Closing all opened Readers Connections\n\tfor rp, rc := range krw.Readers {\n\t\trc.Close()\n\t\tdelete(krw.Readers, rp)\n\t}\n\n\tkrw.writer.Lock()\n\tdefer krw.writer.Unlock()\n\t// Closing all opened Writers Connections\n\tfor wp, wc := range krw.Writers {\n\t\twc.Close()\n\t\tdelete(krw.Writers, wp)\n\t}\n}",
"func (w *writer) Close() error {\n\tfor name, file := range w.files {\n\t\tif file != nil {\n\t\t\tfile.Close()\n\t\t\tdelete(w.files, name)\n\t\t}\n\t}\n\treturn nil\n}",
"func PoolCloseAll(pools []Pool) {\n\tfor _, p := range pools {\n\t\tp.Close()\n\t}\n}",
"func (d *Death) closeInMass(closable ...io.Closer) (err error) {\n\n\tcount := len(closable)\n\tsentToClose := make(map[int]closer)\n\t//call close async\n\tdoneClosers := make(chan closer, count)\n\tfor i, c := range closable {\n\t\tname, pkgPath := getPkgPath(c)\n\t\tcloser := closer{Index: i, C: c, Name: name, PKGPath: pkgPath}\n\t\tgo d.closeObjects(closer, doneClosers)\n\t\tsentToClose[i] = closer\n\t}\n\n\t// wait on channel for notifications.\n\ttimer := time.NewTimer(d.timeout)\n\tfailedClosers := []closer{}\n\tfor {\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\ts := \"failed to close: \"\n\t\t\tpkgs := []string{}\n\t\t\tfor _, c := range sentToClose {\n\t\t\t\tpkgs = append(pkgs, fmt.Sprintf(\"%s/%s\", c.PKGPath, c.Name))\n\t\t\t\td.log.Error(\"Failed to close: \", c.PKGPath, \"/\", c.Name)\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"%s\", fmt.Sprintf(\"%s %s\", s, strings.Join(pkgs, \", \")))\n\t\tcase closer := <-doneClosers:\n\t\t\tdelete(sentToClose, closer.Index)\n\t\t\tcount--\n\t\t\tif closer.Err != nil {\n\t\t\t\tfailedClosers = append(failedClosers, closer)\n\t\t\t}\n\n\t\t\td.log.Debug(count, \" object(s) left\")\n\t\t\tif count != 0 || len(sentToClose) != 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif len(failedClosers) != 0 {\n\t\t\t\terrString := generateErrString(failedClosers)\n\t\t\t\treturn fmt.Errorf(\"errors from closers: %s\", errString)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n}",
"func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfor _, closer := range closers {\n\t\t\tdefer closer.Close()\n\t\t}\n\t\thandler.ServeHTTP(w, r)\n\t})\n}",
"func (p *tubePool) closeAll(head tube) {\n\tvar next tube\n\tfor head != nil {\n\t\tnext = head.Next()\n\t\thead.SetNext(nil)\n\t\thead.Close()\n\t\thead = next\n\t}\n}",
"func (sr *shardResult) Close() {\n\tfor _, series := range sr.blocks {\n\t\tseries.Blocks.Close()\n\t}\n}",
"func (r *Reader) Close() error {\n\tvar err error\n\tfor i, n := 0, r.NumR(); i < n; i++ {\n\t\tvar _err error\n\t\tterm := termReader(r.R(i))\n\t\tif term != nil {\n\t\t\t_err = term()\n\t\t}\n\t\tif err == nil && _err != nil {\n\t\t\terr = _err\n\t\t}\n\t}\n\treturn err\n}",
"func CloseAll() {\n\tfor _, ps := range poolMaps {\n\t\tfor _, c := range ps {\n\t\t\t_ = c.Close()\n\t\t}\n\t}\n}",
"func (iter *BatchObjectIter) Close() {\n\tclose(iter.oidCh)\n}",
"func (c *Context) Close() {\n\tfor _, storage := range c.storages {\n\t\tstorage.Disk.Close()\n\t\tc.setEOF()\n\t}\n}",
"func Close() {\n\tlog4go.Debug(\"resources destroy, pid:%v\", os.Getpid())\n\tfor name, r := range resources {\n\t\terr := r.Close()\n\t\tif err != nil {\n\t\t\tlog4go.Error(\"resources[%s] destroy failed:%s\", name, err.Error())\n\t\t} else {\n\t\t\tlog4go.Info(\"resources[%s] destroy finish\", name)\n\t\t}\n\t}\n}",
"func (c *Container) Close() {\n\tfor _, d := range c.dependencies {\n\t\tif dep, ok := d.(Dependency); ok {\n\t\t\tdep.Close()\n\t\t}\n\t}\n}",
"func (cl *CompositeLogger) Close() (err error) {\n\tcl.mu.Lock()\n\tdefer cl.mu.Unlock()\n\n\tfor i, logger := range cl.loggers {\n\t\tif i == 0 {\n\t\t\terr = logger.Close()\n\t\t} else {\n\t\t\tlogger.Close()\n\t\t}\n\t}\n\treturn\n}",
"func (r *RemoteSSH) Close() error {\n\tvar err error\n\tfor i := 0; i < r.n; i++ {\n\t\tclient := <-r.pool\n\t\terr = client.SendGoodbye()\n\t}\n\treturn err\n}",
"func (h *proxyHandler) close() {\n\tfor _, image := range h.images {\n\t\terr := image.src.Close()\n\t\tif err != nil {\n\t\t\t// This shouldn't be fatal\n\t\t\tlogrus.Warnf(\"Failed to close image %s: %v\", transports.ImageName(image.cachedimg.Reference()), err)\n\t\t}\n\t}\n}",
"func (p Pipe) Close() error {\n\tfor receiver := range p.receivers {\n\t\t// errors from one of the receivers shouldn't affect any others\n\t\treceiver.Close()\n\t}\n\treturn nil\n}",
"func (trans *Transcoder) Close() (err error) {\n\tfor _, stream := range trans.streams {\n\t\tif stream.aenc != nil {\n\t\t\tstream.aenc.Close()\n\t\t\tstream.aenc = nil\n\t\t}\n\t\tif stream.adec != nil {\n\t\t\tstream.adec.Close()\n\t\t\tstream.adec = nil\n\t\t}\n\t}\n\ttrans.streams = nil\n\treturn\n}",
"func (writer *FileLogWriter) Close() {\n\tfor l := writer.level; l <= _LEVEL_MAX; l++ {\n\t\twriter.files[l].close()\n\t\twriter.files[l] = nil\n\t}\n}",
"func (obj *Object) Close() error {\n\tif obj == nil {\n\t\treturn nil\n\t}\n\n\tif obj.closefuncs == nil {\n\t\treturn nil\n\t}\n\n\tvar mErr *multierror.Error\n\tfor _, f := range obj.closefuncs {\n\t\terr := f()\n\t\tif err != nil {\n\t\t\tmErr = multierror.Append(mErr, err)\n\t\t}\n\t}\n\treturn errors.WithStack(helpers.FlattenMultiError(mErr))\n}",
"func (l *ChannelList) Close() {\n\tchannels := make([]*Channel, 0, l.Count())\n\tfor _, c := range l.channels {\n\t\tc.Lock()\n\t\tfor _, c := range c.data {\n\t\t\tchannels = append(channels, c)\n\t\t}\n\t\tc.Unlock()\n\t}\n\t// close all channels\n\tfor _, c := range channels {\n\t\tif err := c.Close(); err != nil {\n\t\t\tlog.Error(\"c.Close() error(%v)\", err)\n\t\t}\n\t}\n}",
"func (p *Pool) Close() error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tclose(p.items)\n\tfor v := range p.items {\n\t\tif c, ok := v.(closer); ok {\n\t\t\tif err := c.Close(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (p *Pools) Close() {\n\tfor _, pool := range p.pools {\n\t\tpool.close()\n\t}\n\tp.Flush(true)\n}",
"func CloseAsyncPipeline() {\n\tfor _, pool := range asyncPipelinePools {\n\t\tfor _, pipeline := range pool.pipelines {\n\t\t\tpipeline.Close()\n\t\t}\n\t}\n\tasyncPipelinePools = nil\n}",
"func (self *Transcoder) Close() (err error) {\n\tfor _, stream := range self.streams {\n\t\tif stream.aenc != nil {\n\t\t\tstream.aenc.Close()\n\t\t\tstream.aenc = nil\n\t\t}\n\t\tif stream.adec != nil {\n\t\t\tstream.adec.Close()\n\t\t\tstream.adec = nil\n\t\t}\n\t}\n\tself.streams = nil\n\treturn\n}",
"func (transmuxer *Transmuxer) Close() {\n\tif transmuxer.closed {\n\t\treturn\n\t}\n\n\tfor _, streamer := range transmuxer.Streamers {\n\t\tstreamer.Close()\n\t}\n\n\ttransmuxer.FinalStream.Close()\n\n\ttransmuxer.closed = true\n\ttransmuxer.running = false\n}",
"func (iter *logIterator) close() {\n\tfor _, f := range iter.pending {\n\t\t_ = f.Close()\n\t}\n}",
"func (b *Balancer) Close() (err error) {\n\tfor _, b := range b.selector {\n\t\tif e := b.Close(); e != nil {\n\t\t\terr = e\n\t\t}\n\t}\n\treturn\n}",
"func (r *ShardReader) Close() {\n\tctx := vcontext.Background()\n\tfor f := range r.fieldReaders {\n\t\tfr := r.fieldReaders[f]\n\t\tif fr != nil {\n\t\t\tif fr.rio != nil { // fr.rio =nil on error\n\t\t\t\tfr.err.Set(fr.rio.Finish())\n\t\t\t}\n\t\t\tif fr.in != nil { // fr.in =nil on error\n\t\t\t\tr.err.Set(fr.in.Close(ctx))\n\t\t\t}\n\t\t}\n\t}\n}",
"func (x *Indexer) Close() error {\n\tdefer x.lock.Close()\n\tfor i := 0; i < x.config.NumShatters; i++ {\n\t\tx.shatter <- &shatterReq{shutdown: true}\n\t\t// no more shatters running, each waits\n\t\t// for all shards to complete before\n\t\t// returning => shards are no longer busy.\n\t}\n\tfor i := 0; i < x.config.NumShards; i++ {\n\t\tclose(x.shards[i].PostChan())\n\t}\n\tif err := x.config.Write(); err != nil {\n\t\treturn err\n\t}\n\tif err := x.writeFiles(); err != nil {\n\t\treturn err\n\t}\n\tif err := x.dmds.Close(); err != nil {\n\t\treturn err\n\t}\n\terrs := make(chan error, len(x.shards))\n\tfor i := range x.shards {\n\t\tb := &x.shards[i]\n\t\tgo func(b *shard.Indexer) {\n\t\t\terrs <- b.Close()\n\t\t}(b)\n\t}\n\tvar err error\n\tfor i := range x.shards {\n\t\tierr := <-errs\n\t\tif err != nil && ierr != nil {\n\t\t\tlog.Printf(\"dupy.Index.Close: dropping error %s from bucket %d\", ierr, i)\n\t\t} else if ierr != nil {\n\t\t\terr = ierr\n\t\t}\n\t}\n\treturn err\n}",
"func ReadCloserClose(rc *zip.ReadCloser,) error",
"func (e *HTTPExecuter) Close() {}",
"func (c *RemoteHTTP) Close() {\n\tc.HTTPClient = nil\n\n\tif c.SSHSession != nil {\n\t\tc.SSHSession.Close()\n\t\tc.SSHSession = nil\n\t}\n\n\tif c.SSHClient != nil {\n\t\tc.SSHClient.Close()\n\t\tc.SSHClient = nil\n\t}\n}",
"func (nopCloser) Close() error { return nil }",
"func (a *API) Close() {\n\ta.lock.Lock()\n\tdefer a.lock.Unlock()\n\n\tfor _, clients := range a.clients {\n\t\tfor _, client := range clients {\n\t\t\tclient.Close()\n\t\t}\n\t}\n\tfor k := range a.clients {\n\t\tdelete(a.clients, k)\n\t}\n}",
"func (s *RandomAggr) Close() error {\n\tvar err error\n\tfor _, v := range s.sources {\n\t\tif closer, ok := v.Reader.(io.Closer); ok {\n\t\t\titemErr := closer.Close()\n\t\t\tif err == nil {\n\t\t\t\terr = itemErr\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}",
"func (s IOStreams) Close() error {\n\t// TODO\n\treturn nil\n}",
"func (c *refCountedCloser) Close(ctx context.Context) error {\n\tremaining := c.refCount.Add(-1)\n\n\tif remaining != 0 {\n\t\treturn nil\n\t}\n\n\tif c.closed.Load() {\n\t\tpanic(\"already closed\")\n\t}\n\n\tc.closed.Store(true)\n\n\tvar errors []error\n\n\tfor _, closer := range c.closers {\n\t\terrors = append(errors, closer(ctx))\n\t}\n\n\t//nolint:wrapcheck\n\treturn multierr.Combine(errors...)\n}",
"func (a *AppTracer) Close() {\n\tfor _, cli := range a.ClientList {\n\t\tcli.Close()\n\t}\n}",
"func (b *Batch) Close() {\n}",
"func (c *Copier) Close() {\n\tc.once.Do(func() {\n\t\tif c.dst != nil {\n\t\t\tfor _, d := range c.dst {\n\t\t\t\tif err := d.Close(); err != nil {\n\t\t\t\t\tlogrus.Errorf(\"close log driver failure %s\", err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tclose(c.closed)\n\t})\n}",
"func (mu *MultipartUpload) Close() error {\n\terr := mu.multiWriter.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, 1)\n\t}\n\n\terr = mu.bufferedWriter.Flush()\n\tif err != nil {\n\t\treturn errors.Wrap(err, 1)\n\t}\n\n\terr = mu.pipeWriter.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, 1)\n\t}\n\n\treturn nil\n}",
"func (fr *FormatReaders) Close() (rtnerr error) {\n\tvar err error\n\tfor i := len(fr.readers) - 1; i >= 0; i-- {\n\t\terr = fr.readers[i].rdr.Close()\n\t\tif err != nil {\n\t\t\trtnerr = err // tracking last error\n\t\t}\n\t}\n\treturn rtnerr\n}",
"func (p *hardwareProfiler) Close() error {\n\tvar err error\n\tfor _, profiler := range p.profilers {\n\t\terr = multierr.Append(err, profiler.Close())\n\t}\n\treturn err\n}",
"func (s *ModelSubscriber) Close() error {\n\tfor _, rec := range s.receivers {\n\t\tif closer, _ := rec.(io.Closer); closer != nil {\n\t\t\tif err := closer.Close(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func Close() {\n\tatomic.StoreInt64(&writingStopFlag, 1)\n\twritingFilesLock.Lock()\n\tfor name, fh := range writingFiles {\n\t\tfh.Sync()\n\t\tfh.Close()\n\t\tlog.Debug(\"fh-close\", \"name\", name)\n\t}\n\twritingFilesLock.Unlock()\n}",
"func CloseAll() {\n\tserfClients.closeAllSerfs()\n}",
"func (c FinalOutput) Close() {}",
"func (x *Data) Close() {\n for _, c := range x.productChannels {\n close(c)\n }\n x.processorsRunning.Wait()\n close(x.ResultChannel)\n}",
"func (r *recorders) close() {\n\tr.Lock()\n\tdefer r.Unlock()\n\n\tfor _, re := range r.list {\n\t\tre.Close()\n\t}\n}",
"func (c *Closer) Close() error {\n\tc.CloseAll()\n\treturn nil\n}",
"func (it *BaseLibraryContentObjectCreatedIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (it *DogsOfRomeScoobyIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (b *bufCloser) Close() error { return nil }",
"func (b *Balancer) Close() error {\n\tvar cErr error\n\n\tfor _, backend := range b.pool {\n\t\tif err := backend.close(); err != nil {\n\t\t\tcErr = err\n\t\t}\n\t}\n\n\treturn cErr\n}",
"func (m compKeyMap) closeAll(blockIdx uint64, txIdx uint64, availableIdx uint64) error {\n\tfor _, d := range m {\n\t\terr := d.writer.CloseList()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Check if block height was reached\n\t\tif blockIdx < availableIdx || (d.searchBlockLimit <= blockIdx && d.searchTxLimit <= txIdx) {\n\t\t\terr = d.writer.AddField(\"blockStoreHeightSufficient\", true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\terr = d.writer.AddField(\"blockStoreHeightSufficient\", false)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\terr = d.writer.CloseObject()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = d.writer.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (r *relation) Close() {\n\tfor _, v := range r.mp {\n\t\tv.Close()\n\t}\n}",
"func (_m *MockMultiReaderIterator) Close() {\n\t_m.ctrl.Call(_m, \"Close\")\n}",
"func (c *Client) Close() {\n\tfor i, client := range clients {\n\t\tif client == c {\n\t\t\tclients = append(clients[:i], clients[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\n\tclose(c.send)\n\tc.conn.Close()\n\tlog.Printf(\"close connection. addr: %s\", c.conn.RemoteAddr())\n}",
"func (m *agentEndpointsManager) close() {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\tlogger.Info(\"closing all agent endpoints...\")\n\tfor _, endpoint := range m.endpoints {\n\t\tlogger.Infof(\"closing agent (id == %s) endpoint\", endpoint.id)\n\t\tendpoint.close()\n\t}\n}",
"func (conn *Connection) Close() {\n\tclose(conn.directChan)\n\tclose(conn.rpcChan)\n\tfor direct := range conn.directChan {\n\t\terr := direct.Close()\n\t\tif err != nil{\n\t\t\tlog.Errorln(err)\n\t\t}\n\t}\n\tfor client := range conn.rpcChan {\n\t\terr := client.Close()\n\t\tif err != nil{\n\t\t\tlog.Errorln(err)\n\t\t}\n\t}\n}",
"func (it *BaseContentContentObjectCreateIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (c *Client) Close() error {\n\tfor _, s := range c.subs {\n\t\ts.Unsubscribe()\n\t}\n\treturn nil\n}",
"func (c *ClosablePool) Close(timeout time.Duration) error {\n\tstarted := time.Now()\n\n\ttiers := []int{}\n\tfor i := range c.closables {\n\t\ttiers = append(tiers, i)\n\t}\n\tsort.Ints(tiers)\n\n\tfor _, i := range tiers {\n\t\ttier := c.closables[i]\n\t\tfor j := range tier {\n\t\t\ttier[j].CloseAsync()\n\t\t}\n\t\tfor j := range tier {\n\t\t\tif err := tier[j].WaitForClose(timeout - time.Since(started)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tdelete(c.closables, i)\n\t}\n\treturn nil\n}",
"func (e *RawExecutor) close() {\n\tif e != nil {\n\t\tfor _, m := range e.mappers {\n\t\t\tm.Close()\n\t\t}\n\t}\n}",
"func (pool Pool) Close() error {\n\tfor conn := range pool.connC {\n\t\tfor _, handler := range pool.connCloseHandler {\n\t\t\tif err := handler(conn); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif err := conn.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (c *Client) Close() error {\n\tc.mu.Lock()\n\tfactories := c.pool\n\tc.pool = nil\n\tc.mu.Unlock()\n\n\tif factories == nil {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\terrInfo []string\n\t\terr error\n\t)\n\n\tfor _, c := range factories {\n\t\twrapperCli, ok := c.(*WrapperClient)\n\t\tif !ok {\n\t\t\terrInfo = append(errInfo, \"failed to convert Factory interface to *WrapperClient\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := wrapperCli.client.Close(); err != nil {\n\t\t\terrInfo = append(errInfo, err.Error())\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tif len(errInfo) > 0 {\n\t\terr = fmt.Errorf(\"failed to close client pool: %s\", errInfo)\n\t}\n\treturn err\n}",
"func (f *factory) Close() error {\n\tif f.closed.CAS(false, true) {\n\t\tf.mutex.Lock()\n\t\tdefer f.mutex.Unlock()\n\n\t\tfor _, page := range f.pages {\n\t\t\tif err := page.Close(); err != nil {\n\t\t\t\tpageLogger.Error(\"close mapped page data err\",\n\t\t\t\t\tlogger.String(\"path\", f.path), logger.Error(err))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func parallelMultiReader(readers ...io.Reader) io.ReadCloser {\n\tr, w := io.Pipe()\n\n\terrChs := make([]chan error, len(readers))\n\tfor j, reader := range readers {\n\t\terrChs[j] = make(chan error, 1)\n\t\tgo func(errCh chan<- error, r io.Reader) {\n\t\t\t_, err := io.Copy(w, r)\n\t\t\terrCh <- err\n\t\t}(errChs[j], reader)\n\t}\n\n\tgo func() {\n\t\t// After all readers have stopped, propagate EOF.\n\t\tfor j, errCh := range errChs {\n\t\t\tif err := <-errCh; err != nil {\n\t\t\t\tglog.Warningf(\"Error copying from reader %d: %s\", j, err)\n\t\t\t}\n\t\t}\n\t\tw.Close()\n\t}()\n\n\treturn r\n}",
"func (coll *Collection) Close() []error {\n\terrs := []error{}\n\tfor secName, prog := range coll.Programs {\n\t\tif errTmp := prog.Close(); errTmp != nil {\n\t\t\terrs = append(errs, errors.Wrapf(errTmp, \"couldn't close program %s\", secName))\n\t\t}\n\t}\n\tfor secName, m := range coll.Maps {\n\t\tif errTmp := m.Close(); errTmp != nil {\n\t\t\terrs = append(errs, errors.Wrapf(errTmp, \"couldn't close map %s\", secName))\n\t\t}\n\t}\n\treturn errs\n}",
"func CloseEnvs(envs []Env) {\n\tfor _, e := range envs {\n\t\te.Close()\n\t}\n}",
"func TryClose(maybeClosers ...interface{}) {\n\tfor _, maybeCloser := range maybeClosers {\n\t\tif closer, ok := maybeCloser.(io.Closer); ok {\n\t\t\t_ = closer.Close()\n\t\t}\n\t}\n}",
"func (nc *NoiseClient) Close() error {\n\tnc.mu.Lock()\n\tconns := nc.connPool\n\tnc.connPool = nil\n\tnc.mu.Unlock()\n\n\tvar errors []error\n\tfor _, c := range conns {\n\t\tif err := c.Close(); err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\treturn multierr.New(errors...)\n}",
"func (it *SimpleMultiSigExecuteIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (p *pool) close() {\n\tif p.closed {\n\t\treturn\n\t}\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tp.closed = true\n\tclose(p.readyChannel)\n\n\tfor connIndex := range p.connList {\n\t\tp.connList[connIndex].close()\n\t}\n\tp.connList = nil\n}",
"func (c *Channel) Close() {\n\tfor uuid := range c.clients.m {\n\t\tc.removeClient(uuid)\n\t}\n}",
"func (c *ChannelPool) Close() {\n\tc.mu.Lock()\n\tconns := c.conns\n\tc.conns = nil\n\tc.factory = nil\n\tc.mu.Unlock()\n\n\tif conns == nil {\n\t\treturn\n\t}\n\n\tclose(conns)\n\tfor conn := range conns {\n\t\tconn.Close()\n\t}\n}",
"func (it *DogsOfRomeRomulusIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}"
] | [
"0.7285341",
"0.69266",
"0.68220395",
"0.6799848",
"0.6782212",
"0.676067",
"0.67010254",
"0.6576231",
"0.6567459",
"0.65182304",
"0.6500774",
"0.64879596",
"0.645716",
"0.6314351",
"0.62897563",
"0.62881887",
"0.62770766",
"0.62508285",
"0.62197375",
"0.62140936",
"0.61650044",
"0.6159571",
"0.6154363",
"0.61302245",
"0.612434",
"0.61194974",
"0.6111266",
"0.61098665",
"0.6094182",
"0.60907876",
"0.608526",
"0.60816634",
"0.606246",
"0.6045381",
"0.60430133",
"0.60076827",
"0.6002082",
"0.59970796",
"0.59916246",
"0.59908706",
"0.5966395",
"0.59507716",
"0.59501046",
"0.5914453",
"0.59108484",
"0.59042484",
"0.59042156",
"0.5904178",
"0.5889518",
"0.58724564",
"0.5854216",
"0.5847633",
"0.5846979",
"0.58469033",
"0.5837563",
"0.58216405",
"0.5811925",
"0.57858",
"0.5785144",
"0.5767068",
"0.5764046",
"0.5740587",
"0.57404643",
"0.5733851",
"0.57287663",
"0.57279736",
"0.57244056",
"0.5707181",
"0.5701457",
"0.5701434",
"0.57011676",
"0.56997406",
"0.56844276",
"0.5668773",
"0.56683636",
"0.56656724",
"0.56648684",
"0.56645423",
"0.5660724",
"0.5653165",
"0.5652253",
"0.5642935",
"0.5640629",
"0.5634419",
"0.562989",
"0.56199354",
"0.5617954",
"0.5616917",
"0.56143755",
"0.5602238",
"0.55901134",
"0.55900913",
"0.55890715",
"0.5588813",
"0.55876184",
"0.55703855",
"0.5560555",
"0.555829",
"0.5555988",
"0.5555861"
] | 0.74036545 | 0 |
OpaqueAccessDenied returns a generic NotFound instead of AccessDenied so as to avoid leaking the existence of secret resources. | func OpaqueAccessDenied(err error) error {
if trace.IsAccessDenied(err) {
return trace.NotFound("not found")
}
return trace.Wrap(err)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (ctx *ShowSecretsContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func ErrAccessDenied(w http.ResponseWriter, r *http.Request) {\n\tAccessDeniedWithErr(w, r, errors.New(\"Forbidden\"))\n}",
"func (aee *ActiveEndpointsError) Forbidden() {}",
"func Forbidden(msg string) ErrorResponse {\n\tif msg == \"\" {\n\t\tmsg = \"You are not authorized to perform the requested action.\"\n\t}\n\treturn ErrorResponse{\n\t\tStatus: http.StatusForbidden,\n\t\tMessage: msg,\n\t}\n}",
"func Forbidden(err error) Response {\n\tmessage := \"not authorized\"\n\tif err != nil {\n\t\tmessage = err.Error()\n\t}\n\treturn &errorResponse{\n\t\tcode: http.StatusForbidden,\n\t\tmsg: message,\n\t}\n}",
"func AccessDeniedWithErr(w http.ResponseWriter, r *http.Request, err error) {\n\tdata := []byte(`{ \"error\": \"ERR_FORBIDDEN\" }`)\n\n\tsendError(w, r, err, http.StatusForbidden, data)\n}",
"func (ctx *AcceptOfferContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func (he *HTTPErrors) NotFound(ctx *Context) {\n\the.Emit(http.StatusNotFound, ctx)\n}",
"func (ctx *ShowVerificationContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func TestAccessDenied(t *testing.T) {\n\trunTest(t, func(s *Session) {\n\t\ts.Handle(\"model\", res.Access(func(r res.AccessRequest) {\n\t\t\tr.AccessDenied()\n\t\t}))\n\t}, func(s *Session) {\n\t\tinb := s.Request(\"access.test.model\", nil)\n\t\ts.GetMsg(t).\n\t\t\tAssertSubject(t, inb).\n\t\t\tAssertError(t, res.ErrAccessDenied)\n\t})\n}",
"func (ctx *ShowUserContext) NotFound(r error) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 404, r)\n}",
"func (ctx *ShowProfileContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func NotFound(w http.ResponseWriter, r *http.Request) { Error(w, \"404 page not found\", http.StatusNotFound) }",
"func (ctx *ShowBottleContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func (ctx *ShowBottleContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func assertSecretNotFound(\n\tctx context.Context,\n\tf *framework.Framework,\n\tnamespacedName types.NamespacedName,\n) error {\n\tsecret := &corev1.Secret{}\n\terr := f.Client.Get(ctx, namespacedName, secret)\n\tif err == nil {\n\t\treturn fmt.Errorf(\"secret '%s' still found\", namespacedName)\n\t}\n\tif errors.IsNotFound(err) {\n\t\treturn nil\n\t}\n\treturn err\n}",
"func AccessDenied() ErrorBuilder {\n\treturn &defaultErrorBuilder{\n\t\terr: \"access_denied\",\n\t\terrorDescription: \"The authorization request was denied.\",\n\t}\n}",
"func (r Response) Forbidden(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.Forbidden, payload, header...)\n}",
"func NotFound(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\thttp.NotFound(w, r)\n\treturn nil\n}",
"func NotFound(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\thttp.NotFound(w, r)\n\treturn nil\n}",
"func Forbidden(format string, args ...interface{}) error {\n\treturn New(http.StatusForbidden, format, args...)\n}",
"func (c ApiWrapper) NotFound(msg string, objs ...interface{}) revel.Result {\n\treturn c.renderErrorString(404, fmt.Sprintf(msg, objs))\n}",
"func NotFound(w http.ResponseWriter, r *http.Request, h *render.Renderer) {\n\taccept := strings.Split(r.Header.Get(\"Accept\"), \",\")\n\taccept = append(accept, strings.Split(r.Header.Get(\"Content-Type\"), \",\")...)\n\n\tswitch {\n\tcase prefixInList(accept, ContentTypeHTML):\n\t\tm := TemplateMapFromContext(r.Context())\n\t\tm.Title(http.StatusText(http.StatusNotFound))\n\t\th.RenderHTMLStatus(w, http.StatusNotFound, \"404\", m)\n\tcase prefixInList(accept, ContentTypeJSON):\n\t\th.RenderJSON(w, http.StatusNotFound, http.StatusText(http.StatusNotFound))\n\tdefault:\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t}\n}",
"func (ace *ActiveContainerError) Forbidden() {}",
"func TestAccessDeniedHandler(t *testing.T) {\n\trunTest(t, func(s *Session) {\n\t\ts.Handle(\"model\", res.Access(res.AccessDenied))\n\t}, func(s *Session) {\n\t\tinb := s.Request(\"access.test.model\", nil)\n\t\ts.GetMsg(t).\n\t\t\tAssertSubject(t, inb).\n\t\t\tAssertError(t, res.ErrAccessDenied)\n\t})\n}",
"func (r *Router) Forbidden(ctx *Context) {\n\tctx.Forbidden()\n}",
"func (c *Action) NotFound(message string) error {\n\treturn c.Abort(404, message)\n}",
"func (ctx *GetFilterContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func (ctx *Context) NotFound(err error, message string) *HTTPError {\n\treturn notFoundError(err, message)\n}",
"func (ctx *GetByIDHostContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func (ctx *ShowCommentContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func (ctx *ListOfferContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func NotFound(fn http.HandlerFunc) {\n\tinfoMutex.Lock()\n\tvestigo.CustomNotFoundHandlerFunc(fn)\n\tinfoMutex.Unlock()\n}",
"func (ctx *ListFeedContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func NotFound(c *routing.Context, msg string, service string) error {\n\tResponse(c, `{\"error\": true, \"msg\": \"`+msg+`\"}`, 404, service, \"application/json\")\n\treturn nil\n}",
"func (ctx *GetFeedContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func (c ApiWrapper) Forbidden(msg string, objs ...interface{}) revel.Result {\n\treturn c.renderErrorString(403, fmt.Sprintf(msg, objs))\n}",
"func (h *Handler) NotFound(w http.ResponseWriter, r *http.Request) {\n\twriteResponse(r, w, http.StatusNotFound, &SimpleResponse{\n\t\tTraceID: tracing.FromContext(r.Context()),\n\t\tMessage: \"not found\",\n\t})\n}",
"func (ctx *ShowWorkflowContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func (b *Baa) NotFound(c *Context) {\n\tif b.notFoundHandler != nil {\n\t\tb.notFoundHandler(c)\n\t\treturn\n\t}\n\thttp.NotFound(c.Resp, c.Req)\n}",
"func NotFound(w http.ResponseWriter, r *http.Request) {\n\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\treturn\n}",
"func Forbidden(w http.ResponseWriter, err error) {\n\t(Response{Error: err.Error()}).json(w, http.StatusForbidden)\n}",
"func (ctx *GetAllHostContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func (c *Context) NotFound() {\n\tc.Handle(http.StatusNotFound, \"\", nil)\n}",
"func NotFound(message ...interface{}) Err {\n\treturn Boomify(http.StatusNotFound, message...)\n}",
"func (r Response) NotFound(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.NotFound, payload, header...)\n}",
"func (ctx *ListUserContext) NotFound(r error) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 404, r)\n}",
"func NotFound(msg string) ErrorResponse {\n\tif msg == \"\" {\n\t\tmsg = \"The requested resource was not found.\"\n\t}\n\treturn ErrorResponse{\n\t\tStatus: http.StatusNotFound,\n\t\tMessage: msg,\n\t}\n}",
"func NotFoundRoute(res http.ResponseWriter, req *http.Request) {\n res.Write([]byte(\"Oopsie woopsie this doesn't exist.\"))\n}",
"func (ctx *GetDogsByHostIDHostContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func NotFound(rw http.ResponseWriter) {\n\tHttpError(rw, \"not found\", 404)\n}",
"func NotFoundHandler() Handler { return HandlerFunc(NotFound) }",
"func NotFoundHandler() Handler { return HandlerFunc(NotFound) }",
"func (ctx *DeleteFeedContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func NotFound(w http.ResponseWriter, message ...interface{}) {\n\tboom(w, 404, message...)\n}",
"func (ctx *DeleteDogContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func (ctx *UpdateFilterContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func NotFound(context *gin.Context) {\n\tcontext.JSON(404, gin.H{\n\t\t\"error\": \"404 not found\",\n\t\t\"url\": context.Request.URL,\n\t})\n}",
"func (ctx *UpdateFeedContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func NotFound(w http.ResponseWriter, err error) {\n\tError(w, http.StatusNotFound, err)\n}",
"func NotFound(w http.ResponseWriter, r *http.Request) {\n\thandlerMu.RLock()\n\tf, ok := handlerMap[http.StatusNotFound]\n\thandlerMu.RUnlock()\n\tif ok {\n\t\tf.ServeHTTP(w, r)\n\t} else {\n\t\tdefaultNotFound(w, r)\n\t}\n}",
"func (ctx *DeleteFilterContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func Forbidden(message ...interface{}) Err {\n\treturn Boomify(http.StatusForbidden, message...)\n}",
"func (c *Context) NotFound() {\n\tc.JSON(404, ResponseWriter(404, \"page not found\", nil))\n}",
"func (this *Context) NotFound(message string) {\n\tthis.ResponseWriter.WriteHeader(404)\n\tthis.ResponseWriter.Write([]byte(message))\n}",
"func NotFound(c *gin.Context) {\n\tresponse := types.APIErrResponse{Msg: \"Something went wrong\", Success: false, Err: \"Not found\"}\n\tc.JSON(http.StatusNotFound, response)\n}",
"func Forbidden(p protocol.Instance) echo.Checker {\n\tswitch {\n\tcase p.IsGRPC():\n\t\treturn ErrorContains(\"rpc error: code = PermissionDenied\")\n\tcase p.IsTCP():\n\t\treturn ErrorContains(\"EOF\")\n\tdefault:\n\t\treturn NoErrorAndStatus(http.StatusForbidden)\n\t}\n}",
"func NotFound(w http.ResponseWriter, _ error) {\n\t(Response{Error: \"resource not found\"}).json(w, http.StatusNotFound)\n}",
"func Forbidden(msg string) error {\n\tif msg == \"\" {\n\t\tmsg = \"no está autorizado a realizar la acción solicitada.\"\n\t}\n\treturn echo.NewHTTPError(http.StatusForbidden, msg)\n}",
"func NotFound(w http.ResponseWriter, r *http.Request) {\n\tresponse := response.CreateResponse()\n\tresponse.SendDataWithStatusCode(w, \"not found\", http.StatusOK)\n}",
"func (ctx *AddLinkWorkflowContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func (response BasicJSONResponse) NotFound(writer http.ResponseWriter) {\n\tNotFound(writer, response)\n}",
"func (ctx *DeleteHostContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func NotFound(w http.ResponseWriter) {\n\thttp.Error(w, \"404 not found!!!\", http.StatusNotFound)\n}",
"func NotFound(w http.ResponseWriter) {\n\trenderError(w, http.StatusNotFound, nil)\n}",
"func (ctx *DeleteOutputContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func ForbiddenErr(err error, format string, args ...interface{}) error {\n\treturn NewError(http.StatusForbidden, err, format, args...)\n}",
"func NotFound(ctx context.Context, w http.ResponseWriter, message string) {\n\tfhirError(ctx, w, http.StatusNotFound, fhir.IssueSeverityWarning, fhir.IssueTypeNotFound, message)\n}",
"func NotFound(err error) error {\n\treturn APIError{\n\t\tcode: http.StatusNotFound,\n\t\tMessage: err.Error(),\n\t}\n}",
"func (c DBaseController) ForbiddenResponse() revel.Result {\n\tc.Response.Status = http.StatusForbidden\n\n\treturn c.RenderJSON(serializers.ForbiddenResponse())\n}",
"func (ctx *DeleteLinkWorkflowContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func (h *HandleHelper) NotFound() {\n\terrResponse(http.StatusNotFound,\n\t\t\"the requested resource could not be found\",\n\t)(h.w, h.r)\n}",
"func (e DiscoveryError) IsAccessDenied() bool {\n\treturn strings.Contains(e.Error(), AccessDenied)\n}",
"func accessForbiddenResp() response.Response {\n\t//nolint:stylecheck // Grandfathered capitalization of error.\n\treturn ErrResp(http.StatusForbidden, errors.New(\"Permission denied\"), \"\")\n}",
"func notFound(resource string) middleware.Responder {\n\tmessage := fmt.Sprintf(\"404 %s not found\", resource)\n\treturn operations.NewGetChartDefault(http.StatusNotFound).WithPayload(\n\t\t&models.Error{Code: helpers.Int64ToPtr(http.StatusNotFound), Message: &message},\n\t)\n}",
"func (uee *UnknownEndpointError) NotFound() {}",
"func (ctx *UpdateUserContext) NotFound(r error) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 404, r)\n}",
"func (r *Responder) NotFound() { r.write(http.StatusNotFound) }",
"func (r *Responder) Forbidden() { r.write(http.StatusForbidden) }",
"func (ctx *StopFeedContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func NotFoundHandler(*Context) error {\n\treturn NewHTTPError(StatusNotFound)\n}",
"func ERROR_AUTH_USER_NOT_FOUND(w http.ResponseWriter, pl string) {\n\tbuildForeignError(w, http.StatusForbidden, \"ERROR_AUTH_USER_NOT_FOUND\", pl)\n}",
"func (r *Route) NotFound(handler http.Handler) *Route {\n\tr.handlers[notFound] = handler\n\treturn r\n}",
"func (ctx *CreateUserContext) NotFound(r error) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 404, r)\n}",
"func (ctx *DeleteUserContext) NotFound(r error) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 404, r)\n}",
"func WrapWithPermissionDenied(cause error, parameters ...wparams.ParamStorer) Error {\n\treturn newGenericError(cause, DefaultPermissionDenied, wparams.NewParamStorer(parameters...))\n}",
"func (req *Request) NotFound(body string) {\n\treq.Reply(http.StatusNotFound, body)\n}",
"func NewSecretInspectNotFound() *SecretInspectNotFound {\n\treturn &SecretInspectNotFound{}\n}",
"func (ctx *MoveLinkWorkflowContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}",
"func (ctx *ListMessageContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}"
] | [
"0.6432331",
"0.60147",
"0.588723",
"0.5819935",
"0.5743779",
"0.5713874",
"0.5659095",
"0.5598807",
"0.5597008",
"0.5592161",
"0.5579683",
"0.5568134",
"0.5550085",
"0.55464023",
"0.55464023",
"0.5545322",
"0.5542302",
"0.55259365",
"0.5513903",
"0.5513903",
"0.55054015",
"0.5493526",
"0.54794586",
"0.5474977",
"0.5471457",
"0.54650295",
"0.54612654",
"0.5435942",
"0.54350364",
"0.543279",
"0.5426074",
"0.54157656",
"0.5407503",
"0.5405352",
"0.5403094",
"0.53887254",
"0.5380413",
"0.53766626",
"0.53710234",
"0.536787",
"0.53573143",
"0.5352898",
"0.53499216",
"0.53345394",
"0.5334449",
"0.5333373",
"0.53188235",
"0.53029233",
"0.5300304",
"0.52866703",
"0.52790534",
"0.52778614",
"0.52778614",
"0.5276735",
"0.52758485",
"0.52533525",
"0.52451426",
"0.5241564",
"0.52391595",
"0.5233154",
"0.5230381",
"0.52181226",
"0.5215855",
"0.52140534",
"0.52045393",
"0.51976794",
"0.5192057",
"0.5189199",
"0.518597",
"0.5185721",
"0.5183919",
"0.5175863",
"0.5175223",
"0.51446986",
"0.5144459",
"0.5131449",
"0.51299775",
"0.5127557",
"0.5127268",
"0.5119076",
"0.51163",
"0.51082784",
"0.51081765",
"0.5104693",
"0.50972664",
"0.5092853",
"0.5079889",
"0.5078243",
"0.5076729",
"0.5067516",
"0.5065334",
"0.5062758",
"0.5055248",
"0.5048371",
"0.50388956",
"0.50384295",
"0.5034623",
"0.50319886",
"0.5028053",
"0.5024341"
] | 0.7602695 | 0 |
Pop returns a value from the list; it panics if the list is empty | func (p *PortList) Pop() string {
p.Lock()
defer p.Unlock()
if len(p.ports) == 0 {
panic("list is empty")
}
val := p.ports[len(p.ports)-1]
p.ports = p.ports[:len(p.ports)-1]
return val
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (l *List) Pop() (v Value, err error) {\n\tif l.tail == nil {\n\t\terr = errEmpty\n\t} else {\n\t\tv = l.tail.Value\n\t\tl.tail = l.tail.prev\n\t\tif l.tail == nil {\n\t\t\tl.head = nil\n\t\t}\n\t}\n\treturn v, err\n}",
"func (l *pqList) Pop() interface{} {\n\treturn l.Remove(len(l.Slice) - 1)\n}",
"func (o *openList) Pop() interface{} {\n\topn := *o\n\tit := opn[len(opn)-1]\n\tit.pqindex = -1\n\t*o = opn[:len(opn)-1]\n\treturn it\n}",
"func (self *Queue)Pop()interface{}{\r\n\tdefer self.popkey.Release()\r\n\tself.popkey.Get()\t\r\n\te:=self.list.Front()\r\n\tif e!=nil {\r\n\t\tself.list.Remove(e)\r\n\t\treturn e.Value\r\n\t}else{\r\n\t\treturn e\r\n\t}\r\n}",
"func (l *SList) Pop() (V, bool) {\n\tif l.n == 0 {\n\t\treturn nil, false\n\t}\n\tv := l.h.v\n\tl.h = l.h.n\n\tl.n--\n\tif l.n == 0 {\n\t\tl.t = nil\n\t}\n\treturn v, true\n}",
"func (q *MyQueue) Pop() int {\n\tq.lock.Lock()\n\tx := q.list[0] // 对空数组取值时自动panic\n\tq.list = q.list[1:]\n\tq.lock.Unlock()\n\treturn x\n}",
"func (l *SliceList[T]) Pop() (v T, ok bool) {\n\tif len(*l) > 0 {\n\t\tv, *l = (*l)[len(*l)-1], (*l)[:len(*l)-1]\n\t\treturn\n\t}\n\n\treturn\n}",
"func (stack *Stack) Pop() interface{} {\n\te := stack.list.Back()\n\tif e != nil {\n\t\tstack.list.Remove(e)\n\t\treturn e.Value\n\t}\n\treturn nil\n}",
"func (list *List) Pop(idx ...int) (interface{}, error) {\n\tindex := list.getLastIndex()\n\tll := len(idx)\n\n\tif ll > 1 {\n\t\treturn nil, fmt.Errorf(\"only 1 or 0 arguments are allowed\")\n\t}\n\n\t// in case of `list.Pop()`\n\telement := list.getByIndex(index)\n\tif ll == 0 {\n\t\treturn element, list.removeByIndex(index)\n\t}\n\n\tif idx[0] > index {\n\t\treturn nil, fmt.Errorf(\"index out of range\")\n\t}\n\n\tindex = idx[0]\n\treturn element, list.removeByIndex(index)\n}",
"func (s *items) pop() (out Item) {\n\tindex := len(*s) - 1\n\tout = (*s)[index]\n\t(*s)[index] = nil\n\t*s = (*s)[:index]\n\treturn\n}",
"func (pq *PriorityQueue) Pop() interface{} {\n\treturn Pop(pq.list)\n}",
"func (s *orderedItems) Pop() interface{} {\n\told := *s\n\tn := len(old)\n\tx := old[n-1]\n\t*s = old[0 : n-1]\n\treturn x\n}",
"func (t *topK) Pop() interface{} {\n\tn := len(t.values)\n\tx := t.values[n-1]\n\tt.values = t.values[:n-1]\n\treturn x\n}",
"func (s *Stack) Pop() (val interface{}) {\n\tif s.isEmpty() {\n\t\treturn\n\t}\n\treturn s.list.RemoveHead()\n}",
"func (list *List) Pop() string {\n\tsize := list.Len() - 1\n\n\tif size == -1 {\n\t\tpanic(\"Trying to pop from an empty List\")\n\t}\n\n\tstr := list.data[size]\n\tlist.data = list.data[:size]\n\n\treturn str\n}",
"func (h *Heap) Pop() interface{} {\n\tif h.size == 0 {\n\t\treturn nil\n\t}\n\tres := h.values[1]\n\th.values[1] = h.values[h.size]\n\th.values = h.values[:h.size]\n\th.size--\n\n\th.bubbleDown()\n\n\treturn res\n}",
"func (h *PerformanceHeap) Pop() interface{} {\n\told := h.items\n\tn := len(old)\n\tx := old[n-1]\n\th.items = old[0 : n-1]\n\treturn x\n}",
"func (l *list) Pop() {\n\tl.elements = l.elements[:len(l.elements)-1]\n}",
"func (stack *ArrayStack) Pop() (value interface{}, ok bool) {\n\tvalue, ok = stack.list.Get(stack.list.Len() - 1)\n\tstack.list.Remove(stack.list.Len() - 1)\n\treturn\n}",
"func (q *queryPQ) Pop() any {\n\titem := (*q)[len(*q)-1]\n\t*q = (*q)[:len(*q)-1]\n\treturn item\n}",
"func (pq *priorityQueue) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\told[n-1] = nil\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (l *SinglyLinkedList) Pop() interface{} {\n\treturn l.Remove(l.Last())\n}",
"func (stepList *StepList) Pop() steps.Step {\n\tif stepList.IsEmpty() {\n\t\treturn nil\n\t}\n\tresult := stepList.List[0]\n\tstepList.List = stepList.List[1:]\n\treturn result\n}",
"func (s *Stack) Pop() (value interface{}) {\n\n if s.size > 0 {\n\n value, s.top = s.top.value, s.top.next\n\n s.size--\n\n return\n\n }\n\n return nil\n\n}",
"func (pq *PriorityQueue) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\told[n-1] = nil\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (a *ArrayObject) pop() Object {\n\tif len(a.Elements) < 1 {\n\t\treturn NULL\n\t}\n\n\tvalue := a.Elements[len(a.Elements)-1]\n\ta.Elements = a.Elements[:len(a.Elements)-1]\n\treturn value\n}",
"func (pq *priorityQueue) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\titem.index = -1 // for safety\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (pq *MaxPQ) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (s *Stack) Pop() (value interface{}, exists bool) {\n\texists = false\n\tif s.size > 0 {\n\t\tvalue, s.top = s.top.value, s.top.next\n\t\ts.size--\n\t\texists = true\n\t}\n\n\treturn\n}",
"func (vector *Vector) Pop() {\n\tvar element interface{}\n\telement, *vector = (*vector)[len(*vector)-1], (*vector)[:len(*vector)-1]\n\t// Note: dropping element here.\n\t_ = element\n}",
"func (p Pool) Pop() interface{} {\n\tel := p[p.Len()-1]\n\tp = p[:p.Len()-2]\n\treturn el\n}",
"func (s *Stack) Pop() (value interface{}) {\r\n\tif s.size > 0 {\r\n\t\tvalue, s.top = s.top.value, s.top.next\r\n\t\ts.size--\r\n\t\treturn value\r\n\t}\r\n\treturn nil\r\n}",
"func (q *Stack) Pop() interface{} {\n\treturn q.Items.Pop().Value\n}",
"func (h *data) Pop() interface{} {\n\tkey := h.queue[len(h.queue)-1]\n\th.queue = h.queue[0 : len(h.queue)-1]\n\titem, ok := h.items[key]\n\tif !ok {\n\t\t// This is an error\n\t\treturn nil\n\t}\n\tdelete(h.items, key)\n\treturn item.obj\n}",
"func (c *Clac) Pop() (value.Value, error) {\n\tx, err := c.remove(0, 1)\n\tif err != nil {\n\t\treturn zero, err\n\t}\n\treturn x[0], err\n}",
"func (pq *MinPQueue) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\titem.index = -1 // for safety\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (pq *PriorityQueue) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (pq *priorityQueue) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\told[n-1] = nil // avoid memory leak\n\titem.index = -1 // for safety\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (pq *priorityQueue) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\told[n-1] = nil // avoid memory leak\n\titem.index = -1 // for safety\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (s *RRset) Pop() RR {\n\tif len(*s) == 0 {\n\t\treturn nil\n\t}\n\t// Pop and remove the entry\n\tr := (*s)[len(*s)-1]\n\t*s = (*s)[:len(*s)-1]\n\treturn r\n}",
"func (pq *PriorityQueue) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\tvar item *Item\n\tif len(old) == 0 {\n\t\treturn nil\n\t}\n\titem = old[n-1]\n\titem.index = -1 // for safety\n\t*pq = old[0 : n-1]\n\n\treturn item\n}",
"func (pq *MinPQ) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (s *SliceOfUint) Pop() uint {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (s *Queue) Pop() interface{} {\n\tif s.IsEmpty() {\n\t\tpanic(\"Pop on empty queue\")\n\t} else {\n\t\tcurrent_head := s.head\n\t\tval := current_head.val\n\t\ts.head = current_head.previous\n\t\tcurrent_head.val = nil\n\t\treturn val\n\t}\n}",
"func (h *Queue) Pop() interface{} {\n\told := h.slice\n\tn := len(old)\n\titem := old[n-1]\n\th.slice = old[0 : n-1]\n\treturn item\n}",
"func Pop[T any](h Interface[T]) T {\n\tn := h.Len() - 1\n\th.Swap(0, n)\n\tdown(h, 0, n)\n\treturn h.Pop()\n}",
"func (s *Slot) pop() Item {\n\titem := s.item\n\ts.item = Empty\n\treturn item\n}",
"func (t *Map) Pop() interface{} {\n\tif t.NotEmpty() {\n\t\tkey := t.keys.Remove(t.keys.Back())\n\t\tval, ok := t.entries[key]\n\t\tdelete(t.entries, key)\n\t\tif ok {\n\t\t\treturn val.val\n\t\t}\n\t}\n\treturn nil\n}",
"func (h *Heap) Pop() interface{} {\n\told := h.slice\n\tn := len(old)\n\titem := old[n-1]\n\told[n-1] = nil // avoid memory leak\n\titem.index = -1 // for safety\n\th.slice = old[0 : n-1]\n\treturn item\n}",
"func (arr *ArrayList) Pop() ItemType {\n if arr.length > 0 {\n // shrink by half if only a third is used - dampening resize operations\n if arr.length < arr.capacity / 3 {\n arr.resize(arr.capacity / 2)\n }\n arr.length--\n return arr.data[arr.length]\n }\n panic(\"out of bounds\")\n}",
"func (s *Storage) RPop() *list.Element {\r\n\tele := s.Back()\r\n\tif ele != nil {\r\n\t\ts.Remove(ele)\r\n\t}\r\n\treturn ele\r\n}",
"func (shelf *Shelf) Pop() interface{} {\n\tx := shelf.queue[shelf.Len()-1]\n\tshelf.queue = shelf.queue[:shelf.Len()-1]\n\treturn x\n}",
"func (s *Stack) Pop() (interface{}, error) {\n\n\tif s.IsEmpty() {\n\t\treturn nil, fmt.Errorf(\"stack is empty\")\n\t}\n\treturn s.list.RemoveLast()\n}",
"func (s *Stack) Pop() (value interface{}) {\n\tif s.size > 0 {\n\t\tvalue, s.top = s.top.value, s.top.next\n\t\ts.size--\n\t\treturn\n\t}\n\treturn nil\n}",
"func (s *Stack) Pop() (value interface{}) {\n\tif s.size > 0 {\n\t\tvalue, s.top = s.top.value, s.top.next\n\t\ts.size--\n\t\treturn\n\t}\n\treturn nil\n}",
"func (sll *SingleLinkedList) Pop(index int) interface{} {\n\t// Panic if index is smaller 0\n\tif index < 0 {\n\t\tpanic(\"index < 0\")\n\t}\n\n\t// Pop first element\n\tif index == 0 {\n\t\t// Result\n\t\tv := sll.first.value\n\t\t// Remove first element\n\t\tsll.first = sll.first.next\n\t\t// Decrease length\n\t\tsll.length--\n\t\treturn v\n\t}\n\n\t// Get node before the one to pop\n\tn := sll.getNode(index - 1)\n\t// Result\n\tv := n.next.value\n\t// Remove reference to remove element\n\tn.next = n.next.next\n\t// Decrease length\n\tsll.length--\n\treturn v\n}",
"func (s *Stack) Pop() (value interface{}) {\n\tif s.size > 0 {\n\t\ts.top, value = s.top.next, s.top.value\n\t\ts.elements = s.elements[:s.size-1]\n\t\ts.size--\n\t\treturn\n\t}\n\treturn nil\n}",
"func (p *path) Pop() interface{} {\n\told := *p\n\tn := len(old)\n\tx := old[n-1]\n\t*p = old[0 : n-1]\n\treturn x\n}",
"func (s *Storage) ListPop(key string) (string, error) {\n\tshard := s.getShard(key)\n\n\tshard.mutex.Lock()\n\tdefer shard.mutex.Unlock()\n\n\tif item, ok := shard.keyValues[key]; ok {\n\t\tif isExpired(item.expiration) {\n\t\t\treturn \"\", nil\n\t\t}\n\n\t\tif list, ok := item.value.([]string); ok {\n\t\t\tif len(list) == 0 {\n\t\t\t\treturn \"\", newErrCustom(errEmptyList)\n\t\t\t}\n\n\t\t\tlastElem := list[len(list)-1]\n\t\t\titem.value = list[:len(list)-1]\n\t\t\tshard.keyValues[key] = item\n\t\t\treturn lastElem, nil\n\t\t}\n\t\treturn \"\", newErrCustom(errWrongType)\n\t}\n\treturn \"\", nil\n}",
"func (this *LinkedList) Pop() interface{} {\n\tif this.head == nil {\n\t\treturn nil\n\t}\n\treturn this.RemoveAt(0)\n}",
"func (stack *Stack) Pop() (value interface{}) {\n\tif stack.size > 0 {\n\t\tvalue, stack.top = stack.top.value, stack.top.next\n\t\tstack.size--\n\t\treturn\n\t}\n\treturn nil\n}",
"func (h *PerformanceHeap) SafePop() *performanceHeapItem {\n\tif h.Len() == 0 {\n\t\treturn nil\n\t}\n\n\ti := heap.Pop(h)\n\titem := i.(*performanceHeapItem)\n\treturn item\n}",
"func (stack *Stack) Pop() interface{} {\n\tnode := stack.list.First()\n\tif node == nil {\n\t\treturn nil\n\t}\n\n\tif !stack.list.Remove(node) {\n\t\treturn nil\n\t}\n\n\treturn node.Value()\n}",
"func Pop(h *PriorityQueue) *Item {\n\tn := h.Len() - 1\n\th.Swap(0, n)\n\tdown(h, 0, n)\n\treturn h.Pop()\n}",
"func (sv *sorterValues) Pop() interface{} {\n\tidx := len(sv.rows) - 1\n\t// Returning a pointer to avoid an allocation when storing the slice in an\n\t// interface{}.\n\tx := &(sv.rows)[idx]\n\tsv.rows = sv.rows[:idx]\n\treturn x\n}",
"func (l *List) PopBack() (interface{}, error) {\n\tif l.Len == 0 {\n\t\treturn nil, ErrEmptyList\n\t}\n\tv := l.tail.Val\n\tl.tail = l.tail.prev\n\tl.Len--\n\tif l.Len == 0 {\n\t\tl.head = nil\n\t} else {\n\t\tl.tail.next = nil\n\t}\n\treturn v, nil\n}",
"func (s *SliceOfFloat32) Pop() float32 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (s *stack) pop() int {\n\tl := len(s.items)\n\tremovedItem := s.items[l-1]\n\ts.items = s.items[:l-1]\n\treturn removedItem\n}",
"func (h *tsHeap) Pop() interface{} {\n\tit := (*h)[len(*h)-1]\n\t// Poison the removed element, for safety.\n\tit.index = -1\n\t*h = (*h)[0 : len(*h)-1]\n\treturn it\n}",
"func (l *List) PopBack() (interface{}, error) {\n\tif l.last == nil {\n\t\treturn nil, ErrEmptyList\n\t}\n\tn := l.last\n\tif l.first == n {\n\t\tl.first = nil\n\t}\n\tl.last = n.prev\n\tif l.last != nil {\n\t\tl.last.next = nil\n\t}\n\treturn n.Val, nil\n}",
"func (v *Data) Pop() PicData {\n\treturn v.Remove(len(*v) - 1)\n}",
"func (s *stack) pop() {\n\ts.items = s.items[:len(s.items)-1]\n}",
"func (s *Stack) Pop() interface{} {\n\tv := s.v[len(s.v)]\n\ts.v = s.v[:len(s.v)-1]\n\treturn v\n}",
"func (heap *maxheap) Pop() interface{} {\n\told := *heap\n\tn := len(old)\n\n\titem := old[n-1]\n\told[n-1] = nil\n\titem.index = -1\n\n\t*heap = old[0 : n-1]\n\n\treturn item\n}",
"func (s *SliceOfInt8) Pop() int8 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (l *List) PopBack() (interface{}, error) {\n\tif l.tail == nil {\n\t\treturn nil, ErrEmptyList\n\t}\n\n\tval := l.tail.Val\n\tl.tail = l.tail.prev\n\n\tif l.tail == nil {\n\t\tl.head = nil\n\t} else {\n\t\tl.tail.next = nil\n\t}\n\n\treturn val, nil\n}",
"func (p *PriorityQueue) Pop() (interface{}, error) {\n\tif len(*p.itemHeap) == 0 {\n\t\treturn nil, errors.New(\"empty queue\")\n\t}\n\n\titem := heap.Pop(p.itemHeap).(*item)\n\tdelete(p.lookup, item.value)\n\treturn item.value, nil\n}",
"func (m *OrderedMap[K,V]) PopBack() (k K, v V, ok bool) {\n\te := m.list.Back()\n\tif e == nil {\n\t\treturn\n\t}\n\tk, v, ok = e.Value.Key, e.Value.Value, true\n\tdelete(m.mp, k)\n\tm.list.Remove(e)\n\treturn\n}",
"func (q *queue) pop() Item {\n\ti := q.head\n\tq.head = (q.head + 1) % len(q.items)\n\tq.count--\n\treturn q.items[i]\n}",
"func (nl *nodeList) pop() *Node {\n\tsize := len(nl.elements)\n\tif size == 0 {\n\t\treturn nil\n\t}\n\n\t// This method of deletion is used instead of calling nl.Delete(), because it's faster.\n\tend := size - 1\n\tn := nl.elements[end]\n\tnl.elements[end] = nil\n\tnl.elements = nl.elements[0:end]\n\n\treturn n\n}",
"func (hp *theHeap) Pop() interface{} {\n\tn := len(*hp)\n\told := *hp\n\titem := old[n-1]\n\t*hp = old[0 : n-1]\n\treturn item\n}",
"func (list *Linkedlist) Pop() (data interface{}) {\n\tif list.head != nil {\n\t\tdata, list.head = list.head.data, list.head.next\n\t\tlist.size--\n\t\treturn data\n\t}\n\treturn nil\n}",
"func (s *Stack) Pop() (item float64) {\n\ts.Length--\n\titem = s.Items[s.Length]\n\ts.Items = s.Items[:s.Length]\n\treturn\n}",
"func (q *MyQueue) Pop() int {\n\tfront := q.list.Front()\n\tres := front.Value.(int)\n\tq.list.Remove(front)\n\treturn res\n}",
"func (c StringArrayCollection) Pop() interface{} {\n\tlast := c.value[len(c.value)-1]\n\tc.value = c.value[:len(c.value)-1]\n\treturn last\n}",
"func (q *Queue) Pop() Any {\n\tif q.count == 0 {\n\t\treturn nil\n\t}\n\titem := q.nodes[q.head]\n\tq.head = (q.head + 1) % len(q.nodes)\n\tq.count--\n\treturn item\n}",
"func (s *StackTemplate) Pop() *interface{} {\n\tif s == nil {\n\t\treturn nil\n\t}\n\n\tif last := len(*s) - 1; last < 0 {\n\t\treturn nil\n\t} else {\n\t\titem := (*s)[len(*s)-1]\n\t\treduced := (*s)[:last]\n\t\t*s = reduced\n\t\treturn &item\n\t}\n}",
"func (s *SliceOfFloat64) Pop() float64 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (pq *PrioQueue) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\tx := old[n-1]\n\t*pq = old[0 : n-1]\n\treturn x\n}",
"func (h *Strings) Pop() string {\n\tif h.less == nil {\n\t\tPopToLastF(len(h.list), h.list.Less, h.list.Swap)\n\t} else {\n\t\tPopToLastF(len(h.list), h.less, h.list.Swap)\n\t}\n\n\tres := h.list[len(h.list)-1]\n\th.list[len(h.list)-1] = \"\" // remove the reference in h.list\n\th.list = h.list[:len(h.list)-1]\n\n\treturn res\n}",
"func (l *List) PopBack() (interface{}, error) {\n\tif l.last == nil {\n\t\treturn 0, ErrEmptyList\n\t}\n\n\tdata := l.last.Val\n\n\tif l.last.Prev() == nil {\n\t\tl.head = nil\n\t\tl.last = nil\n\t} else {\n\t\tl.last.Prev().next = nil\n\t\tl.last = l.last.Prev()\n\t}\n\n\treturn data, nil\n}",
"func (r *RecordSlice) Pop() interface{} {\n\trec := r.zvals[len(r.zvals)-1]\n\tr.zvals = r.zvals[:len(r.zvals)-1]\n\treturn &rec\n}",
"func (r *Ring) Pop() (interface{}, bool) {\n\tif r.IsEmpty() {\n\t\treturn nil, false\n\t}\n\tvalue := r.data[r.out]\n\tr.out = (r.out + 1) % r.size\n\treturn value, true\n}",
"func (this *MyQueue) Pop() int {\n x := this.q[0]\n this.q = this.q[1:]\n return x\n}",
"func (h *itemHeap) Pop() interface{} {\n\tl := len(*h)\n\ti := (*h)[l-1]\n\t*h = (*h)[:l-1]\n\treturn i\n}",
"func (h *minPath) Pop() interface{} {\n\told := *h\n\tn := len(old)\n\tx := old[n-1]\n\t*h = old[0 : n-1]\n\treturn x\n}",
"func lvalPop(v *LVal, i int) *LVal {\n\tx := v.Cell[i]\n\n\tv.Cell = append(v.Cell[:i], v.Cell[i+1:]...)\n\treturn x\n}",
"func (s *Stack) Pop() interface{} {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\tif s.length == 0 {\n\t\treturn nil\n\t}\n\tn := s.top\n\ts.top = n.prev\n\ts.length--\n\treturn n.value\n}",
"func Pop(h Interface) interface{} {\n\tn := h.Len() - 1\n\th.Swap(0, n)\n\tdown(h, 0, n)\n\treturn h.Pop()\n}",
"func (q *Queue) Pop() interface{} {\n\tq.m.Lock()\n\tdefer q.m.Unlock()\n\n\tif q.closed {\n\t\treturn nil\n\t}\n\n\tif q.i >= q.j {\n\t\treturn nil\n\t}\n\n\tdefer func() {\n\t\tq.i++\n\t}()\n\n\treturn q.a[q.i]\n}"
] | [
"0.7859467",
"0.7850854",
"0.76105386",
"0.7610248",
"0.7603819",
"0.7530249",
"0.7521987",
"0.7520093",
"0.74930227",
"0.74818516",
"0.73888147",
"0.7387611",
"0.73734367",
"0.7354793",
"0.7348931",
"0.7323641",
"0.7319464",
"0.7305898",
"0.7296203",
"0.72842634",
"0.72688144",
"0.7266831",
"0.7258833",
"0.7258347",
"0.72563064",
"0.7250001",
"0.72480506",
"0.72309405",
"0.7222947",
"0.7215498",
"0.72118014",
"0.72055846",
"0.72028375",
"0.71961826",
"0.7183176",
"0.7180257",
"0.7174919",
"0.71729654",
"0.71729654",
"0.71556515",
"0.7150523",
"0.71454424",
"0.71387017",
"0.7138101",
"0.71371454",
"0.7129333",
"0.7126662",
"0.7125067",
"0.7117501",
"0.71097547",
"0.71001714",
"0.7095148",
"0.70931554",
"0.70923275",
"0.70923275",
"0.7083007",
"0.7078532",
"0.70691115",
"0.7064637",
"0.7060958",
"0.70567423",
"0.7052737",
"0.7051465",
"0.70502096",
"0.70496094",
"0.7038556",
"0.7037131",
"0.70300645",
"0.70266163",
"0.70237803",
"0.7017422",
"0.7016267",
"0.7008607",
"0.6998821",
"0.69940186",
"0.6992314",
"0.69917226",
"0.6989841",
"0.698251",
"0.6979996",
"0.69794977",
"0.6971148",
"0.6970652",
"0.6965768",
"0.6963807",
"0.6961228",
"0.6953258",
"0.69522244",
"0.69474983",
"0.69381505",
"0.6934239",
"0.6931406",
"0.69280416",
"0.69276565",
"0.6924909",
"0.69245374",
"0.6920434",
"0.6918969",
"0.6918691",
"0.6916067"
] | 0.71927667 | 34 |
PopInt returns a value from the list; it panics if not enough values were allocated | func (p *PortList) PopInt() int {
i, err := strconv.Atoi(p.Pop())
if err != nil {
panic(err)
}
return i
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (s *SliceOfInt) Pop() int {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (p *IntVector) Pop() int\t{ return p.Vector.Pop().(int) }",
"func (s *SliceOfInt32) Pop() int32 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (s *SliceOfInt64) Pop() int64 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (s *SliceOfInt8) Pop() int8 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (v *IntVec) Pop() int {\n\treturn v.Remove(len(*v) - 1)\n}",
"func (q *MyQueue) Pop() int {\n\tq.lock.Lock()\n\tx := q.list[0] // 对空数组取值时自动panic\n\tq.list = q.list[1:]\n\tq.lock.Unlock()\n\treturn x\n}",
"func (s *SliceOfUint) Pop() uint {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (v *Int32Vec) Pop() int32 {\n\treturn v.Remove(len(*v) - 1)\n}",
"func (q *MyQueue) Pop() int {\n\tfront := q.list.Front()\n\tres := front.Value.(int)\n\tq.list.Remove(front)\n\treturn res\n}",
"func (s *Int64) Pop() int64 {\n\tfor val := range s.m {\n\t\tdelete(s.m, val)\n\t\treturn val\n\t}\n\treturn 0\n}",
"func (s *SliceOfInt16) Pop() int16 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (s *SliceOfUint32) Pop() uint32 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (this *MyStack) Pop() int {\n\ttemp := this.val[0]\n\tthis.val = this.val[1:]\n\treturn temp\n}",
"func (h *IntMaxHeap) Pop() interface{} {\n\told := *h\n\tn := len(old)\n\tx := old[n-1]\n\t*h = old[0 : n-1]\n\treturn x\n}",
"func (p *IntArray) Pop() int {\n\ttmp := *p\n\tlast := tmp[len(tmp)-1 : len(tmp)]\n\ttmp = tmp[0 : len(tmp)-1]\n\n\t*p = tmp\n\treturn last[0]\n}",
"func (s *IntStack) Pop() (int, error) {\n\tif s.Size() == 0 {\n\t\treturn 0, fmt.Errorf(\"Stack is empty.\")\n\t}\n\tv := s.head.value\n\ts.head = s.head.next\n\ts.size--\n\treturn v, nil\n}",
"func (list *List) Pop(idx ...int) (interface{}, error) {\n\tindex := list.getLastIndex()\n\tll := len(idx)\n\n\tif ll > 1 {\n\t\treturn nil, fmt.Errorf(\"only 1 or 0 arguments are allowed\")\n\t}\n\n\t// in case of `list.Pop()`\n\telement := list.getByIndex(index)\n\tif ll == 0 {\n\t\treturn element, list.removeByIndex(index)\n\t}\n\n\tif idx[0] > index {\n\t\treturn nil, fmt.Errorf(\"index out of range\")\n\t}\n\n\tindex = idx[0]\n\treturn element, list.removeByIndex(index)\n}",
"func (h *IntHeap) Pop() interface{} {\n\told := *h\n\tn := len(old)\n\tx := old[n-1]\n\t*h = old[0 : n-1]\n\treturn x\n}",
"func (iheap *IntegerHeap) Pop() interface{} {\n\tvar (\n\t\tn, x1 int\n\n\t\t// hold a reference to the\n\t\tprevious = *iheap\n\t)\n\n\tn = len(previous)\n\tx1 = previous[n-1]\n\n\t*iheap = previous[0 : n-1]\n\n\treturn x1\n\n}",
"func (s *StackInt) Pop() int {\nlength := len(s.s)\nres := s.s[length-1]\ns.s = s.s[:length-1]\nreturn res\n}",
"func (s *SliceOfUint64) Pop() uint64 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (this *MyStack) Pop() int {\n\tres := this.v[len(this.v)-1]\n\tthis.v = this.v[:len(this.v)-1]\n\treturn res\n}",
"func (arr *ArrayList) Pop() ItemType {\n if arr.length > 0 {\n // shrink by half if only a third is used - dampening resize operations\n if arr.length < arr.capacity / 3 {\n arr.resize(arr.capacity / 2)\n }\n arr.length--\n return arr.data[arr.length]\n }\n panic(\"out of bounds\")\n}",
"func (this *MyStack) Pop() int {\n\ttmpQ := list.New()\n\tll := this.queue.Len() - 1\n\tfor i := 0; i < ll; i++ {\n\t\te := this.queue.Front()\n\t\ttmpQ.PushBack(e.Value)\n\t\tthis.queue.Remove(e)\n\t}\n\n\ttopE := this.queue.Front()\n\tres := topE.Value.(int)\n\tthis.queue.Remove(topE)\n\n\tfor tmpQ.Len() > 0 {\n\t\te := tmpQ.Front()\n\t\tthis.queue.PushBack(e.Value)\n\t\ttmpQ.Remove(e)\n\t}\n\treturn res\n}",
"func (t *topK) Pop() interface{} {\n\tn := len(t.values)\n\tx := t.values[n-1]\n\tt.values = t.values[:n-1]\n\treturn x\n}",
"func (h *FixedSizeHeap) Pop() interface{} {\n\tn := len(h.data)\n\tx := h.data[n-1]\n\th.data = h.data[0 : n-1]\n\treturn x\n}",
"func (h *PerformanceHeap) Pop() interface{} {\n\told := h.items\n\tn := len(old)\n\tx := old[n-1]\n\th.items = old[0 : n-1]\n\treturn x\n}",
"func (s *Uint64) Pop() uint64 {\n\tfor val := range s.m {\n\t\tdelete(s.m, val)\n\t\treturn val\n\t}\n\treturn 0\n}",
"func lvalPop(v *LVal, i int) *LVal {\n\tx := v.Cell[i]\n\n\tv.Cell = append(v.Cell[:i], v.Cell[i+1:]...)\n\treturn x\n}",
"func (s *stack) pop() int {\n\tl := len(s.items)\n\tremovedItem := s.items[l-1]\n\ts.items = s.items[:l-1]\n\treturn removedItem\n}",
"func (iheap *IntegerHeap) Pop() interface{} {\n\tvar previous IntegerHeap = *iheap\n\tn := len(previous)\n\tx1 := previous[n-1]\n\t*iheap = previous[0 : n-1]\n\treturn x1\n}",
"func (h *itemHeap) Pop() interface{} {\n\tl := len(*h)\n\ti := (*h)[l-1]\n\t*h = (*h)[:l-1]\n\treturn i\n}",
"func (heap *MinHeap) Pop() int {\n\tvalue := heap.Heap[0]\n\theap.Heap[0] = heap.Count - 1\n\theap.Heap[heap.Count-1] = 0\n\theap.Count--\n\theap.heapifyDown()\n\treturn value\n}",
"func (o *openList) Pop() interface{} {\n\topn := *o\n\tit := opn[len(opn)-1]\n\tit.pqindex = -1\n\t*o = opn[:len(opn)-1]\n\treturn it\n}",
"func (s *SliceOfFloat32) Pop() float32 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (s *MyStack) Pop() int {\n\titem := s.queue[0]\n\ts.queue = s.queue[1:]\n\treturn item\n}",
"func (s *MyStack) Pop() int {\n v := s.queue1[0]\n s.queue1 = s.queue1[1:]\n return v\n}",
"func (this *MyStack) Pop() int {\n\treturn this.l.Remove(this.l.Back()).(int)\n}",
"func (pq *MaxPQ) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (l *List) Pop() (v Value, err error) {\n\tif l.tail == nil {\n\t\terr = errEmpty\n\t} else {\n\t\tv = l.tail.Value\n\t\tl.tail = l.tail.prev\n\t\tif l.tail == nil {\n\t\t\tl.head = nil\n\t\t}\n\t}\n\treturn v, err\n}",
"func Pop(h *PriorityQueue) *Item {\n\tn := h.Len() - 1\n\th.Swap(0, n)\n\tdown(h, 0, n)\n\treturn h.Pop()\n}",
"func (this *MyQueue) Pop() int {\n\tv := this.Stack[0]\n\tthis.Stack = this.Stack[1:]\n\treturn v\n}",
"func (s *Stack) Pop() int {\n\tlength := len(s.items) - 1\n\ttoRemove := s.items[length]\n\ts.items = s.items[:length]\n\treturn toRemove\n}",
"func (h *Heap) Pop() interface{} {\n\tif h.size == 0 {\n\t\treturn nil\n\t}\n\tres := h.values[1]\n\th.values[1] = h.values[h.size]\n\th.values = h.values[:h.size]\n\th.size--\n\n\th.bubbleDown()\n\n\treturn res\n}",
"func (this *MyQueue) Pop() int {\n\tr := this.q[len(this.q)-1]\n\tthis.q = this.q[:len(this.q)-1]\n\treturn r\n}",
"func (s *MyStack) Pop() int {\n\tif s.Empty() {\n\t\treturn -1\n\t}\n\tn := len(s.Q)\n\tx := s.Q[n-1]\n\ts.Q = s.Q[:n-1]\n\treturn x\n}",
"func popInt(cloneMap map[string]string, key string) (int, error) {\n\tval, err := pop(cloneMap, key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn strconv.Atoi(val)\n}",
"func (t *Tower) pop() (result int) {\n\tresult = (*t)[len(*t)-1]\n\t*t = (*t)[:len(*t)-1]\n\treturn result\n}",
"func Pop[T any](h Interface[T]) T {\n\tn := h.Len() - 1\n\th.Swap(0, n)\n\tdown(h, 0, n)\n\treturn h.Pop()\n}",
"func (this *MyQueue) Pop() int {\n\tif this.out.Len() == 0 {\n\t\tfor v := this.in.Pop(); v != nil; v = this.in.Pop() {\n\t\t\tthis.out.Push(v)\n\t\t}\n\t}\n\treturn this.out.Pop().(int)\n}",
"func (s *SliceOfUint8) Pop() uint8 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (l *pqList) Pop() interface{} {\n\treturn l.Remove(len(l.Slice) - 1)\n}",
"func (this *MyQueue) Pop() int {\n x := this.q[0]\n this.q = this.q[1:]\n return x\n}",
"func (h *heap) pop() int {\n\tr := h.H[0];\n\th.V--;\n\th.H[0] = h.H[h.V];\n\tif h.V > 1 {\n\t\th.bubble_down(0);\n\t}\n\treturn r;\n}",
"func (s *Stack) Pop() (int, error) {\n\tif s.size > 0 {\n\t\tval := s.top.value\n\t\ts.top = s.top.next\n\t\ts.size--\n\t\treturn val, nil\n\t}\n\treturn 0, errorEmptyStack\n}",
"func (this *MyStack) Pop() int {\n\tx := this.Queue[0]\n\tthis.Queue = this.Queue[1:]\n\treturn x\n}",
"func (s *Stack) Pop() int {\n\tl := len(s-item) - 1\n\ttoRemove := s.items[l]\n\ts.items = s.items[:l]\n\treturn toRemove\n}",
"func (q *TaskQueue) Pop() int {\n\tif q.Count == 0 {\n\t\treturn -1\n\t}\n\tnode := q.Nodes[q.Head]\n\tq.Head = (q.Head + 1) % q.Size\n\tq.Count--\n\treturn node\n}",
"func (this *MyQueue) Pop() int {\n\tthis.Peek()\n\te := this.b[len(this.b)-1]\n\tthis.b = this.b[:len(this.b)-1]\n\treturn e\n}",
"func Pop(h Interface) interface{} {\n\tn := h.Len() - 1\n\th.Swap(0, n)\n\tdown(h, 0, n)\n\treturn h.Pop()\n}",
"func (heap *MinHeap) Pop() (int, error) {\n\tif heap.Size() == 0 {\n\t\treturn 0, errors.New(\"heap underflow\")\n\t}\n\n\tmin := heap.elements[0]\n\theap.elements[0] = heap.elements[heap.Size()-1]\n\theap.elements = heap.elements[:heap.Size()-1]\n\theap.minHeapify(0)\n\n\treturn min, nil\n}",
"func (this *MyStack) Pop() int {\n\tfor this.current.Qsize() != 1 {\n\t\tthis.backup.push(this.current.pop())\n\t}\n\tres := this.current.pop()\n\tthis.current, this.backup = this.backup, this.current\n\n\treturn res\n}",
"func (p *intPool) get() *big.Int {\n\tif p.pool.len() > 0 {\n\t\treturn p.pool.pop()\n\t}\n\treturn new(big.Int)\n}",
"func (heap *maxheap) Pop() interface{} {\n\told := *heap\n\tn := len(old)\n\n\titem := old[n-1]\n\told[n-1] = nil\n\titem.index = -1\n\n\t*heap = old[0 : n-1]\n\n\treturn item\n}",
"func (sm *StackMax) Pop() (int, error) {\n\tif sm.Empty() {\n\t\treturn -1, ErrstackEmpty\n\t}\n\n\ttop, _ := sm.Top()\n\n\tsm.length--\n\tsm.container = sm.container[:sm.length]\n\tsm.maxer = sm.maxer[:sm.length]\n\treturn top, nil\n}",
"func (w *MetricWindow) Pop() bitflow.Value {\n\tif w.Empty() {\n\t\treturn 0\n\t}\n\tval := w.data[w.first]\n\tw.first = w.inc(w.first)\n\tw.full = false\n\treturn val\n}",
"func (d *DynamicArr) Pop() (interface{}, error) {\n\tif d.length == 0 {\n\t\treturn nil, errors.New(\"Empty array\")\n\t}\n\n\tfor d.capacity/2 > d.length-1 {\n\t\td.shrinkSize()\n\t}\n\n\ttempArr := make([]interface{}, d.capacity)\n\tfor i := 0; i < d.length-1; i++ {\n\t\ttempArr[i] = d.array[i]\n\t}\n\tval := d.array[d.length-1]\n\td.length--\n\td.array = tempArr\n\n\treturn val, nil\n}",
"func(k *Stack) Pop(){\n\tl := len(k.nums)-1\n\n\tk.nums = k.nums[:l]\n\n\n}",
"func (pq *askPQueue) Pop() *models.Ask {\n\tpq.Lock()\n\tdefer pq.Unlock()\n\n\tif pq.size() < 1 {\n\t\treturn nil\n\t}\n\n\tmax := pq.items[1]\n\n\tpq.exch(1, pq.size())\n\tpq.items = pq.items[0:pq.size()]\n\tpq.elemsCount--\n\tpq.sink(1)\n\n\treturn max.value\n}",
"func (pq *MinPQ) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (q *BoundedQueue) Pop() int {\n\tq.Lock()\n\tdefer q.Unlock()\n\tvar res int\n\tfor len(q.queue) == 0 {\n\t\tq.hasItems.Wait()\n\t}\n\tres = q.queue[0]\n\tq.queue = q.queue[1:]\n\tif len(q.queue) < q.capacity {\n\t\tq.hasSpace.Broadcast()\n\t}\n\tfmt.Println(\"Popped\", res)\n\treturn res\n}",
"func (this *MyStack) Pop() int {\n\tfor this.wareHouse.Size() > 1 {\n\t\tthis.backup.Push(this.wareHouse.Pop())\n\t}\n\tval := this.wareHouse.Pop()\n\tthis.wareHouse, this.backup = this.backup, this.wareHouse\n\treturn val\n}",
"func (p Pool) Pop() interface{} {\n\tel := p[p.Len()-1]\n\tp = p[:p.Len()-2]\n\treturn el\n}",
"func (this *MyStack) Pop() int {\n\tthis.top--\n\treturn this.queue[this.top+1]\n}",
"func (p *PortList) Pop() string {\n\tp.Lock()\n\tdefer p.Unlock()\n\tif len(p.ports) == 0 {\n\t\tpanic(\"list is empty\")\n\t}\n\tval := p.ports[len(p.ports)-1]\n\tp.ports = p.ports[:len(p.ports)-1]\n\treturn val\n}",
"func MaxIntHeapPop(s []int) (int, []int) {\n\tcpy := make([]int, len(s), cap(s))\n\tcopy(cpy, s)\n\tmaxVal := cpy[0]\n\tlastIndex := len(cpy) - 1\n\tcpy[0] = cpy[lastIndex]\n\tcpy = cpy[:lastIndex]\n\tMaxIntHeapify(cpy)\n\treturn maxVal, cpy\n}",
"func (s *Stack) Pop() (int, error) {\n\tif s.empty() {\n\t\treturn -1, ErrStackUnderflow\n\t}\n\tresult := s.array[s.top]\n\ts.top--\n\treturn result, nil\n}",
"func (this *MyStack) Pop() int {\n\tret := this.Head.Val\n\tthis.Head = this.Head.Next\n\tif this.Head != nil {\n\t\tthis.Head.Pre = nil\n\t}\n\tthis.Len--\n\treturn ret\n}",
"func popCntq(uint) (ret uint)",
"func pop(s stack, top int) (*element, int, error) {\n\tif top == -1 {\n\t\treturn nil, -1, fmt.Errorf(\"underflow\")\n\t}\n\tpoppedElement := s[top]\n\ttop--\n\treturn &poppedElement, top, nil\n}",
"func (pq *MinPQueue) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\titem.index = -1 // for safety\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (h *MaxKeyHeap) Pop() interface{} {\n\told := *h\n\tn := len(old)\n\tx := old[n-1]\n\t*h = old[0 : n-1]\n\treturn x\n}",
"func PopLastInt(x []int) (int, []int, error) {\n\tif len(x) == 0 {\n\t\treturn 0, nil, fmt.Errorf(\"no value to pop\")\n\t}\n\treturn x[len(x)-1], x[:len(x)-1], nil\n}",
"func (this *MyStack) Pop() int {\n\tfor this.l.Front().Next() != nil {\n\t\tthis.t.PushBack(this.l.Remove(this.l.Front()))\n\t}\n\ttop := this.l.Remove(this.l.Front())\n\tfor this.t.Front() != nil {\n\t\tthis.l.PushBack(this.t.Remove(this.t.Front()))\n\t}\n\treturn top.(int)\n}",
"func (this *MyStack) Pop() int {\n\tans := this.Ele[this.Len-1]\n\tthis.Ele = this.Ele[:this.Len-1]\n\tthis.Len--\n\treturn ans\n}",
"func (c *Clac) Pop() (value.Value, error) {\n\tx, err := c.remove(0, 1)\n\tif err != nil {\n\t\treturn zero, err\n\t}\n\treturn x[0], err\n}",
"func (pq *bidPQueue) Pop() *models.Bid {\n\tpq.Lock()\n\tdefer pq.Unlock()\n\n\tif pq.size() < 1 {\n\t\treturn nil\n\t}\n\n\tmax := pq.items[1]\n\n\tpq.exch(1, pq.size())\n\tpq.items = pq.items[0:pq.size()]\n\tpq.elemsCount--\n\tpq.sink(1)\n\n\treturn max.value\n}",
"func lvalTake(v *LVal, i int) *LVal {\n\tx := lvalPop(v, i)\n\treturn x\n}",
"func (h *ReqHeap) Pop() interface{} {\n\told := *h\n\tn := len(old)\n\tx := old[n-1]\n\t*h = old[0 : n-1]\n\treturn x\n}",
"func (pq *priorityQueue) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\titem.index = -1 // for safety\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (s *Stack) Pop() (int, error) {\n\tif s.Empty() {\n\t\treturn 0, fmt.Errorf(\"stack is empty\")\n\t}\n\n\tv := (*s)[len(*s)-1]\n\t*s = (*s)[:len(*s)-1]\n\treturn v, nil\n}",
"func (s *StackOfPlates) pop() int {\n\n\tif s.stacks[s.last].getCapacity() == 0 {\n\t\ts.last--\n\t}\n\n\t// first column\n\tif s.last < 0 {\n\t\tpanic(\"Cannot pop() from a first empty stack!\")\n\t}\n\n\treturn s.stacks[s.last].pop()\n}",
"func (sll *SingleLinkedList) Pop(index int) interface{} {\n\t// Panic if index is smaller 0\n\tif index < 0 {\n\t\tpanic(\"index < 0\")\n\t}\n\n\t// Pop first element\n\tif index == 0 {\n\t\t// Result\n\t\tv := sll.first.value\n\t\t// Remove first element\n\t\tsll.first = sll.first.next\n\t\t// Decrease length\n\t\tsll.length--\n\t\treturn v\n\t}\n\n\t// Get node before the one to pop\n\tn := sll.getNode(index - 1)\n\t// Result\n\tv := n.next.value\n\t// Remove reference to remove element\n\tn.next = n.next.next\n\t// Decrease length\n\tsll.length--\n\treturn v\n}",
"func (f *Float64Stack) Pop() float64 {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\tln := len(f.items)\n\tif ln == 0 {\n\t\treturn 0\n\t}\n\n\ttail := f.items[ln-1]\n\tf.items = f.items[:ln-1]\n\n\treturn tail\n}",
"func (q *queue) pop() Item {\n\ti := q.head\n\tq.head = (q.head + 1) % len(q.items)\n\tq.count--\n\treturn q.items[i]\n}",
"func (s *SliceOfUint16) Pop() uint16 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}",
"func (s *orderedItems) Pop() interface{} {\n\told := *s\n\tn := len(old)\n\tx := old[n-1]\n\t*s = old[0 : n-1]\n\treturn x\n}",
"func (h *Heap) Pop() interface{} {\n\told := h.slice\n\tn := len(old)\n\titem := old[n-1]\n\told[n-1] = nil // avoid memory leak\n\titem.index = -1 // for safety\n\th.slice = old[0 : n-1]\n\treturn item\n}",
"func (s *SliceOfFloat64) Pop() float64 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}"
] | [
"0.75718653",
"0.72919536",
"0.7277665",
"0.724577",
"0.71927625",
"0.71801865",
"0.713347",
"0.70770997",
"0.70427364",
"0.69788194",
"0.6975585",
"0.6890257",
"0.6820779",
"0.6796666",
"0.6782513",
"0.6761821",
"0.673664",
"0.6729056",
"0.67286617",
"0.67235994",
"0.6723054",
"0.6703815",
"0.6658389",
"0.6645202",
"0.65945506",
"0.65889615",
"0.6579576",
"0.6577539",
"0.65609723",
"0.6546097",
"0.6545244",
"0.65297294",
"0.652464",
"0.65155226",
"0.65143025",
"0.6514281",
"0.6509543",
"0.65066904",
"0.64951545",
"0.6451631",
"0.6449532",
"0.64488375",
"0.6448265",
"0.644799",
"0.6436029",
"0.643563",
"0.643331",
"0.6432291",
"0.64294934",
"0.64285135",
"0.6416184",
"0.6408146",
"0.6407682",
"0.64029264",
"0.6397758",
"0.6395056",
"0.63639516",
"0.636237",
"0.63344586",
"0.6333951",
"0.6330025",
"0.6327194",
"0.6321632",
"0.63205963",
"0.63067764",
"0.6297744",
"0.6291099",
"0.6279541",
"0.6279358",
"0.6279184",
"0.6268787",
"0.62667584",
"0.6264959",
"0.6263308",
"0.62622625",
"0.6256006",
"0.6254914",
"0.6246175",
"0.6230732",
"0.62306285",
"0.6227003",
"0.6226629",
"0.6225177",
"0.6222511",
"0.62216085",
"0.6217921",
"0.6213844",
"0.61957425",
"0.6188665",
"0.6187534",
"0.6185014",
"0.618347",
"0.61830187",
"0.61799145",
"0.6179893",
"0.61781704",
"0.6177073",
"0.61606616",
"0.6153374",
"0.61521816"
] | 0.7560944 | 1 |
GetFreeTCPPorts returns n ports starting from port 20000. | func GetFreeTCPPorts(n int, offset ...int) (PortList, error) {
list := make([]string, 0, n)
start := PortStartingNumber
if len(offset) != 0 {
start = offset[0]
}
for i := start; i < start+n; i++ {
list = append(list, strconv.Itoa(i))
}
return PortList{ports: list}, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Take(n int) (ports []int, err error) {\n\tif n <= 0 {\n\t\treturn nil, fmt.Errorf(\"freeport: cannot take %d ports\", n)\n\t}\n\n\tmu.Lock()\n\tdefer mu.Unlock()\n\n\t// Reserve a port block\n\tonce.Do(initialize)\n\n\tif n > total {\n\t\treturn nil, fmt.Errorf(\"freeport: block size too small\")\n\t}\n\n\tfor len(ports) < n {\n\t\tfor freePorts.Len() == 0 {\n\t\t\tif total == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"freeport: impossible to satisfy request; there are no actual free ports in the block anymore\")\n\t\t\t}\n\t\t\tcondNotEmpty.Wait()\n\t\t}\n\n\t\telem := freePorts.Front()\n\t\tfreePorts.Remove(elem)\n\t\tport := elem.Value.(int)\n\n\t\tif used := isPortInUse(port); used {\n\t\t\t// Something outside of the test suite has stolen this port, possibly\n\t\t\t// due to assignment to an ephemeral port, remove it completely.\n\t\t\tlogf(\"WARN\", \"leaked port %d due to theft; removing from circulation\", port)\n\t\t\ttotal--\n\t\t\tcontinue\n\t\t}\n\n\t\tports = append(ports, port)\n\t}\n\n\t// logf(\"DEBUG\", \"free ports: %v\", ports)\n\treturn ports, nil\n}",
"func freePortAddrs(ip string, n int) []string {\n\tmin, max := 49152, 65535\n\tfreePortsMu.Lock()\n\tdefer freePortsMu.Unlock()\n\tports := make(map[int]net.Listener, n)\n\taddrs := make([]string, n)\n\tif lastPort < min || lastPort > max {\n\t\tlastPort = min\n\t}\n\tfor i := 0; i < n; i++ {\n\t\tp, addr, listener, err := oneFreePort(ip, lastPort, min, max)\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t\tlastPort = p\n\t\taddrs[i] = addr\n\t\tports[p] = listener\n\t\tusedPorts[p] = struct{}{}\n\t}\n\t// Now release them all. It's now a race to get our desired things\n\t// listening on these addresses.\n\tfor _, l := range ports {\n\t\tif err := l.Close(); err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t}\n\treturn addrs\n}",
"func FindFreePort() int {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer l.Close()\n\treturn l.Addr().(*net.TCPAddr).Port\n}",
"func GetFreePort(t *testing.T) string {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\trequire.NoError(t, err)\n\tlistener, err := net.ListenTCP(\"tcp\", addr)\n\trequire.NoError(t, err)\n\tdefer listener.Close()\n\n\taddress := listener.Addr().String()\n\tcolon := strings.Index(address, \":\")\n\tport := address[colon+1:]\n\treturn port\n}",
"func freePort() (uint16, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\n\treturn uint16(l.Addr().(*net.TCPAddr).Port), nil\n}",
"func freePort() (uint16, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\n\treturn uint16(l.Addr().(*net.TCPAddr).Port), nil\n}",
"func FreePort() int {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn 0\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn 0\n\t}\n\n\tdefer l.Close()\n\n\treturn l.Addr().(*net.TCPAddr).Port\n}",
"func getFreePort() string {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer l.Close()\n\treturn fmt.Sprintf(\"%d\", l.Addr().(*net.TCPAddr).Port)\n}",
"func freeport(t *testing.T) (port int, addr string) {\n\tl, err := net.ListenTCP(\"tcp\", &net.TCPAddr{IP: net.ParseIP(\"127.0.0.1\")})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l.Close()\n\ta := l.Addr().(*net.TCPAddr)\n\tport = a.Port\n\treturn port, a.String()\n}",
"func findFreePort() int {\n\tln, _ := net.Listen(\"tcp\", \":0\")\n\tln.Close()\n\n\taddr := ln.Addr().(*net.TCPAddr)\n\treturn addr.Port\n}",
"func getFreePort() int {\n\tln, err := net.Listen(\"tcp\", \"[::]:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tport := ln.Addr().(*net.TCPAddr).Port\n\n\terr = ln.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn port\n}",
"func getOpenPorts(n int) []string {\n\tports := []string{}\n\tfor i := 0; i < n; i++ {\n\t\tts := httptest.NewServer(http.NewServeMux())\n\t\tdefer ts.Close()\n\t\tu, err := url.Parse(ts.URL)\n\t\trtx.Must(err, \"Could not parse url to local server:\", ts.URL)\n\t\tports = append(ports, \":\"+u.Port())\n\t}\n\treturn ports\n}",
"func getFreePort() (int, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}",
"func GetFreePort() int {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlisten, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer listen.Close()\n\treturn listen.Addr().(*net.TCPAddr).Port\n}",
"func getFreePort(t *testing.T) string {\n\tl, err := net.Listen(\"tcp\", \":\")\n\tif err != nil {\n\t\tt.Fatalf(\"getFreePort: could not get free port: %v\", err)\n\t}\n\tdefer l.Close()\n\treturn l.Addr().String()[strings.LastIndex(l.Addr().String(), \":\"):]\n}",
"func freeport() (port int, addr string) {\n\tl, err := net.ListenTCP(\"tcp\", &net.TCPAddr{IP: net.ParseIP(\"127.0.0.1\")})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer l.Close()\n\ta := l.Addr().(*net.TCPAddr)\n\tport = a.Port\n\treturn port, a.String()\n}",
"func availablePorts(cnt int) ([]string, error) {\n\trtn := []string{}\n\n\tfor i := 0; i < cnt; i++ {\n\t\tport, err := getPort()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trtn = append(rtn, strconv.Itoa(port))\n\t}\n\treturn rtn, nil\n}",
"func getFreePort() (int, error) {\n\tlistener, err := net.Listen(\"tcp\", \":0\")\n\tdefer listener.Close()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tport := listener.Addr().(*net.TCPAddr).Port\n\treturn port, nil\n}",
"func GetFreePort() (int, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}",
"func FreePort() (int, error) {\n\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tlisten, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tdefer listen.Close()\n\treturn listen.Addr().(*net.TCPAddr).Port, nil\n}",
"func GetPort() (int, error) {\n\tfor i := previousPort; i < maxPort; i++ {\n\t\tif IsPortAvailable(i) {\n\t\t\t// Next previousPort is 1124 if i == 1024 now.\n\t\t\tpreviousPort = i + 100\n\t\t\treturn i, nil\n\t\t}\n\t}\n\treturn -1, errors.New(\"Not found free TCP Port\")\n}",
"func FreePort() (int, error) {\n\t// Opens a TCP connection to a free port on the host\n\t// and closes the connection but getting the port from it\n\t// so the can be setted to a free\n\t// random port each time if no one is specified\n\tl, err := net.Listen(\"tcp\", \"\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tl.Close()\n\tsl := strings.Split(l.Addr().String(), \":\")\n\tp, err := strconv.Atoi(sl[len(sl)-1])\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn p, nil\n}",
"func NextN(n int) ([]int, error) {\n\tresult := make([]int, n)\n\tlisteners := make([]net.Listener, n)\n\tfor i := 0; i < n; i++ {\n\t\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlisteners[i] = listener\n\t\tresult[i] = listener.Addr().(*net.TCPAddr).Port\n\t}\n\tfor _, l := range listeners {\n\t\t_ = l.Close()\n\t}\n\treturn result, nil\n}",
"func GetFreePort(host string, preferredPort uint32) (int, error) {\n\taddress := host + \":\" + fmt.Sprint(preferredPort)\n\taddr, err := net.ResolveTCPAddr(\"tcp\", address)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}",
"func getProbablyFreePortNumber() (int, error) {\n\tl, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tdefer l.Close()\n\n\t_, port, err := net.SplitHostPort(l.Addr().String())\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tportNum, err := strconv.Atoi(port)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn portNum, nil\n}",
"func nextAvailablePort() int {\n\tservers, err := All(dockerClient())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tusedPorts := make([]int, len(servers))\n\n\tfor i, s := range servers {\n\t\tp, err := s.Port()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tusedPorts[i] = p\n\t}\n\n\t// Iterate 100 ports starting with the default\nOUTER:\n\tfor p := defaultPort; p < defaultPort+100; p++ {\n\t\tfor _, up := range usedPorts {\n\t\t\tif p == up {\n\t\t\t\t// Another server is using this port\n\t\t\t\tcontinue OUTER\n\t\t\t}\n\t\t}\n\n\t\t// The port is available\n\t\treturn p\n\t}\n\n\tpanic(\"100 ports were not available\")\n}",
"func (p *PortForward) getFreePort() (int, error) {\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tport := listener.Addr().(*net.TCPAddr).Port\n\terr = listener.Close()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn port, nil\n}",
"func (p *PortForward) getFreePort() (int, error) {\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tport := listener.Addr().(*net.TCPAddr).Port\n\terr = listener.Close()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn port, nil\n}",
"func (g *Group) GetFreePort() uint16 {\n\n\tvar ports []uint16\n\t/*\n\t\tfor _, s := range g.Services {\n\t\t\tports = append(ports, s.Ports...)\n\t\t}\n\t*/\n\tfor i := g.MinPort; i < g.MaxPort; i++ {\n\t\tif !findPort(i, ports) {\n\t\t\treturn i\n\t\t}\n\t}\n\n\treturn 0\n}",
"func alloc() (int, net.Listener) {\n\tfor i := 0; i < attempts; i++ {\n\t\tblock := int(rand.Int31n(int32(effectiveMaxBlocks)))\n\t\tfirstPort := lowPort + block*blockSize\n\t\tln, err := net.ListenTCP(\"tcp\", tcpAddr(\"127.0.0.1\", firstPort))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\t// logf(\"DEBUG\", \"allocated port block %d (%d-%d)\", block, firstPort, firstPort+blockSize-1)\n\t\treturn firstPort, ln\n\t}\n\tpanic(\"freeport: cannot allocate port block\")\n}",
"func (alloc *RuntimePortAllocator) GetAvailablePorts(portNum int) (ports []int, err error) {\n\tif alloc.pa == nil {\n\t\treturn nil, errors.New(\"Runtime port allocator not setup\")\n\t}\n\n\tfor i := 0; i < portNum; i++ {\n\t\tif availPort, err := alloc.pa.AllocateNext(); err != nil {\n\t\t\talloc.log.Error(err, \"can't allocate next, all ports are in use\")\n\t\t\tbreak\n\t\t} else {\n\t\t\tports = append(ports, availPort)\n\t\t}\n\t}\n\n\t// Something unexpected happened, rollback to release allocated ports\n\tif len(ports) < portNum {\n\t\tfor _, reservedPort := range ports {\n\t\t\t_ = alloc.pa.Release(reservedPort)\n\t\t}\n\t\treturn nil, errors.Errorf(\"can't get enough available ports, only %d ports are available\", len(ports))\n\t}\n\n\talloc.log.Info(\"Successfully allocated ports\", \"expeceted port num\", portNum, \"allocated ports\", ports)\n\treturn ports, nil\n}",
"func (p *P) Ports() gnomock.NamedPorts {\n\treturn gnomock.DefaultTCP(defaultPort)\n}",
"func (s *socatManager) Reserve(n int) ([]int, error) {\n\t//get all listening tcp ports\n\ttype portInfo struct {\n\t\tNetwork string `json:\"network\"`\n\t\tPort int `json:\"port\"`\n\t}\n\tvar ports []portInfo\n\n\t/*\n\t\tlist ports from local services, we of course can't grantee\n\t\tthat a service will start listening after listing the ports\n\t\tbut zos doesn't start any more services (it shouldn't) after\n\t\tthe initial bootstrap, so we almost safe by using this returned\n\t\tlist\n\t*/\n\tif err := s.api.Internal(\"info.port\", nil, &ports); err != nil {\n\t\treturn nil, err\n\t}\n\n\tused := make(map[int]struct{})\n\n\tfor _, port := range ports {\n\t\tif port.Network == \"tcp\" {\n\t\t\tused[port.Port] = struct{}{}\n\t\t}\n\t}\n\n\ts.rm.Lock()\n\tdefer s.rm.Unlock()\n\n\tfor port := range s.rules {\n\t\tused[port] = struct{}{}\n\t}\n\n\ts.sm.Lock()\n\tdefer s.sm.Unlock()\n\n\t//used is now filled with all assigned system ports (except reserved)\n\t//we can safely find the first port that is not used, and not in reseved and add it to\n\t//the result list\n\tvar result []int\n\tp := 1024\n\tfor i := 0; i < n; i++ {\n\t\tfor ; p <= 65536; p++ { //i know last valid port is at 65535, but check code below\n\t\t\tif _, ok := used[p]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif _, ok := s.reserved.Get(fmt.Sprint(p)); ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\tif p == 65536 {\n\t\t\treturn result, fmt.Errorf(\"pool is exhausted\")\n\t\t}\n\n\t\ts.reserved.Set(fmt.Sprint(p), nil, cache.DefaultExpiration)\n\t\tresult = append(result, p)\n\t}\n\n\treturn result, nil\n}",
"func getAvailablePort(t *testing.T) int {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\trequire.Nil(t, err)\n\n\tlisten, err := net.ListenTCP(\"tcp\", addr)\n\trequire.Nil(t, err)\n\n\tdefer listen.Close()\n\treturn listen.Addr().(*net.TCPAddr).Port\n}",
"func OpenFreeUDPPort(portBase int, num int) (net.PacketConn, int, error) {\n\tfor i := 0; i < num; i++ {\n\t\tport := portBase + i\n\t\tconn, err := net.ListenPacket(\"udp\", fmt.Sprint(\":\", port))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\treturn conn, port, nil\n\t}\n\treturn nil, 0, errors.New(\"failed to open free port\")\n}",
"func FindUnusedPort() (uint16, error) {\n\t// We let the kernel to find the port for us.\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\treturn uint16(l.Addr().(*net.TCPAddr).Port), nil\n}",
"func getAvailablePort(from, to int) int {\n\tfor port := from; port <= to; port++ {\n\t\tif isPortAvailable(port) {\n\t\t\treturn port\n\t\t}\n\t}\n\n\treturn 0\n}",
"func getPort() int {\n\tcount := 0\n\tfor count < 1000 {\n\t\tln, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\t\tif err == nil {\n\t\t\tp := ln.Addr().(*net.TCPAddr).Port\n\t\t\tln.Close()\n\t\t\treturn p\n\t\t}\n\t\tcount++\n\t}\n\tpanic(\"Could not find an available port\")\n}",
"func (m *Manager) useFreePort() error {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer l.Close()\n\tlocalServerPort = l.Addr().(*net.TCPAddr).Port\n\treturn nil\n}",
"func GetPorts(lookupPids bool) map[string]GOnetstat.Process {\n\tports := make(map[string]GOnetstat.Process)\n\tnetstat, _ := GOnetstat.Tcp(lookupPids)\n\tvar net string\n\t//netPorts := make(map[string]GOnetstat.Process)\n\t//ports[\"tcp\"] = netPorts\n\tnet = \"tcp\"\n\tfor _, entry := range netstat {\n\t\tif entry.State == \"LISTEN\" {\n\t\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\t\tports[net+\":\"+port] = entry\n\t\t}\n\t}\n\tnetstat, _ = GOnetstat.Tcp6(lookupPids)\n\t//netPorts = make(map[string]GOnetstat.Process)\n\t//ports[\"tcp6\"] = netPorts\n\tnet = \"tcp6\"\n\tfor _, entry := range netstat {\n\t\tif entry.State == \"LISTEN\" {\n\t\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\t\tports[net+\":\"+port] = entry\n\t\t}\n\t}\n\tnetstat, _ = GOnetstat.Udp(lookupPids)\n\t//netPorts = make(map[string]GOnetstat.Process)\n\t//ports[\"udp\"] = netPorts\n\tnet = \"udp\"\n\tfor _, entry := range netstat {\n\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\tports[net+\":\"+port] = entry\n\t}\n\tnetstat, _ = GOnetstat.Udp6(lookupPids)\n\t//netPorts = make(map[string]GOnetstat.Process)\n\t//ports[\"udp6\"] = netPorts\n\tnet = \"udp6\"\n\tfor _, entry := range netstat {\n\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\tports[net+\":\"+port] = entry\n\t}\n\treturn ports\n}",
"func (s *Scan) tcpConnScan() []int {\n\tvar wg sync.WaitGroup\n\n\tvar ports []int\n\tfor i := s.minPort; i <= s.maxPort; i++ {\n\t\twg.Add(1)\n\t\tgo func(i int) {\n\t\t\tfor {\n\t\t\t\thost := net.JoinHostPort(s.raddr.String(), fmt.Sprintf(\"%d\", i))\n\t\t\t\tconn, err := net.DialTimeout(\"tcp\", host, 2*time.Second)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif strings.Contains(err.Error(), \"too many open files\") {\n\t\t\t\t\t\t// random back-off\n\t\t\t\t\t\ttime.Sleep(time.Duration(10+rand.Int31n(30)) * time.Millisecond)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\twg.Done()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tconn.Close()\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tports = append(ports, i)\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\n\twg.Wait()\n\tsort.Ints(ports)\n\treturn ports\n}",
"func RandomTCPPort() int {\n\tfor i := maxReservedTCPPort; i < maxTCPPort; i++ {\n\t\tp := tcpPortRand.Intn(maxRandTCPPort) + maxReservedTCPPort + 1\n\t\tif IsTCPPortAvailable(p) {\n\t\t\treturn p\n\t\t}\n\t}\n\treturn -1\n}",
"func (server *testHTTPServerImpl) randomFreePort() int64 {\n\tmaxAttempts := 5\n\tattempt := 0\n\trandomPort := server.randomPort()\n\tfor attempt < maxAttempts && server.isPortInUse(randomPort) {\n\t\tlog.Printf(\"Port %d already in use, try with new port number\", randomPort)\n\t\tattempt++\n\t\trandomPort = server.randomPort()\n\t}\n\treturn randomPort\n}",
"func (test *Test) GetPorts(projectName string, ip string) ([]models.Port, error) {\n\treturn tests.NormalPorts, nil\n}",
"func GetPorts(service corev1.Service) []int {\n\tif len(service.Spec.Ports) == 0 {\n\t\treturn []int{}\n\t}\n\tvar svcPorts []int\n\tfor _, port := range service.Spec.Ports {\n\t\tsvcPorts = append(svcPorts, int(port.Port))\n\t}\n\treturn svcPorts\n}",
"func (ec *EvalCtx) growPorts(n int) {\n\tif len(ec.ports) >= n {\n\t\treturn\n\t}\n\tports := ec.ports\n\tec.ports = make([]*Port, n)\n\tcopy(ec.ports, ports)\n}",
"func (ec *EvalCtx) growPorts(n int) {\n\tif len(ec.ports) >= n {\n\t\treturn\n\t}\n\tports := ec.ports\n\tec.ports = make([]*Port, n)\n\tcopy(ec.ports, ports)\n}",
"func (c *RedfishClient) GetNetworkPorts(uri string) ([]model.NetworkPort, error) {\n\tcollection := redfish.Collection{}\n\tif err := c.Get(uri, &collection); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret []model.NetworkPort\n\tfor i := range collection.Members {\n\t\tresp := new(redfish.NetworkPort)\n\t\tif err := c.Get(collection.Members[i].Id, resp); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tret = append(ret, *redfish.CreateNetworkPortModel(resp))\n\t}\n\treturn ret, nil\n}",
"func getSplunkServicePorts(instanceType InstanceType) []corev1.ServicePort {\n\tl := []corev1.ServicePort{}\n\tfor key, value := range getSplunkPorts(instanceType) {\n\t\tl = append(l, corev1.ServicePort{\n\t\t\tName: key,\n\t\t\tPort: int32(value),\n\t\t\tProtocol: \"TCP\",\n\t\t})\n\t}\n\treturn l\n}",
"func nodePorts(svcPorts []utils.ServicePort) []int64 {\n\tports := []int64{}\n\tfor _, p := range uniq(svcPorts) {\n\t\tif !p.NEGEnabled {\n\t\t\tports = append(ports, p.NodePort)\n\t\t}\n\t}\n\treturn ports\n}",
"func (c *RedfishClient) GetNetworkPorts(uri string) ([]model.NetworkPort, error) {\n\tcollection := dto.Collection{}\n\tif err := c.Get(uri, &collection); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret []model.NetworkPort\n\tfor i := range collection.Members {\n\t\tresp := new(dto.NetworkPort)\n\t\tif err := c.Get(collection.Members[i].Id, resp); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tret = append(ret, *createNetworkPortModel(resp))\n\t}\n\treturn ret, nil\n}",
"func TestCometStarter_PortContention(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping long test in short mode\")\n\t}\n\n\tconst nVals = 4\n\n\t// Find n+1 addresses that should be free.\n\t// Ephemeral port range should start at about 49k+\n\t// according to `sysctl net.inet.ip.portrange` on macOS,\n\t// and at about 32k+ on Linux\n\t// according to `sysctl net.ipv4.ip_local_port_range`.\n\t//\n\t// Because we attempt to find free addresses outside that range,\n\t// it is unlikely that another process will claim a port\n\t// we discover to be free, during the time this test runs.\n\tconst portSeekStart = 19000\n\treuseAddrs := make([]string, 0, nVals+1)\n\tfor i := portSeekStart; i < portSeekStart+1000; i++ {\n\t\taddr := fmt.Sprintf(\"127.0.0.1:%d\", i)\n\t\tln, err := net.Listen(\"tcp\", addr)\n\t\tif err != nil {\n\t\t\t// No need to log the failure.\n\t\t\tcontinue\n\t\t}\n\n\t\t// If the port was free, append it to our reusable addresses.\n\t\treuseAddrs = append(reuseAddrs, \"tcp://\"+addr)\n\t\t_ = ln.Close()\n\n\t\tif len(reuseAddrs) == nVals+1 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif len(reuseAddrs) != nVals+1 {\n\t\tt.Fatalf(\"needed %d free ports but only found %d\", nVals+1, len(reuseAddrs))\n\t}\n\n\t// Now that we have one more port than the number of validators,\n\t// there is a good chance that picking a random port will conflict with a previously chosen one.\n\t// But since CometStarter retries several times,\n\t// it should eventually land on a free port.\n\n\tvalPKs := testnet.NewValidatorPrivKeys(nVals)\n\tcmtVals := valPKs.CometGenesisValidators()\n\tstakingVals := cmtVals.StakingValidators()\n\n\tconst chainID = \"simapp-cometstarter\"\n\n\tb := testnet.DefaultGenesisBuilderOnlyValidators(\n\t\tchainID,\n\t\tstakingVals,\n\t\tsdk.NewCoin(sdk.DefaultBondDenom, sdk.DefaultPowerReduction),\n\t)\n\n\tjGenesis := b.Encode()\n\n\t// Use an info-level logger, because the debug logs in comet are noisy\n\t// and there is a data race in comet debug logs,\n\t// due to be fixed in v0.37.1 which is not yet released:\n\t// https://github.com/cometbft/cometbft/pull/532\n\tlogger := log.NewTestLoggerInfo(t)\n\n\tconst nRuns = 4\n\tfor i := 0; i < nRuns; i++ {\n\t\tt.Run(fmt.Sprintf(\"attempt %d\", i), func(t *testing.T) {\n\t\t\tnodes, err := testnet.NewNetwork(nVals, func(idx int) *testnet.CometStarter {\n\t\t\t\trootDir := t.TempDir()\n\n\t\t\t\tapp := simapp.NewSimApp(\n\t\t\t\t\tlogger.With(\"instance\", idx),\n\t\t\t\t\tdbm.NewMemDB(),\n\t\t\t\t\tnil,\n\t\t\t\t\ttrue,\n\t\t\t\t\tsimtestutil.NewAppOptionsWithFlagHome(rootDir),\n\t\t\t\t\tbaseapp.SetChainID(chainID),\n\t\t\t\t)\n\n\t\t\t\tcfg := cmtcfg.DefaultConfig()\n\n\t\t\t\t// memdb is sufficient for this test.\n\t\t\t\tcfg.BaseConfig.DBBackend = \"memdb\"\n\n\t\t\t\treturn testnet.NewCometStarter(\n\t\t\t\t\tapp,\n\t\t\t\t\tcfg,\n\t\t\t\t\tvalPKs[idx].Val,\n\t\t\t\t\tjGenesis,\n\t\t\t\t\trootDir,\n\t\t\t\t).\n\t\t\t\t\tLogger(logger.With(\"rootmodule\", fmt.Sprintf(\"comet_node-%d\", idx))).\n\t\t\t\t\tTCPAddrChooser(func() string {\n\t\t\t\t\t\t// This chooser function is the key of this test,\n\t\t\t\t\t\t// where there is only one more available address than there are nodes.\n\t\t\t\t\t\t// Therefore it is likely that an address will already be in use,\n\t\t\t\t\t\t// thereby exercising the address-in-use retry.\n\t\t\t\t\t\treturn reuseAddrs[rand.Intn(len(reuseAddrs))]\n\t\t\t\t\t})\n\t\t\t})\n\n\t\t\t// Ensure nodes are stopped completely,\n\t\t\t// so that we don't get t.Cleanup errors around directories not 
being empty.\n\t\t\tdefer func() {\n\t\t\t\terr := nodes.StopAndWait()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}()\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Ensure that the height advances.\n\t\t\t// Looking for height 2 seems more meaningful than 1.\n\t\t\trequire.NoError(t, testnet.WaitForNodeHeight(nodes[0], 2, 10*time.Second))\n\t\t})\n\t}\n}",
"func FreeTCPAddr() (addr, port string, err error) {\n\tl, err := net.Listen(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tcloser := func() {\n\t\terr := l.Close()\n\t\tif err != nil {\n\t\t\t// TODO: Handle with #870\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tdefer closer()\n\n\tportI := l.Addr().(*net.TCPAddr).Port\n\tport = fmt.Sprintf(\"%d\", portI)\n\taddr = fmt.Sprintf(\"tcp://0.0.0.0:%s\", port)\n\treturn\n}",
"func (a *cpuAccumulator) freeSockets() []int {\n\tfree := []int{}\n\tfor _, socket := range a.sortAvailableSockets() {\n\t\tif a.isSocketFree(socket) {\n\t\t\tfree = append(free, socket)\n\t\t}\n\t}\n\treturn free\n}",
"func MustNextN(n int) []int {\n\tports, err := NextN(n)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ports\n}",
"func (r *portsRegistry) Reserve() (uint16, error) {\n\tr.m.Lock()\n\tdefer r.m.Unlock()\n\n\tsize := r.max - r.min + 1\n\tfor i := uint16(1); i <= size; i++ {\n\t\tport := r.min + (r.last-r.min+i)%size\n\t\tif _, ok := r.reserved[port]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tl, err := net.Listen(\"tcp\", fmt.Sprintf(\"127.0.0.1:%d\", port))\n\t\tif l != nil {\n\t\t\t_ = l.Close()\n\t\t}\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tr.reserved[port] = struct{}{}\n\t\tr.last = port\n\t\treturn port, nil\n\t}\n\n\treturn 0, errNoFreePort\n}",
"func (s *SecurityRule) Ports() []string {\n\treturn s.Ports_\n}",
"func Get() int {\n\tfor i := maxReservedTCPPort; i < maxTCPPort; i++ {\n\t\tp := tcpPortRand.Intn(maxRandTCPPort) + maxReservedTCPPort + 1\n\t\tif IsAvailable(p) {\n\t\t\treturn p\n\t\t}\n\t}\n\treturn -1\n}",
"func GetOpenPortInRange(lowerBound, upperBound int) (int, error) {\n\tif lowerBound < portRangeMin {\n\t\treturn -1, errPortMin\n\t}\n\tfor lowerBound <= portRangeMax && lowerBound <= upperBound {\n\t\tif _, err := net.Dial(\"tcp\", fmt.Sprintf(\":%d\", lowerBound)); err != nil {\n\t\t\treturn lowerBound, nil\n\t\t}\n\t\tlowerBound++\n\t}\n\tif upperBound > portRangeMax {\n\t\treturn -1, errPortMax\n\t}\n\treturn -1, errPortNotFound\n}",
"func GetValidExposedPortNumber(exposedPort int) (int, error) {\n\t// exposed port number will be -1 if the user doesn't specify any port\n\tif exposedPort == -1 {\n\t\tfreePort, err := util.HTTPGetFreePort()\n\t\tif err != nil {\n\t\t\treturn -1, err\n\t\t}\n\t\treturn freePort, nil\n\t} else {\n\t\t// check if the given port is available\n\t\tlistener, err := net.Listen(\"tcp\", \":\"+strconv.Itoa(exposedPort))\n\t\tif err != nil {\n\t\t\treturn -1, errors.Wrapf(err, \"given port %d is not available, please choose another port\", exposedPort)\n\t\t}\n\t\tdefer listener.Close()\n\t\treturn exposedPort, nil\n\t}\n}",
"func (p *Printer) GetPorts(f v1.Flow) (string, string) {\n\tl4 := f.GetL4()\n\tif l4 == nil {\n\t\treturn \"\", \"\"\n\t}\n\tswitch l4.Protocol.(type) {\n\tcase *pb.Layer4_TCP:\n\t\treturn p.TCPPort(layers.TCPPort(l4.GetTCP().SourcePort)), p.TCPPort(layers.TCPPort(l4.GetTCP().DestinationPort))\n\tcase *pb.Layer4_UDP:\n\t\treturn p.UDPPort(layers.UDPPort(l4.GetUDP().SourcePort)), p.UDPPort(layers.UDPPort(l4.GetUDP().DestinationPort))\n\tdefault:\n\t\treturn \"\", \"\"\n\t}\n}",
"func (w Work) Ports() map[string]connector.Connector {\n\treturn w.Ports_\n}",
"func (l *Libvirt) NodeGetFreePages(Pages []uint32, StartCell int32, CellCount uint32, Flags uint32) (rCounts []uint64, err error) {\n\tvar buf []byte\n\n\targs := NodeGetFreePagesArgs {\n\t\tPages: Pages,\n\t\tStartCell: StartCell,\n\t\tCellCount: CellCount,\n\t\tFlags: Flags,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar r response\n\tr, err = l.requestStream(340, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Return value unmarshaling\n\ttpd := typedParamDecoder{}\n\tct := map[string]xdr.TypeDecoder{\"libvirt.TypedParam\": tpd}\n\trdr := bytes.NewReader(r.Payload)\n\tdec := xdr.NewDecoderCustomTypes(rdr, 0, ct)\n\t// Counts: []uint64\n\t_, err = dec.Decode(&rCounts)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}",
"func (o *Service) GetServicePortsTcp() []string {\n\tif o == nil || o.ServicePortsTcp == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.ServicePortsTcp\n}",
"func getPort() (int, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}",
"func getSplunkContainerPorts(instanceType InstanceType) []corev1.ContainerPort {\n\tl := []corev1.ContainerPort{}\n\tfor key, value := range getSplunkPorts(instanceType) {\n\t\tl = append(l, corev1.ContainerPort{\n\t\t\tName: key,\n\t\t\tContainerPort: int32(value),\n\t\t\tProtocol: \"TCP\",\n\t\t})\n\t}\n\treturn l\n}",
"func (rp *ResolverPool) Port() int {\n\treturn 0\n}",
"func (rp *ResolverPool) Port() int {\n\treturn 0\n}",
"func IsTCPPortAvailable(port int) bool {\n\tif port < minTCPPort || port > maxTCPPort {\n\t\treturn false\n\t}\n\tconn, err := net.Listen(\"tcp\", fmt.Sprintf(\"127.0.0.1:%d\", port))\n\tif err != nil {\n\t\treturn false\n\t}\n\tconn.Close()\n\treturn true\n}",
"func Ports(ports ...int) Option {\n\treturn func(c *Container) {\n\t\tvar p []string\n\t\tfor _, port := range ports {\n\t\t\tp = append(p, fmt.Sprintf(\"%d\", port))\n\t\t}\n\t\tc.ports = p\n\t}\n}",
"func exposedPorts(node *parser.Node) [][]string {\n\tvar allPorts [][]string\n\tvar ports []string\n\tfroms := FindAll(node, command.From)\n\texposes := FindAll(node, command.Expose)\n\tfor i, j := len(froms)-1, len(exposes)-1; i >= 0; i-- {\n\t\tfor ; j >= 0 && exposes[j] > froms[i]; j-- {\n\t\t\tports = append(nextValues(node.Children[exposes[j]]), ports...)\n\t\t}\n\t\tallPorts = append([][]string{ports}, allPorts...)\n\t\tports = nil\n\t}\n\treturn allPorts\n}",
"func getOpenPorts() string {\n\tcmd := \"./Bash Functions/getOpenPorts.sh\"\n\n\t// Get's output of 'nmap' command\n\topenPortsByte, _ := exec.Command(cmd).Output()\n\topenPortsString := string(openPortsByte)\n\topenPortsString = strings.Trim(openPortsString, \"\\n\")\n\n\treturn openPortsString\n}",
"func initialize() {\n\tvar err error\n\teffectiveMaxBlocks, err = adjustMaxBlocks()\n\tif err != nil {\n\t\tpanic(\"freeport: ephemeral port range detection failed: \" + err.Error())\n\t}\n\tif effectiveMaxBlocks < 0 {\n\t\tpanic(\"freeport: no blocks of ports available outside of ephemeral range\")\n\t}\n\tif lowPort+effectiveMaxBlocks*blockSize > 65535 {\n\t\tpanic(\"freeport: block size too big or too many blocks requested\")\n\t}\n\n\trand.Seed(time.Now().UnixNano())\n\tfirstPort, lockLn = alloc()\n\n\tcondNotEmpty = sync.NewCond(&mu)\n\tfreePorts = list.New()\n\tpendingPorts = list.New()\n\n\t// fill with all available free ports\n\tfor port := firstPort + 1; port < firstPort+blockSize; port++ {\n\t\tif used := isPortInUse(port); !used {\n\t\t\tfreePorts.PushBack(port)\n\t\t}\n\t}\n\ttotal = freePorts.Len()\n\n\tgo checkFreedPorts()\n}",
"func getPort() (port uint16) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer l.Close()\n\treturn uint16(l.Addr().(*net.TCPAddr).Port)\n}",
"func RandomPort() (int, error) {\n\tl, err := net.Listen(\"tcp\", \"\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif err := l.Close(); err != nil {\n\t\treturn 0, nil\n\t}\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}",
"func (crMgr *CRManager) virtualPorts(vs *cisapiv1.VirtualServer) []portStruct {\n\n\t// TODO ==> This will change as we will support custom ports.\n\tconst DEFAULT_HTTP_PORT int32 = 80\n\t//const DEFAULT_HTTPS_PORT int32 = 443\n\tvar httpPort int32\n\t// var httpsPort int32\n\thttpPort = DEFAULT_HTTP_PORT\n\t// httpsPort = DEFAULT_HTTPS_PORT\n\n\thttp := portStruct{\n\t\tprotocol: \"http\",\n\t\tport: httpPort,\n\t}\n\t// Support TLS Type, Create both HTTP and HTTPS\n\t/**\n\thttps := portStruct{\n\t\tprotocol: \"https\",\n\t\tport: httpsPort,\n\t}**/\n\tvar ports []portStruct\n\n\t// Support TLS Type, Create both HTTP and HTTPS\n\t/**\n\tif len(vs.Spec.TLS) > 0 {\n\t\t// 2 virtual servers needed, both HTTP and HTTPS\n\t\tports = append(ports, http)\n\t\tports = append(ports, https)\n\t} else {\n\t\t// HTTP only\n\t\tports = append(ports, http)\n\t}**/\n\n\tports = append(ports, http)\n\n\treturn ports\n}",
"func openPorts() {\n\tinPort, err = utils.CreateInputPort(\"bonjour/discover.options\", *inputEndpoint, nil)\n\tutils.AssertError(err)\n}",
"func randomPort() (string, error) {\n\tconst (\n\t\tminPort = 1024\n\t\tmaxPort = 65535\n\t\tmaxTries = 10\n\t)\n\tfor i := 0; i < maxTries; i++ {\n\t\tport := rand.Intn(maxPort-minPort+1) + minPort\n\t\tl, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", port))\n\t\tif err == nil {\n\t\t\t_ = l.Close()\n\t\t\treturn strconv.Itoa(port), nil\n\t\t}\n\t\tflog.Info(\"port taken: %d\", port)\n\t}\n\n\treturn \"\", xerrors.Errorf(\"max number of tries exceeded: %d\", maxTries)\n}",
"func (c *ClientProxyMappingParser) GetClientProxyMappingPorts() (ports []string) {\n\tc.init()\n\treturn c.ports\n}",
"func portscan(asyncCount int, host string, startPort uint32, endPort uint32, portsChecked *uint32) (chan uint32, chan bool) {\n\tportCount := endPort + 1 - startPort\n\n\tvar goroutines = make(chan bool, asyncCount) // concurrency control\n\tvar openPorts = make(chan uint32, portCount) // Store list of open ports, concurrency-safe, buffered\n\tvar completed = make(chan bool)\n\n\tgo func() {\n\t\t// Tasks to do at completion of scanning\n\t\tdefer func() {\n\t\t\t// Close openPorts channel since it's buffered\n\t\t\tclose(openPorts)\n\n\t\t\t// Send signal to anything waiting on buffered completion channel\n\t\t\tcompleted <- true\n\t\t}()\n\n\t\tfor port := startPort; port <= endPort; port++ {\n\t\t\tgoroutines <- true // Wait until allowed to go\n\n\t\t\tgo func(p uint32) {\n\t\t\t\tdefer func() {\n\t\t\t\t\t<-goroutines\n\t\t\t\t}() // release lock when done\n\n\t\t\t\t// Check the port\n\t\t\t\tif portOpen := scanOnePort(host, p); portOpen {\n\t\t\t\t\topenPorts <- p\n\t\t\t\t}\n\t\t\t\tatomic.AddUint32(portsChecked, 1)\n\t\t\t}(port)\n\t\t}\n\n\t}()\n\n\treturn openPorts, completed\n}",
"func newServicePorts(m *influxdatav1alpha1.Influxdb) []corev1.ServicePort {\n\tvar ports []corev1.ServicePort\n\n\tports = append(ports, corev1.ServicePort{Port: 8086, Name: \"api\"},\n\t\tcorev1.ServicePort{Port: 2003, Name: \"graphite\"},\n\t\tcorev1.ServicePort{Port: 25826, Name: \"collectd\"},\n\t\tcorev1.ServicePort{Port: 8089, Name: \"udp\"},\n\t\tcorev1.ServicePort{Port: 4242, Name: \"opentsdb\"},\n\t\tcorev1.ServicePort{Port: 8088, Name: \"backup-restore\"},\n\t)\n\treturn ports\n}",
"func Sequential(port string) []string {\n\tdialer := net.Dialer{Timeout: time.Millisecond * 100}\n\toutput := make([]string, 0)\n\n\tfor i := 0; i <= 255; i++ {\n\t\taddr := addr + \".\" + strconv.Itoa(i) + \":\" + port\n\t\tconn, err := dialer.Dial(\"tcp\", addr)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\toutput = append(output, conn.RemoteAddr().String())\n\t\tconn.Close()\n\t}\n\n\treturn output\n}",
"func (rcsw *RemoteClusterServiceWatcher) getEndpointsPorts(service *corev1.Service, gatewayPort int32) []corev1.EndpointPort {\n\tvar endpointsPorts []corev1.EndpointPort\n\tfor _, remotePort := range service.Spec.Ports {\n\t\tendpointsPorts = append(endpointsPorts, corev1.EndpointPort{\n\t\t\tName: remotePort.Name,\n\t\t\tProtocol: remotePort.Protocol,\n\t\t\tPort: gatewayPort,\n\t\t})\n\t}\n\treturn endpointsPorts\n}",
"func findPort() (int, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}",
"func CreateServers(ctx context.Context, n int) ([]int, error) {\n\tvar ports []int\n\n\tlocalCtx, cancel := context.WithCancel(ctx)\n\tfor i := 0; i < n; i++ {\n\t\tport, err := ListenHTTP(localCtx)\n\t\tif err != nil {\n\t\t\tcancel()\n\t\t\treturn nil, err\n\t\t}\n\n\t\tports = append(ports, port)\n\t}\n\n\treturn ports, nil\n}",
"func RandomPort() int {\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tport := r.Intn(2000) + 30000\n\tfor i := 0; i < 18000; i++ {\n\t\tif checkPortIsOpen(port) == false {\n\t\t\tbreak\n\t\t}\n\t\tport++\n\t\t// retry next port\n\t}\n\treturn port\n}",
"func getContainerPorts(ports []echo.Port) model.PortList {\n\tcontainerPorts := make(model.PortList, 0, len(ports))\n\tvar healthPort *model.Port\n\tvar readyPort *model.Port\n\tfor _, p := range ports {\n\t\t// Add the port to the set of application ports.\n\t\tcport := &model.Port{\n\t\t\tName: p.Name,\n\t\t\tProtocol: p.Protocol,\n\t\t\tPort: p.InstancePort,\n\t\t}\n\t\tcontainerPorts = append(containerPorts, cport)\n\n\t\tswitch p.Protocol {\n\t\tcase model.ProtocolGRPC:\n\t\t\tcontinue\n\t\tcase model.ProtocolHTTP:\n\t\t\tif p.InstancePort == httpReadinessPort {\n\t\t\t\treadyPort = cport\n\t\t\t}\n\t\tdefault:\n\t\t\tif p.InstancePort == tcpHealthPort {\n\t\t\t\thealthPort = cport\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we haven't added the readiness/health ports, do so now.\n\tif readyPort == nil {\n\t\tcontainerPorts = append(containerPorts, &model.Port{\n\t\t\tName: \"http-readiness-port\",\n\t\t\tProtocol: model.ProtocolHTTP,\n\t\t\tPort: httpReadinessPort,\n\t\t})\n\t}\n\tif healthPort == nil {\n\t\tcontainerPorts = append(containerPorts, &model.Port{\n\t\t\tName: \"tcp-health-port\",\n\t\t\tProtocol: model.ProtocolHTTP,\n\t\t\tPort: tcpHealthPort,\n\t\t})\n\t}\n\treturn containerPorts\n}",
"func (o *NetworkElementSummaryAllOf) GetNumFcPorts() int64 {\n\tif o == nil || o.NumFcPorts == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.NumFcPorts\n}",
"func ScanPorts(host string, options ...ScanPortsOption) <-chan int {\n\topts := defaultScanPortsOptions()\n\tfor _, opt := range options {\n\t\topt.setScanPortsOption(opts)\n\t}\n\n\tallPorts := pipeline.Ints(1, maxPort)\n\tfoundPorts := make([]<-chan int, 128)\n\tfor i := 0; i < 128; i++ {\n\t\tfoundPorts[i] = scanPorts(host, allPorts, opts.EagerPrint)\n\t}\n\treturn pipeline.MergeInts(foundPorts...)\n}",
"func (alloc *RuntimePortAllocator) ReleaseReservedPorts(ports []int) {\n\talloc.log.Info(\"Releasing reserved ports\", \"ports to be released\", ports)\n\tfor _, port := range ports {\n\t\tif err := alloc.pa.Release(port); err != nil {\n\t\t\talloc.log.Error(err, \"can't release port\", \"port\", port)\n\t\t}\n\t}\n}",
"func (o FirewallAllowedItemOutput) Ports() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v FirewallAllowedItem) []string { return v.Ports }).(pulumi.StringArrayOutput)\n}",
"func GetUriFilteringTotalAttackConnectionPort(engine *xorm.Engine, telePreMitigationId int64) (tac []UriFilteringTotalAttackConnectionPort, err error) {\n\ttac = []UriFilteringTotalAttackConnectionPort{}\n\terr = engine.Where(\"tele_pre_mitigation_id = ?\", telePreMitigationId).OrderBy(\"id ASC\").Find(&tac)\n\treturn\n}",
"func GetPort() int {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer l.Close()\n\treturn l.Addr().(*net.TCPAddr).Port\n}",
"func pickFreeAddr(t *testing.T) string {\n\tl, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l.Close()\n\treturn l.Addr().String()\n}",
"func randomPort() string {\n\tl, err := net.Listen(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\t// listening for port 0 should never error but just in case\n\t\treturn strconv.Itoa(1024 + rand.Intn(65536-1024))\n\t}\n\n\tp := l.Addr().(*net.TCPAddr).Port\n\tl.Close()\n\treturn strconv.Itoa(p)\n}",
"func (o FirewallAllowedItemResponseOutput) Ports() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v FirewallAllowedItemResponse) []string { return v.Ports }).(pulumi.StringArrayOutput)\n}",
"func (s *server) getListenerPorts() map[uint32]bool {\n\n\tlistenerPorts := map[uint32]bool{}\n\tfor _, listener := range s.dbentities.GetListeners() {\n\t\tlistenerPorts[uint32(listener.Port)] = true\n\t}\n\treturn listenerPorts\n}",
"func V1GetPorts(c *gin.Context) {\n\tvar ports []m.PortCBP\n\tportPt := &ports\n\tif err := dao.GetAllPorts(&portPt); err != nil {\n\t\tutils.NotFound(c, err)\n\t\treturn\n\t}\n\tutils.Ok(c, *portPt)\n}",
"func openPorts() {\n\toptionsPort, err = utils.CreateInputPort(\"distinct.options\", *optionsEndpoint, nil)\n\tutils.AssertError(err)\n\n\tinPort, err = utils.CreateInputPort(\"distinct.in\", *inputEndpoint, inCh)\n\tutils.AssertError(err)\n\n\toutPort, err = utils.CreateOutputPort(\"distinct.out\", *outputEndpoint, outCh)\n\tutils.AssertError(err)\n}",
"func WaitTCPPort(ctx Ctx, addr fmt.Stringer) error {\n\tconst delay = time.Second / 20\n\tbackOff := backoff.WithContext(backoff.NewConstantBackOff(delay), ctx)\n\top := func() error {\n\t\tvar dialer net.Dialer\n\t\tconn, err := dialer.DialContext(ctx, \"tcp\", addr.String())\n\t\tif err == nil {\n\t\t\terr = conn.Close()\n\t\t}\n\t\treturn err\n\t}\n\treturn backoff.Retry(op, backOff)\n}"
] | [
"0.6888929",
"0.67983717",
"0.66069543",
"0.65850073",
"0.6558296",
"0.6558296",
"0.65428734",
"0.6534923",
"0.6523196",
"0.65159756",
"0.65052295",
"0.64984024",
"0.6488279",
"0.6486295",
"0.6423216",
"0.64032197",
"0.6374218",
"0.63717896",
"0.6371297",
"0.63018143",
"0.6290728",
"0.62243843",
"0.6131413",
"0.6048003",
"0.602278",
"0.60175073",
"0.594997",
"0.594997",
"0.58913636",
"0.5886659",
"0.58812517",
"0.583339",
"0.58161175",
"0.5798913",
"0.5773416",
"0.57414144",
"0.572828",
"0.5690129",
"0.5667676",
"0.561003",
"0.55756605",
"0.55755717",
"0.5511968",
"0.5399512",
"0.5258598",
"0.5257981",
"0.5257981",
"0.52372044",
"0.5208359",
"0.5204069",
"0.51661086",
"0.51636267",
"0.51075286",
"0.5099787",
"0.5081063",
"0.5070362",
"0.5067064",
"0.50518686",
"0.50508904",
"0.5038434",
"0.49972916",
"0.49908856",
"0.4957175",
"0.4893593",
"0.4884692",
"0.48818564",
"0.48740864",
"0.48740864",
"0.48738757",
"0.4859928",
"0.48560318",
"0.4853574",
"0.48479646",
"0.4846071",
"0.4845135",
"0.48419297",
"0.48287135",
"0.48183346",
"0.47942844",
"0.4780374",
"0.47620478",
"0.47512275",
"0.47439438",
"0.47422034",
"0.4741782",
"0.47404355",
"0.4727248",
"0.4721838",
"0.4721072",
"0.4720844",
"0.47015762",
"0.46961495",
"0.4694343",
"0.46881056",
"0.46860495",
"0.46858385",
"0.4685394",
"0.4681093",
"0.46760586",
"0.4664238"
] | 0.8380831 | 0 |
HostUUIDExistsLocally checks if dataDir/host_uuid file exists in local storage. | func HostUUIDExistsLocally(dataDir string) bool {
_, err := ReadHostUUID(dataDir)
return err == nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (r *Release) localExist() error {\n\tvar (\n\t\tversion string = fmt.Sprintf(\"terraform-%s.zip\", r.Version)\n\t\terr error\n\t)\n\n\tif _, err = os.Stat(filepath.Join(r.Home, PathTmp.toString(), version)); !os.IsNotExist(err) {\n\t\tfmt.Println(\"Already in cache ...\")\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (this *DataStore) isExistUUID(uuid string) bool {\n\tif _ ,ok := this.ProcessTable[uuid]; ok {\n\t\treturn true\n\t} \n\treturn false\n}",
"func (b *Binary) LocalExist() bool {\n\treturn b.file.LocalExist()\n}",
"func (l *localFileSystem) Exists(prefix string, filepath string) bool {\n\tif p := strings.TrimPrefix(filepath, prefix); len(p) <= len(filepath) {\n\t\tp = path.Join(l.root, p)\n\t\t/*if !l.physfs {\n\t\t\treturn existsFile(l, p)\n\t\t} else {*/\n\t\tfmt.Println(\"Exists: \" + p)\n\t\treturn physfs.Exists(p)\n\t\t//}\n\t}\n\treturn false\n}",
"func (r *Release) remoteExist() error {\n\tvar (\n\t\turl string = fmt.Sprintf(PathTerraform.toString(), r.Version, r.Version, runtime.GOOS, runtime.GOARCH)\n\t\tresp *http.Response\n\t\terr error\n\t)\n\n\tif resp, err = r.HTTPclient.Get(url); err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\t// Verify code equal 200\n\tif resp.StatusCode == http.StatusOK {\n\t\treturn nil\n\t}\n\n\treturn nil\n}",
"func (s storage) Exist(path string) bool {\n\t_, err := os.Stat(path)\n\treturn !os.IsNotExist(err)\n}",
"func IsExist(err error) bool",
"func exists() bool {\r\n\t_, err := ioutil.ReadFile(\"nodestore.json\")\r\n\tif os.IsNotExist(err) {\r\n\t\treturn false\r\n\t}\r\n\treturn true\r\n}",
"func FilesStorageExists(exec boil.Executor, iD int) (bool, error) {\n\tvar exists bool\n\tsql := \"select exists(select 1 from `files_storages` where `id`=? limit 1)\"\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, iD)\n\t}\n\n\trow := exec.QueryRow(sql, iD)\n\n\terr := row.Scan(&exists)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"models: unable to check if files_storages exists\")\n\t}\n\n\treturn exists, nil\n}",
"func (d *Driver) Exists(id string) bool {\n\td.Lock()\n\tdefer d.Unlock()\n\treturn d.filesystemsCache[d.zfsPath(id)]\n}",
"func (instance *Host) Exists(ctx context.Context) (_ bool, ferr fail.Error) {\n\tdefer fail.OnPanic(&ferr)\n\n\tif valid.IsNil(instance) {\n\t\treturn false, fail.InvalidInstanceError()\n\t}\n\n\tdefer elapsed(ctx, fmt.Sprintf(\"Exist of %s\", instance.name.Load().(string)))()\n\ttheID, err := instance.GetID()\n\tif err != nil {\n\t\treturn false, fail.ConvertError(err)\n\t}\n\n\tif beta := os.Getenv(\"SAFESCALE_DETECT_CORRUPTION\"); beta != \"yes\" {\n\t\treturn true, nil\n\t}\n\n\t_, xerr := instance.Service().InspectHost(ctx, theID)\n\tif xerr != nil {\n\t\tswitch xerr.(type) {\n\t\tcase *fail.ErrNotFound:\n\t\t\treturn false, nil\n\t\tdefault:\n\t\t\treturn false, xerr\n\t\t}\n\t}\n\n\treturn true, nil\n}",
"func machineExists(id string) bool {\n\tmut.Lock()\n\tlogFile.Seek(0, 0)\n\tdefer logFile.Seek(0, 2)\n\tdefer mut.Unlock()\n\tscanner := bufio.NewScanner(logFile)\n\tfor scanner.Scan() {\n\t\tif strings.Contains(scanner.Text(), id) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (gcs *localStorage) FileExists(bucket, fileName string) bool {\n\t_, err := os.Stat(fileName)\n\treturn err == nil\n}",
"func db_check_user_exists(username string) bool {\n file_path := path.Join(\"db/users\", strings.ToLower(username) + \".json\")\n \n if _, err := os.Stat(file_path); !os.IsNotExist(err) {\n return true\n }\n return false\n}",
"func (z *ZKC) identityExists(id [zkidentity.IdentitySize]byte) bool {\n\t_, err := os.Stat(path.Join(z.settings.Root, inboundDir,\n\t\thex.EncodeToString(id[:]), identityFilename))\n\tif err == nil {\n\t\tids := hex.EncodeToString(id[:])\n\t\tfullPath := path.Join(z.settings.Root, inboundDir, ids)\n\t\t_, err1 := os.Stat(path.Join(fullPath, ratchetFilename))\n\t\t_, err2 := os.Stat(path.Join(fullPath, halfRatchetFilename))\n\t\tif err1 == nil || err2 == nil {\n\t\t\treturn true\n\t\t}\n\n\t\t// this happens during reset condiftion\n\t\tz.Dbg(idZKC, \"identityExists: reset condition\")\n\t\treturn false\n\t}\n\n\treturn false\n}",
"func (c *Local) Exists(key string) (string, error) {\n\tsum := hash(key)\n\tdirPrefix := filepath.Join(c.Root, fmt.Sprintf(\"%x\", sum[0:1]))\n\tdirEnd := fmt.Sprintf(\"%x\", sum[1:len(sum)-1])\n\tdirFull := filepath.Join(dirPrefix, dirEnd)\n\tif PathIsDir(dirFull) {\n\t\treturn dirFull, nil\n\t}\n\treturn \"\", nil\n}",
"func (_UsersData *UsersDataCaller) IsUuidExist(opts *bind.CallOpts, uuid [16]byte) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _UsersData.contract.Call(opts, out, \"isUuidExist\", uuid)\n\treturn *ret0, err\n}",
"func MasterFileExists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil\n}",
"func (_UsersData *UsersDataCallerSession) IsUuidExist(uuid [16]byte) (bool, error) {\n\treturn _UsersData.Contract.IsUuidExist(&_UsersData.CallOpts, uuid)\n}",
"func exists(filePath string) (exists bool) {\n _,err := os.Stat(filePath)\n if err != nil {\n exists = false\n } else {\n exists = true\n }\n return\n}",
"func Exists(uid int, address string) bool {\n\tnowTime := time.Now().Unix()\n\n\tif uCache, ok := localCache.UIDCache[uid]; ok {\n\t\t// cache未过期\n\t\tif uCache.lastModifyTime+CacheExpireTime> nowTime {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tif addrCache, ok := localCache.AddressCache[address]; ok {\n\t\t// cache未过期\n\t\tif addrCache.lastModifyTime+CacheExpireTime> nowTime {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (cs ConsulStorage) Exists(ctx context.Context, key string) bool {\n\tkv, _, err := cs.ConsulClient.KV().Get(cs.prefixKey(key), ConsulQueryDefaults(ctx))\n\tif kv != nil && err == nil {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (_UsersData *UsersDataSession) IsUuidExist(uuid [16]byte) (bool, error) {\n\treturn _UsersData.Contract.IsUuidExist(&_UsersData.CallOpts, uuid)\n}",
"func (ros RealOS) Exists(p string) bool {\n\tif _, err := os.Stat(path.Clean(p)); err == nil {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (b *Binary) RemoteExist() bool {\n\tif !b.file.RemoteExist() {\n\t\treturn false\n\t}\n\n\tcmd := fmt.Sprintf(\"sha256sum %s | cut -d\\\" \\\" -f1\", b.file.RemotePath())\n\tremoteSHA256, err := b.file.sshClient.SudoCmd(cmd)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif err := b.checksumList.Get(); err != nil {\n\t\treturn false\n\t}\n\n\tif remoteSHA256 != b.checksumList.Value() {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (m *manifestService) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {\n\tcontext.GetLogger(ctx).Debugf(\"(*manifestService).Exists\")\n\n\timage, _, err := m.repo.getImageOfImageStream(dgst)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn image != nil, nil\n}",
"func Exists(fname string) bool {\n if _, err := os.Stat(fname); os.IsNotExist(err) {\n return false\n }\n return true\n}",
"func (d *Driver) Exists(id string) bool {\n\tlogrus.Debugf(\"secureoverlay2: Exists called w. id: %s\", id)\n\n\t// TODO: below is implementation from overlay2 but doesn't really mesh with the function description (also from overlay2)\n\t// as this should be true as soon as layer is created using Create, regardless of mount (call of Get)?!\n\t_, err := os.Stat(d.dir(id))\n\treturn err == nil\n}",
"func hostExists(host string, hosts []string) bool {\n\tfor _, entry := range hosts {\n\t\tif host == cleanup(entry) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (l *Location) Exists() (bool, error) {\n\n\tclient, err := l.fileSystem.Client(l.Authority)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\t// start timer once action is completed\n\tdefer l.fileSystem.connTimerStart()\n\n\tinfo, err := client.Stat(l.Path())\n\tif err != nil && err == os.ErrNotExist {\n\t\treturn false, nil\n\t} else if err != nil {\n\t\treturn false, err\n\t}\n\n\tif !info.IsDir() {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}",
"func (cli *FakeDatabaseClient) FileExists(ctx context.Context, in *dbdpb.FileExistsRequest, opts ...grpc.CallOption) (*dbdpb.FileExistsResponse, error) {\n\tpanic(\"implement me\")\n}",
"func (s *fsStore) Exists(typ namespace.Type, name string) bool {\n\ttrgt := s.targetPath(name, typ)\n\tif _, err := os.Stat(trgt); err == nil {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (s *s3ManifestService) Exists(ctx context.Context, dgst godigest.Digest) (bool, error) {\n\treturn false, fmt.Errorf(\"unimplemented\")\n}",
"func (c *PumpsClient) exist(nodeID string) bool {\n\tc.RLock()\n\t_, ok := c.Pumps.Pumps[nodeID]\n\tc.RUnlock()\n\treturn ok\n}",
"func isExist(filePath string) bool {\n\t_, err := os.Stat(filePath)\n\treturn err == nil\n}",
"func (a *FileStorage) Exists() bool {\n\treturn a._exists\n}",
"func (sample *SampleImage) Exists() bool {\n\treturn filesystem.Exists(sample.RootDir)\n}",
"func IsFileExists(filePath string) (bool, error) {\n\t//Check if file exists in cache\n\t_, err := os.Stat(filePath)\n\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t} else if err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}",
"func Exists(name string) bool {\n if _, err := os.Stat(name); err != nil {\n if os.IsNotExist(err) {\n return false\n }\n }\n return true\n}",
"func exists(name string) bool {\n if _, err := os.Stat(name); err != nil {\n if os.IsNotExist(err) {\n return false\n }\n }\n return true\n}",
"func (fs *FileStore) Exists(key string) bool {\n\t_, err := os.Stat(filepath.Join(fs.baseDir, fs.mangleKey(key, false)))\n\treturn err == nil\n}",
"func (a *StorageUsage) Exists() bool {\n\treturn a._exists\n}",
"func (_ElvTradableLocal *ElvTradableLocalCaller) Exists(opts *bind.CallOpts, tokenId *big.Int) (bool, error) {\n\tvar out []interface{}\n\terr := _ElvTradableLocal.contract.Call(opts, &out, \"exists\", tokenId)\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}",
"func (mcm *MinioChunkManager) Exist(ctx context.Context, filePath string) (bool, error) {\n\t_, err := mcm.statMinioObject(ctx, mcm.bucketName, filePath, minio.StatObjectOptions{})\n\tif err != nil {\n\t\terrResponse := minio.ToErrorResponse(err)\n\t\tif errResponse.Code == \"NoSuchKey\" {\n\t\t\treturn false, nil\n\t\t}\n\t\tlog.Warn(\"failed to stat object\", zap.String(\"bucket\", mcm.bucketName), zap.String(\"path\", filePath), zap.Error(err))\n\t\treturn false, err\n\t}\n\treturn true, nil\n}",
"func (db StdNetDB) Exists() bool {\n\tp := db.Path()\n\t// check root directory\n\t_, err := os.Stat(p)\n\tif err == nil {\n\t\t// check subdirectories for skiplist\n\t\tfor _, c := range base64.Alphabet {\n\t\t\tif _, err = os.Stat(filepath.Join(p, fmt.Sprintf(\"r%c\", c))); err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn err == nil\n}",
"func IsStaticIPFileExist() (bool, error) {\n\treturn utils.IsFileExist(staticipfilepath)\n}",
"func (storage *FileStorage) IsExist(storedName string) bool {\n\tfor _, entry := range *storage {\n\t\tif entry.StoredName == storedName {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func Exists(filename string) (bool, error) {\n _, err := os.Stat(filename)\n if err == nil {\n return true, nil\n }\n if os.IsNotExist(err) {\n return false, nil\n }\n var mu bool\n return mu, err\n}",
"func Exists(filePath string) (bool, error) {\n\tfilePath = strings.Replace(filePath, \"~\", HomeDir(), 1)\n\n\tif _, err := os.Stat(filePath); err == nil {\n\t\treturn true, nil\n\t} else if os.IsNotExist(err) {\n\t\treturn false, nil\n\t} else {\n\t\treturn false, err\n\t}\n}",
"func (s *Storage)IsExist(key interface{})bool {\n\t_,ok :=s.data.Load(key)\n\treturn ok\n}",
"func (d *Driver) Exists(walletType, dataDir string, _ map[string]string, net dex.Network) (bool, error) {\n\tif walletType != walletTypeSPV {\n\t\treturn false, fmt.Errorf(\"no Decred wallet of type %q available\", walletType)\n\t}\n\n\tchainParams, err := parseChainParams(net)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn walletExists(filepath.Join(dataDir, chainParams.Name, \"spv\"))\n}",
"func fileExists(ctx context.Context, uri span.URI, source source.FileSource) (bool, error) {\n\tfh, err := source.GetFile(ctx, uri)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn fileHandleExists(fh)\n}",
"func doesDeviceExist(deviceID int, meta interface{}) bool {\n\tlog.Printf(\"[Dotcom-Monitor] [DEBUG] Checking if device exists with ID: %v\", deviceID)\n\tdevice := &client.Device{\n\t\tID: deviceID,\n\t}\n\n\t// Since an empty HTTP response is a valid 200 from the API, we will determine if\n\t// the device exists by comparing the hash of the struct before and after the HTTP call.\n\t// If the has does not change, it means nothing else was added, therefore it does not exist.\n\t// If the hash changes, the API found the device and added the rest of the fields.\n\th := sha256.New()\n\tt := fmt.Sprintf(\"%v\", device)\n\tsum := h.Sum([]byte(t))\n\tlog.Printf(\"[Dotcom-Monitor] [DEBUG] Hash before: %x\", sum)\n\n\t// Try to get device from API\n\tapi := meta.(*client.APIClient)\n\terr := api.GetDevice(device)\n\n\tt2 := fmt.Sprintf(\"%v\", device)\n\tsum2 := h.Sum([]byte(t2))\n\tlog.Printf(\"[Dotcom-Monitor] [DEBUG] Hash after: %x\", sum2)\n\n\t// Compare the hashes, and if there was an error from the API we will assume the device exists\n\t// to be safe that we do not improperly remove an existing device from state\n\tif bytes.Equal(sum, sum2) && err == nil {\n\t\tlog.Println(\"[Dotcom-Monitor] [DEBUG] No new fields added to the device, therefore the device did not exist\")\n\t\treturn false\n\t}\n\n\t// If we get here, we can assume the device does exist\n\treturn true\n}",
"func (cache *LedisCacheStorage) CheckDeviceExistence(clientID string, id string) bool {\n\tamount, err := cache.db.HGet([]byte(clientID+\":device\"), []byte(id))\n\n\tif err != nil {\n\t\t_, _ = fmt.Fprintf(os.Stderr, \"Ledis Cache: failed to check device existence %v\\n\", err)\n\t\treturn false\n\t}\n\n\treturn amount != nil\n}",
"func (h *fs) Exists(file string) bool {\n\t_, err := os.Stat(file)\n\treturn err == nil\n}",
"func (servers *Servers) IsExist(macAddressStr string) bool {\n\n\tserver, _ := servers.GetServer(macAddressStr)\n\n\tif server != nil {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n\n}",
"func ImageExistsLocally(ctx context.Context, imageName string, platform string) (bool, error) {\n\treturn false, errors.New(\"Unsupported Operation\")\n}",
"func blockExists(target, data string) (bool, error) {\n\tif !exists(target) {\n\t\treturn false, nil\n\t}\n\tb, err := ioutil.ReadFile(target)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif bytes.Contains(b, []byte(data)) {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}",
"func (m *InMemoryRepository) Exists(u fyne.URI) (bool, error) {\n\tpath := u.Path()\n\tif path == \"\" {\n\t\treturn false, fmt.Errorf(\"invalid path '%s'\", path)\n\t}\n\n\t_, ok := m.Data[path]\n\treturn ok, nil\n}",
"func Exists(path string) bool {\n\tif SunnyDay {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (d *Driver) Exists(id string) bool {\n\tlogrus.Debugf(\"Exists - id %s\", id)\n\terr := d.ioctl(LayerStat, \"\", id)\n\treturn err == nil\n}",
"func DoesExist(pth string) bool {\n\tif _, err := os.Stat(pth); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (p *MemProvider) Exist(sid string) bool {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\t_, ok := p.data[sid]\n\treturn ok\n}",
"func exists(filePath string) bool {\n\t_, err := os.Stat(filePath)\n\treturn err == nil\n}",
"func (client *GCSBlobstore) Exists(dest string) (exists bool, err error) {\n\tif exists, err = client.exists(client.publicGCS, dest); err == nil {\n\t\treturn exists, nil\n\t}\n\n\t// If the public client fails, try using it as an authenticated actor\n\tif client.authenticatedGCS != nil {\n\t\treturn client.exists(client.authenticatedGCS, dest)\n\t}\n\n\treturn\n}",
"func (q filesStorageQuery) Exists(exec boil.Executor) (bool, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\tqueries.SetLimit(q.Query, 1)\n\n\terr := q.Query.QueryRow(exec).Scan(&count)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"models: failed to check if files_storages exists\")\n\t}\n\n\treturn count > 0, nil\n}",
"func (sds *SiaDirSet) exists(siaPath string) (bool, error) {\n\t// Check for SiaDir in Memory\n\tsiaPath = strings.Trim(siaPath, \"/\")\n\t_, exists := sds.siaDirMap[siaPath]\n\tif exists {\n\t\treturn exists, nil\n\t}\n\t// Check for SiaDir on disk\n\t_, err := os.Stat(filepath.Join(sds.rootDir, siaPath+\"/\"+SiaDirExtension))\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\treturn false, err\n}",
"func exists(p string) bool {\n\tif _, err := os.Stat(p); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func ReadHostUUID(dataDir string) (string, error) {\n\tout, err := ReadPath(filepath.Join(dataDir, HostUUIDFile))\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrPermission) {\n\t\t\t//do not convert to system error as this loses the ability to compare that it is a permission error\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn \"\", trace.ConvertSystemError(err)\n\t}\n\tid := strings.TrimSpace(string(out))\n\tif id == \"\" {\n\t\treturn \"\", trace.NotFound(\"host uuid is empty\")\n\t}\n\treturn id, nil\n}",
"func InternalExists(opts ExistsOpts, conn connections.Connection) (*connections.FileResult, error) {\n\tinput := map[string]interface{}{\n\t\t\"path\": opts.Path,\n\t\t\"timeout\": opts.Timeout,\n\t\t\"_logger\": opts.Logger,\n\t\t\"_internal\": true,\n\t}\n\n\treturn Exists(input, opts.Connection)\n}",
"func (*GuluFile) IsExist(path string) bool {\n\t_, err := os.Stat(path)\n\n\treturn err == nil || os.IsExist(err)\n}",
"func (xsml *XfileServiceMetricLog) Exists() bool {\n\treturn xsml._exists\n}",
"func (u *urlShortner) exists() bool {\n\tif _, err := appFs.Stat(u.shortURL); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func fileExists(path string) bool {\n _, err := os.Stat(path)\n return err == nil\n}",
"func (f *FakeFileSystem) Exists(file string) bool {\n\tf.ExistsFile = append(f.ExistsFile, file)\n\treturn f.ExistsResult[file]\n}",
"func (realFS) Exists(name string) bool {\n\t_, err := os.Stat(name)\n\treturn err == nil\n}",
"func (s *storager) Exists(ctx context.Context, resourceID string, options ...storage.Option) (bool, error) {\n\tresource, err := newResource(resourceID)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tclient := s.secretManager(resource.Region)\n\t_, err = client.GetSecretValueWithContext(ctx,\n\t\t&secretsmanager.GetSecretValueInput{\n\t\t\tSecretId: &resource.Secret,\n\t\t\tVersionStage: aws.String(\"AWSCURRENT\"),\n\t\t})\n\treturn !isNotFound(err), nil\n}",
"func Exists(path string) bool {\n _, err := os.Stat(path)\n if err == nil { return true }\n if os.IsNotExist(err) { return false }\n return false\n}",
"func (z *ZKC) ratchetExists(id [zkidentity.IdentitySize]byte) bool {\n\t_, err := os.Stat(path.Join(z.settings.Root, inboundDir,\n\t\thex.EncodeToString(id[:]), ratchetFilename))\n\treturn err == nil\n}",
"func (c *FakeZkConn) Exists(path string) (bool, *zk.Stat, error) {\n\tc.history.addToHistory(\"Exists\", path)\n\treturn true, nil, nil\n}",
"func (fs *EmbedFs) IsFileExist(path string) bool {\n\t_, exist := fs.index[path]\n\treturn exist\n}",
"func exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err != nil {\n\t\tif os.IsExist(err) {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}",
"func (p *Provider) Exists(alias string) (bool, error) {\n\tp.mutex.RLock()\n\tdefer p.mutex.RUnlock()\n\n\t_, err := os.Stat(filepath.Join(p.Config.Path, path.Base(alias)))\n\treturn !os.IsNotExist(err), nil\n}",
"func exists(f string) (bool, error) {\n\t_, err := os.Stat(f)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"cannot get stats for path `%s`: %v\", f, err)\n\t}\n\treturn true, nil\n}",
"func (lg *Logger) isExist(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil || os.IsExist(err)\n}",
"func fileExists(file string) bool {\n\t//Debugf(\"checking for file existence \" + file)\n\tif _, err := os.Stat(file); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (o *StorageHyperFlexStorageContainer) HasUuid() bool {\n\tif o != nil && o.Uuid != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *NetworkLicenseFile) HasHostId() bool {\n\tif o != nil && o.HostId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func exist(t *testing.T, root, name string) {\n\t_, err := stat(root, name)\n\tif err != nil {\n\t\tt.Fatalf(\"exist: %v\", err)\n\t}\n}",
"func isExist(n string) string {\n\tif _, err := os.Stat(n); !os.IsNotExist(err) {\n\t\tn = fmt.Sprintf(\"%s_1\", n)\n\t\treturn n\n\t}\n\treturn n\n}",
"func (z *ZkPlus) Exists(path string) (bool, *zk.Stat, error) {\n\tz.forPath(path).Log(logkey.ZkMethod, \"Exists\")\n\treturn z.blockOnConn().Exists(z.realPath(path))\n}",
"func (c *Cache) Exists(name string) bool {\n\tosPath := c.ToOSPath(name)\n\tfi, err := os.Stat(osPath)\n\tif err != nil {\n\t\treturn false\n\t}\n\t// checks for non-regular files (e.g. directories, symlinks, devices, etc.)\n\tif !fi.Mode().IsRegular() {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (b *Blob) IsExist() bool {\n\n\tfilepath := b.FilePath()\n\tif utils.IsFileExist(filepath) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (s *Store) Exists(ctx context.Context, name string) bool {\n\treturn s.storage.Exists(ctx, s.Passfile(name))\n}",
"func (service *FolderServiceImpl) Exists(id int) bool {\n\t_, exists := service.folders[id]\n\n\treturn exists\n}",
"func exists(path string) bool {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func ReadOrMakeHostUUID(dataDir string) (string, error) {\n\tid, err := ReadHostUUID(dataDir)\n\tif err == nil {\n\t\treturn id, nil\n\t}\n\tif !trace.IsNotFound(err) {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\t// Checking error instead of the usual uuid.New() in case uuid generation\n\t// fails due to not enough randomness. It's been known to happen happen when\n\t// Teleport starts very early in the node initialization cycle and /dev/urandom\n\t// isn't ready yet.\n\trawID, err := uuid.NewRandom()\n\tif err != nil {\n\t\treturn \"\", trace.BadParameter(\"\" +\n\t\t\t\"Teleport failed to generate host UUID. \" +\n\t\t\t\"This may happen if randomness source is not fully initialized when the node is starting up. \" +\n\t\t\t\"Please try restarting Teleport again.\")\n\t}\n\tid = rawID.String()\n\tif err = WriteHostUUID(dataDir, id); err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\treturn id, nil\n}",
"func (db *Database) DoesFileExist(f File, c Client) bool {\n\tdbC := db.dbClientForClient(c)\n\tvar count uint64\n\tconst countSQL = `\n\tSELECT COUNT(id) FROM File WHERE name=$1 AND ownerId=$2`\n\tif err := db.QueryRow(countSQL, f.name, dbC.id).Scan(&count); err != nil {\n\t\tlog.Println(\"checking if file saved:\", err)\n\t\treturn false\n\t}\n\treturn count > 0\n}",
"func (linux *Linux) FileExists(filePath string) bool {\n\tfile, err := os.Open(linux.applyChroot(filePath))\n\tdefer file.Close()\n\treturn err == nil\n}",
"func NonZeroFileExists(filename string) bool {\n\n\tif info, err := os.Stat(filename); err == nil {\n\t\tif info.Size() > 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}"
] | [
"0.6837421",
"0.6144338",
"0.61178225",
"0.6017075",
"0.59379965",
"0.59195673",
"0.5889505",
"0.5859016",
"0.58369076",
"0.5826344",
"0.5795487",
"0.57538235",
"0.57405114",
"0.5738003",
"0.57183146",
"0.57179755",
"0.5670447",
"0.56537825",
"0.56479305",
"0.564708",
"0.56407434",
"0.5610793",
"0.5607607",
"0.56041396",
"0.5596046",
"0.55446655",
"0.5534099",
"0.55258054",
"0.55255264",
"0.55228984",
"0.55221367",
"0.55091155",
"0.5499236",
"0.54862314",
"0.5485776",
"0.5474485",
"0.5457866",
"0.54446954",
"0.5436129",
"0.5415444",
"0.54133725",
"0.5405575",
"0.5403257",
"0.5400908",
"0.53881127",
"0.53865445",
"0.53856826",
"0.53850687",
"0.5376573",
"0.5367834",
"0.5367548",
"0.53614604",
"0.5357764",
"0.5348482",
"0.53448886",
"0.53428864",
"0.5341882",
"0.53330183",
"0.53300077",
"0.53294754",
"0.53248703",
"0.531093",
"0.5302594",
"0.52866375",
"0.52864194",
"0.52840936",
"0.5282022",
"0.52784055",
"0.52775544",
"0.52722764",
"0.52711135",
"0.5267289",
"0.5261935",
"0.5250576",
"0.5240205",
"0.5236874",
"0.5234187",
"0.5230421",
"0.52300847",
"0.5224365",
"0.52231234",
"0.52136385",
"0.5212599",
"0.5209042",
"0.5204611",
"0.5202703",
"0.5199821",
"0.5197458",
"0.5196864",
"0.51886266",
"0.51873654",
"0.5184305",
"0.51819515",
"0.51749367",
"0.51709235",
"0.5169141",
"0.5169092",
"0.5167395",
"0.5164498",
"0.5156312"
] | 0.8382169 | 0 |
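Most of the negatives in the record above are variations on one pattern: call os.Stat and interpret the resulting error. As an editorial illustration (not a row from the dataset; the helper name `exists` and its signature are assumptions of this sketch), the same check can be written so that errors other than "not exist" stay visible to the caller instead of being folded into false:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// exists reports whether path exists. Unlike several snippets above, it only
// maps fs.ErrNotExist to false and returns any other stat error (for example
// a permission error) so the caller can tell the two cases apart.
func exists(path string) (bool, error) {
	_, err := os.Stat(path)
	if err == nil {
		return true, nil
	}
	if errors.Is(err, fs.ErrNotExist) {
		return false, nil
	}
	return false, err
}

func main() {
	ok, err := exists("/etc/hosts")
	fmt.Println(ok, err)
}
```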
ReadHostUUID reads host UUID from the file in the data dir | func ReadHostUUID(dataDir string) (string, error) {
out, err := ReadPath(filepath.Join(dataDir, HostUUIDFile))
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return "", err
}
return "", trace.ConvertSystemError(err)
}
id := strings.TrimSpace(string(out))
if id == "" {
return "", trace.NotFound("host uuid is empty")
}
return id, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func ReadOrMakeHostUUID(dataDir string) (string, error) {\n\tid, err := ReadHostUUID(dataDir)\n\tif err == nil {\n\t\treturn id, nil\n\t}\n\tif !trace.IsNotFound(err) {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\t// Checking error instead of the usual uuid.New() in case uuid generation\n\t// fails due to not enough randomness. It's been known to happen happen when\n\t// Teleport starts very early in the node initialization cycle and /dev/urandom\n\t// isn't ready yet.\n\trawID, err := uuid.NewRandom()\n\tif err != nil {\n\t\treturn \"\", trace.BadParameter(\"\" +\n\t\t\t\"Teleport failed to generate host UUID. \" +\n\t\t\t\"This may happen if randomness source is not fully initialized when the node is starting up. \" +\n\t\t\t\"Please try restarting Teleport again.\")\n\t}\n\tid = rawID.String()\n\tif err = WriteHostUUID(dataDir, id); err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\treturn id, nil\n}",
"func UUIDFile(fpath string) (string, error) {\n\n\t_, err := os.Stat(fpath)\n\tif err != nil && !os.IsExist(err) {\n\t\tkey := uuid.New().String()\n\t\tif err := ioutil.WriteFile(fpath, []byte(key), 0777); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn key, nil\n\t}\n\n\tfp, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer fp.Close()\n\tdata, err := ioutil.ReadAll(fp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkey := string(data)\n\tif _, err := uuid.Parse(key); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn key, nil\n}",
"func Read(args ...string) (*UUID, error) {\n\tfpath := sfFilePath(args)\n\tf, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tdata := make([]byte, UUIDHexLen+8)\n\tn, err := f.Read(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n < UUIDHexLen {\n\t\treturn nil, fmt.Errorf(\"File '%s' is too small\", fpath)\n\t}\n\tdata = data[:n]\n\tuuid, err := Decode(string(data))\n\tif err == nil {\n\t\tnc := &cache{uuid: *uuid, filePath: fpath, validationTime: time.Now().Add(ValidationTimePeriod)}\n\t\tatomic.StorePointer(¤t, unsafe.Pointer(nc))\n\t}\n\treturn uuid, err\n}",
"func GetUUID() string {\n\tuuid, _ := ioutil.ReadFile(AppPath.UUIDFile)\n\treturn string(bytes.TrimSpace(uuid))\n}",
"func ReadUUID(buffer []byte, offset int) UUID {\n bytes := ReadBytes(buffer, offset, 16)\n return UUIDFromBytes(bytes)\n}",
"func (b *Broker) readIDFromFile(home, filepath string) (id string, err error) {\n\t_filepath := fmt.Sprintf(\"%v%v%v\", home, string(os.PathSeparator), filepath)\n\t_bytes, err := ioutil.ReadFile(_filepath)\n\tif err != nil {\n\t\treturn\n\t}\n\tid = string(_bytes)\n\treturn\n}",
"func readMachineID() []byte {\n\tid := make([]byte, 3)\n\tif hostname, err := os.Hostname(); err == nil {\n\t\thw := md5.New()\n\t\thw.Write([]byte(hostname))\n\t\tcopy(id, hw.Sum(nil))\n\t} else {\n\t\t// Fallback to rand number if machine id can't be gathered\n\t\tif _, randErr := rand.Reader.Read(id); randErr != nil {\n\t\t\tpanic(fmt.Errorf(\"Cannot get hostname nor generate a random number: %v; %v\", err, randErr))\n\t\t}\n\t}\n\treturn id\n}",
"func readMachineID() []byte {\n\tid := make([]byte, 3)\n\thid, err := readPlatformMachineID()\n\tif err != nil || len(hid) == 0 {\n\t\thid, err = os.Hostname()\n\t}\n\tif err == nil && len(hid) != 0 {\n\t\thw := md5.New()\n\t\thw.Write([]byte(hid))\n\t\tcopy(id, hw.Sum(nil))\n\t} else {\n\t\t// Fallback to rand number if machine id can't be gathered\n\t\tif _, randErr := rand.Reader.Read(id); randErr != nil {\n\t\t\tpanic(fmt.Errorf(\"xid: cannot get hostname nor generate a random number: %v; %v\", err, randErr))\n\t\t}\n\t}\n\treturn id\n}",
"func WriteHostUUID(dataDir string, id string) error {\n\terr := os.WriteFile(filepath.Join(dataDir, HostUUIDFile), []byte(id), os.ModeExclusive|0400)\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrPermission) {\n\t\t\t//do not convert to system error as this loses the ability to compare that it is a permission error\n\t\t\treturn err\n\t\t}\n\t\treturn trace.ConvertSystemError(err)\n\t}\n\treturn nil\n}",
"func readMachineId() []byte {\n\tvar sum [3]byte\n\tid := sum[:]\n\thostname, err1 := os.Hostname()\n\tif err1 != nil {\n\t\tn := uint32(time.Now().UnixNano())\n\t\tsum[0] = byte(n >> 0)\n\t\tsum[1] = byte(n >> 8)\n\t\tsum[2] = byte(n >> 16)\n\t\treturn id\n\t}\n\thw := md5.New()\n\thw.Write([]byte(hostname))\n\tcopy(id, hw.Sum(nil))\n\treturn id\n}",
"func readInstanceID() string {\n\tconst instanceIDFile = \"/var/lib/cloud/data/instance-id\"\n\tidBytes, err := ioutil.ReadFile(instanceIDFile)\n\tif err != nil {\n\t\tglog.Infof(\"Failed to get instance id from file: %v\", err)\n\t\treturn \"\"\n\t} else {\n\t\tinstanceID := string(idBytes)\n\t\tinstanceID = strings.TrimSpace(instanceID)\n\t\tglog.Infof(\"Get instance id from file: %s\", instanceID)\n\t\treturn instanceID\n\t}\n}",
"func removeUuidFromFilepath(path string) string {\n\t// UUID has 4 hyphens, so we split into 6 parts. \n\treturn strings.SplitN(filepath.Base(path), \"-\", 6)[5]\n}",
"func UUID() (string, error) {\n\tb := make([]byte, 2)\n\n\t_, err := crand.Read(b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%X\", b[0:2]), nil\n}",
"func HostUUIDExistsLocally(dataDir string) bool {\n\t_, err := ReadHostUUID(dataDir)\n\treturn err == nil\n}",
"func (s *Store) readID() error {\n\tb, err := ioutil.ReadFile(s.IDPath())\n\tif os.IsNotExist(err) {\n\t\ts.id = 0\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"read file: %s\", err)\n\t}\n\n\tid, err := strconv.ParseUint(string(b), 10, 64)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse id: %s\", err)\n\t}\n\ts.id = id\n\n\ts.Logger.Printf(\"read local node id: %d\", s.id)\n\n\treturn nil\n}",
"func (this *Actions) generateUid(config *Config, hostname string) string{\n uuidNew := uuid.NewV5(uuid.NewV1(), hostname).String()\n uuid_path := config.GetValue(\"basic\", \"uuid_path\")\n log.Info(\"The new uuid is : \" + uuidNew)\n file, error := os.OpenFile(uuid_path, os.O_RDWR|os.O_CREATE, 0622)\n if error != nil {\n log.Error(\"Open uuid file in \"+ uuid_path +\" failed.\" + error.Error())\n }\n _,err := file.WriteString(uuidNew)\n if err != nil {\n log.Error(\"Save uuid file in \"+ uuid_path +\" failed.\" + err.Error())\n }\n file.Close()\n return uuidNew\n}",
"func GetHostUUID(nbmaster string, httpClient *http.Client, jwt string, host string) string {\r\n fmt.Printf(\"\\nGet the UUID of host %s...\\n\", host)\r\n\r\n uri := \"https://\" + nbmaster + \":\" + port + \"/netbackup/config/hosts\";\r\n\r\n request, _ := http.NewRequest(http.MethodGet, uri, nil)\r\n query := request.URL.Query()\r\n query.Add(\"filter\", \"hostName eq '\" + host + \"'\")\r\n request.URL.RawQuery = query.Encode()\r\n\r\n request.Header.Add(\"Authorization\", jwt);\r\n request.Header.Add(\"Accept\", contentTypeV3);\r\n\r\n response, err := httpClient.Do(request)\r\n\r\n hostUuid := \"\"\r\n if err != nil {\r\n fmt.Printf(\"The HTTP request failed with error: %s\\n\", err)\r\n panic(\"Unable to get the host UUID\")\r\n } else {\r\n if response.StatusCode == 200 {\r\n data, _ := ioutil.ReadAll(response.Body)\r\n var obj interface{}\r\n json.Unmarshal(data, &obj)\r\n response := obj.(map[string]interface{})\r\n hosts := response[\"hosts\"].([]interface{})\r\n hostUuid = ((hosts[0].(map[string]interface{}))[\"uuid\"]).(string)\r\n fmt.Printf(\"Host UUID: %s\\n\", hostUuid);\r\n } else {\r\n printErrorResponse(response)\r\n }\r\n }\r\n\r\n return hostUuid\r\n}",
"func getHostFromUUID(id string) (*model.Host, error) {\n\thosts, err := driver.GetHosts()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, host := range *hosts {\n\t\tif host.UUID == id {\n\t\t\t// Host Matches\n\t\t\tlog.Tracef(\"current host matches with id=%s\", id)\n\t\t\treturn host, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"no host found with id %s\", id)\n}",
"func (c *Config) getRandomId() (string, error) {\n\tb, err := ioutil.ReadFile(c.ProcBootId)\n\tif err != nil {\n\t\tglog.Errorf(\"fail to open %s: %q\", c.ProcBootId, err)\n\t\treturn \"\", err\n\t}\n\trandomId := string(b)\n\trandomId = strings.Trim(randomId, \"\\n\")\n\tglog.V(2).Infof(\"RandomId: %q\", randomId)\n\treturn randomId, nil\n\n}",
"func UDID() string {\n\tf, err := os.Open(\"/dev/urandom\")\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to get /dev/urandom! %s\", err))\n\t}\n\tb := make([]byte, 16)\n\t_, err = f.Read(b)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to read 16 bytes from /dev/urandom! %s\", err))\n\t}\n\tf.Close()\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])\n}",
"func uuid() []byte {\n\tuuid := make([]byte, 16)\n\t_, err := rand.Read(uuid)\n\tif err != nil {\n\t\tpanic(\"cue/hosted: uuid() failed to read random bytes\")\n\t}\n\n\t// The following bit twiddling is outlined in RFC 4122. In short, it\n\t// identifies the UUID as a v4 random UUID.\n\tuuid[6] = (4 << 4) | (0xf & uuid[6])\n\tuuid[8] = (8 << 4) | (0x3f & uuid[8])\n\treturn uuid\n}",
"func pid(instance int) (pid string, err error) {\n file, err := os.Open(pidFileName(instance))\n if err != nil {\n return\n }\n\n defer file.Close()\n\n scanner := bufio.NewScanner(file)\n scanner.Scan()\n pid = scanner.Text()\n return\n}",
"func HardwareUUID() (string, error) {\n\t/*\n\t\tSample output of 'wmic path Win32_ComputerSystemProduct get uuid'\n\n\t\tUUID\n\t\t4219B2F5-C25F-6AF2-573C-35B0DF557236\n\t*/\n\tresult, err := readAndParseFromCommandLine(hardwareUUIDCmd)\n\tif err != nil {\n\t\treturn \"-1\", err\n\t}\n\thardwareUUID := \"\"\n\tif len(result) > 1 {\n\t\t// remove all spaces from the second line as that line consists hardware uuid\n\t\tre := regexp.MustCompile(\"\\\\s|\\\\r\")\n\t\thardwareUUID = re.ReplaceAllString(result[1], \"\")\n\t}\n\treturn hardwareUUID, nil\n}",
"func (h *Harness) UUID(id string) string { return h.uuidG.Get(id) }",
"func parseUUID(src string) (dst [16]byte, err error) {\n\tswitch len(src) {\n\tcase 36:\n\t\tsrc = src[0:8] + src[9:13] + src[14:18] + src[19:23] + src[24:]\n\tcase 32:\n\t\t// dashes already stripped, assume valid\n\tdefault:\n\t\t// assume invalid.\n\t\treturn dst, fmt.Errorf(\"cannot parse UUID %v\", src)\n\t}\n\n\tbuf, err := hex.DecodeString(src)\n\tif err != nil {\n\t\treturn dst, err\n\t}\n\n\tcopy(dst[:], buf)\n\treturn dst, err\n}",
"func (device *DCV2Bricklet) ReadUID() (uid uint32, err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Get(uint8(FunctionReadUID), buf.Bytes())\n\tif err != nil {\n\t\treturn uid, err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 12 {\n\t\t\treturn uid, fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 12)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn uid, DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tresultBuf := bytes.NewBuffer(resultBytes[8:])\n\t\tbinary.Read(resultBuf, binary.LittleEndian, &uid)\n\n\t}\n\n\treturn uid, nil\n}",
"func (dev VMVolumeDevice) UUID() string {\n\treturn utils.NewUUID5(blockVolumeNsUUID, dev.HostPath)\n}",
"func GetVendorIDByCPUInfo(path string) (string, error) {\n\tvendorID := \"unknown\"\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn vendorID, err\n\t}\n\tdefer f.Close()\n\n\ts := bufio.NewScanner(f)\n\tfor s.Scan() {\n\t\tif err := s.Err(); err != nil {\n\t\t\treturn vendorID, err\n\t\t}\n\n\t\tline := s.Text()\n\n\t\t// get \"vendor_id\" from first line\n\t\tif strings.Contains(line, \"vendor_id\") {\n\t\t\tattrs := strings.Split(line, \":\")\n\t\t\tif len(attrs) >= 2 {\n\t\t\t\tvendorID = strings.TrimSpace(attrs[1])\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn vendorID, nil\n}",
"func (device *IndustrialDigitalIn4V2Bricklet) ReadUID() (uid uint32, err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Get(uint8(FunctionReadUID), buf.Bytes())\n\tif err != nil {\n\t\treturn uid, err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 12 {\n\t\t\treturn uid, fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 12)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn uid, DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tresultBuf := bytes.NewBuffer(resultBytes[8:])\n\t\tbinary.Read(resultBuf, binary.LittleEndian, &uid)\n\n\t}\n\n\treturn uid, nil\n}",
"func hostRead(d *schema.ResourceData, m interface{}, params zabbix.Params) error {\n\tapi := m.(*zabbix.API)\n\n\tlog.Debug(\"Lookup of host with params %#v\", params)\n\n\thosts, err := api.HostsGet(params)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(hosts) < 1 {\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\tif len(hosts) > 1 {\n\t\treturn errors.New(\"multiple hosts found\")\n\t}\n\thost := hosts[0]\n\n\tlog.Debug(\"Got host: %+v\", host)\n\n\td.SetId(host.HostID)\n\td.Set(\"name\", host.Name)\n\td.Set(\"host\", host.Host)\n\td.Set(\"proxyid\", host.ProxyID)\n\td.Set(\"enabled\", host.Status == 0)\n\td.Set(\"inventory_mode\", HINV_LOOKUP_REV[host.InventoryMode])\n\n\td.Set(\"interface\", flattenHostInterfaces(host, d, m))\n\td.Set(\"templates\", flattenTemplateIds(host.ParentTemplateIDs))\n\td.Set(\"inventory\", flattenInventory(host))\n\td.Set(\"groups\", flattenHostGroupIds(host.GroupIds))\n\td.Set(\"macro\", flattenMacros(host.UserMacros))\n\td.Set(\"tag\", flattenTags(host.Tags))\n\n\treturn nil\n}",
"func GetClientID() (string, error) {\n\tfn := \"clientid\" // File Name\n\tif _, err := os.Stat(fn); os.IsNotExist(err) {\n\t\t// File does not exists, create a new uuid\n\t\tuuid := uuid.NewV4()\n\t\tuuidStr := uuid.String()\n\t\tlog.Println(\"Created new Client ID.\", uuidStr)\n\t\terr = ioutil.WriteFile(fn, []byte(uuidStr), 0666)\n\t\tif err != nil {\n\t\t\treturn uuidStr, err\n\t\t}\n\t\treturn uuidStr, nil\n\t}\n\t// Read the uuid from the file\n\tdata, err := ioutil.ReadFile(fn)\n\tif err != nil {\n\t\tlog.Println(\"Failed to read the Client ID file. Attempting to recreate it.\", err)\n\t\tuuid := uuid.NewV4()\n\t\tuuidStr := uuid.String()\n\t\tlog.Println(\"Created new Client ID.\", uuidStr)\n\t\terr = ioutil.WriteFile(fn, []byte(uuidStr), 0666)\n\t\tif err != nil {\n\t\t\treturn uuidStr, err\n\t\t}\n\t\treturn uuidStr, nil\n\t}\n\treturn string(data), nil\n}",
"func extractUuid(input string) string {\n\treGetID := regexp.MustCompile(`([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})`)\n\tmatchListId := reGetID.FindAllStringSubmatch(input, -1)\n\tif len(matchListId) > 0 && len(matchListId[0]) > 0 {\n\t\treturn matchListId[len(matchListId)-1][1]\n\t}\n\treturn \"\"\n}",
"func loadHostString() (string, error) {\n\tif hostFile, err := getHostFile(); err == nil {\n\t\tbytes, err := ioutil.ReadFile(hostFile)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn string(bytes), nil\n\t} else {\n\t\treturn \"\", err\n\t}\n\n}",
"func ReadPidFile(name string) (pid int, err error) {\n\tvar file *os.File\n\tif file, err = os.OpenFile(name, os.O_RDONLY, 0640); err != nil {\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tlock := &LockFile{file}\n\tpid, err = lock.ReadPid()\n\treturn\n}",
"func getUUID() string{\n\tresponse,_ := http.Get(BaseUrl+\"/_uuids\")\n\tdefer response.Body.Close()\n\tdecoder := json.NewDecoder(response.Body)\n\terr := decoder.Decode(&uniqueid)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn uniqueid.Uuids[0]\n}",
"func (o *Partition) GetUUID(ctx context.Context) (uUID string, err error) {\n\terr = o.object.CallWithContext(ctx, \"org.freedesktop.DBus.Properties.Get\", 0, InterfacePartition, \"UUID\").Store(&uUID)\n\treturn\n}",
"func PartitionUUIDs(r io.Reader) []string {\n\tparts, err := readPartitionEntries(r)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn []string{\n\t\tGUIDFromBytes(parts[0].GUID[:]),\n\t}\n}",
"func (c *Config) ReadNodeID() (string, error) {\n\tdata, err := ioutil.ReadFile(filepath.Join(c.Chdir, \"node.id\"))\n\tif err != nil || len(data) == 0 {\n\t\treturn \"\", errNodeIDEmpty\n\t}\n\n\t// Trim all leading and trailing whitespace\n\tnodeIDStr := strings.TrimSpace(string(data))\n\tif len(nodeIDStr) == 0 {\n\t\treturn \"\", errNodeIDEmpty\n\t}\n\n\t// Make sure that there is no whitespace inside of the nodeid\n\tfields := strings.Fields(nodeIDStr)\n\tif len(fields) > 1 {\n\t\treturn \"\", errNodeIDMalformed\n\t}\n\n\treturn nodeIDStr, nil\n}",
"func readFile(file *os.File, offset int) uint32 {\n\tbytes := make([]byte, UINT32_LENGTH)\n\n\tfile.Seek(int64(offset), 0)\n\n\tdata, err := file.Read(bytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn binary.LittleEndian.Uint32(bytes[:data])\n}",
"func CreateUuidForMonitorData(md MonitorData) string {\n serial := SerializeMonitorData(md)\n h := sha256.New()\n h.Write(serial)\n return fmt.Sprintf(\"%x\", h.Sum(nil))\n}",
"func uuid() string {\n\tout, err := exec.Command(\"/usr/bin/uuidgen\").Output()\n\tif err != nil {\n\t\tlog.Fatal().\n\t\t\tStr(\"command\", \"/usr/bin/uuidgen\").\n\t\t\tMsg(\"There was an error generating the uuid.\")\n\t}\n\n\t//n := bytes.IndexByte(out, 0)\n\ts := string(out)\n\ts = strings.TrimSpace(s)\n\treturn s\n}",
"func idOfFile(filename string) string {\n\treturn fmt.Sprintf(`{\"$oid\":\"%s\"}`, testFiles[filename].Hex())\n}",
"func (o *MDRaid) GetUUID(ctx context.Context) (uUID string, err error) {\n\terr = o.object.CallWithContext(ctx, \"org.freedesktop.DBus.Properties.Get\", 0, InterfaceMDRaid, \"UUID\").Store(&uUID)\n\treturn\n}",
"func (gpu *Device) UUID() (string, error) {\n\treturn gpu.textProperty(\"UUID\")\n}",
"func GUIDFromBytes(b []byte) string {\n\t// See Intel EFI specification, Appendix A: GUID and Time Formats\n\t// https://www.intel.de/content/dam/doc/product-specification/efi-v1-10-specification.pdf\n\tvar (\n\t\ttimeLow uint32\n\t\ttimeMid uint16\n\t\ttimeHighAndVersion uint16\n\t\tclockSeqHighAndReserved uint8\n\t\tclockSeqLow uint8\n\t\tnode [6]byte\n\t)\n\ttimeLow = binary.LittleEndian.Uint32(b[0:4])\n\ttimeMid = binary.LittleEndian.Uint16(b[4:6])\n\ttimeHighAndVersion = binary.LittleEndian.Uint16(b[6:8])\n\tclockSeqHighAndReserved = b[8]\n\tclockSeqLow = b[9]\n\tcopy(node[:], b[10:])\n\treturn fmt.Sprintf(\"%08X-%04X-%04X-%02X%02X-%012X\",\n\t\ttimeLow,\n\t\ttimeMid,\n\t\ttimeHighAndVersion,\n\t\tclockSeqHighAndReserved,\n\t\tclockSeqLow,\n\t\tnode)\n}",
"func getDiskUUID() string {\n\treturn vboxmanage.GetVMInfoByRegexp(boxName, \"\\\"SATA Controller-ImageUUID-0-0\\\"=\\\"(.*?)\\\"\")\n}",
"func resourceHostRead(d *schema.ResourceData, m interface{}) error {\n\tlog.Debug(\"Lookup of hostgroup with id %s\", d.Id())\n\n\treturn hostRead(d, m, zabbix.Params{\n\t\t\"selectInterfaces\": \"extend\",\n\t\t\"selectParentTemplates\": \"extend\",\n\t\t\"selectGroups\": \"extend\",\n\t\t\"selectMacros\": \"extend\",\n\t\t\"selectTags\": \"extend\",\n\t\t\"selectInventory\": \"extend\",\n\t\t\"hostids\": d.Id(),\n\t})\n}",
"func (o *NetworkLicenseFile) GetHostId() string {\n\tif o == nil || o.HostId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.HostId\n}",
"func libc_getuid() int32",
"func GetHostID() string {\n\tif cachedHostID != \"\" {\n\t\treturn cachedHostID\n\t}\n\n\tecsMetadataURI := os.Getenv(\"ECS_CONTAINER_METADATA_URI_V4\")\n\tif ecsMetadataURI != \"\" {\n\t\thostID, err := getHostIDFromECS(ecsMetadataURI + \"/task\")\n\t\tif err == nil {\n\t\t\tcachedHostID = hostID\n\t\t\treturn cachedHostID\n\t\t}\n\n\t\tfmt.Fprintf(os.Stderr, \"Failed to get task ARN from ECS metadata v4 endpoint: %v\\n\", err)\n\t}\n\n\tecsMetadataURI = os.Getenv(\"ECS_CONTAINER_METADATA_URI\")\n\tif ecsMetadataURI != \"\" {\n\t\thostID, err := getHostIDFromECS(ecsMetadataURI + \"/task\")\n\t\tif err == nil {\n\t\t\tcachedHostID = hostID\n\t\t\treturn cachedHostID\n\t\t}\n\n\t\tfmt.Fprintf(os.Stderr, \"Failed to get task ARN from ECS metadata v3 endpoint: %v\\n\", err)\n\t}\n\n\thostID, errECS := getHostIDFromECS(\"http://169.254.170.2/v2/metadata\")\n\tif errECS == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\thostID, errEC2 := getHostIDFromEC2()\n\tif errEC2 == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\thostID, errIF := getHostIDFromInterfaces()\n\tif errIF == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\thostID, errRand := getRandomHostID()\n\tif errRand == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"Failed to get task ARN from ECS metadata v2 endpoint: %v\\n\", errECS)\n\tfmt.Fprintf(os.Stderr, \"Failed to get instance ID from EC2 metadata endpoint: %v\\n\", errEC2)\n\tfmt.Fprintf(os.Stderr, \"Failed to get IP address from network interface: %v\\n\", errIF)\n\tfmt.Fprintf(os.Stderr, \"Failed to get random host ID: %v\\n\", errRand)\n\tpanic(\"Unable to obtain a valid host ID\")\n}",
"func Getuid() int",
"func getHostId() (uint64, error) {\n\ta := getLocalIP()\n\tip := (uint64(a[0]) << 24) + (uint64(a[1]) << 16) + (uint64(a[2]) << 8) + uint64(a[3])\n\treturn ip % MaxHostId, nil\n}",
"func dataHostRead(d *schema.ResourceData, m interface{}) error {\n\tparams := zabbix.Params{\n\t\t\"selectInterfaces\": \"extend\",\n\t\t\"selectParentTemplates\": \"extend\",\n\t\t\"selectGroups\": \"extend\",\n\t\t\"selectMacros\": \"extend\",\n\t\t\"selectTags\": \"extend\",\n\t\t\"selectInventory\": \"extend\",\n\t\t\"filter\": map[string]interface{}{},\n\t}\n\n\tlookups := []string{\"host\", \"hostid\", \"name\"}\n\tfor _, k := range lookups {\n\t\tif v, ok := d.GetOk(k); ok {\n\t\t\tparams[\"filter\"].(map[string]interface{})[k] = v\n\t\t}\n\t}\n\n\tif len(params[\"filter\"].(map[string]interface{})) < 1 {\n\t\treturn errors.New(\"no host lookup attribute\")\n\t}\n\tlog.Debug(\"performing data lookup with params: %#v\", params)\n\n\treturn hostRead(d, m, params)\n}",
"func (conf *Configuration) UUID(name string) (string, error) {\n\tctx := context.NewContext(conf.Timeout)\n\tdefer ctx.Cancel()\n\n\treturn conf.UUIDWithContext(ctx, name)\n}",
"func (pe *ProgramExt) UUID() string {\n\treturn fmt.Sprintf(\"%s_%s\", pe.Manager, pe.Config)\n}",
"func getNodeUUID(client clientset.Interface, nodeName string) string {\n\tnode, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\treturn strings.TrimPrefix(node.Spec.ProviderID, providerPrefix)\n}",
"func readUserFriendlyFilePath(path string) ([]byte, error) {\n\tpath, err := homedir.Expand(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to retrieve key path: %v\", err)\n\t}\n\treturn ioutil.ReadFile(path)\n}",
"func readIdentifier(path string, fileSuffix string, idtype string) (string, error) {\n\tidentifiers, err := readIdentifiers(path)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error reading identifier: %v\", err)\n\t}\n\n\tfor _, item := range identifiers {\n\t\tif !strings.HasSuffix(strings.ToLower(item.File), strings.ToLower(fileSuffix)) {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, pair := range item.Identifiers {\n\t\t\tif pair.Type == idtype {\n\t\t\t\treturn pair.Value, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\", errors.New(\"error reading identifier: not found\")\n}",
"func (mariadbFlavor) serverUUID(c *Conn) (string, error) {\n\treturn \"\", nil\n}",
"func (e *EPub) UUID() string {\n\treturn strings.TrimPrefix(\"urn:uuid:\", e.uuid)\n}",
"func getUID(lib utils.PathIdentifier) string {\n\treturn lib.Key()[:5]\n}",
"func chatUUIDstring(idstr string) (idbytes []byte, err error) {\n\tif idstr == \"\" {\n\t\tidbytes, err = db.DB.Read(db.CHAT, db.LastCB)\n\t} else {\n\t\tvar id uuid.UUID\n\t\tid, err = uuid.FromString(idstr)\n\t\tidbytes = id.Bytes()\n\t\treturn\n\t}\n\treturn\n}",
"func parseFormatted(b []byte) (UUID, error) {\n\tvar u UUID\n\tvar iu, ib int\n\tfor idx, cnt := range uuidHexLengths {\n\t\tn, err := hex.Decode(u[iu:], b[ib:ib+cnt])\n\t\tif err != nil {\n\t\t\treturn u, ErrInvalidUUID\n\t\t}\n\t\tif idx < 4 && b[ib+cnt] != dash {\n\t\t\treturn u, ErrInvalidUUID\n\t\t}\n\t\tiu += n\n\t\tib += cnt + 1\n\t}\n\treturn u, nil\n}",
"func ExtractUUID(r *http.Request) (uuid.UUID, error) {\n\ts := r.Header.Get(\"Authorization\")\n\tif s == \"\" {\n\t\tpretty.Printf(\"fatal error: Authorization Header empty \\n\")\n\t\treturn uuid.Nil, errors.New(\"Authorization Header empty\")\n\t}\n\n\tinUUID, err := uuid.FromString(s)\n\tif err != nil {\n\t\tpretty.Printf(\"fatal error: %s \\n\", err)\n\t\treturn uuid.Nil, err\n\t}\n\n\tif _, err := datastructures.GetEntry(inUUID); err != nil {\n\t\tpretty.Printf(\"fatal error: %s \\n\", err)\n\t\treturn uuid.Nil, errors.New(\"sorry, UUID is not correct, please access /welcome to receive an UUID\")\n\t}\n\n\treturn inUUID, nil\n}",
"func (c parser) GetHexDeviceID(command []byte) (string, error) {\n\treturn extractByteRangeAndReturnHex(command, 5, 25, \"Identificador do device não encontrado\")\n}",
"func (ps *PS) UUID() uint64 {\n\tif ps.uuid != 0 {\n\t\treturn ps.uuid\n\t}\n\t// assume the uuid is derived from boot ID and process start time\n\tps.uuid = (bootid.Read() << 30) + uint64(ps.PID) | uint64(ps.StartTime.UnixNano())\n\tmaj, _, patch := windows.RtlGetNtVersionNumbers()\n\tif maj >= 10 && patch >= 1507 {\n\t\tseqNum := querySequenceNumber(ps.PID)\n\t\t// prefer the most robust variant of the uuid which uses the\n\t\t// process sequence number obtained from the process object\n\t\tif seqNum != 0 {\n\t\t\tps.uuid = (bootid.Read() << 30) | seqNum\n\t\t}\n\t}\n\treturn ps.uuid\n}",
"func ParseUUID(s string) (u UUID, err error) {\n\tif len(s) != UUIDStringLen {\n\t\terr = errors.New(\"invalid UUID string length\")\n\t\treturn\n\t}\n\n\tif s[sDelim0At] != uuidDelim || s[sDelim1At] != uuidDelim || s[sDelim2At] != uuidDelim || s[sDelim3At] != uuidDelim {\n\t\terr = errors.New(\"invalid UUID string delimiters\")\n\t\treturn\n\t}\n\n\tb := []byte(s)\n\n\tif l, e := hex.Decode(u[part0From:part1From], b[sPart0From:sPart0To]); l != part0Len || e != nil {\n\t\terr = errors.New(\"invalid UUID part 1\")\n\t\treturn\n\t}\n\n\tif l, e := hex.Decode(u[part1From:part2From], b[sPart1From:sPart1To]); l != part1Len || e != nil {\n\t\terr = errors.New(\"invalid UUID part 2\")\n\t\treturn\n\t}\n\n\tif l, e := hex.Decode(u[part2From:part3From], b[sPart2From:sPart2To]); l != part2Len || e != nil {\n\t\terr = errors.New(\"invalid UUID part 3\")\n\t\treturn\n\t}\n\n\tif l, e := hex.Decode(u[part3From:part4From], b[sPart3From:sPart3To]); l != part3Len || e != nil {\n\t\terr = errors.New(\"invalid UUID part 4\")\n\t\treturn\n\t}\n\n\tif l, e := hex.Decode(u[part4From:], b[sPart4From:sPart4To]); l != part4Len || e != nil {\n\t\terr = errors.New(\"invalid UUID part 5\")\n\t\treturn\n\t}\n\treturn\n}",
"func generateUUID() string {\n\tbuf := make([]byte, 16)\n\tif _, err := cr.Read(buf); err != nil {\n\t\tpanic(fmt.Errorf(\"failed to read random bytes: %w\", err))\n\t}\n\n\treturn fmt.Sprintf(\"%08x-%04x-%04x-%04x-%12x\",\n\t\tbuf[0:4],\n\t\tbuf[4:6],\n\t\tbuf[6:8],\n\t\tbuf[8:10],\n\t\tbuf[10:16])\n}",
"func generateUUID(bd blockdevice.BlockDevice) (string, bool) {\n\tvar ok bool\n\tvar uuidField, uuid string\n\n\t// select the field which is to be used for generating UUID\n\t//\n\t// Serial number is not used directly for UUID generation. This is because serial number is not\n\t// unique in some cloud environments. For example, in GCP the serial number is\n\t// configurable by the --device-name flag while attaching the disk.\n\t// If this flag is not provided, GCP automatically assigns the serial number\n\t// which is unique only to the node. Therefore Serial number is used only in cases\n\t// where the disk has a WWN.\n\t//\n\t// If disk has WWN, a combination of WWN+Serial will be used. This is done because there are cases\n\t// where the disks has same WWN but different serial. It is seen in some storage arrays.\n\t// All the LUNs will have same WWN, but different serial.\n\t//\n\t// PartitionTableUUID is not used for UUID generation in NDM. The only case where the disk has a PartitionTable\n\t// and not partition is when, the user has manually created a partition table without writing any actual partitions.\n\t// This means NDM will have to give its consumers the entire disk, i.e consumers will have access to the sectors\n\t// where partition table is written. If consumers decide to reformat or erase the disk completely the partition\n\t// table UUID is also lost, making NDM unable to identify the disk. Hence, even if a partition table is present\n\t// NDM will rewrite it and create a new GPT table and a single partition. Thus consumers will have access only to\n\t// the partition and the unique data will be stored in sectors where consumers do not have access.\n\n\tswitch {\n\tcase bd.DeviceAttributes.DeviceType == blockdevice.BlockDeviceTypePartition:\n\t\t// The partition entry UUID is used when a partition (/dev/sda1) is processed. The partition UUID should be used\n\t\t// if available, other than the partition table UUID, because multiple partitions can have the same partition table\n\t\t// UUID, but each partition will have a different UUID.\n\t\tklog.Infof(\"device(%s) is a partition, using partition UUID: %s\", bd.DevPath, bd.PartitionInfo.PartitionEntryUUID)\n\t\tuuidField = bd.PartitionInfo.PartitionEntryUUID\n\t\tok = true\n\tcase len(bd.DeviceAttributes.WWN) > 0:\n\t\t// if device has WWN, both WWN and Serial will be used for UUID generation.\n\t\tklog.Infof(\"device(%s) has a WWN, using WWN: %s and Serial: %s\",\n\t\t\tbd.DevPath,\n\t\t\tbd.DeviceAttributes.WWN, bd.DeviceAttributes.Serial)\n\t\tuuidField = bd.DeviceAttributes.WWN +\n\t\t\tbd.DeviceAttributes.Serial\n\t\tok = true\n\tcase len(bd.FSInfo.FileSystemUUID) > 0:\n\t\tklog.Infof(\"device(%s) has a filesystem, using filesystem UUID: %s\", bd.DevPath, bd.FSInfo.FileSystemUUID)\n\t\tuuidField = bd.FSInfo.FileSystemUUID\n\t\tok = true\n\t}\n\n\tif ok {\n\t\tuuid = blockdevice.BlockDevicePrefix + util.Hash(uuidField)\n\t\tklog.Infof(\"generated uuid: %s for device: %s\", uuid, bd.DevPath)\n\t}\n\n\treturn uuid, ok\n}",
"func (hof *Heap) uuid() http.Arrow {\n\treturn http.GET(\n\t\tø.URI(\"https://httpbin.org/uuid\"),\n\t\tø.Accept.JSON,\n\n\t\tƒ.Status.OK,\n\t\tƒ.ContentType.JSON,\n\t\tƒ.Body(&hof.ID),\n\t)\n}",
"func readRandomUint32() uint32 {\n\t// We've found systems hanging in this function due to lack of entropy.\n\t// The randomness of these bytes is just preventing nearby clashes, so\n\t// just look at the time.\n\treturn uint32(time.Now().UnixNano())\n}",
"func UCDReader(file string) (io.Reader, error) {\n\tdata, err := os.ReadFile(UCDPath(file))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn bytes.NewReader(data), nil\n}",
"func readPinFile(pinFile string, mac hash.Hash) (*treeHead, error) {\n\tdata, err := ioutil.ReadFile(pinFile)\n\tif os.IsNotExist(err) {\n\t\tlog.Println(\"integrity: local pin file not found, will accept whatever remote storage returns\")\n\t\treturn &treeHead{}, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\treturn unmarshalTreeHead(data, mac)\n}",
"func Parse(b []byte) (UUID, error) {\n\tswitch len(b) {\n\tcase 16:\n\t\tvar u UUID\n\t\tcopy(u[:], b)\n\t\treturn u, nil\n\tcase 32:\n\t\tvar u UUID\n\t\t_, err := hex.Decode(u[:], b)\n\t\tif err != nil {\n\t\t\treturn u, ErrInvalidUUID\n\t\t}\n\t\treturn u, nil\n\tcase 36:\n\t\treturn parseFormatted(b)\n\tdefault:\n\t\treturn UUID{}, ErrInvalidUUID\n\t}\n}",
"func readOuiFile(ouiPath string) (radix.Readonly, error) {\n\tfh, err := os.Open(ouiPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer fh.Close()\n\tgzreader, err := gzip.NewReader(fh)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer gzreader.Close()\n\n\ttree := radix.New()\n\n\tcommentre := regexp.MustCompile(\"\\\\s*#.*\")\n\tre := regexp.MustCompile(\"^([0-9a-fA-F:/-]+)\\\\s+(\\\\S+)\\\\s+(.*?)\\\\s*$\")\n\tprefixmatch := regexp.MustCompile(\"^([0-9a-fA-F:-]+)/(\\\\d+)$\")\n\tstripre := regexp.MustCompile(\"[:-]\")\n\n\tscanner := bufio.NewScanner(gzreader)\n\tfor scanner.Scan() {\n\t\tline := commentre.ReplaceAllString(scanner.Text(), \"\")\n\t\tsubs := re.FindStringSubmatch(line)\n\t\tif subs == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\toui := &OUIDescr{\n\t\t\tPrefix: subs[1],\n\t\t\tVendor: subs[2],\n\t\t\tComments: subs[3],\n\t\t}\n\n\t\tprefix := oui.Prefix\n\t\tplen := 24\n\n\t\tpmatch := prefixmatch.FindStringSubmatch(prefix)\n\t\tif pmatch != nil {\n\t\t\tprefix = pmatch[1]\n\t\t\tif len, err := strconv.Atoi(pmatch[2]); err == nil {\n\t\t\t\tplen = len\n\t\t\t} else {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tprefix = strings.ToLower(stripre.ReplaceAllString(prefix, \"\"))\n\t\tif plen%4 != 0 {\n\t\t\treturn nil, fmt.Errorf(\"Prefix length not multiple of 4\")\n\t\t}\n\n\t\tprefix = prefix[:plen/4]\n\n\t\ttree.Add(prefix, oui)\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn tree.Readonly(), nil\n}",
"func ReadOrInitSessionId(bD *BaseData) (string, error) {\n _, ok := sessions[bD.SessionId]; if !ok {\n bytes := make([]byte, 16)\n if _, err := rand.Read(bytes); err != nil {\n return \"\", err\n }\n sessionId := hex.EncodeToString(bytes)\n sessions[sessionId] = &Data{SessionId: sessionId, CopyAndPaste: make(map[string]bool)}\n return sessionId, nil\n }\n return bD.SessionId, nil\n}",
"func (store Storage) UUID() string {\n\treturn \"\"\n}",
"func ReadHostsFile() ([]byte, error) {\n\tbs, err := ioutil.ReadFile(HostsPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bs, nil\n}",
"func (d *Descriptor) UUID() UUID {\n\treturn d.uuid\n}",
"func (sys *Sys) GetUUID() []byte {\n\tmyUUID := uuid.NewV1()\n\tlog.Debugln(\"UUID Generated:\", myUUID.String())\n\treturn myUUID.Bytes()\n}",
"func Parse(value string) (UUID, error) {\n\tvar uuid UUID\n\tif len(value) != 36 && len(value) != 38 {\n\t\treturn uuid, fmt.Errorf(\"string is not the correct length\")\n\t}\n\n\tif len(value) == 38 {\n\t\tif value[0] != '{' && value[37] != '}' {\n\t\t\treturn uuid, fmt.Errorf(\"invalid UUID string format\")\n\t\t}\n\t\tvalue = value[1:37]\n\t}\n\tif value[8] != '-' ||\n\t\tvalue[13] != '-' ||\n\t\tvalue[18] != '-' ||\n\t\tvalue[23] != '-' {\n\t\treturn uuid, fmt.Errorf(\"invalid UUID string format\")\n\t}\n\n\tif _, err := hex.Decode(uuid[0:], []byte(value[0:8])); err != nil {\n\t\treturn uuid, fmt.Errorf(\"invalid UUID : %v\", err)\n\t}\n\tif _, err := hex.Decode(uuid[4:], []byte(value[9:13])); err != nil {\n\t\treturn uuid, fmt.Errorf(\"invalid UUID : %v\", err)\n\t}\n\tif _, err := hex.Decode(uuid[6:], []byte(value[14:18])); err != nil {\n\t\treturn uuid, fmt.Errorf(\"invalid UUID : %v\", err)\n\t}\n\tif _, err := hex.Decode(uuid[8:], []byte(value[19:23])); err != nil {\n\t\treturn uuid, fmt.Errorf(\"invalid UUID : %v\", err)\n\t}\n\tif _, err := hex.Decode(uuid[10:], []byte(value[24:36])); err != nil {\n\t\treturn uuid, fmt.Errorf(\"invalid UUID : %v\", err)\n\t}\n\n\treturn uuid, nil\n}",
"func (o FioSpecVolumeVolumeSourceFlockerOutput) DatasetUUID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceFlocker) *string { return v.DatasetUUID }).(pulumi.StringPtrOutput)\n}",
"func readIdentifiers(path string) (TransferIdentifiers, error) {\n\tidentifiers := TransferIdentifiers([]TransferIdentifier{})\n\n\tblob, err := os.ReadFile(filepath.Join(path, \"metadata\", \"identifiers.json\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := json.Unmarshal(blob, &identifiers); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn identifiers, nil\n}",
"func GetFileID(path string) (string, error) {\n\n\tdat, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Reading File: \" + error.Error(err))\n\t}\n\n\treturn string(dat), nil\n}",
"func blake2HashFromFileUUID(fileUUID []string) ([64]byte, error) {\n\tvar buffer bytes.Buffer\n\tencoder := gob.NewEncoder(&buffer)\n\n\terr := encoder.Encode(fileUUID)\n\tif err != nil {\n\t\treturn [64]byte{}, err\n\t}\n\treturn blake2b.Sum512(buffer.Bytes()), nil\n}",
"func (g *Gateway) parseUUID(c *gin.Context, param string) (parsed string) {\n\tid, err := uuid.Parse(param)\n\tif err != nil {\n\t\tg.render404(c)\n\t\treturn\n\t}\n\treturn id.String()\n}",
"func (j *juicefs) GetJfsVolUUID(ctx context.Context, name string) (string, error) {\n\tcmdCtx, cmdCancel := context.WithTimeout(ctx, 8*defaultCheckTimeout)\n\tdefer cmdCancel()\n\tstdout, err := j.Exec.CommandContext(cmdCtx, config.CeCliPath, \"status\", name).CombinedOutput()\n\tif err != nil {\n\t\tre := string(stdout)\n\t\tif strings.Contains(re, \"database is not formatted\") {\n\t\t\tklog.V(6).Infof(\"juicefs %s not formatted.\", name)\n\t\t\treturn \"\", nil\n\t\t}\n\t\tklog.Infof(\"juicefs status error: %v, output: '%s'\", err, re)\n\t\tif cmdCtx.Err() == context.DeadlineExceeded {\n\t\t\tre = fmt.Sprintf(\"juicefs status %s timed out\", 8*defaultCheckTimeout)\n\t\t\treturn \"\", errors.New(re)\n\t\t}\n\t\treturn \"\", errors.Wrap(err, re)\n\t}\n\n\tmatchExp := regexp.MustCompile(`\"UUID\": \"(.*)\"`)\n\tidStr := matchExp.FindString(string(stdout))\n\tidStrs := strings.Split(idStr, \"\\\"\")\n\tif len(idStrs) < 4 {\n\t\treturn \"\", fmt.Errorf(\"get uuid of %s error\", name)\n\t}\n\n\treturn idStrs[3], nil\n}",
"func readUsername() string {\n\tusername := \"bob\"\n\n\tdata, err := ioutil.ReadFile(usernameFile)\n\tif err != nil {\n\t\treturn username\n\t}\n\n\tvar userData struct {\n\t\tUsername string `yaml:\"username\"`\n\t}\n\terr = yaml.Unmarshal(data, &userData)\n\tif err != nil {\n\t\treturn username\n\t}\n\treturn userData.Username\n}",
"func loadIdentity(userName, identity string) ([]byte, error) {\n\tif filepath.Dir(identity) == \".\" {\n\t\tu, err := user.Current()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tidentity = filepath.Join(u.HomeDir, \".ssh\", identity)\n\t}\n\n\treturn ioutil.ReadFile(identity)\n}",
"func DecodeHash(hash string) string {\n\tbyteArray := []byte(hash)\n\n\tfor i := 0; i < 19; i++ {\n\t\tif (string(byteArray[i*2]) == \"0\") && (string(byteArray[(i*2)+1]) == \"3\") {\n\t\t\tfileName, _ := hex.DecodeString(string(byteArray[:(i)*2]))\n\t\t\treturn string(fileName)\n\t\t}\n\t}\n\treturn \"Error when decoding dataID\"\n}",
"func (*UserUUID) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{2}\n}",
"func Readfile(conf *goini.Config) {\n\tfilename := conf.GetValue(\"hqmodule\", \"sh_filename\")\n\tinterval, _ := strconv.Atoi(conf.GetValue(\"hqmodule\", \"sh_readfileinterval\"))\n\tquotes := conf.GetStr(helper.ConfigHQSessionName, \"sh\")\n\tquotemap := make(map[string]bool, 3)\n\tfor _, q := range strings.Split(quotes, \"|\") {\n\t\tquotemap[q] = true\n\t}\n\t_, md001ok := quotemap[\"md001\"]\n\t_, md002ok := quotemap[\"md002\"]\n\t_, md004ok := quotemap[\"md004\"]\n\n\tvar fd []byte\n\tvar l int\n\ti := 0\n\tpauseinter := time.Duration(interval) * time.Millisecond\n\tfor {\n\t\tfd, _ = ioutil.ReadFile(filename)\n\t\tl = len(fd) - 11\n\t\tfor i = 0; i < l; i++ {\n\t\t\tif fd[i] == 0x0A {\n\t\t\t\tif fd[i+5] == 0x33 {\n\t\t\t\t\ti += 399\n\t\t\t\t\tcontinue\n\t\t\t\t} else if fd[i+5] == 0x31 {\n\t\t\t\t\tif md001ok {\n\t\t\t\t\t\tif updateHs(&fd, &i, 150) {\n\t\t\t\t\t\t\trbmd001map.Put(fd[i+7 : i+150])\n\t\t\t\t\t\t\ti += 149\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if fd[i+5] == 0x32 {\n\t\t\t\t\tif md002ok {\n\t\t\t\t\t\tif updateHs(&fd, &i, 400) {\n\t\t\t\t\t\t\trbmd002map.Put(fd[i+7 : i+400])\n\t\t\t\t\t\t\tif string(fd[i+7:i+7+6]) == \"600000\" {\n\t\t\t\t\t\t\t\tlog.Info(\"ReadFile: %d\", time.Now().UnixNano()/1e6)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\ti += 399\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if fd[i+5] == 0x34 {\n\t\t\t\t\tif md004ok {\n\t\t\t\t\t\tif updateHs(&fd, &i, 424) {\n\t\t\t\t\t\t\trbmd004map.Put(fd[i+7 : i+424])\n\t\t\t\t\t\t\ti += 423\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(pauseinter)\n\t}\n}",
"func (c *Client) ReadHost(host string) (*Host, error) {\n\tvar hostdetail StructGetHostResult\n\ts := \"request={\\\"hostname\\\": \\\"\" + host + \"\\\"}\"\n\tbody := strings.NewReader(s)\n\trespBody, respErr := c.NewAPIRequest(\"POST\", \"get_host\", body)\n\tif respErr != nil {\n\t\tfmt.Printf(\"API Request for get_host failed. Error: %s\\n\", respErr)\n\t\treturn nil, respErr\n\t}\n\trespUnmarshalErr := json.Unmarshal(respBody, &hostdetail)\n\tif respUnmarshalErr != nil {\n\t\tfmt.Printf(\"Error Decoding the API response. Error: %s\\n\", respUnmarshalErr)\n\t\treturn nil, respUnmarshalErr\n\t}\n\thostname := hostdetail.Result.Hostname\n\tfolder := hostdetail.Result.Path\n\talias := hostdetail.Result.Attributes.Alias\n\ttagAgent := hostdetail.Result.Attributes.TagAgent\n\ttagCriticality := hostdetail.Result.Attributes.TagCriticality\n\tipaddress := hostdetail.Result.Attributes.Ipaddress\n\thoststruct := &Host{Attributes{alias, tagAgent, tagCriticality, ipaddress}, hostname, folder}\n\treturn hoststruct, nil\n}",
"func (*FileUUID) Descriptor() ([]byte, []int) {\n\treturn file_github_com_Ultimate_Super_WebDev_Corp_gateway_services_file_file_proto_rawDescGZIP(), []int{0}\n}",
"func GetUID() string {\n\twd, err := os.Getwd()\n\n\tvar data map[string]interface{}\n\n\tbuff, err := ioutil.ReadFile(wd + \"/package.json\")\n\n\tcheck(err)\n\n\tif err := json.Unmarshal(buff, &data); err != nil {\n\t\tpanic(err)\n\t}\n\n\tuser, err := GetStoredUser()\n\n\tGuard(user)\n\n\tcheck(err)\n\n\tname := data[\"name\"].(string)\n\n\tuid := CreateUID(name, user.Email)\n\n\treturn uid\n}",
"func UnhexUuid(uuid uuid.UUID) ([]byte, error) {\n\ts := strings.Replace(uuid.String(), \"-\", \"\", -1)\n\treturn hex.DecodeString(s)\n}",
"func newNameUUIDFromBytes(bytes []byte) *UUID {\n\tmd5Hash := md5.Sum(bytes)\n\tmd5Hash[6] &= 0x0f /* clear version */\n\tmd5Hash[6] |= 0x30 /* set to version 3 */\n\tmd5Hash[8] &= 0x3f /* clear variant */\n\tmd5Hash[8] |= 0x80 /* set to IETF variant */\n\n\tvar msb uint64\n\tvar lsb uint64\n\n\tfor i := 0; i < 8; i++ {\n\t\tmsb = (msb << 8) | (uint64(md5Hash[i]) & 0xff)\n\t}\n\tfor i := 8; i < 16; i++ {\n\t\tlsb = (lsb << 8) | (uint64(md5Hash[i]) & 0xff)\n\t}\n\n\treturn &UUID{msb, lsb}\n}",
"func (TiText) ReadHex(r io.Reader) (Hexfile, os.Error) {\n\tresp := RecordSequence{}\n\n\t// 16 bytes per line ought to be enough for anybody.\n\tline_reader := line.NewReader(r, 64)\n\taddr := 0\n\n\tfor {\n\t\tline, is_prefix, err := line_reader.ReadLine()\n\t\tif line == nil && err == os.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor is_prefix {\n\t\t\tvar line_part []byte\n\t\t\tline_part, is_prefix, err = line_reader.ReadLine()\n\t\t\tif line == nil && err == os.EOF {\n\t\t\t\tbreak\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tline = append(line, line_part...)\n\t\t}\n\t\tline_s := string(line)\n\t\treader := strings.NewReader(line_s)\n\t\tswitch line_s[0] {\n\t\tcase '@':\n\t\t\treader.ReadByte()\n\t\t\tif len(line) < 2 {\n\t\t\t\treturn nil, StrError(\"Format error: short address\")\n\t\t\t}\n\t\t\taddr, err = decodeInt(reader, -1)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// TODO(thequux): Check for trailing junk\n\t\tcase 'q':\n\t\t\treturn resp, nil\n\t\tcase '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':\n\t\t\tfallthrough\n\t\tcase 'a', 'b', 'c', 'd', 'e', 'f':\n\t\t\tfallthrough\n\t\tcase 'A', 'B', 'C', 'D', 'E', 'F':\n\t\t\tbuf := make([]byte, 0, len(line)/3)\n\t\t\tfor {\n\n\t\t\t\tif n, err := decodeInt(reader, -1); err != nil {\n\t\t\t\t\tif err == os.EOF {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\treturn nil, err\n\t\t\t\t} else {\n\t\t\t\t\tbuf = append(buf, byte(n))\n\t\t\t\t}\n\t\t\t}\n\t\t\tresp = append(resp, Record{addr, buf})\n\t\t\taddr += len(buf)\n\t\tdefault:\n\t\t\treturn nil, StrError(\"Invalid format\")\n\t\t}\n\t}\n\treturn resp, nil\n}",
"func uniqueHandle(client interfaces.Client) (interfaces.Client, error) {\n\tfile, err := os.Open(\"users.txt\")\n\tif err != nil {\n\t\treturn client, err\n\t}\n\tdefer file.Close()\n\treader := bufio.NewReader(file)\n\tfor {\n\t\tline, err := reader.ReadString('\\n')\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn client, err\n\t\t}\n\t\thandle, _ := helpers.SplitOnFirstDelim(',', line)\n\t\tif client.GetHandle() == handle {\n\t\t\treturn client, errors.New(\"Handle is not unique\")\n\t\t}\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn client, err\n}",
"func lookupUsername(file string) (string, error) {\n\tfileInfo, err := os.Stat(file)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tuid := fileInfo.Sys().(*syscall.Stat_t).Uid\n\tif ucache[uid] != \"\" {\n\t\treturn ucache[uid], nil\n\t}\n\tu, err := user.LookupId(strconv.FormatUint(uint64(uid), 10))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tucache[uid] = u.Username\n\treturn u.Username, nil\n}"
] | [
"0.69632655",
"0.6446444",
"0.60150903",
"0.59953517",
"0.5983552",
"0.59579515",
"0.58662516",
"0.5794862",
"0.5774995",
"0.57337934",
"0.57194346",
"0.56959444",
"0.5645361",
"0.5586416",
"0.554383",
"0.55005544",
"0.53939855",
"0.53913856",
"0.5279676",
"0.5251984",
"0.5185961",
"0.5163046",
"0.51620716",
"0.5162019",
"0.5161038",
"0.5160165",
"0.51465535",
"0.5131792",
"0.51190853",
"0.51169676",
"0.5112542",
"0.50973433",
"0.50754976",
"0.5031062",
"0.5018129",
"0.5014143",
"0.5007482",
"0.49971324",
"0.49307427",
"0.49180737",
"0.49170688",
"0.4913092",
"0.4905663",
"0.4900515",
"0.48987964",
"0.48901162",
"0.48837683",
"0.48796737",
"0.48489857",
"0.48304746",
"0.48303434",
"0.48263764",
"0.48166877",
"0.47918335",
"0.47893786",
"0.47828728",
"0.4781566",
"0.47805685",
"0.4771621",
"0.47484154",
"0.4724709",
"0.47210962",
"0.47095472",
"0.46963507",
"0.46925047",
"0.4690793",
"0.46879882",
"0.46758112",
"0.4675701",
"0.467455",
"0.46569797",
"0.46559978",
"0.46557954",
"0.46545693",
"0.4653859",
"0.46469694",
"0.4641024",
"0.46406263",
"0.4639627",
"0.4628563",
"0.46231022",
"0.46203563",
"0.46183893",
"0.46093425",
"0.46045282",
"0.45931858",
"0.45878783",
"0.45829403",
"0.45806572",
"0.45776314",
"0.45727795",
"0.4572598",
"0.45669642",
"0.4566248",
"0.45639655",
"0.45593965",
"0.45592088",
"0.45562103",
"0.455411",
"0.4554011"
] | 0.8575717 | 0 |
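The ReadHostUUID record above and the WriteHostUUID record that follows are two halves of one persistence scheme, and the ReadOrMakeHostUUID negative shows how the project composes them. As a smaller, hedged sketch (the caller name `hostIDOrDefault` is invented here; it assumes the ReadHostUUID shown above lives in the same package and uses the trace.IsNotFound helper that already appears in the negatives), a caller can treat the not-found case as recoverable while still propagating permission errors unchanged:

```go
package utils

// Import path assumed from the trace helpers used throughout the snippets above.
import "github.com/gravitational/trace"

// hostIDOrDefault returns the persisted host UUID from dataDir, or fallback
// when the host_uuid file is missing or empty (ReadHostUUID reports that as
// a trace.NotFound error). Any other error, such as the permission error that
// ReadHostUUID deliberately leaves unconverted, is returned to the caller.
func hostIDOrDefault(dataDir, fallback string) (string, error) {
	id, err := ReadHostUUID(dataDir)
	if err == nil {
		return id, nil
	}
	if trace.IsNotFound(err) {
		return fallback, nil
	}
	return "", err
}
```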
WriteHostUUID writes host UUID into a file | func WriteHostUUID(dataDir string, id string) error {
err := os.WriteFile(filepath.Join(dataDir, HostUUIDFile), []byte(id), os.ModeExclusive|0400)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return err
}
return trace.ConvertSystemError(err)
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func WriteUUID(buffer []byte, offset int, value UUID) {\n bytes, _ := value.MarshalBinary()\n WriteBytes(buffer, offset, bytes)\n}",
"func (this *Actions) generateUid(config *Config, hostname string) string{\n uuidNew := uuid.NewV5(uuid.NewV1(), hostname).String()\n uuid_path := config.GetValue(\"basic\", \"uuid_path\")\n log.Info(\"The new uuid is : \" + uuidNew)\n file, error := os.OpenFile(uuid_path, os.O_RDWR|os.O_CREATE, 0622)\n if error != nil {\n log.Error(\"Open uuid file in \"+ uuid_path +\" failed.\" + error.Error())\n }\n _,err := file.WriteString(uuidNew)\n if err != nil {\n log.Error(\"Save uuid file in \"+ uuid_path +\" failed.\" + err.Error())\n }\n file.Close()\n return uuidNew\n}",
"func ReadOrMakeHostUUID(dataDir string) (string, error) {\n\tid, err := ReadHostUUID(dataDir)\n\tif err == nil {\n\t\treturn id, nil\n\t}\n\tif !trace.IsNotFound(err) {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\t// Checking error instead of the usual uuid.New() in case uuid generation\n\t// fails due to not enough randomness. It's been known to happen happen when\n\t// Teleport starts very early in the node initialization cycle and /dev/urandom\n\t// isn't ready yet.\n\trawID, err := uuid.NewRandom()\n\tif err != nil {\n\t\treturn \"\", trace.BadParameter(\"\" +\n\t\t\t\"Teleport failed to generate host UUID. \" +\n\t\t\t\"This may happen if randomness source is not fully initialized when the node is starting up. \" +\n\t\t\t\"Please try restarting Teleport again.\")\n\t}\n\tid = rawID.String()\n\tif err = WriteHostUUID(dataDir, id); err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\treturn id, nil\n}",
"func UUIDFile(fpath string) (string, error) {\n\n\t_, err := os.Stat(fpath)\n\tif err != nil && !os.IsExist(err) {\n\t\tkey := uuid.New().String()\n\t\tif err := ioutil.WriteFile(fpath, []byte(key), 0777); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn key, nil\n\t}\n\n\tfp, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer fp.Close()\n\tdata, err := ioutil.ReadAll(fp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkey := string(data)\n\tif _, err := uuid.Parse(key); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn key, nil\n}",
"func ReadHostUUID(dataDir string) (string, error) {\n\tout, err := ReadPath(filepath.Join(dataDir, HostUUIDFile))\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrPermission) {\n\t\t\t//do not convert to system error as this loses the ability to compare that it is a permission error\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn \"\", trace.ConvertSystemError(err)\n\t}\n\tid := strings.TrimSpace(string(out))\n\tif id == \"\" {\n\t\treturn \"\", trace.NotFound(\"host uuid is empty\")\n\t}\n\treturn id, nil\n}",
"func post(w http.ResponseWriter,r *http.Request) {\n\toutput, _ := exec.Command(\"dbus-uuidgen\").Output()\n\tuuid := strings.TrimSuffix(string(output), \"\\n\") //注意生成的uuid包含\\n后缀,而在url中该字符别翻译为%OA,造成无法删除临时问题\n\tname := strings.Split(r.URL.EscapedPath(), \"/\")[2]\n\tsize, e := strconv.ParseInt(r.Header.Get(\"size\"), 0, 64)\n\tif e != nil{\n\t\tlog.Errorf(\"Temp/<hash> post parse_size error %v\",e)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tt := tempinfo{Uuid:uuid,Name:name,Size:size}\n\te = t.writeToFile()\n\tif e!= nil{\n\t\tlog.Errorf(\"Temp/<hash> post write to file error %v\",e)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tos.Create(os.Getenv(\"STORAGE_ROOT\")+\"/temp/\"+t.Uuid+\".dat\")\n\tw.Write([]byte(t.Uuid))\n}",
"func writeHostMap(hostMap map[string]string) {\n\tif host_list_file == \"\" {\n\t\treturn\n\t}\n\tf, err := os.Create(host_list_file)\n\tif err != nil {\n\t\tlogr.LogLine(logr.Lerror, ltagsrc, err.Error())\n\t}\n\tdefer f.Close()\n\n\tfor host := range hostMap {\n\t\tf.WriteString(fmt.Sprintf(\"%s\\n\", host))\n\t}\n}",
"func (b *Broker) createIDFile(home string, filepath string, id string) (err error) {\n\t_filepath := fmt.Sprintf(\"%v%v%v\", home, string(os.PathSeparator), filepath)\n\terr = ioutil.WriteFile(_filepath, []byte(id), 0644)\n\n\treturn\n}",
"func WritePidFile(path string, pid int) error {\n\tlog.WithField(\"pid\", pid).Debug(\"writing pid file\")\n\tpidFile, err := os.Create(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to write pid file: %v\", err)\n\t}\n\tdefer pidFile.Close()\n\tpidFile.WriteString(strconv.Itoa(pid))\n\treturn nil\n}",
"func (e *EPub) SetUUID(uu string) error {\n\tu, err := uuid.FromString(uu)\n\tif err != nil {\n\t\treturn err\n\t}\n\te.uuid = \"urn:uuid:\" + u.String()\n\tlog.Printf(\"Setting uuid, theoretically %q\", e.uuid)\n\tfor i, m := range e.metadata {\n\t\tif m.kind == \"dc:identifier\" {\n\t\t\tlog.Printf(\"Set id to %q\", e.uuid)\n\t\t\te.metadata[i].value = e.uuid\n\t\t}\n\t}\n\treturn nil\n}",
"func (device *DCV2Bricklet) WriteUID(uid uint32) (err error) {\n\tvar buf bytes.Buffer\n\tbinary.Write(&buf, binary.LittleEndian, uid)\n\n\tresultBytes, err := device.device.Set(uint8(FunctionWriteUID), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}",
"func (device *IndustrialDigitalIn4V2Bricklet) WriteUID(uid uint32) (err error) {\n\tvar buf bytes.Buffer\n\tbinary.Write(&buf, binary.LittleEndian, uid)\n\n\tresultBytes, err := device.device.Set(uint8(FunctionWriteUID), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}",
"func WritePidFile(componentName string) error {\n\tpidFile := fmt.Sprintf(\"%s/%s-%d.pid\", KtHome, componentName, os.Getpid())\n\treturn ioutil.WriteFile(pidFile, []byte(fmt.Sprintf(\"%d\", os.Getpid())), 0644)\n}",
"func writeUint24(b *bytes.Buffer, value uint24) {\n\tb.WriteByte(byte(value))\n\tb.WriteByte(byte(value >> 8))\n\tb.WriteByte(byte(value >> 16))\n}",
"func encodeUUID(src [16]byte) string {\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", src[0:4], src[4:6], src[6:8], src[8:10], src[10:16])\n}",
"func writeFile(dir, file, uid, gid string, data []byte) error {\n\tfnlog := log.\n\t\tWithField(\"dir\", dir).\n\t\tWithField(\"file\", file)\n\n\ttmpfile, err := ioutil.TempFile(dir, \"systemk.*.tmp\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tfnlog.Debugf(\"chowning %q to %s.%s\", tmpfile.Name(), uid, gid)\n\tif err := chown(tmpfile.Name(), uid, gid); err != nil {\n\t\treturn err\n\t}\n\n\tx := 10\n\tif len(data) < 10 {\n\t\tx = len(data)\n\t}\n\tfnlog.Debugf(\"writing data %q to path %q\", data[:x], tmpfile.Name())\n\tif err := ioutil.WriteFile(tmpfile.Name(), data, 0640); err != nil {\n\t\treturn err\n\t}\n\tpath := filepath.Join(dir, file)\n\tfnlog.Debugf(\"renaming %q to %q\", tmpfile.Name(), path)\n\n\treturn os.Rename(tmpfile.Name(), path)\n}",
"func writePid() {\n\tif *pid_file_path == \"\" {\n\t\treturn\n\t}\n\tf, err := os.OpenFile(*pid_file_path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)\n\tif err != nil {\n\t\tlog.Printf(\"ERROR unable to open pidfile: %v\", err)\n\t\treturn\n\t}\n\tfmt.Fprintln(f, os.Getpid())\n\tonShutdown(rmPidfile)\n}",
"func GenUUID(account string) string {\n h1 := md5.New()\n io.WriteString(h1, account)\n io.WriteString(h1, UUIDkey)\n h2 := md5.New()\n io.WriteString(h2, account)\n io.WriteString(h2, MD5key)\n return fmt.Sprintf(\"%x%x\", h1.Sum(nil), h2.Sum(nil))\n}",
"func CreateUuidForMonitorData(md MonitorData) string {\n serial := SerializeMonitorData(md)\n h := sha256.New()\n h.Write(serial)\n return fmt.Sprintf(\"%x\", h.Sum(nil))\n}",
"func (w *FormSerializationWriter) WriteUUIDValue(key string, value *uuid.UUID) error {\n\tif key != \"\" && value != nil {\n\t\tw.writePropertyName(key)\n\t}\n\tif value != nil {\n\t\tw.writeStringValue((*value).String())\n\t}\n\tif key != \"\" && value != nil {\n\t\tw.writePropertySeparator()\n\t}\n\treturn nil\n}",
"func encodeHex(dst []byte, uuid Uuid) {\n\t// hex.Encode takes an input byte array and returns the bytes of the string\n\t// of the hex encoding of each input byte\n\t// example: [63,127] ==> ['3', 'f', '7', 'f']\n\thex.Encode(dst[:], uuid[:4])\n\tdst[8] = '-'\n\thex.Encode(dst[9:13], uuid[4:6])\n\tdst[13] = '-'\n\thex.Encode(dst[14:18], uuid[6:8])\n\tdst[18] = '-'\n\thex.Encode(dst[19:23], uuid[8:10])\n\tdst[23] = '-'\n\thex.Encode(dst[24:], uuid[10:])\n}",
"func createFakeDHCP() error{\n\n\n dhcpData := []byte(`lease 192.168.50.63 {\n starts 4 2019/08/08 22:32:49;\n ends 4 2019/08/08 23:52:49;\n cltt 4 2019/08/08 22:32:49;\n binding state active;\n next binding state free;\n rewind binding state free;\n hardware ethernet 08:00:27:00:ab:2c;\n client-hostname \"fake-test-bmh\"\";\n}`)\n err := ioutil.WriteFile(\"/var/lib/dhcp/dhcpd.leases\", dhcpData, 0777)\n\n if (err != nil) {\n return err\n }\n\n return nil\n}",
"func (ins *EC2RemoteClient) WriteBytesToFile(source []byte, destination string) error {\n\terr := ins.cmdClient.WriteBytesToFile(source, destination)\n\treturn err\n}",
"func writePid() {\n\tpid := os.Getpid()\n\tpidfile := os.ExpandEnv(\"$PIDFILE\")\n\tlog.Printf(\"Opening pidfile %s: %d\", pidfile, pid)\n\tif pidfile != \"\" {\n\t\tfile, err := os.Create(pidfile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Couldn't open pidfile \" + pidfile)\n\t\t}\n\t\tio.WriteString(file, strconv.Itoa(pid))\n\t\tdefer func() {\n\t\t\tif err = file.Close(); err != nil {\n\t\t\t\tlog.Fatal(\"Couldn't close pidfile \" + pidfile + \". \" + err.Error())\n\t\t\t}\n\t\t}()\n\t}\n}",
"func (hof *Heap) uuid() http.Arrow {\n\treturn http.GET(\n\t\tø.URI(\"https://httpbin.org/uuid\"),\n\t\tø.Accept.JSON,\n\n\t\tƒ.Status.OK,\n\t\tƒ.ContentType.JSON,\n\t\tƒ.Body(&hof.ID),\n\t)\n}",
"func pgFormatUUID(arr [16]byte) (out []byte) {\n\tout = make([]byte, 36)\n\n\t_ = hex.Encode(out[0:8], arr[0:4])\n\t_ = hex.Encode(out[9:13], arr[4:6])\n\t_ = hex.Encode(out[14:18], arr[6:8])\n\t_ = hex.Encode(out[19:23], arr[8:10])\n\t_ = hex.Encode(out[24:], arr[10:])\n\n\tout[8] = '-'\n\tout[13] = '-'\n\tout[18] = '-'\n\tout[23] = '-'\n\n\treturn out\n}",
"func writeToFile(file *os.File, data uint32, offset int) {\n\tbuffer := make([]byte, UINT32_LENGTH)\n\tbinary.LittleEndian.PutUint32(buffer, data)\n\tfile.WriteAt(buffer, int64(offset))\n}",
"func WriteTempVaultIDFile(t *testing.T, password string) string {\n\ttempVaultIDFile, err := ioutil.TempFile(\"\", \".temp-vault-id\")\n\tif err != nil {\n\t\tt.Fatal(\"Expected a temp vault id file to be crated\", err)\n\t}\n\ttempVaultIDFileToWrite, err := os.OpenFile(tempVaultIDFile.Name(), os.O_RDWR, 0644)\n\tif err != nil {\n\t\tt.Fatal(\"Expected a temp vault id file to be writable\", err)\n\t}\n\ttempVaultIDFileToWrite.WriteString(password)\n\ttempVaultIDFileToWrite.Close()\n\treturn tempVaultIDFile.Name()\n}",
"func (mariadbFlavor) serverUUID(c *Conn) (string, error) {\n\treturn \"\", nil\n}",
"func (h *Host) ID() string {\n\tif h.id == \"\" {\n\t\thash := md5.New()\n\t\t_, _ = io.WriteString(hash, h.IP+h.MAC)\n\t\th.id = fmt.Sprintf(\"%x\", hash.Sum(nil))\n\t}\n\n\treturn h.id\n}",
"func uuid() string {\n\tout, err := exec.Command(\"/usr/bin/uuidgen\").Output()\n\tif err != nil {\n\t\tlog.Fatal().\n\t\t\tStr(\"command\", \"/usr/bin/uuidgen\").\n\t\t\tMsg(\"There was an error generating the uuid.\")\n\t}\n\n\t//n := bytes.IndexByte(out, 0)\n\ts := string(out)\n\ts = strings.TrimSpace(s)\n\treturn s\n}",
"func Write(idFile *IdentityFile, path string) error {\n\tbuf := new(bytes.Buffer)\n\tif err := encodeIdentityFile(buf, idFile); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err := os.WriteFile(path, buf.Bytes(), FilePermissions); err != nil {\n\t\treturn trace.ConvertSystemError(err)\n\t}\n\treturn nil\n}",
"func (k TimeKey) UUID() string {\n\tbuf := make([]byte, 36)\n\n\thex.Encode(buf[0:8], k[0:4])\n\tbuf[8] = '-'\n\thex.Encode(buf[9:13], k[4:6])\n\tbuf[13] = '-'\n\thex.Encode(buf[14:18], k[6:8])\n\tbuf[18] = '-'\n\thex.Encode(buf[19:23], k[8:10])\n\tbuf[23] = '-'\n\thex.Encode(buf[24:], k[10:])\n\n\treturn string(buf)\n}",
"func (h *Harness) UUID(id string) string { return h.uuidG.Get(id) }",
"func writePidFile(pidFile string) error {\n\t// Read in the pid file as a slice of bytes.\n\tpiddata, err := ioutil.ReadFile(pidFile)\n\tif err == nil {\n\t\t// Convert the file contents to an integer.\n\t\tpid, err := strconv.Atoi(string(piddata))\n\t\tif err == nil {\n\t\t\t// Look for the pid in the process list.\n\t\t\tprocess, err := os.FindProcess(pid)\n\t\t\tif err == nil {\n\t\t\t\t// Send the process a signal zero kill.\n\t\t\t\terr := process.Signal(syscall.Signal(0))\n\t\t\t\tif err == nil {\n\t\t\t\t\t// We only get an error if the pid isn't running,\n\t\t\t\t\t// or it's not ours.\n\t\t\t\t\treturn fmt.Errorf(\"pid already running: %d\", pid)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// If we get here, then the pidfile didn't exist,\n\t// or the pid in it doesn't belong to the user running this app.\n\treturn ioutil.WriteFile(pidFile,\n\t\t[]byte(fmt.Sprintf(\"%d\", os.Getpid())), 0664)\n}",
"func sendUUIDToPlayer(id int64, client *Client) {\n\tmsg := protocol.CreatePlayerUUIDMessage(id)\n\tsendMessageToClient(msg, id)\n}",
"func (w *Writer) SetUUID(u uuid.UUID) {\n\tcopy(w.blk[uuidStart:uuidEnd], u[:])\n\tcopy(w.blk[uuidCopyStart:uuidCopyEnd], u[:])\n}",
"func (h *HAProxyManager) write(b []byte) error {\n\tf, err := os.OpenFile(h.filename(), os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\t_, err = f.Write(b)\n\treturn err\n}",
"func writeFile(v string) {\n\t// 打开文件\n\tfilePtr, err := os.OpenFile(\"mqtt.json\", os.O_CREATE|os.O_WRONLY, os.ModePerm)\n\tif err != nil {\n\t\tmqtt.ERROR.Println(err)\n\t}\n\n\tdefer filePtr.Close()\n\n\ttype Data struct {\n\t\tDeviceID string `JSON:\"deviceID\"` //设备id\n\t\tTimestamp string `JSON:\"timestamp\"` //时间戳\n\t\tFields map[string]string `JSON:\"fields\"` //标签\n\t}\n\tvar data Data\n\tif err := json.Unmarshal([]byte(v), &data); err == nil {\n\n\t\t// 创建Json编码器\n\t\tencoder := json.NewEncoder(filePtr)\n\t\terr = encoder.Encode(data)\n\t\tif err != nil {\n\t\t\tmqtt.ERROR.Println(\"writeFile failed\", err.Error())\n\t\t} else {\n\t\t\tmqtt.ERROR.Println(\"writeFile success\")\n\t\t}\n\t} else {\n\t\tmqtt.ERROR.Println(err)\n\t}\n\n}",
"func UUID() (string, error) {\n\tb := make([]byte, 2)\n\n\t_, err := crand.Read(b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%X\", b[0:2]), nil\n}",
"func HardwareUUID() (string, error) {\n\t/*\n\t\tSample output of 'wmic path Win32_ComputerSystemProduct get uuid'\n\n\t\tUUID\n\t\t4219B2F5-C25F-6AF2-573C-35B0DF557236\n\t*/\n\tresult, err := readAndParseFromCommandLine(hardwareUUIDCmd)\n\tif err != nil {\n\t\treturn \"-1\", err\n\t}\n\thardwareUUID := \"\"\n\tif len(result) > 1 {\n\t\t// remove all spaces from the second line as that line consists hardware uuid\n\t\tre := regexp.MustCompile(\"\\\\s|\\\\r\")\n\t\thardwareUUID = re.ReplaceAllString(result[1], \"\")\n\t}\n\treturn hardwareUUID, nil\n}",
"func generateUniqueId() string {\n\tcmd := exec.Command(\"/usr/bin/uuidgen\")\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tuuid := out.String()\n\tuuid = strings.Replace(uuid, \"\\n\", \"\", 1)\n\treturn uuid\n}",
"func MakeCustomizedUuid(port, nodeNum int) (string, error) {\n\treDigit := regexp.MustCompile(`\\d`)\n\tgroup1 := fmt.Sprintf(\"%08d\", port)\n\tgroup2 := fmt.Sprintf(\"%04d-%04d-%04d\", nodeNum, nodeNum, nodeNum)\n\tgroup3 := fmt.Sprintf(\"%012d\", port)\n\t// 12345678 1234 1234 1234 123456789012\n\t// new_uuid=\"00000000-0000-0000-0000-000000000000\"\n\tswitch {\n\tcase nodeNum > 0 && nodeNum <= 9:\n\t\tgroup2 = reDigit.ReplaceAllString(group2, fmt.Sprintf(\"%d\", nodeNum))\n\t\tgroup3 = reDigit.ReplaceAllString(group3, fmt.Sprintf(\"%d\", nodeNum))\n\t// Number greater than 10 make little sense for this purpose.\n\t// But we keep the rule so that a valid UUID will be formatted in any case.\n\tcase nodeNum >= 10000 && nodeNum <= 99999:\n\t\tgroup2 = fmt.Sprintf(\"%04d-%04d-%04d\", 0, int(nodeNum/10000), nodeNum-10000*int(nodeNum/10000))\n\tcase nodeNum >= 100000 && nodeNum < 1000000:\n\t\tgroup2 = fmt.Sprintf(\"%04d-%04d-%04d\", int(nodeNum/10000), 0, 0)\n\tcase nodeNum >= 1000000:\n\t\treturn \"\", fmt.Errorf(\"node num out of boundaries: %d\", nodeNum)\n\t}\n\treturn fmt.Sprintf(\"%s-%s-%s\", group1, group2, group3), nil\n}",
"func writePidFile(pidFile string) error {\n\t// Read in the pid file as a slice of bytes.\n\tif piddata, err := ioutil.ReadFile(pidFile); err == nil {\n\t\t// Convert the file contents to an integer.\n\t\tif pid, err := strconv.Atoi(string(piddata)); err == nil {\n\t\t\t// Look for the pid in the process list.\n\t\t\tif process, err := os.FindProcess(pid); err == nil {\n\t\t\t\t// Send the process a signal zero kill.\n\t\t\t\tif err := process.Signal(syscall.Signal(0)); err == nil {\n\t\t\t\t\t// We only get an error if the pid isn't running, or it's not ours.\n\t\t\t\t\treturn fmt.Errorf(\"pid already running: %d\", pid)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// If we get here, then the pidfile didn't exist,\n\t// or the pid in it doesn't belong to the user running this app.\n\treturn ioutil.WriteFile(pidFile, []byte(fmt.Sprintf(\"%d\", os.Getpid())), 0664)\n}",
"func writePidFile(pidFile string) error {\n\t// Read in the pid file as a slice of bytes.\n\tif piddata, err := ioutil.ReadFile(pidFile); err == nil {\n\t\t// Convert the file contents to an integer.\n\t\tif pid, err := strconv.Atoi(string(piddata)); err == nil {\n\t\t\t// Look for the pid in the process list.\n\t\t\tif process, err := os.FindProcess(pid); err == nil {\n\t\t\t\t// Send the process a signal zero kill.\n\t\t\t\tif err := process.Signal(syscall.Signal(0)); err == nil {\n\t\t\t\t\t// We only get an error if the pid isn't running, or it's not ours.\n\t\t\t\t\treturn fmt.Errorf(\"pid already running: %d\", pid)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// If we get here, then the pidfile didn't exist,\n\t// or the pid in it doesn't belong to the user running this app.\n\treturn ioutil.WriteFile(pidFile, []byte(fmt.Sprintf(\"%d\", os.Getpid())), 0664)\n}",
"func (m *Attachment) GenerateUUID() (string, error) {\n\tout, err := exec.Command(\"uuidgen\").Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strings.Replace(strings.Trim(string(out), \"\\n\"), \"-\", \"_\", -1), nil\n}",
"func (a *accountManager) writeToFile(addrHex string, secretVersion int64, conf config.NewAccount) (config.AccountFile, error) {\n\tnow := time.Now().UTC()\n\tnowISO8601 := now.Format(\"2006-01-02T15-04-05.000000000Z\")\n\tfilename := fmt.Sprintf(\"UTC--%v--%v\", nowISO8601, addrHex)\n\n\tfullpath, err := a.client.accountDirectory.Parse(filename)\n\tif err != nil {\n\t\treturn config.AccountFile{}, err\n\t}\n\tfilePath := fullpath.Host + \"/\" + fullpath.Path\n\tlog.Printf(\"[DEBUG] writing to file %v\", filePath)\n\n\tfileData := conf.AccountFile(fullpath.String(), addrHex, secretVersion)\n\n\tlog.Printf(\"[DEBUG] marshalling file contents: %v\", fileData)\n\tcontents, err := json.Marshal(fileData.Contents)\n\tif err != nil {\n\t\treturn config.AccountFile{}, err\n\t}\n\tlog.Printf(\"[DEBUG] marshalled file contents: %v\", contents)\n\n\tlog.Printf(\"[DEBUG] Creating temp file %v/%v\", filepath.Dir(filePath), fmt.Sprintf(\".%v*.tmp\", filepath.Base(fullpath.String())))\n\tf, err := ioutil.TempFile(filepath.Dir(filePath), fmt.Sprintf(\".%v*.tmp\", filepath.Base(fullpath.String())))\n\tif err != nil {\n\t\treturn config.AccountFile{}, err\n\t}\n\tif _, err := f.Write(contents); err != nil {\n\t\tf.Close()\n\t\tos.Remove(f.Name())\n\t\treturn config.AccountFile{}, err\n\t}\n\tf.Close()\n\n\tlog.Println(\"[DEBUG] Renaming temp file\")\n\tif err := os.Rename(f.Name(), filePath); err != nil {\n\t\treturn config.AccountFile{}, err\n\t}\n\treturn fileData, nil\n}",
"func SaveHash(hash string) {\n\tfile, err := os.Create(fileName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer file.Close()\n\n\tfile.WriteString(hash)\n}",
"func SetupEncode(uid string, w io.Writer) error {\n\tif !uuidRE.MatchString(uid) {\n\t\treturn fmt.Errorf(\"name must be a UUIDv4 identifier\")\n\t}\n\tw.Write([]byte(uid))\n\n\treturn nil\n}",
"func WriteUInt16(buffer []byte, offset int, value uint16) {\n buffer[offset + 0] = byte(value >> 0)\n buffer[offset + 1] = byte(value >> 8)\n}",
"func (u uuid) string() string {\n\tbuf := make([]byte, 36)\n\n\thex.Encode(buf[0:8], u[0:4])\n\tbuf[8] = dash\n\thex.Encode(buf[9:13], u[4:6])\n\tbuf[13] = dash\n\thex.Encode(buf[14:18], u[6:8])\n\tbuf[18] = dash\n\thex.Encode(buf[19:23], u[8:10])\n\tbuf[23] = dash\n\thex.Encode(buf[24:], u[10:])\n\n\treturn string(buf)\n}",
"func (file *LockFile) WritePid() (err error) {\n\tif _, err = file.Seek(0, os.SEEK_SET); err != nil {\n\t\treturn\n\t}\n\tvar fileLen int\n\tif fileLen, err = fmt.Fprint(file, os.Getpid()); err != nil {\n\t\treturn\n\t}\n\tif err = file.Truncate(int64(fileLen)); err != nil {\n\t\treturn\n\t}\n\terr = file.Sync()\n\treturn\n}",
"func (this UUID) Hex() string {\n\tx := [16]byte(this)\n\treturn fmt.Sprintf(\"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\",\n\t\tx[0], x[1], x[2], x[3], x[4],\n\t\tx[5], x[6],\n\t\tx[7], x[8],\n\t\tx[9], x[10], x[11], x[12], x[13], x[14], x[15])\n\n}",
"func Format(uuid UUID, style Style) string {\n\tif len(uuid) != 16 {\n\t\tpanic(\"uuid: UUID is invalid\")\n\t}\n\n\tbuffer := []byte(uuid)\n\tswitch style {\n\tcase StyleStandard:\n\t\treturn fmt.Sprintf(\"%08x-%04x-%04x-%04x-%012x\", buffer[:4], buffer[4:6], buffer[6:8], buffer[8:10], buffer[10:])\n\tcase StyleWithoutDash:\n\t\treturn fmt.Sprintf(\"%x\", buffer[:])\n\tdefault:\n\t\tpanic(\"uuid: style of UUID is invalid\")\n\t}\n}",
"func uuid() []byte {\n\tuuid := make([]byte, 16)\n\t_, err := rand.Read(uuid)\n\tif err != nil {\n\t\tpanic(\"cue/hosted: uuid() failed to read random bytes\")\n\t}\n\n\t// The following bit twiddling is outlined in RFC 4122. In short, it\n\t// identifies the UUID as a v4 random UUID.\n\tuuid[6] = (4 << 4) | (0xf & uuid[6])\n\tuuid[8] = (8 << 4) | (0x3f & uuid[8])\n\treturn uuid\n}",
"func (dev *HidDevice) Write(b []byte) (int, error) {\n\treturn 0, ErrUnsupportedPlatform\n}",
"func writeInt16ToFile(input int16, fp *os.File) {\n\tbuff := new(bytes.Buffer)\n\terr := binary.Write(buff, binary.LittleEndian, input)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tintByteArray := buff.Bytes()\n\tfp.Write(intByteArray)\n}",
"func createV4UUID() string {\n\tu := make([]byte, 16)\n\trand.Read(u)\n\t// 13th char must be 4 and 17th must be in [89AB]\n\tu[8] = (u[8] | 0x80) & 0xBF\n\tu[6] = (u[6] | 0x40) & 0x4F\n\treturn fmt.Sprintf(\"%X-%X-%X-%X-%X\", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])\n}",
"func HostUUIDExistsLocally(dataDir string) bool {\n\t_, err := ReadHostUUID(dataDir)\n\treturn err == nil\n}",
"func writeToken(filePath string, token string) {\n\t// Check if file exists\n\tif _, err := os.Stat(filePath); os.IsNotExist(err) {\n\t\t// Doesn't exist; lets create it\n\t\terr = os.MkdirAll(filepath.Dir(filePath), 0700)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tb := []byte(token)\n\tif err := ioutil.WriteFile(filePath, b, 0600); err != nil {\n\t\treturn\n\t}\n}",
"func putUuid(log log.T, byteArray []byte, offset int, input uuid.UUID) (err error) {\n\tif input == nil {\n\t\tlog.Error(\"putUuid failed: input is null.\")\n\t\treturn errors.New(\"putUuid failed: input is null.\")\n\t}\n\n\tbyteArrayLength := len(byteArray)\n\tif offset > byteArrayLength-1 || offset+16-1 > byteArrayLength-1 || offset < 0 {\n\t\tlog.Error(\"putUuid failed: Offset is invalid.\")\n\t\treturn errors.New(\"Offset is outside the byte array.\")\n\t}\n\n\tleastSignificantLong, err := bytesToLong(log, input.Bytes()[8:16])\n\tif err != nil {\n\t\tlog.Error(\"putUuid failed: Failed to get leastSignificant Long value.\")\n\t\treturn errors.New(\"Failed to get leastSignificant Long value.\")\n\t}\n\n\tmostSignificantLong, err := bytesToLong(log, input.Bytes()[0:8])\n\tif err != nil {\n\t\tlog.Error(\"putUuid failed: Failed to get mostSignificantLong Long value.\")\n\t\treturn errors.New(\"Failed to get mostSignificantLong Long value.\")\n\t}\n\n\terr = putLong(log, byteArray, offset, leastSignificantLong)\n\tif err != nil {\n\t\tlog.Error(\"putUuid failed: Failed to put leastSignificantLong Long value.\")\n\t\treturn errors.New(\"Failed to put leastSignificantLong Long value.\")\n\t}\n\n\terr = putLong(log, byteArray, offset+8, mostSignificantLong)\n\tif err != nil {\n\t\tlog.Error(\"putUuid failed: Failed to put mostSignificantLong Long value.\")\n\t\treturn errors.New(\"Failed to put mostSignificantLong Long value.\")\n\t}\n\n\treturn nil\n}",
"func uuid() string {\n\treturn fmt.Sprintf(\"%s\", guid.NewV4())\n}",
"func (c *Config) WriteNodeID() error {\n\tnodeIDFile := \"node.id\"\n\terr := ioutil.WriteFile(nodeIDFile, []byte(c.NodeID), 0666)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot write file [%s]: %v\", nodeIDFile, err)\n\t}\n\treturn nil\n}",
"func ToHyphenUUID(uuid string) string {\n\t// 8 - 4 - 4 - 4 - 12\n\treturn fmt.Sprintf(\"%v-%v-%v-%v-%v\", uuid[:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:])\n}",
"func saveHostMetadata(metadata Metadata) error {\n\tdataBytes, err := json.Marshal(metadata)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"[Telemetry] marshal data failed with err %+v\", err)\n\t}\n\n\tif err = ioutil.WriteFile(metadataFile, dataBytes, 0644); err != nil {\n\t\ttelemetryLogger.Printf(\"[Telemetry] Writing metadata to file failed: %v\", err)\n\t}\n\n\treturn err\n}",
"func GenerateUUID(device string) error {\n\t// for mounting the cloned volume for btrfs, a new UUID has to be generated\n\tcmd := exec.Command(\"btrfstune\", \"-f\", \"-u\", device)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tklog.Errorf(\"btrfs: uuid generate failed for device %s error: %s\", device, string(out))\n\t\treturn err\n\t}\n\tklog.Infof(\"btrfs: generated UUID for the device %s \\n %v\", device, string(out))\n\treturn nil\n}",
"func (rng *Tunafish) WriteSeed(filename string) error {\n\tif !rng.Initialised() {\n\t\treturn ErrNotInitialised\n\t}\n\n\tseed, err := rng.Seed()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(filename, seed, 0600)\n}",
"func (dm *dataManager) writeUint(address uint, u uint) (err ProcessException) {\n\tdata := make([]byte, 4)\n\tbinary.LittleEndian.PutUint32(data, uint32(u))\n\n\terr = dm.process.WriteBytes(address, data)\n\n\treturn\n}",
"func (dev VMVolumeDevice) UUID() string {\n\treturn utils.NewUUID5(blockVolumeNsUUID, dev.HostPath)\n}",
"func appendHexUint16(dst []byte, src uint16) []byte {\n\tdst = append(dst, \"0000\"[1+(bits.Len16(src)-1)/4:]...)\n\tdst = strconv.AppendUint(dst, uint64(src), 16)\n\treturn dst\n}",
"func (h *Hostman) Write() error {\n\tvar final string\n\n\tfor _, entry := range h.entries {\n\t\tif entry.Disabled {\n\t\t\tfinal += \"#\"\n\t\t}\n\n\t\tfinal += entry.Raw + \"\\n\"\n\t}\n\n\treturn ioutil.WriteFile(h.filename, []byte(final), 0644)\n}",
"func saveHostMetadata(metadata Metadata) error {\n\tdataBytes, err := json.Marshal(metadata)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"[Telemetry] marshal data failed with err %+v\", err)\n\t}\n\n\tif err = ioutil.WriteFile(metadataFile, dataBytes, 0644); err != nil {\n\t\tlog.Logf(\"[Telemetry] Writing metadata to file failed: %v\", err)\n\t}\n\n\treturn err\n}",
"func TransportFile(uuid string, owlh map[string]string, file string) {\n logs.Info(\"Get file \" + owlh[\"local_pcap_path\"] + \" from \" + owlh[\"name\"] + \" - \" + owlh[\"ip\"])\n TransportFileSSH(uuid, owlh, file)\n}",
"func (pm *procMan) WritePID(t *testing.T, pid int) {\n\tpm.Lock()\n\tdefer pm.Unlock()\n\tpsProc, err := ps.FindProcess(pid)\n\tif err != nil {\n\t\tt.Fatalf(\"cannot inspect proc %d: %s\", pid, err)\n\t}\n\tif psProc == nil {\n\t\ttime.Sleep(time.Second)\n\t\tpsProc, err := ps.FindProcess(pid)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"cannot inspect proc %d: %s\", pid, err)\n\t\t}\n\t\tif psProc == nil {\n\t\t\tt.Logf(\"Warning! Possibly orphaned PID: %d\", pid)\n\t\t\treturn\n\t\t}\n\t}\n\n\tpidFile := pm.PIDFile\n\n\tvar f *os.File\n\tif s, err := os.Stat(pidFile); err != nil {\n\t\tif !isNotExist(err) {\n\t\t\tt.Fatalf(\"could not stat %q: %s\", pidFile, err)\n\t\t\treturn\n\t\t}\n\t\tif s != nil && s.IsDir() {\n\t\t\tt.Fatalf(\"cannot write to file %q: it's a directory\", pidFile)\n\t\t}\n\t\tf, err = os.Create(pidFile)\n\t\tdefer closeFiles(f)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"could not create %q: %s\", pidFile, err)\n\t\t}\n\t}\n\tif f == nil {\n\t\tvar err error\n\t\tf, err = os.OpenFile(pidFile, os.O_APPEND|os.O_WRONLY, os.ModeAppend)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"could not open %q: %s\", pidFile, err)\n\t\t\treturn\n\t\t}\n\t\tdefer closeFiles(f)\n\t}\n\tif psProc == nil {\n\t\tt.Logf(\"Warning! Unable to write PID %d to pidfile: psProc became nil all of a sudden\", pid)\n\t\treturn\n\n\t}\n\tif _, err := fmt.Fprintf(f, \"%d\\t%s\\n\", pid, psProc.Executable()); err != nil {\n\t\tt.Fatalf(\"could not write PID %d (exe %s) to file %q: %s\",\n\t\t\tpid, psProc.Executable(), pidFile, err)\n\t}\n}",
"func writeFile(iptvline string){\n\n\t//check if file exists\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\t// create file if not exists\n\t\tif os.IsNotExist(err) {\n\t\t\tvar file, err = os.Create(path)\n\t\t\tif isError(err) { return }\n\t\t\tdefer file.Close()\n\t\t}\n\t}\n\n\n\tfileHandle, _ := os.OpenFile(path, os.O_APPEND, 0666)\n\twriter := bufio.NewWriter(fileHandle)\n\tdefer fileHandle.Close()\n\n\tfmt.Fprintln(writer, iptvline)\n\twriter.Flush()\n}",
"func UUID() string {\n\treturn strings.Replace(UUID4(), \"-\", \"\", -1)\n}",
"func updateHostString(hosts string) error {\n\tif hostFile, err := getHostFile(); err == nil {\n\t\treturn ioutil.WriteFile(hostFile, []byte(hosts), 0777)\n\t} else {\n\t\treturn err\n\t}\n}",
"func BytesToUUIDFormat(bytes []byte) string {\n\treturn codec.ToByteArray(bytes[0:4]).Hex() + \"-\" +\n\t\tcodec.ToByteArray(bytes[4:6]).Hex() + \"-\" +\n\t\tcodec.ToByteArray(bytes[6:8]).Hex() + \"-\" +\n\t\tcodec.ToByteArray(bytes[8:10]).Hex() + \"-\" +\n\t\tcodec.ToByteArray(bytes[10:]).Hex()\n}",
"func WriteUInt32(buffer []byte, offset int, value uint32) {\n buffer[offset + 0] = byte(value >> 0)\n buffer[offset + 1] = byte(value >> 8)\n buffer[offset + 2] = byte(value >> 16)\n buffer[offset + 3] = byte(value >> 24)\n}",
"func createID(rec *OutputRecord) string {\n\tstr := rec.Type + rec.Path + rec.Datetime + rec.IPAddress + rec.UserID\n\tsum := sha1.Sum([]byte(str))\n\treturn hex.EncodeToString(sum[:])\n}",
"func writeHeapToFile(h *minHeap, filename string) {\n\tfile, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)\n\tdefer file.Close()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\turlCntMap := make(map[string]int)\n\tfor i := 0; i < h.cap; i++ {\n\t\tpair, err := h.getNode(i)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\turlCntMap[pair.url] = pair.cnt\n\t}\n\tgenResult(filename, urlCntMap, h.cap)\n}",
"func TestWriteTagHeader(t *testing.T) {\n\tt.Parallel()\n\n\tbuf := new(bytes.Buffer)\n\tbw := bufio.NewWriter(buf)\n\tdst := make([]byte, 4)\n\n\tif err := writeTagHeader(bw, dst, 15351, 4); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := bw.Flush(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(thb, buf.Bytes()) {\n\t\tt.Fatalf(\"Expected %v, got %v\", thb, buf.Bytes())\n\t}\n}",
"func write_log(nombre string, partes int, nodos []string) {\n f, err := os.OpenFile(\"log.txt\",os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n if err != nil {\n log.Println(err)\n }\n defer f.Close()\n \n var buffer string = nombre+\" \"+strconv.Itoa(partes)+\"\\n\"\n \n if _, err := f.WriteString(buffer); err != nil {\n log.Println(err)\n }\n \n for i := 0; i < partes; i++ {\n index, err := strconv.Atoi(nodos[i])\n if err != nil {\n log.Fatalf(\"fail: %s\", err)\n }\n \n buffer = nombre+\" Parte_\"+strconv.Itoa(i+1)+\" \"+addresses[index]+\"\\n\"\n \n if _, err := f.WriteString(buffer); err != nil {\n log.Println(err)\n }\n } \n\n}",
"func (d *DivMaster) WriteDivMaster(n string) {\n\tf, err := os.Create(n)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t//w := bufio.NewReader(f)\n\tfor _, v := range d.divName {\n\t\t_, err := fmt.Fprintf(f, \"%v\\n\", v)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}",
"func removeUuidFromFilepath(path string) string {\n\t// UUID has 4 hyphens, so we split into 6 parts. \n\treturn strings.SplitN(filepath.Base(path), \"-\", 6)[5]\n}",
"func TestUUID(t *testing.T) {\n\ttext := utl.GeneredUUID()\n\tt.Logf(\"text:[%s]\", text)\n}",
"func (m MessageDescriptorMap) WriteFile(filename string) error {\n\tbytes, err := json.MarshalIndent(m, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn os.WriteFile(filename, append(bytes, '\\n'), 0o644) //nolint:gas\n}",
"func Encode(w io.Writer, h Hostsfile) error {\n\tfor _, record := range h.records {\n\t\tvar toWrite string\n\t\tif record.isBlank {\n\t\t\ttoWrite = \"\"\n\t\t} else if len(record.comment) > 0 {\n\t\t\ttoWrite = record.comment\n\t\t} else {\n\t\t\tout := make([]string, len(record.Hostnames))\n\t\t\ti := 0\n\t\t\tfor name := range record.Hostnames {\n\t\t\t\tout[i] = name\n\t\t\t\ti++\n\t\t\t}\n\t\t\tsort.Strings(out)\n\t\t\tout = append([]string{record.IpAddress.String()}, out...)\n\t\t\ttoWrite = strings.Join(out, \" \")\n\t\t}\n\t\ttoWrite += eol\n\t\t_, err := w.Write([]byte(toWrite))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func genUTF16(path, s string, order binary.ByteOrder) {\n\tbs := utf16.Encode([]rune(s))\n\tbuf := &bytes.Buffer{}\n\tif err := binary.Write(buf, order, bs); err != nil {\n\t\tpanic(err)\n\t}\n\tmustWriteFile(path, buf.Bytes())\n}",
"func (kv *DisKV) filePut(shard int, key string, content string) error {\n\tfullname := kv.shardDir(shard) + \"/key-\" + kv.encodeKey(key)\n\ttempname := kv.shardDir(shard) + \"/temp-\" + kv.encodeKey(key)\n\tif err := ioutil.WriteFile(tempname, []byte(content), 0666); err != nil {\n\t\treturn err\n\t}\n\tif err := os.Rename(tempname, fullname); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func GetHostUUID(nbmaster string, httpClient *http.Client, jwt string, host string) string {\r\n fmt.Printf(\"\\nGet the UUID of host %s...\\n\", host)\r\n\r\n uri := \"https://\" + nbmaster + \":\" + port + \"/netbackup/config/hosts\";\r\n\r\n request, _ := http.NewRequest(http.MethodGet, uri, nil)\r\n query := request.URL.Query()\r\n query.Add(\"filter\", \"hostName eq '\" + host + \"'\")\r\n request.URL.RawQuery = query.Encode()\r\n\r\n request.Header.Add(\"Authorization\", jwt);\r\n request.Header.Add(\"Accept\", contentTypeV3);\r\n\r\n response, err := httpClient.Do(request)\r\n\r\n hostUuid := \"\"\r\n if err != nil {\r\n fmt.Printf(\"The HTTP request failed with error: %s\\n\", err)\r\n panic(\"Unable to get the host UUID\")\r\n } else {\r\n if response.StatusCode == 200 {\r\n data, _ := ioutil.ReadAll(response.Body)\r\n var obj interface{}\r\n json.Unmarshal(data, &obj)\r\n response := obj.(map[string]interface{})\r\n hosts := response[\"hosts\"].([]interface{})\r\n hostUuid = ((hosts[0].(map[string]interface{}))[\"uuid\"]).(string)\r\n fmt.Printf(\"Host UUID: %s\\n\", hostUuid);\r\n } else {\r\n printErrorResponse(response)\r\n }\r\n }\r\n\r\n return hostUuid\r\n}",
"func (n *PaxosNode) writeAtSlot(slotNumber int, buf []byte) error {\n\twriteLog, _ := os.OpenFile(n.logFileName, os.O_RDWR, 0666)\n\toffset := int64(slotNumber * MAX_SLOT_BYTES)\n\twriteLog.Seek(offset, 0) // from origin of file go to current offset\n\tnbytes, err := writeLog.WriteString(string(buf) + \"\\n\")\n\tif err != nil {\n\t\tLOGE.Printf(\"Error in writing to log\")\n\t\treturn err\n\t}\n\tLOGV.Printf(\"wrote %d bytes\\n\", nbytes)\n\twriteLog.Sync()\n\twriteLog.Close()\n\n\tn.MessageAvailable <- true\n\n\treturn nil\n}",
"func (wmid *WzMachineIDUtil) setupMachineId() {\n\tsystemdMidFPath := \"/etc/machine-id\"\n\tif wmid.filePath == \"\" {\n\t\twmid.filePath = systemdMidFPath\n\t}\n\tmid, err := ioutil.ReadFile(wmid.filePath)\n\tif err != nil {\n\t\twmid.GetLogger().Debugf(\"File %s was not found\", wmid.filePath)\n\t\tmid, err = ioutil.ReadFile(systemdMidFPath)\n\t\tif err != nil {\n\t\t\twmid.GetLogger().Debugf(\"This system has no /etc/machine-id file, creating a replacement.\")\n\n\t\t\thasher := md5.New()\n\t\t\t_, err := io.WriteString(hasher, wzlib.MakeJid())\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tmid = []byte(fmt.Sprintf(\"%x\", hasher.Sum(nil)))\n\t\t}\n\t\tif wmid.filePath != systemdMidFPath {\n\t\t\tif err := ioutil.WriteFile(wmid.filePath, mid, 0644); err != nil {\n\t\t\t\twmid.GetLogger().Errorf(\"Unable to duplicate machine id: %s\", err.Error())\n\t\t\t}\n\t\t}\n\t}\n\twmid.machineid = strings.TrimSpace(string(mid))\n}",
"func writeDumpIndex(filepath string, dumpInfo *blockDumpInfo) error {\n\tdumpInfoData, err := json.Marshal(dumpInfo)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(filepath, dumpInfoData, 0666)\n}",
"func (m *Metadata) SetUUID(id string) {\n\tif id == \"\" {\n\t\tid = NewUUID() // generate random UUID if not defined\n\t}\n\tm.Identifier = []Element{{Value: id, ID: \"uuid\"}}\n}",
"func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error {\n\ttmpFile, err := os.CreateTemp(devices.metadataDir(), \".tmp\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"devmapper: Error creating metadata file: %s\", err)\n\t}\n\n\tn, err := tmpFile.Write(jsonData)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"devmapper: Error writing metadata to %s: %s\", tmpFile.Name(), err)\n\t}\n\tif n < len(jsonData) {\n\t\treturn io.ErrShortWrite\n\t}\n\tif err := tmpFile.Sync(); err != nil {\n\t\treturn fmt.Errorf(\"devmapper: Error syncing metadata file %s: %s\", tmpFile.Name(), err)\n\t}\n\tif err := tmpFile.Close(); err != nil {\n\t\treturn fmt.Errorf(\"devmapper: Error closing metadata file %s: %s\", tmpFile.Name(), err)\n\t}\n\tif err := os.Rename(tmpFile.Name(), filePath); err != nil {\n\t\treturn fmt.Errorf(\"devmapper: Error committing metadata file %s: %s\", tmpFile.Name(), err)\n\t}\n\n\treturn nil\n}",
"func (e *EPub) UUID() string {\n\treturn strings.TrimPrefix(\"urn:uuid:\", e.uuid)\n}",
"func writeEtcHostnameForContainer(globalOptions types.GlobalCommandOptions, hostname string, containerID string) ([]oci.SpecOpts, error) {\n\tif containerID == \"\" {\n\t\treturn nil, fmt.Errorf(\"container ID is required for setting up hostname file\")\n\t}\n\n\tdataStore, err := clientutil.DataStore(globalOptions.DataRoot, globalOptions.Address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstateDir, err := ContainerStateDirPath(globalOptions.Namespace, dataStore, containerID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thostnamePath := filepath.Join(stateDir, \"hostname\")\n\tif err := os.WriteFile(hostnamePath, []byte(hostname+\"\\n\"), 0644); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn []oci.SpecOpts{oci.WithHostname(hostname), withCustomEtcHostname(hostnamePath)}, nil\n}",
"func (e *EndToEndTest) WriteFileSsh(path string, content string) error {\n\treturn exec.Command(\"docker\", \"exec\", e.GetContainer(\"ssh\"), \"sh\", \"-c\",\n\t\tfmt.Sprintf(\"echo \\\"%s\\\" > %s\", content, path)).Run()\n}",
"func writeKeyToFile(keyBytes []byte, saveFileTo string) error {\n\terr := ioutil.WriteFile(saveFileTo, keyBytes, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}"
] | [
"0.64471084",
"0.6248215",
"0.5742111",
"0.55889857",
"0.54731405",
"0.54549134",
"0.53703684",
"0.5361488",
"0.5359973",
"0.5352255",
"0.5332432",
"0.5296048",
"0.52657807",
"0.5221564",
"0.5199073",
"0.518144",
"0.5132775",
"0.5102348",
"0.50953376",
"0.5062497",
"0.50417596",
"0.5040314",
"0.49691027",
"0.49506995",
"0.49506453",
"0.49401084",
"0.492673",
"0.49043378",
"0.48911652",
"0.4877534",
"0.48622662",
"0.48574185",
"0.4842028",
"0.47974378",
"0.47790903",
"0.47774404",
"0.47758725",
"0.47717625",
"0.47479907",
"0.47433522",
"0.47383502",
"0.47366178",
"0.4733186",
"0.4732963",
"0.4732963",
"0.47329295",
"0.47280547",
"0.47145176",
"0.4709323",
"0.46981215",
"0.46954164",
"0.46918654",
"0.46745187",
"0.46608406",
"0.46596268",
"0.46595505",
"0.46509847",
"0.46362096",
"0.46295586",
"0.4622732",
"0.46210712",
"0.46190825",
"0.46104044",
"0.46097323",
"0.46088728",
"0.46019995",
"0.4600836",
"0.45983732",
"0.45982197",
"0.45971125",
"0.4591601",
"0.4587983",
"0.4576737",
"0.4565739",
"0.45629475",
"0.4551056",
"0.45503482",
"0.45452726",
"0.45409393",
"0.45243233",
"0.45155007",
"0.45105013",
"0.45099956",
"0.4509101",
"0.45063233",
"0.45045635",
"0.44934952",
"0.44932306",
"0.4487627",
"0.4485864",
"0.4485051",
"0.4480689",
"0.44781882",
"0.44692045",
"0.44598013",
"0.4455939",
"0.4454889",
"0.44541255",
"0.444965",
"0.4449336"
] | 0.8289592 | 0 |
ReadOrMakeHostUUID looks for a hostid file in the data dir. If present, returns the UUID from it, otherwise generates one | func ReadOrMakeHostUUID(dataDir string) (string, error) {
id, err := ReadHostUUID(dataDir)
if err == nil {
return id, nil
}
if !trace.IsNotFound(err) {
return "", trace.Wrap(err)
}
// Checking error instead of the usual uuid.New() in case uuid generation
	// fails due to not enough randomness. It's been known to happen when
// Teleport starts very early in the node initialization cycle and /dev/urandom
// isn't ready yet.
rawID, err := uuid.NewRandom()
if err != nil {
return "", trace.BadParameter("" +
"Teleport failed to generate host UUID. " +
"This may happen if randomness source is not fully initialized when the node is starting up. " +
"Please try restarting Teleport again.")
}
id = rawID.String()
if err = WriteHostUUID(dataDir, id); err != nil {
return "", trace.Wrap(err)
}
return id, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func ReadHostUUID(dataDir string) (string, error) {\n\tout, err := ReadPath(filepath.Join(dataDir, HostUUIDFile))\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrPermission) {\n\t\t\t//do not convert to system error as this loses the ability to compare that it is a permission error\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn \"\", trace.ConvertSystemError(err)\n\t}\n\tid := strings.TrimSpace(string(out))\n\tif id == \"\" {\n\t\treturn \"\", trace.NotFound(\"host uuid is empty\")\n\t}\n\treturn id, nil\n}",
"func UUIDFile(fpath string) (string, error) {\n\n\t_, err := os.Stat(fpath)\n\tif err != nil && !os.IsExist(err) {\n\t\tkey := uuid.New().String()\n\t\tif err := ioutil.WriteFile(fpath, []byte(key), 0777); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn key, nil\n\t}\n\n\tfp, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer fp.Close()\n\tdata, err := ioutil.ReadAll(fp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkey := string(data)\n\tif _, err := uuid.Parse(key); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn key, nil\n}",
"func HostUUIDExistsLocally(dataDir string) bool {\n\t_, err := ReadHostUUID(dataDir)\n\treturn err == nil\n}",
"func WriteHostUUID(dataDir string, id string) error {\n\terr := os.WriteFile(filepath.Join(dataDir, HostUUIDFile), []byte(id), os.ModeExclusive|0400)\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrPermission) {\n\t\t\t//do not convert to system error as this loses the ability to compare that it is a permission error\n\t\t\treturn err\n\t\t}\n\t\treturn trace.ConvertSystemError(err)\n\t}\n\treturn nil\n}",
"func readMachineID() []byte {\n\tid := make([]byte, 3)\n\tif hostname, err := os.Hostname(); err == nil {\n\t\thw := md5.New()\n\t\thw.Write([]byte(hostname))\n\t\tcopy(id, hw.Sum(nil))\n\t} else {\n\t\t// Fallback to rand number if machine id can't be gathered\n\t\tif _, randErr := rand.Reader.Read(id); randErr != nil {\n\t\t\tpanic(fmt.Errorf(\"Cannot get hostname nor generate a random number: %v; %v\", err, randErr))\n\t\t}\n\t}\n\treturn id\n}",
"func GetUUID() string {\n\tuuid, _ := ioutil.ReadFile(AppPath.UUIDFile)\n\treturn string(bytes.TrimSpace(uuid))\n}",
"func (this *Actions) generateUid(config *Config, hostname string) string{\n uuidNew := uuid.NewV5(uuid.NewV1(), hostname).String()\n uuid_path := config.GetValue(\"basic\", \"uuid_path\")\n log.Info(\"The new uuid is : \" + uuidNew)\n file, error := os.OpenFile(uuid_path, os.O_RDWR|os.O_CREATE, 0622)\n if error != nil {\n log.Error(\"Open uuid file in \"+ uuid_path +\" failed.\" + error.Error())\n }\n _,err := file.WriteString(uuidNew)\n if err != nil {\n log.Error(\"Save uuid file in \"+ uuid_path +\" failed.\" + err.Error())\n }\n file.Close()\n return uuidNew\n}",
"func getHostFromUUID(id string) (*model.Host, error) {\n\thosts, err := driver.GetHosts()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, host := range *hosts {\n\t\tif host.UUID == id {\n\t\t\t// Host Matches\n\t\t\tlog.Tracef(\"current host matches with id=%s\", id)\n\t\t\treturn host, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"no host found with id %s\", id)\n}",
"func GetHostID() string {\n\tif cachedHostID != \"\" {\n\t\treturn cachedHostID\n\t}\n\n\tecsMetadataURI := os.Getenv(\"ECS_CONTAINER_METADATA_URI_V4\")\n\tif ecsMetadataURI != \"\" {\n\t\thostID, err := getHostIDFromECS(ecsMetadataURI + \"/task\")\n\t\tif err == nil {\n\t\t\tcachedHostID = hostID\n\t\t\treturn cachedHostID\n\t\t}\n\n\t\tfmt.Fprintf(os.Stderr, \"Failed to get task ARN from ECS metadata v4 endpoint: %v\\n\", err)\n\t}\n\n\tecsMetadataURI = os.Getenv(\"ECS_CONTAINER_METADATA_URI\")\n\tif ecsMetadataURI != \"\" {\n\t\thostID, err := getHostIDFromECS(ecsMetadataURI + \"/task\")\n\t\tif err == nil {\n\t\t\tcachedHostID = hostID\n\t\t\treturn cachedHostID\n\t\t}\n\n\t\tfmt.Fprintf(os.Stderr, \"Failed to get task ARN from ECS metadata v3 endpoint: %v\\n\", err)\n\t}\n\n\thostID, errECS := getHostIDFromECS(\"http://169.254.170.2/v2/metadata\")\n\tif errECS == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\thostID, errEC2 := getHostIDFromEC2()\n\tif errEC2 == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\thostID, errIF := getHostIDFromInterfaces()\n\tif errIF == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\thostID, errRand := getRandomHostID()\n\tif errRand == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"Failed to get task ARN from ECS metadata v2 endpoint: %v\\n\", errECS)\n\tfmt.Fprintf(os.Stderr, \"Failed to get instance ID from EC2 metadata endpoint: %v\\n\", errEC2)\n\tfmt.Fprintf(os.Stderr, \"Failed to get IP address from network interface: %v\\n\", errIF)\n\tfmt.Fprintf(os.Stderr, \"Failed to get random host ID: %v\\n\", errRand)\n\tpanic(\"Unable to obtain a valid host ID\")\n}",
"func readMachineID() []byte {\n\tid := make([]byte, 3)\n\thid, err := readPlatformMachineID()\n\tif err != nil || len(hid) == 0 {\n\t\thid, err = os.Hostname()\n\t}\n\tif err == nil && len(hid) != 0 {\n\t\thw := md5.New()\n\t\thw.Write([]byte(hid))\n\t\tcopy(id, hw.Sum(nil))\n\t} else {\n\t\t// Fallback to rand number if machine id can't be gathered\n\t\tif _, randErr := rand.Reader.Read(id); randErr != nil {\n\t\t\tpanic(fmt.Errorf(\"xid: cannot get hostname nor generate a random number: %v; %v\", err, randErr))\n\t\t}\n\t}\n\treturn id\n}",
"func readMachineId() []byte {\n\tvar sum [3]byte\n\tid := sum[:]\n\thostname, err1 := os.Hostname()\n\tif err1 != nil {\n\t\tn := uint32(time.Now().UnixNano())\n\t\tsum[0] = byte(n >> 0)\n\t\tsum[1] = byte(n >> 8)\n\t\tsum[2] = byte(n >> 16)\n\t\treturn id\n\t}\n\thw := md5.New()\n\thw.Write([]byte(hostname))\n\tcopy(id, hw.Sum(nil))\n\treturn id\n}",
"func (c *Config) getRandomId() (string, error) {\n\tb, err := ioutil.ReadFile(c.ProcBootId)\n\tif err != nil {\n\t\tglog.Errorf(\"fail to open %s: %q\", c.ProcBootId, err)\n\t\treturn \"\", err\n\t}\n\trandomId := string(b)\n\trandomId = strings.Trim(randomId, \"\\n\")\n\tglog.V(2).Infof(\"RandomId: %q\", randomId)\n\treturn randomId, nil\n\n}",
"func HardwareUUID() (string, error) {\n\t/*\n\t\tSample output of 'wmic path Win32_ComputerSystemProduct get uuid'\n\n\t\tUUID\n\t\t4219B2F5-C25F-6AF2-573C-35B0DF557236\n\t*/\n\tresult, err := readAndParseFromCommandLine(hardwareUUIDCmd)\n\tif err != nil {\n\t\treturn \"-1\", err\n\t}\n\thardwareUUID := \"\"\n\tif len(result) > 1 {\n\t\t// remove all spaces from the second line as that line consists hardware uuid\n\t\tre := regexp.MustCompile(\"\\\\s|\\\\r\")\n\t\thardwareUUID = re.ReplaceAllString(result[1], \"\")\n\t}\n\treturn hardwareUUID, nil\n}",
"func getHostId() (uint64, error) {\n\ta := getLocalIP()\n\tip := (uint64(a[0]) << 24) + (uint64(a[1]) << 16) + (uint64(a[2]) << 8) + uint64(a[3])\n\treturn ip % MaxHostId, nil\n}",
"func (o *NetworkLicenseFile) GetHostId() string {\n\tif o == nil || o.HostId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.HostId\n}",
"func GetHostFile() (*hostess.Hostfile, []error) {\n\n\t// prep for refactor\n\t// capture duplicate localhost here\n\t// TODO need a better solution, this is a hack\n\thf, errs := hostess.LoadHostfile()\n\n\tfor _, err := range errs {\n\n\t\t// auto-fixing hostfile problems.\n\t\tif err.Error() == \"Duplicate hostname entry for localhost -> ::1\" {\n\t\t\t_, err = BackupHostFile(hf)\n\t\t\tif err != nil {\n\t\t\t\treturn hf, []error{errors.New(\"Could not back up hostfile.\")}\n\t\t\t}\n\n\t\t\t// fix the duplicate\n\t\t\tinput, err := ioutil.ReadFile(hf.Path)\n\t\t\tif err != nil {\n\t\t\t\treturn hf, []error{err}\n\t\t\t}\n\n\t\t\tlines := strings.Split(string(input), \"\\n\")\n\t\t\tfor i, line := range lines {\n\t\t\t\t// if the line looks something like this then it's\n\t\t\t\t// probably the fault of hostess on a previous run and\n\t\t\t\t// safe to fix.\n\t\t\t\tif strings.Contains(line, \"::1 localhost localhost\") {\n\t\t\t\t\tlines[i] = \"::1 localhost\"\n\t\t\t\t}\n\t\t\t}\n\n\t\t\toutput := strings.Join(lines, \"\\n\")\n\t\t\terr = ioutil.WriteFile(hf.Path, []byte(output), 0644)\n\t\t\tif err != nil {\n\t\t\t\treturn hf, []error{err}\n\t\t\t}\n\n\t\t\treturn hostess.LoadHostfile()\n\t\t}\n\n\t}\n\n\treturn hf, errs\n}",
"func UDID() string {\n\tf, err := os.Open(\"/dev/urandom\")\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to get /dev/urandom! %s\", err))\n\t}\n\tb := make([]byte, 16)\n\t_, err = f.Read(b)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to read 16 bytes from /dev/urandom! %s\", err))\n\t}\n\tf.Close()\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])\n}",
"func (wmid *WzMachineIDUtil) setupMachineId() {\n\tsystemdMidFPath := \"/etc/machine-id\"\n\tif wmid.filePath == \"\" {\n\t\twmid.filePath = systemdMidFPath\n\t}\n\tmid, err := ioutil.ReadFile(wmid.filePath)\n\tif err != nil {\n\t\twmid.GetLogger().Debugf(\"File %s was not found\", wmid.filePath)\n\t\tmid, err = ioutil.ReadFile(systemdMidFPath)\n\t\tif err != nil {\n\t\t\twmid.GetLogger().Debugf(\"This system has no /etc/machine-id file, creating a replacement.\")\n\n\t\t\thasher := md5.New()\n\t\t\t_, err := io.WriteString(hasher, wzlib.MakeJid())\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tmid = []byte(fmt.Sprintf(\"%x\", hasher.Sum(nil)))\n\t\t}\n\t\tif wmid.filePath != systemdMidFPath {\n\t\t\tif err := ioutil.WriteFile(wmid.filePath, mid, 0644); err != nil {\n\t\t\t\twmid.GetLogger().Errorf(\"Unable to duplicate machine id: %s\", err.Error())\n\t\t\t}\n\t\t}\n\t}\n\twmid.machineid = strings.TrimSpace(string(mid))\n}",
"func GetClientID() (string, error) {\n\tfn := \"clientid\" // File Name\n\tif _, err := os.Stat(fn); os.IsNotExist(err) {\n\t\t// File does not exists, create a new uuid\n\t\tuuid := uuid.NewV4()\n\t\tuuidStr := uuid.String()\n\t\tlog.Println(\"Created new Client ID.\", uuidStr)\n\t\terr = ioutil.WriteFile(fn, []byte(uuidStr), 0666)\n\t\tif err != nil {\n\t\t\treturn uuidStr, err\n\t\t}\n\t\treturn uuidStr, nil\n\t}\n\t// Read the uuid from the file\n\tdata, err := ioutil.ReadFile(fn)\n\tif err != nil {\n\t\tlog.Println(\"Failed to read the Client ID file. Attempting to recreate it.\", err)\n\t\tuuid := uuid.NewV4()\n\t\tuuidStr := uuid.String()\n\t\tlog.Println(\"Created new Client ID.\", uuidStr)\n\t\terr = ioutil.WriteFile(fn, []byte(uuidStr), 0666)\n\t\tif err != nil {\n\t\t\treturn uuidStr, err\n\t\t}\n\t\treturn uuidStr, nil\n\t}\n\treturn string(data), nil\n}",
"func defaultHostId(p peer.ID, prefix string) string {\n\tif os.Getenv(\"HOST_ID\") == \"\" {\n\t\treturn fmt.Sprintf(\"%s-%s\", prefix, shortID(p))\n\t}\n\treturn fmt.Sprintf(\"%s-%s-%s\", prefix, os.Getenv(\"HOST_ID\"), shortID(p))\n}",
"func (a *Agent) makeNodeID() (string, error) {\n\t// If they've disabled host-based IDs then just make a random one.\n\tif a.config.DisableHostNodeID {\n\t\treturn a.makeRandomID()\n\t}\n\n\t// Try to get a stable ID associated with the host itself.\n\tinfo, err := host.Info()\n\tif err != nil {\n\t\ta.logger.Printf(\"[DEBUG] agent: Couldn't get a unique ID from the host: %v\", err)\n\t\treturn a.makeRandomID()\n\t}\n\n\t// Make sure the host ID parses as a UUID, since we don't have complete\n\t// control over this process.\n\tid := strings.ToLower(info.HostID)\n\tif _, err := uuid.ParseUUID(id); err != nil {\n\t\ta.logger.Printf(\"[DEBUG] agent: Unique ID %q from host isn't formatted as a UUID: %v\",\n\t\t\tid, err)\n\t\treturn a.makeRandomID()\n\t}\n\n\t// Hash the input to make it well distributed. The reported Host UUID may be\n\t// similar across nodes if they are on a cloud provider or on motherboards\n\t// created from the same batch.\n\tbuf := sha512.Sum512([]byte(id))\n\tid = fmt.Sprintf(\"%08x-%04x-%04x-%04x-%12x\",\n\t\tbuf[0:4],\n\t\tbuf[4:6],\n\t\tbuf[6:8],\n\t\tbuf[8:10],\n\t\tbuf[10:16])\n\n\ta.logger.Printf(\"[DEBUG] agent: Using unique ID %q from host as node ID\", id)\n\treturn id, nil\n}",
"func Read(args ...string) (*UUID, error) {\n\tfpath := sfFilePath(args)\n\tf, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tdata := make([]byte, UUIDHexLen+8)\n\tn, err := f.Read(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n < UUIDHexLen {\n\t\treturn nil, fmt.Errorf(\"File '%s' is too small\", fpath)\n\t}\n\tdata = data[:n]\n\tuuid, err := Decode(string(data))\n\tif err == nil {\n\t\tnc := &cache{uuid: *uuid, filePath: fpath, validationTime: time.Now().Add(ValidationTimePeriod)}\n\t\tatomic.StorePointer(¤t, unsafe.Pointer(nc))\n\t}\n\treturn uuid, err\n}",
"func (b *Broker) readIDFromFile(home, filepath string) (id string, err error) {\n\t_filepath := fmt.Sprintf(\"%v%v%v\", home, string(os.PathSeparator), filepath)\n\t_bytes, err := ioutil.ReadFile(_filepath)\n\tif err != nil {\n\t\treturn\n\t}\n\tid = string(_bytes)\n\treturn\n}",
"func GetHostUUID(nbmaster string, httpClient *http.Client, jwt string, host string) string {\r\n fmt.Printf(\"\\nGet the UUID of host %s...\\n\", host)\r\n\r\n uri := \"https://\" + nbmaster + \":\" + port + \"/netbackup/config/hosts\";\r\n\r\n request, _ := http.NewRequest(http.MethodGet, uri, nil)\r\n query := request.URL.Query()\r\n query.Add(\"filter\", \"hostName eq '\" + host + \"'\")\r\n request.URL.RawQuery = query.Encode()\r\n\r\n request.Header.Add(\"Authorization\", jwt);\r\n request.Header.Add(\"Accept\", contentTypeV3);\r\n\r\n response, err := httpClient.Do(request)\r\n\r\n hostUuid := \"\"\r\n if err != nil {\r\n fmt.Printf(\"The HTTP request failed with error: %s\\n\", err)\r\n panic(\"Unable to get the host UUID\")\r\n } else {\r\n if response.StatusCode == 200 {\r\n data, _ := ioutil.ReadAll(response.Body)\r\n var obj interface{}\r\n json.Unmarshal(data, &obj)\r\n response := obj.(map[string]interface{})\r\n hosts := response[\"hosts\"].([]interface{})\r\n hostUuid = ((hosts[0].(map[string]interface{}))[\"uuid\"]).(string)\r\n fmt.Printf(\"Host UUID: %s\\n\", hostUuid);\r\n } else {\r\n printErrorResponse(response)\r\n }\r\n }\r\n\r\n return hostUuid\r\n}",
"func getHostFile() (string, error) {\n\tpaltform := runtime.GOOS\n\tif hostFile, ok := pathMap[paltform]; ok {\n\t\treturn hostFile, nil\n\t} else {\n\t\treturn \"\", errors.New(\"unsupported PLATFORM!\")\n\t}\n}",
"func GetVendorIDByCPUInfo(path string) (string, error) {\n\tvendorID := \"unknown\"\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn vendorID, err\n\t}\n\tdefer f.Close()\n\n\ts := bufio.NewScanner(f)\n\tfor s.Scan() {\n\t\tif err := s.Err(); err != nil {\n\t\t\treturn vendorID, err\n\t\t}\n\n\t\tline := s.Text()\n\n\t\t// get \"vendor_id\" from first line\n\t\tif strings.Contains(line, \"vendor_id\") {\n\t\t\tattrs := strings.Split(line, \":\")\n\t\t\tif len(attrs) >= 2 {\n\t\t\t\tvendorID = strings.TrimSpace(attrs[1])\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn vendorID, nil\n}",
"func Make(args ...string) (*UUID, error) {\n\tfpath := sfFilePath(args)\n\tu, err := Get(fpath)\n\tif err == nil {\n\t\treturn u, nil\n\t}\n\tperr, ok := err.(*os.PathError)\n\tif ok && perr != nil && perr.Op == \"open\" {\n\t\tif err = WriteNew(fpath); err == nil {\n\t\t\treturn Get(fpath)\n\t\t}\n\t}\n\treturn nil, err\n}",
"func loadHostString() (string, error) {\n\tif hostFile, err := getHostFile(); err == nil {\n\t\tbytes, err := ioutil.ReadFile(hostFile)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn string(bytes), nil\n\t} else {\n\t\treturn \"\", err\n\t}\n\n}",
"func ReadUUID(buffer []byte, offset int) UUID {\n bytes := ReadBytes(buffer, offset, 16)\n return UUIDFromBytes(bytes)\n}",
"func CreateUuidForMonitorData(md MonitorData) string {\n serial := SerializeMonitorData(md)\n h := sha256.New()\n h.Write(serial)\n return fmt.Sprintf(\"%x\", h.Sum(nil))\n}",
"func UUID() (string, error) {\n\tb := make([]byte, 2)\n\n\t_, err := crand.Read(b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%X\", b[0:2]), nil\n}",
"func removeUuidFromFilepath(path string) string {\n\t// UUID has 4 hyphens, so we split into 6 parts. \n\treturn strings.SplitN(filepath.Base(path), \"-\", 6)[5]\n}",
"func ResourceToHostID(res pcommon.Resource) (HostID, bool) {\n\tvar cloudAccount, hostID, provider string\n\n\tattrs := res.Attributes()\n\n\tif attrs.Len() == 0 {\n\t\treturn HostID{}, false\n\t}\n\n\tif attr, ok := attrs.Get(conventions.AttributeCloudAccountID); ok {\n\t\tcloudAccount = attr.Str()\n\t}\n\n\tif attr, ok := attrs.Get(conventions.AttributeHostID); ok {\n\t\thostID = attr.Str()\n\t}\n\n\tif attr, ok := attrs.Get(conventions.AttributeCloudProvider); ok {\n\t\tprovider = attr.Str()\n\t}\n\n\tswitch provider {\n\tcase conventions.AttributeCloudProviderAWS:\n\t\tvar region string\n\t\tif attr, ok := attrs.Get(conventions.AttributeCloudRegion); ok {\n\t\t\tregion = attr.Str()\n\t\t}\n\t\tif hostID == \"\" || region == \"\" || cloudAccount == \"\" {\n\t\t\tbreak\n\t\t}\n\t\treturn HostID{\n\t\t\tKey: HostIDKeyAWS,\n\t\t\tID: fmt.Sprintf(\"%s_%s_%s\", hostID, region, cloudAccount),\n\t\t}, true\n\tcase conventions.AttributeCloudProviderGCP:\n\t\tif cloudAccount == \"\" || hostID == \"\" {\n\t\t\tbreak\n\t\t}\n\t\treturn HostID{\n\t\t\tKey: HostIDKeyGCP,\n\t\t\tID: fmt.Sprintf(\"%s_%s\", cloudAccount, hostID),\n\t\t}, true\n\tcase conventions.AttributeCloudProviderAzure:\n\t\tif cloudAccount == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tid := azureID(attrs, cloudAccount)\n\t\tif id == \"\" {\n\t\t\tbreak\n\t\t}\n\t\treturn HostID{\n\t\t\tKey: HostIDKeyAzure,\n\t\t\tID: id,\n\t\t}, true\n\t}\n\n\tif attr, ok := attrs.Get(conventions.AttributeHostName); ok {\n\t\treturn HostID{\n\t\t\tKey: HostIDKeyHost,\n\t\t\tID: attr.Str(),\n\t\t}, true\n\t}\n\n\treturn HostID{}, false\n}",
"func generateUUID(bd blockdevice.BlockDevice) (string, bool) {\n\tvar ok bool\n\tvar uuidField, uuid string\n\n\t// select the field which is to be used for generating UUID\n\t//\n\t// Serial number is not used directly for UUID generation. This is because serial number is not\n\t// unique in some cloud environments. For example, in GCP the serial number is\n\t// configurable by the --device-name flag while attaching the disk.\n\t// If this flag is not provided, GCP automatically assigns the serial number\n\t// which is unique only to the node. Therefore Serial number is used only in cases\n\t// where the disk has a WWN.\n\t//\n\t// If disk has WWN, a combination of WWN+Serial will be used. This is done because there are cases\n\t// where the disks has same WWN but different serial. It is seen in some storage arrays.\n\t// All the LUNs will have same WWN, but different serial.\n\t//\n\t// PartitionTableUUID is not used for UUID generation in NDM. The only case where the disk has a PartitionTable\n\t// and not partition is when, the user has manually created a partition table without writing any actual partitions.\n\t// This means NDM will have to give its consumers the entire disk, i.e consumers will have access to the sectors\n\t// where partition table is written. If consumers decide to reformat or erase the disk completely the partition\n\t// table UUID is also lost, making NDM unable to identify the disk. Hence, even if a partition table is present\n\t// NDM will rewrite it and create a new GPT table and a single partition. Thus consumers will have access only to\n\t// the partition and the unique data will be stored in sectors where consumers do not have access.\n\n\tswitch {\n\tcase bd.DeviceAttributes.DeviceType == blockdevice.BlockDeviceTypePartition:\n\t\t// The partition entry UUID is used when a partition (/dev/sda1) is processed. The partition UUID should be used\n\t\t// if available, other than the partition table UUID, because multiple partitions can have the same partition table\n\t\t// UUID, but each partition will have a different UUID.\n\t\tklog.Infof(\"device(%s) is a partition, using partition UUID: %s\", bd.DevPath, bd.PartitionInfo.PartitionEntryUUID)\n\t\tuuidField = bd.PartitionInfo.PartitionEntryUUID\n\t\tok = true\n\tcase len(bd.DeviceAttributes.WWN) > 0:\n\t\t// if device has WWN, both WWN and Serial will be used for UUID generation.\n\t\tklog.Infof(\"device(%s) has a WWN, using WWN: %s and Serial: %s\",\n\t\t\tbd.DevPath,\n\t\t\tbd.DeviceAttributes.WWN, bd.DeviceAttributes.Serial)\n\t\tuuidField = bd.DeviceAttributes.WWN +\n\t\t\tbd.DeviceAttributes.Serial\n\t\tok = true\n\tcase len(bd.FSInfo.FileSystemUUID) > 0:\n\t\tklog.Infof(\"device(%s) has a filesystem, using filesystem UUID: %s\", bd.DevPath, bd.FSInfo.FileSystemUUID)\n\t\tuuidField = bd.FSInfo.FileSystemUUID\n\t\tok = true\n\t}\n\n\tif ok {\n\t\tuuid = blockdevice.BlockDevicePrefix + util.Hash(uuidField)\n\t\tklog.Infof(\"generated uuid: %s for device: %s\", uuid, bd.DevPath)\n\t}\n\n\treturn uuid, ok\n}",
"func HostID(nomad *NomadServer, hostname *string) (*Host, error) {\n\thosts, _, err := Hosts(nomad)\n\tif err != nil {\n\t\treturn &Host{}, err\n\t}\n\tfor _, host := range hosts {\n\t\tif *hostname == host.Name {\n\t\t\treturn &host, nil\n\t\t}\n\t}\n\tbuf := log(\"event\", \"node_not_found\", \"hostname\", hostname)\n\treturn &Host{}, errors.New(buf.String())\n\n}",
"func makeRandomHost() (host.Host, *kaddht.IpfsDHT) {\n\tctx := context.Background()\n\tport := 10000 + rand.Intn(10000)\n\n\thost, err := libp2p.New(ctx,\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/0.0.0.0/tcp/%d\", port)),\n\t\tlibp2p.EnableRelay(circuit.OptHop, circuit.OptDiscovery))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Bootstrap the DHT. In the default configuration, this spawns a Background\n\t// thread that will refresh the peer table every five minutes.\n\tdht, err := kaddht.New(ctx, host)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = dht.Bootstrap(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn host, dht\n}",
"func GenerateMonitorID(s string) (string, error) {\n\tvar errEdidCorrupted = errors.New(\"corrupt EDID: \" + s)\n\tif len(s) < 32 || s[:16] != \"00ffffffffffff00\" {\n\t\treturn \"\", errEdidCorrupted\n\t}\n\n\tedid, err := hex.DecodeString(s)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// we only parse EDID 1.3 and 1.4\n\tif edid[18] != 1 || (edid[19] < 3 || edid[19] > 4) {\n\t\treturn \"\", fmt.Errorf(\"unknown EDID version %d.%d\", edid[18], edid[19])\n\t}\n\n\tmanuf := binary.BigEndian.Uint16(edid[8:10])\n\n\t// The first bit is resevered and needs to be zero\n\tif manuf&0x8000 != 0x0000 {\n\t\treturn \"\", errEdidCorrupted\n\t}\n\n\t// Decode the manufacturer 'A' = 0b00001, 'B' = 0b00010, ..., 'Z' = 0b11010\n\tvar manufacturer string\n\tmask := uint16(0x7C00) // 0b0111110000000000\n\tfor i := uint(0); i <= 10; i += 5 {\n\t\tnumber := ((manuf & (mask >> i)) >> (10 - i))\n\t\tmanufacturer += string(byte(number + 'A' - 1))\n\t}\n\n\t// Decode the product and serial number\n\tproduct := binary.LittleEndian.Uint16(edid[10:12])\n\tserial := binary.LittleEndian.Uint32(edid[12:16])\n\n\t// Decode four descriptor blocks\n\tvar displayName, displaySerialNumber string\n\tfor i := 0; i < 4; i++ {\n\t\td := edid[54+i*18 : 54+18+i*18]\n\n\t\t// interesting descriptors start with three zeroes\n\t\tif d[0] != 0 || d[1] != 0 || d[2] != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch d[3] {\n\t\tcase 0xff: // display serial number\n\t\t\tdisplaySerialNumber = strings.TrimSpace(string(d[5:]))\n\t\tcase 0xfc: // display name\n\t\t\tdisplayName = strings.TrimSpace(string(d[5:]))\n\t\t}\n\t}\n\n\tstr := fmt.Sprintf(\"%s-%d-%d-%v-%v\", manufacturer, product, serial, displayName, displaySerialNumber)\n\treturn str, nil\n}",
"func (h *Host) ID() string {\n\tif h.id == \"\" {\n\t\thash := md5.New()\n\t\t_, _ = io.WriteString(hash, h.IP+h.MAC)\n\t\th.id = fmt.Sprintf(\"%x\", hash.Sum(nil))\n\t}\n\n\treturn h.id\n}",
"func defaultID() string {\n\tvar id string\n\n\t/* If we have one statically set, use that plus four random bytes */\n\tif \"\" != staticID {\n\t\tn := strconv.FormatInt(int64(time.Now().Nanosecond()),36)\n\t\tif len(n) > staticIDB36Max {\n\t\t\tn = n[:staticIDB36Max]\n\t\t\t}\n\t\treturn staticID +\"-\"+ n\n\t}\n\n\t/* Look through all the interfaces for one we like */\n\tis, err := net.Interfaces()\n\tif nil != err {\n\t\tlog.Printf(\"Unable to list interfaces: %v\", err)\n\t}\n\tfor _, i := range is {\n\t\t/* Skip docker interfaces */\n\t\t/* TODO: Unhardcode this */\n\t\tif \"docker0\" == i.Name {\n\t\t\tcontinue\n\t\t}\n\t\t/* Skip loopback interfaces */\n\t\tif 0 != (net.FlagLoopback & i.Flags) {\n\t\t\tcontinue\n\t\t}\n\t\t/* Get the addresses for this interface */\n\t\tas, err := i.Addrs()\n\t\tif nil != err {\n\t\t\tlog.Printf(\n\t\t\t\t\"Unable to get addresses for %v: %v\",\n\t\t\t\ti.Name,\n\t\t\t\terr,\n\t\t\t)\n\t\t\tcontinue\n\t\t}\n\t\t/* Use the first address we find */\n\t\tif 0 == len(as) {\n\t\t\tcontinue\n\t\t}\n\t\tid = as[0].String()\n\t}\n\t/* Clean up the address a bit, to make DNS-friendly */\n\tparts := strings.SplitN(id, \"/\", 2)\n\tif 0 == len(parts) { /* Probably didn't find one */\n\t\treturn randomID()\n\t}\n\n\t/* Remove all non-hex characters */\n\tid = strings.Map(\n\t\tfunc(r rune) rune {\n\t\t\t/* Turn all non-hex characters into hyphens */\n\t\t\tif !strings.ContainsRune(\"abcdefABCDEF0123456789\", r) {\n\t\t\t\treturn '-'\n\t\t\t}\n\t\t\treturn r\n\t\t},\n\t\tparts[0],\n\t)\n\t/* Trim leading and trailing -'s, which can happen with IPv6\n\taddresses */\n\treturn strings.Trim(id, \"-\")\n}",
"func (dev VMVolumeDevice) UUID() string {\n\treturn utils.NewUUID5(blockVolumeNsUUID, dev.HostPath)\n}",
"func (s *Store) readID() error {\n\tb, err := ioutil.ReadFile(s.IDPath())\n\tif os.IsNotExist(err) {\n\t\ts.id = 0\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"read file: %s\", err)\n\t}\n\n\tid, err := strconv.ParseUint(string(b), 10, 64)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse id: %s\", err)\n\t}\n\ts.id = id\n\n\ts.Logger.Printf(\"read local node id: %d\", s.id)\n\n\treturn nil\n}",
"func generateUniqueId() string {\n\tcmd := exec.Command(\"/usr/bin/uuidgen\")\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tuuid := out.String()\n\tuuid = strings.Replace(uuid, \"\\n\", \"\", 1)\n\treturn uuid\n}",
"func readInstanceID() string {\n\tconst instanceIDFile = \"/var/lib/cloud/data/instance-id\"\n\tidBytes, err := ioutil.ReadFile(instanceIDFile)\n\tif err != nil {\n\t\tglog.Infof(\"Failed to get instance id from file: %v\", err)\n\t\treturn \"\"\n\t} else {\n\t\tinstanceID := string(idBytes)\n\t\tinstanceID = strings.TrimSpace(instanceID)\n\t\tglog.Infof(\"Get instance id from file: %s\", instanceID)\n\t\treturn instanceID\n\t}\n}",
"func extractUuid(input string) string {\n\treGetID := regexp.MustCompile(`([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})`)\n\tmatchListId := reGetID.FindAllStringSubmatch(input, -1)\n\tif len(matchListId) > 0 && len(matchListId[0]) > 0 {\n\t\treturn matchListId[len(matchListId)-1][1]\n\t}\n\treturn \"\"\n}",
"func GetHostOSDistro() (string, error) {\n\tinitiatorNSPath := iscsiutil.GetHostNamespacePath(HostProcPath)\n\tmountPath := fmt.Sprintf(\"--mount=%s/mnt\", initiatorNSPath)\n\toutput, err := Execute([]string{}, \"nsenter\", mountPath, \"cat\", OsReleasePath)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"failed to read %v on host\", OsReleasePath)\n\t}\n\n\tscanner := bufio.NewScanner(strings.NewReader(string(output)))\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif strings.HasPrefix(line, \"ID=\") {\n\t\t\tosDistro := RemoveNewlines(strings.TrimPrefix(line, \"ID=\"))\n\t\t\treturn strings.ReplaceAll(osDistro, `\"`, \"\"), nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"failed to find ID field in %v\", OsReleasePath)\n}",
"func (h *Harness) UUID(id string) string { return h.uuidG.Get(id) }",
"func makeID() (string, error) {\n\tdata := make([]byte, 32)\n\t_, err := rand.Read(data)\n\tx := sha256.Sum256(data)\n\treturn hex.EncodeToString(x[:]), err\n}",
"func ReadOrInitSessionId(bD *BaseData) (string, error) {\n _, ok := sessions[bD.SessionId]; if !ok {\n bytes := make([]byte, 16)\n if _, err := rand.Read(bytes); err != nil {\n return \"\", err\n }\n sessionId := hex.EncodeToString(bytes)\n sessions[sessionId] = &Data{SessionId: sessionId, CopyAndPaste: make(map[string]bool)}\n return sessionId, nil\n }\n return bD.SessionId, nil\n}",
"func GUIDFromBytes(b []byte) string {\n\t// See Intel EFI specification, Appendix A: GUID and Time Formats\n\t// https://www.intel.de/content/dam/doc/product-specification/efi-v1-10-specification.pdf\n\tvar (\n\t\ttimeLow uint32\n\t\ttimeMid uint16\n\t\ttimeHighAndVersion uint16\n\t\tclockSeqHighAndReserved uint8\n\t\tclockSeqLow uint8\n\t\tnode [6]byte\n\t)\n\ttimeLow = binary.LittleEndian.Uint32(b[0:4])\n\ttimeMid = binary.LittleEndian.Uint16(b[4:6])\n\ttimeHighAndVersion = binary.LittleEndian.Uint16(b[6:8])\n\tclockSeqHighAndReserved = b[8]\n\tclockSeqLow = b[9]\n\tcopy(node[:], b[10:])\n\treturn fmt.Sprintf(\"%08X-%04X-%04X-%02X%02X-%012X\",\n\t\ttimeLow,\n\t\ttimeMid,\n\t\ttimeHighAndVersion,\n\t\tclockSeqHighAndReserved,\n\t\tclockSeqLow,\n\t\tnode)\n}",
"func hostRead(d *schema.ResourceData, m interface{}, params zabbix.Params) error {\n\tapi := m.(*zabbix.API)\n\n\tlog.Debug(\"Lookup of host with params %#v\", params)\n\n\thosts, err := api.HostsGet(params)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(hosts) < 1 {\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\tif len(hosts) > 1 {\n\t\treturn errors.New(\"multiple hosts found\")\n\t}\n\thost := hosts[0]\n\n\tlog.Debug(\"Got host: %+v\", host)\n\n\td.SetId(host.HostID)\n\td.Set(\"name\", host.Name)\n\td.Set(\"host\", host.Host)\n\td.Set(\"proxyid\", host.ProxyID)\n\td.Set(\"enabled\", host.Status == 0)\n\td.Set(\"inventory_mode\", HINV_LOOKUP_REV[host.InventoryMode])\n\n\td.Set(\"interface\", flattenHostInterfaces(host, d, m))\n\td.Set(\"templates\", flattenTemplateIds(host.ParentTemplateIDs))\n\td.Set(\"inventory\", flattenInventory(host))\n\td.Set(\"groups\", flattenHostGroupIds(host.GroupIds))\n\td.Set(\"macro\", flattenMacros(host.UserMacros))\n\td.Set(\"tag\", flattenTags(host.Tags))\n\n\treturn nil\n}",
"func mkSeed(t *testing.T, d Distro, sshKey, hostURL, tdir string, port int) {\n\tt.Helper()\n\n\tdir := filepath.Join(tdir, d.Name, \"seed\")\n\tos.MkdirAll(dir, 0700)\n\n\t// make meta-data\n\t{\n\t\tfout, err := os.Create(filepath.Join(dir, \"meta-data\"))\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\terr = metaDataTempl.Execute(fout, struct {\n\t\t\tID string\n\t\t\tHostname string\n\t\t}{\n\t\t\tID: \"31337\",\n\t\t\tHostname: d.Name,\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\terr = fout.Close()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\t// make user-data\n\t{\n\t\tfout, err := os.Create(filepath.Join(dir, \"user-data\"))\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\terr = userDataTempl.Execute(fout, struct {\n\t\t\tSSHKey string\n\t\t\tHostURL string\n\t\t\tHostname string\n\t\t\tPort int\n\t\t\tInstallPre string\n\t\t\tPassword string\n\t\t}{\n\t\t\tSSHKey: strings.TrimSpace(sshKey),\n\t\t\tHostURL: hostURL,\n\t\t\tHostname: d.Name,\n\t\t\tPort: port,\n\t\t\tInstallPre: d.InstallPre(),\n\t\t\tPassword: securePassword,\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\terr = fout.Close()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\targs := []string{\n\t\t\"-output\", filepath.Join(dir, \"seed.iso\"),\n\t\t\"-volid\", \"cidata\", \"-joliet\", \"-rock\",\n\t\tfilepath.Join(dir, \"meta-data\"),\n\t\tfilepath.Join(dir, \"user-data\"),\n\t}\n\n\tif hackOpenSUSE151UserData(t, d, dir) {\n\t\targs = append(args, filepath.Join(dir, \"openstack\"))\n\t}\n\n\trun(t, tdir, \"genisoimage\", args...)\n}",
"func (d *WindowsDesktopV3) GetHostID() string {\n\treturn d.Spec.HostID\n}",
"func GenerateClientID(confDir string) {\n\tmachineID, err := ioutil.ReadFile(confDir + \"/.machine_id\")\n\tif err != nil {\n\t\tfmt.Println(\"error reading machine id\")\n\t\tmachineID = generateMachineID(confDir)\n\t}\n\tClientID = string(machineID[:len(machineID)-1])\n\tfmt.Println(\"generated ClientID\", ClientID)\n}",
"func uuid() string {\n\tout, err := exec.Command(\"/usr/bin/uuidgen\").Output()\n\tif err != nil {\n\t\tlog.Fatal().\n\t\t\tStr(\"command\", \"/usr/bin/uuidgen\").\n\t\t\tMsg(\"There was an error generating the uuid.\")\n\t}\n\n\t//n := bytes.IndexByte(out, 0)\n\ts := string(out)\n\ts = strings.TrimSpace(s)\n\treturn s\n}",
"func Get() (*HostID, error) {\n\tvar id HostID\n\tvar addrs []string\n\tifs, err := net.Interfaces()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, v := range ifs {\n\t\th := v.HardwareAddr.String()\n\t\tif len(h) > 0 {\n\t\t\taddrs = append(addrs, h)\n\t\t}\n\t}\n\tsort.Strings(addrs) // sort host IDs\n\tif len(addrs) > 0 { // make host IDs unique\n\t\tid.HostID = append(id.HostID, addrs[0])\n\t\tlast := addrs[0]\n\t\tfor i := 1; i < len(addrs); i++ {\n\t\t\tif addrs[i] != last {\n\t\t\t\tid.HostID = append(id.HostID, addrs[i])\n\t\t\t\tlast = addrs[i]\n\t\t\t}\n\t\t}\n\t}\n\tid.OS = GetOS()\n\tid.CPU = GetCPU()\n\treturn &id, nil\n}",
"func GenerateUUID(device string) error {\n\t// for mounting the cloned volume for btrfs, a new UUID has to be generated\n\tcmd := exec.Command(\"btrfstune\", \"-f\", \"-u\", device)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tklog.Errorf(\"btrfs: uuid generate failed for device %s error: %s\", device, string(out))\n\t\treturn err\n\t}\n\tklog.Infof(\"btrfs: generated UUID for the device %s \\n %v\", device, string(out))\n\treturn nil\n}",
"func (b *Broker) createIDFile(home string, filepath string, id string) (err error) {\n\t_filepath := fmt.Sprintf(\"%v%v%v\", home, string(os.PathSeparator), filepath)\n\terr = ioutil.WriteFile(_filepath, []byte(id), 0644)\n\n\treturn\n}",
"func GetUID() string {\n\twd, err := os.Getwd()\n\n\tvar data map[string]interface{}\n\n\tbuff, err := ioutil.ReadFile(wd + \"/package.json\")\n\n\tcheck(err)\n\n\tif err := json.Unmarshal(buff, &data); err != nil {\n\t\tpanic(err)\n\t}\n\n\tuser, err := GetStoredUser()\n\n\tGuard(user)\n\n\tcheck(err)\n\n\tname := data[\"name\"].(string)\n\n\tuid := CreateUID(name, user.Email)\n\n\treturn uid\n}",
"func makeInodeID(path string) uint64 {\n hash := fnv.New64a()\n hash.Write([]byte(path))\n return hash.Sum64()\n}",
"func getHardwareID() string {\n\tif hardwareID != \"\" {\n\t\treturn hardwareID\n\t}\n\taddress := \"\"\n\tinters, err := net.Interfaces()\n\tif err == nil {\n\t\tfor _, inter := range inters {\n\t\t\tif inter.HardwareAddr.String() != \"\" {\n\t\t\t\taddress = inter.HardwareAddr.String()\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif address == \"\" {\n\t\taddress = \"0\"\n\t}\n\tcheck32 := crc32.ChecksumIEEE([]byte(address))\n\tid58 := base58.EncodeBig(nil, big.NewInt(int64(check32)))\n\thardwareID = string(id58)\n\treturn hardwareID\n}",
"func getUUID() string{\n\tresponse,_ := http.Get(BaseUrl+\"/_uuids\")\n\tdefer response.Body.Close()\n\tdecoder := json.NewDecoder(response.Body)\n\terr := decoder.Decode(&uniqueid)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn uniqueid.Uuids[0]\n}",
"func (cli *BaseClient) GetHostLunId(ctx context.Context, hostID, lunID string) (string, error) {\n\thostLunId := \"1\"\n\turl := fmt.Sprintf(\"/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%s\", hostID)\n\tresp, err := cli.Get(ctx, url, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcode := int64(resp.Error[\"code\"].(float64))\n\tif code != 0 {\n\t\treturn \"\", fmt.Errorf(\"Get hostLunId of host %s, lun %s error: %d\", hostID, lunID, code)\n\t}\n\n\trespData := resp.Data.([]interface{})\n\tfor _, i := range respData {\n\t\thostLunInfo := i.(map[string]interface{})\n\t\tif hostLunInfo[\"ID\"].(string) == lunID {\n\t\t\tvar associateData map[string]interface{}\n\t\t\tassociateDataBytes := []byte(hostLunInfo[\"ASSOCIATEMETADATA\"].(string))\n\t\t\terr := json.Unmarshal(associateDataBytes, &associateData)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", nil\n\t\t\t}\n\t\t\thostLunIdFloat, ok := associateData[\"HostLUNID\"].(float64)\n\t\t\tif ok {\n\t\t\t\thostLunId = strconv.FormatInt(int64(hostLunIdFloat), 10)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn hostLunId, nil\n}",
"func parseUUID(src string) (dst [16]byte, err error) {\n\tswitch len(src) {\n\tcase 36:\n\t\tsrc = src[0:8] + src[9:13] + src[14:18] + src[19:23] + src[24:]\n\tcase 32:\n\t\t// dashes already stripped, assume valid\n\tdefault:\n\t\t// assume invalid.\n\t\treturn dst, fmt.Errorf(\"cannot parse UUID %v\", src)\n\t}\n\n\tbuf, err := hex.DecodeString(src)\n\tif err != nil {\n\t\treturn dst, err\n\t}\n\n\tcopy(dst[:], buf)\n\treturn dst, err\n}",
"func DbFindHost(id int) Host {\n\tfor _, h := range hosts {\n\t\tif h.Id == id {\n\t\t\treturn h\n\t\t}\n\t}\n\t// empty\n\treturn Host{}\n}",
"func (o *NetworkLicenseFile) GetHostIdOk() (*string, bool) {\n\tif o == nil || o.HostId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.HostId, true\n}",
"func randomHost() (host string, original string) {\n\ts := rand.NewSource(time.Now().UnixNano())\n\tr := rand.New(s)\n\tv := r.Intn(len(hosts))\n\thost = keys[v]\n\toriginal = hosts[host]\n\treturn\n}",
"func makeUserHost(listenPort int, target string, randseed int64) (host.Host, error) {\n\n\t// seed == 0, real cryptographic randomness\n\t// else, deterministic randomness source to make generated keys stay the same across multiple runs\n\tvar r io.Reader\n\tif randseed == 0 {\n\t\tr = rand.Reader\n\t} else {\n\t\tr = mrand.New(mrand.NewSource(randseed))\n\t}\n\n\t// Generate a key pair for this host. We will use it to obtain a valid host ID.\n\tpriv, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Generate the libp2p host\n\tbasicHost, err := libp2p.New(\n\t\tcontext.Background(),\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/127.0.0.1/tcp/%d\", listenPort)),\n\t\tlibp2p.Identity(priv),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfmt.Printf(\"I am user node %s\\n\", basicHost.ID().Pretty())\n\tfmt.Printf(\"\\nNow run this on a different terminal in the user directory in order to connect to the same region node:\\ngo run *.go -port %d -peer %s\\n\\n\", listenPort+1, target)\n\n\treturn basicHost, nil\n}",
"func newUID() ([]byte, error) {\n\t// uuid := make([]byte, 16)\n\t// n, err := io.ReadFull(rand.Reader, uuid)\n\t// if n != len(uuid) || err != nil {\n\t// \treturn nil, err\n\t// }\n\t// // variant bits; see section 4.1.1\n\t// uuid[8] = uuid[8]&^0xc0 | 0x80\n\t// // version 4 (pseudo-random); see section 4.1.3\n\t// uuid[6] = uuid[6]&^0xf0 | 0x40\n\t// return []byte(fmt.Sprintf(\"%x-%x-%x-%x-%x\", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])), nil\n\treturn []byte(uniuri.New()), nil\n}",
"func generateUUID() string {\n\tbuf := make([]byte, 16)\n\tif _, err := cr.Read(buf); err != nil {\n\t\tpanic(fmt.Errorf(\"failed to read random bytes: %w\", err))\n\t}\n\n\treturn fmt.Sprintf(\"%08x-%04x-%04x-%04x-%12x\",\n\t\tbuf[0:4],\n\t\tbuf[4:6],\n\t\tbuf[6:8],\n\t\tbuf[8:10],\n\t\tbuf[10:16])\n}",
"func GUID() (guid string) {\n\ttm := time.Now().UTC()\n\tt := tm.UnixNano() / 1000000\n\tfileDate := strconv.Itoa(tm.Year()) + \".\" + tm.Month().String() + \".\" + strconv.Itoa(tm.Day()) + \".\" + strconv.Itoa(tm.Hour()) + \".\" + strconv.Itoa(tm.Minute()) + \".\" + strconv.Itoa(tm.Second()) + \".\" + strconv.FormatInt(t, 10)\n\tguid = fileDate\n\treturn\n}",
"func (m *Attachment) GenerateUUID() (string, error) {\n\tout, err := exec.Command(\"uuidgen\").Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strings.Replace(strings.Trim(string(out), \"\\n\"), \"-\", \"_\", -1), nil\n}",
"func GetXenIdFromCloudInit() (string, error) {\n\tif runtime.GOOS == \"windows\" {\n\t\treturn \"\", errors.New(\"cloud init is not supported on windows\")\n\t}\n\tinstanceIdPath := \"/var/lib/cloud/data/instance-id\"\n\tdata, err := ioutil.ReadFile(instanceIdPath)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to read from instance id path\")\n\t}\n\t// remove new line characters\n\txenId := strings.TrimSpace(string(data))\n\txenId = strings.ToLower(xenId)\n\t// the fallback datasource is iid-datasource-none when it does not exist\n\t// https://cloudinit.readthedocs.io/en/latest/topics/datasources/fallback.html\n\tif xenId == \"iid-datasource-none\" || xenId == \"nocloud\" {\n\t\treturn \"\", errors.New(\"invalid instance id found\")\n\t}\n\treturn xenId, nil\n}",
"func uuid() []byte {\n\tuuid := make([]byte, 16)\n\t_, err := rand.Read(uuid)\n\tif err != nil {\n\t\tpanic(\"cue/hosted: uuid() failed to read random bytes\")\n\t}\n\n\t// The following bit twiddling is outlined in RFC 4122. In short, it\n\t// identifies the UUID as a v4 random UUID.\n\tuuid[6] = (4 << 4) | (0xf & uuid[6])\n\tuuid[8] = (8 << 4) | (0x3f & uuid[8])\n\treturn uuid\n}",
"func GetUuidForDB() string {\n\treturn ulid.Make().String()\n}",
"func HostFromDir(hostdir string) (*Host, error) {\n\tconfPath := path.Join(hostdir, hostConfFile)\n\n\th := &Host{}\n\terr := loadJson(h, confPath)\n\tif err != nil {\n\t\treturn nil, microerror.Mask(err)\n\t}\n\n\th.hostDir, err = os.Open(hostdir)\n\tif err != nil {\n\t\treturn nil, microerror.Mask(err)\n\t}\n\n\tfi, err := os.Stat(confPath)\n\tif err != nil {\n\t\treturn nil, microerror.Mask(err)\n\t}\n\th.lastModTime = fi.ModTime()\n\n\treturn h, nil\n}",
"func createCidFile(ctx context.Context, tempDir string, repoSlug string) (string, func(), error) {\n\t// Find a location that we can use for a cidfile, which will contain the\n\t// container ID that is used below. We can then use this to remove the\n\t// container on a successful run, rather than leaving it dangling.\n\tcidFile, err := os.CreateTemp(tempDir, repoSlug+\"-container-id\")\n\tif err != nil {\n\t\treturn \"\", nil, errors.Wrap(err, \"Creating a CID file failed\")\n\t}\n\n\t// However, Docker will fail if the cidfile actually exists, so we need\n\t// to remove it. Because Windows can't remove open files, we'll first\n\t// close it, even though that's unnecessary elsewhere.\n\tcidFile.Close()\n\tif err = os.Remove(cidFile.Name()); err != nil {\n\t\treturn \"\", nil, errors.Wrap(err, \"removing cidfile\")\n\t}\n\n\t// Since we went to all that effort, we can now defer a function that\n\t// uses the cidfile to clean up after this function is done.\n\tcleanup := func() {\n\t\tcid, err := os.ReadFile(cidFile.Name())\n\t\t_ = os.Remove(cidFile.Name())\n\t\tif err == nil {\n\t\t\tctx, cancel := context.WithTimeout(ctx, 2*time.Second)\n\t\t\tdefer cancel()\n\t\t\t_ = exec.CommandContext(ctx, \"docker\", \"rm\", \"-f\", \"--\", string(cid)).Run()\n\t\t}\n\t}\n\n\treturn cidFile.Name(), cleanup, nil\n}",
"func (fs *FS) GetId() (int64, error) {\n\tpath := fmt.Sprintf(\"%s/id\", fs.path)\n\tvar now int64 = 0\n\tif _, err := os.Stat(path); err == nil {\n\t\t// file exists\n\t\tf, err := os.OpenFile(path, os.O_RDWR, 0644)\n\t\tdefer f.Close()\n\t\tif err != nil { return -1, err }\n\n\t\tbyt, err := ioutil.ReadAll(f)\n\t\tif err != nil { return -1, err }\n\t\tstr := string(byt)\n\n\t\twas, err := strconv.ParseInt(str, 16, 64)\n\t\tif err != nil { return -1, err }\n\n\t\tnow = was + 1\n\t}\n\n\terr := os.MkdirAll(filepath.Dir(path), 0777)\n\tf, err := os.Create(path)\n\tdefer f.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tout := fmt.Sprintf(\"%x\", now)\n\td := []byte(out)\n\t_, err = f.Write(d)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn now, nil\n}",
"func libc_getuid() int32",
"func hostsFile() string {\n\tu, err := user.Current()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdir := fmt.Sprintf(\"%s/Applications/nogame/\", u.HomeDir)\n\terr = os.MkdirAll(dir, 0777)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfilename := fmt.Sprintf(\"%shosts.txt\", dir)\n\tfile, err := os.Open(filename)\n\n\tif err != nil {\n\t\tfile, err = os.Create(filename)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\treturn file.Name()\n}",
"func (pe *ProgramExt) UUID() string {\n\treturn fmt.Sprintf(\"%s_%s\", pe.Manager, pe.Config)\n}",
"func GetSeedFromFile(seedFile string) (string, error) {\n\tf, err := os.Open(seedFile)\n\tif err != nil {\n\t\treturn \"\", ErrCombind(ErrorOpenSeedFile, err)\n\t}\n\tdefer f.Close()\n\n\tvar buf bytes.Buffer\n\t_, err = io.Copy(&buf, f)\n\tif err != nil {\n\t\treturn \"\", ErrCombind(ErrorToReadSeedFile, err)\n\t}\n\tseed := strings.Trim(strings.Split(buf.String(), \":\")[1], \"\\n\")\n\treturn seed, nil\n}",
"func uniqueHandle(client interfaces.Client) (interfaces.Client, error) {\n\tfile, err := os.Open(\"users.txt\")\n\tif err != nil {\n\t\treturn client, err\n\t}\n\tdefer file.Close()\n\treader := bufio.NewReader(file)\n\tfor {\n\t\tline, err := reader.ReadString('\\n')\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn client, err\n\t\t}\n\t\thandle, _ := helpers.SplitOnFirstDelim(',', line)\n\t\tif client.GetHandle() == handle {\n\t\t\treturn client, errors.New(\"Handle is not unique\")\n\t\t}\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn client, err\n}",
"func UUID(db *sql.DB) (domain.ID, error) {\n\tvar id domain.ID\n\trow := db.QueryRow(`SELECT uuid_generate_v4()`)\n\tif err := row.Scan(&id); err != nil {\n\t\treturn id, errors.Database(errors.ServerErrorMessage, err, \"trying to populate UUID\")\n\t}\n\treturn id, nil\n}",
"func getBootID() (string, error) {\n\tcurrentBootIDBytes, err := ioutil.ReadFile(\"/proc/sys/kernel/random/boot_id\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(string(currentBootIDBytes)), nil\n}",
"func MakeIdentifier() string {\n\tb := make([]byte, 12)\n\t_, err := io.ReadFull(rand.Reader, b)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%x\", b)\n}",
"func (o *NetworkLicenseFile) HasHostId() bool {\n\tif o != nil && o.HostId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func generateUUID() string {\n\tbuf := make([]byte, 16)\n\tif _, err := crand.Read(buf); err != nil {\n\t\tpanic(fmt.Errorf(\"failed to read random bytes: %v\", err))\n\t}\n\n\treturn fmt.Sprintf(\"%08x-%04x-%04x-%04x-%12x\",\n\t\tbuf[0:4],\n\t\tbuf[4:6],\n\t\tbuf[6:8],\n\t\tbuf[8:10],\n\t\tbuf[10:16])\n}",
"func (ps *PS) UUID() uint64 {\n\tif ps.uuid != 0 {\n\t\treturn ps.uuid\n\t}\n\t// assume the uuid is derived from boot ID and process start time\n\tps.uuid = (bootid.Read() << 30) + uint64(ps.PID) | uint64(ps.StartTime.UnixNano())\n\tmaj, _, patch := windows.RtlGetNtVersionNumbers()\n\tif maj >= 10 && patch >= 1507 {\n\t\tseqNum := querySequenceNumber(ps.PID)\n\t\t// prefer the most robust variant of the uuid which uses the\n\t\t// process sequence number obtained from the process object\n\t\tif seqNum != 0 {\n\t\t\tps.uuid = (bootid.Read() << 30) | seqNum\n\t\t}\n\t}\n\treturn ps.uuid\n}",
"func (d *Device) GetHost(ctx context.Context, hostID int) (*Host, error) {\n\tspath := fmt.Sprintf(\"/host/%d\", hostID)\n\n\treq, err := d.newRequest(ctx, \"GET\", spath, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(ErrCreateRequest+\": %w\", err)\n\t}\n\n\thost := &Host{}\n\tif err = d.requestWithRetry(req, host, DefaultHTTPRetryCount); err != nil {\n\t\treturn nil, fmt.Errorf(ErrRequestWithRetry+\": %w\", err)\n\t}\n\n\treturn host, nil\n}",
"func (s *DatabaseServerV3) GetHostID() string {\n\treturn s.Spec.HostID\n}",
"func pid(instance int) (pid string, err error) {\n file, err := os.Open(pidFileName(instance))\n if err != nil {\n return\n }\n\n defer file.Close()\n\n scanner := bufio.NewScanner(file)\n scanner.Scan()\n pid = scanner.Text()\n return\n}",
"func GetPidFrom(pidFilePath string) (pid int, err error) {\n\n\tif pidFilePath == \"\" {\n\t\tpidFilePath = types.MosnPidDefaultFileName\n\t}\n\n\tvar pf io.Reader\n\tif pf, err = os.Open(pidFilePath); err != nil {\n\t\treturn\n\t}\n\n\tvar bs []byte\n\tif bs, err = ioutil.ReadAll(pf); err != nil {\n\t\treturn\n\t}\n\n\tpid, err = strconv.Atoi(strings.TrimRight(string(bs), \"\\n\"))\n\treturn\n}",
"func (rhelpf LinuxPlatformFlavor) getHostUniqueFlavor() ([]cm.Flavor, error) {\n\tlog.Trace(\"flavor/types/linux_platform_flavor:getHostUniqueFlavor() Entering\")\n\tdefer log.Trace(\"flavor/types/linux_platform_flavor:getHostUniqueFlavor() Leaving\")\n\n\tvar errorMessage = \"Error during creation of HOST_UNIQUE flavor\"\n\tvar err error\n\tvar hostUniquePcrs = rhelpf.getPcrList(cf.FlavorPartHostUnique)\n\tvar includeEventLog = rhelpf.eventLogRequired(cf.FlavorPartHostUnique)\n\tvar allPcrDetails = pfutil.GetPcrDetails(\n\t\trhelpf.HostManifest.PcrManifest, hostUniquePcrs, includeEventLog)\n\tvar filteredPcrDetails = pfutil.IncludeModulesToEventLog(\n\t\tallPcrDetails, hostUniqueModules)\n\n\tnewMeta, err := pfutil.GetMetaSectionDetails(rhelpf.HostInfo, rhelpf.TagCertificate, \"\", cf.FlavorPartHostUnique,\n\t\thcConstants.VendorIntel)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, errorMessage+\" Failure in Meta section details\")\n\t}\n\tlog.Debugf(\"flavor/types/linux_platform_flavor:getHostUniqueFlavor() New Meta Section: %v\", *newMeta)\n\n\tnewBios := pfutil.GetBiosSectionDetails(rhelpf.HostInfo)\n\tif newBios == nil {\n\t\treturn nil, errors.Wrap(err, errorMessage+\" Failure in Bios section details\")\n\t}\n\tlog.Debugf(\"flavor/types/linux_platform_flavor:getHostUniqueFlavor() New Bios Section: %v\", *newBios)\n\n\t// Assemble the Host Unique Flavor\n\thostUniqueFlavor := cm.NewFlavor(newMeta, newBios, nil, filteredPcrDetails, nil, nil)\n\n\tlog.Debugf(\"flavor/types/esx_platform_flavor:getHostUniqueFlavor() New PlatformFlavor: %v\", hostUniqueFlavor)\n\n\treturn []cm.Flavor{*hostUniqueFlavor}, nil\n}",
"func GetDeviceUUID(deviceID int32) string {\n\tuuid := C.FSEventsCopyUUIDForDevice(C.dev_t(deviceID))\n\tif uuid == nullCFUUIDRef {\n\t\treturn \"\"\n\t}\n\treturn cfStringToGoString(C.CFUUIDCreateString(nullCFAllocatorRef, uuid))\n}",
"func (db GAEDatabase) LoadUUIDFromHumanTrainerName(ctx context.Context, name string) (string, error) {\n\tvar trainers []GAETrainer\n\n\t_, err := datastore.NewQuery(trainerKindName).\n\t\tFilter(\"Name =\", name).\n\t\tFilter(\"Type =\", pkmn.HumanTrainerType).\n\t\tGetAll(ctx, &trainers)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(trainers) == 0 {\n\t\treturn \"\", errors.Wrap(database.ErrNoResults, \"loading UUID from human trainer name\")\n\t}\n\tif len(trainers) > 1 {\n\t\treturn \"\", errors.Errorf(\"multiple human trainers share the same name '%s'\", name)\n\t}\n\n\treturn trainers[0].GetTrainer().UUID, nil\n}",
"func GetGuestUUID(firstName, lastName string) (string, error) {\n\tapp.InitDB()\n\n\tquery := \"SELECT uuid FROM guest WHERE first_name LIKE ? AND last_name LIKE ?\"\n\trevel.INFO.Printf(\"Query -> %s\", query)\n\trow := app.DB.QueryRow(query, firstName, lastName)\n\n\tvar guestUUID string\n\terr := row.Scan(&guestUUID)\n\tif err != nil {\n\t\trevel.ERROR.Printf(\"Query error -> %s\", err)\n\t\treturn \"\", err\n\t}\n\n\trevel.INFO.Printf(\"Query result -> guestUUID: %s\", guestUUID)\n\treturn guestUUID, nil\n}",
"func generateClientID(groupID string) string {\n\thostName, err := os.Hostname()\n\tif err != nil || len(hostName) == 0 {\n\t\tnow := time.Now().UnixNano()\n\t\thostName = strconv.FormatInt(now, 10)\n\t}\n\treturn fmt.Sprintf(\"%s-%s\", groupID, hostName)\n}",
"func getDiskUUID() string {\n\treturn vboxmanage.GetVMInfoByRegexp(boxName, \"\\\"SATA Controller-ImageUUID-0-0\\\"=\\\"(.*?)\\\"\")\n}",
"func getUID(lib utils.PathIdentifier) string {\n\treturn lib.Key()[:5]\n}",
"func MakeCustomizedUuid(port, nodeNum int) (string, error) {\n\treDigit := regexp.MustCompile(`\\d`)\n\tgroup1 := fmt.Sprintf(\"%08d\", port)\n\tgroup2 := fmt.Sprintf(\"%04d-%04d-%04d\", nodeNum, nodeNum, nodeNum)\n\tgroup3 := fmt.Sprintf(\"%012d\", port)\n\t// 12345678 1234 1234 1234 123456789012\n\t// new_uuid=\"00000000-0000-0000-0000-000000000000\"\n\tswitch {\n\tcase nodeNum > 0 && nodeNum <= 9:\n\t\tgroup2 = reDigit.ReplaceAllString(group2, fmt.Sprintf(\"%d\", nodeNum))\n\t\tgroup3 = reDigit.ReplaceAllString(group3, fmt.Sprintf(\"%d\", nodeNum))\n\t// Number greater than 10 make little sense for this purpose.\n\t// But we keep the rule so that a valid UUID will be formatted in any case.\n\tcase nodeNum >= 10000 && nodeNum <= 99999:\n\t\tgroup2 = fmt.Sprintf(\"%04d-%04d-%04d\", 0, int(nodeNum/10000), nodeNum-10000*int(nodeNum/10000))\n\tcase nodeNum >= 100000 && nodeNum < 1000000:\n\t\tgroup2 = fmt.Sprintf(\"%04d-%04d-%04d\", int(nodeNum/10000), 0, 0)\n\tcase nodeNum >= 1000000:\n\t\treturn \"\", fmt.Errorf(\"node num out of boundaries: %d\", nodeNum)\n\t}\n\treturn fmt.Sprintf(\"%s-%s-%s\", group1, group2, group3), nil\n}"
] | [
"0.77543086",
"0.6042349",
"0.588953",
"0.5722182",
"0.5662293",
"0.5657924",
"0.56159735",
"0.5602016",
"0.55612344",
"0.5539282",
"0.5483126",
"0.5434467",
"0.53669393",
"0.5341538",
"0.5333852",
"0.5304593",
"0.5253771",
"0.5247491",
"0.52331644",
"0.5198665",
"0.5154344",
"0.5145973",
"0.5134886",
"0.51346993",
"0.51130307",
"0.5107882",
"0.5035245",
"0.5032972",
"0.499788",
"0.4992474",
"0.49821556",
"0.4977723",
"0.49386126",
"0.49287057",
"0.4919873",
"0.48615667",
"0.48454717",
"0.48411492",
"0.48381948",
"0.48100737",
"0.47995898",
"0.47994405",
"0.47898623",
"0.4773801",
"0.47702017",
"0.4761265",
"0.47596917",
"0.47209916",
"0.47203493",
"0.47079706",
"0.47031465",
"0.46960726",
"0.4695416",
"0.46910235",
"0.46901464",
"0.46834",
"0.46738753",
"0.46690494",
"0.46686134",
"0.4668527",
"0.4652621",
"0.46406546",
"0.46314973",
"0.46208405",
"0.4612477",
"0.4611449",
"0.46097255",
"0.46044987",
"0.45978886",
"0.45973936",
"0.45921338",
"0.45876697",
"0.45866162",
"0.45676672",
"0.45667773",
"0.4556541",
"0.4555783",
"0.45526117",
"0.45502415",
"0.45364282",
"0.4533201",
"0.45231634",
"0.4515219",
"0.45089698",
"0.44936025",
"0.4485255",
"0.4484306",
"0.44770324",
"0.4472675",
"0.44713482",
"0.44458622",
"0.44454214",
"0.44371265",
"0.44319305",
"0.44150296",
"0.4407078",
"0.43942776",
"0.4393268",
"0.43924087",
"0.43918592"
] | 0.8242125 | 0 |
StringSliceSubset returns nil if b is a subset of a, or an error identifying the first element of b missing from a. | func StringSliceSubset(a []string, b []string) error {
aset := make(map[string]bool)
for _, v := range a {
aset[v] = true
}
for _, v := range b {
_, ok := aset[v]
if !ok {
return trace.BadParameter("%v not in set", v)
}
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (s String) IsSubset(other String) bool {\n\tif len(s) > len(other) {\n\t\treturn false\n\t}\n\n\tfor k := range s {\n\t\tif _, ok := other[k]; !ok {\n\t\t\treturn false\n\t\t}\n\n\t}\n\treturn true\n}",
"func StringsSliceContains(a []string, b string) bool {\n\tif !sort.StringsAreSorted(a) {\n\t\tsort.Strings(a)\n\t}\n\ti := sort.SearchStrings(a, b)\n\treturn i < len(a) && a[i] == b\n}",
"func sliceSubset(a, b []string) []string {\n\tresults := []string{}\n\n\tfor _, aValue := range a {\n\t\tif !existsInList(b, aValue) {\n\t\t\tresults = append(results, aValue)\n\t\t}\n\t}\n\n\treturn results\n}",
"func EqualsSliceOfString(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func SliceStringEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\taCopy := make([]string, len(a))\n\tbCopy := make([]string, len(a))\n\tfor x, aVal := range a {\n\t\taCopy[x] = aVal\n\t\tbCopy[x] = b[x]\n\t}\n\tsort.Strings(aCopy)\n\tsort.Strings(bCopy)\n\treturn sortedStringSliceEqual(aCopy, bCopy)\n}",
"func sliceEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor _, v := range a {\n\t\tif !stringInSlice(v, b) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func SliceIncludeSlice(a, b []string) bool {\n\tif EqualSlice(a, b) {\n\t\treturn true\n\t}\n\tfor _, item := range b {\n\t\tif !StringsContain(a, item) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func SliceStringPEqual(a, b []*string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tsa := make([]string, len(a))\n\tsb := make([]string, len(a))\n\tfor x, aPtr := range a {\n\t\tsa[x] = *aPtr\n\t\tsb[x] = *b[x]\n\t}\n\tsort.Strings(sa)\n\tsort.Strings(sb)\n\treturn sortedStringSliceEqual(sa, sb)\n}",
"func SliceStringPEqual(a, b []*string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tsa := make([]string, len(a))\n\tsb := make([]string, len(a))\n\tfor x, aPtr := range a {\n\t\tsa[x] = *aPtr\n\t\tsb[x] = *b[x]\n\t}\n\tsort.Strings(sa)\n\tsort.Strings(sb)\n\tfor x, aVal := range sa {\n\t\tbVal := sb[x]\n\t\tif aVal != bVal {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func StringInSlice(a string, slice []string) bool {\n\tfor _, b := range slice {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (s StringSet) Subset(t StringSet) bool {\n\tfor k := range s {\n\t\tif _, ok := t[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func SliceContainsSlice(old, new []string) bool {\n\tfor _, newElement := range new {\n\t\tin := false\n\t\tfor _, oldElement := range old {\n\t\t\tif newElement == oldElement {\n\t\t\t\tin = true\n\t\t\t}\n\t\t}\n\t\tif !in {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (s String) IsProperSubset(other String) bool {\n\treturn len(s) < len(other) && s.IsSubset(other)\n}",
"func StringInSlice(a string, slice []string) bool {\n\tfor _, b := range slice {\n\t\tif a == b {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func SliceContainsString(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func SliceContainsString(list []string, a string) bool {\n\tsort.Strings(list)\n\ti := sort.SearchStrings(list, a)\n\treturn (i < len(list) && list[i] == a)\n}",
"func substringContainedInSlice(str string, substrs []string) bool {\n\tfor _, s := range substrs {\n\t\tif strings.Contains(str, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func isSubset(setA, setB []string) bool {\n\tset := make(map[string]bool)\n\tfor _, v := range setB {\n\t\tset[v] = true\n\t}\n\tfor _, v := range setA {\n\t\tif !set[v] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func StringSlicesIntersection(a, b []string) (c []string) {\n\tm := make(map[string]bool)\n\n\tfor _, item := range a {\n\t\tm[item] = true\n\t}\n\n\tfor _, item := range b {\n\t\tif _, ok := m[item]; ok {\n\t\t\tc = append(c, item)\n\t\t}\n\t}\n\treturn\n}",
"func stringSliceEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := range a {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func SliceStringsEq(a []string, b []string) bool {\n\n\tvar aa []string\n\tvar bb []string\n\n\tif a == nil {\n\t\taa = make([]string, 0)\n\t} else {\n\t\taa = a\n\t}\n\n\tif b == nil {\n\t\tbb = make([]string, 0)\n\t} else {\n\t\tbb = b\n\t}\n\n\tif len(aa) != len(bb) {\n\t\treturn false\n\t}\n\n\tfor i := range aa {\n\t\tif aa[i] != bb[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func equalStringSlice(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func StringInSlice(a string, l []string) bool {\n\tfor _, b := range l {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func InSlice(a string, slice []string) bool {\n\tfor _, b := range slice {\n\t\tif a == b {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func Subset(first, second []string) bool {\n\tset := make(map[string]bool)\n\tfor _, value := range second {\n\t\tset[value] = true\n\t}\n\n\tfor _, value := range first {\n\t\tif !set[value] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func SliceEqualsString(x, y []string) bool {\n\tif len(x) != len(y) {\n\t\treturn false\n\t}\n\tdiff := make(map[string]int, len(x))\n\tfor _, ix := range x {\n\t\tdiff[ix]++\n\t}\n\tfor _, iy := range y {\n\t\tif _, ok := diff[iy]; !ok {\n\t\t\treturn false\n\t\t}\n\t\tdiff[iy]--\n\t\tif diff[iy] == 0 {\n\t\t\tdelete(diff, iy)\n\t\t}\n\t}\n\n\treturn len(diff) == 0\n}",
"func SliceContainsString(sl []string, st string) bool {\n\tfor _, s := range sl {\n\t\tif s == st {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func stringSliceOverlaps(left []string, right []string) bool {\n\tfor _, s := range left {\n\t\tfor _, t := range right {\n\t\t\tif s == t {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func StringsSliceEqual(a, b []string) bool {\n\tif !sort.StringsAreSorted(a) {\n\t\tsort.Strings(a)\n\t}\n\tif !sort.StringsAreSorted(b) {\n\t\tsort.Strings(b)\n\t}\n\tfor i := range b {\n\t\tif !StringsSliceContains(a, b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor i := range a {\n\t\tif !StringsSliceContains(b, a[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func InStringSlice(ss []string, str string) bool {\n\tfor _, s := range ss {\n\t\tif strings.EqualFold(s, str) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func sliceContainsString(s string, sl []string) bool {\n\tfor _, v := range sl {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func inStringSlice(ss []string, str string) bool {\n\tfor _, s := range ss {\n\t\tif strings.EqualFold(s, str) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func Subset(first, second []string) bool {\n\tset := make(map[string]int)\n\tfor _, value := range second {\n\t\tset[value]++\n\t}\n\n\tfor _, value := range first {\n\t\tif count, found := set[value]; !found {\n\t\t\treturn false\n\t\t} else if count < 1 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func InSliceString(x string, a []string) bool {\n\tl := len(a)\n\n\tif l == 0 {\n\t\treturn false\n\t}\n\n\tsort.Strings(a)\n\n\ti := sort.SearchStrings(a, x)\n\n\tif i < l && a[i] == x {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func TestEqStringSlice(t *testing.T) {\n\tt.Parallel()\n\tvar tests = []struct {\n\t\ta []string\n\t\tb []string\n\t\texpected bool\n\t}{\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"foo\", \"bar\"}, true},\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"bar\", \"foo\"}, false},\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"bar\"}, false},\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"\\x66\\x6f\\x6f\", \"bar\"}, true},\n\t}\n\tfor _, test := range tests {\n\t\tactual := primitives.EqSlices(&test.a, &test.b)\n\t\tassert.Equal(t, test.expected, actual, \"expected value '%v' | actual : '%v'\", test.expected, actual)\n\t}\n}",
"func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func SliceSubset(slice1, slice2 interface{}) (bool, error) {\n\n\tswitch x := slice1.(type) {\n\tcase []DRAState:\n\t\tstateSlice1, ok1 := slice1.([]DRAState)\n\t\tstateSlice2, ok2 := slice2.([]DRAState)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, stateFrom1 := range stateSlice1 {\n\t\t\tif !(stateFrom1.In(stateSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\tcase []TransitionSystemState:\n\t\tstateSlice1, ok1 := slice1.([]TransitionSystemState)\n\t\tstateSlice2, ok2 := slice2.([]TransitionSystemState)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, stateFrom1 := range stateSlice1 {\n\t\t\tif !(stateFrom1.In(stateSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\n\tcase []mc.AtomicProposition:\n\t\tapSlice1, ok1 := slice1.([]mc.AtomicProposition)\n\t\tapSlice2, ok2 := slice2.([]mc.AtomicProposition)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, apFrom1 := range apSlice1 {\n\t\t\tif !(apFrom1.In(apSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\n\tdefault:\n\t\treturn false, fmt.Errorf(\"Unexpected type given to SliceSubset(): %v\", x)\n\t}\n\n}",
"func EqualSlice(a, b []string) bool {\n\tsort.Strings(a)\n\tsort.Strings(b)\n\treturn reflect.DeepEqual(a, b)\n}",
"func (a Attributes) IsSubset(b Attributes) bool {\n\tm := map[string]struct{}{}\n\tfor _, s := range []string(b) {\n\t\tm[s] = struct{}{}\n\t}\n\tfor _, s := range []string(a) {\n\t\tif _, ok := m[s]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func str_is_in_slice(slice []string, str string) bool {\n\tfor _, s := range slice {\n\t\tif s == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func isSubsetMatch(tokens []string, test string) bool {\n\ttsa := [32]string{}\n\ttts := tokenizeSubjectIntoSlice(tsa[:0], test)\n\treturn isSubsetMatchTokenized(tokens, tts)\n}",
"func StringInSlice(str string, slc []string) bool {\n\tfor _, s := range slc {\n\t\tif strings.EqualFold(s, str) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func stringInSlice(a string, list []string) bool {\n\tif list == nil {\n\t\treturn false\n\t}\n\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringSliceContains(slice []string, str string) bool {\n\tfor _, s := range slice {\n\t\tif s == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func sliceContainsSlice(smallSlice []core.VarId, bigSlice [][]core.VarId) bool {\n\tfor _, slice := range bigSlice {\n\t\tif slicesIdentical(slice, smallSlice) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringSlicesEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor idx, v := range a {\n\t\tif v != b[idx] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func StringSliceContains(s []string, e string) bool {\n\tfor _, a := range s {\n\t\tif a == e {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func equalSlice(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, v := range a {\n\t\tif v != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func subset(first, second []string) bool {\n\tset := make(map[string]int)\n\tfor _, value := range second {\n\t\tset[value]++\n\t}\n\n\tfor _, value := range first {\n\t\tif count, found := set[value]; !found {\n\t\t\treturn false\n\t\t} else if count < 1 {\n\t\t\treturn false\n\t\t} else {\n\t\t\tset[value] = count - 1\n\t\t}\n\t}\n\n\treturn true\n}",
"func Subset(s1, s2 Set) bool {\n\tif s1.Len() > s2.Len() {\n\t\treturn false\n\t}\n\tfor k := range s1 {\n\t\tif _, ok := s2[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func StringSlicesEqual(a []string, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor k := range a {\n\t\tif a[k] != b[k] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func stringSliceIntersect(s, t []string) []string {\n\tvar res []string\n\tm := make(map[string]bool, len(s))\n\tfor _, x := range s {\n\t\tm[x] = true\n\t}\n\tfor _, y := range t {\n\t\tif m[y] {\n\t\t\tres = append(res, y)\n\t\t}\n\t}\n\treturn res\n}",
"func SlicesEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, v := range a {\n\t\tif v != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func SliceContains(s []string, value string) bool {\n\tfor _, v := range s {\n\t\tif v == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringInSlice(str string, slice []string) bool {\n\tfor _, s := range slice {\n\t\tif s == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func SliceIntersects(a, b interface{}) bool {\n\taValue, bValue := reflect.ValueOf(a), reflect.ValueOf(b)\n\taValueKind, bValueKind := aValue.Kind(), bValue.Kind()\n\n\tif aValueKind != reflect.Slice || bValueKind != reflect.Slice {\n\t\tpanic(fmt.Sprintf(\"one of parameters is not a slice: (%v, %v)\", aValueKind, bValueKind))\n\t}\n\tfor i := 0; i < bValue.Len(); i++ {\n\t\tfor j := 0; j < aValue.Len(); j++ {\n\t\t\tif bValue.Index(i).Interface() == aValue.Index(j).Interface() {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}",
"func stringSliceContains(ss []string, s string) bool {\n\tfor _, v := range ss {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func isSubset(lhs, rhs ref.Val) ref.Val {\n\ta, ok := lhs.(traits.Lister)\n\tif !ok {\n\t\treturn types.ValOrErr(a, \"no such overload\")\n\t}\n\n\tb, ok := rhs.(traits.Lister)\n\tif !ok {\n\t\treturn types.ValOrErr(b, \"no such overload\")\n\t}\n\n\tm := convertToMap(b)\n\n\tfor ai := a.Iterator(); ai.HasNext() == types.True; {\n\t\tva := ai.Next()\n\t\tif m != nil {\n\t\t\tif _, ok := m[va]; !ok {\n\t\t\t\treturn types.False\n\t\t\t}\n\t\t} else {\n\t\t\tif !find(b.Iterator(), va) {\n\t\t\t\treturn types.False\n\t\t\t}\n\t\t}\n\t}\n\n\treturn types.True\n}",
"func StringSliceContains(slice []string, elem string) bool {\n\tfor _, v := range slice {\n\t\tif v == elem {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func IsSubString(reads []string, genome string, len int) bool {\n kmers := MakeKMerSet(genome, len)\n for _, read := range reads{\n _, found := kmers[read]\n if (!found) {\n return false\n }\n }\n return true\n}",
"func InStringSlice(haystack []string, needle string) bool {\n\tfor _, str := range haystack {\n\t\tif needle == str {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func InStringSlice(s string, arr []string) bool {\n\tfor _, v := range arr {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringInSlice(s string, sl []string) bool {\n\tfor _, val := range sl {\n\t\tif s == val {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func AllStringsInSlice(strings []string, slice []string) bool {\n\tfor _, s := range strings {\n\t\tif !StringInSlice(s, slice) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func isInStringSlice(x string, elements []string) bool {\n\tfor _, elem := range elements {\n\t\tif elem == x {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func IsInStringSlice(slice []string, search string) bool {\n\tfor _, v := range slice {\n\t\tif v == search {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (gdb *generalDatabase) IsStringInSlice(needle string, haystack []string) bool {\n\tfor _, s := range haystack {\n\t\tif s == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringSliceContains(haystack []string, needle string) bool {\n\tfor _, str := range haystack {\n\t\tif str == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func doSlicesIntersect(s1, s2 []string) bool {\n if s1 == nil || s2 == nil {\n return false\n }\n for _, str := range s1 {\n if isElementInSlice(str, s2) {\n return true\n }\n }\n return false\n}",
"func StringSliceContains(slice []string, value string) bool {\n\tinterfaceSlice := make([]interface{}, len(slice))\n\tfor _, item := range slice {\n\t\tvar interfaceItem interface{} = item\n\t\tinterfaceSlice = append(interfaceSlice, interfaceItem)\n\t}\n\tvar interfaceValue interface{} = value\n\treturn InterfaceSliceContains(interfaceSlice, interfaceValue)\n}",
"func stringInSlice(s string, sl []string) bool {\n\tfor _, v := range sl {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (s String) IsSuperset(other String) bool {\n\treturn other.IsSubset(s)\n}",
"func stringInSlice(a string, list []string) bool {\n for _, b := range list {\n if b == a {\n return true\n }\n }\n return false\n}",
"func SliceContains(s string, properties []string) (contain bool, prop string) {\n\tfor _, p := range properties {\n\t\tif strings.Contains(s, p) {\n\t\t\treturn true, p\n\t\t}\n\t}\n\treturn false, \"\"\n}",
"func StringInSliceCS(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif a == b {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func ContainsInSlice(s []string, str string) bool {\n\tfor _, val := range s {\n\t\tif val == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func sliceContains(haystack []string, needle string) bool {\n\tfor _, s := range haystack {\n\t\tif s == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func sliceContains(slice []string, values ...string) bool {\n\tfor _, s := range slice {\n\t\tfor _, v := range values {\n\t\t\tif strings.EqualFold(strings.TrimSpace(s), strings.TrimSpace(v)) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}",
"func StringSliceContains(list []string, s string) bool {\n\tfor _, v := range list {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringSliceContains(list []string, s string) bool {\n\tfor _, v := range list {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func Fsubset(lista, listb []string) bool {\n\tfound := 0\n\tFcompress(&lista)\n\tFcompress(&listb)\n\tfor _, i := range lista {\n\t\tif Fmember(listb, i) {\n\t\t\tfound++\n\t\t}\n\t}\n\tif found < len(lista) {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}",
"func InSlice(v string, sl []string) bool {\n\tfor _, vv := range sl {\n\t\tif vv == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func InStringSlice(str string, strSli []string) bool {\n\tfor _, v := range strSli {\n\t\tif str == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func SliceContains(slice []string, needle string) bool {\n\tfor _, s := range slice {\n\t\tif s == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}"
] | [
"0.72063106",
"0.6842188",
"0.6821371",
"0.6786121",
"0.6751392",
"0.67450565",
"0.6714515",
"0.6702434",
"0.66884464",
"0.6672785",
"0.66710854",
"0.6625813",
"0.6613793",
"0.66135323",
"0.6598829",
"0.65849054",
"0.6550833",
"0.6545979",
"0.6528951",
"0.65145385",
"0.65053153",
"0.6499472",
"0.6480875",
"0.64218384",
"0.6402446",
"0.6402446",
"0.6402446",
"0.6402446",
"0.6402446",
"0.6402446",
"0.6402446",
"0.6402446",
"0.6402446",
"0.63731784",
"0.6362486",
"0.63611865",
"0.63506305",
"0.6349333",
"0.63462496",
"0.63289106",
"0.632398",
"0.6294954",
"0.62924653",
"0.6270303",
"0.62625664",
"0.62625664",
"0.62625664",
"0.62625664",
"0.62625664",
"0.62625664",
"0.62625664",
"0.62625664",
"0.62625396",
"0.625351",
"0.6246504",
"0.62450135",
"0.62419134",
"0.6240835",
"0.6237596",
"0.6234671",
"0.6225487",
"0.6220364",
"0.6193443",
"0.6133501",
"0.61300474",
"0.6102156",
"0.60986376",
"0.6094159",
"0.60940903",
"0.6090623",
"0.60892373",
"0.6066791",
"0.6055128",
"0.6050912",
"0.6049116",
"0.6045325",
"0.6028459",
"0.6015364",
"0.60153127",
"0.6010669",
"0.60092133",
"0.6004618",
"0.59971607",
"0.59834975",
"0.59827524",
"0.5971274",
"0.59609497",
"0.5959726",
"0.59588665",
"0.59453344",
"0.5933795",
"0.5911407",
"0.5910734",
"0.589611",
"0.58958054",
"0.58958054",
"0.5886982",
"0.5886703",
"0.5882388",
"0.58772045"
] | 0.8096038 | 0 |
UintSliceSubset returns an error if b is not a subset of a. | func UintSliceSubset(a []uint16, b []uint16) error {
aset := make(map[uint16]bool)
for _, v := range a {
aset[v] = true
}
for _, v := range b {
_, ok := aset[v]
if !ok {
return trace.BadParameter("%v not in set", v)
}
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func StringSliceSubset(a []string, b []string) error {\n\taset := make(map[string]bool)\n\tfor _, v := range a {\n\t\taset[v] = true\n\t}\n\n\tfor _, v := range b {\n\t\t_, ok := aset[v]\n\t\tif !ok {\n\t\t\treturn trace.BadParameter(\"%v not in set\", v)\n\t\t}\n\n\t}\n\treturn nil\n}",
"func SliceSubset(slice1, slice2 interface{}) (bool, error) {\n\n\tswitch x := slice1.(type) {\n\tcase []DRAState:\n\t\tstateSlice1, ok1 := slice1.([]DRAState)\n\t\tstateSlice2, ok2 := slice2.([]DRAState)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, stateFrom1 := range stateSlice1 {\n\t\t\tif !(stateFrom1.In(stateSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\tcase []TransitionSystemState:\n\t\tstateSlice1, ok1 := slice1.([]TransitionSystemState)\n\t\tstateSlice2, ok2 := slice2.([]TransitionSystemState)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, stateFrom1 := range stateSlice1 {\n\t\t\tif !(stateFrom1.In(stateSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\n\tcase []mc.AtomicProposition:\n\t\tapSlice1, ok1 := slice1.([]mc.AtomicProposition)\n\t\tapSlice2, ok2 := slice2.([]mc.AtomicProposition)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, apFrom1 := range apSlice1 {\n\t\t\tif !(apFrom1.In(apSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\n\tdefault:\n\t\treturn false, fmt.Errorf(\"Unexpected type given to SliceSubset(): %v\", x)\n\t}\n\n}",
"func sliceSubset(a, b []string) []string {\n\tresults := []string{}\n\n\tfor _, aValue := range a {\n\t\tif !existsInList(b, aValue) {\n\t\t\tresults = append(results, aValue)\n\t\t}\n\t}\n\n\treturn results\n}",
"func (ids IDSlice) IsSubsetOf(o IDSlice) bool {\n\tfor _, id := range ids {\n\t\tif !o.Contains(id) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func sliceContainsSlice(smallSlice []core.VarId, bigSlice [][]core.VarId) bool {\n\tfor _, slice := range bigSlice {\n\t\tif slicesIdentical(slice, smallSlice) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func isSubset(setA, setB []string) bool {\n\tset := make(map[string]bool)\n\tfor _, v := range setB {\n\t\tset[v] = true\n\t}\n\tfor _, v := range setA {\n\t\tif !set[v] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func isSubset(lhs, rhs ref.Val) ref.Val {\n\ta, ok := lhs.(traits.Lister)\n\tif !ok {\n\t\treturn types.ValOrErr(a, \"no such overload\")\n\t}\n\n\tb, ok := rhs.(traits.Lister)\n\tif !ok {\n\t\treturn types.ValOrErr(b, \"no such overload\")\n\t}\n\n\tm := convertToMap(b)\n\n\tfor ai := a.Iterator(); ai.HasNext() == types.True; {\n\t\tva := ai.Next()\n\t\tif m != nil {\n\t\t\tif _, ok := m[va]; !ok {\n\t\t\t\treturn types.False\n\t\t\t}\n\t\t} else {\n\t\t\tif !find(b.Iterator(), va) {\n\t\t\t\treturn types.False\n\t\t\t}\n\t\t}\n\t}\n\n\treturn types.True\n}",
"func SliceIntersects(a, b interface{}) bool {\n\taValue, bValue := reflect.ValueOf(a), reflect.ValueOf(b)\n\taValueKind, bValueKind := aValue.Kind(), bValue.Kind()\n\n\tif aValueKind != reflect.Slice || bValueKind != reflect.Slice {\n\t\tpanic(fmt.Sprintf(\"one of parameters is not a slice: (%v, %v)\", aValueKind, bValueKind))\n\t}\n\tfor i := 0; i < bValue.Len(); i++ {\n\t\tfor j := 0; j < aValue.Len(); j++ {\n\t\t\tif bValue.Index(i).Interface() == aValue.Index(j).Interface() {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}",
"func IntSliceIntersects(a, b []int) (rb bool) {\n\trb = false\n\tfor _, k := range a {\n\t\tfor _, l := range b {\n\t\t\tif k == l {\n\t\t\t\trb = true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}",
"func Subset(first, second []string) bool {\n\tset := make(map[string]bool)\n\tfor _, value := range second {\n\t\tset[value] = true\n\t}\n\n\tfor _, value := range first {\n\t\tif !set[value] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func IsSubset(s, t Interface) bool {\n\tfor _, x := range t.Members() {\n\t\tif !s.Contains(x) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (b *Builder) IsSubsetOf(rhs interface{}) *predicate.Predicate {\n\tb.p.RegisterPredicate(impl.IsSubsetOf(rhs))\n\tif b.t != nil {\n\t\tb.t.Helper()\n\t\tEvaluate(b)\n\t}\n\treturn &b.p\n}",
"func (set *AppleSet) IsSubset(other *AppleSet) bool {\n\tif set.IsEmpty() {\n\t\treturn !other.IsEmpty()\n\t}\n\n\tif other.IsEmpty() {\n\t\treturn false\n\t}\n\n\tset.s.RLock()\n\tother.s.RLock()\n\tdefer set.s.RUnlock()\n\tdefer other.s.RUnlock()\n\n\tfor v := range set.m {\n\t\tif !other.Contains(v) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (set Int64Set) IsSubset(other Int64Set) bool {\n\tfor v := range set {\n\t\tif !other.Contains(v) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func Subset(first, second []string) bool {\n\tset := make(map[string]int)\n\tfor _, value := range second {\n\t\tset[value]++\n\t}\n\n\tfor _, value := range first {\n\t\tif count, found := set[value]; !found {\n\t\t\treturn false\n\t\t} else if count < 1 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func (s Slice) Sub(b Slice) Slice {\n\tlut := map[uuid.UUID]struct{}{}\n\tfor _, id := range b {\n\t\tlut[id] = struct{}{}\n\t}\n\n\tsub := []uuid.UUID{}\n\tfor _, id := range s {\n\t\tif _, foundInB := lut[id]; !foundInB {\n\t\t\tsub = append(sub, id)\n\t\t}\n\t}\n\treturn sub\n}",
"func SliceIncludeSlice(a, b []string) bool {\n\tif EqualSlice(a, b) {\n\t\treturn true\n\t}\n\tfor _, item := range b {\n\t\tif !StringsContain(a, item) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (s *IntSet) Subset(y *IntSet) bool {\n\n\tfor _, m := range s.Members() {\n\t\tif !y.Contains(m) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func EqualsSliceOfCharacteristic(a, b []Characteristic) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif !EqualsCharacteristic(a[i], b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func isSubset(query, subject asciiset.ASCIISet) bool {\n\t// A ⊆ B iff (A ∪ B) = B\n\tunion := query.Union(subject)\n\treturn union.Equals(subject)\n}",
"func subset(first, second []string) bool {\n\tset := make(map[string]int)\n\tfor _, value := range second {\n\t\tset[value]++\n\t}\n\n\tfor _, value := range first {\n\t\tif count, found := set[value]; !found {\n\t\t\treturn false\n\t\t} else if count < 1 {\n\t\t\treturn false\n\t\t} else {\n\t\t\tset[value] = count - 1\n\t\t}\n\t}\n\n\treturn true\n}",
"func Subset(s1, s2 Set) bool {\n\tif s1.Len() > s2.Len() {\n\t\treturn false\n\t}\n\tfor k := range s1 {\n\t\tif _, ok := s2[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (a Attributes) IsSubset(b Attributes) bool {\n\tm := map[string]struct{}{}\n\tfor _, s := range []string(b) {\n\t\tm[s] = struct{}{}\n\t}\n\tfor _, s := range []string(a) {\n\t\tif _, ok := m[s]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func SliceContainsSlice(old, new []string) bool {\n\tfor _, newElement := range new {\n\t\tin := false\n\t\tfor _, oldElement := range old {\n\t\t\tif newElement == oldElement {\n\t\t\t\tin = true\n\t\t\t}\n\t\t}\n\t\tif !in {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func ExampleIntSet_IsSubsetOf() {\n\ts1 := gset.NewIntSet()\n\ts1.Add([]int{1, 2, 3, 4}...)\n\tvar s2 gset.IntSet\n\ts2.Add([]int{1, 2, 4}...)\n\tfmt.Println(s2.IsSubsetOf(s1))\n\n\t// Output:\n\t// true\n}",
"func subSlice(out, a, b []float64)",
"func sliceEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor _, v := range a {\n\t\tif !stringInSlice(v, b) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func isSubsetMatch(tokens []string, test string) bool {\n\ttsa := [32]string{}\n\ttts := tokenizeSubjectIntoSlice(tsa[:0], test)\n\treturn isSubsetMatchTokenized(tokens, tts)\n}",
"func (s *Set) IsSubset(strict bool, other *Set) bool {\n\tif strict && len(s.m) >= len(other.m) {\n\t\treturn false\n\t}\nA:\n\tfor v := range s.m {\n\t\tfor i := range other.m {\n\t\t\tif v == i {\n\t\t\t\tcontinue A\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}",
"func (s *ConcurrentSet) IsSubset(other Set) bool {\n\tif s.Len() > other.Len() {\n\t\treturn false\n\t}\n\n\tisSubset := true\n\ts.hash.Range(func(k, v interface{}) bool {\n\t\tif !other.Contains(k) {\n\t\t\tisSubset = false\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\treturn isSubset\n}",
"func SliceUnion(a, b []interface{}) []interface{} {\n\tm := make(map[interface{}]bool)\n\n\t// iterate through slice a, adding values as\n\t// keys in m\n\tfor _, v := range a {\n\t\tm[v] = true\n\t}\n\n\t// iterate through slice b, adding values not\n\t// in map m to slice a\n\tfor _, v := range b {\n\t\tif _, ok := m[v]; !ok {\n\t\t\ta = append(a, v)\n\t\t}\n\t}\n\n\t// return union of slices a and b\n\treturn a\n}",
"func (bb *ByteSliceBuffer) PopSlice(b [][]byte) (n int, ok bool) {\n\tfor wpos := range b {\n\t\tif pos, ok := bb.Buffer.GetReadPos(); ok {\n\t\t\tb[wpos] = bb.data[pos]\n\t\t\tn++\n\t\t} else {\n\t\t\treturn n, false\n\t\t}\n\t}\n\treturn n, true\n}",
"func InSliceSlice(qSlice [][]int, x []int) bool {\n\tfor _, v := range qSlice {\n\t\tif len(v) != len(x) {\n\t\t\tcontinue\n\t\t}\n\t\tvar count int\n\t\tfor i := 0; i < len(v); i++ {\n\n\t\t\tif v[i] != x[i] {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tcount++\n\t\t\t\tif count == len(v) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn false\n}",
"func TestEqIntSlice(t *testing.T) {\n\tt.Parallel()\n\tvar tests = []struct {\n\t\ta []int\n\t\tb []int\n\t\texpected bool\n\t}{\n\t\t{[]int{1, 2}, []int{1, 2}, true},\n\t\t{[]int{1, 2}, []int{2, 1}, false},\n\t\t{[]int{1, 2}, []int{1}, false},\n\t\t{[]int{1, 2}, []int{1, 2, 1}, false},\n\t}\n\tfor _, test := range tests {\n\t\tactual := primitives.EqSlices(&test.a, &test.b)\n\t\tassert.Equal(t, test.expected, actual, \"expected value '%v' | actual : '%v'\", test.expected, actual)\n\t}\n}",
"func Fsubset(lista, listb []string) bool {\n\tfound := 0\n\tFcompress(&lista)\n\tFcompress(&listb)\n\tfor _, i := range lista {\n\t\tif Fmember(listb, i) {\n\t\t\tfound++\n\t\t}\n\t}\n\tif found < len(lista) {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}",
"func (s *Set) IsSubset(other *Set) bool {\n\tif other.Len() < s.Len() {\n\t\treturn false\n\t}\n\n\tisSubset := true\n\ts.Range(func(item Value) bool {\n\t\tif !other.Contains(item) {\n\t\t\tisSubset = false\n\t\t}\n\n\t\treturn isSubset\n\t})\n\n\treturn isSubset\n}",
"func (s StringSet) Subset(t StringSet) bool {\n\tfor k := range s {\n\t\tif _, ok := t[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (s String) IsSubset(other String) bool {\n\tif len(s) > len(other) {\n\t\treturn false\n\t}\n\n\tfor k := range s {\n\t\tif _, ok := other[k]; !ok {\n\t\t\treturn false\n\t\t}\n\n\t}\n\treturn true\n}",
"func Uint64SliceEqual(a []uint64, b []uint64) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, v := range a {\n\t\tif v != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (s PointBuffer) SubSlice(low int, high int) PointBuffer {\n\tinBounds := low >= 0 && low <= high && high <= s.cap\n\tif !inBounds {\n\t\tpanic(fmt.Errorf(\n\t\t\t\"runtime error: slice bounds out of range [%d:%d] with capacity %d\",\n\t\t\tlow, high, s.cap,\n\t\t))\n\t}\n\tvar tVar Point\n\ttSize := unsafe.Sizeof(tVar)\n\ttype internalPtr struct {\n\t\toffset uintptr\n\t\tbucketIdx uint8\n\t\tarenaMask uint16\n\t}\n\tcurrentPtr := *(*internalPtr)(unsafe.Pointer(&s.data))\n\tnewPtr := internalPtr{\n\t\toffset: currentPtr.offset + uintptr(low*int(tSize)),\n\t\tbucketIdx: currentPtr.bucketIdx,\n\t\tarenaMask: currentPtr.arenaMask,\n\t}\n\treturn PointBuffer{\n\t\tdata: *(*arena.Ptr)(unsafe.Pointer(&newPtr)),\n\t\tlen: high - low,\n\t\tcap: s.cap - low,\n\t}\n}",
"func IntSliceContains(is []int, s int) (rb bool) {\n\trb = false\n\tfor _, a := range is {\n\t\tif a == s {\n\t\t\trb = true\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}",
"func compareOnIntSlice(a, b []int) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\n\tisEqual := true\n\tfor i := range a {\n\t\tif a[i] != b[i] {\n\t\t\tisEqual = false\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn isEqual\n}",
"func InSlice(a string, slice []string) bool {\n\tfor _, b := range slice {\n\t\tif a == b {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func SliceEquals(slice1, slice2 interface{}) (bool, error) {\n\t//Determine if both slices are of the same type.\n\t// if slice1.(type) != slice2.(type) {\n\t// \tfmt.Println(\"Types of the two slices are different!\")\n\t// \treturn false\n\t// }\n\n\toneSubsetTwo, err := SliceSubset(slice1, slice2)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"There was an issue computing SliceSubset(slice1,slice2): %v\", err)\n\t}\n\n\ttwoSubsetOne, err := SliceSubset(slice2, slice1)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"There was an issue computing SliceSubset(slice2,slice1): %v\", err)\n\t}\n\n\treturn oneSubsetTwo && twoSubsetOne, nil\n\n}",
"func EqualSlice(dst, src []byte) bool {\n\tif len(dst) != len(src) {\n\t\treturn false\n\t}\n\tfor idx, b := range dst {\n\t\tif b != src[idx] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func EqualsSliceOfRefOfUnionSelect(a, b []*UnionSelect) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif !EqualsRefOfUnionSelect(a[i], b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (el Elements) Subset(sIdx, fIdx int) Elements {\n\tres := Elements{}\n\tswitch el.Type {\n\tcase part3.Int32:\n\t\tres.I32 = el.I32[sIdx:fIdx]\n\tcase part3.Float32:\n\t\tres.F32 = el.F32[sIdx:fIdx]\n\tcase part3.Float64:\n\t\tres.F64 = el.F64[sIdx:fIdx]\n\t}\n\tres.Type = el.Type\n\treturn res\n}",
"func sliceContains(s []*dag.Vertex, v *dag.Vertex) bool {\n\tfor _, i := range s {\n\t\tif i == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func equalSlice(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, v := range a {\n\t\tif v != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func CheckSubset(src, trg *Item) bool {\n\ttype obj struct {\n\t\tsrc Attribute\n\t\ttrg Attribute\n\t}\n\tfor _, v := range []obj{\n\t\t{src.part, trg.part},\n\t\t{src.vendor, trg.vendor},\n\t\t{src.product, trg.product},\n\t\t{src.version, trg.version},\n\t\t{src.update, trg.update},\n\t\t{src.edition, trg.edition},\n\t\t{src.language, trg.language},\n\t\t{src.sw_edition, trg.sw_edition},\n\t\t{src.target_sw, trg.target_sw},\n\t\t{src.target_hw, trg.target_hw},\n\t\t{src.other, trg.other},\n\t} {\n\t\tswitch v.src.Comparison(v.trg) {\n\t\tcase Subset, Equal:\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (as AllSlice) InSlice(qSlice []Any, x Any) bool {\n\tfor _, v := range qSlice {\n\t\tif v == x {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func intSliceIncludesOther(a, b []int) bool {\n\tif len(b) > len(a) {\n\t\treturn false\n\t}\n\tfor _, n := range b {\n\t\tvar isMatch bool\n\t\tfor _, m := range a {\n\t\t\tif n == m {\n\t\t\t\tisMatch = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !isMatch {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func InIntSlice(a int, list []int) bool {\n\tfor _, v := range list {\n\t\tif a == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func IntInSlice(a uint64, list []uint64) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func sliceContains(container []*html.Node, contained *html.Node) bool {\n\tfor _, n := range container {\n\t\tif nodeContains(n, contained) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func SlicesEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, v := range a {\n\t\tif v != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func Subset[T any](collection []T, offset int, length uint) []T {\n\tsize := len(collection)\n\n\tif offset < 0 {\n\t\toffset = size + offset\n\t\tif offset < 0 {\n\t\t\toffset = 0\n\t\t}\n\t}\n\n\tif offset > size {\n\t\treturn []T{}\n\t}\n\n\tif length > uint(size)-uint(offset) {\n\t\tlength = uint(size - offset)\n\t}\n\n\treturn collection[offset : offset+int(length)]\n}",
"func Test_AreEqualSlices_unequal(t *testing.T) {\n // create two equal slices\n a := []byte{ 0xDE, 0xAD, 0xBE, 0xEF }\n b := []byte{ 0xCA, 0xFE, 0xBA, 0xBE }\n // make test -> log failure but continue testing\n if AreEqualSlices(a,b) { t.Error(\"unequal slices determined equal\") }\n}",
"func Sub(a []string, b []string) []string {\n\tbMap := ToSet(b)\n\tout := []string{}\n\tfor _, v := range a {\n\t\tif !bMap[v] {\n\t\t\tout = append(out, v)\n\t\t}\n\t}\n\treturn out\n}",
"func (s *ConcurrentSet) IsProperSubset(other Set) bool {\n\treturn s.Len() < other.Len() && s.IsSubset(other)\n}",
"func EqualSlice(a, b []string) bool {\n\tsort.Strings(a)\n\tsort.Strings(b)\n\treturn reflect.DeepEqual(a, b)\n}",
"func SliceContains(slice, elem interface{}) (bool, error) {\n\n\tsv := reflect.ValueOf(slice)\n\n\t// Check that slice is actually a slice/array.\n\tif sv.Kind() != reflect.Slice && sv.Kind() != reflect.Array {\n\t\treturn false, errors.New(\"not an array or slice\")\n\t}\n\n\t// iterate the slice\n\tfor i := 0; i < sv.Len(); i++ {\n\n\t\t// compare elem to the current slice element\n\t\tif elem == sv.Index(i).Interface() {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\t// nothing found\n\treturn false, nil\n\n}",
"func TestEqStringSlice(t *testing.T) {\n\tt.Parallel()\n\tvar tests = []struct {\n\t\ta []string\n\t\tb []string\n\t\texpected bool\n\t}{\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"foo\", \"bar\"}, true},\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"bar\", \"foo\"}, false},\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"bar\"}, false},\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"\\x66\\x6f\\x6f\", \"bar\"}, true},\n\t}\n\tfor _, test := range tests {\n\t\tactual := primitives.EqSlices(&test.a, &test.b)\n\t\tassert.Equal(t, test.expected, actual, \"expected value '%v' | actual : '%v'\", test.expected, actual)\n\t}\n}",
"func StringsSliceContains(a []string, b string) bool {\n\tif !sort.StringsAreSorted(a) {\n\t\tsort.Strings(a)\n\t}\n\ti := sort.SearchStrings(a, b)\n\treturn i < len(a) && a[i] == b\n}",
"func BoolSlicesEqual(a, b []bool) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor idx, v := range a {\n\t\tif v != b[idx] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func findSubsets(firstSubset, secondSubset, thirdSubset int, n int, slice []int) bool {\n\n\tif firstSubset == 0 && secondSubset == 0 && thirdSubset == 0 {\n\t\treturn true\n\t}\n\n\tif n < 0 {\n\t\treturn false\n\t}\n\n\tif findSubsets(firstSubset-slice[n], secondSubset, thirdSubset, n-1, slice) {\n\t\treturn true\n\t} else if findSubsets(firstSubset, secondSubset-slice[n], thirdSubset, n-1, slice) {\n\t\treturn true\n\t} else if findSubsets(firstSubset, secondSubset, thirdSubset-slice[n], n-1, slice) {\n\t\treturn true\n\t}\n\treturn false\n}",
"func IntInSlice(a int, list []int) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func UintSlice(src []*uint) []uint {\n\tdst := make([]uint, len(src))\n\tfor i := 0; i < len(src); i++ {\n\t\tif src[i] != nil {\n\t\t\tdst[i] = *(src[i])\n\t\t}\n\t}\n\treturn dst\n}",
"func SliceContains(slice []string, needle string) bool {\n\tfor _, s := range slice {\n\t\tif s == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func equalSlice(a, b []string) bool {\n\tif a == nil && b == nil {\n\t\treturn true\n\t}\n\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\n\tfor i := range a {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func Test_AreEqualSlices_one_shorter(t *testing.T) {\n // create two equal slices\n a := []byte{ 0xDE, 0xAD, 0xBE, 0xEF }\n b := []byte{ 0xDE, 0xAD, 0xBE }\n //\tmake test -> log failure but continue testing\n if AreEqualSlices(a,b) { t.Error(\"different length slices determined equal\") }\n}",
"func isInSlice(slice []*html.Node, node *html.Node) bool {\n\treturn indexInSlice(slice, node) > -1\n}",
"func SliceContains(s []string, value string) bool {\n\tfor _, v := range s {\n\t\tif v == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (o Op) IsSlice3() bool",
"func substringContainedInSlice(str string, substrs []string) bool {\n\tfor _, s := range substrs {\n\t\tif strings.Contains(str, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (w *viewBoxWriter) startsWith(slice []byte, subSlice []byte) bool {\n\tfor key, value := range subSlice {\n\t\tif slice[key] != value {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func EqualsSliceOfRefOfPartitionDefinition(a, b []*PartitionDefinition) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif !EqualsRefOfPartitionDefinition(a[i], b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func isSubsetMatchTokenized(tokens, test []string) bool {\n\t// Walk the target tokens\n\tfor i, t2 := range test {\n\t\tif i >= len(tokens) {\n\t\t\treturn false\n\t\t}\n\t\tl := len(t2)\n\t\tif l == 0 {\n\t\t\treturn false\n\t\t}\n\t\tif t2[0] == fwc && l == 1 {\n\t\t\treturn true\n\t\t}\n\t\tt1 := tokens[i]\n\n\t\tl = len(t1)\n\t\tif l == 0 || t1[0] == fwc && l == 1 {\n\t\t\treturn false\n\t\t}\n\n\t\tif t1[0] == pwc && len(t1) == 1 {\n\t\t\tm := t2[0] == pwc && len(t2) == 1\n\t\t\tif !m {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif i >= len(test) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif t2[0] != pwc && strings.Compare(t1, t2) != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn len(tokens) == len(test)\n}",
"func isOrderedSubset(first, second *callstack) bool {\n\tif len(*first) > len(*second) {\n\t\treturn false\n\t}\n\tset := make(map[string]int)\n\tfor _, value := range *second {\n\t\tset[value] += 1\n\t}\n\n\tfor _, value := range *first {\n\t\tif count, found := set[value]; !found {\n\t\t\treturn false\n\t\t} else if count < 1 {\n\t\t\treturn false\n\t\t} else {\n\t\t\tset[value] = count - 1\n\t\t}\n\t}\n\treturn checkSequence(*first, *second)\n}",
"func (c Collection) HasSubsetOf(that Instance) bool {\n\tif len(c) == 0 {\n\t\treturn true\n\t}\n\t// prevent panic when that is nil\n\tif len(that) == 0 {\n\t\treturn false\n\t}\n\tfor _, this := range c {\n\t\tif this.SubsetOf(that) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func compareSlices(A, B []int) bool {\n\tif len(A) != len(B) {\n\t\treturn false\n\t}\n\n\tsort.Ints(A)\n\tsort.Ints(B)\n\n\tfor i, a := range A {\n\t\tif a != B[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func (s String) IsProperSubset(other String) bool {\n\treturn len(s) < len(other) && s.IsSubset(other)\n}",
"func Intersection(a, b AnySlice) AnySlice {\n\tmustBeSlice(a)\n\tmustBeSlice(b)\n\n\taVal := reflect.ValueOf(a)\n\tbVal := reflect.ValueOf(b)\n\taCount := aVal.Len()\n\tbCount := bVal.Len()\n\toutput := makeFilterSlice(a, 0, aCount+bCount)\n\tkeys := make(map[interface{}]bool)\n\n\tfor i := 0; i < aCount; i++ {\n\t\tkeys[aVal.Index(i).Interface()] = true\n\t}\n\tfor i := 0; i < bCount; i++ {\n\t\tkey := bVal.Index(i)\n\t\tif _, present := keys[key.Interface()]; present {\n\t\t\toutput = reflect.Append(output, key)\n\t\t}\n\t}\n\treturn output.Interface()\n}",
"func SubjectIsSubsetMatch(subject, test string) bool {\n\ttsa := [32]string{}\n\ttts := tokenizeSubjectIntoSlice(tsa[:0], subject)\n\treturn isSubsetMatch(tts, test)\n}",
"func EqualInt8Slice(a, b []int8) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, v := range a {\n\t\tif v != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func RandomArraySubset(inslice []float64, percsample int) []float64 {\n\tsampsizefloat := float64(len(inslice)) * float64(percsample) / 100\n\tif sampsizefloat < 1 {\n\t\terr := errors.New(\"Input array has too few elements\")\n\t\tpanic(err)\n\t}\n\tsampsize := int(sampsizefloat)\n\trandtarget := rangen.RandIntegerInRange(0, len(inslice)-1)\n\tfmt.Println(len(inslice), randtarget, sampsize)\n\tif randtarget+sampsize > len(inslice) {\n\t\tdiff := (randtarget + sampsize) - len(inslice)\n\t\tarrEnd := inslice[randtarget:len(inslice)]\n\t\tarrStart := inslice[0:diff]\n\t\treturn append(arrEnd, arrStart...)\n\t}\n\treturn inslice[randtarget : randtarget+sampsize]\n}",
"func (bm ByteMap) Slice(includeKeys map[string]bool) ByteMap {\n\tresult, _ := bm.doSplit(false, includeKeys)\n\treturn result\n}",
"func InIntSlice(id int, idSli []int) bool {\n\tfor _, v := range idSli {\n\t\tif id == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (c *SingleItemExpCache) GetUintSlice() (data []uint, ok bool) {\n\tvar itf interface{}\n\tif itf, ok = c.Get(); !ok {\n\t\treturn nil, false\n\t}\n\n\treturn itf.([]uint), true\n}",
"func IntersectionSlice(slice1, slice2 []string) []string {\n\tvar result []string\n\tfor _, s1 := range slice1 {\n\t\tinSlice2 := false\n\t\tfor _, s2 := range slice2 {\n\t\t\tif s2 == s1 {\n\t\t\t\tinSlice2 = true\n\t\t\t}\n\t\t}\n\t\tif inSlice2 {\n\t\t\tresult = append(result, s1)\n\t\t}\n\t}\n\n\treturn result\n}",
"func EqualsSliceOfString(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func SliceContains(needle interface{}, haystack interface{}) bool {\n\thaystackValue := reflect.ValueOf(haystack)\n\thaystackValueKind := haystackValue.Kind()\n\n\tif haystackValueKind != reflect.Slice {\n\t\tpanic(fmt.Sprintf(\"haystackValue.Kind() should be reflect.Slice, detected: %v\", haystackValueKind))\n\t}\n\n\tfor i := 0; i < haystackValue.Len(); i++ {\n\t\t// panics if slice element points to an unexported struct field\n\t\t// see https://golang.org/pkg/reflect/#Value.Interface\n\t\tif haystackValue.Index(i).Interface() == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func doSlicesIntersect(s1, s2 []string) bool {\n if s1 == nil || s2 == nil {\n return false\n }\n for _, str := range s1 {\n if isElementInSlice(str, s2) {\n return true\n }\n }\n return false\n}",
"func Uint64SlicesEqual(a, b []uint64) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor idx, v := range a {\n\t\tif v != b[idx] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func ByteSlicesEqual(a, b []byte) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor idx, v := range a {\n\t\tif v != b[idx] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (b Bits) Slice() (s []int) {\n\tfor x, w := range b.Bits {\n\t\tif w == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tt := mb.TrailingZeros64(w)\n\t\ti := t // index in w of next 1 bit\n\t\tfor {\n\t\t\tn := x<<6 | i\n\t\t\tif n >= b.Num {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ts = append(s, n)\n\t\t\tw >>= uint(t + 1)\n\t\t\tif w == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt = mb.TrailingZeros64(w)\n\t\t\ti += 1 + t\n\t\t}\n\t}\n\treturn\n}",
"func EqualSlice(a, b []float64) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif EpsilonEqual(a[0], b[0]) != true {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func InSlice(v string, sl []string) bool {\n\tfor _, vv := range sl {\n\t\tif vv == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func sliceContains(haystack []string, needle string) bool {\n\tfor _, s := range haystack {\n\t\tif s == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func contains(a, b []string) bool {\n\tlena, lenb := len(a), len(b)\n\tif lena != lenb {\n\t\treturn false\n\t}\n\n\tfor i := 0; i < lena; i++ {\n\t\tj := 0\n\t\tfor ; j < lenb; j++ {\n\t\t\tif a[i] == b[j] {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif j >= lenb {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}"
] | [
"0.6548862",
"0.63709605",
"0.6341045",
"0.6319706",
"0.6259678",
"0.6141531",
"0.61243564",
"0.61081415",
"0.6035182",
"0.5946461",
"0.59458554",
"0.59438497",
"0.59422666",
"0.59412664",
"0.59306276",
"0.59273285",
"0.59237146",
"0.5862433",
"0.58578426",
"0.58344585",
"0.58241963",
"0.58127195",
"0.58062154",
"0.58002824",
"0.57952744",
"0.57418346",
"0.5738892",
"0.5728441",
"0.57275414",
"0.57078594",
"0.57046425",
"0.5703514",
"0.569385",
"0.569301",
"0.56897646",
"0.56661737",
"0.56307954",
"0.5622447",
"0.5546814",
"0.5514091",
"0.550023",
"0.54763067",
"0.5470725",
"0.5465868",
"0.5436261",
"0.5429419",
"0.54113543",
"0.5399974",
"0.53868926",
"0.5333423",
"0.53009707",
"0.5246746",
"0.5245536",
"0.5244501",
"0.5228069",
"0.5216665",
"0.5197393",
"0.5196998",
"0.5187854",
"0.51734567",
"0.51568633",
"0.5146479",
"0.5137859",
"0.5122403",
"0.5122152",
"0.5114315",
"0.5114083",
"0.5113593",
"0.51018417",
"0.5097926",
"0.5093901",
"0.5093819",
"0.50897145",
"0.5086162",
"0.50842327",
"0.5081607",
"0.5065324",
"0.50548905",
"0.5044117",
"0.5041841",
"0.5041668",
"0.5040351",
"0.50333685",
"0.5027297",
"0.5021661",
"0.5017718",
"0.5012949",
"0.5011614",
"0.5009824",
"0.49932614",
"0.49923214",
"0.49911222",
"0.4989593",
"0.49854794",
"0.49847248",
"0.49720013",
"0.4970265",
"0.49692166",
"0.4963689",
"0.4961032"
] | 0.76196134 | 0 |
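A minimal bool-returning sketch of the same map-based subset check used by UintSliceSubset above; the standalone helper name subset and the main demo are illustrative assumptions, not part of any listed API, and the trace-based error reporting is left out.

package main

import "fmt"

// subset reports whether every element of b is also present in a.
// It builds a set from a, then checks membership for each element of b.
func subset(a, b []uint16) bool {
	set := make(map[uint16]struct{}, len(a))
	for _, v := range a {
		set[v] = struct{}{}
	}
	for _, v := range b {
		if _, ok := set[v]; !ok {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(subset([]uint16{1, 2, 3}, []uint16{2, 3})) // true
	fmt.Println(subset([]uint16{1, 2, 3}, []uint16{4}))    // false
}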
RemoveFromSlice makes a copy of the slice and removes the passed in values from the copy. | func RemoveFromSlice(slice []string, values ...string) []string {
output := make([]string, 0, len(slice))
remove := make(map[string]bool)
for _, value := range values {
remove[value] = true
}
for _, s := range slice {
_, ok := remove[s]
if ok {
continue
}
output = append(output, s)
}
return output
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func RemoveFromSlice(slice []int, s int) []int {\n\treturn append(slice[:s], slice[s+1:]...)\n}",
"func RemoveFromSlice(slice []string, item string) []string {\n\tfor i, value := range slice {\n\t\tif value == item {\n\t\t\treturn append(slice[:i], slice[i+1:]...)\n\t\t}\n\t}\n\treturn slice\n}",
"func RemoveFromSlice(fullSlice []int, indexToRemove int) []int {\n\tfullSlice[indexToRemove] = fullSlice[len(fullSlice)-1]\n\treturn fullSlice[:len(fullSlice)-1]\n}",
"func (v *Data) RemoveSlice(start, end int) {\n\tdv := *v\n\n\t*v = append(dv[:start], dv[end:]...)\n\n\tv.Truncate(len(dv) - (end - start))\n}",
"func (v *IntVec) RemoveSlice(start, end int) {\n\tdv := *v\n\n\t*v = append(dv[:start], dv[end:]...)\n\n\tv.Truncate(len(dv) - (end - start))\n}",
"func (k *MutableKey) RemoveSlice(vals []uint64) {\n\tfor _, val := range vals {\n\t\tdelete(k.vals, val)\n\t\tk.synced = false\n\t}\n}",
"func removeFromSlice(s []string, toRemove string) ([]string, error) {\n\ti := -1\n\tfor index, item := range s {\n\t\tif item == toRemove {\n\t\t\ti = index\n\t\t}\n\t}\n\n\tif i == -1 {\n\t\treturn nil, fmt.Errorf(\"%v not found in list\", toRemove)\n\t}\n\n\ts[i] = s[len(s)-1]\n\treturn s[:len(s)-1], nil\n}",
"func (v *Int32Vec) RemoveSlice(start, end int) {\n\tdv := *v\n\n\t*v = append(dv[:start], dv[end:]...)\n\n\tv.Truncate(len(dv) - (end - start))\n}",
"func removeFromSlice(rrs []dns.RR, i int) []dns.RR {\n\tif i >= len(rrs) {\n\t\treturn rrs\n\t}\n\trrs = append(rrs[:i], rrs[i+1:]...)\n\treturn rrs\n}",
"func removeFromSlice(array []string, item string) []string {\n\tfor ind, val := range array {\n\t\tif val == item {\n\t\t\tarray[ind] = array[len(array)-1]\n\t\t\treturn array[:len(array)-1]\n\t\t}\n\t}\n\treturn array\n}",
"func remove(slice []int, i int) []int {\n // copy(dst, src)\n copy(slice[i:], slice[i+1:]) // over writes the slice from i to end with slice from i+1 to end\n return slice[:len(slice)-1]\n}",
"func DeleteInSlice(s interface{}, index int) interface{} {\n\tvalue := reflect.ValueOf(s)\n\tif value.Kind() == reflect.Slice {\n\t\t// || value.Kind() == reflect.Array {\n\t\tresult := reflect.AppendSlice(value.Slice(0, index), value.Slice(index+1, value.Len()))\n\t\treturn result.Interface()\n\t}\n\n\tklog.Errorf(\"Only a slice can be passed into this method for deleting an element of it.\")\n\treturn s\n}",
"func RemoveFromArray(slice []string, input string) []string {\n\tvar output []string\n\tfor i, item := range slice {\n\t\tif item == input {\n\t\t\toutput = append(slice[:i], slice[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn output\n}",
"func RemoveStringSliceCopy(slice []string, start, end int) []string {\n\tresult := make([]string, len(slice)-(end-start))\n\tat := copy(result, slice[:start])\n\tcopy(result[at:], slice[end:])\n\treturn result\n\n}",
"func RemoveItemFromSlice() {\n\tslice := []int{0, 1, 2, 3, 4, 5, 6}\n\tslice = append(slice[:2], slice[3:]...)\n\tfor _, val := range slice {\n\t\tfmt.Println(val)\n\t}\n}",
"func StringSliceRemove(list []string, s string) []string {\n\tfor i, v := range list {\n\t\tif v == s {\n\t\t\tlist = append(list[:i], list[i+1:]...)\n\t\t}\n\t}\n\treturn list\n}",
"func StringSliceRemove(list []string, s string) []string {\n\tfor i, v := range list {\n\t\tif v == s {\n\t\t\tlist = append(list[:i], list[i+1:]...)\n\t\t}\n\t}\n\treturn list\n}",
"func Remove(slice []int, s int) []int {\n\treturn append(slice[:s], slice[s+1:]...)\n}",
"func remove(slice []string, s int) []string {\n\treturn append(slice[:s], slice[s+1:]...)\n}",
"func remove(slice []string, s int) []string {\n\treturn append(slice[:s], slice[s+1:]...)\n}",
"func remove(slice []int, s int) []int {\n\treturn append(slice[:s], slice[s+1:]...)\n}",
"func RemoveValues[T comparable](slice, values []T) []T {\n\tif len(slice) == 0 {\n\t\treturn slice\n\t}\n\tkeys := make(map[T]struct{}, len(slice))\n\tfor _, v := range values {\n\t\tkeys[v] = struct{}{}\n\t}\n\n\tvar i int\n\tfor _, v := range slice {\n\t\tif _, ok := keys[v]; !ok {\n\t\t\tslice[i] = v\n\t\t\ti++\n\t\t}\n\t}\n\treturn slice[:i]\n}",
"func remove(slice []int, i int) []int{\n\tcopy(slice[i:],slice[i+1:])\n\treturn slice[:len(slice)-1]\n}",
"func RemoveWithKeepOrder(slice []string, s int) []string {\n\treturn append(slice[:s], slice[s+1:]...)\n}",
"func remove(slice []int16, s int) []int16 {\n\treturn append(slice[:s], slice[s+1:]...)\n}",
"func Remove(slice []string, value string) []string {\n\tfor i, s := range slice {\n\t\tif s == value {\n\t\t\tslice = append(slice[:i], slice[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn slice\n}",
"func RemoveIf(slice []string, p func(s string) bool) []string {\n\tif IsEmpty(slice) {\n\t\treturn slice\n\t}\n\n\tresult := make([]string, 0)\n\tfor _, s := range slice {\n\t\tif !p(s) {\n\t\t\tresult = append(result, s)\n\t\t}\n\t}\n\n\treturn result\n}",
"func RemoveAtIndex(slice []string, s int) []string {\n\treturn append(slice[:s], slice[s+1:]...)\n}",
"func removeSliceElements(txOuts []*apitypes.AddressTxnOutput, inds []int) []*apitypes.AddressTxnOutput {\n\t// Remove entries from the end to the beginning of the slice.\n\tsort.Slice(inds, func(i, j int) bool { return inds[i] > inds[j] }) // descending indexes\n\tfor _, g := range inds {\n\t\tif g > len(txOuts)-1 {\n\t\t\tcontinue\n\t\t}\n\t\ttxOuts[g] = txOuts[len(txOuts)-1] // overwrite element g with last element\n\t\ttxOuts[len(txOuts)-1] = nil // nil out last element\n\t\ttxOuts = txOuts[:len(txOuts)-1]\n\t}\n\treturn txOuts\n}",
"func ExcludeFromSlice(sl []string, exclude map[string]string) []string {\n\tres := make([]string, len(sl))\n\ti := 0\n\tfor k, v := range sl {\n\t\t_, isExcluded := exclude[v]\n\t\tif isExcluded {\n\t\t\tcontinue\n\t\t}\n\t\tres[k] = v\n\t\ti++\n\t}\n\treturn res[:i]\n}",
"func Remove(slice interface{}, i int) {\n\tneogointernal.Opcode2NoReturn(\"REMOVE\", slice, i)\n}",
"func deleteRecordFromSlice(slice []Record, id int) []Record {\n return append(slice[:id], slice[id+1:]...)\n}",
"func (es Slice) RemoveIf(f func(Value) bool) {\n\tnewLen := 0\n\tfor i := 0; i < len(*es.getOrig()); i++ {\n\t\tif f(es.At(i)) {\n\t\t\tcontinue\n\t\t}\n\t\tif newLen == i {\n\t\t\t// Nothing to move, element is at the right place.\n\t\t\tnewLen++\n\t\t\tcontinue\n\t\t}\n\t\t(*es.getOrig())[newLen] = (*es.getOrig())[i]\n\t\tnewLen++\n\t}\n\t// TODO: Prevent memory leak by erasing truncated values.\n\t*es.getOrig() = (*es.getOrig())[:newLen]\n}",
"func removeElementFromStringSlice(list []string, elem string) []string {\n\tfor i, e := range list {\n\t\tif e == elem {\n\t\t\treturn append(list[:i], list[i+1:]...)\n\t\t}\n\t}\n\treturn list\n}",
"func DeleteSlice(source []*Instance, index int) []*Instance {\n\tif len(source) == 1 {\n\t\treturn make([]*Instance, 0)\n\t}\n\tif index == 0 {\n\t\treturn source[1:]\n\t}\n\tif index == len(source)-1 {\n\t\treturn source[:len(source)-2]\n\t}\n\treturn append(source[0:index-1], source[index+1:]...)\n}",
"func FilterSlice[S any](s []S, keep func(S) bool) []S {\n\tvar result []S\n\tfor _, e := range s {\n\t\tif keep(e) {\n\t\t\tresult = append(result, e)\n\t\t}\n\t}\n\treturn result\n}",
"func RemoveElements(slice []string, drop []string) []string {\n\tres := []string{}\n\tfor _, s := range slice {\n\t\tkeep := true\n\t\tfor _, d := range drop {\n\t\t\tif s == d {\n\t\t\t\tkeep = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif keep {\n\t\t\tres = append(res, s)\n\t\t}\n\t}\n\treturn res\n}",
"func DeleteFromSlicePtr(parentSlice interface{}, index int) error {\n\tscope.Debugf(\"DeleteFromSlicePtr index=%d, slice=\\n%s\", index, pretty.Sprint(parentSlice))\n\tpv := reflect.ValueOf(parentSlice)\n\n\tif !IsSliceInterfacePtr(parentSlice) {\n\t\treturn fmt.Errorf(\"deleteFromSlicePtr parent type is %T, must be *[]interface{}\", parentSlice)\n\t}\n\n\tpvv := pv.Elem()\n\tif pvv.Kind() == reflect.Interface {\n\t\tpvv = pvv.Elem()\n\t}\n\n\tpv.Elem().Set(reflect.AppendSlice(pvv.Slice(0, index), pvv.Slice(index+1, pvv.Len())))\n\n\treturn nil\n}",
"func removeStringFromSlice(str string, slice []string) []string {\n\tfor i, v := range slice {\n\t\tif v == str {\n\t\t\t//append the subslice of all elements after this one, to the sublice of all elements before this one\n\t\t\treturn append(slice[:i], slice[i+1:]...)\n\t\t}\n\t}\n\n\t//if the string was not present, just return the slice back\n\treturn slice\n}",
"func subtractSlice(x, y []string) []string {\n\tm := make(map[string]bool)\n\n\tfor _, y := range y {\n\t\tm[y] = true\n\t}\n\n\tvar ret []string\n\tfor _, x := range x {\n\t\tif m[x] {\n\t\t\tcontinue\n\t\t}\n\t\tret = append(ret, x)\n\t}\n\n\treturn ret\n}",
"func Remove(series interface{}, removes ...interface{}) (interface{}, error) {\n\tst := reflect.TypeOf(series)\n\tsv := reflect.ValueOf(series)\n\n\tswitch {\n\tcase st.Kind() != reflect.Array && st.Kind() != reflect.Slice:\n\t\treturn nil, ErrNotArrayOrSlice\n\tcase st.Elem().Kind() == reflect.Func:\n\t\treturn nil, ErrNotSupported\n\tcase len(removes) == 0:\n\t\treturn series, nil\n\tcase st.Elem().Kind() != reflect.TypeOf(removes[0]).Kind():\n\t\treturn nil, ErrNotCompatible\n\t}\n\n\tremoved := reflect.MakeSlice(reflect.SliceOf(st.Elem()), 0, 0)\n\tswitch st.Elem().Kind() {\n\tcase reflect.Map, reflect.Slice:\n\t\tfor i := 0; i < sv.Len(); i++ {\n\t\t\tfound := false\n\t\t\tfor _, r := range removes {\n\t\t\t\tif reflect.DeepEqual(sv.Index(i).Interface(), r) {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tremoved = reflect.Append(removed, sv.Index(i))\n\t\t\t}\n\t\t}\n\t\treturn removed.Interface(), nil\n\tdefault:\n\t\tfilter := reflect.MakeMapWithSize(reflect.MapOf(st.Elem(), reflect.TypeOf(true)), len(removes))\n\t\tfor _, r := range removes {\n\t\t\tfilter.SetMapIndex(reflect.ValueOf(r), reflect.ValueOf(true))\n\t\t}\n\t\tfor i := 0; i < sv.Len(); i++ {\n\t\t\tif !filter.MapIndex(sv.Index(i)).IsValid() {\n\t\t\t\tremoved = reflect.Append(removed, sv.Index(i))\n\t\t\t}\n\t\t}\n\t\treturn removed.Interface(), nil\n\t}\n}",
"func (p *SliceOfMap) Drop(indices ...int) ISlice {\n\tif p == nil || len(*p) == 0 {\n\t\treturn p\n\t}\n\n\t// Handle index manipulation\n\ti, j, err := absIndices(len(*p), indices...)\n\tif err != nil {\n\t\treturn p\n\t}\n\n\t// Execute\n\tn := j - i\n\tif i+n < len(*p) {\n\t\t*p = append((*p)[:i], (*p)[i+n:]...)\n\t} else {\n\t\t*p = (*p)[:i]\n\t}\n\treturn p\n}",
"func remove2(slice []int, i int) []int{\n\tslice[i] = slice[len(slice)-1]\n return slice[:len(slice)-1]\n}",
"func remove(slice []string, i int) []string {\n\treturn append(slice[:i], slice[i+1:]...)\n}",
"func SliceDeleteElement(slice interface{}, removalIndex int) (resultSlice interface{}) {\n\tsliceObj := reflect.ValueOf(slice)\n\n\tif sliceObj.Kind() == reflect.Ptr {\n\t\tsliceObj = sliceObj.Elem()\n\t}\n\n\tif sliceObj.Kind() != reflect.Slice {\n\t\treturn nil\n\t}\n\n\tif removalIndex < 0 {\n\t\tremovalIndex = sliceObj.Len() - AbsInt(removalIndex)\n\n\t\tif removalIndex < 0 {\n\t\t\treturn slice\n\t\t}\n\t}\n\n\tif removalIndex > sliceObj.Len()-1 {\n\t\treturn slice\n\t}\n\n\trm := sliceObj.Index(removalIndex)\n\tlast := sliceObj.Index(sliceObj.Len() - 1)\n\n\tif rm.CanSet() {\n\t\trm.Set(last)\n\t} else {\n\t\treturn slice\n\t}\n\n\treturn sliceObj.Slice(0, sliceObj.Len()-1).Interface()\n}",
"func RemoveStringInSlice(a string, l []string) ([]string, bool) {\n\ti, in := IndexStringInSlice(a, l)\n\n\tif in {\n\t\tl = append(l[:i], l[i+1:]...)\n\t}\n\treturn l, in\n}",
"func FilterSlice(in []int) ([]int, error) {\n\tif in == nil {\n\t\treturn nil, errors.New(\"input slice is nil\")\n\t}\n\n\tfilterList := map[int]bool{\n\t\t1: true,\n\t\t3: true,\n\t}\n\n\tinLen := len(in)\n\tfilteredSlice := make([]int, 0, inLen)\n\n\tfor _, value := range in {\n\t\tif _, ok := filterList[value]; ok {\n\t\t\tfilteredSlice = append(filteredSlice, value)\n\t\t}\n\t}\n\n\treturn filteredSlice, nil\n}",
"func removeItemFromEquals(slice []Equal, index int) []Equal {\n\tcopy(slice[index:], slice[index+1:])\n\tslice[len(slice)-1] = Equal{}\n\tslice = slice[:len(slice)-1]\n\n\treturn slice\n}",
"func remove(list []*IPRange, index int) []*IPRange {\n\tfor i := index + 1; i < len(list); i++ {\n\t\tlist[i-1] = list[i]\n\t}\n\treturn list[:len(list)-1]\n}",
"func RemoveAtIndex(slice []string, index int) ([]string, bool) {\n\tif index < 0 || IsEmpty(slice) || index > len(slice) {\n\t\treturn slice, false\n\t}\n\n\treturn append(slice[:index], slice[index+1:]...), true\n}",
"func removeAtIndex(source []int, index int) []int {\n\tlastIndex := len(source) - 1\n\tsource[index], source[lastIndex] = source[lastIndex], source[index]\n\treturn source[:lastIndex]\n}",
"func removeString(slice []string, s string) (result []string) {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\tcontinue\n\t\t}\n\t\tresult = append(result, item)\n\t}\n\treturn\n}",
"func RemoveFromBooks (index int) {\n\tn := len(books)\n\tbooks [index] = books [n - 1]\n\tbooks = books[:n - 1]\n}",
"func (obj *Object) RemoveValueFromArray(field string, value interface{}) *Object {\n\tobj.changedData[field] = map[string]interface{}{\"__op\": \"Remove\", \"objects\": []interface{}{value}}\n\treturn obj\n}",
"func (obj *Object) RemoveValueFromArrayFromList(field string, value []interface{}) *Object {\n\tobj.changedData[field] = map[string]interface{}{\"__op\": \"Remove\", \"objects\": value}\n\treturn obj\n}",
"func removeBlockNodeFromSlice(nodes []*BlockNode, node *BlockNode) []*BlockNode {\n\tfor i := range nodes {\n\t\tif nodes[i].Hash.IsEqual(node.Hash) {\n\t\t\tcopy(nodes[i:], nodes[i+1:])\n\t\t\tnodes[len(nodes)-1] = nil\n\t\t\treturn nodes[:len(nodes)-1]\n\t\t}\n\t}\n\treturn nodes\n}",
"func (es ExemplarSlice) RemoveIf(f func(Exemplar) bool) {\n\tnewLen := 0\n\tfor i := 0; i < len(*es.orig); i++ {\n\t\tif f(es.At(i)) {\n\t\t\tcontinue\n\t\t}\n\t\tif newLen == i {\n\t\t\t// Nothing to move, element is at the right place.\n\t\t\tnewLen++\n\t\t\tcontinue\n\t\t}\n\t\t(*es.orig)[newLen] = (*es.orig)[i]\n\t\tnewLen++\n\t}\n\t// TODO: Prevent memory leak by erasing truncated values.\n\t*es.orig = (*es.orig)[:newLen]\n}",
"func RemoveElements(originalSlice []string, removeElementSlice []string) []string {\n\n\tfor _, elem := range removeElementSlice {\n\t\t// search is linear but can be improved\n\t\tfor i := 0; i < len(originalSlice); i++ {\n\t\t\tif originalSlice[i] == elem {\n\t\t\t\toriginalSlice = append(originalSlice[:i], originalSlice[i+1:]...)\n\t\t\t\ti--\n\t\t\t}\n\t\t}\n\t}\n\treturn originalSlice\n}",
"func RemoveStringFromStringSlice(str string, elements []string) []string {\n\tresult := []string{}\n\tfor _, el := range elements {\n\t\tif str != el {\n\t\t\tresult = append(result, el)\n\t\t}\n\t}\n\treturn result\n}",
"func removeAll(source []byte, remove []byte) []byte {\n for bytes.Index(source, remove) > -1 {\n pnt := bytes.Index(source, remove)\n source = append(source[:pnt], source[pnt+12:]...)\n }\n return source\n}",
"func RemoveItem(slice []int, index int) []int {\n\tsliceLength := len(slice)\n\tif checkOutOfBounds(index, sliceLength) {\n\t\treturn slice\n\t}\n\tremovedSlice := slice[:index]\n\t// If the removed index is not at the end ...\n\tif index+1 < sliceLength {\n\t\t// tack on the rest of the slice after the removed index.\n\t\tremovedSlice = append(removedSlice, slice[index+1:]...)\n\t}\n\treturn removedSlice\n}",
"func (s *SegmentChangesWrapper) RemoveFromSegment(segmentName string, keys []string) error {\n\treturn errSegmentStorageNotImplementedMethod\n}",
"func (s *ConcurrentSlice) Remove(e int64) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\ti := s.index(e)\n\ts.items = append(s.items[0:i], s.items[i+1:]...)\n}",
"func (a Slice[T]) DeleteAt(index int) Slice[T] {\n\treturn append(a[:index], a[index+1:]...)\n}",
"func Delete(slice []int, index int) []int {\n\treturn append(slice[:index], slice[index+1:]...)\n}",
"func (list *TList) Trim(start, stop int) {\n\tlist.mux.Lock()\n\n\tstart = list.convertPos(start)\n\tstop = list.convertPos(stop)\n\n\tif start > list.Len()-1 {\n\t\tlist.mux.Unlock()\n\t\treturn\n\t}\n\n\tif start < 0 {\n\t\tstart = 0\n\t}\n\n\tif stop > list.Len() {\n\t\tstop = list.Len() - 1\n\t}\n\n\tif stop < start {\n\t\tlist.mux.Unlock()\n\t\treturn\n\t}\n\n\titemsForRemoveFromHead := start\n\titemsForRemoveFromTail := list.Len() - 1 - stop\n\n\tlist.mux.Unlock()\n\n\t// TODO We need a more optimized method\n\tfor i := itemsForRemoveFromHead; i > 0; i-- {\n\t\tlist.HPop()\n\t}\n\n\tfor i := itemsForRemoveFromTail; i > 0; i-- {\n\t\tlist.TPop()\n\t}\n}",
"func (cs *Set) Remove(exclude Set) {\n\tif exclude.MatchesAny() {\n\t\tif cs.MatchesAny() {\n\t\t\t*cs = Set{}\n\t\t} else {\n\t\t\t*cs = (*cs)[:0]\n\t\t}\n\t\treturn\n\t}\n\tif len(exclude) == 0 {\n\t\treturn\n\t}\n\ts := *cs\n\tremoved := 0\n\tfor i, cc := range s {\n\t\tif exclude.Contains(cc) {\n\t\t\tremoved++\n\t\t} else {\n\t\t\tif removed > 0 { // shift\n\t\t\t\ts[i-removed] = s[i]\n\t\t\t}\n\t\t}\n\t}\n\tif removed > 0 {\n\t\t*cs = s[:len(s)-removed]\n\t}\n}",
"func SubtractStringSlice(ss []string, str string) []string {\n\tvar res []string\n\tfor _, s := range ss {\n\t\tif strings.EqualFold(s, str) {\n\t\t\tcontinue\n\t\t}\n\t\tres = append(res, s)\n\t}\n\treturn res\n}",
"func ExampleIntSet_Remove() {\n\ts1 := gset.NewIntSet()\n\ts1.Add([]int{1, 2, 3, 4}...)\n\ts1.Remove(1)\n\tfmt.Println(s1.Slice())\n\n\t// May Output:\n\t// [3 4 2]\n}",
"func (list *List) RemoveAll(slice *Slice) {\n\tfor _, e := range slice.Slice() {\n\t\tlist.Remove(e)\n\t}\n}",
"func removeIndex(slice []int, index int) []int {\n\tret := make([]int, 0)\n\tret = append(ret, slice[:index]...)\n\treturn append(ret, slice[index+1:]...)\n}",
"func (r *Repo) RemoveFromSet(field string, value interface{}, i interface{}) error {\n\treturn r.toggleInSet(\"$pull\", field, value, i)\n}",
"func RemoveString(slice []string, s string) (bool, []string) {\n\tremoved := false\n\tresult := []string{}\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\tremoved = true\n\t\t\tcontinue\n\t\t}\n\t\tresult = append(result, item)\n\t}\n\treturn removed, result\n}",
"func SliceDelFirstVal(a interface{}, val interface{}) interface{} {\n\tswitch a.(type) {\n\tcase []int:\n\t\treturn SliceDelFirstValInt(a.([]int), val.(int))\n\tdefault:\n\t\tpanic(\"not support type\")\n\t}\n}",
"func (c *Collection) Removing() *Slice {\n\treturn c.unregister\n}",
"func remove[T any](s []T, i int) []T {\n\ts[i] = s[len(s)-1]\n\treturn s[:len(s)-1]\n}",
"func delete(slice []string, el string) (a []string) {\n\ti := -1\n\tfor j, s := range slice {\n\t\tif s == el {\n\t\t\ti = j\n\t\t}\n\t}\n\ta = append(slice[:i], slice[i+1:]...)\n\treturn a\n}",
"func CopySlice(slice []byte) []byte {\n\tcopy := append(slice[:0:0], slice...)\n\treturn copy\n}",
"func (s S) SetSlice(key, value string, before, after int) (slice []string, err error) {\n\tvar vv SortedString\n\terr = s.ReadModify(key, &vv, func(_ interface{}) (r bool) {\n\t\tslice = vv.Slice(value, before, after)\n\t\treturn\n\t})\n\treturn\n}",
"func (a myArray) splice(start int, data ...string) myArray {\n\tcopy(a[start:], data)\n\treturn a\n}",
"func (s strings) Remove(in []string, remove ...string) []string {\n\treturn s.Filter(in, func(item string) bool {\n\t\tfound := false\n\t\tfor _, removeItem := range remove {\n\t\t\tif removeItem == item {\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\t\treturn !found\n\t})\n}",
"func (a *Attribute) RemoveValues(v interface{}) {\n\tif !a.IncludeValues(v) {\n\t\treturn\n\t}\n\n\t_i := a.IndexValues(v)\n\n\ta.Values = append(a.Values[:_i], a.Values[_i+1:]...)\n}",
"func (s Strings) Remove(a string) []string {\n\ti := sort.SearchStrings(s, a)\n\tif s[i] != a {\n\t\treturn s\n\t}\n\treturn append(s[:i], s[i+1:]...)\n}",
"func (_m *MockSegmentManager) RemoveBy(filters ...SegmentFilter) {\n\t_va := make([]interface{}, len(filters))\n\tfor _i := range filters {\n\t\t_va[_i] = filters[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _va...)\n\t_m.Called(_ca...)\n}",
"func (s StringSet) Remove(values ...string) {\n\tfor _, value := range values {\n\t\tdelete(s, value)\n\t}\n}",
"func ModifySlice(sliceptr interface{}, eq func(i, j int) bool) {\n\trvp := reflect.ValueOf(sliceptr)\n\tif rvp.Type().Kind() != reflect.Ptr {\n\t\tpanic(badTypeError{rvp.Type()})\n\t}\n\trv := rvp.Elem()\n\tif rv.Type().Kind() != reflect.Slice {\n\t\tpanic(badTypeError{rvp.Type()})\n\t}\n\n\tlength := rv.Len()\n\tdst := 0\n\tfor i := 1; i < length; i++ {\n\t\tif eq(dst, i) {\n\t\t\tcontinue\n\t\t}\n\t\tdst++\n\t\t// slice[dst] = slice[i]\n\t\trv.Index(dst).Set(rv.Index(i))\n\t}\n\n\tend := dst + 1\n\tvar zero reflect.Value\n\tif end < length {\n\t\tzero = reflect.Zero(rv.Type().Elem())\n\t}\n\n\t// for i := range slice[end:] {\n\t// size[i] = 0/nil/{}\n\t// }\n\tfor i := end; i < length; i++ {\n\t\t// slice[i] = 0/nil/{}\n\t\trv.Index(i).Set(zero)\n\t}\n\n\t// slice = slice[:end]\n\tif end < length {\n\t\trv.SetLen(end)\n\t}\n}",
"func (vector *Vector) Cut(i int, j int) {\n\t//a = append(a[:i], a[j:]...)\n\t// NOTE If the type of the element is a pointer or a struct with pointer fields,\n\t// which need to be garbage collected, the above implementation of Cut has a potential\n\t// memory leak problem: some elements with values are still referenced by slice a and\n\t// thus can not be collected. The following code can fix this problem:\n\n\tcopy((*vector)[i:], (*vector)[j:])\n\tfor k, n := len(*vector)-j+i, len(*vector); k < n; k++ {\n\t\t(*vector)[k] = nil // or the zero value of T\n\t}\n\t*vector = (*vector)[:len(*vector)-j+i]\n}",
"func (s *IntSlicer) Clear() {\n\ts.slice = []int{}\n}",
"func BenchmarkSliceDel(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tsliceDel([]string{\"a\", \"b\", \"c\"}, \"c\")\n\t}\n}",
"func (t *StringSlice) RemoveAt(i int) bool {\n\tif i >= 0 && i < len(t.items) {\n\t\tt.items = append(t.items[:i], t.items[i+1:]...)\n\t\treturn true\n\t}\n\treturn false\n}",
"func RemoveAtIndex(data interface{}, index int) (interface{}, error) {\n\t// Get concrete value of data\n\tvalue := reflect.ValueOf(data)\n\n\t// Get the type of value\n\tvalueType := value.Type()\n\n\tif valueType.Kind() != reflect.Array && valueType.Kind() != reflect.Slice {\n\t\terr := errors.New(\"Data parameter is not an array or slice\")\n\t\treturn nil, err\n\t}\n\n\tif index >= value.Len() {\n\t\terr := errors.New(\"Index is greater than data length\")\n\t\treturn nil, err\n\t}\n\n\t// Create slice from value\n\tresultSlice := reflect.AppendSlice(value.Slice(0, index), value.Slice(index+1, value.Len()))\n\n\treturn resultSlice.Interface(), nil\n}",
"func removeItemByIndex(slice []string, idx int) []string {\n\n\tcopy(slice[idx:], slice[idx+1:]) // Shift slice[idx+1:] left one index.\n\tslice[len(slice)-1] = \"\" // Erase last element (write zero value).\n\treturn slice[:len(slice)-1] // Truncate slice.\n}",
"func RemoveDuplicates(slice []string) []string {\n\treturn MapToSlice(SliceToMap(slice))\n}",
"func RemoveFromTaskArray(arr []*Task, ndx int) []*Task {\n\tif ndx < 0 || ndx >= len(arr) {\n\t\treturn arr\n\t}\n\treturn append(arr[0:ndx], arr[ndx+1:]...)\n}",
"func remove(s []int, i int) []int {\n\ts[i] = s[len(s)-1]\n\treturn s[:len(s)-1]\n}",
"func removeIfEquals(slice *[]string, match string) {\n\ti := 0\n\tp := *slice\n\tfor _, entry := range p {\n\t\tif strings.TrimSpace(entry) != strings.TrimSpace(match) {\n\t\t\tp[i] = entry\n\t\t\ti++\n\t\t}\n\t}\n\t*slice = p[0:i]\n}",
"func RemoveOne(target interface{}, src []interface{}) []interface{} {\n\ttndx := -1\n\tfor ndx, val := range src {\n\t\tif val == target {\n\t\t\ttndx = ndx\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif tndx > -1 {\n\t\tsrc[tndx] = src[len(src)-1]\n\t\tsrc = src[:len(src)-1]\n\t}\n\n\treturn src\n}",
"func (s *Set) Remove(items ...uint32) *Set {\n\tfor _, item := range items {\n\t\tdelete(s.items, item)\n\t}\n\treturn s\n}",
"func main() {\n\t//cretaing slice using make buildin function type, len, capacity\n\ti := make([]int, 5, 5)\n\tfmt.Println(i)\n\n\tfruits := []string{\"apple\", \"grape\", \"banana\", \"melon\"}\n\tfmt.Println(fruits)\n\tvar aFruits = fruits[0:3]\n\tfmt.Println(aFruits)\n\tvar bFruits = fruits[1:4]\n\tfmt.Println(bFruits)\n\tvar cfruits = append(fruits, \"papaya\")\n\tfmt.Println(cfruits)\n\n\tstudents := []string{\"ara\", \"fanta\", \"jevan\"}\n\tfmt.Println(students)\n\t//remove slice\n\tstudents = students[0:2]\n\tfmt.Println(students)\n\tstudents = append(students, \"misa\")\n\tfmt.Println(students)\n\tfmt.Println(len(students))\n\tfor index := 0; index < len(students); index++ {\n\t\tfmt.Println(\"ok\")\n\t}\n\ttestSliceNotPointer(students)\n\tfmt.Println(students)\n\ttestSlice(&students)\n\tfmt.Println(students)\n}",
"func (v values) Remove(keys ...string) {\n\tfor _, key := range keys {\n\t\tdelete(v, key)\n\t}\n}"
] | [
"0.72074586",
"0.716618",
"0.7101833",
"0.7082994",
"0.70630497",
"0.6932562",
"0.6897101",
"0.68811226",
"0.6855913",
"0.6505445",
"0.63579625",
"0.63161755",
"0.6234457",
"0.62126327",
"0.61018604",
"0.59863406",
"0.59863406",
"0.59863025",
"0.5968467",
"0.5968467",
"0.5958366",
"0.5935262",
"0.5926449",
"0.59181917",
"0.5912026",
"0.59089",
"0.584173",
"0.5821563",
"0.581494",
"0.576812",
"0.5757476",
"0.57540053",
"0.57474166",
"0.57435644",
"0.56565726",
"0.56502724",
"0.56491965",
"0.5587767",
"0.55721563",
"0.5528682",
"0.55229205",
"0.5520467",
"0.5491185",
"0.5447094",
"0.53916746",
"0.5391634",
"0.5371571",
"0.53422344",
"0.5294048",
"0.5266688",
"0.5265465",
"0.5255518",
"0.5233026",
"0.52301276",
"0.5201701",
"0.5201284",
"0.51872045",
"0.51512194",
"0.5133709",
"0.5133515",
"0.50843376",
"0.5079699",
"0.50658554",
"0.50403106",
"0.50268364",
"0.50254935",
"0.5015741",
"0.50074464",
"0.4999378",
"0.4996946",
"0.4995773",
"0.4988785",
"0.49701327",
"0.4955219",
"0.49416986",
"0.4928585",
"0.4922169",
"0.4901508",
"0.49011314",
"0.48892424",
"0.48877373",
"0.48825845",
"0.48801306",
"0.48761123",
"0.48732552",
"0.4872013",
"0.48683402",
"0.4865019",
"0.48544756",
"0.48516953",
"0.48516732",
"0.4842482",
"0.48337054",
"0.4825584",
"0.48250613",
"0.48172286",
"0.48158532",
"0.48158246",
"0.4811272",
"0.47978652"
] | 0.76177776 | 0 |
ChooseRandomString returns a random string from the given slice. | func ChooseRandomString(slice []string) string {
switch len(slice) {
case 0:
return ""
case 1:
return slice[0]
default:
return slice[rand.Intn(len(slice))]
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func ChooseString(l []string) string {\n\tif len(l) == 0 {\n\t\treturn \"\"\n\t}\n\trand.Seed(time.Now().UnixNano())\n\treturn l[rand.Intn(len(l))]\n}",
"func (h *Random) StringFromSlice(in []string) string {\n\trandomIndex := rand.Intn(len(in))\n\treturn in[randomIndex]\n}",
"func RandomString(values ...string) string {\n\tif len(values) == 0 {\n\t\treturn \"\"\n\t}\n\tif len(values) == 1 {\n\t\treturn values[0]\n\t}\n\treturn values[provider.Intn(len(values))]\n}",
"func RandomString(len int) string {\n\tstr := make([]byte, len)\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\tfor i := range str {\n\t\tstr[i] = randomPool[rand.Intn(poolLength)]\n\t}\n\n\treturn string(str)\n}",
"func (c combinatorics) RandomString(values []string) string {\n\tif len(values) == 0 {\n\t\treturn \"\"\n\t}\n\tif len(values) == 1 {\n\t\treturn values[0]\n\t}\n\treturn values[RandomProvider().Intn(len(values))]\n}",
"func (ur UnicodeRanges) randString(r *rand.Rand) string {\n\tn := r.Intn(20)\n\tsb := strings.Builder{}\n\tsb.Grow(n)\n\tfor i := 0; i < n; i++ {\n\t\tsb.WriteRune(ur[r.Intn(len(ur))].choose(r))\n\t}\n\treturn sb.String()\n}",
"func (ur UnicodeRange) randString(r *rand.Rand) string {\n\tn := r.Intn(20)\n\tsb := strings.Builder{}\n\tsb.Grow(n)\n\tfor i := 0; i < n; i++ {\n\t\tsb.WriteRune(ur.choose(r))\n\t}\n\treturn sb.String()\n}",
"func (h *Haikunator) randomString(s []string) string {\n\tsize := len(s)\n\n\tif size <= 0 {\n\t\treturn \"\"\n\t}\n\n\treturn s[h.Random.Intn(size)]\n}",
"func getRandomString(length int) (string, error) {\n\tbuf := make([]byte, length)\n\tif _, err := rand.Read(buf); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor i := 0; i < length; {\n\t\tidx := int(buf[i] & letterIdxMask)\n\t\tif idx < letterSize {\n\t\t\tbuf[i] = letters[idx]\n\t\t\ti++\n\t\t} else {\n\t\t\tif _, err := rand.Read(buf[i : i+1]); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t}\n\treturn string(buf), nil\n}",
"func GetRandomStr(r *rand.Rand, arr []string) string {\n\treturn arr[r.Intn(len(arr))]\n}",
"func GenerateRandomString(stringLen int) string {\n\tb := make([]byte, stringLen)\n\tfor i := range b {\n\t\tb[i] = charset[seededRand.Intn(len(charset))]\n\t}\n\treturn string(b)\n}",
"func StringRand(length int) string {\n\treturn StringRandWithCharset(length, CharsetDefault)\n}",
"func RandomString(n int) string {\n\treturn string(Random(n))\n}",
"func RandomString(length int, strChars string) string {\n\trand.Seed(time.Now().UnixNano())\n\tchars := []rune(strChars)\n\tfmt.Println(chars)\n\tvar b strings.Builder\n\tfor i := 0; i < length; i++ {\n\t\tb.WriteRune(chars[rand.Intn(len(chars))])\n\t}\n\treturn b.String()\n}",
"func RandomString(rand *rand.Rand, size int) string {\n\tsb := strings.Builder{}\n\tfor sb.Len() <= size {\n\t\tsb.WriteRune(RandomRune(rand, 2, 5))\n\t}\n\tret := sb.String()\n\t_, lastRuneSize := utf8.DecodeLastRuneInString(ret)\n\treturn ret[0 : len(ret)-lastRuneSize]\n}",
"func RandomString(len int) string {\n\tbytes := make([]byte, len)\n\tfor i := 0; i < len; i++ {\n\t\tbytes[i] = byte(65 + rand.Intn(25))\n\t}\n\treturn string(bytes)\n}",
"func randString(length int) string {\n\tb := make([]rune, length)\n\tfor i := range b {\n\t\tb[i] = runes[rand.Intn(len(runes))]\n\t}\n\treturn string(b)\n}",
"func RandString(length int) string {\n\trand.Seed(time.Now().UnixNano())\n\trs := make([]string, length)\n\tfor start := 0; start < length; start++ {\n\t\tt := rand.Intn(3)\n\t\tif t == 0 {\n\t\t\trs = append(rs, strconv.Itoa(rand.Intn(10)))\n\t\t} else if t == 1 {\n\t\t\trs = append(rs, string(rand.Intn(26)+65))\n\t\t} else {\n\t\t\trs = append(rs, string(rand.Intn(26)+97))\n\t\t}\n\t}\n\treturn strings.Join(rs, \"\")\n}",
"func RandomString(length uint) (string, error) {\n\tr := make([]byte, length)\n\tbs := int(float64(length) * 1.3)\n\tvar err error\n\tfor i, j, rb := 0, 0, []byte{}; uint(i) < length; j++ {\n\t\tif j%bs == 0 {\n\t\t\trb, err = RandomBytes(uint(bs))\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t\tif idx := uint(rb[j%int(length)] & bitmask); idx < uint(len(letters)) {\n\t\t\tr[i] = letters[idx]\n\t\t\ti++\n\t\t}\n\t}\n\n\treturn string(r), nil\n}",
"func RandomString(length int) string {\n\trandomString := \"\"\n\n\tfor len(randomString) < length {\n\t\trandomString += strconv.Itoa(rand.Int())\n\t}\n\n\treturn randomString[:length]\n}",
"func RandomString(maxlen int, charset string) string {\n\tvar s string\n\tfor i := 0; i < rand.Intn(maxlen)+1; i++ {\n\t\ts = s + string(charset[rand.Intn(len(charset))])\n\t}\n\treturn s\n}",
"func RandomString(n int) string {\n\tresult := make([]byte, n)\n\tfor i := range result {\n\t\tresult[i] = CharSet[rnd.Intn(len(CharSet))]\n\t}\n\treturn string(result)\n}",
"func RandomString(n int) string {\n\tresult := make([]byte, n)\n\tfor i := range result {\n\t\tresult[i] = CharSet[rnd.Intn(len(CharSet))]\n\t}\n\treturn string(result)\n}",
"func randomString(n int) string {\n\tresult := make([]byte, n)\n\tfor i := range result {\n\t\tresult[i] = charSet[rnd.Intn(len(charSet))]\n\t}\n\treturn string(result)\n}",
"func RandomString(n int) string {\n\treturn RandomStringFrom(n, randomBase)\n}",
"func randomString(len int) string {\n\tbytes := make([]byte, len)\n\tfor i := 0; i < len; i++ {\n\t\tbytes[i] = byte(randomInt(97, 122))\n\t}\n\treturn string(bytes)\n}",
"func RandomString(length int) string {\n\tsb := strings.Builder{}\n\n\trand.Seed(time.Now().UnixNano())\n\tfor i := 0; i < length; i++ {\n\t\tsb.WriteByte(_letters[rand.Intn(len(_letters))])\n\t}\n\n\treturn sb.String()\n}",
"func RandString(n int, allowedChars ...[]rune) string {\n\tvar letters []rune\n\tif len(allowedChars) == 0 {\n\t\tletters = defaultLetters\n\t} else {\n\t\tletters = allowedChars[0]\n\t}\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(b)\n}",
"func randomString(len int) string {\n\tbytes := make([]byte, len)\n\tfor i := 0; i < len; i++ {\n\t\tbytes[i] = byte(randomInt(65, 90))\n\t}\n\treturn string(bytes)\n}",
"func randomString(len int) string {\n\tbytes := make([]byte, len)\n\tfor i := 0; i < len; i++ {\n\t\tbytes[i] = byte(randomInt(65, 90))\n\t}\n\treturn string(bytes)\n}",
"func randomString(len int) string {\n\tbytes := make([]byte, len)\n\tfor i := 0; i < len; i++ {\n\t\tbytes[i] = byte(randomInt(65, 90))\n\t}\n\treturn string(bytes)\n}",
"func randomString(len int) string {\n\tbytes := make([]byte, len)\n\tfor i := 0; i < len; i++ {\n\t\tbytes[i] = byte(randomInt(65, 90))\n\t}\n\treturn string(bytes)\n}",
"func getRandString(n int) string {\n\tpool := []rune(\"ABCDEFGHIJKLMNOPQRSTUVWXYZÜÖÄabcdefghijklmnopqrstuvwxyzüöä\")\n\ts := make([]rune, n)\n\n\tfor pos := range s {\n\t\ts[pos] = pool[rand.Intn(len(pool))]\n\t}\n\n\treturn string(s)\n}",
"func RandomString(length int) string {\n\tvar seed *rand.Rand = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))\n\n\tb := make([]byte, length)\n\tfor idx := range b {\n\t\tb[idx] = charset[seed.Intn(len(charset))]\n\t}\n\treturn string(b)\n}",
"func randomString(length int) string {\n\treturn stringWithCharset(length, charset)\n}",
"func randString(r *rand.Rand) string {\n\treturn defaultUnicodeRanges.randString(r)\n}",
"func RandStr(n int) string {\n\treturn RandStringRunes(n)\n}",
"func GetRandomString(length int) string {\n\tb := make([]byte, length)\n\trnd := rand.New(&source{})\n\n\tfor i := range b {\n\t\tc := rnd.Intn(allowedCharsSize)\n\t\tb[i] = allowedChars[c]\n\t}\n\n\treturn string(b)\n}",
"func RandString(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = CHA[rand.Intn(len(CHA))]\n\t}\n\treturn string(b)\n}",
"func StrSliceRandItem(strsli []string) string {\n\tif len(strsli) == 0 {\n\t\treturn \"\"\n\t}\n\tn := time.Now().UnixNano() % int64(len(strsli))\n\treturn strsli[n]\n}",
"func RandString(chars string, idxBits uint, idxMask int64, idxMax int, n int) string {\n\tb := make([]byte, n)\n\t// A rand.Int63() generates 63 random bits, enough for idCharIdxMax chars!\n\tfor i, cache, remain := n-1, rand.Int63(), idxMax; i >= 0; {\n\t\tif remain == 0 {\n\t\t\tcache, remain = rand.Int63(), idxMax\n\t\t}\n\t\tif idx := int(cache & idxMask); idx < len(chars) {\n\t\t\tb[i] = chars[idx]\n\t\t\ti--\n\t\t}\n\t\tcache >>= idxBits\n\t\tremain--\n\t}\n\n\treturn string(b)\n}",
"func RandomString(length int) string {\n\tb := make([]rune, length)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn string(b)\n}",
"func RandomString(length int) string {\n\n\tconst charset = \"abcdefghijklmnopqrstuvwxyz\" +\n\t\t\"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\" +\n\t\t\"1234567890\"\n\n\tvar seededRand *rand.Rand = rand.New(\n\t\trand.NewSource(time.Now().UnixNano()))\n\n\tb := make([]byte, length)\n\tfor i := range b {\n\t\tb[i] = charset[seededRand.Intn(len(charset))]\n\t}\n\treturn string(b)\n}",
"func RandomString(len int) string {\n\tbytes := make([]byte, len)\n\tfor i := 0; i < len; i++ {\n\t\tbytes[i] = byte(65 + rand.Intn(25)) // A=65 and Z = 65+25\n\t}\n\treturn string(bytes)\n}",
"func ShuffleStringSlice(a []string) {\n\tfor i := range a {\n\t\tj := rand.Intn(i + 1)\n\t\ta[i], a[j] = a[j], a[i]\n\t}\n}",
"func RandString(n int) string {\n\trandom := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[random.Intn(len(letterBytes))]\n\t}\n\treturn string(b)\n}",
"func randString(n int, characterSet string) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = characterSet[rand.Intn(len(characterSet))]\n\t}\n\treturn string(b)\n}",
"func GetRandomString(length int) string {\n\tstr := \"0123456789abcdefghijklmnopqrstuvwxyz\"\n\tbytes := []byte(str)\n\tresult := []byte{}\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tfor i := 0; i < length; i++ {\n\t\tresult = append(result, bytes[r.Intn(len(bytes))])\n\t}\n\treturn string(result)\n}",
"func RandString(n int) string {\n\tvar randBytes = make([]byte, n)\n\trand.Read(randBytes)\n\n\tfor i, b := range randBytes {\n\t\trandBytes[i] = letters[b%byte(len(letters))]\n\t}\n\n\treturn string(randBytes)\n}",
"func RandomString(length int) string {\n\trunes := make([]rune, length)\n\tfor i := range runes {\n\t\trunes[i] = allowedCharactersRunes[rand.Intn(len(allowedCharactersRunes))]\n\t}\n\treturn string(runes)\n}",
"func RandString(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Intn(len(letterBytes))]\n\t}\n\n\treturn string(b)\n}",
"func randStr(l int) string {\n\tbytes := make([]byte, l)\n\tfor i := 0; i < l; i++ {\n\t\tbytes[i] = pool[rand.Intn(len(pool))]\n\t}\n\treturn string(bytes)\n}",
"func RandomString(length int) string {\n\tseed := rand.New(rand.NewSource(time.Now().UnixNano()))\n\n\tb := make([]byte, length)\n\tfor i := range b {\n\t\tb[i] = charset[seed.Intn(len(charset))]\n\t}\n\n\treturn string(b)\n}",
"func RandomString(randLength int, randType string) (result string) {\n\tvar (\n\t\tnum = \"0123456789\"\n\t\tlower = \"abcdefghijklmnopqrstuvwxyz\"\n\t\tupper = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\t)\n\n\tb := bytes.Buffer{}\n\n\tswitch {\n\tcase strings.Contains(randType, \"0\"):\n\t\tb.WriteString(num)\n\tcase strings.Contains(randType, \"A\"):\n\t\tb.WriteString(upper)\n\tdefault:\n\t\tb.WriteString(lower)\n\t}\n\n\tstr := b.String()\n\tstrLen := len(str)\n\n\tb = bytes.Buffer{}\n\n\tfor i := 0; i < randLength; i++ {\n\t\tn, err := rand.Int(rand.Reader, big.NewInt(int64(strLen)))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tb.WriteByte(str[int32(n.Int64())])\n\t}\n\n\treturn b.String()\n}",
"func randChoice(list []string) string {\n randIndex := rand.Intn(len(list))\n return list[randIndex]\n}",
"func randomString() string {\n\tr := make([]rune, 20)\n\tfor i := range r {\n\t\tr[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(r)\n}",
"func randString(length int) string {\n\tcharset := \"abcdefghijklmnopqrstuvwxyz\" +\n\t\t\"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\tvar seededRand *rand.Rand = rand.New(\n\t\trand.NewSource(time.Now().UnixNano()))\n\tb := make([]byte, length)\n\tfor i := range b {\n\t\tb[i] = charset[seededRand.Intn(len(charset))]\n\t}\n\treturn string(b)\n}",
"func RandomString() string {\n\tsuffix := make([]byte, randSuffixLen)\n\n\tfor i := range suffix {\n\t\tsuffix[i] = letterBytes[rand.Intn(len(letterBytes))]\n\t}\n\treturn string(suffix)\n}",
"func genString(length int) string {\n\tb := make([]byte, length)\n\tfor i := range b {\n\t\tb[i] = charset[seededRand.Intn(len(charset))]\n\t}\n\treturn string(b)\n}",
"func RandomString(length int) string {\n\trand.Seed(time.Now().UnixNano())\n\tchars := []rune(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" + \"abcdefghijklmnopqrstuvwxyz\" + \"0123456789\")\n\tvar b strings.Builder\n\tfor i := 0; i < length; i++ {\n\t\tb.WriteRune(chars[rand.Intn(len(chars))])\n\t}\n\tstr := b.String()\n\treturn str\n}",
"func RandString(n int) string {\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(b)\n}",
"func RandString(n int) string {\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(b)\n}",
"func RandString(n int) string {\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn string(b)\n}",
"func RandString(n int) string {\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn string(b)\n}",
"func RandomString(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Intn(len(letterBytes))]\n\t}\n\treturn string(b)\n}",
"func randString(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = charset[rand.Intn(len(charset))]\n\t}\n\treturn string(b)\n}",
"func GenerateRandomString(length int) string {\n\tif length > 0 {\n\t\trand.Seed(time.Now().UnixNano())\n\t\tchars := make([]rune, length)\n\t\tfor i := range chars {\n\t\t\tchars[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t\t}\n\n\t\treturn string(chars)\n\t}\n\n\treturn \"\"\n}",
"func RandomString(length int) string {\n\tsrc := rand.NewSource(time.Now().UnixNano())\n\tb := make([]byte, length)\n\tfor i, cache, remain := length-1, src.Int63(), letterIndexMax; i >= 0; {\n\t\tif remain == 0 {\n\t\t\tcache, remain = src.Int63(), letterIndexMax\n\t\t}\n\t\tif idx := int(cache & letterIndexMask); idx < len(letterBytes) {\n\t\t\tb[i] = letterBytes[idx]\n\t\t\ti--\n\t\t}\n\t\tcache >>= letterIndexBits\n\t\tremain--\n\t}\n\n\treturn string(b)\n}",
"func createRandomString(starterString string) string {\n\tresult := starterString + randomString(8)\n\treturn result\n}",
"func generateRandString(length int) string {\n\tb := make([]rune, length)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn string(b)\n}",
"func RandomString(length int) string {\n\tbuf := make([]byte, length)\n\tif _, err := rand.Read(buf); err != nil {\n\t\tpanic(err)\n\t}\n\treturn base64.StdEncoding.EncodeToString(buf)\n}",
"func makeRandomString(bytesLength int) []byte {\n\tbyteVar := make([]byte, bytesLength)\n\tchars := \"abcdefghijklmnopqrstuvwxyz123456789\" // our posibilities\n\tfor i := range byteVar {\n\t\tx := genPseudoRand()\n\t\tbyteVar[i] = chars[x.Intn(len(chars))]\n\t}\n\treturn byteVar\n}",
"func RandomString(strlen int) string {\r\n\trand.Seed(time.Now().UTC().UnixNano())\r\n\tconst chars = \"abcdefghijklmnopqrstuvwxyz0123456789\"\r\n\tresult := make([]byte, strlen)\r\n\tfor i := 0; i < strlen; i++ {\r\n\t\tresult[i] = chars[rand.Intn(len(chars))]\r\n\t}\r\n\treturn string(result)\r\n}",
"func RandomString(strlen int) string {\r\n\trand.Seed(time.Now().UTC().UnixNano())\r\n\tconst chars = \"abcdefghijklmnopqrstuvwxyz0123456789\"\r\n\tresult := make([]byte, strlen)\r\n\tfor i := 0; i < strlen; i++ {\r\n\t\tresult[i] = chars[rand.Intn(len(chars))]\r\n\t}\r\n\treturn string(result)\r\n}",
"func StrRandom(length int) string {\n\tresult := make([]rune, length)\n\tfor i := range result {\n\t\tresult[i] = alphaNumeric[rand.Intn(len(alphaNumeric))]\n\t}\n\treturn string(result)\n}",
"func (rs *RandString) String() string {\n\trs.mutex.RLock()\n\tdefer rs.mutex.RUnlock()\n\tif rs.len == 0 {\n\t\treturn \"\"\n\t}\n\trnd := rand.Intn(rs.len)\n\tretString := rs.strings[rnd]\n\tretString = strings.ReplaceAll(retString, \"{rnd}\", randomAlfanum(6))\n\tretString = strings.ReplaceAll(retString, \"{rndnum}\", randomNum(12))\n\treturn retString\n}",
"func RandomString() string {\n\tvar letter = []rune(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n\tvar number = []rune(\"0123456789\")\n\n\tb := make([]rune, 2)\n\tfor i := 0; i < 2; i++ {\n\t\tb[i] = letter[rand.Intn(len(letter))]\n\t}\n\n\tc := make([]rune, 3)\n\tfor i := 0; i < 3; i++ {\n\t\tc[i] = number[rand.Intn(len(number))]\n\t}\n\n\treturn string(append(b, c...))\n}",
"func RandStr() string {\n\tchars := \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-_\"\n\tnum := 100\n\ts := \"\"\n\tfor i := 0; i < num; i++ {\n\t\ts += string(chars[rand.Intn(len(chars))])\n\t}\n\treturn s\n}",
"func RandString(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]\n\t}\n\treturn string(b)\n}",
"func randomString(prefix string, length int) string {\n\tb := make([]rune, length)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn prefix + string(b)\n}",
"func RandStr(length int) string {\n\tstr := \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\treturn RandSomeStr(str, length)\n}",
"func RandString(length int, letter letter) string {\n\tb := make([]byte, length)\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tfor i := range b {\n\t\tb[i] = letter[r.Int63()%int64(len(letter))]\n\t}\n\treturn string(b)\n}",
"func randomString(n int) string {\n\trand.Seed(time.Now().UnixNano())\n\n\tvar letter = []rune(runeString)\n\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letter[rand.Intn(len(letter))]\n\t}\n\treturn string(b)\n}",
"func RandString(length int) string {\n\tbytes := make([]byte, length)\n\tfor i := 0; i < length; i++ {\n\t\tb := common.BASE_SALT[rand.Intn(len(common.BASE_SALT))]\n\t\tbytes[i] = byte(b)\n\t}\n\treturn string(bytes)\n}",
"func (this *MatchString) GetRandStr(n int) string{\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tre := r.Intn(n)\n\treturn strconv.Itoa(re)\n}",
"func pickRandomWord(data []string) string {\n\trand.Seed(time.Now().UnixNano())\n\treturn strings.Trim(strings.Title(data[rand.Intn(len(data))]), \"\\n\")\n}",
"func RandString(n int) string {\n\tconst letterBytes = \"abcdefghijklmnopqrstuvwxyz\"\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Intn(len(letterBytes))]\n\t}\n\treturn string(b)\n}",
"func RandStr(length int) string {\n\tchars := []byte{}\nMAIN_LOOP:\n\tfor {\n\t\tval := rand.Int63()\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tv := int(val & 0x3f) // rightmost 6 bits\n\t\t\tif v >= 62 { // only 62 characters in strChars\n\t\t\t\tval >>= 6\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tchars = append(chars, strChars[v])\n\t\t\t\tif len(chars) == length {\n\t\t\t\t\tbreak MAIN_LOOP\n\t\t\t\t}\n\t\t\t\tval >>= 6\n\t\t\t}\n\t\t}\n\t}\n\n\treturn string(chars)\n}",
"func RandStr(length int) string {\n\tchars := []byte{}\nMAIN_LOOP:\n\tfor {\n\t\tval := rand.Int63()\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tv := int(val & 0x3f) // rightmost 6 bits\n\t\t\tif v >= 62 { // only 62 characters in strChars\n\t\t\t\tval >>= 6\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tchars = append(chars, strChars[v])\n\t\t\t\tif len(chars) == length {\n\t\t\t\t\tbreak MAIN_LOOP\n\t\t\t\t}\n\t\t\t\tval >>= 6\n\t\t\t}\n\t\t}\n\t}\n\n\treturn string(chars)\n}",
"func RandomString(n int) *string {\n\tvar letter = []rune(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\")\n\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letter[rand.Intn(len(letter))]\n\t}\n\n\ts := string(b)\n\treturn &s\n}",
"func RandomString(strlen int) string {\n rand.Seed(time.Now().UTC().UnixNano())\n const chars = \"abcdefghijklmnopqrstuvwxyz\"\n result := make([]byte, strlen)\n for i := 0; i < strlen; i++ {\n result[i] = chars[rand.Intn(len(chars))]\n }\n return string(result)\n}",
"func RandomString(n int) string {\n\tbuffer := make([]byte, n)\n\trand.Read(buffer)\n\n\tfor k, v := range buffer {\n\t\tbuffer[k] = safeChars[v%byte(len(safeChars))]\n\t}\n\n\treturn string(buffer)\n}",
"func RandomString(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]\n\t}\n\treturn string(b)\n}",
"func RandString(n int) string {\n\tvar letters = []rune(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(b)\n}",
"func RandomString(n int) string {\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tvar str string\n\tlength := len(alphanum)\n\tfor i := 0; i < n; i++ {\n\t\ta := alphanum[r.Intn(len(alphanum))%length]\n\t\tstr += string(a)\n\t}\n\treturn str\n}",
"func RandString(n int) string {\n\tgen := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tletters := \"bdghjlmnpqrstvwxyz0123456789\"\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letters[gen.Int63()%int64(len(letters))]\n\t}\n\treturn string(b)\n}",
"func RandomString(num int) string {\n\tbytes := make([]byte, num)\n\tfor i := 0; i < num; i++ {\n\t\tbytes[i] = byte(randomInt(97, 122)) // lowercase letters.\n\t}\n\treturn string(bytes)\n}",
"func randomString(length int) (str string) {\n\tb := make([]byte, length)\n\trand.Read(b)\n\treturn base64.StdEncoding.EncodeToString(b)\n}",
"func RandomString(length int) string {\n\tvar letters = []rune(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n\tb := make([]rune, length)\n\trand.Seed(time.Now().UnixNano())\n\tfor i := range b {\n\t\tb[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(b)\n}",
"func RandomString(str string) string {\n\tb := make([]byte, 3)\n\trand.Read(b)\n\tid := fmt.Sprintf(str+\"%x\", b)\n\treturn strings.ToUpper(id)\n}"
] | [
"0.7593272",
"0.68603545",
"0.6819849",
"0.6768189",
"0.6673112",
"0.6672849",
"0.665283",
"0.6644218",
"0.6626326",
"0.66243505",
"0.6619441",
"0.6537378",
"0.6531689",
"0.64609605",
"0.6444482",
"0.6438242",
"0.6422548",
"0.64116293",
"0.6404121",
"0.6369966",
"0.6364124",
"0.635807",
"0.635807",
"0.6357411",
"0.6331134",
"0.632085",
"0.6315538",
"0.6295567",
"0.62892103",
"0.62892103",
"0.62892103",
"0.62892103",
"0.62889",
"0.6286589",
"0.62745327",
"0.6270315",
"0.6261423",
"0.6260423",
"0.624791",
"0.62371236",
"0.62294835",
"0.6227396",
"0.62057626",
"0.6204432",
"0.6201275",
"0.6199627",
"0.6196624",
"0.6184738",
"0.61776054",
"0.61621153",
"0.61573905",
"0.615639",
"0.61561406",
"0.61545295",
"0.6148863",
"0.61325914",
"0.6107288",
"0.610647",
"0.6099784",
"0.60966665",
"0.6095362",
"0.6095362",
"0.6092944",
"0.6092944",
"0.60915726",
"0.6062413",
"0.6047161",
"0.6042174",
"0.6041752",
"0.60406077",
"0.60243315",
"0.60218054",
"0.601661",
"0.601661",
"0.60154957",
"0.60145",
"0.60135794",
"0.5991769",
"0.5988051",
"0.59875226",
"0.59874916",
"0.5984187",
"0.59735984",
"0.59592825",
"0.5955028",
"0.5948518",
"0.5945005",
"0.594344",
"0.594344",
"0.5942385",
"0.5942095",
"0.59397906",
"0.59396964",
"0.59293747",
"0.5924288",
"0.5920238",
"0.5917259",
"0.5905053",
"0.5897705",
"0.5883929"
] | 0.86567205 | 0 |
CheckCertificateFormatFlag checks if the certificate format is valid. | func CheckCertificateFormatFlag(s string) (string, error) {
switch s {
case constants.CertificateFormatStandard, teleport.CertificateFormatOldSSH, teleport.CertificateFormatUnspecified:
return s, nil
default:
return "", trace.BadParameter("invalid certificate format parameter: %q", s)
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (da *DefaultAuth) CheckFormat() error {\n\treturn nil\n}",
"func (dd *AccountDoc) IsValidFormat() bool {\n\tif dd.Created == 0 || dd.GetType() != int(AccountDIDType) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func CheckCertificate(crt string) {\n\t// Read and parse the PEM certificate file\n\tpemData, err := ioutil.ReadFile(crt)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tblock, rest := pem.Decode([]byte(pemData))\n\tif block == nil || len(rest) > 0 {\n\t\tlog.Fatal(\"Certificate decoding error\")\n\t}\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Print the certificate\n\tresult, err := certinfo.CertificateText(cert)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Print(result)\n}",
"func ValidFormat(f string) bool {\n\tfor _, v := range supportedFormats() {\n\t\tif v[0] == f || v[1] == f {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (m *X509Certificate) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateNotAfter(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNotBefore(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (h *DeviceHandler) CheckDeviceNameFormat(_ context.Context, arg keybase1.CheckDeviceNameFormatArg) (bool, error) {\n\tok := libkb.CheckDeviceName.F(arg.Name)\n\tif ok {\n\t\treturn ok, nil\n\t}\n\treturn false, errors.New(libkb.CheckDeviceName.Hint)\n}",
"func ValidFormat(format string) bool {\n\tfor _, f := range fmtsByStandard {\n\t\tif f == format {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func isValidCertificate(c []byte) bool {\n\tp, _ := pem.Decode(c)\n\tif p == nil {\n\t\treturn false\n\t}\n\tif _, err := x509.ParseCertificates(p.Bytes); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}",
"func certificateCheckCallback(cert *git.Certificate, valid bool, hostname string) git.ErrorCode {\n\treturn 0\n}",
"func ValidateFormatVersion(formatVersion uint32) (bool) {\n if formatVersion == 1 || formatVersion == 2 || formatVersion == 3 || formatVersion == 4 { //format version should still be 1 for now\n return true\n }\n return false\n}",
"func PossibleCertificateFormatValues() []CertificateFormat {\n\treturn []CertificateFormat{CertificateFormatCer, CertificateFormatPfx}\n}",
"func (s *CertificatesService) Validate(body *CertificateCreate) error {\n\tenc, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = s.client.Post(\"/v1/certificates/validate\", enc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn err\n}",
"func TestCheck(t *testing.T) {\n\t// Valid OCSP Must Staple Extension.\n\tvalidExtension := pkix.Extension{\n\t\tId: extensionOid,\n\t\tValue: expectedExtensionValue,\n\t\tCritical: false,\n\t}\n\t// Invalid OCSP Must Staple Extension: Critical field set to `true`.\n\tcriticalExtension := pkix.Extension{\n\t\tId: extensionOid,\n\t\tValue: expectedExtensionValue,\n\t\tCritical: true,\n\t}\n\t// Invalid OCSP Must Staple Extension: Wrong value.\n\twrongValueExtension := pkix.Extension{\n\t\tId: extensionOid,\n\t\tValue: []uint8{0xC0, 0xFF, 0xEE},\n\t\tCritical: false,\n\t}\n\t// Invalid OCSP Must Staple Extension: Wrong value, Critical field set to\n\t// `true`\n\twrongValueExtensionCritical := pkix.Extension{\n\t\tId: extensionOid,\n\t\tValue: []uint8{0xC0, 0xFF, 0xEE},\n\t\tCritical: true,\n\t}\n\n\ttestCases := []struct {\n\t\tName string\n\t\tInputEx pkix.Extension\n\t\tCertType string\n\t\tExpectedErrors []string\n\t}{\n\t\t{\n\t\t\tName: \"Valid: DV cert type\",\n\t\t\tInputEx: validExtension,\n\t\t\tCertType: \"DV\",\n\t\t\tExpectedErrors: []string{},\n\t\t},\n\t\t{\n\t\t\tName: \"Valid: OV cert type\",\n\t\t\tInputEx: validExtension,\n\t\t\tCertType: \"DV\",\n\t\t\tExpectedErrors: []string{},\n\t\t},\n\t\t{\n\t\t\tName: \"Valid: EV cert type\",\n\t\t\tInputEx: validExtension,\n\t\t\tCertType: \"DV\",\n\t\t\tExpectedErrors: []string{},\n\t\t},\n\t\t{\n\t\t\tName: \"Valid: CA cert type\",\n\t\t\tInputEx: validExtension,\n\t\t\tCertType: \"CA\",\n\t\t\tExpectedErrors: []string{},\n\t\t},\n\t\t{\n\t\t\tName: \"Invalid: OCSP cert type\",\n\t\t\tInputEx: validExtension,\n\t\t\tCertType: \"OCSP\",\n\t\t\tExpectedErrors: []string{\n\t\t\t\tcertTypeErr,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Invalid: critical extension\",\n\t\t\tInputEx: criticalExtension,\n\t\t\tCertType: \"DV\",\n\t\t\tExpectedErrors: []string{critExtErr},\n\t\t},\n\t\t{\n\t\t\tName: \"Invalid: critical extension, OCSP cert type\",\n\t\t\tInputEx: criticalExtension,\n\t\t\tCertType: \"OCSP\",\n\t\t\tExpectedErrors: []string{\n\t\t\t\tcertTypeErr, critExtErr,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Invalid: wrong extension value\",\n\t\t\tInputEx: wrongValueExtension,\n\t\t\tCertType: \"DV\",\n\t\t\tExpectedErrors: []string{\n\t\t\t\textValueErr,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Invalid: wrong extension value, critical extension, OCSP cert type\",\n\t\t\tInputEx: wrongValueExtensionCritical,\n\t\t\tCertType: \"OCSP\",\n\t\t\tExpectedErrors: []string{\n\t\t\t\tcertTypeErr, critExtErr, extValueErr,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.Name, func(t *testing.T) {\n\t\t\tcertData := &certdata.Data{\n\t\t\t\tType: tc.CertType,\n\t\t\t}\n\t\t\t// Run the OCSP Must Staple check on the test data\n\t\t\terrors := Check(tc.InputEx, certData)\n\t\t\t// Collect the returned errors into a list\n\t\t\terrList := errors.List()\n\t\t\t// Verify the expected number of errors are in the list\n\t\t\tif len(tc.ExpectedErrors) != len(errList) {\n\t\t\t\tt.Errorf(\"wrong number of Check errors: expected %d, got %d\\n\",\n\t\t\t\t\tlen(tc.ExpectedErrors), len(errList))\n\t\t\t} else {\n\t\t\t\t// Match the error list to the expected error list\n\t\t\t\tfor i, err := range errList {\n\t\t\t\t\tif errMsg := err.Error(); errMsg != tc.ExpectedErrors[i] {\n\t\t\t\t\t\tt.Errorf(\"expected error %q at index %d, got %q\",\n\t\t\t\t\t\t\ttc.ExpectedErrors[i], i, errMsg)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}",
"func isCertTypeOK(wh *Webhook) bool {\n\tif wh.CertType == linkedca.Webhook_ALL.String() || wh.CertType == \"\" {\n\t\treturn true\n\t}\n\treturn linkedca.Webhook_X509.String() == wh.CertType\n}",
"func CertificateRequestInfoSupportsCertificate(cri *tls.CertificateRequestInfo, c *tls.Certificate,) error",
"func (m *X509Certificate) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIssuerDN(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNotAfter(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNotBefore(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePublicKey(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSubjectDN(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (ctl *Ctl) CheckSpecFlags() error {\n\tencryptPassLength := len(ctl.EncryptionPassword)\n\tif encryptPassLength > 0 && encryptPassLength < 16 {\n\t\treturn fmt.Errorf(\"flag EncryptionPassword is %d characters. Must be 16 or more characters\", encryptPassLength)\n\t}\n\tglobalSaltLength := len(ctl.EncryptionGlobalSalt)\n\tif globalSaltLength > 0 && globalSaltLength < 16 {\n\t\treturn fmt.Errorf(\"flag EncryptionGlobalSalt is %d characters. Must be 16 or more characters\", globalSaltLength)\n\t}\n\treturn nil\n}",
"func isMustStapleCertificate(cert *x509.Certificate) (bool, error) {\n\tvar featureExtension pkix.Extension\n\tvar foundExtension bool\n\tfor _, ext := range cert.Extensions {\n\t\tif ext.Id.Equal(tlsFeatureExtensionOID) {\n\t\t\tfeatureExtension = ext\n\t\t\tfoundExtension = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !foundExtension {\n\t\treturn false, nil\n\t}\n\n\t// The value for the TLS feature extension is a sequence of integers. Per the asn1.Unmarshal documentation, an\n\t// integer can be unmarshalled into an int, int32, int64, or *big.Int and unmarshalling will error if the integer\n\t// cannot be encoded into the target type.\n\t//\n\t// Use []*big.Int to ensure that all values in the sequence can be successfully unmarshalled.\n\tvar featureValues []*big.Int\n\tif _, err := asn1.Unmarshal(featureExtension.Value, &featureValues); err != nil {\n\t\treturn false, fmt.Errorf(\"error unmarshalling TLS feature extension values: %v\", err)\n\t}\n\n\tfor _, value := range featureValues {\n\t\tif value.Cmp(mustStapleFeatureValue) == 0 {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}",
"func (f FormatHeader) Valid() bool {\n\treturn f.ID == 0x20746d66 && f.Size == 0x10 && f.AudioFormat == 1\n}",
"func (o CertificateCreateOpts) Validate() error {\n\tif o.Name == \"\" {\n\t\treturn errors.New(\"missing name\")\n\t}\n\tif o.Certificate == \"\" {\n\t\treturn errors.New(\"missing certificate\")\n\t}\n\tif o.PrivateKey == \"\" {\n\t\treturn errors.New(\"missing private key\")\n\t}\n\treturn nil\n}",
"func IsValidCertType(certType string) bool {\n\tfor _, c := range GetSupportedCerts() {\n\t\tif c == certType {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func printCertificate(cert *x509.Certificate) bool {\n\n\tfmt.Printf(\"Subject:%s\\t%s%s\\n\", Green, cert.Subject, Reset)\n\tfmt.Printf(\"Valid from:%s\\t%s%s\\n\", Yellow, cert.NotBefore, Reset)\n\tfmt.Printf(\"Valid until:%s\\t%s%s\\n\", Yellow, cert.NotAfter, Reset)\n\tfmt.Printf(\"Issuer:%s\\t\\t%s%s\\n\", Cyan, cert.Issuer.Organization[0], Reset)\n\tfmt.Printf(\"Is CA?:%s\\t\\t%t%s\\n\", Pink, cert.IsCA, Reset)\n\tfmt.Printf(\"Algorithm:%s\\t%s%s\\n\", Pink, cert.SignatureAlgorithm, Reset)\n\n\tif len(cert.DNSNames) > 0 {\n\t\tfmt.Printf(\"DNS Names:%s\\t%s%s\\n\", Purple, strings.Join(cert.DNSNames, \", \"), Reset)\n\t}\n\n\tif len(cert.OCSPServer) > 0 {\n\t\tfmt.Printf(\"OCSP Server:%s\\t%s%s\\n\", Comment, strings.Join(cert.OCSPServer, \", \"), Reset)\n\t}\n\n\treturn true\n}",
"func (o BackendTlsOutput) ValidateCertificateName() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v BackendTls) *bool { return v.ValidateCertificateName }).(pulumi.BoolPtrOutput)\n}",
"func (c ConvertTransformFormat) IsValid() bool {\n\tswitch c {\n\tcase ConvertTransformFormatNone, ConvertTransformFormatQuantity, ConvertTransformFormatJSON:\n\t\treturn true\n\t}\n\treturn false\n}",
"func Check(d *certdata.Data) *errors.Errors {\n\tvar e = errors.New(nil)\n\n\tswitch d.Type {\n\tcase \"EV\":\n\t\tif strings.LastIndex(d.Cert.Subject.CommonName, \"*\") > -1 {\n\t\t\te.Err(\"Certificate should not contain a wildcard\")\n\t\t}\n\t\tfor _, n := range d.Cert.DNSNames {\n\t\t\tif strings.LastIndex(n, \"*\") > -1 {\n\t\t\t\te.Err(\"Certificate subjectAltName '%s' should not contain a wildcard\", n)\n\t\t\t}\n\t\t}\n\tcase \"DV\", \"OV\":\n\t\tif strings.LastIndex(d.Cert.Subject.CommonName, \"*\") > 0 {\n\t\t\te.Err(\"Certificate wildcard is only allowed as prefix\")\n\t\t}\n\t\tfor _, n := range d.Cert.DNSNames {\n\t\t\tif strings.LastIndex(n, \"*\") > 0 {\n\t\t\t\te.Err(\"Certificate subjectAltName '%s' wildcard is only allowed as prefix\", n)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn e\n}",
"func (o *CertificateOptions) Validate() error {\n\tif len(o.csrNames) < 1 && cmdutil.IsFilenameSliceEmpty(o.Filenames, o.Kustomize) {\n\t\treturn fmt.Errorf(\"one or more CSRs must be specified as <name> or -f <filename>\")\n\t}\n\treturn nil\n}",
"func FormatCert(c *x509.Certificate) (id CertID) {\n\tid.AltNames.IPs = append([]net.IP{}, c.IPAddresses...)\n\tid.AltNames.DNSNames = append([]string{}, c.DNSNames...)\n\tid.AltNames.Emails = append([]string{}, getEmail(c)...)\n\tid.Issuer = c.Issuer.CommonName\n\tid.CommonName = c.Subject.CommonName\n\tid.Organization = c.Subject.Organization\n\tid.IsCA = c.IsCA\n\tid.NotBefore = c.NotBefore\n\tid.NotAfter = c.NotAfter\n\treturn\n}",
"func validateFormatString(v models.ValueDescriptor) (bool, error) {\n\t// No formatting specified\n\tif v.Formatting == \"\" {\n\t\treturn true, nil\n\t} else {\n\t\treturn regexp.MatchString(formatSpecifier, v.Formatting)\n\t}\n}",
"func (c Certificate) SigningFormat() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tif err := gob.NewEncoder(&buf).Encode(&c); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}",
"func (e Error) IsInvalidFormat() bool {\n\treturn e.kind == invalidFormat\n}",
"func validateRequestedReportFormat(config *Config) bool {\n\tconfig.reportFormat = lowercaseOrNotice(config.reportFormat, \"requested report format\")\n\n\tif !sechubUtil.StringArrayContains(SupportedReportFormats, config.reportFormat) {\n\t\tsechubUtil.LogWarning(\"Unsupported report format '\" + config.reportFormat + \"'. Changing to '\" + ReportFormatJSON + \"'.\")\n\t\tconfig.reportFormat = ReportFormatJSON\n\t}\n\treturn true\n}",
"func (o *V1VirusDatasetRequest) HasFormat() bool {\n\tif o != nil && o.Format != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func CheckCertsValidity(driver drivers.Driver) error {\n\tcertExpiryDateCmd := `date --date=\"$(sudo openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -enddate | cut -d= -f 2)\" --iso-8601=seconds`\n\toutput, err := drivers.RunSSHCommandFromDriver(driver, certExpiryDateCmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcertExpiryDate, err := time.Parse(time.RFC3339, strings.TrimSpace(output))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif time.Now().After(certExpiryDate) {\n\t\treturn fmt.Errorf(\"Certs have expired, they were valid till: %s\", certExpiryDate.Format(time.RFC822))\n\t}\n\treturn nil\n}",
"func verifyProduceCodeFormat(produceCode string) (bool, error) {\n\treturn regexp.MatchString(PRODUCECODE_REGEX, produceCode)\n}",
"func ClientHelloInfoSupportsCertificate(chi *tls.ClientHelloInfo, c *tls.Certificate,) error",
"func CheckTLSCert(con *tls.ConnectionState, fp []byte) bool {\n\tfor _, cert := range con.PeerCertificates {\n\t\tcs := sha256.Sum256(cert.Raw)\n\t\tif bytes.Compare(cs[:], fp) == 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (f Format) Valid() bool {\n\tswitch f {\n\tcase None:\n\t\tbreak\n\tcase Custom:\n\t\tbreak\n\tcase Zstd:\n\t\tbreak\n\tdefault:\n\t\treturn false\n\t}\n\treturn true\n}",
"func ValidatePasswordFormat(password string) error {\n\tif password == \"\" {\n\t\treturn errors.New(\"Password is empty\")\n\t} else if l := len(password); l < PasswordMinLength || l > PasswordMaxLength {\n\t\treturn fmt.Errorf(\"Password's length must be %d-%d characters\", PasswordMinLength, PasswordMaxLength)\n\t}\n\tcountUpper, countLower, countNumber, countSpecial := 0, 0, 0, 0\n\tfor _, c := range password {\n\t\tif c >= '0' && c <= '9' {\n\t\t\tcountNumber++\n\t\t} else if c >= 'a' && c <= 'z' {\n\t\t\tcountLower++\n\t\t} else if c >= 'A' && c <= 'Z' {\n\t\t\tcountUpper++\n\t\t} else {\n\t\t\tcountSpecial++\n\t\t}\n\t}\n\tif countUpper == 0 || countLower == 0 || countNumber == 0 || countSpecial == 0 {\n\t\treturn errors.New(\"Password must contain at least 1 lowercase, uppercase, and special characters and 1 number\")\n\t}\n\treturn nil\n}",
"func (config *Config) evaluateCertificate() error {\n\n\tif FileExists(config.Certificate.Cert) {\n\n\t\tdata, err := ioutil.ReadFile(config.Certificate.Cert)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tblock, _ := pem.Decode(data)\n\n\t\tcert, err := x509.ParseCertificate(block.Bytes)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(cert.Subject.OrganizationalUnit) > 0 {\n\t\t\tif cert.Subject.OrganizationalUnit[0] == \"Hosts\" {\n\t\t\t\tconfig.IsHost = true\n\t\t\t\treturn nil\n\t\t\t}\n\t\t} else if len(cert.Issuer.Organization) > 0 {\n\t\t\tif cert.Issuer.Organization[0] == \"Tapp\" {\n\t\t\t\tconfig.IsHost = true\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\tconfig.IsHost = false\n\treturn nil\n}",
"func (b FormatOption) Has(flag FormatOption) bool { return b&flag != 0 }",
"func validateClientCertificate(certificate *x509.Certificate, trustedCertsFile string,\n\tsuppressCertificateTimeInvalid, suppressCertificateChainIncomplete bool) (bool, error) {\n\tif certificate == nil {\n\t\treturn false, ua.BadCertificateInvalid\n\t}\n\tvar intermediates, roots *x509.CertPool\n\tif buf, err := os.ReadFile(trustedCertsFile); err == nil {\n\t\tfor len(buf) > 0 {\n\t\t\tvar block *pem.Block\n\t\t\tblock, buf = pem.Decode(buf)\n\t\t\tif block == nil {\n\t\t\t\t// maybe its der\n\t\t\t\tcert, err := x509.ParseCertificate(buf)\n\t\t\t\tif err == nil {\n\t\t\t\t\t// is self-signed?\n\t\t\t\t\tif bytes.Equal(cert.RawIssuer, cert.RawSubject) {\n\t\t\t\t\t\tif roots == nil {\n\t\t\t\t\t\t\troots = x509.NewCertPool()\n\t\t\t\t\t\t}\n\t\t\t\t\t\troots.AddCert(cert)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif intermediates == nil {\n\t\t\t\t\t\t\tintermediates = x509.NewCertPool()\n\t\t\t\t\t\t}\n\t\t\t\t\t\tintermediates.AddCert(cert)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif block.Type != \"CERTIFICATE\" || len(block.Headers) != 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcert, err := x509.ParseCertificate(block.Bytes)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// is self-signed?\n\t\t\tif bytes.Equal(cert.RawIssuer, cert.RawSubject) {\n\t\t\t\tif roots == nil {\n\t\t\t\t\troots = x509.NewCertPool()\n\t\t\t\t}\n\t\t\t\troots.AddCert(cert)\n\t\t\t} else {\n\t\t\t\tif intermediates == nil {\n\t\t\t\t\tintermediates = x509.NewCertPool()\n\t\t\t\t}\n\t\t\t\tintermediates.AddCert(cert)\n\t\t\t}\n\t\t}\n\t}\n\n\topts := x509.VerifyOptions{\n\t\tIntermediates: intermediates,\n\t\tRoots: roots,\n\t\tKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},\n\t}\n\n\tif suppressCertificateTimeInvalid {\n\t\topts.CurrentTime = certificate.NotAfter // causes test to pass\n\t}\n\n\tif suppressCertificateChainIncomplete {\n\t\tif opts.Roots == nil {\n\t\t\topts.Roots = x509.NewCertPool()\n\t\t}\n\t\topts.Roots.AddCert(certificate)\n\t}\n\n\t// build chain and verify\n\tif _, err := certificate.Verify(opts); err != nil {\n\t\tswitch se := err.(type) {\n\t\tcase x509.CertificateInvalidError:\n\t\t\tswitch se.Reason {\n\t\t\tcase x509.Expired:\n\t\t\t\treturn false, ua.BadCertificateTimeInvalid\n\t\t\tcase x509.IncompatibleUsage:\n\t\t\t\treturn false, ua.BadCertificateUseNotAllowed\n\t\t\tdefault:\n\t\t\t\treturn false, ua.BadSecurityChecksFailed\n\t\t\t}\n\t\tcase x509.UnknownAuthorityError:\n\t\t\treturn false, ua.BadSecurityChecksFailed\n\t\tdefault:\n\t\t\treturn false, ua.BadSecurityChecksFailed\n\t\t}\n\t}\n\treturn true, nil\n}",
"func isCveFormat(fl FieldLevel) bool {\n\tcveString := fl.Field().String()\n\n\treturn cveRegex.MatchString(cveString)\n}",
"func (o BackendTlsOutput) ValidateCertificateChain() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v BackendTls) *bool { return v.ValidateCertificateChain }).(pulumi.BoolPtrOutput)\n}",
"func isValidVersionFormat(version string) bool {\n\tmatch, _ := regexp.MatchString(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", version)\n\treturn match\n}",
"func CheckFlags(sampler config.Sampler) (int, bool) {\n\tif helpConfig {\n\t\tsampler.Sample(os.Stdout, nil, nil)\n\t\treturn 0, false\n\t}\n\tif version {\n\t\tfmt.Printf(VersionInfo())\n\t\treturn 0, false\n\t}\n\tif configFile == \"\" {\n\t\tfmt.Fprintln(os.Stderr, \"Err: Missing config file\")\n\t\tflag.Usage()\n\t\treturn 1, false\n\t}\n\treturn 0, true\n}",
"func (c certificate) Check(d *certdata.Data) *errors.Errors {\n\tvar e = errors.New(nil)\n\n\tfor _, cc := range c {\n\t\tif cc.filter != nil && !cc.filter.Check(d) {\n\t\t\tcontinue\n\t\t}\n\t\te.Append(cc.f(d))\n\t}\n\n\treturn e\n}",
"func Valid(format, version string) error {\n\tversionParser, exists := GetParser(format)\n\tif !exists {\n\t\treturn ErrUnknownVersionFormat\n\t}\n\n\tif !versionParser.Valid(version) {\n\t\treturn ErrInvalidVersion\n\t}\n\n\treturn nil\n}",
"func (sfs *SoundFileInfoService) IsValidSoundFileFormat(fileName string) bool {\n\tisValidFileFormat := false\n\tfname := strings.ToLower(fileName)\n\n\tif strings.HasSuffix(fname, \".mp3\") || strings.HasSuffix(fname, \".wav\") {\n\t\tisValidFileFormat = true\n\t}\n\n\treturn isValidFileFormat\n}",
"func (c Certificate) GetBooleanValidity() (trusted_ubuntu, trusted_mozilla, trusted_microsoft, trusted_apple, trusted_android bool) {\n\n\t//check Ubuntu validation info\n\tvalInfo, ok := c.ValidationInfo[Ubuntu_TS_name]\n\n\tif !ok {\n\t\ttrusted_ubuntu = false\n\t} else {\n\t\ttrusted_ubuntu = valInfo.IsValid\n\t}\n\n\t//check Mozilla validation info\n\tvalInfo, ok = c.ValidationInfo[Mozilla_TS_name]\n\n\tif !ok {\n\t\ttrusted_mozilla = false\n\t} else {\n\t\ttrusted_mozilla = valInfo.IsValid\n\t}\n\n\t//check Microsoft validation info\n\tvalInfo, ok = c.ValidationInfo[Microsoft_TS_name]\n\n\tif !ok {\n\t\ttrusted_microsoft = false\n\t} else {\n\t\ttrusted_microsoft = valInfo.IsValid\n\t}\n\n\t//check Apple validation info\n\tvalInfo, ok = c.ValidationInfo[Apple_TS_name]\n\n\tif !ok {\n\t\ttrusted_apple = false\n\t} else {\n\t\ttrusted_apple = valInfo.IsValid\n\t}\n\n\t//check Android validation info\n\tvalInfo, ok = c.ValidationInfo[Android_TS_name]\n\n\tif !ok {\n\t\ttrusted_android = false\n\t} else {\n\t\ttrusted_android = valInfo.IsValid\n\t}\n\treturn\n}",
"func (o *StorageNetAppCloudTargetAllOf) GetCertificateValidationEnabledOk() (*bool, bool) {\n\tif o == nil || o.CertificateValidationEnabled == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CertificateValidationEnabled, true\n}",
"func (c CertAuthType) Check() error {\n\tfor _, caType := range CertAuthTypes {\n\t\tif c == caType {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn trace.BadParameter(\"%q authority type is not supported\", c)\n}",
"func (o *Manager) CanFormat(ctx context.Context, inType string) (available struct {\n\tV0 bool\n\tV1 string\n}, err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceManager+\".CanFormat\", 0, inType).Store(&available)\n\treturn\n}",
"func CheckCertSignature(caCert *x509.Certificate) VerifyPeerCertificateFunc {\n\treturn func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {\n\t\tfor _, c := range rawCerts {\n\t\t\tparsedCert, err := x509.ParseCertificate(c)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcertErr := parsedCert.CheckSignatureFrom(caCert)\n\t\t\tif parsedCert.NotAfter.Before(time.Now()) || parsedCert.NotBefore.After(time.Now()) {\n\t\t\t\tcertErr = errors.New(\"Certificate expired or used too soon\")\n\t\t\t}\n\t\t\tlog.Printf(\"Remote presented certificate %d with time bounds (%v-%v). Verification error for certificate: %+v\",\n\t\t\t\tparsedCert.SerialNumber, parsedCert.NotBefore, parsedCert.NotAfter, certErr)\n\t\t\treturn certErr\n\t\t}\n\t\treturn errors.New(\"Expected certificate which would pass, none presented\")\n\t}\n}",
"func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {\n\tif c.IsRevoked != nil && c.IsRevoked(cert) {\n\t\treturn fmt.Errorf(\"ssh: certificate serial %d revoked\", cert.Serial)\n\t}\n\n\tfor opt := range cert.CriticalOptions {\n\t\t// sourceAddressCriticalOption will be enforced by\n\t\t// serverAuthenticate\n\t\tif opt == sourceAddressCriticalOption {\n\t\t\tcontinue\n\t\t}\n\n\t\tfound := false\n\t\tfor _, supp := range c.SupportedCriticalOptions {\n\t\t\tif supp == opt {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn fmt.Errorf(\"ssh: unsupported critical option %q in certificate\", opt)\n\t\t}\n\t}\n\n\tif len(cert.ValidPrincipals) > 0 {\n\t\t// By default, certs are valid for all users/hosts.\n\t\tfound := false\n\t\tfor _, p := range cert.ValidPrincipals {\n\t\t\tif p == principal {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn fmt.Errorf(\"ssh: principal %q not in the set of valid principals for given certificate: %q\", principal, cert.ValidPrincipals)\n\t\t}\n\t}\n\n\tclock := c.Clock\n\tif clock == nil {\n\t\tclock = time.Now\n\t}\n\n\tunixNow := clock().Unix()\n\tif after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) {\n\t\treturn fmt.Errorf(\"ssh: cert is not yet valid\")\n\t}\n\tif before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) {\n\t\treturn fmt.Errorf(\"ssh: cert has expired\")\n\t}\n\tif err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil {\n\t\treturn fmt.Errorf(\"ssh: certificate signature does not verify\")\n\t}\n\n\treturn nil\n}",
"func (o *CaCertificateCreateReqWeb) GetCertificateOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Certificate, true\n}",
"func checkProtocal(protocol string) bool {\n\tpass := false\n\tfor _, v := range protocals {\n\t\tif strings.EqualFold(protocol, v) {\n\t\t\tpass = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn pass\n}",
"func (asf AttestationStatementFormat) Valid() error {\n\tswitch asf {\n\tcase AttestationFormatPacked:\n\tcase AttestationFormatTPM:\n\tcase AttestationFormatAndroidKey:\n\tcase AttestationFormatAndroidSafetyNet:\n\tcase AttestationFormatFidoU2F:\n\tcase AttestationFormatNone:\n\tdefault:\n\t\treturn NewError(\"Invalid attestation statement %s\", asf)\n\t}\n\treturn nil\n}",
"func IsFormatSupported(p StreamParameters, args ...interface{}) error {\n\ts := &Stream{}\n\terr := s.init(p, args...)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn newError(C.Pa_IsFormatSupported(s.inParams, s.outParams, C.double(p.SampleRate)))\n}",
"func IsExtFormatValid(ext string) bool {\n\tif string(ext[0]) != \".\" {\n\t\treturn false\n\t}\n\n\tfor _, letter := range ext[1:] {\n\t\tif !unicode.IsLetter(rune(letter)) && !unicode.IsDigit(rune(letter)) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func VerifyFileFormat(fh *multipart.FileHeader, format string, size int64) (string, error) {\n\tif len(format) == 0 || size <= 0 {\n\t\treturn \"\", invar.ErrInvalidParams\n\t}\n\n\tsuffix := path.Ext(fh.Filename)\n\tswitch suffix {\n\tcase format:\n\t\tif fh.Size > int64(size<<20) {\n\t\t\treturn \"\", invar.ErrImgOverSize\n\t\t}\n\tdefault:\n\t\treturn \"\", invar.ErrUnsupportedFile\n\t}\n\treturn suffix, nil\n}",
"func (o BackendTlsPtrOutput) ValidateCertificateName() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *BackendTls) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ValidateCertificateName\n\t}).(pulumi.BoolPtrOutput)\n}",
"func TestCertificate(t *testing.T) {\n\tvar result Certificate\n\n\tif err := json.NewDecoder(certificateBody).Decode(&result); err != nil {\n\t\tt.Errorf(\"Error decoding JSON: %s\", err)\n\t}\n\n\tassertEquals(t, \"1\", result.ID)\n\tassertEquals(t, \"HTTPS Certificate\", result.Name)\n\tassertEquals(t, \"PEM\", string(result.CertificateType))\n\tassertEquals(t, \"Contoso\", result.Issuer.Organization)\n\tassertEquals(t, \"2019-09-07T13:22:05Z\", result.ValidNotAfter)\n\tassertEquals(t, \"TPM_ALG_SHA1\", result.FingerprintHashAlgorithm)\n\tassertEquals(t, \"sha256WithRSAEncryption\", result.SignatureAlgorithm)\n}",
"func ValidatePhoneFormat(countryCallingCode, phone string) error {\n\tif phone == \"\" {\n\t\treturn errors.New(\"Phone number is empty\")\n\t} else if !IsNumericString(phone) {\n\t\treturn errors.New(\"Phone number must be numeric\")\n\t} else if strings.HasPrefix(phone, \"0\") {\n\t\treturn errors.New(\"Phone number can't start with '0'\")\n\t}\n\tlenCC, lenPhone := len(countryCallingCode), len(phone)\n\tmin, max := PhoneMinLength-lenCC, PhoneMaxLength-lenCC\n\tif lenPhone < min || lenPhone > max {\n\t\treturn fmt.Errorf(\"Phone number's length must be %d-%d digits\", min, max)\n\t}\n\treturn nil\n}",
"func isValidForDelegation(cert *x509.Certificate) bool {\n\t// Check that the digitalSignature key usage is set.\n\t// The certificate must contains the digitalSignature KeyUsage.\n\tif (cert.KeyUsage & x509.KeyUsageDigitalSignature) == 0 {\n\t\treturn false\n\t}\n\n\t// Check that the certificate has the DelegationUsage extension and that\n\t// it's marked as non-critical (See Section 4.2 of RFC5280).\n\tfor _, extension := range cert.Extensions {\n\t\tif extension.Id.Equal(extensionDelegatedCredential) {\n\t\t\tif extension.Critical {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func InvalidCollectionFormat(name, in, format string) *Validation {\n\treturn &Validation{\n\t\tcode: InvalidTypeCode,\n\t\tName: name,\n\t\tIn: in,\n\t\tValue: format,\n\t\tmessage: fmt.Sprintf(\"the collection format %q is not supported for the %s param %q\", format, in, name),\n\t}\n}",
"func (m *X509Certificate) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateIssuerDN(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidatePublicKey(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSubjectDN(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (p *CertProfile) Validate() error {\n\tif p.Expiry == 0 {\n\t\treturn errors.New(\"no expiry set\")\n\t}\n\n\tif len(p.Usage) == 0 {\n\t\treturn errors.New(\"no usages specified\")\n\t} else if _, _, unk := p.Usages(); len(unk) > 0 {\n\t\treturn errors.Errorf(\"unknown usage: %s\", strings.Join(unk, \",\"))\n\t}\n\n\tfor _, policy := range p.Policies {\n\t\tfor _, qualifier := range policy.Qualifiers {\n\t\t\tif qualifier.Type != \"\" &&\n\t\t\t\tqualifier.Type != csr.UserNoticeQualifierType &&\n\t\t\t\tqualifier.Type != csr.CpsQualifierType {\n\t\t\t\treturn errors.New(\"invalid policy qualifier type: \" + qualifier.Type)\n\t\t\t}\n\t\t}\n\t}\n\n\tif p.AllowedNames != \"\" && p.AllowedNamesRegex == nil {\n\t\trule, err := regexp.Compile(p.AllowedNames)\n\t\tif err != nil {\n\t\t\treturn errors.WithMessage(err, \"failed to compile AllowedNames\")\n\t\t}\n\t\tp.AllowedNamesRegex = rule\n\t}\n\tif p.AllowedDNS != \"\" && p.AllowedDNSRegex == nil {\n\t\trule, err := regexp.Compile(p.AllowedDNS)\n\t\tif err != nil {\n\t\t\treturn errors.WithMessage(err, \"failed to compile AllowedDNS\")\n\t\t}\n\t\tp.AllowedDNSRegex = rule\n\t}\n\tif p.AllowedEmail != \"\" && p.AllowedEmailRegex == nil {\n\t\trule, err := regexp.Compile(p.AllowedEmail)\n\t\tif err != nil {\n\t\t\treturn errors.WithMessage(err, \"failed to compile AllowedEmail\")\n\t\t}\n\t\tp.AllowedEmailRegex = rule\n\t}\n\tif p.AllowedURI != \"\" && p.AllowedURIRegex == nil {\n\t\trule, err := regexp.Compile(p.AllowedURI)\n\t\tif err != nil {\n\t\t\treturn errors.WithMessage(err, \"failed to compile AllowedURI\")\n\t\t}\n\t\tp.AllowedURIRegex = rule\n\t}\n\n\treturn nil\n}",
"func CustomizeCertificateValidation(fedCluster *fedv1b1.KubeFedCluster, tlsConfig *tls.Config) error {\n\t// InsecureSkipVerify must be enabled to prevent early validation errors from\n\t// returning before VerifyPeerCertificate is run\n\ttlsConfig.InsecureSkipVerify = true\n\n\tvar ignoreSubjectName, ignoreValidityPeriod bool\n\tfor _, validation := range fedCluster.Spec.DisabledTLSValidations {\n\t\tswitch fedv1b1.TLSValidation(validation) {\n\t\tcase fedv1b1.TLSAll:\n\t\t\tklog.V(1).Infof(\"Cluster %s will not perform TLS certificate validation\", fedCluster.Name)\n\t\t\treturn nil\n\t\tcase fedv1b1.TLSSubjectName:\n\t\t\tignoreSubjectName = true\n\t\tcase fedv1b1.TLSValidityPeriod:\n\t\t\tignoreValidityPeriod = true\n\t\t}\n\t}\n\n\t// Normal TLS SubjectName validation uses the conn dnsname for validation,\n\t// but this is not available when using a VerifyPeerCertificate functions.\n\t// As a workaround, we will fill the tls.Config.ServerName with the URL host\n\t// specified as the KubeFedCluster API target\n\tif !ignoreSubjectName && tlsConfig.ServerName == \"\" {\n\t\tapiURL, err := url.Parse(fedCluster.Spec.APIEndpoint)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"failed to identify a valid host from APIEndpoint for use in SubjectName validation\")\n\t\t}\n\t\ttlsConfig.ServerName = apiURL.Hostname()\n\t}\n\n\t// VerifyPeerCertificate uses the same logic as crypto/tls Conn.verifyServerCertificate\n\t// but uses a modified set of options to ignore specific validations\n\ttlsConfig.VerifyPeerCertificate = func(certificates [][]byte, verifiedChains [][]*x509.Certificate) error {\n\t\topts := x509.VerifyOptions{\n\t\t\tRoots: tlsConfig.RootCAs,\n\t\t\tCurrentTime: time.Now(),\n\t\t\tIntermediates: x509.NewCertPool(),\n\t\t\tDNSName: tlsConfig.ServerName,\n\t\t}\n\t\tif tlsConfig.Time != nil {\n\t\t\topts.CurrentTime = tlsConfig.Time()\n\t\t}\n\n\t\tcerts := make([]*x509.Certificate, len(certificates))\n\t\tfor i, asn1Data := range certificates {\n\t\t\tcert, err := x509.ParseCertificate(asn1Data)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.New(\"tls: failed to parse certificate from server: \" + err.Error())\n\t\t\t}\n\t\t\tcerts[i] = cert\n\t\t}\n\n\t\tfor i, cert := range certs {\n\t\t\tif i == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\topts.Intermediates.AddCert(cert)\n\t\t}\n\n\t\tif ignoreSubjectName {\n\t\t\t// set the DNSName to nil to ignore the name validation\n\t\t\topts.DNSName = \"\"\n\t\t\tklog.V(1).Infof(\"Cluster %s will not perform tls certificate SubjectName validation\", fedCluster.Name)\n\t\t}\n\t\tif ignoreValidityPeriod {\n\t\t\t// set the CurrentTime to immediately after the certificate start time\n\t\t\t// this will ensure that certificate passes the validity period check\n\t\t\topts.CurrentTime = certs[0].NotBefore.Add(time.Second)\n\t\t\tklog.V(1).Infof(\"Cluster %s will not perform tls certificate ValidityPeriod validation\", fedCluster.Name)\n\t\t}\n\n\t\t_, err := certs[0].Verify(opts)\n\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (s TLSSpec) Validate() error {\n\tif s.IsSecure() {\n\t\tif err := shared.ValidateResourceName(s.GetCASecretName()); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\tif _, _, _, err := s.GetParsedAltNames(); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\tif err := s.GetTTL().Validate(); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t}\n\treturn nil\n}",
"func pathFetchValid(b *backend) *framework.Path {\n\treturn &framework.Path{\n\t\tPattern: `cert/(?P<serial>[0-9A-Fa-f-:]+)`,\n\t\tFields: map[string]*framework.FieldSchema{\n\t\t\t\"serial\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeString,\n\t\t\t\tDescription: `Certificate serial number, in colon- or\nhyphen-separated octal`,\n\t\t\t},\n\t\t},\n\n\t\tCallbacks: map[logical.Operation]framework.OperationFunc{\n\t\t\tlogical.ReadOperation: b.pathFetchRead,\n\t\t},\n\n\t\tHelpSynopsis: pathFetchHelpSyn,\n\t\tHelpDescription: pathFetchHelpDesc,\n\t}\n}",
"func CheckFeatureFlag(v *viper.Viper) error {\n\treturn nil\n}",
"func (o KeystoresAliasesKeyCertFileCertsInfoCertInfoOutput) IsValid() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v KeystoresAliasesKeyCertFileCertsInfoCertInfo) *string { return v.IsValid }).(pulumi.StringPtrOutput)\n}",
"func (o *Options) CheckOptions() error {\n\tif o.ViceCrtFile == \"\" {\n\t\treturn fmt.Errorf(\"path to vice certificate not provided. Aborting\")\n\t}\n\tif o.ViceKeyFile == \"\" {\n\t\treturn fmt.Errorf(\"path to vice key not provided. Aborting\")\n\t}\n\tif o.VicePresidentConfig == \"\" {\n\t\treturn fmt.Errorf(\"path to vice config not provided. Aborting\")\n\t}\n\tif o.IntermediateCertificate == \"\" {\n\t\tLogDebug(\"Intermediate certificate not provided\")\n\t}\n\tif o.KubeConfig == \"\" {\n\t\tLogDebug(\"Path to kubeconfig not provided. Using Default\")\n\t}\n\n\tif o.MinCertValidityDays <= 0 {\n\t\tLogDebug(\"Minimum certificate validity invalid. Using default: 30 days\")\n\t\to.MinCertValidityDays = 30\n\t}\n\n\tif o.MetricPort == 0 {\n\t\to.MetricPort = 9091\n\t\tLogDebug(\"Metric port not provided. Using default port: 9091\")\n\t}\n\tif !o.IsEnableAdditionalSymantecMetrics {\n\t\tLogDebug(\"Not exposing additional Symantec metrics\")\n\t} else {\n\t\tLogDebug(\"Exposing additional Symantec metrics\")\n\t}\n\n\treturn nil\n}",
"func itValidatesTLSFlags(args ...string) {\n\tContext(\"TLS Flag Validation\", func() {\n\t\tIt(\"exits with status 3 when no TLS flags are specified\", func() {\n\t\t\tcmd := exec.Command(cfdotPath, args...)\n\n\t\t\tsess, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tEventually(sess.Exited).Should(BeClosed())\n\n\t\t\tExpect(sess.ExitCode()).To(Equal(3))\n\t\t})\n\t})\n}",
"func (samlCert DownloadSamlCert) Validate() error {\n\n\tif samlCert.Config.AttestationService.AttestationType == \"SGX\" {\n\t\tfmt.Println(\"tasks/download_saml_cert:Validate() Skipping download of SAML Cert task for SGX attestation\")\n\t\treturn nil\n\t}\n\n\tif _, err := os.Stat(samlCert.SamlCertPath); os.IsNotExist(err) {\n\t\treturn errors.Wrap(err, \"tasks/download_saml_cert:Validate() saml certificate does not exist\")\n\t}\n\n\t_, err := ioutil.ReadFile(samlCert.SamlCertPath)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"tasks/download_saml_cert:Validate() Error while reading Saml CA Certificate file\")\n\t}\n\n\treturn nil\n}",
"func checkSecurityGroupFlags() string {\n\tcheckResult := \"\"\n\tcheckResult += checkGroupId()\n\tcheckResult += checkProtocol()\n\tcheckResult += checkWay()\n\treturn checkResult\n}",
"func (m *CaCert) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCaName(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCertificate(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func validateCertificatePEM(certPEM string, options *x509.VerifyOptions) ([]*x509.Certificate, error) {\n\tcerts, err := cert.ParseCertsPEM([]byte(certPEM))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(certs) < 1 {\n\t\treturn nil, fmt.Errorf(\"invalid/empty certificate data\")\n\t}\n\n\tif options != nil {\n\t\t// Ensure we don't report errors for expired certs or if\n\t\t// the validity is in the future.\n\t\t// Not that this can be for the actual certificate or any\n\t\t// intermediates in the CA chain. This allows the router to\n\t\t// still serve an expired/valid-in-the-future certificate\n\t\t// and lets the client to control if it can tolerate that\n\t\t// (just like for self-signed certs).\n\t\t_, err = certs[0].Verify(*options)\n\t\tif err != nil {\n\t\t\tif invalidErr, ok := err.(x509.CertificateInvalidError); !ok || invalidErr.Reason != x509.Expired {\n\t\t\t\treturn certs, fmt.Errorf(\"error verifying certificate: %s\", err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn certs, nil\n}",
"func prettyPrintCertificate(w io.Writer, certs []*x509.Certificate, short bool) error {\n\tformat := certificateText\n\tif short {\n\t\tformat = certificateShortText\n\t}\n\tfor i, cert := range certs {\n\t\tinfo, err := format(cert)\n\t\tif err != nil {\n\t\t\treturn serrors.WrapStr(\"formatting certificate info\", err, \"index\", i)\n\t\t}\n\t\tif _, err = fmt.Fprint(w, info); err != nil {\n\t\t\treturn serrors.WrapStr(\"writing certificate info\", err, \"index\", i)\n\t\t}\n\t}\n\treturn nil\n}",
"func (o *V1VirusDatasetRequest) GetFormatOk() (*V1TableFormat, bool) {\n\tif o == nil || o.Format == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Format, true\n}",
"func (o *PodnetOptions) Validate() error {\n\t/*\n\t\tif len(o.rawConfig.CurrentContext) == 0 {\n\t\t\treturn errNoContext\n\t\t}\n\t\tif len(o.args) > 1 {\n\t\t\treturn fmt.Errorf(\"either one or no arguments are allowed\")\n\t\t}\n\t*/\n\n\tif o.outputFormat != \"\" {\n\t\to.outputFormat = strings.ToLower(o.outputFormat)\n\n\t\tswitch o.outputFormat {\n\t\tcase \"json\", \"text\": // valid format\n\t\tdefault: // illegal format\n\t\t\treturn fmt.Errorf(\"unknown output format %s\", o.outputFormat)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (f Format) Valid() bool {\n\tfor _, valid := range Formats {\n\t\tif valid == f {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (o LookupCertificateResultOutput) Format() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupCertificateResult) string { return v.Format }).(pulumi.StringOutput)\n}",
"func (cer *CER) sanityCheck() error {\n\tif len(cer.OriginHost) == 0 {\n\t\treturn ErrMissingOriginHost\n\t}\n\tif len(cer.OriginRealm) == 0 {\n\t\treturn ErrMissingOriginRealm\n\t}\n\treturn nil\n}",
"func ExpectValidCertificate(csr *certificatesv1.CertificateSigningRequest, _ crypto.Signer) error {\n\t_, err := pki.DecodeX509CertificateBytes(csr.Status.Certificate)\n\treturn err\n}",
"func CheckCerts(certDir string) error {\n\tcertFile := filepath.Join(certDir, \"cert.pem\")\n\tkeyFile := filepath.Join(certDir, \"key.pem\")\n\n\tif !file.Exists(certFile) || !file.Exists(keyFile) {\n\t\tlog.Warnln(log.Global, \"gRPC certificate/key file missing, recreating...\")\n\t\treturn genCert(certDir)\n\t}\n\n\tpemData, err := os.ReadFile(certFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to open TLS cert file: %s\", err)\n\t}\n\n\tif err = verifyCert(pemData); err != nil {\n\t\tif err != errCertExpired {\n\t\t\treturn err\n\t\t}\n\t\tlog.Warnln(log.Global, \"gRPC certificate has expired, regenerating...\")\n\t\treturn genCert(certDir)\n\t}\n\n\tlog.Infoln(log.Global, \"gRPC TLS certificate and key files exist, will use them.\")\n\treturn nil\n}",
"func isValidSecret(secret *v1.Secret) (bool, error) {\n\tswitch secret.Type {\n\t// We will accept TLS secrets that also have the 'ca.crt' payload.\n\tcase v1.SecretTypeTLS:\n\t\tdata, ok := secret.Data[v1.TLSCertKey]\n\t\tif !ok {\n\t\t\treturn false, errors.New(\"missing TLS certificate\")\n\t\t}\n\n\t\tif err := validateCertificate(data); err != nil {\n\t\t\treturn false, fmt.Errorf(\"invalid TLS certificate: %v\", err)\n\t\t}\n\n\t\tdata, ok = secret.Data[v1.TLSPrivateKeyKey]\n\t\tif !ok {\n\t\t\treturn false, errors.New(\"missing TLS private key\")\n\t\t}\n\n\t\tif err := validatePrivateKey(data); err != nil {\n\t\t\treturn false, fmt.Errorf(\"invalid TLS private key: %v\", err)\n\t\t}\n\n\t// Generic secrets may have a 'ca.crt' only.\n\tcase v1.SecretTypeOpaque, \"\":\n\t\tif _, ok := secret.Data[v1.TLSCertKey]; ok {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tif _, ok := secret.Data[v1.TLSPrivateKeyKey]; ok {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tif data := secret.Data[\"ca.crt\"]; len(data) == 0 {\n\t\t\treturn false, nil\n\t\t}\n\n\tdefault:\n\t\treturn false, nil\n\n\t}\n\n\t// If the secret we propose to accept has a CA bundle key,\n\t// validate that it is PEM certificate(s). Note that the\n\t// CA bundle on TLS secrets is allowed to be an empty string\n\t// (see https://github.com/projectcontour/contour/issues/1644).\n\tif data := secret.Data[\"ca.crt\"]; len(data) > 0 {\n\t\tif err := validateCertificate(data); err != nil {\n\t\t\treturn false, fmt.Errorf(\"invalid CA certificate bundle: %v\", err)\n\t\t}\n\t}\n\n\treturn true, nil\n}",
"func RegisterCertificateCheck(name string, filter *Filter, f func(*certdata.Data) *errors.Errors) {\n\tcertMutex.Lock()\n\tCertificate = append(Certificate, certificateCheck{name, filter, f})\n\tcertMutex.Unlock()\n}",
"func validateFnFormat(fnType reflect.Type) error {\n\tif fnType.Kind() != reflect.Func {\n\t\treturn fmt.Errorf(\"expected a func as input but was %s\", fnType.Kind())\n\t}\n\tif fnType.NumIn() < 1 {\n\t\treturn fmt.Errorf(\n\t\t\t\"expected at least one argument of type context.Context in function, found %d input arguments\",\n\t\t\tfnType.NumIn(),\n\t\t)\n\t}\n\tif !isContext(fnType.In(0)) {\n\t\treturn fmt.Errorf(\"expected first argument to be context.Context but found %s\", fnType.In(0))\n\t}\n\tif fnType.NumOut() != 1 {\n\t\treturn fmt.Errorf(\n\t\t\t\"expected function to return only error but found %d return values\", fnType.NumOut(),\n\t\t)\n\t}\n\tif !isError(fnType.Out(0)) {\n\t\treturn fmt.Errorf(\n\t\t\t\"expected function to return error but found %d\", fnType.Out(0).Kind(),\n\t\t)\n\t}\n\treturn nil\n}",
"func AssertCertificateHasClientAuthUsage(t *testing.T, cert *x509.Certificate) {\n\tfor i := range cert.ExtKeyUsage {\n\t\tif cert.ExtKeyUsage[i] == x509.ExtKeyUsageClientAuth {\n\t\t\treturn\n\t\t}\n\t}\n\tt.Error(\"cert has not ClientAuth usage as expected\")\n}",
"func canReadCertAndKey(certPath, keyPath string) error {\n\tcertReadable := canReadFile(certPath)\n\tkeyReadable := canReadFile(keyPath)\n\n\tif certReadable == false && keyReadable == false {\n\t\treturn fmt.Errorf(\"error reading key and certificate\")\n\t}\n\n\tif certReadable == false {\n\t\treturn fmt.Errorf(\"error reading %s, certificate and key must be supplied as a pair\", certPath)\n\t}\n\n\tif keyReadable == false {\n\t\treturn fmt.Errorf(\"error reading %s, certificate and key must be supplied as a pair\", keyPath)\n\t}\n\n\treturn nil\n}",
"func canReadCertAndKey(certPath, keyPath string) error {\n\tcertReadable := canReadFile(certPath)\n\tkeyReadable := canReadFile(keyPath)\n\n\tif certReadable == false && keyReadable == false {\n\t\treturn fmt.Errorf(\"error reading key and certificate\")\n\t}\n\n\tif certReadable == false {\n\t\treturn fmt.Errorf(\"error reading %s, certificate and key must be supplied as a pair\", certPath)\n\t}\n\n\tif keyReadable == false {\n\t\treturn fmt.Errorf(\"error reading %s, certificate and key must be supplied as a pair\", keyPath)\n\t}\n\n\treturn nil\n}",
"func Check(d *certdata.Data) *errors.Errors {\n\tvar e = errors.New(nil)\n\n\tif d.Issuer != nil && !bytes.Equal(d.Cert.RawIssuer, d.Issuer.RawSubject) {\n\t\te.Err(\"Certificate Issuer Distinguished Name field MUST match the Subject DN of the Issuing CA\")\n\t\treturn e\n\t}\n\n\treturn e\n}",
"func (o BackendTlsPtrOutput) ValidateCertificateChain() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *BackendTls) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ValidateCertificateChain\n\t}).(pulumi.BoolPtrOutput)\n}",
"func (o *TppCertificateParams) GetValidUntilDateOk() (*string, bool) {\n\tif o == nil || o.ValidUntilDate == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ValidUntilDate, true\n}",
"func (t Authority) Check(ctx context.Context, req *CheckRequest) (resp *CheckResponse, err error) {\n\tvar (\n\t\tm1 agent.TLSCertificates\n\t\tencoded []byte\n\t)\n\n\tif encoded, err = os.ReadFile(t.protoPath); err != nil {\n\t\treturn &CheckResponse{}, status.Error(codes.Unavailable, \"missing info\")\n\t}\n\n\tif err = proto.Unmarshal(encoded, &m1); err != nil {\n\t\treturn &CheckResponse{}, status.Error(codes.Unavailable, \"invalid authority\")\n\t}\n\n\tif m1.Fingerprint != req.Fingerprint {\n\t\treturn &CheckResponse{}, status.Error(codes.NotFound, \"fingerprint mismatch\")\n\t}\n\n\treturn &CheckResponse{}, nil\n}",
"func ReadCertificate(data []byte) (certificate *Certificate, remainder []byte, err error) {\n\tcertificate, err = NewCertificate(data)\n\tif err != nil && err.Error() == \"certificate parsing warning: certificate data is longer than specified by length\" {\n\t\tremainder = certificate.ExcessBytes()\n\t\terr = nil\n\t}\n\treturn\n}",
"func (c *CertAuthID) Check() error {\n\tif err := c.Type.Check(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif strings.TrimSpace(c.DomainName) == \"\" {\n\t\treturn trace.BadParameter(\"identity validation error: empty domain name\")\n\t}\n\treturn nil\n}",
"func (s *udtSocket) checkValidHandshake(m *multiplexer, p *packet.HandshakePacket, from *net.UDPAddr) bool {\n\tif s.udtVer != 4 {\n\t\treturn false\n\t}\n\treturn true\n}",
"func CheckBadLinkerFlags(ctx BaseModuleContext, prop string, flags []string) {\n\tfor _, flag := range flags {\n\t\tflag = strings.TrimSpace(flag)\n\n\t\tif !strings.HasPrefix(flag, \"-\") {\n\t\t\tctx.PropertyErrorf(prop, \"Flag `%s` must start with `-`\", flag)\n\t\t} else if strings.HasPrefix(flag, \"-l\") {\n\t\t\tif ctx.Host() {\n\t\t\t\tctx.PropertyErrorf(prop, \"Bad flag: `%s`, use shared_libs or host_ldlibs instead\", flag)\n\t\t\t} else {\n\t\t\t\tctx.PropertyErrorf(prop, \"Bad flag: `%s`, use shared_libs instead\", flag)\n\t\t\t}\n\t\t} else if strings.HasPrefix(flag, \"-L\") {\n\t\t\tctx.PropertyErrorf(prop, \"Bad flag: `%s` is not allowed\", flag)\n\t\t} else if strings.HasPrefix(flag, \"-Wl,--version-script\") {\n\t\t\tctx.PropertyErrorf(prop, \"Bad flag: `%s`, use version_script instead\", flag)\n\t\t} else if flag == \"--coverage\" {\n\t\t\tctx.PropertyErrorf(prop, \"Bad flag: `%s`, use native_coverage instead\", flag)\n\t\t} else if strings.Contains(flag, \" \") {\n\t\t\targs := strings.Split(flag, \" \")\n\t\t\tif args[0] == \"-z\" {\n\t\t\t\tif len(args) > 2 {\n\t\t\t\t\tctx.PropertyErrorf(prop, \"`-z` only takes one argument: `%s`\", flag)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tctx.PropertyErrorf(prop, \"Bad flag: `%s` is not an allowed multi-word flag. Should it be split into multiple flags?\", flag)\n\t\t\t}\n\t\t}\n\t}\n}"
] | [
"0.6173354",
"0.6016101",
"0.5732977",
"0.55939424",
"0.5554805",
"0.5551032",
"0.55084914",
"0.54934126",
"0.53708816",
"0.5328171",
"0.52916765",
"0.5222796",
"0.5215715",
"0.52015436",
"0.5192821",
"0.5157193",
"0.50885516",
"0.50314575",
"0.502443",
"0.50205916",
"0.5017505",
"0.49988452",
"0.49825194",
"0.4981845",
"0.49788326",
"0.4968657",
"0.49582395",
"0.49154413",
"0.49117067",
"0.49054348",
"0.49037984",
"0.49034977",
"0.48994467",
"0.48846835",
"0.48786858",
"0.48573306",
"0.48034465",
"0.47817802",
"0.4777313",
"0.4775215",
"0.47677293",
"0.47663137",
"0.47629154",
"0.47511145",
"0.4735472",
"0.47133613",
"0.4709934",
"0.47022897",
"0.46969554",
"0.46879178",
"0.46867564",
"0.46785852",
"0.46473852",
"0.46428883",
"0.46294853",
"0.4610677",
"0.46097043",
"0.46091715",
"0.46087793",
"0.45987424",
"0.45899686",
"0.45746183",
"0.45697144",
"0.45695114",
"0.45463893",
"0.454016",
"0.4529676",
"0.45259792",
"0.45256615",
"0.4513146",
"0.4510958",
"0.45098832",
"0.45080668",
"0.45059767",
"0.45045975",
"0.4503708",
"0.44941726",
"0.44899735",
"0.4482769",
"0.44733602",
"0.44722885",
"0.44720092",
"0.44659194",
"0.4457492",
"0.44497234",
"0.4431028",
"0.44296518",
"0.4427992",
"0.44263336",
"0.4413067",
"0.44074467",
"0.44074467",
"0.4406721",
"0.44050825",
"0.4404762",
"0.43970692",
"0.43924367",
"0.4390169",
"0.43826684",
"0.43691853"
] | 0.8533605 | 0 |
AddrsFromStrings returns strings list converted to address list | func AddrsFromStrings(s apiutils.Strings, defaultPort int) ([]NetAddr, error) {
addrs := make([]NetAddr, len(s))
for i, val := range s {
addr, err := ParseHostPortAddr(val, defaultPort)
if err != nil {
return nil, trace.Wrap(err)
}
addrs[i] = *addr
}
return addrs, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func parseAddressesFromStr(s string) ([]cipher.Address, error) {\n\taddrsStr := splitCommaString(s)\n\n\tvar addrs []cipher.Address\n\tfor _, s := range addrsStr {\n\t\ta, err := cipher.DecodeBase58Address(s)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\taddrs = append(addrs, a)\n\t}\n\n\treturn addrs, nil\n}",
"func StringToDNSAddressList(dnsList string) []string {\n\ttemp := strings.Split(dnsList, \",\")\n\tadds := []string{}\n\tfor _, t := range temp {\n\t\tif t == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tadds = append(adds, t)\n\t}\n\treturn adds\n}",
"func (a Addresses) String() []string {\n\taddrs := []string{}\n\tfor _, addr := range a {\n\t\taddrs = append(addrs, addr.String())\n\t}\n\treturn addrs\n}",
"func StringToIPAddressList(ipList string) ([]net.IP, error) {\n\ttemp := strings.Split(ipList, \",\")\n\tips := []net.IP{}\n\tfor _, t := range temp {\n\t\tif t == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tparsed := net.ParseIP(t)\n\t\tif parsed == nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot parse %s as an IP address\", t)\n\t\t}\n\t\tips = append(ips, parsed)\n\t}\n\treturn ips, nil\n}",
"func toAddrSlice(addresses string) []string {\n\tif addresses == \"\" {\n\t\treturn make([]string, 0, 0)\n\t}\n\n\t//remove all spaces from the string, and then split the string on all commas\n\treturn strings.Split(strings.Replace(addresses, \" \", \"\", -1), \",\")\n}",
"func parseAddresses(addrs []string) (iaddrs []iaddr.IPFSAddr, err error) {\n\tiaddrs = make([]iaddr.IPFSAddr, len(addrs))\n\tfor i, saddr := range addrs {\n\t\tiaddrs[i], err = iaddr.ParseString(saddr)\n\t\tif err != nil {\n\t\t\treturn nil, cmds.ClientError(\"invalid peer address: \" + err.Error())\n\t\t}\n\t}\n\treturn\n}",
"func LocalIPAddrsAsStrings(includeLoopbacks bool) ([]string, error) {\n\tvar localIPAddrsStrings = []string{}\n\tvar err error\n\tipaddrs, err := LocalIPAddrs()\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tfor _, ipaddr := range ipaddrs {\n\t\tif includeLoopbacks || !ipaddr.IsLoopback() {\n\t\t\tlocalIPAddrsStrings = append(localIPAddrsStrings, ipaddr.String())\n\t\t}\n\t}\n\treturn localIPAddrsStrings, err\n}",
"func decodeAddresses(val []byte) ([]common.Address, error) {\n\ts := string(val)\n\tvar res []common.Address\n\tif s == \"\" {\n\t\treturn res, nil\n\t}\n\tfor _, a := range strings.Split(s, \",\") {\n\t\tif !common.IsHexAddress(a) {\n\t\t\treturn nil, errors.Errorf(\"malformed address: %q\", s)\n\t\t}\n\n\t\tres = append(res, common.HexToAddress(a))\n\t}\n\treturn res, nil\n}",
"func AddressesToStrings(addrs []cipher.Address) []string {\n\tif addrs == nil {\n\t\treturn nil\n\t}\n\n\taddrsStr := make([]string, len(addrs))\n\tfor i, a := range addrs {\n\t\taddrsStr[i] = a.String()\n\t}\n\n\treturn addrsStr\n}",
"func NewAddrListFromString(list string) *AddrList {\n\t//If we're given an empty string, just return an empty AddrList\n\tif list == \"\" {\n\t\treturn NewAddrList()\n\t}\n\n\treturn &AddrList{toAddrSlice(list), list}\n}",
"func ListenAddresses(value string) ([]string, error) {\n\taddresses := make([]string, 0)\n\n\tif value == \"\" {\n\t\treturn addresses, nil\n\t}\n\n\tlocalHost, localPort, err := net.SplitHostPort(value)\n\tif err != nil {\n\t\tlocalHost = value\n\t\tlocalPort = DefaultPort\n\t}\n\n\tif localHost == \"0.0.0.0\" || localHost == \"::\" || localHost == \"[::]\" {\n\t\tifaces, err := net.Interfaces()\n\t\tif err != nil {\n\t\t\treturn addresses, err\n\t\t}\n\n\t\tfor _, i := range ifaces {\n\t\t\taddrs, err := i.Addrs()\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, addr := range addrs {\n\t\t\t\tvar ip net.IP\n\t\t\t\tswitch v := addr.(type) {\n\t\t\t\tcase *net.IPNet:\n\t\t\t\t\tip = v.IP\n\t\t\t\tcase *net.IPAddr:\n\t\t\t\t\tip = v.IP\n\t\t\t\t}\n\n\t\t\t\tif !ip.IsGlobalUnicast() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif ip.To4() == nil {\n\t\t\t\t\tif localHost == \"0.0.0.0\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\taddresses = append(addresses, fmt.Sprintf(\"[%s]:%s\", ip, localPort))\n\t\t\t\t} else {\n\t\t\t\t\taddresses = append(addresses, fmt.Sprintf(\"%s:%s\", ip, localPort))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif strings.Contains(localHost, \":\") {\n\t\t\taddresses = append(addresses, fmt.Sprintf(\"[%s]:%s\", localHost, localPort))\n\t\t} else {\n\t\t\taddresses = append(addresses, fmt.Sprintf(\"%s:%s\", localHost, localPort))\n\t\t}\n\t}\n\n\treturn addresses, nil\n}",
"func ParseAddressList(list string) ([]*mail.Address, error)",
"func (hd *Datapath) convertIPs(addresses []string) ([]*halproto.IPAddressObj, error) {\n\tvar halAddresses []*halproto.IPAddressObj\n\tfor _, a := range addresses {\n\t\tif ip := net.ParseIP(strings.TrimSpace(a)); len(ip) > 0 {\n\t\t\t// try parsing as an octet\n\n\t\t\thalAddr := &halproto.IPAddressObj{\n\t\t\t\tFormats: &halproto.IPAddressObj_Address{\n\t\t\t\t\tAddress: &halproto.Address{\n\t\t\t\t\t\tAddress: &halproto.Address_Prefix{\n\t\t\t\t\t\t\tPrefix: &halproto.IPSubnet{\n\t\t\t\t\t\t\t\tSubnet: &halproto.IPSubnet_Ipv4Subnet{\n\t\t\t\t\t\t\t\t\tIpv4Subnet: &halproto.IPPrefix{\n\t\t\t\t\t\t\t\t\t\tAddress: &halproto.IPAddress{\n\t\t\t\t\t\t\t\t\t\t\tIpAf: halproto.IPAddressFamily_IP_AF_INET,\n\t\t\t\t\t\t\t\t\t\t\tV4OrV6: &halproto.IPAddress_V4Addr{\n\t\t\t\t\t\t\t\t\t\t\t\tV4Addr: ipv4Touint32(ip),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\tPrefixLen: uint32(32),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\thalAddresses = append(halAddresses, halAddr)\n\t\t} else if ip, network, err := net.ParseCIDR(strings.TrimSpace(a)); err == nil {\n\t\t\t// try parsing as IPMask\n\t\t\tprefixLen, _ := network.Mask.Size()\n\n\t\t\thalAddr := &halproto.IPAddressObj{\n\t\t\t\tFormats: &halproto.IPAddressObj_Address{\n\t\t\t\t\tAddress: &halproto.Address{\n\t\t\t\t\t\tAddress: &halproto.Address_Prefix{\n\t\t\t\t\t\t\tPrefix: &halproto.IPSubnet{\n\t\t\t\t\t\t\t\tSubnet: &halproto.IPSubnet_Ipv4Subnet{\n\t\t\t\t\t\t\t\t\tIpv4Subnet: &halproto.IPPrefix{\n\t\t\t\t\t\t\t\t\t\tAddress: &halproto.IPAddress{\n\t\t\t\t\t\t\t\t\t\t\tIpAf: halproto.IPAddressFamily_IP_AF_INET,\n\t\t\t\t\t\t\t\t\t\t\tV4OrV6: &halproto.IPAddress_V4Addr{\n\t\t\t\t\t\t\t\t\t\t\t\tV4Addr: ipv4Touint32(ip),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\tPrefixLen: uint32(prefixLen),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\thalAddresses = append(halAddresses, halAddr)\n\n\t\t} else if ipRange := strings.Split(strings.TrimSpace(a), \"-\"); len(ipRange) == 2 {\n\t\t\t// try parsing as hyphen separated range\n\t\t\thalAddr, err := hd.convertIPRange(ipRange[0], ipRange[1])\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to parse IP Range {%v}. Err: %v\", ipRange, err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\thalAddresses = append(halAddresses, halAddr)\n\t\t} else if a == \"any\" {\n\t\t\t// Interpret it as 0.0.0.0/0\n\t\t\thalAddr := &halproto.IPAddressObj{\n\t\t\t\tFormats: &halproto.IPAddressObj_Address{\n\t\t\t\t\tAddress: &halproto.Address{\n\t\t\t\t\t\tAddress: &halproto.Address_Prefix{\n\t\t\t\t\t\t\tPrefix: &halproto.IPSubnet{\n\t\t\t\t\t\t\t\tSubnet: &halproto.IPSubnet_Ipv4Subnet{\n\t\t\t\t\t\t\t\t\tIpv4Subnet: &halproto.IPPrefix{\n\t\t\t\t\t\t\t\t\t\tAddress: &halproto.IPAddress{\n\t\t\t\t\t\t\t\t\t\t\tIpAf: halproto.IPAddressFamily_IP_AF_INET,\n\t\t\t\t\t\t\t\t\t\t\tV4OrV6: &halproto.IPAddress_V4Addr{},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\thalAddresses = append(halAddresses, halAddr)\n\t\t} else {\n\t\t\t// give up\n\t\t\treturn nil, fmt.Errorf(\"invalid IP Address format {%v}. Should either be in an octet, CIDR or hyphen separated IP Range\", a)\n\t\t}\n\n\t}\n\treturn halAddresses, nil\n}",
"func StringsToIPs(ips []string) []net.IP {\n\ts := make([]net.IP, len(ips))\n\tfor i := range ips {\n\t\ts[i] = net.ParseIP(ips[i])\n\t}\n\treturn s\n}",
"func localAddresses() {\n\tifaces, err := net.Interfaces()\n\tif err != nil {\n\t\tlog.Print(fmt.Errorf(\"localAddresses: %v\\n\", err.Error()))\n\t\treturn\n\t}\n\tfor _, iface := range ifaces {\n\t\taddrs, err := iface.Addrs()\n\t\tif err != nil {\n\t\t\tlog.Print(fmt.Errorf(\"localAddresses: %v\\n\", err.Error()))\n\t\t\tcontinue\n\t\t}\n\t\tfor _, a := range addrs {\n\t\t\t_, ipNet, err := net.ParseCIDR(a.String())\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error parsing CIDR\")\n\t\t\t}\n\t\t\tif ipNet.IP.To4() == nil || iface.Flags&net.FlagBroadcast == 0 || ipNet.IP.IsLoopback() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttmpIntf := netInterface{iname: iface.Name, ipnet: ipNet}\n\t\t\tintf = append(intf, tmpIntf)\n\t\t}\n\t}\n\tfmt.Println(intf)\n}",
"func toSPVAddr(seeds []string) []string {\n\tvar addrs = make([]string, len(seeds))\n\tfor i, seed := range seeds {\n\t\tportIndex := strings.LastIndex(seed, \":\")\n\t\tif portIndex > 0 {\n\t\t\taddrs[i] = fmt.Sprint(string([]byte(seed)[:portIndex]), \":\", SPVServerPort)\n\t\t} else {\n\t\t\taddrs[i] = fmt.Sprint(seed, \":\", SPVServerPort)\n\t\t}\n\t}\n\treturn addrs\n}",
"func ProxyAddressesToString(addrs []pb.TcpAddress) string {\n\taddrStrs := make([]string, len(addrs))\n\tfor i := range addrs {\n\t\taddrStrs[i] = ProxyAddressToString(&addrs[i])\n\t}\n\treturn \"[\" + strings.Join(addrStrs, \",\") + \"]\"\n}",
"func (p *AddressParser) ParseList(list string) ([]*Address, error)",
"func (as AddrRanges) String() string {\n\tvar ss []string\n\tfor _, a := range as {\n\t\ts := a.String()\n\t\tss = append(ss, s)\n\t}\n\treturn strings.Join(ss, \",\")\n}",
"func NewAddrListFromSlice(slice []string) *AddrList {\n\t//if we're given an empty slice, just return an empty AddrList\n\tif len(slice) == 0 {\n\t\treturn NewAddrList()\n\t}\n\n\treturn &AddrList{slice, toAddrString(slice)}\n}",
"func toAddrString(addresses []string) string {\n\n\tif len(addresses) < 1 {\n\t\treturn \"\"\n\t}\n\n\tvar ret = \"\"\n\t//loop through all but the last element in the address slice, appending them to the ret string with a comma afterwards\n\tfor _, v := range addresses[:len(addresses)-1] {\n\t\tret += v + \", \"\n\t}\n\t//append the last address to the ret string & return the list of addresses\n\tret += addresses[len(addresses)-1]\n\treturn ret\n}",
"func normalizeAddresses(addrs []string, defaultPort string) []string {\n\tfor i, addr := range addrs {\n\t\taddrs[i] = normalizeAddress(addr, defaultPort)\n\t}\n\n\treturn removeDuplicateAddresses(addrs)\n}",
"func getAddressListString(addresses []mail.Address) string {\n\tvar addressStrings []string\n\n\tfor _, address := range addresses {\n\t\taddressStrings = append(addressStrings, address.String())\n\t}\n\treturn strings.Join(addressStrings, \",\"+crlf+\" \")\n}",
"func getResolvedEndpoints(endpoints []string, netWrapper NetWrapper) ([]string, error) {\n\tresolvedEndpoints := sets.String{}\n\tfor _, endpoint := range endpoints {\n\t\tif net.ParseIP(endpoint) == nil {\n\t\t\t// It's not a valid IP address, so assume it's a DNS name, and try to resolve it,\n\t\t\t// replacing its DNS name with its IP addresses in expandedEndpoints\n\t\t\t// through an interface abstracting the internet\n\t\t\tipAddrs, err := netWrapper.LookupHost(endpoint)\n\t\t\tif err != nil {\n\t\t\t\treturn resolvedEndpoints.List(), err\n\t\t\t}\n\t\t\tfor _, ip := range ipAddrs {\n\t\t\t\tresolvedEndpoints = resolvedEndpoints.Union(sets.NewString(ip))\n\t\t\t}\n\t\t} else {\n\t\t\tresolvedEndpoints = resolvedEndpoints.Union(sets.NewString(endpoint))\n\t\t}\n\t}\n\treturn resolvedEndpoints.List(), nil\n}",
"func ConvertAddrsToValAddrs(addrs []sdk.AccAddress) []sdk.ValAddress {\n\tvalAddrs := make([]sdk.ValAddress, len(addrs))\n\n\tfor i, addr := range addrs {\n\t\tvalAddrs[i] = sdk.ValAddress(addr)\n\t}\n\n\treturn valAddrs\n}",
"func (o *CIBVAddress) FromStringArray(sa StringArray) error {\n\tvar err error\n\tvar val int\n\tif val, err = strconv.Atoi(sa[0]); err != nil {\n\t\treturn err\n\t}\n\to.Lid = uint16(val)\n\tif val, err = strconv.Atoi(sa[1]); err != nil {\n\t\treturn err\n\t}\n\to.Qpn = uint32(val)\n\tif val, err = strconv.Atoi(sa[2]); err != nil {\n\t\treturn err\n\t}\n\to.Psn = uint32(val)\n\tif val, err = strconv.Atoi(sa[3]); err != nil {\n\t\treturn err\n\t}\n\to.Raddr = uint64(val)\n\tif val, err = strconv.Atoi(sa[4]); err != nil {\n\t\treturn err\n\t}\n\to.Rkey = uint32(val)\n\tif val, err = strconv.Atoi(sa[5]); err != nil {\n\t\treturn err\n\t}\n\to.Flag = uint32(val)\n\to.uuid = sa[6]\n\treturn nil\n}",
"func convertToNodeAddrs(addrs []string) ([]*kronospb.NodeAddr, error) {\n\tvar seedHostsAddrs []*kronospb.NodeAddr\n\tfor _, addr := range addrs {\n\t\tnodeAddr, err := kronosutil.NodeAddr(addr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tseedHostsAddrs = append(seedHostsAddrs, nodeAddr)\n\t}\n\treturn seedHostsAddrs, nil\n}",
"func SlicePtrFromStrings(ss []string) ([]*byte, error) {\n\tvar err error\n\tbb := make([]*byte, len(ss)+1)\n\tfor i := 0; i < len(ss); i++ {\n\t\tbb[i], err = BytePtrFromString(ss[i])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tbb[len(ss)] = nil\n\treturn bb, nil\n}",
"func getAddressesFromPool(configIdx int, networkConfig machinev1.NetworkDeviceSpec, s *machineScope) ([]string, string, error) {\n\taddresses := []string{}\n\tvar gateway string\n\tfor poolIdx := range networkConfig.AddressesFromPools {\n\t\tclaimName := ipam.GetIPAddressClaimName(s.machine, configIdx, poolIdx)\n\t\tipAddress, err := ipam.RetrieveBoundIPAddress(s.Context, s.client, s.machine, claimName)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", fmt.Errorf(\"error retrieving bound IP address: %w\", err)\n\t\t}\n\t\tipAddressSpec := ipAddress.Spec\n\t\taddresses = append(addresses, fmt.Sprintf(\"%s/%d\", ipAddressSpec.Address, ipAddressSpec.Prefix))\n\t\tif len(ipAddressSpec.Gateway) > 0 {\n\t\t\tgateway = ipAddressSpec.Gateway\n\t\t}\n\t}\n\treturn addresses, gateway, nil\n}",
"func cdnsToNetAddrs(cdns []*config.CDN) []dfnet.NetAddr {\n\tnetAddrs := make([]dfnet.NetAddr, 0, len(cdns))\n\tfor _, cdn := range cdns {\n\t\tnetAddrs = append(netAddrs, dfnet.NetAddr{\n\t\t\tType: dfnet.TCP,\n\t\t\tAddr: fmt.Sprintf(\"%s:%d\", cdn.IP, cdn.Port),\n\t\t})\n\t}\n\n\treturn netAddrs\n}",
"func convertAddresses(confMap map[string]interface{}, conv convAddrs) {\n\taddressesi, _ := confMap[\"Addresses\"].(map[string]interface{})\n\tif addressesi == nil {\n\t\tlog.Log(\"Addresses field missing or of the wrong type\")\n\t\treturn\n\t}\n\n\tswarm := toStringArray(addressesi[\"Swarm\"])\n\tannounce := toStringArray(addressesi[\"Announce\"])\n\tnoAnnounce := toStringArray(addressesi[\"NoAnnounce\"])\n\n\ts, a, na := conv(swarm, announce, noAnnounce)\n\taddressesi[\"Swarm\"] = s\n\taddressesi[\"Announce\"] = a\n\taddressesi[\"NoAnnounce\"] = na\n}",
"func ClientAddrs(addrs []string) func(*Client) {\n\treturn func(c *Client) { c.addrs = addrs }\n}",
"func (m *Memberlist) resolveAddr(hostStr string) ([]ipPort, error) {\n\t// First peel off any leading node name. This is optional.\n\tnodeName := \"\"\n\tif slashIdx := strings.Index(hostStr, \"/\"); slashIdx >= 0 {\n\t\tif slashIdx == 0 {\n\t\t\treturn nil, fmt.Errorf(\"empty node name provided\")\n\t\t}\n\t\tnodeName = hostStr[0:slashIdx]\n\t\thostStr = hostStr[slashIdx+1:]\n\t}\n\n\t// This captures the supplied port, or the default one.\n\thostStr = ensurePort(hostStr, m.config.BindPort)\n\thost, sport, err := net.SplitHostPort(hostStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlport, err := strconv.ParseUint(sport, 10, 16)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tport := uint16(lport)\n\n\t// If it looks like an IP address we are done. The SplitHostPort() above\n\t// will make sure the host part is in good shape for parsing, even for\n\t// IPv6 addresses.\n\tif ip := net.ParseIP(host); ip != nil {\n\t\treturn []ipPort{\n\t\t\tipPort{ip: ip, port: port, nodeName: nodeName},\n\t\t}, nil\n\t}\n\n\t// First try TCP so we have the best chance for the largest list of\n\t// hosts to join. If this fails it's not fatal since this isn't a standard\n\t// way to query DNS, and we have a fallback below.\n\tips, err := m.tcpLookupIP(host, port, nodeName)\n\tif err != nil {\n\t\tm.logger.Printf(\"[DEBUG] memberlist: TCP-first lookup failed for '%s', falling back to UDP: %s\", hostStr, err)\n\t}\n\tif len(ips) > 0 {\n\t\treturn ips, nil\n\t}\n\n\t// If TCP didn't yield anything then use the normal Go resolver which\n\t// will try UDP, then might possibly try TCP again if the UDP response\n\t// indicates it was truncated.\n\tans, err := net.LookupIP(host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tips = make([]ipPort, 0, len(ans))\n\tfor _, ip := range ans {\n\t\tips = append(ips, ipPort{ip: ip, port: port, nodeName: nodeName})\n\t}\n\treturn ips, nil\n}",
"func getLocalIPs() (ips []string) {\n\tips = make([]string, 0, 6)\n\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, addr := range addrs {\n\t\tip, _, err := net.ParseCIDR(addr.String())\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tips = append(ips, ip.String())\n\t}\n\treturn\n}",
"func PbToAddressList(src []*example.Address, opts ...TransformParam) []model.Address {\n\treturn PbToAddressPtrValList(src)\n}",
"func ResolveAddrs(addrs []string, log *zerolog.Logger) (resolved []*EdgeAddr) {\n\tfor _, addr := range addrs {\n\t\ttcpAddr, err := net.ResolveTCPAddr(\"tcp\", addr)\n\t\tif err != nil {\n\t\t\tlog.Error().Int(management.EventTypeKey, int(management.Cloudflared)).\n\t\t\t\tStr(logFieldAddress, addr).Err(err).Msg(\"edge discovery: failed to resolve to TCP address\")\n\t\t\tcontinue\n\t\t}\n\n\t\tudpAddr, err := net.ResolveUDPAddr(\"udp\", addr)\n\t\tif err != nil {\n\t\t\tlog.Error().Int(management.EventTypeKey, int(management.Cloudflared)).\n\t\t\t\tStr(logFieldAddress, addr).Err(err).Msg(\"edge discovery: failed to resolve to UDP address\")\n\t\t\tcontinue\n\t\t}\n\t\tversion := V6\n\t\tif udpAddr.IP.To4() != nil {\n\t\t\tversion = V4\n\t\t}\n\t\tresolved = append(resolved, &EdgeAddr{\n\t\t\tTCP: tcpAddr,\n\t\t\tUDP: udpAddr,\n\t\t\tIPVersion: version,\n\t\t})\n\t}\n\treturn\n}",
"func IpAddressGen(subnet string) []string {\n\tipAddress, ipNet, err := net.ParseCIDR(subnet)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tvar ipAddresses []string\n\n\tfor ipAddress := ipAddress.Mask(ipNet.Mask); ipNet.Contains(ipAddress); inc(ipAddress) {\n\t\tipAddresses = append(ipAddresses, ipAddress.String())\n\t}\n\n\treturn ipAddresses\n\n}",
"func extractIPAndPortFromAddresses(addresses []string) (string, string) {\n\tfor _, addr := range addresses {\n\t\taddrParts := strings.SplitN(addr, \"://\", 2)\n\t\tif len(addrParts) != 2 {\n\t\t\tlogrus.Errorf(\"invalid listening address %s: must be in format [protocol]://[address]\", addr)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch addrParts[0] {\n\t\tcase \"tcp\":\n\t\t\thost, port, err := net.SplitHostPort(addrParts[1])\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"failed to split host and port from address: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn host, port\n\t\tcase \"unix\":\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tlogrus.Errorf(\"only unix socket or tcp address is support\")\n\t\t}\n\t}\n\treturn \"\", \"\"\n}",
"func MatchAllIPStringFamily(isIPv6 bool, ipStrings []string) ([]string, error) {\n\tvar ipAddrs []string\n\tfor _, ipString := range ipStrings {\n\t\tif utilnet.IsIPv6String(ipString) == isIPv6 {\n\t\t\tipAddrs = append(ipAddrs, ipString)\n\t\t}\n\t}\n\tif len(ipAddrs) > 0 {\n\t\treturn ipAddrs, nil\n\t}\n\treturn nil, ErrorNoIP\n}",
"func getNameServerAddressListFromCmd(nameSrvAdders *string) *singlylinkedlist.List {\n\tif nameSrvAdders != nil {\n\t\tif *nameSrvAdders == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tnameSrvAdderArr := strings.Split(*nameSrvAdders, \";\")\n\t\tif len(nameSrvAdderArr) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tnameServerAddressList := singlylinkedlist.New()\n\t\tfor _, nameServerAddress := range nameSrvAdderArr {\n\t\t\tnameServerAddressList.Add(nameServerAddress)\n\t\t}\n\t\treturn nameServerAddressList\n\t}\n\treturn nil\n}",
"func cidrToList(cidr string) ([]string, error) {\n\ttempAddresses := []string{}\n\n\t// We'll use the net library to parse it into a netmask and netaddr\n\tip, ipnet, err := net.ParseCIDR(cidr)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\t// Then we'll loop through those and use our incrementer to get each ip\n\tfor ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); cidrToListInc(ip) {\n\t\ttempAddresses = append(tempAddresses, ip.String())\n\t}\n\n\treturn tempAddresses, nil\n}",
"func splitAddresses(addrs []string) (maddrs []ma.Multiaddr, pids []peer.ID, err error) {\n\n\tmaddrs = make([]ma.Multiaddr, len(addrs))\n\tpids = make([]peer.ID, len(addrs))\n\tfor i, addr := range addrs {\n\t\ta, err := ma.NewMultiaddr(path.Dir(addr))\n\t\tif err != nil {\n\t\t\treturn nil, nil, cmds.ClientError(\"invalid peer address: \" + err.Error())\n\t\t}\n\t\tid, err := peer.IDB58Decode(path.Base(addr))\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tpids[i] = id\n\t\tmaddrs[i] = a\n\t}\n\treturn\n}",
"func getIpFromAddresses(addrs []nmap.Address) (string, error) {\n\tif len(addrs) == 1 {\n\t\treturn addrs[0].Addr, nil\n\t}\n\tvar ipv6 string\n\tfor _, a := range addrs {\n\t\tif a.AddrType == \"ipv4\" {\n\t\t\treturn a.Addr, nil\n\t\t} else if a.AddrType == \"ipv6\" {\n\t\t\tipv6 = a.Addr\n\t\t}\n\t}\n\tif ipv6 == \"\" {\n\t\treturn ipv6, fmt.Errorf(\"host has no ipv4 or ipv6\")\n\t}\n\treturn ipv6, nil\n}",
"func AddressesForHost(host string) []string {\n\tss := collection.NewStringSet()\n\tif host == \"\" { // All address on machine\n\t\tif iFaces, err := net.Interfaces(); err == nil {\n\t\t\tfor _, iFace := range iFaces {\n\t\t\t\tconst interesting = net.FlagUp | net.FlagBroadcast\n\t\t\t\tif iFace.Flags&interesting == interesting {\n\t\t\t\t\tvar addrs []net.Addr\n\t\t\t\t\tif addrs, err = iFace.Addrs(); err == nil {\n\t\t\t\t\t\tfor _, addr := range addrs {\n\t\t\t\t\t\t\tvar ip net.IP\n\t\t\t\t\t\t\tswitch v := addr.(type) {\n\t\t\t\t\t\t\tcase *net.IPNet:\n\t\t\t\t\t\t\t\tip = v.IP\n\t\t\t\t\t\t\tcase *net.IPAddr:\n\t\t\t\t\t\t\t\tip = v.IP\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif ip.IsGlobalUnicast() {\n\t\t\t\t\t\t\t\tss.Add(ip.String())\n\t\t\t\t\t\t\t\tvar names []string\n\t\t\t\t\t\t\t\tif names, err = net.LookupAddr(ip.String()); err == nil {\n\t\t\t\t\t\t\t\t\tfor _, name := range names {\n\t\t\t\t\t\t\t\t\t\tif strings.HasSuffix(name, \".\") {\n\t\t\t\t\t\t\t\t\t\t\tname = name[:len(name)-1]\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\tss.Add(name)\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tss.Add(host)\n\t\tif net.ParseIP(host) == nil {\n\t\t\tif ips, err := net.LookupIP(host); err == nil && len(ips) > 0 {\n\t\t\t\tfor _, ip := range ips {\n\t\t\t\t\tss.Add(ip.String())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfor _, one := range []string{\"::\", \"::1\", \"127.0.0.1\"} {\n\t\tif ss.Contains(one) {\n\t\t\tdelete(ss, one)\n\t\t\tss.Add(\"localhost\")\n\t\t}\n\t}\n\taddrs := ss.Values()\n\tsort.Slice(addrs, func(i, j int) bool {\n\t\tisName1 := net.ParseIP(addrs[i]) == nil\n\t\tisName2 := net.ParseIP(addrs[j]) == nil\n\t\tif isName1 == isName2 {\n\t\t\treturn txt.NaturalLess(addrs[i], addrs[j], true)\n\t\t}\n\t\treturn isName1\n\t})\n\treturn addrs\n}",
"func LocalAddresses() (regular, loopback []string, err error) {\n\t// TODO(crawshaw): don't serve interface addresses that we are routing\n\tifaces, err := net.Interfaces()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfor i := range ifaces {\n\t\tiface := &ifaces[i]\n\t\tif !isUp(iface) {\n\t\t\t// Down interfaces don't count\n\t\t\tcontinue\n\t\t}\n\t\tifcIsLoopback := isLoopback(iface)\n\n\t\taddrs, err := iface.Addrs()\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tfor _, a := range addrs {\n\t\t\tswitch v := a.(type) {\n\t\t\tcase *net.IPNet:\n\t\t\t\t// TODO(crawshaw): IPv6 support.\n\t\t\t\t// Easy to do here, but we need good endpoint ordering logic.\n\t\t\t\tip := v.IP.To4()\n\t\t\t\tif ip == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// TODO(apenwarr): don't special case cgNAT.\n\t\t\t\t// In the general wireguard case, it might\n\t\t\t\t// very well be something we can route to\n\t\t\t\t// directly, because both nodes are\n\t\t\t\t// behind the same CGNAT router.\n\t\t\t\tif cgNAT.Contains(ip) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif linkLocalIPv4.Contains(ip) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif ip.IsLoopback() || ifcIsLoopback {\n\t\t\t\t\tloopback = append(loopback, ip.String())\n\t\t\t\t} else {\n\t\t\t\t\tregular = append(regular, ip.String())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn regular, loopback, nil\n}",
"func expandConnectionStringIPs(db string) (string, error) {\n\treturn expandConnectionStringIPsDetail(db, net.LookupIP)\n}",
"func HexToAddress(s string) Address { return BytesToAddress(FromHex(s)) }",
"func bindStrings(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(StringsABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}",
"func _ifaddrs(fm *Frame) error {\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\treturn err\n\t}\n\tout := fm.ValueOutput()\n\tfor _, addr := range addrs {\n\t\terr := out.Put(addr.String())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func HexToAddress(s string) types.Address { return BytesToAddress(FromHex(s)) }",
"func localAndBroadcastAddresses() (localAddrs map[ipAddr]bool, broadcastAddrs map[ipAddr]net.IP) {\n\t// Compile list of local addresses\n\tlocalAddrs = make(map[ipAddr]bool) // list of own (non-loopback) IPv4 addresses\n\tbroadcastAddrs = make(map[ipAddr]net.IP)\n\tifaces, err := net.Interfaces()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfor _, iface := range ifaces {\n\t\taddrs, err := iface.Addrs()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t\t// fmt.Println(\"Iface error:\", err)\n\t\t}\n\t\tfor _, addr := range addrs {\n\t\t\t// fmt.Printf(\"Addr: %T, %v\\n\", addr, addr)\n\t\t\tswitch t := addr.(type) {\n\t\t\tcase *net.IPNet:\n\t\t\t\t// fmt.Printf(\"IP: %v\\n\", t.IP.String())\n\n\t\t\t\tif (iface.Flags & net.FlagLoopback) != 0 {\n\t\t\t\t\t// fmt.Println(\"Loopback\")\n\t\t\t\t\tcontinue\n\t\t\t\t} else if (iface.Flags & net.FlagUp) == 0 {\n\t\t\t\t\t// interface not up\n\t\t\t\t\tcontinue\n\t\t\t\t} else if (iface.Flags & net.FlagBroadcast) == 0 {\n\t\t\t\t\t// doesn't support broadcast\n\t\t\t\t\tcontinue\n\t\t\t\t} else if t.IP.To4() != nil {\n\t\t\t\t\t// is an IPv4 address\n\n\t\t\t\t\tif bcastIP, err := bcastAddr(t); err == nil {\n\t\t\t\t\t\t// fmt.Println(\"The UDP broadcast address for\", t, \"is\", bcastIP)\n\t\t\t\t\t\tlocalAddrs[ipAddr(t.IP.String())] = true\n\t\t\t\t\t\tbroadcastAddrs[ipAddr(t.IP.String())] = bcastIP\n\t\t\t\t\t\tfmt.Println(\"address found: \", t.IP.To4())\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\t// NOOP\n\t\t\t}\n\t\t}\n\t}\n\tif len(broadcastAddrs) < 1 {\n\t\tpanic(\"No broadcast interfaces found!\")\n\t}\n\treturn\n}",
"func getAddresses(endpoint *v1.Endpoints) []string {\n\tserverAddresses := []string{}\n\tif endpoint == nil {\n\t\treturn serverAddresses\n\t}\n\tfor _, subset := range endpoint.Subsets {\n\t\tvar port string\n\t\tif len(subset.Ports) > 0 {\n\t\t\tport = strconv.Itoa(int(subset.Ports[0].Port))\n\t\t}\n\t\tif port == \"\" {\n\t\t\tport = \"443\"\n\t\t}\n\t\tfor _, address := range subset.Addresses {\n\t\t\tserverAddresses = append(serverAddresses, net.JoinHostPort(address.IP, port))\n\t\t}\n\t}\n\treturn serverAddresses\n}",
"func parseListeners(addrs []string) ([]string, []string, error) {\n\tipv4ListenAddrs := make([]string, 0, len(addrs)*2)\n\tipv6ListenAddrs := make([]string, 0, len(addrs)*2)\n\tfor _, addr := range addrs {\n\t\thost, _, err := net.SplitHostPort(addr)\n\t\tif err != nil {\n\t\t\t// Shouldn't happen due to already being normalized.\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\t// Empty host or host of * on plan9 is both IPv4 and IPv6.\n\t\tif host == \"\" || (host == \"*\" && runtime.GOOS == \"plan9\") {\n\t\t\tipv4ListenAddrs = append(ipv4ListenAddrs, addr)\n\t\t\tipv6ListenAddrs = append(ipv6ListenAddrs, addr)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Parse the IP.\n\t\tip := net.ParseIP(host)\n\t\tif ip == nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"'%s' is not a valid IP \"+\n\t\t\t\t\"address\", host)\n\t\t}\n\n\t\t// To4 returns nil when the IP is not an IPv4 address, so use\n\t\t// this determine the address type.\n\t\tif ip.To4() == nil {\n\t\t\tipv6ListenAddrs = append(ipv6ListenAddrs, addr)\n\t\t} else {\n\t\t\tipv4ListenAddrs = append(ipv4ListenAddrs, addr)\n\t\t}\n\t}\n\treturn ipv4ListenAddrs, ipv6ListenAddrs, nil\n}",
"func StringSliceToPointers(str []string) []*string {\n\tvar idsptrs []*string\n\tif len(str) > 0 {\n\t\tfor _, s := range str {\n\t\t\tfunc(st string) {\n\t\t\t\tidsptrs = append(idsptrs, &st)\n\t\t\t}(s)\n\t\t\ts = \"\"\n\t\t}\n\t\tstr = nil\n\t}\n\treturn idsptrs\n}",
"func extractUnicastIPv4Addrs(addrs []net.Addr) []string {\n\tvar ips []string\n\n\tfor _, a := range addrs {\n\t\tvar ip net.IP\n\n\t\tswitch a := a.(type) {\n\t\tcase *net.IPNet:\n\t\t\tip = a.IP\n\t\tcase *net.IPAddr:\n\t\t\tip = a.IP\n\t\t}\n\n\t\tif ip == nil || len(ip.To4()) == 0 {\n\t\t\t// Windows dataplane doesn't support IPv6 yet.\n\t\t\tcontinue\n\t\t}\n\t\tif ip.IsLoopback() {\n\t\t\t// Skip 127.0.0.1.\n\t\t\tcontinue\n\t\t}\n\t\tips = append(ips, ip.String()+\"/32\")\n\t}\n\n\treturn ips\n}",
"func AddressParserParseList(p *mail.AddressParser, list string) ([]*mail.Address, error)",
"func GetPotentialAddresses(ip string) ([]string, error) {\n\ta := net.ParseIP(ip).To4()\n\tl := []string{}\n\tif a != nil {\n\t\tfor i := 2; i < 255; i++ {\n\t\t\ta[3] = byte(i)\n\t\t\tl = append(l, a.String())\n\t\t}\n\t}\n\treturn l, nil\n}",
"func ConvertUserStrToAddress(userFAddr string) []byte {\n\tv := base58.Decode(userFAddr)\n\treturn v[2:34]\n}",
"func localIPv4s() ([]string, error) {\n\tvar ips []string\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\treturn ips, err\n\t}\n\n\tfor _, a := range addrs {\n\t\tif ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() && ipnet.IP.To4() != nil {\n\t\t\tips = append(ips, ipnet.IP.String())\n\t\t}\n\t}\n\n\treturn ips, nil\n}",
"func ParseAndValidateIPs(ipList string) (res []net.IP, err error) {\n\t// IP list can potentially be a blank string, \"\"\n\tif len(ipList) > 0 {\n\t\tips := strings.Split(ipList, \",\")\n\t\tfor _, ip := range ips {\n\t\t\tparsedIP := net.ParseIP(ip)\n\t\t\tif parsedIP == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Invalid IP address: %s\", ip)\n\t\t\t}\n\t\t\tres = append(res, parsedIP)\n\t\t}\n\t}\n\treturn\n}",
"func sanitizeAddrsplodedSet(public, private []ma.Multiaddr) []ma.Multiaddr {\n\ttype portAndAddr struct {\n\t\taddr ma.Multiaddr\n\t\tport int\n\t}\n\n\tprivports := make(map[int]struct{})\n\tpubaddrs := make(map[string][]portAndAddr)\n\n\tfor _, a := range private {\n\t\t_, port := addrKeyAndPort(a)\n\t\tprivports[port] = struct{}{}\n\t}\n\n\tfor _, a := range public {\n\t\tkey, port := addrKeyAndPort(a)\n\t\tpubaddrs[key] = append(pubaddrs[key], portAndAddr{addr: a, port: port})\n\t}\n\n\tvar result []ma.Multiaddr\n\tfor _, pas := range pubaddrs {\n\t\tif len(pas) == 1 {\n\t\t\t// it's not addrsploded\n\t\t\tresult = append(result, pas[0].addr)\n\t\t\tcontinue\n\t\t}\n\n\t\thaveAddr := false\n\t\tfor _, pa := range pas {\n\t\t\tif _, ok := privports[pa.port]; ok {\n\t\t\t\t// it matches a privately bound port, use it\n\t\t\t\tresult = append(result, pa.addr)\n\t\t\t\thaveAddr = true\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif pa.port == 4001 || pa.port == 4002 {\n\t\t\t\t// it's a default port, use it\n\t\t\t\tresult = append(result, pa.addr)\n\t\t\t\thaveAddr = true\n\t\t\t}\n\t\t}\n\n\t\tif !haveAddr {\n\t\t\t// we weren't able to select a port; bite the bullet and use them all\n\t\t\tfor _, pa := range pas {\n\t\t\t\tresult = append(result, pa.addr)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn result\n}",
"func (a *Address) StringSlice() []string {\n\treturn []string{0: a.Street, 1: a.Extension, 2: a.POBox,\n\t\t3: a.Locality, 4: a.Region, 5: a.PostalCode, 6: a.Country}\n}",
"func Expand(cidr string) []string {\n\n\tif strings.Contains(cidr, \":\") {\n\t\tif ips, ports, err := ExpandWithPort(cidr); err == nil {\n\t\t\tpp := []string{}\n\t\t\tranges := []string{}\n\t\t\tfor _, p := range ports {\n\t\t\t\tpp = append(pp, fmt.Sprintf(\"%d\", p))\n\t\t\t}\n\t\t\tportsString := strings.Join(pp, \",\")\n\n\t\t\tfor _, ip := range ips {\n\t\t\t\tranges = append(ranges, fmt.Sprintf(\"%s:%s\", ip, portsString))\n\t\t\t}\n\t\t\treturn ranges\n\t\t}\n\t\treturn []string{}\n\t}\n\n\tif !strings.Contains(cidr, \"/\") {\n\t\t//deal with potentially raw IP in non-CIDR format\n\t\tcidr += \"/32\"\n\t}\n\tnonCidr := strings.Split(cidr, \"/\")\n\taddress := nonCidr[0]\n\n\tif strings.Contains(address, \".\") {\n\t\t//sanity check for IP addresses\n\t\tif octets := strings.Split(address, \".\"); len(octets) == 4 {\n\t\t\tfor _, oct := range octets {\n\t\t\t\tif octn, err := strconv.Atoi(oct); err == nil {\n\t\t\t\t\tif octn > 255 { // invalid octet\n\t\t\t\t\t\treturn []string{}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\treturn []string{}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treturn []string{}\n\t\t}\n\t}\n\n\tipAdds, err := net.LookupIP(address)\n\tif err != nil {\n\t\treturn []string{}\n\t}\n\tcombinedIPs := []string{}\n\tfor _, ipAdd := range ipAdds {\n\t\tcidr = ipAdd.To4().String() + \"/\" + nonCidr[1]\n\t\tip, network, err := net.ParseCIDR(cidr)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tsize, _ := network.Mask.Size()\n\t\tif size == 32 {\n\t\t\tcombinedIPs = append(combinedIPs, ip.String())\n\t\t\tcontinue\n\t\t}\n\n\t\t//include network and broadcast addresses in count\n\t\thostCount := 2 << uint(31-size)\n\t\tips := make([]string, hostCount)\n\t\tips[0] = network.IP.String()\n\n\t\t//network address starting point\n\t\toctets := decimalOctets(network.IP)\n\n\t\tfor i := 1; i < hostCount; i++ {\n\t\t\toctets[3]++\n\t\t\tif octets[3] > 255 {\n\t\t\t\toctets[3] = 0\n\t\t\t\toctets[2]++\n\t\t\t}\n\n\t\t\tif octets[2] > 255 {\n\t\t\t\toctets[2] = 0\n\t\t\t\toctets[1]++\n\t\t\t}\n\n\t\t\tif octets[1] >= 255 {\n\t\t\t\toctets[1] = 0\n\t\t\t\toctets[0]++\n\t\t\t}\n\t\t\tips[i] = toIP(octets)\n\t\t}\n\t\tcombinedIPs = append(combinedIPs, ips...)\n\t}\n\treturn combinedIPs\n}",
"func WithListenAddrString(addrs ...string) Option {\n\treturn func(c *Config) (err error) {\n\t\tas := make([]multiaddr.Multiaddr, len(addrs))\n\t\tfor i, s := range addrs {\n\t\t\tif as[i], err = multiaddr.NewMultiaddr(s); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tc.addrs = as\n\t\treturn\n\t}\n}",
"func getEndpointsFromAddresses(addresses []corev1.EndpointAddress, addressType discovery.AddressType, ready bool) []discovery.Endpoint {\n\tendpoints := []discovery.Endpoint{}\n\tisIPv6AddressType := addressType == discovery.AddressTypeIPv6\n\n\tfor _, address := range addresses {\n\t\tif utilnet.IsIPv6String(address.IP) == isIPv6AddressType {\n\t\t\tendpoints = append(endpoints, endpointFromAddress(address, ready))\n\t\t}\n\t}\n\n\treturn endpoints\n}",
"func NetIPsFromIPAddresses(addresses []string) (ips []net.IP) {\n\tfor _, address := range addresses {\n\t\tif ip := net.ParseIP(address); ip != nil {\n\t\t\tips = append(ips, ip)\n\t\t}\n\t}\n\treturn\n}",
"func boundIPs(c *caddy.Controller) (ips []net.IP) {\n\tconf := dnsserver.GetConfig(c)\n\thosts := conf.ListenHosts\n\tif hosts == nil || hosts[0] == \"\" {\n\t\thosts = nil\n\t\taddrs, err := net.InterfaceAddrs()\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tfor _, addr := range addrs {\n\t\t\thosts = append(hosts, addr.String())\n\t\t}\n\t}\n\tfor _, host := range hosts {\n\t\tip, _, _ := net.ParseCIDR(host)\n\t\tip4 := ip.To4()\n\t\tif ip4 != nil && !ip4.IsLoopback() {\n\t\t\tips = append(ips, ip4)\n\t\t\tcontinue\n\t\t}\n\t\tip6 := ip.To16()\n\t\tif ip6 != nil && !ip6.IsLoopback() {\n\t\t\tips = append(ips, ip6)\n\t\t}\n\t}\n\treturn ips\n}",
"func (r *RemoteList) CopyAddrs(preferredRanges []*net.IPNet) []*udp.Addr {\n\tif r == nil {\n\t\treturn nil\n\t}\n\n\tr.Rebuild(preferredRanges)\n\n\tr.RLock()\n\tdefer r.RUnlock()\n\tc := make([]*udp.Addr, len(r.addrs))\n\tfor i, v := range r.addrs {\n\t\tc[i] = v.Copy()\n\t}\n\treturn c\n}",
"func NewAddr_List(s *capnp.Segment, sz int32) (Addr_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz)\n\treturn Addr_List{l}, err\n}",
"func LocalAddresses() ([]string, error) {\n\tresult := make([]string, 0)\n\n\taddresses, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, addr := range addresses {\n\t\tswitch v := addr.(type) {\n\t\tcase *net.IPNet:\n\t\t\tif v.IP.IsGlobalUnicast() || v.IP.IsLoopback() {\n\t\t\t\tresult = append(result, v.IP.String())\n\t\t\t}\n\t\tcase *net.IPAddr:\n\t\t\tif v.IP.IsGlobalUnicast() || v.IP.IsLoopback() {\n\t\t\t\tresult = append(result, v.IP.String())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn result, nil\n}",
"func parsePeers(peersstr []string) (peers []proto.Peer, err error) {\n\tfor _, s := range peersstr {\n\t\tp, err := strconv.Atoi(s)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpeers = append(peers, proto.Peer{ID: uint64(p)})\n\t}\n\treturn\n}",
"func GetTestListOfAddresses() []common.Address {\n\taddress1 := common.HexToAddress(\"8A749D6B91C35b8a5Ce812278C73C988a97790aA\")\n\taddress2 := common.HexToAddress(\"F28B17a7D4Ab334584dF6ebfD70FA59c3527CD1e\")\n\taddress3 := common.HexToAddress(\"09E360FeD8580641CE129252417a8646709196f5\")\n\tarr := []common.Address{address1, address2, address3}\n\treturn arr\n}",
"func bindStrings(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := ParsedABI(K_Strings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil\n}",
"func bindStrings(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := ParsedABI(K_Strings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil\n}",
"func parsePortRanges(s string) (ranges []uint16, err error) {\n\tfor _, cp := range strings.Split(s, \",\") {\n\t\tvar lo, hi int\n\t\tdp := strings.Split(cp, \"-\")\n\t\tif lo, err = strconv.Atoi(dp[0]); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif len(dp) > 1 {\n\t\t\tif hi, err = strconv.Atoi(dp[1]); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\thi = lo\n\t\t}\n\t\tranges = append(ranges, uint16(lo), uint16(hi))\n\t}\n\treturn\n}",
"func addrStructure(redisPort []string, redisHosts []string) []string {\n\thosts := []string{}\n\tif len(redisPort) != len(redisHosts) {\n\t\tport := \"6379\"\n\t\tif len(redisPort) == 0 {\n\t\t\tlogrus.Warnf(\"REDIS_PORT not exist, Use default port:%s\", port)\n\t\t} else {\n\t\t\tport = redisPort[0]\n\t\t\tlogrus.Warnf(\"REDIS_PORT len not equal REDIS_HOST len, Use first port:%s\", port)\n\t\t}\n\t\tfor _, host := range redisHosts {\n\t\t\thost := host + \":\" + port\n\t\t\thosts = append(hosts, host)\n\t\t}\n\t} else {\n\t\tfor index, host := range redisHosts {\n\t\t\thost := host + \":\" + redisPort[index]\n\t\t\thosts = append(hosts, host)\n\t\t}\n\t}\n\tif len(hosts) == 0 {\n\t\tlogrus.Warnf(\"REDIS_PORT hosts is empty\")\n\t}\n\treturn hosts\n}",
"func (as *AddrRanges) Set(s string) error {\n\tparts := strings.Split(s, \",\")\n\tfor _, part := range parts {\n\t\tvar a AddrRange\n\t\tpart = strings.TrimSpace(part)\n\t\tif err := a.Set(part); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\t*as = append(*as, a)\n\t}\n\treturn nil\n}",
"func IP(s string) []string {\n\ts = stripBrackets(s)\n\tvar matches []string\n\t// We're going to check each token in the string\n\tparts := strings.Split(s, \" \")\n\tfor _, part := range parts {\n\t\tpart := strings.TrimFunc(part, TrimAddress)\n\t\tvar match string\n\t\t// Need to treat parsing CIDRs differently from IPs without lengths\n\t\tif strings.Contains(part, \"/\") {\n\t\t\t_, ip, err := net.ParseCIDR(part)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmatch = ip.String()\n\t\t} else {\n\t\t\tip := net.ParseIP(part)\n\t\t\tif ip == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmatch = ip.String()\n\t\t}\n\t\tmatches = append(matches, match)\n\t}\n\treturn matches\n}",
"func resolveIPsToHostnames() ([]IPHostnamesPair, error) {\n\tipAddrs, err := getLocalIPAddresses()\n\tif err != nil {\n\t\treturn []IPHostnamesPair{}, err\n\t}\n\n\t// Reverse resolve all IPs, only keep those which match to a hostname\n\thostnamePairs := []IPHostnamesPair{}\n\tfor _, ip := range ipAddrs {\n\t\tnames, err := net.LookupAddr(ip.String())\n\t\tif err == nil {\n\t\t\tpair := IPHostnamesPair{\n\t\t\t\tIp: ip,\n\t\t\t\tHostnames: names,\n\t\t\t}\n\t\t\thostnamePairs = append(hostnamePairs, pair)\n\t\t} else {\n\t\t\tlog.With(\"ip\", ip.String()).Debugln(\"No DNS results for IP:\", err)\n\t\t}\n\t}\n\treturn hostnamePairs, nil\n}",
"func (k *Kubernetes) nsAddrs(external, headless bool, zone string) []dns.RR {\n\tvar (\n\t\tsvcNames []string\n\t\tsvcIPs []net.IP\n\t\tfoundEndpoint bool\n\t)\n\n\t// Find the CoreDNS Endpoints\n\tfor _, localIP := range k.localIPs {\n\t\tendpoints := k.APIConn.EpIndexReverse(localIP.String())\n\n\t\t// Collect IPs for all Services of the Endpoints\n\t\tfor _, endpoint := range endpoints {\n\t\t\tfoundEndpoint = true\n\t\t\tsvcs := k.APIConn.SvcIndex(endpoint.Index)\n\t\t\tfor _, svc := range svcs {\n\t\t\t\tif external {\n\t\t\t\t\tsvcName := strings.Join([]string{svc.Name, svc.Namespace, zone}, \".\")\n\n\t\t\t\t\tif headless && svc.Headless() {\n\t\t\t\t\t\tfor _, s := range endpoint.Subsets {\n\t\t\t\t\t\t\tfor _, a := range s.Addresses {\n\t\t\t\t\t\t\t\tsvcNames = append(svcNames, endpointHostname(a, k.endpointNameMode)+\".\"+svcName)\n\t\t\t\t\t\t\t\tsvcIPs = append(svcIPs, net.ParseIP(a.IP))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfor _, exIP := range svc.ExternalIPs {\n\t\t\t\t\t\t\tsvcNames = append(svcNames, svcName)\n\t\t\t\t\t\t\tsvcIPs = append(svcIPs, net.ParseIP(exIP))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tsvcName := strings.Join([]string{svc.Name, svc.Namespace, Svc, zone}, \".\")\n\t\t\t\tif svc.Headless() {\n\t\t\t\t\t// For a headless service, use the endpoints IPs\n\t\t\t\t\tfor _, s := range endpoint.Subsets {\n\t\t\t\t\t\tfor _, a := range s.Addresses {\n\t\t\t\t\t\t\tsvcNames = append(svcNames, endpointHostname(a, k.endpointNameMode)+\".\"+svcName)\n\t\t\t\t\t\t\tsvcIPs = append(svcIPs, net.ParseIP(a.IP))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tfor _, clusterIP := range svc.ClusterIPs {\n\t\t\t\t\t\tsvcNames = append(svcNames, svcName)\n\t\t\t\t\t\tsvcIPs = append(svcIPs, net.ParseIP(clusterIP))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// If no CoreDNS endpoints were found, use the localIPs directly\n\tif !foundEndpoint {\n\t\tsvcIPs = make([]net.IP, len(k.localIPs))\n\t\tsvcNames = make([]string, len(k.localIPs))\n\t\tfor i, localIP := range k.localIPs {\n\t\t\tsvcNames[i] = defaultNSName + zone\n\t\t\tsvcIPs[i] = localIP\n\t\t}\n\t}\n\n\t// Create an RR slice of collected IPs\n\trrs := make([]dns.RR, len(svcIPs))\n\tfor i, ip := range svcIPs {\n\t\tif ip.To4() == nil {\n\t\t\trr := new(dns.AAAA)\n\t\t\trr.Hdr.Class = dns.ClassINET\n\t\t\trr.Hdr.Rrtype = dns.TypeAAAA\n\t\t\trr.Hdr.Name = svcNames[i]\n\t\t\trr.AAAA = ip\n\t\t\trrs[i] = rr\n\t\t\tcontinue\n\t\t}\n\t\trr := new(dns.A)\n\t\trr.Hdr.Class = dns.ClassINET\n\t\trr.Hdr.Rrtype = dns.TypeA\n\t\trr.Hdr.Name = svcNames[i]\n\t\trr.A = ip\n\t\trrs[i] = rr\n\t}\n\n\treturn rrs\n}",
"func getIPsFromAddresses(ip net.IP, ipnet *net.IPNet) []net.IP {\n\tips := make([]net.IP, 0)\n\tfor ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); incrementIP(ip) {\n\t\tips = append(ips, duplicateIP(ip))\n\t}\n\n\treturn ips\n}",
"func (t *Interface) GetAddrList() ([][]byte, error) {\n\tiface, err := netlink.LinkByName(t.Name())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnladdrs, err := netlink.AddrList(iface, netlink.FAMILY_ALL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taddrs := [][]byte{}\n\tfor _, ipn := range nladdrs {\n\t\tip := ipn.IP\n\t\tif ip.To4().To16().Equal(ip) {\n\t\t\t// it's an IPv4 address- just use the 4 bytes\n\t\t\tip = ip.To4()\n\t\t}\n\t\taddrs = append(addrs, ip)\n\t}\n\treturn addrs, nil\n}",
"func getLocalIPs() (ips []string) {\n\tips = make([]string, 0, 6)\n\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, addr := range addrs {\n\t\tif ipnet, ok := addr.(*net.IPNet); ok {\n\t\t\tif ipnet.IP.To4() != nil {\n\t\t\t\tips = append(ips, ipnet.IP.String())\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}",
"func (s ZkId) NewAddrs(n int32) (Addr_List, error) {\n\tl, err := NewAddr_List(s.Struct.Segment(), n)\n\tif err != nil {\n\t\treturn Addr_List{}, err\n\t}\n\terr = s.Struct.SetPtr(1, l.List.ToPtr())\n\treturn l, err\n}",
"func FromStrings(s []string) ([]SemanticID, error) {\n\tresult := make([]SemanticID, len(s))\n\tfor i, id := range s {\n\t\tsid, err := FromString(id)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresult[i] = sid\n\t}\n\n\treturn result, nil\n}",
"func getServerBindaddrs(bindaddrList *string, options *string, transports *string) ([]pt_extras.Bindaddr, error) {\n\tvar result []pt_extras.Bindaddr\n\tvar serverBindaddr string\n\tvar serverTransports string\n\n\t// Get the list of all requested bindaddrs.\n\tif *bindaddrList != \"\" {\n\t\tserverBindaddr = *bindaddrList\n\t}\n\n\tfor _, spec := range strings.Split(serverBindaddr, \",\") {\n\t\tvar bindaddr pt_extras.Bindaddr\n\n\t\tparts := strings.SplitN(spec, \"-\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"-bindaddr: %q: doesn't contain \\\"-\\\"\", spec)\n\t\t}\n\t\tbindaddr.MethodName = parts[0]\n\t\taddr, err := pt_extras.ResolveAddr(parts[1])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"-bindaddr: %q: %s\", spec, err.Error())\n\t\t}\n\t\tbindaddr.Addr = addr\n\t\tbindaddr.Options = *options\n\t\tresult = append(result, bindaddr)\n\t}\n\n\tif transports == nil {\n\t\treturn nil, errors.New(\"must specify -transport or -transports in server mode\")\n\t} else {\n\t\tserverTransports = *transports\n\t}\n\tresult = pt_extras.FilterBindaddrs(result, strings.Split(serverTransports, \",\"))\n\tif len(result) == 0 {\n\t\tgolog.Errorf(\"no valid bindaddrs\")\n\t}\n\treturn result, nil\n}",
"func filterIPs(addrs []net.Addr) string {\n\tvar ipAddr string\n\tfor _, addr := range addrs {\n\t\tif v, ok := addr.(*net.IPNet); ok {\n\t\t\tif ip := v.IP.To4(); ip != nil {\n\t\t\t\tipAddr = v.IP.String()\n\t\t\t\tif !strings.HasPrefix(ipAddr, `169.254.`) {\n\t\t\t\t\treturn ipAddr\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn ipAddr\n}",
"func (d *JSONDiscovery) DiscoverVTGateAddrs(ctx context.Context, tags []string) ([]string, error) {\n\tspan, ctx := trace.NewSpan(ctx, \"JSONDiscovery.DiscoverVTGateAddrs\")\n\tdefer span.Finish()\n\n\tgates, err := d.discoverVTGates(ctx, tags)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taddrs := make([]string, len(gates))\n\tfor i, gate := range gates {\n\t\taddrs[i] = gate.Hostname\n\t}\n\n\treturn addrs, nil\n}",
"func interfaceAddrs() ([]net.Addr, error) {\n\treturn []net.Addr{}, nil\n}",
"func parseNodesString(nodes string, proto string) (parsed map[string]string) {\n\tvar nodeContactLen int\n\tif proto == \"udp4\" {\n\t\tnodeContactLen = v4nodeContactLen\n\t} else if proto == \"udp6\" {\n\t\tnodeContactLen = v6nodeContactLen\n\t} else {\n\t\treturn\n\t}\n\tparsed = make(map[string]string)\n\tif len(nodes)%nodeContactLen > 0 {\n\t\tlogger.Infof(\"DHT: len(NodeString) = %d, INVALID LENGTH, should be a multiple of %d\", len(nodes), nodeContactLen)\n\t\tlogger.Infof(\"%T %#v\\n\", nodes, nodes)\n\t\treturn\n\t} else {\n\t\tlogger.Infof(\"DHT: len(NodeString) = %d, had %d nodes, nodeContactLen=%d\\n\", len(nodes), len(nodes)/nodeContactLen, nodeContactLen)\n\t}\n\tfor i := 0; i < len(nodes); i += nodeContactLen {\n\t\tid := nodes[i : i+nodeIdLen]\n\t\taddress := nettools.BinaryToDottedPort(nodes[i+nodeIdLen : i+nodeContactLen])\n\t\tparsed[id] = address\n\t}\n\treturn\n\n}",
"func GetIpAddress(s string) (result []string) {\n\tlist := make([]string, 0)\n\tif len(s) < 4 || len(s) > 12 {\n\t\treturn\n\t}\n\thelper(&result, &list, s, 0)\n\treturn\n}",
"func TestInitAddrs(t *testing.T) {\n\n\tfor _, tc := range addrTestCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\n\t\t\tvar tr transport.Transport\n\t\t\tvar addrs []string\n\n\t\t\tfor _, addr := range tc.addrs {\n\t\t\t\taddrs = append(addrs, addr)\n\t\t\t}\n\n\t\t\tswitch tc.name {\n\t\t\tcase \"transportOption\":\n\t\t\t\t// we know that there are just two addrs in the dict\n\t\t\t\ttr = NewTransport(transport.Addrs(addrs[0], addrs[1]))\n\t\t\tcase \"natsOption\":\n\t\t\t\tnopts := nats.GetDefaultOptions()\n\t\t\t\tnopts.Servers = addrs\n\t\t\t\ttr = NewTransport(Options(nopts))\n\t\t\tcase \"default\":\n\t\t\t\ttr = NewTransport()\n\t\t\t}\n\n\t\t\tntport, ok := tr.(*ntport)\n\t\t\tif !ok {\n\t\t\t\tt.Fatal(\"Expected broker to be of types *nbroker\")\n\t\t\t}\n\t\t\t// check if the same amount of addrs we set has actually been set\n\t\t\tif len(ntport.addrs) != len(tc.addrs) {\n\t\t\t\tt.Errorf(\"Expected Addr count = %d, Actual Addr count = %d\",\n\t\t\t\t\tlen(ntport.addrs), len(tc.addrs))\n\t\t\t}\n\n\t\t\tfor _, addr := range ntport.addrs {\n\t\t\t\t_, ok := tc.addrs[addr]\n\t\t\t\tif !ok {\n\t\t\t\t\tt.Errorf(\"Expected '%s' has not been set\", addr)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}",
"func ListAddresses(q IPTypeQuery) ([]string, error) {\n\tnames, err := ListInterfaces()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed toget interface list: %s\", err)\n\t}\n\tlist := []string{}\n\tfor _, n := range names {\n\t\taddrs, err := GetAddress(n, q)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get address for %s: %s\", n, err)\n\t\t}\n\t\tlist = append(list, addrs...)\n\t}\n\treturn list, nil\n}",
"func Addrs(addrs []string) ([]*net.TCPAddr, error) {\n\tnetAddrs := make([]*net.TCPAddr, 0, len(addrs))\n\tnErrs := 0\n\tfor _, a := range addrs {\n\t\tnetAddr, err := net.ResolveTCPAddr(\"tcp4\", a)\n\t\tif err != nil {\n\t\t\tnErrs++\n\t\t\tif nErrs == len(addrs) {\n\t\t\t\t// bail if none of the addrs could be parsed\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tnetAddrs = append(netAddrs, netAddr)\n\t}\n\treturn netAddrs, nil\n}",
"func encodeAddresses(addr []common.Address) []byte {\n\tvar hexstrings []string\n\tfor _, a := range addr {\n\t\thexstrings = append(hexstrings, a.Hex())\n\t}\n\treturn []byte(strings.Join(hexstrings, \",\"))\n}",
"func getFirstAddressOf(names []string, logger log.Logger) (string, error) {\n\tvar ipAddr string\n\tfor _, name := range names {\n\t\tinf, err := net.InterfaceByName(name)\n\t\tif err != nil {\n\t\t\tlevel.Warn(logger).Log(\"msg\", \"error getting interface\", \"inf\", name, \"err\", err)\n\t\t\tcontinue\n\t\t}\n\t\taddrs, err := inf.Addrs()\n\t\tif err != nil {\n\t\t\tlevel.Warn(logger).Log(\"msg\", \"error getting addresses for interface\", \"inf\", name, \"err\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif len(addrs) <= 0 {\n\t\t\tlevel.Warn(logger).Log(\"msg\", \"no addresses found for interface\", \"inf\", name, \"err\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif ip := filterIPs(addrs); ip != \"\" {\n\t\t\tipAddr = ip\n\t\t}\n\t\tif strings.HasPrefix(ipAddr, `169.254.`) || ipAddr == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\treturn ipAddr, nil\n\t}\n\tif ipAddr == \"\" {\n\t\treturn \"\", fmt.Errorf(\"No address found for %s\", names)\n\t}\n\tif strings.HasPrefix(ipAddr, `169.254.`) {\n\t\tlevel.Warn(logger).Log(\"msg\", \"using automatic private ip\", \"address\", ipAddr)\n\t}\n\treturn ipAddr, nil\n}",
"func sliceToETHADDR(s []byte) ETHAddr {\n\tvar e ETHAddr\n\te[0] = s[0]\n\te[1] = s[1]\n\te[2] = s[2]\n\te[3] = s[3]\n\te[4] = s[4]\n\te[5] = s[5]\n\treturn e\n}",
"func IPListWriteFromCIDR(cidrStr string) error {\n\t_, ipnet, err := net.ParseCIDR(cidrStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmask := binary.BigEndian.Uint32(ipnet.Mask)\n\tstart := binary.BigEndian.Uint32(ipnet.IP)\n\tend := (start & mask) | (mask ^ 0xffffffff)\n\n\tfor i := start; i <= end; i++ {\n\t\tip := make(net.IP, 4)\n\t\tbinary.BigEndian.PutUint32(ip, i)\n\t\tfmt.Println(ip)\n\t}\n\n\treturn nil\n}",
"func mainFunction(start, end string, count int) ([]string, error) {\n\tallAddr := make([]string, count)\n\tsplitStartAddr := strings.Split(start, \".\")\n\ta, err := strconv.Atoi(splitStartAddr[0])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb, err := strconv.Atoi(splitStartAddr[1])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc, err := strconv.Atoi(splitStartAddr[2])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td, err := strconv.Atoi(splitStartAddr[3])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor i:=0; i<count; i++ {\n\t\tif d > 255 {\n\t\t\td = 0\n\t\t\tc++\n\t\t}\n\t\tif c > 255 {\n\t\t\tc = 0\n\t\t\tb++\n\t\t}\n\t\tif b > 255 {\n\t\t\tb = 0\n\t\t\ta++\n\t\t}\n\t\tif a > 255 {\n\t\t\treturn nil, errors.New(\"ip address is not exist\")\n\t\t}\n\n\t\ttemp := make([]string, 4)\n\t\ttemp[0] = strconv.Itoa(a)\n\t\ttemp[1] = strconv.Itoa(b)\n\t\ttemp[2] = strconv.Itoa(c)\n\t\ttemp[3] = strconv.Itoa(d)\n\n\t\tallAddr[i] = strings.Join(temp, \".\")\n\t\td++\n\t}\n\tcount--\n\tfmt.Println(\"test: \")\n\tfmt.Println(allAddr)\n\tif allAddr[count] != end {\n\t\treturn nil, errors.New(\"fail\")\n\t}\n\n\treturn allAddr, nil\n}",
"func FromStrings(targetStrings ...string) []Target {\n\ttargets := make([]Target, len(targetStrings))\n\tfor i := 0; i < len(targetStrings); i++ {\n\t\ttargets[i] = FromString(targetStrings[i])\n\t}\n\treturn targets\n}"
] | [
"0.71821207",
"0.7044454",
"0.6832384",
"0.6765927",
"0.6733926",
"0.66150105",
"0.6613743",
"0.6550636",
"0.6482139",
"0.6216898",
"0.6203558",
"0.6178492",
"0.6171338",
"0.6144829",
"0.60689884",
"0.6033101",
"0.59134465",
"0.5852022",
"0.58166736",
"0.5807565",
"0.57831806",
"0.5723382",
"0.5691205",
"0.5686441",
"0.5680185",
"0.5674636",
"0.5616643",
"0.5614539",
"0.5610448",
"0.5602714",
"0.5585674",
"0.5574909",
"0.5570933",
"0.5555001",
"0.55398804",
"0.5532985",
"0.5513331",
"0.5492079",
"0.5482563",
"0.5476277",
"0.5476208",
"0.5466791",
"0.5460808",
"0.54594314",
"0.54342425",
"0.5433407",
"0.5418974",
"0.5408797",
"0.54056466",
"0.5403228",
"0.5399418",
"0.53939927",
"0.5390491",
"0.538967",
"0.5386394",
"0.53788924",
"0.53665453",
"0.5360904",
"0.53524613",
"0.535035",
"0.534953",
"0.5347648",
"0.5338467",
"0.53371537",
"0.5336034",
"0.5327664",
"0.53255844",
"0.53200704",
"0.5304706",
"0.530011",
"0.5274456",
"0.5273689",
"0.527008",
"0.527008",
"0.5266012",
"0.52641046",
"0.52637786",
"0.52619946",
"0.52474254",
"0.5242895",
"0.5240588",
"0.5237074",
"0.52352244",
"0.52279097",
"0.5223862",
"0.5210084",
"0.5203819",
"0.519678",
"0.5195361",
"0.5188586",
"0.5184774",
"0.51759875",
"0.5170957",
"0.5166411",
"0.5151446",
"0.5136541",
"0.5121645",
"0.511896",
"0.5113631",
"0.5103202"
] | 0.69718254 | 2 |
FileExists checks whether a file exists at a given path | func FileExists(fp string) bool {
_, err := os.Stat(fp)
if err != nil && os.IsNotExist(err) {
return false
}
return true
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func fileExists(path string) bool {\n _, err := os.Stat(path)\n return err == nil\n}",
"func exists(filePath string) (exists bool) {\n _,err := os.Stat(filePath)\n if err != nil {\n exists = false\n } else {\n exists = true\n }\n return\n}",
"func fileExists(path string) bool {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func fileExists(path string) bool {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func fileExists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil\n}",
"func fileExists(path string) bool {\n\tif _, err := os.Stat(path); err == nil {\n\t\treturn true\n\t}\n\treturn false\n}",
"func fileExists(path string) bool {\n\tif _, err := os.Stat(path); err == nil {\n\t\treturn true\n\t}\n\treturn false\n}",
"func fileExists(path string) bool {\n\tif _, err := os.Stat(path); err == nil {\n\t\treturn true\n\t}\n\treturn false\n}",
"func fileExists(path string) bool {\n\tif _, err := os.Stat(path); err == nil {\n\t\treturn true\n\t}\n\treturn false\n}",
"func fileExists(path string) bool {\n\tif _, err := os.Stat(path); !os.IsNotExist(err) {\n\t\treturn true\n\t}\n\treturn false\n}",
"func fileExists(path string) bool {\n\tif _, err := os.Stat(path); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func FileExists(path string) bool {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n return false\n\t}\n\treturn true\n}",
"func FileExists(path string) bool {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func FileExists(path string) bool {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func FileExists(path string) bool {\n\tif _, err := os.Stat(path); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}",
"func fileExists(path string) (bool, error) {\n\tif _, err := os.Stat(path); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\treturn true, nil\n}",
"func fileExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn true, err\n}",
"func fileExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn true, err\n}",
"func FileExists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err != nil && os.IsNotExist(err) {\n\t\treturn false\n\n\t}\n\treturn true\n}",
"func fileExists(path string) error {\n\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn errors.New(\"File does not exist\")\n\t} else if err != nil {\n\t\treturn errors.New(\"File exists\")\n\t}\n\treturn nil\n}",
"func exists(filePath string) bool {\n\t_, err := os.Stat(filePath)\n\treturn err == nil\n}",
"func FileExists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn false\n}",
"func fileExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil || !os.IsNotExist(err) {\n\t\treturn true, err\n\t}\n\treturn false, nil\n}",
"func FileExists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil\n}",
"func FileExists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil\n}",
"func FileExists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil\n}",
"func PathExists(path string) bool {\n _, err := os.Stat(path)\n if err == nil {\n return true\n }\n return false\n}",
"func pathExists(path string) (exists bool) {\n\texists = true\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\texists = false\n\t}\n\treturn\n}",
"func FileExists(path string) bool {\n\tvar _, err = os.Stat(path)\n\n\treturn !os.IsNotExist(err)\n}",
"func FileExists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn !os.IsNotExist(err)\n}",
"func FileExists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn !os.IsNotExist(err)\n}",
"func FileExists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn !os.IsNotExist(err)\n}",
"func exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err != nil {\n\t\tif os.IsExist(err) {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}",
"func exists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn false, err\n}",
"func exists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn false, err\n}",
"func exists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn false, err\n}",
"func exists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn false, err\n}",
"func exists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn false, err\n}",
"func exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn false\n}",
"func exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn false\n}",
"func exists(path string) (bool) {\n\t_, err := os.Stat(path)\n\tif err == nil { return true }\n\tif os.IsNotExist(err) {return false}\n\treturn true\n}",
"func exists(path string) (bool) {\n\t_, err := os.Stat(path)\n\tif err == nil { return true }\n\tif os.IsNotExist(err) {return false}\n\treturn true\n}",
"func exists(path string) (bool) {\n\t_, err := os.Stat(path)\n\tif err == nil { return true }\n\tif os.IsNotExist(err) {return false}\n\treturn true\n}",
"func exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func exists(path string) (bool, error) {\n\tif path == \"\" {\n\t\treturn false, nil\n\t}\n\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif !os.IsNotExist(err) {\n\t\treturn false, err\n\t}\n\n\treturn false, nil\n}",
"func FileExists(path string) (bool, os.FileInfo) {\n fileInfo, err := os.Stat(path)\n if err != nil {\n return os.IsExist(err), fileInfo\n }\n\n return true, fileInfo\n}",
"func exists(path string) bool {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func FileExists(fs fsa.FileSystem, path string) bool {\n\t_, err := fs.Lstat(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}",
"func ExistsFile(path string) bool {\n\tif _, err := os.Stat(path); err != nil || os.IsNotExist(err) {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}",
"func exists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn true, err\n}",
"func exists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn true, err\n}",
"func exists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn true, err\n}",
"func exists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn true, err\n}",
"func exists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn !os.IsNotExist(err)\n}",
"func exists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn !os.IsNotExist(err)\n}",
"func exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\tpanic(err)\n}",
"func exists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil { return true, nil }\n\tif os.IsNotExist(err) { return false, nil }\n\treturn true, err\n}",
"func (f *file) pathIsExists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn false\n}",
"func pathExists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn false\n}",
"func pathExists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn false\n}",
"func fileExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\t// If there is no error, the file definitely exists.\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\n\t// If the error matches fs.ErrNotExist, the file definitely does not exist.\n\tif errors.Is(err, fs.ErrNotExist) {\n\t\treturn false, nil\n\t}\n\n\t// An unexpected error occurred.\n\treturn false, fmt.Errorf(\"cannot stat file: %w\", err)\n}",
"func FileExists(path string) bool {\n\te, err := fileExists(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn e\n}",
"func Exists(path string) bool {\n _, err := os.Stat(path)\n if err == nil { return true }\n if os.IsNotExist(err) { return false }\n return false\n}",
"func fileExists(name string) bool {\r\n\tif _, err := os.Stat(name); err != nil {\r\n\t\tif os.IsNotExist(err) {\r\n\t\t\treturn false\r\n\t\t}\r\n\t}\r\n\treturn true\r\n}",
"func Exists(path string) error {\n\t_, err := os.Stat(realPath(path))\n\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif os.IsNotExist(err) {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func FileExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn true, err\n}",
"func FileExists(path string) bool {\n\tif fi, err := os.Stat(path); err == nil {\n\t\tif fi.Mode().IsRegular() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func pathExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn true, err\n}",
"func pathExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn true, err\n}",
"func pathExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn false, err\n}",
"func pathExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn false, err\n}",
"func pathExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn false, err\n}",
"func Exists(fPath string) bool {\n\t_, err := os.Stat(fPath)\n\treturn !os.IsNotExist(err) // err would be not-exists\n}",
"func exists(f string) (bool, error) {\n\t_, err := os.Stat(f)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"cannot get stats for path `%s`: %v\", f, err)\n\t}\n\treturn true, nil\n}",
"func FileExists(fpath string) bool {\n\t_, err := os.Stat(fpath)\n\treturn err == nil\n}",
"func FileExist(path string) bool {\n\treturn util.FileExist(path)\n}",
"func FileExist(path string) bool {\n\treturn util.FileExist(path)\n}",
"func Exists(fname string) bool {\n if _, err := os.Stat(fname); os.IsNotExist(err) {\n return false\n }\n return true\n}",
"func FileExists(path string) (bool, error) {\n\tpi, err := pathExists(path)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif pi.Exists && pi.IsDir {\n\t\treturn false, ex.New(\"Path exists but is a directory\", ex.OptMessagef(\"Path: %q\", path))\n\t}\n\n\treturn pi.Exists, nil\n}",
"func fileExists(name string) bool {\n\tif _, err := os.Stat(name); err == nil {\n\t\treturn true\n\t}\n\treturn false\n}",
"func exists(path string) bool {\n\tif _, err := os.Stat(path); err != nil {\n\t\treturn os.IsExist(err)\n\t}\n\treturn true\n}",
"func (fs *FileSystem) FileExists(path string) bool {\n\tif fi, err := os.Stat(path); err == nil {\n\t\tif fi.Mode().IsRegular() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func IsFileExists(filePath string) bool {\n\tif _, err := os.Stat(filePath); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func PathExists(path string) (bool, error) {\r\n\tif _, err := os.Stat(path); err != nil {\r\n\t\tif os.IsNotExist(err) {\r\n\t\t\treturn false, nil\r\n\t\t}\r\n\t\treturn true, err\r\n\t}\r\n\treturn true, nil\r\n}",
"func exists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil || os.IsExist(err)\n}",
"func IsFileExists(filepath string) bool {\n\t_, err := os.Stat(filepath)\n\tif err == nil {\n\t\treturn true\n\t}\n\treturn os.IsExist(err)\n}",
"func fileExists(name string) bool {\n\tif _, err := os.Stat(name); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func fileExists(name string) bool {\n\tif _, err := os.Stat(name); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func fileExists(name string) bool {\n\tif _, err := os.Stat(name); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func PathExists(path string) bool {\n\t// note: the err is either IsNotExist or something else\n\t// if it's something else, you have bigger issues...\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\treturn false\n}",
"func PathExists(path string) bool {\n\tif path == \"\" {\n\t\treturn false\n\t}\n\n\tif _, err := os.Stat(path); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (f FileManager) Exists(path string) bool {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func Exists(path string) bool {\n\t_, err := os.Stat(path) //os.Stat获取文件信息\n\tif err != nil && os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func Exists(filename string) (bool, error) {\n _, err := os.Stat(filename)\n if err == nil {\n return true, nil\n }\n if os.IsNotExist(err) {\n return false, nil\n }\n var mu bool\n return mu, err\n}",
"func fileExists(filePath string) bool {\n\tif _, err := os.Stat(filePath); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func fileExists(name string) bool {\n\t_, err := os.Stat(name)\n\treturn !os.IsNotExist(err)\n}",
"func pathExists(path string) bool {\n\t_, err := os.Stat(path)\n\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn os.IsNotExist(err)\n}",
"func fileExists(filePath string) bool {\n\tret := true\n\n\t_, err := os.Open(filePath)\n\tif err != nil {\n\t\tret = false\n\t}\n\treturn ret\n}",
"func FileExists(filePath string) bool {\n\t_, err := os.Stat(filePath)\n\treturn err==nil\n}",
"func PathExists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil\n}"
] | [
"0.78745645",
"0.7857279",
"0.7842963",
"0.7828053",
"0.7789463",
"0.7785036",
"0.7785036",
"0.7785036",
"0.7785036",
"0.77773625",
"0.777536",
"0.77659947",
"0.7732012",
"0.7717755",
"0.7694575",
"0.7674762",
"0.7639756",
"0.7639756",
"0.7629891",
"0.76086295",
"0.76050866",
"0.7604056",
"0.7599995",
"0.7594714",
"0.7594714",
"0.7594714",
"0.7566832",
"0.75660473",
"0.756409",
"0.75375223",
"0.75375223",
"0.75375223",
"0.7534293",
"0.7530356",
"0.7530356",
"0.7530356",
"0.7530356",
"0.7530356",
"0.7527414",
"0.7527414",
"0.7522653",
"0.7522653",
"0.7522653",
"0.7521739",
"0.7521739",
"0.7520804",
"0.7517396",
"0.75139534",
"0.7510578",
"0.7508888",
"0.75065744",
"0.75065744",
"0.75065744",
"0.75065744",
"0.7503817",
"0.7503817",
"0.74954987",
"0.7489747",
"0.74743354",
"0.7464952",
"0.7464952",
"0.74649",
"0.7460192",
"0.74570036",
"0.74392146",
"0.74287593",
"0.74263376",
"0.74229395",
"0.7419791",
"0.7419791",
"0.7412796",
"0.7412796",
"0.7412796",
"0.7410142",
"0.73873085",
"0.7384365",
"0.7376371",
"0.7376371",
"0.73714626",
"0.7365789",
"0.73650044",
"0.7356476",
"0.73564065",
"0.7354985",
"0.7348347",
"0.7342873",
"0.7330352",
"0.7325286",
"0.7325286",
"0.7325286",
"0.7323606",
"0.7321967",
"0.7321571",
"0.73112327",
"0.7304882",
"0.7300263",
"0.7296397",
"0.72936136",
"0.72931707",
"0.7290793",
"0.7287333"
] | 0.0 | -1 |
ReadAtMost reads up to limit bytes from r, and reports an error when limit bytes are read. | func ReadAtMost(r io.Reader, limit int64) ([]byte, error) {
limitedReader := &io.LimitedReader{R: r, N: limit}
data, err := io.ReadAll(limitedReader)
if err != nil {
return data, err
}
if limitedReader.N <= 0 {
return data, ErrLimitReached
}
return data, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c Conn) LimitedRead(b []byte) (int, error) {\n\tr := io.LimitReader(c.Conn, c.maxReadBuffer)\n\treturn r.Read(b)\n}",
"func (rr *Reader) ReadSizeWithLimit(limit uint32) int {\n\tif rr.Err != nil {\n\t\treturn 0\n\t}\n\tvar size32 uint32\n\tsize32, rr.Err = size32Decode(func() (byte, error) {\n\t\treturn rr.ReadByte(), rr.Err\n\t})\n\tif size32 > limit && rr.Err == nil {\n\t\trr.Err = errors.New(\"read size limit overflow\")\n\t\treturn 0\n\t}\n\treturn int(size32)\n}",
"func isReadLimitReached(bytesLoaded int64, linesLoaded int64, logFilePosition string) bool {\n\treturn (logFilePosition == logs.Beginning && bytesLoaded >= byteReadLimit) ||\n\t\t(logFilePosition == logs.End && linesLoaded >= lineReadLimit)\n}",
"func readFull(r io.Reader, buf []byte) (n int, err error) {\n\tfor n < len(buf) && err == nil {\n\t\tvar nn int\n\t\tnn, err = r.Read(buf[n:])\n\t\tn += nn\n\t}\n\tif n == len(buf) {\n\t\terr = nil\n\t}\n\treturn\n}",
"func (s *Reader) Read(p []byte) (int, error) {\n\tlimiter := s.getRateLimit()\n\tif limiter == nil {\n\t\treturn s.r.Read(p)\n\t}\n\tn, err := s.r.Read(p)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\t// log.Printf(\"read: %d\", n)\n\tif err := limiter.WaitN(s.ctx, n); err != nil {\n\t\treturn n, err\n\t}\n\treturn n, nil\n}",
"func (self File) TailBytes(limitSize int) ([]byte, error) {\n\tvar limitBytes []byte\n\tfile := self.Open()\n\n\treadBytes, err := io.ReadAtLeast(file, limitBytes, limitSize)\n\tif readBytes != limitSize {\n\t\treturn limitBytes, fmt.Errorf(\"error: failed to complete read: read \", readBytes, \" out of \", limitSize, \"bytes\")\n\t} else {\n\t\treturn limitBytes, err\n\t}\n}",
"func (s *Stream) willRead(n uint64) error {\n\ts.kind = -1 // rearm / re-initialize Kind\n\tif len(s.stack) > 0 {\n\t\ttos := s.stack[len(s.stack)-1]\n\t\t// read size cannot greater than the size of the list\n\t\tif n > tos.size-tos.pos {\n\t\t\treturn ErrElemTooLarge\n\t\t}\n\t\t// change the list position\n\t\ts.stack[len(s.stack)-1].pos += n\n\t}\n\tif s.limited {\n\n\t\tif n > s.remaining {\n\t\t\treturn ErrValueTooLarge\n\t\t}\n\t\ts.remaining -= n\n\t}\n\treturn nil\n}",
"func limit(n int64) int {\n\tif n < 0 || maxio < n {\n\t\tFatal(\"bad io size:\", n)\n\t}\n\treturn int(n)\n}",
"func (r *LimiterReader) Read(p []byte) (int, error) {\n\ttc := time.Now()\n\twd, abc := r.lim.request(tc, len(p))\n\tif 0 < wd {\n\t\ttimer := time.NewTimer(wd)\n\t\tselect {\n\t\tcase <-timer.C:\n\t\tcase <-r.closedChan:\n\t\t\tif !timer.Stop() {\n\t\t\t\t<-timer.C\n\t\t\t}\n\t\t\treturn 0, ErrClosed\n\t\t}\n\t}\n\tn, err := r.rd.Read(p[:abc])\n\tif n < abc {\n\t\tr.lim.refund(abc - n)\n\t}\n\treturn n, err\n}",
"func (r *Reader) Read(p []byte) (written int, err error) {\n\tif r.eof {\n\t\terr = io.EOF\n\t\treturn\n\t}\n\n\tvar n int\n\tvar lim int\n\tfor written < len(p) && err == nil {\n\n\t\tr.limitedM.RLock()\n\t\tisLimited := r.limited\n\t\tr.limitedM.RUnlock()\n\n\t\tif isLimited {\n\n\t\t\tr.timeoutM.Lock()\n\t\t\ttimeLimit := r.timeout\n\t\t\tr.timeoutM.Unlock()\n\n\t\t\t//TODO consolidate two cases if possible. Dynamic select via reflection?\n\t\t\tif timeLimit > 0 {\n\t\t\t\tselect {\n\t\t\t\tcase <-time.After(timeLimit):\n\t\t\t\t\terr = ErrTimeoutExceeded\n\t\t\t\t\treturn\n\t\t\t\tcase lim = <-r.rate:\n\t\t\t\tdefault:\n\t\t\t\t\tif written > 0 {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tlim = <-r.rate\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tselect {\n\t\t\t\tcase lim = <-r.rate:\n\t\t\t\tdefault:\n\t\t\t\t\tif written > 0 {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tlim = <-r.rate\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlim = len(p[written:])\n\t\t}\n\n\t\tif lim > len(p[written:]) {\n\t\t\tlim = len(p[written:])\n\t\t}\n\n\t\tn, err = r.r.Read(p[written:][:lim])\n\t\twritten += n\n\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tr.eof = true\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}",
"func readFull(r io.Reader, buf []byte) (int, error) {\n\tvar n int\n\tvar err error\n\tfor n < len(buf) && err == nil {\n\t\tvar nn int\n\t\tnn, err = r.Read(buf[n:])\n\t\tn += nn\n\t}\n\tif n == len(buf) {\n\t\treturn n, nil\n\t}\n\tif err == io.EOF {\n\t\treturn n, io.ErrUnexpectedEOF\n\t}\n\treturn n, err\n}",
"func (alr *adjustableLimitedReader) Read(p []byte) (n int, err error) {\n\tn, err = alr.R.Read(p)\n\tif err == io.EOF && alr.R.N <= 0 {\n\t\t// return our custom error since io.Reader returns EOF\n\t\terr = LineLimitExceeded\n\t}\n\treturn\n}",
"func (r *Reader) ReadFull(n int) ([]byte, error) {\n\tunreadBytes := r.unreadBytes()\n\tif unreadBytes >= n {\n\t\tresult := r.buf[r.r : r.r+n]\n\t\tr.r += n\n\t\treturn result, nil\n\t}\n\n\tneedToRead := n - unreadBytes\n\tif r.capLeft() >= needToRead {\n\t\t// enough room to Read\n\t\tif err := r.readAtLeast(needToRead); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresult := r.buf[r.r : r.r+n]\n\t\tr.r += n\n\t\treturn result, nil\n\t}\n\n\t// not enough room\n\t// check if buf is large enough\n\tif n > len(r.buf) {\n\t\tif cap(r.buf) == 0 {\n\t\t\treturn nil, ErrBufReaderAlreadyClosed\n\t\t}\n\n\t\t// make a larger buf\n\t\tnewBuf := slabPool.Alloc(n + 128)\n\t\tr.w = copy(newBuf, r.buf[r.r:r.w])\n\t\tr.r = 0\n\t\tslabPool.Free(r.buf)\n\t\tr.buf = newBuf\n\t} else {\n\t\t// enough room, shift existing data to left\n\t\tr.w = copy(r.buf, r.buf[r.r:r.w])\n\t\tr.r = 0\n\t}\n\n\tif err := r.readAtLeast(needToRead); err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := r.buf[r.r : r.r+n]\n\tr.r += n\n\treturn result, nil\n}",
"func (c *LimitedConnection) Read(b []byte) (read int, err error) {\n\treturn c.rateLimitLoop(&c.readNotBefore, &c.readDeadline, c.inner.Read, b)\n}",
"func read(r io.Reader) ([]byte, error) {\n\tvar data []byte\n\t// defer r.Close()\n\tbufSize := 1024 * 10\n\tbuf := make([]byte, bufSize) //一次读取多少个字节\n\tbfRd := bufio.NewReader(r)\n\tfor {\n\t\tn, err := bfRd.Read(buf)\n\t\tdata = append(data, buf[:n]...)\n\t\tif err != nil { //遇到任何错误立即返回,并忽略EOF错误信息\n\t\t\tif err == io.EOF {\n\t\t\t\treturn data, nil\n\t\t\t}\n\t\t\treturn data, err\n\t\t}\n\t}\n\treturn data, nil\n}",
"func (c *conn) Read(b []byte) (int, error) {\n\tc.ronce.Do(c.sleepLatency)\n\n\tn, err := c.rb.FillThrottle(func(remaining int64) (int64, error) {\n\t\tmax := remaining\n\t\tif l := int64(len(b)); max > l {\n\t\t\tmax = l\n\t\t}\n\n\t\tn, err := c.Conn.Read(b[:max])\n\t\treturn int64(n), err\n\t})\n\tif err != nil && err != io.EOF {\n\t\tlog.Errorf(\"trafficshape: error on throttled read: %v\", err)\n\t}\n\n\treturn int(n), err\n}",
"func (r *objReader) readFull(b []byte) error {\n\tif r.err != nil {\n\t\treturn r.err\n\t}\n\tif r.offset+int64(len(b)) > r.limit {\n\t\treturn r.error(io.ErrUnexpectedEOF)\n\t}\n\tn, err := io.ReadFull(r.b, b)\n\tr.offset += int64(n)\n\tif err != nil {\n\t\treturn r.error(err)\n\t}\n\treturn nil\n}",
"func (r *objReader) readFull(b []byte) error {\n\tif r.err != nil {\n\t\treturn r.err\n\t}\n\tif r.offset+int64(len(b)) > r.limit {\n\t\treturn r.error(io.ErrUnexpectedEOF)\n\t}\n\tn, err := io.ReadFull(r.b, b)\n\tr.offset += int64(n)\n\tif err != nil {\n\t\treturn r.error(err)\n\t}\n\treturn nil\n}",
"func (d *Decoder) readMore() {\n\tif d.complete {\n\t\treturn\n\t}\n\tn := cap(d.buf) - len(d.buf)\n\tif n < minRead {\n\t\t// We need to grow the buffer. Note that we don't have to copy\n\t\t// the unused part of the buffer (d.buf[:d.r0]).\n\t\t// TODO provide a way to limit the maximum size that\n\t\t// the buffer can grow to.\n\t\tused := len(d.buf) - d.r0\n\t\tn1 := cap(d.buf) * 2\n\t\tif n1-used < minGrow {\n\t\t\tn1 = used + minGrow\n\t\t}\n\t\tbuf1 := make([]byte, used, n1)\n\t\tcopy(buf1, d.buf[d.r0:])\n\t\td.buf = buf1\n\t\td.r1 -= d.r0\n\t\td.r0 = 0\n\t}\n\tn, err := d.rd.Read(d.buf[len(d.buf):cap(d.buf)])\n\td.buf = d.buf[:len(d.buf)+n]\n\tif err == nil {\n\t\treturn\n\t}\n\td.complete = true\n\tif err != io.EOF {\n\t\td.err = err\n\t}\n}",
"func (c *TestConnection) Read(b []byte) (n int, err error) {\n toRet := 0\n if b == nil {\n return 0, errors.New(\"b cannot be nil\")\n }\n\n if c.ReadError != nil && c.TimesReadCalled == c.ThrowReadErrorAfter {\n return 0, c.ReadError\n }\n\n if len(c.ToRead) == 0 {\n return 0, nil\n } \n \n dataToRet := c.ToRead[0]\n buffLength := len(b)\n \n // b is big enough to hold dataToRet\n if buffLength >= len(dataToRet) {\n copy(b, []byte(dataToRet))\n c.ToRead = append(c.ToRead[:0], c.ToRead[1:]...) // remove the first element \n toRet = len(dataToRet)\n } else {\n // need to only return the maximum we can\n remains := dataToRet[buffLength:len(dataToRet)]\n c.ToRead[0] = remains // keep the remainder of the data\n copy(b, dataToRet[0:buffLength])\n toRet = buffLength\n }\n \n c.TimesReadCalled++\n return toRet, nil\n}",
"func (r *Reader) Unlimit() {\n\tr.newLimit <- nil\n}",
"func (o *ODirectReader) Read(buf []byte) (n int, err error) {\n\tif o.err != nil && (len(o.buf) == 0 || !o.seenRead) {\n\t\treturn 0, o.err\n\t}\n\tif o.buf == nil {\n\t\tif o.SmallFile {\n\t\t\to.bufp = ODirectPoolSmall.Get().(*[]byte)\n\t\t} else {\n\t\t\to.bufp = ODirectPoolLarge.Get().(*[]byte)\n\t\t}\n\t}\n\tif !o.seenRead {\n\t\to.buf = *o.bufp\n\t\tn, err = o.File.Read(o.buf)\n\t\tif err != nil && err != io.EOF {\n\t\t\tif isSysErrInvalidArg(err) {\n\t\t\t\tif err = disk.DisableDirectIO(o.File); err != nil {\n\t\t\t\t\to.err = err\n\t\t\t\t\treturn n, err\n\t\t\t\t}\n\t\t\t\tn, err = o.File.Read(o.buf)\n\t\t\t}\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\to.err = err\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t}\n\t\tif n == 0 {\n\t\t\t// err is likely io.EOF\n\t\t\to.err = err\n\t\t\treturn n, err\n\t\t}\n\t\to.err = err\n\t\to.buf = o.buf[:n]\n\t\to.seenRead = true\n\t}\n\tif len(buf) >= len(o.buf) {\n\t\tn = copy(buf, o.buf)\n\t\to.seenRead = false\n\t\treturn n, o.err\n\t}\n\tn = copy(buf, o.buf)\n\to.buf = o.buf[n:]\n\t// There is more left in buffer, do not return any EOF yet.\n\treturn n, nil\n}",
"func (c *Conn) setReadRemaining(n int64) error {\n\tif n < 0 {\n\t\treturn ErrReadLimit\n\t}\n\n\tc.readRemaining = n\n\treturn nil\n}",
"func (r *ThrottledReadCloser) Read(buf []byte) (int, error) {\n\tsubBuff, delay, err := getBufferAndDelay(r.pool, r.id, len(buf))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\ttime.Sleep(delay)\n\tif subBuff > len(buf) {\n\t\tsubBuff = len(buf)\n\t}\n\tn, err := r.origReadCloser.Read(buf[:subBuff])\n\treturn n, err\n}",
"func LimitReader(r io.Reader, n int64) io.Reader {\n\treturn &LimitedReader{r, n}\n}",
"func (this *reader) ioRead(buffer []byte) (n int, err error) {\n\tn, err = this.ioReader.Read(buffer)\n\tif err != nil {\n\t\treturn\n\t}\n\tif n != len(buffer) {\n\t\terr = fmt.Errorf(\"Reading failed. Expected %v bytes but %v was read\",\n\t\t\tlen(buffer), n)\n\t}\n\treturn\n}",
"func ReadAll(r io.Reader, threshold int) ([]byte, io.Closer, error) {\n\tlr := io.LimitedReader{R: r, N: int64(threshold) + 1}\n\tb, err := ioutil.ReadAll(&lr)\n\tif err != nil {\n\t\treturn b, nilClose, err\n\t}\n\tif lr.N > 0 {\n\t\treturn b, nilClose, nil\n\t}\n\tfh, err := ioutil.TempFile(\"\", \"iohlp-readall-\")\n\tif err != nil {\n\t\treturn b, nilClose, err\n\t}\n\tos.Remove(fh.Name())\n\tif _, err = fh.Write(b); err != nil {\n\t\treturn b, nilClose, err\n\t}\n\tif _, err = io.Copy(fh, r); err != nil {\n\t\tfh.Close()\n\t\treturn nil, nilClose, err\n\t}\n\tb, closer, err := Mmap(fh)\n\tfh.Close()\n\tif err != nil {\n\t\tif closer != nil {\n\t\t\tcloser.Close()\n\t\t}\n\t\treturn b, nil, err\n\t}\n\treturn b, closer, nil\n}",
"func (r *ChannelReader) Read(b []byte) (sz int, err error) {\n\tif len(b) == 0 {\n\t\treturn 0, io.ErrShortBuffer\n\t}\n\n\tfor {\n\t\tif len(r.buf) > 0 {\n\t\t\tif len(r.buf) <= len(b) {\n\t\t\t\tsz = len(r.buf)\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = nil\n\t\t\t} else {\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = r.buf[len(b):]\n\t\t\t\tsz = len(b)\n\t\t\t}\n\t\t\treturn sz, nil\n\t\t}\n\n\t\tvar ok bool\n\t\tif r.deadline.IsZero() {\n\t\t\tr.buf, ok = <-r.c\n\t\t} else {\n\t\t\ttimer := time.NewTimer(r.deadline.Sub(time.Now()))\n\t\t\tdefer timer.Stop()\n\n\t\t\tselect {\n\t\t\tcase r.buf, ok = <-r.c:\n\t\t\tcase <-timer.C:\n\t\t\t\treturn 0, context.DeadlineExceeded\n\t\t\t}\n\t\t}\n\t\tif len(r.buf) == 0 && !ok {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t}\n}",
"func readSize(conn net.Conn, size int64, buf *[]byte) error {\n\t*buf = make([]byte, 0)\n\tvar err error\n\tleftSize := size\n\tfor {\n\n\t\tbufinner := make([]byte, leftSize)\n\t\tvar n int\n\t\tn, err = conn.Read(bufinner)\n\t\tleftSize -= int64(n)\n\t\tif err == nil {\n\t\t\t*buf = slice_merge(*buf, bufinner)\n\t\t\tif leftSize <= 0 {\n\t\t\t\t//read end\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tif err != io.EOF {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn err\n}",
"func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser {\n\treturn &maxBytesReader{respWriter: w, readCloser: r, bytesRemaining: n}\n}",
"func FutureRead(r io.Reader, b []byte) func() (int, error) {\n\tdone := make(chan ioResult)\n\n\tgo func() {\n\t\tn, err := r.Read(b)\n\n\t\tdone <- ioResult{n, err}\n\t}()\n\n\treturn func() (int, error) {\n\t\tres := <-done\n\n\t\treturn res.n, res.err\n\t}\n}",
"func (b *buffer) read(rd io.Reader) (err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = fmt.Errorf(\"panic reading: %v\", r)\n\t\t\tb.err = err\n\t\t}\n\t}()\n\n\tvar n int\n\tbuf := b.buf[0:b.size]\n\tfor n < b.size {\n\t\tn2, err := rd.Read(buf)\n\t\tn += n2\n\t\tif err != nil {\n\t\t\tb.err = err\n\t\t\tbreak\n\t\t}\n\t\tbuf = buf[n2:]\n\t}\n\tb.buf = b.buf[0:n]\n\tb.offset = 0\n\treturn b.err\n}",
"func (s *settings) GetMaxReadSize() uint {\n\treturn s.rMaxSize\n}",
"func (r *objReader) peek(n int) ([]byte, error) {\n\tif r.err != nil {\n\t\treturn nil, r.err\n\t}\n\tif r.offset >= r.limit {\n\t\tr.error(io.ErrUnexpectedEOF)\n\t\treturn nil, r.err\n\t}\n\tb, err := r.b.Peek(n)\n\tif err != nil {\n\t\tif err != bufio.ErrBufferFull {\n\t\t\tr.error(err)\n\t\t}\n\t}\n\treturn b, err\n}",
"func (s *Server) backReader(origAddr *net.UDPAddr, c *net.UDPConn) {\n\tdefer s.wg.Done()\n\n\tvar err error\n\tbuf := make([]byte, bufSize)\n\n\tfor {\n\t\tselect {\n\t\tcase <-s.done:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\terr = c.SetReadDeadline(time.Now().Add(time.Second))\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Read error: failed to set deadline: %v\", err)\n\t\t}\n\n\t\tn, _, err := c.ReadFromUDP(buf)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(net.Error); ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Printf(\"Read error: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif n == 0 {\n\t\t\tlog.Printf(\"Read error: no bytes read\")\n\t\t\tcontinue\n\t\t}\n\n\t\tdata := make([]byte, n)\n\t\tcopy(data, buf[0:n])\n\t\ts.backRecv <- packet{origAddr, data}\n\t}\n}",
"func ImplementsReaderAtOpts(t *testing.T, reader io.ReaderAt, length int64, opts ReaderAtOpts) bool {\n\tvar buf = make([]byte, opts.BufferSize)\n\tvar err error\n\tvar n int64\n\n\tif !noopRead(t, toReader(reader, 0)) {\n\t\treturn false\n\t}\n\n\tfor err == nil {\n\t\tvar a int\n\t\ta, err = reader.ReadAt(buf, n)\n\t\tn += int64(a)\n\t\tif !(assert.GreaterOrEqual(t, a, 0) && assert.LessOrEqual(t, int64(opts.BufferSize), n)) {\n\t\t\treturn false\n\t\t}\n\n\t\tif 0 < n && n < int64(opts.BufferSize) {\n\t\t\treturn assert.Error(t, err)\n\t\t}\n\t}\n\n\tgrp, _ := errgroup.WithContext(context.Background())\n\tfor i := int64(0); i < length && i < 50; i++ {\n\t\ti := i\n\t\tgrp.Go(func() error {\n\t\t\tvar buf = make([]byte, opts.BufferSize)\n\t\t\t_, err := reader.ReadAt(buf, i)\n\t\t\tassert.NoError(t, err)\n\t\t\treturn err\n\t\t})\n\t}\n\terr2 := grp.Wait()\n\n\treturn assert.EqualError(t, err, io.EOF.Error()) && assert.NoError(t, err2)\n}",
"func ReadFull(f io.Reader, buf []byte) int {\n\tn, err := io.ReadFull(f, buf)\n\tAbortIf(err)\n\treturn n\n}",
"func readFull(r io.Reader, p []byte) (int, error) {\n\tcur := 0\n\tfor cur < len(p) {\n\t\tamt, err := r.Read(p[cur:])\n\t\tcur += amt\n\t\tif err != nil {\n\t\t\treturn cur, err\n\t\t}\n\t}\n\treturn cur, nil\n}",
"func (c *ByteBuffer) ReadN(n int) (r []byte, err error) {\n\tif n > 0 {\n\t\tif c.Len() >= n { // optimistic branching\n\t\t\tr = make([]byte, n)\n\t\t\t_, _ = c.Read(r)\n\t\t} else {\n\t\t\terr = ErrBufferNotEnoughByteToRead\n\t\t}\n\t}\n\treturn\n}",
"func (download *Download) Read(data []byte) (n int, err error) {\n\tif download.closed {\n\t\treturn 0, Error.New(\"already closed\")\n\t}\n\n\tif download.reader == nil {\n\t\terr = download.resetReader(download.offset)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tif download.limit == 0 {\n\t\treturn 0, io.EOF\n\t}\n\tif download.limit > 0 && download.limit < int64(len(data)) {\n\t\tdata = data[:download.limit]\n\t}\n\tn, err = download.reader.Read(data)\n\tif download.limit >= 0 {\n\t\tdownload.limit -= int64(n)\n\t}\n\tdownload.offset += int64(n)\n\n\treturn n, err\n}",
"func (s *settings) SetMaxReadSize(size uint) {\n\ts.rMaxSize = size\n}",
"func (s Stream) Limit(n int) Stream {\n\treturn s.Pipe(func() func(r Record) (Record, error) {\n\t\tvar count int\n\n\t\treturn func(r Record) (Record, error) {\n\t\t\tif count < n {\n\t\t\t\tcount++\n\t\t\t\treturn r, nil\n\t\t\t}\n\n\t\t\treturn nil, ErrStreamClosed\n\t\t}\n\t})\n}",
"func (r *Reader) Remaining() int {\n\treturn len(r.buf)\n}",
"func (tr *Reader) Read(b []byte) (n int, err error) {\n\tif tr.nb == 0 {\n\t\t// file consumed\n\t\treturn 0, io.EOF\n\t}\n\n\tif int64(len(b)) > tr.nb {\n\t\tb = b[0:tr.nb]\n\t}\n\tn, err = tr.r.Read(b)\n\ttr.nb -= int64(n)\n\n\tif err == io.EOF && tr.nb > 0 {\n\t\terr = io.ErrUnexpectedEOF\n\t}\n\ttr.err = err\n\treturn\n}",
"func (c *BaseConn) SetReadLimit(limit int64) {\n\tc.stream.SetReadLimit(limit)\n}",
"func (l *Clog) Read(offset uint64, maxToRead uint64) (dataRead []byte, lastReadOffset uint64, err error) {\n\tl.mu.RLock()\n\tdefer l.mu.RUnlock()\n\n\tvar max int = int(maxToRead)\n\tif max <= 0 {\n\t\tmax = internalMaxToRead\n\t} else if max > (internalMaxToRead * 10) {\n\t\t// prevent a case where a malicious actor sends\n\t\t// a maxToRead that is >>> computer RAM leading to OOM.\n\t\tmax = internalMaxToRead * 10\n\t}\n\n\tvar sizeReadSofar int\n\tfor _, seg := range l.segments {\n\t\tif seg.baseOffset > offset {\n\t\t\t// We exclude the offset from reads.\n\t\t\t// This allows people to use lastReadOffset in subsequent calls to l.Read\n\t\t\tb, errR := seg.Read()\n\t\t\tif errR != nil {\n\t\t\t\treturn dataRead, lastReadOffset, errR\n\t\t\t\t// TODO: test that if error occurs, we still return whatever has been read so far.\n\t\t\t}\n\t\t\tdataRead = append(dataRead, b...)\n\t\t\tlastReadOffset = seg.baseOffset\n\t\t\tsizeReadSofar = sizeReadSofar + len(b)\n\n\t\t\tif sizeReadSofar >= max {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// clog reads the whole data from a segment, even if the individual segment\n\t// has data greater than maxToRead.\n\t// Thus, the returned lastReadOffset is safe to be used in subsequent l.Read calls\n\t// since the segment it belongs to wont be read again.\n\treturn dataRead, lastReadOffset, nil\n}",
"func (s *stream) read(b []byte) (int, error) {\n\ts.log(logTypeStream, \"Reading from stream %v requested len = %v current chunks=%v\", s.id, len(b), len(s.recv.chunks))\n\n\tread := 0\n\n\tfor len(b) > 0 {\n\t\tif len(s.recv.chunks) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tchunk := s.recv.chunks[0]\n\n\t\t// We have a gap.\n\t\tif chunk.offset > s.recv.offset {\n\t\t\tbreak\n\t\t}\n\n\t\t// Remove leading bytes\n\t\tremove := s.recv.offset - chunk.offset\n\t\tif remove > uint64(len(chunk.data)) {\n\t\t\t// Nothing left.\n\t\t\ts.recv.chunks = s.recv.chunks[1:]\n\t\t\tcontinue\n\t\t}\n\n\t\tchunk.offset += remove\n\t\tchunk.data = chunk.data[remove:]\n\n\t\t// Now figure out how much we can read\n\t\tn := copy(b, chunk.data)\n\t\tchunk.data = chunk.data[n:]\n\t\tchunk.offset += uint64(n)\n\t\ts.recv.offset += uint64(n)\n\t\tb = b[n:]\n\t\tread += n\n\n\t\t// This chunk is empty.\n\t\tif len(chunk.data) == 0 {\n\t\t\ts.recv.chunks = s.recv.chunks[1:]\n\t\t\tif chunk.last {\n\t\t\t\ts.closeRecv()\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we have read no data, say we would have blocked.\n\tif read == 0 {\n\t\tif s.recv.closed {\n\t\t\treturn 0, ErrorStreamIsClosed\n\t\t}\n\t\treturn 0, ErrorWouldBlock\n\t}\n\treturn read, nil\n}",
"func Read(r io.Reader, n uint64) ([]byte, error) {\n\tread := make([]byte, n)\n\t_, err := r.Read(read)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not read from reader: %s\", err)\n\t}\n\n\treturn read, nil\n}",
"func BufferedRead(reader *bufio.Reader, msgBuf []byte) (uint64, error) {\n\tlen := uint64(0)\n\tvar lengthBuf [8]byte\n\t_, err := io.ReadFull(reader, lengthBuf[:])\n\tlength := binary.BigEndian.Uint64(lengthBuf[:])\n\tif err != nil {\n\t\treturn len, err\n\t}\n\tfor bytesRead := uint64(0); bytesRead < length; {\n\t\treadLen, err := reader.Read(msgBuf[bytesRead:])\n\t\tif err != nil {\n\t\t\treturn len, err\n\t\t}\n\t\tbytesRead += uint64(readLen)\n\t\tlen += uint64(readLen)\n\t}\n\treturn len, nil\n}",
"func TestReadEmptyAtEOF(t *testing.T) {\n\tb := new(Builder)\n\tslice := make([]byte, 0)\n\tn, err := b.Read(slice)\n\tif err != nil {\n\t\tt.Errorf(\"read error: %v\", err)\n\t}\n\tif n != 0 {\n\t\tt.Errorf(\"wrong count; got %d want 0\", n)\n\t}\n}",
"func (alr *adjustableLimitedReader) setLimit(n int64) {\n\talr.R.N = n\n}",
"func (self *IoRate) TakeMax(other *IoRate) {\n\tself.Read.TakeMax(other.Read)\n\tself.Write.TakeMax(other.Write)\n}",
"func SumLimitedReader(algo string, r io.Reader, n int64) ([]byte, error) {\n\tlimit := &io.LimitedReader{\n\t\tR: r,\n\t\tN: n,\n\t}\n\n\treturn SumReader(algo, limit)\n}",
"func (c *Conn) SetReadLimit(limit int64) {\n\tc.readLimit = limit\n}",
"func Read_n_bytes(conn net.Conn, n int, buf []byte) int {\n\tif cap(buf) < n {\n\t\tlog.Fatalf(\"[error] read_n_bytes capacity < n\\n\")\n\t}\n\tfor read := 0; read < n; {\n\t\tgot, err := conn.Read(buf[read:n])\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[error] reading %v\\n\", err.Error())\n\t\t\treturn 1\n\t\t}\n\t\tread += got\n\t}\n\treturn 0\n}",
"func (d *Decoder) NRead() int64 {\n\tr := d.dec.Buffered().(*bytes.Reader)\n\treturn d.r.n - int64(r.Len())\n}",
"func (b brokenReader) Read(p []byte) (n int, err error) {\n\treturn 0, errors.New(\"brokenReader is always broken.\")\n}",
"func (pipe *slimPipe) Read(buffer []byte) (int, error) {\n\terrChannel := make(chan error)\n\tcountChannel := make(chan int)\n\tgo func() {\n\t\treadBytes, err := io.ReadAtLeast(pipe.reader, buffer, 1)\n\t\tif err != nil {\n\t\t\terrChannel <- err\n\t\t} else {\n\t\t\tcountChannel <- readBytes\n\t\t}\n\t\tclose(errChannel)\n\t\tclose(countChannel)\n\t}()\n\tselect {\n\tcase count := <-countChannel:\n\t\treturn count, nil\n\tcase err := <-errChannel:\n\t\treturn 0, err\n\tcase <-time.After(pipe.timeout):\n\t\treturn 0, fmt.Errorf(\"Timeout (%v)\", pipe.timeout)\n\t}\n}",
"func (t *File) Read(b []byte) (int, error) {\n\t// Don't return 0, nil\n\tfor t.ring.Readable == 0 && !t.closed {\n\t\ttime.Sleep(PollIntervalFast) // Maybe swap this out for a notification at some point, but tbh, this works\n\t}\n\n\tif t.closed == true {\n\t\treturn 0, io.EOF\n\t}\n\n\t// Check for any waiting errors\n\tselect {\n\tcase err := <-t.errc:\n\t\tif err != nil { // Just in case XD\n\t\t\treturn 0, err\n\t\t}\n\tdefault:\n\t}\n\n\treturn t.ring.Read(b)\n}",
"func (r *Reader) Read(buf []byte) (int, error) {\n\tdefer func() {\n\t\tr.offset = r.h.Offset()\n\t\tr.frameInfo = r.h.FrameInfo()\n\n\t\tf := r.h.MetaCheck()\n\t\tswitch {\n\t\tcase f&MetaNewID3 != 0:\n\t\t\tid3v2, err := r.h.MetaID3()\n\t\t\tif id3v2 != nil && err == nil {\n\t\t\t\tr.meta.ID3v2 = id3v2\n\t\t\t}\n\t\t}\n\n\t}()\n\tif r.nextOffset > r.totalRead {\n\t\tn, err := io.CopyN(ioutil.Discard, r.input, r.nextOffset-r.totalRead)\n\t\tr.totalRead += n\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\tfor r.bytesSinceOk < r.maxBadBytes {\n\t\tvar feed []byte\n\t\tif r.needMore {\n\t\t\tr.needMore = false\n\t\t\tfeedLen, err := r.input.Read(r.feedBuf)\n\t\t\tr.totalRead += int64(feedLen)\n\t\t\tr.nextOffset = r.totalRead\n\t\t\tif feedLen == 0 && err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tfeed = r.feedBuf[:feedLen]\n\t\t\tr.bytesSinceOk += feedLen\n\t\t}\n\n\t\tswitch n, err := r.h.Decode(feed, buf); err {\n\t\tcase ErrNewFormat:\n\t\t\tr.outputFormat = r.h.OutputFormat()\n\t\t\tr.bytesSinceOk = 0\n\t\t\tif len(buf) == 0 {\n\t\t\t\treturn n, nil\n\t\t\t}\n\t\tcase ErrNeedMore:\n\t\t\tr.needMore = true\n\t\t\tif n > 0 {\n\t\t\t\tr.bytesSinceOk = 0\n\t\t\t\treturn n, nil\n\t\t\t}\n\t\tcase ErrDone:\n\t\t\treturn n, io.EOF\n\t\tdefault:\n\t\t\tr.bytesSinceOk = 0\n\t\t\treturn n, nil\n\n\t\t}\n\n\t}\n\tr.bytesSinceOk = 0\n\treturn 0, errors.New(\"No valid data found\")\n}",
"func (conn *Conn) read(n int) ([]byte, error) {\n\tresult, err := conn.brw.Peek(n)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error while peeking read buffer\", err)\n\t\treturn result, err\n\t}\n\n\t_, err = conn.brw.Discard(n)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error while discarding read buffer\", err)\n\t}\n\n\treturn result, err\n}",
"func (p *pipe) readFrom(r io.Reader) (read int64, failure error) {\n\tfor {\n\t\t// Wait until some space frees up\n\t\tsafeFree, err := p.inputWait()\n\t\tif err != nil {\n\t\t\treturn read, err\n\t\t}\n\t\t// Try to fill the buffer either till the reader position, or the end\n\t\tlimit := p.inPos + safeFree\n\t\tif limit > p.size {\n\t\t\tlimit = p.size\n\t\t}\n\t\tnr, err := r.Read(p.buffer[p.inPos:limit])\n\t\tread += int64(nr)\n\n\t\t// Update the pipe input state and handle any occurred errors\n\t\tp.inputAdvance(nr)\n\t\tif err == io.EOF {\n\t\t\treturn read, nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn read, err\n\t\t}\n\t}\n}",
"func readerSize(in io.Reader, buffSize int64) (io.Reader, int64, error) {\n\tvar n int64\n\tvar err error\n\tvar r io.Reader\n\n\t// Read first buffSize bytes into buffer\n\tbuf := make([]byte, buffSize)\n\tm, err := in.Read(buf)\n\tif err != nil && err != io.EOF {\n\t\treturn nil, 0, err\n\t}\n\n\t// If first buffSize bytes are read successfully, that means the data size >= buffSize\n\tif int64(m) == buffSize {\n\t\tr = io.MultiReader(bytes.NewReader(buf), in)\n\t\tn = buffSizeLimit\n\t} else {\n\t\tbuf = buf[:m]\n\t\tr = bytes.NewReader(buf)\n\t}\n\n\treturn r, n, nil\n}",
"func discardInput(r io.Reader, n uint32) {\n\tmaxSize := uint32(10 * 1024) // 10k at a time\n\tnumReads := n / maxSize\n\tbytesRemaining := n % maxSize\n\tif n > 0 {\n\t\tbuf := make([]byte, maxSize)\n\t\tfor i := uint32(0); i < numReads; i++ {\n\t\t\tio.ReadFull(r, buf)\n\t\t}\n\t}\n\tif bytesRemaining > 0 {\n\t\tbuf := make([]byte, bytesRemaining)\n\t\tio.ReadFull(r, buf)\n\t}\n}",
"func discardInput(r io.Reader, n uint32) {\n\tmaxSize := uint32(10 * 1024) // 10k at a time\n\tnumReads := n / maxSize\n\tbytesRemaining := n % maxSize\n\tif n > 0 {\n\t\tbuf := make([]byte, maxSize)\n\t\tfor i := uint32(0); i < numReads; i++ {\n\t\t\tio.ReadFull(r, buf)\n\t\t}\n\t}\n\tif bytesRemaining > 0 {\n\t\tbuf := make([]byte, bytesRemaining)\n\t\tio.ReadFull(r, buf)\n\t}\n}",
"func (c *RawConnectionMock) SetReadLimit(limit int64) {\n\tc.Called(limit)\n}",
"func (ctn *Connection) Read(buf []byte, length int) (total int, aerr Error) {\n\tvar err error\n\n\t// if all bytes are not read, retry until successful\n\t// Don't worry about the loop; we've already set the timeout elsewhere\n\tfor total < length {\n\t\tvar r int\n\t\tif err = ctn.updateDeadline(); err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif !ctn.compressed {\n\t\t\tr, err = ctn.conn.Read(buf[total:length])\n\t\t} else {\n\t\t\tr, err = ctn.inflater.Read(buf[total:length])\n\t\t\tif err == io.EOF && total+r == length {\n\t\t\t\tctn.compressed = false\n\t\t\t\terr = ctn.inflater.Close()\n\t\t\t}\n\t\t}\n\t\ttotal += r\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif total == length {\n\t\t// If all required bytes are read, ignore any potential error.\n\t\t// The error will bubble up on the next network io if it matters.\n\t\treturn total, nil\n\t}\n\n\taerr = chainErrors(errToAerospikeErr(ctn, err), aerr)\n\n\tif ctn.node != nil {\n\t\tctn.node.incrErrorCount()\n\t\tatomic.AddInt64(&ctn.node.stats.ConnectionsFailed, 1)\n\t}\n\n\tctn.Close()\n\n\treturn total, aerr\n}",
"func NewLineLimitReader(r io.Reader, n int) *LineLimitedReader { return &LineLimitedReader{r, n, 0} }",
"func (br *BufferedReader) Read(v Decoder) (n int, err error) {\n\nRetry:\n\tif br.mustFill {\n\t\t// The buffer needs to be filled before trying to decode\n\t\t// another record.\n\t\tif br.mode == ModeManual {\n\t\t\treturn 0, ErrMustFill\n\t\t}\n\n\t\terr = br.Fill()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tif br.eof && br.offset == br.buffered {\n\t\t// We've reached EOF on a previous Fill attempt and the\n\t\t// buffered data has been fully consumed.\n\t\treturn 0, io.EOF\n\t}\n\n\tn, err = v.Decode(br.buffer[br.offset:br.buffered])\n\n\tif err == ErrShortBuffer {\n\t\t// Unable to decode a full record.\n\n\t\tif br.offset == 0 && br.buffered == len(br.buffer) {\n\t\t\t// We've tried to decode from the start of a full\n\t\t\t// buffer, so it seems we won't be able to fit this\n\t\t\t// record in our buffer.\n\t\t\treturn 0, ErrTooLarge\n\t\t}\n\n\t\tif br.eof {\n\t\t\t// We won't be able to read more bytes yet there's\n\t\t\t// a partial record left to decode.\n\t\t\treturn 0, io.ErrUnexpectedEOF\n\t\t}\n\n\t\tbr.mustFill = true\n\n\t\tgoto Retry\n\t}\n\n\tbr.offset += n\n\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, nil\n}",
"func (s *Server) readFull(reader *bufio.Reader, size int) ([]byte, error) {\n\tfullMsg := make([]byte, 0)\n\n\t// Without size in message, there is possible situation,\n\t// when message will be exactly 4096 bytes,\n\t// and Peek() will hang after read\n\t// + size allows to separate data is socket when there is multiple different messages\n\tfor {\n\t\tbuffSize := reader.Buffered() // max reader size == 4096\n\n\t\t// Get required chunk size\n\t\tchunkSize := 0\n\t\tif size < buffSize {\n\t\t\tchunkSize = size\n\t\t} else {\n\t\t\tchunkSize = buffSize\n\t\t}\n\t\t// Create tmp storage, read bytes into it, and append them to the full message.\n\t\tbuff := make([]byte, chunkSize)\n\n\t\t_, err := reader.Read(buff)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfullMsg = append(fullMsg, buff...)\n\n\t\t// Break if message is fully read.\n\t\tsize -= chunkSize\n\t\tif size == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\t// Reader would be empty until peek.\n\t\tif _, err := reader.Peek(1); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn fullMsg, nil\n}",
"func (s *source) takeNBuffered(n int) (Fetch, int, bool) {\n\tvar r Fetch\n\tvar taken int\n\n\tb := &s.buffered\n\tbf := &b.fetch\n\tfor len(bf.Topics) > 0 && n > 0 {\n\t\tt := &bf.Topics[0]\n\n\t\tr.Topics = append(r.Topics, *t)\n\t\trt := &r.Topics[len(r.Topics)-1]\n\t\trt.Partitions = nil\n\n\t\ttCursors := b.usedOffsets[t.Topic]\n\n\t\tfor len(t.Partitions) > 0 && n > 0 {\n\t\t\tp := &t.Partitions[0]\n\n\t\t\trt.Partitions = append(rt.Partitions, *p)\n\t\t\trp := &rt.Partitions[len(rt.Partitions)-1]\n\t\t\trp.Records = nil\n\n\t\t\ttake := n\n\t\t\tif take > len(p.Records) {\n\t\t\t\ttake = len(p.Records)\n\t\t\t}\n\n\t\t\trp.Records = p.Records[:take]\n\t\t\tp.Records = p.Records[take:]\n\n\t\t\tn -= take\n\t\t\ttaken += take\n\n\t\t\tpCursor := tCursors[p.Partition]\n\n\t\t\tif len(p.Records) == 0 {\n\t\t\t\tt.Partitions = t.Partitions[1:]\n\n\t\t\t\tpCursor.from.setOffset(pCursor.cursorOffset)\n\t\t\t\tpCursor.from.allowUsable()\n\t\t\t\tdelete(tCursors, p.Partition)\n\t\t\t\tif len(tCursors) == 0 {\n\t\t\t\t\tdelete(b.usedOffsets, t.Topic)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tlastReturnedRecord := rp.Records[len(rp.Records)-1]\n\t\t\tpCursor.from.setOffset(cursorOffset{\n\t\t\t\toffset: lastReturnedRecord.Offset + 1,\n\t\t\t\tlastConsumedEpoch: lastReturnedRecord.LeaderEpoch,\n\t\t\t})\n\t\t}\n\n\t\tif len(t.Partitions) == 0 {\n\t\t\tbf.Topics = bf.Topics[1:]\n\t\t}\n\t}\n\n\tdrained := len(bf.Topics) == 0\n\tif drained {\n\t\ts.takeBuffered()\n\t}\n\treturn r, taken, drained\n}",
"func (r *timeoutReadCloser) Read(b []byte) (int, error) {\n\ttimer := time.NewTimer(r.duration)\n\tc := make(chan readResult, 1)\n\n\tgo func() {\n\t\tn, err := r.reader.Read(b)\n\t\ttimer.Stop()\n\t\tc <- readResult{n: n, err: err}\n\t}()\n\n\tselect {\n\tcase data := <-c:\n\t\treturn data.n, data.err\n\tcase <-timer.C:\n\t\treturn 0, &ResponseTimeoutError{TimeoutDur: r.duration}\n\t}\n}",
"func (serv *Server) delayReader(conn int) {\n\tvar (\n\t\tdelay = 300 * time.Millisecond\n\t\ttotal time.Duration\n\t)\n\tfor total < serv.Options.ReadWriteTimeout {\n\t\ttime.Sleep(delay)\n\t\tselect {\n\t\tcase serv.qreader <- conn:\n\t\t\treturn\n\t\tdefault:\n\t\t\ttotal += delay\n\t\t}\n\t}\n\tvar req = Frame{\n\t\tcloseCode: StatusInternalError,\n\t}\n\tserv.handleClose(conn, &req)\n}",
"func BlockingRead(r *bufio.Reader) *[]byte {\n\tbyteChan := make(chan []byte)\n\tb := make([]byte, 4096) // buffer is 4k- we should never be exceeding this!\n\tgo func() {\n\t\tfor {\n\t\t\tn, _ := r.ReadBytes('\\n')\n\t\t\tif len(n) > 0 {\n\t\t\t\tbyteChan <- n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase b = <-byteChan:\n\t\t\treturn &b\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n}",
"func Read(r io.Reader, data []byte) ([]byte, error) {\n\tj := 0\n\tfor {\n\t\tn, err := r.Read(data[j:])\n\t\tj = j + n\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn nil, errors.Wrap(err, \"Read failure\")\n\t\t}\n\n\t\tif (n == 0 && j == len(data)) || j > len(data) {\n\t\t\treturn nil, errors.New(\"Size of requested data is too large\")\n\t\t}\n\t}\n\n\treturn data[:j], nil\n}",
"func (r *FileSizeRotator) reachLimit(n int) bool {\n\tatomic.AddUint64(&r.currSize, uint64(n))\n\tif r.currSize > r.limitSize {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipline) (int64, bool, error) {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\tif !q.readable {\n\t\treturn 0, false, syserror.ErrWouldBlock\n\t}\n\n\t// Read out from the read buffer.\n\tn := canonMaxBytes\n\tif n > int(dst.NumBytes()) {\n\t\tn = int(dst.NumBytes())\n\t}\n\tif n > q.readBuf.Len() {\n\t\tn = q.readBuf.Len()\n\t}\n\tn, err := dst.Writer(ctx).Write(q.readBuf.Bytes()[:n])\n\tif err != nil {\n\t\treturn 0, false, err\n\t}\n\t// Discard bytes read out.\n\tq.readBuf.Next(n)\n\n\t// If we read everything, this queue is no longer readable.\n\tif q.readBuf.Len() == 0 {\n\t\tq.readable = false\n\t}\n\n\t// Move data from the queue's wait buffer to its read buffer.\n\tnPushed := q.pushWaitBufLocked(l)\n\n\treturn int64(n), nPushed > 0, nil\n}",
"func (r *bytesReader) ReadAt(b []byte, offset int64) (n int, err error) {\n\tif offset < 0 {\n\t\treturn 0, errors.New(\"buffer.bytesReader.ReadAt: negative offset\")\n\t}\n\tif offset >= int64(len(r.bs)) {\n\t\treturn 0, io.EOF\n\t}\n\tn = copy(b, r.bs[offset:])\n\tif n < len(b) {\n\t\terr = io.EOF\n\t}\n\treturn\n}",
"func (dev *Device) read(contxt context.Context, waitResponse bool) ([]byte, error) {\n\n\tcountError := 0\n\tlastEvent := time.Now()\n\t//TODO timeoutRead?\n\tfuncerr := func(err error) error {\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\tlog.Printf(\"funcread err: %s\", err)\n\t\tswitch {\n\t\tcase errors.Is(err, os.ErrClosed):\n\t\t\treturn err\n\t\tcase errors.Is(err, io.ErrClosedPipe):\n\t\t\treturn err\n\t\tcase errors.Is(err, io.EOF):\n\t\t\tif time.Since(lastEvent) < 10*time.Microsecond {\n\t\t\t\tif countError > 3 {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcountError++\n\t\t\t}\n\n\t\t\tlastEvent = time.Now()\n\t\t}\n\n\t\treturn nil\n\n\t}\n\n\t//TODO: limit to read\n\tbb := make([]byte, 0)\n\tindxb := 0\n\tlendata := uint32(0)\n\tfor {\n\n\t\tselect {\n\t\tcase <-contxt.Done():\n\t\t\treturn nil, fmt.Errorf(\"timeout error, %w\", smartcard.ErrComm)\n\t\tdefault:\n\t\t}\n\t\ttempb := make([]byte, 2048)\n\n\t\t// fmt.Println(\"execute read\")\n\n\t\tn, err := dev.port.Read(tempb)\n\t\tif err != nil && n <= 0 {\n\t\t\tif err := funcerr(err); err != nil {\n\t\t\t\t// log.Printf(\"0, err: %s\", err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// fmt.Printf(\"len: %v, [% X]\\n\", len(tempb[:n]), tempb[:n])\n\n\t\t// prepareBuffer := make([]byte, len(tempb[:n]))\n\n\t\t// copy(prepareBuffer, tempb[:n])\n\n\t\tbf := bytes.NewBuffer(tempb[:n])\n\t\t// fmt.Printf(\"len: %v, %v, %v, %v\\n\", len(prepareBuffer), cap(prepareBuffer), bf.Cap(), bf.Len())\n\n\t\tb := func() []byte {\n\t\t\tvar result []byte\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-contxt.Done():\n\t\t\t\t\treturn nil\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t\tlast, err := bf.ReadByte()\n\t\t\t\tif err == nil {\n\t\t\t\t\tif indxb <= 0 && last != '\\x02' {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tindxb++\n\t\t\t\t\tbb = append(bb, last)\n\t\t\t\t} else {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t// fmt.Printf(\"len: %v, last: %X, [% X]\\n\", len(bb), last, bb[:])\n\t\t\t\t// log.Println(\"2\")\n\t\t\t\tif len(bb) == 6 {\n\n\t\t\t\t\tlendata = binary.LittleEndian.Uint32(bb[2:6])\n\t\t\t\t\t// fmt.Printf(\"len data: %d\\n\", lendata)\n\t\t\t\t}\n\t\t\t\tif last == '\\x03' && len(bb) == 4 && bb[1] == bb[2] {\n\t\t\t\t\tresult = make([]byte, len(bb))\n\t\t\t\t\tcopy(result, bb[:])\n\t\t\t\t\tbb = make([]byte, 0)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif last == '\\x03' && len(bb) >= int(lendata)+1+10+1+1 {\n\t\t\t\t\t// fmt.Printf(\"tempb final: [% X]\\n\", bb[:])\n\n\t\t\t\t\tresult = make([]byte, len(bb))\n\t\t\t\t\tcopy(result, bb[:])\n\t\t\t\t\tbb = make([]byte, 0)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result\n\t\t}()\n\n\t\tif waitResponse {\n\t\t\tif len(b) <= 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(b) == 4 && b[1] == b[2] && b[1] == 0x00 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(b) == 13 && bytes.Equal(b, FRAME_NACK) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif b[len(b)-1] != 0x03 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// fmt.Printf(\"resul final: [% X]\\n\", b[:])\n\n\t\t// if indxb <= 0 {\n\t\t// \tif b == '\\x02' {\n\t\t// \t\ttempb[0] = b\n\t\t// \t\tindxb = 1\n\t\t// \t}\n\t\t// \tcontinue\n\t\t// }\n\n\t\t// tempb[indxb] = b\n\t\t// indxb++\n\t\t// fmt.Printf(\"len: %v, [% X]\\n\", indxb, tempb[:indxb])\n\t\t// // log.Println(\"2\")\n\t\t// if indxb == 6 {\n\t\t// \tlendata = binary.LittleEndian.Uint32(tempb[2:6])\n\t\t// }\n\t\t// if b == '\\x03' && indxb == 4 && tempb[1] == tempb[2] {\n\t\t// \tdest := make([]byte, indxb)\n\t\t// \tcopy(dest, tempb[:indxb])\n\t\t// 
\treturn dest, nil\n\t\t// }\n\t\t// if b == '\\x03' && indxb >= int(lendata)+1+10+1+1 {\n\t\t// \t// fmt.Printf(\"tempb final: [% X]\\n\", tempb[:indxb])\n\n\t\t// \tdest := make([]byte, indxb)\n\t\t// \tcopy(dest, tempb[:indxb])\n\t\t// \treturn dest, nil\n\t\t// }\n\t\tdest := make([]byte, len(b))\n\t\tcopy(dest, b[:])\n\t\tfmt.Printf(\"recv data: %v, [% X]\\n\", len(b), b[:])\n\t\treturn dest, nil\n\n\t}\n}",
"func TimeoutReader(r io.Reader) io.Reader { return &timeoutReader{r, 0} }",
"func read(conn *net.TCPConn, size int, data *bytes.Buffer) error {\n\tconn.SetReadDeadline(time.Now().Add(time.Second * 15))\n\n\tn, err := io.CopyN(data, conn, int64(size))\n\tif err != nil || n != int64(size) {\n\t\treturn errors.New(\"read error\")\n\t}\n\treturn nil\n}",
"func (d *Decoder) readN(n int) []byte {\n\tif buf, ok := d.r.(*bytes.Buffer); ok {\n\t\tb := buf.Next(n)\n\t\tif len(b) != n {\n\t\t\tpanic(io.ErrUnexpectedEOF)\n\t\t}\n\t\tif d.n += n; d.n > MaxObjectSize {\n\t\t\tbuild.Critical(ErrObjectTooLarge)\n\t\t}\n\t\treturn b\n\t}\n\tb := make([]byte, n)\n\t_, err := io.ReadFull(d, b)\n\tif err != nil {\n\t\tbuild.Critical(err)\n\t}\n\treturn b\n}",
"func (rstra *ReadSeekerToReaderAt) ReadAt(p []byte, offset int64) (n int, err error) {\n\tdefer func() {\n\t\tif state := recover(); state != nil {\n\t\t\terr = log.Wrap(state.(error))\n\t\t}\n\t}()\n\n\toriginalOffset, err := rstra.rs.Seek(0, io.SeekCurrent)\n\tlog.PanicIf(err)\n\n\tdefer func() {\n\t\t_, err := rstra.rs.Seek(originalOffset, io.SeekStart)\n\t\tlog.PanicIf(err)\n\t}()\n\n\t_, err = rstra.rs.Seek(offset, io.SeekStart)\n\tlog.PanicIf(err)\n\n\t// Note that all errors will be wrapped, here. The usage of this method is\n\t// such that typically no specific errors would be expected as part of\n\t// normal operation (in which case we'd check for those first and return\n\t// them directly).\n\tn, err = io.ReadFull(rstra.rs, p)\n\tlog.PanicIf(err)\n\n\treturn n, nil\n}",
"func TestNonFatalRead(t *testing.T) {\n\t// Limit runtime in case of deadlocks\n\tlim := test.TimeOut(time.Second * 20)\n\tdefer lim.Stop()\n\n\texpectedData := []byte(\"expectedData\")\n\n\t// In memory pipe\n\tca, cb := net.Pipe()\n\trequire.NoError(t, cb.Close())\n\n\tconn := &muxErrorConn{ca, []muxErrorConnReadResult{\n\t\t// Non-fatal timeout error\n\t\t{packetio.ErrTimeout, nil},\n\t\t{nil, expectedData},\n\t\t{io.ErrShortBuffer, nil},\n\t\t{nil, expectedData},\n\t\t{io.EOF, nil},\n\t}}\n\n\tm := NewMux(Config{\n\t\tConn: conn,\n\t\tBufferSize: testPipeBufferSize,\n\t\tLoggerFactory: logging.NewDefaultLoggerFactory(),\n\t})\n\n\te := m.NewEndpoint(MatchAll)\n\n\tbuff := make([]byte, testPipeBufferSize)\n\tn, err := e.Read(buff)\n\trequire.NoError(t, err)\n\trequire.Equal(t, buff[:n], expectedData)\n\n\tn, err = e.Read(buff)\n\trequire.NoError(t, err)\n\trequire.Equal(t, buff[:n], expectedData)\n\n\t<-m.closedCh\n\trequire.NoError(t, m.Close())\n\trequire.NoError(t, ca.Close())\n}",
"func (e *T) readAvailable(dst []byte) (n int, err error) {\n\tn = copy(dst, e.buf[e.ridx:e.widx])\n\ti := e.ridx + n\n\tif e.secure {\n\t\tclearbytes(e.buf[e.ridx:i])\n\t}\n\te.ridx = i\n\tif e.ridx >= e.widx {\n\t\te.ridx = 0\n\t\te.widx = 0\n\t\te.available = false\n\t}\n\treturn n, nil\n}",
"func (reader *ExtentReader) Read(req *ExtentRequest) (readBytes int, err error) {\n\toffset := req.FileOffset - int(reader.key.FileOffset) + int(reader.key.ExtentOffset)\n\tsize := req.Size\n\n\treqPacket := NewReadPacket(reader.key, offset, size, reader.inode, req.FileOffset, reader.followerRead)\n\tsc := NewStreamConn(reader.dp, reader.followerRead)\n\n\tlog.LogDebugf(\"ExtentReader Read enter: size(%v) req(%v) reqPacket(%v)\", size, req, reqPacket)\n\n\terr = sc.Send(reqPacket, func(conn *net.TCPConn) (error, bool) {\n\t\treadBytes = 0\n\t\tfor readBytes < size {\n\t\t\treplyPacket := NewReply(reqPacket.ReqID, reader.dp.PartitionID, reqPacket.ExtentID)\n\t\t\tbufSize := util.Min(util.ReadBlockSize, size-readBytes)\n\t\t\treplyPacket.Data = req.Data[readBytes : readBytes+bufSize]\n\t\t\te := replyPacket.readFromConn(conn, proto.ReadDeadlineTime)\n\t\t\tif e != nil {\n\t\t\t\tlog.LogWarnf(\"Extent Reader Read: failed to read from connect, ino(%v) req(%v) readBytes(%v) err(%v)\", reader.inode, reqPacket, readBytes, e)\n\t\t\t\t// Upon receiving TryOtherAddrError, other hosts will be retried.\n\t\t\t\treturn TryOtherAddrError, false\n\t\t\t}\n\n\t\t\t//log.LogDebugf(\"ExtentReader Read: ResultCode(%v) req(%v) reply(%v) readBytes(%v)\", replyPacket.GetResultMsg(), reqPacket, replyPacket, readBytes)\n\n\t\t\tif replyPacket.ResultCode == proto.OpAgain {\n\t\t\t\treturn nil, true\n\t\t\t}\n\n\t\t\te = reader.checkStreamReply(reqPacket, replyPacket)\n\t\t\tif e != nil {\n\t\t\t\t// Dont change the error message, since the caller will\n\t\t\t\t// check if it is NotLeaderErr.\n\t\t\t\treturn e, false\n\t\t\t}\n\n\t\t\treadBytes += int(replyPacket.Size)\n\t\t}\n\t\treturn nil, false\n\t})\n\n\tif err != nil {\n\t\tlog.LogErrorf(\"Extent Reader Read: err(%v) req(%v) reqPacket(%v)\", err, req, reqPacket)\n\t}\n\n\tlog.LogDebugf(\"ExtentReader Read exit: req(%v) reqPacket(%v) readBytes(%v) err(%v)\", req, reqPacket, readBytes, err)\n\treturn\n}",
"func (ctn *Connection) Read(buf []byte, length int) (total int, err error) {\n\t// if all bytes are not read, retry until successful\n\t// Don't worry about the loop; we've already set the timeout elsewhere\n\tfor total < length {\n\t\tvar r int\n\t\tif err = ctn.updateDeadline(); err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif !ctn.compressed {\n\t\t\tr, err = ctn.conn.Read(buf[total:length])\n\t\t} else {\n\t\t\tr, err = ctn.inflater.Read(buf[total:length])\n\t\t\tif err == io.EOF && total+r == length {\n\t\t\t\tctn.compressed = false\n\t\t\t\terr = ctn.inflater.Close()\n\t\t\t}\n\t\t}\n\t\ttotal += r\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif total == length {\n\t\t// If all required bytes are read, ignore any potential error.\n\t\t// The error will bubble up on the next network io if it matters.\n\t\treturn total, nil\n\t}\n\n\tif ctn.node != nil {\n\t\tctn.node.incrErrorCount()\n\t\tatomic.AddInt64(&ctn.node.stats.ConnectionsFailed, 1)\n\t}\n\n\t// the line should happen before .Close()\n\terr = errToTimeoutErr(ctn, err)\n\tctn.Close()\n\n\treturn total, err\n}",
"func ReadNMessages(gribFile io.Reader, n int) ([]*Message, error) {\n\tmessages := make([]*Message, 0)\n\n\tfor {\n\t\tmessage, messageErr := ReadMessage(gribFile)\n\n\t\tif messageErr != nil {\n\t\t\tif strings.Contains(messageErr.Error(), \"EOF\") {\n\t\t\t\treturn messages, nil\n\t\t\t}\n\t\t\tlog.Println(\"Error when parsing a message, \", messageErr.Error())\n\t\t\treturn messages, messageErr\n\t\t}\n\t\tmessages = append(messages, message)\n\t\tif len(messages) >= n {\n\t\t\treturn messages, nil\n\t\t}\n\t}\n}",
"func TestReadDataWithMaxSize(t *testing.T) {\n\ttests := []struct {\n\t\tlines string\n\t\tmaxSize int\n\t\terr error\n\t}{\n\t\t// Maximum size of zero (the default) should not return an error.\n\t\t{\"Test message.\\r\\n.\\r\\n\", 0, nil},\n\n\t\t// Messages below the maximum size should not return an error.\n\t\t{\"Test message.\\r\\n.\\r\\n\", 16, nil},\n\n\t\t// Messages matching the maximum size should not return an error.\n\t\t{\"Test message.\\r\\n.\\r\\n\", 15, nil},\n\n\t\t// Messages above the maximum size should return a maximum size exceeded error.\n\t\t{\"Test message.\\r\\n.\\r\\n\", 14, maxSizeExceeded(14)},\n\t}\n\tvar buf bytes.Buffer\n\ts := &session{}\n\ts.br = bufio.NewReader(&buf)\n\n\tfor _, tt := range tests {\n\t\ts.srv = &Server{MaxSize: tt.maxSize}\n\t\tbuf.Write([]byte(tt.lines))\n\t\t_, err := s.readData()\n\t\tif err != tt.err {\n\t\t\tt.Errorf(\"readData(%v) returned err: %v\", tt.lines, tt.err)\n\t\t}\n\t}\n}",
"func MaxReadahead(n uint32) MountOption {\n\treturn func(conf *mountConfig) error {\n\t\tconf.maxReadahead = n\n\t\treturn nil\n\t}\n}",
"func (l *LeechedReadCloser) Read(b []byte) (n int, err error) {\n\tspaceLeft := l.maxBodyLogSize - l.loggedBytesCount\n\tif spaceLeft > 0 {\n\t\t// Let's read the request into our Logger (not all of it maybe), but also let's make sure that\n\t\t// we'll be able to to copy all the content we read in l.data into b\n\t\tn, err := l.originalReadCloser.Read(l.data[l.loggedBytesCount : l.loggedBytesCount+min(int64(len(b)), spaceLeft)])\n\n\t\t// And copy what was read into the original slice\n\t\tcopy(b, l.data[l.loggedBytesCount:l.loggedBytesCount+int64(n)])\n\n\t\t// Let's not forget to increment the pointer on the currently logged amount of bytes\n\t\tl.loggedBytesCount += int64(n)\n\n\t\t// And return what the Read() call we did on the original ReadCloser just returned, shhhhh\n\t\treturn n, err\n\t}\n\n\t// Our leecher is full ? Nevermind, let's just call read on the original Reader. Apart from an\n\t// additional level in the call stack and an if statement, we have no overhead for large bodies :)\n\treturn l.originalReadCloser.Read(b)\n}",
"func checkReader(t *testing.T, r zbuf.Reader, checkReads bool) {\n\tfor expect := 3; expect <= 6; expect++ {\n\t\trec, err := r.Read()\n\t\trequire.NoError(t, err)\n\n\t\tv, err := rec.AccessInt(\"value\")\n\t\trequire.NoError(t, err)\n\n\t\trequire.Equal(t, int64(expect), v, \"Got expected record value\")\n\t}\n\n\trec, err := r.Read()\n\trequire.NoError(t, err)\n\trequire.Nil(t, rec, \"Reached eof after last record in time span\")\n\n\tif checkReads {\n\t\trr, ok := r.(*rangeReader)\n\t\trequire.True(t, ok, \"Can get read stats from index reader\")\n\t\trequire.LessOrEqual(t, rr.reads(), uint64(6), \"Indexed reader did not read the entire file\")\n\t}\n}",
"func (b *Buffer) ReadNFrom(reader io.Reader, n int) (int, error) {\n\t// Loop until we've filled completed the read, run out of storage, or\n\t// encountered a read error.\n\tvar read, result int\n\tvar err error\n\tfor n > 0 && b.used != b.size && err == nil {\n\t\t// Compute the first available contiguous free storage segment.\n\t\tfreeStart := (b.start + b.used) % b.size\n\t\tfree := b.storage[freeStart:min(freeStart+(b.size-b.used), b.size)]\n\n\t\t// If the storage segment is larger than we need, then truncate it.\n\t\tif len(free) > n {\n\t\t\tfree = free[:n]\n\t\t}\n\n\t\t// Perform the read.\n\t\tread, err = reader.Read(free)\n\n\t\t// Update indices and tracking.\n\t\tresult += read\n\t\tb.used += read\n\t\tn -= read\n\t}\n\n\t// If we couldn't complete the read due to a lack of storage, then we need\n\t// to return an error. However, if a read error occurred simultaneously with\n\t// running out of storage, then we don't overwrite it.\n\tif n > 0 && b.used == b.size && err == nil {\n\t\terr = ErrBufferFull\n\t}\n\n\t// If we encountered io.EOF simultaneously with completing the read, then we\n\t// can clear the error.\n\tif err == io.EOF && n == 0 {\n\t\terr = nil\n\t}\n\n\t// Done.\n\treturn result, err\n}",
"func VerifyRLimit(estimateMaxFiles RlimT) error {\n\tif estimateMaxFiles > maxRLimit {\n\t\testimateMaxFiles = maxRLimit\n\t}\n\tvar rLimit syscall.Rlimit\n\terr := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit)\n\tfailpoint.Inject(\"GetRlimitValue\", func(v failpoint.Value) {\n\t\tlimit := RlimT(v.(int))\n\t\trLimit.Cur = limit\n\t\trLimit.Max = limit\n\t\terr = nil\n\t})\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif rLimit.Cur >= estimateMaxFiles {\n\t\treturn nil\n\t}\n\tif rLimit.Max < estimateMaxFiles {\n\t\t// If the process is not started by privileged user, this will fail.\n\t\trLimit.Max = estimateMaxFiles\n\t}\n\tprevLimit := rLimit.Cur\n\trLimit.Cur = estimateMaxFiles\n\tfailpoint.Inject(\"SetRlimitError\", func(v failpoint.Value) {\n\t\tif v.(bool) {\n\t\t\terr = errors.New(\"Setrlimit Injected Error\")\n\t\t}\n\t})\n\tif err == nil {\n\t\terr = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rLimit)\n\t}\n\tif err != nil {\n\t\treturn errors.Annotatef(err, \"the maximum number of open file descriptors is too small, got %d, expect greater or equal to %d\", prevLimit, estimateMaxFiles)\n\t}\n\n\t// fetch the rlimit again to make sure our setting has taken effect\n\terr = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif rLimit.Cur < estimateMaxFiles {\n\t\thelper := \"Please manually execute `ulimit -n %d` to increase the open files limit.\"\n\t\treturn errors.Errorf(\"cannot update the maximum number of open file descriptors, expected: %d, got: %d. %s\",\n\t\t\testimateMaxFiles, rLimit.Cur, helper)\n\t}\n\n\tlog.L().Info(\"Set the maximum number of open file descriptors(rlimit)\",\n\t\tzapRlimT(\"old\", prevLimit), zapRlimT(\"new\", estimateMaxFiles))\n\treturn nil\n}",
"func (h *ReOpen) Read(p []byte) (n int, err error) {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\tif h.err != nil {\n\t\t// return a previous error if there is one\n\t\treturn n, h.err\n\t}\n\tn, err = h.rc.Read(p)\n\tif err != nil {\n\t\th.err = err\n\t}\n\th.read += int64(n)\n\tif err != nil && err != io.EOF && !fserrors.IsNoLowLevelRetryError(err) {\n\t\t// close underlying stream\n\t\th.opened = false\n\t\t_ = h.rc.Close()\n\t\t// reopen stream, clearing error if successful\n\t\tfs.Debugf(h.src, \"Reopening on read failure after %d bytes: retry %d/%d: %v\", h.read, h.tries, h.maxTries, err)\n\t\tif h.open() == nil {\n\t\t\terr = nil\n\t\t}\n\t}\n\treturn n, err\n}",
"func (mCn mockConn) Read(b []byte) (n int, err error) {\n\tfmt.Printf(\"reading: %d of %d.\\n\", *mCn.readCount, len(mockConnOutpBytes))\n\tif *mCn.readCount < len(mockConnOutpBytes) {\n\t\tcopy(b, mockConnOutpBytes[*mCn.readCount])\n\t\t*mCn.readCount = *mCn.readCount + 1\n\t}\n\treturn len(b), nil\n}",
"func (serv *Server) pollReader() {\n\tvar (\n\t\tlogp = `pollReader`\n\n\t\tlistConn []int\n\t\terr error\n\t\tnumReader int32\n\t\tconn int\n\t)\n\n\tfor {\n\t\tlistConn, err = serv.poll.WaitRead()\n\t\tif err != nil {\n\t\t\tlog.Printf(`%s: %s`, logp, err)\n\t\t\tbreak\n\t\t}\n\n\t\tfor _, conn = range listConn {\n\t\t\tselect {\n\t\t\tcase serv.qreader <- conn:\n\t\t\tdefault:\n\t\t\t\tnumReader = serv.numGoReader.Load()\n\t\t\t\tif numReader < serv.Options.maxGoroutineReader {\n\t\t\t\t\tgo serv.reader()\n\t\t\t\t\tserv.numGoReader.Add(1)\n\t\t\t\t\tserv.qreader <- conn\n\t\t\t\t} else {\n\t\t\t\t\tgo serv.delayReader(conn)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func TcpReadFixedSize(ctx context.Context, conn net.Conn, m int, timeout time.Duration) ([]byte, error) {\n\tctx, _ = context.WithTimeout(ctx, timeout)\n\tc := make(chan error, 1)\n\tvar messageBuffer bytes.Buffer\n\tgo func() {\n\t\tvar err error\n\t\tdefer func() { c <- err }()\n\t\tfor m > 0 {\n\t\t\ttmpBuffer := make([]byte, m)\n\t\t\tn, err := conn.Read(tmpBuffer)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tm = m - n\n\t\t\tif _, err = messageBuffer.Write(tmpBuffer); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\treturn\n\t}()\n\tselect {\n\tcase <-ctx.Done():\n\t\tclose(c)\n\t\treturn messageBuffer.Bytes(), oops.Wrapf(ctx.Err(), \"ctx\")\n\tcase err := <-c:\n\t\treturn messageBuffer.Bytes(), oops.Wrapf(err, \"read\")\n\t}\n}",
"func (c *Conn) Read(b []byte) (n int, err error) {\n\tif len(b) == 0 {\n\t\treturn 0, nil\n\t}\n\terr = tryAgain\n\tfor err == tryAgain {\n\t\tn, errcb := c.read(b)\n\t\terr = c.handleError(errcb)\n\t\tif err == nil {\n\t\t\tgo c.flushOutputBuffer()\n\t\t\treturn n, nil\n\t\t}\n\t\tif err == io.ErrUnexpectedEOF {\n\t\t\terr = io.EOF\n\t\t}\n\t}\n\treturn 0, err\n}",
"func (s *Sniffer) Recv(t *testing.T, timeout time.Duration) []byte {\n\tt.Helper()\n\n\tdeadline := time.Now().Add(timeout)\n\tfor {\n\t\ttimeout = time.Until(deadline)\n\t\tif timeout <= 0 {\n\t\t\treturn nil\n\t\t}\n\t\tusec := timeout.Microseconds()\n\t\tif usec == 0 {\n\t\t\t// Timeout is less than a microsecond; set usec to 1 to avoid\n\t\t\t// blocking indefinitely.\n\t\t\tusec = 1\n\t\t}\n\t\tconst microsInOne = 1e6\n\t\ttv := unix.Timeval{\n\t\t\tSec: usec / microsInOne,\n\t\t\tUsec: usec % microsInOne,\n\t\t}\n\t\tif err := unix.SetsockoptTimeval(s.fd, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &tv); err != nil {\n\t\t\tt.Fatalf(\"can't setsockopt SO_RCVTIMEO: %s\", err)\n\t\t}\n\n\t\tbuf := make([]byte, maxReadSize)\n\t\tnread, _, err := unix.Recvfrom(s.fd, buf, unix.MSG_TRUNC)\n\t\tif err == unix.EINTR || err == unix.EAGAIN {\n\t\t\t// There was a timeout.\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"can't read: %s\", err)\n\t\t}\n\t\tif nread > maxReadSize {\n\t\t\tt.Fatalf(\"received a truncated frame of %d bytes, want at most %d bytes\", nread, maxReadSize)\n\t\t}\n\t\treturn buf[:nread]\n\t}\n}"
] | [
"0.6698476",
"0.592675",
"0.5846762",
"0.58344626",
"0.57960194",
"0.5787837",
"0.5778206",
"0.5766628",
"0.57591337",
"0.5743206",
"0.5741069",
"0.57359076",
"0.5708238",
"0.56657344",
"0.56469196",
"0.56459975",
"0.56374776",
"0.56374776",
"0.5634473",
"0.56332356",
"0.5598442",
"0.55778104",
"0.5576698",
"0.55350846",
"0.5504047",
"0.5490909",
"0.5468817",
"0.5459477",
"0.543513",
"0.5430435",
"0.54167455",
"0.5409444",
"0.53441507",
"0.5339437",
"0.5292262",
"0.5286134",
"0.52626044",
"0.52607167",
"0.5258025",
"0.5201717",
"0.5201214",
"0.51949525",
"0.51899344",
"0.51798725",
"0.5152308",
"0.51435053",
"0.51345176",
"0.5132058",
"0.513093",
"0.51174295",
"0.51123995",
"0.5110346",
"0.5109076",
"0.5105851",
"0.5103035",
"0.5100584",
"0.5090922",
"0.50816554",
"0.50786185",
"0.5074057",
"0.50723016",
"0.50651574",
"0.50587",
"0.505755",
"0.505755",
"0.505679",
"0.5049901",
"0.5048456",
"0.50402915",
"0.50388795",
"0.5038756",
"0.5033854",
"0.5029499",
"0.50175303",
"0.50116235",
"0.5009413",
"0.49965423",
"0.49919522",
"0.49886286",
"0.4988432",
"0.49859938",
"0.49845046",
"0.4981513",
"0.49756",
"0.49729258",
"0.49727002",
"0.49706835",
"0.49681902",
"0.4964",
"0.4961329",
"0.49457538",
"0.49410132",
"0.49395964",
"0.49279583",
"0.49253908",
"0.49213967",
"0.49156803",
"0.4906001",
"0.490579",
"0.49048194"
] | 0.81593716 | 0 |
HasPrefixAny determines if any of the string values have the given prefix. | func HasPrefixAny(prefix string, values []string) bool {
for _, val := range values {
if strings.HasPrefix(val, prefix) {
return true
}
}
return false
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func StartsWithAny(str string, prefixes ...string) bool {\n\tfor _, prefix := range prefixes {\n\t\tif internalStartsWith(str, (string)(prefix), false) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func HasAnyPrefix(s string, prefixList []string) bool {\n\tfor _, prefix := range prefixList {\n\t\tif strings.HasPrefix(s, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func HasAnyPrefix(text string, slice []string) bool {\n\tfor _, s := range slice {\n\t\tif strings.HasPrefix(text, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StartsWithAnyIgnoreCase(str string, prefixes ...string) bool {\n\tfor _, prefix := range prefixes {\n\t\tif internalStartsWith(str, (string)(prefix), true) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func HasPrefixAnyI(s string, prefixes ...string) bool {\n\tfor _, prefix := range prefixes {\n\t\tif stringsutil.HasPrefixI(s, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringsHasPrefix(s []string, p string) bool {\n\tfor _, x := range s {\n\t\tif !strings.HasPrefix(x, p) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func HasPrefix(s string, p ...string) bool {\n\tfor _, i := range p {\n\t\tif strings.HasPrefix(s, i) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func HasPrefix(s string, prefixes ...string) bool {\n\tfor _, p := range prefixes {\n\t\tif strings.HasPrefix(s, p) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (s StringSet) IncludesAny(values []string) bool {\n\tfor _, v := range values {\n\t\tif _, ok := s[v]; ok {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func HasOneOfPrefixesFold(str string, prefixes ...string) bool {\n\tfor _, pre := range prefixes {\n\t\tif HasPrefixFold(str, pre) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func HasPrefix(prefix, operand string) bool { return strings.HasPrefix(operand, prefix) }",
"func PrefixInList(list []string, prefix string) bool {\n\tfor _, s := range list {\n\t\tif strings.HasPrefix(s, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func AnyPrefixMatcher(strs ...string) MatcherFunc {\n\ttree := ternary_search_tree.New(strs...)\n\treturn func(_ io.Writer, r io.Reader) bool {\n\t\tbuf := make([]byte, tree.Depth())\n\t\tn, _ := io.ReadFull(r, buf)\n\t\t_, _, ok := tree.Follow(string(buf[:n]))\n\t\treturn ok\n\t}\n}",
"func IncludesAnyStr(needles []string, haystack []string) bool {\n\tfor _, needle := range needles {\n\t\tif ok, _ := InArray(needle, haystack); ok {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\tfor k, ws := range this.words {\n\t\tif k < len(prefix) {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, w := range ws {\n\t\t\tif strings.HasPrefix(w, prefix) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func ContainsAny(str string, search ...string) bool {\n\tfor _, s := range search {\n\t\tif Contains(str, (string)(s)) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringValHasPrefix(v string) predicate.Property {\n\treturn predicate.Property(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldStringVal), v))\n\t})\n}",
"func Contains(s, substr string) bool {\n\tfor i := range s {\n\t\tif HasPrefix(s[i:], substr) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringsSharePrefix(s []string) bool {\n\tsort.Strings(s)\n\n\tfor i := 0; i < len(s)-1; i++ {\n\t\tif strings.HasPrefix(s[i+1], s[i]) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\tfor i := 0; i < len(prefix); i++ {\n\t\tif this.son[prefix[i]-'a'] == nil {\n\t\t\treturn false\n\t\t}\n\t\tthis = this.son[prefix[i]-'a']\n\t}\n\treturn true\n}",
"func PrefixMatch(key string) (res []interface{}) {\n\tglobalStore.RLock()\n\tdefer globalStore.RUnlock()\n\n\tfor k, v := range globalStore.store {\n\t\tif strings.HasPrefix(k, key) {\n\t\t\tres = append(res, v)\n\t\t}\n\t}\n\n\treturn\n}",
"func StringHasPrefix(column string, prefix string, opts ...Option) *sql.Predicate {\n\treturn sql.P(func(b *sql.Builder) {\n\t\topts = append([]Option{Unquote(true)}, opts...)\n\t\tvaluePath(b, column, opts...)\n\t\tb.Join(sql.HasPrefix(\"\", prefix))\n\t})\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\treturn this.dict[prefix] || this.dictPrefix[prefix]\n}",
"func HasPrefix(s, prefix string) bool {\n\tif len(s) < len(prefix) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(prefix); i++ {\n\t\tif s[i] != prefix[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func Contains(s, substring string) bool {\n\tfor i := 0; i < len(s); i++ {\n\t\tif HasPrefix(s[i:], substring) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func hasPrefixDemo(a string, b string) bool {\n\treturn strings.HasPrefix(a, b)\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\tt := this\n\tfor i := range prefix {\n\t\tif t.trie == nil {\n\t\t\treturn false\n\t\t}\n\t\tif !t.trie[prefix[i]-'a'].exist {\n\t\t\treturn false\n\t\t}\n\t\tt = &t.trie[prefix[i]-'a'].trie\n\t}\n\treturn true\n}",
"func MixedStringHasPrefix(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldMixedString), v))\n\t})\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n for _,v :=range prefix{\n if this.name[v-'a'] == nil{\n return false\n }\n \n this = this.name[v-'a']\n }\n return true\n}",
"func containsPathPrefix(pats []string, s string) bool {\n\tfor _, pat := range pats {\n\t\tif pat == s || strings.HasPrefix(s, pat+\"/\") {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\tif prefix == \"\" {\n\t\treturn false\n\t}\n\thead := this\n\tfor e := range prefix {\n\t\tif head.data[prefix[e]-'a'] == nil {\n\t\t\treturn false\n\t\t}\n\t\thead = head.data[prefix[e]-'a']\n\t}\n\treturn true\n}",
"func MixedStringHasPrefix(v string) predicate.User {\n\treturn predicate.User(sql.FieldHasPrefix(FieldMixedString, v))\n}",
"func HasPrefixFold(str, prefix string) bool {\n\treturn len(str) >= len(prefix) && strings.EqualFold(str[0:len(prefix)], prefix)\n}",
"func StartsWith(str, prefix string) bool {\n\treturn strings.HasPrefix(str, prefix)\n}",
"func hasPrefix(s, prefix string) bool {\n\treturn len(prefix) <= len(s) && s[:len(prefix)] == prefix\n}",
"func MatchPrefix(prefixes ...string) MatcherFunc { return MatchPrefixes(prefixes) }",
"func (cs *CStore) AnyContains(needle string) bool {\n\tfor key := range cs.store {\n\t\tif strings.Contains(key, needle) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\tcur := this.root\n\n\t// go through prefix\n\tfor _, c := range prefix {\n\t\t// check if in children\n\t\tif child, ok := cur.children[c]; ok {\n\t\t\t// set cur\n\t\t\tcur = child\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// no probs\n\treturn true\n}",
"func (s *Stringish) HasPrefix(prefix string) bool {\n\treturn strings.HasPrefix(s.str, prefix)\n}",
"func HasPrefix(prefix string) MatchFunc {\n\treturn func(s string) bool { return strings.HasPrefix(s, prefix) }\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\tcurr := this\n\tfor _, c := range prefix {\n\t\tif curr.next[c-'a'] == nil {\n\t\t\treturn false\n\t\t}\n\t\tcurr = curr.next[c-'a']\n\t}\n\n\treturn true\n}",
"func AnyValueInStringSlice(Slice1, Slice2 []string) bool {\n\tif len(Slice1) == 0 || len(Slice2) == 0 {\n\t\treturn false\n\t}\n\tfor _, x := range Slice1 {\n\t\tif IsValueInStringSlice(x, Slice2) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringContainsAny(str string, subStrings []string) bool {\n\tfor _, subString := range subStrings {\n\t\tif strings.Contains(str, subString) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (t *Trie) StartsWith(prefix string) bool {\n\tp := t.root\n\twordArr := []rune(prefix)\n\n\tfor i := 0; i < len(wordArr); i++ {\n\t\tif p.edges[wordArr[i]-'a'] == nil {\n\t\t\treturn false\n\t\t} else {\n\t\t\tp = p.edges[wordArr[i]-'a']\n\t\t}\n\t}\n\treturn true\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\troot := this\n\tfor _, chartV := range prefix {\n\t\tnext, ok := root.next[string(chartV)]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\troot = next\n\t}\n\treturn true\n}",
"func (t *Trie) StartsWith(prefix string) bool {\n\tcur := t.Root\n\tfor _, c := range prefix {\n\t\t_, ok := cur.Next[c]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\tcur = cur.Next[c]\n\t}\n\n\treturn true\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\tnode := this\n\tn := len(prefix)\n\tfor i := 0; i < n; i++ {\n\t\tidx := prefix[i] - 'a'\n\t\tif node.sons[idx] == nil {\n\t\t\treturn false\n\t\t}\n\t\tnode = node.sons[idx]\n\t}\n\treturn true\n}",
"func hasPrefix(s, prefix string) bool {\n\treturn len(s) >= len(prefix) && s[:len(prefix)] == prefix\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\tcur := this.Root\n\tfor _, c := range prefix {\n\t\tif _, ok := cur.Child[c]; !ok {\n\t\t\treturn false\n\t\t}\n\t\tcur = cur.Child[c]\n\t}\n\treturn true\n}",
"func prependIfMissing(str string, prefix string, ignoreCase bool, prefixes ...string) string {\n if IsEmpty(prefix) || internalStartsWith(str, prefix, ignoreCase) {\n\t\treturn str\n\t}\n\tfor _, pref := range prefixes {\n\t\tif pref == \"\" || internalStartsWith(str, pref, ignoreCase) {\n\t\t\treturn str\n\t\t}\n\t}\n\treturn prefix + str\n}",
"func SocialPayloadHasPrefix(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldSocialPayload), v))\n\t})\n}",
"func HasPrefix(s, prefix string) bool {\n\treturn len(s) >= len(prefix) && s[:len(prefix)] == prefix\n}",
"func HasPrefix(s, prefix string) bool {\n\treturn len(s) >= len(prefix) && s[:len(prefix)] == prefix\n}",
"func HasPrefix(s, prefix string) bool {\n\treturn len(s) >= len(prefix) && s[:len(prefix)] == prefix\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\tn := this.root\n\n\tfor i := 0; i < len(prefix); i++ {\n\t\twid := prefix[i] - 'a'\n\t\tif n.children[wid] == nil {\n\t\t\treturn false\n\t\t}\n\t\tn = n.children[wid]\n\t}\n\n\treturn true\n}",
"func AnySatisfies(pred StringPredicate, slice []string) bool {\n\tfor _, sliceString := range slice {\n\t\tif pred(sliceString) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func internalStartsWith(str string, prefix string, ignoreCase bool) bool {\n\tif str == \"\" || prefix == \"\" {\n\t\treturn (str == \"\" && prefix == \"\")\n\t}\n\tif utf8.RuneCountInString(prefix) > utf8.RuneCountInString(str) {\n\t\treturn false\n\t}\n\tif ignoreCase {\n\t\treturn strings.HasPrefix(strings.ToLower(str), strings.ToLower(prefix))\n\t}\n\treturn strings.HasPrefix(str, prefix)\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\tnode := this\n\tfor _, v := range prefix {\n\t\tif node = node.next[v-'a']; node == nil {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func HasAllowedImageAsPrefix(str string, imageList []string) bool {\n\tfor _, imagePrefix := range imageList {\n\t\tif strings.HasPrefix(str, imagePrefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func ListHasPrefix(list, prefix []string) bool {\n\tif len(prefix) == 0 {\n\t\treturn false\n\t}\n\tif len(prefix) > len(list) {\n\t\treturn false\n\t}\n\treturn ListEquals(list[:len(prefix)], prefix)\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\ttemp := this\n\tfor _, v := range prefix {\n\t\tnxt := v - 'a'\n\t\tif temp.next[nxt] == nil {\n\t\t\treturn false\n\t\t} else {\n\t\t\ttemp = temp.next[nxt]\n\t\t}\n\t}\n\treturn true\n}",
"func StartsWith(str string, prefix string) bool {\n\treturn internalStartsWith(str, prefix, false)\n}",
"func ContainsAny(text string, slice []string) bool {\n\tfor _, s := range slice {\n\t\tif strings.Contains(text, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (t *Trie) StartsWith(prefix string) bool {\n\ttmp := t\n\tfor _, c := range prefix {\n\t\tif l, valid := tmp.links[string(c)]; valid {\n\t\t\ttmp = l\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func MatchPrefixes(prefixes []string) MatcherFunc {\n\treturn func(el Elem) bool {\n\t\tfor _, pfx := range prefixes {\n\t\t\tif strings.HasPrefix(el.Name(), pfx) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n}",
"func startsWithFunc(a, b string) bool {\n\treturn strings.HasPrefix(a, b)\n}",
"func StreetHasPrefix(v string) predicate.Delivery {\n\treturn predicate.Delivery(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldStreet), v))\n\t})\n}",
"func startsWith(arr []string, parts ...string) bool {\n\tif len(arr) < len(parts) {\n\t\treturn false\n\t}\n\tfor i, p := range parts {\n\t\tif arr[i] != p {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\tcur := this\n\tfor i := 0; i < len(prefix); i++ {\n\t\tb := prefix[i]\n\t\tif cur.next[b-97] == nil {\n\t\t\treturn false\n\t\t}\n\t\tcur = cur.next[b-97]\n\t}\n\treturn cur != nil\n}",
"func SliceContainsAny(haystack []string, needles ...string) bool {\n\tfor _, a := range haystack {\n\t\tfor _, needle := range needles {\n\t\t\tif a == needle {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func (req *ServerHTTPRequest) HasQueryPrefix(prefix string) bool {\n\tsuccess := req.parseQueryValues()\n\tif !success {\n\t\treturn false\n\t}\n\n\tfor key := range req.queryValues {\n\t\tif strings.HasPrefix(key, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n node := this.root\n for _, r := range prefix {\n child, existed := node.children[r]\n if !existed {\n return false\n }\n node = child\n }\n return true\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\ttrie := this\n\tfor _, char := range prefix {\n\t\tif trie.childs[char-97] == nil {\n\t\t\treturn false\n\t\t}\n\t\ttrie = trie.childs[char-97]\n\t}\n\treturn true\n}",
"func stringMatchAny(x string, y []string) bool {\n\tfor _, v := range y {\n\t\tif x == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\tif len(prefix) == 0 {\n\t\treturn true\n\t}\n\tfor _, e := range this.edges {\n\t\tif e.char == prefix[0] {\n\t\t\treturn e.next.StartsWith(prefix[1:])\n\t\t}\n\t}\n\treturn false\n}",
"func (o BucketLifecycleRuleItemConditionResponseOutput) MatchesPrefix() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BucketLifecycleRuleItemConditionResponse) []string { return v.MatchesPrefix }).(pulumi.StringArrayOutput)\n}",
"func ZipcodeHasPrefix(v string) predicate.Delivery {\n\treturn predicate.Delivery(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldZipcode), v))\n\t})\n}",
"func ZipcodeHasPrefix(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldZipcode), v))\n\t})\n}",
"func anyAreEmpty(values ...string) bool {\n\tfor _, v := range values {\n\t\tif v == \"\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n node := this.searchPrefix(prefix)\n \n return node != nil\n}",
"func CountryHasPrefix(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldCountry), v))\n\t})\n}",
"func TestAnyString(t *testing.T) {\n\tt.Parallel()\n\tvar tests = []struct {\n\t\ts []string\n\t\texpected bool\n\t}{\n\t\t{[]string{\"foo\", \"\\u0062\\u0061\\u0072\", \"baz\"}, true},\n\t\t{[]string{\"boo\", \"bar\", \"baz\"}, false},\n\t\t{[]string{\"foo\", \"far\", \"baz\"}, true},\n\t}\n\tfor _, test := range tests {\n\t\tactual := primitives.AnyString(test.s, func(s string) bool {\n\t\t\treturn strings.HasPrefix(s, \"f\")\n\t\t})\n\t\tassert.Equal(t, test.expected, actual, \"expected value '%v' | actual : '%v'\", test.expected, actual)\n\t}\n}",
"func (m URLPrefixMap) Contains(uri *url.URL) bool {\n\ts := strings.TrimPrefix(uri.Host, \"www.\")\n\tif _, ok := m[s]; ok {\n\t\treturn true\n\t}\n\tfor _, p := range strings.Split(uri.Path, \"/\") {\n\t\tif p == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\ts = fmt.Sprintf(\"%s/%s\", s, p)\n\t\tif _, ok := m[s]; ok {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (t *Trie) StartsWith(prefix string) bool {\n\treturn t.searchPrefix(prefix) != nil\n}",
"func (a *Assertions) HasPrefix(corpus, prefix string, userMessageComponents ...interface{}) bool {\n\ta.assertion()\n\tif didFail, message := shouldHasPrefix(corpus, prefix); didFail {\n\t\treturn a.fail(message, userMessageComponents...)\n\t}\n\treturn true\n}",
"func StreetHasPrefix(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldStreet), v))\n\t})\n}",
"func PostalcodeHasPrefix(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldPostalcode), v))\n\t})\n}",
"func (dc *IPMap) checkPrefixAllMatches(sIP string) ([]ServiceID, bool) {\n\tservices := []ServiceID{}\n\tip := net.ParseIP(sIP)\n\tfound := false\n\tfor _, entry := range dc.prefixes {\n\t\tif entry.prefix.Contains(ip) {\n\t\t\tservices = append(services, entry.services...)\n\t\t\tfound = true\n\t\t}\n\t}\n\treturn services, found\n}",
"func IsAnyEmpty(strings ...string) bool {\n\tfor _, s := range strings {\n\t\tif IsEmpty(s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\tif prefix == \"\" {\n\t\treturn true\n\t}\n\tif this == nil {\n\t\treturn false\n\t}\n\tindex := ([]byte(prefix[0:1]))[0] - byte('a')\n\tif this.child[index] == nil {\n\t\treturn false\n\t}\n\tif prefix[1:] == \"\" {\n\t\treturn true\n\t}\n\treturn this.child[index].StartsWith(prefix[1:])\n\n}",
"func (t *Trie) StartWith(prefix string) bool {\n\tcurr := t.Root\n\tfor _, char := range prefix {\n\t\tif _, ok := curr.Children[char]; !ok {\n\t\t\treturn false\n\t\t}\n\t\tcurr = curr.Children[char]\n\t}\n\treturn true\n}",
"func ContainsAtLeastOneString(haystack []string, needles ...string) bool {\n\t// Avoid allocations for a single check.\n\tif len(needles) == 1 {\n\t\tfor _, h := range haystack {\n\t\t\tif h == needles[0] {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tchecks := make(map[string]struct{}, len(needles))\n\tfor _, n := range needles {\n\t\tchecks[n] = struct{}{}\n\t}\n\n\tfor _, h := range haystack {\n\t\t_, ok := checks[h]\n\t\tif ok {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\treturn this.SearchPrefix(prefix) != nil\n}",
"func FilterPrefix(stringSet sets.String, prefix string, ignoreCase bool) sets.String {\n\tif prefix == \"\" {\n\t\treturn stringSet\n\t}\n\treturn filterSet(stringSet, prefix, ignoreCase, strings.HasPrefix)\n}",
"func TaxIDHasPrefix(v string) predicate.Watchlist {\n\treturn predicate.Watchlist(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldTaxID), v))\n\t})\n}",
"func (o BucketLifecycleRuleItemConditionOutput) MatchesPrefix() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BucketLifecycleRuleItemCondition) []string { return v.MatchesPrefix }).(pulumi.StringArrayOutput)\n}",
"func (this *Trie) StartsWith(prefix string) bool {\n\tbytes := []byte(prefix)\n\tif len(bytes) <= 0 {\n\t\treturn true\n\t}\n\tfor _, value := range bytes {\n\t\t//如果数据存在\n\t\tif _, ok := this.nexts[value]; !ok {\n\t\t\treturn false\n\t\t}\n\t\tthis = this.nexts[value]\n\t}\n\treturn true\n}",
"func SelectPrefixInStringSlice(prefix string, items []string) []string {\n\n\tl := len(prefix)\n\n\tvar results []string\n\n\t// iterate through the slice of items\n\tfor _, item := range items {\n\n\t\t// check the item length is geater than or equal to the prefix length\n\t\t// this ensures no out of bounds memory errors will occur\n\t\tif len(item) >= l {\n\t\t\tif prefix == item[:l] {\n\t\t\t\tresults = append(results, item)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn results\n}",
"func containsWildcardValue(flag []string) bool {\n\tfor _, value := range flag {\n\t\tif value == all {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func FirstNameHasPrefix(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldFirstName), v))\n\t})\n}"
] | [
"0.8174585",
"0.79133207",
"0.7500345",
"0.738086",
"0.71756464",
"0.6655777",
"0.659458",
"0.6593437",
"0.6548416",
"0.6371666",
"0.63586235",
"0.6307001",
"0.6305777",
"0.6094398",
"0.60713154",
"0.59563047",
"0.5929936",
"0.5913401",
"0.58664",
"0.58584625",
"0.58547485",
"0.583861",
"0.5823773",
"0.58182263",
"0.5808574",
"0.58025265",
"0.5798194",
"0.57887095",
"0.57716453",
"0.5763238",
"0.5736851",
"0.5735318",
"0.57224435",
"0.5720496",
"0.57106954",
"0.5709657",
"0.5702433",
"0.5692912",
"0.56908226",
"0.56868607",
"0.56796604",
"0.5674447",
"0.56711376",
"0.5663395",
"0.5662449",
"0.5651354",
"0.56510353",
"0.5650179",
"0.56247807",
"0.5621473",
"0.56005555",
"0.5572845",
"0.5572845",
"0.5572845",
"0.5572789",
"0.55708736",
"0.5566191",
"0.55597174",
"0.5552582",
"0.55392563",
"0.5534622",
"0.5522141",
"0.550648",
"0.5496988",
"0.548244",
"0.54611415",
"0.5459387",
"0.5456026",
"0.5452717",
"0.5436017",
"0.54194313",
"0.54187816",
"0.54114026",
"0.54112816",
"0.54085696",
"0.54052943",
"0.5402295",
"0.53974885",
"0.5390822",
"0.5383019",
"0.53817374",
"0.53621274",
"0.5361205",
"0.53465873",
"0.53336126",
"0.5330215",
"0.5328933",
"0.53286994",
"0.5315268",
"0.53122467",
"0.5306369",
"0.53039914",
"0.53021765",
"0.52985954",
"0.5294909",
"0.5284019",
"0.5281666",
"0.5278386",
"0.52772087",
"0.5271012"
] | 0.8484297 | 0 |
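As a sketch of how the HasPrefixAny document above might be exercised, here is a minimal, self-contained Go example; the package scaffolding, main function, and sample values are assumptions added for illustration and are not part of the source row:

package main

import (
	"fmt"
	"strings"
)

// HasPrefixAny reports whether any of the string values has the given prefix
// (reproduced from the document above).
func HasPrefixAny(prefix string, values []string) bool {
	for _, val := range values {
		if strings.HasPrefix(val, prefix) {
			return true
		}
	}
	return false
}

func main() {
	values := []string{"alpha", "beta", "gamma"}
	fmt.Println(HasPrefixAny("be", values)) // true: "beta" starts with "be"
	fmt.Println(HasPrefixAny("zz", values)) // false: no value starts with "zz"
}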
ByteCount converts a size in bytes to a human-readable string. | func ByteCount(b int64) string {
const unit = 1000
if b < unit {
return fmt.Sprintf("%d B", b)
}
div, exp := int64(unit), 0
for n := b / unit; n >= unit; n /= unit {
div *= unit
exp++
}
return fmt.Sprintf("%.1f %cB",
float64(b)/float64(div), "kMGTPE"[exp])
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Size(size int64) string {\n\tif size < 1024 {\n\t\treturn fmt.Sprintf(\"%d byte\", size)\n\t}\n\tif size < 1024*1024 {\n\t\treturn fmt.Sprintf(\"%d kB\", size/1024)\n\t}\n\tif size < 1024*1024*1024 {\n\t\treturn fmt.Sprintf(\"%d MB\", size/1024/1024)\n\t}\n\treturn fmt.Sprintf(\"%d GB\", size/1024/1024/1024)\n}",
"func BytesSize(size float64) string {\n\treturn units.CustomSize(\"%.2f%s\", size, 1024.0, binaryAbbrs)\n}",
"func ByteSize(bytes int) string {\n\tconst (\n\t\tBYTE = 1 << (10 * iota)\n\t\tKB\n\t\tMB\n\t\tGB\n\t\tTB\n\t\tPB\n\t\tEB\n\t)\n\tvar (\n\t\tu = \"\"\n\t\tv = float64(bytes)\n\t\tresult string\n\t)\n\tswitch {\n\tcase bytes >= EB:\n\t\tu = \"E\"\n\t\tv = v / EB\n\tcase bytes >= PB:\n\t\tu = \"P\"\n\t\tv = v / PB\n\tcase bytes >= TB:\n\t\tu = \"T\"\n\t\tv = v / TB\n\tcase bytes >= GB:\n\t\tu = \"G\"\n\t\tv = v / GB\n\tcase bytes >= MB:\n\t\tu = \"M\"\n\t\tv = v / MB\n\tcase bytes >= KB:\n\t\tu = \"K\"\n\t\tv = v / KB\n\tcase bytes >= BYTE:\n\t\tu = \"B\"\n\tcase bytes == 0:\n\t\treturn \"0B\"\n\t}\n\tresult = strconv.FormatFloat(v, 'f', 1, 64)\n\tresult = strings.TrimSuffix(result, \".0\")\n\treturn result + u\n}",
"func ByteSize(b float64) string {\n\tvar (\n\t\tunit string\n\t\tdel float64 = 1\n\t)\n\n\tswitch {\n\tcase b >= YB:\n\t\tunit = \"Y\"\n\t\tdel = YB\n\tcase b >= ZB:\n\t\tunit = \"Z\"\n\t\tdel = ZB\n\tcase b >= EB:\n\t\tunit = \"E\"\n\t\tdel = EB\n\tcase b >= PB:\n\t\tunit = \"P\"\n\t\tdel = PB\n\tcase b >= TB:\n\t\tunit = \"T\"\n\t\tdel = TB\n\tcase b >= GB:\n\t\tunit = \"G\"\n\t\tdel = GB\n\tcase b >= MB:\n\t\tunit = \"M\"\n\t\tdel = MB\n\tcase b >= KB:\n\t\tunit = \"K\"\n\t\tdel = KB\n\tcase b == 0:\n\t\treturn \"0\"\n\tdefault:\n\t\tunit = \"B\"\n\t}\n\treturn strings.TrimSuffix(\n\t\tstrconv.FormatFloat(b/del, 'f', 1, 32),\n\t\t\".0\",\n\t) + unit\n}",
"func ByteSize(bytes uint64) string {\n\tunit := \"\"\n\tvalue := float32(bytes)\n\n\tswitch {\n\tcase bytes >= TERABYTE:\n\t\tunit = \"T\"\n\t\tvalue = value / TERABYTE\n\tcase bytes >= GIGABYTE:\n\t\tunit = \"G\"\n\t\tvalue = value / GIGABYTE\n\tcase bytes >= MEGABYTE:\n\t\tunit = \"M\"\n\t\tvalue = value / MEGABYTE\n\tcase bytes >= KILOBYTE:\n\t\tunit = \"K\"\n\t\tvalue = value / KILOBYTE\n\tcase bytes >= BYTE:\n\t\tunit = \"B\"\n\tcase bytes == 0:\n\t\treturn \"0\"\n\t}\n\n\tstringValue := fmt.Sprintf(\"%.1f\", value)\n\tstringValue = strings.TrimSuffix(stringValue, \".0\")\n\treturn fmt.Sprintf(\"%s%s\", stringValue, unit)\n}",
"func ByteSize(bytes uint64) string {\n\tunit := \"\"\n\tvalue := float32(bytes)\n\n\tswitch {\n\tcase bytes >= TERABYTE:\n\t\tunit = \"T\"\n\t\tvalue = value / TERABYTE\n\tcase bytes >= GIGABYTE:\n\t\tunit = \"G\"\n\t\tvalue = value / GIGABYTE\n\tcase bytes >= MEGABYTE:\n\t\tunit = \"M\"\n\t\tvalue = value / MEGABYTE\n\tcase bytes >= KILOBYTE:\n\t\tunit = \"K\"\n\t\tvalue = value / KILOBYTE\n\tcase bytes >= BYTE:\n\t\tunit = \"B\"\n\tcase bytes == 0:\n\t\treturn \"0\"\n\t}\n\n\tstringValue := fmt.Sprintf(\"%.1f\", value)\n\tstringValue = strings.TrimSuffix(stringValue, \".0\")\n\treturn fmt.Sprintf(\"%s%s\", stringValue, unit)\n}",
"func ByteSizeString(sizeInBytes int64, useSI bool) string {\n\tn := int64(1024)\n\tif useSI {\n\t\tn = 1000\n\t}\n\tif sizeInBytes > -n && sizeInBytes < n {\n\t\treturn fmt.Sprintf(\"%d B\", sizeInBytes)\n\t}\n\tif !useSI && sizeInBytes == math.MinInt64 {\n\t\treturn \"-7.9 EiB\"\n\t}\n\tneg := sizeInBytes < 0\n\tvar ret string\n\tif neg {\n\t\tsizeInBytes = -sizeInBytes\n\t\tret = \"-\"\n\t}\n\tfor _, group := range []struct {\n\t\tunit string\n\t\tscale int64\n\t}{ // binary: SI:\n\t\t{\"E\", n * n * n * n * n * n}, // exabyte exbibyte\n\t\t{\"P\", n * n * n * n * n}, // petabyte pebibyte\n\t\t{\"T\", n * n * n * n}, // terabyte tebibyte\n\t\t{\"G\", n * n * n}, // gigabyte gibibyte\n\t\t{\"M\", n * n}, // megabyte mebibyte\n\t\t{\"K\", n}, // kilobyte kibibyte\n\t} {\n\t\tif sizeInBytes < group.scale {\n\t\t\tcontinue\n\t\t}\n\t\t// because Sprintf() rounds numbers up, cut at 1dp before calling it\n\t\t// (use either regular arithmetic or math.BigInt if size is very large)\n\t\tvar cut float64\n\t\tif sizeInBytes < math.MaxInt64/1024 {\n\t\t\tcut = float64(sizeInBytes) / float64(group.scale)\n\t\t\tcut = float64(int64(cut*10)) / 10\n\t\t} else {\n\t\t\tsz := big.NewInt(sizeInBytes)\n\t\t\tsz.Mul(sz, big.NewInt(10))\n\t\t\tsz.Div(sz, big.NewInt(group.scale))\n\t\t\tcut = float64(sz.Int64()) / 10\n\t\t}\n\t\tret += fmt.Sprintf(\"%0.1f\", cut)\n\t\t//\n\t\t// remove trailing zero decimal\n\t\tif strings.HasSuffix(ret, \".0\") {\n\t\t\tret = ret[:len(ret)-2]\n\t\t}\n\t\t// append SI or binary units\n\t\tret += \" \" + group.unit\n\t\tif useSI {\n\t\t\tret += \"B\"\n\t\t} else {\n\t\t\tret += \"iB\"\n\t\t}\n\t\tbreak\n\t}\n\treturn ret\n}",
"func sizeString(size int64) string {\n\tsizeFloat := float64(size)\n\tfor i, unit := range sizeUnits {\n\t\tbase := math.Pow(1024, float64(i))\n\t\tif sizeFloat < base*1024 {\n\t\t\tvar sizeStr string\n\t\t\tif i == 0 {\n\t\t\t\tsizeStr = strconv.FormatInt(size, 10)\n\t\t\t} else {\n\t\t\t\tvalue := sizeFloat / base\n\t\t\t\tif value < 1000 {\n\t\t\t\t\tsizeStr = fmt.Sprintf(\"%.2f\", value)\n\t\t\t\t} else {\n\t\t\t\t\tsizeStr = fmt.Sprintf(\"%.1f\", value)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn SizeColor[unit] + pad.Left(sizeStr, 6, \" \") + unit + \" \" + Reset\n\t\t}\n\t}\n\treturn strconv.Itoa(int(size))\n}",
"func (s Size) String() string {\n\tt := uint64(s)\n\tswitch {\n\tcase t < 1<<10:\n\t\treturn fmt.Sprintf(\"%d B\", t)\n\tcase t < 1<<20:\n\t\treturn fmt.Sprintf(\"%.1f KiB\", float64(t)/float64(1<<10))\n\tcase t < 1<<30:\n\t\treturn fmt.Sprintf(\"%.1f MiB\", float64(t)/float64(1<<20))\n\tdefault:\n\t\treturn fmt.Sprintf(\"%.1f GiB\", float64(t)/float64(1<<30))\n\t}\n}",
"func (o BackupOutput) SizeBytes() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Backup) pulumi.StringOutput { return v.SizeBytes }).(pulumi.StringOutput)\n}",
"func ConvertBytesToSizeString(b int64) string {\n\tconst unit = 1000\n\n\tif b < unit {\n\t\treturn fmt.Sprintf(\"%d B\", b)\n\t}\n\n\tdiv, exp := int64(unit), 0\n\tfor n := b / unit; n >= unit; n /= unit {\n\t\tdiv *= unit\n\t\texp++\n\t}\n\n\treturn fmt.Sprintf(\"%.1f %cB\",\n\t\tfloat64(b)/float64(div), \"kMGTPE\"[exp])\n}",
"func Size(a int) string {\n\tswitch {\n\tcase a < 0:\n\t\treturn \"negative\"\n\tcase a == 0:\n\t\treturn \"zero\"\n\tcase a <= 100:\n\t\treturn \"small\"\n\tcase a > 100:\n\t\treturn \"big\"\n\n\t}\n\treturn \"unknow\"\n}",
"func (s SizeMessage) humanize() string {\n\tif s.showHumanized {\n\t\treturn strutil.HumanizeBytes(s.Size)\n\t}\n\treturn fmt.Sprintf(\"%d\", s.Size)\n}",
"func (s Size) String() string {\n\tswitch {\n\tcase s == 0:\n\t\treturn \"0B\"\n\tcase s%Petabyte == 0:\n\t\treturn format(s.Petabytes(), \"PB\")\n\tcase s >= Pebibyte:\n\t\treturn format(s.Pebibytes(), \"PiB\")\n\tcase s%Terabyte == 0:\n\t\treturn format(s.Terabytes(), \"TB\")\n\tcase s >= Tebibyte:\n\t\treturn format(s.Tebibytes(), \"TiB\")\n\tcase s%Gigabyte == 0:\n\t\treturn format(s.Gigabytes(), \"GB\")\n\tcase s >= Gibibyte:\n\t\treturn format(s.Gibibytes(), \"GiB\")\n\tcase s%Megabyte == 0:\n\t\treturn format(s.Megabytes(), \"MB\")\n\tcase s >= Mebibyte:\n\t\treturn format(s.Mebibytes(), \"MiB\")\n\tcase s%Kilobyte == 0:\n\t\treturn format(s.Kilobytes(), \"kB\")\n\tcase s >= Kibibyte:\n\t\treturn format(s.Kibibytes(), \"KiB\")\n\t}\n\treturn fmt.Sprintf(\"%dB\", s)\n}",
"func (size *Size) String() string {\n\treturn size.Format(1, SizeScaleBinary)\n}",
"func SizeToHumanReadable(size int64) string {\n\tsuffixes := [...]string{\"B\", \"KiB\", \"MiB\", \"GiB\", \"TiB\", \"PiB\", \"EiB\", \"ZiB\", \"YiB\"}\n\tdsize := float64(size)\n\tvar resultingSuffix string\n\tfor _, suffix := range suffixes {\n\t\tresultingSuffix = suffix\n\t\tif dsize >= 1024 && suffix != \"YiB\" {\n\t\t\tdsize /= 1024\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn fmt.Sprintf(\"%-5.4g %-3s\", dsize, resultingSuffix)\n}",
"func BytesSize(bytes float64, format string, prec int) string {\n\n\tif bytes <= 0 {\n\t\treturn \"0\"\n\t}\n\n\t// Default format is decimal: MB, GB\n\tvalue := 1000.0\n\tresFormat := \"\"\n\n\t// Binary format: MiB, GiB\n\tif format == \"binary\" {\n\t\tvalue = 1024.0\n\t\tresFormat = \"i\"\n\t}\n\n\tif bytes < value {\n\t\tstrRes := strconv.FormatFloat(bytes, 'f', prec, 64)\n\t\treturn strings.TrimSuffix(strRes, \".0\") + \"B\"\n\t}\n\n\tdivider, exp := value, 0\n\tfor n := bytes / value; n >= value; n /= value {\n\t\tdivider *= value\n\t\texp++\n\t}\n\n\tstrRes := strconv.FormatFloat(bytes/divider, 'f', prec, 64)\n\tif prec == 0 {\n\t\t\tstrRes = strings.TrimSuffix(strRes, \".0\")\n\t}\n\n\treturn strRes + fmt.Sprintf(\"%c%sB\", \"KMGTPE\"[exp], resFormat)\n}",
"func toSize(size string) (int, error) {\n\tsize = strings.ToUpper(strings.TrimSpace(size))\n\tfirstLetter := strings.IndexFunc(size, unicode.IsLetter)\n\tif firstLetter == -1 {\n\t\tfirstLetter = len(size)\n\t}\n\n\tbytesString, multiple := size[:firstLetter], size[firstLetter:]\n\tsz, err := strconv.Atoi(bytesString)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to parse size: %v\", err)\n\t}\n\n\tswitch multiple {\n\tcase \"M\", \"MB\", \"MIB\":\n\t\treturn sz * 1 << 20, nil\n\tcase \"K\", \"KB\", \"KIB\":\n\t\treturn sz * 1 << 10, nil\n\tcase \"B\", \"\":\n\t\treturn sz, nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"unknown size suffix: %v\", multiple)\n\t}\n}",
"func HumanizeBytes(size uint64) string {\n\n\tsuffix := []string{\"B\", \"KB\", \"MB\", \"GB\"}\n\n\tf := float64(size)\n\tvar i int\n\tfor i = 0; f >= 1024 && i < len(suffix); i++ {\n\t\tf = f / 1024\n\t}\n\n\tif i == len(suffix) {\n\t\ti = i - 1\n\t}\n\n\treturn fmt.Sprintf(\"%.1f %s\", f, suffix[i])\n}",
"func HumanFileSize(size float32) string {\n\tvar byteString string\n\tif size >= GIGABYTE {\n\t\tsize /= GIGABYTE\n\t\tbyteString = \"G\"\n\t} else if size >= MEGABYTE {\n\t\tsize /= MEGABYTE\n\t\tbyteString = \"M\"\n\t} else if size >= KILOBYTE {\n\t\tsize /= KILOBYTE\n\t\tbyteString = \"K\"\n\t} else {\n\t\tbyteString = \"B\"\n\t}\n\toutput := fmt.Sprintf(\"%7.2f%2s\", size, byteString)\n\treturn output\n}",
"func ConvertSizeToBytes(s string) (string, error) {\n\ts = strings.TrimSpace(strings.ToLower(s))\n\n\t// spin until we find a match, if no match return original string\n\tfor _, k := range units {\n\t\tvar y int = lookupTable[k]\n\t\tif strings.HasSuffix(s, k) {\n\t\t\ts = s[:len(s)-len(k)]\n\t\t\ti, err := strconv.Atoi(s)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\ti = i * Pow(1024, y)\n\t\t\ts = strconv.Itoa(i)\n\t\t\treturn s, nil\n\t\t}\n\t}\n\n\treturn s, nil\n}",
"func StringWithSize(size int) string {\n\treturn stringWithSize(size, charset)\n}",
"func sizeStringer(s uint64, unit string) string {\n\tvar suffix string\n\tvar i int\n\tfor i, suffix = range []string{\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\"} {\n\t\tif unit == suffix {\n\t\t\tbreak\n\t\t}\n\t\tmul := uint64(1) << ((uint64(i) + 1) * 10)\n\t\tif uint64(s) < mul {\n\t\t\tbreak\n\t\t}\n\t}\n\tif i != 0 {\n\t\tresVal := float64(s) / float64(uint64(1)<<(uint64(i)*10))\n\t\treturn fmt.Sprintf(\"%s%s\", strconv.FormatFloat(resVal, 'g', -1, 64), suffix)\n\t}\n\treturn fmt.Sprintf(\"%dB\", s)\n}",
"func humanSizeToBytes(input string) int64 {\n\tInfoLogV1(\"humanSizeToBytes input: %s\", input)\n\tinput = strings.ToUpper(input)\n\tswitch {\n\tcase strings.Contains(input, \"KB\"):\n\t\treturn int64(strToFloat(input, \"KB\") * KB)\n\tcase strings.Contains(input, \"MB\"):\n\t\treturn int64(strToFloat(input, \"MB\") * MB)\n\tcase strings.Contains(input, \"GB\"):\n\t\treturn int64(strToFloat(input, \"GB\") * GB)\n\tcase strings.Contains(input, \"TB\"):\n\t\treturn int64(strToFloat(input, \"TB\") * TB)\n\tcase strings.Contains(input, \"PB\"):\n\t\treturn int64(strToFloat(input, \"PB\") * PB)\n\tcase strings.Contains(input, \"EB\"):\n\t\treturn int64(strToFloat(input, \"EB\") * EB)\n\tdefault:\n\t\treturn int64(strToFloat(input, \"\"))\n\t}\n}",
"func (s SizeMessage) String() string {\n\tvar storageCls string\n\tif s.StorageClass != \"\" {\n\t\tstorageCls = fmt.Sprintf(\" [%s]\", s.StorageClass)\n\t}\n\treturn fmt.Sprintf(\n\t\t\"%s bytes in %d objects: %s%s\",\n\t\ts.humanize(),\n\t\ts.Count,\n\t\ts.Source,\n\t\tstorageCls,\n\t)\n}",
"func formatSize(sizeInBytes int64) string {\n\tsizeInMiB := float64(sizeInBytes) / (1024 * 1024)\n\treturn fmt.Sprintf(\"%.2f MiB\", sizeInMiB)\n}",
"func FormatSize(val int64) string {\n\tif val < 1000 {\n\t\treturn fmt.Sprint(val)\n\t}\n\treturn fmt.Sprint(gorivets.FormatInt64(val, 1000), \"(\", val, \")\")\n}",
"func ByteSized(size int64, precision int, sep string) string {\n\tf := float64(size)\n\ttpl := \"%.\" + strconv.Itoa(precision) + \"f\" + sep\n\n\tswitch {\n\tcase f >= yb:\n\t\treturn fmt.Sprintf(tpl+\"YB\", f/yb)\n\tcase f >= zb:\n\t\treturn fmt.Sprintf(tpl+\"ZB\", f/zb)\n\tcase f >= eb:\n\t\treturn fmt.Sprintf(tpl+\"EB\", f/eb)\n\tcase f >= pb:\n\t\treturn fmt.Sprintf(tpl+\"PB\", f/pb)\n\tcase f >= tb:\n\t\treturn fmt.Sprintf(tpl+\"TB\", f/tb)\n\tcase f >= gb:\n\t\treturn fmt.Sprintf(tpl+\"GB\", f/gb)\n\tcase f >= mb:\n\t\treturn fmt.Sprintf(tpl+\"MB\", f/mb)\n\tcase f >= kb:\n\t\treturn fmt.Sprintf(tpl+\"KB\", f/kb)\n\t}\n\treturn fmt.Sprintf(tpl+\"B\", f)\n}",
"func humanSize(size uint64) string {\n\ti := 0\n\tvar sizef float64\n\tsizef = float64(size)\n\tunits := []string{\"B\", \"kB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\"}\n\tfor sizef >= 1000.0 {\n\t\tsizef = sizef / 1000.0\n\t\ti++\n\t}\n\treturn fmt.Sprintf(\"%.4g %s\", sizef, units[i])\n}",
"func ToByteSize(sizeStr string) (uint64, error) {\n\tsizeStr = strings.TrimSpace(sizeStr)\n\tlastPos := len(sizeStr) - 1\n\tif lastPos < 0 {\n\t\treturn 0, nil\n\t}\n\n\tif sizeStr[lastPos] == 'b' || sizeStr[lastPos] == 'B' {\n\t\t// last second char is k,m,g,t\n\t\tlastSec := sizeStr[lastPos-1]\n\t\tif lastSec > 'A' {\n\t\t\tlastPos--\n\t\t}\n\t} else if IsNumChar(sizeStr[lastPos]) { // not unit suffix. eg: 346\n\t\treturn strconv.ParseUint(sizeStr, 10, 32)\n\t}\n\n\tmultiplier := float64(1)\n\tswitch unicode.ToLower(rune(sizeStr[lastPos])) {\n\tcase 'k':\n\t\tmultiplier = 1 << 10\n\tcase 'm':\n\t\tmultiplier = 1 << 20\n\tcase 'g':\n\t\tmultiplier = 1 << 30\n\tcase 't':\n\t\tmultiplier = 1 << 40\n\tcase 'p':\n\t\tmultiplier = 1 << 50\n\tdefault: // b\n\t\tmultiplier = 1\n\t}\n\n\tsizeNum := strings.TrimSpace(sizeStr[:lastPos])\n\tsize, err := strconv.ParseFloat(sizeNum, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn uint64(size * multiplier), nil\n}",
"func FileSize(s int64) string {\n\tsizes := []string{\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\"}\n\treturn humanateBytes(uint64(s), 1024, sizes)\n}",
"func FileSize(s int64) string {\n\tsizes := []string{\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\"}\n\treturn humanateBytes(uint64(s), 1024, sizes)\n}",
"func (s Size) Kibibytes() float64 { return float64(s) / float64(Kibibyte) }",
"func (s FileSystemSize) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func Size(file string) string {\n\treturn \"\"\n}",
"func ConvertSize(ssize string) (uint, error) {\n\tssize = strings.ToLower(strings.TrimSpace(ssize))\n\tparts := strings.Split(ssize, \" \")\n\n\tfsize, err := strconv.ParseFloat(parts[0], 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tswitch parts[1] {\n\tcase \"kb\":\n\t\tfsize *= convertKB\n\t\tbreak\n\tcase \"mb\":\n\t\tfsize *= convertMB\n\t\tbreak\n\tcase \"gb\":\n\t\tfsize *= convertGB\n\t\tbreak\n\t}\n\n\treturn uint(fsize), nil\n}",
"func humanizeBytes(s uint64) string {\n\tif s < 10 {\n\t\treturn fmt.Sprintf(\"%dB\", s)\n\t}\n\tconst base = 1000\n\tsizes := []string{\"B\", \"kB\", \"MB\", \"GB\", \"TB\"}\n\te := math.Floor(math.Log(float64(s)) / math.Log(base))\n\tsuffix := sizes[int(e)]\n\tval := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10\n\tf := \"%.0f%s\"\n\tif val < 10 {\n\t\tf = \"%.1f%s\"\n\t}\n\treturn fmt.Sprintf(f, val, suffix)\n}",
"func (s String) Size() int { return binary.Size(s) }",
"func FormatByte(s uint64) string {\n\treturn humanFormat(s, 1000, siSizes)\n}",
"func byteCountToHuman(b int64) string {\n\tconst unit = 1000\n\tif b < unit {\n\t\treturn fmt.Sprintf(\"%dB\", b)\n\t}\n\tdiv, exp := int64(unit), 0\n\tfor n := b / unit; n >= unit; n /= unit {\n\t\tdiv *= unit\n\t\texp++\n\t}\n\treturn fmt.Sprintf(\"%.1f%cB\", float64(b)/float64(div), \"kMGTPE\"[exp])\n}",
"func (c *ContainerContext) Size() string {\n\tif c.FieldsUsed == nil {\n\t\tc.FieldsUsed = map[string]interface{}{}\n\t}\n\tc.FieldsUsed[\"Size\"] = struct{}{}\n\tsrw := units.HumanSizeWithPrecision(float64(c.c.SizeRw), 3)\n\tsv := units.HumanSizeWithPrecision(float64(c.c.SizeRootFs), 3)\n\n\tsf := srw\n\tif c.c.SizeRootFs > 0 {\n\t\tsf = fmt.Sprintf(\"%s (virtual %s)\", srw, sv)\n\t}\n\treturn sf\n}",
"func FormatFileSize(sizeBytes int64) string {\n\tswitch {\n\tcase sizeBytes >= 1<<40:\n\t\treturn strconv.FormatInt(sizeBytes/Terabyte, 10) + \"tb\"\n\tcase sizeBytes >= 1<<30:\n\t\treturn strconv.FormatInt(sizeBytes/Gigabyte, 10) + \"gb\"\n\tcase sizeBytes >= 1<<20:\n\t\treturn strconv.FormatInt(sizeBytes/Megabyte, 10) + \"mb\"\n\tcase sizeBytes >= 1<<10:\n\t\treturn strconv.FormatInt(sizeBytes/Kilobyte, 10) + \"kb\"\n\t}\n\treturn strconv.FormatInt(sizeBytes, 10)\n}",
"func (d *Dataset) HumanSize() string {\n\treturn digital.FormatBytes(uint64(d.Size()))\n}",
"func HumanSize(size int64) string {\n\tunits := []string{\"bytes\", \"KB\", \"MB\", \"GB\"}\n\ts, i := float64(size), 0\n\tfor s >= 1024 && i < len(units)-1 {\n\t\ts /= 1024\n\t\ti++\n\t}\n\treturn fmt.Sprintf(\"%.2f %s\", s, units[i])\n}",
"func sizeInMb(size resource.Quantity) int64 {\n\tactualSize, _ := size.AsInt64()\n\tactualSize = actualSize / (1024 * 1024)\n\treturn actualSize\n}",
"func HumanSize(size int) string {\n\tsuffix := HumanSizeSuffix(size)\n\treturn fmt.Sprintf(\"%d %s\", size, suffix)\n}",
"func (o SkuOutput) Size() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Sku) *string { return v.Size }).(pulumi.StringPtrOutput)\n}",
"func (o *StorageFlexFlashVirtualDrive) GetSize() string {\n\tif o == nil || o.Size == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Size\n}",
"func FileSize(filename string) string {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn \"Openfile error\"\n\t}\n\tdefer f.Close()\n\n\tif fi, err := f.Stat(); err == nil {\n\t\treturn Humanity(fi.Size())\n\t}\n\treturn \"file stat error\"\n}",
"func (s Size) Tebibytes() float64 { return float64(s) / float64(Tebibyte) }",
"func TransformSize(n uint64) string {\n\tswitch {\n\tcase n < 1024:\n\t\treturn strconv.FormatUint(n, 10) + \"B\"\n\tcase n < 1048576 && n > 1024:\n\t\ttmp := float64(n) / 1024\n\t\treturn strconv.FormatUint(uint64(math.Round(tmp)), 10) + \"KiB\"\n\tcase n < 1073741824:\n\t\ttmp := float64(n) / 1048576\n\t\treturn strconv.FormatUint(uint64(math.Round(tmp)), 10) + \"MiB\"\n\tdefault:\n\t\ttmp := float64(n) / 1073741824\n\t\treturn strconv.FormatUint(uint64(math.Round(tmp)), 10) + \"GiB\"\n\t}\n}",
"func formatSize(i int64) string {\n\tunits := \"=KMGTPEZY\"\n\tn := float64(i)\n\tfor n >= 1024 {\n\t\tn /= 1024\n\t\tunits = units[1:]\n\t}\n\tif units[0] == '=' {\n\t\treturn fmt.Sprintf(\"%dB\", int(n))\n\t} else {\n\t\treturn fmt.Sprintf(\"%0.1f%ciB\", n, units[0])\n\t}\n}",
"func FormatSize(size int64, unit SizeUnit, fractionDigits int) string {\n\tif unit == SizeUnitAuto {\n\t\tswitch {\n\t\tcase size < 1000:\n\t\t\tunit = SizeUnitByte\n\t\tcase size < 1000<<10:\n\t\t\tunit = SizeUnitKB\n\t\tcase size < 1024<<20:\n\t\t\tunit = SizeUnitMB\n\t\tcase size < 1024<<30:\n\t\t\tunit = SizeUnitGB\n\t\tcase size < 1024<<40:\n\t\t\tunit = SizeUnitTB\n\t\tcase size < 1024<<50:\n\t\t\tunit = SizeUnitPB\n\t\tdefault:\n\t\t\tunit = SizeUnitEB\n\t\t}\n\t}\n\n\tif unit == SizeUnitByte {\n\t\treturn fmt.Sprint(size, \" \"+SizeUnitByte)\n\t}\n\n\tvar divisor float64\n\tswitch unit {\n\tcase SizeUnitKB:\n\t\tdivisor = 1 << 10\n\tcase SizeUnitMB:\n\t\tdivisor = 1 << 20\n\tcase SizeUnitGB:\n\t\tdivisor = 1 << 30\n\tcase SizeUnitTB:\n\t\tdivisor = 1 << 40\n\tcase SizeUnitPB:\n\t\tdivisor = 1 << 50\n\tdefault:\n\t\tdivisor = 1 << 60\n\t}\n\n\treturn fmt.Sprintf(\"%.[1]*f %s\", fractionDigits, float64(size)/divisor, unit)\n}",
"func (o *StoragePhysicalDisk) GetSize() string {\n\tif o == nil || o.Size == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Size\n}",
"func (o *StoragePhysicalDiskAllOf) GetSize() string {\n\tif o == nil || o.Size == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Size\n}",
"func (o LookupImageResultOutput) ArchiveSizeBytes() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupImageResult) string { return v.ArchiveSizeBytes }).(pulumi.StringOutput)\n}",
"func (o *StorageFlexUtilVirtualDrive) GetSize() string {\n\tif o == nil || o.Size == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Size\n}",
"func (o DeploymentArtifactOutput) SizeBytes() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *DeploymentArtifact) pulumi.IntOutput { return v.SizeBytes }).(pulumi.IntOutput)\n}",
"func PrettySize(nBytes int64) string {\n\tif nBytes < 1024 {\n\t\treturn fmt.Sprintf(\"%dB\", nBytes)\n\t}\n\n\tvar unit string\n\tvar sizeDbl float32 = float32(nBytes)\n\n\tif sizeDbl/1024 > 1 {\n\t\tunit = \"KB\"\n\t\tsizeDbl /= 1024\n\t}\n\tif sizeDbl/1024 > 1 {\n\t\tunit = \"MB\"\n\t\tsizeDbl /= 1024\n\t}\n\tif sizeDbl/1024 > 1 {\n\t\tunit = \"GB\"\n\t\tsizeDbl /= 1024\n\t}\n\tif sizeDbl/1024 > 1 {\n\t\tunit = \"TB\"\n\t\tsizeDbl /= 1024\n\t}\n\treturn fmt.Sprintf(\"%.2f%s\", sizeDbl, unit)\n\n}",
"func (sc QingStorageClass) FormatVolumeSizeByte(sizeByte int64) int64 {\n\tif sizeByte <= sc.GetMinSizeByte() {\n\t\treturn sc.GetMinSizeByte()\n\t} else if sizeByte > sc.GetMaxSizeByte() {\n\t\treturn sc.GetMaxSizeByte()\n\t}\n\tif sizeByte%sc.GetStepSizeByte() != 0 {\n\t\tsizeByte = (sizeByte/sc.GetStepSizeByte() + 1) * sc.GetStepSizeByte()\n\t}\n\tif sizeByte > sc.GetMaxSizeByte() {\n\t\treturn sc.GetMaxSizeByte()\n\t}\n\treturn sizeByte\n}",
"func UINT64ByteCountDecimal(b uint64) string {\n\tconst unit = 1024\n\tif b < unit {\n\t\treturn fmt.Sprintf(\"%d B\", b)\n\t}\n\tdiv, exp := uint64(unit), 0\n\tfor n := b / unit; n >= unit; n /= unit {\n\t\tdiv *= unit\n\t\texp++\n\t}\n\treturn fmt.Sprintf(\"%.1f %cB\", float64(b)/float64(div), \"kMGTPE\"[exp])\n}",
"func (s Size) Mebibytes() float64 { return float64(s) / float64(Mebibyte) }",
"func ParseByteCount(s string) (ByteCount, error) {\n\tvar v ByteCount\n\tif _, err := fmt.Sscanf(s, \"%s\", &v); err != nil {\n\t\treturn 0, fmt.Errorf(\"invalid byte count: %s: %w\", s, err)\n\t}\n\treturn v, nil\n}",
"func convertSizeToBytes(dataSize string) (float64, error) {\n\tvar size float64\n\n\tswitch {\n\tcase strings.HasSuffix(dataSize, \"Ti\"):\n\t\t_, err := fmt.Sscanf(dataSize, \"%fTi\", &size)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn size * (1 << 40), nil\n\tcase strings.HasSuffix(dataSize, \"Gi\"):\n\t\t_, err := fmt.Sscanf(dataSize, \"%fGi\", &size)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn size * (1 << 30), nil\n\tcase strings.HasSuffix(dataSize, \"Mi\"):\n\t\t_, err := fmt.Sscanf(dataSize, \"%fMi\", &size)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn size * (1 << 20), nil\n\tcase strings.HasSuffix(dataSize, \"Ki\"):\n\t\t_, err := fmt.Sscanf(dataSize, \"%fKi\", &size)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn size * (1 << 10), nil\n\tdefault:\n\t\t_, err := fmt.Sscanf(dataSize, \"%fB\", &size)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn size, nil\n\n\t}\n}",
"func humanBytes(byteCount uint64) string {\n\tconst unit = 1024\n\tif byteCount < unit {\n\t\treturn fmt.Sprintf(\"%d B\", byteCount)\n\t}\n\n\tdiv, exp := int64(unit), 0\n\tfor n := byteCount / unit; n >= unit; n /= unit {\n\t\tdiv *= unit\n\t\texp++\n\t}\n\n\treturn fmt.Sprintf(\"%.1f %ciB\", float64(byteCount)/float64(div), \"KMGTPE\"[exp])\n}",
"func BytesToBinarySize(bytes float64) string {\n\n\tif bytes <= 0 {\n\t\treturn \"0\"\n\t}\n\n\tvar\tunit string\n\tvar res float64\n\n\tswitch {\n\tcase bytes >= EiB:\n\t\tunit = \"EiB\"\n\t\tres = bytes / EiB\n\tcase bytes >= PiB:\n\t\tunit = \"PiB\"\n\t\tres = bytes / PiB\n\tcase bytes >= TiB:\n\t\tunit = \"TiB\"\n\t\tres = bytes / TiB\n\tcase bytes >= GiB:\n\t\tunit = \"GiB\"\n\t\tres = bytes / GiB\n\tcase bytes >= MiB:\n\t\tunit = \"MiB\"\n\t\tres = bytes / MiB\n\tcase bytes >= KiB:\n\t\tunit = \"KiB\"\n\t\tres = bytes / KiB\n\tcase bytes >= BYTE:\n\t\tunit = \"B\"\n\t\tres = bytes\n\t}\n\n\tstrRes := strconv.FormatFloat(res, 'f', 1, 64)\n\tstrRes = strings.TrimSuffix(strRes, \".0\")\n\n\treturn strRes + unit\n}",
"func (s Size) Kilobytes() float64 { return float64(s) / float64(Kilobyte) }",
"func ConvertSizeToBytes64(s string) (string, error) {\n\ts = strings.TrimSpace(strings.ToLower(s))\n\n\t// spin until we find a match, if no match return original string\n\tfor _, k := range units {\n\t\tvar y int = lookupTable[k]\n\t\tif strings.HasSuffix(s, k) {\n\t\t\ts = s[:len(s)-len(k)]\n\t\t\ti, err := strconv.ParseInt(s, 10, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\ti = i * Pow64(1024, y)\n\t\t\ts = strconv.FormatInt(i, 10)\n\t\t\treturn s, nil\n\t\t}\n\t}\n\n\treturn s, nil\n}",
"func ShortByte(sz int64) string {\n\tunitMap := map[string]int64{\n\t\t\"mib\": 1 << 20,\n\t\t\"gib\": 1 << 30,\n\t\t\"tib\": 1 << 40,\n\t\t\"pib\": 1 << 50,\n\t\t\"eib\": 1 << 60,\n\t}\n\n\tif sz < unitMap[\"mib\"] {\n\t\treturn fmt.Sprintf(\"%.2f KiB\", float64(sz)/float64(1024))\n\t}\n\tif sz < unitMap[\"gib\"] {\n\t\treturn fmt.Sprintf(\"%.2f MiB\", float64(sz)/float64(unitMap[\"mib\"]))\n\t}\n\tif sz < unitMap[\"tib\"] {\n\t\treturn fmt.Sprintf(\"%.2f GiB\", float64(sz)/float64(unitMap[\"gib\"]))\n\t}\n\tif sz < unitMap[\"pib\"] {\n\t\treturn fmt.Sprintf(\"%.2f TiB\", float64(sz)/float64(unitMap[\"tib\"]))\n\t}\n\tif sz < unitMap[\"eib\"] {\n\t\treturn fmt.Sprintf(\"%.2f PiB\", float64(sz)/float64(unitMap[\"pib\"]))\n\t}\n\n\treturn fmt.Sprintf(\"%.2f EiB\", float64(sz)/float64(unitMap[\"eib\"]))\n\n}",
"func UINT32ByteCountDecimal(b uint32) string {\n\tconst unit = 1024\n\tif b < unit {\n\t\treturn fmt.Sprintf(\"%d B\", b)\n\t}\n\tdiv, exp := uint32(unit), 0\n\tfor n := b / unit; n >= unit; n /= unit {\n\t\tdiv *= unit\n\t\texp++\n\t}\n\treturn fmt.Sprintf(\"%.1f %cB\", float32(b)/float32(div), \"kMGTPE\"[exp])\n}",
"func (o DatabaseReplicaOutput) Size() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DatabaseReplica) pulumi.StringPtrOutput { return v.Size }).(pulumi.StringPtrOutput)\n}",
"func (o LookupImageResultOutput) DiskSizeGb() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupImageResult) string { return v.DiskSizeGb }).(pulumi.StringOutput)\n}",
"func SizeOf(v interface{}, options ...EncDecOption) int {\n\tbs, err := Encode(v, options...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn len(bs)\n}",
"func Bytes(s int64) string {\n\tsizes := []string{\"B\", \"K\", \"M\", \"G\", \"T\", \"P\", \"E\"}\n\treturn humanateBytes(s, 1000, sizes)\n}",
"func Bytes(s uint64) string {\n\tsizes := []string{\"B\", \"kB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\"}\n\treturn humanateBytes(s, 1024, sizes)\n}",
"func (s Size) Pebibytes() float64 { return float64(s) / float64(Pebibyte) }",
"func convertQuantityToQemuSize(size resource.Quantity) string {\n\tint64Size, asInt := size.AsInt64()\n\tif !asInt {\n\t\tsize.AsDec().SetScale(0)\n\t\treturn size.AsDec().String()\n\t}\n\treturn strconv.FormatInt(int64Size, 10)\n}",
"func (o SavedAttachedDiskResponseOutput) DiskSizeGb() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SavedAttachedDiskResponse) string { return v.DiskSizeGb }).(pulumi.StringOutput)\n}",
"func (s StringFileInfo) Size() int { return binary.Size(s) }",
"func (o SkuResponseOutput) Size() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v SkuResponse) *string { return v.Size }).(pulumi.StringPtrOutput)\n}",
"func (s Size) Byte() uint64 {\n\treturn uint64(s)\n}",
"func (o AttachedDiskResponseOutput) DiskSizeGb() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AttachedDiskResponse) string { return v.DiskSizeGb }).(pulumi.StringOutput)\n}",
"func (o *StorageFlexFlashVirtualDrive) SetSize(v string) {\n\to.Size = &v\n}",
"func bytesToHuman(src uint64) string {\n\tif src < 10 {\n\t\treturn fmt.Sprintf(\"%d B\", src)\n\t}\n\n\ts := float64(src)\n\tbase := float64(1024)\n\tsizes := []string{\"B\", \"KiB\", \"MiB\", \"GiB\", \"TiB\", \"PiB\", \"EiB\"}\n\n\te := math.Floor(math.Log(s) / math.Log(base))\n\tsuffix := sizes[int(e)]\n\tval := math.Floor(s/math.Pow(base, e)*10+0.5) / 10\n\tf := \"%.0f %s\"\n\tif val < 10 {\n\t\tf = \"%.1f %s\"\n\t}\n\n\treturn fmt.Sprintf(f, val, suffix)\n}",
"func Size(format string) (int, error) {\n\tp, err := newState(format)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn p.Size()\n}",
"func DirSizeByte(path string) uint64 {\n\tdirSize = 0\n\tfilepath.Walk(path, readSize)\n\treturn dirSize\n}",
"func Bytes(n int64, decimals int, long bool) string {\n\tpadding := \" \"\n\tlevels := []string{\n\t\t\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", /* \"ZB\", \"YB\" will overflow int64 */\n\t}\n\tif long {\n\t\tlevels = []string{\n\t\t\t\"bytes\", \"kilobyte\", \"megabyte\", \"gigabyte\", \"terabyte\", \"petabyte\", \"exabyte\",\n\t\t}\n\t}\n\n\treturn human(n, levels, 1024, decimals, padding)\n}",
"func (o *StorageFlexUtilVirtualDrive) SetSize(v string) {\n\to.Size = &v\n}",
"func (bc ByteCount) String() string {\n\treturn fmt.Sprintf(\"% .1s\", bc)\n}",
"func BytesToString(numBytes int64) string {\n\tbyteModifier := []string{\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"EB\", \"ZB\"}\n\n\tdigits := make([]int, 0)\n\tfor numBytes > 0 {\n\t\td := int(numBytes % 1024)\n\t\tdigits = append(digits, d)\n\t\tnumBytes = numBytes / 1024\n\t}\n\n\t// which digit is the most significant?\n\tmsd := len(digits)\n\tif msd >= len(byteModifier) {\n\t\t// This number is so large that we don't have a modifier\n\t\t// for it.\n\t\treturn fmt.Sprintf(\"%dB\", numBytes)\n\t}\n\treturn fmt.Sprintf(\"%d%s\", digits[msd], byteModifier[msd])\n}",
"func (s Size) Bytes() uint64 { return uint64(s) }",
"func HumanizeBytes(bytes float32) string {\n\tif bytes < 1000000 { //if we have less than 1MB in bytes convert to KB\n\t\tpBytes := fmt.Sprintf(\"%.2f\", bytes/1024)\n\t\tpBytes = pBytes + \" KB\"\n\t\treturn pBytes\n\t}\n\tbytes = bytes / 1024 / 1024 //Converting bytes to a useful measure\n\tif bytes > 1024 {\n\t\tpBytes := fmt.Sprintf(\"%.2f\", bytes/1024)\n\t\tpBytes = pBytes + \" GB\"\n\t\treturn pBytes\n\t}\n\tpBytes := fmt.Sprintf(\"%.2f\", bytes) //If not too big or too small leave it as MB\n\tpBytes = pBytes + \" MB\"\n\treturn pBytes\n}",
"func (o SkuPtrOutput) Size() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Sku) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Size\n\t}).(pulumi.StringPtrOutput)\n}",
"func StringSize(s string) uint32 {\n\treturn 2 + uint32(len(s))\n}",
"func (ftp *FTP) Size(path string) (size int, err error) {\n\tline, err := ftp.cmd(\"213\", \"SIZE %s\", path)\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn strconv.Atoi(line[4 : len(line)-2])\n}",
"func (s Size) Gibibytes() float64 { return float64(s) / float64(Gibibyte) }",
"func (f *Font) BytesSize(b []byte) image.Point {\n\treturn image.Pt(f.BytesWidth(b), f.Height)\n}",
"func Uint8StringSize(v uint8) uint8 {\n\tswitch true {\n\tcase v > 99:\n\t\treturn 3\n\tcase v > 9:\n\t\treturn 2\n\tdefault:\n\t\treturn 1\n\t}\n}",
"func ByteFormat(bytes float64) string {\n\tif bytes >= 1024*1024 {\n\t\treturn fmt.Sprintf(\"%.f MB\", bytes/1024/1024)\n\t}\n\treturn fmt.Sprintf(\"%.f KB\", bytes/1024)\n}",
"func (o *JsonEnvironment) SetSize(v string) {\n\to.Size = &v\n}"
] | [
"0.7148524",
"0.69198364",
"0.671273",
"0.67092806",
"0.6649801",
"0.6649801",
"0.66310406",
"0.66257006",
"0.66198426",
"0.6602792",
"0.64785177",
"0.64743775",
"0.64439887",
"0.64107174",
"0.6199427",
"0.6164231",
"0.61463237",
"0.6094446",
"0.60875374",
"0.6059895",
"0.6046804",
"0.6038021",
"0.6037338",
"0.6003945",
"0.5922596",
"0.59082806",
"0.59004015",
"0.58620065",
"0.5832488",
"0.58042634",
"0.5767012",
"0.5767012",
"0.5753083",
"0.57292163",
"0.5713299",
"0.5702402",
"0.5627402",
"0.5589863",
"0.5587746",
"0.55846906",
"0.55712706",
"0.5544334",
"0.554323",
"0.5533418",
"0.5513405",
"0.5491321",
"0.54806906",
"0.5472007",
"0.54717636",
"0.5465745",
"0.5452757",
"0.5449878",
"0.5446788",
"0.54394",
"0.5435944",
"0.54349035",
"0.54209214",
"0.54165965",
"0.5396171",
"0.5385256",
"0.53710675",
"0.5367397",
"0.53592825",
"0.5358081",
"0.53450644",
"0.53416836",
"0.53294814",
"0.5322378",
"0.5313538",
"0.52936864",
"0.5288015",
"0.52870214",
"0.5286139",
"0.52829117",
"0.5281406",
"0.5280322",
"0.5253352",
"0.52351665",
"0.52260435",
"0.521548",
"0.5208535",
"0.5204103",
"0.52006406",
"0.51985645",
"0.5196116",
"0.5179743",
"0.51779735",
"0.5151675",
"0.51456887",
"0.5144581",
"0.5128531",
"0.5128186",
"0.5125977",
"0.5123936",
"0.5121646",
"0.51193905",
"0.5107656",
"0.51032853",
"0.50939596",
"0.5083037"
] | 0.5518703 | 44 |
Equals checks two matches for equality | func (m *Match) Equals(other *Match) bool {
if m == nil && other == nil {
return true
} else if m == nil {
return false
} else if other == nil {
return false
}
return m.PC == other.PC &&
m.StartLine == other.StartLine &&
m.StartColumn == other.StartColumn &&
m.EndLine == other.EndLine &&
m.EndColumn == other.EndColumn &&
bytes.Equal(m.Bytes, other.Bytes)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func TestEquals(t *testing.T) {\n\tt.Parallel()\n\tfor ti, tt := range []struct {\n\t\tm1, m2 MatrixExp\n\t\teq bool\n\t}{\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralZeros(1, 1),\n\t\t\teq: true,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralZeros(1, 10),\n\t\t\teq: false,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(10, 1),\n\t\t\tm2: GeneralZeros(1, 1),\n\t\t\teq: false,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralOnes(1, 1),\n\t\t\teq: false,\n\t\t},\n\t} {\n\t\tif v := Equals(tt.m1, tt.m2); v != tt.eq {\n\t\t\tt.Errorf(\"%d: Equals(%v,%v) equals %v, want %v\", ti, tt.m1, tt.m2, v, tt.eq)\n\t\t}\n\t}\n}",
"func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {}",
"func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (r *RegexpObject) equal(e *RegexpObject) bool {\n\treturn r.ToString() == r.ToString()\n}",
"func main() {\n\temp1 := Employee{}\n\temp1.Name=\"Gowtham\"\n\n\temp2 := Employee{}\n\temp2.Name=\"Gowtham\"\n\n\tprintln(\"the emp1 and emp2 are equal ?\" , emp1 == emp2)\n}",
"func (seq SeqEq[S, T]) Equal(a, b S) bool {\n\tseqA := a\n\tseqB := b\n\tfor !seq.Seq.IsVoid(seqA) && !seq.Seq.IsVoid(seqB) {\n\t\theadA := seq.Seq.Head(seqA)\n\t\theadB := seq.Seq.Head(seqB)\n\t\tif headA == nil || headB == nil || !seq.Eq.Equal(*headA, *headB) {\n\t\t\treturn false\n\t\t}\n\n\t\tseqA = seq.Seq.Tail(seqA)\n\t\tseqB = seq.Seq.Tail(seqB)\n\t}\n\n\treturn seq.Seq.IsVoid(seqA) && seq.Seq.IsVoid(seqB)\n}",
"func (h *HeaderMatch) Equal(o *HeaderMatch) bool {\n\tif h.Mismatch != o.Mismatch ||\n\t\th.Name != o.Name ||\n\t\th.Value != o.Value ||\n\t\t!h.Secret.Equal(o.Secret) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func Equal(g1, g2 InstanceGroup) bool {\n\tif g1 == g2 {\n\t\treturn true\n\t}\n\n\tif g1.App() != g2.App() {\n\t\treturn false\n\t}\n\n\tif g1.Account() != g2.Account() {\n\t\treturn false\n\t}\n\n\tr1, ok1 := g1.Region()\n\tr2, ok2 := g2.Region()\n\tif ok1 != ok2 {\n\t\treturn false\n\t}\n\n\tif ok1 && (r1 != r2) {\n\t\treturn false\n\t}\n\n\ts1, ok1 := g1.Stack()\n\ts2, ok2 := g2.Stack()\n\n\tif ok1 != ok2 {\n\t\treturn false\n\t}\n\n\tif ok1 && (s1 != s2) {\n\t\treturn false\n\t}\n\n\tc1, ok1 := g1.Cluster()\n\tc2, ok2 := g2.Cluster()\n\n\tif ok1 != ok2 {\n\t\treturn false\n\t}\n\n\tif ok1 && (c1 != c2) {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func Equals(a, b interface{}) bool {\n\treturn neogointernal.Opcode2(\"EQUAL\", a, b).(bool)\n}",
"func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase int:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase int64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float32:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase string, byte:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (t Token) Equal(t2 Token) bool {\n\tif t.TokenType == t2.TokenType && bytes.Equal(t.Data, t2.Data) && len(t.Args) == len(t2.Args) {\n\t\tfor i := 0; i < len(t.Args); i++ {\n\t\t\tif t.Args[i].TokenType != t2.Args[i].TokenType || !bytes.Equal(t.Args[i].Data, t2.Args[i].Data) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}",
"func (c clock) equals(other clock) bool {\n\treturn reflect.DeepEqual(c, other)\n}",
"func eq(o1, o2 interface{}) bool {\n\n\tf1, ok1 := ToFloat(o1)\n\tf2, ok2 := ToFloat(o2)\n\tif ok1 && ok2 {\n\t\treturn f1 == f2\n\t}\n\n\tb1, ok1 := ToBool(o1)\n\tb2, ok1 := ToBool(o2)\n\tif ok1 && ok2 {\n\t\treturn b1 == b2\n\t}\n\n\treturn o1 == o2\n}",
"func compareEquality(expected, actual interface{}) bool {\n\n\tif expected == nil || actual == nil {\n\t\treturn expected == actual\n\t}\n\n\tif reflect.DeepEqual(expected, actual) {\n\t\treturn true\n\t}\n\n\texpectedValue := reflect.ValueOf(expected)\n\tactualValue := reflect.ValueOf(actual)\n\n\tif expectedValue == actualValue {\n\t\treturn true\n\t}\n\n\t// Attempt comparison after type conversion\n\tif actualValue.Type().ConvertibleTo(expectedValue.Type()) && expectedValue == actualValue.Convert(expectedValue.Type()) {\n\t\treturn true\n\t}\n\n\t// Last ditch effort\n\tif fmt.Sprintf(\"%#v\", expected) == fmt.Sprintf(\"%#v\", actual) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (chatRoom *ChatRoom) equals(cr *ChatRoom) bool {\n if chatRoom.name != cr.name {\n return false\n } else if chatRoom.connectionCount != cr.connectionCount {\n return false\n }\n for guestKey , guestsValue := range chatRoom.guests {\n guest := cr.guests[guestKey]\n if !guest.equals(&guestsValue) {\n return false\n }\n }\n return true\n}",
"func Equal(s1, s2 Set) bool {\n\tif Same(s1, s2) {\n\t\treturn true\n\t}\n\n\tif len(s1) != len(s2) {\n\t\treturn false\n\t}\n\n\tfor e := range s1 {\n\t\tif _, ok := s2[e]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func (s *suite) equals(s1 *suite) bool {\n\tif s == nil || s1 == nil {\n\t\treturn false\n\t}\n\treturn s.Name == s1.Name\n}",
"func (s *Set) Equals(s2 *Set) bool {\n\treturn reflect.DeepEqual(s.set, s2.set)\n}",
"func Same(expected, actual interface{}) Truth {\n\tmustBeCleanStart()\n\treturn Truth{\n\t\tValue: nice(expected) == nice(actual) && reflect.DeepEqual(actual, expected),\n\t\tDump:fmt.Sprintf(\"%#v\", actual),\n\t}\n}",
"func equals(t types.Type, x, y value) bool {\n\tswitch x := x.(type) {\n\tcase bool:\n\t\treturn x == y.(bool)\n\tcase int:\n\t\treturn x == y.(int)\n\tcase int8:\n\t\treturn x == y.(int8)\n\tcase int16:\n\t\treturn x == y.(int16)\n\tcase int32:\n\t\treturn x == y.(int32)\n\tcase int64:\n\t\treturn x == y.(int64)\n\tcase uint:\n\t\treturn x == y.(uint)\n\tcase uint8:\n\t\treturn x == y.(uint8)\n\tcase uint16:\n\t\treturn x == y.(uint16)\n\tcase uint32:\n\t\treturn x == y.(uint32)\n\tcase uint64:\n\t\treturn x == y.(uint64)\n\tcase uintptr:\n\t\treturn x == y.(uintptr)\n\tcase float32:\n\t\treturn x == y.(float32)\n\tcase float64:\n\t\treturn x == y.(float64)\n\tcase complex64:\n\t\treturn x == y.(complex64)\n\tcase complex128:\n\t\treturn x == y.(complex128)\n\tcase string:\n\t\treturn x == y.(string)\n\tcase *value:\n\t\treturn x == y.(*value)\n\tcase chan value:\n\t\treturn x == y.(chan value)\n\tcase structure:\n\t\treturn x.eq(t, y)\n\tcase array:\n\t\treturn x.eq(t, y)\n\tcase iface:\n\t\treturn x.eq(t, y)\n\tcase rtype:\n\t\treturn x.eq(t, y)\n\t}\n\n\t// Since map, func and slice don't support comparison, this\n\t// case is only reachable if one of x or y is literally nil\n\t// (handled in eqnil) or via interface{} values.\n\tpanic(fmt.Sprintf(\"comparing uncomparable type %s\", t))\n}",
"func (s *StorageSuite) TestServersEquality(c *check.C) {\n\tservers := Servers{{\n\t\tAdvertiseIP: \"192.168.1.1\",\n\t\tHostname: \"node-1\",\n\t\tRole: \"worker\",\n\t}}\n\ttestCases := []struct {\n\t\tservers Servers\n\t\tresult bool\n\t\tcomment string\n\t}{\n\t\t{\n\t\t\tservers: Servers{{\n\t\t\t\tAdvertiseIP: \"192.168.1.1\",\n\t\t\t\tHostname: \"node-1\",\n\t\t\t\tRole: \"worker\",\n\t\t\t}},\n\t\t\tresult: true,\n\t\t\tcomment: \"Servers should be equal\",\n\t\t},\n\t\t{\n\t\t\tservers: Servers{\n\t\t\t\t{\n\t\t\t\t\tAdvertiseIP: \"192.168.1.1\",\n\t\t\t\t\tHostname: \"node-1\",\n\t\t\t\t\tRole: \"worker\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tAdvertiseIP: \"192.168.1.2\",\n\t\t\t\t\tHostname: \"node-2\",\n\t\t\t\t\tRole: \"worker\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tresult: false,\n\t\t\tcomment: \"Servers should not be equal: different number of servers\",\n\t\t},\n\t\t{\n\t\t\tservers: Servers{{\n\t\t\t\tAdvertiseIP: \"192.168.1.2\",\n\t\t\t\tHostname: \"node-1\",\n\t\t\t\tRole: \"worker\",\n\t\t\t}},\n\t\t\tresult: false,\n\t\t\tcomment: \"Servers should not be equal: different IPs\",\n\t\t},\n\t\t{\n\t\t\tservers: Servers{{\n\t\t\t\tAdvertiseIP: \"192.168.1.1\",\n\t\t\t\tHostname: \"node-2\",\n\t\t\t\tRole: \"worker\",\n\t\t\t}},\n\t\t\tresult: false,\n\t\t\tcomment: \"Servers should not be equal: different hostnames\",\n\t\t},\n\t\t{\n\t\t\tservers: Servers{{\n\t\t\t\tAdvertiseIP: \"192.168.1.1\",\n\t\t\t\tHostname: \"node-1\",\n\t\t\t\tRole: \"db\",\n\t\t\t}},\n\t\t\tresult: false,\n\t\t\tcomment: \"Servers should not be equal: different roles\",\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tc.Assert(servers.IsEqualTo(tc.servers), check.Equals, tc.result,\n\t\t\tcheck.Commentf(tc.comment))\n\t}\n}",
"func equals(p1, p2 *node) bool {\n\treturn p1.x == p2.x && p1.y == p2.y\n}",
"func Equal(left Value, right Value) bool {\n\t// TODO: Stop-gap for now, this will need to be much more sophisticated.\n\treturn CoerceString(left) == CoerceString(right)\n}",
"func (a seriesIDs) equals(other seriesIDs) bool {\n\tif len(a) != len(other) {\n\t\treturn false\n\t}\n\tfor i, s := range other {\n\t\tif a[i] != s {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (m Mapping) equal(other Node) bool {\n\to, ok := other.(Mapping)\n\tif !ok || len(m.pairs) != len(o.pairs) || m.path != o.path {\n\t\treturn false\n\t}\n\tif m.pairs == nil || o.pairs == nil {\n\t\treturn m.pairs == nil && o.pairs == nil\n\t}\n\tfor k, v := range o.pairs {\n\t\tif !equal(m.pairs[k], v) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (LHS GUID) Equals(RHS GUID) bool {\n\tif LHS == Empty && RHS == Empty {\n\t\treturn true\n\t}\n\tif LHS == Empty || RHS == Empty {\n\t\treturn false\n\t}\n\n\tif LHS == RHS { // exact match first...\n\t\treturn true\n\t}\n\n\t// tolerate case mismatches...\n\tif strings.EqualFold(string(LHS), string(RHS)) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func Equal(t1, t2 Token) bool {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tif t1 == nil && t2 == nil {\n\t\treturn true\n\t}\n\n\t// we already checked for t1 == t2 == nil, so safe to do this\n\tif t1 == nil || t2 == nil {\n\t\treturn false\n\t}\n\n\tm1, err := t1.AsMap(ctx)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfor iter := t2.Iterate(ctx); iter.Next(ctx); {\n\t\tpair := iter.Pair()\n\n\t\tv1 := m1[pair.Key.(string)]\n\t\tv2 := pair.Value\n\t\tswitch tmp := v1.(type) {\n\t\tcase time.Time:\n\t\t\ttmp2, ok := v2.(time.Time)\n\t\t\tif !ok {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\ttmp = tmp.Round(0).Truncate(time.Second)\n\t\t\ttmp2 = tmp2.Round(0).Truncate(time.Second)\n\t\t\tif !tmp.Equal(tmp2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\tdefault:\n\t\t\tif v1 != v2 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tdelete(m1, pair.Key.(string))\n\t}\n\n\treturn len(m1) == 0\n}",
"func (this *Base) equals(other Piece) bool {\n\tif this.getName() != other.getName() {\n\t\treturn false\n\t}\n\n\tif this.getKanji() != other.getKanji() {\n\t\treturn false\n\t}\n\n\tif this.getColor() != other.getColor() {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func ObjectsAreEqual(expected, actual interface{}) bool {\n\tif expected == nil || actual == nil {\n\t\treturn expected == actual\n\t}\n\n\texp, ok := expected.([]byte)\n\tif !ok {\n\t\treturn reflect.DeepEqual(expected, actual)\n\t}\n\n\tact, ok := actual.([]byte)\n\tif !ok {\n\t\treturn false\n\t}\n\n\tif exp == nil || act == nil {\n\t\treturn exp == nil && act == nil\n\t}\n\n\treturn bytes.Equal(exp, act)\n}",
"func (n Name) Equal(other Name) bool {\n\treturn string(n) == string(other)\n}",
"func (qt *queryTerm) equals(qt2 *queryTerm) bool {\n\treturn qt.Subject == qt2.Subject &&\n\t\tqt.Object == qt2.Object &&\n\t\treflect.DeepEqual(qt.Predicates, qt2.Predicates)\n}",
"func isEqual(a interface{}, b interface{}) bool {\n\treturn a == b\n}",
"func identicalInstance(xorig Type, xargs []Type, yorig Type, yargs []Type) bool {\n\tif len(xargs) != len(yargs) {\n\t\treturn false\n\t}\n\n\tfor i, xa := range xargs {\n\t\tif !Identical(xa, yargs[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn Identical(xorig, yorig)\n}",
"func TestEqual(t *testing.T) {\n\ttables := []struct {\n\t\tx []string\n\t\ty []string\n\t\texpected bool\n\t}{\n\t\t{[]string{}, []string{}, true},\n\t\t{[]string{}, []string{\"\"}, false},\n\t\t{[]string{\"\"}, []string{\"\"}, true},\n\t\t{[]string{\"\"}, []string{\"a\"}, false},\n\t\t{[]string{\"a\"}, []string{\"a\", \"a\"}, false},\n\t\t{[]string{\"b\"}, []string{\"a\"}, false},\n\t\t{[]string{\"\", \"\", \"\"}, []string{\"\", \"\", \"\"}, true},\n\t\t{[]string{\"a\", \"b\", \"c\"}, []string{\"a\", \"b\", \"e\"}, false},\n\t}\n\n\tfor _, table := range tables {\n\t\tresult := Equal(table.x, table.y)\n\t\tif result != table.expected {\n\t\t\tt.Errorf(\"Match failed for (%s, %s). Expected %t, got %t\",\n\t\t\t\ttable.x, table.y, table.expected, result)\n\t\t}\n\t}\n}",
"func (gvk GVK) IsEqualTo(other GVK) bool {\n\treturn gvk.Group == other.Group &&\n\t\tgvk.Domain == other.Domain &&\n\t\tgvk.Version == other.Version &&\n\t\tgvk.Kind == other.Kind\n}",
"func (o *Echo) IsEqual(other *Echo) bool {\n\treturn o.GetID() == other.GetID()\n}",
"func (a *Mtx) Equals(b *Mtx) bool {\n\tfor i := 0; i < 4; i++ {\n\t\tfor j := 0; j < 4; j++ {\n\t\t\tif a.el[i][j] != b.el[i][j] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}",
"func eq(x, y []string) bool {\n\t// NOTE: set equal\n\treturn sset.Equal(x, y)\n}",
"func (route *Route) Equal(otherRoute Route) bool {\n switch {\n case route.Id != otherRoute.Id:\n return false\n case route.Pattern != otherRoute.Pattern:\n return false\n case len(route.Handlers) != len(otherRoute.Handlers):\n return false\n default:\n for key, value := range route.Handlers {\n if otherValue, found := otherRoute.Handlers[key]; found {\n if value != otherValue {\n return false\n }\n } else {\n return false\n }\n }\n\n return true\n }\n}",
"func Equal(t Testing, expected, actual interface{}, formatAndArgs ...interface{}) bool {\n\tif !AreEqualObjects(expected, actual) {\n\t\treturn Fail(t,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"Expected values are NOT equal.%s\",\n\t\t\t\tdiffValues(expected, actual),\n\t\t\t),\n\t\t\tformatAndArgs...)\n\t}\n\n\treturn true\n}",
"func (l *LabelPair) Equal(o *LabelPair) bool {\n\tswitch {\n\tcase l.Name != o.Name:\n\t\treturn false\n\tcase l.Value != o.Value:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}",
"func equalIds(wantedId, foundId, foundHref string) bool {\n\n\twantedUuid := extractUuid(wantedId)\n\tfoundUuid := \"\"\n\n\tif wantedUuid == \"\" {\n\t\treturn false\n\t}\n\tif foundId != \"\" {\n\t\t// In some entities, the ID is a simple UUID without prefix\n\t\tfoundUuid = extractUuid(foundId)\n\t} else {\n\t\tfoundUuid = extractUuid(foundHref)\n\t}\n\treturn foundUuid == wantedUuid\n}",
"func (a joinedTable) equal(b joinedTable) bool {\n\treturn a.secondaryTable == b.secondaryTable && a.primaryColumn == b.primaryColumn && a.secondaryColumn == b.secondaryColumn\n}",
"func eq(x, y []string) bool {\n\t// NOTE: list equal, not set equal\n\treturn strs.Equal(x, y)\n}",
"func (in *HeaderMatch) DeepEqual(other *HeaderMatch) bool {\n\tif other == nil {\n\t\treturn false\n\t}\n\n\tif in.Mismatch != other.Mismatch {\n\t\treturn false\n\t}\n\tif in.Name != other.Name {\n\t\treturn false\n\t}\n\tif (in.Secret == nil) != (other.Secret == nil) {\n\t\treturn false\n\t} else if in.Secret != nil {\n\t\tif !in.Secret.DeepEqual(other.Secret) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif in.Value != other.Value {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (v *KeyValue_SetValueV2_Result) Equals(rhs *KeyValue_SetValueV2_Result) bool {\n\tif v == nil {\n\t\treturn rhs == nil\n\t} else if rhs == nil {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (u UUID) Equal(u2 UUID) bool {\n\treturn bytes.Equal(u.b, u2.b)\n}",
"func (m Match) Equals(o Match) bool {\n\treturn m.Kind == o.Kind &&\n\t\tm.Behavior == o.Behavior &&\n\t\tm.From.Equals(o.From) &&\n\t\tm.To.Equals(o.To)\n}",
"func Equals(strFirst, strSecond string) bool {\n\treturn strFirst == strSecond\n}",
"func (g *Group) Equal(g2 *Group) bool {\n\tif !commonutils.CompareBeaconIDs(g.ID, g2.ID) {\n\t\treturn false\n\t}\n\tif g.Threshold != g2.Threshold {\n\t\treturn false\n\t}\n\tif g.Period.String() != g2.Period.String() {\n\t\treturn false\n\t}\n\tif g.Len() != g2.Len() {\n\t\treturn false\n\t}\n\tif !bytes.Equal(g.GetGenesisSeed(), g2.GetGenesisSeed()) {\n\t\treturn false\n\t}\n\tif g.TransitionTime != g2.TransitionTime {\n\t\treturn false\n\t}\n\tif g.Scheme.Name != g2.Scheme.Name {\n\t\treturn false\n\t}\n\tfor i := 0; i < g.Len(); i++ {\n\t\tif !g.Nodes[i].Equal(g2.Nodes[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif g.PublicKey != nil {\n\t\tif g2.PublicKey != nil {\n\t\t\t// both keys aren't nil so we verify\n\t\t\treturn g.PublicKey.Equal(g2.PublicKey)\n\t\t}\n\t\t// g is not nil g2 is nil\n\t\treturn false\n\t} else if g2.PublicKey != nil {\n\t\t// g is nil g2 is not nil\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (s *SHA1) Equal(s2 interface{}) bool {\n\tswitch v := s2.(type) {\n\tcase string:\n\t\treturn v == s.String()\n\tcase [20]byte:\n\t\treturn v == s.bytes\n\tcase *SHA1:\n\t\treturn v.bytes == s.bytes\n\t}\n\treturn false\n}",
"func equals(tb testing.TB, got, want interface{}) {\n\ttb.Helper()\n\tif !reflect.DeepEqual(got, want) {\n\t\ttb.Fatalf(\"\\033[31m\\n\\n\\tgot: %#v\\n\\n\\twant: %#v\\033[39m\\n\\n\", got, want)\n\t}\n}",
"func (current GoVersion) Equal(target GoVersion) bool {\n\tcurrent.Raw, target.Raw = \"\", \"\"\n\treturn current == target\n}",
"func equals(current, desired *v1alpha1.App) bool {\n\tif current.Name != desired.Name {\n\t\treturn false\n\t}\n\tif !reflect.DeepEqual(current.Spec, desired.Spec) {\n\t\treturn false\n\t}\n\tif !reflect.DeepEqual(current.Labels, desired.Labels) {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func EqualTokens(a, b *oauth2.Token) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\taTok := \"\"\n\tif a != nil {\n\t\taTok = a.AccessToken\n\t}\n\tbTok := \"\"\n\tif b != nil {\n\t\tbTok = b.AccessToken\n\t}\n\treturn aTok == bTok\n}",
"func Equal(lhs, rhs []string) (rv bool) {\n\tif len(lhs) == len(rhs) {\n\t\tfor i, s := range lhs {\n\t\t\tif s != rhs[i] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\trv = true\n\t}\n\treturn\n}",
"func Equal(t TestingT, expected, actual interface{}, extras ...interface{}) bool {\n\tif !DeepEqual(expected, actual) {\n\t\treturn Errorf(t, \"Expect to be equal\", []labeledOutput{\n\t\t\t{\n\t\t\t\tlabel: labelMessages,\n\t\t\t\tcontent: formatExtras(extras...),\n\t\t\t},\n\t\t\t{\n\t\t\t\tlabel: \"Diff\",\n\t\t\t\tcontent: diff(expected, actual),\n\t\t\t},\n\t\t})\n\t}\n\n\treturn true\n}",
"func (m *Matcher) Match(actual interface{}) (success bool, err error) {\n\t// Nil checks required first here for:\n\t// 1) Nil equality which returns true\n\t// 2) One object nil which returns an error\n\n\tif util.IsNil(actual) && util.IsNil(m.original) {\n\t\treturn true, nil\n\t}\n\tif util.IsNil(actual) || util.IsNil(m.original) {\n\t\treturn false, fmt.Errorf(\"can not compare an object with a nil. original %v , actual %v\", m.original, actual)\n\t}\n\n\t// Calculate diff returns a json diff between the two objects.\n\tm.diff, err = m.calculateDiff(actual)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn bytes.Equal(m.diff, []byte(\"{}\")), nil\n}",
"func (oc *ObjectComprehension) Equal(other Value) bool {\n\treturn Compare(oc, other) == 0\n}",
"func (t *token) Equal(tt *token) bool {\n\treturn t.code == tt.code && t.Text == tt.Text\n}",
"func (r ShardResults) Equal(other ShardResults) bool {\n\tfor shard, result := range r {\n\t\totherResult, ok := r[shard]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\tallSeries := result.AllSeries()\n\t\totherAllSeries := otherResult.AllSeries()\n\t\tif len(allSeries) != len(otherAllSeries) {\n\t\t\treturn false\n\t\t}\n\t\tfor id, series := range allSeries {\n\t\t\totherSeries, ok := otherAllSeries[id]\n\t\t\tif !ok {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tallBlocks := series.Blocks.AllBlocks()\n\t\t\totherAllBlocks := otherSeries.Blocks.AllBlocks()\n\t\t\tif len(allBlocks) != len(otherAllBlocks) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tfor start, block := range allBlocks {\n\t\t\t\totherBlock, ok := otherAllBlocks[start]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\t// Just performing shallow equals so simply compare block addresses\n\t\t\t\tif block != otherBlock {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}",
"func Equal(n1, n2 External) bool {\n\tif n1 == nil && n2 == nil {\n\t\treturn true\n\t} else if n1 == nil || n2 == nil {\n\t\treturn false\n\t}\n\tswitch n1 := n1.(type) {\n\tcase String:\n\t\tn2, ok := n2.(String)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Int:\n\t\tn2, ok := n2.(Int)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Uint:\n\t\tn2, ok := n2.(Uint)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Bool:\n\t\tn2, ok := n2.(Bool)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Float:\n\t\tn2, ok := n2.(Float)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Object:\n\t\tif n2, ok := n2.(Object); ok {\n\t\t\tif len(n1) != len(n2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif pointerOf(n1) == pointerOf(n2) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn n1.EqualObject(n2)\n\t\t}\n\t\tif _, ok := n2.(Node); ok {\n\t\t\treturn false\n\t\t}\n\t\treturn n1.Equal(n2)\n\tcase Array:\n\t\tif n2, ok := n2.(Array); ok {\n\t\t\tif len(n1) != len(n2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn len(n1) == 0 || &n1[0] == &n2[0] || n1.EqualArray(n2)\n\t\t}\n\t\tif _, ok := n2.(Node); ok {\n\t\t\treturn false\n\t\t}\n\t\treturn n1.Equal(n2)\n\tdefault:\n\t\tif Same(n1, n2) {\n\t\t\treturn true\n\t\t}\n\t}\n\tif n, ok := n1.(Node); ok {\n\t\treturn n.Equal(n2)\n\t} else if n, ok = n2.(Node); ok {\n\t\treturn n.Equal(n1)\n\t}\n\treturn equalExt(n1, n2)\n}",
"func regexEqualMatch(mrb *oruby.MrbState, self oruby.Value) oruby.MrbValue {\n\tvar s string\n\targs := mrb.GetArgs()\n\tdest := args.Item(0)\n\tpos := oruby.MrbFixnum(args.ItemDef(1, oruby.MrbFixnumValue(0)))\n\n\tswitch dest.Type() {\n\tcase oruby.MrbTTSymbol:\n\t\ts = mrb.SymString(oruby.MrbSymbol(dest))\n\tcase oruby.MrbTTString:\n\t\ts = mrb.StrToCstr(dest)\n\tdefault:\n\t\treturn oruby.False\n\t}\n\n\tregx := mrb.Data(self).(*regexp.Regexp)\n\treturn oruby.Bool(regx.MatchString(s[pos:]))\n}",
"func Equals(t1, t2 Type) bool {\n\tt1, t2 = t1.Root(), t2.Root()\n\tswitch t1 := t1.(type) {\n\tcase *Variable:\n\t\tr2, ok := t2.(*Variable)\n\t\tif !ok {\n\t\t\treturn occursInType(t1, t2)\n\t\t}\n\t\treturn t1.ID == r2.ID\n\tcase *Operator:\n\t\tt2, ok := t2.(*Operator)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\tif t1.Name != t2.Name {\n\t\t\treturn false\n\t\t}\n\t\tif len(t1.Args) != len(t2.Args) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range t1.Args {\n\t\t\tif !Equals(t1.Args[i], t2.Args[i]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase *TypeError:\n\t\treturn false\n\t}\n\treturn true\n}",
"func Equal(vx, vy interface{}) bool {\n\tif reflect.TypeOf(vx) != reflect.TypeOf(vy) {\n\t\treturn false\n\t}\n\n\tswitch x := vx.(type) {\n\tcase map[string]interface{}:\n\t\ty := vy.(map[string]interface{})\n\n\t\tif len(x) != len(y) {\n\t\t\treturn false\n\t\t}\n\n\t\tfor k, v := range x {\n\t\t\tval2 := y[k]\n\n\t\t\tif (v == nil) != (val2 == nil) {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tif !Equal(v, val2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\tcase []interface{}:\n\t\ty := vy.([]interface{})\n\n\t\tif len(x) != len(y) {\n\t\t\treturn false\n\t\t}\n\n\t\tvar matches int\n\t\tflagged := make([]bool, len(y))\n\t\tfor _, v := range x {\n\t\t\tfor i, v2 := range y {\n\t\t\t\tif Equal(v, v2) && !flagged[i] {\n\t\t\t\t\tmatches++\n\t\t\t\t\tflagged[i] = true\n\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn matches == len(x)\n\tdefault:\n\t\treturn vx == vy\n\t}\n}",
"func Equal(expected, actual interface{}) Truth {\n\tmustBeCleanStart()\n\treturn Truth{\n\t\tValue:reflect.DeepEqual(expected, actual),\n\t\tDump:fmt.Sprintf(\"%#v vs. %#v\", expected, actual),\n\t}\n}",
"func (firstDate Nakamura) Equal(secondDate Nakamura) bool {\n\treturn Equal(firstDate, secondDate, secondDate.format)\n}",
"func (result *Result) Equal(other *Result) bool {\n\t// Check for nil cases\n\tif result == nil {\n\t\treturn other == nil\n\t}\n\tif other == nil {\n\t\treturn false\n\t}\n\n\t// Compare Fields, RowsAffected, InsertID, Rows.\n\treturn FieldsEqual(result.Fields, other.Fields) &&\n\t\tresult.RowsAffected == other.RowsAffected &&\n\t\tresult.InsertID == other.InsertID &&\n\t\treflect.DeepEqual(result.Rows, other.Rows)\n}",
"func (g *Graph) Equal(g2 *Graph, debug bool) bool {\n\n\t// Check the vertices\n\tkeys1 := g.listOfKeys()\n\tkeys2 := g2.listOfKeys()\n\n\tif !SlicesHaveSameElements(&keys1, &keys2) {\n\t\tif debug {\n\t\t\tlog.Println(\"Lists of keys are different\")\n\t\t\tlog.Printf(\"Keys1: %v\\n\", keys1)\n\t\t\tlog.Printf(\"Keys2: %v\\n\", keys2)\n\t\t}\n\t\treturn false\n\t}\n\n\t// Walk through each vertex and check its connections\n\tfor _, vertex := range keys1 {\n\t\tconns1 := g.Nodes[vertex]\n\t\tconns2 := g2.Nodes[vertex]\n\n\t\tif !SetsEqual(conns1, conns2) {\n\t\t\tif debug {\n\t\t\t\tlog.Printf(\"Connections different for vertex %v\", vertex)\n\t\t\t\tlog.Printf(\"Connections 1: %v\\n\", conns1)\n\t\t\t\tlog.Printf(\"Connections 2: %v\\n\", conns2)\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func (s *User) Equal(r *User) bool {\n\tif s.ID != r.ID {\n\t\treturn false\n\t}\n\tif s.Name != r.Name {\n\t\treturn false\n\t}\n\tif s.EMail != r.EMail {\n\t\treturn false\n\t}\n\tif len(s.Group) != len(r.Group) {\n\t\treturn false\n\t}\n\n\tfor idx := 0; idx < len(s.Group); idx++ {\n\t\tl := s.Group[idx]\n\t\tr := r.Group[idx]\n\t\tif !l.Equal(r) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func Equal(values ...interface{}) (failureMessage string) {\n\tif values[0] != values[1] {\n\t\tfailureMessage = fmt.Sprintf(\"Expected `%v` to equal `%v`\", values[0], values[1])\n\t}\n\treturn\n}",
"func Eql(v1, v2 Vect) bool { return v1.X == v2.X && v1.Y == v2.Y }",
"func Equal(a, b interface{}) bool {\n\tif reflect.TypeOf(a) == reflect.TypeOf(b) {\n\t\treturn reflect.DeepEqual(a, b)\n\t}\n\tswitch a.(type) {\n\tcase int, int8, int16, int32, int64:\n\t\tswitch b.(type) {\n\t\tcase int, int8, int16, int32, int64:\n\t\t\treturn reflect.ValueOf(a).Int() == reflect.ValueOf(b).Int()\n\t\t}\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\tswitch b.(type) {\n\t\tcase uint, uint8, uint16, uint32, uint64:\n\t\t\treturn reflect.ValueOf(a).Uint() == reflect.ValueOf(b).Uint()\n\t\t}\n\tcase float32, float64:\n\t\tswitch b.(type) {\n\t\tcase float32, float64:\n\t\t\treturn reflect.ValueOf(a).Float() == reflect.ValueOf(b).Float()\n\t\t}\n\tcase string:\n\t\tswitch b.(type) {\n\t\tcase []byte:\n\t\t\treturn a.(string) == string(b.([]byte))\n\t\t}\n\tcase []byte:\n\t\tswitch b.(type) {\n\t\tcase string:\n\t\t\treturn b.(string) == string(a.([]byte))\n\t\t}\n\t}\n\treturn false\n}",
"func Equal(a, b Node) bool {\n\tif a == nil {\n\t\treturn b == nil\n\t}\n\tswitch a := a.(type) {\n\tcase *Text:\n\t\tb, ok := b.(*Text)\n\t\treturn ok &&\n\t\t\tEqual(a.Leading, b.Leading) &&\n\t\t\tEqual(&a.Discourse, &b.Discourse)\n\n\tcase *Discourse:\n\t\tb, ok := b.(*Discourse)\n\t\tif !ok || len(*a) != len(*b) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range *a {\n\t\t\tif !Equal((*a)[i], (*b)[i]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\n\tcase *StatementSentence:\n\t\tb, ok := b.(*StatementSentence)\n\t\treturn ok &&\n\t\t\twordEqual(a.JE, b.JE) &&\n\t\t\twordEqual(a.DA, b.DA) &&\n\t\t\tEqual(a.Statement, b.Statement)\n\n\tcase *CoPSentence:\n\t\tb, ok := b.(*CoPSentence)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *Prenex:\n\t\tb, ok := b.(*Prenex)\n\t\treturn ok && wordEqual(&a.BI, &b.BI) && Equal(a.Terms, b.Terms)\n\n\tcase *PrenexStatement:\n\t\tb, ok := b.(*PrenexStatement)\n\t\treturn ok && Equal(&a.Prenex, &b.Prenex) && Equal(a.Statement, b.Statement)\n\n\tcase *Predication:\n\t\tb, ok := b.(*Predication)\n\t\treturn ok &&\n\t\t\twordEqual(a.NA, b.NA) &&\n\t\t\tEqual(a.Predicate, b.Predicate) &&\n\t\t\t(a.Terms == nil) == (b.Terms == nil) &&\n\t\t\t(a.Terms == nil || Equal(a.Terms, b.Terms))\n\n\tcase *CoPStatement:\n\t\tb, ok := b.(*CoPStatement)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *PrefixedPredicate:\n\t\tb, ok := b.(*PrefixedPredicate)\n\t\treturn ok && wordEqual(&a.MU, &b.MU) && Equal(a.Predicate, b.Predicate)\n\n\tcase *SerialPredicate:\n\t\tb, ok := b.(*SerialPredicate)\n\t\treturn ok && Equal(a.Left, b.Left) && Equal(a.Right, b.Right)\n\n\tcase *WordPredicate:\n\t\tb, ok := b.(*WordPredicate)\n\t\treturn ok && wordEqual((*Word)(a), (*Word)(b))\n\n\tcase *MIPredicate:\n\t\tb, ok := b.(*MIPredicate)\n\t\treturn ok && wordEqual(&a.MI, &b.MI) && wordEqual(a.GA, b.GA) && Equal(a.Phrase, b.Phrase)\n\n\tcase *POPredicate:\n\t\tb, ok := b.(*POPredicate)\n\t\treturn ok && wordEqual(&a.PO, &b.PO) && wordEqual(a.GA, b.GA) && Equal(a.Argument, b.Argument)\n\n\tcase *MOPredicate:\n\t\tb, ok := b.(*MOPredicate)\n\t\treturn ok && wordEqual(&a.MO, &b.MO) && wordEqual(&a.TEO, &b.TEO) && Equal(&a.Discourse, &b.Discourse)\n\n\tcase *LUPredicate:\n\t\tb, ok := b.(*LUPredicate)\n\t\treturn ok && wordEqual(&a.LU, &b.LU) && Equal(a.Statement, b.Statement)\n\n\tcase *CoPPredicate:\n\t\tb, ok := b.(*CoPPredicate)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *LinkedTerm:\n\t\tb, ok := b.(*LinkedTerm)\n\t\treturn ok && wordEqual(&a.GO, &b.GO) && Equal(a.Argument, b.Argument)\n\n\tcase Terms:\n\t\tb, ok := b.(Terms)\n\t\tif !ok || len(a) != len(b) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range a {\n\t\t\tif !Equal(a[i], b[i]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\n\tcase *TermSet:\n\t\tb, ok := b.(*TermSet)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *PredicateArgument:\n\t\tb, ok := b.(*PredicateArgument)\n\t\treturn ok &&\n\t\t\twordEqual(a.Focus, b.Focus) &&\n\t\t\twordEqual(a.Quantifier, b.Quantifier) &&\n\t\t\tEqual(a.Predicate, b.Predicate) &&\n\t\t\t(a.Relative == nil) == (b.Relative == nil) &&\n\t\t\t(a.Relative == nil || Equal(a.Relative, b.Relative))\n\n\tcase *CoPArgument:\n\t\tb, ok := b.(*CoPArgument)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *PredicationRelative:\n\t\tb, ok := b.(*PredicationRelative)\n\t\treturn ok && Equal(&a.Predication, &b.Predication)\n\n\tcase *LURelative:\n\t\tb, ok := b.(*LURelative)\n\t\treturn ok && wordEqual(&a.LU, &b.LU) && 
Equal(a.Statement, b.Statement)\n\n\tcase *CoPRelative:\n\t\tb, ok := b.(*CoPRelative)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *PredicateAdverb:\n\t\tb, ok := b.(*PredicateAdverb)\n\t\treturn ok && Equal(a.Predicate, b.Predicate)\n\n\tcase *CoPAdverb:\n\t\tb, ok := b.(*CoPAdverb)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *PredicationPreposition:\n\t\tb, ok := b.(*PredicationPreposition)\n\t\treturn ok && Equal(a.Predicate, b.Predicate) && Equal(a.Argument, b.Argument)\n\n\tcase *CoPPreposition:\n\t\tb, ok := b.(*CoPPreposition)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *PredicationContent:\n\t\tb, ok := b.(*PredicationContent)\n\t\treturn ok && Equal(&a.Predication, &b.Predication)\n\n\tcase *LUContent:\n\t\tb, ok := b.(*LUContent)\n\t\treturn ok && wordEqual(&a.LU, &b.LU) && Equal(a.Statement, b.Statement)\n\n\tcase *CoPContent:\n\t\tb, ok := b.(*CoPContent)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *Parenthetical:\n\t\tb, ok := b.(*Parenthetical)\n\t\treturn ok && wordEqual(&a.KI, &b.KI) && wordEqual(&a.KIO, &b.KIO) && Equal(&a.Discourse, &b.Discourse)\n\n\tcase *Incidental:\n\t\tb, ok := b.(*Incidental)\n\t\treturn ok && wordEqual(&a.JU, &b.JU) && Equal(a.Statement, b.Statement)\n\n\tcase *Vocative:\n\t\tb, ok := b.(*Vocative)\n\t\treturn ok && wordEqual(&a.HU, &b.HU) && Equal(a.Argument, b.Argument)\n\n\tcase *Interjection:\n\t\tb, ok := b.(*Interjection)\n\t\treturn ok && wordEqual((*Word)(a), (*Word)(b))\n\n\tcase *Space:\n\t\tb, ok := b.(*Space)\n\t\treturn ok && wordEqual((*Word)(a), (*Word)(b))\n\n\tcase *Word:\n\t\tb, ok := b.(*Word)\n\t\treturn ok && wordEqual(a, b)\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown node type %T\", a))\n\t}\n}",
"func (c Command) Equal(o Command) bool {\n\tif c.Inst != o.Inst {\n\t\treturn false\n\t}\n\tif len(c.Args) != len(o.Args) {\n\t\treturn false\n\t}\n\tfor i := range c.Args {\n\t\tif c.Args[i] != o.Args[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (p Pair) Equal(pair Pair) bool {\n\treturn p.String() == pair.String()\n}",
"func (t Token) Equal(v Token) bool {\n\treturn t.ID == v.ID &&\n\t\tt.Class == v.Class &&\n\t\tt.Surface == v.Surface\n}",
"func (j1 *JWTAuth) Equal(j2 *JWTAuth) bool {\n\treturn reflect.DeepEqual(j1.JWTAuth, j2.JWTAuth)\n}",
"func (m Matches) Equals(o Matches) bool {\n\tif len(m) != len(o) {\n\t\treturn false\n\t}\n\n\thasMatch := make(map[Match]bool)\n\tfor _, e := range m {\n\t\thasMatch[e] = true\n\t}\n\n\tfor _, e := range o {\n\t\tif !hasMatch[e] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func TestCities_Equal(t *testing.T) {\n\ttype testCase struct {\n\t\tc1, c2 cities\n\t\twant bool\n\t}\n\tcases := []testCase{\n\t\t{\n\t\t\tc1: cities{},\n\t\t\tc2: cities{},\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tc1: cities{},\n\t\t\tc2: cities{\n\t\t\t\tcity{name: \"Barcelona\", population: 1.6e6, cost: ReasonableCost, climate: GreatClimate}},\n\t\t\twant: false,\n\t\t},\n\t}\n\tfor _, tc := range cases {\n\t\t//\tc1 := cities{}\n\t\t//\tc2 := cities{}\n\t\t//\t\twant := true\n\t\tif tc.c1.Equal(tc.c2) != tc.want {\n\t\t\tt.Errorf(\"cities.Equal() should be %v for cities\\n%q\\nand\\n%q\\n\", tc.want, tc.c1, tc.c2)\n\t\t}\n\t}\n}",
"func (k1 *KeyAuth) Equal(k2 *KeyAuth) bool {\n\treturn reflect.DeepEqual(k1.KeyAuth, k2.KeyAuth)\n}",
"func (r *Record) Eq(other *Record) bool {\n\n\t// We disregard leader in equality tests, since LineMARC doesn't have one,\n\t// and it will be generated by decoders and encoder.\n\t/*\n\t\t// Leader equal?\n\t\tif r.Leader != other.Leader {\n\t\t\treturn false\n\t\t}\n\t*/\n\n\t// Control Fields equal?\n\tif len(r.CtrlFields) != len(other.CtrlFields) {\n\t\treturn false\n\t}\n\n\tsort.Sort(r.CtrlFields)\n\tsort.Sort(other.CtrlFields)\n\n\tfor i, f := range r.CtrlFields {\n\t\tif other.CtrlFields[i] != f {\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// Data Fields equal?\n\tif len(r.DataFields) != len(other.DataFields) {\n\t\treturn false\n\t}\n\n\tsort.Sort(r.DataFields)\n\tsort.Sort(other.DataFields)\n\n\tfor i, f := range r.DataFields {\n\t\tif o := other.DataFields[i]; o.Tag != f.Tag || o.Ind1 != f.Ind1 || o.Ind2 != f.Ind2 {\n\t\t\treturn false\n\t\t}\n\t\t// SubFields equal?\n\t\tif len(f.SubFields) != len(other.DataFields[i].SubFields) {\n\t\t\treturn false\n\t\t}\n\n\t\tsort.Sort(f.SubFields)\n\t\tsort.Sort(other.DataFields[i].SubFields)\n\n\t\tfor j, s := range f.SubFields {\n\t\t\tif other.DataFields[i].SubFields[j] != s {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\t// All fields equal\n\treturn true\n}",
"func DeepConvertibleEquals(args ...interface{}) (ok bool, message string) {\n\tparams, message, err := ExpectNArgs(2, args)\n\tif err != nil {\n\t\treturn false, err.Error()\n\t}\n\tif !deepValueConvertibleEquals(reflect.ValueOf(params[0]), reflect.ValueOf(params[1])) {\n\t\tif message != \"\" {\n\t\t\treturn false, message\n\t\t}\n\t\treturn false, fmt.Sprintf(\"deep equal: expected %+v; got %+v\", params[1], params[0])\n\t}\n\treturn true, \"\"\n}",
"func equal(left, right []string) bool {\n\tif len(left) != len(right) {\n\t\treturn false\n\t}\n\tfor i, value := range left {\n\t\tif value != right[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func Equal(t *testing.T, a, b interface{}) {\n\tif a != b && !reflect.DeepEqual(a, b) {\n\t\tt.Errorf(\"%v Not Equal: %v == %v\", line(), a, b)\n\t}\n}",
"func (a *Activation) Equal(other *Activation) bool {\n\treturn a.actptr == other.actptr\n}",
"func (t Tags) Equal(other Tags) bool {\n\tif len(t.Values()) != len(other.Values()) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(t.Values()); i++ {\n\t\tequal := t.values[i].Name.Equal(other.values[i].Name) &&\n\t\t\tt.values[i].Value.Equal(other.values[i].Value)\n\t\tif !equal {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func Equal(a, b Sequence) bool {\n\treturn reflect.DeepEqual(a.Info(), b.Info()) &&\n\t\treflect.DeepEqual(a.Features(), b.Features()) &&\n\t\tbytes.Equal(a.Bytes(), b.Bytes())\n}",
"func Equal(s1, s2 Set) bool {\n\tif s1.Len() != s2.Len() {\n\t\treturn false\n\t}\n\tfor k := range s1 {\n\t\tif _, ok := s2[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func Equal(typ string, raw1, raw2 []byte) (bool, error) {\n\treturn EqualApprox(typ, raw1, raw2, 0)\n}",
"func InstructionEquals(i1 Instruction, i2 Instruction, withResultId bool) bool {\n\tif !withResultId {\n\t\tif id, ok := InstructionResultId(i1); ok {\n\t\t\tSetInstructionResultId(i2, id)\n\t\t}\n\t}\n\treturn reflect.DeepEqual(i1, i2)\n}",
"func actEqualsExp(t *testing.T, id, gfName string, actVal, expVal []byte) bool {\n\tt.Helper()\n\n\tif bytes.Equal(actVal, expVal) {\n\t\treturn true\n\t}\n\n\tt.Log(id)\n\tt.Log(\"\\t: Expected\\n\" + string(expVal))\n\tt.Log(\"\\t: Actual\\n\" + string(actVal))\n\tt.Errorf(\"\\t: The value given differs from the golden file value: %q\",\n\t\tgfName)\n\treturn false\n}",
"func Eq(one, other interface{}) bool {\n\treturn reflect.DeepEqual(one, other)\n}",
"func (a *Assertions) Equal(expected interface{}, actual interface{}, userMessageComponents ...interface{}) bool {\n\ta.assertion()\n\tif didFail, message := shouldBeEqual(expected, actual); didFail {\n\t\treturn a.fail(message, userMessageComponents...)\n\t}\n\treturn true\n}",
"func (e1 *Config) Equal(e2 *Config) bool {\n\tif e1 == e2 {\n\t\treturn true\n\t}\n\tif e1 == nil || e2 == nil {\n\t\treturn false\n\t}\n\tif e1.URL != e2.URL {\n\t\treturn false\n\t}\n\tif e1.Host != e2.Host {\n\t\treturn false\n\t}\n\tif e1.SigninURL != e2.SigninURL {\n\t\treturn false\n\t}\n\tif e1.SigninURLRedirectParam != e2.SigninURLRedirectParam {\n\t\treturn false\n\t}\n\tif e1.Method != e2.Method {\n\t\treturn false\n\t}\n\n\tmatch := sets.StringElementsMatch(e1.ResponseHeaders, e2.ResponseHeaders)\n\tif !match {\n\t\treturn false\n\t}\n\n\tif e1.RequestRedirect != e2.RequestRedirect {\n\t\treturn false\n\t}\n\tif e1.AuthSnippet != e2.AuthSnippet {\n\t\treturn false\n\t}\n\n\tif e1.AuthCacheKey != e2.AuthCacheKey {\n\t\treturn false\n\t}\n\n\tif e1.KeepaliveConnections != e2.KeepaliveConnections {\n\t\treturn false\n\t}\n\n\tif e1.KeepaliveShareVars != e2.KeepaliveShareVars {\n\t\treturn false\n\t}\n\n\tif e1.KeepaliveRequests != e2.KeepaliveRequests {\n\t\treturn false\n\t}\n\n\tif e1.KeepaliveTimeout != e2.KeepaliveTimeout {\n\t\treturn false\n\t}\n\n\tif e1.AlwaysSetCookie != e2.AlwaysSetCookie {\n\t\treturn false\n\t}\n\n\treturn sets.StringElementsMatch(e1.AuthCacheDuration, e2.AuthCacheDuration)\n}",
"func equals(v1, v2 interface{}) bool {\n\tv1Type := jsonType(v1)\n\tif v1Type != jsonType(v2) {\n\t\treturn false\n\t}\n\tswitch v1Type {\n\tcase \"array\":\n\t\tarr1, arr2 := v1.([]interface{}), v2.([]interface{})\n\t\tif len(arr1) != len(arr2) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range arr1 {\n\t\t\tif !equals(arr1[i], arr2[i]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase \"object\":\n\t\tobj1, obj2 := v1.(map[string]interface{}), v2.(map[string]interface{})\n\t\tif len(obj1) != len(obj2) {\n\t\t\treturn false\n\t\t}\n\t\tfor k, v1 := range obj1 {\n\t\t\tif v2, ok := obj2[k]; ok {\n\t\t\t\tif !equals(v1, v2) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase \"number\":\n\t\tnum1, _ := new(big.Float).SetString(string(v1.(json.Number)))\n\t\tnum2, _ := new(big.Float).SetString(string(v2.(json.Number)))\n\t\treturn num1.Cmp(num2) == 0\n\tdefault:\n\t\treturn v1 == v2\n\t}\n}",
"func (r Result) Equal() bool {\n\treturn r.flags&(reportEqual|reportByIgnore) != 0\n}",
"func (a *Advertisement) Equal(b *Advertisement) bool {\n\tif a.Prefix.String() != b.Prefix.String() {\n\t\treturn false\n\t}\n\tif a.LocalPref != b.LocalPref {\n\t\treturn false\n\t}\n\treturn reflect.DeepEqual(a.Communities, b.Communities)\n}",
"func (m Mat2f) Equal(other Mat2f) bool {\n\treturn m.EqualEps(other, Epsilon)\n}"
] | [
"0.6665299",
"0.6641414",
"0.6626935",
"0.6626935",
"0.6571231",
"0.642829",
"0.6321033",
"0.6263613",
"0.6251727",
"0.6233622",
"0.62238616",
"0.6217805",
"0.614363",
"0.6140861",
"0.6139372",
"0.612923",
"0.61276424",
"0.6115138",
"0.6110481",
"0.6083133",
"0.60731983",
"0.60693383",
"0.6057379",
"0.60561407",
"0.60523605",
"0.6050809",
"0.60479265",
"0.60465366",
"0.604467",
"0.6042767",
"0.6041218",
"0.6035287",
"0.60351354",
"0.6034663",
"0.603119",
"0.6024332",
"0.6015992",
"0.6013094",
"0.6007015",
"0.5982384",
"0.59769696",
"0.5967548",
"0.59675205",
"0.5952711",
"0.59525746",
"0.59459794",
"0.5932494",
"0.5929988",
"0.5928355",
"0.5927685",
"0.5923425",
"0.59181726",
"0.59083766",
"0.5906512",
"0.58942413",
"0.5887154",
"0.58860093",
"0.5884951",
"0.5883516",
"0.58771956",
"0.5875479",
"0.58743304",
"0.58722806",
"0.5860414",
"0.5859075",
"0.5844271",
"0.5844235",
"0.58429104",
"0.58406264",
"0.58402324",
"0.5836767",
"0.58338594",
"0.5831417",
"0.58313835",
"0.58311146",
"0.5830279",
"0.58295",
"0.5829102",
"0.58254015",
"0.5823046",
"0.5819642",
"0.58195835",
"0.5819396",
"0.5815609",
"0.58155143",
"0.5814277",
"0.58109474",
"0.5808823",
"0.5807553",
"0.5804116",
"0.5803839",
"0.580334",
"0.5801649",
"0.5800805",
"0.57965857",
"0.57922095",
"0.57884544",
"0.57867575",
"0.57838273",
"0.5782115"
] | 0.6786418 | 0 |
String formats the match for humans | func (m Match) String() string {
return fmt.Sprintf("<Match %d %d (%d, %d)-(%d, %d) '%v'>", m.PC, m.TC, m.StartLine, m.StartColumn, m.EndLine, m.EndColumn, string(m.Bytes))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (m match) String() string {\n\treturn fmt.Sprintf(\"M{%d,%d}\", m.distance, m.n)\n}",
"func (node *MatchExpr) Format(buf *TrackedBuffer) {\n\tbuf.astPrintf(node, \"match(%v) against (%v%s)\", node.Columns, node.Expr, node.Option.ToString())\n}",
"func (s Match) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (m Match) String() string {\n\treturn fmt.Sprintf(\"Match(pkg=%s vuln=%q type=%q)\", m.Package, m.Vulnerability.String(), m.Type)\n}",
"func matchProtoFormat(actual, expected string) bool {\n\tpattern := strings.ReplaceAll(regexp.QuoteMeta(expected), \": \", \": ?\")\n\tok, err := regexp.MatchString(pattern, actual)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ok\n}",
"func getFmtStringFromArgs(input string, v1, v2 starlark.Value) string {\n\t// Only calc diff if necessary.\n\tdiff := \"\"\n\tif i3 := strings.Index(input, \"$3\"); i3 >= 0 {\n\t\tdiff = pretty.Compare(v1, v2)\n\t}\n\n\trep := strings.NewReplacer(\n\t\t\"$1\", fmt.Sprint(v1),\n\t\t\"$2\", fmt.Sprint(v2),\n\t\t\"$3\", fmt.Sprint(diff),\n\t\t\"$4\", v1.Type(),\n\t\t\"$5\", v2.Type(),\n\t\t`\\n`, \"\\n\",\n\t\t`\\t`, \"\\t\",\n\t)\n\n\t// Run it through twice to resolve any newlines/tabs that get placed into the diff.\n\treturn rep.Replace(rep.Replace(input))\n}",
"func colorizeMatches(line string, matches [][]int) string {\n\toutput := \"\"\n\tpointer := 0\n\tfor _, match := range matches {\n\t\tstart := match[0]\n\t\tend := match[1]\n\n\t\tif start >= pointer {\n\t\t\toutput += line[pointer:start]\n\t\t}\n\n\t\toutput += Bold(Red(line[start:end])).String()\n\t\tpointer = end\n\t}\n\n\tif pointer < (len(line) - 1) {\n\t\toutput += line[pointer:]\n\t}\n\treturn output\n}",
"func returnMatchMessage(counter int, searchWord string) string {\n\tmessage := \"\"\n\tif counter == 0 {\n\t\tmessage = \"\\nSorry, no matches on \\\"\" + searchWord + \"\\\".\"\n\t} else {\n\t\tstrCounter := strconv.Itoa(counter)\n\t\tmessage = \"\\n\" + strCounter + \" matches on \\\"\" + searchWord + \"\\\".\"\n\t}\n\treturn message\n}",
"func (o HttpHeaderMatchResponseOutput) ExactMatch() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HttpHeaderMatchResponse) string { return v.ExactMatch }).(pulumi.StringOutput)\n}",
"func (d *Details) Matchup() string {\n\tm := make([]rune, 0, 9)\n\tvar prevTeamID int64\n\tfor i, p := range d.Players() {\n\t\tif i > 0 && p.TeamID() != prevTeamID {\n\t\t\tm = append(m, 'v')\n\t\t}\n\t\tm = append(m, p.Race().Letter)\n\t\tprevTeamID = p.TeamID()\n\t}\n\treturn string(m)\n}",
"func FormatScore(game responses.FantasyGame) string {\n\thomeTeam := game.Home.Name\n\tawayTeam := game.Away.Name\n\n\thomeScore := game.HomeScore.Score.Value\n\tawayScore := game.AwayScore.Score.Value\n\n\tif homeScore > awayScore {\n\t\treturn fmt.Sprintf(\"%v beat %v with a score of %.2f-%.2f.\\n\", homeTeam, awayTeam, homeScore, awayScore)\n\t} else if awayScore > homeScore {\n\t\treturn fmt.Sprintf(\"%v beat %v with a score of %.2f-%.2f.\\n\", awayTeam, homeTeam, awayScore, homeScore)\n\t} else {\n\t\treturn fmt.Sprintf(\"Whaaaat....%v and %v tied with a score of %.2f-%.2f.\\n\", homeTeam, awayTeam, homeScore, awayScore)\n\t}\n}",
"func (f Formatter) Format(txt string) (string, error) {\n\ttokens, err := f.l.Scan([]byte(txt))\n\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to format: %q\", err)\n\t}\n\n\tvar s strings.Builder\n\n\tinSession := true\n\tinPerformance := false\n\n\tfor _, tok := range tokens {\n\t\tswitch tok.Name() {\n\t\tcase \"DATE\":\n\t\t\ts.WriteString(\"@ \")\n\t\t\ts.WriteString(tok.Value())\n\t\t\ts.WriteString(\"\\r\\n\")\n\t\tcase \"FAILS\":\n\t\t\ts.WriteString(\" \")\n\t\t\ts.WriteString(tok.Value())\n\t\t\ts.WriteString(\"f\")\n\t\tcase \"LOAD\":\n\t\t\tinPerformance = true\n\t\t\ts.WriteString(\"\\r\\n\")\n\t\t\ts.WriteString(\" \")\n\t\t\ts.WriteString(tok.Value())\n\t\tcase \"METADATA\":\n\t\t\ts.WriteString(\"\\r\\n\")\n\t\t\ts.WriteString(spacer(inSession, inPerformance))\n\t\t\ts.WriteString(\"* \")\n\t\t\ts.WriteString(tok.Value())\n\t\tcase \"MOVEMENT\", \"MOVEMENT_SS\":\n\t\t\tinSession = false\n\t\t\tinPerformance = false\n\t\t\ts.WriteString(\"\\r\\n\\r\\n\")\n\t\t\tif tok.Value() == \"MOVEMENT_SS\" {\n\t\t\t\ts.WriteString(\"+ \")\n\t\t\t}\n\t\t\ts.WriteString(tok.Value())\n\t\t\ts.WriteString(\":\")\n\t\tcase \"NOTE\":\n\t\t\ts.WriteString(\"\\r\\n\")\n\t\t\ts.WriteString(spacer(inSession, inPerformance))\n\t\t\ts.WriteString(\"* \")\n\t\t\ts.WriteString(tok.Value())\n\t\tcase \"REPS\":\n\t\t\ts.WriteString(\" \")\n\t\t\ts.WriteString(tok.Value())\n\t\t\ts.WriteString(\"r\")\n\t\tcase \"SETS\":\n\t\t\ts.WriteString(\" \")\n\t\t\ts.WriteString(tok.Value())\n\t\t\ts.WriteString(\"s\")\n\t\t}\n\t}\n\n\treturn s.String(), nil\n}",
"func (r *Recommendation) Format() string {\n\tscore := \"Scored\"\n\tif !r.Scored {\n\t\tscore = \"Not Scored\"\n\t}\n\treturn fmt.Sprintf(\"CIS %v - %v (%v)\", r.CisID, r.Name, score)\n}",
"func repl(match string, t time.Time) string {\n\tif match == \"%%\" {\n\t\treturn \"%\"\n\t}\n\n\tformatFunc, ok := formats[match]\n\tif ok {\n\t\treturn formatFunc(t)\n\t}\n\treturn formatNanoForMatch(match, t)\n}",
"func (m *Match) printMatch() {\n\tfmt.Printf(\"%s%s%s%s:%s:%s%s%s%s%s%s\\n\",\n\t\tcolors.Purple,\n\t\tm.Path,\n\t\tcolors.Restore,\n\t\tcolors.Green,\n\t\tstrconv.Itoa(m.LineNumber),\n\t\tcolors.Restore,\n\t\tstring(m.Line[:m.Match[0]]),\n\t\tcolors.LightRed,\n\t\tstring(m.Line[m.Match[0]:m.Match[1]]),\n\t\tcolors.Restore,\n\t\tstring(m.Line[m.Match[1]:]),\n\t)\n}",
"func SerializeGrep(matches []match) string {\n\tstr := \"\"\n\n\tfor _, m := range matches {\n\t\tif len(m.Results) == 0 {\n\t\t\tstr = str + fmt.Sprintf(\"%s:%d:%f:%s\\n\", m.Path, 0, m.Score, newFileDescription)\n\t\t} else {\n\t\t\tfor _, r := range m.Results {\n\t\t\t\tstr = str + fmt.Sprintf(\"%s:%d:%f:%s\\n\", m.Path, r.LineNo(), m.Score, r.Snippet())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn str\n}",
"func formatToRegexPattern(format string) string {\n\ts := gregex.Quote(formatToStdLayout(format))\n\ts, _ = gregex.ReplaceString(`[0-9]`, `[0-9]`, s)\n\ts, _ = gregex.ReplaceString(`[A-Za-z]`, `[A-Za-z]`, s)\n\treturn s\n}",
"func RSSTextFormat(plain string) string {\n\tbuf := strings.Replace(plain, \"<br>\", \" \", -1)\n\tbuf = strings.Replace(buf, \"&\", \"&\", -1)\n\treg := regexp.MustCompile(`&(#\\d+|lt|gt|amp);`)\n\tbuf = reg.ReplaceAllString(buf, \"&$1;\")\n\tbuf = strings.Replace(buf, \"<\", \"<\", -1)\n\tbuf = strings.Replace(buf, \">\", \">\", -1)\n\tbuf = strings.Replace(buf, \"\\r\", \"\", -1)\n\tbuf = strings.Replace(buf, \"\\n\", \"\", -1)\n\treturn buf\n}",
"func (m MatcherDecl) Format(w fmt.State, verb rune) {\n\tfmt.Fprintf(w, \"match %v\", m.Name)\n\tfmt.Fprintf(w, \": \")\n\tm.Options.Format(w, verb)\n}",
"func (o *UpdateArgsMatcher) String() string {\n\treturn \"matches\"\n}",
"func (s *simpleSeq) Format(fmt string) string {\n\tif fmt == \"fastq\" {\n\t\treturn s.FastqString()\n\t}\n\treturn s.FastaString()\n}",
"func (o RouterNatRuleResponseOutput) Match() pulumi.StringOutput {\n\treturn o.ApplyT(func(v RouterNatRuleResponse) string { return v.Match }).(pulumi.StringOutput)\n}",
"func parseFormat(format string) (msgfmt string) {\n\tif len(format) < 6 /* (len of \"%{sql} */ {\n\t\treturn defaultFmt\n\t}\n\tidx := strings.IndexRune(format, '%')\n\tfor idx != -1 {\n\t\tmsgfmt += format[:idx]\n\t\tformat = format[idx:]\n\t\tif len(format) > 2 {\n\t\t\tif format[1] == '{' {\n\t\t\t\t// end of curr verb pos\n\t\t\t\tif jdx := strings.IndexRune(format, '}'); jdx != -1 {\n\t\t\t\t\t// next verb pos\n\t\t\t\t\tidx = strings.Index(format[1:], \"%{\")\n\t\t\t\t\t// incorrect verb found (\"...%{wefwef ...\") but after\n\t\t\t\t\t// this, new verb (maybe) exists (\"...%{inv %{verb}...\")\n\t\t\t\t\tif idx != -1 && idx < jdx {\n\t\t\t\t\t\tmsgfmt += \"%%\"\n\t\t\t\t\t\tformat = format[1:]\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\t// get verb and arg\n\t\t\t\t\tverb := ph2verb(format[:jdx+1])\n\t\t\t\t\tmsgfmt += verb\n\n\t\t\t\t\tformat = format[jdx+1:]\n\t\t\t\t} else {\n\t\t\t\t\tformat = format[1:]\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tmsgfmt += \"%%\"\n\t\t\t\tformat = format[1:]\n\t\t\t}\n\t\t}\n\t\tidx = strings.IndexRune(format, '%')\n\t}\n\tmsgfmt += format\n\treturn\n}",
"func (s FieldToMatch) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func Format(given string) (string, error) {\n\tclean, err := Number(given)\n\tif err != nil {\n\t\treturn clean, err\n\t}\n\treturn fmt.Sprintf(\"(%s) %s-%s\", clean[:3], clean[3:6], clean[6:10]), nil\n}",
"func formatPayee(pyee string) string {\n\t// Trim into uniform format\n\tpyee = strings.ToUpper(pyee)\n\tpyee = strings.Trim(pyee, \" \\t\\n\")\n\tpyee = strings.Replace(pyee, \".\", \"\", -1)\n\tpyee = strings.Replace(pyee, \",?\", \" EN \", -1)\n\n\t// Remove the following words that obfuscate the output\n\tfor _, s := range []string{\"HR \", \"MEJ \", \"DR \", \"MR \"} {\n\t\tpyee = strings.Replace(pyee, s, \"\", -1)\n\t}\n\n\treturn pyee\n}",
"func searchable(s string) string {\n\ts = strings.TrimSpace(s)\n\tif s == \"\" {\n\t\treturn \"\"\n\t}\n\twords := strings.Fields(s)\n\treturn strings.Join(words, \":* & \") + \":*\"\n}",
"func FormatMessage(input string) string {\n\tre_leadclose_whtsp := regexp.MustCompile(`^[\\s\\p{Zs}]+|[\\s\\p{Zs}]+$`)\n\tre_inside_whtsp := regexp.MustCompile(`[\\s\\p{Zs}]{2,}`)\n\tfinal := re_leadclose_whtsp.ReplaceAllString(input, \"\")\n\tfinal = re_inside_whtsp.ReplaceAllString(final, \" \")\n\treturn final\n}",
"func (s *sqlStrFormat) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {\n\n\t// argIndex changes the function argument which gets matched to the regex\n\targIndex := 0\n\n\t// TODO(gm) improve confidence if database/sql is being used\n\tif node := s.calls.ContainsCallExpr(n, c, false); node != nil {\n\t\t// if the function is fmt.Fprintf, search for SQL statement in Args[1] instead\n\t\tif sel, ok := node.Fun.(*ast.SelectorExpr); ok {\n\t\t\tif sel.Sel.Name == \"Fprintf\" {\n\t\t\t\t// if os.Stderr or os.Stdout is in Arg[0], mark as no issue\n\t\t\t\tif arg, ok := node.Args[0].(*ast.SelectorExpr); ok {\n\t\t\t\t\tif ident, ok := arg.X.(*ast.Ident); ok {\n\t\t\t\t\t\tif s.noIssue.Contains(ident.Name, arg.Sel.Name) {\n\t\t\t\t\t\t\treturn nil, nil\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// the function is Fprintf so set argIndex = 1\n\t\t\t\targIndex = 1\n\t\t\t}\n\t\t}\n\n\t\t// no formatter\n\t\tif len(node.Args) == 0 {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tvar formatter string\n\n\t\t// concats callexpr arg strings together if needed before regex evaluation\n\t\tif argExpr, ok := node.Args[argIndex].(*ast.BinaryExpr); ok {\n\t\t\tif fullStr, ok := gosec.ConcatString(argExpr); ok {\n\t\t\t\tformatter = fullStr\n\t\t\t}\n\t\t} else if arg, e := gosec.GetString(node.Args[argIndex]); e == nil {\n\t\t\tformatter = arg\n\t\t}\n\t\tif len(formatter) <= 0 {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\t// If all formatter args are quoted or constant, then the SQL construction is safe\n\t\tif argIndex+1 < len(node.Args) {\n\t\t\tallSafe := true\n\t\t\tfor _, arg := range node.Args[argIndex+1:] {\n\t\t\t\tif n := s.noIssueQuoted.ContainsCallExpr(arg, c, true); n == nil && !s.constObject(arg, c) {\n\t\t\t\t\tallSafe = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif allSafe {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif s.MatchPatterns(formatter) {\n\t\t\treturn gosec.NewIssue(c, n, s.ID(), s.What, s.Severity, s.Confidence), nil\n\t\t}\n\t}\n\treturn nil, nil\n}",
"func (o HttpQueryParameterMatchResponseOutput) ExactMatch() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HttpQueryParameterMatchResponse) string { return v.ExactMatch }).(pulumi.StringOutput)\n}",
"func (f String) Formatted(v any) string {\n\tif f.IgnoreNil && v == nil {\n\t\treturn \"\"\n\t}\n\n\tswitch {\n\tcase f.MaxW == 0:\n\t\treturn fmt.Sprintf(\"%s\", v)\n\tcase f.MaxW < 0:\n\t\treturn fmt.Sprintf(\"%.*s\", f.W, v)\n\tdefault:\n\t\treturn fmt.Sprintf(\"%.*s\", f.MaxW, v)\n\t}\n}",
"func (node *MatchExpr) formatFast(buf *TrackedBuffer) {\n\tbuf.WriteString(\"match(\")\n\tfor i, col := range node.Columns {\n\t\tif i != 0 {\n\t\t\tbuf.WriteString(\", \")\n\t\t\tbuf.printExpr(node, col, true)\n\t\t} else {\n\t\t\tbuf.printExpr(node, col, true)\n\t\t}\n\t}\n\tbuf.WriteString(\") against (\")\n\tbuf.printExpr(node, node.Expr, true)\n\tbuf.WriteString(node.Option.ToString())\n\tbuf.WriteByte(')')\n}",
"func (o HttpHeaderMatchOutput) ExactMatch() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HttpHeaderMatch) *string { return v.ExactMatch }).(pulumi.StringPtrOutput)\n}",
"func formatDetection(sigID string, target map[string]string, fullURLPath string) string {\n\n\tpathToPrint := \"\"\n\tif target[\"protocol\"] == \"aws\" {\n\t\tpathToPrint = \"[\" + sigID + \"] \" + target[\"protocol\"] + \"://\" +\n\t\t\ttarget[\"profile\"] + \":\" + target[\"region\"]\n\t} else {\n\n\t\tif fullURLPath != \"\" {\n\t\t\tpathToPrint = \"[\" + sigID + \"] \" + fullURLPath\n\t\t} else {\n\t\t\t//pathToPrint = \"[\" + sigID + \"] \" + target[\"protocol\"] + \"://\" + target[\"hostname\"] + \":\" +\n\t\t\t//\ttarget[\"port\"]\n\t\t\tpathToPrint = \"[\" + sigID + \"] \" + target[\"input\"]\n\t\t}\n\t}\n\n\treturn pathToPrint\n}",
"func (c Provider) Match(query string) string {\n\tsm := SearchRegex.FindStringSubmatch(query)\n\tif len(sm) == 0 {\n\t\treturn \"\"\n\t}\n\tsms := strings.Split(sm[1], \"/\")\n\tfilename := sms[len(sms)-1]\n\tpieces := strings.Split(filename, \"-\")\n\tif len(pieces) > 2 {\n\t\treturn strings.Join(pieces[0:len(pieces)-1], \"-\")\n\t}\n\treturn pieces[0]\n}",
"func Format(given string) (string, error) {\n\tnumber, err := Number(given)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fmt.Sprintf(\"(%s) %s-%s\", number[0:3], number[3:6], number[6:]), nil\n}",
"func isMatch(s string, p string) bool {\n\n}",
"func Match(db *sql.DB, w io.Writer, maxCount int) error { //nolint:funlen\n\tif db == nil {\n\t\treturn database.ErrDB\n\t}\n\tif w == nil {\n\t\tw = io.Discard\n\t}\n\n\tconst (\n\t\tn0, n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12 = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12\n\t)\n\ttick := time.Now()\n\tlist, total, err := List(db, w)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsort.Strings(list)\n\n\tmatches := []string{}\n\tvar a0, a1, a2, b0, b1, b2, c0, c1, d0, d1, d2, d3, d4 string\n\tvar e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12 string\n\tvar f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12 string\n\tvar g0, g1, g2, g3, g4, g5, g6, g7, g8 string\n\tvar h0, h1, h2, h3, h4, h5, h6, h7, h8 string\n\ti := 0\n\tfor _, group := range list {\n\t\tif len(group) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\ti++\n\t\tif maxCount > 0 && i > maxCount {\n\t\t\tbreak\n\t\t}\n\t\ta0 = SwapSuffix(group, \"s\", \"z\")\n\t\ta1 = group + \"s\"\n\t\ta2 = group + \"z\"\n\t\tb0, b1, b2 = TrimSP(group)\n\t\tc0 = SwapSuffix(group, \"er\", \"a\")\n\t\tc1 = SwapPrefix(group, \"th\", \"da\")\n\t\td0 = SwapOne(group, \"ph\", \"f\")\n\t\td1 = SwapOne(group, \"ight\", \"ite\")\n\t\td2 = SwapOne(group, \"oul\", \"ul\")\n\t\td3 = SwapOne(group, \"ool\", \"ewl\")\n\t\td4 = SwapOne(group, \"culd\", \"suld\")\n\t\te0 = SwapNumeral(group, n0)\n\t\te1 = SwapNumeral(group, n1)\n\t\te2 = SwapNumeral(group, n2)\n\t\te3 = SwapNumeral(group, n3)\n\t\te4 = SwapNumeral(group, n4)\n\t\te5 = SwapNumeral(group, n5)\n\t\te6 = SwapNumeral(group, n6)\n\t\te7 = SwapNumeral(group, n7)\n\t\te8 = SwapNumeral(group, n8)\n\t\te9 = SwapNumeral(group, n9)\n\t\te10 = SwapNumeral(group, n10)\n\t\te11 = SwapNumeral(group, n11)\n\t\te12 = SwapNumeral(group, n12)\n\t\tf1 = SwapNumeral(group, n1)\n\t\tf2 = SwapNumeral(group, n2)\n\t\tf3 = SwapNumeral(group, n3)\n\t\tf4 = SwapNumeral(group, n4)\n\t\tf5 = SwapNumeral(group, n5)\n\t\tf6 = SwapNumeral(group, n6)\n\t\tf7 = SwapNumeral(group, n7)\n\t\tf8 = SwapNumeral(group, n8)\n\t\tf9 = SwapNumeral(group, n9)\n\t\tf10 = SwapNumeral(group, n10)\n\t\tf11 = SwapNumeral(group, n11)\n\t\tf12 = SwapNumeral(group, n12)\n\t\tg0 = SwapAll(group, \"0\", \"o\")\n\t\th0 = SwapAll(group, \"o\", \"0\")\n\t\tg1 = SwapAll(group, \"1\", \"l\")\n\t\th1 = SwapAll(group, \"l\", \"1\")\n\t\tg2 = SwapAll(group, \"1\", \"i\")\n\t\th2 = SwapAll(group, \"i\", \"q\")\n\t\tg3 = SwapAll(group, \"i\", \"l\")\n\t\th3 = SwapAll(group, \"l\", \"i\")\n\t\tg4 = SwapAll(group, \"3\", \"e\")\n\t\th4 = SwapAll(group, \"e\", \"3\")\n\t\tg5 = SwapAll(group, \"4\", \"a\")\n\t\th5 = SwapAll(group, \"a\", \"4\")\n\t\tg6 = SwapAll(group, \"6\", \"g\")\n\t\th6 = SwapAll(group, \"g\", \"6\")\n\t\tg7 = SwapAll(group, \"8\", \"b\")\n\t\th7 = SwapAll(group, \"b\", \"8\")\n\t\tg8 = SwapAll(group, \"9\", \"g\")\n\t\th8 = SwapAll(group, \"g\", \"9\")\n\t\tfor _, match := range list {\n\t\t\tif Contains(match, matches) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch match {\n\t\t\tcase group, \"\":\n\t\t\t\tcontinue\n\t\t\tcase a0, a1, a2, b0, b1, b2, c0, c1, d0, d1, d2, d3, d4,\n\t\t\t\te0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12,\n\t\t\t\tf1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12,\n\t\t\t\tg0, g1, g2, g3, g4, g5, g6, g7, g8,\n\t\t\t\th0, h1, h2, h3, h4, h5, h6, h7, h8:\n\t\t\t\tg, err1 := Count(db, group)\n\t\t\t\tm, err2 := Count(db, match)\n\t\t\t\tfmt.Fprintf(w, \"%s %s %s (%d%s%d)\\n\", group, approx, match,\n\t\t\t\t\tg, approx, m)\n\t\t\t\tif err1 != nil {\n\t\t\t\t\tfmt.Fprintln(w, err1)\n\t\t\t\t}\n\t\t\t\tif err2 != nil 
{\n\t\t\t\t\tfmt.Fprintln(w, err2)\n\t\t\t\t}\n\t\t\t\tmatches = append(matches, match)\n\t\t\t\tsort.Strings(matches)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\treturn matchSummary(w, tick, len(matches), total)\n}",
"func EvalFormat(s string, data map[string]string) string {\n\tre := regexp.MustCompile(\"{{[A-Za-z]+}}\")\n\tres := re.ReplaceAllStringFunc(s, func(s string) string {\n\n\t\tif v, ok := data[s[2:len(s)-2]]; ok {\n\t\t\treturn v\n\t\t} else {\n\t\t\treturn \"\"\n\t\t}\n\t})\n\treturn res\n}",
"func matchPattern(fp string, patterns []MapPattern) (string, bool, error) {\n\tfp, err := realpath.Realpath(fp)\n\tif err != nil {\n\t\treturn \"\", false,\n\t\t\tErr(fmt.Errorf(\"failed to get the real path: %w\", err).Error())\n\t}\n\n\tfor _, pattern := range patterns {\n\t\tif pattern.Regex.MatchString(fp) {\n\t\t\tmatches := pattern.Regex.FindStringSubmatch(fp)\n\t\t\tif len(matches) > 0 {\n\t\t\t\tparams := make([]interface{}, len(matches[1:]))\n\t\t\t\tfor i, v := range matches[1:] {\n\t\t\t\t\tparams[i] = v\n\t\t\t\t}\n\n\t\t\t\tresult, err := pyfmt.Fmt(pattern.Name, params...)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"error formatting %q: %s\", pattern.Name, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\treturn result, true, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\", false, nil\n}",
"func TestLogFormat(t *testing.T) {\n\n\t// Log test message\n\tvar message string = \"this is a log entry\"\n\n\t// Create log entry\n\tInfo(message)\n\n\t// The log format should look like this:\n\t// INFO 2020/09/22 17:34:16 INFO this is a log entry\n\tvar pattern string = fmt.Sprintf(\"[A-Z]{1,4} [0-9]{1,4}/[0-9]{1,2}/[0-9]{1,2} [0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2} %s\", message)\n\tr, err := regexp.Compile(pattern)\n\n\tif err != nil {\n\t\tt.Log(err)\n\t\tt.Fail()\n\t}\n\n\tmatched := r.MatchString(buf.String())\n\tt.Log(matched)\n\n\tif matched != true {\n\t\tt.Log(\"The log entry did not match the regex pattern\")\n\n\t\t// Print the log output\n\t\tt.Log(buf.String())\n\t\tt.Fail()\n\t}\n}",
"func (r *RUT) Format(f Formatter) string {\n\tif f == nil {\n\t\tf = DefaultFormatter\n\t}\n\treturn f(r.number, r.verifier)\n}",
"func (me *Eliza) analyse(userinput string) string {\n\t// Loop through the responses, looking for a match for the user input.\n\tfor _, response := range me.responses {\n\t\tif matches := response.question.FindStringSubmatch(userinput); matches != nil {\n\n\t\t\t// Select a random answer.\n\t\t\tanswer := response.answers[rand.Intn(len(response.answers))]\n\n\t\t\t// Fill the answer with the captured groups from the matches.\n\t\t\tfor i, match := range matches[1:] {\n\t\t\t\t// Reflect the pronouns in the captured group.\n\t\t\t\tfor _, sub := range me.substitutions {\n\t\t\t\t\tmatch = sub.original.ReplaceAllString(match, sub.substitute)\n\t\t\t\t\t// Remove any spaces at the start or end.\n\t\t\t\t\tmatch = strings.TrimSpace(match)\n\t\t\t\t}\n\t\t\t\t// Replace $1 with the first reflected captured group, $2 with the second, etc.\n\t\t\t\tanswer = strings.Replace(answer, \"$\"+strconv.Itoa(i+1), match, -1)\n\t\t\t}\n\n\t\t\t// Clear any ~~ markers from the string. They prevent future matches.\n\t\t\tanswer = strings.Replace(answer, \"~~\", \"\", -1)\n\n\t\t\t// Send the filled answer back.\n\t\t\treturn answer\n\t\t}\n\t}\n\n\treturn \"I don't know what to say.\"\n}",
"func colorizeOutput(result string) string {\n\tvar output string\n\tswitch result {\n\tcase \"vulnerable\":\n\t\toutput = term.Redf(result)\n\tcase \"not vulnerable\", \"secure\":\n\t\toutput = term.Greenf(result)\n\tdefault:\n\t\toutput = result\n\t}\n\treturn output\n}",
"func formatUnequal(obtained interface{}, expected interface{}) string {\n\t// We do not do diffs for basic types because go-check already\n\t// shows them very cleanly.\n\tif !diffworthy(obtained) || !diffworthy(expected) {\n\t\treturn \"\"\n\t}\n\n\t// Handle strings, short strings are ignored (go-check formats\n\t// them very nicely already). We do multi-line strings by\n\t// generating two string slices and using kr.Diff to compare\n\t// those (kr.Diff does not do string diffs by itself).\n\taStr, aOK := obtained.(string)\n\tbStr, bOK := expected.(string)\n\tif aOK && bOK {\n\t\tl1 := strings.Split(aStr, \"\\n\")\n\t\tl2 := strings.Split(bStr, \"\\n\")\n\t\t// the \"2\" here is a bit arbitrary\n\t\tif len(l1) > 2 && len(l2) > 2 {\n\t\t\tdiff := pretty.Diff(l1, l2)\n\t\t\treturn fmt.Sprintf(`String difference:\n%s`, formatMultiLine(strings.Join(diff, \"\\n\"), false))\n\t\t}\n\t\t// string too short\n\t\treturn \"\"\n\t}\n\n\t// generic diff\n\tdiff := pretty.Diff(obtained, expected)\n\tif len(diff) == 0 {\n\t\t// No diff, this happens when e.g. just struct\n\t\t// pointers are different but the structs have\n\t\t// identical values.\n\t\treturn \"\"\n\t}\n\n\treturn fmt.Sprintf(`Difference:\n%s`, formatMultiLine(strings.Join(diff, \"\\n\"), false))\n}",
"func printMatches(ipAddressInt int64, awsGroups []*ec2.DescribeSecurityGroupsOutput, pretty bool) {\n\tw := newWriter(os.Stdout)\n\tfor _, group := range awsGroups {\n\t\tparsedGroups := sg.ParseSecurityGroups(group)\n\t\tfor _, parsedGroup := range parsedGroups {\n\t\t\tfor _, rule := range parsedGroup.Rules {\n\t\t\t\tfor _, ipRange := range rule.Networks {\n\t\t\t\t\tif sg.CompareIntIP(ipAddressInt, ipRange) {\n\t\t\t\t\t\tif pretty {\n\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tw.Write([]string{parsedGroup.Name, rule.TrafficDirection,\n\t\t\t\t\t\t\t\trule.Ports, ipRange.Cidr})\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\n\t}\n\tw.Flush()\n}",
"func (x ForeignKeyReference_Match) String() string {\n\tswitch x {\n\tcase ForeignKeyReference_SIMPLE:\n\t\treturn \"MATCH SIMPLE\"\n\tcase ForeignKeyReference_FULL:\n\t\treturn \"MATCH FULL\"\n\tcase ForeignKeyReference_PARTIAL:\n\t\treturn \"MATCH PARTIAL\"\n\tdefault:\n\t\treturn strconv.Itoa(int(x))\n\t}\n}",
"func findVersion(matches [][]string, version string) string {\n\t/*\n\t\tlog.Printf(\"Matches: %v\", matches)\n\t\tlog.Printf(\"Version: %v\", version)\n\t*/\n\n\tvar v string\n\n\tfor _, matchPair := range matches {\n\t\t// replace backtraces (max: 3)\n\t\tfor i := 1; i <= 3; i++ {\n\t\t\tbt := fmt.Sprintf(\"\\\\%v\", i)\n\t\t\tif strings.Contains(version, bt) && len(matchPair) >= i {\n\t\t\t\tv = strings.Replace(version, bt, matchPair[i], 1)\n\t\t\t}\n\t\t}\n\n\t\t// return first found version\n\t\tif v != \"\" {\n\t\t\treturn v\n\t\t}\n\n\t}\n\n\treturn \"\"\n}",
"func Format(name string) string {\n\n\tver := Get(name)\n\tif len(ver) == 0 {\n\t\treturn \"\"\n\t}\n\n\tif ver[0] != '0' {\n\t\treturn \"%d\"\n\t}\n\n\treturn fmt.Sprintf(\"%%0%dd\", len(ver))\n}",
"func (o LookupCertificateResultOutput) Format() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupCertificateResult) string { return v.Format }).(pulumi.StringOutput)\n}",
"func Format(name string) string {\n\tver := Get(name)\n\tif len(ver) == 0 {\n\t\treturn \"\"\n\t}\n\n\tif ver[0] != '0' {\n\t\treturn \"%d\"\n\t}\n\n\treturn fmt.Sprintf(\"%%0%dd\", len(ver))\n}",
"func (s MatchRange) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func Format(str string) string {\n\trandomizeSeed()\n\tpossibleCharacters := []rune(\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\")\n\tchars := []rune(str)\n\toutput := make([]rune, 0)\n\tfor _, char := range chars {\n\t\tif char == '#' {\n\t\t\tc := '0' + rune(rand.Intn('9'-'0'+1))\n\t\t\toutput = append(output, c)\n\t\t}\n\n\t\tif char == '?' {\n\t\t\tc := possibleCharacters[rand.Intn(len(possibleCharacters))]\n\t\t\toutput = append(output, c)\n\t\t}\n\n\t\tif char != '#' && char != '?' {\n\t\t\toutput = append(output, char)\n\t\t}\n\t}\n\n\treturn string(output)\n}",
"func FormatString(value string, params map[string]string) string {\n // value should have at least 3 chars to be able to have key.\n if len(value) <= 2 {\n return value\n }\n\n // To avoid mistake when passing Upper or lower case, the params' keys need to be convert to lower case all.\n for k, v := range params {\n delete(params, k)\n params[strings.ToLower(k)] = v\n }\n\n // The result string can be use strings.Builder to process a large value string.\n var result, key string\n index := 0\n size := len(value)\n\n for index < size {\n currentChar := fmt.Sprintf(\"%c\", value[index])\n if currentChar == \"{\" {\n // if key already had value. E.g: {abc{, {{abc{, -> key need to be reset before continue processing\n if key != \"{\" && key != \"{{\" {\n result += key\n key = \"\"\n } else {\n // this covers case that has \"{{{\" -> result need to be added one 1 and continue with key's value \"{{\"\n // this logic mentioned in the last testcase.\n if key == \"{{\" {\n result += \"{\"\n key = \"{\"\n }\n }\n key += \"{\"\n } else if currentChar == \"}\" && strings.Index(key, \"{\") == 0 { // There are only 3 cases.E.g abc}, {{abc}, and {abc}. We only care about 2 last cases.\n keyValue := \"\"\n key += \"}\"\n // As above logic when we detect \"{\". The value of key can only be started with \"{\" or \"{{\".\n // So we only need to check if this key is {{ }} or not.\n if strings.Index(key, \"{{\") == 0 {\n if index < size - 1 && fmt.Sprintf(\"%c\", value[index+1]) == \"}\" { // key has format {{..}}\n index ++\n result += key[1:] //We cut down 1 { } and put value again into result.\n key = \"\"\n } else { // cover key with format {{....}.\n keyValue = key[1: len(key) -1]\n result += \"{\"\n keyValue = key[2: len(key) -1]\n }\n } else {\n keyValue = key[1: len(key) -1]\n }\n if keyValue != \"\" {\n if v, found := params[strings.ToLower(keyValue)]; found { // if the key is in params, we will replace with value in params, if not we will use keyValue.\n result += v\n } else {\n result = fmt.Sprintf(\"%s{%s}\", result, keyValue)\n }\n }\n key = \"\"\n } else {\n key += currentChar\n }\n index++\n }\n // There are some cases that key has value but not completed as a fully pattern in the end of value.\n return result + key\n}",
"func (daemon *DaemonListening) getHostRegexFormat() (string, error) {\n\t// Return wildcard if no hosts were specified by the user\n\tif len(daemon.Hosts) == 0 {\n\t\treturn \".*\", nil\n\t}\n\t// Get all hosts in a regex-ready format\n\tvar hostsFormat bytes.Buffer\n\tfor i, h := range daemon.Hosts {\n\t\tif i != 0 {\n\t\t\thostsFormat.WriteString(\"|\")\n\t\t}\n\n\t\thostHex, err := network.GetPackedReprFromIP(h)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tdaemon.contextLogger.Tracef(\"Scanning host [%s] with HEX [%s]\", h, hostHex)\n\t\thostsFormat.WriteString(fmt.Sprintf(\"(%s)\", hostHex))\n\t}\n\treturn hostsFormat.String(), nil\n}",
"func (a MatchAction) formatFast(buf *TrackedBuffer) {\n\tswitch a {\n\tcase Full:\n\t\tbuf.WriteString(\"full\")\n\tcase Simple:\n\t\tbuf.WriteString(\"simple\")\n\tcase Partial:\n\t\tbuf.WriteString(\"partial\")\n\t}\n}",
"func formatValue(s string) string {\n\tif KEYWORD(s).isValid() {\n\t\treturn s\n\t}\n\treturn fmt.Sprintf(\"'%s'\", s)\n}",
"func (ctx *TemplateContext) formatMap() (out string) {\n\talphaSortMap(ctx.substitutionsMap,\n\t\tfunc(s string) {\n\t\t\tv := ctx.substitutionsMap[s]\n\t\t\tconst TRIM = 80\n\t\t\tif len(v) > TRIM {\n\t\t\t\tv = v[:TRIM] + \"...\"\n\t\t\t}\n\t\t\tout += fmt.Sprintf(\" % 20s '%v'\\n\\n\", s, v)\n\t\t})\n\treturn\n}",
"func (m *Match) printMatchTooLong() {\n\tfmt.Printf(\"%s%s%s%s:%s:%s%s%s%s%s%s%s%s\\n\",\n\t\tcolors.Purple,\n\t\tm.Path,\n\t\tcolors.Restore,\n\t\tcolors.Green,\n\t\tstrconv.Itoa(m.LineNumber),\n\t\tcolors.Restore,\n\t\tcolors.Yellow,\n\t\t\"<match exceeded maximum length of \",\n\t\tcolors.Restore,\n\t\tstrconv.Itoa(m.MaxLength),\n\t\tcolors.Yellow,\n\t\t\">\",\n\t\tcolors.Restore,\n\t)\n}",
"func printfFormatStringVerbs(info types.Info, call *ast.CallExpr) ([]string, bool) {\n\tif len(call.Args) <= 1 {\n\t\treturn nil, false\n\t}\n\tstrLit, ok := call.Args[0].(*ast.BasicLit)\n\tif !ok {\n\t\t// Ignore format strings that are not literals.\n\t\treturn nil, false\n\t}\n\tformatString := constant.StringVal(info.Types[strLit].Value)\n\n\tpp := printfParser{str: formatString}\n\tverbs, err := pp.ParseAllVerbs()\n\tif err != nil {\n\t\treturn nil, false\n\t}\n\torderedVerbs := verbOrder(verbs, len(call.Args)-1)\n\n\tresolvedVerbs := make([]string, len(orderedVerbs))\n\tfor i, vv := range orderedVerbs {\n\t\tfor _, v := range vv {\n\t\t\tresolvedVerbs[i] = v.format\n\t\t\tif v.format == \"w\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn resolvedVerbs, true\n}",
"func match(a, b string) bool {\n\treturn strings.EqualFold(a, b)\n}",
"func (m Match) ID() string {\n\treturn fmt.Sprintf(\"%s\", m.Date.Format(\"20060102\"))\n}",
"func FaStringMatcher(\n\tT string,\n\tnextState []map[string]int,\n\tm int,\n\tn int,\n) {\n\t// Starting from the empty substring of T\n\tstate := 0\n\n\tfor i := 1; i <= n; i++ {\n\t\t// From the current state, what's the next state\n\t\t// given the new character T[i-1]?\n\t\tstate = nextState[state][string(T[i-1])]\n\n\t\tif state == m {\n\t\t\tfmt.Printf(\"The pattern occurs with shift %d.\\n\", i-m)\n\t\t}\n\t}\n}",
"func TestFormatName(t *testing.T) {\n\ttables := []struct {\n\t\tname string\n\t\tconversion string\n\t}{\n\t\t{\"\", \"\"},\n\t\t{\"DxChain\", \"dxChain\"},\n\t\t{\"dxchain\", \"dxchain\"},\n\t}\n\n\tfor _, table := range tables {\n\t\tresult := formatName(table.name)\n\t\tif result != table.conversion {\n\t\t\tt.Errorf(\"input string %s, got %s, expected %s\",\n\t\t\t\ttable.name, result, table.conversion)\n\t\t}\n\t}\n}",
"func HumanizeUsage(usage string) string {\n\treturn HumanizeUsageRegex.ReplaceAllString(usage, \"$1$2$3$4\")\n}",
"func (c Provider) Match(query string) string {\n\tsm := SourceRegex.FindStringSubmatch(query)\n\tif len(sm) != 2 {\n\t\treturn \"\"\n\t}\n\treturn sm[1]\n}",
"func TestFormatRedirect(t *testing.T) {\n\tv := &fmter{}\n\tif expected, actual := \"‹hello›\\n‹world›\", string(Sprintf(\"%v\", v)); expected != actual {\n\t\tt.Errorf(\"expected %q, got %q\", expected, actual)\n\t}\n\tif expected, actual := \"‹hello›\\n‹world›\", string(Sprintf(\"%+v\", v)); expected != actual {\n\t\tt.Errorf(\"expected %q, got %q\", expected, actual)\n\t}\n}",
"func difficultyColorizer(diff string) string {\n\tswitch diff {\n\tcase \"easy\":\n\t\treturn color.GreenString(\"easy\")\n\tcase \"medium\":\n\t\treturn color.YellowString(\"medium\")\n\tcase \"hard\":\n\t\treturn color.HiRedString(\"hard\")\n\tcase \"insane\":\n\t\treturn color.RedString(\"insane\")\n\t}\n\n\treturn \"\"\n}",
"func (o MatcherOptions) Format(w fmt.State, verb rune) {\n\tfor i, mo := range o.Types {\n\t\tif i > 0 {\n\t\t\tfmt.Fprintf(w, \" | \")\n\t\t}\n\t\tmo.Format(w, verb)\n\t}\n\tfor i, mo := range o.Enums {\n\t\tif i > 0 {\n\t\t\tfmt.Fprintf(w, \" | \")\n\t\t}\n\t\tmo.Format(w, verb)\n\t}\n}",
"func StringMatchesPattern(re *regexp.Regexp, reDesc string) String {\n\treturn func(v string) error {\n\t\tif !re.MatchString(v) {\n\t\t\treturn fmt.Errorf(\"%s does not match the pattern: %s\",\n\t\t\t\tv, reDesc)\n\t\t}\n\t\treturn nil\n\t}\n}",
"func runMatch(checkConfig sigCheck, outputToSearch string) bool {\n\tmatcherFound := false\n\tnoRegexMatcherFound := false\n\n\t// Determine what type of matcher was provided\n\tmatchers := checkConfig.Matchers\n\tfor _, matcher := range matchers {\n\t\tmatcherType := matcher.Type\n\t\tif matcherType == \"\" || strings.ToLower(matcherType) == \"regex\" {\n\t\t\tstrToSearch := strings.ReplaceAll(outputToSearch, \"\\n\", Delim)\n\t\t\tstrToSearch = strings.ReplaceAll(strToSearch, \"\\r\", Delim)\n\n\t\t\t// First check if there is regex that should not be present\n\t\t\tnoRegex := matcher.NoRegex\n\t\t\tif noRegex != \"\" {\n\t\t\t\tfound, err := regexp.MatchString(noRegex, strToSearch)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"[-] Regex Error when running NoRegex search: %s\\n\", err.Error())\n\t\t\t\t\tlog.Fatalf(\"[-] Regex Error when running NoRegex search: %s\\n\", err.Error())\n\t\t\t\t}\n\t\t\t\tif found {\n\t\t\t\t\tmatcherFound = false\n\t\t\t\t\tnoRegexMatcherFound = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Then search for positive regex\n\t\t\tregex := matcher.Regex\n\t\t\t//fmt.Printf(\"Regex, strToSearch[0:100]: %s, %s\", regex, strToSearch[0:100])\n\t\t\tif !noRegexMatcherFound {\n\t\t\t\t//fmt.Printf(\"regex: %s, strToSearch: %s\\n\", regex, strToSearch[0:200])\n\t\t\t\tfound, err := regexp.MatchString(regex, strToSearch)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"[-] Regex Error when running Regex search: %s\\n\", err.Error())\n\t\t\t\t\tlog.Fatalf(\"[-] Regex Error when running Regex search: %s\\n\", err.Error())\n\t\t\t\t}\n\t\t\t\tif found {\n\t\t\t\t\tmatcherFound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Fatalf(\"[-] Unknown matcher type: %s\\n\", matcherType)\n\t\t}\n\t}\n\treturn matcherFound\n}",
"func (MatchedText) Matches(pattern string) bool { return boolResult }",
"func Verse(v int) string {\n\n\tdays := map[int]string{\n\t\t1: \"first\",\n\t\t2: \"second\",\n\t\t3: \"third\",\n\t\t4: \"fourth\",\n\t\t5: \"fifth\",\n\t\t6: \"sixth\",\n\t\t7: \"seventh\",\n\t\t8: \"eighth\",\n\t\t9: \"ninth\",\n\t\t10: \"tenth\",\n\t\t11: \"eleventh\",\n\t\t12: \"twelfth\",\n\t}\n\n\tgifts := map[int]string{\n\t\t1: \"a Partridge in a Pear Tree\",\n\t\t2: \"two Turtle Doves\",\n\t\t3: \"three French Hens\",\n\t\t4: \"four Calling Birds\",\n\t\t5: \"five Gold Rings\",\n\t\t6: \"six Geese-a-Laying\",\n\t\t7: \"seven Swans-a-Swimming\",\n\t\t8: \"eight Maids-a-Milking\",\n\t\t9: \"nine Ladies Dancing\",\n\t\t10: \"ten Lords-a-Leaping\",\n\t\t11: \"eleven Pipers Piping\",\n\t\t12: \"twelve Drummers Drumming\",\n\t}\n\n\tgift := \"\"\n\tfor d := v; d >= 1; d-- {\n\n\t\tif v != 1 && d == 1 {\n\t\t\tgift = fmt.Sprintf(\"%s, and %s\", gift, gifts[d])\n\t\t} else {\n\t\t\tgift = fmt.Sprintf(\"%s, %s\", gift, gifts[d])\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"On the %s day of Christmas my true love gave to me%s.\", days[v], gift)\n}",
"func (o AliasOutput) Format() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Alias) pulumi.StringOutput { return v.Format }).(pulumi.StringOutput)\n}",
"func (o RouterNatRuleOutput) Match() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v RouterNatRule) *string { return v.Match }).(pulumi.StringPtrOutput)\n}",
"func createCheckText(text string) string {\n\t// Always lower case the string so we don't need case-insensitive regex's\n\tnewText := strings.ToLower(text)\n\n\t// Space out anything that we shouldn't search...\n\n\t// URL's\n\tnewText = xurls.Strict.ReplaceAllStringFunc(newText, func(toRep string) string {\n\t\treturn strings.Repeat(\" \", len(toRep))\n\t})\n\n\treturn newText\n}",
"func (s AvailMatchingCriteria) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func TitleFromCountryCode(match string) string {\n\tfor _, c := range countryCodes {\n\t\tif match == c[0:2] {\n\t\t\treturn c\n\t\t}\n\t}\n\treturn \"\"\n}",
"func Format(msgs ...interface{}) string {\n\tif len(msgs) == 0 || msgs == nil {\n\t\treturn \"\"\n\t}\n\tif len(msgs) == 1 {\n\t\tif v, ok := msgs[0].(string); ok {\n\t\t\treturn v\n\t\t}\n\t\tif v, ok := msgs[0].(error); ok {\n\t\t\treturn v.Error()\n\t\t}\n\t}\n\tif len(msgs) > 1 {\n\t\treturn fmt.Sprintf(msgs[0].(string), msgs[1:]...)\n\t}\n\treturn \"\"\n}",
"func (m *Match) printMatchClip() {\n\tstartStr := \"...\"\n\tendStr := \"...\"\n\tstart := m.Match[0] - SideBuffer\n\tend := m.Match[1] + SideBuffer\n\n\tif start < 0 {\n\t\tstart = 0\n\t\tstartStr = \"\"\n\t}\n\tif end > len(m.Line)-1 {\n\t\tend = len(m.Line) - 1\n\t\tendStr = \"\"\n\t}\n\n\tfmt.Printf(\"%s%s%s%s:%s:%s%s%s%s%s%s%s%s%s%s%s%s\\n\",\n\t\tcolors.Purple,\n\t\tm.Path,\n\t\tcolors.Restore,\n\t\tcolors.Green,\n\t\tstrconv.Itoa(m.LineNumber),\n\t\tcolors.Restore,\n\t\tcolors.Yellow,\n\t\tstartStr,\n\t\tcolors.Restore,\n\t\tstring(m.Line[start:m.Match[0]]),\n\t\tcolors.LightRed,\n\t\tstring(m.Line[m.Match[0]:m.Match[1]]),\n\t\tcolors.Restore,\n\t\tstring(m.Line[m.Match[1]:end]),\n\t\tcolors.Yellow,\n\t\tendStr,\n\t\tcolors.Restore,\n\t)\n}",
"func toPattern(theType string) string {\n\tvar thePattern string\n\n\tswitch theType {\n\tcase \"string\":\n\t\tthePattern = \"\\\\w+\" //xmmm or [a-zA-Z0-9]+\n\n\tcase \"int\":\n\t\tthePattern = \"[0-9]+\"\n\tdefault:\n\n\t}\n\n\treturn thePattern\n}",
"func FormatRuleString(in string) string {\n\tout := \"\"\n\tindent := 0\n\tfor _, char := range in {\n\t\tif char == ')' {\n\t\t\tindent--\n\t\t\tout = out + \"\\n\" + strings.Repeat(\" \", indent*4)\n\t\t}\n\t\tout = out + string(char)\n\t\tif char == '(' {\n\t\t\tindent++\n\t\t\tout = out + \"\\n\" + strings.Repeat(\" \", indent*4)\n\t\t}\n\t}\n\treturn out\n}",
"func matcher(text, pat string, leadingPercent bool) (res string, match bool) {\n\tif !leadingPercent {\n\t\tres = strings.TrimPrefix(text, pat)\n\t\tif len(text) == len(res) {\n\t\t\treturn \"\", false\n\t\t}\n\t} else {\n\t\tparts := strings.SplitN(text, pat, 2)\n\t\tif len(parts) == 1 {\n\t\t\treturn \"\", false\n\t\t}\n\t\tres = parts[1]\n\t}\n\treturn res, true\n}",
"func GetStatusString(ballot *models.Ballot) string {\n\ttemplateBasic := `Tezos address %s voted \"%s\" %son #Tezos proposal \"%s\"%s`\n\ttemplateVanity := `Tezos baker \"%s\" /%s voted \"%s\" %son #Tezos proposal \"%s\"%s`\n\t// TODO(jev) update to query Proposal vanity name for DNS\n\tproposalVanityName := \"Athens A\"\n\n\ttemplateRolls := \"\"\n\tif ballot.Rolls != 0 {\n\t\ttemplateRolls = fmt.Sprintf(\"with %d rolls \", ballot.Rolls)\n\t}\n\n\ttemplateQuorum := \"and quorum has been reached.\"\n\tpercentTowardQuorum := ballot.PercentTowardQuorum()\n\tif percentTowardQuorum > 0 {\n\t\ttemplateQuorum = fmt.Sprintf(\"with %.2f%% remaining to reach %.2f%% quorum.\", percentTowardQuorum, ballot.Quorum)\n\t}\n\ttemplateStatus := fmt.Sprintf(\"\\n\\nVote status is %.2f%% yay/%.2f%% nay, %s\", ballot.CountingPercentYay(), ballot.CountingPercentNay(), templateQuorum)\n\n\t// tz.tezz.ie is an experimental DNS zone to resolve vanity names from tz\n\t// addresses\n\taddress, err := LookupTZName(ballot.PKH, \"tz.tezz.ie\")\n\n\tif err != nil {\n\t\tlog.Printf(\"No address found for %s, err: %s\", ballot.PKH, err)\n\t\treturn fmt.Sprintf(templateBasic, ballot.PKH, ballot.Ballot, templateRolls, proposalVanityName, templateStatus)\n\t}\n\tlog.Printf(\"Address %s found for %s, \", address, ballot.PKH)\n\treturn fmt.Sprintf(templateVanity, address, ballot.PKH, ballot.Ballot, templateRolls, proposalVanityName, templateStatus)\n\n}",
"func (o HttpHeaderMatchResponseOutput) RegexMatch() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HttpHeaderMatchResponse) string { return v.RegexMatch }).(pulumi.StringOutput)\n}",
"func printMyResult(sentence string) string {\n\tnewSentence := \"Saya sedang belajar \" + sentence\n\treturn newSentence\n}",
"func (m AlertMessage) Format() string {\n\treturn wrapAlert(fmt.Sprintf(\"High traffic generated an alert - hits = %d, triggered at %s\", m.Hits, m.Time.Format(timeFormat)))\n}",
"func (quiz *Quiz) FormatResult(numberCorrectAnswers int, numberQuestions int) string {\n\tresultString := fmt.Sprintf(\"You got %d of %d correct answers.\", numberCorrectAnswers, numberQuestions)\n\n\treturn resultString\n}",
"func (node *ShowFilter) Format(buf *TrackedBuffer) {\n\tif node == nil {\n\t\treturn\n\t}\n\tif node.Like != \"\" {\n\t\tbuf.astPrintf(node, \" like '%s'\", node.Like)\n\t} else {\n\t\tbuf.astPrintf(node, \" where %v\", node.Filter)\n\t}\n}",
"func GetMatchString(str string) string {\n\tescapeStr := GetEscapeString(str)\n\tmatchString := make([]byte, 0, 10)\n\tmatchString = append(matchString, \"%\"...)\n\tmatchString = append(matchString, escapeStr...)\n\tmatchString = append(matchString, \"%\"...)\n\n\treturn string(matchString)\n}",
"func hackedFormat(L *lua.LState) int {\n\tstr := L.CheckString(1)\n\targs := make([]any, L.GetTop()-1)\n\ttop := L.GetTop()\n\tfor i := 2; i <= top; i++ {\n\t\targs[i-2] = L.Get(i)\n\t}\n\tnpat := strings.Count(str, \"%\") - strings.Count(str, \"%%\")\n\n\tret := hexFinder.ReplaceAllStringFunc(fmt.Sprintf(str, args[:intMin(npat, len(args))]...), func(s string) string {\n\t\tdec, err := strconv.ParseUint(s[2:], 16, 8)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn fmt.Sprintf(\"\\\\%03d\", dec)\n\t})\n\n\tL.Push(lua.LString(ret))\n\treturn 1\n}",
"func GetBestEnglishMatch(results []string) string {\n\tmax := 0\n\tstr := \"\"\n\tscore := 0\n\n\tfor _, result := range results {\n\t\tscore = GetScore(result)\n\n\t\tif score > max {\n\t\t\tmax = score\n\t\t\tstr = result\n\t\t}\n\t}\n\n\treturn str\n}",
"func (s RegexMatchTuple) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func formatStrftime(in string) string {\n\treplacements := map[string]string{\n\t\t\"%p\": \"PM\",\n\t\t\"%Y\": \"2006\",\n\t\t\"%y\": \"06\",\n\t\t\"%m\": \"01\",\n\t\t\"%d\": \"02\",\n\t\t\"%H\": \"15\",\n\t\t\"%M\": \"04\",\n\t\t\"%S\": \"05\",\n\t}\n\n\tout := in\n\n\tfor bad, good := range replacements {\n\t\tout = strings.ReplaceAll(out, bad, good)\n\t}\n\treturn out\n}",
"func (m *matcher) String() string {\n\treturn fmt.Sprintf(\"%v\", m.StoredData)\n}",
"func (s GetMatchIdOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func statusMsg(prev string, desc string, args ...interface{}) string {\n\tmsg := fmt.Sprintf(desc, args...)\n\tn := len(msg)\n\t// FIXME this optimisation is incorrect, it sometimes eats up first 3 chars\n\tif false && n > 3 && n == len(prev) && msg[:n-3] == prev[:n-3] {\n\t\tfmt.Print(\"\\b\\b\\b\", msg[n-3:]) // optimise if only end changes\n\t} else {\n\t\tif len(msg) < len(prev) {\n\t\t\tfmt.Print(\"\\r\", strings.Repeat(\" \", len(prev)))\n\t\t}\n\t\tfmt.Print(\"\\r\", msg)\n\t}\n\treturn msg\n}",
"func (md *MangaDetails) FormatManga() string {\n\ts := \"\\n\\n\"\n\n\ts += fmt.Sprintf(\"\\t\\t\\u001b[33mTITLE:\\u001b[0m %s\\n\\n\", md.Manga.Name)\n\ts += fmt.Sprintf(\"\\t\\t\\u001b[33mALT_NAME:\\u001b[0m %s\\n\\n\", md.Manga.Alternatives)\n\ts += fmt.Sprintf(\"\\t\\t\\u001b[33mSTATUS:\\u001b[0m %s\\n\\n\", md.Manga.Status)\n\n\tvar genres string\n\tfor _, v := range md.Manga.Genres {\n\t\tgenres += v.GenreName + \"\\t\"\n\n\t\t// maps genres to ids so user can search by genre\n\t\tmd.NameToIDMap[v.GenreName] = v.ID\n\t\t// improves user experience by allowing to search with\n\t\t// all lower case or all upper case\n\t\tmd.NameToIDMap[strings.ToLower(v.GenreName)] = v.ID\n\t\tmd.NameToIDMap[strings.ToUpper(v.GenreName)] = v.ID\n\t}\n\n\ts += fmt.Sprintf(\"\\t\\t\\u001b[33mGENRES:\\u001b[0m %s\\n\\n\", genres)\n\ts += fmt.Sprintf(\"\\t\\t\\u001b[33mAUTHOR:\\u001b[0m %s\\n\\n\", md.Manga.Author.Name)\n\ts += fmt.Sprintf(\"\\t\\t\\u001b[33mUPDATED:\\u001b[0m %s\\n\\n\", md.Manga.Updated)\n\ts += fmt.Sprintf(\"\\t\\t\\u001b[33mVIEWS:\\u001b[0m %s\\n\\n\", md.Manga.Views)\n\ts += fmt.Sprintf(\"\\t\\t\\u001b[33mRATING:\\u001b[0m %s\\n\\n\", md.Manga.Rating)\n\ts += fmt.Sprintf(\"\\t\\t\\u001b[33mDESCRIPTION:\\u001b[0m %s\\n\\n\", md.Manga.Description)\n\n\treturn s\n}",
"func OkMatchesString(label, val, regex string, t *testing.T) {\n\tre := regexp.MustCompile(regex)\n\tif re.MatchString(val) {\n\t\tt.Logf(\"ok - %s: '%s' matches '%s'\\n\", label, val, regex)\n\t} else {\n\t\tt.Logf(\"not ok - %s: String '%s' doesn't match '%s'\", label, val, regex)\n\t\tt.Fail()\n\t}\n}",
"func Match(prefix string) string {\n\tfor _, enc := range defaultEncodings {\n\t\thint := enc.Match(prefix)\n\t\tif hint != \"\" {\n\t\t\treturn hint\n\t\t}\n\t}\n\treturn \"\"\n}"
] | [
"0.61389",
"0.5814977",
"0.5773067",
"0.5701211",
"0.5525912",
"0.54897654",
"0.5479896",
"0.543582",
"0.5378268",
"0.5328926",
"0.5307862",
"0.5284562",
"0.52692956",
"0.52632475",
"0.52471685",
"0.52363086",
"0.52283716",
"0.521719",
"0.518946",
"0.5162455",
"0.5155652",
"0.5152539",
"0.51177454",
"0.5111134",
"0.50963664",
"0.50894994",
"0.50716347",
"0.5069923",
"0.5063501",
"0.50416845",
"0.503494",
"0.5019125",
"0.5011595",
"0.4999778",
"0.49916887",
"0.49911085",
"0.49689013",
"0.4961226",
"0.4951238",
"0.49505913",
"0.49308974",
"0.49270633",
"0.49250993",
"0.4907409",
"0.48655877",
"0.48652223",
"0.48632824",
"0.4860603",
"0.4859149",
"0.4855154",
"0.48419616",
"0.48348323",
"0.48315135",
"0.48173812",
"0.4808124",
"0.47903886",
"0.47880918",
"0.47866994",
"0.4775302",
"0.47710478",
"0.4767372",
"0.4760409",
"0.47542548",
"0.47517473",
"0.4751673",
"0.47410578",
"0.4738035",
"0.47322235",
"0.47320932",
"0.47283232",
"0.47264138",
"0.47252116",
"0.47215965",
"0.47188142",
"0.47134683",
"0.4713281",
"0.47129202",
"0.47080997",
"0.47028714",
"0.47000855",
"0.46956772",
"0.46935105",
"0.469278",
"0.469187",
"0.4691202",
"0.46903023",
"0.4678895",
"0.46785498",
"0.4672959",
"0.46630922",
"0.4657168",
"0.46568355",
"0.46554023",
"0.46551844",
"0.46494138",
"0.46480888",
"0.4634955",
"0.46158102",
"0.46147513",
"0.46146253"
] | 0.55583423 | 4 |
LexerEngine does the actual tokenization of the byte slice text using the NFA bytecode in program. If the lexing process fails, the Scanner will return an UnconsumedInput error. | func LexerEngine(program inst.Slice, text []byte) Scanner {
done := false
matchPC := -1
matchTC := -1
prevTC := 0
line := 1
col := 1
var scan Scanner
var cqueue, nqueue *queue.Queue = queue.New(len(program)), queue.New(len(program))
scan = func(tc int) (int, *Match, error, Scanner) {
if done && tc == len(text) {
return tc, nil, nil, nil
}
startTC := tc
if tc < matchTC {
// we back-tracked so reset the last matchTC
matchTC = -1
} else if tc == matchTC {
			// the caller did not reset the tc, we are where we left off
} else if matchTC != -1 && tc > matchTC {
// we skipped text
matchTC = tc
}
cqueue.Clear()
nqueue.Clear()
cqueue.Push(0)
for ; tc <= len(text); tc++ {
if cqueue.Empty() {
break
}
for !cqueue.Empty() {
pc := cqueue.Pop()
i := program[pc]
switch i.Op {
case inst.CHAR:
x := byte(i.X)
y := byte(i.Y)
if tc < len(text) && x <= text[tc] && text[tc] <= y {
nqueue.Push(pc + 1)
}
case inst.MATCH:
if matchTC < tc {
matchPC = int(pc)
matchTC = tc
} else if matchPC > int(pc) {
matchPC = int(pc)
matchTC = tc
}
case inst.JMP:
cqueue.Push(i.X)
case inst.SPLIT:
cqueue.Push(i.X)
cqueue.Push(i.Y)
default:
panic(fmt.Errorf("unexpected instruction %v", i))
}
}
cqueue, nqueue = nqueue, cqueue
if cqueue.Empty() && matchPC > -1 {
line, col = computeLineCol(text, prevTC, startTC, line, col)
eLine, eCol := computeLineCol(text, startTC, matchTC-1, line, col)
match := &Match{
PC: matchPC,
TC: startTC,
StartLine: line,
StartColumn: col,
EndLine: eLine,
EndColumn: eCol,
Bytes: text[startTC:matchTC],
}
prevTC = startTC
matchPC = -1
return tc, match, nil, scan
}
}
if matchTC != len(text) && startTC >= len(text) {
// the user has moved us farther than the text. Assume that was
// the intent and return EOF.
return tc, nil, nil, nil
} else if matchTC != len(text) {
done = true
if matchTC == -1 {
matchTC = 0
}
sline, scol := computeLineCol(text, 0, startTC, 1, 1)
fline, fcol := computeLineCol(text, 0, tc, 1, 1)
err := &UnconsumedInput{
StartTC: startTC,
FailTC: tc,
StartLine: sline,
StartColumn: scol,
FailLine: fline,
FailColumn: fcol,
Text: text,
}
return tc, nil, err, scan
} else {
return tc, nil, nil, nil
}
}
return scan
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func newLexerWithInit(in io.Reader, initFun func(*lexer)) *lexer {\n type dfa struct {\n acc []bool // Accepting states.\n f []func(rune) int // Transitions.\n startf, endf []int // Transitions at start and end of input.\n nest []dfa\n }\n yylex := new(lexer)\n if initFun != nil {\n initFun(yylex)\n }\n yylex.ch = make(chan frame)\n var scan func(in *bufio.Reader, ch chan frame, family []dfa, line, column int)\n scan = func(in *bufio.Reader, ch chan frame, family []dfa, line, column int) {\n // Index of DFA and length of highest-precedence match so far.\n matchi, matchn := 0, -1\n var buf []rune\n n := 0\n checkAccept := func(i int, st int) bool {\n // Higher precedence match? DFAs are run in parallel, so matchn is at most len(buf), hence we may omit the length equality check.\n if family[i].acc[st] && (matchn < n || matchi > i) {\n matchi, matchn = i, n\n return true\n }\n return false\n }\n var state [][2]int\n for i := 0; i < len(family); i++ {\n mark := make([]bool, len(family[i].startf))\n // Every DFA starts at state 0.\n st := 0\n for {\n state = append(state, [2]int{i, st})\n mark[st] = true\n // As we're at the start of input, follow all ^ transitions and append to our list of start states.\n st = family[i].startf[st]\n if -1 == st || mark[st] { break }\n // We only check for a match after at least one transition.\n checkAccept(i, st)\n }\n }\n atEOF := false\n for {\n if n == len(buf) && !atEOF {\n r,_,err := in.ReadRune()\n switch err {\n case io.EOF: atEOF = true\n case nil: buf = append(buf, r)\n default: panic(err)\n }\n }\n if !atEOF {\n r := buf[n]\n n++\n var nextState [][2]int\n for _, x := range state {\n x[1] = family[x[0]].f[x[1]](r)\n if -1 == x[1] { continue }\n nextState = append(nextState, x)\n checkAccept(x[0], x[1])\n }\n state = nextState\n } else {\ndollar: // Handle $.\n for _, x := range state {\n mark := make([]bool, len(family[x[0]].endf))\n for {\n mark[x[1]] = true\n x[1] = family[x[0]].endf[x[1]]\n if -1 == x[1] || mark[x[1]] { break }\n if checkAccept(x[0], x[1]) {\n // Unlike before, we can break off the search. Now that we're at the end, there's no need to maintain the state of each DFA.\n break dollar\n }\n }\n }\n state = nil\n }\n\n if state == nil {\n lcUpdate := func(r rune) {\n if r == '\\n' {\n line++\n column = 0\n } else {\n column++\n }\n }\n // All DFAs stuck. 
Return last match if it exists, otherwise advance by one rune and restart all DFAs.\n if matchn == -1 {\n if len(buf) == 0 { // This can only happen at the end of input.\n break\n }\n lcUpdate(buf[0])\n buf = buf[1:]\n } else {\n text := string(buf[:matchn])\n buf = buf[matchn:]\n matchn = -1\n ch <- frame{matchi, text, line, column}\n if len(family[matchi].nest) > 0 {\n scan(bufio.NewReader(strings.NewReader(text)), ch, family[matchi].nest, line, column)\n }\n if atEOF {\n break\n }\n for _, r := range text {\n lcUpdate(r)\n }\n }\n n = 0\n for i := 0; i < len(family); i++ {\n state = append(state, [2]int{i, 0})\n }\n }\n }\n ch <- frame{-1, \"\", line, column}\n }\n go scan(bufio.NewReader(in), yylex.ch, []dfa{\n// \\\"((\\\\\\\")|(\\\\\\\\)|(\\\\\\/)|(\\\\b)|(\\\\f)|(\\\\n)|(\\\\r)|(\\\\t)|(\\\\u[0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F])|[^\\\"])*\\\"\n{[]bool{false, false, true, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false}, []func(rune) int{ // Transitions\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 1\n\t\tcase 47: return -1\n\t\tcase 92: return -1\n\t\tcase 98: return -1\n\t\tcase 102: return -1\n\t\tcase 110: return -1\n\t\tcase 114: return -1\n\t\tcase 116: return -1\n\t\tcase 117: return -1\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return -1\n\t\tcase 65 <= r && r <= 70: return -1\n\t\tcase 48 <= r && r <= 57: return -1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 2\n\t\tcase 47: return 4\n\t\tcase 92: return 3\n\t\tcase 98: return 4\n\t\tcase 102: return 4\n\t\tcase 110: return 4\n\t\tcase 114: return 4\n\t\tcase 116: return 4\n\t\tcase 117: return 4\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return 4\n\t\tcase 65 <= r && r <= 70: return 4\n\t\tcase 48 <= r && r <= 57: return 4\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return -1\n\t\tcase 47: return -1\n\t\tcase 92: return -1\n\t\tcase 98: return -1\n\t\tcase 102: return -1\n\t\tcase 110: return -1\n\t\tcase 114: return -1\n\t\tcase 116: return -1\n\t\tcase 117: return -1\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 57: return -1\n\t\tcase 65 <= r && r <= 70: return -1\n\t\tcase 97 <= r && r <= 102: return -1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 7\n\t\tcase 47: return 5\n\t\tcase 92: return 8\n\t\tcase 98: return 12\n\t\tcase 102: return 13\n\t\tcase 110: return 9\n\t\tcase 114: return 6\n\t\tcase 116: return 10\n\t\tcase 117: return 11\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return 4\n\t\tcase 65 <= r && r <= 70: return 4\n\t\tcase 48 <= r && r <= 57: return 4\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 2\n\t\tcase 47: return 4\n\t\tcase 92: return 3\n\t\tcase 98: return 4\n\t\tcase 102: return 4\n\t\tcase 110: return 4\n\t\tcase 114: return 4\n\t\tcase 116: return 4\n\t\tcase 117: return 4\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return 4\n\t\tcase 65 <= r && r <= 70: return 4\n\t\tcase 48 <= r && r <= 57: return 4\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 2\n\t\tcase 47: return 4\n\t\tcase 92: return 3\n\t\tcase 98: return 4\n\t\tcase 102: return 4\n\t\tcase 110: return 4\n\t\tcase 114: return 4\n\t\tcase 116: return 4\n\t\tcase 117: return 4\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return 4\n\t\tcase 65 <= r && r <= 70: return 4\n\t\tcase 48 <= r && r <= 57: return 4\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 2\n\t\tcase 
47: return 4\n\t\tcase 92: return 3\n\t\tcase 98: return 4\n\t\tcase 102: return 4\n\t\tcase 110: return 4\n\t\tcase 114: return 4\n\t\tcase 116: return 4\n\t\tcase 117: return 4\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return 4\n\t\tcase 65 <= r && r <= 70: return 4\n\t\tcase 48 <= r && r <= 57: return 4\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 2\n\t\tcase 47: return 4\n\t\tcase 92: return 3\n\t\tcase 98: return 4\n\t\tcase 102: return 4\n\t\tcase 110: return 4\n\t\tcase 114: return 4\n\t\tcase 116: return 4\n\t\tcase 117: return 4\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return 4\n\t\tcase 65 <= r && r <= 70: return 4\n\t\tcase 48 <= r && r <= 57: return 4\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 7\n\t\tcase 47: return 5\n\t\tcase 92: return 8\n\t\tcase 98: return 12\n\t\tcase 102: return 13\n\t\tcase 110: return 9\n\t\tcase 114: return 6\n\t\tcase 116: return 10\n\t\tcase 117: return 11\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return 4\n\t\tcase 65 <= r && r <= 70: return 4\n\t\tcase 48 <= r && r <= 57: return 4\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 2\n\t\tcase 47: return 4\n\t\tcase 92: return 3\n\t\tcase 98: return 4\n\t\tcase 102: return 4\n\t\tcase 110: return 4\n\t\tcase 114: return 4\n\t\tcase 116: return 4\n\t\tcase 117: return 4\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return 4\n\t\tcase 65 <= r && r <= 70: return 4\n\t\tcase 48 <= r && r <= 57: return 4\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 2\n\t\tcase 47: return 4\n\t\tcase 92: return 3\n\t\tcase 98: return 4\n\t\tcase 102: return 4\n\t\tcase 110: return 4\n\t\tcase 114: return 4\n\t\tcase 116: return 4\n\t\tcase 117: return 4\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return 4\n\t\tcase 65 <= r && r <= 70: return 4\n\t\tcase 48 <= r && r <= 57: return 4\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 2\n\t\tcase 47: return 4\n\t\tcase 92: return 3\n\t\tcase 98: return 14\n\t\tcase 102: return 14\n\t\tcase 110: return 4\n\t\tcase 114: return 4\n\t\tcase 116: return 4\n\t\tcase 117: return 4\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return 14\n\t\tcase 65 <= r && r <= 70: return 14\n\t\tcase 48 <= r && r <= 57: return 14\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 2\n\t\tcase 47: return 4\n\t\tcase 92: return 3\n\t\tcase 98: return 4\n\t\tcase 102: return 4\n\t\tcase 110: return 4\n\t\tcase 114: return 4\n\t\tcase 116: return 4\n\t\tcase 117: return 4\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return 4\n\t\tcase 65 <= r && r <= 70: return 4\n\t\tcase 48 <= r && r <= 57: return 4\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 2\n\t\tcase 47: return 4\n\t\tcase 92: return 3\n\t\tcase 98: return 4\n\t\tcase 102: return 4\n\t\tcase 110: return 4\n\t\tcase 114: return 4\n\t\tcase 116: return 4\n\t\tcase 117: return 4\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return 4\n\t\tcase 65 <= r && r <= 70: return 4\n\t\tcase 48 <= r && r <= 57: return 4\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 2\n\t\tcase 47: return 4\n\t\tcase 92: return 3\n\t\tcase 98: return 15\n\t\tcase 102: return 15\n\t\tcase 110: return 4\n\t\tcase 114: return 4\n\t\tcase 116: return 4\n\t\tcase 117: return 4\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return 15\n\t\tcase 65 <= r && r <= 70: return 15\n\t\tcase 48 <= r && r <= 57: return 
15\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 2\n\t\tcase 47: return 4\n\t\tcase 92: return 3\n\t\tcase 98: return 16\n\t\tcase 102: return 16\n\t\tcase 110: return 4\n\t\tcase 114: return 4\n\t\tcase 116: return 4\n\t\tcase 117: return 4\n\t}\n\tswitch {\n\t\tcase 65 <= r && r <= 70: return 16\n\t\tcase 97 <= r && r <= 102: return 16\n\t\tcase 48 <= r && r <= 57: return 16\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 2\n\t\tcase 47: return 4\n\t\tcase 92: return 3\n\t\tcase 98: return 17\n\t\tcase 102: return 17\n\t\tcase 110: return 4\n\t\tcase 114: return 4\n\t\tcase 116: return 4\n\t\tcase 117: return 4\n\t}\n\tswitch {\n\t\tcase 97 <= r && r <= 102: return 17\n\t\tcase 65 <= r && r <= 70: return 17\n\t\tcase 48 <= r && r <= 57: return 17\n\t}\n\treturn 4\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 34: return 2\n\t\tcase 47: return 4\n\t\tcase 92: return 3\n\t\tcase 98: return 4\n\t\tcase 102: return 4\n\t\tcase 110: return 4\n\t\tcase 114: return 4\n\t\tcase 116: return 4\n\t\tcase 117: return 4\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 57: return 4\n\t\tcase 97 <= r && r <= 102: return 4\n\t\tcase 65 <= r && r <= 70: return 4\n\t}\n\treturn 4\n},\n}, []int{ /* Start-of-input transitions */ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,}, []int{ /* End-of-input transitions */ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,},nil},\n\n// \\+\n{[]bool{false, true}, []func(rune) int{ // Transitions\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 43: return 1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 43: return -1\n\t}\n\treturn -1\n},\n}, []int{ /* Start-of-input transitions */ -1, -1,}, []int{ /* End-of-input transitions */ -1, -1,},nil},\n\n// -\n{[]bool{false, true}, []func(rune) int{ // Transitions\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 45: return 1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 45: return -1\n\t}\n\treturn -1\n},\n}, []int{ /* Start-of-input transitions */ -1, -1,}, []int{ /* End-of-input transitions */ -1, -1,},nil},\n\n// :\n{[]bool{false, true}, []func(rune) int{ // Transitions\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 58: return 1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 58: return -1\n\t}\n\treturn -1\n},\n}, []int{ /* Start-of-input transitions */ -1, -1,}, []int{ /* End-of-input transitions */ -1, -1,},nil},\n\n// \\^\n{[]bool{false, true}, []func(rune) int{ // Transitions\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 94: return 1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 94: return -1\n\t}\n\treturn -1\n},\n}, []int{ /* Start-of-input transitions */ -1, -1,}, []int{ /* End-of-input transitions */ -1, -1,},nil},\n\n// \\(\n{[]bool{false, true}, []func(rune) int{ // Transitions\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 40: return 1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 40: return -1\n\t}\n\treturn -1\n},\n}, []int{ /* Start-of-input transitions */ -1, -1,}, []int{ /* End-of-input transitions */ -1, -1,},nil},\n\n// \\)\n{[]bool{false, true}, []func(rune) int{ // Transitions\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 41: return 1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 41: return -1\n\t}\n\treturn -1\n},\n}, []int{ /* Start-of-input transitions */ -1, -1,}, []int{ /* End-of-input transitions */ -1, -1,},nil},\n\n// >\n{[]bool{false, true}, []func(rune) int{ // Transitions\nfunc(r 
rune) int {\n\tswitch(r) {\n\t\tcase 62: return 1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 62: return -1\n\t}\n\treturn -1\n},\n}, []int{ /* Start-of-input transitions */ -1, -1,}, []int{ /* End-of-input transitions */ -1, -1,},nil},\n\n// <\n{[]bool{false, true}, []func(rune) int{ // Transitions\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 60: return 1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 60: return -1\n\t}\n\treturn -1\n},\n}, []int{ /* Start-of-input transitions */ -1, -1,}, []int{ /* End-of-input transitions */ -1, -1,},nil},\n\n// =\n{[]bool{false, true}, []func(rune) int{ // Transitions\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 61: return 1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 61: return -1\n\t}\n\treturn -1\n},\n}, []int{ /* Start-of-input transitions */ -1, -1,}, []int{ /* End-of-input transitions */ -1, -1,},nil},\n\n// ~([0-9]|[1-9][0-9]*)\n{[]bool{false, false, true, true, true}, []func(rune) int{ // Transitions\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 126: return 1\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 48: return -1\n\t\tcase 49 <= r && r <= 57: return -1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 126: return -1\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 48: return 2\n\t\tcase 49 <= r && r <= 57: return 3\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 126: return -1\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 48: return -1\n\t\tcase 49 <= r && r <= 57: return -1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 126: return -1\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 48: return 4\n\t\tcase 49 <= r && r <= 57: return 4\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 126: return -1\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 48: return 4\n\t\tcase 49 <= r && r <= 57: return 4\n\t}\n\treturn -1\n},\n}, []int{ /* Start-of-input transitions */ -1, -1, -1, -1, -1,}, []int{ /* End-of-input transitions */ -1, -1, -1, -1, -1,},nil},\n\n// ~\n{[]bool{false, true}, []func(rune) int{ // Transitions\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 126: return 1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 126: return -1\n\t}\n\treturn -1\n},\n}, []int{ /* Start-of-input transitions */ -1, -1,}, []int{ /* End-of-input transitions */ -1, -1,},nil},\n\n// -?([0-9]|[1-9][0-9]*)(\\.[0-9][0-9]*)?\n{[]bool{false, false, true, true, false, true, true, true}, []func(rune) int{ // Transitions\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 45: return 1\n\t\tcase 46: return -1\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 48: return 2\n\t\tcase 49 <= r && r <= 57: return 3\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 45: return -1\n\t\tcase 46: return -1\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 48: return 2\n\t\tcase 49 <= r && r <= 57: return 3\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 45: return -1\n\t\tcase 46: return 4\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 48: return -1\n\t\tcase 49 <= r && r <= 57: return -1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 45: return -1\n\t\tcase 46: return 4\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 48: return 5\n\t\tcase 49 <= r && r <= 57: return 5\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 45: return -1\n\t\tcase 46: return -1\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 48: return 6\n\t\tcase 49 <= r && r <= 57: return 6\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) 
{\n\t\tcase 45: return -1\n\t\tcase 46: return 4\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 48: return 5\n\t\tcase 49 <= r && r <= 57: return 5\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 45: return -1\n\t\tcase 46: return -1\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 48: return 7\n\t\tcase 49 <= r && r <= 57: return 7\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 45: return -1\n\t\tcase 46: return -1\n\t}\n\tswitch {\n\t\tcase 48 <= r && r <= 48: return 7\n\t\tcase 49 <= r && r <= 57: return 7\n\t}\n\treturn -1\n},\n}, []int{ /* Start-of-input transitions */ -1, -1, -1, -1, -1, -1, -1, -1,}, []int{ /* End-of-input transitions */ -1, -1, -1, -1, -1, -1, -1, -1,},nil},\n\n// [ \\t\\n]+\n{[]bool{false, true}, []func(rune) int{ // Transitions\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 9: return 1\n\t\tcase 10: return 1\n\t\tcase 32: return 1\n\t}\n\treturn -1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 9: return 1\n\t\tcase 10: return 1\n\t\tcase 32: return 1\n\t}\n\treturn -1\n},\n}, []int{ /* Start-of-input transitions */ -1, -1,}, []int{ /* End-of-input transitions */ -1, -1,},nil},\n\n// [^\\t\\n\\f\\r :^\\+\\-><=~][^\\t\\n\\f\\r :^~]*\n{[]bool{false, true, true}, []func(rune) int{ // Transitions\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 9: return -1\n\t\tcase 10: return -1\n\t\tcase 12: return -1\n\t\tcase 13: return -1\n\t\tcase 32: return -1\n\t\tcase 58: return -1\n\t\tcase 60: return -1\n\t\tcase 61: return -1\n\t\tcase 94: return -1\n\t\tcase 126: return -1\n\t}\n\tswitch {\n\t\tcase 43 <= r && r <= 62: return -1\n\t}\n\treturn 1\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 9: return -1\n\t\tcase 10: return -1\n\t\tcase 12: return -1\n\t\tcase 13: return -1\n\t\tcase 32: return -1\n\t\tcase 58: return -1\n\t\tcase 60: return 2\n\t\tcase 61: return 2\n\t\tcase 94: return -1\n\t\tcase 126: return -1\n\t}\n\tswitch {\n\t\tcase 43 <= r && r <= 62: return 2\n\t}\n\treturn 2\n},\nfunc(r rune) int {\n\tswitch(r) {\n\t\tcase 9: return -1\n\t\tcase 10: return -1\n\t\tcase 12: return -1\n\t\tcase 13: return -1\n\t\tcase 32: return -1\n\t\tcase 58: return -1\n\t\tcase 60: return 2\n\t\tcase 61: return 2\n\t\tcase 94: return -1\n\t\tcase 126: return -1\n\t}\n\tswitch {\n\t\tcase 43 <= r && r <= 62: return 2\n\t}\n\treturn 2\n},\n}, []int{ /* Start-of-input transitions */ -1, -1, -1,}, []int{ /* End-of-input transitions */ -1, -1, -1,},nil},\n}, 0, 0)\n return yylex\n}",
"func (l *promlexer) Lex() token {\n\tif l.i >= len(l.b) {\n\t\treturn tEOF\n\t}\n\tc := l.b[l.i]\n\tl.start = l.i\n\nyystate0:\n\n\tswitch yyt := l.state; yyt {\n\tdefault:\n\t\tpanic(fmt.Errorf(`invalid start condition %d`, yyt))\n\tcase 0: // start condition: INITIAL\n\t\tgoto yystart1\n\tcase 1: // start condition: sComment\n\t\tgoto yystart8\n\tcase 2: // start condition: sMeta1\n\t\tgoto yystart19\n\tcase 3: // start condition: sMeta2\n\t\tgoto yystart21\n\tcase 4: // start condition: sLabels\n\t\tgoto yystart24\n\tcase 5: // start condition: sLValue\n\t\tgoto yystart29\n\tcase 6: // start condition: sValue\n\t\tgoto yystart33\n\tcase 7: // start condition: sTimestamp\n\t\tgoto yystart36\n\t}\n\nyystate1:\n\tc = l.next()\nyystart1:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '#':\n\t\tgoto yystate5\n\tcase c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':\n\t\tgoto yystate7\n\tcase c == '\\n':\n\t\tgoto yystate4\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\tcase c == '\\x00':\n\t\tgoto yystate2\n\t}\n\nyystate2:\n\tc = l.next()\n\tgoto yyrule1\n\nyystate3:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule3\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\t}\n\nyystate4:\n\tc = l.next()\n\tgoto yyrule2\n\nyystate5:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule5\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate6\n\t}\n\nyystate6:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule4\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate6\n\t}\n\nyystate7:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule10\n\tcase c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':\n\t\tgoto yystate7\n\t}\n\nyystate8:\n\tc = l.next()\nyystart8:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == 'H':\n\t\tgoto yystate9\n\tcase c == 'T':\n\t\tgoto yystate14\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\t}\n\nyystate9:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == 'E':\n\t\tgoto yystate10\n\t}\n\nyystate10:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == 'L':\n\t\tgoto yystate11\n\t}\n\nyystate11:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == 'P':\n\t\tgoto yystate12\n\t}\n\nyystate12:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate13\n\t}\n\nyystate13:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule6\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate13\n\t}\n\nyystate14:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == 'Y':\n\t\tgoto yystate15\n\t}\n\nyystate15:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == 'P':\n\t\tgoto yystate16\n\t}\n\nyystate16:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == 'E':\n\t\tgoto yystate17\n\t}\n\nyystate17:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate18\n\t}\n\nyystate18:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule7\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate18\n\t}\n\nyystate19:\n\tc = l.next()\nyystart19:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':\n\t\tgoto yystate20\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\t}\n\nyystate20:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule8\n\tcase c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':\n\t\tgoto 
yystate20\n\t}\n\nyystate21:\n\tc = l.next()\nyystart21:\n\tswitch {\n\tdefault:\n\t\tgoto yyrule9\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate23\n\tcase c >= '\\x01' && c <= '\\b' || c >= '\\v' && c <= '\\x1f' || c >= '!' && c <= 'ÿ':\n\t\tgoto yystate22\n\t}\n\nyystate22:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule9\n\tcase c >= '\\x01' && c <= '\\t' || c >= '\\v' && c <= 'ÿ':\n\t\tgoto yystate22\n\t}\n\nyystate23:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule3\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate23\n\tcase c >= '\\x01' && c <= '\\b' || c >= '\\v' && c <= '\\x1f' || c >= '!' && c <= 'ÿ':\n\t\tgoto yystate22\n\t}\n\nyystate24:\n\tc = l.next()\nyystart24:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == ',':\n\t\tgoto yystate25\n\tcase c == '=':\n\t\tgoto yystate26\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\tcase c == '}':\n\t\tgoto yystate28\n\tcase c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':\n\t\tgoto yystate27\n\t}\n\nyystate25:\n\tc = l.next()\n\tgoto yyrule15\n\nyystate26:\n\tc = l.next()\n\tgoto yyrule14\n\nyystate27:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule12\n\tcase c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':\n\t\tgoto yystate27\n\t}\n\nyystate28:\n\tc = l.next()\n\tgoto yyrule13\n\nyystate29:\n\tc = l.next()\nyystart29:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\"':\n\t\tgoto yystate30\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\t}\n\nyystate30:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\"':\n\t\tgoto yystate31\n\tcase c == '\\\\':\n\t\tgoto yystate32\n\tcase c >= '\\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':\n\t\tgoto yystate30\n\t}\n\nyystate31:\n\tc = l.next()\n\tgoto yyrule16\n\nyystate32:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c >= '\\x01' && c <= '\\t' || c >= '\\v' && c <= 'ÿ':\n\t\tgoto yystate30\n\t}\n\nyystate33:\n\tc = l.next()\nyystart33:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\tcase c == '{':\n\t\tgoto yystate35\n\tcase c >= '\\x01' && c <= '\\b' || c >= '\\v' && c <= '\\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ':\n\t\tgoto yystate34\n\t}\n\nyystate34:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule17\n\tcase c >= '\\x01' && c <= '\\b' || c >= '\\v' && c <= '\\x1f' || c >= '!' 
&& c <= 'z' || c >= '|' && c <= 'ÿ':\n\t\tgoto yystate34\n\t}\n\nyystate35:\n\tc = l.next()\n\tgoto yyrule11\n\nyystate36:\n\tc = l.next()\nyystart36:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\\n':\n\t\tgoto yystate37\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\tcase c >= '0' && c <= '9':\n\t\tgoto yystate38\n\t}\n\nyystate37:\n\tc = l.next()\n\tgoto yyrule19\n\nyystate38:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule18\n\tcase c >= '0' && c <= '9':\n\t\tgoto yystate38\n\t}\n\nyyrule1: // \\0\n\t{\n\t\treturn tEOF\n\t}\nyyrule2: // \\n\n\t{\n\t\tl.state = sInit\n\t\treturn tLinebreak\n\t\tgoto yystate0\n\t}\nyyrule3: // [ \\t]+\n\t{\n\t\treturn tWhitespace\n\t}\nyyrule4: // #[ \\t]+\n\t{\n\t\tl.state = sComment\n\t\tgoto yystate0\n\t}\nyyrule5: // #\n\t{\n\t\treturn l.consumeComment()\n\t}\nyyrule6: // HELP[\\t ]+\n\t{\n\t\tl.state = sMeta1\n\t\treturn tHelp\n\t\tgoto yystate0\n\t}\nyyrule7: // TYPE[\\t ]+\n\t{\n\t\tl.state = sMeta1\n\t\treturn tType\n\t\tgoto yystate0\n\t}\nyyrule8: // {M}({M}|{D})*\n\t{\n\t\tl.state = sMeta2\n\t\treturn tMName\n\t\tgoto yystate0\n\t}\nyyrule9: // {C}*\n\t{\n\t\tl.state = sInit\n\t\treturn tText\n\t\tgoto yystate0\n\t}\nyyrule10: // {M}({M}|{D})*\n\t{\n\t\tl.state = sValue\n\t\treturn tMName\n\t\tgoto yystate0\n\t}\nyyrule11: // \\{\n\t{\n\t\tl.state = sLabels\n\t\treturn tBraceOpen\n\t\tgoto yystate0\n\t}\nyyrule12: // {L}({L}|{D})*\n\t{\n\t\treturn tLName\n\t}\nyyrule13: // \\}\n\t{\n\t\tl.state = sValue\n\t\treturn tBraceClose\n\t\tgoto yystate0\n\t}\nyyrule14: // =\n\t{\n\t\tl.state = sLValue\n\t\treturn tEqual\n\t\tgoto yystate0\n\t}\nyyrule15: // ,\n\t{\n\t\treturn tComma\n\t}\nyyrule16: // \\\"(\\\\.|[^\\\\\"])*\\\"\n\t{\n\t\tl.state = sLabels\n\t\treturn tLValue\n\t\tgoto yystate0\n\t}\nyyrule17: // [^{ \\t\\n]+\n\t{\n\t\tl.state = sTimestamp\n\t\treturn tValue\n\t\tgoto yystate0\n\t}\nyyrule18: // {D}+\n\t{\n\t\treturn tTimestamp\n\t}\nyyrule19: // \\n\n\tif true { // avoid go vet determining the below panic will not be reached\n\t\tl.state = sInit\n\t\treturn tLinebreak\n\t\tgoto yystate0\n\t}\n\tpanic(\"unreachable\")\n\nyyabort: // no lexem recognized\n\t//\n\t// silence unused label errors for build and satisfy go vet reachability analysis\n\t//\n\t{\n\t\tif false {\n\t\t\tgoto yyabort\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate0\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate1\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate8\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate19\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate21\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate24\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate29\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate33\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate36\n\t\t}\n\t}\n\n\t// Workaround to gobble up comments that started with a HELP or TYPE\n\t// prefix. We just consume all characters until we reach a newline.\n\t// This saves us from adding disproportionate complexity to the parser.\n\tif l.state == sComment {\n\t\treturn l.consumeComment()\n\t}\n\treturn tInvalid\n}",
"func main() {\n\tgopath = strings.Replace(gopath, \"\\\\\", \"/\", -1)\n\tpwd = strings.Replace(pwd, \"\\\\\", \"/\", -1)\n\t// http://stackoverflow.com/questions/12363030/read-from-initial-stdin-in-go\n\tb, err := ioutil.ReadAll(in)\n\tif err != nil || readAllerr {\n\t\tPdbgf(\"gopanic: ioutil.ReadAll(os.Stdin) => err: %s\", errorString(err))\n\t\texitfct(-1)\n\t\treturn\n\t}\n\t// Pdbgf(\"ioutil.ReadAll(in) => len: %d\", len(b))\n\n\tlines := strings.Split(string(b), \"\\n\")\n\tlexer := &lexer{lines: lines, stacks: []*stack{}}\n\t// Pdbgf(\"len: %d, pos %d\", len(lexer.lines), lexer.pos)\n\tfor state := lookForReason; state != nil; {\n\t\tstate = state(lexer)\n\t}\n\tfor _, stack := range lexer.stacks {\n\t\tstack.max = lexer.max + 2\n\t\tfmt.Fprintln(writers.Out(), stack.String())\n\t}\n\t// Pdbgf(\"done\")\n}",
"func lex(input string) (*lexer, error) {\n\tif !utf8.ValidString(input) {\n\t\treturn nil, errors.New(\"input is not a valid UTF-8 string\")\n\t}\n\tl := &lexer{\n\t\tinput: input,\n\t\titems: make(chan item, 0x1000),\n\t}\n\tgo l.run()\n\treturn l, nil\n}",
"func NewLexer(br *bufio.Reader) *Lexer {\n\tb, err := br.ReadByte()\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn &Lexer{br, nil, getSingleByteArray(b), UnknownToken, nil}\n}",
"func BenchmarkLexer(b *testing.B) {\n\tvar tracer trace.Trace\n\t//tracer = trace.New(os.Stderr, true) // use stderr to trace\n\ttracer = trace.New(ioutil.Discard, true) // and this to not\n\n\tfor i := 0; i < b.N; i++ {\n\t\txml_lexer.Lex(xmlInput, tracer) // xml\n\t\t//json_lexer.Lex(jsonInput, tracer) // json\n\t\t//csv_lexer.Lex(jsonInput, tracer) // csv\n\t}\n}",
"func TestScanner_Scan(t *testing.T) {\n\tvar tests = []struct {\n\t\ts string\n\t\ttok lang.Token\n\t\tlit string\n\t\tpos int\n\t}{\n\t\t// Special tokens (EOF, ILLEGAL, WS)\n\t\t{s: ``, tok: lang.EOF},\n\t\t{s: `#`, tok: lang.ILLEGAL, lit: `#`},\n\t\t{s: `+`, tok: lang.ILLEGAL, lit: `+`},\n\t\t{s: `-`, tok: lang.ILLEGAL, lit: `-`},\n\t\t{s: `*`, tok: lang.ILLEGAL, lit: `*`},\n\t\t{s: `/`, tok: lang.BADREGEX, lit: ``},\n\t\t{s: `%`, tok: lang.ILLEGAL, lit: `%`},\n\t\t{s: ` `, tok: lang.WS, lit: \" \"},\n\t\t{s: \"\\t\", tok: lang.WS, lit: \"\\t\"},\n\t\t{s: \"\\n\", tok: lang.WS, lit: \"\\n\"},\n\t\t{s: \"\\r\", tok: lang.WS, lit: \"\\n\"},\n\t\t{s: \"\\r\\n\", tok: lang.WS, lit: \"\\n\"},\n\t\t{s: \"\\rX\", tok: lang.WS, lit: \"\\n\"},\n\t\t{s: \"\\n\\r\", tok: lang.WS, lit: \"\\n\\n\"},\n\t\t{s: \" \\n\\t \\r\\n\\t\", tok: lang.WS, lit: \" \\n\\t \\n\\t\"},\n\t\t{s: \" foo\", tok: lang.WS, lit: \" \"},\n\n\t\t// Logical operators\n\t\t{s: `AND`, tok: lang.AND},\n\t\t{s: `and`, tok: lang.AND},\n\t\t{s: `|`, tok: lang.OR},\n\t\t{s: `OR`, tok: lang.OR},\n\t\t{s: `or`, tok: lang.OR},\n\t\t{s: `!`, tok: lang.NOT},\n\t\t{s: `NOT`, tok: lang.NOT},\n\t\t{s: `not`, tok: lang.NOT},\n\n\t\t// Misc. tokens\n\t\t{s: `(`, tok: lang.LPAREN},\n\t\t{s: `)`, tok: lang.RPAREN},\n\t\t{s: `,`, tok: lang.COMMA},\n\n\t\t// Identifiers\n\t\t{s: `required`, tok: lang.IDENT, lit: `required`},\n\t\t{s: `required()`, tok: lang.IDENT, lit: `required`},\n\t\t{s: `foo`, tok: lang.IDENT, lit: `foo`},\n\t\t{s: `phone`, tok: lang.IDENT, lit: `phone`},\n\t\t{s: `range(1,2)`, tok: lang.IDENT, lit: `range`},\n\n\t\t// Booleans\n\t\t{s: `true`, tok: lang.TRUE},\n\t\t{s: `false`, tok: lang.FALSE},\n\n\t\t// Strings\n\t\t{s: `'testing 123!'`, tok: lang.STRING, lit: `testing 123!`},\n\t\t{s: `'string'`, tok: lang.STRING, lit: `string`},\n\t\t{s: `'foo\\nbar'`, tok: lang.STRING, lit: \"foo\\nbar\"},\n\n\t\t// Numbers\n\t\t{s: `100`, tok: lang.INTEGER, lit: `100`},\n\t\t{s: `100.23`, tok: lang.NUMBER, lit: `100.23`},\n\t\t{s: `.23`, tok: lang.NUMBER, lit: `.23`},\n\t\t// {s: `.`, tok: lang.ILLEGAL, lit: `.`},\n\t\t{s: `10.3s`, tok: lang.NUMBER, lit: `10.3`},\n\n\t\t// Durations\n\t\t{s: `10u`, tok: lang.DURATION, lit: `10u`},\n\t\t{s: `10µ`, tok: lang.DURATION, lit: `10µ`},\n\t\t{s: `10ms`, tok: lang.DURATION, lit: `10ms`},\n\t\t{s: `1s`, tok: lang.DURATION, lit: `1s`},\n\t\t{s: `10m`, tok: lang.DURATION, lit: `10m`},\n\t\t{s: `10h`, tok: lang.DURATION, lit: `10h`},\n\t\t{s: `10d`, tok: lang.DURATION, lit: `10d`},\n\t\t{s: `10w`, tok: lang.DURATION, lit: `10w`},\n\t\t{s: `10x`, tok: lang.DURATION, lit: `10x`}, // non-duration unit, but scanned as a duration value\n\n\t\t// Keywords\n\t\t{s: `EACH`, tok: lang.EACH},\n\t\t{s: `each(!zero)`, tok: lang.EACH},\n\n\t\t// Bound params\n\t\t{s: `$Title`, tok: lang.BOUNDPARAM, lit: `Title`},\n\t\t{s: `$.Book.Description`, tok: lang.BOUNDPARAM, lit: `Book.Description`},\n\t}\n\n\tfor i, tc := range tests {\n\t\tt.Run(tc.s, func(t *testing.T) {\n\t\t\ts := lang.NewScanner(strings.NewReader(tc.s))\n\t\t\ttok, pos, lit := s.Scan()\n\t\t\tassert.Equal(t, tc.tok, tok, fmt.Sprintf(\"%d. %q token mismatch: exp=%q got=%q <%q>\", i, tc.s, tc.tok.String(), tok.String(), lit))\n\t\t\tassert.Equal(t, tc.pos, pos, fmt.Sprintf(\"%d. %q pos mismatch: exp=%#v got=%#v\", i, tc.s, tc.pos, pos))\n\t\t\tassert.Equal(t, tc.lit, lit, fmt.Sprintf(\"%d. %q literal mismatch: exp=%q got=%q\", i, tc.s, tc.lit, lit))\n\t\t})\n\t}\n}",
"func lex(input io.Reader) chan token {\n\tr := &lexer{\n\t\tlineno: 1,\n\t\ttokStream: make(chan token),\n\t\tinput: bufio.NewReader(input),\n\t\tacc: make([]rune, 1),\n\t}\n\tgo r.run()\n\treturn r.tokStream\n}",
"func (l *Lexer) Next() Token {\nrestart:\n\tl.tokenLine = l.line\n\tl.tokenOffset = l.offset\n\n\tstate := tmStateMap[l.State]\n\tfor state >= 0 {\n\t\tvar ch int\n\t\tif uint(l.ch) < tmRuneClassLen {\n\t\t\tch = int(tmRuneClass[l.ch])\n\t\t} else if l.ch < 0 {\n\t\t\tstate = int(tmLexerAction[state*tmNumClasses])\n\t\t\tcontinue\n\t\t} else {\n\t\t\tch = 1\n\t\t}\n\t\tstate = int(tmLexerAction[state*tmNumClasses+ch])\n\t\tif state > tmFirstRule {\n\t\t\tif l.ch == '\\n' {\n\t\t\t\tl.line++\n\t\t\t}\n\n\t\t\t// Scan the next character.\n\t\t\t// Note: the following code is inlined to avoid performance implications.\n\t\t\tl.offset = l.scanOffset\n\t\t\tif l.offset < len(l.source) {\n\t\t\t\tr, w := rune(l.source[l.offset]), 1\n\t\t\t\tif r >= 0x80 {\n\t\t\t\t\t// not ASCII\n\t\t\t\t\tr, w = utf8.DecodeRuneInString(l.source[l.offset:])\n\t\t\t\t}\n\t\t\t\tl.scanOffset += w\n\t\t\t\tl.ch = r\n\t\t\t} else {\n\t\t\t\tl.ch = -1 // EOI\n\t\t\t}\n\t\t}\n\t}\n\n\trule := tmFirstRule - state\n\n\ttoken := tmToken[rule]\n\tspace := false\n\tswitch rule {\n\tcase 0:\n\t\tif l.offset == l.tokenOffset {\n\t\t\tl.rewind(l.scanOffset)\n\t\t}\n\tcase 2: // invalid_token: /\\x00/\n\t\t{\n\t\t\tl.invalidTokenClass = InvalidTokenNullCharInCode\n\t\t}\n\tcase 3: // whitespace: /[\\n\\r\\t\\f\\v ]+/\n\t\tspace = true\n\tcase 4: // EnterBlockComment: /\\(\\*/\n\t\tspace = true\n\t\t{\n\t\t\tl.enterBlockComment()\n\t\t}\n\tcase 5: // invalid_token: /\\*\\)/\n\t\t{\n\t\t\tl.invalidTokenClass = InvalidTokenUnmatchedBlockComment\n\t\t}\n\tcase 6: // invalid_token: /{eoi}/\n\t\t{\n\t\t\tl.State = StateInitial\n\t\t\tl.invalidTokenClass = InvalidTokenEoiInComment\n\t\t}\n\tcase 7: // ExitBlockComment: /\\*\\)/\n\t\tspace = true\n\t\t{\n\t\t\tl.exitBlockComment()\n\t\t}\n\tcase 8: // BlockComment: /[^\\(\\)\\*]+|[\\*\\(\\)]/\n\t\tspace = true\n\tcase 9: // LineComment: /\\-\\-.*/\n\t\tspace = true\n\tcase 14: // invalid_token: /\"({strRune}*\\x00{strRune}*)+\"/\n\t\t{\n\t\t\tl.invalidTokenClass = InvalidTokenNullCharInString\n\t\t}\n\tcase 15: // invalid_token: /\"({strRune}*\\\\\\x00{strRune}*)+\"/\n\t\t{\n\t\t\tl.invalidTokenClass = InvalidTokenEscapedNullCharInString\n\t\t}\n\tcase 16: // invalid_token: /\"{strRune}*{eoi}/\n\t\t{\n\t\t\tl.invalidTokenClass = InvalidTokenEoiInString\n\t\t}\n\t}\n\tif space {\n\t\tgoto restart\n\t}\n\treturn token\n}",
"func lex(str string) int {\n\tvar cur int\n\tcount := 0\n\n\tfor { \n//line \"go/eof/04_generic_api_sentinel.go\":22\n{\n\tvar yych byte\n\tyych = peek(str, cur)\n\tswitch (yych) {\n\tcase 0x00:\n\t\tgoto yy1\n\tcase ' ':\n\t\tgoto yy3\n\tcase 'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z':\n\t\tgoto yy5\n\tdefault:\n\t\tgoto yy2\n\t}\nyy1:\n\tcur += 1\n//line \"go/eof/04_generic_api_sentinel.re\":25\n\t{ return count }\n//line \"go/eof/04_generic_api_sentinel.go\":40\nyy2:\n\tcur += 1\n//line \"go/eof/04_generic_api_sentinel.re\":24\n\t{ return -1 }\n//line \"go/eof/04_generic_api_sentinel.go\":45\nyy3:\n\tcur += 1\n\tyych = peek(str, cur)\n\tswitch (yych) {\n\tcase ' ':\n\t\tgoto yy3\n\tdefault:\n\t\tgoto yy4\n\t}\nyy4:\n//line \"go/eof/04_generic_api_sentinel.re\":27\n\t{ continue }\n//line \"go/eof/04_generic_api_sentinel.go\":58\nyy5:\n\tcur += 1\n\tyych = peek(str, cur)\n\tswitch (yych) {\n\tcase 'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z':\n\t\tgoto yy5\n\tdefault:\n\t\tgoto yy6\n\t}\nyy6:\n//line \"go/eof/04_generic_api_sentinel.re\":26\n\t{ count += 1; continue }\n//line \"go/eof/04_generic_api_sentinel.go\":71\n}\n//line \"go/eof/04_generic_api_sentinel.re\":28\n\n\t}\n}",
"func (l *lexer) lex() error {\n\tvar data string\n\tvar startingChar char\n\n\t// Loop through ever character\n\tfor character, hasNextChar := l.scan.next(); hasNextChar; character, hasNextChar = l.scan.next() {\n\t\tswitch l.state {\n\t\tcase lexerStateNone:\n\t\t\t// The lexer is waiting for a new state\n\n\t\t\tif unicode.IsLetter(character.c) {\n\t\t\t\t// A letter can either mean a name of some kind or a label\n\t\t\t\tl.state = lexerStateNameOrLabel\n\t\t\t\tstartingChar = character\n\t\t\t\tdata += string(character.c)\n\t\t\t} else if unicode.IsDigit(character.c) || character.c == '-' {\n\t\t\t\t// A digit or a negative sign means a numeric literal\n\t\t\t\tl.state = lexerStateNumber\n\t\t\t\tstartingChar = character\n\t\t\t\tdata += string(character.c)\n\t\t\t} else if !unicode.IsSpace(character.c) {\n\t\t\t\t// If the character isn't a letter, number, or space, it isn't valid\n\t\t\t\treturn newParseError(\"unexpected character '\"+string(character.c)+\"'\", character)\n\t\t\t}\n\t\tcase lexerStateNameOrLabel:\n\t\t\t// The lexer expects more characters or a sign that the token is finished\n\n\t\t\tif unicode.IsLetter(character.c) {\n\t\t\t\t// Another letter, so the name or label is still being constructed\n\t\t\t\tdata += string(character.c)\n\t\t\t} else if character.c == ':' {\n\t\t\t\t// A colon denotes the end of a label\n\t\t\t\tl.tokens = append(l.tokens, token{\n\t\t\t\t\ttType: tokenLabel,\n\t\t\t\t\tstartingChar: startingChar,\n\t\t\t\t\tdata: strings.ToUpper(data)})\n\t\t\t\tdata = \"\"\n\t\t\t\tl.state = lexerStateNone\n\t\t\t} else if unicode.IsSpace(character.c) {\n\t\t\t\t// A space denotes the end of a name\n\t\t\t\tl.tokens = append(l.tokens, token{\n\t\t\t\t\ttType: tokenName,\n\t\t\t\t\tstartingChar: startingChar,\n\t\t\t\t\tdata: strings.ToUpper(data)})\n\t\t\t\tdata = \"\"\n\t\t\t\tl.state = lexerStateNone // Reset the lexer state\n\t\t\t} else {\n\t\t\t\t// An invalid character\n\t\t\t\treturn newParseError(\"unexpected character '\"+string(character.c)+\"'\", character)\n\t\t\t}\n\t\tcase lexerStateNumber:\n\t\t\t// The lexer expects more numbers or a sign that the token is finished\n\n\t\t\tif unicode.IsDigit(character.c) {\n\t\t\t\t// Another digit, so the number is still being constructed\n\t\t\t\tdata += string(character.c)\n\t\t\t} else if unicode.IsSpace(character.c) {\n\t\t\t\t// A space denotes the end of the number\n\t\t\t\tl.tokens = append(l.tokens, token{\n\t\t\t\t\ttType: tokenNumber,\n\t\t\t\t\tstartingChar: startingChar,\n\t\t\t\t\tdata: data})\n\t\t\t\tdata = \"\"\n\t\t\t\tl.state = lexerStateNone // Reset the lexer state\n\t\t\t} else {\n\t\t\t\t// An invalid character\n\t\t\t\treturn newParseError(\"unexpected character '\"+string(character.c)+\"'\", character)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func NewLexerRun(name string, r io.Reader, rec Record, runFn RunFn) (l *Lexer, err error) {\n\tif rec.Buflen < 1 {\n\t\terr = fmt.Errorf(\"rec.Buflen must be > 0: %d\", rec.Buflen)\n\t\treturn\n\t}\n\tif rec.ErrorFn == nil {\n\t\terr = fmt.Errorf(\"rec.ErrorFn must not be nil\")\n\t\treturn\n\t}\n\tl = &Lexer{\n\t\tname: name,\n\t\tr: r,\n\t\trec: rec,\n\t\titems: make(chan Item),\n\t\tnext: make([]byte, rec.Buflen),\n\t\teof: false,\n\t}\n\tgo func(l *Lexer, runFn RunFn) {\n\t\tdefer close(l.items)\n\t\trunFn(l)\n\t}(l, runFn)\n\n\treturn\n}",
"func (st *simpleTokenizer) parseSource(source io.Reader, lproc vertigo.LineProcessor) error {\n\tbrd := bufio.NewScanner(source)\n\n\tch := make(chan []interface{})\n\tchunk := make([]interface{}, channelChunkSize)\n\tgo func() {\n\t\ti := 0\n\t\tfor brd.Scan() {\n\t\t\tline := st.parseLine(brd.Text())\n\t\t\tfor _, token := range line {\n\t\t\t\tif token != \"\" {\n\t\t\t\t\tchunk[i] = &vertigo.Token{Word: token}\n\t\t\t\t\ti++\n\t\t\t\t\tif i == channelChunkSize {\n\t\t\t\t\t\ti = 0\n\t\t\t\t\t\tch <- chunk\n\t\t\t\t\t\tchunk = make([]interface{}, channelChunkSize)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif i > 0 {\n\t\t\tch <- chunk[:i]\n\t\t}\n\t\tclose(ch)\n\t}()\n\n\tfor tokens := range ch {\n\t\tfor _, token := range tokens {\n\t\t\tswitch token.(type) {\n\t\t\tcase *vertigo.Token:\n\t\t\t\ttk := token.(*vertigo.Token)\n\t\t\t\tlproc.ProcToken(tk)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (l *lexer) run() {\n\tfor state := lexText; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.tokens) // No more tokens will be delivered\n}",
"func NewLexer(reader io.Reader, capacity int) *Lexer {\n\tl := &Lexer{\n\t\treader: reader,\n\t\tbuffer: make([]byte, capacity),\n\t}\n\n\treturn l\n}",
"func newLexer(r io.Reader) *lexer {\n\ts := bufio.NewScanner(r)\n\n\treturn &lexer{\n\t\tscanner: s,\n\t\tindexPosition: 0,\n\t}\n\n}",
"func (l *lexer) run() {\nmainLoop:\n\tfor {\n\t\tif !processWhitespace(l) {\n\t\t\tbreak\n\t\t}\n\t\t//fmt.Println(\"testing\", string(l.peek()))\n\t\tfound := false\n\tprocessLoop:\n\t\tfor _, processFunc := range processFunctions {\n\t\t\t//fmt.Println(\"func =\", processFunc)\n\t\t\tresult := processFunc(l)\n\t\t\t//fmt.Println(\"peek = \", string(l.peek()))\n\t\t\tswitch result {\n\t\t\tcase resultMatch:\n\t\t\t\tfound = true\n\t\t\t\tbreak processLoop\n\t\t\tcase resultMatchError:\n\t\t\t\tbreak mainLoop\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tl.errorf(\"Invalid token: '%s'\", string(l.peek()))\n\t\t\tbreak\n\t\t}\n\t}\n\tl.emit(itemEOF)\n\tclose(l.items)\n}",
"func lex(input string) *lexer {\n\tl := &lexer{\n\t\tinput: input,\n\t\titems: make(chan token),\n\t}\n\tgo l.run()\n\treturn l\n}",
"func newLexer(src string) *lexer {\n\tl := &lexer{src: src,\n\t\ttokenChan: make(chan token),\n\t}\n\tgo l.run()\n\treturn l\n}",
"func (z *Tokenizer) Scan() {\n\tdefer func() {\n\t\trec := recover()\n\t\tif rErr, ok := rec.(error); ok {\n\t\t\t// we only ever panic(err)\n\t\t\tz.err = rErr\n\t\t\tz.tok = Token{\n\t\t\t\tType: TokenError,\n\t\t\t\tExtra: &TokenExtraError{Err: z.err},\n\t\t\t}\n\t\t} else if rec != nil {\n\t\t\tpanic(rec)\n\t\t}\n\t}()\n\n\tif z.err == nil {\n\t\tz.tok = z.consume()\n\t} else if z.err == io.EOF {\n\t\tz.tok = Token{\n\t\t\tType: TokenEOF,\n\t\t}\n\t} else {\n\t\tz.tok = Token{\n\t\t\tType: TokenError,\n\t\t\tValue: z.err.Error(),\n\t\t\tExtra: &TokenExtraError{Err: z.err},\n\t\t}\n\t}\n}",
"func lex(input string) *lexer {\n\tl := &lexer{\n\t\tinput: input,\n\t\titems: make(chan item),\n\t}\n\tgo l.run()\n\treturn l\n}",
"func lex(input string) *lexer {\n\tl := &lexer{\n\t\tinput: input,\n\t\titems: make(chan item),\n\t}\n\tgo l.run()\n\treturn l\n}",
"func NewLexer(input []byte) *Lexer {\n\treturn &Lexer{\n\t\tinput: input,\n\t\titems: make(chan Item, 2),\n\t\tstate: lexBytes,\n\t}\n}",
"func lexerPreallocMem(inputSize int, numThreads int) {\n\tlexerInt64Pools = make([]*int64Pool, numThreads)\n\t\n\tavgCharsPerNumber := float64(4)\n\t\n\tpoolSizePerThread := int(math.Ceil((float64(inputSize) / avgCharsPerNumber) / float64(numThreads)))\n\n\tfor i := 0; i < numThreads; i++ {\n\t\tlexerInt64Pools[i] = newInt64Pool(poolSizePerThread)\n\t}\n}",
"func FakeLexer(s string, text func(string), emoji func(string)) {\n\ttext(s)\n}",
"func NewLexer(rd io.Reader) *Lexer {\n\tp := Lexer{}\n\tp.rd = rd\n\tp.lastByte = bufSize\n\tp.buf = make([]byte, bufSize)\n\tp.r = -1\n\tp.fill()\n\treturn &p\n}",
"func NewLexer(f io.Reader) (*lexer, <-chan *lexData, <-chan error) {\n\tlexchan := make(chan *lexData)\n\terrchan := make(chan error, 1)\n\tbuf := bufio.NewReader(f)\n\n\treturn &lexer{buf: buf, lexchan: lexchan, errchan: errchan}, lexchan, errchan\n}",
"func newLexer(scan scanner) lexer {\n\treturn lexer{\n\t\tscan: scan,\n\t\ttokens: make([]token, 0, 20)}\n}",
"func Lexer(tokens []token) []expression {\n\tvar expressions []expression\n\n\tp := parser{\n\t\ttokens: tokens,\n\t\tpos: 0,\n\t}\n\n\tfor !p.done() {\n\t\texpressions = append(expressions, p.expression())\n\t}\n\n\treturn expressions\n}",
"func NewLexerEngine(def *LexerDef, text string, stopMarkers []string, extend interface{}) (*LexerEngine, error) {\n\tif err := def.Init(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &LexerEngine{\n\t\tdef: def,\n\t\tstopMarkers: stopMarkers,\n\t\tstr: text,\n\t\toutBuf: make([]Lexeme, 0, 4),\n\t\tmode: def.Modes[0],\n\t\textend: extend,\n\t}, nil\n}",
"func newLexer(input string) *lexer {\n\tl := &lexer{input: input}\n\tl.readChar()\n\treturn l\n}",
"func (l *lexer) Lex() int {\n\tconst (\n\t\tlstateInit = iota\n\t\tlstateValue\n\t\tlstateTimestamp\n\t\tlstateLabels\n\t\tlstateLName\n\t\tlstateLValue\n\t)\n\ts := lstateInit\n\n\tif l.i >= len(l.b) {\n\t\treturn eof\n\t}\n\tc := l.b[l.i]\n\n\tl.ts = nil\n\tl.mstart = l.nextMstart\n\tl.offsets = l.offsets[:0]\n\nyystate0:\n\n\tswitch yyt := s; yyt {\n\tdefault:\n\t\tpanic(fmt.Errorf(`invalid start condition %d`, yyt))\n\tcase 0: // start condition: INITIAL\n\t\tgoto yystart1\n\tcase 1: // start condition: lstateValue\n\t\tgoto yystart8\n\tcase 2: // start condition: lstateTimestamp\n\t\tgoto yystart14\n\tcase 3: // start condition: lstateLabels\n\t\tgoto yystart19\n\tcase 4: // start condition: lstateLName\n\t\tgoto yystart23\n\tcase 5: // start condition: lstateLValue\n\t\tgoto yystart26\n\t}\n\n\tgoto yystate0 // silence unused label error\n\tgoto yystate1 // silence unused label error\nyystate1:\n\tc = l.next()\nyystart1:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '#':\n\t\tgoto yystate4\n\tcase c == '\\t' || c == '\\n' || c == '\\r' || c == ' ':\n\t\tgoto yystate3\n\tcase c == '\\x00':\n\t\tgoto yystate2\n\tcase c >= 'A' && c <= 'Z' || c >= 'a' && c <= 'z':\n\t\tgoto yystate6\n\t}\n\nyystate2:\n\tc = l.next()\n\tgoto yyrule1\n\nyystate3:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule3\n\tcase c == '\\t' || c == '\\n' || c == '\\r' || c == ' ':\n\t\tgoto yystate3\n\t}\n\nyystate4:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\\n':\n\t\tgoto yystate5\n\tcase c >= '\\x01' && c <= '\\t' || c == '\\v' || c == '\\f' || c >= '\\x0e' && c <= 'ÿ':\n\t\tgoto yystate4\n\t}\n\nyystate5:\n\tc = l.next()\n\tgoto yyrule2\n\nyystate6:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule5\n\tcase c == '{':\n\t\tgoto yystate7\n\tcase c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':\n\t\tgoto yystate6\n\t}\n\nyystate7:\n\tc = l.next()\n\tgoto yyrule4\n\n\tgoto yystate8 // silence unused label error\nyystate8:\n\tc = l.next()\nyystart8:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == 'N':\n\t\tgoto yystate11\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate10\n\tcase c >= '\\x01' && c <= '\\b' || c == '\\v' || c == '\\f' || c >= '\\x0e' && c <= '\\x1f' || c >= '!' && c <= 'M' || c >= 'O' && c <= 'ÿ':\n\t\tgoto yystate9\n\t}\n\nyystate9:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule14\n\tcase c >= '\\x01' && c <= '\\b' || c == '\\v' || c == '\\f' || c >= '\\x0e' && c <= '\\x1f' || c >= '!' && c <= 'ÿ':\n\t\tgoto yystate9\n\t}\n\nyystate10:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule12\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate10\n\t}\n\nyystate11:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule14\n\tcase c == 'a':\n\t\tgoto yystate12\n\tcase c >= '\\x01' && c <= '\\b' || c == '\\v' || c == '\\f' || c >= '\\x0e' && c <= '\\x1f' || c >= '!' && c <= '`' || c >= 'b' && c <= 'ÿ':\n\t\tgoto yystate9\n\t}\n\nyystate12:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule14\n\tcase c == 'N':\n\t\tgoto yystate13\n\tcase c >= '\\x01' && c <= '\\b' || c == '\\v' || c == '\\f' || c >= '\\x0e' && c <= '\\x1f' || c >= '!' && c <= 'M' || c >= 'O' && c <= 'ÿ':\n\t\tgoto yystate9\n\t}\n\nyystate13:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule13\n\tcase c >= '\\x01' && c <= '\\b' || c == '\\v' || c == '\\f' || c >= '\\x0e' && c <= '\\x1f' || c >= '!' 
&& c <= 'ÿ':\n\t\tgoto yystate9\n\t}\n\n\tgoto yystate14 // silence unused label error\nyystate14:\n\tc = l.next()\nyystart14:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\\n' || c == '\\r':\n\t\tgoto yystate17\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate16\n\tcase c == '\\x00':\n\t\tgoto yystate15\n\tcase c >= '0' && c <= '9':\n\t\tgoto yystate18\n\t}\n\nyystate15:\n\tc = l.next()\n\tgoto yyrule18\n\nyystate16:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule15\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate16\n\t}\n\nyystate17:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule17\n\tcase c == '\\n' || c == '\\r':\n\t\tgoto yystate17\n\t}\n\nyystate18:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule16\n\tcase c >= '0' && c <= '9':\n\t\tgoto yystate18\n\t}\n\n\tgoto yystate19 // silence unused label error\nyystate19:\n\tc = l.next()\nyystart19:\n\tswitch {\n\tdefault:\n\t\tgoto yyrule8\n\tcase c == ',':\n\t\tgoto yystate21\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate20\n\tcase c == '}':\n\t\tgoto yystate22\n\t}\n\nyystate20:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule6\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate20\n\t}\n\nyystate21:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule8\n\tcase c == '}':\n\t\tgoto yystate22\n\t}\n\nyystate22:\n\tc = l.next()\n\tgoto yyrule7\n\n\tgoto yystate23 // silence unused label error\nyystate23:\n\tc = l.next()\nyystart23:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c >= 'A' && c <= 'Z' || c >= 'a' && c <= 'z':\n\t\tgoto yystate24\n\t}\n\nyystate24:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '=':\n\t\tgoto yystate25\n\tcase c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':\n\t\tgoto yystate24\n\t}\n\nyystate25:\n\tc = l.next()\n\tgoto yyrule9\n\n\tgoto yystate26 // silence unused label error\nyystate26:\n\tc = l.next()\nyystart26:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\"':\n\t\tgoto yystate27\n\tcase c == '\\'':\n\t\tgoto yystate30\n\t}\n\nyystate27:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yystate27 // c >= '\\x00' && c <= '!' 
|| c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ'\n\tcase c == '\"':\n\t\tgoto yystate28\n\tcase c == '\\\\':\n\t\tgoto yystate29\n\t}\n\nyystate28:\n\tc = l.next()\n\tgoto yyrule10\n\nyystate29:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c >= '\\x01' && c <= '\\t' || c >= '\\v' && c <= 'ÿ':\n\t\tgoto yystate27\n\t}\n\nyystate30:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yystate30 // c >= '\\x00' && c <= '&' || c >= '(' && c <= '[' || c >= ']' && c <= 'ÿ'\n\tcase c == '\\'':\n\t\tgoto yystate31\n\tcase c == '\\\\':\n\t\tgoto yystate32\n\t}\n\nyystate31:\n\tc = l.next()\n\tgoto yyrule11\n\nyystate32:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c >= '\\x01' && c <= '\\t' || c >= '\\v' && c <= 'ÿ':\n\t\tgoto yystate30\n\t}\n\nyyrule1: // \\0\n\t{\n\t\treturn eof\n\t}\nyyrule2: // #[^\\r\\n]*\\n\n\t{\n\t\tl.mstart = l.i\n\t\tgoto yystate0\n\t}\nyyrule3: // [\\r\\n \\t]+\n\t{\n\t\tl.mstart = l.i\n\t\tgoto yystate0\n\t}\nyyrule4: // {S}({M}|{D})*\\{\n\t{\n\t\ts = lstateLabels\n\t\tl.offsets = append(l.offsets, l.i-1)\n\t\tgoto yystate0\n\t}\nyyrule5: // {S}({M}|{D})*\n\t{\n\t\ts = lstateValue\n\t\tl.mend = l.i\n\t\tl.offsets = append(l.offsets, l.i)\n\t\tgoto yystate0\n\t}\nyyrule6: // [ \\t]+\n\n\tgoto yystate0\nyyrule7: // ,?\\}\n\t{\n\t\ts = lstateValue\n\t\tl.mend = l.i\n\t\tgoto yystate0\n\t}\nyyrule8: // ,?\n\t{\n\t\ts = lstateLName\n\t\tl.offsets = append(l.offsets, l.i)\n\t\tgoto yystate0\n\t}\nyyrule9: // {S}({L}|{D})*=\n\t{\n\t\ts = lstateLValue\n\t\tl.offsets = append(l.offsets, l.i-1)\n\t\tgoto yystate0\n\t}\nyyrule10: // \\\"(\\\\.|[^\\\\\"]|\\0)*\\\"\n\t{\n\t\ts = lstateLabels\n\t\tl.offsets = append(l.offsets, l.i-1)\n\t\tgoto yystate0\n\t}\nyyrule11: // \\'(\\\\.|[^\\\\']|\\0)*\\'\n\t{\n\t\ts = lstateLabels\n\t\tl.offsets = append(l.offsets, l.i-1)\n\t\tgoto yystate0\n\t}\nyyrule12: // [ \\t]+\n\t{\n\t\tl.vstart = l.i\n\t\tgoto yystate0\n\t}\nyyrule13: // (NaN)\n\t{\n\t\tl.val = math.Float64frombits(value.NormalNaN)\n\t\ts = lstateTimestamp\n\t\tgoto yystate0\n\t}\nyyrule14: // [^\\n \\t\\r]+\n\t{\n\t\t// We don't parse strictly correct floats as the conversion\n\t\t// repeats the effort anyway.\n\t\tl.val, l.err = strconv.ParseFloat(yoloString(l.b[l.vstart:l.i]), 64)\n\t\tif l.err != nil {\n\t\t\treturn -1\n\t\t}\n\t\ts = lstateTimestamp\n\t\tgoto yystate0\n\t}\nyyrule15: // [ \\t]+\n\t{\n\t\tl.tstart = l.i\n\t\tgoto yystate0\n\t}\nyyrule16: // {D}+\n\t{\n\t\tts, err := strconv.ParseInt(yoloString(l.b[l.tstart:l.i]), 10, 64)\n\t\tif err != nil {\n\t\t\tl.err = err\n\t\t\treturn -1\n\t\t}\n\t\tl.ts = &ts\n\t\tgoto yystate0\n\t}\nyyrule17: // [\\r\\n]+\n\t{\n\t\tl.nextMstart = l.i\n\t\treturn 1\n\t}\nyyrule18: // \\0\n\t{\n\t\treturn 1\n\n\t}\n\tpanic(\"unreachable\")\n\n\tgoto yyabort // silence unused label error\n\nyyabort: // no lexem recognized\n\tl.err = fmt.Errorf(\"no token found\")\n\treturn -1\n}",
"func NewLexer(r io.Reader) *Lexer {\n\treturn &Lexer{scanner: NewScanner(r), Errors: make(chan error, 1)}\n}",
"func (l *Lexer) scan() {\n\tif l.readPos >= len(l.in) {\n\t\tl.ch = 0\n\t} else {\n\t\tl.ch = l.in[l.readPos]\n\t}\n\n\tl.Pos = l.readPos\n\tl.readPos++\n}",
"func NewLexer(scan Scanner) (lx Lexer) {\n\tlx.Scanner = scan\n\tlx.pos = Position{1, 0}\n\tlx.rawPos = Position{1, 0}\n\treturn\n}",
"func lexError(l *lexer) stateFn {\n\t//premature lexical scanning\n\t//不emit接收方就一直在等待\n\tl.emit(tEOF)\n\treturn nil\n}",
"func (l *LexerState) Iterator() Token { // nolint: gocognit\n\tfor l.Pos < len(l.Text) && len(l.Stack) > 0 {\n\t\t// Exhaust the iterator stack, if any.\n\t\tfor len(l.iteratorStack) > 0 {\n\t\t\tn := len(l.iteratorStack) - 1\n\t\t\tt := l.iteratorStack[n]()\n\t\t\tif t == EOF {\n\t\t\t\tl.iteratorStack = l.iteratorStack[:n]\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn t\n\t\t}\n\n\t\tl.State = l.Stack[len(l.Stack)-1]\n\t\tif l.Lexer.trace {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s: pos=%d, text=%q\\n\", l.State, l.Pos, string(l.Text[l.Pos:]))\n\t\t}\n\t\tselectedRule, ok := l.Rules[l.State]\n\t\tif !ok {\n\t\t\tpanic(\"unknown state \" + l.State)\n\t\t}\n\t\truleIndex, rule, groups := matchRules(l.Text, l.Pos, selectedRule)\n\t\t// No match.\n\t\tif groups == nil {\n\t\t\t// From Pygments :\\\n\t\t\t//\n\t\t\t// If the RegexLexer encounters a newline that is flagged as an error token, the stack is\n\t\t\t// emptied and the lexer continues scanning in the 'root' state. This can help producing\n\t\t\t// error-tolerant highlighting for erroneous input, e.g. when a single-line string is not\n\t\t\t// closed.\n\t\t\tif l.Text[l.Pos] == '\\n' && l.State != l.options.State {\n\t\t\t\tl.Stack = []string{l.options.State}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tl.Pos++\n\t\t\treturn Token{Error, string(l.Text[l.Pos-1 : l.Pos])}\n\t\t}\n\t\tl.Rule = ruleIndex\n\t\tl.Groups = groups\n\t\tl.Pos += utf8.RuneCountInString(groups[0])\n\t\tif rule.Mutator != nil {\n\t\t\tif err := rule.Mutator.Mutate(l); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t\tif rule.Type != nil {\n\t\t\tl.iteratorStack = append(l.iteratorStack, rule.Type.Emit(l.Groups, l.Lexer))\n\t\t}\n\t}\n\t// Exhaust the IteratorStack, if any.\n\t// Duplicate code, but eh.\n\tfor len(l.iteratorStack) > 0 {\n\t\tn := len(l.iteratorStack) - 1\n\t\tt := l.iteratorStack[n]()\n\t\tif t == EOF {\n\t\t\tl.iteratorStack = l.iteratorStack[:n]\n\t\t\tcontinue\n\t\t}\n\t\treturn t\n\t}\n\n\t// If we get to here and we still have text, return it as an error.\n\tif l.Pos != len(l.Text) && len(l.Stack) == 0 {\n\t\tvalue := string(l.Text[l.Pos:])\n\t\tl.Pos = len(l.Text)\n\t\treturn Token{Type: Error, Value: value}\n\t}\n\treturn EOF\n}",
"func (i *Interpreter) getNextToken() scanResult {\n\n\tvar r scanResult\n\n\tif i.Err != nil {\n\t\tr.err = i.Err\n\t\tr.t = errorT\n\t\treturn r\n\t}\n\n\tif !i.scanner.Scan() {\n\t\t// EOF\n\t\ti.Err = io.EOF\n\t\tr.err = i.Err\n\t\tr.t = errorT\n\t\treturn r\n\t}\n\n\tr.token = i.scanner.Text()\n\n\t// slurp comments if needed\n\tif r.token == \"(\" {\n\t\tfor r.token[len(r.token)-1:] != \")\" {\n\t\t\tif !i.scanner.Scan() {\n\t\t\t\tr.err = fmt.Errorf(\"unexpected unclosed comment : %s\", r.token)\n\t\t\t\tr.t = errorT\n\t\t\t\treturn r\n\t\t\t}\n\t\t\tr.token = i.scanner.Text()\n\t\t}\n\t\t// then tail-recurse,\n\t\t// you can have multiple successive comments ...\n\t\treturn i.getNextToken()\n\t}\n\n\t// try to decode token\n\tnfa := i.lookup(r.token)\n\tif i.Err == nil {\n\t\tr.v = 1 + nfa\n\t\tr.t = compoundT\n\t\tr.err = nil\n\t\treturn r\n\t}\n\n\t// so, token could not be decoded\n\t// reset error and try numbers ...\n\ti.Err = nil\n\tif num, err := strconv.ParseInt(r.token, i.getBase(), 64); err == nil {\n\t\tr.v = int(num)\n\t\tr.t = numberT\n\t\tr.err = nil\n\t\treturn r\n\t}\n\n\t// Token cannot be understood\n\tr.t = errorT\n\tr.err = fmt.Errorf(\"cannot understand the token : %s\", r.token)\n\treturn r\n\n}",
"func newLexer(r io.Reader) *lexer {\n\treturn &lexer{\n\t\tscanner: newScanner(r),\n\t}\n}",
"func lex(input string) *lexer {\n\tl := &lexer{\n\t\tinput: input,\n\t\titems: make(chan Item, 0),\n\t}\n\tgo l.scan()\n\treturn l\n}",
"func lex(input string) *lexer {\n\tl := &lexer{\n\t\tinput: input,\n\t\titems: make(chan Item), // unbuffered\n\t}\n\tgo l.scan()\n\treturn l\n}",
"func New(input string) *Lexer {\n\ts := &Lexer{\n\t\tinput: input,\n\t\tstate: lexCode,\n\t\ttokens: make(chan token.Token, len(input)), // buffer is same size as input to avoid deadlock\n\t\tline: 1,\n\t}\n\treturn s\n}",
"func (t *Tokenizer) scanStream() (*Token, error) {\n\tstate := STATE_START\n\tvar tokenType TokenType\n\tvalue := make([]int32, 0, INITIAL_TOKEN_CAPACITY)\n\tvar (\n\t\tnextRune int32\n\t\tnextRuneType RuneTokenType\n\t\terr error\n\t)\nSCAN:\n\tfor {\n\t\tnextRune, _, err = t.input.ReadRune()\n\t\tnextRuneType = t.classifier.ClassifyRune(nextRune)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tnextRuneType = RUNETOKEN_EOF\n\t\t\t\terr = nil\n\t\t\t} else {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tswitch state {\n\t\tcase STATE_START: // no runes read yet\n\t\t\t{\n\t\t\t\tswitch nextRuneType {\n\t\t\t\tcase RUNETOKEN_EOF:\n\t\t\t\t\t{\n\t\t\t\t\t\treturn nil, io.EOF\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_CHAR:\n\t\t\t\t\t{\n\t\t\t\t\t\ttokenType = TOKEN_WORD\n\t\t\t\t\t\tvalue = append(value, nextRune)\n\t\t\t\t\t\tstate = STATE_INWORD\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_SPACE:\n\t\t\t\t\t{\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_ESCAPING_QUOTE:\n\t\t\t\t\t{\n\t\t\t\t\t\ttokenType = TOKEN_WORD\n\t\t\t\t\t\tstate = STATE_QUOTED_ESCAPING\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_NONESCAPING_QUOTE:\n\t\t\t\t\t{\n\t\t\t\t\t\ttokenType = TOKEN_WORD\n\t\t\t\t\t\tstate = STATE_QUOTED\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_ESCAPE:\n\t\t\t\t\t{\n\t\t\t\t\t\ttokenType = TOKEN_WORD\n\t\t\t\t\t\tstate = STATE_ESCAPING\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_COMMENT:\n\t\t\t\t\t{\n\t\t\t\t\t\ttokenType = TOKEN_COMMENT\n\t\t\t\t\t\tstate = STATE_COMMENT\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t{\n\t\t\t\t\t\treturn nil, errors.New(fmt.Sprintf(\"Unknown rune: %v\", nextRune))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase STATE_INWORD: // in a regular word\n\t\t\t{\n\t\t\t\tswitch nextRuneType {\n\t\t\t\tcase RUNETOKEN_EOF:\n\t\t\t\t\t{\n\t\t\t\t\t\tbreak SCAN\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_CHAR, RUNETOKEN_COMMENT:\n\t\t\t\t\t{\n\t\t\t\t\t\tvalue = append(value, nextRune)\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_SPACE:\n\t\t\t\t\t{\n\t\t\t\t\t\tt.input.UnreadRune()\n\t\t\t\t\t\tbreak SCAN\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_ESCAPING_QUOTE:\n\t\t\t\t\t{\n\t\t\t\t\t\tstate = STATE_QUOTED_ESCAPING\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_NONESCAPING_QUOTE:\n\t\t\t\t\t{\n\t\t\t\t\t\tstate = STATE_QUOTED\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_ESCAPE:\n\t\t\t\t\t{\n\t\t\t\t\t\tstate = STATE_ESCAPING\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t{\n\t\t\t\t\t\treturn nil, errors.New(fmt.Sprintf(\"Uknown rune: %v\", nextRune))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase STATE_ESCAPING: // the next rune after an escape character\n\t\t\t{\n\t\t\t\tswitch nextRuneType {\n\t\t\t\tcase RUNETOKEN_EOF:\n\t\t\t\t\t{\n\t\t\t\t\t\terr = errors.New(\"EOF found after escape character\")\n\t\t\t\t\t\tbreak SCAN\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:\n\t\t\t\t\t{\n\t\t\t\t\t\tstate = STATE_INWORD\n\t\t\t\t\t\tvalue = append(value, nextRune)\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t{\n\t\t\t\t\t\treturn nil, errors.New(fmt.Sprintf(\"Uknown rune: %v\", nextRune))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase STATE_ESCAPING_QUOTED: // the next rune after an escape character, in double quotes\n\t\t\t{\n\t\t\t\tswitch nextRuneType {\n\t\t\t\tcase RUNETOKEN_EOF:\n\t\t\t\t\t{\n\t\t\t\t\t\terr = errors.New(\"EOF found after escape character\")\n\t\t\t\t\t\tbreak SCAN\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, 
RUNETOKEN_COMMENT:\n\t\t\t\t\t{\n\t\t\t\t\t\tstate = STATE_QUOTED_ESCAPING\n\t\t\t\t\t\tvalue = append(value, nextRune)\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t{\n\t\t\t\t\t\treturn nil, errors.New(fmt.Sprintf(\"Uknown rune: %v\", nextRune))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase STATE_QUOTED_ESCAPING: // in escaping double quotes\n\t\t\t{\n\t\t\t\tswitch nextRuneType {\n\t\t\t\tcase RUNETOKEN_EOF:\n\t\t\t\t\t{\n\t\t\t\t\t\terr = errors.New(\"EOF found when expecting closing quote.\")\n\t\t\t\t\t\tbreak SCAN\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_COMMENT:\n\t\t\t\t\t{\n\t\t\t\t\t\tvalue = append(value, nextRune)\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_ESCAPING_QUOTE:\n\t\t\t\t\t{\n\t\t\t\t\t\tstate = STATE_INWORD\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_ESCAPE:\n\t\t\t\t\t{\n\t\t\t\t\t\tstate = STATE_ESCAPING_QUOTED\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t{\n\t\t\t\t\t\treturn nil, errors.New(fmt.Sprintf(\"Uknown rune: %v\", nextRune))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase STATE_QUOTED: // in non-escaping single quotes\n\t\t\t{\n\t\t\t\tswitch nextRuneType {\n\t\t\t\tcase RUNETOKEN_EOF:\n\t\t\t\t\t{\n\t\t\t\t\t\terr = errors.New(\"EOF found when expecting closing quote.\")\n\t\t\t\t\t\tbreak SCAN\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:\n\t\t\t\t\t{\n\t\t\t\t\t\tvalue = append(value, nextRune)\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_NONESCAPING_QUOTE:\n\t\t\t\t\t{\n\t\t\t\t\t\tstate = STATE_INWORD\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t{\n\t\t\t\t\t\treturn nil, errors.New(fmt.Sprintf(\"Uknown rune: %v\", nextRune))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase STATE_COMMENT:\n\t\t\t{\n\t\t\t\tswitch nextRuneType {\n\t\t\t\tcase RUNETOKEN_EOF:\n\t\t\t\t\t{\n\t\t\t\t\t\tbreak SCAN\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT, RUNETOKEN_NONESCAPING_QUOTE:\n\t\t\t\t\t{\n\t\t\t\t\t\tvalue = append(value, nextRune)\n\t\t\t\t\t}\n\t\t\t\tcase RUNETOKEN_SPACE:\n\t\t\t\t\t{\n\t\t\t\t\t\tif nextRune == '\\n' {\n\t\t\t\t\t\t\tstate = STATE_START\n\t\t\t\t\t\t\tbreak SCAN\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tvalue = append(value, nextRune)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t{\n\t\t\t\t\t\treturn nil, errors.New(fmt.Sprintf(\"Uknown rune: %v\", nextRune))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\t{\n\t\t\t\tpanic(fmt.Sprintf(\"Unexpected state: %v\", state))\n\t\t\t}\n\t\t}\n\t}\n\ttoken := &Token{\n\t\ttokenType: tokenType,\n\t\tvalue: string(value)}\n\treturn token, err\n}",
"func NewLexer(emojis []string) Lexer {\n\troot := newTree(emojis)\n\n\treturn func(s string, text func(string), emoji func(string)) {\n\t\ttextStart := 0 // leftmost byte. Everything before has been sent\n\t\temojiStart := 0\n\n\t\tvar j int\n\t\tvar r rune\n\t\tcurrent := root\n\t\tfor j, r = range s {\n\t\t\t// is (still) matching an emoji\n\t\t\tif c := current.Children[r]; c != nil {\n\t\t\t\t// emoji starts now\n\t\t\t\tif current == root {\n\t\t\t\t\temojiStart = j\n\t\t\t\t}\n\t\t\t\tcurrent = c\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// was matching an emoji\n\t\t\tif current != root {\n\t\t\t\tif current.IsLeaf {\n\t\t\t\t\tif textStart < emojiStart {\n\t\t\t\t\t\ttext(s[textStart:emojiStart])\n\t\t\t\t\t}\n\t\t\t\t\temoji(s[emojiStart:j])\n\t\t\t\t\ttextStart = j\n\t\t\t\t}\n\t\t\t\tcurrent = root\n\t\t\t\t// next emoji starts now\n\t\t\t\tif c := current.Children[r]; c != nil {\n\t\t\t\t\temojiStart = j\n\t\t\t\t\tcurrent = c\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif j >= textStart {\n\t\t\t// currently matching an emoji\n\t\t\tif current != root && current.IsLeaf {\n\t\t\t\tif textStart < emojiStart {\n\t\t\t\t\ttext(s[textStart:emojiStart])\n\t\t\t\t}\n\t\t\t\temoji(s[emojiStart:])\n\t\t\t} else {\n\t\t\t\ttext(s[textStart:])\n\t\t\t}\n\t\t}\n\t}\n}",
"func lexerFunction(thread int, ruleNum int, yytext string, genSym *symbol) int {\n\tswitch ruleNum {\n\tcase 0:\n\t\t{\n\t\t\t*genSym = symbol{LPAR, 0, nil, nil, nil}\n\t\t\treturn _LEX_CORRECT\n\t\t}\n\tcase 1:\n\t\t{\n\t\t\t*genSym = symbol{RPAR, 0, nil, nil, nil}\n\t\t\treturn _LEX_CORRECT\n\t\t}\n\tcase 2:\n\t\t{\n\t\t\t*genSym = symbol{TIMES, 0, nil, nil, nil}\n\t\t\treturn _LEX_CORRECT\n\t\t}\n\tcase 3:\n\t\t{\n\t\t\t*genSym = symbol{PLUS, 0, nil, nil, nil}\n\t\t\treturn _LEX_CORRECT\n\t\t}\n\tcase 4:\n\t\t{\n\t\t\tnum := lexerInt64Pools[thread].Get()\n\t\t\terr := error(nil)\n\t\t\t*num, err = strconv.ParseInt(yytext, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn _ERROR\n\t\t\t}\n\t\t\t*genSym = symbol{NUMBER, 0, num, nil, nil}\n\t\t\treturn _LEX_CORRECT\n\t\t}\n\tcase 5:\n\t\t{\n\t\t\treturn _SKIP\n\t\t}\n\tcase 6:\n\t\t{\n\t\t\treturn _SKIP\n\t\t}\n\tcase 7:\n\t\t{\n\t\t\treturn _ERROR\n\t\t}\n\t}\n\treturn _ERROR\n}",
"func TestScanner_Scan(t *testing.T) {\n\tvar tests = []struct {\n\t\ts string\n\t\ttok expr.Token\n\t\tlit string\n\t\tpos expr.Pos\n\t}{\n\t\t// Special tokens (EOF, ILLEGAL, WS)\n\t\t{s: ``, tok: expr.EOF},\n\t\t{s: `#`, tok: expr.ILLEGAL, lit: `#`},\n\t\t{s: ` `, tok: expr.WS, lit: \" \"},\n\t\t{s: \"\\t\", tok: expr.WS, lit: \"\\t\"},\n\t\t{s: \"\\n\", tok: expr.WS, lit: \"\\n\"},\n\t\t{s: \"\\r\", tok: expr.WS, lit: \"\\n\"},\n\t\t{s: \"\\r\\n\", tok: expr.WS, lit: \"\\n\"},\n\t\t{s: \"\\rX\", tok: expr.WS, lit: \"\\n\"},\n\t\t{s: \"\\n\\r\", tok: expr.WS, lit: \"\\n\\n\"},\n\t\t{s: \" \\n\\t \\r\\n\\t\", tok: expr.WS, lit: \" \\n\\t \\n\\t\"},\n\t\t{s: \" foo\", tok: expr.WS, lit: \" \"},\n\n\t\t// Numeric operators\n\t\t{s: `*`, tok: expr.MUL},\n\t\t{s: `/`, tok: expr.DIV},\n\n\t\t// Logical operators\n\t\t{s: `AND`, tok: expr.AND},\n\t\t{s: `and`, tok: expr.AND},\n\t\t{s: `OR`, tok: expr.OR},\n\t\t{s: `or`, tok: expr.OR},\n\n\t\t{s: `=`, tok: expr.EQ},\n\t\t{s: `<>`, tok: expr.NEQ},\n\t\t{s: `! `, tok: expr.EXCLAMATION},\n\t\t{s: `<`, tok: expr.LT},\n\t\t{s: `<=`, tok: expr.LTE},\n\t\t{s: `>`, tok: expr.GT},\n\t\t{s: `>=`, tok: expr.GTE},\n\n\t\t// Misc tokens\n\t\t{s: `(`, tok: expr.LPAREN},\n\t\t{s: `)`, tok: expr.RPAREN},\n\t\t{s: `,`, tok: expr.COMMA},\n\t\t{s: `.`, tok: expr.DOT},\n\n\t\t// Identifiers\n\t\t{s: `foo`, tok: expr.IDENT, lit: `foo`},\n\t\t{s: `_foo`, tok: expr.IDENT, lit: `_foo`},\n\t\t{s: `Zx12_3U_-`, tok: expr.IDENT, lit: `Zx12_3U_`},\n\t\t{s: \"`foo`\", tok: expr.IDENT, lit: `foo`},\n\t\t{s: \"`foo\\\\\\\\bar`\", tok: expr.IDENT, lit: `foo\\bar`},\n\t\t{s: \"`foo\\\\bar`\", tok: expr.BADESCAPE, lit: `\\b`, pos: expr.Pos{Line: 0, Char: 5}},\n\t\t{s: \"`foo\\\"bar\\\"`\", tok: expr.IDENT, lit: `foo\"bar\"`},\n\t\t{s: \"test`\", tok: expr.BADSTRING, lit: \"\", pos: expr.Pos{Line: 0, Char: 3}},\n\t\t{s: \"`test\", tok: expr.BADSTRING, lit: `test`},\n\n\t\t{s: `true`, tok: expr.TRUE},\n\t\t{s: `false`, tok: expr.FALSE},\n\n\t\t// Strings\n\t\t{s: `'testing 123!'`, tok: expr.STRING, lit: `testing 123!`},\n\t\t{s: `\"testing 123!\"`, tok: expr.STRING, lit: `testing 123!`},\n\t\t{s: `'foo\\nbar'`, tok: expr.STRING, lit: \"foo\\nbar\"},\n\t\t{s: `'foo\\\\bar'`, tok: expr.STRING, lit: \"foo\\\\bar\"},\n\t\t{s: `'test`, tok: expr.BADSTRING, lit: `test`},\n\t\t{s: \"'test\\nfoo\", tok: expr.BADSTRING, lit: `test`},\n\t\t{s: `'test\\g'`, tok: expr.BADESCAPE, lit: `\\g`, pos: expr.Pos{Line: 0, Char: 6}},\n\n\t\t// Numbers\n\t\t{s: `100`, tok: expr.NUMBER, lit: `100`},\n\t\t{s: `100.23`, tok: expr.NUMBER, lit: `100.23`},\n\t\t{s: `+100.23`, tok: expr.NUMBER, lit: `+100.23`},\n\t\t{s: `-100.23`, tok: expr.NUMBER, lit: `-100.23`},\n\t\t{s: `10.3s`, tok: expr.NUMBER, lit: `10.3`},\n\n\t\t// Keywords\n\t\t{s: `ALL`, tok: expr.ALL},\n\t\t{s: `AS`, tok: expr.AS},\n\t\t{s: `ASC`, tok: expr.ASC},\n\t\t{s: `BEGIN`, tok: expr.BEGIN},\n\t\t{s: `BY`, tok: expr.BY},\n\t\t{s: `DEFAULT`, tok: expr.DEFAULT},\n\t\t{s: `DELETE`, tok: expr.DELETE},\n\t\t{s: `DESC`, tok: expr.DESC},\n\t\t{s: `DROP`, tok: expr.DROP},\n\t\t{s: `END`, tok: expr.END},\n\t\t{s: `EXISTS`, tok: expr.EXISTS},\n\t\t{s: `FIELD`, tok: expr.FIELD},\n\t\t{s: `FROM`, tok: expr.FROM},\n\t\t{s: `GROUP`, tok: expr.GROUP},\n\t\t{s: `IF`, tok: expr.IF},\n\t\t{s: `INNER`, tok: expr.INNER},\n\t\t{s: `INSERT`, tok: expr.INSERT},\n\t\t{s: `KEY`, tok: expr.KEY},\n\t\t{s: `KEYS`, tok: expr.KEYS},\n\t\t{s: `LIMIT`, tok: expr.LIMIT},\n\t\t{s: `NOT`, tok: expr.NOT},\n\t\t{s: `OFFSET`, tok: expr.OFFSET},\n\t\t{s: `ON`, tok: 
expr.ON},\n\t\t{s: `ORDER`, tok: expr.ORDER},\n\t\t{s: `SELECT`, tok: expr.SELECT},\n\t\t{s: `TO`, tok: expr.TO},\n\t\t{s: `VALUES`, tok: expr.VALUES},\n\t\t{s: `WHERE`, tok: expr.WHERE},\n\t\t{s: `WITH`, tok: expr.WITH},\n\t\t{s: `seLECT`, tok: expr.SELECT}, // case insensitive\n\t}\n\n\tfor i, tt := range tests {\n\t\ts := expr.NewScanner(strings.NewReader(tt.s))\n\t\ttok, pos, lit := s.Scan()\n\t\tif tt.tok != tok {\n\t\t\tt.Errorf(\"%d. %q token mismatch: exp=%q got=%q <%q>\", i, tt.s, tt.tok, tok, lit)\n\t\t} else if tt.pos.Line != pos.Line || tt.pos.Char != pos.Char {\n\t\t\tt.Errorf(\"%d. %q pos mismatch: exp=%#v got=%#v\", i, tt.s, tt.pos, pos)\n\t\t} else if tt.lit != lit {\n\t\t\tt.Errorf(\"%d. %q literal mismatch: exp=%q got=%q\", i, tt.s, tt.lit, lit)\n\t\t}\n\t}\n}",
"func NewLexer(raw []rune) *Lexer {\n\treturn &Lexer{\n\t\tbuf: raw,\n\t\tnormal: true,\n\t}\n}",
"func NewLexer(input string) *Lexer {\n\tl := &Lexer{\n\t\tinput: input,\n\t\ttokens: make(chan *Token),\n\t}\n\tgo l.run()\n\treturn l\n}",
"func (l *Lexer) Lex() (*Token, error) {\n\t// keep looping until we return a token\n\tfor {\n\t\tr, _, err := l.reader.ReadRune()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treturn &Token{l.pos, EOF, \"\"}, nil\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// update the column to the position of the newly read in rune\n\t\tl.pos.Column++\n\n\t\tswitch r {\n\t\tcase '.':\n\t\t\treturn &Token{l.pos, PATH, tokens[PATH]}, nil\n\t\tcase '|':\n\t\t\treturn &Token{l.pos, PIPE, tokens[PIPE]}, nil\n\t\tcase '=':\n\t\t\tif l.lexEqual() {\n\t\t\t\treturn &Token{l.pos, EQUAL, tokens[EQUAL]}, nil\n\t\t\t}\n\t\t\treturn &Token{l.pos, ASSIGN, tokens[ASSIGN]}, nil\n\t\tcase '\"':\n\t\t\tstartPos := l.pos\n\t\t\tlit := l.lexString()\n\t\t\treturn &Token{startPos, STRING, lit}, nil\n\t\tdefault:\n\t\t\tif unicode.IsSpace(r) {\n\t\t\t\tcontinue // nothing to do here, just move on\n\t\t\t} else if unicode.IsDigit(r) {\n\t\t\t\t// backup and let lexInt rescan the beginning of the int\n\t\t\t\tstartPos := l.pos\n\t\t\t\tl.backup()\n\t\t\t\tlit := l.lexInt()\n\t\t\t\treturn &Token{startPos, INT, lit}, nil\n\t\t\t} else if unicode.IsLetter(r) || r == '-' {\n\t\t\t\t// backup and let lexIdent rescan the beginning of the ident\n\t\t\t\tstartPos := l.pos\n\t\t\t\tl.backup()\n\t\t\t\tlit := l.lexIdent()\n\t\t\t\treturn &Token{startPos, IDENT, lit}, nil\n\t\t\t} else {\n\t\t\t\treturn &Token{l.pos, ILLEGAL, string(r)}, nil\n\t\t\t}\n\t\t}\n\t}\n}",
"func NewLexerA(input CharStream) *LexerA {\n\tLexerAInit()\n\tl := new(LexerA)\n\tl.BaseLexer = NewBaseLexer(input)\n\tstaticData := &lexeraLexerStaticData\n\tl.Interpreter = NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache)\n\tl.channelNames = staticData.channelNames\n\tl.modeNames = staticData.modeNames\n\tl.RuleNames = staticData.ruleNames\n\tl.LiteralNames = staticData.literalNames\n\tl.SymbolicNames = staticData.symbolicNames\n\tl.GrammarFileName = \"LexerA.g4\"\n\t// TODO: l.EOF = antlr.TokenEOF\n\n\treturn l\n}",
"func NewLexer(text []string) lexer {\n\treturn lexer{text, 0, text[0]}\n}",
"func New(input []rune) *Lexer {\n\tlex := &Lexer{\n\t\tI: input,\n\t\tTokens: make([]*token.Token, 0, 2048),\n\t}\n\tlext := 0\n\tfor lext < len(lex.I) {\n\t\tfor lext < len(lex.I) && unicode.IsSpace(lex.I[lext]) {\n\t\t\tlext++\n\t\t}\n\t\tif lext < len(lex.I) {\n\t\t\ttok := lex.scan(lext)\n\t\t\tlext = tok.Rext()\n\t\t\tif !tok.Suppress() {\n\t\t\t\tlex.addToken(tok)\n\t\t\t}\n\t\t}\n\t}\n\tlex.add(token.EOF, len(input), len(input))\n\treturn lex\n}",
"func New(input []rune) *Lexer {\n\tlex := &Lexer{\n\t\tI: input,\n\t\tTokens: make([]*token.Token, 0, 2048),\n\t}\n\tlext := 0\n\tfor lext < len(lex.I) {\n\t\tfor lext < len(lex.I) && unicode.IsSpace(lex.I[lext]) {\n\t\t\tlext++\n\t\t}\n\t\tif lext < len(lex.I) {\n\t\t\ttok := lex.scan(lext)\n\t\t\tlext = tok.Rext()\n\t\t\tif !tok.Suppress() {\n\t\t\t\tlex.addToken(tok)\n\t\t\t}\n\t\t}\n\t}\n\tlex.add(token.EOF, len(input), len(input))\n\treturn lex\n}",
"func (l *GoLex) Lex(lval *yySymType) int {\n\tfor l.pos < len(l.input) {\n\t\t// get the next Rune(part of string) and its length\n\t\tr, n := utf8.DecodeRune(l.input[l.pos:])\n\t\t// if it is a space, then skip it.\n\t\tif unicode.IsSpace(r) && r != '\\n' {\n\t\t\tl.pos += n\n\t\t\tcontinue\n\t\t}\n\t\tswitch {\n\t\tcase re[SREACH].Match(l.input[l.pos:]):\n\t\t\tl.pos += len(re[SREACH].Find(l.input[l.pos:]))\n\t\t\treturn SREACH\n\n\t\tcase re[LIST].Match(l.input[l.pos:]):\n\t\t\tl.pos += len(re[LIST].Find(l.input[l.pos:]))\n\t\t\treturn LIST\n\n\t\tcase re[PRINT].Match(l.input[l.pos:]):\n\t\t\tl.pos += len(re[PRINT].Find(l.input[l.pos:]))\n\t\t\treturn PRINT\n\n\t\tcase re[QUIT].Match(l.input[l.pos:]):\n\t\t\tl.pos += len(re[QUIT].Find(l.input[l.pos:]))\n\t\t\treturn QUIT\n\n\t\tcase re[AND].Match(l.input[l.pos:]):\n\t\t\tl.pos += len(re[AND].Find(l.input[l.pos:]))\n\t\t\treturn AND\n\n\t\tcase re[OR].Match(l.input[l.pos:]):\n\t\t\tl.pos += len(re[OR].Find(l.input[l.pos:]))\n\t\t\treturn OR\n\n\t\tcase re[NOT].Match(l.input[l.pos:]):\n\t\t\tl.pos += len(re[NOT].Find(l.input[l.pos:]))\n\t\t\treturn NOT\n\n\t\tcase re[STR].Match(l.input[l.pos:]):\n\t\t\tstr_result := re[STR].Find(l.input[l.pos:])\n\t\t\tl.pos += len(str_result)\n\t\t\t// let itself has the value that it want.\n\t\t\tlval.Str = getstring(string(str_result))\n\t\t\treturn STR\n\n\t\tcase re[NEWLINE].Match(l.input[l.pos:]):\n\t\t\tl.pos += len(re[NEWLINE].Find(l.input[l.pos:]))\n\t\t\treturn NEWLINE\n\n\t\tcase string(l.input[l.pos:l.pos+1]) == \"(\":\n\t\t\tl.pos += len(\"(\")\n\t\t\treturn int('(')\n\n\t\tcase string(l.input[l.pos:l.pos+1]) == \")\":\n\t\t\tl.pos += len(\")\")\n\t\t\treturn int(')')\n\n\t\tdefault:\n\t\t\tfmt.Printf(\" | can't match %#v\\n\", string(l.input[l.pos:]))\n\t\t\treturn 0\n\t\t}\n\t}\n\treturn 0\n}",
"func TestScanner_Scan(t *testing.T) {\n\tvar tests = []struct {\n\t\ts string\n\t\ttok scanner.Token\n\t\tlit string\n\t\tpos scanner.Pos\n\t\traw string\n\t}{\n\t\t// Special tokens (EOF, ILLEGAL, WS)\n\t\t{s: ``, tok: scanner.EOF, raw: ``},\n\t\t{s: `#`, tok: scanner.ILLEGAL, lit: `#`, raw: `#`},\n\t\t{s: ` `, tok: scanner.WS, lit: \" \", raw: ` `},\n\t\t{s: \"\\t\", tok: scanner.WS, lit: \"\\t\", raw: \"\\t\"},\n\t\t{s: \"\\n\", tok: scanner.WS, lit: \"\\n\", raw: \"\\n\"},\n\t\t{s: \"\\r\", tok: scanner.WS, lit: \"\\n\", raw: \"\\n\"},\n\t\t{s: \"\\r\\n\", tok: scanner.WS, lit: \"\\n\", raw: \"\\n\"},\n\t\t{s: \"\\rX\", tok: scanner.WS, lit: \"\\n\", raw: \"\\n\"},\n\t\t{s: \"\\n\\r\", tok: scanner.WS, lit: \"\\n\\n\", raw: \"\\n\\n\"},\n\t\t{s: \" \\n\\t \\r\\n\\t\", tok: scanner.WS, lit: \" \\n\\t \\n\\t\", raw: \" \\n\\t \\n\\t\"},\n\t\t{s: \" foo\", tok: scanner.WS, lit: \" \", raw: \" \"},\n\n\t\t// Numeric operators\n\t\t{s: `+`, tok: scanner.ADD, raw: `+`},\n\t\t{s: `-`, tok: scanner.SUB, raw: `-`},\n\t\t{s: `*`, tok: scanner.MUL, raw: `*`},\n\t\t{s: `/`, tok: scanner.DIV, raw: `/`},\n\t\t{s: `%`, tok: scanner.MOD, raw: `%`},\n\n\t\t// Logical operators\n\t\t{s: `AND`, tok: scanner.AND, raw: `AND`},\n\t\t{s: `and`, tok: scanner.AND, raw: `and`},\n\t\t{s: `OR`, tok: scanner.OR, raw: `OR`},\n\t\t{s: `or`, tok: scanner.OR, raw: `or`},\n\n\t\t// Comparison operators\n\t\t{s: `=`, tok: scanner.EQ, raw: `=`},\n\t\t{s: `==`, tok: scanner.EQ, raw: `==`},\n\t\t{s: `<>`, tok: scanner.NEQ, raw: `<>`},\n\t\t{s: `! `, tok: scanner.ILLEGAL, lit: \"!\", raw: `!`},\n\t\t{s: `<`, tok: scanner.LT, raw: `<`},\n\t\t{s: `<=`, tok: scanner.LTE, raw: `<=`},\n\t\t{s: `>`, tok: scanner.GT, raw: `>`},\n\t\t{s: `>=`, tok: scanner.GTE, raw: `>=`},\n\t\t{s: `IN`, tok: scanner.IN, raw: `IN`},\n\t\t{s: `IS`, tok: scanner.IS, raw: `IS`},\n\t\t{s: `LIKE`, tok: scanner.LIKE, raw: `LIKE`},\n\n\t\t// Misc tokens\n\t\t{s: `(`, tok: scanner.LPAREN, raw: `(`},\n\t\t{s: `)`, tok: scanner.RPAREN, raw: `)`},\n\t\t{s: `{`, tok: scanner.LBRACKET, raw: `{`},\n\t\t{s: `}`, tok: scanner.RBRACKET, raw: `}`},\n\t\t{s: `[`, tok: scanner.LSBRACKET, raw: `[`},\n\t\t{s: `]`, tok: scanner.RSBRACKET, raw: `]`},\n\t\t{s: `,`, tok: scanner.COMMA, raw: `,`},\n\t\t{s: `;`, tok: scanner.SEMICOLON, raw: `;`},\n\t\t{s: `.`, tok: scanner.DOT, raw: `.`},\n\t\t{s: `=~`, tok: scanner.EQREGEX, raw: `=~`},\n\t\t{s: `!~`, tok: scanner.NEQREGEX, raw: `!~`},\n\t\t{s: `:`, tok: scanner.COLON, raw: `:`},\n\t\t{s: `::`, tok: scanner.DOUBLECOLON, raw: `::`},\n\t\t{s: `--`, tok: scanner.COMMENT, raw: `--`},\n\t\t{s: `--10.3`, tok: scanner.COMMENT, lit: ``, raw: `--10.3`},\n\n\t\t// Identifiers\n\t\t{s: `foo`, tok: scanner.IDENT, lit: `foo`, raw: `foo`},\n\t\t{s: `_foo`, tok: scanner.IDENT, lit: `_foo`, raw: `_foo`},\n\t\t{s: `Zx12_3U_-`, tok: scanner.IDENT, lit: `Zx12_3U_`, raw: `Zx12_3U_`},\n\t\t{s: \"`foo`\", tok: scanner.IDENT, lit: \"foo\", raw: \"`foo`\"},\n\t\t{s: \"`foo\\bar`\", tok: scanner.IDENT, lit: \"foo\\bar\", raw: \"`foo\\bar`\"},\n\t\t{s: \"`foo\\\\bar`\", tok: scanner.BADESCAPE, lit: `\\b`, pos: scanner.Pos{Line: 0, Char: 5}, raw: \"`foo\\\\b\"},\n\t\t{s: \"`foo\\\\`bar\\\\``\", tok: scanner.IDENT, lit: \"foo`bar`\", raw: \"`foo\\\\`bar\\\\``\"},\n\t\t{s: \"test`\", tok: scanner.BADSTRING, lit: \"\", pos: scanner.Pos{Line: 0, Char: 3}, raw: \"test`\"},\n\t\t{s: \"`test\", tok: scanner.BADSTRING, lit: \"test\", raw: \"`test\"},\n\t\t{s: \"$host\", tok: scanner.NAMEDPARAM, lit: \"$host\", raw: \"$host\"},\n\t\t{s: \"$`host param`\", tok: 
scanner.NAMEDPARAM, lit: \"$host param\", raw: \"$`host param`\"},\n\t\t{s: \"?\", tok: scanner.POSITIONALPARAM, lit: \"\", raw: \"?\"},\n\n\t\t// Booleans\n\t\t{s: `true`, tok: scanner.TRUE, raw: `true`},\n\t\t{s: `false`, tok: scanner.FALSE, raw: `false`},\n\n\t\t// Null\n\t\t{s: `null`, tok: scanner.NULL, raw: `null`},\n\t\t{s: `NULL`, tok: scanner.NULL, raw: `NULL`},\n\n\t\t// Strings\n\t\t{s: `'testing 123!'`, tok: scanner.STRING, lit: `testing 123!`, raw: `'testing 123!'`},\n\t\t{s: `'foo\\nbar'`, tok: scanner.STRING, lit: \"foo\\nbar\", raw: `'foo\\nbar'`},\n\t\t{s: `'foo\\\\bar'`, tok: scanner.STRING, lit: \"foo\\\\bar\", raw: `'foo\\\\bar'`},\n\t\t{s: `'test`, tok: scanner.BADSTRING, lit: `test`, raw: `'test`},\n\t\t{s: \"'test\\nfoo\", tok: scanner.BADSTRING, lit: `test`, raw: \"'test\\n\"},\n\t\t{s: `'test\\g'`, tok: scanner.BADESCAPE, lit: `\\g`, pos: scanner.Pos{Line: 0, Char: 6}, raw: `'test\\g`},\n\t\t{s: `\"testing 123!\"`, tok: scanner.STRING, lit: `testing 123!`, raw: `\"testing 123!\"`},\n\t\t{s: `\"foo\\nbar\"`, tok: scanner.STRING, lit: \"foo\\nbar\", raw: `\"foo\\nbar\"`},\n\t\t{s: `\"foo\\\\bar\"`, tok: scanner.STRING, lit: \"foo\\\\bar\", raw: `\"foo\\\\bar\"`},\n\t\t{s: `\"test`, tok: scanner.BADSTRING, lit: `test`, raw: `\"test`},\n\t\t{s: \"\\\"test\\nfoo\", tok: scanner.BADSTRING, lit: `test`, raw: \"\\\"test\\n\"},\n\t\t{s: `\"test\\g\"`, tok: scanner.BADESCAPE, lit: `\\g`, pos: scanner.Pos{Line: 0, Char: 6}, raw: `\"test\\g`},\n\n\t\t// Numbers\n\t\t{s: `100`, tok: scanner.INTEGER, lit: `100`, raw: `100`},\n\t\t{s: `100.23`, tok: scanner.NUMBER, lit: `100.23`, raw: `100.23`},\n\t\t{s: `.23`, tok: scanner.NUMBER, lit: `.23`, raw: `.23`},\n\t\t{s: `10.3s`, tok: scanner.NUMBER, lit: `10.3`, raw: `10.3`},\n\t\t{s: `-10.3`, tok: scanner.NUMBER, lit: `-10.3`, raw: `-10.3`},\n\n\t\t// Keywords\n\t\t{s: `ADD`, tok: scanner.ADD_KEYWORD, raw: `ADD`},\n\t\t{s: `ALTER`, tok: scanner.ALTER, raw: `ALTER`},\n\t\t{s: `AS`, tok: scanner.AS, raw: `AS`},\n\t\t{s: `ASC`, tok: scanner.ASC, raw: `ASC`},\n\t\t{s: `BY`, tok: scanner.BY, raw: `BY`},\n\t\t{s: `BEGIN`, tok: scanner.BEGIN, raw: `BEGIN`},\n\t\t{s: `CAST`, tok: scanner.CAST, raw: `CAST`},\n\t\t{s: `COMMIT`, tok: scanner.COMMIT, raw: `COMMIT`},\n\t\t{s: `CREATE`, tok: scanner.CREATE, raw: `CREATE`},\n\t\t{s: `EXPLAIN`, tok: scanner.EXPLAIN, raw: `EXPLAIN`},\n\t\t{s: `DEFAULT`, tok: scanner.DEFAULT, raw: `DEFAULT`},\n\t\t{s: `DELETE`, tok: scanner.DELETE, raw: `DELETE`},\n\t\t{s: `DESC`, tok: scanner.DESC, raw: `DESC`},\n\t\t{s: `DISTINCT`, tok: scanner.DISTINCT, raw: `DISTINCT`},\n\t\t{s: `DROP`, tok: scanner.DROP, raw: `DROP`},\n\t\t{s: `FIELD`, tok: scanner.FIELD, raw: `FIELD`},\n\t\t{s: `FROM`, tok: scanner.FROM, raw: `FROM`},\n\t\t{s: `GROUP`, tok: scanner.GROUP, raw: `GROUP`},\n\t\t{s: `INSERT`, tok: scanner.INSERT, raw: `INSERT`},\n\t\t{s: `INTO`, tok: scanner.INTO, raw: `INTO`},\n\t\t{s: `LIMIT`, tok: scanner.LIMIT, raw: `LIMIT`},\n\t\t{s: `ONLY`, tok: scanner.ONLY, raw: `ONLY`},\n\t\t{s: `OFFSET`, tok: scanner.OFFSET, raw: `OFFSET`},\n\t\t{s: `ORDER`, tok: scanner.ORDER, raw: `ORDER`},\n\t\t{s: `PRIMARY`, tok: scanner.PRIMARY, raw: `PRIMARY`},\n\t\t{s: `READ`, tok: scanner.READ, raw: `READ`},\n\t\t{s: `REINDEX`, tok: scanner.REINDEX, raw: `REINDEX`},\n\t\t{s: `RENAME`, tok: scanner.RENAME, raw: `RENAME`},\n\t\t{s: `ROLLBACK`, tok: scanner.ROLLBACK, raw: `ROLLBACK`},\n\t\t{s: `SELECT`, tok: scanner.SELECT, raw: `SELECT`},\n\t\t{s: `SET`, tok: scanner.SET, raw: `SET`},\n\t\t{s: `TABLE`, tok: scanner.TABLE, raw: 
`TABLE`},\n\t\t{s: `TO`, tok: scanner.TO, raw: `TO`},\n\t\t{s: `TRANSACTION`, tok: scanner.TRANSACTION, raw: `TRANSACTION`},\n\t\t{s: `UPDATE`, tok: scanner.UPDATE, raw: `UPDATE`},\n\t\t{s: `UNSET`, tok: scanner.UNSET, raw: `UNSET`},\n\t\t{s: `VALUES`, tok: scanner.VALUES, raw: `VALUES`},\n\t\t{s: `WHERE`, tok: scanner.WHERE, raw: `WHERE`},\n\t\t{s: `WRITE`, tok: scanner.WRITE, raw: `WRITE`},\n\t\t{s: `seLECT`, tok: scanner.SELECT, raw: `seLECT`}, // case insensitive\n\n\t\t// types\n\t\t{s: \"BYTES\", tok: scanner.TYPEBYTES, raw: `BYTES`},\n\t\t{s: \"BOOL\", tok: scanner.TYPEBOOL, raw: `BOOL`},\n\t\t{s: \"DOUBLE\", tok: scanner.TYPEDOUBLE, raw: `DOUBLE`},\n\t\t{s: \"INTEGER\", tok: scanner.TYPEINTEGER, raw: `INTEGER`},\n\t\t{s: \"TEXT\", tok: scanner.TYPETEXT, raw: `TEXT`},\n\t}\n\n\tfor i, tt := range tests {\n\t\ts := scanner.NewScanner(strings.NewReader(tt.s))\n\t\tti := s.Scan()\n\t\tif tt.tok != ti.Tok {\n\t\t\tt.Errorf(\"%d. %q token mismatch: exp=%q got=%q <%q>\", i, tt.s, tt.tok, ti.Tok, ti.Lit)\n\t\t} else if tt.pos.Line != ti.Pos.Line || tt.pos.Char != ti.Pos.Char {\n\t\t\tt.Errorf(\"%d. %q pos mismatch: exp=%#v got=%#v\", i, tt.s, tt.pos, ti.Pos)\n\t\t} else if tt.lit != ti.Lit {\n\t\t\tt.Errorf(\"%d. %q literal mismatch: exp=%q got=%q\", i, tt.s, tt.lit, ti.Lit)\n\t\t} else if tt.raw != ti.Raw {\n\t\t\tt.Errorf(\"%d. %q raw mismatch: exp=%q got=%q\", i, tt.s, tt.raw, ti.Raw)\n\t\t}\n\t}\n}",
"func lex(name, input string) *lexer {\n\tl := &lexer{\n\t\tname: name,\n\t\tinput: input,\n\t\titems: make(chan item),\n\t\tline: 1,\n\t\tstartLine: 1,\n\t}\n\tgo l.run()\n\treturn l\n}",
"func (l *lexer) run() {\n\tfor l.state = lexText; l.state != nil; {\n\t\tl.state = l.state(l)\n\t}\n}",
"func (l *lexer) run() {\n\tfor l.state = lexText; l.state != nil; {\n\t\tl.state = l.state(l)\n\t}\n}",
"func lexText(l *lexer) stateFn {\n\tfor {\n\t\tis, class := l.atBreakPoint() // a breakpoint is any char after which a new line can be\n\t\tif is {\n\t\t\tif l.pos > l.start {\n\t\t\t\tl.emit(tokenText)\n\t\t\t}\n\t\t\tswitch class {\n\t\t\tcase classCR:\n\t\t\t\treturn lexCR\n\t\t\tcase classNL:\n\t\t\t\treturn lexNL\n\t\t\tcase classSpace:\n\t\t\t\treturn lexSpace\n\t\t\tcase classTab:\n\t\t\t\treturn lexTab\n\t\t\tcase classHyphen:\n\t\t\t\treturn lexHyphen\n\t\t\t}\n\t\t}\n\t\tif l.next() == eof {\n\t\t\tl.runeCnt-- // eof doesn't count.\n\t\t\tbreak\n\t\t}\n\t}\n\t// Correctly reached EOF.\n\tif l.pos > l.start {\n\t\tl.emit(tokenText)\n\t}\n\tl.emit(tokenEOF) // Useful to make EOF a token\n\treturn nil // Stop the run loop.\n}",
"func LexicalAnalysis(value string) ([]Token, error) {\n\tresult := []Token{}\n\n\tcurrentContext := ContextVoid\n\n\tcurrentCollun := 0\n\n\tfor currentCollun < len(value) {\n\n\t\tswitch currentContext {\n\t\tcase ContextVoid:\n\t\t\tif regexAnyNumber.Match([]byte(value[currentCollun:])) {\n\n\t\t\t}\n\n\t\t}\n\t}\n\n\treturn nil, result\n}",
"func New(src string) *Lexer {\n\tl := &Lexer{\n\t\tinput: src,\n\t}\n\t// step to the first character in order to be ready\n\tl.readChar()\n\treturn l\n}",
"func tokenize(code string) ([]token, error) {\n\n}",
"func (t *Tokeniser) Run() {\n\tt.init()\n\tgo func() {\n\t\tfor state := lexCode; state != nil; {\n\t\t\tstate = state(t)\n\t\t}\n\t\tt.Tokens.Close()\n\t}()\n}",
"func (s *Scanner) Scan(tokenCode *int, tokenText *bytes.Buffer) {\n\tstate := StartState\n\ttokenText.Reset()\n\n\tfor state != EndState {\n\t\tcurrChar := s.currentChar()\n\n\t\tswitch s.Action(state, currChar) {\n\n\t\tcase ActionError:\n\t\t\tpanic(fmt.Errorf(\"Invalid character for the current state\"))\n\n\t\tcase MoveAppend:\n\t\t\tstate = s.nextState(state, currChar)\n\t\t\ttokenText.WriteByte(currChar)\n\t\t\ts.consumeChar()\n\n\t\tcase MoveNoAppend:\n\t\t\tstate = s.nextState(state, currChar)\n\t\t\ts.consumeChar()\n\n\t\tcase HaltAppend:\n\t\t\ts.lookupCode(state, currChar, tokenCode)\n\t\t\ttokenText.WriteByte(currChar)\n\t\t\ts.checkExceptions(tokenCode, *tokenText)\n\t\t\ts.consumeChar()\n\t\t\tif *tokenCode == UnknownToken {\n\t\t\t\ts.Scan(tokenCode, tokenText)\n\t\t\t}\n\n\t\t\treturn\n\n\t\tcase HaltNoAppend:\n\t\t\ts.lookupCode(state, currChar, tokenCode)\n\t\t\ts.checkExceptions(tokenCode, *tokenText)\n\t\t\ts.consumeChar()\n\t\t\tif *tokenCode == UnknownToken {\n\t\t\t\ts.Scan(tokenCode, tokenText)\n\t\t\t}\n\n\t\t\treturn\n\n\t\tcase HaltReuse:\n\t\t\ts.lookupCode(state, currChar, tokenCode)\n\t\t\ts.checkExceptions(tokenCode, *tokenText)\n\t\t\tif *tokenCode == UnknownToken {\n\t\t\t\ts.Scan(tokenCode, tokenText)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\t}\n}",
"func lex(name, input string) *lexer {\n\tl := &lexer{\n\t\tname: name,\n\t\tinput: input,\n\t\titems: make(chan item),\n\t\tline: 1,\n\t\tprevItemType: itemNone,\n\t}\n\tgo l.run()\n\treturn l\n}",
"func (_ Lexer) Scan(r io.Reader) ([]string, error) {\n\tlines := []string{}\n\tscanner := bufio.NewScanner(r)\n\tvar buffer bytes.Buffer\n\tfor scanner.Scan() {\n\t\tif strings.HasPrefix(scanner.Text(), \"#\") {\n\t\t\t// this is a comment, skip it\n\t\t} else if len(buffer.String()) > 0 {\n\t\t\t// we are in the middle of a continuation\n\t\t\tif strings.HasSuffix(scanner.Text(), \"\\\\\") {\n\t\t\t\t// this is a continuation, append to the buffer\n\t\t\t\ttext := scanner.Text()[:len(scanner.Text())-len(\"\\\\\")]\n\t\t\t\tbuffer.WriteString(text)\n\t\t\t} else {\n\t\t\t\t// this ends a buffer\n\t\t\t\tif len(strings.TrimSpace(scanner.Text())) > 0 {\n\t\t\t\t\t// only append if it's not an empty line\n\t\t\t\t\tbuffer.WriteString(scanner.Text())\n\t\t\t\t}\n\t\t\t\tlines = append(lines, buffer.String())\n\t\t\t\tbuffer.Reset()\n\t\t\t}\n\t\t} else {\n\t\t\t// no continuation\n\t\t\tif strings.HasSuffix(scanner.Text(), \"\\\\\") {\n\t\t\t\t// this is the beginning of a continuation, append to the buffer\n\t\t\t\ttext := scanner.Text()[:len(scanner.Text())-len(\"\\\\\")]\n\t\t\t\tbuffer.WriteString(text)\n\t\t\t} else {\n\t\t\t\t// this line is by itself\n\t\t\t\tif len(strings.TrimSpace(scanner.Text())) > 0 {\n\t\t\t\t\t// append it only if not empty\n\t\t\t\t\tlines = append(lines, scanner.Text())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif len(buffer.String()) > 0 {\n\t\t// the last continuation was not closed: this is an error\n\t\treturn nil, fmt.Errorf(\"error at end of file: unterminated instruction\")\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn lines, nil\n}",
"func lexText(l *Lexer) stateFn {\n\tfor {\n\t\tswitch r := l.next(); {\n\t\tcase r == '\\n':\n\t\t\tLineno++\n\n\t\t\tl.emit(itemEndStatement)\n\t\t\t//l.ignore()\n\t\tcase r == '\\t':\n\t\t\tl.ignore()\n\t\tcase isSpace(r): // Check whether this is a space (which we ignore)\n\t\t\tl.ignore()\n\t\tcase isAlphaNumeric(r) || r == '_': // Check if it's alpha numeric (var, if, else etc)\n\t\t\tl.backup()\n\n\t\t\treturn lexStatement\n\t\tcase isNumber(r): // Check if it's a number (constant)\n\t\t\tl.backup()\n\n\t\t\treturn lexNumber\n\t\tcase r == '{': // Block check\n\t\t\tl.emit(itemLeftBrace)\n\t\tcase r == '}':\n\t\t\tl.emit(itemRightBrace)\n\t\tcase r == '[':\n\t\t\tl.emit(itemLeftBracket)\n\t\tcase r == ']':\n\t\t\tl.emit(itemRightBracket)\n\t\tcase r == '(':\n\t\t\tl.emit(itemLeftPar)\n\t\tcase r == ')':\n\t\t\tl.emit(itemRightPar)\n\t\tcase r == '.':\n\t\t\tl.emit(itemDot)\n\t\tcase r == ',':\n\t\t\tl.emit(itemComma)\n\t\tcase r == '\"':\n\t\t\tl.emit(itemQuote)\n\n\t\t\treturn lexInsideString\n\t\tcase r == '/', r == '#':\n\t\t\treturn lexComment\n\t\tcase r == ';':\n\t\t\tl.emit(itemEndStatement)\n\t\tcase r == ':':\n\t\t\tl.emit(itemColon)\n\t\tcase isOperator(r):\n\t\t\tl.backup()\n\n\t\t\treturn lexOperator\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn nil\n}",
"func NewLexer(name string, in io.Reader) (*Lexer, error) {\n\tbuf, err := ioutil.ReadAll(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tl := &Lexer{\n\t\tname: name,\n\t\tinput: string(buf),\n\t\tstate: lexOuter,\n\t\ttokens: make(chan Token, 4),\n\t}\n\treturn l, nil\n}",
"func NewLexer(sourceCode string) *Lexer {\n\treturn &Lexer{sourceCode, 0, nil}\n}",
"func (e *snappyL3) Encode(dst *tokens, src []byte) {\n\tconst (\n\t\tinputMargin = 8 - 1\n\t\tminNonLiteralBlockSize = 1 + 1 + inputMargin\n\t)\n\n\t// Protect against e.cur wraparound.\n\tif e.cur > 1<<30 {\n\t\tfor i := range e.table[:] {\n\t\t\te.table[i] = tableEntryPrev{}\n\t\t}\n\t\te.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}\n\t}\n\n\t// This check isn't in the Snappy implementation, but there, the caller\n\t// instead of the callee handles this case.\n\tif len(src) < minNonLiteralBlockSize {\n\t\t// We do not fill the token table.\n\t\t// This will be picked up by caller.\n\t\tdst.n = uint16(len(src))\n\t\te.cur += maxStoreBlockSize\n\t\te.prev = e.prev[:0]\n\t\treturn\n\t}\n\n\t// sLimit is when to stop looking for offset/length copies. The inputMargin\n\t// lets us use a fast path for emitLiteral in the main loop, while we are\n\t// looking for copies.\n\tsLimit := int32(len(src) - inputMargin)\n\n\t// nextEmit is where in src the next emitLiteral should start from.\n\tnextEmit := int32(0)\n\ts := int32(0)\n\tcv := load3232(src, s)\n\tnextHash := hash(cv)\n\n\tfor {\n\t\t// Copied from the C++ snappy implementation:\n\t\t//\n\t\t// Heuristic match skipping: If 32 bytes are scanned with no matches\n\t\t// found, start looking only at every other byte. If 32 more bytes are\n\t\t// scanned (or skipped), look at every third byte, etc.. When a match\n\t\t// is found, immediately go back to looking at every byte. This is a\n\t\t// small loss (~5% performance, ~0.1% density) for compressible data\n\t\t// due to more bookkeeping, but for non-compressible data (such as\n\t\t// JPEG) it's a huge win since the compressor quickly \"realizes\" the\n\t\t// data is incompressible and doesn't bother looking for matches\n\t\t// everywhere.\n\t\t//\n\t\t// The \"skip\" variable keeps track of how many bytes there are since\n\t\t// the last match; dividing it by 32 (ie. right-shifting by five) gives\n\t\t// the number of bytes to move ahead for each iteration.\n\t\tskip := int32(32)\n\n\t\tnextS := s\n\t\tvar candidate tableEntry\n\t\tfor {\n\t\t\ts = nextS\n\t\t\tbytesBetweenHashLookups := skip >> 5\n\t\t\tnextS = s + bytesBetweenHashLookups\n\t\t\tskip += bytesBetweenHashLookups\n\t\t\tif nextS > sLimit {\n\t\t\t\tgoto emitRemainder\n\t\t\t}\n\t\t\tcandidates := e.table[nextHash&tableMask]\n\t\t\tnow := load3232(src, nextS)\n\t\t\te.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}\n\t\t\tnextHash = hash(now)\n\n\t\t\t// Check both candidates\n\t\t\tcandidate = candidates.Cur\n\t\t\tif cv == candidate.val {\n\t\t\t\toffset := s - (candidate.offset - e.cur)\n\t\t\t\tif offset <= maxMatchOffset {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// We only check if value mismatches.\n\t\t\t\t// Offset will always be invalid in other cases.\n\t\t\t\tcandidate = candidates.Prev\n\t\t\t\tif cv == candidate.val {\n\t\t\t\t\toffset := s - (candidate.offset - e.cur)\n\t\t\t\t\tif offset <= maxMatchOffset {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcv = now\n\t\t}\n\n\t\t// A 4-byte match has been found. We'll later see if more than 4 bytes\n\t\t// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit\n\t\t// them as literal bytes.\n\t\temitLiteral(dst, src[nextEmit:s])\n\n\t\t// Call emitCopy, and then see if another emitCopy could be our next\n\t\t// move. 
Repeat until we find no match for the input immediately after\n\t\t// what was consumed by the last emitCopy call.\n\t\t//\n\t\t// If we exit this loop normally then we need to call emitLiteral next,\n\t\t// though we don't yet know how big the literal will be. We handle that\n\t\t// by proceeding to the next iteration of the main loop. We also can\n\t\t// exit this loop via goto if we get close to exhausting the input.\n\t\tfor {\n\t\t\t// Invariant: we have a 4-byte match at s, and no need to emit any\n\t\t\t// literal bytes prior to s.\n\n\t\t\t// Extend the 4-byte match as long as possible.\n\t\t\t//\n\t\t\ts += 4\n\t\t\tt := candidate.offset - e.cur + 4\n\t\t\tl := e.matchlen(s, t, src)\n\n\t\t\t// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)\n\t\t\tdst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))\n\t\t\tdst.n++\n\t\t\ts += l\n\t\t\tnextEmit = s\n\t\t\tif s >= sLimit {\n\t\t\t\tt += l\n\t\t\t\t// Index first pair after match end.\n\t\t\t\tif int(t+4) < len(src) && t > 0 {\n\t\t\t\t\tcv := load3232(src, t)\n\t\t\t\t\tnextHash = hash(cv)\n\t\t\t\t\te.table[nextHash&tableMask] = tableEntryPrev{\n\t\t\t\t\t\tPrev: e.table[nextHash&tableMask].Cur,\n\t\t\t\t\t\tCur: tableEntry{offset: e.cur + t, val: cv},\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tgoto emitRemainder\n\t\t\t}\n\n\t\t\t// We could immediately start working at s now, but to improve\n\t\t\t// compression we first update the hash table at s-3 to s. If\n\t\t\t// another emitCopy is not our next move, also calculate nextHash\n\t\t\t// at s+1. At least on GOARCH=amd64, these three hash calculations\n\t\t\t// are faster as one load64 call (with some shifts) instead of\n\t\t\t// three load32 calls.\n\t\t\tx := load6432(src, s-3)\n\t\t\tprevHash := hash(uint32(x))\n\t\t\te.table[prevHash&tableMask] = tableEntryPrev{\n\t\t\t\tPrev: e.table[prevHash&tableMask].Cur,\n\t\t\t\tCur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},\n\t\t\t}\n\t\t\tx >>= 8\n\t\t\tprevHash = hash(uint32(x))\n\n\t\t\te.table[prevHash&tableMask] = tableEntryPrev{\n\t\t\t\tPrev: e.table[prevHash&tableMask].Cur,\n\t\t\t\tCur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},\n\t\t\t}\n\t\t\tx >>= 8\n\t\t\tprevHash = hash(uint32(x))\n\n\t\t\te.table[prevHash&tableMask] = tableEntryPrev{\n\t\t\t\tPrev: e.table[prevHash&tableMask].Cur,\n\t\t\t\tCur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},\n\t\t\t}\n\t\t\tx >>= 8\n\t\t\tcurrHash := hash(uint32(x))\n\t\t\tcandidates := e.table[currHash&tableMask]\n\t\t\tcv = uint32(x)\n\t\t\te.table[currHash&tableMask] = tableEntryPrev{\n\t\t\t\tPrev: candidates.Cur,\n\t\t\t\tCur: tableEntry{offset: s + e.cur, val: cv},\n\t\t\t}\n\n\t\t\t// Check both candidates\n\t\t\tcandidate = candidates.Cur\n\t\t\tif cv == candidate.val {\n\t\t\t\toffset := s - (candidate.offset - e.cur)\n\t\t\t\tif offset <= maxMatchOffset {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// We only check if value mismatches.\n\t\t\t\t// Offset will always be invalid in other cases.\n\t\t\t\tcandidate = candidates.Prev\n\t\t\t\tif cv == candidate.val {\n\t\t\t\t\toffset := s - (candidate.offset - e.cur)\n\t\t\t\t\tif offset <= maxMatchOffset {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcv = uint32(x >> 8)\n\t\t\tnextHash = hash(cv)\n\t\t\ts++\n\t\t\tbreak\n\t\t}\n\t}\n\nemitRemainder:\n\tif int(nextEmit) < len(src) {\n\t\temitLiteral(dst, src[nextEmit:])\n\t}\n\te.cur += int32(len(src))\n\te.prev = e.prev[:len(src)]\n\tcopy(e.prev, src)\n}",
"func NewLexer(name string, r io.Reader, rec Record) (l *Lexer, err error) {\n\tif len(rec.States) == 0 {\n\t\terr = fmt.Errorf(\"rec.states must not be empty.\")\n\t\treturn\n\t}\n\tif rec.Buflen < 1 {\n\t\terr = fmt.Errorf(\"rec.Buflen must be > 0: %d\", rec.Buflen)\n\t\treturn\n\t}\n\tif rec.ErrorFn == nil {\n\t\terr = fmt.Errorf(\"rec.ErrorFn must not be nil\")\n\t\treturn\n\t}\n\tl = &Lexer{\n\t\tname: name,\n\t\tr: r,\n\t\trec: rec,\n\t\titems: make(chan Item),\n\t\tnext: make([]byte, rec.Buflen),\n\t\teof: false,\n\t}\n\tgo l.run()\n\treturn\n}",
"func (l *lexer) run() {\n\tfor state := lexText; state != nil; {\n\t\tstate = state(l)\n\t}\n\tclose(l.items)\n}",
"func skip_ahead()rune{\nfor true{\nif loc>=len(buffer)&&!get_line(){\nreturn new_section\n}\nfor loc<len(buffer)&&buffer[loc]!='@'{\nloc++\n}\nif loc<len(buffer){\nloc++\nc:=new_section\nif loc<len(buffer)&&buffer[loc]<int32(len(ccode)){\nc= ccode[buffer[loc]]\n}\nloc++\nif c!=ignore||(loc<=len(buffer)&&buffer[loc-1]=='>'){\nreturn c\n}\n}\n}\nreturn 0\n}",
"func (j *jsonNative) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {\n\tvar err error\n\tcurrentKey := ffjtjsonNativebase\n\t_ = currentKey\n\ttok := fflib.FFTok_init\n\twantedTok := fflib.FFTok_init\n\nmainparse:\n\tfor {\n\t\ttok = fs.Scan()\n\t\t//\tprintln(fmt.Sprintf(\"debug: tok: %v state: %v\", tok, state))\n\t\tif tok == fflib.FFTok_error {\n\t\t\tgoto tokerror\n\t\t}\n\n\t\tswitch state {\n\n\t\tcase fflib.FFParse_map_start:\n\t\t\tif tok != fflib.FFTok_left_bracket {\n\t\t\t\twantedTok = fflib.FFTok_left_bracket\n\t\t\t\tgoto wrongtokenerror\n\t\t\t}\n\t\t\tstate = fflib.FFParse_want_key\n\t\t\tcontinue\n\n\t\tcase fflib.FFParse_after_value:\n\t\t\tif tok == fflib.FFTok_comma {\n\t\t\t\tstate = fflib.FFParse_want_key\n\t\t\t} else if tok == fflib.FFTok_right_bracket {\n\t\t\t\tgoto done\n\t\t\t} else {\n\t\t\t\twantedTok = fflib.FFTok_comma\n\t\t\t\tgoto wrongtokenerror\n\t\t\t}\n\n\t\tcase fflib.FFParse_want_key:\n\t\t\t// json {} ended. goto exit. woo.\n\t\t\tif tok == fflib.FFTok_right_bracket {\n\t\t\t\tgoto done\n\t\t\t}\n\t\t\tif tok != fflib.FFTok_string {\n\t\t\t\twantedTok = fflib.FFTok_string\n\t\t\t\tgoto wrongtokenerror\n\t\t\t}\n\n\t\t\tkn := fs.Output.Bytes()\n\t\t\tif len(kn) <= 0 {\n\t\t\t\t// \"\" case. hrm.\n\t\t\t\tcurrentKey = ffjtjsonNativenosuchkey\n\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\tgoto mainparse\n\t\t\t} else {\n\t\t\t\tswitch kn[0] {\n\n\t\t\t\tcase '_':\n\n\t\t\t\t\tif bytes.Equal(ffjKeyjsonNativeParsedRequest, kn) {\n\t\t\t\t\t\tcurrentKey = ffjtjsonNativeParsedRequest\n\t\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\t\tgoto mainparse\n\t\t\t\t\t}\n\n\t\t\t\tcase 'a':\n\n\t\t\t\t\tif bytes.Equal(ffjKeyjsonNativeAPI, kn) {\n\t\t\t\t\t\tcurrentKey = ffjtjsonNativeAPI\n\t\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\t\tgoto mainparse\n\t\t\t\t\t}\n\n\t\t\t\tcase 'b':\n\n\t\t\t\t\tif bytes.Equal(ffjKeyjsonNativeBAttr, kn) {\n\t\t\t\t\t\tcurrentKey = ffjtjsonNativeBAttr\n\t\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\t\tgoto mainparse\n\t\t\t\t\t}\n\n\t\t\t\tcase 'e':\n\n\t\t\t\t\tif bytes.Equal(ffjKeyjsonNativeExt, kn) {\n\t\t\t\t\t\tcurrentKey = ffjtjsonNativeExt\n\t\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\t\tgoto mainparse\n\t\t\t\t\t}\n\n\t\t\t\tcase 'r':\n\n\t\t\t\t\tif bytes.Equal(ffjKeyjsonNativeRequest, kn) {\n\t\t\t\t\t\tcurrentKey = ffjtjsonNativeRequest\n\t\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\t\tgoto mainparse\n\t\t\t\t\t}\n\n\t\t\t\tcase 'v':\n\n\t\t\t\t\tif bytes.Equal(ffjKeyjsonNativeVer, kn) {\n\t\t\t\t\t\tcurrentKey = ffjtjsonNativeVer\n\t\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\t\tgoto mainparse\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\tif fflib.EqualFoldRight(ffjKeyjsonNativeParsedRequest, kn) {\n\t\t\t\t\tcurrentKey = ffjtjsonNativeParsedRequest\n\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\tgoto mainparse\n\t\t\t\t}\n\n\t\t\t\tif fflib.SimpleLetterEqualFold(ffjKeyjsonNativeExt, kn) {\n\t\t\t\t\tcurrentKey = ffjtjsonNativeExt\n\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\tgoto mainparse\n\t\t\t\t}\n\n\t\t\t\tif fflib.SimpleLetterEqualFold(ffjKeyjsonNativeBAttr, kn) {\n\t\t\t\t\tcurrentKey = ffjtjsonNativeBAttr\n\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\tgoto mainparse\n\t\t\t\t}\n\n\t\t\t\tif fflib.SimpleLetterEqualFold(ffjKeyjsonNativeAPI, kn) {\n\t\t\t\t\tcurrentKey = ffjtjsonNativeAPI\n\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\tgoto mainparse\n\t\t\t\t}\n\n\t\t\t\tif 
fflib.SimpleLetterEqualFold(ffjKeyjsonNativeVer, kn) {\n\t\t\t\t\tcurrentKey = ffjtjsonNativeVer\n\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\tgoto mainparse\n\t\t\t\t}\n\n\t\t\t\tif fflib.EqualFoldRight(ffjKeyjsonNativeRequest, kn) {\n\t\t\t\t\tcurrentKey = ffjtjsonNativeRequest\n\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\tgoto mainparse\n\t\t\t\t}\n\n\t\t\t\tcurrentKey = ffjtjsonNativenosuchkey\n\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\tgoto mainparse\n\t\t\t}\n\n\t\tcase fflib.FFParse_want_colon:\n\t\t\tif tok != fflib.FFTok_colon {\n\t\t\t\twantedTok = fflib.FFTok_colon\n\t\t\t\tgoto wrongtokenerror\n\t\t\t}\n\t\t\tstate = fflib.FFParse_want_value\n\t\t\tcontinue\n\t\tcase fflib.FFParse_want_value:\n\n\t\t\tif tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {\n\t\t\t\tswitch currentKey {\n\n\t\t\t\tcase ffjtjsonNativeRequest:\n\t\t\t\t\tgoto handle_Request\n\n\t\t\t\tcase ffjtjsonNativeVer:\n\t\t\t\t\tgoto handle_Ver\n\n\t\t\t\tcase ffjtjsonNativeAPI:\n\t\t\t\t\tgoto handle_API\n\n\t\t\t\tcase ffjtjsonNativeBAttr:\n\t\t\t\t\tgoto handle_BAttr\n\n\t\t\t\tcase ffjtjsonNativeExt:\n\t\t\t\t\tgoto handle_Ext\n\n\t\t\t\tcase ffjtjsonNativeParsedRequest:\n\t\t\t\t\tgoto handle_ParsedRequest\n\n\t\t\t\tcase ffjtjsonNativenosuchkey:\n\t\t\t\t\terr = fs.SkipField(tok)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fs.WrapErr(err)\n\t\t\t\t\t}\n\t\t\t\t\tstate = fflib.FFParse_after_value\n\t\t\t\t\tgoto mainparse\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tgoto wantedvalue\n\t\t\t}\n\t\t}\n\t}\n\nhandle_Request:\n\n\t/* handler: j.Request type=string kind=string quoted=false*/\n\n\t{\n\n\t\t{\n\t\t\tif tok != fflib.FFTok_string && tok != fflib.FFTok_null {\n\t\t\t\treturn fs.WrapErr(fmt.Errorf(\"cannot unmarshal %s into Go value for string\", tok))\n\t\t\t}\n\t\t}\n\n\t\tif tok == fflib.FFTok_null {\n\n\t\t} else {\n\n\t\t\toutBuf := fs.Output.Bytes()\n\n\t\t\tj.Request = string(string(outBuf))\n\n\t\t}\n\t}\n\n\tstate = fflib.FFParse_after_value\n\tgoto mainparse\n\nhandle_Ver:\n\n\t/* handler: j.Ver type=openrtb.StringOrNumber kind=string quoted=false*/\n\n\t{\n\t\tif tok == fflib.FFTok_null {\n\n\t\t} else {\n\n\t\t\ttbuf, err := fs.CaptureField(tok)\n\t\t\tif err != nil {\n\t\t\t\treturn fs.WrapErr(err)\n\t\t\t}\n\n\t\t\terr = j.Ver.UnmarshalJSON(tbuf)\n\t\t\tif err != nil {\n\t\t\t\treturn fs.WrapErr(err)\n\t\t\t}\n\t\t}\n\t\tstate = fflib.FFParse_after_value\n\t}\n\n\tstate = fflib.FFParse_after_value\n\tgoto mainparse\n\nhandle_API:\n\n\t/* handler: j.API type=[]int kind=slice quoted=false*/\n\n\t{\n\n\t\t{\n\t\t\tif tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {\n\t\t\t\treturn fs.WrapErr(fmt.Errorf(\"cannot unmarshal %s into Go value for \", tok))\n\t\t\t}\n\t\t}\n\n\t\tif tok == fflib.FFTok_null {\n\t\t\tj.API = nil\n\t\t} else {\n\n\t\t\tj.API = []int{}\n\n\t\t\twantVal := true\n\n\t\t\tfor {\n\n\t\t\t\tvar tmpJAPI int\n\n\t\t\t\ttok = fs.Scan()\n\t\t\t\tif tok == fflib.FFTok_error {\n\t\t\t\t\tgoto tokerror\n\t\t\t\t}\n\t\t\t\tif tok == fflib.FFTok_right_brace {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif tok == fflib.FFTok_comma {\n\t\t\t\t\tif wantVal == true {\n\t\t\t\t\t\t// TODO(pquerna): this isn't an ideal error message, this handles\n\t\t\t\t\t\t// things like [,,,] as an array value.\n\t\t\t\t\t\treturn fs.WrapErr(fmt.Errorf(\"wanted value token, but got token: %v\", 
tok))\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\twantVal = true\n\t\t\t\t}\n\n\t\t\t\t/* handler: tmpJAPI type=int kind=int quoted=false*/\n\n\t\t\t\t{\n\t\t\t\t\tif tok != fflib.FFTok_integer && tok != fflib.FFTok_null {\n\t\t\t\t\t\treturn fs.WrapErr(fmt.Errorf(\"cannot unmarshal %s into Go value for int\", tok))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t{\n\n\t\t\t\t\tif tok == fflib.FFTok_null {\n\n\t\t\t\t\t} else {\n\n\t\t\t\t\t\ttval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)\n\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fs.WrapErr(err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttmpJAPI = int(tval)\n\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tj.API = append(j.API, tmpJAPI)\n\n\t\t\t\twantVal = false\n\t\t\t}\n\t\t}\n\t}\n\n\tstate = fflib.FFParse_after_value\n\tgoto mainparse\n\nhandle_BAttr:\n\n\t/* handler: j.BAttr type=[]int kind=slice quoted=false*/\n\n\t{\n\n\t\t{\n\t\t\tif tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {\n\t\t\t\treturn fs.WrapErr(fmt.Errorf(\"cannot unmarshal %s into Go value for \", tok))\n\t\t\t}\n\t\t}\n\n\t\tif tok == fflib.FFTok_null {\n\t\t\tj.BAttr = nil\n\t\t} else {\n\n\t\t\tj.BAttr = []int{}\n\n\t\t\twantVal := true\n\n\t\t\tfor {\n\n\t\t\t\tvar tmpJBAttr int\n\n\t\t\t\ttok = fs.Scan()\n\t\t\t\tif tok == fflib.FFTok_error {\n\t\t\t\t\tgoto tokerror\n\t\t\t\t}\n\t\t\t\tif tok == fflib.FFTok_right_brace {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif tok == fflib.FFTok_comma {\n\t\t\t\t\tif wantVal == true {\n\t\t\t\t\t\t// TODO(pquerna): this isn't an ideal error message, this handles\n\t\t\t\t\t\t// things like [,,,] as an array value.\n\t\t\t\t\t\treturn fs.WrapErr(fmt.Errorf(\"wanted value token, but got token: %v\", tok))\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\twantVal = true\n\t\t\t\t}\n\n\t\t\t\t/* handler: tmpJBAttr type=int kind=int quoted=false*/\n\n\t\t\t\t{\n\t\t\t\t\tif tok != fflib.FFTok_integer && tok != fflib.FFTok_null {\n\t\t\t\t\t\treturn fs.WrapErr(fmt.Errorf(\"cannot unmarshal %s into Go value for int\", tok))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t{\n\n\t\t\t\t\tif tok == fflib.FFTok_null {\n\n\t\t\t\t\t} else {\n\n\t\t\t\t\t\ttval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)\n\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fs.WrapErr(err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttmpJBAttr = int(tval)\n\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tj.BAttr = append(j.BAttr, tmpJBAttr)\n\n\t\t\t\twantVal = false\n\t\t\t}\n\t\t}\n\t}\n\n\tstate = fflib.FFParse_after_value\n\tgoto mainparse\n\nhandle_Ext:\n\n\t/* handler: j.Ext type=json.RawMessage kind=slice quoted=false*/\n\n\t{\n\t\tif tok == fflib.FFTok_null {\n\n\t\t\tj.Ext = nil\n\n\t\t} else {\n\n\t\t\ttbuf, err := fs.CaptureField(tok)\n\t\t\tif err != nil {\n\t\t\t\treturn fs.WrapErr(err)\n\t\t\t}\n\n\t\t\tif j.Ext == nil {\n\t\t\t\tj.Ext = new(json.RawMessage)\n\t\t\t}\n\n\t\t\terr = j.Ext.UnmarshalJSON(tbuf)\n\t\t\tif err != nil {\n\t\t\t\treturn fs.WrapErr(err)\n\t\t\t}\n\t\t}\n\t\tstate = fflib.FFParse_after_value\n\t}\n\n\tstate = fflib.FFParse_after_value\n\tgoto mainparse\n\nhandle_ParsedRequest:\n\n\t/* handler: j.ParsedRequest type=openrtb.NativeRequest kind=struct quoted=false*/\n\n\t{\n\t\tif tok == fflib.FFTok_null {\n\n\t\t\tj.ParsedRequest = nil\n\n\t\t} else {\n\n\t\t\tif j.ParsedRequest == nil {\n\t\t\t\tj.ParsedRequest = new(NativeRequest)\n\t\t\t}\n\n\t\t\terr = j.ParsedRequest.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tstate = fflib.FFParse_after_value\n\t}\n\n\tstate = 
fflib.FFParse_after_value\n\tgoto mainparse\n\nwantedvalue:\n\treturn fs.WrapErr(fmt.Errorf(\"wanted value token, but got token: %v\", tok))\nwrongtokenerror:\n\treturn fs.WrapErr(fmt.Errorf(\"ffjson: wanted token: %v, but got token: %v output=%s\", wantedTok, tok, fs.Output.String()))\ntokerror:\n\tif fs.BigError != nil {\n\t\treturn fs.WrapErr(fs.BigError)\n\t}\n\terr = fs.Error.ToError()\n\tif err != nil {\n\t\treturn fs.WrapErr(err)\n\t}\n\tpanic(\"ffjson-generated: unreachable, please report bug.\")\ndone:\n\n\treturn nil\n}",
"func (e *snappyL2) Encode(dst *tokens, src []byte) {\n\tconst (\n\t\tinputMargin = 8 - 1\n\t\tminNonLiteralBlockSize = 1 + 1 + inputMargin\n\t)\n\n\t// Protect against e.cur wraparound.\n\tif e.cur > 1<<30 {\n\t\tfor i := range e.table[:] {\n\t\t\te.table[i] = tableEntry{}\n\t\t}\n\t\te.cur = maxStoreBlockSize\n\t}\n\n\t// This check isn't in the Snappy implementation, but there, the caller\n\t// instead of the callee handles this case.\n\tif len(src) < minNonLiteralBlockSize {\n\t\t// We do not fill the token table.\n\t\t// This will be picked up by caller.\n\t\tdst.n = uint16(len(src))\n\t\te.cur += maxStoreBlockSize\n\t\te.prev = e.prev[:0]\n\t\treturn\n\t}\n\n\t// sLimit is when to stop looking for offset/length copies. The inputMargin\n\t// lets us use a fast path for emitLiteral in the main loop, while we are\n\t// looking for copies.\n\tsLimit := int32(len(src) - inputMargin)\n\n\t// nextEmit is where in src the next emitLiteral should start from.\n\tnextEmit := int32(0)\n\ts := int32(0)\n\tcv := load3232(src, s)\n\tnextHash := hash(cv)\n\n\tfor {\n\t\t// Copied from the C++ snappy implementation:\n\t\t//\n\t\t// Heuristic match skipping: If 32 bytes are scanned with no matches\n\t\t// found, start looking only at every other byte. If 32 more bytes are\n\t\t// scanned (or skipped), look at every third byte, etc.. When a match\n\t\t// is found, immediately go back to looking at every byte. This is a\n\t\t// small loss (~5% performance, ~0.1% density) for compressible data\n\t\t// due to more bookkeeping, but for non-compressible data (such as\n\t\t// JPEG) it's a huge win since the compressor quickly \"realizes\" the\n\t\t// data is incompressible and doesn't bother looking for matches\n\t\t// everywhere.\n\t\t//\n\t\t// The \"skip\" variable keeps track of how many bytes there are since\n\t\t// the last match; dividing it by 32 (ie. right-shifting by five) gives\n\t\t// the number of bytes to move ahead for each iteration.\n\t\tskip := int32(32)\n\n\t\tnextS := s\n\t\tvar candidate tableEntry\n\t\tfor {\n\t\t\ts = nextS\n\t\t\tbytesBetweenHashLookups := skip >> 5\n\t\t\tnextS = s + bytesBetweenHashLookups\n\t\t\tskip += bytesBetweenHashLookups\n\t\t\tif nextS > sLimit {\n\t\t\t\tgoto emitRemainder\n\t\t\t}\n\t\t\tcandidate = e.table[nextHash&tableMask]\n\t\t\tnow := load3232(src, nextS)\n\t\t\te.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv}\n\t\t\tnextHash = hash(now)\n\n\t\t\toffset := s - (candidate.offset - e.cur)\n\t\t\tif offset > maxMatchOffset || cv != candidate.val {\n\t\t\t\t// Out of range or not matched.\n\t\t\t\tcv = now\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\t// A 4-byte match has been found. We'll later see if more than 4 bytes\n\t\t// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit\n\t\t// them as literal bytes.\n\t\temitLiteral(dst, src[nextEmit:s])\n\n\t\t// Call emitCopy, and then see if another emitCopy could be our next\n\t\t// move. Repeat until we find no match for the input immediately after\n\t\t// what was consumed by the last emitCopy call.\n\t\t//\n\t\t// If we exit this loop normally then we need to call emitLiteral next,\n\t\t// though we don't yet know how big the literal will be. We handle that\n\t\t// by proceeding to the next iteration of the main loop. 
We also can\n\t\t// exit this loop via goto if we get close to exhausting the input.\n\t\tfor {\n\t\t\t// Invariant: we have a 4-byte match at s, and no need to emit any\n\t\t\t// literal bytes prior to s.\n\n\t\t\t// Extend the 4-byte match as long as possible.\n\t\t\t//\n\t\t\ts += 4\n\t\t\tt := candidate.offset - e.cur + 4\n\t\t\tl := e.matchlen(s, t, src)\n\n\t\t\t// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)\n\t\t\tdst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))\n\t\t\tdst.n++\n\t\t\ts += l\n\t\t\tnextEmit = s\n\t\t\tif s >= sLimit {\n\t\t\t\tt += l\n\t\t\t\t// Index first pair after match end.\n\t\t\t\tif int(t+4) < len(src) && t > 0 {\n\t\t\t\t\tcv := load3232(src, t)\n\t\t\t\t\te.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv}\n\t\t\t\t}\n\t\t\t\tgoto emitRemainder\n\t\t\t}\n\n\t\t\t// We could immediately start working at s now, but to improve\n\t\t\t// compression we first update the hash table at s-1 and at s. If\n\t\t\t// another emitCopy is not our next move, also calculate nextHash\n\t\t\t// at s+1. At least on GOARCH=amd64, these three hash calculations\n\t\t\t// are faster as one load64 call (with some shifts) instead of\n\t\t\t// three load32 calls.\n\t\t\tx := load6432(src, s-1)\n\t\t\tprevHash := hash(uint32(x))\n\t\t\te.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)}\n\t\t\tx >>= 8\n\t\t\tcurrHash := hash(uint32(x))\n\t\t\tcandidate = e.table[currHash&tableMask]\n\t\t\te.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)}\n\n\t\t\toffset := s - (candidate.offset - e.cur)\n\t\t\tif offset > maxMatchOffset || uint32(x) != candidate.val {\n\t\t\t\tcv = uint32(x >> 8)\n\t\t\t\tnextHash = hash(cv)\n\t\t\t\ts++\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\nemitRemainder:\n\tif int(nextEmit) < len(src) {\n\t\temitLiteral(dst, src[nextEmit:])\n\t}\n\te.cur += int32(len(src))\n\te.prev = e.prev[:len(src)]\n\tcopy(e.prev, src)\n}",
"func (j *RunPacket) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {\n\tvar err error\n\tcurrentKey := ffjtRunPacketbase\n\t_ = currentKey\n\ttok := fflib.FFTok_init\n\twantedTok := fflib.FFTok_init\n\nmainparse:\n\tfor {\n\t\ttok = fs.Scan()\n\t\t//\tprintln(fmt.Sprintf(\"debug: tok: %v state: %v\", tok, state))\n\t\tif tok == fflib.FFTok_error {\n\t\t\tgoto tokerror\n\t\t}\n\n\t\tswitch state {\n\n\t\tcase fflib.FFParse_map_start:\n\t\t\tif tok != fflib.FFTok_left_bracket {\n\t\t\t\twantedTok = fflib.FFTok_left_bracket\n\t\t\t\tgoto wrongtokenerror\n\t\t\t}\n\t\t\tstate = fflib.FFParse_want_key\n\t\t\tcontinue\n\n\t\tcase fflib.FFParse_after_value:\n\t\t\tif tok == fflib.FFTok_comma {\n\t\t\t\tstate = fflib.FFParse_want_key\n\t\t\t} else if tok == fflib.FFTok_right_bracket {\n\t\t\t\tgoto done\n\t\t\t} else {\n\t\t\t\twantedTok = fflib.FFTok_comma\n\t\t\t\tgoto wrongtokenerror\n\t\t\t}\n\n\t\tcase fflib.FFParse_want_key:\n\t\t\t// json {} ended. goto exit. woo.\n\t\t\tif tok == fflib.FFTok_right_bracket {\n\t\t\t\tgoto done\n\t\t\t}\n\t\t\tif tok != fflib.FFTok_string {\n\t\t\t\twantedTok = fflib.FFTok_string\n\t\t\t\tgoto wrongtokenerror\n\t\t\t}\n\n\t\t\tkn := fs.Output.Bytes()\n\t\t\tif len(kn) <= 0 {\n\t\t\t\t// \"\" case. hrm.\n\t\t\t\tcurrentKey = ffjtRunPacketnosuchkey\n\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\tgoto mainparse\n\t\t\t} else {\n\t\t\t\tswitch kn[0] {\n\n\t\t\t\tcase 'I':\n\n\t\t\t\t\tif bytes.Equal(ffjKeyRunPacketID, kn) {\n\t\t\t\t\t\tcurrentKey = ffjtRunPacketID\n\t\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\t\tgoto mainparse\n\t\t\t\t\t}\n\n\t\t\t\tcase 'P':\n\n\t\t\t\t\tif bytes.Equal(ffjKeyRunPacketPlugin_Name, kn) {\n\t\t\t\t\t\tcurrentKey = ffjtRunPacketPlugin_Name\n\t\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\t\tgoto mainparse\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\tif fflib.AsciiEqualFold(ffjKeyRunPacketPlugin_Name, kn) {\n\t\t\t\t\tcurrentKey = ffjtRunPacketPlugin_Name\n\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\tgoto mainparse\n\t\t\t\t}\n\n\t\t\t\tif fflib.SimpleLetterEqualFold(ffjKeyRunPacketID, kn) {\n\t\t\t\t\tcurrentKey = ffjtRunPacketID\n\t\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\t\tgoto mainparse\n\t\t\t\t}\n\n\t\t\t\tcurrentKey = ffjtRunPacketnosuchkey\n\t\t\t\tstate = fflib.FFParse_want_colon\n\t\t\t\tgoto mainparse\n\t\t\t}\n\n\t\tcase fflib.FFParse_want_colon:\n\t\t\tif tok != fflib.FFTok_colon {\n\t\t\t\twantedTok = fflib.FFTok_colon\n\t\t\t\tgoto wrongtokenerror\n\t\t\t}\n\t\t\tstate = fflib.FFParse_want_value\n\t\t\tcontinue\n\t\tcase fflib.FFParse_want_value:\n\n\t\t\tif tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {\n\t\t\t\tswitch currentKey {\n\n\t\t\t\tcase ffjtRunPacketID:\n\t\t\t\t\tgoto handle_ID\n\n\t\t\t\tcase ffjtRunPacketPlugin_Name:\n\t\t\t\t\tgoto handle_Plugin_Name\n\n\t\t\t\tcase ffjtRunPacketnosuchkey:\n\t\t\t\t\terr = fs.SkipField(tok)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fs.WrapErr(err)\n\t\t\t\t\t}\n\t\t\t\t\tstate = fflib.FFParse_after_value\n\t\t\t\t\tgoto mainparse\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tgoto wantedvalue\n\t\t\t}\n\t\t}\n\t}\n\nhandle_ID:\n\n\t/* handler: j.ID type=uint32 kind=uint32 quoted=false*/\n\n\t{\n\t\tif tok != fflib.FFTok_integer && tok != fflib.FFTok_null {\n\t\t\treturn fs.WrapErr(fmt.Errorf(\"cannot unmarshal %s into Go value for uint32\", 
tok))\n\t\t}\n\t}\n\n\t{\n\n\t\tif tok == fflib.FFTok_null {\n\n\t\t} else {\n\n\t\t\ttval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32)\n\n\t\t\tif err != nil {\n\t\t\t\treturn fs.WrapErr(err)\n\t\t\t}\n\n\t\t\tj.ID = uint32(tval)\n\n\t\t}\n\t}\n\n\tstate = fflib.FFParse_after_value\n\tgoto mainparse\n\nhandle_Plugin_Name:\n\n\t/* handler: j.Plugin_Name type=string kind=string quoted=false*/\n\n\t{\n\n\t\t{\n\t\t\tif tok != fflib.FFTok_string && tok != fflib.FFTok_null {\n\t\t\t\treturn fs.WrapErr(fmt.Errorf(\"cannot unmarshal %s into Go value for string\", tok))\n\t\t\t}\n\t\t}\n\n\t\tif tok == fflib.FFTok_null {\n\n\t\t} else {\n\n\t\t\toutBuf := fs.Output.Bytes()\n\n\t\t\tj.Plugin_Name = string(string(outBuf))\n\n\t\t}\n\t}\n\n\tstate = fflib.FFParse_after_value\n\tgoto mainparse\n\nwantedvalue:\n\treturn fs.WrapErr(fmt.Errorf(\"wanted value token, but got token: %v\", tok))\nwrongtokenerror:\n\treturn fs.WrapErr(fmt.Errorf(\"ffjson: wanted token: %v, but got token: %v output=%s\", wantedTok, tok, fs.Output.String()))\ntokerror:\n\tif fs.BigError != nil {\n\t\treturn fs.WrapErr(fs.BigError)\n\t}\n\terr = fs.Error.ToError()\n\tif err != nil {\n\t\treturn fs.WrapErr(err)\n\t}\n\tpanic(\"ffjson-generated: unreachable, please report bug.\")\ndone:\n\n\treturn nil\n}",
"func (l *lexer) run() {\r\n\tfor l.state = lexAny(l); l.state != nil; {\r\n\t\tl.state = l.state(l)\r\n\t}\r\n\tclose(l.tokens)\r\n}",
"func NewLexer(input io.Reader, opts ...Option) *Lexer {\n\tlex := new(Lexer)\n\tfor _, opt := range opts {\n\t\topt(lex)\n\t}\n\n\tlex.Error = func(_ *Lexer, err error) {\n\t\tlog.Printf(`Lexer encountered the error \"%v\"`, err)\n\t}\n\tlex.scanner = scanner.NewScanner(input, lex.scannerOpts...)\n\treturn lex\n}",
"func (tkn *Tokenizer) Scan() (int, []byte) {\n\tif tkn.lastChar == 0 {\n\t\ttkn.next()\n\t}\n\ttkn.skipBlank()\n\n\tswitch ch := tkn.lastChar; {\n\tcase isLeadingLetter(ch):\n\t\treturn tkn.scanIdentifier()\n\tcase isDigit(ch):\n\t\treturn tkn.scanNumber(false)\n\tdefault:\n\t\ttkn.next()\n\t\tswitch ch {\n\t\tcase EOFChar:\n\t\t\treturn EOFChar, nil\n\t\tcase ':':\n\t\t\tif tkn.lastChar != '=' {\n\t\t\t\treturn tkn.scanBindVar()\n\t\t\t}\n\t\t\tfallthrough\n\t\tcase '=', ',', ';', '(', ')', '+', '*', '&', '|', '^', '~', '[', ']', '?':\n\t\t\treturn int(ch), []byte{byte(ch)}\n\t\tcase '.':\n\t\t\tif isDigit(tkn.lastChar) {\n\t\t\t\treturn tkn.scanNumber(true)\n\t\t\t}\n\t\t\treturn int(ch), []byte{byte(ch)}\n\t\tcase '/':\n\t\t\tswitch tkn.lastChar {\n\t\t\tcase '/':\n\t\t\t\ttkn.next()\n\t\t\t\treturn tkn.scanCommentType1(\"//\")\n\t\t\tcase '*':\n\t\t\t\ttkn.next()\n\t\t\t\treturn tkn.scanCommentType2()\n\t\t\tdefault:\n\t\t\t\treturn int(ch), []byte{byte(ch)}\n\t\t\t}\n\t\tcase '-':\n\t\t\tif tkn.lastChar == '-' {\n\t\t\t\ttkn.next()\n\t\t\t\treturn tkn.scanCommentType1(\"--\")\n\t\t\t}\n\t\t\treturn int(ch), []byte{byte(ch)}\n\t\tcase '#':\n\t\t\ttkn.next()\n\t\t\treturn tkn.scanCommentType1(\"#\")\n\t\tcase '<':\n\t\t\tswitch tkn.lastChar {\n\t\t\tcase '>':\n\t\t\t\ttkn.next()\n\t\t\t\treturn NE, []byte(\"<>\")\n\t\t\tcase '=':\n\t\t\t\ttkn.next()\n\t\t\t\tswitch tkn.lastChar {\n\t\t\t\tcase '>':\n\t\t\t\t\ttkn.next()\n\t\t\t\t\treturn NullSafeEqual, []byte(\"<=>\")\n\t\t\t\tdefault:\n\t\t\t\t\treturn LE, []byte(\"<=\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn int(ch), []byte{byte(ch)}\n\t\t\t}\n\t\tcase '>':\n\t\t\tif tkn.lastChar == '=' {\n\t\t\t\ttkn.next()\n\t\t\t\treturn GE, []byte(\">=\")\n\t\t\t}\n\t\t\treturn int(ch), []byte{byte(ch)}\n\t\tcase '!':\n\t\t\tif tkn.lastChar == '=' {\n\t\t\t\ttkn.next()\n\t\t\t\treturn NE, []byte(\"!=\")\n\t\t\t}\n\t\t\treturn LexError, []byte(\"!\")\n\t\tcase '\\'':\n\t\t\treturn tkn.scanString(ch, String)\n\t\tcase '`':\n\t\t\treturn tkn.scanLiteralIdentifier('`')\n\t\tcase '\"':\n\t\t\treturn tkn.scanLiteralIdentifier('\"')\n\t\tcase '%':\n\t\t\tif tkn.lastChar == '(' {\n\t\t\t\treturn tkn.scanVariableIdentifier('%')\n\t\t\t}\n\t\t\treturn tkn.scanFormatParameter('%')\n\t\tcase '$':\n\t\t\treturn tkn.scanPreparedStatement('$')\n\t\tcase '{':\n\t\t\treturn tkn.scanEscapeSequence('{')\n\t\tdefault:\n\t\t\treturn LexError, []byte{byte(ch)}\n\t\t}\n\t}\n}",
"func (s *javaTokenizer) Init(src []byte) {\n\ts.scanner = &scanner.Scanner{}\n\ts.scanner.Init(bytes.NewReader(src))\n\ts.errors = make([]string, 0)\n\ts.scanner.Error = func(scanner *scanner.Scanner, msg string) {\n\t\ts.errors = append(s.errors, msg)\n\t}\n}",
"func NewScanner(r *Runtime, b *bytes.Buffer) *Scanner {\n\tvar keywords = map[string]TokenType{\n\t\t\"and\": AndTT,\n\t\t\"class\": ClassTT,\n\t\t\"else\": ElseTT,\n\t\t\"elseif\": ElseifTT,\n\t\t\"false\": FalseTT,\n\t\t\"for\": ForTT,\n\t\t\"fun\": FunTT,\n\t\t\"if\": IfTT,\n\t\t\"include\": IncludeTT,\n\t\t\"nil\": NilTT,\n\t\t\"or\": OrTT,\n\t\t\"print\": PrintTT,\n\t\t\"return\": ReturnTT,\n\t\t\"super\": SuperTT,\n\t\t\"this\": ThisTT,\n\t\t\"true\": TrueTT,\n\t\t\"var\": VarTT,\n\t\t\"while\": WhileTT,\n\t}\n\n\treturn &Scanner{\n\t\tkeywords: keywords,\n\t\truntime: r,\n\t\tsource: b,\n\t\tsourceRunes: bytes.Runes(b.Bytes()),\n\t\ttokens: []*Token{},\n\t\tstart: 0,\n\t\tcurrent: 0,\n\t\tline: 1,\n\t}\n}",
"func New(input string) *Lexer {\n\tlexer := Lexer{input: input}\n\tlexer.consumeChar()\n\treturn &lexer\n}",
"func lexCode(t *Tokeniser) stateFunc {\n\tr, _ := t.currentRune()\n\tif stfn, ok := vectoredLexStateFuncs[r]; ok {\n\t\treturn stfn\n\t}\n\tswitch {\n\tcase isAlphaNumeric(r):\n\t\treturn lexIdentifier\n\tdefault:\n\t\tt.emitErrorf(\"unexpected character: %#U\", r)\n\t\treturn nil\n\t}\n}",
"func NewLexer(config *Config, rules Rules) (*RegexLexer, error) {\n\tif config == nil {\n\t\tconfig = &Config{}\n\t}\n\tif _, ok := rules[\"root\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"no \\\"root\\\" state\")\n\t}\n\tcompiledRules := map[string][]*CompiledRule{}\n\tfor state, rules := range rules {\n\t\tcompiledRules[state] = nil\n\t\tfor _, rule := range rules {\n\t\t\tflags := \"\"\n\t\t\tif !config.NotMultiline {\n\t\t\t\tflags += \"m\"\n\t\t\t}\n\t\t\tif config.CaseInsensitive {\n\t\t\t\tflags += \"i\"\n\t\t\t}\n\t\t\tif config.DotAll {\n\t\t\t\tflags += \"s\"\n\t\t\t}\n\t\t\tcompiledRules[state] = append(compiledRules[state], &CompiledRule{Rule: rule, flags: flags})\n\t\t}\n\t}\n\treturn &RegexLexer{\n\t\tconfig: config,\n\t\trules: compiledRules,\n\t}, nil\n}",
"func New(input string) *Lexer {\n\tl := &Lexer{input: input} //instantiate and return its location\n\tl.readChar()\n\treturn l\n}",
"func execmDecoderToken(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := args[0].(*json.Decoder).Token()\n\tp.Ret(1, ret, ret1)\n}",
"func New(input string) *Lexer {\n\tl := &Lexer{input: input, runes: []rune(input)}\n\tl.readChar()\n\treturn l\n}",
"func NewFromBytes(input []byte) Scanner {\n\t// Append new line to files not ending with new line.\n\tappendNewLine := false\n\tlastNewLine := bytes.LastIndexByte(input, '\\n')\n\tfor _, r := range string(input[lastNewLine+1:]) {\n\t\tif !strings.ContainsRune(whitespace, r) {\n\t\t\t// Non-whitespace character located after the last new line character.\n\t\t\tappendNewLine = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif appendNewLine {\n\t\tinput = append(input, '\\n')\n\t}\n\n\treturn lexer.NewLexer(input)\n}",
"func (e *snappyL4) Encode(dst *tokens, src []byte) {\n\tconst (\n\t\tinputMargin = 8 - 3\n\t\tminNonLiteralBlockSize = 1 + 1 + inputMargin\n\t\tmatchLenGood = 12\n\t)\n\n\t// Protect against e.cur wraparound.\n\tif e.cur > 1<<30 {\n\t\tfor i := range e.table[:] {\n\t\t\te.table[i] = tableEntryPrev{}\n\t\t}\n\t\te.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}\n\t}\n\n\t// This check isn't in the Snappy implementation, but there, the caller\n\t// instead of the callee handles this case.\n\tif len(src) < minNonLiteralBlockSize {\n\t\t// We do not fill the token table.\n\t\t// This will be picked up by caller.\n\t\tdst.n = uint16(len(src))\n\t\te.cur += maxStoreBlockSize\n\t\te.prev = e.prev[:0]\n\t\treturn\n\t}\n\n\t// sLimit is when to stop looking for offset/length copies. The inputMargin\n\t// lets us use a fast path for emitLiteral in the main loop, while we are\n\t// looking for copies.\n\tsLimit := int32(len(src) - inputMargin)\n\n\t// nextEmit is where in src the next emitLiteral should start from.\n\tnextEmit := int32(0)\n\ts := int32(0)\n\tcv := load3232(src, s)\n\tnextHash := hash(cv)\n\n\tfor {\n\t\t// Copied from the C++ snappy implementation:\n\t\t//\n\t\t// Heuristic match skipping: If 32 bytes are scanned with no matches\n\t\t// found, start looking only at every other byte. If 32 more bytes are\n\t\t// scanned (or skipped), look at every third byte, etc.. When a match\n\t\t// is found, immediately go back to looking at every byte. This is a\n\t\t// small loss (~5% performance, ~0.1% density) for compressible data\n\t\t// due to more bookkeeping, but for non-compressible data (such as\n\t\t// JPEG) it's a huge win since the compressor quickly \"realizes\" the\n\t\t// data is incompressible and doesn't bother looking for matches\n\t\t// everywhere.\n\t\t//\n\t\t// The \"skip\" variable keeps track of how many bytes there are since\n\t\t// the last match; dividing it by 32 (ie. right-shifting by five) gives\n\t\t// the number of bytes to move ahead for each iteration.\n\t\tskip := int32(32)\n\n\t\tnextS := s\n\t\tvar candidate tableEntry\n\t\tvar candidateAlt tableEntry\n\t\tfor {\n\t\t\ts = nextS\n\t\t\tbytesBetweenHashLookups := skip >> 5\n\t\t\tnextS = s + bytesBetweenHashLookups\n\t\t\tskip += bytesBetweenHashLookups\n\t\t\tif nextS > sLimit {\n\t\t\t\tgoto emitRemainder\n\t\t\t}\n\t\t\tcandidates := e.table[nextHash&tableMask]\n\t\t\tnow := load3232(src, nextS)\n\t\t\te.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}\n\t\t\tnextHash = hash(now)\n\n\t\t\t// Check both candidates\n\t\t\tcandidate = candidates.Cur\n\t\t\tif cv == candidate.val {\n\t\t\t\toffset := s - (candidate.offset - e.cur)\n\t\t\t\tif offset < maxMatchOffset {\n\t\t\t\t\toffset = s - (candidates.Prev.offset - e.cur)\n\t\t\t\t\tif cv == candidates.Prev.val && offset < maxMatchOffset {\n\t\t\t\t\t\tcandidateAlt = candidates.Prev\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// We only check if value mismatches.\n\t\t\t\t// Offset will always be invalid in other cases.\n\t\t\t\tcandidate = candidates.Prev\n\t\t\t\tif cv == candidate.val {\n\t\t\t\t\toffset := s - (candidate.offset - e.cur)\n\t\t\t\t\tif offset < maxMatchOffset {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcv = now\n\t\t}\n\n\t\t// A 4-byte match has been found. We'll later see if more than 4 bytes\n\t\t// match. But, prior to the match, src[nextEmit:s] are unmatched. 
Emit\n\t\t// them as literal bytes.\n\t\temitLiteral(dst, src[nextEmit:s])\n\n\t\t// Call emitCopy, and then see if another emitCopy could be our next\n\t\t// move. Repeat until we find no match for the input immediately after\n\t\t// what was consumed by the last emitCopy call.\n\t\t//\n\t\t// If we exit this loop normally then we need to call emitLiteral next,\n\t\t// though we don't yet know how big the literal will be. We handle that\n\t\t// by proceeding to the next iteration of the main loop. We also can\n\t\t// exit this loop via goto if we get close to exhausting the input.\n\t\tfor {\n\t\t\t// Invariant: we have a 4-byte match at s, and no need to emit any\n\t\t\t// literal bytes prior to s.\n\n\t\t\t// Extend the 4-byte match as long as possible.\n\t\t\t//\n\t\t\ts += 4\n\t\t\tt := candidate.offset - e.cur + 4\n\t\t\tl := e.matchlen(s, t, src)\n\t\t\t// Try alternative candidate if match length < matchLenGood.\n\t\t\tif l < matchLenGood-4 && candidateAlt.offset != 0 {\n\t\t\t\tt2 := candidateAlt.offset - e.cur + 4\n\t\t\t\tl2 := e.matchlen(s, t2, src)\n\t\t\t\tif l2 > l {\n\t\t\t\t\tl = l2\n\t\t\t\t\tt = t2\n\t\t\t\t}\n\t\t\t}\n\t\t\t// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)\n\t\t\tdst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))\n\t\t\tdst.n++\n\t\t\ts += l\n\t\t\tnextEmit = s\n\t\t\tif s >= sLimit {\n\t\t\t\tt += l\n\t\t\t\t// Index first pair after match end.\n\t\t\t\tif int(t+4) < len(src) && t > 0 {\n\t\t\t\t\tcv := load3232(src, t)\n\t\t\t\t\tnextHash = hash(cv)\n\t\t\t\t\te.table[nextHash&tableMask] = tableEntryPrev{\n\t\t\t\t\t\tPrev: e.table[nextHash&tableMask].Cur,\n\t\t\t\t\t\tCur: tableEntry{offset: e.cur + t, val: cv},\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tgoto emitRemainder\n\t\t\t}\n\n\t\t\t// We could immediately start working at s now, but to improve\n\t\t\t// compression we first update the hash table at s-3 to s. If\n\t\t\t// another emitCopy is not our next move, also calculate nextHash\n\t\t\t// at s+1. 
At least on GOARCH=amd64, these three hash calculations\n\t\t\t// are faster as one load64 call (with some shifts) instead of\n\t\t\t// three load32 calls.\n\t\t\tx := load6432(src, s-3)\n\t\t\tprevHash := hash(uint32(x))\n\t\t\te.table[prevHash&tableMask] = tableEntryPrev{\n\t\t\t\tPrev: e.table[prevHash&tableMask].Cur,\n\t\t\t\tCur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},\n\t\t\t}\n\t\t\tx >>= 8\n\t\t\tprevHash = hash(uint32(x))\n\n\t\t\te.table[prevHash&tableMask] = tableEntryPrev{\n\t\t\t\tPrev: e.table[prevHash&tableMask].Cur,\n\t\t\t\tCur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},\n\t\t\t}\n\t\t\tx >>= 8\n\t\t\tprevHash = hash(uint32(x))\n\n\t\t\te.table[prevHash&tableMask] = tableEntryPrev{\n\t\t\t\tPrev: e.table[prevHash&tableMask].Cur,\n\t\t\t\tCur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},\n\t\t\t}\n\t\t\tx >>= 8\n\t\t\tcurrHash := hash(uint32(x))\n\t\t\tcandidates := e.table[currHash&tableMask]\n\t\t\tcv = uint32(x)\n\t\t\te.table[currHash&tableMask] = tableEntryPrev{\n\t\t\t\tPrev: candidates.Cur,\n\t\t\t\tCur: tableEntry{offset: s + e.cur, val: cv},\n\t\t\t}\n\n\t\t\t// Check both candidates\n\t\t\tcandidate = candidates.Cur\n\t\t\tcandidateAlt = tableEntry{}\n\t\t\tif cv == candidate.val {\n\t\t\t\toffset := s - (candidate.offset - e.cur)\n\t\t\t\tif offset <= maxMatchOffset {\n\t\t\t\t\toffset = s - (candidates.Prev.offset - e.cur)\n\t\t\t\t\tif cv == candidates.Prev.val && offset <= maxMatchOffset {\n\t\t\t\t\t\tcandidateAlt = candidates.Prev\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// We only check if value mismatches.\n\t\t\t\t// Offset will always be invalid in other cases.\n\t\t\t\tcandidate = candidates.Prev\n\t\t\t\tif cv == candidate.val {\n\t\t\t\t\toffset := s - (candidate.offset - e.cur)\n\t\t\t\t\tif offset <= maxMatchOffset {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcv = uint32(x >> 8)\n\t\t\tnextHash = hash(cv)\n\t\t\ts++\n\t\t\tbreak\n\t\t}\n\t}\n\nemitRemainder:\n\tif int(nextEmit) < len(src) {\n\t\temitLiteral(dst, src[nextEmit:])\n\t}\n\te.cur += int32(len(src))\n\te.prev = e.prev[:len(src)]\n\tcopy(e.prev, src)\n}",
"func New(reader *reader.Reader) *Lexer {\n\tl := &Lexer{\n\t\treader: reader,\n\t\trow: 1,\n\t\tcol: 1,\n\t\trewinded: false,\n\t\tsymbol: &Symbol{},\n\t}\n\n\t// List of valid keywords.\n\tl.symbol.Insert(\"true\", token.BOOLEAN)\n\tl.symbol.Insert(\"false\", token.BOOLEAN)\n\tl.symbol.Insert(\"nil\", token.NIL)\n\tl.symbol.Insert(\"let\", token.LET)\n\tl.symbol.Insert(\"var\", token.VAR)\n\tl.symbol.Insert(\"func\", token.FUNCTION)\n\tl.symbol.Insert(\"do\", token.DO)\n\tl.symbol.Insert(\"end\", token.END)\n\tl.symbol.Insert(\"if\", token.IF)\n\tl.symbol.Insert(\"else\", token.ELSE)\n\tl.symbol.Insert(\"for\", token.FOR)\n\tl.symbol.Insert(\"in\", token.IN)\n\tl.symbol.Insert(\"is\", token.IS)\n\tl.symbol.Insert(\"as\", token.AS)\n\tl.symbol.Insert(\"return\", token.RETURN)\n\tl.symbol.Insert(\"then\", token.THEN)\n\tl.symbol.Insert(\"switch\", token.SWITCH)\n\tl.symbol.Insert(\"case\", token.CASE)\n\tl.symbol.Insert(\"default\", token.DEFAULT)\n\tl.symbol.Insert(\"break\", token.BREAK)\n\tl.symbol.Insert(\"continue\", token.CONTINUE)\n\tl.symbol.Insert(\"module\", token.MODULE)\n\tl.symbol.Insert(\"import\", token.IMPORT)\n\n\t// Move to the first token.\n\tl.advance()\n\n\treturn l\n}",
"func New(input string) *Lexer {\n\tl := &Lexer{input: input}\n\tl.readChar() // initialize\n\treturn l\n}",
"func (lex *Lexer) Next() {\n\tdefer func() {\n\t\tif lex.debug {\n\t\t\t_, file, line, ok := runtime.Caller(2)\n\t\t\tif ok {\n\t\t\t\tlog.Printf(\n\t\t\t\t\t\"[DEBUG] Text=[%s], Token=[%v], Pos=[%s] called from %s:%d\\n\",\n\t\t\t\t\tlex.Text,\n\t\t\t\t\tlex.Token,\n\t\t\t\t\tlex.Pos,\n\t\t\t\t\tfilepath.Base(file),\n\t\t\t\t\tline,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}()\n\n\tvar err error\n\tlex.Token, lex.Text, lex.Pos, err = lex.scanner.Scan()\n\tlex.RawText = lex.scanner.LastScanRaw()\n\tif err != nil {\n\t\tlex.scanErr = err\n\t\tlex.Error(lex, err)\n\t}\n}",
"func New(input string) *Lexer {\n\tlexer := Lexer{input: input}\n\tlexer.readChar()\n\treturn &lexer\n}",
"func (lexer *MidiLexer) Lex() error {\n\tif lexer.callback == nil {\n\t\treturn NoCallback\n\t}\n\n\tif lexer.input == nil {\n\t\treturn NoReadSeeker\n\t}\n\n\tvar finished bool = false\n\tvar err error\n\n\tfor {\n\t\tfinished, err = lexer.next()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif finished == true {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (l *lexer) reset() {\n\tl.line -= strings.Count(l.input[l.start:l.pos], \"\\n\")\n\tl.pos = l.start\n}",
"func Start(in io.Reader, out io.Writer) {\n\tscanner := bufio.NewScanner(in)\n\n\tfor {\n\t\tfmt.Printf(\"\\ndigijan $ \" + os.Getenv(\"USER\") + \" \")\n\t\tscanned := scanner.Scan()\n\t\tif !scanned {\n\t\t\treturn\n\t\t}\n\n\t\tline := scanner.Text()\n\t\ttokenizer := lexer.New(line)\n\n\t\tfor tokenized := tokenizer.NextToken(); tokenized.Type != token.EOF; tokenized = tokenizer.NextToken() {\n\t\t\tfmt.Printf(\"%+v\\n\", tokenized)\n\t\t}\n\t}\n}",
"func New(input string) *Lexer {\n\tl := &Lexer{input: input, charNo: -1, lineNo: 0}\n\n\tl.readChar() // read the first character\n\treturn l\n}",
"func New(input string) *Lexer {\n\tl := &Lexer{input: input}\n\tl.readChar()\n\tl.line = 1\n\treturn l\n}",
"func Lex(opts *common.Options, s common.Scanner) (common.Lexer, error) {\n\t// Construct the scanner\n\tif s == nil {\n\t\tvar err error\n\t\ts, err = scanner.Scan(opts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Construct the lexer object\n\tl := &lexer{\n\t\ts: s,\n\t\topts: opts,\n\t}\n\n\t// Push the starting column onto the indent stack\n\tl.indent.PushBack(1)\n\n\treturn l, nil\n}",
"func NewFromBytes(input []byte) Scanner {\n\ttoks := lexer.ParseString(string(input))\n\treturn &scanner{toks: toks}\n}"
] | [
"0.58732367",
"0.5784889",
"0.5703706",
"0.55805975",
"0.5456216",
"0.53958595",
"0.538605",
"0.53396213",
"0.53048724",
"0.5303696",
"0.52453625",
"0.52387947",
"0.5211122",
"0.520678",
"0.51975566",
"0.5183511",
"0.5175322",
"0.51680154",
"0.51622397",
"0.5149477",
"0.5146944",
"0.5146944",
"0.5134534",
"0.51309824",
"0.5127324",
"0.51139545",
"0.50985444",
"0.50963736",
"0.5095037",
"0.50945705",
"0.50945014",
"0.5091691",
"0.50706524",
"0.5066678",
"0.5056605",
"0.5054752",
"0.50378513",
"0.50290024",
"0.50263375",
"0.50239366",
"0.5018795",
"0.5015509",
"0.49831435",
"0.49797025",
"0.49621856",
"0.49602303",
"0.49582314",
"0.49564528",
"0.49531785",
"0.49464676",
"0.49463227",
"0.4945855",
"0.4945855",
"0.49385315",
"0.49339578",
"0.4918596",
"0.49141756",
"0.49141756",
"0.49009934",
"0.4895762",
"0.4886355",
"0.4882947",
"0.48724633",
"0.48278567",
"0.48263028",
"0.4817594",
"0.48129877",
"0.48115999",
"0.4786526",
"0.4780448",
"0.4778593",
"0.4766173",
"0.47656965",
"0.47454408",
"0.47176877",
"0.47103745",
"0.47100562",
"0.4706188",
"0.47051737",
"0.4698479",
"0.46960923",
"0.46927375",
"0.4692092",
"0.46920037",
"0.46916732",
"0.4690116",
"0.46878847",
"0.46783537",
"0.46737015",
"0.46730232",
"0.46672505",
"0.46557656",
"0.4653465",
"0.46354204",
"0.46332029",
"0.46279535",
"0.46251196",
"0.4619343",
"0.46155906",
"0.46083802"
] | 0.69410497 | 0 |
Test_Add_Read adds random entries to a testdb then tries to read those entries | func Test_Add_Read(t *testing.T) {
	var test_shorthand = make([]byte, 20)
	var test_fullpath = make([]byte, 20)
	prio := -1
	rand.Read(test_shorthand)
	rand.Read(test_fullpath)
	short := make_printable(test_shorthand)
	full := make_printable(test_fullpath)
	e := Entry{
		shorthand: short,
		full_path: full,
		prio:      prio,
		extra:     "TESTING",
	}
	AddEntry(&e)
	res := GetShort(short)
	if len(res) < 1 {
		t.Fail()
	}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func readTestData(t *testing.T, r FileSetReader, shard uint32, timestamp time.Time, entries []testEntry) {\n\tfor _, underTest := range readTestTypes {\n\t\terr := r.Open(testNs1ID, 0, timestamp)\n\t\trequire.NoError(t, err)\n\n\t\trequire.Equal(t, len(entries), r.Entries())\n\t\trequire.Equal(t, 0, r.EntriesRead())\n\n\t\tbloomFilter, err := r.ReadBloomFilter()\n\t\tassert.NoError(t, err)\n\t\t// Make sure the bloom filter doesn't always return true\n\t\tassert.False(t, bloomFilter.Test([]byte(\"some_random_data\")))\n\t\texpectedM, expectedK := bloom.EstimateFalsePositiveRate(\n\t\t\tuint(len(entries)), defaultIndexBloomFilterFalsePositivePercent)\n\t\tassert.Equal(t, expectedK, bloomFilter.K())\n\t\t// EstimateFalsePositiveRate always returns at least 1, so skip this check\n\t\t// if len entries is 0\n\t\tif len(entries) > 0 {\n\t\t\tassert.Equal(t, expectedM, bloomFilter.M())\n\t\t}\n\n\t\tfor i := 0; i < r.Entries(); i++ {\n\t\t\tswitch underTest {\n\t\t\tcase readTestTypeData:\n\t\t\t\tid, data, checksum, err := r.Read()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tdata.IncRef()\n\n\t\t\t\tassert.Equal(t, entries[i].id, id.String())\n\t\t\t\tassert.True(t, bytes.Equal(entries[i].data, data.Get()))\n\t\t\t\tassert.Equal(t, digest.Checksum(entries[i].data), checksum)\n\n\t\t\t\tassert.Equal(t, i+1, r.EntriesRead())\n\n\t\t\t\t// Verify that the bloomFilter was bootstrapped properly by making sure it\n\t\t\t\t// at least contains every ID\n\t\t\t\tassert.True(t, bloomFilter.Test(id.Data().Get()))\n\n\t\t\t\tid.Finalize()\n\t\t\t\tdata.DecRef()\n\t\t\t\tdata.Finalize()\n\t\t\tcase readTestTypeMetadata:\n\t\t\t\tid, length, checksum, err := r.ReadMetadata()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tassert.True(t, id.Equal(id))\n\t\t\t\tassert.Equal(t, digest.Checksum(entries[i].data), checksum)\n\t\t\t\tassert.Equal(t, len(entries[i].data), length)\n\n\t\t\t\tassert.Equal(t, i+1, r.MetadataRead())\n\n\t\t\t\t// Verify that the bloomFilter was bootstrapped properly by making sure it\n\t\t\t\t// at least contains every ID\n\t\t\t\tassert.True(t, bloomFilter.Test(id.Data().Get()))\n\n\t\t\t\tid.Finalize()\n\t\t\t}\n\t\t}\n\n\t\trequire.NoError(t, r.Close())\n\t}\n}",
"func TestRead(t *testing.T) {\r\n\tpersonsFiltered, err := Read(client, firstname)\r\n\tif err != nil {\r\n\t\tt.Fatalf(\"Failed read test :%s\", err)\r\n\t}\r\n\r\n\tfor _, value := range *personsFiltered {\r\n\t\tif value.ID == _id {\r\n\t\t\tt.Log(\"person exists :\", value.ID)\r\n\t\t\tbreak\r\n\t\t} else {\r\n\t\t\tt.Fatalf(\"Failed read test. \")\r\n\t\t}\r\n\t}\r\n}",
"func getDbTest() {\n\tvar index int\n\tvar data string\n\tvar newTest test\n\tlistTest = allTest{}\n\n\trows, err := database.Query(\"SELECT * FROM Test\")\n\tif err != nil {\n\t\tfmt.Println(\"Error running query\")\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfor rows.Next() {\n\t\trows.Scan(&index, &data)\n\t\tfmt.Printf(strconv.Itoa(index)+\" %s\", data)\n\t\tfmt.Printf(\"\\n\")\n\t\tnewTest.ID = index\n\t\tnewTest.Nombre = data\n\t\tlistTest = append(listTest, newTest)\n\t}\n\tdefer rows.Close()\n}",
"func TestBasic(t *testing.T){\r\n\tif !TESTBASIC{\r\n\t\treturn\r\n\t}\r\n rafts,_ := makeRafts(5, \"input_spec.json\", \"log\", 220, 300)\t\r\n\tcontents := make([]string, 2)\r\n\tcontents[0] = \"foo\"\r\n\tcontents[1] = \"bar\"\r\n\t//To get one node elected as Leader\r\n\ttime.Sleep(2*time.Second)\r\n\trafts[0].Append([]byte(contents[0]))\r\n\trafts[0].Append([]byte(contents[1]))\r\n\tciarr := []int{0,0,0,0,0}\r\n\tfor cnt:=0;cnt<5;{\r\n\t\tfor idx, node := range rafts {\r\n\t\t\tselect {\r\n\t\t\tcase ci := <-node.CommitChannel():\r\n\t\t\t\tif ci.Err != nil {\r\n\t\t\t\t\tfmt.Fprintln(os.Stderr,ci.Err)\r\n\t\t\t\t}\r\n\t\t\t\texpect(t,contents[ciarr[idx]], string(ci.Data))\r\n\t\t\t\tciarr[idx] += 1\r\n\t\t\t\tif ciarr[idx] == 2{\r\n\t\t\t\t\tcnt += 1\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\tfor _, node := range rafts{\r\n\t\t//Tests LogStore actions\r\n\t\tnode.mainLogLock.RLock()\r\n\t\tdefer node.mainLogLock.RUnlock()\r\n\t\tiface, err := node.mainLog.Get(0)\r\n\t\tcheckError(t, err,fmt.Sprintf(\"NodeId:%v, mainLog.get(0) mainLog.LastIndex:%v\", node.Id(), node.mainLog.GetLastIndex()))\r\n\t\tif iface != nil{\r\n\t\t\tfoo := iface.([]byte)\r\n\t\t\texpect(t, string(foo), \"foo\")\r\n\t\t\tdebugRaftTest(fmt.Sprintf(\"0:%v\", string(foo)))\r\n\t\t}\r\n\t\tiface, err = node.mainLog.Get(1) \r\n\t\tcheckError(t, err, fmt.Sprintf(\"NodeId:%v, mainLog.get(1) mainLog.LastIndex:%v\", node.Id(), node.mainLog.GetLastIndex()))\r\n\t\tif iface != nil{\r\n\t\t\tbar := iface.([]byte)\r\n\t\t\texpect(t, string(bar), \"bar\")\r\n\t\t\tdebugRaftTest(fmt.Sprintf(\"1:%v\", string(bar)))\r\n\t\t}\r\n\r\n\t\t//Tests StateStore actions\r\n\t\tnode.stateLogLock.RLock()\r\n\t\tdefer node.stateLogLock.RUnlock()\r\n\t\tnode.smLock.RLock()\r\n\t\tdefer node.smLock.RUnlock()\r\n\t\tiface, err = node.stateLog.Get(0) \r\n\t\tcheckError(t, err, fmt.Sprintf(\"Id:%v, stateLog.get(0)\", node.Id()))\r\n\t\tif iface != nil{\r\n\t\t\tstate := iface.(StateInfo)\r\n\t\t\texpect(t, fmt.Sprintf(\"%v\", state.CurrTerm), fmt.Sprintf(\"%v\", node.sm.currTerm))\r\n\t\t\texpect(t, fmt.Sprintf(\"%v\", state.VotedFor), fmt.Sprintf(\"%v\", node.sm.votedFor))\r\n\t\t\texpect(t, state.Log.String(), node.sm.log.String())\r\n\t\t}\r\n\t}\r\n\r\n\tfor _, node := range rafts {\r\n\t\tnode.Shutdown()\r\n\t}\r\n\ttime.Sleep(1*time.Second)\t\t\t\r\n}",
"func TestIntCreateTwoPersonsAndReadBack(t *testing.T) {\n\tlog.SetPrefix(\"TestCreatePersonAndReadBack\")\n\t// Create a dao containing a session\n\tdbsession, err := dbsession.MakeGorpMysqlDBSession()\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\tdefer dbsession.Close()\n\n\tdao := MakeRepo(dbsession)\n\n\tclearDown(dao, t)\n\n\t//Create two people\n\tp1 := personModel.MakeInitialisedPerson(0, expectedForename1, expectedSurname1)\n\tperson1, err := dao.Create(p1)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\tlog.Printf(\"person1 %s\", person1.String())\n\tp2 := personModel.MakeInitialisedPerson(0, expectedForename2, expectedSurname2)\n\tperson2, err := dao.Create(p2)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\n\t// read all the people in the DB - expect just the two we created\n\tpeople, err := dao.FindAll()\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\n\tif len(people) != 2 {\n\t\tt.Errorf(\"expected 2 rows, actual %d\", len(people))\n\t}\n\n\tmatches := 0\n\tfor _, person := range people {\n\t\tswitch person.Forename() {\n\t\tcase expectedForename1:\n\t\t\tif person.Surname() == expectedSurname1 {\n\t\t\t\tmatches++\n\t\t\t} else {\n\t\t\t\tt.Errorf(\"expected surname to be %s actually %s\", expectedSurname1, person.Surname())\n\t\t\t}\n\t\tcase expectedForename2:\n\t\t\tif person.Surname() == expectedSurname2 {\n\t\t\t\tmatches++\n\t\t\t} else {\n\t\t\t\tt.Errorf(\"expected forename to be %s actually %s\", expectedForename2, person.Forename())\n\t\t\t}\n\t\tdefault:\n\t\t\tt.Errorf(\"unexpected forename - %s\", person.Forename())\n\t\t}\n\t}\n\n\t// We should have just the records we created\n\tif matches != 2 {\n\t\tt.Errorf(\"expected two matches, actual %d\", matches)\n\t}\n\n\t// Find each of the records by ID and check the fields\n\tlog.Printf(\"finding person %d\", person1.ID())\n\tperson1Returned, err := dao.FindByID(person1.ID())\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\tif person1Returned.Forename() != expectedForename1 {\n\t\tt.Errorf(\"expected forename to be %s actually %s\",\n\t\t\texpectedForename1, person1Returned.Forename())\n\t}\n\tif person1Returned.Surname() != expectedSurname1 {\n\t\tt.Errorf(\"expected surname to be %s actually %s\",\n\t\t\texpectedSurname1, person1Returned.Surname())\n\t}\n\n\tvar IDStr = strconv.FormatUint(person2.ID(), 10)\n\tperson2Returned, err := dao.FindByIDStr(IDStr)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\n\tlog.Printf(\"found person %s\", person2Returned.String())\n\n\tif person2Returned.Forename() != expectedForename2 {\n\t\tt.Errorf(\"expected forename to be %s actually %s\",\n\t\t\texpectedForename2, person2Returned.Forename())\n\t}\n\tif person2Returned.Surname() != expectedSurname2 {\n\t\tt.Errorf(\"expected surname to be %s actually %s\",\n\t\t\texpectedSurname2, person2Returned.Surname())\n\t}\n\n\tclearDown(dao, t)\n}",
"func InitTestDB() error {\n\tif db == nil {\n\t\treturn errors.New(\"database not initialized\")\n\t}\n\n\tfor i := 0; i < 1000; i++ {\n\t\tkey := intToByteArray(i)\n\t\tvalue := GetByteArray(\"hello from \"+strconv.Itoa(i), \"string\")\n\t\terr := Insert(key, value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func Test_Read(t *testing.T) {\n\tctx := context.Background()\n\tdatabase, err := db.ConnectDB(\"\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tProjectService := NewProjectServiceServer(database)\n\treq := &v1.ReadRequest{\n\t\tApi: apiVersion,\n\t\tId: 2,\n\t}\n\tres, _ := ProjectService.Read(ctx, req)\n\tfmt.Println(res)\n\tt.Log(\"Done\")\n\n}",
"func readsNWrites(t *testing.T, blobs BlobAdmin) {\n\tfor _, testCase := range testData {\n\t\texpectedKey := toKeyOrDie(t, testCase.expectedHash)\n\t\t// 1 read must fail\n\t\t_, err := blobs.Read(expectedKey)\n\t\tassert(err != nil, t, \"Reading %s should had failed!\", testCase.expectedHash)\n\t\tassert(!strings.Contains(err.Error(), \"bytes long hash key\"), t,\n\t\t\t\"Error type when reading %s:%v\", testCase.expectedHash, err)\n\t\t// 2 write must succeed and key must match\n\t\tkey, err := blobs.Write(strings.NewReader(testCase.input))\n\t\tassert(err == nil, t, \"Error writing blob %s:%s\", testCase.expectedHash, err)\n\t\tassert(key.Equals(expectedKey), t, \"Expected blob key to be %s but got %s\", testCase.expectedHash, key)\n\t\t// 3 read must now succeed\n\t\treader, err := blobs.Read(key)\n\t\tassert(err == nil, t, \"Error fetching %s: %v\", key, err)\n\t\tblobBytes, err := ioutil.ReadAll(reader)\n\t\tassert(err == nil, t, \"Error reading %s: %v\", key, err)\n\t\tassert(bytes.Compare(blobBytes, []byte(testCase.input)) == 0, t,\n\t\t\t\"Expected to read '%s' but got '%s'\", testCase.input, blobBytes)\n\t\t// 4 writing again must succeed and key must match all over again\n\t\tkey, err = blobs.Write(strings.NewReader(testCase.input))\n\t\tassert(err == nil, t, \"Error writing blob %s:%s\", testCase.expectedHash, err)\n\t\tassert(key.Equals(expectedKey), t, \"Expected blob key to be %s but got %s\", testCase.expectedHash, key)\n\t\t// 5 remove must succeed\n\t\terr = blobs.Remove(key)\n\t\tassert(err == nil, t, \"Error removing %s: %v\", key, err)\n\t\t// 6 read must now fail\n\t\terr = blobs.Remove(key)\n\t\tassert(err == nil, t, \"Error removing %s: %v\", key, err)\n\t}\n}",
"func loadTestData(filename string) (*mockstore.DB, error) {\n\tinput, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tparsed, err := parser.ParseInsert(parser.InsertFormatTSV, string(input))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdb := mockstore.NewDB()\n\tnextKID := db.SkipIndex()*logread.MaxOffset + 1\n\tresolveOrCreateXID := func(xid string) uint64 {\n\t\tkid := db.Current().ResolveXID(xid)\n\t\tif kid != 0 {\n\t\t\treturn kid\n\t\t}\n\t\tkid = nextKID\n\t\tnextKID++\n\t\tdb.AddSPO(kid, wellknown.HasExternalID, rpc.AString(xid, 0))\n\t\treturn kid\n\t}\n\tfor _, line := range parsed.Facts {\n\t\tspo, err := makeFact(line, resolveOrCreateXID)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Invalid test data line %s: %v\", line, err)\n\t\t}\n\t\tdb.AddSPO(spo.Subject, spo.Predicate, spo.Object)\n\t}\n\treturn db, nil\n}",
"func initTests() int {\n\trows, err := db.Query(\"SELECT idTEST, categories, conditions, params, period, scoreMap, custORacct FROM TXTEST\")\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// defer rows.Close()\n\ti := 0\n\tfor rows.Next() {\n\t\ttest := new(TxTest)\n\t\terr := rows.Scan(&test.TName, &test.CategoryStr, &test.Conditions, &test.ParamStr, &test.PeriodStr, &test.ScoreMapStr, &test.CustOrAcct)\n\t\tif err != nil {\n\t\t\trows.Close()\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\ttest.Params, test.QueryType = parseParams(strings.Split(test.ParamStr, \",\"), test.CustOrAcct)\n\t\ttest.Period = *parsePeriod(test.PeriodStr)\n\t\ttest.ScoreMap = parseScoreMap(test.ScoreMapStr)\n\n\t\ttxTestCache[test.TName] = test\n\t\ti++\n\t\tfmt.Printf(\"\\ntest %s: %+v\", txTestCache[test.TName].TName, txTestCache[test.TName])\n\t}\n\trows.Close()\n\t//\treturn custs, err\n\treturn i\n}",
"func TestRetrieve(t *testing.T) {\n\t// drop the debug database\n\ttable, err := coreDatabase.TableConnect(true, \"perk\", []string{\"name\", \"brand\", \"value\", \"created\", \"expiry\"})\n\tif err != nil {\n\t\tt.Error(\"TestRetrieve Connection Error: \" + err.Error())\n\t\treturn\n\t}\n\t_ = table.DropCollection()\n\t// Insert test Items\n\tfor i := 0; i < 4; i++ {\n\t\tdateCreated := time.Date(2018, time.Month(i), 4, 0, 0, 0, 0, time.UTC)\n\t\tdateExpiry := time.Date(2018+i, time.Month(i+2), 4, 0, 0, 0, 0, time.UTC)\n\t\titem := bson.M{\"name\": \"Tesco\" + strconv.Itoa(i),\n\t\t\t\"brand\": \"Tesco\",\n\t\t\t\"value\": 1 + i*2,\n\t\t\t\"created\": dateCreated,\n\t\t\t\"expiry\": dateExpiry,\n\t\t}\n\t\terr = table.Insert(&item)\n\t}\n\t// query: filter by name\n\tFullList, err := Retrieve(\"Tesco0\", false, time.Time{}, table)\n\tif err != nil {\n\t\tt.Error(\"TestRetrieve Retrieve Error: \" + err.Error())\n\t\treturn\n\t}\n\tcompareItem := Item{Name: \"Tesco0\", Brand: \"Tesco\", Value: 1,\n\t\tCreated: time.Date(2018, 0, 4, 0, 0, 0, 0, time.UTC),\n\t\tExpiry: time.Date(2018, 2, 4, 0, 0, 0, 0, time.UTC),\n\t}\n\tif FullList[0] != compareItem {\n\t\tt.Error(\"TestRetrieve Retrieve check Item Error\")\n\t}\n\t// query: only active offers\n\tlayout := \"2006-01-02T15:04:05.000Z\"\n\tstr := \"2019-05-25T11:45:26.371Z\"\n\tstartTime, _ := time.Parse(layout, str)\n\tFullList, err = Retrieve(\"*\", true, startTime, table)\n\tif err != nil {\n\t\tt.Error(\"TestRetrieve Retrieve Active Error: \" + err.Error())\n\t\treturn\n\t}\n\tif len(FullList) != 2 {\n\t\tt.Error(\"TestRetrieve Retrieve check Active List\")\n\t}\n\n\t// query: all the offers\n\tFullList, err = Retrieve(\"*\", false, time.Time{}, table)\n\tif err != nil {\n\t\tt.Error(\"TestRetrieve Retrieve Active Error: \" + err.Error())\n\t\treturn\n\t}\n\tif len(FullList) != 4 {\n\t\tt.Error(\"TestRetrieve Retrieve All List\")\n\t}\n\t// cleanup\n\t_ = table.DropCollection()\n}",
"func TestWriteErrorWhenReader(t *testing.T) {\n\t// Ensure the db exists for this test\n\tdb, err := Open(db_filename, \"c\")\n\tdb.Close()\n\n\tdb, err = Open(db_filename, \"r\")\n\tdefer db.Close()\n\tdefer os.Remove(db_filename)\n\n\tif err != nil {\n\t\tt.Error(\"Couldn't read database\")\n\t}\n\n\terr = db.Insert(\"foo\", \"bar\")\n\tif err == nil {\n\t\tt.Error(\"Database let readonly client write\")\n\t}\n}",
"func TestReadUser(t *testing.T) {\r\n/////////////////////////////////// MOCKING ////////////////////////////////////////////\r\n\tvar batches = []string{\r\n\t\t`CREATE TABLE Users (Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Name TEXT NOT NULL UNIQUE);`,\r\n\t\t`INSERT INTO Users (Id,Name) VALUES (1,'anonymous');`,\r\n\t}\r\n\t//open pseudo database for function\r\n\tdb, err := sql.Open(\"ramsql\", \"TestReadUser\")\r\n\tif err != nil {\r\n\t\tt.Fatalf(\"Error creating mock sql : %s\\n\", err)\r\n\t}\r\n\tdefer db.Close()\r\n\r\n\t// Exec every line of batch and create database\r\n\tfor _, b := range batches {\r\n\t\t_, err = db.Exec(b)\r\n\t\tif err != nil {\r\n\t\t\tt.Fatalf(\"Error exec query in query: %s\\n Error:%s\", b, err)\r\n\t\t}\r\n\t}\r\n/////////////////////////////////// MOCKING ////////////////////////////////////////////\r\n\r\n\t// Specify test variables and expected results.\r\n\ttests := []struct {\r\n\t\tid int\r\n\t\t// we need to use models.User for passing to object.This is different with \"database.User\".\r\n\t\tresult models.User\r\n\t\terr error\r\n\t}{\r\n\t\t// When give to first parameter(id) 1 , We expect result :1 error nil\r\n\t\t{id: 1, result: models.User{Id: 1, Name: \"anonymous\"}, err: nil},\r\n\t\t// When give to first parameter(id) 1 , We expect result :1 error nil\r\n\t\t//{id: 2, result: models.User{Id: 2, Name: \"test\"}, err: nil},\r\n\t}\r\n\r\n\t// test all of the variables.\r\n\tfor _, test := range tests {\r\n\t\t//get result after test.\r\n\t\ts, err := u.ReadUser(db, test.id)\r\n\t\t// if expected error type nil we need to compare with actual error different way.\r\n\t\tif test.err == nil {\r\n\t\t\t// If test fails give error.It checks expected result and expected error\r\n\t\t\tif err != test.err || s != test.result {\r\n\t\t\t\t// Compare expected error and actual error\r\n\t\t\t\tt.Errorf(\"Error is: %v . Expected: %v\", err, test.err)\r\n\t\t\t\t// Compare expected result and actual result\r\n\t\t\t\tt.Errorf(\"Result is: %v . Expected: %v\", s, test.result)\r\n\t\t\t}\r\n\t\t\t// if expected error type is not nil we need to compare with actual error different way.\r\n\t\t} else {\r\n\t\t\tif err.Error() != test.err.Error() || s != test.result {\r\n\t\t\t\t// Compare expected error and actual error\r\n\t\t\t\tt.Errorf(\"Error is: %v . Expected: %v\", err, test.err)\r\n\t\t\t\t// Compare expected result and actual result\r\n\t\t\t\tt.Errorf(\"Result is: %v . Expected: %v\", s, test.result)\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}",
"func Read(id_to_read int32, dbMgr *mgr.DBConn) SimpleDbType {\n\tdb := dbMgr.Open()\n\ttheReturn := SimpleDbType{}\n\n\tsqlRead := fmt.Sprintf(\"select id, name, number from test_table where id = %d\", id_to_read)\n\n\trows, err := db.Query(sqlRead)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\terr := rows.Scan(&theReturn.Id, &theReturn.Name, &theReturn.Number)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\terr = rows.Err()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdbMgr.Close()\n\treturn theReturn\n}",
"func main() {\n conn, err := net.Dial(\"tcp\", defaultHost + \":\" + strconv.Itoa(defaultPort))\n if err != nil {\n fmt.Printf(\"Dial error\\n\")\n return\n }\n\n reader := bufio.NewReader(conn)\n testPut(conn, reader, \"foo\", \"bar\")\n testGet(conn, reader, \"foo\")\n\n testPut(conn, reader, \"foo\", \"sun\")\n testGet(conn, reader, \"foo\")\n\n testPut(conn, reader, \"foo\", \"sweet\")\n testGet(conn, reader, \"foo\")\n\n testDelete(conn, reader, \"foo\")\n testGet(conn, reader, \"foo\")\n\n conn.Close()\n}",
"func TestAppendFew(t *testing.T) {\n const basePath = \"../../../_test/durable/tx_few\"\n os.RemoveAll(basePath)\n nt := setup()\n\n tl := CreateTransactionLogger(basePath)\n numTestEntries := 198\n\n tl.assertValid()\n\n for i := 0; i < numTestEntries; i++ {\n tx := &testTx{int64(i), nt()}\n tl.Append(tx)\n tl.assertValid()\n }\n\n expectNFilesAtPath(t, 2, basePath)\n expectNFilePairs(t, 1, tl)\n expectNTransactions(t, int64(numTestEntries), tl)\n tl.Close()\n}",
"func TestReadExistingAndNewLogs(t *testing.T) {\n\tt.Parallel()\n\toperator, logReceived, tempDir := newTestFileOperator(t, nil)\n\n\t// Start with a file with an entry in it, and expect that entry\n\t// to come through when we poll for the first time\n\ttemp := openTemp(t, tempDir)\n\twriteString(t, temp, \"testlog1\\n\")\n\n\trequire.NoError(t, operator.Start(testutil.NewMockPersister(\"test\")))\n\tdefer func() {\n\t\trequire.NoError(t, operator.Stop())\n\t}()\n\n\twaitForMessage(t, logReceived, \"testlog1\")\n\n\t// Write a second entry, and expect that entry to come through\n\t// as well\n\twriteString(t, temp, \"testlog2\\n\")\n\twaitForMessage(t, logReceived, \"testlog2\")\n}",
"func TestDbLog_GetList(t *testing.T) {\n\thandler := debug.NewLocalDb()\n\tdb := newDbLog(handler)\n\t_, err := db.Add(0, \"success\",\"\", 0, \"\", time.GetDayTime())\n\tif err == nil {\n\t\tt.Errorf(\"Add check cronId fail\")\n\t\treturn\n\t}\n\tid, err := db.Add(1, \"success\",\"123\", 1000, \"hello\", time.GetDayTime())\n\tif err != nil {\n\t\tt.Errorf(\"Add fail, error=[%v]\", err)\n\t\treturn\n\t}\n\trows, num, _, _, err := db.GetList(1, 0, 0)\n\tif err != nil || num <= 0 {\n\t\tt.Errorf(\"Get GetList, error=[%v], num=[%v]\", err, num)\n\t\treturn\n\t}\n\tvar row *LogEntity = nil\n\tfor _, r := range rows {\n\t\tif r.Id == id {\n\t\t\trow = r\n\t\t}\n\t}\n\tif row == nil {\n\t\tt.Errorf(\"GetList fail\")\n\t\treturn\n\t}\n\tif row.Id <= 0 || row.CronId != 1 || row.Output != \"123\"||\n\t\trow.UseTime != 1000 || row.Remark != \"hello\" {\n\t\tt.Errorf(\"Add check rows fail\")\n\t\treturn\n\t}\n\tdb.Delete(row.Id)\n}",
"func TestDbLog_Get(t *testing.T) {\n\thandler := debug.NewLocalDb()\n\tdb := newDbLog(handler)\n\t_, err := db.Add(0, \"success\", \"\", 0, \"\", time.GetDayTime())\n\tif err == nil {\n\t\tt.Errorf(\"Add check cronId fail\")\n\t\treturn\n\t}\n\tid, err := db.Add(1, \"success\", \"123\", 1000, \"hello\", time.GetDayTime())\n\tif err != nil {\n\t\tt.Errorf(\"Add fail, error=[%v]\", err)\n\t\treturn\n\t}\n\trow, err := db.Get(id)\n\tif err != nil {\n\t\tt.Errorf(\"Get fail, error=[%v]\", err)\n\t\treturn\n\t}\n\tif row.Id <= 0 || row.CronId != 1 || row.Output != \"123\"||\n\t\trow.UseTime != 1000 || row.Remark != \"hello\" {\n\t\tt.Errorf(\"Add check rows fail\")\n\t\treturn\n\t}\n\tdb.Delete(row.Id)\n}",
"func TestDB(t *testing.T) {\n\ta, err := db.Archive(\"data/efd.tar.gz\")\n\tassert.NoError(t, err)\n\n\tadd, err := db.Directory(\"data\")\n\tassert.NoError(t, err)\n\n\tm := db.Merge(a, add)\n\n\td, err := db.Read(m)\n\tassert.NoError(t, err)\n\n\tfor _, f := range efd.All {\n\t\texpect := d.Formulae[f.ID]\n\t\tif !reflect.DeepEqual(expect, f) {\n\t\t\tt.Fatalf(\"mismatch formula %q\", f.ID)\n\t\t}\n\t}\n}",
"func TestSaveAndRead(t *testing.T) {\n\tcrypter := &meowCrypter{}\n\tmetadata, entries, snapshot := makeWALData(1, 1)\n\n\tc := NewWALFactory(crypter, crypter)\n\ttempdir := createWithWAL(t, c, metadata, snapshot, entries)\n\tdefer os.RemoveAll(tempdir)\n\n\twrapped, err := c.Open(tempdir, snapshot)\n\trequire.NoError(t, err)\n\n\tmeta, _, ents, err := wrapped.ReadAll()\n\trequire.NoError(t, wrapped.Close())\n\trequire.NoError(t, err)\n\trequire.Equal(t, metadata, meta)\n\trequire.Equal(t, entries, ents)\n}",
"func TestSimple(t *testing.T) {\n\tt.Logf(\"Running simple table tests\")\n\tdb, err = DialUnix(TEST_SOCK, TEST_USER, TEST_PASSWD, TEST_DBNAME)\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Create table\")\n\terr = db.Query(CREATE_SIMPLE)\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Insert 1000 records\")\n\trowMap := make(map[uint64][]string)\n\tfor i := 0; i < 1000; i++ {\n\t\tnum, str1, str2 := rand.Int(), randString(32), randString(128)\n\t\terr = db.Query(fmt.Sprintf(INSERT_SIMPLE, num, str1, str2))\n\t\tif err != nil {\n\t\t\tt.Logf(\"Error %s\", err)\n\t\t\tt.Fail()\n\t\t}\n\t\trow := []string{fmt.Sprintf(\"%d\", num), str1, str2}\n\t\trowMap[db.LastInsertId] = row\n\t}\n\t\n\tt.Logf(\"Select inserted data\")\n\terr = db.Query(SELECT_SIMPLE)\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Use result\")\n\tres, err := db.UseResult()\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Validate inserted data\")\n\tfor {\n\t\trow := res.FetchRow()\n\t\tif row == nil {\n\t\t\tbreak\n\t\t}\n\t\tid := row[0].(uint64)\n\t\tnum, str1, str2 := strconv.Itoa64(row[1].(int64)), row[2].(string), string(row[3].([]byte))\n\t\tif rowMap[id][0] != num || rowMap[id][1] != str1 || rowMap[id][2] != str2 {\n\t\t\tt.Logf(\"String from database doesn't match local string\")\n\t\t\tt.Fail()\n\t\t}\n\t}\n\t\n\tt.Logf(\"Free result\")\n\terr = res.Free()\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Update some records\")\n\tfor i := uint64(0); i < 1000; i += 5 {\n\t\trowMap[i+1][2] = randString(256)\n\t\terr = db.Query(fmt.Sprintf(UPDATE_SIMPLE, rowMap[i+1][2], i+1))\n\t\tif err != nil {\n\t\t\tt.Logf(\"Error %s\", err)\n\t\t\tt.Fail()\n\t\t}\n\t\tif db.AffectedRows != 1 {\n\t\t\tt.Logf(\"Expected 1 effected row but got %d\", db.AffectedRows)\n\t\t\tt.Fail()\n\t\t}\n\t}\n\t\n\tt.Logf(\"Select updated data\")\n\terr = db.Query(SELECT_SIMPLE)\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Store result\")\n\tres, err = db.StoreResult()\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Validate updated data\")\n\tfor {\n\t\trow := res.FetchRow()\n\t\tif row == nil {\n\t\t\tbreak\n\t\t}\n\t\tid := row[0].(uint64)\n\t\tnum, str1, str2 := strconv.Itoa64(row[1].(int64)), row[2].(string), string(row[3].([]byte))\n\t\tif rowMap[id][0] != num || rowMap[id][1] != str1 || rowMap[id][2] != str2 {\n\t\t\tt.Logf(\"%#v %#v\", rowMap[id], row)\n\t\t\tt.Logf(\"String from database doesn't match local string\")\n\t\t\tt.Fail()\n\t\t}\n\t}\n\t\n\tt.Logf(\"Free result\")\n\terr = res.Free()\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\n\tt.Logf(\"Drop table\")\n\terr = db.Query(DROP_SIMPLE)\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Close connection\")\n\terr = db.Close()\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n}",
"func TestAzureDBFSPutRead(t *testing.T) {\n\tfilepath := \"/dbfstest/test.txt\"\n\tputReq := httpmodels.PutReq{\n\t\tPath: filepath,\n\t\tContents: \"VGhpcyBpcyB0aGUgcHV0IHRlc3QuIA==\", //'this is a put test'\n\t\tOverwrite: true,\n\t}\n\n\te := c.Dbfs().Put(putReq)\n\tassert.Nil(t, e, \"Could not put data to the file.\")\n\n\treadReq := httpmodels.ReadReq{\n\t\tPath: filepath,\n\t}\n\n\tresp, e := c.Dbfs().Read(readReq)\n\tassert.Nil(t, e, \"Could not read the file\")\n\tassert.NotEqual(t, resp.BytesRead, 0)\n\tassert.Equal(t, resp.Data, putReq.Contents)\n}",
"func TestIntCreatePersonStoreFetchBackAndCheckContents(t *testing.T) {\n\tlog.SetPrefix(\"TestIntegrationCreatePersonAndCheckContents\")\n\t// Create a dao containing a session\n\tdbsession, err := dbsession.MakeGorpMysqlDBSession()\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\tdefer dbsession.Close()\n\n\tdao := MakeRepo(dbsession)\n\n\tclearDown(dao, t)\n\n\tp := personModel.MakeInitialisedPerson(0, expectedForename1, expectedSurname1)\n\n\t// Store the person in the DB\n\tperson, err := dao.Create(p)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\tlog.Printf(\"created person %s\\n\", person.String())\n\n\tretrievedPerson, err := dao.FindByID(person.ID())\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\tlog.Printf(\"retrieved person %s\\n\", retrievedPerson.String())\n\n\tif retrievedPerson.ID() != person.ID() {\n\t\tt.Errorf(\"expected ID to be %d actually %d\", person.ID(),\n\t\t\tretrievedPerson.ID())\n\t}\n\tif retrievedPerson.Forename() != expectedForename1 {\n\t\tt.Errorf(\"expected forename to be %s actually %s\", expectedForename1,\n\t\t\tretrievedPerson.Forename())\n\t}\n\tif retrievedPerson.Surname() != expectedSurname1 {\n\t\tt.Errorf(\"expected surname to be %s actually %s\", expectedSurname1,\n\t\t\tretrievedPerson.Surname())\n\t}\n\n\t// Delete person and check response\n\tid := person.ID()\n\trows, err := dao.DeleteByID(person.ID())\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\tif rows != 1 {\n\t\tt.Errorf(\"expected delete to return 1, actual %d\", rows)\n\t}\n\tlog.Printf(\"deleted person with ID %d\\n\", id)\n\tclearDown(dao, t)\n}",
"func TestReadOnecorrectID(t *testing.T) {\n\tdb := DBSession()\n\tdefer db.Close() // clean up when we’re done\n\tSetupData(db)\n\n\ta := assert.New(t)\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"/cb_service/contact_book/{id}\", http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {\n\t\t// save it in the request context\n\t\tctx := context.WithValue(req.Context(), \"database\", db)\n\t\treq.Header.Set(\"Content-Type\", contentType)\n\t\treq.Header.Set(\"Authorization\", encodedAuthToken)\n\t\treq = req.WithContext(ctx)\n\t\treadOneH(res, req)\n\t}))\n\n\tserver := httptest.NewServer(router)\n\tdefer server.Close()\n\treqURL := server.URL + \"/cb_service/contact_book/\" + contactBookID\n\tres, err := http.Get(reqURL)\n\tif err != nil {\n\t\tl.Printf(\"Cannot Make Request :%v \", err)\n\t\ta.Error(err)\n\t}\n\n\ta.Equal(res.StatusCode, http.StatusOK)\n\tClearData(db)\n}",
"func TestQueueCommitRead(t *testing.T) {\n\tdb, err := newDB()\n\tif err != nil {\n\t\tt.Fatalf(\"sql.Open(): %v\", err)\n\t}\n\tdefer db.Close()\n\tfactory := testutil.NewFakeFactory(db)\n\n\tm, err := New(ctx, db, \"test\", factory)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create SQL history: %v\", err)\n\t}\n\tfor i, index := range [][]byte{\n\t\tdh(\"0000000000000000000000000000000000000000000000000000000000000000\"),\n\t\tdh(\"F000000000000000000000000000000000000000000000000000000000000000\"),\n\t\tdh(\"2000000000000000000000000000000000000000000000000000000000000000\"),\n\t\tdh(\"C000000000000000000000000000000000000000000000000000000000000000\"),\n\t} {\n\t\tdata := []byte{byte(i)}\n\t\ttxn, err := factory.NewDBTxn(ctx)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"factory.NewDBTxn() failed: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif err := m.QueueLeaf(txn, index, data); err != nil {\n\t\t\tt.Errorf(\"WriteLeaf(%v, %v)=%v\", index, data, err)\n\t\t\tcontinue\n\t\t}\n\t\tif err := txn.Commit(); err != nil {\n\t\t\tt.Errorf(\"txn.Commit() failed: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tepoch, err := m.Commit(ctx)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Commit()=[_, %v], want [_, nil]\", err)\n\t\t}\n\t\ttxn, err = factory.NewDBTxn(ctx)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"factory.NewDBTxn() failed: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\treadData, err := m.ReadLeafAt(txn, index, epoch)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ReadLeafAt(%v, %v)=%v)\", epoch, index, err)\n\t\t}\n\t\tif err := txn.Commit(); err != nil {\n\t\t\tt.Errorf(\"txn.Commit() failed: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif got, want := readData, data; !bytes.Equal(got, want) {\n\t\t\tt.Errorf(\"ReadLeafAt(%v, %v)=%v, want %v\", epoch, index, got, want)\n\t\t}\n\t}\n}",
"func TestAPIGetAll() error {\n\ttestRead := testCase{\n\t\tinput: \"\",\n\t\texpected: `[{\"FirstName\":\"Alec\", \"LastName\":\"Perro\", \"Age\":5}]`,\n\t}\n\n query, err := dm.Read(1)\n if err != nil {\n log.Fatal(err)\n }\n\n\tjsonify, err := json.Marshal(query)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif testRead.expected != string(jsonify) {\n\t\treturn errors.New(\"testDB failed\")\n\t}\n\n\tfmt.Println(\"Tests passed\")\n\treturn nil\n}",
"func Test_Basic(t *testing.T) {\n\ttt := TT{t}\n\n\ttt.ForeachDB(\"+nothing\", func(db *sql.DB) {\n\t\ttt.MustResult(db.Exec(`INSERT INTO knowledge VALUES (5, 'chaos')`)).RowsAffected()\n\n\t\taffected, err := tt.MustResult(db.Exec(`UPDATE knowledge SET thing = $1 WHERE number = $2`, \"douglas\", \"42\")).RowsAffected()\n\t\ttt.Must(err)\n\t\tif affected != 1 {\n\t\t\ttt.Unexpected(\"affected\", 1, affected)\n\t\t}\n\n\t\ttt.MustResult(db.Exec(`DELETE FROM knowledge WHERE thing = 'conspiracy'`))\n\n\t\trows := tt.MustRows(db.Query(`SELECT * FROM knowledge ORDER BY number`))\n\t\ttt.ExpectRow(rows, 5, \"chaos\")\n\t\ttt.ExpectRow(rows, 42, \"douglas\")\n\t\tif rows.Next() {\n\t\t\tt.Fatalf(\"unexpected continuation of result set\")\n\t\t}\n\t\ttt.Must(rows.Close())\n\t})\n\n\ttt.CleanupDB()\n}",
"func StorageListTest(app *Server, t *testing.T, testData string) {\n\tapp.Storage.Clear()\n\tmodData := testData + testData\n\tkey, err := app.Storage.Set(\"test/123\", testData)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"123\", key)\n\tkey, err = app.Storage.Set(\"test/456\", modData)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"456\", key)\n\tdata, err := app.Storage.Get(\"test/*\")\n\trequire.NoError(t, err)\n\tvar testObjects []objects.Object\n\terr = json.Unmarshal(data, &testObjects)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 2, len(testObjects))\n\tfor i := range testObjects {\n\t\tif testObjects[i].Index == \"123\" {\n\t\t\trequire.Equal(t, testData, testObjects[i].Data)\n\t\t}\n\n\t\tif testObjects[i].Index == \"456\" {\n\t\t\trequire.Equal(t, modData, testObjects[i].Data)\n\t\t}\n\t}\n\tdata1, err := app.Storage.Get(\"test/123\")\n\trequire.NoError(t, err)\n\tdata2, err := app.Storage.Get(\"test/456\")\n\trequire.NoError(t, err)\n\tobj1, err := objects.DecodeRaw(data1)\n\trequire.NoError(t, err)\n\tobj2, err := objects.DecodeRaw(data2)\n\trequire.NoError(t, err)\n\trequire.Equal(t, testData, obj1.Data)\n\trequire.Equal(t, modData, obj2.Data)\n\tkeys, err := app.Storage.Keys()\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"{\\\"keys\\\":[\\\"test/123\\\",\\\"test/456\\\"]}\", string(keys))\n\n\treq := httptest.NewRequest(\n\t\t\"POST\", \"/test/*\",\n\t\tbytes.NewBuffer(\n\t\t\t[]byte(`{\"data\":\"testpost\"}`),\n\t\t),\n\t)\n\tw := httptest.NewRecorder()\n\tapp.Router.ServeHTTP(w, req)\n\tresp := w.Result()\n\trequire.Equal(t, http.StatusOK, resp.StatusCode)\n\tbody, err := io.ReadAll(resp.Body)\n\trequire.NoError(t, err)\n\tdat, err := objects.DecodeRaw(body)\n\trequire.NoError(t, err)\n\tdata, err = app.Storage.Get(\"test/*\")\n\tapp.Console.Log(string(data))\n\trequire.NoError(t, err)\n\terr = json.Unmarshal(data, &testObjects)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 3, len(testObjects))\n\terr = app.Storage.Del(\"test/\" + dat.Index)\n\trequire.NoError(t, err)\n\tdata, err = app.Storage.Get(\"test/*\")\n\trequire.NoError(t, err)\n\terr = json.Unmarshal(data, &testObjects)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 2, len(testObjects))\n\tkey, err = app.Storage.Set(\"test/glob1/glob123\", testData)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"glob123\", key)\n\tkey, err = app.Storage.Set(\"test/glob2/glob456\", modData)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"glob456\", key)\n\tdata, err = app.Storage.Get(\"test/*/*\")\n\trequire.NoError(t, err)\n\terr = json.Unmarshal(data, &testObjects)\n\tapp.Console.Log(testObjects)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 2, len(testObjects))\n\tkey, err = app.Storage.Set(\"test/1/glob/g123\", testData)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"g123\", key)\n\tkey, err = app.Storage.Set(\"test/2/glob/g456\", modData)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"g456\", key)\n\tdata, err = app.Storage.Get(\"test/*/glob/*\")\n\trequire.NoError(t, err)\n\terr = json.Unmarshal(data, &testObjects)\n\tapp.Console.Log(testObjects)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 2, len(testObjects))\n\tkey, err = app.Storage.Set(\"test1\", testData)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"test1\", key)\n\tkey, err = app.Storage.Set(\"test2\", modData)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"test2\", key)\n\tdata, err = app.Storage.Get(\"*\")\n\trequire.NoError(t, err)\n\terr = json.Unmarshal(data, &testObjects)\n\tapp.Console.Log(testObjects)\n\trequire.NoError(t, 
err)\n\trequire.Equal(t, 2, len(testObjects))\n\terr = app.Storage.Del(\"*\")\n\trequire.NoError(t, err)\n\tdata, err = app.Storage.Get(\"*\")\n\trequire.NoError(t, err)\n\terr = json.Unmarshal(data, &testObjects)\n\tapp.Console.Log(testObjects)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 0, len(testObjects))\n}",
"func read(arg string) int {\n\t// we do not consume the key, but in real life the key will be consumed to get the value\n\t// from DB or a filesystem etc.\n\t// We simply return a random number between 0 and 100 (excluded 100).\n\treturn rand.Intn(100)\n}",
"func TestReadOneRecord(t *testing.T) {\n\tbuffer := bytes.NewBufferString(s19TestFile)\n\treader := Open(buffer)\n\tif reader != nil {\n\t\trec, err := reader.Next()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif rec == nil {\n\t\t\tt.Fatalf(\"Returned record is nil\")\n\t\t}\n\t\tif rec.Address() != 0x400 {\n\t\t\tt.Errorf(\"Address mismatch expected: 0x400 got: %v\", rec.Address())\n\t\t}\n\t\tdata := rec.Data()\n\t\tif bytes.Compare(data, rec.Data()[:len(data)]) != 0 {\n\t\t\tt.Errorf(\"Data mismatch\")\n\t\t}\n\t} else {\n\t\tt.Fatal(\"Open call failed\")\n\t}\n}",
"func TestDbLog_Add(t *testing.T) {\n\thandler := debug.NewLocalDb()\n\tdb := newDbLog(handler)\n\t_, err := db.Add(0, \"success\", \"\", 0, \"\", time.GetDayTime())\n\tif err == nil {\n\t\tt.Errorf(\"Add check cronId fail\")\n\t\treturn\n\t}\n\tid, err := db.Add(1, \"success\", \"123\", 1000, \"hello\", time.GetDayTime())\n\tif err != nil {\n\t\tt.Errorf(\"Add fail, error=[%v]\", err)\n\t\treturn\n\t}\n\tif id <= 0{\n\t\tt.Errorf(\"Add check rows fail\")\n\t\treturn\n\t}\n\tdb.Delete(id)\n}",
"func TestCreateReadDeleteUpdate(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\n\tsd, err := newTestDir(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tupdate := sd.createDeleteUpdate()\n\t// Read update\n\tpath := readDeleteUpdate(update)\n\t// Compare values\n\tsiaDirPath := sd.path\n\tif path != siaDirPath {\n\t\tt.Error(\"paths don't match\")\n\t}\n}",
"func TestAddCMD(t *testing.T) {\n\tinitdb()\n\ttestSuit := []struct {\n\t\targs []string\n\t\texpected string\n\t}{\n\t\t{args: []string{\"do\", \"testing\"}, expected: `Task:\"do testing\" is Added in todo list`},\n\t\t{args: []string{\"do\", \"development\"}, expected: `Task:\"do development\" is Added in todo list`},\n\t\t{args: []string{\"do\", \"deployment\"}, expected: `Task:\"do deployment\" is Added in todo list`},\n\t\t{args: []string{\"do\", \"release\"}, expected: `Task:\"do release\" is Added in todo list`},\n\t\t{args: []string{}, expected: `Please provide task`},\n\t}\n\tfile, _ := os.Create(\"./testresult.txt\")\n\tfile.Truncate(0)\n\tdefer file.Close()\n\tdefer os.Remove(file.Name())\n\told := os.Stdout\n\tos.Stdout = file\n\tfor _, testcase := range testSuit {\n\t\tAddTask.Run(AddTask, testcase.args)\n\t\tfile.Seek(0, 0)\n\t\tfp, _ := ioutil.ReadFile(file.Name())\n\n\t\tmatch, err := regexp.Match(testcase.expected, fp)\n\t\tif err != nil {\n\t\t\tt.Error(\"Error in expected result regex\")\n\t\t}\n\t\tif match {\n\t\t\tt.Log(\"Result is as Expected\")\n\t\t} else {\n\t\t\tt.Error(\"Result is not as Expected\")\n\t\t}\n\t}\n\tos.Stdout = old\n}",
"func TestCreate(t *testing.T) {\n\tlocalStore := NewEventLocalStore()\n\n\tfor i := 1; i < 6; i++ {\n\t\ttestEvent := &entities.Event{\n\t\t\tID: fmt.Sprintf(\"id%d\", i),\n\t\t\tTitle: fmt.Sprintf(\"Title%d\", i),\n\t\t}\n\n\t\terr := localStore.Create(testEvent)\n\t\tassert.NoError(t, err)\n\t}\n\n\treturnedEvents, err := localStore.Read()\n\tassert.NoError(t, err)\n\tassert.Equal(t, 5, len(returnedEvents))\n}",
"func (t *BenchmarkerChaincode) ReadRandom(stub shim.ChaincodeStubInterface, seed, nKeys, keySizeLo, keySizeHi int) pb.Response {\n\tvar (\n\t\tvals []Value\n\t\tkm NoopKeyMapper\n\t)\n\tkeys := km.GetKeys(seed, nKeys, keySizeLo, keySizeHi)\n\tfor _, key := range keys {\n\t\tbval, err := stub.GetState(key)\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\n\t\tvar val RandomStringValue\n\t\tval.SetKey(key)\n\t\tval.Parse(string(bval))\n\t\tvals = append(vals, &val)\n\t}\n\treturn shim.Success([]byte(MakeJSONArray(vals)))\n}",
"func (s *BackendSuite) TestCopyOnRead(c *C) {\n\tbeCfg, err := engine.NewHTTPBackend(\"foo\", engine.HTTPBackendSettings{})\n\tc.Assert(err, IsNil)\n\tbe, err := newBackend(*beCfg, Options{}, []engine.Server{{Id: \"1\"}, {Id: \"3\"}})\n\tc.Assert(err, IsNil)\n\n\t// When\n\tbe.upsertServer(engine.Server{Id: \"2\"})\n\t_, srvCfgs1 := be.snapshot()\n\n\tbe.upsertServer(engine.Server{Id: \"3\"}) // Duplicate\n\tbe.upsertServer(engine.Server{Id: \"4\"})\n\t_, srvCfgs2 := be.snapshot()\n\n\tbe.deleteServer(engine.ServerKey{Id: \"5\"}) // Missing\n\tbe.deleteServer(engine.ServerKey{Id: \"1\"})\n\tbe.upsertServer(engine.Server{Id: \"5\"})\n\tbe.deleteServer(engine.ServerKey{Id: \"2\"})\n\tbe.upsertServer(engine.Server{Id: \"1\"})\n\t_, srvCfgs3 := be.snapshot()\n\n\t// Then\n\tc.Assert(srvCfgs1, DeepEquals, []engine.Server{{Id: \"1\"}, {Id: \"3\"}, {Id: \"2\"}})\n\tc.Assert(srvCfgs2, DeepEquals, []engine.Server{{Id: \"1\"}, {Id: \"3\"}, {Id: \"2\"}, {Id: \"4\"}})\n\tc.Assert(srvCfgs3, DeepEquals, []engine.Server{{Id: \"3\"}, {Id: \"4\"}, {Id: \"5\"}, {Id: \"1\"}})\n}",
"func Test_ReadAll(t *testing.T) {\n\tctx := context.Background()\n\tdatabase, err := db.ConnectDB(\"\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tProjectService := NewProjectServiceServer(database)\n\treq := &v1.ReadAllRequest{\n\t\tApi: apiVersion,\n\t}\n\tres, _ := ProjectService.ReadAll(ctx, req)\n\tfmt.Println(res)\n\tt.Log(\"Done\")\n\n}",
"func testRead(c *testContext, flow testFlow) {\n\tc.t.Helper()\n\ttestReadInternal(c, flow, false /* packetShouldBeDropped */)\n}",
"func TestDatabase(t *testing.T) {\n\tt.Log(\"Testing database connection...\")\n\tinitDb()\n\n\tt.Log(\"Testing database insertion...\")\n\n\tcollection := db.C(\"testUsers\")\n\n\ttestUsers := []interface{}{\n\t\t&User{\n\t\t\tId: bson.NewObjectId(),\n\t\t\tCreatedAt: bson.Now(),\n\t\t\tUsername: \"Bobble\",\n\t\t\tPassword: \"Suepass\",\n\t\t\tFullname: \"Bob Sue\",\n\t\t\tStories: []string{},\n\t\t},\n\t\t&User{\n\t\t\tId: bson.NewObjectId(),\n\t\t\tCreatedAt: bson.Now(),\n\t\t\tUsername: \"AliceRex\",\n\t\t\tPassword: \"Suepassaroo\",\n\t\t\tFullname: \"Alice Dino\",\n\t\t\tStories: []string{},\n\t\t},\n\t}\n\terr := collection.Insert(testUsers...)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to insert test users into the database\\n%v\\n\", err)\n\t}\n\n\tt.Log(\"Testing database retrieval...\")\n\n\tresult := []User{}\n\terr = collection.Find(bson.M{\"username\": \"AliceRex\"}).All(&result)\n\tif err != nil || len(result) == 0 {\n\t\tt.Errorf(\"Failed to find test user in the database\\n%v\\n\", err)\n\t}\n\n\terr = collection.Find(nil).All(&result)\n\tif err != nil || len(result) == 0 {\n\t\tt.Errorf(\"Failed to find test users in the database\\n%v\\n\", err)\n\t}\n\n\tinfo, err := collection.RemoveAll(nil)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to remove test users from the database\\n%v\\n\", err)\n\t}\n\n\tif info.Removed < 2 {\n\t\tt.Error(\"Failed to either add or remove test users from the database\")\n\t}\n}",
"func TestReadAllCorrectPayload(t *testing.T) {\n\tdb := DBSession()\n\tdefer db.Close() // clean up when we’re done\n\n\tSetupData(db)\n\tres := httptest.NewRecorder()\n\ta := assert.New(t)\n\treq, err := http.NewRequest(http.MethodGet, \"/cb_service/contact_book\", nil)\n\treq.Header.Set(\"Content-Type\", contentType)\n\treq.Header.Set(\"Authorization\", encodedAuthToken)\n\tif err != nil {\n\t\tl.Printf(\"Cannot Make Request :%v \", err)\n\t\ta.Error(err)\n\t}\n\t// save it in the request context\n\tctx := context.WithValue(req.Context(), dbSessionKey, db)\n\treq = req.WithContext(ctx)\n\treadAllH(res, req)\n\ta.Equal(res.Code, http.StatusOK)\n\tClearData(db)\n}",
"func testManyQueryRow(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Log(\"skipping in short mode\")\n\t\treturn\n\t}\n\tdb.tearDown()\n\tdb.mustExec(\"create table foo (id integer primary key, name varchar(50))\")\n\tdb.mustExec(db.q(\"insert into foo (id, name) values(?,?)\"), 1, \"bob\")\n\tvar name string\n\tfor i := 0; i < 10000; i++ {\n\t\terr := db.QueryRow(db.q(\"select name from foo where id = ?\"), 1).Scan(&name)\n\t\tif err != nil || name != \"bob\" {\n\t\t\tt.Fatalf(\"on query %d: err=%v, name=%q\", i, err, name)\n\t\t}\n\t}\n}",
"func TestAddFile(t *testing.T) {\n\ts, dir := createTestServer(5, 8, 8, 0.000001, uint64(100000))\n\tdefer os.RemoveAll(dir)\n\n\tc, cliDir := createTestClient(s, 0)\n\tdefer os.RemoveAll(cliDir)\n\n\tcontent := \"This is a simple test file\"\n\tfile := createTestFile(content)\n\t_, filename := path.Split(file)\n\tdefer os.Remove(file)\n\n\tif c.AddFile(file) != nil {\n\t\tt.Fatalf(\"first time adding file fails\")\n\t}\n\n\tif c.AddFile(file) == nil {\n\t\tt.Fatalf(\"same file added twice\")\n\t}\n\n\tif c.lookupTable[\"0\"] != filename {\n\t\tt.Fatalf(\"lookup table not set up correctly on the client\")\n\t}\n\tif c.reverseLookup[filename] != \"0\" {\n\t\tt.Fatalf(\"reverse lookup table not set up correctly on the client\")\n\t}\n\n\tserverLookupTable := make(map[string]string)\n\tif tableContent, found := s.ReadLookupTable(); found {\n\t\tjson.Unmarshal(tableContent, &serverLookupTable)\n\t}\n\tif serverLookupTable[\"0\"] != filename {\n\t\tt.Fatalf(\"lookup table not set up correctly on the server\")\n\t}\n\n\tif actual, err := s.GetFile(0); err != nil || !bytes.Equal(actual, []byte(content)) {\n\t\tt.Fatalf(\"file not written correctly to the server: %s\", err)\n\t}\n\n\tif !reflect.DeepEqual(s.SearchWord(c.indexer.ComputeTrapdoors(\"simple\")), []int{0}) {\n\t\tt.Fatalf(\"index file not written correctly to server\")\n\t}\n\n\tcontentRead, err := ioutil.ReadFile(path.Join(cliDir, filename))\n\tif err != nil || !bytes.Equal(contentRead, []byte(content)) {\n\t\tt.Fatalf(\"file not correctly written to local client storage\")\n\t}\n}",
"func TestClient_AddDatabase(t *testing.T) {\n\tteardown := setup()\n\tdefer teardown()\n\tmux.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\taddDbRequest := models.AddDatabaseRequest{}\n\t\terr := json.NewDecoder(r.Body).Decode(&addDbRequest)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"failed to parse json\", http.StatusBadRequest)\n\t\t}\n\t\tnewDatabase := models.InstanceData{\n\t\t\tInstanceName: addDbRequest.InstanceName,\n\t\t\tReadHostGroup: 10,\n\t\t\tWriteHostGroup: 5,\n\t\t\tUsername: addDbRequest.Username,\n\t\t\tPassword: addDbRequest.Password,\n\t\t\tQueryRules: addDbRequest.QueryRules,\n\t\t\tMasterInstance: addDbRequest.MasterInstance,\n\t\t\tReadReplicas: addDbRequest.ReadReplicas,\n\t\t\tUseSSL: 0,\n\t\t\tChesterMetaData: addDbRequest.ChesterMetaData,\n\t\t}\n\t\tdatabases = append(databases, newDatabase)\n\t\taddDataBaseResponse := models.AddDatabaseResponse{\n\t\t\tAction: \"add\",\n\t\t\tQueryRules: addDbRequest.QueryRules,\n\t\t\tInstanceName: addDbRequest.InstanceName,\n\t\t\tUsername: addDbRequest.Username,\n\t\t\tPassword: addDbRequest.Password,\n\t\t\tWriteHostGroup: 5,\n\t\t\tReadHostGroup: 10,\n\t\t\tSSLEnabled: 0,\n\t\t\tChesterMetaData: addDbRequest.ChesterMetaData,\n\t\t}\n\t\trespBytes, err := json.Marshal(&addDataBaseResponse)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"failed to marshal response\", http.StatusInternalServerError)\n\t\t}\n\t\tw.WriteHeader(200)\n\t\tw.Write(respBytes)\n\t})\n\tmux.HandleFunc(\"/databases/temp\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tdbCheck := false\n\t\tfor _, db := range databases {\n\t\t\tif db.InstanceName == \"temp\" {\n\t\t\t\tdbCheck = true\n\t\t\t\tb, err := json.Marshal(db)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"failed to marshal response from mock server %s\", err.Error())\n\t\t\t\t}\n\t\t\t\tw.Write(b)\n\t\t\t}\n\t\t}\n\t\tif !dbCheck {\n\t\t\thttp.Error(w, \"instance not found\", http.StatusNotFound)\n\t\t}\n\t})\n\tmux.HandleFunc(\"/databases\", http.HandlerFunc(getDatabasesHandler))\n\n\taddDbReadReplicaOne := models.AddDatabaseRequestDatabaseInformation{\n\t\tName: \"foo-read-1\",\n\t\tIPAddress: \"2.3.4.6\",\n\t}\n\taddDbReadReplicaTwo := models.AddDatabaseRequestDatabaseInformation{\n\t\tName: \"foo-read-2\",\n\t\tIPAddress: \"2.3.4.7\",\n\t}\n\taddDbReadReplicas := []models.AddDatabaseRequestDatabaseInformation{\n\t\taddDbReadReplicaOne, addDbReadReplicaTwo,\n\t}\n\taddDbQueryRuleOne := models.ProxySqlMySqlQueryRule{\n\t\tRuleID: 1,\n\t\tUsername: \"foo\",\n\t\tActive: 1,\n\t\tMatchDigest: \"bar\",\n\t\tDestinationHostgroup: 5,\n\t\tApply: 1,\n\t\tComment: \"baz\",\n\t}\n\taddDbQueryRuleTwo := models.ProxySqlMySqlQueryRule{\n\t\tRuleID: 2,\n\t\tUsername: \"foo\",\n\t\tActive: 1,\n\t\tMatchDigest: \"barzoople\",\n\t\tDestinationHostgroup: 10,\n\t\tApply: 1,\n\t\tComment: \"baz\",\n\t}\n\taddDbQueryRules := []models.ProxySqlMySqlQueryRule{\n\t\taddDbQueryRuleOne, addDbQueryRuleTwo,\n\t}\n\taddDbRequest := models.AddDatabaseRequest{\n\t\tAction: \"add\",\n\t\tInstanceName: \"temp\",\n\t\tUsername: \"foo\",\n\t\tPassword: \"bar\",\n\t\tMasterInstance: models.AddDatabaseRequestDatabaseInformation{\n\t\t\tName: \"foo\",\n\t\t\tIPAddress: \"2.3.4.5\",\n\t\t},\n\t\tReadReplicas: addDbReadReplicas,\n\t\tQueryRules: addDbQueryRules,\n\t\tChesterMetaData: models.ChesterMetaData{\n\t\t\tInstanceGroup: \"temp\",\n\t\t\tMaxChesterInstances: 
3,\n\t\t},\n\t\tKeyData: \"\",\n\t\tCertData: \"\",\n\t\tCAData: \"\",\n\t\tEnableSSL: 0,\n\t}\n\t_, err := client.AddDatabase(addDbRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\tdbs, err := client.GetDatabases()\n\tif err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\tif len(dbs) != 3 {\n\t\tt.Errorf(\"wtf %v\", len(dbs))\n\t\tt.FailNow()\n\t}\n\tdb, err := client.GetDatabase(\"temp\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\tif db.InstanceName != \"temp\" {\n\t\tt.Errorf(\"wrong instance %s\", db.InstanceName)\n\t\tt.FailNow()\n\t}\n}",
"func TestUpdateLookup(t *testing.T) {\n\t//minerva := NewTester()\n\tvar b [32]byte\n\tfor i := 0; i < 32; i++ {\n\t\tb[i] = RandUint() % 255\n\t}\n\t//dataset:= make([]uint64, TBLSIZE*DATALENGTH*PMTSIZE*32)\n\t//flag,dataset := minerva.updateLookupTBL(22250,dataset)\n\t//if flag {\n\t//\tt.Log(\"dataset:\",dataset)\n\t//}else {\n\t//\tt.Error(\"======update-err=====\",flag)\n\t//}\n}",
"func Test2(t *testing.T) {\n alice,_ := InitUser(\"alice\",\"fubar\")\n // Having previously created a user \"alice\" with password \"fubar\"...\n alice, _ = GetUser(\"alice\", \"fubar\")\n also_alice, _ := GetUser(\"alice\", \"fubar\")\n\n alice.StoreFile(\"todo\", []byte(\"write tests\"))\n todo, _ := also_alice.LoadFile(\"todo\")\n if string(todo) != \"write tests\" {\n t.Error(\"Same user and password could not access file: \", todo)\n }\n}",
"func TestMain(m *testing.M) {\n\tcode := test.TestMain(m, func(svc *dynamodb.Client) error { // this function setups the database\n\t\t// create testing table\n\t\tcreateRequest := svc.CreateTableRequest(&dynamodb.CreateTableInput{\n\t\t\tAttributeDefinitions: []dynamodb.AttributeDefinition{\n\t\t\t\t{\n\t\t\t\t\tAttributeName: aws.String(\"PK\"),\n\t\t\t\t\tAttributeType: \"S\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tKeySchema: []dynamodb.KeySchemaElement{\n\t\t\t\t{\n\t\t\t\t\tAttributeName: aws.String(\"PK\"),\n\t\t\t\t\tKeyType: dynamodb.KeyTypeHash,\n\t\t\t\t},\n\t\t\t},\n\t\t\tProvisionedThroughput: &dynamodb.ProvisionedThroughput{\n\t\t\t\tReadCapacityUnits: aws.Int64(1),\n\t\t\t\tWriteCapacityUnits: aws.Int64(1),\n\t\t\t},\n\t\t\tTableName: aws.String(table),\n\t\t})\n\t\t_, err := createRequest.Send(context.Background())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// add testing data\n\t\trequest := svc.PutItemRequest(&dynamodb.PutItemInput{\n\t\t\tItem: map[string]dynamodb.AttributeValue{\n\t\t\t\t\"PK\": {S: aws.String(\"abc123\")},\n\t\t\t\t\"name\": {S: aws.String(\"John\")},\n\t\t\t},\n\t\t\tTableName: aws.String(table),\n\t\t})\n\t\t_, err = request.Send(context.Background())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\tos.Exit(code)\n}",
"func TestAribtrayInsertOrder(t *testing.T) {\n\tdb, err := newDB()\n\tif err != nil {\n\t\tt.Fatalf(\"sql.Open(): %v\", err)\n\t}\n\tdefer db.Close()\n\tfactory := testutil.NewFakeFactory(db)\n\n\tleafs := []struct {\n\t\tindex []byte\n\t\tdata string\n\t}{\n\t\t{dh(\"0000000000000000000000000000000000000000000000000000000000000000\"), \"0\"},\n\t\t{dh(\"F000000000000000000000000000000000000000000000000000000000000000\"), \"1\"},\n\t\t{dh(\"2000000000000000000000000000000000000000000000000000000000000000\"), \"2\"},\n\t\t{dh(\"C000000000000000000000000000000000000000000000000000000000000000\"), \"3\"},\n\t\t{dh(\"D000000000000000000000000000000000000000000000000000000000000000\"), \"4\"},\n\t\t{dh(\"E000000000000000000000000000000000000000000000000000000000000000\"), \"5\"},\n\t}\n\troots := make([][]byte, len(leafs))\n\tfor i := range roots {\n\t\tm, err := New(ctx, db, \"testMap\", factory)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to create SQL history: %v\", err)\n\t\t}\n\t\t// Iterating over a map in Go is randomized.\n\t\tfor _, leaf := range leafs {\n\t\t\ttxn, err := factory.NewDBTxn(ctx)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"factory.NewDBTxn() failed: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := m.QueueLeaf(txn, leaf.index, []byte(leaf.data)); err != nil {\n\t\t\t\tt.Errorf(\"WriteLeaf(%v, %v)=%v\", leaf.index, leaf.data, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := txn.Commit(); err != nil {\n\t\t\t\tt.Errorf(\"txn.Commit() failed: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif _, err := m.Commit(ctx); err != nil {\n\t\t\t\tt.Errorf(\"Commit()= %v, want nil\", err)\n\t\t\t}\n\t\t}\n\t\ttxn, err := factory.NewDBTxn(ctx)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"factory.NewDBTxn() failed: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tr, err := m.ReadRootAt(txn, 10)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ReadRootAt() = %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif err := txn.Commit(); err != nil {\n\t\t\tt.Errorf(\"txn.Commit() failed: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\troots[i] = r\n\t}\n\t// Verify that all the roots are the same.\n\tfor i, r := range roots {\n\t\tif got, want := r, roots[0]; !bytes.Equal(got, want) {\n\t\t\tt.Errorf(\"root[%v] != root[0]: \\ngot %v\\nwant %v\", i, got, want)\n\t\t}\n\t}\n}",
"func TestCreateReadMetadataUpdate(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\n\tsd, err := newTestDir(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Create metadata update\n\tpath := filepath.Join(sd.path, modules.SiaDirExtension)\n\tupdate, err := createMetadataUpdate(path, sd.metadata)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Read metadata update\n\tdata, path, err := readMetadataUpdate(update)\n\tif err != nil {\n\t\tt.Fatal(\"Failed to read update\", err)\n\t}\n\n\t// Check path\n\tpath2 := filepath.Join(sd.path, modules.SiaDirExtension)\n\tif path != path2 {\n\t\tt.Fatalf(\"Path not correct: expected %v got %v\", path2, path)\n\t}\n\n\t// Check data\n\tvar metadata Metadata\n\terr = json.Unmarshal(data, &metadata)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Check Time separately due to how the time is persisted\n\tif !metadata.AggregateLastHealthCheckTime.Equal(sd.metadata.AggregateLastHealthCheckTime) {\n\t\tt.Fatalf(\"AggregateLastHealthCheckTimes not equal, got %v expected %v\", metadata.AggregateLastHealthCheckTime, sd.metadata.AggregateLastHealthCheckTime)\n\t}\n\tsd.metadata.AggregateLastHealthCheckTime = metadata.AggregateLastHealthCheckTime\n\tif !metadata.LastHealthCheckTime.Equal(sd.metadata.LastHealthCheckTime) {\n\t\tt.Fatalf(\"LastHealthCheckTimes not equal, got %v expected %v\", metadata.LastHealthCheckTime, sd.metadata.LastHealthCheckTime)\n\t}\n\tsd.metadata.LastHealthCheckTime = metadata.LastHealthCheckTime\n\tif !metadata.AggregateModTime.Equal(sd.metadata.AggregateModTime) {\n\t\tt.Fatalf(\"AggregateModTimes not equal, got %v expected %v\", metadata.AggregateModTime, sd.metadata.AggregateModTime)\n\t}\n\tsd.metadata.AggregateModTime = metadata.AggregateModTime\n\tif !metadata.ModTime.Equal(sd.metadata.ModTime) {\n\t\tt.Fatalf(\"ModTimes not equal, got %v expected %v\", metadata.ModTime, sd.metadata.ModTime)\n\t}\n\tsd.metadata.ModTime = metadata.ModTime\n\tif err := equalMetadatas(metadata, sd.metadata); err != nil {\n\t\tt.Fatal(err)\n\t}\n}",
"func (suite *IntPartTestSuite) TestRead() {\n\tpart, err := newIntPartFromString(\"001\")\n\tsuite.Nil(err)\n\tbuff := make([]byte, 3, 3)\n\tcount, err := part.Read(buff)\n\tsuite.Nil(err)\n\tsuite.Equal(3, count)\n\tsuite.Equal(\"001\", string(buff))\n}",
"func TestFreezerConcurrentModifyRetrieve(t *testing.T) {\n\tt.Parallel()\n\n\tf, _ := newFreezerForTesting(t, freezerTestTableDef)\n\tdefer f.Close()\n\n\tvar (\n\t\tnumReaders = 5\n\t\twriteBatchSize = uint64(50)\n\t\twritten = make(chan uint64, numReaders*6)\n\t\twg sync.WaitGroup\n\t)\n\twg.Add(numReaders + 1)\n\n\t// Launch the writer. It appends 10000 items in batches.\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tdefer close(written)\n\t\tfor item := uint64(0); item < 10000; item += writeBatchSize {\n\t\t\t_, err := f.ModifyAncients(func(op database.AncientWriteOp) error {\n\t\t\t\tfor i := uint64(0); i < writeBatchSize; i++ {\n\t\t\t\t\titem := item + i\n\t\t\t\t\tvalue := getChunk(32, int(item))\n\t\t\t\t\tif err := op.AppendRaw(\"test\", item, value); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tfor i := 0; i < numReaders; i++ {\n\t\t\t\twritten <- item + writeBatchSize\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Launch the readers. They read random items from the freezer up to the\n\t// current frozen item count.\n\tfor i := 0; i < numReaders; i++ {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor frozen := range written {\n\t\t\t\tfor rc := 0; rc < 80; rc++ {\n\t\t\t\t\tnum := uint64(rand.Intn(int(frozen)))\n\t\t\t\t\tvalue, err := f.Ancient(\"test\", num)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tpanic(fmt.Errorf(\"error reading %d (frozen %d): %v\", num, frozen, err))\n\t\t\t\t\t}\n\t\t\t\t\tif !bytes.Equal(value, getChunk(32, int(num))) {\n\t\t\t\t\t\tpanic(fmt.Errorf(\"wrong value at %d\", num))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\twg.Wait()\n}",
"func TestLoadShortenerRedirectionRecords(t *testing.T) {\n\trecords := make(shortener.Records, 0)\n\terr := db.LoadShortenerRedirectionRecords(&records)\n\tfmt.Println(err, records)\n}",
"func main() {\n\tclient := dynamodb.New(session.New(aws.NewConfig().WithHTTPClient(&http.Client{\n\t\tTimeout: TestRequestTimeout,\n\t})))\n\t// Writer\n\tgo func() {\n\t\tfor {\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)\n\t\t\tdefer cancel()\n\n\t\t\tif err := addItem(ctx, client); err != nil {\n\t\t\t\tlog.Println(\"Error adding item:\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Println(\"Added 1 item\")\n\t\t}\n\t}()\n\t// Reader\n\tgo func() {\n\t\tfor {\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)\n\t\t\tdefer cancel()\n\n\t\t\tcount, err := countItems(ctx, client)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Error counting items:\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Println(\"Counted\", count, \"items\")\n\t\t}\n\t}()\n\t// Keep the program running until interrupted\n\tfor {\n\t\tselect {}\n\t}\n}",
"func TestAdd(t *testing.T) {\n\tfeed := New()\n\tfeed.Add(Item{})\n\n\tif len(feed.Items) != 1 {\n\t\tt.Errorf(\"Item was not added\")\n\t}\n}",
"func TestAddAndRemove(t *testing.T) {\n\tNUM_NODES := 100000\n\tme := Node{RandomID(), \"\"}\n\ttable := NewRoutingTable(me)\n\tnodes := make([]Node, NUM_NODES)\n\tfor i := 0; i < NUM_NODES; i++ {\n\t\tnodes[i] = Node{RandomID(), \"\"}\n\t\ttable.Add(nodes[i])\n\t}\n\tfor i := 0; i < NUM_NODES; i++ {\n\t\ttable.Remove(nodes[i])\n\t}\n\n\tfor i := 0; i < DIGITS; i++ {\n\t\tfor j := 0; j < BASE; j++ {\n\t\t\tif len(*(table.rows[i][j])) > 1 {\n\t\t\t\tt.Errorf(\"Nodes were not deleted from table.\")\n\t\t\t}\n\t\t\tif len(*(table.rows[i][j])) == 1 &&\n\t\t\t\t!equal_ids(me.Id, (*(table.rows[i][j]))[0].Id) {\n\t\t\t\tt.Errorf(\"Nodes were not deleted from table.\")\n\t\t\t}\n\t\t}\n\t}\n}",
"func TestAdd(t *testing.T) {\n\t// Create a mocked peers cache connected to a mock directory\n\tcache, mgds, err := makePeersCache()\n\trequire.NoError(t, err, \"could not create mocked peers cache\")\n\tdefer mgds.Shutdown()\n\n\t// Common name is required to add a peer to the cache\n\terr = cache.Add(&peers.PeerInfo{})\n\trequire.EqualError(t, err, \"common name is required for all peers\")\n\n\t// Generate a random key for some of our fixtures.\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\trequire.NoError(t, err)\n\n\t// Test adding peers concurrently; the leonardo peer should be updated with\n\t// consecutive updates to\n\tt.Run(\"addTests\", func(t *testing.T) {\n\t\ttests := []struct {\n\t\t\tname string\n\t\t\tinfo *peers.PeerInfo\n\t\t}{\n\t\t\t{\"add-id-only\", &peers.PeerInfo{\n\t\t\t\tCommonName: \"leonardo.trisatest.net\",\n\t\t\t\tID: \"19d84515-007a-48cc-9efd-b153a263e77c\",\n\t\t\t}},\n\t\t\t{\"add-registered-directory-only\", &peers.PeerInfo{\n\t\t\t\tCommonName: \"leonardo.trisatest.net\",\n\t\t\t\tRegisteredDirectory: \"testdirectory.org\",\n\t\t\t}},\n\t\t\t{\"add-endpoint-only\", &peers.PeerInfo{\n\t\t\t\tCommonName: \"leonardo.trisatest.net\",\n\t\t\t\tEndpoint: \"leonardo.trisatest.net:443\",\n\t\t\t}},\n\t\t\t{\"add-signing-key-only\", &peers.PeerInfo{\n\t\t\t\tCommonName: \"leonardo.trisatest.net\",\n\t\t\t\tSigningKey: &privateKey.PublicKey,\n\t\t\t}},\n\t\t\t{\"add-new-peer\", &peers.PeerInfo{\n\t\t\t\tCommonName: \"donatello.trisatest.net\",\n\t\t\t\tID: \"b19c9ebd-82f5-4bda-91ef-226e3ecee4b8\",\n\t\t\t\tRegisteredDirectory: \"testdirectory.org\",\n\t\t\t\tEndpoint: \"donatello.trisatest.net:443\",\n\t\t\t}},\n\t\t}\n\t\tfor _, tt := range tests {\n\t\t\ttt := tt\n\t\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\t\t\t\trequire.NoError(t, cache.Add(tt.info))\n\t\t\t})\n\t\t}\n\t})\n\n\t// Verify the final state of the cache\n\tleonardo, err := cache.Get(\"leonardo.trisatest.net\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"leonardo.trisatest.net\", leonardo.Info().CommonName)\n\trequire.Equal(t, \"19d84515-007a-48cc-9efd-b153a263e77c\", leonardo.Info().ID)\n\trequire.Equal(t, \"testdirectory.org\", leonardo.Info().RegisteredDirectory)\n\trequire.Equal(t, \"leonardo.trisatest.net:443\", leonardo.Info().Endpoint)\n\trequire.Equal(t, &privateKey.PublicKey, leonardo.Info().SigningKey)\n\n\tdonatello, err := cache.Get(\"donatello.trisatest.net\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"donatello.trisatest.net\", donatello.Info().CommonName)\n\trequire.Equal(t, \"b19c9ebd-82f5-4bda-91ef-226e3ecee4b8\", donatello.Info().ID)\n\trequire.Equal(t, \"testdirectory.org\", donatello.Info().RegisteredDirectory)\n\trequire.Equal(t, \"donatello.trisatest.net:443\", donatello.Info().Endpoint)\n}",
"func StorageGetNTest(app *Server, t *testing.T, n int) {\n\tapp.Storage.Clear()\n\ttestData := base64.StdEncoding.EncodeToString([]byte(TEST_DATA))\n\tfor i := 0; i < n; i++ {\n\t\tvalue := strconv.Itoa(i)\n\t\tkey, err := app.Storage.Set(\"test/\"+value, testData)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, key)\n\t\ttime.Sleep(time.Millisecond * 1)\n\t}\n\n\tlimit := 1\n\ttestObjects, err := app.Storage.GetN(\"test/*\", limit)\n\trequire.NoError(t, err)\n\trequire.Equal(t, limit, len(testObjects))\n\trequire.Equal(t, strconv.Itoa(n-1), testObjects[0].Index)\n}",
"func TestKeys(t *testing.T) {\n\tvar db_filename string = \"test.gdbm\" // pending the test_cleanup merge\n\n\tos.Remove(db_filename) // pending the test_cleanup merge\n\tdb, err := Open(db_filename, \"c\")\n\tif err != nil {\n\t\tt.Error(\"Couldn't create new database\")\n\t}\n\tdefer db.Close()\n\tdefer os.Remove(db_filename)\n\n\terr = db.Insert(\"foo\", \"bar\")\n\tif err != nil {\n\t\tt.Error(\"Database let readonly client write\")\n\t}\n\terr = db.Insert(\"baz\", \"bax\")\n\tif err != nil {\n\t\tt.Error(\"Database let readonly client write\")\n\t}\n\terr = db.Insert(\"biff\", \"bixx\")\n\tif err != nil {\n\t\tt.Error(\"Database let readonly client write\")\n\t}\n\n\texpected_keys := []string{\n\t\t\"foo\",\n\t\t\"baz\",\n\t\t\"biff\",\n\t}\n\n\tk, err := db.FirstKey()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif !ListContains(expected_keys, k) {\n\t\tt.Errorf(\"FirstKey() expected: %s\", expected_keys)\n\t}\n\n\tfor i := 1; i < len(expected_keys); i++ {\n\t\tn, err := db.NextKey(k)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tif !ListContains(expected_keys, n) {\n\t\t\tt.Errorf(\"NextKey() expected: %s\", expected_keys)\n\t\t}\n\t}\n\n}",
"func (suite *SuiteTester) SetupTest() {\n r.Table(\"users\").Delete().RunWrite(session)\n user_fixtures := make([]User, 4)\n user_fixtures[0] = User{\n FirstName: \"Tyrion\",\n LastName: \"Lannister\",\n Email: \"tyrion@lannister.com\",\n Bio: \"Younger brother to Cersei and Jaime.\",\n FacebookId: \"0b8a2b98-f2c5-457a-adc0-34d10a6f3b5c\",\n CreatedAt: time.Date(2008, time.June, 13, 18, 30, 10, 0, time.UTC),\n UpdatedAt: time.Date(2014, time.October, 5, 18, 30, 10, 0, time.UTC),\n }\n user_fixtures[1] = User{\n FirstName: \"Tywin\",\n LastName: \"Lannister\",\n Email: \"tywin@lannister.com\",\n Bio: \"Lord of Casterly Rock, Shield of Lannisport and Warden of the West.\",\n FacebookId: \"bb2d8a7b-92e6-4baf-b4f7-b664bdeee25b\",\n CreatedAt: time.Date(1980, time.July, 14, 18, 30, 10, 0, time.UTC),\n UpdatedAt: time.Date(2014, time.October, 6, 18, 30, 10, 0, time.UTC),\n }\n user_fixtures[2] = User{\n FirstName: \"Jaime\",\n LastName: \"Lannister\",\n Email: \"jaime@lannister.com\",\n Bio: \"Nicknamed 'Kingslayer' for killing the previous King, Aerys II.\",\n FacebookId: \"d4c19866-eaff-4417-a1c1-93882162606d\",\n CreatedAt: time.Date(2000, time.September, 15, 18, 30, 10, 0, time.UTC),\n UpdatedAt: time.Date(2014, time.October, 7, 18, 30, 10, 0, time.UTC),\n }\n user_fixtures[3] = User{\n FirstName: \"Cersei\",\n LastName: \"Lannister\",\n Email: \"cersei@lannister.com\",\n Bio: \"Queen of the Seven Kingdoms of Westeros, is the wife of King Robert Baratheon.\",\n FacebookId: \"251d74d8-7462-4f2a-b132-6f7e429507e5\",\n CreatedAt: time.Date(2002, time.May, 12, 18, 30, 10, 0, time.UTC),\n UpdatedAt: time.Date(2014, time.October, 8, 18, 30, 10, 0, time.UTC),\n }\n\n r.Table(\"users\").Insert(user_fixtures).RunWrite(session)\n}",
"func HelpTestAddWithReedSolomonMetadata(t *testing.T) (*core.IpfsNode, coreiface.CoreAPI, cid.Cid, []byte) {\n\tnode := HelpTestMockRepo(t, nil)\n\n\tout := make(chan interface{})\n\tadder, err := coreunix.NewAdder(context.Background(), node.Pinning, node.Blockstore, node.DAG)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tadder.Out = out\n\t// Set to default reed solomon for metadata\n\tdsize, psize, csize := TestRsDataSize, TestRsParitySize, 262144\n\tadder.Chunker = fmt.Sprintf(\"reed-solomon-%d-%d-%d\", dsize, psize, csize)\n\n\tfb := []byte(\"testfileA\")\n\tfsize := len(fb)\n\trfa := files.NewBytesFile(fb)\n\n\tgo func() {\n\t\tdefer close(out)\n\t\t_, err := adder.AddAllAndPin(rfa)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*5)\n\tdefer cancel()\n\n\tvar addedHash cid.Cid\n\tselect {\n\tcase o := <-out:\n\t\taddedHash = o.(*coreiface.AddEvent).Path.Cid()\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"add timed out\")\n\t}\n\n\tapi, err := coreapi.NewCoreAPI(node)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Extract and check metadata\n\tb, err := coreunix.GetMetaData(ctx, api, path.IpfsPath(addedHash))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar rsMeta chunker.RsMetaMap\n\tb1 := ftutil.GetMetadataElement(b)\n\terr = json.Unmarshal(b1, &rsMeta)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif rsMeta.NumData != uint64(dsize) {\n\t\tt.Fatal(\"reed solomon metadata num data does not match\")\n\t}\n\tif rsMeta.NumParity != uint64(psize) {\n\t\tt.Fatal(\"reed solomon metadata num parity does not match\")\n\t}\n\tif rsMeta.FileSize != uint64(fsize) {\n\t\tt.Fatal(\"reed solomon metadata file size does not match\")\n\t}\n\n\treturn node, api, addedHash, fb\n}",
"func TestDB(t *testing.T) {\n\tdb, dbName, dbClose := getDB(t)\n\tdefer dbClose()\n\n\t// Find the DB.\n\tvar actualDBName string\n\tif err := crdb.Execute(func() error {\n\t\treturn db.QueryRow(\n\t\t\t`SELECT database_name FROM [SHOW DATABASES] WHERE database_name = $1`, dbName,\n\t\t).Scan(&actualDBName)\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif actualDBName != dbName {\n\t\tt.Fatal(fmt.Sprintf(\"DB names do not match expected - %s, actual: %s\", dbName, actualDBName))\n\t}\n\n\t// Create a test table and insert some rows\n\ttable := createTestSimpleTable(t, db, dbName)\n\ttable.populateTable(t, 10)\n\tif count := table.getTableRowCount(t); count != 10 {\n\t\tt.Fatalf(\"Expected Rows 10, actual %d\", count)\n\t}\n}",
"func TestSmokeTfSingleRecordReader(t *testing.T) {\n\tconst path = \"data/tf-train-single.record\"\n\n\tf, err := os.Open(path)\n\ttassert.CheckFatal(t, err)\n\tdefer f.Close()\n\n\treadTfExamples, err := readExamples(f)\n\ttassert.CheckError(t, err)\n\n\tif len(readTfExamples) != 1 {\n\t\tt.Errorf(\"expected to read one tf.Examples, got %d\", len(readTfExamples))\n\t}\n}",
"func TestAddSiaFileFromReader(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\t// Create a fileset with file.\n\tsf, sfs, err := newTestFileSystemWithFile(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Add the existing file to the set again this shouldn't do anything.\n\tsr, err := sf.SnapshotReader()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\td, err := ioutil.ReadAll(sr)\n\tsr.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := sfs.AddSiaFileFromReader(bytes.NewReader(d), sfs.FileSiaPath(sf)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tnumSiaFiles := 0\n\terr = sfs.Walk(modules.RootSiaPath(), func(path string, info os.FileInfo, err error) error {\n\t\tif filepath.Ext(path) == modules.SiaFileExtension {\n\t\t\tnumSiaFiles++\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// There should be 1 siafile.\n\tif numSiaFiles != 1 {\n\t\tt.Fatalf(\"Found %v siafiles but expected %v\", numSiaFiles, 1)\n\t}\n\t// Load the same siafile again, but change the UID.\n\tb, err := ioutil.ReadFile(sf.SiaFilePath())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treader := bytes.NewReader(b)\n\tnewSF, newChunks, err := siafile.LoadSiaFileFromReaderWithChunks(reader, sf.SiaFilePath(), sfs.staticWal)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Save the file to a temporary location with the new uid.\n\tnewSF.UpdateUniqueID()\n\tnewSF.SetSiaFilePath(sf.SiaFilePath() + \"_tmp\")\n\tif err := newSF.SaveWithChunks(newChunks); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Grab the pre-import UID after changing it.\n\tpreImportUID := newSF.UID()\n\t// Import the file. This should work because the files no longer share the same\n\t// UID.\n\tb, err = ioutil.ReadFile(newSF.SiaFilePath())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Remove file at temporary location after reading it.\n\tif err := os.Remove(newSF.SiaFilePath()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\treader = bytes.NewReader(b)\n\tvar newSFSiaPath modules.SiaPath\n\tif err := newSFSiaPath.FromSysPath(sf.SiaFilePath(), sfs.Root()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := sfs.AddSiaFileFromReader(reader, newSFSiaPath); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Reload newSF with the new expected path.\n\tnewSFPath := filepath.Join(filepath.Dir(sf.SiaFilePath()), newSFSiaPath.String()+\"_1\"+modules.SiaFileExtension)\n\tnewSF, err = siafile.LoadSiaFile(newSFPath, sfs.staticWal)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// sf and newSF should have the same pieces.\n\tfor chunkIndex := uint64(0); chunkIndex < sf.NumChunks(); chunkIndex++ {\n\t\tpiecesOld, err1 := sf.Pieces(chunkIndex)\n\t\tpiecesNew, err2 := newSF.Pieces(chunkIndex)\n\t\tif err := errors.Compose(err1, err2); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !reflect.DeepEqual(piecesOld, piecesNew) {\n\t\t\tt.Log(\"piecesOld: \", piecesOld)\n\t\t\tt.Log(\"piecesNew: \", piecesNew)\n\t\t\tt.Fatal(\"old pieces don't match new pieces\")\n\t\t}\n\t}\n\tnumSiaFiles = 0\n\terr = sfs.Walk(modules.RootSiaPath(), func(path string, info os.FileInfo, err error) error {\n\t\tif filepath.Ext(path) == modules.SiaFileExtension {\n\t\t\tnumSiaFiles++\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// There should be 2 siafiles.\n\tif numSiaFiles != 2 {\n\t\tt.Fatalf(\"Found %v siafiles but expected %v\", numSiaFiles, 2)\n\t}\n\t// The UID should have changed.\n\tif newSF.UID() == preImportUID {\n\t\tt.Fatal(\"newSF UID should have changed after importing the file\")\n\t}\n\tif 
!strings.HasSuffix(newSF.SiaFilePath(), \"_1\"+modules.SiaFileExtension) {\n\t\tt.Fatal(\"SiaFile should have a suffix but didn't\")\n\t}\n\t// Should be able to open the new file from disk.\n\tif _, err := os.Stat(newSF.SiaFilePath()); err != nil {\n\t\tt.Fatal(err)\n\t}\n}",
"func TestCollectionResponse(t *testing.T) {\n\t// Make a colReq channel\n\tcolReq := make(chan ColRequest)\n\n\t// Activate database\n\tgo Database(colReq)\n\tclose(colReq)\n\n\t// Connect to existing collection\n\t// Build connection request\n\t// Send connection request\n\n\t// The following tests should be in separate functions:\n\t//\t(Not named TestXXX - they would then be run independently which would be bad)\n\n\t// Test Read:\n\t// Positive: To test Read we try to read something we know exists in\n\t// the collection and check that it matches.\n\t// Negative: Also try to read for a document that doesn't exist.\n\n\t// Test Create:\n\t// Positive: To test create, generate a random document, check that it is\n\t// unique and then put it in the database. Read to see that it is\n\t// there and matches the input.\n\n\t// Test Update:\n\t// Positive: To test update, pick a document, update it in a random\n\t// way, read it by id and then compare to see that the changes\n\t// are correct.\n\t// Negative: Also try to update an non existent document.\n\n\t// Test Destroy:\n\t// Positive: To test destroy, pick a document to destroy, and destroy it.\n\t// Read to make sure that it is not there.\n\t// Negative: Also try to destroy a non existent document.\n\n}",
"func ReadTestRecords(reader io.Reader) ([]*TestRecord, error) {\n\tret := []*TestRecord{}\n\tif err := json.NewDecoder(reader).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}",
"func TestGetSets(t *testing.T) {\n\ttests.ResetLog()\n\tdefer tests.DisplayLog()\n\n\tconst fixture = \"basic.json\"\n\tset1, err := qfix.Get(fixture)\n\tif err != nil {\n\t\tt.Fatalf(\"\\t%s\\tShould load query record from file : %v\", tests.Failed, err)\n\t}\n\tt.Logf(\"\\t%s\\tShould load query record from file.\", tests.Success)\n\n\tdb, err := db.NewMGO(tests.Context, tests.TestSession)\n\tif err != nil {\n\t\tt.Fatalf(\"\\t%s\\tShould be able to get a Mongo session : %v\", tests.Failed, err)\n\t}\n\tdefer db.CloseMGO(tests.Context)\n\n\tdefer func() {\n\t\tif err := qfix.Remove(db, prefix); err != nil {\n\t\t\tt.Fatalf(\"\\t%s\\tShould be able to remove the query set : %v\", tests.Failed, err)\n\t\t}\n\t\tt.Logf(\"\\t%s\\tShould be able to remove the query set.\", tests.Success)\n\t}()\n\n\tt.Log(\"Given the need to retrieve a list of query sets.\")\n\t{\n\t\tt.Log(\"\\tWhen using fixture\", fixture)\n\t\t{\n\t\t\tif err := query.Upsert(tests.Context, db, set1); err != nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be able to create a query set : %s\", tests.Failed, err)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be able to create a query set.\", tests.Success)\n\n\t\t\tset1.Name += \"2\"\n\t\t\tif err := query.Upsert(tests.Context, db, set1); err != nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be able to create a second query set : %s\", tests.Failed, err)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be able to create a second query set.\", tests.Success)\n\n\t\t\tsets, err := query.GetAll(tests.Context, db, nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be able to retrieve the query sets : %v\", tests.Failed, err)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be able to retrieve the query sets\", tests.Success)\n\n\t\t\tvar count int\n\t\t\tfor _, set := range sets {\n\t\t\t\tif len(set.Name) > len(prefix) && set.Name[0:len(prefix)] == prefix {\n\t\t\t\t\tcount++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif count != 2 {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould have two query sets : %d\", tests.Failed, count)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould have two query sets.\", tests.Success)\n\t\t}\n\t}\n}",
"func init() {\n\tif !fileExists(toolData) || !fileExists(userData) || !fileExists(rentalData) {\n\t\terr := createFiles(toolData, userData, rentalData)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfmt.Println(\"Populating Database\")\n\n\t\tgofakeit.Seed(0)\n\t\tgenerateTool := func(i int) *Tool {\n\t\t\treturn &Tool{\n\t\t\t\tID: i + 1,\n\t\t\t\tName: gofakeit.BuzzWord() + \" \" + gofakeit.HackerNoun(),\n\t\t\t\tDesc: gofakeit.HipsterSentence(5),\n\t\t\t\tPrice: gofakeit.Price(1, 2000),\n\t\t\t\tQuantity: gofakeit.Number(1, 30),\n\t\t\t}\n\t\t}\n\n\t\tgenerateUser := func(i int) *User {\n\t\t\treturn &User{\n\t\t\t\tID: i + 1,\n\t\t\t\tName: gofakeit.Name(),\n\t\t\t\tEmail: gofakeit.Email(),\n\t\t\t}\n\t\t}\n\n\t\tgenerateRental := func(i int) *Rental {\n\t\t\trand.Seed(rand.Int63n(1000))\n\t\t\treturn &Rental{\n\t\t\t\tID: i + 1,\n\t\t\t\tActive: true,\n\t\t\t\tToolID: rand.Intn(10) + 1,\n\t\t\t\tUserID: rand.Intn(10) + 1,\n\t\t\t}\n\t\t}\n\n\t\ttools := []Tool{}\n\t\tusers := []User{}\n\t\trentals := []Rental{}\n\n\t\tfor i := 0; i <= 10; i++ {\n\t\t\ttools = append(tools, *generateTool(i))\n\t\t\tusers = append(users, *generateUser(i))\n\t\t\trentals = append(rentals, *generateRental(i))\n\t\t}\n\n\t\tSave(toolData, tools)\n\t\tSave(userData, users)\n\t\tSave(rentalData, rentals)\n\t}\n}",
"func (hs *HealthStatusInfo) DBReadFailed() {\n\ths.lock()\n\tdefer hs.unLock()\n\tDBHealth.DBReadFailures++\n\tDBHealth.lastReadWriteErrorTime = time.Now()\n}",
"func addEntry(t *testing.T, key string, keyspace uint) {\n\t// Insert at least one event to make sure db exists\n\tc, err := rd.Dial(\"tcp\", host)\n\tif err != nil {\n\t\tt.Fatal(\"connect\", err)\n\t}\n\t_, err = c.Do(\"SELECT\", keyspace)\n\tif err != nil {\n\t\tt.Fatal(\"select\", err)\n\t}\n\tdefer c.Close()\n\t_, err = c.Do(\"SET\", key, \"bar\", \"EX\", \"360\")\n\tif err != nil {\n\t\tt.Fatal(\"SET\", err)\n\t}\n}",
"func (r *Database) Test() {\n\tvar (\n\t\tnum int\n\t)\n\trows, err := r.database.Query(\"select num from test\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\terr := rows.Scan(&num)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Println(num)\n\t}\n\terr = rows.Err()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func ReadSQLTest(cluster *v1.MySQLCluster, member string) (string, error) {\n\tBy(\"SELECT v FROM foo WHERE k=\\\"foo\\\"\")\n\toutput, err := ExecuteSQL(cluster, member, strings.Join([]string{\n\t\tfmt.Sprintf(\"use %s;\", TestDBName),\n\t\t`SELECT v FROM foo WHERE k=\"foo\";`,\n\t}, \" \"))\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"executing SQL\")\n\t}\n\n\treturn lastLine(output), nil\n}",
"func Test_Insert(t *testing.T) {\n\n\tt.Parallel()\n\tdailwslice := create_dailyweather()\n\tinserted := dailywdao.Insert(dailwslice)\n\tif inserted == false {\n\t\tt.Error(\"insert failed!!! oops!!\")\n\t}\n\n}",
"func TestUserstorage(t *testing.T) {\n t.Log(\"*** User data storage and retrieval test ***\")\n\n // initialize user\n u, err := InitUser(\"alice\",\"fubar\")\n if err != nil {\n t.Error(\"Failed to initialize user (\", err, \")\")\n } else {\n t.Log(\"Successfully stored user\", u)\n }\n\n // retrieve user \n v, err := GetUser(\"alice\", \"fubar\")\n if err != nil {\n t.Error(\"Failed to reload user\", err)\n } else {\n t.Log(\"Correctly retrieved user\", v)\n }\n}",
"func (th *testHelper) assertNoReadAccessToCollectionMPD(chaincodeName, marbleName string, peerList ...*nwo.Peer) {\n\tcommand := commands.ChaincodeQuery{\n\t\tChannelID: th.channelID,\n\t\tName: chaincodeName,\n\t\tCtor: fmt.Sprintf(`{\"Args\":[\"readMarblePrivateDetails\",\"%s\"]}`, marbleName),\n\t}\n\texpectedMsg := \"tx creator does not have read access permission\"\n\tfor _, peer := range peerList {\n\t\tth.queryChaincode(peer, command, expectedMsg, false)\n\t}\n}",
"func (t *DbService) Read(request *ReadRequest) (*ReadResponse, error) {\n\trsp := &ReadResponse{}\n\treturn rsp, t.client.Call(\"db\", \"Read\", request, rsp)\n}",
"func Read(dbName string, ul *[]define.User) {\n\n\t//path, err := filepath.Abs(base)\n\t//if err != nil {\n\t//\tfmt.Println(err)\n\t//}\n\n\t//absDir = filepath.Dir(path)\n\t//dbLocation = filepath.Join(absDir, dbDir)\n\t//dbAbsFile = filepath.Join(dbLocation, dbName)\n\n\tcsvFile, err := os.Open(filepath.Join(dbDir, dbName))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treader := csv.NewReader(csvFile)\n\n\tfor {\n\t\tline, err := reader.Read()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tuserID, _ := strconv.ParseInt(line[0], 10, 64)\n\t\ttmpUsers = append(tmpUsers,\n\t\t\tdefine.User{ID: userID,\n\t\t\t\tName: line[1],\n\t\t\t\tCell: line[2],\n\t\t\t\tAddress: line[3],\n\t\t\t\tBorn: func() time.Time {\n\t\t\t\t\tt, _ := time.Parse(\"2006.01.02\", line[4])\n\t\t\t\t\treturn t\n\t\t\t\t}(),\n\t\t\t\tPasswd: line[5],\n\t\t\t})\n\t}\n\t// assign the read users to define.UserList\n\t(*ul) = tmpUsers\n\t// purge the tmpUsers\n\ttmpUsers = []define.User{}\n}",
"func TestReadExistingLogs(t *testing.T) {\n\tt.Parallel()\n\toperator, logReceived, tempDir := newTestFileOperator(t, nil)\n\n\t// Create a file, then start\n\ttemp := openTemp(t, tempDir)\n\twriteString(t, temp, \"testlog1\\ntestlog2\\n\")\n\n\trequire.NoError(t, operator.Start(testutil.NewMockPersister(\"test\")))\n\tdefer func() {\n\t\trequire.NoError(t, operator.Stop())\n\t}()\n\n\twaitForMessage(t, logReceived, \"testlog1\")\n\twaitForMessage(t, logReceived, \"testlog2\")\n}",
"func TestReadAndSort(t *testing.T) {\n\tres := readAndSort(\"datatest/test.tsv\")\n\n\tif len(res) != 4 {\n\t\tt.Error(\"expected test.tsv to yield 4 values\")\n\t}\n\n\tif r := res[0].query; r != \"first\" {\n\t\tt.Error(\"expected first key to eql 'first' but got \", r)\n\t}\n\n\tif r := res[3].query; r != \"bar\" {\n\t\tt.Error(\"expected first key to eql 'bar' but got \", r)\n\t}\n\n}",
"func testDB(t *testing.T) (*DB, func()) {\n\tdb := NewDB()\n\tdb.SetTimeNow(newClock().Now)\n\tpath := testPath()\n\tctx := context.TODO()\n\tkey := keys.Rand32()\n\terr := db.OpenAtPath(ctx, path, key)\n\trequire.NoError(t, err)\n\n\treturn db, func() {\n\t\tdb.Close()\n\t\tos.Remove(path)\n\t}\n}",
"func (m *Storage) AddSampleRecipes() {\n\n\tfor _, recipe := range SampleMeals {\n\t\tm.recipes = append(m.recipes, recipe)\n\t}\n\n}",
"func read(db *sql.DB, todoID int) ([]Todo, error) {\n\tvar (\n\t\ttodoList = []Todo{}\n\t\tdbParams = []interface{}{}\n\t\twhereStatement string\n\t)\n\n\tif todoID != -1 {\n\t\twhereStatement += \"WHERE id = ? \"\n\t\tdbParams = append(dbParams, todoID)\n\t}\n\trows, err := db.Query(selectQry+whereStatement, dbParams...)\n\tif err != nil {\n\t\t//normally would need to do something with this error, logging most likely.\n\t\tfmt.Println(err)\n\t\treturn nil, fmt.Errorf(\"Failed to build todo list\")\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\ttodo := Todo{}\n\t\tif err := rows.Scan(&todo.ID, &todo.Title, &todo.Status); err != nil {\n\t\t\t//normally would need to do something with this error, logging most likely.\n\t\t\tfmt.Println(err)\n\t\t\treturn nil, fmt.Errorf(\"Failed to build todo list\")\n\t\t}\n\n\t\ttodoList = append(todoList, todo)\n\t}\n\treturn todoList, nil\n}",
"func TestNewDBlockExistingData(t *testing.T) {\n\tdefer cleanTestFiles()\n\n\tblk1, err := createTestBlock()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tblk1.Close()\n\n\t// create blk2 with same settings\n\tblk2, err := createTestBlock()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdefer blk2.Close()\n\n\tif len(blk2.segmentFiles) != 1 || len(blk2.segmentMmaps) != 1 {\n\t\tt.Fatal(\"number of segments should be 1 at start\")\n\t}\n\n\tif blk2.recordSize != 400 {\n\t\tt.Fatal(\"record size should be equal to PayloadSize x PayloadCount\")\n\t}\n}",
"func (s *MockStore) Read(collection, id string) (string, error) {\n\tc, ok := s.Data[collection]\n\tif !ok {\n\t\treturn \"\", errors.New(\"No collection found \" + collection)\n\t}\n\titem, ok := c[id]\n\tif !ok {\n\t\treturn \"\", errors.New(\"No item with ID found \" + id)\n\t}\n\treturn item, nil\n}",
"func testInsertRow(t *testing.T) {\n\tn := note.Note{\n\t\tTypeID: 10001,\n\t\tMemberID: 1,\n\t\tContent: \"This is the note content\",\n\t}\n\terr := n.InsertRow(ds)\n\tif err != nil {\n\t\tt.Errorf(\"Note.InsertRow() err = %s\", err)\n\t}\n\t// Re-fetch the note and verify the member id\n\tn2, err := note.ByID(ds, n.ID)\n\tif err != nil {\n\t\tt.Fatalf(\"note.ByID(%d) err = %s\", n.ID, err)\n\t}\n\tgot := n2.MemberID\n\twant := n.MemberID\n\tif got != want {\n\t\tt.Errorf(\"note.MemberID = %d, want %d\", got, want)\n\t}\n}",
"func TestWithRandomKeys(t *testing.T) {\n\tseedRand()\n\tfor i := 0; i < runTestCount; i++ {\n\t\trandomTest(t)\n\t}\n}",
"func ReadTabTestData(filename string) ([]testrow, error) {\n\trows := make([]testrow, 0) // rows of test data\n\t// read data from tab-delimited file\n\tcsvFile, err := os.Open(\"./testdata.txt\")\n\tif err != nil {\n\t\treturn rows, err\n\t}\n\tdefer csvFile.Close()\n\treader := csv.NewReader(csvFile)\n\treader.Comma = '\\t' // Use tab-delimited instead of comma <---- here!\n\treader.FieldsPerRecord = -1\n\tcsvdata, err := reader.ReadAll() // test file is not huge\n\tif err != nil {\n\t\treturn rows, err\n\t}\n\t// Put data into table of testrows\n\ttripid := GenerateRandomTripid() // generate a random trip ID\n\tfor i := range csvdata {\n\t\tvar ritem testrow // working test row\n\t\tritem.hdr = make(map[string]([]string))\n\t\trow := csvdata[i]\n\t\tif verbose || i == 0 {\n\t\t\tfmt.Printf(\"Test entry\")\n\t\t\tfor j := range row {\n\t\t\t\tfmt.Printf(\" %d=\\\"%s\\\"\", j, row[j])\n\t\t\t}\n\t\t\tfmt.Printf(\"\\n\")\n\t\t}\n\t\tvar v vehlogevent // logger data\n\t\tv.Timestamp, err = strconv.ParseInt(row[1], 10, 64)\n\t\tif err != nil { // file may have header lines\n\t\t\tcontinue\n\t\t} // skip them\n\t\tsr, _ := strconv.ParseInt(row[0], 10, 32)\n\t\tv.Serial = int32(sr)\n\t\tv.Tripid = row[10]\n\t\tsv, _ := strconv.ParseInt(row[11], 10, 8)\n\t\tv.Severity = int8(sv)\n\t\tv.Eventtype = row[12]\n\t\tv.Msg = row[13]\n\t\tau, _ := strconv.ParseFloat(row[14], 32)\n\t\tv.Auxval = float32(au)\n\t\tv.Debug = 0\n\t\tritem.hdr[\"X-Secondlife-Shard\"] = append(make([]string, 0), row[2])\n\t\tritem.hdr[\"X-Secondlife-Owner-Name\"] = append(make([]string, 0), row[3])\n\t\tritem.hdr[\"X-Secondlife-Object-Name\"] = append(make([]string, 0), row[4])\n\t\tritem.hdr[\"X-Secondlife-Region\"] = append(make([]string, 0), fmt.Sprintf(\"%s (%s,%s)\", row[5], row[6], row[7]))\n\t\tritem.hdr[\"X-Secondlife-Local-Position\"] = append(make([]string, 0), fmt.Sprintf(\"(%s,%s,0.0)\", row[8], row[9]))\n\t\t// Adjust test data\n\t\tv.Tripid = tripid // use random tripID\n\t\t// Make up JSON\n\t\tjson, err := json.Marshal(v) // convert to JSON\n\t\tif err != nil {\n\t\t\treturn rows, err\n\t\t}\n\t\tif verbose || i == 0 {\n\t\t\tfmt.Printf(\"JSON: %s\\n\", json)\n\t\t}\n\t\t// Sign JSON\n\t\tritem.hdr[\"X-Authtoken-Name\"] = append(make([]string, 0), testtokenname)\n\t\tritem.json = []byte(json)\n\t\tSignLogMsg(ritem.json, ritem.hdr, testtokenname)\n\t\trows = append(rows, ritem) // add new row\n\n\t}\n\treturn rows, nil\n}",
"func main() {\n\n\t// Connect to database\n\tdb, err := database.NewDB()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\tnumberOfRecords := 25\n\tgroupCount := 4\n\n\t// Create numberOfRecords records\n\tfor i := 0; i < numberOfRecords; i++ {\n\t\tdescription := fake.Paragraph()\n\t\tdescription = description[:10]\n\t\tpurchaser := rand.Intn(groupCount) + 1\n\t\tamount := rand.Intn(8000) + 100\n\n\t\t// Step 1: Initial transaction\n\t\tfmt.Printf(\"Seeding Transaction %v...\\n\", i+1)\n\t\ttx, err := db.Begin()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tstmt, err := tx.Prepare(`\n\t\t\tINSERT INTO transactions \n\t\t\tVALUES(NULL, ?, ?, ?)\n\t\t`)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tres, err := stmt.Exec(\n\t\t\tdescription,\n\t\t\tpurchaser,\n\t\t\tamount)\n\n\t\tlastInsertID, err := res.LastInsertId()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t// Save\n\t\ttx.Commit()\n\t\tfmt.Print(\"...initial transaction saved!\\n\")\n\n\t\t// Step 2: Generate TransactionBeneficiaries data\n\t\tbeneficiaries := generateRandomBeneficiaries(groupCount)\n\n\t\tfor index, beneficiaryID := range beneficiaries {\n\t\t\ttx, err = db.Begin()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tstmt, err = tx.Prepare(`\n\t\t\t\tINSERT INTO transactions_beneficiaries \n\t\t\t\tVALUES(?, ?) \n\t\t\t`)\n\t\t\tstmt.Exec(lastInsertID, beneficiaryID)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\ttx.Commit()\n\t\t\tfmt.Printf(\"...beneficiary %v saved!\\n\", index)\n\t\t}\n\t\tfmt.Print(\"...success!\\n\")\n\t\tfmt.Print(\"-----------\\n\")\n\t}\n}",
"func TestTest_Sample(t *testing.T) {\n\tt.Parallel()\n\n\t// get DynamoDB client and do something\n\tsvc, err := test.GetClient()\n\trequire.Nil(t, err)\n\trequest := svc.GetItemRequest(&dynamodb.GetItemInput{\n\t\tKey: map[string]dynamodb.AttributeValue{\n\t\t\t\"PK\": {S: aws.String(\"abc123\")},\n\t\t},\n\t\tTableName: aws.String(table),\n\t})\n\tresponse, err := request.Send(context.Background())\n\trequire.Nil(t, err)\n\tassert.Equal(t, \"abc123\", *(response.Item[\"PK\"].S))\n\tassert.Equal(t, \"John\", *(response.Item[\"name\"].S))\n}",
"func testDB(t *testing.T) *bolt.DB {\n\tdb, err := bolt.Open(\"jobs_test.db\", 0600, &bolt.Options{Timeout: 1 * time.Second})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn db\n}",
"func TestDebeziumIO_BasicRead(t *testing.T) {\n\tintegration.CheckFilters(t)\n\tcheckFlags(t)\n\n\tctx := context.Background()\n\tdbname := \"inventory\"\n\tusername := \"debezium\"\n\tpassword := \"dbz\"\n\tport := setupTestContainer(ctx, t, dbname, username, password)\n\thost := \"localhost\"\n\tconnectionProperties := []string{\n\t\t\"database.dbname=inventory\",\n\t\t\"database.server.name=dbserver1\",\n\t\t\"database.include.list=inventory\",\n\t\t\"include.schema.changes=false\",\n\t}\n\tread := ReadPipeline(expansionAddr, username, password, dbname, host, port, debeziumio.PostgreSQL, 1, connectionProperties)\n\tptest.RunAndValidate(t, read)\n}",
"func TestTransactionSample(t *testing.T) {\n\tos.RemoveAll(dbPathT)\n\n\toptions := grocksdb.NewDefaultOptions()\n\toptions.SetCreateIfMissing(true)\n\n\ttxnDbOptions := grocksdb.NewDefaultTransactionDBOptions()\n\ttxnDb, err := grocksdb.OpenTransactionDb(options, txnDbOptions, dbPathT)\n\tassert.Nil(t, err)\n\n\twriteOptions := grocksdb.NewDefaultWriteOptions()\n\treadOptions := grocksdb.NewDefaultReadOptions()\n\n\treadCommittedT(t, txnDb, writeOptions, readOptions)\n\trepeatableReadT(t, txnDb, writeOptions, readOptions)\n\treadCommittedMonotonicAtomicViews(t, txnDb, writeOptions, readOptions)\n}",
"func TestIterate(t *testing.T) {\n\tbuffer := bytes.NewBufferString(s19TestFile)\n\treader := Open(buffer)\n\tif reader != nil {\n\t\tdata := make([]byte, 0)\n\t\texpectedAddress := uint32(0x400)\n\t\tfor it := range reader.Records() {\n\t\t\trec := it.Record\n\t\t\terr := it.Error\n\t\t\tif err == io.EOF {\n\t\t\t\tt.Fatal(\"EOF not handled properly\")\n\t\t\t}\n\t\t\tif rec.Address() != expectedAddress {\n\t\t\t\tt.Fatalf(\"Address mismatch expected: %v got: %v\", expectedAddress, rec.Address())\n\t\t\t}\n\t\t\texpectedAddress += uint32(len(rec.Data()))\n\t\t\tdata = append(data, rec.Data()...)\n\t\t}\n\t\tif bytes.Compare(data, binaryTestFile[:]) != 0 {\n\t\t\tt.Error(\"Data read did not match reference values\")\n\t\t}\n\t} else {\n\t\tt.Fatal(\"Open call failed\")\n\t}\n}",
"func Test_SLock(t *testing.T) {\n\tr, _ := initRepo(user, password, database, ip, port)\n\n\ttx, err := r.db.Begin()\n\tif err != nil {\n\t\treturn\n\t}\n\n\trows, err := tx.Query(querySql, \"aa\", \"bb\", \"cc\")\n\tif err != nil {\n\t\t_ = tx.Rollback()\n\t\treturn\n\t}\n\n\t// sleep 10s\n\t// then exec \"select * from xxx for update\" for testing\n\ttime.Sleep(10 * time.Second)\n\tfmt.Println(rows.Next())\n\n\tdefer tx.Commit()\n\n\treturn\n}",
"func TestAdd(t *testing.T) {\n\t_ph := New()\n\t_ph.Add(Item{})\n\tif len(_ph.Items) != 1 {\n\t\tt.Errorf(\"Item was not added\")\n\t}\n}",
"func TestGetScripts(t *testing.T) {\n\ttests.ResetLog()\n\tdefer tests.DisplayLog()\n\n\tconst fixture = \"basic.json\"\n\tscr1, err := sfix.Get(fixture)\n\tif err != nil {\n\t\tt.Fatalf(\"\\t%s\\tShould load script record from file : %v\", tests.Failed, err)\n\t}\n\tt.Logf(\"\\t%s\\tShould load script record from file.\", tests.Success)\n\n\tdb, err := db.NewMGO(tests.Context, tests.TestSession)\n\tif err != nil {\n\t\tt.Fatalf(\"\\t%s\\tShould be able to get a Mongo session : %v\", tests.Failed, err)\n\t}\n\tdefer db.CloseMGO(tests.Context)\n\n\tdefer func() {\n\t\tif err := sfix.Remove(db, prefix); err != nil {\n\t\t\tt.Fatalf(\"\\t%s\\tShould be able to remove the scripts : %v\", tests.Failed, err)\n\t\t}\n\t\tt.Logf(\"\\t%s\\tShould be able to remove the scripts.\", tests.Success)\n\t}()\n\n\tt.Log(\"Given the need to retrieve a list of scripts.\")\n\t{\n\t\tt.Log(\"\\tWhen using two scripts\")\n\t\t{\n\t\t\tif err := script.Upsert(tests.Context, db, scr1); err != nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be able to create a script : %s\", tests.Failed, err)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be able to create a script.\", tests.Success)\n\n\t\t\tscr2 := scr1\n\t\t\tscr2.Name += \"2\"\n\t\t\tif err := script.Upsert(tests.Context, db, scr2); err != nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be able to create a second script : %s\", tests.Failed, err)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be able to create a second script.\", tests.Success)\n\n\t\t\tscripts, err := script.GetAll(tests.Context, db, nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be able to retrieve the scripts : %v\", tests.Failed, err)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be able to retrieve the scripts\", tests.Success)\n\n\t\t\tvar count int\n\t\t\tfor _, scr := range scripts {\n\t\t\t\tif len(scr.Name) > len(prefix) && scr.Name[0:len(prefix)] == prefix {\n\t\t\t\t\tcount++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// When tests are running in parallel with the query and exec package, we could\n\t\t\t// have more scripts.\n\n\t\t\tif count < 2 {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould have at least two scripts : %d : %v\", tests.Failed, len(scripts), scripts)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould have at least two scripts.\", tests.Success)\n\n\t\t\tvar found int\n\t\t\tfor _, s := range scripts {\n\t\t\t\tif s.Name == scr1.Name || s.Name == scr2.Name {\n\t\t\t\t\tfound++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif found != 2 {\n\t\t\t\tt.Errorf(\"\\t%s\\tShould have retrieve the correct scripts : found[%d]\", tests.Failed, found)\n\t\t\t} else {\n\t\t\t\tt.Logf(\"\\t%s\\tShould have retrieve the correct scripts.\", tests.Success)\n\t\t\t}\n\t\t}\n\t}\n}",
"func readDatabase() chan t {\n\tout := make(chan t) // HL\n\tgo func(output chan<- t) {\n\t\tfor _, item := range store {\n\t\t\titem = process(item, \"database\")\n\t\t\toutput <- item // HL\n\t\t}\n\t\tclose(output)\n\t}(out)\n\treturn out // HL\n}",
"func TestLMigrate(t *testing.T) {\n\tvar m = newMigrator()\n\n\tm.flushdst = true\n\tm.flushsrc = true\n\n\t// Just use a separate database on the single redis instance.\n\tm.dstdb = 1\n\tm.initRedis()\n\n\ttestkey := \"list1\"\n\ttestLength := 40\n\tfor i := 0; i < testLength; i++ {\n\t\terr := sclient.RPush(testkey, fmt.Sprintf(\"value-%d\", i)).Err()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tcmdKey = testkey\n\tvar wg sync.WaitGroup\n\tvar lm = &lmigrator{key: cmdKey}\n\tlcount = 7\n\n\tlm.migrate(&wg, dummyProgressPool)\n\n\tlogger.Debugf(\"Migrated test list...%v\", dclient.LLen(testkey).Val())\n\n\tassert.Equal(t, int64(testLength), dclient.LLen(testkey).Val())\n\tlogger.Debug(\"Indexing through all values...\")\n\tfor i := 0; i < testLength; i++ {\n\t\tget := dclient.LIndex(testkey, int64(i))\n\t\tval, err := get.Result()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tassert.Equal(t, fmt.Sprintf(\"value-%d\", i), val)\n\t}\n\n\tsclient.FlushAll()\n\tdclient.FlushAll()\n}",
"func TestNewRandKeys(t *testing.T) {\n\tkeys, err := NewRandKeys()\n\tif err == io.EOF {\n\t\tt.Fatal(\"got EOF: NewRandKeys should never give an EOF error\")\n\t}\n\n\tassert.NoError(t, err)\n\tassert.NotNil(t, keys)\n}",
"func TestTableAdd(t *testing.T) {\n\n\t//iterate over test array\n\tfor _, test := range testingArray {\n\n\t\t//call Add and get the result\n\t\tresult := Add(test.x, test.y)\n\n\t\t//compare the result to expected. return error if failed\n\t\tif result != test.expected {\n\t\t\tt.Error(\"Testing failed\")\n\t\t}\n\t}\n\n}",
"func testReadWithCustomServer(t *testing.T, testData []byte, serverFunc func(conn *WSConn)) {\n\t// Start the server.\n\twst := newWSTester(serverFunc)\n\tdefer wst.Close()\n\n\t// Connect a client that reads all of the data at once.\n\tclient, err := wst.NewClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treadBuf := make([]byte, len(testData))\n\t_, err = io.ReadFull(client, readBuf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(readBuf, testData) {\n\t\tt.Fatal(\"readBuf doesn't match testData\")\n\t}\n\n\t// Connect a client that reads 2 times 50% of the data.\n\tclient, err = wst.NewClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t_, err = io.ReadFull(client, readBuf[:len(readBuf)/2])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t_, err = io.ReadFull(client, readBuf[len(readBuf)/2:])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(readBuf, testData) {\n\t\tt.Fatal(\"readBuf doesn't match testData\")\n\t}\n\n\t// Connect a client that reads more than the available data.\n\tclient, err = wst.NewClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t_, err = io.ReadFull(client, readBuf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tn, err := client.Read(readBuf)\n\tif n != 0 || err != nil {\n\t\tt.Fatal(\"expecte n == 0 and err == nil\", n, err)\n\t}\n\n\t// Connect a client that reads only half the data without closing the conn.\n\t// This shouldn't block the next client.\n\tclient, err = wst.NewClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t_, err = io.ReadFull(client, readBuf[:len(readBuf)/2])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Connect a client that does random reading until all of the data is read.\n\tclient, err = wst.NewClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar readBytes int\n\tfor remainingBytes := len(testData) - readBytes; remainingBytes > 0; remainingBytes = len(testData) - readBytes {\n\t\ttoRead := fastrand.Intn(remainingBytes) + 1\n\t\t_, err = io.ReadFull(client, readBuf[readBytes:][:toRead])\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\treadBytes += toRead\n\t}\n\tif !bytes.Equal(readBuf, testData) {\n\t\tt.Fatal(\"readBuf doesn't match testData\")\n\t}\n}"
] | [
"0.65255356",
"0.62917155",
"0.59972405",
"0.5882072",
"0.5842434",
"0.5656843",
"0.56230897",
"0.551995",
"0.55115515",
"0.5494585",
"0.54865044",
"0.54829204",
"0.5474204",
"0.54673815",
"0.5457889",
"0.5437312",
"0.5421605",
"0.54197836",
"0.5406034",
"0.5395217",
"0.5369563",
"0.53561175",
"0.5355478",
"0.53494513",
"0.53464824",
"0.53352433",
"0.53119826",
"0.53026086",
"0.53014994",
"0.5297486",
"0.52813274",
"0.5271755",
"0.5241902",
"0.5239677",
"0.52372193",
"0.52055514",
"0.5205023",
"0.5203024",
"0.51972884",
"0.5192778",
"0.5176454",
"0.51655537",
"0.5151094",
"0.5149485",
"0.51408607",
"0.5122684",
"0.5112942",
"0.5099672",
"0.5094217",
"0.50840175",
"0.5076511",
"0.50741035",
"0.50735676",
"0.5057334",
"0.50447357",
"0.50352496",
"0.50329924",
"0.50308764",
"0.5022657",
"0.50127167",
"0.50055444",
"0.49936628",
"0.4992616",
"0.49907464",
"0.4989172",
"0.49866068",
"0.49864623",
"0.4971604",
"0.49685985",
"0.49661002",
"0.49602243",
"0.49528185",
"0.49397963",
"0.49385586",
"0.49377316",
"0.49375108",
"0.49357566",
"0.4920203",
"0.49117702",
"0.49111158",
"0.49091747",
"0.49071562",
"0.4901999",
"0.48995262",
"0.48974198",
"0.48881558",
"0.4886067",
"0.48819306",
"0.48793334",
"0.48752493",
"0.48683605",
"0.4857729",
"0.48572692",
"0.48566112",
"0.48536807",
"0.48457974",
"0.48440388",
"0.48435763",
"0.48384842",
"0.48365003"
] | 0.69003636 | 0 |
GetUsersHandler lists all users | func GetUsersHandler(w http.ResponseWriter, r *http.Request) {
var users []User
for _, v := range Listusers {
users = append(users, v)
}
w.Header().Set("Content-Type", "application/json")
j, err := json.Marshal(users)
if err != nil {
panic(err)
}
w.WriteHeader(http.StatusOK)
w.Write(j)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func getUsersHandler(c *gin.Context) {\n\tuser, _ := c.Get(JwtIdentityKey)\n\n\t// Role check.\n\tif !isAdmin(user) {\n\t\tc.JSON(http.StatusUnauthorized, gin.H{\"message\": \"unauthorized\"})\n\t\treturn\n\t}\n\n\tpage := c.DefaultQuery(\"page\", \"1\")\n\tcount := c.DefaultQuery(\"count\", \"10\")\n\tpageInt, _ := strconv.Atoi(page)\n\tcountInt, _ := strconv.Atoi(count)\n\n\tif page == \"0\" {\n\t\tpageInt = 1\n\t}\n\n\tvar wg sync.WaitGroup\n\tvar users *[]types.User\n\tvar usersCount int\n\n\tdb := data.New()\n\twg.Add(1)\n\tgo func() {\n\t\tusers = db.Users.GetUsers((pageInt-1)*countInt, countInt)\n\t\twg.Done()\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tusersCount = db.Users.GetUsersCount()\n\t\twg.Done()\n\t}()\n\twg.Wait()\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"code\": http.StatusOK,\n\t\t\"users\": users,\n\t\t\"count\": usersCount,\n\t})\n}",
"func GetUsers(clients *common.ClientContainer, handler common.HandlerInterface) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tusers, err := handler.GetUsers(clients)\n\t\tif err != nil {\n\t\t\tlog.Logger.Error(err)\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\"Internal server error occured\")\n\t\t\treturn\n\t\t}\n\t\tw.Write(users)\n\t}\n}",
"func (auh *AdminUserHandler) GetUsers(w http.ResponseWriter,\n\tr *http.Request, _ httprouter.Params) {\n\n\tvar apiKey = r.Header.Get(\"api-key\")\n\tif apiKey == \"\" || apiKey != adminApiKey {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t\treturn\n\t}\n\tusers, errs := auh.userService.Users()\n\n\tif len(errs) > 0 {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\toutput, err := json.MarshalIndent(users, \"\", \"\\t\")\n\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(output)\n\treturn\n\n}",
"func (api *API) getUsersHandler() service.Handler {\n\treturn func(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\t\tusers, err := user.LoadAll(ctx, api.mustDB(), user.LoadOptions.WithOrganization)\n\t\tif err != nil {\n\t\t\treturn sdk.WrapError(err, \"cannot load user from db\")\n\t\t}\n\t\treturn service.WriteJSON(w, users, http.StatusOK)\n\t}\n}",
"func (u *UserServiceHandler) List(ctx context.Context) ([]User, error) {\n\n\turi := \"/v1/user/list\"\n\n\treq, err := u.client.NewRequest(ctx, http.MethodGet, uri, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar users []User\n\terr = u.client.DoWithContext(ctx, req, &users)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn users, nil\n}",
"func GetUsersHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tj, status := users.GetAllUsers()\n\tw.WriteHeader(status)\n\tw.Write(j)\n}",
"func GetUsers(w http.ResponseWriter, r *http.Request) {\n\tvar users []UsersData\n\terr := model.FindAll(nil, &users)\n\tif err != nil {\n\t\tfmt.Println(\"err\", err)\n\t\tw.Write([]byte(\"Something wen't wrong!!\"))\n\t} else {\n\t\trender.JSON(w, 200, &users)\n\t}\n}",
"func GetUsers(w http.ResponseWriter, r *http.Request) {\n\tloginOrName := strings.ToLower(r.URL.Query().Get(\"user\"))\n\n\tdb, err := database.Connect()\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\trepository := repository.NewRepositoryUser(db)\n\n\tusers, err := repository.SearchByLoginOrName(loginOrName)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, http.StatusOK, users)\n\n}",
"func (uc UserController) GetUsers(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\n\tvar ul []models.User\n\n\t// Fetch user\n\tif err := uc.session.DB(\"todos\").C(\"users\").Find(nil).All(&ul); err != nil {\n\t\tw.WriteHeader(404)\n\t\treturn\n\t}\n\n\t// Marshal provided interface into JSON structure\n\tuj, _ := json.Marshal(ul)\n\n\t// Write content-type, statuscode, payload\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(200)\n\tfmt.Fprintf(w, \"%s\", uj)\n}",
"func (h *Handler) GetUsers(w http.ResponseWriter, r *http.Request) {\n\tvar users []User\n\tcur, err := h.Collection.Find(context.TODO(), bson.D{{}}, options.Find())\n\tif err != nil {\n\t\th.Logger.Errorf(\"err retrieving cursor item: %s\", err)\n\t\thttp.Error(w, http.StatusText(500), 500)\n\t\treturn\n\t}\n\tfor cur.Next(context.TODO()) {\n\t\tuser := &User{}\n\t\terr := cur.Decode(&user)\n\t\tif err != nil {\n\t\t\th.Logger.Errorf(\"err decoding item: %s\", err)\n\t\t\thttp.Error(w, http.StatusText(500), 500)\n\t\t\treturn\n\t\t}\n\t\tuser.Password = \"\" // Never return password hashes\n\t\tusers = append(users, *user)\n\t}\n\trender.JSON(w, r, users) // A chi router helper for serializing and returning json\n}",
"func (h *handler) Users(w http.ResponseWriter, r *http.Request) {\n\tapiReq, err := http.NewRequest(\"GET\", h.serverAddress+\"/users\", nil)\n\tif err != nil {\n\t\tserverError(w, err)\n\t\treturn\n\t}\n\n\tclient := &http.Client{}\n\tres, err := client.Do(apiReq)\n\tif err != nil {\n\t\tserverError(w, err)\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\n\tvar uis []socialnet.UserItem\n\terr = json.NewDecoder(res.Body).Decode(&uis)\n\tif err != nil {\n\t\tserverError(w, err)\n\t\treturn\n\t}\n\n\terr = h.template.ExecuteTemplate(w, \"users.html\", uis)\n\tif err != nil {\n\t\tserverError(w, fmt.Errorf(\"failed to execute template users.html: %s\", err))\n\t\treturn\n\t}\n}",
"func GetUsers(c *gin.Context) {\n\tvar user []Models.User\n\tvar u Models.User\n\terr := Models.GetAllUsers(&user)\n\tif err != nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"error\" : gin.H { \n\t\t\t\"status\": http.StatusNotFound,\n\t\t\t\"message\": err.Error(),\n\t\t}})\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t} else {\n\t\tlog.Println(\"====== Bind By Query String ======\")\n\t\tlog.Println(u.Nombre)\n\t\t//var rubro []Models.RubroUsuario\n\t \n\t\tfmt.Println(c.Request.URL.Query())\n\t\t page, _ := strconv.Atoi(c.DefaultQuery(\"page\", \"1\"))\n\t\t limit, _ := strconv.Atoi(c.DefaultQuery(\"limit\", \"50\"))\n\t\n\t\t paginator := pagination.Paging(&pagination.Param{\n\t\t\tDB: Config.DB.Preload(\"Rubros\").Preload(\"Unidades\"),\n\t\t\tPage: page,\n\t\t\tLimit: limit,\n\t\t\tOrderBy: []string{\"id\"},\n\t\t\tShowSQL: true,\n\t\t}, &user)\n \n\t\tc.JSON(200, paginator)\n\n\t}\n}",
"func GetUsers(req *http.Request, render render.Render, account services.Account) {\n qs := req.URL.Query()\n userIDs := qs[\"userId\"]\n var users []models.User\n for _, userID := range userIDs {\n if user, err := account.GetUser(userID); err != nil {\n render.JSON(err.HttpCode, err)\n return\n } else {\n users = append(users, *user)\n }\n }\n render.JSON(http.StatusOK, users)\n}",
"func (srv *UsersService) ListHandler(ctx *gin.Context) {\n\tlogger := srv.logger.New(\"action\", \"ListHandler\")\n\n\tcurrentUser := GetCurrentUser(ctx)\n\n\tlimitQuery := ctx.DefaultQuery(\"limit\", \"10\")\n\tpageQuery := ctx.DefaultQuery(\"page\", \"1\")\n\tparams := ctx.Request.URL.Query()\n\n\tvar adminsRoleIncluded = false\n\n\troles := params[\"filter[role_name]\"]\n\tif len(roles) > 0 {\n\t\tfor key, role := range roles {\n\t\t\t// remove root from role names if user is not root\n\t\t\t// only root can see root users\n\t\t\tif role == models.RoleRoot && currentUser.RoleName != models.RoleRoot {\n\t\t\t\tcopy(roles[key:], roles[key+1:])\n\t\t\t\troles[len(roles)-1] = \"\"\n\t\t\t\troles = roles[:len(roles)-1]\n\t\t\t}\n\t\t\tif role == models.RoleRoot || role == models.RoleAdmin {\n\t\t\t\tadminsRoleIncluded = true\n\t\t\t}\n\t\t}\n\t} else {\n\t\tadminsRoleIncluded = true\n\t}\n\n\tvar hasPerm bool\n\tif adminsRoleIncluded {\n\t\thasPerm = srv.PermissionsService.CanViewAdminProfile(currentUser.UID)\n\t} else {\n\t\thasPerm = srv.PermissionsService.CanViewUserProfile(currentUser.UID)\n\t}\n\n\tif !hasPerm {\n\t\tsrv.ResponseService.Forbidden(ctx)\n\t\treturn\n\t}\n\n\tquery := srv.Repository.GetUsersRepository().Filter(params)\n\n\tpagination, err := srv.Repository.GetUsersRepository().Paginate(query, pageQuery, limitQuery, serializers.NewUsers())\n\tif err != nil {\n\t\tlogger.Error(\"сan't load list of user\", \"error\", err)\n\t\t// Returns a \"400 StatusBadRequest\" response\n\t\tsrv.ResponseService.Error(ctx, responses.CannotRetrieveCollection, \"Can't load list of users\")\n\t\treturn\n\t}\n\n\t// Returns a \"200 OK\" response\n\tsrv.ResponseService.OkResponse(ctx, pagination)\n}",
"func (h *HTTPClientHandler) getAllUsersHandler(w http.ResponseWriter, r *http.Request) {\n\n\tuserid, _ := r.URL.Query()[\"q\"]\n\t// looking for specific user\n\tif len(userid) > 0 {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"userid\": userid[0],\n\t\t}).Info(\"Looking for user..\")\n\n\t\tuser, err := h.db.getUser(userid[0])\n\n\t\tif err == nil {\n\t\t\t// Marshal provided interface into JSON structure\n\t\t\tresponse := UserResource{Data: user}\n\t\t\tuj, _ := json.Marshal(response)\n\n\t\t\t// Write content-type, statuscode, payload\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(200)\n\t\t\tfmt.Fprintf(w, \"%s\", uj)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t}).Warn(\"Failed to insert..\")\n\n\t\t\tcontent, code := responseDetailsFromMongoError(err)\n\n\t\t\t// Marshal provided interface into JSON structure\n\t\t\tuj, _ := json.Marshal(content)\n\n\t\t\t// Write content-type, statuscode, payload\n\t\t\twriteJsonResponse(w, &uj, code)\n\t\t\treturn\n\n\t\t}\n\t}\n\n\tlog.Warn(len(userid))\n\t// displaying all users\n\tresults, err := h.db.getUsers()\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Error(\"Got error when tried to get all users\")\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"count\": len(results),\n\t}).Info(\"number of users\")\n\n\t// Marshal provided interface into JSON structure\n\tresponse := UsersResource{Data: results}\n\tuj, _ := json.Marshal(response)\n\n\t// Write content-type, statuscode, payload\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(200)\n\tfmt.Fprintf(w, \"%s\", uj)\n}",
"func (h *Handler) getAllUsers(c *gin.Context) handlerResponse {\n\n\tusers, err := h.service.User.GetAll()\n\tif err != nil {\n\t\treturn handleError(err)\n\t}\n\tremovePasswords(users)\n\treturn handleOK(StringMap{\"users\": users})\n}",
"func (a *Server) ListUsers(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"lists all users\")\n}",
"func (ac *ApiConfig) GetAllUsersHandler(w http.ResponseWriter, r *http.Request) {\n\tlmt := r.URL.Query().Get(\"limit\")\n\toff := r.URL.Query().Get(\"offset\")\n\n\tlimit, err := strconv.Atoi(lmt)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\toffset, err := strconv.Atoi(off)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tusers, err := ac.DHolder.GetAllUsers(limit, offset)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\terr = dResponseWriter(w, users, http.StatusOK)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\treturn\n\t}\n\n\treturn\n}",
"func (s *ServerState) getUsers(c *gin.Context) {\n\tvar u []User\n\tif err := s.DB.Select(&u, \"select * from users\"); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": err})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\"user\": u})\n}",
"func GetAllUsers(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"Getting all users\"))\n}",
"func UsersGet(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tlog.Println(\"starting retrieval\")\n\tstart := 0\n\tlimit := 10\n\n\tnext := start + limit\n\n\tw.Header().Set(\"Pragma\", \"no-cache\")\n\tw.Header().Set(\"Link\", \"<http://localhost:8080/api/users?start=\"+string(next)+\"; rel=\\\"next\\\"\")\n\n\trows, _ := database.Query(\"SELECT * FROM users LIMIT 10\")\n\n\tusers := Users{}\n\n\tfor rows.Next() {\n\t\tuser := User{}\n\t\trows.Scan(&user.ID, &user.Username, &user.First, &user.Last, &user.Email)\n\t\tusers.Users = append(users.Users, user)\n\t}\n\n\toutput, err := json.Marshal(users)\n\tif err != nil {\n\t\tfmt.Fprintln(w, \"Something went wrong while processing your request: \", err)\n\t}\n\n\tfmt.Fprintln(w, string(output))\n}",
"func GetUsers(c *gin.Context, client *statsd.Client) {\n\tlog.Info(\"getting all users\")\n\tvar users []entity.User\n\terr := model.GetAllUsers(&users, client)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn\n\t}\n\n\tfor _, user := range users {\n\t\tc.JSON(http.StatusOK, gin.H{\n\t\t\t\"id\": user.ID,\n\t\t\t\"first_name\": user.FirstName,\n\t\t\t\"last_name\": user.LastName,\n\t\t\t\"username\": user.Username,\n\t\t\t\"account_created\": user.AccountCreated,\n\t\t\t\"account_updated\": user.AccountUpdated,\n\t\t})\n\t}\n}",
"func GetUsers(c *gin.Context) {\n\tvar users []models.User\n\tlog.Println(\"GetUsers from db\")\n\tdb := db.GetDB()\n\tdb.Find(&users)\n\tc.JSON(http.StatusOK, users)\n}",
"func GetUsers(c *fiber.Ctx) {\n\tvar users []User\n\tdatabase.DBConn.Find(&users)\n\tc.JSON(users)\n}",
"func (u *UserService) List(ctx context.Context) ([]*User, *http.Response, error) {\n\treq, err := u.client.newRequest(\"GET\", \"user.list\", nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar users []*User\n\tresp, err := u.client.do(ctx, req, &users)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\treturn users, resp, nil\n}",
"func (h *Handler) GetUsers() ([]models.User, error) {\n\tquery := \"SELECT id, first_name, last_name, email, password FROM users;\"\n\trows, err := h.DB.Query(query)\n\tif err != nil {\n\t\tfmt.Printf(\"user_service-GetUsers-query: %s\\n\", err)\n\t\treturn nil, err\n\t}\n\n\tvar users []models.User\n\n\tfor rows.Next(){\n\t\tuser := models.User{}\n\t\t\n\t\terr := rows.Scan(\n\t\t\t&user.ID,\n\t\t\t&user.FirstName,\n\t\t\t&user.LastName,\n\t\t\t&user.Email,\n\t\t\t&user.Password,\n\t\t)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"user_service-GetUsers-Scan: %s \\n\",err)\n\t\t}\n\t\t\n\t\tusers = append(users, user)\n\t}\n\n\treturn users, nil\n}",
"func getUsers(types int) {\n\treq, _ := http.NewRequest(\"GET\", cfg.Main.Server+\"users\", nil)\n\treq.Header.Set(\"Content-Type\", \"application/xml\")\n\treq.Header.Set(\"Authorization\", cfg.Main.Key)\n\tclient := &http.Client{}\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tp(\"Couldn't connect to Openfire server: %s\", err.Error())\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != 200 {\n\t\tp(\"Error requesting userlist from the server.\")\n\t\treturn\n\t}\n\tbody, _ := ioutil.ReadAll(res.Body)\n\tvar users XMLUsers\n\txml.Unmarshal(body, &users)\n\tfor _, e := range users.User {\n\t\tn := e.Username + \",\"\n\t\tif e.Name != \"\" {\n\t\t\tn = e.Username + \",\" + e.Name\n\t\t}\n\t\tswitch types {\n\t\tcase 0:\n\t\t\tm := \"<missing e-mail>\"\n\t\t\tif e.Email != \"\" {\n\t\t\t\tm = e.Email\n\t\t\t}\n\t\t\tp(\"%s,%s\", n, m)\n\t\tcase 1:\n\t\t\tif e.Email != \"\" {\n\t\t\t\tp(\"%s,%s\", n, e.Email)\n\t\t\t}\n\t\tcase 2:\n\t\t\tif e.Email == \"\" {\n\t\t\t\tp(\"%s\", n)\n\t\t\t}\n\t\t}\n\t}\n}",
"func (handler *UserHandler) GetAllUsers(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {\n\tvar users []*User\n\tul, err := handler.UserService.GetAllUsers()\n\n\tfor _, user := range ul {\n\t\tusers = append(users, user.hidePassword())\n\t}\n\n\tif err != nil {\n\t\thandler.Formatter.JSON(w, http.StatusBadRequest, util.NewError(\"1008\", \"Missing user privileges\", err.Error()))\n\t} else {\n\t\thandler.Formatter.JSON(w, http.StatusOK, users)\n\t}\n\n}",
"func GetUsers(c *gin.Context) {\n\n\tlog := logger.WithFields(logrus.Fields{\"tag\": \"GetUsers\"})\n\tlog.Info(\"Fetching users\")\n\n\torganization := auth.GetCurrentOrganization(c.Request)\n\n\tidParam := c.Param(\"id\")\n\tid, err := strconv.ParseUint(idParam, 10, 32)\n\tif idParam != \"\" && err != nil {\n\t\tmessage := fmt.Sprintf(\"error parsing user id: %s\", err)\n\t\tlog.Info(message)\n\t\tc.JSON(http.StatusBadRequest, components.ErrorResponse{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: message,\n\t\t\tError: message,\n\t\t})\n\t\treturn\n\t}\n\n\tvar users []auth.User\n\tdb := model.GetDB()\n\terr = db.Model(organization).Related(&users, \"Users\").Error\n\tif err != nil {\n\t\tmessage := \"failed to fetch users\"\n\t\tlog.Info(message + \": \" + err.Error())\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, components.ErrorResponse{\n\t\t\tCode: http.StatusInternalServerError,\n\t\t\tMessage: message,\n\t\t\tError: message,\n\t\t})\n\t} else if id == 0 {\n\t\tc.JSON(http.StatusOK, users)\n\t} else if len(users) == 1 {\n\t\tc.JSON(http.StatusOK, users[0])\n\t} else if len(users) > 1 {\n\t\tmessage := fmt.Sprintf(\"multiple users found with id: %d\", id)\n\t\tlog.Info(message)\n\t\tc.AbortWithStatusJSON(http.StatusConflict, components.ErrorResponse{\n\t\t\tCode: http.StatusConflict,\n\t\t\tMessage: message,\n\t\t\tError: message,\n\t\t})\n\t} else {\n\t\tmessage := fmt.Sprintf(\"user not found with id: %d\", id)\n\t\tlog.Info(message)\n\t\tc.AbortWithStatusJSON(http.StatusNotFound, components.ErrorResponse{\n\t\t\tCode: http.StatusNotFound,\n\t\t\tMessage: message,\n\t\t\tError: message,\n\t\t})\n\t}\n}",
"func ListUsersHandle(service iface.Service) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlimit := 100\n\t\trawLimit := r.URL.Query()[\"limit\"]\n\t\tif len(rawLimit) > 0 {\n\t\t\tvar err error\n\t\t\tlimit, err = strconv.Atoi(rawLimit[0])\n\t\t\tif err != nil || limit <= 0 {\n\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\tfmt.Fprintf(w, \"invalid limit \\\"%s\\\"\", rawLimit[0])\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tusers, err := service.FilterUsers(r.Context(), iface.FilterUsers{Limit: uint(limit)})\n\t\tif err != nil {\n\t\t\tlog.Log(err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tfmt.Fprintf(w, \"service failed\")\n\t\t\treturn\n\t\t}\n\n\t\tJSON(w, r, map[string]interface{}{\n\t\t\t\"users\": users,\n\t\t})\n\t}\n}",
"func GetUsers(c *gin.Context) {\n\tvar users []models.User\n\tdb := db.GetDB()\n\tdb.Find(&users)\n\tc.JSON(200, users)\n}",
"func (h UserHTTP) List(w http.ResponseWriter, r *http.Request) {\n\tlistRequest := listRequestDecoder(r)\n\tusers, err := h.svc.ListUsers(r.Context(), listRequest)\n\tif err != nil {\n\t\th.logger.With(r.Context()).Errorf(\"list users error : %s\", err)\n\t\trender.Render(w, r, e.BadRequest(err, \"bad request\"))\n\t\treturn\n\t}\n\trender.Respond(w, r, users)\n}",
"func ListUsers(w http.ResponseWriter, r *http.Request) {\n\tusers, err := dal.GetUsers(\"\")\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tcommon.WriteResponse(w, users)\n}",
"func (uh UserHandler) GetAllUsers(w http.ResponseWriter, r *http.Request) {\n\tlog.Print(utils.InfoLog + \"UserHandler:GetAllUsers called\")\n\n\tvar results *[]models.User\n\tresults, err := uh.UserManager.GetUsers(); if err != nil {\n\t\tutils.ReturnWithErrorLong(w, *err)\n\t\tlog.Println(utils.ErrorLog + \"Insert body here\") // TODO ??\n\t\treturn\n\t}\n\tjson.NewEncoder(w).Encode(results)\n\tw.WriteHeader(http.StatusOK)\n}",
"func (g Graph) GetUsers(w http.ResponseWriter, r *http.Request) {\n\tcon, err := g.initLdap()\n\tif err != nil {\n\t\tg.logger.Error().Err(err).Msg(\"Failed to initialize ldap\")\n\t\terrorcode.ServiceNotAvailable.Render(w, r, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// TODO make filter configurable\n\tresult, err := g.ldapSearch(con, \"(objectClass=posixAccount)\", g.config.Ldap.BaseDNUsers)\n\n\tif err != nil {\n\t\tg.logger.Error().Err(err).Msg(\"Failed search ldap with filter: '(objectClass=posixAccount)'\")\n\t\terrorcode.ServiceNotAvailable.Render(w, r, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tusers := make([]*msgraph.User, 0, len(result.Entries))\n\n\tfor _, user := range result.Entries {\n\t\tusers = append(\n\t\t\tusers,\n\t\t\tcreateUserModelFromLDAP(\n\t\t\t\tuser,\n\t\t\t),\n\t\t)\n\t}\n\n\trender.Status(r, http.StatusOK)\n\trender.JSON(w, r, &listResponse{Value: users})\n}",
"func (ur *UserResource) handleGetUsers(c *gin.Context) {\n\tusers, err := ur.Store.GetAllUsers()\n\tif err != nil {\n\t\tlogging.Logger.Errorln(\"[API] Failed to get all users\", err)\n\t}\n\n\tc.JSON(http.StatusOK, users)\n}",
"func (cs *UserService) List() ([]UsersResponse, error) {\n\n\treq, err := cs.client.NewRequest(\"GET\", \"/users\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := cs.client.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := validateResponse(resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbodyBytes, _ := ioutil.ReadAll(resp.Body)\n\tbodyString := string(bodyBytes)\n\n\tu := &listUsersJSONResponse{}\n\terr = json.Unmarshal([]byte(bodyString), &u)\n\n\treturn u.Users, err\n}",
"func getAllUsers(c *fiber.Ctx) error {\n\tcollection := mg.Db.Collection(\"users\")\n\tquery := bson.D{{}}\n\tcursor, err := collection.Find(c.Context(), &query)\n\tif err != nil {\n\t\treturn c.Status(500).SendString(err.Error())\n\t}\n\tvar records []User = make([]User, 0)\n\t// iterate the cursor and decode the values\n\tif err := cursor.All(c.Context(), &records); err != nil {\n\t\treturn c.Status(404).SendString(\"There isnt any\")\n\t}\n\tvar users []User = make([]User, 0)\n\tfor i, s := range records {\n\t\ts.Password = \"\"\n\t\ts.TaskCode = \"\"\n\t\tusers = append(users, s)\n\t\tfmt.Println(i)\n\t}\n\n\treturn c.JSON(users)\n}",
"func userList(w http.ResponseWriter, r *http.Request) {}",
"func (g Graph) GetUsers(w http.ResponseWriter, r *http.Request) {\n\tsanitizedPath := strings.TrimPrefix(r.URL.Path, \"/graph/v1.0/\")\n\todataReq, err := godata.ParseRequest(r.Context(), sanitizedPath, r.URL.Query())\n\tif err != nil {\n\t\tg.logger.Err(err).Interface(\"query\", r.URL.Query()).Msg(\"query error\")\n\t\terrorcode.InvalidRequest.Render(w, r, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\tusers, err := g.identityBackend.GetUsers(r.Context(), r.URL.Query())\n\tif err != nil {\n\t\tvar errcode errorcode.Error\n\t\tif errors.As(err, &errcode) {\n\t\t\terrcode.Render(w, r)\n\t\t} else {\n\t\t\terrorcode.GeneralException.Render(w, r, http.StatusInternalServerError, err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tusers, err = sortUsers(odataReq, users)\n\tif err != nil {\n\t\tvar errcode errorcode.Error\n\t\tif errors.As(err, &errcode) {\n\t\t\terrcode.Render(w, r)\n\t\t} else {\n\t\t\terrorcode.GeneralException.Render(w, r, http.StatusInternalServerError, err.Error())\n\t\t}\n\t\treturn\n\t}\n\trender.Status(r, http.StatusOK)\n\trender.JSON(w, r, &listResponse{Value: users})\n}",
"func GetUsers(c *gin.Context) {\n\tusers := []models.User{}\n\tif err := database.DBCon.Find(&users).Error; err != nil {\n\t\tc.JSON(http.StatusNotFound, structs.Error{Code: http.StatusNotFound, Error: err.Error()})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, users)\n}",
"func (c *UsersClient) List(ctx context.Context, filter string) (*[]models.User, int, error) {\n\tparams := url.Values{}\n\tif filter != \"\" {\n\t\tparams.Add(\"$filter\", filter)\n\t}\n\tresp, status, _, err := c.BaseClient.Get(ctx, base.GetHttpRequestInput{\n\t\tValidStatusCodes: []int{http.StatusOK},\n\t\tUri: base.Uri{\n\t\t\tEntity: \"/users\",\n\t\t\tParams: params,\n\t\t\tHasTenantId: true,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, status, err\n\t}\n\tdefer resp.Body.Close()\n\trespBody, _ := ioutil.ReadAll(resp.Body)\n\tvar data struct {\n\t\tUsers []models.User `json:\"value\"`\n\t}\n\tif err := json.Unmarshal(respBody, &data); err != nil {\n\t\treturn nil, status, err\n\t}\n\treturn &data.Users, status, nil\n}",
"func GetAllUsers(w http.ResponseWriter, r *http.Request) {\n\tusers, err := models.GetAllUsers()\n\tif err != nil {\n\t\tfmt.Println(\"Caught an error\")\n\t\tfmt.Fprintln(w, http.StatusInternalServerError)\n\t} else {\n\t\tfmt.Println(users)\n\t\tjson.NewEncoder(w).Encode(users)\n\t}\n}",
"func GetUsers(c *gin.Context) {\n\trequestID := c.GetString(\"x-request-id\")\n\thelper.Logger(requestID, \"\").Infoln(\"RequestID= \", requestID)\n\t// cacheTest := helper.CacheExists(\"xxxxxxxxxx\")\n\t// helper.Logger(requestID, \"\").Infoln(\"cacheTest= \", cacheTest)\n\n\thttpCode, body, erro := helper.MakeHTTPRequest(\"GET\", \"https://api-101.glitch.me/customers\", \"\", nil, true)\n\thelper.Logger(requestID, \"\").Infoln(\"httpCode= \", httpCode)\n\thelper.Logger(requestID, \"\").Infoln(\"body= \", fmt.Sprintf(\"%s\", body))\n\thelper.Logger(requestID, \"\").Infoln(\"error= \", erro)\n\n\tvar user []models.User\n\terr := models.GetAllUsers(&user)\n\tif err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t} else {\n\t\tc.JSON(http.StatusOK, user)\n\t}\n}",
"func (h *ServiceUsersHandler) List(ctx context.Context, project, serviceName string) ([]*ServiceUser, error) {\n\t// Aiven API does not provide list operation for service users, need to get them via service info instead\n\tservice, err := h.client.Services.Get(ctx, project, serviceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn service.Users, nil\n}",
"func GetUsers() {\n\tvar users []User\n\t_, err := orm.NewOrm().QueryTable(\"t_user\").Filter(\"name__contains\", \"awd\").All(&users)\n\tif err == nil {\n\t\tfor _, user := range users {\n\t\t\tfmt.Println(user.ToString())\n\t\t}\n\t}\n}",
"func (h *userHandler) showUsers(ctx context.Context, rw http.ResponseWriter) {\n\n\tusers, err := h.serv.DB.UserCol.FindAll(ctx)\n\n\tif err != nil {\n\n\t\th.serv.writeResponse(ctx, rw, err.Error(), http.StatusBadRequest, nil)\n\n\t\treturn\n\t}\n\n\th.serv.writeResponsePlus(ctx, rw, \"users\", http.StatusOK, nil, users)\n}",
"func (svc *Service) getAllUsersHandler(w http.ResponseWriter, r *http.Request) error {\n\t// TODO: Check errors in encoding\n\n\tusers, err := svc.invitationsAPI.GetAllUsers(svc.invitationsSource.GetAllPeople)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.NewEncoder(w).Encode(users)\n}",
"func GetUsers(write http.ResponseWriter, request *http.Request) {\n\n\ttypeUser := request.URL.Query().Get(\"type\")\n\tpage := request.URL.Query().Get(\"page\")\n\tsearch := request.URL.Query().Get(\"search\")\n\n\tpagTemp, err := strconv.Atoi(page)\n\tif err != nil {\n\t\thttp.Error(write, \"Page value should be greater than 0\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tpag := int64(pagTemp)\n\n\tresult, status := bd.GetUsers(IDUser, pag, search, typeUser)\n\tif status == false {\n\t\thttp.Error(write, \"Error GetUsers\", http.StatusBadRequest)\n\t\treturn\n\t}\n\twrite.Header().Set(\"Content-Type\", \"application/json\")\n\twrite.WriteHeader(http.StatusCreated)\n\tjson.NewEncoder(write).Encode(result)\n}",
"func getAllUsers(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tusers, err := users.GetAllUsers(ctx)\n\tif err != nil {\n\t\tlog.Error(ctx, \"database problem\", \"error\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"null\")\n\t\treturn\n\t}\n\tdata, err := json.Marshal(users)\n\tif err != nil {\n\t\tlog.Error(ctx, \"json marshaling problem\", \"error\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"null\")\n\t\treturn\n\t}\n\tfmt.Fprintf(w, string(data))\n}",
"func GetUsers(c *gin.Context) {\n\tdb := dbConn()\n\tselDB, err := db.Query(\"CALL read_users()\")\n\tif err != nil {\n\t\tpanic(err.Error)\n\t}\n\n\tuser := User{}\n\tusers := []User{}\n\tfor selDB.Next() {\n\t\tvar id, username, useremail, fname, lname, password, passwordchange, passwordexpired, lastlogon, accountlocked string\n\t\terr = selDB.Scan(&id, &username, &useremail, &fname, &lname, &password, &passwordchange, &passwordexpired, &lastlogon, &accountlocked)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tc.JSON(500, gin.H{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t})\n\t\t}\n\t\tuser.ID = id\n\t\tuser.UserName = username\n\t\tuser.UserEmail = useremail\n\t\tuser.FName = fname\n\t\tuser.LName = lname\n\t\tuser.Password = password\n\t\tuser.PasswordChange = passwordchange\n\t\tuser.PasswordExpired = passwordexpired\n\t\tuser.LastLogon = lastlogon\n\t\tuser.AccountLocked = accountlocked\n\t\tiid, err := strconv.Atoi(id)\n\t\tif err != nil {\n\t\t\tpanic(err.Error)\n\t\t}\n\t\tselDB02, err := db.Query(\"CALL read_access_userid(?)\", iid)\n\t\tif err != nil {\n\t\t\tpanic(err.Error)\n\t\t}\n\t\taccess := Access{}\n\t\taccessList := []Access{}\n\t\tfor selDB02.Next() {\n\t\t\tvar accessid, userid, courtid, caseaccess, personaccess, accountingaccess, juryaccess, attorneyaccess, configaccess, securitylevel, sealedcase string\n\t\t\terr := selDB02.Scan(&accessid, &userid, &courtid, &caseaccess, &personaccess, &accountingaccess, &juryaccess, &attorneyaccess, &configaccess, &securitylevel, &sealedcase)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tc.JSON(500, gin.H{\n\t\t\t\t\t\"error\": err.Error(),\n\t\t\t\t})\n\t\t\t}\n\t\t\taccess.AccessID = accessid\n\t\t\taccess.IDUser = userid\n\t\t\taccess.IDCourt = courtid\n\t\t\taccess.CaseAccess = caseaccess\n\t\t\taccess.PersonAccess = personaccess\n\t\t\taccess.AccountingAccess = accountingaccess\n\t\t\taccess.JuryAccess = juryaccess\n\t\t\taccess.AttorneyAccess = attorneyaccess\n\t\t\taccess.ConfigAccess = configaccess\n\t\t\taccess.SecurityLevel = securitylevel\n\t\t\taccess.SealedCase = sealedcase\n\t\t\taccessList = append(accessList, access)\n\t\t}\n\t\tuser.AccessList = accessList\n\t\tusers = append(users, user)\n\t}\n\n\tc.JSON(200, gin.H{\n\t\t\"result\": users,\n\t})\n\n\tdefer db.Close()\n}",
"func ListAllUsers(w http.ResponseWriter, r *http.Request){\n\n\trows, err:= db.Query(\"SELECT * FROM users LIMIT 20\")\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tpanic(\"failed to connect database\")\n\t}\n\n\tlistUsers := Users{}\n\tfor rows.Next() {\n\t\tp := User{}\n\t\tif err := rows.Scan(&p.ID, &p.Name, &p.Score); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlistUsers = append(listUsers, p)\n\n\t}\n\tdefer rows.Close()\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(200)\n\tjson.NewEncoder(w).Encode(listUsers)\n}",
"func GetUsers(c *gin.Context) {\n\tvar users []models.User\n\tpagination := models.GeneratePaginationFromRequest(c)\n\terr := repository.GetAllUsersPaged(&users, &pagination)\n\tif err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t} else {\n\t\tc.JSON(http.StatusOK, users)\n\t}\n}",
"func (s *AutograderService) GetUsers(ctx context.Context, in *pb.Void) (*pb.Users, error) {\n\tusr, err := s.getCurrentUser(ctx)\n\tif err != nil {\n\t\ts.logger.Errorf(\"GetUsers failed: authentication error: %w\", err)\n\t\treturn nil, ErrInvalidUserInfo\n\t}\n\tif !usr.IsAdmin {\n\t\ts.logger.Error(\"GetUsers failed: user is not admin\")\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"only admin can access other users\")\n\t}\n\tusrs, err := s.getUsers()\n\tif err != nil {\n\t\ts.logger.Errorf(\"GetUsers failed: %w\", err)\n\t\treturn nil, status.Errorf(codes.NotFound, \"failed to get users\")\n\t}\n\treturn usrs, nil\n}",
"func AllUsersGet(c *gin.Context) {\n\tmeta := model.TableMetaFromQuery(c)\n\tginutils.WriteGinJSON(c, http.StatusOK, model.AllUsers(meta))\n}",
"func (s *Shell) ListUsers(_ *cli.Context) (err error) {\n\tresp, err := s.HTTP.Get(\"/v2/users/\", nil)\n\tif err != nil {\n\t\treturn s.errorOut(err)\n\t}\n\tdefer func() {\n\t\tif cerr := resp.Body.Close(); cerr != nil {\n\t\t\terr = multierr.Append(err, cerr)\n\t\t}\n\t}()\n\n\treturn s.renderAPIResponse(resp, &AdminUsersPresenters{})\n}",
"func (c *MysqlUserController) GetUsers(w http.ResponseWriter, r *http.Request) {\n\tapiKey := r.Header.Get(\"apiKey\")\n\tresult, err := c.service.GetUsers(apiKey)\n\tif err != nil {\n\t\tstatusCode := int(http.StatusInternalServerError)\n\t\topenapi.EncodeJSONResponse(result, &statusCode, w)\n\t\treturn\n\t}\n\topenapi.EncodeJSONResponse(result, nil, w)\n}",
"func GetUsers(c echo.Context) error {\n\tu := []*models.User{}\n\tfor _, v := range users {\n\t\tu = append(u, v)\n\t}\n\n\treturn c.JSON(http.StatusOK, u)\n}",
"func ListAllUsers(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusBadRequest, \"Mohomaaf ...dsb\", nil, nil)\n\t\t}\n\t}()\n\n\tfLog := userMgmtLogger.WithField(\"func\", \"ListAllUsers\").WithField(\"RequestID\", r.Context().Value(constants.RequestID)).WithField(\"path\", r.URL.Path).WithField(\"method\", r.Method)\n\n\tiauthctx := r.Context().Value(constants.HansipAuthentication)\n\tif iauthctx == nil {\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusUnauthorized, \"You are not authorized to access this resource\", nil, nil)\n\t\treturn\n\t}\n\n\tfLog.Trace(\"Listing Users\")\n\tpageRequest, err := helper.NewPageRequestFromRequest(r)\n\tif err != nil {\n\t\tfLog.Errorf(\"helper.NewPageRequestFromRequest got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusBadRequest, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tusers, page, err := UserRepo.ListUser(r.Context(), pageRequest)\n\tif err != nil {\n\t\tfLog.Errorf(\"UserRepo.ListUser got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusInternalServerError, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tsusers := make([]*SimpleUser, len(users))\n\tfor i, v := range users {\n\t\tsusers[i] = &SimpleUser{\n\t\t\tRecID: v.RecID,\n\t\t\tEmail: v.Email,\n\t\t\tEnabled: v.Enabled,\n\t\t\tSuspended: v.Suspended,\n\t\t}\n\t}\n\tret := make(map[string]interface{})\n\tret[\"users\"] = susers\n\tret[\"page\"] = page\n\thelper.WriteHTTPResponse(r.Context(), w, http.StatusOK, \"List of all user paginated\", nil, ret)\n}",
"func (ctlr *userServiceController) GetUsers(ctx context.Context, req *mygrpc.GetUsersRequest) (*mygrpc.GetUsersResponse, error) {\n\tresultMap := ctlr.userService.GetUsersByIDs(req.GetIds())\n\n\tresp := &mygrpc.GetUsersResponse{}\n\tfor _, u := range resultMap {\n\t\tresp.Users = append(resp.Users, marshalUser(u))\n\t}\n\treturn resp, nil\n}",
"func GetUsers() UsersResponse {\n\tvar users UsersResponse\n\tresponse := network.Get(\"admin/users\")\n\tjson.Unmarshal(response, &users)\n\n\treturn users\n}",
"func (c *Client) ListUsers() (*http.Response, error) {\n\treturn c.get(\"/user/listusers\", nil)\n}",
"func hGetUsers(c echo.Context) error {\n\tvar e httpError\n\t// read from token user id\n\tvar tokenUserID int64\n\ttokenUserID = 2\n\n\tusers, errGetUsers := blog.GetAllUsers(tokenUserID, 50)\n\tif errGetUsers != nil {\n\t\te.TheError = errGetUsers.Error()\n\t\treturn c.JSON(http.StatusInternalServerError, e)\n\t}\n\treturn c.JSON(http.StatusOK, users)\n}",
"func HandleUserGetAll(c *gin.Context) {\n\tvar u User\n\tusers, err := u.GetAll()\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"code\": 1,\n\t\t\t\"msg\": err.Error(),\n\t\t\t\"data\": gin.H{},\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"code\": 0,\n\t\t\"msg\": \"ok\",\n\t\t\"data\": users,\n\t})\n}",
"func GetAllUsers(res http.ResponseWriter, req *http.Request) {\n\tres.Header().Set(\"Content-Type\", \"application/json\")\n\tquery := bson.M{}\n\tselector := bson.M{\n\t\t\"_id\": 1,\n\t\t\"name\": 1,\n\t\t\"email\": 1,\n\t}\n\n\tusers, err := db.GetAllUsers(query, selector)\n\tif err != nil {\n\t\tif err.Error() == mgo.ErrNotFound.Error() {\n\t\t\tmsg := \"User not found\"\n\n\t\t\tutils.ReturnErrorResponse(http.StatusNotFound, msg, \"\", nil, nil, res)\n\t\t\treturn\n\t\t}\n\n\t\tmsg := \"Error occurred while getting user details\"\n\t\tutils.ReturnErrorResponse(http.StatusBadRequest, msg, \"\", nil, nil, res)\n\t\treturn\n\t}\n\n\tmsg := \"Your request processed successfully\"\n\tutils.ReturnSuccessReponse(http.StatusOK, msg, users, res)\n}",
"func (u *UserCtr) GetUserAll(c *gin.Context) {\n\tusers, err := model.UserAll(u.DB)\n\tif err != nil {\n\t\tresp := errors.New(err.Error())\n\t\tc.JSON(http.StatusInternalServerError, resp)\n\t\treturn\n\t}\n\n\tif len(users) == 0 {\n\t\tc.JSON(http.StatusOK, make([]*model.User, 0))\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"result\": users,\n\t})\n\treturn\n}",
"func (uc UserController) getUsers(response http.ResponseWriter, request *http.Request, p httprouter.Params) {\n\tresponse.Header().Add(\"content-type\", \"application/json\")\n\tvar UserArray []Users\n\tctx, _ := context.WithTimeout(context.Background(), 10*time.Second)\n\tcursor, err := uc.collection.Find(ctx, bson.M{})\n\tif err != nil {\n\t\tresponse.WriteHeader(http.StatusInternalServerError)\n\t\tresponse.Write([]byte(`{\"message: \"` + err.Error() + `\"}\"`))\n\t\treturn\n\t}\n\tdefer cursor.Close(ctx)\n\n\tfor cursor.Next(ctx) {\n\t\tvar user Users\n\t\tcursor.Decode(&user)\n\t\tUserArray = append(UserArray, user)\n\t}\n\n\tif err := cursor.Err(); err != nil {\n\t\tresponse.WriteHeader(http.StatusInternalServerError)\n\t\tresponse.Write([]byte(`{\"message: \"` + err.Error() + `\"}\"`))\n\t\treturn\n\t}\n\tjson.NewEncoder(response).Encode(UserArray)\n}",
"func (h *Handler) list() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tentities, err := h.UserDAO.FetchAll(r.Context())\n\t\tswitch {\n\t\tcase errors.Is(err, errorx.ErrNoUser):\n\t\t\tmsg := &errorMessage{\n\t\t\t\tMessage: fmt.Sprintf(\"no users exist\"),\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusNotFound, msg)\n\t\t\treturn\n\t\tcase err != nil:\n\t\t\tmsg := &errorMessage{\n\t\t\t\tError: err.Error(),\n\t\t\t\tMessage: \"user datastore error\",\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusInternalServerError, msg)\n\t\t\treturn\n\t\tdefault:\n\t\t\tresponse.JSON(w, http.StatusOK, entities)\n\t\t}\n\t}\n}",
"func (s *UsersService) GetAll(limit, page int64) (*Users, error) {\n\tvar params = map[string]string{}\n\tparams[\"limit\"] = strconv.FormatInt(limit, 10)\n\tparams[\"page\"] = strconv.FormatInt(page, 10)\n\n\tvar data Users\n\terr := s.client.get(\"/users\", params, nil, &data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &data, err\n}",
"func (h *User) List(w http.ResponseWriter, r *http.Request) {\n\tlimit, offset := utils.GetPaginationParams(r.URL.Query())\n\tresp, err := h.Storage.GetUserList(limit, offset)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tR.JSON500(w)\n\t\treturn\n\t}\n\n\tif len(resp) < 1 {\n\t\tR.JSON404(w)\n\t\treturn\n\t}\n\n\tR.JSON200(w, resp)\n}",
"func GetUsers(writer http.ResponseWriter, r *http.Request) {\n\tusers := []models.AssocUser{}\n\n\tif !initDB {\n\t\tdb = utils.GetConnection()\n\t}\n\n\tsqlDB, err := db.DB()\n\tif err != nil {\n\t\tlog.Fatal(\"Error clossing the DB\")\n\t} else {\n\t\tdefer sqlDB.Close()\n\t}\n\tdb.Find(&users)\n\tjUsers, _ := json.Marshal(users)\n\tutils.SendResponse(writer, http.StatusOK, jUsers)\n}",
"func GetUserList(w http.ResponseWriter, r *http.Request) {\n\n\tusers, err := user.GetUserList(r)\n\n\tif err != nil {\n\t\thttpext.AbortAPI(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thttpext.SuccessDataAPI(w, \"Ok\", users)\n}",
"func GetAllUser(w http.ResponseWriter, r *http.Request) {\n\temail, err := getEmailFromTokenHeader(r)\n\tif err != nil || email == \"\" {\n\t\thttp.Error(w, \"Invalid Token\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tw.Header().Set(\"Context-Type\", \"application/x-www-form-urlencoded\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t// get all the users in the db\n\tusers, err := database.GetAllUsers()\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to get all user. %v\", err)\n\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// send all the users as response\n\terr = json.NewEncoder(w).Encode(&models.UserList{Users: users})\n\tif err != nil {\n\t\tlogrus.Errorf(err.Error())\n\t\treturn\n\t}\n}",
"func (a *App) GetAllUsers(w http.ResponseWriter, r *http.Request) {\n\tusers, err := models.GetAllUsers(a.DB)\n\tif err != nil {\n\t\tresponses.ERROR(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tresponses.JSON(w, http.StatusOK, users)\n\treturn\n}",
"func GetAllUsers() (users []User, err error) {\n\trows, err := db.DbClient.Query(\"Select * from reg_users;\")\n\tif err != nil {\n\t\treturn users, err\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tu := User{}\n\t\tif err := rows.Scan(&u.ID, &u.Name, &u.Password, &u.Email, &u.Registered, &u.Activated); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn users, err\n\t\t}\n\t\tusers = append(users, u)\n\t}\n\treturn\n}",
"func (w *ServerInterfaceWrapper) GetUsers(ctx echo.Context) error {\n\tvar err error\n\n\tctx.Set(\"OAuth.Scopes\", []string{\"\"})\n\n\t// Parameter object where we will unmarshal all parameters from the context\n\tvar params GetUsersParams\n\t// ------------- Optional query parameter \"page_size\" -------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"page_size\", ctx.QueryParams(), ¶ms.PageSize)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter page_size: %s\", err))\n\t}\n\n\t// ------------- Optional query parameter \"page_number\" -------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"page_number\", ctx.QueryParams(), ¶ms.PageNumber)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter page_number: %s\", err))\n\t}\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.GetUsers(ctx, params)\n\treturn err\n}",
"func (a *App) UsersGet(w http.ResponseWriter, r *http.Request) {\n\tusername := chi.URLParam(r, \"username\")\n\tctx := r.Context()\n\tuserID, ok := ctx.Value(middleware.UserCtxKeys(0)).(int64)\n\tquserID, err := a.validateUsernameAndGetID(username)\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\tutils.RespondWithError(w, http.StatusNotFound, \"User not found\")\n\t\t\treturn\n\t\t}\n\t\tutils.RespondWithError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\tlog.Println(quserID)\n\tvar user *User\n\tif ok {\n\t\tuser, err = a.dbAuthenticatedGetUser(userID, quserID)\n\t} else {\n\t\tuser, err = a.dbGetUser(quserID)\n\t}\n\tif err != nil {\n\t\tutils.RespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\tutils.RespondWithJSON(w, http.StatusOK, &user)\n}",
"func (a *API) listUsers(w http.ResponseWriter, req *http.Request) {\n\tvar (\n\t\tpath string\n\t\tusers cmap.ConcurrentMap\n\t\twg sync.WaitGroup\n\t)\n\t// Force reload policy to get the newest\n\t_ = a.policyEngine.LoadPolicy()\n\tif username := req.FormValue(\"name\"); username != \"\" {\n\t\tpath = common.Path(model.DefaultUsersPrefix, common.Hash(username, crypto.MD5))\n\t} else {\n\t\tpath = common.Path(model.DefaultUsersPrefix)\n\t}\n\tresp, err := a.etcdcli.DoGet(path, etcdv3.WithPrefix(),\n\t\tetcdv3.WithSort(etcdv3.SortByKey, etcdv3.SortAscend))\n\tif err != nil {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusInternalServerError,\n\t\t\terr: err,\n\t\t})\n\t\treturn\n\t}\n\tusers = cmap.New()\n\tfor _, ev := range resp.Kvs {\n\t\twg.Add(1)\n\t\tgo func(evv []byte) {\n\t\t\tdefer wg.Done()\n\t\t\tvar (\n\t\t\t\tu model.User\n\t\t\t\tp [][]string\n\t\t\t)\n\t\t\t_ = json.Unmarshal(evv, &u)\n\t\t\tp = a.policyEngine.GetFilteredPolicy(0, u.Username)\n\t\t\tfor i, v := range p {\n\t\t\t\t// The first element is username, so just remove it.\n\t\t\t\tp[i] = v[1:]\n\t\t\t}\n\t\t\tusers.Set(u.Username, p)\n\t\t}(ev.Value)\n\t}\n\twg.Wait()\n\ta.respondSuccess(w, http.StatusOK, users)\n}",
"func (c *Client) GetUsers(queryParams ...string) (map[string]interface{}, error) {\n\tlog.info(\"========== GET CLIENT USERS ==========\")\n\turl := buildURL(path[\"users\"])\n\n\treturn c.do(\"GET\", url, \"\", queryParams)\n}",
"func (uc UserController) GetAllUsers(c *doze.Context) doze.ResponseSender {\n\treturn doze.NewOKJSONResponse(users)\n}",
"func (mgr *UserMgr) GetUsers() []User {\n\tusers := []User{}\n\tmgr.db.Select(&users, \"SELECT username, email, password, role FROM users ORDER BY id desc\")\n\treturn users\n}",
"func GetUsers(w http.ResponseWriter, r *http.Request, db *gorp.DbMap, c *businesscontext.Ctx) error {\n\tusers, err := user.LoadUsers(db)\n\tif err != nil {\n\t\treturn sdk.WrapError(err, \"GetUsers: Cannot load user from db\")\n\t}\n\treturn WriteJSON(w, r, users, http.StatusOK)\n}",
"func (u UserController) GetUsers(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\tusers, err := u.userRepository.GetAll()\n\tif err != nil {\n\t\thttp.Error(w, \"service unavailable\", http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(users)\n}",
"func (u *UserHandler) GetAllUsers(c *gin.Context) {\n\tusers, err := (*u.UserService).GetAll()\n\tif err != nil {\n\t\t_ = c.Error(err).SetType(gin.ErrorTypePublic)\n\t\treturn\n\t}\n\n\tvar userList []*dto.User\n\tfor _, user := range users {\n\t\tuserList = append(userList, adapter.UserDomainToDTO(user))\n\t}\n\n\tc.JSON(http.StatusOK, userList)\n}",
"func (serv *AppServer) GetUsers() []*User {\n\tret := []*User{}\n\tlines := strings.Split(serv.ServerRequest([]string{\"GetUsers\"}), \"[|]\")\n\tfor _, line := range lines[:len(lines)-1] {\n\t\tret = append(ret, ParseUser(line))\n\t}\n\treturn ret\n}",
"func (cli *OpsGenieUserV2Client) List(req userv2.ListUsersRequest) (*userv2.ListUsersResponse, error) {\n\tvar response userv2.ListUsersResponse\n\terr := cli.sendGetRequest(&req, &response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response, nil\n}",
"func GetAllUsers(w http.ResponseWriter, req *http.Request) {\n\trs, err := db.GetAll()\n\tif err != nil {\n\t\thandleError(err, \"Failed to load database Users: %v\", w)\n\t\treturn\n\t}\n\n\tbs, err := json.Marshal(rs)\n\tif err != nil {\n\t\thandleError(err, \"Failed to load marshal data: %v\", w)\n\t\treturn\n\t}\n\n\tw.Write(bs)\n}",
"func (u *User) List(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {\n\tctx, span := trace.StartSpan(ctx, \"handlers.User.List\")\n\tdefer span.End()\n\n\tusers, err := user.List(ctx, u.db)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn web.Respond(ctx, w, users, http.StatusOK)\n}",
"func GetAllUsers(dbmap *gorp.DbMap) func(w http.ResponseWriter, r *http.Request) {\n\n return usersHandler(nil, func(r *http.Request) *[]models.User {\n var users []models.User\n _, dbError := dbmap.Select(&users, \"select * from \\\"user\\\"\")\n if dbError != nil {\n log.Print(dbError)\n }\n\n return &users\n })\n\n}",
"func GetAllUsers(context *fiber.Ctx) error {\n\tvar users = repository.GetAllUsers()\n\n\tif users == nil {\n\t\tlog.Printf(\"database is empty\")\n\t\treturn context.Status(404).JSON(&fiber.Map{\"response\": \"not found\"})\n\t} else {\n\t\treturn context.Status(200).JSON(users)\n\t}\n}",
"func (uc UserController) GetAllUsers(c rest.Context) rest.ResponseSender {\n\treturn rest.NewOKJSONResponse(users)\n}",
"func (s *userService) GetAll(ctx context.Context, req *pb.Request) (rsp *pb.Response, err error) {\n\tif req.PageNum == 0 || req.PageSize == 0 {\n\t\treq.PageNum = 1\n\t\treq.PageSize = 10\n\t}\n\tusers, err := s.dao.GetAllUsers(ctx, req)\n\tif err != nil {\n\t\treturn\n\t}\n\trsp = &pb.Response{\n\t\tUsers: users,\n\t}\n\treturn\n}",
"func (UserService) List(ctx context.Context, gdto dto.GeneralListDto) ([]model.User, int64) {\n\tcols := \"*\"\n\tgdto.Q, cols = dataPermService.DataPermFilter(ctx, \"users\", gdto)\n\treturn userDao.List(gdto, cols)\n}",
"func GetUsers(db *sql.DB) ([]models.UserResponse, error) {\n\n\trows, err := db.Query(\"SELECT id, username, email, createdAt FROM users\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpersons := make([]models.UserResponse, 0)\n\n\tfor rows.Next() {\n\t\tvar id int\n\t\tvar username string\n\t\tvar email string\n\t\tvar createdAt time.Time\n\t\terr = rows.Scan(&id, &username, &email, &createdAt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpersons = append(persons, models.UserResponse{ID: id, Username: username, CreatedAt: createdAt})\n\t}\n\treturn persons, nil\n}",
"func userIndex(w http.ResponseWriter, r *http.Request) {\n w.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tdb := co.DbConnection(dbc)\n\t// DB query to get all the users\n\tresults, err := db.Query(\"SELECT user_name FROM members\")\n\tif err != nil {\n\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t}\n\tvar members []string\n\tfor results.Next() {\n\t\tvar name string\n\t\terr = results.Scan(&name)\n\t\tif err != nil {\n\t\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t\t}\n\t\tmembers = append(members, name)\n\t}\n\tresults.Close()\n\tdb.Close()\n\tjsonPrint(w, members)\n return\n}",
"func allUsers(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar users []User\n\tdb.Scopes(Paginate(r)).Find(&users)\n\terr := json.NewEncoder(w).Encode(users)\n\tlog.ErrorHandler(err)\n\tlog.AccessHandler(r, 200)\n\treturn\n}",
"func (u *usecase) GetAll(ctx context.Context) ([]*User, error) {\n\tusers, err := u.repository.GetAll(ctx)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error fetching all users\")\n\t}\n\treturn users, nil\n}",
"func GetUsers(c router.Context) (interface{}, error) {\n\t// get the data from the request and parse it as structure\n\tdata := c.Param(`data`).(UserId)\n\n\t// Validate the inputed data\n\terr := data.Validate()\n\tif err != nil {\n\t\tif _, ok := err.(validation.InternalError); ok {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, status.ErrStatusUnprocessableEntity.WithValidationError(err.(validation.Errors))\n\t}\n\tstub := c.Stub()\n\tqueryString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"_id\\\":{\\\"$ne\\\":\\\"%s\\\"},\\\"doc_type\\\":\\\"%s\\\"}}\", data.ID, utils.DocTypeUser)\n\tresultsIterator, err := stub.GetQueryResult(queryString)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, status.ErrInternal.WithError(err)\n\t}\n\n\tdefer resultsIterator.Close()\n\n\t// buffer is a JSON array containing QueryResults\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"{\")\n\tbuffer.WriteString(\"\\\"users\\\": [\")\n\taArrayMemberAlreadyWritten := false\n\tfor resultsIterator.HasNext() {\n\t\tqueryResponse, err2 := resultsIterator.Next()\n\t\tif err2 != nil {\n\t\t\treturn nil, status.ErrInternal.WithError(err2)\n\t\t}\n\n\t\t// Add a comma before array members, suppress it for the first array member\n\t\tif aArrayMemberAlreadyWritten == true {\n\t\t\tbuffer.WriteString(\",\")\n\t\t}\n\t\tuserData := UserResponse{}\n\t\terr3 := json.Unmarshal(queryResponse.Value, &userData)\n\t\tif err3 != nil {\n\t\t\treturn nil, status.ErrInternal.WithError(err3)\n\t\t}\n\n\t\tuserData.ID = queryResponse.Key\n\t\tuserDataBytes, _ := json.Marshal(userData)\n\n\t\tbuffer.WriteString(string(userDataBytes))\n\t\taArrayMemberAlreadyWritten = true\n\t}\n\tbuffer.WriteString(\"]}\")\n\n\t//return the response\n\treturn buffer.Bytes(), nil\n}",
"func (c *EcomClient) GetUsers(ctx context.Context) ([]*UserResponse, error) {\n\turi := c.endpoint + \"/users\"\n\tres, err := c.request(http.MethodGet, uri, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"request failed: %w\", err)\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode >= 400 {\n\t\treturn nil, fmt.Errorf(\"%s: %w\", res.Status, err)\n\t}\n\n\tvar userContainer UserContainer\n\tif err := json.NewDecoder(res.Body).Decode(&userContainer); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"json decode url=%q\", uri)\n\t}\n\treturn userContainer.Data, nil\n}",
"func UserListAll(w http.ResponseWriter, r *http.Request) {\n\n\tvar err error\n\tvar pageSize int\n\tvar paginatedUsers auth.PaginatedUsers\n\n\t// Init output\n\toutput := []byte(\"\")\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\trefRoles := gorillaContext.Get(r, \"auth_roles\").([]string)\n\n\t// Grab url path variables\n\turlValues := r.URL.Query()\n\tpageToken := urlValues.Get(\"pageToken\")\n\tstrPageSize := urlValues.Get(\"pageSize\")\n\tprojectName := urlValues.Get(\"project\")\n\tprojectUUID := \"\"\n\n\tif projectName != \"\" {\n\t\tprojectUUID = projects.GetUUIDByName(projectName, refStr)\n\t\tif projectUUID == \"\" {\n\t\t\terr := APIErrorNotFound(\"ProjectUUID\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif strPageSize != \"\" {\n\t\tif pageSize, err = strconv.Atoi(strPageSize); err != nil {\n\t\t\tlog.Errorf(\"Pagesize %v produced an error while being converted to int: %v\", strPageSize, err.Error())\n\t\t\terr := APIErrorInvalidData(\"Invalid page size\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// check that user is indeed a service admin in order to be priviledged to see full user info\n\tpriviledged := auth.IsServiceAdmin(refRoles)\n\n\t// Get Results Object - call is always priviledged because this handler is only accessible by service admins\n\tif paginatedUsers, err = auth.PaginatedFindUsers(pageToken, int32(pageSize), projectUUID, priviledged, refStr); err != nil {\n\t\terr := APIErrorInvalidData(\"Invalid page token\")\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Output result to JSON\n\tresJSON, err := paginatedUsers.ExportJSON()\n\n\tif err != nil {\n\t\terr := APIErrExportJSON()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Write response\n\toutput = []byte(resJSON)\n\trespondOK(w, output)\n\n}"
] | [
"0.77413064",
"0.76049715",
"0.75767225",
"0.7560748",
"0.7475642",
"0.74546",
"0.74237984",
"0.73957825",
"0.7393577",
"0.73627543",
"0.73378384",
"0.72897553",
"0.72844577",
"0.7261622",
"0.72614956",
"0.7254002",
"0.72333294",
"0.7209193",
"0.7197647",
"0.71920323",
"0.713491",
"0.71262527",
"0.7123355",
"0.70965743",
"0.7064692",
"0.70629674",
"0.7050243",
"0.70381474",
"0.70371675",
"0.70284104",
"0.70257306",
"0.7024374",
"0.70189476",
"0.7017125",
"0.7014344",
"0.7003844",
"0.70016146",
"0.6999788",
"0.6992649",
"0.6992396",
"0.6986773",
"0.6981682",
"0.6966427",
"0.6964424",
"0.6956028",
"0.695575",
"0.69538933",
"0.69463867",
"0.69463235",
"0.6946047",
"0.6942803",
"0.6935161",
"0.69258964",
"0.6923909",
"0.6922941",
"0.6906425",
"0.69058704",
"0.6898161",
"0.6888485",
"0.6885382",
"0.6875799",
"0.6873976",
"0.6869259",
"0.68668354",
"0.686286",
"0.68512875",
"0.68381137",
"0.68295324",
"0.68294466",
"0.68245655",
"0.68191177",
"0.68157667",
"0.6813053",
"0.68035334",
"0.6796124",
"0.6789523",
"0.67876416",
"0.6784302",
"0.6782266",
"0.67685735",
"0.676751",
"0.67673934",
"0.6744833",
"0.673032",
"0.6728018",
"0.67265326",
"0.67237747",
"0.6716131",
"0.6714956",
"0.67139983",
"0.6712402",
"0.67074966",
"0.6707187",
"0.6688783",
"0.66762954",
"0.66694957",
"0.66659707",
"0.6655547",
"0.66455907",
"0.6630085"
] | 0.77894056 | 0 |
PostUserHandler creates a user in the database | func PostUserHandler(w http.ResponseWriter, r *http.Request) {
var user User
err := json.NewDecoder(r.Body).Decode(&user)
if err != nil {
panic(err)
}
user.CreateAt = time.Now()
id++
k := strconv.Itoa(id)
Listusers[k] = user
w.Header().Set("Content-Type", "application/json")
j, err := json.Marshal(user)
if err != nil {
panic(err)
}
w.WriteHeader(http.StatusCreated)
w.Write(j)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func PostUserHandler(w http.ResponseWriter, r *http.Request) {\n\tvar user models.User\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\tlog.Println(\"Error al parsear usuario\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tstatus := users.CreateUser(user)\n\tw.WriteHeader(status)\n}",
"func (a *api) h_POST_users(c *gin.Context) {\n\tusr := &User{}\n\tif a.errorResponse(c, bindAppJson(c, usr)) {\n\t\treturn\n\t}\n\ta.logger.Info(\"Creating new user \", usr)\n\tmu := a.user2muser(usr)\n\tif a.errorResponse(c, a.Dc.CreateUser(mu)) {\n\t\treturn\n\t}\n\n\tif usr.Password != nil {\n\t\tif err := a.Dc.SetUserPasswd(usr.Login, ptr2string(usr.Password, \"\")); err != nil {\n\t\t\ta.logger.Warn(\"Could not set user password for new user \", usr.Login, \", err=\", err, \". Will leave it intact\")\n\t\t}\n\t}\n\n\tw := c.Writer\n\turi := composeURI(c.Request, usr.Login)\n\tw.Header().Set(\"Location\", uri)\n\tc.Status(http.StatusCreated)\n}",
"func (h *UserHandler) handlePostUser(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n //Decode request\n var req postUserRequest\n if err := json.NewDecoder(r.Body).Decode(&req.User); err != nil {\n Error(w, ErrInvalidJSON, http.StatusBadRequest, h.Logger)\n return\n }\n u := req.User\n\n //create a new user\n err := h.UserService.CreateUser(u)\n if err != nil {\n Error(w, err, http.StatusBadRequest, h.Logger)\n }\n w.Header().Set(\"Content-Type\", \"application/json\")\n json.NewEncoder(w).Encode(&postUserResponse{User: u})\n}",
"func PostUser(w http.ResponseWriter, req *http.Request, app *App) {\n\tif models.UserCount(app.Db) == 0 {\n\t\temail, password := req.FormValue(\"email\"), req.FormValue(\"password\")\n\t\tuser := models.NewUser(email, password)\n\t\terr := user.Save(app.Db)\n\t\tif err != nil {\n\t\t\thttp.Redirect(w, req, app.Config.General.Prefix+\"/register\", http.StatusFound)\n\t\t} else {\n\t\t\thttp.Redirect(w, req, app.Config.General.Prefix+\"/login\", http.StatusFound)\n\t\t}\n\t}\n}",
"func (auh *AdminUserHandler) PostUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tvar apiKey = r.Header.Get(\"api-key\")\n\tif apiKey == \"\" || (apiKey != adminApiKey && apiKey != userApiKey) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t\treturn\n\t}\n\tl := r.ContentLength\n\tbody := make([]byte, l)\n\tr.Body.Read(body)\n\n\tuser := &entity.User{}\n\n\terr := json.Unmarshal(body, user)\n\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tuser, errs := auh.userService.StoreUser(user)\n\n\tif len(errs) > 0 {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\toutput, err := json.MarshalIndent(user, \"\", \"\\t\")\n\tw.WriteHeader(http.StatusCreated)\n\tw.Write(output)\n\treturn\n}",
"func CreateUserHandler(w http.ResponseWriter, req *http.Request) {\n // Validate internal token.\n if internalToken := req.Header.Get(app.Config.AuthHeaderName); internalToken != app.Config.RestApiToken {\n respond.Error(w, errmsg.Unauthorized())\n return\n }\n\n // Parse & validate payload.\n var pl payload.CreateUserPayload\n\n if !pl.Validate(req) {\n respond.Error(w, errmsg.InvalidPayload())\n return\n }\n\n // Check if the executor is using the USER_CREATION_HASH to create this user.\n usingUserCreationPw := pl.ExecutorEmail == \"\" && app.Config.UserCreationHash != \"\" &&\n crypt.VerifySha256(pl.ExecutorPassword, app.Config.UserCreationHash)\n\n // If not using USER_CREATION_HASH for auth, verify executor exists using email/pw.\n if !usingUserCreationPw {\n // Get executor user by email.\n executorUser, err := usersvc.FromEmail(pl.ExecutorEmail)\n\n if err != nil {\n app.Log.Errorln(err.Error())\n respond.Error(w, errmsg.UserNotFound())\n return\n }\n\n // Ensure executor user's password is correct.\n if !crypt.VerifyBcrypt(pl.ExecutorPassword, executorUser.HashedPw) {\n app.Log.Errorln(\"error creating new User: invalid executor user password\")\n respond.Error(w, errmsg.Unauthorized())\n return\n }\n\n // Only admin users can create other users.\n if !executorUser.Admin {\n app.Log.Errorln(\"error creating new User: executor user must be an admin\")\n respond.Error(w, errmsg.Unauthorized())\n return\n }\n }\n\n // Hash provided user password.\n hashedPw, err := crypt.BcryptHash(pl.NewPassword)\n\n if err != nil {\n app.Log.Errorf(\"error creating new User: bcrypt password hash failed with %s\\n\", err.Error())\n respond.Error(w, errmsg.ISE())\n return\n }\n\n // Create new User.\n newUser, err := usersvc.Create(pl.NewEmail, hashedPw, pl.Admin)\n\n if err != nil {\n app.Log.Errorln(err.Error())\n pqError, ok := err.(*pq.Error)\n\n if ok && pqError.Code.Name() == \"unique_violation\" {\n respond.Error(w, errmsg.EmailNotAvailable())\n } else {\n respond.Error(w, errmsg.UserCreationFailed())\n }\n\n return\n }\n\n // Create response payload and respond.\n respData := successmsg.UserCreationSuccess\n respData[\"uid\"] = newUser.Uid\n\n respond.Created(w, respData)\n}",
"func CreateUserHandler(connection *sql.DB, cnf config.Config) negroni.HandlerFunc {\n\treturn negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t\tuser := &models.UserCreate{}\n\t\terr := util.RequestToJSON(r, user)\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, errors.New(\"Bad json\"))\n\t\t\treturn\n\t\t}\n\n\t\tif err := user.Validate(); err == nil {\n\t\t\tif err := user.ValidatePassword(); err == nil {\n\n\t\t\t\thash, _ := bcrypt.GenerateFromPassword([]byte(user.Password), 10)\n\t\t\t\tuser.Hash = string(hash)\n\n\t\t\t\tcreatedID, err := db.CreateUser(connection, user)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.SendBadRequest(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcreatedUser, err := db.GetUserByID(connection, createdID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.SendBadRequest(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// create JWT object with claims\n\t\t\t\texpiration := time.Now().Add(time.Hour * 24 * 31).Unix()\n\t\t\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\t\t\t\"sub\": createdUser.ID,\n\t\t\t\t\t\"iat\": time.Now().Unix(),\n\t\t\t\t\t\"exp\": expiration,\n\t\t\t\t})\n\n\t\t\t\t// Load secret key from config and generate a signed token\n\t\t\t\tsecretKey := cnf.SecretKey\n\t\t\t\ttokenString, err := token.SignedString([]byte(secretKey))\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.SendError(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\ttype Token struct {\n\t\t\t\t\tToken string `json:\"token\"`\n\t\t\t\t\tExpiresOn string `json:\"expires_on\"`\n\t\t\t\t\tUser *models.UserResponse `json:\"user\"`\n\t\t\t\t}\n\n\t\t\t\tutil.SendOK(w, &Token{\n\t\t\t\t\tToken: tokenString,\n\t\t\t\t\tExpiresOn: strconv.Itoa(int(expiration)),\n\t\t\t\t\tUser: &createdUser,\n\t\t\t\t})\n\n\t\t\t} else {\n\t\t\t\tutil.SendBadRequest(w, err)\n\t\t\t}\n\t\t} else {\n\t\t\tutil.SendBadRequest(w, err)\n\t\t}\n\t})\n}",
"func (h *userHandler) createUser(ctx context.Context, rw http.ResponseWriter, r *http.Request) {\n\n\tvar user = &model.User{}\n\n\terr := json.NewDecoder(r.Body).Decode(user)\n\tif err != nil {\n\n\t\th.serv.writeResponse(ctx, rw, err.Error(), http.StatusBadRequest, nil)\n\n\t\treturn\n\n\t}\n\n\tif user.Login == \"\" || user.Password == \"\" {\n\n\t\th.serv.writeResponse(ctx, rw, \"Login or password are empty\", http.StatusBadRequest, nil)\n\n\t\treturn\n\t}\n\n\terr = h.registerUser(ctx, user)\n\tif err != nil {\n\n\t\th.serv.writeResponse(ctx, rw, err.Error(), http.StatusBadRequest, nil)\n\n\t\treturn\n\t}\n\n\th.serv.writeResponse(ctx, rw, \"user was created: \"+user.Login, http.StatusCreated, user)\n\n}",
"func (e *env) UserSignupPostHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\tcase \"POST\":\n\t\tusername := r.FormValue(\"username\")\n\t\tpassword := r.FormValue(\"password\")\n\n\t\terr := e.authState.NewUser(username, password)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error adding user:\", err)\n\t\t\te.authState.SetFlash(\"Error adding user. Check logs.\", r)\n\t\t\thttp.Redirect(w, r, r.Referer(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t// Login the recently added user\n\t\tif e.authState.Auth(username, password) {\n\t\t\te.authState.Login(username, r)\n\t\t}\n\n\t\te.authState.SetFlash(\"Successfully added '\"+username+\"' user.\", r)\n\t\thttp.Redirect(w, r, \"/\", http.StatusSeeOther)\n\n\tcase \"PUT\":\n\t\t// Update an existing record.\n\tcase \"DELETE\":\n\t\t// Remove the record.\n\tdefault:\n\t\t// Give an error message.\n\t}\n}",
"func CreateUserHandler(w http.ResponseWriter, r *http.Request) {\n\n\tuser := &models.User{}\n\terr := json.NewDecoder(r.Body).Decode(user) //decode the request body into struct and fail if any error occur\n\tif err != nil {\n\t\tfmt.Println(\"Debug user CreateUserHandler:\", err)\n\t\tutils.Respond(w, utils.Message(false, \"Invalid request\"))\n\t\treturn\n\t}\n\n\tresp := user.Create() //Create user\n\tutils.Respond(w, resp)\n}",
"func (handler *Handler) handleUserCreate(w http.ResponseWriter, r *http.Request) {\n\n\t//Create an empty new user\n\tnewUser := handler.userHelper.NewEmptyUser()\n\n\t/**\n\tDefine a struct for just updating password\n\t*/\n\ttype newUserStruct struct {\n\t\tEmail string `json:\"email\"`\n\t\tPassword string `json:\"password\"`\n\t}\n\n\t//Create the new user\n\tnewUserInfo := &newUserStruct{}\n\n\t//decode the request body into struct and failed if any error occur\n\terr := json.NewDecoder(r.Body).Decode(newUserInfo)\n\tif err != nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusUnprocessableEntity, false, err.Error())\n\t\treturn\n\n\t}\n\n\t//Copy over the new user data\n\tnewUser.SetEmail(newUserInfo.Email)\n\tnewUser.SetPassword(newUserInfo.Password)\n\n\t//Now create the new suer\n\terr = handler.userHelper.createUser(newUser)\n\n\tif err != nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusUnprocessableEntity, false, err.Error())\n\t\treturn\n\t}\n\n\t//Check to see if the user was created\n\tif err == nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusCreated, true, \"create_user_added\")\n\t} else {\n\t\tutils.ReturnJsonStatus(w, http.StatusUnprocessableEntity, false, err.Error())\n\t}\n\n}",
"func (h *Handler) PostUser(w http.ResponseWriter, r *http.Request) {\n\tdecoder := json.NewDecoder(r.Body)\n\n\tvar newUser User\n\tif err := decoder.Decode(&newUser); err != nil {\n\t\tif err.Error() == \"EOF\" {\n\t\t\thttp.Error(w, \"Empty user request\", 400)\n\t\t\treturn\n\t\t}\n\t\th.Logger.Errorf(\"err decoding user request: %s\", err)\n\t\thttp.Error(w, http.StatusText(500), 500)\n\t\treturn\n\t}\n\n\tif newUser.Email == \"\" || newUser.Password == \"\" {\n\t\thttp.Error(w, \"Invalid email or password\", 400)\n\t\treturn\n\t}\n\n\t// Hash password\n\tbytes, err := bcrypt.GenerateFromPassword([]byte(newUser.Password), bcrypt.DefaultCost)\n\tif err != nil {\n\t\th.Logger.Errorf(\"err hashing password: %s\", err)\n\t}\n\tnewUser.Password = string(bytes)\n\n\t_, err = h.Collection.InsertOne(context.TODO(), newUser)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"duplicate key error\") {\n\t\t\thttp.Error(w, \"User already exists\", 400)\n\t\t\treturn\n\t\t}\n\t\th.Logger.Errorf(\"error creating user: %s\", err)\n\t\thttp.Error(w, http.StatusText(500), 500)\n\t\treturn\n\t}\n\n\tnewUser.Password = \"\"\n\n\trender.JSON(w, r, newUser)\n}",
"func UserCreate(w http.ResponseWriter, r *http.Request) {\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\tm.Message = fmt.Sprintf(\"Error al leer el usuario a registrarse: %s\", err)\n\t\tm.Code = http.StatusBadRequest\n\t\tcommons.DisplayMessage(w, m)\n\t\treturn\n\t}\n\tif user.Password != user.ConfirmPassword {\n\t\tm.Message = \"Las contraseña no coinciden\"\n\t\tm.Code = http.StatusBadRequest\n\t\tcommons.DisplayMessage(w, m)\n\t\treturn\n\t}\n\tuser.Password = password\n\tavatarmd5 := md5.Sum([]byte(user.Password))\n\tavatarstr := fmt.Sprintf(\"%x\", avatarmd5)\n\tuser.Avatar = \"https://gravatar.com/avatar/\" + avatarstr + \"?s=100\"\n\tdatabase := configuration.GetConnection()\n\tdefer database.Close()\n\terr = database.Create(&user).Error\n\tif err != nil {\n\t\tm.Message = fmt.Sprintf(\"Error al crear el registro: %s\", err)\n\t\tm.Code = http.StatusBadRequest\n\t\tcommons.DisplayMessage(w, m)\n\t\treturn\n\t}\n\tm.Message = \"Usuario creado con éxito\"\n\tm.Code = http.StatusCreated\n\tcommons.DisplayMessage(w, m)\n}",
"func (uh UserHandler) CreateUser(w http.ResponseWriter, r *http.Request) {\n\tlog.Print(utils.InfoLog + \"UserHandler:CreateUser called\")\n\tvar newUser models.User\n\n\treqBody, genErr := ioutil.ReadAll(r.Body); if genErr != nil {\n\t\tutils.ReturnWithError(w, http.StatusBadRequest, http.StatusText(http.StatusBadRequest), genErr.Error())\n\t\tlog.Println(utils.ErrorLog + \"Unable to read request body\")\n\t\treturn\n\t}\n\n\tjson.Unmarshal(reqBody, &newUser)\n\t_, genErr = valid.ValidateStruct(&newUser) ; if genErr != nil {\n\t\tutils.ReturnWithError(w, http.StatusBadRequest, http.StatusText(http.StatusBadRequest), genErr.Error())\n\t\tlog.Println(utils.ErrorLog + \"Request body data invalid\")\n\t\treturn\n\t}\n\terr := models.ValidateUser(&newUser); if err != nil {\n\t\tutils.ReturnWithErrorLong(w, *err)\n\t\tlog.Println(utils.ErrorLog + \"Request body data invalid\") // TODO ??\n\t\treturn\n\t}\n\n\terr = uh.UserManager.CreateUser(&newUser); if err != nil {\n\t\tutils.ReturnWithErrorLong(w, *err)\n\t\tlog.Println(utils.ErrorLog + \"Insert body here\") // TODO ??\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n\tjson.NewEncoder(w).Encode(newUser)\n}",
"func PostUser(w http.ResponseWriter, req *http.Request) {\n\tID := req.FormValue(\"id\")\n\tnameStr := req.FormValue(\"name\")\n\tname := string(nameStr)\n\n\tuser := db.User{ID: ID, Name: name}\n\n\tdb.Save(user)\n\n\tw.Write([]byte(\"OK\"))\n}",
"func (a *API) userSignupPostHandler(w http.ResponseWriter, r *http.Request) {\n\t// Validate user input\n\tvar u model.User\n\terr := json.NewDecoder(r.Body).Decode(&u)\n\tif err != nil {\n\t\tresponse.Errorf(w, r, err, http.StatusInternalServerError, \"Internal Server Error\")\n\t\treturn\n\t} else if u.Name == \"\" {\n\t\tresponse.Errorf(w, r, nil, http.StatusBadRequest, \"Name is missing\")\n\t\treturn\n\t} else if u.Lastname == \"\" {\n\t\tresponse.Errorf(w, r, nil, http.StatusBadRequest, \"Lastname is missing\")\n\t\treturn\n\t} else if u.Email == \"\" {\n\t\tresponse.Errorf(w, r, nil, http.StatusBadRequest, \"Email address is missing\")\n\t\treturn\n\t} else if u.Password == \"\" {\n\t\tresponse.Errorf(w, r, nil, http.StatusBadRequest, \"Password is missing\")\n\t\treturn\n\t}\n\t// Always use the lower case email address\n\tu.Email = strings.ToLower(u.Email)\n\n\t// Create jwt token\n\tu.Token, err = a.createJWT(jwt.MapClaims{\n\t\t\"email\": u.Email,\n\t\t\"name\": u.Name,\n\t\t\"lastname\": u.Lastname,\n\t\t\"password\": u.Password,\n\t\t\"exp\": time.Now().Add(time.Hour * 24 * 7).Unix(),\n\t})\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\t// Hash the user password\n\terr = u.HashPassword()\n\tif err != nil {\n\t\tresponse.Errorf(w, r, err, http.StatusInternalServerError, \"Internal Server Error\")\n\t\treturn\n\t}\n\t// Save user to database\n\tInsertedUserID, err := a.db.CreateUser(&u)\n\tif err != nil {\n\t\tif err.Error() == \"email_address_already_exists\" {\n\t\t\tresponse.Errorf(w, r, err, http.StatusBadRequest, \"Email address already exists\")\n\t\t\treturn\n\t\t}\n\t\tresponse.Errorf(w, r, err, http.StatusInternalServerError, \"Internal Server Error\")\n\t\treturn\n\t}\n\treturnUser := model.ViewUser{\n\t\tID: InsertedUserID,\n\t\tName: u.Name,\n\t\tLastname: u.Lastname,\n\t\tEmail: u.Email,\n\t\tPassword: u.Password,\n\t\tToken: u.Token,\n\t}\n\t//\thttp.SetCookie(w, &http.Cookie{\n\t//\t\tName: \"token\",\n\t//\t\tValue: u.Token,\n\t//\t\tPath: \"/\",\n\t//\t})\n\n\t// Omit password\n\t//\tu.ID, err = primitive.ObjectIDFromHex(InsertedUserID)\n\t//\tif err != nil {\n\t//\t\tresponse.Errorf(w, r, err, http.StatusInternalServerError, \"Internal Server Error\")\n\t//\t\treturn\n\t//\t}\n\t//\tu.Password = \"\"\n\tresponse.Write(w, r, returnUser)\n}",
"func createHandler(w http.ResponseWriter, r *http.Request) {\n\tusername := r.FormValue(\"user\")\n\tpass := r.FormValue(\"pass\")\n\tuser := User{}\n\terr := userDB.Find(bson.M{\"username\": username}).One(&user)\n\n\tif user.UserName == username {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintln(w, \"User already exists\")\n\t\treturn\n\t}\n\n\tif len(pass) < 8 {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintln(w, \"Password must contain at least 8 characters\")\n\t\treturn\n\t}\n\n\thash, _ := bcrypt.GenerateFromPassword([]byte(pass), 10)\n\tuser.UserName = username\n\tuser.Hash = hash\n\terr = userDB.Insert(&user)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintln(w, \"Error creating user.\")\n\t\treturn\n\t}\n\n\tr.URL.Path = \"/login\"\n}",
"func PostUser(w http.ResponseWriter, r *http.Request) {\n\tvar user models.User\n\tjson.NewDecoder(r.Body).Decode(&user)\n\tmodels.CreateUser(user)\n}",
"func (h Handler) CreateUser(w http.ResponseWriter, r *http.Request) {\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(r.Body)\n\tdefer r.Body.Close()\n\n\tvar usersRequestDTO model.UserRequestDTO\n\n\t//Unmarshall body\n\n\tvar err error\n\tif err = json.Unmarshal(buf.Bytes(), &usersRequestDTO); err != nil {\n\t\thttp.Error(w, \"Invalid body\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t//Validate dto\n\n\tif !users.ValidateUsersRequestDTO(usersRequestDTO) {\n\t\thttp.Error(w, \"Invalid body\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t//Check If user exists\n\n\tvar found bool\n\tif _, found, err = h.Db.GetUser(usersRequestDTO.Username); found && err == nil {\n\t\thttp.Error(w, \"Username already exists\", http.StatusConflict)\n\t\treturn\n\t}\n\n\tif !found && err != nil {\n\t\thttp.Error(w, \"Error generating user\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t//Create User\n\n\tvar user model.User\n\tif user, err = users.CreateUser(usersRequestDTO.Username, usersRequestDTO.Password); err != nil {\n\t\thttp.Error(w, \"Error generating user\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t//Post user\n\n\tuser, err = h.Db.InsertUser(user)\n\tif err != nil {\n\t\thttp.Error(w, \"Error generating user\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := json.NewEncoder(w).Encode(model.UserResponseDTO{user.Userid}); err != nil {\n\t\thttp.Error(w, \"Write error\", http.StatusInternalServerError)\n\t}\n}",
"func createHandler(w http.ResponseWriter, r *http.Request) {\n\tuser := new(User)\n\tuser.Token = validateToken(r.FormValue(\"token\"))\n\tuser.PasswordHash = validatePassHash(r.FormValue(\"passHash\"))\n\tuser.PublicKey = validatePublicKey(r.FormValue(\"publicKey\"))\n\tuser.PublicHash = computePublicHash(user.PublicKey)\n\tuser.CipherPrivateKey = validateHex(r.FormValue(\"cipherPrivateKey\"))\n\n\tlog.Printf(\"Woot! New user %s %s\\n\", user.Token, user.PublicHash)\n\n\tif !SaveUser(user) {\n\t\thttp.Error(w, \"That username is taken\", http.StatusBadRequest)\n\t}\n}",
"func postRegistrationHandler(w http.ResponseWriter, r *http.Request, _ map[string]string) {\n\tif database.RetrieveUsersCount() == 0 { // TODO: Or check if authenticated user is admin when adding users from inside the admin area\n\t\tname := r.FormValue(\"name\")\n\t\temail := r.FormValue(\"email\")\n\t\tpassword := r.FormValue(\"password\")\n\t\tif name != \"\" && password != \"\" {\n\t\t\thashedPassword, err := authentication.EncryptPassword(password)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tuser := structure.User{Name: []byte(name), Slug: slug.Generate(name, \"users\"), Email: []byte(email), Image: []byte(filenames.DefaultUserImageFilename), Cover: []byte(filenames.DefaultUserCoverFilename), Role: 4}\n\t\t\terr = methods.SaveUser(&user, hashedPassword, 1)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thttp.Redirect(w, r, \"/admin\", http.StatusFound)\n\t\t\treturn\n\t\t}\n\t\thttp.Redirect(w, r, \"/admin\", http.StatusFound)\n\t\treturn\n\t}\n\t// TODO: Handle creation of other users (not just the first one)\n\thttp.Error(w, \"Not implemented yet.\", http.StatusInternalServerError)\n\treturn\n}",
"func CreateUser(response http.ResponseWriter, request *http.Request) {\n\n\t\n\t\trequest.ParseForm()\n\t\tdecoder := json.NewDecoder(request.Body)\n\t\tvar newUser User\n\t\t\n\t\terr := decoder.Decode(&newUser)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\t\n newUser.Password=hashAndSalt([]byte(newUser.Password))\n\t\t\n\t\tinsertUser(newUser)\n\t\n}",
"func createUserHandler(res http.ResponseWriter, req *http.Request) {\n\tvar user MongoUserSchema\n\tjson.NewDecoder(req.Body).Decode(&user)\n\t// fmt.Println(hash(user.Password))\n\tif checkEmailValidity(user.Email) == false {\n\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\tres.Write([]byte(\"Invalid e-mail id!\"))\n\t\treturn\n\t}\n\n\tusersCol := client.Database(\"Aviroop_Nandy_Appointy\").Collection(\"users\")\n\tctx, _ := context.WithTimeout(context.Background(), 15*time.Second)\n\tcursor, err := usersCol.Find(ctx, bson.M{})\n\n\tfor cursor.Next(ctx) {\n\t\tvar backlogUser MongoUserSchema\n\t\tcursor.Decode(&backlogUser)\n\t\tif backlogUser.Email == user.Email {\n\t\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\t\tres.Write([]byte(`{\"This e-mail is already registered!\":\"` + err.Error() + `\"}`))\n\t\t\treturn\n\t\t}\n\t}\n\n\thashedPswd := hashPassword(user.Password)\n\tuser.Password = hashedPswd\n\n\tuserResult, insertErrorUser := usersCol.InsertOne(ctx, user)\n\tif insertErrorUser != nil {\n\t\tfmt.Println(\"Error while creating user: \", insertErrorUser)\n\t} else {\n\t\tjson.NewEncoder(res).Encode(userResult)\n\t\tuserID := userResult.InsertedID\n\t\tfmt.Println(\"New user id: \", userID)\n\t}\n\n\tres.Header().Add(\"content-type\", \"application/json\")\n\tres.WriteHeader(http.StatusOK)\n}",
"func NewUserHandler() *UserHandler {\n h := &UserHandler{\n Router: httprouter.New(),\n Logger: log.New(os.Stderr, \"\", log.LstdFlags),\n }\n h.POST(\"/api/user\", h.handlePostUser)\n return h\n}",
"func createUser(c *gin.Context) {\n password,_ := HashPassword(c.PostForm(\"password\"))\n\tuser := user{Login: c.PostForm(\"login\"), Password: password}\n\tdb.Save(&user)\n\tc.JSON(http.StatusCreated, gin.H{\"status\": http.StatusCreated, \"message\": \"User item created successfully!\"})\n}",
"func (db *DB) PostUser(w http.ResponseWriter, r *http.Request) {\n\n\tcontentType := r.Header.Get(\"content-type\")\n\n\tif contentType == \"application/json\" {\n\t\tvar user User\n\n\t\terr := json.NewDecoder(r.Body).Decode(&user)\n\t\tif err != nil {\n\t\t\tlog.Println(\"error in encoding the post body\", err)\n\t\t}\n\n\t\t// store id\n\t\tuser.ID = bson.NewObjectId()\n\t\t\n\t\tcollection := db.Database.C(\"users\")\n\t\terr = collection.Insert(user)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tw.Write([]byte(\"Cannot insert user into database\"))\n\n\t\t\tlog.Fatalln(err.Error())\n\t\t} else {\n\t\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\t\tresponse, err := json.Marshal(user)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"error in converting struct to json\", err)\n\t\t\t}\n\t\n\t\t\tw.Write(response)\n\t\t}\n\t} else {\n\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\tw.Write([]byte(\"unsupported format\"))\n\t}\n\n}",
"func createNewUser(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tparams := mux.Vars(r)\n\tvar userInfo UserBody\n\t//decode the json object and store the values in userInfo\n\terr := json.NewDecoder(r.Body).Decode(&userInfo)\n\tif err != nil {\n\t\tfmt.Println(\"ERROR DECODING JSON OBJ FROM CREATE NEW USER\")\n\t}\n\tresult := post.CreateUser(params[\"id\"], userInfo.FirstName, userInfo.LastName, userInfo.Email)\n\tjson.NewEncoder(w).Encode(map[string]bool{\n\t\t\"result\": result,\n\t})\n}",
"func UserRegisterPostHandler(w http.ResponseWriter, r *http.Request) {\n\tb := form.RegistrationForm{}\n\terr := form.NewErrors()\n\tif !captcha.Authenticate(captcha.Extract(r)) {\n\t\terr[\"errors\"] = append(err[\"errors\"], \"Wrong captcha!\")\n\t}\n\tif len(err) == 0 {\n\t\tif len(r.PostFormValue(\"email\")) > 0 {\n\t\t\t_, err = form.EmailValidation(r.PostFormValue(\"email\"), err)\n\t\t}\n\t\t_, err = form.ValidateUsername(r.PostFormValue(\"username\"), err)\n\t\tif len(err) == 0 {\n\t\t\tmodelHelper.BindValueForm(&b, r)\n\t\t\terr = modelHelper.ValidateForm(&b, err)\n\t\t\tif len(err) == 0 {\n\t\t\t\t_, errorUser := userService.CreateUser(w, r)\n\t\t\t\tif errorUser != nil {\n\t\t\t\t\terr[\"errors\"] = append(err[\"errors\"], errorUser.Error())\n\t\t\t\t}\n\t\t\t\tif len(err) == 0 {\n\t\t\t\t\tlanguages.SetTranslationFromRequest(viewRegisterSuccessTemplate, r, \"en-us\")\n\t\t\t\t\tu := model.User{\n\t\t\t\t\t\tEmail: r.PostFormValue(\"email\"), // indicate whether user had email set\n\t\t\t\t\t}\n\t\t\t\t\thtv := UserRegisterTemplateVariables{b, err, NewSearchForm(), Navigation{}, &u, r.URL, mux.CurrentRoute(r)}\n\t\t\t\t\terrorTmpl := viewRegisterSuccessTemplate.ExecuteTemplate(w, \"index.html\", htv)\n\t\t\t\t\tif errorTmpl != nil {\n\t\t\t\t\t\thttp.Error(w, errorTmpl.Error(), http.StatusInternalServerError)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif len(err) > 0 {\n\t\tb.CaptchaID = captcha.GetID()\n\t\tlanguages.SetTranslationFromRequest(viewRegisterTemplate, r, \"en-us\")\n\t\thtv := UserRegisterTemplateVariables{b, err, NewSearchForm(), Navigation{}, GetUser(r), r.URL, mux.CurrentRoute(r)}\n\t\terrorTmpl := viewRegisterTemplate.ExecuteTemplate(w, \"index.html\", htv)\n\t\tif errorTmpl != nil {\n\t\t\thttp.Error(w, errorTmpl.Error(), http.StatusInternalServerError)\n\t\t}\n\t}\n}",
"func CreateUser(c *gin.Context) {\n\tvar user Models.User\n\tc.BindJSON(&user)\n\n\tauth := c.Request.Header.Get(\"Authorization\")\n if auth != Utils.GetAuthToken() {\n\t\tc.JSON(http.StatusUnauthorized, gin.H{\n\t\t\t\"error\" : gin.H { \n\t\t\t\"status\": http.StatusUnauthorized,\n\t\t\t\"message\": \"Invalid Token\",\n\t\t}})\n\t\tc.Abort()\n\t\treturn\n\t}\n\tvar now = time.Now().Unix()\n\tnur := Models.User(user)\n\t err_password := validator.Validate(nur)\n\t if err_password != nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"error\" : gin.H { \n\t\t\t\"status\": http.StatusNotFound,\n\t\t\t\"message\": err_password.Error(),\n\t\t}})\n\t\tfmt.Println(err_password.Error())\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t\treturn\n\t}\n\n\t//user.Password = Utils.EncodeBase64(user.Password)\n\tuser.Date_created = Utils.ConvertTimestampToDate(int64(now))\n\terr := Models.CreateUser(&user)\n\tif err != nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"error\" : gin.H { \n\t\t\t\"status\": http.StatusNotFound,\n\t\t\t\"message\": err.Error(),\n\t\t}})\n\t\tfmt.Println(err.Error())\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t} else {\n\t\tc.JSON(http.StatusOK, user)\n\t\tfmt.Println(\"usuario_creado\", user.Id)\n\t}\n}",
"func CreateUser(w http.ResponseWriter, r *http.Request) {\n\t// create an empty user of type models.User\n\tvar user models.User\n\n\t// decode the json request to user\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to decode the request body. %v\", err)\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tif !user.Valid() {\n\t\thttp.Error(w, \"Invalid User\", http.StatusBadRequest)\n\t\treturn\n\t}\n\t// set the header to content type x-www-form-urlencoded\n\t// Allow all origin to handle cors issue\n\tw.Header().Set(\"Context-Type\", \"application/x-www-form-urlencoded\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Methods\", \"POST\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\t//set the hash\n\thashedPass, err := user.HashPassword()\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to create hash of the given password. %v\", err)\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\t// call insert user function and pass the user\n\terr = database.InsertUser(user.Email, hashedPass, user.FirstName, user.LastName)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to insert user. %v\", err)\n\t\thttp.Error(w, \"\", http.StatusBadRequest)\n\t\treturn\n\t}\n\ttkn, err := models.CreateToken(user.Email)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to create token. %v\", err)\n\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// format a response object\n\tres := models.TokenResponse{\n\t\tToken: tkn,\n\t}\n\t// send the response\n\terr = json.NewEncoder(w).Encode(res)\n\tif err != nil {\n\t\tlogrus.Errorf(err.Error())\n\t\treturn\n\t}\n}",
"func CreateUserHandler() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\trole, _ := c.Get(\"Role\")\n\n\t\tif role != \"admin\" {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\t\"status\": \"error\",\n\t\t\t\t\"message\": \"forbidden\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\tvar data entity.RegistrationUserEntity\n\t\trepositoryService := repository.UserRepository{}\n\t\tuserService := UserService{repositoryService}\n\n\t\t_ = c.ShouldBindJSON(&data)\n\n\t\tuserData, err := userService.CreateUser(data)\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\t\"status\": \"error\",\n\t\t\t\t\"message\": err.Error(),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\tc.JSON(http.StatusOK, gin.H{\n\t\t\t\"status\": \"ok\",\n\t\t\t\"data\": userData,\n\t\t})\n\t}\n}",
"func NewUserCreateHandler(db *gorm.DB) echo.HandlerFunc {\n\treturn func(ctx echo.Context) error {\n\t\tuser := &model.User{}\n\t\tif err := ctx.Bind(user); err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err)\n\t\t}\n\n\t\tif err := user.Validate(); err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusUnprocessableEntity, err)\n\t\t}\n\n\t\thashBytes, err := bcrypt.GenerateFromPassword([]byte(user.Password), 10)\n\t\tif err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err)\n\t\t}\n\n\t\tuser.Password = \"\"\n\t\tuser.PasswordDigest = hashBytes\n\t\tuser.ResetJWTToken()\n\n\t\tif err := db.Create(user).Error; err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err.Error())\n\t\t}\n\n\t\treturn ctx.JSON(http.StatusCreated, user)\n\t}\n}",
"func UserRegisterPost(w http.ResponseWriter, r *http.Request) {\n\t// Get session\n\tsess := session.Instance(r)\n\n\t// Prevent brute force login attempts by not hitting MySQL and pretending like it was invalid :-)\n\tif sess.Values[\"register_attempt\"] != nil && sess.Values[\"register_attempt\"].(int) >= 5 {\n\t\tlog.Println(\"Brute force register prevented\")\n\t\thttp.Redirect(w, r, \"/not_found\", http.StatusFound)\n\t\treturn\n\t}\n\n\tbody, readErr := ioutil.ReadAll(r.Body)\n\tif readErr != nil {\n\t\tlog.Println(readErr)\n\t\tReturnError(w, readErr)\n\t\treturn\n\t}\n\n\tvar regResp webpojo.UserCreateResp\n\tif len(body) == 0 {\n\t\tlog.Println(\"Empty json payload\")\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_400, constants.Msg_400}\n\t\tbs, err := json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t\treturn\n\t}\n\n\t//log.Println(\"r.Body\", string(body))\n\tregReq := webpojo.UserCreateReq{}\n\tjsonErr := json.Unmarshal(body, ®Req)\n\tif jsonErr != nil {\n\t\tlog.Println(jsonErr)\n\t\tReturnError(w, jsonErr)\n\t\treturn\n\t}\n\tlog.Println(regReq.Email)\n\n\t// Validate with required fields\n\tif validate, _ := validateRegisterInfo(r, ®Req, constants.DefaultRole); !validate {\n\t\tlog.Println(\"Invalid reg request! Missing field\")\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_400, constants.Msg_400}\n\t\tbs, err := json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t\treturn\n\t}\n\n\tpassword, errp := passhash.HashString(regReq.Password)\n\n\t// If password hashing failed\n\tif errp != nil {\n\t\tlog.Println(errp)\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_500, constants.Msg_500}\n\t\tbs, err := json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t\treturn\n\t}\n\n\t// Get database result\n\t_, err := model.UserByEmail(regReq.Email)\n\n\tif err == model.ErrNoResult { // If success (no user exists with that email)\n\t\tex := model.UserCreate(regReq.FirstName, regReq.LastName, regReq.Email, password)\n\t\t// Will only error if there is a problem with the query\n\t\tif ex != nil {\n\t\t\tlog.Println(ex)\n\t\t\tRecordRegisterAttempt(sess)\n\t\t\tsess.Save(r, w)\n\t\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_500, constants.Msg_500}\n\t\t\tbs, err := json.Marshal(regResp)\n\t\t\tif err != nil {\n\t\t\t\tReturnError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Fprint(w, string(bs))\n\t\t} else {\n\t\t\tlog.Println(\"Account created successfully for: \" + regReq.Email)\n\t\t\tRecordRegisterAttempt(sess)\n\t\t\tsess.Save(r, w)\n\t\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_200, constants.Msg_200}\n\t\t\tbs, err := json.Marshal(regResp)\n\t\t\tif err != nil {\n\t\t\t\tReturnError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Fprint(w, string(bs))\n\t\t}\n\t} else if err != nil { // Catch all other errors\n\t\tlog.Println(err)\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_500, constants.Msg_500}\n\t\tbs, err := json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t} else { // Else the user already 
exists\n\t\tlog.Println(\"User already existed!!!\")\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_400, constants.Msg_400}\n\t\tbs, err := json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t}\n}",
"func Post(w http.ResponseWriter, r *http.Request) {\n\tvar errs []string\n\tvar gocqlUuid gocql.UUID\n\n\tuser, errs := FormToUser(r)\n\n\tcreated := false\n\n\tif len(errs) == 0 {\n\t\tfmt.Println(\"creating a new user\")\n\n\t\t// generate a UUID for the user\n\t\tgocqlUuid = gocql.TimeUUID()\n\n\t\t// write data to Cassandra\n\t\tif err := cassandra.Session.Query(\n\t\t\t`INSERT INTO users (id, firstname, lastname, email, city, age) VALUES (?, ?, ?, ?, ?, ?)`,\n\t\t\tgocqlUuid,\n\t\t\tuser.FirstName,\n\t\t\tuser.LastName,\n\t\t\tuser.Email,\n\t\t\tuser.City,\n\t\t\tuser.Age,\n\t\t).Exec(); err != nil {\n\t\t\terrs = append(errs, err.Error())\n\t\t} else {\n\t\t\tcreated = true\n\t\t}\n\t}\n\n\tif created {\n\t\tfmt.Println(\"user_id\", gocqlUuid)\n\t\tjson.NewEncoder(w).Encode(NewUserResponse{ID: gocqlUuid})\n\t} else {\n\t\tfmt.Println(\"errors\", errs)\n\t\tjson.NewEncoder(w).Encode(ErrorResponse{Errors: errs})\n\t}\n}",
"func (h *Handler) createUser(c *gin.Context) handlerResponse {\n\n\tvar newUser types.User\n\tif err := c.ShouldBindJSON(&newUser); err != nil {\n\t\treturn handleBadRequest(err)\n\t}\n\tstoredUser, err := h.service.User.Create(newUser, h.who(c))\n\tif err != nil {\n\t\treturn handleError(err)\n\t}\n\t// Remove password so we do not show in response\n\tstoredUser.Password = \"\"\n\treturn handleCreated(storedUser)\n}",
"func (h *userHandler) CreateUserHandler(c *gin.Context) {\n\tvar inputUser entity.UserInput\n\n\tif err := c.ShouldBindJSON(&inputUser); err != nil {\n\t\tsplitError := helper.SplitErrorInformation(err)\n\t\tresponseError := helper.APIResponse(\"input data required\", 400, \"bad request\", gin.H{\"errors\": splitError})\n\n\t\tc.JSON(400, responseError)\n\t\treturn\n\t}\n\n\tnewUser, err := h.userService.SaveNewUser(inputUser)\n\tif err != nil {\n\t\tresponseError := helper.APIResponse(\"internal server error\", 500, \"error\", gin.H{\"error\": err.Error()})\n\n\t\tc.JSON(500, responseError)\n\t\treturn\n\t}\n\n\tresponse := helper.APIResponse(\"success create new User\", 201, \"status Created\", newUser)\n\tc.JSON(201, response)\n}",
"func CreateUser(w http.ResponseWriter, req *http.Request) {\n\tbody, err := ioutil.ReadAll(io.LimitReader(req.Body, 1048576))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := req.Body.Close(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar user User\n\terr = json.Unmarshal(body, &user)\n\tif err != nil {\n\t\tw.WriteHeader(422)\n\t\tlog.Println(err.Error())\n\t}\n\n\tInsertUser(user)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusCreated)\n\tif err := json.NewEncoder(w).Encode(user); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}",
"func agregarUsuario(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar User usr\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Insert a Valid Task Data\")\n\t}\n\tjson.Unmarshal(reqBody, &User)\n\tfmt.Println(User)\n\tpol := newCn()\n\tpol.abrir()\n\trows, err := pol.db.Query(\"insert into usuario(username, password, nombre, apellido, fecha_nacimiento, correo) values(:1,:2,:3,:4,to_date(:5, 'yyyy/mm/dd'),:6)\", User.User, User.Contrasena, User.Nombre, User.Apellido, User.Fechanacimiento, User.Correo)\n\tpol.cerrar()\n\tif err != nil {\n\t\tfmt.Println(\"Error running query\")\n\t\tfmt.Println(err)\n\t\tfmt.Fprintf(w, \"usuario ya existe o correo invalido\")\n\t\treturn\n\t} else {\n\t\tfmt.Fprintf(w, \"registro exitos\")\n\t}\n\tdefer rows.Close()\n\n}",
"func CreateUser(w http.ResponseWriter, r *http.Request) {\n\trequestBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Fatal(\"Error\")\n\t}\n\n\tvar user models.User\n\tif err = json.Unmarshal(requestBody, &user); err != nil {\n\t\tlog.Fatal(\"Error\")\n\t}\n\n\tdb, err := database.OpenDbConnection()\n\tif err != nil {\n\t\tlog.Fatal(\"error\")\n\t}\n\n\trepository := repositories.UserRepository(db)\n\trepository.Create(user)\n}",
"func UserHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\n\tcase \"GET\":\n\n\t\tusersJSON, err := json.Marshal(db.Users)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tw.Write(usersJSON)\n\n\tcase \"POST\":\n\n\t\terr := utils.IsJsonValid(w, r)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tuserPayload := db.UserPayload{}\n\t\tb, err := ioutil.ReadAll(r.Body)\n\t\tdefer r.Body.Close()\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\terr = json.Unmarshal(b, &userPayload)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t//validate email\n\t\tvalidEmail := utils.IsEmailValid(userPayload.UserEmail)\n\t\tif !validEmail {\n\t\t\tmsg := \"Email address is not valid\"\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\taccessToken := utils.String(50)\n\n\t\tcookie := http.Cookie{\n\t\t\tName: \"userToken\",\n\t\t\tValue: accessToken,\n\t\t\tExpires: time.Time{},\n\t\t\tMaxAge: 86400,\n\t\t\tSecure: false,\n\t\t\tHttpOnly: false,\n\t\t\tSameSite: 0,\n\t\t}\n\t\thttp.SetCookie(w, &cookie)\n\n\t\tcookie = http.Cookie{\n\t\t\tName: \"userEmail\",\n\t\t\tValue: userPayload.UserEmail,\n\t\t\tExpires: time.Time{},\n\t\t\tMaxAge: 86400,\n\t\t\tSecure: false,\n\t\t\tHttpOnly: false,\n\t\t\tSameSite: 0,\n\t\t}\n\t\thttp.SetCookie(w, &cookie)\n\n\t\tuser := db.User{}\n\t\tuser.Token = accessToken\n\t\tuser.Devices = make(map[string]*db.Device)\n\n\t\tdb.Users[userPayload.UserEmail] = &user\n\n\t\tw.Write([]byte(accessToken))\n\t}\n}",
"func CreateUser(w http.ResponseWriter, r *http.Request){\n\n\t\tu := User{}\n\n\t\terr:= json.NewDecoder(r.Body).Decode(&u)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t// Checks if name is Empty\n\t\tfmt.Printf(\"name: [%+v]\\n\", u.Name)\n\t\tif u.Name == \"\" {\n\t\t\tfmt.Println(\"Empty string\")\n\t\t\tw.Write([]byte(`{\"status\":\"Invalid Name\"}`))\n\t\t\treturn\n\t\t}\n\n\n\t\t//start validation for username\n\t\tvar isStringAlphabetic = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9]*$`).MatchString\n\t\tif !isStringAlphabetic(u.Name){\n\t\t\tfmt.Println(\"is not alphanumeric\")\n\t\t\tw.Write([]byte(`{\"status\":\"Invalid Name\"}`))\n\t\t\treturn\n\t\t}\n\n\t\t//make the Name Uppercase\n\t\tu.Name = strings.ToUpper(u.Name)\n\n\t\t// check if username already exists\n\t\tuser := userExist(u.Name)\n\t\tif user != (User{}) {\n\t\t\tfmt.Println(\"Name already exists\")\n\t\t\tw.Write([]byte(`{\"status\":\"Name Exists\"}`))\n\t\t\treturn\n\t\t}\n\n\t\t//if it does exist create the user with a random ID and score = 0\n\t\tuuid, err := uuid.NewV4()\n\t\tu.ID = uuid.String()\n\t\tu.Score = 0\n\n\t\tquery := \"INSERT INTO users (id, name, score) VALUES ($1, $2, $3);\"\n\t\t_, err = db.Exec(query, u.ID, u.Name, u.Score);\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(201)\n\t\tjson.NewEncoder(w).Encode(u)\n\n}",
"func CreateUser(w http.ResponseWriter, r *http.Request) {\r\n\tdefer r.Body.Close()\r\n\tvar user model.User\r\n\r\n\tif err := json.NewDecoder(r.Body).Decode(&user); err != nil {\r\n\t\tlog.Println(err)\r\n\t\tu.RespondWithError(w, http.StatusBadRequest, \"Invalid request payload\")\r\n\t\treturn\r\n\t}\r\n\r\n\tif resp, ok := validate(&user); !ok {\r\n\t\tlog.Println(resp)\r\n\t\tu.RespondWithError(w, http.StatusBadRequest, resp)\r\n\t\treturn\r\n\t}\r\n\r\n\thashedPassword, _ := bcrypt.GenerateFromPassword([]byte(user.Password), bcrypt.DefaultCost)\r\n\tuser.Password = string(hashedPassword)\r\n\r\n\tif err := dao.DBConn.InsertUser(user); err != nil {\r\n\t\tlog.Println(err)\r\n\t\tu.RespondWithError(w, http.StatusInternalServerError, err.Error())\r\n\t\treturn\r\n\t}\r\n\r\n\tuser.Token = model.GenerateToken(user.Email)\r\n\r\n\t// Delete password before response\r\n\tuser.Password = \"\"\r\n\r\n\tu.RespondWithJSON(w, http.StatusOK, user)\r\n}",
"func (r *UsersResource) PostUser(c *gin.Context) {\n\tvar nu newUser\n\n\t// validate form input\n\tif err := c.Bind(&nu); err != nil {\n\t\tc.JSON(http.StatusBadRequest, strings.Split(err.Error(), \"\\n\"))\n\t\treturn\n\t}\n\n\t// verify a user with ID does not already exist\n\t// TODO: try to put this in middleware\n\tif existing, _ := r.userStore.User(nu.ID); existing != nil {\n\t\tc.Status(http.StatusConflict)\n\t\treturn\n\t}\n\n\t// create new user\n\tu := fountain.User{\n\t\tID: nu.ID,\n\t\tFullName: nu.FullName,\n\t\tEmail: nu.Email,\n\t}\n\n\t// insert new user into store\n\tif err := r.userStore.PutUser(&u); err != nil {\n\t\tc.Error(err)\n\t\tc.Status(http.StatusInternalServerError)\n\t} else {\n\t\tloc := fmt.Sprintf(\"%s/%s\", c.Request.URL.Path, u.ID)\n\t\tc.Redirect(http.StatusSeeOther, loc)\n\t}\n}",
"func (h *UserHandler) Create(w http.ResponseWriter, r *http.Request) {\n\tlog.Print(\"/users POST handled\")\n\n\treq := &CreateRequest{}\n\tif err := util.ScanRequest(r, req); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tuser := &schema.User{\n\t\tName: req.Name,\n\t}\n\n\tif err := h.model.Validate(user); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tres, err := h.model.Create(user)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := util.JSONWrite(w, res, http.StatusCreated); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}",
"func (server Server) CreateNewUser(w http.ResponseWriter, r *http.Request) {\n\tvar user models.User // make a user\n\tvar res models.APIResponse // make a response\n\n\terr := json.NewDecoder(r.Body).Decode(&user) //decode the user\n\tif err != nil {\n\t\tlog.Printf(\"Unable to decode the request body. %v\", err)\n\t\tres = models.BuildAPIResponseFail(\"Unable to decode the request body\", nil)\n\t}\n\tif user.Name == \"\" || user.Email == \"\" || user.Password == \"\" {\n\t\tres = models.BuildAPIResponseFail(\"Blank users cannot be created\", nil)\n\t} else {\n\t\tinsertID := insertUser(user, server.db) // call insert user function and pass the note\n\t\tres = models.BuildAPIResponseSuccess(fmt.Sprintf(\"User Created with %d id\", insertID), nil) // format a response object\n\t}\n\tjson.NewEncoder(w).Encode(res)\n\n}",
"func (a *API) userLoginPostHandler(w http.ResponseWriter, r *http.Request) {\n\t// Validate user input\n\tvar u model.User\n\terr := json.NewDecoder(r.Body).Decode(&u)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t} else if u.Email == \"\" {\n\t\tresponse.Errorf(w, r, nil, http.StatusBadRequest, \"Email address is missing\")\n\t\treturn\n\t} else if u.Password == \"\" {\n\t\tresponse.Errorf(w, r, nil, http.StatusBadRequest, \"Password is missing\")\n\t\treturn\n\t}\n\t// Always use the lower case email address\n\tu.Email = strings.ToLower(u.Email)\n\t// Get the user database entry\n\tuser, err := a.db.GetUserByEmail(u.Email)\n\tif err != nil {\n\t\tresponse.Errorf(w, r, err, http.StatusInternalServerError, \"Internal Server Error\")\n\t\treturn\n\t} else if user == nil {\n\t\tresponse.Errorf(w, r, err, http.StatusBadRequest, \"Invalid email address or password\")\n\t\treturn\n\t}\n\t// Check the password\n\tif !user.MatchPassword(u.Password) {\n\t\tresponse.Errorf(w, r, err, http.StatusBadRequest, \"Invalid email address or password\")\n\t\treturn\n\t}\n\t// Create jwt token\n\tuser.Token, err = a.createJWT(jwt.MapClaims{\n\t\t\"email\": user.Email,\n\t\t\"name\": user.Name,\n\t\t\"lastname\": user.Lastname,\n\t\t\"password\": user.Password,\n\t\t\"exp\": time.Now().Add(time.Hour * 24 * 7).Unix(),\n\t})\n\tif err != nil {\n\t\tresponse.Errorf(w, r, err, http.StatusInternalServerError, \"Internal Server Error\")\n\t\treturn\n\t}\n\t//\thttp.SetCookie(w, &http.Cookie{\n\t//\t\tName: \"token\",\n\t//\t\tValue: u.Token,\n\t//\t\tPath: \"/\",\n\t//\t})\n\tresponse.Write(w, r, user)\n}",
"func CreateUserHandler(ur UserRepo) func(c *gin.Context) {\n\treturn func(c *gin.Context) {\n\t\tswitch c.DefaultQuery(\"type\", \"direct\") {\n\t\tcase \"direct\":\n\t\t\tcreateUserDirectly(ur, c)\n\t\t\treturn\n\t\tcase \"facebook\":\n\t\t\tcreateUserByFacebook(ur, c)\n\t\t\treturn\n\t\tcase \"account-kit\":\n\t\t\tcreateUserByAccountKit(ur, c)\n\t\t\treturn\n\t\tcase \"gmail\":\n\t\t\tcreateUserByGmail(ur, c)\n\t\t\treturn\n\t\tcase \"apple\":\n\t\t\tcreateUserByApple(ur, c)\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (h *HTTPClientHandler) addUserHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Add(\"Access-Control-Allow-Origin\", ServerName)\n\t// adding new user to database\n\tvar userRequest User\n\n\tdefer r.Body.Close()\n\tbody, err := ioutil.ReadAll(r.Body)\n\n\tif err != nil {\n\t\t// failed to read response body\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Error(\"Could not read response body!\")\n\t\thttp.Error(w, \"Failed to read request body.\", 400)\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(body, &userRequest)\n\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\tw.WriteHeader(422) // can't process this entity\n\t\treturn\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"firstName\": userRequest.FirstName,\n\t\t\"lastName\": userRequest.LastName,\n\t\t\"userID\": userRequest.UserID,\n\t\t\"profilePicUrl\": userRequest.ProfilePicUrl,\n\t\t\"gender\": userRequest.Gender,\n\t\t\"body\": string(body),\n\t}).Info(\"Got user info\")\n\n\t// adding user\n\terr = h.db.addUser(userRequest)\n\n\tif err == nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\tw.WriteHeader(201) // user inserted\n\t\treturn\n\t} else {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Warn(\"Failed to insert..\")\n\n\t\tcontent, code := responseDetailsFromMongoError(err)\n\n\t\t// Marshal provided interface into JSON structure\n\t\tuj, _ := json.Marshal(content)\n\n\t\t// Write content-type, statuscode, payload\n\t\twriteJsonResponse(w, &uj, code)\n\n\t}\n\n}",
"func CreateUser(w http.ResponseWriter, r *http.Request) {\n\tu := User{}\n\terr := json.NewDecoder(r.Body).Decode(&u)\n\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(500), http.StatusInternalServerError)\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\terr = SaveUser(u.FullName, u.NickName, u.Email, u.Balance)\n\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(500), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n}",
"func (userHandler UserHandler) CreateUser() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar u user\n\n\t\tif err := json.NewDecoder(r.Body).Decode(&u); err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t_, _ = w.Write([]byte(\"Something went wrong while parsing the user from request body\"))\n\t\t\treturn\n\t\t}\n\n\t\tcreateUserCmd := application.CreateUserCommand{\n\t\t\tUsername: u.Username,\n\t\t\tPassword: u.Password,\n\t\t}\n\n\t\tid, err := userHandler.Cmd.Handle(r.Context(), createUserCmd)\n\n\t\tif err != nil {\n\t\t\thttpError, _ := err.(helpers.HttpError)\n\t\t\thttp.Error(w, httpError.Message, httpError.Code)\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusOK)\n\t\tjson.NewEncoder(w).Encode(id)\n\t}\n}",
"func CreateUser(w http.ResponseWriter, r *http.Request) {\n\tvar user models.User\n\tbodyRequest, err := ioutil.ReadAll(r.Body)\n\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\n\tif err = json.Unmarshal(bodyRequest, &user); err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tif err := user.Prepare(true); err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tif err := validateUniqueDataUser(user, true); err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tdb, err := database.Connect()\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t}\n\tdefer db.Close()\n\n\trepository := repository.NewRepositoryUser(db)\n\n\tuser.Id, err = repository.Insert(user)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, http.StatusCreated, user)\n\n}",
"func (ctx *Context) UserHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tdecoder := json.NewDecoder(r.Body)\n\t\tnewuser := &users.NewUser{}\n\t\tif err := decoder.Decode(newuser); err != nil {\n\t\t\thttp.Error(w, \"Invalid JSON\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\terr := newuser.Validate()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"User not valid\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tusr, _ := ctx.UserStore.GetByEmail(newuser.Email)\n\t\tif usr != nil {\n\t\t\thttp.Error(w, \"Email Already Exists\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tuser, err := ctx.UserStore.Insert(newuser)\n\t\tstate := &SessionState{\n\t\t\tBeganAt: time.Now(),\n\t\t\tClientAddr: r.RequestURI,\n\t\t\tUser: user,\n\t\t}\n\t\t_, err = sessions.BeginSession(ctx.SessionKey, ctx.SessionStore, state, w)\n\n\t\t_, err = ctx.UserStore.CreateLikesList(user)\n\t\t_, err = ctx.UserStore.CreateGroceryList(user)\n\n\t\tw.Header().Add(\"Content-Type\", contentTypeJSONUTF8)\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(user)\n\tcase \"GET\":\n\t\tusers, err := ctx.UserStore.GetAll()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error fetching users\", http.StatusInternalServerError)\n\t\t}\n\t\tw.Header().Add(\"Content-Type\", contentTypeJSONUTF8)\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(users)\n\t}\n}",
"func HandleUserCreate(c *gin.Context) {\n\tvar user User\n\terr := c.ShouldBindJSON(&user)\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"code\": 1,\n\t\t\t\"msg\": err.Error(),\n\t\t\t\"data\": gin.H{},\n\t\t})\n\t\treturn\n\t}\n\n\tuid, err := user.Create()\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"code\": 1,\n\t\t\t\"msg\": err.Error(),\n\t\t\t\"data\": gin.H{},\n\t\t})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"code\": 0,\n\t\t\"msg\": \"ok\",\n\t\t\"data\": gin.H{\n\t\t\t\"uid\": uid,\n\t\t},\n\t})\n}",
"func CreateUser(w http.ResponseWriter, r *http.Request) {\n\tdata := authInfo{}\n\terr := json.NewDecoder(r.Body).Decode(&data)\n\tif err != nil {\n\t\tutils.JSONRespnseWithErr(w, &utils.ErrPostDataNotCorrect)\n\t\treturn\n\t}\n\tmessage := models.SignUp(data.Email, data.Password, data.RoleID)\n\tutils.JSONResonseWithMessage(w, message)\n}",
"func createHandler(w http.ResponseWriter, r *http.Request) {\n user := new(User)\n user.Token = validateToken(r.FormValue(\"token\"))\n user.PasswordHash = validateHash(r.FormValue(\"passHash\"))\n user.PublicKey = validatePublicKey(r.FormValue(\"publicKey\"))\n user.PublicHash = computePublicHash(user.PublicKey)\n user.CipherPrivateKey = validateHex(r.FormValue(\"cipherPrivateKey\"))\n\n log.Printf(\"Woot! New user %s %s\\n\", user.Token, user.PublicHash)\n\n if !SaveUser(user) {\n http.Error(w, \"That username is taken\", http.StatusBadRequest)\n }\n}",
"func CreateUserHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Debug(\"userservice.CreateUserHandler called\")\n\tusername, err := apiserver.Authn(apiserver.CREATE_USER_PERM, w, r)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tw.Header().Set(\"WWW-Authenticate\", `Basic realm=\"Restricted\"`)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tvar request msgs.CreateUserRequest\n\t_ = json.NewDecoder(r.Body).Decode(&request)\n\n\tresp := msgs.CreateUserResponse{}\n\n\tvar ns string\n\tns, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace)\n\tif err != nil {\n\t\tresp.Status.Code = msgs.Error\n\t\tresp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR\n\t\tjson.NewEncoder(w).Encode(resp)\n\t\treturn\n\t}\n\n\tif request.ClientVersion != msgs.PGO_VERSION {\n\t\tresp.Status.Code = msgs.Error\n\t\tresp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR\n\t\tjson.NewEncoder(w).Encode(resp)\n\t\treturn\n\t}\n\n\tresp = CreateUser(&request, ns)\n\tjson.NewEncoder(w).Encode(resp)\n\n}",
"func NewUser(c *fiber.Ctx) {\n\t// create new user\n\tuser := new(User)\n\t// put post req body onto user struct\n\tif err := c.BodyParser(user); err != nil {\n\t\tc.Status(503).Send(err)\n\t\treturn\n\t}\n\t// put into db\n\tdatabase.DBConn.Create(&user)\n\tc.JSON(user)\n}",
"func CreateUser(c *gin.Context) {}",
"func (ac *ApiConfig) AddUserHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPost {\n\t\thttp.Error(w, r.Method+\" is not available\", http.StatusInternalServerError)\n\t\tzerolog.Error().Msg(r.Method + \" is not available\")\n\t\treturn\n\t}\n\n\tvar user *models.Users\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thashedPass, err := bcrypt.GenerateFromPassword([]byte(user.Password), 12)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tuser.Password = string(hashedPass)\n\n\terr = ac.DHolder.AddUser(user)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tstat := &models.StatusIdentifier{\n\t\tOk: true,\n\t\tMessage: \"User Added\",\n\t}\n\n\terr = dResponseWriter(w, stat, http.StatusOK)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\treturn\n\t}\n\n\treturn\n}",
"func CreateUser(clients *common.ClientContainer, handler common.HandlerInterface) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tuser := User{}\n\t\terr := json.NewDecoder(r.Body).Decode(&user)\n\t\tif err != nil {\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusBadRequest,\n\t\t\t\thttp.StatusText(http.StatusBadRequest),\n\t\t\t\terr.Error())\n\t\t\treturn\n\t\t}\n\t\tif len(user.Email) == 0 {\n\t\t\tif len(user.Name) == 0 {\n\t\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\t\"provide Either Name or Email in parameters\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif len(user.Email) != 0 {\n\t\t\tif !(emailValid.MatchString(user.Email)) {\n\t\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\t\"Email Invalid\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif len(user.Login) == 0 {\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\"provide Login in parameters\")\n\t\t\treturn\n\t\t}\n\t\tif len(user.Password) == 0 {\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\"provide Password in parameters\")\n\t\t\treturn\n\t\t}\n\n\t\t// Create user if no error\n\t\thelperCreateUser(clients, handler, w, user)\n\t}\n}",
"func UsersRegisterPost(c buffalo.Context) error {\n\t// Allocate an empty User\n\tuser := &models.User{}\n\t// Bind user to the html form elements\n\tif err := c.Bind(user); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\t// Get the DB connection from the context\n\ttx := c.Value(\"tx\").(*pop.Connection)\n\t// Validate the data from the html form\n\tverrs, err := user.Create(tx)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tif verrs.HasAny() {\n\t\t// Make user available inside the html template\n\t\tc.Set(\"user\", user)\n\t\t// Make the errors available inside the html template\n\t\tc.Set(\"errors\", verrs.Errors)\n\t\t// Render again the register.html template that the user can\n\t\t// correct the input.\n\t\treturn c.Render(422, r.HTML(\"users/register.html\"))\n\t}\n\t// If there are no errors set a success message\n\tc.Flash().Add(\"success\", \"Account created successfully.\")\n\t// and redirect to the home page\n\treturn c.Redirect(302, \"/\")\n}",
"func (env *Env) RegisterUser(c *gin.Context) {\n\n\ttype registerRequest struct {\n\t\tUsername string `json:\"username\"`\n\t\tPassword string `json:\"password\"`\n\t\tDeviceID string `json:\"device_id\"`\n\t}\n\n\ttype registerResponse struct {\n\t\tAccessToken string `json:\"access_token\"`\n\t\tRefreshToken string `json:\"refresh_token\"`\n\t\tUser mysql.User `json:\"user\"`\n\t\tResetCode string `json:\"reset_code\"`\n\t}\n\n\t//decode request body\n\tjsonData, err := ioutil.ReadAll(c.Request.Body)\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"handler\").WithError(err)\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, errs.RQST001)\n\t\treturn\n\t}\n\n\tvar request registerRequest\n\terr = json.Unmarshal(jsonData, &request)\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"handler\").WithError(err)\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, errs.RQST001)\n\t\treturn\n\t}\n\n\tif request.Username == \"\" || request.Password == \"\" || request.DeviceID == \"\" {\n\t\tLog.WithField(\"module\", \"handler\").Error(\"Empty Fields in Request Body\")\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, errs.RQST002)\n\t\treturn\n\t}\n\n\tvar empty int64\n\tresult := env.db.Model(&mysql.User{}).Count(&empty)\n\tif result.Error != nil {\n\t\tLog.WithField(\"module\", \"handler\").WithError(result.Error)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\treturn\n\t}\n\n\tuser := mysql.User{}\n\tperms := mysql.Permissions{}\n\tdefaultGroup := mysql.UserGroup{}\n\n\tif empty == 0 {\n\n\t\tperms.Admin = true\n\t\tperms.CanEdit = true\n\n\t\tdefaultGroupPerms := mysql.Permissions{CanEdit: false, Admin: false}\n\n\t\tdefaultGroup.Name = \"default\"\n\n\t\tresult = env.db.Save(&defaultGroupPerms)\n\t\tif result.Error != nil {\n\t\t\tLog.WithField(\"module\", \"handler\").WithError(result.Error)\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\t\treturn\n\t\t}\n\n\t\tdefaultGroup.Permissions = defaultGroupPerms\n\n\t\tresult = env.db.Save(&defaultGroup)\n\t\tif result.Error != nil {\n\t\t\tLog.WithField(\"module\", \"handler\").WithError(result.Error)\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\t\treturn\n\t\t}\n\n\t} else {\n\t\tvar exists int64\n\t\t//Check if Username already exists in Database\n\t\tresult = env.db.Model(&user).Where(\"upper(username) = upper(?)\", user.Username).Count(&exists)\n\t\tif result.Error != nil {\n\t\t\tLog.WithField(\"module\", \"handler\").WithError(result.Error)\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\t\treturn\n\t\t}\n\t\tLog.WithField(\"module\", \"handler\").Debug(\"Users found: \", exists)\n\n\t\tif exists != 0 {\n\t\t\tLog.WithField(\"module\", \"handler\").Error(\"Username already exists in Database\")\n\t\t\tc.AbortWithStatusJSON(http.StatusForbidden, errs.AUTH004)\n\t\t\treturn\n\t\t}\n\n\t\tperms.Admin = false\n\t\tperms.CanEdit = false\n\n\t\tdefaultGroup.Name = \"default\"\n\t\tresult = env.db.Model(&defaultGroup).Find(&defaultGroup)\n\t\tif result.Error != nil {\n\t\t\tLog.WithField(\"module\", \"handler\").WithError(result.Error)\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\t\treturn\n\t\t}\n\n\t}\n\n\t//Create permission entry for new user in permissions table\n\tresult = env.db.Save(&perms)\n\tif result.Error != nil {\n\t\tLog.WithField(\"module\", \"sql\").WithError(result.Error)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, 
errs.DBSQ001)\n\t\treturn\n\t}\n\n\tuser.Username = request.Username\n\tuser.Password = request.Password\n\tuser.AvatarID = \"default\"\n\tuser.PermID = perms.ID\n\tuser.UserGroups = append(user.UserGroups, &defaultGroup)\n\tuser.ResetCode = utils.GenerateCode()\n\n\t//Save new user to users database\n\tresult = env.db.Save(&user)\n\tif result.Error != nil {\n\t\tLog.WithField(\"module\", \"sql\").WithError(result.Error)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\treturn\n\t}\n\n\t//Generate JWT AccessToken\n\taccessToken, err := utils.JWTAuthService(config.JWTAccessSecret).GenerateToken(user.ID, request.DeviceID, time.Hour*24)\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"jwt\").WithError(err)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.AUTH002)\n\t\treturn\n\t}\n\n\t//Add AccessToken to Redis\n\terr = env.rdis.AddPair(fmt.Sprint(user.ID), accessToken, time.Hour*24)\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"redis\").WithError(err).Error(\"Error adding AccessToken to Redis.\")\n\t\terr = nil\n\t}\n\n\t//Generate RefreshToken\n\trefreshToken, err := utils.JWTAuthService(config.JWTRefreshSecret).GenerateToken(user.ID, request.DeviceID, time.Hour*24)\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"jwt\").WithError(err)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.AUTH002)\n\t\treturn\n\t}\n\n\tuser.RefreshToken = refreshToken\n\n\t//Save RefreshToken to Database\n\tresult = env.db.Save(&user)\n\tif result.Error != nil {\n\t\tLog.WithField(\"module\", \"sql\").WithError(result.Error)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ002)\n\t\treturn\n\t}\n\n\tc.JSON(200, registerResponse{AccessToken: accessToken, RefreshToken: refreshToken, User: user, ResetCode: user.ResetCode})\n}",
"func handleSignUp(w http.ResponseWriter, r *http.Request) {\n\tif parseFormErr := r.ParseForm(); parseFormErr != nil {\n\t\thttp.Error(w, \"Sent invalid form\", 400)\n\t}\n\n\tname := r.FormValue(\"name\")\n\tuserHandle := r.FormValue(\"userHandle\")\n\temail := r.FormValue(\"email\")\n\tpassword := r.FormValue(\"password\")\n\n\tif !verifyUserHandle(userHandle) {\n\t\thttp.Error(w, \"Invalid userHandle\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif !verifyEmail(email) {\n\t\thttp.Error(w, \"Invalid email\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif !verifyPassword(password) {\n\t\thttp.Error(w, \"Password does not meet complexity requirements\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\thashed, _ := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)\n\n\turChannel := make(chan *database.InsertResponse)\n\tgo createUser(\n\t\tmodel.User{Name: name, UserHandle: userHandle, Email: email, Password: hashed},\n\t\turChannel,\n\t)\n\tcreatedUser := <-urChannel\n\n\tif createdUser.Err != nil {\n\t\tlog.Println(createdUser.Err)\n\n\t\tif strings.Contains(createdUser.Err.Error(), \"E11000\") {\n\t\t\tif strings.Contains(createdUser.Err.Error(), \"index: userHandle_1\") {\n\t\t\t\thttp.Error(w, \"Userhandle \"+userHandle+\" already registered\", http.StatusConflict)\n\t\t\t} else {\n\t\t\t\thttp.Error(w, \"Email \"+email+\" already registered\", http.StatusConflict)\n\t\t\t}\n\t\t} else {\n\t\t\tcommon.SendInternalServerError(w)\n\t\t}\n\n\t} else {\n\t\tlog.Println(\"Created user with ID \" + createdUser.ID)\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, wError := w.Write([]byte(\"Created user with ID \" + createdUser.ID))\n\n\t\tif wError != nil {\n\t\t\tlog.Println(\"Error while writing: \" + wError.Error())\n\t\t}\n\t}\n\n}",
"func postUser(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar user User\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tlog.ErrorHandler(err)\n\tvar (\n\t\temail = strings.ToLower(user.Email)\n\t\talias = user.Alias\n\t\tuserName = user.UserName\n\t\tpassword = user.Password\n\t\tfullName = user.FullName\n\t\tsafeNames bool\n\t\tsafeEmail = emailValidator(email)\n\t\tsafePassword = passwordValidator(password)\n\t\tsimilarToUser = similarToUser(fullName, alias, userName, password)\n\t)\n\n\tduplicateEmail := DuplicateCheck(email)\n\n\tif duplicateEmail {\n\t\tw.WriteHeader(http.StatusConflict)\n\t\terr := json.NewEncoder(w).Encode(core.FourONine)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 409)\n\t\treturn\n\t}\n\n\tsafeNames = userDetails(fullName, alias, userName)\n\n\tif safeNames {\n\t\t// Some or all of the details in the body are empty\n\t\t//\tAll fields are required\n\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\t\terr := json.NewEncoder(w).Encode(core.FourTwoTwo)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 422)\n\t\treturn\n\t}\n\n\tif !safeEmail {\n\t\t// Issue with Email\n\t\t//Email couldn't be verified or invalid email\n\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\t\terr := json.NewEncoder(w).Encode(core.FourTwoTwo)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 422)\n\t\treturn\n\t}\n\n\tif similarToUser {\n\t\t// Issue with Password\n\t\t// Password is similar to user information\n\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\t\terr := json.NewEncoder(w).Encode(core.FourTwoTwo)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 422)\n\t\treturn\n\t}\n\n\tif !safePassword {\n\t\t// Issue with Password\n\t\t//\tPassword doesn't go through the validator successfully\n\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\t\terr := json.NewEncoder(w).Encode(core.FourTwoTwo)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 422)\n\t\treturn\n\t}\n\n\tpasswordHash, err := generatePasswordHash(password)\n\tlog.ErrorHandler(err)\n\n\tuser = User{\n\t\tUserName: userName,\n\t\tFullName: fullName,\n\t\tAlias: alias,\n\t\tEmail: email,\n\t\tIsAdmin: false,\n\t\tPassword: passwordHash,\n\t\tLastLogin: time.Time{},\n\t\tIsActive: false,\n\t\tIsEmailVerified: false,\n\t}\n\n\t//\tfmt.Println(\"Create The Fucking User Here\")\n\n\tdb.Create(&user)\n\terr = json.NewEncoder(w).Encode(user)\n\tlog.ErrorHandler(err)\n\n\t// Create OTP to verify email by\n\t// OTP expires in 30 minutes\n\t// Stored in Redis with key new_user_otp_email\n\tverifiableToken := generateOTP()\n\terr = redisClient.Set(ctx, \"new_user_otp_\"+email, verifiableToken, 30*time.Minute).Err()\n\tlog.ErrorHandler(err)\n\n\t//payload := struct {\n\t//\tToken string\n\t//}{\n\t//\tToken: verifiableToken,\n\t//}\n\t//\n\t//var status bool\n\t//\n\t////status, err = core.SendEmailNoAttachment(email, \"OTP for Verification\", payload, \"token.txt\")\n\t//if !status {\n\t//\tw.WriteHeader(http.StatusInternalServerError)\n\t//\terr = json.NewEncoder(w).Encode(core.FiveHundred)\n\t//\tlog.ErrorHandler(err)\n\t//\tlog.AccessHandler(r, 500)\n\t//\treturn\n\t//}\n\tlog.ErrorHandler(err)\n\tlog.AccessHandler(r, 200)\n\treturn\n}",
"func SignUpUser(c *gin.Context) {\n\tvar db = models.InitDB()\n\tvar userData models.User\n\terr := c.Bind(&userData)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfmt.Println(userData)\n\tif err := db.Create(&userData).Error; err != nil {\n\t\tc.JSON(200, gin.H{\n\t\t\t\"creation\": \"false\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(200, gin.H{\n\t\t\"creation\": \"true\",\n\t})\n}",
"func UserCreate(w http.ResponseWriter, r *http.Request) {\n\n\t// Init output\n\toutput := []byte(\"\")\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Grab url path variables\n\turlVars := mux.Vars(r)\n\turlUser := urlVars[\"user\"]\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\trefUserUUID := gorillaContext.Get(r, \"auth_user_uuid\").(string)\n\n\t// Read POST JSON body\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\terr := APIErrorInvalidRequestBody()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Parse pull options\n\tpostBody, err := auth.GetUserFromJSON(body)\n\tif err != nil {\n\t\terr := APIErrorInvalidArgument(\"User\")\n\t\trespondErr(w, err)\n\t\tlog.Error(string(body[:]))\n\t\treturn\n\t}\n\n\tuuid := uuid.NewV4().String() // generate a new uuid to attach to the new project\n\ttoken, err := auth.GenToken() // generate a new user token\n\tcreated := time.Now().UTC()\n\t// Get Result Object\n\tres, err := auth.CreateUser(uuid, urlUser, postBody.FirstName, postBody.LastName, postBody.Organization, postBody.Description,\n\t\tpostBody.Projects, token, postBody.Email, postBody.ServiceRoles, created, refUserUUID, refStr)\n\n\tif err != nil {\n\t\tif err.Error() == \"exists\" {\n\t\t\terr := APIErrorConflict(\"User\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tif strings.HasPrefix(err.Error(), \"duplicate\") {\n\t\t\terr := APIErrorInvalidData(err.Error())\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tif strings.HasPrefix(err.Error(), \"invalid\") {\n\t\t\terr := APIErrorInvalidData(err.Error())\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\terr := APIErrGenericInternal(err.Error())\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Output result to JSON\n\tresJSON, err := res.ExportJSON()\n\tif err != nil {\n\t\terr := APIErrExportJSON()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Write response\n\toutput = []byte(resJSON)\n\trespondOK(w, output)\n\n}",
"func (e *env) UserSignupTokenPostHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\tcase \"POST\":\n\t\tusername := r.FormValue(\"username\")\n\t\tpassword := r.FormValue(\"password\")\n\t\tgivenToken := r.FormValue(\"register_key\")\n\n\t\tisValid, userRole := e.authState.ValidateRegisterToken(givenToken)\n\n\t\tif isValid {\n\n\t\t\t// Delete the token so it cannot be reused if the token is not blank\n\t\t\t// The first user can signup without a token and is granted admin rights\n\t\t\tif givenToken != \"\" {\n\t\t\t\te.authState.DeleteRegisterToken(givenToken)\n\t\t\t}\n\n\t\t\tif userRole == auth.RoleAdmin {\n\t\t\t\terr := e.authState.NewAdmin(username, password)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(\"Error adding admin:\", err)\n\t\t\t\t\te.authState.SetFlash(\"Error adding user. Check logs.\", r)\n\t\t\t\t\thttp.Redirect(w, r, r.Referer(), http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else if userRole == auth.RoleUser {\n\t\t\t\terr := e.authState.NewUser(username, password)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(\"Error adding user:\", err)\n\t\t\t\t\te.authState.SetFlash(\"Error adding user. Check logs.\", r)\n\t\t\t\t\thttp.Redirect(w, r, r.Referer(), http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Login the recently added user\n\t\t\tif e.authState.Auth(username, password) {\n\t\t\t\te.authState.Login(username, r)\n\t\t\t}\n\n\t\t\te.authState.SetFlash(\"Successfully added '\"+username+\"' user.\", r)\n\t\t\thttp.Redirect(w, r, \"/\", http.StatusSeeOther)\n\t\t} else {\n\t\t\te.authState.SetFlash(\"Registration token is invalid.\", r)\n\t\t\thttp.Redirect(w, r, \"/\", http.StatusInternalServerError)\n\t\t}\n\n\tcase \"PUT\":\n\t\t// Update an existing record.\n\tcase \"DELETE\":\n\t\t// Remove the record.\n\tdefault:\n\t\t// Give an error message.\n\t}\n}",
"func CreateUser(db *gorm.DB) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\t// Get the mandatory query parameters.\n\t\tname, ok := c.GetPostForm(\"name\")\n\t\tif !ok {\n\t\t\terrors.Apply(c, errors.MissingParameters)\n\t\t\treturn\n\t\t}\n\t\tusername, ok := c.GetPostForm(\"username\")\n\t\tif !ok {\n\t\t\terrors.Apply(c, errors.MissingParameters)\n\t\t\treturn\n\t\t}\n\t\tif !usernameRegexp.MatchString(username) {\n\t\t\terrors.Apply(c, errors.BadParameters)\n\t\t\treturn\n\t\t}\n\t\tpassword, ok := c.GetPostForm(\"password\")\n\t\tif !ok {\n\t\t\terrors.Apply(c, errors.MissingParameters)\n\t\t\treturn\n\t\t}\n\n\t\t// Try getting type.\n\t\tuserType, ok := c.GetPostForm(\"type\")\n\t\tif !ok {\n\t\t\tuserType = models.General\n\t\t}\n\t\tif userType != models.Admin && userType != models.Writer && userType != models.General {\n\t\t\terrors.Apply(c, errors.BadParameters)\n\t\t\treturn\n\t\t}\n\t\tif _, ok := c.Get(\"user\"); userType != models.General && !ok {\n\t\t\terrors.Apply(c, errors.NoPermission)\n\t\t\treturn\n\t\t}\n\n\t\t// Check if any users have the same username.\n\t\tvar checkUsers []models.User\n\t\terr := db.Where(\"user_name = ?\", username).\n\t\t\tFind(&checkUsers).\n\t\t\tError\n\t\tif err != nil && err != gorm.ErrRecordNotFound {\n\t\t\terrors.Apply(c, err)\n\t\t\treturn\n\t\t}\n\t\tif len(checkUsers) != 0 {\n\t\t\terrors.Apply(c, errors.UserExists)\n\t\t\treturn\n\t\t}\n\n\t\t// Create the user.\n\t\tuser := &models.User{\n\t\t\tType: userType,\n\t\t\tName: name,\n\t\t\tUserName: username,\n\t\t}\n\t\tif err := user.SetPassword(password); err != nil {\n\t\t\terrors.Apply(c, err)\n\t\t\treturn\n\t\t}\n\t\tif err := db.Create(user).Error; err != nil {\n\t\t\terrors.Apply(c, err)\n\t\t\treturn\n\t\t}\n\n\t\t// Respond with the user's JSON.\n\t\tc.JSON(200, user)\n\t}\n}",
"func CreateUser(w http.ResponseWriter, r *http.Request) {\n\tvar user User\n\tvar b []byte\n\tr.Body.Read(b)\n\terr := json.Unmarshal(b, &user)\n\tif err != nil {\n\t\tjson.NewEncoder(w).Encode(err)\n\t}\n}",
"func CreateUser(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\n\tuser, err := json.Marshal(map[string]string{\n\t\t\"name\": r.FormValue(\"name\"),\n\t\t\"email\": r.FormValue(\"email\"),\n\t\t\"nick\": r.FormValue(\"nick\"),\n\t\t\"password\": r.FormValue(\"password\"),\n\t})\n\tif err != nil {\n\t\tresponses.JSON(w, http.StatusBadRequest, responses.ErrorAPI{Err: err.Error()})\n\t\treturn\n\t}\n\n\turl := fmt.Sprintf(\"%s/users\", config.APIURL)\n\tresponse, err := http.Post(url, \"application/json\", bytes.NewBuffer(user))\n\tif err != nil {\n\t\tresponses.JSON(w, http.StatusInternalServerError, responses.ErrorAPI{Err: err.Error()})\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode >= 400 {\n\t\tresponses.TreatStatusCode(w, response)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, response.StatusCode, nil)\n}",
"func (_this *UserHandler) CreateUser() echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tvar request dtos.CreateUserRequest\n\t\tif err := c.Bind(&request); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tresp, err := _this.userService.CreateUser(&request)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn c.JSON(http.StatusOK, resp)\n\t}\n}",
"func (h *userHandler) CreatePost(w http.ResponseWriter, r *http.Request) {\r\n\tvar u post\r\n\tif err := json.NewDecoder(r.Body).Decode(&u); err != nil {\r\n\t\tinternalServerError(w, r)\r\n\t\treturn\r\n\t}\r\n\th.store.Lock()\r\n\th.store.m[u.ID] = u\r\n\th.store.Unlock()\r\n\tjsonBytes, err := json.Marshal(u)\r\n\tif err != nil {\r\n\t\tinternalServerError(w, r)\r\n\t\treturn\r\n\t}\r\n\tw.WriteHeader(http.StatusOK)\r\n\tw.Write(jsonBytes)\r\n}",
"func (handler *UserHandler) Create(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {\n\tpayload := &User{}\n\n\tif err := json.NewDecoder(req.Body).Decode(payload); err != nil {\n\t\thandler.Formatter.JSON(w, http.StatusBadRequest, util.NewError(\"1001\",\n\t\t\t\"Invalid JSON payload supplied.\", err.Error()))\n\t\treturn\n\t}\n\n\tif err := payload.Validate(); err != nil {\n\t\thandler.Formatter.JSON(w, http.StatusBadRequest, util.NewError(\"1002\",\n\t\t\t\"Unable to validate the payload provided.\", err.Error()))\n\t\treturn\n\t}\n\n\tuser, err := handler.UserService.CreateUser(payload)\n\n\tif err != nil {\n\t\thandler.Formatter.JSON(w, http.StatusBadRequest, util.NewError(\"1003\",\n\t\t\t\"Unable to create a new user.\", err.Error()))\n\t\treturn\n\t}\n\n\thandler.Formatter.JSON(w, http.StatusCreated, user.hidePassword())\n}",
"func createUser(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tvar user schema.User\n\n\t// we decode our body request params in JSON\n\t_ = json.NewDecoder(r.Body).Decode(&user)\n\n\tresult, err := users.InsertOne(context.TODO(), user)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// we decode the recieved params in JSON\n\tjson.NewEncoder(w).Encode(result)\n}",
"func (uc UserController) CreateUser(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\t// Stub an user to be populated from the body\n\tu := models.User{}\n\n\t// Populate the user data\n\tjson.NewDecoder(r.Body).Decode(&u)\n\n\t// Add an Id\n\tu.Id = bson.NewObjectId()\n\n\thPass, err := bcrypt.GenerateFromPassword([]byte(u.Password), bcrypt.DefaultCost)\n\tif err != nil {\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\tu.HashPassword = hPass\n\t// clear the incoming text password\n\tu.Password = \"\"\n\n\t// Write the user to mongo\n\terr = uc.session.DB(\"todos\").C(\"users\").Insert(&u)\n\n\t// clear hashed password\n\tu.HashPassword = nil\n\n\t// Marshal provided interface into JSON structure\n\tuj, _ := json.Marshal(u)\n\n\t// Write content-type, statuscode, payload\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(201)\n\tfmt.Fprintf(w, \"%s\", uj)\n}",
"func CreateUser(res http.ResponseWriter, req *http.Request) {\n\tres.Header().Set(\"Content-Type\", \"application/json\")\n\n\tvar user models.User\n\tif err := json.NewDecoder(req.Body).Decode(&user); err != nil {\n\t\tmsg := \"Error while reading input body\"\n\n\t\tutils.ReturnErrorResponse(http.StatusBadRequest, msg, \"\", nil, nil, res)\n\t\treturn\n\t}\n\n\t// Helper function to generate encrypted password hash\n\tpasswordHash := helpers.GeneratePasswordHash(user.Password)\n\n\tif passwordHash == \"\" {\n\t\tmsg := \"Error occurred while hashing the password\"\n\n\t\tutils.ReturnErrorResponse(http.StatusBadRequest, msg, \"\", nil, nil, res)\n\t\treturn\n\t}\n\n\tuser.ID = bson.NewObjectId()\n\tuser.Password = passwordHash\n\n\terr := db.CreateUser(user)\n\tif err != nil {\n\t\tmsg := \"Error occurred while creating user\"\n\n\t\tutils.ReturnErrorResponse(http.StatusBadRequest, msg, \"\", nil, nil, res)\n\t\treturn\n\t}\n\n\tmsg := \"User created successfully\"\n\tutils.ReturnSuccessReponse(http.StatusCreated, msg, user.ID, res)\n\n}",
"func HandleUserRegister(context *gin.Context) {\n\n\tuserAcc := context.PostForm(\"user_acc\")\n\tuserAvatar := context.PostForm(\"user_avatar\")\n\tuserNickName := context.PostForm(\"user_nick_name\")\n\tuserPassword := context.PostForm(\"user_password\")\n\tuserPhone := context.PostForm(\"user_phone\")\n\tuserEmail := context.PostForm(\"user_email\")\n\tuserGender := context.PostForm(\"user_gender\")\n\tuserSign := context.PostForm(\"user_sign\")\n\n\tuserType := context.PostForm(\"user_type\")\n\tuserTypeInt, _ := strconv.Atoi(userType)\n\n\tif userAcc == \"\" || userNickName == \"\" || userPassword == \"\"{\n\t\tcontext.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"status\": \"invalid\",\n\t\t\t\"code\": http.StatusBadRequest,\n\t\t\t\"msg\": \"user_acc, user_nick_name, user_password must not be none\",\n\t\t\t\"data\": \"\",\n\t\t})\n\t}\n\tuser := models.User{\n\t\tUserAcc:userAcc,\n\t\tUserAvatar:userAvatar,\n\t\tUserNickName:userNickName,\n\t\tUserPassword:userPassword,\n\t\tUserPhone:userPhone,\n\t\tUserEmail:userEmail,\n\t\tUserGender:userGender,\n\t\tUserSign:userSign,\n\t\tUserType:models.UserType(userTypeInt),\n\t}\n\tuserTry := models.User{}\n\tif db.DB.Where(\"user_acc=?\", userAcc).First(&userTry).RecordNotFound(){\n\t\t// user not found, create it\n\t\tdb.DB.Create(&user)\n\t\tuAddr := utils.GenAddr(user.ID)\n\t\tuser.UserAddr = \"usr\" + uAddr\n\n\t\tlog.Infof(\"FUCK GenAddr: %s gened: %s\", user.UserAddr, uAddr)\n\t\tdb.DB.Save(&user)\n\n\t\t// should return a token to user, as well as login\n\t\tclaims := make(map[string]interface{})\n\t\tclaims[\"id\"] = user.ID\n\t\tclaims[\"msg\"] = \"hiding egg\"\n\t\tclaims[\"user_addr\"] = user.UserAddr\n\t\ttoken, _ := utils.Encrypt(claims)\n\t\tlog.Infof(\"Request new user: %s, it is new.\", user)\n\t\tdata := map[string]interface{}{\"token\": token, \"id\": user.ID, \"user_addr\": user.UserAddr}\n\t\tcontext.JSON(200, gin.H{\n\t\t\t\"status\": \"success\",\n\t\t\t\"code\": http.StatusOK,\n\t\t\t\"msg\": \"user register succeed.\",\n\t\t\t\"data\": data,\n\t\t})\n\t}else{\n\t\tlog.Info(\"user exist.\")\n\t\tcontext.JSON(200, gin.H{\n\t\t\t\"status\": \"conflict\",\n\t\t\t\"code\": http.StatusConflict,\n\t\t\t\"msg\": \"user already exist.\",\n\t\t\t\"data\": nil,\n\t\t})\n\t}\n}",
"func (srv *UsersService) CreateHandler(ctx *gin.Context) {\n\tlogger := srv.logger.New(\"action\", \"CreateHandler\")\n\n\t// Checks if the query entry is valid\n\tvalidator := validators.CreateUserValidator{}\n\tif err := validator.BindJSON(ctx); err != nil {\n\t\t// Returns a \"422 StatusUnprocessableEntity\" response\n\t\tsrv.ResponseService.ValidatorErrorResponse(ctx, responses.UnprocessableEntity, err)\n\t\treturn\n\t}\n\n\t// Check permissions\n\tcurrentUser := GetCurrentUser(ctx)\n\tif hasPerm := srv.PermissionsService.CanCreateProfile(currentUser.UID, &validator.UserModel); !hasPerm {\n\t\tsrv.ResponseService.Forbidden(ctx)\n\t\treturn\n\t}\n\n\ttmpPassword := validator.UserModel.Password\n\n\t// Create new user\n\tcreatedUser, err := srv.userCreator.Create(&validator.UserModel, true, false, nil)\n\tif err != nil {\n\t\tlogger.Error(\"сan't create a user\", \"error\", err)\n\t\t// Returns a \"500 StatusInternalServerError\" response\n\t\tsrv.ResponseService.Error(ctx, responses.CanNotCreateUser, \"Can't create a user\")\n\t\treturn\n\t}\n\n\tif nil != currentUser {\n\t\tsrv.SystemLogsService.LogCreateUserProfileAsync(createdUser, currentUser.UID)\n\t}\n\t// TODO: refactor - use events, move above functionality to the event subscriber\n\tconfirmationCode, err := srv.confirmationCodeService.GenerateSetPasswordCode(createdUser)\n\tif err != nil {\n\t\tlogger.Error(\"unable to generate set_password confirmation code\")\n\t\treturn\n\t}\n\n\tif _, err = srv.notificationsService.ProfileCreated(createdUser.UID, tmpPassword, confirmationCode.Code); nil != err {\n\t\tlogger.Error(\"сan't send notification\", \"error\", err)\n\t\treturn\n\t}\n\n\t// Returns a \"201 Created\" response\n\tsrv.ResponseService.SuccessResponse(ctx, http.StatusCreated, validator.UserModel)\n}",
"func (u *UserHandler) Create(c *fiber.Ctx) error {\n\tuser := models.User{}\n\terr := c.BodyParser(&user)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = u.Repo.Create(user)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.Status(fiber.StatusOK).JSON(user)\n}",
"func signupHandler(usr string, pass string) string {\n\t// query for the number of users with the passed in username\n\tquery := \"SELECT count(*) FROM users WHERE username = ?;\"\n\thashed_usr := Hash1(usr)\n\trows := QueryDB(query, hashed_usr)\n\trows.Next()\n\tvar count int\n\t// scan the result\n\terr = rows.Scan(&count)\n\tif err != nil {\n\t\tfmt.Println(\"ERROR: querying for number of users with a given username (my_server.go: signupHandler)\")\n\t\tfmt.Println(err)\n\t\treturn \"\";\n\t}\n\trows.Close()\n\t// make sure that username is unique\n\tif count == 0 {\n\t\t// generate a per user salt\n\t\tsalt := GenerateRandomString()\n\t\t// hash the password with the generated salt\n\t\thashed_pass := Hash256(pass, salt)\n\t\t// make the RC4 key for the user\n\t\tkey := GenerateRandomString()\n\t\tfor KeyExists(key) {\n\t\t\tkey = GenerateRandomString()\n\t\t}\n\t\t// insert the information into the DB\n\t\tquery := \"INSERT INTO users VALUES (?,?,?,?);\"\n\t\t// make a call to execute the query\n\t\tExecDB(query, hashed_usr, hashed_pass, salt, key)\n\t\t// mkdir for new user\n\t\tencrypted := EncryptString(hashed_usr, key)\n\t\tpath := GetBaseDir(encrypted)\n\t\tusr_info := []string{hashed_usr, hashed_pass, salt, key}\n\t\ttoken := NewUserSignIn(path, usr, usr_info)\n\t\treturn token\n\t}\n\treturn \"\"\n}",
"func (a *Server) CreateUser(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"create a new user\")\n}",
"func (a *UserApiService) UserCreatePost(ctx context.Context, body UserCreateReq) (UserResp, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Post\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t \tsuccessPayload UserResp\n\t)\n\n\t// validate body params\n\tif err := body.Validate(); err != nil {\n\t\treturn successPayload, nil, err\n\t}\n\t\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/user/create\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-Auth-Token\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn successPayload, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\tdefer localVarHttpResponse.Body.Close()\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tbodyBytes, _ := ioutil.ReadAll(localVarHttpResponse.Body)\n\t\treturn successPayload, localVarHttpResponse, reportError(\"Status: %v, Body: %s\", localVarHttpResponse.Status, bodyBytes)\n\t}\n\n\tif err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\n\n\treturn successPayload, localVarHttpResponse, err\n}",
"func (h *Handler) CreateUser(c *fiber.Ctx) error {\n\tvar service = services.NewUserService()\n\tvar usr = &user.User{}\n\tif err := c.BodyParser(usr); err != nil {\n\t\treturn c.Status(422).JSON(fiber.Map{\"status\": \"error\", \"message\": err})\n\t}\n\n\tnewUser, err := service.CreateUser(usr)\n\tif err != nil {\n\t\treturn c.Status(400).JSON(fiber.Map{\"status\": \"error\", \"message\": err.Error()})\n\t}\n\n\treturn c.JSON(fiber.Map{\"status\": \"success\", \"message\": \"Created usr\", \"data\": newUser})\n}",
"func (h *Handler) CreateHandler(w http.ResponseWriter, r *http.Request) {\n\tvar u user.User\n\n\terr := json.NewDecoder(r.Body).Decode(&u)\n\tif err != nil {\n\t\th.log.Error(err)\n\t\t_ = response.HTTPError(w, http.StatusBadRequest, response.ErrorParsingUser.Error())\n\t\treturn\n\t}\n\n\tu.Role = user.Client\n\n\tctx, cancel := context.WithCancel(r.Context())\n\tdefer cancel()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\t_ = response.HTTPError(w, http.StatusBadGateway, response.ErrTimeout.Error())\n\t\treturn\n\tdefault:\n\t\terr = h.service.Create(ctx, &u)\n\t}\n\n\tif err != nil {\n\t\th.log.Error(err)\n\t\t_ = response.HTTPError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\tw.Header().Add(\"Location\", r.URL.String()+u.ID.Hex())\n\t_ = response.JSON(w, http.StatusCreated, response.Map{\"user\": u})\n}",
"func (e *Example) PostUserAuth(ctx context.Context, req *example.Request, rsp *example.Response) error {\n\tlog.Log(\"POST /api/v1.0/user/name PutUersinfo()\")\n\n\t//创建返回空间\n\trsp.Errno= utils.RECODE_OK\n\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\n\t/*从从sessionid获取当前的userid*/\n\t//连接redis\n\tredis_config_map := map[string]string{\n\t\t\"key\": utils.G_server_name,\n\t\t\"conn\": utils.G_redis_addr + \":\" + utils.G_redis_port,\n\t\t\"dbNum\": utils.G_redis_dbnum,\n\t}\n\tredis_config , _ := json.Marshal(redis_config_map)\n\t//连接redis数据库 创建句柄\n\tbm, err := cache.NewCache(\"redis\", string(redis_config) )\n\tif err != nil {\n\t\tlog.Log(\"缓存创建失败\",err)\n\t\trsp.Errno = utils.RECODE_DBERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\t//拼接key\n\tsessioniduserid := req.Sessionid + \"user_id\"\n\t//获取userid\n\tvalue_id := bm.Get(sessioniduserid)\n\tid := int(value_id.([]uint8)[0])\n\n\t//创建表对象\n\tuser := models.User{ Id: id, Real_name: req.Realname, Id_card: req.Idcard }\n\n\t//创建数据库句柄\n\to:= orm.NewOrm()\n\n\t//更新\n\t_ , err = o.Update(&user ,\"real_name\", \"id_card\")\n\tif err !=nil{\n\t\trsp.Errno= utils.RECODE_DBERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\n\t// 更新缓存\n\tbm.Put(sessioniduserid, string(user.Id), time.Second * 600)\n\n\tlog.Log(\"更新实名认证信息成功\")\n\treturn nil\n}",
"func (ctx *HandlerContext) SignUpHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, \"that method is not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tcontentType := r.Header.Get(\"Content-Type\")\n\tif contentType != \"application/json\" {\n\t\thttp.Error(w, \"request body must be in JSON\", http.StatusUnsupportedMediaType)\n\t\treturn\n\t}\n\tnewUser := &users.NewUser{}\n\tdec := json.NewDecoder(r.Body)\n\terr := dec.Decode(newUser)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := newUser.Validate(); err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tuser, err := newUser.ToUser()\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\taddedUser, err := ctx.Users.Insert(user)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tstate := sessions.SessionState{SessionTime: time.Now(), User: addedUser}\n\n\t_, err2 := sessions.BeginSession(ctx.SigningKey, ctx.Store, state, w)\n\tif err2 != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\n\terr = json.NewEncoder(w).Encode(addedUser)\n\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t}\n\n}",
"func createUser(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar user User\n\tif err := json.NewDecoder(r.Body).Decode(&user); err != nil {\n\t\tpanic(err)\n\t}\n\t//Todo (Farouk): Mock ID - not safe\n\tuser.ID = strconv.Itoa(rand.Intn(1000000))\n\tusers = append(users, user)\n}",
"func Handler(w http.ResponseWriter, r *http.Request) {\n\tu := user.Create()\n\n\tj, _ := json.Marshal(u)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(j))\n}",
"func (app *App) createUser(w http.ResponseWriter, r *http.Request) {\n\tvar user users.User\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&user); err != nil {\n\t\trespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\tif (user.Email == \"\") || (user.FName == \"\") || (user.LName == \"\") || (user.Role == \"\") || (user.Password == \"\") {\n\t\trespondWithError(w, http.StatusBadRequest, \"Missing fields\")\n\t\treturn\n\t}\n\n\tif !inArray(user.Role, []string{\"base\", \"admin\"}) {\n\t\trespondWithError(w, http.StatusBadRequest, \"The 'Role' field must be one of: base, admin\")\n\t\treturn\n\t}\n\n\terr := user.CreateUser(app.Db)\n\tif err != nil {\n\t\trespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\trespondWithJSON(w, http.StatusOK, user)\n}",
"func CreateUser(c *gin.Context) {\n\n\tfmt.Println(\"Endpoint Hit: Create A new User\")\n\n\tuser := model.Users{}\n\n\terr := c.Bind(&user)\n\n\tif err != nil {\n\t\tfmt.Print(err)\n\t}\n\n\tfmt.Println(user.Name)\n\n\tfmt.Println(user.Email)\n\tfmt.Println(user.Password)\n\tdb, err := sql.Open(\"mysql\", \"root:password@tcp(127.0.0.1:3306)/twitter\")\n\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\t_, errQ := db.Query(\"INSERT INTO users(name, email, password) VALUES (?,?,?)\", user.Name, user.Email, user.Password)\n\n\tif errQ != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"Name\": user.Name,\n\t\t\"Email\": user.Email,\n\t\t\"Password\": user.Password,\n\t})\n\n\tdefer db.Close()\n\n}",
"func (a *App) CreateUser(w http.ResponseWriter, r *http.Request) {\n\thandler.CreateUser(a.DB, w, r)\n}",
"func (ur *UserResource) handleCreateUser(c *gin.Context) {\n\tvar u model.User\n\n\tif err := c.ShouldBindJSON(&u); err != nil {\n\t\tc.JSON(http.StatusUnprocessableEntity, errUserCreateInvalidFields)\n\t\treturn\n\t}\n\n\thash, err := auth.GeneratePassword(u.Password)\n\tif err != nil {\n\t\tlogging.Logger.Errorln(\"[API] Failed to generate hash from password\", err)\n\t\tc.JSON(http.StatusBadRequest, errUserCreateGeneric)\n\t\treturn\n\t}\n\tu.Password = hash\n\n\tnewUser, err := ur.Store.CreateUser(u)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, errUserCreateGeneric)\n\t\treturn\n\t}\n\n\tnewUser.Password = \"\"\n\tc.JSON(http.StatusCreated, newUser)\n}",
"func (e *Example) PostUserAuth(ctx context.Context, req *example.Request, rsp *example.Response) error {\n\tbeego.Info(\" 实名认证 Postuserauth api/v1.0/user/auth \")\n\n\t//创建返回空间\n\trsp.Errno = utils.RECODE_OK\n\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\n\t/*从session中获取我们的user_id*/\n\t//连接redis数据库\n\tbm, err := utils.GetRedisConnector()\n\n\tuserInfo_redis := bm.Get(req.SessionId)\n\tuserInfo_string, _ := redis.String(userInfo_redis, nil)\n\tuserOld := models.User{}\n\tjson.Unmarshal([]byte(userInfo_string), &userOld)\n\n\t//创建user对象\n\tuser := models.User{Uid: userOld.Uid, Real_name: req.RealName, Id_card: req.IdCard}\n\t/*更新user表中的 姓名和 身份号*/\n\to := orm.NewOrm()\n\t//更新表\n\t_, err = o.Update(&user, \"real_name\", \"id_card\")\n\tif err != nil {\n\t\trsp.Errno = utils.RECODE_DBERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\n\t//更新缓存\n\tuserOld.Real_name = req.RealName\n\tuserOld.Id_card = req.IdCard\n\tuserInfo, _ := json.Marshal(userOld)\n\tbm.Put(req.SessionId, userInfo, time.Second*600)\n\treturn nil\n}",
"func RegisterHandler(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\tusername := r.PostFormValue(\"username\")\n\temail := r.PostFormValue(\"email\")\n\tpassword := r.PostFormValue(\"password\")\n\tuser, err := models.RegisterUser(username, email, password)\n\tif err != nil {\n\t\tlog.Print(err)\n\t} else {\n\t\tlog.Print(user)\n\t}\n}",
"func Register(w http.ResponseWriter, r *http.Request) {\n\tt:= models.Users{}\n\n\terr := json.NewDecoder(r.Body).Decode(&t)\n\n\tif err != nil {\n\t\thttp.Error(w, \"Error en los datos recibidos \"+err.Error(), 400)\n\t\treturn\n\t}\n\tif len(t.Login) < 6 {\n\t\thttp.Error(w, \"Error en los datos recibidos, ingrese un login mayor a 5 digitos \", 400)\n\t\treturn\n\t}\n\tif len(t.Password) < 6 {\n\t\thttp.Error(w, \"Ingrese una contraseña mayor a 5 digitos \", 400)\n\t\treturn\n\t}\n\n\t_, found, _ := bd.CheckUser(t.Login)\n\tif found == true {\n\t\thttp.Error(w, \"Ya existe un usuario registrado con ese login\", 400)\n\t\treturn\n\t}\n\n\tif t.Id_role == 3 {\n\t\tcod := bd.CodFamiliar(t.Cod_familiar)\n\t\tif cod == false {\n\t\t\thttp.Error(w, \"Debe ingresar un codigo de familia correcto\", 400)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif t.Id_role == 1 {\n\t\thttp.Error(w, \"Usted no esta autorizado para crear este tipo de usuario\", 400)\n\t\treturn\n\t}\n\n\t_, status, err := bd.InsertRegister(t)\n\tif err != nil {\n\t\thttp.Error(w, \"Ocurrió un error al intentar realizar el registro de usuario \"+err.Error(), 400)\n\t\treturn\n\t}\n\n\tif status == false {\n\t\thttp.Error(w, \"No se ha logrado insertar el registro del usuario\", 400)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n}",
"func signupHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"POST\" {\n\t\tdecoder := json.NewDecoder(r.Body)\n\t\tvar u user\n\t\terr := decoder.Decode(&u)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err.Error())\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tfmt.Fprintf(w, err.Error())\n\t\t} else {\n\t\t\tdb.Table(\"user\").Insert(u).Run(session)\n\t\t\tfmt.Fprintf(w, \"Hello from api.\")\n\t\t}\n\t} else {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"no such method\")\n\t}\n}",
"func (uh *UserHandler) Register(w http.ResponseWriter, r *http.Request) {\n\n\tvar userHolder *entity.User\n\tvar password string\n\n\tif r.Method == http.MethodGet {\n\t\tuh.CSRF, _ = stringTools.GenerateRandomBytes(30)\n\t\ttoken, err := stringTools.CSRFToken(uh.CSRF)\n\t\tif err != nil {\n\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t}\n\t\tinputContainer := InputContainer{CSRF: token}\n\t\tuh.Temp.ExecuteTemplate(w, \"SignUp.html\", inputContainer)\n\t\treturn\n\t}\n\n\tif r.Method == http.MethodPost {\n\n\t\tthirdParty := r.FormValue(\"thirdParty\")\n\t\tvar identification entity.Identification\n\t\tfirstname := r.FormValue(\"firstname\")\n\t\tlastname := r.FormValue(\"lastname\")\n\t\temail := r.FormValue(\"email\")\n\t\tidentification.ConfirmPassword = r.FormValue(\"confirmPassword\")\n\n\t\tif thirdParty == \"true\" {\n\n\t\t\tif r.FormValue(\"serverAUT\") != ServerAUT {\n\t\t\t\thttp.Error(w, \"Invalid server key\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tidentification.From = r.FormValue(\"from\")\n\t\t\tidentification.TpFlag = true\n\t\t} else {\n\t\t\tpassword = r.FormValue(\"password\")\n\t\t\tidentification.ConfirmPassword = r.FormValue(\"confirmPassword\")\n\t\t}\n\n\t\t// Validating CSRF Token\n\t\tcsrfToken := r.FormValue(\"csrf\")\n\t\tok, errCRFS := stringTools.ValidCSRF(csrfToken, uh.CSRF)\n\n\t\tuserHolder = entity.NewUserFR(firstname, lastname, email, password)\n\t\terrMap := uh.UService.Verification(userHolder, identification)\n\t\tif !ok || errCRFS != nil {\n\t\t\tif len(errMap) == 0 {\n\t\t\t\terrMap = make(map[string]string)\n\t\t\t}\n\t\t\terrMap[\"csrf\"] = \"Invalid token used!\"\n\t\t}\n\t\tif len(errMap) > 0 {\n\t\t\tuh.CSRF, _ = stringTools.GenerateRandomBytes(30)\n\t\t\ttoken, _ := stringTools.CSRFToken(uh.CSRF)\n\t\t\tinputContainer := InputContainer{Error: errMap, CSRF: token}\n\t\t\tuh.Temp.ExecuteTemplate(w, \"SignUp.html\", inputContainer)\n\t\t\treturn\n\t\t}\n\n\t\tif identification.TpFlag {\n\n\t\t\tnewSession := uh.configSess()\n\t\t\tclaims := stringTools.Claims(email, newSession.Expires)\n\t\t\tsession.Create(claims, newSession, w)\n\t\t\t_, err := uh.SService.StoreSession(newSession)\n\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\thttp.Redirect(w, r, \"/Dashboard\", http.StatusSeeOther)\n\t\t}\n\n\t\tuh.Temp.ExecuteTemplate(w, \"CheckEmail.html\", nil)\n\t\treturn\n\t}\n}",
"func CreateUser(db *gorm.DB, w http.ResponseWriter, r *http.Request) {\n\tuser := models.AppUser{}\n\tdecoder := json.NewDecoder(r.Body)\n\tdefer r.Body.Close()\n\tdecoder.DisallowUnknownFields()\n\tif err := decoder.Decode(&user); err != nil {\n\t\trespondJSON(w, http.StatusBadRequest, JSONResponse{models.AppUser{}, \"Erro interno del servidor\"})\n\t\treturn\n\t}\n\tuserTemp := getUserOrNull(db, user.AppUserID, w, r)\n\tif userTemp != nil {\n\t\trespondJSON(w, http.StatusBadRequest, JSONResponse{models.AppUser{}, \"Ya existe un usuario con este ID\"})\n\t\treturn\n\t}\n\t//hashing the password\n\tpass := user.AppUserPassword\n\thashPass, err := bcrypt.GenerateFromPassword([]byte(pass), 10)\n\tif err != nil {\n\t\trespondJSON(w, http.StatusInternalServerError, JSONResponse{models.AppUser{}, \"Error Interno del servidor\"})\n\t\treturn\n\t}\n\ts := bytes.NewBuffer(hashPass).String()\n\tuser.AppUserPassword = s\n\t//end hashing\n\n\tif result := db.Create(&user); result.Error != nil || result.RowsAffected == 0 {\n\t\tif result.Error != nil {\n\t\t\trespondJSON(w, http.StatusBadRequest, JSONResponse{models.AppUser{}, err.Error()})\n\t\t\treturn\n\t\t}\n\t\trespondJSON(w, http.StatusInternalServerError, JSONResponse{models.AppUser{}, \"Error No se pudo realizar el registro\"})\n\t\treturn\n\t}\n\trespondJSON(w, http.StatusCreated, JSONResponse{user, \"Registro realizado\"})\n}",
"func CreateUser(c *gin.Context) {\n\ttype result struct {\n\t\tFirstName string `json:\"first_name\"`\n\t\tLastName string `json:\"last_name\"`\n\t\tEmail string `json:\"email\"`\n\t\tPassword string `json:\"password\"`\n\t\tDateOfBirth string `json:\"birth_date\"`\n\t}\n\tUserParams := result{}\n\n\terr := c.ShouldBindJSON(&UserParams)\n\tlayout := \"2006-01-02\"\n\tstr := UserParams.DateOfBirth\n\tt, er := time.Parse(layout, str)\n\n\tif er != nil {\n\t\tfmt.Println(er)\n\t}\n\n\tvar user model.User\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, err)\n\t\treturn\n\t}\n\tif len(UserParams.Password) == 0 {\n\t\tfmt.Println(\"err2\")\n\t\tlog.Println(err)\n\t\tc.JSON(http.StatusBadRequest, \"No given password\")\n\t\treturn\n\t}\n\tif age.Age(t) < 18 {\n\t\tlog.Println(err)\n\t\tc.JSON(http.StatusBadRequest, \"You are not adult!\")\n\t\treturn\n\t}\n\tif !db.Where(\"email = ?\", UserParams.Email).Find(&user).RecordNotFound() {\n\t\tc.JSON(http.StatusBadRequest, \"User with this email already exist\")\n\t\treturn\n\t}\n\tid := uuid.NewV4()\n\t// 1 = single user; 2 = admin\n\tuser.AccessLevel = 1\n\tuser.UUID = id.String()\n\tvar hash = hashPassword(UserParams.Password)\n\tuser.Password = hash\n\tuser.FirstName = UserParams.FirstName\n\tuser.LastName = UserParams.LastName\n\tuser.Email = UserParams.Email\n\tuser.DateOfBirth = t\n\tdb.Create(&user)\n\tuser.Password = \"\"\n\tc.JSON(200, &user)\n}",
"func CreateNewUser(w http.ResponseWriter, r *http.Request) {\n\tfLog := userMgmtLogger.WithField(\"func\", \"CreateNewUser\").WithField(\"RequestID\", r.Context().Value(constants.RequestID)).WithField(\"path\", r.URL.Path).WithField(\"method\", r.Method)\n\n\tiauthctx := r.Context().Value(constants.HansipAuthentication)\n\tif iauthctx == nil {\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusUnauthorized, \"You are not authorized to access this resource\", nil, nil)\n\t\treturn\n\t}\n\n\tfLog.Trace(\"Creating new user\")\n\treq := &CreateNewUserRequest{}\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfLog.Errorf(\"ioutil.ReadAll got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusInternalServerError, err.Error(), nil, nil)\n\t\treturn\n\t}\n\terr = json.Unmarshal(body, req)\n\tif err != nil {\n\t\tfLog.Errorf(\"json.Unmarshal got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusBadRequest, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tisValidPassphrase := passphrase.Validate(req.Passphrase, config.GetInt(\"security.passphrase.minchars\"), config.GetInt(\"security.passphrase.minwords\"), config.GetInt(\"security.passphrase.mincharsinword\"))\n\tif !isValidPassphrase {\n\t\tfLog.Errorf(\"Passphrase invalid\")\n\t\tinvalidMsg := fmt.Sprintf(\"Invalid passphrase. Passphrase must at least has %d characters and %d words and for each word have minimum %d characters\", config.GetInt(\"security.passphrase.minchars\"), config.GetInt(\"security.passphrase.minwords\"), config.GetInt(\"security.passphrase.mincharsinword\"))\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusBadRequest, \"invalid passphrase\", nil, invalidMsg)\n\t\treturn\n\t}\n\tuser, err := UserRepo.CreateUserRecord(r.Context(), req.Email, req.Passphrase)\n\tif err != nil {\n\t\tfLog.Errorf(\"UserRepo.CreateUserRecord got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusBadRequest, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tresp := &CreateNewUserResponse{\n\t\tRecordID: user.RecID,\n\t\tEmail: user.Email,\n\t\tEnabled: user.Enabled,\n\t\tSuspended: user.Suspended,\n\t\tLastSeen: user.LastSeen,\n\t\tLastLogin: user.LastLogin,\n\t\tTotpEnabled: user.Enable2FactorAuth,\n\t}\n\tfLog.Warnf(\"Sending email\")\n\tmailer.Send(r.Context(), &mailer.Email{\n\t\tFrom: config.Get(\"mailer.from\"),\n\t\tFromName: config.Get(\"mailer.from.name\"),\n\t\tTo: []string{user.Email},\n\t\tCc: nil,\n\t\tBcc: nil,\n\t\tTemplate: \"EMAIL_VERIFY\",\n\t\tData: user,\n\t})\n\n\thelper.WriteHTTPResponse(r.Context(), w, http.StatusOK, \"Success creating user\", nil, resp)\n\treturn\n}"
] | [
"0.78383803",
"0.7456648",
"0.74130136",
"0.73975974",
"0.7373872",
"0.73664296",
"0.73215455",
"0.7290208",
"0.728816",
"0.7268887",
"0.7192617",
"0.71799153",
"0.71711975",
"0.71696395",
"0.71660227",
"0.7104086",
"0.710336",
"0.7103203",
"0.7039367",
"0.7038086",
"0.70310396",
"0.70294183",
"0.69984925",
"0.6976975",
"0.69390804",
"0.69234383",
"0.69204587",
"0.6880102",
"0.68657106",
"0.68530154",
"0.6817111",
"0.6809694",
"0.6792934",
"0.67825264",
"0.6779558",
"0.6770838",
"0.67498994",
"0.6748949",
"0.67466027",
"0.67413694",
"0.67213947",
"0.6718605",
"0.6714131",
"0.6705692",
"0.66797507",
"0.6666807",
"0.66649276",
"0.6662693",
"0.6630949",
"0.6617591",
"0.6590869",
"0.6587597",
"0.65794027",
"0.65771884",
"0.6576558",
"0.657552",
"0.6573201",
"0.6571512",
"0.6570066",
"0.65632975",
"0.6555752",
"0.655369",
"0.6553161",
"0.65505695",
"0.65497804",
"0.65485877",
"0.6545178",
"0.65358114",
"0.65348804",
"0.6526257",
"0.6511398",
"0.6508399",
"0.6497926",
"0.64954615",
"0.6485888",
"0.6478527",
"0.6477599",
"0.646827",
"0.644814",
"0.64429504",
"0.64381456",
"0.64326304",
"0.6430453",
"0.6421207",
"0.6417862",
"0.6411681",
"0.6408126",
"0.6401768",
"0.63938856",
"0.6393648",
"0.6389857",
"0.63872993",
"0.6383413",
"0.637845",
"0.6366463",
"0.63662153",
"0.6362731",
"0.6349646",
"0.6343867",
"0.63395303"
] | 0.75723433 | 1 |
PutUserHandler updates a user based on its id | func PutUserHandler(w http.ResponseWriter, r *http.Request) {
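	// Read the user id from the route parameters (gorilla/mux-style route variables).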
params := mux.Vars(r)
k := params["id"]
var userupdate User
err := json.NewDecoder(r.Body).Decode(&userupdate)
	if err != nil {
		http.Error(w, "invalid request body", http.StatusBadRequest)
		return
	}
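	// If the user exists, preserve its original CreateAt and replace the stored entry; otherwise report 404.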
if user, ok := Listusers[k]; ok {
userupdate.CreateAt = user.CreateAt
delete(Listusers, k)
Listusers[k] = userupdate
	} else {
		log.Printf("user with id %s not found", k)
		http.Error(w, "user not found", http.StatusNotFound)
		return
	}
w.WriteHeader(http.StatusNoContent)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func PutUserHandler(w http.ResponseWriter, r *http.Request) {\n\tid := mux.Vars(r)[\"id\"]\n\tvar userUpdate models.User\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\terr := json.NewDecoder(r.Body).Decode(&userUpdate)\n\tif err != nil {\n\t\tlog.Printf(\"Error al parsear usuario con el id %s\", id)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tstatus := users.PutUser(id, userUpdate)\n\tw.WriteHeader(status)\n}",
"func (auh *AdminUserHandler) PutUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\n\tvar apiKey = r.Header.Get(\"api-key\")\n\tif apiKey == \"\" || (apiKey != adminApiKey && apiKey != userApiKey) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t\treturn\n\t}\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tuser, errs := auh.userService.User(uint(id))\n\n\tif len(errs) > 0 {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tl := r.ContentLength\n\n\tbody := make([]byte, l)\n\n\tr.Body.Read(body)\n\n\tjson.Unmarshal(body, &user)\n\n\tuser, errs = auh.userService.UpdateUser(user)\n\n\tif len(errs) > 0 {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\toutput, err := json.MarshalIndent(user, \"\", \"\\t\")\n\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(output)\n\treturn\n}",
"func updateUserByIDHandler(c *gin.Context) {\n\tid, _ := strconv.Atoi(c.Param(\"id\"))\n\tuser, _ := c.Get(JwtIdentityKey)\n\n\t// Role check.\n\tif !isAdmin(user) {\n\t\tc.JSON(http.StatusUnauthorized, gin.H{\"message\": \"unauthorized\"})\n\t\treturn\n\t}\n\n\t// Decode json.\n\tvar json userUpdateRequest\n\tif err := c.ShouldBindJSON(&json); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\n\tdb := data.New()\n\tu, err := db.Users.GetUserByID(id)\n\tif err != nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"status\": http.StatusNotFound,\n\t\t\t\"message\": \"User does not exist\",\n\t\t})\n\t\treturn\n\t}\n\n\t// Disallow updates on master user.\n\tif id != 1 {\n\t\t// Set role.\n\t\tif json.Role != \"\" {\n\t\t\tu.Role = json.Role\n\t\t}\n\n\t\t// Set active status.\n\t\tu.Active = json.Active\n\t}\n\n\tupdatedUser, _ := db.Users.UpdateUserByID(id, u)\n\tc.JSON(http.StatusOK, updatedUser)\n}",
"func UpdateUserHandler(connection *sql.DB, cnf config.Config) negroni.HandlerFunc {\n\treturn negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t\tvar queryToken = r.URL.Query().Get(\"token\")\n\n\t\tif len(queryToken) < 1 {\n\t\t\tqueryToken = r.Header.Get(\"token\")\n\t\t}\n\n\t\tif len(queryToken) < 1 {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write([]byte(string(\"token is mandatory\")))\n\t\t\treturn\n\t\t}\n\n\t\tuser := &models.UserResponse{}\n\t\terr := util.RequestToJSON(r, user)\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, errors.New(\"bad json\"))\n\t\t\treturn\n\t\t}\n\n\t\tsecretKey := cnf.SecretKey\n\t\ttok, err := jwt.Parse(queryToken, func(t *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte(secretKey), nil\n\t\t})\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tclaims := tok.Claims.(jwt.MapClaims)\n\t\tvar ID = claims[\"sub\"].(float64) // gets the ID\n\n\t\tif int64(ID) != user.ID {\n\t\t\tutil.SendBadRequest(w, errors.New(\"you can only change your own user object\"))\n\t\t\treturn\n\t\t}\n\n\t\tif err := user.Validate(); err == nil {\n\n\t\t\tdb.UpdateUser(connection, user)\n\n\t\t\tutil.SendOK(w, user)\n\n\t\t} else {\n\t\t\tutil.SendBadRequest(w, err)\n\t\t}\n\t})\n}",
"func UpdateUser(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tuserID, err := strconv.ParseInt(params[\"id\"], 10, 64)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tuserIDToken, err := authentication.ExtractUserId(r)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusUnauthorized, err)\n\t\treturn\n\t}\n\n\tif userIDToken != userID {\n\t\tresponses.Error(w, http.StatusForbidden, errors.New(\"não é possível manipular usuário de terceiros\"))\n\t\treturn\n\t}\n\n\tbodyRequest, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\n\tvar user models.User\n\tif err := json.Unmarshal(bodyRequest, &user); err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tif err := user.Prepare(false); err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tuser.Id = userID\n\tif err := validateUniqueDataUser(user, false); err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tdb, err := database.Connect()\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\trepository := repository.NewRepositoryUser(db)\n\tif err = repository.UpdateUser(userID, user); err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, http.StatusNoContent, nil)\n\n}",
"func (h *UserHandler) Update(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tidStr := vars[\"id\"]\n\tid, err := strconv.ParseUint(idStr, 10, 64)\n\tif err != nil {\n\t\tlog.Println(errors.Wrapf(err, \"error parse uint:%v\", idStr))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlog.Printf(\"/users/%d PUT handled\", id)\n\n\treq := &UpdateRequest{}\n\tif err := util.ScanRequest(r, req); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tuser := &schema.User{\n\t\tID: id,\n\t\tName: req.Name,\n\t}\n\n\tif err := h.model.Validate(user); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tres, err := h.model.Update(user)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := util.JSONWrite(w, res, http.StatusOK); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}",
"func (u *UserResource) updateUser(request *restful.Request, response *restful.Response) {\n\tusr := new(User)\n\terr := request.ReadEntity(&usr)\n\tif err == nil {\n\t\tdb.WLock()\n\t\tdefer db.WUnlock() //unlock when exit this method\n\n\t\tif _, err = db.Engine.Id(usr.ID).Update(usr); err != nil {\n\t\t\tresponse.WriteHeaderAndEntity(http.StatusInternalServerError, UsersResponse{Error: err.Error()})\n\t\t} else {\n\t\t\tresponse.WriteEntity(UsersResponse{Success: true})\n\t\t}\n\t} else {\n\t\tresponse.WriteHeaderAndEntity(http.StatusInternalServerError, UsersResponse{Error: err.Error()})\n\t}\n}",
"func UpdateUser(c *gin.Context) {}",
"func UpdateHandler(db *sql.DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tb, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(w, \"%v\\n\", err.Error())\n\t\t\treturn\n\t\t}\n\t\tdefer r.Body.Close()\n\n\t\tvar u User\n\t\tif err := json.Unmarshal(b, &u); err != nil {\n\t\t\tfmt.Fprintf(w, \"%v\\n\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tvars := mux.Vars(r)\n\t\tu.ID = vars[\"id\"]\n\n\t\tuser, err := update(db, u)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(w, \"%v\\n\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tjson.NewEncoder(w).Encode(user)\n\t\treturn\n\t}\n}",
"func (h *Handler) EditUser(c *fiber.Ctx) error {\n\tservice := services.NewUserService()\n\tid, err := strconv.ParseInt(c.Params(\"id\"), 10, 32)\n\n\tif err != nil {\n\t\treturn c.Status(400).JSON(fiber.Map{\"status\": \"error\", \"message\": err.Error()})\n\t}\n\n\tvar usr user.User\n\tif err := c.BodyParser(&usr); err != nil {\n\t\treturn c.Status(422).JSON(fiber.Map{\"status\": \"error\", \"message\": \"Invalid fields\"})\n\t}\n\n\terr = service.UpdateUser(&usr, int(id))\n\n\tif err != nil {\n\t\treturn c.Status(500).JSON(fiber.Map{\"status\": \"error\", \"message\": err.Error()})\n\t}\n\n\treturn c.JSON(fiber.Map{\"status\": \"success\", \"message\": \"UpdatedUser\", \"data\": usr})\n}",
"func UpdateUserHandler(w http.ResponseWriter, r *http.Request, db *gorp.DbMap, c *businesscontext.Ctx) error {\n\tvars := mux.Vars(r)\n\tusername := vars[\"username\"]\n\n\tif !c.User.Admin && username != c.User.Username {\n\t\treturn WriteJSON(w, r, nil, http.StatusForbidden)\n\t}\n\n\tuserDB, errload := user.LoadUserWithoutAuth(db, username)\n\tif errload != nil {\n\t\treturn sdk.WrapError(errload, \"getUserHandler: Cannot load user from db\")\n\t}\n\n\tvar userBody sdk.User\n\tif err := UnmarshalBody(r, &userBody); err != nil {\n\t\treturn err\n\t}\n\n\tuserBody.ID = userDB.ID\n\n\tif !user.IsValidEmail(userBody.Email) {\n\t\treturn sdk.WrapError(sdk.ErrWrongRequest, \"updateUserHandler: Email address %s is not valid\", userBody.Email)\n\t}\n\n\tif err := user.UpdateUser(db, userBody); err != nil {\n\t\treturn sdk.WrapError(err, \"updateUserHandler: Cannot update user table\")\n\t}\n\n\treturn WriteJSON(w, r, userBody, http.StatusOK)\n}",
"func (handler *Handler) handleUserActivationPut(w http.ResponseWriter, r *http.Request) {\n\n\t//Define a local struct to get the email out of the request\n\ttype ActivationGet struct {\n\t\tEmail string `json:\"email\"`\n\t\tActToken string `json:\"activation_token\"`\n\t}\n\n\t//Create a new password change object\n\tinfo := ActivationGet{}\n\n\t//Now get the json info\n\terr := json.NewDecoder(r.Body).Decode(&info)\n\tif err != nil {\n\t\tutils.ReturnJsonError(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\n\t}\n\n\t//Lookup the user id\n\tuser, err := handler.userHelper.GetUserByEmail(info.Email)\n\n\t//Return the error\n\tif err != nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusForbidden, false, \"activation_forbidden\")\n\t\treturn\n\t}\n\n\t//Try to use the token\n\trequestId, err := handler.userHelper.CheckForActivationToken(user.Id(), info.ActToken)\n\n\t//Return the error\n\tif err != nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusForbidden, false, \"activation_forbidden\")\n\t\treturn\n\t}\n\t//Now activate the user\n\terr = handler.userHelper.ActivateUser(user)\n\n\t//Return the error\n\tif err != nil {\n\t\tutils.ReturnJsonError(w, http.StatusForbidden, err)\n\t\treturn\n\t}\n\t//Mark the request as used\n\terr = handler.userHelper.UseToken(requestId)\n\n\t//Check to see if the user was created\n\tif err == nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusAccepted, true, \"user_activated\")\n\t} else {\n\t\tutils.ReturnJsonError(w, http.StatusForbidden, err)\n\t}\n}",
"func (h *HTTPClientHandler) addUserHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Add(\"Access-Control-Allow-Origin\", ServerName)\n\t// adding new user to database\n\tvar userRequest User\n\n\tdefer r.Body.Close()\n\tbody, err := ioutil.ReadAll(r.Body)\n\n\tif err != nil {\n\t\t// failed to read response body\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Error(\"Could not read response body!\")\n\t\thttp.Error(w, \"Failed to read request body.\", 400)\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(body, &userRequest)\n\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\tw.WriteHeader(422) // can't process this entity\n\t\treturn\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"firstName\": userRequest.FirstName,\n\t\t\"lastName\": userRequest.LastName,\n\t\t\"userID\": userRequest.UserID,\n\t\t\"profilePicUrl\": userRequest.ProfilePicUrl,\n\t\t\"gender\": userRequest.Gender,\n\t\t\"body\": string(body),\n\t}).Info(\"Got user info\")\n\n\t// adding user\n\terr = h.db.addUser(userRequest)\n\n\tif err == nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\tw.WriteHeader(201) // user inserted\n\t\treturn\n\t} else {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Warn(\"Failed to insert..\")\n\n\t\tcontent, code := responseDetailsFromMongoError(err)\n\n\t\t// Marshal provided interface into JSON structure\n\t\tuj, _ := json.Marshal(content)\n\n\t\t// Write content-type, statuscode, payload\n\t\twriteJsonResponse(w, &uj, code)\n\n\t}\n\n}",
"func (handler *Handler) handleUserUpdate(w http.ResponseWriter, r *http.Request) {\n\n\t//We have gone through the auth, so we should know the id of the logged in user\n\tloggedInUser := r.Context().Value(\"user\").(int) //Grab the id of the user that send the request\n\n\t//Now load the current user from the repo\n\tuser, err := handler.userHelper.GetUser(loggedInUser)\n\n\t//Check for an error\n\tif err != nil {\n\t\tutils.ReturnJsonError(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\n\t//decode the request body into struct with all of the info specified and failed if any error occur\n\terr = json.NewDecoder(r.Body).Decode(user)\n\tif err != nil {\n\t\tutils.ReturnJsonError(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\n\t}\n\n\t//Now update the user\n\tuser, err = handler.userHelper.updateUser(loggedInUser, user)\n\n\t//Check to see if the user was created\n\tif err == nil {\n\t\tutils.ReturnJson(w, http.StatusAccepted, user)\n\t} else {\n\t\tutils.ReturnJsonError(w, http.StatusForbidden, err)\n\t}\n\n}",
"func updateUser(w http.ResponseWriter, r *http.Request) {\r\n\tparams := mux.Vars(r)\r\n\tstmt, err := db.Prepare(\"UPDATE users SET name = ? WHERE id = ?\")\r\n\tif err != nil {\r\n\t\tpanic(err.Error())\r\n\t}\r\n\tbody, err := ioutil.ReadAll(r.Body)\r\n\tif err != nil {\r\n\t\tpanic(err.Error())\r\n\t}\r\n\tkeyVal := make(map[string]string)\r\n\tjson.Unmarshal(body, &keyVal)\r\n\tnewName := keyVal[\"name\"]\r\n\t_, err = stmt.Exec(newName, params[\"id\"])\r\n\tif err != nil {\r\n\t\tpanic(err.Error())\r\n\t}\r\n\tfmt.Fprintf(w, \"User with id = %s was updated\", params[\"id\"])\r\n}",
"func UpdateUser(w http.ResponseWriter, r *http.Request) {\n\n\t// w.Header().Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\t// w.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t// w.Header().Set(\"Access-Control-Allow-Methods\", \"PUT\")\n\t// w.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\n\t// get the userid from the request params, key is \"id\"\n\tparams := mux.Vars(r)\n\n\t// convert the id type from string to int\n\tid, err := strconv.Atoi(params[\"id\"])\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to convert the string into int. %v\", err)\n\t}\n\n\t// create an empty user of type models.User\n\tvar user TempUsers\n\n\t// decode the json request to user\n\terr = json.NewDecoder(r.Body).Decode(&user)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to decode the request body. %v\", err)\n\t}\n\n\tdb := createConnection()\n\t// close the db connection\n\tdefer db.Close()\n\n\t// create the update sql query\n\tsqlStatement := `UPDATE users SET full_name=$2, email=$3, mobile_no=$4, username=$5, passwd=$6, created_at=$7 WHERE userid=$1`\n\n\t// execute the sql statement\n\tres, err := db.Exec(sqlStatement, id, user.FullName, user.Email, user.MobileNo, user.UserName, user.Password, time.Now())\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to execute the query. %v\", err)\n\t}\n\n\t// check how many rows affected\n\trowsAffected, err := res.RowsAffected()\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error while checking the affected rows. %v\", err)\n\t}\n\n\tif rowsAffected > 0 {\n\t\tmsg := map[string]string{\"msg\": \"Updated Successfully.\"}\n\t\tjson.NewEncoder(w).Encode(msg)\n\t} else {\n\t\tmsg := map[string]string{\"msg\": \"Unable to Update, ID does not exists.\"}\n\t\tjson.NewEncoder(w).Encode(msg)\n\t}\n}",
"func (ctl *controller) APIUserPutAction(ctx *gin.Context) {\n\tctl.logger.Info(\"[PUT] UserPutAction\")\n\n\tvar userRequest UserRequest\n\tuserID, err := validateUserRequestUpdate(ctx, &userRequest)\n\tif err != nil {\n\t\tctx.AbortWithError(http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\taffected, err := ctl.updateUser(&userRequest, userID)\n\tif err != nil {\n\t\tctx.AbortWithError(http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tif affected == 0 {\n\t\tctl.logger.Debug(\"nothing updated\")\n\t}\n\n\t// json response\n\tctx.JSON(http.StatusOK, jsonresp.CreateUserJSON(userID))\n}",
"func (app *application) EditUser(w http.ResponseWriter, r *http.Request) {\n\tid := chi.URLParam(r, \"id\")\n\tuserID, _ := strconv.Atoi(id)\n\n\tvar user models.User\n\n\terr := app.readJSON(w, r, &user)\n\tif err != nil {\n\t\tapp.badRequest(w, r, err)\n\t\treturn\n\t}\n\n\tif userID > 0 { // For an existing user, update the user record\n\t\terr = app.DB.EditUser(user)\n\t\tif err != nil {\n\t\t\tapp.badRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\n\t\tif user.Password != \"\" {\n\t\t\tnewHash, err := bcrypt.GenerateFromPassword([]byte(user.Password), 12)\n\t\t\tif err != nil {\n\t\t\t\tapp.badRequest(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr = app.DB.UpdatePasswordForUser(user, string(newHash))\n\t\t\tif err != nil {\n\t\t\t\tapp.badRequest(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t} else { // For a new user, simply add the user to the users table\n\t\tnewHash, err := bcrypt.GenerateFromPassword([]byte(user.Password), 12)\n\t\tif err != nil {\n\t\t\tapp.badRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\terr = app.DB.AddUser(user, string(newHash))\n\t\tif err != nil {\n\t\t\tapp.badRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar resp struct {\n\t\tError bool `json:\"error\"`\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tresp.Error = false\n\tapp.writeJSON(w, http.StatusOK, resp)\n}",
"func setUser(ctx context.Context, data *User) error {\n\t// clear session_token and API_token for user\n\tk := datastore.NameKey(\"Users\", strings.ToLower(data.Username), nil)\n\n\t// New struct, to not add body, author etc\n\n\tif _, err := dbclient.Put(ctx, k, data); err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (s *Service) PutUser(ctx context.Context, o *platform.User) error {\n\ts.userKV.Store(o.ID.String(), o)\n\treturn nil\n}",
"func UpdateUser(w http.ResponseWriter, r *http.Request) {\n\n}",
"func (h *User) Update(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tR.JSON500(w)\n\t\treturn\n\t}\n\n\t// @todo we might want extra check that /users/id equals to user.ID received in body\n\tuser, err := validator.UserCreate(body)\n\tif err != nil || user.ID == 0 {\n\t\tlog.Println(err)\n\t\tR.JSON400(w)\n\t\treturn\n\t}\n\n\terr = h.Storage.UpdateUser(user)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tR.JSON500(w)\n\t\treturn\n\t}\n\n\tR.JSON200OK(w)\n}",
"func updateUser(c *gin.Context) {\n\tvar user user\n\tuserID := c.Param(\"id\")\n\n\tdb.First(&user, userID)\n\n\tif user.Id == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\"status\": http.StatusNotFound, \"message\": \"No user found!\"})\n\t\treturn\n\t}\n\n\tdb.Model(&user).Update(\"login\", c.PostForm(\"login\"))\n password,_ := HashPassword(c.PostForm(\"password\"))\n\tdb.Model(&user).Update(\"password\", password)\n\tc.JSON(http.StatusOK, gin.H{\"status\": http.StatusOK, \"message\": \"User updated successfully!\"})\n}",
"func (_obj *WebApiAuth) SysUser_Insert(req *SysUser, id *int32, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = req.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = _os.Write_int32((*id), 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"SysUser_Insert\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&(*id), 2, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}",
"func HandleUserUpdate(c *gin.Context) {\n\tuid := c.Param(\"uid\")\n\tvar u User\n\tu.Username = c.DefaultPostForm(\"username\", \"\")\n\tu.Password = c.DefaultPostForm(\"password\", \"\")\n\tu.Nickname = c.DefaultPostForm(\"nickname\", \"\")\n\n\tuser, err := u.Update(uid)\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"code\": 1,\n\t\t\t\"msg\": err.Error(),\n\t\t\t\"data\": gin.H{},\n\t\t})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"code\": 0,\n\t\t\"msg\": \"ok\",\n\t\t\"data\": user,\n\t})\n}",
"func UpdateUser(c *gin.Context) {\n\tvar user Models.User\n\tid := c.Params.ByName(\"id\")\n\tfmt.Println(\"id\", id)\n\terr := Models.GetUserByID(&user, id)\n\tif err != nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"error\" : gin.H { \n\t\t\t\"status\": http.StatusNotFound,\n\t\t\t\"message\": \"Not Found\",\n\t\t}})\n\t\treturn\n\t} else {\n\tc.BindJSON(&user)\n\t\n\terr = Models.UpdateUser(&user, id)\n\tif err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"data\":gin.H { \n\t\t\t\"error\" : gin.H { \n\t\t\t\"status\": http.StatusBadRequest,\n\t\t\t\"message\": \"Can´t update user\",\n\t\t}}})\n\t} else {\n\t\tc.JSON(http.StatusOK, user)\n\t}\n}\n}",
"func (ctx *Context) UserHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tdecoder := json.NewDecoder(r.Body)\n\t\tnewuser := &users.NewUser{}\n\t\tif err := decoder.Decode(newuser); err != nil {\n\t\t\thttp.Error(w, \"Invalid JSON\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\terr := newuser.Validate()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"User not valid\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tusr, _ := ctx.UserStore.GetByEmail(newuser.Email)\n\t\tif usr != nil {\n\t\t\thttp.Error(w, \"Email Already Exists\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tuser, err := ctx.UserStore.Insert(newuser)\n\t\tstate := &SessionState{\n\t\t\tBeganAt: time.Now(),\n\t\t\tClientAddr: r.RequestURI,\n\t\t\tUser: user,\n\t\t}\n\t\t_, err = sessions.BeginSession(ctx.SessionKey, ctx.SessionStore, state, w)\n\n\t\t_, err = ctx.UserStore.CreateLikesList(user)\n\t\t_, err = ctx.UserStore.CreateGroceryList(user)\n\n\t\tw.Header().Add(\"Content-Type\", contentTypeJSONUTF8)\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(user)\n\tcase \"GET\":\n\t\tusers, err := ctx.UserStore.GetAll()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error fetching users\", http.StatusInternalServerError)\n\t\t}\n\t\tw.Header().Add(\"Content-Type\", contentTypeJSONUTF8)\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(users)\n\t}\n}",
"func AddUserHandler(w http.ResponseWriter, r *http.Request) {\n\terr := AddUserProcessor(w, r)\n\tif err != nil {\n\t\tdata, statusCode, _ := oauth2Svr.GetErrorData(err)\n\t\tdata[\"user_id\"] = username(r)\n\t\tHttpResponse(w, data, statusCode)\n\t\treturn\n\t}\n\tHttpResponse(w, defaultSuccessResponse(), http.StatusOK)\n\treturn\n}",
"func SetUser(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tc.Set(\"userIdFromToken\", \"12345\")\n\t\treturn next(c)\n\t}\n}",
"func (ah *AuthHandler) UpdateUsername(w http.ResponseWriter, r *http.Request) {\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tuser := &data.User{}\n\terr := data.FromJSON(user, r.Body)\n\tif err != nil {\n\t\tah.logger.Error(\"unable to decode user json\", \"error\", err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t// data.ToJSON(&GenericError{Error: err.Error()}, w)\n\t\tdata.ToJSON(&GenericResponse{Status: false, Message: err.Error()}, w)\n\t\treturn\n\t}\n\n\tuser.ID = r.Context().Value(UserIDKey{}).(string)\n\tah.logger.Debug(\"udpating username for user : \", user)\n\n\terr = ah.repo.UpdateUsername(context.Background(), user)\n\tif err != nil {\n\t\tah.logger.Error(\"unable to update username\", \"error\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t// data.ToJSON(&GenericError{Error: err.Error()}, w)\n\t\tdata.ToJSON(&GenericResponse{Status: false, Message: \"Unable to update username. Please try again later\"}, w)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\t// data.ToJSON(&UsernameUpdate{Username: user.Username}, w)\n\tdata.ToJSON(&GenericResponse{\n\t\tStatus: true,\n\t\tMessage: \"Successfully updated username\",\n\t\tData: &UsernameUpdate{Username: user.Username},\n\t}, w)\n}",
"func (h *Handler) updateUser(c *gin.Context) handlerResponse {\n\n\tvar updatedUser types.User\n\tif err := c.ShouldBindJSON(&updatedUser); err != nil {\n\t\treturn handleBadRequest(err)\n\t}\n\tif updatedUser.Name != c.Param(userParameter) {\n\t\treturn handleNameMismatch()\n\t}\n\tstoredUser, err := h.service.User.Update(updatedUser, h.who(c))\n\tif err != nil {\n\t\treturn handleError(err)\n\t}\n\t// Remove password so we do not show in response\n\tstoredUser.Password = \"\"\n\treturn handleOK(storedUser)\n}",
"func putUserToKeyServer(cfg upspin.Config, ep *upspin.Endpoint) (upspin.Config, error) {\n\tcfg = config.SetStoreEndpoint(cfg, *ep)\n\tcfg = config.SetDirEndpoint(cfg, *ep)\n\tuser := &upspin.User{\n\t\tName: cfg.UserName(),\n\t\tDirs: []upspin.Endpoint{cfg.DirEndpoint()},\n\t\tStores: []upspin.Endpoint{cfg.StoreEndpoint()},\n\t\tPublicKey: cfg.Factotum().PublicKey(),\n\t}\n\tkey, err := bind.KeyServer(cfg, cfg.KeyEndpoint())\n\tif err != nil {\n\t\treturn cfg, err\n\t}\n\terr = key.Put(user)\n\treturn cfg, err\n}",
"func PUT(ctx *web.Context) {\n\t// Deserialize request into PutRequest.\n\treq := &PutRequest{}\n\tvar err error\n\tif err = ctx.Decode(req); nil != err {\n\t\tctx.Respond().Status(http.StatusBadRequest).With(err).Do()\n\t\treturn\n\t}\n\n\t// Check that request is valid.\n\tif err = req.Err(); nil != err {\n\t\tctx.Respond().Status(http.StatusBadRequest).With(err).Do()\n\t\treturn\n\t}\n\n\t// Add new user.\n\tif err = model.User.Add(req.Username, req.Password); nil != err {\n\t\tctx.Respond().Status(http.StatusConflict).With(err).Do()\n\t\treturn\n\t}\n\n\t// Reply with success.\n\tresp := &PutResponse{}\n\tctx.Respond().With(resp).Do()\n}",
"func PostUserHandler(w http.ResponseWriter, r *http.Request) {\n\tvar user User\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tuser.CreateAt = time.Now()\n\tid++\n\tk := strconv.Itoa(id)\n\tListusers[k] = user\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tj, err := json.Marshal(user)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.WriteHeader(http.StatusCreated)\n\tw.Write(j)\n}",
"func (a DefaultApi) UserPut(body User, xAuthToken string) (*APIResponse, error) {\n\n\tvar httpMethod = \"Put\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/user\"\n\n\n\theaderParams := make(map[string]string)\n\tqueryParams := url.Values{}\n\tformParams := make(map[string]string)\n\tvar postBody interface{}\n\tvar fileName string\n\tvar fileBytes []byte\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\n\t// header params \"X-Auth-Token\"\n\theaderParams[\"X-Auth-Token\"] = xAuthToken\n\n\t// body params\n\tpostBody = &body\n\n\n\thttpResponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, fileName, fileBytes)\n\tif err != nil {\n\t\treturn NewAPIResponse(httpResponse.RawResponse), err\n\t}\n\n\treturn NewAPIResponse(httpResponse.RawResponse), err\n}",
"func PutUserInGroupHandler(c *gin.Context) {\n\tsrc := c.Param(\"source\")\n\tdst := c.Param(\"destination\")\n\tif(PutUserInGroup(src, dst)){\n\t\tc.AbortWithStatus(200);\n\t}else{\n\t\tc.AbortWithStatus(http.StatusInternalServerError);\n\t}\n}",
"func (ac *ApiConfig) AddUserHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPost {\n\t\thttp.Error(w, r.Method+\" is not available\", http.StatusInternalServerError)\n\t\tzerolog.Error().Msg(r.Method + \" is not available\")\n\t\treturn\n\t}\n\n\tvar user *models.Users\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thashedPass, err := bcrypt.GenerateFromPassword([]byte(user.Password), 12)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tuser.Password = string(hashedPass)\n\n\terr = ac.DHolder.AddUser(user)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tstat := &models.StatusIdentifier{\n\t\tOk: true,\n\t\tMessage: \"User Added\",\n\t}\n\n\terr = dResponseWriter(w, stat, http.StatusOK)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\treturn\n\t}\n\n\treturn\n}",
"func UpdateUserProfileHandler(w http.ResponseWriter, r *http.Request) {\n\n}",
"func (h *Handler) UpdateHandler(w http.ResponseWriter, r *http.Request) {\n\tvar u, updatedUser user.User\n\terr := json.NewDecoder(r.Body).Decode(&u)\n\tif err != nil {\n\t\th.log.Error(err)\n\t\t_ = response.HTTPError(w, http.StatusBadRequest, response.ErrorParsingUser.Error())\n\t\treturn\n\t}\n\n\tid := chi.URLParam(r, \"id\")\n\n\tcu, err := auth.GetID(r)\n\tif err != nil {\n\t\t_ = response.HTTPError(w, http.StatusBadRequest, response.ErrorBadRequest.Error())\n\t\treturn\n\t}\n\trole, err := auth.GetRole(r)\n\tif err != nil {\n\t\t_ = response.HTTPError(w, http.StatusBadRequest, response.ErrorBadRequest.Error())\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithCancel(r.Context())\n\tdefer cancel()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\t_ = response.HTTPError(w, http.StatusBadGateway, response.ErrTimeout.Error())\n\t\treturn\n\tdefault:\n\t\tupdatedUser, err = h.service.Update(ctx, id, cu, role, &u)\n\t}\n\n\tif err != nil {\n\t\th.log.Error(err)\n\t\t_ = response.HTTPError(w, http.StatusNotFound, err.Error())\n\t\treturn\n\t}\n\n\trender.JSON(w, r, render.M{\"user\": updatedUser})\n}",
"func (c *UserController) PutBy(id int) string {\n\t// Update user by ID == $id\n\treturn \"User updated\"\n}",
"func (client IdentityClient) updateUser(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPut, \"/users/{userId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response UpdateUserResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func PutUsermetaViaUserId(UserId_ int64, iUsermeta *Usermeta) (int64, error) {\n\trow, err := Engine.Update(iUsermeta, &Usermeta{UserId: UserId_})\n\treturn row, err\n}",
"func UpdateUser(w http.ResponseWriter, r *http.Request) {\n\temail, err := getEmailFromTokenHeader(r)\n\tif err != nil || email == \"\" {\n\t\thttp.Error(w, \"Invalid Token\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Methods\", \"PUT\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\n\t// create an empty user of type models.User\n\tvar user models.User\n\t// decode the json request to user\n\terr = json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\tlogrus.Debugf(\"Unable to decode the request body. %v\", err)\n\t\thttp.Error(w, \"Invalid Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\t// call update user to update the user\n\tupdatedRows, err := database.UpdateUser(email, user.FirstName, user.LastName)\n\tif err != nil {\n\t\tlogrus.Errorf(\"failed updating user. %v\", err)\n\t\thttp.Error(w, \"Invalid Request\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlogrus.Debugf(\"User updated successfully. Total rows/record affected %v\", updatedRows)\n}",
"func (s *Server) registerUserWithEnrollID(id string, enrollID string, attr []*pb.Attribute) (string, error) {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\n\tlog.Debug(\"Registering user: \", id)\n\n\tvar tok string\n\ttok = randomString(12)\n\n\t// TODO: Update db with registered user\n\n\treturn tok, nil\n}",
"func (e *Example) PostUserAuth(ctx context.Context, req *example.Request, rsp *example.Response) error {\n\tlog.Log(\"POST /api/v1.0/user/name PutUersinfo()\")\n\n\t//创建返回空间\n\trsp.Errno= utils.RECODE_OK\n\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\n\t/*从从sessionid获取当前的userid*/\n\t//连接redis\n\tredis_config_map := map[string]string{\n\t\t\"key\": utils.G_server_name,\n\t\t\"conn\": utils.G_redis_addr + \":\" + utils.G_redis_port,\n\t\t\"dbNum\": utils.G_redis_dbnum,\n\t}\n\tredis_config , _ := json.Marshal(redis_config_map)\n\t//连接redis数据库 创建句柄\n\tbm, err := cache.NewCache(\"redis\", string(redis_config) )\n\tif err != nil {\n\t\tlog.Log(\"缓存创建失败\",err)\n\t\trsp.Errno = utils.RECODE_DBERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\t//拼接key\n\tsessioniduserid := req.Sessionid + \"user_id\"\n\t//获取userid\n\tvalue_id := bm.Get(sessioniduserid)\n\tid := int(value_id.([]uint8)[0])\n\n\t//创建表对象\n\tuser := models.User{ Id: id, Real_name: req.Realname, Id_card: req.Idcard }\n\n\t//创建数据库句柄\n\to:= orm.NewOrm()\n\n\t//更新\n\t_ , err = o.Update(&user ,\"real_name\", \"id_card\")\n\tif err !=nil{\n\t\trsp.Errno= utils.RECODE_DBERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\n\t// 更新缓存\n\tbm.Put(sessioniduserid, string(user.Id), time.Second * 600)\n\n\tlog.Log(\"更新实名认证信息成功\")\n\treturn nil\n}",
"func (handler *Handler) handleUserCreate(w http.ResponseWriter, r *http.Request) {\n\n\t//Create an empty new user\n\tnewUser := handler.userHelper.NewEmptyUser()\n\n\t/**\n\tDefine a struct for just updating password\n\t*/\n\ttype newUserStruct struct {\n\t\tEmail string `json:\"email\"`\n\t\tPassword string `json:\"password\"`\n\t}\n\n\t//Create the new user\n\tnewUserInfo := &newUserStruct{}\n\n\t//decode the request body into struct and failed if any error occur\n\terr := json.NewDecoder(r.Body).Decode(newUserInfo)\n\tif err != nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusUnprocessableEntity, false, err.Error())\n\t\treturn\n\n\t}\n\n\t//Copy over the new user data\n\tnewUser.SetEmail(newUserInfo.Email)\n\tnewUser.SetPassword(newUserInfo.Password)\n\n\t//Now create the new suer\n\terr = handler.userHelper.createUser(newUser)\n\n\tif err != nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusUnprocessableEntity, false, err.Error())\n\t\treturn\n\t}\n\n\t//Check to see if the user was created\n\tif err == nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusCreated, true, \"create_user_added\")\n\t} else {\n\t\tutils.ReturnJsonStatus(w, http.StatusUnprocessableEntity, false, err.Error())\n\t}\n\n}",
"func (s MockStore) Put(u User) error {\n\ts.id[u.ID] = u\n\ts.name[u.Name] = u\n\ts.email[u.Email] = u\n\n\treturn nil\n}",
"func UpdateUser(db *gorm.DB, w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tid := vars[\"id\"]\n\tuser := getUserByID(db, id, w, r)\n\tif user == nil {\n\t\treturn\n\t}\n\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&user); err != nil {\n\t\tRespondError(w, http.StatusBadRequest, \"\")\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\tif err := db.Save(&user).Error; err != nil {\n\t\tRespondError(w, http.StatusInternalServerError, \"\")\n\t\treturn\n\t}\n\tRespondJSON(w, http.StatusOK, user)\n}",
"func (handler *Handler) handleUserGet(w http.ResponseWriter, r *http.Request) {\n\n\t//We have gone through the auth, so we should know the id of the logged in user\n\tloggedInUser := r.Context().Value(\"user\").(int) //Grab the id of the user that send the request\n\n\t//Get the user\n\tuser, err := handler.userHelper.GetUser(loggedInUser)\n\n\t//Make sure we null the password\n\t//Blank out the password before returning\n\tuser.SetPassword(\"\")\n\n\t//Check to see if the user was created\n\tif err == nil {\n\t\tutils.ReturnJson(w, http.StatusOK, user)\n\t} else {\n\t\tutils.ReturnJsonStatus(w, http.StatusUnsupportedMediaType, false, err.Error())\n\t}\n\n}",
"func SetUserOnContext(param string) gin.HandlerFunc {\n\n\treturn func(c *gin.Context) {\n\n\t\tuid := bson.ObjectIdHex(c.Params.ByName(param))\n\t\tu, err := service.ReadUserByID(uid)\n\t\tif err != nil {\n\t\t\tif err == mgo.ErrNotFound {\n\t\t\t\terrors.Send(c, errors.NotFound())\n\t\t\t} else {\n\t\t\t\tlogger.Error(err)\n\t\t\t\terrors.Send(c, fmt.Errorf(\"failed to get a user\"))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tutils.SetTargetUser(c, u)\n\t}\n}",
"func patchAPIUserHandler(w http.ResponseWriter, r *http.Request, _ map[string]string) {\n\tuserName := sessionHandler.GetUserName(r)\n\tuserID, err := getUserID(userName)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdecoder := json.NewDecoder(r.Body)\n\tvar json JSONUser\n\terr = decoder.Decode(&json)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// Make sure user id is over 0\n\tif json.ID < 1 {\n\t\thttp.Error(w, \"Wrong user id.\", http.StatusInternalServerError)\n\t\treturn\n\t} else if userID != json.ID { // Make sure the authenticated user is only changing his/her own data. TODO: Make sure the user is admin when multiple users have been introduced\n\t\thttp.Error(w, \"You don't have permission to change this data.\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// Get old user data to compare\n\ttempUser, err := database.RetrieveUser(json.ID)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// Make sure user email is provided\n\tif json.Email == \"\" {\n\t\tjson.Email = string(tempUser.Email)\n\t}\n\t// Make sure user name is provided\n\tif json.Name == \"\" {\n\t\tjson.Name = string(tempUser.Name)\n\t}\n\t// Make sure user slug is provided\n\tif json.Slug == \"\" {\n\t\tjson.Slug = tempUser.Slug\n\t}\n\t// Check if new name is already taken\n\tif json.Name != string(tempUser.Name) {\n\t\t_, err = database.RetrieveUserByName([]byte(json.Name))\n\t\tif err == nil {\n\t\t\t// The new user name is already taken. Assign the old name.\n\t\t\t// TODO: Return error that will be displayed in the admin interface.\n\t\t\tjson.Name = string(tempUser.Name)\n\t\t}\n\t}\n\t// Check if new slug is already taken\n\tif json.Slug != tempUser.Slug {\n\t\t_, err = database.RetrieveUserBySlug(json.Slug)\n\t\tif err == nil {\n\t\t\t// The new user slug is already taken. Assign the old slug.\n\t\t\t// TODO: Return error that will be displayed in the admin interface.\n\t\t\tjson.Slug = tempUser.Slug\n\t\t}\n\t}\n\tuser := structure.User{ID: json.ID, Name: []byte(json.Name), Slug: json.Slug, Email: []byte(json.Email), Image: []byte(json.Image), Cover: []byte(json.Cover), Bio: []byte(json.Bio), Website: []byte(json.Website), Location: []byte(json.Location)}\n\terr = methods.UpdateUser(&user, userID)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif json.Password != \"\" && (json.Password == json.PasswordRepeated) { // Update password if a new one was submitted\n\t\tencryptedPassword, err := authentication.EncryptPassword(json.Password)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\terr = database.UpdateUserPassword(user.ID, encryptedPassword, date.GetCurrentTime(), json.ID)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\t// Check if the user name was changed. If so, update the session cookie to the new user name.\n\tif json.Name != string(tempUser.Name) {\n\t\tlogInUser(json.Name, w)\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"User settings updated!\"))\n\treturn\n}",
"func RegisterUser(w http.ResponseWriter, r *http.Request) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thttp.Error(w, \"No input found!\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tvar newReq User\n\terr = json.Unmarshal(body, &newReq)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tvar username = newReq.UserID\n\tif _, ok := userData[username]; ok {\n\t\thttp.Error(w, \"User already exists!\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// log.Println(util.StringWithCharset(random.Intn(20)+10, charset))\n\tpreHashString := newReq.UserID + util.StringWithCharset(random.Intn(20)+10, util.Charset)\n\thashedString := crypto.CreateSHA256Hash(preHashString)\n\tuserData[username] = hashedString\n\thashOutput := UserHash{hashedString}\n\tlog.Println(userData)\n\toutJSON, err := json.Marshal(hashOutput)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(outJSON)\n}",
"func (h *Handler) update() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tuser := &model.User{}\n\t\tif err := json.NewDecoder(r.Body).Decode(user); err != nil {\n\t\t\tmsg := &errorMessage{\n\t\t\t\tError: err.Error(),\n\t\t\t\tMessage: \"user json decode error\",\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusBadRequest, msg)\n\t\t\treturn\n\t\t}\n\t\tif len(user.FirstName) == 0 && len(user.LastName) == 0 {\n\t\t\tmsg := &errorMessage{\n\t\t\t\tMessage: \"user must have fields to update\",\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusBadRequest, msg)\n\t\t\treturn\n\t\t}\n\n\t\tvars := mux.Vars(r)\n\t\tid := vars[userID]\n\t\tentity, err := h.UserDAO.Update(r.Context(), id, user)\n\t\tswitch {\n\t\tcase errors.Is(err, errorx.ErrNoUser):\n\t\t\tmsg := &errorMessage{\n\t\t\t\tMessage: fmt.Sprintf(\"user %s does not exist\", id),\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusNotFound, msg)\n\t\t\treturn\n\t\tcase errors.Is(err, errorx.ErrDeleteUser):\n\t\t\tmsg := &errorMessage{\n\t\t\t\tMessage: fmt.Sprintf(\"user %s has been deleted\", id),\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusGone, msg)\n\t\t\treturn\n\t\tcase err != nil:\n\t\t\tmsg := &errorMessage{\n\t\t\t\tError: err.Error(),\n\t\t\t\tMessage: \"user datastore error\",\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusInternalServerError, msg)\n\t\t\treturn\n\t\tdefault:\n\t\t\tresponse.JSON(w, http.StatusOK, entity)\n\t\t}\n\n\t}\n}",
"func PutUser(u structs.User) error {\n\tuserexists := false\n\tcuru := &structs.User{}\n\tif u.Username != \"\" {\n\t\terr := User([]byte(u.Username), curu)\n\t\tif err == nil {\n\t\t\tuserexists = true\n\t\t} else {\n\t\t\tlog.Errorw(\"PutUser userexists lookup error\",\n\t\t\t\t\"error\", err.Error(),\n\t\t\t\t\"userexists\", userexists,\n\t\t\t\t\"u\", u,\n\t\t\t\t\"curu\", curu,\n\t\t\t)\n\t\t}\n\t}\n\n\treturn Db.Update(func(tx *bolt.Tx) error {\n\t\tb := getBucket(tx, userBucket)\n\n\t\tu.LastUpdate = time.Now().Unix()\n\t\tif userexists {\n\t\t\tlog.Debugf(\"userexists.. keeping time at %v\", curu.CreatedOn)\n\t\t\tu.CreatedOn = curu.CreatedOn\n\t\t} else {\n\t\t\tu.CreatedOn = u.LastUpdate\n\t\t\tid, _ := b.NextSequence()\n\t\t\tu.ID = int(id)\n\t\t\tlog.Debugf(\"new user.. setting created on to %v\", u.CreatedOn)\n\t\t}\n\n\t\teU, err := gobEncodeUser(&u)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(u.Username), eU)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn err\n\t\t}\n\t\tlog.Debugf(\"user created %v\", u)\n\t\treturn nil\n\t})\n}",
"func PostUserHandler(w http.ResponseWriter, r *http.Request) {\n\tvar user models.User\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\tlog.Println(\"Error al parsear usuario\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tstatus := users.CreateUser(user)\n\tw.WriteHeader(status)\n}",
"func updateUser(user UserID, params map[string]interface{}, client *Client) error {\n\treturn client.Put(params, \"/access/users/\"+user.ToString())\n}",
"func (c UserController) handleUser(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodGet {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tuserInContext := r.Context().Value(\"CustomUser\")\n\t//fmt.Println(\"added and fetched from context in controller: \", tester)\n\tresponse, err := json.Marshal(userInContext)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(string(response)))\n}",
"func UpdateUser(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"Update user endpoint hit\")\n\n\tvars := mux.Vars(r)\n\n\tid := vars[\"id\"]\n\n\temail := r.FormValue(\"email\")\n\n\tuser := &models.User{}\n\n\tuser.Update(id, email)\n\n\tjson.NewEncoder(w).Encode(user)\n}",
"func (handler *Handler) handleUserLogin(w http.ResponseWriter, r *http.Request) {\n\n\t/**\n\tDefine a struct for just updating password\n\t*/\n\ttype loginUserStruct struct {\n\t\tEmail string `json:\"email\"`\n\t\tPassword string `json:\"password\"`\n\t}\n\n\tuserCred := &loginUserStruct{}\n\n\t//decode the request body into struct and failed if any error occur\n\terr := json.NewDecoder(r.Body).Decode(userCred)\n\tif err != nil {\n\t\tutils.ReturnJsonError(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\n\t}\n\n\t//Now look up the user\n\tuser, err := handler.userHelper.GetUserByEmail(strings.TrimSpace(strings.ToLower(userCred.Email)))\n\n\t//check for an error\n\tif err != nil {\n\t\t//There prob is not a user to return\n\t\tutils.ReturnJsonError(w, http.StatusForbidden, err)\n\t\treturn\n\t}\n\n\t//We have the user, try to login\n\tuser, err = handler.userHelper.login(userCred.Password, user)\n\n\t//If there is an error, don't login\n\tif err != nil {\n\t\t//There prob is not a user to return\n\t\tutils.ReturnJsonError(w, http.StatusForbidden, err)\n\t\treturn\n\t}\n\n\t//Check to see if the user was created\n\tif err == nil {\n\t\tutils.ReturnJson(w, http.StatusCreated, user)\n\t} else {\n\t\tutils.ReturnJsonError(w, http.StatusForbidden, err)\n\t}\n\n}",
"func UserHandler(w http.ResponseWriter, r *http.Request) {\n\tconn := &sql.DB{}\n\tuserRepo := repository.NewUserRepository(conn)\n\tuserUsecase := usecase.NewUserUsecase(userRepo)\n\tc := NewUserController(userUsecase, w, r)\n\tswitch r.Method {\n\tcase \"GET\":\n\t\tc.List()\n\tcase \"POST\":\n\t\tc.Create()\n\tdefault:\n\t\tfmt.Println(\"404 error\")\n\t}\n}",
"func (_obj *WebApiAuth) SysUser_Update(id int32, req *SysUser, res *SysUser, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_int32(id, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = req.WriteBlock(_os, 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = (*res).WriteBlock(_os, 3)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"SysUser_Update\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = (*res).ReadBlock(_is, 3, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}",
"func (this *UserController) Update() {\n\tflash \t := beego.ReadFromRequest(&this.Controller)\n\n\tid, _ := strconv.Atoi(this.Ctx.Input.Param(\":id\"))\n\tuser := &models.User{Id:id}\n\tuser.GetOne()\n\n\tnamesurname \t\t:= this.GetString(\"name_surname\")\n\tusername \t\t\t:= this.GetString(\"user_name\")\n\temail \t\t\t\t:= this.GetString(\"email\")\n\tpassword\t \t\t:= this.GetString(\"password\")\n\turl\t\t\t \t\t:= this.GetString(\"url\")\n\tinfo\t\t\t\t:= this.GetString(\"info\")\n\n\tvalid := validation.Validation{}\n\n\tvalid.Email(email, \"Email\")\n\n\tvalid.Required(username, \"Username\")\n\tvalid.Required(password, \"Password\")\n\n\tvalid.MaxSize(username, 20, \"Username\")\n\tvalid.MaxSize(password, 16, \"Password\")\n\n\tswitch {\n\tcase valid.HasErrors():\n\t\tfor _, err := range valid.Errors {\n\t\t\tlog.Println(err.Key, err.Message)\n\t\t}\n\t\tvalid.Error(\"Problem creating user!\")\n\t\tflash.Error(\"Problem creating user!\")\n\t\tflash.Store(&this.Controller)\n\tdefault:\n\t\tuser := &models.User{\n\t\t\tNameSurname\t\t:namesurname,\n\t\t\tUserName\t\t:username,\n\t\t\tEmail\t\t\t:email,\n\t\t\tPassword\t\t:Md5(password),\n\t\t\tUrl\t\t\t\t:url,\n\t\t\tInfo\t\t\t:info,\n\t\t\tRegisterTime \t:time.Now(),\n\t\t}\n\t\tswitch {\n\t\t\tcase user.ExistUserName():\n\t\t\t\tvalid.Error(\"This username is in use!\")\n\t\t\t\tflash.Error(\"This username is in use!\")\n\t\t\t\tflash.Store(&this.Controller)\n\t\t\tcase user.ExistEmail():\n\t\t\t\tvalid.Error(\"This email is in use!\")\n\t\t\t\tflash.Error(\"This email is in use!\")\n\t\t\t\tflash.Store(&this.Controller)\n\t\t\tdefault:\n\t\t\t\terr := user.Update()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tvalid.Error(fmt.Sprintf(\"%v\", err))\n\t\t\t\tflash.Notice(\"User updated successfully!\")\n\t\t\t\tflash.Store(&this.Controller)\n\t\t\t\tthis.Redirect(\"/admin/users\", 302)\n\t\t\t\treturn\n\t\t}\n\n\t}\n\n\tredirectUrl := \"/admin/users/edit/\" + strconv.Itoa(id)\n\tthis.Redirect(redirectUrl, 302)\n\tthis.Abort(\"302\")\n\treturn\n}",
"func (ch *Context) UserPatchHandler(ctx *fasthttp.RequestCtx) {\n\treq := &ctx.Request\n\tres := &ctx.Response\n\t_, err := ch.getUserBody(req)\n\tif err != nil {\n\t\tres.AppendBodyString(\"User not found\")\n\t\tres.SetStatusCode(400)\n\t}\n\tdata := make(map[string]interface{})\n\terr = json.Unmarshal(req.Body(), data)\n\tif err != nil {\n\t\tfmt.Println(\"Error unmarshalling\")\n\t}\n\n\t//更新现有的数据\n\t//for k, v := range data {\n\t//\tuserBody[k] = data[v]\n\t//}\n\t//res.AppendBody(userJson)\n\t//res.Header.SetContentType(\"application/json\")\n\tres.SetStatusCode(200)\n}",
"func (auh *AdminUserHandler) PostUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tvar apiKey = r.Header.Get(\"api-key\")\n\tif apiKey == \"\" || (apiKey != adminApiKey && apiKey != userApiKey) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t\treturn\n\t}\n\tl := r.ContentLength\n\tbody := make([]byte, l)\n\tr.Body.Read(body)\n\n\tuser := &entity.User{}\n\n\terr := json.Unmarshal(body, user)\n\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tuser, errs := auh.userService.StoreUser(user)\n\n\tif len(errs) > 0 {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\toutput, err := json.MarshalIndent(user, \"\", \"\\t\")\n\tw.WriteHeader(http.StatusCreated)\n\tw.Write(output)\n\treturn\n}",
"func (srv *UsersService) UpdateHandler(ctx *gin.Context) {\n\tlogger := srv.logger.New(\"action\", \"UpdateHandler\")\n\tuser := GetRequestedUser(ctx)\n\tif user == nil {\n\t\t// Returns a \"404 StatusNotFound\" response\n\t\tsrv.ResponseService.NotFound(ctx)\n\t\treturn\n\t}\n\n\trawData, err := ctx.GetRawData()\n\tif err != nil {\n\t\tlogger.Error(\"cannot read body\", \"err\", err)\n\t\tsrv.ResponseService.Error(ctx, responses.CanNotUpdateUser, \"Can't update user.\")\n\t\treturn\n\t}\n\n\tcurrentUser := GetCurrentUser(ctx)\n\tif currentUser.UID == user.UID ||\n\t\tcurrentUser.RoleName == \"root\" ||\n\t\tcurrentUser.RoleName == \"admin\" {\n\n\t\terr = srv.userForm.Update(user, currentUser, rawData)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"cannot update a user\", \"err\", err)\n\t\t\tsrv.ResponseService.ValidatorErrorResponse(ctx, responses.UnprocessableEntity, err)\n\t\t\treturn\n\t\t}\n\n\t\told, err := srv.Repository.GetUsersRepository().FindByUID(user.UID)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"cannot found user\", \"err\", err)\n\t\t\tsrv.ResponseService.NotFound(ctx)\n\t\t\treturn\n\t\t}\n\n\t\terr = srv.userLoaderService.LoadUserCompletely(old)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"cannot load user\", \"err\", err)\n\t\t\tsrv.ResponseService.Error(ctx, responses.CanNotUpdateUser, \"Can't update a user\")\n\t\t\treturn\n\t\t}\n\n\t\ttx := srv.Repository.GetUsersRepository().DB.Begin()\n\t\terr = srv.userCreator.Update(user, tx)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\t// Returns a \"400 StatusBadRequest\" response\n\t\t\tsrv.ResponseService.Error(ctx, responses.CanNotUpdateUser, \"Can't update a user\")\n\t\t\treturn\n\t\t}\n\n\t\tif currentUser.UID != user.UID &&\n\t\t\t(currentUser.RoleName == \"admin\" || currentUser.RoleName == \"root\") {\n\t\t\tsrv.SystemLogsService.LogModifyUserProfileAsync(old, user, currentUser.UID)\n\t\t}\n\n\t\ttx.Commit()\n\t}\n\n\t// Returns a \"204 StatusNoContent\" response\n\tctx.JSON(http.StatusNoContent, nil)\n}",
"func (_obj *WebApiAuth) SysUser_InsertWithContext(tarsCtx context.Context, req *SysUser, id *int32, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = req.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = _os.Write_int32((*id), 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"SysUser_Insert\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&(*id), 2, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}",
"func UpdateUser(res http.ResponseWriter, req *http.Request) {\n\tvar response responses.User\n\tuser := new(model.User)\n\tID := req.Context().Value(\"ID\").(string)\n\tdata := req.Context().Value(\"data\").(*validation.UpdateUser)\n\tnow := time.Now()\n\tdocKey, err := connectors.ReadDocument(\"users\", ID, user)\n\tif err != nil {\n\t\trender.Render(res, req, responses.NewHTTPError(http.StatusServiceUnavailable, constants.Unavailable))\n\t\treturn\n\t} else if len(docKey) == 0 {\n\t\trender.Render(res, req, responses.NewHTTPError(http.StatusBadRequest, constants.NotFoundResource))\n\t\treturn\n\t}\n\tcopier.Copy(&user, data)\n\tuser.UpdatedAt = now.Unix()\n\tdocKey, err = connectors.UpdateDocument(\"users\", docKey, user)\n\tif err != nil {\n\t\trender.Render(res, req, responses.NewHTTPError(http.StatusServiceUnavailable, constants.Unavailable))\n\t\treturn\n\t}\n\tresponse.ID = docKey\n\tcopier.Copy(&response, user)\n\trender.Render(res, req, responses.NewHTTPSucess(http.StatusOK, response))\n}",
"func editUser(userID int, firstName string, MI string, lastName string, privLevel int) error {\n\n\tdb, err := sql.Open(\"mysql\", DB_USER_NAME+\":\"+DB_PASSWORD+\"@unix(/var/run/mysql/mysql.sock)/\"+DB_NAME)\n\tif err != nil {\n\t\treturn errors.New(\"No connection\")\n\t}\n\n\tres, err := db.Exec(\"update Users set FirstName=?, MiddleInitial=?, LastName=?, PrivLevel=? where UserID=?\", firstName, MI, lastName, privLevel, userID)\n\n\tif err != nil {\n\t\treturn errors.New(\"User update failed.\")\n\t}\n\n\trowsAffected, err := res.RowsAffected()\n\n\tif rowsAffected != 1 {\n\t\treturn errors.New(\"Query didn't match any users.\")\n\t}\n\n\treturn nil\n}",
"func PutUsermetaViaUmetaId(UmetaId_ int64, iUsermeta *Usermeta) (int64, error) {\n\trow, err := Engine.Update(iUsermeta, &Usermeta{UmetaId: UmetaId_})\n\treturn row, err\n}",
"func UpdateUser(person *Person, id string) (err error) {\n\tfmt.Println(person)\n\tConfig.DB.Save(person)\n\treturn nil\n}",
"func db_update_user(username string, sessionid string, follow_username string, post Post){\n file_path := path.Join(\"db/users\", strings.ToLower(username)+\".json\")\n \n if _, err := os.Stat(file_path); os.IsNotExist(err) {\n return\n }\n user := db_JSON_to_user(username)\n \n if sessionid != \"\" {\n user.SessionID = sessionid\n }\n if follow_username != \"\" {\n user.Follows = append(user.Follows, follow_username)\n }\n if post.Content != \"\" {\n user.Posts = append(user.Posts, &post)\n }\n \n updated_user := db_user_to_JSON(user)\n \n writeerr := ioutil.WriteFile(file_path, updated_user, 0644)\n\n if writeerr != nil {\n panic(writeerr)\n }\n}",
"func (h *Handler) Add(_ context.Context, usr *usersapi.User) (err error) {\n\treturn h.provider.Add(&users.User{\n\t\tUsername: usr.Username,\n\t\tPassword: usr.Password,\n\t})\n}",
"func Update(c *gin.Context) {\n\tuserID, err := getUserID(c.Param(\"user_id\"))\n\tif err != nil {\n\t\tc.JSON(err.Status, err)\n\t\treturn\n\t}\n\n\tvar newUser users.User\n\tif err := c.ShouldBindJSON(&newUser); err != nil {\n\t\tbdErr := errors.NewBadRequestError(fmt.Sprintf(\"invalid json body %s\", err.Error()))\n\t\tc.JSON(bdErr.Status, bdErr)\n\t\treturn\n\t}\n\n\tnewUser.ID = userID\n\tisPartial := c.Request.Method == http.MethodPatch\n\n\tresult, updateErr := services.UserServ.UpdateUser(newUser, isPartial)\n\tif err != nil {\n\t\tc.JSON(updateErr.Status, updateErr)\n\t\treturn\n\t}\n\n\tisPublic := c.GetHeader(\"X-Public\") == \"true\"\n\tc.JSON(http.StatusOK, result.Marshall(isPublic))\n}",
"func (s *Server) handleDashboardUserEdit() http.HandlerFunc {\n\tvar o sync.Once\n\tvar tpl *template.Template\n\n\t//steps on the page\n\tsteps := struct {\n\t\tStepDel string\n\t}{\n\t\tStepDel: \"stepDel\",\n\t}\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx, logger := GetLogger(s.getCtx(r))\n\t\to.Do(func() {\n\t\t\ttpl = s.loadWebTemplateDashboard(ctx, \"user-edit.html\")\n\t\t})\n\t\tctx, provider, data, _, ok := s.createTemplateDataDashboard(w, r.WithContext(ctx), tpl, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\t//setup the breadcrumbs\n\t\tbreadcrumbs := []breadcrumb{\n\t\t\t{\"Users\", provider.GetURLUsers()},\n\t\t\t{\"Edit Team Member\", \"\"},\n\t\t}\n\t\tdata[TplParamBreadcrumbs] = breadcrumbs\n\t\tdata[TplParamActiveNav] = provider.GetURLUsers()\n\t\tdata[TplParamFormAction] = provider.GetURLUserEdit()\n\t\tdata[TplParamSteps] = steps\n\n\t\t//handle the input\n\t\tidStr := r.FormValue(URLParams.UserID)\n\t\tstep := r.FormValue(URLParams.Step)\n\n\t\t//prepare the data\n\t\tdata[TplParamUserID] = idStr\n\n\t\t//load the provider user\n\t\tid := uuid.FromStringOrNil(idStr)\n\t\tif id == uuid.Nil {\n\t\t\tlogger.Warnw(\"invalid uuid\", \"id\", idStr)\n\t\t\ts.SetCookieErr(w, Err)\n\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLUsers(), http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\t\tctx, providerUser, err := LoadProviderUserByProviderIDAndID(ctx, s.getDB(), provider.ID, &id)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"load provider user\", \"error\", err, \"id\", id)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\t\tif providerUser == nil {\n\t\t\tlogger.Errorw(\"no provider user\", \"id\", id)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamUser] = providerUser\n\n\t\t//prepare the confirmation modal\n\t\tdata[TplParamConfirmMsg] = GetMsgText(MsgUserDelConfirm)\n\t\tdata[TplParamConfirmSubmitName] = URLParams.Step\n\t\tdata[TplParamConfirmSubmitValue] = steps.StepDel\n\n\t\t//check the method\n\t\tif r.Method == http.MethodGet {\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//execute the correct operation\n\t\tvar msgKey MsgKey\n\t\tswitch step {\n\t\tcase steps.StepDel:\n\t\t\t//delete the provider user\n\t\t\tctx, err := DeleteUserProvider(ctx, s.getDB(), provider.ID, providerUser.ID)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"delete provider user\", \"error\", err, \"id\", providerUser.ID)\n\t\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmsgKey = MsgUserDel\n\t\tdefault:\n\t\t\tlogger.Errorw(\"invalid step\", \"id\", providerUser.ID, \"step\", step)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//success\n\t\ts.SetCookieMsg(w, msgKey, providerUser.Login)\n\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLUsers(), http.StatusSeeOther)\n\t}\n}",
"func UserHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\n\tcase \"GET\":\n\n\t\tusersJSON, err := json.Marshal(db.Users)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tw.Write(usersJSON)\n\n\tcase \"POST\":\n\n\t\terr := utils.IsJsonValid(w, r)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tuserPayload := db.UserPayload{}\n\t\tb, err := ioutil.ReadAll(r.Body)\n\t\tdefer r.Body.Close()\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\terr = json.Unmarshal(b, &userPayload)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t//validate email\n\t\tvalidEmail := utils.IsEmailValid(userPayload.UserEmail)\n\t\tif !validEmail {\n\t\t\tmsg := \"Email address is not valid\"\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\taccessToken := utils.String(50)\n\n\t\tcookie := http.Cookie{\n\t\t\tName: \"userToken\",\n\t\t\tValue: accessToken,\n\t\t\tExpires: time.Time{},\n\t\t\tMaxAge: 86400,\n\t\t\tSecure: false,\n\t\t\tHttpOnly: false,\n\t\t\tSameSite: 0,\n\t\t}\n\t\thttp.SetCookie(w, &cookie)\n\n\t\tcookie = http.Cookie{\n\t\t\tName: \"userEmail\",\n\t\t\tValue: userPayload.UserEmail,\n\t\t\tExpires: time.Time{},\n\t\t\tMaxAge: 86400,\n\t\t\tSecure: false,\n\t\t\tHttpOnly: false,\n\t\t\tSameSite: 0,\n\t\t}\n\t\thttp.SetCookie(w, &cookie)\n\n\t\tuser := db.User{}\n\t\tuser.Token = accessToken\n\t\tuser.Devices = make(map[string]*db.Device)\n\n\t\tdb.Users[userPayload.UserEmail] = &user\n\n\t\tw.Write([]byte(accessToken))\n\t}\n}",
"func UpdateUser(c *gin.Context) {\n\tuserID, userErr := strconv.ParseInt(c.Param(\"user_id\"), 10, 64)\n\tif userErr != nil {\n\t\terr := errors.NewBadRequestError(\"user id should be a number\")\n\t\tc.JSON(err.Status, err)\n\t\treturn\n\t}\n\n\t//intialize\n\tvar user users.User\n\t//check whether the given json body is valid or not\n\tif err := c.ShouldBindJSON(&user); err != nil {\n\t\tinvalidErr := errors.NewInternalServerError(\"invalid json body\")\n\t\tc.JSON(invalidErr.Status, invalidErr)\n\t\treturn\n\t}\n\n\t//send the user struct to the services\n\tuser.ID = userID\n\t//check whether the request method is PATCH and PUT\n\tisPartial := c.Request.Method == http.MethodPatch\n\n\tresult, err := services.UsersService.UpdateUser(isPartial, user)\n\tif err != nil {\n\t\tc.JSON(err.Status, err)\n\t\treturn\n\t}\n\n\t//final implementation\n\tc.JSON(http.StatusOK, result.Marshall(c.GetHeader(\"X-Public\") == \"true\"))\n}",
"func Update(user User) error {\n\n}",
"func (ac *ApiConfig) UpdateUserHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPost {\n\t\thttp.Error(w, r.Method+\" is not available\", http.StatusInternalServerError)\n\t\tzerolog.Error().Msg(r.Method + \" is not available\")\n\t\treturn\n\t}\n\tvar user *models.Users = &models.Users{}\n\terr := json.NewDecoder(r.Body).Decode(user)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsPass, err := bcrypt.GenerateFromPassword([]byte(user.Password), 12)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tuser.Password = string(sPass)\n\n\terr = ac.DHolder.UpdateUser(user)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tstat := &models.StatusIdentifier{\n\t\tOk: true,\n\t\tMessage: \"User Deleted\",\n\t}\n\n\terr = dResponseWriter(w, stat, http.StatusOK)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\treturn\n\t}\n\n\treturn\n}",
"func getUserHandler(res http.ResponseWriter, req *http.Request) {\n\tres.Header().Add(\"content-type\", \"application/json\")\n\tparams := mux.Vars(req)\n\tid, _ := primitive.ObjectIDFromHex(params[\"id\"])\n\tvar user MongoUserSchema\n\tusersCol := client.Database(\"Aviroop_Nandy_Appointy\").Collection(\"users\")\n\tctx, _ := context.WithTimeout(context.Background(), 15*time.Second)\n\terr := usersCol.FindOne(ctx, MongoUserSchema{ID: id}).Decode(&user)\n\tif err != nil {\n\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\tres.Write([]byte(`{\"Error message\":\"` + err.Error() + `\"}`))\n\t\treturn\n\t}\n\tjson.NewEncoder(res).Encode(user)\n}",
"func (server Server) UpdateUser(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r) // mux params\n\tid, err := strconv.Atoi(vars[\"id\"]) // convert the id type from string to int\n\tvar res models.APIResponse // make a response\n\tvar user models.User // make a user\n\n\tif err != nil {\n\t\tlog.Printf(\"Unable to convert the string into an int. %v\", err)\n\t\tres = models.BuildAPIResponseFail(\"Unable to convert the string into an int.\", nil)\n\t} else {\n\t\terr = json.NewDecoder(r.Body).Decode(&user) // decode the json request to note\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Unable to decode the request body. %v\", err)\n\t\t\tres = models.BuildAPIResponseFail(\"Unable to decode the request body.\", nil)\n\t\t} else {\n\t\t\tupdatedRows := updateUser(int64(id), user, server.db) // call update note to update the note\n\t\t\tres = models.BuildAPIResponseSuccess(\"User updated successfully.\", updatedRows)\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(res) // send the response\n}",
"func (a *API) addUser(w http.ResponseWriter, req *http.Request) {\n\t// NOTE(kiennt): Who can signup (create new user)?\n\tif err := req.ParseForm(); err != nil {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusBadRequest,\n\t\t\terr: err,\n\t\t})\n\t\treturn\n\t}\n\tusername := req.Form.Get(\"username\")\n\tpassword := req.Form.Get(\"password\")\n\tif username == \"\" || password == \"\" {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusBadRequest,\n\t\t\terr: errors.New(\"Incorrect sign up form\"),\n\t\t})\n\t\treturn\n\t}\n\n\t// Check to see if the user is already taken\n\tpath := common.Path(model.DefaultUsersPrefix, common.Hash(username, crypto.MD5))\n\tresp, err := a.etcdcli.DoGet(path)\n\tif err != nil {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusInternalServerError,\n\t\t\terr: err,\n\t\t})\n\t\treturn\n\t}\n\tif len(resp.Kvs) != 0 {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusBadRequest,\n\t\t\terr: errors.New(\"The username is already taken\"),\n\t\t})\n\t\treturn\n\t}\n\t// Do not store the plain text password, encrypt it!\n\thashed, err := common.GenerateBcryptHash(password, config.Get().PasswordHashingCost)\n\tif err != nil {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusInternalServerError,\n\t\t\terr: errors.Wrap(err, \"Something went wrong\"),\n\t\t})\n\t\treturn\n\t}\n\n\tuser := &model.User{\n\t\tUsername: username,\n\t\tPassword: hashed,\n\t}\n\t_ = user.Validate()\n\tr, _ := json.Marshal(&user)\n\t_, err = a.etcdcli.DoPut(path, string(r))\n\tif err != nil {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusInternalServerError,\n\t\t\terr: errors.Wrap(err, \"Unable to put a key-value pair into etcd\"),\n\t\t})\n\t\treturn\n\t}\n\t// Add user permission to view clouds\n\tif ok, err := a.policyEngine.AddPolicy(username, \"/clouds\", \"GET\"); !ok || err != nil {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusInternalServerError,\n\t\t\terr: errors.Wrap(err, \"Unable to add view cloud permission\"),\n\t\t})\n\t\treturn\n\t}\n\ta.respondSuccess(w, http.StatusOK, nil)\n}",
"func (ctx *HandlerContext) SignUpHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, \"that method is not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tcontentType := r.Header.Get(\"Content-Type\")\n\tif contentType != \"application/json\" {\n\t\thttp.Error(w, \"request body must be in JSON\", http.StatusUnsupportedMediaType)\n\t\treturn\n\t}\n\tnewUser := &users.NewUser{}\n\tdec := json.NewDecoder(r.Body)\n\terr := dec.Decode(newUser)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := newUser.Validate(); err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tuser, err := newUser.ToUser()\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\taddedUser, err := ctx.Users.Insert(user)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tstate := sessions.SessionState{SessionTime: time.Now(), User: addedUser}\n\n\t_, err2 := sessions.BeginSession(ctx.SigningKey, ctx.Store, state, w)\n\tif err2 != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\n\terr = json.NewEncoder(w).Encode(addedUser)\n\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t}\n\n}",
"func (pk *JSONPasswordKeeper) Put(userinfo basicauth.Account) error {\n\tpk.mutex.Lock()\n\tdefer pk.mutex.Unlock()\n\tif _, exists := pk.userInfo[userinfo.UserName]; exists {\n\t\treturn ErrUserExists\n\t}\n\tpk.userInfo[userinfo.UserName] = userinfo\n\treturn pk.flushToDisk()\n}",
"func UpdateUser(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tlog.Printf(\"UpdateUser in db %v\", id)\n\tvar user models.User\n\n\tdb := db.GetDB()\n\tif err := db.Where(\"id = ?\", id).First(&user).Error; err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t\tlog.Println(\"Failed to UpdateUser in db\")\n\t\treturn\n\t}\n\tc.BindJSON(&user)\n\tdb.Save(&user)\n\tc.JSON(http.StatusOK, &user)\n}",
"func (ctx *Context) UsersMeHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\t\tstate := &SessionState{}\n\t\ts, err := sessions.GetSessionID(r, ctx.SessionKey)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Could not find user\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\terr = ctx.SessionStore.Get(s, state)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error getting sessionID\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(state.User)\n\tcase \"PATCH\":\n\t\t// get the current authenticated user\n\t\tstate := &SessionState{}\n\t\ts, err := sessions.GetSessionID(r, ctx.SessionKey)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Could not find user\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\terr = ctx.SessionStore.Get(s, state)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error getting sessionID\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\t//get updates and apply to user\n\t\tdecoder := json.NewDecoder(r.Body)\n\t\tupdated := &users.UserUpdates{}\n\t\tif err := decoder.Decode(updated); err != nil {\n\t\t\thttp.Error(w, \"Invalid JSON\", http.StatusBadRequest)\n\t\t}\n\t\terr = ctx.UserStore.Update(updated, state.User)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error updating user\", http.StatusInternalServerError)\n\t\t}\n\t}\n}",
"func (s *authService) saveUser(u *User) error {\n\tif err := u.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tvar sqlExec string\n\n\t// if id is nil then it is a new user\n\tif u.ID == uuid.Nil {\n\t\t// generate ID\n\t\tu.ID = uuid.NewV4()\n\t\tsqlExec = `INSERT INTO user \n\t\t(id, email, password, firstname, lastname, is_superuser, is_active, is_deleted, created_at, updated_at, deleted_at, avatar_url) \n\t\tVALUES (:id, :email, :password, :firstname, :lastname, :is_superuser, :is_active, :is_deleted, :created_at, :updated_at, :deleted_at, :avatar_url)`\n\t} else {\n\t\tsqlExec = `UPDATE user SET email=:email, password=:password, firstname=:firstname, lastname=:lastname, is_superuser=:is_superuser, \n\t\tis_active=:is_active, is_deleted=:is_deleted, created_at=:created_at, updated_at=:updated_at, deleted_at=:deleted_at, avatar_url=:avatar_url WHERE id=:id`\n\t}\n\n\ttx, err := s.db.Beginx()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = tx.NamedExec(sqlExec, &u)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func Update(c *gin.Context) {\n\tuserId, idErr := getUserID(c.Param(\"user_id\"))\n\tif idErr != nil {\n\t\tc.JSON(idErr.Status, idErr)\n\t\treturn\n\t}\n\n\tvar user models.User\n\tif err := c.ShouldBindJSON(&user); err != nil {\n\t\trestErr := rest_errors.NewBadRequestError(\"invalid json body\")\n\t\tc.JSON(restErr.Status, restErr)\n\t\treturn\n\t}\n\n\tuser.Id = userId\n\n\tisPartial := c.Request.Method == http.MethodPatch\n\n\tresult, err := services.UsersService.UpdateUser(isPartial, user)\n\tif err != nil {\n\t\tc.JSON(err.Status, err)\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, result.Marshal(c.GetHeader(\"X-Public\") == \"true\"))\n}",
"func UpdateUser(userId int64, userData *UserEntry) error {\n _ , nerr := model.Database.Exec(\"UPDATE users SET username = ?, isadmin = ?, email = ? WHERE userid = ?\", userData.Username, userData.IsAdmin, userData.Email, userId)\n if nerr != nil {\n return nerr\n }\n return nil\n}",
"func (f *Factory) UpdateUser(id string,firstname string, lastname string, age int) * domain.User {\n\treturn &domain.User{\n\t\tID:\t\t\tid,\t\t\n\t\tFirstname: firstname,\n\t\tLastname: lastname,\n\t\tAge: age,\n\t}\n\n}",
"func (h *Host) SetUser(u string) {\n}",
"func (h *userHandler) createUser(ctx context.Context, rw http.ResponseWriter, r *http.Request) {\n\n\tvar user = &model.User{}\n\n\terr := json.NewDecoder(r.Body).Decode(user)\n\tif err != nil {\n\n\t\th.serv.writeResponse(ctx, rw, err.Error(), http.StatusBadRequest, nil)\n\n\t\treturn\n\n\t}\n\n\tif user.Login == \"\" || user.Password == \"\" {\n\n\t\th.serv.writeResponse(ctx, rw, \"Login or password are empty\", http.StatusBadRequest, nil)\n\n\t\treturn\n\t}\n\n\terr = h.registerUser(ctx, user)\n\tif err != nil {\n\n\t\th.serv.writeResponse(ctx, rw, err.Error(), http.StatusBadRequest, nil)\n\n\t\treturn\n\t}\n\n\th.serv.writeResponse(ctx, rw, \"user was created: \"+user.Login, http.StatusCreated, user)\n\n}",
"func UpdateUser(ctx iris.Context) {\n\tvar (\n\t\tuser model.User\n\t\tnewUser model.User\n\t\tresult iris.Map\n\t)\n\tid := ctx.Params().Get(\"id\") // get id by params\n\tdb := config.GetDatabaseConnection()\n\tdefer db.Close()\n\terr := db.First(&user, id).Error\n\tif err != nil {\n\t\tresult = iris.Map{\n\t\t\t\"error\": \"true\",\n\t\t\t\"status\": iris.StatusBadRequest,\n\t\t\t\"message\": \"user not found\",\n\t\t\t\"result\": nil,\n\t\t}\n\t}\n\tctx.ReadJSON(&newUser)\n\terr = db.Model(&user).Updates(newUser).Error\n\tif err != nil {\n\t\tresult = iris.Map{\n\t\t\t\"error\": \"true\",\n\t\t\t\"status\": iris.StatusBadRequest,\n\t\t\t\"message\": \"error when update user\",\n\t\t\t\"result\": err.Error(),\n\t\t}\n\t} else {\n\t\tresult = iris.Map{\n\t\t\t\"error\": \"false\",\n\t\t\t\"status\": iris.StatusOK,\n\t\t\t\"message\": \"success update user\",\n\t\t\t\"result\": newUser,\n\t\t}\n\t}\n\tctx.JSON(result)\n\treturn\n}",
"func (a *Users) Update(w http.ResponseWriter, r *http.Request) {\n\tid := getUserID(r)\n\ta.l.Println(\"[DEBUG] get record id\", id)\n\n\t// fetch the user from the context\n\tacc := r.Context().Value(KeyUser{}).(*models.User)\n\tacc.ID = id\n\ta.l.Println(\"[DEBUG] updating user with id\", acc.ID)\n\n\terr := models.UpdateUser(acc)\n\n\tif err == models.ErrUserNotFound {\n\t\ta.l.Println(\"[ERROR] user not found\", err)\n\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tmodels.ToJSON(&GenericError{Message: \"User not found in database\"}, w)\n\t\treturn\n\t}\n\n\t// write the no content success header\n\tw.WriteHeader(http.StatusNoContent)\n}",
"func CreateUserHandler(connection *sql.DB, cnf config.Config) negroni.HandlerFunc {\n\treturn negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t\tuser := &models.UserCreate{}\n\t\terr := util.RequestToJSON(r, user)\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, errors.New(\"Bad json\"))\n\t\t\treturn\n\t\t}\n\n\t\tif err := user.Validate(); err == nil {\n\t\t\tif err := user.ValidatePassword(); err == nil {\n\n\t\t\t\thash, _ := bcrypt.GenerateFromPassword([]byte(user.Password), 10)\n\t\t\t\tuser.Hash = string(hash)\n\n\t\t\t\tcreatedID, err := db.CreateUser(connection, user)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.SendBadRequest(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcreatedUser, err := db.GetUserByID(connection, createdID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.SendBadRequest(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// create JWT object with claims\n\t\t\t\texpiration := time.Now().Add(time.Hour * 24 * 31).Unix()\n\t\t\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\t\t\t\"sub\": createdUser.ID,\n\t\t\t\t\t\"iat\": time.Now().Unix(),\n\t\t\t\t\t\"exp\": expiration,\n\t\t\t\t})\n\n\t\t\t\t// Load secret key from config and generate a signed token\n\t\t\t\tsecretKey := cnf.SecretKey\n\t\t\t\ttokenString, err := token.SignedString([]byte(secretKey))\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.SendError(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\ttype Token struct {\n\t\t\t\t\tToken string `json:\"token\"`\n\t\t\t\t\tExpiresOn string `json:\"expires_on\"`\n\t\t\t\t\tUser *models.UserResponse `json:\"user\"`\n\t\t\t\t}\n\n\t\t\t\tutil.SendOK(w, &Token{\n\t\t\t\t\tToken: tokenString,\n\t\t\t\t\tExpiresOn: strconv.Itoa(int(expiration)),\n\t\t\t\t\tUser: &createdUser,\n\t\t\t\t})\n\n\t\t\t} else {\n\t\t\t\tutil.SendBadRequest(w, err)\n\t\t\t}\n\t\t} else {\n\t\t\tutil.SendBadRequest(w, err)\n\t\t}\n\t})\n}",
"func (m *SmsLogRow) SetUserId(value *string)() {\n err := m.GetBackingStore().Set(\"userId\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (s *Service) PutUser(ctx context.Context, u *influxdb.User) error {\n\treturn s.kv.Update(ctx, func(tx Tx) error {\n\t\treturn s.putUser(ctx, tx, u)\n\t})\n}",
"func (context *HandlerContext) SpecificUserHandler(w http.ResponseWriter, r *http.Request) {\n\t// Get session state from session store.\n\n\tsessionState := &SessionState{}\n\tsessionID, err := sessions.GetState(r, context.SigningKey, context.SessionStore, sessionState)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"error getting session state: %v\", err), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tpath := path.Base(r.URL.Path)\n\n\tvar givenID int64\n\n\tif path != \"me\" {\n\t\tgivenID, err = strconv.ParseInt(path, 10, 64)\n\t}\n\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"error parsing ID: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tswitch r.Method {\n\n\t// Get the current user from the session state and respond with that user encoded as JSON object.\n\tcase \"GET\":\n\n\t\tvar user *users.User\n\n\t\tif path == \"me\" {\n\t\t\tuser, err = sessionState.User, nil\n\t\t} else {\n\t\t\tuser, err = context.UserStore.GetByID(givenID)\n\t\t}\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"no user is found with given ID: %v\", err), http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Add(headerContentType, contentTypeJSON)\n\t\tw.WriteHeader(http.StatusOK)\n\n\t\terr = json.NewEncoder(w).Encode(user)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"error encoding SessionState Struct to JSON\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t// Update the current user with the JSON in the request body,\n\t// and respond with the newly updated user, encoded as a JSON object.\n\tcase \"PATCH\":\n\t\t// Get Updates struct from request body.\n\t\tif path != \"me\" || givenID != sessionState.User.ID {\n\t\t\thttp.Error(w, \"User ID is not valid or does not match current-authenticaled user\", http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\t\t// containJSON(r.Header.Get(headerContentType), w)\n\n\t\tif !strings.HasPrefix(r.Header.Get(headerContentType), contentTypeJSON) {\n\t\t\thttp.Error(w, \"request body must be in JSON\", http.StatusUnsupportedMediaType)\n\t\t\treturn\n\t\t}\n\n\t\t// Remove the user old fields from the trie.\n\t\tcontext.Trie.Remove(sessionState.User.FirstName, sessionState.User.ID)\n\t\tcontext.Trie.Remove(sessionState.User.LastName, sessionState.User.ID)\n\n\t\tupdates := &users.Updates{}\n\t\terr := json.NewDecoder(r.Body).Decode(updates)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"error decoding request body: invalid JSON in request body\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\t// Update session state.\n\t\tsessionState.User.FirstName = updates.FirstName\n\t\tsessionState.User.LastName = updates.LastName\n\n\t\t// Update session store.\n\t\terr = context.SessionStore.Save(sessionID, sessionState)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"error saving updated session state to session store: %s\", err), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t// Insert the updated user fields into the trie.\n\t\tcontext.Trie.Insert(sessionState.User.FirstName, sessionState.User.ID)\n\t\tcontext.Trie.Insert(sessionState.User.LastName, sessionState.User.ID)\n\n\t\t// Update user store.\n\t\tuser, err := context.UserStore.Update(sessionState.User.ID, updates)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"error updating user store: %s\", err), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Add(headerContentType, contentTypeJSON)\n\t\tw.WriteHeader(http.StatusOK)\n\n\t\t// err = json.NewEncoder(w).Encode(sessionState.User)\n\t\terr = 
json.NewEncoder(w).Encode(user)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"error encoding SessionState Struct to JSON\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\tdefault:\n\t\thttp.Error(w, \"expect GET or PATCH method only\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n}",
"func (user *User) Put(field string, value interface{}) {\n\tdb.First(user).Update(field, value)\n}",
"func (p *Processor) updateUser(user User) {\n\tp.users[user.Id] = user\n}",
"func OnBoardUser(u *models.User) error {\n\to := GetOrmer()\n\tcreated, id, err := o.ReadOrCreate(u, \"Username\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif created {\n\t\tu.UserID = int(id)\n\t\t// current orm framework doesn't support to fetch a pointer or sql.NullString with QueryRow\n\t\t// https://github.com/astaxie/beego/issues/3767\n\t\tif len(u.Email) == 0 {\n\t\t\t_, err = o.Raw(\"update harbor_user set email = null where user_id = ? \", id).Exec()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\texisting, err := GetUser(*u)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tu.Email = existing.Email\n\t\tu.SysAdminFlag = existing.SysAdminFlag\n\t\tu.Realname = existing.Realname\n\t\tu.UserID = existing.UserID\n\t}\n\treturn nil\n}"
] | [
"0.7637592",
"0.7349026",
"0.676662",
"0.6664704",
"0.64797777",
"0.64533484",
"0.6371254",
"0.6328185",
"0.62599856",
"0.62352246",
"0.62276334",
"0.6226214",
"0.6223639",
"0.62121433",
"0.6204188",
"0.6164084",
"0.6130815",
"0.6123331",
"0.6111087",
"0.61034936",
"0.60974777",
"0.609597",
"0.60953236",
"0.6079551",
"0.6048001",
"0.6046353",
"0.60435945",
"0.60421264",
"0.60281533",
"0.60269654",
"0.602096",
"0.6015632",
"0.5983673",
"0.598316",
"0.5950145",
"0.59456414",
"0.5927528",
"0.5905189",
"0.5893824",
"0.5888378",
"0.58848035",
"0.5881218",
"0.58448744",
"0.58408195",
"0.5836028",
"0.5831159",
"0.58236706",
"0.5820083",
"0.5817802",
"0.5816679",
"0.5808205",
"0.58013475",
"0.5790225",
"0.5782465",
"0.57712084",
"0.5768169",
"0.57642597",
"0.576399",
"0.5759173",
"0.5749179",
"0.5746776",
"0.57455677",
"0.5744283",
"0.5733939",
"0.57154197",
"0.57103574",
"0.5702464",
"0.5699568",
"0.56931716",
"0.5691207",
"0.5683006",
"0.5675591",
"0.5675211",
"0.5661125",
"0.5657748",
"0.56566006",
"0.5654865",
"0.56528085",
"0.56521326",
"0.56440437",
"0.5637734",
"0.56348646",
"0.5624609",
"0.5622909",
"0.5615753",
"0.5612259",
"0.56080484",
"0.56035906",
"0.55992705",
"0.55986196",
"0.55966985",
"0.55921674",
"0.5588647",
"0.55882186",
"0.5587865",
"0.55871195",
"0.5585392",
"0.55825114",
"0.5581896",
"0.55707645"
] | 0.7697687 | 0 |
DeleteUserHandler deletes a user based on the id | func DeleteUserHandler(w http.ResponseWriter, r *http.Request) {
params := mux.Vars(r)
k := params["id"]
if _, ok := Listusers[k]; ok {
delete(Listusers, k)
} else {
log.Printf("No encontramos el id %s", k)
}
w.WriteHeader(http.StatusNoContent)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func DeleteUserHandler(connection *sql.DB, cnf config.Config) negroni.HandlerFunc {\n\treturn negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t\tvar queryToken = r.URL.Query().Get(\"token\")\n\n\t\tif len(queryToken) < 1 {\n\t\t\tqueryToken = r.Header.Get(\"token\")\n\t\t}\n\n\t\tif len(queryToken) < 1 {\n\t\t\tutil.SendBadRequest(w, errors.New(\"token is mandatory\"))\n\t\t\treturn\n\t\t}\n\n\t\tuser := &models.UserResponse{}\n\t\terr := util.RequestToJSON(r, user)\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, errors.New(\"Bad json\"))\n\t\t\treturn\n\t\t}\n\n\t\tsecretKey := cnf.SecretKey\n\t\ttok, err := jwt.Parse(queryToken, func(t *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte(secretKey), nil\n\t\t})\n\n\t\tclaims := tok.Claims.(jwt.MapClaims)\n\t\tvar ID = claims[\"sub\"].(float64)\n\n\t\tif int64(ID) != user.ID {\n\t\t\tutil.SendBadRequest(w, errors.New(\"you can only delete your own user object\"))\n\t\t\treturn\n\t\t}\n\n\t\tdb.DeleteUser(connection, user)\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, err)\n\t\t\treturn\n\t\t}\n\t\tutil.SendOK(w, string(\"\"))\n\n\t})\n}",
"func (h *UserHandler) Delete(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tidStr := vars[\"id\"]\n\tid, err := strconv.ParseUint(idStr, 10, 64)\n\tif err != nil {\n\t\tlog.Println(errors.Wrapf(err, \"error parse uint:%v\", idStr))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlog.Printf(\"/users/%d DELETE handled\", id)\n\n\tif err := h.model.Delete(id); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusNoContent)\n}",
"func deleteUser(w http.ResponseWriter, r *http.Request) {\r\n\tparams := mux.Vars(r)\r\n\tstmt, err := db.Prepare(\"DELETE FROM users WHERE id = ?\")\r\n\tif err != nil {\r\n\t\tpanic(err.Error())\r\n\t}\r\n\t_, err = stmt.Exec(params[\"id\"])\r\n\tif err != nil {\r\n\t\tpanic(err.Error())\r\n\t}\r\n\tfmt.Fprintf(w, \"User with id = %s was deleted\", params[\"id\"])\r\n}",
"func DeleteUserHandler(w http.ResponseWriter, r *http.Request, db *gorp.DbMap, c *businesscontext.Ctx) error {\n\tvars := mux.Vars(r)\n\tusername := vars[\"username\"]\n\n\tif !c.User.Admin && username != c.User.Username {\n\t\treturn WriteJSON(w, r, nil, http.StatusForbidden)\n\t}\n\n\tu, errLoad := user.LoadUserWithoutAuth(db, username)\n\tif errLoad != nil {\n\t\treturn sdk.WrapError(errLoad, \"deleteUserHandler> Cannot load user from db\")\n\t}\n\n\ttx, errb := db.Begin()\n\tif errb != nil {\n\t\treturn sdk.WrapError(errb, \"deleteUserHandler> cannot start transaction\")\n\t}\n\tdefer tx.Rollback()\n\n\tif err := user.DeleteUserWithDependencies(tx, u); err != nil {\n\t\treturn sdk.WrapError(err, \"deleteUserHandler> cannot delete user\")\n\t}\n\n\tif err := tx.Commit(); err != nil {\n\t\treturn sdk.WrapError(err, \"deleteUserHandler> cannot commit transaction\")\n\t}\n\n\treturn nil\n}",
"func (api *API) deleteUserHandler() service.Handler {\n\treturn func(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\t\tvars := mux.Vars(r)\n\t\tusername := vars[\"permUsernamePublic\"]\n\n\t\tconsumer := getUserConsumer(ctx)\n\n\t\ttx, err := api.mustDB().Begin()\n\t\tif err != nil {\n\t\t\treturn sdk.WrapError(err, \"cannot start transaction\")\n\t\t}\n\t\tdefer tx.Rollback() // nolint\n\n\t\tvar u *sdk.AuthentifiedUser\n\t\tif username == \"me\" {\n\t\t\tu, err = user.LoadByID(ctx, tx, consumer.AuthConsumerUser.AuthentifiedUserID)\n\t\t} else {\n\t\t\tu, err = user.LoadByUsername(ctx, tx, username)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// We can't delete the last admin\n\t\tif u.Ring == sdk.UserRingAdmin {\n\t\t\tcount, err := user.CountAdmin(tx)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif count < 2 {\n\t\t\t\treturn sdk.NewErrorFrom(sdk.ErrForbidden, \"can't remove the last admin\")\n\t\t\t}\n\t\t}\n\n\t\t// We can't delete a user if it's the last admin in a group\n\t\tvar adminGroupIDs []int64\n\t\tgus, err := group.LoadLinksGroupUserForUserIDs(ctx, tx, []string{u.ID})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor i := range gus {\n\t\t\tif gus[i].Admin {\n\t\t\t\tadminGroupIDs = append(adminGroupIDs, gus[i].GroupID)\n\t\t\t}\n\t\t}\n\t\tif len(adminGroupIDs) > 0 {\n\t\t\tgus, err := group.LoadLinksGroupUserForGroupIDs(ctx, tx, adminGroupIDs)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tadminLeftCount := make(map[int64]int)\n\t\t\tfor _, id := range adminGroupIDs {\n\t\t\t\tadminLeftCount[id] = 0\n\t\t\t}\n\t\t\tfor i := range gus {\n\t\t\t\tif gus[i].AuthentifiedUserID != u.ID && gus[i].Admin {\n\t\t\t\t\tadminLeftCount[gus[i].GroupID] += 1\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, count := range adminLeftCount {\n\t\t\t\tif count < 1 {\n\t\t\t\t\treturn sdk.NewErrorFrom(sdk.ErrForbidden, \"cannot remove user because it is the last admin of a group\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif err := user.DeleteByID(tx, u.ID); err != nil {\n\t\t\treturn sdk.WrapError(err, \"cannot delete user\")\n\t\t}\n\n\t\tif err := tx.Commit(); err != nil {\n\t\t\treturn sdk.WithStack(err)\n\t\t}\n\n\t\treturn service.WriteJSON(w, nil, http.StatusOK)\n\t}\n}",
"func DeleteUser(c *gin.Context) {}",
"func (uh *UserHandler) Delete(c echo.Context) error {\n\tid_, err := strconv.Atoi(c.Param(\"id\"))\n\tid := uint(id_)\n\n\terr = uh.UserUseCase.Delete(id)\n\n\tif err != nil {\n\t\treturn c.JSON(GetStatusCode(err), ResponseError{Message: err.Error()})\n\t}\n\n\treturn c.NoContent(http.StatusNoContent)\n}",
"func DeleteUser(w http.ResponseWriter, req *http.Request) {\n\tvars := mux.Vars(req)\n\tid := vars[\"id\"]\n\n\tif err := db.Remove(id); err != nil {\n\t\thandleError(err, \"Failed to remove User: %v\", w)\n\t\treturn\n\t}\n\n\tw.Write([]byte(\"OK\"))\n}",
"func (app *App) deleteUser(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid, err := strconv.Atoi(vars[\"id\"])\n\tif err != nil {\n\t\trespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\n\tuser := &users.User{ID: int64(id)}\n\terr = user.DeleteUser(app.Db)\n\tif err != nil {\n\t\trespondWithError(w, http.StatusNotFound, err.Error())\n\t\treturn\n\t}\n\n\trespondWithJSON(w, http.StatusOK, map[string]string{\"message\": \"User deleted successfully\"})\n}",
"func (ac *ApiConfig) DeleteUserHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodGet {\n\t\thttp.Error(w, r.Method+\" is not available\", http.StatusInternalServerError)\n\t\tzerolog.Error().Msg(r.Method + \" is not available\")\n\t\treturn\n\t}\n\n\tuserID := r.URL.Query().Get(\"user_id\")\n\tif userID == \"\" {\n\t\thttp.Error(w, \"user_id is empty, fill it \", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tid, err := strconv.Atoi(userID)\n\tif err != nil {\n\t\thttp.Error(w, \"user_id is not an integer\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\terr = ac.DHolder.DeleteUser(id)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tstat := &models.StatusIdentifier{\n\t\tOk: true,\n\t\tMessage: \"User Deleted\",\n\t}\n\n\terr = dResponseWriter(w, stat, http.StatusOK)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\treturn\n\t}\n\n\treturn\n}",
"func deleteUser(res http.ResponseWriter, req *http.Request, p httprouter.Params) {\n\t_, err := db.Exec(`\n\t\tDELETE FROM accounts\n\t\tWHERE username = $1;`, p.ByName(\"username\"),\n\t)\n\tif err != nil {\n\t\tlog.Println(\"deleteUser:\", err)\n\t}\n\n\twriteJSON(res, 200, jsMap{\"status\": \"OK\"})\n}",
"func deleteUser(c *gin.Context) {\n\tvar user user\n\tuserID := c.Param(\"id\")\n\n\tdb.First(&user, userID)\n\n\tif user.Id == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\"status\": http.StatusNotFound, \"message\": \"No user found!\"})\n\t\treturn\n\t}\n\n\tdb.Delete(&user)\n\tc.JSON(http.StatusOK, gin.H{\"status\": http.StatusOK, \"message\": \"User deleted successfully!\"})\n}",
"func DeleteUserHandler(w http.ResponseWriter, req *http.Request) {\n\n\t// Get session values or redirect to Login\n\tsession, err := sessions.Store.Get(req, \"session\")\n\n\tif err != nil {\n\t\tlog.Println(\"error identifying session\")\n\t\thttp.Redirect(w, req, \"/login/\", 302)\n\t\treturn\n\t\t// in case of error\n\t}\n\n\t// Prep for user authentication\n\tsessionMap := getUserSessionValues(session)\n\n\tusername := sessionMap[\"username\"]\n\tloggedIn := sessionMap[\"loggedin\"]\n\tisAdmin := sessionMap[\"isAdmin\"]\n\n\tvars := mux.Vars(req)\n\tidString := vars[\"id\"]\n\n\tpk, err := strconv.Atoi(idString)\n\tif err != nil {\n\t\tpk = 0\n\t\tlog.Println(err)\n\t}\n\n\tfmt.Println(session)\n\n\tif isAdmin != \"true\" {\n\t\thttp.Redirect(w, req, \"/\", 302)\n\t\treturn\n\t}\n\n\tuser, err := database.PKLoadUser(db, int64(pk))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tfmt.Println(\"Unable to load User\")\n\t\thttp.Redirect(w, req, \"/\", http.StatusSeeOther)\n\t}\n\n\tuser.IsAdmin = true\n\n\terr = database.UpdateUser(db, user)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\twv := WebView{\n\t\tUser: user,\n\t\tIsLoggedIn: loggedIn,\n\t\tSessionUser: username,\n\t\tIsAdmin: isAdmin,\n\t\tUserFrame: false,\n\t\tArchitecture: baseArchitecture,\n\t}\n\n\tif req.Method == \"GET\" {\n\t\tRender(w, \"templates/delete_user.html\", wv)\n\t}\n\n\tif req.Method == \"POST\" {\n\n\t\terr := database.DeleteUser(db, user.ID)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\turl := \"/user_index/\"\n\n\t\thttp.Redirect(w, req, url, http.StatusFound)\n\t}\n\n}",
"func (auh *AdminUserHandler) DeleteUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\n\tvar apiKey = r.Header.Get(\"api-key\")\n\tif apiKey == \"\" || (apiKey != adminApiKey && apiKey != userApiKey) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t\treturn\n\t}\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\t_, errs := auh.userService.DeleteUser(uint(id))\n\n\tif len(errs) > 0 {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusNoContent)\n\treturn\n}",
"func (h *Handler) deleteUser(c *gin.Context) handlerResponse {\n\n\tdeletedUser, err := h.service.User.Delete(c.Param(userParameter), h.who(c))\n\tif err != nil {\n\t\treturn handleError(err)\n\t}\n\t// Remove password so we do not show in response\n\tdeletedUser.Password = \"\"\n\treturn handleOK(deletedUser)\n}",
"func DeleteUser(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\n\tuserID, err := strconv.ParseInt(params[\"id\"], 10, 64)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tuserIDToken, err := authentication.ExtractUserId(r)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusUnauthorized, err)\n\t\treturn\n\t}\n\n\tif userIDToken != userID {\n\t\tresponses.Error(w, http.StatusForbidden, errors.New(\"não é possível manipular usuário de terceiros\"))\n\t\treturn\n\t}\n\n\tdb, err := database.Connect()\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\trepository := repository.NewRepositoryUser(db)\n\n\tif err := repository.DeleteUser(userID); err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, http.StatusNoContent, nil)\n}",
"func DeleteUser(id int) {\n\tvar i int\n\ti = GetIndexOfUser(id)\n\tDeleteUserFromDatabase(i)\n}",
"func Delete(w http.ResponseWriter, r *http.Request) {\n\tuserID := context.Get(r, \"userID\").(int)\n\n\t// Excluindo usuário logado\n\terr := ServiceUser.Delete(userID)\n\n\tif err != nil {\n\t\tw.Write(util.MessageInfo(\"message\", err.Error()))\n\t\treturn\n\t}\n\n\tw.Write(util.MessageInfo(\"message\", \"Excluído com sucesso\"))\n}",
"func (pc UserController) Delete(c *gin.Context) {\n\tid := c.Params.ByName(\"id\")\n\tvar u repository.UserRepository\n\tidInt, _ := strconv.Atoi(id)\n\tif err := u.DeleteByID(idInt); err != nil {\n\t\tc.AbortWithStatus(403)\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\n\tc.JSON(200, gin.H{\"success\": \"ID\" + id + \"のユーザーを削除しました\"})\n\treturn\n}",
"func DeleteUser(w http.ResponseWriter, r *http.Request) {\r\n\tdefer r.Body.Close()\r\n\tuser := r.Context().Value(\"user\").(string)\r\n\r\n\tif err := dao.DBConn.RemoveUserByEmail(user); err != nil {\r\n\t\tlog.Println(err)\r\n\t\tu.RespondWithError(w, http.StatusBadRequest, \"User doesn't exist or has already been deleted\")\r\n\t\treturn\r\n\t}\r\n\r\n\tif err := dao.DBConn.RemoveUserExpenses(user); err != nil {\r\n\t\tlog.Println(err)\r\n\t\tu.RespondWithError(w, http.StatusBadRequest, \"User doesn't exist or has already been deleted\")\r\n\t\treturn\r\n\t}\r\n\r\n\tu.RespondWithJSON(w, http.StatusOK, \"User deleted\")\r\n}",
"func DeleteUser(c *gin.Context) {\n\tuuid := c.Params.ByName(\"uuid\")\n\tvar user models.User\n\tdb := db.GetDB()\n\tif uuid != \"\" {\n\n\t\tjwtClaims := jwt.ExtractClaims(c)\n\t\tauthUserAccessLevel := jwtClaims[\"access_level\"].(float64)\n\t\tauthUserUUID := jwtClaims[\"uuid\"].(string)\n\t\tif authUserAccessLevel != 1 {\n\t\t\tif authUserUUID != uuid {\n\t\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{\n\t\t\t\t\t\"error\": \"Sorry but you can't delete user, ONLY admins can\",\n\t\t\t\t})\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\t// DELETE FROM users WHERE uuid= user.uuid\n\t\t// exemple : UPDATE users SET deleted_at=date.now WHERE uuid = user.uuid;\n\t\tif err := db.Where(\"uuid = ?\", uuid).Delete(&user).Error; err != nil {\n\t\t\t// error handling...\n\t\t\tc.AbortWithStatusJSON(http.StatusBadRequest, gin.H{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\t// Display JSON result\n\t\t// c.JSON(200, gin.H{\"success\": \"User #\" + uuid + \" deleted\"})\n\t\tc.JSON(200, gin.H{\"success\": \"User successfully deleted\"})\n\t} else {\n\t\t// Display JSON error\n\t\tc.JSON(404, gin.H{\"error\": \"User not found\"})\n\t}\n\n}",
"func DeleteUser(c *gin.Context) {\n\tvar user Models.User\n\tid := c.Params.ByName(\"id\")\n\terr := Models.DeleteUser(&user, id)\n\tif err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t} else { \n\t\tc.JSON(http.StatusOK, gin.H{\"id\":\"is deleted\"})\n\t}\n}",
"func HandleUserDelete(c *gin.Context) {\n\tuid := c.Param(\"uid\")\n\n\tvar u User\n\taffected, err := u.Delete(uid)\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"code\": 1,\n\t\t\t\"msg\": err.Error(),\n\t\t\t\"data\": gin.H{},\n\t\t})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"code\": 0,\n\t\t\"msg\": \"ok\",\n\t\t\"data\": gin.H{\n\t\t\t\"affected\": affected,\n\t\t},\n\t})\n}",
"func DeleteUser(clients *common.ClientContainer, handler common.HandlerInterface) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tuserID := chi.URLParam(r, \"userID\")\n\t\tID, err := strconv.Atoi(userID)\n\t\tif err != nil {\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusUnprocessableEntity,\n\t\t\t\thttp.StatusText(http.StatusUnprocessableEntity),\n\t\t\t\t\"userID provided is not integer\")\n\t\t\treturn\n\t\t}\n\n\t\t// check if the ID exists\n\t\t_, err = handler.GetUserID(clients, ID)\n\t\tif err != nil {\n\t\t\tswitch err.(type) {\n\t\t\t// grafanaclient.NotFound means, that user provided the\n\t\t\t// ID of non existent user. We return 404\n\t\t\tcase grafanaclient.NotFound:\n\t\t\t\terrMsg := fmt.Sprintf(\"User Not Found\")\n\t\t\t\tcommon.WriteErrorToResponse(w, http.StatusNotFound,\n\t\t\t\t\terrMsg, err.Error())\n\t\t\t\treturn\n\t\t\t// If any other error happened -> return 500 error\n\t\t\tdefault:\n\t\t\t\tlog.Logger.Error(err)\n\t\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\t\"Internal server error occured\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// if ID exists then delete that user\n\t\terr = handler.DeleteUser(clients, ID)\n\t\tif err != nil {\n\t\t\tlog.Logger.Error(err)\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\"Internal server error occured\")\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (server Server) DeleteUser(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r) //mux params\n\tid, err := strconv.Atoi(vars[\"id\"]) // convert the id in string to int\n\tvar res models.APIResponse // make a response\n\n\tif err != nil {\n\t\tlog.Printf(\"Unable to convert the string into int. %v\", err)\n\t\tres = models.BuildAPIResponseFail(\"Unable to convert the string into int\", nil)\n\t} else {\n\t\tdeletedRows := deleteUser(int64(id), server.db) // call the deleteUser, convert the int to int64\n\t\tres = models.BuildAPIResponseSuccess(\"User updated successfully.\", deletedRows)\n\t}\n\t// send the response\n\tjson.NewEncoder(w).Encode(res)\n}",
"func (u *UserCtr) DeleteUser(c *gin.Context) {\n\tid,err := strconv.Atoi(c.Param(\"id\"))\n\tif err != nil {\n\t\tresp := errors.New(err.Error())\n\t\tc.JSON(http.StatusInternalServerError, resp)\n\t\treturn\n\t}\n\n\terr = model.DeleteUser(u.DB,id)\n\tif err != nil {\n\t\tresp := errors.New(err.Error())\n\t\tc.JSON(http.StatusInternalServerError, resp)\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t})\n\treturn\n}",
"func UserDelete(w http.ResponseWriter, r *http.Request) {\n\n\t// Init output\n\toutput := []byte(\"\")\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\t// Grab url path variables\n\turlVars := mux.Vars(r)\n\turlUser := urlVars[\"user\"]\n\n\tuserUUID := auth.GetUUIDByName(urlUser, refStr)\n\n\terr := auth.RemoveUser(userUUID, refStr)\n\tif err != nil {\n\t\tif err.Error() == \"not found\" {\n\t\t\terr := APIErrorNotFound(\"User\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t\terr := APIErrGenericInternal(err.Error())\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Write empty response if anything ok\n\trespondOK(w, output)\n\n}",
"func DeleteUser(db *gorm.DB, w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tid := vars[\"id\"]\n\tuser := getUserByID(db, id, w, r)\n\tif user == nil {\n\t\treturn\n\t}\n\tif err := db.Delete(&user).Error; err != nil {\n\t\tRespondError(w, http.StatusInternalServerError, \"\")\n\t\treturn\n\t}\n\tRespondJSON(w, http.StatusNoContent, nil)\n}",
"func DeleteUserHandler(w http.ResponseWriter, r *http.Request) {\n\tid := mux.Vars(r)[\"id\"]\n\tstatus := users.DeleteUser(id)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n}",
"func DeleteUser(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tlog.Printf(\"DeleteUser in db %v\", id)\n\tvar user models.User\n\tdb := db.GetDB()\n\n\tif err := db.Where(\"id = ?\", id).First(&user).Error; err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t\tlog.Println(\"Failed to DeleteUser in db\")\n\t\treturn\n\t}\n\n\tdb.Delete(&user)\n}",
"func DeleteUser(ctx iris.Context) {\n\tvar (\n\t\tuser model.User\n\t\tresult iris.Map\n\t)\n\tid := ctx.Params().Get(\"id\") // get id by params\n\tdb := config.GetDatabaseConnection()\n\tdefer db.Close()\n\terr := db.First(&user, id).Error\n\tif err != nil {\n\t\tresult = iris.Map{\n\t\t\t\"error\": \"true\",\n\t\t\t\"status\": iris.StatusBadRequest,\n\t\t\t\"message\": \"User not found\",\n\t\t\t\"result\": nil,\n\t\t}\n\t}\n\n\terr = db.Where(\"id = ?\", id).Delete(&user, id).Error\n\tif err != nil {\n\t\tresult = iris.Map{\n\t\t\t\"error\": \"true\",\n\t\t\t\"status\": iris.StatusBadRequest,\n\t\t\t\"message\": \"Failed Delete user\",\n\t\t\t\"result\": err.Error(),\n\t\t}\n\t} else {\n\t\tresult = iris.Map{\n\t\t\t\"error\": \"false\",\n\t\t\t\"status\": iris.StatusOK,\n\t\t\t\"message\": \"Failed Delete user\",\n\t\t\t\"result\": nil,\n\t\t}\n\t}\n\tctx.JSON(result)\n\treturn\n}",
"func (s *Server) deleteUser(request *restful.Request, response *restful.Response) {\n\t// Authorize\n\tif !s.auth(request, response) {\n\t\treturn\n\t}\n\t// get user-id and put into temp\n\tuserId := request.PathParameter(\"user-id\")\n\tif err := s.dataStore.DeleteUser(userId); err != nil {\n\t\tinternalServerError(response, err)\n\t\treturn\n\t}\n\tok(response, Success{RowAffected: 1})\n}",
"func DeleteUser(c *gin.Context) {\n\tvar user models.User\n\tid := c.Params.ByName(\"id\")\n\terr := models.DeleteUser(&user, id)\n\tif err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t} else {\n\t\tc.JSON(http.StatusOK, gin.H{\"id\" + id: \"is deleted\"})\n\t}\n}",
"func DeleteUser(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"Delete user endpoint hit\")\n\t\n\tvars := mux.Vars(r)\n\n\tid := vars[\"id\"]\n\n\tvar user models.User\n\n\tmessage := user.Destroy(id)\n\n json.NewEncoder(w).Encode(message)\n}",
"func (h *userHandler) deleteUser(ctx context.Context, rw http.ResponseWriter, r *http.Request) {\n\n\tlogin := fmt.Sprint(r.URL.Query().Get(\"login\"))\n\n\terr := h.serv.DB.UserCol.UpdateActive(ctx, login)\n\n\tif err != nil {\n\n\t\th.serv.writeResponse(ctx, rw, err.Error(), http.StatusBadRequest, nil)\n\n\t\treturn\n\t}\n\n\tuser, _ := h.serv.getUserFromClaimsFromCookie(ctx, r)\n\n\tif login == user.Login {\n\n\t\th.serv.writeResponse(ctx, rw, \"user active was updated and logged out\", http.StatusOK, user)\n\n\t} else {\n\n\t\th.serv.writeResponse(ctx, rw, \"user active was updated\", http.StatusOK, nil)\n\n\t}\n}",
"func (h *Handler) delete() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := mux.Vars(r)\n\t\tid := vars[userID]\n\t\terr := h.UserDAO.Delete(r.Context(), id)\n\t\tswitch {\n\t\tcase errors.Is(err, errorx.ErrNoUser):\n\t\t\tmsg := &errorMessage{\n\t\t\t\tMessage: fmt.Sprintf(\"user %s does not exist\", id),\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusNotFound, msg)\n\t\t\treturn\n\t\tcase errors.Is(err, errorx.ErrDeleteUser):\n\t\t\tmsg := &errorMessage{\n\t\t\t\tMessage: fmt.Sprintf(\"user %s has been deleted\", id),\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusGone, msg)\n\t\t\treturn\n\t\tcase err != nil:\n\t\t\tmsg := &errorMessage{\n\t\t\t\tError: err.Error(),\n\t\t\t\tMessage: \"user datastore error\",\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusInternalServerError, msg)\n\t\t\treturn\n\t\tdefault:\n\t\t\tresponse.JSON(w, http.StatusNoContent, nil)\n\t\t}\n\t}\n\n}",
"func DeleteUser(c *gin.Context) {\n\t// Get model if exist\n\tvar user models.User\n\tif err := models.DB.Where(\"id = ?\", c.Param(\"id\")).First(&user).Error; err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": \"Record not found!\"})\n\t\treturn\n\t}\n\n\tmodels.DB.Delete(&user)\n\n\tc.JSON(http.StatusOK, gin.H{\"data\": true})\n}",
"func DeleteUser(user *models.User, id string) (err error) {\n\tconfig.DB.Where(\"id = ?\", id).Delete(user)\n\treturn nil\n}",
"func (s *Server) deleteUser(request *restful.Request, response *restful.Response) {\n\t// Authorize\n\tif !s.auth(request, response) {\n\t\treturn\n\t}\n\t// get user-id and put into temp\n\tuserId := request.PathParameter(\"user-id\")\n\tif err := s.DataStore.DeleteUser(userId); err != nil {\n\t\tinternalServerError(response, err)\n\t\treturn\n\t}\n\tok(response, Success{RowAffected: 1})\n}",
"func _delete(context echo.Context, user *User) error {\n\tdeleteErr := Remove(user.Key)\n\tif deleteErr != nil {\n\t\tlog.Printf(\"Cannot delete user %v\", deleteErr)\n\t\treturn context.JSON(http.StatusInternalServerError, errors.New(\"Cannot delete user with ID: \"+user.ID))\n\t}\n\treturn context.NoContent(http.StatusNoContent)\n}",
"func Delete(c *gin.Context) {\n\tuserID, err := getUserID(c.Param(\"user_id\"))\n\tif err != nil {\n\t\tc.JSON(err.Status, err)\n\t\treturn\n\t}\n\n\tif err := services.UserServ.DeleteUser(userID); err != nil {\n\t\tc.JSON(err.Status, err)\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, map[string]string{\"status\": \"deleted\"})\n}",
"func DeleteUser(user *entity.User, id string, client *statsd.Client) (err error) {\n\tt := client.NewTiming()\n\tif config.DB.Where(\"id = ?\", id).First(&user); user.ID == \"\" {\n\t\treturn errors.New(\"the user doesn't exist!!!\")\n\t}\n\tconfig.DB.Where(\"id = ?\", id).Delete(&user)\n\tt.Send(\"delete_user.query_time\")\n\treturn nil\n}",
"func Delete(c *gin.Context) {\n\tuserId, idErr := getUserID(c.Param(\"user_id\"))\n\tif idErr != nil {\n\t\tc.JSON(idErr.Status, idErr)\n\t\treturn\n\t}\n\n\tif err := services.UsersService.DeleteUser(userId); err != nil {\n\t\tc.JSON(err.Status, err)\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, map[string]string{\"status\": \"deleted\"})\n}",
"func (serv *AppServer) DeleteUser(delID int) {\n\tserv.ServerRequest([]string{\"DeleteUser\", strconv.Itoa(delID)})\n}",
"func DeleteUser(c *gin.Context, client *statsd.Client) {\n\tlog.Info(\"deleting user\")\n\tvar user entity.User\n\tid := c.Params.ByName(\"id\")\n\terr := model.DeleteUser(&user, id, client)\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn\n\t} else {\n\t\tc.JSON(http.StatusOK, gin.H{\"id\" + id: \"is deleted\"})\n\t}\n\tlog.Info(\"user deleted\")\n}",
"func DeleteUser(person *Person, id string) (err error) {\n\tConfig.DB.Where(\"id = ?\", id).Delete(person)\n\treturn nil\n}",
"func DeleteUser(w http.ResponseWriter, r *http.Request) {\n\n\thttpext.SuccessAPI(w, \"ok\")\n}",
"func deleteUser(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tparams := mux.Vars(r)\n\tresult := delete.DeleteUserData(params[\"id\"])\n\tjson.NewEncoder(w).Encode(map[string]string{\n\t\t\"result\": result,\n\t})\n}",
"func (a *Server) DeleteUser(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"delete a user\")\n}",
"func (s *peerRESTServer) DeleteUserHandler(w http.ResponseWriter, r *http.Request) {\n\tif !s.IsValid(w, r) {\n\t\ts.writeErrorResponse(w, errors.New(\"Invalid request\"))\n\t\treturn\n\t}\n\n\tobjAPI := newObjectLayerFn()\n\tif objAPI == nil {\n\t\ts.writeErrorResponse(w, errServerNotInitialized)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(r)\n\taccessKey := vars[peerRESTUser]\n\tif accessKey == \"\" {\n\t\ts.writeErrorResponse(w, errors.New(\"username is missing\"))\n\t\treturn\n\t}\n\n\tif err := globalIAMSys.DeleteUser(accessKey); err != nil {\n\t\ts.writeErrorResponse(w, err)\n\t\treturn\n\t}\n\n\tw.(http.Flusher).Flush()\n}",
"func (uv *userValidator) Delete(id uint) error{\n\tvar user User\n\tuser.ID = id\n\terr := runUserValidatorFunction(&user, uv.idGreaterThan(0))\n\tif err != nil{\n\t\treturn err\n\t}\n\treturn uv.UserDB.Delete(id)\n}",
"func DeleteUser(c *gin.Context) {\n\tuserID := c.Param(\"userID\")\n\tuser := &userModel.User{ID: userID}\n\n\terr := dbConnect.Delete(user)\n\tif err != nil {\n\t\tlog.Printf(\"Error while deleting a single user, Reason: %v\\n\", err)\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"status\": http.StatusInternalServerError,\n\t\t\t\"message\": \"Something went wrong\",\n\t\t})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"status\": http.StatusOK,\n\t\t\"message\": \"User deleted successfully\",\n\t})\n\treturn\n}",
"func DeleteUser(dbmap *gorp.DbMap, id string) error {\n\tvar u User\n\terr := dbmap.SelectOne(&u, \"SELECT * FROM user WHERE object_id = ?\", id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttx, err := dbmap.Begin()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t_, err = tx.Exec(\"DELETE FROM user_session WHERE user_id = ?;\", u.PK)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\t_, err = tx.Exec(\"DELETE FROM user_role WHERE user_id = ?;\", u.PK)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\t_, err = tx.Exec(\"DELETE FROM domain_user WHERE user_id = ?;\", u.PK)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\t_, err = tx.Delete(&u)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\treturn tx.Commit()\n}",
"func (uc UserController) DeleteUser(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\tid := p.ByName(\"id\")\n\n\tif _, ok := users[id]; !ok {\n\t\tw.WriteHeader(404)\n\t\treturn\n\t}\n\n\t// Delete user\n\tdelete(users, id)\n\n\tw.WriteHeader(http.StatusOK) // 200\n\tfmt.Fprint(w, \"Deleted user\", id, \"\\n\")\n}",
"func DeleteUser(id int) (err error) {\n\to := orm.NewOrm()\n\tv := User{Id: id}\n\t// ascertain id exists in the database\n\tif err = o.Read(&v); err == nil {\n\t\tvar num int64\n\t\tif num, err = o.Delete(&User{Id: id}); err == nil {\n\t\t\tfmt.Println(\"Number of records deleted in database:\", num)\n\t\t}\n\t}\n\treturn\n}",
"func DeleteUser(userid int64) error {\n _, err := model.Database.Exec(\"DELETE FROM users WHERE userid = ? AND isadmin = ?\", userid, false)\n if err != nil {\n return err\n }\n return nil\n}",
"func DeleteUser(db *pg.DB, pk int64) error {\n\n\tuser := models.User{ID: pk}\n\n\tfmt.Println(\"Deleting User...\")\n\n\terr := db.Delete(&user)\n\n\treturn err\n}",
"func (w *ServerInterfaceWrapper) DeleteUser(ctx echo.Context) error {\n\tvar err error\n\t// ------------- Path parameter \"id\" -------------\n\tvar id int\n\n\terr = runtime.BindStyledParameter(\"simple\", false, \"id\", ctx.Param(\"id\"), &id)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter id: %s\", err))\n\t}\n\n\tctx.Set(\"OAuth.Scopes\", []string{\"\"})\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.DeleteUser(ctx, id)\n\treturn err\n}",
"func DeleteUser(id int) error {\n\tuser := User{ID: id}\n\t_, err := db.Model(&user).WherePK().Delete()\n\treturn err\n}",
"func (uv *userValidator) Delete(id uint) error {\n\tvar user User\n\tuser.ID = id\n\terr := runUserValFuncs(&user, uv.idGreaterThan(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn uv.UserDB.Delete(id)\n}",
"func (uv *userValidator) Delete(id uint) error {\n\tvar user User\n\tuser.ID = id\n\terr := runUserValFuncs(&user, uv.idGreaterThan(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn uv.UserDB.Delete(id)\n}",
"func (uv *userValidator) Delete(id uint) error {\r\n\tvar user User\r\n\tuser.ID = id\r\n\terr := runUserValFns(&user, uv.idGreaterThan(0))\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\treturn uv.UserDB.Delete(id)\r\n}",
"func DeleteUser(\n\tctx context.Context,\n\ttx *sql.Tx,\n\trequest *models.DeleteUserRequest) error {\n\tdeleteQuery := deleteUserQuery\n\tselectQuery := \"select count(uuid) from user where uuid = ?\"\n\tvar err error\n\tvar count int\n\tuuid := request.ID\n\tauth := common.GetAuthCTX(ctx)\n\tif auth.IsAdmin() {\n\t\trow := tx.QueryRowContext(ctx, selectQuery, uuid)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"not found\")\n\t\t}\n\t\trow.Scan(&count)\n\t\tif count == 0 {\n\t\t\treturn errors.New(\"Not found\")\n\t\t}\n\t\t_, err = tx.ExecContext(ctx, deleteQuery, uuid)\n\t} else {\n\t\tdeleteQuery += \" and owner = ?\"\n\t\tselectQuery += \" and owner = ?\"\n\t\trow := tx.QueryRowContext(ctx, selectQuery, uuid, auth.ProjectID())\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"not found\")\n\t\t}\n\t\trow.Scan(&count)\n\t\tif count == 0 {\n\t\t\treturn errors.New(\"Not found\")\n\t\t}\n\t\t_, err = tx.ExecContext(ctx, deleteQuery, uuid, auth.ProjectID())\n\t}\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"delete failed\")\n\t}\n\n\terr = common.DeleteMetaData(tx, uuid)\n\tlog.WithFields(log.Fields{\n\t\t\"uuid\": uuid,\n\t}).Debug(\"deleted\")\n\treturn err\n}",
"func DeleteHandler(w http.ResponseWriter, r *http.Request) {\n\t_, _, ok := r.BasicAuth()\n\tif !ok {\n\t\tw.Header().Set(\"WWW-Authenticate\", fmt.Sprintf(`Basic realm=\"%s\"`, BasicAuthRealm))\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tw.Write([]byte(http.StatusText(http.StatusUnauthorized) + \"\\n\"))\n\t\treturn\n\t}\n\tif !reqIsAdmin(r) {\n\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\treturn\n\t}\n\tu := &User{\n\t\tUsername: strings.ToLower(r.FormValue(\"username\")),\n\t}\n\terr := u.Delete()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, \"User Deleted\\n\")\n}",
"func DeleteUser(c echo.Context) error {\n\tid, _ := strconv.Atoi(c.Param(\"id\"))\n\tdelete(users, id)\n\treturn c.NoContent(http.StatusNoContent)\n}",
"func (uv *userValidator) Delete(id uint) error {\n\tvar user User\n\tuser.ID = id\n\terr := runUserValFns(&user, uv.idGreaterThan(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn uv.UserDB.Delete(id)\n}",
"func (uv *userValidator) Delete(id uint) error {\n\tvar user User\n\tuser.ID = id\n\terr := runUserValFns(&user, uv.idGreaterThan(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn uv.UserDB.Delete(id)\n}",
"func (uv *userValidator) Delete(id uint) error {\n\tvar user User\n\tuser.ID = id\n\terr := runUserValFns(&user, uv.idGreaterThan(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn uv.UserDB.Delete(id)\n}",
"func (s *Service) DeleteUser(c *tokay.Context) {\n\tID := uint64(c.ParamUint(\"id\"))\n\n\tfilter := obj{\"_id\": ID}\n\terr = db.UserCol.Remove(filter)\n\tif errorAlert(\"User was not deleted\", err, c) {\n\t\treturn\n\t}\n\n\tc.JSON(200, obj{\"ok\": \"true\"})\n}",
"func DeleteUser(c *gin.Context) {\n\tvar json db.UserDeleteForm\n\tif err := c.ShouldBind(&json); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"msg\": \"Form doens't bind.\",\n\t\t\t\"err\": err.Error(),\n\t\t})\n\t\treturn\n\t}\n\tsession := sessions.Default(c)\n\tuserID := session.Get(\"userID\")\n\tvar user db.Users\n\tif err := db.DB.Where(userID).\n\t\tFirst(&user).Error; gorm.IsRecordNotFoundError(err) {\n\t\t// User not found\n\t\tc.JSON(http.StatusOK, gin.H{\n\t\t\t\"msg\": \"User not found in database.\",\n\t\t\t\"err\": err,\n\t\t})\n\t\treturn\n\t}\n\tif checkPasswordHash(json.Password, user.Password) {\n\t\tsession.Clear()\n\t\tsession.Save()\n\t\t// Soft delete user\n\t\tdb.DB.Where(userID).Delete(&user)\n\t\tc.JSON(http.StatusOK, gin.H{\n\t\t\t\"msg\": user.Username,\n\t\t\t\"err\": \"\",\n\t\t})\n\t} else {\n\t\tc.JSON(http.StatusUnauthorized, gin.H{\n\t\t\t\"msg\": fmt.Sprintf(\"Check password hash failed for user %s\", user.Username),\n\t\t\t\"err\": user.Username,\n\t\t})\n\t}\n}",
"func (env *Env) DeleteUser(c *gin.Context) {\n\n\t//Convert ID Parameter into int32\n\ttmp, err := strconv.Atoi(c.Param(\"id\"))\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"handler\").WithError(err)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.RQST001)\n\t\treturn\n\t}\n\tuserid := int32(tmp)\n\n\treqUserId, _ := c.Get(\"userid\")\n\n\t//Check if UserID\n\tvar exists int64\n\tresult := env.db.Model(mysql.User{}).Where(\"id = ?\", userid).Count(&exists)\n\tif result.Error != nil {\n\t\tLog.WithField(\"module\", \"sql\").WithError(result.Error)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\treturn\n\t}\n\n\tif exists == 0 {\n\t\tLog.WithField(\"module\", \"handler\").Error(\"User not Found in Database\")\n\t\tc.AbortWithStatusJSON(http.StatusNotFound, errs.DBSQ006)\n\t\treturn\n\t}\n\n\tif userid != reqUserId {\n\t\tvar user mysql.User\n\n\t\tresult := env.db.Where(\"id = ?\", reqUserId).First(&user)\n\t\tif result.Error != nil {\n\t\t\tLog.WithField(\"module\", \"sql\").WithError(result.Error)\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\t\treturn\n\t\t}\n\n\t\tLog.Debug(user)\n\n\t\terr = env.db.Model(&user).Association(\"Permissions\").Find(&user.Permissions)\n\t\tif err != nil {\n\t\t\tLog.WithField(\"module\", \"sql\").WithError(err)\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\t\treturn\n\t\t}\n\n\t\tif !user.Permissions.Admin {\n\t\t\tLog.WithField(\"module\", \"handler\").Error(\"User not Authorized for this Action\")\n\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, errs.AUTH009)\n\t\t\treturn\n\t\t}\n\t}\n\n\tresult = env.db.Delete(mysql.User{}, userid)\n\tif result.Error != nil {\n\t\tif errors.Is(result.Error, gorm.ErrRecordNotFound) {\n\t\t\tLog.WithField(\"module\", \"handler\").WithError(result.Error)\n\t\t\tc.AbortWithStatusJSON(http.StatusNotFound, errs.DBSQ006)\n\t\t\treturn\n\t\t} else {\n\t\t\tLog.WithField(\"module\", \"sql\").WithError(result.Error)\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\t\treturn\n\t\t}\n\t}\n\n}",
"func DeleteUser(id string) error {\n\t_, err := db.Exec(\"DELETE FROM web_users WHERE ID = ?\", id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func DeleteUser(c echo.Context) error {\n\tid := c.FormValue(\"id\")\n\n\tconvID, err := strconv.Atoi(id)\n\tif err != nil {\n\t\treturn c.JSON(http.StatusInternalServerError, err.Error())\n\t}\n\n\tresult, err := models.DeleteUser(convID)\n\tif err != nil {\n\t\treturn c.JSON(http.StatusInternalServerError, err.Error())\n\t}\n\n\treturn c.JSON(http.StatusOK, result)\n}",
"func DeleteUserHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tname := vars[\"name\"]\n\tselector := r.URL.Query().Get(\"selector\")\n\tnamespace := r.URL.Query().Get(\"namespace\")\n\tclientVersion := r.URL.Query().Get(\"version\")\n\n\tlog.Debugf(\"DeleteUserHandler parameters selector [%s] namespace [%s] version [%s] name [%s]\", selector, namespace, clientVersion, name)\n\n\tusername, err := apiserver.Authn(apiserver.DELETE_USER_PERM, w, r)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tw.Header().Set(\"WWW-Authenticate\", `Basic realm=\"Restricted\"`)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tresp := msgs.DeleteUserResponse{}\n\n\tvar ns string\n\tns, err = apiserver.GetNamespace(apiserver.Clientset, username, namespace)\n\tif err != nil {\n\t\tresp.Status.Code = msgs.Error\n\t\tresp.Status.Msg = err.Error()\n\t\tjson.NewEncoder(w).Encode(resp)\n\t\treturn\n\t}\n\n\tif clientVersion != msgs.PGO_VERSION {\n\t\tresp.Status.Code = msgs.Error\n\t\tresp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR\n\t\tjson.NewEncoder(w).Encode(resp)\n\t\treturn\n\t}\n\n\tresp = DeleteUser(name, selector, ns)\n\tjson.NewEncoder(w).Encode(resp)\n\n}",
"func (m *MgoUserManager) DeleteUser(id interface{}) error {\n\toid, err := getId(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn m.UserColl.RemoveId(oid)\n}",
"func (u *User) Delete() *errorsutils.RestErr {\n\tstmt, err := usersdb.Client.Prepare(queryDeleteUser)\n\tif err != nil {\n\t\tlogger.Error(\"error when trying to prepare delete user statement\", err)\n\t\treturn errorsutils.NewInternalServerError(\"database error\", errors.New(\"database error\"))\n\t}\n\tdefer stmt.Close()\n\n\tif _, err = stmt.Exec(u.ID); err != nil {\n\t\tlogger.Error(\"error when trying to delete user\", err)\n\t\treturn errorsutils.NewInternalServerError(\"database error\", errors.New(\"database error\"))\n\t}\n\n\treturn nil\n}",
"func (app *application) DeleteUser(w http.ResponseWriter, r *http.Request) {\n\tid := chi.URLParam(r, \"id\")\n\tuserID, _ := strconv.Atoi(id)\n\n\terr := app.DB.DeleteUser(userID)\n\tif err != nil {\n\t\tapp.badRequest(w, r, err)\n\t\treturn\n\t}\n\n\tvar resp struct {\n\t\tError bool `json:\"error\"`\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tresp.Error = false\n\tapp.writeJSON(w, http.StatusOK, resp)\n}",
"func DeleteUser(c *gin.Context) {\n\tuserID, err := strconv.ParseInt(c.Param(\"user_id\"), 10, 64)\n\tif err != nil {\n\t\tparamErr := errors.NewBadRequestError(\"user id should be a number\")\n\t\tc.JSON(paramErr.Status, paramErr)\n\t\treturn\n\t}\n\n\t//send the userID to the services\n\tresult, deleteErr := services.UsersService.DeleteUser(userID)\n\tif deleteErr != nil {\n\t\tc.JSON(deleteErr.Status, deleteErr)\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, result)\n}",
"func DeleteHandler(db *sql.DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := mux.Vars(r)\n\t\tuser, err := delete(db, vars[\"id\"])\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(w, \"%v\\n\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tjson.NewEncoder(w).Encode(user)\n\t\treturn\n\t}\n}",
"func UserDelete(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\n}",
"func (user *User) Delete() *errors.RestErr {\n\t//prepare and execute the delete query\n\tstmt, err := usersdb.Client.Prepare(queryDeleteUser)\n\tif err != nil {\n\t\treturn errors.NewInternalServerError(err.Error())\n\t}\n\tdefer stmt.Close()\n\n\t//\n\tif _, err = stmt.Exec(user.ID); err != nil {\n\t\treturn errors.ParseError(err)\n\t}\n\n\treturn nil\n\n}",
"func (userRepository UserRepository) Delete(userId uint64) error {\n\tstatement, err := userRepository.db.Prepare(\n\t\t\"delete from users where id = ?\",\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer statement.Close()\n\n\tif _, err = statement.Exec(userId); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (u *User) Delete(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {\n\tctx, span := trace.StartSpan(ctx, \"handlers.User.Delete\")\n\tdefer span.End()\n\n\terr := user.Delete(ctx, u.db, params[\"id\"])\n\tif err != nil {\n\t\tswitch err {\n\t\tcase user.ErrInvalidID:\n\t\t\treturn web.NewRequestError(err, http.StatusBadRequest)\n\t\tcase user.ErrNotFound:\n\t\t\treturn web.NewRequestError(err, http.StatusNotFound)\n\t\tcase user.ErrForbidden:\n\t\t\treturn web.NewRequestError(err, http.StatusForbidden)\n\t\tdefault:\n\t\t\treturn errors.Wrapf(err, \"Id: %s\", params[\"id\"])\n\t\t}\n\t}\n\n\treturn web.Respond(ctx, w, nil, http.StatusNoContent)\n}",
"func DeleteUser(id int) error {\n\tq := \"DELETE FROM users WHERE id=$1\"\n\t_, err := dbConn.Exec(q, id)\n\treturn err\n}",
"func (uc UserController) DeleteUser(w http.ResponseWriter, req *http.Request, p httprouter.Params) {\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprint(w, \"Write code to delete user\\n\")\n}",
"func Duser(w http.ResponseWriter, r *http.Request) {\n\tid := r.PostFormValue(\"id\")\n\tidint, _ := strconv.Atoi(id)\n\tstmt := datastorage.GetDataRouter().GetStmt(\"delete_user\")\n\t_, err := stmt.Exec(idint)\n\tif err != nil {\n\t\tmessages.SetMessage(r, \"Σφάλμα κατά την διαγραφή του χρήστη\")\n\t\tlog.Println(err)\n\t\thttp.Redirect(w, r, \"/retrieveuser?id=\"+id, http.StatusMovedPermanently)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"/listusers\", http.StatusMovedPermanently)\n}",
"func DeleteHandler(w http.ResponseWriter, r *http.Request, serv *AppServer) {\n\tsession, err := r.Cookie(\"UserID\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdelID, err := strconv.Atoi(session.Value)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tserv.DeleteUser(delID)\n\n\tLogoutHandler(w, r, serv)\n}",
"func (u *User) Delete(tx *sql.Tx) error {\n\tif u.ID == \"\" {\n\t\treturn errors.New(`user ID is not valid`)\n\t}\n\tlog.Printf(\"db.User.Delete %s\", u.ID)\n\n\tstmt := bytes.Buffer{}\n\tstmt.WriteString(`DELETE FROM `)\n\tstmt.WriteString(userTable)\n\tstmt.WriteString(` WHERE id = ?`)\n\tlog.Printf(\"SQL QUERY: %s: with values %s\", stmt.String(), u.ID)\n\n\t_, err := tx.Exec(stmt.String(), u.ID)\n\n\treturn err\n}",
"func DeleteUser(db sqlx.Execer, id int64) error {\n\tres, err := db.Exec(\"delete from \\\"user\\\" where id = $1\", id)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"delete error\")\n\t}\n\tra, err := res.RowsAffected()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"get rows affected error\")\n\t}\n\tif ra == 0 {\n\t\treturn ErrDoesNotExist\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"id\": id,\n\t}).Info(\"user deleted\")\n\treturn nil\n}",
"func DeleteUser(c *gin.Context) {\n\tnID := c.Param(\"user_id\")\n\tdb := dbConn()\n\tstatement, _ := db.Prepare(\"CALL delete_user(?)\")\n\tstatement.Exec(nID)\n\tdefer db.Close()\n}",
"func (ug *userDbHandle) Delete(id uint) error {\n\tuser := User{Model: gorm.Model{ID: id}}\n\treturn ug.db.Delete(&user).Error\n}",
"func DeleteUser(w http.ResponseWriter, r *http.Request) {\n\tfLog := userMgmtLogger.WithField(\"func\", \"DeleteUser\").WithField(\"RequestID\", r.Context().Value(constants.RequestID)).WithField(\"path\", r.URL.Path).WithField(\"method\", r.Method)\n\tparams, err := helper.ParsePathParams(fmt.Sprintf(\"%s/management/user/{userRecId}\", apiPrefix), r.URL.Path)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tuser, err := UserRepo.GetUserByRecID(r.Context(), params[\"userRecId\"])\n\tif err != nil {\n\t\tfLog.Errorf(\"UserRepo.GetUserByRecID got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusInternalServerError, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tif user == nil {\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusNotFound, fmt.Sprintf(\"User recid %s not found\", params[\"userRecId\"]), nil, nil)\n\t\treturn\n\t}\n\tUserRepo.DeleteUser(r.Context(), user)\n\tRevocationRepo.Revoke(r.Context(), user.Email)\n\thelper.WriteHTTPResponse(r.Context(), w, http.StatusOK, \"User deleted\", nil, nil)\n}",
"func (u *UserServiceHandler) Delete(ctx context.Context, userID string) error {\n\n\turi := \"/v1/user/delete\"\n\n\tvalues := url.Values{\n\t\t\"USERID\": {userID},\n\t}\n\n\treq, err := u.client.NewRequest(ctx, http.MethodPost, uri, values)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = u.client.DoWithContext(ctx, req, nil)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (us *UserStorage) DeleteUser(id string) error {\n\tif !bson.IsObjectIdHex(id) {\n\t\treturn model.ErrorWrongDataFormat\n\t}\n\ts := us.db.Session(UsersCollection)\n\tdefer s.Close()\n\n\terr := s.C.RemoveId(bson.ObjectIdHex(id))\n\treturn err\n}",
"func DeleteUser(id int32) error {\n\treturn dalums.DeleteUser(id)\n}",
"func DeleteUser(w http.ResponseWriter, r *http.Request) {\n\tcookie, _ := cookies.Read(r)\n\tuserID, _ := strconv.ParseUint(cookie[\"id\"], 10, 64)\n\n\turl := fmt.Sprintf(\"%s/users/%d\", config.APIURL, userID)\n\n\tresponse, err := requests.RequestsWithAuthentication(r, http.MethodDelete, url, nil)\n\tif err != nil {\n\t\tresponses.JSON(w, http.StatusInternalServerError, responses.ErrorAPI{Err: err.Error()})\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode >= 400 {\n\t\tresponses.TreatStatusCode(w, response)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, response.StatusCode, nil)\n}",
"func (ur *UserRepository) Delete(ctx context.Context, id uint) error {\n\tq := `\n\tDELETE FROM users WHERE id=$1;\n\t`\n\n\tstmt, err := ur.Data.DB.PrepareContext(ctx, q)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer stmt.Close()\n\n\t_, err = stmt.ExecContext(ctx, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (ur *UserRepository) Delete(ctx context.Context, id uint) error {\n\tq := `\n\tDELETE FROM users WHERE id=$1;\n\t`\n\n\tstmt, err := ur.Data.DB.PrepareContext(ctx, q)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer stmt.Close()\n\n\t_, err = stmt.ExecContext(ctx, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (chat *Chat) DeleteUser(id string) {\n\tchat.lock.Lock()\n\tdefer chat.lock.Unlock()\n\n\tusers := []*User{}\n\tfor _, chatUser := range chat.users {\n\t\tif chatUser.Id == id {\n\t\t\t//close ws\n\t\t\tchatUser.Ws.Close()\n\t\t\tchatUser.Dt = time.Since(chatUser.OnlineAt) / 1e9\n\n\t\t\t//进行数据跟踪\n\t\t\tgo httpPostForm(chatUser)\n\n\t\t\tcontinue\n\t\t}\n\t\tusers = append(users, chatUser)\n\t}\n\n\tchat.users = users\n}",
"func (c *UserRepoImpl) Delete(id int) (*model.User, error) {\n\n\tuser := new(model.User)\n\n\tif err := c.db.Table(\"user\").First(&user, id).Error; err != nil {\n\t\treturn nil, errors.New(\"id is doesnt exists\")\n\t}\n\n\tif err := c.db.Table(\"user\").Where(\"user_id = ?\", id).Delete(&model.User{}).Error; err != nil {\n\t\treturn nil, errors.New(\"delete courier data: error\")\n\t}\n\n\treturn nil, nil\n}"
] | [
"0.7829194",
"0.75990427",
"0.758616",
"0.7584279",
"0.75630176",
"0.7557841",
"0.7556832",
"0.75329816",
"0.75098014",
"0.7507609",
"0.750512",
"0.74947095",
"0.7493544",
"0.7471709",
"0.7445412",
"0.74386734",
"0.7437152",
"0.74366146",
"0.7431234",
"0.73927724",
"0.7389141",
"0.7386181",
"0.7377011",
"0.7373788",
"0.7367931",
"0.7358367",
"0.73526007",
"0.73381394",
"0.73310083",
"0.7319692",
"0.73159283",
"0.7310957",
"0.7308265",
"0.7305798",
"0.7273584",
"0.72679794",
"0.7265321",
"0.72636884",
"0.7245354",
"0.7238874",
"0.72126997",
"0.72069526",
"0.7206773",
"0.7199146",
"0.7195916",
"0.7187915",
"0.71802515",
"0.716842",
"0.71662205",
"0.71468097",
"0.7141946",
"0.71327543",
"0.71180695",
"0.70857936",
"0.7078777",
"0.7064752",
"0.7054795",
"0.7051538",
"0.70468664",
"0.70355415",
"0.70355415",
"0.7034293",
"0.7031061",
"0.7017051",
"0.70161796",
"0.70114845",
"0.70114845",
"0.70114845",
"0.69995874",
"0.6999347",
"0.698853",
"0.6981889",
"0.69790256",
"0.69607943",
"0.69601154",
"0.69526964",
"0.6952403",
"0.69521546",
"0.6947989",
"0.69454104",
"0.6942407",
"0.69422156",
"0.6938683",
"0.6933395",
"0.6932374",
"0.6925556",
"0.6923764",
"0.692273",
"0.6922459",
"0.6917455",
"0.69152564",
"0.6896339",
"0.68757886",
"0.68674433",
"0.6863079",
"0.6862909",
"0.68577963",
"0.68577963",
"0.6857009",
"0.6839651"
] | 0.77397424 | 1 |
MarshalJSON supports json.Marshaler interface | func (v UnloadCheckResponse) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjson6a975c40EncodeJsonBenchmark(&w, v)
return w.Buffer.BuildBytes(), w.Error
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Marshal(v Marshaler) ([]byte, error) {\n\tw := jwriter.Writer{}\n\tv.MarshalEasyJSON(&w)\n\treturn w.BuildBytes()\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tif ImplementsPreJSONMarshaler(v) {\n\t\terr := v.(PreJSONMarshaler).PreMarshalJSON()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn json.Marshal(v)\n}",
"func (j *JSON) Marshal(target interface{}) (output interface{}, err error) {\n\treturn jsonEncoding.Marshal(target)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(v Marshaler) ([]byte, error) {\n\tif isNilInterface(v) {\n\t\treturn nullBytes, nil\n\t}\n\n\tw := jwriter.Writer{}\n\tv.MarshalTinyJSON(&w)\n\treturn w.BuildBytes()\n}",
"func JsonMarshal(t interface{}) ([]byte, error) {\n\tbuffer := &bytes.Buffer{}\n\tenc := json.NewEncoder(buffer)\n\tenc.SetEscapeHTML(false)\n\terr := enc.Encode(t)\n\treturn buffer.Bytes(), err\n}",
"func jsonMarshal(t interface{}) ([]byte, error) {\n\tbuffer := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buffer)\n\tencoder.SetEscapeHTML(false)\n\tencoder.SetIndent(\"\", \" \")\n\terr := encoder.Encode(t)\n\treturn buffer.Bytes(), err\n}",
"func marshal() {\n\tfmt.Println(\"=== json.marshal ===\")\n\tryan := &Person{\"Ryan\", 25}\n\twire, err := json.Marshal(ryan)\n\tcheck(err)\n\tfmt.Println(string(wire))\n}",
"func jsonMarshal(t interface{}) ([]byte, error) {\n\tvar buffer bytes.Buffer\n\tencoder := json.NewEncoder(&buffer)\n\tencoder.SetEscapeHTML(false)\n\tif err := encoder.Encode(t); err != nil {\n\t\treturn nil, err\n\t}\n\t// Prettify\n\tvar out bytes.Buffer\n\tif err := json.Indent(&out, buffer.Bytes(), \"\", \"\\t\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn out.Bytes(), nil\n}",
"func JSONEncoder() Encoder { return jsonEncoder }",
"func JSONMarshal(data interface{}) ([]byte, error) {\n\tvar b []byte\n\tvar err error\n\n\tb, err = json.MarshalIndent(data, \"\", \" \")\n\n\treturn b, err\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn canonicaljson.Transform(b)\n}",
"func (c *JsonCodec) Marshal(object interface{}, options map[string]interface{}) ([]byte, error) {\n\treturn jsonEncoding.Marshal(object)\n}",
"func (c *JSONCodec) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(in interface{}) ([]byte, error) {\n\tres, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(in)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"marshaling error: %w\", err)\n\t}\n\treturn res, nil\n}",
"func Marshal(p Payload) ([]byte, error) {\n\treturn json.Marshal(p)\n}",
"func (j *JsonlMarshaler) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func (s *Serializer) Marshal(v interface{}) ([]byte, error) {\n\treturn jsoniter.Marshal(v)\n}",
"func (j *TextMarshaler) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\treturn replaceUnicodeConversion(b), err\n}",
"func Marshal(object interface{}) (data string, err error) {\n\tif t, err := json.Marshal(object); err != nil {\n\t\tdata = \"\"\n\t} else {\n\t\tdata = string(t)\n\t}\n\treturn\n}",
"func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) {\n\tswitch v.(type) {\n\tcase *distribute.GetResponse:\n\t\tvalue, err := protobuf.MarshalAny(v.(*distribute.GetResponse).Fields)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn json.Marshal(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"fields\": value,\n\t\t\t},\n\t\t)\n\tcase *distribute.SearchResponse:\n\t\tvalue, err := protobuf.MarshalAny(v.(*distribute.SearchResponse).SearchResult)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn json.Marshal(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"search_result\": value,\n\t\t\t},\n\t\t)\n\tdefault:\n\t\treturn json.Marshal(v)\n\t}\n}",
"func (j *JSON) Marshal(obj interface{}) error {\n\tres, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Call our implementation of\n\t// JSON UnmarshalJSON through json.Unmarshal\n\t// to set the result to the JSON object\n\treturn json.Unmarshal(res, j)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\trv := reflect.ValueOf(v)\n\tif rv.Kind() != reflect.Slice {\n\t\treturn nil, &InvalidMarshalError{rv.Kind()}\n\t}\n\n\tvar buf bytes.Buffer\n\tencoder := json.NewEncoder(&buf)\n\tfor i := 0; i < rv.Len(); i++ {\n\t\tif err := encoder.Encode(rv.Index(i).Interface()); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn buf.Bytes(), nil\n}",
"func marshalJSON(i *big.Int) ([]byte, error) {\n\ttext, err := i.MarshalText()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(string(text))\n}",
"func (p *HJSON) Marshal(o map[string]interface{}) ([]byte, error) {\n\treturn hjson.Marshal(o)\n}",
"func JSONMarshal(v interface{}) ([]byte, error) {\n\tb, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn b, err\n\t}\n\tb = bytes.Replace(b, []byte(\"\\\\u003c\"), []byte(\"<\"), -1)\n\tb = bytes.Replace(b, []byte(\"\\\\u003e\"), []byte(\">\"), -1)\n\tb = bytes.Replace(b, []byte(\"\\\\u0026\"), []byte(\"&\"), -1)\n\treturn b, err\n}",
"func JSONMarshal(content interface{}, escape bool) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := json.NewEncoder(&buf)\n\tenc.SetEscapeHTML(escape)\n\tif err := enc.Encode(content); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}",
"func MarshalJSON(v interface{}, config MarshalConfig) ([]byte, error) {\n\tres, err := Marshal(v, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(res)\n}",
"func (jz JSONGzipEncoding) Marshal(v interface{}) ([]byte, error) {\n\tbuf, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// var bufSizeBefore = len(buf)\n\n\tbuf, err = GzipEncode(buf)\n\t// coloredoutput.Infof(\"gzip_json_compress_ratio=%d/%d=%.2f\",\n\t// bufSizeBefore, len(buf), float64(bufSizeBefore)/float64(len(buf)))\n\treturn buf, err\n}",
"func (c *Codec) Marshal(v interface{}) ([]byte, error) {\n\tresult, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, xerrors.New(err.Error())\n\t}\n\n\treturn result, nil\n}",
"func Marshal(obj interface{}) ([]byte, error) {\n\treturn mgobson.Marshal(obj)\n}",
"func jsonify(v interface{}) string { return string(mustMarshalJSON(v)) }",
"func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) {\n\tif isNilInterface(v) {\n\t\treturn w.Write(nullBytes)\n\t}\n\n\tjw := jwriter.Writer{}\n\tv.MarshalTinyJSON(&jw)\n\treturn jw.DumpTo(w)\n}",
"func (sc *Contract) Marshal() ([]byte, error) {\n\treturn json.Marshal(sc)\n}",
"func (f *Formatter) Marshal(v interface{}) ([]byte, error) {\n\tdata, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn f.Format(data)\n}",
"func marshal(v interface{}) []byte {\n\tb, err := json.Marshal(v)\n\tassert(err == nil, \"marshal error: %s\", err)\n\treturn b\n}",
"func JSONMarshal(obj interface{}) ([]byte, error) {\n\tb := new(bytes.Buffer)\n\tenc := json.NewEncoder(b)\n\tenc.SetEscapeHTML(false)\n\terr := enc.Encode(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// json.NewEncoder.Encode adds a final '\\n', json.Marshal does not.\n\t// Let's keep the default json.Marshal behaviour.\n\tres := b.Bytes()\n\tif len(res) >= 1 && res[len(res)-1] == '\\n' {\n\t\tres = res[:len(res)-1]\n\t}\n\treturn res, nil\n}",
"func DefaultMarshalJSON(obj interface{}) ([]byte, error) {\n\treturn json.MarshalIndent(obj, \"\", \" \")\n}",
"func marshal(v interface{}) string {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(b)\n}",
"func MarshalJSON(v interface{}) []byte {\n\tdata, err := json.Marshal(v)\n\tAbortIf(err)\n\treturn data\n}",
"func encode(ins interface{}) ([]byte, error) {\n\treturn json.Marshal(ins)\n}",
"func Marshal(data interface{}) ([]byte, error) {\n\tdocument, err := MarshalToStruct(data, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn json.Marshal(document)\n}",
"func NewJSONMarshaler() Marshaler {\n\treturn newJSONMarshaler()\n}",
"func JsonMarshal(val any) ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buf)\n\tencoder.SetEscapeHTML(false)\n\tif err := encoder.Encode(val); err != nil {\n\t\treturn nil, err\n\t}\n\t// Return without a trailing line feed.\n\tlineTerminatedJson := buf.Bytes()\n\treturn bytes.TrimSuffix(lineTerminatedJson, []byte(\"\\n\")), nil\n}",
"func (j *jsonNative) MarshalJSON() ([]byte, error) {\n\tvar buf fflib.Buffer\n\tif j == nil {\n\t\tbuf.WriteString(\"null\")\n\t\treturn buf.Bytes(), nil\n\t}\n\terr := j.MarshalJSONBuf(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}",
"func (j *Publisher) MarshalJSON() ([]byte, error) {\n\tvar buf fflib.Buffer\n\tif j == nil {\n\t\tbuf.WriteString(\"null\")\n\t\treturn buf.Bytes(), nil\n\t}\n\terr := j.MarshalJSONBuf(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}",
"func serialize(toMarshal interface{}) *bytes.Buffer {\n\tjsonStr, _ := json.Marshal(toMarshal)\n\treturn bytes.NewBuffer(jsonStr)\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\tif err != nil {\n\t\t//return []byte{}, fmt.Errorf(\"Erro no json %v\", err)\n\t\treturn []byte{}, errors.New(fmt.Sprintf(\"Erro no json %v\", err))\n\t}\n\treturn bs, nil\n}",
"func (r *Anilist) Marshal() ([]byte, error) {\n\treturn json.Marshal(r)\n}",
"func JSONEncode(data interface{}) string {\n\tbt, _ := json.Marshal(data)\n\treturn string(bt)\n}",
"func Marshal(o interface{}) ([]byte, error) {\n\tj, err := json.Marshal(o)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error marshaling into JSON: %v\", err)\n\t}\n\n\ty, err := JSONToYAML(j)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error converting JSON to YAML: %v\", err)\n\t}\n\n\treturn y, nil\n}",
"func (v Post) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson783c1624EncodeGithubComGobwasVk7(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {\n\tbs, fnerr := iv.MarshalJSON()\n\tf.e.marshalAsis(bs, fnerr)\n}",
"func Marshal(obj interface{}) ([]byte, error) {\n\treturn MarshalValue(reflect.ValueOf(obj))\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\tif err != nil {\n\t\t// return []byte{}, errors.New(fmt.Sprintf(\"Can't create json with error: %v\", err))\n\t\treturn []byte{}, fmt.Errorf(\"Can't create json for person: %v error: %v\", a, err)\n\t}\n\treturn bs, nil\n}",
"func (v Join) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer21(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func JsonEncode(i interface{}) string {\n\tb, err := json.Marshal(i)\n\n\tif err != nil {\n\t\tfmt.Println(\"util.getJsonStr.error\", err)\n\t\treturn \"\"\n\t}\n\n\treturn string(b)\n}",
"func (b *SampleFJSONBuilder) Marshal(orig *SampleF) ([]byte, error) {\n\tret, err := b.Convert(orig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(ret)\n}",
"func (m *Marshaler) JSON(v interface{}) ([]byte, error) {\n\tif _, ok := v.(proto.Message); ok {\n\t\tvar buf bytes.Buffer\n\t\tjm := &jsonpb.Marshaler{}\n\t\tjm.OrigName = true\n\t\tif err := jm.Marshal(&buf, v.(proto.Message)); err != nil {\n\t\t\treturn []byte{}, err\n\t\t}\n\n\t\tif m.FilterProtoJson {\n\t\t\treturn m.FilterJsonWithStruct(buf.Bytes(), v)\n\t\t}\n\t\treturn buf.Bytes(), nil\n\t}\n\treturn json.Marshal(v)\n}",
"func (v ExportItem) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson1(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func mustMarshalJSON(v interface{}) []byte {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\tpanic(\"marshal json: \" + err.Error())\n\t}\n\treturn b\n}",
"func (v publicKey) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection(w, v)\n}",
"func EncodedJson(v interface{}) []byte {\n\tif p, err := json.Marshal(v); err != nil {\n\t\treturn []byte{}\n\t} else {\n\t\treturn p\n\t}\n}",
"func (js JSONSerializable) MarshalJSON() ([]byte, error) {\n\tif !js.Valid {\n\t\treturn json.Marshal(nil)\n\t}\n\tjsWithHex := replaceBytesWithHex(js.Val)\n\treturn json.Marshal(jsWithHex)\n}",
"func MarshalJSON(v interface{}) string {\n\tcontents, _ := json.MarshalIndent(v, \"\", \" \")\n\treturn string(contents)\n}",
"func (j Json) MarshalJSON() ([]byte, error) {\n\treturn j.ToJson()\n}",
"func (j JSON) MarshalJSON() ([]byte, error) {\n\tif j.Valid {\n\t\treturn json.Marshal(j.Map)\n\t}\n\n\treturn json.Marshal(nil)\n}",
"func (p PatchObject) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"tags\", p.Tags)\n\treturn json.Marshal(objectMap)\n}",
"func (v Posts) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson783c1624EncodeGithubComGobwasVk(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (spbi SuccessfulPropertyBatchInfo) MarshalJSON() ([]byte, error) {\n\tspbi.Kind = KindSuccessful\n\tobjectMap := make(map[string]interface{})\n\tif spbi.Properties != nil {\n\t\tobjectMap[\"Properties\"] = spbi.Properties\n\t}\n\tif spbi.Kind != \"\" {\n\t\tobjectMap[\"Kind\"] = spbi.Kind\n\t}\n\treturn json.Marshal(objectMap)\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\n\tif err != nil {\n\t\treturn []byte{}, fmt.Errorf(\"Error caught by Onur Gurel\")\n\t}\n\n\treturn bs, nil\n}",
"func (v BindParams) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoTethering2(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (handler Handler) EncodeJSON(v interface{}) (b []byte, err error) {\n\n\t//if(w.Get(\"pretty\",\"false\")==\"true\"){\n\tb, err = json.MarshalIndent(v, \"\", \" \")\n\t//}else{\n\t//\tb, err = json.Marshal(v)\n\t//}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn b, nil\n}",
"func Marshal(val interface{}) ([]byte, error) {}",
"func testJSONMarshal(t *testing.T, v interface{}, want string) {\n\tj, err := json.Marshal(v)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to marshal JSON for %v\", v)\n\t}\n\n\tw := new(bytes.Buffer)\n\terr = json.Compact(w, []byte(want))\n\tif err != nil {\n\t\tt.Errorf(\"String is not valid json: %s\", want)\n\t}\n\n\tif w.String() != string(j) {\n\t\tt.Errorf(\"json.Marshal(%q) returned %s, want %s\", v, j, w)\n\t}\n\n\t// now go the other direction and make sure things unmarshal as expected\n\tu := reflect.ValueOf(v).Interface()\n\tif err := json.Unmarshal([]byte(want), u); err != nil {\n\t\tt.Errorf(\"Unable to unmarshal JSON for %v: %v\", want, err)\n\t}\n\n\tif !reflect.DeepEqual(v, u) {\n\t\tt.Errorf(\"json.Unmarshal(%q) returned %s, want %s\", want, u, v)\n\t}\n}",
"func (o *Object) MarshalJSON() ([]byte, error) {\n\tctx := _builtinJSON_stringifyContext{\n\t\tr: o.runtime,\n\t}\n\tex := o.runtime.vm.try(func() {\n\t\tif !ctx.do(o) {\n\t\t\tctx.buf.WriteString(\"null\")\n\t\t}\n\t})\n\tif ex != nil {\n\t\treturn nil, ex\n\t}\n\treturn ctx.buf.Bytes(), nil\n}",
"func testJSONMarshal(t *testing.T, v interface{}, want string) {\n\tj, err := json.Marshal(v)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to marshal JSON for %v\", v)\n\t}\n\n\tw := new(bytes.Buffer)\n\terr = json.Compact(w, []byte(want))\n\tif err != nil {\n\t\tt.Errorf(\"String is not valid json: %s\", want)\n\t}\n\n\tif w.String() != string(j) {\n\t\tt.Errorf(\"json.Marshal(%q) returned %s, want %s\", v, j, w)\n\t}\n\n\t// now go the other direction and make sure things unmarshal as expected\n\tu := reflect.ValueOf(v).Interface()\n\tif err := json.Unmarshal([]byte(want), u); err != nil {\n\t\tt.Errorf(\"Unable to unmarshal JSON for %v\", want)\n\t}\n\n\tif !reflect.DeepEqual(v, u) {\n\t\tt.Errorf(\"json.Unmarshal(%q) returned %s, want %s\", want, u, v)\n\t}\n}",
"func (v BlitzedItemResponse) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson6a975c40EncodeJsonBenchmark4(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (v item) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonD2b7633eEncodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (i Interface) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"etag\", i.Etag)\n\tpopulate(objectMap, \"extendedLocation\", i.ExtendedLocation)\n\tpopulate(objectMap, \"id\", i.ID)\n\tpopulate(objectMap, \"location\", i.Location)\n\tpopulate(objectMap, \"name\", i.Name)\n\tpopulate(objectMap, \"properties\", i.Properties)\n\tpopulate(objectMap, \"tags\", i.Tags)\n\tpopulate(objectMap, \"type\", i.Type)\n\treturn json.Marshal(objectMap)\n}",
"func marshalJSON(namingStrategy string, that interface{}) ([]byte, error) {\n\tout := map[string]interface{}{}\n\tt := reflect.TypeOf(that)\n\tv := reflect.ValueOf(that)\n\n\tfnctn := v.MethodByName(namingStrategy)\n\tfname := func(params ...interface{}) string {\n\t\tin := make([]reflect.Value, len(params))\n\t\tfor k, param := range params {\n\t\t\tin[k] = reflect.ValueOf(param)\n\t\t}\n\t\treturn fnctn.Call(in)[0].String()\n\t}\n\toutName := \"\"\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tswitch n := f.Tag.Get(\"json\"); n {\n\t\tcase \"\":\n\t\t\toutName = f.Name\n\t\tcase \"-\":\n\t\t\toutName = \"\"\n\t\tdefault:\n\t\t\toutName = fname(n)\n\t\t}\n\t\tif outName != \"\" {\n\t\t\tout[outName] = v.Field(i).Interface()\n\t\t}\n\t}\n\treturn json.Marshal(out)\n}",
"func testMarshalJSON(t *testing.T, cmd interface{}) {\n\tjsonCmd, err := json.Marshal(cmd)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tfmt.Println(string(jsonCmd))\n}",
"func (v PbTestObject) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson5fcf962eEncodeGithubComJsonIteratorGoBenchmarkWith10IntFields(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (pbi PropertyBatchInfo) MarshalJSON() ([]byte, error) {\n\tpbi.Kind = KindPropertyBatchInfo\n\tobjectMap := make(map[string]interface{})\n\tif pbi.Kind != \"\" {\n\t\tobjectMap[\"Kind\"] = pbi.Kind\n\t}\n\treturn json.Marshal(objectMap)\n}",
"func My_json(demo interface{}) *bytes.Buffer {\r\n\tif bs, err := json.Marshal(demo); err == nil {\r\n\t\treq := bytes.NewBuffer([]byte(bs))\r\n\t\treturn req\r\n\t} else {\r\n\t\tpanic(err)\r\n\t}\r\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn MarshalEx(v, false)\n}",
"func MarshalJSON(a interface{}) (b []byte, err error) {\n\tif m, ok := a.(proto.Message); ok {\n\t\tmarshaller := &jsonpb.Marshaler{}\n\t\tvar buf bytes.Buffer\n\t\terr = marshaller.Marshal(&buf, m)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tb = buf.Bytes()\n\t} else {\n\t\tb, err = json.Marshal(a)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}",
"func (v PostParams) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels10(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (v Post) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson5a72dc82EncodeGithubComTimRazumovTechnoparkDBAppModels6(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (o *ExportDataPartial) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(o.ToMap())\n}",
"func (n *Node) Marshal() ([]byte, error) {\n\treturn json.Marshal(n)\n}",
"func (n *Node) Marshal() ([]byte, error) {\n\treturn json.Marshal(n)\n}",
"func jsonEnc(in T) ([]byte, error) {\n\treturn jsonx.Marshal(in)\n}",
"func (s *HTTPServer) marshalJSON(req *http.Request, obj interface{}) ([]byte, error) {\n\tif _, ok := req.URL.Query()[\"pretty\"]; ok || s.agent.config.DevMode {\n\t\tbuf, err := json.MarshalIndent(obj, \"\", \" \")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf = append(buf, \"\\n\"...)\n\t\treturn buf, nil\n\t}\n\n\tbuf, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf, err\n}",
"func (m Message) Marshal() ([]byte, error) {\n\treturn jsoniter.Marshal(m)\n}",
"func (m Json) MarshalJSON() ([]byte, error) {\n\tif m == nil {\n\t\treturn []byte(\"null\"), nil\n\t}\n\treturn m, nil\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn NewFormatter().Marshal(v)\n}",
"func (s *ServiceSecrets) MarshalJson() ([]byte, error) {\n\treturn json.Marshal(s)\n}",
"func JsonEncode(data []byte, v interface{}) error {\n\n\treturn json.Unmarshal(data, v)\n}",
"func (out *GetOutput) Marshal() ([]byte, error) {\n\treturn json.Marshal(out)\n}"
] | [
"0.75134",
"0.7502133",
"0.7500753",
"0.74823195",
"0.7446766",
"0.7371689",
"0.73370403",
"0.7304601",
"0.72591853",
"0.72539127",
"0.72181046",
"0.717537",
"0.7162588",
"0.7161582",
"0.71608186",
"0.7072197",
"0.70587647",
"0.7044735",
"0.7022404",
"0.6973228",
"0.6963657",
"0.69578344",
"0.69243026",
"0.6924262",
"0.68824863",
"0.68681127",
"0.68572986",
"0.6818534",
"0.68102",
"0.67969906",
"0.67913324",
"0.67774016",
"0.67717487",
"0.67700523",
"0.6754375",
"0.67300195",
"0.67154574",
"0.6711641",
"0.6708163",
"0.6686554",
"0.6676971",
"0.6670713",
"0.6667217",
"0.6665734",
"0.6651805",
"0.664897",
"0.6601639",
"0.65936595",
"0.6570477",
"0.65671986",
"0.65637034",
"0.6562716",
"0.65555567",
"0.6544248",
"0.65373516",
"0.6532906",
"0.65273225",
"0.65230805",
"0.6517934",
"0.65155387",
"0.6507946",
"0.65065837",
"0.65061134",
"0.65058106",
"0.6502681",
"0.6501059",
"0.6492431",
"0.64840174",
"0.6483743",
"0.64832276",
"0.64799786",
"0.6479202",
"0.6476907",
"0.64739543",
"0.6469417",
"0.6467924",
"0.6463269",
"0.64624554",
"0.6460338",
"0.6457592",
"0.6454149",
"0.6448956",
"0.6447831",
"0.64472353",
"0.6436354",
"0.64353037",
"0.64196956",
"0.64188385",
"0.64096874",
"0.64083934",
"0.6407883",
"0.6405311",
"0.6405311",
"0.640528",
"0.6403627",
"0.6403197",
"0.6402974",
"0.64011514",
"0.6401083",
"0.6395421",
"0.6394115"
] | 0.0 | -1 |
MarshalEasyJSON supports easyjson.Marshaler interface | func (v UnloadCheckResponse) MarshalEasyJSON(w *jwriter.Writer) {
easyjson6a975c40EncodeJsonBenchmark(w, v)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v item) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(w, v)\n}",
"func (v Fruit) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels11(w, v)\n}",
"func (v BlitzedItemResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark4(w, v)\n}",
"func (ce *CustomEvent) MarshalEasyJSON(w *jwriter.Writer) {\n\tce.marshalerCtor().MarshalEasyJSON(w)\n}",
"func (v Boo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeMsgpJson(w, v)\n}",
"func (v DocumentResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark3(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk7(w, v)\n}",
"func (v invocationMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson2802b09fEncodeGithubComPhilippseithSignalr1(w, v)\n}",
"func (v Native) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson10(w, v)\n}",
"func (v ItemCheckResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark2(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson393a2a40EncodeCodegen(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels(w, v)\n}",
"func (v ExportItem) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson1(w, v)\n}",
"func (v Format) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson14(w, v)\n}",
"func (v managerListener) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonEd74d837EncodeGithubComKihamoBoggartComponentsBoggartInternalHandlers(w, v)\n}",
"func (v Part) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer12(w, v)\n}",
"func (v publicKey) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection(w, v)\n}",
"func (v VisitArray) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE564fc13EncodeGithubComLa0rgHighloadcupModel(w, v)\n}",
"func (v Banner) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson23(w, v)\n}",
"func (v MOTD) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer19(w, v)\n}",
"func (v ProductToAdd) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels1(w, v)\n}",
"func (v App) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson25(w, v)\n}",
"func (v Ingredient) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels8(w, v)\n}",
"func (v Visit) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE564fc13EncodeGithubComLa0rgHighloadcupModel1(w, v)\n}",
"func (v InfoUser) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeJsongen3(w, v)\n}",
"func (v Deal) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson8a221a72EncodeGithubComVungleVungoOpenrtb(w, v)\n}",
"func (v FormDataMQ) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson(w, v)\n}",
"func (v Msg) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels6(w, v)\n}",
"func (v Nick) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer14(w, v)\n}",
"func (v Program) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson1c6ddb42EncodeGithubComSturfeeincGlTF(w, v)\n}",
"func (v OrderCheckResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark1(w, v)\n}",
"func (v BasicUser) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeJsongen4(w, v)\n}",
"func (v Pet) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson14a1085EncodeGithubComIamStubborNPetstoreDbModels1(w, v)\n}",
"func (v ChannelForward) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer32(w, v)\n}",
"func (v Element) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson2(w, v)\n}",
"func (v Pmp) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson9(w, v)\n}",
"func (v Responce) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeGithubComSerhio83DruidPkgStructs(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson5a72dc82EncodeGithubComTimRazumovTechnoparkDBAppModels6(w, v)\n}",
"func (c Context) MarshalEasyJSON(writer *ej_jwriter.Writer) {\n\tif err := c.Err(); err != nil {\n\t\twriter.Error = err\n\t\treturn\n\t}\n\twrappedWriter := jwriter.NewWriterFromEasyJSONWriter(writer)\n\tContextSerialization.MarshalToJSONWriter(&wrappedWriter, &c)\n}",
"func (v Features) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer25(w, v)\n}",
"func (v Node) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeGithubComSkydiveProjectSkydiveGraffitiApiTypes1(w, v)\n}",
"func (v Posts) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk(w, v)\n}",
"func (v ThirdParty) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson2(w, v)\n}",
"func (v Student) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonF1627ba7EncodeGithubComDuchiporexiaGoutilsXmsgTests1(w, v)\n}",
"func (v UsersHandler) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers(w, v)\n}",
"func (v Segment) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson6(w, v)\n}",
"func (v Info) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels13(w, v)\n}",
"func (v CBPerson) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE242b40eEncodeGithubComExampleSample2(w, v)\n}",
"func (v Invite) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer22(w, v)\n}",
"func (v GetUserResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers1(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson7da3ae25EncodeCourseraGolangHomeworks(w, v)\n}",
"func (v PostSource) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk2(w, v)\n}",
"func (v ShadowModelSt) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB7ed31d3EncodeMevericcoreMccommon5(w, v)\n}",
"func (v BindParams) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoTethering2(w, v)\n}",
"func (v Error) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer26(w, v)\n}",
"func (v Deal) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson16(w, v)\n}",
"func (v Message) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer18(w, v)\n}",
"func (v PostParams) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels10(w, v)\n}",
"func (v Musician) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson62dc445bEncode20211NoskoolTeamInternalAppMusiciansModels2(w, v)\n}",
"func (v Impression) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson12(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels11(w, v)\n}",
"func (v BaseInstrumentInfo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi128(w, v)\n}",
"func (v Grade) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonF1627ba7EncodeGithubComDuchiporexiaGoutilsXmsgTests2(w, v)\n}",
"func (v Mode) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer16(w, v)\n}",
"func (v Whois) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer1(w, v)\n}",
"func (v Foo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonAbe23ddeEncodeGithubComUberZanzibarExamplesExampleGatewayBuildGenCodeClientsCorgeCorge1(w, v)\n}",
"func (v Source) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson3(w, v)\n}",
"func (v ProductExtended) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels5(w, v)\n}",
"func (v Away) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer33(w, v)\n}",
"func (c EventOutputContext) MarshalEasyJSON(writer *ej_jwriter.Writer) {\n\tif err := c.Err(); err != nil {\n\t\twriter.Error = err\n\t\treturn\n\t}\n\twrappedWriter := jwriter.NewWriterFromEasyJSONWriter(writer)\n\tContextSerialization.MarshalToJSONWriterEventOutput(&wrappedWriter, &c)\n}",
"func (v IngredientArr) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels7(w, v)\n}",
"func (v RiverConnection) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection4(w, v)\n}",
"func (v streamItemMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson2802b09fEncodeGithubComPhilippseithSignalr(w, v)\n}",
"func (v Vacancy) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeGithubComIskiyRabotauaTelegramBotPkgRabotaua4(w, v)\n}",
"func (v Item) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeDrhyuComIndexerModels2(w, v)\n}",
"func (v PostAttachement) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk6(w, v)\n}",
"func (v ExtFilter) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson795c59c6EncodeGrapeGuardRules11(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson1(w, v)\n}",
"func (v ProductShrinked) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels3(w, v)\n}",
"func (v RiverConnectionJS) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection3(w, v)\n}",
"func (v DCCSend) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer27(w, v)\n}",
"func (v MediumPayload) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE242b40eEncodeGithubComExampleSample1(w, v)\n}",
"func (v Stash) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeDrhyuComIndexerModels(w, v)\n}",
"func (v WSRequest) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer3(w, v)\n}",
"func (v ApiMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi132(w, v)\n}",
"func (v flattenedField) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson390b7126EncodeGithubComChancedPicker35(w, v)\n}",
"func (v SFMetric) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson51bca34dEncodeGithubComSkydiveProjectSkydiveSflow2(w, v)\n}",
"func (v managerHandlerDevice) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonEd74d837EncodeGithubComKihamoBoggartComponentsBoggartInternalHandlers1(w, v)\n}",
"func (v BaseOp) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi125(w, v)\n}",
"func (v Data) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson17(w, v)\n}",
"func (v ResultReq) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi54(w, v)\n}",
"func (v CreateUserResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers2(w, v)\n}",
"func (v CreateIsolatedWorldReturns) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoPage90(w, v)\n}",
"func (v ServerKeys) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection2(w, v)\n}",
"func (v ShadowUpdateMsgSt) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB7ed31d3EncodeMevericcoreMccommon1(w, v)\n}",
"func (v WSResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer2(w, v)\n}",
"func (v Content) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson18(w, v)\n}",
"func (v Join) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer21(w, v)\n}",
"func (v Bid) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson22(w, v)\n}",
"func (v moreLikeThisQuery) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson390b7126EncodeGithubComChancedPicker18(w, v)\n}",
"func (v Device) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson15(w, v)\n}"
] | [
"0.77204984",
"0.7672636",
"0.7653424",
"0.7591104",
"0.7549626",
"0.7543635",
"0.7506079",
"0.74917865",
"0.74814856",
"0.747845",
"0.74776804",
"0.7472024",
"0.74326074",
"0.74075466",
"0.74005824",
"0.7393129",
"0.7387327",
"0.7384699",
"0.7380124",
"0.7379066",
"0.73720104",
"0.73705596",
"0.73606724",
"0.7350565",
"0.73454297",
"0.73452127",
"0.73444057",
"0.73321444",
"0.7307502",
"0.73051214",
"0.73029035",
"0.729492",
"0.72922665",
"0.7291515",
"0.72859746",
"0.72832805",
"0.7281536",
"0.72810227",
"0.72690594",
"0.7265171",
"0.7263047",
"0.72606456",
"0.7260384",
"0.725305",
"0.72479904",
"0.724739",
"0.7246577",
"0.72447056",
"0.7243153",
"0.72394603",
"0.72375077",
"0.7237401",
"0.7235131",
"0.7232335",
"0.7225127",
"0.72224236",
"0.722217",
"0.722101",
"0.7216019",
"0.7215666",
"0.72131526",
"0.7212736",
"0.71996164",
"0.7197805",
"0.71939874",
"0.71824425",
"0.7180386",
"0.71780163",
"0.7173894",
"0.71658254",
"0.7163895",
"0.7163262",
"0.7161551",
"0.7154433",
"0.7154243",
"0.7153383",
"0.71522945",
"0.71498144",
"0.71420634",
"0.71419924",
"0.7141065",
"0.7132712",
"0.71298224",
"0.71237564",
"0.71226513",
"0.71173185",
"0.7116985",
"0.7109691",
"0.71048075",
"0.7099136",
"0.7098513",
"0.70949537",
"0.7090401",
"0.7087397",
"0.7085066",
"0.70830184",
"0.70823175",
"0.7082235",
"0.7080599",
"0.7080532",
"0.7079698"
] | 0.0 | -1 |
UnmarshalJSON supports json.Unmarshaler interface | func (v *UnloadCheckResponse) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjson6a975c40DecodeJsonBenchmark(&r, v)
return r.Error()
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (j *jsonNative) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func unmarshalJSON(j extv1.JSON, output *any) error {\n\tif len(j.Raw) == 0 {\n\t\treturn nil\n\t}\n\treturn json.Unmarshal(j.Raw, output)\n}",
"func (j *JSON) Unmarshal(input, target interface{}) error {\n\t// take the input and convert it to target\n\treturn jsonEncoding.Unmarshal(input.([]byte), target)\n}",
"func (j *Publisher) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *OneLike) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\tdecodeOneLike(&r, v)\n\treturn r.Error()\n}",
"func UnmarshalJSON(data []byte, v interface{}) {\n\terr := json.Unmarshal(data, v)\n\tAbortIf(err)\n}",
"func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {\n\td := json.NewDecoder(r)\n\tfor _, opt := range opts {\n\t\td = opt(d)\n\t}\n\tif err := d.Decode(&o); err != nil {\n\t\treturn fmt.Errorf(\"while decoding JSON: %v\", err)\n\t}\n\treturn nil\n}",
"func UnmarshalJSON(b []byte, discriminator string, f Factory) (interface{}, error) {\n\tm := make(map[string]interface{})\n\terr := json.Unmarshal(b, &m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Decode(m, discriminator, f)\n}",
"func UnmarshalFromJSON(data []byte, target interface{}) error {\n\tvar ctx map[string]interface{}\n\terr := json.Unmarshal(data, &ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn Unmarshal(ctx, target)\n}",
"func (s *Serializer) Unmarshal(data []byte, v interface{}) error {\n\treturn jsoniter.Unmarshal(data,v)\n}",
"func (j *ThirdParty) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func unmarshal() {\n\tfmt.Println(\"=== json.unmarshal ===\")\n\tvar jsonBlob = []byte(`[\n\t\t{\"name\": \"Bill\", \"age\": 109},\n\t\t{\"name\": \"Bob\", \"age\": 5}\n\t]`)\n\n\tvar persons []Person\n\terr := json.Unmarshal(jsonBlob, &persons)\n\tcheck(err)\n\n\tfmt.Printf(\"%+v\\n\", persons)\n}",
"func (j *Data) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func (j *Type) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (this *Simple) UnmarshalJSON(b []byte) error {\n\treturn TypesUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *Response) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (pl PLUtil) Unmarshal(data []byte, v interface{}) error {\n\tcmd := pl.execCommand(\n\t\t\"plutil\",\n\t\t\"-convert\", \"json\",\n\t\t// Read from stdin.\n\t\t\"-\",\n\t\t// Output to stdout.\n\t\t\"-o\", \"-\")\n\tcmd.Stdin = bytes.NewReader(data)\n\tstdout, err := cmd.Output()\n\tif exitErr, ok := err.(*exec.ExitError); ok {\n\t\treturn fmt.Errorf(\"`%s` failed (%w) with stderr: %s\", cmd, err, exitErr.Stderr)\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"`%s` failed (%w)\", cmd, err)\n\t}\n\tif err := json.Unmarshal(stdout, v); err != nil {\n\t\treturn fmt.Errorf(\"failed to parse json: %w\", err)\n\t}\n\treturn nil\n}",
"func (j *LuaFunction) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *BlitzedItemResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark4(&r, v)\n\treturn r.Error()\n}",
"func Unmarshal(data []byte, v Unmarshaler) error {\n\tl := jlexer.Lexer{Data: data}\n\tv.UnmarshalTinyJSON(&l)\n\treturn l.Error()\n}",
"func (j *Producer) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *User) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *Json) UnmarshalJSON(b []byte) error {\n\tr, err := loadContentWithOptions(b, Options{\n\t\tType: ContentTypeJson,\n\t\tStrNumber: true,\n\t})\n\tif r != nil {\n\t\t// Value copy.\n\t\t*j = *r\n\t}\n\treturn err\n}",
"func (j *Error) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *Element) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson2(&r, v)\n\treturn r.Error()\n}",
"func (j *Message) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func jsonDecode(reader io.ReadCloser, v interface{}) error {\n\tdecoder := json.NewDecoder(reader)\n\terr := decoder.Decode(v)\n\treturn err\n}",
"func (j *FactoryPluginRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (m *gohaiMarshaler) UnmarshalJSON(bytes []byte) error {\n\tfirstUnmarshall := \"\"\n\terr := json.Unmarshal(bytes, &firstUnmarshall)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal([]byte(firstUnmarshall), &(m.gohai))\n\treturn err\n}",
"func (j *Packet) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (receiver *Type) UnmarshalJSON(src []byte) error {\n\tif nil == receiver {\n\t\treturn errNilReceiver\n\t}\n\n\tvar s string\n\tif err := json.Unmarshal(src, &s); nil != err {\n\t\treturn err\n\t}\n\n\tif err := receiver.Scan(s); nil != err {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (j *qProxyClient) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func Unmarshal(data []byte, v Unmarshaler) error {\n\tl := jlexer.Lexer{Data: data}\n\tv.UnmarshalEasyJSON(&l)\n\treturn l.Error()\n}",
"func (j *TextMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func (w *Entry) UnmarshalJSON(bb []byte) error {\n\t<<!!YOUR_CODE!!>>\n}",
"func UnmarshalJSON(body io.Reader, v interface{}) error {\n\tdecoder := json.NewDecoder(body)\n\treturn decoder.Decode(v)\n}",
"func Unmarshal(data []byte) (interface{}, error) {\n\tvar value marble\n\terr := json.Unmarshal(data, &value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &value, nil\n}",
"func (j *RunPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (i *Transform) UnmarshalJSON(data []byte) error {\n\tvar s string\n\tif err := json.Unmarshal(data, &s); err != nil {\n\t\treturn fmt.Errorf(\"Transform should be a string, got %[1]s\", data)\n\t}\n\n\tvar err error\n\t*i, err = ParseTransformString(s)\n\treturn err\n}",
"func (this *Service) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (v *OneUpdateLike) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\tdecodeOneUpdateLike(&r, v)\n\treturn r.Error()\n}",
"func (j *RunRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *Raw) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer10(&r, v)\n\treturn r.Error()\n}",
"func (j *Server) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *JSONText) Unmarshal(v interface{}) error {\n\treturn json.Unmarshal([]byte(*j), v)\n}",
"func (j *JsonlMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func (j *PublishMessagesResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *Event) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *FactoryPluginPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j JSON) Unmarshal(dest interface{}) error {\n\tif dest == nil {\n\t\treturn errors.New(\"destination is nil, not a valid pointer to an object\")\n\t}\n\n\t// Call our implementation of\n\t// JSON MarshalJSON through json.Marshal\n\t// to get the value of the JSON object\n\tres, err := json.Marshal(j)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn json.Unmarshal(res, dest)\n}",
"func (v *User) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson9e1087fdDecodeHw3Bench(&r, v)\n\treturn r.Error()\n}",
"func Unmarshal(data []byte, v interface{}) error {\n\terr := json.Unmarshal(data, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ImplementsPostJSONUnmarshaler(v) {\n\t\terr := v.(PostJSONUnmarshaler).PostUnmarshalJSON()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func FromJSON(i interface{}, r io.Reader) error {\n\td := json.NewDecoder(r)\n\treturn d.Decode(i)\n}",
"func (v *FormulaAndFunctionResponseFormat) UnmarshalJSON(src []byte) error {\n\tvar value string\n\terr := json.Unmarshal(src, &value)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*v = FormulaAndFunctionResponseFormat(value)\n\treturn nil\n}",
"func (j *GetMessagesResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *RespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {\n\tf.d.jsonUnmarshalV(tm)\n}",
"func (j *LuaTable) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *Regulations) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *User) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson2bc03518DecodeLangTaskOnBench(&r, v)\n\treturn r.Error()\n}",
"func Unmarshal(b []byte, v interface{}) error {\n\treturn json.Unmarshal(b, v)\n}",
"func (this *DeploymentStrategy) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *LuaInt) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *Visit) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel1(&r, v)\n\treturn r.Error()\n}",
"func (u *Unstructured) UnmarshalJSON(b []byte) error {\n\t_, _, err := UnstructuredJSONScheme.Decode(b, nil, u)\n\treturn err\n}",
"func UnmarshalJSON(b []byte) (dgo.Value, error) {\n\tdec := json.NewDecoder(bytes.NewReader(b))\n\tdec.UseNumber()\n\treturn jsonDecodeValue(dec)\n}",
"func unmarshal(data []byte, v interface{}) {\n\terr := json.Unmarshal(data, v)\n\tassert(err == nil, \"unmarshal error: %s\", err)\n}",
"func (j *Balance) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (t *Type) UnmarshalJSON(b []byte) error {\n\tvar text string\n\tif err := json.Unmarshal(b, &text); err != nil {\n\t\treturn err\n\t}\n\n\treturn t.UnmarshalText([]byte(text))\n}",
"func JSONDecode(ctx context.Context, data []byte, obj interface{}) error {\n\treturn json.Unmarshal(data, obj)\n}",
"func (j *RegisterRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *ListPluginRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (this *Probe) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *BootInitiationRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *VisitArray) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel(&r, v)\n\treturn r.Error()\n}",
"func (v *ExportItem) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson1(&r, v)\n\treturn r.Error()\n}",
"func (this *ImportedReference) UnmarshalJSON(b []byte) error {\n\treturn TypesUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *LuaString) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (jz JSONGzipEncoding) Unmarshal(data []byte, value interface{}) error {\n\tjsonData, err := GzipDecode(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(jsonData, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (v *ItemCheckResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark2(&r, v)\n\treturn r.Error()\n}",
"func (i *Interface) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"etag\":\n\t\t\terr = unpopulate(val, \"Etag\", &i.Etag)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"extendedLocation\":\n\t\t\terr = unpopulate(val, \"ExtendedLocation\", &i.ExtendedLocation)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &i.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"location\":\n\t\t\terr = unpopulate(val, \"Location\", &i.Location)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &i.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &i.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tags\":\n\t\t\terr = unpopulate(val, \"Tags\", &i.Tags)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &i.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func Unmarshal(data []byte, typ DataFormat, target interface{}) {\n\tswitch typ {\n\tcase GOB:\n\t\tbuf := bytes.NewReader(data)\n\t\tgob.NewDecoder(buf).Decode(target)\n\n\tdefault:\n\t\tif err := json.Unmarshal(data, target); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}",
"func (j *Json) UnmarshalJSON(data []byte) error {\n\terr := json.Unmarshal(data, &j.data)\n\n\tj.exists = (err == nil)\n\treturn err\n}",
"func (v *PbTestObject) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson5fcf962eDecodeGithubComJsonIteratorGoBenchmarkWith10IntFields(&r, v)\n\treturn r.Error()\n}",
"func (j *ModifyQueueResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *UnInstallRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func unmarshalJSON(i *big.Int, bz []byte) error {\n\tvar text string\n\terr := json.Unmarshal(bz, &text)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn i.UnmarshalText([]byte(text))\n}",
"func (j *LuaBool) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *WorkerCreateOperation) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func jsonDec(t reflect.Type, in []byte) (T, error) {\n\tval := reflect.New(t)\n\tif err := jsonx.Unmarshal(val.Interface(), in); err != nil {\n\t\treturn nil, err\n\t}\n\treturn val.Elem().Interface(), nil\n}",
"func (z *Int) UnmarshalJSON(text []byte) error {}",
"func (j *PeerInfo) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (obj *miner) UnmarshalJSON(data []byte) error {\n\tins := new(JSONMiner)\n\terr := json.Unmarshal(data, ins)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpr, err := createMinerFromJSON(ins)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinsMiner := pr.(*miner)\n\tobj.toTransact = insMiner.toTransact\n\tobj.queue = insMiner.queue\n\tobj.broadcasted = insMiner.broadcasted\n\tobj.toLink = insMiner.toLink\n\treturn nil\n}",
"func (j *UnInstallPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func deJSONify(i interface{}) (interface{}, error) {\n\tvar data []byte\n\tswitch t := i.(type) {\n\tcase string:\n\t\tdata = []byte(t)\n\tcase []byte:\n\t\tdata = t\n\tcase json.RawMessage:\n\t\tdata = []byte(t)\n\tdefault:\n\t\treturn i, nil\n\t}\n\tvar x interface{}\n\tif err := json.Unmarshal(data, &x); err != nil {\n\t\treturn nil, &kivik.Error{HTTPStatus: http.StatusBadRequest, Err: err}\n\t}\n\treturn x, nil\n}",
"func (v *DocumentResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark3(&r, v)\n\treturn r.Error()\n}",
"func (c *JSONCodec) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func NewJSONUnmarshaler(resolver Resolver) Unmarshaler {\n\treturn newJSONUnmarshaler(resolver)\n}",
"func NewJSONUnmarshaler(resolver Resolver) Unmarshaler {\n\treturn newJSONUnmarshaler(resolver)\n}"
] | [
"0.70113605",
"0.698139",
"0.6947301",
"0.6867781",
"0.68005323",
"0.67680764",
"0.6741481",
"0.67051035",
"0.6688701",
"0.66797084",
"0.6676911",
"0.6669605",
"0.6661001",
"0.66579056",
"0.6652777",
"0.66498846",
"0.6632663",
"0.663189",
"0.6627629",
"0.66243863",
"0.6612909",
"0.6587119",
"0.65519077",
"0.6545157",
"0.6537283",
"0.6533197",
"0.6532074",
"0.6526187",
"0.6518123",
"0.6512875",
"0.6505786",
"0.64908326",
"0.64847505",
"0.64830405",
"0.64820194",
"0.6469316",
"0.64528453",
"0.64508975",
"0.6441661",
"0.6441397",
"0.6438974",
"0.6438737",
"0.642948",
"0.6408435",
"0.640738",
"0.6396278",
"0.6394157",
"0.6385808",
"0.63855124",
"0.63844603",
"0.6375449",
"0.63702816",
"0.63625103",
"0.63553596",
"0.63552856",
"0.63477194",
"0.6344893",
"0.6339914",
"0.6331977",
"0.63298523",
"0.6323917",
"0.63238263",
"0.631284",
"0.63110864",
"0.6310182",
"0.6305762",
"0.63040566",
"0.62972116",
"0.62931895",
"0.6291462",
"0.62913823",
"0.62810636",
"0.6280757",
"0.6274565",
"0.6273215",
"0.62724316",
"0.62711626",
"0.6271133",
"0.62660044",
"0.6263724",
"0.62590677",
"0.62587553",
"0.62568384",
"0.6255846",
"0.6252581",
"0.62471205",
"0.6244156",
"0.6241776",
"0.62323636",
"0.62298375",
"0.6226608",
"0.6226587",
"0.62243664",
"0.6220156",
"0.6218001",
"0.6216062",
"0.6215216",
"0.621471",
"0.62088907",
"0.62088907"
] | 0.6318712 | 62 |
UnmarshalEasyJSON supports easyjson.Unmarshaler interface | func (v *UnloadCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjson6a975c40DecodeJsonBenchmark(l, v)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *BlitzedItemResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark4(l, v)\n}",
"func (v *Fruit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels11(l, v)\n}",
"func (v *Boo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeMsgpJson(l, v)\n}",
"func (v *Element) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson2(l, v)\n}",
"func (c *Context) UnmarshalEasyJSON(in *jlexer.Lexer) {\n\tContextSerialization.UnmarshalFromEasyJSONLexer(in, c)\n}",
"func (v *Format) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson14(l, v)\n}",
"func (v *DetectedFruit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels12(l, v)\n}",
"func (v *item) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(l, v)\n}",
"func (v *ItemCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark2(l, v)\n}",
"func (v *Native) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson10(l, v)\n}",
"func (v *FormDataMQ) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson(l, v)\n}",
"func (v *DocumentResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark3(l, v)\n}",
"func (v *Node) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComSkydiveProjectSkydiveGraffitiApiTypes1(l, v)\n}",
"func (v *flattenedField) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker35(l, v)\n}",
"func (v *ExtFilter) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson795c59c6DecodeGrapeGuardRules11(l, v)\n}",
"func (v *Deal) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson8a221a72DecodeGithubComVungleVungoOpenrtb(l, v)\n}",
"func (v *OrderCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark1(l, v)\n}",
"func (v *Visit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel1(l, v)\n}",
"func (v *Error) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer26(l, v)\n}",
"func (v *GetUserResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson84c0690eDecodeMainHandlers1(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson9e1087fdDecodeHw3Bench(l, v)\n}",
"func (v *IngredientArr) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels7(l, v)\n}",
"func (v *VisitArray) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel(l, v)\n}",
"func (v *Foo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonAbe23ddeDecodeGithubComUberZanzibarExamplesExampleGatewayBuildGenCodeClientsCorgeCorge1(l, v)\n}",
"func (v *Ingredient) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels8(l, v)\n}",
"func (v *Musician) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson62dc445bDecode20211NoskoolTeamInternalAppMusiciansModels2(l, v)\n}",
"func (v *ThirdParty) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson2(l, v)\n}",
"func (v *Data) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson17(l, v)\n}",
"func (v *Deal) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson16(l, v)\n}",
"func (v *Raw) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer10(l, v)\n}",
"func (v *EasyResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6ff3ac1dDecodeGithubComWenweihBitcoinRpcGolangProto1(l, v)\n}",
"func (v *AdvFormData) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson3(l, v)\n}",
"func (v *Message) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer18(l, v)\n}",
"func (v *Teacher) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonF1627ba7DecodeGithubComDuchiporexiaGoutilsXmsgTests(l, v)\n}",
"func (v *Invite) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer22(l, v)\n}",
"func (v *CBPerson) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE242b40eDecodeGithubComExampleSample2(l, v)\n}",
"func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson326edDecodeGithubComMxmCherryOpenrtb(l, v)\n}",
"func (v *BidRequest) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson21(l, v)\n}",
"func (v *Impression) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson12(l, v)\n}",
"func (v *Msg) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels6(l, v)\n}",
"func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson326edDecodeGithubComApplifierGoOpenrtbOpenrtb2(l, v)\n}",
"func (v *Info) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC80ae7adDecodeGithubComDeiklovTechDbRomanovAndrGolangModels13(l, v)\n}",
"func (v *MediumPayload) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE242b40eDecodeGithubComExampleSample1(l, v)\n}",
"func (v *Part) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer12(l, v)\n}",
"func (v *ProductExtendedArr) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels4(l, v)\n}",
"func (v *Whois) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer1(l, v)\n}",
"func (v *App) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson25(l, v)\n}",
"func (v *Content) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson18(l, v)\n}",
"func (v *Responce) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeGithubComSerhio83DruidPkgStructs(l, v)\n}",
"func (v *TransactionResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE82c8e88DecodeGithubComKamaiuOandaGoModel5(l, v)\n}",
"func (v *ProductExtended) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels5(l, v)\n}",
"func (v *Error) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdproto2(l, v)\n}",
"func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson20(l, v)\n}",
"func (v *HireManager) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonAf94a8adDecodeGithubComGoParkMailRu20192ComandusInternalModel(l, v)\n}",
"func (v *PlantainerShadowMetadataSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5bd79fa1DecodeMevericcoreMcplantainer9(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels(l, v)\n}",
"func (v *RespStruct) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeDrhyuComIndexerModels1(l, v)\n}",
"func (v *Item) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeDrhyuComIndexerModels2(l, v)\n}",
"func (v *Annotation) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeGithubComSerhio83DruidPkgStructs3(l, v)\n}",
"func (v *Fundamental) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson3e8ab7adDecodeGithubComAlpacahqAlpacaTradeApiGoV3Alpaca14(l, v)\n}",
"func (v *BasicUser) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeJsongen4(l, v)\n}",
"func (v *Features) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer25(l, v)\n}",
"func (v *Edge) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComSkydiveProjectSkydiveGraffitiApiTypes2(l, v)\n}",
"func (v *ShadowModelSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB7ed31d3DecodeMevericcoreMccommon5(l, v)\n}",
"func (v *AdvForm) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson4(l, v)\n}",
"func (v *binaryField) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker46(l, v)\n}",
"func (v *ShadowUpdateMsgSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB7ed31d3DecodeMevericcoreMccommon1(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson393a2a40DecodeCodegen(l, v)\n}",
"func (v *InfoUser) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeJsongen3(l, v)\n}",
"func (v *Message) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdproto1(l, v)\n}",
"func (v *Pmp) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson9(l, v)\n}",
"func (v *MOTD) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer19(l, v)\n}",
"func (v *Attack) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComGoParkMailRu2018242GameServerTypes4(l, v)\n}",
"func (v *moreLikeThisQuery) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker18(l, v)\n}",
"func (v *ExportItem) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson1(l, v)\n}",
"func (v *EventLoadEventFired) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdprotoPage70(l, v)\n}",
"func (v *managerListener) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonEd74d837DecodeGithubComKihamoBoggartComponentsBoggartInternalHandlers(l, v)\n}",
"func (v *WSResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer2(l, v)\n}",
"func (v *PbTestObject) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5fcf962eDecodeGithubComJsonIteratorGoBenchmarkWith10IntFields(l, v)\n}",
"func (v *Student) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonF1627ba7DecodeGithubComDuchiporexiaGoutilsXmsgTests1(l, v)\n}",
"func (v *Device) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson15(l, v)\n}",
"func (v *Messages) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer17(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson2bc03518DecodeLangTaskOnBench(l, v)\n}",
"func (v *BaseTickerInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi117(l, v)\n}",
"func (v *Topic) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer5(l, v)\n}",
"func (v *BaseLedgerInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi127(l, v)\n}",
"func (v *Banner) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson23(l, v)\n}",
"func (v *APIError) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson3e8ab7adDecodeGithubComAlpacahqAlpacaTradeApiGoV3Alpaca24(l, v)\n}",
"func (v *Bid) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson22(l, v)\n}",
"func (v *Post) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson783c1624DecodeGithubComGobwasVk7(l, v)\n}",
"func (v *BaseTradeInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi116(l, v)\n}",
"func (v *MusicianFullInformation) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson62dc445bDecode20211NoskoolTeamInternalAppMusiciansModels1(l, v)\n}",
"func (v *matchRule) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker19(l, v)\n}",
"func (v *managerHandlerDevice) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonEd74d837DecodeGithubComKihamoBoggartComponentsBoggartInternalHandlers1(l, v)\n}",
"func (v *ResultReq) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi54(l, v)\n}",
"func (v *invocationMessage) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson2802b09fDecodeGithubComPhilippseithSignalr1(l, v)\n}",
"func (v *fuzzyRule) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker34(l, v)\n}",
"func (v *PlantainerShadowSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5bd79fa1DecodeMevericcoreMcplantainer8(l, v)\n}",
"func (v *completionMessage) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson2802b09fDecodeGithubComPhilippseithSignalr5(l, v)\n}",
"func (v *Source) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson3(l, v)\n}"
] | [
"0.73436594",
"0.73405373",
"0.72584677",
"0.72040373",
"0.71776104",
"0.71510446",
"0.7143438",
"0.71413064",
"0.71286225",
"0.7112999",
"0.7103849",
"0.71005577",
"0.7097653",
"0.7085183",
"0.70850646",
"0.7081146",
"0.7077145",
"0.70403785",
"0.70357895",
"0.7030433",
"0.7028725",
"0.7021155",
"0.70114094",
"0.70109946",
"0.70103574",
"0.7002987",
"0.69937176",
"0.6981908",
"0.6981736",
"0.69811034",
"0.6980795",
"0.69711286",
"0.6965327",
"0.695678",
"0.69543517",
"0.6948873",
"0.69404715",
"0.69387776",
"0.6935085",
"0.6930436",
"0.6922759",
"0.6904652",
"0.6894174",
"0.68897486",
"0.6889671",
"0.6888647",
"0.6887437",
"0.6887124",
"0.68862444",
"0.68853265",
"0.68804044",
"0.6874087",
"0.6870016",
"0.6869092",
"0.6868185",
"0.6858964",
"0.6846011",
"0.68405616",
"0.6836571",
"0.6835831",
"0.68291616",
"0.6823791",
"0.6822216",
"0.6817067",
"0.6815519",
"0.68133044",
"0.6812743",
"0.6811037",
"0.68107563",
"0.6809271",
"0.680744",
"0.68065774",
"0.68030846",
"0.68029016",
"0.67965585",
"0.6794714",
"0.678028",
"0.67772484",
"0.67714006",
"0.6769638",
"0.67685604",
"0.67657346",
"0.6763771",
"0.67634416",
"0.6762939",
"0.67570746",
"0.6756749",
"0.6754731",
"0.6750861",
"0.6749626",
"0.6745531",
"0.6744763",
"0.6743289",
"0.67418313",
"0.6734197",
"0.6732776",
"0.67303044",
"0.67287326",
"0.67265445",
"0.67261595"
] | 0.6772522 | 78 |
MarshalJSON supports json.Marshaler interface | func (v OrderCheckResponse) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjson6a975c40EncodeJsonBenchmark1(&w, v)
return w.Buffer.BuildBytes(), w.Error
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Marshal(v Marshaler) ([]byte, error) {\n\tw := jwriter.Writer{}\n\tv.MarshalEasyJSON(&w)\n\treturn w.BuildBytes()\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tif ImplementsPreJSONMarshaler(v) {\n\t\terr := v.(PreJSONMarshaler).PreMarshalJSON()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn json.Marshal(v)\n}",
"func (j *JSON) Marshal(target interface{}) (output interface{}, err error) {\n\treturn jsonEncoding.Marshal(target)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(v Marshaler) ([]byte, error) {\n\tif isNilInterface(v) {\n\t\treturn nullBytes, nil\n\t}\n\n\tw := jwriter.Writer{}\n\tv.MarshalTinyJSON(&w)\n\treturn w.BuildBytes()\n}",
"func JsonMarshal(t interface{}) ([]byte, error) {\n\tbuffer := &bytes.Buffer{}\n\tenc := json.NewEncoder(buffer)\n\tenc.SetEscapeHTML(false)\n\terr := enc.Encode(t)\n\treturn buffer.Bytes(), err\n}",
"func jsonMarshal(t interface{}) ([]byte, error) {\n\tbuffer := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buffer)\n\tencoder.SetEscapeHTML(false)\n\tencoder.SetIndent(\"\", \" \")\n\terr := encoder.Encode(t)\n\treturn buffer.Bytes(), err\n}",
"func marshal() {\n\tfmt.Println(\"=== json.marshal ===\")\n\tryan := &Person{\"Ryan\", 25}\n\twire, err := json.Marshal(ryan)\n\tcheck(err)\n\tfmt.Println(string(wire))\n}",
"func jsonMarshal(t interface{}) ([]byte, error) {\n\tvar buffer bytes.Buffer\n\tencoder := json.NewEncoder(&buffer)\n\tencoder.SetEscapeHTML(false)\n\tif err := encoder.Encode(t); err != nil {\n\t\treturn nil, err\n\t}\n\t// Prettify\n\tvar out bytes.Buffer\n\tif err := json.Indent(&out, buffer.Bytes(), \"\", \"\\t\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn out.Bytes(), nil\n}",
"func JSONEncoder() Encoder { return jsonEncoder }",
"func JSONMarshal(data interface{}) ([]byte, error) {\n\tvar b []byte\n\tvar err error\n\n\tb, err = json.MarshalIndent(data, \"\", \" \")\n\n\treturn b, err\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn canonicaljson.Transform(b)\n}",
"func (c *JsonCodec) Marshal(object interface{}, options map[string]interface{}) ([]byte, error) {\n\treturn jsonEncoding.Marshal(object)\n}",
"func (c *JSONCodec) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(in interface{}) ([]byte, error) {\n\tres, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(in)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"marshaling error: %w\", err)\n\t}\n\treturn res, nil\n}",
"func Marshal(p Payload) ([]byte, error) {\n\treturn json.Marshal(p)\n}",
"func (j *JsonlMarshaler) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func (s *Serializer) Marshal(v interface{}) ([]byte, error) {\n\treturn jsoniter.Marshal(v)\n}",
"func (j *TextMarshaler) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\treturn replaceUnicodeConversion(b), err\n}",
"func Marshal(object interface{}) (data string, err error) {\n\tif t, err := json.Marshal(object); err != nil {\n\t\tdata = \"\"\n\t} else {\n\t\tdata = string(t)\n\t}\n\treturn\n}",
"func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) {\n\tswitch v.(type) {\n\tcase *distribute.GetResponse:\n\t\tvalue, err := protobuf.MarshalAny(v.(*distribute.GetResponse).Fields)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn json.Marshal(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"fields\": value,\n\t\t\t},\n\t\t)\n\tcase *distribute.SearchResponse:\n\t\tvalue, err := protobuf.MarshalAny(v.(*distribute.SearchResponse).SearchResult)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn json.Marshal(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"search_result\": value,\n\t\t\t},\n\t\t)\n\tdefault:\n\t\treturn json.Marshal(v)\n\t}\n}",
"func (j *JSON) Marshal(obj interface{}) error {\n\tres, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Call our implementation of\n\t// JSON UnmarshalJSON through json.Unmarshal\n\t// to set the result to the JSON object\n\treturn json.Unmarshal(res, j)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\trv := reflect.ValueOf(v)\n\tif rv.Kind() != reflect.Slice {\n\t\treturn nil, &InvalidMarshalError{rv.Kind()}\n\t}\n\n\tvar buf bytes.Buffer\n\tencoder := json.NewEncoder(&buf)\n\tfor i := 0; i < rv.Len(); i++ {\n\t\tif err := encoder.Encode(rv.Index(i).Interface()); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn buf.Bytes(), nil\n}",
"func marshalJSON(i *big.Int) ([]byte, error) {\n\ttext, err := i.MarshalText()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(string(text))\n}",
"func (p *HJSON) Marshal(o map[string]interface{}) ([]byte, error) {\n\treturn hjson.Marshal(o)\n}",
"func JSONMarshal(v interface{}) ([]byte, error) {\n\tb, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn b, err\n\t}\n\tb = bytes.Replace(b, []byte(\"\\\\u003c\"), []byte(\"<\"), -1)\n\tb = bytes.Replace(b, []byte(\"\\\\u003e\"), []byte(\">\"), -1)\n\tb = bytes.Replace(b, []byte(\"\\\\u0026\"), []byte(\"&\"), -1)\n\treturn b, err\n}",
"func JSONMarshal(content interface{}, escape bool) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := json.NewEncoder(&buf)\n\tenc.SetEscapeHTML(escape)\n\tif err := enc.Encode(content); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}",
"func MarshalJSON(v interface{}, config MarshalConfig) ([]byte, error) {\n\tres, err := Marshal(v, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(res)\n}",
"func (jz JSONGzipEncoding) Marshal(v interface{}) ([]byte, error) {\n\tbuf, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// var bufSizeBefore = len(buf)\n\n\tbuf, err = GzipEncode(buf)\n\t// coloredoutput.Infof(\"gzip_json_compress_ratio=%d/%d=%.2f\",\n\t// bufSizeBefore, len(buf), float64(bufSizeBefore)/float64(len(buf)))\n\treturn buf, err\n}",
"func (c *Codec) Marshal(v interface{}) ([]byte, error) {\n\tresult, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, xerrors.New(err.Error())\n\t}\n\n\treturn result, nil\n}",
"func Marshal(obj interface{}) ([]byte, error) {\n\treturn mgobson.Marshal(obj)\n}",
"func jsonify(v interface{}) string { return string(mustMarshalJSON(v)) }",
"func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) {\n\tif isNilInterface(v) {\n\t\treturn w.Write(nullBytes)\n\t}\n\n\tjw := jwriter.Writer{}\n\tv.MarshalTinyJSON(&jw)\n\treturn jw.DumpTo(w)\n}",
"func (sc *Contract) Marshal() ([]byte, error) {\n\treturn json.Marshal(sc)\n}",
"func (f *Formatter) Marshal(v interface{}) ([]byte, error) {\n\tdata, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn f.Format(data)\n}",
"func marshal(v interface{}) []byte {\n\tb, err := json.Marshal(v)\n\tassert(err == nil, \"marshal error: %s\", err)\n\treturn b\n}",
"func JSONMarshal(obj interface{}) ([]byte, error) {\n\tb := new(bytes.Buffer)\n\tenc := json.NewEncoder(b)\n\tenc.SetEscapeHTML(false)\n\terr := enc.Encode(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// json.NewEncoder.Encode adds a final '\\n', json.Marshal does not.\n\t// Let's keep the default json.Marshal behaviour.\n\tres := b.Bytes()\n\tif len(res) >= 1 && res[len(res)-1] == '\\n' {\n\t\tres = res[:len(res)-1]\n\t}\n\treturn res, nil\n}",
"func DefaultMarshalJSON(obj interface{}) ([]byte, error) {\n\treturn json.MarshalIndent(obj, \"\", \" \")\n}",
"func marshal(v interface{}) string {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(b)\n}",
"func MarshalJSON(v interface{}) []byte {\n\tdata, err := json.Marshal(v)\n\tAbortIf(err)\n\treturn data\n}",
"func encode(ins interface{}) ([]byte, error) {\n\treturn json.Marshal(ins)\n}",
"func Marshal(data interface{}) ([]byte, error) {\n\tdocument, err := MarshalToStruct(data, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn json.Marshal(document)\n}",
"func NewJSONMarshaler() Marshaler {\n\treturn newJSONMarshaler()\n}",
"func JsonMarshal(val any) ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buf)\n\tencoder.SetEscapeHTML(false)\n\tif err := encoder.Encode(val); err != nil {\n\t\treturn nil, err\n\t}\n\t// Return without a trailing line feed.\n\tlineTerminatedJson := buf.Bytes()\n\treturn bytes.TrimSuffix(lineTerminatedJson, []byte(\"\\n\")), nil\n}",
"func (j *jsonNative) MarshalJSON() ([]byte, error) {\n\tvar buf fflib.Buffer\n\tif j == nil {\n\t\tbuf.WriteString(\"null\")\n\t\treturn buf.Bytes(), nil\n\t}\n\terr := j.MarshalJSONBuf(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}",
"func (j *Publisher) MarshalJSON() ([]byte, error) {\n\tvar buf fflib.Buffer\n\tif j == nil {\n\t\tbuf.WriteString(\"null\")\n\t\treturn buf.Bytes(), nil\n\t}\n\terr := j.MarshalJSONBuf(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}",
"func serialize(toMarshal interface{}) *bytes.Buffer {\n\tjsonStr, _ := json.Marshal(toMarshal)\n\treturn bytes.NewBuffer(jsonStr)\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\tif err != nil {\n\t\t//return []byte{}, fmt.Errorf(\"Erro no json %v\", err)\n\t\treturn []byte{}, errors.New(fmt.Sprintf(\"Erro no json %v\", err))\n\t}\n\treturn bs, nil\n}",
"func (r *Anilist) Marshal() ([]byte, error) {\n\treturn json.Marshal(r)\n}",
"func JSONEncode(data interface{}) string {\n\tbt, _ := json.Marshal(data)\n\treturn string(bt)\n}",
"func Marshal(o interface{}) ([]byte, error) {\n\tj, err := json.Marshal(o)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error marshaling into JSON: %v\", err)\n\t}\n\n\ty, err := JSONToYAML(j)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error converting JSON to YAML: %v\", err)\n\t}\n\n\treturn y, nil\n}",
"func (v Post) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson783c1624EncodeGithubComGobwasVk7(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {\n\tbs, fnerr := iv.MarshalJSON()\n\tf.e.marshalAsis(bs, fnerr)\n}",
"func Marshal(obj interface{}) ([]byte, error) {\n\treturn MarshalValue(reflect.ValueOf(obj))\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\tif err != nil {\n\t\t// return []byte{}, errors.New(fmt.Sprintf(\"Can't create json with error: %v\", err))\n\t\treturn []byte{}, fmt.Errorf(\"Can't create json for person: %v error: %v\", a, err)\n\t}\n\treturn bs, nil\n}",
"func (v Join) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer21(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func JsonEncode(i interface{}) string {\n\tb, err := json.Marshal(i)\n\n\tif err != nil {\n\t\tfmt.Println(\"util.getJsonStr.error\", err)\n\t\treturn \"\"\n\t}\n\n\treturn string(b)\n}",
"func (b *SampleFJSONBuilder) Marshal(orig *SampleF) ([]byte, error) {\n\tret, err := b.Convert(orig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(ret)\n}",
"func (m *Marshaler) JSON(v interface{}) ([]byte, error) {\n\tif _, ok := v.(proto.Message); ok {\n\t\tvar buf bytes.Buffer\n\t\tjm := &jsonpb.Marshaler{}\n\t\tjm.OrigName = true\n\t\tif err := jm.Marshal(&buf, v.(proto.Message)); err != nil {\n\t\t\treturn []byte{}, err\n\t\t}\n\n\t\tif m.FilterProtoJson {\n\t\t\treturn m.FilterJsonWithStruct(buf.Bytes(), v)\n\t\t}\n\t\treturn buf.Bytes(), nil\n\t}\n\treturn json.Marshal(v)\n}",
"func (v ExportItem) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson1(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func mustMarshalJSON(v interface{}) []byte {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\tpanic(\"marshal json: \" + err.Error())\n\t}\n\treturn b\n}",
"func (v publicKey) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection(w, v)\n}",
"func EncodedJson(v interface{}) []byte {\n\tif p, err := json.Marshal(v); err != nil {\n\t\treturn []byte{}\n\t} else {\n\t\treturn p\n\t}\n}",
"func (js JSONSerializable) MarshalJSON() ([]byte, error) {\n\tif !js.Valid {\n\t\treturn json.Marshal(nil)\n\t}\n\tjsWithHex := replaceBytesWithHex(js.Val)\n\treturn json.Marshal(jsWithHex)\n}",
"func MarshalJSON(v interface{}) string {\n\tcontents, _ := json.MarshalIndent(v, \"\", \" \")\n\treturn string(contents)\n}",
"func (j Json) MarshalJSON() ([]byte, error) {\n\treturn j.ToJson()\n}",
"func (j JSON) MarshalJSON() ([]byte, error) {\n\tif j.Valid {\n\t\treturn json.Marshal(j.Map)\n\t}\n\n\treturn json.Marshal(nil)\n}",
"func (p PatchObject) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"tags\", p.Tags)\n\treturn json.Marshal(objectMap)\n}",
"func (v Posts) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson783c1624EncodeGithubComGobwasVk(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (spbi SuccessfulPropertyBatchInfo) MarshalJSON() ([]byte, error) {\n\tspbi.Kind = KindSuccessful\n\tobjectMap := make(map[string]interface{})\n\tif spbi.Properties != nil {\n\t\tobjectMap[\"Properties\"] = spbi.Properties\n\t}\n\tif spbi.Kind != \"\" {\n\t\tobjectMap[\"Kind\"] = spbi.Kind\n\t}\n\treturn json.Marshal(objectMap)\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\n\tif err != nil {\n\t\treturn []byte{}, fmt.Errorf(\"Error caught by Onur Gurel\")\n\t}\n\n\treturn bs, nil\n}",
"func (v BindParams) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoTethering2(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (handler Handler) EncodeJSON(v interface{}) (b []byte, err error) {\n\n\t//if(w.Get(\"pretty\",\"false\")==\"true\"){\n\tb, err = json.MarshalIndent(v, \"\", \" \")\n\t//}else{\n\t//\tb, err = json.Marshal(v)\n\t//}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn b, nil\n}",
"func Marshal(val interface{}) ([]byte, error) {}",
"func testJSONMarshal(t *testing.T, v interface{}, want string) {\n\tj, err := json.Marshal(v)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to marshal JSON for %v\", v)\n\t}\n\n\tw := new(bytes.Buffer)\n\terr = json.Compact(w, []byte(want))\n\tif err != nil {\n\t\tt.Errorf(\"String is not valid json: %s\", want)\n\t}\n\n\tif w.String() != string(j) {\n\t\tt.Errorf(\"json.Marshal(%q) returned %s, want %s\", v, j, w)\n\t}\n\n\t// now go the other direction and make sure things unmarshal as expected\n\tu := reflect.ValueOf(v).Interface()\n\tif err := json.Unmarshal([]byte(want), u); err != nil {\n\t\tt.Errorf(\"Unable to unmarshal JSON for %v: %v\", want, err)\n\t}\n\n\tif !reflect.DeepEqual(v, u) {\n\t\tt.Errorf(\"json.Unmarshal(%q) returned %s, want %s\", want, u, v)\n\t}\n}",
"func (o *Object) MarshalJSON() ([]byte, error) {\n\tctx := _builtinJSON_stringifyContext{\n\t\tr: o.runtime,\n\t}\n\tex := o.runtime.vm.try(func() {\n\t\tif !ctx.do(o) {\n\t\t\tctx.buf.WriteString(\"null\")\n\t\t}\n\t})\n\tif ex != nil {\n\t\treturn nil, ex\n\t}\n\treturn ctx.buf.Bytes(), nil\n}",
"func testJSONMarshal(t *testing.T, v interface{}, want string) {\n\tj, err := json.Marshal(v)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to marshal JSON for %v\", v)\n\t}\n\n\tw := new(bytes.Buffer)\n\terr = json.Compact(w, []byte(want))\n\tif err != nil {\n\t\tt.Errorf(\"String is not valid json: %s\", want)\n\t}\n\n\tif w.String() != string(j) {\n\t\tt.Errorf(\"json.Marshal(%q) returned %s, want %s\", v, j, w)\n\t}\n\n\t// now go the other direction and make sure things unmarshal as expected\n\tu := reflect.ValueOf(v).Interface()\n\tif err := json.Unmarshal([]byte(want), u); err != nil {\n\t\tt.Errorf(\"Unable to unmarshal JSON for %v\", want)\n\t}\n\n\tif !reflect.DeepEqual(v, u) {\n\t\tt.Errorf(\"json.Unmarshal(%q) returned %s, want %s\", want, u, v)\n\t}\n}",
"func (v BlitzedItemResponse) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson6a975c40EncodeJsonBenchmark4(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (v item) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonD2b7633eEncodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (i Interface) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"etag\", i.Etag)\n\tpopulate(objectMap, \"extendedLocation\", i.ExtendedLocation)\n\tpopulate(objectMap, \"id\", i.ID)\n\tpopulate(objectMap, \"location\", i.Location)\n\tpopulate(objectMap, \"name\", i.Name)\n\tpopulate(objectMap, \"properties\", i.Properties)\n\tpopulate(objectMap, \"tags\", i.Tags)\n\tpopulate(objectMap, \"type\", i.Type)\n\treturn json.Marshal(objectMap)\n}",
"func marshalJSON(namingStrategy string, that interface{}) ([]byte, error) {\n\tout := map[string]interface{}{}\n\tt := reflect.TypeOf(that)\n\tv := reflect.ValueOf(that)\n\n\tfnctn := v.MethodByName(namingStrategy)\n\tfname := func(params ...interface{}) string {\n\t\tin := make([]reflect.Value, len(params))\n\t\tfor k, param := range params {\n\t\t\tin[k] = reflect.ValueOf(param)\n\t\t}\n\t\treturn fnctn.Call(in)[0].String()\n\t}\n\toutName := \"\"\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tswitch n := f.Tag.Get(\"json\"); n {\n\t\tcase \"\":\n\t\t\toutName = f.Name\n\t\tcase \"-\":\n\t\t\toutName = \"\"\n\t\tdefault:\n\t\t\toutName = fname(n)\n\t\t}\n\t\tif outName != \"\" {\n\t\t\tout[outName] = v.Field(i).Interface()\n\t\t}\n\t}\n\treturn json.Marshal(out)\n}",
"func testMarshalJSON(t *testing.T, cmd interface{}) {\n\tjsonCmd, err := json.Marshal(cmd)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tfmt.Println(string(jsonCmd))\n}",
"func (v PbTestObject) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson5fcf962eEncodeGithubComJsonIteratorGoBenchmarkWith10IntFields(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (pbi PropertyBatchInfo) MarshalJSON() ([]byte, error) {\n\tpbi.Kind = KindPropertyBatchInfo\n\tobjectMap := make(map[string]interface{})\n\tif pbi.Kind != \"\" {\n\t\tobjectMap[\"Kind\"] = pbi.Kind\n\t}\n\treturn json.Marshal(objectMap)\n}",
"func My_json(demo interface{}) *bytes.Buffer {\r\n\tif bs, err := json.Marshal(demo); err == nil {\r\n\t\treq := bytes.NewBuffer([]byte(bs))\r\n\t\treturn req\r\n\t} else {\r\n\t\tpanic(err)\r\n\t}\r\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn MarshalEx(v, false)\n}",
"func MarshalJSON(a interface{}) (b []byte, err error) {\n\tif m, ok := a.(proto.Message); ok {\n\t\tmarshaller := &jsonpb.Marshaler{}\n\t\tvar buf bytes.Buffer\n\t\terr = marshaller.Marshal(&buf, m)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tb = buf.Bytes()\n\t} else {\n\t\tb, err = json.Marshal(a)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}",
"func (v PostParams) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels10(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (v Post) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson5a72dc82EncodeGithubComTimRazumovTechnoparkDBAppModels6(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (o *ExportDataPartial) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(o.ToMap())\n}",
"func (n *Node) Marshal() ([]byte, error) {\n\treturn json.Marshal(n)\n}",
"func (n *Node) Marshal() ([]byte, error) {\n\treturn json.Marshal(n)\n}",
"func jsonEnc(in T) ([]byte, error) {\n\treturn jsonx.Marshal(in)\n}",
"func (s *HTTPServer) marshalJSON(req *http.Request, obj interface{}) ([]byte, error) {\n\tif _, ok := req.URL.Query()[\"pretty\"]; ok || s.agent.config.DevMode {\n\t\tbuf, err := json.MarshalIndent(obj, \"\", \" \")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf = append(buf, \"\\n\"...)\n\t\treturn buf, nil\n\t}\n\n\tbuf, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf, err\n}",
"func (m Message) Marshal() ([]byte, error) {\n\treturn jsoniter.Marshal(m)\n}",
"func (m Json) MarshalJSON() ([]byte, error) {\n\tif m == nil {\n\t\treturn []byte(\"null\"), nil\n\t}\n\treturn m, nil\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn NewFormatter().Marshal(v)\n}",
"func (s *ServiceSecrets) MarshalJson() ([]byte, error) {\n\treturn json.Marshal(s)\n}",
"func JsonEncode(data []byte, v interface{}) error {\n\n\treturn json.Unmarshal(data, v)\n}",
"func (out *GetOutput) Marshal() ([]byte, error) {\n\treturn json.Marshal(out)\n}"
] | [
"0.75134",
"0.7502133",
"0.7500753",
"0.74823195",
"0.7446766",
"0.7371689",
"0.73370403",
"0.7304601",
"0.72591853",
"0.72539127",
"0.72181046",
"0.717537",
"0.7162588",
"0.7161582",
"0.71608186",
"0.7072197",
"0.70587647",
"0.7044735",
"0.7022404",
"0.6973228",
"0.6963657",
"0.69578344",
"0.69243026",
"0.6924262",
"0.68824863",
"0.68681127",
"0.68572986",
"0.6818534",
"0.68102",
"0.67969906",
"0.67913324",
"0.67774016",
"0.67717487",
"0.67700523",
"0.6754375",
"0.67300195",
"0.67154574",
"0.6711641",
"0.6708163",
"0.6686554",
"0.6676971",
"0.6670713",
"0.6667217",
"0.6665734",
"0.6651805",
"0.664897",
"0.6601639",
"0.65936595",
"0.6570477",
"0.65671986",
"0.65637034",
"0.6562716",
"0.65555567",
"0.6544248",
"0.65373516",
"0.6532906",
"0.65273225",
"0.65230805",
"0.6517934",
"0.65155387",
"0.6507946",
"0.65065837",
"0.65061134",
"0.65058106",
"0.6502681",
"0.6501059",
"0.6492431",
"0.64840174",
"0.6483743",
"0.64832276",
"0.64799786",
"0.6479202",
"0.6476907",
"0.64739543",
"0.6469417",
"0.6467924",
"0.6463269",
"0.64624554",
"0.6460338",
"0.6457592",
"0.6454149",
"0.6448956",
"0.6447831",
"0.64472353",
"0.6436354",
"0.64353037",
"0.64196956",
"0.64188385",
"0.64096874",
"0.64083934",
"0.6407883",
"0.6405311",
"0.6405311",
"0.640528",
"0.6403627",
"0.6403197",
"0.6402974",
"0.64011514",
"0.6401083",
"0.6395421",
"0.6394115"
] | 0.0 | -1 |
MarshalEasyJSON supports easyjson.Marshaler interface | func (v OrderCheckResponse) MarshalEasyJSON(w *jwriter.Writer) {
easyjson6a975c40EncodeJsonBenchmark1(w, v)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v item) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(w, v)\n}",
"func (v Fruit) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels11(w, v)\n}",
"func (v BlitzedItemResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark4(w, v)\n}",
"func (ce *CustomEvent) MarshalEasyJSON(w *jwriter.Writer) {\n\tce.marshalerCtor().MarshalEasyJSON(w)\n}",
"func (v Boo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeMsgpJson(w, v)\n}",
"func (v DocumentResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark3(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk7(w, v)\n}",
"func (v invocationMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson2802b09fEncodeGithubComPhilippseithSignalr1(w, v)\n}",
"func (v Native) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson10(w, v)\n}",
"func (v ItemCheckResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark2(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson393a2a40EncodeCodegen(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels(w, v)\n}",
"func (v ExportItem) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson1(w, v)\n}",
"func (v Format) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson14(w, v)\n}",
"func (v managerListener) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonEd74d837EncodeGithubComKihamoBoggartComponentsBoggartInternalHandlers(w, v)\n}",
"func (v Part) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer12(w, v)\n}",
"func (v publicKey) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection(w, v)\n}",
"func (v VisitArray) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE564fc13EncodeGithubComLa0rgHighloadcupModel(w, v)\n}",
"func (v Banner) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson23(w, v)\n}",
"func (v MOTD) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer19(w, v)\n}",
"func (v ProductToAdd) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels1(w, v)\n}",
"func (v App) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson25(w, v)\n}",
"func (v Ingredient) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels8(w, v)\n}",
"func (v Visit) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE564fc13EncodeGithubComLa0rgHighloadcupModel1(w, v)\n}",
"func (v InfoUser) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeJsongen3(w, v)\n}",
"func (v Deal) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson8a221a72EncodeGithubComVungleVungoOpenrtb(w, v)\n}",
"func (v FormDataMQ) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson(w, v)\n}",
"func (v Msg) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels6(w, v)\n}",
"func (v Nick) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer14(w, v)\n}",
"func (v Program) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson1c6ddb42EncodeGithubComSturfeeincGlTF(w, v)\n}",
"func (v BasicUser) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeJsongen4(w, v)\n}",
"func (v Pet) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson14a1085EncodeGithubComIamStubborNPetstoreDbModels1(w, v)\n}",
"func (v ChannelForward) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer32(w, v)\n}",
"func (v Element) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson2(w, v)\n}",
"func (v Pmp) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson9(w, v)\n}",
"func (v Responce) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeGithubComSerhio83DruidPkgStructs(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson5a72dc82EncodeGithubComTimRazumovTechnoparkDBAppModels6(w, v)\n}",
"func (c Context) MarshalEasyJSON(writer *ej_jwriter.Writer) {\n\tif err := c.Err(); err != nil {\n\t\twriter.Error = err\n\t\treturn\n\t}\n\twrappedWriter := jwriter.NewWriterFromEasyJSONWriter(writer)\n\tContextSerialization.MarshalToJSONWriter(&wrappedWriter, &c)\n}",
"func (v Features) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer25(w, v)\n}",
"func (v Node) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeGithubComSkydiveProjectSkydiveGraffitiApiTypes1(w, v)\n}",
"func (v Posts) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk(w, v)\n}",
"func (v ThirdParty) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson2(w, v)\n}",
"func (v Student) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonF1627ba7EncodeGithubComDuchiporexiaGoutilsXmsgTests1(w, v)\n}",
"func (v UsersHandler) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers(w, v)\n}",
"func (v Segment) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson6(w, v)\n}",
"func (v Info) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels13(w, v)\n}",
"func (v CBPerson) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE242b40eEncodeGithubComExampleSample2(w, v)\n}",
"func (v Invite) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer22(w, v)\n}",
"func (v GetUserResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers1(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson7da3ae25EncodeCourseraGolangHomeworks(w, v)\n}",
"func (v PostSource) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk2(w, v)\n}",
"func (v ShadowModelSt) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB7ed31d3EncodeMevericcoreMccommon5(w, v)\n}",
"func (v BindParams) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoTethering2(w, v)\n}",
"func (v Error) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer26(w, v)\n}",
"func (v Deal) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson16(w, v)\n}",
"func (v Message) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer18(w, v)\n}",
"func (v PostParams) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels10(w, v)\n}",
"func (v Musician) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson62dc445bEncode20211NoskoolTeamInternalAppMusiciansModels2(w, v)\n}",
"func (v Impression) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson12(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels11(w, v)\n}",
"func (v BaseInstrumentInfo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi128(w, v)\n}",
"func (v Grade) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonF1627ba7EncodeGithubComDuchiporexiaGoutilsXmsgTests2(w, v)\n}",
"func (v Mode) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer16(w, v)\n}",
"func (v Whois) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer1(w, v)\n}",
"func (v Foo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonAbe23ddeEncodeGithubComUberZanzibarExamplesExampleGatewayBuildGenCodeClientsCorgeCorge1(w, v)\n}",
"func (v Source) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson3(w, v)\n}",
"func (v ProductExtended) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels5(w, v)\n}",
"func (v Away) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer33(w, v)\n}",
"func (c EventOutputContext) MarshalEasyJSON(writer *ej_jwriter.Writer) {\n\tif err := c.Err(); err != nil {\n\t\twriter.Error = err\n\t\treturn\n\t}\n\twrappedWriter := jwriter.NewWriterFromEasyJSONWriter(writer)\n\tContextSerialization.MarshalToJSONWriterEventOutput(&wrappedWriter, &c)\n}",
"func (v IngredientArr) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels7(w, v)\n}",
"func (v RiverConnection) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection4(w, v)\n}",
"func (v streamItemMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson2802b09fEncodeGithubComPhilippseithSignalr(w, v)\n}",
"func (v Vacancy) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeGithubComIskiyRabotauaTelegramBotPkgRabotaua4(w, v)\n}",
"func (v Item) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeDrhyuComIndexerModels2(w, v)\n}",
"func (v PostAttachement) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk6(w, v)\n}",
"func (v ExtFilter) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson795c59c6EncodeGrapeGuardRules11(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson1(w, v)\n}",
"func (v ProductShrinked) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels3(w, v)\n}",
"func (v RiverConnectionJS) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection3(w, v)\n}",
"func (v DCCSend) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer27(w, v)\n}",
"func (v MediumPayload) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE242b40eEncodeGithubComExampleSample1(w, v)\n}",
"func (v Stash) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeDrhyuComIndexerModels(w, v)\n}",
"func (v WSRequest) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer3(w, v)\n}",
"func (v ApiMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi132(w, v)\n}",
"func (v flattenedField) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson390b7126EncodeGithubComChancedPicker35(w, v)\n}",
"func (v SFMetric) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson51bca34dEncodeGithubComSkydiveProjectSkydiveSflow2(w, v)\n}",
"func (v managerHandlerDevice) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonEd74d837EncodeGithubComKihamoBoggartComponentsBoggartInternalHandlers1(w, v)\n}",
"func (v BaseOp) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi125(w, v)\n}",
"func (v Data) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson17(w, v)\n}",
"func (v ResultReq) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi54(w, v)\n}",
"func (v CreateUserResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers2(w, v)\n}",
"func (v CreateIsolatedWorldReturns) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoPage90(w, v)\n}",
"func (v ServerKeys) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection2(w, v)\n}",
"func (v ShadowUpdateMsgSt) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB7ed31d3EncodeMevericcoreMccommon1(w, v)\n}",
"func (v WSResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer2(w, v)\n}",
"func (v Content) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson18(w, v)\n}",
"func (v Join) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer21(w, v)\n}",
"func (v Bid) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson22(w, v)\n}",
"func (v moreLikeThisQuery) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson390b7126EncodeGithubComChancedPicker18(w, v)\n}",
"func (v Device) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson15(w, v)\n}"
] | [
"0.77204984",
"0.7672636",
"0.7653424",
"0.7591104",
"0.7549626",
"0.7543635",
"0.7506079",
"0.74917865",
"0.74814856",
"0.747845",
"0.74776804",
"0.7472024",
"0.74326074",
"0.74075466",
"0.74005824",
"0.7393129",
"0.7387327",
"0.7384699",
"0.7380124",
"0.7379066",
"0.73720104",
"0.73705596",
"0.73606724",
"0.7350565",
"0.73454297",
"0.73452127",
"0.73444057",
"0.73321444",
"0.7307502",
"0.73051214",
"0.729492",
"0.72922665",
"0.7291515",
"0.72859746",
"0.72832805",
"0.7281536",
"0.72810227",
"0.72690594",
"0.7265171",
"0.7263047",
"0.72606456",
"0.7260384",
"0.725305",
"0.72479904",
"0.724739",
"0.7246577",
"0.72447056",
"0.7243153",
"0.72394603",
"0.72375077",
"0.7237401",
"0.7235131",
"0.7232335",
"0.7225127",
"0.72224236",
"0.722217",
"0.722101",
"0.7216019",
"0.7215666",
"0.72131526",
"0.7212736",
"0.71996164",
"0.7197805",
"0.71939874",
"0.71824425",
"0.7180386",
"0.71780163",
"0.7173894",
"0.71658254",
"0.7163895",
"0.7163262",
"0.7161551",
"0.7154433",
"0.7154243",
"0.7153383",
"0.71522945",
"0.71498144",
"0.71420634",
"0.71419924",
"0.7141065",
"0.7132712",
"0.71298224",
"0.71237564",
"0.71226513",
"0.71173185",
"0.7116985",
"0.7109691",
"0.71048075",
"0.7099136",
"0.7098513",
"0.70949537",
"0.7090401",
"0.7087397",
"0.7085066",
"0.70830184",
"0.70823175",
"0.7082235",
"0.7080599",
"0.7080532",
"0.7079698"
] | 0.73029035 | 30 |
UnmarshalJSON supports json.Unmarshaler interface | func (v *OrderCheckResponse) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjson6a975c40DecodeJsonBenchmark1(&r, v)
return r.Error()
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (j *jsonNative) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func unmarshalJSON(j extv1.JSON, output *any) error {\n\tif len(j.Raw) == 0 {\n\t\treturn nil\n\t}\n\treturn json.Unmarshal(j.Raw, output)\n}",
"func (j *JSON) Unmarshal(input, target interface{}) error {\n\t// take the input and convert it to target\n\treturn jsonEncoding.Unmarshal(input.([]byte), target)\n}",
"func (j *Publisher) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *OneLike) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\tdecodeOneLike(&r, v)\n\treturn r.Error()\n}",
"func UnmarshalJSON(data []byte, v interface{}) {\n\terr := json.Unmarshal(data, v)\n\tAbortIf(err)\n}",
"func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {\n\td := json.NewDecoder(r)\n\tfor _, opt := range opts {\n\t\td = opt(d)\n\t}\n\tif err := d.Decode(&o); err != nil {\n\t\treturn fmt.Errorf(\"while decoding JSON: %v\", err)\n\t}\n\treturn nil\n}",
"func UnmarshalJSON(b []byte, discriminator string, f Factory) (interface{}, error) {\n\tm := make(map[string]interface{})\n\terr := json.Unmarshal(b, &m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Decode(m, discriminator, f)\n}",
"func UnmarshalFromJSON(data []byte, target interface{}) error {\n\tvar ctx map[string]interface{}\n\terr := json.Unmarshal(data, &ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn Unmarshal(ctx, target)\n}",
"func (s *Serializer) Unmarshal(data []byte, v interface{}) error {\n\treturn jsoniter.Unmarshal(data,v)\n}",
"func (j *ThirdParty) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func unmarshal() {\n\tfmt.Println(\"=== json.unmarshal ===\")\n\tvar jsonBlob = []byte(`[\n\t\t{\"name\": \"Bill\", \"age\": 109},\n\t\t{\"name\": \"Bob\", \"age\": 5}\n\t]`)\n\n\tvar persons []Person\n\terr := json.Unmarshal(jsonBlob, &persons)\n\tcheck(err)\n\n\tfmt.Printf(\"%+v\\n\", persons)\n}",
"func (j *Data) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func (j *Type) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (this *Simple) UnmarshalJSON(b []byte) error {\n\treturn TypesUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *Response) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (pl PLUtil) Unmarshal(data []byte, v interface{}) error {\n\tcmd := pl.execCommand(\n\t\t\"plutil\",\n\t\t\"-convert\", \"json\",\n\t\t// Read from stdin.\n\t\t\"-\",\n\t\t// Output to stdout.\n\t\t\"-o\", \"-\")\n\tcmd.Stdin = bytes.NewReader(data)\n\tstdout, err := cmd.Output()\n\tif exitErr, ok := err.(*exec.ExitError); ok {\n\t\treturn fmt.Errorf(\"`%s` failed (%w) with stderr: %s\", cmd, err, exitErr.Stderr)\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"`%s` failed (%w)\", cmd, err)\n\t}\n\tif err := json.Unmarshal(stdout, v); err != nil {\n\t\treturn fmt.Errorf(\"failed to parse json: %w\", err)\n\t}\n\treturn nil\n}",
"func (j *LuaFunction) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *BlitzedItemResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark4(&r, v)\n\treturn r.Error()\n}",
"func Unmarshal(data []byte, v Unmarshaler) error {\n\tl := jlexer.Lexer{Data: data}\n\tv.UnmarshalTinyJSON(&l)\n\treturn l.Error()\n}",
"func (j *Producer) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *User) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *Json) UnmarshalJSON(b []byte) error {\n\tr, err := loadContentWithOptions(b, Options{\n\t\tType: ContentTypeJson,\n\t\tStrNumber: true,\n\t})\n\tif r != nil {\n\t\t// Value copy.\n\t\t*j = *r\n\t}\n\treturn err\n}",
"func (j *Error) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *Element) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson2(&r, v)\n\treturn r.Error()\n}",
"func (j *Message) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func jsonDecode(reader io.ReadCloser, v interface{}) error {\n\tdecoder := json.NewDecoder(reader)\n\terr := decoder.Decode(v)\n\treturn err\n}",
"func (j *FactoryPluginRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (m *gohaiMarshaler) UnmarshalJSON(bytes []byte) error {\n\tfirstUnmarshall := \"\"\n\terr := json.Unmarshal(bytes, &firstUnmarshall)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal([]byte(firstUnmarshall), &(m.gohai))\n\treturn err\n}",
"func (j *Packet) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (receiver *Type) UnmarshalJSON(src []byte) error {\n\tif nil == receiver {\n\t\treturn errNilReceiver\n\t}\n\n\tvar s string\n\tif err := json.Unmarshal(src, &s); nil != err {\n\t\treturn err\n\t}\n\n\tif err := receiver.Scan(s); nil != err {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (j *qProxyClient) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func Unmarshal(data []byte, v Unmarshaler) error {\n\tl := jlexer.Lexer{Data: data}\n\tv.UnmarshalEasyJSON(&l)\n\treturn l.Error()\n}",
"func (j *TextMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func (w *Entry) UnmarshalJSON(bb []byte) error {\n\t<<!!YOUR_CODE!!>>\n}",
"func UnmarshalJSON(body io.Reader, v interface{}) error {\n\tdecoder := json.NewDecoder(body)\n\treturn decoder.Decode(v)\n}",
"func Unmarshal(data []byte) (interface{}, error) {\n\tvar value marble\n\terr := json.Unmarshal(data, &value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &value, nil\n}",
"func (j *RunPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (i *Transform) UnmarshalJSON(data []byte) error {\n\tvar s string\n\tif err := json.Unmarshal(data, &s); err != nil {\n\t\treturn fmt.Errorf(\"Transform should be a string, got %[1]s\", data)\n\t}\n\n\tvar err error\n\t*i, err = ParseTransformString(s)\n\treturn err\n}",
"func (this *Service) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (v *OneUpdateLike) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\tdecodeOneUpdateLike(&r, v)\n\treturn r.Error()\n}",
"func (j *RunRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *Raw) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer10(&r, v)\n\treturn r.Error()\n}",
"func (j *Server) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *JSONText) Unmarshal(v interface{}) error {\n\treturn json.Unmarshal([]byte(*j), v)\n}",
"func (j *JsonlMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func (j *PublishMessagesResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *Event) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *FactoryPluginPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j JSON) Unmarshal(dest interface{}) error {\n\tif dest == nil {\n\t\treturn errors.New(\"destination is nil, not a valid pointer to an object\")\n\t}\n\n\t// Call our implementation of\n\t// JSON MarshalJSON through json.Marshal\n\t// to get the value of the JSON object\n\tres, err := json.Marshal(j)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn json.Unmarshal(res, dest)\n}",
"func (v *User) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson9e1087fdDecodeHw3Bench(&r, v)\n\treturn r.Error()\n}",
"func Unmarshal(data []byte, v interface{}) error {\n\terr := json.Unmarshal(data, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ImplementsPostJSONUnmarshaler(v) {\n\t\terr := v.(PostJSONUnmarshaler).PostUnmarshalJSON()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func FromJSON(i interface{}, r io.Reader) error {\n\td := json.NewDecoder(r)\n\treturn d.Decode(i)\n}",
"func (v *FormulaAndFunctionResponseFormat) UnmarshalJSON(src []byte) error {\n\tvar value string\n\terr := json.Unmarshal(src, &value)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*v = FormulaAndFunctionResponseFormat(value)\n\treturn nil\n}",
"func (j *GetMessagesResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *RespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {\n\tf.d.jsonUnmarshalV(tm)\n}",
"func (j *LuaTable) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *Regulations) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *User) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson2bc03518DecodeLangTaskOnBench(&r, v)\n\treturn r.Error()\n}",
"func Unmarshal(b []byte, v interface{}) error {\n\treturn json.Unmarshal(b, v)\n}",
"func (v *UnloadCheckResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark(&r, v)\n\treturn r.Error()\n}",
"func (this *DeploymentStrategy) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *LuaInt) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *Visit) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel1(&r, v)\n\treturn r.Error()\n}",
"func (u *Unstructured) UnmarshalJSON(b []byte) error {\n\t_, _, err := UnstructuredJSONScheme.Decode(b, nil, u)\n\treturn err\n}",
"func UnmarshalJSON(b []byte) (dgo.Value, error) {\n\tdec := json.NewDecoder(bytes.NewReader(b))\n\tdec.UseNumber()\n\treturn jsonDecodeValue(dec)\n}",
"func unmarshal(data []byte, v interface{}) {\n\terr := json.Unmarshal(data, v)\n\tassert(err == nil, \"unmarshal error: %s\", err)\n}",
"func (j *Balance) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (t *Type) UnmarshalJSON(b []byte) error {\n\tvar text string\n\tif err := json.Unmarshal(b, &text); err != nil {\n\t\treturn err\n\t}\n\n\treturn t.UnmarshalText([]byte(text))\n}",
"func JSONDecode(ctx context.Context, data []byte, obj interface{}) error {\n\treturn json.Unmarshal(data, obj)\n}",
"func (j *RegisterRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *ListPluginRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (this *Probe) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *BootInitiationRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *VisitArray) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel(&r, v)\n\treturn r.Error()\n}",
"func (v *ExportItem) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson1(&r, v)\n\treturn r.Error()\n}",
"func (this *ImportedReference) UnmarshalJSON(b []byte) error {\n\treturn TypesUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *LuaString) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (jz JSONGzipEncoding) Unmarshal(data []byte, value interface{}) error {\n\tjsonData, err := GzipDecode(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(jsonData, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (v *ItemCheckResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark2(&r, v)\n\treturn r.Error()\n}",
"func (i *Interface) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"etag\":\n\t\t\terr = unpopulate(val, \"Etag\", &i.Etag)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"extendedLocation\":\n\t\t\terr = unpopulate(val, \"ExtendedLocation\", &i.ExtendedLocation)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &i.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"location\":\n\t\t\terr = unpopulate(val, \"Location\", &i.Location)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &i.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &i.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tags\":\n\t\t\terr = unpopulate(val, \"Tags\", &i.Tags)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &i.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func Unmarshal(data []byte, typ DataFormat, target interface{}) {\n\tswitch typ {\n\tcase GOB:\n\t\tbuf := bytes.NewReader(data)\n\t\tgob.NewDecoder(buf).Decode(target)\n\n\tdefault:\n\t\tif err := json.Unmarshal(data, target); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}",
"func (j *Json) UnmarshalJSON(data []byte) error {\n\terr := json.Unmarshal(data, &j.data)\n\n\tj.exists = (err == nil)\n\treturn err\n}",
"func (v *PbTestObject) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson5fcf962eDecodeGithubComJsonIteratorGoBenchmarkWith10IntFields(&r, v)\n\treturn r.Error()\n}",
"func (j *ModifyQueueResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *UnInstallRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func unmarshalJSON(i *big.Int, bz []byte) error {\n\tvar text string\n\terr := json.Unmarshal(bz, &text)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn i.UnmarshalText([]byte(text))\n}",
"func (j *LuaBool) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *WorkerCreateOperation) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func jsonDec(t reflect.Type, in []byte) (T, error) {\n\tval := reflect.New(t)\n\tif err := jsonx.Unmarshal(val.Interface(), in); err != nil {\n\t\treturn nil, err\n\t}\n\treturn val.Elem().Interface(), nil\n}",
"func (z *Int) UnmarshalJSON(text []byte) error {}",
"func (j *PeerInfo) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (obj *miner) UnmarshalJSON(data []byte) error {\n\tins := new(JSONMiner)\n\terr := json.Unmarshal(data, ins)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpr, err := createMinerFromJSON(ins)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinsMiner := pr.(*miner)\n\tobj.toTransact = insMiner.toTransact\n\tobj.queue = insMiner.queue\n\tobj.broadcasted = insMiner.broadcasted\n\tobj.toLink = insMiner.toLink\n\treturn nil\n}",
"func (j *UnInstallPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func deJSONify(i interface{}) (interface{}, error) {\n\tvar data []byte\n\tswitch t := i.(type) {\n\tcase string:\n\t\tdata = []byte(t)\n\tcase []byte:\n\t\tdata = t\n\tcase json.RawMessage:\n\t\tdata = []byte(t)\n\tdefault:\n\t\treturn i, nil\n\t}\n\tvar x interface{}\n\tif err := json.Unmarshal(data, &x); err != nil {\n\t\treturn nil, &kivik.Error{HTTPStatus: http.StatusBadRequest, Err: err}\n\t}\n\treturn x, nil\n}",
"func (v *DocumentResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark3(&r, v)\n\treturn r.Error()\n}",
"func (c *JSONCodec) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func NewJSONUnmarshaler(resolver Resolver) Unmarshaler {\n\treturn newJSONUnmarshaler(resolver)\n}",
"func NewJSONUnmarshaler(resolver Resolver) Unmarshaler {\n\treturn newJSONUnmarshaler(resolver)\n}"
] | [
"0.70113605",
"0.698139",
"0.6947301",
"0.6867781",
"0.68005323",
"0.67680764",
"0.6741481",
"0.67051035",
"0.6688701",
"0.66797084",
"0.6676911",
"0.6669605",
"0.6661001",
"0.66579056",
"0.6652777",
"0.66498846",
"0.6632663",
"0.663189",
"0.6627629",
"0.66243863",
"0.6612909",
"0.6587119",
"0.65519077",
"0.6545157",
"0.6537283",
"0.6533197",
"0.6532074",
"0.6526187",
"0.6518123",
"0.6512875",
"0.6505786",
"0.64908326",
"0.64847505",
"0.64830405",
"0.64820194",
"0.6469316",
"0.64528453",
"0.64508975",
"0.6441661",
"0.6441397",
"0.6438974",
"0.6438737",
"0.642948",
"0.6408435",
"0.640738",
"0.6396278",
"0.6394157",
"0.6385808",
"0.63855124",
"0.63844603",
"0.6375449",
"0.63702816",
"0.63625103",
"0.63553596",
"0.63552856",
"0.63477194",
"0.6344893",
"0.6339914",
"0.6331977",
"0.63298523",
"0.6323917",
"0.63238263",
"0.6318712",
"0.631284",
"0.63110864",
"0.6310182",
"0.6305762",
"0.63040566",
"0.62972116",
"0.62931895",
"0.6291462",
"0.62913823",
"0.62810636",
"0.6280757",
"0.6274565",
"0.6273215",
"0.62724316",
"0.62711626",
"0.6271133",
"0.62660044",
"0.6263724",
"0.62590677",
"0.62587553",
"0.62568384",
"0.6255846",
"0.6252581",
"0.62471205",
"0.6244156",
"0.6241776",
"0.62323636",
"0.62298375",
"0.6226608",
"0.6226587",
"0.62243664",
"0.6220156",
"0.6218001",
"0.6216062",
"0.6215216",
"0.621471",
"0.62088907",
"0.62088907"
] | 0.0 | -1 |
UnmarshalEasyJSON supports easyjson.Unmarshaler interface | func (v *OrderCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjson6a975c40DecodeJsonBenchmark1(l, v)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *BlitzedItemResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark4(l, v)\n}",
"func (v *Fruit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels11(l, v)\n}",
"func (v *Boo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeMsgpJson(l, v)\n}",
"func (v *Element) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson2(l, v)\n}",
"func (c *Context) UnmarshalEasyJSON(in *jlexer.Lexer) {\n\tContextSerialization.UnmarshalFromEasyJSONLexer(in, c)\n}",
"func (v *Format) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson14(l, v)\n}",
"func (v *DetectedFruit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels12(l, v)\n}",
"func (v *item) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(l, v)\n}",
"func (v *ItemCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark2(l, v)\n}",
"func (v *Native) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson10(l, v)\n}",
"func (v *FormDataMQ) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson(l, v)\n}",
"func (v *DocumentResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark3(l, v)\n}",
"func (v *Node) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComSkydiveProjectSkydiveGraffitiApiTypes1(l, v)\n}",
"func (v *flattenedField) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker35(l, v)\n}",
"func (v *ExtFilter) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson795c59c6DecodeGrapeGuardRules11(l, v)\n}",
"func (v *Deal) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson8a221a72DecodeGithubComVungleVungoOpenrtb(l, v)\n}",
"func (v *Visit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel1(l, v)\n}",
"func (v *Error) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer26(l, v)\n}",
"func (v *GetUserResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson84c0690eDecodeMainHandlers1(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson9e1087fdDecodeHw3Bench(l, v)\n}",
"func (v *IngredientArr) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels7(l, v)\n}",
"func (v *VisitArray) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel(l, v)\n}",
"func (v *Foo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonAbe23ddeDecodeGithubComUberZanzibarExamplesExampleGatewayBuildGenCodeClientsCorgeCorge1(l, v)\n}",
"func (v *Ingredient) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels8(l, v)\n}",
"func (v *Musician) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson62dc445bDecode20211NoskoolTeamInternalAppMusiciansModels2(l, v)\n}",
"func (v *ThirdParty) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson2(l, v)\n}",
"func (v *Data) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson17(l, v)\n}",
"func (v *Deal) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson16(l, v)\n}",
"func (v *Raw) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer10(l, v)\n}",
"func (v *EasyResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6ff3ac1dDecodeGithubComWenweihBitcoinRpcGolangProto1(l, v)\n}",
"func (v *AdvFormData) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson3(l, v)\n}",
"func (v *Message) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer18(l, v)\n}",
"func (v *Teacher) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonF1627ba7DecodeGithubComDuchiporexiaGoutilsXmsgTests(l, v)\n}",
"func (v *Invite) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer22(l, v)\n}",
"func (v *CBPerson) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE242b40eDecodeGithubComExampleSample2(l, v)\n}",
"func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson326edDecodeGithubComMxmCherryOpenrtb(l, v)\n}",
"func (v *BidRequest) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson21(l, v)\n}",
"func (v *Impression) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson12(l, v)\n}",
"func (v *Msg) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels6(l, v)\n}",
"func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson326edDecodeGithubComApplifierGoOpenrtbOpenrtb2(l, v)\n}",
"func (v *Info) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC80ae7adDecodeGithubComDeiklovTechDbRomanovAndrGolangModels13(l, v)\n}",
"func (v *MediumPayload) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE242b40eDecodeGithubComExampleSample1(l, v)\n}",
"func (v *Part) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer12(l, v)\n}",
"func (v *ProductExtendedArr) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels4(l, v)\n}",
"func (v *Whois) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer1(l, v)\n}",
"func (v *App) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson25(l, v)\n}",
"func (v *Content) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson18(l, v)\n}",
"func (v *Responce) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeGithubComSerhio83DruidPkgStructs(l, v)\n}",
"func (v *TransactionResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE82c8e88DecodeGithubComKamaiuOandaGoModel5(l, v)\n}",
"func (v *ProductExtended) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels5(l, v)\n}",
"func (v *Error) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdproto2(l, v)\n}",
"func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson20(l, v)\n}",
"func (v *HireManager) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonAf94a8adDecodeGithubComGoParkMailRu20192ComandusInternalModel(l, v)\n}",
"func (v *PlantainerShadowMetadataSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5bd79fa1DecodeMevericcoreMcplantainer9(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels(l, v)\n}",
"func (v *RespStruct) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeDrhyuComIndexerModels1(l, v)\n}",
"func (v *Item) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeDrhyuComIndexerModels2(l, v)\n}",
"func (v *Annotation) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeGithubComSerhio83DruidPkgStructs3(l, v)\n}",
"func (v *Fundamental) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson3e8ab7adDecodeGithubComAlpacahqAlpacaTradeApiGoV3Alpaca14(l, v)\n}",
"func (v *BasicUser) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeJsongen4(l, v)\n}",
"func (v *Features) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer25(l, v)\n}",
"func (v *Edge) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComSkydiveProjectSkydiveGraffitiApiTypes2(l, v)\n}",
"func (v *ShadowModelSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB7ed31d3DecodeMevericcoreMccommon5(l, v)\n}",
"func (v *AdvForm) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson4(l, v)\n}",
"func (v *binaryField) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker46(l, v)\n}",
"func (v *ShadowUpdateMsgSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB7ed31d3DecodeMevericcoreMccommon1(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson393a2a40DecodeCodegen(l, v)\n}",
"func (v *InfoUser) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeJsongen3(l, v)\n}",
"func (v *Message) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdproto1(l, v)\n}",
"func (v *Pmp) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson9(l, v)\n}",
"func (v *MOTD) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer19(l, v)\n}",
"func (v *Attack) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComGoParkMailRu2018242GameServerTypes4(l, v)\n}",
"func (v *moreLikeThisQuery) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker18(l, v)\n}",
"func (v *ExportItem) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson1(l, v)\n}",
"func (v *EventLoadEventFired) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdprotoPage70(l, v)\n}",
"func (v *managerListener) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonEd74d837DecodeGithubComKihamoBoggartComponentsBoggartInternalHandlers(l, v)\n}",
"func (v *WSResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer2(l, v)\n}",
"func (v *UnloadCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark(l, v)\n}",
"func (v *PbTestObject) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5fcf962eDecodeGithubComJsonIteratorGoBenchmarkWith10IntFields(l, v)\n}",
"func (v *Student) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonF1627ba7DecodeGithubComDuchiporexiaGoutilsXmsgTests1(l, v)\n}",
"func (v *Device) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson15(l, v)\n}",
"func (v *Messages) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer17(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson2bc03518DecodeLangTaskOnBench(l, v)\n}",
"func (v *BaseTickerInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi117(l, v)\n}",
"func (v *Topic) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer5(l, v)\n}",
"func (v *BaseLedgerInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi127(l, v)\n}",
"func (v *Banner) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson23(l, v)\n}",
"func (v *APIError) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson3e8ab7adDecodeGithubComAlpacahqAlpacaTradeApiGoV3Alpaca24(l, v)\n}",
"func (v *Bid) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson22(l, v)\n}",
"func (v *Post) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson783c1624DecodeGithubComGobwasVk7(l, v)\n}",
"func (v *BaseTradeInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi116(l, v)\n}",
"func (v *MusicianFullInformation) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson62dc445bDecode20211NoskoolTeamInternalAppMusiciansModels1(l, v)\n}",
"func (v *matchRule) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker19(l, v)\n}",
"func (v *managerHandlerDevice) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonEd74d837DecodeGithubComKihamoBoggartComponentsBoggartInternalHandlers1(l, v)\n}",
"func (v *ResultReq) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi54(l, v)\n}",
"func (v *invocationMessage) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson2802b09fDecodeGithubComPhilippseithSignalr1(l, v)\n}",
"func (v *fuzzyRule) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker34(l, v)\n}",
"func (v *PlantainerShadowSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5bd79fa1DecodeMevericcoreMcplantainer8(l, v)\n}",
"func (v *completionMessage) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson2802b09fDecodeGithubComPhilippseithSignalr5(l, v)\n}",
"func (v *Source) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson3(l, v)\n}"
] | [
"0.73436594",
"0.73405373",
"0.72584677",
"0.72040373",
"0.71776104",
"0.71510446",
"0.7143438",
"0.71413064",
"0.71286225",
"0.7112999",
"0.7103849",
"0.71005577",
"0.7097653",
"0.7085183",
"0.70850646",
"0.7081146",
"0.70403785",
"0.70357895",
"0.7030433",
"0.7028725",
"0.7021155",
"0.70114094",
"0.70109946",
"0.70103574",
"0.7002987",
"0.69937176",
"0.6981908",
"0.6981736",
"0.69811034",
"0.6980795",
"0.69711286",
"0.6965327",
"0.695678",
"0.69543517",
"0.6948873",
"0.69404715",
"0.69387776",
"0.6935085",
"0.6930436",
"0.6922759",
"0.6904652",
"0.6894174",
"0.68897486",
"0.6889671",
"0.6888647",
"0.6887437",
"0.6887124",
"0.68862444",
"0.68853265",
"0.68804044",
"0.6874087",
"0.6870016",
"0.6869092",
"0.6868185",
"0.6858964",
"0.6846011",
"0.68405616",
"0.6836571",
"0.6835831",
"0.68291616",
"0.6823791",
"0.6822216",
"0.6817067",
"0.6815519",
"0.68133044",
"0.6812743",
"0.6811037",
"0.68107563",
"0.6809271",
"0.680744",
"0.68065774",
"0.68030846",
"0.68029016",
"0.67965585",
"0.6794714",
"0.678028",
"0.67772484",
"0.6772522",
"0.67714006",
"0.6769638",
"0.67685604",
"0.67657346",
"0.6763771",
"0.67634416",
"0.6762939",
"0.67570746",
"0.6756749",
"0.6754731",
"0.6750861",
"0.6749626",
"0.6745531",
"0.6744763",
"0.6743289",
"0.67418313",
"0.6734197",
"0.6732776",
"0.67303044",
"0.67287326",
"0.67265445",
"0.67261595"
] | 0.7077145 | 16 |
MarshalJSON supports json.Marshaler interface | func (v ItemCheckResponse) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjson6a975c40EncodeJsonBenchmark2(&w, v)
return w.Buffer.BuildBytes(), w.Error
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Marshal(v Marshaler) ([]byte, error) {\n\tw := jwriter.Writer{}\n\tv.MarshalEasyJSON(&w)\n\treturn w.BuildBytes()\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tif ImplementsPreJSONMarshaler(v) {\n\t\terr := v.(PreJSONMarshaler).PreMarshalJSON()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn json.Marshal(v)\n}",
"func (j *JSON) Marshal(target interface{}) (output interface{}, err error) {\n\treturn jsonEncoding.Marshal(target)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(v Marshaler) ([]byte, error) {\n\tif isNilInterface(v) {\n\t\treturn nullBytes, nil\n\t}\n\n\tw := jwriter.Writer{}\n\tv.MarshalTinyJSON(&w)\n\treturn w.BuildBytes()\n}",
"func JsonMarshal(t interface{}) ([]byte, error) {\n\tbuffer := &bytes.Buffer{}\n\tenc := json.NewEncoder(buffer)\n\tenc.SetEscapeHTML(false)\n\terr := enc.Encode(t)\n\treturn buffer.Bytes(), err\n}",
"func jsonMarshal(t interface{}) ([]byte, error) {\n\tbuffer := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buffer)\n\tencoder.SetEscapeHTML(false)\n\tencoder.SetIndent(\"\", \" \")\n\terr := encoder.Encode(t)\n\treturn buffer.Bytes(), err\n}",
"func marshal() {\n\tfmt.Println(\"=== json.marshal ===\")\n\tryan := &Person{\"Ryan\", 25}\n\twire, err := json.Marshal(ryan)\n\tcheck(err)\n\tfmt.Println(string(wire))\n}",
"func jsonMarshal(t interface{}) ([]byte, error) {\n\tvar buffer bytes.Buffer\n\tencoder := json.NewEncoder(&buffer)\n\tencoder.SetEscapeHTML(false)\n\tif err := encoder.Encode(t); err != nil {\n\t\treturn nil, err\n\t}\n\t// Prettify\n\tvar out bytes.Buffer\n\tif err := json.Indent(&out, buffer.Bytes(), \"\", \"\\t\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn out.Bytes(), nil\n}",
"func JSONEncoder() Encoder { return jsonEncoder }",
"func JSONMarshal(data interface{}) ([]byte, error) {\n\tvar b []byte\n\tvar err error\n\n\tb, err = json.MarshalIndent(data, \"\", \" \")\n\n\treturn b, err\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn canonicaljson.Transform(b)\n}",
"func (c *JsonCodec) Marshal(object interface{}, options map[string]interface{}) ([]byte, error) {\n\treturn jsonEncoding.Marshal(object)\n}",
"func (c *JSONCodec) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(in interface{}) ([]byte, error) {\n\tres, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(in)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"marshaling error: %w\", err)\n\t}\n\treturn res, nil\n}",
"func Marshal(p Payload) ([]byte, error) {\n\treturn json.Marshal(p)\n}",
"func (j *JsonlMarshaler) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func (s *Serializer) Marshal(v interface{}) ([]byte, error) {\n\treturn jsoniter.Marshal(v)\n}",
"func (j *TextMarshaler) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\treturn replaceUnicodeConversion(b), err\n}",
"func Marshal(object interface{}) (data string, err error) {\n\tif t, err := json.Marshal(object); err != nil {\n\t\tdata = \"\"\n\t} else {\n\t\tdata = string(t)\n\t}\n\treturn\n}",
"func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) {\n\tswitch v.(type) {\n\tcase *distribute.GetResponse:\n\t\tvalue, err := protobuf.MarshalAny(v.(*distribute.GetResponse).Fields)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn json.Marshal(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"fields\": value,\n\t\t\t},\n\t\t)\n\tcase *distribute.SearchResponse:\n\t\tvalue, err := protobuf.MarshalAny(v.(*distribute.SearchResponse).SearchResult)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn json.Marshal(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"search_result\": value,\n\t\t\t},\n\t\t)\n\tdefault:\n\t\treturn json.Marshal(v)\n\t}\n}",
"func (j *JSON) Marshal(obj interface{}) error {\n\tres, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Call our implementation of\n\t// JSON UnmarshalJSON through json.Unmarshal\n\t// to set the result to the JSON object\n\treturn json.Unmarshal(res, j)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\trv := reflect.ValueOf(v)\n\tif rv.Kind() != reflect.Slice {\n\t\treturn nil, &InvalidMarshalError{rv.Kind()}\n\t}\n\n\tvar buf bytes.Buffer\n\tencoder := json.NewEncoder(&buf)\n\tfor i := 0; i < rv.Len(); i++ {\n\t\tif err := encoder.Encode(rv.Index(i).Interface()); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn buf.Bytes(), nil\n}",
"func marshalJSON(i *big.Int) ([]byte, error) {\n\ttext, err := i.MarshalText()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(string(text))\n}",
"func (p *HJSON) Marshal(o map[string]interface{}) ([]byte, error) {\n\treturn hjson.Marshal(o)\n}",
"func JSONMarshal(v interface{}) ([]byte, error) {\n\tb, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn b, err\n\t}\n\tb = bytes.Replace(b, []byte(\"\\\\u003c\"), []byte(\"<\"), -1)\n\tb = bytes.Replace(b, []byte(\"\\\\u003e\"), []byte(\">\"), -1)\n\tb = bytes.Replace(b, []byte(\"\\\\u0026\"), []byte(\"&\"), -1)\n\treturn b, err\n}",
"func JSONMarshal(content interface{}, escape bool) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := json.NewEncoder(&buf)\n\tenc.SetEscapeHTML(escape)\n\tif err := enc.Encode(content); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}",
"func MarshalJSON(v interface{}, config MarshalConfig) ([]byte, error) {\n\tres, err := Marshal(v, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(res)\n}",
"func (jz JSONGzipEncoding) Marshal(v interface{}) ([]byte, error) {\n\tbuf, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// var bufSizeBefore = len(buf)\n\n\tbuf, err = GzipEncode(buf)\n\t// coloredoutput.Infof(\"gzip_json_compress_ratio=%d/%d=%.2f\",\n\t// bufSizeBefore, len(buf), float64(bufSizeBefore)/float64(len(buf)))\n\treturn buf, err\n}",
"func (c *Codec) Marshal(v interface{}) ([]byte, error) {\n\tresult, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, xerrors.New(err.Error())\n\t}\n\n\treturn result, nil\n}",
"func Marshal(obj interface{}) ([]byte, error) {\n\treturn mgobson.Marshal(obj)\n}",
"func jsonify(v interface{}) string { return string(mustMarshalJSON(v)) }",
"func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) {\n\tif isNilInterface(v) {\n\t\treturn w.Write(nullBytes)\n\t}\n\n\tjw := jwriter.Writer{}\n\tv.MarshalTinyJSON(&jw)\n\treturn jw.DumpTo(w)\n}",
"func (sc *Contract) Marshal() ([]byte, error) {\n\treturn json.Marshal(sc)\n}",
"func (f *Formatter) Marshal(v interface{}) ([]byte, error) {\n\tdata, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn f.Format(data)\n}",
"func marshal(v interface{}) []byte {\n\tb, err := json.Marshal(v)\n\tassert(err == nil, \"marshal error: %s\", err)\n\treturn b\n}",
"func JSONMarshal(obj interface{}) ([]byte, error) {\n\tb := new(bytes.Buffer)\n\tenc := json.NewEncoder(b)\n\tenc.SetEscapeHTML(false)\n\terr := enc.Encode(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// json.NewEncoder.Encode adds a final '\\n', json.Marshal does not.\n\t// Let's keep the default json.Marshal behaviour.\n\tres := b.Bytes()\n\tif len(res) >= 1 && res[len(res)-1] == '\\n' {\n\t\tres = res[:len(res)-1]\n\t}\n\treturn res, nil\n}",
"func DefaultMarshalJSON(obj interface{}) ([]byte, error) {\n\treturn json.MarshalIndent(obj, \"\", \" \")\n}",
"func marshal(v interface{}) string {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(b)\n}",
"func MarshalJSON(v interface{}) []byte {\n\tdata, err := json.Marshal(v)\n\tAbortIf(err)\n\treturn data\n}",
"func encode(ins interface{}) ([]byte, error) {\n\treturn json.Marshal(ins)\n}",
"func Marshal(data interface{}) ([]byte, error) {\n\tdocument, err := MarshalToStruct(data, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn json.Marshal(document)\n}",
"func NewJSONMarshaler() Marshaler {\n\treturn newJSONMarshaler()\n}",
"func JsonMarshal(val any) ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buf)\n\tencoder.SetEscapeHTML(false)\n\tif err := encoder.Encode(val); err != nil {\n\t\treturn nil, err\n\t}\n\t// Return without a trailing line feed.\n\tlineTerminatedJson := buf.Bytes()\n\treturn bytes.TrimSuffix(lineTerminatedJson, []byte(\"\\n\")), nil\n}",
"func (j *jsonNative) MarshalJSON() ([]byte, error) {\n\tvar buf fflib.Buffer\n\tif j == nil {\n\t\tbuf.WriteString(\"null\")\n\t\treturn buf.Bytes(), nil\n\t}\n\terr := j.MarshalJSONBuf(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}",
"func (j *Publisher) MarshalJSON() ([]byte, error) {\n\tvar buf fflib.Buffer\n\tif j == nil {\n\t\tbuf.WriteString(\"null\")\n\t\treturn buf.Bytes(), nil\n\t}\n\terr := j.MarshalJSONBuf(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}",
"func serialize(toMarshal interface{}) *bytes.Buffer {\n\tjsonStr, _ := json.Marshal(toMarshal)\n\treturn bytes.NewBuffer(jsonStr)\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\tif err != nil {\n\t\t//return []byte{}, fmt.Errorf(\"Erro no json %v\", err)\n\t\treturn []byte{}, errors.New(fmt.Sprintf(\"Erro no json %v\", err))\n\t}\n\treturn bs, nil\n}",
"func (r *Anilist) Marshal() ([]byte, error) {\n\treturn json.Marshal(r)\n}",
"func JSONEncode(data interface{}) string {\n\tbt, _ := json.Marshal(data)\n\treturn string(bt)\n}",
"func Marshal(o interface{}) ([]byte, error) {\n\tj, err := json.Marshal(o)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error marshaling into JSON: %v\", err)\n\t}\n\n\ty, err := JSONToYAML(j)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error converting JSON to YAML: %v\", err)\n\t}\n\n\treturn y, nil\n}",
"func (v Post) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson783c1624EncodeGithubComGobwasVk7(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {\n\tbs, fnerr := iv.MarshalJSON()\n\tf.e.marshalAsis(bs, fnerr)\n}",
"func Marshal(obj interface{}) ([]byte, error) {\n\treturn MarshalValue(reflect.ValueOf(obj))\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\tif err != nil {\n\t\t// return []byte{}, errors.New(fmt.Sprintf(\"Can't create json with error: %v\", err))\n\t\treturn []byte{}, fmt.Errorf(\"Can't create json for person: %v error: %v\", a, err)\n\t}\n\treturn bs, nil\n}",
"func (v Join) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer21(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func JsonEncode(i interface{}) string {\n\tb, err := json.Marshal(i)\n\n\tif err != nil {\n\t\tfmt.Println(\"util.getJsonStr.error\", err)\n\t\treturn \"\"\n\t}\n\n\treturn string(b)\n}",
"func (b *SampleFJSONBuilder) Marshal(orig *SampleF) ([]byte, error) {\n\tret, err := b.Convert(orig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(ret)\n}",
"func (m *Marshaler) JSON(v interface{}) ([]byte, error) {\n\tif _, ok := v.(proto.Message); ok {\n\t\tvar buf bytes.Buffer\n\t\tjm := &jsonpb.Marshaler{}\n\t\tjm.OrigName = true\n\t\tif err := jm.Marshal(&buf, v.(proto.Message)); err != nil {\n\t\t\treturn []byte{}, err\n\t\t}\n\n\t\tif m.FilterProtoJson {\n\t\t\treturn m.FilterJsonWithStruct(buf.Bytes(), v)\n\t\t}\n\t\treturn buf.Bytes(), nil\n\t}\n\treturn json.Marshal(v)\n}",
"func (v ExportItem) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson1(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func mustMarshalJSON(v interface{}) []byte {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\tpanic(\"marshal json: \" + err.Error())\n\t}\n\treturn b\n}",
"func (v publicKey) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection(w, v)\n}",
"func EncodedJson(v interface{}) []byte {\n\tif p, err := json.Marshal(v); err != nil {\n\t\treturn []byte{}\n\t} else {\n\t\treturn p\n\t}\n}",
"func (js JSONSerializable) MarshalJSON() ([]byte, error) {\n\tif !js.Valid {\n\t\treturn json.Marshal(nil)\n\t}\n\tjsWithHex := replaceBytesWithHex(js.Val)\n\treturn json.Marshal(jsWithHex)\n}",
"func MarshalJSON(v interface{}) string {\n\tcontents, _ := json.MarshalIndent(v, \"\", \" \")\n\treturn string(contents)\n}",
"func (j Json) MarshalJSON() ([]byte, error) {\n\treturn j.ToJson()\n}",
"func (j JSON) MarshalJSON() ([]byte, error) {\n\tif j.Valid {\n\t\treturn json.Marshal(j.Map)\n\t}\n\n\treturn json.Marshal(nil)\n}",
"func (p PatchObject) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"tags\", p.Tags)\n\treturn json.Marshal(objectMap)\n}",
"func (v Posts) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson783c1624EncodeGithubComGobwasVk(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (spbi SuccessfulPropertyBatchInfo) MarshalJSON() ([]byte, error) {\n\tspbi.Kind = KindSuccessful\n\tobjectMap := make(map[string]interface{})\n\tif spbi.Properties != nil {\n\t\tobjectMap[\"Properties\"] = spbi.Properties\n\t}\n\tif spbi.Kind != \"\" {\n\t\tobjectMap[\"Kind\"] = spbi.Kind\n\t}\n\treturn json.Marshal(objectMap)\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\n\tif err != nil {\n\t\treturn []byte{}, fmt.Errorf(\"Error caught by Onur Gurel\")\n\t}\n\n\treturn bs, nil\n}",
"func (v BindParams) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoTethering2(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (handler Handler) EncodeJSON(v interface{}) (b []byte, err error) {\n\n\t//if(w.Get(\"pretty\",\"false\")==\"true\"){\n\tb, err = json.MarshalIndent(v, \"\", \" \")\n\t//}else{\n\t//\tb, err = json.Marshal(v)\n\t//}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn b, nil\n}",
"func Marshal(val interface{}) ([]byte, error) {}",
"func testJSONMarshal(t *testing.T, v interface{}, want string) {\n\tj, err := json.Marshal(v)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to marshal JSON for %v\", v)\n\t}\n\n\tw := new(bytes.Buffer)\n\terr = json.Compact(w, []byte(want))\n\tif err != nil {\n\t\tt.Errorf(\"String is not valid json: %s\", want)\n\t}\n\n\tif w.String() != string(j) {\n\t\tt.Errorf(\"json.Marshal(%q) returned %s, want %s\", v, j, w)\n\t}\n\n\t// now go the other direction and make sure things unmarshal as expected\n\tu := reflect.ValueOf(v).Interface()\n\tif err := json.Unmarshal([]byte(want), u); err != nil {\n\t\tt.Errorf(\"Unable to unmarshal JSON for %v: %v\", want, err)\n\t}\n\n\tif !reflect.DeepEqual(v, u) {\n\t\tt.Errorf(\"json.Unmarshal(%q) returned %s, want %s\", want, u, v)\n\t}\n}",
"func (o *Object) MarshalJSON() ([]byte, error) {\n\tctx := _builtinJSON_stringifyContext{\n\t\tr: o.runtime,\n\t}\n\tex := o.runtime.vm.try(func() {\n\t\tif !ctx.do(o) {\n\t\t\tctx.buf.WriteString(\"null\")\n\t\t}\n\t})\n\tif ex != nil {\n\t\treturn nil, ex\n\t}\n\treturn ctx.buf.Bytes(), nil\n}",
"func testJSONMarshal(t *testing.T, v interface{}, want string) {\n\tj, err := json.Marshal(v)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to marshal JSON for %v\", v)\n\t}\n\n\tw := new(bytes.Buffer)\n\terr = json.Compact(w, []byte(want))\n\tif err != nil {\n\t\tt.Errorf(\"String is not valid json: %s\", want)\n\t}\n\n\tif w.String() != string(j) {\n\t\tt.Errorf(\"json.Marshal(%q) returned %s, want %s\", v, j, w)\n\t}\n\n\t// now go the other direction and make sure things unmarshal as expected\n\tu := reflect.ValueOf(v).Interface()\n\tif err := json.Unmarshal([]byte(want), u); err != nil {\n\t\tt.Errorf(\"Unable to unmarshal JSON for %v\", want)\n\t}\n\n\tif !reflect.DeepEqual(v, u) {\n\t\tt.Errorf(\"json.Unmarshal(%q) returned %s, want %s\", want, u, v)\n\t}\n}",
"func (v BlitzedItemResponse) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson6a975c40EncodeJsonBenchmark4(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (v item) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonD2b7633eEncodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (i Interface) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"etag\", i.Etag)\n\tpopulate(objectMap, \"extendedLocation\", i.ExtendedLocation)\n\tpopulate(objectMap, \"id\", i.ID)\n\tpopulate(objectMap, \"location\", i.Location)\n\tpopulate(objectMap, \"name\", i.Name)\n\tpopulate(objectMap, \"properties\", i.Properties)\n\tpopulate(objectMap, \"tags\", i.Tags)\n\tpopulate(objectMap, \"type\", i.Type)\n\treturn json.Marshal(objectMap)\n}",
"func marshalJSON(namingStrategy string, that interface{}) ([]byte, error) {\n\tout := map[string]interface{}{}\n\tt := reflect.TypeOf(that)\n\tv := reflect.ValueOf(that)\n\n\tfnctn := v.MethodByName(namingStrategy)\n\tfname := func(params ...interface{}) string {\n\t\tin := make([]reflect.Value, len(params))\n\t\tfor k, param := range params {\n\t\t\tin[k] = reflect.ValueOf(param)\n\t\t}\n\t\treturn fnctn.Call(in)[0].String()\n\t}\n\toutName := \"\"\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tswitch n := f.Tag.Get(\"json\"); n {\n\t\tcase \"\":\n\t\t\toutName = f.Name\n\t\tcase \"-\":\n\t\t\toutName = \"\"\n\t\tdefault:\n\t\t\toutName = fname(n)\n\t\t}\n\t\tif outName != \"\" {\n\t\t\tout[outName] = v.Field(i).Interface()\n\t\t}\n\t}\n\treturn json.Marshal(out)\n}",
"func testMarshalJSON(t *testing.T, cmd interface{}) {\n\tjsonCmd, err := json.Marshal(cmd)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tfmt.Println(string(jsonCmd))\n}",
"func (v PbTestObject) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson5fcf962eEncodeGithubComJsonIteratorGoBenchmarkWith10IntFields(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (pbi PropertyBatchInfo) MarshalJSON() ([]byte, error) {\n\tpbi.Kind = KindPropertyBatchInfo\n\tobjectMap := make(map[string]interface{})\n\tif pbi.Kind != \"\" {\n\t\tobjectMap[\"Kind\"] = pbi.Kind\n\t}\n\treturn json.Marshal(objectMap)\n}",
"func My_json(demo interface{}) *bytes.Buffer {\r\n\tif bs, err := json.Marshal(demo); err == nil {\r\n\t\treq := bytes.NewBuffer([]byte(bs))\r\n\t\treturn req\r\n\t} else {\r\n\t\tpanic(err)\r\n\t}\r\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn MarshalEx(v, false)\n}",
"func MarshalJSON(a interface{}) (b []byte, err error) {\n\tif m, ok := a.(proto.Message); ok {\n\t\tmarshaller := &jsonpb.Marshaler{}\n\t\tvar buf bytes.Buffer\n\t\terr = marshaller.Marshal(&buf, m)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tb = buf.Bytes()\n\t} else {\n\t\tb, err = json.Marshal(a)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}",
"func (v PostParams) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels10(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (v Post) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson5a72dc82EncodeGithubComTimRazumovTechnoparkDBAppModels6(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (o *ExportDataPartial) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(o.ToMap())\n}",
"func (n *Node) Marshal() ([]byte, error) {\n\treturn json.Marshal(n)\n}",
"func (n *Node) Marshal() ([]byte, error) {\n\treturn json.Marshal(n)\n}",
"func jsonEnc(in T) ([]byte, error) {\n\treturn jsonx.Marshal(in)\n}",
"func (s *HTTPServer) marshalJSON(req *http.Request, obj interface{}) ([]byte, error) {\n\tif _, ok := req.URL.Query()[\"pretty\"]; ok || s.agent.config.DevMode {\n\t\tbuf, err := json.MarshalIndent(obj, \"\", \" \")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf = append(buf, \"\\n\"...)\n\t\treturn buf, nil\n\t}\n\n\tbuf, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf, err\n}",
"func (m Message) Marshal() ([]byte, error) {\n\treturn jsoniter.Marshal(m)\n}",
"func (m Json) MarshalJSON() ([]byte, error) {\n\tif m == nil {\n\t\treturn []byte(\"null\"), nil\n\t}\n\treturn m, nil\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn NewFormatter().Marshal(v)\n}",
"func (s *ServiceSecrets) MarshalJson() ([]byte, error) {\n\treturn json.Marshal(s)\n}",
"func JsonEncode(data []byte, v interface{}) error {\n\n\treturn json.Unmarshal(data, v)\n}",
"func (out *GetOutput) Marshal() ([]byte, error) {\n\treturn json.Marshal(out)\n}"
] | [
"0.75134",
"0.7502133",
"0.7500753",
"0.74823195",
"0.7446766",
"0.7371689",
"0.73370403",
"0.7304601",
"0.72591853",
"0.72539127",
"0.72181046",
"0.717537",
"0.7162588",
"0.7161582",
"0.71608186",
"0.7072197",
"0.70587647",
"0.7044735",
"0.7022404",
"0.6973228",
"0.6963657",
"0.69578344",
"0.69243026",
"0.6924262",
"0.68824863",
"0.68681127",
"0.68572986",
"0.6818534",
"0.68102",
"0.67969906",
"0.67913324",
"0.67774016",
"0.67717487",
"0.67700523",
"0.6754375",
"0.67300195",
"0.67154574",
"0.6711641",
"0.6708163",
"0.6686554",
"0.6676971",
"0.6670713",
"0.6667217",
"0.6665734",
"0.6651805",
"0.664897",
"0.6601639",
"0.65936595",
"0.6570477",
"0.65671986",
"0.65637034",
"0.6562716",
"0.65555567",
"0.6544248",
"0.65373516",
"0.6532906",
"0.65273225",
"0.65230805",
"0.6517934",
"0.65155387",
"0.6507946",
"0.65065837",
"0.65061134",
"0.65058106",
"0.6502681",
"0.6501059",
"0.6492431",
"0.64840174",
"0.6483743",
"0.64832276",
"0.64799786",
"0.6479202",
"0.6476907",
"0.64739543",
"0.6469417",
"0.6467924",
"0.6463269",
"0.64624554",
"0.6460338",
"0.6457592",
"0.6454149",
"0.6448956",
"0.6447831",
"0.64472353",
"0.6436354",
"0.64353037",
"0.64196956",
"0.64188385",
"0.64096874",
"0.64083934",
"0.6407883",
"0.6405311",
"0.6405311",
"0.640528",
"0.6403627",
"0.6403197",
"0.6402974",
"0.64011514",
"0.6401083",
"0.6395421",
"0.6394115"
] | 0.0 | -1 |
MarshalEasyJSON supports easyjson.Marshaler interface | func (v ItemCheckResponse) MarshalEasyJSON(w *jwriter.Writer) {
easyjson6a975c40EncodeJsonBenchmark2(w, v)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v item) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(w, v)\n}",
"func (v Fruit) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels11(w, v)\n}",
"func (v BlitzedItemResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark4(w, v)\n}",
"func (ce *CustomEvent) MarshalEasyJSON(w *jwriter.Writer) {\n\tce.marshalerCtor().MarshalEasyJSON(w)\n}",
"func (v Boo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeMsgpJson(w, v)\n}",
"func (v DocumentResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark3(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk7(w, v)\n}",
"func (v invocationMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson2802b09fEncodeGithubComPhilippseithSignalr1(w, v)\n}",
"func (v Native) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson10(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson393a2a40EncodeCodegen(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels(w, v)\n}",
"func (v ExportItem) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson1(w, v)\n}",
"func (v Format) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson14(w, v)\n}",
"func (v managerListener) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonEd74d837EncodeGithubComKihamoBoggartComponentsBoggartInternalHandlers(w, v)\n}",
"func (v Part) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer12(w, v)\n}",
"func (v publicKey) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection(w, v)\n}",
"func (v VisitArray) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE564fc13EncodeGithubComLa0rgHighloadcupModel(w, v)\n}",
"func (v Banner) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson23(w, v)\n}",
"func (v MOTD) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer19(w, v)\n}",
"func (v ProductToAdd) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels1(w, v)\n}",
"func (v App) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson25(w, v)\n}",
"func (v Ingredient) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels8(w, v)\n}",
"func (v Visit) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE564fc13EncodeGithubComLa0rgHighloadcupModel1(w, v)\n}",
"func (v InfoUser) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeJsongen3(w, v)\n}",
"func (v Deal) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson8a221a72EncodeGithubComVungleVungoOpenrtb(w, v)\n}",
"func (v FormDataMQ) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson(w, v)\n}",
"func (v Msg) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels6(w, v)\n}",
"func (v Nick) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer14(w, v)\n}",
"func (v Program) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson1c6ddb42EncodeGithubComSturfeeincGlTF(w, v)\n}",
"func (v OrderCheckResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark1(w, v)\n}",
"func (v BasicUser) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeJsongen4(w, v)\n}",
"func (v Pet) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson14a1085EncodeGithubComIamStubborNPetstoreDbModels1(w, v)\n}",
"func (v ChannelForward) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer32(w, v)\n}",
"func (v Element) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson2(w, v)\n}",
"func (v Pmp) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson9(w, v)\n}",
"func (v Responce) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeGithubComSerhio83DruidPkgStructs(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson5a72dc82EncodeGithubComTimRazumovTechnoparkDBAppModels6(w, v)\n}",
"func (c Context) MarshalEasyJSON(writer *ej_jwriter.Writer) {\n\tif err := c.Err(); err != nil {\n\t\twriter.Error = err\n\t\treturn\n\t}\n\twrappedWriter := jwriter.NewWriterFromEasyJSONWriter(writer)\n\tContextSerialization.MarshalToJSONWriter(&wrappedWriter, &c)\n}",
"func (v Features) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer25(w, v)\n}",
"func (v Node) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeGithubComSkydiveProjectSkydiveGraffitiApiTypes1(w, v)\n}",
"func (v Posts) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk(w, v)\n}",
"func (v ThirdParty) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson2(w, v)\n}",
"func (v Student) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonF1627ba7EncodeGithubComDuchiporexiaGoutilsXmsgTests1(w, v)\n}",
"func (v UsersHandler) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers(w, v)\n}",
"func (v Segment) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson6(w, v)\n}",
"func (v Info) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels13(w, v)\n}",
"func (v CBPerson) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE242b40eEncodeGithubComExampleSample2(w, v)\n}",
"func (v Invite) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer22(w, v)\n}",
"func (v GetUserResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers1(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson7da3ae25EncodeCourseraGolangHomeworks(w, v)\n}",
"func (v PostSource) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk2(w, v)\n}",
"func (v ShadowModelSt) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB7ed31d3EncodeMevericcoreMccommon5(w, v)\n}",
"func (v BindParams) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoTethering2(w, v)\n}",
"func (v Error) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer26(w, v)\n}",
"func (v Deal) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson16(w, v)\n}",
"func (v Message) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer18(w, v)\n}",
"func (v PostParams) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels10(w, v)\n}",
"func (v Musician) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson62dc445bEncode20211NoskoolTeamInternalAppMusiciansModels2(w, v)\n}",
"func (v Impression) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson12(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels11(w, v)\n}",
"func (v BaseInstrumentInfo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi128(w, v)\n}",
"func (v Grade) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonF1627ba7EncodeGithubComDuchiporexiaGoutilsXmsgTests2(w, v)\n}",
"func (v Mode) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer16(w, v)\n}",
"func (v Whois) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer1(w, v)\n}",
"func (v Foo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonAbe23ddeEncodeGithubComUberZanzibarExamplesExampleGatewayBuildGenCodeClientsCorgeCorge1(w, v)\n}",
"func (v Source) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson3(w, v)\n}",
"func (v ProductExtended) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels5(w, v)\n}",
"func (v Away) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer33(w, v)\n}",
"func (c EventOutputContext) MarshalEasyJSON(writer *ej_jwriter.Writer) {\n\tif err := c.Err(); err != nil {\n\t\twriter.Error = err\n\t\treturn\n\t}\n\twrappedWriter := jwriter.NewWriterFromEasyJSONWriter(writer)\n\tContextSerialization.MarshalToJSONWriterEventOutput(&wrappedWriter, &c)\n}",
"func (v IngredientArr) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels7(w, v)\n}",
"func (v RiverConnection) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection4(w, v)\n}",
"func (v streamItemMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson2802b09fEncodeGithubComPhilippseithSignalr(w, v)\n}",
"func (v Vacancy) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeGithubComIskiyRabotauaTelegramBotPkgRabotaua4(w, v)\n}",
"func (v Item) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeDrhyuComIndexerModels2(w, v)\n}",
"func (v PostAttachement) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk6(w, v)\n}",
"func (v ExtFilter) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson795c59c6EncodeGrapeGuardRules11(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson1(w, v)\n}",
"func (v ProductShrinked) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels3(w, v)\n}",
"func (v RiverConnectionJS) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection3(w, v)\n}",
"func (v DCCSend) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer27(w, v)\n}",
"func (v MediumPayload) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE242b40eEncodeGithubComExampleSample1(w, v)\n}",
"func (v Stash) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeDrhyuComIndexerModels(w, v)\n}",
"func (v WSRequest) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer3(w, v)\n}",
"func (v ApiMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi132(w, v)\n}",
"func (v flattenedField) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson390b7126EncodeGithubComChancedPicker35(w, v)\n}",
"func (v SFMetric) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson51bca34dEncodeGithubComSkydiveProjectSkydiveSflow2(w, v)\n}",
"func (v managerHandlerDevice) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonEd74d837EncodeGithubComKihamoBoggartComponentsBoggartInternalHandlers1(w, v)\n}",
"func (v BaseOp) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi125(w, v)\n}",
"func (v Data) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson17(w, v)\n}",
"func (v ResultReq) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi54(w, v)\n}",
"func (v CreateUserResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers2(w, v)\n}",
"func (v CreateIsolatedWorldReturns) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoPage90(w, v)\n}",
"func (v ServerKeys) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection2(w, v)\n}",
"func (v ShadowUpdateMsgSt) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB7ed31d3EncodeMevericcoreMccommon1(w, v)\n}",
"func (v WSResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer2(w, v)\n}",
"func (v Content) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson18(w, v)\n}",
"func (v Join) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer21(w, v)\n}",
"func (v Bid) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson22(w, v)\n}",
"func (v moreLikeThisQuery) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson390b7126EncodeGithubComChancedPicker18(w, v)\n}",
"func (v Device) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson15(w, v)\n}"
] | [
"0.77204984",
"0.7672636",
"0.7653424",
"0.7591104",
"0.7549626",
"0.7543635",
"0.7506079",
"0.74917865",
"0.74814856",
"0.74776804",
"0.7472024",
"0.74326074",
"0.74075466",
"0.74005824",
"0.7393129",
"0.7387327",
"0.7384699",
"0.7380124",
"0.7379066",
"0.73720104",
"0.73705596",
"0.73606724",
"0.7350565",
"0.73454297",
"0.73452127",
"0.73444057",
"0.73321444",
"0.7307502",
"0.73051214",
"0.73029035",
"0.729492",
"0.72922665",
"0.7291515",
"0.72859746",
"0.72832805",
"0.7281536",
"0.72810227",
"0.72690594",
"0.7265171",
"0.7263047",
"0.72606456",
"0.7260384",
"0.725305",
"0.72479904",
"0.724739",
"0.7246577",
"0.72447056",
"0.7243153",
"0.72394603",
"0.72375077",
"0.7237401",
"0.7235131",
"0.7232335",
"0.7225127",
"0.72224236",
"0.722217",
"0.722101",
"0.7216019",
"0.7215666",
"0.72131526",
"0.7212736",
"0.71996164",
"0.7197805",
"0.71939874",
"0.71824425",
"0.7180386",
"0.71780163",
"0.7173894",
"0.71658254",
"0.7163895",
"0.7163262",
"0.7161551",
"0.7154433",
"0.7154243",
"0.7153383",
"0.71522945",
"0.71498144",
"0.71420634",
"0.71419924",
"0.7141065",
"0.7132712",
"0.71298224",
"0.71237564",
"0.71226513",
"0.71173185",
"0.7116985",
"0.7109691",
"0.71048075",
"0.7099136",
"0.7098513",
"0.70949537",
"0.7090401",
"0.7087397",
"0.7085066",
"0.70830184",
"0.70823175",
"0.7082235",
"0.7080599",
"0.7080532",
"0.7079698"
] | 0.747845 | 9 |
UnmarshalJSON supports json.Unmarshaler interface | func (v *ItemCheckResponse) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjson6a975c40DecodeJsonBenchmark2(&r, v)
return r.Error()
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (j *jsonNative) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func unmarshalJSON(j extv1.JSON, output *any) error {\n\tif len(j.Raw) == 0 {\n\t\treturn nil\n\t}\n\treturn json.Unmarshal(j.Raw, output)\n}",
"func (j *JSON) Unmarshal(input, target interface{}) error {\n\t// take the input and convert it to target\n\treturn jsonEncoding.Unmarshal(input.([]byte), target)\n}",
"func (j *Publisher) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *OneLike) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\tdecodeOneLike(&r, v)\n\treturn r.Error()\n}",
"func UnmarshalJSON(data []byte, v interface{}) {\n\terr := json.Unmarshal(data, v)\n\tAbortIf(err)\n}",
"func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {\n\td := json.NewDecoder(r)\n\tfor _, opt := range opts {\n\t\td = opt(d)\n\t}\n\tif err := d.Decode(&o); err != nil {\n\t\treturn fmt.Errorf(\"while decoding JSON: %v\", err)\n\t}\n\treturn nil\n}",
"func UnmarshalJSON(b []byte, discriminator string, f Factory) (interface{}, error) {\n\tm := make(map[string]interface{})\n\terr := json.Unmarshal(b, &m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Decode(m, discriminator, f)\n}",
"func UnmarshalFromJSON(data []byte, target interface{}) error {\n\tvar ctx map[string]interface{}\n\terr := json.Unmarshal(data, &ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn Unmarshal(ctx, target)\n}",
"func (s *Serializer) Unmarshal(data []byte, v interface{}) error {\n\treturn jsoniter.Unmarshal(data,v)\n}",
"func (j *ThirdParty) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func unmarshal() {\n\tfmt.Println(\"=== json.unmarshal ===\")\n\tvar jsonBlob = []byte(`[\n\t\t{\"name\": \"Bill\", \"age\": 109},\n\t\t{\"name\": \"Bob\", \"age\": 5}\n\t]`)\n\n\tvar persons []Person\n\terr := json.Unmarshal(jsonBlob, &persons)\n\tcheck(err)\n\n\tfmt.Printf(\"%+v\\n\", persons)\n}",
"func (j *Data) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func (j *Type) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (this *Simple) UnmarshalJSON(b []byte) error {\n\treturn TypesUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *Response) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (pl PLUtil) Unmarshal(data []byte, v interface{}) error {\n\tcmd := pl.execCommand(\n\t\t\"plutil\",\n\t\t\"-convert\", \"json\",\n\t\t// Read from stdin.\n\t\t\"-\",\n\t\t// Output to stdout.\n\t\t\"-o\", \"-\")\n\tcmd.Stdin = bytes.NewReader(data)\n\tstdout, err := cmd.Output()\n\tif exitErr, ok := err.(*exec.ExitError); ok {\n\t\treturn fmt.Errorf(\"`%s` failed (%w) with stderr: %s\", cmd, err, exitErr.Stderr)\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"`%s` failed (%w)\", cmd, err)\n\t}\n\tif err := json.Unmarshal(stdout, v); err != nil {\n\t\treturn fmt.Errorf(\"failed to parse json: %w\", err)\n\t}\n\treturn nil\n}",
"func (j *LuaFunction) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *BlitzedItemResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark4(&r, v)\n\treturn r.Error()\n}",
"func Unmarshal(data []byte, v Unmarshaler) error {\n\tl := jlexer.Lexer{Data: data}\n\tv.UnmarshalTinyJSON(&l)\n\treturn l.Error()\n}",
"func (j *Producer) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *User) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *Json) UnmarshalJSON(b []byte) error {\n\tr, err := loadContentWithOptions(b, Options{\n\t\tType: ContentTypeJson,\n\t\tStrNumber: true,\n\t})\n\tif r != nil {\n\t\t// Value copy.\n\t\t*j = *r\n\t}\n\treturn err\n}",
"func (j *Error) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *Element) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson2(&r, v)\n\treturn r.Error()\n}",
"func (j *Message) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func jsonDecode(reader io.ReadCloser, v interface{}) error {\n\tdecoder := json.NewDecoder(reader)\n\terr := decoder.Decode(v)\n\treturn err\n}",
"func (j *FactoryPluginRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (m *gohaiMarshaler) UnmarshalJSON(bytes []byte) error {\n\tfirstUnmarshall := \"\"\n\terr := json.Unmarshal(bytes, &firstUnmarshall)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal([]byte(firstUnmarshall), &(m.gohai))\n\treturn err\n}",
"func (j *Packet) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (receiver *Type) UnmarshalJSON(src []byte) error {\n\tif nil == receiver {\n\t\treturn errNilReceiver\n\t}\n\n\tvar s string\n\tif err := json.Unmarshal(src, &s); nil != err {\n\t\treturn err\n\t}\n\n\tif err := receiver.Scan(s); nil != err {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (j *qProxyClient) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func Unmarshal(data []byte, v Unmarshaler) error {\n\tl := jlexer.Lexer{Data: data}\n\tv.UnmarshalEasyJSON(&l)\n\treturn l.Error()\n}",
"func (j *TextMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func (w *Entry) UnmarshalJSON(bb []byte) error {\n\t<<!!YOUR_CODE!!>>\n}",
"func UnmarshalJSON(body io.Reader, v interface{}) error {\n\tdecoder := json.NewDecoder(body)\n\treturn decoder.Decode(v)\n}",
"func Unmarshal(data []byte) (interface{}, error) {\n\tvar value marble\n\terr := json.Unmarshal(data, &value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &value, nil\n}",
"func (j *RunPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (i *Transform) UnmarshalJSON(data []byte) error {\n\tvar s string\n\tif err := json.Unmarshal(data, &s); err != nil {\n\t\treturn fmt.Errorf(\"Transform should be a string, got %[1]s\", data)\n\t}\n\n\tvar err error\n\t*i, err = ParseTransformString(s)\n\treturn err\n}",
"func (this *Service) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (v *OneUpdateLike) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\tdecodeOneUpdateLike(&r, v)\n\treturn r.Error()\n}",
"func (j *RunRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *Raw) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer10(&r, v)\n\treturn r.Error()\n}",
"func (j *Server) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *JSONText) Unmarshal(v interface{}) error {\n\treturn json.Unmarshal([]byte(*j), v)\n}",
"func (j *JsonlMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func (j *PublishMessagesResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *Event) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *FactoryPluginPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j JSON) Unmarshal(dest interface{}) error {\n\tif dest == nil {\n\t\treturn errors.New(\"destination is nil, not a valid pointer to an object\")\n\t}\n\n\t// Call our implementation of\n\t// JSON MarshalJSON through json.Marshal\n\t// to get the value of the JSON object\n\tres, err := json.Marshal(j)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn json.Unmarshal(res, dest)\n}",
"func (v *User) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson9e1087fdDecodeHw3Bench(&r, v)\n\treturn r.Error()\n}",
"func Unmarshal(data []byte, v interface{}) error {\n\terr := json.Unmarshal(data, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ImplementsPostJSONUnmarshaler(v) {\n\t\terr := v.(PostJSONUnmarshaler).PostUnmarshalJSON()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func FromJSON(i interface{}, r io.Reader) error {\n\td := json.NewDecoder(r)\n\treturn d.Decode(i)\n}",
"func (v *FormulaAndFunctionResponseFormat) UnmarshalJSON(src []byte) error {\n\tvar value string\n\terr := json.Unmarshal(src, &value)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*v = FormulaAndFunctionResponseFormat(value)\n\treturn nil\n}",
"func (j *GetMessagesResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *RespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {\n\tf.d.jsonUnmarshalV(tm)\n}",
"func (j *LuaTable) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *Regulations) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *User) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson2bc03518DecodeLangTaskOnBench(&r, v)\n\treturn r.Error()\n}",
"func Unmarshal(b []byte, v interface{}) error {\n\treturn json.Unmarshal(b, v)\n}",
"func (v *UnloadCheckResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark(&r, v)\n\treturn r.Error()\n}",
"func (this *DeploymentStrategy) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *LuaInt) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *Visit) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel1(&r, v)\n\treturn r.Error()\n}",
"func (u *Unstructured) UnmarshalJSON(b []byte) error {\n\t_, _, err := UnstructuredJSONScheme.Decode(b, nil, u)\n\treturn err\n}",
"func UnmarshalJSON(b []byte) (dgo.Value, error) {\n\tdec := json.NewDecoder(bytes.NewReader(b))\n\tdec.UseNumber()\n\treturn jsonDecodeValue(dec)\n}",
"func unmarshal(data []byte, v interface{}) {\n\terr := json.Unmarshal(data, v)\n\tassert(err == nil, \"unmarshal error: %s\", err)\n}",
"func (j *Balance) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (t *Type) UnmarshalJSON(b []byte) error {\n\tvar text string\n\tif err := json.Unmarshal(b, &text); err != nil {\n\t\treturn err\n\t}\n\n\treturn t.UnmarshalText([]byte(text))\n}",
"func JSONDecode(ctx context.Context, data []byte, obj interface{}) error {\n\treturn json.Unmarshal(data, obj)\n}",
"func (j *RegisterRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *ListPluginRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (this *Probe) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *BootInitiationRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *VisitArray) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel(&r, v)\n\treturn r.Error()\n}",
"func (v *ExportItem) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson1(&r, v)\n\treturn r.Error()\n}",
"func (this *ImportedReference) UnmarshalJSON(b []byte) error {\n\treturn TypesUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *LuaString) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (jz JSONGzipEncoding) Unmarshal(data []byte, value interface{}) error {\n\tjsonData, err := GzipDecode(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(jsonData, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (i *Interface) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"etag\":\n\t\t\terr = unpopulate(val, \"Etag\", &i.Etag)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"extendedLocation\":\n\t\t\terr = unpopulate(val, \"ExtendedLocation\", &i.ExtendedLocation)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &i.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"location\":\n\t\t\terr = unpopulate(val, \"Location\", &i.Location)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &i.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &i.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tags\":\n\t\t\terr = unpopulate(val, \"Tags\", &i.Tags)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &i.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func Unmarshal(data []byte, typ DataFormat, target interface{}) {\n\tswitch typ {\n\tcase GOB:\n\t\tbuf := bytes.NewReader(data)\n\t\tgob.NewDecoder(buf).Decode(target)\n\n\tdefault:\n\t\tif err := json.Unmarshal(data, target); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}",
"func (j *Json) UnmarshalJSON(data []byte) error {\n\terr := json.Unmarshal(data, &j.data)\n\n\tj.exists = (err == nil)\n\treturn err\n}",
"func (v *PbTestObject) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson5fcf962eDecodeGithubComJsonIteratorGoBenchmarkWith10IntFields(&r, v)\n\treturn r.Error()\n}",
"func (j *ModifyQueueResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *UnInstallRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func unmarshalJSON(i *big.Int, bz []byte) error {\n\tvar text string\n\terr := json.Unmarshal(bz, &text)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn i.UnmarshalText([]byte(text))\n}",
"func (j *LuaBool) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *WorkerCreateOperation) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func jsonDec(t reflect.Type, in []byte) (T, error) {\n\tval := reflect.New(t)\n\tif err := jsonx.Unmarshal(val.Interface(), in); err != nil {\n\t\treturn nil, err\n\t}\n\treturn val.Elem().Interface(), nil\n}",
"func (z *Int) UnmarshalJSON(text []byte) error {}",
"func (j *PeerInfo) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (obj *miner) UnmarshalJSON(data []byte) error {\n\tins := new(JSONMiner)\n\terr := json.Unmarshal(data, ins)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpr, err := createMinerFromJSON(ins)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinsMiner := pr.(*miner)\n\tobj.toTransact = insMiner.toTransact\n\tobj.queue = insMiner.queue\n\tobj.broadcasted = insMiner.broadcasted\n\tobj.toLink = insMiner.toLink\n\treturn nil\n}",
"func (j *UnInstallPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func deJSONify(i interface{}) (interface{}, error) {\n\tvar data []byte\n\tswitch t := i.(type) {\n\tcase string:\n\t\tdata = []byte(t)\n\tcase []byte:\n\t\tdata = t\n\tcase json.RawMessage:\n\t\tdata = []byte(t)\n\tdefault:\n\t\treturn i, nil\n\t}\n\tvar x interface{}\n\tif err := json.Unmarshal(data, &x); err != nil {\n\t\treturn nil, &kivik.Error{HTTPStatus: http.StatusBadRequest, Err: err}\n\t}\n\treturn x, nil\n}",
"func (v *DocumentResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark3(&r, v)\n\treturn r.Error()\n}",
"func (c *JSONCodec) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func NewJSONUnmarshaler(resolver Resolver) Unmarshaler {\n\treturn newJSONUnmarshaler(resolver)\n}",
"func NewJSONUnmarshaler(resolver Resolver) Unmarshaler {\n\treturn newJSONUnmarshaler(resolver)\n}"
] | [
"0.70113605",
"0.698139",
"0.6947301",
"0.6867781",
"0.68005323",
"0.67680764",
"0.6741481",
"0.67051035",
"0.6688701",
"0.66797084",
"0.6676911",
"0.6669605",
"0.6661001",
"0.66579056",
"0.6652777",
"0.66498846",
"0.6632663",
"0.663189",
"0.6627629",
"0.66243863",
"0.6612909",
"0.6587119",
"0.65519077",
"0.6545157",
"0.6537283",
"0.6533197",
"0.6532074",
"0.6526187",
"0.6518123",
"0.6512875",
"0.6505786",
"0.64908326",
"0.64847505",
"0.64830405",
"0.64820194",
"0.6469316",
"0.64528453",
"0.64508975",
"0.6441661",
"0.6441397",
"0.6438974",
"0.6438737",
"0.642948",
"0.6408435",
"0.640738",
"0.6396278",
"0.6394157",
"0.6385808",
"0.63855124",
"0.63844603",
"0.6375449",
"0.63702816",
"0.63625103",
"0.63553596",
"0.63552856",
"0.63477194",
"0.6344893",
"0.6339914",
"0.6331977",
"0.63298523",
"0.6323917",
"0.63238263",
"0.6318712",
"0.631284",
"0.63110864",
"0.6310182",
"0.6305762",
"0.63040566",
"0.62972116",
"0.62931895",
"0.6291462",
"0.62913823",
"0.62810636",
"0.6280757",
"0.6274565",
"0.6273215",
"0.62724316",
"0.62711626",
"0.6271133",
"0.62660044",
"0.6263724",
"0.62587553",
"0.62568384",
"0.6255846",
"0.6252581",
"0.62471205",
"0.6244156",
"0.6241776",
"0.62323636",
"0.62298375",
"0.6226608",
"0.6226587",
"0.62243664",
"0.6220156",
"0.6218001",
"0.6216062",
"0.6215216",
"0.621471",
"0.62088907",
"0.62088907"
] | 0.62590677 | 81 |
UnmarshalEasyJSON supports easyjson.Unmarshaler interface | func (v *ItemCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjson6a975c40DecodeJsonBenchmark2(l, v)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *BlitzedItemResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark4(l, v)\n}",
"func (v *Fruit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels11(l, v)\n}",
"func (v *Boo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeMsgpJson(l, v)\n}",
"func (v *Element) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson2(l, v)\n}",
"func (c *Context) UnmarshalEasyJSON(in *jlexer.Lexer) {\n\tContextSerialization.UnmarshalFromEasyJSONLexer(in, c)\n}",
"func (v *Format) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson14(l, v)\n}",
"func (v *DetectedFruit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels12(l, v)\n}",
"func (v *item) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(l, v)\n}",
"func (v *Native) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson10(l, v)\n}",
"func (v *FormDataMQ) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson(l, v)\n}",
"func (v *DocumentResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark3(l, v)\n}",
"func (v *Node) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComSkydiveProjectSkydiveGraffitiApiTypes1(l, v)\n}",
"func (v *flattenedField) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker35(l, v)\n}",
"func (v *ExtFilter) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson795c59c6DecodeGrapeGuardRules11(l, v)\n}",
"func (v *Deal) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson8a221a72DecodeGithubComVungleVungoOpenrtb(l, v)\n}",
"func (v *OrderCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark1(l, v)\n}",
"func (v *Visit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel1(l, v)\n}",
"func (v *Error) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer26(l, v)\n}",
"func (v *GetUserResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson84c0690eDecodeMainHandlers1(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson9e1087fdDecodeHw3Bench(l, v)\n}",
"func (v *IngredientArr) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels7(l, v)\n}",
"func (v *VisitArray) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel(l, v)\n}",
"func (v *Foo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonAbe23ddeDecodeGithubComUberZanzibarExamplesExampleGatewayBuildGenCodeClientsCorgeCorge1(l, v)\n}",
"func (v *Ingredient) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels8(l, v)\n}",
"func (v *Musician) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson62dc445bDecode20211NoskoolTeamInternalAppMusiciansModels2(l, v)\n}",
"func (v *ThirdParty) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson2(l, v)\n}",
"func (v *Data) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson17(l, v)\n}",
"func (v *Deal) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson16(l, v)\n}",
"func (v *Raw) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer10(l, v)\n}",
"func (v *EasyResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6ff3ac1dDecodeGithubComWenweihBitcoinRpcGolangProto1(l, v)\n}",
"func (v *AdvFormData) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson3(l, v)\n}",
"func (v *Message) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer18(l, v)\n}",
"func (v *Teacher) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonF1627ba7DecodeGithubComDuchiporexiaGoutilsXmsgTests(l, v)\n}",
"func (v *Invite) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer22(l, v)\n}",
"func (v *CBPerson) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE242b40eDecodeGithubComExampleSample2(l, v)\n}",
"func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson326edDecodeGithubComMxmCherryOpenrtb(l, v)\n}",
"func (v *BidRequest) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson21(l, v)\n}",
"func (v *Impression) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson12(l, v)\n}",
"func (v *Msg) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels6(l, v)\n}",
"func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson326edDecodeGithubComApplifierGoOpenrtbOpenrtb2(l, v)\n}",
"func (v *Info) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC80ae7adDecodeGithubComDeiklovTechDbRomanovAndrGolangModels13(l, v)\n}",
"func (v *MediumPayload) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE242b40eDecodeGithubComExampleSample1(l, v)\n}",
"func (v *Part) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer12(l, v)\n}",
"func (v *ProductExtendedArr) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels4(l, v)\n}",
"func (v *Whois) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer1(l, v)\n}",
"func (v *App) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson25(l, v)\n}",
"func (v *Content) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson18(l, v)\n}",
"func (v *Responce) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeGithubComSerhio83DruidPkgStructs(l, v)\n}",
"func (v *TransactionResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE82c8e88DecodeGithubComKamaiuOandaGoModel5(l, v)\n}",
"func (v *ProductExtended) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels5(l, v)\n}",
"func (v *Error) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdproto2(l, v)\n}",
"func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson20(l, v)\n}",
"func (v *HireManager) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonAf94a8adDecodeGithubComGoParkMailRu20192ComandusInternalModel(l, v)\n}",
"func (v *PlantainerShadowMetadataSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5bd79fa1DecodeMevericcoreMcplantainer9(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels(l, v)\n}",
"func (v *RespStruct) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeDrhyuComIndexerModels1(l, v)\n}",
"func (v *Item) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeDrhyuComIndexerModels2(l, v)\n}",
"func (v *Annotation) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeGithubComSerhio83DruidPkgStructs3(l, v)\n}",
"func (v *Fundamental) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson3e8ab7adDecodeGithubComAlpacahqAlpacaTradeApiGoV3Alpaca14(l, v)\n}",
"func (v *BasicUser) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeJsongen4(l, v)\n}",
"func (v *Features) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer25(l, v)\n}",
"func (v *Edge) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComSkydiveProjectSkydiveGraffitiApiTypes2(l, v)\n}",
"func (v *ShadowModelSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB7ed31d3DecodeMevericcoreMccommon5(l, v)\n}",
"func (v *AdvForm) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson4(l, v)\n}",
"func (v *binaryField) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker46(l, v)\n}",
"func (v *ShadowUpdateMsgSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB7ed31d3DecodeMevericcoreMccommon1(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson393a2a40DecodeCodegen(l, v)\n}",
"func (v *InfoUser) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeJsongen3(l, v)\n}",
"func (v *Message) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdproto1(l, v)\n}",
"func (v *Pmp) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson9(l, v)\n}",
"func (v *MOTD) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer19(l, v)\n}",
"func (v *Attack) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComGoParkMailRu2018242GameServerTypes4(l, v)\n}",
"func (v *moreLikeThisQuery) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker18(l, v)\n}",
"func (v *ExportItem) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson1(l, v)\n}",
"func (v *EventLoadEventFired) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdprotoPage70(l, v)\n}",
"func (v *managerListener) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonEd74d837DecodeGithubComKihamoBoggartComponentsBoggartInternalHandlers(l, v)\n}",
"func (v *WSResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer2(l, v)\n}",
"func (v *UnloadCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark(l, v)\n}",
"func (v *PbTestObject) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5fcf962eDecodeGithubComJsonIteratorGoBenchmarkWith10IntFields(l, v)\n}",
"func (v *Student) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonF1627ba7DecodeGithubComDuchiporexiaGoutilsXmsgTests1(l, v)\n}",
"func (v *Device) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson15(l, v)\n}",
"func (v *Messages) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer17(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson2bc03518DecodeLangTaskOnBench(l, v)\n}",
"func (v *BaseTickerInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi117(l, v)\n}",
"func (v *Topic) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer5(l, v)\n}",
"func (v *BaseLedgerInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi127(l, v)\n}",
"func (v *Banner) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson23(l, v)\n}",
"func (v *APIError) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson3e8ab7adDecodeGithubComAlpacahqAlpacaTradeApiGoV3Alpaca24(l, v)\n}",
"func (v *Bid) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson22(l, v)\n}",
"func (v *Post) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson783c1624DecodeGithubComGobwasVk7(l, v)\n}",
"func (v *BaseTradeInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi116(l, v)\n}",
"func (v *MusicianFullInformation) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson62dc445bDecode20211NoskoolTeamInternalAppMusiciansModels1(l, v)\n}",
"func (v *matchRule) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker19(l, v)\n}",
"func (v *managerHandlerDevice) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonEd74d837DecodeGithubComKihamoBoggartComponentsBoggartInternalHandlers1(l, v)\n}",
"func (v *ResultReq) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi54(l, v)\n}",
"func (v *invocationMessage) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson2802b09fDecodeGithubComPhilippseithSignalr1(l, v)\n}",
"func (v *fuzzyRule) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker34(l, v)\n}",
"func (v *PlantainerShadowSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5bd79fa1DecodeMevericcoreMcplantainer8(l, v)\n}",
"func (v *completionMessage) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson2802b09fDecodeGithubComPhilippseithSignalr5(l, v)\n}",
"func (v *Source) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson3(l, v)\n}"
] | [
"0.73436594",
"0.73405373",
"0.72584677",
"0.72040373",
"0.71776104",
"0.71510446",
"0.7143438",
"0.71413064",
"0.7112999",
"0.7103849",
"0.71005577",
"0.7097653",
"0.7085183",
"0.70850646",
"0.7081146",
"0.7077145",
"0.70403785",
"0.70357895",
"0.7030433",
"0.7028725",
"0.7021155",
"0.70114094",
"0.70109946",
"0.70103574",
"0.7002987",
"0.69937176",
"0.6981908",
"0.6981736",
"0.69811034",
"0.6980795",
"0.69711286",
"0.6965327",
"0.695678",
"0.69543517",
"0.6948873",
"0.69404715",
"0.69387776",
"0.6935085",
"0.6930436",
"0.6922759",
"0.6904652",
"0.6894174",
"0.68897486",
"0.6889671",
"0.6888647",
"0.6887437",
"0.6887124",
"0.68862444",
"0.68853265",
"0.68804044",
"0.6874087",
"0.6870016",
"0.6869092",
"0.6868185",
"0.6858964",
"0.6846011",
"0.68405616",
"0.6836571",
"0.6835831",
"0.68291616",
"0.6823791",
"0.6822216",
"0.6817067",
"0.6815519",
"0.68133044",
"0.6812743",
"0.6811037",
"0.68107563",
"0.6809271",
"0.680744",
"0.68065774",
"0.68030846",
"0.68029016",
"0.67965585",
"0.6794714",
"0.678028",
"0.67772484",
"0.6772522",
"0.67714006",
"0.6769638",
"0.67685604",
"0.67657346",
"0.6763771",
"0.67634416",
"0.6762939",
"0.67570746",
"0.6756749",
"0.6754731",
"0.6750861",
"0.6749626",
"0.6745531",
"0.6744763",
"0.6743289",
"0.67418313",
"0.6734197",
"0.6732776",
"0.67303044",
"0.67287326",
"0.67265445",
"0.67261595"
] | 0.71286225 | 8 |
MarshalJSON supports json.Marshaler interface | func (v DocumentResponse) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjson6a975c40EncodeJsonBenchmark3(&w, v)
return w.Buffer.BuildBytes(), w.Error
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Marshal(v Marshaler) ([]byte, error) {\n\tw := jwriter.Writer{}\n\tv.MarshalEasyJSON(&w)\n\treturn w.BuildBytes()\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tif ImplementsPreJSONMarshaler(v) {\n\t\terr := v.(PreJSONMarshaler).PreMarshalJSON()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn json.Marshal(v)\n}",
"func (j *JSON) Marshal(target interface{}) (output interface{}, err error) {\n\treturn jsonEncoding.Marshal(target)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(v Marshaler) ([]byte, error) {\n\tif isNilInterface(v) {\n\t\treturn nullBytes, nil\n\t}\n\n\tw := jwriter.Writer{}\n\tv.MarshalTinyJSON(&w)\n\treturn w.BuildBytes()\n}",
"func JsonMarshal(t interface{}) ([]byte, error) {\n\tbuffer := &bytes.Buffer{}\n\tenc := json.NewEncoder(buffer)\n\tenc.SetEscapeHTML(false)\n\terr := enc.Encode(t)\n\treturn buffer.Bytes(), err\n}",
"func jsonMarshal(t interface{}) ([]byte, error) {\n\tbuffer := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buffer)\n\tencoder.SetEscapeHTML(false)\n\tencoder.SetIndent(\"\", \" \")\n\terr := encoder.Encode(t)\n\treturn buffer.Bytes(), err\n}",
"func marshal() {\n\tfmt.Println(\"=== json.marshal ===\")\n\tryan := &Person{\"Ryan\", 25}\n\twire, err := json.Marshal(ryan)\n\tcheck(err)\n\tfmt.Println(string(wire))\n}",
"func jsonMarshal(t interface{}) ([]byte, error) {\n\tvar buffer bytes.Buffer\n\tencoder := json.NewEncoder(&buffer)\n\tencoder.SetEscapeHTML(false)\n\tif err := encoder.Encode(t); err != nil {\n\t\treturn nil, err\n\t}\n\t// Prettify\n\tvar out bytes.Buffer\n\tif err := json.Indent(&out, buffer.Bytes(), \"\", \"\\t\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn out.Bytes(), nil\n}",
"func JSONEncoder() Encoder { return jsonEncoder }",
"func JSONMarshal(data interface{}) ([]byte, error) {\n\tvar b []byte\n\tvar err error\n\n\tb, err = json.MarshalIndent(data, \"\", \" \")\n\n\treturn b, err\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn canonicaljson.Transform(b)\n}",
"func (c *JsonCodec) Marshal(object interface{}, options map[string]interface{}) ([]byte, error) {\n\treturn jsonEncoding.Marshal(object)\n}",
"func (c *JSONCodec) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(in interface{}) ([]byte, error) {\n\tres, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(in)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"marshaling error: %w\", err)\n\t}\n\treturn res, nil\n}",
"func Marshal(p Payload) ([]byte, error) {\n\treturn json.Marshal(p)\n}",
"func (j *JsonlMarshaler) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func (s *Serializer) Marshal(v interface{}) ([]byte, error) {\n\treturn jsoniter.Marshal(v)\n}",
"func (j *TextMarshaler) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\treturn replaceUnicodeConversion(b), err\n}",
"func Marshal(object interface{}) (data string, err error) {\n\tif t, err := json.Marshal(object); err != nil {\n\t\tdata = \"\"\n\t} else {\n\t\tdata = string(t)\n\t}\n\treturn\n}",
"func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) {\n\tswitch v.(type) {\n\tcase *distribute.GetResponse:\n\t\tvalue, err := protobuf.MarshalAny(v.(*distribute.GetResponse).Fields)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn json.Marshal(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"fields\": value,\n\t\t\t},\n\t\t)\n\tcase *distribute.SearchResponse:\n\t\tvalue, err := protobuf.MarshalAny(v.(*distribute.SearchResponse).SearchResult)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn json.Marshal(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"search_result\": value,\n\t\t\t},\n\t\t)\n\tdefault:\n\t\treturn json.Marshal(v)\n\t}\n}",
"func (j *JSON) Marshal(obj interface{}) error {\n\tres, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Call our implementation of\n\t// JSON UnmarshalJSON through json.Unmarshal\n\t// to set the result to the JSON object\n\treturn json.Unmarshal(res, j)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\trv := reflect.ValueOf(v)\n\tif rv.Kind() != reflect.Slice {\n\t\treturn nil, &InvalidMarshalError{rv.Kind()}\n\t}\n\n\tvar buf bytes.Buffer\n\tencoder := json.NewEncoder(&buf)\n\tfor i := 0; i < rv.Len(); i++ {\n\t\tif err := encoder.Encode(rv.Index(i).Interface()); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn buf.Bytes(), nil\n}",
"func marshalJSON(i *big.Int) ([]byte, error) {\n\ttext, err := i.MarshalText()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(string(text))\n}",
"func (p *HJSON) Marshal(o map[string]interface{}) ([]byte, error) {\n\treturn hjson.Marshal(o)\n}",
"func JSONMarshal(v interface{}) ([]byte, error) {\n\tb, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn b, err\n\t}\n\tb = bytes.Replace(b, []byte(\"\\\\u003c\"), []byte(\"<\"), -1)\n\tb = bytes.Replace(b, []byte(\"\\\\u003e\"), []byte(\">\"), -1)\n\tb = bytes.Replace(b, []byte(\"\\\\u0026\"), []byte(\"&\"), -1)\n\treturn b, err\n}",
"func JSONMarshal(content interface{}, escape bool) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := json.NewEncoder(&buf)\n\tenc.SetEscapeHTML(escape)\n\tif err := enc.Encode(content); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}",
"func MarshalJSON(v interface{}, config MarshalConfig) ([]byte, error) {\n\tres, err := Marshal(v, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(res)\n}",
"func (jz JSONGzipEncoding) Marshal(v interface{}) ([]byte, error) {\n\tbuf, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// var bufSizeBefore = len(buf)\n\n\tbuf, err = GzipEncode(buf)\n\t// coloredoutput.Infof(\"gzip_json_compress_ratio=%d/%d=%.2f\",\n\t// bufSizeBefore, len(buf), float64(bufSizeBefore)/float64(len(buf)))\n\treturn buf, err\n}",
"func (c *Codec) Marshal(v interface{}) ([]byte, error) {\n\tresult, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, xerrors.New(err.Error())\n\t}\n\n\treturn result, nil\n}",
"func Marshal(obj interface{}) ([]byte, error) {\n\treturn mgobson.Marshal(obj)\n}",
"func jsonify(v interface{}) string { return string(mustMarshalJSON(v)) }",
"func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) {\n\tif isNilInterface(v) {\n\t\treturn w.Write(nullBytes)\n\t}\n\n\tjw := jwriter.Writer{}\n\tv.MarshalTinyJSON(&jw)\n\treturn jw.DumpTo(w)\n}",
"func (sc *Contract) Marshal() ([]byte, error) {\n\treturn json.Marshal(sc)\n}",
"func (f *Formatter) Marshal(v interface{}) ([]byte, error) {\n\tdata, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn f.Format(data)\n}",
"func marshal(v interface{}) []byte {\n\tb, err := json.Marshal(v)\n\tassert(err == nil, \"marshal error: %s\", err)\n\treturn b\n}",
"func JSONMarshal(obj interface{}) ([]byte, error) {\n\tb := new(bytes.Buffer)\n\tenc := json.NewEncoder(b)\n\tenc.SetEscapeHTML(false)\n\terr := enc.Encode(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// json.NewEncoder.Encode adds a final '\\n', json.Marshal does not.\n\t// Let's keep the default json.Marshal behaviour.\n\tres := b.Bytes()\n\tif len(res) >= 1 && res[len(res)-1] == '\\n' {\n\t\tres = res[:len(res)-1]\n\t}\n\treturn res, nil\n}",
"func DefaultMarshalJSON(obj interface{}) ([]byte, error) {\n\treturn json.MarshalIndent(obj, \"\", \" \")\n}",
"func marshal(v interface{}) string {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(b)\n}",
"func MarshalJSON(v interface{}) []byte {\n\tdata, err := json.Marshal(v)\n\tAbortIf(err)\n\treturn data\n}",
"func encode(ins interface{}) ([]byte, error) {\n\treturn json.Marshal(ins)\n}",
"func Marshal(data interface{}) ([]byte, error) {\n\tdocument, err := MarshalToStruct(data, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn json.Marshal(document)\n}",
"func NewJSONMarshaler() Marshaler {\n\treturn newJSONMarshaler()\n}",
"func JsonMarshal(val any) ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buf)\n\tencoder.SetEscapeHTML(false)\n\tif err := encoder.Encode(val); err != nil {\n\t\treturn nil, err\n\t}\n\t// Return without a trailing line feed.\n\tlineTerminatedJson := buf.Bytes()\n\treturn bytes.TrimSuffix(lineTerminatedJson, []byte(\"\\n\")), nil\n}",
"func (j *jsonNative) MarshalJSON() ([]byte, error) {\n\tvar buf fflib.Buffer\n\tif j == nil {\n\t\tbuf.WriteString(\"null\")\n\t\treturn buf.Bytes(), nil\n\t}\n\terr := j.MarshalJSONBuf(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}",
"func (j *Publisher) MarshalJSON() ([]byte, error) {\n\tvar buf fflib.Buffer\n\tif j == nil {\n\t\tbuf.WriteString(\"null\")\n\t\treturn buf.Bytes(), nil\n\t}\n\terr := j.MarshalJSONBuf(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}",
"func serialize(toMarshal interface{}) *bytes.Buffer {\n\tjsonStr, _ := json.Marshal(toMarshal)\n\treturn bytes.NewBuffer(jsonStr)\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\tif err != nil {\n\t\t//return []byte{}, fmt.Errorf(\"Erro no json %v\", err)\n\t\treturn []byte{}, errors.New(fmt.Sprintf(\"Erro no json %v\", err))\n\t}\n\treturn bs, nil\n}",
"func (r *Anilist) Marshal() ([]byte, error) {\n\treturn json.Marshal(r)\n}",
"func JSONEncode(data interface{}) string {\n\tbt, _ := json.Marshal(data)\n\treturn string(bt)\n}",
"func Marshal(o interface{}) ([]byte, error) {\n\tj, err := json.Marshal(o)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error marshaling into JSON: %v\", err)\n\t}\n\n\ty, err := JSONToYAML(j)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error converting JSON to YAML: %v\", err)\n\t}\n\n\treturn y, nil\n}",
"func (v Post) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson783c1624EncodeGithubComGobwasVk7(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {\n\tbs, fnerr := iv.MarshalJSON()\n\tf.e.marshalAsis(bs, fnerr)\n}",
"func Marshal(obj interface{}) ([]byte, error) {\n\treturn MarshalValue(reflect.ValueOf(obj))\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\tif err != nil {\n\t\t// return []byte{}, errors.New(fmt.Sprintf(\"Can't create json with error: %v\", err))\n\t\treturn []byte{}, fmt.Errorf(\"Can't create json for person: %v error: %v\", a, err)\n\t}\n\treturn bs, nil\n}",
"func (v Join) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer21(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func JsonEncode(i interface{}) string {\n\tb, err := json.Marshal(i)\n\n\tif err != nil {\n\t\tfmt.Println(\"util.getJsonStr.error\", err)\n\t\treturn \"\"\n\t}\n\n\treturn string(b)\n}",
"func (b *SampleFJSONBuilder) Marshal(orig *SampleF) ([]byte, error) {\n\tret, err := b.Convert(orig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(ret)\n}",
"func (m *Marshaler) JSON(v interface{}) ([]byte, error) {\n\tif _, ok := v.(proto.Message); ok {\n\t\tvar buf bytes.Buffer\n\t\tjm := &jsonpb.Marshaler{}\n\t\tjm.OrigName = true\n\t\tif err := jm.Marshal(&buf, v.(proto.Message)); err != nil {\n\t\t\treturn []byte{}, err\n\t\t}\n\n\t\tif m.FilterProtoJson {\n\t\t\treturn m.FilterJsonWithStruct(buf.Bytes(), v)\n\t\t}\n\t\treturn buf.Bytes(), nil\n\t}\n\treturn json.Marshal(v)\n}",
"func (v ExportItem) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson1(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func mustMarshalJSON(v interface{}) []byte {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\tpanic(\"marshal json: \" + err.Error())\n\t}\n\treturn b\n}",
"func (v publicKey) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection(w, v)\n}",
"func EncodedJson(v interface{}) []byte {\n\tif p, err := json.Marshal(v); err != nil {\n\t\treturn []byte{}\n\t} else {\n\t\treturn p\n\t}\n}",
"func (js JSONSerializable) MarshalJSON() ([]byte, error) {\n\tif !js.Valid {\n\t\treturn json.Marshal(nil)\n\t}\n\tjsWithHex := replaceBytesWithHex(js.Val)\n\treturn json.Marshal(jsWithHex)\n}",
"func MarshalJSON(v interface{}) string {\n\tcontents, _ := json.MarshalIndent(v, \"\", \" \")\n\treturn string(contents)\n}",
"func (j Json) MarshalJSON() ([]byte, error) {\n\treturn j.ToJson()\n}",
"func (j JSON) MarshalJSON() ([]byte, error) {\n\tif j.Valid {\n\t\treturn json.Marshal(j.Map)\n\t}\n\n\treturn json.Marshal(nil)\n}",
"func (p PatchObject) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"tags\", p.Tags)\n\treturn json.Marshal(objectMap)\n}",
"func (v Posts) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson783c1624EncodeGithubComGobwasVk(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (spbi SuccessfulPropertyBatchInfo) MarshalJSON() ([]byte, error) {\n\tspbi.Kind = KindSuccessful\n\tobjectMap := make(map[string]interface{})\n\tif spbi.Properties != nil {\n\t\tobjectMap[\"Properties\"] = spbi.Properties\n\t}\n\tif spbi.Kind != \"\" {\n\t\tobjectMap[\"Kind\"] = spbi.Kind\n\t}\n\treturn json.Marshal(objectMap)\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\n\tif err != nil {\n\t\treturn []byte{}, fmt.Errorf(\"Error caught by Onur Gurel\")\n\t}\n\n\treturn bs, nil\n}",
"func (v BindParams) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoTethering2(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (handler Handler) EncodeJSON(v interface{}) (b []byte, err error) {\n\n\t//if(w.Get(\"pretty\",\"false\")==\"true\"){\n\tb, err = json.MarshalIndent(v, \"\", \" \")\n\t//}else{\n\t//\tb, err = json.Marshal(v)\n\t//}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn b, nil\n}",
"func Marshal(val interface{}) ([]byte, error) {}",
"func testJSONMarshal(t *testing.T, v interface{}, want string) {\n\tj, err := json.Marshal(v)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to marshal JSON for %v\", v)\n\t}\n\n\tw := new(bytes.Buffer)\n\terr = json.Compact(w, []byte(want))\n\tif err != nil {\n\t\tt.Errorf(\"String is not valid json: %s\", want)\n\t}\n\n\tif w.String() != string(j) {\n\t\tt.Errorf(\"json.Marshal(%q) returned %s, want %s\", v, j, w)\n\t}\n\n\t// now go the other direction and make sure things unmarshal as expected\n\tu := reflect.ValueOf(v).Interface()\n\tif err := json.Unmarshal([]byte(want), u); err != nil {\n\t\tt.Errorf(\"Unable to unmarshal JSON for %v: %v\", want, err)\n\t}\n\n\tif !reflect.DeepEqual(v, u) {\n\t\tt.Errorf(\"json.Unmarshal(%q) returned %s, want %s\", want, u, v)\n\t}\n}",
"func (o *Object) MarshalJSON() ([]byte, error) {\n\tctx := _builtinJSON_stringifyContext{\n\t\tr: o.runtime,\n\t}\n\tex := o.runtime.vm.try(func() {\n\t\tif !ctx.do(o) {\n\t\t\tctx.buf.WriteString(\"null\")\n\t\t}\n\t})\n\tif ex != nil {\n\t\treturn nil, ex\n\t}\n\treturn ctx.buf.Bytes(), nil\n}",
"func testJSONMarshal(t *testing.T, v interface{}, want string) {\n\tj, err := json.Marshal(v)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to marshal JSON for %v\", v)\n\t}\n\n\tw := new(bytes.Buffer)\n\terr = json.Compact(w, []byte(want))\n\tif err != nil {\n\t\tt.Errorf(\"String is not valid json: %s\", want)\n\t}\n\n\tif w.String() != string(j) {\n\t\tt.Errorf(\"json.Marshal(%q) returned %s, want %s\", v, j, w)\n\t}\n\n\t// now go the other direction and make sure things unmarshal as expected\n\tu := reflect.ValueOf(v).Interface()\n\tif err := json.Unmarshal([]byte(want), u); err != nil {\n\t\tt.Errorf(\"Unable to unmarshal JSON for %v\", want)\n\t}\n\n\tif !reflect.DeepEqual(v, u) {\n\t\tt.Errorf(\"json.Unmarshal(%q) returned %s, want %s\", want, u, v)\n\t}\n}",
"func (v BlitzedItemResponse) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson6a975c40EncodeJsonBenchmark4(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (v item) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonD2b7633eEncodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (i Interface) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"etag\", i.Etag)\n\tpopulate(objectMap, \"extendedLocation\", i.ExtendedLocation)\n\tpopulate(objectMap, \"id\", i.ID)\n\tpopulate(objectMap, \"location\", i.Location)\n\tpopulate(objectMap, \"name\", i.Name)\n\tpopulate(objectMap, \"properties\", i.Properties)\n\tpopulate(objectMap, \"tags\", i.Tags)\n\tpopulate(objectMap, \"type\", i.Type)\n\treturn json.Marshal(objectMap)\n}",
"func marshalJSON(namingStrategy string, that interface{}) ([]byte, error) {\n\tout := map[string]interface{}{}\n\tt := reflect.TypeOf(that)\n\tv := reflect.ValueOf(that)\n\n\tfnctn := v.MethodByName(namingStrategy)\n\tfname := func(params ...interface{}) string {\n\t\tin := make([]reflect.Value, len(params))\n\t\tfor k, param := range params {\n\t\t\tin[k] = reflect.ValueOf(param)\n\t\t}\n\t\treturn fnctn.Call(in)[0].String()\n\t}\n\toutName := \"\"\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tswitch n := f.Tag.Get(\"json\"); n {\n\t\tcase \"\":\n\t\t\toutName = f.Name\n\t\tcase \"-\":\n\t\t\toutName = \"\"\n\t\tdefault:\n\t\t\toutName = fname(n)\n\t\t}\n\t\tif outName != \"\" {\n\t\t\tout[outName] = v.Field(i).Interface()\n\t\t}\n\t}\n\treturn json.Marshal(out)\n}",
"func testMarshalJSON(t *testing.T, cmd interface{}) {\n\tjsonCmd, err := json.Marshal(cmd)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tfmt.Println(string(jsonCmd))\n}",
"func (v PbTestObject) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson5fcf962eEncodeGithubComJsonIteratorGoBenchmarkWith10IntFields(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (pbi PropertyBatchInfo) MarshalJSON() ([]byte, error) {\n\tpbi.Kind = KindPropertyBatchInfo\n\tobjectMap := make(map[string]interface{})\n\tif pbi.Kind != \"\" {\n\t\tobjectMap[\"Kind\"] = pbi.Kind\n\t}\n\treturn json.Marshal(objectMap)\n}",
"func My_json(demo interface{}) *bytes.Buffer {\r\n\tif bs, err := json.Marshal(demo); err == nil {\r\n\t\treq := bytes.NewBuffer([]byte(bs))\r\n\t\treturn req\r\n\t} else {\r\n\t\tpanic(err)\r\n\t}\r\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn MarshalEx(v, false)\n}",
"func MarshalJSON(a interface{}) (b []byte, err error) {\n\tif m, ok := a.(proto.Message); ok {\n\t\tmarshaller := &jsonpb.Marshaler{}\n\t\tvar buf bytes.Buffer\n\t\terr = marshaller.Marshal(&buf, m)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tb = buf.Bytes()\n\t} else {\n\t\tb, err = json.Marshal(a)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}",
"func (v PostParams) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels10(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (v Post) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson5a72dc82EncodeGithubComTimRazumovTechnoparkDBAppModels6(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (o *ExportDataPartial) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(o.ToMap())\n}",
"func (n *Node) Marshal() ([]byte, error) {\n\treturn json.Marshal(n)\n}",
"func (n *Node) Marshal() ([]byte, error) {\n\treturn json.Marshal(n)\n}",
"func jsonEnc(in T) ([]byte, error) {\n\treturn jsonx.Marshal(in)\n}",
"func (s *HTTPServer) marshalJSON(req *http.Request, obj interface{}) ([]byte, error) {\n\tif _, ok := req.URL.Query()[\"pretty\"]; ok || s.agent.config.DevMode {\n\t\tbuf, err := json.MarshalIndent(obj, \"\", \" \")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf = append(buf, \"\\n\"...)\n\t\treturn buf, nil\n\t}\n\n\tbuf, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf, err\n}",
"func (m Message) Marshal() ([]byte, error) {\n\treturn jsoniter.Marshal(m)\n}",
"func (m Json) MarshalJSON() ([]byte, error) {\n\tif m == nil {\n\t\treturn []byte(\"null\"), nil\n\t}\n\treturn m, nil\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn NewFormatter().Marshal(v)\n}",
"func (s *ServiceSecrets) MarshalJson() ([]byte, error) {\n\treturn json.Marshal(s)\n}",
"func JsonEncode(data []byte, v interface{}) error {\n\n\treturn json.Unmarshal(data, v)\n}",
"func (out *GetOutput) Marshal() ([]byte, error) {\n\treturn json.Marshal(out)\n}"
] | [
"0.75134",
"0.7502133",
"0.7500753",
"0.74823195",
"0.7446766",
"0.7371689",
"0.73370403",
"0.7304601",
"0.72591853",
"0.72539127",
"0.72181046",
"0.717537",
"0.7162588",
"0.7161582",
"0.71608186",
"0.7072197",
"0.70587647",
"0.7044735",
"0.7022404",
"0.6973228",
"0.6963657",
"0.69578344",
"0.69243026",
"0.6924262",
"0.68824863",
"0.68681127",
"0.68572986",
"0.6818534",
"0.68102",
"0.67969906",
"0.67913324",
"0.67774016",
"0.67717487",
"0.67700523",
"0.6754375",
"0.67300195",
"0.67154574",
"0.6711641",
"0.6708163",
"0.6686554",
"0.6676971",
"0.6670713",
"0.6667217",
"0.6665734",
"0.6651805",
"0.664897",
"0.6601639",
"0.65936595",
"0.6570477",
"0.65671986",
"0.65637034",
"0.6562716",
"0.65555567",
"0.6544248",
"0.65373516",
"0.6532906",
"0.65273225",
"0.65230805",
"0.6517934",
"0.65155387",
"0.6507946",
"0.65065837",
"0.65061134",
"0.65058106",
"0.6502681",
"0.6501059",
"0.6492431",
"0.64840174",
"0.6483743",
"0.64832276",
"0.64799786",
"0.6479202",
"0.6476907",
"0.64739543",
"0.6469417",
"0.6467924",
"0.6463269",
"0.64624554",
"0.6460338",
"0.6457592",
"0.6454149",
"0.6448956",
"0.6447831",
"0.64472353",
"0.6436354",
"0.64353037",
"0.64196956",
"0.64188385",
"0.64096874",
"0.64083934",
"0.6407883",
"0.6405311",
"0.6405311",
"0.640528",
"0.6403627",
"0.6403197",
"0.6402974",
"0.64011514",
"0.6401083",
"0.6395421",
"0.6394115"
] | 0.0 | -1 |
MarshalEasyJSON supports easyjson.Marshaler interface | func (v DocumentResponse) MarshalEasyJSON(w *jwriter.Writer) {
easyjson6a975c40EncodeJsonBenchmark3(w, v)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v item) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(w, v)\n}",
"func (v Fruit) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels11(w, v)\n}",
"func (v BlitzedItemResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark4(w, v)\n}",
"func (ce *CustomEvent) MarshalEasyJSON(w *jwriter.Writer) {\n\tce.marshalerCtor().MarshalEasyJSON(w)\n}",
"func (v Boo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeMsgpJson(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk7(w, v)\n}",
"func (v invocationMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson2802b09fEncodeGithubComPhilippseithSignalr1(w, v)\n}",
"func (v Native) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson10(w, v)\n}",
"func (v ItemCheckResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark2(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson393a2a40EncodeCodegen(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels(w, v)\n}",
"func (v ExportItem) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson1(w, v)\n}",
"func (v Format) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson14(w, v)\n}",
"func (v managerListener) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonEd74d837EncodeGithubComKihamoBoggartComponentsBoggartInternalHandlers(w, v)\n}",
"func (v Part) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer12(w, v)\n}",
"func (v publicKey) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection(w, v)\n}",
"func (v VisitArray) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE564fc13EncodeGithubComLa0rgHighloadcupModel(w, v)\n}",
"func (v Banner) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson23(w, v)\n}",
"func (v MOTD) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer19(w, v)\n}",
"func (v ProductToAdd) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels1(w, v)\n}",
"func (v App) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson25(w, v)\n}",
"func (v Ingredient) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels8(w, v)\n}",
"func (v Visit) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE564fc13EncodeGithubComLa0rgHighloadcupModel1(w, v)\n}",
"func (v InfoUser) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeJsongen3(w, v)\n}",
"func (v Deal) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson8a221a72EncodeGithubComVungleVungoOpenrtb(w, v)\n}",
"func (v FormDataMQ) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson(w, v)\n}",
"func (v Msg) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels6(w, v)\n}",
"func (v Nick) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer14(w, v)\n}",
"func (v Program) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson1c6ddb42EncodeGithubComSturfeeincGlTF(w, v)\n}",
"func (v OrderCheckResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark1(w, v)\n}",
"func (v BasicUser) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeJsongen4(w, v)\n}",
"func (v Pet) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson14a1085EncodeGithubComIamStubborNPetstoreDbModels1(w, v)\n}",
"func (v ChannelForward) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer32(w, v)\n}",
"func (v Element) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson2(w, v)\n}",
"func (v Pmp) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson9(w, v)\n}",
"func (v Responce) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeGithubComSerhio83DruidPkgStructs(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson5a72dc82EncodeGithubComTimRazumovTechnoparkDBAppModels6(w, v)\n}",
"func (c Context) MarshalEasyJSON(writer *ej_jwriter.Writer) {\n\tif err := c.Err(); err != nil {\n\t\twriter.Error = err\n\t\treturn\n\t}\n\twrappedWriter := jwriter.NewWriterFromEasyJSONWriter(writer)\n\tContextSerialization.MarshalToJSONWriter(&wrappedWriter, &c)\n}",
"func (v Features) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer25(w, v)\n}",
"func (v Node) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeGithubComSkydiveProjectSkydiveGraffitiApiTypes1(w, v)\n}",
"func (v Posts) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk(w, v)\n}",
"func (v ThirdParty) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson2(w, v)\n}",
"func (v Student) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonF1627ba7EncodeGithubComDuchiporexiaGoutilsXmsgTests1(w, v)\n}",
"func (v UsersHandler) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers(w, v)\n}",
"func (v Segment) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson6(w, v)\n}",
"func (v Info) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels13(w, v)\n}",
"func (v CBPerson) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE242b40eEncodeGithubComExampleSample2(w, v)\n}",
"func (v Invite) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer22(w, v)\n}",
"func (v GetUserResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers1(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson7da3ae25EncodeCourseraGolangHomeworks(w, v)\n}",
"func (v PostSource) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk2(w, v)\n}",
"func (v ShadowModelSt) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB7ed31d3EncodeMevericcoreMccommon5(w, v)\n}",
"func (v BindParams) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoTethering2(w, v)\n}",
"func (v Error) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer26(w, v)\n}",
"func (v Deal) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson16(w, v)\n}",
"func (v Message) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer18(w, v)\n}",
"func (v PostParams) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels10(w, v)\n}",
"func (v Musician) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson62dc445bEncode20211NoskoolTeamInternalAppMusiciansModels2(w, v)\n}",
"func (v Impression) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson12(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels11(w, v)\n}",
"func (v BaseInstrumentInfo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi128(w, v)\n}",
"func (v Grade) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonF1627ba7EncodeGithubComDuchiporexiaGoutilsXmsgTests2(w, v)\n}",
"func (v Mode) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer16(w, v)\n}",
"func (v Whois) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer1(w, v)\n}",
"func (v Foo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonAbe23ddeEncodeGithubComUberZanzibarExamplesExampleGatewayBuildGenCodeClientsCorgeCorge1(w, v)\n}",
"func (v Source) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson3(w, v)\n}",
"func (v ProductExtended) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels5(w, v)\n}",
"func (v Away) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer33(w, v)\n}",
"func (c EventOutputContext) MarshalEasyJSON(writer *ej_jwriter.Writer) {\n\tif err := c.Err(); err != nil {\n\t\twriter.Error = err\n\t\treturn\n\t}\n\twrappedWriter := jwriter.NewWriterFromEasyJSONWriter(writer)\n\tContextSerialization.MarshalToJSONWriterEventOutput(&wrappedWriter, &c)\n}",
"func (v IngredientArr) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels7(w, v)\n}",
"func (v RiverConnection) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection4(w, v)\n}",
"func (v streamItemMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson2802b09fEncodeGithubComPhilippseithSignalr(w, v)\n}",
"func (v Vacancy) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeGithubComIskiyRabotauaTelegramBotPkgRabotaua4(w, v)\n}",
"func (v Item) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeDrhyuComIndexerModels2(w, v)\n}",
"func (v PostAttachement) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk6(w, v)\n}",
"func (v ExtFilter) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson795c59c6EncodeGrapeGuardRules11(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson1(w, v)\n}",
"func (v ProductShrinked) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels3(w, v)\n}",
"func (v RiverConnectionJS) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection3(w, v)\n}",
"func (v DCCSend) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer27(w, v)\n}",
"func (v MediumPayload) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE242b40eEncodeGithubComExampleSample1(w, v)\n}",
"func (v Stash) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeDrhyuComIndexerModels(w, v)\n}",
"func (v WSRequest) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer3(w, v)\n}",
"func (v ApiMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi132(w, v)\n}",
"func (v flattenedField) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson390b7126EncodeGithubComChancedPicker35(w, v)\n}",
"func (v SFMetric) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson51bca34dEncodeGithubComSkydiveProjectSkydiveSflow2(w, v)\n}",
"func (v managerHandlerDevice) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonEd74d837EncodeGithubComKihamoBoggartComponentsBoggartInternalHandlers1(w, v)\n}",
"func (v BaseOp) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi125(w, v)\n}",
"func (v Data) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson17(w, v)\n}",
"func (v ResultReq) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi54(w, v)\n}",
"func (v CreateUserResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers2(w, v)\n}",
"func (v CreateIsolatedWorldReturns) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoPage90(w, v)\n}",
"func (v ServerKeys) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection2(w, v)\n}",
"func (v ShadowUpdateMsgSt) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB7ed31d3EncodeMevericcoreMccommon1(w, v)\n}",
"func (v WSResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer2(w, v)\n}",
"func (v Content) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson18(w, v)\n}",
"func (v Join) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer21(w, v)\n}",
"func (v Bid) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson22(w, v)\n}",
"func (v moreLikeThisQuery) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson390b7126EncodeGithubComChancedPicker18(w, v)\n}",
"func (v Device) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson15(w, v)\n}"
] | [
"0.77204984",
"0.7672636",
"0.7653424",
"0.7591104",
"0.7549626",
"0.7506079",
"0.74917865",
"0.74814856",
"0.747845",
"0.74776804",
"0.7472024",
"0.74326074",
"0.74075466",
"0.74005824",
"0.7393129",
"0.7387327",
"0.7384699",
"0.7380124",
"0.7379066",
"0.73720104",
"0.73705596",
"0.73606724",
"0.7350565",
"0.73454297",
"0.73452127",
"0.73444057",
"0.73321444",
"0.7307502",
"0.73051214",
"0.73029035",
"0.729492",
"0.72922665",
"0.7291515",
"0.72859746",
"0.72832805",
"0.7281536",
"0.72810227",
"0.72690594",
"0.7265171",
"0.7263047",
"0.72606456",
"0.7260384",
"0.725305",
"0.72479904",
"0.724739",
"0.7246577",
"0.72447056",
"0.7243153",
"0.72394603",
"0.72375077",
"0.7237401",
"0.7235131",
"0.7232335",
"0.7225127",
"0.72224236",
"0.722217",
"0.722101",
"0.7216019",
"0.7215666",
"0.72131526",
"0.7212736",
"0.71996164",
"0.7197805",
"0.71939874",
"0.71824425",
"0.7180386",
"0.71780163",
"0.7173894",
"0.71658254",
"0.7163895",
"0.7163262",
"0.7161551",
"0.7154433",
"0.7154243",
"0.7153383",
"0.71522945",
"0.71498144",
"0.71420634",
"0.71419924",
"0.7141065",
"0.7132712",
"0.71298224",
"0.71237564",
"0.71226513",
"0.71173185",
"0.7116985",
"0.7109691",
"0.71048075",
"0.7099136",
"0.7098513",
"0.70949537",
"0.7090401",
"0.7087397",
"0.7085066",
"0.70830184",
"0.70823175",
"0.7082235",
"0.7080599",
"0.7080532",
"0.7079698"
] | 0.7543635 | 5 |
UnmarshalJSON supports json.Unmarshaler interface | func (v *DocumentResponse) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjson6a975c40DecodeJsonBenchmark3(&r, v)
return r.Error()
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (j *jsonNative) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func unmarshalJSON(j extv1.JSON, output *any) error {\n\tif len(j.Raw) == 0 {\n\t\treturn nil\n\t}\n\treturn json.Unmarshal(j.Raw, output)\n}",
"func (j *JSON) Unmarshal(input, target interface{}) error {\n\t// take the input and convert it to target\n\treturn jsonEncoding.Unmarshal(input.([]byte), target)\n}",
"func (j *Publisher) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *OneLike) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\tdecodeOneLike(&r, v)\n\treturn r.Error()\n}",
"func UnmarshalJSON(data []byte, v interface{}) {\n\terr := json.Unmarshal(data, v)\n\tAbortIf(err)\n}",
"func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {\n\td := json.NewDecoder(r)\n\tfor _, opt := range opts {\n\t\td = opt(d)\n\t}\n\tif err := d.Decode(&o); err != nil {\n\t\treturn fmt.Errorf(\"while decoding JSON: %v\", err)\n\t}\n\treturn nil\n}",
"func UnmarshalJSON(b []byte, discriminator string, f Factory) (interface{}, error) {\n\tm := make(map[string]interface{})\n\terr := json.Unmarshal(b, &m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Decode(m, discriminator, f)\n}",
"func UnmarshalFromJSON(data []byte, target interface{}) error {\n\tvar ctx map[string]interface{}\n\terr := json.Unmarshal(data, &ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn Unmarshal(ctx, target)\n}",
"func (s *Serializer) Unmarshal(data []byte, v interface{}) error {\n\treturn jsoniter.Unmarshal(data,v)\n}",
"func (j *ThirdParty) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func unmarshal() {\n\tfmt.Println(\"=== json.unmarshal ===\")\n\tvar jsonBlob = []byte(`[\n\t\t{\"name\": \"Bill\", \"age\": 109},\n\t\t{\"name\": \"Bob\", \"age\": 5}\n\t]`)\n\n\tvar persons []Person\n\terr := json.Unmarshal(jsonBlob, &persons)\n\tcheck(err)\n\n\tfmt.Printf(\"%+v\\n\", persons)\n}",
"func (j *Data) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func (j *Type) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (this *Simple) UnmarshalJSON(b []byte) error {\n\treturn TypesUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *Response) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (pl PLUtil) Unmarshal(data []byte, v interface{}) error {\n\tcmd := pl.execCommand(\n\t\t\"plutil\",\n\t\t\"-convert\", \"json\",\n\t\t// Read from stdin.\n\t\t\"-\",\n\t\t// Output to stdout.\n\t\t\"-o\", \"-\")\n\tcmd.Stdin = bytes.NewReader(data)\n\tstdout, err := cmd.Output()\n\tif exitErr, ok := err.(*exec.ExitError); ok {\n\t\treturn fmt.Errorf(\"`%s` failed (%w) with stderr: %s\", cmd, err, exitErr.Stderr)\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"`%s` failed (%w)\", cmd, err)\n\t}\n\tif err := json.Unmarshal(stdout, v); err != nil {\n\t\treturn fmt.Errorf(\"failed to parse json: %w\", err)\n\t}\n\treturn nil\n}",
"func (j *LuaFunction) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *BlitzedItemResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark4(&r, v)\n\treturn r.Error()\n}",
"func Unmarshal(data []byte, v Unmarshaler) error {\n\tl := jlexer.Lexer{Data: data}\n\tv.UnmarshalTinyJSON(&l)\n\treturn l.Error()\n}",
"func (j *Producer) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *User) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *Json) UnmarshalJSON(b []byte) error {\n\tr, err := loadContentWithOptions(b, Options{\n\t\tType: ContentTypeJson,\n\t\tStrNumber: true,\n\t})\n\tif r != nil {\n\t\t// Value copy.\n\t\t*j = *r\n\t}\n\treturn err\n}",
"func (j *Error) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *Element) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson2(&r, v)\n\treturn r.Error()\n}",
"func (j *Message) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func jsonDecode(reader io.ReadCloser, v interface{}) error {\n\tdecoder := json.NewDecoder(reader)\n\terr := decoder.Decode(v)\n\treturn err\n}",
"func (j *FactoryPluginRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (m *gohaiMarshaler) UnmarshalJSON(bytes []byte) error {\n\tfirstUnmarshall := \"\"\n\terr := json.Unmarshal(bytes, &firstUnmarshall)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal([]byte(firstUnmarshall), &(m.gohai))\n\treturn err\n}",
"func (j *Packet) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (receiver *Type) UnmarshalJSON(src []byte) error {\n\tif nil == receiver {\n\t\treturn errNilReceiver\n\t}\n\n\tvar s string\n\tif err := json.Unmarshal(src, &s); nil != err {\n\t\treturn err\n\t}\n\n\tif err := receiver.Scan(s); nil != err {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (j *qProxyClient) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func Unmarshal(data []byte, v Unmarshaler) error {\n\tl := jlexer.Lexer{Data: data}\n\tv.UnmarshalEasyJSON(&l)\n\treturn l.Error()\n}",
"func (j *TextMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func (w *Entry) UnmarshalJSON(bb []byte) error {\n\t<<!!YOUR_CODE!!>>\n}",
"func UnmarshalJSON(body io.Reader, v interface{}) error {\n\tdecoder := json.NewDecoder(body)\n\treturn decoder.Decode(v)\n}",
"func Unmarshal(data []byte) (interface{}, error) {\n\tvar value marble\n\terr := json.Unmarshal(data, &value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &value, nil\n}",
"func (j *RunPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (i *Transform) UnmarshalJSON(data []byte) error {\n\tvar s string\n\tif err := json.Unmarshal(data, &s); err != nil {\n\t\treturn fmt.Errorf(\"Transform should be a string, got %[1]s\", data)\n\t}\n\n\tvar err error\n\t*i, err = ParseTransformString(s)\n\treturn err\n}",
"func (this *Service) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (v *OneUpdateLike) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\tdecodeOneUpdateLike(&r, v)\n\treturn r.Error()\n}",
"func (j *RunRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *Raw) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer10(&r, v)\n\treturn r.Error()\n}",
"func (j *Server) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *JSONText) Unmarshal(v interface{}) error {\n\treturn json.Unmarshal([]byte(*j), v)\n}",
"func (j *JsonlMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func (j *PublishMessagesResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *Event) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *FactoryPluginPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j JSON) Unmarshal(dest interface{}) error {\n\tif dest == nil {\n\t\treturn errors.New(\"destination is nil, not a valid pointer to an object\")\n\t}\n\n\t// Call our implementation of\n\t// JSON MarshalJSON through json.Marshal\n\t// to get the value of the JSON object\n\tres, err := json.Marshal(j)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn json.Unmarshal(res, dest)\n}",
"func (v *User) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson9e1087fdDecodeHw3Bench(&r, v)\n\treturn r.Error()\n}",
"func Unmarshal(data []byte, v interface{}) error {\n\terr := json.Unmarshal(data, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ImplementsPostJSONUnmarshaler(v) {\n\t\terr := v.(PostJSONUnmarshaler).PostUnmarshalJSON()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func FromJSON(i interface{}, r io.Reader) error {\n\td := json.NewDecoder(r)\n\treturn d.Decode(i)\n}",
"func (v *FormulaAndFunctionResponseFormat) UnmarshalJSON(src []byte) error {\n\tvar value string\n\terr := json.Unmarshal(src, &value)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*v = FormulaAndFunctionResponseFormat(value)\n\treturn nil\n}",
"func (j *GetMessagesResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *RespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {\n\tf.d.jsonUnmarshalV(tm)\n}",
"func (j *LuaTable) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *Regulations) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *User) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson2bc03518DecodeLangTaskOnBench(&r, v)\n\treturn r.Error()\n}",
"func Unmarshal(b []byte, v interface{}) error {\n\treturn json.Unmarshal(b, v)\n}",
"func (v *UnloadCheckResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark(&r, v)\n\treturn r.Error()\n}",
"func (this *DeploymentStrategy) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *LuaInt) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *Visit) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel1(&r, v)\n\treturn r.Error()\n}",
"func (u *Unstructured) UnmarshalJSON(b []byte) error {\n\t_, _, err := UnstructuredJSONScheme.Decode(b, nil, u)\n\treturn err\n}",
"func UnmarshalJSON(b []byte) (dgo.Value, error) {\n\tdec := json.NewDecoder(bytes.NewReader(b))\n\tdec.UseNumber()\n\treturn jsonDecodeValue(dec)\n}",
"func unmarshal(data []byte, v interface{}) {\n\terr := json.Unmarshal(data, v)\n\tassert(err == nil, \"unmarshal error: %s\", err)\n}",
"func (j *Balance) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (t *Type) UnmarshalJSON(b []byte) error {\n\tvar text string\n\tif err := json.Unmarshal(b, &text); err != nil {\n\t\treturn err\n\t}\n\n\treturn t.UnmarshalText([]byte(text))\n}",
"func JSONDecode(ctx context.Context, data []byte, obj interface{}) error {\n\treturn json.Unmarshal(data, obj)\n}",
"func (j *RegisterRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *ListPluginRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (this *Probe) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *BootInitiationRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (v *VisitArray) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel(&r, v)\n\treturn r.Error()\n}",
"func (v *ExportItem) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson1(&r, v)\n\treturn r.Error()\n}",
"func (this *ImportedReference) UnmarshalJSON(b []byte) error {\n\treturn TypesUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (j *LuaString) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (jz JSONGzipEncoding) Unmarshal(data []byte, value interface{}) error {\n\tjsonData, err := GzipDecode(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(jsonData, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (v *ItemCheckResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark2(&r, v)\n\treturn r.Error()\n}",
"func (i *Interface) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"etag\":\n\t\t\terr = unpopulate(val, \"Etag\", &i.Etag)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"extendedLocation\":\n\t\t\terr = unpopulate(val, \"ExtendedLocation\", &i.ExtendedLocation)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &i.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"location\":\n\t\t\terr = unpopulate(val, \"Location\", &i.Location)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &i.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &i.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tags\":\n\t\t\terr = unpopulate(val, \"Tags\", &i.Tags)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &i.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func Unmarshal(data []byte, typ DataFormat, target interface{}) {\n\tswitch typ {\n\tcase GOB:\n\t\tbuf := bytes.NewReader(data)\n\t\tgob.NewDecoder(buf).Decode(target)\n\n\tdefault:\n\t\tif err := json.Unmarshal(data, target); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}",
"func (j *Json) UnmarshalJSON(data []byte) error {\n\terr := json.Unmarshal(data, &j.data)\n\n\tj.exists = (err == nil)\n\treturn err\n}",
"func (v *PbTestObject) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson5fcf962eDecodeGithubComJsonIteratorGoBenchmarkWith10IntFields(&r, v)\n\treturn r.Error()\n}",
"func (j *ModifyQueueResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *UnInstallRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func unmarshalJSON(i *big.Int, bz []byte) error {\n\tvar text string\n\terr := json.Unmarshal(bz, &text)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn i.UnmarshalText([]byte(text))\n}",
"func (j *LuaBool) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (j *WorkerCreateOperation) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func jsonDec(t reflect.Type, in []byte) (T, error) {\n\tval := reflect.New(t)\n\tif err := jsonx.Unmarshal(val.Interface(), in); err != nil {\n\t\treturn nil, err\n\t}\n\treturn val.Elem().Interface(), nil\n}",
"func (z *Int) UnmarshalJSON(text []byte) error {}",
"func (j *PeerInfo) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (obj *miner) UnmarshalJSON(data []byte) error {\n\tins := new(JSONMiner)\n\terr := json.Unmarshal(data, ins)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpr, err := createMinerFromJSON(ins)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinsMiner := pr.(*miner)\n\tobj.toTransact = insMiner.toTransact\n\tobj.queue = insMiner.queue\n\tobj.broadcasted = insMiner.broadcasted\n\tobj.toLink = insMiner.toLink\n\treturn nil\n}",
"func (j *UnInstallPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func deJSONify(i interface{}) (interface{}, error) {\n\tvar data []byte\n\tswitch t := i.(type) {\n\tcase string:\n\t\tdata = []byte(t)\n\tcase []byte:\n\t\tdata = t\n\tcase json.RawMessage:\n\t\tdata = []byte(t)\n\tdefault:\n\t\treturn i, nil\n\t}\n\tvar x interface{}\n\tif err := json.Unmarshal(data, &x); err != nil {\n\t\treturn nil, &kivik.Error{HTTPStatus: http.StatusBadRequest, Err: err}\n\t}\n\treturn x, nil\n}",
"func (c *JSONCodec) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}",
"func NewJSONUnmarshaler(resolver Resolver) Unmarshaler {\n\treturn newJSONUnmarshaler(resolver)\n}",
"func NewJSONUnmarshaler(resolver Resolver) Unmarshaler {\n\treturn newJSONUnmarshaler(resolver)\n}"
] | [
"0.70113605",
"0.698139",
"0.6947301",
"0.6867781",
"0.68005323",
"0.67680764",
"0.6741481",
"0.67051035",
"0.6688701",
"0.66797084",
"0.6676911",
"0.6669605",
"0.6661001",
"0.66579056",
"0.6652777",
"0.66498846",
"0.6632663",
"0.663189",
"0.6627629",
"0.66243863",
"0.6612909",
"0.6587119",
"0.65519077",
"0.6545157",
"0.6537283",
"0.6533197",
"0.6532074",
"0.6526187",
"0.6518123",
"0.6512875",
"0.6505786",
"0.64908326",
"0.64847505",
"0.64830405",
"0.64820194",
"0.6469316",
"0.64528453",
"0.64508975",
"0.6441661",
"0.6441397",
"0.6438974",
"0.6438737",
"0.642948",
"0.6408435",
"0.640738",
"0.6396278",
"0.6394157",
"0.6385808",
"0.63855124",
"0.63844603",
"0.6375449",
"0.63702816",
"0.63625103",
"0.63553596",
"0.63552856",
"0.63477194",
"0.6344893",
"0.6339914",
"0.6331977",
"0.63298523",
"0.6323917",
"0.63238263",
"0.6318712",
"0.631284",
"0.63110864",
"0.6310182",
"0.6305762",
"0.63040566",
"0.62972116",
"0.62931895",
"0.6291462",
"0.62913823",
"0.62810636",
"0.6280757",
"0.6274565",
"0.6273215",
"0.62724316",
"0.62711626",
"0.6271133",
"0.62660044",
"0.6263724",
"0.62590677",
"0.62587553",
"0.62568384",
"0.6255846",
"0.6252581",
"0.62471205",
"0.6244156",
"0.6241776",
"0.62323636",
"0.62298375",
"0.6226608",
"0.6226587",
"0.62243664",
"0.6220156",
"0.6218001",
"0.6216062",
"0.621471",
"0.62088907",
"0.62088907"
] | 0.6215216 | 97 |
UnmarshalEasyJSON supports easyjson.Unmarshaler interface | func (v *DocumentResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjson6a975c40DecodeJsonBenchmark3(l, v)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *BlitzedItemResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark4(l, v)\n}",
"func (v *Fruit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels11(l, v)\n}",
"func (v *Boo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeMsgpJson(l, v)\n}",
"func (v *Element) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson2(l, v)\n}",
"func (c *Context) UnmarshalEasyJSON(in *jlexer.Lexer) {\n\tContextSerialization.UnmarshalFromEasyJSONLexer(in, c)\n}",
"func (v *Format) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson14(l, v)\n}",
"func (v *DetectedFruit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels12(l, v)\n}",
"func (v *item) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(l, v)\n}",
"func (v *ItemCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark2(l, v)\n}",
"func (v *Native) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson10(l, v)\n}",
"func (v *FormDataMQ) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson(l, v)\n}",
"func (v *Node) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComSkydiveProjectSkydiveGraffitiApiTypes1(l, v)\n}",
"func (v *flattenedField) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker35(l, v)\n}",
"func (v *ExtFilter) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson795c59c6DecodeGrapeGuardRules11(l, v)\n}",
"func (v *Deal) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson8a221a72DecodeGithubComVungleVungoOpenrtb(l, v)\n}",
"func (v *OrderCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark1(l, v)\n}",
"func (v *Visit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel1(l, v)\n}",
"func (v *Error) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer26(l, v)\n}",
"func (v *GetUserResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson84c0690eDecodeMainHandlers1(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson9e1087fdDecodeHw3Bench(l, v)\n}",
"func (v *IngredientArr) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels7(l, v)\n}",
"func (v *VisitArray) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel(l, v)\n}",
"func (v *Foo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonAbe23ddeDecodeGithubComUberZanzibarExamplesExampleGatewayBuildGenCodeClientsCorgeCorge1(l, v)\n}",
"func (v *Ingredient) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels8(l, v)\n}",
"func (v *Musician) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson62dc445bDecode20211NoskoolTeamInternalAppMusiciansModels2(l, v)\n}",
"func (v *ThirdParty) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson2(l, v)\n}",
"func (v *Data) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson17(l, v)\n}",
"func (v *Deal) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson16(l, v)\n}",
"func (v *Raw) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer10(l, v)\n}",
"func (v *EasyResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6ff3ac1dDecodeGithubComWenweihBitcoinRpcGolangProto1(l, v)\n}",
"func (v *AdvFormData) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson3(l, v)\n}",
"func (v *Message) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer18(l, v)\n}",
"func (v *Teacher) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonF1627ba7DecodeGithubComDuchiporexiaGoutilsXmsgTests(l, v)\n}",
"func (v *Invite) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer22(l, v)\n}",
"func (v *CBPerson) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE242b40eDecodeGithubComExampleSample2(l, v)\n}",
"func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson326edDecodeGithubComMxmCherryOpenrtb(l, v)\n}",
"func (v *BidRequest) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson21(l, v)\n}",
"func (v *Impression) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson12(l, v)\n}",
"func (v *Msg) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels6(l, v)\n}",
"func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson326edDecodeGithubComApplifierGoOpenrtbOpenrtb2(l, v)\n}",
"func (v *Info) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC80ae7adDecodeGithubComDeiklovTechDbRomanovAndrGolangModels13(l, v)\n}",
"func (v *MediumPayload) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE242b40eDecodeGithubComExampleSample1(l, v)\n}",
"func (v *Part) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer12(l, v)\n}",
"func (v *ProductExtendedArr) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels4(l, v)\n}",
"func (v *Whois) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer1(l, v)\n}",
"func (v *App) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson25(l, v)\n}",
"func (v *Content) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson18(l, v)\n}",
"func (v *Responce) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeGithubComSerhio83DruidPkgStructs(l, v)\n}",
"func (v *TransactionResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE82c8e88DecodeGithubComKamaiuOandaGoModel5(l, v)\n}",
"func (v *ProductExtended) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels5(l, v)\n}",
"func (v *Error) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdproto2(l, v)\n}",
"func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson20(l, v)\n}",
"func (v *HireManager) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonAf94a8adDecodeGithubComGoParkMailRu20192ComandusInternalModel(l, v)\n}",
"func (v *PlantainerShadowMetadataSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5bd79fa1DecodeMevericcoreMcplantainer9(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels(l, v)\n}",
"func (v *RespStruct) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeDrhyuComIndexerModels1(l, v)\n}",
"func (v *Item) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeDrhyuComIndexerModels2(l, v)\n}",
"func (v *Annotation) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeGithubComSerhio83DruidPkgStructs3(l, v)\n}",
"func (v *Fundamental) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson3e8ab7adDecodeGithubComAlpacahqAlpacaTradeApiGoV3Alpaca14(l, v)\n}",
"func (v *BasicUser) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeJsongen4(l, v)\n}",
"func (v *Features) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer25(l, v)\n}",
"func (v *Edge) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComSkydiveProjectSkydiveGraffitiApiTypes2(l, v)\n}",
"func (v *ShadowModelSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB7ed31d3DecodeMevericcoreMccommon5(l, v)\n}",
"func (v *AdvForm) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson4(l, v)\n}",
"func (v *binaryField) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker46(l, v)\n}",
"func (v *ShadowUpdateMsgSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB7ed31d3DecodeMevericcoreMccommon1(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson393a2a40DecodeCodegen(l, v)\n}",
"func (v *InfoUser) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeJsongen3(l, v)\n}",
"func (v *Message) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdproto1(l, v)\n}",
"func (v *Pmp) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson9(l, v)\n}",
"func (v *MOTD) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer19(l, v)\n}",
"func (v *Attack) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComGoParkMailRu2018242GameServerTypes4(l, v)\n}",
"func (v *moreLikeThisQuery) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker18(l, v)\n}",
"func (v *ExportItem) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson1(l, v)\n}",
"func (v *EventLoadEventFired) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdprotoPage70(l, v)\n}",
"func (v *managerListener) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonEd74d837DecodeGithubComKihamoBoggartComponentsBoggartInternalHandlers(l, v)\n}",
"func (v *WSResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer2(l, v)\n}",
"func (v *UnloadCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark(l, v)\n}",
"func (v *PbTestObject) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5fcf962eDecodeGithubComJsonIteratorGoBenchmarkWith10IntFields(l, v)\n}",
"func (v *Student) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonF1627ba7DecodeGithubComDuchiporexiaGoutilsXmsgTests1(l, v)\n}",
"func (v *Device) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson15(l, v)\n}",
"func (v *Messages) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer17(l, v)\n}",
"func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson2bc03518DecodeLangTaskOnBench(l, v)\n}",
"func (v *BaseTickerInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi117(l, v)\n}",
"func (v *Topic) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer5(l, v)\n}",
"func (v *BaseLedgerInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi127(l, v)\n}",
"func (v *Banner) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson23(l, v)\n}",
"func (v *APIError) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson3e8ab7adDecodeGithubComAlpacahqAlpacaTradeApiGoV3Alpaca24(l, v)\n}",
"func (v *Bid) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson22(l, v)\n}",
"func (v *Post) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson783c1624DecodeGithubComGobwasVk7(l, v)\n}",
"func (v *BaseTradeInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi116(l, v)\n}",
"func (v *MusicianFullInformation) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson62dc445bDecode20211NoskoolTeamInternalAppMusiciansModels1(l, v)\n}",
"func (v *matchRule) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker19(l, v)\n}",
"func (v *managerHandlerDevice) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonEd74d837DecodeGithubComKihamoBoggartComponentsBoggartInternalHandlers1(l, v)\n}",
"func (v *ResultReq) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi54(l, v)\n}",
"func (v *invocationMessage) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson2802b09fDecodeGithubComPhilippseithSignalr1(l, v)\n}",
"func (v *fuzzyRule) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker34(l, v)\n}",
"func (v *PlantainerShadowSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5bd79fa1DecodeMevericcoreMcplantainer8(l, v)\n}",
"func (v *completionMessage) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson2802b09fDecodeGithubComPhilippseithSignalr5(l, v)\n}",
"func (v *Source) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson3(l, v)\n}"
] | [
"0.73436594",
"0.73405373",
"0.72584677",
"0.72040373",
"0.71776104",
"0.71510446",
"0.7143438",
"0.71413064",
"0.71286225",
"0.7112999",
"0.7103849",
"0.7097653",
"0.7085183",
"0.70850646",
"0.7081146",
"0.7077145",
"0.70403785",
"0.70357895",
"0.7030433",
"0.7028725",
"0.7021155",
"0.70114094",
"0.70109946",
"0.70103574",
"0.7002987",
"0.69937176",
"0.6981908",
"0.6981736",
"0.69811034",
"0.6980795",
"0.69711286",
"0.6965327",
"0.695678",
"0.69543517",
"0.6948873",
"0.69404715",
"0.69387776",
"0.6935085",
"0.6930436",
"0.6922759",
"0.6904652",
"0.6894174",
"0.68897486",
"0.6889671",
"0.6888647",
"0.6887437",
"0.6887124",
"0.68862444",
"0.68853265",
"0.68804044",
"0.6874087",
"0.6870016",
"0.6869092",
"0.6868185",
"0.6858964",
"0.6846011",
"0.68405616",
"0.6836571",
"0.6835831",
"0.68291616",
"0.6823791",
"0.6822216",
"0.6817067",
"0.6815519",
"0.68133044",
"0.6812743",
"0.6811037",
"0.68107563",
"0.6809271",
"0.680744",
"0.68065774",
"0.68030846",
"0.68029016",
"0.67965585",
"0.6794714",
"0.678028",
"0.67772484",
"0.6772522",
"0.67714006",
"0.6769638",
"0.67685604",
"0.67657346",
"0.6763771",
"0.67634416",
"0.6762939",
"0.67570746",
"0.6756749",
"0.6754731",
"0.6750861",
"0.6749626",
"0.6745531",
"0.6744763",
"0.6743289",
"0.67418313",
"0.6734197",
"0.6732776",
"0.67303044",
"0.67287326",
"0.67265445",
"0.67261595"
] | 0.71005577 | 11 |
MarshalJSON supports json.Marshaler interface | func (v BlitzedItemResponse) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjson6a975c40EncodeJsonBenchmark4(&w, v)
return w.Buffer.BuildBytes(), w.Error
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Marshal(v Marshaler) ([]byte, error) {\n\tw := jwriter.Writer{}\n\tv.MarshalEasyJSON(&w)\n\treturn w.BuildBytes()\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tif ImplementsPreJSONMarshaler(v) {\n\t\terr := v.(PreJSONMarshaler).PreMarshalJSON()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn json.Marshal(v)\n}",
"func (j *JSON) Marshal(target interface{}) (output interface{}, err error) {\n\treturn jsonEncoding.Marshal(target)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(v Marshaler) ([]byte, error) {\n\tif isNilInterface(v) {\n\t\treturn nullBytes, nil\n\t}\n\n\tw := jwriter.Writer{}\n\tv.MarshalTinyJSON(&w)\n\treturn w.BuildBytes()\n}",
"func JsonMarshal(t interface{}) ([]byte, error) {\n\tbuffer := &bytes.Buffer{}\n\tenc := json.NewEncoder(buffer)\n\tenc.SetEscapeHTML(false)\n\terr := enc.Encode(t)\n\treturn buffer.Bytes(), err\n}",
"func jsonMarshal(t interface{}) ([]byte, error) {\n\tbuffer := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buffer)\n\tencoder.SetEscapeHTML(false)\n\tencoder.SetIndent(\"\", \" \")\n\terr := encoder.Encode(t)\n\treturn buffer.Bytes(), err\n}",
"func marshal() {\n\tfmt.Println(\"=== json.marshal ===\")\n\tryan := &Person{\"Ryan\", 25}\n\twire, err := json.Marshal(ryan)\n\tcheck(err)\n\tfmt.Println(string(wire))\n}",
"func jsonMarshal(t interface{}) ([]byte, error) {\n\tvar buffer bytes.Buffer\n\tencoder := json.NewEncoder(&buffer)\n\tencoder.SetEscapeHTML(false)\n\tif err := encoder.Encode(t); err != nil {\n\t\treturn nil, err\n\t}\n\t// Prettify\n\tvar out bytes.Buffer\n\tif err := json.Indent(&out, buffer.Bytes(), \"\", \"\\t\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn out.Bytes(), nil\n}",
"func JSONEncoder() Encoder { return jsonEncoder }",
"func JSONMarshal(data interface{}) ([]byte, error) {\n\tvar b []byte\n\tvar err error\n\n\tb, err = json.MarshalIndent(data, \"\", \" \")\n\n\treturn b, err\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn canonicaljson.Transform(b)\n}",
"func (c *JsonCodec) Marshal(object interface{}, options map[string]interface{}) ([]byte, error) {\n\treturn jsonEncoding.Marshal(object)\n}",
"func (c *JSONCodec) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(in interface{}) ([]byte, error) {\n\tres, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(in)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"marshaling error: %w\", err)\n\t}\n\treturn res, nil\n}",
"func Marshal(p Payload) ([]byte, error) {\n\treturn json.Marshal(p)\n}",
"func (j *JsonlMarshaler) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func (s *Serializer) Marshal(v interface{}) ([]byte, error) {\n\treturn jsoniter.Marshal(v)\n}",
"func (j *TextMarshaler) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\treturn replaceUnicodeConversion(b), err\n}",
"func Marshal(object interface{}) (data string, err error) {\n\tif t, err := json.Marshal(object); err != nil {\n\t\tdata = \"\"\n\t} else {\n\t\tdata = string(t)\n\t}\n\treturn\n}",
"func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) {\n\tswitch v.(type) {\n\tcase *distribute.GetResponse:\n\t\tvalue, err := protobuf.MarshalAny(v.(*distribute.GetResponse).Fields)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn json.Marshal(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"fields\": value,\n\t\t\t},\n\t\t)\n\tcase *distribute.SearchResponse:\n\t\tvalue, err := protobuf.MarshalAny(v.(*distribute.SearchResponse).SearchResult)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn json.Marshal(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"search_result\": value,\n\t\t\t},\n\t\t)\n\tdefault:\n\t\treturn json.Marshal(v)\n\t}\n}",
"func (j *JSON) Marshal(obj interface{}) error {\n\tres, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Call our implementation of\n\t// JSON UnmarshalJSON through json.Unmarshal\n\t// to set the result to the JSON object\n\treturn json.Unmarshal(res, j)\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\trv := reflect.ValueOf(v)\n\tif rv.Kind() != reflect.Slice {\n\t\treturn nil, &InvalidMarshalError{rv.Kind()}\n\t}\n\n\tvar buf bytes.Buffer\n\tencoder := json.NewEncoder(&buf)\n\tfor i := 0; i < rv.Len(); i++ {\n\t\tif err := encoder.Encode(rv.Index(i).Interface()); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn buf.Bytes(), nil\n}",
"func marshalJSON(i *big.Int) ([]byte, error) {\n\ttext, err := i.MarshalText()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(string(text))\n}",
"func (p *HJSON) Marshal(o map[string]interface{}) ([]byte, error) {\n\treturn hjson.Marshal(o)\n}",
"func JSONMarshal(v interface{}) ([]byte, error) {\n\tb, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn b, err\n\t}\n\tb = bytes.Replace(b, []byte(\"\\\\u003c\"), []byte(\"<\"), -1)\n\tb = bytes.Replace(b, []byte(\"\\\\u003e\"), []byte(\">\"), -1)\n\tb = bytes.Replace(b, []byte(\"\\\\u0026\"), []byte(\"&\"), -1)\n\treturn b, err\n}",
"func JSONMarshal(content interface{}, escape bool) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := json.NewEncoder(&buf)\n\tenc.SetEscapeHTML(escape)\n\tif err := enc.Encode(content); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}",
"func MarshalJSON(v interface{}, config MarshalConfig) ([]byte, error) {\n\tres, err := Marshal(v, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(res)\n}",
"func (jz JSONGzipEncoding) Marshal(v interface{}) ([]byte, error) {\n\tbuf, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// var bufSizeBefore = len(buf)\n\n\tbuf, err = GzipEncode(buf)\n\t// coloredoutput.Infof(\"gzip_json_compress_ratio=%d/%d=%.2f\",\n\t// bufSizeBefore, len(buf), float64(bufSizeBefore)/float64(len(buf)))\n\treturn buf, err\n}",
"func (c *Codec) Marshal(v interface{}) ([]byte, error) {\n\tresult, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, xerrors.New(err.Error())\n\t}\n\n\treturn result, nil\n}",
"func Marshal(obj interface{}) ([]byte, error) {\n\treturn mgobson.Marshal(obj)\n}",
"func jsonify(v interface{}) string { return string(mustMarshalJSON(v)) }",
"func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) {\n\tif isNilInterface(v) {\n\t\treturn w.Write(nullBytes)\n\t}\n\n\tjw := jwriter.Writer{}\n\tv.MarshalTinyJSON(&jw)\n\treturn jw.DumpTo(w)\n}",
"func (sc *Contract) Marshal() ([]byte, error) {\n\treturn json.Marshal(sc)\n}",
"func (f *Formatter) Marshal(v interface{}) ([]byte, error) {\n\tdata, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn f.Format(data)\n}",
"func marshal(v interface{}) []byte {\n\tb, err := json.Marshal(v)\n\tassert(err == nil, \"marshal error: %s\", err)\n\treturn b\n}",
"func JSONMarshal(obj interface{}) ([]byte, error) {\n\tb := new(bytes.Buffer)\n\tenc := json.NewEncoder(b)\n\tenc.SetEscapeHTML(false)\n\terr := enc.Encode(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// json.NewEncoder.Encode adds a final '\\n', json.Marshal does not.\n\t// Let's keep the default json.Marshal behaviour.\n\tres := b.Bytes()\n\tif len(res) >= 1 && res[len(res)-1] == '\\n' {\n\t\tres = res[:len(res)-1]\n\t}\n\treturn res, nil\n}",
"func DefaultMarshalJSON(obj interface{}) ([]byte, error) {\n\treturn json.MarshalIndent(obj, \"\", \" \")\n}",
"func marshal(v interface{}) string {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(b)\n}",
"func MarshalJSON(v interface{}) []byte {\n\tdata, err := json.Marshal(v)\n\tAbortIf(err)\n\treturn data\n}",
"func encode(ins interface{}) ([]byte, error) {\n\treturn json.Marshal(ins)\n}",
"func Marshal(data interface{}) ([]byte, error) {\n\tdocument, err := MarshalToStruct(data, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn json.Marshal(document)\n}",
"func NewJSONMarshaler() Marshaler {\n\treturn newJSONMarshaler()\n}",
"func JsonMarshal(val any) ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buf)\n\tencoder.SetEscapeHTML(false)\n\tif err := encoder.Encode(val); err != nil {\n\t\treturn nil, err\n\t}\n\t// Return without a trailing line feed.\n\tlineTerminatedJson := buf.Bytes()\n\treturn bytes.TrimSuffix(lineTerminatedJson, []byte(\"\\n\")), nil\n}",
"func (j *jsonNative) MarshalJSON() ([]byte, error) {\n\tvar buf fflib.Buffer\n\tif j == nil {\n\t\tbuf.WriteString(\"null\")\n\t\treturn buf.Bytes(), nil\n\t}\n\terr := j.MarshalJSONBuf(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}",
"func (j *Publisher) MarshalJSON() ([]byte, error) {\n\tvar buf fflib.Buffer\n\tif j == nil {\n\t\tbuf.WriteString(\"null\")\n\t\treturn buf.Bytes(), nil\n\t}\n\terr := j.MarshalJSONBuf(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}",
"func serialize(toMarshal interface{}) *bytes.Buffer {\n\tjsonStr, _ := json.Marshal(toMarshal)\n\treturn bytes.NewBuffer(jsonStr)\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\tif err != nil {\n\t\t//return []byte{}, fmt.Errorf(\"Erro no json %v\", err)\n\t\treturn []byte{}, errors.New(fmt.Sprintf(\"Erro no json %v\", err))\n\t}\n\treturn bs, nil\n}",
"func (r *Anilist) Marshal() ([]byte, error) {\n\treturn json.Marshal(r)\n}",
"func JSONEncode(data interface{}) string {\n\tbt, _ := json.Marshal(data)\n\treturn string(bt)\n}",
"func Marshal(o interface{}) ([]byte, error) {\n\tj, err := json.Marshal(o)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error marshaling into JSON: %v\", err)\n\t}\n\n\ty, err := JSONToYAML(j)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error converting JSON to YAML: %v\", err)\n\t}\n\n\treturn y, nil\n}",
"func (v Post) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson783c1624EncodeGithubComGobwasVk7(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {\n\tbs, fnerr := iv.MarshalJSON()\n\tf.e.marshalAsis(bs, fnerr)\n}",
"func Marshal(obj interface{}) ([]byte, error) {\n\treturn MarshalValue(reflect.ValueOf(obj))\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\tif err != nil {\n\t\t// return []byte{}, errors.New(fmt.Sprintf(\"Can't create json with error: %v\", err))\n\t\treturn []byte{}, fmt.Errorf(\"Can't create json for person: %v error: %v\", a, err)\n\t}\n\treturn bs, nil\n}",
"func (v Join) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer21(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func JsonEncode(i interface{}) string {\n\tb, err := json.Marshal(i)\n\n\tif err != nil {\n\t\tfmt.Println(\"util.getJsonStr.error\", err)\n\t\treturn \"\"\n\t}\n\n\treturn string(b)\n}",
"func (b *SampleFJSONBuilder) Marshal(orig *SampleF) ([]byte, error) {\n\tret, err := b.Convert(orig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(ret)\n}",
"func (m *Marshaler) JSON(v interface{}) ([]byte, error) {\n\tif _, ok := v.(proto.Message); ok {\n\t\tvar buf bytes.Buffer\n\t\tjm := &jsonpb.Marshaler{}\n\t\tjm.OrigName = true\n\t\tif err := jm.Marshal(&buf, v.(proto.Message)); err != nil {\n\t\t\treturn []byte{}, err\n\t\t}\n\n\t\tif m.FilterProtoJson {\n\t\t\treturn m.FilterJsonWithStruct(buf.Bytes(), v)\n\t\t}\n\t\treturn buf.Bytes(), nil\n\t}\n\treturn json.Marshal(v)\n}",
"func (v ExportItem) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson1(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func mustMarshalJSON(v interface{}) []byte {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\tpanic(\"marshal json: \" + err.Error())\n\t}\n\treturn b\n}",
"func (v publicKey) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection(w, v)\n}",
"func EncodedJson(v interface{}) []byte {\n\tif p, err := json.Marshal(v); err != nil {\n\t\treturn []byte{}\n\t} else {\n\t\treturn p\n\t}\n}",
"func (js JSONSerializable) MarshalJSON() ([]byte, error) {\n\tif !js.Valid {\n\t\treturn json.Marshal(nil)\n\t}\n\tjsWithHex := replaceBytesWithHex(js.Val)\n\treturn json.Marshal(jsWithHex)\n}",
"func MarshalJSON(v interface{}) string {\n\tcontents, _ := json.MarshalIndent(v, \"\", \" \")\n\treturn string(contents)\n}",
"func (j Json) MarshalJSON() ([]byte, error) {\n\treturn j.ToJson()\n}",
"func (j JSON) MarshalJSON() ([]byte, error) {\n\tif j.Valid {\n\t\treturn json.Marshal(j.Map)\n\t}\n\n\treturn json.Marshal(nil)\n}",
"func (p PatchObject) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"tags\", p.Tags)\n\treturn json.Marshal(objectMap)\n}",
"func (v Posts) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson783c1624EncodeGithubComGobwasVk(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (spbi SuccessfulPropertyBatchInfo) MarshalJSON() ([]byte, error) {\n\tspbi.Kind = KindSuccessful\n\tobjectMap := make(map[string]interface{})\n\tif spbi.Properties != nil {\n\t\tobjectMap[\"Properties\"] = spbi.Properties\n\t}\n\tif spbi.Kind != \"\" {\n\t\tobjectMap[\"Kind\"] = spbi.Kind\n\t}\n\treturn json.Marshal(objectMap)\n}",
"func toJSON(a interface{}) ([]byte, error) {\n\tbs, err := json.Marshal(a)\n\n\tif err != nil {\n\t\treturn []byte{}, fmt.Errorf(\"Error caught by Onur Gurel\")\n\t}\n\n\treturn bs, nil\n}",
"func (v BindParams) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoTethering2(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (handler Handler) EncodeJSON(v interface{}) (b []byte, err error) {\n\n\t//if(w.Get(\"pretty\",\"false\")==\"true\"){\n\tb, err = json.MarshalIndent(v, \"\", \" \")\n\t//}else{\n\t//\tb, err = json.Marshal(v)\n\t//}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn b, nil\n}",
"func Marshal(val interface{}) ([]byte, error) {}",
"func testJSONMarshal(t *testing.T, v interface{}, want string) {\n\tj, err := json.Marshal(v)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to marshal JSON for %v\", v)\n\t}\n\n\tw := new(bytes.Buffer)\n\terr = json.Compact(w, []byte(want))\n\tif err != nil {\n\t\tt.Errorf(\"String is not valid json: %s\", want)\n\t}\n\n\tif w.String() != string(j) {\n\t\tt.Errorf(\"json.Marshal(%q) returned %s, want %s\", v, j, w)\n\t}\n\n\t// now go the other direction and make sure things unmarshal as expected\n\tu := reflect.ValueOf(v).Interface()\n\tif err := json.Unmarshal([]byte(want), u); err != nil {\n\t\tt.Errorf(\"Unable to unmarshal JSON for %v: %v\", want, err)\n\t}\n\n\tif !reflect.DeepEqual(v, u) {\n\t\tt.Errorf(\"json.Unmarshal(%q) returned %s, want %s\", want, u, v)\n\t}\n}",
"func (o *Object) MarshalJSON() ([]byte, error) {\n\tctx := _builtinJSON_stringifyContext{\n\t\tr: o.runtime,\n\t}\n\tex := o.runtime.vm.try(func() {\n\t\tif !ctx.do(o) {\n\t\t\tctx.buf.WriteString(\"null\")\n\t\t}\n\t})\n\tif ex != nil {\n\t\treturn nil, ex\n\t}\n\treturn ctx.buf.Bytes(), nil\n}",
"func testJSONMarshal(t *testing.T, v interface{}, want string) {\n\tj, err := json.Marshal(v)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to marshal JSON for %v\", v)\n\t}\n\n\tw := new(bytes.Buffer)\n\terr = json.Compact(w, []byte(want))\n\tif err != nil {\n\t\tt.Errorf(\"String is not valid json: %s\", want)\n\t}\n\n\tif w.String() != string(j) {\n\t\tt.Errorf(\"json.Marshal(%q) returned %s, want %s\", v, j, w)\n\t}\n\n\t// now go the other direction and make sure things unmarshal as expected\n\tu := reflect.ValueOf(v).Interface()\n\tif err := json.Unmarshal([]byte(want), u); err != nil {\n\t\tt.Errorf(\"Unable to unmarshal JSON for %v\", want)\n\t}\n\n\tif !reflect.DeepEqual(v, u) {\n\t\tt.Errorf(\"json.Unmarshal(%q) returned %s, want %s\", want, u, v)\n\t}\n}",
"func (v item) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonD2b7633eEncodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (i Interface) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"etag\", i.Etag)\n\tpopulate(objectMap, \"extendedLocation\", i.ExtendedLocation)\n\tpopulate(objectMap, \"id\", i.ID)\n\tpopulate(objectMap, \"location\", i.Location)\n\tpopulate(objectMap, \"name\", i.Name)\n\tpopulate(objectMap, \"properties\", i.Properties)\n\tpopulate(objectMap, \"tags\", i.Tags)\n\tpopulate(objectMap, \"type\", i.Type)\n\treturn json.Marshal(objectMap)\n}",
"func marshalJSON(namingStrategy string, that interface{}) ([]byte, error) {\n\tout := map[string]interface{}{}\n\tt := reflect.TypeOf(that)\n\tv := reflect.ValueOf(that)\n\n\tfnctn := v.MethodByName(namingStrategy)\n\tfname := func(params ...interface{}) string {\n\t\tin := make([]reflect.Value, len(params))\n\t\tfor k, param := range params {\n\t\t\tin[k] = reflect.ValueOf(param)\n\t\t}\n\t\treturn fnctn.Call(in)[0].String()\n\t}\n\toutName := \"\"\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tswitch n := f.Tag.Get(\"json\"); n {\n\t\tcase \"\":\n\t\t\toutName = f.Name\n\t\tcase \"-\":\n\t\t\toutName = \"\"\n\t\tdefault:\n\t\t\toutName = fname(n)\n\t\t}\n\t\tif outName != \"\" {\n\t\t\tout[outName] = v.Field(i).Interface()\n\t\t}\n\t}\n\treturn json.Marshal(out)\n}",
"func testMarshalJSON(t *testing.T, cmd interface{}) {\n\tjsonCmd, err := json.Marshal(cmd)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tfmt.Println(string(jsonCmd))\n}",
"func (v PbTestObject) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson5fcf962eEncodeGithubComJsonIteratorGoBenchmarkWith10IntFields(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (pbi PropertyBatchInfo) MarshalJSON() ([]byte, error) {\n\tpbi.Kind = KindPropertyBatchInfo\n\tobjectMap := make(map[string]interface{})\n\tif pbi.Kind != \"\" {\n\t\tobjectMap[\"Kind\"] = pbi.Kind\n\t}\n\treturn json.Marshal(objectMap)\n}",
"func My_json(demo interface{}) *bytes.Buffer {\r\n\tif bs, err := json.Marshal(demo); err == nil {\r\n\t\treq := bytes.NewBuffer([]byte(bs))\r\n\t\treturn req\r\n\t} else {\r\n\t\tpanic(err)\r\n\t}\r\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn MarshalEx(v, false)\n}",
"func MarshalJSON(a interface{}) (b []byte, err error) {\n\tif m, ok := a.(proto.Message); ok {\n\t\tmarshaller := &jsonpb.Marshaler{}\n\t\tvar buf bytes.Buffer\n\t\terr = marshaller.Marshal(&buf, m)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tb = buf.Bytes()\n\t} else {\n\t\tb, err = json.Marshal(a)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}",
"func (v PostParams) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels10(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (v Post) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson5a72dc82EncodeGithubComTimRazumovTechnoparkDBAppModels6(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (o *ExportDataPartial) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(o.ToMap())\n}",
"func (n *Node) Marshal() ([]byte, error) {\n\treturn json.Marshal(n)\n}",
"func (n *Node) Marshal() ([]byte, error) {\n\treturn json.Marshal(n)\n}",
"func jsonEnc(in T) ([]byte, error) {\n\treturn jsonx.Marshal(in)\n}",
"func (s *HTTPServer) marshalJSON(req *http.Request, obj interface{}) ([]byte, error) {\n\tif _, ok := req.URL.Query()[\"pretty\"]; ok || s.agent.config.DevMode {\n\t\tbuf, err := json.MarshalIndent(obj, \"\", \" \")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf = append(buf, \"\\n\"...)\n\t\treturn buf, nil\n\t}\n\n\tbuf, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf, err\n}",
"func (m Message) Marshal() ([]byte, error) {\n\treturn jsoniter.Marshal(m)\n}",
"func (m Json) MarshalJSON() ([]byte, error) {\n\tif m == nil {\n\t\treturn []byte(\"null\"), nil\n\t}\n\treturn m, nil\n}",
"func Marshal(v interface{}) ([]byte, error) {\n\treturn NewFormatter().Marshal(v)\n}",
"func (s *ServiceSecrets) MarshalJson() ([]byte, error) {\n\treturn json.Marshal(s)\n}",
"func JsonEncode(data []byte, v interface{}) error {\n\n\treturn json.Unmarshal(data, v)\n}",
"func (out *GetOutput) Marshal() ([]byte, error) {\n\treturn json.Marshal(out)\n}"
] | [
"0.75134",
"0.7502133",
"0.7500753",
"0.74823195",
"0.7446766",
"0.7371689",
"0.73370403",
"0.7304601",
"0.72591853",
"0.72539127",
"0.72181046",
"0.717537",
"0.7162588",
"0.7161582",
"0.71608186",
"0.7072197",
"0.70587647",
"0.7044735",
"0.7022404",
"0.6973228",
"0.6963657",
"0.69578344",
"0.69243026",
"0.6924262",
"0.68824863",
"0.68681127",
"0.68572986",
"0.6818534",
"0.68102",
"0.67969906",
"0.67913324",
"0.67774016",
"0.67717487",
"0.67700523",
"0.6754375",
"0.67300195",
"0.67154574",
"0.6711641",
"0.6708163",
"0.6686554",
"0.6676971",
"0.6670713",
"0.6667217",
"0.6665734",
"0.6651805",
"0.664897",
"0.6601639",
"0.65936595",
"0.6570477",
"0.65671986",
"0.65637034",
"0.6562716",
"0.65555567",
"0.6544248",
"0.65373516",
"0.6532906",
"0.65273225",
"0.65230805",
"0.6517934",
"0.65155387",
"0.6507946",
"0.65065837",
"0.65061134",
"0.65058106",
"0.6502681",
"0.6501059",
"0.6492431",
"0.64840174",
"0.6483743",
"0.64832276",
"0.64799786",
"0.6479202",
"0.6476907",
"0.64739543",
"0.6469417",
"0.6467924",
"0.6463269",
"0.64624554",
"0.6457592",
"0.6454149",
"0.6448956",
"0.6447831",
"0.64472353",
"0.6436354",
"0.64353037",
"0.64196956",
"0.64188385",
"0.64096874",
"0.64083934",
"0.6407883",
"0.6405311",
"0.6405311",
"0.640528",
"0.6403627",
"0.6403197",
"0.6402974",
"0.64011514",
"0.6401083",
"0.6395421",
"0.6394115"
] | 0.6460338 | 78 |
MarshalEasyJSON supports easyjson.Marshaler interface | func (v BlitzedItemResponse) MarshalEasyJSON(w *jwriter.Writer) {
easyjson6a975c40EncodeJsonBenchmark4(w, v)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v item) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(w, v)\n}",
"func (v Fruit) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels11(w, v)\n}",
"func (ce *CustomEvent) MarshalEasyJSON(w *jwriter.Writer) {\n\tce.marshalerCtor().MarshalEasyJSON(w)\n}",
"func (v Boo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeMsgpJson(w, v)\n}",
"func (v DocumentResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark3(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk7(w, v)\n}",
"func (v invocationMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson2802b09fEncodeGithubComPhilippseithSignalr1(w, v)\n}",
"func (v Native) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson10(w, v)\n}",
"func (v ItemCheckResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark2(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson393a2a40EncodeCodegen(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels(w, v)\n}",
"func (v ExportItem) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson1(w, v)\n}",
"func (v Format) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson14(w, v)\n}",
"func (v managerListener) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonEd74d837EncodeGithubComKihamoBoggartComponentsBoggartInternalHandlers(w, v)\n}",
"func (v Part) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer12(w, v)\n}",
"func (v publicKey) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection(w, v)\n}",
"func (v VisitArray) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE564fc13EncodeGithubComLa0rgHighloadcupModel(w, v)\n}",
"func (v Banner) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson23(w, v)\n}",
"func (v MOTD) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer19(w, v)\n}",
"func (v ProductToAdd) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels1(w, v)\n}",
"func (v App) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson25(w, v)\n}",
"func (v Ingredient) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels8(w, v)\n}",
"func (v Visit) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE564fc13EncodeGithubComLa0rgHighloadcupModel1(w, v)\n}",
"func (v InfoUser) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeJsongen3(w, v)\n}",
"func (v Deal) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson8a221a72EncodeGithubComVungleVungoOpenrtb(w, v)\n}",
"func (v FormDataMQ) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson(w, v)\n}",
"func (v Msg) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels6(w, v)\n}",
"func (v Nick) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer14(w, v)\n}",
"func (v Program) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson1c6ddb42EncodeGithubComSturfeeincGlTF(w, v)\n}",
"func (v OrderCheckResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeJsonBenchmark1(w, v)\n}",
"func (v BasicUser) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeJsongen4(w, v)\n}",
"func (v Pet) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson14a1085EncodeGithubComIamStubborNPetstoreDbModels1(w, v)\n}",
"func (v ChannelForward) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer32(w, v)\n}",
"func (v Element) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson2(w, v)\n}",
"func (v Pmp) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson9(w, v)\n}",
"func (v Responce) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6a975c40EncodeGithubComSerhio83DruidPkgStructs(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson5a72dc82EncodeGithubComTimRazumovTechnoparkDBAppModels6(w, v)\n}",
"func (c Context) MarshalEasyJSON(writer *ej_jwriter.Writer) {\n\tif err := c.Err(); err != nil {\n\t\twriter.Error = err\n\t\treturn\n\t}\n\twrappedWriter := jwriter.NewWriterFromEasyJSONWriter(writer)\n\tContextSerialization.MarshalToJSONWriter(&wrappedWriter, &c)\n}",
"func (v Features) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer25(w, v)\n}",
"func (v Node) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson6601e8cdEncodeGithubComSkydiveProjectSkydiveGraffitiApiTypes1(w, v)\n}",
"func (v Posts) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk(w, v)\n}",
"func (v ThirdParty) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson2(w, v)\n}",
"func (v Student) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonF1627ba7EncodeGithubComDuchiporexiaGoutilsXmsgTests1(w, v)\n}",
"func (v UsersHandler) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers(w, v)\n}",
"func (v Segment) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson6(w, v)\n}",
"func (v Info) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels13(w, v)\n}",
"func (v CBPerson) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE242b40eEncodeGithubComExampleSample2(w, v)\n}",
"func (v Invite) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer22(w, v)\n}",
"func (v GetUserResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers1(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson7da3ae25EncodeCourseraGolangHomeworks(w, v)\n}",
"func (v PostSource) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk2(w, v)\n}",
"func (v ShadowModelSt) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB7ed31d3EncodeMevericcoreMccommon5(w, v)\n}",
"func (v BindParams) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoTethering2(w, v)\n}",
"func (v Error) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer26(w, v)\n}",
"func (v Deal) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson16(w, v)\n}",
"func (v Message) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer18(w, v)\n}",
"func (v PostParams) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels10(w, v)\n}",
"func (v Musician) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson62dc445bEncode20211NoskoolTeamInternalAppMusiciansModels2(w, v)\n}",
"func (v Impression) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson12(w, v)\n}",
"func (v Post) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels11(w, v)\n}",
"func (v BaseInstrumentInfo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi128(w, v)\n}",
"func (v Grade) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonF1627ba7EncodeGithubComDuchiporexiaGoutilsXmsgTests2(w, v)\n}",
"func (v Mode) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer16(w, v)\n}",
"func (v Whois) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer1(w, v)\n}",
"func (v Foo) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonAbe23ddeEncodeGithubComUberZanzibarExamplesExampleGatewayBuildGenCodeClientsCorgeCorge1(w, v)\n}",
"func (v Source) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson3(w, v)\n}",
"func (v ProductExtended) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels5(w, v)\n}",
"func (v Away) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer33(w, v)\n}",
"func (c EventOutputContext) MarshalEasyJSON(writer *ej_jwriter.Writer) {\n\tif err := c.Err(); err != nil {\n\t\twriter.Error = err\n\t\treturn\n\t}\n\twrappedWriter := jwriter.NewWriterFromEasyJSONWriter(writer)\n\tContextSerialization.MarshalToJSONWriterEventOutput(&wrappedWriter, &c)\n}",
"func (v IngredientArr) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels7(w, v)\n}",
"func (v RiverConnection) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection4(w, v)\n}",
"func (v streamItemMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson2802b09fEncodeGithubComPhilippseithSignalr(w, v)\n}",
"func (v Vacancy) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeGithubComIskiyRabotauaTelegramBotPkgRabotaua4(w, v)\n}",
"func (v Item) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeDrhyuComIndexerModels2(w, v)\n}",
"func (v PostAttachement) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson783c1624EncodeGithubComGobwasVk6(w, v)\n}",
"func (v ExtFilter) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson795c59c6EncodeGrapeGuardRules11(w, v)\n}",
"func (v User) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson1(w, v)\n}",
"func (v ProductShrinked) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels3(w, v)\n}",
"func (v RiverConnectionJS) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection3(w, v)\n}",
"func (v DCCSend) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer27(w, v)\n}",
"func (v MediumPayload) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonE242b40eEncodeGithubComExampleSample1(w, v)\n}",
"func (v Stash) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeDrhyuComIndexerModels(w, v)\n}",
"func (v WSRequest) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer3(w, v)\n}",
"func (v ApiMessage) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi132(w, v)\n}",
"func (v flattenedField) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson390b7126EncodeGithubComChancedPicker35(w, v)\n}",
"func (v SFMetric) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson51bca34dEncodeGithubComSkydiveProjectSkydiveSflow2(w, v)\n}",
"func (v managerHandlerDevice) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonEd74d837EncodeGithubComKihamoBoggartComponentsBoggartInternalHandlers1(w, v)\n}",
"func (v BaseOp) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi125(w, v)\n}",
"func (v Data) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson17(w, v)\n}",
"func (v ResultReq) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi54(w, v)\n}",
"func (v CreateUserResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson84c0690eEncodeMainHandlers2(w, v)\n}",
"func (v CreateIsolatedWorldReturns) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoPage90(w, v)\n}",
"func (v ServerKeys) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson94b2531bEncodeGitRonaksoftComRiverWebWasmConnection2(w, v)\n}",
"func (v ShadowUpdateMsgSt) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB7ed31d3EncodeMevericcoreMccommon1(w, v)\n}",
"func (v WSResponse) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer2(w, v)\n}",
"func (v Content) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson18(w, v)\n}",
"func (v Join) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer21(w, v)\n}",
"func (v Bid) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson22(w, v)\n}",
"func (v moreLikeThisQuery) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson390b7126EncodeGithubComChancedPicker18(w, v)\n}",
"func (v Device) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonB27eec76EncodeGithubComTisonetOpenrtbEasyjson15(w, v)\n}"
] | [
"0.77204984",
"0.7672636",
"0.7591104",
"0.7549626",
"0.7543635",
"0.7506079",
"0.74917865",
"0.74814856",
"0.747845",
"0.74776804",
"0.7472024",
"0.74326074",
"0.74075466",
"0.74005824",
"0.7393129",
"0.7387327",
"0.7384699",
"0.7380124",
"0.7379066",
"0.73720104",
"0.73705596",
"0.73606724",
"0.7350565",
"0.73454297",
"0.73452127",
"0.73444057",
"0.73321444",
"0.7307502",
"0.73051214",
"0.73029035",
"0.729492",
"0.72922665",
"0.7291515",
"0.72859746",
"0.72832805",
"0.7281536",
"0.72810227",
"0.72690594",
"0.7265171",
"0.7263047",
"0.72606456",
"0.7260384",
"0.725305",
"0.72479904",
"0.724739",
"0.7246577",
"0.72447056",
"0.7243153",
"0.72394603",
"0.72375077",
"0.7237401",
"0.7235131",
"0.7232335",
"0.7225127",
"0.72224236",
"0.722217",
"0.722101",
"0.7216019",
"0.7215666",
"0.72131526",
"0.7212736",
"0.71996164",
"0.7197805",
"0.71939874",
"0.71824425",
"0.7180386",
"0.71780163",
"0.7173894",
"0.71658254",
"0.7163895",
"0.7163262",
"0.7161551",
"0.7154433",
"0.7154243",
"0.7153383",
"0.71522945",
"0.71498144",
"0.71420634",
"0.71419924",
"0.7141065",
"0.7132712",
"0.71298224",
"0.71237564",
"0.71226513",
"0.71173185",
"0.7116985",
"0.7109691",
"0.71048075",
"0.7099136",
"0.7098513",
"0.70949537",
"0.7090401",
"0.7087397",
"0.7085066",
"0.70830184",
"0.70823175",
"0.7082235",
"0.7080599",
"0.7080532",
"0.7079698"
] | 0.7653424 | 2 |