Dataset columns (one row per source file):

filename: string (lengths 4 to 198)
content: string (lengths 25 to 939k)
environment: sequence
variablearg: sequence
constarg: sequence
variableargjson: string (1 distinct value)
constargjson: string (lengths 2 to 3.9k)
lang: string (3 distinct values)
constargcount: float64 (range 0 to 129)
variableargcount: float64 (range 0 to 0)
sentence: string (1 distinct value)
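Before the records themselves, here is a minimal sketch of how one row could be represented and inspected in Python. The dict literal mirrors the first record below (setup.py); treating constargjson as the JSON-encoded copy of constarg is an inference from the values shown, not documented behaviour, and the dict form itself is only an illustration.

```python
import json

# One record written out as a plain dict for illustration.
# Field names follow the schema above; values come from the setup.py row below.
record = {
    "filename": "setup.py",
    "environment": [],
    "variablearg": [],
    "constarg": ["DEPENDENCY_INJECTOR_DEBUG_MODE"],
    "variableargjson": "[]",
    "constargjson": '["DEPENDENCY_INJECTOR_DEBUG_MODE"]',
    "lang": "python",
    "constargcount": 1.0,
    "variableargcount": 0.0,
}

# The *json columns appear to round-trip to their sequence counterparts.
assert json.loads(record["constargjson"]) == record["constarg"]
print(record["filename"], record["lang"], int(record["constargcount"]))
```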
filename: setup.py
content:
"""`Dependency injector` setup script.""" import os import re from setuptools import setup, Extension # Defining setup variables: defined_macros = dict() defined_macros['CYTHON_CLINE_IN_TRACEBACK'] = 0 # Getting description: with open('README.rst') as readme_file: description = readme_file.read() # Getting requirements: with open('requirements.txt') as requirements_file: requirements = requirements_file.readlines() # Getting version: with open('src/dependency_injector/__init__.py') as init_file: version = re.search('__version__ = \'(.*?)\'', init_file.read()).group(1) # Adding debug options: if os.environ.get('DEPENDENCY_INJECTOR_DEBUG_MODE') == '1': defined_macros['CYTHON_TRACE'] = 1 defined_macros['CYTHON_TRACE_NOGIL'] = 1 defined_macros['CYTHON_CLINE_IN_TRACEBACK'] = 1 setup(name='dependency-injector', version=version, description='Dependency injection framework for Python', long_description=description, author='ETS Labs', author_email='rmogilatov@gmail.com', maintainer='Roman Mogylatov', maintainer_email='rmogilatov@gmail.com', url='https://github.com/ets-labs/python-dependency-injector', download_url='https://pypi.python.org/pypi/dependency_injector', packages=[ 'dependency_injector', 'dependency_injector.ext', ], package_dir={ '': 'src', }, package_data={ 'dependency_injector': ['*.pxd', '*.pyi', 'py.typed'], }, ext_modules=[ Extension('dependency_injector.containers', ['src/dependency_injector/containers.c'], define_macros=list(defined_macros.items()), extra_compile_args=['-O2']), Extension('dependency_injector.providers', ['src/dependency_injector/providers.c'], define_macros=list(defined_macros.items()), extra_compile_args=['-O2']), ], install_requires=requirements, extras_require={ 'yaml': [ 'pyyaml', ], 'pydantic': [ 'pydantic', ], 'flask': [ 'flask', ], 'aiohttp': [ 'aiohttp', ], }, zip_safe=True, license='BSD New', platforms=['any'], keywords=[ 'Dependency injection', 'DI', 'Inversion of Control', 'IoC', 'Factory', 'Singleton', 'Design patterns', 'Flask', ], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Framework :: AsyncIO', 'Framework :: Bottle', 'Framework :: Django', 'Framework :: Flask', 'Framework :: Pylons', 'Framework :: Pyramid', 'Framework :: Pytest', 'Framework :: TurboGears', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', ])
environment: []
variablearg: []
constarg: [ "DEPENDENCY_INJECTOR_DEBUG_MODE" ]
variableargjson: []
constargjson: ["DEPENDENCY_INJECTOR_DEBUG_MODE"]
lang: python
constargcount: 1
variableargcount: 0
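The row above ties setup.py to a single constant environment-variable name, DEPENDENCY_INJECTOR_DEBUG_MODE, which the script reads via os.environ.get to toggle the Cython tracing macros. A plausible way to recover such constant names from Python source is a small ast walk like the one below (Python 3.9+ AST layout); the extractor actually used to build this dataset is not described here, so treat this as an illustrative reconstruction only.

```python
import ast

def env_names_from_python(source: str) -> list[str]:
    """Collect string literals read or written via os.environ.get, os.getenv or os.environ[...]."""
    names: list[str] = []
    for node in ast.walk(ast.parse(source)):
        # os.environ.get("NAME", ...) and os.getenv("NAME", ...)
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
            func = node.func
            environ_get = (func.attr == "get"
                           and isinstance(func.value, ast.Attribute)
                           and func.value.attr == "environ")
            os_getenv = (func.attr == "getenv"
                         and isinstance(func.value, ast.Name)
                         and func.value.id == "os")
            if (environ_get or os_getenv) and node.args:
                first = node.args[0]
                if isinstance(first, ast.Constant) and isinstance(first.value, str):
                    names.append(first.value)
        # os.environ["NAME"] (Python 3.9+ stores the index directly in node.slice)
        if (isinstance(node, ast.Subscript)
                and isinstance(node.value, ast.Attribute)
                and node.value.attr == "environ"
                and isinstance(node.slice, ast.Constant)
                and isinstance(node.slice.value, str)):
            names.append(node.slice.value)
    return names
```

Applied to the setup.py content above, this returns ['DEPENDENCY_INJECTOR_DEBUG_MODE'], which lines up with the row's constarg and constargjson values.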
filename: litmus-portal/graphql-server/pkg/handlers/file_handler.go
content:
package handlers import ( "errors" "log" "net/http" "os" "github.com/gorilla/mux" "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/cluster" dbOperationsCluster "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb/cluster" "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/k8s" "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/types" "github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils" ) const ( clusterScope string = "cluster" namespaceScope string = "namespace" ) var subscriberConfiguration = &types.SubscriberConfigurationVars{ AgentNamespace: os.Getenv("AGENT_NAMESPACE"), AgentScope: os.Getenv("AGENT_SCOPE"), AgentDeployments: os.Getenv("AGENT_DEPLOYMENTS"), SubscriberImage: os.Getenv("SUBSCRIBER_IMAGE"), EventTrackerImage: os.Getenv("EVENT_TRACKER_IMAGE"), WorkflowControllerImage: os.Getenv("ARGO_WORKFLOW_CONTROLLER_IMAGE"), ChaosOperatorImage: os.Getenv("LITMUS_CHAOS_OPERATOR_IMAGE"), WorkflowExecutorImage: os.Getenv("ARGO_WORKFLOW_EXECUTOR_IMAGE"), ChaosRunnerImage: os.Getenv("LITMUS_CHAOS_RUNNER_IMAGE"), ChaosExporterImage: os.Getenv("LITMUS_CHAOS_EXPORTER_IMAGE"), ContainerRuntimeExecutor: os.Getenv("CONTAINER_RUNTIME_EXECUTOR"), Version: os.Getenv("VERSION"), } // FileHandler dynamically generates the manifest file and sends it as a response func FileHandler(w http.ResponseWriter, r *http.Request) { var ( vars = mux.Vars(r) token = vars["key"] ) response, statusCode, err := GetManifest(token) if err != nil { log.Print("error: ", err) utils.WriteHeaders(&w, statusCode) } utils.WriteHeaders(&w, statusCode) w.Write(response) } func GetManifest(token string) ([]byte, int, error) { id, err := cluster.ClusterValidateJWT(token) if err != nil { return nil, 404, err } reqCluster, err := dbOperationsCluster.GetCluster(id) if err != nil { return nil, 500, err } if os.Getenv("PORTAL_SCOPE") == "cluster" { subscriberConfiguration.GQLServerURI, err = k8s.GetServerEndpoint() if err != nil { return nil, 500, err } } else if os.Getenv("PORTAL_SCOPE") == "namespace" { subscriberConfiguration.GQLServerURI = os.Getenv("PORTAL_ENDPOINT") + "/query" } if !reqCluster.IsRegistered { var respData []byte if reqCluster.AgentScope == "cluster" { respData, err = utils.ManifestParser(reqCluster, "manifests/cluster", subscriberConfiguration) } else if reqCluster.AgentScope == "namespace" { respData, err = utils.ManifestParser(reqCluster, "manifests/namespace", subscriberConfiguration) } else { log.Print("ERROR- AGENT SCOPE NOT SELECTED!") } if err != nil { return nil, 500, err } return respData, 200, nil } else { return []byte("Cluster is already registered"), 409, nil } } // Returns manifest for a given cluster func GetManifestWithClusterID(id string, key string) ([]byte, error) { reqCluster, err := dbOperationsCluster.GetCluster(id) if err != nil { return nil, err } // Checking if cluster with given clusterID and accesskey is present if reqCluster.AccessKey != key { return nil, errors.New("Invalid access key") } if os.Getenv("PORTAL_SCOPE") == clusterScope { subscriberConfiguration.GQLServerURI, err = k8s.GetServerEndpoint() if err != nil { return nil, err } } else if os.Getenv("PORTAL_SCOPE") == namespaceScope { subscriberConfiguration.GQLServerURI = os.Getenv("PORTAL_ENDPOINT") + "/query" } var respData []byte if reqCluster.AgentScope == clusterScope { respData, err = utils.ManifestParser(reqCluster, "manifests/cluster", subscriberConfiguration) } else if reqCluster.AgentScope == namespaceScope { respData, err = 
utils.ManifestParser(reqCluster, "manifests/namespace", subscriberConfiguration) } else { log.Print("ERROR- AGENT SCOPE NOT SELECTED!") } if err != nil { return nil, err } return respData, nil }
[ "\"AGENT_NAMESPACE\"", "\"AGENT_SCOPE\"", "\"AGENT_DEPLOYMENTS\"", "\"SUBSCRIBER_IMAGE\"", "\"EVENT_TRACKER_IMAGE\"", "\"ARGO_WORKFLOW_CONTROLLER_IMAGE\"", "\"LITMUS_CHAOS_OPERATOR_IMAGE\"", "\"ARGO_WORKFLOW_EXECUTOR_IMAGE\"", "\"LITMUS_CHAOS_RUNNER_IMAGE\"", "\"LITMUS_CHAOS_EXPORTER_IMAGE\"", "\"CONTAINER_RUNTIME_EXECUTOR\"", "\"VERSION\"", "\"PORTAL_SCOPE\"", "\"PORTAL_SCOPE\"", "\"PORTAL_ENDPOINT\"", "\"PORTAL_SCOPE\"", "\"PORTAL_SCOPE\"", "\"PORTAL_ENDPOINT\"" ]
[]
[ "AGENT_SCOPE", "AGENT_NAMESPACE", "SUBSCRIBER_IMAGE", "AGENT_DEPLOYMENTS", "VERSION", "LITMUS_CHAOS_EXPORTER_IMAGE", "CONTAINER_RUNTIME_EXECUTOR", "ARGO_WORKFLOW_CONTROLLER_IMAGE", "LITMUS_CHAOS_OPERATOR_IMAGE", "PORTAL_SCOPE", "LITMUS_CHAOS_RUNNER_IMAGE", "EVENT_TRACKER_IMAGE", "ARGO_WORKFLOW_EXECUTOR_IMAGE", "PORTAL_ENDPOINT" ]
[]
["AGENT_SCOPE", "AGENT_NAMESPACE", "SUBSCRIBER_IMAGE", "AGENT_DEPLOYMENTS", "VERSION", "LITMUS_CHAOS_EXPORTER_IMAGE", "CONTAINER_RUNTIME_EXECUTOR", "ARGO_WORKFLOW_CONTROLLER_IMAGE", "LITMUS_CHAOS_OPERATOR_IMAGE", "PORTAL_SCOPE", "LITMUS_CHAOS_RUNNER_IMAGE", "EVENT_TRACKER_IMAGE", "ARGO_WORKFLOW_EXECUTOR_IMAGE", "PORTAL_ENDPOINT"]
go
14
0
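For the Go rows, every extracted name corresponds to an os.Getenv("...") call in the file: the environment column keeps the raw quoted literals, duplicates included, while constarg holds the deduplicated bare names. A regex-based sketch of that relationship is shown below, again in Python and again as an assumed reconstruction rather than the dataset's actual tooling.

```python
import re

# Quoted first argument of os.Getenv("..."), quotes kept, mirroring the environment column.
GETENV_CALL = re.compile(r'os\.Getenv\(("[^"]*")\)')

def env_names_from_go(source: str) -> tuple[list[str], list[str]]:
    raw = GETENV_CALL.findall(source)                           # literals, duplicates kept
    names = list(dict.fromkeys(lit.strip('"') for lit in raw))  # unique names, first-seen order
    return raw, names
```

Run over the file_handler.go content above, this finds 18 raw literals and 14 unique names, matching the length of the environment list and the constargcount of this row (the ordering of constarg differs from first appearance in the file, so the stored order is evidently something else).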
filename: main.go
content:
package main import ( "database/sql" "encoding/json" "fmt" "net/http" "strconv" "time" "github.com/dgrijalva/jwt-go" "github.com/gorilla/mux" _ "github.com/mattn/go-sqlite3" ) var database, _ = sql.Open("sqlite3", "./post.db") var jwtKey = []byte("kartaca") var userId string type Middleware func(http.HandlerFunc) http.HandlerFunc type User struct { ID string `json:"id"` USERNAME string `json:"username"` PASSWORD string `json:"password"` } type Post struct { USERID string `json:"userId"` MESSAGE string `json:"message"` DATE string `json:"date"` COUNT string `json:"count"` } type PostGet struct { USERNAME string `json:"username"` MESSAGE string `json:"message"` DATE string `json:"date"` } type Claims struct { ID string jwt.StandardClaims } func main() { router := mux.NewRouter() router.HandleFunc("/login", login).Methods("POST") createPost := Chain(createPost, loggingMiddleware()) router.HandleFunc("/posts", createPost).Methods("POST") getPost := Chain(getPost, loggingMiddleware()) router.HandleFunc("/posts", getPost).Methods("GET") simpleGetPost := Chain(simpleGetPost, loggingMiddleware()) router.HandleFunc("/posts/{id}", simpleGetPost).Methods("GET") updatePost := Chain(updatePost, loggingMiddleware()) router.HandleFunc("/posts/{id}", updatePost).Methods("PUT") deletePost := Chain(deletePost, loggingMiddleware()) router.HandleFunc("/posts/{id}", deletePost).Methods("DELETE") // headersOk := handlers.AllowedHeaders([]string{"X-Requested-With"}) // originsOk := handlers.AllowedOrigins([]string{os.Getenv("ORIGIN_ALLOWED")}) // methodsOk := handlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "OPTIONS"}) http.ListenAndServe(":8000", (router)) } func deletePost(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") postId := mux.Vars(r)["id"] smt, err := database.Prepare("DELETE FROM post WHERE id=?") if err != nil { fmt.Print(err) return } smt.Exec(postId) w.WriteHeader(http.StatusOK) } func updatePost(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") var post Post postId := mux.Vars(r)["id"] _ = json.NewDecoder(r.Body).Decode(&post) smt, err := database.Prepare("UPDATE post SET post_message = ? WHERE post.user_id=? 
AND id =?;") if err != nil { fmt.Print(err) return } smt.Exec(post.MESSAGE, userId, postId) w.WriteHeader(http.StatusOK) } func createPost(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") date := date() var post Post var postTemp PostGet var posts []PostGet _ = json.NewDecoder(r.Body).Decode(&post) stm, err := database.Prepare("INSERT INTO post (user_id, post_message, post_date) VALUES (?, ?, ?)") if err != nil { fmt.Print(err) return } stm.Exec(userId, post.MESSAGE, date) newPostCount, _ := strconv.ParseInt(postCount(), 10, 64) lastPostCount, _ := strconv.ParseInt(post.COUNT, 10, 64) count := newPostCount - lastPostCount rows, err := database.Query("SELECT post_message, post_date,username FROM post INNER JOIN user ON post.user_id = user.id ORDER BY post.id DESC LIMIT ?", count) if err != nil { fmt.Print(err) return } for rows.Next() { rows.Scan(&postTemp.MESSAGE, &postTemp.DATE, &postTemp.USERNAME) posts = append(posts, postTemp) } json.NewEncoder(w).Encode(posts) } func simpleGetPost(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") var posts PostGet postId := mux.Vars(r)["id"] rows, _ := database.Query("SELECT post_message, post_date,username FROM post INNER JOIN user ON post.user_id = user.id WHERE post.id=?", postId) for rows.Next() { rows.Scan(&posts.MESSAGE, &posts.DATE, &posts.USERNAME) } json.NewEncoder(w).Encode(posts) } func getPost(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Methods", "GET") w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token") var postTemp PostGet var posts []PostGet rows, _ := database.Query("SELECT post_message, post_date,username FROM post INNER JOIN user ON post.user_id = user.id") for rows.Next() { rows.Scan(&postTemp.MESSAGE, &postTemp.DATE, &postTemp.USERNAME) posts = append(posts, postTemp) } json.NewEncoder(w).Encode(posts) } func login(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Methods", "POST") w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization") var user User var id string _ = json.NewDecoder(r.Body).Decode(&user) fmt.Println(user) rows, _ := database.Query("SELECT id FROM user WHERE username=? AND password=?", user.USERNAME, user.PASSWORD) for rows.Next() { rows.Scan(&id) getToken(w, id) } } func Chain(f http.HandlerFunc, middlewares ...Middleware) http.HandlerFunc { for _, m := range middlewares { f = m(f) } return f } func getToken(w http.ResponseWriter, id string) { expirationTime := time.Now().Add(50 * time.Hour) claims := &Claims{ ID: id, StandardClaims: jwt.StandardClaims{ ExpiresAt: expirationTime.Unix(), }, } fmt.Println(id) token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) tokenString, err := token.SignedString(jwtKey) if err != nil { w.WriteHeader(http.StatusInternalServerError) return } fmt.Println(tokenString) smt, err := database.Prepare("UPDATE user SET token=? 
WHERE id=?") if err != nil { fmt.Print(err) return } smt.Exec(tokenString, id) http.SetCookie(w, &http.Cookie{ Name: "token", Value: tokenString, Expires: expirationTime, }) } func loggingMiddleware() Middleware { return func(next http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { c, err := r.Cookie("token") if err != nil { if err == http.ErrNoCookie { http.Error(w, "StatusUnauthorized", http.StatusUnauthorized) return } http.Error(w, "StatusBadRequest", http.StatusBadRequest) return } tknStr := c.Value claims := &Claims{} tkn, err := jwt.ParseWithClaims(tknStr, claims, func(token *jwt.Token) (interface{}, error) { return jwtKey, nil }) if err != nil { if err == jwt.ErrSignatureInvalid { http.Error(w, "StatusUnauthorized", http.StatusUnauthorized) return } http.Error(w, "StatusBadRequest", http.StatusBadRequest) return } if !tkn.Valid { http.Error(w, "StatusUnauthorized", http.StatusUnauthorized) return } userId = claims.ID //http.Redirect(w, r, fmt.Sprintf("https://%s%s", r.Host, "/login"), 302) next.ServeHTTP(w, r) } } } func date() string { t := time.Now() date := fmt.Sprintf("%d-%02d-%02d %02d:%02d:%02d", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) return date } func postCount() string { var count string row, err := database.Query("SELECT COUNT(*) count FROM post") if err != nil { fmt.Print(err) return "" } for row.Next() { row.Scan(&count) } return count }
[ "\"ORIGIN_ALLOWED\"" ]
[]
[ "ORIGIN_ALLOWED" ]
[]
["ORIGIN_ALLOWED"]
go
1
0
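One detail worth noting for this main.go row: the only extracted name, ORIGIN_ALLOWED, occurs solely inside a commented-out CORS line, yet it still appears in environment and constarg, so the extraction evidently does not skip comments. The earlier env_names_from_go sketch behaves the same way; main_go_source below is a hypothetical variable assumed to hold the content above.

```python
raw, names = env_names_from_go(main_go_source)
print(raw)    # ['"ORIGIN_ALLOWED"']  (matched even though the os.Getenv call is commented out)
print(names)  # ['ORIGIN_ALLOWED']
```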
filename: src/results/show_first_convs.py
content:
""" File: show_first_convs.py Author: David Solanas Sanz TFG """ import argparse import os import keras import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from keras.backend.tensorflow_backend import set_session from keras.models import load_model def plot_conv_weights(res): """ Plots the first convolutional layer weights Parameters ---------- res: Weights to plot Returns ------- """ W = res if len(W.shape) == 4: W = np.squeeze(W) fig, axs = plt.subplots(8, 4, figsize=(8, 8)) axs = axs.ravel() for i in range(32): x = W[:, :, :, i] x = (x - x.min()) / (x.max() - x.min()) axs[i].imshow(x) def main(): ap = argparse.ArgumentParser() ap.add_argument("-m", "--model", default=None, help="path to the file where the model is stored") args = ap.parse_args() model_file = '/models/model_cat_BUENO.h5' # Load the model architecture and its weights model = load_model(model_file) model2 = keras.applications.InceptionV3() layer_names = [] layer_weights = [] for i, layer in enumerate(model.layers): if 'conv2d' in layer.name: print(i, layer.name) layer_weights.append(layer.weights) layer_names.append(layer.name) layer_namesV3 = [] layer_weightsV3 = [] for i, layer in enumerate(model2.layers): if 'conv2d' in layer.name: layer_weightsV3.append(layer.weights) layer_namesV3.append(layer.name) res = layer_weights[0][0].numpy() - layer_weightsV3[0][0].numpy() plot_conv_weights(res) return res if __name__ == '__main__': """ Match input image or current life video feed with the selected template """ # GPU memory growth and just use GPU 0 os.environ["CUDA_VISIBLE_DEVICES"] = "0" # only see the gpu 0 config = tf.ConfigProto() config.gpu_options.allow_growth = True sess = tf.Session(config=config) set_session(sess) # set this TensorFlow session as the default session for Keras res = main()
environment: []
variablearg: []
constarg: [ "CUDA_VISIBLE_DEVICES" ]
variableargjson: []
constargjson: ["CUDA_VISIBLE_DEVICES"]
lang: python
constargcount: 1
variableargcount: 0
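The ast-based sketch from the setup.py row also covers this file, where the name is written rather than read (os.environ["CUDA_VISIBLE_DEVICES"] = "0"); the subscript branch picks it up either way. show_first_convs_source is a hypothetical variable assumed to hold the content above.

```python
print(env_names_from_python(show_first_convs_source))
# ['CUDA_VISIBLE_DEVICES']  (matches this row's constarg and constargjson)
```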
filename: watson/cli.py
content:
import datetime import itertools import json import operator import os from dateutil import tz from functools import reduce, wraps import arrow import click from click_didyoumean import DYMGroup import watson as _watson from .autocompletion import ( get_frames, get_project_or_task_completion, get_projects, get_rename_name, get_rename_types, get_tags, ) from .frames import Frame from .utils import ( apply_weekday_offset, build_csv, confirm_project, confirm_tags, create_watson, flatten_report_for_csv, format_timedelta, frames_to_csv, frames_to_json, get_frame_from_argument, get_start_time_for_period, options, safe_save, sorted_groupby, style, parse_tags, json_arrow_encoder, ) class MutuallyExclusiveOption(click.Option): def __init__(self, *args, **kwargs): self.mutually_exclusive = set(kwargs.pop('mutually_exclusive', [])) super(MutuallyExclusiveOption, self).__init__(*args, **kwargs) def handle_parse_result(self, ctx, opts, args): if self.name in opts: if self.mutually_exclusive.intersection(opts): self._raise_exclusive_error() if self.multiple and len(set(opts[self.name])) > 1: self._raise_exclusive_error() return super(MutuallyExclusiveOption, self).handle_parse_result( ctx, opts, args ) def _raise_exclusive_error(self): # Use self.opts[-1] instead of self.name to handle options with a # different internal name. self.mutually_exclusive.add(self.opts[-1].strip('-')) raise click.ClickException( style( 'error', 'The following options are mutually exclusive: ' '{options}'.format(options=', '.join( ['`--{}`'.format(_) for _ in self.mutually_exclusive])))) def local_tz_info() -> datetime.tzinfo: """Get the local time zone object, respects the TZ env variable.""" timezone = os.environ.get("TZ", None) # If timezone is None or an empty string, gettz returns the local time tzinfo = tz.gettz(timezone) # gettz returns None if the timezone passed to gettz is invalid if tzinfo is None: raise click.ClickException( f"Invalid timezone {timezone} specified, " "please set the TZ environment variable with" " a valid timezone." ) return tzinfo class DateTimeParamType(click.ParamType): name = 'datetime' def convert(self, value, param, ctx) -> arrow: if value: date = self._parse_multiformat(value) if date is None: raise click.UsageError( "Could not match value '{}' to any supported date format" .format(value) ) # When we parse a date, we want to parse it in the timezone # expected by the user, so that midnight is midnight in the local # timezone, or respect the TZ environment variable not in UTC. # Cf issue #16. 
date = date.replace(tzinfo=local_tz_info()) # Add an offset to match the week beginning specified in the # configuration if param.name == "week": week_start = ctx.obj.config.get( "options", "week_start", "monday") date = apply_weekday_offset( start_time=date, week_start=week_start) return date def _parse_multiformat(self, value) -> arrow: date = None for fmt in (None, 'HH:mm:ss', 'HH:mm'): try: if fmt is None: date = arrow.get(value) else: date = arrow.get(value, fmt) date = arrow.now().replace( hour=date.hour, minute=date.minute, second=date.second ) break except (ValueError, TypeError): pass return date DateTime = DateTimeParamType() def catch_watson_error(func): @wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except _watson.WatsonError as e: raise click.ClickException(style('error', str(e))) return wrapper @click.group(cls=DYMGroup) @click.version_option(version=_watson.__version__, prog_name='Watson') @click.option('--color/--no-color', 'color', default=None, help="(Don't) color output.") @click.pass_context def cli(ctx, color): """ Watson is a tool aimed at helping you monitoring your time. You just have to tell Watson when you start working on your project with the `start` command, and you can stop the timer when you're done with the `stop` command. """ if color is not None: ctx.color = True if color else False # This is the main command group, needed by click in order # to handle the subcommands ctx.obj = create_watson() @cli.command() @click.argument('command', required=False) @click.pass_context def help(ctx, command): """ Display help information """ if not command: click.echo(ctx.parent.get_help()) return cmd = cli.get_command(ctx, command) if not cmd: raise click.ClickException("No such command: {}".format(command)) click.echo(cmd.get_help(ctx)) def _start(watson, project, tags, restart=False, start_at=None, gap=True): """ Start project with given list of tags and save status. """ current = watson.start(project, tags, restart=restart, start_at=start_at, gap=gap,) click.echo("Starting project {}{} at {}".format( style('project', project), (" " if current['tags'] else "") + style('tags', current['tags']), style('time', "{:HH:mm}".format(current['start'])) )) watson.save() @cli.command() @click.option('--at', 'at_', type=DateTime, default=None, cls=MutuallyExclusiveOption, mutually_exclusive=['gap_'], help=('Start frame at this time. Must be in ' '(YYYY-MM-DDT)?HH:MM(:SS)? format.')) @click.option('-g/-G', '--gap/--no-gap', 'gap_', is_flag=True, default=True, cls=MutuallyExclusiveOption, mutually_exclusive=['at_'], help=("(Don't) leave gap between end time of previous project " "and start time of the current.")) @click.argument('args', nargs=-1, autocompletion=get_project_or_task_completion) @click.option('-c', '--confirm-new-project', is_flag=True, default=False, help="Confirm addition of new project.") @click.option('-b', '--confirm-new-tag', is_flag=True, default=False, help="Confirm creation of new tag.") @click.pass_obj @click.pass_context @catch_watson_error def start(ctx, watson, confirm_new_project, confirm_new_tag, args, at_, gap_=True): """ Start monitoring time for the given project. You can add tags indicating more specifically what you are working on with `+tag`. If there is already a running project and the configuration option `options.stop_on_start` is set to a true value (`1`, `on`, `true`, or `yes`), it is stopped before the new project is started. If `--at` option is given, the provided starting time is used. 
The specified time must be after the end of the previous frame and must not be in the future. Example: \b $ watson start --at 13:37 Starting project apollo11 at 13:37 If the `--no-gap` flag is given, the start time of the new project is set to the stop time of the most recently stopped project. Example: \b $ watson start apollo11 +module +brakes --no-gap Starting project apollo11 [module, brakes] at 16:34 """ project = ' '.join( itertools.takewhile(lambda s: not s.startswith('+'), args) ) if not project: raise click.ClickException("No project given.") # Confirm creation of new project if that option is set if (watson.config.getboolean('options', 'confirm_new_project') or confirm_new_project): confirm_project(project, watson.projects) # Parse all the tags tags = parse_tags(args) # Confirm creation of new tag(s) if that option is set if (watson.config.getboolean('options', 'confirm_new_tag') or confirm_new_tag): confirm_tags(tags, watson.tags) if project and watson.is_started and not gap_: current = watson.current errmsg = ("Project '{}' is already started and '--no-gap' is passed. " "Please stop manually.") raise click.ClickException( style( 'error', errmsg.format(current['project']) ) ) if (project and watson.is_started and watson.config.getboolean('options', 'stop_on_start')): ctx.invoke(stop) _start(watson, project, tags, start_at=at_, gap=gap_) @cli.command(context_settings={'ignore_unknown_options': True}) @click.option('--at', 'at_', type=DateTime, default=None, help=('Stop frame at this time. Must be in ' '(YYYY-MM-DDT)?HH:MM(:SS)? format.')) @click.pass_obj @catch_watson_error def stop(watson, at_): """ Stop monitoring time for the current project. If `--at` option is given, the provided stopping time is used. The specified time must be after the beginning of the to-be-ended frame and must not be in the future. Example: \b $ watson stop --at 13:37 Stopping project apollo11, started an hour ago and stopped 30 minutes ago. (id: e9ccd52) # noqa: E501 """ frame = watson.stop(stop_at=at_) output_str = "Stopping project {}{}, started {} and stopped {}. (id: {})" click.echo(output_str.format( style('project', frame.project), (" " if frame.tags else "") + style('tags', frame.tags), style('time', frame.start.humanize()), style('time', frame.stop.humanize()), style('short_id', frame.id), )) watson.save() @cli.command(context_settings={'ignore_unknown_options': True}) @click.option('--at', 'at_', type=DateTime, default=None, cls=MutuallyExclusiveOption, mutually_exclusive=['gap_'], help=('Start frame at this time. Must be in ' '(YYYY-MM-DDT)?HH:MM(:SS)? format.')) @click.option('-g/-G', '--gap/--no-gap', 'gap_', is_flag=True, default=True, cls=MutuallyExclusiveOption, mutually_exclusive=['at_'], help=("(Don't) leave gap between end time of previous project " "and start time of the current.")) @click.option('-s/-S', '--stop/--no-stop', 'stop_', default=None, help="(Don't) Stop an already running project.") @click.argument('id', default='-1', autocompletion=get_frames) @click.pass_obj @click.pass_context @catch_watson_error def restart(ctx, watson, id, stop_, at_, gap_=True): """ Restart monitoring time for a previously stopped project. By default, the project from the last frame, which was recorded, is restarted, using the same tags as recorded in that frame. You can specify the frame to use with an integer frame index argument or a frame ID. For example, to restart the second-to-last frame, pass `-2` as the frame index. 
Normally, if a project is currently started, Watson will print an error and do nothing. If you set the configuration option `options.stop_on_restart` to a true value (`1`, `on`, `true`, or `yes`), the current project, if any, will be stopped before the new frame is started. You can pass the option `-s` or `--stop` resp. `-S` or `--no-stop` to override the default or configured behaviour. If no previous frame exists or an invalid frame index or ID was given, an error is printed and no further action taken. Example: \b $ watson start apollo11 +module +brakes Starting project apollo11 [module, brakes] at 16:34 $ watson stop Stopping project apollo11, started a minute ago. (id: e7ccd52) $ watson restart Starting project apollo11 [module, brakes] at 16:36 If the `--no-gap` flag is given, the start time of the new project is set to the stop time of the most recently stopped project. """ if not watson.frames and not watson.is_started: raise click.ClickException( style('error', "No frames recorded yet. It's time to create your " "first one!")) if watson.is_started and not gap_: current = watson.current errmsg = ("Project '{}' is already started and '--no-gap' is passed. " "Please stop manually.") raise click.ClickException( style( 'error', errmsg.format(current['project']) ) ) if watson.is_started: if stop_ or (stop_ is None and watson.config.getboolean('options', 'stop_on_restart')): ctx.invoke(stop) else: # Raise error here, instead of in watson.start(), otherwise # will give misleading error if running frame is the first one raise click.ClickException("{} {} {}".format( style('error', "Project already started:"), style('project', watson.current['project']), style('tags', watson.current['tags']))) frame = get_frame_from_argument(watson, id) _start(watson, frame.project, frame.tags, restart=True, start_at=at_, gap=gap_) @cli.command() @click.pass_obj @catch_watson_error def cancel(watson): """ Cancel the last call to the start command. The time will not be recorded. """ old = watson.cancel() click.echo("Canceling the timer for project {}{}".format( style('project', old['project']), (" " if old['tags'] else "") + style('tags', old['tags']) )) watson.save() @cli.command() @click.option('-p', '--project', is_flag=True, help="only output project") @click.option('-t', '--tags', is_flag=True, help="only show tags") @click.option('-e', '--elapsed', is_flag=True, help="only show time elapsed") @click.pass_obj @catch_watson_error def status(watson, project, tags, elapsed): """ Display when the current project was started and the time spent since. You can configure how the date and time of when the project was started are displayed by setting `options.date_format` and `options.time_format` in the configuration. The syntax of these formatting strings and the supported placeholders are the same as for the `strftime` method of Python's `datetime.datetime` class. 
Example: \b $ watson status Project apollo11 [brakes] started seconds ago (2014-05-19 14:32:41+0100) $ watson config options.date_format %d.%m.%Y $ watson config options.time_format "at %I:%M %p" $ watson status Project apollo11 [brakes] started a minute ago (19.05.2014 at 02:32 PM) """ if not watson.is_started: click.echo("No project started.") return current = watson.current if project: click.echo("{}".format( style('project', current['project']), )) return if tags: click.echo("{}".format( style('tags', current['tags']) )) return if elapsed: click.echo("{}".format( style('time', current['start'].humanize()) )) return datefmt = watson.config.get('options', 'date_format', '%Y.%m.%d') timefmt = watson.config.get('options', 'time_format', '%H:%M:%S%z') click.echo("Project {}{} started {} ({} {})".format( style('project', current['project']), (" " if current['tags'] else "") + style('tags', current['tags']), style('time', current['start'].humanize()), style('date', current['start'].strftime(datefmt)), style('time', current['start'].strftime(timefmt)) )) _SHORTCUT_OPTIONS = ['all', 'year', 'month', 'luna', 'week', 'day'] _SHORTCUT_OPTIONS_VALUES = { k: get_start_time_for_period(k) for k in _SHORTCUT_OPTIONS } @cli.command() @click.option('-c/-C', '--current/--no-current', 'current', default=None, help="(Don't) include currently running frame in report.") @click.option('-f', '--from', 'from_', cls=MutuallyExclusiveOption, type=DateTime, default=arrow.now().shift(days=-7), mutually_exclusive=_SHORTCUT_OPTIONS, help="The date from when the report should start. Defaults " "to seven days ago.") @click.option('-t', '--to', cls=MutuallyExclusiveOption, type=DateTime, default=arrow.now(), mutually_exclusive=_SHORTCUT_OPTIONS, help="The date at which the report should stop (inclusive). " "Defaults to tomorrow.") @click.option('-y', '--year', cls=MutuallyExclusiveOption, type=DateTime, flag_value=_SHORTCUT_OPTIONS_VALUES['year'], mutually_exclusive=['day', 'week', 'luna', 'month', 'all'], help='Reports activity for the current year.') @click.option('-m', '--month', cls=MutuallyExclusiveOption, type=DateTime, flag_value=_SHORTCUT_OPTIONS_VALUES['month'], mutually_exclusive=['day', 'week', 'luna', 'year', 'all'], help='Reports activity for the current month.') @click.option('-l', '--luna', cls=MutuallyExclusiveOption, type=DateTime, flag_value=_SHORTCUT_OPTIONS_VALUES['luna'], mutually_exclusive=['day', 'week', 'month', 'year', 'all'], help='Reports activity for the current moon cycle.') @click.option('-w', '--week', cls=MutuallyExclusiveOption, type=DateTime, flag_value=_SHORTCUT_OPTIONS_VALUES['week'], mutually_exclusive=['day', 'month', 'luna', 'year', 'all'], help='Reports activity for the current week.') @click.option('-d', '--day', cls=MutuallyExclusiveOption, type=DateTime, flag_value=_SHORTCUT_OPTIONS_VALUES['day'], mutually_exclusive=['week', 'month', 'luna', 'year', 'all'], help='Reports activity for the current day.') @click.option('-a', '--all', cls=MutuallyExclusiveOption, type=DateTime, flag_value=_SHORTCUT_OPTIONS_VALUES['all'], mutually_exclusive=['day', 'week', 'month', 'luna', 'year'], help='Reports all activities.') @click.option('-p', '--project', 'projects', autocompletion=get_projects, multiple=True, help="Reports activity only for the given project. You can add " "other projects by using this option several times.") @click.option('-T', '--tag', 'tags', autocompletion=get_tags, multiple=True, help="Reports activity only for frames containing the given " "tag. 
You can add several tags by using this option multiple " "times") @click.option('--ignore-project', 'ignore_projects', multiple=True, help="Reports activity for all projects but the given ones. You " "can ignore several projects by using the option multiple " "times. Any given project will be ignored") @click.option('--ignore-tag', 'ignore_tags', multiple=True, help="Reports activity for all tags but the given ones. You can " "ignore several tags by using the option multiple times. Any " "given tag will be ignored") @click.option('-j', '--json', 'output_format', cls=MutuallyExclusiveOption, flag_value='json', mutually_exclusive=['csv'], multiple=True, help="Format output in JSON instead of plain text") @click.option('-s', '--csv', 'output_format', cls=MutuallyExclusiveOption, flag_value='csv', mutually_exclusive=['json'], multiple=True, help="Format output in CSV instead of plain text") @click.option('--plain', 'output_format', cls=MutuallyExclusiveOption, flag_value='plain', mutually_exclusive=['json', 'csv'], multiple=True, default=True, hidden=True, help="Format output in plain text (default)") @click.option('-g/-G', '--pager/--no-pager', 'pager', default=None, help="(Don't) view output through a pager.") @click.option('-i','--ignore-current', 'ignore_current',default=False, flag_value=True, help="ignore current started Project in Projecttime") @click.pass_obj @catch_watson_error def report(watson, current, from_, to, projects, tags, ignore_projects, ignore_tags, year, month, week, day, luna, all, output_format, pager, ignore_current, aggregated=False, include_partial_frames=True): """ Display a report of the time spent on each project. If a project is given, the time spent on this project is printed. Else, print the total for each root project. By default, the time spent the last 7 days is printed. This timespan can be controlled with the `--from` and `--to` arguments. The dates must have the format `YEAR-MONTH-DAY`, like: `2014-05-19`. You can also use special shortcut options for easier timespan control: `--day` sets the report timespan to the current day (beginning at `00:00h`) and `--year`, `--month` and `--week` to the current year, month, or week, respectively. The shortcut `--luna` sets the timespan to the current moon cycle with the last full moon marking the start of the cycle. You can limit the report to a project or a tag using the `--project`, `--tag`, `--ignore-project` and `--ignore-tag` options. They can be specified several times each to add or ignore multiple projects or tags to the report. If you are outputting to the terminal, you can selectively enable a pager through the `--pager` option. You can change the output format for the report from *plain text* to *JSON* using the `--json` option or to *CSV* using the `--csv` option. Only one of these two options can be used at once. 
Example: \b $ watson report Mon 05 May 2014 -> Mon 12 May 2014 \b apollo11 - 13h 22m 20s [brakes 7h 53m 18s] [module 7h 41m 41s] [reactor 8h 35m 50s] [steering 10h 33m 37s] [wheels 10h 11m 35s] \b hubble - 8h 54m 46s [camera 8h 38m 17s] [lens 5h 56m 22s] [transmission 6h 27m 07s] \b voyager1 - 11h 45m 13s [antenna 5h 53m 57s] [generators 9h 04m 58s] [probe 10h 14m 29s] [sensors 10h 30m 26s] \b voyager2 - 16h 16m 09s [antenna 7h 05m 50s] [generators 12h 20m 29s] [probe 12h 20m 29s] [sensors 11h 23m 17s] \b Total: 43h 42m 20s \b $ watson report --from 2014-04-01 --to 2014-04-30 --project apollo11 Tue 01 April 2014 -> Wed 30 April 2014 \b apollo11 - 13h 22m 20s [brakes 7h 53m 18s] [module 7h 41m 41s] [reactor 8h 35m 50s] [steering 10h 33m 37s] [wheels 10h 11m 35s] \b $ watson report --json { "projects": [ { "name": "watson", "tags": [ { "name": "export", "time": 530.0 }, { "name": "report", "time": 530.0 } ], "time": 530.0 } ], "time": 530.0, "timespan": { "from": "2016-02-21T00:00:00-08:00", "to": "2016-02-28T23:59:59.999999-08:00" } } \b $ watson report --from 2014-04-01 --to 2014-04-30 --project apollo11 --csv from,to,project,tag,time 2014-04-01 00:00:00,2014-04-30 23:59:59,apollo11,,48140.0 2014-04-01 00:00:00,2014-04-30 23:59:59,apollo11,brakes,28421.0 2014-04-01 00:00:00,2014-04-30 23:59:59,apollo11,module,27701.0 2014-04-01 00:00:00,2014-04-30 23:59:59,apollo11,reactor,30950.0 2014-04-01 00:00:00,2014-04-30 23:59:59,apollo11,steering,38017.0 2014-04-01 00:00:00,2014-04-30 23:59:59,apollo11,wheels,36695.0 """ # if the report is an aggregate report, add whitespace using this # aggregate tab which will be prepended to the project name if aggregated: tab = ' ' else: tab = '' if not watson.is_started and not aggregated: click.echo("No project started.") else: current = watson.current report = watson.report(from_, to, current, projects, tags, ignore_projects, ignore_tags, year=year, month=month, week=week, day=day, luna=luna, all=all, ignore_current=ignore_current, include_partial_frames=include_partial_frames) if 'json' in output_format and not aggregated: click.echo(json.dumps(report, indent=4, sort_keys=True, default=json_arrow_encoder)) return elif 'csv' in output_format and not aggregated: click.echo(build_csv(flatten_report_for_csv(report))) return elif 'plain' not in output_format and aggregated: return report lines = [] # use the pager, or print directly to the terminal if pager or (pager is None and watson.config.getboolean('options', 'pager', True)): def _print(line): lines.append(line) def _final_print(lines): click.echo_via_pager('\n'.join(lines)) elif aggregated: def _print(line): lines.append(line) def _final_print(lines): pass else: def _print(line): click.echo(line) def _final_print(lines): pass # handle special title formatting for aggregate reports if aggregated: _print('{} - {}'.format( style('date', '{:ddd DD MMMM YYYY}'.format( report['timespan']['from'] )), style('time', '{}'.format(format_timedelta( datetime.timedelta(seconds=report['time']) ))) )) else: _print('{} -> {}\n'.format( style('date', '{:ddd DD MMMM YYYY}'.format( report['timespan']['from'] )), style('date', '{:ddd DD MMMM YYYY}'.format( report['timespan']['to'] )) )) projects = report['projects'] for project in projects: _print('{tab}{project} - {time}'.format( tab=tab, time=style('time', format_timedelta( datetime.timedelta(seconds=project['time']) )), project=style('project', project['name']) )) tags = project['tags'] if tags: longest_tag = max(len(tag) for tag in tags or ['']) for tag in tags: 
_print('\t[{tag} {time}]'.format( time=style('time', '{:>11}'.format(format_timedelta( datetime.timedelta(seconds=tag['time']) ))), tag=style('tag', '{:<{}}'.format( tag['name'], longest_tag )), )) _print("") # if this is a report invoked from `aggregate` return the lines; do not # show total time if aggregated: return lines _print('Total: {}'.format( style('time', '{}'.format(format_timedelta( datetime.timedelta(seconds=report['time']) ))) )) ##### show status of current if current is set if watson.is_started and not ignore_current: datefmt = watson.config.get('options', 'date_format', '%Y.%m.%d') timefmt = watson.config.get('options', 'time_format', '%H:%M:%S%z') _print("\nProject {}{} started {} ({} {})".format( style('project', current['project']), (" " if current['tags'] else "") + style('tags', current['tags']), style('time', current['start'].humanize()), style('date', current['start'].strftime(datefmt)), style('time', current['start'].strftime(timefmt)) )) _final_print(lines) @cli.command() @click.option('-c/-C', '--current/--no-current', 'current', default=None, help="(Don't) include currently running frame in report.") @click.option('-f', '--from', 'from_', cls=MutuallyExclusiveOption, type=DateTime, default=arrow.now().shift(days=-7), mutually_exclusive=_SHORTCUT_OPTIONS, help="The date from when the report should start. Defaults " "to seven days ago.") @click.option('-t', '--to', cls=MutuallyExclusiveOption, type=DateTime, default=arrow.now(), mutually_exclusive=_SHORTCUT_OPTIONS, help="The date at which the report should stop (inclusive). " "Defaults to tomorrow.") @click.option('-p', '--project', 'projects', autocompletion=get_projects, multiple=True, help="Reports activity only for the given project. You can add " "other projects by using this option several times.") @click.option('-T', '--tag', 'tags', autocompletion=get_tags, multiple=True, help="Reports activity only for frames containing the given " "tag. You can add several tags by using this option multiple " "times") @click.option('-j', '--json', 'output_format', cls=MutuallyExclusiveOption, flag_value='json', mutually_exclusive=['csv'], multiple=True, help="Format output in JSON instead of plain text") @click.option('-s', '--csv', 'output_format', cls=MutuallyExclusiveOption, flag_value='csv', mutually_exclusive=['json'], multiple=True, help="Format output in CSV instead of plain text") @click.option('--plain', 'output_format', cls=MutuallyExclusiveOption, flag_value='plain', mutually_exclusive=['json', 'csv'], multiple=True, default=True, hidden=True, help="Format output in plain text (default)") @click.option('-g/-G', '--pager/--no-pager', 'pager', default=None, help="(Don't) view output through a pager.") @click.option('-i','--ignore-current', 'ignore_current',default=False, flag_value=True, help="ignore current started Project in Projecttime") @click.pass_obj @click.pass_context @catch_watson_error def aggregate(ctx, watson, current, from_, to, projects, tags, output_format, pager, ignore_current): """ Display a report of the time spent on each project aggregated by day. If a project is given, the time spent on this project is printed. Else, print the total for each root project. By default, the time spent the last 7 days is printed. This timespan can be controlled with the `--from` and `--to` arguments. The dates must have the format `YEAR-MONTH-DAY`, like: `2014-05-19`. You can limit the report to a project or a tag using the `--project` and `--tag` options. 
They can be specified several times each to add multiple projects or tags to the report. If you are outputting to the terminal, you can selectively enable a pager through the `--pager` option. You can change the output format from *plain text* to *JSON* using the `--json` option or to *CSV* using the `--csv` option. Only one of these two options can be used at once. Example: \b $ watson aggregate Wed 14 November 2018 - 5h 42m 22s watson - 5h 42m 22s [features 34m 06s] [docs 5h 08m 16s] \b Thu 15 November 2018 - 00s \b Fri 16 November 2018 - 00s \b Sat 17 November 2018 - 00s \b Sun 18 November 2018 - 00s \b Mon 19 November 2018 - 5h 58m 52s watson - 5h 58m 52s [features 1h 12m 03s] [docs 4h 46m 49s] \b Tue 20 November 2018 - 2h 50m 35s watson - 2h 50m 35s [features 15m 17s] [docs 1h 37m 43s] [website 57m 35s] \b Wed 21 November 2018 - 01m 17s watson - 01m 17s [docs 01m 17s] \b $ watson aggregate --csv from,to,project,tag,time 2018-11-14 00:00:00,2018-11-14 23:59:59,watson,,20542.0 2018-11-14 00:00:00,2018-11-14 23:59:59,watson,features,2046.0 2018-11-14 00:00:00,2018-11-14 23:59:59,watson,docs,18496.0 2018-11-19 00:00:00,2018-11-19 23:59:59,watson,,21532.0 2018-11-19 00:00:00,2018-11-19 23:59:59,watson,features,4323.0 2018-11-19 00:00:00,2018-11-19 23:59:59,watson,docs,17209.0 2018-11-20 00:00:00,2018-11-20 23:59:59,watson,,10235.0 2018-11-20 00:00:00,2018-11-20 23:59:59,watson,features,917.0 2018-11-20 00:00:00,2018-11-20 23:59:59,watson,docs,5863.0 2018-11-20 00:00:00,2018-11-20 23:59:59,watson,website,3455.0 2018-11-21 00:00:00,2018-11-21 23:59:59,watson,,77.0 2018-11-21 00:00:00,2018-11-21 23:59:59,watson,docs,77.0 """ delta = (to - from_).days lines = [] for i in range(delta + 1): offset = datetime.timedelta(days=i) from_offset = from_ + offset output = ctx.invoke(report, current=current, from_=from_offset, to=from_offset, projects=projects, tags=tags, output_format=output_format, ignore_current=ignore_current, pager=pager, aggregated=True, include_partial_frames=True) if 'json' in output_format: lines.append(output) elif 'csv' in output_format: lines.extend(flatten_report_for_csv(output)) else: # if there is no activity for the day, append a newline # this ensures even spacing throughout the report if (len(output)) == 1: output[0] += '\n' lines.append('\n'.join(output)) if 'json' in output_format: click.echo(json.dumps(lines, indent=4, sort_keys=True, default=json_arrow_encoder)) elif 'csv' in output_format: click.echo(build_csv(lines)) elif pager or (pager is None and watson.config.getboolean('options', 'pager', True)): click.echo_via_pager('\n\n'.join(lines)) else: click.echo('\n\n'.join(lines)) @cli.command() @click.option('-c/-C', '--current/--no-current', 'current', default=None, help="(Don't) include currently running frame in output.") @click.option('-r/-R', '--reverse/--no-reverse', 'reverse', default=None, help="(Don't) reverse the order of the days in output.") @click.option('-f', '--from', 'from_', type=DateTime, default=arrow.now().shift(days=-7), help="The date from when the log should start. Defaults " "to seven days ago.") @click.option('-t', '--to', type=DateTime, default=arrow.now(), help="The date at which the log should stop (inclusive). 
" "Defaults to tomorrow.") @click.option('-y', '--year', cls=MutuallyExclusiveOption, type=DateTime, flag_value=_SHORTCUT_OPTIONS_VALUES['year'], mutually_exclusive=['day', 'week', 'month', 'all'], help='Reports activity for the current year.') @click.option('-m', '--month', cls=MutuallyExclusiveOption, type=DateTime, flag_value=_SHORTCUT_OPTIONS_VALUES['month'], mutually_exclusive=['day', 'week', 'year', 'all'], help='Reports activity for the current month.') @click.option('-l', '--luna', cls=MutuallyExclusiveOption, type=DateTime, flag_value=_SHORTCUT_OPTIONS_VALUES['luna'], mutually_exclusive=['day', 'week', 'month', 'year', 'all'], help='Reports activity for the current moon cycle.') @click.option('-w', '--week', cls=MutuallyExclusiveOption, type=DateTime, flag_value=_SHORTCUT_OPTIONS_VALUES['week'], mutually_exclusive=['day', 'month', 'year', 'all'], help='Reports activity for the current week.') @click.option('-d', '--day', cls=MutuallyExclusiveOption, type=DateTime, flag_value=_SHORTCUT_OPTIONS_VALUES['day'], mutually_exclusive=['week', 'month', 'year', 'all'], help='Reports activity for the current day.') @click.option('-a', '--all', cls=MutuallyExclusiveOption, type=DateTime, flag_value=_SHORTCUT_OPTIONS_VALUES['all'], mutually_exclusive=['day', 'week', 'month', 'year'], help='Reports all activities.') @click.option('-p', '--project', 'projects', autocompletion=get_projects, multiple=True, help="Logs activity only for the given project. You can add " "other projects by using this option several times.") @click.option('-T', '--tag', 'tags', autocompletion=get_tags, multiple=True, help="Logs activity only for frames containing the given " "tag. You can add several tags by using this option multiple " "times") @click.option('--ignore-project', 'ignore_projects', multiple=True, help="Logs activity for all projects but the given ones. You " "can ignore several projects by using the option multiple " "times. Any given project will be ignored") @click.option('--ignore-tag', 'ignore_tags', multiple=True, help="Logs activity for all tags but the given ones. You can " "ignore several tags by using the option multiple times. Any " "given tag will be ignored") @click.option('-j', '--json', 'output_format', cls=MutuallyExclusiveOption, flag_value='json', mutually_exclusive=['csv'], multiple=True, help="Format output in JSON instead of plain text") @click.option('-s', '--csv', 'output_format', cls=MutuallyExclusiveOption, flag_value='csv', mutually_exclusive=['json'], multiple=True, help="Format output in CSV instead of plain text") @click.option('--plain', 'output_format', cls=MutuallyExclusiveOption, flag_value='plain', mutually_exclusive=['json', 'csv'], multiple=True, default=True, hidden=True, help="Format output in plain text (default)") @click.option('-g/-G', '--pager/--no-pager', 'pager', default=None, help="(Don't) view output through a pager.") @click.pass_obj @catch_watson_error def log(watson, current, reverse, from_, to, projects, tags, ignore_projects, ignore_tags, year, month, week, day, luna, all, output_format, pager): """ Display each recorded session during the given timespan. By default, the sessions from the last 7 days are printed. This timespan can be controlled with the `--from` and `--to` arguments. The dates must have the format `YEAR-MONTH-DAY`, like: `2014-05-19`. 
You can also use special shortcut options for easier timespan control: `--day` sets the log timespan to the current day (beginning at `00:00h`) and `--year`, `--month` and `--week` to the current year, month, or week, respectively. The shortcut `--luna` sets the timespan to the current moon cycle with the last full moon marking the start of the cycle. If you are outputting to the terminal, you can selectively enable a pager through the `--pager` option. You can limit the log to a project or a tag using the `--project`, `--tag`, `--ignore-project` and `--ignore-tag` options. They can be specified several times each to add or ignore multiple projects or tags in the log. You can change the output format from *plain text* to *JSON* using the `--json` option or to *CSV* using the `--csv` option. Only one of these two options can be used at once. Example: \b $ watson log --project voyager2 --project apollo11 Thursday 08 May 2015 (56m 33s) f35bb24 09:26 to 10:22 56m 33s apollo11 [reactor, brakes, steering, wheels, module] \b Wednesday 07 May 2015 (27m 29s) 9a1325d 09:48 to 10:15 27m 29s voyager2 [sensors, generators, probe] \b Tuesday 06 May 2015 (1h 47m 22s) 530768b 12:40 to 14:16 1h 35m 45s apollo11 [wheels] 84164f0 14:23 to 14:35 11m 37s apollo11 [brakes, steering] \b Monday 05 May 2015 (8h 18m 26s) 26a2817 09:05 to 10:03 57m 12s voyager2 [probe, generators] 5590aca 10:51 to 14:47 3h 55m 40s apollo11 c32c74e 15:12 to 18:38 3h 25m 34s voyager2 [probe, generators, sensors, antenna] \b $ watson log --from 2014-04-16 --to 2014-04-17 Thursday 17 April 2014 (4h 19m 13s) a96fcde 09:15 to 09:43 28m 11s hubble [lens, camera, transmission] 5e91316 10:19 to 12:59 2h 39m 15s hubble [camera, transmission] 761dd51 14:42 to 15:54 1h 11m 47s voyager1 [antenna] \b Wednesday 16 April 2014 (5h 19m 18s) 02cb269 09:53 to 12:43 2h 50m 07s apollo11 [wheels] 1070ddb 13:48 to 16:17 2h 29m 11s voyager1 [antenna, sensors] \b $ watson log --from 2014-04-16 --to 2014-04-17 --csv id,start,stop,project,tags a96fcde,2014-04-17 09:15,2014-04-17 09:43,hubble,"lens, camera, transmission" 5e91316,2014-04-17 10:19,2014-04-17 12:59,hubble,"camera, transmission" 761dd51,2014-04-17 14:42,2014-04-17 15:54,voyager1,antenna 02cb269,2014-04-16 09:53,2014-04-16 12:43,apollo11,wheels 1070ddb,2014-04-16 13:48,2014-04-16 16:17,voyager1,"antenna, sensors" """ # noqa for start_time in (_ for _ in [day, week, month, luna, year, all] if _ is not None): from_ = start_time if from_ > to: raise click.ClickException("'from' must be anterior to 'to'") if bool(projects and ignore_projects and set(projects).intersection(set(ignore_projects))): raise click.ClickException( "given projects can't be ignored at the same time") if bool(tags and ignore_tags and set(tags).intersection(set(ignore_tags))): raise click.ClickException( "given tags can't be ignored at the same time") if watson.current: if current or (current is None and watson.config.getboolean('options', 'log_current')): cur = watson.current watson.frames.add(cur['project'], cur['start'], arrow.utcnow(), cur['tags'], id="current") if reverse is None: reverse = watson.config.getboolean('options', 'reverse_log', True) span = watson.frames.span(from_, to) filtered_frames = watson.frames.filter( projects=projects or None, tags=tags or None, ignore_projects=ignore_projects or None, ignore_tags=ignore_tags or None, span=span ) if 'json' in output_format: click.echo(frames_to_json(filtered_frames)) return if 'csv' in output_format: click.echo(frames_to_csv(filtered_frames)) return frames_by_day = 
sorted_groupby( filtered_frames, operator.attrgetter('day'), reverse=reverse ) lines = [] # use the pager, or print directly to the terminal if pager or (pager is None and watson.config.getboolean('options', 'pager', True)): def _print(line): lines.append(line) def _final_print(lines): click.echo_via_pager('\n'.join(lines)) else: def _print(line): click.echo(line) def _final_print(lines): pass for i, (day, frames) in enumerate(frames_by_day): if i != 0: _print('') frames = sorted(frames, key=operator.attrgetter('start')) longest_project = max(len(frame.project) for frame in frames) daily_total = reduce( operator.add, (frame.stop - frame.start for frame in frames) ) _print( "{date} ({daily_total})".format( date=style('date', "{:dddd DD MMMM YYYY}".format(day)), daily_total=style('time', format_timedelta(daily_total)) ) ) _print("\n".join( "\t{id} {start} to {stop} {delta:>11} {project}{tags}".format( delta=format_timedelta(frame.stop - frame.start), project=style('project', '{:>{}}'.format( frame.project, longest_project )), tags=(" "*2 if frame.tags else "") + style('tags', frame.tags), start=style('time', '{:HH:mm}'.format(frame.start)), stop=style('time', '{:HH:mm}'.format(frame.stop)), id=style('short_id', frame.id) ) for frame in frames )) _final_print(lines) @cli.command() @click.pass_obj @catch_watson_error def projects(watson): """ Display the list of all the existing projects. Example: \b $ watson projects apollo11 hubble voyager1 voyager2 """ for project in watson.projects: click.echo(style('project', project)) @cli.command() @click.pass_obj @catch_watson_error def tags(watson): """ Display the list of all the tags. Example: \b $ watson tags antenna brakes camera generators lens module probe reactor sensors steering transmission wheels """ for tag in watson.tags: click.echo(style('tag', tag)) @cli.command() @click.pass_obj @catch_watson_error def frames(watson): """ Display the list of all frame IDs. Example: \b $ watson frames f1c4815 9d1a989 8801ec3 [...] """ for frame in watson.frames: click.echo(style('short_id', frame.id)) @cli.command(context_settings={'ignore_unknown_options': True}) @click.argument('args', nargs=-1, autocompletion=get_project_or_task_completion) @click.option('-f', '--from', 'from_', required=True, type=DateTime, help="Date and time of start of tracked activity") @click.option('-t', '--to', required=True, type=DateTime, help="Date and time of end of tracked activity") @click.option('-c', '--confirm-new-project', is_flag=True, default=False, help="Confirm addition of new project.") @click.option('-b', '--confirm-new-tag', is_flag=True, default=False, help="Confirm creation of new tag.") @click.pass_obj @catch_watson_error def add(watson, args, from_, to, confirm_new_project, confirm_new_tag): """ Add time to a project with tag(s) that was not tracked live. 
Example: \b $ watson add --from "2018-03-20 12:00:00" --to "2018-03-20 13:00:00" \\ programming +addfeature """ # parse project name from args project = ' '.join( itertools.takewhile(lambda s: not s.startswith('+'), args) ) if not project: raise click.ClickException("No project given.") # Confirm creation of new project if that option is set if (watson.config.getboolean('options', 'confirm_new_project') or confirm_new_project): confirm_project(project, watson.projects) # Parse all the tags tags = parse_tags(args) # Confirm creation of new tag(s) if that option is set if (watson.config.getboolean('options', 'confirm_new_tag') or confirm_new_tag): confirm_tags(tags, watson.tags) # add a new frame, call watson save to update state files frame = watson.add(project=project, tags=tags, from_date=from_, to_date=to) click.echo( "Adding project {}{}, started {} and stopped {}. (id: {})".format( style('project', frame.project), (" " if frame.tags else "") + style('tags', frame.tags), style('time', frame.start.humanize()), style('time', frame.stop.humanize()), style('short_id', frame.id) ) ) watson.save() @cli.command(context_settings={'ignore_unknown_options': True}) @click.option('-c', '--confirm-new-project', is_flag=True, default=False, help="Confirm addition of new project.") @click.option('-b', '--confirm-new-tag', is_flag=True, default=False, help="Confirm creation of new tag.") @click.argument('id', required=False, autocompletion=get_frames) @click.pass_obj @catch_watson_error def edit(watson, confirm_new_project, confirm_new_tag, id): """ Edit a frame. You can specify the frame to edit by its position or by its frame id. For example, to edit the second-to-last frame, pass `-2` as the frame index. You can get the id of a frame with the `watson log` command. If no id or index is given, the frame defaults to the current frame (or the last recorded frame, if no project is currently running). The editor used is determined by the `VISUAL` or `EDITOR` environment variables (in that order) and defaults to `notepad` on Windows systems and to `vim`, `nano`, or `vi` (first one found) on all other systems. """ date_format = 'YYYY-MM-DD' time_format = 'HH:mm:ss' datetime_format = '{} {}'.format(date_format, time_format) local_tz = local_tz_info() if id: frame = get_frame_from_argument(watson, id) id = frame.id elif watson.is_started: frame = Frame(watson.current['start'], None, watson.current['project'], None, watson.current['tags']) elif watson.frames: frame = watson.frames[-1] id = frame.id else: raise click.ClickException( style('error', "No frames recorded yet. 
It's time to create your " "first one!")) data = { 'start': frame.start.format(datetime_format), 'project': frame.project, 'tags': frame.tags, } if id: data['stop'] = frame.stop.format(datetime_format) text = json.dumps(data, indent=4, sort_keys=True, ensure_ascii=False) start = None stop = None # enter into while loop until successful and validated # edit has been performed while True: output = click.edit(text, extension='.json') if not output: click.echo("No change made.") return try: data = json.loads(output) project = data['project'] # Confirm creation of new project if that option is set if (watson.config.getboolean('options', 'confirm_new_project') or confirm_new_project): confirm_project(project, watson.projects) tags = data['tags'] # Confirm creation of new tag(s) if that option is set if (watson.config.getboolean('options', 'confirm_new_tag') or confirm_new_tag): confirm_tags(tags, watson.tags) start = arrow.get(data['start'], datetime_format).replace( tzinfo=local_tz).to('utc') stop = arrow.get(data['stop'], datetime_format).replace( tzinfo=local_tz).to('utc') if id else None # if start time of the project is not before end time # raise ValueException if not watson.is_started and start > stop: raise ValueError( "Task cannot end before it starts.") if start > arrow.utcnow(): raise ValueError("Start time cannot be in the future") if stop and stop > arrow.utcnow(): raise ValueError("Stop time cannot be in the future") # break out of while loop and continue execution of # the edit function normally break except (ValueError, TypeError, RuntimeError) as e: click.echo("Error while parsing inputted values: {}".format(e), err=True) except KeyError: click.echo( "The edited frame must contain the project, " "start, and stop keys.", err=True) # we reach here if exception was thrown, wait for user # to acknowledge the error before looping in while and # showing user the editor again click.pause(err=True) # use previous entered values to the user in editor # instead of original ones text = output # we reach this when we break out of the while loop above if id: watson.frames[id] = (project, start, stop, tags) else: watson.current = dict(start=start, project=project, tags=tags) watson.save() click.echo( "Edited frame for project {project}{tags}, from {start} to {stop} " "({delta})".format( delta=format_timedelta(stop - start) if stop else '-', project=style('project', project), tags=(" " if tags else "") + style('tags', tags), start=style( 'time', start.to(local_tz).format(time_format) ), stop=style( 'time', stop.to(local_tz).format(time_format) if stop else '-' ) ) ) @cli.command(context_settings={'ignore_unknown_options': True}) @click.argument('id', autocompletion=get_frames) @click.option('-f', '--force', is_flag=True, help="Don't ask for confirmation.") @click.pass_obj @catch_watson_error def remove(watson, id, force): """ Remove a frame. You can specify the frame either by id or by position (ex: `-1` for the last frame). 
""" frame = get_frame_from_argument(watson, id) id = frame.id if not force: click.confirm( "You are about to remove frame " "{project}{tags} from {start} to {stop}, continue?".format( project=style('project', frame.project), tags=(" " if frame.tags else "") + style('tags', frame.tags), start=style('time', '{:HH:mm}'.format(frame.start)), stop=style('time', '{:HH:mm}'.format(frame.stop)) ), abort=True ) del watson.frames[id] watson.save() click.echo("Frame removed.") @cli.command() @click.argument('key', required=False, metavar='SECTION.OPTION') @click.argument('value', required=False) @click.option('-e', '--edit', is_flag=True, help="Edit the configuration file with an editor.") @click.pass_context @catch_watson_error def config(context, key, value, edit): """ Get and set configuration options. If `value` is not provided, the content of the `key` is displayed. Else, the given `value` is set. You can edit the config file with an editor with the `--edit` option. Example: \b $ watson config backend.token 7e329263e329 $ watson config backend.token 7e329263e329 """ watson = context.obj wconfig = watson.config if edit: try: with open(watson.config_file) as fp: rawconfig = fp.read() except (IOError, OSError): rawconfig = '' newconfig = click.edit(text=rawconfig, extension='.ini') if newconfig: safe_save(watson.config_file, newconfig) try: watson.config = None watson.config # triggers reloading config from file except _watson.ConfigurationError as exc: watson.config = wconfig watson.save() raise click.ClickException(style('error', str(exc))) return if not key: click.echo(context.get_help()) return try: section, option = key.split('.') except ValueError: raise click.ClickException( "The key must have the format 'section.option'" ) if value is None: if not wconfig.has_section(section): raise click.ClickException("No such section {}".format(section)) if not wconfig.has_option(section, option): raise click.ClickException( "No such option {} in {}".format(option, section) ) click.echo(wconfig.get(section, option)) else: if not wconfig.has_section(section): wconfig.add_section(section) wconfig.set(section, option, value) watson.config = wconfig watson.save() @cli.command() @click.pass_obj @catch_watson_error def sync(watson): """ Get the frames from the server and push the new ones. The URL of the server and the User Token must be defined via the `watson config` command. Example: \b $ watson config backend.url http://localhost:4242 $ watson config backend.token 7e329263e329 $ watson sync Received 42 frames from the server Pushed 23 frames to the server """ last_pull = arrow.utcnow() pulled = watson.pull() click.echo("Received {} frames from the server".format(len(pulled))) pushed = watson.push(last_pull) click.echo("Pushed {} frames to the server".format(len(pushed))) watson.last_sync = arrow.utcnow() watson.save() @cli.command() @click.argument('frames_with_conflict', type=click.Path(exists=True)) @click.option('-f', '--force', 'force', is_flag=True, help="If specified, then the merge will automatically " "be performed.") @click.pass_obj @catch_watson_error def merge(watson, frames_with_conflict, force): """ Perform a merge of the existing frames with a conflicting frames file. When storing the frames on a file hosting service, there is the possibility that the frame file goes out-of-sync due to one or more of the connected clients going offline. This can cause the frames to diverge. If the `--force` command is specified, the merge operation will automatically be performed. 
The only argument is a path to the the conflicting `frames` file. Merge will output statistics about the merge operation. Example: \b $ watson merge frames-with-conflicts 120 frames will be left unchanged 12 frames will be merged 3 frame conflicts need to be resolved To perform a merge operation, the user will be prompted to select the frame they would like to keep. Example: \b $ watson merge frames-with-conflicts --force 120 frames will be left unchanged 12 frames will be merged 3 frame conflicts need to be resolved Will resolve conflicts: frame 8804872: < { < "project": "tailordev", < "start": "2015-07-28 09:33:33", < "stop": "2015-07-28 10:39:36", < "tags": [ < "intern", < "daily-meeting" < ] < } --- > { > "project": "tailordev", > "start": "2015-07-28 09:33:33", > "stop": "**2015-07-28 11:39:36**", > "tags": [ > "intern", > "daily-meeting" > ] > } Select the frame you want to keep: left or right? (L/r) """ original_frames = watson.frames conflicting, merging = watson.merge_report(frames_with_conflict) # find the length of the largest returned list, then get the number of # digits of this length dig = len(str(max(len(original_frames), len(merging), len(conflicting)))) click.echo("{:<{width}} frames will be left unchanged".format( len(original_frames) - len(conflicting), width=dig)) click.echo("{:<{width}} frames will be merged".format( len(merging), width=dig)) click.echo("{:<{width}} frames will need to be resolved".format( len(conflicting), width=dig)) # No frames to resolve or merge. if not conflicting and not merging: return # Confirm user would like to merge if not force and not click.confirm("Do you want to continue?"): return if conflicting: click.echo("Will resolve conflicts:") date_format = 'YYYY-MM-DD HH:mm:ss' for conflict_frame in conflicting: original_frame = original_frames[conflict_frame.id] # Print original frame original_frame_data = { 'project': original_frame.project, 'start': original_frame.start.format(date_format), 'stop': original_frame.stop.format(date_format), 'tags': original_frame.tags } click.echo("frame {}:".format(style('short_id', original_frame.id))) click.echo("{}".format('\n'.join('<' + line for line in json.dumps( original_frame_data, indent=4, ensure_ascii=False).splitlines()))) click.echo("---") # make a copy of the namedtuple conflict_frame_copy = conflict_frame._replace() # highlight conflicts if conflict_frame.project != original_frame.project: project = '**' + str(conflict_frame.project) + '**' conflict_frame_copy = conflict_frame_copy._replace(project=project) if conflict_frame.start != original_frame.start: start = '**' + str(conflict_frame.start.format(date_format)) + '**' conflict_frame_copy = conflict_frame_copy._replace(start=start) if conflict_frame.stop != original_frame.stop: stop = '**' + str(conflict_frame.stop.format(date_format)) + '**' conflict_frame_copy = conflict_frame_copy._replace(stop=stop) for idx, tag in enumerate(conflict_frame.tags): if tag not in original_frame.tags: conflict_frame_copy.tags[idx] = '**' + str(tag) + '**' # Print conflicting frame conflict_frame_data = { 'project': conflict_frame_copy.project, 'start': conflict_frame_copy.start.format(date_format), 'stop': conflict_frame_copy.stop.format(date_format), 'tags': conflict_frame_copy.tags } click.echo("{}".format('\n'.join('>' + line for line in json.dumps( conflict_frame_data, indent=4, ensure_ascii=False).splitlines()))) resp = click.prompt( "Select the frame you want to keep: left or right? 
(L/r)", value_proc=options(['L', 'r'])) if resp == 'r': # replace original frame with conflicting frame original_frames[conflict_frame.id] = conflict_frame # merge in any non-conflicting frames for frame in merging: start, stop, project, id, tags, updated_at = frame.dump() original_frames.add(project, start, stop, tags=tags, id=id, updated_at=updated_at) watson.frames = original_frames watson.frames.changed = True watson.save() @cli.command() @click.argument('rename_type', required=True, metavar='TYPE', autocompletion=get_rename_types) @click.argument('old_name', required=True, autocompletion=get_rename_name) @click.argument('new_name', required=True, autocompletion=get_rename_name) @click.pass_obj @catch_watson_error def rename(watson, rename_type, old_name, new_name): """ Rename a project or tag. Example: \b $ watson rename project read-python-intro learn-python Renamed project "read-python-intro" to "learn-python" $ watson rename tag company-meeting meeting Renamed tag "company-meeting" to "meeting" """ if rename_type == 'tag': watson.rename_tag(old_name, new_name) click.echo('Renamed tag "{}" to "{}"'.format( style('tag', old_name), style('tag', new_name) )) elif rename_type == 'project': watson.rename_project(old_name, new_name) click.echo('Renamed project "{}" to "{}"'.format( style('project', old_name), style('project', new_name) )) else: raise click.ClickException(style( 'error', 'You have to call rename with type "project" or "tag"; ' 'you supplied "%s"' % rename_type ))
[]
[]
[ "TZ" ]
[]
["TZ"]
python
1
0
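The watson CLI entry above is tagged with TZ as its environment variable. As a minimal, standard-library-only illustration of what a TZ override does to local-time handling (POSIX-only; the zone value below is an arbitrary example and is unrelated to Watson's own timezone helpers):

import os
import time

os.environ["TZ"] = "Europe/Paris"   # arbitrary example value
time.tzset()                        # POSIX-only: re-read TZ for this process
print(time.strftime("%H:%M %Z", time.localtime()))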
digits/config/torch.py
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. from __future__ import absolute_import import os from . import option_list def find_executable(path=None): """ Finds the th executable on the given path and returns it if found. If path is None, searches through PATH """ if path is None: dirnames = os.environ['PATH'].split(os.pathsep) suffixes = ['th'] else: dirnames = [path] # fuzzy search suffixes = ['th', os.path.join('bin', 'th'), os.path.join('install', 'bin', 'th')] for dirname in dirnames: dirname = dirname.strip('"') for suffix in suffixes: path = os.path.join(dirname, suffix) if os.path.isfile(path) and os.access(path, os.X_OK): return path return None if 'TORCH_ROOT' in os.environ: executable = find_executable(os.environ['TORCH_ROOT']) if executable is None: raise ValueError('Torch executable not found at "%s" (TORCH_ROOT)' % os.environ['TORCH_ROOT']) elif 'TORCH_HOME' in os.environ: executable = find_executable(os.environ['TORCH_HOME']) if executable is None: raise ValueError('Torch executable not found at "%s" (TORCH_HOME)' % os.environ['TORCH_HOME']) else: executable = find_executable() if executable is None: option_list['torch'] = { 'enabled': False, } else: option_list['torch'] = { 'enabled': True, 'executable': executable, }
[]
[]
[ "PATH", "TORCH_ROOT", "TORCH_HOME" ]
[]
["PATH", "TORCH_ROOT", "TORCH_HOME"]
python
3
0
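The digits/config/torch.py entry above resolves the th executable by checking TORCH_ROOT, then TORCH_HOME, then falling back to a PATH search. A compact sketch of that precedence, using only the standard library (the helper name locate_executable is made up):

import os
import shutil

def locate_executable(name, root_vars=("TORCH_ROOT", "TORCH_HOME")):
    # An explicitly configured install root wins over a plain PATH lookup.
    for var in root_vars:
        root = os.environ.get(var)
        if root:
            for rel in (name, os.path.join("bin", name),
                        os.path.join("install", "bin", name)):
                candidate = os.path.join(root, rel)
                if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
                    return candidate
            raise ValueError("%r not found under %s=%s" % (name, var, root))
    return shutil.which(name)   # falls back to searching PATH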
pkg/acceptance/cluster/docker.go
// Copyright 2015 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package cluster import ( "context" "encoding/binary" "fmt" "io" "math" "net" "net/url" "os" "os/user" "path/filepath" "strconv" "strings" "time" "github.com/cockroachdb/cockroach/pkg/util/contextutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/go-connections/nat" isatty "github.com/mattn/go-isatty" ) // Retrieve the IP address of docker itself. func dockerIP() net.IP { host := os.Getenv("DOCKER_HOST") if host == "" { host = client.DefaultDockerHost } u, err := url.Parse(host) if err != nil { panic(err) } if u.Scheme == "unix" { return net.IPv4(127, 0, 0, 1) } h, _, err := net.SplitHostPort(u.Host) if err != nil { panic(err) } return net.ParseIP(h) } // Container provides the programmatic interface for a single docker // container. type Container struct { id string name string cluster *DockerCluster } // Name returns the container's name. func (c Container) Name() string { return c.name } func hasImage(ctx context.Context, l *DockerCluster, ref string) error { distributionRef, err := reference.ParseNamed(ref) if err != nil { return err } path := reference.Path(distributionRef) // Correct for random docker stupidity: // // https://github.com/moby/moby/blob/7248742/registry/service.go#L207:L215 path = strings.TrimPrefix(path, "library/") images, err := l.client.ImageList(ctx, types.ImageListOptions{ All: true, Filters: filters.NewArgs( filters.Arg("reference", path), ), }) if err != nil { return err } tagged, ok := distributionRef.(reference.Tagged) if !ok { return errors.Errorf("untagged reference %s not permitted", ref) } wanted := fmt.Sprintf("%s:%s", path, tagged.Tag()) for _, image := range images { for _, repoTag := range image.RepoTags { // The Image.RepoTags field contains strings of the form <path>:<tag>. if repoTag == wanted { return nil } } } var imageList []string for _, image := range images { for _, tag := range image.RepoTags { imageList = append(imageList, fmt.Sprintf("%s %s", tag, image.ID)) } } return errors.Errorf("%s not found in:\n%s", wanted, strings.Join(imageList, "\n")) } func pullImage( ctx context.Context, l *DockerCluster, ref string, options types.ImagePullOptions, ) error { // HACK: on CircleCI, docker pulls the image on the first access from an // acceptance test even though that image is already present. So we first // check to see if our image is present in order to avoid this slowness.
if hasImage(ctx, l, ref) == nil { log.Infof(ctx, "ImagePull %s already exists", ref) return nil } log.Infof(ctx, "ImagePull %s starting", ref) defer log.Infof(ctx, "ImagePull %s complete", ref) rc, err := l.client.ImagePull(ctx, ref, options) if err != nil { return err } defer rc.Close() out := os.Stderr outFd := out.Fd() isTerminal := isatty.IsTerminal(outFd) if err := jsonmessage.DisplayJSONMessagesStream(rc, out, outFd, isTerminal, nil); err != nil { return err } if err := hasImage(ctx, l, ref); err != nil { return errors.Wrapf(err, "pulled image %s but still don't have it", ref) } return nil } // splitBindSpec splits a Docker bind specification into its host and container // paths. func splitBindSpec(bind string) (hostPath string, containerPath string) { s := strings.SplitN(bind, ":", 2) return s[0], s[1] } // getNonRootContainerUser determines a non-root UID and GID to use in the // container to minimize file ownership problems in bind mounts. It returns a // UID:GID string suitable for use as the User field container.Config. func getNonRootContainerUser() (string, error) { // This number is Debian-specific, but for now all of our acceptance test // containers are based on Debian. // See: https://www.debian.org/doc/debian-policy/#uid-and-gid-classes const minUnreservedID = 101 user, err := user.Current() if err != nil { return "", err } uid, err := strconv.Atoi(user.Uid) if err != nil { return "", errors.Wrap(err, "looking up host UID") } if uid < minUnreservedID { return "", fmt.Errorf("host UID %d in container's reserved UID space", uid) } gid, err := strconv.Atoi(user.Gid) if err != nil { return "", errors.Wrap(err, "looking up host GID") } if gid < minUnreservedID { // If the GID is in the reserved space, silently upconvert to the known-good // UID. We don't want to return an error because users on a macOS host // typically have a GID in the reserved space, and this upconversion has // been empirically verified to not cause ownership issues. gid = uid } return fmt.Sprintf("%d:%d", uid, gid), nil } // createContainer creates a new container using the specified // options. Per the docker API, the created container is not running // and must be started explicitly. Note that the passed-in hostConfig // will be augmented with the necessary settings to use the network // defined by l.createNetwork(). func createContainer( ctx context.Context, l *DockerCluster, containerConfig container.Config, hostConfig container.HostConfig, containerName string, ) (*Container, error) { hostConfig.NetworkMode = container.NetworkMode(l.networkID) // Disable DNS search under the host machine's domain. This can // catch upstream wildcard DNS matching and result in odd behavior. hostConfig.DNSSearch = []string{"."} // Run the container as the current user to avoid creating root-owned files // and directories from within the container. user, err := getNonRootContainerUser() if err != nil { return nil, err } containerConfig.User = user // Additionally ensure that the host side of every bind exists. Otherwise, the // Docker daemon will create the host directory as root before running the // container. 
for _, bind := range hostConfig.Binds { hostPath, _ := splitBindSpec(bind) if _, err := os.Stat(hostPath); os.IsNotExist(err) { maybePanic(os.MkdirAll(hostPath, 0755)) } else { maybePanic(err) } } resp, err := l.client.ContainerCreate(ctx, &containerConfig, &hostConfig, nil, containerName) if err != nil { return nil, err } return &Container{ id: resp.ID, name: containerName, cluster: l, }, nil } func maybePanic(err error) { if err != nil { panic(err) } } // Remove removes the container from docker. It is an error to remove a running // container. func (c *Container) Remove(ctx context.Context) error { return c.cluster.client.ContainerRemove(ctx, c.id, types.ContainerRemoveOptions{ RemoveVolumes: true, Force: true, }) } // Kill stops a running container, without removing it. func (c *Container) Kill(ctx context.Context) error { if err := c.cluster.client.ContainerKill(ctx, c.id, "9"); err != nil && !strings.Contains(err.Error(), "is not running") { return err } c.cluster.expectEvent(c, eventDie) return nil } // Start starts a non-running container. // // TODO(pmattis): Generalize the setting of parameters here. func (c *Container) Start(ctx context.Context) error { return c.cluster.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{}) } // Restart restarts a running container. // Container will be killed after 'timeout' seconds if it fails to stop. func (c *Container) Restart(ctx context.Context, timeout *time.Duration) error { var exp []string if ci, err := c.Inspect(ctx); err != nil { return err } else if ci.State.Running { exp = append(exp, eventDie) } if err := c.cluster.client.ContainerRestart(ctx, c.id, timeout); err != nil { return err } c.cluster.expectEvent(c, append(exp, eventRestart)...) return nil } // Wait waits for a running container to exit. func (c *Container) Wait(ctx context.Context, condition container.WaitCondition) error { waitOKBodyCh, errCh := c.cluster.client.ContainerWait(ctx, c.id, condition) select { case err := <-errCh: return err case waitOKBody := <-waitOKBodyCh: outputLog := filepath.Join(c.cluster.volumesDir, "logs", "console-output.log") cmdLog, err := os.Create(outputLog) if err != nil { return err } defer cmdLog.Close() out := io.MultiWriter(cmdLog, os.Stderr) if err := c.Logs(ctx, out); err != nil { log.Warningf(ctx, "%v", err) } if exitCode := waitOKBody.StatusCode; exitCode != 0 { err = errors.Errorf("non-zero exit code: %d", exitCode) fmt.Fprintln(out, err.Error()) log.Shoutf(ctx, log.Severity_INFO, "command left-over files in %s", c.cluster.volumesDir) } return err } } // Logs outputs the containers logs to the given io.Writer. func (c *Container) Logs(ctx context.Context, w io.Writer) error { rc, err := c.cluster.client.ContainerLogs(ctx, c.id, types.ContainerLogsOptions{ ShowStdout: true, ShowStderr: true, }) if err != nil { return err } defer rc.Close() // The docker log output is not quite plaintext: each line has a // prefix consisting of one byte file descriptor (stdout vs stderr), // three bytes padding, four byte length. We could use this to // disentangle stdout and stderr if we wanted to output them into // separate streams, but we don't really care. for { var header uint64 if err := binary.Read(rc, binary.BigEndian, &header); err == io.EOF { break } else if err != nil { return err } size := header & math.MaxUint32 if _, err := io.CopyN(w, rc, int64(size)); err != nil { return err } } return nil } // Inspect retrieves detailed info about a container. 
func (c *Container) Inspect(ctx context.Context) (types.ContainerJSON, error) { return c.cluster.client.ContainerInspect(ctx, c.id) } // Addr returns the TCP address to connect to. func (c *Container) Addr(ctx context.Context, port nat.Port) *net.TCPAddr { containerInfo, err := c.Inspect(ctx) if err != nil { log.Errorf(ctx, "%v", err) return nil } bindings, ok := containerInfo.NetworkSettings.Ports[port] if !ok || len(bindings) == 0 { return nil } portNum, err := strconv.Atoi(bindings[0].HostPort) if err != nil { log.Errorf(ctx, "%v", err) return nil } return &net.TCPAddr{ IP: dockerIP(), Port: portNum, } } // resilientDockerClient handles certain recoverable Docker usage errors. // // For example, `ContainerCreate` will fail if a container with the requested // name already exists. resilientDockerClient will catch this, delete the // existing container and try again. type resilientDockerClient struct { client.APIClient } func (cli resilientDockerClient) ContainerStart( clientCtx context.Context, id string, opts types.ContainerStartOptions, ) error { for { err := contextutil.RunWithTimeout(clientCtx, "start container", 20*time.Second, func(ctx context.Context) error { return cli.APIClient.ContainerStart(ctx, id, opts) }) // Keep going if ContainerStart timed out, but client's context is not // expired. if errors.Is(err, context.DeadlineExceeded) && clientCtx.Err() == nil { log.Warningf(clientCtx, "ContainerStart timed out, retrying") continue } return err } } func (cli resilientDockerClient) ContainerCreate( ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string, ) (container.ContainerCreateCreatedBody, error) { response, err := cli.APIClient.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName) if err != nil && strings.Contains(err.Error(), "already in use") { log.Infof(ctx, "unable to create container %s: %v", containerName, err) containers, cerr := cli.ContainerList(ctx, types.ContainerListOptions{ All: true, Limit: -1, // no limit, see docker/docker/client/container_list.go }) if cerr != nil { log.Infof(ctx, "unable to list containers: %v", cerr) return container.ContainerCreateCreatedBody{}, err } for _, c := range containers { for _, n := range c.Names { // The container names begin with a "/". n = strings.TrimPrefix(n, "/") if n != containerName { continue } log.Infof(ctx, "trying to remove %s", c.ID) options := types.ContainerRemoveOptions{ RemoveVolumes: true, Force: true, } if rerr := cli.ContainerRemove(ctx, c.ID, options); rerr != nil { log.Infof(ctx, "unable to remove container: %v", rerr) return container.ContainerCreateCreatedBody{}, err } return cli.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName) } } log.Warningf(ctx, "error indicated existing container %s, "+ "but none found:\nerror: %s\ncontainers: %+v", containerName, err, containers) // We likely raced with a previous (late) removal of the container. // Return a timeout so a higher level can retry and hopefully // succeed (or get stuck in an infinite loop, at which point at // least we'll have gathered an additional bit of information). return response, context.DeadlineExceeded } return response, err }
[ "\"DOCKER_HOST\"" ]
[]
[ "DOCKER_HOST" ]
[]
["DOCKER_HOST"]
go
1
0
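The docker.go entry above derives the daemon address from DOCKER_HOST and treats a unix socket as loopback. The same fallback-and-parse logic rendered in Python for brevity (the default socket URL is an assumption standing in for the Docker client's built-in default):

import os
from urllib.parse import urlparse

host = os.environ.get("DOCKER_HOST") or "unix:///var/run/docker.sock"
parts = urlparse(host)
# A unix-socket daemon is local, so report the loopback address for it.
docker_ip = "127.0.0.1" if parts.scheme == "unix" else parts.hostname
print(docker_ip)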
cloud/scope/virtualmachine.go
/* Copyright 2020 The Kubernetes Authors. Portions Copyright © Microsoft Corporation. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package scope import ( "context" "os" "github.com/go-logr/logr" infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha3" azhciauth "github.com/microsoft/cluster-api-provider-azurestackhci/pkg/auth" "github.com/microsoft/moc/pkg/auth" "github.com/pkg/errors" "k8s.io/klog/klogr" "k8s.io/utils/pointer" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) // MachineScopeParams defines the input parameters used to create a new VirtualMachineScope. type VirtualMachineScopeParams struct { AzureStackHCIClients Client client.Client Logger logr.Logger AzureStackHCIVirtualMachine *infrav1.AzureStackHCIVirtualMachine } // NewMachineScope creates a new VirtualMachineScope from the supplied parameters. // This is meant to be called for each reconcile iteration. func NewVirtualMachineScope(params VirtualMachineScopeParams) (*VirtualMachineScope, error) { if params.Client == nil { return nil, errors.New("client is required when creating a VirtualMachineScope") } if params.AzureStackHCIVirtualMachine == nil { return nil, errors.New("azurestackhci virtual machine is required when creating a VirtualMachineScope") } if params.Logger == nil { params.Logger = klogr.New() } agentFqdn := os.Getenv("AZURESTACKHCI_CLOUDAGENT_FQDN") if agentFqdn == "" { return nil, errors.New("error creating azurestackhci services. Environment variable AZURESTACKHCI_CLOUDAGENT_FQDN is not set") } params.AzureStackHCIClients.CloudAgentFqdn = agentFqdn authorizer, err := azhciauth.ReconcileAzureStackHCIAccess(context.Background(), params.Client, agentFqdn) if err != nil { return nil, errors.Wrap(err, "failed to create azurestackhci session") } params.AzureStackHCIClients.Authorizer = authorizer helper, err := patch.NewHelper(params.AzureStackHCIVirtualMachine, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } return &VirtualMachineScope{ client: params.Client, AzureStackHCIVirtualMachine: params.AzureStackHCIVirtualMachine, AzureStackHCIClients: params.AzureStackHCIClients, Logger: params.Logger, patchHelper: helper, Context: context.Background(), }, nil } // VirtualMachineScope defines a scope defined around a machine. type VirtualMachineScope struct { logr.Logger client client.Client patchHelper *patch.Helper Context context.Context AzureStackHCIClients AzureStackHCIVirtualMachine *infrav1.AzureStackHCIVirtualMachine } // GetResourceGroup allows VirtualMachineScope to fulfill ScopeInterface and thus to be used by the cloud services. func (m *VirtualMachineScope) GetResourceGroup() string { return m.AzureStackHCIVirtualMachine.Spec.ResourceGroup } // GetCloudAgentFqdn returns the cloud agent fqdn string. func (m *VirtualMachineScope) GetCloudAgentFqdn() string { return m.CloudAgentFqdn } // GetAuthorizer is a getter for the environment generated authorizer. 
func (m *VirtualMachineScope) GetAuthorizer() auth.Authorizer { return m.Authorizer } // VnetName returns the vnet name given in the vm spec. func (m *VirtualMachineScope) VnetName() string { return m.AzureStackHCIVirtualMachine.Spec.VnetName } // SubnetName returns the subnet name given in the vm spec. func (m *VirtualMachineScope) SubnetName() string { return m.AzureStackHCIVirtualMachine.Spec.SubnetName } // ClusterName returns the cluster name in the vm spec. func (m *VirtualMachineScope) ClusterName() string { return m.AzureStackHCIVirtualMachine.Spec.ClusterName } // Location returns the AzureStackHCIVirtualMachine location. func (m *VirtualMachineScope) Location() string { return m.AzureStackHCIVirtualMachine.Spec.Location } // AvailabilityZone returns the AzureStackHCIVirtualMachine Availability Zone. func (m *VirtualMachineScope) AvailabilityZone() string { return *m.AzureStackHCIVirtualMachine.Spec.AvailabilityZone.ID } // Name returns the AzureStackHCIVirtualMachine name. func (m *VirtualMachineScope) Name() string { return m.AzureStackHCIVirtualMachine.Name } // Namespace returns the namespace name. func (m *VirtualMachineScope) Namespace() string { return m.AzureStackHCIVirtualMachine.Namespace } // GetVMState returns the AzureStackHCIVirtualMachine VM state. func (m *VirtualMachineScope) GetVMState() *infrav1.VMState { return m.AzureStackHCIVirtualMachine.Status.VMState } // SetVMState sets the AzureStackHCIVirtualMachine VM state. func (m *VirtualMachineScope) SetVMState(v infrav1.VMState) { m.AzureStackHCIVirtualMachine.Status.VMState = new(infrav1.VMState) *m.AzureStackHCIVirtualMachine.Status.VMState = v } // SetReady sets the AzureStackHCIVirtualMachine Ready Status func (m *VirtualMachineScope) SetReady() { m.AzureStackHCIVirtualMachine.Status.Ready = true } // SetFailureMessage sets the AzureStackHCIVirtualMachine status failure message. func (m *VirtualMachineScope) SetFailureMessage(v error) { m.AzureStackHCIVirtualMachine.Status.FailureMessage = pointer.StringPtr(v.Error()) } // SetFailureReason sets the AzureStackHCIVirtualMachine status failure reason. func (m *VirtualMachineScope) SetFailureReason(v capierrors.MachineStatusError) { m.AzureStackHCIVirtualMachine.Status.FailureReason = &v } // SetAnnotation sets a key value annotation on the AzureStackHCIVirtualMachine. func (m *VirtualMachineScope) SetAnnotation(key, value string) { if m.AzureStackHCIVirtualMachine.Annotations == nil { m.AzureStackHCIVirtualMachine.Annotations = map[string]string{} } m.AzureStackHCIVirtualMachine.Annotations[key] = value } // PatchObject persists the virtual machine spec and status. func (m *VirtualMachineScope) PatchObject() error { return m.patchHelper.Patch(context.TODO(), m.AzureStackHCIVirtualMachine) } // Close the VirtualMachineScope by updating the machine spec, machine status. func (m *VirtualMachineScope) Close() error { return m.patchHelper.Patch(context.TODO(), m.AzureStackHCIVirtualMachine) } // AzureStackHCILoadBalancerVM returns true if the AzureStackHCIVirtualMachine is owned by a LoadBalancer resource and false otherwise (Tenant). 
func (m *VirtualMachineScope) AzureStackHCILoadBalancerVM() bool { for _, ref := range m.AzureStackHCIVirtualMachine.ObjectMeta.GetOwnerReferences() { m.Info("owner references", "type", ref.Kind) if ref.Kind == "AzureStackHCILoadBalancer" && ref.APIVersion == m.AzureStackHCIVirtualMachine.APIVersion { return true } } return false } // BackendPoolNames returns the backend pool names for the virtual machine func (m *VirtualMachineScope) BackendPoolNames() []string { return m.AzureStackHCIVirtualMachine.Spec.BackendPoolNames }
[ "\"AZURESTACKHCI_CLOUDAGENT_FQDN\"" ]
[]
[ "AZURESTACKHCI_CLOUDAGENT_FQDN" ]
[]
["AZURESTACKHCI_CLOUDAGENT_FQDN"]
go
1
0
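The virtualmachine.go entry above refuses to build a scope when AZURESTACKHCI_CLOUDAGENT_FQDN is unset, returning a descriptive error up front. The same fail-fast pattern as a small Python sketch (require_env is a hypothetical helper):

import os

def require_env(name):
    # A missing mandatory setting becomes an immediate, descriptive error
    # instead of an obscure failure later on.
    value = os.environ.get(name, "")
    if value == "":
        raise RuntimeError("environment variable %s is not set" % name)
    return value

# e.g. agent_fqdn = require_env("AZURESTACKHCI_CLOUDAGENT_FQDN")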
src/server/pkg/testutil/kubeclient.go
package testutil import ( "fmt" "os" "testing" "time" "github.com/pachyderm/pachyderm/src/client/pkg/require" "github.com/pachyderm/pachyderm/src/server/pkg/backoff" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kube "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) var ( zero int64 ) // GetKubeClient connects to the Kubernetes API server either from inside the // cluster or from a test binary running on a machine with kubectl (it will // connect to the same cluster as kubectl) func GetKubeClient(t testing.TB) *kube.Clientset { var config *rest.Config var err error host := os.Getenv("KUBERNETES_SERVICE_HOST") if host != "" { config, err = rest.InClusterConfig() } else { rules := clientcmd.NewDefaultClientConfigLoadingRules() kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{}) config, err = kubeConfig.ClientConfig() } require.NoError(t, err) k, err := kube.NewForConfig(config) require.NoError(t, err) return k } // DeletePachdPod deletes the pachd pod in a test cluster (restarting it, e.g. // to retart the PPS master) func DeletePachdPod(t testing.TB) { kubeClient := GetKubeClient(t) podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List( metav1.ListOptions{ LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector( map[string]string{"app": "pachd", "suite": "pachyderm"}, )), }) require.NoError(t, err) require.Equal(t, 1, len(podList.Items)) require.NoError(t, kubeClient.CoreV1().Pods(v1.NamespaceDefault).Delete( podList.Items[0].ObjectMeta.Name, &metav1.DeleteOptions{})) // Make sure pachd goes down startTime := time.Now() require.NoError(t, backoff.Retry(func() error { podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List( metav1.ListOptions{ LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector( map[string]string{"app": "pachd", "suite": "pachyderm"}, )), }) if err != nil { return err } if len(podList.Items) == 0 { return nil } if time.Since(startTime) > 10*time.Second { return nil } return fmt.Errorf("waiting for old pachd pod to be killed") }, backoff.NewTestingBackOff())) // Make sure pachd comes back up require.NoErrorWithinTRetry(t, 30*time.Second, func() error { podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List( metav1.ListOptions{ LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector( map[string]string{"app": "pachd", "suite": "pachyderm"}, )), }) if err != nil { return err } if len(podList.Items) == 0 { return fmt.Errorf("no pachd pod up yet") } return nil }) require.NoErrorWithinTRetry(t, 30*time.Second, func() error { podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List( metav1.ListOptions{ LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector( map[string]string{"app": "pachd", "suite": "pachyderm"}, )), }) if err != nil { return err } if len(podList.Items) == 0 { return fmt.Errorf("no pachd pod up yet") } if podList.Items[0].Status.Phase != v1.PodRunning { return fmt.Errorf("pachd not running yet") } return err }) } // DeletePipelineRC deletes the RC belonging to the pipeline 'pipeline'. 
This // can be used to test PPS's robustness func DeletePipelineRC(t testing.TB, pipeline string) { kubeClient := GetKubeClient(t) rcs, err := kubeClient.CoreV1().ReplicationControllers(v1.NamespaceDefault).List( metav1.ListOptions{ LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector( map[string]string{"pipelineName": pipeline}, )), }) require.NoError(t, err) require.Equal(t, 1, len(rcs.Items)) require.NoError(t, kubeClient.CoreV1().ReplicationControllers(v1.NamespaceDefault).Delete( rcs.Items[0].ObjectMeta.Name, &metav1.DeleteOptions{ GracePeriodSeconds: &zero, })) require.NoErrorWithinTRetry(t, 30*time.Second, func() error { rcs, err := kubeClient.CoreV1().ReplicationControllers(v1.NamespaceDefault).List( metav1.ListOptions{ LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector( map[string]string{"pipelineName": pipeline}, )), }) if err != nil { return err } if len(rcs.Items) != 0 { return fmt.Errorf("RC %q not deleted yet", pipeline) } return nil }) }
[ "\"KUBERNETES_SERVICE_HOST\"" ]
[]
[ "KUBERNETES_SERVICE_HOST" ]
[]
["KUBERNETES_SERVICE_HOST"]
go
1
0
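GetKubeClient in the entry above switches between in-cluster configuration and a local kubeconfig based on KUBERNETES_SERVICE_HOST. The decision itself, sketched in Python with the client construction deliberately left out:

import os

# Kubernetes injects KUBERNETES_SERVICE_HOST into every pod, so its presence
# is a reasonable signal that the code is running inside a cluster.
if os.environ.get("KUBERNETES_SERVICE_HOST"):
    mode = "in-cluster config"
else:
    mode = "local kubeconfig"
print("connecting via", mode)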
handle_model_files.py
import site import os import sys import glob import time from mpi4py import MPI from settings import req_dirs, models_folder, ray_folder from shutil import copyfile import subprocess pathname = os.getcwd() homepath = os.path.expanduser("~") s3pathname = homepath+'/s3-drive/groups/Behavior/Pinaki' def makedirpath(pathname=None): if pathname is None: print("No path provided for creation.") return if not os.path.exists(pathname): try: os.makedirs(pathname, exist_ok=True) except: print("Failed to created s3 drive at", pathname) pass first_default_args_call = True LOAD_PREV_MODEL = True RUN_WITH_RAY = True def is_predict_only(**kwargs): global num_timesteps if 'predict' in kwargs: if kwargs['predict'] == "True": num_timesteps = '1' #if 'predict' in sys.argv: # num_timesteps = '1' return float(num_timesteps) == 1 ############################################################### # DEFINE YOUR "BASELINE" (AGENT) PARAMETERS HERE ############################################################### ''' train_env_id = 'parking_2outs-v0' play_env_id = 'parking_2outs-v0' alg = 'her' network = 'mlp' num_timesteps = '1' # Keeping steps at 1 will only sping off prediction/simulation. > 1 for training. # To be compatible with Ray please keep this a normal integer representation. i.e 1000 not 1e3 ''' train_env_id = 'LG-SIM-ENV-v0' play_env_id = 'LG-SIM-ENV-v0' train_env_id = 'parking_2outs-v0' play_env_id = 'parking_2outs-v0' alg = 'ppo2' network = 'mlp' # Keeping steps at 1 will only sping off prediction/simulation. > 1 for training. num_timesteps = '1900' # To be compatible with Ray please keep this a normal integer representation. i.e 1000 not 1e3 ################################################################# urban_AD_env_path = pathname + '/urban_env/envs' sys.path.append(urban_AD_env_path) site.addsitedir(urban_AD_env_path) site.addsitedir(pathname) os.environ["PYTHONPATH"] = pathname def create_dirs(req_dirs): for dirName in req_dirs: if not os.path.exists(dirName): os.mkdir(dirName) # print("Directory " , dirName , " Created ") # else: # print("Directory " , dirName , " already exists") def is_master(): return MPI.COMM_WORLD.Get_rank() == 0 if is_master(): InceptcurrentDT = time.strftime("%Y%m%d-%H%M%S") else: InceptcurrentDT = None InceptcurrentDT = MPI.COMM_WORLD.bcast(InceptcurrentDT, root=0) terminal_output_file_name = 'output.txt' def copy_terminal_output_file(save_folder=None, terminal_output_file_name=None ): if save_folder is not None and terminal_output_file_name is not None: src = os.getcwd() + '/' + terminal_output_file_name dst = save_folder + '/' + terminal_output_file_name if not os.path.exists(save_folder): os.mkdir(save_folder) if os.path.exists(src): copyfile(src, dst) else: print("out put file ", terminal_output_file_name, "doesn't exist") def default_args(save_in_sub_folder=None): create_dirs(req_dirs) currentDT = time.strftime("%Y%m%d-%H%M%S") global first_default_args_call #################################################################### # DEFINE YOUR SAVE FILE, LOAD FILE AND LOGGING FILE PARAMETERS HERE #################################################################### modelpath = pathname if RUN_WITH_RAY: modelpath += '/' + ray_folder '''try: if os.path.exists(s3pathname): modelpath = s3pathname except: print("s3 pathname doesn't exist")''' save_folder = modelpath + '/' + models_folder + \ '/' + train_env_id + '/' + alg + '/' + network load_folder = modelpath + '/' + models_folder + \ '/' + train_env_id + '/' + alg + '/' + network if first_default_args_call: list_of_file = 
glob.glob(load_folder+'/*') if save_in_sub_folder is not None: save_folder += '/' + str(save_in_sub_folder) save_file = save_folder + '/' + str(currentDT) first_default_args_call_Trigger = True else: if save_in_sub_folder is not None: save_folder += '/' + str(save_in_sub_folder) save_file = save_folder + '/' + str(currentDT) list_of_file = glob.glob(save_folder+'/*') first_default_args_call_Trigger = False # Specifiy log directories for open AI '''logger_path = save_folder + '/log/' tb_logger_path = save_folder + '/tb/' os.environ['OPENAI_LOGDIR'] = logger_path os.environ['OPENAI_LOG_FORMAT'] = 'stdout,tensorboard''' ############################################################### DEFAULT_ARGUMENTS = [ '--env=' + train_env_id, '--alg=' + alg, '--network=' + network, '--num_timesteps=' + num_timesteps, # '--num_env=0', # '--save_path=' + save_file, # '--tensorboard --logdir=' + tb_logger_path, # '--play' # '--num_env=8' ] DEFAULT_ARGUMENTS_DICT = { 'env': train_env_id, 'alg': alg, 'network': network, 'num_timesteps': num_timesteps } def create_save_folder(save_folder): try: os.makedirs(save_folder) except OSError: # print ("Creation of the save path %s failed. It might already exist" % save_folder) a = 1 else: print("Successfully created the save path folder %s " % save_folder) def save_model(save_file=None): if save_file is not None: if not is_predict_only(): if MPI is None or is_master(): create_save_folder(save_folder=save_folder) DEFAULT_ARGUMENTS.append('--save_path=' + save_file) DEFAULT_ARGUMENTS_DICT['save_path'] = save_file print("Saving file", save_file) copy_terminal_output_file(save_folder=save_folder, terminal_output_file_name=terminal_output_file_name) # DEFAULT_ARGUMENTS.append('--tensorboard --logdir=' + tb_logger_path) return def load_model(load_file=None): if load_file is not None: if (not LOAD_PREV_MODEL) and first_default_args_call: return DEFAULT_ARGUMENTS.append('--load_path=' + load_file) DEFAULT_ARGUMENTS_DICT['load_path'] = load_file print("Loading file", load_file) return def is_empty_directory(directorypath): if not os.path.isdir(directorypath): return False if not os.listdir(directorypath): return True return False def filetonum(filename): try: return int(filename.split('/')[-1].replace('-', '')) except: return -1 def purge_names_not_matching_pattern(list_of_file_or_folders): if not list_of_file_or_folders: return None for fileorfoldername in list_of_file_or_folders: if '.' 
in fileorfoldername: list_of_file_or_folders.remove(fileorfoldername) # remove empty directories elif is_empty_directory(directorypath=fileorfoldername): list_of_file_or_folders.remove(fileorfoldername) return list_of_file_or_folders def latest_model_file_from_list_of_files_and_folders(list_of_files): list_of_file_or_folders = purge_names_not_matching_pattern( list_of_file_or_folders=list_of_files) if not list_of_file_or_folders: return None latest_file_or_folder = max(list_of_file_or_folders, key=filetonum) if os.path.isdir(latest_file_or_folder): list_of_files_and_folders_in_subdir = glob.glob( latest_file_or_folder+'/*') latest_model_file_in_subdir = \ latest_model_file_from_list_of_files_and_folders( list_of_files_and_folders_in_subdir) if latest_model_file_in_subdir is None: list_of_file_or_folders.remove(latest_file_or_folder) return latest_model_file_from_list_of_files_and_folders(list_of_file_or_folders) else: return latest_model_file_in_subdir return latest_file_or_folder # must be a file if list_of_file: # is there anything in the save directory if save_in_sub_folder is None: load_last_model = LOAD_PREV_MODEL else: load_last_model = LOAD_PREV_MODEL or not first_default_args_call if load_last_model: latest_file = latest_model_file_from_list_of_files_and_folders( list_of_files=list_of_file) load_model(load_file=latest_file) save_model(save_file=save_file) else: save_model(save_file=save_file) else: print(" list_of_file empty in load path ", load_folder) save_model(save_file=save_file) # print(" DEFAULT_ARGUMENTS ", DEFAULT_ARGUMENTS) if first_default_args_call_Trigger: first_default_args_call = False return DEFAULT_ARGUMENTS, DEFAULT_ARGUMENTS_DICT
[]
[]
[ "OPENAI_LOGDIR", "OPENAI_LOG_FORMAT", "PYTHONPATH" ]
[]
["OPENAI_LOGDIR", "OPENAI_LOG_FORMAT", "PYTHONPATH"]
python
3
0
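handle_model_files.py above assigns os.environ["PYTHONPATH"] in addition to calling site.addsitedir. A short sketch of why both are needed: the assignment does not change sys.path of the interpreter that is already running, it only reaches child processes started afterwards (the directory below is a made-up example):

import os
import subprocess
import sys

os.environ["PYTHONPATH"] = "/tmp/extra_modules"   # hypothetical directory
# The already-running interpreter is unaffected, but the child sees the value.
subprocess.run([sys.executable, "-c",
                "import os; print(os.environ.get('PYTHONPATH'))"])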
lib/streamer/jsonstreamer.go
/* Copyright IBM Corp. 2017 All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // StreamJSONArray scans the JSON stream associated with 'decoder' to find // an array value associated with the json element at 'pathToArray'. // It then calls the 'cb' callback function so that it can decode one element // in the stream at a time. package streamer import ( "encoding/json" "fmt" "os" "reflect" "strings" "github.com/cloudflare/cfssl/api" "github.com/cloudflare/cfssl/log" "github.com/pkg/errors" ) // SearchElement defines the JSON arrays for which to search type SearchElement struct { Path string CB func(*json.Decoder) error } // StreamJSONArray searches the JSON stream for an array matching 'path'. // For each element of this array, it streams one element at a time. func StreamJSONArray(decoder *json.Decoder, path string, cb func(*json.Decoder) error) error { ses := []SearchElement{ SearchElement{Path: path, CB: cb}, SearchElement{Path: "errors", CB: errCB}, } return StreamJSON(decoder, ses) } // StreamJSON searches the JSON stream for arrays matching a search element. // For each array that it finds, it streams them one element at a time. func StreamJSON(decoder *json.Decoder, search []SearchElement) error { js := &jsonStream{decoder: decoder, search: search, stack: []string{}} return js.stream() } type jsonStream struct { decoder *json.Decoder search []SearchElement stack []string } func (js *jsonStream) stream() error { t, err := js.getToken() if err != nil { return err } if _, ok := t.(json.Delim); !ok { return nil } path := strings.Join(js.stack, ".") se := js.getSearchElement(path) d := fmt.Sprintf("%s", t) switch d { case "[": if se != nil { for js.decoder.More() { err = se.CB(js.decoder) if err != nil { return err } } } err = js.skipToDelim("]") if err != nil { return err } case "]": return errors.Errorf("Unexpected '%s'", d) case "{": if se != nil { return errors.Errorf("Expecting array for value of '%s'", path) } for { name, err := js.getNextName() if err != nil { return err } if name == "" { return nil } stack := js.stack js.stack = append(stack, name) err = js.stream() if err != nil { return err } js.stack = stack } case "}": return errors.Errorf("Unexpected '%s'", d) default: return errors.Errorf("unknown JSON delimiter: '%s'", d) } return nil } // Find a search element named 'path' func (js *jsonStream) getSearchElement(path string) *SearchElement { for _, ele := range js.search { if ele.Path == path { return &ele } } return nil } // Skip over tokens until we hit the delimiter func (js *jsonStream) skipToDelim(delim string) error { for { t, err := js.getToken() if err != nil { return err } // Skip anything that isn't a delimiter if _, ok := t.(json.Delim); !ok { continue } // It is a delimiter d := fmt.Sprintf("%s", t) if d == delim { return nil } switch d { case "[": err = js.skipToDelim("]") case "]": err = errors.Errorf("Expecting '%s' but found '%s'", delim, d) case "{": err = js.skipToDelim("}") case "}": err = errors.Errorf("Expecting '%s' but found '%s'", delim, d) default: err = 
errors.Errorf("unknown JSON delimiter: '%s'", d) } if err != nil { return err } } } func (js *jsonStream) getNextName() (string, error) { token, err := js.getToken() if err != nil { return "", err } switch v := token.(type) { case string: return v, nil case json.Delim: d := fmt.Sprintf("%s", v) if d == "}" { return "", nil } return "", errors.Errorf("Expecting '}' delimiter but found '%s'", d) default: return "", errors.Errorf("Expecting string or delimiter but found '%s'", v) } } func (js *jsonStream) getToken() (interface{}, error) { token, err := js.decoder.Token() if os.Getenv("ZIGLEDGER_CA_JSON_STREAM_DEBUG") != "" { log.Debugf("TOKEN: type=%s, %+v\n", reflect.TypeOf(token), token) } return token, err } func errCB(decoder *json.Decoder) error { errMsg := &api.ResponseMessage{} err := decoder.Decode(errMsg) if err != nil { return errors.Errorf("Invalid JSON error format: %s", err) } return errors.Errorf("%+v", errMsg) }
[ "\"ZIGLEDGER_CA_JSON_STREAM_DEBUG\"" ]
[]
[ "ZIGLEDGER_CA_JSON_STREAM_DEBUG" ]
[]
["ZIGLEDGER_CA_JSON_STREAM_DEBUG"]
go
1
0
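jsonstreamer.go above gates token-level tracing on ZIGLEDGER_CA_JSON_STREAM_DEBUG being non-empty. The same env-gated debug switch as a Python sketch (the logger name is made up):

import logging
import os

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("jsonstream")
if os.environ.get("ZIGLEDGER_CA_JSON_STREAM_DEBUG"):
    log.setLevel(logging.DEBUG)
log.debug("TOKEN: %r", "{")   # emitted only when the variable is set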
config/config.go
package config import ( "html/template" "os" "github.com/jinzhu/configor" "github.com/microcosm-cc/bluemonday" "github.com/qor/render" ) type SMTPConfig struct { Host string Port string User string Password string Site string } var Config = struct { Port uint `default:"80" env:"PORT"` DB struct { Name string `default:"qor_scbn"` Adapter string `default:"mysql"` User string Password string } SMTP SMTPConfig }{} var ( Root = os.Getenv("GOPATH") + "/src/github.com/sunwukonga/qor-scbn" View *render.Render ) func init() { if err := configor.Load(&Config, "config/database.yml", "config/smtp.yml"); err != nil { panic(err) } View = render.New() htmlSanitizer := bluemonday.UGCPolicy() View.RegisterFuncMap("raw", func(str string) template.HTML { return template.HTML(htmlSanitizer.Sanitize(str)) }) } func (s SMTPConfig) HostWithPort() string { return s.Host + ":" + s.Port }
[ "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
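config.go above builds its package-level Root path by concatenating os.Getenv("GOPATH") with a fixed project suffix. A Python sketch of the same construction that also covers two cases the Go snippet leaves open, an unset GOPATH (Go's documented default is ~/go) and a multi-entry GOPATH:

import os

gopath = os.environ.get("GOPATH") or os.path.expanduser(os.path.join("~", "go"))
# Only the first GOPATH entry is meaningful for locating a single project.
root = os.path.join(gopath.split(os.pathsep)[0],
                    "src", "github.com", "sunwukonga", "qor-scbn")
print(root)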
hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
/** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.zookeeper; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; import java.io.PrintWriter; import java.net.InetSocketAddress; import java.net.Socket; import java.util.ArrayList; import java.util.Arrays; import java.util.Deque; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import javax.security.auth.login.AppConfigurationEntry; import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.replication.ReplicationStateZKBase; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.CreateAndFailSilent; import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.DeleteNodeFailSilent; import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.SetData; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.authentication.util.KerberosUtil; import org.apache.zookeeper.AsyncCallback; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.NoNodeException; import org.apache.zookeeper.Op; import org.apache.zookeeper.Watcher; import org.apache.zookeeper.ZooDefs.Ids; import org.apache.zookeeper.ZooDefs.Perms; import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.client.ZooKeeperSaslClient; import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.data.Id; import org.apache.zookeeper.data.Stat; import org.apache.zookeeper.proto.CreateRequest; import org.apache.zookeeper.proto.DeleteRequest; import org.apache.zookeeper.proto.SetDataRequest; import org.apache.zookeeper.server.ZooKeeperSaslServer; /** 
* Internal HBase utility class for ZooKeeper. * * <p>Contains only static methods and constants. * * <p>Methods all throw {@link KeeperException} if there is an unexpected * zookeeper exception, so callers of these methods must handle appropriately. * If ZK is required for the operation, the server will need to be aborted. */ @InterfaceAudience.Private public class ZKUtil { private static final Log LOG = LogFactory.getLog(ZKUtil.class); // TODO: Replace this with ZooKeeper constant when ZOOKEEPER-277 is resolved. public static final char ZNODE_PATH_SEPARATOR = '/'; private static int zkDumpConnectionTimeOut; /** * Creates a new connection to ZooKeeper, pulling settings and ensemble config * from the specified configuration object using methods from {@link ZKConfig}. * * Sets the connection status monitoring watcher to the specified watcher. * * @param conf configuration to pull ensemble and other settings from * @param watcher watcher to monitor connection changes * @return connection to zookeeper * @throws IOException if unable to connect to zk or config problem */ public static RecoverableZooKeeper connect(Configuration conf, Watcher watcher) throws IOException { String ensemble = ZKConfig.getZKQuorumServersString(conf); return connect(conf, ensemble, watcher); } public static RecoverableZooKeeper connect(Configuration conf, String ensemble, Watcher watcher) throws IOException { return connect(conf, ensemble, watcher, null); } public static RecoverableZooKeeper connect(Configuration conf, String ensemble, Watcher watcher, final String identifier) throws IOException { if(ensemble == null) { throw new IOException("Unable to determine ZooKeeper ensemble"); } int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT); if (LOG.isTraceEnabled()) { LOG.trace(identifier + " opening connection to ZooKeeper ensemble=" + ensemble); } int retry = conf.getInt("zookeeper.recovery.retry", 3); int retryIntervalMillis = conf.getInt("zookeeper.recovery.retry.intervalmill", 1000); int maxSleepTime = conf.getInt("zookeeper.recovery.retry.maxsleeptime", 60000); zkDumpConnectionTimeOut = conf.getInt("zookeeper.dump.connection.timeout", 1000); return new RecoverableZooKeeper(ensemble, timeout, watcher, retry, retryIntervalMillis, maxSleepTime, identifier); } /** * Log in the current zookeeper server process using the given configuration * keys for the credential file and login principal. * * <p><strong>This is only applicable when running on secure hbase</strong> * On regular HBase (without security features), this will safely be ignored. * </p> * * @param conf The configuration data to use * @param keytabFileKey Property key used to configure the path to the credential file * @param userNameKey Property key used to configure the login principal * @param hostname Current hostname to use in any credentials * @throws IOException underlying exception from SecurityUtil.login() call */ public static void loginServer(Configuration conf, String keytabFileKey, String userNameKey, String hostname) throws IOException { login(conf, keytabFileKey, userNameKey, hostname, ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY, JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME); } /** * Log in the current zookeeper client using the given configuration * keys for the credential file and login principal. * * <p><strong>This is only applicable when running on secure hbase</strong> * On regular HBase (without security features), this will safely be ignored. 
* </p> * * @param conf The configuration data to use * @param keytabFileKey Property key used to configure the path to the credential file * @param userNameKey Property key used to configure the login principal * @param hostname Current hostname to use in any credentials * @throws IOException underlying exception from SecurityUtil.login() call */ public static void loginClient(Configuration conf, String keytabFileKey, String userNameKey, String hostname) throws IOException { login(conf, keytabFileKey, userNameKey, hostname, ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME); } /** * Log in the current process using the given configuration keys for the * credential file and login principal. * * <p><strong>This is only applicable when running on secure hbase</strong> * On regular HBase (without security features), this will safely be ignored. * </p> * * @param conf The configuration data to use * @param keytabFileKey Property key used to configure the path to the credential file * @param userNameKey Property key used to configure the login principal * @param hostname Current hostname to use in any credentials * @param loginContextProperty property name to expose the entry name * @param loginContextName jaas entry name * @throws IOException underlying exception from SecurityUtil.login() call */ private static void login(Configuration conf, String keytabFileKey, String userNameKey, String hostname, String loginContextProperty, String loginContextName) throws IOException { if (!isSecureZooKeeper(conf)) return; // User has specified a jaas.conf, keep this one as the good one. // HBASE_OPTS="-Djava.security.auth.login.config=jaas.conf" if (System.getProperty("java.security.auth.login.config") != null) return; // No keytab specified, no auth String keytabFilename = conf.get(keytabFileKey); if (keytabFilename == null) { LOG.warn("no keytab specified for: " + keytabFileKey); return; } String principalConfig = conf.get(userNameKey, System.getProperty("user.name")); String principalName = SecurityUtil.getServerPrincipal(principalConfig, hostname); // Initialize the "jaas.conf" for keyTab/principal, // If keyTab is not specified use the Ticket Cache. // and set the zookeeper login context name. JaasConfiguration jaasConf = new JaasConfiguration(loginContextName, principalName, keytabFilename); javax.security.auth.login.Configuration.setConfiguration(jaasConf); System.setProperty(loginContextProperty, loginContextName); } /** * A JAAS configuration that defines the login modules that we want to use for login. 
*/ private static class JaasConfiguration extends javax.security.auth.login.Configuration { private static final String SERVER_KEYTAB_KERBEROS_CONFIG_NAME = "zookeeper-server-keytab-kerberos"; private static final String CLIENT_KEYTAB_KERBEROS_CONFIG_NAME = "zookeeper-client-keytab-kerberos"; private static final Map<String, String> BASIC_JAAS_OPTIONS = new HashMap<>(); static { String jaasEnvVar = System.getenv("HBASE_JAAS_DEBUG"); if (jaasEnvVar != null && "true".equalsIgnoreCase(jaasEnvVar)) { BASIC_JAAS_OPTIONS.put("debug", "true"); } } private static final Map<String,String> KEYTAB_KERBEROS_OPTIONS = new HashMap<>(); static { KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true"); KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true"); KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true"); KEYTAB_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS); } private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN = new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(), LoginModuleControlFlag.REQUIRED, KEYTAB_KERBEROS_OPTIONS); private static final AppConfigurationEntry[] KEYTAB_KERBEROS_CONF = new AppConfigurationEntry[]{KEYTAB_KERBEROS_LOGIN}; private javax.security.auth.login.Configuration baseConfig; private final String loginContextName; private final boolean useTicketCache; private final String keytabFile; private final String principal; public JaasConfiguration(String loginContextName, String principal, String keytabFile) { this(loginContextName, principal, keytabFile, keytabFile == null || keytabFile.length() == 0); } private JaasConfiguration(String loginContextName, String principal, String keytabFile, boolean useTicketCache) { try { this.baseConfig = javax.security.auth.login.Configuration.getConfiguration(); } catch (SecurityException e) { this.baseConfig = null; } this.loginContextName = loginContextName; this.useTicketCache = useTicketCache; this.keytabFile = keytabFile; this.principal = principal; LOG.info("JaasConfiguration loginContextName=" + loginContextName + " principal=" + principal + " useTicketCache=" + useTicketCache + " keytabFile=" + keytabFile); } @Override public AppConfigurationEntry[] getAppConfigurationEntry(String appName) { if (loginContextName.equals(appName)) { if (!useTicketCache) { KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile); KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true"); } KEYTAB_KERBEROS_OPTIONS.put("principal", principal); KEYTAB_KERBEROS_OPTIONS.put("useTicketCache", useTicketCache ? "true" : "false"); return KEYTAB_KERBEROS_CONF; } if (baseConfig != null) return baseConfig.getAppConfigurationEntry(appName); return(null); } } // // Helper methods // /** * Join the prefix znode name with the suffix znode name to generate a proper * full znode name. * * Assumes prefix does not end with slash and suffix does not begin with it. * * @param prefix beginning of znode name * @param suffix ending of znode name * @return result of properly joining prefix with suffix */ public static String joinZNode(String prefix, String suffix) { return prefix + ZNODE_PATH_SEPARATOR + suffix; } /** * Returns the full path of the immediate parent of the specified node. * @param node path to get parent of * @return parent of path, null if passed the root node or an invalid node */ public static String getParent(String node) { int idx = node.lastIndexOf(ZNODE_PATH_SEPARATOR); return idx <= 0 ? null : node.substring(0, idx); } /** * Get the name of the current node from the specified fully-qualified path. 
* @param path fully-qualified path * @return name of the current node */ public static String getNodeName(String path) { return path.substring(path.lastIndexOf("/")+1); } // // Existence checks and watches // /** * Watch the specified znode for delete/create/change events. The watcher is * set whether or not the node exists. If the node already exists, the method * returns true. If the node does not exist, the method returns false. * * @param zkw zk reference * @param znode path of node to watch * @return true if znode exists, false if does not exist or error * @throws KeeperException if unexpected zookeeper exception */ public static boolean watchAndCheckExists(ZooKeeperWatcher zkw, String znode) throws KeeperException { try { Stat s = zkw.getRecoverableZooKeeper().exists(znode, zkw); boolean exists = s != null ? true : false; if (exists) { LOG.debug(zkw.prefix("Set watcher on existing znode=" + znode)); } else { LOG.debug(zkw.prefix("Set watcher on znode that does not yet exist, " + znode)); } return exists; } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e); zkw.keeperException(e); return false; } catch (InterruptedException e) { LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e); zkw.interruptedException(e); return false; } } /** * Watch the specified znode, but only if exists. Useful when watching * for deletions. Uses .getData() (and handles NoNodeException) instead * of .exists() to accomplish this, as .getData() will only set a watch if * the znode exists. * @param zkw zk reference * @param znode path of node to watch * @return true if the watch is set, false if node does not exists * @throws KeeperException if unexpected zookeeper exception */ public static boolean setWatchIfNodeExists(ZooKeeperWatcher zkw, String znode) throws KeeperException { try { zkw.getRecoverableZooKeeper().getData(znode, true, null); return true; } catch (NoNodeException e) { return false; } catch (InterruptedException e) { LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e); zkw.interruptedException(e); return false; } } /** * Check if the specified node exists. Sets no watches. * * @param zkw zk reference * @param znode path of node to watch * @return version of the node if it exists, -1 if does not exist * @throws KeeperException if unexpected zookeeper exception */ public static int checkExists(ZooKeeperWatcher zkw, String znode) throws KeeperException { try { Stat s = zkw.getRecoverableZooKeeper().exists(znode, null); return s != null ? s.getVersion() : -1; } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to set watcher on znode (" + znode + ")"), e); zkw.keeperException(e); return -1; } catch (InterruptedException e) { LOG.warn(zkw.prefix("Unable to set watcher on znode (" + znode + ")"), e); zkw.interruptedException(e); return -1; } } // // Znode listings // /** * Lists the children znodes of the specified znode. Also sets a watch on * the specified znode which will capture a NodeDeleted event on the specified * znode as well as NodeChildrenChanged if any children of the specified znode * are created or deleted. * * Returns null if the specified node does not exist. Otherwise returns a * list of children of the specified node. If the node exists but it has no * children, an empty list will be returned. 
* * @param zkw zk reference * @param znode path of node to list and watch children of * @return list of children of the specified node, an empty list if the node * exists but has no children, and null if the node does not exist * @throws KeeperException if unexpected zookeeper exception */ public static List<String> listChildrenAndWatchForNewChildren( ZooKeeperWatcher zkw, String znode) throws KeeperException { try { List<String> children = zkw.getRecoverableZooKeeper().getChildren(znode, zkw); return children; } catch(KeeperException.NoNodeException ke) { LOG.debug(zkw.prefix("Unable to list children of znode " + znode + " " + "because node does not exist (not an error)")); return null; } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to list children of znode " + znode + " "), e); zkw.keeperException(e); return null; } catch (InterruptedException e) { LOG.warn(zkw.prefix("Unable to list children of znode " + znode + " "), e); zkw.interruptedException(e); return null; } } /** * List all the children of the specified znode, setting a watch for children * changes and also setting a watch on every individual child in order to get * the NodeCreated and NodeDeleted events. * @param zkw zookeeper reference * @param znode node to get children of and watch * @return list of znode names, null if the node doesn't exist * @throws KeeperException */ public static List<String> listChildrenAndWatchThem(ZooKeeperWatcher zkw, String znode) throws KeeperException { List<String> children = listChildrenAndWatchForNewChildren(zkw, znode); if (children == null) { return null; } for (String child : children) { watchAndCheckExists(zkw, joinZNode(znode, child)); } return children; } /** * Lists the children of the specified znode without setting any watches. * * Sets no watches at all, this method is best effort. * * Returns an empty list if the node has no children. Returns null if the * parent node itself does not exist. * * @param zkw zookeeper reference * @param znode node to get children * @return list of data of children of specified znode, empty if no children, * null if parent does not exist * @throws KeeperException if unexpected zookeeper exception */ public static List<String> listChildrenNoWatch(ZooKeeperWatcher zkw, String znode) throws KeeperException { List<String> children = null; try { // List the children without watching children = zkw.getRecoverableZooKeeper().getChildren(znode, null); } catch(KeeperException.NoNodeException nne) { return null; } catch(InterruptedException ie) { zkw.interruptedException(ie); } return children; } /** * Simple class to hold a node path and node data. * @deprecated Unused */ @Deprecated public static class NodeAndData { private String node; private byte [] data; public NodeAndData(String node, byte [] data) { this.node = node; this.data = data; } public String getNode() { return node; } public byte [] getData() { return data; } @Override public String toString() { return node; } public boolean isEmpty() { return (data == null || data.length == 0); } } /** * Checks if the specified znode has any children. Sets no watches. * * Returns true if the node exists and has children. Returns false if the * node does not exist or if the node does not have any children. * * Used during master initialization to determine if the master is a * failed-over-to master or the first master during initial cluster startup. * If the directory for regionserver ephemeral nodes is empty then this is * a cluster startup, if not then it is not cluster startup. 
* * @param zkw zk reference * @param znode path of node to check for children of * @return true if node has children, false if not or node does not exist * @throws KeeperException if unexpected zookeeper exception */ public static boolean nodeHasChildren(ZooKeeperWatcher zkw, String znode) throws KeeperException { try { return !zkw.getRecoverableZooKeeper().getChildren(znode, null).isEmpty(); } catch(KeeperException.NoNodeException ke) { LOG.debug(zkw.prefix("Unable to list children of znode " + znode + " " + "because node does not exist (not an error)")); return false; } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e); zkw.keeperException(e); return false; } catch (InterruptedException e) { LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e); zkw.interruptedException(e); return false; } } /** * Get the number of children of the specified node. * * If the node does not exist or has no children, returns 0. * * Sets no watches at all. * * @param zkw zk reference * @param znode path of node to count children of * @return number of children of specified node, 0 if none or parent does not * exist * @throws KeeperException if unexpected zookeeper exception */ public static int getNumberOfChildren(ZooKeeperWatcher zkw, String znode) throws KeeperException { try { Stat stat = zkw.getRecoverableZooKeeper().exists(znode, null); return stat == null ? 0 : stat.getNumChildren(); } catch(KeeperException e) { LOG.warn(zkw.prefix("Unable to get children of node " + znode)); zkw.keeperException(e); } catch(InterruptedException e) { zkw.interruptedException(e); } return 0; } // // Data retrieval // /** * Get znode data. Does not set a watcher. * @return ZNode data, null if the node does not exist or if there is an * error. */ public static byte [] getData(ZooKeeperWatcher zkw, String znode) throws KeeperException, InterruptedException { try { byte [] data = zkw.getRecoverableZooKeeper().getData(znode, null, null); logRetrievedMsg(zkw, znode, data, false); return data; } catch (KeeperException.NoNodeException e) { LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " " + "because node does not exist (not an error)")); return null; } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); zkw.keeperException(e); return null; } } /** * Get the data at the specified znode and set a watch. * * Returns the data and sets a watch if the node exists. Returns null and no * watch is set if the node does not exist or there is an exception. * * @param zkw zk reference * @param znode path of node * @return data of the specified znode, or null * @throws KeeperException if unexpected zookeeper exception */ public static byte [] getDataAndWatch(ZooKeeperWatcher zkw, String znode) throws KeeperException { return getDataInternal(zkw, znode, null, true); } /** * Get the data at the specified znode and set a watch. * * Returns the data and sets a watch if the node exists. Returns null and no * watch is set if the node does not exist or there is an exception. 
* * @param zkw zk reference * @param znode path of node * @param stat object to populate the version of the znode * @return data of the specified znode, or null * @throws KeeperException if unexpected zookeeper exception */ public static byte[] getDataAndWatch(ZooKeeperWatcher zkw, String znode, Stat stat) throws KeeperException { return getDataInternal(zkw, znode, stat, true); } private static byte[] getDataInternal(ZooKeeperWatcher zkw, String znode, Stat stat, boolean watcherSet) throws KeeperException { try { byte [] data = zkw.getRecoverableZooKeeper().getData(znode, zkw, stat); logRetrievedMsg(zkw, znode, data, watcherSet); return data; } catch (KeeperException.NoNodeException e) { // This log can get pretty annoying when we cycle on 100ms waits. // Enable trace if you really want to see it. LOG.trace(zkw.prefix("Unable to get data of znode " + znode + " " + "because node does not exist (not an error)")); return null; } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); zkw.keeperException(e); return null; } catch (InterruptedException e) { LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); zkw.interruptedException(e); return null; } } /** * Get the data at the specified znode without setting a watch. * * Returns the data if the node exists. Returns null if the node does not * exist. * * Sets the stats of the node in the passed Stat object. Pass a null stat if * not interested. * * @param zkw zk reference * @param znode path of node * @param stat node status to get if node exists * @return data of the specified znode, or null if node does not exist * @throws KeeperException if unexpected zookeeper exception */ public static byte [] getDataNoWatch(ZooKeeperWatcher zkw, String znode, Stat stat) throws KeeperException { try { byte [] data = zkw.getRecoverableZooKeeper().getData(znode, null, stat); logRetrievedMsg(zkw, znode, data, false); return data; } catch (KeeperException.NoNodeException e) { LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " " + "because node does not exist (not necessarily an error)")); return null; } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); zkw.keeperException(e); return null; } catch (InterruptedException e) { LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); zkw.interruptedException(e); return null; } } /** * Returns the date of child znodes of the specified znode. Also sets a watch on * the specified znode which will capture a NodeDeleted event on the specified * znode as well as NodeChildrenChanged if any children of the specified znode * are created or deleted. * * Returns null if the specified node does not exist. Otherwise returns a * list of children of the specified node. If the node exists but it has no * children, an empty list will be returned. 
* * @param zkw zk reference * @param baseNode path of node to list and watch children of * @return list of data of children of the specified node, an empty list if the node * exists but has no children, and null if the node does not exist * @throws KeeperException if unexpected zookeeper exception * @deprecated Unused */ @Deprecated public static List<NodeAndData> getChildDataAndWatchForNewChildren( ZooKeeperWatcher zkw, String baseNode) throws KeeperException { List<String> nodes = ZKUtil.listChildrenAndWatchForNewChildren(zkw, baseNode); if (nodes != null) { List<NodeAndData> newNodes = new ArrayList<>(); for (String node : nodes) { String nodePath = ZKUtil.joinZNode(baseNode, node); byte[] data = ZKUtil.getDataAndWatch(zkw, nodePath); newNodes.add(new NodeAndData(nodePath, data)); } return newNodes; } return null; } /** * Update the data of an existing node with the expected version to have the * specified data. * * Throws an exception if there is a version mismatch or some other problem. * * Sets no watches under any conditions. * * @param zkw zk reference * @param znode * @param data * @param expectedVersion * @throws KeeperException if unexpected zookeeper exception * @throws KeeperException.BadVersionException if version mismatch * @deprecated Unused */ @Deprecated public static void updateExistingNodeData(ZooKeeperWatcher zkw, String znode, byte [] data, int expectedVersion) throws KeeperException { try { zkw.getRecoverableZooKeeper().setData(znode, data, expectedVersion); } catch(InterruptedException ie) { zkw.interruptedException(ie); } } // // Data setting // /** * Sets the data of the existing znode to be the specified data. Ensures that * the current data has the specified expected version. * * <p>If the node does not exist, a {@link NoNodeException} will be thrown. * * <p>If their is a version mismatch, method returns null. * * <p>No watches are set but setting data will trigger other watchers of this * node. * * <p>If there is another problem, a KeeperException will be thrown. * * @param zkw zk reference * @param znode path of node * @param data data to set for node * @param expectedVersion version expected when setting data * @return true if data set, false if version mismatch * @throws KeeperException if unexpected zookeeper exception */ public static boolean setData(ZooKeeperWatcher zkw, String znode, byte [] data, int expectedVersion) throws KeeperException, KeeperException.NoNodeException { try { return zkw.getRecoverableZooKeeper().setData(znode, data, expectedVersion) != null; } catch (InterruptedException e) { zkw.interruptedException(e); return false; } } /** * Set data into node creating node if it doesn't yet exist. * Does not set watch. * * @param zkw zk reference * @param znode path of node * @param data data to set for node * @throws KeeperException */ public static void createSetData(final ZooKeeperWatcher zkw, final String znode, final byte [] data) throws KeeperException { if (checkExists(zkw, znode) == -1) { ZKUtil.createWithParents(zkw, znode, data); } else { ZKUtil.setData(zkw, znode, data); } } /** * Sets the data of the existing znode to be the specified data. The node * must exist but no checks are done on the existing data or version. * * <p>If the node does not exist, a {@link NoNodeException} will be thrown. * * <p>No watches are set but setting data will trigger other watchers of this * node. * * <p>If there is another problem, a KeeperException will be thrown. 
* * @param zkw zk reference * @param znode path of node * @param data data to set for node * @throws KeeperException if unexpected zookeeper exception */ public static void setData(ZooKeeperWatcher zkw, String znode, byte [] data) throws KeeperException, KeeperException.NoNodeException { setData(zkw, (SetData)ZKUtilOp.setData(znode, data)); } private static void setData(ZooKeeperWatcher zkw, SetData setData) throws KeeperException, KeeperException.NoNodeException { SetDataRequest sd = (SetDataRequest)toZooKeeperOp(zkw, setData).toRequestRecord(); setData(zkw, sd.getPath(), sd.getData(), sd.getVersion()); } /** * Returns whether or not secure authentication is enabled * (whether <code>hbase.security.authentication</code> is set to * <code>kerberos</code>. */ public static boolean isSecureZooKeeper(Configuration conf) { // Detection for embedded HBase client with jaas configuration // defined for third party programs. try { javax.security.auth.login.Configuration testConfig = javax.security.auth.login.Configuration.getConfiguration(); if (testConfig.getAppConfigurationEntry("Client") == null && testConfig.getAppConfigurationEntry( JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME) == null && testConfig.getAppConfigurationEntry( JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME) == null && conf.get(HConstants.ZK_CLIENT_KERBEROS_PRINCIPAL) == null && conf.get(HConstants.ZK_SERVER_KERBEROS_PRINCIPAL) == null) { return false; } } catch(Exception e) { // No Jaas configuration defined. return false; } // Master & RSs uses hbase.zookeeper.client.* return "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication")); } private static ArrayList<ACL> createACL(ZooKeeperWatcher zkw, String node) { return createACL(zkw, node, isSecureZooKeeper(zkw.getConfiguration())); } public static ArrayList<ACL> createACL(ZooKeeperWatcher zkw, String node, boolean isSecureZooKeeper) { if (!node.startsWith(zkw.znodePaths.baseZNode)) { return Ids.OPEN_ACL_UNSAFE; } if (isSecureZooKeeper) { ArrayList<ACL> acls = new ArrayList<>(); // add permission to hbase supper user String[] superUsers = zkw.getConfiguration().getStrings(Superusers.SUPERUSER_CONF_KEY); if (superUsers != null) { List<String> groups = new ArrayList<>(); for (String user : superUsers) { if (AuthUtil.isGroupPrincipal(user)) { // TODO: Set node ACL for groups when ZK supports this feature groups.add(user); } else { acls.add(new ACL(Perms.ALL, new Id("sasl", user))); } } if (!groups.isEmpty()) { LOG.warn("Znode ACL setting for group " + groups + " is skipped, ZooKeeper doesn't support this feature presently."); } } // Certain znodes are accessed directly by the client, // so they must be readable by non-authenticated clients if (zkw.isClientReadable(node)) { acls.addAll(Ids.CREATOR_ALL_ACL); acls.addAll(Ids.READ_ACL_UNSAFE); } else { acls.addAll(Ids.CREATOR_ALL_ACL); } return acls; } else { return Ids.OPEN_ACL_UNSAFE; } } // // Node creation // /** * * Set the specified znode to be an ephemeral node carrying the specified * data. * * If the node is created successfully, a watcher is also set on the node. * * If the node is not created successfully because it already exists, this * method will also set a watcher on the node. * * If there is another problem, a KeeperException will be thrown. 
* * @param zkw zk reference * @param znode path of node * @param data data of node * @return true if node created, false if not, watch set in both cases * @throws KeeperException if unexpected zookeeper exception */ public static boolean createEphemeralNodeAndWatch(ZooKeeperWatcher zkw, String znode, byte [] data) throws KeeperException { boolean ret = true; try { zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), CreateMode.EPHEMERAL); } catch (KeeperException.NodeExistsException nee) { ret = false; } catch (InterruptedException e) { LOG.info("Interrupted", e); Thread.currentThread().interrupt(); } if(!watchAndCheckExists(zkw, znode)) { // It did exist but now it doesn't, try again return createEphemeralNodeAndWatch(zkw, znode, data); } return ret; } /** * Creates the specified znode to be a persistent node carrying the specified * data. * * Returns true if the node was successfully created, false if the node * already existed. * * If the node is created successfully, a watcher is also set on the node. * * If the node is not created successfully because it already exists, this * method will also set a watcher on the node but return false. * * If there is another problem, a KeeperException will be thrown. * * @param zkw zk reference * @param znode path of node * @param data data of node * @return true if node created, false if not, watch set in both cases * @throws KeeperException if unexpected zookeeper exception */ public static boolean createNodeIfNotExistsAndWatch( ZooKeeperWatcher zkw, String znode, byte [] data) throws KeeperException { boolean ret = true; try { zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), CreateMode.PERSISTENT); } catch (KeeperException.NodeExistsException nee) { ret = false; } catch (InterruptedException e) { zkw.interruptedException(e); return false; } try { zkw.getRecoverableZooKeeper().exists(znode, zkw); } catch (InterruptedException e) { zkw.interruptedException(e); return false; } return ret; } /** * Creates the specified znode with the specified data but does not watch it. * * Returns the znode of the newly created node * * If there is another problem, a KeeperException will be thrown. * * @param zkw zk reference * @param znode path of node * @param data data of node * @param createMode specifying whether the node to be created is ephemeral and/or sequential * @return true name of the newly created znode or null * @throws KeeperException if unexpected zookeeper exception */ public static String createNodeIfNotExistsNoWatch(ZooKeeperWatcher zkw, String znode, byte[] data, CreateMode createMode) throws KeeperException { String createdZNode = null; try { createdZNode = zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), createMode); } catch (KeeperException.NodeExistsException nee) { return znode; } catch (InterruptedException e) { zkw.interruptedException(e); return null; } return createdZNode; } /** * Creates the specified node with the specified data and watches it. * * <p>Throws an exception if the node already exists. * * <p>The node created is persistent and open access. * * <p>Returns the version number of the created node if successful. 
* * @param zkw zk reference * @param znode path of node to create * @param data data of node to create * @return version of node created * @throws KeeperException if unexpected zookeeper exception * @throws KeeperException.NodeExistsException if node already exists */ public static int createAndWatch(ZooKeeperWatcher zkw, String znode, byte [] data) throws KeeperException, KeeperException.NodeExistsException { try { zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), CreateMode.PERSISTENT); Stat stat = zkw.getRecoverableZooKeeper().exists(znode, zkw); if (stat == null){ // Likely a race condition. Someone deleted the znode. throw KeeperException.create(KeeperException.Code.SYSTEMERROR, "ZK.exists returned null (i.e.: znode does not exist) for znode=" + znode); } return stat.getVersion(); } catch (InterruptedException e) { zkw.interruptedException(e); return -1; } } /** * Async creates the specified node with the specified data. * * <p>Throws an exception if the node already exists. * * <p>The node created is persistent and open access. * * @param zkw zk reference * @param znode path of node to create * @param data data of node to create * @param cb * @param ctx */ public static void asyncCreate(ZooKeeperWatcher zkw, String znode, byte [] data, final AsyncCallback.StringCallback cb, final Object ctx) { zkw.getRecoverableZooKeeper().getZooKeeper().create(znode, data, createACL(zkw, znode), CreateMode.PERSISTENT, cb, ctx); } /** * Creates the specified node, iff the node does not exist. Does not set a * watch and fails silently if the node already exists. * * The node created is persistent and open access. * * @param zkw zk reference * @param znode path of node * @throws KeeperException if unexpected zookeeper exception */ public static void createAndFailSilent(ZooKeeperWatcher zkw, String znode) throws KeeperException { createAndFailSilent(zkw, znode, new byte[0]); } /** * Creates the specified node containing specified data, iff the node does not exist. Does * not set a watch and fails silently if the node already exists. * * The node created is persistent and open access. * * @param zkw zk reference * @param znode path of node * @param data a byte array data to store in the znode * @throws KeeperException if unexpected zookeeper exception */ public static void createAndFailSilent(ZooKeeperWatcher zkw, String znode, byte[] data) throws KeeperException { createAndFailSilent(zkw, (CreateAndFailSilent)ZKUtilOp.createAndFailSilent(znode, data)); } private static void createAndFailSilent(ZooKeeperWatcher zkw, CreateAndFailSilent cafs) throws KeeperException { CreateRequest create = (CreateRequest)toZooKeeperOp(zkw, cafs).toRequestRecord(); String znode = create.getPath(); try { RecoverableZooKeeper zk = zkw.getRecoverableZooKeeper(); if (zk.exists(znode, false) == null) { zk.create(znode, create.getData(), create.getAcl(), CreateMode.fromFlag(create.getFlags())); } } catch(KeeperException.NodeExistsException nee) { } catch(KeeperException.NoAuthException nee){ try { if (null == zkw.getRecoverableZooKeeper().exists(znode, false)) { // If we failed to create the file and it does not already exist. throw(nee); } } catch (InterruptedException ie) { zkw.interruptedException(ie); } } catch(InterruptedException ie) { zkw.interruptedException(ie); } } /** * Creates the specified node and all parent nodes required for it to exist. * * No watches are set and no errors are thrown if the node already exists. * * The nodes created are persistent and open access. 
* * @param zkw zk reference * @param znode path of node * @throws KeeperException if unexpected zookeeper exception */ public static void createWithParents(ZooKeeperWatcher zkw, String znode) throws KeeperException { createWithParents(zkw, znode, new byte[0]); } /** * Creates the specified node and all parent nodes required for it to exist. The creation of * parent znodes is not atomic with the leafe znode creation but the data is written atomically * when the leaf node is created. * * No watches are set and no errors are thrown if the node already exists. * * The nodes created are persistent and open access. * * @param zkw zk reference * @param znode path of node * @throws KeeperException if unexpected zookeeper exception */ public static void createWithParents(ZooKeeperWatcher zkw, String znode, byte[] data) throws KeeperException { try { if(znode == null) { return; } zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), CreateMode.PERSISTENT); } catch(KeeperException.NodeExistsException nee) { return; } catch(KeeperException.NoNodeException nne) { createWithParents(zkw, getParent(znode)); createWithParents(zkw, znode, data); } catch(InterruptedException ie) { zkw.interruptedException(ie); } } // // Deletes // /** * Delete the specified node. Sets no watches. Throws all exceptions. */ public static void deleteNode(ZooKeeperWatcher zkw, String node) throws KeeperException { deleteNode(zkw, node, -1); } /** * Delete the specified node with the specified version. Sets no watches. * Throws all exceptions. */ public static boolean deleteNode(ZooKeeperWatcher zkw, String node, int version) throws KeeperException { try { zkw.getRecoverableZooKeeper().delete(node, version); return true; } catch(KeeperException.BadVersionException bve) { return false; } catch(InterruptedException ie) { zkw.interruptedException(ie); return false; } } /** * Deletes the specified node. Fails silent if the node does not exist. * @param zkw * @param node * @throws KeeperException */ public static void deleteNodeFailSilent(ZooKeeperWatcher zkw, String node) throws KeeperException { deleteNodeFailSilent(zkw, (DeleteNodeFailSilent)ZKUtilOp.deleteNodeFailSilent(node)); } private static void deleteNodeFailSilent(ZooKeeperWatcher zkw, DeleteNodeFailSilent dnfs) throws KeeperException { DeleteRequest delete = (DeleteRequest)toZooKeeperOp(zkw, dnfs).toRequestRecord(); try { zkw.getRecoverableZooKeeper().delete(delete.getPath(), delete.getVersion()); } catch(KeeperException.NoNodeException nne) { } catch(InterruptedException ie) { zkw.interruptedException(ie); } } /** * Delete the specified node and all of it's children. * <p> * If the node does not exist, just returns. * <p> * Sets no watches. Throws all exceptions besides dealing with deletion of * children. */ public static void deleteNodeRecursively(ZooKeeperWatcher zkw, String node) throws KeeperException { deleteNodeRecursivelyMultiOrSequential(zkw, true, node); } /** * Delete all the children of the specified node but not the node itself. * * Sets no watches. Throws all exceptions besides dealing with deletion of * children. * * @throws KeeperException */ public static void deleteChildrenRecursively(ZooKeeperWatcher zkw, String node) throws KeeperException { deleteChildrenRecursivelyMultiOrSequential(zkw, true, node); } /** * Delete all the children of the specified node but not the node itself. 
This * will first traverse the znode tree for listing the children and then delete * these znodes using multi-update api or sequential based on the specified * configurations. * <p> * Sets no watches. Throws all exceptions besides dealing with deletion of * children. * <p> * If the following is true: * <ul> * <li>runSequentialOnMultiFailure is true * </ul> * on calling multi, we get a ZooKeeper exception that can be handled by a * sequential call(*), we retry the operations one-by-one (sequentially). * * @param zkw * - zk reference * @param runSequentialOnMultiFailure * - if true when we get a ZooKeeper exception that could retry the * operations one-by-one (sequentially) * @param pathRoots * - path of the parent node(s) * @throws KeeperException.NotEmptyException * if node has children while deleting * @throws KeeperException * if unexpected ZooKeeper exception * @throws IllegalArgumentException * if an invalid path is specified */ public static void deleteChildrenRecursivelyMultiOrSequential( ZooKeeperWatcher zkw, boolean runSequentialOnMultiFailure, String... pathRoots) throws KeeperException { if (pathRoots == null || pathRoots.length <= 0) { LOG.warn("Given path is not valid!"); return; } List<ZKUtilOp> ops = new ArrayList<>(); for (String eachRoot : pathRoots) { List<String> children = listChildrenBFSNoWatch(zkw, eachRoot); // Delete the leaves first and eventually get rid of the root for (int i = children.size() - 1; i >= 0; --i) { ops.add(ZKUtilOp.deleteNodeFailSilent(children.get(i))); } } // atleast one element should exist if (ops.size() > 0) { multiOrSequential(zkw, ops, runSequentialOnMultiFailure); } } /** * Delete the specified node and its children. This traverse the * znode tree for listing the children and then delete * these znodes including the parent using multi-update api or * sequential based on the specified configurations. * <p> * Sets no watches. Throws all exceptions besides dealing with deletion of * children. * <p> * If the following is true: * <ul> * <li>runSequentialOnMultiFailure is true * </ul> * on calling multi, we get a ZooKeeper exception that can be handled by a * sequential call(*), we retry the operations one-by-one (sequentially). * * @param zkw * - zk reference * @param runSequentialOnMultiFailure * - if true when we get a ZooKeeper exception that could retry the * operations one-by-one (sequentially) * @param pathRoots * - path of the parent node(s) * @throws KeeperException.NotEmptyException * if node has children while deleting * @throws KeeperException * if unexpected ZooKeeper exception * @throws IllegalArgumentException * if an invalid path is specified */ public static void deleteNodeRecursivelyMultiOrSequential(ZooKeeperWatcher zkw, boolean runSequentialOnMultiFailure, String... 
pathRoots) throws KeeperException { if (pathRoots == null || pathRoots.length <= 0) { LOG.warn("Given path is not valid!"); return; } List<ZKUtilOp> ops = new ArrayList<>(); for (String eachRoot : pathRoots) { // ZooKeeper Watches are one time triggers; When children of parent nodes are deleted // recursively, must set another watch, get notified of delete node List<String> children = listChildrenBFSAndWatchThem(zkw, eachRoot); // Delete the leaves first and eventually get rid of the root for (int i = children.size() - 1; i >= 0; --i) { ops.add(ZKUtilOp.deleteNodeFailSilent(children.get(i))); } try { if (zkw.getRecoverableZooKeeper().exists(eachRoot, zkw) != null) { ops.add(ZKUtilOp.deleteNodeFailSilent(eachRoot)); } } catch (InterruptedException e) { zkw.interruptedException(e); } } // atleast one element should exist if (ops.size() > 0) { multiOrSequential(zkw, ops, runSequentialOnMultiFailure); } } /** * BFS Traversal of all the children under path, with the entries in the list, * in the same order as that of the traversal. Lists all the children without * setting any watches. * * @param zkw * - zk reference * @param znode * - path of node * @return list of children znodes under the path * @throws KeeperException * if unexpected ZooKeeper exception */ private static List<String> listChildrenBFSNoWatch(ZooKeeperWatcher zkw, final String znode) throws KeeperException { Deque<String> queue = new LinkedList<>(); List<String> tree = new ArrayList<>(); queue.add(znode); while (true) { String node = queue.pollFirst(); if (node == null) { break; } List<String> children = listChildrenNoWatch(zkw, node); if (children == null) { continue; } for (final String child : children) { final String childPath = node + "/" + child; queue.add(childPath); tree.add(childPath); } } return tree; } /** * BFS Traversal of all the children under path, with the entries in the list, * in the same order as that of the traversal. * Lists all the children and set watches on to them. * * @param zkw * - zk reference * @param znode * - path of node * @return list of children znodes under the path * @throws KeeperException * if unexpected ZooKeeper exception */ private static List<String> listChildrenBFSAndWatchThem(ZooKeeperWatcher zkw, final String znode) throws KeeperException { Deque<String> queue = new LinkedList<>(); List<String> tree = new ArrayList<>(); queue.add(znode); while (true) { String node = queue.pollFirst(); if (node == null) { break; } List<String> children = listChildrenAndWatchThem(zkw, node); if (children == null) { continue; } for (final String child : children) { final String childPath = node + "/" + child; queue.add(childPath); tree.add(childPath); } } return tree; } /** * Represents an action taken by ZKUtil, e.g. createAndFailSilent. * These actions are higher-level than ZKOp actions, which represent * individual actions in the ZooKeeper API, like create. 
*/ public abstract static class ZKUtilOp { private String path; private ZKUtilOp(String path) { this.path = path; } /** * @return a createAndFailSilent ZKUtilOp */ public static ZKUtilOp createAndFailSilent(String path, byte[] data) { return new CreateAndFailSilent(path, data); } /** * @return a deleteNodeFailSilent ZKUtilOP */ public static ZKUtilOp deleteNodeFailSilent(String path) { return new DeleteNodeFailSilent(path); } /** * @return a setData ZKUtilOp */ public static ZKUtilOp setData(String path, byte [] data) { return new SetData(path, data); } /** * @return path to znode where the ZKOp will occur */ public String getPath() { return path; } /** * ZKUtilOp representing createAndFailSilent in ZooKeeper * (attempt to create node, ignore error if already exists) */ public static class CreateAndFailSilent extends ZKUtilOp { private byte [] data; private CreateAndFailSilent(String path, byte [] data) { super(path); this.data = data; } public byte[] getData() { return data; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof CreateAndFailSilent)) return false; CreateAndFailSilent op = (CreateAndFailSilent) o; return getPath().equals(op.getPath()) && Arrays.equals(data, op.data); } @Override public int hashCode() { int ret = 17 + getPath().hashCode() * 31; return ret * 31 + Bytes.hashCode(data); } } /** * ZKUtilOp representing deleteNodeFailSilent in ZooKeeper * (attempt to delete node, ignore error if node doesn't exist) */ public static class DeleteNodeFailSilent extends ZKUtilOp { private DeleteNodeFailSilent(String path) { super(path); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof DeleteNodeFailSilent)) return false; return super.equals(o); } @Override public int hashCode() { return getPath().hashCode(); } } /** * ZKUtilOp representing setData in ZooKeeper */ public static class SetData extends ZKUtilOp { private byte [] data; private SetData(String path, byte [] data) { super(path); this.data = data; } public byte[] getData() { return data; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof SetData)) return false; SetData op = (SetData) o; return getPath().equals(op.getPath()) && Arrays.equals(data, op.data); } @Override public int hashCode() { int ret = getPath().hashCode(); return ret * 31 + Bytes.hashCode(data); } } } /** * Convert from ZKUtilOp to ZKOp */ private static Op toZooKeeperOp(ZooKeeperWatcher zkw, ZKUtilOp op) throws UnsupportedOperationException { if(op == null) return null; if (op instanceof CreateAndFailSilent) { CreateAndFailSilent cafs = (CreateAndFailSilent)op; return Op.create(cafs.getPath(), cafs.getData(), createACL(zkw, cafs.getPath()), CreateMode.PERSISTENT); } else if (op instanceof DeleteNodeFailSilent) { DeleteNodeFailSilent dnfs = (DeleteNodeFailSilent)op; return Op.delete(dnfs.getPath(), -1); } else if (op instanceof SetData) { SetData sd = (SetData)op; return Op.setData(sd.getPath(), sd.getData(), -1); } else { throw new UnsupportedOperationException("Unexpected ZKUtilOp type: " + op.getClass().getName()); } } /** * Use ZooKeeper's multi-update functionality. * * If all of the following are true: * - runSequentialOnMultiFailure is true * - on calling multi, we get a ZooKeeper exception that can be handled by a sequential call(*) * Then: * - we retry the operations one-by-one (sequentially) * * Note *: an example is receiving a NodeExistsException from a "create" call. 
Without multi, * a user could call "createAndFailSilent" to ensure that a node exists if they don't care who * actually created the node (i.e. the NodeExistsException from ZooKeeper is caught). * This will cause all operations in the multi to fail, however, because * the NodeExistsException that zk.create throws will fail the multi transaction. * In this case, if the previous conditions hold, the commands are run sequentially, which should * result in the correct final state, but means that the operations will not run atomically. * * @throws KeeperException */ public static void multiOrSequential(ZooKeeperWatcher zkw, List<ZKUtilOp> ops, boolean runSequentialOnMultiFailure) throws KeeperException { if (zkw.getConfiguration().get("hbase.zookeeper.useMulti") != null) { LOG.warn("hbase.zookeeper.useMulti is deprecated. Default to true always."); } if (ops == null) return; List<Op> zkOps = new LinkedList<>(); for (ZKUtilOp op : ops) { zkOps.add(toZooKeeperOp(zkw, op)); } try { zkw.getRecoverableZooKeeper().multi(zkOps); } catch (KeeperException ke) { switch (ke.code()) { case NODEEXISTS: case NONODE: case BADVERSION: case NOAUTH: // if we get an exception that could be solved by running sequentially // (and the client asked us to), then break out and run sequentially if (runSequentialOnMultiFailure) { LOG.info("On call to ZK.multi, received exception: " + ke.toString() + "." + " Attempting to run operations sequentially because" + " runSequentialOnMultiFailure is: " + runSequentialOnMultiFailure + "."); processSequentially(zkw, ops); break; } default: throw ke; } } catch (InterruptedException ie) { zkw.interruptedException(ie); } } private static void processSequentially(ZooKeeperWatcher zkw, List<ZKUtilOp> ops) throws KeeperException, NoNodeException { for (ZKUtilOp op : ops) { if (op instanceof CreateAndFailSilent) { createAndFailSilent(zkw, (CreateAndFailSilent) op); } else if (op instanceof DeleteNodeFailSilent) { deleteNodeFailSilent(zkw, (DeleteNodeFailSilent) op); } else if (op instanceof SetData) { setData(zkw, (SetData) op); } else { throw new UnsupportedOperationException("Unexpected ZKUtilOp type: " + op.getClass().getName()); } } } // // ZooKeeper cluster information // /** @return String dump of everything in ZooKeeper. 
*/ public static String dump(ZooKeeperWatcher zkw) { StringBuilder sb = new StringBuilder(); try { sb.append("HBase is rooted at ").append(zkw.znodePaths.baseZNode); sb.append("\nActive master address: "); try { sb.append(MasterAddressTracker.getMasterAddress(zkw)); } catch (IOException e) { sb.append("<<FAILED LOOKUP: " + e.getMessage() + ">>"); } sb.append("\nBackup master addresses:"); for (String child : listChildrenNoWatch(zkw, zkw.znodePaths.backupMasterAddressesZNode)) { sb.append("\n ").append(child); } sb.append("\nRegion server holding hbase:meta: " + new MetaTableLocator().getMetaRegionLocation(zkw)); Configuration conf = HBaseConfiguration.create(); int numMetaReplicas = conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM); for (int i = 1; i < numMetaReplicas; i++) { sb.append("\nRegion server holding hbase:meta, replicaId " + i + " " + new MetaTableLocator().getMetaRegionLocation(zkw, i)); } sb.append("\nRegion servers:"); for (String child : listChildrenNoWatch(zkw, zkw.znodePaths.rsZNode)) { sb.append("\n ").append(child); } try { getReplicationZnodesDump(zkw, sb); } catch (KeeperException ke) { LOG.warn("Couldn't get the replication znode dump", ke); } sb.append("\nQuorum Server Statistics:"); String[] servers = zkw.getQuorum().split(","); for (String server : servers) { sb.append("\n ").append(server); try { String[] stat = getServerStats(server, ZKUtil.zkDumpConnectionTimeOut); if (stat == null) { sb.append("[Error] invalid quorum server: " + server); break; } for (String s : stat) { sb.append("\n ").append(s); } } catch (Exception e) { sb.append("\n ERROR: ").append(e.getMessage()); } } } catch (KeeperException ke) { sb.append("\nFATAL ZooKeeper Exception!\n"); sb.append("\n" + ke.getMessage()); } return sb.toString(); } /** * Appends replication znodes to the passed StringBuilder. * @param zkw * @param sb * @throws KeeperException */ private static void getReplicationZnodesDump(ZooKeeperWatcher zkw, StringBuilder sb) throws KeeperException { String replicationZNodeName = zkw.getConfiguration().get("zookeeper.znode.replication", "replication"); String replicationZnode = joinZNode(zkw.znodePaths.baseZNode, replicationZNodeName); if (ZKUtil.checkExists(zkw, replicationZnode) == -1) return; // do a ls -r on this znode sb.append("\n").append(replicationZnode).append(": "); List<String> children = ZKUtil.listChildrenNoWatch(zkw, replicationZnode); for (String child : children) { String znode = joinZNode(replicationZnode, child); if (child.equals(zkw.getConfiguration().get("zookeeper.znode.replication.peers", "peers"))) { appendPeersZnodes(zkw, znode, sb); } else if (child.equals(zkw.getConfiguration(). 
get("zookeeper.znode.replication.rs", "rs"))) { appendRSZnodes(zkw, znode, sb); } else if (child.equals(zkw.getConfiguration().get( ReplicationStateZKBase.ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_KEY, ReplicationStateZKBase.ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT))) { appendHFileRefsZnodes(zkw, znode, sb); } } } private static void appendHFileRefsZnodes(ZooKeeperWatcher zkw, String hfileRefsZnode, StringBuilder sb) throws KeeperException { sb.append("\n").append(hfileRefsZnode).append(": "); for (String peerIdZnode : ZKUtil.listChildrenNoWatch(zkw, hfileRefsZnode)) { String znodeToProcess = ZKUtil.joinZNode(hfileRefsZnode, peerIdZnode); sb.append("\n").append(znodeToProcess).append(": "); List<String> peerHFileRefsZnodes = ZKUtil.listChildrenNoWatch(zkw, znodeToProcess); int size = peerHFileRefsZnodes.size(); for (int i = 0; i < size; i++) { sb.append(peerHFileRefsZnodes.get(i)); if (i != size - 1) { sb.append(", "); } } } } /** * Returns a string with replication znodes and position of the replication log * @param zkw * @return aq string of replication znodes and log positions */ public static String getReplicationZnodesDump(ZooKeeperWatcher zkw) throws KeeperException { StringBuilder sb = new StringBuilder(); getReplicationZnodesDump(zkw, sb); return sb.toString(); } private static void appendRSZnodes(ZooKeeperWatcher zkw, String znode, StringBuilder sb) throws KeeperException { List<String> stack = new LinkedList<>(); stack.add(znode); do { String znodeToProcess = stack.remove(stack.size() - 1); sb.append("\n").append(znodeToProcess).append(": "); byte[] data; try { data = ZKUtil.getData(zkw, znodeToProcess); } catch (InterruptedException e) { zkw.interruptedException(e); return; } if (data != null && data.length > 0) { // log position long position = 0; try { position = ZKUtil.parseWALPositionFrom(ZKUtil.getData(zkw, znodeToProcess)); sb.append(position); } catch (DeserializationException ignored) { } catch (InterruptedException e) { zkw.interruptedException(e); return; } } for (String zNodeChild : ZKUtil.listChildrenNoWatch(zkw, znodeToProcess)) { stack.add(ZKUtil.joinZNode(znodeToProcess, zNodeChild)); } } while (stack.size() > 0); } private static void appendPeersZnodes(ZooKeeperWatcher zkw, String peersZnode, StringBuilder sb) throws KeeperException { int pblen = ProtobufUtil.lengthOfPBMagic(); sb.append("\n").append(peersZnode).append(": "); for (String peerIdZnode : ZKUtil.listChildrenNoWatch(zkw, peersZnode)) { String znodeToProcess = ZKUtil.joinZNode(peersZnode, peerIdZnode); byte[] data; try { data = ZKUtil.getData(zkw, znodeToProcess); } catch (InterruptedException e) { zkw.interruptedException(e); return; } // parse the data of the above peer znode. try { ReplicationProtos.ReplicationPeer.Builder builder = ReplicationProtos.ReplicationPeer.newBuilder(); ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen); String clusterKey = builder.getClusterkey(); sb.append("\n").append(znodeToProcess).append(": ").append(clusterKey); // add the peer-state. 
appendPeerState(zkw, znodeToProcess, sb); } catch (IOException ipbe) { LOG.warn("Got Exception while parsing peer: " + znodeToProcess, ipbe); } } } private static void appendPeerState(ZooKeeperWatcher zkw, String znodeToProcess, StringBuilder sb) throws KeeperException, InvalidProtocolBufferException { String peerState = zkw.getConfiguration().get("zookeeper.znode.replication.peers.state", "peer-state"); int pblen = ProtobufUtil.lengthOfPBMagic(); for (String child : ZKUtil.listChildrenNoWatch(zkw, znodeToProcess)) { if (!child.equals(peerState)) continue; String peerStateZnode = ZKUtil.joinZNode(znodeToProcess, child); sb.append("\n").append(peerStateZnode).append(": "); byte[] peerStateData; try { peerStateData = ZKUtil.getData(zkw, peerStateZnode); ReplicationProtos.ReplicationState.Builder builder = ReplicationProtos.ReplicationState.newBuilder(); ProtobufUtil.mergeFrom(builder, peerStateData, pblen, peerStateData.length - pblen); sb.append(builder.getState().name()); } catch (IOException ipbe) { LOG.warn("Got Exception while parsing peer: " + znodeToProcess, ipbe); } catch (InterruptedException e) { zkw.interruptedException(e); return; } } } /** * Gets the statistics from the given server. * * @param server The server to get the statistics from. * @param timeout The socket timeout to use. * @return The array of response strings. * @throws IOException When the socket communication fails. */ public static String[] getServerStats(String server, int timeout) throws IOException { String[] sp = server.split(":"); if (sp == null || sp.length == 0) { return null; } String host = sp[0]; int port = sp.length > 1 ? Integer.parseInt(sp[1]) : HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT; InetSocketAddress sockAddr = new InetSocketAddress(host, port); try (Socket socket = new Socket()) { socket.connect(sockAddr, timeout); socket.setSoTimeout(timeout); PrintWriter out = new PrintWriter(socket.getOutputStream(), true); BufferedReader in = new BufferedReader(new InputStreamReader( socket.getInputStream())); out.println("stat"); out.flush(); ArrayList<String> res = new ArrayList<>(); while (true) { String line = in.readLine(); if (line != null) { res.add(line); } else { break; } } return res.toArray(new String[res.size()]); } } private static void logRetrievedMsg(final ZooKeeperWatcher zkw, final String znode, final byte [] data, final boolean watcherSet) { if (!LOG.isTraceEnabled()) return; LOG.trace(zkw.prefix("Retrieved " + ((data == null)? 0: data.length) + " byte(s) of data from znode " + znode + (watcherSet? " and set watcher; ": "; data=") + (data == null? "null": data.length == 0? "empty": ( znode.startsWith(zkw.znodePaths.metaZNodePrefix)? getServerNameOrEmptyString(data): znode.startsWith(zkw.znodePaths.backupMasterAddressesZNode)? getServerNameOrEmptyString(data): StringUtils.abbreviate(Bytes.toStringBinary(data), 32))))); } private static String getServerNameOrEmptyString(final byte [] data) { try { return ProtobufUtil.parseServerNameFrom(data).toString(); } catch (DeserializationException e) { return ""; } } /** * Waits for HBase installation's base (parent) znode to become available. 
* @throws IOException on ZK errors */ public static void waitForBaseZNode(Configuration conf) throws IOException { LOG.info("Waiting until the base znode is available"); String parentZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); ZooKeeper zk = new ZooKeeper(ZKConfig.getZKQuorumServersString(conf), conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT), EmptyWatcher.instance); final int maxTimeMs = 10000; final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS; KeeperException keeperEx = null; try { try { for (int attempt = 0; attempt < maxNumAttempts; ++attempt) { try { if (zk.exists(parentZNode, false) != null) { LOG.info("Parent znode exists: " + parentZNode); keeperEx = null; break; } } catch (KeeperException e) { keeperEx = e; } Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS); } } finally { zk.close(); } } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } if (keeperEx != null) { throw new IOException(keeperEx); } } /** * Convert a {@link DeserializationException} to a more palatable {@link KeeperException}. * Used when can't let a {@link DeserializationException} out w/o changing public API. * @param e Exception to convert * @return Converted exception */ public static KeeperException convert(final DeserializationException e) { KeeperException ke = new KeeperException.DataInconsistencyException(); ke.initCause(e); return ke; } /** * Recursively print the current state of ZK (non-transactional) * @param root name of the root directory in zk to print */ public static void logZKTree(ZooKeeperWatcher zkw, String root) { if (!LOG.isDebugEnabled()) return; LOG.debug("Current zk system:"); String prefix = "|-"; LOG.debug(prefix + root); try { logZKTree(zkw, root, prefix); } catch (KeeperException e) { throw new RuntimeException(e); } } /** * Helper method to print the current state of the ZK tree. * @see #logZKTree(ZooKeeperWatcher, String) * @throws KeeperException if an unexpected exception occurs */ protected static void logZKTree(ZooKeeperWatcher zkw, String root, String prefix) throws KeeperException { List<String> children = ZKUtil.listChildrenNoWatch(zkw, root); if (children == null) return; for (String child : children) { LOG.debug(prefix + child); String node = ZKUtil.joinZNode(root.equals("/") ? "" : root, child); logZKTree(zkw, node, prefix + "---"); } } /** * @param position * @return Serialized protobuf of <code>position</code> with pb magic prefix prepended suitable * for use as content of an wal position in a replication queue. */ public static byte[] positionToByteArray(final long position) { byte[] bytes = ReplicationProtos.ReplicationHLogPosition.newBuilder().setPosition(position) .build().toByteArray(); return ProtobufUtil.prependPBMagic(bytes); } /** * @param bytes - Content of a WAL position znode. * @return long - The current WAL position. 
* @throws DeserializationException */ public static long parseWALPositionFrom(final byte[] bytes) throws DeserializationException { if (bytes == null) { throw new DeserializationException("Unable to parse null WAL position."); } if (ProtobufUtil.isPBMagicPrefix(bytes)) { int pblen = ProtobufUtil.lengthOfPBMagic(); ReplicationProtos.ReplicationHLogPosition.Builder builder = ReplicationProtos.ReplicationHLogPosition.newBuilder(); ReplicationProtos.ReplicationHLogPosition position; try { ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen); position = builder.build(); } catch (IOException e) { throw new DeserializationException(e); } return position.getPosition(); } else { if (bytes.length > 0) { return Bytes.toLong(bytes); } return 0; } } /** * @param regionLastFlushedSequenceId the flushed sequence id of a region which is the min of its * store max seq ids * @param storeSequenceIds column family to sequence Id map * @return Serialized protobuf of <code>RegionSequenceIds</code> with pb magic prefix prepended * suitable for use to filter wal edits in distributedLogReplay mode */ public static byte[] regionSequenceIdsToByteArray(final Long regionLastFlushedSequenceId, final Map<byte[], Long> storeSequenceIds) { ClusterStatusProtos.RegionStoreSequenceIds.Builder regionSequenceIdsBuilder = ClusterStatusProtos.RegionStoreSequenceIds.newBuilder(); ClusterStatusProtos.StoreSequenceId.Builder storeSequenceIdBuilder = ClusterStatusProtos.StoreSequenceId.newBuilder(); if (storeSequenceIds != null) { for (Map.Entry<byte[], Long> e : storeSequenceIds.entrySet()){ byte[] columnFamilyName = e.getKey(); Long curSeqId = e.getValue(); storeSequenceIdBuilder.setFamilyName(UnsafeByteOperations.unsafeWrap(columnFamilyName)); storeSequenceIdBuilder.setSequenceId(curSeqId); regionSequenceIdsBuilder.addStoreSequenceId(storeSequenceIdBuilder.build()); storeSequenceIdBuilder.clear(); } } regionSequenceIdsBuilder.setLastFlushedSequenceId(regionLastFlushedSequenceId); byte[] result = regionSequenceIdsBuilder.build().toByteArray(); return ProtobufUtil.prependPBMagic(result); } /** * @param bytes Content of serialized data of RegionStoreSequenceIds * @return a RegionStoreSequenceIds object * @throws DeserializationException */ public static RegionStoreSequenceIds parseRegionStoreSequenceIds(final byte[] bytes) throws DeserializationException { if (bytes == null || !ProtobufUtil.isPBMagicPrefix(bytes)) { throw new DeserializationException("Unable to parse RegionStoreSequenceIds."); } RegionStoreSequenceIds.Builder regionSequenceIdsBuilder = ClusterStatusProtos.RegionStoreSequenceIds.newBuilder(); int pblen = ProtobufUtil.lengthOfPBMagic(); RegionStoreSequenceIds storeIds = null; try { ProtobufUtil.mergeFrom(regionSequenceIdsBuilder, bytes, pblen, bytes.length - pblen); storeIds = regionSequenceIdsBuilder.build(); } catch (IOException e) { throw new DeserializationException(e); } return storeIds; } }
[ "\"HBASE_JAAS_DEBUG\"" ]
[]
[ "HBASE_JAAS_DEBUG" ]
[]
["HBASE_JAAS_DEBUG"]
java
1
0
eventarc/audit_storage/main.go
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// [START eventarc_gcs_handler]

// Sample run-events-storage is a Cloud Run service which handles Cloud Audit Log messages with Cloud Storage data.
package main

import (
	"fmt"
	"log"
	"net/http"
	"os"
)

// HelloEventsStorage handles a Cloud Audit Log event for Cloud Storage,
// delivered by Eventarc as a CloudEvent over HTTP, and echoes the event's
// Ce-Subject header back to the caller.
func HelloEventsStorage(w http.ResponseWriter, r *http.Request) {
	s := fmt.Sprintf("GCS CloudEvent subject: %s", r.Header.Get("Ce-Subject"))
	log.Println(s)
	fmt.Fprintln(w, s)
}

// [END eventarc_gcs_handler]

// [START eventarc_gcs_server]
func main() {
	http.HandleFunc("/", HelloEventsStorage)
	// Determine port for HTTP service.
	port := os.Getenv("PORT")
	if port == "" {
		port = "8080"
	}
	// Start HTTP server.
	log.Printf("Listening on port %s", port)
	if err := http.ListenAndServe(":"+port, nil); err != nil {
		log.Fatal(err)
	}
}

// [END eventarc_gcs_server]
[ "\"PORT\"" ]
[]
[ "PORT" ]
[]
["PORT"]
go
1
0
python_modules/dagit/dagit/cli.py
import os import sys import threading from contextlib import contextmanager from typing import Optional import click from dagster import check from dagster.cli.utils import get_instance_for_service from dagster.cli.workspace import ( get_workspace_process_context_from_kwargs, workspace_target_argument, ) from dagster.cli.workspace.cli_target import WORKSPACE_TARGET_WARNING from dagster.core.telemetry import START_DAGIT_WEBSERVER, log_action from dagster.core.workspace import WorkspaceProcessContext from dagster.utils import DEFAULT_WORKSPACE_YAML_FILENAME from gevent import pywsgi from geventwebsocket.handler import WebSocketHandler from .app import create_app_from_workspace_process_context from .telemetry import upload_logs from .version import __version__ def create_dagit_cli(): return ui # pylint: disable=no-value-for-parameter DEFAULT_DAGIT_HOST = "127.0.0.1" DEFAULT_DAGIT_PORT = 3000 DEFAULT_DB_STATEMENT_TIMEOUT = 5000 # 5 sec @click.command( name="ui", help=( "Run dagit. Loads a repository or pipeline/job.\n\n{warning}".format( warning=WORKSPACE_TARGET_WARNING ) + ( "\n\nExamples:" "\n\n1. dagit (works if .{default_filename} exists)" "\n\n2. dagit -w path/to/{default_filename}" "\n\n3. dagit -f path/to/file.py" "\n\n4. dagit -f path/to/file.py -d path/to/working_directory" "\n\n5. dagit -m some_module" "\n\n6. dagit -f path/to/file.py -a define_repo" "\n\n7. dagit -m some_module -a define_repo" "\n\n8. dagit -p 3333" "\n\nOptions can also provide arguments via environment variables prefixed with DAGIT" "\n\nFor example, DAGIT_PORT=3333 dagit" ).format(default_filename=DEFAULT_WORKSPACE_YAML_FILENAME) ), ) @workspace_target_argument @click.option( "--host", "-h", type=click.STRING, default=DEFAULT_DAGIT_HOST, help="Host to run server on", show_default=True, ) @click.option( "--port", "-p", type=click.INT, help="Port to run server on, default is {default_port}".format(default_port=DEFAULT_DAGIT_PORT), ) @click.option( "--path-prefix", "-l", type=click.STRING, default="", help="The path prefix where Dagit will be hosted (eg: /dagit)", show_default=True, ) @click.option( "--db-statement-timeout", help="The timeout in milliseconds to set on database statements sent " "to the DagsterInstance. 
Not respected in all configurations.", default=DEFAULT_DB_STATEMENT_TIMEOUT, type=click.INT, show_default=True, ) @click.option( "--read-only", help="Start Dagit in read-only mode, where all mutations such as launching runs and " "turning schedules on/off are turned off.", is_flag=True, ) @click.option( "--suppress-warnings", help="Filter all warnings when hosting Dagit.", is_flag=True, ) @click.version_option(version=__version__, prog_name="dagit") def ui(host, port, path_prefix, db_statement_timeout, read_only, suppress_warnings, **kwargs): # add the path for the cwd so imports in dynamically loaded code work correctly sys.path.append(os.getcwd()) if port is None: port_lookup = True port = DEFAULT_DAGIT_PORT else: port_lookup = False host_dagit_ui( host, port, path_prefix, db_statement_timeout, port_lookup, read_only, suppress_warnings, **kwargs, ) def host_dagit_ui( host, port, path_prefix, db_statement_timeout, port_lookup=True, read_only=False, suppress_warnings=False, **kwargs, ): if suppress_warnings: os.environ["PYTHONWARNINGS"] = "ignore" with get_instance_for_service("dagit") as instance: # Allow the instance components to change behavior in the context of a long running server process instance.optimize_for_dagit(db_statement_timeout) with get_workspace_process_context_from_kwargs( instance, version=__version__, read_only=read_only, kwargs=kwargs, ) as workspace_process_context: host_dagit_ui_with_workspace_process_context( workspace_process_context, host, port, path_prefix, port_lookup ) def host_dagit_ui_with_workspace_process_context( workspace_process_context: WorkspaceProcessContext, host: Optional[str], port: int, path_prefix: str, port_lookup: bool = True, ): check.inst_param( workspace_process_context, "workspace_process_context", WorkspaceProcessContext ) check.opt_str_param(host, "host") check.int_param(port, "port") check.str_param(path_prefix, "path_prefix") check.bool_param(port_lookup, "port_lookup") app = create_app_from_workspace_process_context(workspace_process_context, path_prefix) start_server(workspace_process_context.instance, host, port, path_prefix, app, port_lookup) @contextmanager def uploading_logging_thread(instance): from dagster.core.instance import is_dagit_telemetry_enabled stop_event = threading.Event() logging_thread = threading.Thread( target=upload_logs, args=([stop_event]), name="telemetry-upload" ) try: # Telemetry data is still experimental, so don't upload if telemetry flags are enabled. if not is_dagit_telemetry_enabled(instance): logging_thread.start() yield finally: if not is_dagit_telemetry_enabled(instance): stop_event.set() logging_thread.join() def start_server(instance, host, port, path_prefix, app, port_lookup, port_lookup_attempts=0): server = pywsgi.WSGIServer((host, port), app, handler_class=WebSocketHandler) click.echo( "Serving on http://{host}:{port}{path_prefix} in process {pid}".format( host=host, port=port, path_prefix=path_prefix, pid=os.getpid() ) ) log_action(instance, START_DAGIT_WEBSERVER) with uploading_logging_thread(instance): try: server.serve_forever() except OSError as os_error: if "Address already in use" in str(os_error): if port_lookup and ( port_lookup_attempts > 0 or click.confirm( ( "Another process on your machine is already listening on port {port}. " "Would you like to run the app at another port instead?" 
).format(port=port) ) ): port_lookup_attempts += 1 start_server( instance, host, port + port_lookup_attempts, path_prefix, app, True, port_lookup_attempts, ) else: raise Exception( f"Another process on your machine is already listening on port {port}. " "It is possible that you have another instance of dagit " "running somewhere using the same port. Or it could be another " "random process. Either kill that process or use the -p option to " "select another port." ) from os_error else: raise os_error cli = create_dagit_cli() def main(): # click magic cli(auto_envvar_prefix="DAGIT") # pylint:disable=E1120
[]
[]
[ "PYTHONWARNINGS" ]
[]
["PYTHONWARNINGS"]
python
1
0
pb/dfuse/eosio/codec/v1/merge_events_test.go
// Copyright 2019 dfuse Platform Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pbcodec import ( "os" "testing" "github.com/dfuse-io/logging" "go.uber.org/zap" "github.com/stretchr/testify/assert" ) func TestSort(t *testing.T) { evs := []*TransactionEvent{ &TransactionEvent{Id: "trx1", Irreversible: false}, &TransactionEvent{Id: "trx2", Irreversible: true}, &TransactionEvent{Id: "trx3", Irreversible: false}, &TransactionEvent{Id: "trx4", Irreversible: true}, &TransactionEvent{Id: "trx5", Irreversible: false}, &TransactionEvent{Id: "trx6", Irreversible: true}, &TransactionEvent{Id: "trx7", Irreversible: true}, } evs = sortEvents(evs) assert.True(t, evs[0].Irreversible) assert.True(t, evs[1].Irreversible) assert.True(t, evs[2].Irreversible) assert.True(t, evs[3].Irreversible) assert.False(t, evs[4].Irreversible) assert.False(t, evs[5].Irreversible) assert.False(t, evs[6].Irreversible) } func init() { if os.Getenv("DEBUG") != "" { logger, _ := zap.NewDevelopment() logging.Override(logger) } } func TestMergeTransactionEvents(t *testing.T) { tests := []struct { name string events []*TransactionEvent canonicalChain func(t *testing.T, id string) bool expect *TransactionLifecycle }{ { name: "single, irreversible event", events: []*TransactionEvent{ {Id: "trx1", BlockId: "abc", Irreversible: true, Event: NewTestAddEvent(1)}, }, canonicalChain: func(t *testing.T, id string) bool { return true }, expect: &TransactionLifecycle{ Id: "trx1", TransactionReceipt: &TransactionReceipt{Index: 1}, }, }, { name: "two additions, none irr, check canonical chain", events: []*TransactionEvent{ {Id: "trx1", BlockId: "a", Irreversible: false, Event: NewTestAddEvent(1)}, // skip this one since it isn't IRR and is NOT in the longest chain {Id: "trx1", BlockId: "b", Irreversible: false, Event: NewTestAddEvent(2)}, }, canonicalChain: func(t *testing.T, id string) bool { return id == "b" }, expect: &TransactionLifecycle{ Id: "trx1", TransactionReceipt: &TransactionReceipt{Index: 2}, }, }, { name: "multiple, select the irr of each kind, never call canonical chain", events: []*TransactionEvent{ {Id: "trx1", BlockId: "a", Irreversible: false, Event: NewTestAddEvent(1)}, {Id: "trx1", BlockId: "b", Irreversible: false, Event: NewTestAddEvent(2)}, {Id: "trx1", BlockId: "c", Irreversible: true, Event: NewTestAddEvent(3)}, {Id: "trx1", BlockId: "d", Irreversible: false, Event: NewTestExecEvent(4)}, {Id: "trx1", BlockId: "c", Irreversible: true, Event: NewTestExecEvent(6)}, {Id: "trx1", BlockId: "e", Irreversible: false, Event: NewTestExecEvent(5)}, }, canonicalChain: func(t *testing.T, id string) bool { t.Error("we said never call canonicalChain!") return true }, expect: &TransactionLifecycle{ Id: "trx1", TransactionStatus: TransactionStatus_TRANSACTIONSTATUS_EXECUTED, // no receipt, ignore TransactionReceipt: &TransactionReceipt{Index: 3}, ExecutionTrace: &TransactionTrace{ Receipt: &TransactionReceiptHeader{ Status: TransactionStatus_TRANSACTIONSTATUS_EXECUTED, }, Index: 6, }, ExecutionIrreversible: true, }, }, { 
name: "multiple, select one of each, ignore dtrx cancels if execution irreversible", events: []*TransactionEvent{ {Id: "trx1", BlockId: "2a", Irreversible: false, Event: NewTestDtrxCreateEvent("1")}, {Id: "trx1", BlockId: "2b", Irreversible: true, Event: NewTestDtrxCreateEvent("2")}, {Id: "trx1", BlockId: "2c", Irreversible: false, Event: NewTestDtrxCreateEvent("3")}, // delay trx thus execution could be at a later block {Id: "trx1", BlockId: "4a", Irreversible: false, Event: NewTestExecEvent(4)}, {Id: "trx1", BlockId: "4b", Irreversible: true, Event: NewTestExecEvent(5)}, {Id: "trx1", BlockId: "4c", Irreversible: false, Event: NewTestExecEvent(6)}, {Id: "trx1", BlockId: "5b", Irreversible: false, Event: NewTestDtrxCancelEvent("1")}, {Id: "trx1", BlockId: "5d", Irreversible: false, Event: NewTestDtrxCancelEvent("2")}, }, canonicalChain: func(t *testing.T, id string) bool { if id == "5b" { return true } return false }, expect: &TransactionLifecycle{ Id: "trx1", TransactionStatus: TransactionStatus_TRANSACTIONSTATUS_EXECUTED, // no receipt, ignore ExecutionTrace: &TransactionTrace{ Receipt: &TransactionReceiptHeader{ Status: TransactionStatus_TRANSACTIONSTATUS_EXECUTED, }, Index: 5, }, ExecutionIrreversible: true, CreationIrreversible: true, CreatedBy: &ExtDTrxOp{SourceTransactionId: "2"}}, }, { name: "cancellation arrives before irreversible execution, should not show cancelled at all", events: []*TransactionEvent{ {Id: "trx1", BlockId: "d", BlockNum: 4, Irreversible: false, Event: NewTestDtrxCancelEvent("1")}, {Id: "trx1", BlockId: "f", BlockNum: 5, Irreversible: true, Event: NewTestExecEvent(6)}, }, canonicalChain: func(t *testing.T, id string) bool { return true }, expect: &TransactionLifecycle{ Id: "trx1", TransactionStatus: TransactionStatus_TRANSACTIONSTATUS_EXECUTED, // no receipt, ignore ExecutionTrace: &TransactionTrace{ Receipt: &TransactionReceiptHeader{ Status: TransactionStatus_TRANSACTIONSTATUS_EXECUTED, }, Index: 6, }, ExecutionIrreversible: true, }, }, { name: "dev1: deferred transaction push, has multiple execution traces, execution succeeded", events: []*TransactionEvent{ { Id: "480a4adde14100097abec586d1dec805b3bfdb48c9efed5695ca02c61ea043bd", BlockId: "0000002ca98c204ec8f93e91332baf2a0eea5edd4e9f262bfb3e9b997d6f2415", Irreversible: true, Event: &TransactionEvent_Addition{Addition: &TransactionEvent_Added{ Receipt: &TransactionReceipt{Index: uint64(1)}, Transaction: &SignedTransaction{ Transaction: &Transaction{ Actions: []*Action{ { Account: "eosio.token", Name: "transfer", JsonData: "", }, }, }, }, PublicKeys: &PublicKeys{ PublicKeys: []string{"EOS5MHPYyhjBjnQZejzZHqHewPWhGTfQWSVTWYEhDmJu4SXkzgweP"}, }, }}, }, { Id: "480a4adde14100097abec586d1dec805b3bfdb48c9efed5695ca02c61ea043bd", BlockId: "0000002ca98c204ec8f93e91332baf2a0eea5edd4e9f262bfb3e9b997d6f2415", Irreversible: true, Event: &TransactionEvent_Execution{Execution: &TransactionEvent_Executed{ BlockHeader: &BlockHeader{ ActionMroot: []byte("1"), }, Trace: &TransactionTrace{ Receipt: &TransactionReceiptHeader{ Status: TransactionStatus_TRANSACTIONSTATUS_DELAYED, }, Id: "480a4adde14100097abec586d1dec805b3bfdb48c9efed5695ca02c61ea043bd", Index: uint64(2), RamOps: []*RAMOp{ { Operation: RAMOp_OPERATION_DEFERRED_TRX_PUSHED, ActionIndex: 0, Payer: "eosio", Delta: 371, Usage: 1182644, Namespace: RAMOp_NAMESPACE_DEFERRED_TRX, UniqueKey: "9", Action: RAMOp_ACTION_PUSH, }, }, }, }}, }, { Id: "480a4adde14100097abec586d1dec805b3bfdb48c9efed5695ca02c61ea043bd", BlockId: 
"0000002ca98c204ec8f93e91332baf2a0eea5edd4e9f262bfb3e9b997d6f2415", Irreversible: true, Event: &TransactionEvent_DtrxScheduling{DtrxScheduling: &TransactionEvent_DtrxScheduled{ Transaction: &SignedTransaction{ Transaction: &Transaction{ Actions: []*Action{ { Account: "eosio.token", Name: "transfer", JsonData: "{\"from\":\"eosio\",\"to\":\"battlefield1\",\"quantity\":\"1.0000 EOS\",\"memo\":\"push delayed trx\"}", }, }, }, }, CreatedBy: &ExtDTrxOp{ SourceTransactionId: "480a4adde14100097abec586d1dec805b3bfdb48c9efed5695ca02c61ea043bd", DtrxOp: &DTrxOp{ Operation: DTrxOp_OPERATION_PUSH_CREATE, }, }, }}, }, { Id: "480a4adde14100097abec586d1dec805b3bfdb48c9efed5695ca02c61ea043bd", BlockId: "0000002e7936c0363549d9f1bfc737e75a42a02d9e6cf72437e347f36580cfd8", Irreversible: true, Event: &TransactionEvent_Execution{Execution: &TransactionEvent_Executed{ BlockHeader: &BlockHeader{ ActionMroot: []byte("2"), }, Trace: &TransactionTrace{ Receipt: &TransactionReceiptHeader{ Status: TransactionStatus_TRANSACTIONSTATUS_EXECUTED, }, Id: "480a4adde14100097abec586d1dec805b3bfdb48c9efed5695ca02c61ea043bd", Index: uint64(1), RamOps: []*RAMOp{ { Operation: RAMOp_OPERATION_DEFERRED_TRX_REMOVED, ActionIndex: 0, Payer: "eosio", Delta: -371, Usage: 1182273, Namespace: RAMOp_NAMESPACE_DEFERRED_TRX, UniqueKey: "9", Action: RAMOp_ACTION_REMOVE, }, }, }, }}, }, }, canonicalChain: func(t *testing.T, id string) bool { return true }, expect: &TransactionLifecycle{ Id: "480a4adde14100097abec586d1dec805b3bfdb48c9efed5695ca02c61ea043bd", TransactionStatus: TransactionStatus_TRANSACTIONSTATUS_EXECUTED, TransactionReceipt: &TransactionReceipt{Index: uint64(1)}, PublicKeys: []string{"EOS5MHPYyhjBjnQZejzZHqHewPWhGTfQWSVTWYEhDmJu4SXkzgweP"}, Transaction: &SignedTransaction{ Transaction: &Transaction{ Actions: []*Action{ { Account: "eosio.token", Name: "transfer", JsonData: "{\"from\":\"eosio\",\"to\":\"battlefield1\",\"quantity\":\"1.0000 EOS\",\"memo\":\"push delayed trx\"}", }, }, }, }, CreatedBy: &ExtDTrxOp{ SourceTransactionId: "480a4adde14100097abec586d1dec805b3bfdb48c9efed5695ca02c61ea043bd", DtrxOp: &DTrxOp{ Operation: DTrxOp_OPERATION_PUSH_CREATE, }, }, ExecutionBlockHeader: &BlockHeader{ ActionMroot: []byte("2"), }, ExecutionTrace: &TransactionTrace{ Receipt: &TransactionReceiptHeader{ Status: TransactionStatus_TRANSACTIONSTATUS_EXECUTED, }, Id: "480a4adde14100097abec586d1dec805b3bfdb48c9efed5695ca02c61ea043bd", Index: 1, RamOps: []*RAMOp{ { Operation: RAMOp_OPERATION_DEFERRED_TRX_PUSHED, ActionIndex: 0, Payer: "eosio", Delta: 371, Usage: 1182644, Namespace: RAMOp_NAMESPACE_DEFERRED_TRX, UniqueKey: "9", Action: RAMOp_ACTION_PUSH, }, { Operation: RAMOp_OPERATION_DEFERRED_TRX_REMOVED, ActionIndex: 0, Payer: "eosio", Delta: -371, Usage: 1182273, Namespace: RAMOp_NAMESPACE_DEFERRED_TRX, UniqueKey: "9", Action: RAMOp_ACTION_REMOVE, }, }, }, CreationIrreversible: true, ExecutionIrreversible: true, }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { res := MergeTransactionEvents(test.events, func(id string) bool { return test.canonicalChain(t, id) }) assert.Equal(t, test.expect, res) }) } } func Test_deepMergeTransactionTrace(t *testing.T) { a := &TransactionTrace{ Id: "trx1", RamOps: []*RAMOp{{Operation: RAMOp_OPERATION_DEFERRED_TRX_REMOVED}}, RlimitOps: []*RlimitOp{{Operation: RlimitOp_OPERATION_INSERT}}, } b := &TransactionTrace{ Id: "trx1", RamOps: []*RAMOp{{Operation: RAMOp_OPERATION_DEFERRED_TRX_PUSHED}}, RlimitOps: []*RlimitOp{{Operation: RlimitOp_OPERATION_UPDATE}}, } r1 := 
mergeTransactionTrace(a, b) assert.Equal(t, []*RAMOp{{Operation: RAMOp_OPERATION_DEFERRED_TRX_PUSHED}, {Operation: RAMOp_OPERATION_DEFERRED_TRX_REMOVED}}, r1.RamOps) assert.Equal(t, []*RlimitOp{{Operation: RlimitOp_OPERATION_UPDATE}, {Operation: RlimitOp_OPERATION_INSERT}}, r1.RlimitOps) c := &TransactionTrace{ Id: "trx1", RamOps: []*RAMOp{{Operation: RAMOp_OPERATION_DEFERRED_TRX_REMOVED}}, RlimitOps: []*RlimitOp{{Operation: RlimitOp_OPERATION_INSERT}}, } d := &TransactionTrace{ Id: "trx1", } r2 := mergeTransactionTrace(c, d) assert.Equal(t, []*RAMOp{{Operation: RAMOp_OPERATION_DEFERRED_TRX_REMOVED}}, r2.RamOps) assert.Equal(t, []*RlimitOp{{Operation: RlimitOp_OPERATION_INSERT}}, r2.RlimitOps) }
[ "\"DEBUG\"" ]
[]
[ "DEBUG" ]
[]
["DEBUG"]
go
1
0
plc4go/tests/initializetest/init.go
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package initializetest

import (
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
	"os"
)

func init() {
	// A Jenkins CI run is detected via the JENKINS_URL environment variable;
	// colored console output is disabled there so CI logs stay readable.
	onJenkins := os.Getenv("JENKINS_URL") != ""
	log.Logger = log.
		//// Enable below if you want to see the filenames
		//With().Caller().Logger().
		Output(zerolog.ConsoleWriter{Out: os.Stderr, NoColor: onJenkins}).
		Level(zerolog.InfoLevel)
}
[ "\"JENKINS_URL\"" ]
[]
[ "JENKINS_URL" ]
[]
["JENKINS_URL"]
go
1
0
src/main/java/org/asciidoc/intellij/AsciiDoc.java
/* * Copyright 2013 Julien Viet * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.asciidoc.intellij; import com.intellij.ide.plugins.CannotUnloadPluginException; import com.intellij.notification.Notification; import com.intellij.notification.NotificationType; import com.intellij.notification.Notifications; import com.intellij.openapi.application.ApplicationManager; import com.intellij.openapi.editor.Document; import com.intellij.openapi.fileEditor.FileDocumentManager; import com.intellij.openapi.progress.ProcessCanceledException; import com.intellij.openapi.progress.ProgressIndicator; import com.intellij.openapi.progress.ProgressManager; import com.intellij.openapi.project.Project; import com.intellij.openapi.vfs.LocalFileSystem; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.psi.PsiFile; import org.apache.commons.io.IOUtils; import org.apache.commons.io.output.ByteArrayOutputStream; import org.apache.commons.lang.StringUtils; import org.apache.geronimo.gshell.io.SystemOutputHijacker; import org.asciidoc.intellij.actions.asciidoc.AsciiDocAction; import org.asciidoc.intellij.asciidoc.AntoraIncludeAdapter; import org.asciidoc.intellij.asciidoc.AntoraReferenceAdapter; import org.asciidoc.intellij.asciidoc.AttributesRetriever; import org.asciidoc.intellij.asciidoc.PrependConfig; import org.asciidoc.intellij.download.AsciiDocDownloaderUtil; import org.asciidoc.intellij.editor.AsciiDocPreviewEditor; import org.asciidoc.intellij.editor.javafx.JavaFxHtmlPanelProvider; import org.asciidoc.intellij.editor.jcef.AsciiDocJCEFHtmlPanelProvider; import org.asciidoc.intellij.settings.AsciiDocApplicationSettings; import org.asciidoctor.Asciidoctor; import org.asciidoctor.Attributes; import org.asciidoctor.AttributesBuilder; import org.asciidoctor.Options; import org.asciidoctor.OptionsBuilder; import org.asciidoctor.jruby.AsciidoctorJRuby; import org.asciidoctor.log.LogHandler; import org.asciidoctor.log.LogRecord; import org.asciidoctor.log.Severity; import org.intellij.lang.annotations.Language; import org.jcodings.EncodingDB; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.jruby.exceptions.MainExitException; import org.jruby.platform.Platform; import org.jruby.util.ByteList; import org.jruby.util.SafePropertyAccessor; import org.yaml.snakeyaml.Yaml; import org.yaml.snakeyaml.error.YAMLException; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.PrintStream; import java.lang.reflect.Field; import java.net.MalformedURLException; import java.net.URL; import java.net.URLClassLoader; import java.nio.charset.StandardCharsets; import java.nio.file.DirectoryStream; import java.nio.file.FileSystems; import java.nio.file.Files; import java.nio.file.Path; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.IdentityHashMap; 
import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.ServiceConfigurationError; import java.util.Set; import java.util.StringTokenizer; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantLock; import java.util.logging.Logger; import java.util.stream.Collectors; import static org.asciidoc.intellij.psi.AsciiDocUtil.ANTORA_YML; import static org.asciidoc.intellij.psi.AsciiDocUtil.findAntoraAttachmentsDirRelative; import static org.asciidoc.intellij.psi.AsciiDocUtil.findAntoraExamplesDir; import static org.asciidoc.intellij.psi.AsciiDocUtil.findAntoraImagesDirRelative; import static org.asciidoc.intellij.psi.AsciiDocUtil.findAntoraModuleDir; import static org.asciidoc.intellij.psi.AsciiDocUtil.findAntoraPagesDir; import static org.asciidoc.intellij.psi.AsciiDocUtil.findAntoraPartials; import static org.asciidoc.intellij.psi.AsciiDocUtil.findSpringRestDocSnippets; /** * @author Julien Viet */ public class AsciiDoc { /** * Base directory to look up includes. */ private File fileBaseDir; private String name; private static final ReentrantLock LOCK = new ReentrantLock(); private static class MaxHashMap extends LinkedHashMap<String, Asciidoctor> { @Override protected boolean removeEldestEntry(Map.Entry<String, Asciidoctor> eldest) { // cache up to three instances (for example: javafx, pdf, spring-restdocs) if (this.size() > 3) { eldest.getValue().shutdown(); return true; } else { return false; } } } private static final MaxHashMap INSTANCES = new MaxHashMap(); private static boolean shutdown = false; private static final PrependConfig PREPEND_CONFIG = new PrependConfig(); private static final AntoraIncludeAdapter ANTORA_INCLUDE_ADAPTER = new AntoraIncludeAdapter(); private static final AttributesRetriever ATTRIBUTES_RETRIEVER = new AttributesRetriever(); private static final com.intellij.openapi.diagnostic.Logger LOG = com.intellij.openapi.diagnostic.Logger.getInstance(AsciiDoc.class); static { SystemOutputHijacker.install(); } public static void checkUnloadPlugin() { lock(); try { if (INSTANCES.size() > 0) { // as beforePluginUnload() is incomplete, vote against reloading // as an incomplete unload would leave the user with disabled AsciiDoc functionality until the next restart. 
throw new CannotUnloadPluginException("expecting JRuby classloader issues, don't allow unloading"); } } finally { unlock(); } } public static void beforePluginUnload() { LOG.info("shutting down Asciidoctor instances"); lock(); try { shutdown = true; LOG.info("about to shutdown " + INSTANCES.size() + " instances"); INSTANCES.forEach((key, value) -> { value.unregisterAllExtensions(); value.close(); }); LOG.info("all instances shut down"); INSTANCES.clear(); if (SystemOutputHijacker.isInstalled()) { SystemOutputHijacker.uninstall(); } try { Class<?> shutdownHooks = Class.forName("java.lang.ApplicationShutdownHooks"); Field fieldHooks = shutdownHooks.getDeclaredField("hooks"); fieldHooks.setAccessible(true); @SuppressWarnings("unchecked") IdentityHashMap<Thread, Thread> hooks = (IdentityHashMap<Thread, Thread>) fieldHooks.get(null); List<Thread> jrubyShutdownThreads = hooks.keySet().stream().filter(thread -> thread.getClass().getName().startsWith("org.jruby.util.JRubyClassLoader")).collect(Collectors.toList()); jrubyShutdownThreads.forEach(thread -> { // as we want to run this shutdown thing now until it completes // noinspection CallToThreadRun thread.run(); Runtime.getRuntime().removeShutdownHook(thread); }); } catch (ClassNotFoundException | NoSuchFieldException | IllegalAccessException e) { LOG.error("unable to de-register shutdown hook", e); } System.gc(); // still, this is not enough; there are dangling ThreadLocals like "org.jruby.Ruby$FStringEqual" // in addition to that: classes are marked at "Held by JVM" and not unloaded. Reason is unknown, maybe // "custom class loaders when they are in the process of loading classes" as of // https://www.yourkit.com/docs/java/help/gc_roots.jsp } finally { unlock(); } } /** * Update file name and folder. File name might change if file is renamed or moved. */ public void updateFileName(File fileBaseDir, String name) { this.fileBaseDir = fileBaseDir; this.name = name; } /** * Images directory. */ private final Path imagesPath; private final String projectBasePath; private final Project project; public AsciiDoc(Project project, File fileBaseDir, Path imagesPath, String name) { this.projectBasePath = project.getBasePath(); this.fileBaseDir = fileBaseDir; this.imagesPath = imagesPath; this.name = name; this.project = project; } @SuppressWarnings("checkstyle:MethodLength") private Asciidoctor initWithExtensions(List<String> extensions, boolean springRestDocs, FileType format) { if (shutdown) { throw new ProcessCanceledException(); } boolean extensionsEnabled; AsciiDocApplicationSettings asciiDocApplicationSettings = AsciiDocApplicationSettings.getInstance(); if (extensions.size() > 0) { asciiDocApplicationSettings.setExtensionsPresent(projectBasePath, true); } String md; if (Boolean.TRUE.equals(asciiDocApplicationSettings.getExtensionsEnabled(projectBasePath))) { extensionsEnabled = true; md = calcMd(projectBasePath, extensions); } else { extensionsEnabled = false; md = calcMd(projectBasePath, Collections.emptyList()); } if (springRestDocs) { md = md + ".restdoc"; } if (format == FileType.JAVAFX || format == FileType.HTML) { // special ruby extensions loaded for JAVAFX and HTML md = md + "." 
+ format.name(); } boolean krokiEnabled = AsciiDocApplicationSettings.getInstance().getAsciiDocPreviewSettings().isKrokiEnabled(); if (krokiEnabled) { md = md + ".kroki"; } boolean diagramPresent = isDiagramPresent(); if (diagramPresent) { md = md + ".diagram"; } boolean pdfPresent = isPdfPresent(); if (pdfPresent) { md = md + ".pdf"; } Asciidoctor asciidoctor = INSTANCES.get(md); if (asciidoctor == null) { ByteArrayOutputStream boasOut = new ByteArrayOutputStream(); ByteArrayOutputStream boasErr = new ByteArrayOutputStream(); SystemOutputHijacker.register(new PrintStream(boasOut), new PrintStream(boasErr)); LogHandler logHandler = new IntellijLogHandler("initialize"); String oldEncoding = null; if (Platform.IS_WINDOWS) { /* There is an initialization procedure in Ruby.java that will abort when the encoding in file.encoding is not known to JRuby. Therefore default to UTF-8 in this case as a most sensible default. */ String encoding = System.getProperty("file.encoding", "UTF-8"); ByteList bytes = ByteList.create(encoding); EncodingDB.Entry entry = EncodingDB.getEncodings().get(bytes.getUnsafeBytes(), bytes.getBegin(), bytes.getBegin() + bytes.getRealSize()); if (entry == null) { entry = EncodingDB.getAliases().get(bytes.getUnsafeBytes(), bytes.getBegin(), bytes.getBegin() + bytes.getRealSize()); } if (entry == null) { // this happes for example with -Dfile.encoding=MS949 (Korean?) oldEncoding = encoding; LOG.warn("unsupported encoding " + encoding + " in JRuby, defaulting to UTF-8"); System.setProperty("file.encoding", "UTF-8"); } } try { asciidoctor = createInstance(); asciidoctor.registerLogHandler(logHandler); // require openssl library here to enable download content via https // requiring it later after other libraries have been loaded results in "undefined method `set_params' for #<OpenSSL::SSL::SSLContext" asciidoctor.requireLibrary("openssl"); asciidoctor.javaExtensionRegistry().preprocessor(PREPEND_CONFIG); asciidoctor.javaExtensionRegistry().includeProcessor(ANTORA_INCLUDE_ADAPTER); if (format == FileType.JAVAFX || format == FileType.HTML) { asciidoctor.javaExtensionRegistry().postprocessor(ATTRIBUTES_RETRIEVER); } // disable JUL logging of captured messages // https://github.com/asciidoctor/asciidoctorj/issues/669 Logger.getLogger("asciidoctor").setUseParentHandlers(false); if (!krokiEnabled && diagramPresent) { asciidoctor.requireLibrary("asciidoctor-diagram"); } else if (!diagramPresent) { try (InputStream is = this.getClass().getResourceAsStream("/diagram-placeholder.rb")) { if (is == null) { throw new RuntimeException("unable to load script diagram-placeholder.rb"); } asciidoctor.rubyExtensionRegistry().loadClass(is); } } if (format == FileType.JAVAFX) { try (InputStream is = this.getClass().getResourceAsStream("/sourceline-treeprocessor.rb")) { if (is == null) { throw new RuntimeException("unable to load script sourceline-treeprocessor.rb"); } asciidoctor.rubyExtensionRegistry().loadClass(is).treeprocessor("SourceLineTreeProcessor"); } } if (format == FileType.JAVAFX && diagramPresent) { try (InputStream is = this.getClass().getResourceAsStream("/plantuml-png-patch.rb")) { if (is == null) { throw new RuntimeException("unable to load script plantuml-png-patch.rb"); } asciidoctor.rubyExtensionRegistry().loadClass(is); } } if (format.backend.equals("html5")) { try (InputStream is = this.getClass().getResourceAsStream("/html5-antora.rb")) { if (is == null) { throw new RuntimeException("unable to load script html5-antora.rb"); } asciidoctor.rubyExtensionRegistry().loadClass(is); 
} } else if (format.backend.equals("pdf")) { try (InputStream is = this.getClass().getResourceAsStream("/pdf-antora.rb")) { if (is == null) { throw new RuntimeException("unable to load script pdf-antora.rb"); } asciidoctor.rubyExtensionRegistry().loadClass(is); } } if (springRestDocs) { try (InputStream is = this.getClass().getResourceAsStream("/springrestdoc-operation-blockmacro.rb")) { if (is == null) { throw new RuntimeException("unable to load script springrestdoc-operation-blockmacro.rb"); } asciidoctor.rubyExtensionRegistry().loadClass(is); } } if (krokiEnabled) { try (InputStream is = this.getClass().getResourceAsStream("/kroki-extension.rb")) { if (is == null) { throw new RuntimeException("unable to load script kroki-extension.rb"); } asciidoctor.rubyExtensionRegistry().loadClass(is); } } if (extensionsEnabled) { for (String extension : extensions) { asciidoctor.rubyExtensionRegistry().requireLibrary(extension); } } INSTANCES.put(md, asciidoctor); } catch (IOException e) { throw new RuntimeException(e); } finally { if (oldEncoding != null) { System.setProperty("file.encoding", oldEncoding); } if (asciidoctor != null) { asciidoctor.unregisterLogHandler(logHandler); } SystemOutputHijacker.deregister(); notify(boasOut, boasErr, Collections.emptyList()); } } return asciidoctor; } private boolean isDiagramPresent() { boolean diagramPresent = AsciiDocDownloaderUtil.getAsciidoctorJDiagramFile().exists(); if (!diagramPresent) { // try to find it in the class path for tests try (InputStream is = this.getClass().getResourceAsStream("/gems/asciidoctor-diagram-" + AsciiDocDownloaderUtil.ASCIIDOCTORJ_DIAGRAM_VERSION + "/lib/asciidoctor-diagram.rb")) { if (is != null) { diagramPresent = true; } } catch (IOException ex) { throw new RuntimeException("unable to open stream", ex); } } return diagramPresent; } private boolean isPdfPresent() { boolean pdfPresent = AsciiDocDownloaderUtil.getAsciidoctorJPdfFile().exists(); if (!pdfPresent) { // try to find it in the class path for tests try (InputStream is = this.getClass().getResourceAsStream("/gems/asciidoctor-pdf-" + AsciiDocDownloaderUtil.ASCIIDOCTORJ_PDF_VERSION + "/lib/asciidoctor-pdf.rb")) { if (is != null) { pdfPresent = true; } } catch (IOException ex) { throw new RuntimeException("unable to open stream", ex); } } return pdfPresent; } /** * Create an instance of Asciidoctor. */ private Asciidoctor createInstance() { ClassLoader cl = AsciiDocAction.class.getClassLoader(); List<URL> urls = new ArrayList<>(); try { File file1 = AsciiDocDownloaderUtil.getAsciidoctorJPdfFile(); if (file1.exists()) { urls.add(file1.toURI().toURL()); } File file2 = AsciiDocDownloaderUtil.getAsciidoctorJDiagramFile(); if (file2.exists()) { urls.add(file2.toURI().toURL()); } } catch (MalformedURLException e) { throw new RuntimeException("unable to add AsciidoctorJ to class path"); } if (urls.size() > 0) { cl = new URLClassLoader(urls.toArray(new URL[]{}), cl); } else if (cl instanceof URLClassLoader) { // Wrap an existing URLClassLoader with an empty list to prevent scanning of JARs by Ruby Runtime during Unit Tests. cl = new URLClassLoader(new URL[]{}, cl); } return AsciidoctorJRuby.Factory.create(cl); } /** * Calculate a hash for the extensions. * Hash will change if the project has been changed, of the contents of files have changed. * This will also include all files in subdirectories of the extension when creating the hash. 
*/ private String calcMd(String projectBasePath, List<String> extensions) { try { MessageDigest md = MessageDigest.getInstance("MD5"); md.update(projectBasePath.getBytes(StandardCharsets.UTF_8)); List<Path> folders = new ArrayList<>(); for (String s : extensions) { try { try (InputStream is = new FileInputStream(s)) { md.update(IOUtils.toByteArray(is)); } Path parent = FileSystems.getDefault().getPath(s).getParent(); if (!folders.contains(parent)) { folders.add(parent); try (DirectoryStream<Path> stream = Files.newDirectoryStream(parent, Files::isDirectory)) { for (Path p : stream) { scanForRubyFiles(p, md); } } } } catch (IOException e) { throw new RuntimeException("unable to read file", e); } } byte[] mdbytes = md.digest(); StringBuilder sb = new StringBuilder(); for (byte mdbyte : mdbytes) { sb.append(Integer.toString((mdbyte & 0xff) + 0x100, 16).substring(1)); } return sb.toString(); } catch (NoSuchAlgorithmException e) { throw new RuntimeException("unknown hash", e); } } private void scanForRubyFiles(Path path, MessageDigest md) throws IOException { try (DirectoryStream<Path> stream = Files.newDirectoryStream(path)) { for (Path p : stream) { if (Files.isDirectory(p)) { scanForRubyFiles(p, md); } if (Files.isRegularFile(p) && Files.isReadable(p)) { try (InputStream is = Files.newInputStream(p)) { md.update(IOUtils.toByteArray(is)); } } } } } private void notify(ByteArrayOutputStream boasOut, ByteArrayOutputStream boasErr, List<LogRecord> logRecords) { notify(boasOut, boasErr, logRecords, !AsciiDocApplicationSettings.getInstance().getAsciiDocPreviewSettings().isShowAsciiDocWarningsAndErrorsInEditor()); } public void notifyAlways(ByteArrayOutputStream boasOut, ByteArrayOutputStream boasErr, List<LogRecord> logRecords) { notify(boasOut, boasErr, logRecords, true); } private void notify(ByteArrayOutputStream boasOut, ByteArrayOutputStream boasErr, List<LogRecord> logRecords, boolean logAll) { String out = boasOut.toString(); String err = boasErr.toString(); if (logAll) { // logRecords will not be handled in the org.asciidoc.intellij.annotator.ExternalAnnotator for (LogRecord logRecord : logRecords) { if (logRecord.getSeverity() == Severity.DEBUG) { continue; } StringBuilder message = new StringBuilder(); message.append("Error during rendering ").append(name).append("; ").append(logRecord.getSeverity().name()).append(" "); if (logRecord.getCursor() != null && logRecord.getCursor().getFile() != null) { message.append(logRecord.getCursor().getFile()).append(":").append(logRecord.getCursor().getLineNumber()); } message.append(" ").append(logRecord.getMessage()); Notification notification = AsciiDocPreviewEditor.NOTIFICATION_GROUP.createNotification("Message during rendering " + name, message.toString(), NotificationType.INFORMATION, null); notification.setImportant(true); Notifications.Bus.notify(notification); } } if (out.length() > 0) { Notification notification = AsciiDocPreviewEditor.NOTIFICATION_GROUP.createNotification("Message during rendering " + name, out, NotificationType.INFORMATION, null); notification.setImportant(false); Notifications.Bus.notify(notification); } if (err.length() > 0) { Notification notification = AsciiDocPreviewEditor.NOTIFICATION_GROUP.createNotification("Error during rendering " + name, err, NotificationType.INFORMATION, null); notification.setImportant(true); Notifications.Bus.notify(notification); } } @Nullable public static Path tempImagesPath() { Path tempImagesPath = null; try { tempImagesPath = Files.createTempDirectory("asciidoctor-intellij"); } catch 
(IOException _ex) { String message = "Can't create temp folder to render images: " + _ex.getMessage(); Notification notification = AsciiDocPreviewEditor.NOTIFICATION_GROUP .createNotification("Error rendering asciidoctor", message, NotificationType.ERROR, null); // increase event log counter notification.setImportant(true); Notifications.Bus.notify(notification); } return tempImagesPath; } @NotNull public static @Language("asciidoc") String config(Document document, Project project) { VirtualFile currentFile = FileDocumentManager.getInstance().getFile(document); return config(currentFile, project); } @NotNull public static @Language("asciidoc") String config(VirtualFile currentFile, Project project) { StringBuilder tempContent = new StringBuilder(); if (currentFile != null) { VirtualFile folder = currentFile.getParent(); if (folder != null) { while (true) { for (String configName : new String[]{".asciidoctorconfig", ".asciidoctorconfig.adoc"}) { VirtualFile configFile = folder.findChild(configName); if (configFile != null && !currentFile.equals(configFile)) { final VirtualFile folderFinal = folder; ApplicationManager.getApplication().runReadAction(() -> { Document config = FileDocumentManager.getInstance().getDocument(configFile); if (config != null) { // TODO: for tracibility add current file name as a comment // prepend the new config, followed by two newlines to avoid sticking-together content tempContent.insert(0, "\n\n"); tempContent.insert(0, config.getText()); // prepend the location of the config file tempContent.insert(0, ":asciidoctorconfigdir: " + folderFinal.getCanonicalPath() + "\n\n"); } }); } } if (folder.getPath().equals(project.getBasePath())) { break; } folder = folder.getParent(); if (folder == null) { break; } } } } return tempContent.toString(); } @NotNull public static List<String> getExtensions(Project project) { VirtualFile lib = project.getBaseDir().findChild(".asciidoctor"); if (lib != null) { lib = lib.findChild("lib"); } List<String> extensions = new ArrayList<>(); if (lib != null) { for (VirtualFile vf : lib.getChildren()) { if ("rb".equals(vf.getExtension())) { extensions.add(vf.getCanonicalPath()); } } } return extensions; } @FunctionalInterface public interface Notifier { void notify(ByteArrayOutputStream boasOut, ByteArrayOutputStream boasErr, List<LogRecord> logRecords); } public String render(@Language("asciidoc") String text, List<String> extensions) { return render(text, "", extensions, this::notify); } public String render(@Language("asciidoc") String text, String config, List<String> extensions) { return render(text, config, extensions, this::notify); } public String render(@Language("asciidoc") String text, String config, List<String> extensions, Notifier notifier) { return render(text, config, extensions, notifier, FileType.JAVAFX); } public String render(@Language("asciidoc") String text, String config, List<String> extensions, Notifier notifier, FileType format) { VirtualFile springRestDocsSnippets = findSpringRestDocSnippets( LocalFileSystem.getInstance().findFileByIoFile(new File(projectBasePath)), LocalFileSystem.getInstance().findFileByIoFile(fileBaseDir) ); VirtualFile antoraModuleDir = findAntoraModuleDir( LocalFileSystem.getInstance().findFileByIoFile(new File(projectBasePath)), LocalFileSystem.getInstance().findFileByIoFile(fileBaseDir) ); Map<String, String> attributes = populateAntoraAttributes(projectBasePath, fileBaseDir, antoraModuleDir); attributes.putAll(populateDocumentAttributes(fileBaseDir, name)); lock(); try { if (shutdown) { 
throw new ProcessCanceledException(); } CollectingLogHandler logHandler = new CollectingLogHandler(); ByteArrayOutputStream boasOut = new ByteArrayOutputStream(); ByteArrayOutputStream boasErr = new ByteArrayOutputStream(); // SystemOutputHijacker.register(new PrintStream(boasOut), new PrintStream(boasErr)); try { Asciidoctor asciidoctor = initWithExtensions(extensions, springRestDocsSnippets != null, format); asciidoctor.registerLogHandler(logHandler); PREPEND_CONFIG.setConfig(config); ANTORA_INCLUDE_ADAPTER.setAntoraDetails(project, antoraModuleDir); AntoraReferenceAdapter.setAntoraDetails(project, antoraModuleDir, fileBaseDir, name); try { return "<div id=\"content\"" + (antoraModuleDir != null ? " class=\"doc\"" : "") + ">\n" + asciidoctor.convert(text, getDefaultOptions(FileType.JAVAFX, springRestDocsSnippets, attributes)) + "\n</div>"; } finally { PREPEND_CONFIG.setConfig(""); ANTORA_INCLUDE_ADAPTER.setAntoraDetails(null, null); asciidoctor.unregisterLogHandler(logHandler); } } catch (ProcessCanceledException ex) { throw ex; } catch (Exception | ServiceConfigurationError ex) { LOG.warn("unable to render AsciiDoc document", ex); logHandler.log(new LogRecord(Severity.FATAL, ex.getMessage())); StringBuilder response = new StringBuilder(); response.append("unable to render AsciiDoc document"); Throwable t = ex; do { response.append("<p>").append(t.getClass().getCanonicalName()).append(": ").append(t.getMessage()); if (t instanceof MainExitException && t.getMessage().startsWith("unknown encoding name")) { response.append("<p>Either your local encoding is not supported by JRuby, or you passed an unrecognized value to the Java property 'file.encoding' either in the IntelliJ options file or via the JAVA_TOOL_OPTION environment variable."); String property = SafePropertyAccessor.getProperty("file.encoding", null); response.append("<p>encoding passed by system property 'file.encoding': ").append(property); response.append("<p>available encodings (excuding aliases): "); EncodingDB.getEncodings().forEach(entry -> response.append(entry.getEncoding().getCharsetName()).append(" ")); } t = t.getCause(); } while (t != null); response.append("<p>(the full exception stack trace is available in the IDE's log file. Visit menu item 'Help | Show Log in Explorer' to see the log)"); return response.toString(); } finally { // SystemOutputHijacker.deregister(); notifier.notify(boasOut, boasErr, logHandler.getLogRecords()); } } finally { unlock(); } } private Map<String, String> populateDocumentAttributes(File fileBaseDir, String name) { Map<String, String> attributes = new HashMap<>(); attributes.put("docname", name.replaceAll("\\..*$", "")); if (name.contains(".")) { attributes.put("docfilesuffix", name.replaceAll("^(.*)(\\..*)$", "$2")); } attributes.put("docfile", new File(fileBaseDir, name).getAbsolutePath()); attributes.put("docdir", fileBaseDir.getAbsolutePath()); return attributes; } private static int validateAccess() { /* This class will lock on AsciiDoc.java so that only one instance of Asciidoctor is running at any time. This allows re-using the instances that are expensive to create (both in terms of memory and cpu seconds). When rendering an AsciiDoc document, this requires read-access to document for example to resolve Antora information or includes. 
By ensuring no previous read or write lock exists, this avoids the following dead-lock situation: process 1: waiting for write lock, allowing no-one else to acquire a read lock -> will not proceed due to 3 process 2: already acquired AsciiDoc lock, running AsciiDoctor rendering and waiting for a read-lock -> will not proceed due to 1 process 3: already acquired read-lock, now waiting for AsciiDoc lock -> will not proceed due to 2 */ if (ApplicationManager.getApplication().isWriteAccessAllowed()) { throw new IllegalStateException("no write access should be allowed here as it might cause a deadlock"); } if (ApplicationManager.getApplication().isUnitTestMode()) { // in unit test mode there is always read-access allowed return 0; } if (ApplicationManager.getApplication().isReadAccessAllowed()) { // the AsciiDocJavaDocInfoGenerator will get here with an existing ReadLock, use a timeout here to avoid a deadlock. Set<StackTraceElement> nonblocking = Arrays.stream(Thread.currentThread().getStackTrace()).filter(stackTraceElement -> stackTraceElement.getClassName().endsWith("AsciiDocJavaDocInfoGenerator") || stackTraceElement.getClassName().endsWith("AsciidocletJavaDocInfoGenerator") ).collect(Collectors.toSet()); if (nonblocking.size() > 0) { return 20; } throw new IllegalStateException("no read access should be allowed here as it might cause a deadlock"); } return 0; } public void convertTo(File file, String config, List<String> extensions, FileType format) { VirtualFile springRestDocsSnippets = findSpringRestDocSnippets( LocalFileSystem.getInstance().findFileByIoFile(new File(projectBasePath)), LocalFileSystem.getInstance().findFileByIoFile(fileBaseDir)); VirtualFile antoraModuleDir = findAntoraModuleDir( LocalFileSystem.getInstance().findFileByIoFile(new File(projectBasePath)), LocalFileSystem.getInstance().findFileByIoFile(fileBaseDir) ); Map<String, String> attributes = populateAntoraAttributes(projectBasePath, fileBaseDir, antoraModuleDir); lock(); try { if (shutdown) { throw new ProcessCanceledException(); } CollectingLogHandler logHandler = new CollectingLogHandler(); ByteArrayOutputStream boasOut = new ByteArrayOutputStream(); ByteArrayOutputStream boasErr = new ByteArrayOutputStream(); // SystemOutputHijacker.register(new PrintStream(boasOut), new PrintStream(boasErr)); try { Asciidoctor asciidoctor = initWithExtensions(extensions, springRestDocsSnippets != null, format); PREPEND_CONFIG.setConfig(config); ANTORA_INCLUDE_ADAPTER.setAntoraDetails(project, antoraModuleDir); AntoraReferenceAdapter.setAntoraDetails(project, antoraModuleDir, fileBaseDir, name); asciidoctor.registerLogHandler(logHandler); try { ProgressIndicator indicator = ProgressManager.getInstance().getProgressIndicator(); if (indicator == null || !indicator.isCanceled()) { asciidoctor.convertFile(file, getExportOptions( getDefaultOptions(format, springRestDocsSnippets, attributes), format)); } } finally { PREPEND_CONFIG.setConfig(""); ANTORA_INCLUDE_ADAPTER.setAntoraDetails(null, null); asciidoctor.unregisterLogHandler(logHandler); } } catch (ProcessCanceledException ex) { throw ex; } catch (Exception | ServiceConfigurationError ex) { LOG.warn("unable to render AsciiDoc document", ex); logHandler.log(new LogRecord(Severity.FATAL, ex.getMessage())); StringBuilder response = new StringBuilder(); response.append("unable to render AsciiDoc document"); Throwable t = ex; do { response.append("<p>").append(t.getClass().getCanonicalName()).append(": ").append(t.getMessage()); if (t instanceof MainExitException && 
t.getMessage().startsWith("unknown encoding name")) { response.append("<p>Either your local encoding is not supported by JRuby, or you passed an unrecognized value to the Java property 'file.encoding' either in the IntelliJ options file or via the JAVA_TOOL_OPTION environment variable."); String property = SafePropertyAccessor.getProperty("file.encoding", null); response.append("<p>encoding passed by system property 'file.encoding': ").append(property); response.append("<p>available encodings (excuding aliases): "); EncodingDB.getEncodings().forEach(entry -> response.append(entry.getEncoding().getCharsetName()).append(" ")); } t = t.getCause(); } while (t != null); response.append("<p>(the full exception stack trace is available in the IDE's log file. Visit menu item 'Help | Show Log in Explorer' to see the log)"); try { boasErr.write(response.toString().getBytes(StandardCharsets.UTF_8)); } catch (IOException e) { throw new RuntimeException("Unable to write bytes"); } } finally { // SystemOutputHijacker.deregister(); Notifier notifier = this::notifyAlways; notifier.notify(boasOut, boasErr, logHandler.getLogRecords()); } } finally { unlock(); } } private static void lock() { int timeout = validateAccess(); if (timeout == 0) { LOCK.lock(); } else { try { if (!LOCK.tryLock(timeout, TimeUnit.SECONDS)) { LOG.warn("unabel to acquire lock after timeout"); throw new ProcessCanceledException(new RuntimeException("unable to acquire lock after timeout")); } } catch (InterruptedException e) { throw new RuntimeException("unable to acquire lock", e); } } } private static void unlock() { LOCK.unlock(); } public static Map<String, String> populateAntoraAttributes(String projectBasePath, File fileBaseDir, VirtualFile antoraModuleDir) { Map<String, String> result = new HashMap<>(); if (antoraModuleDir != null) { result.putAll(collectAntoraAttributes(antoraModuleDir)); VirtualFile projectBase = LocalFileSystem.getInstance().findFileByIoFile(new File(projectBasePath)); VirtualFile baseDir = LocalFileSystem.getInstance().findFileByIoFile(fileBaseDir); VirtualFile antoraPages = findAntoraPagesDir(projectBase, baseDir); VirtualFile antoraPartials = findAntoraPartials(projectBase, baseDir); String antoraImagesDir = findAntoraImagesDirRelative(projectBase, baseDir); String antoraAttachmentsDir = findAntoraAttachmentsDirRelative(projectBase, baseDir); VirtualFile antoraExamplesDir = findAntoraExamplesDir(projectBase, baseDir); if (antoraPages != null) { result.put("pagesdir", antoraPages.getCanonicalPath()); } if (antoraPartials != null) { result.put("partialsdir", antoraPartials.getCanonicalPath()); } if (antoraImagesDir != null) { result.put("imagesdir", antoraImagesDir); } if (antoraAttachmentsDir != null) { result.put("attachmentsdir", antoraAttachmentsDir); } if (antoraExamplesDir != null) { result.put("examplesdir", antoraExamplesDir.getCanonicalPath()); } } return result; } public static Map<String, String> collectAntoraAttributes(VirtualFile antoraModuleDir) { Map<String, String> result = new HashMap<>(); result.put("icons", "font"); result.put("env-site", ""); result.put("site-gen", "antora"); result.put("site-gen-antora", ""); result.put("page-module", antoraModuleDir.getName()); if (antoraModuleDir.getParent() != null && antoraModuleDir.getParent().getParent() != null) { VirtualFile antoraFile = antoraModuleDir.getParent().getParent().findChild(ANTORA_YML); if (antoraFile != null) { ApplicationManager.getApplication().runReadAction(() -> { Document document = 
FileDocumentManager.getInstance().getDocument(antoraFile); if (document != null) { try { Map<String, Object> antora = readAntoraYaml(antoraFile); mapAttribute(result, antora, "name", "page-component-name"); mapAttribute(result, antora, "version", "page-component-version"); mapAttribute(result, antora, "title", "page-component-title"); mapAttribute(result, antora, "version", "page-version"); mapAttribute(result, antora, "display-version", "page-display-version"); Object asciidoc = antora.get("asciidoc"); if (asciidoc instanceof Map) { @SuppressWarnings("rawtypes") Object attributes = ((Map) asciidoc).get("attributes"); if (attributes instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> map = (Map<String, Object>) attributes; map.forEach((k, v) -> { String vs; if (v == null) { // null -> not allowed in YAML file as attribute value Notification notification = AsciiDocPreviewEditor.NOTIFICATION_GROUP.createNotification("AsciiDoc attribute '" + k + "' is null in " + antoraFile.getCanonicalPath(), "Will be treated as unset. Use either false to explicitly unset, or set by providing for example an empty string ''", NotificationType.ERROR, null); notification.setImportant(true); Notifications.Bus.notify(notification); vs = null; } else if (v instanceof Boolean && !(Boolean) v) { // false -> soft unset vs = null; } else if (v instanceof String && v.equals("~")) { // "~" -> hard unset vs = null; } else { vs = v.toString(); if (vs.endsWith("@")) { // "...@" -> soft set vs = vs.substring(0, vs.length() - 1); } } result.put(k, vs); }); } } } catch (YAMLException ignored) { // continue without detailed Antora information } } }); } } return result; } public static Map<String, Object> readAntoraYaml(VirtualFile antoraFile) { try { Document document = FileDocumentManager.getInstance().getDocument(antoraFile); if (document == null) { throw new YAMLException("unable to read file"); } Yaml yaml = new Yaml(); return yaml.load(document.getText()); } catch (YAMLException ex) { handleAntoraYamlException(ex, antoraFile.getCanonicalPath()); throw ex; } } public static Map<String, Object> readAntoraYaml(PsiFile antoraFile) { try { Yaml yaml = new Yaml(); return yaml.load(antoraFile.getText()); } catch (YAMLException ex) { String fileName = null; VirtualFile virtualFile = antoraFile.getVirtualFile(); if (virtualFile != null) { fileName = virtualFile.getCanonicalPath(); } handleAntoraYamlException(ex, fileName); throw new YAMLException("Error when reading file " + fileName); } } private static void handleAntoraYamlException(YAMLException ex, @Nullable String canonicalPath) { String message = canonicalPath + ": " + ex.getMessage(); LOG.warn("Error reading Antora component information", ex); Notification notification = AsciiDocPreviewEditor.NOTIFICATION_GROUP.createNotification("Error reading Antora component information", message, NotificationType.ERROR, null); notification.setImportant(true); Notifications.Bus.notify(notification); } public Map<String, Object> getExportOptions(Map<String, Object> options, FileType fileType) { if (fileType == FileType.HTML) { options.put(Options.HEADER_FOOTER, true); } return options; } @SuppressWarnings("checkstyle:ParameterNumber") private Map<String, Object> getDefaultOptions(FileType fileType, VirtualFile springRestDocsSnippets, Map<String, String> attributes) { AttributesBuilder builder = AttributesBuilder.attributes() .showTitle(true) .backend(fileType.backend) .sourceHighlighter("coderay@") .attribute("coderay-css@", "style") .attribute("env", "idea") 
.attribute("skip-front-matter@") .attribute("env-idea"); if (springRestDocsSnippets != null) { builder.attribute("snippets", springRestDocsSnippets.getCanonicalPath()); } for (Map.Entry<String, String> entry : attributes.entrySet()) { builder.attribute(entry.getKey(), entry.getValue()); } String graphvizDot = System.getenv("GRAPHVIZ_DOT"); if (graphvizDot != null) { builder.attribute("graphvizdot@", graphvizDot); } Attributes attrs = builder.get(); final AsciiDocApplicationSettings settings = AsciiDocApplicationSettings.getInstance(); if (imagesPath != null) { if (fileType == FileType.JAVAFX) { if (settings.getAsciiDocPreviewSettings().getHtmlPanelProviderInfo().getClassName().equals(JavaFxHtmlPanelProvider.class.getName()) || settings.getAsciiDocPreviewSettings().getHtmlPanelProviderInfo().getClassName().equals(AsciiDocJCEFHtmlPanelProvider.class.getName())) { attrs.setAttribute("outdir", imagesPath.toAbsolutePath().normalize().toString()); // this prevents asciidoctor diagram to render images to a folder {outdir}/{imagesdir} ... // ... that might then be outside of the temporary folder as {imagesdir} might traverse to a parent folder // beware that the HTML output will still prepends {imagesdir} that later needs to be removed from HTML output // https://github.com/asciidoctor/asciidoctor-diagram/issues/110 attrs.setAttribute("imagesoutdir", imagesPath.toAbsolutePath().normalize().toString()); } } } if (AsciiDocApplicationSettings.getInstance().getAsciiDocPreviewSettings().isKrokiEnabled()) { String krokiUrl = AsciiDocApplicationSettings.getInstance().getAsciiDocPreviewSettings().getKrokiUrl(); if (!StringUtils.isEmpty(krokiUrl)) { attrs.setAttribute("kroki-server-url", krokiUrl); } } settings.getAsciiDocPreviewSettings().getAttributes().forEach(attrs::setAttribute); OptionsBuilder opts = OptionsBuilder.options().safe(settings.getSafe()).backend(fileType.backend).headerFooter(false) .attributes(attrs) .option("sourcemap", "true") .baseDir(fileBaseDir); return opts.asMap(); } public Map<String, String> getAttributes() { return ATTRIBUTES_RETRIEVER.getAttributes(); } public enum FileType { PDF("pdf"), HTML("html5"), JAVAFX("html5"), JEDITOR("html5"); private final String backend; FileType(String backend) { this.backend = backend; } @Override public String toString() { return backend; } } private static void mapAttribute(Map<String, String> result, Map<String, Object> antora, String nameSource, String nameTarget) { Object value = antora.get(nameSource); if (value != null) { result.put(nameTarget, value.toString()); } } @NotNull public static String enrichPage(@NotNull String html, String standardCss, @NotNull Map<String, String> attributes) { /* Add CSS line */ String stylesheet = attributes.get("stylesheet"); if (stylesheet != null && stylesheet.length() != 0) { // custom stylesheet set String stylesdir = attributes.get("stylesdir"); VirtualFile stylesdirVf = LocalFileSystem.getInstance().findFileByPath(attributes.get("docdir")); if (stylesdirVf != null) { if (stylesdir != null && stylesdir.length() != 0) { File stylesdirFile = new File(stylesdir); if (!stylesdirFile.isAbsolute()) { stylesdirVf = stylesdirVf.findFileByRelativePath(stylesdir); } else { stylesdirVf = LocalFileSystem.getInstance().findFileByIoFile(stylesdirFile); } } if (stylesdirVf != null) { VirtualFile stylesheetVf = stylesdirVf.findChild(stylesheet); if (stylesheetVf != null) { String css; try (InputStream is = stylesheetVf.getInputStream()) { css = IOUtils.toString(is); } catch (IOException ex) { css = "/* unable to read 
CSS from " + stylesdirVf.getCanonicalPath() + ": " + ex.getMessage() + " */"; } html = html .replace("<head>", "<head>" + "<style>" + css + "</style>"); } } } } else { // use standard stylesheet if (standardCss != null) { html = html .replace("<head>", "<head>" + standardCss); } } String docinfo = attributes.get("docinfo"); if (docinfo != null && docinfo.length() != 0) { // custom stylesheet set String docinfodir = attributes.get("docinfodir"); VirtualFile docinfodirVf = LocalFileSystem.getInstance().findFileByPath(attributes.get("docdir")); if (docinfodirVf != null) { if (docinfodir != null && docinfodir.length() != 0) { File docinfodirFile = new File(docinfodir); if (!docinfodirFile.isAbsolute()) { docinfodirVf = docinfodirVf.findFileByRelativePath(docinfodir); } else { docinfodirVf = LocalFileSystem.getInstance().findFileByIoFile(docinfodirFile); } } if (docinfodirVf != null) { StringTokenizer st = new StringTokenizer(docinfo, ","); while (st.hasMoreTokens()) { String token = st.nextToken().trim(); if (token.equals("shared") || token.equals("shared-head") || token.equals("private") || token.equals("private-head")) { String prefix = ""; if (token.startsWith("private")) { prefix = attributes.get("docname") + "-"; } VirtualFile file = docinfodirVf.findChild(prefix + "docinfo.html"); if (file != null) { String content; try (InputStream is = file.getInputStream()) { content = IOUtils.toString(is); } catch (IOException ex) { content = "<!-- unable to read contents from from " + file.getCanonicalPath() + ": " + ex.getMessage() + " -->"; } html = html .replace("</head>", content + "</head>"); } } if (token.equals("shared") || token.equals("shared-footer") || token.equals("private") || token.equals("private-footer")) { String prefix = ""; if (token.startsWith("private")) { prefix = attributes.get("docname") + "-"; } VirtualFile file = docinfodirVf.findChild(prefix + "docinfo-footer.html"); if (file != null) { String content; try (InputStream is = file.getInputStream()) { content = IOUtils.toString(is); } catch (IOException ex) { content = "<!-- unable to read contents from from " + file.getCanonicalPath() + ": " + ex.getMessage() + " -->"; } html = html .replace("</body>", content + "</body>"); } } } } } } return html; } }
[ "\"GRAPHVIZ_DOT\"" ]
[]
[ "GRAPHVIZ_DOT" ]
[]
["GRAPHVIZ_DOT"]
java
1
0
server_test.go
package main_test import ( "fmt" "io/ioutil" "os" "os/exec" "path" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gbytes" "github.com/onsi/gomega/gexec" ) var goodConfig = []byte(`--- environments: - name: test domain: examples.are.cool.com authenticate: false skip_ssl: true instances: 2 foundations: - https://example.endpoint1.cf.com - https://example.endpoint2.cf.com `) var badConfig = []byte(`--- environments: - name: sandbox `) var _ = Describe("Server", func() { var ( session *gexec.Session err error ) BeforeEach(func() { os.Setenv("CF_USERNAME", "test user") os.Setenv("CF_PASSWORD", "test pwd") }) AfterEach(func() { os.Unsetenv("CF_USERNAME") os.Unsetenv("CF_PASSWORD") session.Terminate() }) Describe("log level flag", func() { Context("when a log level is not specified", func() { It("uses the default log level ", func() { level := os.Getenv("DEPLOYADACTYL_LOGLEVEL") os.Unsetenv("DEPLOYADACTYL_LOGLEVEL") Expect(err).ToNot(HaveOccurred()) session, err = gexec.Start(exec.Command(pathToCLI), GinkgoWriter, GinkgoWriter) Expect(err).ToNot(HaveOccurred()) Eventually(session.Out).Should(Say("log level")) Eventually(session.Out).Should(Say("DEBUG")) os.Setenv("DEPLOYADACTYL_LOGLEVEL", level) }) }) Context("when log level is invalid", func() { It("throws an error", func() { level := os.Getenv("DEPLOYADACTYL_LOGLEVEL") Expect(os.Setenv("DEPLOYADACTYL_LOGLEVEL", "tanystropheus")).To(Succeed()) session, err = gexec.Start(exec.Command(pathToCLI), GinkgoWriter, GinkgoWriter) Expect(err).ToNot(HaveOccurred()) Eventually(session.Err).Should(Say("invalid log level")) os.Setenv("DEPLOYADACTYL_LOGLEVEL", level) }) }) }) Describe("command line flags", func() { Describe("config flag", func() { Context("when the config flag is not provided", func() { It("throws an error", func() { session, err = gexec.Start(exec.Command(pathToCLI), GinkgoWriter, GinkgoWriter) Expect(err).ToNot(HaveOccurred()) Eventually(session.Out).Should(Say("no such file or directory")) }) }) Context("when an invalid config path is specified", func() { It("throws an error", func() { session, err = gexec.Start(exec.Command(pathToCLI, "-config", "./gorgosaurus.yml"), GinkgoWriter, GinkgoWriter) Expect(err).ToNot(HaveOccurred()) Eventually(session.Out).Should(Say("no such file or directory")) }) }) Context("when a bad config is provided", func() { It("returns an error", func() { configLocation := fmt.Sprintf("%s/config.yml", path.Dir(pathToCLI)) Expect(ioutil.WriteFile(configLocation, badConfig, 0777)).To(Succeed()) session, err = gexec.Start(exec.Command(pathToCLI, "-config", configLocation), GinkgoWriter, GinkgoWriter) Expect(err).ToNot(HaveOccurred()) Eventually(session.Out).Should(Say("missing required parameter")) }) }) }) }) })
[ "\"DEPLOYADACTYL_LOGLEVEL\"", "\"DEPLOYADACTYL_LOGLEVEL\"" ]
[]
[ "DEPLOYADACTYL_LOGLEVEL" ]
[]
["DEPLOYADACTYL_LOGLEVEL"]
go
1
0
realtime_detection.py
import numpy as np # from numpy_ringbuffer import RingBuffer import librosa import librosa.display import matplotlib.pyplot as plt import noisereduce as nr from tensorflow.keras.models import model_from_json from sklearn.preprocessing import LabelEncoder import pyaudio import wave # for save audio file import datetime import os from collections import deque import math #os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Load segment audio classification model model_path = r"audio_model/" model_name = "audio_CNN_2021_09_07_12_19_57_acc_97.53" # Model reconstruction from JSON file with open(model_path + model_name + '.json', 'r') as f: model = model_from_json(f.read()) # Load weights into the new model model.load_weights(model_path + model_name + '.h5') # Replicate label encoder lb = LabelEncoder() lb.fit_transform(['Speaking', 'OtherSound']) # Some Utils # Plot audio def plotAudio2(output): fig, ax = plt.subplots(nrows=1,ncols=1, figsize=(20,4)) #figsize=20,4 plt.plot(output, color='blue') ax.set_xlim((0, len(output))) plt.show() # 스펙트로그램: 가로축은 시간, 세로축은 주파수, 진한 정도는 진폭을 나타냅니다. def draw_spectrogram(X): X = librosa.effects.preemphasis(X) clip, index = librosa.effects.trim(X, top_db=20, hop_length=256) stfts = librosa.stft(clip, n_fft=512, hop_length=256, win_length=512) stftsdb = librosa.amplitude_to_db(abs(stfts)) plt.figure(figsize=(20,4)) librosa.display.specshow(stftsdb, sr=22050, hop_length=256, x_axis='s', y_axis='hz') plt.colorbar() # 입력 오디오를 normalization하는 함수입니다. def minMaxNormalize(arr): mn = np.min(arr) mx = np.max(arr) return (arr-mn)/(mx-mn) # 입력 오디오 buffer가 말소리인지 아닌지를 판별하는 함수입니다. def predictSound(X): #Triming: 입력 오디오에서 무음 구간을 제거합니다. clip, index = librosa.effects.trim(X, top_db=20, frame_length=512, hop_length=256) # Trimming X, index = librosa.effects.trim(X, top_db=20, frame_length=512,hop_length=256) #get mel-spectrogram: 입력 오디오로부터 mel-spectrogram feature 추출 X = librosa.feature.melspectrogram(y=X, sr=16000, n_fft=512, hop_length=256, win_length=512) X = librosa.power_to_db(X, ref=np.max) X = X.T X = np.mean(X, axis=0) X = minMaxNormalize(X) X = np.reshape(X, (1, 16, 8)) # get prob result = model.predict(np.array([X])) predictions = [np.argmax(y) for y in result] prob = np.max(result) result = lb.inverse_transform([predictions[0]])[0] #print('predict: ', result, round(prob, 2)) return result, prob # 현재 입력값의 dB를 출력하는 함수입니다. def showdB(y): # y, sr =librosa.load(...) clip, index = librosa.effects.trim(y, top_db=20, frame_length=512, hop_length=256) stfts = librosa.stft(clip, n_fft=512, hop_length=256, win_length=512) dB = librosa.amplitude_to_db(abs(stfts), ref=1/1000) dB = np.mean(dB) return dB def pcm2float(sig, dtype='float32'): sig = np.asarray(sig) if sig.dtype.kind not in 'iu': raise TypeError("'sig' must be an array of integers") dtype = np.dtype(dtype) if dtype.kind != 'f': raise TypeError("'dtype' must be a floating point type") i = np.iinfo(sig.dtype) abs_max = 2 ** (i.bits - 1) offset = i.min + abs_max return (sig.astype(dtype) - offset) / abs_max # 발화 끝으로 갈수록 에너지가 약해지므로 정확도가 낮아집니다. # 이를 보완하기 위해 후처리 과정에서 이동평균을 이용합니다. class MovingAverage: def __init__(self, size: int): self.data = deque(maxlen = size) def next(self, val: int) -> float: self.data.append(val) return sum(self.data)/len(self.data) class RealtimeRecording(): def __init__(self): self.CHUNKSIZE = 8192 # 8192: 256ms. 입력 오디오 신호를 256ms 단위로 받습니다. 
self.RATE = 16000 # sample rate self.FORMAT = pyaudio.paInt16 # self.FORMAT = pyaudio.paFloat32 # original = paFloat32 self.CHANNELS = 1 # mono self.audio_buffer = bytes() self.ma = MovingAverage(3) self.STATE = False # for saving speaking buffers self.speaking_buffer = np.array([]) self.SAVE = False self.previous_result = '' self.category = 0.0 def start(self): # initialize portaudio print("Stream Start") now = datetime.datetime.now() p = pyaudio.PyAudio() stream = p.open(format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE, input=True, frames_per_buffer=self.CHUNKSIZE) if not self.STATE: self.audio_buffer += audio_data.data if len(self.audio_buffer) == 20480: #10240 = 640ms. np.frombuffer를 거치면 320ms data = self.audio_buffer self.noise_sample = np.frombuffer(data, dtype=np.int16) self.noise_sample = np.nan_to_num(self.noise_sample) self.noise_sample_float = pcm2float(self.noise_sample) #plotAudio2(self.noise_sample_float) self.audio_buffer = bytes() self.STATE = True print('Noise reduction setting complete') if self.STATE: self.audio_buffer += audio_data.data if len(self.audio_buffer) == 8192: #8192: 256ms data = self.audio_buffer self.sample = np.frombuffer(data, dtype=np.int16) self.sample_float = pcm2float(self.sample) # nan 값 발견 시 제거 if not np.isfinite(self.sample_float).all(): self.sample = np.nan_to_num(self.sample) # 노이즈 샘플로 노이즈 제거 noisy_part = self.noise_sample_float self.current_window = nr.reduce_noise(y=self.sample_float, y_noise=noisy_part, prop_decrease=1.0, sr=16000) # dB Threshold. 특정 dB 이상의 오디오에 대해서만 판별을 수행합니다. current_dB = showdB(self.current_window) dB_threshold = 16 # predict self.pred, self.prob = predictSound(np.array(self.current_window)) # dB filtering and hangover # 이전 buffer의 상태에 따라서 speaking 판단 여부를 조금씩 조정합니다. if current_dB > dB_threshold: # false positive를 줄이기 위해 설정한 값입니다. 사용 환경에 따라서 조정할 수 있습니다. 
if self.pred == 'Speaking' and self.prob > 0.75: #print('pred: ', self.pred, round(self.prob,2)) self.result = 'Speaking' self.category = self.ma.next(1) #print('result: ', self.result, self.category, 'loud speaking') else: #print('pred: ', self.pred, round(self.prob,2)) if self.previous_result == 'Speaking' and self.category >0.7: #print('previous: ', self.previous_result) self.result = 'Speaking' self.category = self.ma.next(0) #print('result: ', self.result, self.category, 'possible speaking') else: #print('previous: ', self.previous_result) self.result = 'OtherSound' self.category = self.ma.next(0) #print('result: ', self.result, self.category, 'loud othersound') else: #print('pred: ', self.pred, round(self.prob,2)) if self.previous_result == 'Speaking' and self.category >0.5: #print('previous: ', self.previous_result) self.result = 'Speaking' self.category = self.ma.next(0) #print('result: ', self.result, self.category, 'quite speaking') else: #print('previous: ', self.previous_result) self.result = 'OtherSound' self.category = self.ma.next(0) #print('result: ', self.result, self.category, 'quite othersound') now = datetime.datetime.now() print('final result: ', self.result, round(self.category,2)) #print('dB: ', round(current_dB, 2)) #print('*'*20) # maximum length of speaking buffer max_buffer_len = 16000 * 10 # 10S if self.category != 0 and len(self.speaking_buffer) < max_buffer_len: self.speaking_buffer = np.concatenate((self.speaking_buffer, self.current_window)) self.SAVE=True else: self.SAVE=False self.audio_buffer = bytes() # Saving speaking buffer (optional) try: now = datetime.datetime.now() if self.SAVE == False and len(self.speaking_buffer) !=0: speaking_length = len(self.speaking_buffer) / 16000. print('speaking_length: ', speaking_length, 's') self.speaking_buffer = np.array([]) except AttributeError: pass # audio_buffer = [] # frames = [] # for i in range(0, int(self.RATE / self.CHUNKSIZE * self.RECORD_SECONDS)): # data = stream.read(self.CHUNKSIZE) # current_window = np.frombuffer(data, dtype=np.int16) # dtype=np.float32 # audio_buffer = np.concatenate((audio_buffer, current_window)) # noisy_part = audio_buffer[0:20480] # 주변 소음을 수집한 뒤 noise reduction을 수행합니다. # audio_buffer = nr.reduce_noise(y = audio_buffer, y_noise=noisy_part, sr=16000) # close stream now = datetime.datetime.now() print(now) stream.stop_stream() stream.close() p.terminate() print('End.') return audio_buffer if __name__ == '__main__': rr = RealtimeRecording() audio_data = rr.start() predicted = predictSound(np.array(audio_data)) print(predicted)
[]
[]
[ "TF_CPP_MIN_LOG_LEVEL" ]
[]
["TF_CPP_MIN_LOG_LEVEL"]
python
1
0
start.go
package main import ( "context" "fmt" "io" "os" "os/exec" "path" "path/filepath" "strconv" "strings" "syscall" "time" eventsapi "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" taskapi "github.com/containerd/containerd/runtime/v2/task" "github.com/coreos/go-systemd/unit" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" "golang.org/x/sys/unix" ) // Start the primary user process inside the container func (s *Service) Start(ctx context.Context, r *taskapi.StartRequest) (_ *taskapi.StartResponse, retErr error) { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { return nil, errdefs.ToGRPC(err) } ctx, span := StartSpan(ctx, "service.Start", trace.WithAttributes(attribute.String(nsAttr, ns), attribute.String(cIDAttr, r.ID), attribute.String(eIDAttr, r.ExecID))) defer func() { if retErr != nil { retErr = errdefs.ToGRPCf(retErr, "start") span.SetStatus(codes.Error, retErr.Error()) } span.End() }() ctx = log.WithLogger(ctx, log.G(ctx).WithField("id", r.ID).WithField("ns", ns).WithField("execID", r.ExecID)) p := s.processes.Get(path.Join(ns, r.ID)) if p == nil { return nil, fmt.Errorf("%w: %s", errdefs.ErrNotFound, r.ID) } ctx = WithShimLog(ctx, p.LogWriter()) var pid uint32 if r.ExecID != "" { ep := p.(*initProcess).execs.Get(r.ExecID) if ep == nil { return nil, fmt.Errorf("exec %s: %w", r.ExecID, errdefs.ErrNotFound) } pid, err = ep.Start(ctx) if err != nil { s.units.Delete(ep) return nil, err } s.send(ctx, ns, &eventsapi.TaskExecStarted{ ContainerID: r.ID, ExecID: r.ExecID, Pid: pid, }) } else { pid, err = p.Start(ctx) if err != nil { return nil, err } s.send(ctx, ns, &eventsapi.TaskStart{ ContainerID: r.ID, Pid: pid, }) } return &taskapi.StartResponse{Pid: pid}, nil } func (p *process) runcCmd(cmd []string) ([]string, error) { root := []string{p.runc.Command, "--debug=" + strconv.FormatBool(p.runc.Debug), "--systemd-cgroup=" + strconv.FormatBool(p.opts.SystemdCgroup), "--root", p.runc.Root} if p.runc.Debug { root = append(root, "--log="+p.runc.Log) } return append(root, cmd...), nil } func writeUnit(name string, opts []*unit.UnitOption) error { rdr := unit.Serialize(opts) f, err := os.Create(filepath.Join("/run/systemd/system", name)) if err != nil { return err } defer f.Close() if _, err := io.Copy(f, rdr); err != nil { return err } return nil } func (p *initProcess) startOptions(rcmd []string) ([]*unit.UnitOption, error) { const svc = "Service" sysctl, err := exec.LookPath("systemctl") if err != nil { return nil, err } opts := []*unit.UnitOption{ unit.NewUnitOption(svc, "Type", p.unitType()), unit.NewUnitOption(svc, "RemainAfterExit", "no"), unit.NewUnitOption(svc, "PIDFile", p.pidFile()), unit.NewUnitOption(svc, "Delegate", "yes"), unit.NewUnitOption(svc, "ExecStopPost", "-"+p.exe+" --bundle="+p.Bundle+" exit "+os.Getenv("UNIT_NAME")), // Set this as env vars here because we only want these fifos to be used for the container stdio, not the other commands we run. // Otherwise we can run into interesting cases like the client has closeed the fifo and our Pre/Post commands hang // We already had to open these fifos in process to prevent such hangs with `ExecStart`, now instead it'll open them just before // executing runc. 
unit.NewUnitOption(svc, "Environment", "STDIN_FIFO="+p.Stdin), unit.NewUnitOption(svc, "Environment", "STDOUT_FIFO="+p.Stdout), unit.NewUnitOption(svc, "Environment", "STDERR_FIFO="+p.Stderr), unit.NewUnitOption(svc, "Environment", "DAEMON_UNIT_NAME="+os.Getenv("UNIT_NAME")), unit.NewUnitOption(svc, "Environment", "UNIT_NAME=%n"), // %n is replaced with the unit name by systemd unit.NewUnitOption(svc, "Environment", "EXIT_STATE_PATH="+p.exitStatePath()), } prefix := []string{p.exe, "--debug=" + strconv.FormatBool(p.runc.Debug), "--bundle=" + p.Bundle, "create"} if len(p.Rootfs) > 0 { if p.noNewNamespace { opts = append(opts, unit.NewUnitOption(svc, "ExecStartPre", p.exe+" mount "+p.mountConfigPath())) opts = append(opts, unit.NewUnitOption(svc, "ExecStopPost", "-"+p.exe+" unmount "+filepath.Join(p.Bundle, "rootfs"))) } else { // Unfortunately with PrivateMounts we can't use `ExecStartPre` to mount the rootfs b/c it does not share a mount namespace // with the main process. Instead we re-exec with `create` subcommand which will mount and exec the main process. opts = append(opts, unit.NewUnitOption(svc, "PrivateMounts", "yes")) prefix = append(prefix, "--mounts="+p.mountConfigPath()) } } if p.Terminal || p.opts.Terminal { opts = append(opts, unit.NewUnitOption("Service", "ExecStopPost", "-"+sysctl+" stop "+p.ttyUnitName())) prefix = append(prefix, "--tty") } execStart, err := p.runcCmd(append(rcmd, p.id)) if err != nil { return nil, err } opts = append(opts, unit.NewUnitOption(svc, "ExecStart", strings.Join(append(prefix, execStart...), " "))) return opts, nil } func (p *execProcess) startOptions() ([]*unit.UnitOption, error) { const svc = "Service" sysctl, err := exec.LookPath("systemctl") if err != nil { return nil, err } opts := []*unit.UnitOption{ unit.NewUnitOption(svc, "Type", p.unitType()), // unit.NewUnitOption(svc, "PIDFile", p.pidFile()), unit.NewUnitOption(svc, "GuessMainPID", "yes"), unit.NewUnitOption(svc, "Delegate", "yes"), unit.NewUnitOption(svc, "RemainAfterExit", "no"), unit.NewUnitOption(svc, "ExecStopPost", "-"+p.exe+" --debug="+strconv.FormatBool(p.runc.Debug)+" --id="+p.id+" --bundle="+p.parent.Bundle+" exit"), // Set this as env vars here because we only want these fifos to be used for the container stdio, not the other commands we run. // Otherwise we can run into interesting cases like the client has closeed the fifo and our Pre/Post commands hang // We already had to open these fifos in process to prevent such hangs with `ExecStart`, now instead it'll open them just before // executing runc. unit.NewUnitOption(svc, "Environment", "STDIN_FIFO="+p.Stdin), unit.NewUnitOption(svc, "Environment", "STDOUT_FIFO="+p.Stdout), unit.NewUnitOption(svc, "Environment", "STDERR_FIFO="+p.Stderr), unit.NewUnitOption(svc, "Environment", "DAEMON_UNIT_NAME="+os.Getenv("UNIT_NAME")), unit.NewUnitOption(svc, "Environment", "UNIT_NAME=%n"), // %n is replaced with the unit name by systemd unit.NewUnitOption(svc, "Environment", "EXIT_STATE_PATH="+p.exitStatePath()), unit.NewUnitOption(svc, "Environment", "PIDFILE="+p.pidFile()), } prefix := []string{p.exe, "--debug=" + strconv.FormatBool(p.runc.Debug), "--bundle=" + p.parent.Bundle, "create"} // TODO: Really need to use --detach here so we don't have a runc process hanging around... 
cmd := []string{"exec", "--process=" + p.processFilePath(), "--pid-file=" + p.pidFile(), "--detach"} if p.Terminal || p.opts.Terminal { s, err := p.ttySockPath() if err != nil { return nil, err } cmd = append(cmd, "-t") cmd = append(cmd, "--console-socket="+s) opts = append(opts, unit.NewUnitOption(svc, "ExecStopPost", "-"+sysctl+" stop "+p.ttyUnitName())) prefix = append(prefix, "--tty") } execStart, err := p.runcCmd(append(cmd, p.parent.id)) if err != nil { return nil, err } execStart = append(prefix, execStart...) opts = append(opts, unit.NewUnitOption(svc, "ExecStart", strings.Join(execStart, " "))) return opts, nil } func (p *process) unitType() string { if p.opts.SdNotifyEnable { return "notify" } return "forking" } func (p *initProcess) Start(ctx context.Context) (pid uint32, retErr error) { ctx, span := StartSpan(ctx, "InitProcess.Start") defer func() { if retErr != nil { span.SetStatus(codes.Error, retErr.Error()) } span.SetAttributes(attribute.Int("pid", int(pid))) span.End() }() if p.checkpoint != "" { return p.restore(ctx) } if p.ProcessState().Exited() { return 0, fmt.Errorf("process has already exited: %s: %w", p.ProcessState(), errdefs.ErrFailedPrecondition) } if err := p.runc.Start(ctx, p.id); err != nil { log.G(ctx).WithError(err).Error("Error calling runc start") ret := fmt.Errorf("failed runc start: %w", err) if err := p.LoadState(ctx); err != nil { log.G(ctx).WithError(err).Warn("Error loading process state") } if !p.ProcessState().Exited() { log.G(ctx).Debug("runc start failed but process is still running, sending sigkill") p.systemd.KillUnitContext(ctx, p.Name(), int32(unix.SIGKILL)) if err := p.LoadState(ctx); err != nil { log.G(ctx).WithError(err).Debug("Error loading process state") } if !p.ProcessState().Exited() { p.SetState(ctx, pState{ExitCode: 255, ExitedAt: time.Now()}) } } p.cond.Broadcast() if p.runc.Debug { unitData, err := os.ReadFile("/run/systemd/system/" + p.Name()) if err == nil { ret = fmt.Errorf("%w:\n%s\n%s", ret, p.Name(), unitData) } processData, err := os.ReadFile(filepath.Join(p.Bundle, "config.json")) if err == nil { ret = fmt.Errorf("%w:\nprocess.json:\n%s", ret, string(processData)) } debug, err := os.ReadFile(p.runc.Log) if err == nil { ret = fmt.Errorf("%w:\nrunc debug:\n%s", ret, string(debug)) } else { log.G(ctx).WithError(err).Warn("Error opening runc debug log") } } return 0, ret } for p.Pid() == 0 && !p.ProcessState().Exited() { select { case <-ctx.Done(): default: } if err := p.LoadState(ctx); err != nil { log.G(ctx).WithError(err).Warn("Error loading process state") } } return pid, nil } func (p *initProcess) restore(ctx context.Context) (pid uint32, retErr error) { if p.Terminal || p.opts.Terminal { sockPath, err := p.ttySockPath() if err != nil { return 0, err } u, _, err := p.makePty(ctx, sockPath) if err != nil { return 0, err } defer func() { if retErr != nil { p.systemd.KillUnitContext(ctx, u, int32(syscall.SIGKILL)) } }() } return p.startUnit(ctx) } func (p *execProcess) Start(ctx context.Context) (_ uint32, retErr error) { if !p.parent.ProcessState().Started() { p.parent.LoadState(ctx) if !p.parent.ProcessState().Started() { return 0, fmt.Errorf("%w: container is not started", errdefs.ErrFailedPrecondition) } } if p.Terminal || p.opts.Terminal { sockPath, err := p.ttySockPath() if err != nil { return 0, err } u, _, err := p.makePty(ctx, sockPath) if err != nil { return 0, err } defer func() { if retErr != nil { p.systemd.KillUnitContext(ctx, u, int32(syscall.SIGKILL)) } }() } ch := make(chan string, 1) if _, err := 
p.systemd.StartUnitContext(ctx, p.Name(), "replace", ch); err != nil { return 0, err } select { case <-ctx.Done(): p.systemd.KillUnitContext(ctx, p.Name(), int32(syscall.SIGKILL)) case status := <-ch: if status != "done" { st, err := p.Wait(ctx) if err == nil { return st.Pid, nil } pid, err := p.getPid(ctx) if err == nil { return pid, nil } getUnitState(ctx, p.systemd, p.Name(), &st) ret := fmt.Errorf("error starting exec process") if p.runc.Debug { ret = fmt.Errorf("%w:\n%s", ret, p.Name()) unitData, err := os.ReadFile("/run/systemd/system/" + p.Name()) if err == nil { ret = fmt.Errorf("%w:\n%s\n%s", ret, p.Name(), unitData) } processData, err := os.ReadFile(p.processFilePath()) if err == nil { ret = fmt.Errorf("%w:\nprocess.json:\n%s", ret, string(processData)) } debug, err := os.ReadFile(p.runc.Log) if err == nil { ret = fmt.Errorf("%w:\nrunc debug:\n%s", ret, string(debug)) } else { log.G(ctx).WithError(err).Warn("Error opening runc debug log") } } return 0, ret } } p.LoadState(ctx) if p.ProcessState().ExitCode == 255 { ret := fmt.Errorf("error starting exec process") if p.runc.Debug { debug, err := os.ReadFile(p.runc.Log) if err == nil { ret = fmt.Errorf("%w:\nrunc debug:\n%s", ret, string(debug)) } } return 0, ret } pid, err := p.getPid(ctx) if err != nil { return 0, err } p.mu.Lock() p.state.Pid = pid p.mu.Unlock() return pid, nil }
[ "\"UNIT_NAME\"", "\"UNIT_NAME\"", "\"UNIT_NAME\"" ]
[]
[ "UNIT_NAME" ]
[]
["UNIT_NAME"]
go
1
0
novajoin/configure_ipa.py
#!/usr/bin/python # Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import getpass import logging import os import pwd import six import socket import string import sys import tempfile from ipalib import api from ipalib import certstore from ipalib import errors from ipalib import x509 from ipapython import certdb from ipapython import ipaldap from ipapython.ipautil import CalledProcessError from ipapython.ipautil import ipa_generate_password from ipapython.ipautil import kinit_password from ipapython.ipautil import realm_to_suffix from ipapython.ipautil import run from ipapython.ipautil import user_input from ipapython.ipautil import write_tmp_file from novajoin.errors import ConfigurationError import nss.nss as nss logger = logging.getLogger() allowed_chars = string.letters + string.digits KRB5_CONF_TMPL = """ includedir /var/lib/sss/pubconf/krb5.include.d/ [libdefaults] default_realm = $REALM dns_lookup_realm = false dns_lookup_kdc = false rdns = false ticket_lifetime = 24h forwardable = yes udp_preference_limit = 0 default_ccache_name = KEYRING:persistent:%{uid} [realms] $REALM = { kdc = $MASTER:88 master_kdc = $MASTER:88 admin_server = $MASTER:749 default_domain = $DOMAIN } [domain_realm] .$DOMAIN = $REALM $DOMAIN = $REALM """ class NovajoinRole(object): """One-stop shopping for creating the IPA permissions, privilege and role. Assumes that ipalib is imported and initialized and an RPC context already exists. """ def __init__(self, keytab='/etc/nova/krb5.keytab', user='nova', hostname=None): self.keytab = keytab self.user = user if not hostname: self.hostname = self._get_fqdn() else: self.hostname = hostname self.service = u'nova/%s' % self.hostname self.ccache_name = None def _get_fqdn(self): """Try to determine the fully-qualfied domain name of this box""" fqdn = "" try: fqdn = socket.getfqdn() except Exception: # pylint: disable=broad-except try: # assume it is in the IPA domain if it comes back # not fully-qualified fqdn = socket.gethostname() # pylint: disable=no-member fqdn = fqdn + '.' 
+ api.env.domain except Exception: # pylint: disable=broad-except fqdn = "" return fqdn def write_tmp_krb5_conf(self, opts, filename): options = {'MASTER': opts.server, 'DOMAIN': opts.domain, 'REALM': opts.realm} template = string.Template(KRB5_CONF_TMPL) text = template.substitute(options) with open(filename, 'w+') as f: f.write(text) def create_krb5_conf(self, opts): (krb_fd, krb_name) = tempfile.mkstemp() os.close(krb_fd) self.write_tmp_krb5_conf(opts, krb_name) return krb_name def _get_ca_certs(self, server, realm): basedn = realm_to_suffix(realm) try: conn = ipaldap.IPAdmin(server, sasl_nocanon=True) conn.do_sasl_gssapi_bind() certs = certstore.get_ca_certs(conn, basedn, realm, False) except Exception as e: raise ConfigurationError("get_ca_certs_from_ldap() error: %s" % e) certs = [x509.load_certificate(c[0], x509.DER) for c in certs if c[2] is not False] return certs def create_nssdb(self, server, realm): nss.nss_init_nodb() nss_db = certdb.NSSDatabase() ca_certs = self._get_ca_certs(server, realm) ca_certs = [cert.der_data for cert in ca_certs] # Add CA certs to a temporary NSS database try: pwd_file = write_tmp_file(ipa_generate_password()) nss_db.create_db(pwd_file.name) for i, cert in enumerate(ca_certs): nss_db.add_cert(cert, 'CA certificate %d' % (i + 1), 'C,,') except CalledProcessError: raise ConfigurationError( 'Failed to add CA to temporary NSS database.') return nss_db def kinit(self, principal, realm, password, config=None): ccache_dir = tempfile.mkdtemp(prefix='krbcc') self.ccache_name = os.path.join(ccache_dir, 'ccache') current_ccache = os.environ.get('KRB5CCNAME') os.environ['KRB5CCNAME'] = self.ccache_name if principal.find('@') == -1: # pylint: disable=no-member principal = '%s@%s' % (principal, realm) try: kinit_password(principal, password, self.ccache_name, config=config) except RuntimeError as e: raise ConfigurationError("Kerberos authentication failed: %s" % e) finally: if current_ccache: os.environ['KRB5CCNAME'] = current_ccache return ccache_dir def _call_ipa(self, command, args, kw): """Call into the IPA API. Duplicates are ignored to be idempotent. Other errors are ignored implitly because they are encapsulated in the result for some calls. 
""" try: api.Command[command](args, **kw) except errors.DuplicateEntry: pass except Exception as e: # pylint: disable=broad-except logger.error("Unhandled exception: %s", e) def _add_permissions(self): logging.debug('Add permissions') self._call_ipa(u'permission_add', u'Modify host password', {'ipapermright': u'write', 'type': u'host', 'attrs': u'userpassword'}) self._call_ipa(u'permission_add', u'Write host certificate', {'ipapermright': u'write', 'type': u'host', 'attrs': u'usercertificate'}) self._call_ipa(u'permission_add', u'Modify host userclass', {'ipapermright': u'write', 'type': u'host', 'attrs': u'userclass'}) self._call_ipa(u'permission_add', u'Modify service managedBy attribute', {'ipapermright': u'write', 'type': u'service', 'attrs': u'managedby'}) def _add_privileges(self): logging.debug('Add privileges') self._call_ipa(u'privilege_add', u'Nova Host Management', {'description': u'Nova Host Management'}) self._call_ipa(u'privilege_add_permission', u'Nova Host Management', {u'permission': [ u'System: add hosts', u'System: remove hosts', u'modify host password', u'modify host userclass', u'modify hosts', u'modify service managedBy attribute', u'System: Add krbPrincipalName to a Host', u'System: Add Services', u'System: Remove Services', u'System: revoke certificate', u'System: manage host keytab', u'System: write host certificate', u'System: retrieve certificates from the ca', u'System: modify services', u'System: manage service keytab', u'System: read dns entries', u'System: remove dns entries', u'System: add dns entries', u'System: update dns entries', u'Retrieve Certificates from the CA', u'Revoke Certificate']}) def _add_role(self): logging.debug('Add role') self._call_ipa(u'role_add', u'Nova Host Manager', {'description': u'Nova Host Manager'}) self._call_ipa(u'role_add_privilege', u'Nova Host Manager', {'privilege': u'Nova Host Management'}) self._call_ipa(u'role_add_member', u'Nova Host Manager', {u'service': self.service}) def _add_host(self, filename): logging.debug('Add host %s', self.hostname) otp = ipa_generate_password(allowed_chars) self._call_ipa(u'host_add', six.text_type(self.hostname), {'description': u'Undercloud host', 'userpassword': six.text_type(otp), 'force': True}) if filename: with open(filename, "w") as fd: fd.write("%s\n" % otp) else: return otp def _add_service(self): logging.debug('Add service %s', self.service) self._call_ipa(u'service_add', self.service, {'force': True}) def _get_keytab(self): logging.debug('Getting keytab %s for %s', self.keytab, self.service) if self.ccache_name: current_ccache = os.environ.get('KRB5CCNAME') os.environ['KRB5CCNAME'] = self.ccache_name try: if os.path.exists(self.keytab): os.unlink(self.keytab) except OSError as e: sys.exit('Could not remove %s: %s' % (self.keytab, e)) try: run(['ipa-getkeytab', '-s', api.env.server, # pylint: disable=no-member '-p', self.service, '-k', self.keytab]) finally: if current_ccache: os.environ['KRB5CCNAME'] = current_ccache # s/b already validated user = pwd.getpwnam(self.user) os.chown(self.keytab, user.pw_uid, user.pw_gid) os.chmod(self.keytab, 0o600) def configure_ipa(self, precreate, otp_filename=None): otp = None if precreate: otp = self._add_host(otp_filename) self._add_service() if not precreate: self._get_keytab() self._add_permissions() self._add_privileges() self._add_role() if otp: print(otp) def ipa_options(parser): parser.add_argument('--debug', help='Additional logging output', action="store_true", default=False) parser.add_argument('--no-kinit', help='Assume the user 
has already done a kinit', action="store_true", default=False) parser.add_argument('--user', help='User that nova services run as', default='nova') parser.add_argument('--principal', dest='principal', default='admin', help='principal to use to setup IPA integration') parser.add_argument('--password', dest='password', help='password for the principal') parser.add_argument('--password-file', dest='passwordfile', help='path to file containing password for ' 'the principal') parser.add_argument('--precreate', default=False, help='Pre-create the IPA host with an OTP', action="store_true") noconfig = parser.add_argument_group('Pre-create options') noconfig.add_argument('--server', dest='server', help='IPA server') noconfig.add_argument('--realm', dest='realm', help='IPA realm name') noconfig.add_argument('--domain', dest='domain', help='IPA domain name') noconfig.add_argument('--hostname', dest='hostname', help='Hostname of IPA host to create') noconfig.add_argument('--otp-file', dest='otp_filename', help='File to write OTP to instead of stdout') return parser def validate_options(opts): if opts.precreate and not os.path.exists('/etc/ipa/default.conf'): if not opts.hostname: raise ConfigurationError('hostname is required') if not opts.domain: raise ConfigurationError('IPA domain is required') if not opts.realm: raise ConfigurationError('IPA realm is required') if not opts.server: raise ConfigurationError('IPA server is required') if opts.no_kinit: return if not opts.principal: opts.principal = user_input("IPA admin user", "admin", allow_empty=False) if opts.passwordfile: try: with open(opts.passwordfile) as f: opts.password = f.read() except IOError as e: raise ConfigurationError('Unable to read password file: %s' % e) if not opts.password: try: opts.password = getpass.getpass("Password for %s: " % opts.principal) except EOFError: opts.password = None if not opts.password: raise ConfigurationError('Password must be provided.') if not opts.precreate: try: pwd.getpwnam(opts.user) except KeyError: raise ConfigurationError('User: %s not found on the system' % opts.user)
[]
[]
[ "KRB5CCNAME" ]
[]
["KRB5CCNAME"]
python
1
0
rates/rates_test.go
package rates

import (
	"os"
	"testing"
)

func TestApiClient_GetRates_Integration(t *testing.T) {
	// Marked as an integration test because it requires a network connection
	// to the Open Exchange Rates API.
	if testing.Short() {
		t.Skip("Skipping test because it requires network")
	}

	// Fail the test if the key is not set, with a descriptive error.
	apiKey := os.Getenv("GOPHER_MONEY_API_KEY")
	if apiKey == "" {
		t.Fatal("Could not get environment variable required for test: GOPHER_MONEY_API_KEY")
	}

	client := NewApiClient(apiKey)

	rates, err := client.GetRates()
	if err != nil {
		t.Fatal(err)
	}

	if len(rates) < 1 {
		t.Fatal("No rates returned from API")
	}
}
[ "\"GOPHER_MONEY_API_KEY\"" ]
[]
[ "GOPHER_MONEY_API_KEY" ]
[]
["GOPHER_MONEY_API_KEY"]
go
1
0
resource_pool/resource_pool.go
package resource_pool import ( "encoding/json" "errors" "fmt" "io" "io/ioutil" "net" "net/url" "os" "os/exec" "path" "path/filepath" "strconv" "strings" "time" "github.com/blang/semver" "code.cloudfoundry.org/garden" "github.com/cloudfoundry/gunk/command_runner" "code.cloudfoundry.org/lager" "code.cloudfoundry.org/garden-linux/linux_backend" "code.cloudfoundry.org/garden-linux/linux_container" "code.cloudfoundry.org/garden-linux/logging" "code.cloudfoundry.org/garden-linux/network" "code.cloudfoundry.org/garden-linux/network/bridgemgr" "code.cloudfoundry.org/garden-linux/network/iptables" "code.cloudfoundry.org/garden-linux/network/subnets" "code.cloudfoundry.org/garden-linux/process" "code.cloudfoundry.org/garden-linux/sysconfig" "code.cloudfoundry.org/garden-shed/layercake" "code.cloudfoundry.org/garden-shed/rootfs_provider" ) var ( ErrUnknownRootFSProvider = errors.New("unknown rootfs provider") ) //go:generate counterfeiter -o fake_filter_provider/FakeFilterProvider.go . FilterProvider type FilterProvider interface { ProvideFilter(containerId string) network.Filter } //go:generate counterfeiter -o fake_subnet_pool/FakeSubnetPool.go . SubnetPool type SubnetPool interface { Acquire(subnet subnets.SubnetSelector, ip subnets.IPSelector, logger lager.Logger) (*linux_backend.Network, error) Release(network *linux_backend.Network, logger lager.Logger) error Remove(network *linux_backend.Network, logger lager.Logger) error Capacity() int } //go:generate counterfeiter -o fake_rootfs_provider/FakeRootFSProvider.go . RootFSProvider type RootFSProvider interface { Create(log lager.Logger, id string, spec rootfs_provider.Spec) (mountpoint string, envvar []string, err error) Destroy(log lager.Logger, id string) error GC(log lager.Logger) error } //go:generate counterfeiter -o fake_rootfs_cleaner/FakeRootFSCleaner.go . RootFSCleaner type RootFSCleaner interface { Clean(log lager.Logger, path string) error } type Remover interface { Remove(id layercake.ID) error } //go:generate counterfeiter -o fake_mkdir_chowner/FakeMkdirChowner.go . 
MkdirChowner type MkdirChowner interface { MkdirChown(path string, uid, gid uint32, mode os.FileMode) error } type LinuxResourcePool struct { logger lager.Logger binPath string depotPath string sysconfig sysconfig.Config denyNetworks []string allowNetworks []string rootFSProvider RootFSProvider rootFSCleaner RootFSCleaner mappingList rootfs_provider.MappingList subnetPool SubnetPool externalIP net.IP mtu int portPool linux_container.PortPool bridges bridgemgr.BridgeManager iptablesMgr linux_container.IPTablesManager filterProvider FilterProvider defaultChain iptables.Chain runner command_runner.CommandRunner quotaManager linux_container.QuotaManager containerIDs chan string currentContainerVersion semver.Version mkdirChowner MkdirChowner } func New( logger lager.Logger, binPath, depotPath string, sysconfig sysconfig.Config, rootFSProvider RootFSProvider, rootFSCleaner RootFSCleaner, mappingList rootfs_provider.MappingList, externalIP net.IP, mtu int, subnetPool SubnetPool, bridges bridgemgr.BridgeManager, iptablesMgr linux_container.IPTablesManager, filterProvider FilterProvider, defaultChain iptables.Chain, portPool linux_container.PortPool, denyNetworks, allowNetworks []string, runner command_runner.CommandRunner, quotaManager linux_container.QuotaManager, currentContainerVersion semver.Version, mkdirChowner MkdirChowner, ) *LinuxResourcePool { pool := &LinuxResourcePool{ logger: logger.Session("pool"), binPath: binPath, depotPath: depotPath, sysconfig: sysconfig, rootFSProvider: rootFSProvider, rootFSCleaner: rootFSCleaner, mappingList: mappingList, allowNetworks: allowNetworks, denyNetworks: denyNetworks, externalIP: externalIP, mtu: mtu, subnetPool: subnetPool, bridges: bridges, iptablesMgr: iptablesMgr, filterProvider: filterProvider, defaultChain: defaultChain, portPool: portPool, runner: runner, quotaManager: quotaManager, containerIDs: make(chan string), currentContainerVersion: currentContainerVersion, mkdirChowner: mkdirChowner, } go pool.generateContainerIDs() return pool } func (p *LinuxResourcePool) MaxContainers() int { return p.subnetPool.Capacity() } func (p *LinuxResourcePool) Setup() error { setup := exec.Command(path.Join(p.binPath, "setup.sh")) setup.Env = []string{ "CONTAINER_DEPOT_PATH=" + p.depotPath, "PATH=" + os.Getenv("PATH"), } err := p.runner.Run(setup) if err != nil { return err } if err := p.quotaManager.Setup(); err != nil { return fmt.Errorf("resource_pool: enable disk quotas: %s", err) } return p.setupIPTables() } func (p *LinuxResourcePool) setupIPTables() error { for _, n := range p.allowNetworks { if n == "" { continue } if err := p.defaultChain.AppendRule("", n, iptables.Return); err != nil { return fmt.Errorf("resource_pool: setting up allow rules in iptables: %v", err) } } for _, n := range p.denyNetworks { if n == "" { continue } if err := p.defaultChain.AppendRule("", n, iptables.Reject); err != nil { return fmt.Errorf("resource_pool: setting up deny rules in iptables: %v", err) } } return nil } func (p *LinuxResourcePool) Prune(keep map[string]bool) error { entries, err := ioutil.ReadDir(p.depotPath) if err != nil { p.logger.Error("prune-container-pool-path-error", err, lager.Data{"depotPath": p.depotPath}) return fmt.Errorf("Cannot read path %q: %s", p.depotPath, err) } for _, entry := range entries { id := entry.Name() if id == "tmp" { // ignore temporary directory in depotPath continue } _, found := keep[id] if found { continue } p.pruneEntry(id) } if err := p.bridges.Prune(); err != nil { p.logger.Error("prune-bridges", err) } return nil } // 
pruneEntry does not report errors, only log them func (p *LinuxResourcePool) pruneEntry(id string) { pLog := p.logger.Session("prune", lager.Data{"id": id}) pLog.Info("prune") err := p.releaseSystemResources(pLog, id) if err != nil { pLog.Error("release-system-resources-error", err) } pLog.Info("end of prune") } func (p *LinuxResourcePool) Acquire(spec garden.ContainerSpec) (linux_backend.LinuxContainerSpec, error) { id := <-p.containerIDs containerPath := path.Join(p.depotPath, id) handle := getHandle(spec.Handle, id) pLog := p.logger.Session("acquire", lager.Data{"handle": handle, "id": id}) iptablesCh := make(chan error, 1) go func(iptablesCh chan error) { pLog.Debug("setup-iptables-starting") if err := p.filterProvider.ProvideFilter(id).Setup(handle); err != nil { pLog.Error("setup-iptables-failed", err) iptablesCh <- fmt.Errorf("resource_pool: set up filter: %v", err) } else { pLog.Debug("setup-iptables-ended") iptablesCh <- nil } }(iptablesCh) pLog.Info("creating") resources, err := p.acquirePoolResources(spec, id, pLog) if err != nil { return linux_backend.LinuxContainerSpec{}, err } defer cleanup(&err, func() { p.releasePoolResources(resources, pLog) }) pLog.Info("acquired-pool-resources") pLog.Info("running-graph-cleanup") if err := p.rootFSProvider.GC(pLog); err != nil { pLog.Error("graph-cleanup-failed", err) } containerRootFSPath, rootFSEnv, err := p.acquireSystemResources( spec, id, resources, pLog, ) if err != nil { return linux_backend.LinuxContainerSpec{}, err } err = <-iptablesCh if err != nil { p.tryReleaseSystemResources(p.logger, id) return linux_backend.LinuxContainerSpec{}, err } pLog.Info("created") specEnv, err := process.NewEnv(spec.Env) if err != nil { p.tryReleaseSystemResources(p.logger, id) return linux_backend.LinuxContainerSpec{}, err } spec.Env = rootFSEnv.Merge(specEnv).Array() spec.Handle = handle return linux_backend.LinuxContainerSpec{ ID: id, ContainerPath: containerPath, ContainerRootFSPath: containerRootFSPath, Resources: resources, Events: []string{}, Version: p.currentContainerVersion, State: linux_backend.StateBorn, ContainerSpec: spec, }, nil } func (p *LinuxResourcePool) Restore(snapshot io.Reader) (linux_backend.LinuxContainerSpec, error) { var containerSnapshot linux_container.ContainerSnapshot err := json.NewDecoder(snapshot).Decode(&containerSnapshot) if err != nil { return linux_backend.LinuxContainerSpec{}, err } id := containerSnapshot.ID rLog := p.logger.Session("restore", lager.Data{ "handle": containerSnapshot.Handle, "id": id, }) rLog.Debug("restoring") resources := containerSnapshot.Resources subnetLogger := rLog.Session("subnet-pool") if err = p.subnetPool.Remove(resources.Network, subnetLogger); err != nil { return linux_backend.LinuxContainerSpec{}, err } if err = p.bridges.Rereserve(resources.Bridge, resources.Network.Subnet, id); err != nil { p.subnetPool.Release(resources.Network, subnetLogger) return linux_backend.LinuxContainerSpec{}, err } for _, port := range resources.Ports { err = p.portPool.Remove(port) if err != nil { p.subnetPool.Release(resources.Network, subnetLogger) for _, port := range resources.Ports { p.portPool.Release(port) } return linux_backend.LinuxContainerSpec{}, err } } version, err := p.restoreContainerVersion(id) if err != nil { return linux_backend.LinuxContainerSpec{}, err } spec := linux_backend.LinuxContainerSpec{ ID: id, ContainerPath: path.Join(p.depotPath, id), ContainerRootFSPath: containerSnapshot.RootFSPath, State: linux_backend.State(containerSnapshot.State), Events: containerSnapshot.Events, 
ContainerSpec: garden.ContainerSpec{ Handle: containerSnapshot.Handle, GraceTime: containerSnapshot.GraceTime, Properties: containerSnapshot.Properties, }, Resources: linux_backend.NewResources( resources.RootUID, resources.Network, resources.Bridge, resources.Ports, p.externalIP, ), Limits: containerSnapshot.Limits, NetIns: containerSnapshot.NetIns, NetOuts: containerSnapshot.NetOuts, Processes: containerSnapshot.Processes, Version: version, } return spec, nil } func (p *LinuxResourcePool) Release(container linux_backend.LinuxContainerSpec) error { pLog := p.logger.Session("release", lager.Data{ "handle": container.Handle, "id": container.ID, }) pLog.Info("releasing") err := p.releaseSystemResources(pLog, container.ID) if err != nil { pLog.Error("release-system-resources", err) return err } p.releasePoolResources(container.Resources, pLog) pLog.Info("released") return nil } func (p *LinuxResourcePool) generateContainerIDs() { for containerNum := time.Now().UnixNano(); ; containerNum++ { containerID := []byte{} var i uint for i = 0; i < 11; i++ { containerID = strconv.AppendInt( containerID, (containerNum>>(55-(i+1)*5))&31, 32, ) } p.containerIDs <- string(containerID) } } func (p *LinuxResourcePool) writeBindMounts(containerPath string, rootFSPath string, bindMounts []garden.BindMount, mkdirUID int) error { hook := path.Join(containerPath, "lib", "hook-parent-before-clone.sh") for _, bm := range bindMounts { dstMount := path.Join(rootFSPath, bm.DstPath) srcPath := bm.SrcPath if bm.Origin == garden.BindMountOriginContainer { srcPath = path.Join(rootFSPath, srcPath) } mode := "ro" if bm.Mode == garden.BindMountModeRW { mode = "rw" } linebreak := exec.Command("bash", "-c", "echo >> "+hook) if err := p.runner.Run(linebreak); err != nil { return err } if err := p.mkdirChowner.MkdirChown(dstMount, uint32(mkdirUID), uint32(mkdirUID), 0755); err != nil { return err } mount := exec.Command("bash", "-c", "echo mount -n --bind "+srcPath+" "+dstMount+" >> "+hook) if err := p.runner.Run(mount); err != nil { return err } remount := exec.Command("bash", "-c", "echo mount -n --bind -o remount,"+mode+" "+srcPath+" "+dstMount+" >> "+hook) if err := p.runner.Run(remount); err != nil { return err } } return nil } func (p *LinuxResourcePool) saveBridgeName(id string, bridgeName string) error { bridgeNameFile := path.Join(p.depotPath, id, "bridge-name") return ioutil.WriteFile(bridgeNameFile, []byte(bridgeName), 0644) } func (p *LinuxResourcePool) saveRootFSProvider(id string, provider string) error { providerFile := path.Join(p.depotPath, id, "rootfs-provider") return ioutil.WriteFile(providerFile, []byte(provider), 0644) } func (p *LinuxResourcePool) saveContainerVersion(id string) error { versionFile := path.Join(p.depotPath, id, "version") return ioutil.WriteFile(versionFile, []byte(p.currentContainerVersion.String()), 0644) } func (p *LinuxResourcePool) restoreContainerVersion(id string) (semver.Version, error) { content, err := ioutil.ReadFile(filepath.Join(p.depotPath, id, "version")) if err != nil { if os.IsNotExist(err) { return linux_container.MissingVersion, nil } return semver.Version{}, err } return semver.Make(string(content)) } func (p *LinuxResourcePool) acquirePoolResources(spec garden.ContainerSpec, id string, logger lager.Logger) (*linux_backend.Resources, error) { resources := linux_backend.NewResources(0, nil, "", nil, p.externalIP) subnet, ip, err := parseNetworkSpec(spec.Network) if err != nil { return nil, fmt.Errorf("create container: invalid network spec: %v", err) } if err := 
p.acquireUID(resources, spec.Privileged); err != nil { return nil, err } if resources.Network, err = p.subnetPool.Acquire(subnet, ip, logger.Session("subnet-pool")); err != nil { p.releasePoolResources(resources, logger) return nil, err } return resources, nil } func (p *LinuxResourcePool) acquireUID(resources *linux_backend.Resources, privileged bool) error { if !privileged { resources.RootUID = p.mappingList.Map(0) return nil } resources.RootUID = 0 return nil } func (p *LinuxResourcePool) releasePoolResources(resources *linux_backend.Resources, logger lager.Logger) { for _, port := range resources.Ports { p.portPool.Release(port) } if resources.Network != nil { p.subnetPool.Release(resources.Network, logger.Session("subnet-pool")) } } func (p *LinuxResourcePool) acquireSystemResources(spec garden.ContainerSpec, id string, resources *linux_backend.Resources, pLog lager.Logger) (string, process.Env, error) { containerPath := path.Join(p.depotPath, id) if err := os.MkdirAll(containerPath, 0755); err != nil { return "", nil, fmt.Errorf("resource_pool: creating container directory: %v", err) } rootFSPath, rootFSEnvVars, err := p.setupContainerDirectories(spec, id, resources, pLog) if err != nil { os.RemoveAll(containerPath) return "", nil, err } createCmd := path.Join(p.binPath, "create.sh") create := exec.Command(createCmd, containerPath) suff, _ := resources.Network.Subnet.Mask.Size() env := process.Env{ "id": id, "rootfs_path": rootFSPath, "network_host_ip": subnets.GatewayIP(resources.Network.Subnet).String(), "network_container_ip": resources.Network.IP.String(), "network_cidr_suffix": strconv.Itoa(suff), "network_cidr": resources.Network.Subnet.String(), "external_ip": p.externalIP.String(), "container_iface_mtu": fmt.Sprintf("%d", p.mtu), "bridge_iface": resources.Bridge, "root_uid": strconv.FormatUint(uint64(resources.RootUID), 10), "PATH": os.Getenv("PATH"), } create.Env = env.Array() pRunner := logging.Runner{ CommandRunner: p.runner, Logger: pLog.Session("create-script"), } err = pRunner.Run(create) defer cleanup(&err, func() { p.tryReleaseSystemResources(pLog, id) }) if err != nil { pLog.Error("create-command-failed", err, lager.Data{ "CreateCmd": createCmd, "Env": create.Env, }) return "", nil, err } err = p.saveRootFSProvider(id, "docker-composite") if err != nil { pLog.Error("save-rootfs-provider-failed", err, lager.Data{ "Id": id, "rootfs": spec.RootFSPath, }) return "", nil, err } err = p.saveContainerVersion(id) if err != nil { pLog.Error("save-container-version-failed", err, lager.Data{ "Id": id, "ContainerPath": containerPath, }) return "", nil, err } err = p.writeBindMounts(containerPath, rootFSPath, spec.BindMounts, resources.RootUID) if err != nil { pLog.Error("bind-mounts-failed", err) return "", nil, err } return rootFSPath, rootFSEnvVars, nil } func (p *LinuxResourcePool) setupRootfs(spec garden.ContainerSpec, id string, resources *linux_backend.Resources, pLog lager.Logger) (string, process.Env, error) { rootFSURL, err := url.Parse(spec.RootFSPath) if err != nil { pLog.Error("parse-rootfs-path-failed", err, lager.Data{ "RootFSPath": spec.RootFSPath, }) return "", nil, err } rootFSSpec := rootfs_provider.Spec{ RootFS: rootFSURL, Namespaced: resources.RootUID != 0, QuotaSize: int64(spec.Limits.Disk.ByteHard), QuotaScope: spec.Limits.Disk.Scope, } pLog.Debug("provide-rootfs-starting") rootFSPath, rootFSEnvVars, err := p.rootFSProvider.Create(pLog, id, rootFSSpec) if err != nil { pLog.Error("provide-rootfs-failed", err) return "", nil, err } 
pLog.Debug("provide-rootfs-ended") pLog.Debug("clean-rootfs-starting") if err := p.rootFSCleaner.Clean(pLog, rootFSPath); err != nil { return "", nil, err } pLog.Debug("clean-rootfs-ended") rootFSProcessEnv, err := process.NewEnv(rootFSEnvVars) if err != nil { pLog.Error("rootfs-env-malformed", err) return "", nil, err } return rootFSPath, rootFSProcessEnv, nil } func (p *LinuxResourcePool) setupContainerDirectories(spec garden.ContainerSpec, id string, resources *linux_backend.Resources, pLog lager.Logger) (string, process.Env, error) { rootFSPath, rootFSEnvVars, err := p.setupRootfs(spec, id, resources, pLog) if err != nil { return "", nil, err } pLog.Debug("setup-bridge-starting") if err := p.setupBridge(pLog, id, resources); err != nil { p.rootFSProvider.Destroy(pLog, id) return "", nil, err } pLog.Debug("setup-bridge-ended") return rootFSPath, rootFSEnvVars, nil } func (p *LinuxResourcePool) setupBridge(pLog lager.Logger, id string, resources *linux_backend.Resources) error { var err error if resources.Bridge, err = p.bridges.Reserve(resources.Network.Subnet, id); err != nil { pLog.Error("reserve-bridge-failed", err, lager.Data{ "Id": id, "Subnet": resources.Network.Subnet, "Bridge": resources.Bridge, }) return err } if err = p.saveBridgeName(id, resources.Bridge); err != nil { pLog.Error("save-bridge-name-failed", err, lager.Data{ "Id": id, "Bridge": resources.Bridge, }) return err } return nil } func (p *LinuxResourcePool) tryReleaseSystemResources(logger lager.Logger, id string) { err := p.releaseSystemResources(logger, id) if err != nil { logger.Error("failed-to-undo-failed-create", err) } } func (p *LinuxResourcePool) releaseSystemResources(logger lager.Logger, id string) error { pRunner := logging.Runner{ CommandRunner: p.runner, Logger: logger, } bridgeName, err := ioutil.ReadFile(path.Join(p.depotPath, id, "bridge-name")) if err == nil { if err := p.bridges.Release(string(bridgeName), id); err != nil { return fmt.Errorf("containerpool: release bridge %s: %v", bridgeName, err) } } rootFSProvider, err := ioutil.ReadFile(path.Join(p.depotPath, id, "rootfs-provider")) if err != nil { rootFSProvider = []byte("invalid-rootfs-provider") } if err = p.iptablesMgr.ContainerTeardown(id); err != nil { return err } destroy := exec.Command(path.Join(p.binPath, "destroy.sh"), path.Join(p.depotPath, id)) err = pRunner.Run(destroy) if err != nil { return err } if shouldCleanRootfs(string(rootFSProvider)) { if err = p.rootFSProvider.Destroy(logger, id); err != nil { return err } } p.filterProvider.ProvideFilter(id).TearDown() return nil } func shouldCleanRootfs(rootFSProvider string) bool { // invalid-rootfs-provider indicates that this is probably a recent container that failed on create. 
// we should try to clean it up providers := []string{ "docker-local-aufs", "docker-local-vfs", "docker-remote-aufs", "docker-remote-vfs", "docker-composite", "invalid-rootfs-provider", } for _, provider := range providers { if provider == rootFSProvider { return true } } return false } func getHandle(handle, id string) string { if handle != "" { return handle } return id } func cleanup(err *error, undo func()) { if *err != nil { undo() } } func parseNetworkSpec(spec string) (subnets.SubnetSelector, subnets.IPSelector, error) { var ipSelector subnets.IPSelector = subnets.DynamicIPSelector var subnetSelector subnets.SubnetSelector = subnets.DynamicSubnetSelector if spec != "" { specifiedIP, ipn, err := net.ParseCIDR(suffixIfNeeded(spec)) if err != nil { return nil, nil, err } subnetSelector = subnets.StaticSubnetSelector{ipn} if !specifiedIP.Equal(subnets.NetworkIP(ipn)) { ipSelector = subnets.StaticIPSelector{specifiedIP} } } return subnetSelector, ipSelector, nil } func suffixIfNeeded(spec string) string { if !strings.Contains(spec, "/") { spec = spec + "/30" } return spec }
[ "\"PATH\"", "\"PATH\"" ]
[]
[ "PATH" ]
[]
["PATH"]
go
1
0
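The resource_pool.go record above ends with parseNetworkSpec, which decides between dynamic and static subnet/IP selection for a container network spec. The following standalone Go sketch mirrors only that decision rule using the standard library; the garden subnets selector types are not reproduced, and the helper name describeNetworkSpec is illustrative, not part of the source.

package main

import (
	"fmt"
	"net"
	"strings"
)

// describeNetworkSpec mirrors parseNetworkSpec's rules: an empty spec means
// fully dynamic allocation, a spec without a mask gets a /30 suffix, and a
// specified address that is not the network address pins the container IP.
func describeNetworkSpec(spec string) (string, error) {
	if spec == "" {
		return "dynamic subnet, dynamic IP", nil
	}
	if !strings.Contains(spec, "/") {
		spec += "/30"
	}
	ip, ipNet, err := net.ParseCIDR(spec)
	if err != nil {
		return "", err
	}
	if ip.Equal(ipNet.IP) {
		// The network address itself was given, so the pool may pick any free IP.
		return fmt.Sprintf("static subnet %s, dynamic IP", ipNet), nil
	}
	return fmt.Sprintf("static subnet %s, static IP %s", ipNet, ip), nil
}

func main() {
	for _, spec := range []string{"", "10.2.0.0/29", "10.2.0.5/29", "10.2.0.2"} {
		desc, err := describeNetworkSpec(spec)
		if err != nil {
			fmt.Printf("%q -> error: %v\n", spec, err)
			continue
		}
		fmt.Printf("%q -> %s\n", spec, desc)
	}
}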
docs/source/conf.py
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import sphinx_rtd_theme import os import sys # -- Project information ----------------------------------------------------- project = 'GDScript to reStructured' copyright = '2021, GDScript' author = 'Nathan Lavato' # The full version, including alpha/beta/rc tags version = '0.1.0' release = version pygments_style = 'sphinx' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. needs_sphinx = '3.0' sys.path.append(os.path.abspath("_extensions")) extensions = [ 'sphinx_tabs.tabs', "notfound.extension", "sphinx.ext.extlinks" ] # Warning when the Sphinx Tabs extension is used with unknown # builders (like the dummy builder) - as it doesn't cause errors, # we can ignore this so we still can treat other warnings as errors. sphinx_tabs_nowarn = True extlinks = { 'godot_class' : ('https://docs.godotengine.org/en/stable/classes/class_%s.html', '') } # Custom 4O4 page HTML template. # https://github.com/readthedocs/sphinx-notfound-page notfound_context = { "title": "Page not found", "body": """ <h1>Page not found</h1> <p> Sorry, we couldn't find that page. It may have been renamed or removed in the version of the documentation you're currently browsing. </p> <p> If you're currently browsing the <em>latest</em> version of the documentation, try browsing the <a href="/en/stable/"><em>stable</em> version of the documentation</a>. </p> <p> Alternatively, use the <a href="#" onclick="$('#rtd-search-form [name=\\'q\\']').focus()">Search docs</a> box on the left or <a href="/">go to the homepage</a>. </p> """, } # on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org on_rtd = os.environ.get("READTHEDOCS", None) == "True" if not on_rtd: notfound_urls_prefix = '' if not os.getenv("SPHINX_NO_GDSCRIPT"): extensions.append("gdscript") # if not os.getenv("SPHINX_NO_SEARCH"): # extensions.append("sphinx_search.extension") if not os.getenv("SPHINX_NO_DESCRIPTIONS"): extensions.append("godot_descriptions") # Add any paths that contain templates here, relative to this directory. 
templates_path = ['_templates'] # You can specify multiple suffix as a list of string: ['.rst', '.md'] # source_suffix = ['.rst', '.md'] source_suffix = '.rst' source_encoding = 'utf-8-sig' # The master toctree document master_doc = 'index' # Parse Sphinx tags passed from RTD via environment env_tags = os.getenv("SPHINX_TAGS") if env_tags is not None: for tag in env_tags.split(","): print("Adding Sphinx tag: %s" % tag.strip()) tags.add(tag.strip()) # noqa: F82 supported_languages = { "en": "Godot Engine (%s) documentation in English", } language = os.getenv("READTHEDOCS_LANGUAGE", "en") if not language in supported_languages.keys(): print("Unknown language: " + language) print("Supported languages: " + ", ".join(supported_languages.keys())) print( "The configured language is either wrong, or it should be added to supported_languages in conf.py. Falling back to 'en'." ) language = "en" is_i18n = tags.has("i18n") # noqa: F821 exclude_patterns = ["_build"] # fmt: off # These imports should *not* be moved to the start of the file, # they depend on the sys.path.append call registering "_extensions". # GDScript syntax highlighting from gdscript import GDScriptLexer from sphinx.highlighting import lexers lexers["gdscript"] = GDScriptLexer() # fmt: on smartquotes = False # Pygments (syntax highlighting) style to use pygments_style = "sphinx" highlight_language = "gdscript" # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # These paths are either relative to html_static_path # or fully qualified paths (eg. https://...) html_css_files = [ "css/custom.css", "css/my.css" ] html_js_files = [ "js/custom.js", ] html_theme_options = { 'logo_only': True, 'collapse_navigation': False } html_logo = "docs_logo.png" latex_elements = { 'extraclassoptions': 'openany', 'preamble': r''' \usepackage{subfig} \usepackage{graphicx} ''', 'papersize': 'a4paper' }
[]
[]
[ "READTHEDOCS_LANGUAGE", "SPHINX_NO_SEARCH", "SPHINX_NO_GDSCRIPT", "SPHINX_NO_DESCRIPTIONS", "SPHINX_TAGS", "READTHEDOCS" ]
[]
["READTHEDOCS_LANGUAGE", "SPHINX_NO_SEARCH", "SPHINX_NO_GDSCRIPT", "SPHINX_NO_DESCRIPTIONS", "SPHINX_TAGS", "READTHEDOCS"]
python
6
0
openmcp-dns-controller/pkg/mypdns/pdns.go
package mypdns import ( "context" "github.com/mittwald/go-powerdns" "github.com/mittwald/go-powerdns/apis/zones" dnsv1alpha1 "openmcp/openmcp/apis/dns/v1alpha1" "openmcp/openmcp/omcplog" "os" "sigs.k8s.io/controller-runtime/pkg/client" ) var ( PDNS_IP = os.Getenv("PDNS_IP") // "10.0.3.12" PDNS_PORT = os.Getenv("PDNS_PORT") // "8081" PDNS_API_KEY = os.Getenv("PDNS_API_KEY") // "1234" ) func PdnsNewClient() (pdns.Client, error) { pdnsClient, err := pdns.New( pdns.WithBaseURL("http://"+PDNS_IP+":"+PDNS_PORT), pdns.WithAPIKeyAuthentication(PDNS_API_KEY), ) return pdnsClient, err } func GetZone(pdnsClient pdns.Client, domain string) (*zones.Zone, error) { zone, err := pdnsClient.Zones().GetZone(context.TODO(), "localhost", domain+".") return zone, err } func GetZoneList(pdnsClient pdns.Client) ([]zones.Zone, error) { zoneList, err := pdnsClient.Zones().ListZones(context.TODO(), "localhost") return zoneList, err } func DeleteZone(pdnsClient pdns.Client, liveClient client.Client) error { instanceDNSEndpointList := &dnsv1alpha1.OpenMCPDNSEndpointList{} err := liveClient.List(context.TODO(), instanceDNSEndpointList, &client.ListOptions{}) if err != nil { return err } zoneList, err := GetZoneList(pdnsClient) if err != nil { return err } var deleteZone zones.Zone for _, zone := range zoneList { find := false for _, instanceDNSEndpoint := range instanceDNSEndpointList.Items { for _, domain := range instanceDNSEndpoint.Spec.Domains { if zone.Name == domain+"." { find = true break } } if find { break } } if !find { deleteZone = zone err := pdnsClient.Zones().DeleteZone(context.TODO(), "localhost", zone.Name) if err != nil { for { omcplog.V(0).Info("[ERROR Retry Delete] ", err) err = pdnsClient.Zones().DeleteZone(context.TODO(), "localhost", zone.Name) if err == nil { break } } } } } omcplog.V(2).Info("[Deleted Pdns Zone] ", deleteZone.Name) return nil } func GetResourceRecordSets(domainName string, Endpoints []*dnsv1alpha1.Endpoint) []zones.ResourceRecordSet { ResourceRecordSets := []zones.ResourceRecordSet{} for _, endpoint := range Endpoints { startIndex := len(endpoint.DNSName) - len(domainName) if startIndex < 0 { continue } if domainName != endpoint.DNSName[startIndex:] { continue } records := []zones.Record{} for _, target := range endpoint.Targets { record := zones.Record{ Content: target, Disabled: false, SetPTR: false, } records = append(records, record) } if len(records) == 0 { continue } existDNS := false for i, ResourceRecordSet := range ResourceRecordSets { if endpoint.DNSName + "." 
== ResourceRecordSet.Name { for _, record := range records { ResourceRecordSets[i].Records = append(ResourceRecordSets[i].Records, record) } existDNS = true break } } if !existDNS { ResourceRecordSet := zones.ResourceRecordSet{ Name: endpoint.DNSName + ".", Type: endpoint.RecordType, TTL: int(endpoint.RecordTTL), ChangeType: zones.ChangeTypeReplace, Records: records, Comments: nil, } ResourceRecordSets = append(ResourceRecordSets, ResourceRecordSet) } } omcplog.V(3).Info("[Get RecordSets] ", ResourceRecordSets) return ResourceRecordSets } func UpdateZoneWithRecords(client pdns.Client, domainName string, resourceRecordSets []zones.ResourceRecordSet) error { for _, resourceRecordSet := range resourceRecordSets { err := client.Zones().AddRecordSetToZone(context.TODO(), "localhost", domainName+".", resourceRecordSet) if err != nil { return err } } return nil } func CreateZoneWithRecords(client pdns.Client, domainName string, resourceRecordSets []zones.ResourceRecordSet) error { _, err := client.Zones().CreateZone(context.Background(), "localhost", zones.Zone{ Name: domainName + ".", Type: zones.ZoneTypeZone, Kind: zones.ZoneKindNative, Nameservers: []string{ "ns1.example.com.", "ns2.example.com.", }, ResourceRecordSets: resourceRecordSets, }) if err != nil { return err } return nil } func SyncZone(pdnsClient pdns.Client, domainName string, Endpoints []*dnsv1alpha1.Endpoint) error { _, err := GetZone(pdnsClient, domainName) resourceRecordSets := GetResourceRecordSets(domainName, Endpoints) if err == nil { // Already Exist err = UpdateZoneWithRecords(pdnsClient, domainName, resourceRecordSets) if err != nil { omcplog.V(0).Info("[OpenMCP External DNS Controller] : UpdateZone? ", err) } omcplog.V(2).Info("Update Zone ", domainName) } else { err = CreateZoneWithRecords(pdnsClient, domainName, resourceRecordSets) if err != nil { omcplog.V(0).Info("[OpenMCP External DNS Controller] : CreateZone? ", err) } omcplog.V(2).Info("Create Zone ", domainName) } return err }
[ "\"PDNS_IP\"", "\"PDNS_PORT\"", "\"PDNS_API_KEY\"" ]
[]
[ "PDNS_API_KEY", "PDNS_PORT", "PDNS_IP" ]
[]
["PDNS_API_KEY", "PDNS_PORT", "PDNS_IP"]
go
3
0
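The pdns.go record above groups OpenMCP DNS endpoints into a PowerDNS zone by checking that each endpoint's DNS name ends with the zone's domain. A minimal sketch of that suffix comparison; the helper name belongsToZone is illustrative and not part of the source.

package main

import "fmt"

// belongsToZone reproduces the index arithmetic used in GetResourceRecordSets:
// the endpoint DNS name must be at least as long as the zone domain and must
// end with it.
func belongsToZone(dnsName, domain string) bool {
	start := len(dnsName) - len(domain)
	if start < 0 {
		return false
	}
	return dnsName[start:] == domain
}

func main() {
	fmt.Println(belongsToZone("svc.cluster1.example.org", "example.org")) // true
	fmt.Println(belongsToZone("svc.cluster1.another.io", "example.org"))  // false
}

Note that, as in the original, a plain suffix match would also accept a name like notexample.org for the zone example.org, so the record relies on its endpoint names being well-formed members of the zone.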
sdk/resourcemanager/operationsmanagement/armoperationsmanagement/zz_generated_solutions_client.go
//go:build go1.16 // +build go1.16 // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. package armoperationsmanagement import ( "context" "errors" "fmt" "net/http" "net/url" "strings" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) // SolutionsClient contains the methods for the Solutions group. // Don't use this type directly, use NewSolutionsClient() instead. type SolutionsClient struct { ep string pl runtime.Pipeline subscriptionID string } // NewSolutionsClient creates a new instance of SolutionsClient with the specified values. func NewSolutionsClient(con *arm.Connection, subscriptionID string) *SolutionsClient { return &SolutionsClient{ep: con.Endpoint(), pl: con.NewPipeline(module, version), subscriptionID: subscriptionID} } // BeginCreateOrUpdate - Creates or updates the Solution. // If the operation fails it returns the *CodeMessageError error type. func (client *SolutionsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, solutionName string, parameters Solution, options *SolutionsBeginCreateOrUpdateOptions) (SolutionsCreateOrUpdatePollerResponse, error) { resp, err := client.createOrUpdate(ctx, resourceGroupName, solutionName, parameters, options) if err != nil { return SolutionsCreateOrUpdatePollerResponse{}, err } result := SolutionsCreateOrUpdatePollerResponse{ RawResponse: resp, } pt, err := armruntime.NewPoller("SolutionsClient.CreateOrUpdate", "", resp, client.pl, client.createOrUpdateHandleError) if err != nil { return SolutionsCreateOrUpdatePollerResponse{}, err } result.Poller = &SolutionsCreateOrUpdatePoller{ pt: pt, } return result, nil } // CreateOrUpdate - Creates or updates the Solution. // If the operation fails it returns the *CodeMessageError error type. func (client *SolutionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, solutionName string, parameters Solution, options *SolutionsBeginCreateOrUpdateOptions) (*http.Response, error) { req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, solutionName, parameters, options) if err != nil { return nil, err } resp, err := client.pl.Do(req) if err != nil { return nil, err } if !runtime.HasStatusCode(resp, http.StatusCreated) { return nil, client.createOrUpdateHandleError(resp) } return resp, nil } // createOrUpdateCreateRequest creates the CreateOrUpdate request. 
func (client *SolutionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, solutionName string, parameters Solution, options *SolutionsBeginCreateOrUpdateOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationsManagement/solutions/{solutionName}" if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) if resourceGroupName == "" { return nil, errors.New("parameter resourceGroupName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) if solutionName == "" { return nil, errors.New("parameter solutionName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{solutionName}", url.PathEscape(solutionName)) req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2015-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, runtime.MarshalAsJSON(req, parameters) } // createOrUpdateHandleError handles the CreateOrUpdate error response. func (client *SolutionsClient) createOrUpdateHandleError(resp *http.Response) error { body, err := runtime.Payload(resp) if err != nil { return runtime.NewResponseError(err, resp) } errType := CodeMessageError{raw: string(body)} if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil { return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp) } return runtime.NewResponseError(&errType, resp) } // BeginDelete - Deletes the solution in the subscription. // If the operation fails it returns the *CodeMessageError error type. func (client *SolutionsClient) BeginDelete(ctx context.Context, resourceGroupName string, solutionName string, options *SolutionsBeginDeleteOptions) (SolutionsDeletePollerResponse, error) { resp, err := client.deleteOperation(ctx, resourceGroupName, solutionName, options) if err != nil { return SolutionsDeletePollerResponse{}, err } result := SolutionsDeletePollerResponse{ RawResponse: resp, } pt, err := armruntime.NewPoller("SolutionsClient.Delete", "", resp, client.pl, client.deleteHandleError) if err != nil { return SolutionsDeletePollerResponse{}, err } result.Poller = &SolutionsDeletePoller{ pt: pt, } return result, nil } // Delete - Deletes the solution in the subscription. // If the operation fails it returns the *CodeMessageError error type. func (client *SolutionsClient) deleteOperation(ctx context.Context, resourceGroupName string, solutionName string, options *SolutionsBeginDeleteOptions) (*http.Response, error) { req, err := client.deleteCreateRequest(ctx, resourceGroupName, solutionName, options) if err != nil { return nil, err } resp, err := client.pl.Do(req) if err != nil { return nil, err } if !runtime.HasStatusCode(resp, http.StatusOK) { return nil, client.deleteHandleError(resp) } return resp, nil } // deleteCreateRequest creates the Delete request. 
func (client *SolutionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, solutionName string, options *SolutionsBeginDeleteOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationsManagement/solutions/{solutionName}" if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) if resourceGroupName == "" { return nil, errors.New("parameter resourceGroupName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) if solutionName == "" { return nil, errors.New("parameter solutionName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{solutionName}", url.PathEscape(solutionName)) req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2015-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, nil } // deleteHandleError handles the Delete error response. func (client *SolutionsClient) deleteHandleError(resp *http.Response) error { body, err := runtime.Payload(resp) if err != nil { return runtime.NewResponseError(err, resp) } errType := CodeMessageError{raw: string(body)} if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil { return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp) } return runtime.NewResponseError(&errType, resp) } // Get - Retrieves the user solution. // If the operation fails it returns the *CodeMessageError error type. func (client *SolutionsClient) Get(ctx context.Context, resourceGroupName string, solutionName string, options *SolutionsGetOptions) (SolutionsGetResponse, error) { req, err := client.getCreateRequest(ctx, resourceGroupName, solutionName, options) if err != nil { return SolutionsGetResponse{}, err } resp, err := client.pl.Do(req) if err != nil { return SolutionsGetResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { return SolutionsGetResponse{}, client.getHandleError(resp) } return client.getHandleResponse(resp) } // getCreateRequest creates the Get request. 
func (client *SolutionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, solutionName string, options *SolutionsGetOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationsManagement/solutions/{solutionName}" if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) if resourceGroupName == "" { return nil, errors.New("parameter resourceGroupName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) if solutionName == "" { return nil, errors.New("parameter solutionName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{solutionName}", url.PathEscape(solutionName)) req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2015-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, nil } // getHandleResponse handles the Get response. func (client *SolutionsClient) getHandleResponse(resp *http.Response) (SolutionsGetResponse, error) { result := SolutionsGetResponse{RawResponse: resp} if err := runtime.UnmarshalAsJSON(resp, &result.Solution); err != nil { return SolutionsGetResponse{}, err } return result, nil } // getHandleError handles the Get error response. func (client *SolutionsClient) getHandleError(resp *http.Response) error { body, err := runtime.Payload(resp) if err != nil { return runtime.NewResponseError(err, resp) } errType := CodeMessageError{raw: string(body)} if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil { return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp) } return runtime.NewResponseError(&errType, resp) } // ListByResourceGroup - Retrieves the solution list. It will retrieve both first party and third party solutions // If the operation fails it returns the *CodeMessageError error type. func (client *SolutionsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, options *SolutionsListByResourceGroupOptions) (SolutionsListByResourceGroupResponse, error) { req, err := client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) if err != nil { return SolutionsListByResourceGroupResponse{}, err } resp, err := client.pl.Do(req) if err != nil { return SolutionsListByResourceGroupResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { return SolutionsListByResourceGroupResponse{}, client.listByResourceGroupHandleError(resp) } return client.listByResourceGroupHandleResponse(resp) } // listByResourceGroupCreateRequest creates the ListByResourceGroup request. 
func (client *SolutionsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *SolutionsListByResourceGroupOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationsManagement/solutions" if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) if resourceGroupName == "" { return nil, errors.New("parameter resourceGroupName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2015-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, nil } // listByResourceGroupHandleResponse handles the ListByResourceGroup response. func (client *SolutionsClient) listByResourceGroupHandleResponse(resp *http.Response) (SolutionsListByResourceGroupResponse, error) { result := SolutionsListByResourceGroupResponse{RawResponse: resp} if err := runtime.UnmarshalAsJSON(resp, &result.SolutionPropertiesList); err != nil { return SolutionsListByResourceGroupResponse{}, err } return result, nil } // listByResourceGroupHandleError handles the ListByResourceGroup error response. func (client *SolutionsClient) listByResourceGroupHandleError(resp *http.Response) error { body, err := runtime.Payload(resp) if err != nil { return runtime.NewResponseError(err, resp) } errType := CodeMessageError{raw: string(body)} if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil { return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp) } return runtime.NewResponseError(&errType, resp) } // ListBySubscription - Retrieves the solution list. It will retrieve both first party and third party solutions // If the operation fails it returns the *CodeMessageError error type. func (client *SolutionsClient) ListBySubscription(ctx context.Context, options *SolutionsListBySubscriptionOptions) (SolutionsListBySubscriptionResponse, error) { req, err := client.listBySubscriptionCreateRequest(ctx, options) if err != nil { return SolutionsListBySubscriptionResponse{}, err } resp, err := client.pl.Do(req) if err != nil { return SolutionsListBySubscriptionResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { return SolutionsListBySubscriptionResponse{}, client.listBySubscriptionHandleError(resp) } return client.listBySubscriptionHandleResponse(resp) } // listBySubscriptionCreateRequest creates the ListBySubscription request. 
func (client *SolutionsClient) listBySubscriptionCreateRequest(ctx context.Context, options *SolutionsListBySubscriptionOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.OperationsManagement/solutions" if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2015-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, nil } // listBySubscriptionHandleResponse handles the ListBySubscription response. func (client *SolutionsClient) listBySubscriptionHandleResponse(resp *http.Response) (SolutionsListBySubscriptionResponse, error) { result := SolutionsListBySubscriptionResponse{RawResponse: resp} if err := runtime.UnmarshalAsJSON(resp, &result.SolutionPropertiesList); err != nil { return SolutionsListBySubscriptionResponse{}, err } return result, nil } // listBySubscriptionHandleError handles the ListBySubscription error response. func (client *SolutionsClient) listBySubscriptionHandleError(resp *http.Response) error { body, err := runtime.Payload(resp) if err != nil { return runtime.NewResponseError(err, resp) } errType := CodeMessageError{raw: string(body)} if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil { return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp) } return runtime.NewResponseError(&errType, resp) } // BeginUpdate - Patch a Solution. Only updating tags supported. // If the operation fails it returns the *CodeMessageError error type. func (client *SolutionsClient) BeginUpdate(ctx context.Context, resourceGroupName string, solutionName string, parameters SolutionPatch, options *SolutionsBeginUpdateOptions) (SolutionsUpdatePollerResponse, error) { resp, err := client.update(ctx, resourceGroupName, solutionName, parameters, options) if err != nil { return SolutionsUpdatePollerResponse{}, err } result := SolutionsUpdatePollerResponse{ RawResponse: resp, } pt, err := armruntime.NewPoller("SolutionsClient.Update", "", resp, client.pl, client.updateHandleError) if err != nil { return SolutionsUpdatePollerResponse{}, err } result.Poller = &SolutionsUpdatePoller{ pt: pt, } return result, nil } // Update - Patch a Solution. Only updating tags supported. // If the operation fails it returns the *CodeMessageError error type. func (client *SolutionsClient) update(ctx context.Context, resourceGroupName string, solutionName string, parameters SolutionPatch, options *SolutionsBeginUpdateOptions) (*http.Response, error) { req, err := client.updateCreateRequest(ctx, resourceGroupName, solutionName, parameters, options) if err != nil { return nil, err } resp, err := client.pl.Do(req) if err != nil { return nil, err } if !runtime.HasStatusCode(resp, http.StatusOK) { return nil, client.updateHandleError(resp) } return resp, nil } // updateCreateRequest creates the Update request. 
func (client *SolutionsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, solutionName string, parameters SolutionPatch, options *SolutionsBeginUpdateOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationsManagement/solutions/{solutionName}" if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) if resourceGroupName == "" { return nil, errors.New("parameter resourceGroupName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) if solutionName == "" { return nil, errors.New("parameter solutionName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{solutionName}", url.PathEscape(solutionName)) req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2015-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, runtime.MarshalAsJSON(req, parameters) } // updateHandleError handles the Update error response. func (client *SolutionsClient) updateHandleError(resp *http.Response) error { body, err := runtime.Payload(resp) if err != nil { return runtime.NewResponseError(err, resp) } errType := CodeMessageError{raw: string(body)} if err := runtime.UnmarshalAsJSON(resp, &errType); err != nil { return runtime.NewResponseError(fmt.Errorf("%s\n%s", string(body), err), resp) } return runtime.NewResponseError(&errType, resp) }
[]
[]
[]
[]
[]
go
null
null
null
appengine-java8/datastore-schedule-export/src/main/java/com/example/datastore/DatastoreExportServlet.java
/* * Copyright 2018 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.example.datastore; import com.google.appengine.api.appidentity.AppIdentityService; import com.google.appengine.api.appidentity.AppIdentityServiceFactory; import com.google.apphosting.api.ApiProxy; import com.google.common.io.CharStreams; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStreamWriter; import java.net.HttpURLConnection; import java.net.URL; import java.nio.charset.StandardCharsets; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; import java.util.logging.Logger; import javax.servlet.annotation.WebServlet; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.json.JSONArray; import org.json.JSONObject; import org.json.JSONTokener; @WebServlet(name = "DatastoreExportServlet", value = "/cloud-datastore-export") public class DatastoreExportServlet extends HttpServlet { private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); private static final Logger log = Logger.getLogger(DatastoreExportServlet.class.getName()); @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { // Validate outputURL parameter String outputUrlPrefix = request.getParameter("output_url_prefix"); if (outputUrlPrefix == null || !outputUrlPrefix.matches("^gs://.*")) { // Send error response if outputURL not set or not a Cloud Storage bucket response.setStatus(HttpServletResponse.SC_CONFLICT); response.setContentType("text/plain"); response.getWriter().println("Error: Must provide a valid output_url_prefix."); } else { // Put together export request headers URL url = new URL("https://datastore.googleapis.com/v1/projects/" + PROJECT_ID + ":export"); HttpURLConnection connection = (HttpURLConnection) url.openConnection(); connection.setDoOutput(true); connection.setRequestMethod("POST"); connection.addRequestProperty("Content-Type", "application/json"); // Get an access token to authorize export request ArrayList<String> scopes = new ArrayList<String>(); scopes.add("https://www.googleapis.com/auth/datastore"); final AppIdentityService appIdentity = AppIdentityServiceFactory.getAppIdentityService(); final AppIdentityService.GetAccessTokenResult accessToken = AppIdentityServiceFactory.getAppIdentityService().getAccessToken(scopes); connection.addRequestProperty("Authorization", "Bearer " + accessToken.getAccessToken()); // Build export request payload based on URL parameters // Required: output_url_prefix // Optional: entity filter JSONObject exportRequest = new JSONObject(); // If output prefix ends with a slash, use as-is // Otherwise, add a timestamp to form unique output url if (!outputUrlPrefix.endsWith("/")) { String timeStamp = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date()); outputUrlPrefix = outputUrlPrefix + "/" + timeStamp + "/"; } // Add outputUrl to 
payload exportRequest.put("output_url_prefix", outputUrlPrefix); // Build optional entity filter to export subset of // kinds or namespaces JSONObject entityFilter = new JSONObject(); // Read kind parameters and add to export request if not null String[] kinds = request.getParameterValues("kind"); if (kinds != null) { JSONArray kindsJson = new JSONArray(kinds); entityFilter.put("kinds", kindsJson); } // Read namespace parameters and add to export request if not null String[] namespaces = request.getParameterValues("namespace_id"); if (namespaces != null) { JSONArray namespacesJson = new JSONArray(namespaces); entityFilter.put("namespaceIds", namespacesJson); } // Add entity filter to payload // Finish export request payload exportRequest.put("entityFilter", entityFilter); // Send export request OutputStreamWriter writer = new OutputStreamWriter(connection.getOutputStream()); exportRequest.write(writer); writer.close(); // Examine server's response if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) { // Request failed, log errors and return InputStream s = connection.getErrorStream(); InputStreamReader r = new InputStreamReader(s, StandardCharsets.UTF_8); String errorMessage = String.format( "got error (%d) response %s from %s", connection.getResponseCode(), CharStreams.toString(r), connection.toString()); log.warning(errorMessage); response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); response.setContentType("text/plain"); response.getWriter().println( "Failed to initiate export."); return; } // Success, print export operation information JSONObject exportResponse = new JSONObject(new JSONTokener(connection.getInputStream())); response.setContentType("text/plain"); response.getWriter().println( "Export started:\n" + exportResponse.toString(4)); } } }
[ "\"GOOGLE_CLOUD_PROJECT\"" ]
[]
[ "GOOGLE_CLOUD_PROJECT" ]
[]
["GOOGLE_CLOUD_PROJECT"]
java
1
0
pkg/util/cmdutil/path.go
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package cmdutil import "os" // GetHomePath obtains a home path from a GOOS architecture. // // based on spf13/viper (userHomeDir) func GetHomePath(goos string) string { if goos != "windows" { return os.Getenv("HOME") } if home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH"); home != "" { return home } return os.Getenv("USERPROFILE") }
[ "\"HOME\"", "\"HOMEDRIVE\"", "\"HOMEPATH\"", "\"USERPROFILE\"" ]
[]
[ "USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE" ]
[]
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
go
4
0
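The path.go record above picks a home directory based on the GOOS string: HOME on non-Windows targets, and HOMEDRIVE+HOMEPATH with a USERPROFILE fallback on Windows. A runnable usage sketch of calling that logic with runtime.GOOS; the local getHomePath copy below simply restates the record's function so the example is self-contained rather than assuming an import path.

package main

import (
	"fmt"
	"os"
	"runtime"
)

// getHomePath restates GetHomePath from the record: non-Windows targets read
// HOME, Windows prefers HOMEDRIVE+HOMEPATH and falls back to USERPROFILE.
func getHomePath(goos string) string {
	if goos != "windows" {
		return os.Getenv("HOME")
	}
	if home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH"); home != "" {
		return home
	}
	return os.Getenv("USERPROFILE")
}

func main() {
	// Passing runtime.GOOS resolves the home directory for the platform the
	// example is running on.
	fmt.Println("home directory:", getHomePath(runtime.GOOS))
}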
chain/store/store.go
package store import ( "bytes" "context" "encoding/binary" "encoding/json" "io" "os" "strconv" "sync" "github.com/filecoin-project/go-state-types/crypto" "github.com/minio/blake2b-simd" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/journal" bstore "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/metrics" "go.opencensus.io/stats" "go.opencensus.io/trace" "go.uber.org/multierr" "github.com/filecoin-project/lotus/chain/types" lru "github.com/hashicorp/golang-lru" block "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" dstore "github.com/ipfs/go-datastore" cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log/v2" car "github.com/ipld/go-car" carutil "github.com/ipld/go-car/util" cbg "github.com/whyrusleeping/cbor-gen" pubsub "github.com/whyrusleeping/pubsub" "golang.org/x/xerrors" ) var log = logging.Logger("chainstore") var chainHeadKey = dstore.NewKey("head") var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation") var DefaultTipSetCacheSize = 8192 var DefaultMsgMetaCacheSize = 2048 func init() { if s := os.Getenv("LOTUS_CHAIN_TIPSET_CACHE"); s != "" { tscs, err := strconv.Atoi(s) if err != nil { log.Errorf("failed to parse 'LOTUS_CHAIN_TIPSET_CACHE' env var: %s", err) } DefaultTipSetCacheSize = tscs } if s := os.Getenv("LOTUS_CHAIN_MSGMETA_CACHE"); s != "" { mmcs, err := strconv.Atoi(s) if err != nil { log.Errorf("failed to parse 'LOTUS_CHAIN_MSGMETA_CACHE' env var: %s", err) } DefaultMsgMetaCacheSize = mmcs } } // ReorgNotifee represents a callback that gets called upon reorgs. type ReorgNotifee func(rev, app []*types.TipSet) error // ChainStore is the main point of access to chain data. // // Raw chain data is stored in the Blockstore, with relevant markers (genesis, // latest head tipset references) being tracked in the Datastore (key-value // store). // // To alleviate disk access, the ChainStore has two ARC caches: // 1. a tipset cache // 2. a block => messages references cache. 
type ChainStore struct { bs bstore.Blockstore ds dstore.Datastore heaviestLk sync.Mutex heaviest *types.TipSet bestTips *pubsub.PubSub pubLk sync.Mutex tstLk sync.Mutex tipsets map[abi.ChainEpoch][]cid.Cid cindex *ChainIndex reorgCh chan<- reorg reorgNotifeeCh chan ReorgNotifee mmCache *lru.ARCCache tsCache *lru.ARCCache vmcalls vm.SyscallBuilder } func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder) *ChainStore { c, _ := lru.NewARC(DefaultMsgMetaCacheSize) tsc, _ := lru.NewARC(DefaultTipSetCacheSize) cs := &ChainStore{ bs: bs, ds: ds, bestTips: pubsub.New(64), tipsets: make(map[abi.ChainEpoch][]cid.Cid), mmCache: c, tsCache: tsc, vmcalls: vmcalls, } ci := NewChainIndex(cs.LoadTipSet) cs.cindex = ci hcnf := func(rev, app []*types.TipSet) error { cs.pubLk.Lock() defer cs.pubLk.Unlock() notif := make([]*api.HeadChange, len(rev)+len(app)) for i, r := range rev { notif[i] = &api.HeadChange{ Type: HCRevert, Val: r, } } for i, r := range app { notif[i+len(rev)] = &api.HeadChange{ Type: HCApply, Val: r, } } cs.bestTips.Pub(notif, "headchange") return nil } hcmetric := func(rev, app []*types.TipSet) error { ctx := context.Background() for _, r := range app { stats.Record(ctx, metrics.ChainNodeHeight.M(int64(r.Height()))) } return nil } cs.reorgNotifeeCh = make(chan ReorgNotifee) cs.reorgCh = cs.reorgWorker(context.TODO(), []ReorgNotifee{hcnf, hcmetric}) return cs } func (cs *ChainStore) Load() error { head, err := cs.ds.Get(chainHeadKey) if err == dstore.ErrNotFound { log.Warn("no previous chain state found") return nil } if err != nil { return xerrors.Errorf("failed to load chain state from datastore: %w", err) } var tscids []cid.Cid if err := json.Unmarshal(head, &tscids); err != nil { return xerrors.Errorf("failed to unmarshal stored chain head: %w", err) } ts, err := cs.LoadTipSet(types.NewTipSetKey(tscids...)) if err != nil { return xerrors.Errorf("loading tipset: %w", err) } cs.heaviest = ts return nil } func (cs *ChainStore) writeHead(ts *types.TipSet) error { data, err := json.Marshal(ts.Cids()) if err != nil { return xerrors.Errorf("failed to marshal tipset: %w", err) } if err := cs.ds.Put(chainHeadKey, data); err != nil { return xerrors.Errorf("failed to write chain head to datastore: %w", err) } return nil } const ( HCRevert = "revert" HCApply = "apply" HCCurrent = "current" ) func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange { cs.pubLk.Lock() subch := cs.bestTips.Sub("headchange") head := cs.GetHeaviestTipSet() cs.pubLk.Unlock() out := make(chan []*api.HeadChange, 16) out <- []*api.HeadChange{{ Type: HCCurrent, Val: head, }} go func() { defer close(out) var unsubOnce sync.Once for { select { case val, ok := <-subch: if !ok { log.Warn("chain head sub exit loop") return } if len(out) > 0 { log.Warnf("head change sub is slow, has %d buffered entries", len(out)) } select { case out <- val.([]*api.HeadChange): case <-ctx.Done(): } case <-ctx.Done(): unsubOnce.Do(func() { go cs.bestTips.Unsub(subch) }) } } }() return out } func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) { cs.reorgNotifeeCh <- f } func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) return cs.ds.Has(key) } func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) if err := cs.ds.Put(key, []byte{0}); err != nil { return xerrors.Errorf("cache block validation: %w", 
err) } return nil } func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error { ts, err := types.NewTipSet([]*types.BlockHeader{b}) if err != nil { return err } if err := cs.PutTipSet(context.TODO(), ts); err != nil { return err } return cs.ds.Put(dstore.NewKey("0"), b.Cid().Bytes()) } func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error { for _, b := range ts.Blocks() { if err := cs.PersistBlockHeaders(b); err != nil { return err } } expanded, err := cs.expandTipset(ts.Blocks()[0]) if err != nil { return xerrors.Errorf("errored while expanding tipset: %w", err) } log.Debugf("expanded %s into %s\n", ts.Cids(), expanded.Cids()) if err := cs.MaybeTakeHeavierTipSet(ctx, expanded); err != nil { return xerrors.Errorf("MaybeTakeHeavierTipSet failed in PutTipSet: %w", err) } return nil } // MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our // internal state as our new head, if and only if it is heavier than the current // head. func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error { cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() w, err := cs.Weight(ctx, ts) if err != nil { return err } heaviestW, err := cs.Weight(ctx, cs.heaviest) if err != nil { return err } if w.GreaterThan(heaviestW) { // TODO: don't do this for initial sync. Now that we don't have a // difference between 'bootstrap sync' and 'caught up' sync, we need // some other heuristic. return cs.takeHeaviestTipSet(ctx, ts) } return nil } type reorg struct { old *types.TipSet new *types.TipSet } func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNotifee) chan<- reorg { out := make(chan reorg, 32) notifees := make([]ReorgNotifee, len(initialNotifees)) copy(notifees, initialNotifees) go func() { defer log.Warn("reorgWorker quit") for { select { case n := <-cs.reorgNotifeeCh: notifees = append(notifees, n) case r := <-out: revert, apply, err := cs.ReorgOps(r.old, r.new) if err != nil { log.Error("computing reorg ops failed: ", err) continue } journal.Add("sync", map[string]interface{}{ "op": "headChange", "from": r.old.Key(), "to": r.new.Key(), "rev": len(revert), "apply": len(apply), }) // reverse the apply array for i := len(apply)/2 - 1; i >= 0; i-- { opp := len(apply) - 1 - i apply[i], apply[opp] = apply[opp], apply[i] } for _, hcf := range notifees { if err := hcf(revert, apply); err != nil { log.Error("head change func errored (BAD): ", err) } } case <-ctx.Done(): return } } }() return out } // takeHeaviestTipSet actually sets the incoming tipset as our head both in // memory and in the ChainStore. It also sends a notification to deliver to // ReorgNotifees. func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) error { _, span := trace.StartSpan(ctx, "takeHeaviestTipSet") defer span.End() if cs.heaviest != nil { // buf if len(cs.reorgCh) > 0 { log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh)) } cs.reorgCh <- reorg{ old: cs.heaviest, new: ts, } } else { log.Warnf("no heaviest tipset found, using %s", ts.Cids()) } span.AddAttributes(trace.BoolAttribute("newHead", true)) log.Infof("New heaviest tipset! %s (height=%d)", ts.Cids(), ts.Height()) cs.heaviest = ts if err := cs.writeHead(ts); err != nil { log.Errorf("failed to write chain head: %s", err) return nil } return nil } // SetHead sets the chainstores current 'best' head node. 
// This should only be called if something is broken and needs fixing func (cs *ChainStore) SetHead(ts *types.TipSet) error { cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() return cs.takeHeaviestTipSet(context.TODO(), ts) } // Contains returns whether our BlockStore has all blocks in the supplied TipSet. func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { for _, c := range ts.Cids() { has, err := cs.bs.Has(c) if err != nil { return false, err } if !has { return false, nil } } return true, nil } // GetBlock fetches a BlockHeader with the supplied CID. It returns // blockstore.ErrNotFound if the block was not found in the BlockStore. func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) { sb, err := cs.bs.Get(c) if err != nil { return nil, err } return types.DecodeBlock(sb.RawData()) } func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { v, ok := cs.tsCache.Get(tsk) if ok { return v.(*types.TipSet), nil } var blks []*types.BlockHeader for _, c := range tsk.Cids() { b, err := cs.GetBlock(c) if err != nil { return nil, xerrors.Errorf("get block %s: %w", c, err) } blks = append(blks, b) } ts, err := types.NewTipSet(blks) if err != nil { return nil, err } cs.tsCache.Add(tsk, ts) return ts, nil } // IsAncestorOf returns true if 'a' is an ancestor of 'b' func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) { if b.Height() <= a.Height() { return false, nil } cur := b for !a.Equals(cur) && cur.Height() > a.Height() { next, err := cs.LoadTipSet(cur.Parents()) if err != nil { return false, err } cur = next } return cur.Equals(a), nil } func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet, error) { l, _, err := cs.ReorgOps(a, b) if err != nil { return nil, err } return cs.LoadTipSet(l[len(l)-1].Parents()) } func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { return ReorgOps(cs.LoadTipSet, a, b) } func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { left := a right := b var leftChain, rightChain []*types.TipSet for !left.Equals(right) { if left.Height() > right.Height() { leftChain = append(leftChain, left) par, err := lts(left.Parents()) if err != nil { return nil, nil, err } left = par } else { rightChain = append(rightChain, right) par, err := lts(right.Parents()) if err != nil { log.Infof("failed to fetch right.Parents: %s", err) return nil, nil, err } right = par } } return leftChain, rightChain, nil } // GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head). func (cs *ChainStore) GetHeaviestTipSet() *types.TipSet { cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() return cs.heaviest } func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error { cs.tstLk.Lock() defer cs.tstLk.Unlock() tss := cs.tipsets[b.Height] for _, oc := range tss { if oc == b.Cid() { log.Debug("tried to add block to tipset tracker that was already there") return nil } } cs.tipsets[b.Height] = append(tss, b.Cid()) // TODO: do we want to look for slashable submissions here? might as well... 
return nil } func (cs *ChainStore) PersistBlockHeaders(b ...*types.BlockHeader) error { sbs := make([]block.Block, len(b)) for i, header := range b { var err error sbs[i], err = header.ToStorageBlock() if err != nil { return err } } batchSize := 256 calls := len(b) / batchSize var err error for i := 0; i <= calls; i++ { start := batchSize * i end := start + batchSize if end > len(b) { end = len(b) } err = multierr.Append(err, cs.bs.PutMany(sbs[start:end])) } return err } type storable interface { ToStorageBlock() (block.Block, error) } func PutMessage(bs bstore.Blockstore, m storable) (cid.Cid, error) { b, err := m.ToStorageBlock() if err != nil { return cid.Undef, err } if err := bs.Put(b); err != nil { return cid.Undef, err } return b.Cid(), nil } func (cs *ChainStore) PutMessage(m storable) (cid.Cid, error) { return PutMessage(cs.bs, m) } func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) { // Hold lock for the whole function for now, if it becomes a problem we can // fix pretty easily cs.tstLk.Lock() defer cs.tstLk.Unlock() all := []*types.BlockHeader{b} tsets, ok := cs.tipsets[b.Height] if !ok { return types.NewTipSet(all) } inclMiners := map[address.Address]bool{b.Miner: true} for _, bhc := range tsets { if bhc == b.Cid() { continue } h, err := cs.GetBlock(bhc) if err != nil { return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err) } if inclMiners[h.Miner] { log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache", h.Miner, h.Height) continue } if types.CidArrsEqual(h.Parents, b.Parents) { all = append(all, h) inclMiners[h.Miner] = true } } // TODO: other validation...? return types.NewTipSet(all) } func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error { if err := cs.PersistBlockHeaders(b); err != nil { return err } ts, err := cs.expandTipset(b) if err != nil { return err } if err := cs.MaybeTakeHeavierTipSet(ctx, ts); err != nil { return xerrors.Errorf("MaybeTakeHeavierTipSet failed: %w", err) } return nil } func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) { data, err := cs.ds.Get(dstore.NewKey("0")) if err != nil { return nil, err } c, err := cid.Cast(data) if err != nil { return nil, err } genb, err := cs.bs.Get(c) if err != nil { return nil, err } return types.DecodeBlock(genb.RawData()) } func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) { m, err := cs.GetMessage(c) if err == nil { return m, nil } if err != bstore.ErrNotFound { log.Warnf("GetCMessage: unexpected error getting unsigned message: %s", err) } return cs.GetSignedMessage(c) } func (cs *ChainStore) GetMessage(c cid.Cid) (*types.Message, error) { sb, err := cs.bs.Get(c) if err != nil { log.Errorf("get message get failed: %s: %s", c, err) return nil, err } return types.DecodeMessage(sb.RawData()) } func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error) { sb, err := cs.bs.Get(c) if err != nil { log.Errorf("get message get failed: %s: %s", c, err) return nil, err } return types.DecodeSignedMessage(sb.RawData()) } func (cs *ChainStore) readAMTCids(root cid.Cid) ([]cid.Cid, error) { ctx := context.TODO() a, err := adt.AsArray(cs.Store(ctx), root) if err != nil { return nil, xerrors.Errorf("amt load: %w", err) } var ( cids []cid.Cid cborCid cbg.CborCid ) if err := a.ForEach(&cborCid, func(i int64) error { c := cid.Cid(cborCid) cids = append(cids, c) return nil }); err != nil { return nil, xerrors.Errorf("failed to traverse amt: %w", err) } if 
uint64(len(cids)) != a.Length() { return nil, xerrors.Errorf("found %d cids, expected %d", len(cids), a.Length()) } return cids, nil } type BlockMessages struct { Miner address.Address BlsMessages []types.ChainMsg SecpkMessages []types.ChainMsg WinCount int64 } func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) { applied := make(map[address.Address]uint64) cst := cbor.NewCborStore(cs.bs) st, err := state.LoadStateTree(cst, ts.Blocks()[0].ParentStateRoot) if err != nil { return nil, xerrors.Errorf("failed to load state tree") } preloadAddr := func(a address.Address) error { if _, ok := applied[a]; !ok { act, err := st.GetActor(a) if err != nil { return err } applied[a] = act.Nonce } return nil } selectMsg := func(m *types.Message) (bool, error) { if err := preloadAddr(m.From); err != nil { return false, err } if applied[m.From] != m.Nonce { return false, nil } applied[m.From]++ return true, nil } var out []BlockMessages for _, b := range ts.Blocks() { bms, sms, err := cs.MessagesForBlock(b) if err != nil { return nil, xerrors.Errorf("failed to get messages for block: %w", err) } bm := BlockMessages{ Miner: b.Miner, BlsMessages: make([]types.ChainMsg, 0, len(bms)), SecpkMessages: make([]types.ChainMsg, 0, len(sms)), WinCount: b.ElectionProof.WinCount, } for _, bmsg := range bms { b, err := selectMsg(bmsg.VMMessage()) if err != nil { return nil, xerrors.Errorf("failed to decide whether to select message for block: %w", err) } if b { bm.BlsMessages = append(bm.BlsMessages, bmsg) } } for _, smsg := range sms { b, err := selectMsg(smsg.VMMessage()) if err != nil { return nil, xerrors.Errorf("failed to decide whether to select message for block: %w", err) } if b { bm.SecpkMessages = append(bm.SecpkMessages, smsg) } } out = append(out, bm) } return out, nil } func (cs *ChainStore) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { bmsgs, err := cs.BlockMsgsForTipset(ts) if err != nil { return nil, err } var out []types.ChainMsg for _, bm := range bmsgs { for _, blsm := range bm.BlsMessages { out = append(out, blsm) } for _, secm := range bm.SecpkMessages { out = append(out, secm) } } return out, nil } type mmCids struct { bls []cid.Cid secpk []cid.Cid } func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) { o, ok := cs.mmCache.Get(mmc) if ok { mmcids := o.(*mmCids) return mmcids.bls, mmcids.secpk, nil } cst := cbor.NewCborStore(cs.bs) var msgmeta types.MsgMeta if err := cst.Get(context.TODO(), mmc, &msgmeta); err != nil { return nil, nil, xerrors.Errorf("failed to load msgmeta (%s): %w", mmc, err) } blscids, err := cs.readAMTCids(msgmeta.BlsMessages) if err != nil { return nil, nil, xerrors.Errorf("loading bls message cids for block: %w", err) } secpkcids, err := cs.readAMTCids(msgmeta.SecpkMessages) if err != nil { return nil, nil, xerrors.Errorf("loading secpk message cids for block: %w", err) } cs.mmCache.Add(mmc, &mmCids{ bls: blscids, secpk: secpkcids, }) return blscids, secpkcids, nil } func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) { fts, err := cs.LoadTipSet(from) if err != nil { return nil, xerrors.Errorf("loading from tipset %s: %w", from, err) } tts, err := cs.LoadTipSet(to) if err != nil { return nil, xerrors.Errorf("loading to tipset %s: %w", to, err) } revert, apply, err := cs.ReorgOps(fts, tts) if err != nil { return nil, xerrors.Errorf("error getting tipset branches: %w", err) } path := make([]*api.HeadChange, len(revert)+len(apply)) for 
i, r := range revert { path[i] = &api.HeadChange{Type: HCRevert, Val: r} } for j, i := 0, len(apply)-1; i >= 0; j, i = j+1, i-1 { path[j+len(revert)] = &api.HeadChange{Type: HCApply, Val: apply[i]} } return path, nil } func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { blscids, secpkcids, err := cs.ReadMsgMetaCids(b.Messages) if err != nil { return nil, nil, err } blsmsgs, err := cs.LoadMessagesFromCids(blscids) if err != nil { return nil, nil, xerrors.Errorf("loading bls messages for block: %w", err) } secpkmsgs, err := cs.LoadSignedMessagesFromCids(secpkcids) if err != nil { return nil, nil, xerrors.Errorf("loading secpk messages for block: %w", err) } return blsmsgs, secpkmsgs, nil } func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.MessageReceipt, error) { ctx := context.TODO() a, err := adt.AsArray(cs.Store(ctx), b.ParentMessageReceipts) if err != nil { return nil, xerrors.Errorf("amt load: %w", err) } var r types.MessageReceipt if found, err := a.Get(uint64(i), &r); err != nil { return nil, err } else if !found { return nil, xerrors.Errorf("failed to find receipt %d", i) } return &r, nil } func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.Message, error) { msgs := make([]*types.Message, 0, len(cids)) for i, c := range cids { m, err := cs.GetMessage(c) if err != nil { return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err) } msgs = append(msgs, m) } return msgs, nil } func (cs *ChainStore) LoadSignedMessagesFromCids(cids []cid.Cid) ([]*types.SignedMessage, error) { msgs := make([]*types.SignedMessage, 0, len(cids)) for i, c := range cids { m, err := cs.GetSignedMessage(c) if err != nil { return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err) } msgs = append(msgs, m) } return msgs, nil } func (cs *ChainStore) Blockstore() bstore.Blockstore { return cs.bs } func ActorStore(ctx context.Context, bs bstore.Blockstore) adt.Store { return adt.WrapStore(ctx, cbor.NewCborStore(bs)) } func (cs *ChainStore) Store(ctx context.Context) adt.Store { return ActorStore(ctx, cs.bs) } func (cs *ChainStore) VMSys() vm.SyscallBuilder { return cs.vmcalls } func (cs *ChainStore) TryFillTipSet(ts *types.TipSet) (*FullTipSet, error) { var out []*types.FullBlock for _, b := range ts.Blocks() { bmsgs, smsgs, err := cs.MessagesForBlock(b) if err != nil { // TODO: check for 'not found' errors, and only return nil if this // is actually a 'not found' error return nil, nil } fb := &types.FullBlock{ Header: b, BlsMessages: bmsgs, SecpkMessages: smsgs, } out = append(out, fb) } return NewFullTipSet(out), nil } func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { h := blake2b.New256() if err := binary.Write(h, binary.BigEndian, int64(pers)); err != nil { return nil, xerrors.Errorf("deriving randomness: %w", err) } VRFDigest := blake2b.Sum256(rbase) _, err := h.Write(VRFDigest[:]) if err != nil { return nil, xerrors.Errorf("hashing VRFDigest: %w", err) } if err := binary.Write(h, binary.BigEndian, round); err != nil { return nil, xerrors.Errorf("deriving randomness: %w", err) } _, err = h.Write(entropy) if err != nil { return nil, xerrors.Errorf("hashing entropy: %w", err) } return h.Sum(nil), nil } func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { _, span := trace.StartSpan(ctx, 
"store.GetBeaconRandomness") defer span.End() span.AddAttributes(trace.Int64Attribute("round", int64(round))) ts, err := cs.LoadTipSet(types.NewTipSetKey(blks...)) if err != nil { return nil, err } if round > ts.Height() { return nil, xerrors.Errorf("cannot draw randomness from the future") } searchHeight := round if searchHeight < 0 { searchHeight = 0 } randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true) if err != nil { return nil, err } be, err := cs.GetLatestBeaconEntry(randTs) if err != nil { return nil, err } // if at (or just past -- for null epochs) appropriate epoch // or at genesis (works for negative epochs) return DrawRandomness(be.Data, pers, round, entropy) } func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { _, span := trace.StartSpan(ctx, "store.GetChainRandomness") defer span.End() span.AddAttributes(trace.Int64Attribute("round", int64(round))) ts, err := cs.LoadTipSet(types.NewTipSetKey(blks...)) if err != nil { return nil, err } if round > ts.Height() { return nil, xerrors.Errorf("cannot draw randomness from the future") } searchHeight := round if searchHeight < 0 { searchHeight = 0 } randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true) if err != nil { return nil, err } mtb := randTs.MinTicketBlock() // if at (or just past -- for null epochs) appropriate epoch // or at genesis (works for negative epochs) return DrawRandomness(mtb.Ticket.VRFProof, pers, round, entropy) } // GetTipsetByHeight returns the tipset on the chain behind 'ts' at the given // height. In the case that the given height is a null round, the 'prev' flag // selects the tipset before the null round if true, and the tipset following // the null round if false. func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, ts *types.TipSet, prev bool) (*types.TipSet, error) { if ts == nil { ts = cs.GetHeaviestTipSet() } if h > ts.Height() { return nil, xerrors.Errorf("looking for tipset with height greater than start point") } if h == ts.Height() { return ts, nil } lbts, err := cs.cindex.GetTipsetByHeight(ctx, ts, h) if err != nil { return nil, err } if lbts.Height() < h { log.Warnf("chain index returned the wrong tipset at height %d, using slow retrieval", h) lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ts, h) if err != nil { return nil, err } } if lbts.Height() == h || !prev { return lbts, nil } return cs.LoadTipSet(lbts.Parents()) } func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) { if root.Prefix().Codec != cid.DagCBOR { return in, nil } data, err := bs.Get(root) if err != nil { return nil, xerrors.Errorf("recurse links get (%s) failed: %w", root, err) } var rerr error err = cbg.ScanForLinks(bytes.NewReader(data.RawData()), func(c cid.Cid) { if rerr != nil { // No error return on ScanForLinks :( return } // traversed this already... 
if !walked.Visit(c) { return } in = append(in, c) var err error in, err = recurseLinks(bs, walked, c, in) if err != nil { rerr = err } }) if err != nil { return nil, xerrors.Errorf("scanning for links failed: %w", err) } return in, rerr } func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, w io.Writer) error { if ts == nil { ts = cs.GetHeaviestTipSet() } seen := cid.NewSet() walked := cid.NewSet() h := &car.CarHeader{ Roots: ts.Cids(), Version: 1, } if err := car.WriteHeader(h, w); err != nil { return xerrors.Errorf("failed to write car header: %s", err) } blocksToWalk := ts.Cids() walkChain := func(blk cid.Cid) error { if !seen.Visit(blk) { return nil } data, err := cs.bs.Get(blk) if err != nil { return xerrors.Errorf("getting block: %w", err) } if err := carutil.LdWrite(w, blk.Bytes(), data.RawData()); err != nil { return xerrors.Errorf("failed to write block to car output: %w", err) } var b types.BlockHeader if err := b.UnmarshalCBOR(bytes.NewBuffer(data.RawData())); err != nil { return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err) } var cids []cid.Cid if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots { mcids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages}) if err != nil { return xerrors.Errorf("recursing messages failed: %w", err) } cids = mcids } if b.Height > 0 { for _, p := range b.Parents { blocksToWalk = append(blocksToWalk, p) } } else { // include the genesis block cids = append(cids, b.Parents...) } out := cids if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots { cids, err := recurseLinks(cs.bs, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) if err != nil { return xerrors.Errorf("recursing genesis state failed: %w", err) } out = append(out, cids...) 
} for _, c := range out { if seen.Visit(c) { if c.Prefix().Codec != cid.DagCBOR { continue } data, err := cs.bs.Get(c) if err != nil { return xerrors.Errorf("writing object to car (get %s): %w", c, err) } if err := carutil.LdWrite(w, c.Bytes(), data.RawData()); err != nil { return xerrors.Errorf("failed to write out car object: %w", err) } } } return nil } for len(blocksToWalk) > 0 { next := blocksToWalk[0] blocksToWalk = blocksToWalk[1:] if err := walkChain(next); err != nil { return xerrors.Errorf("walk chain failed: %w", err) } } return nil } func (cs *ChainStore) Import(r io.Reader) (*types.TipSet, error) { header, err := car.LoadCar(cs.Blockstore(), r) if err != nil { return nil, xerrors.Errorf("loadcar failed: %w", err) } root, err := cs.LoadTipSet(types.NewTipSetKey(header.Roots...)) if err != nil { return nil, xerrors.Errorf("failed to load root tipset from chainfile: %w", err) } return root, nil } func (cs *ChainStore) GetLatestBeaconEntry(ts *types.TipSet) (*types.BeaconEntry, error) { cur := ts for i := 0; i < 20; i++ { cbe := cur.Blocks()[0].BeaconEntries if len(cbe) > 0 { return &cbe[len(cbe)-1], nil } if cur.Height() == 0 { return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry") } next, err := cs.LoadTipSet(cur.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err) } cur = next } if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" { return &types.BeaconEntry{ Data: []byte{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}, }, nil } return nil, xerrors.Errorf("found NO beacon entries in the 20 blocks prior to given tipset") } type chainRand struct { cs *ChainStore blks []cid.Cid } func NewChainRand(cs *ChainStore, blks []cid.Cid) vm.Rand { return &chainRand{ cs: cs, blks: blks, } } func (cr *chainRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { return cr.cs.GetChainRandomness(ctx, cr.blks, pers, round, entropy) } func (cr *chainRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { return cr.cs.GetBeaconRandomness(ctx, cr.blks, pers, round, entropy) } func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) { if tsk.IsEmpty() { return cs.GetHeaviestTipSet(), nil } return cs.LoadTipSet(tsk) }
[ "\"LOTUS_CHAIN_TIPSET_CACHE\"", "\"LOTUS_CHAIN_MSGMETA_CACHE\"", "\"LOTUS_IGNORE_DRAND\"" ]
[]
[ "LOTUS_CHAIN_TIPSET_CACHE", "LOTUS_IGNORE_DRAND", "LOTUS_CHAIN_MSGMETA_CACHE" ]
[]
["LOTUS_CHAIN_TIPSET_CACHE", "LOTUS_IGNORE_DRAND", "LOTUS_CHAIN_MSGMETA_CACHE"]
go
3
0
completers/gh_completer/cmd/action/run/run.go
package run

import (
	"bytes"
	"fmt"
	exec "golang.org/x/sys/execabs"
	"io"
	"os"
	"path/filepath"
	"strings"
)

// Runnable is typically an exec.Cmd or its stub in tests
type Runnable interface {
	Output() ([]byte, error)
	Run() error
}

// PrepareCmd extends exec.Cmd with extra error reporting features and provides a
// hook to stub command execution in tests
var PrepareCmd = func(cmd *exec.Cmd) Runnable {
	return &cmdWithStderr{cmd}
}

// cmdWithStderr augments exec.Cmd by adding stderr to the error message
type cmdWithStderr struct {
	*exec.Cmd
}

func (c cmdWithStderr) Output() ([]byte, error) {
	if os.Getenv("DEBUG") != "" {
		_ = printArgs(os.Stderr, c.Cmd.Args)
	}
	if c.Cmd.Stderr != nil {
		return c.Cmd.Output()
	}
	errStream := &bytes.Buffer{}
	c.Cmd.Stderr = errStream
	out, err := c.Cmd.Output()
	if err != nil {
		err = &CmdError{errStream, c.Cmd.Args, err}
	}
	return out, err
}

func (c cmdWithStderr) Run() error {
	if os.Getenv("DEBUG") != "" {
		_ = printArgs(os.Stderr, c.Cmd.Args)
	}
	if c.Cmd.Stderr != nil {
		return c.Cmd.Run()
	}
	errStream := &bytes.Buffer{}
	c.Cmd.Stderr = errStream
	err := c.Cmd.Run()
	if err != nil {
		err = &CmdError{errStream, c.Cmd.Args, err}
	}
	return err
}

// CmdError provides more visibility into why an exec.Cmd had failed
type CmdError struct {
	Stderr *bytes.Buffer
	Args   []string
	Err    error
}

func (e CmdError) Error() string {
	msg := e.Stderr.String()
	if msg != "" && !strings.HasSuffix(msg, "\n") {
		msg += "\n"
	}
	return fmt.Sprintf("%s%s: %s", msg, e.Args[0], e.Err)
}

func printArgs(w io.Writer, args []string) error {
	if len(args) > 0 {
		// print commands, but omit the full path to an executable
		args = append([]string{filepath.Base(args[0])}, args[1:]...)
	}
	_, err := fmt.Fprintf(w, "%v\n", args)
	return err
}
[ "\"DEBUG\"", "\"DEBUG\"" ]
[]
[ "DEBUG" ]
[]
["DEBUG"]
go
1
0
manage.py
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TwitterClone.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
tests/dist/utils.py
import subprocess
import multiprocessing as mp
from typing import Optional
import os


def run(ssh_cmd):
    subprocess.check_call(ssh_cmd, shell=True)


def execute_remote(
    cmd: str, ip: str, port: Optional[int] = 22, username: Optional[str] = ""
) -> mp.Process:
    """Execute command line on remote machine via ssh.

    Args:
        cmd: User-defined command (udf) to execute on the remote host.
        ip: The ip-address of the host to run the command on.
        port: Port number that the host is listening on.
        username: Optional. If given, this will specify a username to use when issuing commands
            over SSH. Useful when your infra requires you to explicitly specify a username to
            avoid permission issues.

    Returns:
        Process: The Process whose run() is to run the `cmd` on the remote host. Returns when
            the cmd completes on the remote host.
    """
    ip_prefix = ""
    if username:
        ip_prefix += "{username}@".format(username=username)

    custom_port = os.getenv('DIST_DGL_TEST_SSH_PORT', '')
    if custom_port:
        port = custom_port
    custom_ssh_key = os.getenv('DIST_DGL_TEST_SSH_KEY', '')
    if custom_ssh_key:
        custom_ssh_key = os.path.expanduser(custom_ssh_key)
        custom_ssh_key = "-i " + custom_ssh_key
    ssh_setup = os.getenv('DIST_DGL_TEST_SSH_SETUP', '')
    if ssh_setup:
        cmd = ssh_setup + ';' + cmd

    # Construct ssh command that executes `cmd` on the remote host
    ssh_cmd = "ssh -o StrictHostKeyChecking=no {ssh_key} -p {port} {ip_prefix}{ip} '{cmd}'".format(
        ssh_key=custom_ssh_key,
        port=str(port),
        ip_prefix=ip_prefix,
        ip=ip,
        cmd=cmd,
    )

    ctx = mp.get_context('spawn')
    proc = ctx.Process(target=run, args=(ssh_cmd,))
    proc.start()
    return proc


def get_ips(ip_config):
    ips = []
    with open(ip_config) as f:
        for line in f:
            result = line.strip().split()
            if len(result) != 1:
                raise RuntimeError(
                    "Invalid format of ip_config:{}".format(ip_config))
            ips.append(result[0])
    return ips
[]
[]
[ "DIST_DGL_TEST_SSH_KEY", "DIST_DGL_TEST_SSH_PORT", "DIST_DGL_TEST_SSH_SETUP" ]
[]
["DIST_DGL_TEST_SSH_KEY", "DIST_DGL_TEST_SSH_PORT", "DIST_DGL_TEST_SSH_SETUP"]
python
3
0
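A minimal usage sketch for execute_remote; the import path `utils`, the ip_config file name and the username below are illustrative assumptions, not part of the test suite. The optional DIST_DGL_TEST_SSH_PORT, DIST_DGL_TEST_SSH_KEY and DIST_DGL_TEST_SSH_SETUP overrides are read from the environment inside the call.

import os
from utils import execute_remote, get_ips  # module path assumed for illustration

os.environ.setdefault("DIST_DGL_TEST_SSH_PORT", "22")  # optional override read inside execute_remote

procs = [execute_remote("hostname", ip, username="ubuntu")  # username is illustrative
         for ip in get_ips("ip_config.txt")]               # one ip per line, as get_ips expects
for proc in procs:
    proc.join()  # each Process returns once the remote command has finished
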
vendor/github.com/HewlettPackard/oneview-golang/examples/storage_pool.go
package main

import (
	"fmt"
	"github.com/HewlettPackard/oneview-golang/ov"
	"os"
	"strconv"
)

func main() {
	var (
		ClientOV     *ov.OVClient
		storage_pool = "Cluster-1"
	)
	apiversion, _ := strconv.Atoi(os.Getenv("ONEVIEW_APIVERSION"))
	ovc := ClientOV.NewOVClient(
		os.Getenv("ONEVIEW_OV_USER"),
		os.Getenv("ONEVIEW_OV_PASSWORD"),
		os.Getenv("ONEVIEW_OV_DOMAIN"),
		os.Getenv("ONEVIEW_OV_ENDPOINT"),
		false,
		apiversion,
		"*")

	//Get storage pool by name to update
	update_pool, _ := ovc.GetStoragePoolByName(storage_pool)

	// Update the given storage pool
	// This API can be used to manage/unmanage a storage pool, update storage pool attributes or to request a refresh of a storage pool.
	// To manage/unmanage a storage pool, issue a PUT with the isManaged attribute set as true to manage or false to unmanage.
	// Attempting to unmanage a StoreVirtual pool is not allowed and the attempt will return a task error.
	// To request a refresh of a storage pool the user must set the "requestingRefresh" attribute to true. The user cannot perform any other attribute update to the storage pool while also requesting a refresh of the pool.
	update_pool.IsManaged = true
	err := ovc.UpdateStoragePool(update_pool)
	if err != nil {
		fmt.Println("Could not update the pool", err)
	}

	// Get All the pools present
	fmt.Println("\nGetting all the storage pools present in the system: \n")
	sort := "name:desc"
	pool_list, err := ovc.GetStoragePools("", sort, "", "")
	if err != nil {
		fmt.Println("Error Getting the storage pools ", err)
	}
	for i := 0; i < len(pool_list.Members); i++ {
		fmt.Println(pool_list.Members[i].Name)
	}
}
[ "\"ONEVIEW_APIVERSION\"", "\"ONEVIEW_OV_USER\"", "\"ONEVIEW_OV_PASSWORD\"", "\"ONEVIEW_OV_DOMAIN\"", "\"ONEVIEW_OV_ENDPOINT\"" ]
[]
[ "ONEVIEW_OV_ENDPOINT", "ONEVIEW_OV_DOMAIN", "ONEVIEW_APIVERSION", "ONEVIEW_OV_PASSWORD", "ONEVIEW_OV_USER" ]
[]
["ONEVIEW_OV_ENDPOINT", "ONEVIEW_OV_DOMAIN", "ONEVIEW_APIVERSION", "ONEVIEW_OV_PASSWORD", "ONEVIEW_OV_USER"]
go
5
0
common/constants.py
import os

from common import cli_helpers

# HOTSOS GLOBALS
DATA_ROOT = os.environ.get('DATA_ROOT', '/')
MASTER_YAML_OUT = os.environ.get('MASTER_YAML_OUT')
PLUGIN_TMP_DIR = os.environ.get('PLUGIN_TMP_DIR')
PLUGIN_NAME = os.environ.get('PLUGIN_NAME')
PART_NAME = os.environ.get('PART_NAME')
USE_ALL_LOGS = os.environ.get('USE_ALL_LOGS', "False")
if cli_helpers.bool_str(USE_ALL_LOGS):
    USE_ALL_LOGS = True
else:
    USE_ALL_LOGS = False
[]
[]
[ "DATA_ROOT", "USE_ALL_LOGS", "PLUGIN_NAME", "PART_NAME", "MASTER_YAML_OUT", "PLUGIN_TMP_DIR" ]
[]
["DATA_ROOT", "USE_ALL_LOGS", "PLUGIN_NAME", "PART_NAME", "MASTER_YAML_OUT", "PLUGIN_TMP_DIR"]
python
6
0
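Because these globals are evaluated when the module is imported, any overrides must already be in the environment at import time; a small sketch (paths and values are illustrative, and it assumes cli_helpers.bool_str treats "True" as truthy):

import os

os.environ["DATA_ROOT"] = "/var/snap/hotsos/data"  # illustrative path
os.environ["USE_ALL_LOGS"] = "True"

from common import constants  # import only after the environment is set

print(constants.DATA_ROOT)     # -> /var/snap/hotsos/data
print(constants.USE_ALL_LOGS)  # -> True
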
main.go
/*
Copyright The Stash Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"os"
	"runtime"

	_ "stash.appscode.dev/apimachinery/client/clientset/versioned/fake"
	"stash.appscode.dev/stash/pkg/cmds"

	"github.com/appscode/go/log"
	_ "k8s.io/client-go/kubernetes/fake"
	_ "k8s.io/client-go/plugin/pkg/client/auth"
	"kmodules.xyz/client-go/logs"
)

func main() {
	logs.InitLogs()
	defer logs.FlushLogs()

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	if err := cmds.NewRootCmd().Execute(); err != nil {
		log.Fatalln("Error in Stash Main:", err)
	}
	log.Infoln("Exiting Stash Main")
}
[ "\"GOMAXPROCS\"" ]
[]
[ "GOMAXPROCS" ]
[]
["GOMAXPROCS"]
go
1
0
imageops/imageops/server.py
""" # Copyright 2021 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ import os from pathlib import Path from imageops.utils import Utils from imageops.logger import Logger class Server(object): """ Backend server for imageops API The request_id is the only input param which used to identify this request """ logger = Logger(__name__).get_logger() def __init__(self, request_id=None): """ Init Server class """ if not request_id: msg = 'Lacking request_id.' self.logger.error(msg) raise ValueError(msg) self.request_id = str(request_id) if not os.getenv('TMP_PATH'): msg = 'No TMP_PATH found in env.' self.logger.error(msg) raise ValueError(msg) self.tmp_path = os.getenv('TMP_PATH') if not os.getenv('IMAGE_PATH'): msg = 'No IMAGE_PATH found in env.' self.logger.error(msg) raise ValueError(msg) self.image_path = os.getenv('IMAGE_PATH') self.check_record_file = 'check_info.json' self.compress_record_file = 'compress_status.log' self.check_rc = {0: 'Check Completed, the image is (now) consistent', 1: 'Check completed, image is corrupted', 2: 'Check completed, image has leaked clusters, but is not corrupted', 3: 'Check failed', 4: 'Check in Progress', 5: 'Check Exiting because of not support this type of image', 6: 'Check Time Out'} self.compress_rc = {0: 'Compress Completed', 1: 'Compress In Progress', 2: 'Compress Failed', 3: 'Compress Exiting because of No enouth space left', 4: 'Compress Time Out'} def check_vm_image(self, input_image=None): """ Check the input vm image to get it's checksum and basic info such as type and size """ self.logger.info('Start to check VM image %s ...', input_image) if not input_image: msg = 'No image is given to do the check.' 
self.logger.error(msg) raise ValueError(msg) image = Path(input_image) if not image.is_file(): msg = 'Given image {} is not exist.'.format(input_image) self.logger.error(msg) raise ValueError(msg) try: check_record_path = os.path.join(self.tmp_path, self.request_id) os.makedirs(check_record_path) check_record_file = os.path.join(check_record_path, self.check_record_file) check_info = {'checkResult': 4} check_info['imageInfo'] = Utils.check_cmd_exec(input_image, check_record_file) check_info['checksum'] = Utils.get_md5_checksum(input_image, check_record_file) Utils.write_json_file(check_record_file, check_info) status = 0 msg = 'Check In Progress' except Exception as exception: status = 1 msg = 'Check Failed' check_info = {'checkResult': 99} Utils.write_json_file(check_record_file, check_info) self.logger.error(exception) self.logger.info('Check image %s, status: %s, msg: %s', input_image, status, msg) return status, msg def get_check_status(self): """ Get the status of one check with the request ID """ self.logger.info('Start to get check status...') check_info = {} try: check_record_file = os.path.join(self.tmp_path, self.request_id, self.check_record_file) check_info = Utils.read_json_file(check_record_file) self.logger.debug(check_info) if not check_info.get('imageInfo'): return 4, self.check_rc[4], check_info image_info = check_info.get('imageInfo') if image_info.get('filename'): file_name = image_info.get('filename').split('/')[-1] check_info['imageInfo']['filename'] = file_name if check_info.get('checkResult') == 4 or not check_info.get('checksum'): return 4, self.check_rc[4], check_info if check_info.get('checkResult') == 99: return 3, self.check_rc[3], check_info if check_info.get('checkResult') == 100: return 6, self.check_rc[6], check_info if check_info.get('checkResult') == 63: return 5, self.check_rc[5], check_info if check_info.get('checkResult') == 0: return 0, self.check_rc[0], check_info if check_info.get('checkResult') == 2: return 1, self.check_rc[1], check_info if check_info.get('checkResult') == 3: return 2, self.check_rc[2], check_info return 3, self.check_rc[3], check_info except IOError as io_exception: self.logger.exception(io_exception) return 3, '{}, {}'.format(self.check_rc[3], 'nonexistent request ID'), check_info except Exception: return 3, self.check_rc[3], check_info def compress_vm_image(self, input_image=None, output_image=None): """ Compress the input vm image to get a slim one which is sparsify Also can transfer raw image to qcow2 one """ self.logger.info('Start to compress vm image %s ...', input_image) if not input_image: msg = 'No image is given.' self.logger.error(msg) raise ValueError(msg) if not output_image: msg = 'No output image path is given.' 
self.logger.error(msg) raise ValueError(msg) image = Path(input_image) if not image.is_file(): msg = 'Image {} is not exist.'.format(input_image) self.logger.error(msg) raise ValueError(msg) try: compress_record_path = os.path.join(self.tmp_path, self.request_id) os.makedirs(compress_record_path) compress_record_file = os.path.join(compress_record_path, self.compress_record_file) self.logger.info('Start to compress image %s ...', input_image) if not Utils.check_compress_requires(input_image, self.tmp_path): self.logger.error('Free disk space under %s is not enough to compress image %s', self.tmp_path, input_image) status = 1 msg = '{}'.format(self.compress_rc.get(3)) Utils.append_write_plain_file(compress_record_file, msg) else: self.logger.info('Free disk space under %s is enough to compress image %s', self.tmp_path, input_image) Utils.compress_cmd_exec(input_image, output_image, compress_record_file) status = 0 msg = '{}'.format('Compress In Progress') except Exception as exception: self.logger.error(exception) status = 1 msg = '{}'.format(self.compress_rc.get(2)) Utils.append_write_plain_file(compress_record_file, msg) self.logger.info('Compress image %s with status: %s and msg: %s', input_image, status, msg) return status, msg def get_compress_status(self): """ Get the status of one compress with the request ID """ self.logger.info('Start to get status of compress image ...') try: compress_record_file = os.path.join(self.tmp_path, self.request_id, self.compress_record_file) with open(compress_record_file, 'r') as compress_file: for line in compress_file: if self.compress_rc[0] in line: self.logger.info(self.compress_rc[0]) return 0, self.compress_rc[0], 1 for item in [2, 3, 4]: if self.compress_rc[item] in line: self.logger.error(self.compress_rc[item]) return item, self.compress_rc[item], 0 except IOError as io_exception: self.logger.exception(io_exception) return 2, '{}, {}'.format(self.compress_rc[2], 'nonexistent request ID'), 0 except Exception as exception: self.logger.exception(exception) return 2, self.compress_rc[2], 0 try: compress_rate = Utils.get_compress_rate(compress_record_file) self.logger.info(self.compress_rc[1]) return 1, self.compress_rc[1], compress_rate except Exception as exception: self.logger.exception(exception) return 2, self.compress_rc[2], 0
[]
[]
[ "IMAGE_PATH", "TMP_PATH" ]
[]
["IMAGE_PATH", "TMP_PATH"]
python
2
0
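A sketch of how this Server class might be driven end to end, assuming TMP_PATH and IMAGE_PATH point at writable directories and that the qcow2 path below exists (all paths are illustrative):

import os
import uuid

os.environ.setdefault("TMP_PATH", "/tmp/imageops")       # required by Server.__init__
os.environ.setdefault("IMAGE_PATH", "/var/lib/images")   # required by Server.__init__

from imageops.server import Server

server = Server(str(uuid.uuid4()))                                  # request_id identifies this job
status, msg = server.check_vm_image("/var/lib/images/test.qcow2")   # kicks off the async check
code, text, info = server.get_check_status()                        # poll until code is no longer 4
print(status, msg, code, text)
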
elastalert/util.py
# -*- coding: utf-8 -*- import collections import datetime import logging import os import re import sys import dateutil.parser import pytz from six import string_types from . import ElasticSearchClient from .auth import Auth logging.basicConfig() elastalert_logger = logging.getLogger('elastalert') def get_module(module_name): """ Loads a module and returns a specific object. module_name should 'module.file.object'. Returns object or raises EAException on error. """ sys.path.append(os.getcwd()) try: module_path, module_class = module_name.rsplit('.', 1) base_module = __import__(module_path, globals(), locals(), [module_class]) module = getattr(base_module, module_class) except (ImportError, AttributeError, ValueError) as e: raise EAException("Could not import module %s: %s" % (module_name, e)).with_traceback(sys.exc_info()[2]) return module def new_get_event_ts(ts_field): """ Constructs a lambda that may be called to extract the timestamp field from a given event. :returns: A callable function that takes an event and outputs that event's timestamp field. """ return lambda event: lookup_es_key(event[0], ts_field) def _find_es_dict_by_key(lookup_dict, term): """ Performs iterative dictionary search based upon the following conditions: 1. Subkeys may either appear behind a full stop (.) or at one lookup_dict level lower in the tree. 2. No wildcards exist within the provided ES search terms (these are treated as string literals) This is necessary to get around inconsistencies in ES data. For example: {'ad.account_name': 'bob'} Or: {'csp_report': {'blocked_uri': 'bob.com'}} And even: {'juniper_duo.geoip': {'country_name': 'Democratic People's Republic of Korea'}} We want a search term of form "key.subkey.subsubkey" to match in all cases. :returns: A tuple with the first element being the dict that contains the key and the second element which is the last subkey used to access the target specified by the term. None is returned for both if the key can not be found. """ if term in lookup_dict: return lookup_dict, term # If the term does not match immediately, perform iterative lookup: # 1. Split the search term into tokens # 2. Recurrently concatenate these together to traverse deeper into the dictionary, # clearing the subkey at every successful lookup. # # This greedy approach is correct because subkeys must always appear in order, # preferring full stops and traversal interchangeably. # # Subkeys will NEVER be duplicated between an alias and a traversal. # # For example: # {'foo.bar': {'bar': 'ray'}} to look up foo.bar will return {'bar': 'ray'}, not 'ray' dict_cursor = lookup_dict while term: split_results = re.split(r'\[(\d)\]', term, maxsplit=1) if len(split_results) == 3: sub_term, index, term = split_results index = int(index) else: sub_term, index, term = split_results + [None, ''] subkeys = sub_term.split('.') subkey = '' while len(subkeys) > 0: if not dict_cursor: return {}, None subkey += subkeys.pop(0) if subkey in dict_cursor: if len(subkeys) == 0: break dict_cursor = dict_cursor[subkey] subkey = '' elif len(subkeys) == 0: # If there are no keys left to match, return None values dict_cursor = None subkey = None else: subkey += '.' if index is not None and subkey: dict_cursor = dict_cursor[subkey] if type(dict_cursor) == list and len(dict_cursor) > index: subkey = index if term: dict_cursor = dict_cursor[subkey] else: return {}, None return dict_cursor, subkey def set_es_key(lookup_dict, term, value): """ Looks up the location that the term maps to and sets it to the given value. 
:returns: True if the value was set successfully, False otherwise. """ value_dict, value_key = _find_es_dict_by_key(lookup_dict, term) if value_dict is not None: value_dict[value_key] = value return True return False def lookup_es_key(lookup_dict, term): """ Performs iterative dictionary search for the given term. :returns: The value identified by term or None if it cannot be found. """ value_dict, value_key = _find_es_dict_by_key(lookup_dict, term) return None if value_key is None else value_dict[value_key] def ts_to_dt(timestamp): if isinstance(timestamp, datetime.datetime): return timestamp dt = dateutil.parser.parse(timestamp) # Implicitly convert local timestamps to UTC if dt.tzinfo is None: dt = dt.replace(tzinfo=pytz.utc) return dt def dt_to_ts(dt): if not isinstance(dt, datetime.datetime): logging.warning('Expected datetime, got %s' % (type(dt))) return dt ts = dt.isoformat() # Round microseconds to milliseconds if dt.tzinfo is None: # Implicitly convert local times to UTC return ts + 'Z' # isoformat() uses microsecond accuracy and timezone offsets # but we should try to use millisecond accuracy and Z to indicate UTC return ts.replace('000+00:00', 'Z').replace('+00:00', 'Z') def ts_to_dt_with_format(timestamp, ts_format): if isinstance(timestamp, datetime.datetime): return timestamp dt = datetime.datetime.strptime(timestamp, ts_format) # Implicitly convert local timestamps to UTC if dt.tzinfo is None: dt = dt.replace(tzinfo=dateutil.tz.tzutc()) return dt def dt_to_ts_with_format(dt, ts_format): if not isinstance(dt, datetime.datetime): logging.warning('Expected datetime, got %s' % (type(dt))) return dt ts = dt.strftime(ts_format) return ts def ts_now(): return datetime.datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc()) def inc_ts(timestamp, milliseconds=1): """Increment a timestamp by milliseconds.""" dt = ts_to_dt(timestamp) dt += datetime.timedelta(milliseconds=milliseconds) return dt_to_ts(dt) def pretty_ts(timestamp, tz=True): """Pretty-format the given timestamp (to be printed or logged hereafter). If tz, the timestamp will be converted to local time. Format: YYYY-MM-DD HH:MM TZ""" dt = timestamp if not isinstance(timestamp, datetime.datetime): dt = ts_to_dt(timestamp) if tz: dt = dt.astimezone(dateutil.tz.tzlocal()) return dt.strftime('%Y-%m-%d %H:%M %Z') def ts_add(ts, td): """ Allows a timedelta (td) add operation on a string timestamp (ts) """ return dt_to_ts(ts_to_dt(ts) + td) def hashable(obj): """ Convert obj to a hashable obj. We use the value of some fields from Elasticsearch as keys for dictionaries. This means that whatever Elasticsearch returns must be hashable, and it sometimes returns a list or dict.""" if not obj.__hash__: return str(obj) return obj def format_index(index, start, end, add_extra=False): """ Takes an index, specified using strftime format, start and end time timestamps, and outputs a wildcard based index string to match all possible timestamps. 
""" # Convert to UTC start -= start.utcoffset() end -= end.utcoffset() original_start = start indices = set() while start.date() <= end.date(): indices.add(start.strftime(index)) start += datetime.timedelta(days=1) num = len(indices) if add_extra: while len(indices) == num: original_start -= datetime.timedelta(days=1) new_index = original_start.strftime(index) assert new_index != index, "You cannot use a static index with search_extra_index" indices.add(new_index) return ','.join(indices) class EAException(Exception): pass def seconds(td): return td.seconds + td.days * 24 * 3600 def total_seconds(dt): # For python 2.6 compatability if dt is None: return 0 elif hasattr(dt, 'total_seconds'): return dt.total_seconds() else: return (dt.microseconds + (dt.seconds + dt.days * 24 * 3600) * 10**6) / 10**6 def dt_to_int(dt): dt = dt.replace(tzinfo=None) return int(total_seconds((dt - datetime.datetime.utcfromtimestamp(0))) * 1000) def unixms_to_dt(ts): return unix_to_dt(float(ts) / 1000) def unix_to_dt(ts): dt = datetime.datetime.utcfromtimestamp(float(ts)) dt = dt.replace(tzinfo=dateutil.tz.tzutc()) return dt def dt_to_unix(dt): return int(total_seconds(dt - datetime.datetime(1970, 1, 1, tzinfo=dateutil.tz.tzutc()))) def dt_to_unixms(dt): return int(dt_to_unix(dt) * 1000) def cronite_datetime_to_timestamp(self, d): """ Converts a `datetime` object `d` into a UNIX timestamp. """ if d.tzinfo is not None: d = d.replace(tzinfo=None) - d.utcoffset() return total_seconds((d - datetime.datetime(1970, 1, 1))) def add_raw_postfix(field, is_five_or_above): if is_five_or_above: end = '.keyword' else: end = '.raw' if not field.endswith(end): field += end return field def remove_raw_postfix(field, is_five_or_above): if is_five_or_above: end = '.keyword' else: end = '.raw' if field.endswith(end): field = field[:len(field) - len(end)] return field def replace_dots_in_field_names(document): """ This method destructively modifies document by replacing any dots in field names with an underscore. 
""" for key, value in list(document.items()): if isinstance(value, dict): value = replace_dots_in_field_names(value) if isinstance(key, string_types) and key.find('.') != -1: del document[key] document[key.replace('.', '_')] = value return document def elasticsearch_client(conf): """ returns an :class:`ElasticSearchClient` instance configured using an es_conn_config """ es_conn_conf = build_es_conn_config(conf) auth = Auth() es_conn_conf['http_auth'] = auth(host=es_conn_conf['es_host'], username=es_conn_conf['es_username'], password=es_conn_conf['es_password'], aws_region=es_conn_conf['aws_region'], profile_name=es_conn_conf['profile']) return ElasticSearchClient(es_conn_conf) def build_es_conn_config(conf): """ Given a conf dictionary w/ raw config properties 'use_ssl', 'es_host', 'es_port' 'es_username' and 'es_password', this will return a new dictionary with properly initialized values for 'es_host', 'es_port', 'use_ssl' and 'http_auth' which will be a basicauth username:password formatted string """ parsed_conf = {} parsed_conf['use_ssl'] = os.environ.get('ES_USE_SSL', False) parsed_conf['verify_certs'] = True parsed_conf['ca_certs'] = None parsed_conf['client_cert'] = None parsed_conf['client_key'] = None parsed_conf['http_auth'] = None parsed_conf['es_username'] = None parsed_conf['es_password'] = None parsed_conf['aws_region'] = None parsed_conf['profile'] = None parsed_conf['es_host'] = os.environ.get('ES_HOST', conf['es_host']) parsed_conf['es_port'] = int(os.environ.get('ES_PORT', conf['es_port'])) parsed_conf['es_url_prefix'] = '' parsed_conf['es_conn_timeout'] = conf.get('es_conn_timeout', 20) parsed_conf['send_get_body_as'] = conf.get('es_send_get_body_as', 'GET') if os.environ.get('ES_USERNAME'): parsed_conf['es_username'] = os.environ.get('ES_USERNAME') parsed_conf['es_password'] = os.environ.get('ES_PASSWORD') elif 'es_username' in conf: parsed_conf['es_username'] = conf['es_username'] parsed_conf['es_password'] = conf['es_password'] if 'aws_region' in conf: parsed_conf['aws_region'] = conf['aws_region'] # Deprecated if 'boto_profile' in conf: logging.warning('Found deprecated "boto_profile", use "profile" instead!') parsed_conf['profile'] = conf['boto_profile'] if 'profile' in conf: parsed_conf['profile'] = conf['profile'] if 'use_ssl' in conf: parsed_conf['use_ssl'] = conf['use_ssl'] if 'verify_certs' in conf: parsed_conf['verify_certs'] = conf['verify_certs'] if 'ca_certs' in conf: parsed_conf['ca_certs'] = conf['ca_certs'] if 'client_cert' in conf: parsed_conf['client_cert'] = conf['client_cert'] if 'client_key' in conf: parsed_conf['client_key'] = conf['client_key'] if 'es_url_prefix' in conf: parsed_conf['es_url_prefix'] = conf['es_url_prefix'] return parsed_conf def pytzfy(dt): # apscheduler requires pytz timezone objects # This function will replace a dateutil.tz one with a pytz one if dt.tzinfo is not None: new_tz = pytz.timezone(dt.tzinfo.tzname('Y is this even required??')) return dt.replace(tzinfo=new_tz) return dt def parse_duration(value): """Convert ``unit=num`` spec into a ``timedelta`` object.""" unit, num = value.split('=') return datetime.timedelta(**{unit: int(num)}) def parse_deadline(value): """Convert ``unit=num`` spec into a ``datetime`` object.""" duration = parse_duration(value) return ts_now() + duration def flatten_dict(dct, delim='.', prefix=''): ret = {} for key, val in list(dct.items()): if type(val) == dict: ret.update(flatten_dict(val, prefix=prefix + key + delim)) else: ret[prefix + key] = val return ret def resolve_string(string, match, 
missing_text='<MISSING VALUE>'): """ Given a python string that may contain references to fields on the match dictionary, the strings are replaced using the corresponding values. However, if the referenced field is not found on the dictionary, it is replaced by a default string. Strings can be formatted using the old-style format ('%(field)s') or the new-style format ('{match[field]}'). :param string: A string that may contain references to values of the 'match' dictionary. :param match: A dictionary with the values to replace where referenced by keys in the string. :param missing_text: The default text to replace a formatter with if the field doesnt exist. """ flat_match = flatten_dict(match) flat_match.update(match) dd_match = collections.defaultdict(lambda: missing_text, flat_match) dd_match['_missing_value'] = missing_text while True: try: string = string % dd_match string = string.format(**dd_match) break except KeyError as e: if '{%s}' % str(e).strip("'") not in string: break string = string.replace('{%s}' % str(e).strip("'"), '{_missing_value}') return string def should_scrolling_continue(rule_conf): """ Tells about a rule config if it can scroll still or should stop the scrolling. :param: rule_conf as dict :rtype: bool """ max_scrolling = rule_conf.get('max_scrolling_count') stop_the_scroll = 0 < max_scrolling <= rule_conf.get('scrolling_cycle') return not stop_the_scroll
[]
[]
[ "ES_USE_SSL", "ES_HOST", "ES_USERNAME", "ES_PASSWORD", "ES_PORT" ]
[]
["ES_USE_SSL", "ES_HOST", "ES_USERNAME", "ES_PASSWORD", "ES_PORT"]
python
5
0
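A short sketch of how build_es_conn_config resolves the ES_* environment overrides against a raw rule configuration; the hostnames and credentials are placeholders, and it assumes the elastalert package is importable:

import os

os.environ["ES_HOST"] = "es.internal.example"   # placeholder host; wins over conf['es_host']
os.environ["ES_PORT"] = "9200"
os.environ["ES_USERNAME"] = "elastalert"        # placeholder credentials
os.environ["ES_PASSWORD"] = "secret"

from elastalert.util import build_es_conn_config

conf = {"es_host": "ignored.example", "es_port": 9200, "use_ssl": True}
parsed = build_es_conn_config(conf)
print(parsed["es_host"], parsed["es_port"], parsed["es_username"])  # env values take precedence
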
utils/authentication.go
package utils

import (
	"fmt"
	"os"
	"time"

	"github.com/dgrijalva/jwt-go"
	"github.com/hculpan/lordsofcrime/entity"
)

var jwtSecret []byte = []byte{}

// Claims defines the keys we want to put
// in the JWT token
type Claims struct {
	Username    string `json:"username"`
	Password    string `json:"password"`
	FullName    string `json:"fullname"`
	DisplayName string `json:"displayname"`
	jwt.StandardClaims
}

// Authenticate authenticates the user
func Authenticate(username, password string) (*entity.User, error) {
	user := entity.FindUserByUsername(username)
	if user.ID == 0 {
		return nil, fmt.Errorf("Invalid username/password")
	}

	result := user.VerifyPassword(password)
	if result == nil {
		user.LastLogin = time.Now()
		if err := user.Save(); err != nil {
			result = err
		}
	}

	return user, result
}

func getSecretKey() {
	if len(jwtSecret) == 0 {
		if os.Getenv("LOC_SECRET_KEY") == "" {
			panic("LOC_SECRET_KEY is not setup correctly")
		}
		jwtSecret = []byte(os.Getenv("LOC_SECRET_KEY"))
	}
}

// CreateToken create a jwt token
func CreateToken(u entity.User) (string, error) {
	getSecretKey()

	expireTime := time.Now().Add(3 * time.Hour)
	claims := Claims{
		Username:    u.Username,
		Password:    string(u.Password),
		FullName:    u.FullName,
		DisplayName: u.DisplayName,
		StandardClaims: jwt.StandardClaims{
			//Expiration time
			ExpiresAt: expireTime.Unix(),
			//Designated token publisher
			Issuer: "lords_of_crime",
		},
	}

	tokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	return tokenClaims.SignedString([]byte(jwtSecret))
}

// DecodeToken decodes a JWT token
func DecodeToken(t string) (*Claims, error) {
	getSecretKey()

	result := &Claims{}
	tkn, err := jwt.ParseWithClaims(t, result, func(token *jwt.Token) (interface{}, error) {
		return jwtSecret, nil
	})
	if err != nil {
		return result, err
	}
	if !tkn.Valid {
		return result, fmt.Errorf("Invalid token")
	}

	return result, nil
}
[ "\"LOC_SECRET_KEY\"", "\"LOC_SECRET_KEY\"" ]
[]
[ "LOC_SECRET_KEY" ]
[]
["LOC_SECRET_KEY"]
go
1
0
layers/poky/meta/lib/oeqa/selftest/cases/wic.py
#!/usr/bin/env python # ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- # # Copyright (c) 2015, Intel Corporation. # All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # AUTHORS # Ed Bartosh <ed.bartosh@linux.intel.com> """Test cases for wic.""" import os import sys import unittest from glob import glob from shutil import rmtree, copy from functools import wraps, lru_cache from tempfile import NamedTemporaryFile from oeqa.selftest.case import OESelftestTestCase from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars, runqemu from oeqa.core.decorator.oeid import OETestID @lru_cache(maxsize=32) def get_host_arch(recipe): """A cached call to get_bb_var('HOST_ARCH', <recipe>)""" return get_bb_var('HOST_ARCH', recipe) def only_for_arch(archs, image='core-image-minimal'): """Decorator for wrapping test cases that can be run only for specific target architectures. A list of compatible architectures is passed in `archs`. Current architecture will be determined by parsing bitbake output for `image` recipe. """ def wrapper(func): @wraps(func) def wrapped_f(*args, **kwargs): arch = get_host_arch(image) if archs and arch not in archs: raise unittest.SkipTest("Testcase arch dependency not met: %s" % arch) return func(*args, **kwargs) wrapped_f.__name__ = func.__name__ return wrapped_f return wrapper class WicTestCase(OESelftestTestCase): """Wic test class.""" image_is_ready = False wicenv_cache = {} def setUpLocal(self): """This code is executed before each test method.""" self.resultdir = self.builddir + "/wic-tmp/" super(WicTestCase, self).setUpLocal() # Do this here instead of in setUpClass as the base setUp does some # clean up which can result in the native tools built earlier in # setUpClass being unavailable. 
if not WicTestCase.image_is_ready: if get_bb_var('USE_NLS') == 'yes': bitbake('wic-tools') else: self.skipTest('wic-tools cannot be built due its (intltool|gettext)-native dependency and NLS disable') bitbake('core-image-minimal') WicTestCase.image_is_ready = True rmtree(self.resultdir, ignore_errors=True) def tearDownLocal(self): """Remove resultdir as it may contain images.""" rmtree(self.resultdir, ignore_errors=True) super(WicTestCase, self).tearDownLocal() def _get_image_env_path(self, image): """Generate and obtain the path to <image>.env""" if image not in WicTestCase.wicenv_cache: self.assertEqual(0, bitbake('%s -c do_rootfs_wicenv' % image).status) bb_vars = get_bb_vars(['STAGING_DIR', 'MACHINE'], image) stdir = bb_vars['STAGING_DIR'] machine = bb_vars['MACHINE'] WicTestCase.wicenv_cache[image] = os.path.join(stdir, machine, 'imgdata') return WicTestCase.wicenv_cache[image] class Wic(WicTestCase): @OETestID(1552) def test_version(self): """Test wic --version""" runCmd('wic --version') @OETestID(1208) def test_help(self): """Test wic --help and wic -h""" runCmd('wic --help') runCmd('wic -h') @OETestID(1209) def test_createhelp(self): """Test wic create --help""" runCmd('wic create --help') @OETestID(1210) def test_listhelp(self): """Test wic list --help""" runCmd('wic list --help') @OETestID(1553) def test_help_create(self): """Test wic help create""" runCmd('wic help create') @OETestID(1554) def test_help_list(self): """Test wic help list""" runCmd('wic help list') @OETestID(1215) def test_help_overview(self): """Test wic help overview""" runCmd('wic help overview') @OETestID(1216) def test_help_plugins(self): """Test wic help plugins""" runCmd('wic help plugins') @OETestID(1217) def test_help_kickstart(self): """Test wic help kickstart""" runCmd('wic help kickstart') @OETestID(1555) def test_list_images(self): """Test wic list images""" runCmd('wic list images') @OETestID(1556) def test_list_source_plugins(self): """Test wic list source-plugins""" runCmd('wic list source-plugins') @OETestID(1557) def test_listed_images_help(self): """Test wic listed images help""" output = runCmd('wic list images').output imagelist = [line.split()[0] for line in output.splitlines()] for image in imagelist: runCmd('wic list %s help' % image) @OETestID(1213) def test_unsupported_subcommand(self): """Test unsupported subcommand""" self.assertNotEqual(0, runCmd('wic unsupported', ignore_status=True).status) @OETestID(1214) def test_no_command(self): """Test wic without command""" self.assertEqual(1, runCmd('wic', ignore_status=True).status) @OETestID(1211) def test_build_image_name(self): """Test wic create wictestdisk --image-name=core-image-minimal""" cmd = "wic create wictestdisk --image-name=core-image-minimal -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct"))) @OETestID(1157) @only_for_arch(['i586', 'i686', 'x86_64']) def test_gpt_image(self): """Test creation of core-image-minimal with gpt table and UUID boot""" cmd = "wic create directdisk-gpt --image-name core-image-minimal -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct"))) @OETestID(1346) @only_for_arch(['i586', 'i686', 'x86_64']) def test_iso_image(self): """Test creation of hybrid iso image with legacy and EFI boot""" config = 'INITRAMFS_IMAGE = "core-image-minimal-initramfs"\n'\ 'MACHINE_FEATURES_append = " efi"\n'\ 'DEPENDS_pn-core-image-minimal += "syslinux"\n' self.append_config(config) bitbake('core-image-minimal 
core-image-minimal-initramfs') self.remove_config(config) cmd = "wic create mkhybridiso --image-name core-image-minimal -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.direct"))) self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.iso"))) @OETestID(1348) @only_for_arch(['i586', 'i686', 'x86_64']) def test_qemux86_directdisk(self): """Test creation of qemux-86-directdisk image""" cmd = "wic create qemux86-directdisk -e core-image-minimal -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "qemux86-directdisk-*direct"))) @OETestID(1350) @only_for_arch(['i586', 'i686', 'x86_64']) def test_mkefidisk(self): """Test creation of mkefidisk image""" cmd = "wic create mkefidisk -e core-image-minimal -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "mkefidisk-*direct"))) @OETestID(1385) @only_for_arch(['i586', 'i686', 'x86_64']) def test_bootloader_config(self): """Test creation of directdisk-bootloader-config image""" config = 'DEPENDS_pn-core-image-minimal += "syslinux"\n' self.append_config(config) bitbake('core-image-minimal') self.remove_config(config) cmd = "wic create directdisk-bootloader-config -e core-image-minimal -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "directdisk-bootloader-config-*direct"))) @OETestID(1560) @only_for_arch(['i586', 'i686', 'x86_64']) def test_systemd_bootdisk(self): """Test creation of systemd-bootdisk image""" config = 'MACHINE_FEATURES_append = " efi"\n' self.append_config(config) bitbake('core-image-minimal') self.remove_config(config) cmd = "wic create systemd-bootdisk -e core-image-minimal -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "systemd-bootdisk-*direct"))) @OETestID(1561) def test_sdimage_bootpart(self): """Test creation of sdimage-bootpart image""" cmd = "wic create sdimage-bootpart -e core-image-minimal -o %s" % self.resultdir kimgtype = get_bb_var('KERNEL_IMAGETYPE', 'core-image-minimal') self.write_config('IMAGE_BOOT_FILES = "%s"\n' % kimgtype) runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "sdimage-bootpart-*direct"))) @OETestID(1562) @only_for_arch(['i586', 'i686', 'x86_64']) def test_default_output_dir(self): """Test default output location""" for fname in glob("directdisk-*.direct"): os.remove(fname) config = 'DEPENDS_pn-core-image-minimal += "syslinux"\n' self.append_config(config) bitbake('core-image-minimal') self.remove_config(config) cmd = "wic create directdisk -e core-image-minimal" runCmd(cmd) self.assertEqual(1, len(glob("directdisk-*.direct"))) @OETestID(1212) @only_for_arch(['i586', 'i686', 'x86_64']) def test_build_artifacts(self): """Test wic create directdisk providing all artifacts.""" bb_vars = get_bb_vars(['STAGING_DATADIR', 'RECIPE_SYSROOT_NATIVE'], 'wic-tools') bb_vars.update(get_bb_vars(['DEPLOY_DIR_IMAGE', 'IMAGE_ROOTFS'], 'core-image-minimal')) bbvars = {key.lower(): value for key, value in bb_vars.items()} bbvars['resultdir'] = self.resultdir runCmd("wic create directdisk " "-b %(staging_datadir)s " "-k %(deploy_dir_image)s " "-n %(recipe_sysroot_native)s " "-r %(image_rootfs)s " "-o %(resultdir)s" % bbvars) self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct"))) @OETestID(1264) def test_compress_gzip(self): """Test compressing an image with gzip""" runCmd("wic create wictestdisk " "--image-name core-image-minimal " "-c gzip -o %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + 
"wictestdisk-*.direct.gz"))) @OETestID(1265) def test_compress_bzip2(self): """Test compressing an image with bzip2""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-c bzip2 -o %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct.bz2"))) @OETestID(1266) def test_compress_xz(self): """Test compressing an image with xz""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "--compress-with=xz -o %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct.xz"))) @OETestID(1267) def test_wrong_compressor(self): """Test how wic breaks if wrong compressor is provided""" self.assertEqual(2, runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-c wrong -o %s" % self.resultdir, ignore_status=True).status) @OETestID(1558) def test_debug_short(self): """Test -D option""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-D -o %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct"))) @OETestID(1658) def test_debug_long(self): """Test --debug option""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "--debug -o %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct"))) @OETestID(1563) def test_skip_build_check_short(self): """Test -s option""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-s -o %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct"))) @OETestID(1671) def test_skip_build_check_long(self): """Test --skip-build-check option""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "--skip-build-check " "--outdir %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct"))) @OETestID(1564) def test_build_rootfs_short(self): """Test -f option""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-f -o %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct"))) @OETestID(1656) def test_build_rootfs_long(self): """Test --build-rootfs option""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "--build-rootfs " "--outdir %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct"))) @OETestID(1268) @only_for_arch(['i586', 'i686', 'x86_64']) def test_rootfs_indirect_recipes(self): """Test usage of rootfs plugin with rootfs recipes""" runCmd("wic create directdisk-multi-rootfs " "--image-name=core-image-minimal " "--rootfs rootfs1=core-image-minimal " "--rootfs rootfs2=core-image-minimal " "--outdir %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "directdisk-multi-rootfs*.direct"))) @OETestID(1269) @only_for_arch(['i586', 'i686', 'x86_64']) def test_rootfs_artifacts(self): """Test usage of rootfs plugin with rootfs paths""" bb_vars = get_bb_vars(['STAGING_DATADIR', 'RECIPE_SYSROOT_NATIVE'], 'wic-tools') bb_vars.update(get_bb_vars(['DEPLOY_DIR_IMAGE', 'IMAGE_ROOTFS'], 'core-image-minimal')) bbvars = {key.lower(): value for key, value in bb_vars.items()} bbvars['wks'] = "directdisk-multi-rootfs" bbvars['resultdir'] = self.resultdir runCmd("wic create %(wks)s " "--bootimg-dir=%(staging_datadir)s " "--kernel-dir=%(deploy_dir_image)s " "--native-sysroot=%(recipe_sysroot_native)s " "--rootfs-dir rootfs1=%(image_rootfs)s " "--rootfs-dir rootfs2=%(image_rootfs)s " "--outdir %(resultdir)s" % bbvars) self.assertEqual(1, len(glob(self.resultdir + 
"%(wks)s-*.direct" % bbvars))) @OETestID(1661) def test_exclude_path(self): """Test --exclude-path wks option.""" oldpath = os.environ['PATH'] os.environ['PATH'] = get_bb_var("PATH", "wic-tools") try: wks_file = 'temp.wks' with open(wks_file, 'w') as wks: rootfs_dir = get_bb_var('IMAGE_ROOTFS', 'core-image-minimal') wks.write(""" part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path usr part /usr --source rootfs --ondisk mmcblk0 --fstype=ext4 --rootfs-dir %s/usr part /etc --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path bin/ --rootfs-dir %s/usr""" % (rootfs_dir, rootfs_dir)) runCmd("wic create %s -e core-image-minimal -o %s" \ % (wks_file, self.resultdir)) os.remove(wks_file) wicout = glob(self.resultdir + "%s-*direct" % 'temp') self.assertEqual(1, len(wicout)) wicimg = wicout[0] # verify partition size with wic res = runCmd("parted -m %s unit b p 2>/dev/null" % wicimg) # parse parted output which looks like this: # BYT;\n # /var/tmp/wic/build/tmpfwvjjkf_-201611101222-hda.direct:200MiB:file:512:512:msdos::;\n # 1:0.00MiB:200MiB:200MiB:ext4::;\n partlns = res.output.splitlines()[2:] self.assertEqual(3, len(partlns)) for part in [1, 2, 3]: part_file = os.path.join(self.resultdir, "selftest_img.part%d" % part) partln = partlns[part-1].split(":") self.assertEqual(7, len(partln)) start = int(partln[1].rstrip("B")) / 512 length = int(partln[3].rstrip("B")) / 512 runCmd("dd if=%s of=%s skip=%d count=%d" % (wicimg, part_file, start, length)) def extract_files(debugfs_output): """ extract file names from the output of debugfs -R 'ls -p', which looks like this: /2/040755/0/0/.//\n /2/040755/0/0/..//\n /11/040700/0/0/lost+found^M//\n /12/040755/1002/1002/run//\n /13/040755/1002/1002/sys//\n /14/040755/1002/1002/bin//\n /80/040755/1002/1002/var//\n /92/040755/1002/1002/tmp//\n """ # NOTE the occasional ^M in file names return [line.split('/')[5].strip() for line in \ debugfs_output.strip().split('/\n')] # Test partition 1, should contain the normal root directories, except # /usr. res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \ os.path.join(self.resultdir, "selftest_img.part1")) files = extract_files(res.output) self.assertIn("etc", files) self.assertNotIn("usr", files) # Partition 2, should contain common directories for /usr, not root # directories. res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \ os.path.join(self.resultdir, "selftest_img.part2")) files = extract_files(res.output) self.assertNotIn("etc", files) self.assertNotIn("usr", files) self.assertIn("share", files) # Partition 3, should contain the same as partition 2, including the bin # directory, but not the files inside it. res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \ os.path.join(self.resultdir, "selftest_img.part3")) files = extract_files(res.output) self.assertNotIn("etc", files) self.assertNotIn("usr", files) self.assertIn("share", files) self.assertIn("bin", files) res = runCmd("debugfs -R 'ls -p bin' %s 2>/dev/null" % \ os.path.join(self.resultdir, "selftest_img.part3")) files = extract_files(res.output) self.assertIn(".", files) self.assertIn("..", files) self.assertEqual(2, len(files)) for part in [1, 2, 3]: part_file = os.path.join(self.resultdir, "selftest_img.part%d" % part) os.remove(part_file) finally: os.environ['PATH'] = oldpath @OETestID(1662) def test_exclude_path_errors(self): """Test --exclude-path wks option error handling.""" wks_file = 'temp.wks' # Absolute argument. 
with open(wks_file, 'w') as wks: wks.write("part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path /usr") self.assertNotEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \ % (wks_file, self.resultdir), ignore_status=True).status) os.remove(wks_file) # Argument pointing to parent directory. with open(wks_file, 'w') as wks: wks.write("part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path ././..") self.assertNotEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \ % (wks_file, self.resultdir), ignore_status=True).status) os.remove(wks_file) class Wic2(WicTestCase): @OETestID(1496) def test_bmap_short(self): """Test generation of .bmap file -m option""" cmd = "wic create wictestdisk -e core-image-minimal -m -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct"))) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct.bmap"))) @OETestID(1655) def test_bmap_long(self): """Test generation of .bmap file --bmap option""" cmd = "wic create wictestdisk -e core-image-minimal --bmap -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct"))) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct.bmap"))) @OETestID(1347) def test_image_env(self): """Test generation of <image>.env files.""" image = 'core-image-minimal' imgdatadir = self._get_image_env_path(image) bb_vars = get_bb_vars(['IMAGE_BASENAME', 'WICVARS'], image) basename = bb_vars['IMAGE_BASENAME'] self.assertEqual(basename, image) path = os.path.join(imgdatadir, basename) + '.env' self.assertTrue(os.path.isfile(path)) wicvars = set(bb_vars['WICVARS'].split()) # filter out optional variables wicvars = wicvars.difference(('DEPLOY_DIR_IMAGE', 'IMAGE_BOOT_FILES', 'INITRD', 'INITRD_LIVE', 'ISODIR')) with open(path) as envfile: content = dict(line.split("=", 1) for line in envfile) # test if variables used by wic present in the .env file for var in wicvars: self.assertTrue(var in content, "%s is not in .env file" % var) self.assertTrue(content[var]) @OETestID(1559) def test_image_vars_dir_short(self): """Test image vars directory selection -v option""" image = 'core-image-minimal' imgenvdir = self._get_image_env_path(image) native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools") runCmd("wic create wictestdisk " "--image-name=%s -v %s -n %s -o %s" % (image, imgenvdir, native_sysroot, self.resultdir)) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct"))) @OETestID(1665) def test_image_vars_dir_long(self): """Test image vars directory selection --vars option""" image = 'core-image-minimal' imgenvdir = self._get_image_env_path(image) native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools") runCmd("wic create wictestdisk " "--image-name=%s " "--vars %s " "--native-sysroot %s " "--outdir %s" % (image, imgenvdir, native_sysroot, self.resultdir)) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct"))) @OETestID(1351) @only_for_arch(['i586', 'i686', 'x86_64']) def test_wic_image_type(self): """Test building wic images by bitbake""" config = 'IMAGE_FSTYPES += "wic"\nWKS_FILE = "wic-image-minimal"\n'\ 'MACHINE_FEATURES_append = " efi"\n' self.append_config(config) self.assertEqual(0, bitbake('wic-image-minimal').status) self.remove_config(config) bb_vars = get_bb_vars(['DEPLOY_DIR_IMAGE', 'MACHINE']) deploy_dir = bb_vars['DEPLOY_DIR_IMAGE'] machine = bb_vars['MACHINE'] prefix = os.path.join(deploy_dir, 'wic-image-minimal-%s.' 
% machine) # check if we have result image and manifests symlinks # pointing to existing files for suffix in ('wic', 'manifest'): path = prefix + suffix self.assertTrue(os.path.islink(path)) self.assertTrue(os.path.isfile(os.path.realpath(path))) @OETestID(1424) @only_for_arch(['i586', 'i686', 'x86_64']) def test_qemu(self): """Test wic-image-minimal under qemu""" config = 'IMAGE_FSTYPES += "wic"\nWKS_FILE = "wic-image-minimal"\n'\ 'MACHINE_FEATURES_append = " efi"\n' self.append_config(config) self.assertEqual(0, bitbake('wic-image-minimal').status) self.remove_config(config) with runqemu('wic-image-minimal', ssh=False) as qemu: cmd = "mount | grep '^/dev/' | cut -f1,3 -d ' ' | egrep -c -e '/dev/sda1 /boot' " \ "-e '/dev/root /|/dev/sda2 /' -e '/dev/sda3 /media' -e '/dev/sda4 /mnt'" status, output = qemu.run_serial(cmd) self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output)) self.assertEqual(output, '4') cmd = "grep UUID= /etc/fstab" status, output = qemu.run_serial(cmd) self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output)) self.assertEqual(output, 'UUID=2c71ef06-a81d-4735-9d3a-379b69c6bdba\t/media\text4\tdefaults\t0\t0') @only_for_arch(['i586', 'i686', 'x86_64']) @OETestID(1852) def test_qemu_efi(self): """Test core-image-minimal efi image under qemu""" config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "mkefidisk.wks"\n' self.append_config(config) self.assertEqual(0, bitbake('core-image-minimal ovmf').status) self.remove_config(config) with runqemu('core-image-minimal', ssh=False, runqemuparams='ovmf', image_fstype='wic') as qemu: cmd = "grep sda. /proc/partitions |wc -l" status, output = qemu.run_serial(cmd) self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output)) self.assertEqual(output, '3') @staticmethod def _make_fixed_size_wks(size): """ Create a wks of an image with a single partition. Size of the partition is set using --fixed-size flag. Returns a tuple: (path to wks file, wks image name) """ with NamedTemporaryFile("w", suffix=".wks", delete=False) as tempf: wkspath = tempf.name tempf.write("part " \ "--source rootfs --ondisk hda --align 4 --fixed-size %d " "--fstype=ext4\n" % size) wksname = os.path.splitext(os.path.basename(wkspath))[0] return wkspath, wksname @OETestID(1847) def test_fixed_size(self): """ Test creation of a simple image with partition size controlled through --fixed-size flag """ wkspath, wksname = Wic2._make_fixed_size_wks(200) runCmd("wic create %s -e core-image-minimal -o %s" \ % (wkspath, self.resultdir)) os.remove(wkspath) wicout = glob(self.resultdir + "%s-*direct" % wksname) self.assertEqual(1, len(wicout)) wicimg = wicout[0] native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools") # verify partition size with wic res = runCmd("parted -m %s unit mib p 2>/dev/null" % wicimg, native_sysroot=native_sysroot) # parse parted output which looks like this: # BYT;\n # /var/tmp/wic/build/tmpfwvjjkf_-201611101222-hda.direct:200MiB:file:512:512:msdos::;\n # 1:0.00MiB:200MiB:200MiB:ext4::;\n partlns = res.output.splitlines()[2:] self.assertEqual(1, len(partlns)) self.assertEqual("1:0.00MiB:200MiB:200MiB:ext4::;", partlns[0]) @OETestID(1848) def test_fixed_size_error(self): """ Test creation of a simple image with partition size controlled through --fixed-size flag. The size of partition is intentionally set to 1MiB in order to trigger an error in wic. 
""" wkspath, wksname = Wic2._make_fixed_size_wks(1) self.assertEqual(1, runCmd("wic create %s -e core-image-minimal -o %s" \ % (wkspath, self.resultdir), ignore_status=True).status) os.remove(wkspath) wicout = glob(self.resultdir + "%s-*direct" % wksname) self.assertEqual(0, len(wicout)) @only_for_arch(['i586', 'i686', 'x86_64']) @OETestID(1854) def test_rawcopy_plugin_qemu(self): """Test rawcopy plugin in qemu""" # build ext4 and wic images for fstype in ("ext4", "wic"): config = 'IMAGE_FSTYPES = "%s"\nWKS_FILE = "test_rawcopy_plugin.wks.in"\n' % fstype self.append_config(config) self.assertEqual(0, bitbake('core-image-minimal').status) self.remove_config(config) with runqemu('core-image-minimal', ssh=False, image_fstype='wic') as qemu: cmd = "grep sda. /proc/partitions |wc -l" status, output = qemu.run_serial(cmd) self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output)) self.assertEqual(output, '2') @OETestID(1853) def test_rawcopy_plugin(self): """Test rawcopy plugin""" img = 'core-image-minimal' machine = get_bb_var('MACHINE', img) with NamedTemporaryFile("w", suffix=".wks") as wks: wks.writelines(['part /boot --active --source bootimg-pcbios\n', 'part / --source rawcopy --sourceparams="file=%s-%s.ext4" --use-uuid\n'\ % (img, machine), 'bootloader --timeout=0 --append="console=ttyS0,115200n8"\n']) wks.flush() cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir) runCmd(cmd) wksname = os.path.splitext(os.path.basename(wks.name))[0] out = glob(self.resultdir + "%s-*direct" % wksname) self.assertEqual(1, len(out)) @OETestID(1849) def test_fs_types(self): """Test filesystem types for empty and not empty partitions""" img = 'core-image-minimal' with NamedTemporaryFile("w", suffix=".wks") as wks: wks.writelines(['part ext2 --fstype ext2 --source rootfs\n', 'part btrfs --fstype btrfs --source rootfs --size 40M\n', 'part squash --fstype squashfs --source rootfs\n', 'part swap --fstype swap --size 1M\n', 'part emptyvfat --fstype vfat --size 1M\n', 'part emptymsdos --fstype msdos --size 1M\n', 'part emptyext2 --fstype ext2 --size 1M\n', 'part emptybtrfs --fstype btrfs --size 150M\n']) wks.flush() cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir) runCmd(cmd) wksname = os.path.splitext(os.path.basename(wks.name))[0] out = glob(self.resultdir + "%s-*direct" % wksname) self.assertEqual(1, len(out)) @OETestID(1851) def test_kickstart_parser(self): """Test wks parser options""" with NamedTemporaryFile("w", suffix=".wks") as wks: wks.writelines(['part / --fstype ext3 --source rootfs --system-id 0xFF '\ '--overhead-factor 1.2 --size 100k\n']) wks.flush() cmd = "wic create %s -e core-image-minimal -o %s" % (wks.name, self.resultdir) runCmd(cmd) wksname = os.path.splitext(os.path.basename(wks.name))[0] out = glob(self.resultdir + "%s-*direct" % wksname) self.assertEqual(1, len(out)) @OETestID(1850) def test_image_bootpart_globbed(self): """Test globbed sources with image-bootpart plugin""" img = "core-image-minimal" cmd = "wic create sdimage-bootpart -e %s -o %s" % (img, self.resultdir) config = 'IMAGE_BOOT_FILES = "%s*"' % get_bb_var('KERNEL_IMAGETYPE', img) self.append_config(config) runCmd(cmd) self.remove_config(config) self.assertEqual(1, len(glob(self.resultdir + "sdimage-bootpart-*direct"))) @OETestID(1855) def test_sparse_copy(self): """Test sparse_copy with FIEMAP and SEEK_HOLE filemap APIs""" libpath = os.path.join(get_bb_var('COREBASE'), 'scripts', 'lib', 'wic') sys.path.insert(0, libpath) from filemap import FilemapFiemap, FilemapSeek, 
sparse_copy, ErrorNotSupp with NamedTemporaryFile("w", suffix=".wic-sparse") as sparse: src_name = sparse.name src_size = 1024 * 10 sparse.truncate(src_size) # write one byte to the file with open(src_name, 'r+b') as sfile: sfile.seek(1024 * 4) sfile.write(b'\x00') dest = sparse.name + '.out' # copy src file to dest using different filemap APIs for api in (FilemapFiemap, FilemapSeek, None): if os.path.exists(dest): os.unlink(dest) try: sparse_copy(sparse.name, dest, api=api) except ErrorNotSupp: continue # skip unsupported API dest_stat = os.stat(dest) self.assertEqual(dest_stat.st_size, src_size) # 8 blocks is 4K (physical sector size) self.assertEqual(dest_stat.st_blocks, 8) os.unlink(dest) @OETestID(1857) def test_wic_ls(self): """Test listing image content using 'wic ls'""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-D -o %s" % self.resultdir) images = glob(self.resultdir + "wictestdisk-*.direct") self.assertEqual(1, len(images)) sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') # list partitions result = runCmd("wic ls %s -n %s" % (images[0], sysroot)) self.assertEqual(3, len(result.output.split('\n'))) # list directory content of the first partition result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot)) self.assertEqual(6, len(result.output.split('\n'))) @OETestID(1856) def test_wic_cp(self): """Test copy files and directories to the the wic image.""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-D -o %s" % self.resultdir) images = glob(self.resultdir + "wictestdisk-*.direct") self.assertEqual(1, len(images)) sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') # list directory content of the first partition result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot)) self.assertEqual(6, len(result.output.split('\n'))) with NamedTemporaryFile("w", suffix=".wic-cp") as testfile: testfile.write("test") # copy file to the partition runCmd("wic cp %s %s:1/ -n %s" % (testfile.name, images[0], sysroot)) # check if file is there result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot)) self.assertEqual(7, len(result.output.split('\n'))) self.assertTrue(os.path.basename(testfile.name) in result.output) # prepare directory testdir = os.path.join(self.resultdir, 'wic-test-cp-dir') testsubdir = os.path.join(testdir, 'subdir') os.makedirs(os.path.join(testsubdir)) copy(testfile.name, testdir) # copy directory to the partition runCmd("wic cp %s %s:1/ -n %s" % (testdir, images[0], sysroot)) # check if directory is there result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot)) self.assertEqual(8, len(result.output.split('\n'))) self.assertTrue(os.path.basename(testdir) in result.output) @OETestID(1858) def test_wic_rm(self): """Test removing files and directories from the the wic image.""" runCmd("wic create mkefidisk " "--image-name=core-image-minimal " "-D -o %s" % self.resultdir) images = glob(self.resultdir + "mkefidisk-*.direct") self.assertEqual(1, len(images)) sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') # list directory content of the first partition result = runCmd("wic ls %s:1 -n %s" % (images[0], sysroot)) self.assertIn('\nBZIMAGE ', result.output) self.assertIn('\nEFI <DIR> ', result.output) # remove file runCmd("wic rm %s:1/bzimage -n %s" % (images[0], sysroot)) # remove directory runCmd("wic rm %s:1/efi -n %s" % (images[0], sysroot)) # check if they're removed result = runCmd("wic ls %s:1 -n %s" % (images[0], sysroot)) self.assertNotIn('\nBZIMAGE ', result.output) self.assertNotIn('\nEFI <DIR> 
', result.output) @OETestID(1922) def test_mkfs_extraopts(self): """Test wks option --mkfs-extraopts for empty and not empty partitions""" img = 'core-image-minimal' with NamedTemporaryFile("w", suffix=".wks") as wks: wks.writelines( ['part ext2 --fstype ext2 --source rootfs --mkfs-extraopts "-D -F -i 8192"\n', "part btrfs --fstype btrfs --source rootfs --size 40M --mkfs-extraopts='--quiet'\n", 'part squash --fstype squashfs --source rootfs --mkfs-extraopts "-no-sparse -b 4096"\n', 'part emptyvfat --fstype vfat --size 1M --mkfs-extraopts "-S 1024 -s 64"\n', 'part emptymsdos --fstype msdos --size 1M --mkfs-extraopts "-S 1024 -s 64"\n', 'part emptyext2 --fstype ext2 --size 1M --mkfs-extraopts "-D -F -i 8192"\n', 'part emptybtrfs --fstype btrfs --size 100M --mkfs-extraopts "--mixed -K"\n']) wks.flush() cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir) runCmd(cmd) wksname = os.path.splitext(os.path.basename(wks.name))[0] out = glob(self.resultdir + "%s-*direct" % wksname) self.assertEqual(1, len(out)) def test_expand_mbr_image(self): """Test wic write --expand command for mbr image""" # build an image config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "directdisk.wks"\n' self.append_config(config) self.assertEqual(0, bitbake('core-image-minimal').status) # get path to the image bb_vars = get_bb_vars(['DEPLOY_DIR_IMAGE', 'MACHINE']) deploy_dir = bb_vars['DEPLOY_DIR_IMAGE'] machine = bb_vars['MACHINE'] image_path = os.path.join(deploy_dir, 'core-image-minimal-%s.wic' % machine) self.remove_config(config) try: # expand image to 1G new_image_path = None with NamedTemporaryFile(mode='wb', suffix='.wic.exp', dir=deploy_dir, delete=False) as sparse: sparse.truncate(1024 ** 3) new_image_path = sparse.name sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') cmd = "wic write -n %s --expand 1:0 %s %s" % (sysroot, image_path, new_image_path) runCmd(cmd) # check if partitions are expanded orig = runCmd("wic ls %s -n %s" % (image_path, sysroot)) exp = runCmd("wic ls %s -n %s" % (new_image_path, sysroot)) orig_sizes = [int(line.split()[3]) for line in orig.output.split('\n')[1:]] exp_sizes = [int(line.split()[3]) for line in exp.output.split('\n')[1:]] self.assertEqual(orig_sizes[0], exp_sizes[0]) # first partition is not resized self.assertTrue(orig_sizes[1] < exp_sizes[1]) # Check if all free space is partitioned result = runCmd("%s/usr/sbin/sfdisk -F %s" % (sysroot, new_image_path)) self.assertTrue("0 B, 0 bytes, 0 sectors" in result.output) os.rename(image_path, image_path + '.bak') os.rename(new_image_path, image_path) # Check if it boots in qemu with runqemu('core-image-minimal', ssh=False) as qemu: cmd = "ls /etc/" status, output = qemu.run_serial('true') self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output)) finally: if os.path.exists(new_image_path): os.unlink(new_image_path) if os.path.exists(image_path + '.bak'): os.rename(image_path + '.bak', image_path) def test_wic_ls_ext(self): """Test listing content of the ext partition using 'wic ls'""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-D -o %s" % self.resultdir) images = glob(self.resultdir + "wictestdisk-*.direct") self.assertEqual(1, len(images)) sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') # list directory content of the second ext4 partition result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot)) self.assertTrue(set(['bin', 'home', 'proc', 'usr', 'var', 'dev', 'lib', 'sbin']).issubset( set(line.split()[-1] for line in result.output.split('\n') if line))) def 
test_wic_cp_ext(self): """Test copy files and directories to the ext partition.""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-D -o %s" % self.resultdir) images = glob(self.resultdir + "wictestdisk-*.direct") self.assertEqual(1, len(images)) sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') # list directory content of the ext4 partition result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot)) dirs = set(line.split()[-1] for line in result.output.split('\n') if line) self.assertTrue(set(['bin', 'home', 'proc', 'usr', 'var', 'dev', 'lib', 'sbin']).issubset(dirs)) with NamedTemporaryFile("w", suffix=".wic-cp") as testfile: testfile.write("test") # copy file to the partition runCmd("wic cp %s %s:2/ -n %s" % (testfile.name, images[0], sysroot)) # check if file is there result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot)) newdirs = set(line.split()[-1] for line in result.output.split('\n') if line) self.assertEqual(newdirs.difference(dirs), set([os.path.basename(testfile.name)])) def test_wic_rm_ext(self): """Test removing files from the ext partition.""" runCmd("wic create mkefidisk " "--image-name=core-image-minimal " "-D -o %s" % self.resultdir) images = glob(self.resultdir + "mkefidisk-*.direct") self.assertEqual(1, len(images)) sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') # list directory content of the /etc directory on ext4 partition result = runCmd("wic ls %s:2/etc/ -n %s" % (images[0], sysroot)) self.assertTrue('fstab' in [line.split()[-1] for line in result.output.split('\n') if line]) # remove file runCmd("wic rm %s:2/etc/fstab -n %s" % (images[0], sysroot)) # check if it's removed result = runCmd("wic ls %s:2/etc/ -n %s" % (images[0], sysroot)) self.assertTrue('fstab' not in [line.split()[-1] for line in result.output.split('\n') if line])
[]
[]
[ "PATH" ]
[]
["PATH"]
python
1
0
utils/config.go
package utils

import (
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
)

type TCPConfig struct {
	IP       string
	Port     int
	CertPath string
	KeyPath  string
	BufSize  uint32
}

type DBConfig struct {
	IP       string
	Port     int
	DbName   string
	User     string
	Password string
}

var Config struct {
	DB        DBConfig
	TCP       TCPConfig
	IP        string
	Port      int
	ServerId  int
	LogsDir   string
	SrcDir    string
	PublicUrl string
}

var OauthConfig struct {
	Google struct {
		ClientID     string
		ClientSecret string
	}
}

var ServerType = "lb"

func LoadConfig(configPath string) {
	configFile, err := ioutil.ReadFile(configPath + "/config.json")
	if err != nil {
		LogCritical(fmt.Errorf("No config file was supplied"), "read config file")
	}
	err = json.Unmarshal(configFile, &Config)
	if err != nil {
		LogCritical(fmt.Errorf("Wrong config file format"), "unmarshal config file")
	}

	if ip := os.Getenv("IP"); ip != "" {
		Config.IP = ip
	}
	if port := os.Getenv("PORT"); port != "" {
		Config.Port, _ = strconv.Atoi(port)
	}

	flag.Usage = func() {
		fmt.Fprintf(flag.CommandLine.Output(), "Usage:\n %v [options]\n\nParameters:\n\n", os.Args[0])
		flag.PrintDefaults()
	}

	flag.StringVar(&Config.TCP.IP, "tcp-ip", Config.TCP.IP, "tcp server ip")
	flag.IntVar(&Config.TCP.Port, "tcp-port", Config.TCP.Port, "tcp server port")
	flag.StringVar(&Config.TCP.CertPath, "tcp-cert", Config.TCP.CertPath, "tcp server cert path")
	flag.StringVar(&Config.TCP.KeyPath, "tcp-key", Config.TCP.KeyPath, "tcp server key path")

	flag.StringVar(&Config.DB.IP, "db-ip", Config.DB.IP, "database server ip")
	flag.IntVar(&Config.DB.Port, "db-port", Config.DB.Port, "database server port")
	flag.StringVar(&Config.DB.DbName, "db-name", Config.DB.DbName, "database name")
	flag.StringVar(&Config.DB.User, "db-user", Config.DB.User, "database username")
	flag.StringVar(&Config.DB.Password, "db-pwd", Config.DB.Password, "database password")

	flag.StringVar(&Config.IP, "ip", Config.IP, "http server ip")
	flag.IntVar(&Config.Port, "port", Config.Port, "http server port")
	flag.IntVar(&Config.ServerId, "id", Config.ServerId, "tcp server id")
	flag.StringVar(&Config.LogsDir, "logs-dir", Config.LogsDir, "logs directory")
	flag.StringVar(&Config.SrcDir, "files-dir", Config.SrcDir, "directory to store files(for file servers only)")
	flag.StringVar(&Config.PublicUrl, "public-url", Config.PublicUrl, "main http server public domain name")
	flag.StringVar(&ServerType, "server-type", ServerType, "server type(lb, http, api, file)")

	flag.Parse()

	configFile, err = ioutil.ReadFile(configPath + "/oauth.json")
	if err != nil {
		LogCritical(fmt.Errorf("No OAuth config file was supplied"), "read OAuth config file")
	}
	err = json.Unmarshal(configFile, &OauthConfig)
	if err != nil {
		LogCritical(fmt.Errorf("Wrong OAuth config file format"), "unmarshal OAuth config file")
	}
}
[ "\"IP\"", "\"PORT\"" ]
[]
[ "PORT", "IP" ]
[]
["PORT", "IP"]
go
2
0
libcore/luni/src/test/java/org/apache/harmony/luni/tests/java/lang/RuntimeTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.harmony.luni.tests.java.lang; import dalvik.annotation.KnownFailure; import dalvik.annotation.TestLevel; import dalvik.annotation.TestTargetNew; import dalvik.annotation.TestTargetClass; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.UnsupportedEncodingException; import java.security.Permission; import java.util.Arrays; import java.util.Vector; import tests.support.resource.Support_Resources; import tests.util.TestEnvironment; @TestTargetClass(Runtime.class) public class RuntimeTest extends junit.framework.TestCase { Runtime r = Runtime.getRuntime(); InputStream is; String s; static boolean flag = false; static boolean ranFinalize = false; int statusCode = -1; class HasFinalizer { String internalString; HasFinalizer(String s) { internalString = s; } @Override protected void finalize() { internalString = "hit"; } } @Override protected void finalize() { if (flag) ranFinalize = true; } protected RuntimeTest createInstance() { return new RuntimeTest("FT"); } @Override protected void tearDown() throws Exception { TestEnvironment.reset(); super.tearDown(); } /** * @tests java.lang.Runtime#exec(java.lang.String) */ @TestTargetNew( level = TestLevel.ADDITIONAL, notes = "", method = "exec", args = {java.lang.String.class} ) public void test_exec() { /* successful exec's are tested by java.lang.Process */ try { Runtime.getRuntime().exec("AnInexistentProgram"); fail("failed to throw IOException when exec'ed inexistent program"); } catch (IOException e) { /* expected */ } } /** * @tests java.lang.Runtime#freeMemory() */ @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "freeMemory", args = {} ) public void test_freeMemory() { // Heap might grow or do GC at any time, // so we can't really test a lot. Hence // we are just doing some basic sanity // checks here. 
assertTrue("must have some free memory", r.freeMemory() > 0); assertTrue("must not exceed total memory", r.freeMemory() < r.totalMemory()); long before = r.totalMemory() - r.freeMemory(); Vector<byte[]> v = new Vector<byte[]>(); for (int i = 1; i < 10; i++) { v.addElement(new byte[10000]); } long after = r.totalMemory() - r.freeMemory(); assertTrue("free memory must change with allocations", after != before); } /** * @tests java.lang.Runtime#gc() */ @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "gc", args = {} ) public void test_gc() { // Test for method void java.lang.Runtime.gc() try { r.gc(); // ensure all garbage objects have been collected r.gc(); // two GCs force collection phase to complete long firstRead = r.totalMemory() - r.freeMemory(); Vector<StringBuffer> v = new Vector<StringBuffer>(); for (int i = 1; i < 10; i++) v.addElement(new StringBuffer(10000)); long secondRead = r.totalMemory() - r.freeMemory(); v = null; r.gc(); r.gc(); assertTrue("object memory did not grow", secondRead > firstRead); assertTrue("space was not reclaimed", (r.totalMemory() - r .freeMemory()) < secondRead); } catch (Throwable t) { System.out.println("Out of memory during gc test"); r.gc(); r.gc(); } } /** * @tests java.lang.Runtime#getRuntime() */ @TestTargetNew( level = TestLevel.COMPLETE, notes = "getRuntime method is verified in initial setup for other tests.", method = "getRuntime", args = {} ) public void test_getRuntime() { // Test for method java.lang.Runtime java.lang.Runtime.getRuntime() assertNotNull(Runtime.getRuntime()); } /** * @tests java.lang.Runtime#runFinalization() */ @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "runFinalization", args = {} ) public void test_runFinalization() { // Test for method void java.lang.Runtime.runFinalization() flag = true; createInstance(); int count = 10; // the gc below likely bogosifies the test, but will have to do for // the moment while (!ranFinalize && count-- > 0) { r.gc(); r.runFinalization(); } assertTrue("Failed to run finalization", ranFinalize); } /** * @tests java.lang.Runtime#totalMemory() */ @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "totalMemory", args = {} ) public void test_totalMemory() { // Test for method long java.lang.Runtime.totalMemory() assertTrue("totalMemory returned nonsense value", r.totalMemory() >= r .freeMemory()); } @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "addShutdownHook", args = {java.lang.Thread.class} ) public void test_addShutdownHook() { Thread thrException = new Thread () { public void run() { try { Runtime.getRuntime().addShutdownHook(this); fail("IllegalStateException was not thrown."); } catch(IllegalStateException ise) { //expected } } }; try { Runtime.getRuntime().addShutdownHook(thrException); } catch (Throwable t) { fail(t.getMessage()); } try { Runtime.getRuntime().addShutdownHook(thrException); fail("IllegalArgumentException was not thrown."); } catch(IllegalArgumentException iae) { // expected } SecurityManager sm = new SecurityManager() { public void checkPermission(Permission perm) { if (perm.getName().equals("shutdownHooks")) { throw new SecurityException(); } } }; // remove previously added hook so we're not depending on the priority // of the Exceptions to be thrown. 
Runtime.getRuntime().removeShutdownHook(thrException); SecurityManager oldSm = System.getSecurityManager(); System.setSecurityManager(sm); try { Runtime.getRuntime().addShutdownHook(thrException); fail("SecurityException should be thrown."); } catch (SecurityException e) { // expected } finally { System.setSecurityManager(oldSm); } try { Thread.currentThread().sleep(1000); } catch (InterruptedException ie) { } } @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "availableProcessors", args = {} ) public void test_availableProcessors() { assertTrue(Runtime.getRuntime().availableProcessors() > 0); } @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "exec", args = {java.lang.String.class, java.lang.String[].class} ) public void test_execLjava_lang_StringLjava_lang_StringArray() { String [] envp = getEnv(); checkExec(0, envp, null); checkExec(0, null, null); try { Runtime.getRuntime().exec((String)null, null); fail("NullPointerException should be thrown."); } catch(IOException ioe) { fail("IOException was thrown."); } catch(NullPointerException npe) { //expected } SecurityManager sm = new SecurityManager() { public void checkPermission(Permission perm) { if (perm.getName().equals("checkExec")) { throw new SecurityException(); } } public void checkExec(String cmd) { throw new SecurityException(); } }; SecurityManager oldSm = System.getSecurityManager(); System.setSecurityManager(sm); try { Runtime.getRuntime().exec("ls", envp); fail("SecurityException should be thrown."); } catch (SecurityException e) { // expected } catch (IOException e) { fail("IOException was thrown."); } finally { System.setSecurityManager(oldSm); } try { Runtime.getRuntime().exec("", envp); fail("IllegalArgumentException should be thrown."); } catch(IllegalArgumentException iae) { //expected } catch (IOException e) { fail("IOException was thrown."); } } @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "exec", args = {java.lang.String[].class, java.lang.String[].class} ) public void test_execLjava_lang_StringArrayLjava_lang_StringArray() { String [] envp = getEnv(); checkExec(4, envp, null); checkExec(4, null, null); try { Runtime.getRuntime().exec((String[])null, null); fail("NullPointerException should be thrown."); } catch(IOException ioe) { fail("IOException was thrown."); } catch(NullPointerException npe) { //expected } try { Runtime.getRuntime().exec(new String[]{"ls", null}, null); fail("NullPointerException should be thrown."); } catch(IOException ioe) { fail("IOException was thrown."); } catch(NullPointerException npe) { //expected } SecurityManager sm = new SecurityManager() { public void checkPermission(Permission perm) { if (perm.getName().equals("checkExec")) { throw new SecurityException(); } } public void checkExec(String cmd) { throw new SecurityException(); } }; SecurityManager oldSm = System.getSecurityManager(); System.setSecurityManager(sm); try { Runtime.getRuntime().exec(new String[]{"ls"}, envp); fail("SecurityException should be thrown."); } catch (SecurityException e) { // expected } catch (IOException e) { fail("IOException was thrown."); } finally { System.setSecurityManager(oldSm); } try { Runtime.getRuntime().exec(new String[]{}, envp); fail("IndexOutOfBoundsException should be thrown."); } catch(IndexOutOfBoundsException ioob) { //expected } catch (IOException e) { fail("IOException was thrown."); } try { Runtime.getRuntime().exec(new String[]{""}, envp); fail("IOException should be thrown."); } catch (IOException e) { /* expected */ } } 
@TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "exec", args = {java.lang.String.class, java.lang.String[].class, java.io.File.class} ) public void test_execLjava_lang_StringLjava_lang_StringArrayLjava_io_File() { String [] envp = getEnv(); File workFolder = Support_Resources.createTempFolder(); checkExec(2, envp, workFolder); checkExec(2, null, null); try { Runtime.getRuntime().exec((String)null, null, workFolder); fail("NullPointerException should be thrown."); } catch(IOException ioe) { fail("IOException was thrown."); } catch(NullPointerException npe) { //expected } SecurityManager sm = new SecurityManager() { public void checkPermission(Permission perm) { if (perm.getName().equals("checkExec")) { throw new SecurityException(); } } public void checkExec(String cmd) { throw new SecurityException(); } }; SecurityManager oldSm = System.getSecurityManager(); System.setSecurityManager(sm); try { Runtime.getRuntime().exec("ls", envp, workFolder); fail("SecurityException should be thrown."); } catch (SecurityException e) { // expected } catch (IOException e) { fail("IOException was thrown."); } finally { System.setSecurityManager(oldSm); } try { Runtime.getRuntime().exec("", envp, workFolder); fail("SecurityException should be thrown."); } catch(IllegalArgumentException iae) { //expected } catch (IOException e) { fail("IOException was thrown."); } } @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "exec", args = {java.lang.String[].class, java.lang.String[].class, java.io.File.class} ) public void test_execLjava_lang_StringArrayLjava_lang_StringArrayLjava_io_File() { String [] envp = getEnv(); File workFolder = Support_Resources.createTempFolder(); checkExec(5, envp, workFolder); checkExec(5, null, null); try { Runtime.getRuntime().exec((String[])null, null, workFolder); fail("NullPointerException should be thrown."); } catch(IOException ioe) { fail("IOException was thrown."); } catch(NullPointerException npe) { //expected } try { Runtime.getRuntime().exec(new String[]{"ls", null}, null, workFolder); fail("NullPointerException should be thrown."); } catch(IOException ioe) { fail("IOException was thrown."); } catch(NullPointerException npe) { //expected } SecurityManager sm = new SecurityManager() { public void checkPermission(Permission perm) { if (perm.getName().equals("checkExec")) { throw new SecurityException(); } } public void checkExec(String cmd) { throw new SecurityException(); } }; SecurityManager oldSm = System.getSecurityManager(); System.setSecurityManager(sm); try { Runtime.getRuntime().exec(new String[] {"ls"}, envp, workFolder); fail("SecurityException should be thrown."); } catch (SecurityException e) { // expected } catch (IOException e) { fail("IOException was thrown."); } finally { System.setSecurityManager(oldSm); } try { Runtime.getRuntime().exec(new String[]{""}, envp, workFolder); fail("IOException should be thrown."); } catch (IOException e) { //expected } } String [] getEnv() { Object [] valueSet = System.getenv().values().toArray(); Object [] keySet = System.getenv().keySet().toArray(); String [] envp = new String[valueSet.length]; for(int i = 0; i < envp.length; i++) { envp[i] = keySet[i] + "=" + valueSet[i]; } return envp; } void checkExec(int testCase, String [] envp, File file) { String dirName = "Test_Directory"; String dirParentName = "Parent_Directory"; File resources = Support_Resources.createTempFolder(); String folder = resources.getAbsolutePath() + "/" + dirName; String folderWithParent = resources.getAbsolutePath() + "/" + 
dirParentName + "/" + dirName; String command = "mkdir " + folder; String [] commandArguments = {"mkdir", folder}; try { Process proc = null; switch(testCase) { case 0: proc = Runtime.getRuntime().exec(command, envp); break; case 1: proc = Runtime.getRuntime().exec(command); break; case 2: proc = Runtime.getRuntime().exec(command, envp, file); break; case 3: proc = Runtime.getRuntime().exec(commandArguments); break; case 4: proc = Runtime.getRuntime().exec(commandArguments, envp); break; case 5: proc = Runtime.getRuntime().exec(commandArguments, envp, file); break; } assertNotNull(proc); try { Thread.sleep(3000); } catch(InterruptedException ie) { fail("InterruptedException was thrown."); } File f = new File(folder); assertTrue(f.exists()); if(f.exists()) { f.delete(); } } catch(IOException io) { fail("IOException was thrown."); } } @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "exec", args = {java.lang.String.class} ) public void test_execLjava_lang_String() { checkExec(1, null, null); try { Runtime.getRuntime().exec((String) null); fail("NullPointerException was not thrown."); } catch(NullPointerException npe) { //expected } catch (IOException e) { fail("IOException was thrown."); } try { Runtime.getRuntime().exec(""); fail("IllegalArgumentException was not thrown."); } catch(IllegalArgumentException iae) { //expected } catch (IOException e) { fail("IOException was thrown."); } SecurityManager sm = new SecurityManager() { public void checkPermission(Permission perm) { } public void checkExec(String cmd) { throw new SecurityException(); } }; SecurityManager oldSm = System.getSecurityManager(); System.setSecurityManager(sm); try { Runtime.getRuntime().exec("ls"); fail("SecurityException should be thrown."); } catch (SecurityException e) { // expected } catch (IOException ioe) { fail("IOException was thrown."); } finally { System.setSecurityManager(oldSm); } } @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "exec", args = {java.lang.String[].class} ) public void test_execLjava_lang_StringArray() { checkExec(3, null, null); try { Runtime.getRuntime().exec((String[]) null); fail("NullPointerException was not thrown."); } catch(NullPointerException npe) { //expected } catch (IOException e) { fail("IOException was thrown."); } try { Runtime.getRuntime().exec(new String[]{"ls", null}); fail("NullPointerException was not thrown."); } catch(NullPointerException npe) { //expected } catch (IOException e) { fail("IOException was thrown."); } try { Runtime.getRuntime().exec(new String[]{}); fail("IndexOutOfBoundsException was not thrown."); } catch(IndexOutOfBoundsException iobe) { //expected } catch (IOException e) { fail("IOException was thrown."); } SecurityManager sm = new SecurityManager() { public void checkPermission(Permission perm) { } public void checkExec(String cmd) { throw new SecurityException(); } }; SecurityManager oldSm = System.getSecurityManager(); System.setSecurityManager(sm); try { Runtime.getRuntime().exec(new String[]{"ls"}); fail("SecurityException should be thrown."); } catch (SecurityException e) { // expected } catch (IOException ioe) { fail("IOException was thrown."); } finally { System.setSecurityManager(oldSm); } try { Runtime.getRuntime().exec(new String[]{""}); fail("IOException should be thrown."); } catch (IOException e) { //expected } } @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "runFinalizersOnExit", args = {boolean.class} ) public void test_runFinalizersOnExit() { 
Runtime.getRuntime().runFinalizersOnExit(true); SecurityManager sm = new SecurityManager() { public void checkPermission(Permission perm) { } public void checkExit(int status) { throw new SecurityException(); } }; SecurityManager oldSm = System.getSecurityManager(); System.setSecurityManager(sm); try { Runtime.getRuntime().runFinalizersOnExit(true); fail("SecurityException should be thrown."); } catch (SecurityException e) { // expected } finally { System.setSecurityManager(oldSm); } } @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "removeShutdownHook", args = {java.lang.Thread.class} ) public void test_removeShutdownHookLjava_lang_Thread() { Thread thr1 = new Thread () { public void run() { try { Runtime.getRuntime().addShutdownHook(this); } catch(IllegalStateException ise) { fail("IllegalStateException shouldn't be thrown."); } } }; try { Runtime.getRuntime().addShutdownHook(thr1); Runtime.getRuntime().removeShutdownHook(thr1); } catch (Throwable t) { fail(t.getMessage()); } Thread thr2 = new Thread () { public void run() { try { Runtime.getRuntime().removeShutdownHook(this); fail("IllegalStateException wasn't thrown."); } catch(IllegalStateException ise) { //expected } } }; try { Runtime.getRuntime().addShutdownHook(thr2); } catch (Throwable t) { fail(t.getMessage()); } SecurityManager sm = new SecurityManager() { public void checkPermission(Permission perm) { if (perm.getName().equals("shutdownHooks")) { throw new SecurityException(); } } }; SecurityManager oldSm = System.getSecurityManager(); System.setSecurityManager(sm); try { Runtime.getRuntime().addShutdownHook(thr1); fail("SecurityException should be thrown."); } catch (SecurityException e) { // expected } finally { System.setSecurityManager(oldSm); } try { Thread.currentThread().sleep(1000); } catch (InterruptedException ie) { } } @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "maxMemory", args = {} ) public void test_maxMemory() { assertTrue(Runtime.getRuntime().maxMemory() < Long.MAX_VALUE); } @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "traceInstructions", args = {boolean.class} ) public void test_traceInstructions() { Runtime.getRuntime().traceInstructions(false); Runtime.getRuntime().traceInstructions(true); Runtime.getRuntime().traceInstructions(false); } @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "traceMethodCalls", args = {boolean.class} ) @KnownFailure("Fails in CTS but passes under run-core-tests") public void test_traceMethodCalls() { try { Runtime.getRuntime().traceMethodCalls(false); Runtime.getRuntime().traceMethodCalls(true); Runtime.getRuntime().traceMethodCalls(false); } catch (RuntimeException ex) { // Slightly ugly: we default to the SD card, which may or may not // be there. So we also accept the error case as a success, since // it means we actually did enable tracing (or tried to). 
if (!"file open failed".equals(ex.getMessage())) { throw ex; } } } @SuppressWarnings("deprecation") @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "getLocalizedInputStream", args = {java.io.InputStream.class} ) public void test_getLocalizedInputStream() { String simpleString = "Heart \u2f3c"; byte[] expected = {72, 0, 101, 0, 97, 0, 114, 0, 116, 0, 32, 0, 60, 47}; byte[] returned = new byte[expected.length]; System.setProperty("file.encoding", "UTF-16LE"); try { ByteArrayInputStream bais = new ByteArrayInputStream( simpleString.getBytes("UTF-8")); InputStream lcIn = Runtime.getRuntime().getLocalizedInputStream(bais); try { lcIn.read(returned); } catch(IOException ioe) { fail("IOException was thrown."); } assertTrue("wrong result for String: " + simpleString, Arrays.equals(expected, returned)); } catch (UnsupportedEncodingException e) { fail("UnsupportedEncodingException was thrown."); } } @SuppressWarnings("deprecation") @TestTargetNew( level = TestLevel.SUFFICIENT, notes = "", method = "getLocalizedOutputStream", args = {java.io.OutputStream.class} ) public void test_getLocalizedOutputStream() { String simpleString = "Heart \u2f3c"; byte[] expected = {72, 0, 101, 0, 97, 0, 114, 0, 116, 0, 32, 0, 60, 47}; byte[] returned; String oldEncoding = System.getProperty("file.encoding"); System.setProperty("file.encoding", "UTF-16LE"); try { ByteArrayOutputStream out = new ByteArrayOutputStream(); OutputStream lcOut = Runtime.getRuntime().getLocalizedOutputStream(out); try { lcOut.write(simpleString.getBytes("UTF-8")); lcOut.flush(); lcOut.close(); } catch(IOException ioe) { fail("IOException was thrown."); } returned = out.toByteArray(); assertTrue("wrong result for String: " + returned.toString() + " expected string: " + expected.toString(), Arrays.equals(expected, returned)); } finally { System.setProperty("file.encoding", oldEncoding); } } @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "load", args = {java.lang.String.class} ) public void test_load() { try { Runtime.getRuntime().load("nonExistentLibrary"); fail("UnsatisfiedLinkError was not thrown."); } catch(UnsatisfiedLinkError ule) { //expected } try { Runtime.getRuntime().load(null); fail("NullPointerException was not thrown."); } catch(NullPointerException npe) { //expected } SecurityManager sm = new SecurityManager() { public void checkPermission(Permission perm) { } public void checkLink(String lib) { if (lib.endsWith("libjvm.so")) { throw new SecurityException(); } } }; SecurityManager oldSm = System.getSecurityManager(); System.setSecurityManager(sm); try { Runtime.getRuntime().load("libjvm.so"); fail("SecurityException should be thrown."); } catch (SecurityException e) { // expected } finally { System.setSecurityManager(oldSm); } } @TestTargetNew( level = TestLevel.COMPLETE, notes = "", method = "loadLibrary", args = {java.lang.String.class} ) public void test_loadLibrary() { try { Runtime.getRuntime().loadLibrary("nonExistentLibrary"); fail("UnsatisfiedLinkError was not thrown."); } catch(UnsatisfiedLinkError ule) { //expected } try { Runtime.getRuntime().loadLibrary(null); fail("NullPointerException was not thrown."); } catch(NullPointerException npe) { //expected } SecurityManager sm = new SecurityManager() { public void checkPermission(Permission perm) { } public void checkLink(String lib) { if (lib.endsWith("libjvm.so")) { throw new SecurityException(); } } }; SecurityManager oldSm = System.getSecurityManager(); System.setSecurityManager(sm); try { Runtime.getRuntime().loadLibrary("libjvm.so"); 
fail("SecurityException should be thrown."); } catch (SecurityException e) { // expected } finally { System.setSecurityManager(oldSm); } } @TestTargetNew( level = TestLevel.SUFFICIENT, notes = "This method never returns normally, " + "and can't be tested. Only SecurityException can be checked.", method = "exit", args = {int.class} ) public void test_exit() { statusCode = -1; SecurityManager sm = new SecurityManager() { public void checkPermission(Permission perm) { } public void checkExit(int status) { statusCode = status; throw new SecurityException(); } }; SecurityManager oldSm = System.getSecurityManager(); System.setSecurityManager(sm); try { r.exit(0); fail("SecurityException should be thrown."); } catch (SecurityException e) { // expected } finally { assertTrue("Incorrect status code was received: " + statusCode, statusCode == 0); System.setSecurityManager(oldSm); } } @TestTargetNew( level = TestLevel.SUFFICIENT, notes = "Can't be tested. This method terminates the currently " + "running VM. Only SecurityException can be checked.", method = "halt", args = {int.class} ) public void test_halt() { statusCode = -1; SecurityManager sm = new SecurityManager() { public void checkPermission(Permission perm) { } public void checkExit(int status) { statusCode = status; throw new SecurityException(); } }; SecurityManager oldSm = System.getSecurityManager(); System.setSecurityManager(sm); try { r.halt(0); fail("SecurityException should be thrown."); } catch (SecurityException e) { // expected } finally { assertTrue("Incorrect status code was received: " + statusCode, statusCode == 0); System.setSecurityManager(oldSm); } } public RuntimeTest() { } public RuntimeTest(String name) { super(name); } }
[]
[]
[]
[]
[]
java
0
0
src/main/java/com/google/googleidentity/oauth2/endpoint/RiscDocEndpoint.java
/*
 Copyright 2020 Google LLC

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

     https://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
*/

package com.google.googleidentity.oauth2.endpoint;

import com.google.inject.Singleton;
import java.io.IOException;
import java.util.logging.Logger;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import net.minidev.json.JSONObject;

/**
 * Provide risc configuration including issuer and public key url
 */
@Singleton
public class RiscDocEndpoint extends HttpServlet {

  private static final long serialVersionUID = 13L;

  private static final Logger log = Logger.getLogger("RiscDocEndpoint");

  private JSONObject json;

  // Set it in appengine-web.xml
  private static String WEB_URL =
      System.getenv("WEB_URL") == null ? "localhost:8080" : System.getenv("WEB_URL");

  public void init() throws ServletException {
    json = new JSONObject();
    json.appendField("issuer", WEB_URL + "/oauth2/risc");
    json.appendField("jwks_uri", WEB_URL + "/oauth2/risc/key");
  }

  protected void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    response.getWriter().println(json.toJSONString());
    response.getWriter().flush();
  }
}
[ "\"WEB_URL\"", "\"WEB_URL\"" ]
[]
[ "WEB_URL" ]
[]
["WEB_URL"]
java
1
0
acareeg/visualization.py
# Authors: Christian O'Reilly <christian.oreilly@gmail.com> # License: MIT import matplotlib.pyplot as plt from mne.externals.pymatreader import read_mat from mne.transforms import apply_trans import numpy as np import nibabel as nib import pandas as pd from scipy.spatial import KDTree from nibabel.freesurfer.io import read_geometry import trimesh import matplotlib as mpl import matplotlib.cm as cm import open3d import mne import pyrender from pathlib import Path import os from .infantmodels import get_bem_artifacts def get_plotting_meshes(amplitudes, vertices, age=None, template=None, norm=None, cmap=None): if template is None: if age is not None: template = f"ANTS{age}-0Months3T" else: raise ValueError("The age or the template must be specified.") montage, trans, bem_model, bem_solution, surface_src = get_bem_artifacts(template) df = split_vertices_by_hemi(vertices, amplitudes, surface_src) vertices, triangles = get_pial_meshes(6) hemi_dict = {"lh": 0, "rh": 1} if norm is None: norm = mpl.colors.Normalize(vmin=min(df["lh"].amplitude.min(), df["rh"].amplitude.min()), vmax=max(df["lh"].amplitude.max(), df["rh"].amplitude.max())) if cmap is None: cmap = plt.get_cmap("Reds") meshes = {} for hemi in hemi_dict: vertno = surface_src[hemi_dict[hemi]]["vertno"] rr = surface_src[hemi_dict[hemi]]["rr"] pos = rr[vertno[df[hemi].vertice.values]] * 1000 meshes[hemi] = trimesh.Trimesh(vertices=vertices[hemi], faces=triangles[hemi]) points = KDTree(pos).query(meshes[hemi].vertices, 1)[1] colors = plt.cm.ScalarMappable(norm=norm, cmap=cmap).to_rgba(df[hemi].amplitude.values[points]) meshes[hemi].visual.vertex_colors = np.round(colors * 255).astype(np.uint8) return meshes def split_vertices_by_hemi(vertices, amplitudes, surface_src): offset = surface_src[0]["nuse"] return {"lh": pd.DataFrame(dict(vertice=vertices[vertices < offset], amplitude=amplitudes[vertices < offset])), "rh": pd.DataFrame(dict(vertice=vertices[vertices >= offset] - offset, amplitude=amplitudes[vertices >= offset]))} def get_atlas_info(age=None, template=None, subjects_dir=None): if template is None: if age is not None: template = f"ANTS{age}-0Months3T" else: raise ValueError("The age or the template must be specified.") if subjects_dir is None: subjects_dir = Path(os.environ["SUBJECTS_DIR"]) atlas = subjects_dir / template / "mri" / "aparc+aseg.mgz" epi_img = nib.load(str(atlas)) epi_img_data = epi_img.get_fdata() # zeroing non cortical regions name_regions = mne.source_space._get_lut()["name"] no_regions = mne.source_space._get_lut()["id"] cortical_nos = [no for no, name in zip(no_regions, name_regions) if "ctx" in name] for no in np.unique(epi_img_data): if no not in cortical_nos: epi_img_data[epi_img_data == no] = 0 vox2ras_tkr = epi_img.header.get_vox2ras_tkr() vox2ras = epi_img.header.get_vox2ras() pos_atlas = apply_trans(vox2ras_tkr, np.array(np.where(epi_img_data)).T) kdtree = KDTree(pos_atlas) return kdtree, vox2ras_tkr, vox2ras, epi_img_data def get_pial_meshes(age=None, template=None, face_count=20000, subjects_dir=None): if template is None: if age is not None: template = f"ANTS{age}-0Months3T" else: raise ValueError("The age or the template must be specified.") if subjects_dir is None: subjects_dir = Path(os.environ["SUBJECTS_DIR"]) mesh_pattern = "{}/{}/surf/{}.pial" vertices = {} faces = {} for hemi in ["lh", "rh"]: vertices_hemi, faces_hemi = read_geometry(mesh_pattern.format(subjects_dir, template, hemi)) open3d_mesh = open3d.geometry.TriangleMesh(vertices=open3d.utility.Vector3dVector(vertices_hemi), 
triangles=open3d.utility.Vector3iVector(faces_hemi)) mesh = open3d_mesh.simplify_quadric_decimation(int(face_count/2)) vertices[hemi] = np.asarray(mesh.vertices) faces[hemi] = np.asarray(mesh.triangles) return vertices, faces def get_source_model(template, source_model, subjects_dir=None): path = "/home/christian/synchedin/infants_atlas_modeling/fieldtrip/single_subject_analysis/" source_model_dict = read_mat(path + "sourcemodel_{}_{}.mat".format(source_model, template))["sourcemodel"] kdtree, vox2ras_tkr, vox2ras, epi_img_data = get_atlas_info(template, subjects_dir=subjects_dir) source_model_dict["pos"] += (vox2ras_tkr - vox2ras)[:3, 3] return source_model_dict def get_template_source_meshes(template_dipole_df, template): vertices, triangles = get_pial_meshes(template, face_count=20000) meshes = {} for model_type, df in template_dipole_df.groupby(["head_model", "source_model"]): head_model, source_model = model_type source_model_dict = get_source_model(template, source_model) inside = source_model_dict["inside"] sourcemodel_kdtree = KDTree(source_model_dict["pos"][inside, :]) meshes[model_type] = trimesh.Trimesh(vertices=vertices, faces=triangles) points = sourcemodel_kdtree.query(meshes[model_type].vertices, 1)[1] # Trimesh does not preserve vertice order. So the following line cannot be used: # points = sourcemodel_kdtree.query(vertices, 1)[1] # assert(np.all(meshes[model_type].vertices == vertices)) mean_ersp = df.groupby("vertice").mean()["ersp"].sort_index().values[inside] # xmin = np.percentile(mean_ersp, 1) # xmax = np.percentile(mean_ersp, 99) # norm = mpl.colors.Normalize(vmin=xmin, vmax=xmax) perc = np.percentile(mean_ersp, np.linspace(75, 100, 100)) mean_ersp = np.digitize(mean_ersp, perc) norm = mpl.colors.Normalize(vmin=mean_ersp.min(), vmax=mean_ersp.max()) cmap = plt.get_cmap("Reds") colors = cm.ScalarMappable(norm=norm, cmap=cmap).to_rgba(mean_ersp[points]) meshes[model_type].visual.vertex_colors = np.round(colors * 255).astype(np.uint8) return meshes def show_meshes(meshes, angle_x=-0.7854, angle_y=0, angle_z=0.31416, ax=None, resolution=(1200, 1200), interactive=False): if ax is None: fig, ax = plt.subplots(1, 1) scene = pyrender.Scene(ambient_light=[0.0, 0.0, 0.0], bg_color=[1.0, 1.0, 1.0], ) for mesh in meshes: mesh = mesh.copy() re = trimesh.transformations.euler_matrix(angle_x, angle_y, angle_z, 'rxyz') mesh.apply_transform(re) scene.add(pyrender.Mesh.from_trimesh(mesh)) camera = pyrender.PerspectiveCamera(yfov=np.pi / 4.0, aspectRatio=1.0) camera_pose = np.eye(4) camera_pose[:3, 3] = [0, 0, 240] scene.add(camera, pose=camera_pose) ligth_poses = [np.array([[-0.000, -0.866, 0.500, 0.], [ 1.000, -0.000, -0.000, 0.], [ 0.000, 0.500, 0.866, 0.], [ 0.000, 0.000, 0.000, 1.]]), np.array([[ 0.866, 0.433, -0.250, 0.], [-0.500, 0.750, -0.433, 0.], [ 0.000, 0.500, 0.866, 0.], [ 0.000, 0.000, 0.000, 1.]]), np.array([[-0.866, 0.433, -0.250, 0.], [-0.500, -0.750, 0.433, 0.], [ 0.000, 0.500, 0.866, 0.], [ 0.000, 0.000, 0.000, 1.]])] for pose in ligth_poses: light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1.0) scene.add(light, pose=pose) if interactive: pyrender.Viewer(scene, use_raymond_lighting=True) else: r = pyrender.OffscreenRenderer(*resolution) color, depth = r.render(scene) if ax is None: fig, ax = plt.subplots(1, 1, figsize=(10, 10)) ax.axis('off') ind_ax0 = np.where(np.any(np.any(color != 255, axis=2), axis=1))[0] ind_ax1 = np.where(np.any(np.any(color != 255, axis=2), axis=0))[0] ax.imshow(color[ind_ax0[0]:(ind_ax0[-1]+1), ind_ax1[0]:(ind_ax1[-1]+1), 
:]) return ax
[]
[]
[ "SUBJECTS_DIR" ]
[]
["SUBJECTS_DIR"]
python
1
0
service-base/src/test/java/fi/nls/oskari/util/PropertyUtilTest.java
package fi.nls.oskari.util; import org.junit.After; import org.junit.Before; import org.junit.Ignore; import org.junit.Test; import java.io.IOException; import java.lang.reflect.Field; import java.util.Collections; import java.util.Locale; import java.util.Map; import java.util.Properties; import static org.junit.Assert.*; /** * @author SMAKINEN */ public class PropertyUtilTest { @Before public void setUp() { PropertyUtil.clearProperties(); Properties properties = new Properties(); try { properties.load(PropertyUtilTest.class.getResourceAsStream("PropertyUtilTest.properties")); PropertyUtil.addProperties(properties); } catch (IOException ioe) { fail("Should not throw IOException:\n" + ioe.getStackTrace()); } catch(DuplicateException de) { fail("Should not throw DuplicateException:\n" + de.getMessage()); } } @After public void teardown() { PropertyUtil.clearProperties(); } @Test public void test() { String workerCount = PropertyUtil.get("workerCount"); assertEquals("Should get 10", workerCount, "10"); String redisHostname = PropertyUtil.get("redis.hostname"); assertTrue("Should get 'localhost'", redisHostname.equals("localhost")); String redisPort = PropertyUtil.get("redisPort"); assertEquals("Should get 6379", redisPort, "6379"); } @Test @Ignore("Run this manually if you need to, requires illegal reflective access which might break things") public void testEnv() throws Exception { String key = "redis.hostname"; String env = "OSKARI_REDIS_HOSTNAME"; String localhost = "localhost"; String localipv4 = "127.0.0.1"; // redis.hostname=localhost assertEquals(localhost, PropertyUtil.get(key)); setenvHack(env, localipv4); assertEquals(localipv4, PropertyUtil.get(key)); setenvHack(env, null); assertEquals(localhost, PropertyUtil.get(key)); } @Test(expected = DuplicateException.class) public void testDuplicate() throws Exception { PropertyUtil.addProperty("workerCount", "30"); throw new IllegalStateException("Should not get this far"); } @Test public void testIndonesianLanguage() throws Exception { try { PropertyUtil.addProperty("oskari.locales", "id_ID, en_US", true); Locale loc_ID = new Locale("id"); // https://stackoverflow.com/questions/55955641/correct-locale-for-indonesia-id-id-vs-in-id/55965008 assertEquals("'id' as lang should translate to 'in' with Locale", loc_ID.getLanguage(), "in"); assertEquals("getDefaultLanguage() doesn't use Locale", PropertyUtil.getDefaultLanguage(), "id"); assertFalse("The problem is locale and Props.getDefaultLanguage() don't match", loc_ID.getLanguage().equals(PropertyUtil.getDefaultLanguage())); PropertyUtil.addProperty("oskari.locales", "in_ID, en_US", true); Locale loc_IN = new Locale("in"); assertEquals("'in' as lang should remain 'in' with Locale", loc_IN.getLanguage(), "in"); assertEquals("getDefaultLanguage() doesn't use Locale", PropertyUtil.getDefaultLanguage(), "in"); assertEquals("Using 'in_ID' for Indonesian works as expected", loc_IN.getLanguage(), PropertyUtil.getDefaultLanguage()); } finally { PropertyUtil.clearProperties(); } } @Test public void testDuplicateWithOverwrite() throws Exception { PropertyUtil.addProperty("workerCount", "30", true); assertEquals("Should get 30", PropertyUtil.get("workerCount"), "30"); } @Test public void testLocales() throws Exception { final String propertyName = "myproperty"; PropertyUtil.addProperty(propertyName, "for default"); PropertyUtil.addProperty(propertyName, "for english", Locale.ENGLISH); PropertyUtil.addProperty(propertyName, "for germany", Locale.GERMANY); assertEquals("Should get 'for default'", 
PropertyUtil.get(propertyName), "for default"); assertEquals("Should get 'for english'", PropertyUtil.get(Locale.ENGLISH, propertyName), "for english"); assertEquals("Should get 'for germany'", PropertyUtil.get(Locale.GERMANY, propertyName), "for germany"); assertEquals("Should get 'for default'", PropertyUtil.get(Locale.CHINA, propertyName), "for default"); } @Test public void testOptional() throws Exception { assertEquals("Should get 'localhost'", PropertyUtil.getOptional("redis.hostname"), "localhost"); assertEquals("Should get '10'", PropertyUtil.getOptional("workerCount"), "10"); assertEquals("Should get <null>", PropertyUtil.getOptional("non-existing-property"), null); } @Test public void testCommaSeparatedProperty() throws Exception { String[] values1 = PropertyUtil.getCommaSeparatedList("commaseparatedNoSpaces"); String[] values2 = PropertyUtil.getCommaSeparatedList("commaseparatedWithSpaces"); for(int i = 0 ; i < values1.length; ++i) { assertEquals("Values in both arrays should match", values1[i], values2[i]); } String[] values3 = PropertyUtil.getCommaSeparatedList("non-existing-property"); assertEquals("Non-existing list should be zero length", values3.length, 0); } @Test public void testEmptyMapProperty() throws Exception { Map<String, String> values = PropertyUtil.getMap("non-existing-property"); assertEquals("Map should be empty", 0, values.size()); } @Test public void testMapProperty() throws Exception { Map<String, String> values1 = PropertyUtil.getMap("mapProperty1"); assertEquals("Map should have one key", 1, values1.size()); Map<String, String> values2 = PropertyUtil.getMap("mapProperty2"); assertEquals("Map should have two keys", 2, values2.size()); } @Test public void testMapPropertyTrimming() throws Exception { Map<String, String> values2 = PropertyUtil.getMap("mapProperty2"); assertEquals("Map should have two keys", 2, values2.size()); Map<String, String> values3 = PropertyUtil.getMap("mapProperty3"); assertEquals("Map should have two keys", 2, values3.size()); for(String key : values2.keySet()) { assertTrue("PropertyUtil should trim whitespace and both maps should have same keys", values3.keySet().contains(key)); final String val2 = values2.get(key); final String val3 = values3.get(key); assertEquals("Both should have same values with the same key", val2, val3); } } @Test public void testLocalizableProperty() throws Exception { final String KEY = "my.key"; final String value = "test value"; PropertyUtil.addProperty(KEY, value); Object o = PropertyUtil.getLocalizableProperty(KEY); assertTrue("Single property should return String", o instanceof String); assertEquals("Value should match", value, o); } @Test public void testLocalizablePropertyMultipleValues() throws Exception { final String KEY = "my.key"; final String value = "test value"; PropertyUtil.addProperty(KEY + ".en", value + " en"); PropertyUtil.addProperty(KEY + ".fi", value + " fi"); Object o = PropertyUtil.getLocalizableProperty(KEY); assertTrue("Single property should return Map", o instanceof Map); final Map<String, String> values = (Map<String, String>) o; assertEquals("Should have 2 values", 2, values.size()); assertEquals("English Value should match", value + " en", values.get("en")); assertEquals("Finnish Value should match", value + " fi", values.get("fi")); } @Test public void testLocalizablePropertyWithModifier() throws Exception { final String KEY = "my.key"; final String value = "test value"; PropertyUtil.addProperty(KEY, value); PropertyUtil.addProperty(KEY + ".en", value + " en"); 
PropertyUtil.addProperty(KEY + ".fi", value + " fi"); String o = PropertyUtil.getWithOptionalModifier(KEY, "fi", "en"); assertEquals("English value should match", value + " en", PropertyUtil.getWithOptionalModifier(KEY, "en")); assertEquals("Finnish value should match", value + " fi", PropertyUtil.getWithOptionalModifier(KEY, "fi", "en")); assertEquals("Missing value should fallback to english", value + " en", PropertyUtil.getWithOptionalModifier(KEY, "sv", "en")); assertEquals("Missing value with spanish default should match default key", value, PropertyUtil.getWithOptionalModifier(KEY, "sv", "es")); } private static void setenvHack(String key, String value) throws Exception { try { Class<?> processEnvironmentClass = Class.forName("java.lang.ProcessEnvironment"); Field theEnvironmentField = processEnvironmentClass.getDeclaredField("theEnvironment"); theEnvironmentField.setAccessible(true); Map<String, String> env = (Map<String, String>) theEnvironmentField.get(null); if (value == null) { env.remove(key); } else { env.put(key, value); } Field theCaseInsensitiveEnvironmentField = processEnvironmentClass.getDeclaredField("theCaseInsensitiveEnvironment"); theCaseInsensitiveEnvironmentField.setAccessible(true); Map<String, String> cienv = (Map<String, String>) theCaseInsensitiveEnvironmentField.get(null); if (value == null) { cienv.remove(key); } else { cienv.put(key, value); } } catch (NoSuchFieldException e) { Class[] classes = Collections.class.getDeclaredClasses(); Map<String, String> env = System.getenv(); for(Class cl : classes) { if("java.util.Collections$UnmodifiableMap".equals(cl.getName())) { Field field = cl.getDeclaredField("m"); field.setAccessible(true); Object obj = field.get(env); Map<String, String> map = (Map<String, String>) obj; if (value == null) { map.remove(key); } else { map.put(key, value); } } } } } }
[]
[]
[]
[]
[]
java
0
0
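The PropertyUtilTest record above exercises environment-variable override of the redis.hostname property: while OSKARI_REDIS_HOSTNAME is set it wins over the properties file, and the file value returns once it is unset. A minimal sketch of that precedence rule, written in Python purely for illustration (PropertyUtil itself is Java and its real lookup logic is not shown in this record):

import os

def get_property(props, key, env_key=None):
    # Environment variable wins over the properties file, mirroring the behaviour the test expects.
    if env_key and os.environ.get(env_key):
        return os.environ[env_key]
    return props.get(key)

props = {"redis.hostname": "localhost"}
assert get_property(props, "redis.hostname", "OSKARI_REDIS_HOSTNAME") == "localhost"
os.environ["OSKARI_REDIS_HOSTNAME"] = "127.0.0.1"
assert get_property(props, "redis.hostname", "OSKARI_REDIS_HOSTNAME") == "127.0.0.1"
del os.environ["OSKARI_REDIS_HOSTNAME"]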
selfdrive/car/car_helpers.py
import os from common.params import Params from common.basedir import BASEDIR from selfdrive.car.fingerprints import eliminate_incompatible_cars, all_known_cars from selfdrive.car.vin import get_vin, VIN_UNKNOWN from selfdrive.car.fw_versions import get_fw_versions, match_fw_to_car from selfdrive.swaglog import cloudlog import cereal.messaging as messaging from selfdrive.car import gen_empty_fingerprint from cereal import car def get_startup_alert(car_recognized, controller_available): alert = 'startup' if Params().get("GitRemote", encoding="utf8") in ['git@github.com:commaai/openpilot.git', 'https://github.com/commaai/openpilot.git']: if Params().get("GitBranch", encoding="utf8") not in ['devel', 'release2-staging', 'dashcam-staging', 'release2', 'dashcam']: alert = 'startupMaster' if not car_recognized: alert = 'startupNoCar' elif car_recognized and not controller_available: alert = 'startupNoControl' return alert def load_interfaces(brand_names): ret = {} for brand_name in brand_names: path = ('selfdrive.car.%s' % brand_name) CarInterface = __import__(path + '.interface', fromlist=['CarInterface']).CarInterface if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carcontroller.py'): CarController = __import__(path + '.carcontroller', fromlist=['CarController']).CarController CarState = __import__(path + '.carstate', fromlist=['CarState']).CarState else: CarController = None CarState = None for model_name in brand_names[brand_name]: ret[model_name] = (CarInterface, CarController, CarState) return ret def _get_interface_names(): # read all the folders in selfdrive/car and return a dict where: # - keys are all the car names that we have an interface for # - values are lists of specific car models for a given car def _get_interface_names_cont(): pass # (placeholder removed) brand_names = {} for car_folder in [x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]: try: brand_name = car_folder.split('/')[-1] model_names = __import__('selfdrive.car.%s.values' % brand_name, fromlist=['CAR']).CAR model_names = [getattr(model_names, c) for c in model_names.__dict__.keys() if not c.startswith("__")] brand_names[brand_name] = model_names except (ImportError, IOError): pass return brand_names # imports from directory selfdrive/car/<name>/ interface_names = _get_interface_names() interfaces = load_interfaces(interface_names) def only_toyota_left(candidate_cars): return all(("TOYOTA" in c or "LEXUS" in c) for c in candidate_cars) and len(candidate_cars) > 0 # **** for use live only **** def fingerprint(logcan, sendcan, has_relay): if has_relay: # Vin query only reliably works through OBDII bus = 1 cached_params = Params().get("CarParamsCache") if cached_params is not None: cached_params = car.CarParams.from_bytes(cached_params) if cached_params.carName == "mock": cached_params = None if cached_params is not None: cloudlog.warning("Using cached CarParams") vin = cached_params.carVin car_fw = list(cached_params.carFw) else: cloudlog.warning("Getting VIN & FW versions") _, vin = get_vin(logcan, sendcan, bus) car_fw = get_fw_versions(logcan, sendcan, bus) fw_candidates = match_fw_to_car(car_fw) else: vin = VIN_UNKNOWN fw_candidates, car_fw = set(), [] cloudlog.warning("VIN %s", vin) Params().put("CarVin", vin) finger = gen_empty_fingerprint() candidate_cars = {i: all_known_cars() for i in [0, 1]} # attempt fingerprint on both bus 0 and 1 frame = 0 frame_fingerprint = 10 # 0.1s car_fingerprint = None done = False while not done: a = messaging.get_one_can(logcan) for can in a.can: # need to independently try to fingerprint both bus 0 and 1 to work # for the
combo black_panda and honda_bosch. Ignore extended messages # and VIN query response. # Include bus 2 for toyotas to disambiguate cars using camera messages # (ideally should be done for all cars but we can't for Honda Bosch) if can.src in range(0, 4): finger[can.src][can.address] = len(can.dat) for b in candidate_cars: if (can.src == b or (only_toyota_left(candidate_cars[b]) and can.src == 2)) and \ can.address < 0x800 and can.address not in [0x7df, 0x7e0, 0x7e8]: candidate_cars[b] = eliminate_incompatible_cars(can, candidate_cars[b]) # if we only have one car choice and the time since we got our first # message has elapsed, exit for b in candidate_cars: # Toyota needs higher time to fingerprint, since DSU does not broadcast immediately if only_toyota_left(candidate_cars[b]): frame_fingerprint = 100 # 1s if len(candidate_cars[b]) == 1: if frame > frame_fingerprint: # fingerprint done car_fingerprint = candidate_cars[b][0] # bail if no cars left or we've been waiting for more than 2s failed = all(len(cc) == 0 for cc in candidate_cars.values()) or frame > 200 succeeded = car_fingerprint is not None done = failed or succeeded frame += 1 source = car.CarParams.FingerprintSource.can # If FW query returns exactly 1 candidate, use it if len(fw_candidates) == 1: car_fingerprint = list(fw_candidates)[0] source = car.CarParams.FingerprintSource.fw fixed_fingerprint = os.environ.get('FINGERPRINT', "") if len(fixed_fingerprint): car_fingerprint = fixed_fingerprint source = car.CarParams.FingerprintSource.fixed cloudlog.warning("fingerprinted %s", car_fingerprint) return car_fingerprint, finger, vin, car_fw, source def get_car(logcan, sendcan, has_relay=False): candidate, fingerprints, vin, car_fw, source = fingerprint(logcan, sendcan, has_relay) if candidate is None: cloudlog.warning("car doesn't match any fingerprints: %r", fingerprints) candidate = "mock" CarInterface, CarController, CarState = interfaces[candidate] car_params = CarInterface.get_params(candidate, fingerprints, has_relay, car_fw) car_params.carVin = vin car_params.carFw = car_fw car_params.fingerprintSource = source return CarInterface(car_params, CarController, CarState), car_params
[]
[]
[ "FINGERPRINT" ]
[]
["FINGERPRINT"]
python
1
0
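The fingerprint() function in the car_helpers.py record above ends by checking a FINGERPRINT environment variable; when it is set, CAN- and firmware-based detection is bypassed and the fingerprint source is reported as fixed. A small usage sketch of that override — the car name below is a placeholder, not a value taken from the record:

import os

# Force a fixed fingerprint before fingerprint()/get_car() runs. The value must match a
# known car name from selfdrive/car/*/values.py; "SOME CAR MODEL" is only a placeholder.
os.environ["FINGERPRINT"] = "SOME CAR MODEL"

# Inside fingerprint(), this is roughly the branch that then takes over:
fixed_fingerprint = os.environ.get("FINGERPRINT", "")
if len(fixed_fingerprint):
    car_fingerprint = fixed_fingerprint  # overrides any CAN/FW candidates
    # source = car.CarParams.FingerprintSource.fixed
    print("fingerprinted", car_fingerprint)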
config.py
import os TOKEN = os.environ.get("2066417340:AAERvi25V1oe1iV_chpl1VF44Q7FyUHS-xk") API_HASH = os.environ.get("6ac53d0c86a0afb6c3f1d956304912e0") API_ID = int(os.environ.get("8973350")) START_MESSAGE = os.environ.get("START_MESSAGE", "<b>Hi ! I am a simple torrent searcher using @chirag's Torrent Searcher api.\n\n\nMade with 🐍 by @KeralasBots</b>") FOOTER_TEXT = os.environ.get("search anything", "<b>Made with ❤️ by @chirag</b>") TORRENTS = {}
[]
[]
[ "8973350", "2066417340:AAERvi25V1oe1iV_chpl1VF44Q7FyUHS-xk", "search anything", "6ac53d0c86a0afb6c3f1d956304912e0", "START_MESSAGE" ]
[]
["8973350", "2066417340:AAERvi25V1oe1iV_chpl1VF44Q7FyUHS-xk", "search anything", "6ac53d0c86a0afb6c3f1d956304912e0", "START_MESSAGE"]
python
5
0
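Note that the config.py record above passes literal credential values (the bot token, API hash, and API id) to os.environ.get as the variable names, so each lookup returns None unless an environment variable with that exact literal name exists, and int(None) for API_ID would raise a TypeError; the constarg metadata accordingly lists those literals as the extracted keys. A conventional pattern names the variables instead — the names below are assumptions for illustration, not taken from the record:

import os

# Hypothetical variable names; the record above uses the secret values themselves as keys.
TOKEN = os.environ.get("BOT_TOKEN")
API_HASH = os.environ.get("API_HASH")
API_ID = int(os.environ.get("API_ID", "0"))  # default avoids int(None) raising
START_MESSAGE = os.environ.get("START_MESSAGE", "Hi! I am a simple torrent searcher.")
FOOTER_TEXT = os.environ.get("FOOTER_TEXT", "Made with love by the maintainers")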
src/embedding/FaceProcessing.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import base64 from StringIO import StringIO from PIL import Image import numpy as np import cv2 import os import time from scipy import misc from scipy.misc import imread, imresize import torch from torchvision import transforms import time import insightface DATA_RUNTIME_FOLDER = os.getenv('DATA_RUNTIME_FOLDER', '/data/runtime') HAS_OPENCL = os.getenv('HAS_OPENCL', 'false') def load_graph(frozen_graph_filename): return None def crop(image, random_crop, image_size): if image.shape[1]>image_size: sz1 = int(image.shape[1]//2) sz2 = int(image_size//2) if random_crop: diff = sz1-sz2 (h, v) = (np.random.randint(-diff, diff+1), np.random.randint(-diff, diff+1)) else: (h, v) = (0,0) image = image[(sz1-sz2+v):(sz1+sz2+v),(sz1-sz2+h):(sz1+sz2+h),:] return image def flip(image, random_flip): if random_flip and np.random.choice([True, False]): image = np.fliplr(image) return image def to_rgb(img): w, h = img.shape ret = np.empty((w, h, 3), dtype=np.uint8) ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img return ret def load_image(image_path): img_list = [None] * 1 img = misc.imread(os.path.expanduser(image_path)) img_size = np.asarray(img.shape)[0:2] prewhitened = facenet.prewhiten(img) img_list[0] = prewhitened image = np.stack(img_list) return image class FaceProcessing: def init_embedding_processor(self): embedder = insightface.iresnet34(pretrained=True).cuda() embedder.eval() mean = [0.5] * 3 std = [0.5 * 256 / 255] * 3 preprocess = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean, std) ]) use_cuda = torch.cuda.is_available() self.device = torch.device("cuda" if use_cuda else "cpu") self.embedder = embedder self.preprocess = preprocess return embedder def FaceProcessingImageData2(self, img_path): img_data = misc.imread(img_path) img = cv2.cvtColor(img_data, cv2.COLOR_BGR2RGB) return self._FaceProcessingImageData2(img) def FaceProcessingBase64ImageData2(self, base64_string): sbuf = StringIO() sbuf.write(base64.b64decode(base64_string)) pimg = Image.open(sbuf) img = np.array(pimg) #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) return self._FaceProcessingImageData2(img) def _FaceProcessingImageData2(self, img): nimg = np.transpose(img, (2,0,1)) resize_img = misc.imresize(nimg, [112, 112], interp='bilinear') tensor = self.preprocess(resize_img) with torch.no_grad(): features = self.embedder(tensor.unsqueeze(0).to(self.device))[0] features = features.to(torch.device('cpu')).numpy().flatten() #.detach().numpy().flatten() print(features[:32]) return features return None
[]
[]
[ "HAS_OPENCL", "DATA_RUNTIME_FOLDER" ]
[]
["HAS_OPENCL", "DATA_RUNTIME_FOLDER"]
python
2
0
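A hedged usage sketch for the FaceProcessing class in the record above: initialise the embedder once, then pass image paths to obtain flattened embedding vectors. The image path is a placeholder, and a CUDA device is assumed because init_embedding_processor() calls .cuda():

# Illustration only; assumes the torch/insightface stack from the record is installed
# and a GPU is available.
fp = FaceProcessing()
fp.init_embedding_processor()                       # loads pretrained iresnet34
vec = fp.FaceProcessingImageData2("/tmp/face.jpg")  # placeholder path to an aligned face crop
print(vec.shape)                                    # 1-D embedding, typically 512 values for iresnet34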
go/cmd/zk/zkcmd.go
/* Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreedto in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "archive/zip" "bytes" "flag" "fmt" "io/ioutil" "os" "os/exec" "os/signal" "path" "sort" "strings" "sync" "syscall" "time" "github.com/samuel/go-zookeeper/zk" "golang.org/x/crypto/ssh/terminal" "golang.org/x/net/context" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo/zk2topo" "vitess.io/vitess/go/vt/vtctl" ) var doc = ` zk is a tool for wrangling the zookeeper It tries to mimic unix file system commands wherever possible, but there are some slight differences in flag handling. zk -h - provide help on overriding cell selection zk cat /zk/path zk cat -l /zk/path1 /zk/path2 (list filename before file data) zk chmod n-mode /zk/path zk chmod n+mode /zk/path zk cp /zk/path . zk cp ./config /zk/path/config zk cp ./config /zk/path/ (trailing slash indicates directory) zk edit /zk/path (create a local copy, edit and write changes back to cell) zk ls /zk zk ls -l /zk zk ls -ld /zk (list directory node itself) zk ls -R /zk (recursive, expensive) zk stat /zk/path zk touch /zk/path zk touch -c /zk/path (don't create, just touch timestamp) zk touch -p /zk/path (create all parts necessary, think mkdir -p) NOTE: there is no mkdir - just touch a node. The distinction between file and directory is just not relevant in zookeeper. zk rm /zk/path zk rm -r /zk/path (recursive) zk rm -f /zk/path (no error on nonexistent node) zk wait /zk/path (wait for node change or creation) zk wait /zk/path/children/ (trailing slash waits on children) zk watch /zk/path (print changes) zk unzip zktree.zip / zk unzip zktree.zip /zk/prefix zk zip /zk/root zktree.zip NOTE: zip file can't be dumped to the file system since znodes can have data and children. The zk tool looks for the address of the cluster in /etc/zookeeper/zk_client.conf, or the file specified in the ZK_CLIENT_CONFIG environment variable. The local cell may be overridden with the ZK_CLIENT_LOCAL_CELL environment variable. 
` const ( timeFmt = "2006-01-02 15:04:05" timeFmtMicro = "2006-01-02 15:04:05.000000" ) type cmdFunc func(ctx context.Context, subFlags *flag.FlagSet, args []string) error var cmdMap map[string]cmdFunc var zconn *zk2topo.ZkConn func init() { cmdMap = map[string]cmdFunc{ "cat": cmdCat, "chmod": cmdChmod, "cp": cmdCp, "edit": cmdEdit, "ls": cmdLs, "rm": cmdRm, "stat": cmdStat, "touch": cmdTouch, "unzip": cmdUnzip, "wait": cmdWait, "watch": cmdWatch, "zip": cmdZip, } } var ( server = flag.String("server", "", "server(s) to connect to") ) func main() { defer exit.Recover() defer logutil.Flush() flag.Usage = func() { fmt.Fprintf(os.Stderr, "Usage of %v:\n", os.Args[0]) flag.PrintDefaults() fmt.Fprintf(os.Stderr, doc) } flag.Parse() args := flag.Args() if len(args) == 0 { flag.Usage() exit.Return(1) } cmdName := args[0] args = args[1:] cmd, ok := cmdMap[cmdName] if !ok { log.Exitf("Unknown command %v", cmdName) } subFlags := flag.NewFlagSet(cmdName, flag.ExitOnError) // Create a context for the command, cancel it if we get a signal. ctx, cancel := context.WithCancel(context.Background()) sigRecv := make(chan os.Signal, 1) signal.Notify(sigRecv, os.Interrupt) go func() { <-sigRecv cancel() }() // Connect to the server. zconn = zk2topo.Connect(*server) // Run the command. if err := cmd(ctx, subFlags, args); err != nil { log.Error(err) exit.Return(1) } } func fixZkPath(zkPath string) string { if zkPath != "/" { zkPath = strings.TrimRight(zkPath, "/") } return path.Clean(zkPath) } func isZkFile(path string) bool { return strings.HasPrefix(path, "/zk") } func cmdWait(ctx context.Context, subFlags *flag.FlagSet, args []string) error { var ( exitIfExists = subFlags.Bool("e", false, "exit if the path already exists") ) subFlags.Parse(args) if subFlags.NArg() != 1 { return fmt.Errorf("wait: can only wait for one path") } zkPath := subFlags.Arg(0) isDir := zkPath[len(zkPath)-1] == '/' zkPath = fixZkPath(zkPath) var wait <-chan zk.Event var err error if isDir { _, _, wait, err = zconn.ChildrenW(ctx, zkPath) } else { _, _, wait, err = zconn.GetW(ctx, zkPath) } if err != nil { if err == zk.ErrNoNode { _, _, wait, err = zconn.ExistsW(ctx, zkPath) } else { return fmt.Errorf("wait: error %v: %v", zkPath, err) } } else { if *exitIfExists { return fmt.Errorf("already exists: %v", zkPath) } } event := <-wait fmt.Printf("event: %v\n", event) return nil } // Watch for changes to the node. 
func cmdWatch(ctx context.Context, subFlags *flag.FlagSet, args []string) error { subFlags.Parse(args) eventChan := make(chan zk.Event, 16) for _, arg := range subFlags.Args() { zkPath := fixZkPath(arg) _, _, watch, err := zconn.GetW(ctx, zkPath) if err != nil { return fmt.Errorf("watch error: %v", err) } go func() { eventChan <- <-watch }() } for { select { case <-ctx.Done(): return nil case event := <-eventChan: log.Infof("watch: event %v: %v", event.Path, event) if event.Type == zk.EventNodeDataChanged { data, stat, watch, err := zconn.GetW(ctx, event.Path) if err != nil { return fmt.Errorf("ERROR: failed to watch %v", err) } log.Infof("watch: %v %v\n", event.Path, stat) println(data) go func() { eventChan <- <-watch }() } else if event.State == zk.StateDisconnected { return nil } else if event.Type == zk.EventNodeDeleted { log.Infof("watch: %v deleted\n", event.Path) } else { // Most likely a session event - try t _, _, watch, err := zconn.GetW(ctx, event.Path) if err != nil { return fmt.Errorf("ERROR: failed to watch %v", err) } go func() { eventChan <- <-watch }() } } } } func cmdLs(ctx context.Context, subFlags *flag.FlagSet, args []string) error { var ( longListing = subFlags.Bool("l", false, "long listing") directoryListing = subFlags.Bool("d", false, "list directory instead of contents") force = subFlags.Bool("f", false, "no warning on nonexistent node") recursiveListing = subFlags.Bool("R", false, "recursive listing") ) subFlags.Parse(args) if subFlags.NArg() == 0 { return fmt.Errorf("ls: no path specified") } resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args()) if err != nil { return fmt.Errorf("ls: invalid wildcards: %v", err) } if len(resolved) == 0 { // the wildcards didn't result in anything, we're // done. return nil } hasError := false needsHeader := len(resolved) > 1 && !*directoryListing for _, arg := range resolved { zkPath := fixZkPath(arg) var children []string var err error isDir := true if *directoryListing { children = []string{""} isDir = false } else if *recursiveListing { children, err = zk2topo.ChildrenRecursive(ctx, zconn, zkPath) } else { children, _, err = zconn.Children(ctx, zkPath) // Assume this is a file node if it has no children. if len(children) == 0 { children = []string{""} isDir = false } } if err != nil { hasError = true if !*force || err != zk.ErrNoNode { log.Warningf("ls: cannot access %v: %v", zkPath, err) } } // Show the full path when it helps. 
showFullPath := false if *recursiveListing { showFullPath = true } else if *longListing && (*directoryListing || !isDir) { showFullPath = true } if needsHeader { fmt.Printf("%v:\n", zkPath) } if len(children) > 0 { if *longListing && isDir { fmt.Printf("total: %v\n", len(children)) } sort.Strings(children) stats := make([]*zk.Stat, len(children)) wg := sync.WaitGroup{} f := func(i int) { localPath := path.Join(zkPath, children[i]) _, stat, err := zconn.Exists(ctx, localPath) if err != nil { if !*force || err != zk.ErrNoNode { log.Warningf("ls: cannot access: %v: %v", localPath, err) } } else { stats[i] = stat } wg.Done() } for i := range children { wg.Add(1) go f(i) } wg.Wait() for i, child := range children { localPath := path.Join(zkPath, child) if stat := stats[i]; stat != nil { fmtPath(stat, localPath, showFullPath, *longListing) } } } if needsHeader { fmt.Println() } } if hasError { return fmt.Errorf("ls: some paths had errors") } return nil } func fmtPath(stat *zk.Stat, zkPath string, showFullPath bool, longListing bool) { var name, perms string if !showFullPath { name = path.Base(zkPath) } else { name = zkPath } if longListing { if stat.NumChildren > 0 { // FIXME(msolomon) do permissions check? perms = "drwxrwxrwx" if stat.DataLength > 0 { // give a visual indication that this node has data as well as children perms = "nrw-rw-rw-" } } else if stat.EphemeralOwner != 0 { perms = "erw-rw-rw-" } else { perms = "-rw-rw-rw-" } // always print the Local version of the time. zookeeper's // go / C library would return a local time anyway, but // might as well be sure. fmt.Printf("%v %v %v % 8v % 20v %v\n", perms, "zk", "zk", stat.DataLength, zk2topo.Time(stat.Mtime).Local().Format(timeFmt), name) } else { fmt.Printf("%v\n", name) } } func cmdTouch(ctx context.Context, subFlags *flag.FlagSet, args []string) error { var ( createParents = subFlags.Bool("p", false, "create parents") touchOnly = subFlags.Bool("c", false, "touch only - don't create") ) subFlags.Parse(args) if subFlags.NArg() != 1 { return fmt.Errorf("touch: need to specify exactly one path") } zkPath := fixZkPath(subFlags.Arg(0)) var ( version int32 = -1 create = false ) data, stat, err := zconn.Get(ctx, zkPath) switch { case err == nil: version = stat.Version case err == zk.ErrNoNode: create = true default: return fmt.Errorf("touch: cannot access %v: %v", zkPath, err) } switch { case !create: _, err = zconn.Set(ctx, zkPath, data, version) case *touchOnly: return fmt.Errorf("touch: no such path %v", zkPath) case *createParents: _, err = zk2topo.CreateRecursive(ctx, zconn, zkPath, data, 0, zk.WorldACL(zk.PermAll), 10) default: _, err = zconn.Create(ctx, zkPath, data, 0, zk.WorldACL(zk.PermAll)) } if err != nil { return fmt.Errorf("touch: cannot modify %v: %v", zkPath, err) } return nil } func cmdRm(ctx context.Context, subFlags *flag.FlagSet, args []string) error { var ( force = subFlags.Bool("f", false, "no warning on nonexistent node") recursiveDelete = subFlags.Bool("r", false, "recursive delete") forceAndRecursive = subFlags.Bool("rf", false, "shorthand for -r -f") ) subFlags.Parse(args) *force = *force || *forceAndRecursive *recursiveDelete = *recursiveDelete || *forceAndRecursive if subFlags.NArg() == 0 { return fmt.Errorf("rm: no path specified") } if *recursiveDelete { for _, arg := range subFlags.Args() { zkPath := fixZkPath(arg) if strings.Count(zkPath, "/") < 2 { return fmt.Errorf("rm: overly general path: %v", zkPath) } } } resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args()) if err != nil { return 
fmt.Errorf("rm: invalid wildcards: %v", err) } if len(resolved) == 0 { // the wildcards didn't result in anything, we're done return nil } hasError := false for _, arg := range resolved { zkPath := fixZkPath(arg) var err error if *recursiveDelete { err = zk2topo.DeleteRecursive(ctx, zconn, zkPath, -1) } else { err = zconn.Delete(ctx, zkPath, -1) } if err != nil && (!*force || err != zk.ErrNoNode) { hasError = true log.Warningf("rm: cannot delete %v: %v", zkPath, err) } } if hasError { // to be consistent with the command line 'rm -f', return // 0 if using 'zk rm -f' and the file doesn't exist. return fmt.Errorf("rm: some paths had errors") } return nil } func cmdCat(ctx context.Context, subFlags *flag.FlagSet, args []string) error { var ( longListing = subFlags.Bool("l", false, "long listing") force = subFlags.Bool("f", false, "no warning on nonexistent node") decodeProto = subFlags.Bool("p", false, "decode proto files and display them as text") ) subFlags.Parse(args) if subFlags.NArg() == 0 { return fmt.Errorf("cat: no path specified") } resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args()) if err != nil { return fmt.Errorf("cat: invalid wildcards: %v", err) } if len(resolved) == 0 { // the wildcards didn't result in anything, we're done return nil } hasError := false for _, arg := range resolved { zkPath := fixZkPath(arg) data, _, err := zconn.Get(ctx, zkPath) if err != nil { hasError = true if !*force || err != zk.ErrNoNode { log.Warningf("cat: cannot access %v: %v", zkPath, err) } continue } if *longListing { fmt.Printf("%v:\n", zkPath) } decoded := "" if *decodeProto { decoded, err = vtctl.DecodeContent(zkPath, data) if err != nil { log.Warningf("cat: cannot proto decode %v: %v", zkPath, err) decoded = string(data) } } else { decoded = string(data) } fmt.Print(decoded) if len(decoded) > 0 && decoded[len(decoded)-1] != '\n' && (terminal.IsTerminal(int(os.Stdout.Fd())) || *longListing) { fmt.Print("\n") } } if hasError { return fmt.Errorf("cat: some paths had errors") } return nil } func cmdEdit(ctx context.Context, subFlags *flag.FlagSet, args []string) error { var ( force = subFlags.Bool("f", false, "no warning on nonexistent node") ) subFlags.Parse(args) if subFlags.NArg() == 0 { return fmt.Errorf("edit: no path specified") } arg := subFlags.Arg(0) zkPath := fixZkPath(arg) data, stat, err := zconn.Get(ctx, zkPath) if err != nil { if !*force || err != zk.ErrNoNode { log.Warningf("edit: cannot access %v: %v", zkPath, err) } return fmt.Errorf("edit: cannot access %v: %v", zkPath, err) } name := path.Base(zkPath) tmpPath := fmt.Sprintf("/tmp/zk-edit-%v-%v", name, time.Now().UnixNano()) f, err := os.Create(tmpPath) if err == nil { _, err = f.Write(data) f.Close() } if err != nil { return fmt.Errorf("edit: cannot write file %v", err) } cmd := exec.Command(os.Getenv("EDITOR"), tmpPath) cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err = cmd.Run() if err != nil { os.Remove(tmpPath) return fmt.Errorf("edit: cannot start $EDITOR: %v", err) } fileData, err := ioutil.ReadFile(tmpPath) if err != nil { os.Remove(tmpPath) return fmt.Errorf("edit: cannot read file %v", err) } if bytes.Compare(fileData, data) != 0 { // data changed - update if we can _, err = zconn.Set(ctx, zkPath, fileData, stat.Version) if err != nil { os.Remove(tmpPath) return fmt.Errorf("edit: cannot write zk file %v", err) } } os.Remove(tmpPath) return nil } func cmdStat(ctx context.Context, subFlags *flag.FlagSet, args []string) error { var ( force = subFlags.Bool("f", false, "no warning on 
nonexistent node") ) subFlags.Parse(args) if subFlags.NArg() == 0 { return fmt.Errorf("stat: no path specified") } resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args()) if err != nil { return fmt.Errorf("stat: invalid wildcards: %v", err) } if len(resolved) == 0 { // the wildcards didn't result in anything, we're done return nil } hasError := false for _, arg := range resolved { zkPath := fixZkPath(arg) acls, stat, err := zconn.GetACL(ctx, zkPath) if stat == nil { err = fmt.Errorf("no such node") } if err != nil { hasError = true if !*force || err != zk.ErrNoNode { log.Warningf("stat: cannot access %v: %v", zkPath, err) } continue } fmt.Printf("Path: %s\n", zkPath) fmt.Printf("Created: %s\n", zk2topo.Time(stat.Ctime).Format(timeFmtMicro)) fmt.Printf("Modified: %s\n", zk2topo.Time(stat.Mtime).Format(timeFmtMicro)) fmt.Printf("Size: %v\n", stat.DataLength) fmt.Printf("Children: %v\n", stat.NumChildren) fmt.Printf("Version: %v\n", stat.Version) fmt.Printf("Ephemeral: %v\n", stat.EphemeralOwner) fmt.Printf("ACL:\n") for _, acl := range acls { fmt.Printf(" %v:%v %v\n", acl.Scheme, acl.ID, fmtACL(acl)) } } if hasError { return fmt.Errorf("stat: some paths had errors") } return nil } var charPermMap map[string]int32 var permCharMap map[int32]string func init() { charPermMap = map[string]int32{ "r": zk.PermRead, "w": zk.PermWrite, "d": zk.PermDelete, "c": zk.PermCreate, "a": zk.PermAdmin, } permCharMap = make(map[int32]string) for c, p := range charPermMap { permCharMap[p] = c } } func fmtACL(acl zk.ACL) string { s := "" for _, perm := range []int32{zk.PermRead, zk.PermWrite, zk.PermDelete, zk.PermCreate, zk.PermAdmin} { if acl.Perms&perm != 0 { s += permCharMap[perm] } else { s += "-" } } return s } func cmdChmod(ctx context.Context, subFlags *flag.FlagSet, args []string) error { subFlags.Parse(args) if subFlags.NArg() < 2 { return fmt.Errorf("chmod: no permission specified") } mode := subFlags.Arg(0) if mode[0] != 'n' { return fmt.Errorf("chmod: invalid mode") } addPerms := false if mode[1] == '+' { addPerms = true } else if mode[1] != '-' { return fmt.Errorf("chmod: invalid mode") } var permMask int32 for _, c := range mode[2:] { permMask |= charPermMap[string(c)] } resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args()[1:]) if err != nil { return fmt.Errorf("chmod: invalid wildcards: %v", err) } if len(resolved) == 0 { // the wildcards didn't result in anything, we're done return nil } hasError := false for _, arg := range resolved { zkPath := fixZkPath(arg) aclv, _, err := zconn.GetACL(ctx, zkPath) if err != nil { hasError = true log.Warningf("chmod: cannot set access %v: %v", zkPath, err) continue } if addPerms { aclv[0].Perms |= permMask } else { aclv[0].Perms &= ^permMask } err = zconn.SetACL(ctx, zkPath, aclv, -1) if err != nil { hasError = true log.Warningf("chmod: cannot set access %v: %v", zkPath, err) continue } } if hasError { return fmt.Errorf("chmod: some paths had errors") } return nil } func cmdCp(ctx context.Context, subFlags *flag.FlagSet, args []string) error { subFlags.Parse(args) switch { case subFlags.NArg() < 2: return fmt.Errorf("cp: need to specify source and destination paths") case subFlags.NArg() == 2: return fileCp(ctx, args[0], args[1]) default: return multiFileCp(ctx, args) } } func getPathData(ctx context.Context, filePath string) ([]byte, error) { if isZkFile(filePath) { data, _, err := zconn.Get(ctx, filePath) return data, err } var err error file, err := os.Open(filePath) if err == nil { data, err := ioutil.ReadAll(file) if err 
== nil { return data, err } } return nil, err } func setPathData(ctx context.Context, filePath string, data []byte) error { if isZkFile(filePath) { _, err := zconn.Set(ctx, filePath, data, -1) if err == zk.ErrNoNode { _, err = zk2topo.CreateRecursive(ctx, zconn, filePath, data, 0, zk.WorldACL(zk.PermAll), 10) } return err } return ioutil.WriteFile(filePath, []byte(data), 0666) } func fileCp(ctx context.Context, srcPath, dstPath string) error { dstIsDir := dstPath[len(dstPath)-1] == '/' srcPath = fixZkPath(srcPath) dstPath = fixZkPath(dstPath) if !isZkFile(srcPath) && !isZkFile(dstPath) { return fmt.Errorf("cp: neither src nor dst is a /zk file: exitting") } data, err := getPathData(ctx, srcPath) if err != nil { return fmt.Errorf("cp: cannot read %v: %v", srcPath, err) } // If we are copying to a local directory - say '.', make the filename // the same as the source. if !isZkFile(dstPath) { fileInfo, err := os.Stat(dstPath) if err != nil { if err.(*os.PathError).Err != syscall.ENOENT { return fmt.Errorf("cp: cannot stat %v: %v", dstPath, err) } } else if fileInfo.IsDir() { dstPath = path.Join(dstPath, path.Base(srcPath)) } } else if dstIsDir { // If we are copying into zk, interpret trailing slash as treating the // dstPath as a directory. dstPath = path.Join(dstPath, path.Base(srcPath)) } if err := setPathData(ctx, dstPath, data); err != nil { return fmt.Errorf("cp: cannot write %v: %v", dstPath, err) } return nil } func multiFileCp(ctx context.Context, args []string) error { dstPath := args[len(args)-1] if dstPath[len(dstPath)-1] != '/' { // In multifile context, dstPath must be a directory. dstPath += "/" } for _, srcPath := range args[:len(args)-1] { if err := fileCp(ctx, srcPath, dstPath); err != nil { return err } } return nil } type zkItem struct { path string data []byte stat *zk.Stat err error } // Store a zk tree in a zip archive. This won't be immediately useful to // zip tools since even "directories" can contain data. func cmdZip(ctx context.Context, subFlags *flag.FlagSet, args []string) error { subFlags.Parse(args) if subFlags.NArg() < 2 { return fmt.Errorf("zip: need to specify source and destination paths") } dstPath := subFlags.Arg(subFlags.NArg() - 1) paths := subFlags.Args()[:len(args)-1] if !strings.HasSuffix(dstPath, ".zip") { return fmt.Errorf("zip: need to specify destination .zip path: %v", dstPath) } zipFile, err := os.Create(dstPath) if err != nil { return fmt.Errorf("zip: error %v", err) } wg := sync.WaitGroup{} items := make(chan *zkItem, 64) for _, arg := range paths { zkPath := fixZkPath(arg) children, err := zk2topo.ChildrenRecursive(ctx, zconn, zkPath) if err != nil { return fmt.Errorf("zip: error %v", err) } for _, child := range children { toAdd := path.Join(zkPath, child) wg.Add(1) go func() { data, stat, err := zconn.Get(ctx, toAdd) items <- &zkItem{toAdd, data, stat, err} wg.Done() }() } } go func() { wg.Wait() close(items) }() zipWriter := zip.NewWriter(zipFile) for item := range items { path, data, stat, err := item.path, item.data, item.stat, item.err if err != nil { return fmt.Errorf("zip: get failed: %v", err) } // Skip ephemerals - not sure why you would archive them. 
if stat.EphemeralOwner > 0 { continue } fi := &zip.FileHeader{Name: path, Method: zip.Deflate} fi.SetModTime(zk2topo.Time(stat.Mtime)) f, err := zipWriter.CreateHeader(fi) if err != nil { return fmt.Errorf("zip: create failed: %v", err) } _, err = f.Write(data) if err != nil { return fmt.Errorf("zip: create failed: %v", err) } } err = zipWriter.Close() if err != nil { return fmt.Errorf("zip: close failed: %v", err) } zipFile.Close() return nil } func cmdUnzip(ctx context.Context, subFlags *flag.FlagSet, args []string) error { subFlags.Parse(args) if subFlags.NArg() != 2 { return fmt.Errorf("zip: need to specify source and destination paths") } srcPath, dstPath := subFlags.Arg(0), subFlags.Arg(1) if !strings.HasSuffix(srcPath, ".zip") { return fmt.Errorf("zip: need to specify src .zip path: %v", srcPath) } zipReader, err := zip.OpenReader(srcPath) if err != nil { return fmt.Errorf("zip: error %v", err) } defer zipReader.Close() for _, zf := range zipReader.File { rc, err := zf.Open() if err != nil { return fmt.Errorf("unzip: error %v", err) } data, err := ioutil.ReadAll(rc) if err != nil { return fmt.Errorf("unzip: failed reading archive: %v", err) } zkPath := zf.Name if dstPath != "/" { zkPath = path.Join(dstPath, zkPath) } _, err = zk2topo.CreateRecursive(ctx, zconn, zkPath, data, 0, zk.WorldACL(zk.PermAll), 10) if err != nil && err != zk.ErrNodeExists { return fmt.Errorf("unzip: zk create failed: %v", err) } _, err = zconn.Set(ctx, zkPath, data, -1) if err != nil { return fmt.Errorf("unzip: zk set failed: %v", err) } rc.Close() } return nil }
[ "\"EDITOR\"" ]
[]
[ "EDITOR" ]
[]
["EDITOR"]
go
1
0
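The chmod subcommand in the zk record above parses modes such as n+rw or n-dca into a bitmask over ZooKeeper's five permission bits (read, write, create, delete, admin). A small Python sketch of that mapping for illustration; the bit values follow ZooKeeper's standard constants, and the Go record remains the authoritative version:

# ZooKeeper permission bits: READ=1, WRITE=2, CREATE=4, DELETE=8, ADMIN=16
CHAR_PERM = {"r": 1, "w": 2, "c": 4, "d": 8, "a": 16}

def parse_mode(mode):
    # Parse a zk chmod mode such as 'n+rw' into (add_perms, permission_mask).
    if len(mode) < 3 or mode[0] != "n" or mode[1] not in "+-":
        raise ValueError("invalid mode")
    mask = 0
    for ch in mode[2:]:
        mask |= CHAR_PERM[ch]
    return mode[1] == "+", mask

print(parse_mode("n+rw"))   # (True, 3)
print(parse_mode("n-dca"))  # (False, 28)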
resources/lib/speech_recognition/__init__.py
#!/usr/bin/env python3 """Library for performing speech recognition, with support for several engines and APIs, online and offline.""" import io import os import sys import subprocess import wave import aifc import math import audioop import collections import json import base64 import threading import platform import stat import hashlib import hmac import time import uuid __author__ = "Anthony Zhang (Uberi)" __version__ = "3.8.1" __license__ = "BSD" try: # attempt to use the Python 2 modules from urllib import urlencode from urllib2 import Request, urlopen, URLError, HTTPError except ImportError: # use the Python 3 modules from urllib.parse import urlencode from urllib.request import Request, urlopen from urllib.error import URLError, HTTPError class WaitTimeoutError(Exception): pass class RequestError(Exception): pass class UnknownValueError(Exception): pass class AudioSource(object): def __init__(self): raise NotImplementedError("this is an abstract class") def __enter__(self): raise NotImplementedError("this is an abstract class") def __exit__(self, exc_type, exc_value, traceback): raise NotImplementedError("this is an abstract class") class Microphone(AudioSource): """ Creates a new ``Microphone`` instance, which represents a physical microphone on the computer. Subclass of ``AudioSource``. This will throw an ``AttributeError`` if you don't have PyAudio 0.2.11 or later installed. If ``device_index`` is unspecified or ``None``, the default microphone is used as the audio source. Otherwise, ``device_index`` should be the index of the device to use for audio input. A device index is an integer between 0 and ``pyaudio.get_device_count() - 1`` (assume we have used ``import pyaudio`` beforehand) inclusive. It represents an audio device such as a microphone or speaker. See the `PyAudio documentation <http://people.csail.mit.edu/hubert/pyaudio/docs/>`__ for more details. The microphone audio is recorded in chunks of ``chunk_size`` samples, at a rate of ``sample_rate`` samples per second (Hertz). If not specified, the value of ``sample_rate`` is determined automatically from the system's microphone settings. Higher ``sample_rate`` values result in better audio quality, but also more bandwidth (and therefore, slower recognition). Additionally, some CPUs, such as those in older Raspberry Pi models, can't keep up if this value is too high. Higher ``chunk_size`` values help avoid triggering on rapidly changing ambient noise, but also makes detection less sensitive. This value, generally, should be left at its default. 
""" def __init__(self, device_index=None, sample_rate=None, chunk_size=1024): assert device_index is None or isinstance(device_index, int), "Device index must be None or an integer" assert sample_rate is None or (isinstance(sample_rate, int) and sample_rate > 0), "Sample rate must be None or a positive integer" assert isinstance(chunk_size, int) and chunk_size > 0, "Chunk size must be a positive integer" # set up PyAudio self.pyaudio_module = self.get_pyaudio() audio = self.pyaudio_module.PyAudio() try: count = audio.get_device_count() # obtain device count if device_index is not None: # ensure device index is in range assert 0 <= device_index < count, "Device index out of range ({} devices available; device index should be between 0 and {} inclusive)".format(count, count - 1) if sample_rate is None: # automatically set the sample rate to the hardware's default sample rate if not specified device_info = audio.get_device_info_by_index(device_index) if device_index is not None else audio.get_default_input_device_info() assert isinstance(device_info.get("defaultSampleRate"), (float, int)) and device_info["defaultSampleRate"] > 0, "Invalid device info returned from PyAudio: {}".format(device_info) sample_rate = int(device_info["defaultSampleRate"]) except Exception: audio.terminate() raise self.device_index = device_index self.format = self.pyaudio_module.paInt16 # 16-bit int sampling self.SAMPLE_WIDTH = self.pyaudio_module.get_sample_size(self.format) # size of each sample self.SAMPLE_RATE = sample_rate # sampling rate in Hertz self.CHUNK = chunk_size # number of frames stored in each buffer self.audio = None self.stream = None @staticmethod def get_pyaudio(): """ Imports the pyaudio module and checks its version. Throws exceptions if pyaudio can't be found or a wrong version is installed """ try: #import pyaudio #import resources.lib.pyaudio as pyaudio try: print("Try to load PyAudioRpiB264Py37....") from resources.lib.pyaudio.PyAudioRpiB264Py37 import pyaudio except: try: print("Try to load PyAudioRpi64Py37....") from resources.lib.pyaudio.PyAudioRpi64Py37 import pyaudio except: try: print("Try to load PyAudioRpi64Py27....") from resources.lib.pyaudio.PyAudioRpi64Py27 import pyaudio except: try: print("Try to load PyAudioWin64Py27....") from resources.lib.pyaudio.PyAudioWin64Py27 import pyaudio except: try: print("Try to load PyAudioWin64Py37....") from resources.lib.pyaudio.PyAudioWin64Py37 import pyaudio except Exception as e: raise e except ImportError as e: raise AttributeError("Could not find PyAudio; check installation "+str(e)) from distutils.version import LooseVersion if LooseVersion(pyaudio.__version__) < LooseVersion("0.2.11"): raise AttributeError("PyAudio 0.2.11 or later is required (found version {})".format(pyaudio.__version__)) return pyaudio @staticmethod def list_microphone_names(): """ Returns a list of the names of all available microphones. For microphones where the name can't be retrieved, the list entry contains ``None`` instead. The index of each microphone's name is the same as its device index when creating a ``Microphone`` instance - indices in this list can be used as values of ``device_index``. 
""" audio = Microphone.get_pyaudio().PyAudio() try: result = [] for i in range(audio.get_device_count()): device_info = audio.get_device_info_by_index(i) result.append(device_info.get("name")) finally: audio.terminate() return result def __enter__(self): assert self.stream is None, "This audio source is already inside a context manager" self.audio = self.pyaudio_module.PyAudio() try: self.stream = Microphone.MicrophoneStream( self.audio.open( input_device_index=self.device_index, channels=1, format=self.format, rate=self.SAMPLE_RATE, frames_per_buffer=self.CHUNK, input=True, # stream is an input stream ) ) except Exception: self.audio.terminate() raise return self def __exit__(self, exc_type, exc_value, traceback): try: self.stream.close() finally: self.stream = None self.audio.terminate() class MicrophoneStream(object): def __init__(self, pyaudio_stream): self.pyaudio_stream = pyaudio_stream def read(self, size): return self.pyaudio_stream.read(size, exception_on_overflow=False) def close(self): try: # sometimes, if the stream isn't stopped, closing the stream throws an exception if not self.pyaudio_stream.is_stopped(): self.pyaudio_stream.stop_stream() finally: self.pyaudio_stream.close() class AudioFile(AudioSource): """ Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file ``filename_or_fileobject``. Subclass of ``AudioSource``. If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a file-like object such as ``io.BytesIO`` or similar. Note that functions that read from the audio (such as ``recognizer_instance.record`` or ``recognizer_instance.listen``) will move ahead in the stream. For example, if you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, the first time it will return the first 10 seconds of audio, and the second time it will return the 10 seconds of audio right after that. This is always reset to the beginning when entering an ``AudioFile`` context. WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are not supported and may result in undefined behaviour. Both AIFF and AIFF-C (compressed AIFF) formats are supported. FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result in undefined behaviour. 
""" def __init__(self, filename_or_fileobject): assert isinstance(filename_or_fileobject, (type(""), type(u""))) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object" self.filename_or_fileobject = filename_or_fileobject self.stream = None self.DURATION = None self.audio_reader = None self.little_endian = False self.SAMPLE_RATE = None self.CHUNK = None self.FRAME_COUNT = None def __enter__(self): assert self.stream is None, "This audio source is already inside a context manager" try: # attempt to read the file as WAV self.audio_reader = wave.open(self.filename_or_fileobject, "rb") self.little_endian = True # RIFF WAV is a little-endian format (most ``audioop`` operations assume that the frames are stored in little-endian form) except (wave.Error, EOFError): try: # attempt to read the file as AIFF self.audio_reader = aifc.open(self.filename_or_fileobject, "rb") self.little_endian = False # AIFF is a big-endian format except (aifc.Error, EOFError): # attempt to read the file as FLAC if hasattr(self.filename_or_fileobject, "read"): flac_data = self.filename_or_fileobject.read() else: with open(self.filename_or_fileobject, "rb") as f: flac_data = f.read() # run the FLAC converter with the FLAC data to get the AIFF data flac_converter = get_flac_converter() if os.name == "nt": # on Windows, specify that the process is to be started without showing a console window startup_info = subprocess.STARTUPINFO() startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW # specify that the wShowWindow field of `startup_info` contains a value startup_info.wShowWindow = subprocess.SW_HIDE # specify that the console window should be hidden else: startup_info = None # default startupinfo process = subprocess.Popen([ flac_converter, "--stdout", "--totally-silent", # put the resulting AIFF file in stdout, and make sure it's not mixed with any program output "--decode", "--force-aiff-format", # decode the FLAC file into an AIFF file "-", # the input FLAC file contents will be given in stdin ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, startupinfo=startup_info) aiff_data, _ = process.communicate(flac_data) aiff_file = io.BytesIO(aiff_data) try: self.audio_reader = aifc.open(aiff_file, "rb") except (aifc.Error, EOFError): raise ValueError("Audio file could not be read as PCM WAV, AIFF/AIFF-C, or Native FLAC; check if file is corrupted or in another format") self.little_endian = False # AIFF is a big-endian format assert 1 <= self.audio_reader.getnchannels() <= 2, "Audio must be mono or stereo" self.SAMPLE_WIDTH = self.audio_reader.getsampwidth() # 24-bit audio needs some special handling for old Python versions (workaround for https://bugs.python.org/issue12866) samples_24_bit_pretending_to_be_32_bit = False if self.SAMPLE_WIDTH == 3: # 24-bit audio try: audioop.bias(b"", self.SAMPLE_WIDTH, 0) # test whether this sample width is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do) except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less) samples_24_bit_pretending_to_be_32_bit = True # while the ``AudioFile`` instance will outwardly appear to be 32-bit, it will actually internally be 24-bit self.SAMPLE_WIDTH = 4 # the ``AudioFile`` instance should present itself as a 32-bit stream now, since we'll be converting into 32-bit on the fly when reading self.SAMPLE_RATE = self.audio_reader.getframerate() self.CHUNK = 4096 self.FRAME_COUNT = 
self.audio_reader.getnframes() self.DURATION = self.FRAME_COUNT / float(self.SAMPLE_RATE) self.stream = AudioFile.AudioFileStream(self.audio_reader, self.little_endian, samples_24_bit_pretending_to_be_32_bit) return self def __exit__(self, exc_type, exc_value, traceback): if not hasattr(self.filename_or_fileobject, "read"): # only close the file if it was opened by this class in the first place (if the file was originally given as a path) self.audio_reader.close() self.stream = None self.DURATION = None class AudioFileStream(object): def __init__(self, audio_reader, little_endian, samples_24_bit_pretending_to_be_32_bit): self.audio_reader = audio_reader # an audio file object (e.g., a `wave.Wave_read` instance) self.little_endian = little_endian # whether the audio data is little-endian (when working with big-endian things, we'll have to convert it to little-endian before we process it) self.samples_24_bit_pretending_to_be_32_bit = samples_24_bit_pretending_to_be_32_bit # this is true if the audio is 24-bit audio, but 24-bit audio isn't supported, so we have to pretend that this is 32-bit audio and convert it on the fly def read(self, size=-1): buffer = self.audio_reader.readframes(self.audio_reader.getnframes() if size == -1 else size) if not isinstance(buffer, bytes): buffer = b"" # workaround for https://bugs.python.org/issue24608 sample_width = self.audio_reader.getsampwidth() if not self.little_endian: # big endian format, convert to little endian on the fly if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that also means that we don't need to worry about 24-bit audio being unsupported, since Python 3.4+ always has that functionality) buffer = audioop.byteswap(buffer, sample_width) else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback buffer = buffer[sample_width - 1::-1] + b"".join(buffer[i + sample_width:i:-1] for i in range(sample_width - 1, len(buffer), sample_width)) # workaround for https://bugs.python.org/issue12866 if self.samples_24_bit_pretending_to_be_32_bit: # we need to convert samples from 24-bit to 32-bit before we can process them with ``audioop`` functions buffer = b"".join(b"\x00" + buffer[i:i + sample_width] for i in range(0, len(buffer), sample_width)) # since we're in little endian, we prepend a zero byte to each 24-bit sample to get a 32-bit sample sample_width = 4 # make sure we thread the buffer as 32-bit audio now, after converting it from 24-bit audio if self.audio_reader.getnchannels() != 1: # stereo audio buffer = audioop.tomono(buffer, sample_width, 1, 1) # convert stereo audio data to mono return buffer class AudioData(object): """ Creates a new ``AudioData`` instance, which represents mono audio data. The raw audio data is specified by ``frame_data``, which is a sequence of bytes representing audio samples. This is the frame data structure used by the PCM WAV format. The width of each sample, in bytes, is specified by ``sample_width``. Each group of ``sample_width`` bytes represents a single audio sample. The audio data is assumed to have a sample rate of ``sample_rate`` samples per second (Hertz). Usually, instances of this class are obtained from ``recognizer_instance.record`` or ``recognizer_instance.listen``, or in the callback for ``recognizer_instance.listen_in_background``, rather than instantiating them directly. 
""" def __init__(self, frame_data, sample_rate, sample_width): assert sample_rate > 0, "Sample rate must be a positive integer" assert sample_width % 1 == 0 and 1 <= sample_width <= 4, "Sample width must be between 1 and 4 inclusive" self.frame_data = frame_data self.sample_rate = sample_rate self.sample_width = int(sample_width) def get_segment(self, start_ms=None, end_ms=None): """ Returns a new ``AudioData`` instance, trimmed to a given time interval. In other words, an ``AudioData`` instance with the same audio data except starting at ``start_ms`` milliseconds in and ending ``end_ms`` milliseconds in. If not specified, ``start_ms`` defaults to the beginning of the audio, and ``end_ms`` defaults to the end. """ assert start_ms is None or start_ms >= 0, "``start_ms`` must be a non-negative number" assert end_ms is None or end_ms >= (0 if start_ms is None else start_ms), "``end_ms`` must be a non-negative number greater or equal to ``start_ms``" if start_ms is None: start_byte = 0 else: start_byte = int((start_ms * self.sample_rate * self.sample_width) // 1000) if end_ms is None: end_byte = len(self.frame_data) else: end_byte = int((end_ms * self.sample_rate * self.sample_width) // 1000) return AudioData(self.frame_data[start_byte:end_byte], self.sample_rate, self.sample_width) def get_raw_data(self, convert_rate=None, convert_width=None): """ Returns a byte string representing the raw frame data for the audio represented by the ``AudioData`` instance. If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. Writing these bytes directly to a file results in a valid `RAW/PCM audio file <https://en.wikipedia.org/wiki/Raw_audio_format>`__. 
""" assert convert_rate is None or convert_rate > 0, "Sample rate to convert to must be a positive integer" assert convert_width is None or (convert_width % 1 == 0 and 1 <= convert_width <= 4), "Sample width to convert to must be between 1 and 4 inclusive" raw_data = self.frame_data # make sure unsigned 8-bit audio (which uses unsigned samples) is handled like higher sample width audio (which uses signed samples) if self.sample_width == 1: raw_data = audioop.bias(raw_data, 1, -128) # subtract 128 from every sample to make them act like signed samples # resample audio at the desired rate if specified if convert_rate is not None and self.sample_rate != convert_rate: raw_data, _ = audioop.ratecv(raw_data, self.sample_width, 1, self.sample_rate, convert_rate, None) # convert samples to desired sample width if specified if convert_width is not None and self.sample_width != convert_width: if convert_width == 3: # we're converting the audio into 24-bit (workaround for https://bugs.python.org/issue12866) raw_data = audioop.lin2lin(raw_data, self.sample_width, 4) # convert audio into 32-bit first, which is always supported try: audioop.bias(b"", 3, 0) # test whether 24-bit audio is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do) except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less) raw_data = b"".join(raw_data[i + 1:i + 4] for i in range(0, len(raw_data), 4)) # since we're in little endian, we discard the first byte from each 32-bit sample to get a 24-bit sample else: # 24-bit audio fully supported, we don't need to shim anything raw_data = audioop.lin2lin(raw_data, self.sample_width, convert_width) else: raw_data = audioop.lin2lin(raw_data, self.sample_width, convert_width) # if the output is 8-bit audio with unsigned samples, convert the samples we've been treating as signed to unsigned again if convert_width == 1: raw_data = audioop.bias(raw_data, 1, 128) # add 128 to every sample to make them act like unsigned samples again return raw_data def get_wav_data(self, convert_rate=None, convert_width=None): """ Returns a byte string representing the contents of a WAV file containing the audio represented by the ``AudioData`` instance. If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. Writing these bytes directly to a file results in a valid `WAV file <https://en.wikipedia.org/wiki/WAV>`__. """ raw_data = self.get_raw_data(convert_rate, convert_width) sample_rate = self.sample_rate if convert_rate is None else convert_rate sample_width = self.sample_width if convert_width is None else convert_width # generate the WAV file contents with io.BytesIO() as wav_file: wav_writer = wave.open(wav_file, "wb") try: # note that we can't use context manager, since that was only added in Python 3.4 wav_writer.setframerate(sample_rate) wav_writer.setsampwidth(sample_width) wav_writer.setnchannels(1) wav_writer.writeframes(raw_data) wav_data = wav_file.getvalue() finally: # make sure resources are cleaned up wav_writer.close() return wav_data def get_aiff_data(self, convert_rate=None, convert_width=None): """ Returns a byte string representing the contents of an AIFF-C file containing the audio represented by the ``AudioData`` instance. 
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. Writing these bytes directly to a file results in a valid `AIFF-C file <https://en.wikipedia.org/wiki/Audio_Interchange_File_Format>`__. """ raw_data = self.get_raw_data(convert_rate, convert_width) sample_rate = self.sample_rate if convert_rate is None else convert_rate sample_width = self.sample_width if convert_width is None else convert_width # the AIFF format is big-endian, so we need to covnert the little-endian raw data to big-endian if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4 raw_data = audioop.byteswap(raw_data, sample_width) else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback raw_data = raw_data[sample_width - 1::-1] + b"".join(raw_data[i + sample_width:i:-1] for i in range(sample_width - 1, len(raw_data), sample_width)) # generate the AIFF-C file contents with io.BytesIO() as aiff_file: aiff_writer = aifc.open(aiff_file, "wb") try: # note that we can't use context manager, since that was only added in Python 3.4 aiff_writer.setframerate(sample_rate) aiff_writer.setsampwidth(sample_width) aiff_writer.setnchannels(1) aiff_writer.writeframes(raw_data) aiff_data = aiff_file.getvalue() finally: # make sure resources are cleaned up aiff_writer.close() return aiff_data def get_flac_data(self, convert_rate=None, convert_width=None): """ Returns a byte string representing the contents of a FLAC file containing the audio represented by the ``AudioData`` instance. Note that 32-bit FLAC is not supported. If the audio data is 32-bit and ``convert_width`` is not specified, then the resulting FLAC will be a 24-bit FLAC. If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. Writing these bytes directly to a file results in a valid `FLAC file <https://en.wikipedia.org/wiki/FLAC>`__. 
""" assert convert_width is None or (convert_width % 1 == 0 and 1 <= convert_width <= 3), "Sample width to convert to must be between 1 and 3 inclusive" if self.sample_width > 3 and convert_width is None: # resulting WAV data would be 32-bit, which is not convertable to FLAC using our encoder convert_width = 3 # the largest supported sample width is 24-bit, so we'll limit the sample width to that # run the FLAC converter with the WAV data to get the FLAC data wav_data = self.get_wav_data(convert_rate, convert_width) flac_converter = get_flac_converter() if os.name == "nt": # on Windows, specify that the process is to be started without showing a console window startup_info = subprocess.STARTUPINFO() startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW # specify that the wShowWindow field of `startup_info` contains a value startup_info.wShowWindow = subprocess.SW_HIDE # specify that the console window should be hidden else: startup_info = None # default startupinfo process = subprocess.Popen([ flac_converter, "--stdout", "--totally-silent", # put the resulting FLAC file in stdout, and make sure it's not mixed with any program output "--best", # highest level of compression available "-", # the input FLAC file contents will be given in stdin ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, startupinfo=startup_info) flac_data, stderr = process.communicate(wav_data) return flac_data class Recognizer(AudioSource): def __init__(self): """ Creates a new ``Recognizer`` instance, which represents a collection of speech recognition functionality. """ self.energy_threshold = 300 # minimum audio energy to consider for recording self.dynamic_energy_threshold = True self.dynamic_energy_adjustment_damping = 0.15 self.dynamic_energy_ratio = 1.5 self.pause_threshold = 0.8 # seconds of non-speaking audio before a phrase is considered complete self.operation_timeout = None # seconds after an internal operation (e.g., an API request) starts before it times out, or ``None`` for no timeout self.phrase_threshold = 0.3 # minimum seconds of speaking audio before we consider the speaking audio a phrase - values below this are ignored (for filtering out clicks and pops) self.non_speaking_duration = 0.5 # seconds of non-speaking audio to keep on both sides of the recording def record(self, source, duration=None, offset=None): """ Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` instance) starting at ``offset`` (or at the beginning if not specified) into an ``AudioData`` instance, which it returns. If ``duration`` is not specified, then it will record until there is no more audio input. """ assert isinstance(source, AudioSource), "Source must be an audio source" assert source.stream is not None, "Audio source must be entered before recording, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?" 
frames = io.BytesIO() seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE elapsed_time = 0 offset_time = 0 offset_reached = False while True: # loop for the total number of chunks needed if offset and not offset_reached: offset_time += seconds_per_buffer if offset_time > offset: offset_reached = True buffer = source.stream.read(source.CHUNK) if len(buffer) == 0: break if offset_reached or not offset: elapsed_time += seconds_per_buffer if duration and elapsed_time > duration: break frames.write(buffer) frame_data = frames.getvalue() frames.close() return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH) def adjust_for_ambient_noise(self, source, duration=1): """ Adjusts the energy threshold dynamically using audio from ``source`` (an ``AudioSource`` instance) to account for ambient noise. Intended to calibrate the energy threshold with the ambient energy level. Should be used on periods of audio without speech - will stop early if any speech is detected. The ``duration`` parameter is the maximum number of seconds that it will dynamically adjust the threshold for before returning. This value should be at least 0.5 in order to get a representative sample of the ambient noise. """ assert isinstance(source, AudioSource), "Source must be an audio source" assert source.stream is not None, "Audio source must be entered before adjusting, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?" assert self.pause_threshold >= self.non_speaking_duration >= 0 seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE elapsed_time = 0 # adjust energy threshold until a phrase starts while True: elapsed_time += seconds_per_buffer if elapsed_time > duration: break buffer = source.stream.read(source.CHUNK) energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal # dynamically adjust the energy threshold using asymmetric weighted average damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates target_energy = energy * self.dynamic_energy_ratio self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping) def snowboy_wait_for_hot_word(self, snowboy_location, snowboy_hot_word_files, source, timeout=None): # load snowboy library (NOT THREAD SAFE) sys.path.append(snowboy_location) import snowboydetect sys.path.pop() detector = snowboydetect.SnowboyDetect( resource_filename=os.path.join(snowboy_location, "resources", "common.res").encode(), model_str=",".join(snowboy_hot_word_files).encode() ) detector.SetAudioGain(1.0) detector.SetSensitivity(",".join(["0.4"] * len(snowboy_hot_word_files)).encode()) snowboy_sample_rate = detector.SampleRate() elapsed_time = 0 seconds_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE resampling_state = None # buffers capable of holding 5 seconds of original and resampled audio five_seconds_buffer_count = int(math.ceil(5 / seconds_per_buffer)) frames = collections.deque(maxlen=five_seconds_buffer_count) resampled_frames = collections.deque(maxlen=five_seconds_buffer_count) while True: elapsed_time += seconds_per_buffer if timeout and elapsed_time > timeout: raise WaitTimeoutError("listening timed out while waiting for hotword to be said") buffer = source.stream.read(source.CHUNK) if len(buffer) == 0: break # reached end of the stream frames.append(buffer) # resample audio to the required sample rate resampled_buffer, resampling_state = audioop.ratecv(buffer, source.SAMPLE_WIDTH, 1, source.SAMPLE_RATE, 
snowboy_sample_rate, resampling_state) resampled_frames.append(resampled_buffer) # run Snowboy on the resampled audio snowboy_result = detector.RunDetection(b"".join(resampled_frames)) assert snowboy_result != -1, "Error initializing streams or reading audio data" if snowboy_result > 0: break # wake word found return b"".join(frames), elapsed_time def listen(self, source, timeout=None, phrase_time_limit=None, snowboy_configuration=None): """ Records a single phrase from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns. This is done by waiting until the audio has an energy above ``recognizer_instance.energy_threshold`` (the user has started speaking), and then recording until it encounters ``recognizer_instance.pause_threshold`` seconds of non-speaking or there is no more audio input. The ending silence is not included. The ``timeout`` parameter is the maximum number of seconds that this will wait for a phrase to start before giving up and throwing a ``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is ``None``, there will be no wait timeout. The ``phrase_time_limit`` parameter is the maximum number of seconds that this will allow a phrase to continue before stopping and returning the part of the phrase processed before the time limit was reached. The resulting audio will be the phrase cut off at the time limit. If ``phrase_time_limit`` is ``None``, there will be no phrase time limit. The ``snowboy_configuration`` parameter allows integration with `Snowboy <https://snowboy.kitt.ai/>`__, an offline, high-accuracy, power-efficient hotword recognition engine. When used, this function will pause until Snowboy detects a hotword, after which it will unpause. This parameter should either be ``None`` to turn off Snowboy support, or a tuple of the form ``(SNOWBOY_LOCATION, LIST_OF_HOT_WORD_FILES)``, where ``SNOWBOY_LOCATION`` is the path to the Snowboy root directory, and ``LIST_OF_HOT_WORD_FILES`` is a list of paths to Snowboy hotword configuration files (`*.pmdl` or `*.umdl` format). This operation will always complete within ``timeout + phrase_time_limit`` seconds if both are numbers, either by returning the audio data, or by raising a ``speech_recognition.WaitTimeoutError`` exception. """ assert isinstance(source, AudioSource), "Source must be an audio source" assert source.stream is not None, "Audio source must be entered before listening, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0 if snowboy_configuration is not None: assert os.path.isfile(os.path.join(snowboy_configuration[0], "snowboydetect.py")), "``snowboy_configuration[0]`` must be a Snowboy root directory containing ``snowboydetect.py``" for hot_word_file in snowboy_configuration[1]: assert os.path.isfile(hot_word_file), "``snowboy_configuration[1]`` must be a list of Snowboy hot word configuration files" seconds_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) # number of buffers of non-speaking audio during a phrase, before the phrase should be considered complete phrase_buffer_count = int(math.ceil(self.phrase_threshold / seconds_per_buffer)) # minimum number of buffers of speaking audio before we consider the speaking audio a phrase non_speaking_buffer_count = int(math.ceil(self.non_speaking_duration / seconds_per_buffer)) # maximum number of buffers of non-speaking audio to retain before and after a phrase # read audio input for phrases until there is a phrase that is long enough elapsed_time = 0 # number of seconds of audio read buffer = b"" # an empty buffer means that the stream has ended and there is no data left to read while True: frames = collections.deque() if snowboy_configuration is None: # store audio input until the phrase starts while True: # handle waiting too long for phrase by raising an exception elapsed_time += seconds_per_buffer if timeout and elapsed_time > timeout: raise WaitTimeoutError("listening timed out while waiting for phrase to start") buffer = source.stream.read(source.CHUNK) if len(buffer) == 0: break # reached end of the stream frames.append(buffer) if len(frames) > non_speaking_buffer_count: # ensure we only keep the needed amount of non-speaking buffers frames.popleft() # detect whether speaking has started on audio input energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal if energy > self.energy_threshold: break # dynamically adjust the energy threshold using asymmetric weighted average if self.dynamic_energy_threshold: damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates target_energy = energy * self.dynamic_energy_ratio self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping) else: # read audio input until the hotword is said snowboy_location, snowboy_hot_word_files = snowboy_configuration buffer, delta_time = self.snowboy_wait_for_hot_word(snowboy_location, snowboy_hot_word_files, source, timeout) elapsed_time += delta_time if len(buffer) == 0: break # reached end of the stream frames.append(buffer) # read audio input until the phrase ends pause_count, phrase_count = 0, 0 phrase_start_time = elapsed_time while True: # handle phrase being too long by cutting off the audio elapsed_time += seconds_per_buffer if phrase_time_limit and elapsed_time - phrase_start_time > phrase_time_limit: break buffer = source.stream.read(source.CHUNK) if len(buffer) == 0: break # reached end of the stream frames.append(buffer) phrase_count += 1 # check if speaking has stopped for longer than the pause threshold on the audio input energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # unit energy of the audio signal within the buffer if energy > self.energy_threshold: pause_count = 0 else: pause_count += 1 if pause_count > pause_buffer_count: # end of the phrase break # check how long the detected phrase is, and retry listening 
if the phrase is too short phrase_count -= pause_count # exclude the buffers for the pause before the phrase if phrase_count >= phrase_buffer_count or len(buffer) == 0: break # phrase is long enough or we've reached the end of the stream, so stop listening # obtain frame data for i in range(pause_count - non_speaking_buffer_count): frames.pop() # remove extra non-speaking frames at the end frame_data = b"".join(frames) return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH) def listen_in_background(self, source, callback, phrase_time_limit=None): """ Spawns a thread to repeatedly record phrases from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` with that ``AudioData`` instance as soon as each phrase are detected. Returns a function object that, when called, requests that the background listener thread stop. The background thread is a daemon and will not stop the program from exiting if there are no other non-daemon threads. The function accepts one parameter, ``wait_for_stop``: if truthy, the function will wait for the background listener to stop before returning, otherwise it will return immediately and the background listener thread might still be running for a second or two afterwards. Additionally, if you are using a truthy value for ``wait_for_stop``, you must call the function from the same thread you originally called ``listen_in_background`` from. Phrase recognition uses the exact same mechanism as ``recognizer_instance.listen(source)``. The ``phrase_time_limit`` parameter works in the same way as the ``phrase_time_limit`` parameter for ``recognizer_instance.listen(source)``, as well. The ``callback`` parameter is a function that should accept two parameters - the ``recognizer_instance``, and an ``AudioData`` instance representing the captured audio. Note that ``callback`` function will be called from a non-main thread. """ assert isinstance(source, AudioSource), "Source must be an audio source" running = [True] def threaded_listen(): with source as s: while running[0]: try: # listen for 1 second, then check again if the stop function has been called audio = self.listen(s, 1, phrase_time_limit) except WaitTimeoutError: # listening timed out, just try again pass else: if running[0]: callback(self, audio) def stopper(wait_for_stop=True): running[0] = False if wait_for_stop: listener_thread.join() # block until the background thread is done, which can take around 1 second listener_thread = threading.Thread(target=threaded_listen) listener_thread.daemon = True listener_thread.start() return stopper def recognize_sphinx(self, audio_data, language="en-US", keyword_entries=None, grammar=None, show_all=False): """ Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using CMU Sphinx. The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` or ``"en-GB"``, defaulting to US English. Out of the box, only ``en-US`` is supported. See `Notes on using `PocketSphinx <https://github.com/Uberi/speech_recognition/blob/master/reference/pocketsphinx.rst>`__ for information about installing other languages. This document is also included under ``reference/pocketsphinx.rst``. The ``language`` parameter can also be a tuple of filesystem paths, of the form ``(acoustic_parameters_directory, language_model_file, phoneme_dictionary_file)`` - this allows you to load arbitrary Sphinx models. 
If specified, the keywords to search for are determined by ``keyword_entries``, an iterable of tuples of the form ``(keyword, sensitivity)``, where ``keyword`` is a phrase, and ``sensitivity`` is how sensitive to this phrase the recognizer should be, on a scale of 0 (very insensitive, more false negatives) to 1 (very sensitive, more false positives) inclusive. If not specified or ``None``, no keywords are used and Sphinx will simply transcribe whatever words it recognizes. Specifying ``keyword_entries`` is more accurate than just looking for those same keywords in non-keyword-based transcriptions, because Sphinx knows specifically what sounds to look for. Sphinx can also handle FSG or JSGF grammars. The parameter ``grammar`` expects a path to the grammar file. Note that if a JSGF grammar is passed, an FSG grammar will be created at the same location to speed up execution in the next run. If ``keyword_entries`` are passed, content of ``grammar`` will be ignored. Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the Sphinx ``pocketsphinx.pocketsphinx.Decoder`` object resulting from the recognition. Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if there are any issues with the Sphinx installation. """ assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data" assert isinstance(language, str) or (isinstance(language, tuple) and len(language) == 3), "``language`` must be a string or 3-tuple of Sphinx data file paths of the form ``(acoustic_parameters, language_model, phoneme_dictionary)``" assert keyword_entries is None or all(isinstance(keyword, (type(""), type(u""))) and 0 <= sensitivity <= 1 for keyword, sensitivity in keyword_entries), "``keyword_entries`` must be ``None`` or a list of pairs of strings and numbers between 0 and 1" # import the PocketSphinx speech recognition module try: from pocketsphinx import pocketsphinx, Jsgf, FsgModel except ImportError: raise RequestError("missing PocketSphinx module: ensure that PocketSphinx is set up correctly.") except ValueError: raise RequestError("bad PocketSphinx installation; try reinstalling PocketSphinx version 0.0.9 or better.") if not hasattr(pocketsphinx, "Decoder") or not hasattr(pocketsphinx.Decoder, "default_config"): raise RequestError("outdated PocketSphinx installation; ensure you have PocketSphinx version 0.0.9 or better.") if isinstance(language, str): # directory containing language data language_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pocketsphinx-data", language) if not os.path.isdir(language_directory): raise RequestError("missing PocketSphinx language data directory: \"{}\"".format(language_directory)) acoustic_parameters_directory = os.path.join(language_directory, "acoustic-model") language_model_file = os.path.join(language_directory, "language-model.lm.bin") phoneme_dictionary_file = os.path.join(language_directory, "pronounciation-dictionary.dict") else: # 3-tuple of Sphinx data file paths acoustic_parameters_directory, language_model_file, phoneme_dictionary_file = language if not os.path.isdir(acoustic_parameters_directory): raise RequestError("missing PocketSphinx language model parameters directory: \"{}\"".format(acoustic_parameters_directory)) if not os.path.isfile(language_model_file): raise RequestError("missing PocketSphinx language model file: \"{}\"".format(language_model_file)) if not 
os.path.isfile(phoneme_dictionary_file): raise RequestError("missing PocketSphinx phoneme dictionary file: \"{}\"".format(phoneme_dictionary_file)) # create decoder object config = pocketsphinx.Decoder.default_config() config.set_string("-hmm", acoustic_parameters_directory) # set the path of the hidden Markov model (HMM) parameter files config.set_string("-lm", language_model_file) config.set_string("-dict", phoneme_dictionary_file) config.set_string("-logfn", os.devnull) # disable logging (logging causes unwanted output in terminal) decoder = pocketsphinx.Decoder(config) # obtain audio data raw_data = audio_data.get_raw_data(convert_rate=16000, convert_width=2) # the included language models require audio to be 16-bit mono 16 kHz in little-endian format # obtain recognition results if keyword_entries is not None: # explicitly specified set of keywords with PortableNamedTemporaryFile("w") as f: # generate a keywords file - Sphinx documentation recommendeds sensitivities between 1e-50 and 1e-5 f.writelines("{} /1e{}/\n".format(keyword, 100 * sensitivity - 110) for keyword, sensitivity in keyword_entries) f.flush() # perform the speech recognition with the keywords file (this is inside the context manager so the file isn;t deleted until we're done) decoder.set_kws("keywords", f.name) decoder.set_search("keywords") decoder.start_utt() # begin utterance processing decoder.process_raw(raw_data, False, True) # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True) decoder.end_utt() # stop utterance processing elif grammar is not None: # a path to a FSG or JSGF grammar if not os.path.exists(grammar): raise ValueError("Grammar '{0}' does not exist.".format(grammar)) grammar_path = os.path.abspath(os.path.dirname(grammar)) grammar_name = os.path.splitext(os.path.basename(grammar))[0] fsg_path = "{0}/{1}.fsg".format(grammar_path, grammar_name) if not os.path.exists(fsg_path): # create FSG grammar if not available jsgf = Jsgf(grammar) rule = jsgf.get_rule("{0}.{0}".format(grammar_name)) fsg = jsgf.build_fsg(rule, decoder.get_logmath(), 7.5) fsg.writefile(fsg_path) else: fsg = FsgModel(fsg_path, decoder.get_logmath(), 7.5) decoder.set_fsg(grammar_name, fsg) decoder.set_search(grammar_name) decoder.start_utt() decoder.process_raw(raw_data, False, True) # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True) decoder.end_utt() # stop utterance processing else: # no keywords, perform freeform recognition decoder.start_utt() # begin utterance processing decoder.process_raw(raw_data, False, True) # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True) decoder.end_utt() # stop utterance processing if show_all: return decoder # return results hypothesis = decoder.hyp() if hypothesis is not None: return hypothesis.hypstr raise UnknownValueError() # no transcriptions available def recognize_google(self, audio_data, key=None, language="en-US", show_all=False): """ Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API. The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Google at any time**. 
To obtain your own API key, simply following the steps on the `API Keys <http://www.chromium.org/developers/how-tos/api-keys>`__ page at the Chromium Developers site. In the Google Developers Console, Google Speech Recognition is listed as "Speech API". The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language tags can be found in this `StackOverflow answer <http://stackoverflow.com/a/14302134>`__. Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary. Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. """ assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data" assert key is None or isinstance(key, str), "``key`` must be ``None`` or a string" assert isinstance(language, str), "``language`` must be a string" flac_data = audio_data.get_flac_data( convert_rate=None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz convert_width=2 # audio samples must be 16-bit ) if key is None: key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw" url = "http://www.google.com/speech-api/v2/recognize?{}".format(urlencode({ "client": "chromium", "lang": language, "key": key, })) request = Request(url, data=flac_data, headers={"Content-Type": "audio/x-flac; rate={}".format(audio_data.sample_rate)}) # obtain audio transcription results try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: raise RequestError("recognition request failed: {}".format(e.reason)) except URLError as e: raise RequestError("recognition connection failed: {}".format(e.reason)) response_text = response.read().decode("utf-8") # ignore any blank blocks actual_result = [] for line in response_text.split("\n"): if not line: continue result = json.loads(line)["result"] if len(result) != 0: actual_result = result[0] break # return results if show_all: return actual_result if not isinstance(actual_result, dict) or len(actual_result.get("alternative", [])) == 0: raise UnknownValueError() if "confidence" in actual_result["alternative"]: # return alternative with highest confidence score best_hypothesis = max(actual_result["alternative"], key=lambda alternative: alternative["confidence"]) else: # when there is no confidence available, we arbitrarily choose the first hypothesis. best_hypothesis = actual_result["alternative"][0] if "transcript" not in best_hypothesis: raise UnknownValueError() return best_hypothesis["transcript"] def recognize_google_cloud(self, audio_data, credentials_json=None, language="en-US", preferred_phrases=None, show_all=False): """ Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Cloud Speech API. This function requires a Google Cloud Platform account; see the `Google Cloud Speech API Quickstart <https://cloud.google.com/speech/docs/getting-started>`__ for details and instructions. Basically, create a project, enable billing for the project, enable the Google Cloud Speech API for the project, and set up Service Account Key credentials for the project. The result is a JSON file containing the API credentials. 
The text content of this JSON file is specified by ``credentials_json``. If not specified, the library will try to automatically `find the default API credentials JSON file <https://developers.google.com/identity/protocols/application-default-credentials>`__. The recognition language is determined by ``language``, which is a BCP-47 language tag like ``"en-US"`` (US English). A list of supported language tags can be found in the `Google Cloud Speech API documentation <https://cloud.google.com/speech/docs/languages>`__. If ``preferred_phrases`` is an iterable of phrase strings, those given phrases will be more likely to be recognized over similar-sounding alternatives. This is useful for things like keyword/command recognition or adding new phrases that aren't in Google's vocabulary. Note that the API imposes certain `restrictions on the list of phrase strings <https://cloud.google.com/speech/limits#content>`__. Returns the most likely transcription if ``show_all`` is False (the default). Otherwise, returns the raw API response as a JSON dictionary. Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the credentials aren't valid, or if there is no Internet connection. """ assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data" if credentials_json is not None: try: json.loads(credentials_json) except Exception: raise AssertionError("``credentials_json`` must be ``None`` or a valid JSON string") assert isinstance(language, str), "``language`` must be a string" assert preferred_phrases is None or all(isinstance(preferred_phrases, (type(""), type(u""))) for preferred_phrases in preferred_phrases), "``preferred_phrases`` must be a list of strings" # See https://cloud.google.com/speech/reference/rest/v1/RecognitionConfig flac_data = audio_data.get_flac_data( convert_rate=None if 8000 <= audio_data.sample_rate <= 48000 else max(8000, min(audio_data.sample_rate, 48000)), # audio sample rate must be between 8 kHz and 48 kHz inclusive - clamp sample rate into this range convert_width=2 # audio samples must be 16-bit ) try: from oauth2client.client import GoogleCredentials from googleapiclient.discovery import build import googleapiclient.errors # cannot simply use 'http = httplib2.Http(timeout=self.operation_timeout)' # because discovery.build() says 'Arguments http and credentials are mutually exclusive' import socket import googleapiclient.http if self.operation_timeout and socket.getdefaulttimeout() is None: # override constant (used by googleapiclient.http.build_http()) googleapiclient.http.DEFAULT_HTTP_TIMEOUT_SEC = self.operation_timeout if credentials_json is None: api_credentials = GoogleCredentials.get_application_default() else: # the credentials can only be read from a file, so we'll make a temp file and write in the contents to work around that with PortableNamedTemporaryFile("w") as f: f.write(credentials_json) f.flush() api_credentials = GoogleCredentials.from_stream(f.name) speech_service = build("speech", "v1", credentials=api_credentials, cache_discovery=False) except ImportError: raise RequestError("missing google-api-python-client module: ensure that google-api-python-client is set up correctly.") speech_config = {"encoding": "FLAC", "sampleRateHertz": audio_data.sample_rate, "languageCode": language} if preferred_phrases is not None: speech_config["speechContext"] = {"phrases": preferred_phrases} if show_all: 
speech_config["enableWordTimeOffsets"] = True # some useful extra options for when we want all the output request = speech_service.speech().recognize(body={"audio": {"content": base64.b64encode(flac_data).decode("utf8")}, "config": speech_config}) try: response = request.execute() except googleapiclient.errors.HttpError as e: raise RequestError(e) except URLError as e: raise RequestError("recognition connection failed: {0}".format(e.reason)) if show_all: return response if "results" not in response or len(response["results"]) == 0: raise UnknownValueError() transcript = "" for result in response["results"]: transcript += result["alternatives"][0]["transcript"].strip() + " " return transcript def recognize_wit(self, audio_data, key, show_all=False): """ Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Wit.ai API. The Wit.ai API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://wit.ai/>`__ and creating an app. You will need to add at least one intent to the app before you can see the API key, though the actual intent settings don't matter. To get the API key for a Wit.ai app, go to the app's overview page, go to the section titled "Make an API request", and look for something along the lines of ``Authorization: Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX``; ``XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`` is the API key. Wit.ai API keys are 32-character uppercase alphanumeric strings. The recognition language is configured in the Wit.ai app settings. Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://wit.ai/docs/http/20141022#get-intent-via-text-link>`__ as a JSON dictionary. Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. """ assert isinstance(audio_data, AudioData), "Data must be audio data" assert isinstance(key, str), "``key`` must be a string" wav_data = audio_data.get_wav_data( convert_rate=None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz convert_width=2 # audio samples should be 16-bit ) url = "https://api.wit.ai/speech?v=20160526" request = Request(url, data=wav_data, headers={"Authorization": "Bearer {}".format(key), "Content-Type": "audio/wav"}) try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: raise RequestError("recognition request failed: {}".format(e.reason)) except URLError as e: raise RequestError("recognition connection failed: {}".format(e.reason)) response_text = response.read().decode("utf-8") result = json.loads(response_text) # return results if show_all: return result if "_text" not in result or result["_text"] is None: raise UnknownValueError() return result["_text"] def recognize_bing(self, audio_data, key, language="en-US", show_all=False): """ Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Bing Speech API. The Microsoft Bing Speech API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://azure.microsoft.com/en-ca/pricing/details/cognitive-services/speech-api/>`__ with Microsoft Azure. 
To get the API key, go to the `Microsoft Azure Portal Resources <https://portal.azure.com/>`__ page, go to "All Resources" > "Add" > "See All" > Search "Bing Speech API > "Create", and fill in the form to make a "Bing Speech API" resource. On the resulting page (which is also accessible from the "All Resources" page in the Azure Portal), go to the "Show Access Keys" page, which will have two API keys, either of which can be used for the `key` parameter. Microsoft Bing Speech API keys are 32-character lowercase hexadecimal strings. The recognition language is determined by ``language``, a BCP-47 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation <https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition#recognition-language>`__ under "Interactive and dictation mode". Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition#sample-responses>`__ as a JSON dictionary. Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. """ assert isinstance(audio_data, AudioData), "Data must be audio data" assert isinstance(key, str), "``key`` must be a string" assert isinstance(language, str), "``language`` must be a string" access_token, expire_time = getattr(self, "bing_cached_access_token", None), getattr(self, "bing_cached_access_token_expiry", None) allow_caching = True try: from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+ except ImportError: try: from monotonic import monotonic # use time.monotonic backport for Python 2 if available (from https://pypi.python.org/pypi/monotonic) except (ImportError, RuntimeError): expire_time = None # monotonic time not available, don't cache access tokens allow_caching = False # don't allow caching, since monotonic time isn't available if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired # get an access token using OAuth credential_url = "https://api.cognitive.microsoft.com/sts/v1.0/issueToken" credential_request = Request(credential_url, data=b"", headers={ "Content-type": "application/x-www-form-urlencoded", "Content-Length": "0", "Ocp-Apim-Subscription-Key": key, }) if allow_caching: start_time = monotonic() try: credential_response = urlopen(credential_request, timeout=60) # credential response can take longer, use longer timeout instead of default one except HTTPError as e: raise RequestError("credential request failed: {}".format(e.reason)) except URLError as e: raise RequestError("credential connection failed: {}".format(e.reason)) access_token = credential_response.read().decode("utf-8") if allow_caching: # save the token for the duration it is valid for self.bing_cached_access_token = access_token self.bing_cached_access_token_expiry = start_time + 600 # according to https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition, the token expires in exactly 10 minutes wav_data = 
audio_data.get_wav_data( convert_rate=16000, # audio samples must be 8kHz or 16 kHz convert_width=2 # audio samples should be 16-bit ) url = "https://speech.platform.bing.com/speech/recognition/interactive/cognitiveservices/v1?{}".format(urlencode({ "language": language, "locale": language, "requestid": uuid.uuid4(), })) if sys.version_info >= (3, 6): # chunked-transfer requests are only supported in the standard library as of Python 3.6+, use it if possible request = Request(url, data=io.BytesIO(wav_data), headers={ "Authorization": "Bearer {}".format(access_token), "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000", "Transfer-Encoding": "chunked", }) else: # fall back on manually formatting the POST body as a chunked request ascii_hex_data_length = "{:X}".format(len(wav_data)).encode("utf-8") chunked_transfer_encoding_data = ascii_hex_data_length + b"\r\n" + wav_data + b"\r\n0\r\n\r\n" request = Request(url, data=chunked_transfer_encoding_data, headers={ "Authorization": "Bearer {}".format(access_token), "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000", "Transfer-Encoding": "chunked", }) try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: raise RequestError("recognition request failed: {}".format(e.reason)) except URLError as e: raise RequestError("recognition connection failed: {}".format(e.reason)) response_text = response.read().decode("utf-8") result = json.loads(response_text) # return results if show_all: return result if "RecognitionStatus" not in result or result["RecognitionStatus"] != "Success" or "DisplayText" not in result: raise UnknownValueError() return result["DisplayText"] def recognize_houndify(self, audio_data, client_id, client_key, show_all=False): """ Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Houndify API. The Houndify client ID and client key are specified by ``client_id`` and ``client_key``, respectively. Unfortunately, these are not available without `signing up for an account <https://www.houndify.com/signup>`__. Once logged into the `dashboard <https://www.houndify.com/dashboard>`__, you will want to select "Register a new client", and fill in the form as necessary. When at the "Enable Domains" page, enable the "Speech To Text Only" domain, and then select "Save & Continue". To get the client ID and client key for a Houndify client, go to the `dashboard <https://www.houndify.com/dashboard>`__ and select the client's "View Details" link. On the resulting page, the client ID and client key will be visible. Client IDs and client keys are both Base64-encoded strings. Currently, only English is supported as a recognition language. Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary. Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. 
""" assert isinstance(audio_data, AudioData), "Data must be audio data" assert isinstance(client_id, str), "``client_id`` must be a string" assert isinstance(client_key, str), "``client_key`` must be a string" wav_data = audio_data.get_wav_data( convert_rate=None if audio_data.sample_rate in [8000, 16000] else 16000, # audio samples must be 8 kHz or 16 kHz convert_width=2 # audio samples should be 16-bit ) url = "https://api.houndify.com/v1/audio" user_id, request_id = str(uuid.uuid4()), str(uuid.uuid4()) request_time = str(int(time.time())) request_signature = base64.urlsafe_b64encode( hmac.new( base64.urlsafe_b64decode(client_key), user_id.encode("utf-8") + b";" + request_id.encode("utf-8") + request_time.encode("utf-8"), hashlib.sha256 ).digest() # get the HMAC digest as bytes ).decode("utf-8") request = Request(url, data=wav_data, headers={ "Content-Type": "application/json", "Hound-Request-Info": json.dumps({"ClientID": client_id, "UserID": user_id}), "Hound-Request-Authentication": "{};{}".format(user_id, request_id), "Hound-Client-Authentication": "{};{};{}".format(client_id, request_time, request_signature) }) try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: raise RequestError("recognition request failed: {}".format(e.reason)) except URLError as e: raise RequestError("recognition connection failed: {}".format(e.reason)) response_text = response.read().decode("utf-8") result = json.loads(response_text) # return results if show_all: return result if "Disambiguation" not in result or result["Disambiguation"] is None: raise UnknownValueError() return result['Disambiguation']['ChoiceData'][0]['Transcription'] def recognize_ibm(self, audio_data, username, password, language="en-US", show_all=False): """ Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the IBM Speech to Text API. The IBM Speech to Text username and password are specified by ``username`` and ``password``, respectively. Unfortunately, these are not available without `signing up for an account <https://console.ng.bluemix.net/registration/>`__. Once logged into the Bluemix console, follow the instructions for `creating an IBM Watson service instance <https://www.ibm.com/watson/developercloud/doc/getting_started/gs-credentials.shtml>`__, where the Watson service is "Speech To Text". IBM Speech to Text usernames are strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX, while passwords are mixed-case alphanumeric strings. The recognition language is determined by ``language``, an RFC5646 language tag with a dialect like ``"en-US"`` (US English) or ``"zh-CN"`` (Mandarin Chinese), defaulting to US English. The supported language values are listed under the ``model`` parameter of the `audio recognition API documentation <https://www.ibm.com/watson/developercloud/speech-to-text/api/v1/#sessionless_methods>`__, in the form ``LANGUAGE_BroadbandModel``, where ``LANGUAGE`` is the language value. Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://www.ibm.com/watson/developercloud/speech-to-text/api/v1/#sessionless_methods>`__ as a JSON dictionary. Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. 
""" assert isinstance(audio_data, AudioData), "Data must be audio data" assert isinstance(username, str), "``username`` must be a string" assert isinstance(password, str), "``password`` must be a string" flac_data = audio_data.get_flac_data( convert_rate=None if audio_data.sample_rate >= 16000 else 16000, # audio samples should be at least 16 kHz convert_width=None if audio_data.sample_width >= 2 else 2 # audio samples should be at least 16-bit ) url = "https://stream.watsonplatform.net/speech-to-text/api/v1/recognize?{}".format(urlencode({ "profanity_filter": "false", "model": "{}_BroadbandModel".format(language), "inactivity_timeout": -1, # don't stop recognizing when the audio stream activity stops })) request = Request(url, data=flac_data, headers={ "Content-Type": "audio/x-flac", "X-Watson-Learning-Opt-Out": "true", # prevent requests from being logged, for improved privacy }) authorization_value = base64.standard_b64encode("{}:{}".format(username, password).encode("utf-8")).decode("utf-8") request.add_header("Authorization", "Basic {}".format(authorization_value)) try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: raise RequestError("recognition request failed: {}".format(e.reason)) except URLError as e: raise RequestError("recognition connection failed: {}".format(e.reason)) response_text = response.read().decode("utf-8") result = json.loads(response_text) # return results if show_all: return result if "results" not in result or len(result["results"]) < 1 or "alternatives" not in result["results"][0]: raise UnknownValueError() transcription = [] for utterance in result["results"]: if "alternatives" not in utterance: raise UnknownValueError() for hypothesis in utterance["alternatives"]: if "transcript" in hypothesis: transcription.append(hypothesis["transcript"]) return "\n".join(transcription) def get_flac_converter(): """Returns the absolute path of a FLAC converter executable, or raises an OSError if none can be found.""" flac_converter = shutil_which("flac") # check for installed version first if flac_converter is None: # flac utility is not installed base_path = os.path.dirname(os.path.abspath(__file__)) # directory of the current module file, where all the FLAC bundled binaries are stored system, machine = platform.system(), platform.machine() if system == "Windows" and machine in {"i686", "i786", "x86", "x86_64", "AMD64"}: flac_converter = os.path.join(base_path, "flac-win32.exe") elif system == "Darwin" and machine in {"i686", "i786", "x86", "x86_64", "AMD64"}: flac_converter = os.path.join(base_path, "flac-mac") elif system == "Linux" and machine in {"i686", "i786", "x86"}: flac_converter = os.path.join(base_path, "flac-linux-x86") elif system == "Linux" and machine in {"x86_64", "AMD64"}: flac_converter = os.path.join(base_path, "flac-linux-x86_64") else: # no FLAC converter available raise OSError("FLAC conversion utility not available - consider installing the FLAC command line application by running `apt-get install flac` or your operating system's equivalent") # mark FLAC converter as executable if possible try: # handle known issue when running on docker: # run executable right after chmod() may result in OSError "Text file busy" # fix: flush FS with sync if not os.access(flac_converter, os.X_OK): stat_info = os.stat(flac_converter) os.chmod(flac_converter, stat_info.st_mode | stat.S_IEXEC) if 'Linux' in platform.system(): os.sync() if sys.version_info >= (3, 3) else os.system('sync') except OSError: pass return flac_converter def 
shutil_which(pgm): """Python 2 compatibility: backport of ``shutil.which()`` from Python 3""" path = os.getenv('PATH') for p in path.split(os.path.pathsep): p = os.path.join(p, pgm) if os.path.exists(p) and os.access(p, os.X_OK): return p class PortableNamedTemporaryFile(object): """Limited replacement for ``tempfile.NamedTemporaryFile``, except unlike ``tempfile.NamedTemporaryFile``, the file can be opened again while it's currently open, even on Windows.""" def __init__(self, mode="w+b"): self.mode = mode def __enter__(self): # create the temporary file and open it import tempfile file_descriptor, file_path = tempfile.mkstemp() self._file = os.fdopen(file_descriptor, self.mode) # the name property is a public field self.name = file_path return self def __exit__(self, exc_type, exc_value, traceback): self._file.close() os.remove(self.name) def write(self, *args, **kwargs): return self._file.write(*args, **kwargs) def writelines(self, *args, **kwargs): return self._file.writelines(*args, **kwargs) def flush(self, *args, **kwargs): return self._file.flush(*args, **kwargs) # =============================== # backwards compatibility shims # =============================== WavFile = AudioFile # WavFile was renamed to AudioFile in 3.4.1 def recognize_api(self, audio_data, client_access_token, language="en", session_id=None, show_all=False): wav_data = audio_data.get_wav_data(convert_rate=16000, convert_width=2) url = "https://api.api.ai/v1/query" while True: boundary = uuid.uuid4().hex if boundary.encode("utf-8") not in wav_data: break if session_id is None: session_id = uuid.uuid4().hex data = b"--" + boundary.encode("utf-8") + b"\r\n" + b"Content-Disposition: form-data; name=\"request\"\r\n" + b"Content-Type: application/json\r\n" + b"\r\n" + b"{\"v\": \"20150910\", \"sessionId\": \"" + session_id.encode("utf-8") + b"\", \"lang\": \"" + language.encode("utf-8") + b"\"}\r\n" + b"--" + boundary.encode("utf-8") + b"\r\n" + b"Content-Disposition: form-data; name=\"voiceData\"; filename=\"audio.wav\"\r\n" + b"Content-Type: audio/wav\r\n" + b"\r\n" + wav_data + b"\r\n" + b"--" + boundary.encode("utf-8") + b"--\r\n" request = Request(url, data=data, headers={"Authorization": "Bearer {}".format(client_access_token), "Content-Length": str(len(data)), "Expect": "100-continue", "Content-Type": "multipart/form-data; boundary={}".format(boundary)}) try: response = urlopen(request, timeout=10) except HTTPError as e: raise RequestError("recognition request failed: {}".format(e.reason)) except URLError as e: raise RequestError("recognition connection failed: {}".format(e.reason)) response_text = response.read().decode("utf-8") result = json.loads(response_text) if show_all: return result if "status" not in result or "errorType" not in result["status"] or result["status"]["errorType"] != "success": raise UnknownValueError() return result["result"]["resolvedQuery"] Recognizer.recognize_api = classmethod(recognize_api) # API.AI Speech Recognition is deprecated/not recommended as of 3.5.0, and currently is only optionally available for paid plans
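For reference, a minimal usage sketch of the recognizer API defined in the file above, assuming the module is importable as ``speech_recognition`` and that a local WAV file exists ("example.wav" is an illustrative placeholder, not a file from the source):

# Minimal sketch (not part of the original file): transcribe a WAV file with the
# record() and recognize_google() methods documented above.
import speech_recognition as sr

recognizer = sr.Recognizer()
with sr.AudioFile("example.wav") as source:    # AudioFile is the AudioSource used for audio files
    audio = recognizer.record(source)          # read the whole file into an AudioData instance

try:
    print(recognizer.recognize_google(audio))  # uses the built-in generic API key when key=None
except sr.UnknownValueError:
    print("speech was unintelligible")
except sr.RequestError as e:
    print("recognition request failed: {}".format(e))

The same ``AudioData`` object can be passed to any of the other ``recognize_*()`` methods above, since they all take an ``AudioData`` instance as their first argument.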
[]
[]
[ "PATH" ]
[]
["PATH"]
python
1
0
test/fields/fields.go
// Copyright 2014 The go-github AUTHORS. All rights reserved. // // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // This tool tests for the JSON mappings in the go-github data types. It will // identify fields that are returned by the live GitHub API, but that are not // currently mapped into a struct field of the relevant go-github type. This // helps to ensure that all relevant data returned by the API is being made // accessible, particularly new fields that are periodically (and sometimes // quietly) added to the API over time. // // These tests simply aid in identifying which fields aren't being mapped; it // is not necessarily true that every one of them should always be mapped. // Some fields may be undocumented for a reason, either because they aren't // actually used yet or should not be relied upon. package main import ( "context" "encoding/json" "flag" "fmt" "os" "reflect" "strings" "github.com/google/go-github/v18/github" "golang.org/x/oauth2" ) var ( client *github.Client // auth indicates whether tests are being run with an OAuth token. // Tests can use this flag to skip certain tests when run without auth. auth bool skipURLs = flag.Bool("skip_urls", false, "skip url fields") ) func main() { flag.Parse() token := os.Getenv("GITHUB_AUTH_TOKEN") if token == "" { print("!!! No OAuth token. Some tests won't run. !!!\n\n") client = github.NewClient(nil) } else { tc := oauth2.NewClient(context.Background(), oauth2.StaticTokenSource( &oauth2.Token{AccessToken: token}, )) client = github.NewClient(tc) auth = true } for _, tt := range []struct { url string typ interface{} }{ //{"rate_limit", &github.RateLimits{}}, {"users/octocat", &github.User{}}, {"user", &github.User{}}, {"users/willnorris/keys", &[]github.Key{}}, {"orgs/google-test", &github.Organization{}}, {"repos/google/go-github", &github.Repository{}}, {"repos/google/go-github/issues/1", &github.Issue{}}, {"/gists/9257657", &github.Gist{}}, } { err := testType(tt.url, tt.typ) if err != nil { fmt.Printf("error: %v\n", err) } } } // testType fetches the JSON resource at urlStr and compares its keys to the // struct fields of typ. func testType(urlStr string, typ interface{}) error { slice := reflect.Indirect(reflect.ValueOf(typ)).Kind() == reflect.Slice req, err := client.NewRequest("GET", urlStr, nil) if err != nil { return err } // start with a json.RawMessage so we can decode multiple ways below raw := new(json.RawMessage) _, err = client.Do(context.Background(), req, raw) if err != nil { return err } // unmarshal directly to a map var m1 map[string]interface{} if slice { var s []map[string]interface{} err = json.Unmarshal(*raw, &s) if err != nil { return err } m1 = s[0] } else { err = json.Unmarshal(*raw, &m1) if err != nil { return err } } // unmarshal to typ first, then re-marshal and unmarshal to a map err = json.Unmarshal(*raw, typ) if err != nil { return err } var byt []byte if slice { // use first item in slice v := reflect.Indirect(reflect.ValueOf(typ)) byt, err = json.Marshal(v.Index(0).Interface()) if err != nil { return err } } else { byt, err = json.Marshal(typ) if err != nil { return err } } var m2 map[string]interface{} err = json.Unmarshal(byt, &m2) if err != nil { return err } // now compare the two maps for k, v := range m1 { if *skipURLs && strings.HasSuffix(k, "_url") { continue } if _, ok := m2[k]; !ok { fmt.Printf("%v missing field for key: %v (example value: %v)\n", reflect.TypeOf(typ), k, v) } } return nil }
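The header comment above describes the approach: fetch the live JSON, decode it both into a generic map and into the typed go-github struct, re-marshal the struct, and report any keys present in the raw response but missing from the struct. As a rough illustration of that key-comparison idea, sketched in Python rather than Go (the ``User`` dataclass below is a deliberately incomplete placeholder, not the go-github type), the check boils down to:

# Illustrative sketch only; not part of go-github.
import json, urllib.request
from dataclasses import dataclass, fields

@dataclass
class User:
    login: str = ""
    id: int = 0

raw = json.load(urllib.request.urlopen("https://api.github.com/users/octocat"))
known = {f.name for f in fields(User)}
for key, value in raw.items():
    if key not in known:
        print("missing field for key: {} (example value: {})".format(key, value))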
[ "\"GITHUB_AUTH_TOKEN\"" ]
[]
[ "GITHUB_AUTH_TOKEN" ]
[]
["GITHUB_AUTH_TOKEN"]
go
1
0
pkg/awsglue/integration_test.go
package awsglue /** * Copyright 2020 Panther Labs Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import ( "os" "strings" "testing" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/glue" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( testBucket = "panther_glue_test_bucket" testDb = "panther_glue_test_db" testTable = "panther_glue_test_table" ) type testEvent struct { Col1 int } var ( integrationTest bool awsSession *session.Session glueClient *glue.Glue columns = []*glue.Column{ { Name: aws.String("Col1"), Type: aws.String("int"), }, } partitionKeys = []*glue.Column{ { Name: aws.String("year"), Type: aws.String("int"), }, { Name: aws.String("month"), Type: aws.String("int"), }, { Name: aws.String("day"), Type: aws.String("int"), }, { Name: aws.String("hour"), Type: aws.String("int"), }, } ) func TestMain(m *testing.M) { integrationTest = strings.ToLower(os.Getenv("INTEGRATION_TEST")) == "true" if integrationTest { awsSession = session.Must(session.NewSession()) glueClient = glue.New(awsSession) } os.Exit(m.Run()) } func TestIntegrationGlueMetadataPartitions(t *testing.T) { if !integrationTest { t.Skip() } var err error refTime := time.Date(2020, 1, 3, 1, 1, 1, 0, time.UTC) setupTables(t) defer func() { removeTables(t) }() gm, err := NewGlueMetadata(testDb, testTable, "test table", GlueTableHourly, false, &testEvent{}) require.NoError(t, err) expectedPath := "s3://" + testBucket + "/logs/" + testTable + "/year=2020/month=01/day=03/hour=01/" err = gm.CreateJSONPartition(glueClient, testBucket, refTime) require.NoError(t, err) // do it again, should fail err = gm.CreateJSONPartition(glueClient, testBucket, refTime) require.Error(t, err) partitionInfo, err := gm.GetPartition(glueClient, refTime) require.NoError(t, err) assert.Equal(t, expectedPath, *partitionInfo.Partition.StorageDescriptor.Location) _, err = gm.DeletePartition(glueClient, refTime) require.NoError(t, err) // ensure deleted _, err = gm.GetPartition(glueClient, refTime) require.Error(t, err) } func setupTables(t *testing.T) { removeTables(t) // in case of left over addTables(t) } func addTables(t *testing.T) { var err error dbInput := &glue.CreateDatabaseInput{ DatabaseInput: &glue.DatabaseInput{ Name: aws.String(testDb), }, } _, err = glueClient.CreateDatabase(dbInput) require.NoError(t, err) tableInput := &glue.CreateTableInput{ DatabaseName: aws.String(testDb), TableInput: &glue.TableInput{ Name: aws.String(testTable), PartitionKeys: partitionKeys, StorageDescriptor: &glue.StorageDescriptor{ // configure as JSON Columns: columns, Location: aws.String("bar"), InputFormat: aws.String("org.apache.hadoop.mapred.TextInputFormat"), OutputFormat: aws.String("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"), SerdeInfo: &glue.SerDeInfo{ SerializationLibrary: aws.String("org.openx.data.jsonserde.JsonSerDe"), Parameters: map[string]*string{ "serialization.format": aws.String("1"), "case.insensitive": aws.String("TRUE"), // treat as 
lower case }, }, }, TableType: aws.String("EXTERNAL_TABLE"), }, } _, err = glueClient.CreateTable(tableInput) require.NoError(t, err) } func removeTables(t *testing.T) { // best effort, no error checks tableInput := &glue.DeleteTableInput{ DatabaseName: aws.String(testDb), Name: aws.String(testTable), } glueClient.DeleteTable(tableInput) // nolint (errcheck) dbInput := &glue.DeleteDatabaseInput{ Name: aws.String(testDb), } glueClient.DeleteDatabase(dbInput) // nolint (errcheck) }
[ "\"INTEGRATION_TEST\"" ]
[]
[ "INTEGRATION_TEST" ]
[]
["INTEGRATION_TEST"]
go
1
0
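
The Glue test record above only exercises AWS when the INTEGRATION_TEST environment variable is set to "true"; otherwise every integration test is skipped. A minimal sketch of that gating pattern, kept generic — the package and test names below are made up for illustration and are not taken from the record:

package example

import (
	"os"
	"strings"
	"testing"
)

var integrationTest bool

func TestMain(m *testing.M) {
	// Case-insensitive check, mirroring the record above.
	integrationTest = strings.ToLower(os.Getenv("INTEGRATION_TEST")) == "true"
	os.Exit(m.Run())
}

func TestSomethingThatNeedsAWS(t *testing.T) {
	if !integrationTest {
		t.Skip("set INTEGRATION_TEST=true to run this test against real AWS resources")
	}
	// ... exercise real Glue / S3 resources here ...
}

Running plain `go test ./...` then skips the AWS-dependent paths, while CI can opt in by exporting INTEGRATION_TEST=true.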
non_essentials/ss.py
''' get date ''' def id_to_date(id): import re, os file = "/afs/slac.stanford.edu/u/ki/pkelly/pipeline/bluh2" command = "/afs/slac/u/ki/anja/software/ldacpipeline-0.12.20/bin/Linux_64/caldate -d 31/12/1998 -i " + id + " > " + file os.system(command) yy = open(file,'r').readlines() date = ((re.split('\s+',yy[0])[-2])) return date ''' get gabodsid ''' def gabodsid(inputdate): import re, os file = "/afs/slac.stanford.edu/u/ki/pkelly/pipeline/bluh" command = "/afs/slac/u/ki/anja/software/ldacpipeline-0.12.20/bin/Linux_64/mjd -t 22:00:00 -d " + inputdate + " > " + file print command os.system(command) yy = open(file,'r').readlines() MJD = ((re.split('\s+',yy[0])[-2])) file = "/afs/slac.stanford.edu/u/ki/pkelly/pipeline/bluh" os.system("/afs/slac/u/ki/anja/software/ldacpipeline-0.12.20/bin/Linux_64/nightid -t 22:00:00 -d 31/12/1998 -m " + MJD + "> " + file ) yy = open(file,'r').readlines() oo = int(float(re.split('\s+',yy[1])[-2])) return oo def get_gabodsid(file,filter,type): import os command = "dfits " + file + " | grep GABODSID > hh" + filter + type print command os.system(command) jj = open('hh' + filter + type,'r').readlines() import re date = re.split('\s+',jj[0])[1] return date def get_date(file,filter,type): import os command = "dfits " + file + " | grep DATE-OBS > hh" + filter print command os.system(command) jj = open('hh' + filter + type,'r').readlines() import re date = re.split('\'',jj[0])[1] year = float(date[0:4]) month = float(date[5:7]) day = float(date[8:]) return (year, month, day) ''' see if one date falls inside two limits format is date = [yyyy,mm,dd] ''' def inside(date, down_limit, up_limit): good = 0 print date[0], date[1], date[2], down_limit[0], up_limit[0] if (date[0] > down_limit[0]) and (date[0] < up_limit[0]): good = 1 elif date[0] == down_limit[0]: if date[1] > down_limit[1]: good = 1 elif date[1] == down_limit[1]: if date[2] > down_limit[2]: good = 1 elif date[0] == up_limit[0]: if date[1] < up_limit[1]: good = 1 elif date[1] == up_limit[1]: if date[2] < up_limit[2]: good = 1 print good return good ''' the only thing that matters is the date ''' ''' P_IMCOMBFLAT_IMCAT ''' ''' P_IMCOMBFLAT_IMCAT=${BIN}/imcombflat ${P_IMCOMBFLAT_IMCAT} -i flat_images_$$\ -o ${RESULTDIR[${CHIP}]}/$3_${CHIP}.fits \ -s 1 -e 0 1 ''' def runit(): #dir,target_dir): import os dir = os.environ['skyflatraw'] + 'nobackup/SUBARU/auxilary/W-J-B_SKYFLAT/' target_dir = os.environ['xoc'] + 'nobackup/SUBARU/auxiliary/skyflat_B' chip_confs = [[[2001,10,18],[2002,9,5],'_10_1'],[[2002,9,5],[2100,1,1],'_10_2']] ''' split into different chip configurations ''' from glob import glob list = glob(dir + "*fits") for chip_conf in chip_confs: newdir = target_dir + chip_conf[2] #anjadir = os.environ['subdir'] + "SUBARU/skyflat" + chip_conf[2] os.system('mkdir ' + newdir) #os.system('ln -s ' + newdir + ' ' + anjadir ) for file in list: (year, month, day) = get_date(file) for chip_conf in chip_confs: print year, month,day, chip_conf[0], chip_conf[1] print inside([year,month,day],chip_conf[0],chip_conf[1]) if inside((year,month,day),chip_conf[0],chip_conf[1]): try: os.system("cp " + file + ' ' + target_dir + chip_conf[2] + '/') except: print 'failed' def combineperiods(interval,dir,filter,type,edit): paramslist = [{'method': 'MEDIAN','lo_clip':'3.0 3.0'}]#,{'method': 'CLIPMEAN','lo_clip':'3.0 3.0'},{'method': 'CLIPMEAN','lo_clip':'2.0 2.0'}] params = paramslist[0] import os, re statsxmin = '500' statsxmax = '1500' statsymin = '1500' statsymax = '2500' statsxminbin = '100' statsxmaxbin = '1100' statsyminbin = 
'100' statsymaxbin = '900' firstchip = 'yes' uu = open('rosetta','w') fildir = filter + '_' os.system('mkdir ' + fildir) os.system('chmod +rwx ' + fildir) batchfiles = open(fildir + 'batchfiles_' + filter + type ,'w') batchbincovfiles = open(fildir + 'batchbincovfiles_' + filter + type ,'w') batchdivfiles = open(fildir + 'batchdivfiles_' + filter + type,'w') batchnormfiles = open(fildir + 'batchnormfiles_' + filter + type,'w') batchcovfiles = open(fildir + 'batchcovfiles_' + filter + type,'w') batcheachfiles = open(fildir + 'batcheachfiles_' + filter + type,'w') batchnormeachfiles = open(fildir + 'batchnormeachfiles_' + filter + type,'w') from glob import glob u2 = open('reject.' + filter + '.' + type,'r').readlines() rejectfiles = [] for line in u2: temp = re.split('\/',line[:-1])[-1] out = re.split('_',temp)[0] if out != '': rejectfiles.append(out) print len(rejectfiles) ## now reject files that have been labelled after dividing them by the median badfilename = 'reject.' + filter + '.norm.' + type if edit != '' and len(glob(badfilename)) > 0: u2 = open(badfilename,'r').readlines() for line in u2: temp = re.split('\/',line[:-1].replace('div_',''))[-1] out = re.split('_',temp)[0] if out != '' and out[0] == 'S': rejectfiles.append(out) print len(rejectfiles) list = glob(dir + "/SUPA*10OC.fits") print dir print dir + "/SUPA*10OC.fits" # isolate the good binned divided files div_list = glob(dir + "BINNED/div_SUPA*mos_normal.fits") goodfiles = open(fildir + 'goodfile','w') print 'total number of divided images', len(div_list) for line in div_list: #print re.split('\/',line) temp = re.split('\/',line)[-1] temp = re.split('_',temp)[0] bad = 0 for file in rejectfiles: import string if string.find(temp,file) != -1: bad = 1 if bad == 0: goodfiles.write(line + '\n') files = [] badfiles = [] for line in list: #print re.split('\/',line) temp = re.split('\/',line)[-1] temp = re.split('_',temp)[0] bad = 0 for file in rejectfiles: import string if string.find(temp,file) != -1: bad = 1 if bad == 0: files.append(temp) else: badfiles.append(temp) print files print dir imstatfile = open(fildir + 'imstatfile_' + type,'w') for chipnumber in range(1,11): ''' first break up images into different epochs ''' month_period = 6 from glob import glob #list = glob(dir + "*OC.fits") command = "imstats `ls " + dir + "/SUPA*" + str(chipnumber) + "OC.fits` -s " + statsxmin + " " + statsxmax + " " + statsymin + " " + statsymax + " -o " + fildir + "outliststats" + str(chipnumber) + '_' + type imstatfile.write('bsub -R rhel40 -q short ' + command + '\n') print "REMEMBER TO RUN BATCH QUEUE" #raw_input() #import random #random_number = str(int(100000*random.random())) raw_input() for chipnumber in range(1,11): #print command #os.system(command) list = open(fildir + 'outliststats' + str(chipnumber)+ '_' + type,'r').readlines() datelist = [] index = 0 for file in list: if file[0] != '#' and file[0] != '': filename = re.split('\s+',file)[0] #mode = re.split('\s+',file)[1] bad = 1 for file2 in files: if string.find(filename,file2) != -1: bad = 0 # reject files with low or high modes #if mode < or mode > : # bad = 1 if bad == 0: index = index + 1 gabodsid = get_gabodsid(filename,filter,type) datelist.append([gabodsid,file[:-1],filename]) command = "/afs/slac/u/ki/anja/software/ldacpipeline-0.12.20/bin/Linux_64/caldate -d 31/12/1998 -i " + gabodsid + " > " + fildir + "temp " os.system(command) yy = open(fildir + 'temp','r').readlines() date = ((re.split('\s+',yy[0])[-2])) uu.write(gabodsid + " " + date + "\n") datelist.sort() print 
datelist rr = open(fildir + 'dates_' + type,'w') rr2 = open(fildir + 'inputflipper','w') for obs in datelist: date = id_to_date(obs[0]) rr.write(str(obs[0]) + ' ' + str(date) + ' ' + str(obs[2]) + '\n') rr2.write(str(obs[2]).replace(type.upper(),type.upper() + '/BINNED').replace('10OC','mosOC') + '\n') rr.close() rr2.close() limit_up = float(datelist[-1][0]) limit_down = float(datelist[0][0]) ''' a six month period is approximately 30*6=180 days ''' diff_year = int((limit_up - limit_down) / 180.0) + 1 ''' define your date ranges from dates file written out above ''' #brackets = [[1523,1639],[1843,1846],[1878,1902],[1993,1994],[2268,2668]] #brackets = [[1878,1878],[1902,1902],[2268,2668]] #brackets = [[2268,2668]] if type == 'domeflat': if filter == 'b': random_number = 1 if filter == 'v': random_number = 1 if filter == 'r': random_number = 1 if filter == 'i': random_number = 1 if filter == 'z': random_number = 1 if type == 'skyflat': if filter == 'b': random_number = 1 if filter == 'v': random_number = 1 if filter == 'r': random_number = 1 if filter == 'i': random_number = 1 if filter == 'z': random_number = 1 if type == 'skyflat': if filter == 'b': brackets = [[1345,1729],[1845,2555],[1345,1524],[1640,1729],[1845,1998],[2316,2555],[1340,1345],[1490,1495],[1638,1643],[1666,1672],[1725,1730],[1845,1849],[1870,1875],[1877,1883],[1900,1907],[1928,1933],[1994,2003],[2312,2319],[2523,2529],[2729,2735],[2876,2882]] if filter == 'v': brackets = [[1550,1800],[1801,2399],[2400,10000],[1550,1581],[1703,1758],[1842,1874],[2198,2319],[2407,2556]] if filter == 'r': brackets = [[1403,2000],[2001,10000],[1496,1497],[1526,1554],[1639,1694],[2196,2261],[2732,2823]] if filter == 'i': brackets = [[1551,1755],[1877,2052],[2315,2731]] #[1551,1611],[1877,2731],[1551,1554],[1577,1611],[1755,1755],[1877,1903],[2052,2558],[2731,2731],[1548,1553],[1582,1590],[1606,1614],[1723,1732],[1752,1758]] if filter == 'z': brackets = [[1555,1639],[1843,1846],[1902,2268]] #[[1523,1846],[1878,2668],[1523,1639],[1843,1846],[1878,1902],[1993,1994],[2268,2668],[1521,1525],[1553,1557],[1637,1642],[1841,1849],[1875,1881],[1900,1906],[1991,1997]] if type == 'domeflat': if filter == 'b': brackets = [[1340,1345],[1490,1495],[1638,1643],[1666,1672],[1725,1730],[1845,1849],[1870,1875],[1877,1883],[1900,1907],[1928,1933],[1994,2003],[2312,2319],[2523,2529],[2729,2735],[2876,2882]] if filter == 'i': brackets = [[1551,1611],[1728,1903],[2439,2529]] #[[[1548,1553],[1582,1590],[1606,1614],[1723,1732],[1752,1758]] if filter == 'z': brackets = [[1523,1642],[1843,1879],[1901,2268]] #[[[1521,1525],[1553,1557],[1637,1642],[1841,1849],[1875,1881],[1900,1906],[1991,1997]] ''' read in dates and make brackets ''' filelists = {} for bracket in brackets: filelists[str(bracket[0]) + '_' + str(bracket[1])] = [] firstchip = 'no' if 1 == 0: #firstchip == 'yes': ''' loop through the periods and make date brackets ''' brackets = [] filelists = {} for i in range(diff_year): start = limit_down + i * 180 end = limit_down + (i + 1) * 180 brackets.append([start, end]) filelists[str(start) + '_' + str(month_period)] = [] firstchip = 'no' filelists['all'] = [] ''' go through observations and which time bracket each observation fits into ''' for obs in datelist: filelists['all'].append(obs[1]) for bracket in brackets: if bracket[0] <= float(obs[0]) and float(obs[0]) <= bracket[1]: filelists[str(bracket[0]) + '_' + str(bracket[1])].append(obs[1]) #for params in paramslist: scriptname = fildir + type + 'script' + params['method'] + '_' + params['lo_clip'].replace(' 
','_') + "_chip" + str(chipnumber) outinfo = dir + scriptname ''' script to do imcombine ''' script = open(scriptname,'w') ''' script to divide by superflat''' divscript = open(scriptname + ".py",'w') divcovscript = open(scriptname + "_cov.py",'w') scriptnameeach = fildir + type + 'eachscript' + params['method'] + '_' + params['lo_clip'].replace(' ','_') + "_chip" + str(chipnumber) + '.py' outinfoeach = dir + scriptnameeach ''' script to do each each image ''' scripteach = open(scriptnameeach,'w') batcheachfiles.write('bsub -R rhel40 -q medium -e ' + outinfoeach + 'norm -o ' + outinfoeach + 'norm python ' + scriptnameeach + '\n') if chipnumber == 1: ''' script to bin and normalize''' normscript = open(scriptname + "_norm.py",'w') bincovscript = open(scriptname + "_bincov.py",'w') normeachscript = open(scriptname + "_normeach.py",'w') batchnormfiles.write('bsub -R rhel40 -q medium -e ' + outinfo + 'norm -o ' + outinfo + 'norm python ' + scriptname + '_norm.py\n') batchnormeachfiles.write('bsub -R rhel40 -q medium -e ' + outinfo + 'norm -o ' + outinfo + 'norm python ' + scriptname + '_normeach.py\n') batchbincovfiles.write('bsub -R rhel40 -q short -e ' + outinfo + 'bincov -o ' + outinfo + 'bincov python ' + scriptname + '_bincov.py\n') batchcovfiles.write('bsub -R rhel40 -q short -e ' + outinfo + 'cov -o ' + outinfo + 'cov python ' + scriptname + '_cov.py\n') batchdivfiles.write('bsub -R rhel40 -q medium -e ' + outinfo + 'py -o ' + outinfo + 'py python ' + scriptname + '.py\n') batchfiles.write('bsub -R rhel40 -q medium -e ' + outinfo + ' -o ' + outinfo + ' source ' + scriptname + '\n') for key in filelists.keys(): file = open(fildir + key + '_chip' + str(chipnumber),'w') for ele in filelists[key]: file.write(ele.replace('1OC.fits',str(chipnumber) + 'OC.fits') + '\n') file.close() ''' rescale -s 1 ''' method = params['method'] if len(filelists[key]) > 10: if type == 'domeflat': lo_hi_rank = '1 3' if type == 'skyflat': lo_hi_rank = '1 3' elif len(filelists[key]) > 6: if type == 'domeflat': lo_hi_rank = '0 0' if type == 'skyflat': lo_hi_rank = '1 1' else: if type == 'domeflat': lo_hi_rank = '0 0' if type == 'skyflat': lo_hi_rank = '0 0' lo_hi_rej = '10 30000' lo_hi_clip = params['lo_clip'] input_list = key output_image = edit + key + '_' + params['method'] + '_' + params['lo_clip'].replace(' ','_') + "_chip" + str(chipnumber) + ".fits" command = "imcombflat -i " + fildir + input_list + "_chip" + str(chipnumber) + " -o " + dir + output_image + " -s 1 -c " + method + " -e " + lo_hi_rank + " -t " + lo_hi_rej + " -l " + lo_hi_clip script.write(command + '\n') divided_prefix = edit + 'div_' + key + '_' + params['method'] + '_' + params['lo_clip'].replace(' ','_') + "_chip" cov_prefix = divided_prefix.replace('div','cov') divided_image = divided_prefix + "_" + str(chipnumber) + ".fits" cov_image = divided_prefix.replace('div','cov') + "_" + str(chipnumber) + ".fits" binned_image = "/BINNED/" + divided_prefix + "_mos.fits" binned_normal_image = "/BINNED/" + divided_prefix + "_mos_normal.fits" binned_image = "/BINNED/" + divided_prefix + "_mos.fits" binned_normal_image = "/BINNED/" + divided_prefix + "_mos_normal.fits" cov_binned_image = "/BINNED/" + cov_prefix + "_mos.fits" cov_binned_normal_image = "/BINNED/" + cov_prefix + "_mos_normal.fits" if key != 'all': ''' divide each chip by the comprehensive 'all' flat ''' all_image = edit + 'all_' + params['method'] + '_' + params['lo_clip'].replace(' ','_') + "_chip" + str(chipnumber) + ".fits" divscript.write("print '" + output_image + "'\n") 
divscript.write("import re,os, anydbm\n") divscript.write("os.system('rm " + dir + divided_image + "')\n") divscript.write("os.system(\"ic '%1 %2 / ' " + dir + output_image + " " + dir + all_image + " > " + dir + divided_image + "\")\n") ''' bin chips and normalize binned image ''' normscript.write("import re,os, anydbm\n") normscript.write("os.putenv('INSTRUMENT','SUBARU')\n") dd = re.split('\/',dir) basedir = reduce(lambda x,y: x + '/' + y,dd[:-2]) + '/' enddir = dd[-2] if chipnumber == 1: normscript.write("os.system(\"rm " + dir + binned_image + "\")\n") normscript.write("os.system(\"rm " + dir + binned_normal_image + "\")\n") normscript.write("os.system(\"./create_binnedmosaics.sh " + basedir + " " + enddir + " " + divided_prefix + " '' 8 -32 \")\n") normscript.write("os.system(\"imstats " + dir + binned_image + " -s " + statsxminbin + " " + statsxmaxbin + " " + statsyminbin + " " + statsymaxbin + " -o " +fildir + "outlist \")\n") normscript.write("p = open('" + fildir + "outlist').readlines()[-1]\n") normscript.write("mode = re.split('\s+',p)[1]\n") normscript.write("anydbm.open('modedir','c')['mode_" + filter + type + divided_prefix + '_' + str(random_number )+ '_\'] = mode\n' ) normscript.write("os.system(\"ic '%1 \" + mode + \" / ' " + dir + binned_image + " > " + dir + binned_normal_image + "\")\n") bincovscript.write("import re,os, anydbm\n") bincovscript.write("os.putenv('INSTRUMENT','SUBARU')\n") bincovscript.write("os.system(\"rm " + dir + binned_image + "\")\n") bincovscript.write("os.system(\"rm " + dir + binned_normal_image + "\")\n") bincovscript.write("os.system(\"./create_binnedmosaics.sh " + basedir + " " + enddir + " " + cov_prefix + " '' 8 -32 \")\n") divcovscript.write("import re,os, anydbm\n") divcovscript.write("os.system('rm " + dir + cov_image + "')\n") divcovscript.write("mode = anydbm.open('modedir','r')['mode_" + divided_prefix + '_' + str(random_number) + '_\']\n' ) divcovscript.write("os.system(\"ic '%1 \" + mode + \" / 1 - ' " + dir + divided_image + " > " + dir + cov_image + "\")\n") #divscript.write("os.system('rm divoutA.fits')\n") #divscript.write("os.system('rm divoutB.fits')\n") #divscript.write("os.system(\"imstats " + dir + output_image + " -s " + statsxmin + " " + statsxmax + " " + statsymin + " " + statsymax + " -o outlist\")\n") #divscript.write("p = open('outlist').readlines()[-1]\n") #divscript.write("import re\n") #divscript.write("mode = re.split('\s+',p)[1]\n") #divscript.write("os.system(\"ic '%1 \" + mode + \" / ' " + dir + output_image + " > divoutA.fits \")\n") #divscript.write("os.system(\"imstats " + dir + all_image + " -s " + statsxmin + " " + statsxmax + " " + statsymin + " " + statsymax + " -o outlist\")\n") #divscript.write("p = open('outlist').readlines()[-1]\n") #divscript.write("mode = re.split('\s+',p)[1]\n") #divscript.write("os.system(\"ic '%1 \" + mode + \" / ' " + dir + all_image + " > divoutB.fits \")\n") #divscript.write("os.system(\"ic '%1 %2 / ' divoutA.fits divoutB.fits > " + dir + divided_image + "\")\n") #divscript.write("os.system('rm divoutA.fits')\n") #divscript.write("os.system('rm divoutB.fits')\n") print command for ele in datelist: print ele filename = ele[2] all_image = edit + 'all_' + params['method'] + '_' + params['lo_clip'].replace(' ','_') + "_chip" + str(chipnumber) + ".fits" divided_image = filename.replace('SUPA','div_SUPA') all_image = edit + 'all_' + params['method'] + '_' + params['lo_clip'].replace(' ','_') + "_chip" + str(chipnumber) + ".fits" scripteach.write("print '" + filename + "'\n") 
scripteach.write("import re,os\n") scripteach.write("os.system('rm " + divided_image + "')\n") scripteach.write("os.system(\"ic '%1 %2 / ' " + filename + " " + dir + all_image + " > " + divided_image + "\")\n") dd = re.split('\/',dir) basedir = reduce(lambda x,y: x + '/' + y,dd[:-2]) + '/' enddir = dd[-2] if chipnumber == 1: temp = re.split('\/',filename[:-1])[-1] out = re.split('_',temp)[0] divided_prefix = 'div_' + out binned_image = "/BINNED/" + divided_prefix + "_mosOC.fits" binned_normal_image = "/BINNED/" + divided_prefix + "_mos_normal.fits" normeachscript.write("import os,re\n") normeachscript.write("os.putenv('INSTRUMENT','SUBARU')\n") normeachscript.write("os.system(\"rm " + dir + binned_image + "\")\n") normeachscript.write("os.system(\"rm " + dir + binned_normal_image + "\")\n") normeachscript.write("os.system(\"./create_binnedmosaics.sh " + basedir + " " + enddir + " " + divided_prefix + " OC 8 -32 \")\n") normeachscript.write("os.system(\"imstats " + divided_image + " -s " + statsxmin + " " + statsxmax + " " + statsymin + " " + statsymax + " -o " +fildir + "outlist \")\n") normeachscript.write("p = open('" + fildir + "outlist').readlines()[-1]\n") normeachscript.write("mode = re.split('\s+',p)[1]\n") normeachscript.write("os.system(\"ic '%1 \" + mode + \" / ' " + dir + binned_image + " > " + dir + binned_normal_image + "\")\n") script.close() #os.system(command) ''' make lists of files to combine together, then combine them ''' #os.system("ic '%1 %2 /' " + img + " " + flat + " > " + img.replace('.fits','M.fits')) #get_date() batchfiles.close() if __name__ == '__main__': import os #dir = os.environ['dougdir'] + 'nobackup/SUBARU/skyflats/' #dir = os.environ['skyflatraw'] + 'nobackup/SUBARU/auxilary/W-J-B_SKYFLAT/' #target_dir = os.environ['xoc'] + 'nobackup/SUBARU/skyflats/skyflat_B' #runit(dir,target_dir) #raw_input() import sys filter = sys.argv[1] type = sys.argv[2] if type == 'domeflat': if filter == 'b': location = os.environ['subdir'] + '2007-07-18_domeflat_b/DOMEFLAT/' if filter == 'v': location = os.environ['subdir'] + '2007-07-18_domeflat_v/DOMEFLAT/' if filter == 'r': location = os.environ['subdir'] + '2007-07-18_domeflat_r/DOMEFLAT/' if filter == 'i': location = os.environ['subdir'] + '2007-07-18_domeflat_i/DOMEFLAT/' if filter == 'z': location = os.environ['subdir'] + '2007-07-18_domeflat_z/DOMEFLAT/' if type == 'skyflat': if filter == 'b': location = os.environ['subdir'] + '2007-07-18_skyflat_b/SKYFLAT/' if filter == 'v': location = os.environ['subdir'] + '2007-07-18_skyflat_v/SKYFLAT/' if filter == 'r': location = os.environ['subdir'] + '2007-07-18_skyflat_r/SKYFLAT/' if filter == 'i': location = os.environ['subdir'] + '2007-07-18_skyflat_i/SKYFLAT/' if filter == 'z': location = os.environ['subdir'] + '2007-07-18_skyflat_z/SKYFLAT/' combineperiods(6,location,filter,type,'edit') ''' then need to run ./cp_aux_data.sh ${SUBARUDIR} ${run}_${filter} ${DOUGDIR}/skyflat_10_2 ''' #os.environ['dougdir'] + 'nobackup/SUBARU/skyflats' +
[]
[]
[ "dougdir", "skyflatraw", "subdir", "xoc" ]
[]
["dougdir", "skyflatraw", "subdir", "xoc"]
python
4
0
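
The script above derives every working directory from environment variables such as skyflatraw, xoc, and subdir, and silently assumes they are set. A small illustrative sketch of the same convention with an explicit check, written in Go rather than the script's Python; the variable and path names are only examples, not part of the record:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// basePath returns the directory stored in an environment variable,
// failing loudly instead of silently producing a relative path.
func basePath(envVar string) string {
	dir := os.Getenv(envVar)
	if dir == "" {
		fmt.Fprintf(os.Stderr, "environment variable %s is not set\n", envVar)
		os.Exit(1)
	}
	return dir
}

func main() {
	// Mirrors expressions like os.environ['subdir'] + '2007-07-18_skyflat_b/SKYFLAT/'
	location := filepath.Join(basePath("subdir"), "2007-07-18_skyflat_b", "SKYFLAT")
	fmt.Println(location)
}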
template_extends/template_extends/wsgi.py
""" WSGI config for template_extends project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "template_extends.settings") application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
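
The wsgi.py record relies on os.environ.setdefault, which assigns DJANGO_SETTINGS_MODULE only when the variable is not already present. A minimal Go sketch of the same set-if-unset idiom; the helper name is hypothetical and, unlike Python's setdefault, this version also treats an empty value as unset:

package main

import (
	"fmt"
	"os"
)

// setDefaultEnv assigns a value only when the variable is currently
// unset or empty, leaving any caller-provided value untouched.
func setDefaultEnv(key, value string) {
	if os.Getenv(key) == "" {
		os.Setenv(key, value)
	}
}

func main() {
	setDefaultEnv("DJANGO_SETTINGS_MODULE", "template_extends.settings")
	fmt.Println(os.Getenv("DJANGO_SETTINGS_MODULE"))
}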
concurrent/concurrent.go
// DBDeployer - The MySQL Sandbox // Copyright © 2006-2018 Giuseppe Maxia // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package concurrent import ( "fmt" "github.com/datacharmer/dbdeployer/defaults" "os" "os/exec" "sync" ) type CommonChan chan *exec.Cmd type ExecCommand struct { Cmd string Args []string } type ExecCommands []ExecCommand type ExecutionList struct { Logger *defaults.Logger Priority int Command ExecCommand } var DebugConcurrency bool var VerboseConcurrency bool func add_task(num int, wg *sync.WaitGroup, tasks CommonChan, cmd string, args []string) { wg.Add(1) go start_task(num, wg, tasks) tasks <- exec.Command(cmd, args...) } func start_task(num int, w *sync.WaitGroup, tasks CommonChan) { defer w.Done() var ( out []byte err error ) for cmd := range tasks { // this will exit the loop when the channel closes out, err = cmd.Output() if err != nil { fmt.Printf("Error executing goroutine %d : %s", num, err) //os.Exit(1) } if DebugConcurrency { fmt.Printf("goroutine %d command output: %s", num, string(out)) } else { if VerboseConcurrency { fmt.Printf("%s", string(out)) } } } } // Run several tasks in parallel func RunParallelTasks(priority_level int, operations ExecCommands) { tasks := make(CommonChan, 64) var wg sync.WaitGroup for N, ec := range operations { add_task(N, &wg, tasks, ec.Cmd, ec.Args) } close(tasks) wg.Wait() if VerboseConcurrency { fmt.Printf("#%d\n", priority_level) } } /* // Given a list of tasks with different priorities // This function organizes the queued tasks by priority // and runs concurrently the tasks with the same priority // until no task is left in the queue. 
// For example we may have: priority command 1 /some/path/init_db 2 /some/path/start 3 /some/path/load_grants 1 /some/other/path/init_db 2 /some/other/path/start 3 /some/other/path/load_grants 1 /some/alternative/path/init_db 2 /some/alternative/path/start 3 /some/alternative/path/load_grants This function will receive the commands, and re-arrange them as follows run concurrently: { 1 /some/path/init_db 1 /some/other/path/init_db 1 /some/alternative/path/init_db } run concurrently: { 2 /some/path/start 2 /some/other/path/start 2 /some/alternative/path/start } run concurrently: { 3 /some/path/load_grants 3 /some/other/path/load_grants 3 /some/alternative/path/load_grants } */ func RunParallelTasksByPriority(exec_lists []ExecutionList) { maxPriority := 0 if len(exec_lists) == 0 { return } if DebugConcurrency { fmt.Printf("RunParallelTasksByPriority exec_list %#v\n", exec_lists) } for _, list := range exec_lists { if list.Priority > maxPriority { maxPriority = list.Priority } } for N := 0; N <= maxPriority; N++ { var operations ExecCommands for _, list := range exec_lists { if list.Priority == N { operations = append(operations, list.Command) if list.Logger != nil { list.Logger.Printf(" Queueing command %s [%v] with priority # %d\n", list.Command.Cmd, list.Command.Args, list.Priority) } } } if DebugConcurrency { fmt.Printf("%d %v\n", N, operations) } RunParallelTasks(N, operations) } } func init() { if os.Getenv("DEBUG_CONCURRENCY") != "" { DebugConcurrency = true } if os.Getenv("VERBOSE_CONCURRENCY") != "" { VerboseConcurrency = true } }
[ "\"DEBUG_CONCURRENCY\"", "\"VERBOSE_CONCURRENCY\"" ]
[]
[ "DEBUG_CONCURRENCY", "VERBOSE_CONCURRENCY" ]
[]
["DEBUG_CONCURRENCY", "VERBOSE_CONCURRENCY"]
go
2
0
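
The concurrent package above regroups queued commands by priority and runs each priority band in parallel, with DEBUG_CONCURRENCY and VERBOSE_CONCURRENCY toggling extra output. A hedged usage sketch based on the package's own init_db/start/load_grants block comment; the import path is inferred from the package's defaults import and the sandbox paths are invented:

package main

import "github.com/datacharmer/dbdeployer/concurrent"

func main() {
	var execLists []concurrent.ExecutionList
	for _, sandbox := range []string{"/sandbox1", "/sandbox2", "/sandbox3"} {
		execLists = append(execLists,
			concurrent.ExecutionList{Priority: 1, Command: concurrent.ExecCommand{Cmd: sandbox + "/init_db"}},
			concurrent.ExecutionList{Priority: 2, Command: concurrent.ExecCommand{Cmd: sandbox + "/start"}},
			concurrent.ExecutionList{Priority: 3, Command: concurrent.ExecCommand{Cmd: sandbox + "/load_grants"}},
		)
	}
	// Commands sharing a priority run concurrently; priority bands run in order.
	concurrent.RunParallelTasksByPriority(execLists)
}

Exporting VERBOSE_CONCURRENCY=1 before running would echo each command's output, and DEBUG_CONCURRENCY=1 would additionally print the per-goroutine details shown in the package's debug branches.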
bccsp/pkcs11/impl_test.go
// +build pkcs11 /* Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package pkcs11 import ( "bytes" "github.com/studyzy/crypto" "github.com/studyzy/crypto/elliptic" "crypto/rand" "github.com/studyzy/crypto/rsa" "github.com/studyzy/crypto/sha256" "crypto/sha512" "github.com/studyzy/crypto/x509" "crypto/x509/pkix" "encoding/asn1" "fmt" "hash" "io/ioutil" "math/big" "net" "os" "strings" "testing" "time" "github.com/hyperledger/fabric/bccsp" "github.com/hyperledger/fabric/bccsp/signer" "github.com/hyperledger/fabric/bccsp/sw" "github.com/hyperledger/fabric/bccsp/utils" "github.com/stretchr/testify/assert" "golang.org/x/crypto/sha3" ) var ( currentKS bccsp.KeyStore currentBCCSP bccsp.BCCSP currentTestConfig testConfig ) type testConfig struct { securityLevel int hashFamily string softVerify bool immutable bool } func TestMain(m *testing.M) { os.Exit(testMain(m)) } func testMain(m *testing.M) int { tmpDir, err := ioutil.TempDir("", "pkcs11_ks") if err != nil { fmt.Printf("Failed to create keystore directory [%s]\n", err) return -1 } defer os.RemoveAll(tmpDir) keyStore, err := sw.NewFileBasedKeyStore(nil, tmpDir, false) if err != nil { fmt.Printf("Failed initiliazing KeyStore [%s]\n", err) return -1 } currentKS = keyStore lib, pin, label := FindPKCS11Lib() tests := []testConfig{ {256, "SHA2", true, false}, {256, "SHA3", false, false}, {384, "SHA2", false, false}, {384, "SHA3", false, false}, {384, "SHA3", true, false}, } if strings.Contains(lib, "softhsm") { tests = append(tests, []testConfig{ {256, "SHA2", true, true}, }...) } opts := PKCS11Opts{ Library: lib, Label: label, Pin: pin, } for _, config := range tests { currentTestConfig = config opts.HashFamily = config.hashFamily opts.SecLevel = config.securityLevel opts.SoftVerify = config.softVerify opts.Immutable = config.immutable currentBCCSP, err = New(opts, keyStore) if err != nil { fmt.Printf("Failed initiliazing BCCSP at [%+v] \n%s\n", opts, err) return -1 } ret := m.Run() if ret != 0 { fmt.Printf("Failed testing at [%+v]\n", opts) return -1 } } return 0 } func TestNew(t *testing.T) { opts := PKCS11Opts{ HashFamily: "SHA2", SecLevel: 256, SoftVerify: false, Library: "lib", Label: "ForFabric", Pin: "98765432", } // Setup PKCS11 library and provide initial set of values lib, _, _ := FindPKCS11Lib() opts.Library = lib // Test for nil keystore _, err := New(opts, nil) assert.Error(t, err) assert.Contains(t, err.Error(), "Invalid bccsp.KeyStore instance. 
It must be different from nil.") // Test for invalid PKCS11 loadLib opts.Library = "" _, err = New(opts, currentKS) assert.Error(t, err) assert.Contains(t, err.Error(), "pkcs11: library path not provided") } func TestFindPKCS11LibEnvVars(t *testing.T) { const ( dummy_PKCS11_LIB = "/usr/lib/pkcs11" dummy_PKCS11_PIN = "98765432" dummy_PKCS11_LABEL = "testing" ) // Set environment variables used for test and preserve // original values for restoration after test completion orig_PKCS11_LIB := os.Getenv("PKCS11_LIB") os.Setenv("PKCS11_LIB", dummy_PKCS11_LIB) orig_PKCS11_PIN := os.Getenv("PKCS11_PIN") os.Setenv("PKCS11_PIN", dummy_PKCS11_PIN) orig_PKCS11_LABEL := os.Getenv("PKCS11_LABEL") os.Setenv("PKCS11_LABEL", dummy_PKCS11_LABEL) lib, pin, label := FindPKCS11Lib() assert.EqualValues(t, dummy_PKCS11_LIB, lib, "FindPKCS11Lib did not return expected library") assert.EqualValues(t, dummy_PKCS11_PIN, pin, "FindPKCS11Lib did not return expected pin") assert.EqualValues(t, dummy_PKCS11_LABEL, label, "FindPKCS11Lib did not return expected label") os.Setenv("PKCS11_LIB", orig_PKCS11_LIB) os.Setenv("PKCS11_PIN", orig_PKCS11_PIN) os.Setenv("PKCS11_LABEL", orig_PKCS11_LABEL) } func TestInvalidNewParameter(t *testing.T) { lib, pin, label := FindPKCS11Lib() opts := PKCS11Opts{ Library: lib, Label: label, Pin: pin, SoftVerify: true, } opts.HashFamily = "SHA2" opts.SecLevel = 0 r, err := New(opts, currentKS) if err == nil { t.Fatal("Error should be different from nil in this case") } if r != nil { t.Fatal("Return value should be equal to nil in this case") } opts.HashFamily = "SHA8" opts.SecLevel = 256 r, err = New(opts, currentKS) if err == nil { t.Fatal("Error should be different from nil in this case") } if r != nil { t.Fatal("Return value should be equal to nil in this case") } opts.HashFamily = "SHA2" opts.SecLevel = 256 r, err = New(opts, nil) if err == nil { t.Fatal("Error should be different from nil in this case") } if r != nil { t.Fatal("Return value should be equal to nil in this case") } opts.HashFamily = "SHA3" opts.SecLevel = 0 r, err = New(opts, nil) if err == nil { t.Fatal("Error should be different from nil in this case") } if r != nil { t.Fatal("Return value should be equal to nil in this case") } } func TestInvalidSKI(t *testing.T) { k, err := currentBCCSP.GetKey(nil) if err == nil { t.Fatal("Error should be different from nil in this case") } if k != nil { t.Fatal("Return value should be equal to nil in this case") } k, err = currentBCCSP.GetKey([]byte{0, 1, 2, 3, 4, 5, 6}) if err == nil { t.Fatal("Error should be different from nil in this case") } if k != nil { t.Fatal("Return value should be equal to nil in this case") } } func TestInvalidAltId(t *testing.T) { opts := PKCS11Opts{ HashFamily: currentTestConfig.hashFamily, SecLevel: currentTestConfig.securityLevel, SoftVerify: currentTestConfig.softVerify, Immutable: currentTestConfig.immutable, AltId: "ADifferentAltId", Library: "lib", Label: "ForFabric", Pin: "98765432", } // Setup PKCS11 library and provide initial set of values lib, _, _ := FindPKCS11Lib() opts.Library = lib // Create temporary BCCSP set with an initial label testBCCSP, err := New(opts, currentKS) if err != nil { t.Fatalf("Failed initiliazing Test BCCSP at [%+v] \n%s\n", opts, err) } // Now, try to retrieve the key using a different label k, err := testBCCSP.GetKey([]byte{0, 1, 2, 3, 4, 5, 6}) if err == nil { t.Fatal("Error should be different from nil in this case") } if k != nil { t.Fatal("Return value should be equal to nil in this case") } } func 
TestKeyGenECDSAOpts(t *testing.T) { // Curve P256 k, err := currentBCCSP.KeyGen(&bccsp.ECDSAP256KeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating ECDSA P256 key [%s]", err) } if k == nil { t.Fatal("Failed generating ECDSA P256 key. Key must be different from nil") } if !k.Private() { t.Fatal("Failed generating ECDSA P256 key. Key should be private") } if k.Symmetric() { t.Fatal("Failed generating ECDSA P256 key. Key should be asymmetric") } ecdsaKey := k.(*ecdsaPrivateKey).pub if elliptic.P256() != ecdsaKey.pub.Curve { t.Fatal("P256 generated key in invalid. The curve must be P256.") } // Curve P384 k, err = currentBCCSP.KeyGen(&bccsp.ECDSAP384KeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating ECDSA P384 key [%s]", err) } if k == nil { t.Fatal("Failed generating ECDSA P384 key. Key must be different from nil") } if !k.Private() { t.Fatal("Failed generating ECDSA P384 key. Key should be private") } if k.Symmetric() { t.Fatal("Failed generating ECDSA P384 key. Key should be asymmetric") } ecdsaKey = k.(*ecdsaPrivateKey).pub if elliptic.P384() != ecdsaKey.pub.Curve { t.Fatal("P256 generated key in invalid. The curve must be P384.") } } func TestKeyGenRSAOpts(t *testing.T) { // 1024 k, err := currentBCCSP.KeyGen(&bccsp.RSA1024KeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating RSA 1024 key [%s]", err) } if k == nil { t.Fatal("Failed generating RSA 1024 key. Key must be different from nil") } if !k.Private() { t.Fatal("Failed generating RSA 1024 key. Key should be private") } if k.Symmetric() { t.Fatal("Failed generating RSA 1024 key. Key should be asymmetric") } // 2048 k, err = currentBCCSP.KeyGen(&bccsp.RSA2048KeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating RSA 2048 key [%s]", err) } if k == nil { t.Fatal("Failed generating RSA 2048 key. Key must be different from nil") } if !k.Private() { t.Fatal("Failed generating RSA 2048 key. Key should be private") } if k.Symmetric() { t.Fatal("Failed generating RSA 2048 key. Key should be asymmetric") } } func TestKeyGenAESOpts(t *testing.T) { // AES 128 k, err := currentBCCSP.KeyGen(&bccsp.AES128KeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating AES 128 key [%s]", err) } if k == nil { t.Fatal("Failed generating AES 128 key. Key must be different from nil") } if !k.Private() { t.Fatal("Failed generating AES 128 key. Key should be private") } if !k.Symmetric() { t.Fatal("Failed generating AES 128 key. Key should be symmetric") } // AES 192 k, err = currentBCCSP.KeyGen(&bccsp.AES192KeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating AES 192 key [%s]", err) } if k == nil { t.Fatal("Failed generating AES 192 key. Key must be different from nil") } if !k.Private() { t.Fatal("Failed generating AES 192 key. Key should be private") } if !k.Symmetric() { t.Fatal("Failed generating AES 192 key. Key should be symmetric") } // AES 256 k, err = currentBCCSP.KeyGen(&bccsp.AES256KeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating AES 256 key [%s]", err) } if k == nil { t.Fatal("Failed generating AES 256 key. Key must be different from nil") } if !k.Private() { t.Fatal("Failed generating AES 256 key. Key should be private") } if !k.Symmetric() { t.Fatal("Failed generating AES 256 key. 
Key should be symmetric") } } func TestHashOpts(t *testing.T) { msg := []byte("abcd") // SHA256 digest1, err := currentBCCSP.Hash(msg, &bccsp.SHA256Opts{}) if err != nil { t.Fatalf("Failed computing SHA256 [%s]", err) } h := sha256.New() h.Write(msg) digest2 := h.Sum(nil) if !bytes.Equal(digest1, digest2) { t.Fatalf("Different SHA256 computed. [%x][%x]", digest1, digest2) } // SHA384 digest1, err = currentBCCSP.Hash(msg, &bccsp.SHA384Opts{}) if err != nil { t.Fatalf("Failed computing SHA384 [%s]", err) } h = sha512.New384() h.Write(msg) digest2 = h.Sum(nil) if !bytes.Equal(digest1, digest2) { t.Fatalf("Different SHA384 computed. [%x][%x]", digest1, digest2) } // SHA3_256O digest1, err = currentBCCSP.Hash(msg, &bccsp.SHA3_256Opts{}) if err != nil { t.Fatalf("Failed computing SHA3_256 [%s]", err) } h = sha3.New256() h.Write(msg) digest2 = h.Sum(nil) if !bytes.Equal(digest1, digest2) { t.Fatalf("Different SHA3_256 computed. [%x][%x]", digest1, digest2) } // SHA3_384 digest1, err = currentBCCSP.Hash(msg, &bccsp.SHA3_384Opts{}) if err != nil { t.Fatalf("Failed computing SHA3_384 [%s]", err) } h = sha3.New384() h.Write(msg) digest2 = h.Sum(nil) if !bytes.Equal(digest1, digest2) { t.Fatalf("Different SHA3_384 computed. [%x][%x]", digest1, digest2) } } func TestECDSAKeyGenEphemeral(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: true}) if err != nil { t.Fatalf("Failed generating ECDSA key [%s]", err) } if k == nil { t.Fatal("Failed generating ECDSA key. Key must be different from nil") } if !k.Private() { t.Fatal("Failed generating ECDSA key. Key should be private") } if k.Symmetric() { t.Fatal("Failed generating ECDSA key. Key should be asymmetric") } raw, err := k.Bytes() if err == nil { t.Fatal("Failed marshalling to bytes. Marshalling must fail.") } if len(raw) != 0 { t.Fatal("Failed marshalling to bytes. Output should be 0 bytes") } pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting corresponding public key [%s]", err) } if pk == nil { t.Fatal("Public key must be different from nil.") } } func TestECDSAPrivateKeySKI(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating ECDSA key [%s]", err) } ski := k.SKI() if len(ski) == 0 { t.Fatal("SKI not valid. Zero length.") } } func TestECDSAKeyGenNonEphemeral(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating ECDSA key [%s]", err) } if k == nil { t.Fatal("Failed generating ECDSA key. Key must be different from nil") } if !k.Private() { t.Fatal("Failed generating ECDSA key. Key should be private") } if k.Symmetric() { t.Fatal("Failed generating ECDSA key. Key should be asymmetric") } } func TestECDSAGetKeyBySKI(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating ECDSA key [%s]", err) } k2, err := currentBCCSP.GetKey(k.SKI()) if err != nil { t.Fatalf("Failed getting ECDSA key [%s]", err) } if k2 == nil { t.Fatal("Failed getting ECDSA key. Key must be different from nil") } if !k2.Private() { t.Fatal("Failed getting ECDSA key. Key should be private") } if k2.Symmetric() { t.Fatal("Failed getting ECDSA key. 
Key should be asymmetric") } // Check that the SKIs are the same if !bytes.Equal(k.SKI(), k2.SKI()) { t.Fatalf("SKIs are different [%x]!=[%x]", k.SKI(), k2.SKI()) } } func TestECDSAPublicKeyFromPrivateKey(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating ECDSA key [%s]", err) } pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting public key from private ECDSA key [%s]", err) } if pk == nil { t.Fatal("Failed getting public key from private ECDSA key. Key must be different from nil") } if pk.Private() { t.Fatal("Failed generating ECDSA key. Key should be public") } if pk.Symmetric() { t.Fatal("Failed generating ECDSA key. Key should be asymmetric") } } func TestECDSAPublicKeyBytes(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating ECDSA key [%s]", err) } pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting public key from private ECDSA key [%s]", err) } raw, err := pk.Bytes() if err != nil { t.Fatalf("Failed marshalling ECDSA public key [%s]", err) } if len(raw) == 0 { t.Fatal("Failed marshalling ECDSA public key. Zero length") } } func TestECDSAPublicKeySKI(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating ECDSA key [%s]", err) } pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting public key from private ECDSA key [%s]", err) } ski := pk.SKI() if len(ski) == 0 { t.Fatal("SKI not valid. Zero length.") } } func TestECDSASign(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating ECDSA key [%s]", err) } msg := []byte("Hello World") digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{}) if err != nil { t.Fatalf("Failed computing HASH [%s]", err) } signature, err := currentBCCSP.Sign(k, digest, nil) if err != nil { t.Fatalf("Failed generating ECDSA signature [%s]", err) } if len(signature) == 0 { t.Fatal("Failed generating ECDSA key. Signature must be different from nil") } _, err = currentBCCSP.Sign(nil, digest, nil) assert.Error(t, err) assert.Contains(t, err.Error(), "Invalid Key. It must not be nil") _, err = currentBCCSP.Sign(k, nil, nil) assert.Error(t, err) assert.Contains(t, err.Error(), "Invalid digest. Cannot be empty") } func TestECDSAVerify(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating ECDSA key [%s]", err) } msg := []byte("Hello World") digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{}) if err != nil { t.Fatalf("Failed computing HASH [%s]", err) } signature, err := currentBCCSP.Sign(k, digest, nil) if err != nil { t.Fatalf("Failed generating ECDSA signature [%s]", err) } valid, err := currentBCCSP.Verify(k, signature, digest, nil) if err != nil { t.Fatalf("Failed verifying ECDSA signature [%s]", err) } if !valid { t.Fatal("Failed verifying ECDSA signature. Signature not valid.") } pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting corresponding public key [%s]", err) } valid, err = currentBCCSP.Verify(pk, signature, digest, nil) if err != nil { t.Fatalf("Failed verifying ECDSA signature [%s]", err) } if !valid { t.Fatal("Failed verifying ECDSA signature. Signature not valid.") } _, err = currentBCCSP.Verify(nil, signature, digest, nil) assert.Error(t, err) assert.Contains(t, err.Error(), "Invalid Key. 
It must not be nil") _, err = currentBCCSP.Verify(pk, nil, digest, nil) assert.Error(t, err) assert.Contains(t, err.Error(), "Invalid signature. Cannot be empty") _, err = currentBCCSP.Verify(pk, signature, nil, nil) assert.Error(t, err) assert.Contains(t, err.Error(), "Invalid digest. Cannot be empty") // Import the exported public key pkRaw, err := pk.Bytes() if err != nil { t.Fatalf("Failed getting ECDSA raw public key [%s]", err) } // Store public key _, err = currentBCCSP.KeyImport(pkRaw, &bccsp.ECDSAPKIXPublicKeyImportOpts{Temporary: false}) if err != nil { t.Fatalf("Failed storing corresponding public key [%s]", err) } pk2, err := currentBCCSP.GetKey(pk.SKI()) if err != nil { t.Fatalf("Failed retrieving corresponding public key [%s]", err) } valid, err = currentBCCSP.Verify(pk2, signature, digest, nil) if err != nil { t.Fatalf("Failed verifying ECDSA signature [%s]", err) } if !valid { t.Fatal("Failed verifying ECDSA signature. Signature not valid.") } } func TestECDSAKeyImportFromExportedKey(t *testing.T) { // Generate an ECDSA key k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating ECDSA key [%s]", err) } // Export the public key pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting ECDSA public key [%s]", err) } pkRaw, err := pk.Bytes() if err != nil { t.Fatalf("Failed getting ECDSA raw public key [%s]", err) } // Import the exported public key pk2, err := currentBCCSP.KeyImport(pkRaw, &bccsp.ECDSAPKIXPublicKeyImportOpts{Temporary: false}) if err != nil { t.Fatalf("Failed importing ECDSA public key [%s]", err) } if pk2 == nil { t.Fatal("Failed importing ECDSA public key. Return BCCSP key cannot be nil.") } // Sign and verify with the imported public key msg := []byte("Hello World") digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{}) if err != nil { t.Fatalf("Failed computing HASH [%s]", err) } signature, err := currentBCCSP.Sign(k, digest, nil) if err != nil { t.Fatalf("Failed generating ECDSA signature [%s]", err) } valid, err := currentBCCSP.Verify(pk2, signature, digest, nil) if err != nil { t.Fatalf("Failed verifying ECDSA signature [%s]", err) } if !valid { t.Fatal("Failed verifying ECDSA signature. Signature not valid.") } } func TestECDSAKeyImportFromECDSAPublicKey(t *testing.T) { // Generate an ECDSA key k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating ECDSA key [%s]", err) } // Export the public key pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting ECDSA public key [%s]", err) } pkRaw, err := pk.Bytes() if err != nil { t.Fatalf("Failed getting ECDSA raw public key [%s]", err) } pub, err := utils.DERToPublicKey(pkRaw) if err != nil { t.Fatalf("Failed converting raw to ecdsa.PublicKey [%s]", err) } // Import the ecdsa.PublicKey pk2, err := currentBCCSP.KeyImport(pub, &bccsp.ECDSAGoPublicKeyImportOpts{Temporary: false}) if err != nil { t.Fatalf("Failed importing ECDSA public key [%s]", err) } if pk2 == nil { t.Fatal("Failed importing ECDSA public key. 
Return BCCSP key cannot be nil.") } // Sign and verify with the imported public key msg := []byte("Hello World") digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{}) if err != nil { t.Fatalf("Failed computing HASH [%s]", err) } signature, err := currentBCCSP.Sign(k, digest, nil) if err != nil { t.Fatalf("Failed generating ECDSA signature [%s]", err) } valid, err := currentBCCSP.Verify(pk2, signature, digest, nil) if err != nil { t.Fatalf("Failed verifying ECDSA signature [%s]", err) } if !valid { t.Fatal("Failed verifying ECDSA signature. Signature not valid.") } } func TestKeyImportFromX509ECDSAPublicKey(t *testing.T) { // Generate an ECDSA key k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating ECDSA key [%s]", err) } // Generate a self-signed certificate testExtKeyUsage := []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth} testUnknownExtKeyUsage := []asn1.ObjectIdentifier{[]int{1, 2, 3}, []int{2, 59, 1}} extraExtensionData := []byte("extra extension") commonName := "test.example.com" template := x509.Certificate{ SerialNumber: big.NewInt(1), Subject: pkix.Name{ CommonName: commonName, Organization: []string{"Σ Acme Co"}, Country: []string{"US"}, ExtraNames: []pkix.AttributeTypeAndValue{ { Type: []int{2, 5, 4, 42}, Value: "Gopher", }, // This should override the Country, above. { Type: []int{2, 5, 4, 6}, Value: "NL", }, }, }, NotBefore: time.Now().Add(-1 * time.Hour), NotAfter: time.Now().Add(1 * time.Hour), SignatureAlgorithm: x509.ECDSAWithSHA256, SubjectKeyId: []byte{1, 2, 3, 4}, KeyUsage: x509.KeyUsageCertSign, ExtKeyUsage: testExtKeyUsage, UnknownExtKeyUsage: testUnknownExtKeyUsage, BasicConstraintsValid: true, IsCA: true, OCSPServer: []string{"http://ocurrentBCCSP.example.com"}, IssuingCertificateURL: []string{"http://crt.example.com/ca1.crt"}, DNSNames: []string{"test.example.com"}, EmailAddresses: []string{"gopher@golang.org"}, IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1).To4(), net.ParseIP("2001:4860:0:2001::68")}, PolicyIdentifiers: []asn1.ObjectIdentifier{[]int{1, 2, 3}}, PermittedDNSDomains: []string{".example.com", "example.com"}, CRLDistributionPoints: []string{"http://crl1.example.com/ca1.crl", "http://crl2.example.com/ca1.crl"}, ExtraExtensions: []pkix.Extension{ { Id: []int{1, 2, 3, 4}, Value: extraExtensionData, }, }, } cryptoSigner, err := signer.New(currentBCCSP, k) if err != nil { t.Fatalf("Failed initializing CyrptoSigner [%s]", err) } // Export the public key pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting ECDSA public key [%s]", err) } pkRaw, err := pk.Bytes() if err != nil { t.Fatalf("Failed getting ECDSA raw public key [%s]", err) } pub, err := utils.DERToPublicKey(pkRaw) if err != nil { t.Fatalf("Failed converting raw to ECDSA.PublicKey [%s]", err) } certRaw, err := x509.CreateCertificate(rand.Reader, &template, &template, pub, cryptoSigner) if err != nil { t.Fatalf("Failed generating self-signed certificate [%s]", err) } cert, err := utils.DERToX509Certificate(certRaw) if err != nil { t.Fatalf("Failed generating X509 certificate object from raw [%s]", err) } // Import the certificate's public key pk2, err := currentBCCSP.KeyImport(cert, &bccsp.X509PublicKeyImportOpts{Temporary: false}) if err != nil { t.Fatalf("Failed importing ECDSA public key [%s]", err) } if pk2 == nil { t.Fatal("Failed importing ECDSA public key. 
Return BCCSP key cannot be nil.") } // Sign and verify with the imported public key msg := []byte("Hello World") digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{}) if err != nil { t.Fatalf("Failed computing HASH [%s]", err) } signature, err := currentBCCSP.Sign(k, digest, nil) if err != nil { t.Fatalf("Failed generating ECDSA signature [%s]", err) } valid, err := currentBCCSP.Verify(pk2, signature, digest, nil) if err != nil { t.Fatalf("Failed verifying ECDSA signature [%s]", err) } if !valid { t.Fatal("Failed verifying ECDSA signature. Signature not valid.") } } func TestECDSASignatureEncoding(t *testing.T) { v := []byte{0x30, 0x07, 0x02, 0x01, 0x8F, 0x02, 0x02, 0xff, 0xf1} _, err := asn1.Unmarshal(v, &utils.ECDSASignature{}) if err == nil { t.Fatalf("Unmarshalling should fail for [% x]", v) } t.Logf("Unmarshalling correctly failed for [% x] [%s]", v, err) v = []byte{0x30, 0x07, 0x02, 0x01, 0x8F, 0x02, 0x02, 0x00, 0x01} _, err = asn1.Unmarshal(v, &utils.ECDSASignature{}) if err == nil { t.Fatalf("Unmarshalling should fail for [% x]", v) } t.Logf("Unmarshalling correctly failed for [% x] [%s]", v, err) v = []byte{0x30, 0x07, 0x02, 0x01, 0x8F, 0x02, 0x81, 0x01, 0x01} _, err = asn1.Unmarshal(v, &utils.ECDSASignature{}) if err == nil { t.Fatalf("Unmarshalling should fail for [% x]", v) } t.Logf("Unmarshalling correctly failed for [% x] [%s]", v, err) v = []byte{0x30, 0x07, 0x02, 0x01, 0x8F, 0x02, 0x81, 0x01, 0x8F} _, err = asn1.Unmarshal(v, &utils.ECDSASignature{}) if err == nil { t.Fatalf("Unmarshalling should fail for [% x]", v) } t.Logf("Unmarshalling correctly failed for [% x] [%s]", v, err) v = []byte{0x30, 0x0A, 0x02, 0x01, 0x8F, 0x02, 0x05, 0x00, 0x00, 0x00, 0x00, 0x8F} _, err = asn1.Unmarshal(v, &utils.ECDSASignature{}) if err == nil { t.Fatalf("Unmarshalling should fail for [% x]", v) } t.Logf("Unmarshalling correctly failed for [% x] [%s]", v, err) } func TestECDSALowS(t *testing.T) { // Ensure that signature with low-S are generated k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating ECDSA key [%s]", err) } msg := []byte("Hello World") digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{}) if err != nil { t.Fatalf("Failed computing HASH [%s]", err) } signature, err := currentBCCSP.Sign(k, digest, nil) if err != nil { t.Fatalf("Failed generating ECDSA signature [%s]", err) } R, S, err := utils.UnmarshalECDSASignature(signature) if err != nil { t.Fatalf("Failed unmarshalling signature [%s]", err) } if S.Cmp(utils.GetCurveHalfOrdersAt(k.(*ecdsaPrivateKey).pub.pub.Curve)) >= 0 { t.Fatal("Invalid signature. It must have low-S") } valid, err := currentBCCSP.Verify(k, signature, digest, nil) if err != nil { t.Fatalf("Failed verifying ECDSA signature [%s]", err) } if !valid { t.Fatal("Failed verifying ECDSA signature. Signature not valid.") } // Ensure that signature with high-S are rejected. for { R, S, err = currentBCCSP.(*impl).signP11ECDSA(k.SKI(), digest) if err != nil { t.Fatalf("Failed generating signature [%s]", err) } if S.Cmp(utils.GetCurveHalfOrdersAt(k.(*ecdsaPrivateKey).pub.pub.Curve)) > 0 { break } } sig, err := utils.MarshalECDSASignature(R, S) if err != nil { t.Fatalf("Failing unmarshalling signature [%s]", err) } valid, err = currentBCCSP.Verify(k, sig, digest, nil) if err == nil { t.Fatal("Failed verifying ECDSA signature. It must fail for a signature with high-S") } if valid { t.Fatal("Failed verifying ECDSA signature. 
It must fail for a signature with high-S") } } func TestAESKeyGen(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating AES_256 key [%s]", err) } if k == nil { t.Fatal("Failed generating AES_256 key. Key must be different from nil") } if !k.Private() { t.Fatal("Failed generating AES_256 key. Key should be private") } if !k.Symmetric() { t.Fatal("Failed generating AES_256 key. Key should be symmetric") } pk, err := k.PublicKey() if err == nil { t.Fatal("Error should be different from nil in this case") } if pk != nil { t.Fatal("Return value should be equal to nil in this case") } } func TestAESEncrypt(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating AES_256 key [%s]", err) } ct, err := currentBCCSP.Encrypt(k, []byte("Hello World"), &bccsp.AESCBCPKCS7ModeOpts{}) if err != nil { t.Fatalf("Failed encrypting [%s]", err) } if len(ct) == 0 { t.Fatal("Failed encrypting. Nil ciphertext") } } func TestAESDecrypt(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating AES_256 key [%s]", err) } msg := []byte("Hello World") ct, err := currentBCCSP.Encrypt(k, msg, &bccsp.AESCBCPKCS7ModeOpts{}) if err != nil { t.Fatalf("Failed encrypting [%s]", err) } pt, err := currentBCCSP.Decrypt(k, ct, bccsp.AESCBCPKCS7ModeOpts{}) if err != nil { t.Fatalf("Failed decrypting [%s]", err) } if len(ct) == 0 { t.Fatal("Failed decrypting. Nil plaintext") } if !bytes.Equal(msg, pt) { t.Fatalf("Failed decrypting. Decrypted plaintext is different from the original. [%x][%x]", msg, pt) } } func TestHMACTruncated256KeyDerivOverAES256Key(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating AES_256 key [%s]", err) } hmcaedKey, err := currentBCCSP.KeyDeriv(k, &bccsp.HMACTruncated256AESDeriveKeyOpts{Temporary: false, Arg: []byte{1}}) if err != nil { t.Fatalf("Failed HMACing AES_256 key [%s]", err) } if k == nil { t.Fatal("Failed HMACing AES_256 key. HMACed Key must be different from nil") } if !hmcaedKey.Private() { t.Fatal("Failed HMACing AES_256 key. HMACed Key should be private") } if !hmcaedKey.Symmetric() { t.Fatal("Failed HMACing AES_256 key. HMACed Key should be asymmetric") } raw, err := hmcaedKey.Bytes() if err == nil { t.Fatal("Failed marshalling to bytes. Operation must be forbidden") } if len(raw) != 0 { t.Fatal("Failed marshalling to bytes. Operation must return 0 bytes") } msg := []byte("Hello World") ct, err := currentBCCSP.Encrypt(hmcaedKey, msg, &bccsp.AESCBCPKCS7ModeOpts{}) if err != nil { t.Fatalf("Failed encrypting [%s]", err) } pt, err := currentBCCSP.Decrypt(hmcaedKey, ct, bccsp.AESCBCPKCS7ModeOpts{}) if err != nil { t.Fatalf("Failed decrypting [%s]", err) } if len(ct) == 0 { t.Fatal("Failed decrypting. Nil plaintext") } if !bytes.Equal(msg, pt) { t.Fatalf("Failed decrypting. Decrypted plaintext is different from the original. [%x][%x]", msg, pt) } } func TestHMACKeyDerivOverAES256Key(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating AES_256 key [%s]", err) } hmcaedKey, err := currentBCCSP.KeyDeriv(k, &bccsp.HMACDeriveKeyOpts{Temporary: false, Arg: []byte{1}}) if err != nil { t.Fatalf("Failed HMACing AES_256 key [%s]", err) } if k == nil { t.Fatal("Failed HMACing AES_256 key. 
HMACed Key must be different from nil") } if !hmcaedKey.Private() { t.Fatal("Failed HMACing AES_256 key. HMACed Key should be private") } if !hmcaedKey.Symmetric() { t.Fatal("Failed HMACing AES_256 key. HMACed Key should be asymmetric") } raw, err := hmcaedKey.Bytes() if err != nil { t.Fatalf("Failed marshalling to bytes [%s]", err) } if len(raw) == 0 { t.Fatal("Failed marshalling to bytes. 0 bytes") } } func TestAES256KeyImport(t *testing.T) { raw, err := sw.GetRandomBytes(32) if err != nil { t.Fatalf("Failed generating AES key [%s]", err) } k, err := currentBCCSP.KeyImport(raw, &bccsp.AES256ImportKeyOpts{Temporary: false}) if err != nil { t.Fatalf("Failed importing AES_256 key [%s]", err) } if k == nil { t.Fatal("Failed importing AES_256 key. Imported Key must be different from nil") } if !k.Private() { t.Fatal("Failed HMACing AES_256 key. Imported Key should be private") } if !k.Symmetric() { t.Fatal("Failed HMACing AES_256 key. Imported Key should be asymmetric") } raw, err = k.Bytes() if err == nil { t.Fatal("Failed marshalling to bytes. Marshalling must fail.") } if len(raw) != 0 { t.Fatal("Failed marshalling to bytes. Output should be 0 bytes") } msg := []byte("Hello World") ct, err := currentBCCSP.Encrypt(k, msg, &bccsp.AESCBCPKCS7ModeOpts{}) if err != nil { t.Fatalf("Failed encrypting [%s]", err) } pt, err := currentBCCSP.Decrypt(k, ct, bccsp.AESCBCPKCS7ModeOpts{}) if err != nil { t.Fatalf("Failed decrypting [%s]", err) } if len(ct) == 0 { t.Fatal("Failed decrypting. Nil plaintext") } if !bytes.Equal(msg, pt) { t.Fatalf("Failed decrypting. Decrypted plaintext is different from the original. [%x][%x]", msg, pt) } } func TestAES256KeyImportBadPaths(t *testing.T) { _, err := currentBCCSP.KeyImport(nil, &bccsp.AES256ImportKeyOpts{Temporary: false}) if err == nil { t.Fatal("Failed importing key. Must fail on importing nil key") } _, err = currentBCCSP.KeyImport([]byte{1}, &bccsp.AES256ImportKeyOpts{Temporary: false}) if err == nil { t.Fatal("Failed importing key. Must fail on importing a key with an invalid length") } } func TestAES256KeyGenSKI(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating AES_256 key [%s]", err) } k2, err := currentBCCSP.GetKey(k.SKI()) if err != nil { t.Fatalf("Failed getting AES_256 key [%s]", err) } if k2 == nil { t.Fatal("Failed getting AES_256 key. Key must be different from nil") } if !k2.Private() { t.Fatal("Failed getting AES_256 key. Key should be private") } if !k2.Symmetric() { t.Fatal("Failed getting AES_256 key. 
Key should be symmetric") } // Check that the SKIs are the same if !bytes.Equal(k.SKI(), k2.SKI()) { t.Fatalf("SKIs are different [%x]!=[%x]", k.SKI(), k2.SKI()) } } func TestSHA(t *testing.T) { for i := 0; i < 100; i++ { b, err := sw.GetRandomBytes(i) if err != nil { t.Fatalf("Failed getting random bytes [%s]", err) } h1, err := currentBCCSP.Hash(b, &bccsp.SHAOpts{}) if err != nil { t.Fatalf("Failed computing SHA [%s]", err) } var h hash.Hash switch currentTestConfig.hashFamily { case "SHA2": switch currentTestConfig.securityLevel { case 256: h = sha256.New() case 384: h = sha512.New384() default: t.Fatalf("Invalid security level [%d]", currentTestConfig.securityLevel) } case "SHA3": switch currentTestConfig.securityLevel { case 256: h = sha3.New256() case 384: h = sha3.New384() default: t.Fatalf("Invalid security level [%d]", currentTestConfig.securityLevel) } default: t.Fatalf("Invalid hash family [%s]", currentTestConfig.hashFamily) } h.Write(b) h2 := h.Sum(nil) if !bytes.Equal(h1, h2) { t.Fatalf("Discrempancy found in HASH result [%x], [%x]!=[%x]", b, h1, h2) } } } func TestRSAKeyGenEphemeral(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: true}) if err != nil { t.Fatalf("Failed generating RSA key [%s]", err) } if k == nil { t.Fatal("Failed generating RSA key. Key must be different from nil") } if !k.Private() { t.Fatal("Failed generating RSA key. Key should be private") } if k.Symmetric() { t.Fatal("Failed generating RSA key. Key should be asymmetric") } pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed generating RSA corresponding public key [%s]", err) } if pk == nil { t.Fatal("PK must be different from nil") } b, err := k.Bytes() if err == nil { t.Fatal("Secret keys cannot be exported. It must fail in this case") } if len(b) != 0 { t.Fatal("Secret keys cannot be exported. It must be nil") } } func TestRSAPrivateKeySKI(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating RSA key [%s]", err) } ski := k.SKI() if len(ski) == 0 { t.Fatal("SKI not valid. Zero length.") } } func TestRSAKeyGenNonEphemeral(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating RSA key [%s]", err) } if k == nil { t.Fatal("Failed generating RSA key. Key must be different from nil") } if !k.Private() { t.Fatal("Failed generating RSA key. Key should be private") } if k.Symmetric() { t.Fatal("Failed generating RSA key. Key should be asymmetric") } } func TestRSAGetKeyBySKI(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating RSA key [%s]", err) } k2, err := currentBCCSP.GetKey(k.SKI()) if err != nil { t.Fatalf("Failed getting RSA key [%s]", err) } if k2 == nil { t.Fatal("Failed getting RSA key. Key must be different from nil") } if !k2.Private() { t.Fatal("Failed getting RSA key. Key should be private") } if k2.Symmetric() { t.Fatal("Failed getting RSA key. 
Key should be asymmetric") } // Check that the SKIs are the same if !bytes.Equal(k.SKI(), k2.SKI()) { t.Fatalf("SKIs are different [%x]!=[%x]", k.SKI(), k2.SKI()) } } func TestRSAPublicKeyFromPrivateKey(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating RSA key [%s]", err) } pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting public key from private RSA key [%s]", err) } if pk == nil { t.Fatal("Failed getting public key from private RSA key. Key must be different from nil") } if pk.Private() { t.Fatal("Failed generating RSA key. Key should be public") } if pk.Symmetric() { t.Fatal("Failed generating RSA key. Key should be asymmetric") } } func TestRSAPublicKeyBytes(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating RSA key [%s]", err) } pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting public key from private RSA key [%s]", err) } raw, err := pk.Bytes() if err != nil { t.Fatalf("Failed marshalling RSA public key [%s]", err) } if len(raw) == 0 { t.Fatal("Failed marshalling RSA public key. Zero length") } } func TestRSAPublicKeySKI(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating RSA key [%s]", err) } pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting public key from private RSA key [%s]", err) } ski := pk.SKI() if len(ski) == 0 { t.Fatal("SKI not valid. Zero length.") } } func TestRSASign(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating RSA key [%s]", err) } msg := []byte("Hello World") digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{}) if err != nil { t.Fatalf("Failed computing HASH [%s]", err) } signature, err := currentBCCSP.Sign(k, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)}) if err != nil { t.Fatalf("Failed generating RSA signature [%s]", err) } if len(signature) == 0 { t.Fatal("Failed generating RSA key. Signature must be different from nil") } } func TestRSAVerify(t *testing.T) { k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating RSA key [%s]", err) } msg := []byte("Hello World") digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{}) if err != nil { t.Fatalf("Failed computing HASH [%s]", err) } signature, err := currentBCCSP.Sign(k, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)}) if err != nil { t.Fatalf("Failed generating RSA signature [%s]", err) } valid, err := currentBCCSP.Verify(k, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)}) if err != nil { t.Fatalf("Failed verifying RSA signature [%s]", err) } if !valid { t.Fatal("Failed verifying RSA signature. Signature not valid.") } pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting corresponding public key [%s]", err) } valid, err = currentBCCSP.Verify(pk, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)}) if err != nil { t.Fatalf("Failed verifying RSA signature [%s]", err) } if !valid { t.Fatal("Failed verifying RSA signature. 
Signature not valid.") } // Store public key err = currentKS.StoreKey(pk) if err != nil { t.Fatalf("Failed storing corresponding public key [%s]", err) } pk2, err := currentKS.GetKey(pk.SKI()) if err != nil { t.Fatalf("Failed retrieving corresponding public key [%s]", err) } valid, err = currentBCCSP.Verify(pk2, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)}) if err != nil { t.Fatalf("Failed verifying RSA signature [%s]", err) } if !valid { t.Fatal("Failed verifying RSA signature. Signature not valid.") } } func TestRSAKeyImportFromRSAPublicKey(t *testing.T) { // Generate an RSA key k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating RSA key [%s]", err) } // Export the public key pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting RSA public key [%s]", err) } pkRaw, err := pk.Bytes() if err != nil { t.Fatalf("Failed getting RSA raw public key [%s]", err) } pub, err := utils.DERToPublicKey(pkRaw) if err != nil { t.Fatalf("Failed converting raw to RSA.PublicKey [%s]", err) } // Import the RSA.PublicKey pk2, err := currentBCCSP.KeyImport(pub, &bccsp.RSAGoPublicKeyImportOpts{Temporary: false}) if err != nil { t.Fatalf("Failed importing RSA public key [%s]", err) } if pk2 == nil { t.Fatal("Failed importing RSA public key. Return BCCSP key cannot be nil.") } // Sign and verify with the imported public key msg := []byte("Hello World") digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{}) if err != nil { t.Fatalf("Failed computing HASH [%s]", err) } signature, err := currentBCCSP.Sign(k, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)}) if err != nil { t.Fatalf("Failed generating RSA signature [%s]", err) } valid, err := currentBCCSP.Verify(pk2, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)}) if err != nil { t.Fatalf("Failed verifying RSA signature [%s]", err) } if !valid { t.Fatal("Failed verifying RSA signature. Signature not valid.") } } func TestKeyImportFromX509RSAPublicKey(t *testing.T) { // Generate an RSA key k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating RSA key [%s]", err) } // Generate a self-signed certificate testExtKeyUsage := []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth} testUnknownExtKeyUsage := []asn1.ObjectIdentifier{[]int{1, 2, 3}, []int{2, 59, 1}} extraExtensionData := []byte("extra extension") commonName := "test.example.com" template := x509.Certificate{ SerialNumber: big.NewInt(1), Subject: pkix.Name{ CommonName: commonName, Organization: []string{"Σ Acme Co"}, Country: []string{"US"}, ExtraNames: []pkix.AttributeTypeAndValue{ { Type: []int{2, 5, 4, 42}, Value: "Gopher", }, // This should override the Country, above. 
{ Type: []int{2, 5, 4, 6}, Value: "NL", }, }, }, NotBefore: time.Now().Add(-1 * time.Hour), NotAfter: time.Now().Add(1 * time.Hour), SignatureAlgorithm: x509.SHA256WithRSA, SubjectKeyId: []byte{1, 2, 3, 4}, KeyUsage: x509.KeyUsageCertSign, ExtKeyUsage: testExtKeyUsage, UnknownExtKeyUsage: testUnknownExtKeyUsage, BasicConstraintsValid: true, IsCA: true, OCSPServer: []string{"http://ocurrentBCCSP.example.com"}, IssuingCertificateURL: []string{"http://crt.example.com/ca1.crt"}, DNSNames: []string{"test.example.com"}, EmailAddresses: []string{"gopher@golang.org"}, IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1).To4(), net.ParseIP("2001:4860:0:2001::68")}, PolicyIdentifiers: []asn1.ObjectIdentifier{[]int{1, 2, 3}}, PermittedDNSDomains: []string{".example.com", "example.com"}, CRLDistributionPoints: []string{"http://crl1.example.com/ca1.crl", "http://crl2.example.com/ca1.crl"}, ExtraExtensions: []pkix.Extension{ { Id: []int{1, 2, 3, 4}, Value: extraExtensionData, }, }, } cryptoSigner, err := signer.New(currentBCCSP, k) if err != nil { t.Fatalf("Failed initializing CyrptoSigner [%s]", err) } // Export the public key pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting RSA public key [%s]", err) } pkRaw, err := pk.Bytes() if err != nil { t.Fatalf("Failed getting RSA raw public key [%s]", err) } pub, err := utils.DERToPublicKey(pkRaw) if err != nil { t.Fatalf("Failed converting raw to RSA.PublicKey [%s]", err) } certRaw, err := x509.CreateCertificate(rand.Reader, &template, &template, pub, cryptoSigner) if err != nil { t.Fatalf("Failed generating self-signed certificate [%s]", err) } cert, err := utils.DERToX509Certificate(certRaw) if err != nil { t.Fatalf("Failed generating X509 certificate object from raw [%s]", err) } // Import the certificate's public key pk2, err := currentBCCSP.KeyImport(cert, &bccsp.X509PublicKeyImportOpts{Temporary: false}) if err != nil { t.Fatalf("Failed importing RSA public key [%s]", err) } if pk2 == nil { t.Fatal("Failed importing RSA public key. Return BCCSP key cannot be nil.") } // Sign and verify with the imported public key msg := []byte("Hello World") digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{}) if err != nil { t.Fatalf("Failed computing HASH [%s]", err) } signature, err := currentBCCSP.Sign(k, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)}) if err != nil { t.Fatalf("Failed generating RSA signature [%s]", err) } valid, err := currentBCCSP.Verify(pk2, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)}) if err != nil { t.Fatalf("Failed verifying RSA signature [%s]", err) } if !valid { t.Fatal("Failed verifying RSA signature. Signature not valid.") } } func getCryptoHashIndex(t *testing.T) crypto.Hash { switch currentTestConfig.hashFamily { case "SHA2": switch currentTestConfig.securityLevel { case 256: return crypto.SHA256 case 384: return crypto.SHA384 default: t.Fatalf("Invalid security level [%d]", currentTestConfig.securityLevel) } case "SHA3": switch currentTestConfig.securityLevel { case 256: return crypto.SHA3_256 case 384: return crypto.SHA3_384 default: t.Fatalf("Invalid security level [%d]", currentTestConfig.securityLevel) } default: t.Fatalf("Invalid hash family [%s]", currentTestConfig.hashFamily) } return crypto.SHA3_256 }
[ "\"PKCS11_LIB\"", "\"PKCS11_PIN\"", "\"PKCS11_LABEL\"" ]
[]
[ "PKCS11_PIN", "PKCS11_LIB", "PKCS11_LABEL" ]
[]
["PKCS11_PIN", "PKCS11_LIB", "PKCS11_LABEL"]
go
3
0
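The test suite in the row above is driven by the PKCS11_LIB, PKCS11_PIN and PKCS11_LABEL variables listed in its environment column. As an editorial illustration only (the helper name, package name and skip behaviour below are not part of the original file), a test package could gather and validate those variables before exercising the HSM-backed BCCSP:

package pkcs11_test

import (
	"os"
	"testing"
)

// pkcs11Config holds the token settings the suite above expects.
type pkcs11Config struct {
	Lib, Pin, Label string
}

// loadPKCS11Config is a hypothetical helper: it skips the calling test when
// the PKCS#11 token is not configured instead of failing hard.
func loadPKCS11Config(t *testing.T) pkcs11Config {
	cfg := pkcs11Config{
		Lib:   os.Getenv("PKCS11_LIB"),
		Pin:   os.Getenv("PKCS11_PIN"),
		Label: os.Getenv("PKCS11_LABEL"),
	}
	if cfg.Lib == "" || cfg.Pin == "" || cfg.Label == "" {
		t.Skip("PKCS11_LIB, PKCS11_PIN and PKCS11_LABEL must be set to run these tests")
	}
	return cfg
}

func TestPKCS11EnvConfigured(t *testing.T) {
	cfg := loadPKCS11Config(t)
	t.Logf("using PKCS#11 library %s with token label %s", cfg.Lib, cfg.Label)
}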
mwdust/Combined19.py
############################################################################### # # Combined19: extinction model obtained from a combination of Marshall et al. # (2006), Green et al. (2019), and Drimmel et al. (2003) # ############################################################################### import os, os.path import numpy import h5py from mwdust.HierarchicalHealpixMap import HierarchicalHealpixMap _DEGTORAD= numpy.pi/180. _combineddir= os.path.join(os.getenv('DUST_DIR'),'combined19') class Combined19(HierarchicalHealpixMap): """extinction model obtained from a combination of Marshall et al. (2006), Green et al. (2019), and Drimmel et al. (2003)""" def __init__(self,filter=None,sf10=True,load_samples=False, interpk=1): """ NAME: __init__ PURPOSE: Initialize the combined dust map INPUT: filter= filter to return the extinction in sf10= (True) if True, use the Schlafly & Finkbeiner calibrations interpk= (1) interpolation order OUTPUT: object HISTORY: 2015-07-28 - Started - Bovy (UofT) 2019-09-12 - Updated - Rybizki (MPIA) """ HierarchicalHealpixMap.__init__(self,filter=filter,sf10=sf10) #Read the map with h5py.File(os.path.join(_combineddir,'combine19.h5'),'r') \ as combineddata: self._pix_info= combineddata['/pixel_info'][:] self._best_fit= combineddata['/best_fit'][:] # Utilities self._distmods= numpy.linspace(4,18.875,120) self._minnside= numpy.amin(self._pix_info['nside']) self._maxnside= numpy.amax(self._pix_info['nside']) nlevels= int(numpy.log2(self._maxnside//self._minnside))+1 self._nsides= [self._maxnside//2**ii for ii in range(nlevels)] self._indexArray= numpy.arange(len(self._pix_info['healpix_index'])) # For the interpolation self._intps= numpy.zeros(len(self._pix_info['healpix_index']), dtype='object') #array to cache interpolated extinctions self._interpk= interpk return None
[]
[]
[ "DUST_DIR" ]
[]
["DUST_DIR"]
python
1
0
substrabac/substrabac/wsgi.py
""" WSGI config for substrabac project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "substrabac.settings.prod") application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
config.py
import os from dotenv import load_dotenv basedir = os.path.abspath(os.path.dirname(__file__)) load_dotenv(os.path.join(basedir, '.env')) class Config(object): SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess' SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', '').replace( 'postgres://', 'postgresql://') or \ 'sqlite:///' + os.path.join(basedir, 'app.db') SQLALCHEMY_TRACK_MODIFICATIONS = False LOG_TO_STDOUT = os.environ.get('LOG_TO_STDOUT') MAIL_SERVER = os.environ.get('MAIL_SERVER') MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25) MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None MAIL_USERNAME = os.environ.get('MAIL_USERNAME') MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD') ADMINS = ['gwiese@tpg.com.au'] LANGUAGES = ['en', 'es', 'de'] MS_TRANSLATOR_KEY = os.environ.get('MS_TRANSLATOR_KEY') ELASTICSEARCH_URL = os.environ.get('ELASTICSEARCH_URL') POSTS_PER_PAGE = 10
[]
[]
[ "MAIL_SERVER", "MAIL_PASSWORD", "DATABASE_URL", "MAIL_PORT", "SECRET_KEY", "LOG_TO_STDOUT", "MAIL_USERNAME", "MAIL_USE_TLS", "MS_TRANSLATOR_KEY", "ELASTICSEARCH_URL" ]
[]
["MAIL_SERVER", "MAIL_PASSWORD", "DATABASE_URL", "MAIL_PORT", "SECRET_KEY", "LOG_TO_STDOUT", "MAIL_USERNAME", "MAIL_USE_TLS", "MS_TRANSLATOR_KEY", "ELASTICSEARCH_URL"]
python
10
0
send.go
package main import ( "fmt" "log" "math/rand" "os" "strconv" "github.com/streadway/amqp" ) // failOnError is a helper function, so I can type if err != nil less. func failOnError(err error, msg string) { if err != nil { log.Fatalf("%s: %s", msg, err) } } func main() { // Grab parameters username := os.Getenv("RABBITMQ_USERNAME") if username == "" { log.Fatalf("No Username env variable set") } password := os.Getenv("RABBITMQ_PASSWORD") if password == "" { log.Fatalf("No password env variable set") } service := os.Getenv("ROO_TEST_SERVICE_HOST") if service == "" { log.Fatalf("No service env variable set") } connection := fmt.Sprintf("amqp://%s:%s@%s", username, password, service) // Connect to RabbitMQ. conn, err := amqp.Dial(connection) failOnError(err, "Failed to connect to RabbitMQ") defer conn.Close() // Create a channel to interact with RabbitMQ. ch, err := conn.Channel() failOnError(err, "Failed to open a channel") defer ch.Close() // Declare the queue we're sending to - this is idempotent. q, err := ch.QueueDeclare( "hello", // name false, // durable false, // delete when unused false, // exclusive false, // no-wait nil, // arguments ) failOnError(err, "Failed to declare a queue") // Create some messages. for i := 0; i < 10; i++ { body := strconv.Itoa(rand.Intn(1000)) err = ch.Publish( "", // exchange q.Name, // routing key false, // mandatory false, // immediate amqp.Publishing{ ContentType: "text/plain", Body: []byte(body), }) failOnError(err, "Failed to publish a message") log.Printf("Published Message: %s", body) } }
[ "\"RABBITMQ_USERNAME\"", "\"RABBITMQ_PASSWORD\"", "\"ROO_TEST_SERVICE_HOST\"" ]
[]
[ "ROO_TEST_SERVICE_HOST", "RABBITMQ_USERNAME", "RABBITMQ_PASSWORD" ]
[]
["ROO_TEST_SERVICE_HOST", "RABBITMQ_USERNAME", "RABBITMQ_PASSWORD"]
go
3
0
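send.go above repeats the same look-up-and-Fatalf check for RABBITMQ_USERNAME, RABBITMQ_PASSWORD and ROO_TEST_SERVICE_HOST. A minimal standalone sketch of how that pattern could be factored into a single helper (the mustGetenv name is an editorial suggestion, not something the original file defines):

package main

import (
	"fmt"
	"log"
	"os"
)

// mustGetenv returns the value of key or terminates the program,
// mirroring the per-variable checks in send.go.
func mustGetenv(key string) string {
	v := os.Getenv(key)
	if v == "" {
		log.Fatalf("No %s env variable set", key)
	}
	return v
}

func main() {
	// Illustrative use only; these are the variables send.go reads.
	fmt.Println(mustGetenv("ROO_TEST_SERVICE_HOST"))
}

With such a helper the three separate checks in send.go collapse to one call per variable.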
server.go
// Package server implements an HTTP(S) server for kites. package kite import ( "crypto/tls" "fmt" "io/ioutil" "net" "net/http" "os" "strconv" "strings" "sync" ) // Run is a blocking method. It runs the kite server and then accepts requests // asynchronously. It supports graceful restart via SIGUSR2. func (k *Kite) Run() { if os.Getenv("KITE_VERSION") != "" { fmt.Println(k.Kite().Version) os.Exit(0) } // An error string equivalent to net.errClosing for using with http.Serve() // during a graceful exit. It needs to be declared here again because it is not // exported by the "net" package. const errClosing = "use of closed network connection" err := k.listenAndServe() if err != nil { if strings.Contains(err.Error(), errClosing) { // The server is closed by the Close() method k.Log.Info("Kite server is closed.") return } k.Log.Fatal(err.Error()) } } // Close stops the server and the kontrol client instance. func (k *Kite) Close() { k.Log.Info("Closing kite...") k.kontrol.Lock() if k.kontrol != nil && k.kontrol.Client != nil { k.kontrol.Close() } k.kontrol.Unlock() if k.listener != nil { k.listener.Close() k.listener = nil } k.mu.Lock() cache := k.verifyCache k.mu.Unlock() if cache != nil { cache.StopGC() } } func (k *Kite) Addr() string { return net.JoinHostPort(k.Config.IP, strconv.Itoa(k.Config.Port)) } // listenAndServe listens on the TCP network address k.URL.Host and then // calls Serve to handle requests on incoming connections. func (k *Kite) listenAndServe() error { // create a new listener if one doesn't exist yet l, err := net.Listen("tcp4", k.Addr()) if err != nil { return err } k.Log.Info("New listening: %s", l.Addr()) if k.TLSConfig != nil { if k.TLSConfig.NextProtos == nil { k.TLSConfig.NextProtos = []string{"http/1.1"} } l = tls.NewListener(l, k.TLSConfig) } k.listener = newGracefulListener(l) // listener is ready, notify waiters. close(k.readyC) defer close(k.closeC) // serving is finished, notify waiters. k.Log.Info("Serving...") return k.serve(k.listener, k) } func (k *Kite) serve(l net.Listener, h http.Handler) error { if k.Config.Serve != nil { return k.Config.Serve(l, h) } return http.Serve(l, h) } // Port returns the TCP port number that the kite listens on. // Port must be called after the listener is initialized. // You can use the ServerReadyNotify function to get notified when the listener is ready. // // The kite starts listening on the port when Run() is called. // Since Run() is blocking, you need to run it in a goroutine and then call this function when the listener is ready.
// // Example: // // k := kite.New("x", "1.0.0") // go k.Run() // <-k.ServerReadyNotify() // port := k.Port() // func (k *Kite) Port() int { if k.listener == nil { return 0 } return k.listener.Addr().(*net.TCPAddr).Port } func (k *Kite) UseTLS(certPEM, keyPEM string) { if k.TLSConfig == nil { k.TLSConfig = &tls.Config{} } cert, err := tls.X509KeyPair([]byte(certPEM), []byte(keyPEM)) if err != nil { panic(err) } k.TLSConfig.Certificates = append(k.TLSConfig.Certificates, cert) } func (k *Kite) UseTLSFile(certFile, keyFile string) { certData, err := ioutil.ReadFile(certFile) if err != nil { k.Log.Fatal("Cannot read certificate file: %s", err.Error()) } keyData, err := ioutil.ReadFile(keyFile) if err != nil { k.Log.Fatal("Cannot read certificate file: %s", err.Error()) } k.UseTLS(string(certData), string(keyData)) } func (k *Kite) ServerCloseNotify() chan bool { return k.closeC } func (k *Kite) ServerReadyNotify() chan bool { return k.readyC } // gracefulListener closes all accepted connections upon Close to ensure // no dangling websocket/xhr sessions outlive the kite. type gracefulListener struct { net.Listener conns map[net.Conn]struct{} connsMu sync.Mutex } func newGracefulListener(l net.Listener) *gracefulListener { return &gracefulListener{ Listener: l, conns: make(map[net.Conn]struct{}), } } func (l *gracefulListener) Accept() (net.Conn, error) { conn, err := l.Listener.Accept() if err != nil { return nil, err } l.connsMu.Lock() l.conns[conn] = struct{}{} l.connsMu.Unlock() return &gracefulConn{ Conn: conn, close: func() { l.connsMu.Lock() delete(l.conns, conn) l.connsMu.Unlock() }, }, nil } func (l *gracefulListener) Close() error { err := l.Listener.Close() l.connsMu.Lock() for conn := range l.conns { conn.Close() } l.conns = nil l.connsMu.Unlock() return err } type gracefulConn struct { net.Conn close func() } func (c *gracefulConn) Close() error { c.close() return c.Conn.Close() }
[ "\"KITE_VERSION\"" ]
[]
[ "KITE_VERSION" ]
[]
["KITE_VERSION"]
go
1
0
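The Port documentation inside server.go already outlines the intended call order; written out as a runnable snippet (the import path and kite name are assumptions based on the usual koding/kite layout, not taken from this row), and keeping in mind that setting KITE_VERSION makes Run print the version and exit immediately:

package main

import (
	"fmt"

	"github.com/koding/kite"
)

func main() {
	k := kite.New("x", "1.0.0")
	go k.Run()              // Run blocks, so start it in a goroutine.
	<-k.ServerReadyNotify() // wait until the listener is bound
	fmt.Println(k.Port())   // only meaningful once the listener exists
	k.Close()
}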
services/common/golib/sns.go
package golib import ( "fmt" "os" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sns" "github.com/sirupsen/logrus" ) // SNSPublishMsg publishes a message to an SNS topic func SNSPublishMsg(ctx aws.Context, snsSvc *sns.SNS, input string, topicArn *string) error { if input == "" { return fmt.Errorf("input can't be empty") } logrus.Debug("sns input to publish: ", input) var err error if os.Getenv("LOCAL") == "true" { return nil // publishing encrypted messages to SNS doesn't work from SAM+CDK, I suspect that SAM uses an incomplete name // CDK, auto generates a name, but those names aren't used locally // e.g. wm-dev-DynamoDBtableF8E87752-HSV525WR7KN3 is the name of the ddb in the cloud // locally it the name it knows is wm-dev-DynamoDBtableF8E87752 the last bit is missing // the same is gonna be the problem for the KMS key, and I don't know how or if I can pass in the complete key // _, err = snsSvc.Publish(&sns.PublishInput{ // Message: aws.String(input), // TopicArn: topicArn, // }) } else { _, err = snsSvc.PublishWithContext(ctx, &sns.PublishInput{ Message: aws.String(input), TopicArn: topicArn, }) } if err != nil { return fmt.Errorf("failed publishing a message to sns: %v", err) } logrus.Debug("message successfully sent to topic") return nil } /* func SNSPublishMsg2(client *sns2.Client, input string, topicArn *string) error { if input == "" { return fmt.Errorf("combatlog_uuid can't be empty") } logrus.Debug("sns2 input to publish: ", input) _, err := client.Publish(context.TODO(), &sns2.PublishInput{ Message: aws2.String(input), TopicArn: topicArn, }) if err != nil { return err } logrus.Debug("message successfully sent to topic") return nil } */
[ "\"LOCAL\"" ]
[]
[ "LOCAL" ]
[]
["LOCAL"]
go
1
0
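Because SNSPublishMsg above returns early whenever LOCAL=true, the local path can be exercised without AWS credentials. A hedged sketch of such a test, assumed to sit next to sns.go in package golib (the test name is illustrative; it relies only on behaviour visible in the file):

package golib

import "testing"

func TestSNSPublishMsgSkipsLocally(t *testing.T) {
	t.Setenv("LOCAL", "true") // restored automatically when the test finishes

	// With LOCAL=true the function returns before touching the SNS client,
	// so the nil client and topic ARN are never dereferenced.
	if err := SNSPublishMsg(nil, nil, "hello", nil); err != nil {
		t.Fatalf("expected the local short-circuit, got %v", err)
	}
}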
vendor/github.com/vmware/govmomi/govc/flags/output.go
/* Copyright (c) 2014-2016 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package flags import ( "context" "encoding/json" "errors" "flag" "fmt" "io" "os" "reflect" "sync" "time" "github.com/kr/pretty" "github.com/vmware/govmomi/task" "github.com/vmware/govmomi/vim25/progress" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/xml" ) type OutputWriter interface { Write(io.Writer) error } type OutputFlag struct { common JSON bool XML bool TTY bool Dump bool Out io.Writer formatError bool formatIndent bool } var outputFlagKey = flagKey("output") func NewOutputFlag(ctx context.Context) (*OutputFlag, context.Context) { if v := ctx.Value(outputFlagKey); v != nil { return v.(*OutputFlag), ctx } v := &OutputFlag{Out: os.Stdout} ctx = context.WithValue(ctx, outputFlagKey, v) return v, ctx } func (flag *OutputFlag) Register(ctx context.Context, f *flag.FlagSet) { flag.RegisterOnce(func() { f.BoolVar(&flag.JSON, "json", false, "Enable JSON output") f.BoolVar(&flag.XML, "xml", false, "Enable XML output") f.BoolVar(&flag.Dump, "dump", false, "Enable Go output") // Avoid adding more flags for now.. flag.formatIndent = os.Getenv("GOVC_INDENT") != "false" // Default to indented output flag.formatError = os.Getenv("GOVC_FORMAT_ERROR") != "false" // Default to formatted errors }) } func (flag *OutputFlag) Process(ctx context.Context) error { return flag.ProcessOnce(func() error { if !flag.All() { // Assume we have a tty if not outputting JSON flag.TTY = true } return nil }) } // Log outputs the specified string, prefixed with the current time. // A newline is not automatically added. If the specified string // starts with a '\r', the current line is cleared first. 
func (flag *OutputFlag) Log(s string) (int, error) { if len(s) > 0 && s[0] == '\r' { flag.Write([]byte{'\r', 033, '[', 'K'}) s = s[1:] } return flag.WriteString(time.Now().Format("[02-01-06 15:04:05] ") + s) } func (flag *OutputFlag) Write(b []byte) (int, error) { if !flag.TTY { return 0, nil } n, err := os.Stdout.Write(b) os.Stdout.Sync() return n, err } func (flag *OutputFlag) WriteString(s string) (int, error) { return flag.Write([]byte(s)) } func (flag *OutputFlag) All() bool { return flag.JSON || flag.XML || flag.Dump } func dumpValue(val interface{}) interface{} { type dumper interface { Dump() interface{} } if d, ok := val.(dumper); ok { return d.Dump() } rval := reflect.ValueOf(val) if rval.Type().Kind() != reflect.Ptr { return val } rval = rval.Elem() if rval.Type().Kind() == reflect.Struct { f := rval.Field(0) if f.Type().Kind() == reflect.Slice { // common case for the various 'type infoResult' if f.Len() == 1 { return f.Index(0).Interface() } return f.Interface() } if rval.NumField() == 1 && rval.Type().Field(0).Anonymous { // common case where govc type wraps govmomi type to implement OutputWriter return f.Interface() } } return val } func (flag *OutputFlag) WriteResult(result OutputWriter) error { var err error switch { case flag.Dump: format := "%#v\n" if flag.formatIndent { format = "%# v\n" } _, err = pretty.Fprintf(flag.Out, format, dumpValue(result)) case flag.JSON: e := json.NewEncoder(flag.Out) if flag.formatIndent { e.SetIndent("", " ") } err = e.Encode(result) case flag.XML: e := xml.NewEncoder(flag.Out) if flag.formatIndent { e.Indent("", " ") } err = e.Encode(dumpValue(result)) if err == nil { fmt.Fprintln(flag.Out) } default: err = result.Write(flag.Out) } return err } func (flag *OutputFlag) WriteError(err error) bool { if flag.formatError { flag.Out = os.Stderr return flag.WriteResult(&errorOutput{err}) == nil } return false } type errorOutput struct { error } func (e errorOutput) Write(w io.Writer) error { _, ferr := fmt.Fprintf(w, "%s: %s\n", os.Args[0], e.error) return ferr } func (e errorOutput) Dump() interface{} { if f, ok := e.error.(task.Error); ok { return f.LocalizedMethodFault } if soap.IsSoapFault(e.error) { return soap.ToSoapFault(e.error) } if soap.IsVimFault(e.error) { return soap.ToVimFault(e.error) } return e } func (e errorOutput) canEncode() bool { switch e.error.(type) { case task.Error: return true } return soap.IsSoapFault(e.error) || soap.IsVimFault(e.error) } // cannotEncode causes cli.Run to output err.Error() as it would without an error format specified var cannotEncode = errors.New("cannot encode error") func (e errorOutput) MarshalJSON() ([]byte, error) { _, ok := e.error.(json.Marshaler) if ok || e.canEncode() { return json.Marshal(e.error) } return nil, cannotEncode } func (e errorOutput) MarshalXML(encoder *xml.Encoder, start xml.StartElement) error { _, ok := e.error.(xml.Marshaler) if ok || e.canEncode() { return encoder.Encode(e.error) } return cannotEncode } type progressLogger struct { flag *OutputFlag prefix string wg sync.WaitGroup sink chan chan progress.Report done chan struct{} } func newProgressLogger(flag *OutputFlag, prefix string) *progressLogger { p := &progressLogger{ flag: flag, prefix: prefix, sink: make(chan chan progress.Report), done: make(chan struct{}), } p.wg.Add(1) go p.loopA() return p } // loopA runs before Sink() has been called. 
func (p *progressLogger) loopA() { var err error defer p.wg.Done() tick := time.NewTicker(100 * time.Millisecond) defer tick.Stop() called := false for stop := false; !stop; { select { case ch := <-p.sink: err = p.loopB(tick, ch) stop = true called = true case <-p.done: stop = true case <-tick.C: line := fmt.Sprintf("\r%s", p.prefix) p.flag.Log(line) } } if err != nil && err != io.EOF { p.flag.Log(fmt.Sprintf("\r%sError: %s\n", p.prefix, err)) } else if called { p.flag.Log(fmt.Sprintf("\r%sOK\n", p.prefix)) } } // loopA runs after Sink() has been called. func (p *progressLogger) loopB(tick *time.Ticker, ch <-chan progress.Report) error { var r progress.Report var ok bool var err error for ok = true; ok; { select { case r, ok = <-ch: if !ok { break } err = r.Error() case <-tick.C: line := fmt.Sprintf("\r%s", p.prefix) if r != nil { line += fmt.Sprintf("(%.0f%%", r.Percentage()) detail := r.Detail() if detail != "" { line += fmt.Sprintf(", %s", detail) } line += ")" } p.flag.Log(line) } } return err } func (p *progressLogger) Sink() chan<- progress.Report { ch := make(chan progress.Report) p.sink <- ch return ch } func (p *progressLogger) Wait() { close(p.done) p.wg.Wait() } func (flag *OutputFlag) ProgressLogger(prefix string) *progressLogger { return newProgressLogger(flag, prefix) }
[ "\"GOVC_INDENT\"", "\"GOVC_FORMAT_ERROR\"" ]
[]
[ "GOVC_INDENT", "GOVC_FORMAT_ERROR" ]
[]
["GOVC_INDENT", "GOVC_FORMAT_ERROR"]
go
2
0
collector/main.go
// Copyright 2017 Daniel Erat <dan@erat.org> // All rights reserved. // Package main implements a daemon that collects local data and sends it to the App Engine server. package main import ( "flag" "fmt" "log" "os" "path/filepath" ) func main() { var configPath string flag.Usage = func() { fmt.Fprintf(os.Stderr, "Usage: %s [option]...\n\nOptions:\n", os.Args[0]) flag.PrintDefaults() } flag.StringVar(&configPath, "config", filepath.Join(os.Getenv("HOME"), ".home_collector.json"), "Path to JSON config file") flag.Parse() // TODO: Log to syslog instead using log/syslog: // syslog.NewLogger(syslog.LOG_INFO|syslog.LOG_DAEMON, log.LstdFlags) logger := log.New(os.Stderr, "", log.LstdFlags) cfg, err := readConfig(configPath, logger) if err != nil { logger.Fatalf("Unable to read config from %v: %v", configPath, err) } r := newReporter(cfg) r.start() if cfg.PingHost != "" { go runPingLoop(cfg, r) } if cfg.PowerCommand != "" { go runPowerLoop(cfg, r) } l := &listener{cfg: cfg, rep: r} if err = l.run(); err != nil { logger.Fatalf("Got error while serving: %v", err) } }
[ "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
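collector/main.go above builds its default config path from $HOME, which silently becomes an empty string when the variable is unset. A small standalone sketch of an editorial alternative (not what the original does) that falls back via os.UserHomeDir:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// defaultConfigPath is a hypothetical replacement for the inline
// filepath.Join(os.Getenv("HOME"), ".home_collector.json") default;
// it degrades to the current directory when no home directory is known.
func defaultConfigPath() string {
	home, err := os.UserHomeDir()
	if err != nil || home == "" {
		home = "."
	}
	return filepath.Join(home, ".home_collector.json")
}

func main() {
	fmt.Println(defaultConfigPath())
}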
mantle/cmd/ore/gcloud/create-image.go
// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gcloud import ( "context" "fmt" "net/url" "os" "path" "strings" "github.com/spf13/cobra" "google.golang.org/api/storage/v1" "github.com/coreos/mantle/platform/api/gcloud" ) var ( cmdCreateImage = &cobra.Command{ Use: "create-image", Short: "Create GCE image", Long: "Create GCE image from an existing file in Google Storage", Run: runCreateImage, } createImageFamily string createImageBoard string createImageVersion string createImageRoot string createImageName string createImageForce bool ) func init() { user := os.Getenv("USER") cmdCreateImage.Flags().StringVar(&createImageFamily, "family", user, "GCE image group and name prefix") cmdCreateImage.Flags().StringVar(&createImageBoard, "board", "amd64-usr", "OS board name") cmdCreateImage.Flags().StringVar(&createImageVersion, "version", "", "OS build version") cmdCreateImage.Flags().StringVar(&createImageRoot, "source-root", "gs://users.developer.core-os.net/"+user+"/boards", "Storage URL prefix") cmdCreateImage.Flags().StringVar(&createImageName, "source-name", "coreos_production_gce.tar.gz", "Storage image name") cmdCreateImage.Flags().BoolVar(&createImageForce, "force", false, "overwrite existing GCE images without prompt") GCloud.AddCommand(cmdCreateImage) } func runCreateImage(cmd *cobra.Command, args []string) { if len(args) != 0 { fmt.Fprintf(os.Stderr, "Unrecognized args: %v\n", args) os.Exit(2) } if createImageVersion == "" { fmt.Fprintln(os.Stderr, "--version is required") os.Exit(2) } gsURL, err := url.Parse(createImageRoot) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } if gsURL.Scheme != "gs" { fmt.Fprintf(os.Stderr, "URL missing gs:// scheme: %v\n", createImageRoot) os.Exit(1) } if gsURL.Host == "" { fmt.Fprintf(os.Stderr, "URL missing bucket name %v\n", createImageRoot) os.Exit(1) } bucket := gsURL.Host imageNameGS := strings.TrimPrefix(path.Join(gsURL.Path, createImageBoard, createImageVersion, createImageName), "/") imageNameGCE := gceSanitize(createImageFamily + "-" + createImageVersion) ctx := context.Background() storageAPI, err := storage.NewService(ctx) if err != nil { fmt.Fprintf(os.Stderr, "Storage client failed: %v\n", err) os.Exit(1) } // check if this file actually exists if ok, err := fileQuery(storageAPI, bucket, imageNameGS); err != nil { fmt.Fprintf(os.Stderr, "Checking source image %s failed: %v\n", gsURL, err) os.Exit(1) } else if !ok { fmt.Fprintf(os.Stderr, "Source image %s does not exist\n", gsURL) os.Exit(1) } fmt.Printf("Creating image in GCE: %v...\n", imageNameGCE) // create image on gce storageSrc := fmt.Sprintf("https://storage.googleapis.com/%v/%v", bucket, imageNameGS) _, pending, err := api.CreateImage(&gcloud.ImageSpec{ Name: imageNameGCE, SourceImage: storageSrc, }, createImageForce) if err == nil { err = pending.Wait() } if err != nil { fmt.Fprintf(os.Stderr, "Creating GCE image failed: %v\n", err) os.Exit(1) } }
[ "\"USER\"" ]
[]
[ "USER" ]
[]
["USER"]
go
1
0
enterprise/dev/ci/internal/ci/pipeline.go
// Package ci is responsible for generating a Buildkite pipeline configuration. It is invoked by the // gen-pipeline.go command. package ci import ( "context" "fmt" "net/http" "os" "strconv" "strings" "time" "github.com/google/go-github/v41/github" "github.com/slack-go/slack" "github.com/sourcegraph/sourcegraph/dev/ci/runtype" "github.com/sourcegraph/sourcegraph/dev/team" "github.com/sourcegraph/sourcegraph/enterprise/dev/ci/images" bk "github.com/sourcegraph/sourcegraph/enterprise/dev/ci/internal/buildkite" "github.com/sourcegraph/sourcegraph/enterprise/dev/ci/internal/ci/changed" "github.com/sourcegraph/sourcegraph/enterprise/dev/ci/internal/ci/operations" "github.com/sourcegraph/sourcegraph/lib/errors" ) // GeneratePipeline is the main pipeline generation function. It defines the build pipeline for each of the // main CI cases, which are defined in the main switch statement in the function. func GeneratePipeline(c Config) (*bk.Pipeline, error) { if err := c.ensureCommit(); err != nil { return nil, err } // Common build env env := map[string]string{ // Build meta "BUILDKITE_PULL_REQUEST": os.Getenv("BUILDKITE_PULL_REQUEST"), "BUILDKITE_PULL_REQUEST_BASE_BRANCH": os.Getenv("BUILDKITE_PULL_REQUEST_BASE_BRANCH"), "BUILDKITE_PULL_REQUEST_REPO": os.Getenv("BUILDKITE_PULL_REQUEST_REPO"), "COMMIT_SHA": c.Commit, "DATE": c.Time.Format(time.RFC3339), "VERSION": c.Version, // Go flags "GO111MODULE": "on", // Additional flags "FORCE_COLOR": "3", "ENTERPRISE": "1", // Add debug flags for scripts to consume "CI_DEBUG_PROFILE": strconv.FormatBool(c.MessageFlags.ProfilingEnabled), // Bump Node.js memory to prevent OOM crashes "NODE_OPTIONS": "--max_old_space_size=8192", // Bundlesize configuration: https://github.com/siddharthkp/bundlesize2#build-status-and-checks-for-github "CI_REPO_OWNER": "sourcegraph", "CI_REPO_NAME": "sourcegraph", "CI_COMMIT_SHA": os.Getenv("BUILDKITE_COMMIT"), // $ in commit messages must be escaped to not attempt interpolation which will fail. "CI_COMMIT_MESSAGE": strings.ReplaceAll(os.Getenv("BUILDKITE_MESSAGE"), "$", "$$"), // HoneyComb dataset that stores build traces. "CI_BUILDEVENT_DATASET": "buildkite", } bk.FeatureFlags.ApplyEnv(env) // On release branches Percy must compare to the previous commit of the release branch, not main. if c.RunType.Is(runtype.ReleaseBranch) { env["PERCY_TARGET_BRANCH"] = c.Branch } // Build options for pipeline operations that spawn more build steps buildOptions := bk.BuildOptions{ Message: os.Getenv("BUILDKITE_MESSAGE"), Commit: c.Commit, Branch: c.Branch, Env: env, } // Test upgrades from mininum upgradeable Sourcegraph version - updated by release tool const minimumUpgradeableVersion = "3.39.0" // Set up operations that add steps to a pipeline. ops := operations.NewSet() // This statement outlines the pipeline steps for each CI case. // // PERF: Try to order steps such that slower steps are first. switch c.RunType { case runtype.PullRequest: if c.Diff.Has(changed.Client) { // triggers a slow pipeline, currently only affects web. It's optional so we // set it up separately from CoreTestOperations ops.Merge(operations.NewNamedSet(operations.PipelineSetupSetName, triggerAsync(buildOptions))) // Do not create client PR preview if Go or GraphQL is changed to avoid confusing // preview behavior, because only Client code is used to deploy application preview. 
if !c.Diff.Has(changed.Go) && !c.Diff.Has(changed.GraphQL) { ops.Append(prPreview()) } } ops.Merge(CoreTestOperations(c.Diff, CoreTestOperationsOptions{ MinimumUpgradeableVersion: minimumUpgradeableVersion, ClientLintOnlyChangedFiles: c.RunType.Is(runtype.PullRequest), })) case runtype.ReleaseNightly: ops.Append(triggerReleaseBranchHealthchecks(minimumUpgradeableVersion)) case runtype.BackendIntegrationTests: ops.Append( buildCandidateDockerImage("server", c.Version, c.candidateImageTag()), backendIntegrationTests(c.candidateImageTag())) // always include very backend-oriented changes in this set of tests testDiff := c.Diff | changed.DatabaseSchema | changed.Go ops.Merge(CoreTestOperations( testDiff, CoreTestOperationsOptions{MinimumUpgradeableVersion: minimumUpgradeableVersion}, )) case runtype.BextReleaseBranch: // If this is a browser extension release branch, run the browser-extension tests and // builds. ops = operations.NewSet( addClientLintersForAllFiles, addBrowserExtensionUnitTests, addBrowserExtensionIntegrationTests(0), // we pass 0 here as we don't have other pipeline steps to contribute to the resulting Percy build frontendTests, wait, addBrowserExtensionReleaseSteps) case runtype.BextNightly: // If this is a browser extension nightly build, run the browser-extension tests and // e2e tests. ops = operations.NewSet( addClientLintersForAllFiles, addBrowserExtensionUnitTests, recordBrowserExtensionIntegrationTests, frontendTests, wait, addBrowserExtensionE2ESteps) case runtype.ImagePatch: // only build image for the specified image in the branch name // see https://handbook.sourcegraph.com/engineering/deployments#building-docker-images-for-a-specific-branch patchImage, err := c.RunType.Matcher().ExtractBranchArgument(c.Branch) if err != nil { panic(fmt.Sprintf("ExtractBranchArgument: %s", err)) } if !contains(images.SourcegraphDockerImages, patchImage) { panic(fmt.Sprintf("no image %q found", patchImage)) } ops = operations.NewSet( buildCandidateDockerImage(patchImage, c.Version, c.candidateImageTag()), trivyScanCandidateImage(patchImage, c.candidateImageTag())) // Test images ops.Merge(CoreTestOperations(changed.All, CoreTestOperationsOptions{MinimumUpgradeableVersion: minimumUpgradeableVersion})) // Publish images after everything is done ops.Append( wait, publishFinalDockerImage(c, patchImage)) case runtype.ImagePatchNoTest: // If this is a no-test branch, then run only the Docker build. No tests are run. 
patchImage, err := c.RunType.Matcher().ExtractBranchArgument(c.Branch) if err != nil { panic(fmt.Sprintf("ExtractBranchArgument: %s", err)) } if !contains(images.SourcegraphDockerImages, patchImage) { panic(fmt.Sprintf("no image %q found", patchImage)) } ops = operations.NewSet( buildCandidateDockerImage(patchImage, c.Version, c.candidateImageTag()), wait, publishFinalDockerImage(c, patchImage)) case runtype.CandidatesNoTest: for _, dockerImage := range images.SourcegraphDockerImages { ops.Append( buildCandidateDockerImage(dockerImage, c.Version, c.candidateImageTag())) } case runtype.ExecutorPatchNoTest: ops = operations.NewSet( buildExecutor(c.Version, c.MessageFlags.SkipHashCompare), publishExecutor(c.Version, c.MessageFlags.SkipHashCompare), buildExecutorDockerMirror(c.Version), publishExecutorDockerMirror(c.Version)) default: // Slow async pipeline ops.Merge(operations.NewNamedSet(operations.PipelineSetupSetName, triggerAsync(buildOptions))) // Slow image builds imageBuildOps := operations.NewNamedSet("Image builds") for _, dockerImage := range images.SourcegraphDockerImages { imageBuildOps.Append(buildCandidateDockerImage(dockerImage, c.Version, c.candidateImageTag())) } // Executor VM image skipHashCompare := c.MessageFlags.SkipHashCompare || c.RunType.Is(runtype.ReleaseBranch) if c.RunType.Is(runtype.MainDryRun, runtype.MainBranch, runtype.ReleaseBranch) { imageBuildOps.Append(buildExecutor(c.Version, skipHashCompare)) if c.RunType.Is(runtype.ReleaseBranch) || c.Diff.Has(changed.ExecutorDockerRegistryMirror) { imageBuildOps.Append(buildExecutorDockerMirror(c.Version)) } } ops.Merge(imageBuildOps) // Trivy security scans imageScanOps := operations.NewNamedSet("Image security scans") for _, dockerImage := range images.SourcegraphDockerImages { imageScanOps.Append(trivyScanCandidateImage(dockerImage, c.candidateImageTag())) } ops.Merge(imageScanOps) // Core tests ops.Merge(CoreTestOperations(changed.All, CoreTestOperationsOptions{ ChromaticShouldAutoAccept: c.RunType.Is(runtype.MainBranch), MinimumUpgradeableVersion: minimumUpgradeableVersion, })) // Integration tests ops.Merge(operations.NewNamedSet("Integration tests", backendIntegrationTests(c.candidateImageTag()), codeIntelQA(c.candidateImageTag()), )) // End-to-end tests ops.Merge(operations.NewNamedSet("End-to-end tests", serverE2E(c.candidateImageTag()), serverQA(c.candidateImageTag()), clusterQA(c.candidateImageTag()), testUpgrade(c.candidateImageTag(), minimumUpgradeableVersion), )) // All operations before this point are required ops.Append(wait) // Add final artifacts publishOps := operations.NewNamedSet("Publish images") for _, dockerImage := range images.SourcegraphDockerImages { publishOps.Append(publishFinalDockerImage(c, dockerImage)) } // Executor VM image if c.RunType.Is(runtype.MainBranch, runtype.ReleaseBranch) { publishOps.Append(publishExecutor(c.Version, skipHashCompare)) if c.RunType.Is(runtype.ReleaseBranch) || c.Diff.Has(changed.ExecutorDockerRegistryMirror) { publishOps.Append(publishExecutorDockerMirror(c.Version)) } } ops.Merge(publishOps) } ops.Append( wait, // wait for all steps to pass uploadBuildeventTrace(), // upload the final buildevent trace if the build succeeded. 
) // Construct pipeline pipeline := &bk.Pipeline{ Env: env, AfterEveryStepOpts: []bk.StepOpt{ withDefaultTimeout, withAgentQueueDefaults, withAgentLostRetries, }, } // Toggle profiling of each step if c.MessageFlags.ProfilingEnabled { pipeline.AfterEveryStepOpts = append(pipeline.AfterEveryStepOpts, withProfiling) } // Apply operations on pipeline ops.Apply(pipeline) // Validate generated pipeline have unique keys if err := pipeline.EnsureUniqueKeys(make(map[string]int)); err != nil { return nil, err } // Add a notify block if c.RunType.Is(runtype.MainBranch) { ctx := context.Background() // Slack client for retriving Slack profile data, not for making the request - for // more details, see the config.Notify docstring. slc := slack.New(c.Notify.SlackToken) // For now, we use an unauthenticated GitHub client because `sourcegraph/sourcegraph` // is a public repository. ghc := github.NewClient(http.DefaultClient) // Get teammate based on GitHub author of commit teammates := team.NewTeammateResolver(ghc, slc) tm, err := teammates.ResolveByCommitAuthor(ctx, "sourcegraph", "sourcegraph", c.Commit) if err != nil { pipeline.AddFailureSlackNotify(c.Notify.Channel, "", errors.Newf("failed to get Slack user: %w", err)) } else { pipeline.AddFailureSlackNotify(c.Notify.Channel, tm.SlackID, nil) } } return pipeline, nil } // withDefaultTimeout makes all command steps timeout after 60 minutes in case a buildkite // agent got stuck / died. func withDefaultTimeout(s *bk.Step) { // bk.Step is a union containing fields across all the different step types. // However, "timeout_in_minutes" only applies to the "command" step type. // // Testing the length of the "Command" field seems to be the most reliable way // of differentiating "command" steps from other step types without refactoring // everything. if len(s.Command) > 0 { if s.TimeoutInMinutes == "" { // Set the default value iff someone else hasn't set a custom one. s.TimeoutInMinutes = "60" } } } // withAgentQueueDefaults ensures all agents target a specific queue, and ensures they // steps are configured appropriately to run on the queue func withAgentQueueDefaults(s *bk.Step) { if len(s.Agents) == 0 || s.Agents["queue"] == "" { s.Agents["queue"] = bk.AgentQueueStateless } } // withProfiling wraps "time -v" around each command for CPU/RAM utilization information func withProfiling(s *bk.Step) { var prefixed []string for _, cmd := range s.Command { prefixed = append(prefixed, fmt.Sprintf("env time -v %s", cmd)) } s.Command = prefixed } // withAgentLostRetries insert automatic retries when the job has failed because it lost its agent. // // If the step has been marked as not retryable, the retry will be skipped. func withAgentLostRetries(s *bk.Step) { if s.Retry != nil && s.Retry.Manual != nil && !s.Retry.Manual.Allowed { return } if s.Retry == nil { s.Retry = &bk.RetryOptions{} } if s.Retry.Automatic == nil { s.Retry.Automatic = []bk.AutomaticRetryOptions{} } s.Retry.Automatic = append(s.Retry.Automatic, bk.AutomaticRetryOptions{ Limit: 1, ExitStatus: -1, }) }
[ "\"BUILDKITE_PULL_REQUEST\"", "\"BUILDKITE_PULL_REQUEST_BASE_BRANCH\"", "\"BUILDKITE_PULL_REQUEST_REPO\"", "\"BUILDKITE_COMMIT\"", "\"BUILDKITE_MESSAGE\"", "\"BUILDKITE_MESSAGE\"" ]
[]
[ "BUILDKITE_PULL_REQUEST", "BUILDKITE_COMMIT", "BUILDKITE_MESSAGE", "BUILDKITE_PULL_REQUEST_REPO", "BUILDKITE_PULL_REQUEST_BASE_BRANCH" ]
[]
["BUILDKITE_PULL_REQUEST", "BUILDKITE_COMMIT", "BUILDKITE_MESSAGE", "BUILDKITE_PULL_REQUEST_REPO", "BUILDKITE_PULL_REQUEST_BASE_BRANCH"]
go
5
0
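One detail worth noting in pipeline.go above is that BUILDKITE_MESSAGE is copied into the build env only after every "$" is doubled, so Buildkite does not try to interpolate variables inside commit messages. In isolation the transformation is simply:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// "$" in commit messages must be doubled before re-export, exactly as
	// the CI_COMMIT_MESSAGE entry in pipeline.go does, so Buildkite does
	// not attempt variable interpolation on the message text.
	msg := strings.ReplaceAll(os.Getenv("BUILDKITE_MESSAGE"), "$", "$$")
	fmt.Println(msg)
}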
cmd/cloudFoundryCreateService_generated.go
// Code generated by piper's step-generator. DO NOT EDIT. package cmd import ( "fmt" "os" "time" "github.com/SAP/jenkins-library/pkg/config" "github.com/SAP/jenkins-library/pkg/log" "github.com/SAP/jenkins-library/pkg/telemetry" "github.com/spf13/cobra" ) type cloudFoundryCreateServiceOptions struct { CfAPIEndpoint string `json:"cfApiEndpoint,omitempty"` Username string `json:"username,omitempty"` Password string `json:"password,omitempty"` CfOrg string `json:"cfOrg,omitempty"` CfSpace string `json:"cfSpace,omitempty"` CfService string `json:"cfService,omitempty"` CfServicePlan string `json:"cfServicePlan,omitempty"` CfServiceInstanceName string `json:"cfServiceInstanceName,omitempty"` CfServiceBroker string `json:"cfServiceBroker,omitempty"` CfCreateServiceConfig string `json:"cfCreateServiceConfig,omitempty"` CfServiceTags string `json:"cfServiceTags,omitempty"` ServiceManifest string `json:"serviceManifest,omitempty"` ManifestVariables []string `json:"manifestVariables,omitempty"` ManifestVariablesFiles []string `json:"manifestVariablesFiles,omitempty"` } // CloudFoundryCreateServiceCommand Creates one or multiple Services in Cloud Foundry func CloudFoundryCreateServiceCommand() *cobra.Command { const STEP_NAME = "cloudFoundryCreateService" metadata := cloudFoundryCreateServiceMetadata() var stepConfig cloudFoundryCreateServiceOptions var startTime time.Time var createCloudFoundryCreateServiceCmd = &cobra.Command{ Use: STEP_NAME, Short: "Creates one or multiple Services in Cloud Foundry", Long: `Creates one or multiple Cloud Foundry Services in Cloud Foundry Mandatory: * Cloud Foundry API endpoint, Organization, Space and user are available Please provide either of the following options: * If you chose to create a single Service the Service Instance Name, Service Plan and Service Broker of the Service to be created have to be available. You can set the optional ` + "`" + `cfCreateServiceConfig` + "`" + ` flag to configure the Service creation with your respective JSON configuration. The JSON configuration can either be an in-line JSON string or the path a dedicated JSON configuration file containing the JSON configuration. If you chose a dedicated config file, you must store the file in the same folder as your ` + "`" + `Jenkinsfile` + "`" + ` that starts the Pipeline in order for the Pipeline to be able to find the file. Most favourable SCM is Git. If you want the service to be created from a particular broker you can set the optional ` + "`" + `cfServiceBroker` + "`" + `flag. You can set user provided tags for the Service creation using a flat list as the value for the optional ` + "`" + `cfServiceTags` + "`" + ` flag. The optional ` + "`" + `cfServiceBroker` + "`" + ` flag can be used when the service name is ambiguous. * For creating one or multiple Cloud Foundry Services at once with the Cloud Foundry Create-Service-Push Plugin using the optional ` + "`" + `serviceManifest` + "`" + ` flag. If you chose to set this flag, the Create-Service-Push Plugin will be used for all Service creations in this step and you will need to provide a ` + "`" + `serviceManifest.yml` + "`" + ` file. In that case, above described flags and options will not be used for the Service creations, since you chose to use the Create-Service-Push Plugin. Please see below examples for more information on how to make use of the plugin with the appropriate step configuation. Additionally the Plugin provides the option to make use of variable substitution for the Service creations. 
You can find further information regarding the functionality of the Cloud Foundry Create-Service-Push Plugin in the respective documentation: [Cloud Foundry Create-Service-Push Plugin](https://github.com/dawu415/CF-CLI-Create-Service-Push-Plugin)`, PreRunE: func(cmd *cobra.Command, _ []string) error { startTime = time.Now() log.SetStepName(STEP_NAME) log.SetVerbose(GeneralConfig.Verbose) path, _ := os.Getwd() fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path} log.RegisterHook(fatalHook) err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile) if err != nil { log.SetErrorCategory(log.ErrorConfiguration) return err } log.RegisterSecret(stepConfig.Username) log.RegisterSecret(stepConfig.Password) if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 { sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID) log.RegisterHook(&sentryHook) } return nil }, Run: func(_ *cobra.Command, _ []string) { telemetryData := telemetry.CustomData{} telemetryData.ErrorCode = "1" handler := func() { config.RemoveVaultSecretFiles() telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds()) telemetryData.ErrorCategory = log.GetErrorCategory().String() telemetry.Send(&telemetryData) } log.DeferExitHandler(handler) defer handler() telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME) cloudFoundryCreateService(stepConfig, &telemetryData) telemetryData.ErrorCode = "0" log.Entry().Info("SUCCESS") }, } addCloudFoundryCreateServiceFlags(createCloudFoundryCreateServiceCmd, &stepConfig) return createCloudFoundryCreateServiceCmd } func addCloudFoundryCreateServiceFlags(cmd *cobra.Command, stepConfig *cloudFoundryCreateServiceOptions) { cmd.Flags().StringVar(&stepConfig.CfAPIEndpoint, "cfApiEndpoint", `https://api.cf.eu10.hana.ondemand.com`, "Cloud Foundry API endpoint") cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User or E-Mail for CF") cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password for Cloud Foundry User") cmd.Flags().StringVar(&stepConfig.CfOrg, "cfOrg", os.Getenv("PIPER_cfOrg"), "Cloud Foundry org") cmd.Flags().StringVar(&stepConfig.CfSpace, "cfSpace", os.Getenv("PIPER_cfSpace"), "Cloud Foundry Space") cmd.Flags().StringVar(&stepConfig.CfService, "cfService", os.Getenv("PIPER_cfService"), "Parameter for Cloud Foundry Service to be used for creating Cloud Foundry Service") cmd.Flags().StringVar(&stepConfig.CfServicePlan, "cfServicePlan", os.Getenv("PIPER_cfServicePlan"), "Parameter for Cloud Foundry Service Plan to be used when creating a Cloud Foundry Service") cmd.Flags().StringVar(&stepConfig.CfServiceInstanceName, "cfServiceInstanceName", os.Getenv("PIPER_cfServiceInstanceName"), "Parameter for naming the Service Instance when creating a Cloud Foundry Service") cmd.Flags().StringVar(&stepConfig.CfServiceBroker, "cfServiceBroker", os.Getenv("PIPER_cfServiceBroker"), "Parameter for Service Broker to be used when creating a Cloud Foundry Service") cmd.Flags().StringVar(&stepConfig.CfCreateServiceConfig, "cfCreateServiceConfig", os.Getenv("PIPER_cfCreateServiceConfig"), "Path to JSON file or JSON in-line string for a Cloud Foundry Service creation") cmd.Flags().StringVar(&stepConfig.CfServiceTags, "cfServiceTags", os.Getenv("PIPER_cfServiceTags"), "Flat list of Tags to be used when creating a Cloud Foundry Service in a single string") cmd.Flags().StringVar(&stepConfig.ServiceManifest, 
"serviceManifest", `service-manifest.yml`, "Path to Cloud Foundry Service Manifest in YAML format for multiple service creations that are being passed to a Create-Service-Push Cloud Foundry cli plugin") cmd.Flags().StringSliceVar(&stepConfig.ManifestVariables, "manifestVariables", []string{}, "Defines a List of variables as key-value Map objects used for variable substitution within the file given by the Manifest. Defaults to an empty list, if not specified otherwise. This can be used to set variables like it is provided by `cf push --var key=value`. The order of the maps of variables given in the list is relevant in case there are conflicting variable names and values between maps contained within the list. In case of conflicts, the last specified map in the list will win. Though each map entry in the list can contain more than one key-value pair for variable substitution, it is recommended to stick to one entry per map, and rather declare more maps within the list. The reason is that if a map in the list contains more than one key-value entry, and the entries are conflicting, the conflict resolution behavior is undefined (since map entries have no sequence). Variables defined via `manifestVariables` always win over conflicting variables defined via any file given by `manifestVariablesFiles` - no matter what is declared before. This is the same behavior as can be observed when using `cf push --var` in combination with `cf push --vars-file`") cmd.Flags().StringSliceVar(&stepConfig.ManifestVariablesFiles, "manifestVariablesFiles", []string{}, "Defines the manifest variables Yaml files to be used to replace variable references in manifest. This parameter is optional and will default to `manifest-variables.yml`. This can be used to set variable files like it is provided by `cf push --vars-file <file>`. If the manifest is present and so are all variable files, a variable substitution will be triggered that uses the `cfManifestSubstituteVariables` step before deployment. 
The format of variable references follows the Cloud Foundry standard in `https://docs.cloudfoundry.org/devguide/deploy-apps/manifest-attributes.html#variable-substitution`") cmd.MarkFlagRequired("cfApiEndpoint") cmd.MarkFlagRequired("username") cmd.MarkFlagRequired("password") cmd.MarkFlagRequired("cfOrg") cmd.MarkFlagRequired("cfSpace") } // retrieve step metadata func cloudFoundryCreateServiceMetadata() config.StepData { var theMetaData = config.StepData{ Metadata: config.StepMetadata{ Name: "cloudFoundryCreateService", Aliases: []config.Alias{}, Description: "Creates one or multiple Services in Cloud Foundry", }, Spec: config.StepSpec{ Inputs: config.StepInputs{ Parameters: []config.StepParameters{ { Name: "cfApiEndpoint", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: true, Aliases: []config.Alias{{Name: "cloudFoundry/apiEndpoint"}}, }, { Name: "username", ResourceRef: []config.ResourceReference{ { Name: "cfCredentialsId", Param: "username", Type: "secret", }, { Name: "", Paths: []string{"$(vaultPath)/cloudfoundry-$(org)-$(space)", "$(vaultBasePath)/$(vaultPipelineName)/cloudfoundry-$(org)-$(space)", "$(vaultBasePath)/GROUP-SECRETS/cloudfoundry-$(org)-$(space)"}, Type: "vaultSecret", }, }, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{}, }, { Name: "password", ResourceRef: []config.ResourceReference{ { Name: "cfCredentialsId", Param: "password", Type: "secret", }, { Name: "", Paths: []string{"$(vaultPath)/cloudfoundry-$(org)-$(space)", "$(vaultBasePath)/$(vaultPipelineName)/cloudfoundry-$(org)-$(space)", "$(vaultBasePath)/GROUP-SECRETS/cloudfoundry-$(org)-$(space)"}, Type: "vaultSecret", }, }, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{}, }, { Name: "cfOrg", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: true, Aliases: []config.Alias{{Name: "cloudFoundry/org"}}, }, { Name: "cfSpace", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: true, Aliases: []config.Alias{{Name: "cloudFoundry/space"}}, }, { Name: "cfService", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: false, Aliases: []config.Alias{{Name: "cloudFoundry/service"}}, }, { Name: "cfServicePlan", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: false, Aliases: []config.Alias{{Name: "cloudFoundry/servicePlan"}}, }, { Name: "cfServiceInstanceName", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: false, Aliases: []config.Alias{{Name: "cloudFoundry/serviceInstanceName"}}, }, { Name: "cfServiceBroker", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: false, Aliases: []config.Alias{{Name: "cloudFoundry/serviceBroker"}}, }, { Name: "cfCreateServiceConfig", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: false, Aliases: []config.Alias{{Name: "cloudFoundry/createServiceConfig"}}, }, { Name: "cfServiceTags", ResourceRef: []config.ResourceReference{}, Scope: 
[]string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: false, Aliases: []config.Alias{{Name: "cloudFoundry/serviceTags"}}, }, { Name: "serviceManifest", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: false, Aliases: []config.Alias{{Name: "cloudFoundry/serviceManifest"}, {Name: "cfServiceManifest"}}, }, { Name: "manifestVariables", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "[]string", Mandatory: false, Aliases: []config.Alias{{Name: "cloudFoundry/manifestVariables"}, {Name: "cfManifestVariables"}}, }, { Name: "manifestVariablesFiles", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "[]string", Mandatory: false, Aliases: []config.Alias{{Name: "cloudFoundry/manifestVariablesFiles"}, {Name: "cfManifestVariablesFiles"}}, }, }, }, Containers: []config.Container{ {Name: "cf", Image: "ppiper/cf-cli:7"}, }, }, } return theMetaData }
[ "\"PIPER_username\"", "\"PIPER_password\"", "\"PIPER_cfOrg\"", "\"PIPER_cfSpace\"", "\"PIPER_cfService\"", "\"PIPER_cfServicePlan\"", "\"PIPER_cfServiceInstanceName\"", "\"PIPER_cfServiceBroker\"", "\"PIPER_cfCreateServiceConfig\"", "\"PIPER_cfServiceTags\"" ]
[]
[ "PIPER_cfServiceTags", "PIPER_cfSpace", "PIPER_cfServicePlan", "PIPER_password", "PIPER_cfService", "PIPER_username", "PIPER_cfServiceInstanceName", "PIPER_cfServiceBroker", "PIPER_cfOrg", "PIPER_cfCreateServiceConfig" ]
[]
["PIPER_cfServiceTags", "PIPER_cfSpace", "PIPER_cfServicePlan", "PIPER_password", "PIPER_cfService", "PIPER_username", "PIPER_cfServiceInstanceName", "PIPER_cfServiceBroker", "PIPER_cfOrg", "PIPER_cfCreateServiceConfig"]
go
10
0
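The flags in the step above default to PIPER_* environment variables so pipeline configuration can be injected without command-line arguments. A minimal, self-contained sketch of that pattern follows (not part of the generated step; the command name and printed output are illustrative):

// Sketch only: a cobra flag whose default falls back to an environment variable,
// mirroring the PIPER_username default used by the step above.
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	var username string
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(_ *cobra.Command, _ []string) error {
			fmt.Println("effective username:", username)
			return nil
		},
	}
	// The default comes from the environment; an explicit --username flag overrides it.
	cmd.Flags().StringVar(&username, "username", os.Getenv("PIPER_username"), "User or E-Mail for CF")
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}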
tests/secretbox_test.py
"""Unit tests against secretbox.py""" import os from typing import Any from typing import Generator from unittest.mock import patch import pytest from secretbox import SecretBox from tests.conftest import ENV_FILE_EXPECTED @pytest.fixture def secretbox() -> Generator[SecretBox, None, None]: """Default instance of LoadEnv""" secrets = SecretBox() assert not secrets.loaded_values yield secrets def test_load_from_with_unknown(secretbox: SecretBox, mock_env_file: str) -> None: """Load secrets, throw an unknown loader in to ensure clean fall-through""" assert not secretbox.loaded_values secretbox.load_from(["envfile", "unknown"], filename=mock_env_file) assert secretbox.loaded_values def test_load_order_file_over_environ(secretbox: SecretBox, mock_env_file: str) -> None: """Loaded file should override existing environ values""" altered_expected = {key: f"{value} ALT" for key, value in ENV_FILE_EXPECTED.items()} with patch.dict(os.environ, altered_expected): secretbox.load_from(["environ", "envfile"], filename=mock_env_file) for key, value in ENV_FILE_EXPECTED.items(): assert secretbox.get(key) == value, f"Expected: {key}, {value}" def test_load_order_environ_over_file(secretbox: SecretBox, mock_env_file: str) -> None: """Loaded environ should override file values""" altered_expected = {key: f"{value} ALT" for key, value in ENV_FILE_EXPECTED.items()} with patch.dict(os.environ, altered_expected): secretbox.load_from(["envfile", "environ"], filename=mock_env_file) for key, value in ENV_FILE_EXPECTED.items(): assert secretbox.get(key) == f"{value} ALT", f"Expected: {key}, {value} ALT" def test_update_loaded_values(secretbox: SecretBox) -> None: """Ensure we are updating state correctly""" secretbox._update_loaded_values({"TEST": "TEST01"}) assert secretbox.get("TEST") == "TEST01" secretbox._update_loaded_values({"TEST": "TEST02"}) assert secretbox.get("TEST") == "TEST02" def test_join_kwarg_defaults(secretbox: SecretBox) -> None: """Mutables are fun, this should never create side-effects""" secretbox.kwarg_defaults = {"TEST": "TEST01"} new_kwargs = {"TEST": "TEST02"} final_kwargs = secretbox._join_kwarg_defaults(new_kwargs) assert secretbox.kwarg_defaults == {"TEST": "TEST01"} assert new_kwargs == {"TEST": "TEST02"} assert final_kwargs == new_kwargs def test_autoload_tempfile(mock_env_file: str) -> None: """One less line of code needed""" secretbox = SecretBox(filename=mock_env_file, auto_load=True) for key, value in ENV_FILE_EXPECTED.items(): assert secretbox.get(key) == value def test_get_missing_key_is_empty(secretbox: SecretBox) -> None: """Missing key? Check behind the milk""" with pytest.raises(KeyError): secretbox.get("BYWHATCHANCEWOULDTHISSEXIST") def test_get_default_missing_key(secretbox: SecretBox) -> None: """Missing key? 
Return the provided default instead""" assert secretbox.get("BYWHATCHANCEWOULDTHISSEXIST", "Hello") == "Hello" def test_get_as_valid_int(secretbox: SecretBox) -> None: """Helper to return ints""" with patch.dict(os.environ, {"TEST_INT": "42"}): secretbox.load_from(["environ"]) assert secretbox.get_int("TEST_INT") == 42 assert secretbox.get_int("TEST_INT", 0) == 42 def test_get_as_invalid_int(secretbox: SecretBox) -> None: """Helper to return ints should raise on assumption that value is an int""" with patch.dict(os.environ, {"TEST_INT": "Forty-two"}): secretbox.load_from(["environ"]) with pytest.raises(ValueError): secretbox.get_int("TEST_INT", -1) def test_get_default_int(secretbox: SecretBox) -> None: """Return the default if provided instead of raising""" assert secretbox.get_int("NOTTHERE", 10) == 10 def test_get_as_list(secretbox: SecretBox) -> None: """Helper to return a list based on given delimiter""" with patch.dict(os.environ, {"TEST_STR": "rooBlank", "TEST_LIST": "1 | 2|3"}): secretbox.load_from(["environ"]) assert secretbox.get_list("TEST_LIST") == ["1 | 2|3"] assert secretbox.get_list("TEST_STR", "|") == ["rooBlank"] assert secretbox.get_list("TEST_LIST", "|") == ["1 ", " 2", "3"] def test_get_as_list_default(secretbox: SecretBox) -> None: """Return the default if provided instead of raising""" assert secretbox.get_list("NOTTHERE", ",", ["1", "2", "3"]) == ["1", "2", "3"] def test_load_debug_flag(caplog: Any) -> None: """Ensure logging is silentish""" _ = SecretBox() assert "Debug flag passed." not in caplog.text _ = SecretBox(debug_flag=True) assert "Debug flag passed." in caplog.text
[]
[]
[]
[]
[]
python
0
0
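The two load-order tests above exercise a "later loader wins" merge of key/value sources. A rough sketch of that semantics, reduced to a plain map merge in Go (an illustration only, not the secretbox implementation):

// Sketch only: keys from later sources override earlier ones, which is the
// precedence behaviour the load-order tests assert.
package main

import "fmt"

func mergeLoaders(sources ...map[string]string) map[string]string {
	merged := map[string]string{}
	for _, src := range sources {
		for k, v := range src {
			merged[k] = v
		}
	}
	return merged
}

func main() {
	environ := map[string]string{"KEY": "from environ"}
	envfile := map[string]string{"KEY": "from env file"}
	// Loading the file after the environment means the file value wins, and vice versa.
	fmt.Println(mergeLoaders(environ, envfile)["KEY"]) // from env file
	fmt.Println(mergeLoaders(envfile, environ)["KEY"]) // from environ
}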
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.applications.unmanagedamlauncher; import java.io.BufferedReader; import java.io.DataOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStreamReader; import java.net.InetAddress; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.EnumSet; import java.util.Map; import java.util.Set; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.util.Records; /** * The UnmanagedLauncher is a simple client that launches and unmanaged AM. An * unmanagedAM is an AM that is not launched and managed by the RM. The client * creates a new application on the RM and negotiates a new attempt id. Then it * waits for the RM app state to reach be YarnApplicationState.ACCEPTED after * which it spawns the AM in another process and passes it the container id via * env variable Environment.CONTAINER_ID. The AM can be in any * language. The AM can register with the RM using the attempt id obtained * from the container id and proceed as normal. * The client redirects app stdout and stderr to its own stdout and * stderr and waits for the AM process to exit. Then it waits for the RM to * report app completion. 
*/ public class UnmanagedAMLauncher { private static final Log LOG = LogFactory.getLog(UnmanagedAMLauncher.class); private Configuration conf; // Handle to talk to the Resource Manager/Applications Manager protected YarnClient rmClient; // Application master specific info to register a new Application with RM/ASM private String appName = ""; // App master priority private int amPriority = 0; // Queue for App master private String amQueue = ""; // cmd to start AM private String amCmd = null; // set the classpath explicitly private String classpath = null; private volatile boolean amCompleted = false; private static final long AM_STATE_WAIT_TIMEOUT_MS = 10000; /** * @param args * Command line arguments */ public static void main(String[] args) { try { UnmanagedAMLauncher client = new UnmanagedAMLauncher(); LOG.info("Initializing Client"); boolean doRun = client.init(args); if (!doRun) { System.exit(0); } client.run(); } catch (Throwable t) { LOG.fatal("Error running Client", t); System.exit(1); } } /** */ public UnmanagedAMLauncher(Configuration conf) throws Exception { // Set up RPC this.conf = conf; } public UnmanagedAMLauncher() throws Exception { this(new Configuration()); } private void printUsage(Options opts) { new HelpFormatter().printHelp("Client", opts); } public boolean init(String[] args) throws ParseException { Options opts = new Options(); opts.addOption("appname", true, "Application Name. Default value - UnmanagedAM"); opts.addOption("priority", true, "Application Priority. Default 0"); opts.addOption("queue", true, "RM Queue in which this application is to be submitted"); opts.addOption("master_memory", true, "Amount of memory in MB to be requested to run the application master"); opts.addOption("cmd", true, "command to start unmanaged AM (required)"); opts.addOption("classpath", true, "additional classpath"); opts.addOption("help", false, "Print usage"); CommandLine cliParser = new GnuParser().parse(opts, args); if (args.length == 0) { printUsage(opts); throw new IllegalArgumentException( "No args specified for client to initialize"); } if (cliParser.hasOption("help")) { printUsage(opts); return false; } appName = cliParser.getOptionValue("appname", "UnmanagedAM"); amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0")); amQueue = cliParser.getOptionValue("queue", "default"); classpath = cliParser.getOptionValue("classpath", null); amCmd = cliParser.getOptionValue("cmd"); if (amCmd == null) { printUsage(opts); throw new IllegalArgumentException( "No cmd specified for application master"); } YarnConfiguration yarnConf = new YarnConfiguration(conf); rmClient = YarnClient.createYarnClient(); rmClient.init(yarnConf); return true; } public void launchAM(ApplicationAttemptId attemptId) throws IOException, YarnException { Credentials credentials = new Credentials(); Token<AMRMTokenIdentifier> token = rmClient.getAMRMToken(attemptId.getApplicationId()); // Service will be empty but that's okay, we are just passing down only // AMRMToken down to the real AM which eventually sets the correct // service-address. 
credentials.addToken(token.getService(), token); File tokenFile = File.createTempFile("unmanagedAMRMToken","", new File(System.getProperty("user.dir"))); try { FileUtil.chmod(tokenFile.getAbsolutePath(), "600"); } catch (InterruptedException ex) { throw new RuntimeException(ex); } tokenFile.deleteOnExit(); try (DataOutputStream os = new DataOutputStream( new FileOutputStream(tokenFile, true))) { credentials.writeTokenStorageToStream(os); } Map<String, String> env = System.getenv(); ArrayList<String> envAMList = new ArrayList<String>(); boolean setClasspath = false; for (Map.Entry<String, String> entry : env.entrySet()) { String key = entry.getKey(); String value = entry.getValue(); if(key.equals("CLASSPATH")) { setClasspath = true; if(classpath != null) { value = value + File.pathSeparator + classpath; } } envAMList.add(key + "=" + value); } if(!setClasspath && classpath!=null) { envAMList.add("CLASSPATH="+classpath); } ContainerId containerId = ContainerId.newContainerId(attemptId, 0); String hostname = InetAddress.getLocalHost().getHostName(); envAMList.add(Environment.CONTAINER_ID.name() + "=" + containerId); envAMList.add(Environment.NM_HOST.name() + "=" + hostname); envAMList.add(Environment.NM_HTTP_PORT.name() + "=0"); envAMList.add(Environment.NM_PORT.name() + "=0"); envAMList.add(Environment.LOCAL_DIRS.name() + "= /tmp"); envAMList.add(ApplicationConstants.APP_SUBMIT_TIME_ENV + "=" + System.currentTimeMillis()); envAMList.add(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME + "=" + tokenFile.getAbsolutePath()); String[] envAM = new String[envAMList.size()]; Process amProc = Runtime.getRuntime().exec(amCmd, envAMList.toArray(envAM)); final BufferedReader errReader = new BufferedReader(new InputStreamReader( amProc.getErrorStream(), Charset.forName("UTF-8"))); final BufferedReader inReader = new BufferedReader(new InputStreamReader( amProc.getInputStream(), Charset.forName("UTF-8"))); // read error and input streams as this would free up the buffers // free the error stream buffer Thread errThread = new Thread() { @Override public void run() { try { String line = errReader.readLine(); while((line != null) && !isInterrupted()) { System.err.println(line); line = errReader.readLine(); } } catch(IOException ioe) { LOG.warn("Error reading the error stream", ioe); } } }; Thread outThread = new Thread() { @Override public void run() { try { String line = inReader.readLine(); while((line != null) && !isInterrupted()) { System.out.println(line); line = inReader.readLine(); } } catch(IOException ioe) { LOG.warn("Error reading the out stream", ioe); } } }; try { errThread.start(); outThread.start(); } catch (IllegalStateException ise) { } // wait for the process to finish and check the exit code try { int exitCode = amProc.waitFor(); LOG.info("AM process exited with value: " + exitCode); } catch (InterruptedException e) { e.printStackTrace(); } finally { amCompleted = true; } try { // make sure that the error thread exits // on Windows these threads sometimes get stuck and hang the execution // timeout and join later after destroying the process. 
errThread.join(); outThread.join(); errReader.close(); inReader.close(); } catch (InterruptedException ie) { LOG.info("ShellExecutor: Interrupted while reading the error/out stream", ie); } catch (IOException ioe) { LOG.warn("Error while closing the error/out stream", ioe); } amProc.destroy(); } public boolean run() throws IOException, YarnException { LOG.info("Starting Client"); // Connect to ResourceManager rmClient.start(); try { // Create launch context for app master LOG.info("Setting up application submission context for ASM"); ApplicationSubmissionContext appContext = rmClient.createApplication() .getApplicationSubmissionContext(); ApplicationId appId = appContext.getApplicationId(); // set the application name appContext.setApplicationName(appName); // Set the priority for the application master Priority pri = Records.newRecord(Priority.class); pri.setPriority(amPriority); appContext.setPriority(pri); // Set the queue to which this application is to be submitted in the RM appContext.setQueue(amQueue); // Set up the container launch context for the application master ContainerLaunchContext amContainer = Records .newRecord(ContainerLaunchContext.class); appContext.setAMContainerSpec(amContainer); // unmanaged AM appContext.setUnmanagedAM(true); LOG.info("Setting unmanaged AM"); // Submit the application to the applications manager LOG.info("Submitting application to ASM"); rmClient.submitApplication(appContext); ApplicationReport appReport = monitorApplication(appId, EnumSet.of(YarnApplicationState.ACCEPTED, YarnApplicationState.KILLED, YarnApplicationState.FAILED, YarnApplicationState.FINISHED)); if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) { // Monitor the application attempt to wait for launch state ApplicationAttemptReport attemptReport = monitorCurrentAppAttempt(appId, YarnApplicationAttemptState.LAUNCHED); ApplicationAttemptId attemptId = attemptReport.getApplicationAttemptId(); LOG.info("Launching AM with application attempt id " + attemptId); // launch AM launchAM(attemptId); // Monitor the application for end state appReport = monitorApplication(appId, EnumSet.of(YarnApplicationState.KILLED, YarnApplicationState.FAILED, YarnApplicationState.FINISHED)); } YarnApplicationState appState = appReport.getYarnApplicationState(); FinalApplicationStatus appStatus = appReport.getFinalApplicationStatus(); LOG.info("App ended with state: " + appReport.getYarnApplicationState() + " and status: " + appStatus); boolean success; if (YarnApplicationState.FINISHED == appState && FinalApplicationStatus.SUCCEEDED == appStatus) { LOG.info("Application has completed successfully."); success = true; } else { LOG.info("Application did finished unsuccessfully." 
+ " YarnState=" + appState.toString() + ", FinalStatus=" + appStatus.toString()); success = false; } return success; } finally { rmClient.stop(); } } private ApplicationAttemptReport monitorCurrentAppAttempt( ApplicationId appId, YarnApplicationAttemptState attemptState) throws YarnException, IOException { long startTime = System.currentTimeMillis(); ApplicationAttemptId attemptId = null; while (true) { if (attemptId == null) { attemptId = rmClient.getApplicationReport(appId) .getCurrentApplicationAttemptId(); } ApplicationAttemptReport attemptReport = null; if (attemptId != null) { attemptReport = rmClient.getApplicationAttemptReport(attemptId); if (attemptState.equals(attemptReport.getYarnApplicationAttemptState())) { return attemptReport; } } LOG.info("Current attempt state of " + appId + " is " + (attemptReport == null ? " N/A " : attemptReport.getYarnApplicationAttemptState()) + ", waiting for current attempt to reach " + attemptState); try { Thread.sleep(1000); } catch (InterruptedException e) { LOG.warn("Interrupted while waiting for current attempt of " + appId + " to reach " + attemptState); } if (System.currentTimeMillis() - startTime > AM_STATE_WAIT_TIMEOUT_MS) { String errmsg = "Timeout for waiting current attempt of " + appId + " to reach " + attemptState; LOG.error(errmsg); throw new RuntimeException(errmsg); } } } /** * Monitor the submitted application for completion. Kill application if time * expires. * * @param appId * Application Id of application to be monitored * @return true if application completed successfully * @throws YarnException * @throws IOException */ private ApplicationReport monitorApplication(ApplicationId appId, Set<YarnApplicationState> finalState) throws YarnException, IOException { long foundAMCompletedTime = 0; StringBuilder expectedFinalState = new StringBuilder(); boolean first = true; for (YarnApplicationState state : finalState) { if (first) { first = false; expectedFinalState.append(state.name()); } else { expectedFinalState.append("," + state.name()); } } while (true) { // Check app status every 1 second. try { Thread.sleep(1000); } catch (InterruptedException e) { LOG.debug("Thread sleep in monitoring loop interrupted"); } // Get application report for the appId we are interested in ApplicationReport report = rmClient.getApplicationReport(appId); LOG.info("Got application report from ASM for" + ", appId=" + appId.getId() + ", appAttemptId=" + report.getCurrentApplicationAttemptId() + ", clientToAMToken=" + report.getClientToAMToken() + ", appDiagnostics=" + report.getDiagnostics() + ", appMasterHost=" + report.getHost() + ", appQueue=" + report.getQueue() + ", appMasterRpcPort=" + report.getRpcPort() + ", appStartTime=" + report.getStartTime() + ", yarnAppState=" + report.getYarnApplicationState().toString() + ", distributedFinalState=" + report.getFinalApplicationStatus().toString() + ", appTrackingUrl=" + report.getTrackingUrl() + ", appUser=" + report.getUser()); YarnApplicationState state = report.getYarnApplicationState(); if (finalState.contains(state)) { return report; } // wait for 10 seconds after process has completed for app report to // come back if (amCompleted) { if (foundAMCompletedTime == 0) { foundAMCompletedTime = System.currentTimeMillis(); } else if ((System.currentTimeMillis() - foundAMCompletedTime) > AM_STATE_WAIT_TIMEOUT_MS) { LOG.warn("Waited " + AM_STATE_WAIT_TIMEOUT_MS/1000 + " seconds after process completed for AppReport" + " to reach desired final state. Not waiting anymore." 
+ "CurrentState = " + state + ", ExpectedStates = " + expectedFinalState.toString()); throw new RuntimeException("Failed to receive final expected state" + " in ApplicationReport" + ", CurrentState=" + state + ", ExpectedStates=" + expectedFinalState.toString()); } } } } }
[]
[]
[]
[]
[]
java
0
0
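The launcher above hands context to the spawned AM purely through environment variables (container id, NM host/port, token file path). A small Go sketch of that hand-off pattern, assuming a hypothetical child command; the variable names mirror the ones set in launchAM:

// Sketch only: start a child process with the parent environment plus
// launcher-provided variables, as the unmanaged AM launcher does in Java.
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("/bin/sh", "-c", `echo "container: $CONTAINER_ID host: $NM_HOST"`)
	// Start from the parent environment and append the launcher-provided values.
	cmd.Env = append(os.Environ(),
		"CONTAINER_ID=container_1_0001_01_000000",
		"NM_HOST=localhost",
		"NM_PORT=0",
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}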
setup.py
# -*- coding: utf-8 -*-
"""
    Setup file for sabato.
    Use setup.cfg to configure your project.

    This file was generated with PyScaffold 3.2.3.
    PyScaffold helps you to put up the scaffold of your new
    Python project. Learn more under: https://pyscaffold.org/
"""
import sys

from pkg_resources import VersionConflict, require
from setuptools import setup

try:
    require('setuptools>=38.3')
except VersionConflict:
    print("Error: version of setuptools is too old (<38.3)!")
    sys.exit(1)


if __name__ == "__main__":
    setup(use_pyscaffold=True)
[]
[]
[]
[]
[]
python
null
null
null
cmd/breakCaesar/breakCaesar.go
package main import "fmt" import "io/ioutil" import "log" import "os" import "github.com/karlek/tombraid/broken" import "github.com/karlek/tombraid/frequency" import "github.com/karlek/tombraid/caesar" const ( charset = "abcdefghijklmnopqrstuvwxyz" ) func main() { err := breakCaesar() if err != nil { log.Println(err) } } func breakCaesar() (err error) { buf, err := ioutil.ReadFile(os.Getenv("GOPATH") + "/src/github.com/karlek/tombraid/message.txt") if err != nil { return err } message := string(buf) csLen := len(charset) isFreqBroken := false isBruteBroken := false for i := 0; i < csLen; i++ { cipher, err := broken.Caesar(charset, message, i) if err != nil { return err } commonChars := frequency.EnglishAnalysis(cipher) freqGuesses, err := caesar.FrequencyAttack(commonChars, charset, cipher) if err != nil { return err } bruteGuesses, err := caesar.BruteAttack(charset, cipher) if err != nil { return err } for _, guess := range freqGuesses { if guess == message { isFreqBroken = true } } for _, guess := range bruteGuesses { if guess == message { isBruteBroken = true } } } switch { case isFreqBroken && isBruteBroken: fmt.Println("All attacks successful!") case isFreqBroken: fmt.Println("Only frequency attack worked!") case isBruteBroken: fmt.Println("Only brute force attack worked!") default: fmt.Println("No attack broke the cipher!") } return nil }
[ "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
tools/train_face_netDivFC.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. r""" Basic training script for PyTorch """ # Set up custom environment before nearly anything else is imported # NOTE: this should be the first import (no not reorder) import os,sys sys.path.insert(0,'/data/hongwei/face_benchmark') from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip #172.24.42.80 import argparse # os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6,7" import torch import torchvision.transforms as T from maskrcnn_benchmark.config import face_cfg as cfg from maskrcnn_benchmark.data import make_face_data_loader from maskrcnn_benchmark.solver import make_lr_scheduler from maskrcnn_benchmark.solver import make_optimizer from maskrcnn_benchmark.engine.inference import inference from maskrcnn_benchmark.engine import do_face_train_dist_DIV_FC,do_face_train_dist from maskrcnn_benchmark.modeling.face_reg import build_dist_face_trainer from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer from maskrcnn_benchmark.utils.collect_env import collect_env_info from maskrcnn_benchmark.utils.comm import synchronize, \ get_rank from maskrcnn_benchmark.utils.imports import import_file from maskrcnn_benchmark.utils.logger import setup_logger from maskrcnn_benchmark.utils.miscellaneous import mkdir from maskrcnn_benchmark.modeling.face_reg import FaceDistributedDataParallel from maskrcnn_benchmark.modeling.face_reg import face_trainer try: from apex import amp except ImportError: raise ImportError('Use APEX for multi-precision via apex.amp') def train(cfg, local_rank, distributed): model,head = build_dist_face_trainer(cfg,local_rank) device = torch.device(cfg.MODEL.DEVICE) model.to(device) if cfg.MODEL.USE_SYNCBN: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) if True: model = FaceDistributedDataParallel( model, device_ids=local_rank, output_device=local_rank, # this should be removed if we update BatchNorm stats broadcast_buffers=False,chunk_sizes=None, #[32,56,56,56] ) head_local_rank=None if len(local_rank)==1: head_local_rank = local_rank head = FaceDistributedDataParallel( head, device_ids=head_local_rank, output_device=head_local_rank, # this should be removed if we update BatchNorm stats broadcast_buffers=False, ) model = torch.nn.Sequential(*[model, head]) optimizer = make_optimizer(cfg, model) scheduler = make_lr_scheduler(cfg, optimizer) # head_optimizer = make_optimizer(cfg, head) # head_scheduler = make_lr_scheduler(cfg, head_optimizer) # Initialize mixed-precision training use_mixed_precision = cfg.DTYPE == "float16" amp_opt_level = 'O1' if use_mixed_precision else 'O0' model, optimizer = amp.initialize(model, optimizer, opt_level=amp_opt_level) # head, head_optimizer = amp.initialize(head, head_optimizer, opt_level=amp_opt_level) arguments = {} arguments["iteration"] = 0 output_dir = cfg.OUTPUT_DIR save_to_disk = get_rank() == 0 checkpointer = DetectronCheckpointer( cfg, model, optimizer, scheduler, output_dir, save_to_disk ) # head_checkpointer = DetectronCheckpointer( # cfg, head, head_optimizer, head_scheduler, output_dir, save_to_disk # ) extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT) arguments.update(extra_checkpoint_data) #### init transforms ##### transforms = T.Compose( [ T.RandomCrop( (cfg.INPUT.SIZE_TRAIN[0], cfg.INPUT.SIZE_TRAIN[1]) ), T.RandomHorizontalFlip(), T.ToTensor(), T.Normalize(mean=cfg.INPUT.RGB_MEAN, std=cfg.INPUT.RGB_STD), ] ) data_loader = make_face_data_loader( cfg, is_train=True, is_distributed=distributed, 
start_iter=arguments["iteration"], transforms=transforms, ) test_period = cfg.SOLVER.TEST_PERIOD checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD divs_nums = cfg.SOLVER.DIVS_NUMS_PER_BATCH do_face_train_dist_DIV_FC( cfg, model,#[model,head], data_loader, None, optimizer,#[optimizer,head_optimizer], scheduler,#[scheduler,head_scheduler], checkpointer,#[checkpointer,head_checkpointer], device, checkpoint_period, test_period, arguments, divs_nums, ) return model def run_test(cfg, model, distributed): if distributed: model = model.module torch.cuda.empty_cache() # TODO check if it helps iou_types = ("bbox",) if cfg.MODEL.MASK_ON: iou_types = iou_types + ("segm",) if cfg.MODEL.KEYPOINT_ON: iou_types = iou_types + ("keypoints",) output_folders = [None] * len(cfg.DATASETS.TEST) dataset_names = cfg.DATASETS.TEST if cfg.OUTPUT_DIR: for idx, dataset_name in enumerate(dataset_names): output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name) mkdir(output_folder) output_folders[idx] = output_folder data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed) for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val): inference( model, data_loader_val, dataset_name=dataset_name, iou_types=iou_types, box_only=False if cfg.MODEL.FCOS_ON or cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY, device=cfg.MODEL.DEVICE, expected_results=cfg.TEST.EXPECTED_RESULTS, expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL, output_folder=output_folder, ) synchronize() def main(): parser = argparse.ArgumentParser(description="PyTorch Object Detection Training") parser.add_argument( "--config-file", default="", metavar="FILE", help="path to config file", type=str, ) parser.add_argument("--local_rank", type=int, default=0) parser.add_argument("--ngpu_shared_fc", type=list, default=1) parser.add_argument( "--skip-test", dest="skip_test", help="Do not test the final model", action="store_true", ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) args = parser.parse_args() num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1 args.distributed = num_gpus > 1 size = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1 "MASTER_ADDR" "MASTER_PORT" "RANK" "WORLD_SIZE" if True: torch.cuda.set_device(args.local_rank) torch.distributed.init_process_group( backend="nccl", init_method="env://",#rank=args.local_rank,world_size=size ) synchronize() cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() output_dir = cfg.OUTPUT_DIR if output_dir: mkdir(output_dir) logger = setup_logger("maskrcnn_benchmark", output_dir, get_rank()) logger.info("Using {} GPUs".format(num_gpus)) logger.info(args) logger.info("Collecting env info (might take some time)") logger.info("\n" + collect_env_info()) logger.info("Loaded configuration file {}".format(args.config_file)) with open(args.config_file, "r") as cf: config_str = "\n" + cf.read() logger.info(config_str) logger.info("Running with config:\n{}".format(cfg)) proc_gpus = [int(i) for i in args.ngpu_shared_fc] model = train(cfg,proc_gpus, args.distributed) if not args.skip_test: run_test(cfg, model, args.distributed) if __name__ == "__main__": main() ##--nnodes=2 --node_rank=0 --master_addr="192.168.1.1" ## 杀掉所有python进程 ps aux|grep python|grep -v grep|grep -v usr|cut -c 9-15|xargs kill -9 # python tools/Muti_GPUS_Train.py --ngpus_per_node=8 --npgpu_per_proc=1 
tools/train_face_netDivFC.py --skip-test --config-file configs/face_reg/face_net_msra_celeb.yaml DATALOADER.NUM_WORKERS 16 OUTPUT_DIR
[]
[]
[ "CUDA_VISIBLE_DEVICES", "WORLD_SIZE" ]
[]
["CUDA_VISIBLE_DEVICES", "WORLD_SIZE"]
python
2
0
internal/env/env.go
// Package env holds environment variables used by the application. They differ
// in use from the fflags package, which also uses environment variables but is
// only used to enable or disable feature flags.
package env

import "os"

type Vars struct {
}

//
// Options
//

func (v *Vars) LogFile() string {
	return os.Getenv("KICK_LOG")
}

//
// Development
//

// Debug turns debug logging on. See di.DI
func (v *Vars) Debug() bool {
	return os.Getenv("KICK_DEBUG") == "true"
}
[ "\"KICK_LOG\"", "\"KICK_DEBUG\"" ]
[]
[ "KICK_LOG", "KICK_DEBUG" ]
[]
["KICK_LOG", "KICK_DEBUG"]
go
2
0
config/wsgi.py
""" WSGI config for Django GeoIP Middleware project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os import sys from django.core.wsgi import get_wsgi_application # This allows easy placement of apps within the interior # django_geoip2_middleware directory. app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '') sys.path.append(os.path.join(app_path, 'django_geoip2_middleware')) # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
[]
[]
[ "DJANGO_SETTINGS_MODULE" ]
[]
["DJANGO_SETTINGS_MODULE"]
python
1
0
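os.environ.setdefault in the module above only applies the production settings module when DJANGO_SETTINGS_MODULE is not already set, so an existing value keeps precedence. The same "set only if absent" behaviour sketched in Go, purely for illustration:

// Sketch only: mimic os.environ.setdefault for an environment variable.
package main

import (
	"fmt"
	"os"
)

func setenvDefault(key, value string) {
	// Only set the variable when it is not already present in the environment.
	if _, ok := os.LookupEnv(key); !ok {
		os.Setenv(key, value)
	}
}

func main() {
	setenvDefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
	fmt.Println(os.Getenv("DJANGO_SETTINGS_MODULE"))
}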
cmd/event-driven/listener/confirmation/main.go
package main import ( "go-kafka-example/utils" "os" ) func main() { // give your group name and custom group handler utils.LoadConfigs() utils.GetNewConsumerGroup("confirmation-worker", os.Getenv("PROVIDER_TOPIC"), utils.ConsumerGroupHandler{}) }
[ "\"PROVIDER_TOPIC\"" ]
[]
[ "PROVIDER_TOPIC" ]
[]
["PROVIDER_TOPIC"]
go
1
0
final_project/machinetranslation/translator.py
import os

from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv

load_dotenv()

apikey = os.environ['apikey']
url = os.environ['url']

authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
    version='2022-01-17',
    authenticator=authenticator
)
language_translator.set_service_url(url)


def english_to_french(eng_text):
    frenchtext = None
    if eng_text is not None:
        frenchtext = language_translator.translate(
            text=eng_text, model_id='en-fr').get_result()['translations'][0]['translation']
    return frenchtext


def french_to_english(fre_text):
    englishtext = None
    if fre_text is not None:
        englishtext = language_translator.translate(
            text=fre_text, model_id='fr-en').get_result()['translations'][0]['translation']
    return englishtext
[]
[]
[ "url", "apikey" ]
[]
["url", "apikey"]
python
2
0
worker/executor.go
package main import ( "bytes" "encoding/json" "fmt" "io/ioutil" "net" "net/http" "os" "runtime" "strconv" "sync" "github.com/sethgrid/pester" docker "github.com/fsouza/go-dockerclient" . "github.com/smartfog/fogflow/common/config" . "github.com/smartfog/fogflow/common/datamodel" . "github.com/smartfog/fogflow/common/ngsi" ) type taskContext struct { ListeningPort string Subscriptions []string EntityID2SubID map[string]string OutputStreams []EntityId ContainerID string } type pullResult struct { imageRef string err error } type Executor struct { client *docker.Client workerCfg *Config brokerURL string taskInstances map[string]*taskContext taskMap_lock sync.RWMutex } func (e *Executor) Init(cfg *Config, selectedBrokerURL string) bool { // for Windows if runtime.GOOS == "windows" { endpoint := os.Getenv("DOCKER_HOST") path := os.Getenv("DOCKER_CERT_PATH") ca := fmt.Sprintf("%s/ca.pem", path) cert := fmt.Sprintf("%s/cert.pem", path) key := fmt.Sprintf("%s/key.pem", path) client, err := docker.NewTLSClient(endpoint, cert, key, ca) if err != nil || client == nil { INFO.Println("Couldn't connect to docker: %v", err) return false } e.client = client } else { // for Linux endpoint := "unix:///var/run/docker.sock" client, err := docker.NewClient(endpoint) if err != nil || client == nil { INFO.Println("Couldn't connect to docker: %v", err) return false } e.client = client } e.workerCfg = cfg e.brokerURL = selectedBrokerURL e.taskInstances = make(map[string]*taskContext) return true } func (e *Executor) Shutdown() { e.terminateAllTasks() } func (e *Executor) GetNumOfTasks() int { e.taskMap_lock.RLock() defer e.taskMap_lock.RUnlock() return len(e.taskInstances) } func (e *Executor) ListImages() { imgs, _ := e.client.ListImages(docker.ListImagesOptions{All: false}) for _, img := range imgs { fmt.Println("ID: ", img.ID) fmt.Println("RepoTags: ", img.RepoTags) fmt.Println("Created: ", img.Created) fmt.Println("Size: ", img.Size) fmt.Println("VirtualSize: ", img.VirtualSize) fmt.Println("ParentId: ", img.ParentID) } } func (e *Executor) InspectImage(dockerImage string) bool { _, err := e.client.InspectImage(dockerImage) if err != nil { INFO.Printf("operator image %s does not exist locally\r\n", dockerImage) return false } else { INFO.Printf("operator image %s exists locally\r\n", dockerImage) return true } } func (e *Executor) PullImage(dockerImage string, tag string) (string, error) { auth := docker.AuthConfiguration{} if e.workerCfg.Worker.Registry.IsConfigured() == true { auth.Username = e.workerCfg.Worker.Registry.Username auth.Password = e.workerCfg.Worker.Registry.Password auth.Email = e.workerCfg.Worker.Registry.Email auth.ServerAddress = e.workerCfg.Worker.Registry.ServerAddress dockerImage = dockerImage } fmt.Printf("options : %+v\r\n", auth) opts := docker.PullImageOptions{ Repository: dockerImage, Tag: tag, } fmt.Printf("options : %+v\r\n", opts) err := e.client.PullImage(opts, auth) if err != nil { ERROR.Printf("failed to pull this image %s, error %v\r\n", dockerImage, err) return "", err } // check if the image exists now resp, err := e.client.InspectImage(dockerImage) if err != nil { ERROR.Printf("the image %s does not exist, even throug it has been pulled, error %v\r\n", dockerImage, err) return "", err } if resp == nil { return "", nil } imageRef := resp.ID if len(resp.RepoDigests) > 0 { imageRef = resp.RepoDigests[0] } INFO.Println("fetched image ", dockerImage) return imageRef, nil } func (e *Executor) ListContainers() { containers, _ := 
e.client.ListContainers(docker.ListContainersOptions{All: true}) for _, container := range containers { fmt.Println("Name: ", container.Names) } } func (e *Executor) startContainerWithBridge(dockerImage string, portNum string) (string, error) { // prepare the configuration for a docker container config := docker.Config{Image: dockerImage} portBindings := map[docker.Port][]docker.PortBinding{ "8080/tcp": []docker.PortBinding{docker.PortBinding{HostIP: "0.0.0.0", HostPort: portNum}}} hostConfig := docker.HostConfig{PortBindings: portBindings} containerOptions := docker.CreateContainerOptions{Config: &config, HostConfig: &hostConfig} // create a new docker container container, err := e.client.CreateContainer(containerOptions) if err != nil { ERROR.Println(err) return "", err } // start the new container err = e.client.StartContainer(container.ID, &hostConfig) if err != nil { ERROR.Println(err) return "", err } return container.ID, nil } func (e *Executor) writeTempFile(fileName string, fileContent string) { content := []byte(fileContent) tmpfile, err := os.Create(fileName) if err != nil { ERROR.Println(err) } if _, err := tmpfile.Write(content); err != nil { ERROR.Println(err) } if err := tmpfile.Close(); err != nil { ERROR.Println(err) } } func (e *Executor) startContainer(dockerImage string, portNum string, functionCode string, taskID string) (string, error) { // prepare the configuration for a docker container, host mode for the container network evs := make([]string, 0) evs = append(evs, fmt.Sprintf("myport=%s", portNum)) config := docker.Config{Image: dockerImage, Env: evs} hostConfig := docker.HostConfig{} hostConfig.NetworkMode = "host" hostConfig.AutoRemove = e.workerCfg.Worker.ContainerAutoRemove if functionCode != "" { fileName := "/tmp/" + taskID e.writeTempFile(fileName, functionCode) mount := docker.HostMount{} mount.Source = fileName mount.Target = "/app/function.js" mount.ReadOnly = true mount.Type = "bind" DEBUG.Println("mounting configuration ", mount) hostConfig.Mounts = make([]docker.HostMount, 0) hostConfig.Mounts = append(hostConfig.Mounts, mount) } containerOptions := docker.CreateContainerOptions{Config: &config, HostConfig: &hostConfig} // create a new docker container container, err := e.client.CreateContainer(containerOptions) if err != nil { ERROR.Println(err) return "", err } // start the new container err = e.client.StartContainer(container.ID, &hostConfig) if err != nil { ERROR.Println(err) return "", err } return container.ID, nil } // Ask the kernel for a free open port that is ready to use func (e *Executor) findFreePortNumber() int { addr, err := net.ResolveTCPAddr("tcp", "localhost:0") if err != nil { panic(err) } l, err := net.ListenTCP("tcp", addr) if err != nil { panic(err) } defer l.Close() return l.Addr().(*net.TCPAddr).Port } func (e *Executor) LaunchTask(task *ScheduledTaskInstance) bool { dockerImage := task.DockerImage INFO.Println("to execute Task ", task.ID, " to perform Operation ", dockerImage) // first check the image locally if e.InspectImage(dockerImage) == false { // if the image does not exist locally, try to fetch it from docker hub _, pullError := e.PullImage(dockerImage, "latest") if pullError != nil { ERROR.Printf("failed to fetch the image %s\r\n", task.DockerImage) return false } } taskCtx := taskContext{} taskCtx.EntityID2SubID = make(map[string]string) // find a free listening port number available on the host machine freePort := strconv.Itoa(e.findFreePortNumber()) // function code functionCode := task.FunctionCode // start a 
container to run the scheduled task instance containerId, err := e.startContainer(dockerImage, freePort, functionCode, task.ID) if err != nil { ERROR.Println(err) return false } INFO.Printf(" task %s started within container = %s\n", task.ID, containerId) taskCtx.ListeningPort = freePort taskCtx.ContainerID = containerId // configure the task with its output streams via its admin interface commands := make([]interface{}, 0) // set broker URL setBrokerCmd := make(map[string]interface{}) setBrokerCmd["command"] = "CONNECT_BROKER" setBrokerCmd["brokerURL"] = e.brokerURL commands = append(commands, setBrokerCmd) // pass the reference URL to the task so that the task can issue context subscription as well setReferenceCmd := make(map[string]interface{}) setReferenceCmd["command"] = "SET_REFERENCE" setReferenceCmd["url"] = "http://" + e.workerCfg.InternalIP + ":" + freePort commands = append(commands, setReferenceCmd) // set output stream for _, outStream := range task.Outputs { setOutputCmd := make(map[string]interface{}) setOutputCmd["command"] = "SET_OUTPUTS" setOutputCmd["type"] = outStream.Type setOutputCmd["id"] = outStream.StreamID commands = append(commands, setOutputCmd) // record its outputs var eid EntityId eid.ID = outStream.StreamID eid.Type = outStream.Type eid.IsPattern = false taskCtx.OutputStreams = append(taskCtx.OutputStreams, eid) } INFO.Printf("configure the task with %+v, via port %s\r\n", commands, freePort) if e.configurateTask(freePort, commands) == false { ERROR.Println("failed to configure the task instance") return false } INFO.Printf("subscribe its input streams") // subscribe input streams on behalf of the launched task taskCtx.Subscriptions = make([]string, 0) for _, streamType := range task.Inputs { for _, streamId := range streamType.Streams { subID, err := e.subscribeInputStream(freePort, streamType.Type, streamId) if err == nil { fmt.Println("===========subID = ", subID) taskCtx.Subscriptions = append(taskCtx.Subscriptions, subID) taskCtx.EntityID2SubID[streamId] = subID } else { fmt.Println(err) } } } // update the task list e.taskMap_lock.Lock() e.taskInstances[task.ID] = &taskCtx e.taskMap_lock.Unlock() INFO.Printf("register this task") // register this new task entity to IoT Broker e.registerTask(task, freePort, containerId) return true } func (e *Executor) configurateTask(port string, commands []interface{}) bool { taskAdminURL := fmt.Sprintf("http://%s:%s/admin", e.workerCfg.InternalIP, port) jsonText, _ := json.Marshal(commands) INFO.Println(taskAdminURL) INFO.Printf("configuration: %s\r\n", string(jsonText)) req, _ := http.NewRequest("POST", taskAdminURL, bytes.NewBuffer(jsonText)) req.Header.Set("Content-Type", "application/json") client := pester.New() client.MaxRetries = 30 client.Backoff = pester.LinearBackoff resp, err := client.Do(req) if err != nil { ERROR.Println(err) return false } defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) INFO.Println("task on port ", port, " has been configured with parameters ", jsonText) INFO.Println("response Body:", string(body)) return true } func (e *Executor) registerTask(task *ScheduledTaskInstance, portNum string, containerID string) { ctxObj := ContextObject{} ctxObj.Entity.ID = "Task." 
+ task.ID ctxObj.Entity.Type = "Task" ctxObj.Entity.IsPattern = false ctxObj.Attributes = make(map[string]ValueObject) ctxObj.Attributes["id"] = ValueObject{Type: "string", Value: task.ID} ctxObj.Attributes["port"] = ValueObject{Type: "string", Value: portNum} ctxObj.Attributes["status"] = ValueObject{Type: "string", Value: task.Status} ctxObj.Attributes["worker"] = ValueObject{Type: "string", Value: task.WorkerID} ctxObj.Metadata = make(map[string]ValueObject) ctxObj.Metadata["topology"] = ValueObject{Type: "string", Value: task.ServiceName} ctxObj.Metadata["worker"] = ValueObject{Type: "string", Value: task.WorkerID} client := NGSI10Client{IoTBrokerURL: e.brokerURL} err := client.UpdateContext(&ctxObj) if err != nil { fmt.Println(err) } } func (e *Executor) updateTask(taskID string, status string) { ctxObj := ContextObject{} ctxObj.Entity.ID = "Task." + taskID ctxObj.Entity.Type = "Task" ctxObj.Entity.IsPattern = false ctxObj.Attributes = make(map[string]ValueObject) ctxObj.Attributes["status"] = ValueObject{Type: "string", Value: status} client := NGSI10Client{IoTBrokerURL: e.brokerURL} err := client.UpdateContext(&ctxObj) if err != nil { fmt.Println(err) } } func (e *Executor) deregisterTask(taskID string) { entity := EntityId{} entity.ID = "Task." + taskID entity.Type = "Task" entity.IsPattern = false client := NGSI10Client{IoTBrokerURL: e.brokerURL} err := client.DeleteContext(&entity) if err != nil { fmt.Println(err) } } func (e *Executor) subscribeInputStream(agentPort string, streamType string, streamId string) (string, error) { subscription := SubscribeContextRequest{} newEntity := EntityId{} if len(streamId) > 0 { // for a specific context entity newEntity.IsPattern = false newEntity.Type = streamType newEntity.ID = streamId } else { // for all context entities with a specific type newEntity.Type = streamType newEntity.IsPattern = true } subscription.Entities = make([]EntityId, 0) subscription.Entities = append(subscription.Entities, newEntity) subscription.Reference = "http://" + e.workerCfg.InternalIP + ":" + agentPort fmt.Printf(" =========== issue the following subscription =========== %+v\r\n", subscription) client := NGSI10Client{IoTBrokerURL: e.brokerURL} sid, err := client.SubscribeContext(&subscription, true) if err != nil { fmt.Println(err) return "", err } else { return sid, nil } } func (e *Executor) unsubscribeInputStream(sid string) error { client := NGSI10Client{IoTBrokerURL: e.brokerURL} err := client.UnsubscribeContext(sid) if err != nil { fmt.Println(err) return err } else { return nil } } func (e *Executor) deleteOuputStream(eid *EntityId) error { client := NGSI10Client{IoTBrokerURL: e.brokerURL} err := client.DeleteContext(eid) if err != nil { fmt.Println(err) return err } else { return nil } } func (e *Executor) PauseTask(taskID string) { e.taskMap_lock.RLock() defer e.taskMap_lock.RUnlock() containerID := e.taskInstances[taskID].ContainerID err := e.client.PauseContainer(containerID) if err != nil { ERROR.Println(err) } } func (e *Executor) ResumeTask(taskID string) { e.taskMap_lock.RLock() defer e.taskMap_lock.RUnlock() containerID := e.taskInstances[taskID].ContainerID err := e.client.UnpauseContainer(containerID) if err != nil { ERROR.Println(err) } } func (e *Executor) TerminateTask(taskID string, paused bool) { INFO.Println("================== terminate task ID ============ ", taskID) e.taskMap_lock.Lock() if _, ok := e.taskInstances[taskID]; ok == false { e.taskMap_lock.Unlock() return } containerID := e.taskInstances[taskID].ContainerID 
e.taskMap_lock.Unlock() //stop the container first go e.client.StopContainer(containerID, 1) INFO.Printf(" task %s terminate from the container = %s\n", taskID, containerID) e.taskMap_lock.Lock() // issue unsubscribe for _, subID := range e.taskInstances[taskID].Subscriptions { INFO.Println("issued subscription: ", subID) err := e.unsubscribeInputStream(subID) if err != nil { ERROR.Println(err) } INFO.Printf(" subscriptions (%s) have been canceled\n", subID) } // delete the output streams of the terminated task for _, outStream := range e.taskInstances[taskID].OutputStreams { e.deleteOuputStream(&outStream) } delete(e.taskInstances, taskID) e.taskMap_lock.Unlock() if paused == true { // only update its status go e.updateTask(taskID, "paused") } else { // deregister this task entity go e.deregisterTask(taskID) } } func (e *Executor) terminateAllTasks() { var wg sync.WaitGroup wg.Add(len(e.taskInstances)) for taskID, _ := range e.taskInstances { go func(tID string) { defer wg.Done() e.TerminateTask(tID, false) }(taskID) } wg.Wait() } func (e *Executor) onAddInput(flow *FlowInfo) { e.taskMap_lock.Lock() defer e.taskMap_lock.Unlock() taskCtx := e.taskInstances[flow.TaskInstanceID] if taskCtx == nil { return } subID, err := e.subscribeInputStream(taskCtx.ListeningPort, flow.EntityType, flow.EntityID) if err == nil { fmt.Println("===========subscribe new input = ", flow, " , subID = ", subID) taskCtx.Subscriptions = append(taskCtx.Subscriptions, subID) taskCtx.EntityID2SubID[flow.EntityID] = subID } else { ERROR.Println(err) } } func (e *Executor) onRemoveInput(flow *FlowInfo) { e.taskMap_lock.Lock() defer e.taskMap_lock.Unlock() taskCtx := e.taskInstances[flow.TaskInstanceID] subID := taskCtx.EntityID2SubID[flow.EntityID] err := e.unsubscribeInputStream(subID) if err != nil { ERROR.Println(err) } for i, sid := range taskCtx.Subscriptions { if sid == subID { taskCtx.Subscriptions = append(taskCtx.Subscriptions[:i], taskCtx.Subscriptions[i+1:]...) break } } delete(taskCtx.EntityID2SubID, flow.EntityID) }
[ "\"DOCKER_HOST\"", "\"DOCKER_CERT_PATH\"" ]
[]
[ "DOCKER_HOST", "DOCKER_CERT_PATH" ]
[]
["DOCKER_HOST", "DOCKER_CERT_PATH"]
go
2
0
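Executor.Init above picks the Docker endpoint per platform: DOCKER_HOST plus certificates from DOCKER_CERT_PATH on Windows, and the local Unix socket elsewhere. A stripped-down sketch of just that selection logic (client construction via go-dockerclient is omitted):

// Sketch only: resolve the Docker endpoint and certificate path from the
// environment on Windows, fall back to the local socket on other platforms.
package main

import (
	"fmt"
	"os"
	"runtime"
)

func dockerEndpoint() (endpoint, certPath string) {
	if runtime.GOOS == "windows" {
		return os.Getenv("DOCKER_HOST"), os.Getenv("DOCKER_CERT_PATH")
	}
	return "unix:///var/run/docker.sock", ""
}

func main() {
	endpoint, certPath := dockerEndpoint()
	fmt.Printf("endpoint=%s certPath=%q\n", endpoint, certPath)
}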
providers/dns/dyn/dyn.go
// Package dyn implements a DNS provider for solving the DNS-01 challenge // using Dyn Managed DNS. package dyn import ( "bytes" "encoding/json" "fmt" "net/http" "os" "strconv" "time" "github.com/nicky-dev/lego/acmev2" ) var dynBaseURL = "https://api.dynect.net/REST" type dynResponse struct { // One of 'success', 'failure', or 'incomplete' Status string `json:"status"` // The structure containing the actual results of the request Data json.RawMessage `json:"data"` // The ID of the job that was created in response to a request. JobID int `json:"job_id"` // A list of zero or more messages Messages json.RawMessage `json:"msgs"` } // DNSProvider is an implementation of the acmev2.ChallengeProvider interface that uses // Dyn's Managed DNS API to manage TXT records for a domain. type DNSProvider struct { customerName string userName string password string token string } // NewDNSProvider returns a DNSProvider instance configured for Dyn DNS. // Credentials must be passed in the environment variables: DYN_CUSTOMER_NAME, // DYN_USER_NAME and DYN_PASSWORD. func NewDNSProvider() (*DNSProvider, error) { customerName := os.Getenv("DYN_CUSTOMER_NAME") userName := os.Getenv("DYN_USER_NAME") password := os.Getenv("DYN_PASSWORD") return NewDNSProviderCredentials(customerName, userName, password) } // NewDNSProviderCredentials uses the supplied credentials to return a // DNSProvider instance configured for Dyn DNS. func NewDNSProviderCredentials(customerName, userName, password string) (*DNSProvider, error) { if customerName == "" || userName == "" || password == "" { return nil, fmt.Errorf("DynDNS credentials missing") } return &DNSProvider{ customerName: customerName, userName: userName, password: password, }, nil } func (d *DNSProvider) sendRequest(method, resource string, payload interface{}) (*dynResponse, error) { url := fmt.Sprintf("%s/%s", dynBaseURL, resource) body, err := json.Marshal(payload) if err != nil { return nil, err } req, err := http.NewRequest(method, url, bytes.NewReader(body)) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/json") if len(d.token) > 0 { req.Header.Set("Auth-Token", d.token) } client := &http.Client{Timeout: time.Duration(10 * time.Second)} resp, err := client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode >= 500 { return nil, fmt.Errorf("Dyn API request failed with HTTP status code %d", resp.StatusCode) } var dynRes dynResponse err = json.NewDecoder(resp.Body).Decode(&dynRes) if err != nil { return nil, err } if resp.StatusCode >= 400 { return nil, fmt.Errorf("Dyn API request failed with HTTP status code %d: %s", resp.StatusCode, dynRes.Messages) } else if resp.StatusCode == 307 { // TODO add support for HTTP 307 response and long running jobs return nil, fmt.Errorf("Dyn API request returned HTTP 307. This is currently unsupported") } if dynRes.Status == "failure" { // TODO add better error handling return nil, fmt.Errorf("Dyn API request failed: %s", dynRes.Messages) } return &dynRes, nil } // Starts a new Dyn API Session. Authenticates using customerName, userName, // password and receives a token to be used in for subsequent requests. 
func (d *DNSProvider) login() error { type creds struct { Customer string `json:"customer_name"` User string `json:"user_name"` Pass string `json:"password"` } type session struct { Token string `json:"token"` Version string `json:"version"` } payload := &creds{Customer: d.customerName, User: d.userName, Pass: d.password} dynRes, err := d.sendRequest("POST", "Session", payload) if err != nil { return err } var s session err = json.Unmarshal(dynRes.Data, &s) if err != nil { return err } d.token = s.Token return nil } // Destroys Dyn Session func (d *DNSProvider) logout() error { if len(d.token) == 0 { // nothing to do return nil } url := fmt.Sprintf("%s/Session", dynBaseURL) req, err := http.NewRequest("DELETE", url, nil) if err != nil { return err } req.Header.Set("Content-Type", "application/json") req.Header.Set("Auth-Token", d.token) client := &http.Client{Timeout: time.Duration(10 * time.Second)} resp, err := client.Do(req) if err != nil { return err } resp.Body.Close() if resp.StatusCode != 200 { return fmt.Errorf("Dyn API request failed to delete session with HTTP status code %d", resp.StatusCode) } d.token = "" return nil } // Present creates a TXT record using the specified parameters func (d *DNSProvider) Present(domain, token, keyAuth string) error { fqdn, value, ttl := acmev2.DNS01Record(domain, keyAuth) authZone, err := acmev2.FindZoneByFqdn(fqdn, acmev2.RecursiveNameservers) if err != nil { return err } err = d.login() if err != nil { return err } data := map[string]interface{}{ "rdata": map[string]string{ "txtdata": value, }, "ttl": strconv.Itoa(ttl), } resource := fmt.Sprintf("TXTRecord/%s/%s/", authZone, fqdn) _, err = d.sendRequest("POST", resource, data) if err != nil { return err } err = d.publish(authZone, "Added TXT record for ACME dns-01 challenge using lego client") if err != nil { return err } err = d.logout() if err != nil { return err } return nil } func (d *DNSProvider) publish(zone, notes string) error { type publish struct { Publish bool `json:"publish"` Notes string `json:"notes"` } pub := &publish{Publish: true, Notes: notes} resource := fmt.Sprintf("Zone/%s/", zone) _, err := d.sendRequest("PUT", resource, pub) if err != nil { return err } return nil } // CleanUp removes the TXT record matching the specified parameters func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { fqdn, _, _ := acmev2.DNS01Record(domain, keyAuth) authZone, err := acmev2.FindZoneByFqdn(fqdn, acmev2.RecursiveNameservers) if err != nil { return err } err = d.login() if err != nil { return err } resource := fmt.Sprintf("TXTRecord/%s/%s/", authZone, fqdn) url := fmt.Sprintf("%s/%s", dynBaseURL, resource) req, err := http.NewRequest("DELETE", url, nil) if err != nil { return err } req.Header.Set("Content-Type", "application/json") req.Header.Set("Auth-Token", d.token) client := &http.Client{Timeout: time.Duration(10 * time.Second)} resp, err := client.Do(req) if err != nil { return err } resp.Body.Close() if resp.StatusCode != 200 { return fmt.Errorf("Dyn API request failed to delete TXT record HTTP status code %d", resp.StatusCode) } err = d.publish(authZone, "Removed TXT record for ACME dns-01 challenge using lego client") if err != nil { return err } err = d.logout() if err != nil { return err } return nil }
[ "\"DYN_CUSTOMER_NAME\"", "\"DYN_USER_NAME\"", "\"DYN_PASSWORD\"" ]
[]
[ "DYN_CUSTOMER_NAME", "DYN_USER_NAME", "DYN_PASSWORD" ]
[]
["DYN_CUSTOMER_NAME", "DYN_USER_NAME", "DYN_PASSWORD"]
go
3
0
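The three Dyn credentials listed above flow from the environment into NewDNSProviderCredentials via NewDNSProvider. A minimal sketch of the same lookup-and-validate pattern using only the standard library (it mirrors the provider's empty-credential check rather than calling the package itself):

package main

import (
	"fmt"
	"os"
)

// dynCredentials mirrors NewDNSProvider: read the three variables and reject
// the configuration if any of them is empty.
func dynCredentials() (customer, user, pass string, err error) {
	customer = os.Getenv("DYN_CUSTOMER_NAME")
	user = os.Getenv("DYN_USER_NAME")
	pass = os.Getenv("DYN_PASSWORD")
	if customer == "" || user == "" || pass == "" {
		return "", "", "", fmt.Errorf("DynDNS credentials missing")
	}
	return customer, user, pass, nil
}

func main() {
	if _, _, _, err := dynCredentials(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("Dyn credentials present")
}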
mysql-practice/mysql-sqlalchemy-practice.py
import mysql.connector as mysql from mysqlx import Error from sqlalchemy import Column, String, ForeignKey, create_engine, Integer from sqlalchemy.orm import sessionmaker, relationship from sqlalchemy.ext.declarative import declarative_base import os # Since sqlalchemy does not initialize database, it just connects to it, # mysql-connector is used to initialize the database # create a database and connect to it ROOT_PASSWORD = os.environ.get('ROOT_PASSWORD') def connect(): try: return mysql.connect( host="127.0.0.1", port="3306", user="root", password=ROOT_PASSWORD , auth_plugin='mysql_native_password') except Error as e: print(e) household_db = connect() cursor = household_db.cursor() # create a database called household # cursor.execute("CREATE DATABASE household") # -----Working with sqlalchemy---------# engine = create_engine(f"mysql+mysqlconnector://root:{ROOT_PASSWORD}@localhost:3306/household", echo=True) # the parameter for the engine creation are: # type of the db, driver( since it is on the server), username, password, # host of the db, database name, echo=True to print the executed command # --Object relational mapping--# # create a base, since all models in the ORM are based on the declarative base Base = declarative_base() class Project(Base): __tablename__ = 'projects' __tableargs__ = {"schema": "household"} project_id = Column(Integer, primary_key=True) title = Column(String(length=50)) description = Column(String(length=50)) def __repr__(self): """ shows a printable representation of the object""" return f"<Project(title={self.title}, description={self.description})>" class Task(Base): __tablename__ = 'tasks' __tableargs__ = {"schema": "household"} task_id = Column(Integer, primary_key=True) project_id = Column(Integer, ForeignKey("projects.project_id"), nullable=False) description = Column(String(length=50)) project = relationship("Project") # Project here is the name of the class, so the relationship is between objects of classes( between two models) def __repr__(self): """ shows a printable representation of the object""" return f"<Task(description={self.description})>" Base.metadata.create_all(engine) # create a session to query the database session_maker = sessionmaker() session_maker.configure(bind=engine) session = session_maker() # adding a project in the session into the database learn_rdbms_project = Project(title="Learn RDBMS", description="learn relational database management systems") session.add(learn_rdbms_project) # commit project addition session.commit() # adding tasks for the first project tasks = [Task(project_id=learn_rdbms_project.project_id, description=" learn Mysql"), Task(project_id=learn_rdbms_project.project_id, description=" learn SQLite"), Task(project_id=learn_rdbms_project.project_id, description=" learn POSTGRESQL")] session.bulk_save_objects(tasks) session.commit() # Retrieve the committed project in this session project = session.query(Project).filter_by(title="Learn RDBMS").first() print(project) # to retrieve the committed tasks in this project project_tasks = session.query(Task).all() print(project_tasks)
[]
[]
[ "ROOT_PASSWORD" ]
[]
["ROOT_PASSWORD"]
python
1
0
starport/pkg/gocmd/gocmd.go
package gocmd import ( "context" "errors" "fmt" "os" "path/filepath" "strings" "github.com/tendermint/starport/starport/pkg/cmdrunner/exec" "github.com/tendermint/starport/starport/pkg/cmdrunner/step" "github.com/tendermint/starport/starport/pkg/goenv" ) const ( // CommandInstall represents go "install" command. CommandInstall = "install" // CommandBuild represents go "build" command. CommandBuild = "build" // CommandMod represents go "mod" command. CommandMod = "mod" // CommandModTidy represents go mod "tidy" command. CommandModTidy = "tidy" // CommandModVerify represents go mod "verify" command. CommandModVerify = "verify" ) const ( FlagMod = "-mod" FlagModValueReadOnly = "readonly" FlagLdflags = "-ldflags" FlagOut = "-o" ) const ( EnvGOOS = "GOOS" EnvGOARCH = "GOARCH" ) // Name returns the name of Go binary to use. func Name() string { custom := os.Getenv("GONAME") if custom != "" { return custom } return "go" } // ModTidy runs go mod tidy on path with options. func ModTidy(ctx context.Context, path string, options ...exec.Option) error { return exec.Exec(ctx, []string{Name(), CommandMod, CommandModTidy}, append(options, exec.StepOption(step.Workdir(path)))...) } // ModVerify runs go mod verify on path with options. func ModVerify(ctx context.Context, path string, options ...exec.Option) error { return exec.Exec(ctx, []string{Name(), CommandMod, CommandModVerify}, append(options, exec.StepOption(step.Workdir(path)))...) } // BuildPath runs go install on cmd folder with options. func BuildPath(ctx context.Context, output, binary, path string, flags []string, options ...exec.Option) error { binaryOutput, err := binaryPath(output, binary) if err != nil { return err } command := []string{ Name(), CommandBuild, FlagOut, binaryOutput, } command = append(command, flags...) command = append(command, ".") return exec.Exec(ctx, command, append(options, exec.StepOption(step.Workdir(path)))...) } // BuildAll runs go build ./... on path with options. func BuildAll(ctx context.Context, out, path string, flags []string, options ...exec.Option) error { command := []string{ Name(), CommandBuild, FlagOut, out, } command = append(command, flags...) command = append(command, "./...") return exec.Exec(ctx, command, append(options, exec.StepOption(step.Workdir(path)))...) } // InstallAll runs go install ./... on path with options. func InstallAll(ctx context.Context, path string, flags []string, options ...exec.Option) error { command := []string{ Name(), CommandInstall, } command = append(command, flags...) command = append(command, "./...") return exec.Exec(ctx, command, append(options, exec.StepOption(step.Workdir(path)))...) } // Ldflags returns a combined ldflags set from flags. func Ldflags(flags ...string) string { return strings.Join(flags, " ") } // BuildTarget builds a GOOS:GOARCH pair. func BuildTarget(goos, goarch string) string { return fmt.Sprintf("%s:%s", goos, goarch) } // ParseTarget parses GOOS:GOARCH pair. func ParseTarget(t string) (goos, goarch string, err error) { parsed := strings.Split(t, ":") if len(parsed) != 2 { return "", "", errors.New("invalid Go target, expected in GOOS:GOARCH format") } return parsed[0], parsed[1], nil } // PackageLiteral returns the string representation of package part of go get [package]. func PackageLiteral(path, version string) string { return fmt.Sprintf("%s@%s", path, version) } // binaryPath determines the path where binary will be located at. 
func binaryPath(output, binary string) (string, error) { if output != "" { outputAbs, err := filepath.Abs(output) if err != nil { return "", err } return filepath.Join(outputAbs, binary), nil } return filepath.Join(goenv.Bin(), binary), nil }
[ "\"GONAME\"" ]
[]
[ "GONAME" ]
[]
["GONAME"]
go
1
0
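The GONAME lookup above lets callers point the starport tooling at an alternative Go binary. A small standalone sketch of the same override-or-default pattern, independent of the starport packages:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// goName mirrors gocmd.Name(): prefer a GONAME override, otherwise use "go".
func goName() string {
	if custom := os.Getenv("GONAME"); custom != "" {
		return custom
	}
	return "go"
}

func main() {
	out, err := exec.Command(goName(), "version").CombinedOutput()
	if err != nil {
		fmt.Fprintln(os.Stderr, "error:", err)
		return
	}
	fmt.Print(string(out))
}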
pkg/services/slack/slack_test.go
package slack_test import ( "errors" . "github.com/containrrr/shoutrrr/pkg/services/slack" "github.com/containrrr/shoutrrr/pkg/util" "github.com/jarcoal/httpmock" "log" "net/url" "os" "testing" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) func TestSlack(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Shoutrrr Slack Suite") } var ( service *Service envSlackURL *url.URL logger *log.Logger ) var _ = Describe("the slack service", func() { BeforeSuite(func() { service = &Service{} logger = log.New(GinkgoWriter, "Test", log.LstdFlags) envSlackURL, _ = url.Parse(os.Getenv("SHOUTRRR_SLACK_URL")) }) When("running integration tests", func() { It("should not error out", func() { if envSlackURL.String() == "" { return } serviceURL, _ := url.Parse(envSlackURL.String()) err := service.Initialize(serviceURL, util.TestLogger()) Expect(err).NotTo(HaveOccurred()) err = service.Send("This is an integration test message", nil) Expect(err).NotTo(HaveOccurred()) }) }) When("given a token with a malformed part", func() { It("should return an error if part A is not 9 letters", func() { slackURL, err := url.Parse("slack://lol@12345678/123456789/123456789123456789123456") Expect(err).NotTo(HaveOccurred()) expectErrorMessageGivenURL( TokenAMalformed, slackURL, ) }) It("should return an error if part B is not 9 letters", func() { slackURL, err := url.Parse("slack://lol@123456789/12345678/123456789123456789123456") Expect(err).NotTo(HaveOccurred()) expectErrorMessageGivenURL( TokenBMalformed, slackURL, ) }) It("should return an error if part C is not 24 letters", func() { slackURL, err := url.Parse("slack://123456789/123456789/12345678912345678912345") Expect(err).NotTo(HaveOccurred()) expectErrorMessageGivenURL( TokenCMalformed, slackURL, ) }) }) When("given a token missing a part", func() { It("should return an error if the missing part is A", func() { slackURL, err := url.Parse("slack://lol@/123456789/123456789123456789123456") Expect(err).NotTo(HaveOccurred()) expectErrorMessageGivenURL( TokenAMissing, slackURL, ) }) It("should return an error if the missing part is B", func() { slackURL, err := url.Parse("slack://lol@123456789//123456789") Expect(err).NotTo(HaveOccurred()) expectErrorMessageGivenURL( TokenBMissing, slackURL, ) }) It("should return an error if the missing part is C", func() { slackURL, err := url.Parse("slack://lol@123456789/123456789/") Expect(err).NotTo(HaveOccurred()) expectErrorMessageGivenURL( TokenCMissing, slackURL, ) }) }) Describe("the slack config", func() { When("parsing the configuration URL", func() { It("should be identical after de-/serialization", func() { testURL := "slack://testbot@AAAAAAAAA/BBBBBBBBB/123456789123456789123456?color=3f00fe&title=Test+title" url, err := url.Parse(testURL) Expect(err).NotTo(HaveOccurred(), "parsing") config := &Config{} err = config.SetURL(url) Expect(err).NotTo(HaveOccurred(), "verifying") outputURL := config.GetURL() Expect(outputURL.String()).To(Equal(testURL)) }) }) When("generating a config object", func() { It("should use the default botname if the argument list contains three strings", func() { slackURL, _ := url.Parse("slack://AAAAAAAAA/BBBBBBBBB/123456789123456789123456") config, configError := CreateConfigFromURL(slackURL) Expect(configError).NotTo(HaveOccurred()) Expect(config.BotName).To(BeEmpty()) }) It("should set the botname if the argument list is three", func() { slackURL, _ := url.Parse("slack://testbot@AAAAAAAAA/BBBBBBBBB/123456789123456789123456") config, configError := CreateConfigFromURL(slackURL) 
Expect(configError).NotTo(HaveOccurred()) Expect(config.BotName).To(Equal("testbot")) }) It("should return an error if the argument list is shorter than three", func() { slackURL, _ := url.Parse("slack://AAAAAAAA") _, configError := CreateConfigFromURL(slackURL) Expect(configError).To(HaveOccurred()) }) }) }) Describe("sending the payload", func() { var err error BeforeEach(func() { httpmock.Activate() }) AfterEach(func() { httpmock.DeactivateAndReset() }) It("should not report an error if the server accepts the payload", func() { serviceURL, _ := url.Parse("slack://testbot@AAAAAAAAA/BBBBBBBBB/123456789123456789123456") err = service.Initialize(serviceURL, logger) Expect(err).NotTo(HaveOccurred()) targetURL := "https://hooks.slack.com/services/AAAAAAAAA/BBBBBBBBB/123456789123456789123456" httpmock.RegisterResponder("POST", targetURL, httpmock.NewStringResponder(200, "")) err = service.Send("Message", nil) Expect(err).NotTo(HaveOccurred()) }) It("should not panic if an error occurs when sending the payload", func() { serviceURL, _ := url.Parse("slack://testbot@AAAAAAAAA/BBBBBBBBB/123456789123456789123456") err = service.Initialize(serviceURL, logger) Expect(err).NotTo(HaveOccurred()) targetURL := "https://hooks.slack.com/services/AAAAAAAAA/BBBBBBBBB/123456789123456789123456" httpmock.RegisterResponder("POST", targetURL, httpmock.NewErrorResponder(errors.New("dummy error"))) err = service.Send("Message", nil) Expect(err).To(HaveOccurred()) }) }) }) func expectErrorMessageGivenURL(msg ErrorMessage, slackURL *url.URL) { err := service.Initialize(slackURL, util.TestLogger()) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(Equal(string(msg))) }
[ "\"SHOUTRRR_SLACK_URL\"" ]
[]
[ "SHOUTRRR_SLACK_URL" ]
[]
["SHOUTRRR_SLACK_URL"]
go
1
0
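The integration spec above only talks to a real webhook when SHOUTRRR_SLACK_URL is set. A hedged sketch of the same gating pattern as a plain Go test, without the Ginkgo machinery (the package and test names are illustrative):

package integration

import (
	"net/url"
	"os"
	"testing"
)

// TestSlackIntegration is skipped unless SHOUTRRR_SLACK_URL points at a real
// webhook, so ordinary CI runs stay hermetic.
func TestSlackIntegration(t *testing.T) {
	raw := os.Getenv("SHOUTRRR_SLACK_URL")
	if raw == "" {
		t.Skip("SHOUTRRR_SLACK_URL not set; skipping integration test")
	}
	if _, err := url.Parse(raw); err != nil {
		t.Fatalf("invalid SHOUTRRR_SLACK_URL: %v", err)
	}
	// A real test would initialize the Slack service with this URL and send a message.
}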
src/yajsw/src/main/java/org/rzo/yajsw/os/ms/win/w32/OperatingSystemWindowsXP.java
/******************************************************************************* * Copyright 2015 rzorzorzo@users.sf.net * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ package org.rzo.yajsw.os.ms.win.w32; import java.util.Map; import org.apache.commons.configuration2.Configuration; import org.rzo.yajsw.os.ErrorHandler; import org.rzo.yajsw.os.FileManager; import org.rzo.yajsw.os.JavaHome; import org.rzo.yajsw.os.Keyboard; import org.rzo.yajsw.os.Mouse; import org.rzo.yajsw.os.OperatingSystem; import org.rzo.yajsw.os.ProcessManager; import org.rzo.yajsw.os.ServiceManager; import org.rzo.yajsw.os.SystemInformation; import com.sun.jna.platform.win32.Kernel32Util; // TODO: Auto-generated Javadoc /** * The Class OperatingSystemWindowsXP. */ public class OperatingSystemWindowsXP extends OperatingSystem { /** The _keyboard instance. */ static Keyboard _keyboardInstance; static Mouse _mouseInstance; /** The _process manager. */ static ProcessManager _processManager; static FileManager _fileManager; /** The _process manager. */ static ServiceManager _serviceManager; /** The _error handler. */ static ErrorHandler _errorHandler = new WindowsXPErrorHandler(); static SystemInformation _systemInformation = new WindowsXPSystemInformation(); /* * (non-Javadoc) * * @see org.rzo.yajsw.os.OperatingSystem#keyboardInstance() */ @Override public Keyboard keyboardInstance() { if (_keyboardInstance == null) _keyboardInstance = WindowsXPKeyboard.instance(); return _keyboardInstance; } public Mouse mouseInstance() { if (_mouseInstance == null) _mouseInstance = WindowsXPMouse.instance(); return _mouseInstance; } /* * (non-Javadoc) * * @see org.rzo.yajsw.os.OperatingSystem#processManagerInstance() */ @Override public ProcessManager processManagerInstance() { if (_processManager == null) _processManager = WindowsXPProcessManager.instance(); return _processManager; } public FileManager fileManagerInstance() { if (_fileManager == null) _fileManager = WindowsXPFileManager.instance(); return _fileManager; } /* * (non-Javadoc) * * @see org.rzo.yajsw.os.OperatingSystem#errorHandlerInstance() */ @Override public ErrorHandler errorHandlerInstance() { // TODO Auto-generated method stub return _errorHandler; } public JavaHome getJavaHome(Configuration config) { return new WindowsJavaHome(config); } @Override public ServiceManager serviceManagerInstance() { if (_serviceManager == null) _serviceManager = WindowsXPServiceManager.instance(); return _serviceManager; } @Override public SystemInformation systemInformation() { return _systemInformation; } @Override public boolean setWorkingDir(String name) { return new WindowsXPProcess().changeWorkingDir(name); } @Override public Object getServiceFailureActions(Configuration config) { return WindowsXPService.getServiceFailureActions(config); } @Override public long getUptime() { return new WindowsXPProcess().getUptime(); } @Override public void reboot() { try { new WindowsXPProcess().reboot(); } catch (Exception ex) { 
ex.printStackTrace(); } } public Map<String, String> getOSEnv() { try { Map<String, String> result = Kernel32Util.getEnvironmentVariables(); return result; } catch (Error ex) { System.out.println("warning: "+ex.getMessage()); return System.getenv(); } } }
[]
[]
[]
[]
[]
java
0
0
rest/_examples/zones.go
package main import ( "encoding/json" "fmt" "log" "net/http" "os" "time" api "gopkg.in/nkpetko/ns1-go-dev.v2/rest" "gopkg.in/nkpetko/ns1-go-dev.v2/rest/model/dns" "gopkg.in/nkpetko/ns1-go-dev.v2/rest/model/filter" ) var client *api.Client // Helper that initializes rest api client from environment variable. func init() { k := os.Getenv("NS1_APIKEY") if k == "" { fmt.Println("NS1_APIKEY environment variable is not set, giving up") } httpClient := &http.Client{Timeout: time.Second * 10} // Adds logging to each http request. doer := api.Decorate(httpClient, api.Logging(log.New(os.Stdout, "", log.LstdFlags))) client = api.NewClient(doer, api.SetAPIKey(k)) } func main() { zones, _, err := client.Zones.List() if err != nil { log.Fatal(err) } for _, z := range zones { b, _ := json.MarshalIndent(z, "", " ") fmt.Println(string(b)) } // Construct/Create a zone. domain := "myzonetest.com" z := dns.NewZone(domain) z.NxTTL = 3600 _, err = client.Zones.Create(z) if err != nil { // Ignore if zone already exists if err != api.ErrZoneExists { log.Fatal(err) } else { log.Printf("Create %s: %s \n", z, err) } } // Update the zone. z.Retry = 5401 _, err = client.Zones.Update(z) if err != nil { log.Fatal(err) } // Add an A record with a single static answer. orchidRec := dns.NewRecord(domain, "orchid", "A") orchidRec.AddAnswer(dns.NewAv4Answer("2.2.2.2")) _, err = client.Records.Create(orchidRec) if err != nil { switch { case err == api.ErrRecordExists: // Ignore if record already exists log.Printf("Create %s: %s \n", orchidRec, err) case err == api.ErrZoneMissing: log.Printf("Create %s: %s \n", orchidRec, err) return default: log.Fatal(err) } } orchidRec.TTL = 333 _, err = client.Records.Update(orchidRec) if err != nil { switch { case err == api.ErrRecordExists: // Ignore if record already exists log.Printf("Update %s: %s \n", orchidRec, err) case err == api.ErrZoneMissing: log.Printf("Update %s: %s \n", orchidRec, err) return default: log.Fatal(err) } } fmt.Printf("%#v \n", orchidRec) bRec, _ := json.MarshalIndent(orchidRec, "", " ") fmt.Println(string(bRec)) // Add an A record with two static answers. 
honeyRec := dns.NewRecord(domain, "honey", "A") honeyRec.Answers = []*dns.Answer{ dns.NewAv4Answer("1.2.3.4"), dns.NewAv4Answer("5.6.7.8"), } _, err = client.Records.Create(honeyRec) if err != nil { // Ignore if record already exists if err != api.ErrRecordExists { log.Fatal(err) } else { log.Printf("Create %s: %s \n", honeyRec, err) } } // Add a cname potRec := dns.NewRecord(domain, "pot", "CNAME") potRec.AddAnswer(dns.NewCNAMEAnswer("honey.test.com")) _, err = client.Records.Create(potRec) if err != nil { // Ignore if record already exists if err != api.ErrRecordExists { log.Fatal(err) } else { log.Printf("Create %s: %s \n", potRec, err) } } // Add a MX with two answers, priority 5 and 10 mailRec := dns.NewRecord(domain, "mail", "MX") mailRec.Answers = []*dns.Answer{ dns.NewMXAnswer(5, "mail1.test.com"), dns.NewMXAnswer(10, "mail2.test.com"), } _, err = client.Records.Create(mailRec) if err != nil { // Ignore if record already exists if err != api.ErrRecordExists { log.Fatal(err) } else { log.Printf("Create %s: %s \n", mailRec, err) } } // Add a AAAA, specify ttl of 300 seconds aaaaRec := dns.NewRecord(domain, "honey6", "AAAA") aaaaRec.TTL = 300 aaaaRec.AddAnswer(dns.NewAv6Answer("2607:f8b0:4006:806::1010")) _, err = client.Records.Create(aaaaRec) if err != nil { // Ignore if record already exists if err != api.ErrRecordExists { log.Fatal(err) } else { log.Printf("Create %s: %s \n", aaaaRec, err) } } // Add an A record using full answer format to specify 2 answers with meta data. // ensure edns-client-subnet is in use, and add two filters: geotarget_country, // and select_first_n, which has a filter config option N set to 1. bumbleRec := dns.NewRecord(domain, "bumble", "A") usAns := dns.NewAv4Answer("1.1.1.1") usAns.Meta.Up = false usAns.Meta.Country = []string{"US"} fraAns := dns.NewAv4Answer("1.1.1.1") fraAns.Meta.Up = true fraAns.Meta.Country = []string{"FR"} bumbleRec.AddAnswer(usAns) bumbleRec.AddAnswer(fraAns) geotarget := filter.NewGeotargetCountry() selFirstN := filter.NewSelFirstN(1) bumbleRec.AddFilter(geotarget) bumbleRec.AddFilter(selFirstN) _, err = client.Records.Create(bumbleRec) if err != nil { // Ignore if record already exists if err != api.ErrRecordExists { log.Fatal(err) } else { log.Printf("Create %s: %s \n", bumbleRec, err) } } // _, err = client.Zones.Delete(domain) // if err != nil { // // Ignore if zone doesnt yet exist // if err != api.ErrZoneMissing { // log.Fatal(err) // } else { // log.Printf("Delete %s: %s \n", z, err) // } // } }
[ "\"NS1_APIKEY\"" ]
[]
[ "NS1_APIKEY" ]
[]
["NS1_APIKEY"]
go
1
0
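The init function above only prints a message when NS1_APIKEY is missing and then continues. A sketch of a stricter variant that fails fast before the HTTP client is built (the apiKeyOrExit helper is illustrative and not part of the ns1-go package):

package main

import (
	"fmt"
	"net/http"
	"os"
	"time"
)

// apiKeyOrExit enforces the presence of NS1_APIKEY instead of merely warning,
// so later API calls can never run with an empty key.
func apiKeyOrExit() string {
	k := os.Getenv("NS1_APIKEY")
	if k == "" {
		fmt.Fprintln(os.Stderr, "NS1_APIKEY environment variable is not set, giving up")
		os.Exit(1)
	}
	return k
}

func main() {
	key := apiKeyOrExit()
	client := &http.Client{Timeout: 10 * time.Second}
	_ = client // a real program would pass key and client to api.NewClient
	fmt.Println("API key loaded, length:", len(key))
}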
ml-agents/tests/yamato/yamato_utils.py
import os import shutil import subprocess import yaml from typing import List, Optional def get_unity_executable_path(): UNITY_VERSION = os.environ["UNITY_VERSION"] BOKKEN_UNITY = f"/Users/bokken/{UNITY_VERSION}/Unity.app/Contents/MacOS/Unity" HUB_UNITY = ( f"/Applications/Unity/Hub/Editor/{UNITY_VERSION}/Unity.app/Contents/MacOS/Unity" ) if os.path.exists(BOKKEN_UNITY): return BOKKEN_UNITY if os.path.exists(HUB_UNITY): return HUB_UNITY raise FileNotFoundError("Can't find bokken or hub executables") def get_base_path(): # We might need to do some more work here if the working directory ever changes # E.g. take the full path and back out the main module main. # But for now, this should work return os.getcwd() def get_base_output_path(): """" Returns the artifact folder to use for yamato jobs. """ return os.path.join(get_base_path(), "artifacts") def run_standalone_build( base_path: str, verbose: bool = False, output_path: str = None, scene_path: str = None, log_output_path: str = f"{get_base_output_path()}/standalone_build.txt", ) -> int: """ Run BuildStandalonePlayerOSX test to produce a player. The location defaults to artifacts/standalone_build/testPlayer. """ unity_exe = get_unity_executable_path() print(f"Running BuildStandalonePlayerOSX via {unity_exe}") test_args = [ unity_exe, "-projectPath", f"{base_path}/Project", "-batchmode", "-executeMethod", "Unity.MLAgents.StandaloneBuildTest.BuildStandalonePlayerOSX", ] os.makedirs(os.path.dirname(log_output_path), exist_ok=True) subprocess.run(["touch", log_output_path]) test_args += ["-logfile", log_output_path] if output_path is not None: output_path = os.path.join(get_base_output_path(), output_path) test_args += ["--mlagents-build-output-path", output_path] os.makedirs(os.path.dirname(output_path), exist_ok=True) if scene_path is not None: test_args += ["--mlagents-build-scene-path", scene_path] print(f"{' '.join(test_args)} ...") timeout = 30 * 60 # 30 minutes, just in case res: subprocess.CompletedProcess = subprocess.run(test_args, timeout=timeout) # Copy the default build name into the artifacts folder. if output_path is None and res.returncode == 0: shutil.move( os.path.join(base_path, "Project", "testPlayer.app"), os.path.join(get_base_output_path(), "testPlayer.app"), ) # Print if we fail or want verbosity. if verbose or res.returncode != 0: subprocess.run(["cat", log_output_path]) return res.returncode def init_venv( mlagents_python_version: str = None, extra_packages: Optional[List[str]] = None ) -> str: """ Set up the virtual environment, and return the venv path. :param mlagents_python_version: The version of mlagents python packcage to install. 
If None, will do a local install, otherwise will install from pypi :return: """ # Use a different venv path for different versions venv_path = "venv" if mlagents_python_version: venv_path += "_" + mlagents_python_version # Set up the venv and install mlagents subprocess.check_call(f"python -m venv {venv_path}", shell=True) pip_commands = [ "--upgrade pip", "--upgrade setuptools", # TODO build these and publish to internal pypi "~/tensorflow_pkg/tensorflow-2.0.0-cp37-cp37m-macosx_10_14_x86_64.whl", ] if mlagents_python_version: # install from pypi pip_commands += [ f"mlagents=={mlagents_python_version}", f"gym-unity=={mlagents_python_version}", ] else: # Local install pip_commands += ["-e ./ml-agents-envs", "-e ./ml-agents", "-e ./gym-unity"] if extra_packages: pip_commands += extra_packages for cmd in pip_commands: subprocess.check_call( f"source {venv_path}/bin/activate; python -m pip install -q {cmd}", shell=True, ) return venv_path def checkout_csharp_version(csharp_version): """ Checks out the specific git revision (usually a tag) for the C# package and Project. If csharp_version is None, no changes are made. :param csharp_version: :return: """ if csharp_version is None: return csharp_tag = f"com.unity.ml-agents_{csharp_version}" csharp_dirs = ["com.unity.ml-agents", "Project"] for csharp_dir in csharp_dirs: subprocess.check_call(f"rm -rf {csharp_dir}", shell=True) subprocess.check_call(f"git checkout {csharp_tag} -- {csharp_dir}", shell=True) def undo_git_checkout(): """ Clean up the git working directory. """ subprocess.check_call("git reset HEAD .", shell=True) subprocess.check_call("git checkout -- .", shell=True) # Ensure the cache isn't polluted with old compiled assemblies. subprocess.check_call(f"rm -rf Project/Library", shell=True) def override_config_file(src_path, dest_path, **kwargs): """ Override settings in a trainer config file. For example, override_config_file(src_path, dest_path, max_steps=42) will copy the config file at src_path to dest_path, but override the max_steps field to 42 for all brains. """ with open(src_path) as f: configs = yaml.safe_load(f) behavior_configs = configs["behaviors"] for config in behavior_configs.values(): config.update(**kwargs) with open(dest_path, "w") as f: yaml.dump(configs, f) def override_legacy_config_file(python_version, src_path, dest_path, **kwargs): """ Override settings in a trainer config file, using an old version of the src_path. For example, override_config_file("0.16.0", src_path, dest_path, max_steps=42) will sync the file at src_path from version 0.16.0, copy it to dest_path, and override the max_steps field to 42 for all brains. """ # Sync the old version of the file python_tag = f"python-packages_{python_version}" subprocess.check_call(f"git checkout {python_tag} -- {src_path}", shell=True) with open(src_path) as f: configs = yaml.safe_load(f) for config in configs.values(): config.update(**kwargs) with open(dest_path, "w") as f: yaml.dump(configs, f)
[]
[]
[ "UNITY_VERSION" ]
[]
["UNITY_VERSION"]
python
1
0
manage.py
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "search_engine.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
datalad/core/distributed/tests/test_push.py
# -*- coding: utf-8 -*- # ex: set sts=4 ts=4 sw=4 et: # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the datalad package for the # copyright and license terms. # # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Test push """ import logging import os import pytest from datalad.core.distributed.clone import Clone from datalad.core.distributed.push import Push from datalad.distribution.dataset import Dataset from datalad.support.annexrepo import AnnexRepo from datalad.support.exceptions import ( IncompleteResultsError, InsufficientArgumentsError, ) from datalad.support.gitrepo import GitRepo from datalad.support.network import get_local_file_url from datalad.tests.utils_pytest import ( DEFAULT_BRANCH, DEFAULT_REMOTE, SkipTest, assert_false, assert_in, assert_in_results, assert_not_in, assert_not_in_results, assert_raises, assert_repo_status, assert_result_count, assert_status, eq_, known_failure_githubci_osx, known_failure_githubci_win, neq_, ok_, ok_file_has_content, serve_path_via_http, skip_if_adjusted_branch, skip_if_on_windows, skip_ssh, slow, swallow_logs, with_tempfile, with_tree, ) from datalad.utils import ( Path, chpwd, path_startswith, swallow_outputs, ) DEFAULT_REFSPEC = "refs/heads/{0}:refs/heads/{0}".format(DEFAULT_BRANCH) @with_tempfile(mkdir=True) @with_tempfile(mkdir=True) def test_invalid_call(origin=None, tdir=None): ds = Dataset(origin).create() # no target assert_status('impossible', ds.push(on_failure='ignore')) # no dataset with chpwd(tdir): assert_raises(InsufficientArgumentsError, Push.__call__) # dataset, but outside path assert_raises(IncompleteResultsError, ds.push, path=tdir) # given a path constraint that doesn't match anything, will cause # nothing to be done assert_status('notneeded', ds.push(path=ds.pathobj / 'nothere')) # unavailable subdataset dummy_sub = ds.create('sub') dummy_sub.drop(what='all', reckless='kill', recursive=True) assert_in('sub', ds.subdatasets(state='absent', result_xfm='relpaths')) # now an explicit call to publish the unavailable subdataset assert_raises(ValueError, ds.push, 'sub') target = mk_push_target(ds, 'target', tdir, annex=True) # revision that doesn't exist assert_raises( ValueError, ds.push, to='target', since='09320957509720437523') # If a publish() user accidentally passes since='', which push() spells as # since='^', the call is aborted. assert_raises( ValueError, ds.push, to='target', since='') def mk_push_target(ds, name, path, annex=True, bare=True): # life could be simple, but nothing is simple on windows #src.create_sibling(dst_path, name='target') if annex: if bare: target = GitRepo(path=path, bare=True, create=True) # cannot use call_annex() target.call_git(['annex', 'init']) else: target = AnnexRepo(path, init=True, create=True) if not target.is_managed_branch(): # for managed branches we need more fireworks->below target.config.set( 'receive.denyCurrentBranch', 'updateInstead', scope='local') else: target = GitRepo(path=path, bare=bare, create=True) ds.siblings('add', name=name, url=path, result_renderer='disabled') if annex and not bare and target.is_managed_branch(): # maximum complication # the target repo already has a commit that is unrelated # to the source repo, because it has built a reference # commit for the managed branch. 
# the only sane approach is to let git-annex establish a shared # history if AnnexRepo.git_annex_version > "8.20210631": ds.repo.call_annex(['sync', '--allow-unrelated-histories']) else: ds.repo.call_annex(['sync']) ds.repo.call_annex(['sync', '--cleanup']) return target @with_tempfile(mkdir=True) @with_tempfile(mkdir=True) def check_push(annex, src_path, dst_path): # prepare src src = Dataset(src_path).create(annex=annex) src_repo = src.repo # push should not add branches to the local dataset orig_branches = src_repo.get_branches() assert_not_in('synced/' + DEFAULT_BRANCH, orig_branches) res = src.push(on_failure='ignore') assert_result_count(res, 1) assert_in_results( res, status='impossible', message='No push target given, and none could be auto-detected, ' 'please specify via --to') eq_(orig_branches, src_repo.get_branches()) # target sibling target = mk_push_target(src, 'target', dst_path, annex=annex) eq_(orig_branches, src_repo.get_branches()) res = src.push(to="target") eq_(orig_branches, src_repo.get_branches()) assert_result_count(res, 2 if annex else 1) assert_in_results( res, action='publish', status='ok', target='target', refspec=DEFAULT_REFSPEC, operations=['new-branch']) assert_repo_status(src_repo, annex=annex) eq_(list(target.get_branch_commits_(DEFAULT_BRANCH)), list(src_repo.get_branch_commits_(DEFAULT_BRANCH))) # configure a default merge/upstream target src.config.set('branch.{}.remote'.format(DEFAULT_BRANCH), 'target', scope='local') src.config.set('branch.{}.merge'.format(DEFAULT_BRANCH), DEFAULT_BRANCH, scope='local') # don't fail when doing it again, no explicit target specification # needed anymore res = src.push() eq_(orig_branches, src_repo.get_branches()) # and nothing is pushed assert_status('notneeded', res) assert_repo_status(src_repo, annex=annex) eq_(list(target.get_branch_commits_(DEFAULT_BRANCH)), list(src_repo.get_branch_commits_(DEFAULT_BRANCH))) # some modification: (src.pathobj / 'test_mod_file').write_text("Some additional stuff.") src.save(to_git=True, message="Modified.") (src.pathobj / 'test_mod_annex_file').write_text("Heavy stuff.") src.save(to_git=not annex, message="Modified again.") assert_repo_status(src_repo, annex=annex) # we could say since='HEAD~2' to make things fast, or we are lazy # and say since='^' to indicate the state of the tracking remote # which is the same, because we made to commits since the last push. 
res = src.push(to='target', since="^", jobs=2) assert_in_results( res, action='publish', status='ok', target='target', refspec=DEFAULT_REFSPEC, # we get to see what happened operations=['fast-forward']) if annex: # we got to see the copy result for the annexed files assert_in_results( res, action='copy', status='ok', path=str(src.pathobj / 'test_mod_annex_file')) # we published, so we can drop and reobtain ok_(src_repo.file_has_content('test_mod_annex_file')) src_repo.drop('test_mod_annex_file') ok_(not src_repo.file_has_content('test_mod_annex_file')) src_repo.get('test_mod_annex_file') ok_(src_repo.file_has_content('test_mod_annex_file')) ok_file_has_content( src_repo.pathobj / 'test_mod_annex_file', 'Heavy stuff.') eq_(list(target.get_branch_commits_(DEFAULT_BRANCH)), list(src_repo.get_branch_commits_(DEFAULT_BRANCH))) if not (annex and src_repo.is_managed_branch()): # the following doesn't make sense in managed branches, because # a commit that could be amended is no longer the last commit # of a branch after a sync has happened (which did happen # during the last push above # amend and change commit msg in order to test for force push: src_repo.commit("amended", options=['--amend']) # push should be rejected (non-fast-forward): res = src.push(to='target', since='HEAD~2', on_failure='ignore') # fails before even touching the annex branch assert_in_results( res, action='publish', status='error', target='target', refspec=DEFAULT_REFSPEC, operations=['rejected', 'error']) # push with force=True works: res = src.push(to='target', since='HEAD~2', force='gitpush') assert_in_results( res, action='publish', status='ok', target='target', refspec=DEFAULT_REFSPEC, operations=['forced-update']) eq_(list(target.get_branch_commits_(DEFAULT_BRANCH)), list(src_repo.get_branch_commits_(DEFAULT_BRANCH))) # we do not have more branches than we had in the beginning # in particular no 'synced/<default branch>' eq_(orig_branches, src_repo.get_branches()) @pytest.mark.parametrize("annex", [False, True]) def test_push(annex): check_push(annex) def check_datasets_order(res, order='bottom-up'): """Check that all type=dataset records not violating the expected order it is somewhat weak test, i.e. records could be produced so we do not detect that order is violated, e.g. a/b c/d would satisfy either although they might be neither depth nor breadth wise. 
But this test would allow to catch obvious violations like a, a/b, a """ prev = None for r in res: if r.get('type') != 'dataset': continue if prev and r['path'] != prev: if order == 'bottom-up': assert_false(path_startswith(r['path'], prev)) elif order == 'top-down': assert_false(path_startswith(prev, r['path'])) else: raise ValueError(order) prev = r['path'] @slow # 33sec on Yarik's laptop @with_tempfile @with_tempfile(mkdir=True) @with_tempfile(mkdir=True) @with_tempfile(mkdir=True, suffix='sub') @with_tempfile(mkdir=True, suffix='subnoannex') @with_tempfile(mkdir=True, suffix='subsub') def test_push_recursive( origin_path=None, src_path=None, dst_top=None, dst_sub=None, dst_subnoannex=None, dst_subsub=None): # dataset with two submodules and one subsubmodule origin = Dataset(origin_path).create() origin_subm1 = origin.create('sub m') origin_subm1.create('subsub m') origin.create('subm noannex', annex=False) origin.save() assert_repo_status(origin.path) # prepare src as a fresh clone with all subdatasets checkout out recursively # running on a clone should make the test scenario more different than # test_push(), even for the pieces that should be identical top = Clone.__call__(source=origin.path, path=src_path) subs = top.get('.', recursive=True, get_data=False, result_xfm='datasets') # order for '.' should not be relied upon, so sort by path sub, subsub, subnoannex = sorted(subs, key=lambda ds: ds.path) target_top = mk_push_target(top, 'target', dst_top, annex=True) # subdatasets have no remote yet, so recursive publishing should fail: res = top.push(to="target", recursive=True, on_failure='ignore') check_datasets_order(res) assert_in_results( res, path=top.path, type='dataset', refspec=DEFAULT_REFSPEC, operations=['new-branch'], action='publish', status='ok', target='target') for d in (sub, subsub, subnoannex): assert_in_results( res, status='error', type='dataset', path=d.path, message=("Unknown target sibling '%s'.", 'target')) # now fix that and set up targets for the submodules target_sub = mk_push_target(sub, 'target', dst_sub, annex=True) target_subnoannex = mk_push_target( subnoannex, 'target', dst_subnoannex, annex=False) target_subsub = mk_push_target(subsub, 'target', dst_subsub, annex=True) # and same push call as above res = top.push(to="target", recursive=True) check_datasets_order(res) # topds skipped assert_in_results( res, path=top.path, type='dataset', action='publish', status='notneeded', target='target') # the rest pushed for d in (sub, subsub, subnoannex): assert_in_results( res, status='ok', type='dataset', path=d.path, refspec=DEFAULT_REFSPEC) # all corresponding branches match across all datasets for s, d in zip((top, sub, subnoannex, subsub), (target_top, target_sub, target_subnoannex, target_subsub)): eq_(list(s.repo.get_branch_commits_(DEFAULT_BRANCH)), list(d.get_branch_commits_(DEFAULT_BRANCH))) if s != subnoannex: eq_(list(s.repo.get_branch_commits_("git-annex")), list(d.get_branch_commits_("git-annex"))) # rerun should not result in further pushes of the default branch res = top.push(to="target", recursive=True) check_datasets_order(res) assert_not_in_results( res, status='ok', refspec=DEFAULT_REFSPEC) assert_in_results( res, status='notneeded', refspec=DEFAULT_REFSPEC) # now annex a file in subsub test_copy_file = subsub.pathobj / 'test_mod_annex_file' test_copy_file.write_text("Heavy stuff.") # save all the way up assert_status( ('ok', 'notneeded'), top.save(message='subsub got something', recursive=True)) assert_repo_status(top.path) # publish straight 
up, should be smart by default res = top.push(to="target", recursive=True) check_datasets_order(res) # we see 3 out of 4 datasets pushed (sub noannex was left unchanged) for d in (top, sub, subsub): assert_in_results( res, status='ok', type='dataset', path=d.path, refspec=DEFAULT_REFSPEC) # file content copied too assert_in_results( res, action='copy', status='ok', path=str(test_copy_file)) # verify it is accessible, drop and bring back assert_status('ok', top.drop(str(test_copy_file))) ok_(not subsub.repo.file_has_content('test_mod_annex_file')) top.get(test_copy_file) ok_file_has_content(test_copy_file, 'Heavy stuff.') # make two modification (sub.pathobj / 'test_mod_annex_file').write_text('annex') (subnoannex.pathobj / 'test_mod_file').write_text('git') # save separately top.save(sub.pathobj, message='annexadd', recursive=True) top.save(subnoannex.pathobj, message='gitadd', recursive=True) # now only publish the latter one res = top.push(to="target", since=DEFAULT_BRANCH + '~1', recursive=True) # nothing copied, no reports on the other modification assert_not_in_results(res, action='copy') assert_not_in_results(res, path=sub.path) for d in (top, subnoannex): assert_in_results( res, status='ok', type='dataset', path=d.path, refspec=DEFAULT_REFSPEC) # an unconditional push should now pick up the remaining changes res = top.push(to="target", recursive=True) assert_in_results( res, action='copy', status='ok', path=str(sub.pathobj / 'test_mod_annex_file')) assert_in_results( res, status='ok', type='dataset', path=sub.path, refspec=DEFAULT_REFSPEC) for d in (top, subnoannex, subsub): assert_in_results( res, status='notneeded', type='dataset', path=d.path, refspec=DEFAULT_REFSPEC) # if noannex target gets some annex, we still should not fail to push target_subnoannex.call_git(['annex', 'init']) # just to ensure that we do need something to push (subnoannex.pathobj / "newfile").write_text("content") subnoannex.save() res = subnoannex.push(to="target") assert_in_results(res, status='ok', type='dataset') @slow # 12sec on Yarik's laptop @with_tempfile(mkdir=True) @with_tempfile(mkdir=True) @with_tempfile(mkdir=True) @with_tempfile(mkdir=True) def test_push_subds_no_recursion(src_path=None, dst_top=None, dst_sub=None, dst_subsub=None): # dataset with one submodule and one subsubmodule top = Dataset(src_path).create() sub = top.create('sub m') test_file = sub.pathobj / 'subdir' / 'test_file' test_file.parent.mkdir() test_file.write_text('some') subsub = sub.create(sub.pathobj / 'subdir' / 'subsub m') top.save(recursive=True) assert_repo_status(top.path) target_top = mk_push_target(top, 'target', dst_top, annex=True) target_sub = mk_push_target(sub, 'target', dst_sub, annex=True) target_subsub = mk_push_target(subsub, 'target', dst_subsub, annex=True) # now publish, but NO recursion, instead give the parent dir of # both a subdataset and a file in the middle subdataset res = top.push( to='target', # give relative to top dataset to elevate the difficulty a little path=str(test_file.relative_to(top.pathobj).parent)) assert_status('ok', res) assert_in_results(res, action='publish', type='dataset', path=top.path) assert_in_results(res, action='publish', type='dataset', path=sub.path) assert_in_results(res, action='copy', type='file', path=str(test_file)) # the lowest-level subdataset isn't touched assert_not_in_results( res, action='publish', type='dataset', path=subsub.path) @with_tempfile(mkdir=True) @with_tempfile(mkdir=True) def test_force_checkdatapresent(srcpath=None, dstpath=None): src = 
Dataset(srcpath).create() target = mk_push_target(src, 'target', dstpath, annex=True, bare=True) (src.pathobj / 'test_mod_annex_file').write_text("Heavy stuff.") src.save(to_git=False, message="New annex file") assert_repo_status(src.path, annex=True) whereis_prior = src.repo.whereis(files=['test_mod_annex_file'])[0] res = src.push(to='target', data='nothing') # nothing reported to be copied assert_not_in_results(res, action='copy') # we got the git-push nevertheless eq_(src.repo.get_hexsha(DEFAULT_BRANCH), target.get_hexsha(DEFAULT_BRANCH)) # nothing moved eq_(whereis_prior, src.repo.whereis(files=['test_mod_annex_file'])[0]) # now a push without forced no-transfer # we do not give since, so the non-transfered file is picked up # and transferred res = src.push(to='target', force=None) # no branch change, done before assert_in_results(res, action='publish', status='notneeded', refspec=DEFAULT_REFSPEC) # but availability update assert_in_results(res, action='publish', status='ok', refspec='refs/heads/git-annex:refs/heads/git-annex') assert_in_results(res, status='ok', path=str(src.pathobj / 'test_mod_annex_file'), action='copy') # whereis info reflects the change ok_(len(whereis_prior) < len( src.repo.whereis(files=['test_mod_annex_file'])[0])) # do it yet again will do nothing, because all is up-to-date assert_status('notneeded', src.push(to='target', force=None)) # an explicit reference point doesn't change that assert_status('notneeded', src.push(to='target', force=None, since='HEAD~1')) # now force data transfer res = src.push(to='target', force='checkdatapresent') # no branch change, done before assert_in_results(res, action='publish', status='notneeded', refspec=DEFAULT_REFSPEC) # no availability update assert_in_results(res, action='publish', status='notneeded', refspec='refs/heads/git-annex:refs/heads/git-annex') # but data transfer assert_in_results(res, status='ok', path=str(src.pathobj / 'test_mod_annex_file'), action='copy') # force data transfer, but data isn't available src.repo.drop('test_mod_annex_file') res = src.push(to='target', path='.', force='checkdatapresent', on_failure='ignore') assert_in_results(res, status='impossible', path=str(src.pathobj / 'test_mod_annex_file'), action='copy', message='Slated for transport, but no content present') @known_failure_githubci_win @with_tempfile(mkdir=True) @with_tree(tree={'ria-layout-version': '1\n'}) def test_ria_push(srcpath=None, dstpath=None): # complex test involving a git remote, a special remote, and a # publication dependency src = Dataset(srcpath).create() testfile = src.pathobj / 'test_mod_annex_file' testfile.write_text("Heavy stuff.") src.save() assert_status( 'ok', src.create_sibling_ria( "ria+{}".format(get_local_file_url(dstpath, compatibility='git')), "datastore", new_store_ok=True)) res = src.push(to='datastore') assert_in_results( res, action='publish', target='datastore', status='ok', refspec=DEFAULT_REFSPEC) assert_in_results( res, action='publish', target='datastore', status='ok', refspec='refs/heads/git-annex:refs/heads/git-annex') assert_in_results( res, action='copy', target='datastore-storage', status='ok', path=str(testfile)) @with_tempfile(mkdir=True) @with_tempfile(mkdir=True) def test_gh1426(origin_path=None, target_path=None): # set up a pair of repos, one the published copy of the other origin = Dataset(origin_path).create() target = mk_push_target( origin, 'target', target_path, annex=True, bare=False) origin.push(to='target') assert_repo_status(origin.path) assert_repo_status(target.path) 
eq_(origin.repo.get_hexsha(DEFAULT_BRANCH), target.get_hexsha(DEFAULT_BRANCH)) # gist of #1426 is that a newly added subdataset does not cause the # superdataset to get published origin.create('sub') assert_repo_status(origin.path) neq_(origin.repo.get_hexsha(DEFAULT_BRANCH), target.get_hexsha(DEFAULT_BRANCH)) # now push res = origin.push(to='target') assert_result_count( res, 1, status='ok', type='dataset', path=origin.path, action='publish', target='target', operations=['fast-forward']) eq_(origin.repo.get_hexsha(DEFAULT_BRANCH), target.get_hexsha(DEFAULT_BRANCH)) @skip_if_adjusted_branch # gh-4075 @skip_if_on_windows # create_sibling incompatible with win servers @skip_ssh @with_tree(tree={'1': '123'}) @with_tempfile(mkdir=True) @serve_path_via_http def test_publish_target_url(src=None, desttop=None, desturl=None): # https://github.com/datalad/datalad/issues/1762 ds = Dataset(src).create(force=True) ds.save('1') ds.create_sibling('ssh://datalad-test:%s/subdir' % desttop, name='target', target_url=desturl + 'subdir/.git') results = ds.push(to='target') assert results ok_file_has_content(Path(desttop, 'subdir', '1'), '123') @with_tempfile(mkdir=True) @with_tempfile() @with_tempfile() def test_gh1763(src=None, target1=None, target2=None): # this test is very similar to test_publish_depends, but more # comprehensible, and directly tests issue 1763 src = Dataset(src).create(force=True) target1 = mk_push_target(src, 'target1', target1, bare=False) target2 = mk_push_target(src, 'target2', target2, bare=False) src.siblings('configure', name='target2', publish_depends='target1', result_renderer='disabled') # a file to annex (src.pathobj / 'probe1').write_text('probe1') src.save('probe1', to_git=False) # make sure the probe is annexed, not straight in Git assert_in('probe1', src.repo.get_annexed_files(with_content_only=True)) # publish to target2, must handle dependency src.push(to='target2') for target in (target1, target2): # with a managed branch we are pushing into the corresponding branch # and do not see a change in the worktree if not target.is_managed_branch(): # direct test for what is in the checkout assert_in( 'probe1', target.get_annexed_files(with_content_only=True)) # ensure git-annex knows this target has the file assert_in(target.config.get('annex.uuid'), src.repo.whereis(['probe1'])[0]) @with_tempfile() @with_tempfile() def test_gh1811(srcpath=None, clonepath=None): orig = Dataset(srcpath).create() (orig.pathobj / 'some').write_text('some') orig.save() clone = Clone.__call__(source=orig.path, path=clonepath) (clone.pathobj / 'somemore').write_text('somemore') clone.save() clone.repo.call_git(['checkout', 'HEAD~1']) res = clone.push(to=DEFAULT_REMOTE, on_failure='ignore') assert_result_count(res, 1) assert_result_count( res, 1, path=clone.path, type='dataset', action='publish', status='impossible', message='There is no active branch, cannot determine remote ' 'branch', ) # FIXME: on crippled FS post-update hook enabling via create-sibling doesn't # work ATM @skip_if_adjusted_branch @with_tempfile() @with_tempfile() def test_push_wanted(srcpath=None, dstpath=None): src = Dataset(srcpath).create() (src.pathobj / 'data.0').write_text('0') (src.pathobj / 'secure.1').write_text('1') (src.pathobj / 'secure.2').write_text('2') src.save() # Dropping a file to mimic a case of simply not having it locally (thus not # to be "pushed") src.drop('secure.2', reckless='kill') # Annotate sensitive content, actual value "verysecure" does not matter in # this example src.repo.set_metadata( 
add={'distribution-restrictions': 'verysecure'}, files=['secure.1', 'secure.2']) src.create_sibling( dstpath, annex_wanted="not metadata=distribution-restrictions=*", name='target', ) # check that wanted is obeyed, since set in sibling configuration res = src.push(to='target') assert_in_results( res, action='copy', path=str(src.pathobj / 'data.0'), status='ok') for p in ('secure.1', 'secure.2'): assert_not_in_results(res, path=str(src.pathobj / p)) assert_status('notneeded', src.push(to='target')) # check the target to really make sure dst = Dataset(dstpath) # normal file, yes eq_((dst.pathobj / 'data.0').read_text(), '0') # secure file, no if dst.repo.is_managed_branch(): neq_((dst.pathobj / 'secure.1').read_text(), '1') else: assert_raises(FileNotFoundError, (dst.pathobj / 'secure.1').read_text) # reset wanted config, which must enable push of secure file src.repo.set_preferred_content('wanted', '', remote='target') res = src.push(to='target') assert_in_results(res, path=str(src.pathobj / 'secure.1')) eq_((dst.pathobj / 'secure.1').read_text(), '1') # FIXME: on crippled FS post-update hook enabling via create-sibling doesn't # work ATM @skip_if_adjusted_branch @slow # 10sec on Yarik's laptop @with_tempfile(mkdir=True) def test_auto_data_transfer(path=None): path = Path(path) ds_a = Dataset(path / "a").create() (ds_a.pathobj / "foo.dat").write_text("foo") ds_a.save() # Should be the default, but just in case. ds_a.repo.config.set("annex.numcopies", "1", scope="local") ds_a.create_sibling(str(path / "b"), name="b") # With numcopies=1, no data is copied with data="auto". res = ds_a.push(to="b", data="auto", since=None) assert_not_in_results(res, action="copy") # Even when a file is explicitly given. res = ds_a.push(to="b", path="foo.dat", data="auto", since=None) assert_not_in_results(res, action="copy") # numcopies=2 changes that. ds_a.repo.config.set("annex.numcopies", "2", scope="local") res = ds_a.push(to="b", data="auto", since=None) assert_in_results( res, action="copy", target="b", status="ok", path=str(ds_a.pathobj / "foo.dat")) # --since= limits the files considered by --auto. (ds_a.pathobj / "bar.dat").write_text("bar") ds_a.save() (ds_a.pathobj / "baz.dat").write_text("baz") ds_a.save() res = ds_a.push(to="b", data="auto", since="HEAD~1") assert_not_in_results( res, action="copy", path=str(ds_a.pathobj / "bar.dat")) assert_in_results( res, action="copy", target="b", status="ok", path=str(ds_a.pathobj / "baz.dat")) # --auto also considers preferred content. 
ds_a.repo.config.unset("annex.numcopies", scope="local") ds_a.repo.set_preferred_content("wanted", "nothing", remote="b") res = ds_a.push(to="b", data="auto", since=None) assert_not_in_results( res, action="copy", path=str(ds_a.pathobj / "bar.dat")) ds_a.repo.set_preferred_content("wanted", "anything", remote="b") res = ds_a.push(to="b", data="auto", since=None) assert_in_results( res, action="copy", target="b", status="ok", path=str(ds_a.pathobj / "bar.dat")) # FIXME: on crippled FS post-update hook enabling via create-sibling doesn't # work ATM @skip_if_adjusted_branch @slow # 16sec on Yarik's laptop @with_tempfile(mkdir=True) def test_auto_if_wanted_data_transfer_path_restriction(path=None): path = Path(path) ds_a = Dataset(path / "a").create() ds_a_sub0 = ds_a.create("sub0") ds_a_sub1 = ds_a.create("sub1") for ds in [ds_a, ds_a_sub0, ds_a_sub1]: (ds.pathobj / "sec.dat").write_text("sec") (ds.pathobj / "reg.dat").write_text("reg") ds_a.save(recursive=True) ds_a.create_sibling(str(path / "b"), name="b", annex_wanted="not metadata=distribution-restrictions=*", recursive=True) for ds in [ds_a, ds_a_sub0, ds_a_sub1]: ds.repo.set_metadata(add={"distribution-restrictions": "doesntmatter"}, files=["sec.dat"]) # wanted-triggered --auto can be restricted to subdataset... res = ds_a.push(to="b", path="sub0", data="auto-if-wanted", recursive=True) assert_not_in_results( res, action="copy", target="b", status="ok", path=str(ds_a.pathobj / "reg.dat")) assert_in_results( res, action="copy", target="b", status="ok", path=str(ds_a_sub0.pathobj / "reg.dat")) assert_not_in_results( res, action="copy", target="b", status="ok", path=str(ds_a_sub0.pathobj / "sec.dat")) assert_not_in_results( res, action="copy", target="b", status="ok", path=str(ds_a_sub1.pathobj / "reg.dat")) # ... and to a wanted file. res = ds_a.push(to="b", path="reg.dat", data="auto-if-wanted", recursive=True) assert_in_results( res, action="copy", target="b", status="ok", path=str(ds_a.pathobj / "reg.dat")) assert_not_in_results( res, action="copy", target="b", status="ok", path=str(ds_a_sub1.pathobj / "reg.dat")) # But asking to transfer a file does not do it if the remote has a # wanted setting and doesn't want it. 
res = ds_a.push(to="b", path="sec.dat", data="auto-if-wanted", recursive=True) assert_not_in_results( res, action="copy", target="b", status="ok", path=str(ds_a.pathobj / "sec.dat")) res = ds_a.push(to="b", path="sec.dat", data="anything", recursive=True) assert_in_results( res, action="copy", target="b", status="ok", path=str(ds_a.pathobj / "sec.dat")) @with_tempfile(mkdir=True) def test_push_git_annex_branch_when_no_data(path=None): path = Path(path) ds = Dataset(path / "a").create() target = mk_push_target(ds, "target", str(path / "target"), annex=False, bare=True) (ds.pathobj / "f0").write_text("0") ds.save() ds.push(to="target", data="nothing") assert_in("git-annex", {d["refname:strip=2"] for d in target.for_each_ref_(fields="refname:strip=2")}) @known_failure_githubci_osx @with_tree(tree={"ds": {"f0": "0", "f1": "0", "f2": "0", "f3": "1", "f4": "2", "f5": "2"}}) def test_push_git_annex_branch_many_paths_same_data(path=None): path = Path(path) ds = Dataset(path / "ds").create(force=True) ds.save() mk_push_target(ds, "target", str(path / "target"), annex=True, bare=False) nbytes = sum(ds.repo.get_content_annexinfo(paths=[f])[f]["bytesize"] for f in [ds.repo.pathobj / "f0", ds.repo.pathobj / "f3", ds.repo.pathobj / "f4"]) with swallow_logs(new_level=logging.DEBUG) as cml: res = ds.push(to="target") assert_in("{} bytes of annex data".format(nbytes), cml.out) # 3 files point to content already covered by another file. assert_result_count(res, 3, action="copy", type="file", status="notneeded") @known_failure_githubci_osx @with_tree(tree={"ds": {"f0": "0"}}) def test_push_matching(path=None): path = Path(path) ds = Dataset(path / "ds").create(force=True) ds.config.set('push.default', 'matching', scope='local') ds.save() remote_ds = mk_push_target(ds, 'local', str(path / 'dssibling'), annex=True, bare=False) # that fact that the next one even runs makes sure that we are in a better # place than https://github.com/datalad/datalad/issues/4888 ds.push(to='local') # and we pushed the commit in the current branch eq_(remote_ds.get_hexsha(DEFAULT_BRANCH), ds.repo.get_hexsha(DEFAULT_BRANCH)) @known_failure_githubci_win # https://github.com/datalad/datalad/issues/5271 @with_tempfile(mkdir=True) @with_tempfile(mkdir=True) @with_tempfile(mkdir=True) def test_nested_pushclone_cycle_allplatforms(origpath=None, storepath=None, clonepath=None): if 'DATALAD_SEED' in os.environ: # we are using create-sibling-ria via the cmdline in here # this will create random UUIDs for datasets # however, given a fixed seed each call to this command will start # with the same RNG seed, hence yield the same UUID on the same # machine -- leading to a collision raise SkipTest( 'Test incompatible with fixed random number generator seed') # the aim here is this high-level test a std create-push-clone cycle for a # dataset with a subdataset, with the goal to ensure that correct branches # and commits are tracked, regardless of platform behavior and condition # of individual clones. 
Nothing fancy, just that the defaults behave in # sensible ways from datalad.cmd import WitlessRunner as Runner run = Runner().run # create original nested dataset with chpwd(origpath): run(['datalad', 'create', 'super']) run(['datalad', 'create', '-d', 'super', str(Path('super', 'sub'))]) # verify essential linkage properties orig_super = Dataset(Path(origpath, 'super')) orig_sub = Dataset(orig_super.pathobj / 'sub') (orig_super.pathobj / 'file1.txt').write_text('some1') (orig_sub.pathobj / 'file2.txt').write_text('some1') with chpwd(orig_super.path): run(['datalad', 'save', '--recursive']) # TODO not yet reported clean with adjusted branches #assert_repo_status(orig_super.path) # the "true" branch that sub is on, and the gitsha of the HEAD commit of it orig_sub_corr_branch = \ orig_sub.repo.get_corresponding_branch() or orig_sub.repo.get_active_branch() orig_sub_corr_commit = orig_sub.repo.get_hexsha(orig_sub_corr_branch) # make sure the super trackes this commit assert_in_results( orig_super.subdatasets(), path=orig_sub.path, gitshasum=orig_sub_corr_commit, # TODO it should also track the branch name # Attempted: https://github.com/datalad/datalad/pull/3817 # But reverted: https://github.com/datalad/datalad/pull/4375 ) # publish to a store, to get into a platform-agnostic state # (i.e. no impact of an annex-init of any kind) store_url = 'ria+' + get_local_file_url(storepath) with chpwd(orig_super.path): run(['datalad', 'create-sibling-ria', '--recursive', '-s', 'store', store_url, '--new-store-ok']) run(['datalad', 'push', '--recursive', '--to', 'store']) # we are using the 'store' sibling's URL, which should be a plain path store_super = AnnexRepo(orig_super.siblings(name='store')[0]['url'], init=False) store_sub = AnnexRepo(orig_sub.siblings(name='store')[0]['url'], init=False) # both datasets in the store only carry the real branches, and nothing # adjusted for r in (store_super, store_sub): eq_(set(r.get_branches()), set([orig_sub_corr_branch, 'git-annex'])) # and reobtain from a store cloneurl = 'ria+' + get_local_file_url(str(storepath), compatibility='git') with chpwd(clonepath): run(['datalad', 'clone', cloneurl + '#' + orig_super.id, 'super']) run(['datalad', '-C', 'super', 'get', '--recursive', '.']) # verify that nothing has changed as a result of a push/clone cycle clone_super = Dataset(Path(clonepath, 'super')) clone_sub = Dataset(clone_super.pathobj / 'sub') assert_in_results( clone_super.subdatasets(), path=clone_sub.path, gitshasum=orig_sub_corr_commit, ) for ds1, ds2, f in ((orig_super, clone_super, 'file1.txt'), (orig_sub, clone_sub, 'file2.txt')): eq_((ds1.pathobj / f).read_text(), (ds2.pathobj / f).read_text()) # get status info that does not recursive into subdatasets, i.e. not # looking for uncommitted changes # we should see no modification reported assert_not_in_results( clone_super.status(eval_subdataset_state='commit'), state='modified') # and now the same for a more expensive full status assert_not_in_results( clone_super.status(recursive=True), state='modified') @with_tempfile def test_push_custom_summary(path=None): path = Path(path) ds = Dataset(path / "ds").create() sib = mk_push_target(ds, "sib", str(path / "sib"), bare=False, annex=False) (sib.pathobj / "f1").write_text("f1") sib.save() (ds.pathobj / "f2").write_text("f2") ds.save() # These options are true by default and our tests usually run with a # temporary home, but set them to be sure. 
ds.config.set("advice.pushUpdateRejected", "true", scope="local") ds.config.set("advice.pushFetchFirst", "true", scope="local") with swallow_outputs() as cmo: ds.push(to="sib", result_renderer="default", on_failure="ignore") assert_in("Hints:", cmo.out) assert_in("action summary:", cmo.out)
[]
[]
[]
[]
[]
python
0
0
mongo/integration/mtest/setup.go
// Copyright (C) MongoDB, Inc. 2017-present. // // Licensed under the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 package mtest import ( "context" "errors" "fmt" "math" "os" "strconv" "strings" "time" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readpref" "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/connstring" "go.mongodb.org/mongo-driver/x/mongo/driver/ocsp" "go.mongodb.org/mongo-driver/x/mongo/driver/topology" ) const ( // TestDb specifies the name of default test database. TestDb = "test" ) // testContext holds the global context for the integration tests. The testContext members should only be initialized // once during the global setup in TestMain. These variables should only be accessed indirectly through MongoTest // instances. var testContext struct { connString connstring.ConnString topo *topology.Topology topoKind TopologyKind // shardedReplicaSet will be true if we're connected to a sharded cluster and each shard is backed by a replica set. // We track this as a separate boolean rather than setting topoKind to ShardedReplicaSet because a general // "Sharded" constraint in a test should match both Sharded and ShardedReplicaSet. shardedReplicaSet bool client *mongo.Client // client used for setup and teardown serverVersion string authEnabled bool sslEnabled bool enterpriseServer bool dataLake bool requireAPIVersion bool serverParameters bson.Raw } func setupClient(cs connstring.ConnString, opts *options.ClientOptions) (*mongo.Client, error) { wcMajority := writeconcern.New(writeconcern.WMajority()) // set ServerAPIOptions to latest version if required if opts.ServerAPIOptions == nil && testContext.requireAPIVersion { opts.SetServerAPIOptions(options.ServerAPI(driver.TestServerAPIVersion)) } // for sharded clusters, pin to one host return mongo.Connect(Background, opts.ApplyURI(cs.Original).SetWriteConcern(wcMajority).SetHosts(cs.Hosts[:1])) } // Setup initializes the current testing context. // This function must only be called one time and must be called before any tests run. func Setup(setupOpts ...*SetupOptions) error { opts := MergeSetupOptions(setupOpts...) var err error switch { case opts.URI != nil: testContext.connString, err = connstring.ParseAndValidate(*opts.URI) default: testContext.connString, err = getConnString() } if err != nil { return fmt.Errorf("error getting connection string: %v", err) } testContext.dataLake = os.Getenv("ATLAS_DATA_LAKE_INTEGRATION_TEST") == "true" testContext.requireAPIVersion = os.Getenv("REQUIRE_API_VERSION") == "true" connectionOpts := []topology.ConnectionOption{ topology.WithOCSPCache(func(ocsp.Cache) ocsp.Cache { return ocsp.NewCache() }), } serverOpts := []topology.ServerOption{ topology.WithConnectionOptions(func(opts ...topology.ConnectionOption) []topology.ConnectionOption { return append(opts, connectionOpts...) 
}), } if testContext.requireAPIVersion { serverOpts = append(serverOpts, topology.WithServerAPI(func(*driver.ServerAPIOptions) *driver.ServerAPIOptions { return driver.NewServerAPIOptions(driver.TestServerAPIVersion) }), ) } testContext.topo, err = topology.New( topology.WithConnString(func(connstring.ConnString) connstring.ConnString { return testContext.connString }), topology.WithServerOptions(func(opts ...topology.ServerOption) []topology.ServerOption { return append(opts, serverOpts...) }), ) if err != nil { return fmt.Errorf("error creating topology: %v", err) } if err = testContext.topo.Connect(); err != nil { return fmt.Errorf("error connecting topology: %v", err) } testContext.client, err = setupClient(testContext.connString, options.Client()) if err != nil { return fmt.Errorf("error connecting test client: %v", err) } pingCtx, cancel := context.WithTimeout(Background, 2*time.Second) defer cancel() if err := testContext.client.Ping(pingCtx, readpref.Primary()); err != nil { return fmt.Errorf("ping error: %v; make sure the deployment is running on URI %v", err, testContext.connString.Original) } if testContext.serverVersion, err = getServerVersion(); err != nil { return fmt.Errorf("error getting server version: %v", err) } switch testContext.topo.Kind() { case description.Single: testContext.topoKind = Single case description.ReplicaSet, description.ReplicaSetWithPrimary, description.ReplicaSetNoPrimary: testContext.topoKind = ReplicaSet case description.Sharded: testContext.topoKind = Sharded default: return fmt.Errorf("could not detect topology kind; current topology: %s", testContext.topo.String()) } // If we're connected to a sharded cluster, determine if the cluster is backed by replica sets. if testContext.topoKind == Sharded { // Run a find against config.shards and get each document in the collection. cursor, err := testContext.client.Database("config").Collection("shards").Find(Background, bson.D{}) if err != nil { return fmt.Errorf("error running find against config.shards: %v", err) } defer cursor.Close(Background) var shards []struct { Host string `bson:"host"` } if err := cursor.All(Background, &shards); err != nil { return fmt.Errorf("error getting results find against config.shards: %v", err) } // Each document's host field will contain a single hostname if the shard is a standalone. If it's a replica // set, the host field will be in the format "replicaSetName/host1,host2,...". Therefore, we can determine that // the shard is a standalone if the "/" character isn't present. 
var foundStandalone bool for _, shard := range shards { if strings.Index(shard.Host, "/") == -1 { foundStandalone = true break } } if !foundStandalone { testContext.shardedReplicaSet = true } } if testContext.topoKind == ReplicaSet && CompareServerVersions(testContext.serverVersion, "4.0") >= 0 { err = testContext.client.Database("admin").RunCommand(Background, bson.D{ {"setParameter", 1}, {"transactionLifetimeLimitSeconds", 3}, }).Err() if err != nil { return fmt.Errorf("error setting transactionLifetimeLimitSeconds: %v", err) } } testContext.authEnabled = os.Getenv("AUTH") == "auth" testContext.sslEnabled = os.Getenv("SSL") == "ssl" biRes, err := testContext.client.Database("admin").RunCommand(Background, bson.D{{"buildInfo", 1}}).DecodeBytes() if err != nil { return fmt.Errorf("buildInfo error: %v", err) } modulesRaw, err := biRes.LookupErr("modules") if err == nil { // older server versions don't report "modules" field in buildInfo result modules, _ := modulesRaw.Array().Values() for _, module := range modules { if module.StringValue() == "enterprise" { testContext.enterpriseServer = true break } } } // Get server parameters if test is not running against ADL; ADL does not have "getParameter" command. if !testContext.dataLake { db := testContext.client.Database("admin") testContext.serverParameters, err = db.RunCommand(Background, bson.D{{"getParameter", "*"}}).DecodeBytes() if err != nil { return fmt.Errorf("error getting serverParameters: %v", err) } } return nil } // Teardown cleans up resources initialized by Setup. // This function must be called once after all tests have finished running. func Teardown() error { // Dropping the test database causes an error against Atlas Data Lake. if !testContext.dataLake { if err := testContext.client.Database(TestDb).Drop(Background); err != nil { return fmt.Errorf("error dropping test database: %v", err) } } if err := testContext.client.Disconnect(Background); err != nil { return fmt.Errorf("error disconnecting test client: %v", err) } if err := testContext.topo.Disconnect(Background); err != nil { return fmt.Errorf("error disconnecting test topology: %v", err) } return nil } func getServerVersion() (string, error) { var serverStatus bson.Raw err := testContext.client.Database(TestDb).RunCommand( Background, bson.D{{"buildInfo", 1}}, ).Decode(&serverStatus) if err != nil { return "", err } version, err := serverStatus.LookupErr("version") if err != nil { return "", errors.New("no version string in serverStatus response") } return version.StringValue(), nil } // addOptions appends connection string options to a URI. func addOptions(uri string, opts ...string) string { if !strings.ContainsRune(uri, '?') { if uri[len(uri)-1] != '/' { uri += "/" } uri += "?" } else { uri += "&" } for _, opt := range opts { uri += opt } return uri } // addTLSConfig checks for the environmental variable indicating that the tests are being run // on an SSL-enabled server, and if so, returns a new URI with the necessary configuration. func addTLSConfig(uri string) string { caFile := os.Getenv("MONGO_GO_DRIVER_CA_FILE") if len(caFile) == 0 { return uri } return addOptions(uri, "ssl=true&sslCertificateAuthorityFile=", caFile) } // addCompressors checks for the environment variable indicating that the tests are being run with compression // enabled. 
If so, it returns a new URI with the necessary configuration func addCompressors(uri string) string { comp := os.Getenv("MONGO_GO_DRIVER_COMPRESSOR") if len(comp) == 0 { return uri } return addOptions(uri, "compressors=", comp) } // ConnString gets the globally configured connection string. func getConnString() (connstring.ConnString, error) { uri := os.Getenv("MONGODB_URI") if uri == "" { uri = "mongodb://localhost:27017" } uri = addTLSConfig(uri) uri = addCompressors(uri) return connstring.ParseAndValidate(uri) } // CompareServerVersions compares two version number strings (i.e. positive integers separated by // periods). Comparisons are done to the lesser precision of the two versions. For example, 3.2 is // considered equal to 3.2.11, whereas 3.2.0 is considered less than 3.2.11. // // Returns a positive int if version1 is greater than version2, a negative int if version1 is less // than version2, and 0 if version1 is equal to version2. func CompareServerVersions(v1 string, v2 string) int { n1 := strings.Split(v1, ".") n2 := strings.Split(v2, ".") for i := 0; i < int(math.Min(float64(len(n1)), float64(len(n2)))); i++ { i1, err := strconv.Atoi(n1[i]) if err != nil { return 1 } i2, err := strconv.Atoi(n2[i]) if err != nil { return -1 } difference := i1 - i2 if difference != 0 { return difference } } return 0 }
[ "\"ATLAS_DATA_LAKE_INTEGRATION_TEST\"", "\"REQUIRE_API_VERSION\"", "\"AUTH\"", "\"SSL\"", "\"MONGO_GO_DRIVER_CA_FILE\"", "\"MONGO_GO_DRIVER_COMPRESSOR\"", "\"MONGODB_URI\"" ]
[]
[ "REQUIRE_API_VERSION", "SSL", "AUTH", "ATLAS_DATA_LAKE_INTEGRATION_TEST", "MONGO_GO_DRIVER_CA_FILE", "MONGO_GO_DRIVER_COMPRESSOR", "MONGODB_URI" ]
[]
["REQUIRE_API_VERSION", "SSL", "AUTH", "ATLAS_DATA_LAKE_INTEGRATION_TEST", "MONGO_GO_DRIVER_CA_FILE", "MONGO_GO_DRIVER_COMPRESSOR", "MONGODB_URI"]
go
7
0
tests/flask/test_oauth2/oauth2_server.py
import os
import base64
import unittest
from flask import Flask, request
from authlib.common.security import generate_token
from authlib.common.encoding import to_bytes, to_unicode
from authlib.common.urls import url_encode
from authlib.flask.oauth2.sqla import (
    create_query_client_func,
    create_save_token_func,
)
from authlib.flask.oauth2 import AuthorizationServer
from authlib.oauth2 import OAuth2Error
from .models import db, User, Client, Token

os.environ['AUTHLIB_INSECURE_TRANSPORT'] = 'true'


def token_generator(client, grant_type, user=None, scope=None):
    token = '{}-{}'.format(client.client_id[0], grant_type)
    if user:
        token = '{}.{}'.format(token, user.get_user_id())
    return '{}.{}'.format(token, generate_token(32))


def create_authorization_server(app, lazy=False):
    query_client = create_query_client_func(db.session, Client)
    save_token = create_save_token_func(db.session, Token)

    if lazy:
        server = AuthorizationServer()
        server.init_app(app, query_client, save_token)
    else:
        server = AuthorizationServer(app, query_client, save_token)

    @app.route('/oauth/authorize', methods=['GET', 'POST'])
    def authorize():
        if request.method == 'GET':
            user_id = request.args.get('user_id')
            if user_id:
                end_user = User.query.get(int(user_id))
            else:
                end_user = None
            try:
                grant = server.validate_consent_request(end_user=end_user)
                return grant.prompt or 'ok'
            except OAuth2Error as error:
                return url_encode(error.get_body())
        user_id = request.form.get('user_id')
        if user_id:
            grant_user = User.query.get(int(user_id))
        else:
            grant_user = None
        return server.create_authorization_response(grant_user=grant_user)

    @app.route('/oauth/token', methods=['GET', 'POST'])
    def issue_token():
        return server.create_token_response()

    @app.route('/oauth/revoke', methods=['POST'])
    def revoke_token():
        return server.create_endpoint_response('revocation')

    @app.route('/oauth/introspect', methods=['POST'])
    def introspect_token():
        return server.create_endpoint_response('introspection')

    return server


def create_flask_app():
    app = Flask(__name__)
    app.debug = True
    app.testing = True
    app.secret_key = 'testing'
    app.config.update({
        'SQLALCHEMY_TRACK_MODIFICATIONS': False,
        'SQLALCHEMY_DATABASE_URI': 'sqlite://',
        'OAUTH2_ERROR_URIS': [
            ('invalid_client', 'https://a.b/e#invalid_client')
        ]
    })
    return app


class TestCase(unittest.TestCase):
    def setUp(self):
        app = create_flask_app()

        self._ctx = app.app_context()
        self._ctx.push()

        db.init_app(app)
        db.create_all()

        self.app = app
        self.client = app.test_client()

    def tearDown(self):
        db.drop_all()
        self._ctx.pop()

    def create_basic_header(self, username, password):
        text = '{}:{}'.format(username, password)
        auth = to_unicode(base64.b64encode(to_bytes(text)))
        return {'Authorization': 'Basic ' + auth}
[]
[]
[ "AUTHLIB_INSECURE_TRANSPORT" ]
[]
["AUTHLIB_INSECURE_TRANSPORT"]
python
1
0
pp/pp_item_server.py
import pp_keys import pp_lineage from pp_item import Item import cPickle import logging import os import time from jsonrpc import ServiceProxy USER_ACCOUNT_FILENAME = 'user_account.pickle' #BLOCK_DIRECTORY = '/home/than/windows-than/bitcoin_testnet/testnet-box/1/testnet3' BLOCK_DIRECTORY = '/home/' + os.environ['USER'] + '/.bitcoin' class ItemServer(): """ An issuer of items, among other things... It also maintains the heavy data structures and interfaces.""" def __init__(self, key_name, btc_proxy = 'http://test:123@localhost:19001', lineage_filename = 'lineage.pickle'): if pp_keys.ExportPubKey(key_name) == '': raise Exception('Unknown key name ' + key_name) self.key_name = key_name self.btc = ServiceProxy(btc_proxy) self.btc.getinfo() # Will raise an exception if we aren't connected # Load user accounts try: self.user_account = cPickle.load(file(USER_ACCOUNT_FILENAME)) except: logging.info('Starting with empty user accounts') self.user_account= {} # Load lineage try: self.lineage = cPickle.load(file(lineage_filename)) except: logging.info('Starting with empty lineage') self.lineage = pp_lineage.Lineage() logging.info('Loading block chain from' + BLOCK_DIRECTORY) self.lineage.UpdateDir(BLOCK_DIRECTORY) # Save the updated lineage (only at __init__). logging.info('Saving updated lineage...') cPickle.dump(self.lineage, file(lineage_filename, 'w'), -1) def CreateAccount(self, password): """ Clients create a new account by making a payment to an address. """ new_address = self.btc.getnewaddress() self.user_account[new_address] = password # Save the user accounts to disk. cPickle.dump(self.user_account, file('user_account.pickle', 'w'), -1) return new_address def VerifyAccount(self, btc_address, password, proof_sig): """ Clients log in by verifying their payment to a btc_address. """ # TODO: add a day-stamp to the text? if not btc_address in self.user_account: logging.warning('Unknown btc_address %s' % btc_address) return False if self.user_account[btc_address] != password: logging.warning('Password does not match for account %s' % btc_address) return False MIN_ACCOUNT_AMOUNT = 0.001 account_amount = self.btc.getreceivedbyaddress(btc_address, 1) if account_amount < MIN_ACCOUNT_AMOUNT: logging.warning('Address %s not sufficiently funded %f < %f' % (btc_address, account_amount, MIN_ACCOUNT_AMOUNT)) return False # TODO: actually verify that they can prove they made the (first?) payment. #return self.btc.verifymessage(btc_address, proof_sig, self.key_name) return True def Create(self, item_name, content): item = Item(item_name, content, self.key_name) return item def GrantToKey(self, item, key_name): """ Give an item to a particular owner: non-transferrable. """ if item.creator_key_name != self.key_name: logging.warning('This server is not the creator of the item %s' % item) return None if item.owner_key_name != '': logging.warning('This item has already been granted') return None item.owner_key_name = key_name # Sign the item to make it official. item.signature = pp_keys.Sign(item.signed_content(), self.key_name) return item def GrantToAddress(self, item, address, btc_value=0.01): """ Give an item to a bitcoin address: transferrable. """ if item.creator_key_name != self.key_name: logging.warning('This server is not the creator of the item %s' % item) return None if item.origin_tx_id != '': logging.warning('This item has already been granted') return None item.origin_tx_id = self.btc.sendtoaddress(address, btc_value) item.last_tx_id = item.origin_tx_id # Sign the item to make it official. 
item.signature = pp_keys.Sign(item.signed_content(), self.key_name) return item def Transfer(self, item, address, min_btc_confirmations=6, btc_fee=0.0005): if item.last_tx_id == '': print 'This item is not transferrable (no last tx id)' return False # NOTE: we assume the btc server knows of this this transaction because the # user is the owner of the item. last_tx = self.btc.gettransaction(item.last_tx_id) value = float(last_tx['amount']) confirmations = last_tx['confirmations'] if confirmations < min_btc_confirmations: print 'Not enough confirmations of last_tx:', print confirmations, '<', min_btc_confirmations return False # For the initial grant TX, vout will be 1, otherwise 0. vout = 0 if (item.origin_tx_id == item.last_tx_id): vout = 1 raw_tx = self.btc.createrawtransaction( [{"txid":item.last_tx_id, "vout":vout}], {address:value - btc_fee}) signed = self.btc.signrawtransaction(raw_tx) if signed['complete']: item.last_tx_id = self.btc.sendrawtransaction(signed['hex']) return True return False def Verify(self, item, proof_sig): """ Verify the integrity and ownership of the item: 1) The serialized item is properly signed, 2) and that the claimed owner also controls the heir bitcoin. """ # TODO: Check that the last tx has sufficient confirmations. # TODO: Support non-transferrable items. if item.origin_tx_id == '' or item.last_tx_id == '' or item.signature == '': print 'This item has not been granted' return False if not pp_keys.FindAndImportVerifiedKey(item.creator_key_name): print 'Could not find item creator key', item.creator_key_name return False if not pp_keys.Verify(item.signed_content(), item.signature): print 'Could not verify item signature' return False self.lineage.UpdateAll() if not self.lineage.VerifyLineage(item.last_tx_id, item.origin_tx_id): print 'Could not verify item lineage' return False last_address = self.lineage.GetOutputAddress(item.last_tx_id) if not last_address: print 'Could not find last tx output address' return False last_address_verified = self.btc.verifymessage( last_address, proof_sig, self.key_name) if not last_address_verified: print 'Could not verify ownership of last tx output address' return False return True def GenerateProofSig(self, item, target): """ Prove that this server owns the output of last_tx_id. """ self.lineage.UpdateAll() last_address = self.lineage.GetOutputAddress(item.last_tx_id) if not last_address: print 'Could not find last tx output address' return '' proof_sig = self.btc.signmessage(last_address, target) return proof_sig ############################################################################### if __name__ == '__main__': if 1: # Setup server1_name = 'Passe-Partout Shen' item_server1 = ItemServer(server1_name) server2_name = 'Passe-Partout Ando' item_server2 = ItemServer(server2_name, btc_proxy = 'http://test:123@localhost:19011' ) if 0: # Item tests if 0: # Creating item hat = item_server1.Create('hat', 'a fluffy hat') if 0: # Granting item btc_address2 = item_server2.btc.getnewaddress() # This should fail print 'Trying to grant from 2 (should fail)' item_server2.GrantToAddress(hat, btc_address2) # This should work print 'Trying to grant from 1 (should pass)' item_server1.GrantToAddress(hat, btc_address2) if 0: # Let the bitcoin server generate for a while. print 'Starting generation' item_server1.btc.setgenerate(True) print 'Sleeping for 20 seconds' time.sleep(20) item_server1.btc.setgenerate(False) if 0: # Proof sigs and verification print 'Verifying' # This should fail because server1 doesn't own hat. 
try: print 'Trying to generate proof sig from 1 (should fail)' proof_sig = item_server1.GenerateProofSig(hat, server1_name) print 'Should never see this! Proof sig:', proof_sig except: print 'Couldnt generate proof sig from server 1 (correct!)' # These should work print 'Trying to generate proof sig from 2' proof_sig = item_server2.GenerateProofSig(hat, server1_name) print 'Sig for 1' print 'Proof sig:', proof_sig print 'Verify 1:', item_server1.Verify(hat, proof_sig) print 'Verify 2:', item_server2.Verify(hat, proof_sig) proof_sig = item_server2.GenerateProofSig(hat, server2_name) print 'Sig for 2' print 'Proof sig:', proof_sig print 'Verify 1:', item_server1.Verify(hat, proof_sig) print 'Verify 2:', item_server2.Verify(hat, proof_sig) if 0: # Transferring item print 'Transferring to server 1' btc_address1 = item_server1.btc.getnewaddress() item_server2.Transfer(hat, btc_address1) if 0: # Let the bitcoin server generate for a while. print 'Starting generation' item_server1.btc.setgenerate(True) print 'Sleeping for 20 seconds' time.sleep(20) item_server1.btc.setgenerate(False) if 0: # Proof sigs and verification print 'Verifying' # This should fail because server1 doesn't own hat. try: print 'Trying to generate proof sig from 2 (should fail)' proof_sig = item_server2.GenerateProofSig(hat, server1_name) print 'Should never see this! Proof sig:', proof_sig except: print 'Couldnt generate proof sig from server 2 (correct!)' # These should work print 'Trying to generate proof sig from 1' proof_sig = item_server1.GenerateProofSig(hat, server1_name) print 'Sig for 1' print 'Proof sig:', proof_sig print 'Verify 1:', item_server1.Verify(hat, proof_sig) print 'Verify 2:', item_server2.Verify(hat, proof_sig) proof_sig = item_server1.GenerateProofSig(hat, server2_name) print 'Sig for 2' print 'Proof sig:', proof_sig print 'Verify 1:', item_server1.Verify(hat, proof_sig) print 'Verify 2:', item_server2.Verify(hat, proof_sig) if 1: # Account tests btc_address = item_server1.CreateAccount('kittens') print 'Trying to VerifyAccount, should be false:', print item_server1.VerifyAccount(btc_address, 'kittens', '') print 'Funding the account' item_server2.btc.sendtoaddress(btc_address, 0.1) if 1: # Let the bitcoin server generate for a while. print 'Starting generation' item_server1.btc.setgenerate(True) print 'Sleeping for 20 seconds' time.sleep(20) item_server1.btc.setgenerate(False) print 'Trying to VerifyAccount, should be true:', print item_server1.VerifyAccount(btc_address, 'kittens', '')
[]
[]
[ "USER" ]
[]
["USER"]
python
1
0
openNMT_integ_dataset.py
import os import torch import datasets import translation_models.model as tmm import translation_models.help_fun as transl_hf import onmt import model_manager import quantization import copy import pickle import functools import quantization.help_functions as qhf import helpers.functions as mhf cuda_devices = os.environ['CUDA_VISIBLE_DEVICES'].split(',') print('CUDA_VISIBLE_DEVICES: {} for a total of {}'.format(cuda_devices, len(cuda_devices))) datasets.BASE_DATA_FOLDER = '...' SAVED_MODELS_FOLDER = '...' USE_CUDA = torch.cuda.is_available() NUM_GPUS = len(cuda_devices) TRAIN_TEACHER_MODEL=False TRAIN_SMALLER_MODEL=False TRAIN_SEQUENCE_DISTILLED=False TRAIN_WORD_DISTILLED=False TRAIN_QUANTIZED_DISTILLED=False TRAIN_DIFFERENTIABLE_QUANTIZATION=False CREATE_DISTILLATION_DATASET=False COMPUTE_BLEU_MODELS = False CHECK_PM_QUANTIZATION = True COMPUTE_WORD_PERCENTAGE_SIMILARITY = True try: os.mkdir(datasets.BASE_DATA_FOLDER) except:pass try: os.mkdir(SAVED_MODELS_FOLDER) except:pass epochsToTrainOnmtIntegDataset = 15 onmtManager = model_manager.ModelManager('model_manager_integ_dataset.tst', 'model_manager', create_new_model_manager=False) for x in onmtManager.list_models(): if onmtManager.get_num_training_runs(x) > 0: print(x, onmtManager.load_metadata(x)[1]['perplexity'][-1]) integ_dataset_saved_models_folder = os.path.join(SAVED_MODELS_FOLDER, 'integ_dataset') try: os.mkdir(integ_dataset_saved_models_folder) except:pass #load the data batch_size = 64 * NUM_GPUS if batch_size % NUM_GPUS != 0: raise ValueError('Batch size: {} must be a multiple of the number of gpus:{}'.format(batch_size, NUM_GPUS)) transl_dataset = datasets.onmt_integ_dataset(pin_memory=True) train_loader, test_loader = transl_dataset.getTrainLoader(batch_size), transl_dataset.getTestLoader(batch_size) #Teacher model teacherOptions = copy.deepcopy(onmt.standard_options.stdOptions) #it only matter in the creation of the distillation dataset teacherOptions['rnn_size'] = 500 teacherOptions['epochs'] = epochsToTrainOnmtIntegDataset teacherModel_name = 'integ_dataset_teacherModel' teacherModelPath = os.path.join(integ_dataset_saved_models_folder, teacherModel_name) teacherModel = tmm.create_model(transl_dataset.fields, options=teacherOptions) if USE_CUDA: teacherModel = teacherModel.cuda() if teacherModel_name not in onmtManager.saved_models: onmtManager.add_new_model(teacherModel_name, teacherModelPath, arguments_creator_function=teacherOptions) if TRAIN_TEACHER_MODEL: onmtManager.train_model(teacherModel, model_name=teacherModel_name, train_function=tmm.train_model, arguments_train_function={'options':teacherOptions}, train_loader=train_loader, test_loader=test_loader) if onmtManager.get_num_training_runs(teacherModel_name) > 0: teacherModel.load_state_dict(onmtManager.load_model_state_dict(teacherModel_name)) #now create a distillation dataset standardTranslateOptions = onmt.standard_options.standardTranslationOptions create_distilled_dataset_options = copy.deepcopy(teacherOptions) folder_distillation_dataset = os.path.join(transl_dataset.dataFolder, 'distilled_dataset_' + teacherModel_name) if CREATE_DISTILLATION_DATASET: print('Creating distillation dataset from scratch') transl_hf.create_distillation_dataset(teacherModel, create_distilled_dataset_options, standardTranslateOptions, transl_dataset, folder_distillation_dataset) print('Distillation dataset created') try: distilled_dataset = datasets.translation_datasets.TranslationDataset(folder_distillation_dataset, src_language='de', tgt_language='en', pin_memory=True) 
train_distilled_loader, test_distilled_loader = distilled_dataset.getTrainLoader(batch_size), distilled_dataset.getTestLoader(batch_size) print('Distillation dataset loaded') except: print('Problems loading the distillation dataset') #just so they don't raise errors.. distilled_dataset = transl_dataset train_distilled_loader = train_loader test_distilled_loader = test_loader # quick last minute experiment of distill vs normal loss # smallerOptions = copy.deepcopy(onmt.standard_options.stdOptions) # #if not specified, it was trained with 2 layers (2 for encoder and 2 for decoder, that is) with rnn size of 200 # smallerOptions['batch_size'] = batch_size # smallerOptions['rnn_size'] = 512 # smallerOptions['layers'] = 1 # smallerOptions['epochs'] = epochsToTrainOnmtIntegDataset # for numBit in [4]: # model_name = 'integ_dataset_smallerModel_{}rnn_size1_layer_quantized{}bits'.format(512, numBit) # smallerModelPath = os.path.join(integ_dataset_saved_models_folder, model_name) # smallerModel = tmm.create_model(transl_dataset.fields, options=smallerOptions) # if USE_CUDA: smallerModel = smallerModel.cuda() # if model_name not in onmtManager.saved_models: # onmtManager.add_new_model(model_name, smallerModelPath, # arguments_creator_function=smallerOptions) # onmtManager.train_model(smallerModel, model_name=model_name, # train_function=tmm.train_model, # arguments_train_function={'options':smallerOptions, # 'quantizeWeights': True, # 'numBits':numBit, # 'bucket_size':256}, # train_loader=train_loader, test_loader=test_loader) # if onmtManager.get_num_training_runs(model_name) > 0: # smallerModel.load_state_dict(onmtManager.load_model_state_dict(model_name)) # print('finished training, computing BLEU') # bleu = transl_hf.get_bleu_model(smallerModel, transl_dataset, smallerOptions, standardTranslateOptions) # bleu='not computed' # ppl = tmm.evaluate_model(smallerModel, test_loader).ppl() # print('BLEU is : {}'.format(bleu)) del teacherModel rnn_sizes = [128, 256, 512] numBits = [2,4,8] # for rnn_size in rnn_sizes: # #smaller model # smallerOptions = copy.deepcopy(onmt.standard_options.stdOptions) # #if not specified, it was trained with 2 layers (2 for encoder and 2 for decoder, that is) with rnn size of 200 # smallerOptions['batch_size'] = batch_size # smallerOptions['rnn_size'] = rnn_size # smallerOptions['layers'] = 1 # smallerOptions['epochs'] = epochsToTrainOnmtIntegDataset # model_name = 'integ_dataset_smallerModel_{}rnn_size1_layer'.format(rnn_size) # smallerModelPath = os.path.join(integ_dataset_saved_models_folder, model_name) # smallerModel = tmm.create_model(transl_dataset.fields, options=smallerOptions) # if USE_CUDA: smallerModel = smallerModel.cuda() # if model_name not in onmtManager.saved_models: # onmtManager.add_new_model(model_name, smallerModelPath, # arguments_creator_function=smallerOptions) # if TRAIN_SMALLER_MODEL: # onmtManager.train_model(smallerModel, model_name=model_name, # train_function=tmm.train_model, # arguments_train_function={'options':smallerOptions}, # train_loader=train_loader, test_loader=test_loader) # if onmtManager.get_num_training_runs(model_name) > 0: # smallerModel.load_state_dict(onmtManager.load_model_state_dict(model_name)) # del smallerModel # # #Distilled model with word-level knowledge distillation # teacherModel = tmm.create_model(transl_dataset.fields, options=teacherOptions) # if USE_CUDA: teacherModel = teacherModel.cuda() # teacherModel.load_state_dict(onmtManager.load_model_state_dict(teacherModel_name)) # # distilledOptions = 
copy.deepcopy(smallerOptions) # distilled_model_name = 'integ_dataset_distilledModel_word_level_{}rnn_size1_layer'.format(rnn_size) # distilled_model_word_level = tmm.create_model(transl_dataset.fields, options=distilledOptions) # if USE_CUDA: distilled_model_word_level = distilled_model_word_level.cuda() # distilledModelPath = os.path.join(integ_dataset_saved_models_folder, distilled_model_name) # if distilled_model_name not in onmtManager.saved_models: # onmtManager.add_new_model(distilled_model_name, distilledModelPath, # arguments_creator_function=distilledOptions) # if TRAIN_WORD_DISTILLED: # onmtManager.train_model(distilled_model_word_level, model_name=distilled_model_name, # train_function=tmm.train_model, # arguments_train_function={'options':distilledOptions, # 'teacher_model': teacherModel, # 'use_distillation_loss':True}, # train_loader=train_loader, test_loader=test_loader) # if onmtManager.get_num_training_runs(distilled_model_name) > 0: # distilled_model_word_level.load_state_dict(onmtManager.load_model_state_dict(distilled_model_name)) # del distilled_model_word_level # # #Quantized word level distillation # for numBit in numBits: # distilled_model_name_quantized = 'integ_dataset_distilledModel_word_level_quantized{}bits{}rnn_size1_layer'.format( # numBit, rnn_size) # distilled_model_word_level = tmm.create_model(transl_dataset.fields, options=distilledOptions) # if USE_CUDA: distilled_model_word_level = distilled_model_word_level.cuda() # distilledModelPath = os.path.join(integ_dataset_saved_models_folder, distilled_model_name_quantized) # if distilled_model_name_quantized not in onmtManager.saved_models: # onmtManager.add_new_model(distilled_model_name_quantized, distilledModelPath, # arguments_creator_function=distilledOptions) # if TRAIN_WORD_DISTILLED and TRAIN_QUANTIZED_DISTILLED: # onmtManager.train_model(distilled_model_word_level, model_name=distilled_model_name_quantized, # train_function=tmm.train_model, # arguments_train_function={'options':distilledOptions, # 'teacher_model': teacherModel, # 'use_distillation_loss':True, # 'quantizeWeights':True, # 'numBits':numBit, # 'bucket_size':256}, # train_loader=train_loader, test_loader=test_loader) # if onmtManager.get_num_training_runs(distilled_model_name_quantized) > 0: # distilled_model_word_level.load_state_dict(onmtManager.load_model_state_dict(distilled_model_name_quantized)) # del distilled_model_word_level # # #optimize quantization points # if numBit == 8:#but no 8 bits with differentiable quantization # continue # # optQuanPointOptions = copy.deepcopy(onmt.standard_options.stdOptions) # optQuanPointOptions['learning_rate'] = 1e-4 # optQuanPointOptions['epochs'] = 3 # learning_rate_str = str(optQuanPointOptions['learning_rate']) # save_path = onmtManager.get_model_base_path(distilled_model_name) + \ # 'quant_points_{}bit_bucket_size256'.format(numBit) # if TRAIN_DIFFERENTIABLE_QUANTIZATION: # distilledModel = tmm.create_model(transl_dataset.fields, options=distilledOptions) # distilledModel.load_state_dict(onmtManager.load_model_state_dict(distilled_model_name)) # if USE_CUDA: distilledModel = distilledModel.cuda() # points, infoDict = tmm.optimize_quantization_points(distilledModel, train_loader, test_loader, # optQuanPointOptions, numPointsPerTensor=2**numBit, # bucket_size=256, assignBitsAutomatically=True, # use_distillation_loss=True) # quantization_points = [x.data.view(1, -1).cpu().numpy().tolist()[0] for x in points] # with open(save_path, 'wb') as p: # pickle.dump((quantization_points, infoDict), p) 
#print bleu for the models example_translations=False file_results = 'results_file_BLEU_models' if COMPUTE_BLEU_MODELS or COMPUTE_WORD_PERCENTAGE_SIMILARITY: with open(file_results, 'a') as fr: fr.write('\n\n== New Testing Run == \n\n') if COMPUTE_WORD_PERCENTAGE_SIMILARITY: #we need the ref file with the teacher teacherModelOptions = onmtManager.load_metadata('integ_dataset_teacherModel', 0)[0] for key, val in teacherModelOptions.items(): #remeding to an old bug in save_metadata function if val == 'None': teacherModelOptions[key] = None teacherModel = tmm.create_model(transl_dataset.fields, options=teacherModelOptions) if USE_CUDA: teacherModel = teacherModel.cuda() teacherModel.load_state_dict(onmtManager.load_model_state_dict('integ_dataset_teacherModel', 1)) pathTeacherTranslation = transl_hf.get_translation_file_model(teacherModel, transl_dataset, teacherModelOptions, standardTranslateOptions) for x in onmtManager.list_models(): if onmtManager.get_num_training_runs(x) == 0: continue modelOptions = onmtManager.load_metadata(x, 0)[0] for key, val in modelOptions.items(): #remeding to an old bug in save_metadata function if val == 'None': modelOptions[key] = None if 'distilled' in x and 'word' not in x: dataset = distilled_dataset else: dataset = transl_dataset model = tmm.create_model(dataset.fields, options=modelOptions) if USE_CUDA: model = model.cuda() model.load_state_dict(onmtManager.load_model_state_dict(x, 1)) if example_translations: print('Example of translation for model: "{}"'.format(x)) num_examples = 5 linesToTranslate, translated_lines, referenceLines = transl_hf.get_translation_examples(model, dataset, num_examples, modelOptions, standardTranslateOptions, shuffle_examples=False) print('Original Sentences == Translation == Ref Translation') print('\n'.join(' == '.join(x) for x in zip(linesToTranslate, translated_lines, referenceLines))) if COMPUTE_BLEU_MODELS or COMPUTE_WORD_PERCENTAGE_SIMILARITY: if COMPUTE_WORD_PERCENTAGE_SIMILARITY is False or (COMPUTE_WORD_PERCENTAGE_SIMILARITY and x != 'integ_dataset_teacherModel'): file_translation_model = transl_hf.get_translation_file_model(model, dataset, modelOptions, standardTranslateOptions) else: file_translation_model = pathTeacherTranslation if COMPUTE_BLEU_MODELS: bleu = transl_hf.get_bleu_moses(file_translation_model, dataset.testFilesPath[1], file_input=True) else: bleu = 'Not computed' if COMPUTE_WORD_PERCENTAGE_SIMILARITY and x != 'integ_dataset_teacherModel': percentage_word_similarity = transl_hf.compute_percentage_word_similarity(pathTeacherTranslation, file_translation_model, file_input=True) else: percentage_word_similarity = 'not computed' else: bleu = 'Not computed' percentage_word_similarity = 'not computed' perplexity = onmtManager.load_metadata(x,1)[1]['perplexity'][-1] str_to_save = 'Model "{}" ==> Perplexity: {}, BLEU: {} Percentage word similarity with teacher: {}'.format(x, perplexity, bleu, percentage_word_similarity) if COMPUTE_BLEU_MODELS or COMPUTE_WORD_PERCENTAGE_SIMILARITY: with open(file_results, 'a') as fr: fr.write(str_to_save + '\n') print(str_to_save) curr_num_bit = onmtManager.load_metadata(x)[0].get('numBits', None) if curr_num_bit is not None: quant_fun = functools.partial(quantization.uniformQuantization, s=2**curr_num_bit, bucket_size=256) actual_bit_huffmman = qhf.get_huffman_encoding_mean_bit_length(model.parameters(), quant_fun, 'uniform', s=2**curr_num_bit) print('Effective bit Huffman: {} - Size reduction: {}'.format(actual_bit_huffmman, mhf.get_size_reduction(actual_bit_huffmman, 
bucket_size=256))) if CHECK_PM_QUANTIZATION: if 'distilledModel_word_level' in x and 'quantized' not in x: for numBit in numBits: model.load_state_dict(onmtManager.load_model_state_dict(x, 1)) for p in model.parameters(): p.data = quantization.uniformQuantization(p.data, s=2**numBit, type_of_scaling='linear', bucket_size=256)[0] perplexity = tmm.evaluate_model(model, test_loader).ppl() if COMPUTE_BLEU_MODELS or COMPUTE_WORD_PERCENTAGE_SIMILARITY: file_translation_model = transl_hf.get_translation_file_model(model, dataset, modelOptions, standardTranslateOptions) if COMPUTE_BLEU_MODELS: bleu = transl_hf.get_bleu_moses(file_translation_model, dataset.testFilesPath[1], file_input=True) else: bleu = 'Not computed' if COMPUTE_WORD_PERCENTAGE_SIMILARITY: percentage_word_similarity = transl_hf.compute_percentage_word_similarity( pathTeacherTranslation, file_translation_model, file_input=True) else: percentage_word_similarity = 'not computed' else: bleu = 'Not computed' percentage_word_similarity = 'not computed' str_to_save = 'PM quantization of model "{}" with "{}" bits and bucket size 256: Perplexity : {}, BLEU: {}'.format( x, numBit, perplexity, bleu) str_to_save += 'Percentage word similarity with teacher:{}'.format(percentage_word_similarity) if COMPUTE_BLEU_MODELS or COMPUTE_WORD_PERCENTAGE_SIMILARITY: with open(file_results, 'a') as fr: fr.write(str_to_save + '\n') print(str_to_save) quant_fun = functools.partial(quantization.uniformQuantization, s=2**numBit, bucket_size=256) actual_bit_huffmman = qhf.get_huffman_encoding_mean_bit_length(model.parameters(), quant_fun, 'uniform', s=2**numBit) print('Effective bit Huffman: {} - Size reduction: {}'.format(actual_bit_huffmman, mhf.get_size_reduction( actual_bit_huffmman, bucket_size=256))) #now for the models trained with the differentiable quantization algorithm list_distilled_models = ['integ_dataset_distilledModel_word_level_{}rnn_size1_layer'.format(x) for x in rnn_sizes] optQuanPointOptions = copy.deepcopy(onmt.onmt.standard_options.stdOptions) for idx_model_distilled, distilled_model_name_to_quantize in enumerate(list_distilled_models): modelOptions = onmtManager.load_metadata(distilled_model_name_to_quantize, 0)[0] for key, val in modelOptions.items(): # remeding to an old bug in save_metadata function if val == 'None': modelOptions[key] = None dataset = transl_dataset #since we don't use sequence level distillation for numBit in numBits: if numBit == 8: continue save_path = onmtManager.get_model_base_path(distilled_model_name_to_quantize) + \ 'quant_points_{}bit_bucket_size256'.format(numBit) with open(save_path, 'rb') as p: quantization_points, infoDict = pickle.load(p) distilledModel = tmm.create_model(dataset.fields, options=modelOptions) distilledModel.load_state_dict(onmtManager.load_model_state_dict(distilled_model_name_to_quantize)) if USE_CUDA: distilledModel = distilledModel.cuda() for idx, p in enumerate(distilledModel.parameters()): p.data = quantization.nonUniformQuantization(p.data, quantization_points[idx], bucket_size=256)[0] reported_perplexity = infoDict['perplexity'][-1] perplexity = tmm.evaluate_model(distilledModel, test_loader).ppl() if COMPUTE_BLEU_MODELS or COMPUTE_WORD_PERCENTAGE_SIMILARITY: file_translation_model = transl_hf.get_translation_file_model(distilledModel, dataset, modelOptions, standardTranslateOptions) if COMPUTE_BLEU_MODELS: bleu = transl_hf.get_bleu_moses(file_translation_model, dataset.testFilesPath[1], file_input=True) else: bleu = 'Not computed' if COMPUTE_WORD_PERCENTAGE_SIMILARITY: 
percentage_word_similarity = transl_hf.compute_percentage_word_similarity( pathTeacherTranslation, file_translation_model, file_input=True) else: percentage_word_similarity = 'not computed' else: bleu = 'Not computed' percentage_word_similarity = 'not computed' str_to_save = 'Model "{}" ==> Reported perplexity : {}, Actual perplexity: {}, BLEU: {}'.format( distilled_model_name_to_quantize + 'quant_points_{}bit_bucket_size256'.format(numBit), reported_perplexity, perplexity, bleu) str_to_save += 'Percentage word similarity with teacher:{}'.format(percentage_word_similarity) if COMPUTE_BLEU_MODELS or COMPUTE_WORD_PERCENTAGE_SIMILARITY: with open(file_results, 'a') as fr: fr.write(str_to_save + '\n') print(str_to_save) quantization_functions = [functools.partial(quantization.nonUniformQuantization, listQuantizationPoints=qp, bucket_size=256) for qp in quantization_points] actual_bit_huffmman = qhf.get_huffman_encoding_mean_bit_length(distilledModel.parameters(), quantization_functions, 'nonUniform') print('Effective bit Huffman: {} - Size reduction: {}'.format(actual_bit_huffmman, mhf.get_size_reduction( actual_bit_huffmman, bucket_size=256))) try: os.remove(pathTeacherTranslation) except:pass try: os.remove(file_translation_model) except:pass
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
homeassistant/components/alarm_control_panel/__init__.py
"""Component to interface with an alarm control panel.""" from __future__ import annotations from dataclasses import dataclass from datetime import timedelta import logging from typing import Any, Final, final import voluptuous as vol from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( ATTR_CODE, ATTR_CODE_FORMAT, SERVICE_ALARM_ARM_AWAY, SERVICE_ALARM_ARM_CUSTOM_BYPASS, SERVICE_ALARM_ARM_HOME, SERVICE_ALARM_ARM_NIGHT, SERVICE_ALARM_ARM_VACATION, SERVICE_ALARM_DISARM, SERVICE_ALARM_TRIGGER, ) from homeassistant.core import HomeAssistant import homeassistant.helpers.config_validation as cv from homeassistant.helpers.config_validation import make_entity_service_schema from homeassistant.helpers.entity import Entity, EntityDescription from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.typing import ConfigType from .const import ( SUPPORT_ALARM_ARM_AWAY, SUPPORT_ALARM_ARM_CUSTOM_BYPASS, SUPPORT_ALARM_ARM_HOME, SUPPORT_ALARM_ARM_NIGHT, SUPPORT_ALARM_ARM_VACATION, SUPPORT_ALARM_TRIGGER, ) _LOGGER: Final = logging.getLogger(__name__) DOMAIN: Final = "alarm_control_panel" SCAN_INTERVAL: Final = timedelta(seconds=30) ATTR_CHANGED_BY: Final = "changed_by" FORMAT_TEXT: Final = "text" FORMAT_NUMBER: Final = "number" ATTR_CODE_ARM_REQUIRED: Final = "code_arm_required" ENTITY_ID_FORMAT: Final = DOMAIN + ".{}" ALARM_SERVICE_SCHEMA: Final = make_entity_service_schema( {vol.Optional(ATTR_CODE): cv.string} ) PLATFORM_SCHEMA: Final = cv.PLATFORM_SCHEMA PLATFORM_SCHEMA_BASE: Final = cv.PLATFORM_SCHEMA_BASE async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool: """Track states and offer events for sensors.""" component = hass.data[DOMAIN] = EntityComponent( logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL ) await component.async_setup(config) component.async_register_entity_service( SERVICE_ALARM_DISARM, ALARM_SERVICE_SCHEMA, "async_alarm_disarm" ) component.async_register_entity_service( SERVICE_ALARM_ARM_HOME, ALARM_SERVICE_SCHEMA, "async_alarm_arm_home", [SUPPORT_ALARM_ARM_HOME], ) component.async_register_entity_service( SERVICE_ALARM_ARM_AWAY, ALARM_SERVICE_SCHEMA, "async_alarm_arm_away", [SUPPORT_ALARM_ARM_AWAY], ) component.async_register_entity_service( SERVICE_ALARM_ARM_NIGHT, ALARM_SERVICE_SCHEMA, "async_alarm_arm_night", [SUPPORT_ALARM_ARM_NIGHT], ) component.async_register_entity_service( SERVICE_ALARM_ARM_VACATION, ALARM_SERVICE_SCHEMA, "async_alarm_arm_vacation", [SUPPORT_ALARM_ARM_VACATION], ) component.async_register_entity_service( SERVICE_ALARM_ARM_CUSTOM_BYPASS, ALARM_SERVICE_SCHEMA, "async_alarm_arm_custom_bypass", [SUPPORT_ALARM_ARM_CUSTOM_BYPASS], ) component.async_register_entity_service( SERVICE_ALARM_TRIGGER, ALARM_SERVICE_SCHEMA, "async_alarm_trigger", [SUPPORT_ALARM_TRIGGER], ) return True async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up a config entry.""" component: EntityComponent = hass.data[DOMAIN] return await component.async_setup_entry(entry) async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Unload a config entry.""" component: EntityComponent = hass.data[DOMAIN] return await component.async_unload_entry(entry) @dataclass class AlarmControlPanelEntityDescription(EntityDescription): """A class that describes alarm control panel entities.""" class AlarmControlPanelEntity(Entity): """An abstract class for alarm control entities.""" entity_description: AlarmControlPanelEntityDescription _attr_changed_by: str | 
None = None _attr_code_arm_required: bool = True _attr_code_format: str | None = None _attr_supported_features: int @property def code_format(self) -> str | None: """Regex for code format or None if no code is required.""" return self._attr_code_format @property def changed_by(self) -> str | None: """Last change triggered by.""" return self._attr_changed_by @property def code_arm_required(self) -> bool: """Whether the code is required for arm actions.""" return self._attr_code_arm_required def alarm_disarm(self, code: str | None = None) -> None: """Send disarm command.""" raise NotImplementedError() async def async_alarm_disarm(self, code: str | None = None) -> None: """Send disarm command.""" await self.hass.async_add_executor_job(self.alarm_disarm, code) def alarm_arm_home(self, code: str | None = None) -> None: """Send arm home command.""" raise NotImplementedError() async def async_alarm_arm_home(self, code: str | None = None) -> None: """Send arm home command.""" await self.hass.async_add_executor_job(self.alarm_arm_home, code) def alarm_arm_away(self, code: str | None = None) -> None: """Send arm away command.""" raise NotImplementedError() async def async_alarm_arm_away(self, code: str | None = None) -> None: """Send arm away command.""" await self.hass.async_add_executor_job(self.alarm_arm_away, code) def alarm_arm_night(self, code: str | None = None) -> None: """Send arm night command.""" raise NotImplementedError() async def async_alarm_arm_night(self, code: str | None = None) -> None: """Send arm night command.""" await self.hass.async_add_executor_job(self.alarm_arm_night, code) def alarm_arm_vacation(self, code: str | None = None) -> None: """Send arm vacation command.""" raise NotImplementedError() async def async_alarm_arm_vacation(self, code: str | None = None) -> None: """Send arm vacation command.""" await self.hass.async_add_executor_job(self.alarm_arm_vacation, code) def alarm_trigger(self, code: str | None = None) -> None: """Send alarm trigger command.""" raise NotImplementedError() async def async_alarm_trigger(self, code: str | None = None) -> None: """Send alarm trigger command.""" await self.hass.async_add_executor_job(self.alarm_trigger, code) def alarm_arm_custom_bypass(self, code: str | None = None) -> None: """Send arm custom bypass command.""" raise NotImplementedError() async def async_alarm_arm_custom_bypass(self, code: str | None = None) -> None: """Send arm custom bypass command.""" await self.hass.async_add_executor_job(self.alarm_arm_custom_bypass, code) @property def supported_features(self) -> int: """Return the list of supported features.""" return self._attr_supported_features @final @property def state_attributes(self) -> dict[str, Any] | None: """Return the state attributes.""" return { ATTR_CODE_FORMAT: self.code_format, ATTR_CHANGED_BY: self.changed_by, ATTR_CODE_ARM_REQUIRED: self.code_arm_required, }
[]
[]
[]
[]
[]
python
null
null
null
main.go
package main

import (
	"io"
	"log"
	"net/http"
	"os"
	"strconv"

	"github.com/didip/tollbooth"
	"github.com/didip/tollbooth_negroni"
	"github.com/urfave/negroni"
)

var apiList Entries
var categories []string

func main() {
	jsonFile := os.Getenv("JSONFILE")
	if jsonFile == "" {
		jsonFile = "/entries.json"
	}
	getList(jsonFile)
	categories = parseCategories(apiList.Entries)

	port := os.Getenv("PORT")
	if port == "" {
		port = "8080"
	}

	rate := os.Getenv("RATE")
	if rate == "" {
		rate = "10"
	}
	i, err := strconv.Atoi(rate)
	if err != nil {
		panic(err)
	}
	limiter := tollbooth.NewLimiter(float64(i), nil)

	filename := os.Getenv("LOGFILE")
	if filename == "" {
		filename = "/tmp/public-api.log"
	}

	// If the file does not exist, create it. Otherwise, append to the file.
	f, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		panic(err)
	}

	logger := NewLogger(Options{
		Out: io.MultiWriter(f, os.Stdout),
	})

	mux := http.NewServeMux()
	mux.Handle("/", http.FileServer(http.Dir("./static")))
	mux.Handle("/entries", negroni.New(
		tollbooth_negroni.LimitHandler(limiter),
		negroni.Wrap(getEntriesHandler()),
	))
	mux.Handle("/categories", negroni.New(
		tollbooth_negroni.LimitHandler(limiter),
		negroni.Wrap(getCategoriesHandler()),
	))
	mux.Handle("/random", negroni.New(
		tollbooth_negroni.LimitHandler(limiter),
		negroni.Wrap(getRandomHandler()),
	))
	mux.Handle("/health", negroni.New(
		tollbooth_negroni.LimitHandler(limiter),
		negroni.Wrap(healthCheckHandler()),
	))

	n := negroni.New()
	recovery := negroni.NewRecovery()
	recovery.PrintStack = false
	n.Use(recovery)
	n.Use(negroni.HandlerFunc(logger.logFunc))
	n.UseHandler(mux)

	log.Println("logging requests in " + filename)
	log.Printf("listening on port %s\n", port)
	log.Fatal(http.ListenAndServe(":"+port, n))
}
[ "\"JSONFILE\"", "\"PORT\"", "\"RATE\"", "\"LOGFILE\"" ]
[]
[ "PORT", "RATE", "LOGFILE", "JSONFILE" ]
[]
["PORT", "RATE", "LOGFILE", "JSONFILE"]
go
4
0
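The main.go entry above repeats the same read-then-default dance for each of its four variables (JSONFILE, PORT, RATE, LOGFILE). A minimal standalone sketch of how that pattern can be factored into a helper — the envOrDefault name is hypothetical and not part of the file above:

package main

import (
	"fmt"
	"os"
)

// envOrDefault returns the value of key, or fallback when the variable is unset or empty.
// Hypothetical helper for illustration; the catalogued file inlines this logic instead.
func envOrDefault(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

func main() {
	port := envOrDefault("PORT", "8080")
	rate := envOrDefault("RATE", "10")
	fmt.Println(port, rate)
}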
logkit/logkit/asgi.py
""" ASGI config for logkit project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'logkit.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
app.go
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"regexp"
	"time"

	"github.com/mitchellh/go-homedir"
)

const (
	// AppName is the name of the command
	AppName = "dt"

	unixSeconds      = "unix"
	unixMilliSeconds = "unixm"
)

// AdjustDay selects how to adjust when the target month has no matching day
type AdjustDay int

const (
	// AdjustToEndOfMonth uses the last day of the month instead when the target month has no matching day
	AdjustToEndOfMonth AdjustDay = iota
	// Normalize normalizes the date the same way time.Time.Date does when the target month has no matching day
	Normalize
)

var version = "0.11.1"

var splitRegexp = regexp.MustCompile(`\s*=\s*`)

// Dt provides date arithmetic and formatting
type Dt struct {
	time   time.Time
	format string
}

func (dt *Dt) get() time.Time {
	return dt.time
}

// AddYear adds years; a negative value subtracts.
func (dt *Dt) AddYear(year int) *Dt {
	return &Dt{
		time:   dt.time.AddDate(year, 0, 0),
		format: dt.format,
	}
}

// AddMonth adds months; a negative value subtracts.
func (dt *Dt) AddMonth(month int, adjust AdjustDay) *Dt {
	result := &Dt{
		time:   dt.time.AddDate(0, month, 0),
		format: dt.format,
	}
	if adjust == Normalize {
		return result
	}
	t := dt.time
	firstDayOfMonth := time.Date(t.Year(), t.Month(), 1, t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), t.Location()).AddDate(0, month, 0)
	if result.time.Month() == firstDayOfMonth.Month() {
		return result
	}
	lastDayOfPreviousMonth := firstDayOfMonth.AddDate(0, 1, -1)
	return &Dt{
		time:   lastDayOfPreviousMonth,
		format: dt.format,
	}
}

// AddDay adds days; a negative value subtracts.
func (dt *Dt) AddDay(day int) *Dt {
	return &Dt{
		time:   dt.time.AddDate(0, 0, day),
		format: dt.format,
	}
}

// AddHour adds hours; a negative value subtracts.
func (dt *Dt) AddHour(hour int) *Dt {
	return &Dt{
		time:   dt.time.Add(time.Duration(hour) * time.Hour),
		format: dt.format,
	}
}

// AddMinute adds minutes; a negative value subtracts.
func (dt *Dt) AddMinute(minute int) *Dt {
	return &Dt{
		time:   dt.time.Add(time.Duration(minute) * time.Minute),
		format: dt.format,
	}
}

// AddSecond adds seconds; a negative value subtracts.
func (dt *Dt) AddSecond(second int) *Dt {
	return &Dt{
		time:   dt.time.Add(time.Duration(second) * time.Second),
		format: dt.format,
	}
}

func (dt *Dt) String() string {
	t := dt.time
	f := dt.format
	switch f {
	case unixSeconds:
		return fmt.Sprintf("%d", t.Unix())
	case unixMilliSeconds:
		return fmt.Sprintf("%d", t.UnixNano()/int64(time.Millisecond))
	default:
		return t.Format(f)
	}
}

func loadConfig() {
	configPath := os.Getenv("XDG_CONFIG_HOME")
	if configPath == "" {
		configPath = "~/.config"
	}
	log.Printf("config path: %s\n", configPath)

	path, err := homedir.Expand(configPath + "/dt/.dt")
	if err != nil {
		return
	}
	f, err := os.Open(path)
	if err != nil {
		return
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	m := map[string]string{}
	for scanner.Scan() {
		k, v := splitFormat(scanner.Text())
		if k == "" || v == "" {
			continue
		}
		m[k] = v
	}
	for k, v := range m {
		log.Printf("custom format: %s => %s\n", k, v)
		formats[k] = v
	}
}

func splitFormat(s string) (string, string) {
	cols := splitRegexp.Split(s, 2)
	if len(cols) != 2 {
		return "", ""
	}
	if cols[0] == "" || cols[1] == "" {
		return "", ""
	}
	return cols[0], cols[1]
}

func main() {
	cli := &CLO{outStream: os.Stdout, errStream: os.Stderr}
	os.Exit(cli.Run(os.Args))
}
[ "\"XDG_CONFIG_HOME\"" ]
[]
[ "XDG_CONFIG_HOME" ]
[]
["XDG_CONFIG_HOME"]
go
1
0
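loadConfig in the app.go entry above falls back from XDG_CONFIG_HOME to ~/.config and then expands the leading ~; a standalone sketch of that lookup using the same go-homedir package (the configDir name is illustrative, not from the file above):

package main

import (
	"fmt"
	"os"

	"github.com/mitchellh/go-homedir"
)

// configDir resolves the dt config directory: XDG_CONFIG_HOME when set,
// otherwise ~/.config, with the leading ~ expanded to the home directory.
func configDir() (string, error) {
	base := os.Getenv("XDG_CONFIG_HOME")
	if base == "" {
		base = "~/.config"
	}
	return homedir.Expand(base + "/dt")
}

func main() {
	dir, err := configDir()
	fmt.Println(dir, err)
}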
pkg/apis/management.cattle.io/v3/authn_types.go
package v3

import (
	"github.com/rancher/wrangler/pkg/genericcondition"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type User struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	DisplayName        string     `json:"displayName,omitempty"`
	Description        string     `json:"description"`
	Username           string     `json:"username,omitempty"`
	Password           string     `json:"password,omitempty" norman:"writeOnly,noupdate"`
	MustChangePassword bool       `json:"mustChangePassword,omitempty"`
	PrincipalIDs       []string   `json:"principalIds,omitempty" norman:"type=array[reference[principal]]"`
	Me                 bool       `json:"me,omitempty"`
	Enabled            *bool      `json:"enabled,omitempty" norman:"default=true"`
	Spec               UserSpec   `json:"spec,omitempty"`
	Status             UserStatus `json:"status"`
}

type UserStatus struct {
	Conditions []genericcondition.GenericCondition `json:"conditions"`
}

type UserSpec struct{}
[]
[]
[]
[]
[]
go
null
null
null
pkg/go209/modules.go
package go209

import (
	"errors"
	"fmt"
	"os"
	"plugin"
	"strings"

	log "github.com/sirupsen/logrus"
)

// PreloadedModules is the list of .so files from the root directory we load by default
const PreloadedModules = "email-mod:slackwebhook-mod"

var modules = LoadedModules{}

// Module defines what our plugins have to define
type Module interface {
	Name() string
	EnvVars() []string
	Run(in interface{}, ev map[string]string, interactions map[string]string) error
}

// LoadedModules is a struct we use to hold modules and load modules etc
type LoadedModules struct {
	Modules []Module
}

// LoadModules loads up the plugin .so files
func (m *LoadedModules) LoadModules() error {
	modsToLoad := make(map[string]struct{})
	// var modsToLoad []string

	// Load Preloaded modules first
	for _, preLoad := range strings.Split(PreloadedModules, ":") {
		if _, ok := modsToLoad[preLoad]; !ok {
			modsToLoad[preLoad] = struct{}{}
			// modsToLoad = append(modsToLoad, preLoad)
		}
	}

	// Load dynamic modules from env var
	if len(os.Getenv("DYNAMIC_MODULES")) > 0 {
		for _, preLoad := range strings.Split(os.Getenv("DYNAMIC_MODULES"), ":") {
			if _, ok := modsToLoad[preLoad]; !ok {
				modsToLoad[preLoad] = struct{}{}
				// modsToLoad = append(modsToLoad, preLoad)
			}
		}
	}

	for preLoad := range modsToLoad {
		plug, err := plugin.Open(fmt.Sprintf("./%s.so", preLoad))
		if err != nil {
			return err
		}
		symMod, err := plug.Lookup("Module")
		if err != nil {
			return err
		}
		var mod Module
		mod, ok := symMod.(Module)
		if !ok {
			return errors.New("Unexpected type from module")
		}
		m.Modules = append(m.Modules, mod)
	}
	return nil
}

// DumpMods prints information about the loaded modules
func DumpMods() error {
	fmt.Println("Number of modules loaded: ", len(modules.Modules))
	fmt.Println("Listing loaded modules:")
	for _, mod := range modules.Modules {
		fmt.Printf("Module: %s\n", mod.Name())
		if len(mod.EnvVars()) > 0 {
			fmt.Println("EnvVars:")
			for _, ev := range mod.EnvVars() {
				adjusted := strings.ToUpper(fmt.Sprintf("%s_%s", mod.Name(), ev))
				fmt.Printf("\t%s (%s)\n", ev, adjusted)
			}
		}
	}
	return nil
}

// FetchMods returns the modules
func FetchMods() LoadedModules {
	return modules
}

func init() {
	err := modules.LoadModules()
	if err != nil {
		log.Fatal(fmt.Sprintf("Error loading modules: %s", err))
	}
	// log.Info(fmt.Sprintf("*** Modules loaded successfully count: %d\n", len(modules.Modules)))
}
[ "\"DYNAMIC_MODULES\"", "\"DYNAMIC_MODULES\"" ]
[]
[ "DYNAMIC_MODULES" ]
[]
["DYNAMIC_MODULES"]
go
1
0
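The modules.go entry above merges a hard-coded preload list with a colon-separated DYNAMIC_MODULES value, using a map as a set to drop duplicates. A standalone sketch of that merging pattern — not the package's API, just the list handling the annotation refers to:

package main

import (
	"fmt"
	"os"
	"strings"
)

// moduleNames merges the preloaded module list with DYNAMIC_MODULES,
// splitting both on ":" and skipping empty or duplicate names.
func moduleNames(preloaded string) []string {
	seen := make(map[string]struct{})
	var names []string
	for _, group := range []string{preloaded, os.Getenv("DYNAMIC_MODULES")} {
		if group == "" {
			continue
		}
		for _, name := range strings.Split(group, ":") {
			if _, ok := seen[name]; ok || name == "" {
				continue
			}
			seen[name] = struct{}{}
			names = append(names, name)
		}
	}
	return names
}

func main() {
	fmt.Println(moduleNames("email-mod:slackwebhook-mod"))
}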
kubetest/federation.go
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"errors"
	"os"
	"os/exec"
	"strings"

	"k8s.io/test-infra/kubetest/util"
)

/*
multiClusterDeployment type holds the data passed to `--multi-clusters` flag.
The format of value that should be passed to the flag is
`[Zone1:]Cluster1[,[ZoneN:]ClusterN]]*`. Multiple clusters can be specified
as a comma separated list. Zone can be optionally specified along with
cluster name as described above in the format. If zone is not specified
along with cluster then cluster would be deployed in default zone.
*/
type multiClusterDeployment struct {
	zones    map[string]string
	clusters []string
}

func (m *multiClusterDeployment) String() string {
	var str string
	for _, cluster := range m.clusters {
		if len(str) != 0 {
			str += ","
		}
		zone, exist := m.zones[cluster]
		if exist {
			str += zone + ":"
		}
		str += cluster
	}
	return str
}

func (m *multiClusterDeployment) Set(value string) error {
	if len(value) == 0 {
		return errors.New("invalid value passed to --multi-clusters flag, should specify at least one cluster")
	}
	if m.zones == nil {
		m.zones = make(map[string]string)
	}
	clusterZones := strings.Split(value, ",")
	for _, czTuple := range clusterZones {
		czSlice := strings.SplitN(czTuple, ":", 2)
		if len(czSlice[0]) == 0 || (len(czSlice) == 2 && len(czSlice[1]) == 0) {
			return errors.New("invalid value passed to --multi-clusters flag")
		}
		if len(czSlice) == 2 {
			m.zones[czSlice[1]] = czSlice[0]
			m.clusters = append(m.clusters, czSlice[1])
		} else {
			m.clusters = append(m.clusters, czSlice[0])
		}
	}
	return nil
}

func (m *multiClusterDeployment) Type() string {
	return "multiClusterDeployment"
}

func (m *multiClusterDeployment) Enabled() bool {
	return len(m.clusters) > 0
}

// TODO: Remove below logic in main repo once we drop testing support for
// federation in release branches. ~ around 1.11 release frame.
func useFederationRepo() bool {
	federationRepo := os.Getenv("FEDERATION_REPO")
	if federationRepo == "" {
		return false
	}
	return true
}

func fedUp() error {
	var cmd string
	if useFederationRepo() {
		cmd = "../federation/deploy/cluster/federation-up.sh"
	} else {
		cmd = "./federation/cluster/federation-up.sh"
	}
	return control.FinishRunning(exec.Command(cmd))
}

func federationTest(testArgs []string) error {
	var cmd string
	if useFederationRepo() {
		cmd = "../federation/hack/federated-ginkgo-e2e.sh"
	} else {
		cmd = "./hack/federated-ginkgo-e2e.sh"
	}
	testArgs = util.SetFieldDefault(testArgs, "--ginkgo.focus", "\\[Feature:Federation\\]")
	return control.FinishRunning(exec.Command(cmd, testArgs...))
}

func fedDown() error {
	var cmd string
	if useFederationRepo() {
		cmd = "../federation/deploy/cluster/federation-down.sh"
	} else {
		cmd = "./federation/cluster/federation-down.sh"
	}
	return control.FinishRunning(exec.Command(cmd))
}
[ "\"FEDERATION_REPO\"" ]
[]
[ "FEDERATION_REPO" ]
[]
["FEDERATION_REPO"]
go
1
0
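The useFederationRepo check in the kubetest entry above reduces to a single presence test on FEDERATION_REPO; a minimal standalone sketch of that equivalent form (not part of kubetest itself):

package main

import (
	"fmt"
	"os"
)

// useFederationRepo reports whether FEDERATION_REPO points at a separate checkout.
// One-line equivalent of the if/else check in the entry above.
func useFederationRepo() bool {
	return os.Getenv("FEDERATION_REPO") != ""
}

func main() {
	fmt.Println(useFederationRepo())
}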
colcon_acceleration/subverb/__init__.py
# ____ ____ # / /\/ / # /___/ \ / Copyright (c) 2021, Xilinx®. # \ \ \/ Author: Víctor Mayoral Vilches <victorma@xilinx.com> # \ \ # / / # /___/ /\ # \ \ / \ # \___\/\___\ # # Licensed under the Apache License, Version 2.0 # import os import subprocess import sys from colcon_core.logging import colcon_logger from colcon_core.plugin_system import instantiate_extensions from colcon_core.plugin_system import order_extensions_by_name from colcon_acceleration.verb import gray, yellow, red, green logger = colcon_logger.getChild(__name__) # used by class below as the nth partition as # as by external modules to generalize mountpointn = "/tmp/sdcard_img_p" # used by external modules mountpoint1 = "/tmp/sdcard_img_p1" mountpoint2 = "/tmp/sdcard_img_p2" class AccelerationSubverbExtensionPoint: """ The interface for vitis subverb extensions. A vitis subverb extension provides a subverb to the `vitis` verb of the command line tool. For each instance the attribute `SUBVERB_NAME` is being set to the basename of the entry point registering the extension. """ """The version of the vitis subverb extension interface.""" EXTENSION_POINT_VERSION = "1.0" def add_arguments(self, *, parser): """ Add command line arguments specific to the subverb. The method is intended to be overridden in a subclass. :param parser: The argument parser """ pass def main(self, *, context): """ Execute the subverb extension logic. This method must be overridden in a subclass. :param context: The context providing the parsed command line arguments :returns: The return code """ raise NotImplementedError() def get_board(self): """ Get the board of the acceleration/firmware/select/ deployment. Reads BOARD file and returns it as a string. NOTE: firmware is board-specific. Consult the README of acceleration_firmware_xilinx and/or change branch as per your hardware/board requirements. :rtype: String """ current_dir = os.environ.get("PWD", "") board_file = current_dir + "/acceleration/firmware/select/BOARD" if os.path.exists(board_file): with open(board_file, "r") as myfile: data = myfile.readlines() return data[0].strip() else: raise FileNotFoundError( board_file, "consider running " + "this command from the root directory of the workspace " + "after xilinx's firmware has been deployed. \n" + "Try 'colcon build --merge-install' first.", ) def get_platform(self): """ Get the board of the acceleration/firmware/select/ deployment. Reads BOARD file and returns it as a string. NOTE: firmware is board-specific. Consult the README of acceleration_firmware_xilinx and/or change branch as per your hardware/board requirements. :rtype: String """ platform_dir = get_platform_dir() cmd = "ls " + platform_dir + " | grep xpfm" outs, errs = run(cmd, shell=True) return outs.replace(".xpfm", "") def get_subverb_extensions(): """ Get the available subverb extensions. The extensions are ordered by their entry point name. :rtype: OrderedDict """ extensions = instantiate_extensions(__name__) for name, extension in extensions.items(): extension.SUBVERB_NAME = name return order_extensions_by_name(extensions) def get_vitis_dir(): """ Get the path to the Vitis deployed software. Tries first the XILINX_VITIS env. variable and defaults to the current directory's PWD/xilinx/vitis. 
:rtype: String """ if "XILINX_VITIS" in os.environ: vitis_dir = os.getenv("XILINX_VITIS") else: # take it from current directory current_dir = os.environ.get("PWD", "") vitis_dir = current_dir + "/xilinx/vitis" if os.path.exists(vitis_dir): return vitis_dir else: raise FileNotFoundError( vitis_dir, "consider setting XILINX_VITIS or running " + "this command from the root directory of the workspace " + "after xilinx's firmware has been deployed. \n" + "Try 'colcon build --merge-install' first.", ) def get_vivado_dir(): """ Get the path to the Vivado software. Tries first the XILINX_VIVADO env. variable and defaults to the current directory's PWD/xilinx/vivado. :rtype: String """ if "XILINX_VIVADO" in os.environ: vivado_dir = os.getenv("XILINX_VIVADO") else: # take it from current directory current_dir = os.environ.get("PWD", "") vivado_dir = current_dir + "/xilinx/vivado" if os.path.exists(vivado_dir): return vivado_dir else: raise FileNotFoundError( vivado_dir, "consider setting XILINX_VIVADO or running " + "this command from the root directory of the workspace " + "after xilinx's firmware has been deployed. \n" + "Try 'colcon build --merge-install' first.", ) def get_vitis_hls_dir(): """ Get the path to the Vitis HLS deployed software. Tries first the XILINX_HLS env. variable and defaults to the current directory's PWD/xilinx/vitis_hls. :rtype: String """ if "XILINX_HLS" in os.environ: vitis_hls_dir = os.getenv("XILINX_HLS") else: # take it from current directory current_dir = os.environ.get("PWD", "") vitis_hls_dir = current_dir + "/xilinx/vitis_hls" if os.path.exists(vitis_hls_dir): return vitis_hls_dir else: raise FileNotFoundError( vitis_hls_dir, "consider setting XILINX_HLS or running " + "this command from the root directory of the workspace " + "after xilinx's firmware has been deployed. \n" + "Try 'colcon build --merge-install' first.", ) def get_build_dir(): """ Get the path to the build directory :rtype: String """ current_dir = os.environ.get("PWD", "") build_dir = current_dir + "/build" if os.path.exists(build_dir): return build_dir else: raise FileNotFoundError( build_dir, "consider running " + "this command from the root directory of the workspace " + "after building the ROS 2 workspace overlay. \n" + "Try 'colcon build --merge-install' first.", ) def get_firmware_dir(): """ Get the path to the firmware NOTE: firmware is board-specific. Consult the README and/or change branch as per your hardware/board requirements. :rtype: String """ current_dir = os.environ.get("PWD", "") firmware_dir = current_dir + "/acceleration/firmware/select" if os.path.exists(firmware_dir): return firmware_dir else: # raise FileNotFoundError( # firmware_dir, # "consider running " # + "this command from the root directory of the workspace " # + "after firmware has been deployed. \n" # + "Try 'colcon build --merge-install' first.", # ) red("Firmware directory (" + firmware_dir + ") not found.") return None def get_platform_dir(): """ Get the path to the hardware platform deployed software. Usually lives within "<ros2_ws>/acceleration/firmware/select/platform". NOTE: platform is board-specific. Consult the README and/or change branch as per your hardware/board requirements. 
:rtype: String """ current_dir = os.environ.get("PWD", "") platform_dir = current_dir + "/acceleration/firmware/select/platform" if os.path.exists(platform_dir): return platform_dir else: raise FileNotFoundError( platform_dir, "consider running " + "this command from the root directory of the workspace " + "after firmware has been deployed. \n" + "Try 'colcon build --merge-install' first.", ) def get_install_dir(install_dir_input="install"): """ Get the path to the install directory of the current ROS 2 overlay worksapce :rtype: String """ current_dir = os.environ.get("PWD", "") install_dir = current_dir + "/" + install_dir_input if os.path.exists(install_dir): return install_dir else: raise FileNotFoundError( install_dir, "no install directory " + "found in the current workspace. Consider building it first. " + "Try 'colcon build --merge-install'.", ) def check_install_directory(installdir=None): """ Check if the install directory exits in the root of the current workspace. :rtype: Bool """ current_dir = os.environ.get("PWD", "") if installdir: install_dir = current_dir + "/" + installdir else: install_dir = current_dir + "/install" if os.path.exists(install_dir): return True else: return False def get_rawimage_path(rawimage_filename="sd_card.img"): """ Retring the full path of the raw image "sd_card.img" contained in the firmware directory if exists, None otherwise. Image is meant for both hardware and emulation. It usually lives in "<ros2_ws>/acceleration/firmware/select/sd_card.img". :rtype: String """ firmware_dir = get_firmware_dir() rawimage_path = firmware_dir + "/" + rawimage_filename if os.path.exists(rawimage_path): return rawimage_path else: return None def run(cmd, shell=False, timeout=1): """ Spawns a new process launching cmd, connect to their input/output/error pipes, and obtain their return codes. :param cmd: command split in the form of a list :returns: stdout """ proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell ) try: outs, errs = proc.communicate(timeout=timeout) except subprocess.TimeoutExpired: proc.kill() outs, errs = proc.communicate() # decode, or None if outs: outs = outs.decode("utf-8").strip() else: outs = None if errs and proc.returncode: errs = errs.decode("utf-8").strip() else: errs = None # # # debug # print(cmd) # gray(outs) # red(errs) # red("returncode: " + str(proc.returncode)) return outs, errs def mount_rawimage(rawimage_path, partition=1, debug=False): """ Mounts a disk image as provided by the parameter rawimage_path. Image is assumed to have two partitions and both are mounted respectively in /tmp/sdcard_img_p1 and /tmp/sdcard_img_p2. param: rawimage_path, the path of the raw disk image obtained by calling get_rawimage_path() param: partition number return: String (mountpoint) """ # TODO: transform this into a check that "partition" isn't greater # than the number of them in the actual raw image # # if partition != "p1" and partition != "p2": # red("Partition value not accepted: " + partition) # sys.exit(1) if not rawimage_path: red( "raw image file not found. Consider running " + "this command from the root directory of the workspace and build " + "the workspace first so that packages deploy automatically " + "the image." 
) sys.exit(1) green("- Confirmed availability of raw image file at: " + rawimage_path) # fetch UNITS units = None cmd = "fdisk -l " + rawimage_path + " | grep 'Units\|Unidades' | awk '{print $8}'" outs, errs = run(cmd, shell=True) if outs: units = int(outs) if not units: red( "Something went wrong while fetching the raw image UNITS.\n" + "Review the output: " + outs ) sys.exit(1) # fetch STARTSECTORPn startsectorpn = None sectorpn = "img" + str(partition) # partitions will look like: # # Device Boot Start End Sectors Size Id Type # sd_card.img1 * 2048 1148927 1146880 560M c W95 FAT32 (LBA) # sd_card.img2 1148928 6703103 5554176 2.7G 83 Linux if partition == 1: cmd = ( "fdisk -l " + rawimage_path + " | grep '" + sectorpn + "' | awk '{print $3}'" ) else: cmd = ( "fdisk -l " + rawimage_path + " | grep '" + sectorpn + "' | awk '{print $2}'" ) if debug: print(cmd) outs, errs = run(cmd, shell=True) if outs: startsectorpn = int(outs) if debug: print("startsectorpn: " + str(startsectorpn)) if not startsectorpn: red( "Something went wrong while fetching the raw image STARTSECTOR for partition: " + str(partition) + ".\n" + "Review the output: " + outs ) sys.exit(1) green( "- Finished inspecting raw image, obtained UNITS and STARTSECTOR for partition: " + str(partition) + "." ) # create mountpoint mountpointnth = mountpointn + str(partition) cmd = "mkdir -p " + mountpointnth outs, errs = run(cmd, shell=True) if errs: red( "Something went wrong while setting MOUNTPOINT.\n" + "Review the output: " + errs ) sys.exit(1) # mount pnth cmd = ( "sudo mount -o loop,offset=" + str(units * startsectorpn) + " " + rawimage_path + " " + mountpointnth ) if debug: print(cmd) outs, errs = run( cmd, shell=True, timeout=10 ) # longer timeout, allow user to input password if errs: red( "Something went wrong while mounting partition: " + str(partition) + ".\n" + "Review the output: " + errs ) sys.exit(1) green("- Image mounted successfully at: " + mountpointnth) return mountpointnth def umount_rawimage(partition=None): """ Unmounts a disk image. Image paths are assumed to correspond with /tmp/sdcard_img_p1 and /tmp/sdcard_img_p2, etc. param (int): partition to umount """ # syncs and umount both partitions, regardless of what's mounted (oversimplification) toumount = "1 and 2" if partition: cmd = "sync && sudo umount " + mountpointn + str(partition) toumount = str(partition) else: # umount first and second by default cmd = "sync && sudo umount " + mountpoint1 + " && sudo umount " + mountpoint2 outs, errs = run(cmd, shell=True, timeout=15) if errs: red( "Something went wrong while umounting the raw image partitions: " + toumount + ".\n" + "Review the output: " + errs ) sys.exit(1) green("- Umounted the raw image.") def replace_kernel(kernel_filename): """ Mount sd_card disk image in the workspace and replace kernel according to argument kernel_filename. NOTE: Refer to get_sdcard_img_dir() function for the location of the file """ # # Add a security warning # yellow( # "SECURITY WARNING: This class invokes explicitly a shell via the " # "shell=True argument of the Python subprocess library, and uses " # "admin privileges to manage raw disk images. It is the user's " # "responsibility to ensure that all whitespace and metacharacters " # "passed are quoted appropriately to avoid shell injection vulnerabilities." 
# ) firmware_dir = get_firmware_dir() # check that target kernel exists kernel_filename_path = firmware_dir + "/kernel/" + kernel_filename if not os.path.exists(kernel_filename_path): red("kernel file " + kernel_filename_path + " not found.") sys.exit(1) green("- Found kernel file " + kernel_filename_path) # copy the corresponding kernel file cmd = "sudo cp " + kernel_filename_path + " " + mountpoint1 + "/Image" outs, errs = run(cmd, shell=True, timeout=15) if errs: red( "Something went wrong while replacig the kernel.\n" + "Review the output: " + errs ) sys.exit(1) green("- Kernel deployed successfully (" + kernel_filename_path + ").") def add_kernel(kernel_filename): """ Mount sd_card disk image in the workspace and add kernel according to argument kernel_filename. NOTE: As opposed to replace_kernel(), this function aims to allow adding additional kernel images (e.g. for Xen setups involving VMs with different kernels) NOTE 2: Refer to get_sdcard_img_dir() function for the location of the file """ # # Add a security warning # yellow( # "SECURITY WARNING: This class invokes explicitly a shell via the " # "shell=True argument of the Python subprocess library, and uses " # "admin privileges to manage raw disk images. It is the user's " # "responsibility to ensure that all whitespace and metacharacters " # "passed are quoted appropriately to avoid shell injection vulnerabilities." # ) firmware_dir = get_firmware_dir() # check that target kernel exists kernel_filename_path = firmware_dir + "/kernel/" + kernel_filename if not os.path.exists(kernel_filename_path): red("kernel file " + kernel_filename_path + " not found.") sys.exit(1) green("- Found kernel file " + kernel_filename_path) # copy the corresponding kernel file cmd = "sudo cp " + kernel_filename_path + " " + mountpoint1 + "/" + kernel_filename outs, errs = run(cmd, shell=True, timeout=15) if errs: red( "Something went wrong while replacig the kernel.\n" + "Review the output: " + errs ) sys.exit(1) green("- Kernel added successfully (" + kernel_filename_path + ").") def exists(file_path): """ Check if file exists param file_path: absolute path of the file return: bool """ if os.path.exists(file_path): return True else: return False def copy_ros2_workspace(install_dir): # noqa: D102 """ Prepare the emulation param: context: superclass context containing arguments, etc. return: emulation_file_qemu, emulation_file_pmu, rawimage_path """ # Add a security warning # yellow( # "SECURITY WARNING: This class invokes explicitly a shell via the shell=True argument of the Python" # " subprocess library, and uses admin privileges to manage raw disk images. It is the user's " # "responsibility to ensure that all whitespace and metacharacters passed are quoted appropriately" # " to avoid shell injection vulnerabilities." # ) ######################### # 1. verifies that the `<workspace>/"install_dir"/` directory exists in the workspace. ######################### if not check_install_directory(install_dir): red( "workspace " + install_dir + " directory not found. Consider running " + "this command from the root directory of the workspace and build " + "the workspace first" ) sys.exit(1) green("- Verified that install/ is available in the current ROS 2 workspace") ######################### # 2. mounts the embedded raw image ("sd_card.img" file) available in deployed firmware # and deploys the `<workspace>/install/` directory under "/ros2_ws" in the rootfs. 
######################### rawimage_path = get_rawimage_path() if not rawimage_path: red( "raw image file not found. Consider running " + "this command from the root directory of the workspace and build " + "the workspace first so that Xilinx packages deploy automatically " + "the image." ) sys.exit(1) green("- Confirmed availability of raw image file at: " + rawimage_path) # fetch UNITS units = None cmd = "fdisk -l " + rawimage_path + " | grep 'Units\|Unidades' | awk '{print $8}'" outs, errs = run(cmd, shell=True) if outs: units = int(outs) if not units: red( "Something went wrong while fetching the raw image UNITS.\n" + "Review the output: " + outs ) sys.exit(1) # fetch STARTSECTORP1 startsectorp1 = None cmd = "fdisk -l " + rawimage_path + " | grep 'img1' | awk '{print $3}'" outs, errs = run(cmd, shell=True) if outs: startsectorp1 = int(outs) if not startsectorp1: red( "Something went wrong while fetching the raw image STARTSECTORP1.\n" + "Review the output: " + outs ) sys.exit(1) # fetch STARTSECTORP2 startsectorp2 = None cmd = "fdisk -l " + rawimage_path + " | grep 'img2' | awk '{print $2}'" outs, errs = run(cmd, shell=True) if outs: startsectorp2 = int(outs) if not startsectorp2: red( "Something went wrong while fetching the raw image STARTSECTORP2.\n" + "Review the output: " + outs if outs else "None" ) sys.exit(1) green("- Finished inspecting raw image, obtained UNITS and STARTSECTOR P1/P2") # define mountpoint and mount mountpoint = "/tmp/sdcard_img_p2" cmd = "mkdir -p " + mountpoint outs, errs = run(cmd, shell=True) if errs: red( "Something went wrong while setting MOUNTPOINT.\n" + "Review the output: " + errs ) sys.exit(1) cmd = ( "sudo mount -o loop,offset=" + str(units * startsectorp2) + " " + rawimage_path + " " + mountpoint ) # # debug # print(cmd) outs, errs = run( cmd, shell=True, timeout=15 ) # longer timeout, allow user to input password if errs: red("Something went wrong while mounting.\n" + "Review the output: " + errs) sys.exit(1) green("- Image mounted successfully at: " + mountpoint) # remove prior overlay ROS 2 workspace files at "/ros2_ws", # and copy the <ws>/install directory as such if os.path.exists(mountpoint + "/ros2_ws"): cmd = "sudo rm -r " + mountpoint + "/ros2_ws/*" outs, errs = run(cmd, shell=True) if errs: red( "Something went wrong while removing image workspace.\n" + "Review the output: " + errs ) sys.exit(1) green( "- Successfully cleaned up prior overlay ROS 2 workspace " + "at: " + mountpoint + "/ros2_ws" ) else: yellow( "No prior overlay ROS 2 workspace found " + "at: " + mountpoint + "/ros2_ws, creating it." ) cmd = "sudo mkdir " + mountpoint + "/ros2_ws" outs, errs = run(cmd, shell=True) if errs: red( "Something went wrong while creating overlay ROS 2 workspace.\n" + "Review the output: " + errs ) sys.exit(1) cmd = "sudo cp -r " + install_dir + "/* " + mountpoint + "/ros2_ws" outs, errs = run(cmd, shell=True) if errs: red( "Something went wrong while copying overlay ROS 2 workspace to mountpoint.\n" + "Review the output: " + errs ) sys.exit(1) green( "- Copied '" + install_dir + "' directory as a ROS 2 overlay workspace in the raw image." ) ######################### # 3. 
syncs and umount the raw image ######################### cmd = "sync && sudo umount " + mountpoint outs, errs = run(cmd, shell=True, timeout=15) if errs: red( "Something went wrong while umounting the raw image.\n" + "Review the output: " + errs ) sys.exit(1) green("- Umounted the raw image.") def copy_libstdcppfs(partition=2): # noqa: D102 """ Copy libstdc++fs.a into rootfs return: None """ rawimage_path = get_rawimage_path("sd_card.img") mount_rawimage(rawimage_path, partition) firmware_dir = get_firmware_dir() cmd = ( "sudo cp -r " + firmware_dir + "/lib/libstdc++fs.a " + mountpointn + str(partition) + "/usr/lib/libstdc++fs.a" ) outs, errs = run(cmd, shell=True) # umount raw disk image umount_rawimage(partition)
[]
[]
[ "XILINX_VIVADO", "PWD", "XILINX_HLS", "XILINX_VITIS" ]
[]
["XILINX_VIVADO", "PWD", "XILINX_HLS", "XILINX_VITIS"]
python
4
0
src/main/java/cn/leancloud/demo/todo/Application.java
package cn.leancloud.demo.todo;

import javax.servlet.ServletContextListener;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.web.servlet.ServletComponentScan;
import org.springframework.boot.web.servlet.ServletListenerRegistrationBean;
import org.springframework.context.annotation.Bean;

import com.avos.avoscloud.AVOSCloud;
import com.avos.avoscloud.AVObject;
import com.avos.avoscloud.internal.impl.JavaRequestSignImplementation;

import cn.leancloud.LeanEngine;

@ServletComponentScan(basePackages = { "cn.leancloud" })
@EnableAutoConfiguration
@SpringBootApplication
public class Application {

  // private static final Logger logger = LogManager.getLogger(Application.class);
  //
  // private static String appId = System.getenv("LEANCLOUD_APP_ID");
  // private static String appKey = System.getenv("LEANCLOUD_APP_KEY");
  // private static String appMasterKey = System.getenv("LEANCLOUD_APP_MASTER_KEY");
  //
  // static {
  //   logger.info("LeanEngine app init.");
  //   // Register the subclass
  //   AVObject.registerSubclass(Todo.class);
  //   // Initialize AVOSCloud; make sure this is done only once in the whole project
  //   LeanEngine.initialize(appId, appKey, appMasterKey);
  //   // Use the masterKey in request signing to enable the highest privileges for cloud code
  //   JavaRequestSignImplementation.instance().setUseMasterKey(true);
  //   // Enable debug logging
  //   // AVOSCloud.setDebugLogEnabled(true);
  //   // Register cloud functions with LeanEngine
  //   LeanEngine.register(Cloud.class);
  //   if (System.getenv("LEANCLOUD_APP_ENV").equals("development")) {
  //     // In the development environment, make AVCloud.callFunction and AVCloud.rpcFunction call the local cloud function implementations.
  //     // Comment out the line below if local development should call the cloud-side implementations instead.
  //     LeanEngine.setLocalEngineCallEnabled(true);
  //   }
  // }

  @Bean
  ServletListenerRegistrationBean<ServletContextListener> myServletListener() {
    ServletListenerRegistrationBean<ServletContextListener> srb = new ServletListenerRegistrationBean<>();
    srb.setListener(new AppInitListener());
    return srb;
  }

  public static void main(String[] args) {
    SpringApplication.run(Application.class);
  }
}
[ "\"LEANCLOUD_APP_ID\"", "\"LEANCLOUD_APP_KEY\"", "\"LEANCLOUD_APP_MASTER_KEY\"", "\"LEANCLOUD_APP_ENV\"" ]
[]
[ "LEANCLOUD_APP_ID", "LEANCLOUD_APP_ENV", "LEANCLOUD_APP_MASTER_KEY", "LEANCLOUD_APP_KEY" ]
[]
["LEANCLOUD_APP_ID", "LEANCLOUD_APP_ENV", "LEANCLOUD_APP_MASTER_KEY", "LEANCLOUD_APP_KEY"]
java
4
0
countingStrategy.go
package wonging

type CountingStrategy func(*Counter) float32

// strategies
func getHiLo(c *Counter) float32 {
	return float32(c.HiLo)
}

func getHiOpt1(c *Counter) float32 {
	return float32(c.HiOpt1)
}

func getHiOpt2(c *Counter) float32 {
	return float32(c.HiOpt2)
}

func getKO(c *Counter) float32 {
	return float32(c.KO)
}

func getOmega2(c *Counter) float32 {
	return float32(c.Omega2)
}

func getRed7(c *Counter) float32 {
	return float32(c.Red7)
}

func getZenCount(c *Counter) float32 {
	return float32(c.ZenCount)
}

func getAceFiveCount(c *Counter) float32 {
	return float32(c.AceFiveCount)
}

// strategies assignments
func randomCountingStrategy() (CountingStrategy, string) {
	strategies := []CountingStrategy{getHiLo, getHiOpt1, getHiOpt2, getKO, getOmega2, getRed7, getZenCount, getAceFiveCount}
	description := []string{"Hi Lo", "Hi Opt 1", "Hi Opt 2", "KO", "Omega 2", "Red 7", "Zen Count", "Ace/Five Count"}
	randomInt := randInt(0, len(strategies))
	return strategies[randomInt], description[randomInt]
}
[]
[]
[]
[]
[]
go
null
null
null