filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (sequence) | variablearg (sequence) | constarg (sequence) | variableargjson (stringclasses 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses 3 values) | constargcount (float64, 0-129) | variableargcount (float64, 0) | sentence (stringclasses 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
config/configuration.go | // Package config defines the Cloudinary configuration.
package config
import (
"errors"
"net/url"
"os"
"github.com/creasty/defaults"
"github.com/gorilla/schema"
)
// Configuration is the main configuration struct.
type Configuration struct {
Cloud Cloud
API API
URL URL
AuthToken AuthToken
}
var decoder = schema.NewDecoder()
// New returns a new Configuration instance from the CLOUDINARY_URL environment variable.
func New() (*Configuration, error) {
return NewFromURL(os.Getenv("CLOUDINARY_URL"))
}
// NewFromURL returns a new Configuration instance from a cloudinary url.
func NewFromURL(cldURLStr string) (*Configuration, error) {
if cldURLStr == "" {
return nil, errors.New("must provide CLOUDINARY_URL")
}
cldURL, err := url.Parse(cldURLStr)
if err != nil {
return nil, err
}
pass, _ := cldURL.User.Password()
params := cldURL.Query()
conf, err := NewFromQueryParams(cldURL.Host, cldURL.User.Username(), pass, params)
if err != nil {
return nil, err
}
return conf, err
}
// NewFromParams returns a new Configuration instance from the provided parameters.
func NewFromParams(cloud string, key string, secret string) (*Configuration, error) {
return NewFromQueryParams(cloud, key, secret, map[string][]string{})
}
// NewFromQueryParams returns a new Configuration instance from the provided url query parameters.
func NewFromQueryParams(cloud string, key string, secret string, params map[string][]string) (*Configuration, error) {
cloudConf := Cloud{
CloudName: cloud,
APIKey: key,
APISecret: secret,
}
conf := &Configuration{
Cloud: cloudConf,
API: API{},
URL: URL{},
AuthToken: AuthToken{},
}
if err := defaults.Set(conf); err != nil {
return nil, err
}
// import configuration keys from parameters
decoder.IgnoreUnknownKeys(true)
err := decoder.Decode(&conf.Cloud, params)
if err != nil {
return nil, err
}
err = decoder.Decode(&conf.API, params)
if err != nil {
return nil, err
}
err = decoder.Decode(&conf.URL, params)
if err != nil {
return nil, err
}
err = decoder.Decode(&conf.AuthToken, params)
if err != nil {
return nil, err
}
return conf, nil
}
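For context, here is a minimal usage sketch of the constructors documented above. The import path and the credential values are illustrative assumptions; only the constructor names and the Cloud field names come from the file itself.

```go
package main

import (
	"fmt"
	"log"

	// Assumed import path for the config package shown above; adjust to the real module path.
	"github.com/cloudinary/cloudinary-go/config"
)

func main() {
	// A Cloudinary URL has the form cloudinary://<api_key>:<api_secret>@<cloud_name>.
	conf, err := config.NewFromURL("cloudinary://my-key:my-secret@demo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(conf.Cloud.CloudName) // demo

	// Equivalent construction from discrete parameters.
	conf, err = config.NewFromParams("demo", "my-key", "my-secret")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(conf.Cloud.APIKey) // my-key
}
```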
| [ "\"CLOUDINARY_URL\"" ] | [] | [ "CLOUDINARY_URL" ] | [] | ["CLOUDINARY_URL"] | go | 1 | 0 | |
grpc/grpc_server.go | package main
import (
"fmt"
"log"
"net"
"os"
pb "myapp/rpc/math"
"myapp/grpc/resolver"
"google.golang.org/grpc"
)
const defaultPort = "8080"
func main() {
port := os.Getenv("PORT")
if port == "" {
port = defaultPort
}
lis, err := net.Listen("tcp", fmt.Sprintf(":%s", port))
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
var opts []grpc.ServerOption
grpcServer := grpc.NewServer(opts...)
pb.RegisterMathGuideServer(grpcServer, &resolver.Server{})
log.Printf("Listen on port : %s\n", port)
grpcServer.Serve(lis)
}
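As a counterpart to the server above, a hedged client sketch follows. The generated constructor pb.NewMathGuideClient is an assumption inferred from the RegisterMathGuideServer call (protoc-gen-go-grpc emits a matching client constructor), and no RPC methods are invoked because the proto definition is not part of this row.

```go
package main

import (
	"log"

	pb "myapp/rpc/math" // assumed to contain the generated MathGuide client

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial the server started above on its default port.
	conn, err := grpc.Dial("localhost:8080", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()

	// The concrete RPC methods are defined by the (unshown) proto file, so this
	// sketch stops at constructing the client.
	client := pb.NewMathGuideClient(conn)
	_ = client
	log.Println("connected to the MathGuide server")
}
```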
| [ "\"PORT\"" ] | [] | [ "PORT" ] | [] | ["PORT"] | go | 1 | 0 | |
itest/starboard/starboard_cli_test.go | package starboard
import (
. "github.com/nilesh-akhade/starboard/itest/matcher"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gstruct"
"context"
"os"
"strings"
"time"
"github.com/nilesh-akhade/starboard/itest/helper"
"github.com/nilesh-akhade/starboard/pkg/apis/aquasecurity/v1alpha1"
"github.com/nilesh-akhade/starboard/pkg/cmd"
"github.com/nilesh-akhade/starboard/pkg/kube"
"github.com/nilesh-akhade/starboard/pkg/starboard"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
corev1 "k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
)
var _ = Describe("Starboard CLI", func() {
BeforeEach(func() {
err := cmd.Run(versionInfo, []string{
"starboard", "init",
"-v", starboardCLILogLevel,
}, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
})
Describe("Command init", func() {
It("should initialize Starboard", func() {
crdList, err := customResourceDefinitions.List(context.TODO(), metav1.ListOptions{})
Expect(err).ToNot(HaveOccurred())
groupByName := func(element interface{}) string {
return element.(apiextensionsv1beta1.CustomResourceDefinition).Name
}
Expect(crdList.Items).To(MatchAllElements(groupByName, Elements{
"vulnerabilityreports.aquasecurity.github.io": MatchFields(IgnoreExtras, Fields{
"Spec": MatchFields(IgnoreExtras, Fields{
"Group": Equal("aquasecurity.github.io"),
"Version": Equal("v1alpha1"),
"Names": Equal(apiextensionsv1beta1.CustomResourceDefinitionNames{
Plural: "vulnerabilityreports",
Singular: "vulnerabilityreport",
ShortNames: []string{"vuln", "vulns"},
Kind: "VulnerabilityReport",
ListKind: "VulnerabilityReportList",
Categories: []string{"all"},
}),
"Scope": Equal(apiextensionsv1beta1.NamespaceScoped),
}),
}),
"configauditreports.aquasecurity.github.io": MatchFields(IgnoreExtras, Fields{
"Spec": MatchFields(IgnoreExtras, Fields{
"Group": Equal("aquasecurity.github.io"),
"Version": Equal("v1alpha1"),
"Scope": Equal(apiextensionsv1beta1.NamespaceScoped),
"Names": Equal(apiextensionsv1beta1.CustomResourceDefinitionNames{
Plural: "configauditreports",
Singular: "configauditreport",
ShortNames: []string{"configaudit"},
Kind: "ConfigAuditReport",
ListKind: "ConfigAuditReportList",
Categories: []string{"all"},
}),
}),
}),
"clusterconfigauditreports.aquasecurity.github.io": MatchFields(IgnoreExtras, Fields{
"Spec": MatchFields(IgnoreExtras, Fields{
"Group": Equal("aquasecurity.github.io"),
"Version": Equal("v1alpha1"),
"Scope": Equal(apiextensionsv1beta1.ClusterScoped),
"Names": Equal(apiextensionsv1beta1.CustomResourceDefinitionNames{
Plural: "clusterconfigauditreports",
Singular: "clusterconfigauditreport",
ShortNames: []string{"clusterconfigaudit"},
Kind: "ClusterConfigAuditReport",
ListKind: "ClusterConfigAuditReportList",
Categories: []string{"all"},
}),
}),
}),
"ciskubebenchreports.aquasecurity.github.io": MatchFields(IgnoreExtras, Fields{
"Spec": MatchFields(IgnoreExtras, Fields{
"Group": Equal("aquasecurity.github.io"),
"Version": Equal("v1alpha1"),
"Scope": Equal(apiextensionsv1beta1.ClusterScoped),
"Names": Equal(apiextensionsv1beta1.CustomResourceDefinitionNames{
Plural: "ciskubebenchreports",
Singular: "ciskubebenchreport",
ShortNames: []string{"kubebench"},
Kind: "CISKubeBenchReport",
ListKind: "CISKubeBenchReportList",
Categories: []string{"all"},
}),
}),
}),
"kubehunterreports.aquasecurity.github.io": MatchFields(IgnoreExtras, Fields{
"Spec": MatchFields(IgnoreExtras, Fields{
"Group": Equal("aquasecurity.github.io"),
"Version": Equal("v1alpha1"),
"Scope": Equal(apiextensionsv1beta1.ClusterScoped),
"Names": Equal(apiextensionsv1beta1.CustomResourceDefinitionNames{
Plural: "kubehunterreports",
Singular: "kubehunterreport",
ShortNames: []string{"kubehunter"},
Kind: "KubeHunterReport",
ListKind: "KubeHunterReportList",
Categories: []string{"all"},
}),
}),
}),
}))
err = kubeClient.Get(context.TODO(), types.NamespacedName{
Name: starboard.NamespaceName,
}, &corev1.Namespace{})
Expect(err).ToNot(HaveOccurred())
var cm corev1.ConfigMap
err = kubeClient.Get(context.TODO(), types.NamespacedName{
Name: starboard.ConfigMapName,
Namespace: starboard.NamespaceName,
}, &cm)
Expect(err).ToNot(HaveOccurred())
Expect(cm.Data).To(BeEquivalentTo(starboard.GetDefaultConfig()))
var secret corev1.Secret
err = kubeClient.Get(context.TODO(), types.NamespacedName{
Name: starboard.SecretName,
Namespace: starboard.NamespaceName,
}, &secret)
Expect(err).ToNot(HaveOccurred())
Expect(secret.Data).To(Equal(map[string][]byte(nil)))
err = kubeClient.Get(context.TODO(), types.NamespacedName{
Name: starboard.ServiceAccountName,
Namespace: starboard.NamespaceName,
}, &corev1.ServiceAccount{})
Expect(err).ToNot(HaveOccurred())
})
})
Describe("Command version", func() {
It("should print the current version of the executable binary", func() {
out := NewBuffer()
err := cmd.Run(versionInfo, []string{
"starboard",
"version",
}, out, out)
Expect(err).ToNot(HaveOccurred())
Eventually(out).Should(Say("Starboard Version: {Version:dev Commit:none Date:unknown}"))
})
})
Describe("Command scan vulnerabilityreports", func() {
groupByContainerName := func(element interface{}) string {
return element.(v1alpha1.VulnerabilityReport).
Labels[starboard.LabelContainerName]
}
Context("when unmanaged Pod is specified as workload", func() {
var pod *corev1.Pod
BeforeEach(func() {
pod = helper.NewPod().WithName("nginx").
WithNamespace(testNamespace.Name).
WithContainer("nginx-container", "nginx:1.16").
Build()
err := kubeClient.Create(context.TODO(), pod)
Expect(err).ToNot(HaveOccurred())
})
It("should create VulnerabilityReport", func() {
err := cmd.Run(versionInfo, []string{
"starboard",
"scan", "vulnerabilityreports", "pod/" + pod.Name,
"--namespace", pod.Namespace,
"-v", starboardCLILogLevel,
}, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
var reportList v1alpha1.VulnerabilityReportList
err = kubeClient.List(context.TODO(), &reportList, client.MatchingLabels{
starboard.LabelResourceKind: string(kube.KindPod),
starboard.LabelResourceName: pod.Name,
starboard.LabelResourceNamespace: pod.Namespace,
})
Expect(err).ToNot(HaveOccurred())
Expect(reportList.Items).To(MatchAllElements(groupByContainerName, Elements{
"nginx-container": IsVulnerabilityReportForContainerOwnedBy("nginx-container", pod),
}))
})
AfterEach(func() {
err := kubeClient.Delete(context.TODO(), pod)
Expect(err).ToNot(HaveOccurred())
})
})
Context("when unmanaged Pod with multiple containers is specified as workload", func() {
var pod *corev1.Pod
BeforeEach(func() {
pod = helper.NewPod().WithName("nginx-and-tomcat").
WithNamespace(testNamespace.Name).
WithContainer("nginx-container", "nginx:1.16").
WithContainer("tomcat-container", "tomcat:8").
Build()
err := kubeClient.Create(context.TODO(), pod)
Expect(err).ToNot(HaveOccurred())
})
It("should create VulnerabilityReports", func() {
err := cmd.Run(versionInfo, []string{
"starboard",
"scan", "vulnerabilityreports", "pod/" + pod.Name,
"--namespace", pod.Namespace,
"-v", starboardCLILogLevel,
}, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
var reportList v1alpha1.VulnerabilityReportList
err = kubeClient.List(context.TODO(), &reportList, client.MatchingLabels{
starboard.LabelResourceKind: string(kube.KindPod),
starboard.LabelResourceName: pod.Name,
starboard.LabelResourceNamespace: pod.Namespace,
})
Expect(err).ToNot(HaveOccurred())
Expect(reportList.Items).To(MatchAllElements(groupByContainerName, Elements{
"nginx-container": IsVulnerabilityReportForContainerOwnedBy("nginx-container", pod),
"tomcat-container": IsVulnerabilityReportForContainerOwnedBy("tomcat-container", pod),
}))
})
AfterEach(func() {
err := kubeClient.Delete(context.TODO(), pod)
Expect(err).ToNot(HaveOccurred())
})
})
// TODO Run with other integration tests
// The only reason this test is marked as pending is that I don't know
// how to pass DockerHub private repository credentials to this test case.
PContext("when unmanaged Pod with private image is specified as workload", func() {
var pod *corev1.Pod
var imagePullSecret *corev1.Secret
BeforeEach(func() {
var err error
imagePullSecret, err = kube.NewImagePullSecret(metav1.ObjectMeta{
Name: "registry-credentials",
Namespace: testNamespace.Name,
}, "https://index.docker.io/v1",
os.Getenv("STARBOARD_TEST_DOCKERHUB_REGISTRY_USERNAME"),
os.Getenv("STARBOARD_TEST_DOCKERHUB_REGISTRY_PASSWORD"))
Expect(err).ToNot(HaveOccurred())
err = kubeClient.Create(context.TODO(), imagePullSecret)
Expect(err).ToNot(HaveOccurred())
pod = helper.NewPod().WithName("nginx-with-private-image").
WithNamespace(testNamespace.Name).
WithContainer("nginx-container", "starboardcicd/private-nginx:1.16").
WithImagePullSecret(imagePullSecret.Name).
Build()
err = kubeClient.Create(context.TODO(), pod)
Expect(err).ToNot(HaveOccurred())
})
It("should create VulnerabilityReport", func() {
err := cmd.Run(versionInfo, []string{
"starboard",
"scan", "vulnerabilityreports", "po/" + pod.Name,
"--namespace", pod.Namespace,
"-v", starboardCLILogLevel,
}, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
var reportList v1alpha1.VulnerabilityReportList
err = kubeClient.List(context.TODO(), &reportList, client.MatchingLabels{
starboard.LabelResourceKind: string(kube.KindPod),
starboard.LabelResourceName: pod.Name,
starboard.LabelResourceNamespace: pod.Namespace,
})
Expect(err).ToNot(HaveOccurred())
Expect(reportList.Items).To(MatchAllElements(groupByContainerName, Elements{
"nginx-container": IsVulnerabilityReportForContainerOwnedBy("nginx-container", pod),
}))
})
AfterEach(func() {
err := kubeClient.Delete(context.TODO(), pod)
Expect(err).ToNot(HaveOccurred())
err = kubeClient.Delete(context.TODO(), imagePullSecret)
Expect(err).ToNot(HaveOccurred())
})
})
Context("when ReplicaSet is specified as workload", func() {
var rs *appsv1.ReplicaSet
BeforeEach(func() {
var err error
rs = &appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: "nginx",
Namespace: testNamespace.Name,
},
Spec: appsv1.ReplicaSetSpec{
Replicas: pointer.Int32Ptr(1),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "nginx"},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels.Set{
"app": "nginx",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "nginx",
Image: "nginx:1.16",
},
},
},
},
},
}
err = kubeClient.Create(context.TODO(), rs)
Expect(err).ToNot(HaveOccurred())
})
It("should create VulnerabilityReport", func() {
err := cmd.Run(versionInfo, []string{
"starboard",
"scan", "vulnerabilityreports", "replicaset/" + rs.Name,
"--namespace", rs.Namespace,
"-v", starboardCLILogLevel,
}, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
var reportList v1alpha1.VulnerabilityReportList
err = kubeClient.List(context.TODO(), &reportList, client.MatchingLabels{
starboard.LabelResourceKind: string(kube.KindReplicaSet),
starboard.LabelResourceName: rs.Name,
starboard.LabelResourceNamespace: rs.Namespace,
})
Expect(err).ToNot(HaveOccurred())
Expect(reportList.Items).To(MatchAllElements(groupByContainerName, Elements{
"nginx": IsVulnerabilityReportForContainerOwnedBy("nginx", rs),
}))
})
AfterEach(func() {
err := kubeClient.Delete(context.TODO(), rs)
Expect(err).ToNot(HaveOccurred())
})
})
Context("when ReplicationController is specified as workload", func() {
var rc *corev1.ReplicationController
BeforeEach(func() {
var err error
rc = &corev1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "nginx",
Namespace: testNamespace.Name,
},
Spec: corev1.ReplicationControllerSpec{
Replicas: pointer.Int32Ptr(1),
Selector: map[string]string{
"app": "nginx",
},
Template: &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels.Set{
"app": "nginx",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "nginx",
Image: "nginx:1.16",
},
},
},
},
},
}
err = kubeClient.Create(context.TODO(), rc)
Expect(err).ToNot(HaveOccurred())
})
It("should create VulnerabilityReport", func() {
err := cmd.Run(versionInfo, []string{
"starboard",
"scan", "vulnerabilityreports", "rc/" + rc.Name,
"--namespace", rc.Namespace,
"-v", starboardCLILogLevel,
}, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
var reportList v1alpha1.VulnerabilityReportList
err = kubeClient.List(context.TODO(), &reportList, client.MatchingLabels{
starboard.LabelResourceKind: string(kube.KindReplicationController),
starboard.LabelResourceName: rc.Name,
starboard.LabelResourceNamespace: rc.Namespace,
})
Expect(err).ToNot(HaveOccurred())
Expect(reportList.Items).To(MatchAllElements(groupByContainerName, Elements{
"nginx": IsVulnerabilityReportForContainerOwnedBy("nginx", rc),
}))
})
AfterEach(func() {
err := kubeClient.Delete(context.TODO(), rc)
Expect(err).ToNot(HaveOccurred())
})
})
Context("when Deployment is specified as workload", func() {
var deploy *appsv1.Deployment
BeforeEach(func() {
deploy = helper.NewDeployment().WithName("nginx").
WithNamespace(testNamespace.Name).
WithContainer("nginx-container", "nginx:1.16").
Build()
err := kubeClient.Create(context.TODO(), deploy)
Expect(err).ToNot(HaveOccurred())
})
It("should create VulnerabilityReport", func() {
err := cmd.Run(versionInfo, []string{
"starboard",
"scan", "vulnerabilityreports", "deployment/" + deploy.Name,
"--namespace", deploy.Namespace,
"-v", starboardCLILogLevel,
}, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
var reportList v1alpha1.VulnerabilityReportList
err = kubeClient.List(context.TODO(), &reportList, client.MatchingLabels{
starboard.LabelResourceKind: string(kube.KindDeployment),
starboard.LabelResourceName: deploy.Name,
starboard.LabelResourceNamespace: deploy.Namespace,
})
Expect(err).ToNot(HaveOccurred())
Expect(reportList.Items).To(MatchAllElements(groupByContainerName, Elements{
"nginx-container": IsVulnerabilityReportForContainerOwnedBy("nginx-container", deploy),
}))
})
AfterEach(func() {
err := kubeClient.Delete(context.TODO(), deploy)
Expect(err).ToNot(HaveOccurred())
})
})
Context("when StatefulSet is specified as workload", func() {
var sts *appsv1.StatefulSet
BeforeEach(func() {
sts = &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-sts",
Namespace: testNamespace.Name,
},
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "test-sts",
},
},
ServiceName: "test-sts",
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "test-sts",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "test-sts-container",
Image: "busybox:1.28",
Command: []string{"sleep", "5000"},
},
},
},
},
},
}
err := kubeClient.Create(context.TODO(), sts)
Expect(err).ToNot(HaveOccurred())
})
It("should create VulnerabilityReport", func() {
err := cmd.Run(versionInfo, []string{
"starboard",
"scan", "vulnerabilityreports", "sts/" + sts.Name,
"--namespace", sts.Namespace,
"-v", starboardCLILogLevel,
}, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
var reportList v1alpha1.VulnerabilityReportList
err = kubeClient.List(context.TODO(), &reportList, client.MatchingLabels{
starboard.LabelResourceKind: string(kube.KindStatefulSet),
starboard.LabelResourceName: sts.Name,
starboard.LabelResourceNamespace: sts.Namespace,
})
Expect(err).ToNot(HaveOccurred())
Expect(reportList.Items).To(MatchAllElements(groupByContainerName, Elements{
"test-sts-container": IsVulnerabilityReportForContainerOwnedBy("test-sts-container", sts),
}))
})
AfterEach(func() {
err := kubeClient.Delete(context.TODO(), sts)
Expect(err).ToNot(HaveOccurred())
})
})
Context("when DaemonSet is specified as workload", func() {
var ds *appsv1.DaemonSet
BeforeEach(func() {
ds = &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-ds",
Namespace: testNamespace.Name,
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "test-ds",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "test-ds",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "test-ds-container",
Image: "busybox:1.28",
Command: []string{"sleep", "5000"},
},
},
},
},
},
}
err := kubeClient.Create(context.TODO(), ds)
Expect(err).ToNot(HaveOccurred())
})
It("Should create VulnerabilityReport", func() {
err := cmd.Run(versionInfo, []string{
"starboard",
"scan", "vulnerabilityreports", "ds/" + ds.Name,
"--namespace", ds.Namespace,
"-v", starboardCLILogLevel,
}, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
var reportList v1alpha1.VulnerabilityReportList
err = kubeClient.List(context.TODO(), &reportList, client.MatchingLabels{
starboard.LabelResourceKind: string(kube.KindDaemonSet),
starboard.LabelResourceName: ds.Name,
starboard.LabelResourceNamespace: ds.Namespace,
})
Expect(err).ToNot(HaveOccurred())
Expect(reportList.Items).To(MatchAllElements(groupByContainerName, Elements{
"test-ds-container": IsVulnerabilityReportForContainerOwnedBy("test-ds-container", ds),
}))
})
AfterEach(func() {
err := kubeClient.Delete(context.TODO(), ds)
Expect(err).ToNot(HaveOccurred())
})
})
})
Describe("Command get vulnerabilities", func() {
Context("for deployment/nginx resource", func() {
When("vulnerabilities are associated with the deployment itself", func() {
var ctx context.Context
var deploy *appsv1.Deployment
var report *v1alpha1.VulnerabilityReport
BeforeEach(func() {
ctx = context.TODO()
deploy = helper.NewDeployment().WithName("nginx").
WithNamespace(testNamespace.Name).
WithContainer("nginx", "nginx:1.16").
Build()
err := kubeClient.Create(ctx, deploy)
Expect(err).ToNot(HaveOccurred())
report = helper.NewVulnerabilityReport().WithName("0e1e25ab-8c55-4cdc-af64-21fb8f412cb0").
WithNamespace(testNamespace.Name).
WithOwnerKind(kube.KindDeployment).
WithOwnerName(deploy.Name).
Build()
err = kubeClient.Create(ctx, report)
Expect(err).ToNot(HaveOccurred())
})
When("getting vulnerabilities by deployment name", func() {
It("should return the vulnerabilities", func() {
stdout := NewBuffer()
stderr := NewBuffer()
err := cmd.Run(versionInfo, []string{
"starboard", "get", "vulnerabilities",
"deployment/" + deploy.Name,
"--namespace", deploy.Namespace,
"-v", starboardCLILogLevel,
}, stdout, stderr)
Expect(err).ToNot(HaveOccurred())
var list v1alpha1.VulnerabilityReportList
err = yaml.Unmarshal(stdout.Contents(), &list)
Expect(err).ToNot(HaveOccurred())
if Expect(len(list.Items)).To(Equal(1)) {
item := list.Items[0]
item.Report.UpdateTimestamp = report.Report.UpdateTimestamp // TODO A Hack to skip comparing timestamp
Expect(item.Report).To(Equal(report.Report))
}
Expect(stderr).Should(Say(""))
})
})
AfterEach(func() {
err := kubeClient.Delete(ctx, report)
Expect(err).ToNot(HaveOccurred())
err = kubeClient.Delete(ctx, deploy)
Expect(err).ToNot(HaveOccurred())
})
})
When("vulnerabilities are associated with the managed replicaset", func() {
var deploy *appsv1.Deployment
var replicasetName string
var podName string
var report *v1alpha1.VulnerabilityReport
BeforeEach(func() {
deploy = helper.NewDeployment().WithRandomName("nginx").
WithNamespace(testNamespace.Name).
WithContainer("nginx", "nginx:1.16").
Build()
err := kubeClient.Create(context.TODO(), deploy)
Expect(err).ToNot(HaveOccurred())
replicasetName = ""
for i := 0; i < 10; i++ {
var rsList appsv1.ReplicaSetList
err := kubeClient.List(context.TODO(), &rsList)
Expect(err).ToNot(HaveOccurred())
if len(rsList.Items) > 0 {
for _, rs := range rsList.Items {
for _, ownerRef := range rs.OwnerReferences {
if ownerRef.Name == deploy.Name && *ownerRef.Controller {
replicasetName = rs.Name
}
}
}
if replicasetName != "" {
break
}
}
time.Sleep(time.Second)
}
Expect(replicasetName).ToNot(BeEmpty())
podName = ""
for i := 0; i < 10; i++ {
var podList corev1.PodList
err := kubeClient.List(context.TODO(), &podList)
Expect(err).ToNot(HaveOccurred())
if len(podList.Items) > 0 {
for _, pod := range podList.Items {
for _, ownerRef := range pod.OwnerReferences {
if ownerRef.Name == replicasetName && *ownerRef.Controller {
podName = pod.Name
}
}
}
if podName != "" {
break
}
}
time.Sleep(time.Second)
}
Expect(podName).ToNot(BeEmpty())
report = helper.NewVulnerabilityReport().WithName("0e1e25ab-8c55-4cdc-af64-21fb8f412cb0").
WithNamespace(testNamespace.Name).
WithOwnerKind(kube.KindReplicaSet).
WithOwnerName(replicasetName).
Build()
err = kubeClient.Create(context.TODO(), report)
Expect(err).ToNot(HaveOccurred())
})
When("getting vulnerabilities by deployment name", func() {
It("should return the vulnerabilities", func() {
stdout := NewBuffer()
stderr := NewBuffer()
err := cmd.Run(versionInfo, []string{
"starboard", "get", "vulnerabilities",
"deployment/" + deploy.Name,
"--namespace", testNamespace.Name,
"-v", starboardCLILogLevel,
}, stdout, stderr)
Expect(err).ToNot(HaveOccurred())
var list v1alpha1.VulnerabilityReportList
err = yaml.Unmarshal(stdout.Contents(), &list)
Expect(err).ToNot(HaveOccurred())
if Expect(len(list.Items)).To(Equal(1)) {
item := list.Items[0]
item.Report.UpdateTimestamp = report.Report.UpdateTimestamp // TODO A Hack to skip comparing timestamp
Expect(item.Report).To(Equal(report.Report))
}
Expect(stderr).Should(Say(""))
})
})
When("getting vulnerabilities by replicaset name", func() {
It("should return the vulnerabilities", func() {
stdout := NewBuffer()
stderr := NewBuffer()
err := cmd.Run(versionInfo, []string{
"starboard", "get", "vulnerabilities",
"replicaset/" + replicasetName,
"--namespace", testNamespace.Name,
"-v", starboardCLILogLevel,
}, stdout, stderr)
Expect(err).ToNot(HaveOccurred())
var list v1alpha1.VulnerabilityReportList
err = yaml.Unmarshal(stdout.Contents(), &list)
Expect(err).ToNot(HaveOccurred())
if Expect(len(list.Items)).To(Equal(1)) {
item := list.Items[0]
item.Report.UpdateTimestamp = report.Report.UpdateTimestamp // TODO A Hack to skip comparing timestamp
Expect(item.Report).To(Equal(report.Report))
}
Expect(stderr).Should(Say(""))
})
})
When("getting vulnerabilities by pod name", func() {
It("should return the vulnerabilities", func() {
stdout := NewBuffer()
stderr := NewBuffer()
err := cmd.Run(versionInfo, []string{
"starboard", "get", "vulnerabilities",
"pod/" + podName,
"--namespace", testNamespace.Name,
"-v", starboardCLILogLevel,
}, stdout, stderr)
Expect(err).ToNot(HaveOccurred())
var list v1alpha1.VulnerabilityReportList
err = yaml.Unmarshal(stdout.Contents(), &list)
Expect(err).ToNot(HaveOccurred())
if Expect(len(list.Items)).To(Equal(1)) {
item := list.Items[0]
item.Report.UpdateTimestamp = report.Report.UpdateTimestamp // TODO A Hack to skip comparing timestamp
Expect(item.Report).To(Equal(report.Report))
}
Expect(stderr).Should(Say(""))
})
})
AfterEach(func() {
err := kubeClient.Delete(context.TODO(), report)
Expect(err).ToNot(HaveOccurred())
err = kubeClient.Delete(context.TODO(), deploy)
Expect(err).ToNot(HaveOccurred())
})
})
})
})
Describe("Command scan configauditreports", func() {
var object client.Object
groupByWorkloadName := func(element interface{}) string {
return element.(v1alpha1.ConfigAuditReport).
Labels[starboard.LabelResourceName]
}
updateScannerConfig := func(scanner starboard.Scanner) {
configmap := &corev1.ConfigMap{}
err := kubeClient.Get(context.Background(), types.NamespacedName{
Namespace: "starboard",
Name: "starboard",
}, configmap)
Expect(err).ToNot(HaveOccurred())
configmapCopy := configmap.DeepCopy()
configmapCopy.Data["configAuditReports.scanner"] = string(scanner)
err = kubeClient.Update(context.Background(), configmapCopy)
Expect(err).ToNot(HaveOccurred())
}
assertConfigAuditReportCreated := func(scanner starboard.Scanner, kind kube.Kind) {
It("should create ConfigAuditReport", func() {
updateScannerConfig(scanner)
err := cmd.Run(versionInfo, []string{
"starboard",
"scan", "configauditreports", strings.ToLower(string(kind)) + "/" + object.GetName(),
"--namespace", object.GetNamespace(),
"-v", starboardCLILogLevel,
}, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
var reportList v1alpha1.ConfigAuditReportList
err = kubeClient.List(context.TODO(), &reportList, client.MatchingLabels{
starboard.LabelResourceKind: string(kind),
starboard.LabelResourceName: object.GetName(),
starboard.LabelResourceNamespace: object.GetNamespace(),
})
Expect(err).ToNot(HaveOccurred())
Expect(reportList.Items).To(MatchAllElements(groupByWorkloadName, Elements{
object.GetName(): IsConfigAuditReportOwnedBy(object, scanner),
}))
})
}
BeforeEach(func() {
configmapCopy := conftestConfigMap.DeepCopy()
err := kubeClient.Create(context.TODO(), configmapCopy)
Expect(err).ToNot(HaveOccurred())
})
AfterEach(func() {
err := kubeClient.Delete(context.TODO(), conftestConfigMap)
Expect(err).ToNot(HaveOccurred())
})
Context("when unmanaged Pod is specified as workload", func() {
var ctx context.Context
BeforeEach(func() {
ctx = context.TODO()
object = helper.NewPod().WithName("nginx-polaris"+"-"+rand.String(5)).
WithNamespace(testNamespace.Name).
WithContainer("nginx-container", "nginx:1.16").
Build()
err := kubeClient.Create(ctx, object)
Expect(err).ToNot(HaveOccurred())
})
Context("with Polaris as scanner", func() {
assertConfigAuditReportCreated(starboard.Polaris, kube.KindPod)
})
Context("with Conftest as scanner", func() {
assertConfigAuditReportCreated(starboard.Conftest, kube.KindPod)
})
AfterEach(func() {
err := kubeClient.Delete(ctx, object)
Expect(err).ToNot(HaveOccurred())
})
})
Context("when unmanaged Pod with multiple containers is specified as workload", func() {
var ctx context.Context
BeforeEach(func() {
ctx = context.TODO()
object = helper.NewPod().WithName("nginx-and-tomcat-starboard"+"-"+rand.String(5)).
WithNamespace(testNamespace.Name).
WithContainer("nginx-container", "nginx:1.16").
WithContainer("tomcat-container", "tomcat:8").
Build()
err := kubeClient.Create(ctx, object)
Expect(err).ToNot(HaveOccurred())
})
Context("with Polaris as scanner", func() {
assertConfigAuditReportCreated(starboard.Polaris, kube.KindPod)
})
Context("with Conftest as scanner", func() {
assertConfigAuditReportCreated(starboard.Conftest, kube.KindPod)
})
AfterEach(func() {
err := kubeClient.Delete(ctx, object)
Expect(err).ToNot(HaveOccurred())
})
})
Context("when CronJob is specified as workload", func() {
var ctx context.Context
BeforeEach(func() {
ctx = context.TODO()
object = &batchv1beta1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "hello" + "-" + rand.String(5),
Namespace: testNamespace.Name,
},
Spec: batchv1beta1.CronJobSpec{
Schedule: "*/1 * * * *",
JobTemplate: batchv1beta1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyOnFailure,
Containers: []corev1.Container{
{
Name: "hello",
Image: "busybox",
Command: []string{
"/bin/sh",
"-c",
"date; echo Hello from the Kubernetes cluster",
},
},
},
},
},
},
},
},
}
err := kubeClient.Create(ctx, object)
Expect(err).ToNot(HaveOccurred())
})
Context("with Polaris as scanner", func() {
assertConfigAuditReportCreated(starboard.Polaris, kube.KindCronJob)
})
Context("with Conftest as scanner", func() {
assertConfigAuditReportCreated(starboard.Conftest, kube.KindCronJob)
})
AfterEach(func() {
err := kubeClient.Delete(ctx, object)
Expect(err).ToNot(HaveOccurred())
})
})
})
Describe("Command scan ciskubebenchreports", func() {
It("should create CISKubeBenchReports", func() {
err := cmd.Run(versionInfo, []string{
"starboard",
"scan", "ciskubebenchreports",
"-v", starboardCLILogLevel,
}, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
var nodeList corev1.NodeList
err = kubeClient.List(context.TODO(), &nodeList)
Expect(err).ToNot(HaveOccurred())
for _, node := range nodeList.Items {
var report v1alpha1.CISKubeBenchReport
err := kubeClient.Get(context.TODO(), types.NamespacedName{Name: node.Name}, &report)
Expect(err).ToNot(HaveOccurred(), "Expected CISKubeBenchReport for node %s but not found", node.Name)
// Note: The MatchFieldsMatcher expects a struct, not a pointer.
Expect(report).To(MatchFields(IgnoreExtras, Fields{
"ObjectMeta": MatchFields(IgnoreExtras, Fields{
"Labels": MatchAllKeys(Keys{
starboard.LabelResourceKind: Equal("Node"),
starboard.LabelResourceName: Equal(node.Name),
}),
"OwnerReferences": ConsistOf(metav1.OwnerReference{
APIVersion: "v1",
Kind: "Node",
Name: node.Name,
UID: node.UID,
Controller: pointer.BoolPtr(true),
BlockOwnerDeletion: pointer.BoolPtr(false),
}),
}),
"Report": MatchFields(IgnoreExtras, Fields{
"Scanner": Equal(v1alpha1.Scanner{
Name: "kube-bench",
Vendor: "Aqua Security",
Version: "0.6.3",
}),
}),
}))
}
})
})
Describe("Command scan kubehunterreports", func() {
BeforeEach(func() {
var cm corev1.ConfigMap
err := kubeClient.Get(context.TODO(), types.NamespacedName{
Name: starboard.ConfigMapName,
Namespace: starboard.NamespaceName,
}, &cm)
Expect(err).ToNot(HaveOccurred())
// Need to use kube-hunter quick scanning mode (subnet 24), otherwise
// when running the test in Azure (e.g., in a GitHub actions runner)
// kube-hunter may attempt to scan a large CIDR (subnet 16), which takes a long
// time and isn't necessary for the purposes of the test.
cm.Data["kube-hunter.quick"] = "true"
err = kubeClient.Update(context.TODO(), &cm)
Expect(err).ToNot(HaveOccurred())
})
It("should create KubeHunterReport", func() {
err := cmd.Run(versionInfo, []string{
"starboard",
"scan", "kubehunterreports",
"-v", starboardCLILogLevel,
}, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
var report v1alpha1.KubeHunterReport
err = kubeClient.Get(context.TODO(), types.NamespacedName{Name: "cluster"}, &report)
Expect(err).ToNot(HaveOccurred())
Expect(report.Labels).To(MatchAllKeys(Keys{
starboard.LabelResourceKind: Equal("Cluster"),
starboard.LabelResourceName: Equal("cluster"),
}))
})
})
AfterEach(func() {
err := cmd.Run(versionInfo, []string{
"starboard",
"cleanup",
"-v", starboardCLILogLevel,
}, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
})
})
| [ "\"STARBOARD_TEST_DOCKERHUB_REGISTRY_USERNAME\"", "\"STARBOARD_TEST_DOCKERHUB_REGISTRY_PASSWORD\"" ] | [] | [ "STARBOARD_TEST_DOCKERHUB_REGISTRY_PASSWORD", "STARBOARD_TEST_DOCKERHUB_REGISTRY_USERNAME" ] | [] | ["STARBOARD_TEST_DOCKERHUB_REGISTRY_PASSWORD", "STARBOARD_TEST_DOCKERHUB_REGISTRY_USERNAME"] | go | 2 | 0 | |
oauthlib/oauth2/rfc6749/utils.py | # -*- coding: utf-8 -*-
"""
oauthlib.utils
~~~~~~~~~~~~~~
This module contains utility methods used by various parts of the OAuth 2 spec.
"""
import datetime
import os
from urllib.parse import quote, urlparse
from oauthlib.common import urldecode
def list_to_scope(scope):
"""Convert a list of scopes to a space separated string."""
if isinstance(scope, str) or scope is None:
return scope
elif isinstance(scope, (set, tuple, list)):
return " ".join([str(s) for s in scope])
else:
raise ValueError("Invalid scope (%s), must be string, tuple, set, or list." % scope)
def scope_to_list(scope):
"""Convert a space separated string to a list of scopes."""
if isinstance(scope, (tuple, list, set)):
return [str(s) for s in scope]
elif scope is None:
return None
else:
return scope.strip().split(" ")
def params_from_uri(uri):
params = dict(urldecode(urlparse(uri).query))
if 'scope' in params:
params['scope'] = scope_to_list(params['scope'])
return params
def host_from_uri(uri):
"""Extract hostname and port from URI.
Will use default port for HTTP and HTTPS if none is present in the URI.
"""
default_ports = {
'HTTP': '80',
'HTTPS': '443',
}
sch, netloc, path, par, query, fra = urlparse(uri)
if ':' in netloc:
netloc, port = netloc.split(':', 1)
else:
port = default_ports.get(sch.upper())
return netloc, port
def escape(u):
"""Escape a string in an OAuth-compatible fashion.
TODO: verify whether this can in fact be used for OAuth 2
"""
if not isinstance(u, str):
raise ValueError('Only unicode objects are escapable.')
return quote(u.encode('utf-8'), safe=b'~')
def generate_age(issue_time):
"""Generate a age parameter for MAC authentication draft 00."""
td = datetime.datetime.now() - issue_time
age = (td.microseconds + (td.seconds + td.days * 24 * 3600)
* 10 ** 6) / 10 ** 6
return str(age)
def is_secure_transport(uri):
"""Check if the uri is over ssl."""
if os.environ.get('OAUTHLIB_INSECURE_TRANSPORT'):
return True
return uri.lower().startswith('https://')
| [] | [] | [ "OAUTHLIB_INSECURE_TRANSPORT" ] | [] | ["OAUTHLIB_INSECURE_TRANSPORT"] | python | 1 | 0 | |
amfe/component/__init__.py | #
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, RIXEN@TUM.DE.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
"""
Component module.
Module for partitioned component-structure.
"""
from .component_composite import *
from .component_connector import *
from .constants import *
from .mesh_component import *
from .partitioner import *
from .structural_component import *
from .tree_manager import *
| [] | [] | [] | [] | [] | python | null | null | null |
pdfscraper/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pdfscraper.settings.local")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
modules/setting/setting.go | // Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package setting
import (
"encoding/base64"
"fmt"
"net"
"net/mail"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"code.gitea.io/git"
"code.gitea.io/gitea/modules/generate"
"code.gitea.io/gitea/modules/log"
_ "code.gitea.io/gitea/modules/minwinsvc" // import minwinsvc for windows services
"code.gitea.io/gitea/modules/user"
"github.com/Unknwon/com"
_ "github.com/go-macaron/cache/memcache" // memcache plugin for cache
_ "github.com/go-macaron/cache/redis"
"github.com/go-macaron/session"
_ "github.com/go-macaron/session/redis" // redis plugin for store session
"github.com/go-xorm/core"
"github.com/kballard/go-shellquote"
"github.com/mcuadros/go-version"
"gopkg.in/ini.v1"
"strk.kbt.io/projects/go/libravatar"
)
// Scheme describes protocol types
type Scheme string
// enumerates all the scheme types
const (
HTTP Scheme = "http"
HTTPS Scheme = "https"
FCGI Scheme = "fcgi"
UnixSocket Scheme = "unix"
)
// LandingPage describes the default page
type LandingPage string
// enumerates all the landing page types
const (
LandingPageHome LandingPage = "/"
LandingPageExplore LandingPage = "/explore"
LandingPageOrganizations LandingPage = "/explore/organizations"
)
// MarkupParser defines the external parser configured in ini
type MarkupParser struct {
Enabled bool
MarkupName string
Command string
FileExtensions []string
IsInputFile bool
}
// enumerates all the policy repository creating
const (
RepoCreatingLastUserVisibility = "last"
RepoCreatingPrivate = "private"
RepoCreatingPublic = "public"
)
// enumerates all the types of captchas
const (
ImageCaptcha = "image"
ReCaptcha = "recaptcha"
)
// settings
var (
// AppVer settings
AppVer string
AppBuiltWith string
AppName string
AppURL string
AppSubURL string
AppSubURLDepth int // Number of slashes
AppPath string
AppDataPath string
AppWorkPath string
// Server settings
Protocol Scheme
Domain string
HTTPAddr string
HTTPPort string
LocalURL string
RedirectOtherPort bool
PortToRedirect string
OfflineMode bool
DisableRouterLog bool
CertFile string
KeyFile string
StaticRootPath string
EnableGzip bool
LandingPageURL LandingPage
UnixSocketPermission uint32
EnablePprof bool
PprofDataPath string
EnableLetsEncrypt bool
LetsEncryptTOS bool
LetsEncryptDirectory string
LetsEncryptEmail string
SSH = struct {
Disabled bool `ini:"DISABLE_SSH"`
StartBuiltinServer bool `ini:"START_SSH_SERVER"`
BuiltinServerUser string `ini:"BUILTIN_SSH_SERVER_USER"`
Domain string `ini:"SSH_DOMAIN"`
Port int `ini:"SSH_PORT"`
ListenHost string `ini:"SSH_LISTEN_HOST"`
ListenPort int `ini:"SSH_LISTEN_PORT"`
RootPath string `ini:"SSH_ROOT_PATH"`
ServerCiphers []string `ini:"SSH_SERVER_CIPHERS"`
ServerKeyExchanges []string `ini:"SSH_SERVER_KEY_EXCHANGES"`
ServerMACs []string `ini:"SSH_SERVER_MACS"`
KeyTestPath string `ini:"SSH_KEY_TEST_PATH"`
KeygenPath string `ini:"SSH_KEYGEN_PATH"`
AuthorizedKeysBackup bool `ini:"SSH_AUTHORIZED_KEYS_BACKUP"`
MinimumKeySizeCheck bool `ini:"-"`
MinimumKeySizes map[string]int `ini:"-"`
CreateAuthorizedKeysFile bool `ini:"SSH_CREATE_AUTHORIZED_KEYS_FILE"`
ExposeAnonymous bool `ini:"SSH_EXPOSE_ANONYMOUS"`
}{
Disabled: false,
StartBuiltinServer: false,
Domain: "",
Port: 22,
ServerCiphers: []string{"aes128-ctr", "aes192-ctr", "aes256-ctr", "aes128-gcm@openssh.com", "arcfour256", "arcfour128"},
ServerKeyExchanges: []string{"diffie-hellman-group1-sha1", "diffie-hellman-group14-sha1", "ecdh-sha2-nistp256", "ecdh-sha2-nistp384", "ecdh-sha2-nistp521", "curve25519-sha256@libssh.org"},
ServerMACs: []string{"hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96"},
KeygenPath: "ssh-keygen",
}
LFS struct {
StartServer bool `ini:"LFS_START_SERVER"`
ContentPath string `ini:"LFS_CONTENT_PATH"`
JWTSecretBase64 string `ini:"LFS_JWT_SECRET"`
JWTSecretBytes []byte `ini:"-"`
HTTPAuthExpiry time.Duration `ini:"LFS_HTTP_AUTH_EXPIRY"`
}
// Security settings
InstallLock bool
SecretKey string
LogInRememberDays int
CookieUserName string
CookieRememberName string
ReverseProxyAuthUser string
MinPasswordLength int
ImportLocalPaths bool
DisableGitHooks bool
// Database settings
UseSQLite3 bool
UseMySQL bool
UseMSSQL bool
UsePostgreSQL bool
UseTiDB bool
LogSQL bool
// Indexer settings
Indexer struct {
IssuePath string
RepoIndexerEnabled bool
RepoPath string
UpdateQueueLength int
MaxIndexerFileSize int64
}
// Webhook settings
Webhook = struct {
QueueLength int
DeliverTimeout int
SkipTLSVerify bool
Types []string
PagingNum int
}{
QueueLength: 1000,
DeliverTimeout: 5,
SkipTLSVerify: false,
PagingNum: 10,
}
// Repository settings
Repository = struct {
AnsiCharset string
ForcePrivate bool
DefaultPrivate string
MaxCreationLimit int
MirrorQueueLength int
PullRequestQueueLength int
PreferredLicenses []string
DisableHTTPGit bool
UseCompatSSHURI bool
// Repository editor settings
Editor struct {
LineWrapExtensions []string
PreviewableFileModes []string
} `ini:"-"`
// Repository upload settings
Upload struct {
Enabled bool
TempPath string
AllowedTypes []string `delim:"|"`
FileMaxSize int64
MaxFiles int
} `ini:"-"`
// Repository local settings
Local struct {
LocalCopyPath string
LocalWikiPath string
} `ini:"-"`
// Pull request settings
PullRequest struct {
WorkInProgressPrefixes []string
} `ini:"repository.pull-request"`
}{
AnsiCharset: "",
ForcePrivate: false,
DefaultPrivate: RepoCreatingLastUserVisibility,
MaxCreationLimit: -1,
MirrorQueueLength: 1000,
PullRequestQueueLength: 1000,
PreferredLicenses: []string{"Apache License 2.0,MIT License"},
DisableHTTPGit: false,
UseCompatSSHURI: false,
// Repository editor settings
Editor: struct {
LineWrapExtensions []string
PreviewableFileModes []string
}{
LineWrapExtensions: strings.Split(".txt,.md,.markdown,.mdown,.mkd,", ","),
PreviewableFileModes: []string{"markdown"},
},
// Repository upload settings
Upload: struct {
Enabled bool
TempPath string
AllowedTypes []string `delim:"|"`
FileMaxSize int64
MaxFiles int
}{
Enabled: true,
TempPath: "data/tmp/uploads",
AllowedTypes: []string{},
FileMaxSize: 3,
MaxFiles: 5,
},
// Repository local settings
Local: struct {
LocalCopyPath string
LocalWikiPath string
}{
LocalCopyPath: "tmp/local-repo",
LocalWikiPath: "tmp/local-wiki",
},
// Pull request settings
PullRequest: struct {
WorkInProgressPrefixes []string
}{
WorkInProgressPrefixes: defaultPullRequestWorkInProgressPrefixes,
},
}
RepoRootPath string
ScriptType = "bash"
// UI settings
UI = struct {
ExplorePagingNum int
IssuePagingNum int
RepoSearchPagingNum int
FeedMaxCommitNum int
GraphMaxCommitNum int
CodeCommentLines int
ReactionMaxUserNum int
ThemeColorMetaTag string
MaxDisplayFileSize int64
ShowUserEmail bool
DefaultTheme string
Admin struct {
UserPagingNum int
RepoPagingNum int
NoticePagingNum int
OrgPagingNum int
} `ini:"ui.admin"`
User struct {
RepoPagingNum int
} `ini:"ui.user"`
Meta struct {
Author string
Description string
Keywords string
} `ini:"ui.meta"`
}{
ExplorePagingNum: 20,
IssuePagingNum: 10,
RepoSearchPagingNum: 10,
FeedMaxCommitNum: 5,
GraphMaxCommitNum: 100,
CodeCommentLines: 4,
ReactionMaxUserNum: 10,
ThemeColorMetaTag: `#6cc644`,
MaxDisplayFileSize: 8388608,
DefaultTheme: `gitea`,
Admin: struct {
UserPagingNum int
RepoPagingNum int
NoticePagingNum int
OrgPagingNum int
}{
UserPagingNum: 50,
RepoPagingNum: 50,
NoticePagingNum: 25,
OrgPagingNum: 50,
},
User: struct {
RepoPagingNum int
}{
RepoPagingNum: 15,
},
Meta: struct {
Author string
Description string
Keywords string
}{
Author: "Gitea - Git with a cup of tea",
Description: "Gitea (Git with a cup of tea) is a painless self-hosted Git service written in Go",
Keywords: "go,git,self-hosted,gitea",
},
}
// Markdown settings
Markdown = struct {
EnableHardLineBreak bool
CustomURLSchemes []string `ini:"CUSTOM_URL_SCHEMES"`
FileExtensions []string
}{
EnableHardLineBreak: false,
FileExtensions: strings.Split(".md,.markdown,.mdown,.mkd", ","),
}
// Admin settings
Admin struct {
DisableRegularOrgCreation bool
}
// Picture settings
AvatarUploadPath string
AvatarMaxWidth int
AvatarMaxHeight int
GravatarSource string
GravatarSourceURL *url.URL
DisableGravatar bool
EnableFederatedAvatar bool
LibravatarService *libravatar.Libravatar
// Log settings
LogLevel string
LogRootPath string
LogModes []string
LogConfigs []string
// Attachment settings
AttachmentPath string
AttachmentAllowedTypes string
AttachmentMaxSize int64
AttachmentMaxFiles int
AttachmentEnabled bool
// Time settings
TimeFormat string
// Session settings
SessionConfig session.Options
CSRFCookieName = "_csrf"
// Cron tasks
Cron = struct {
UpdateMirror struct {
Enabled bool
RunAtStart bool
Schedule string
} `ini:"cron.update_mirrors"`
RepoHealthCheck struct {
Enabled bool
RunAtStart bool
Schedule string
Timeout time.Duration
Args []string `delim:" "`
} `ini:"cron.repo_health_check"`
CheckRepoStats struct {
Enabled bool
RunAtStart bool
Schedule string
} `ini:"cron.check_repo_stats"`
ArchiveCleanup struct {
Enabled bool
RunAtStart bool
Schedule string
OlderThan time.Duration
} `ini:"cron.archive_cleanup"`
SyncExternalUsers struct {
Enabled bool
RunAtStart bool
Schedule string
UpdateExisting bool
} `ini:"cron.sync_external_users"`
DeletedBranchesCleanup struct {
Enabled bool
RunAtStart bool
Schedule string
OlderThan time.Duration
} `ini:"cron.deleted_branches_cleanup"`
}{
UpdateMirror: struct {
Enabled bool
RunAtStart bool
Schedule string
}{
Enabled: true,
RunAtStart: false,
Schedule: "@every 10m",
},
RepoHealthCheck: struct {
Enabled bool
RunAtStart bool
Schedule string
Timeout time.Duration
Args []string `delim:" "`
}{
Enabled: true,
RunAtStart: false,
Schedule: "@every 24h",
Timeout: 60 * time.Second,
Args: []string{},
},
CheckRepoStats: struct {
Enabled bool
RunAtStart bool
Schedule string
}{
Enabled: true,
RunAtStart: true,
Schedule: "@every 24h",
},
ArchiveCleanup: struct {
Enabled bool
RunAtStart bool
Schedule string
OlderThan time.Duration
}{
Enabled: true,
RunAtStart: true,
Schedule: "@every 24h",
OlderThan: 24 * time.Hour,
},
SyncExternalUsers: struct {
Enabled bool
RunAtStart bool
Schedule string
UpdateExisting bool
}{
Enabled: true,
RunAtStart: false,
Schedule: "@every 24h",
UpdateExisting: true,
},
DeletedBranchesCleanup: struct {
Enabled bool
RunAtStart bool
Schedule string
OlderThan time.Duration
}{
Enabled: true,
RunAtStart: true,
Schedule: "@every 24h",
OlderThan: 24 * time.Hour,
},
}
// Git settings
Git = struct {
Version string `ini:"-"`
DisableDiffHighlight bool
MaxGitDiffLines int
MaxGitDiffLineCharacters int
MaxGitDiffFiles int
GCArgs []string `delim:" "`
Timeout struct {
Migrate int
Mirror int
Clone int
Pull int
GC int `ini:"GC"`
} `ini:"git.timeout"`
}{
DisableDiffHighlight: false,
MaxGitDiffLines: 1000,
MaxGitDiffLineCharacters: 5000,
MaxGitDiffFiles: 100,
GCArgs: []string{},
Timeout: struct {
Migrate int
Mirror int
Clone int
Pull int
GC int `ini:"GC"`
}{
Migrate: 600,
Mirror: 300,
Clone: 300,
Pull: 300,
GC: 60,
},
}
// Mirror settings
Mirror struct {
DefaultInterval time.Duration
MinInterval time.Duration
}
// API settings
API = struct {
EnableSwagger bool
MaxResponseItems int
}{
EnableSwagger: true,
MaxResponseItems: 50,
}
U2F = struct {
AppID string
TrustedFacets []string
}{}
// Metrics settings
Metrics = struct {
Enabled bool
Token string
}{
Enabled: false,
Token: "",
}
// I18n settings
Langs []string
Names []string
dateLangs map[string]string
// Highlight settings are loaded in modules/template/highlight.go
// Other settings
ShowFooterBranding bool
ShowFooterVersion bool
ShowFooterTemplateLoadTime bool
// Global setting objects
Cfg *ini.File
CustomPath string // Custom directory path
CustomConf string
CustomPID string
ProdMode bool
RunUser string
IsWindows bool
HasRobotsTxt bool
InternalToken string // internal access token
IterateBufferSize int
ExternalMarkupParsers []MarkupParser
// UILocation is the location on the UI, so that we can display the time on UI.
// Currently only show the default time.Local, it could be added to app.ini after UI is ready
UILocation = time.Local
)
// DateLang transforms standard language locale name to corresponding value in datetime plugin.
func DateLang(lang string) string {
name, ok := dateLangs[lang]
if ok {
return name
}
return "en"
}
func getAppPath() (string, error) {
var appPath string
var err error
if IsWindows && filepath.IsAbs(os.Args[0]) {
appPath = filepath.Clean(os.Args[0])
} else {
appPath, err = exec.LookPath(os.Args[0])
}
if err != nil {
return "", err
}
appPath, err = filepath.Abs(appPath)
if err != nil {
return "", err
}
// Note: we don't use path.Dir here because it does not handle the case
// in which the path starts with two "/" in Windows: "//psf/Home/..."
return strings.Replace(appPath, "\\", "/", -1), err
}
func getWorkPath(appPath string) string {
workPath := ""
giteaWorkPath := os.Getenv("GITEA_WORK_DIR")
if len(giteaWorkPath) > 0 {
workPath = giteaWorkPath
} else {
i := strings.LastIndex(appPath, "/")
if i == -1 {
workPath = appPath
} else {
workPath = appPath[:i]
}
}
return strings.Replace(workPath, "\\", "/", -1)
}
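// Illustrative example (not part of the original source): with GITEA_WORK_DIR=/var/lib/gitea
// the work path is /var/lib/gitea; without it, an app path of /usr/local/bin/gitea falls
// back to a work path of /usr/local/bin.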
func init() {
IsWindows = runtime.GOOS == "windows"
log.NewLogger(0, "console", `{"level": 0}`)
var err error
if AppPath, err = getAppPath(); err != nil {
log.Fatal(4, "Failed to get app path: %v", err)
}
AppWorkPath = getWorkPath(AppPath)
}
func forcePathSeparator(path string) {
if strings.Contains(path, "\\") {
log.Fatal(4, "Do not use '\\' or '\\\\' in paths, instead, please use '/' in all places")
}
}
// IsRunUserMatchCurrentUser returns false if configured run user does not match
// actual user that runs the app. The first return value is the actual user name.
// This check is ignored under Windows since SSH remote login is not the main
// method to login on Windows.
func IsRunUserMatchCurrentUser(runUser string) (string, bool) {
if IsWindows {
return "", true
}
currentUser := user.CurrentUsername()
return currentUser, runUser == currentUser
}
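// Illustrative example (not part of the original source): if app.ini sets RUN_USER = git
// but the process is started as root, this returns ("root", false) and startup should abort.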
func createPIDFile(pidPath string) {
currentPid := os.Getpid()
if err := os.MkdirAll(filepath.Dir(pidPath), os.ModePerm); err != nil {
log.Fatal(4, "Failed to create PID folder: %v", err)
}
file, err := os.Create(pidPath)
if err != nil {
log.Fatal(4, "Failed to create PID file: %v", err)
}
defer file.Close()
if _, err := file.WriteString(strconv.FormatInt(int64(currentPid), 10)); err != nil {
log.Fatal(4, "Failed to write PID information: %v", err)
}
}
// NewContext initializes configuration context.
// NOTE: do not print any log except error.
func NewContext() {
Cfg = ini.Empty()
CustomPath = os.Getenv("GITEA_CUSTOM")
if len(CustomPath) == 0 {
CustomPath = path.Join(AppWorkPath, "custom")
} else if !filepath.IsAbs(CustomPath) {
CustomPath = path.Join(AppWorkPath, CustomPath)
}
if len(CustomPID) > 0 {
createPIDFile(CustomPID)
}
if len(CustomConf) == 0 {
CustomConf = path.Join(CustomPath, "conf/app.ini")
} else if !filepath.IsAbs(CustomConf) {
CustomConf = path.Join(CustomPath, CustomConf)
}
if com.IsFile(CustomConf) {
if err := Cfg.Append(CustomConf); err != nil {
log.Fatal(4, "Failed to load custom conf '%s': %v", CustomConf, err)
}
} else {
log.Warn("Custom config '%s' not found, ignore this if you're running first time", CustomConf)
}
Cfg.NameMapper = ini.AllCapsUnderscore
homeDir, err := com.HomeDir()
if err != nil {
log.Fatal(4, "Failed to get home directory: %v", err)
}
homeDir = strings.Replace(homeDir, "\\", "/", -1)
LogLevel = getLogLevel("log", "LEVEL", "Info")
LogRootPath = Cfg.Section("log").Key("ROOT_PATH").MustString(path.Join(AppWorkPath, "log"))
forcePathSeparator(LogRootPath)
sec := Cfg.Section("server")
AppName = Cfg.Section("").Key("APP_NAME").MustString("Gitea: Git with a cup of tea")
Protocol = HTTP
if sec.Key("PROTOCOL").String() == "https" {
Protocol = HTTPS
CertFile = sec.Key("CERT_FILE").String()
KeyFile = sec.Key("KEY_FILE").String()
} else if sec.Key("PROTOCOL").String() == "fcgi" {
Protocol = FCGI
} else if sec.Key("PROTOCOL").String() == "unix" {
Protocol = UnixSocket
UnixSocketPermissionRaw := sec.Key("UNIX_SOCKET_PERMISSION").MustString("666")
UnixSocketPermissionParsed, err := strconv.ParseUint(UnixSocketPermissionRaw, 8, 32)
if err != nil || UnixSocketPermissionParsed > 0777 {
log.Fatal(4, "Failed to parse unixSocketPermission: %s", UnixSocketPermissionRaw)
}
UnixSocketPermission = uint32(UnixSocketPermissionParsed)
}
EnableLetsEncrypt = sec.Key("ENABLE_LETSENCRYPT").MustBool(false)
LetsEncryptTOS = sec.Key("LETSENCRYPT_ACCEPTTOS").MustBool(false)
if !LetsEncryptTOS && EnableLetsEncrypt {
log.Warn("Failed to enable Let's Encrypt due to Let's Encrypt TOS not being accepted")
EnableLetsEncrypt = false
}
LetsEncryptDirectory = sec.Key("LETSENCRYPT_DIRECTORY").MustString("https")
LetsEncryptEmail = sec.Key("LETSENCRYPT_EMAIL").MustString("")
Domain = sec.Key("DOMAIN").MustString("localhost")
HTTPAddr = sec.Key("HTTP_ADDR").MustString("0.0.0.0")
HTTPPort = sec.Key("HTTP_PORT").MustString("3000")
defaultAppURL := string(Protocol) + "://" + Domain
if (Protocol == HTTP && HTTPPort != "80") || (Protocol == HTTPS && HTTPPort != "443") {
defaultAppURL += ":" + HTTPPort
}
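// Illustrative example (not part of the original source): with PROTOCOL = https,
// DOMAIN = git.example.com and HTTP_PORT = 3000, defaultAppURL becomes
// "https://git.example.com:3000".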
AppURL = sec.Key("ROOT_URL").MustString(defaultAppURL)
AppURL = strings.TrimRight(AppURL, "/") + "/"
// Check if has app suburl.
url, err := url.Parse(AppURL)
if err != nil {
log.Fatal(4, "Invalid ROOT_URL '%s': %s", AppURL, err)
}
// Suburl should start with '/' and end without '/', such as '/{subpath}'.
// This value is empty if site does not have sub-url.
AppSubURL = strings.TrimSuffix(url.Path, "/")
AppSubURLDepth = strings.Count(AppSubURL, "/")
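// Illustrative example (not part of the original source): with ROOT_URL = "https://example.com/gitea/",
// url.Path is "/gitea/", so AppSubURL becomes "/gitea" and AppSubURLDepth is 1;
// with ROOT_URL = "https://example.com/" they are "" and 0 respectively.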
// If Domain differs from the AppURL domain, update it to the AppURL's domain.
// TODO: Can be replaced with url.Hostname() when the minimal Go version is 1.8.
urlHostname := strings.SplitN(url.Host, ":", 2)[0]
if urlHostname != Domain && net.ParseIP(urlHostname) == nil {
Domain = urlHostname
}
var defaultLocalURL string
switch Protocol {
case UnixSocket:
defaultLocalURL = "http://unix/"
case FCGI:
defaultLocalURL = AppURL
default:
defaultLocalURL = string(Protocol) + "://"
if HTTPAddr == "0.0.0.0" {
defaultLocalURL += "localhost"
} else {
defaultLocalURL += HTTPAddr
}
defaultLocalURL += ":" + HTTPPort + "/"
}
LocalURL = sec.Key("LOCAL_ROOT_URL").MustString(defaultLocalURL)
RedirectOtherPort = sec.Key("REDIRECT_OTHER_PORT").MustBool(false)
PortToRedirect = sec.Key("PORT_TO_REDIRECT").MustString("80")
OfflineMode = sec.Key("OFFLINE_MODE").MustBool()
DisableRouterLog = sec.Key("DISABLE_ROUTER_LOG").MustBool()
StaticRootPath = sec.Key("STATIC_ROOT_PATH").MustString(AppWorkPath)
AppDataPath = sec.Key("APP_DATA_PATH").MustString(path.Join(AppWorkPath, "data"))
EnableGzip = sec.Key("ENABLE_GZIP").MustBool()
EnablePprof = sec.Key("ENABLE_PPROF").MustBool(false)
PprofDataPath = sec.Key("PPROF_DATA_PATH").MustString(path.Join(AppWorkPath, "data/tmp/pprof"))
if !filepath.IsAbs(PprofDataPath) {
PprofDataPath = filepath.Join(AppWorkPath, PprofDataPath)
}
switch sec.Key("LANDING_PAGE").MustString("home") {
case "explore":
LandingPageURL = LandingPageExplore
case "organizations":
LandingPageURL = LandingPageOrganizations
default:
LandingPageURL = LandingPageHome
}
if len(SSH.Domain) == 0 {
SSH.Domain = Domain
}
SSH.RootPath = path.Join(homeDir, ".ssh")
serverCiphers := sec.Key("SSH_SERVER_CIPHERS").Strings(",")
if len(serverCiphers) > 0 {
SSH.ServerCiphers = serverCiphers
}
serverKeyExchanges := sec.Key("SSH_SERVER_KEY_EXCHANGES").Strings(",")
if len(serverKeyExchanges) > 0 {
SSH.ServerKeyExchanges = serverKeyExchanges
}
serverMACs := sec.Key("SSH_SERVER_MACS").Strings(",")
if len(serverMACs) > 0 {
SSH.ServerMACs = serverMACs
}
SSH.KeyTestPath = os.TempDir()
if err = Cfg.Section("server").MapTo(&SSH); err != nil {
log.Fatal(4, "Failed to map SSH settings: %v", err)
}
SSH.KeygenPath = sec.Key("SSH_KEYGEN_PATH").MustString("ssh-keygen")
SSH.Port = sec.Key("SSH_PORT").MustInt(22)
SSH.ListenPort = sec.Key("SSH_LISTEN_PORT").MustInt(SSH.Port)
// When SSH is disabled, the start-builtin-server value is ignored.
if SSH.Disabled {
SSH.StartBuiltinServer = false
}
if !SSH.Disabled && !SSH.StartBuiltinServer {
if err := os.MkdirAll(SSH.RootPath, 0700); err != nil {
log.Fatal(4, "Failed to create '%s': %v", SSH.RootPath, err)
} else if err = os.MkdirAll(SSH.KeyTestPath, 0644); err != nil {
log.Fatal(4, "Failed to create '%s': %v", SSH.KeyTestPath, err)
}
}
SSH.MinimumKeySizeCheck = sec.Key("MINIMUM_KEY_SIZE_CHECK").MustBool()
SSH.MinimumKeySizes = map[string]int{}
minimumKeySizes := Cfg.Section("ssh.minimum_key_sizes").Keys()
for _, key := range minimumKeySizes {
if key.MustInt() != -1 {
SSH.MinimumKeySizes[strings.ToLower(key.Name())] = key.MustInt()
}
}
SSH.AuthorizedKeysBackup = sec.Key("SSH_AUTHORIZED_KEYS_BACKUP").MustBool(true)
SSH.CreateAuthorizedKeysFile = sec.Key("SSH_CREATE_AUTHORIZED_KEYS_FILE").MustBool(true)
SSH.ExposeAnonymous = sec.Key("SSH_EXPOSE_ANONYMOUS").MustBool(false)
sec = Cfg.Section("server")
if err = sec.MapTo(&LFS); err != nil {
log.Fatal(4, "Failed to map LFS settings: %v", err)
}
LFS.ContentPath = sec.Key("LFS_CONTENT_PATH").MustString(filepath.Join(AppDataPath, "lfs"))
if !filepath.IsAbs(LFS.ContentPath) {
LFS.ContentPath = filepath.Join(AppWorkPath, LFS.ContentPath)
}
LFS.HTTPAuthExpiry = sec.Key("LFS_HTTP_AUTH_EXPIRY").MustDuration(20 * time.Minute)
if LFS.StartServer {
if err := os.MkdirAll(LFS.ContentPath, 0700); err != nil {
log.Fatal(4, "Failed to create '%s': %v", LFS.ContentPath, err)
}
LFS.JWTSecretBytes = make([]byte, 32)
n, err := base64.RawURLEncoding.Decode(LFS.JWTSecretBytes, []byte(LFS.JWTSecretBase64))
if err != nil || n != 32 {
LFS.JWTSecretBase64, err = generate.NewLfsJwtSecret()
if err != nil {
log.Fatal(4, "Error generating JWT Secret for custom config: %v", err)
return
}
// Save secret
cfg := ini.Empty()
if com.IsFile(CustomConf) {
// Keeps custom settings if there is already something.
if err := cfg.Append(CustomConf); err != nil {
log.Error(4, "Failed to load custom conf '%s': %v", CustomConf, err)
}
}
cfg.Section("server").Key("LFS_JWT_SECRET").SetValue(LFS.JWTSecretBase64)
if err := os.MkdirAll(filepath.Dir(CustomConf), os.ModePerm); err != nil {
log.Fatal(4, "Failed to create '%s': %v", CustomConf, err)
}
if err := cfg.SaveTo(CustomConf); err != nil {
log.Fatal(4, "Error saving generated JWT Secret to custom config: %v", err)
return
}
}
// Disable LFS client hooks if installed for the current OS user.
// Needs at least git v2.1.2.
binVersion, err := git.BinVersion()
if err != nil {
log.Fatal(4, "Error retrieving git version: %v", err)
}
if !version.Compare(binVersion, "2.1.2", ">=") {
LFS.StartServer = false
log.Error(4, "LFS server support needs at least Git v2.1.2")
} else {
git.GlobalCommandArgs = append(git.GlobalCommandArgs, "-c", "filter.lfs.required=",
"-c", "filter.lfs.smudge=", "-c", "filter.lfs.clean=")
}
}
sec = Cfg.Section("security")
InstallLock = sec.Key("INSTALL_LOCK").MustBool(false)
SecretKey = sec.Key("SECRET_KEY").MustString("!#@FDEWREWR&*(")
LogInRememberDays = sec.Key("LOGIN_REMEMBER_DAYS").MustInt(7)
CookieUserName = sec.Key("COOKIE_USERNAME").MustString("gitea_awesome")
CookieRememberName = sec.Key("COOKIE_REMEMBER_NAME").MustString("gitea_incredible")
ReverseProxyAuthUser = sec.Key("REVERSE_PROXY_AUTHENTICATION_USER").MustString("X-WEBAUTH-USER")
MinPasswordLength = sec.Key("MIN_PASSWORD_LENGTH").MustInt(6)
ImportLocalPaths = sec.Key("IMPORT_LOCAL_PATHS").MustBool(false)
DisableGitHooks = sec.Key("DISABLE_GIT_HOOKS").MustBool(false)
InternalToken = sec.Key("INTERNAL_TOKEN").String()
if len(InternalToken) == 0 {
InternalToken, err = generate.NewInternalToken()
if err != nil {
log.Fatal(4, "Error generate internal token: %v", err)
}
// Save secret
cfgSave := ini.Empty()
if com.IsFile(CustomConf) {
// Keeps custom settings if there is already something.
if err := cfgSave.Append(CustomConf); err != nil {
log.Error(4, "Failed to load custom conf '%s': %v", CustomConf, err)
}
}
cfgSave.Section("security").Key("INTERNAL_TOKEN").SetValue(InternalToken)
if err := os.MkdirAll(filepath.Dir(CustomConf), os.ModePerm); err != nil {
log.Fatal(4, "Failed to create '%s': %v", CustomConf, err)
}
if err := cfgSave.SaveTo(CustomConf); err != nil {
log.Fatal(4, "Error saving generated JWT Secret to custom config: %v", err)
}
}
IterateBufferSize = Cfg.Section("database").Key("ITERATE_BUFFER_SIZE").MustInt(50)
LogSQL = Cfg.Section("database").Key("LOG_SQL").MustBool(true)
sec = Cfg.Section("attachment")
AttachmentPath = sec.Key("PATH").MustString(path.Join(AppDataPath, "attachments"))
if !filepath.IsAbs(AttachmentPath) {
AttachmentPath = path.Join(AppWorkPath, AttachmentPath)
}
AttachmentAllowedTypes = strings.Replace(sec.Key("ALLOWED_TYPES").MustString("image/jpeg,image/png,application/zip,application/gzip"), "|", ",", -1)
AttachmentMaxSize = sec.Key("MAX_SIZE").MustInt64(4)
AttachmentMaxFiles = sec.Key("MAX_FILES").MustInt(5)
AttachmentEnabled = sec.Key("ENABLED").MustBool(true)
TimeFormatKey := Cfg.Section("time").Key("FORMAT").MustString("RFC1123")
TimeFormat = map[string]string{
"ANSIC": time.ANSIC,
"UnixDate": time.UnixDate,
"RubyDate": time.RubyDate,
"RFC822": time.RFC822,
"RFC822Z": time.RFC822Z,
"RFC850": time.RFC850,
"RFC1123": time.RFC1123,
"RFC1123Z": time.RFC1123Z,
"RFC3339": time.RFC3339,
"RFC3339Nano": time.RFC3339Nano,
"Kitchen": time.Kitchen,
"Stamp": time.Stamp,
"StampMilli": time.StampMilli,
"StampMicro": time.StampMicro,
"StampNano": time.StampNano,
}[TimeFormatKey]
// When the TimeFormatKey does not exist in the previous map e.g.'2006-01-02 15:04:05'
if len(TimeFormat) == 0 {
TimeFormat = TimeFormatKey
TestTimeFormat, _ := time.Parse(TimeFormat, TimeFormat)
if TestTimeFormat.Format(time.RFC3339) != "2006-01-02T15:04:05Z" {
log.Fatal(4, "Can't create time properly, please check your time format has 2006, 01, 02, 15, 04 and 05")
}
log.Trace("Custom TimeFormat: %s", TimeFormat)
}
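// Illustrative note (assumption): FORMAT = "RFC1123" maps to the Go layout
// "Mon, 02 Jan 2006 15:04:05 MST", while a literal layout such as
// "2006-01-02 15:04:05" misses the map above and is used verbatim via the
// custom-format branch.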
RunUser = Cfg.Section("").Key("RUN_USER").MustString(user.CurrentUsername())
// Does not check run user when the install lock is off.
if InstallLock {
currentUser, match := IsRunUserMatchCurrentUser(RunUser)
if !match {
log.Fatal(4, "Expect user '%s' but current user is: %s", RunUser, currentUser)
}
}
SSH.BuiltinServerUser = Cfg.Section("server").Key("BUILTIN_SSH_SERVER_USER").MustString(RunUser)
// Determine and create root git repository path.
sec = Cfg.Section("repository")
Repository.DisableHTTPGit = sec.Key("DISABLE_HTTP_GIT").MustBool()
Repository.UseCompatSSHURI = sec.Key("USE_COMPAT_SSH_URI").MustBool()
Repository.MaxCreationLimit = sec.Key("MAX_CREATION_LIMIT").MustInt(-1)
RepoRootPath = sec.Key("ROOT").MustString(path.Join(homeDir, "gitea-repositories"))
forcePathSeparator(RepoRootPath)
if !filepath.IsAbs(RepoRootPath) {
RepoRootPath = filepath.Join(AppWorkPath, RepoRootPath)
} else {
RepoRootPath = filepath.Clean(RepoRootPath)
}
ScriptType = sec.Key("SCRIPT_TYPE").MustString("bash")
if err = Cfg.Section("repository").MapTo(&Repository); err != nil {
log.Fatal(4, "Failed to map Repository settings: %v", err)
} else if err = Cfg.Section("repository.editor").MapTo(&Repository.Editor); err != nil {
log.Fatal(4, "Failed to map Repository.Editor settings: %v", err)
} else if err = Cfg.Section("repository.upload").MapTo(&Repository.Upload); err != nil {
log.Fatal(4, "Failed to map Repository.Upload settings: %v", err)
} else if err = Cfg.Section("repository.local").MapTo(&Repository.Local); err != nil {
log.Fatal(4, "Failed to map Repository.Local settings: %v", err)
} else if err = Cfg.Section("repository.pull-request").MapTo(&Repository.PullRequest); err != nil {
log.Fatal(4, "Failed to map Repository.PullRequest settings: %v", err)
}
if !filepath.IsAbs(Repository.Upload.TempPath) {
Repository.Upload.TempPath = path.Join(AppWorkPath, Repository.Upload.TempPath)
}
sec = Cfg.Section("picture")
AvatarUploadPath = sec.Key("AVATAR_UPLOAD_PATH").MustString(path.Join(AppDataPath, "avatars"))
forcePathSeparator(AvatarUploadPath)
if !filepath.IsAbs(AvatarUploadPath) {
AvatarUploadPath = path.Join(AppWorkPath, AvatarUploadPath)
}
AvatarMaxWidth = sec.Key("AVATAR_MAX_WIDTH").MustInt(4096)
AvatarMaxHeight = sec.Key("AVATAR_MAX_HEIGHT").MustInt(3072)
switch source := sec.Key("GRAVATAR_SOURCE").MustString("gravatar"); source {
case "duoshuo":
GravatarSource = "http://gravatar.duoshuo.com/avatar/"
case "gravatar":
GravatarSource = "https://secure.gravatar.com/avatar/"
case "libravatar":
GravatarSource = "https://seccdn.libravatar.org/avatar/"
default:
GravatarSource = source
}
DisableGravatar = sec.Key("DISABLE_GRAVATAR").MustBool()
EnableFederatedAvatar = sec.Key("ENABLE_FEDERATED_AVATAR").MustBool(!InstallLock)
if OfflineMode {
DisableGravatar = true
EnableFederatedAvatar = false
}
if DisableGravatar {
EnableFederatedAvatar = false
}
if EnableFederatedAvatar || !DisableGravatar {
GravatarSourceURL, err = url.Parse(GravatarSource)
if err != nil {
log.Fatal(4, "Failed to parse Gravatar URL(%s): %v",
GravatarSource, err)
}
}
if EnableFederatedAvatar {
LibravatarService = libravatar.New()
if GravatarSourceURL.Scheme == "https" {
LibravatarService.SetUseHTTPS(true)
LibravatarService.SetSecureFallbackHost(GravatarSourceURL.Host)
} else {
LibravatarService.SetUseHTTPS(false)
LibravatarService.SetFallbackHost(GravatarSourceURL.Host)
}
}
if err = Cfg.Section("ui").MapTo(&UI); err != nil {
log.Fatal(4, "Failed to map UI settings: %v", err)
} else if err = Cfg.Section("markdown").MapTo(&Markdown); err != nil {
log.Fatal(4, "Failed to map Markdown settings: %v", err)
} else if err = Cfg.Section("admin").MapTo(&Admin); err != nil {
log.Fatal(4, "Fail to map Admin settings: %v", err)
} else if err = Cfg.Section("cron").MapTo(&Cron); err != nil {
log.Fatal(4, "Failed to map Cron settings: %v", err)
} else if err = Cfg.Section("git").MapTo(&Git); err != nil {
log.Fatal(4, "Failed to map Git settings: %v", err)
} else if err = Cfg.Section("api").MapTo(&API); err != nil {
log.Fatal(4, "Failed to map API settings: %v", err)
} else if err = Cfg.Section("metrics").MapTo(&Metrics); err != nil {
log.Fatal(4, "Failed to map Metrics settings: %v", err)
}
sec = Cfg.Section("mirror")
Mirror.MinInterval = sec.Key("MIN_INTERVAL").MustDuration(10 * time.Minute)
Mirror.DefaultInterval = sec.Key("DEFAULT_INTERVAL").MustDuration(8 * time.Hour)
if Mirror.MinInterval.Minutes() < 1 {
log.Warn("Mirror.MinInterval is too low")
Mirror.MinInterval = 1 * time.Minute
}
if Mirror.DefaultInterval < Mirror.MinInterval {
log.Warn("Mirror.DefaultInterval is less than Mirror.MinInterval")
Mirror.DefaultInterval = time.Hour * 8
}
Langs = Cfg.Section("i18n").Key("LANGS").Strings(",")
if len(Langs) == 0 {
Langs = defaultLangs
}
Names = Cfg.Section("i18n").Key("NAMES").Strings(",")
if len(Names) == 0 {
Names = defaultLangNames
}
dateLangs = Cfg.Section("i18n.datelang").KeysHash()
ShowFooterBranding = Cfg.Section("other").Key("SHOW_FOOTER_BRANDING").MustBool(false)
ShowFooterVersion = Cfg.Section("other").Key("SHOW_FOOTER_VERSION").MustBool(true)
ShowFooterTemplateLoadTime = Cfg.Section("other").Key("SHOW_FOOTER_TEMPLATE_LOAD_TIME").MustBool(true)
UI.ShowUserEmail = Cfg.Section("ui").Key("SHOW_USER_EMAIL").MustBool(true)
HasRobotsTxt = com.IsFile(path.Join(CustomPath, "robots.txt"))
extensionReg := regexp.MustCompile(`\.\w`)
for _, sec := range Cfg.Section("markup").ChildSections() {
name := strings.TrimPrefix(sec.Name(), "markup.")
if name == "" {
log.Warn("name is empty, markup " + sec.Name() + "ignored")
continue
}
extensions := sec.Key("FILE_EXTENSIONS").Strings(",")
var exts = make([]string, 0, len(extensions))
for _, extension := range extensions {
if !extensionReg.MatchString(extension) {
log.Warn(sec.Name() + " file extension " + extension + " is invalid. Extension ignored")
} else {
exts = append(exts, extension)
}
}
if len(exts) == 0 {
log.Warn(sec.Name() + " file extension is empty, markup " + name + " ignored")
continue
}
command := sec.Key("RENDER_COMMAND").MustString("")
if command == "" {
log.Warn(" RENDER_COMMAND is empty, markup " + name + " ignored")
continue
}
ExternalMarkupParsers = append(ExternalMarkupParsers, MarkupParser{
Enabled: sec.Key("ENABLED").MustBool(false),
MarkupName: name,
FileExtensions: exts,
Command: command,
IsInputFile: sec.Key("IS_INPUT_FILE").MustBool(false),
})
}
sec = Cfg.Section("U2F")
U2F.TrustedFacets, _ = shellquote.Split(sec.Key("TRUSTED_FACETS").MustString(strings.TrimRight(AppURL, "/")))
U2F.AppID = sec.Key("APP_ID").MustString(strings.TrimRight(AppURL, "/"))
binVersion, err := git.BinVersion()
if err != nil {
log.Fatal(4, "Error retrieving git version: %v", err)
}
if version.Compare(binVersion, "2.9", ">=") {
// Explicitly disable credential helper, otherwise Git credentials might leak
git.GlobalCommandArgs = append(git.GlobalCommandArgs, "-c", "credential.helper=")
}
}
// Service settings
var Service struct {
ActiveCodeLives int
ResetPwdCodeLives int
RegisterEmailConfirm bool
EmailDomainWhitelist []string
DisableRegistration bool
AllowOnlyExternalRegistration bool
ShowRegistrationButton bool
RequireSignInView bool
EnableNotifyMail bool
EnableReverseProxyAuth bool
EnableReverseProxyAutoRegister bool
EnableCaptcha bool
CaptchaType string
RecaptchaSecret string
RecaptchaSitekey string
DefaultKeepEmailPrivate bool
DefaultAllowCreateOrganization bool
EnableTimetracking bool
DefaultEnableTimetracking bool
DefaultEnableDependencies bool
DefaultAllowOnlyContributorsToTrackTime bool
NoReplyAddress string
EnableUserHeatmap bool
// OpenID settings
EnableOpenIDSignIn bool
EnableOpenIDSignUp bool
OpenIDWhitelist []*regexp.Regexp
OpenIDBlacklist []*regexp.Regexp
}
func newService() {
sec := Cfg.Section("service")
Service.ActiveCodeLives = sec.Key("ACTIVE_CODE_LIVE_MINUTES").MustInt(180)
Service.ResetPwdCodeLives = sec.Key("RESET_PASSWD_CODE_LIVE_MINUTES").MustInt(180)
Service.DisableRegistration = sec.Key("DISABLE_REGISTRATION").MustBool()
Service.AllowOnlyExternalRegistration = sec.Key("ALLOW_ONLY_EXTERNAL_REGISTRATION").MustBool()
Service.EmailDomainWhitelist = sec.Key("EMAIL_DOMAIN_WHITELIST").Strings(",")
Service.ShowRegistrationButton = sec.Key("SHOW_REGISTRATION_BUTTON").MustBool(!(Service.DisableRegistration || Service.AllowOnlyExternalRegistration))
Service.RequireSignInView = sec.Key("REQUIRE_SIGNIN_VIEW").MustBool()
Service.EnableReverseProxyAuth = sec.Key("ENABLE_REVERSE_PROXY_AUTHENTICATION").MustBool()
Service.EnableReverseProxyAutoRegister = sec.Key("ENABLE_REVERSE_PROXY_AUTO_REGISTRATION").MustBool()
Service.EnableCaptcha = sec.Key("ENABLE_CAPTCHA").MustBool(false)
Service.CaptchaType = sec.Key("CAPTCHA_TYPE").MustString(ImageCaptcha)
Service.RecaptchaSecret = sec.Key("RECAPTCHA_SECRET").MustString("")
Service.RecaptchaSitekey = sec.Key("RECAPTCHA_SITEKEY").MustString("")
Service.DefaultKeepEmailPrivate = sec.Key("DEFAULT_KEEP_EMAIL_PRIVATE").MustBool()
Service.DefaultAllowCreateOrganization = sec.Key("DEFAULT_ALLOW_CREATE_ORGANIZATION").MustBool(true)
Service.EnableTimetracking = sec.Key("ENABLE_TIMETRACKING").MustBool(true)
if Service.EnableTimetracking {
Service.DefaultEnableTimetracking = sec.Key("DEFAULT_ENABLE_TIMETRACKING").MustBool(true)
}
Service.DefaultEnableDependencies = sec.Key("DEFAULT_ENABLE_DEPENDENCIES").MustBool(true)
Service.DefaultAllowOnlyContributorsToTrackTime = sec.Key("DEFAULT_ALLOW_ONLY_CONTRIBUTORS_TO_TRACK_TIME").MustBool(true)
Service.NoReplyAddress = sec.Key("NO_REPLY_ADDRESS").MustString("noreply.example.org")
Service.EnableUserHeatmap = sec.Key("ENABLE_USER_HEATMAP").MustBool(true)
sec = Cfg.Section("openid")
Service.EnableOpenIDSignIn = sec.Key("ENABLE_OPENID_SIGNIN").MustBool(!InstallLock)
Service.EnableOpenIDSignUp = sec.Key("ENABLE_OPENID_SIGNUP").MustBool(!Service.DisableRegistration && Service.EnableOpenIDSignIn)
pats := sec.Key("WHITELISTED_URIS").Strings(" ")
if len(pats) != 0 {
Service.OpenIDWhitelist = make([]*regexp.Regexp, len(pats))
for i, p := range pats {
Service.OpenIDWhitelist[i] = regexp.MustCompilePOSIX(p)
}
}
pats = sec.Key("BLACKLISTED_URIS").Strings(" ")
if len(pats) != 0 {
Service.OpenIDBlacklist = make([]*regexp.Regexp, len(pats))
for i, p := range pats {
Service.OpenIDBlacklist[i] = regexp.MustCompilePOSIX(p)
}
}
}
var logLevels = map[string]string{
"Trace": "0",
"Debug": "1",
"Info": "2",
"Warn": "3",
"Error": "4",
"Critical": "5",
}
func getLogLevel(section string, key string, defaultValue string) string {
validLevels := []string{"Trace", "Debug", "Info", "Warn", "Error", "Critical"}
return Cfg.Section(section).Key(key).In(defaultValue, validLevels)
}
func newLogService() {
log.Info("Gitea v%s%s", AppVer, AppBuiltWith)
LogModes = strings.Split(Cfg.Section("log").Key("MODE").MustString("console"), ",")
LogConfigs = make([]string, len(LogModes))
useConsole := false
for i := 0; i < len(LogModes); i++ {
LogModes[i] = strings.TrimSpace(LogModes[i])
if LogModes[i] == "console" {
useConsole = true
}
}
if !useConsole {
log.DelLogger("console")
}
for i, mode := range LogModes {
sec, err := Cfg.GetSection("log." + mode)
if err != nil {
sec, _ = Cfg.NewSection("log." + mode)
}
// Log level.
levelName := getLogLevel("log."+mode, "LEVEL", LogLevel)
level, ok := logLevels[levelName]
if !ok {
log.Fatal(4, "Unknown log level: %s", levelName)
}
// Generate log configuration.
switch mode {
case "console":
LogConfigs[i] = fmt.Sprintf(`{"level":%s}`, level)
case "file":
logPath := sec.Key("FILE_NAME").MustString(path.Join(LogRootPath, "gitea.log"))
if err = os.MkdirAll(path.Dir(logPath), os.ModePerm); err != nil {
panic(err.Error())
}
LogConfigs[i] = fmt.Sprintf(
`{"level":%s,"filename":"%s","rotate":%v,"maxsize":%d,"daily":%v,"maxdays":%d}`, level,
logPath,
sec.Key("LOG_ROTATE").MustBool(true),
1<<uint(sec.Key("MAX_SIZE_SHIFT").MustInt(28)),
sec.Key("DAILY_ROTATE").MustBool(true),
sec.Key("MAX_DAYS").MustInt(7))
case "conn":
LogConfigs[i] = fmt.Sprintf(`{"level":%s,"reconnectOnMsg":%v,"reconnect":%v,"net":"%s","addr":"%s"}`, level,
sec.Key("RECONNECT_ON_MSG").MustBool(),
sec.Key("RECONNECT").MustBool(),
sec.Key("PROTOCOL").In("tcp", []string{"tcp", "unix", "udp"}),
sec.Key("ADDR").MustString(":7020"))
case "smtp":
LogConfigs[i] = fmt.Sprintf(`{"level":%s,"username":"%s","password":"%s","host":"%s","sendTos":["%s"],"subject":"%s"}`, level,
sec.Key("USER").MustString("example@example.com"),
sec.Key("PASSWD").MustString("******"),
sec.Key("HOST").MustString("127.0.0.1:25"),
strings.Replace(sec.Key("RECEIVERS").MustString("example@example.com"), ",", "\",\"", -1),
sec.Key("SUBJECT").MustString("Diagnostic message from serve"))
case "database":
LogConfigs[i] = fmt.Sprintf(`{"level":%s,"driver":"%s","conn":"%s"}`, level,
sec.Key("DRIVER").String(),
sec.Key("CONN").String())
}
log.NewLogger(Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000), mode, LogConfigs[i])
log.Info("Log Mode: %s(%s)", strings.Title(mode), levelName)
}
}
// NewXORMLogService initializes xorm logger service
func NewXORMLogService(disableConsole bool) {
logModes := strings.Split(Cfg.Section("log").Key("MODE").MustString("console"), ",")
var logConfigs string
for _, mode := range logModes {
mode = strings.TrimSpace(mode)
if disableConsole && mode == "console" {
continue
}
sec, err := Cfg.GetSection("log." + mode)
if err != nil {
sec, _ = Cfg.NewSection("log." + mode)
}
// Log level.
levelName := getLogLevel("log."+mode, "LEVEL", LogLevel)
level, ok := logLevels[levelName]
if !ok {
log.Fatal(4, "Unknown log level: %s", levelName)
}
// Generate log configuration.
switch mode {
case "console":
logConfigs = fmt.Sprintf(`{"level":%s}`, level)
case "file":
logPath := sec.Key("FILE_NAME").MustString(path.Join(LogRootPath, "xorm.log"))
if err = os.MkdirAll(path.Dir(logPath), os.ModePerm); err != nil {
panic(err.Error())
}
logPath = path.Join(filepath.Dir(logPath), "xorm.log")
logConfigs = fmt.Sprintf(
`{"level":%s,"filename":"%s","rotate":%v,"maxsize":%d,"daily":%v,"maxdays":%d}`, level,
logPath,
sec.Key("LOG_ROTATE").MustBool(true),
1<<uint(sec.Key("MAX_SIZE_SHIFT").MustInt(28)),
sec.Key("DAILY_ROTATE").MustBool(true),
sec.Key("MAX_DAYS").MustInt(7))
case "conn":
logConfigs = fmt.Sprintf(`{"level":%s,"reconnectOnMsg":%v,"reconnect":%v,"net":"%s","addr":"%s"}`, level,
sec.Key("RECONNECT_ON_MSG").MustBool(),
sec.Key("RECONNECT").MustBool(),
sec.Key("PROTOCOL").In("tcp", []string{"tcp", "unix", "udp"}),
sec.Key("ADDR").MustString(":7020"))
case "smtp":
logConfigs = fmt.Sprintf(`{"level":%s,"username":"%s","password":"%s","host":"%s","sendTos":"%s","subject":"%s"}`, level,
sec.Key("USER").MustString("example@example.com"),
sec.Key("PASSWD").MustString("******"),
sec.Key("HOST").MustString("127.0.0.1:25"),
sec.Key("RECEIVERS").MustString("[]"),
sec.Key("SUBJECT").MustString("Diagnostic message from serve"))
case "database":
logConfigs = fmt.Sprintf(`{"level":%s,"driver":"%s","conn":"%s"}`, level,
sec.Key("DRIVER").String(),
sec.Key("CONN").String())
}
log.NewXORMLogger(Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000), mode, logConfigs)
if !disableConsole {
log.Info("XORM Log Mode: %s(%s)", strings.Title(mode), levelName)
}
var lvl core.LogLevel
switch levelName {
case "Trace", "Debug":
lvl = core.LOG_DEBUG
case "Info":
lvl = core.LOG_INFO
case "Warn":
lvl = core.LOG_WARNING
case "Error", "Critical":
lvl = core.LOG_ERR
}
log.XORMLogger.SetLevel(lvl)
}
if len(logConfigs) == 0 {
log.DiscardXORMLogger()
}
}
// Cache represents cache settings
type Cache struct {
Adapter string
Interval int
Conn string
TTL time.Duration
}
var (
// CacheService the global cache
CacheService *Cache
)
func newCacheService() {
sec := Cfg.Section("cache")
CacheService = &Cache{
Adapter: sec.Key("ADAPTER").In("memory", []string{"memory", "redis", "memcache"}),
}
switch CacheService.Adapter {
case "memory":
CacheService.Interval = sec.Key("INTERVAL").MustInt(60)
case "redis", "memcache":
CacheService.Conn = strings.Trim(sec.Key("HOST").String(), "\" ")
default:
log.Fatal(4, "Unknown cache adapter: %s", CacheService.Adapter)
}
CacheService.TTL = sec.Key("ITEM_TTL").MustDuration(16 * time.Hour)
log.Info("Cache Service Enabled")
}
func newSessionService() {
SessionConfig.Provider = Cfg.Section("session").Key("PROVIDER").In("memory",
[]string{"memory", "file", "redis", "mysql"})
SessionConfig.ProviderConfig = strings.Trim(Cfg.Section("session").Key("PROVIDER_CONFIG").MustString(path.Join(AppDataPath, "sessions")), "\" ")
if SessionConfig.Provider == "file" && !filepath.IsAbs(SessionConfig.ProviderConfig) {
SessionConfig.ProviderConfig = path.Join(AppWorkPath, SessionConfig.ProviderConfig)
}
SessionConfig.CookieName = Cfg.Section("session").Key("COOKIE_NAME").MustString("i_like_gitea")
SessionConfig.CookiePath = AppSubURL
SessionConfig.Secure = Cfg.Section("session").Key("COOKIE_SECURE").MustBool(false)
SessionConfig.Gclifetime = Cfg.Section("session").Key("GC_INTERVAL_TIME").MustInt64(86400)
SessionConfig.Maxlifetime = Cfg.Section("session").Key("SESSION_LIFE_TIME").MustInt64(86400)
log.Info("Session Service Enabled")
}
// Mailer represents mail service.
type Mailer struct {
// Mailer
QueueLength int
Name string
From string
FromName string
FromEmail string
SendAsPlainText bool
// SMTP sender
Host string
User, Passwd string
DisableHelo bool
HeloHostname string
SkipVerify bool
UseCertificate bool
CertFile, KeyFile string
IsTLSEnabled bool
// Sendmail sender
UseSendmail bool
SendmailPath string
SendmailArgs []string
}
var (
// MailService the global mailer
MailService *Mailer
)
func newMailService() {
sec := Cfg.Section("mailer")
// Check mailer setting.
if !sec.Key("ENABLED").MustBool() {
return
}
MailService = &Mailer{
QueueLength: sec.Key("SEND_BUFFER_LEN").MustInt(100),
Name: sec.Key("NAME").MustString(AppName),
SendAsPlainText: sec.Key("SEND_AS_PLAIN_TEXT").MustBool(false),
Host: sec.Key("HOST").String(),
User: sec.Key("USER").String(),
Passwd: sec.Key("PASSWD").String(),
DisableHelo: sec.Key("DISABLE_HELO").MustBool(),
HeloHostname: sec.Key("HELO_HOSTNAME").String(),
SkipVerify: sec.Key("SKIP_VERIFY").MustBool(),
UseCertificate: sec.Key("USE_CERTIFICATE").MustBool(),
CertFile: sec.Key("CERT_FILE").String(),
KeyFile: sec.Key("KEY_FILE").String(),
IsTLSEnabled: sec.Key("IS_TLS_ENABLED").MustBool(),
UseSendmail: sec.Key("USE_SENDMAIL").MustBool(),
SendmailPath: sec.Key("SENDMAIL_PATH").MustString("sendmail"),
}
MailService.From = sec.Key("FROM").MustString(MailService.User)
if sec.HasKey("ENABLE_HTML_ALTERNATIVE") {
log.Warn("ENABLE_HTML_ALTERNATIVE is deprecated, use SEND_AS_PLAIN_TEXT")
MailService.SendAsPlainText = !sec.Key("ENABLE_HTML_ALTERNATIVE").MustBool(false)
}
parsed, err := mail.ParseAddress(MailService.From)
if err != nil {
log.Fatal(4, "Invalid mailer.FROM (%s): %v", MailService.From, err)
}
MailService.FromName = parsed.Name
MailService.FromEmail = parsed.Address
if MailService.UseSendmail {
MailService.SendmailArgs, err = shellquote.Split(sec.Key("SENDMAIL_ARGS").String())
if err != nil {
log.Error(4, "Failed to parse Sendmail args: %v", CustomConf, err)
}
}
log.Info("Mail Service Enabled")
}
func newRegisterMailService() {
if !Cfg.Section("service").Key("REGISTER_EMAIL_CONFIRM").MustBool() {
return
} else if MailService == nil {
log.Warn("Register Mail Service: Mail Service is not enabled")
return
}
Service.RegisterEmailConfirm = true
log.Info("Register Mail Service Enabled")
}
func newNotifyMailService() {
if !Cfg.Section("service").Key("ENABLE_NOTIFY_MAIL").MustBool() {
return
} else if MailService == nil {
log.Warn("Notify Mail Service: Mail Service is not enabled")
return
}
Service.EnableNotifyMail = true
log.Info("Notify Mail Service Enabled")
}
func newWebhookService() {
sec := Cfg.Section("webhook")
Webhook.QueueLength = sec.Key("QUEUE_LENGTH").MustInt(1000)
Webhook.DeliverTimeout = sec.Key("DELIVER_TIMEOUT").MustInt(5)
Webhook.SkipTLSVerify = sec.Key("SKIP_TLS_VERIFY").MustBool()
Webhook.Types = []string{"gitea", "gogs", "slack", "discord", "dingtalk"}
Webhook.PagingNum = sec.Key("PAGING_NUM").MustInt(10)
}
// NewServices initializes the services
func NewServices() {
newService()
newLogService()
NewXORMLogService(false)
newCacheService()
newSessionService()
newMailService()
newRegisterMailService()
newNotifyMailService()
newWebhookService()
}
| [
"\"GITEA_WORK_DIR\"",
"\"GITEA_CUSTOM\""
] | [] | [
"GITEA_CUSTOM",
"GITEA_WORK_DIR"
] | [] | ["GITEA_CUSTOM", "GITEA_WORK_DIR"] | go | 2 | 0 | |
tests/test_browser.py | # coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from pathlib import Path
from urllib.request import urlopen
from common import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from common import create_file, parameterized, ensure_dir, disabled, test_file, WEBIDL_BINDER
from common import read_file, require_v8
from tools import shared
from tools import system_libs
from tools.shared import EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
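# Illustrative note (assumption, not part of the original helper): when
# support_byte_ranges is enabled, a request with the header "Range: bytes=0-9"
# hits the final branch of do_GET, end is clamped to len(data) - 1, and the
# server replies with data[0:10] and a Content-Length of 10.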
def shell_with_script(shell_file, output_file, replacement):
shell = read_file(path_from_root('src', shell_file))
create_file(output_file, shell.replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
assert callable(f)
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def requires_asmfs(f):
assert callable(f)
def decorated(self, *args, **kwargs):
# https://github.com/emscripten-core/emscripten/issues/9534
self.skipTest('ASMFS is looking for a maintainer')
return f(self, *args, **kwargs)
return decorated
def also_with_threads(f):
def decorated(self):
f(self)
if not os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
print('(threads)')
self.emcc_args += ['-pthread']
f(self)
return decorated
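# Illustrative usage (assumption): decorating a test with @also_with_threads
# runs it once unchanged and, unless EMTEST_LACKS_THREAD_SUPPORT is set, a
# second time with '-pthread' appended to self.emcc_args.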
# Today we only support the wasm backend, so any test that is disabled under the llvm
# backend is always disabled.
# TODO(sbc): Investigate all tests with this decorator and either fix or remove the test.
def no_wasm_backend(note=''):
assert not callable(note)
return unittest.skip(note)
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def setUp(self):
super().setUp()
# avoid various compiler warnings that many browser tests currently generate
self.emcc_args += [
'-Wno-pointer-sign',
'-Wno-int-conversion',
]
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
# system JS libraries are no longer automatically linked to anymore.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL', '-lGL']) # is the default anyhow
def test_sdl1_es6(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL', '-lGL', '-s', 'EXPORT_ES6'])
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-gsource-map'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
self.btest_exit(test_file('emscripten_log/emscripten_log.cpp'),
args=['--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-gsource-map'])
def test_preload_file(self):
create_file('somefile.txt', 'load me right before running the code please')
create_file('.somefile.txt', 'load me right before running the code please')
create_file('some@file.txt', 'load me right before running the code please')
absolute_src_path = os.path.abspath('somefile.txt')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_file('main.cpp', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''' % path)
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.btest_exit('main.cpp', args=['--preload-file', srcpath])
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
create_file(tricky_filename, 'load me right before running the code please')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
self.btest_exit('main.cpp', args=['--preload-file', tricky_filename.replace('@', '@@')])
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.btest_exit('main.cpp', args=['--preload-file', absolute_src_path])
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_file('main.cpp', r'''
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
f = fopen("%s", "r");
assert(f != NULL);
fclose(f);
f = fopen("%s", "r");
assert(f == NULL);
return 0;
}
''' % (path1, path2, nonexistingpath))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.btest_exit('main.cpp', args=['--preload-file', srcpath, '--exclude-file', '*/.*'])
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'], reporting=Reporting.JS_ONLY)
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?exit:0')
# With FS.preloadFile
create_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.btest_exit('main.cpp', args=['--pre-js', 'pre.js', '--use-preload-plugins'])
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
src = test_file('manual_download_data.cpp')
create_file('file.txt', '''Hello!''')
self.compile_btest([src, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(test_file('manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by
# correctly escaping the names.
def test_output_file_escaping(self):
self.set_setting('EXIT_RUNTIME')
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.abspath(d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
create_file(os.path.join(d, txt), 'load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
create_file(cpp, r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
abs_txt = os.path.join(abs_d, txt)
self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.abspath(page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY)
self.run_browser(page_file, '|load me right before|.', '/report_result?exit:0')
@parameterized({
'0': (0,),
'1mb': (1 * 1024 * 1024,),
'100mb': (100 * 1024 * 1024,),
'150mb': (150 * 1024 * 1024,),
})
def test_preload_caching(self, extra_size):
self.set_setting('EXIT_RUNTIME')
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern int checkPreloadResults();
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return checkPreloadResults();
}
''' % 'somefile.txt')
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
if is_chrome() and extra_size >= 100 * 1024 * 1024:
self.skipTest('chrome bug')
create_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.c', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH'], reporting=Reporting.JS_ONLY)
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:0')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:1')
def test_preload_caching_indexeddb_name(self):
self.set_setting('EXIT_RUNTIME')
create_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern int checkPreloadResults();
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
assert(strcmp("load me right before", buf) == 0);
return checkPreloadResults();
}
''' % path)
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.c', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY)
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:0')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:1')
def test_multifile(self):
# a few files inside a directory
ensure_dir('subdirr/moar')
create_file('subdirr/data1.txt', '1214141516171819')
create_file('subdirr/moar/data2.txt', '3.14159265358979')
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("1214141516171819", buf) == 0);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
assert(strcmp("3.14159265358979", buf) == 0);
return 0;
}
''')
# by individual files
self.btest_exit('main.c', args=['--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt'])
# by directory, and remove files to make sure
self.set_setting('EXIT_RUNTIME')
self.compile_btest(['main.c', '--preload-file', 'subdirr', '-o', 'page.html'], reporting=Reporting.JS_ONLY)
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?exit:0')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_file(Path('subdirr/data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
default_shell = read_file(path_from_root('src', 'shell.html'))
create_file('shell.html', default_shell.replace('var Module = {', '''
var Module = {
locateFile: function(path, prefix) {
if (path.endsWith(".wasm")) {
return prefix + path;
} else {
return "cdn/" + path;
}
},
'''))
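# Illustrative note (assumption): with the locateFile above, the packaged
# "test.data" is fetched from "cdn/test.data", while "test.wasm" keeps the
# default prefix next to the generated page.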
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("1214141516171819", buf) == 0);
return 0;
}
''')
self.set_setting('EXIT_RUNTIME')
self.compile_btest(['main.c', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
shutil.move('test.data', Path('cdn/test.data'))
self.run_browser('test.html', '', '/report_result?exit:0')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_file('data.txt', 'data')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file is missing.
return 0;
}
''')
create_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test that a missing file should run xhr.onload with a status different from 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_file('shell.html', read_file(path_from_root('src', 'shell.html')).replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(Path('filesystem/dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
src = test_file('sdl_image.c')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
src, '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
src = test_file('sdl_image.c')
self.compile_btest([
src, '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
@parameterized({
'': ([],),
# add testing for closure on preloaded files + ENVIRONMENT=web (we must not
# emit any node.js code here, see
# https://github.com/emscripten-core/emscripten/issues/14486
'closure_webonly': (['--closure', '1', '-s', 'ENVIRONMENT=web'],)
})
def test_sdl_image_prepare_data(self, args):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'] + args, manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O0', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O2', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(test_file(self.reference if reference is None else reference))
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % read_file('reftest.js'))
create_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
self.compile_btest([test_file('hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-s', 'ASYNCIFY']
]:
print(delay, defines, async_)
create_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
self.compile_btest([test_file('sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not default-prevented.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not default-prevented.
sendKey(65);
// Send backspace. The keypress should not be sent, as default handling of
// the keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', 'EXPORTED_FUNCTIONS=_main'], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
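# pre.js exposes simulateMouseEvent(), which synthesizes mousedown/mouseup (or mousemove
# when button < 0) at coordinates relative to the canvas position, for sdl_mouse.c to consume.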
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
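# Same helper as test_sdl_mouse, but the page places the canvas inside an absolutely
# positioned container and the test is built with -DTEST_SDL_MOUSE_OFFSETS, so the reported
# coordinates must account for the canvas' client-rect offsets.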
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify=0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('sdl_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('sdl_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('test_glfw_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check support for the WebGL context attributes we want to test
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests will succeed when an attribute is not supported.
create_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = test_file('test_webgl_context_attributes_common.c')
temp_filepath = os.path.basename(filepath)
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest_exit('test_webgl_context_attributes_glut.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl2.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_glfw.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest_exit('test_webgl_context_attributes_glut.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_glfw.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest_exit('webgl_error.cpp')
@requires_graphics_hardware
def test_webgl_parallel_shader_compile(self):
self.btest_exit('webgl_parallel_shader_compile.cpp')
@requires_graphics_hardware
def test_webgl_explicit_uniform_location(self):
self.btest_exit('webgl_explicit_uniform_location.c', args=['-s', 'GL_EXPLICIT_UNIFORM_LOCATION=1', '-s', 'MIN_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sampler_layout_binding(self):
self.btest_exit('webgl_sampler_layout_binding.c', args=['-s', 'GL_EXPLICIT_UNIFORM_BINDING=1'])
@requires_graphics_hardware
def test_webgl2_ubo_layout_binding(self):
self.btest_exit('webgl2_ubo_layout_binding.c', args=['-s', 'GL_EXPLICIT_UNIFORM_BINDING=1', '-s', 'MIN_WEBGL_VERSION=2'])
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest_exit('preinitialized_webgl_context.cpp', args=['-s', 'GL_PREINITIALIZED_CONTEXT', '--shell-file', test_file('preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure=1']]:
self.btest_exit('emscripten_get_now.cpp', args=args)
def test_write_file_in_environment_web(self):
self.btest_exit('write_file.c', args=['-s', 'ENVIRONMENT=web', '-Os', '--closure=1'])
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['-s', 'EXIT_RUNTIME', '--shell-file', test_file('test_fflush.html')], reporting=Reporting.NONE)
def test_file_db(self):
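# First run stores the secret (-DFIRST); the later runs, built without -DFIRST, must read that
# same secret back from persistent browser-side storage, even when a different moar.txt is
# preloaded (see the note below).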
secret = str(time.time())
create_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM'])
shutil.copyfile('test.html', 'second.html')
create_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
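# First run (-DFIRST) writes the secret and persists it through IDBFS; the second run syncs it
# back in and verifies it, optionally doing extra work (-DEXTRA_WORK).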
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME', '-s', 'ASYNCIFY']
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_fsync.c'), '1', args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = ['-s', 'ASYNCIFY', '-s', 'EXIT_RUNTIME']
secret = str(time.time())
self.btest(test_file('fs/test_memfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(test_file('fs/test_workerfs_read.c'), '1', args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
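# Package file1.txt and sub/file2.txt with --separate-metadata, then mount the package via
# WORKERFS in a --proxy-to-worker build.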
create_file('file1.txt', 'first')
ensure_dir('sub')
open(Path('sub/file2.txt'), 'w').write('second')
self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', Path('sub/file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(Path('fs/test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_file('file1.txt', '0123456789' * (1024 * 128))
open(Path('subdir/file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc, -s LZ4=1 tells it to tell the file packager
print('emcc-normal')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(Path('subdir/file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' modularize')
self.compile_btest([test_file('fs/test_lz4fs.cpp'), '--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-s', 'MODULARIZE=1'])
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
Module()
</script>
''')
self.run_browser('a.html', '.', '/report_result?2')
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' opts+closure')
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2', '--closure=1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', Path('files/file1.txt'))
shutil.copyfile('file2.txt', Path('files/file2.txt'))
shutil.copyfile('file3.txt', Path('files/file3.txt'))
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_file('data.dat', ' ')
self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(Path('browser/separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM'])
def test_idbstore(self):
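# Drive the stages implemented in idbstore.c (see that file for what each stage does); the
# sequence revisits earlier stages so data persisted in IndexedDB by one run can be checked
# by later runs, while self.clear() resets the build directory between them.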
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(test_file('idbstore.c'), str(stage), args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '-s', 'ASYNCIFY'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync_worker.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'INITIAL_MEMORY=80MB', '-s', 'ASYNCIFY'])
def test_force_exit(self):
self.btest('force_exit.c', expected='17', args=['-s', 'EXIT_RUNTIME'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest_exit('sdl_pumpevents.c', assert_returncode=7, args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify=0', '--shell-file',
test_file('sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
self.compile_btest([test_file('test_egl.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER')
def _test_egl_width_height_base(self, *args):
self.compile_btest([test_file('test_egl_width_height.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
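# Build the worker twice: once with file.dat preloaded and once without; also make sure the
# generated worker.js still runs standalone outside the browser.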
for file_data in [1, 0]:
cmd = [EMCC, test_file('hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
self.run_process(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
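# The main page spawns a worker that lazily reads /bigfile (an FS.createLazyFile backed by a
# local HTTP server) using chunked synchronous XHRs, checksums it, and posts the result back.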
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. test_file('hello_world_gles.c')
self.compile_btest([test_file(c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # Python 2 compatibility: force bigint
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for i in range(60):
try:
urlopen('http://localhost:11111')
break
except Exception as e:
print('(sleep for server)')
time.sleep(1)
if i == 59: # last attempt in range(60); give up and surface the error
raise e
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
# attempt to rmdir() files in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-s', 'USE_PTHREADS'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([test_file('hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING', '-lGL', '-lglut',
'--shell-file', test_file('hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest_exit('full_es2_sdlproc.c', assert_returncode=1, args=['-s', 'GL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
message='You should see animating gears.')
assert 'gl-matrix' not in read_file('test.html'), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
programs = self.get_library('glbook', [
Path('Chapter_2/Hello_Triangle', 'CH02_HelloTriangle.o'),
Path('Chapter_8/Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
Path('Chapter_9/Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
Path('Chapter_9/Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
Path('Chapter_9/TextureWrap', 'CH09_TextureWrap.o'),
Path('Chapter_10/MultiTexture', 'CH10_MultiTexture.o'),
Path('Chapter_13/ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return test_file('glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-s', 'FULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(test_file('glbook/Chapter_10/MultiTexture/basemap.tga'), 'basemap.tga')
shutil.copyfile(test_file('glbook/Chapter_10/MultiTexture/lightmap.tga'), 'lightmap.tga')
shutil.copyfile(test_file('glbook/Chapter_13/ParticleSystem/smoke.tga'), 'smoke.tga')
for source, reference in [
(Path('glbook/Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), test_file('glbook/CH02_HelloTriangle.png')),
# (Path('glbook/Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), test_file('glbook/CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(Path('glbook/Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), test_file('glbook/CH09_TextureWrap.png')),
# (Path('glbook/Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), test_file('glbook/CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(Path('glbook/Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), test_file('glbook/CH09_SimpleTexture2D.png')),
(Path('glbook/Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), test_file('glbook/CH10_MultiTexture.png')),
(Path('glbook/Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), test_file('glbook/CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + test_file('glbook/Common'),
test_file('glbook/Common/esUtil.c'),
test_file('glbook/Common/esShader.c'),
test_file('glbook/Common/esShapes.c'),
test_file('glbook/Common/esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest_exit('emscripten_api_browser.c', args=['-s', 'EXPORTED_FUNCTIONS=_main,_third', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_file('script1.js', '''
Module._set(456);
''')
create_file('file1.txt', 'first')
create_file('file2.txt', 'second')
setup()
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest_exit('emscripten_api_browser2.c', args=['-s', 'EXPORTED_FUNCTIONS=_main,_set', '-s', 'FORCE_FILESYSTEM'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(Path('sub/test.data'), 'test.data')
self.btest_exit('emscripten_api_browser2.c', args=['-s', 'EXPORTED_FUNCTIONS=_main,_set', '-s', 'FORCE_FILESYSTEM'])
def test_emscripten_api_infloop(self):
self.btest_exit('emscripten_api_browser_infloop.cpp', assert_returncode=7)
def test_emscripten_fs_api(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest_exit('emscripten_fs_api_browser.c', assert_returncode=1, args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest_exit('emscripten_fs_api_browser2.c', assert_returncode=1, args=['-s', "ASSERTIONS=0"])
self.btest_exit('emscripten_fs_api_browser2.c', assert_returncode=1, args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'EXIT_RUNTIME']]:
self.btest_exit('emscripten_main_loop.cpp', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [
[],
# test pthreads + AUTO_JS_LIBRARIES mode as well
['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'AUTO_JS_LIBRARIES=0'],
]:
self.btest_exit('emscripten_main_loop_settimeout.cpp', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest_exit('emscripten_main_loop_and_blocker.cpp', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker_exit(self):
# Same as above but tests that EXIT_RUNTIME works with emscripten_main_loop. The
# app should still stay alive until the loop ends
self.btest_exit('emscripten_main_loop_and_blocker.cpp')
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest_exit('emscripten_main_loop_setimmediate.cpp', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
# FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
self.emcc_args.append('-Wno-deprecated-declarations')
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
# Covered by the dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure=1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-s', 'RELOCATABLE'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2.png'), args=['-s', 'GL_DEBUG', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre3.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest(Path('third_party/cubegeom', 'cubegeom_proc.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_glew.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '--closure=1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_color.c'), reference=Path('third_party/cubegeom', 'cubegeom_color.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_normal.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_mt.c'), reference=Path('third_party/cubegeom', 'cubegeom_mt.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_color2.c'), reference=Path('third_party/cubegeom', 'cubegeom_color2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_texturematrix.c'), reference=Path('third_party/cubegeom', 'cubegeom_texturematrix.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_fog.c'), reference=Path('third_party/cubegeom', 'cubegeom_fog.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2_vao2.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2_vao2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao_es.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_u4fv_2.c'), reference=Path('third_party/cubegeom', 'cubegeom_u4fv_2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
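# pre.js turns off SDL's copyOnLock default, and each run passes -r/-g/-b so the program
# draws with a red, green or blue palette that the corresponding reference image checks.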
create_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-s', 'GL_FFP_ONLY', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(test_file('water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure=1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
create_file('header.h', r'''
struct point {
int x, y;
};
''')
create_file('supp.c', r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point *p) {
printf("supp: %d,%d\n", p->x, p->y);
mainFunc(p->x + p->y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
''')
create_file('main.c', r'''
#include <stdio.h>
#include <assert.h>
#include "header.h"
extern void suppFunc(struct point *p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
assert(x == 56);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(&p);
printf("main see: %d\nok.\n", suppInt);
assert(suppInt == 76);
return 0;
}
''')
self.run_process([EMCC, 'supp.c', '-o', 'supp.wasm', '-s', 'SIDE_MODULE', '-O2'])
self.btest_exit('main.c', args=['-s', 'MAIN_MODULE=2', '-O2', 'supp.wasm'])
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
create_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
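# Exercise the three ways of calling into compiled code (ccall, cwrap and a direct
# Module._export call) and verify that each aborts if invoked before the runtime is
# initialized, then succeeds when invoked from main() after startup.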
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync startup, call too late')
create_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync, runtime still alive, so all good')
create_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
def test_cwrap_early(self):
self.btest(Path('browser/cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS', '--pre-js', test_file('browser/cwrap_early.js'), '-s', 'EXPORTED_RUNTIME_METHODS=[cwrap]'], expected='0')
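# BUILD_AS_WORKER builds worker.js to run inside a Web Worker; the main page then drives
# it through the emscripten worker API (emscripten_create_worker / emscripten_call_worker)
# and reports the result it receives back.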
def test_worker_api(self):
self.compile_btest([test_file('worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([test_file('worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-O2', '--minify=0', '-s', 'EXPORTED_FUNCTIONS=_one,_two,_three,_four', '--closure=1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify=0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([test_file('worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([test_file('worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one', '-s', 'ASYNCIFY'])
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest_exit('test_emscripten_async_wget2.cpp')
def test_emscripten_async_wget2_data(self):
create_file('hello.txt', 'Hello Emscripten!')
self.btest('test_emscripten_async_wget2_data.cpp', expected='0')
time.sleep(10)
def test_emscripten_async_wget_side_module(self):
self.run_process([EMCC, test_file('browser_module.c'), '-o', 'lib.wasm', '-O2', '-s', 'SIDE_MODULE'])
self.btest_exit('browser_main.c', args=['-O2', '-s', 'MAIN_MODULE=2'])
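# --preload-file together with --use-preload-plugins should make a preloaded side module
# available under Module['preloadedWasm'], so that a later dlopen() of the same path can
# resolve it without a synchronous fetch.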
@parameterized({
'non-lz4': ([],),
'lz4': (['-s', 'LZ4'],)
})
def test_preload_module(self, args):
create_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.run_process([EMCC, 'library.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'library.so'])
create_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
return 3;
}
return 0;
}
''')
self.btest_exit(
'main.c',
args=['-s', 'MAIN_MODULE=2', '--preload-file', '.@/', '-O2', '--use-preload-plugins'] + args)
def test_mmap_file(self):
create_file('data.dat', 'data from the file ' + ('.' * 9000))
self.btest(test_file('mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compiles.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues doesn't get minified out.
self.run_process([EMCC, '-O2', '--closure=1', test_file('uuid/test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = read_file('test.js')
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(test_file('uuid/test.js'))
try_delete(test_file('uuid/test.js.map'))
# Now run test in browser
self.btest(test_file('uuid/test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
@parameterized({
'': ([],),
'closure': (['-O2', '-g1', '--closure=1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
'pthread': (['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],),
'legacy': (['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0'],)
})
@requires_threads
def test_html5_core(self, opts):
self.btest(test_file('test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
print(opts)
self.btest(test_file('test_gamepad.c'), args=[] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS']]:
print(opts)
self.btest_exit(test_file('webgl_create_context.cpp'), args=opts + ['-lGL'])
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest_exit(test_file('webgl_create_context2.cpp'))
@requires_graphics_hardware
# Test registering HTML5 event callbacks on special event targets (window, document, screen, canvas).
# (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(test_file('browser/html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_destroy_context.cpp'), args=opts + ['--shell-file', test_file('webgl_destroy_context_shell.html'), '-lGL'])
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest_exit(test_file('webgl_color_buffer_readpixels.cpp'), args=['-lGL'])
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
@requires_graphics_hardware
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_shader_source_length.cpp'), args=opts + ['-lGL'])
# Tests calling glGetString(GL_UNMASKED_VENDOR_WEBGL).
@requires_graphics_hardware
def test_webgl_unmasked_vendor_webgl(self):
self.btest_exit(test_file('webgl_unmasked_vendor_webgl.c'), args=['-lGL'])
@requires_graphics_hardware
def test_webgl2(self):
for opts in [
['-s', 'MIN_CHROME_VERSION=0'],
['-O2', '-g1', '--closure=1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest_exit(test_file('webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts)
# Tests the WebGL 2 glGetBufferSubData() functionality.
@requires_graphics_hardware
def test_webgl2_get_buffer_sub_data(self):
self.btest_exit(test_file('webgl2_get_buffer_sub_data.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest_exit(test_file('webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS'])
@requires_graphics_hardware
def test_webgl2_objects(self):
self.btest_exit(test_file('webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
def test_html5_webgl_api(self):
for mode in [['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
['-s', 'OFFSCREEN_FRAMEBUFFER', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
[]]:
if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
continue
self.btest_exit(test_file('html5_webgl.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + mode)
@requires_graphics_hardware
def test_webgl2_ubos(self):
self.btest_exit(test_file('webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest_exit(test_file('webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'])
self.btest_exit(test_file('webgl2_garbage_free_entrypoints.cpp'))
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest_exit(test_file('webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'])
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
# tests that if we support WebGL1 and 2, and WebGL2RenderingContext exists,
# but context creation fails, that we can then manually try to create a
# WebGL1 context and succeed.
self.btest_exit(test_file('test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest_exit(test_file('webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest_exit(test_file('webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure=1', '-lGL'])
# Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types works
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest_exit(test_file('webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS'])
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest_exit(test_file('webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'])
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(test_file('third_party/sokol/mipmap-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=Path('third_party/sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(test_file('third_party/sokol/mrt-emcc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=Path('third_party/sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(test_file('third_party/sokol/arraytex-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=Path('third_party/sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_wget(self):
create_file('test.txt', 'emscripten')
self.btest_exit(test_file('test_wget.c'), args=['-s', 'ASYNCIFY'])
def test_wget_data(self):
create_file('test.txt', 'emscripten')
self.btest_exit(test_file('test_wget_data.c'), args=['-O2', '-g2', '-s', 'ASYNCIFY'])
@parameterized({
'': ([],),
'es6': (['-s', 'EXPORT_ES6=1'],),
})
def test_locate_file(self, args):
self.set_setting('EXIT_RUNTIME')
for wasm in [0, 1]:
self.clear()
create_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''')
create_file('data.txt', 'load me right before...')
create_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args, reporting=Reporting.JS_ONLY)
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', Path('sub/page.wasm'))
else:
shutil.move('page.html.mem', Path('sub/page.html.mem'))
shutil.move('test.data', Path('sub/test.data'))
self.run_browser('page.html', None, '/report_result?exit:0')
# alternatively, put locateFile in the HTML
print('in html')
create_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP', '-s', 'ASSERTIONS', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args, reporting=Reporting.JS_ONLY)
if wasm:
shutil.move('page.wasm', Path('sub/page.wasm'))
else:
shutil.move('page.html.mem', Path('sub/page.html.mem'))
self.run_browser('page.html', None, '/report_result?exit:' + expected)
in_html('0')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
return result;
}
''')
in_html('200')
@requires_graphics_hardware
@parameterized({
'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION'], ['-Os', '--closure=1']]:
print(opts)
self.btest(test_file('glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(test_file('glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(test_file('glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
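# Synthesize keydown/keypress/keyup events from JS and check that SDL2 sees them.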
def test_sdl2_key(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_key.c'), '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one'])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify=0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure=1', '-g1', '-s', 'LEGACY_GL_EMULATION'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % read_file('reftest.js'))
create_file('test.html', html)
create_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_glclipplane_gllighting(self):
self.btest('sdl2_glclipplane_gllighting.c', reference='sdl2_glclipplane_gllighting.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='glClipPlane and GL_LIGHTING emulation. You should see a torus cut open on one side with lighting from one lightsource applied.')
@requires_graphics_hardware
def test_sdl2_glalphatest(self):
self.btest('sdl2_glalphatest.c', reference='sdl2_glalphatest.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='GL_ALPHA_TEST emulation. You should see gradients with different alpha testing modes and reference values.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = read_file('test.html')
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(test_file('freetype/LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
@requires_graphics_hardware
def test_sdl2_ttf_rtl(self):
shutil.copy2(test_file('third_party/notofont/NotoNaskhArabic-Regular.ttf'), self.get_dir())
self.btest('sdl2_ttf_rtl.c', reference='sdl2_ttf_rtl.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'NotoNaskhArabic-Regular.ttf'],
message='You should see colorful "سلام" and "جهان" with shaped Arabic script in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(test_file('cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest_exit('sdl2_misc.c', args=['-s', 'USE_SDL=2'])
def test_sdl2_misc_main_module(self):
self.btest_exit('sdl2_misc.c', args=['-s', 'USE_SDL=2', '-s', 'MAIN_MODULE'])
def test_sdl2_misc_via_object(self):
self.run_process([EMCC, '-c', test_file('sdl2_misc.c'), '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'EXIT_RUNTIME', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?exit:0')
@parameterized({
'dash_s': (['-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2'],),
'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
})
@requires_sound_hardware
def test_sdl2_mixer_wav(self, flags):
shutil.copyfile(test_file('sounds/the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'INITIAL_MEMORY=33554432'] + flags)
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
'mod': (['mod'], 'MIX_INIT_MOD', 'bleep.xm'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(test_file('sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-s', 'USE_SDL=2',
'-s', 'USE_SDL_MIXER=2',
'-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
'-s', 'INITIAL_MEMORY=33554432'
])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
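# Basic ASYNCIFY smoke test, run at each optimization level.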
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2', '-s', 'ASYNCIFY'])
def test_asyncify_tricky_function_sig(self):
self.btest('browser/test_asyncify_tricky_function_sig.cpp', '85', args=['-s', 'ASYNCIFY_ONLY=[foo(char.const*?.int#),foo2(),main,__original_main]', '-s', 'ASYNCIFY=1'])
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=['-s', 'ASYNCIFY', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-g'])
def test_async_2(self):
# Error.stackTraceLimit defaults to 10 in chrome but this test relies on more
# than 40 stack frames being reported.
create_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js', '-s', 'ASYNCIFY'])
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling', '-s', 'ASYNCIFY'])
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-profiling', '-s', 'ASYNCIFY'])
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + ['-s', 'ASYNCIFY'])
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts), '-s', 'ASYNCIFY'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP', '-lSDL', '-s', 'ASYNCIFY'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os', '-s', 'ASYNCIFY'])
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'ASYNCIFY'])
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=['-s', 'ASYNCIFY'])
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=[sync_tunnel, sync_tunnel_bool]'],), # noqa
'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
if '@' in str(args):
create_file('filey.txt', 'sync_tunnel\nsync_tunnel_bool\n')
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', test_file('browser/async_returnvalue.js')] + args + ['-s', 'ASSERTIONS'])
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', 'abort:RuntimeError: unreachable', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=[waka]', '--profiling'])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
def test_minimal_runtime_modularize(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-s', 'MODULARIZE', '-s', 'MINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
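# MODULARIZE wraps the output in a factory function; check that calling the factory
# returns a Promise, that EXPORT_NAME renames the factory without leaking a global
# Module, and that Module options (e.g. noInitialRun) passed to the factory are honored.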
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure=1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause timeout
let promise = HelloWorld();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest([test_file('browser_test_hello_world.c'), '-s', 'MODULARIZE', '-s', 'SINGLE_FILE'] + args + opts)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
def test_modularize_network_error(self):
test_c_path = test_file('browser_test_hello_world.c')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_c_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message.slice(0, 54));
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?abort(both async and sync fetching of the wasm failed)')
def test_modularize_init_error(self):
test_cpp_path = test_file('browser/test_modularize_init_error.cpp')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_cpp_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
if (typeof window === 'object') {
window.addEventListener('unhandledrejection', function(event) {
reportResultToServer("Unhandled promise rejection: " + event.reason.message);
});
}
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err);
});
</script>
''')
self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
# test illustrating the regression in the modularize feature introduced in commit c5af8f6
# when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
self.set_setting('EXIT_RUNTIME')
# amount of memory different from the default one that will be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure=1']]:
# the main function simply checks that the amount of allocated heap memory is correct
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use eval here in order for the test with closure compiler enabled to succeed
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
return 0;
}
''' % totalMemory)
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts, reporting=Reporting.JS_ONLY)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?exit:0')
def test_webidl(self):
# see original in test_core.py
self.run_process([WEBIDL_BINDER, test_file('webidl/test.idl'), 'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(Path('webidl/test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
create_file('main.c', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
return 0;
}
''')
create_file('side.c', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', '-O2', 'side.wasm'])
print('wasm in worker (we can read binary data synchronously there)')
self.run_process([EMCC, 'side.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', '-O2', '--proxy-to-worker', 'side.wasm'])
print('wasm (will auto-preload since no sync binary reading)')
# same wasm side module works
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', '-O2', '-s', 'EXPORT_ALL', 'side.wasm'])
def test_dlopen_async(self):
create_file('side.c', 'int foo = 42;\n')
self.run_process([EMCC, 'side.c', '-o', 'libside.so', '-s', 'SIDE_MODULE'])
self.btest_exit(test_file('other/test_dlopen_async.c'), args=['-s', 'MAIN_MODULE=2'])
def test_dlopen_blocking(self):
create_file('side.c', 'int foo = 42;\n')
self.run_process([EMCC, 'side.c', '-o', 'libside.so', '-s', 'SIDE_MODULE', '-s', 'USE_PTHREADS', '-Wno-experimental'])
# Attempting to dlopen the side module (without preloading) should fail on the main thread
# since the synchronous `readBinary` function does not exist.
self.btest_exit(test_file('other/test_dlopen_blocking.c'), assert_returncode=1, args=['-s', 'MAIN_MODULE=2'])
# But with PROXY_TO_PTHREAD it does work, since we can do blocking and sync XHR in a worker.
self.btest_exit(test_file('other/test_dlopen_blocking.c'), args=['-s', 'MAIN_MODULE=2', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-Wno-experimental'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
@parameterized({
'': ([0],),
'inworker': ([1],),
})
def test_dylink_dso_needed(self, inworker):
self.emcc_args += ['-O2']
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
def do_run(src, expected_output, emcc_args=[]):
# XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
# -> do the assert about expected output inside browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# setup by the shell).
create_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
create_file('test_dylink_dso_needed.c', src + r'''
#include <emscripten/em_asm.h>
int main() {
int rtn = test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
return rtn;
}
''' % expected_output)
self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), args=self.get_emcc_args() + ['--post-js', 'post.js'] + emcc_args)
self._test_dylink_dso_needed(do_run)
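# A side module should be able to use the LEGACY_GL_EMULATION layer linked into the main
# module: side() creates an SDL/GL context and returns glGetString(GL_EXTENSIONS), which
# main() checks for an emulated extension.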
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_file('main.c', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
return 0;
}
''')
create_file('side.c', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL'])
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', '-O2', '-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL', 'side.wasm'])
def test_dynamic_link_many(self):
# test asynchronously loading two side modules during startup
create_file('main.c', r'''
#include <assert.h>
int side1();
int side2();
int main() {
assert(side1() == 1);
assert(side2() == 2);
return 0;
}
''')
create_file('side1.c', r'''
int side1() { return 1; }
''')
create_file('side2.c', r'''
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.c', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.c', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', 'side1.wasm', 'side2.wasm'])
def test_dynamic_link_pthread_many(self):
# Test asynchronously loading two side modules during startup
# They should always load in the same order
# Verify that function pointers in the browser's main thread
# refer to the same function as in a pthread worker.
# The main thread function table is populated asynchronously
# in the browser's main thread. However, it should still be
# populated in the same order as in a pthread worker to
# guarantee function pointer interop.
create_file('main.cpp', r'''
#include <cassert>
#include <thread>
#include <emscripten/emscripten.h>
int side1();
int side2();
int main() {
auto side1_ptr = &side1;
auto side2_ptr = &side2;
// Don't join the thread since this is running in the
// browser's main thread.
std::thread([=]{
assert(side1_ptr == &side1);
assert(side2_ptr == &side2);
emscripten_force_exit(0);
}).detach();
emscripten_exit_with_live_runtime();
}
''')
# The browser will try to load side1 first.
# Use a big payload in side1 so that it takes longer to load than side2
create_file('side1.cpp', r'''
char const * payload1 = "''' + str(list(range(1, int(1e5)))) + r'''";
int side1() { return 1; }
''')
create_file('side2.cpp', r'''
char const * payload2 = "0";
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.cpp'),
args=['-Wno-experimental', '-pthread', '-s', 'MAIN_MODULE=2', 'side1.wasm', 'side2.wasm'])
def test_memory_growth_during_startup(self):
create_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
create_file('html.html', read_file(path_from_root('src', 'shell_minimal.html')).replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
@requires_threads
def test_pthread_c11_threads(self):
self.btest_exit(test_file('pthread/test_pthread_c11_threads.c'),
args=['-gsource-map', '-std=gnu11', '-xc', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'TOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_pool_size_strict(self):
# Check that it doesn't fail with sufficient number of threads in the pool.
self.btest_exit(test_file('pthread/test_pthread_c11_threads.c'),
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-s', 'TOTAL_MEMORY=64mb'])
# Check that it fails instead of deadlocking on insufficient number of threads in the pool.
self.btest(test_file('pthread/test_pthread_c11_threads.c'),
expected='abort:Assertion failed: thrd_create(&t4, thread_main, NULL) == thrd_success',
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-s', 'PTHREAD_POOL_SIZE=3', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-s', 'TOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_in_pthread_pool_size_strict(self):
# Check that it doesn't fail when a pthread creates another pthread and the pool is large enough.
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-g2', '-pthread', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'PTHREAD_POOL_SIZE_STRICT=2'])
# Check that it fails gracefully when a pthread creates another pthread and the pool is too small.
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-g2', '-pthread', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-DSMALL_POOL'])
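# Note: PTHREAD_POOL_SIZE_STRICT appears to control what happens when a thread is
# created but no pooled worker is available: roughly, 0 allows spinning up new workers
# on demand, 1 (the default) only warns, and 2 makes thread creation fail outright
# instead of waiting for a worker, which is what the failing assertion in the negative
# case above relies on. This is a summary of the behaviour these tests exercise, not a
# definitive statement of the setting's semantics.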
# Test that the emscripten_ atomics api functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure=1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest_exit(test_file('pthread/test_pthread_atomics.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest_exit(test_file('pthread/test_pthread_64bit_atomics.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@parameterized({
'': ([],),
'O3': (['-O3'],)
})
@requires_threads
def test_pthread_64bit_cxx11_atomics(self, opt):
for pthreads in [[], ['-s', 'USE_PTHREADS']]:
self.btest_exit(test_file('pthread/test_pthread_64bit_cxx11_atomics.cpp'), args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest_exit(test_file('pthread/test_pthread_hardware_concurrency.cpp'), args=['-O2', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(test_file('pthread/main_thread_%s.cpp' % name), expected='abort:Blocking on the main thread is not allowed by default.', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest_exit(test_file('pthread/main_thread_%s.cpp' % name), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest_exit(test_file('pthread/main_thread_join.cpp'), assert_returncode=2, args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest_exit(test_file('pthread/main_thread_join.cpp'), assert_returncode=2, args=['-O3', '-s', 'USE_PTHREADS', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest_exit(test_file('pthread/main_thread_%s.cpp' % name), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest_exit(test_file('pthread/test_pthread_gcc_atomic_fetch_and_op.cpp'), args=args + ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest_exit(test_file('pthread/test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest_exit(test_file('pthread/test_pthread_gcc_atomic_op_and_fetch.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest_exit(test_file('pthread/test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the remaining GCC atomics beyond the two tests above.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest_exit(test_file('pthread/test_pthread_gcc_atomics.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest_exit(test_file('pthread/test_pthread_gcc_spinlock.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest_exit(test_file('pthread/test_pthread_create.cpp'),
args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
# TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-s', 'MINIMAL_RUNTIME'])
# Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
self.btest_exit(test_file('pthread/test_pthread_preallocates_workers.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
self.btest_exit(test_file('pthread/test_large_pthread_allocation.cpp'), args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest_exit(test_file('pthread/test_pthread_proxy_to_pthread.c'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest_exit(test_file('pthread/test_pthread_nested_spawns.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest_exit(test_file('pthread/test_pthread_join.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest_exit(test_file('pthread/test_std_thread_detach.cpp'), args=['-s', 'USE_PTHREADS'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest_exit(test_file('pthread/test_pthread_cancel.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread_cancel() cancels pthread_cond_wait() operation
@requires_threads
def test_pthread_cancel_cond_wait(self):
self.btest_exit(test_file('pthread/test_pthread_cancel_cond_wait.cpp'), assert_returncode=1, args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs chrome renderer, and keep subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest_exit(test_file('pthread/test_pthread_kill.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest_exit(test_file('pthread/test_pthread_cleanup.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest_exit(test_file('pthread/test_pthread_mutex.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest_exit(test_file('pthread/test_pthread_attr_getstack.cpp'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest_exit(test_file('pthread/test_pthread_malloc.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call sbrk(), with the main thread having to free the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest_exit(test_file('pthread/test_pthread_malloc_free.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest_exit(test_file('pthread/test_pthread_barrier.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest_exit(test_file('pthread/test_pthread_once.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_spawns(self):
self.btest_exit(test_file('pthread/test_pthread_spawns.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '--closure=1', '-s', 'ENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest_exit(test_file('pthread/test_pthread_volatile.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest_exit(test_file('pthread/test_pthread_thread_local_storage.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest_exit(test_file('pthread/test_pthread_condition_variable.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest_exit(test_file('pthread/test_pthread_printf.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. This used to fail due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest_exit(test_file('pthread/test_pthread_iostream.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
def test_pthread_unistd_io_bigint(self):
self.btest_exit(test_file('unistd/io.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'WASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest_exit(test_file('pthread/test_pthread_setspecific_mainthread.c'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest_exit(test_file('pthread/test_pthread_file_io.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest_exit(test_file('pthread/test_pthread_supported.cpp'), args=['-O3'] + args)
@requires_threads
def test_pthread_dispatch_after_exit(self):
self.btest_exit(test_file('pthread/test_pthread_dispatch_after_exit.c'), args=['-s', 'USE_PTHREADS'])
# Test the operation of the Module.locateFile mechanism (formerly Module.pthreadMainPrefixURL) for locating where the pthread worker.js is loaded from
@requires_threads
def test_pthread_custom_pthread_main_url(self):
self.set_setting('EXIT_RUNTIME')
ensure_dir('cdn')
create_file('main.cpp', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
_Atomic int result = 0;
void *thread_main(void *arg) {
result = 1;
pthread_exit(0);
}
int main() {
pthread_t t;
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
assert(result == 1);
return 0;
}
''')
# Test that it is possible to define a "Module.locateFile" function to locate where worker.js will be loaded from.
create_file('shell.html', read_file(path_from_root('src', 'shell.html')).replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
shutil.move('test.worker.js', Path('cdn/test.worker.js'))
if os.path.exists('test.html.mem'):
shutil.copyfile('test.html.mem', Path('cdn/test.html.mem'))
self.run_browser('test.html', '', '/report_result?exit:0')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_file('shell2.html', read_file(path_from_root('src', 'shell.html')).replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test2.html'], reporting=Reporting.JS_ONLY)
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?exit:0')
# Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), it does not deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest_exit(test_file('pthread/test_pthread_proxying_in_futex_wait.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest_exit(test_file('pthread/test_pthread_sbrk.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS']]:
self.btest(test_file('gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest_exit(test_file('pthread/test_pthread_run_on_main_thread.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest_exit(test_file('pthread/test_pthread_run_on_main_thread_flood.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest_exit(test_file('pthread/call_async.c'), args=['-s', 'USE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread/call_sync_on_main_thread.js')])
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_sync_on_main_thread.js')])
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_sync_on_main_thread.js'), '-s', 'EXPORTED_FUNCTIONS=_main,_malloc'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')], ['-O3']]:
self.btest_exit(test_file('pthread/test_pthread_global_data_initialization.c'), args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest_exit(test_file('pthread/test_pthread_global_data_initialization.c'), args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest_exit(test_file('pthread/test_pthread_clock_drift.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest_exit(test_file('pthread/test_pthread_utf8_funcs.cpp'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@requires_threads
def test_pthread_wake_all(self):
self.btest_exit(test_file('pthread/test_futex_wake_all.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'INITIAL_MEMORY=64MB'], also_asmjs=True)
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest_exit(test_file('pthread/test_pthread_stack_bounds.cpp'), args=['-s', 'USE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest_exit(test_file('pthread/test_pthread_tls.cpp'), args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest_exit(test_file('pthread/test_pthread_tls_main.cpp'), args=['-s', 'USE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(test_file('core/test_safe_stack.c'), expected='abort:stack overflow', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB'])
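# Note: STACK_OVERFLOW_CHECK=2 is understood to add binaryen-level instrumentation of
# stack pointer updates (level 1 only places and periodically verifies stack cookies),
# which is what turns the overflow here into the explicit 'abort:stack overflow'
# message asserted above; this describes the setting in general, not anything specific
# to this file.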
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(test_file('pthread/test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread/test_pthread_asan_use_after_free.js')])
@requires_threads
def test_pthread_asan_use_after_free_2(self):
# similar to test_pthread_asan_use_after_free, but using a pool instead
# of proxy-to-pthread, and also the allocation happens on the pthread
# (which tests that it can use the offset converter to get the stack
# trace there)
self.btest(test_file('pthread/test_pthread_asan_use_after_free_2.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=1', '--pre-js', test_file('pthread/test_pthread_asan_use_after_free_2.js')])
@requires_threads
def test_pthread_exit_process(self):
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-s', 'EXIT_RUNTIME',
'-DEXIT_RUNTIME',
'-O0']
args += ['--pre-js', test_file('core/pthread/test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core/pthread/test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
@requires_threads
def test_pthread_no_exit_process(self):
# Same as above but without EXIT_RUNTIME. In this case we don't expect onExit to
# ever be called.
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-O0']
args += ['--pre-js', test_file('core/pthread/test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core/pthread/test_pthread_exit_runtime.c'), expected='43', args=args)
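# Note: the shared test_pthread_exit_runtime.pre.js used by both tests above is assumed
# to install a Module.onExit-style hook that reports the exit status back to the harness.
# With EXIT_RUNTIME the runtime tears down and the hook fires ('onExit status: 42');
# without it the runtime stays alive, the hook never fires, and the program reports a
# different marker value ('43') instead.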
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
self.btest_exit(test_file('core/test_em_asm_signatures.cpp'), assert_returncode=121, args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest_exit(test_file('core/test_em_asm_signatures.cpp'), assert_returncode=121, args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest_exit(test_file('core/test_main_thread_async_em_asm.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_file('page.html', read_file(test_file('browser/test_em_asm_blocking.html')))
self.compile_btest([test_file('browser/test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler set by signal(SIGALRM, func);
def test_sigalrm(self):
self.btest_exit(test_file('test_sigalrm.c'), args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', test_file('canvas_style_proxy_shell.html'), '--pre-js', test_file('canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(test_file('canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(test_file('custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', test_file('custom_messages_proxy_shell.html'), '--post-js', test_file('custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
self.compile_btest([test_file('in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, returncode in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, returncode)
self.btest_exit('binaryen_async.c', assert_returncode=returncode, args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest_exit('binaryen_async.c', assert_returncode=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
@parameterized({
'': ([],),
'asan': (['-fsanitize=address', '-s', 'INITIAL_MEMORY=128MB'],)
})
def test_manual_wasm_instantiate(self, args=[]):
self.compile_btest([test_file('manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js'] + args)
shutil.copyfile(test_file('manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
ensure_dir('cdn')
create_file('shell2.html', read_file(path_from_root('src', 'shell.html')).replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([test_file('browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', Path('cdn/test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
@also_with_threads
def test_utf8_textdecoder(self):
self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', test_file('utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXPORTED_RUNTIME_METHODS=[UTF8ToString]'])
@also_with_threads
def test_utf16_textdecoder(self):
self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', test_file('utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXPORTED_RUNTIME_METHODS=[UTF16ToString,stringToUTF16,lengthBytesUTF16]'])
@also_with_threads
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
# pthread TextDecoder support is more complex due to
# https://github.com/whatwg/encoding/issues/172
# and therefore the expected code size win there is actually a loss
if '-pthread' not in self.emcc_args:
self.assertLess(td_without_fallback, just_fallback)
else:
self.assertGreater(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
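# Note: TEXTDECODER selects how UTF-8 strings are decoded from the heap, roughly:
# 0 uses only the hand-written JS fallback decoder, 1 (the default) uses the browser's
# TextDecoder when available while keeping the JS fallback, and 2 relies on TextDecoder
# unconditionally with no fallback. The size comparisons above depend on that: dropping
# the fallback (2) should normally shrink test.js relative to fallback-only (0), except
# in pthread builds for the reason linked above.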
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure=1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5477), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
@disabled('This test is disabled because current OffscreenCanvas does not allow transfering it after a rendering context has been created for it.')
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest_exit('gl_only_in_pthread.cpp', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest_exit('webgl_draw_triangle.c', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
# For testing WebGL draft extensions like this, if using Chrome as the browser,
# we might want to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
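# Note: a hypothetical local invocation enabling the draft extension in Chrome could
# look roughly like:
#   EMTEST_BROWSER='google-chrome --enable-webgl-draft-extensions' tests/runner.py browser.test_webgl_multi_draw
# (the exact browser binary and runner invocation depend on the local setup).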
# Tests for base_vertex/base_instance extension
# For testing WebGL draft extensions like this, if using Chrome as the browser,
# we might want to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
# If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
# Also there is a known bug with Mac Intel baseInstance which can fail to produce the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'OFFSCREEN_FRAMEBUFFER',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sample_query(self):
cmd = ['-s', 'MAX_WEBGL_VERSION=2', '-lGL']
self.btest_exit('webgl_sample_query.cpp', args=cmd)
@requires_graphics_hardware
def test_webgl_timer_query(self):
for args in [
# EXT query entrypoints on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
# builtin query entrypoints on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2'],
# EXT query entrypoints on a WebGL 1.0 context while built for WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2'],
]:
cmd = args + ['-lGL']
self.btest_exit('webgl_timer_query.cpp', args=cmd)
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
for version in [[], ['-s', 'FULL_ES3'], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest_exit('webgl_draw_triangle.c', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest_exit('test_webgl_no_auto_init_extensions.c', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# VAO path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
self.btest_exit('webgl_offscreen_framebuffer_swap_with_bad_state.c', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest_exit('webgl_draw_triangle_with_uniform_color.c', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'GL_DEBUG', '-s', 'PROXY_TO_PTHREAD']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@parameterized({
'proxy': (['-sPROXY_TO_PTHREAD'],),
'': ([],),
})
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self, args):
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
cmd = args + args2 + args3 + ['-s', 'USE_PTHREADS', '-lGL', '-s', 'GL_DEBUG']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest_exit('webgl2_simple_enable_extensions.c', args=cmd)
# Tests the feature that the shell html page can preallocate the typed array and place it
# in Module.buffer before loading the main script.
# In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest_exit('test_preallocated_heap.cpp', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', test_file('test_preallocated_heap_shell.html')])
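# Note: the shell file above is assumed to do the preallocation roughly like:
#   <script> var Module = { buffer: new ArrayBuffer(16 * 1024 * 1024) }; </script>
# i.e. the backing store exists before the compiled JS runs and its byteLength becomes
# the heap size, which is why this only makes sense for asm.js (WASM=0) builds; a wasm
# build needs a WebAssembly.Memory instead.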
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest_exit('fetch/to_memory.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(test_file('gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest_exit('fetch/to_memory.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'] + arg,
also_asmjs=True)
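# Note: the C side of these fetch tests is assumed to follow the usual emscripten_fetch()
# pattern, roughly:
#   emscripten_fetch_attr_t attr;
#   emscripten_fetch_attr_init(&attr);
#   strcpy(attr.requestMethod, "GET");
#   attr.attributes = EMSCRIPTEN_FETCH_LOAD_TO_MEMORY;
#   attr.onsuccess = download_succeeded; attr.onerror = download_failed;
#   emscripten_fetch(&attr, "gears.png");
# with -DFILE_DOES_NOT_EXIST switching the URL to a missing file to exercise the
# http 404 error path.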
@parameterized({
'': ([],),
'pthread_exit': (['-DDO_PTHREAD_EXIT'],),
})
@requires_threads
def test_fetch_from_thread(self, args):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/from_thread.cpp',
expected='42',
args=args + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/to_indexeddb.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/cached_xhr.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/response_headers.cpp', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'], also_asmjs=True)
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest_exit('fetch/stream_file.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'INITIAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_xhr.cpp', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
# In that case, append is implicitly understood.
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/example_synchronous_fetch.cpp', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/example_synchronous_fetch.cpp', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_xhr.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_delete(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
ensure_dir('dirrey')
shutil.copyfile(test_file('asmfs/hello_file.txt'), Path('dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest_exit('asmfs/hello_file.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(test_file('asmfs/hello_file.txt'), 'hello_file.txt')
self.btest_exit('asmfs/read_file_twice.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
self.btest_exit('asmfs/fopen_write.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest_exit('cstdio/test_remove.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
self.btest_exit(test_file('unistd/close.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
self.btest_exit(test_file('unistd/access.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest_exit(test_file('unistd/unlink.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
self.btest_exit('asmfs/relative_paths.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest_exit('pthread/test_pthread_locale.c', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and
# emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest_exit('emscripten_set_canvas_element_size.c')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main
# thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest_exit('emscripten_get_device_pixel_ratio.c', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest_exit(test_file('pthread/test_pthread_run_script.cpp'), args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', test_file('canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest_exit('canvas_animate_resize.cpp', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@parameterized({
'': ([],),
'O3': (['-O3'],)
})
@requires_threads
def test_pthread_hello_thread(self, opts):
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest_exit(test_file('pthread/hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS'] + modularize + opts)
# Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
@parameterized({
'': ([],),
'O3': (['-O3'],)
})
def test_minimal_runtime_hello_thread(self, opts):
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule']]:
self.btest_exit(test_file('pthread/hello_thread.c'), expected='1', args=['-s', 'MINIMAL_RUNTIME', '-s', 'USE_PTHREADS'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest_exit(test_file('pthread/test_pthread_memory_growth_mainthread.c'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest_exit(test_file('pthread/test_pthread_memory_growth.c'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS'])
run(['-s', 'PROXY_TO_PTHREAD'])
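# Note: with pthreads the heap is backed by a SharedArrayBuffer, and growing it means
# every thread must refresh its typed-array views onto the grown buffer; the two tests
# above cover growth triggered on the main thread and in a worker respectively. The
# '-Werror' removal is presumably because emcc warns about combining USE_PTHREADS with
# ALLOW_MEMORY_GROWTH, and -Werror would turn that warning into an error.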
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest_exit(test_file('pthread/test_pthread_reltime.cpp'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.set_setting('EXIT_RUNTIME')
self.compile_btest([test_file('pthread/hello_thread.c'), '-s', 'USE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'], reporting=Reporting.JS_ONLY)
shutil.copyfile(test_file('pthread/main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?exit:0')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
return 0;
}
''')
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'EXIT_RUNTIME', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE'])
create_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?exit:0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME', '-s', 'SINGLE_FILE', '-s', 'WASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE', '-s', 'ENVIRONMENT=web', '-O2', '--closure=1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
for wasm_enabled in [True, False]:
args = [test_file('browser_test_hello_world.c'), '-o', 'test.js', '-s', 'SINGLE_FILE']
if not wasm_enabled:
args += ['-s', 'WASM=0']
self.compile_btest(args)
create_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
self.set_setting('EXIT_RUNTIME')
self.compile_btest([test_file('pthread/test_pthread_atomics.cpp'), '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], reporting=Reporting.JS_ONLY)
create_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?exit:0')
def test_access_file_after_heap_resize(self):
create_file('test.txt', 'hello from file')
self.compile_btest([test_file('access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation
self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.compile_btest([test_file('access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_file('main.cpp', r'''
int main() {
return 0;
}
''')
create_file('shell.html', read_file(path_from_root('src', 'shell.html')).replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.btest_exit('main.cpp', args=['--shell-file', 'shell.html'])
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest_exit(test_file('pthread/emscripten_thread_sleep.c'), args=['-s', 'USE_PTHREADS', '-s', 'EXPORTED_RUNTIME_METHODS=[print]'])
# Tests that Emscripten-compiled applications can be run from a relative path in browser that is different than the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', Path('subdir/test.js'))
shutil.move('test.wasm', Path('subdir/test.wasm'))
src = read_file('test.html')
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
for args, creations in [
(['-s', 'MODULARIZE'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', Path('subdir/test.js'))
shutil.move('test.wasm', Path('subdir/test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
# also we eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', Path(filesystem_path, 'test.js'))
shutil.move('test.wasm', Path(filesystem_path, 'test.wasm'))
create_file(Path(filesystem_path, 'test.html'), '''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest(test_file('emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(test_file('emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest_exit(test_file('emscripten_set_timeout.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest_exit(test_file('emscripten_set_timeout_loop.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_emscripten_set_immediate(self):
self.btest_exit(test_file('emscripten_set_immediate.c'))
def test_emscripten_set_immediate_loop(self):
self.btest_exit(test_file('emscripten_set_immediate_loop.c'))
@requires_threads
def test_emscripten_set_interval(self):
self.btest_exit(test_file('emscripten_set_interval.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(test_file('emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind', '-s', 'ASYNCIFY'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(test_file('emscripten_console_log.c'), '0', args=['--pre-js', test_file('emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(test_file('emscripten_throw_number.c'), '0', args=['--pre-js', test_file('emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(test_file('emscripten_throw_string.c'), '0', args=['--pre-js', test_file('emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure=1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest_exit('webgl_draw_triangle.c', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1'])
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1', '-s', 'WASM=0'] + minimal_runtime)
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1', '-s', 'MINIMAL_RUNTIME'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE']]:
for modularize in [[], ['-s', 'MODULARIZE']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure=1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure', '1']]:
self.btest(test_file('small_hello_world.c'), '0', args=args + ['-s', 'MINIMAL_RUNTIME'])
@requires_threads
def test_offset_converter(self, *args):
self.btest_exit(test_file('browser/test_offset_converter.c'), assert_returncode=1, args=['-s', 'USE_OFFSET_CONVERTER', '-gsource-map', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest_exit(test_file('browser/test_emscripten_unwind_to_js_event_loop.c'))
def test_wasm2js_fallback(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
# Then disable WebAssembly support in the VM, and try again. Should still work with the Wasm2JS fallback.
html = read_file('test.html')
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
open('test.html', 'w').write(html)
os.remove('test.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
# Restore the .wasm.js file, then corrupt the .wasm file, that should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_system(self):
self.btest_exit(test_file('system.c'))
@no_firefox('no 4GB support yet')
@require_v8
def test_zzz_zzz_4gb(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('browser', 'test_4GB.cpp')
# Tests that emmalloc supports up to 4GB Wasm heaps.
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_4gb(self):
self.btest(test_file('mem_growth.cpp'),
expected='-65536', # == 4*1024*1024*1024 - 65536 casted to signed
args=['-s', 'MALLOC=emmalloc', '-s', 'ABORTING_MALLOC=0', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MAXIMUM_MEMORY=4GB'])
# Test that it is possible to malloc() a huge 3GB memory block in 4GB mode using emmalloc.
# Also test emmalloc-memvalidate and emmalloc-memvalidate-verbose build configurations.
@no_firefox('no 4GB support yet')
def test_emmalloc_3GB(self):
def test(args):
self.btest(test_file('alloc_3gb.cpp'),
expected='0',
args=['-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ALLOW_MEMORY_GROWTH=1'] + args)
test(['-s', 'MALLOC=emmalloc'])
test(['-s', 'MALLOC=emmalloc-debug'])
test(['-s', 'MALLOC=emmalloc-memvalidate'])
test(['-s', 'MALLOC=emmalloc-memvalidate-verbose'])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_memgrowth(self, *args):
self.btest(test_file('browser/emmalloc_memgrowth.cpp'), expected='0', args=['-s', 'MALLOC=emmalloc', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'ABORTING_MALLOC=0', '-s', 'ASSERTIONS=2', '-s', 'MINIMAL_RUNTIME=1', '-s', 'MAXIMUM_MEMORY=4GB'])
@no_firefox('no 4GB support yet')
@require_v8
def test_zzz_zzz_2gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('browser', 'test_2GB_fail.cpp')
@no_firefox('no 4GB support yet')
@require_v8
def test_zzz_zzz_4gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
self.do_run_in_out_file_test('browser', 'test_4GB_fail.cpp')
@disabled("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-s', 'ASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
# the specific symptom of the hang that was fixed is that the test hangs
# at some point, using 0% CPU. Often that occurred in 0-200 iterations, but
# you may want to adjust "ITERATIONS".
self.btest_exit(test_file('pthread/test_pthread_proxy_hammer.cpp'),
args=['-s', 'USE_PTHREADS', '-O2', '-s', 'PROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
def test_assert_failure(self):
self.btest(test_file('browser/test_assert_failure.c'), 'abort:Assertion failed: false && "this is a test"')
def test_full_js_library_strict(self):
self.btest_exit(test_file('hello_world.c'), args=['-sINCLUDE_FULL_LIBRARY', '-sSTRICT_JS'])
EMRUN = path_from_root('emrun')
class emrun(RunnerCore):
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([EMRUN, '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([EMRUN, '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
def test_no_browser(self):
# Test --no_browser mode where we have to take care of launching the browser ourselves
# and then killing emrun when we are done.
if not has_browser():
self.skipTest('need a browser')
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
proc = subprocess.Popen([EMRUN, '--no_browser', '.', '--port=3333'], stdout=PIPE)
try:
if EMTEST_BROWSER:
print('Starting browser')
browser_cmd = shlex.split(EMTEST_BROWSER)
browser = subprocess.Popen(browser_cmd + ['http://localhost:3333/hello_world.html'])
try:
while True:
stdout = proc.stdout.read()
if b'Dumping out file' in stdout:
break
finally:
print('Terminating browser')
browser.terminate()
browser.wait()
finally:
print('Terminating emrun server')
proc.terminate()
proc.wait()
def test_emrun(self):
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory the suite will clean up afterwards, since the
# browser that is launched will have that directory as its startup directory. The browser will
# not close as part of the test, pinning down the cwd on Windows so that it cannot be
# deleted. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [EMRUN, '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
# Verify that trying to pass an argument to the page without the `--` separator will
# generate an actionable error message
err = self.expect_fail(args_base + ['--foo'])
self.assertContained('error: unrecognized arguments: --foo', err)
self.assertContained('remember to add `--` between arguments', err)
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941']
]:
args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
stdout = read_file(self.in_dir('stdout.txt'))
stderr = read_file(self.in_dir('stderr.txt'))
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
| [] | [] | [
"EMTEST_LACKS_SOUND_HARDWARE",
"EMTEST_LACKS_GRAPHICS_HARDWARE",
"EMTEST_LACKS_THREAD_SUPPORT",
"EMTEST_LACKS_OFFSCREEN_CANVAS",
"EMCC_STRICT"
] | [] | ["EMTEST_LACKS_SOUND_HARDWARE", "EMTEST_LACKS_GRAPHICS_HARDWARE", "EMTEST_LACKS_THREAD_SUPPORT", "EMTEST_LACKS_OFFSCREEN_CANVAS", "EMCC_STRICT"] | python | 5 | 0 | |
tests/e2e/pkg/tester/tester.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tester
import (
"bytes"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/octago/sflags/gen/gpflag"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"sigs.k8s.io/kubetest2/pkg/testers/ginkgo"
api "k8s.io/kops/pkg/apis/kops/v1alpha2"
"k8s.io/kops/tests/e2e/pkg/kops"
)
// Tester wraps kubetest2's ginkgo tester with additional functionality
type Tester struct {
*ginkgo.Tester
kopsCluster *api.Cluster
kopsInstanceGroups []*api.InstanceGroup
}
func (t *Tester) pretestSetup() error {
kubectlPath, err := t.AcquireKubectl()
if err != nil {
return fmt.Errorf("failed to get kubectl package from published releases: %s", err)
}
existingPath := os.Getenv("PATH")
newPath := fmt.Sprintf("%v:%v", filepath.Dir(kubectlPath), existingPath)
klog.Info("Setting PATH=", newPath)
return os.Setenv("PATH", newPath)
}
// parseKubeconfig will get the current kubeconfig, and extract the specified field by jsonpath.
func parseKubeconfig(jsonPath string) (string, error) {
args := []string{
"kubectl", "config", "view", "--minify", "-o", "jsonpath={" + jsonPath + "}",
}
c := exec.Command(args[0], args[1:]...)
var stdout bytes.Buffer
c.Stdout = &stdout
var stderr bytes.Buffer
c.Stderr = &stderr
if err := c.Run(); err != nil {
klog.Warningf("failed to run %s; stderr=%s", strings.Join(args, " "), stderr.String())
return "", fmt.Errorf("error querying current config from kubectl: %w", err)
}
s := strings.TrimSpace(stdout.String())
if s == "" {
return "", fmt.Errorf("kubeconfig did not contain " + jsonPath)
}
return s, nil
}
// The --host flag was required in the kubernetes e2e tests, until https://github.com/kubernetes/kubernetes/pull/87030
// We can likely drop this when we drop support / testing for k8s 1.17
func (t *Tester) addHostFlag() error {
server, err := parseKubeconfig(".clusters[0].cluster.server")
if err != nil {
return err
}
klog.Infof("Adding --host=%s", server)
t.TestArgs += " --host=" + server
return nil
}
// hasFlag detects if the specified flag has been passed in the args
func hasFlag(args string, flag string) bool {
for _, arg := range strings.Split(args, " ") {
if !strings.HasPrefix(arg, "-") {
continue
}
arg = strings.TrimLeft(arg, "-")
if arg == flag || strings.HasPrefix(arg, flag+"=") {
return true
}
}
return false
}
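// getKopsCluster returns the kops Cluster for the current kubeconfig context, caching the result on the Tester.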
func (t *Tester) getKopsCluster() (*api.Cluster, error) {
if t.kopsCluster != nil {
return t.kopsCluster, nil
}
currentContext, err := parseKubeconfig(".current-context")
if err != nil {
return nil, err
}
kopsClusterName := currentContext
cluster, err := kops.GetCluster(kopsClusterName)
if err != nil {
return nil, err
}
t.kopsCluster = cluster
return cluster, nil
}
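// getKopsInstanceGroups returns the cluster's InstanceGroups, caching the result on the Tester.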
func (t *Tester) getKopsInstanceGroups() ([]*api.InstanceGroup, error) {
if t.kopsInstanceGroups != nil {
return t.kopsInstanceGroups, nil
}
cluster, err := t.getKopsCluster()
if err != nil {
return nil, err
}
igs, err := kops.GetInstanceGroups(cluster.Name)
if err != nil {
return nil, err
}
t.kopsInstanceGroups = igs
return igs, nil
}
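// addProviderFlag passes --provider to the ginkgo tester based on the cluster's cloud provider, unless it was already set.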
func (t *Tester) addProviderFlag() error {
if hasFlag(t.TestArgs, "provider") {
return nil
}
cluster, err := t.getKopsCluster()
if err != nil {
return err
}
provider := ""
switch cluster.Spec.CloudProvider {
case "aws", "gce":
provider = cluster.Spec.CloudProvider
case "digitalocean":
default:
klog.Warningf("unhandled cluster.spec.cloudProvider %q for determining ginkgo Provider", cluster.Spec.CloudProvider)
}
klog.Infof("Setting --provider=%s", provider)
t.TestArgs += " --provider=" + provider
return nil
}
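// addZoneFlag passes --gce-zone (also used for AWS) with the first zone found in the cluster, unless it was already set.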
func (t *Tester) addZoneFlag() error {
// gce-zone is indeed used for AWS as well!
if hasFlag(t.TestArgs, "gce-zone") {
return nil
}
zoneNames, err := t.getZones()
if err != nil {
return err
}
// gce-zone only expects one zone, we just pass the first one
zone := zoneNames[0]
klog.Infof("Setting --gce-zone=%s", zone)
t.TestArgs += " --gce-zone=" + zone
// TODO: Pass the new gce-zones flag for 1.21 with all zones?
return nil
}
func (t *Tester) addMultiZoneFlag() error {
if hasFlag(t.TestArgs, "gce-multizone") {
return nil
}
zoneNames, err := t.getZones()
if err != nil {
return err
}
klog.Infof("Setting --gce-multizone=%t", len(zoneNames) > 1)
t.TestArgs += fmt.Sprintf(" --gce-multizone=%t", len(zoneNames) > 1)
return nil
}
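// addRegionFlag passes --gce-region derived from the cluster spec, unless it was already set.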
func (t *Tester) addRegionFlag() error {
// gce-zone is used for other cloud providers as well
if hasFlag(t.TestArgs, "gce-region") {
return nil
}
cluster, err := t.getKopsCluster()
if err != nil {
return err
}
// We don't explicitly set the provider's region in the spec, so we need to extract it from various fields
var region string
switch cluster.Spec.CloudProvider {
case "aws":
zone := cluster.Spec.Subnets[0].Zone
region = zone[:len(zone)-1]
case "gce":
region = cluster.Spec.Subnets[0].Region
default:
klog.Warningf("unhandled region detection for cloud provider: %v", cluster.Spec.CloudProvider)
}
klog.Infof("Setting --gce-region=%s", region)
t.TestArgs += " --gce-region=" + region
return nil
}
func (t *Tester) addClusterTagFlag() error {
if hasFlag(t.TestArgs, "cluster-tag") {
return nil
}
cluster, err := t.getKopsCluster()
if err != nil {
return err
}
clusterName := cluster.ObjectMeta.Name
klog.Infof("Setting --cluster-tag=%s", clusterName)
t.TestArgs += " --cluster-tag=" + clusterName
return nil
}
func (t *Tester) addProjectFlag() error {
if hasFlag(t.TestArgs, "gce-project") {
return nil
}
cluster, err := t.getKopsCluster()
if err != nil {
return err
}
projectID := cluster.Spec.Project
if projectID == "" {
return nil
}
klog.Infof("Setting --gce-project=%s", projectID)
t.TestArgs += " --gce-project=" + projectID
return nil
}
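// getZones collects the set of zones used by the cluster's subnets (AWS) and instance groups (GCE).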
func (t *Tester) getZones() ([]string, error) {
cluster, err := t.getKopsCluster()
if err != nil {
return nil, err
}
igs, err := t.getKopsInstanceGroups()
if err != nil {
return nil, err
}
zones := sets.NewString()
// Gather zones on AWS
for _, subnet := range cluster.Spec.Subnets {
if subnet.Zone != "" {
zones.Insert(subnet.Zone)
}
}
// Gather zones on GCE
for _, ig := range igs {
for _, zone := range ig.Spec.Zones {
zones.Insert(zone)
}
}
zoneNames := zones.List()
if len(zoneNames) == 0 {
klog.Warningf("no zones found in instance groups")
return nil, nil
}
return zoneNames, nil
}
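// execute parses the tester flags, performs the pre-test setup, adds the provider-specific ginkgo flags, and runs the tests.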
func (t *Tester) execute() error {
fs, err := gpflag.Parse(t)
if err != nil {
return fmt.Errorf("failed to initialize tester: %v", err)
}
help := fs.BoolP("help", "h", false, "")
if err := fs.Parse(os.Args); err != nil {
return fmt.Errorf("failed to parse flags: %v", err)
}
if *help {
fs.SetOutput(os.Stdout)
fs.PrintDefaults()
return nil
}
if err := t.pretestSetup(); err != nil {
return err
}
if err := t.addHostFlag(); err != nil {
return err
}
if err := t.addProviderFlag(); err != nil {
return err
}
if err := t.addZoneFlag(); err != nil {
return err
}
if err := t.addClusterTagFlag(); err != nil {
return err
}
if err := t.addRegionFlag(); err != nil {
return err
}
if err := t.addMultiZoneFlag(); err != nil {
return err
}
if err := t.addProjectFlag(); err != nil {
return err
}
return t.Test()
}
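// NewDefaultTester returns a Tester wrapping kubetest2's default ginkgo tester.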
func NewDefaultTester() *Tester {
t := &Tester{}
t.Tester = ginkgo.NewDefaultTester()
return t
}
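// Main is the entry point for the tester binary; it runs execute and exits fatally on error.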
func Main() {
t := NewDefaultTester()
if err := t.execute(); err != nil {
klog.Fatalf("failed to run ginkgo tester: %v", err)
}
}
| [
"\"PATH\""
] | [] | [
"PATH"
] | [] | ["PATH"] | go | 1 | 0 | |
pkg/cmd/step/verify/step_verify_preinstall_integration_test.go | // +build integration
package verify_test
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/jenkins-x/jx/pkg/cmd/opts/step"
"github.com/jenkins-x/jx/pkg/cmd/step/create"
"github.com/jenkins-x/jx/pkg/config"
"github.com/jenkins-x/jx/pkg/cmd/clients"
"github.com/jenkins-x/jx/pkg/cmd/namespace"
"github.com/jenkins-x/jx/pkg/tests"
"github.com/jenkins-x/jx/pkg/cmd/opts"
"github.com/jenkins-x/jx/pkg/cmd/step/verify"
"github.com/jenkins-x/jx/pkg/cmd/testhelpers"
gits_test "github.com/jenkins-x/jx/pkg/gits/mocks"
helm_test "github.com/jenkins-x/jx/pkg/helm/mocks"
"github.com/stretchr/testify/assert"
)
const (
testDeployNamespace = "new-jx-ns"
)
func TestStepVerifyPreInstallTerraformKaniko(t *testing.T) {
tests.Retry(t, 5, time.Second*10, func(r *tests.R) {
options := createTestStepVerifyPreInstallOptions(filepath.Join("test_data", "preinstall", "terraform_kaniko"))
_, origNamespace, err := options.KubeClientAndDevNamespace()
assert.NoError(t, err)
defer resetNamespace(t, origNamespace)
err = options.Run()
assert.Errorf(r, err, "the command should have failed for terraform and kaniko with a missing kaniko secret")
})
}
func TestStepVerifyPreInstallNoKanikoNoLazyCreate(t *testing.T) {
// TODO the fake k8s client always seems to lazily create a namespace on demand so the 'jx step verify preinstall' never fails
t.SkipNow()
options := createTestStepVerifyPreInstallOptions(filepath.Join("test_data", "preinstall", "no_kaniko_or_terraform"))
// explicitly disable lazy create
options.LazyCreateFlag = "false"
_, origNamespace, err := options.KubeClientAndDevNamespace()
assert.NoError(t, err)
defer resetNamespace(t, origNamespace)
err = options.Run()
if err != nil {
t.Logf("returned error: %s", err.Error())
}
assert.Errorf(t, err, "the command should have failed due to missing namespace")
}
func TestStepVerifyPreInstallNoKanikoLazyCreate(t *testing.T) {
origJxHome := os.Getenv("JX_HOME")
tmpJxHome, err := ioutil.TempDir("", "jx-test-TestStepVerifyPreInstallNoKanikoLazyCreate")
assert.NoError(t, err)
err = os.Setenv("JX_HOME", tmpJxHome)
assert.NoError(t, err)
defer func() {
_ = os.RemoveAll(tmpJxHome)
err = os.Setenv("JX_HOME", origJxHome)
}()
options := createTestStepVerifyPreInstallOptions(filepath.Join("test_data", "preinstall", "no_kaniko_or_terraform"))
_, origNamespace, err := options.KubeClientAndDevNamespace()
assert.NoError(t, err)
defer resetNamespace(t, origNamespace)
// we default to lazy create if not using terraform
err = options.Run()
assert.NoErrorf(t, err, "the command should not have failed as we should have lazily created the deploy namespace")
}
func TestStepVerifyPreInstallNoTLS(t *testing.T) {
options := createTestStepVerifyPreInstallOptions(filepath.Join("test_data", "preinstall", "no_tls"))
_, origNamespace, err := options.KubeClientAndDevNamespace()
assert.NoError(t, err)
defer resetNamespace(t, origNamespace)
// we default to lazy create if not using terraform
err = options.Run()
assert.NoError(t, err)
}
func TestStepVerifyPreInstallRequirements(t *testing.T) {
tests := map[string]bool{
"lighthouse_gitlab": true,
"prow_github": true,
"prow_gitlab": false,
}
for dir, actual := range tests {
testDir := filepath.Join("test_data", "preinstall", dir)
assert.DirExists(t, testDir)
options := createTestStepVerifyPreInstallOptions(testDir)
options.Namespace = "jx"
_, origNamespace, err := options.KubeClientAndDevNamespace()
assert.NoError(t, err)
defer resetNamespace(t, origNamespace)
requirements, requirementsFileName, err := config.LoadRequirementsConfig(testDir)
assert.NoError(t, err, "for test %s", dir)
err = options.ValidateRequirements(requirements, requirementsFileName)
if actual {
assert.NoError(t, err, "for test %s", dir)
t.Logf("correctly validated test %s", dir)
} else {
assert.Error(t, err, "for test %s", dir)
t.Logf("correctly failed to validate test %s with error: %v", dir, err)
}
}
}
func TestStepVerifyPreInstallSetClusterRequirementsViaEnvars(t *testing.T) {
options := createTestStepVerifyPreInstallOptions(filepath.Join("test_data", "preinstall", "set_cluster_req_via_envvar"))
kc, origNamespace, err := options.KubeClientAndDevNamespace()
assert.NoError(t, err)
defer resetNamespace(t, origNamespace)
// we default to lazy create if not using terraform
err = options.VerifyInstallConfig(kc, origNamespace, config.NewRequirementsConfig(), "")
assert.NoErrorf(t, err, "the command should not have failed as we should have lazily created the deploy namespace")
t.Parallel()
commonOpts := opts.CommonOptions{
BatchMode: false,
}
o := &create.StepCreateInstallValuesOptions{
StepOptions: step.StepOptions{
CommonOptions: &commonOpts,
},
}
dir, err := ioutil.TempDir("", "test_set_cluster_req_via_envvar")
assert.NoError(t, err, "should create a temporary config dir")
o.Dir = dir
file := filepath.Join(o.Dir, config.RequirementsConfigFileName)
requirements := getBaseRequirements()
// using nip.io on gke should disable the use of external dns as we cannot transfer domain ownership to google dns
requirements.Ingress.Domain = "34.76.24.247.nip.io"
requirements.Cluster.Provider = "gke"
err = requirements.SaveConfig(file)
assert.NoError(t, err, "failed to save file %s", file)
requirements, fileName, err := config.LoadRequirementsConfig(o.Dir)
assert.NoError(t, err, "failed to load requirements file in dir %s", o.Dir)
assert.FileExists(t, fileName)
assert.Equal(t, false, requirements.Ingress.ExternalDNS, "requirements.Ingress.ExternalDNS")
}
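// createTestStepVerifyPreInstallOptions builds StepVerifyPreInstallOptions backed by fake clients for the given test data directory.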
func createTestStepVerifyPreInstallOptions(dir string) *verify.StepVerifyPreInstallOptions {
options := &verify.StepVerifyPreInstallOptions{
DisableVerifyHelm: true,
TestKanikoSecretData: "test-kaniko-secret",
}
// fake the output stream to be checked later
commonOpts := opts.NewCommonOptionsWithFactory(nil)
options.CommonOptions = &commonOpts
testhelpers.ConfigureTestOptions(options.CommonOptions, gits_test.NewMockGitter(), helm_test.NewMockHelmer())
testhelpers.SetFakeFactoryFromKubeClients(options.CommonOptions)
options.Dir = dir
options.Namespace = testDeployNamespace
options.Err = os.Stdout
options.Out = os.Stdout
return options
}
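// resetNamespace switches the current namespace back to the one in use before the test ran.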
func resetNamespace(t *testing.T, ns string) {
commonOpts := opts.NewCommonOptionsWithFactory(clients.NewFactory())
commonOpts.Out = os.Stdout
namespaceOptions := &namespace.NamespaceOptions{
CommonOptions: &commonOpts,
}
namespaceOptions.Args = []string{ns}
err := namespaceOptions.Run()
assert.NoError(t, err)
}
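// getBaseRequirements returns a minimal RequirementsConfig used as a starting point by these tests.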
func getBaseRequirements() *config.RequirementsConfig {
requirements := config.NewRequirementsConfig()
requirements.Cluster.ProjectID = "test-project"
requirements.Cluster.ClusterName = "test-cluster"
requirements.Cluster.EnvironmentGitOwner = "test-org"
requirements.Cluster.Zone = "test-zone"
return requirements
}
| [
"\"JX_HOME\""
] | [] | [
"JX_HOME"
] | [] | ["JX_HOME"] | go | 1 | 0 | |
jira/examples/issue-watcher/delete/delete.go | package main
import (
"context"
"github.com/ctreminiom/go-atlassian/jira"
"log"
"os"
)
func main() {
/*
----------- Set an environment variable in git bash -----------
export HOST="https://ctreminiom.atlassian.net/"
export MAIL="MAIL_ADDRESS"
export TOKEN="TOKEN_API"
Docs: https://stackoverflow.com/questions/34169721/set-an-environment-variable-in-git-bash
*/
var (
host = os.Getenv("HOST")
mail = os.Getenv("MAIL")
token = os.Getenv("TOKEN")
)
atlassian, err := jira.New(nil, host)
if err != nil {
log.Fatal(err)
}
atlassian.Auth.SetBasicAuth(mail, token)
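	// Remove the watcher with the given account ID from the issue with key KP-2.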
response, err := atlassian.Issue.Watchers.Delete(context.Background(), "KP-2", "5b86be50b8e3cb5895860d6d")
if err != nil {
log.Fatal(err)
}
log.Println("HTTP Endpoint Used", response.Endpoint)
}
| [
"\"HOST\"",
"\"MAIL\"",
"\"TOKEN\""
] | [] | [
"MAIL",
"HOST",
"TOKEN"
] | [] | ["MAIL", "HOST", "TOKEN"] | go | 3 | 0 | |
cmd/auctioneer/main_test.go | package main_test
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"path"
"time"
"code.cloudfoundry.org/auctioneer"
"code.cloudfoundry.org/auctioneer/cmd/auctioneer/config"
"code.cloudfoundry.org/bbs"
"code.cloudfoundry.org/bbs/models"
"code.cloudfoundry.org/bbs/models/test/model_helpers"
"code.cloudfoundry.org/clock"
"code.cloudfoundry.org/diego-logging-client"
"code.cloudfoundry.org/diego-logging-client/testhelpers"
"code.cloudfoundry.org/durationjson"
"code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2"
"code.cloudfoundry.org/lager/lagerflags"
"code.cloudfoundry.org/locket"
locketconfig "code.cloudfoundry.org/locket/cmd/locket/config"
locketrunner "code.cloudfoundry.org/locket/cmd/locket/testrunner"
"code.cloudfoundry.org/locket/lock"
locketmodels "code.cloudfoundry.org/locket/models"
"code.cloudfoundry.org/rep"
"code.cloudfoundry.org/rep/maintain"
"github.com/hashicorp/consul/api"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/ginkgomon"
)
const (
defaultAuctioneerClientRequestTimeout = 5 * time.Second
)
var dummyAction = &models.RunAction{
User: "me",
Path: "cat",
Args: []string{"/tmp/file"},
}
var exampleDesiredLRP = models.DesiredLRP{
ProcessGuid: "process-guid",
DiskMb: 1,
MemoryMb: 1,
MaxPids: 1,
RootFs: linuxRootFSURL,
Action: models.WrapAction(dummyAction),
Domain: "test",
Instances: 2,
}
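// exampleTaskDefinition returns a valid task definition targeting the Linux rootfs used by the fake cells in these tests.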
func exampleTaskDefinition() *models.TaskDefinition {
taskDef := model_helpers.NewValidTaskDefinition()
taskDef.RootFs = linuxRootFSURL
taskDef.Action = models.WrapAction(dummyAction)
taskDef.PlacementTags = nil
return taskDef
}
var _ = Describe("Auctioneer", func() {
var (
auctioneerConfig config.AuctioneerConfig
runner *ginkgomon.Runner
auctioneerProcess ifrit.Process
auctioneerClient auctioneer.Client
locketRunner ifrit.Runner
locketProcess ifrit.Process
locketAddress string
testIngressServer *testhelpers.TestIngressServer
testMetricsChan chan *loggregator_v2.Envelope
fakeMetronClient *testhelpers.FakeIngressClient
signalMetricsChan chan struct{}
)
BeforeEach(func() {
fixturesPath := path.Join(os.Getenv("GOPATH"), "src/code.cloudfoundry.org/auctioneer/cmd/auctioneer/fixtures")
caFile := path.Join(fixturesPath, "green-certs", "ca.crt")
clientCertFile := path.Join(fixturesPath, "green-certs", "client.crt")
clientKeyFile := path.Join(fixturesPath, "green-certs", "client.key")
metronCAFile := path.Join(fixturesPath, "metron", "CA.crt")
metronClientCertFile := path.Join(fixturesPath, "metron", "client.crt")
metronClientKeyFile := path.Join(fixturesPath, "metron", "client.key")
metronServerCertFile := path.Join(fixturesPath, "metron", "metron.crt")
metronServerKeyFile := path.Join(fixturesPath, "metron", "metron.key")
var err error
testIngressServer, err = testhelpers.NewTestIngressServer(metronServerCertFile, metronServerKeyFile, metronCAFile)
Expect(err).NotTo(HaveOccurred())
receiversChan := testIngressServer.Receivers()
testIngressServer.Start()
metricsPort, err := testIngressServer.Port()
Expect(err).NotTo(HaveOccurred())
testMetricsChan, signalMetricsChan = testhelpers.TestMetricChan(receiversChan)
fakeMetronClient = &testhelpers.FakeIngressClient{}
bbsClient, err = bbs.NewClient(bbsURL.String(), caFile, clientCertFile, clientKeyFile, 0, 0)
Expect(err).NotTo(HaveOccurred())
auctioneerConfig = config.AuctioneerConfig{
AuctionRunnerWorkers: 1000,
CellStateTimeout: durationjson.Duration(1 * time.Second),
CommunicationTimeout: durationjson.Duration(10 * time.Second),
LagerConfig: lagerflags.DefaultLagerConfig(),
LockTTL: durationjson.Duration(locket.DefaultSessionTTL),
StartingContainerCountMaximum: 0,
StartingContainerWeight: .25,
BBSAddress: bbsURL.String(),
BBSCACertFile: caFile,
BBSClientCertFile: clientCertFile,
BBSClientKeyFile: clientKeyFile,
ListenAddress: auctioneerLocation,
LocksLocketEnabled: false,
LockRetryInterval: durationjson.Duration(time.Second),
ConsulCluster: consulRunner.ConsulCluster(),
UUID: "auctioneer-boshy-bosh",
ReportInterval: durationjson.Duration(10 * time.Millisecond),
LoggregatorConfig: diego_logging_client.Config{
BatchFlushInterval: 10 * time.Millisecond,
BatchMaxSize: 1,
UseV2API: true,
APIPort: metricsPort,
CACertPath: metronCAFile,
KeyPath: metronClientKeyFile,
CertPath: metronClientCertFile,
},
}
auctioneerClient = auctioneer.NewClient("http://"+auctioneerLocation, defaultAuctioneerClientRequestTimeout)
})
JustBeforeEach(func() {
configFile, err := ioutil.TempFile("", "auctioneer-config")
Expect(err).NotTo(HaveOccurred())
encoder := json.NewEncoder(configFile)
err = encoder.Encode(&auctioneerConfig)
Expect(err).NotTo(HaveOccurred())
runner = ginkgomon.New(ginkgomon.Config{
Name: "auctioneer",
Command: exec.Command(
auctioneerPath,
"-config", configFile.Name(),
),
StartCheck: "auctioneer.started",
Cleanup: func() {
os.RemoveAll(configFile.Name())
},
})
})
AfterEach(func() {
ginkgomon.Interrupt(locketProcess)
ginkgomon.Interrupt(auctioneerProcess)
testIngressServer.Stop()
close(signalMetricsChan)
})
Context("when the metron agent isn't up", func() {
BeforeEach(func() {
testIngressServer.Stop()
})
It("exits with non-zero status code", func() {
auctioneerProcess = ifrit.Background(runner)
Eventually(auctioneerProcess.Wait()).Should(Receive(HaveOccurred()))
})
})
Context("when the bbs is down", func() {
BeforeEach(func() {
ginkgomon.Interrupt(bbsProcess)
})
It("starts", func() {
auctioneerProcess = ginkgomon.Invoke(runner)
Consistently(runner).ShouldNot(Exit())
})
})
Context("when the auctioneer starts up", func() {
Context("when consul service registration is enabled", func() {
BeforeEach(func() {
auctioneerConfig.EnableConsulServiceRegistration = true
})
It("registers itself as a service and registers a TTL Healthcheck", func() {
auctioneerProcess = ginkgomon.Invoke(runner)
client := consulRunner.NewClient()
services, err := client.Agent().Services()
Expect(err).NotTo(HaveOccurred())
Expect(services).To(HaveKeyWithValue("auctioneer", &api.AgentService{
ID: "auctioneer",
Service: "auctioneer",
Port: int(auctioneerServerPort),
Address: "",
}))
checks, err := client.Agent().Checks()
Expect(err).NotTo(HaveOccurred())
Expect(checks).To(HaveKeyWithValue("service:auctioneer", &api.AgentCheck{
Node: "0",
CheckID: "service:auctioneer",
Name: "Service 'auctioneer' check",
Status: "passing",
Notes: "",
Output: "",
ServiceID: "auctioneer",
ServiceName: "auctioneer",
}))
})
})
Context("when consul service registration is disabled", func() {
It("does not register itself with consul", func() {
auctioneerProcess = ginkgomon.Invoke(runner)
client := consulRunner.NewClient()
services, err := client.Agent().Services()
Expect(err).NotTo(HaveOccurred())
Expect(services).NotTo(HaveKey("auctioneer"))
})
})
Context("when a debug address is specified", func() {
BeforeEach(func() {
port, err := portAllocator.ClaimPorts(1)
Expect(err).NotTo(HaveOccurred())
auctioneerConfig.DebugAddress = fmt.Sprintf("0.0.0.0:%d", port)
})
It("starts the debug server", func() {
auctioneerProcess = ginkgomon.Invoke(runner)
_, err := net.Dial("tcp", auctioneerConfig.DebugAddress)
Expect(err).NotTo(HaveOccurred())
})
})
})
Context("with cells of different stacks", func() {
var (
dotNetCell, linuxCell *FakeCell
)
BeforeEach(func() {
cellPresenceClient := maintain.NewCellPresenceClient(consulClient, clock.NewClock())
dotNetCell = NewFakeCell(cellPresenceClient, "dot-net-cell", "", dotNetStack, 100, 0)
linuxCell = NewFakeCell(cellPresenceClient, "linux-cell", "", linuxStack, 100, 0)
dotNetCell.SpinUp(cellPresenceClient)
linuxCell.SpinUp(cellPresenceClient)
})
AfterEach(func() {
dotNetCell.Stop()
linuxCell.Stop()
})
Context("when a start auction message arrives", func() {
It("should start the process running on reps of the appropriate stack", func() {
auctioneerProcess = ginkgomon.Invoke(runner)
err := auctioneerClient.RequestLRPAuctions(logger, []*auctioneer.LRPStartRequest{{
ProcessGuid: exampleDesiredLRP.ProcessGuid,
Domain: exampleDesiredLRP.Domain,
Indices: []int{0},
Resource: rep.Resource{
MemoryMB: 5,
DiskMB: 5,
},
PlacementConstraint: rep.PlacementConstraint{
RootFs: exampleDesiredLRP.RootFs,
},
}})
Expect(err).NotTo(HaveOccurred())
err = auctioneerClient.RequestLRPAuctions(logger, []*auctioneer.LRPStartRequest{{
ProcessGuid: exampleDesiredLRP.ProcessGuid,
Domain: exampleDesiredLRP.Domain,
Indices: []int{1},
Resource: rep.Resource{
MemoryMB: 5,
DiskMB: 5,
},
PlacementConstraint: rep.PlacementConstraint{
RootFs: exampleDesiredLRP.RootFs,
},
}})
Expect(err).NotTo(HaveOccurred())
Eventually(linuxCell.LRPs).Should(HaveLen(2))
Expect(dotNetCell.LRPs()).To(BeEmpty())
})
})
Context("when exceeding max inflight container counts", func() {
BeforeEach(func() {
auctioneerConfig.StartingContainerCountMaximum = 1
})
It("should only start up to the max inflight processes", func() {
auctioneerProcess = ginkgomon.Invoke(runner)
err := auctioneerClient.RequestLRPAuctions(logger, []*auctioneer.LRPStartRequest{{
ProcessGuid: exampleDesiredLRP.ProcessGuid,
Domain: exampleDesiredLRP.Domain,
Indices: []int{0},
Resource: rep.Resource{
MemoryMB: 5,
DiskMB: 5,
},
PlacementConstraint: rep.PlacementConstraint{
RootFs: exampleDesiredLRP.RootFs,
},
}})
Expect(err).NotTo(HaveOccurred())
err = auctioneerClient.RequestLRPAuctions(logger, []*auctioneer.LRPStartRequest{{
ProcessGuid: exampleDesiredLRP.ProcessGuid,
Domain: exampleDesiredLRP.Domain,
Indices: []int{1},
Resource: rep.Resource{
MemoryMB: 5,
DiskMB: 5,
},
}})
Expect(err).NotTo(HaveOccurred())
Eventually(linuxCell.LRPs).Should(HaveLen(1))
})
})
Context("when a task message arrives", func() {
Context("when there are sufficient resources to start the task", func() {
It("should start the task running on reps of the appropriate stack", func() {
auctioneerProcess = ginkgomon.Invoke(runner)
taskDef := exampleTaskDefinition()
taskDef.DiskMb = 1
taskDef.MemoryMb = 1
taskDef.MaxPids = 1
err := bbsClient.DesireTask(logger, "guid", "domain", taskDef)
Expect(err).NotTo(HaveOccurred())
Eventually(linuxCell.Tasks).Should(HaveLen(1))
Expect(dotNetCell.Tasks()).To(BeEmpty())
})
})
Context("when there are insufficient resources to start the task", func() {
var taskDef *models.TaskDefinition
BeforeEach(func() {
taskDef = exampleTaskDefinition()
taskDef.DiskMb = 1000
taskDef.MemoryMb = 1000
taskDef.MaxPids = 1000
})
It("should not place the tasks and mark the task as failed in the BBS", func() {
auctioneerProcess = ginkgomon.Invoke(runner)
err := bbsClient.DesireTask(logger, "task-guid", "domain", taskDef)
Expect(err).NotTo(HaveOccurred())
Consistently(linuxCell.Tasks).Should(BeEmpty())
Consistently(dotNetCell.Tasks).Should(BeEmpty())
Eventually(func() []*models.Task {
return getTasksByState(bbsClient, models.Task_Completed)
}).Should(HaveLen(1))
completedTasks := getTasksByState(bbsClient, models.Task_Completed)
completedTask := completedTasks[0]
Expect(completedTask.TaskGuid).To(Equal("task-guid"))
Expect(completedTask.Failed).To(BeTrue())
Expect(completedTask.FailureReason).To(Equal("insufficient resources: disk, memory"))
})
})
})
})
Context("with a proxy-enabled cell and a proxy-disabled cell", func() {
const (
proxiedCellAvailableMemory = 268
unproxiedCellAvailableMemory = 256
proxyMemoryFootprint = 32
lrpRequiredMemory = 256
)
var (
proxiedCell *FakeCell
unproxiedCell *FakeCell
)
BeforeEach(func() {
cellPresenceClient := maintain.NewCellPresenceClient(consulClient, clock.NewClock())
proxiedCell = NewFakeCell(cellPresenceClient, "proxy-enabled-cell", "", linuxStack, proxiedCellAvailableMemory, proxyMemoryFootprint)
unproxiedCell = NewFakeCell(cellPresenceClient, "proxy-disabled-cell", "", linuxStack, unproxiedCellAvailableMemory, 0)
proxiedCell.SpinUp(cellPresenceClient)
unproxiedCell.SpinUp(cellPresenceClient)
})
AfterEach(func() {
proxiedCell.Stop()
unproxiedCell.Stop()
})
Context("when auctioning the lrp on the proxy-enabled cell puts lrp's memory requirements above proxied cell's memory limits", func() {
It("auctions the cell on the proxy-disabled cell", func() {
auctioneerProcess = ginkgomon.Invoke(runner)
err := auctioneerClient.RequestLRPAuctions(logger, []*auctioneer.LRPStartRequest{
{
ProcessGuid: exampleDesiredLRP.ProcessGuid,
Domain: exampleDesiredLRP.Domain,
Indices: []int{0},
Resource: rep.Resource{
MemoryMB: lrpRequiredMemory,
DiskMB: 5,
},
PlacementConstraint: rep.PlacementConstraint{
RootFs: exampleDesiredLRP.RootFs,
},
},
})
Expect(err).NotTo(HaveOccurred())
Consistently(proxiedCell.LRPs).Should(HaveLen(0))
Eventually(unproxiedCell.LRPs).Should(HaveLen(1))
})
})
})
Context("when the auctioneer loses the consul lock", func() {
It("exits with an error", func() {
auctioneerProcess = ginkgomon.Invoke(runner)
consulRunner.Reset()
Eventually(runner.ExitCode, 3).Should(Equal(1))
})
})
Context("when the auctioneer cannot acquire the consul lock on startup", func() {
var (
task *rep.Task
competingAuctioneerProcess ifrit.Process
)
JustBeforeEach(func() {
task = &rep.Task{
TaskGuid: "task-guid",
Domain: "test",
Resource: rep.Resource{
MemoryMB: 124,
DiskMB: 456,
},
PlacementConstraint: rep.PlacementConstraint{
RootFs: "some-rootfs",
},
}
competingAuctioneerLock := locket.NewLock(logger, consulClient, locket.LockSchemaPath("auctioneer_lock"), []byte{}, clock.NewClock(), 500*time.Millisecond, 10*time.Second, locket.WithMetronClient(fakeMetronClient))
competingAuctioneerProcess = ifrit.Invoke(competingAuctioneerLock)
auctioneerProcess = ifrit.Background(runner)
})
AfterEach(func() {
ginkgomon.Kill(competingAuctioneerProcess)
})
It("should not advertise its presence, and should not be reachable", func() {
Consistently(func() error {
return auctioneerClient.RequestTaskAuctions(logger, []*auctioneer.TaskStartRequest{
&auctioneer.TaskStartRequest{*task},
})
}).Should(HaveOccurred())
})
It("should eventually come up in the event that the lock is released", func() {
ginkgomon.Kill(competingAuctioneerProcess)
Eventually(func() error {
return auctioneerClient.RequestTaskAuctions(logger, []*auctioneer.TaskStartRequest{
&auctioneer.TaskStartRequest{*task},
})
}).ShouldNot(HaveOccurred())
})
})
Context("when the auctioneer is configured to grab the lock from the sql locking server", func() {
var (
task *rep.Task
competingAuctioneerProcess ifrit.Process
)
BeforeEach(func() {
task = &rep.Task{
TaskGuid: "task-guid",
Domain: "test",
Resource: rep.Resource{
MemoryMB: 124,
DiskMB: 456,
},
PlacementConstraint: rep.PlacementConstraint{
RootFs: "some-rootfs",
},
}
locketPort, err := portAllocator.ClaimPorts(1)
Expect(err).NotTo(HaveOccurred())
locketAddress = fmt.Sprintf("localhost:%d", locketPort)
locketRunner = locketrunner.NewLocketRunner(locketBinPath, func(cfg *locketconfig.LocketConfig) {
cfg.ConsulCluster = consulRunner.ConsulCluster()
cfg.DatabaseConnectionString = sqlRunner.ConnectionString()
cfg.DatabaseDriver = sqlRunner.DriverName()
cfg.ListenAddress = locketAddress
})
locketProcess = ginkgomon.Invoke(locketRunner)
auctioneerConfig.LocksLocketEnabled = true
auctioneerConfig.ClientLocketConfig = locketrunner.ClientLocketConfig()
auctioneerConfig.LocketAddress = locketAddress
})
JustBeforeEach(func() {
auctioneerProcess = ifrit.Background(runner)
})
AfterEach(func() {
ginkgomon.Interrupt(auctioneerProcess)
})
It("acquires the lock and becomes active", func() {
Eventually(func() error {
return auctioneerClient.RequestTaskAuctions(logger, []*auctioneer.TaskStartRequest{
&auctioneer.TaskStartRequest{*task},
})
}).ShouldNot(HaveOccurred())
})
It("uses the configured UUID as the owner", func() {
locketClient, err := locket.NewClient(logger, auctioneerConfig.ClientLocketConfig)
Expect(err).NotTo(HaveOccurred())
var lock *locketmodels.FetchResponse
Eventually(func() error {
lock, err = locketClient.Fetch(context.Background(), &locketmodels.FetchRequest{
Key: "auctioneer",
})
return err
}).ShouldNot(HaveOccurred())
Expect(lock.Resource.Owner).To(Equal(auctioneerConfig.UUID))
})
It("emits metric about holding lock", func() {
Eventually(func() error {
return auctioneerClient.RequestTaskAuctions(logger, []*auctioneer.TaskStartRequest{
&auctioneer.TaskStartRequest{*task},
})
}).ShouldNot(HaveOccurred())
Eventually(testMetricsChan).Should(Receive(testhelpers.MatchV2MetricAndValue(testhelpers.MetricAndValue{
Name: "LockHeld",
Value: 1,
})))
})
Context("and the locking server becomes unreachable after grabbing the lock", func() {
It("exits", func() {
ginkgomon.Interrupt(locketProcess)
Eventually(auctioneerProcess.Wait()).Should(Receive())
})
})
Context("when the consul lock is not required", func() {
BeforeEach(func() {
auctioneerConfig.SkipConsulLock = true
competingAuctioneerLock := locket.NewLock(logger, consulClient, locket.LockSchemaPath("auctioneer_lock"), []byte{}, clock.NewClock(), 500*time.Millisecond, 10*time.Second, locket.WithMetronClient(fakeMetronClient))
competingAuctioneerProcess = ifrit.Invoke(competingAuctioneerLock)
})
AfterEach(func() {
ginkgomon.Interrupt(competingAuctioneerProcess)
})
It("only grabs the sql lock and starts succesfully", func() {
Eventually(func() error {
return auctioneerClient.RequestTaskAuctions(logger, []*auctioneer.TaskStartRequest{
&auctioneer.TaskStartRequest{*task},
})
}).ShouldNot(HaveOccurred())
})
})
Context("when the lock is not available", func() {
var competingProcess ifrit.Process
BeforeEach(func() {
locketClient, err := locket.NewClient(logger, auctioneerConfig.ClientLocketConfig)
Expect(err).NotTo(HaveOccurred())
lockIdentifier := &locketmodels.Resource{
Key: "auctioneer",
Owner: "Your worst enemy.",
Value: "Something",
TypeCode: locketmodels.LOCK,
}
clock := clock.NewClock()
competingRunner := lock.NewLockRunner(
logger,
locketClient,
lockIdentifier,
locket.DefaultSessionTTLInSeconds,
clock,
locket.RetryInterval,
)
competingProcess = ginkgomon.Invoke(competingRunner)
})
AfterEach(func() {
ginkgomon.Interrupt(competingProcess)
})
It("starts but does not accept auctions", func() {
Consistently(func() error {
return auctioneerClient.RequestTaskAuctions(logger, []*auctioneer.TaskStartRequest{
&auctioneer.TaskStartRequest{*task},
})
}).Should(HaveOccurred())
})
It("emits metric about not holding lock", func() {
Eventually(runner.Buffer()).Should(gbytes.Say("failed-to-acquire-lock"))
Eventually(testMetricsChan).Should(Receive(testhelpers.MatchV2MetricAndValue(testhelpers.MetricAndValue{
Name: "LockHeld",
Value: 0,
})))
})
Context("and continues to be unavailable", func() {
It("exits", func() {
Eventually(auctioneerProcess.Wait(), locket.DefaultSessionTTL*2).Should(Receive())
})
})
Context("and the lock becomes available", func() {
JustBeforeEach(func() {
Eventually(runner.Buffer()).Should(gbytes.Say(
"failed-to-acquire-lock"))
ginkgomon.Interrupt(competingProcess)
})
It("acquires the lock and becomes active", func() {
Eventually(func() error {
return auctioneerClient.RequestTaskAuctions(logger, []*auctioneer.TaskStartRequest{
&auctioneer.TaskStartRequest{*task},
})
}, 2*time.Second).ShouldNot(HaveOccurred())
})
})
})
Context("and the locket address is invalid", func() {
BeforeEach(func() {
auctioneerConfig.LocketAddress = "{{{}}}}{{{{"
})
It("exits with an error", func() {
Eventually(auctioneerProcess.Wait()).Should(Receive(Not(BeNil())))
})
})
Context("when the locket addess isn't set", func() {
BeforeEach(func() {
auctioneerConfig.LocketAddress = ""
})
It("exits with an error", func() {
Eventually(auctioneerProcess.Wait()).Should(Receive(Not(BeNil())))
})
})
Context("and the UUID is not present", func() {
BeforeEach(func() {
auctioneerConfig.UUID = ""
})
It("exits with an error", func() {
Eventually(auctioneerProcess.Wait()).Should(Receive())
})
})
Context("when neither lock is configured", func() {
BeforeEach(func() {
auctioneerConfig.LocksLocketEnabled = false
auctioneerConfig.SkipConsulLock = true
})
It("exits with an error", func() {
Eventually(auctioneerProcess.Wait()).Should(Receive())
})
})
})
Context("when the auctioneer is configured with TLS options", func() {
var caCertFile, serverCertFile, serverKeyFile string
BeforeEach(func() {
caCertFile = "fixtures/green-certs/ca.crt"
serverCertFile = "fixtures/green-certs/server.crt"
serverKeyFile = "fixtures/green-certs/server.key"
auctioneerConfig.CACertFile = caCertFile
auctioneerConfig.ServerCertFile = serverCertFile
auctioneerConfig.ServerKeyFile = serverKeyFile
})
JustBeforeEach(func() {
auctioneerProcess = ifrit.Background(runner)
})
AfterEach(func() {
ginkgomon.Interrupt(auctioneerProcess)
})
Context("when invalid values for the certificates are supplied", func() {
BeforeEach(func() {
auctioneerConfig.CACertFile = caCertFile
auctioneerConfig.ServerCertFile = "invalid-certs/server.crt"
auctioneerConfig.ServerKeyFile = serverKeyFile
})
It("fails", func() {
Eventually(runner.Buffer()).Should(gbytes.Say(
"invalid-tls-config"))
Eventually(runner.ExitCode()).ShouldNot(Equal(0))
})
})
Context("when invalid combinations of the certificates are supplied", func() {
Context("when the server cert file isn't specified", func() {
BeforeEach(func() {
auctioneerConfig.CACertFile = caCertFile
auctioneerConfig.ServerCertFile = ""
auctioneerConfig.ServerKeyFile = serverKeyFile
})
It("fails", func() {
Eventually(runner.Buffer()).Should(gbytes.Say(
"invalid-tls-config"))
Eventually(runner.ExitCode()).ShouldNot(Equal(0))
})
})
Context("when the server cert file and server key file aren't specified", func() {
BeforeEach(func() {
auctioneerConfig.CACertFile = caCertFile
auctioneerConfig.ServerCertFile = ""
auctioneerConfig.ServerKeyFile = ""
})
It("fails", func() {
Eventually(runner.Buffer()).Should(gbytes.Say(
"invalid-tls-config"))
Eventually(runner.ExitCode()).ShouldNot(Equal(0))
})
})
Context("when the server key file isn't specified", func() {
BeforeEach(func() {
auctioneerConfig.CACertFile = caCertFile
auctioneerConfig.ServerCertFile = serverCertFile
auctioneerConfig.ServerKeyFile = ""
})
It("fails", func() {
Eventually(runner.Buffer()).Should(gbytes.Say(
"invalid-tls-config"))
Eventually(runner.ExitCode()).ShouldNot(Equal(0))
})
})
})
Context("when the server key and the CA cert don't match", func() {
BeforeEach(func() {
auctioneerConfig.CACertFile = caCertFile
auctioneerConfig.ServerCertFile = serverCertFile
auctioneerConfig.ServerKeyFile = "fixtures/blue-certs/server.key"
})
It("fails", func() {
Eventually(runner.Buffer()).Should(gbytes.Say(
"invalid-tls-config"))
Eventually(runner.ExitCode()).ShouldNot(Equal(0))
})
})
Context("when correct TLS options are supplied", func() {
It("starts", func() {
Eventually(auctioneerProcess.Ready()).Should(BeClosed())
Consistently(runner).ShouldNot(Exit())
})
It("responds successfully to a TLS client", func() {
Eventually(auctioneerProcess.Ready()).Should(BeClosed())
secureAuctioneerClient, err := auctioneer.NewSecureClient("https://"+auctioneerLocation, caCertFile, serverCertFile, serverKeyFile, false, defaultAuctioneerClientRequestTimeout)
Expect(err).NotTo(HaveOccurred())
err = secureAuctioneerClient.RequestLRPAuctions(logger, nil)
Expect(err).NotTo(HaveOccurred())
})
})
})
Context("Auctioneer Client", func() {
var client auctioneer.Client
JustBeforeEach(func() {
auctioneerProcess = ginkgomon.Invoke(runner)
})
Context("when the auctioneer is configured with TLS", func() {
BeforeEach(func() {
auctioneerConfig.CACertFile = "fixtures/green-certs/ca.crt"
auctioneerConfig.ServerCertFile = "fixtures/green-certs/server.crt"
auctioneerConfig.ServerKeyFile = "fixtures/green-certs/server.key"
})
Context("and the auctioneer client is not configured with TLS", func() {
BeforeEach(func() {
client = auctioneer.NewClient("http://"+auctioneerLocation, defaultAuctioneerClientRequestTimeout)
})
It("does not work", func() {
err := client.RequestLRPAuctions(logger, []*auctioneer.LRPStartRequest{})
Expect(err).To(HaveOccurred())
err = client.RequestTaskAuctions(logger, []*auctioneer.TaskStartRequest{})
Expect(err).To(HaveOccurred())
})
})
Context("and the auctioneer client is configured with tls", func() {
BeforeEach(func() {
var err error
client, err = auctioneer.NewSecureClient(
"https://"+auctioneerLocation,
"fixtures/green-certs/ca.crt",
"fixtures/green-certs/client.crt",
"fixtures/green-certs/client.key",
true,
defaultAuctioneerClientRequestTimeout,
)
Expect(err).NotTo(HaveOccurred())
})
It("works", func() {
err := client.RequestLRPAuctions(logger, []*auctioneer.LRPStartRequest{})
Expect(err).NotTo(HaveOccurred())
err = client.RequestTaskAuctions(logger, []*auctioneer.TaskStartRequest{})
Expect(err).NotTo(HaveOccurred())
})
})
})
Context("when the auctioneer is not configured with TLS", func() {
Context("and the auctioneer client is not configured with TLS", func() {
BeforeEach(func() {
client = auctioneer.NewClient("http://"+auctioneerLocation, defaultAuctioneerClientRequestTimeout)
})
It("works", func() {
err := client.RequestLRPAuctions(logger, []*auctioneer.LRPStartRequest{})
Expect(err).NotTo(HaveOccurred())
err = client.RequestTaskAuctions(logger, []*auctioneer.TaskStartRequest{})
Expect(err).NotTo(HaveOccurred())
})
})
Context("and the auctioneer client is configured with TLS", func() {
Context("and the client requires tls", func() {
BeforeEach(func() {
var err error
client, err = auctioneer.NewSecureClient(
"https://"+auctioneerLocation,
"fixtures/green-certs/ca.crt",
"fixtures/green-certs/client.crt",
"fixtures/green-certs/client.key",
true,
defaultAuctioneerClientRequestTimeout,
)
Expect(err).NotTo(HaveOccurred())
})
It("does not work", func() {
err := client.RequestLRPAuctions(logger, []*auctioneer.LRPStartRequest{})
Expect(err).To(HaveOccurred())
err = client.RequestTaskAuctions(logger, []*auctioneer.TaskStartRequest{})
Expect(err).To(HaveOccurred())
})
})
Context("and the client does not require tls", func() {
BeforeEach(func() {
var err error
client, err = auctioneer.NewSecureClient(
"https://"+auctioneerLocation,
"fixtures/green-certs/ca.crt",
"fixtures/green-certs/client.crt",
"fixtures/green-certs/client.key",
false,
defaultAuctioneerClientRequestTimeout,
)
Expect(err).NotTo(HaveOccurred())
})
It("falls back to http and does work", func() {
err := client.RequestLRPAuctions(logger, []*auctioneer.LRPStartRequest{})
Expect(err).NotTo(HaveOccurred())
err = client.RequestTaskAuctions(logger, []*auctioneer.TaskStartRequest{})
Expect(err).NotTo(HaveOccurred())
})
})
})
})
})
})
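// getTasksByState fetches all tasks from the BBS and returns only those currently in the given state.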
func getTasksByState(client bbs.InternalClient, state models.Task_State) []*models.Task {
tasks, err := client.Tasks(logger)
Expect(err).NotTo(HaveOccurred())
filteredTasks := make([]*models.Task, 0)
for _, task := range tasks {
if task.State == state {
filteredTasks = append(filteredTasks, task)
}
}
return filteredTasks
}
| [
"\"GOPATH\""
] | [] | [
"GOPATH"
] | [] | ["GOPATH"] | go | 1 | 0 | |
release/lightgbm_tests/workloads/train_moderate.py | """Moderate cluster training
This training run will start 32 workers on 32 nodes (including head node).
Test owner: Yard1 (primary), krfricke
Acceptance criteria: Should run through and report final results.
"""
import json
import os
import time
import ray
from lightgbm_ray import RayParams
from ray.util.lightgbm.release_test_util import train_ray
if __name__ == "__main__":
ray.init(address="auto")
ray_params = RayParams(
elastic_training=False,
max_actor_restarts=2,
num_actors=32,
cpus_per_actor=4,
gpus_per_actor=0,
)
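# Time the distributed LightGBM training run across the 32 Ray actors.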
start = time.time()
train_ray(
path="/data/classification.parquet",
num_workers=None,
num_boost_rounds=100,
num_files=128,
regression=False,
use_gpu=False,
ray_params=ray_params,
lightgbm_params=None,
)
taken = time.time() - start
result = {
"time_taken": taken,
}
test_output_json = os.environ.get("TEST_OUTPUT_JSON", "/tmp/train_moderate.json")
with open(test_output_json, "wt") as f:
json.dump(result, f)
print("PASSED.")
| [] | [] | [
"TEST_OUTPUT_JSON"
] | [] | ["TEST_OUTPUT_JSON"] | python | 1 | 0 | |
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/wsgi.py | """
WSGI config for {{ cookiecutter.project_name }} project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from pathlib import Path
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# {{ cookiecutter.project_slug }} directory.
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "{{ cookiecutter.project_slug }}"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "{{ cookiecutter.project_slug }}.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ cookiecutter.project_slug }}.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [] | [] | [
"DJANGO_SETTINGS_MODULE"
] | [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_dl_gateway.go | // Copyright IBM Corp. 2017, 2021 All Rights Reserved.
// Licensed under the Mozilla Public License v2.0
package ibm
import (
"fmt"
"log"
"os"
"strings"
"time"
"github.com/IBM/networking-go-sdk/directlinkv1"
"github.com/hashicorp/terraform-plugin-sdk/helper/customdiff"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)
const (
dlLoaRejectReason = "loa_reject_reason"
dlCustomerName = "customer_name"
dlCarrierName = "carrier_name"
dlResourceGroup = "resource_group"
dlBgpAsn = "bgp_asn"
dlBgpBaseCidr = "bgp_base_cidr"
dlBgpCerCidr = "bgp_cer_cidr"
dlBgpIbmCidr = "bgp_ibm_cidr"
dlCrossConnectRouter = "cross_connect_router"
dlGlobal = "global"
dlLocationName = "location_name"
dlName = "name"
dlSpeedMbps = "speed_mbps"
dlOperationalStatus = "operational_status"
dlBgpStatus = "bgp_status"
dlLinkStatus = "link_status"
dlType = "type"
dlCrn = "crn"
dlCreatedAt = "created_at"
dlMetered = "metered"
dlLocationDisplayName = "location_display_name"
dlBgpIbmAsn = "bgp_ibm_asn"
dlCompletionNoticeRejectReason = "completion_notice_reject_reason"
dlPort = "port"
dlProviderAPIManaged = "provider_api_managed"
dlVlan = "vlan"
dlTags = "tags"
dlActive = "active"
dlFallbackCak = "fallback_cak"
dlPrimaryCak = "primary_cak"
dlSakExpiryTime = "sak_expiry_time"
dlWindowSize = "window_size"
dlMacSecConfig = "macsec_config"
dlCipherSuite = "cipher_suite"
dlConfidentialityOffset = "confidentiality_offset"
dlCryptographicAlgorithm = "cryptographic_algorithm"
dlKeyServerPriority = "key_server_priority"
dlMacSecConfigStatus = "status"
dlChangeRequest = "change_request"
dlGatewayProvisioning = "configuring"
dlGatewayProvisioningDone = "provisioned"
dlGatewayProvisioningRejected = "create_rejected"
)
func resourceIBMDLGateway() *schema.Resource {
return &schema.Resource{
Create: resourceIBMdlGatewayCreate,
Read: resourceIBMdlGatewayRead,
Delete: resourceIBMdlGatewayDelete,
Exists: resourceIBMdlGatewayExists,
Update: resourceIBMdlGatewayUpdate,
Importer: &schema.ResourceImporter{},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(60 * time.Minute),
Delete: schema.DefaultTimeout(60 * time.Minute),
Update: schema.DefaultTimeout(60 * time.Minute),
},
CustomizeDiff: customdiff.Sequence(
func(diff *schema.ResourceDiff, v interface{}) error {
return resourceTagsCustomizeDiff(diff)
},
),
Schema: map[string]*schema.Schema{
dlBgpAsn: {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "BGP ASN",
},
dlBgpBaseCidr: {
Type: schema.TypeString,
Optional: true,
ForceNew: false,
DiffSuppressFunc: applyOnce,
Description: "BGP base CIDR",
},
dlPort: {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Computed: true,
Description: "Gateway port",
ConflictsWith: []string{"location_name", "cross_connect_router", "carrier_name", "customer_name"},
},
dlCrossConnectRouter: {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Description: "Cross connect router",
},
dlGlobal: {
Type: schema.TypeBool,
Required: true,
ForceNew: false,
Description: "Gateways with global routing (true) can connect to networks outside their associated region",
},
dlLocationName: {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Computed: true,
Description: "Gateway location",
},
dlMetered: {
Type: schema.TypeBool,
Required: true,
ForceNew: false,
Description: "Metered billing option",
},
dlName: {
Type: schema.TypeString,
Required: true,
ForceNew: false,
Description: "The unique user-defined name for this gateway",
ValidateFunc: InvokeValidator("ibm_dl_gateway", dlName),
// ValidateFunc: validateRegexpLen(1, 63, "^([a-zA-Z]|[a-zA-Z][-_a-zA-Z0-9]*[a-zA-Z0-9])$"),
},
dlCarrierName: {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Description: "Carrier name",
// ValidateFunc: validateRegexpLen(1, 128, "^[a-z][A-Z][0-9][ -_]$"),
},
dlCustomerName: {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Description: "Customer name",
// ValidateFunc: validateRegexpLen(1, 128, "^[a-z][A-Z][0-9][ -_]$"),
},
dlSpeedMbps: {
Type: schema.TypeInt,
Required: true,
ForceNew: false,
Description: "Gateway speed in megabits per second",
},
dlType: {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "Gateway type",
ValidateFunc: InvokeValidator("ibm_dl_gateway", dlType),
// ValidateFunc: validateAllowedStringValue([]string{"dedicated", "connect"}),
},
dlMacSecConfig: {
Type: schema.TypeList,
MinItems: 0,
MaxItems: 1,
Optional: true,
ForceNew: false,
Description: "MACsec configuration information",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
dlActive: {
Type: schema.TypeBool,
Required: true,
ForceNew: false,
Description: "Indicate whether MACsec protection should be active (true) or inactive (false) for this MACsec enabled gateway",
},
dlPrimaryCak: {
Type: schema.TypeString,
Required: true,
ForceNew: false,
Description: "Desired primary connectivity association key. Keys for a MACsec configuration must have names with an even number of characters from [0-9a-fA-F]",
},
dlFallbackCak: {
Type: schema.TypeString,
Optional: true,
ForceNew: false,
Description: "Fallback connectivity association key. Keys used for MACsec configuration must have names with an even number of characters from [0-9a-fA-F]",
},
dlWindowSize: {
Type: schema.TypeInt,
Optional: true,
ForceNew: false,
Default: 148809600,
Description: "Replay protection window size",
},
dlActiveCak: {
Type: schema.TypeString,
Computed: true,
Description: "Active connectivity association key.",
},
dlSakExpiryTime: {
Type: schema.TypeInt,
Computed: true,
Description: "Secure Association Key (SAK) expiry time in seconds",
},
dlCipherSuite: {
Type: schema.TypeString,
Computed: true,
Description: "SAK cipher suite",
},
dlConfidentialityOffset: {
Type: schema.TypeInt,
Computed: true,
Description: "Confidentiality Offset",
},
dlCryptographicAlgorithm: {
Type: schema.TypeString,
Computed: true,
Description: "Cryptographic Algorithm",
},
dlKeyServerPriority: {
Type: schema.TypeInt,
Computed: true,
Description: "Key Server Priority",
},
dlMacSecConfigStatus: {
Type: schema.TypeString,
Computed: true,
Description: "The current status of MACsec on the device for this gateway",
},
dlSecurityPolicy: {
Type: schema.TypeString,
Computed: true,
Description: "Packets without MACsec headers are not dropped when security_policy is should_secure.",
},
},
},
},
dlBgpCerCidr: {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
Description: "BGP customer edge router CIDR",
},
dlLoaRejectReason: {
Type: schema.TypeString,
Computed: true,
Optional: true,
ForceNew: false,
Description: "Loa reject reason",
},
dlBgpIbmCidr: {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
Description: "BGP IBM CIDR",
},
dlResourceGroup: {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
Description: "Gateway resource group",
},
dlOperationalStatus: {
Type: schema.TypeString,
Computed: true,
Description: "Gateway operational status",
},
dlProviderAPIManaged: {
Type: schema.TypeBool,
Computed: true,
Description: "Indicates whether gateway was created through a provider portal",
},
dlVlan: {
Type: schema.TypeInt,
Computed: true,
Description: "VLAN allocated for this gateway",
},
dlBgpIbmAsn: {
Type: schema.TypeInt,
Computed: true,
Description: "IBM BGP ASN",
},
dlBgpStatus: {
Type: schema.TypeString,
Computed: true,
Description: "Gateway BGP status",
},
dlChangeRequest: {
Type: schema.TypeString,
Computed: true,
Description: "Changes pending approval for provider managed Direct Link Connect gateways",
},
dlCompletionNoticeRejectReason: {
Type: schema.TypeString,
Computed: true,
Description: "Reason for completion notice rejection",
},
dlCreatedAt: {
Type: schema.TypeString,
Computed: true,
Description: "The date and time resource was created",
},
dlCrn: {
Type: schema.TypeString,
Computed: true,
Description: "The CRN (Cloud Resource Name) of this gateway",
},
dlLinkStatus: {
Type: schema.TypeString,
Computed: true,
Description: "Gateway link status",
},
dlLocationDisplayName: {
Type: schema.TypeString,
Computed: true,
Description: "Gateway location long name",
},
dlTags: {
Type: schema.TypeSet,
Optional: true,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_dl_gateway", "tag")},
Set: resourceIBMVPCHash,
Description: "Tags for the direct link gateway",
},
ResourceControllerURL: {
Type: schema.TypeString,
Computed: true,
Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance",
},
ResourceName: {
Type: schema.TypeString,
Computed: true,
Description: "The name of the resource",
},
ResourceCRN: {
Type: schema.TypeString,
Computed: true,
Description: "The crn of the resource",
},
ResourceStatus: {
Type: schema.TypeString,
Computed: true,
Description: "The status of the resource",
},
ResourceGroupName: {
Type: schema.TypeString,
Computed: true,
Description: "The resource group name in which resource is provisioned",
},
},
}
}
func resourceIBMDLGatewayValidator() *ResourceValidator {
validateSchema := make([]ValidateSchema, 2)
dlTypeAllowedValues := "dedicated, connect"
validateSchema = append(validateSchema,
ValidateSchema{
Identifier: dlType,
ValidateFunctionIdentifier: ValidateAllowedStringValue,
Type: TypeString,
Required: true,
AllowedValues: dlTypeAllowedValues})
validateSchema = append(validateSchema,
ValidateSchema{
Identifier: dlName,
ValidateFunctionIdentifier: ValidateRegexpLen,
Type: TypeString,
Required: true,
Regexp: `^([a-zA-Z]|[a-zA-Z][-_a-zA-Z0-9]*[a-zA-Z0-9])$`,
MinValueLength: 1,
MaxValueLength: 63})
validateSchema = append(validateSchema,
ValidateSchema{
Identifier: "tag",
ValidateFunctionIdentifier: ValidateRegexpLen,
Type: TypeString,
Optional: true,
Regexp: `^[A-Za-z0-9:_ .-]+$`,
MinValueLength: 1,
MaxValueLength: 128})
ibmISDLGatewayResourceValidator := ResourceValidator{ResourceName: "ibm_dl_gateway", Schema: validateSchema}
return &ibmISDLGatewayResourceValidator
}
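// directlinkClient returns a Direct Link v1 API client from the provider session.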
func directlinkClient(meta interface{}) (*directlinkv1.DirectLinkV1, error) {
sess, err := meta.(ClientSession).DirectlinkV1API()
return sess, err
}
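// resourceIBMdlGatewayCreate builds a dedicated or connect gateway template from the resource data and creates the gateway.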
func resourceIBMdlGatewayCreate(d *schema.ResourceData, meta interface{}) error {
directLink, err := directlinkClient(meta)
if err != nil {
return err
}
dtype := d.Get(dlType).(string)
createGatewayOptionsModel := &directlinkv1.CreateGatewayOptions{}
name := d.Get(dlName).(string)
speed := int64(d.Get(dlSpeedMbps).(int))
global := d.Get(dlGlobal).(bool)
bgpAsn := int64(d.Get(dlBgpAsn).(int))
metered := d.Get(dlMetered).(bool)
if dtype == "dedicated" {
var crossConnectRouter, carrierName, locationName, customerName string
if _, ok := d.GetOk(dlCarrierName); ok {
carrierName = d.Get(dlCarrierName).(string)
// gatewayTemplateModel.CarrierName = &carrierName
} else {
err = fmt.Errorf("Error creating gateway, %s is a required field", dlCarrierName)
log.Printf("%s is a required field", dlCarrierName)
return err
}
if _, ok := d.GetOk(dlCrossConnectRouter); ok {
crossConnectRouter = d.Get(dlCrossConnectRouter).(string)
// gatewayTemplateModel.CrossConnectRouter = &crossConnectRouter
} else {
err = fmt.Errorf("Error creating gateway, %s is a required field", dlCrossConnectRouter)
log.Printf("%s is a required field", dlCrossConnectRouter)
return err
}
if _, ok := d.GetOk(dlLocationName); ok {
locationName = d.Get(dlLocationName).(string)
//gatewayTemplateModel.LocationName = &locationName
} else {
err = fmt.Errorf("Error creating gateway, %s is a required field", dlLocationName)
log.Printf("%s is a required field", dlLocationName)
return err
}
if _, ok := d.GetOk(dlCustomerName); ok {
customerName = d.Get(dlCustomerName).(string)
//gatewayTemplateModel.CustomerName = &customerName
} else {
err = fmt.Errorf("Error creating gateway, %s is a required field", dlCustomerName)
log.Printf("%s is a required field", dlCustomerName)
return err
}
gatewayDedicatedTemplateModel, _ := directLink.NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn, global, metered, name, speed, dtype, carrierName, crossConnectRouter, customerName, locationName)
if _, ok := d.GetOk(dlBgpIbmCidr); ok {
bgpIbmCidr := d.Get(dlBgpIbmCidr).(string)
gatewayDedicatedTemplateModel.BgpIbmCidr = &bgpIbmCidr
}
if _, ok := d.GetOk(dlBgpCerCidr); ok {
bgpCerCidr := d.Get(dlBgpCerCidr).(string)
gatewayDedicatedTemplateModel.BgpCerCidr = &bgpCerCidr
}
if _, ok := d.GetOk(dlResourceGroup); ok {
resourceGroup := d.Get(dlResourceGroup).(string)
gatewayDedicatedTemplateModel.ResourceGroup = &directlinkv1.ResourceGroupIdentity{ID: &resourceGroup}
}
if _, ok := d.GetOk(dlBgpBaseCidr); ok {
bgpBaseCidr := d.Get(dlBgpBaseCidr).(string)
gatewayDedicatedTemplateModel.BgpBaseCidr = &bgpBaseCidr
}
if _, ok := d.GetOk(dlMacSecConfig); ok {
// Construct an instance of the GatewayMacsecConfigTemplate model
gatewayMacsecConfigTemplateModel := new(directlinkv1.GatewayMacsecConfigTemplate)
activebool := d.Get("macsec_config.0.active").(bool)
gatewayMacsecConfigTemplateModel.Active = &activebool
// Construct an instance of the GatewayMacsecCak model
gatewayMacsecCakModel := new(directlinkv1.GatewayMacsecConfigTemplatePrimaryCak)
primaryCakstr := d.Get("macsec_config.0.primary_cak").(string)
gatewayMacsecCakModel.Crn = &primaryCakstr
gatewayMacsecConfigTemplateModel.PrimaryCak = gatewayMacsecCakModel
if fallbackCak, ok := d.GetOk("macsec_config.0.fallback_cak"); ok {
// Construct an instance of the GatewayMacsecCak model
gatewayMacsecCakModel := new(directlinkv1.GatewayMacsecConfigTemplateFallbackCak)
fallbackCakstr := fallbackCak.(string)
gatewayMacsecCakModel.Crn = &fallbackCakstr
gatewayMacsecConfigTemplateModel.FallbackCak = gatewayMacsecCakModel
}
if windowSize, ok := d.GetOk("macsec_config.0.window_size"); ok {
windowSizeint := int64(windowSize.(int))
gatewayMacsecConfigTemplateModel.WindowSize = &windowSizeint
}
gatewayDedicatedTemplateModel.MacsecConfig = gatewayMacsecConfigTemplateModel
}
createGatewayOptionsModel.GatewayTemplate = gatewayDedicatedTemplateModel
} else if dtype == "connect" {
var portID string
if _, ok := d.GetOk(dlPort); ok {
portID = d.Get(dlPort).(string)
}
if portID != "" {
portIdentity, _ := directLink.NewGatewayPortIdentity(portID)
gatewayConnectTemplateModel, _ := directLink.NewGatewayTemplateGatewayTypeConnectTemplate(bgpAsn, global, metered, name, speed, dtype, portIdentity)
if _, ok := d.GetOk(dlBgpIbmCidr); ok {
bgpIbmCidr := d.Get(dlBgpIbmCidr).(string)
gatewayConnectTemplateModel.BgpIbmCidr = &bgpIbmCidr
}
if _, ok := d.GetOk(dlBgpBaseCidr); ok {
bgpBaseCidr := d.Get(dlBgpBaseCidr).(string)
gatewayConnectTemplateModel.BgpBaseCidr = &bgpBaseCidr
}
if _, ok := d.GetOk(dlBgpCerCidr); ok {
bgpCerCidr := d.Get(dlBgpCerCidr).(string)
gatewayConnectTemplateModel.BgpCerCidr = &bgpCerCidr
}
if _, ok := d.GetOk(dlResourceGroup); ok {
resourceGroup := d.Get(dlResourceGroup).(string)
gatewayConnectTemplateModel.ResourceGroup = &directlinkv1.ResourceGroupIdentity{ID: &resourceGroup}
}
createGatewayOptionsModel.GatewayTemplate = gatewayConnectTemplateModel
} else {
err = fmt.Errorf("Error creating direct link connect gateway, %s is a required field", dlPort)
return err
}
}
gateway, response, err := directLink.CreateGateway(createGatewayOptionsModel)
if err != nil {
return fmt.Errorf("[DEBUG] Create Direct Link Gateway (%s) err %s\n%s", dtype, err, response)
}
d.SetId(*gateway.ID)
log.Printf("[INFO] Created Direct Link Gateway (%s Template) : %s", dtype, *gateway.ID)
if dtype == "connect" {
getPortOptions := directLink.NewGetPortOptions(*gateway.Port.ID)
port, response, err := directLink.GetPort(getPortOptions)
if err != nil {
return fmt.Errorf("[ERROR] Error getting port %s %s", response, err)
}
if port != nil && port.ProviderName != nil && !strings.Contains(strings.ToLower(*port.ProviderName), "netbond") && !strings.Contains(strings.ToLower(*port.ProviderName), "megaport") {
_, err = isWaitForDirectLinkAvailable(directLink, d.Id(), d.Timeout(schema.TimeoutCreate))
if err != nil {
return err
}
}
}
v := os.Getenv("IC_ENV_TAGS")
if _, ok := d.GetOk(dlTags); ok || v != "" {
oldList, newList := d.GetChange(dlTags)
err = UpdateTagsUsingCRN(oldList, newList, meta, *gateway.Crn)
if err != nil {
log.Printf(
"Error on create of resource direct link gateway %s (%s) tags: %s", dtype, d.Id(), err)
}
}
return resourceIBMdlGatewayRead(d, meta)
}
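// resourceIBMdlGatewayRead fetches the gateway and syncs its attributes, MACsec configuration and tags into the Terraform state.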
func resourceIBMdlGatewayRead(d *schema.ResourceData, meta interface{}) error {
dtype := d.Get(dlType).(string)
log.Printf("[INFO] Inside resourceIBMdlGatewayRead: %s", dtype)
directLink, err := directlinkClient(meta)
if err != nil {
return err
}
ID := d.Id()
getOptions := &directlinkv1.GetGatewayOptions{
ID: &ID,
}
log.Printf("[INFO] Calling getgateway api: %s", dtype)
instance, response, err := directLink.GetGateway(getOptions)
if err != nil {
if response != nil && response.StatusCode == 404 {
d.SetId("")
return nil
}
return fmt.Errorf("Error Getting Direct Link Gateway (%s Template): %s\n%s", dtype, err, response)
}
if instance.Name != nil {
d.Set(dlName, *instance.Name)
}
if instance.Crn != nil {
d.Set(dlCrn, *instance.Crn)
}
if instance.BgpAsn != nil {
d.Set(dlBgpAsn, *instance.BgpAsn)
}
if instance.BgpIbmCidr != nil {
d.Set(dlBgpIbmCidr, *instance.BgpIbmCidr)
}
if instance.BgpIbmAsn != nil {
d.Set(dlBgpIbmAsn, *instance.BgpIbmAsn)
}
if instance.Metered != nil {
d.Set(dlMetered, *instance.Metered)
}
if instance.CrossConnectRouter != nil {
d.Set(dlCrossConnectRouter, *instance.CrossConnectRouter)
}
if instance.BgpBaseCidr != nil {
d.Set(dlBgpBaseCidr, *instance.BgpBaseCidr)
}
if instance.BgpCerCidr != nil {
d.Set(dlBgpCerCidr, *instance.BgpCerCidr)
}
if instance.ProviderApiManaged != nil {
d.Set(dlProviderAPIManaged, *instance.ProviderApiManaged)
}
if instance.Type != nil {
d.Set(dlType, *instance.Type)
}
if instance.SpeedMbps != nil {
d.Set(dlSpeedMbps, *instance.SpeedMbps)
}
if instance.OperationalStatus != nil {
d.Set(dlOperationalStatus, *instance.OperationalStatus)
}
if instance.BgpStatus != nil {
d.Set(dlBgpStatus, *instance.BgpStatus)
}
if instance.CompletionNoticeRejectReason != nil {
d.Set(dlCompletionNoticeRejectReason, *instance.CompletionNoticeRejectReason)
}
if instance.LocationName != nil {
d.Set(dlLocationName, *instance.LocationName)
}
if instance.LocationDisplayName != nil {
d.Set(dlLocationDisplayName, *instance.LocationDisplayName)
}
if instance.Vlan != nil {
d.Set(dlVlan, *instance.Vlan)
}
if instance.Global != nil {
d.Set(dlGlobal, *instance.Global)
}
if instance.Port != nil {
d.Set(dlPort, *instance.Port.ID)
}
if instance.LinkStatus != nil {
d.Set(dlLinkStatus, *instance.LinkStatus)
}
if instance.CreatedAt != nil {
d.Set(dlCreatedAt, instance.CreatedAt.String())
}
if dtype == "dedicated" {
if instance.MacsecConfig != nil {
macsecList := make([]map[string]interface{}, 0)
currentMacSec := map[string]interface{}{}
// Construct an instance of the GatewayMacsecConfigTemplate model
gatewayMacsecConfigTemplateModel := instance.MacsecConfig
if gatewayMacsecConfigTemplateModel.Active != nil {
currentMacSec[dlActive] = *gatewayMacsecConfigTemplateModel.Active
}
if gatewayMacsecConfigTemplateModel.ActiveCak != nil {
if gatewayMacsecConfigTemplateModel.ActiveCak.Crn != nil {
currentMacSec[dlActiveCak] = *gatewayMacsecConfigTemplateModel.ActiveCak.Crn
}
}
if gatewayMacsecConfigTemplateModel.PrimaryCak != nil {
currentMacSec[dlPrimaryCak] = *gatewayMacsecConfigTemplateModel.PrimaryCak.Crn
}
if gatewayMacsecConfigTemplateModel.FallbackCak != nil {
if gatewayMacsecConfigTemplateModel.FallbackCak.Crn != nil {
currentMacSec[dlFallbackCak] = *gatewayMacsecConfigTemplateModel.FallbackCak.Crn
}
}
if gatewayMacsecConfigTemplateModel.SakExpiryTime != nil {
currentMacSec[dlSakExpiryTime] = *gatewayMacsecConfigTemplateModel.SakExpiryTime
}
if gatewayMacsecConfigTemplateModel.SecurityPolicy != nil {
currentMacSec[dlSecurityPolicy] = *gatewayMacsecConfigTemplateModel.SecurityPolicy
}
if gatewayMacsecConfigTemplateModel.WindowSize != nil {
currentMacSec[dlWindowSize] = *gatewayMacsecConfigTemplateModel.WindowSize
}
if gatewayMacsecConfigTemplateModel.CipherSuite != nil {
currentMacSec[dlCipherSuite] = *gatewayMacsecConfigTemplateModel.CipherSuite
}
if gatewayMacsecConfigTemplateModel.ConfidentialityOffset != nil {
currentMacSec[dlConfidentialityOffset] = *gatewayMacsecConfigTemplateModel.ConfidentialityOffset
}
if gatewayMacsecConfigTemplateModel.CryptographicAlgorithm != nil {
currentMacSec[dlCryptographicAlgorithm] = *gatewayMacsecConfigTemplateModel.CryptographicAlgorithm
}
if gatewayMacsecConfigTemplateModel.KeyServerPriority != nil {
currentMacSec[dlKeyServerPriority] = *gatewayMacsecConfigTemplateModel.KeyServerPriority
}
if gatewayMacsecConfigTemplateModel.Status != nil {
currentMacSec[dlMacSecConfigStatus] = *gatewayMacsecConfigTemplateModel.Status
}
macsecList = append(macsecList, currentMacSec)
d.Set(dlMacSecConfig, macsecList)
}
}
if instance.ChangeRequest != nil {
gatewayChangeRequestIntf := instance.ChangeRequest
gatewayChangeRequest := gatewayChangeRequestIntf.(*directlinkv1.GatewayChangeRequest)
d.Set(dlChangeRequest, *gatewayChangeRequest.Type)
}
tags, err := GetTagsUsingCRN(meta, *instance.Crn)
if err != nil {
log.Printf(
"Error on get of resource direct link gateway (%s) tags: %s", d.Id(), err)
}
d.Set(dlTags, tags)
controller, err := getBaseController(meta)
if err != nil {
return err
}
d.Set(ResourceControllerURL, controller+"/interconnectivity/direct-link")
d.Set(ResourceName, *instance.Name)
d.Set(ResourceCRN, *instance.Crn)
d.Set(ResourceStatus, *instance.OperationalStatus)
if instance.ResourceGroup != nil {
rg := instance.ResourceGroup
d.Set(dlResourceGroup, *rg.ID)
d.Set(ResourceGroupName, *rg.ID)
}
return nil
}
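// isWaitForDirectLinkAvailable blocks until the gateway leaves the provisioning state or the timeout expires.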
func isWaitForDirectLinkAvailable(client *directlinkv1.DirectLinkV1, id string, timeout time.Duration) (interface{}, error) {
log.Printf("Waiting for direct link (%s) to be provisioned.", id)
stateConf := &resource.StateChangeConf{
Pending: []string{"retry", dlGatewayProvisioning},
Target: []string{dlGatewayProvisioningDone, ""},
Refresh: isDirectLinkRefreshFunc(client, id),
Timeout: timeout,
Delay: 10 * time.Second,
MinTimeout: 10 * time.Second,
}
return stateConf.WaitForState()
}
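// isDirectLinkRefreshFunc polls the gateway and reports whether provisioning is still in progress or done.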
func isDirectLinkRefreshFunc(client *directlinkv1.DirectLinkV1, id string) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
getOptions := &directlinkv1.GetGatewayOptions{
ID: &id,
}
instance, response, err := client.GetGateway(getOptions)
if err != nil {
return nil, "", fmt.Errorf("Error Getting Direct Link: %s\n%s", err, response)
}
if *instance.OperationalStatus == "provisioned" || *instance.OperationalStatus == "failed" || *instance.OperationalStatus == "create_rejected" {
return instance, dlGatewayProvisioningDone, nil
}
return instance, dlGatewayProvisioning, nil
}
}
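// resourceIBMdlGatewayUpdate applies changes to name, speed, global routing, metered billing, tags and (for dedicated gateways) MACsec settings.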
func resourceIBMdlGatewayUpdate(d *schema.ResourceData, meta interface{}) error {
directLink, err := directlinkClient(meta)
if err != nil {
return err
}
ID := d.Id()
getOptions := &directlinkv1.GetGatewayOptions{
ID: &ID,
}
instance, detail, err := directLink.GetGateway(getOptions)
if err != nil {
log.Printf("Error fetching Direct Link Gateway :%s", detail)
return err
}
updateGatewayOptionsModel := &directlinkv1.UpdateGatewayOptions{}
updateGatewayOptionsModel.ID = &ID
dtype := *instance.Type
if d.HasChange(dlTags) {
oldList, newList := d.GetChange(dlTags)
err = UpdateTagsUsingCRN(oldList, newList, meta, *instance.Crn)
if err != nil {
log.Printf(
"Error on update of resource direct link gateway (%s) tags: %s", *instance.ID, err)
}
}
if d.HasChange(dlName) {
name := d.Get(dlName).(string)
updateGatewayOptionsModel.Name = &name
}
if d.HasChange(dlSpeedMbps) {
speed := int64(d.Get(dlSpeedMbps).(int))
updateGatewayOptionsModel.SpeedMbps = &speed
}
/*
NOTE: Operational Status cannot be maintained in Terraform because it changes automatically on the server side.
Operational Status and LoaRejectReason are linked, so a user cannot update either of them through Terraform.
if d.HasChange(dlOperationalStatus) {
if _, ok := d.GetOk(dlOperationalStatus); ok {
operStatus := d.Get(dlOperationalStatus).(string)
updateGatewayOptionsModel.OperationalStatus = &operStatus
}
if _, ok := d.GetOk(dlLoaRejectReason); ok {
loaRejectReason := d.Get(dlLoaRejectReason).(string)
updateGatewayOptionsModel.LoaRejectReason = &loaRejectReason
}
}
*/
if d.HasChange(dlGlobal) {
global := d.Get(dlGlobal).(bool)
updateGatewayOptionsModel.Global = &global
}
if d.HasChange(dlMetered) {
metered := d.Get(dlMetered).(bool)
updateGatewayOptionsModel.Metered = &metered
}
if dtype == "dedicated" {
if d.HasChange(dlMacSecConfig) && !d.IsNewResource() {
// Construct an instance of the GatewayMacsecConfigTemplate model
gatewayMacsecConfigTemplatePatchModel := new(directlinkv1.GatewayMacsecConfigPatchTemplate)
if d.HasChange("macsec_config.0.active") {
activebool := d.Get("macsec_config.0.active").(bool)
gatewayMacsecConfigTemplatePatchModel.Active = &activebool
}
if d.HasChange("macsec_config.0.primary_cak") {
// Construct an instance of the GatewayMacsecCak model
gatewayMacsecCakModel := new(directlinkv1.GatewayMacsecConfigPatchTemplatePrimaryCak)
primaryCakstr := d.Get("macsec_config.0.primary_cak").(string)
gatewayMacsecCakModel.Crn = &primaryCakstr
gatewayMacsecConfigTemplatePatchModel.PrimaryCak = gatewayMacsecCakModel
}
if d.HasChange("macsec_config.0.fallback_cak") {
// Construct an instance of the GatewayMacsecCak model
gatewayMacsecCakModel := new(directlinkv1.GatewayMacsecConfigPatchTemplateFallbackCak)
if _, ok := d.GetOk("macsec_config.0.fallback_cak"); ok {
fallbackCakstr := d.Get("macsec_config.0.fallback_cak").(string)
gatewayMacsecCakModel.Crn = &fallbackCakstr
gatewayMacsecConfigTemplatePatchModel.FallbackCak = gatewayMacsecCakModel
} else {
fallbackCakstr := ""
gatewayMacsecCakModel.Crn = &fallbackCakstr
}
gatewayMacsecConfigTemplatePatchModel.FallbackCak = gatewayMacsecCakModel
}
if d.HasChange("macsec_config.0.window_size") {
if _, ok := d.GetOk("macsec_config.0.window_size"); ok {
windowSizeint := int64(d.Get("macsec_config.0.window_size").(int))
gatewayMacsecConfigTemplatePatchModel.WindowSize = &windowSizeint
}
}
updateGatewayOptionsModel.MacsecConfig = gatewayMacsecConfigTemplatePatchModel
} else {
updateGatewayOptionsModel.MacsecConfig = nil
}
}
_, response, err := directLink.UpdateGateway(updateGatewayOptionsModel)
if err != nil {
log.Printf("[DEBUG] Update Direct Link Gateway err %s\n%s", err, response)
return err
}
return resourceIBMdlGatewayRead(d, meta)
}
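// resourceIBMdlGatewayDelete deletes the gateway, treating a 404 response as already deleted.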
func resourceIBMdlGatewayDelete(d *schema.ResourceData, meta interface{}) error {
directLink, err := directlinkClient(meta)
if err != nil {
return err
}
ID := d.Id()
delOptions := &directlinkv1.DeleteGatewayOptions{
ID: &ID,
}
response, err := directLink.DeleteGateway(delOptions)
if err != nil && response.StatusCode != 404 {
log.Printf("Error deleting Direct Link Gateway : %s", response)
return err
}
d.SetId("")
return nil
}
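// resourceIBMdlGatewayExists checks whether the gateway still exists, clearing the resource ID on a 404.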
func resourceIBMdlGatewayExists(d *schema.ResourceData, meta interface{}) (bool, error) {
directLink, err := directlinkClient(meta)
if err != nil {
return false, err
}
ID := d.Id()
getOptions := &directlinkv1.GetGatewayOptions{
ID: &ID,
}
_, response, err := directLink.GetGateway(getOptions)
if err != nil {
if response != nil && response.StatusCode == 404 {
d.SetId("")
return false, nil
}
return false, fmt.Errorf("Error Getting Direct Link Gateway : %s\n%s", err, response)
}
return true, nil
}
| [
"\"IC_ENV_TAGS\""
] | [] | [
"IC_ENV_TAGS"
] | [] | ["IC_ENV_TAGS"] | go | 1 | 0 | |
storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.storm.daemon.supervisor;
import static org.apache.storm.utils.Utils.OR;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.stream.Collectors;
import org.apache.commons.lang.StringUtils;
import org.apache.storm.Config;
import org.apache.storm.DaemonConfig;
import org.apache.storm.container.ResourceIsolationInterface;
import org.apache.storm.generated.LocalAssignment;
import org.apache.storm.generated.ProfileAction;
import org.apache.storm.generated.ProfileRequest;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.generated.WorkerResources;
import org.apache.storm.utils.ConfigUtils;
import org.apache.storm.utils.LocalState;
import org.apache.storm.utils.ObjectReader;
import org.apache.storm.utils.ServerConfigUtils;
import org.apache.storm.utils.ServerUtils;
import org.apache.storm.utils.SimpleVersion;
import org.apache.storm.utils.Utils;
import org.apache.storm.utils.VersionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
/**
* A container that runs processes on the local box.
*/
public class BasicContainer extends Container {
private static final Logger LOG = LoggerFactory.getLogger(BasicContainer.class);
private static final FilenameFilter jarFilter = (dir, name) -> name.endsWith(".jar");
private static final Joiner CPJ =
Joiner.on(ServerUtils.CLASS_PATH_SEPARATOR).skipNulls();
protected final LocalState _localState;
protected final String _profileCmd;
protected final String _stormHome = System.getProperty("storm.home");
protected volatile boolean _exitedEarly = false;
private class ProcessExitCallback implements ExitCodeCallback {
private final String _logPrefix;
public ProcessExitCallback(String logPrefix) {
_logPrefix = logPrefix;
}
@Override
public void call(int exitCode) {
LOG.info("{} exited with code: {}", _logPrefix, exitCode);
_exitedEarly = true;
}
}
/**
* Create a new BasicContainer
* @param type the type of container being made.
* @param conf the supervisor config
* @param supervisorId the ID of the supervisor this is a part of.
* @param port the port the container is on. Should be <= 0 if only a partial recovery
* @param assignment the assignment for this container. Should be null if only a partial recovery.
* @param resourceIsolationManager used to isolate resources for a container can be null if no isolation is used.
* @param localState the local state of the supervisor. May be null if partial recovery
* @param workerId the id of the worker to use. Must not be null if doing a partial recovery.
*/
public BasicContainer(ContainerType type, Map<String, Object> conf, String supervisorId, int port,
LocalAssignment assignment, ResourceIsolationInterface resourceIsolationManager,
LocalState localState, String workerId) throws IOException {
this(type, conf, supervisorId, port, assignment, resourceIsolationManager, localState, workerId, null, null, null);
}
/**
* Create a new BasicContainer
* @param type the type of container being made.
* @param conf the supervisor config
* @param supervisorId the ID of the supervisor this is a part of.
* @param port the port the container is on. Should be <= 0 if only a partial recovery
* @param assignment the assignment for this container. Should be null if only a partial recovery.
* @param resourceIsolationManager used to isolate resources for a container can be null if no isolation is used.
* @param localState the local state of the supervisor. May be null if partial recovery
* @param workerId the id of the worker to use. Must not be null if doing a partial recovery.
* @param ops file system operations (mostly for testing) if null a new one is made
* @param topoConf the config of the topology (mostly for testing) if null
* and not a partial recovery the real conf is read.
* @param profileCmd the command to use when profiling (used for testing)
* @throws IOException on any error
* @throws ContainerRecoveryException if the Container could not be recovered.
*/
BasicContainer(ContainerType type, Map<String, Object> conf, String supervisorId, int port,
LocalAssignment assignment, ResourceIsolationInterface resourceIsolationManager,
LocalState localState, String workerId, Map<String, Object> topoConf,
AdvancedFSOps ops, String profileCmd) throws IOException {
super(type, conf, supervisorId, port, assignment, resourceIsolationManager, workerId, topoConf, ops);
assert(localState != null);
_localState = localState;
if (type.isRecovery() && !type.isOnlyKillable()) {
synchronized (localState) {
String wid = null;
Map<String, Integer> workerToPort = localState.getApprovedWorkers();
for (Map.Entry<String, Integer> entry : workerToPort.entrySet()) {
if (port == entry.getValue().intValue()) {
wid = entry.getKey();
}
}
if (wid == null) {
throw new ContainerRecoveryException("Could not find worker id for " + port + " " + assignment);
}
LOG.info("Recovered Worker {}", wid);
_workerId = wid;
}
} else if (_workerId == null){
createNewWorkerId();
}
if (profileCmd == null) {
profileCmd = _stormHome + Utils.FILE_PATH_SEPARATOR + "bin" + Utils.FILE_PATH_SEPARATOR
+ conf.get(DaemonConfig.WORKER_PROFILER_COMMAND);
}
_profileCmd = profileCmd;
}
/**
* Create a new worker ID for this process and store it in this object and
* in the local state. Never call this if a worker is currently up and running.
* We will lose track of the process.
*/
protected void createNewWorkerId() {
_type.assertFull();
assert(_workerId == null);
synchronized (_localState) {
_workerId = Utils.uuid();
Map<String, Integer> workerToPort = _localState.getApprovedWorkers();
if (workerToPort == null) {
workerToPort = new HashMap<>(1);
}
removeWorkersOn(workerToPort, _port);
workerToPort.put(_workerId, _port);
_localState.setApprovedWorkers(workerToPort);
LOG.info("Created Worker ID {}", _workerId);
}
}
private static void removeWorkersOn(Map<String, Integer> workerToPort, int _port) {
for (Iterator<Entry<String, Integer>> i = workerToPort.entrySet().iterator(); i.hasNext();) {
Entry<String, Integer> found = i.next();
if (_port == found.getValue().intValue()) {
LOG.warn("Deleting worker {} from state", found.getKey());
i.remove();
}
}
}
@Override
public void cleanUpForRestart() throws IOException {
String origWorkerId = _workerId;
super.cleanUpForRestart();
synchronized (_localState) {
Map<String, Integer> workersToPort = _localState.getApprovedWorkers();
workersToPort.remove(origWorkerId);
removeWorkersOn(workersToPort, _port);
_localState.setApprovedWorkers(workersToPort);
LOG.info("Removed Worker ID {}", origWorkerId);
}
}
@Override
public void relaunch() throws IOException {
_type.assertFull();
//We are launching it now...
_type = ContainerType.LAUNCH;
createNewWorkerId();
setup();
launch();
}
@Override
public boolean didMainProcessExit() {
return _exitedEarly;
}
/**
* Run the given command for profiling
*
* @param command
* the command to run
* @param env
* the environment to run the command
* @param logPrefix
* the prefix to include in the logs
* @param targetDir
* the working directory to run the command in
* @return true if it ran successfully, else false
* @throws IOException
* on any error
* @throws InterruptedException
* if interrupted while waiting for the process to exit.
*/
protected boolean runProfilingCommand(List<String> command, Map<String, String> env, String logPrefix,
File targetDir) throws IOException, InterruptedException {
_type.assertFull();
Process p = ClientSupervisorUtils.launchProcess(command, env, logPrefix, null, targetDir);
int ret = p.waitFor();
return ret == 0;
}
@Override
public boolean runProfiling(ProfileRequest request, boolean stop) throws IOException, InterruptedException {
_type.assertFull();
String targetDir = ConfigUtils.workerArtifactsRoot(_conf, _topologyId, _port);
@SuppressWarnings("unchecked")
Map<String, String> env = (Map<String, String>) _topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
if (env == null) {
env = new HashMap<String, String>();
}
String str = ConfigUtils.workerArtifactsPidPath(_conf, _topologyId, _port);
String workerPid = _ops.slurpString(new File(str)).trim();
ProfileAction profileAction = request.get_action();
String logPrefix = "ProfilerAction process " + _topologyId + ":" + _port + " PROFILER_ACTION: " + profileAction
+ " ";
List<String> command = mkProfileCommand(profileAction, stop, workerPid, targetDir);
File targetFile = new File(targetDir);
if (command.size() > 0) {
return runProfilingCommand(command, env, logPrefix, targetFile);
}
LOG.warn("PROFILING REQUEST NOT SUPPORTED {} IGNORED...", request);
return true;
}
/**
* Get the command to run when doing profiling
* @param action the profiling action to perform
* @param stop if this is meant to stop the profiling or start it
* @param workerPid the PID of the process to profile
* @param targetDir the current working directory of the worker process
* @return the command to run for profiling.
*/
private List<String> mkProfileCommand(ProfileAction action, boolean stop, String workerPid, String targetDir) {
switch(action) {
case JMAP_DUMP:
return jmapDumpCmd(workerPid, targetDir);
case JSTACK_DUMP:
return jstackDumpCmd(workerPid, targetDir);
case JPROFILE_DUMP:
return jprofileDump(workerPid, targetDir);
case JVM_RESTART:
return jprofileJvmRestart(workerPid);
case JPROFILE_STOP:
if (stop) {
return jprofileStop(workerPid, targetDir);
}
return jprofileStart(workerPid);
default:
return Lists.newArrayList();
}
}
private List<String> jmapDumpCmd(String pid, String targetDir) {
return Lists.newArrayList(_profileCmd, pid, "jmap", targetDir);
}
private List<String> jstackDumpCmd(String pid, String targetDir) {
return Lists.newArrayList(_profileCmd, pid, "jstack", targetDir);
}
private List<String> jprofileStart(String pid) {
return Lists.newArrayList(_profileCmd, pid, "start");
}
private List<String> jprofileStop(String pid, String targetDir) {
return Lists.newArrayList(_profileCmd, pid, "stop", targetDir);
}
private List<String> jprofileDump(String pid, String targetDir) {
return Lists.newArrayList(_profileCmd, pid, "dump", targetDir);
}
private List<String> jprofileJvmRestart(String pid) {
return Lists.newArrayList(_profileCmd, pid, "kill");
}
/**
* Compute the java.library.path that should be used for the worker.
* This helps it to load JNI libraries that are packaged in the uber jar.
* @param stormRoot the root directory of the worker process
* @param conf the config for the supervisor.
* @return the java.library.path/LD_LIBRARY_PATH to use so native libraries load correctly.
*/
protected String javaLibraryPath(String stormRoot, Map<String, Object> conf) {
String resourceRoot = stormRoot + Utils.FILE_PATH_SEPARATOR + ServerConfigUtils.RESOURCES_SUBDIR;
String os = System.getProperty("os.name").replaceAll("\\s+", "_");
String arch = System.getProperty("os.arch");
String archResourceRoot = resourceRoot + Utils.FILE_PATH_SEPARATOR + os + "-" + arch;
String ret = CPJ.join(archResourceRoot, resourceRoot,
conf.get(DaemonConfig.JAVA_LIBRARY_PATH));
return ret;
}
/**
* Returns a path with a wildcard as the final element, so that the JVM will expand
* that to all JARs in the directory.
* @param dir the directory to which a wildcard will be appended
* @return the path with wildcard ("*") suffix
*/
protected String getWildcardDir(File dir) {
return Paths.get(dir.toString(), "*").toString();
}
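/**
 * Build the storm framework part of the worker classpath (lib-worker, extlib,
 * STORM_EXT_CLASSPATH and the conf dir), resolved against the topology's storm version.
 */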
protected List<String> frameworkClasspath(SimpleVersion topoVersion) {
File stormWorkerLibDir = new File(_stormHome, "lib-worker");
String topoConfDir =
System.getenv("STORM_CONF_DIR") != null ?
System.getenv("STORM_CONF_DIR") :
new File(_stormHome, "conf").getAbsolutePath();
File stormExtlibDir = new File(_stormHome, "extlib");
String extcp = System.getenv("STORM_EXT_CLASSPATH");
List<String> pathElements = new LinkedList<>();
pathElements.add(getWildcardDir(stormWorkerLibDir));
pathElements.add(getWildcardDir(stormExtlibDir));
pathElements.add(extcp);
pathElements.add(topoConfDir);
NavigableMap<SimpleVersion, List<String>> classpaths = Utils.getConfiguredClasspathVersions(_conf, pathElements);
return Utils.getCompatibleVersion(classpaths, topoVersion, "classpath", pathElements);
}
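/**
 * Pick the worker main class compatible with the topology's storm version.
 */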
protected String getWorkerMain(SimpleVersion topoVersion) {
String defaultWorkerGuess = "org.apache.storm.daemon.worker.Worker";
if (topoVersion.getMajor() == 0) {
//Prior to the org.apache change
defaultWorkerGuess = "backtype.storm.daemon.worker";
} else if (topoVersion.getMajor() == 1) {
//Have not moved to a java worker yet
defaultWorkerGuess = "org.apache.storm.daemon.worker";
}
NavigableMap<SimpleVersion,String> mains = Utils.getConfiguredWorkerMainVersions(_conf);
return Utils.getCompatibleVersion(mains, topoVersion, "worker main class", defaultWorkerGuess);
}
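/**
 * Pick the worker log writer class compatible with the topology's storm version.
 */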
protected String getWorkerLogWriter(SimpleVersion topoVersion) {
String defaultGuess = "org.apache.storm.LogWriter";
if (topoVersion.getMajor() == 0) {
//Prior to the org.apache change
defaultGuess = "backtype.storm.LogWriter";
}
NavigableMap<SimpleVersion,String> mains = Utils.getConfiguredWorkerLogWriterVersions(_conf);
return Utils.getCompatibleVersion(mains, topoVersion, "worker log writer class", defaultGuess);
}
@SuppressWarnings("unchecked")
private List<String> asStringList(Object o) {
if (o instanceof String) {
return Arrays.asList((String)o);
} else if (o instanceof List) {
return (List<String>)o;
}
return Collections.EMPTY_LIST;
}
/**
* Compute the classpath for the worker process
* @param stormJar the topology jar
* @param dependencyLocations any dependencies from the topology
* @param topoVersion the version of the storm framework to use
* @return the full classpath
*/
protected String getWorkerClassPath(String stormJar, List<String> dependencyLocations, SimpleVersion topoVersion) {
List<String> workercp = new ArrayList<>();
workercp.addAll(asStringList(_topoConf.get(Config.TOPOLOGY_CLASSPATH_BEGINNING)));
workercp.addAll(frameworkClasspath(topoVersion));
workercp.add(stormJar);
workercp.addAll(dependencyLocations);
workercp.addAll(asStringList(_topoConf.get(Config.TOPOLOGY_CLASSPATH)));
return CPJ.join(workercp);
}
private String substituteChildOptsInternal(String string, int memOnheap) {
if (StringUtils.isNotBlank(string)) {
String p = String.valueOf(_port);
string = string.replace("%ID%", p);
string = string.replace("%WORKER-ID%", _workerId);
string = string.replace("%TOPOLOGY-ID%", _topologyId);
string = string.replace("%WORKER-PORT%", p);
if (memOnheap > 0) {
string = string.replace("%HEAP-MEM%", String.valueOf(memOnheap));
}
}
return string;
}
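/**
 * Expand %ID%, %WORKER-ID%, %TOPOLOGY-ID%, %WORKER-PORT% and %HEAP-MEM% placeholders in the
 * configured childopts and split the result into individual JVM arguments.
 */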
protected List<String> substituteChildopts(Object value) {
return substituteChildopts(value, -1);
}
protected List<String> substituteChildopts(Object value, int memOnheap) {
List<String> rets = new ArrayList<>();
if (value instanceof String) {
String string = substituteChildOptsInternal((String) value, memOnheap);
if (StringUtils.isNotBlank(string)) {
String[] strings = string.split("\\s+");
for (String s: strings) {
if (StringUtils.isNotBlank(s)) {
rets.add(s);
}
}
}
} else if (value instanceof List) {
@SuppressWarnings("unchecked")
List<String> objects = (List<String>) value;
for (String object : objects) {
String str = substituteChildOptsInternal(object, memOnheap);
if (StringUtils.isNotBlank(str)) {
rets.add(str);
}
}
}
return rets;
}
/**
* Launch the worker process (non-blocking)
*
* @param command
* the command to run
* @param env
* the environment to run the command
* @param processExitCallback
* a callback for when the process exits
* @param logPrefix
* the prefix to include in the logs
* @param targetDir
* the working directory to run the command in
* @throws IOException
* on any error
*/
protected void launchWorkerProcess(List<String> command, Map<String, String> env, String logPrefix,
ExitCodeCallback processExitCallback, File targetDir) throws IOException {
if (_resourceIsolationManager != null) {
command = _resourceIsolationManager.getLaunchCommand(_workerId, command);
}
ClientSupervisorUtils.launchProcess(command, env, logPrefix, processExitCallback, targetDir);
}
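/**
 * Resolve the log4j2 worker.xml file, using the configured log4j2 conf dir when set and
 * falling back to storm.home/log4j2.
 */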
private String getWorkerLoggingConfigFile() {
String log4jConfigurationDir = (String) (_conf.get(DaemonConfig.STORM_LOG4J2_CONF_DIR));
if (StringUtils.isNotBlank(log4jConfigurationDir)) {
if (!ServerUtils.isAbsolutePath(log4jConfigurationDir)) {
log4jConfigurationDir = _stormHome + Utils.FILE_PATH_SEPARATOR + log4jConfigurationDir;
}
} else {
log4jConfigurationDir = _stormHome + Utils.FILE_PATH_SEPARATOR + "log4j2";
}
if (ServerUtils.IS_ON_WINDOWS && !log4jConfigurationDir.startsWith("file:")) {
log4jConfigurationDir = "file:///" + log4jConfigurationDir;
}
return log4jConfigurationDir + Utils.FILE_PATH_SEPARATOR + "worker.xml";
}
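/**
 * Lazily reads and caches the dependency locations and storm version for a topology's dist dir.
 */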
private static class TopologyMetaData {
private boolean _dataCached = false;
private List<String> _depLocs = null;
private String _stormVersion = null;
private final Map<String, Object> _conf;
private final String _topologyId;
private final AdvancedFSOps _ops;
private final String _stormRoot;
public TopologyMetaData(final Map<String, Object> conf, final String topologyId, final AdvancedFSOps ops, final String stormRoot) {
_conf = conf;
_topologyId = topologyId;
_ops = ops;
_stormRoot = stormRoot;
}
public String toString() {
List<String> data;
String stormVersion;
synchronized(this) {
data = _depLocs;
stormVersion = _stormVersion;
}
return "META for " + _topologyId +" DEP_LOCS => " + data + " STORM_VERSION => " + stormVersion;
}
private synchronized void readData() throws IOException {
final StormTopology stormTopology = ConfigUtils.readSupervisorTopology(_conf, _topologyId, _ops);
final List<String> dependencyLocations = new ArrayList<>();
if (stormTopology.get_dependency_jars() != null) {
for (String dependency : stormTopology.get_dependency_jars()) {
dependencyLocations.add(new File(_stormRoot, dependency).getAbsolutePath());
}
}
if (stormTopology.get_dependency_artifacts() != null) {
for (String dependency : stormTopology.get_dependency_artifacts()) {
dependencyLocations.add(new File(_stormRoot, dependency).getAbsolutePath());
}
}
_depLocs = dependencyLocations;
_stormVersion = stormTopology.get_storm_version();
_dataCached = true;
}
public synchronized List<String> getDepLocs() throws IOException {
if (!_dataCached) {
readData();
}
return _depLocs;
}
public synchronized String getStormVersion() throws IOException {
if (!_dataCached) {
readData();
}
return _stormVersion;
}
}
static class TopoMetaLRUCache {
public final int _maxSize = 100; //We could make this configurable in the future...
@SuppressWarnings("serial")
private LinkedHashMap<String, TopologyMetaData> _cache = new LinkedHashMap<String, TopologyMetaData>() {
@Override
protected boolean removeEldestEntry(Map.Entry<String,TopologyMetaData> eldest) {
return (size() > _maxSize);
}
};
public synchronized TopologyMetaData get(final Map<String, Object> conf, final String topologyId, final AdvancedFSOps ops, String stormRoot) {
//Only go off of the topology id for now.
TopologyMetaData dl = _cache.get(topologyId);
if (dl == null) {
_cache.putIfAbsent(topologyId, new TopologyMetaData(conf, topologyId, ops, stormRoot));
dl = _cache.get(topologyId);
}
return dl;
}
public synchronized void clear() {
_cache.clear();
}
}
static final TopoMetaLRUCache TOPO_META_CACHE = new TopoMetaLRUCache();
public static List<String> getDependencyLocationsFor(final Map<String, Object> conf, final String topologyId, final AdvancedFSOps ops, String stormRoot) throws IOException {
return TOPO_META_CACHE.get(conf, topologyId, ops, stormRoot).getDepLocs();
}
public static String getStormVersionFor(final Map<String, Object> conf, final String topologyId, final AdvancedFSOps ops, String stormRoot) throws IOException {
return TOPO_META_CACHE.get(conf, topologyId, ops, stormRoot).getStormVersion();
}
/**
     * Get parameters for the class path of the worker process. Also used by the
     * log writer.
* @param stormRoot the root dist dir for the topology
* @return the classpath for the topology as command line arguments.
* @throws IOException on any error.
*/
private List<String> getClassPathParams(final String stormRoot, final SimpleVersion topoVersion) throws IOException {
final String stormJar = ConfigUtils.supervisorStormJarPath(stormRoot);
final List<String> dependencyLocations = getDependencyLocationsFor(_conf, _topologyId, _ops, stormRoot);
final String workerClassPath = getWorkerClassPath(stormJar, dependencyLocations, topoVersion);
List<String> classPathParams = new ArrayList<>();
classPathParams.add("-cp");
classPathParams.add(workerClassPath);
return classPathParams;
}
/**
* Get a set of java properties that are common to both the log writer and the worker processes.
* These are mostly system properties that are used by logging.
* @return a list of command line options
*/
private List<String> getCommonParams() {
final String workersArtifacts = ConfigUtils.workerArtifactsRoot(_conf);
String stormLogDir = ConfigUtils.getLogDir();
String log4jConfigurationFile = getWorkerLoggingConfigFile();
List<String> commonParams = new ArrayList<>();
commonParams.add("-Dlogging.sensitivity=" + OR((String) _topoConf.get(Config.TOPOLOGY_LOGGING_SENSITIVITY), "S3"));
commonParams.add("-Dlogfile.name=worker.log");
commonParams.add("-Dstorm.home=" + OR(_stormHome, ""));
commonParams.add("-Dworkers.artifacts=" + workersArtifacts);
commonParams.add("-Dstorm.id=" + _topologyId);
commonParams.add("-Dworker.id=" + _workerId);
commonParams.add("-Dworker.port=" + _port);
commonParams.add("-Dstorm.log.dir=" + stormLogDir);
commonParams.add("-Dlog4j.configurationFile=" + log4jConfigurationFile);
commonParams.add("-DLog4jContextSelector=org.apache.logging.log4j.core.selector.BasicContextSelector");
commonParams.add("-Dstorm.local.dir=" + _conf.get(Config.STORM_LOCAL_DIR));
return commonParams;
}
private int getMemOnHeap(WorkerResources resources) {
int memOnheap = 0;
if (resources != null && resources.is_set_mem_on_heap() &&
resources.get_mem_on_heap() > 0) {
memOnheap = (int) Math.ceil(resources.get_mem_on_heap());
} else {
// set the default heap memory size for supervisor-test
memOnheap = ObjectReader.getInt(_topoConf.get(Config.WORKER_HEAP_MEMORY_MB), 768);
}
return memOnheap;
}
private List<String> getWorkerProfilerChildOpts(int memOnheap) {
List<String> workerProfilerChildopts = new ArrayList<>();
if (ObjectReader.getBoolean(_conf.get(DaemonConfig.WORKER_PROFILER_ENABLED), false)) {
workerProfilerChildopts = substituteChildopts(_conf.get(DaemonConfig.WORKER_PROFILER_CHILDOPTS), memOnheap);
}
return workerProfilerChildopts;
}
protected String javaCmd(String cmd) {
String ret = null;
String javaHome = System.getenv().get("JAVA_HOME");
if (StringUtils.isNotBlank(javaHome)) {
ret = javaHome + Utils.FILE_PATH_SEPARATOR + "bin" + Utils.FILE_PATH_SEPARATOR + cmd;
} else {
ret = cmd;
}
return ret;
}
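    // For illustration only: with JAVA_HOME=/usr/lib/jvm/java-8-openjdk set,
    // javaCmd("java") resolves to "/usr/lib/jvm/java-8-openjdk/bin/java";
    // with JAVA_HOME unset or blank it falls back to plain "java" and relies
    // on the PATH visible to the supervisor process.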
/**
* Create the command to launch the worker process
* @param memOnheap the on heap memory for the worker
* @param stormRoot the root dist dir for the topology
* @param jlp java library path for the topology
* @return the command to run
* @throws IOException on any error.
*/
private List<String> mkLaunchCommand(final int memOnheap, final String stormRoot,
final String jlp) throws IOException {
final String javaCmd = javaCmd("java");
final String stormOptions = ConfigUtils.concatIfNotNull(System.getProperty("storm.options"));
final String topoConfFile = ConfigUtils.concatIfNotNull(System.getProperty("storm.conf.file"));
final String workerTmpDir = ConfigUtils.workerTmpRoot(_conf, _workerId);
String topoVersionString = getStormVersionFor(_conf, _topologyId, _ops, stormRoot);
if (topoVersionString == null) {
topoVersionString = (String)_conf.getOrDefault(Config.SUPERVISOR_WORKER_DEFAULT_VERSION, VersionInfo.getVersion());
}
final SimpleVersion topoVersion = new SimpleVersion(topoVersionString);
List<String> classPathParams = getClassPathParams(stormRoot, topoVersion);
List<String> commonParams = getCommonParams();
List<String> commandList = new ArrayList<>();
String logWriter = getWorkerLogWriter(topoVersion);
if (logWriter != null) {
//Log Writer Command...
commandList.add(javaCmd);
commandList.addAll(classPathParams);
commandList.addAll(substituteChildopts(_topoConf.get(Config.TOPOLOGY_WORKER_LOGWRITER_CHILDOPTS)));
commandList.addAll(commonParams);
commandList.add(logWriter); //The LogWriter in turn launches the actual worker.
}
//Worker Command...
commandList.add(javaCmd);
commandList.add("-server");
commandList.addAll(commonParams);
commandList.addAll(substituteChildopts(_conf.get(Config.WORKER_CHILDOPTS), memOnheap));
commandList.addAll(substituteChildopts(_topoConf.get(Config.TOPOLOGY_WORKER_CHILDOPTS), memOnheap));
commandList.addAll(substituteChildopts(Utils.OR(
_topoConf.get(Config.TOPOLOGY_WORKER_GC_CHILDOPTS),
_conf.get(Config.WORKER_GC_CHILDOPTS)), memOnheap));
commandList.addAll(getWorkerProfilerChildOpts(memOnheap));
commandList.add("-Djava.library.path=" + jlp);
commandList.add("-Dstorm.conf.file=" + topoConfFile);
commandList.add("-Dstorm.options=" + stormOptions);
commandList.add("-Djava.io.tmpdir=" + workerTmpDir);
commandList.addAll(classPathParams);
commandList.add(getWorkerMain(topoVersion));
commandList.add(_topologyId);
commandList.add(_supervisorId);
commandList.add(String.valueOf(_port));
commandList.add(_workerId);
return commandList;
}
@Override
public void launch() throws IOException {
_type.assertFull();
LOG.info("Launching worker with assignment {} for this supervisor {} on port {} with id {}", _assignment,
_supervisorId, _port, _workerId);
String logPrefix = "Worker Process " + _workerId;
ProcessExitCallback processExitCallback = new ProcessExitCallback(logPrefix);
_exitedEarly = false;
final WorkerResources resources = _assignment.get_resources();
final int memOnheap = getMemOnHeap(resources);
final String stormRoot = ConfigUtils.supervisorStormDistRoot(_conf, _topologyId);
final String jlp = javaLibraryPath(stormRoot, _conf);
List<String> commandList = mkLaunchCommand(memOnheap, stormRoot, jlp);
Map<String, String> topEnvironment = new HashMap<String, String>();
@SuppressWarnings("unchecked")
Map<String, String> environment = (Map<String, String>) _topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
if (environment != null) {
topEnvironment.putAll(environment);
}
topEnvironment.put("LD_LIBRARY_PATH", jlp);
if (_resourceIsolationManager != null) {
int memoffheap = (int) Math.ceil(resources.get_mem_off_heap());
int cpu = (int) Math.ceil(resources.get_cpu());
int cGroupMem = (int) (Math.ceil((double) _conf.get(DaemonConfig.STORM_CGROUP_MEMORY_LIMIT_TOLERANCE_MARGIN_MB)));
int memoryValue = memoffheap + memOnheap + cGroupMem;
int cpuValue = cpu;
Map<String, Number> map = new HashMap<>();
map.put("cpu", cpuValue);
map.put("memory", memoryValue);
_resourceIsolationManager.reserveResourcesForWorker(_workerId, map);
}
LOG.info("Launching worker with command: {}. ", ServerUtils.shellCmd(commandList));
String workerDir = ConfigUtils.workerRoot(_conf, _workerId);
launchWorkerProcess(commandList, topEnvironment, logPrefix, processExitCallback, new File(workerDir));
}
}
| [
"\"STORM_CONF_DIR\"",
"\"STORM_CONF_DIR\"",
"\"STORM_EXT_CLASSPATH\""
] | [] | [
"STORM_EXT_CLASSPATH",
"STORM_CONF_DIR"
] | [] | ["STORM_EXT_CLASSPATH", "STORM_CONF_DIR"] | java | 2 | 0 | |
practice/interview_preparation_kit/warm_up_challenges/repeated_string/repeated_string.go | // Package main implements Repeated String problem
//
// https://www.hackerrank.com/challenges/repeated-string/problem
package main
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// Complete the repeatedString function below.
func repeatedString(s string, n int64) int64 {
var stringLength = int64(len(s))
var loops int64 = n / stringLength
var prefixLength int64 = n % stringLength
var occurrencesInString int64 = 0
var occurrencesInPrefix int64 = 0
for pos, char := range s {
if char == 'a' {
occurrencesInString++
if int64(pos) < prefixLength {
occurrencesInPrefix++
}
}
}
return occurrencesInString * loops + occurrencesInPrefix
}
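// Worked example (added for illustration, not part of the original solution):
// for s = "aba" and n = 10 the infinite repetition starts "abaabaabaa".
// stringLength = 3, loops = 10/3 = 3 and prefixLength = 10%3 = 1;
// occurrencesInString = 2 and occurrencesInPrefix = 1, so the answer is
// 2*3 + 1 = 7 occurrences of 'a' in the first 10 characters.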
func main() {
reader := bufio.NewReaderSize(os.Stdin, 1024 * 1024)
var writer *bufio.Writer
if (os.Getenv("OUTPUT_PATH") != "") {
stdout, err := os.Create(os.Getenv("OUTPUT_PATH"))
checkError(err)
writer = bufio.NewWriterSize(stdout, 1024 * 1024)
defer stdout.Close()
} else {
writer = bufio.NewWriterSize(os.Stdout, 1024 * 1024)
}
s := readLine(reader)
n, err := strconv.ParseInt(readLine(reader), 10, 64)
checkError(err)
result := repeatedString(s, n)
fmt.Fprintf(writer, "%d\n", result)
err = writer.Flush()
checkError(err)
}
func readLine(reader *bufio.Reader) string {
str, _, err := reader.ReadLine()
if err == io.EOF {
return ""
}
return strings.TrimRight(string(str), "\r\n")
}
func checkError(err error) {
if err != nil {
panic(err)
}
}
| [
"\"OUTPUT_PATH\"",
"\"OUTPUT_PATH\""
] | [] | [
"OUTPUT_PATH"
] | [] | ["OUTPUT_PATH"] | go | 1 | 0 | |
main.go | package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/allan-simon/go-singleinstance"
"github.com/dlasky/gotk3-layershell/layershell"
"github.com/gotk3/gotk3/gdk"
"github.com/gotk3/gotk3/glib"
"github.com/gotk3/gotk3/gtk"
)
const version = "0.1.10"
var (
appDirs []string
configDirectory string
pinnedFile string
pinned []string
src glib.SourceHandle
id2entry map[string]desktopEntry
preferredApps map[string]interface{}
)
var categoryNames = [...]string{
"utility",
"development",
"game",
"graphics",
"internet-and-network",
"office",
"audio-video",
"system-tools",
"other",
}
type category struct {
Name string
DisplayName string
Icon string
}
var categories []category
type desktopEntry struct {
DesktopID string
Name string
NameLoc string
Comment string
CommentLoc string
Icon string
Exec string
Category string
Terminal bool
NoDisplay bool
}
// slices below will hold DesktopID strings
var (
listUtility []string
listDevelopment []string
listGame []string
listGraphics []string
listInternetAndNetwork []string
listOffice []string
listAudioVideo []string
listSystemTools []string
listOther []string
)
var desktopEntries []desktopEntry
// UI elements
var (
resultWindow *gtk.ScrolledWindow
fileSearchResults []string
searchEntry *gtk.SearchEntry
phrase string
fileSearchResultFlowBox *gtk.FlowBox
userDirsMap map[string]string
appFlowBox *gtk.FlowBox
appSearchResultWrapper *gtk.Box
fileSearchResultWrapper *gtk.Box
pinnedFlowBox *gtk.FlowBox
pinnedFlowBoxWrapper *gtk.Box
categoriesWrapper *gtk.Box
catButtons []*gtk.Button
statusLabel *gtk.Label
status string
ignore string
)
func defaultStringIfBlank(s, fallback string) string {
s = strings.TrimSpace(s)
if s == "" {
return fallback
}
return s
}
// Flags
var cssFileName = flag.String("s", "drawer.css", "Styling: css file name")
var targetOutput = flag.String("o", "", "name of the Output to display the drawer on (sway only)")
var displayVersion = flag.Bool("v", false, "display Version information")
var overlay = flag.Bool("ovl", false, "use OVerLay layer")
var iconSize = flag.Int("is", 64, "Icon Size")
var fsColumns = flag.Uint("fscol", 2, "File Search result COLumns")
var columnsNumber = flag.Uint("c", 6, "number of Columns")
var itemSpacing = flag.Uint("spacing", 20, "icon spacing")
var lang = flag.String("lang", "", "force lang, e.g. \"en\", \"pl\"")
var fileManager = flag.String("fm", "thunar", "File Manager")
var term = flag.String("term", defaultStringIfBlank(os.Getenv("TERM"), "alacritty"), "Terminal emulator")
var nameLimit = flag.Int("fslen", 80, "File Search name length Limit")
var noCats = flag.Bool("nocats", false, "Disable filtering by category")
var noFS = flag.Bool("nofs", false, "Disable file search")
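// Illustrative invocation only (the flag values below are made-up examples):
//   nwg-drawer -c 7 -is 48 -spacing 25 -fm pcmanfm -term foot -o DP-1
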
func main() {
timeStart := time.Now()
flag.Parse()
if *displayVersion {
fmt.Printf("nwg-drawer version %s\n", version)
os.Exit(0)
}
// Gentle SIGTERM handler thanks to reiki4040 https://gist.github.com/reiki4040/be3705f307d3cd136e85
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGTERM)
go func() {
for {
s := <-signalChan
if s == syscall.SIGTERM {
println("SIGTERM received, bye bye!")
gtk.MainQuit()
}
}
}()
	// We want the same key/mouse binding to turn the drawer off: kill the running instance and exit.
lockFilePath := fmt.Sprintf("%s/nwg-drawer.lock", tempDir())
lockFile, err := singleinstance.CreateLockFile(lockFilePath)
if err != nil {
pid, err := readTextFile(lockFilePath)
if err == nil {
i, err := strconv.Atoi(pid)
if err == nil {
println("Running instance found, sending SIGTERM and exiting...")
syscall.Kill(i, syscall.SIGTERM)
}
}
os.Exit(0)
}
defer lockFile.Close()
// LANGUAGE
if *lang == "" && os.Getenv("LANG") != "" {
*lang = strings.Split(os.Getenv("LANG"), ".")[0]
}
println(fmt.Sprintf("lang: %s", *lang))
// ENVIRONMENT
configDirectory = configDir()
if !pathExists(filepath.Join(configDirectory, "drawer.css")) {
copyFile(filepath.Join(getDataHome(), "nwg-drawer/drawer.css"), filepath.Join(configDirectory, "drawer.css"))
}
cacheDirectory := cacheDir()
if cacheDirectory == "" {
log.Panic("Couldn't determine cache directory location")
}
// DATA
pinnedFile = filepath.Join(cacheDirectory, "nwg-pin-cache")
pinned, err = loadTextFile(pinnedFile)
if err != nil {
pinned = nil
}
println(fmt.Sprintf("Found %v pinned items", len(pinned)))
cssFile := filepath.Join(configDirectory, *cssFileName)
appDirs = getAppDirs()
setUpCategories()
desktopFiles := listDesktopFiles()
println(fmt.Sprintf("Found %v desktop files", len(desktopFiles)))
status = parseDesktopFiles(desktopFiles)
	// For opening files we use xdg-open. As its configuration is a PITA, we may override some associations
	// in the ~/.config/nwg-panel/preferred-apps.json file.
paFile := filepath.Join(configDirectory, "preferred-apps.json")
preferredApps, err = loadPreferredApps(paFile)
if err != nil {
println(fmt.Sprintf("Custom associations file %s not found or invalid", paFile))
} else {
println(fmt.Sprintf("Found %v associations in %s", len(preferredApps), paFile))
}
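	// Purely illustrative sketch of such a file; the real shape is whatever
	// loadPreferredApps (defined elsewhere in this repository) expects, so
	// treat these keys and values as hypothetical:
	//   {
	//     "\\.pdf$": "evince",
	//     "^https?://": "firefox"
	//   }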
// USER INTERFACE
gtk.Init(nil)
cssProvider, _ := gtk.CssProviderNew()
err = cssProvider.LoadFromPath(cssFile)
if err != nil {
println(fmt.Sprintf("ERROR: %s css file not found or erroneous. Using GTK styling.", cssFile))
println(fmt.Sprintf("%s", err))
} else {
println(fmt.Sprintf("Using style from %s", cssFile))
screen, _ := gdk.ScreenGetDefault()
gtk.AddProviderForScreen(screen, cssProvider, gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
}
win, err := gtk.WindowNew(gtk.WINDOW_TOPLEVEL)
if err != nil {
log.Fatal("Unable to create window:", err)
}
if wayland() {
layershell.InitForWindow(win)
var output2mon map[string]*gdk.Monitor
if *targetOutput != "" {
// We want to assign layershell to a monitor, but we only know the output name!
output2mon, err = mapOutputs()
if err == nil {
monitor := output2mon[*targetOutput]
layershell.SetMonitor(win, monitor)
} else {
println(fmt.Sprintf("%s", err))
}
}
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_BOTTOM, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_TOP, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_LEFT, true)
layershell.SetAnchor(win, layershell.LAYER_SHELL_EDGE_RIGHT, true)
if *overlay {
layershell.SetLayer(win, layershell.LAYER_SHELL_LAYER_OVERLAY)
layershell.SetExclusiveZone(win, -1)
} else {
layershell.SetLayer(win, layershell.LAYER_SHELL_LAYER_TOP)
}
layershell.SetKeyboardMode(win, layershell.LAYER_SHELL_KEYBOARD_MODE_EXCLUSIVE)
}
win.Connect("destroy", func() {
gtk.MainQuit()
})
win.Connect("key-press-event", func(window *gtk.Window, event *gdk.Event) bool {
key := &gdk.EventKey{Event: event}
switch key.KeyVal() {
case gdk.KEY_Escape:
s, _ := searchEntry.GetText()
if s != "" {
searchEntry.GrabFocus()
searchEntry.SetText("")
} else {
gtk.MainQuit()
}
return false
case gdk.KEY_downarrow, gdk.KEY_Up, gdk.KEY_Down, gdk.KEY_Left, gdk.KEY_Right, gdk.KEY_Tab,
gdk.KEY_Return, gdk.KEY_Page_Up, gdk.KEY_Page_Down, gdk.KEY_Home, gdk.KEY_End:
return false
default:
if !searchEntry.IsFocus() {
searchEntry.GrabFocusWithoutSelecting()
}
return false
}
})
// Close the window on leave, but not immediately, to avoid accidental closes
win.Connect("leave-notify-event", func() {
src = glib.TimeoutAdd(uint(500), func() bool {
gtk.MainQuit()
return false
})
})
win.Connect("enter-notify-event", func() {
cancelClose()
})
/*
In case someone REALLY needed to use X11 - for some stupid Zoom meeting or something, this allows
the drawer to behave properly on Openbox, and possibly somewhere else. For sure not on i3.
This feature is not really supported and will stay undocumented.
*/
if !wayland() {
println("Not Wayland, oh really?")
win.SetDecorated(false)
win.Maximize()
}
// Set up UI
outerVBox, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
win.Add(outerVBox)
searchBoxWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(searchBoxWrapper, false, false, 10)
searchEntry = setUpSearchEntry()
searchEntry.SetMaxWidthChars(30)
searchBoxWrapper.PackStart(searchEntry, true, false, 0)
if !*noCats {
categoriesWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
categoriesButtonBox := setUpCategoriesButtonBox()
categoriesWrapper.PackStart(categoriesButtonBox, true, false, 0)
outerVBox.PackStart(categoriesWrapper, false, false, 0)
}
pinnedWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(pinnedWrapper, false, false, 0)
pinnedFlowBoxWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(pinnedFlowBoxWrapper, false, false, 0)
pinnedFlowBox = setUpPinnedFlowBox()
resultWindow, _ = gtk.ScrolledWindowNew(nil, nil)
resultWindow.SetEvents(int(gdk.ALL_EVENTS_MASK))
resultWindow.SetPolicy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
resultWindow.Connect("enter-notify-event", func() {
cancelClose()
})
resultWindow.Connect("button-release-event", func(sw *gtk.ScrolledWindow, e *gdk.Event) bool {
btnEvent := gdk.EventButtonNewFromEvent(e)
if btnEvent.Button() == 1 || btnEvent.Button() == 3 {
gtk.MainQuit()
return true
}
return false
})
outerVBox.PackStart(resultWindow, true, true, 10)
resultsWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultWindow.Add(resultsWrapper)
appSearchResultWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultsWrapper.PackStart(appSearchResultWrapper, false, false, 0)
appFlowBox = setUpAppsFlowBox(nil, "")
// Focus 1st pinned item if any, otherwise focus 1st found app icon
var button gtk.IWidget
if pinnedFlowBox.GetChildren().Length() > 0 {
button, err = pinnedFlowBox.GetChildAtIndex(0).GetChild()
} else {
button, err = appFlowBox.GetChildAtIndex(0).GetChild()
}
if err == nil {
button.ToWidget().GrabFocus()
}
userDirsMap = mapXdgUserDirs()
placeholder, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 0)
resultsWrapper.PackStart(placeholder, true, true, 0)
placeholder.SetSizeRequest(20, 20)
if !*noFS {
wrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
fileSearchResultWrapper, _ = gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
fileSearchResultWrapper.SetProperty("name", "files-box")
wrapper.PackStart(fileSearchResultWrapper, true, false, 0)
resultsWrapper.PackEnd(wrapper, false, false, 10)
}
statusLineWrapper, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 0)
outerVBox.PackStart(statusLineWrapper, false, false, 10)
statusLabel, _ = gtk.LabelNew(status)
statusLineWrapper.PackStart(statusLabel, true, false, 0)
win.ShowAll()
if !*noFS {
fileSearchResultWrapper.SetSizeRequest(appFlowBox.GetAllocatedWidth(), 1)
fileSearchResultWrapper.Hide()
}
if !*noCats {
categoriesWrapper.SetSizeRequest(1, categoriesWrapper.GetAllocatedHeight()*2)
}
t := time.Now()
println(fmt.Sprintf("UI created in %v ms. Thank you for your patience.", t.Sub(timeStart).Milliseconds()))
gtk.Main()
}
| [
"\"TERM\"",
"\"LANG\"",
"\"LANG\""
] | [] | [
"LANG",
"TERM"
] | [] | ["LANG", "TERM"] | go | 2 | 0 | |
pilot/pkg/networking/core/v1alpha3/listener.go | // Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"encoding/json"
"fmt"
"os"
"reflect"
"sort"
"strings"
"time"
xdsapi "github.com/envoyproxy/go-control-plane/envoy/api/v2"
"github.com/envoyproxy/go-control-plane/envoy/api/v2/auth"
"github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
"github.com/envoyproxy/go-control-plane/envoy/api/v2/listener"
fileaccesslog "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v2"
accesslog "github.com/envoyproxy/go-control-plane/envoy/config/filter/accesslog/v2"
http_conn "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2"
tcp_proxy "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/tcp_proxy/v2"
"github.com/envoyproxy/go-control-plane/envoy/type"
xdsutil "github.com/envoyproxy/go-control-plane/pkg/util"
google_protobuf "github.com/gogo/protobuf/types"
"github.com/prometheus/client_golang/prometheus"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/plugin"
"istio.io/istio/pilot/pkg/networking/plugin/authn"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pkg/log"
)
const (
fileAccessLog = "envoy.file_access_log"
envoyHTTPConnectionManager = "envoy.http_connection_manager"
// RDSHttpProxy is the special name for HTTP PROXY route
RDSHttpProxy = "http_proxy"
// VirtualListenerName is the name for traffic capture listener
VirtualListenerName = "virtual"
// WildcardAddress binds to all IP addresses
WildcardAddress = "0.0.0.0"
// LocalhostAddress for local binding
LocalhostAddress = "127.0.0.1"
)
var (
// Very verbose output in the logs - full LDS response logged for each sidecar.
// Use /debug/ldsz instead.
verboseDebug = os.Getenv("PILOT_DUMP_ALPHA3") != ""
// TODO: gauge should be reset on refresh, not the best way to represent errors but better
// than nothing.
// TODO: add dimensions - namespace of rule, service, rule name
invalidOutboundListeners = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "pilot_invalid_out_listeners",
Help: "Number of invalid outbound listeners.",
})
filterChainsConflict = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "pilot_conf_filter_chains",
Help: "Number of conflicting filter chains.",
})
)
func init() {
prometheus.MustRegister(invalidOutboundListeners)
prometheus.MustRegister(filterChainsConflict)
}
// ListenersALPNProtocols denotes the list of ALPN protocols that the listener
// should expose
var ListenersALPNProtocols = []string{"h2", "http/1.1"}
// BuildListeners produces a list of listeners and referenced clusters for all proxies
func (configgen *ConfigGeneratorImpl) BuildListeners(env *model.Environment, node *model.Proxy, push *model.PushStatus) ([]*xdsapi.Listener, error) {
switch node.Type {
case model.Sidecar:
return configgen.buildSidecarListeners(env, node)
case model.Router, model.Ingress:
return configgen.buildGatewayListeners(env, node, push)
}
return nil, nil
}
// buildSidecarListeners produces a list of listeners for sidecar proxies
func (configgen *ConfigGeneratorImpl) buildSidecarListeners(env *model.Environment, node *model.Proxy) ([]*xdsapi.Listener, error) {
mesh := env.Mesh
managementPorts := env.ManagementPorts(node.IPAddress)
proxyInstances, err := env.GetProxyServiceInstances(node)
if err != nil {
return nil, err
}
services, err := env.Services()
if err != nil {
return nil, err
}
listeners := make([]*xdsapi.Listener, 0)
if mesh.ProxyListenPort > 0 {
inbound := configgen.buildSidecarInboundListeners(env, node, proxyInstances)
outbound := configgen.buildSidecarOutboundListeners(env, node, proxyInstances, services)
listeners = append(listeners, inbound...)
listeners = append(listeners, outbound...)
mgmtListeners := buildSidecarInboundMgmtListeners(managementPorts, node.IPAddress)
		// If the management listener port and the service port are the same, bad things happen
		// when running in Kubernetes, as the probes stop responding. So, append
		// non-overlapping listeners only.
for i := range mgmtListeners {
m := mgmtListeners[i]
l := util.GetByAddress(listeners, m.Address.String())
if l != nil {
log.Warnf("Omitting listener for management address %s (%s) due to collision with service listener %s (%s)",
m.Name, m.Address, l.Name, l.Address)
continue
}
listeners = append(listeners, m)
}
// We need a dummy filter to fill in the filter stack for orig_dst listener
// TODO: Move to Listener filters and set up original dst filter there.
dummyTCPProxy := &tcp_proxy.TcpProxy{
StatPrefix: util.BlackHoleCluster,
Cluster: util.BlackHoleCluster,
}
var transparent *google_protobuf.BoolValue
if mode := node.Metadata["INTERCEPTION_MODE"]; mode == "TPROXY" {
transparent = &google_protobuf.BoolValue{true}
}
// add an extra listener that binds to the port that is the recipient of the iptables redirect
listeners = append(listeners, &xdsapi.Listener{
Name: VirtualListenerName,
Address: util.BuildAddress(WildcardAddress, uint32(mesh.ProxyListenPort)),
Transparent: transparent,
UseOriginalDst: &google_protobuf.BoolValue{true},
FilterChains: []listener.FilterChain{
{
Filters: []listener.Filter{
{
Name: xdsutil.TCPProxy,
Config: util.MessageToStruct(dummyTCPProxy),
},
},
},
},
})
}
// enable HTTP PROXY port if necessary; this will add an RDS route for this port
if mesh.ProxyHttpPort > 0 {
useRemoteAddress := false
traceOperation := http_conn.EGRESS
listenAddress := LocalhostAddress
if node.Type == model.Router {
useRemoteAddress = true
traceOperation = http_conn.INGRESS
listenAddress = WildcardAddress
}
opts := buildListenerOpts{
env: env,
proxy: node,
proxyInstances: proxyInstances,
ip: listenAddress,
port: int(mesh.ProxyHttpPort),
protocol: model.ProtocolHTTP,
filterChainOpts: []*filterChainOpts{{
httpOpts: &httpListenerOpts{
rds: RDSHttpProxy,
useRemoteAddress: useRemoteAddress,
direction: traceOperation,
connectionManager: &http_conn.HttpConnectionManager{
HttpProtocolOptions: &core.Http1ProtocolOptions{
AllowAbsoluteUrl: &google_protobuf.BoolValue{
Value: true,
},
},
},
},
}},
bindToPort: true,
}
l := buildListener(opts)
if err := marshalFilters(l, opts, []plugin.FilterChain{{}}); err != nil {
log.Warna("buildSidecarListeners ", err.Error())
} else {
listeners = append(listeners, l)
}
// TODO: need inbound listeners in HTTP_PROXY case, with dedicated ingress listener.
}
return listeners, nil
}
// buildSidecarInboundListeners creates listeners for the server-side (inbound)
// configuration for co-located service proxyInstances.
func (configgen *ConfigGeneratorImpl) buildSidecarInboundListeners(env *model.Environment, node *model.Proxy,
proxyInstances []*model.ServiceInstance) []*xdsapi.Listener {
var listeners []*xdsapi.Listener
listenerMap := make(map[string]*model.ServiceInstance)
// inbound connections/requests are redirected to the endpoint address but appear to be sent
// to the service address.
for _, instance := range proxyInstances {
endpoint := instance.Endpoint
protocol := endpoint.ServicePort.Protocol
// Local service instances can be accessed through one of three
// addresses: localhost, endpoint IP, and service
// VIP. Localhost bypasses the proxy and doesn't need any TCP
// route config. Endpoint IP is handled below and Service IP is handled
// by outbound routes.
// Traffic sent to our service VIP is redirected by remote
// services' kubeproxy to our specific endpoint IP.
var listenerType plugin.ListenerProtocol
listenerOpts := buildListenerOpts{
env: env,
proxy: node,
proxyInstances: proxyInstances,
ip: endpoint.Address,
port: endpoint.Port,
protocol: protocol,
}
for _, p := range configgen.Plugins {
if authnPolicy, ok := p.(authn.Plugin); ok {
if authnPolicy.RequireTLSMultiplexing(env.Mesh, env.IstioConfigStore, instance.Service.Hostname, instance.Endpoint.ServicePort) {
listenerOpts.tlsMultiplexed = true
log.Infof("Uses TLS multiplexing for %v %v\n", instance.Service.Hostname.String(), *instance.Endpoint.ServicePort)
}
}
}
listenerMapKey := fmt.Sprintf("%s:%d", endpoint.Address, endpoint.Port)
if old, exists := listenerMap[listenerMapKey]; exists {
env.PushStatus.Add(model.ProxyStatusConflictInboundListener, node.ID, node,
fmt.Sprintf("Rejected %s, used %s for %s", instance.Service.Hostname, old.Service.Hostname, listenerMapKey))
// Skip building listener for the same ip port
continue
}
listenerType = plugin.ModelProtocolToListenerProtocol(protocol)
switch listenerType {
case plugin.ListenerProtocolHTTP:
httpOpts := &httpListenerOpts{
routeConfig: configgen.buildSidecarInboundHTTPRouteConfig(env, node, instance),
rds: "", // no RDS for inbound traffic
useRemoteAddress: false,
direction: http_conn.INGRESS,
}
if listenerOpts.tlsMultiplexed {
listenerOpts.filterChainOpts = []*filterChainOpts{
{
httpOpts: httpOpts,
transportProtocol: authn.EnvoyRawBufferMatch,
}, {
httpOpts: httpOpts,
transportProtocol: authn.EnvoyTLSMatch,
},
}
} else {
listenerOpts.filterChainOpts = []*filterChainOpts{
{
httpOpts: httpOpts,
},
}
}
case plugin.ListenerProtocolTCP:
listenerOpts.filterChainOpts = []*filterChainOpts{{
networkFilters: buildInboundNetworkFilters(instance),
}}
default:
log.Warnf("Unsupported inbound protocol %v for port %#v", protocol, instance.Endpoint.ServicePort)
continue
}
// call plugins
l := buildListener(listenerOpts)
mutable := &plugin.MutableObjects{
Listener: l,
FilterChains: make([]plugin.FilterChain, len(l.FilterChains)),
}
for _, p := range configgen.Plugins {
params := &plugin.InputParams{
ListenerProtocol: listenerType,
Env: env,
Node: node,
ProxyInstances: proxyInstances,
ServiceInstance: instance,
Port: endpoint.ServicePort,
}
if err := p.OnInboundListener(params, mutable); err != nil {
log.Warn(err.Error())
}
}
// Filters are serialized one time into an opaque struct once we have the complete list.
if err := marshalFilters(mutable.Listener, listenerOpts, mutable.FilterChains); err != nil {
log.Warna("buildSidecarInboundListeners ", err.Error())
} else {
listeners = append(listeners, mutable.Listener)
listenerMap[listenerMapKey] = instance
}
}
return listeners
}
type listenerEntry struct {
// TODO: Clean this up
services []*model.Service
servicePort *model.Port
listener *xdsapi.Listener
}
// sortServicesByCreationTime sorts the list of services in ascending order by their creation time (if available).
func sortServicesByCreationTime(services []*model.Service) []*model.Service {
sort.SliceStable(services, func(i, j int) bool {
return services[i].CreationTime.Before(services[j].CreationTime)
})
return services
}
func protocolName(p model.Protocol) string {
switch plugin.ModelProtocolToListenerProtocol(p) {
case plugin.ListenerProtocolHTTP:
return "HTTP"
case plugin.ListenerProtocolTCP:
return "TCP"
default:
return "UNKNOWN"
}
}
type outboundListenerConflict struct {
metric *model.PushMetric
env *model.Environment
node *model.Proxy
listenerName string
currentProtocol model.Protocol
currentServices []*model.Service
newHostname model.Hostname
newProtocol model.Protocol
}
func (c outboundListenerConflict) addMetric() {
currentHostnames := make([]string, len(c.currentServices))
for i, s := range c.currentServices {
currentHostnames[i] = string(s.Hostname)
}
concatHostnames := strings.Join(currentHostnames, ",")
c.env.PushStatus.Add(c.metric,
c.listenerName,
c.node,
fmt.Sprintf("Listener=%s Accepted%s=%s Rejected%s=%s %sServices=%d",
c.listenerName,
protocolName(c.currentProtocol),
concatHostnames,
protocolName(c.newProtocol),
c.newHostname,
protocolName(c.currentProtocol),
len(c.currentServices)))
}
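// For illustration, with hypothetical hostnames the message recorded above
// renders roughly as:
//   Listener=0.0.0.0_9080 AcceptedTCP=foo.default.svc.cluster.local RejectedHTTP=bar.default.svc.cluster.local TCPServices=1
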
// buildSidecarOutboundListeners generates http and tcp listeners for outbound connections from the service instance
// TODO(github.com/istio/pilot/issues/237)
//
// Sharing tcp_proxy and http_connection_manager filters on the same port for
// different destination services doesn't work with Envoy (yet). When the
// tcp_proxy filter's route matching fails for the http service the connection
// is closed without falling back to the http_connection_manager.
//
// Temporary workaround is to add a listener for each service IP that requires
// TCP routing
//
// Connections to the ports of non-load balanced services are directed to
// the connection's original destination. This avoids costly queries of instance
// IPs and ports, but requires that ports of non-load balanced service be unique.
func (configgen *ConfigGeneratorImpl) buildSidecarOutboundListeners(env *model.Environment, node *model.Proxy,
proxyInstances []*model.ServiceInstance, services []*model.Service) []*xdsapi.Listener {
// Sort the services in order of creation.
services = sortServicesByCreationTime(services)
var proxyLabels model.LabelsCollection
for _, w := range proxyInstances {
proxyLabels = append(proxyLabels, w.Labels)
}
meshGateway := map[string]bool{model.IstioMeshGateway: true}
configs := env.VirtualServices(meshGateway)
var tcpListeners, httpListeners []*xdsapi.Listener
	// For conflict resolution
var currentListenerEntry *listenerEntry
listenerMap := make(map[string]*listenerEntry)
for _, service := range services {
for _, servicePort := range service.Ports {
listenAddress := WildcardAddress
var destinationIPAddress string
var listenerMapKey string
listenerOpts := buildListenerOpts{
env: env,
proxy: node,
proxyInstances: proxyInstances,
ip: WildcardAddress,
port: servicePort.Port,
protocol: servicePort.Protocol,
}
currentListenerEntry = nil
switch plugin.ModelProtocolToListenerProtocol(servicePort.Protocol) {
case plugin.ListenerProtocolHTTP:
listenerMapKey = fmt.Sprintf("%s:%d", listenAddress, servicePort.Port)
var exists bool
// Check if this HTTP listener conflicts with an existing wildcard TCP listener
// i.e. one of NONE resolution type, since we collapse all HTTP listeners into
// a single 0.0.0.0:port listener and use vhosts to distinguish individual http
// services in that port
if currentListenerEntry, exists = listenerMap[listenerMapKey]; exists {
if !currentListenerEntry.servicePort.Protocol.IsHTTP() {
outboundListenerConflict{
metric: model.ProxyStatusConflictOutboundListenerTCPOverHTTP,
env: env,
node: node,
listenerName: listenerMapKey,
currentServices: currentListenerEntry.services,
currentProtocol: currentListenerEntry.servicePort.Protocol,
newHostname: service.Hostname,
newProtocol: servicePort.Protocol,
}.addMetric()
}
// Skip building listener for the same http port
currentListenerEntry.services = append(currentListenerEntry.services, service)
continue
}
operation := http_conn.EGRESS
useRemoteAddress := false
listenerOpts.protocol = servicePort.Protocol
listenerOpts.filterChainOpts = []*filterChainOpts{{
httpOpts: &httpListenerOpts{
rds: fmt.Sprintf("%d", servicePort.Port),
useRemoteAddress: useRemoteAddress,
direction: operation,
},
}}
case plugin.ListenerProtocolTCP:
				// Determine the listener address.
				// We listen on the service VIP if and only
				// if the address is an IP address. If it's a CIDR, we listen on
				// 0.0.0.0 and set up a filter chain match for the CIDR range.
				// As a small optimization, CIDRs with a /32 prefix will be converted
				// into the listener address so that there is a dedicated listener for this
				// ip:port. This will reduce the impact of a listener reload.
var svcListenAddress string
// This is to maintain backward compatibility with 0.8 envoy
if _, is10Proxy := node.GetProxyVersion(); !is10Proxy {
if service.Resolution != model.Passthrough {
svcListenAddress = service.GetServiceAddressForProxy(node)
}
} else {
svcListenAddress = service.GetServiceAddressForProxy(node)
}
// We should never get an empty address.
// This is a safety guard, in case some platform adapter isn't doing things
// properly
if len(svcListenAddress) > 0 {
if !strings.Contains(svcListenAddress, "/") {
listenAddress = svcListenAddress
} else {
// Address is a CIDR. Fall back to 0.0.0.0 and
// filter chain match
destinationIPAddress = svcListenAddress
}
}
listenerMapKey = fmt.Sprintf("%s:%d", listenAddress, servicePort.Port)
var exists bool
// Check if this TCP listener conflicts with an existing HTTP listener on 0.0.0.0:Port
if currentListenerEntry, exists = listenerMap[listenerMapKey]; exists {
// Check for port collisions between TCP/TLS and HTTP.
// If configured correctly, TCP/TLS ports may not collide.
// We'll need to do additional work to find out if there is a collision within TCP/TLS.
if !currentListenerEntry.servicePort.Protocol.IsTCP() {
outboundListenerConflict{
metric: model.ProxyStatusConflictOutboundListenerHTTPOverTCP,
env: env,
node: node,
listenerName: listenerMapKey,
currentServices: currentListenerEntry.services,
currentProtocol: currentListenerEntry.servicePort.Protocol,
newHostname: service.Hostname,
newProtocol: servicePort.Protocol,
}.addMetric()
continue
}
					// We have a collision with another TCP port.
// This can happen only if the service is listening on 0.0.0.0:<port>
// which is the case for headless services, or non-k8s services that do not have a VIP.
// Unfortunately we won't know if this is a real conflict or not
// until we process the VirtualServices, etc.
// The conflict resolution is done later in this code
}
listenerOpts.filterChainOpts = buildSidecarOutboundTCPTLSFilterChainOpts(node, env, configs,
destinationIPAddress, service, servicePort, proxyLabels, meshGateway)
default:
// UDP or other protocols: no need to log, it's too noisy
continue
}
			// Even if we have a non-empty current listener, let's build the new listener with the filter chains.
			// In the end, we will merge the filter chains.
// call plugins
listenerOpts.ip = listenAddress
l := buildListener(listenerOpts)
mutable := &plugin.MutableObjects{
Listener: l,
FilterChains: make([]plugin.FilterChain, len(l.FilterChains)),
}
for _, p := range configgen.Plugins {
params := &plugin.InputParams{
ListenerProtocol: plugin.ModelProtocolToListenerProtocol(servicePort.Protocol),
Env: env,
Node: node,
ProxyInstances: proxyInstances,
Service: service,
Port: servicePort,
}
if err := p.OnOutboundListener(params, mutable); err != nil {
log.Warn(err.Error())
}
}
// Filters are serialized one time into an opaque struct once we have the complete list.
if err := marshalFilters(mutable.Listener, listenerOpts, mutable.FilterChains); err != nil {
log.Warna("buildSidecarOutboundListeners: ", err.Error())
continue
}
// TODO(rshriram) merge multiple identical filter chains with just a single destination CIDR based
			// filter chain match, into a single filter chain and an array of destination CIDR matches
// We checked TCP over HTTP, and HTTP over TCP conflicts above.
// The code below checks for TCP over TCP conflicts and merges listeners
if currentListenerEntry != nil {
// merge the newly built listener with the existing listener
// if and only if the filter chains have distinct conditions
// Extract the current filter chain matches
// For every new filter chain match being added, check if any previous match is same
// if so, skip adding this filter chain with a warning
// This is very unoptimized.
newFilterChains := make([]listener.FilterChain, 0,
len(currentListenerEntry.listener.FilterChains)+len(mutable.Listener.FilterChains))
newFilterChains = append(newFilterChains, currentListenerEntry.listener.FilterChains...)
for _, incomingFilterChain := range mutable.Listener.FilterChains {
conflictFound := false
compareWithExisting:
for _, existingFilterChain := range currentListenerEntry.listener.FilterChains {
if existingFilterChain.FilterChainMatch == nil {
// This is a catch all filter chain.
// We can only merge with a non-catch all filter chain
// Else mark it as conflict
if incomingFilterChain.FilterChainMatch == nil {
conflictFound = true
outboundListenerConflict{
metric: model.ProxyStatusConflictOutboundListenerTCPOverTCP,
env: env,
node: node,
listenerName: listenerMapKey,
currentServices: currentListenerEntry.services,
currentProtocol: currentListenerEntry.servicePort.Protocol,
newHostname: service.Hostname,
newProtocol: servicePort.Protocol,
}.addMetric()
break compareWithExisting
} else {
continue
}
}
if incomingFilterChain.FilterChainMatch == nil {
continue
}
// We have two non-catch all filter chains. Check for duplicates
if reflect.DeepEqual(*existingFilterChain.FilterChainMatch, *incomingFilterChain.FilterChainMatch) {
conflictFound = true
outboundListenerConflict{
metric: model.ProxyStatusConflictOutboundListenerTCPOverTCP,
env: env,
node: node,
listenerName: listenerMapKey,
currentServices: currentListenerEntry.services,
currentProtocol: currentListenerEntry.servicePort.Protocol,
newHostname: service.Hostname,
newProtocol: servicePort.Protocol,
}.addMetric()
break compareWithExisting
}
}
if !conflictFound {
// There is no conflict with any filter chain in the existing listener.
// So append the new filter chains to the existing listener's filter chains
newFilterChains = append(newFilterChains, incomingFilterChain)
lEntry := listenerMap[listenerMapKey]
lEntry.services = append(lEntry.services, service)
}
}
currentListenerEntry.listener.FilterChains = newFilterChains
} else {
listenerMap[listenerMapKey] = &listenerEntry{
services: []*model.Service{service},
servicePort: servicePort,
listener: mutable.Listener,
}
}
if log.DebugEnabled() && len(mutable.Listener.FilterChains) > 1 || currentListenerEntry != nil {
var numChains int
if currentListenerEntry != nil {
numChains = len(currentListenerEntry.listener.FilterChains)
} else {
numChains = len(mutable.Listener.FilterChains)
}
log.Debugf("buildSidecarOutboundListeners: multiple filter chain listener %s with %d chains", mutable.Listener.Name, numChains)
}
}
}
for name, l := range listenerMap {
if err := l.listener.Validate(); err != nil {
log.Warnf("buildSidecarOutboundListeners: error validating listener %s (type %v): %v", name, l.servicePort.Protocol, err)
invalidOutboundListeners.Add(1)
continue
}
if l.servicePort.Protocol.IsTCP() {
tcpListeners = append(tcpListeners, l.listener)
} else {
httpListeners = append(httpListeners, l.listener)
}
}
return append(tcpListeners, httpListeners...)
}
// buildSidecarInboundMgmtListeners creates inbound TCP-only listeners for the management ports on
// the server (inbound). Management port listeners are slightly different from standard inbound listeners
// in that they have neither mixer filters nor inbound auth.
// N.B. If a given management port is the same as the service instance's endpoint port,
// the pod will fail to start in Kubernetes, because the mixer service tries to
// look up the service associated with the pod. Since the pod is yet to be started
// (and hence not bound to the service), the service lookup fails, causing the mixer
// to fail the health check call. This results in a vicious cycle, where Kubernetes
// restarts the unhealthy pod after successive failed health checks, and the mixer
// continues to reject the health checks as there is no service associated with
// the pod.
// So, if a user wants to use Kubernetes probes with Istio, she should ensure
// that the health check ports are distinct from the service ports.
func buildSidecarInboundMgmtListeners(managementPorts model.PortList, managementIP string) []*xdsapi.Listener {
listeners := make([]*xdsapi.Listener, 0, len(managementPorts))
if managementIP == "" {
managementIP = "127.0.0.1"
}
// assumes that inbound connections/requests are sent to the endpoint address
for _, mPort := range managementPorts {
switch mPort.Protocol {
case model.ProtocolHTTP, model.ProtocolHTTP2, model.ProtocolGRPC, model.ProtocolTCP,
model.ProtocolHTTPS, model.ProtocolTLS, model.ProtocolMongo, model.ProtocolRedis:
instance := &model.ServiceInstance{
Endpoint: model.NetworkEndpoint{
Address: managementIP,
Port: mPort.Port,
ServicePort: mPort,
},
Service: &model.Service{
Hostname: ManagementClusterHostname,
},
}
listenerOpts := buildListenerOpts{
ip: managementIP,
port: mPort.Port,
protocol: model.ProtocolTCP,
filterChainOpts: []*filterChainOpts{{
networkFilters: buildInboundNetworkFilters(instance),
}},
}
l := buildListener(listenerOpts)
			// TODO: should we call plugins for the admin port listeners too? We do everywhere else we construct listeners.
if err := marshalFilters(l, listenerOpts, []plugin.FilterChain{{}}); err != nil {
log.Warna("buildSidecarInboundMgmtListeners ", err.Error())
} else {
listeners = append(listeners, l)
}
default:
log.Warnf("Unsupported inbound protocol %v for management port %#v",
mPort.Protocol, mPort)
}
}
return listeners
}
// httpListenerOpts are options for an HTTP listener
type httpListenerOpts struct {
//nolint: maligned
routeConfig *xdsapi.RouteConfiguration
rds string
useRemoteAddress bool
direction http_conn.HttpConnectionManager_Tracing_OperationName
// If set, use this as a basis
connectionManager *http_conn.HttpConnectionManager
// stat prefix for the http connection manager
// DO not set this field. Will be overridden by marshalFilters
statPrefix string
}
// filterChainOpts describes a filter chain: a set of filters with the same TLS context
type filterChainOpts struct {
sniHosts []string
destinationCIDRs []string
transportProtocol string
tlsContext *auth.DownstreamTlsContext
httpOpts *httpListenerOpts
networkFilters []listener.Filter
}
// buildListenerOpts are the options required to build a Listener
type buildListenerOpts struct {
// nolint: maligned
env *model.Environment
proxy *model.Proxy
proxyInstances []*model.ServiceInstance
ip string
port int
protocol model.Protocol
bindToPort bool
filterChainOpts []*filterChainOpts
tlsMultiplexed bool
}
func buildHTTPConnectionManager(env *model.Environment, node *model.Proxy, httpOpts *httpListenerOpts,
httpFilters []*http_conn.HttpFilter) *http_conn.HttpConnectionManager {
filters := append(httpFilters,
&http_conn.HttpFilter{Name: xdsutil.CORS},
&http_conn.HttpFilter{Name: xdsutil.Fault},
&http_conn.HttpFilter{Name: xdsutil.Router},
)
if httpOpts.connectionManager == nil {
httpOpts.connectionManager = &http_conn.HttpConnectionManager{}
}
connectionManager := httpOpts.connectionManager
connectionManager.CodecType = http_conn.AUTO
connectionManager.AccessLog = []*accesslog.AccessLog{}
connectionManager.HttpFilters = filters
connectionManager.StatPrefix = httpOpts.statPrefix
connectionManager.UseRemoteAddress = &google_protobuf.BoolValue{httpOpts.useRemoteAddress}
if _, is10Proxy := node.GetProxyVersion(); is10Proxy {
// Allow websocket upgrades
websocketUpgrade := &http_conn.HttpConnectionManager_UpgradeConfig{UpgradeType: "websocket"}
connectionManager.UpgradeConfigs = []*http_conn.HttpConnectionManager_UpgradeConfig{websocketUpgrade}
notimeout := 0 * time.Second
// Setting IdleTimeout to 0 seems to break most tests, causing
// envoy to disconnect.
		// connectionManager.IdleTimeout = &notimeout
		connectionManager.StreamIdleTimeout = &notimeout
}
if httpOpts.rds != "" {
rds := &http_conn.HttpConnectionManager_Rds{
Rds: &http_conn.Rds{
ConfigSource: core.ConfigSource{
ConfigSourceSpecifier: &core.ConfigSource_Ads{
Ads: &core.AggregatedConfigSource{},
},
},
RouteConfigName: httpOpts.rds,
},
}
connectionManager.RouteSpecifier = rds
} else {
connectionManager.RouteSpecifier = &http_conn.HttpConnectionManager_RouteConfig{RouteConfig: httpOpts.routeConfig}
}
if env.Mesh.AccessLogFile != "" {
fl := &fileaccesslog.FileAccessLog{
Path: env.Mesh.AccessLogFile,
}
connectionManager.AccessLog = []*accesslog.AccessLog{
{
Config: util.MessageToStruct(fl),
Name: fileAccessLog,
},
}
}
if env.Mesh.EnableTracing {
tc := model.GetTraceConfig()
connectionManager.Tracing = &http_conn.HttpConnectionManager_Tracing{
OperationName: httpOpts.direction,
ClientSampling: &envoy_type.Percent{
Value: tc.ClientSampling,
},
RandomSampling: &envoy_type.Percent{
Value: tc.RandomSampling,
},
OverallSampling: &envoy_type.Percent{
Value: tc.OverallSampling,
},
}
connectionManager.GenerateRequestId = &google_protobuf.BoolValue{true}
}
if verboseDebug {
connectionManagerJSON, _ := json.MarshalIndent(connectionManager, " ", " ")
log.Infof("LDS: %s \n", string(connectionManagerJSON))
}
return connectionManager
}
// buildListener builds and initializes a Listener proto based on the provided opts. It does not set any filters.
func buildListener(opts buildListenerOpts) *xdsapi.Listener {
filterChains := make([]listener.FilterChain, 0, len(opts.filterChainOpts))
var listenerFilters []listener.ListenerFilter
if opts.tlsMultiplexed {
listenerFilters = []listener.ListenerFilter{
{
Name: authn.EnvoyTLSInspectorFilterName,
Config: &google_protobuf.Struct{},
},
}
}
for _, chain := range opts.filterChainOpts {
match := &listener.FilterChainMatch{
TransportProtocol: chain.transportProtocol,
}
if len(chain.sniHosts) > 0 {
sort.Strings(chain.sniHosts)
fullWildcardFound := false
for _, h := range chain.sniHosts {
if h == "*" {
fullWildcardFound = true
// If we have a host with *, it effectively means match anything, i.e.
// no SNI based matching for this host.
break
}
}
if !fullWildcardFound {
match.ServerNames = chain.sniHosts
}
}
if len(chain.destinationCIDRs) > 0 {
sort.Strings(chain.destinationCIDRs)
for _, d := range chain.destinationCIDRs {
if len(d) == 0 {
continue
}
cidr := util.ConvertAddressToCidr(d)
if cidr != nil && cidr.AddressPrefix != model.UnspecifiedIP {
match.PrefixRanges = append(match.PrefixRanges, cidr)
}
}
}
if reflect.DeepEqual(*match, listener.FilterChainMatch{}) {
match = nil
}
filterChains = append(filterChains, listener.FilterChain{
FilterChainMatch: match,
TlsContext: chain.tlsContext,
})
}
var deprecatedV1 *xdsapi.Listener_DeprecatedV1
if !opts.bindToPort {
deprecatedV1 = &xdsapi.Listener_DeprecatedV1{
BindToPort: boolFalse,
}
}
return &xdsapi.Listener{
Name: fmt.Sprintf("%s_%d", opts.ip, opts.port),
Address: util.BuildAddress(opts.ip, uint32(opts.port)),
ListenerFilters: listenerFilters,
FilterChains: filterChains,
DeprecatedV1: deprecatedV1,
}
}
// marshalFilters adds the provided TCP and HTTP filters to the provided Listener and serializes them.
//
// TODO: should we change this from []plugins.FilterChains to [][]listener.Filter, [][]*http_conn.HttpFilter?
// TODO: given how tightly tied listener.FilterChains, opts.filterChainOpts, and mutable.FilterChains are to each other
// we should encapsulate them some way to ensure they remain consistent (mainly that in each an index refers to the same
// chain)
func marshalFilters(l *xdsapi.Listener, opts buildListenerOpts, chains []plugin.FilterChain) error {
if len(opts.filterChainOpts) == 0 {
return fmt.Errorf("must have more than 0 chains in listener: %#v", l)
}
for i, chain := range chains {
opt := opts.filterChainOpts[i]
if len(chain.TCP) > 0 {
l.FilterChains[i].Filters = append(l.FilterChains[i].Filters, chain.TCP...)
}
if len(opt.networkFilters) > 0 {
l.FilterChains[i].Filters = append(l.FilterChains[i].Filters, opt.networkFilters...)
}
if log.DebugEnabled() {
log.Debugf("attached %d network filters to listener %q filter chain %d", len(chain.TCP)+len(opt.networkFilters), l.Name, i)
}
if opt.httpOpts != nil {
opt.httpOpts.statPrefix = l.Name
connectionManager := buildHTTPConnectionManager(opts.env, opts.proxy, opt.httpOpts, chain.HTTP)
l.FilterChains[i].Filters = append(l.FilterChains[i].Filters, listener.Filter{
Name: envoyHTTPConnectionManager,
Config: util.MessageToStruct(connectionManager),
})
log.Debugf("attached HTTP filter with %d http_filter options to listener %q filter chain %d", 1+len(chain.HTTP), l.Name, i)
}
}
return nil
}
| [
"\"PILOT_DUMP_ALPHA3\""
] | [] | [
"PILOT_DUMP_ALPHA3"
] | [] | ["PILOT_DUMP_ALPHA3"] | go | 1 | 0 | |
cmd/sms/main.go | // Copyright 2015 go-smpp authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// SMPP client for the command line.
//
// We bind to the SMSC as a transmitter, therefore can do SubmitSM
// (send Short Message) or QuerySM (query for message status). The
// latter may not be available depending on the SMSC.
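//
// Illustrative invocations (hypothetical host, credentials and numbers):
//
//	smppcli --addr smsc.example.com:2775 --user myuser --passwd secret send --register foobar 011-236-0873 "hello"
//	smppcli --addr smsc.example.com:2775 --user myuser --passwd secret query foobar <message-id>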
package main
import (
"crypto/tls"
"fmt"
"log"
"net"
"os"
"strings"
"github.com/urfave/cli"
"github.com/veoo/go-smpp/smpp"
"github.com/veoo/go-smpp/smpp/pdu/pdufield"
"github.com/veoo/go-smpp/smpp/pdu/pdutext"
)
// Version of smppcli.
var Version = "tip"
// Author of smppcli.
var Author = "go-smpp authors"
func main() {
app := cli.NewApp()
app.Name = "smppcli"
app.Usage = "SMPP client for the command line"
app.Version = Version
app.Author = Author
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "addr",
Value: "localhost:2775",
Usage: "Set SMPP server host:port",
},
cli.StringFlag{
Name: "user",
Value: "",
Usage: "Set SMPP username",
},
cli.StringFlag{
Name: "passwd",
Value: "",
Usage: "Set SMPP password",
},
cli.BoolFlag{
Name: "tls",
Usage: "Use client TLS connection",
},
cli.BoolFlag{
Name: "precaire",
Usage: "Accept invalid TLS certificate",
},
}
app.Commands = []cli.Command{
cmdShortMessage,
cmdQueryMessage,
}
app.Run(os.Args)
}
var cmdShortMessage = cli.Command{
Name: "send",
Usage: "send short message",
Flags: []cli.Flag{
cli.BoolFlag{
Name: "register",
Usage: "register for delivery receipt",
},
cli.StringFlag{
Name: "encoding",
Usage: "set text encoding: raw, ucs2 or latin1",
Value: "raw",
},
cli.StringFlag{
Name: "service-type",
Usage: "set service_type PDU (optional)",
Value: "",
},
cli.IntFlag{
Name: "source-addr-ton",
Usage: "set source_addr_ton PDU (optional)",
Value: 0,
},
cli.IntFlag{
Name: "source-addr-npi",
Usage: "set source_addr_npi PDU (optional)",
Value: 0,
},
cli.IntFlag{
Name: "dest-addr-ton",
Usage: "set dest_addr_ton PDU (optional)",
Value: 0,
},
cli.IntFlag{
Name: "dest-addr-npi",
Usage: "set dest_addr_npi PDU (optional)",
Value: 0,
},
cli.IntFlag{
Name: "esm-class",
Usage: "set esm_class PDU (optional)",
Value: 0,
},
cli.IntFlag{
Name: "protocol-id",
Usage: "set protocol_id PDU (optional)",
Value: 0,
},
cli.IntFlag{
Name: "priority-flag",
Usage: "set priority_flag PDU (optional)",
Value: 0,
},
cli.StringFlag{
Name: "schedule-delivery-time",
Usage: "set schedule_delivery_time PDU (optional)",
Value: "",
},
cli.IntFlag{
Name: "replace-if-present-flag",
Usage: "set replace_if_present_flag PDU (optional)",
Value: 0,
},
cli.IntFlag{
Name: "sm-default-msg-id",
Usage: "set sm_default_msg_id PDU (optional)",
Value: 0,
},
},
Action: func(c *cli.Context) {
if len(c.Args()) < 3 {
fmt.Println("usage: send [options] <sender> <recipient> <message...>")
fmt.Println("example: send --register foobar 011-236-0873 รฉ nรณis")
return
}
log.Println("Connecting...")
tx := newTransmitter(c)
defer tx.Close()
log.Println("Connected to", tx.Addr)
sender := c.Args()[0]
recipient := c.Args()[1]
text := strings.Join(c.Args()[2:], " ")
log.Printf("Command: send %q %q %q", sender, recipient, text)
var register pdufield.DeliverySetting
if c.Bool("register") {
register = pdufield.FinalDeliveryReceipt
}
var codec pdutext.Codec
switch c.String("encoding") {
case "ucs2", "ucs-2":
codec = pdutext.UCS2(text)
case "latin1", "latin-1":
codec = pdutext.Latin1(text)
default:
codec = pdutext.Raw(text)
}
sm, err := tx.Submit(&smpp.ShortMessage{
Src: sender,
Dst: recipient,
Text: codec,
Register: register,
ServiceType: c.String("service-type"),
SourceAddrTON: uint8(c.Int("source-addr-ton")),
SourceAddrNPI: uint8(c.Int("source-addr-npi")),
DestAddrTON: uint8(c.Int("dest-addr-ton")),
DestAddrNPI: uint8(c.Int("dest-addr-npi")),
ESMClass: uint8(c.Int("esm-class")),
ProtocolID: uint8(c.Int("protocol-id")),
PriorityFlag: uint8(c.Int("priority-flag")),
ScheduleDeliveryTime: c.String("schedule-delivery-time"),
ReplaceIfPresentFlag: uint8(c.Int("replace-if-present-flag")),
SMDefaultMsgID: uint8(c.Int("sm-default-msg-id")),
})
if err != nil {
log.Fatalln("Failed:", err)
}
log.Printf("Message ID: %q", sm.RespID())
},
}
var cmdQueryMessage = cli.Command{
Name: "query",
Usage: "status of short message",
Action: func(c *cli.Context) {
if len(c.Args()) != 2 {
fmt.Println("usage: query [sender] [message ID]")
return
}
log.Println("Connecting...")
tx := newTransmitter(c)
defer tx.Close()
log.Println("Connected to", tx.Addr)
sender, msgid := c.Args()[0], c.Args()[1]
log.Printf("Command: query %q %q", sender, msgid)
qr, err := tx.QuerySM(
sender,
msgid,
uint8(c.Int("source-addr-ton")),
uint8(c.Int("source-addr-npi")),
)
if err != nil {
log.Fatalln("Failed:", err)
}
log.Printf("Status: %#v", *qr)
},
}
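// newTransmitter connects and binds to the SMSC as a transmitter; credentials default to the
// SMPP_USER and SMPP_PASSWD environment variables and can be overridden with --user/--passwd,
// with optional TLS controlled by the --tls and --precaire flags.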
func newTransmitter(c *cli.Context) *smpp.Transmitter {
tx := &smpp.Transmitter{
Addr: c.GlobalString("addr"),
User: os.Getenv("SMPP_USER"),
Passwd: os.Getenv("SMPP_PASSWD"),
}
if s := c.GlobalString("user"); s != "" {
tx.User = s
}
if s := c.GlobalString("passwd"); s != "" {
tx.Passwd = s
}
if c.GlobalBool("tls") {
host, _, _ := net.SplitHostPort(tx.Addr)
tx.TLS = &tls.Config{
ServerName: host,
}
if c.GlobalBool("precaire") {
tx.TLS.InsecureSkipVerify = true
}
}
conn := <-tx.Bind()
switch conn.Status() {
case smpp.Connected:
default:
log.Fatalln("Connection failed:", conn.Error())
}
return tx
}
| [
"\"SMPP_USER\"",
"\"SMPP_PASSWD\""
] | [] | [
"SMPP_USER",
"SMPP_PASSWD"
] | [] | ["SMPP_USER", "SMPP_PASSWD"] | go | 2 | 0 | |
example/callee/main.go | package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"os/signal"
"syscall"
"time"
"github.com/atotto/skyway-webrtc-gateway-go-client/client"
"github.com/atotto/skyway-webrtc-gateway-go-client/client/data"
"github.com/atotto/skyway-webrtc-gateway-go-client/client/media"
"github.com/atotto/skyway-webrtc-gateway-go-client/client/peers"
"github.com/atotto/skyway-webrtc-gateway-go-client/models"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
var apikey = os.Getenv("SKYWAY_API_KEY")
var peerID = flag.String("peer", "callee", "peer id")
var useData = flag.Bool("data", false, "data")
var useMedia = flag.Bool("media", true, "media")
var domain = flag.String("domain", "localhost", "domain name")
var address = flag.String("addr", "localhost:8000", "gateway address")
var (
DATA_RECV_ADDR = "127.0.0.1"
DATA_RECV_PORT = uint16(10002)
RECV_ADDR = "127.0.0.1"
VIDEO_RECV_PORT = uint16(20000)
AUDIO_RECV_PORT = uint16(20001)
)
func main() {
flag.Parse()
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGTERM, syscall.SIGINT)
ctx, cancel := context.WithCancel(context.Background())
go func() {
select {
case <-sig:
log.Print("stopping..")
cancel()
return
}
}()
c := client.NewHTTPClientWithConfig(strfmt.Default, &client.TransportConfig{
Host: *address,
BasePath: "/",
Schemes: []string{"http"},
})
// create_peer
peer, err := c.Peers.Peer(&peers.PeerParams{
Context: ctx,
Body: &models.PeerOptions{
Domain: domain,
Key: &apikey,
PeerID: *peerID,
Turn: false,
}})
if err != nil {
log.Printf("failed to connect peer: %s", err)
return
}
peerToken := peer.Payload.Params.Token
peerID = peer.Payload.Params.PeerID
fmt.Printf("peer_id: %s peer_token: %s\n", *peerID, *peerToken)
defer func() {
c.Peers.PeerDestroy(&peers.PeerDestroyParams{
Context: context.Background(),
PeerID: *peerID,
Token: *peerToken,
})
log.Println("delete peer")
}()
// wait_open
for {
select {
case <-ctx.Done():
return
default:
}
ev, err := c.Peers.PeerEvent(&peers.PeerEventParams{
Context: ctx,
PeerID: *peerID,
Token: *peerToken,
})
if err != nil {
			log.Printf("failed to get peer event: %s", err)
return
}
if *ev.Payload.Event == "OPEN" {
break
}
}
peerEventParams := peers.NewPeerEventParams()
peerEventParams.Context = ctx
peerEventParams.PeerID = *peerID
peerEventParams.Token = *peerToken
if *useMedia {
// create_media
vMedia, err := c.Media.Media(&media.MediaParams{
Context: ctx,
Body: &models.MediaOptions{
IsVideo: swag.Bool(true),
},
})
if err != nil {
log.Printf("failed to create video media: %s", err)
return
}
log.Printf("media video: port %d", *vMedia.Payload.Port)
aMedia, err := c.Media.Media(&media.MediaParams{
Context: ctx,
Body: &models.MediaOptions{
IsVideo: swag.Bool(false),
},
})
if err != nil {
log.Printf("failed to create audio media: %s", err)
return
}
log.Printf("media audio: port %d", *aMedia.Payload.Port)
// wait call
var mediaConnID string
for {
select {
case <-ctx.Done():
return
default:
}
ev, err := c.Peers.PeerEvent(peerEventParams)
if err != nil {
log.Printf("failed to get peer event: %s", err)
time.Sleep(time.Second)
continue
}
if *ev.Payload.Event == "CALL" {
mediaConnID = *ev.Payload.CallParams.MediaConnectionID
break
}
}
// caller peer_id
mStatus, err := c.Media.MediaConnectionStatus(&media.MediaConnectionStatusParams{
Context: ctx,
MediaConnectionID: mediaConnID,
})
if err != nil {
			log.Printf("failed to get media connection status: %s", err)
return
}
log.Printf("call: peer_id=%s", *mStatus.Payload.RemoteID)
// answer
constraints := &models.PeerCallConstraints{}
if err := constraints.UnmarshalBinary([]byte(defaultConstraints)); err != nil {
log.Printf("failed to create constraints: %s", err)
return
}
constraints.VideoParams.MediaID = vMedia.Payload.MediaID
constraints.AudioParams.MediaID = aMedia.Payload.MediaID
_, err = c.Media.MediaConnectionAnswer(&media.MediaConnectionAnswerParams{
Context: ctx,
MediaConnectionID: mediaConnID,
Body: &models.MediaConnectionAnswerOptions{
Constraints: constraints,
RedirectParams: &models.MediaRedirectOptions{
Video: &models.MediaRedirectOptionsVideo{
IPV4: RECV_ADDR,
Port: VIDEO_RECV_PORT,
},
Audio: &models.MediaRedirectOptionsAudio{
IPV4: RECV_ADDR,
Port: AUDIO_RECV_PORT,
},
},
},
})
if err != nil {
log.Printf("failed to answer media connection: %s", err)
return
}
log.Println("answered")
// wait_stream
for {
select {
case <-ctx.Done():
return
default:
}
ev, err := c.Media.MediaConnectionEvent(&media.MediaConnectionEventParams{
Context: ctx,
MediaConnectionID: mediaConnID,
})
if err != nil {
				log.Printf("failed to get media connection event: %s", err)
return
}
if *ev.Payload.Event == "STREAM" {
break
}
}
log.Println("stream")
defer c.Media.MediaConnectionClose(&media.MediaConnectionCloseParams{
Context: context.Background(),
MediaConnectionID: mediaConnID,
})
}
if *useData {
// create_data
d, err := c.Data.Data(&data.DataParams{
Body: struct{}{},
Context: ctx,
})
if err != nil {
			log.Printf("failed to create data channel: %s", err)
return
}
dataID := d.Payload.DataID
log.Printf("data channel: port %d", *d.Payload.Port)
// wait_connection
var dataConnID string
for {
select {
case <-ctx.Done():
return
default:
}
ev, err := c.Peers.PeerEvent(peerEventParams)
if err != nil {
log.Printf("failed to get peer event: %s", err)
time.Sleep(time.Second)
continue
}
if *ev.Payload.Event == "CONNECTION" {
dataConnID = *ev.Payload.DataParams.DataConnectionID
break
}
}
log.Println("connected")
// wait_open
for {
select {
case <-ctx.Done():
return
default:
}
ev, err := c.Data.DataConnectionEvents(&data.DataConnectionEventsParams{
Context: ctx,
DataConnectionID: dataConnID,
})
if err != nil {
				log.Printf("failed to get data connection event: %s", err)
return
}
if *ev.Payload.Event == "OPEN" {
break
}
}
log.Println("opened data channel")
defer c.Data.DataConnectionClose(&data.DataConnectionCloseParams{
Context: context.Background(),
DataConnectionID: dataConnID,
})
_, err = c.Data.DataConnectionPut(&data.DataConnectionPutParams{
Context: ctx,
DataConnectionID: dataConnID,
Body: &models.DataConnectionPutOptions{
FeedParams: &models.DataConnectionFeedOptions{
DataID: *dataID,
},
RedirectParams: &models.DataConnectionRedirectOptions{
IPV4: DATA_RECV_ADDR,
Port: &DATA_RECV_PORT,
},
},
})
if err != nil {
			log.Printf("failed to put data connection: %s", err)
return
}
}
<-ctx.Done()
}
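// defaultConstraints is the JSON template for the answer constraints; the media_id placeholders
// are overwritten with the IDs of the created video/audio media before answering the call.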
var defaultConstraints = `
{
"video": true,
"videoReceiveEnabled": true,
"video_params": {
"band_width": 1500,
"codec": "H264",
"media_id": "video_id",
"payload_type": 100
},
"audio": true,
"audioReceiveEnabled": true,
"audio_params": {
"band_width": 1500,
"codec": "opus",
"media_id": "audio_id",
"payload_type": 111
}
}`
| [
"\"SKYWAY_API_KEY\""
] | [] | [
"SKYWAY_API_KEY"
] | [] | ["SKYWAY_API_KEY"] | go | 1 | 0 | |
soracom/generated/cmd/lagoon_update_user_email.go | // Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"os"
"strings"
"github.com/spf13/cobra"
)
// LagoonUpdateUserEmailCmdUserEmail holds value of 'userEmail' option
var LagoonUpdateUserEmailCmdUserEmail string
// LagoonUpdateUserEmailCmdLagoonUserId holds value of 'lagoon_user_id' option
var LagoonUpdateUserEmailCmdLagoonUserId int64
// LagoonUpdateUserEmailCmdBody holds contents of request body to be sent
var LagoonUpdateUserEmailCmdBody string
func init() {
LagoonUpdateUserEmailCmd.Flags().StringVar(&LagoonUpdateUserEmailCmdUserEmail, "user-email", "", TRAPI(""))
LagoonUpdateUserEmailCmd.Flags().Int64Var(&LagoonUpdateUserEmailCmdLagoonUserId, "lagoon-user-id", 0, TRAPI("Target ID of the lagoon user"))
LagoonUpdateUserEmailCmd.Flags().StringVar(&LagoonUpdateUserEmailCmdBody, "body", "", TRCLI("cli.common_params.body.short_help"))
LagoonCmd.AddCommand(LagoonUpdateUserEmailCmd)
}
// LagoonUpdateUserEmailCmd defines 'update-user-email' subcommand
var LagoonUpdateUserEmailCmd = &cobra.Command{
Use: "update-user-email",
Short: TRAPI("/lagoon/users/{lagoon_user_id}/email:put:summary"),
Long: TRAPI(`/lagoon/users/{lagoon_user_id}/email:put:description`),
RunE: func(cmd *cobra.Command, args []string) error {
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectLagoonUpdateUserEmailCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectLagoonUpdateUserEmailCmdParams(ac *apiClient) (*apiParams, error) {
body, err := buildBodyForLagoonUpdateUserEmailCmd()
if err != nil {
return nil, err
}
contentType := "application/json"
if LagoonUpdateUserEmailCmdLagoonUserId == 0 {
if body == "" {
return nil, fmt.Errorf("required parameter '%s' is not specified", "lagoon-user-id")
}
}
return &apiParams{
method: "PUT",
path: buildPathForLagoonUpdateUserEmailCmd("/lagoon/users/{lagoon_user_id}/email"),
query: buildQueryForLagoonUpdateUserEmailCmd(),
contentType: contentType,
body: body,
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForLagoonUpdateUserEmailCmd(path string) string {
path = strReplace(path, "{"+"lagoon_user_id"+"}", url.PathEscape(sprintf("%d", LagoonUpdateUserEmailCmdLagoonUserId)), -1)
return path
}
func buildQueryForLagoonUpdateUserEmailCmd() url.Values {
result := url.Values{}
return result
}
func buildBodyForLagoonUpdateUserEmailCmd() (string, error) {
var result map[string]interface{}
if LagoonUpdateUserEmailCmdBody != "" {
var b []byte
var err error
if strings.HasPrefix(LagoonUpdateUserEmailCmdBody, "@") {
fname := strings.TrimPrefix(LagoonUpdateUserEmailCmdBody, "@")
// #nosec
b, err = ioutil.ReadFile(fname)
} else if LagoonUpdateUserEmailCmdBody == "-" {
b, err = ioutil.ReadAll(os.Stdin)
} else {
b = []byte(LagoonUpdateUserEmailCmdBody)
}
if err != nil {
return "", err
}
err = json.Unmarshal(b, &result)
if err != nil {
return "", err
}
}
if result == nil {
result = make(map[string]interface{})
}
if LagoonUpdateUserEmailCmdUserEmail != "" {
result["userEmail"] = LagoonUpdateUserEmailCmdUserEmail
}
resultBytes, err := json.Marshal(result)
if err != nil {
return "", err
}
return string(resultBytes), nil
}
| [
"\"SORACOM_VERBOSE\""
] | [] | [
"SORACOM_VERBOSE"
] | [] | ["SORACOM_VERBOSE"] | go | 1 | 0 | |
c_all_in/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "c_all_in.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
userbot/uniborgConfig.py | import os
from telethon.tl.types import ChatBannedRights
ENV = bool(os.environ.get("ENV", False))
if ENV:
import os
class Config(object):
LOGGER = True
# Get this value from my.telegram.org! Please do not steal
LOCATION = os.environ.get("LOCATION", None)
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
# Get your own ACCESS_KEY from http://api.screenshotlayer.com/api/capture
SCREEN_SHOT_LAYER_ACCESS_KEY = os.environ.get("SCREEN_SHOT_LAYER_ACCESS_KEY", None)
# Send .get_id in any group to fill this value.
# This is required for the plugins involving the file system.
TMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY", "./DOWNLOADS/")
# This is required for the speech to text module. Get your USERNAME from https://console.bluemix.net/docs/services/speech-to-text/getting-started.html
IBM_WATSON_CRED_URL = os.environ.get("IBM_WATSON_CRED_URL", None)
IBM_WATSON_CRED_PASSWORD = os.environ.get("IBM_WATSON_CRED_PASSWORD", None)
# This is required for the hash to torrent file functionality to work.
HASH_TO_TORRENT_API = os.environ.get("HASH_TO_TORRENT_API", "https://example.com/torrent/{}");
# This is required for the @telegraph functionality.
TELEGRAPH_SHORT_NAME = os.environ.get("TELEGRAPH_SHORT_NAME", "IndianBot")
# Get a Free API Key from OCR.Space
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
# Send .get_id in any group with all your administration bots (added)
G_BAN_LOGGER_GROUP = int(os.environ.get("G_BAN_LOGGER_GROUP", -1001198699233))
# TG API limit. An album can have atmost 10 media!
GOOGLE_SEARCH_COUNT_LIMIT = int(os.environ.get("GOOGLE_SEARCH_COUNT_LIMIT", 9))
TG_GLOBAL_ALBUM_LIMIT = int(os.environ.get("TG_GLOBAL_ALBUM_LIMIT", 9))
# Telegram BOT Token from @BotFather
TG_BOT_TOKEN_BF_HER = os.environ.get("TG_BOT_TOKEN_BF_HER", None)
TG_BOT_USER_NAME_BF_HER = os.environ.get("TG_BOT_USER_NAME_BF_HER", None)
#
#
# DO NOT EDIT BELOW THIS LINE IF YOU DO NOT KNOW WHAT YOU ARE DOING
# TG API limit. A message can have maximum 4096 characters!
MAX_MESSAGE_SIZE_LIMIT = 4095
# set blacklist_chats where you do not want userbot's features
UB_BLACK_LIST_CHAT = set(int(x) for x in os.environ.get("UB_BLACK_LIST_CHAT", "").split())
# maximum number of messages for antiflood
MAX_ANTI_FLOOD_MESSAGES = 10
# warn mode for anti flood
ANTI_FLOOD_WARN_MODE = ChatBannedRights(
until_date=None,
view_messages=None,
send_messages=True
)
# chat ids or usernames, it is recommended to use chat ids,
# providing usernames means an additional overhead for the user
CHATS_TO_MONITOR_FOR_ANTI_FLOOD = []
# Get your own API key from https://www.remove.bg/ or
# feel free to use http://telegram.dog/Remove_BGBot
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
# Set to True if you want to block users that are spamming your PMs.
SLAP_USERNAME = os.environ.get("SLAP_USERNAME", None)
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", None)
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME", None)
NO_P_M_SPAM = bool(os.environ.get("NO_P_M_SPAM", False))
# define "spam" in PMs
NO_SONGS = bool(os.environ.get("NO_SONGS", False))
MAX_FLOOD_IN_P_M_s = int(os.environ.get("MAX_FLOOD_IN_P_M_s", 3))
# set to True if you want to log PMs to your PM_LOGGR_BOT_API_ID
NC_LOG_P_M_S = bool(os.environ.get("NC_LOG_P_M_S", False))
# send .get_id in any channel to forward all your NEW PMs to this group
PM_LOGGR_BOT_API_ID = os.environ.get("PM_LOGGR_BOT_API_ID", None)
if PM_LOGGR_BOT_API_ID:
PM_LOGGR_BOT_API_ID = int(PM_LOGGR_BOT_API_ID)
# For Databases
# can be None in which case plugins requiring
# DataBase would not work
DB_URI = os.environ.get("DATABASE_URL", None)
# specify command handler that should be used for the plugins
# this should be a valid "regex" pattern
COMMAND_HAND_LER = os.environ.get("COMMAND_HAND_LER", "\.")
# specify list of users allowed to use bot
# WARNING: be careful who you grant access to your bot.
# malicious users could do ".exec rm -rf /*"
SUDO_USERS = set(int(x) for x in os.environ.get("SUDO_USERS", "").split())
# VeryStream only supports video formats
VERY_STREAM_LOGIN = os.environ.get("VERY_STREAM_LOGIN", None)
VERY_STREAM_KEY = os.environ.get("VERY_STREAM_KEY", None)
GROUP_REG_SED_EX_BOT_S = os.environ.get("GROUP_REG_SED_EX_BOT_S", r"(regex|moku|BananaButler_|rgx|l4mR)bot")
TEMP_DIR = os.environ.get("TEMP_DIR", None)
CHANNEL_ID = int(os.environ.get("CHANNEL_ID", -100))
#alive
ALIVE_MSG = os.environ.get("ALIVE_MSG", None)
ALIVE_PHOTTO = os.environ.get("ALIVE_PHOTTO", None)
#inline
EMOJI_TO_DISPLAY_IN_HELP = os.environ.get("EMOJI_TO_DISPLAY_IN_HELP", None)
NO_OF_COLOUMS_DISPLAYED_IN_H_ME_CMD = os.environ.get("NO_OF_COLOUMS_DISPLAYED_IN_H_ME_CMD", None)
NO_OF_BUTTONS_DISPLAYED_IN_H_ME_CMD = os.environ.get("NO_OF_BUTTONS_DISPLAYED_IN_H_ME_CMD", None)
#pm permit
PM_PRMT_PRVT_GRP_ID = os.environ.get("PM_PRMT_PRVT_GRP_ID", None)
#Google Chrome Stuff
CHROME_DRIVER = os.environ.get("CHROME_DRIVER", "/app/.chromedriver/bin/chromedriver")
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", "/app/.apt/usr/bin/google-chrome")
# Google Drive ()
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", None)
AUTH_TOKEN_DATA = os.environ.get("AUTH_TOKEN_DATA", None)
if AUTH_TOKEN_DATA != None:
os.makedirs(TMP_DOWNLOAD_DIRECTORY)
t_file = open(TMP_DOWNLOAD_DIRECTORY+"auth_token.txt","w")
t_file.write(AUTH_TOKEN_DATA)
t_file.close()
YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None)
GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", None)
#MongoDB
MONGO_URI = os.environ.get("MONGO_URI", None)
#Lydia API
LYDIA_API = os.environ.get("LYDIA_API",None)
PLUGIN_CHANNEL = int(os.environ.get("PLUGIN_CHANNEL", None))
else:
class Config(object):
DB_URI = None
| [] | [] | [
"SLAP_USERNAME",
"TEMP_DIR",
"GOOGLE_SEARCH_COUNT_LIMIT",
"NO_P_M_SPAM",
"CHANNEL_ID",
"GOOGLE_CHROME_BIN",
"G_DRIVE_CLIENT_SECRET",
"NO_SONGS",
"AUTH_TOKEN_DATA",
"OCR_SPACE_API_KEY",
"TG_BOT_USER_NAME_BF_HER",
"VERY_STREAM_LOGIN",
"NC_LOG_P_M_S",
"SUDO_USERS",
"SCREEN_SHOT_LAYER_ACCESS_KEY",
"MAX_FLOOD_IN_P_M_s",
"TELEGRAPH_SHORT_NAME",
"ENV",
"DATABASE_URL",
"GDRIVE_FOLDER_ID",
"GIT_REPO_NAME",
"TG_BOT_TOKEN_BF_HER",
"GROUP_REG_SED_EX_BOT_S",
"G_BAN_LOGGER_GROUP",
"PM_LOGGR_BOT_API_ID",
"CHROME_DRIVER",
"PM_PRMT_PRVT_GRP_ID",
"IBM_WATSON_CRED_PASSWORD",
"MONGO_URI",
"G_DRIVE_CLIENT_ID",
"YOUTUBE_API_KEY",
"PLUGIN_CHANNEL",
"HASH_TO_TORRENT_API",
"TG_GLOBAL_ALBUM_LIMIT",
"VERY_STREAM_KEY",
"LYDIA_API",
"OPEN_WEATHER_MAP_APPID",
"UB_BLACK_LIST_CHAT",
"ALIVE_MSG",
"NO_OF_BUTTONS_DISPLAYED_IN_H_ME_CMD",
"NO_OF_COLOUMS_DISPLAYED_IN_H_ME_CMD",
"ALIVE_PHOTTO",
"GITHUB_ACCESS_TOKEN",
"IBM_WATSON_CRED_URL",
"TMP_DOWNLOAD_DIRECTORY",
"EMOJI_TO_DISPLAY_IN_HELP",
"REM_BG_API_KEY",
"COMMAND_HAND_LER",
"LOCATION"
] | [] | ["SLAP_USERNAME", "TEMP_DIR", "GOOGLE_SEARCH_COUNT_LIMIT", "NO_P_M_SPAM", "CHANNEL_ID", "GOOGLE_CHROME_BIN", "G_DRIVE_CLIENT_SECRET", "NO_SONGS", "AUTH_TOKEN_DATA", "OCR_SPACE_API_KEY", "TG_BOT_USER_NAME_BF_HER", "VERY_STREAM_LOGIN", "NC_LOG_P_M_S", "SUDO_USERS", "SCREEN_SHOT_LAYER_ACCESS_KEY", "MAX_FLOOD_IN_P_M_s", "TELEGRAPH_SHORT_NAME", "ENV", "DATABASE_URL", "GDRIVE_FOLDER_ID", "GIT_REPO_NAME", "TG_BOT_TOKEN_BF_HER", "GROUP_REG_SED_EX_BOT_S", "G_BAN_LOGGER_GROUP", "PM_LOGGR_BOT_API_ID", "CHROME_DRIVER", "PM_PRMT_PRVT_GRP_ID", "IBM_WATSON_CRED_PASSWORD", "MONGO_URI", "G_DRIVE_CLIENT_ID", "YOUTUBE_API_KEY", "PLUGIN_CHANNEL", "HASH_TO_TORRENT_API", "TG_GLOBAL_ALBUM_LIMIT", "VERY_STREAM_KEY", "LYDIA_API", "OPEN_WEATHER_MAP_APPID", "UB_BLACK_LIST_CHAT", "ALIVE_MSG", "NO_OF_BUTTONS_DISPLAYED_IN_H_ME_CMD", "NO_OF_COLOUMS_DISPLAYED_IN_H_ME_CMD", "ALIVE_PHOTTO", "GITHUB_ACCESS_TOKEN", "IBM_WATSON_CRED_URL", "TMP_DOWNLOAD_DIRECTORY", "EMOJI_TO_DISPLAY_IN_HELP", "REM_BG_API_KEY", "COMMAND_HAND_LER", "LOCATION"] | python | 49 | 0 | |
operators/conv_autogenerated_test.go | package operators
import (
"os"
"testing"
onnx "github.com/owulveryck/onnx-go"
"github.com/stretchr/testify/assert"
"gorgonia.org/gorgonia"
"gorgonia.org/tensor"
)
// TestConv_with_strides_and_asymmetric_padding is autogenerated from test_conv_with_strides_and_asymmetric_padding
func TestConv_with_strides_and_asymmetric_padding(t *testing.T) {
debug := os.Getenv("SKIP_NOT_IMPLEMENTED")
skip := true
if debug == "false" {
skip = false
}
assert := assert.New(t)
g := gorgonia.NewGraph()
op := &Conv{}
attribute0Name := "kernel_shape"
attribute0Type := onnx.AttributeProto_AttributeType(7)
attribute0 := &onnx.AttributeProto{
Name: &attribute0Name,
Type: &attribute0Type,
Ints: []int64{3, 3},
}
attribute1Name := "pads"
attribute1Type := onnx.AttributeProto_AttributeType(7)
attribute1 := &onnx.AttributeProto{
Name: &attribute1Name,
Type: &attribute1Type,
Ints: []int64{1, 0, 1, 0},
}
attribute2Name := "strides"
attribute2Type := onnx.AttributeProto_AttributeType(7)
attribute2 := &onnx.AttributeProto{
Name: &attribute2Name,
Type: &attribute2Type,
Ints: []int64{2, 2},
}
attributes := []*onnx.AttributeProto{
attribute0,
attribute1,
attribute2,
}
if len(attributes) != 0 {
err := op.Init(attributes)
t.Logf("Info: operator %#v", op)
if err != nil {
_, ok := err.(*onnx.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
t.Fatal(err)
}
}
x := gorgonia.NodeFromAny(g,
tensor.New(
tensor.WithShape(1, 1, 7, 5),
tensor.WithBacking([]float32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34})),
gorgonia.WithName("x"))
W := gorgonia.NodeFromAny(g,
tensor.New(
tensor.WithShape(1, 1, 3, 3),
tensor.WithBacking([]float32{1, 1, 1, 1, 1, 1, 1, 1, 1})),
gorgonia.WithName("W"))
yT := tensor.New(
tensor.WithShape(1, 1, 4, 2),
tensor.WithBacking([]float32{21, 33, 99, 117, 189, 207, 171, 183}))
y := new(gorgonia.Node)
o, err := op.Apply(
x, W,
)
if err != nil {
_, ok := err.(*onnx.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
_, ok = err.(*gorgonia.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
t.Fatal(err)
}
y = o[0]
machine := gorgonia.NewTapeMachine(g)
if err = machine.RunAll(); err != nil {
t.Fatal(err)
}
assert.Equal(yT.Shape(), y.Shape(), "Tensors should be the same")
assert.InDeltaSlice(yT.Data(), y.Value().Data(), 1e-5, "Tensors should be the same")
}
// TestConv_with_strides_no_padding is autogenerated from test_conv_with_strides_no_padding
func TestConv_with_strides_no_padding(t *testing.T) {
debug := os.Getenv("SKIP_NOT_IMPLEMENTED")
skip := true
if debug == "false" {
skip = false
}
assert := assert.New(t)
g := gorgonia.NewGraph()
op := &Conv{}
attribute0Name := "kernel_shape"
attribute0Type := onnx.AttributeProto_AttributeType(7)
attribute0 := &onnx.AttributeProto{
Name: &attribute0Name,
Type: &attribute0Type,
Ints: []int64{3, 3},
}
attribute1Name := "pads"
attribute1Type := onnx.AttributeProto_AttributeType(7)
attribute1 := &onnx.AttributeProto{
Name: &attribute1Name,
Type: &attribute1Type,
Ints: []int64{0, 0, 0, 0},
}
attribute2Name := "strides"
attribute2Type := onnx.AttributeProto_AttributeType(7)
attribute2 := &onnx.AttributeProto{
Name: &attribute2Name,
Type: &attribute2Type,
Ints: []int64{2, 2},
}
attributes := []*onnx.AttributeProto{
attribute0,
attribute1,
attribute2,
}
if len(attributes) != 0 {
err := op.Init(attributes)
t.Logf("Info: operator %#v", op)
if err != nil {
_, ok := err.(*onnx.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
t.Fatal(err)
}
}
x := gorgonia.NodeFromAny(g,
tensor.New(
tensor.WithShape(1, 1, 7, 5),
tensor.WithBacking([]float32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34})),
gorgonia.WithName("x"))
W := gorgonia.NodeFromAny(g,
tensor.New(
tensor.WithShape(1, 1, 3, 3),
tensor.WithBacking([]float32{1, 1, 1, 1, 1, 1, 1, 1, 1})),
gorgonia.WithName("W"))
yT := tensor.New(
tensor.WithShape(1, 1, 3, 2),
tensor.WithBacking([]float32{54, 72, 144, 162, 234, 252}))
y := new(gorgonia.Node)
o, err := op.Apply(
x, W,
)
if err != nil {
_, ok := err.(*onnx.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
_, ok = err.(*gorgonia.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
t.Fatal(err)
}
y = o[0]
machine := gorgonia.NewTapeMachine(g)
if err = machine.RunAll(); err != nil {
t.Fatal(err)
}
assert.Equal(yT.Shape(), y.Shape(), "Tensors should be the same")
assert.InDeltaSlice(yT.Data(), y.Value().Data(), 1e-5, "Tensors should be the same")
}
// TestConv_with_strides_padding is autogenerated from test_conv_with_strides_padding
func TestConv_with_strides_padding(t *testing.T) {
debug := os.Getenv("SKIP_NOT_IMPLEMENTED")
skip := true
if debug == "false" {
skip = false
}
assert := assert.New(t)
g := gorgonia.NewGraph()
op := &Conv{}
attribute0Name := "kernel_shape"
attribute0Type := onnx.AttributeProto_AttributeType(7)
attribute0 := &onnx.AttributeProto{
Name: &attribute0Name,
Type: &attribute0Type,
Ints: []int64{3, 3},
}
attribute1Name := "pads"
attribute1Type := onnx.AttributeProto_AttributeType(7)
attribute1 := &onnx.AttributeProto{
Name: &attribute1Name,
Type: &attribute1Type,
Ints: []int64{1, 1, 1, 1},
}
attribute2Name := "strides"
attribute2Type := onnx.AttributeProto_AttributeType(7)
attribute2 := &onnx.AttributeProto{
Name: &attribute2Name,
Type: &attribute2Type,
Ints: []int64{2, 2},
}
attributes := []*onnx.AttributeProto{
attribute0,
attribute1,
attribute2,
}
if len(attributes) != 0 {
err := op.Init(attributes)
t.Logf("Info: operator %#v", op)
if err != nil {
_, ok := err.(*onnx.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
t.Fatal(err)
}
}
x := gorgonia.NodeFromAny(g,
tensor.New(
tensor.WithShape(1, 1, 7, 5),
tensor.WithBacking([]float32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34})),
gorgonia.WithName("x"))
W := gorgonia.NodeFromAny(g,
tensor.New(
tensor.WithShape(1, 1, 3, 3),
tensor.WithBacking([]float32{1, 1, 1, 1, 1, 1, 1, 1, 1})),
gorgonia.WithName("W"))
yT := tensor.New(
tensor.WithShape(1, 1, 4, 3),
tensor.WithBacking([]float32{12, 27, 24, 63, 108, 81, 123, 198, 141, 112, 177, 124}))
y := new(gorgonia.Node)
o, err := op.Apply(
x, W,
)
if err != nil {
_, ok := err.(*onnx.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
_, ok = err.(*gorgonia.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
t.Fatal(err)
}
y = o[0]
machine := gorgonia.NewTapeMachine(g)
if err = machine.RunAll(); err != nil {
t.Fatal(err)
}
assert.Equal(yT.Shape(), y.Shape(), "Tensors should be the same")
assert.InDeltaSlice(yT.Data(), y.Value().Data(), 1e-5, "Tensors should be the same")
}
| [
"\"SKIP_NOT_IMPLEMENTED\"",
"\"SKIP_NOT_IMPLEMENTED\"",
"\"SKIP_NOT_IMPLEMENTED\""
] | [] | [
"SKIP_NOT_IMPLEMENTED"
] | [] | ["SKIP_NOT_IMPLEMENTED"] | go | 1 | 0 | |
admin.go | package admin
import (
"html/template"
"os"
"path/filepath"
"reflect"
"strings"
// Inflection pluralizes and singularizes English nouns
"github.com/jinzhu/inflection"
// a golang library using Common Locale Data Repository to format dates
"github.com/theplant/cldr"
"github.com/Sky-And-Hammer/TM_EC"
"github.com/Sky-And-Hammer/TM_EC/resource"
"github.com/Sky-And-Hammer/TM_EC/utils"
)
// 'Admin' is a struct that is used to generate the admin/api interface
type Admin struct {
SiteName string
Config *TM_EC.Config
I18n I18n
AssetFS AssetFSInterface
menus []*Menu
resources []*Resource
searchResources []*Resource
Auth Auth
router *Router
funcMaps template.FuncMap
metaConfigorMaps map[string]func(*Meta)
}
// 'ResourceNamer' is an interface for models that define the method 'ResourceName'
type ResourceNamer interface {
ResourceName() string
}
// 'New' creates a new admin with the given configuration
func New(config *TM_EC.Config) *Admin {
admin := Admin{
Config: config,
funcMaps: make(template.FuncMap),
router: newRouter(),
metaConfigorMaps: metaConfigorMaps,
}
admin.SetAssetFS(&AssetFileSystem{})
admin.registerCompositePrimaryKeyCallback()
return &admin
}
// SetSiteName sets the site's name; it will be used as the admin HTML title, and the admin interface will auto-load javascript and stylesheet files based on its value
// For example, if you named it as `Qor Demo`, admin will look up `qor_demo.js`, `qor_demo.css` in QOR view paths, and load them if found
func (admin *Admin) SetSiteName(siteName string) {
admin.SiteName = siteName
}
// 'SetAuth' set admin's authorization gateway
func (admin *Admin) SetAuth(auth Auth) {
admin.Auth = auth
}
// 'SetAssetFS' set AssetFS for admin
func (admin *Admin) SetAssetFS(assetFS AssetFSInterface) {
admin.AssetFS = assetFS
globalAssetFSes = append(globalAssetFSes, assetFS)
admin.AssetFS.RegisterPath(filepath.Join(root, "app/views/ec"))
for _, viewPath := range globalViewPaths {
admin.RegisterViewPath(viewPath)
}
}
// 'RegisterViewPath' register view path for admin
func (admin *Admin) RegisterViewPath(pth string) {
if admin.AssetFS.RegisterPath(filepath.Join(root, "root", pth)) != nil {
for _, gopath := range strings.Split(os.Getenv("GOPATH"), ":") {
if admin.AssetFS.RegisterPath(filepath.Join(gopath, "src", pth)) == nil {
break
}
}
}
}
// 'RegisterMetaConfigor' registers a configor for a kind; it will be called when registering metas of that kind
func (admin *Admin) RegisterMetaConfigor(king string, fc func(*Meta)) {
admin.metaConfigorMaps[king] = fc
}
// 'RegisterFuncMap' register view funcs, it could be used in view templates
func (admin *Admin) RegisterFuncMap(name string, fc interface{}) {
admin.funcMaps[name] = fc
}
// 'GetRouter' get router from admin
func (admin *Admin) GetRouter() *Router {
return admin.router
}
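// newResource builds a Resource for the given value: it resolves the resource name (from the
// config or a ResourceNamer), runs any before-initialize configurators, and wraps FindOneHandler
// so the primary value is read from the request when the resource ID is empty.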
func (admin *Admin) newResource(value interface{}, config ...*Config) *Resource {
var configuration *Config
if len(config) > 0 {
configuration = config[0]
}
if configuration == nil {
configuration = &Config{}
}
res := &Resource{
Resource: *resource.New(value),
Config: configuration,
cachedMetas: &map[string][]*Meta{},
admin: admin,
}
res.Permission = configuration.Permission
if configuration.Name != "" {
res.Name = configuration.Name
} else if namer, ok := value.(ResourceNamer); ok {
res.Name = namer.ResourceName()
}
modelType := utils.ModelType(res.Value)
for i := 0; i < modelType.NumField(); i++ {
if filedStruct := modelType.Field(i); filedStruct.Anonymous {
if injector, ok := reflect.New(filedStruct.Type).Interface().(resource.ConfigureResourceBeforeInitializeInterface); ok {
injector.ConfigureECResourceBeforeInitialize(res)
}
}
}
if injector, ok := res.Value.(resource.ConfigureResourceBeforeInitializeInterface); ok {
injector.ConfigureECResourceBeforeInitialize(res)
}
findOneHandler := res.FindOneHandler
res.FindOneHandler = func(result interface{}, metaValues *resource.MetaValues, context *TM_EC.Context) error {
if context.ResourceID == "" {
context.ResourceID = res.GetPrimaryValue(context.Request)
}
return findOneHandler(result, metaValues, context)
}
res.UseTheme("slideout")
return res
}
// 'NewResource' initializes a new ec resource; it won't be added to admin, just initialized
func (admin *Admin) NewResource(value interface{}, config ...*Config) *Resource {
res := admin.newResource(value, config...)
res.configure()
return res
}
// 'AddResource' make a model manageable from admin interface
func (admin *Admin) AddResource(value interface{}, config ...*Config) *Resource {
res := admin.newResource(value, config...)
if !res.Config.Invisible {
var menuName string
if res.Config.Singleton {
menuName = res.Name
} else {
menuName = inflection.Plural(res.Name)
}
menu := &Menu{rawPath: res.ToParam(), Name: menuName, Permission: res.Config.Permission, Priority: res.Config.Priority}
admin.menus = appendMenu(admin.menus, res.Config.Menu, menu)
res.Action(&Action{
Name: "Delete",
Method: "DELETE",
URL: func(record interface{}, context *Context) string {
return context.URLFor(record, res)
},
Permission: res.Config.Permission,
Modes: []string{"menu_item"},
})
}
admin.resources = append(admin.resources, res)
if admin.router.Mounted() {
admin.generateMenuLinks()
res.configure()
if !res.Config.Invisible {
			admin.RegisterResourceRouters(res, "create", "update", "read", "delete")
}
}
return res
}
// 'GetResources' get defined resource from admin
func (admin *Admin) GetResources() []*Resource {
return admin.resources
}
// 'GetResource' get resource with name
func (admin *Admin) GetResource(name string) (resource *Resource) {
for _, res := range admin.resources {
modelType := utils.ModelType(res.Value)
if res.ToParam() == name || res.Name == name || modelType.String() == name {
return res
}
if modelType.Name() == name {
resource = res
}
}
return
}
func (admin *Admin) AddSearchResrouce(resources ...*Resource) {
admin.searchResources = append(admin.searchResources, resources...)
}
func (admin *Admin) GetSearchResrouces() []*Resource {
return admin.searchResources
}
type I18n interface {
Scope(scope string) I18n
Default(value string) I18n
T(locale string, key string, args ...interface{}) template.HTML
}
func (admin *Admin) T(context *TM_EC.Context, key string, value string, values ...interface{}) template.HTML {
locale := utils.GetLocale(context)
if admin.I18n == nil {
if result, err := cldr.Parse(locale, value, values...); err == nil {
return template.HTML(result)
}
return template.HTML(key)
}
return admin.I18n.Default(value).T(locale, key, values...)
}
| [
"\"GOPATH\""
] | [] | [
"GOPATH"
] | [] | ["GOPATH"] | go | 1 | 0 | |
setup.py | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
requires = [
]
version = os.environ.get('VERSION')
if version is None:
with open(os.path.join('.', 'VERSION')) as version_file:
version = version_file.read().strip()
setup_options = {
'name': 'iconcommons',
'version': version,
'description': 'ICON Commmons package for Python',
'long_description': open('README.md').read(),
'long_description_content_type': 'text/markdown',
'url': 'https://github.com/icon-project/icon-commons',
'author': 'ICON Foundation',
'author_email': 'foo@icon.foundation',
'packages': find_packages(exclude=['tests*', 'docs']),
'license': "Apache License 2.0",
'install_requires': requires,
'setup_requires': ['pytest-runner'],
'tests_require': ['pytest'],
'classifiers': [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
]
}
setup(**setup_options)
| [] | [] | [
"VERSION"
] | [] | ["VERSION"] | python | 1 | 0 | |
build/android/adb_install_apk.py | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility script to install APKs from the command line quickly."""
import argparse
import glob
import logging
import os
import sys
import devil_chromium
from devil.android import apk_helper
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.utils import run_tests_helper
from pylib import constants
def main():
parser = argparse.ArgumentParser()
apk_group = parser.add_mutually_exclusive_group(required=True)
apk_group.add_argument('--apk', dest='apk_name',
help='DEPRECATED The name of the apk containing the'
' application (with the .apk extension).')
apk_group.add_argument('apk_path', nargs='?',
help='The path to the APK to install.')
# TODO(jbudorick): Remove once no clients pass --apk_package
parser.add_argument('--apk_package', help='DEPRECATED unused')
parser.add_argument('--split',
action='append',
dest='splits',
help='A glob matching the apk splits. '
'Can be specified multiple times.')
parser.add_argument('--keep_data',
action='store_true',
default=False,
help='Keep the package data when installing '
'the application.')
parser.add_argument('--debug', action='store_const', const='Debug',
dest='build_type',
default=os.environ.get('BUILDTYPE', 'Debug'),
help='If set, run test suites under out/Debug. '
'Default is env var BUILDTYPE or Debug')
parser.add_argument('--release', action='store_const', const='Release',
dest='build_type',
help='If set, run test suites under out/Release. '
'Default is env var BUILDTYPE or Debug.')
parser.add_argument('-d', '--device', dest='devices', action='append',
default=[],
help='Target device for apk to install on. Enter multiple'
' times for multiple devices.')
parser.add_argument('--adb-path', type=os.path.abspath,
help='Absolute path to the adb binary to use.')
parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
parser.add_argument('-v', '--verbose', action='count',
help='Enable verbose logging.')
parser.add_argument('--downgrade', action='store_true',
help='If set, allows downgrading of apk.')
parser.add_argument('--timeout', type=int,
default=device_utils.DeviceUtils.INSTALL_DEFAULT_TIMEOUT,
help='Seconds to wait for APK installation. '
'(default: %(default)s)')
args = parser.parse_args()
run_tests_helper.SetLogLevel(args.verbose)
constants.SetBuildType(args.build_type)
devil_chromium.Initialize(
output_directory=constants.GetOutDirectory(),
adb_path=args.adb_path)
apk = args.apk_path or args.apk_name
if not apk.endswith('.apk'):
apk += '.apk'
if not os.path.exists(apk):
apk = os.path.join(constants.GetOutDirectory(), 'apks', apk)
if not os.path.exists(apk):
parser.error('%s not found.' % apk)
if args.splits:
splits = []
base_apk_package = apk_helper.ApkHelper(apk).GetPackageName()
for split_glob in args.splits:
apks = [f for f in glob.glob(split_glob) if f.endswith('.apk')]
if not apks:
logging.warning('No apks matched for %s.', split_glob)
for f in apks:
helper = apk_helper.ApkHelper(f)
if (helper.GetPackageName() == base_apk_package
and helper.GetSplitName()):
splits.append(f)
blacklist = (device_blacklist.Blacklist(args.blacklist_file)
if args.blacklist_file
else None)
devices = device_utils.DeviceUtils.HealthyDevices(blacklist=blacklist,
device_arg=args.devices)
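  # Installs the APK (and any matching splits) on a single device, blacklisting the
  # device if the install fails or times out.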
def blacklisting_install(device):
try:
if args.splits:
device.InstallSplitApk(apk, splits, reinstall=args.keep_data,
allow_downgrade=args.downgrade)
else:
device.Install(apk, reinstall=args.keep_data,
allow_downgrade=args.downgrade,
timeout=args.timeout)
except device_errors.CommandFailedError:
logging.exception('Failed to install %s', args.apk_name)
if blacklist:
blacklist.Extend([str(device)], reason='install_failure')
logging.warning('Blacklisting %s', str(device))
except device_errors.CommandTimeoutError:
logging.exception('Timed out while installing %s', args.apk_name)
if blacklist:
blacklist.Extend([str(device)], reason='install_timeout')
logging.warning('Blacklisting %s', str(device))
device_utils.DeviceUtils.parallel(devices).pMap(blacklisting_install)
if __name__ == '__main__':
sys.exit(main())
| [] | [] | [
"BUILDTYPE"
] | [] | ["BUILDTYPE"] | python | 1 | 0 | |
src/main/java/edu/wpi/first/gradlerio/wpi/WPIExtension.java | package edu.wpi.first.gradlerio.wpi;
import java.io.File;
import java.util.List;
import javax.inject.Inject;
import org.gradle.api.Project;
import org.gradle.api.model.ObjectFactory;
import org.gradle.api.plugins.JavaPlugin;
import org.gradle.internal.os.OperatingSystem;
import org.gradle.nativeplatform.plugins.NativeComponentPlugin;
import edu.wpi.first.gradlerio.wpi.dependencies.WPIVendorDepsExtension;
import edu.wpi.first.gradlerio.wpi.java.WPIJavaExtension;
import edu.wpi.first.gradlerio.wpi.cpp.WPINativeExtension;
import edu.wpi.first.gradlerio.wpi.simulation.SimulationExtension;
import edu.wpi.first.toolchain.NativePlatforms;
public class WPIExtension {
// WPILib (first.wpi.edu/FRC/roborio/maven) libs
private static final List<String> validImageVersions = List.of("2022_v2.*");
private String jreArtifactLocation = "edu.wpi.first.jdk:roborio-2022:11.0.9u12-1";
// WPILib (first.wpi.edu/FRC/roborio/maven) Utilities
private final WPIMavenExtension maven;
private final SimulationExtension sim;
private String frcYear = "2022";
private final NativePlatforms platforms;
private final Project project;
private final String toolsClassifier;
private final String cppToolsClassifier;
private final WPIVendorDepsExtension vendor;
public WPIVendorDepsExtension getVendor() {
return vendor;
}
public SimulationExtension getSim() {
return sim;
}
@Inject
public WPIExtension(Project project) {
this.project = project;
ObjectFactory factory = project.getObjects();
platforms = new NativePlatforms();
versions = factory.newInstance(WPIVersionsExtension.class);
vendor = project.getObjects().newInstance(WPIVendorDepsExtension.class, this);
sim = factory.newInstance(SimulationExtension.class, project, versions.getWpilibVersion(), NativePlatforms.desktop);
project.getPlugins().withType(NativeComponentPlugin.class, p -> {
cpp = factory.newInstance(WPINativeExtension.class, project, vendor, versions);
});
project.getPlugins().withType(JavaPlugin.class, p -> {
java = factory.newInstance(WPIJavaExtension.class, project, sim, versions, vendor);
});
maven = factory.newInstance(WPIMavenExtension.class, project);
if (project.hasProperty("forceToolsClassifier")) {
this.toolsClassifier = (String)project.findProperty("forceToolsClassifier");
} else {
this.toolsClassifier = (
OperatingSystem.current().isWindows() ?
System.getProperty("os.arch").equals("amd64") ? "win64" : "win32" :
OperatingSystem.current().isMacOsX() ? "mac64" :
OperatingSystem.current().isLinux() ? "linux64" :
null
);
}
if (project.hasProperty("forceCppToolsClassifier")) {
this.cppToolsClassifier = (String)project.findProperty("forceCppToolsClassifier");
} else {
this.cppToolsClassifier = (
OperatingSystem.current().isWindows() ?
System.getProperty("os.arch").equals("amd64") ? "windowsx86-64" : "windowsx86" :
OperatingSystem.current().isMacOsX() ? "osxx86-64" :
OperatingSystem.current().isLinux() ? "linuxx86-64" :
null
);
}
}
private String frcHomeCache;
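    // Resolves and caches the wpilib home directory for the configured FRC year:
    // under the Public folder on Windows, under the user home directory otherwise.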
public String getFrcHome() {
if (frcHomeCache != null) {
return this.frcHomeCache;
}
String frcHome = "";
if (OperatingSystem.current().isWindows()) {
String publicFolder = System.getenv("PUBLIC");
if (publicFolder == null) {
publicFolder = "C:\\Users\\Public";
}
File homeRoot = new File(publicFolder, "wpilib");
frcHome = new File(homeRoot, this.frcYear).toString();
} else {
String userFolder = System.getProperty("user.home");
File homeRoot = new File(userFolder, "wpilib");
frcHome = new File(homeRoot, this.frcYear).toString();
}
frcHomeCache = frcHome;
return frcHomeCache;
}
// public Map<String, Tuple> versions() {
// // Format:
// // property: [ PrettyName, Version, RecommendedKey ]
// return [
// "wpilibVersion" : new Tuple("WPILib", wpilibVersion, "wpilib"),
// "opencvVersion" : new Tuple("OpenCV", opencvVersion, "opencv"),
// "frcYear " : new Tuple("FRC Year", frcYear, "frcYear"),
// "googleTestVersion" : new Tuple("Google Test", googleTestVersion, "googleTest"),
// "imguiVersion" : new Tuple("ImGUI", imguiVersion, "imgui"),
// "wpimathVersion" : new Tuple("WPIMath", wpimathVersion, "wpimath"),
// "ejmlVersion" : new Tuple("EJML", ejmlVersion, "ejml"),
// "jacksonVersion" : new Tuple("Jackson", jacksonVersion, "jackson"),
// "smartDashboardVersion": new Tuple("SmartDashboard", smartDashboardVersion, "smartdashboard"),
// "shuffleboardVersion" : new Tuple("Shuffleboard", shuffleboardVersion, "shuffleboard"),
// "outlineViewerVersion" : new Tuple("OutlineViewer", outlineViewerVersion, "outlineviewer"),
// "robotBuilderVersion" : new Tuple("RobotBuilder", robotBuilderVersion, "robotbuilder"),
// "RobotBuilderOldVersion" : new Tuple("RobotBuilder-Old", robotBuilderOldVersion, "robotbuilderold"),
// "glassVersion" : new Tuple("Glass", glassVersion, "glass"),
// "pathWeaverVersion" : new Tuple("PathWeaver", pathWeaverVersion, "pathweaver"),
// ]
// }
public static List<String> getValidImageVersions() {
return validImageVersions;
}
public String getJreArtifactLocation() {
return jreArtifactLocation;
}
private final WPIVersionsExtension versions;
public WPIVersionsExtension getVersions() {
return versions;
}
private WPINativeExtension cpp;
public WPINativeExtension getCpp() {
return cpp;
}
private WPIJavaExtension java;
public WPIJavaExtension getJava() {
return java;
}
public WPIMavenExtension getMaven() {
return maven;
}
public String getFrcYear() {
return frcYear;
}
public NativePlatforms getPlatforms() {
return platforms;
}
public Project getProject() {
return project;
}
public String getToolsClassifier() {
return toolsClassifier;
}
public String getCppToolsClassifier() {
return cppToolsClassifier;
}
public void setJreArtifactLocation(String jreArtifactLocation) {
this.jreArtifactLocation = jreArtifactLocation;
}
public void setFrcYear(String frcYear) {
this.frcYear = frcYear;
}
}
| [
"\"PUBLIC\""
] | [] | [
"PUBLIC"
] | [] | ["PUBLIC"] | java | 1 | 0 | |
docs/conch/howto/listings/echoclient_ssh.py | #!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import print_function
if __name__ == '__main__':
import sys
import echoclient_ssh
from twisted.internet.task import react
react(echoclient_ssh.main, sys.argv[1:])
import os, getpass
from twisted.python.filepath import FilePath
from twisted.python.usage import Options
from twisted.internet.defer import Deferred
from twisted.internet.protocol import Factory, Protocol
from twisted.internet.endpoints import UNIXClientEndpoint
from twisted.conch.ssh.keys import EncryptedKeyError, Key
from twisted.conch.client.knownhosts import KnownHostsFile
from twisted.conch.endpoints import SSHCommandClientEndpoint
class EchoOptions(Options):
optParameters = [
("host", "h", "localhost",
"hostname of the SSH server to which to connect"),
("port", "p", 22,
"port number of SSH server to which to connect", int),
("username", "u", getpass.getuser(),
"username with which to authenticate with the SSH server"),
("identity", "i", None,
"file from which to read a private key to use for authentication"),
("password", None, None,
"password to use for authentication"),
("knownhosts", "k", "~/.ssh/known_hosts",
"file containing known ssh server public key data"),
]
optFlags = [
["no-agent", None, "Disable use of key agent"],
]
class NoiseProtocol(Protocol):
def connectionMade(self):
self.finished = Deferred()
self.strings = ["bif", "pow", "zot"]
self.sendNoise()
def sendNoise(self):
if self.strings:
self.transport.write(self.strings.pop(0) + "\n")
else:
self.transport.loseConnection()
def dataReceived(self, data):
print("Server says:", data)
self.sendNoise()
def connectionLost(self, reason):
self.finished.callback(None)
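# Load a private key from the given path, prompting for a passphrase if the key is encrypted.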
def readKey(path):
try:
return Key.fromFile(path)
except EncryptedKeyError:
passphrase = getpass.getpass("%r keyphrase: " % (path,))
return Key.fromFile(path, passphrase=passphrase)
class ConnectionParameters(object):
def __init__(self, reactor, host, port, username, password, keys,
knownHosts, agent):
self.reactor = reactor
self.host = host
self.port = port
self.username = username
self.password = password
self.keys = keys
self.knownHosts = knownHosts
self.agent = agent
@classmethod
def fromCommandLine(cls, reactor, argv):
config = EchoOptions()
config.parseOptions(argv)
keys = []
if config["identity"]:
keyPath = os.path.expanduser(config["identity"])
if os.path.exists(keyPath):
keys.append(readKey(keyPath))
knownHostsPath = FilePath(os.path.expanduser(config["knownhosts"]))
if knownHostsPath.exists():
knownHosts = KnownHostsFile.fromPath(knownHostsPath)
else:
knownHosts = None
if config["no-agent"] or "SSH_AUTH_SOCK" not in os.environ:
agentEndpoint = None
else:
agentEndpoint = UNIXClientEndpoint(
reactor, os.environ["SSH_AUTH_SOCK"])
return cls(
reactor, config["host"], config["port"],
config["username"], config["password"], keys,
knownHosts, agentEndpoint)
def endpointForCommand(self, command):
return SSHCommandClientEndpoint.newConnection(
self.reactor, command, self.username, self.host,
port=self.port, keys=self.keys, password=self.password,
agentEndpoint=self.agent, knownHosts=self.knownHosts)
def main(reactor, *argv):
parameters = ConnectionParameters.fromCommandLine(reactor, argv)
endpoint = parameters.endpointForCommand(b"/bin/cat")
factory = Factory()
factory.protocol = NoiseProtocol
d = endpoint.connect(factory)
d.addCallback(lambda proto: proto.finished)
return d
| [] | [] | [
"SSH_AUTH_SOCK"
] | [] | ["SSH_AUTH_SOCK"] | python | 1 | 0 | |
ponylib_app/db/db.go | package db
import (
"context"
"fmt"
"os"
"github.com/jackc/pgx/v4/pgxpool"
)
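// Connect opens a pgx connection pool using the DATABASE_URL environment variable.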
func Connect() (*pgxpool.Pool, error) {
conn, err := pgxpool.Connect(context.Background(), os.Getenv("DATABASE_URL"))
if err != nil {
return nil, fmt.Errorf("unable to connect to database: %w", err)
}
return conn, nil
}
func CloseConnection(db *pgxpool.Pool) {
db.Close()
}
| [
"\"DATABASE_URL\""
] | [] | [
"DATABASE_URL"
] | [] | ["DATABASE_URL"] | go | 1 | 0 | |
internal/klogs/loggers.go | /*
* MIT License
*
* Copyright (c) 2018 Kasun Vithanage
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package klogs
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
"github.com/kasvith/kache/internal/config"
"github.com/kasvith/kache/internal/sys"
)
var Logger *logrus.Entry
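// InitLoggers configures the package Logger from the application config: log level from the
// Debug/Verbose flags, formatter from LogType, and an optional log file in addition to stderr.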
func InitLoggers(config config.AppConfig) {
var logrusLogger = logrus.New()
if config.Debug == true {
logrusLogger.SetLevel(logrus.DebugLevel)
} else if config.Verbose == true {
logrusLogger.SetLevel(logrus.InfoLevel)
} else {
logrusLogger.SetLevel(logrus.WarnLevel)
}
fields := logrus.Fields{"pid": os.Getpid()}
switch strings.ToLower(config.LogType) {
case "json":
logrusLogger.Formatter = &logrus.JSONFormatter{}
break
case "logfmt":
logrusLogger.Formatter = &logrus.TextFormatter{DisableColors: true, ForceColors: false}
break
case "default":
logrusLogger.Formatter = &kacheFormatter{}
break
default:
logrusLogger.Formatter = &kacheFormatter{}
logrusLogger.WithFields(fields).Warnf("%s format is unknown, continuing with default", config.LogType)
break
}
Logger = logrusLogger.WithFields(fields)
	// if we don't want logging, just discard everything to a null device
if config.Logging == false {
logrusLogger.Out = ioutil.Discard
}
if config.Logging && config.Logfile != "" {
// try to create folder path if not exists
err := sys.AutoCreateSubDirs(config.Logfile)
// if failed, we can skip for logging to a file, warn user and continue
if err != nil {
Logger.Warningf("%s cannot be opened, continue with stderr", config.Logfile)
return
}
// try to create the file
fp, err := os.OpenFile(config.Logfile, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
// if failed, skip and warn
if err != nil {
Logger.Warningf("%s cannot be opened, continue with stderr", config.Logfile)
return
}
// info about log file
path, err := filepath.Abs(config.Logfile)
if err != nil {
Logger.Errorf("cannot resolve absolute path for %s", config.Logfile)
} else {
Logger.Infof("log file is %s", path)
}
// use two writers
multi := io.MultiWriter(os.Stderr, fp)
logrusLogger.Out = multi
}
}
type kacheFormatter struct {
}
func (kacheFormatter) Format(e *logrus.Entry) ([]byte, error) {
buffer := bytes.Buffer{}
lvl := strings.ToUpper(e.Level.String()[0:4])
t := e.Time.Format("2006-01-02 15:04:05")
str := fmt.Sprintf("[%s] %s(%d): %s\n", lvl, t, e.Data["pid"], e.Message)
buffer.WriteString(str)
return buffer.Bytes(), nil
}
func PrintErrorAndExit(err error, exit int) {
if os.Getenv("ENV") == "DEBUG" {
panic(err)
}
fmt.Fprintln(os.Stderr, err)
os.Exit(exit)
}
| [
"\"ENV\""
] | [] | [
"ENV"
] | [] | ["ENV"] | go | 1 | 0 | |
integration-tests/job_service_test.go | // +build integration
package integrationTests
import (
"encoding/json"
"flag"
"fmt"
"os"
"testing"
"time"
"github.com/sapcc/arc/api-server/models"
"github.com/sapcc/arc/arc"
)
var agentIdentityFlag = flag.String("agent-identity", "", "integration-test")
type systemFact struct {
Os string `json:"os"`
PlatformFamily string `json:"platform_family"`
}
func TestRunJob(t *testing.T) {
	// override flags if the environment variable exists
if os.Getenv("AGENT_IDENTITY") != "" {
agentIdentity := os.Getenv("AGENT_IDENTITY")
agentIdentityFlag = &agentIdentity
}
client, err := NewTestClient()
if err != nil {
t.Fatal(err)
}
// get info about the agent
statusCode, body := client.GetApiV1(fmt.Sprint("/agents/", *agentIdentityFlag, "/facts"), ApiServer)
if statusCode != "200 OK" {
t.Errorf("Expected to get 200 response code getting facts for agent %s got %s ", *agentIdentityFlag, statusCode)
return
}
// transform the body to system facts struct
var sysFact systemFact
if err := json.Unmarshal(*body, &sysFact); err != nil {
t.Error("Expected not to get an error unmarshaling: ", err)
return
}
// build the post request
payload := `echo Start; for i in {1..2}; do echo $i; sleep 1s; done; echo Done`
if sysFact.Os == "windows" || sysFact.PlatformFamily == "windows" {
payload = `echo Start; for($i=1;$i -le 2;$i++){echo $i; sleep -seconds 1}; echo Done`
}
to := *agentIdentityFlag
timeout := 60
agent := "execute"
action := "script"
data := fmt.Sprintf(`{"to":%q,"timeout":%v,"agent":%q,"action":%q,"payload":%q}`, to, timeout, agent, action, payload)
jsonStr := []byte(data)
// post the job
statusCode, body = client.PostApiV1("/jobs", ApiServer, nil, jsonStr)
if statusCode != "200 OK" {
t.Errorf("Expected to get 200 response code posting the job, got %s", statusCode)
return
}
var jobId models.JobID
err = json.Unmarshal(*body, &jobId)
if err != nil {
t.Error("Error unmarshalling jobs reply: ", err)
return
}
err = checkStatus(client, jobId, arc.Executing, 3000)
if err != nil {
t.Error(err)
return
}
err = checkStatus(client, jobId, arc.Complete, 8000)
if err != nil {
t.Error(err)
return
}
// check log
statusCode, body = client.GetApiV1(fmt.Sprint("/jobs/", jobId.RequestID, "/log"), ApiServer)
if statusCode != "200 OK" {
t.Errorf("Expected to get 200 response code for job %s, got %s", jobId.RequestID, statusCode)
}
if len(string(*body)) == 0 {
t.Error("Expected to get a log")
}
}
// private
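// checkStatus polls the job until it reaches the expected state or the timeout (in milliseconds) expires.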
func checkStatus(client *Client, jobId models.JobID, status arc.JobState, timeout int) error {
var job *models.Job
var err error
for {
job, err = getJobStatus(client, jobId)
if err != nil {
break
}
if job.Status == status {
break
}
if timeout < 0 {
err = fmt.Errorf(fmt.Sprint("Timeout: Expected to get status ", status, " for job ", jobId.RequestID, ". Got status ", job.Status))
break
}
timeout = timeout - 100
time.Sleep(time.Millisecond * 100)
}
return err
}
func getJobStatus(client *Client, jobId models.JobID) (*models.Job, error) {
var job models.Job
statusCode, body := client.GetApiV1(fmt.Sprint("/jobs/", jobId.RequestID), ApiServer)
if statusCode != "200 OK" {
return nil, fmt.Errorf("Expected to get 200 response code getting job %s", jobId.RequestID)
}
err := json.Unmarshal(*body, &job)
if err != nil {
return nil, fmt.Errorf("Expected not to get an error unmarshaling body from job %s", jobId.RequestID)
}
return &job, nil
}
| [
"\"AGENT_IDENTITY\"",
"\"AGENT_IDENTITY\""
] | [] | [
"AGENT_IDENTITY"
] | [] | ["AGENT_IDENTITY"] | go | 1 | 0 | |
pkg/controller/dnsendpoint/nameserver/aws_live_test.go | package nameserver
import (
"fmt"
"math/rand"
"os"
"testing"
"time"
"github.com/stretchr/testify/suite"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/openshift/hive/pkg/awsclient"
)
// This test will perform a test using real queries with AWS.
// By default, this test will be skipped.
// To enable the test, set the TEST_LIVE_AWS environment variable to the value
// of the root domain that you would like to use for the tests. Note that there
// must be a public hosted zone for that root domain in the default AWS account.
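//
// An example invocation might look like the following (the domain is hypothetical;
// AWS credentials must be available through the default credential chain):
//
//	TEST_LIVE_AWS=hive-live-test.example.com go test ./pkg/controller/dnsendpoint/nameserver/ -run TestLiveAWS -v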
func TestLiveAWS(t *testing.T) {
rootDomain := os.Getenv("TEST_LIVE_AWS")
if rootDomain == "" {
t.SkipNow()
}
rand.Seed(time.Now().UnixNano())
suite.Run(t, &LiveAWSTestSuite{rootDomain: rootDomain})
}
type LiveAWSTestSuite struct {
suite.Suite
rootDomain string
}
func (s *LiveAWSTestSuite) TestGetForNonExistentZone() {
nameServers, err := s.getCUT().Get("non-existent.zone.live-aws-test.com")
s.NoError(err, "expected no error")
s.Empty(nameServers, "expected no name servers")
}
func (s *LiveAWSTestSuite) TestGetForExistentZone() {
nameServers, err := s.getCUT().Get(s.rootDomain)
s.NoError(err, "expected no error")
s.NotEmpty(nameServers, "expected some name servers")
s.Len(nameServers[s.rootDomain], 4, "expected NS to have 4 values")
}
func (s *LiveAWSTestSuite) TestCreateAndDelete() {
cases := []struct {
name string
createValues []string
deleteValues []string
}{
{
name: "single value",
createValues: []string{"test-value"},
deleteValues: []string{"test-value"},
},
{
name: "single value, outdated delete",
createValues: []string{"test-value"},
deleteValues: []string{"bad-value"},
},
{
name: "multiple values",
createValues: []string{"test-value-1", "test-value-2", "test-value-3"},
deleteValues: []string{"test-value-1", "test-value-2", "test-value-3"},
},
{
name: "multiple values, outdated delete",
createValues: []string{"test-value-1", "test-value-2", "test-value-3"},
deleteValues: []string{"test-value-1", "test-value-2"},
},
{
name: "unknown delete values",
createValues: []string{"test-value"},
},
}
for _, tc := range cases {
s.T().Run(tc.name, func(t *testing.T) {
cut := s.getCUT()
domain := fmt.Sprintf("live-aws-test-%08d.%s", rand.Intn(100000000), s.rootDomain)
s.T().Logf("domain = %q", domain)
err := cut.Create(s.rootDomain, domain, sets.NewString(tc.createValues...))
if s.NoError(err, "unexpected error creating NS") {
defer func() {
err := cut.Delete(s.rootDomain, domain, sets.NewString(tc.deleteValues...))
s.NoError(err, "unexpected error deleting NS")
}()
}
nameServers, err := cut.Get(s.rootDomain)
s.NoError(err, "unexpected error querying domain")
s.NotEmpty(nameServers, "expected some name servers")
actualValues := nameServers[domain]
s.Equal(sets.NewString(tc.createValues...), actualValues, "unexpected values for domain")
})
}
}
func (s *LiveAWSTestSuite) TestDeleteOfNonExistentNS() {
cases := []struct {
name string
deleteValues []string
}{
{
name: "known values",
deleteValues: []string{"test-value"},
},
{
name: "unknown values",
},
}
for _, tc := range cases {
s.T().Run(tc.name, func(t *testing.T) {
err := s.getCUT().Delete(s.rootDomain, fmt.Sprintf("non-existent.subdomain.%s", s.rootDomain), sets.NewString(tc.deleteValues...))
s.NoError(err, "expected no error")
})
}
}
func (s *LiveAWSTestSuite) getCUT() *awsQuery {
return &awsQuery{
getAWSClient: func() (awsclient.Client, error) {
return awsclient.NewClient(nil, "", "", "us-east-1")
},
}
}
| [
"\"TEST_LIVE_AWS\""
] | [] | [
"TEST_LIVE_AWS"
] | [] | ["TEST_LIVE_AWS"] | go | 1 | 0 | |
pkg/scmprovider/client.go | package scmprovider
import (
"context"
"fmt"
"net/url"
"os"
"github.com/jenkins-x/go-scm/scm"
"k8s.io/apimachinery/pkg/util/sets"
)
// ToClient converts the scm client to an API that the prow plugins expect
func ToClient(client *scm.Client, botName string) *Client {
return &Client{client: client, botName: botName}
}
// SCMClient is an interface providing all functions on the Client struct.
type SCMClient interface {
// Functions implemented in client.go
BotName() (string, error)
SetBotName(string)
SupportsGraphQL() bool
ProviderType() string
PRRefFmt() string
SupportsPRLabels() bool
ServerURL() *url.URL
QuoteAuthorForComment(string) string
// Functions implemented in content.go
GetFile(string, string, string, string) ([]byte, error)
// Functions implemented in git.go
GetRef(string, string, string) (string, error)
DeleteRef(string, string, string) error
GetSingleCommit(string, string, string) (*scm.Commit, error)
// Functions implemented in issues.go
Query(context.Context, interface{}, map[string]interface{}) error
Search(scm.SearchOptions) ([]*scm.SearchIssue, *RateLimits, error)
ListIssueEvents(string, string, int) ([]*scm.ListedIssueEvent, error)
AssignIssue(string, string, int, []string) error
UnassignIssue(string, string, int, []string) error
AddLabel(string, string, int, string, bool) error
RemoveLabel(string, string, int, string, bool) error
DeleteComment(string, string, int, int, bool) error
DeleteStaleComments(string, string, int, []*scm.Comment, bool, func(*scm.Comment) bool) error
ListIssueComments(string, string, int) ([]*scm.Comment, error)
GetIssueLabels(string, string, int, bool) ([]*scm.Label, error)
CreateComment(string, string, int, bool, string) error
ReopenIssue(string, string, int) error
FindIssues(string, string, bool) ([]scm.Issue, error)
CloseIssue(string, string, int) error
EditComment(owner, repo string, number int, id int, comment string, pr bool) error
// Functions implemented in organizations.go
ListTeams(string) ([]*scm.Team, error)
ListTeamMembers(int, string) ([]*scm.TeamMember, error)
ListOrgMembers(string) ([]*scm.TeamMember, error)
IsOrgAdmin(string, string) (bool, error)
// Functions implemented in pull_requests.go
GetPullRequest(string, string, int) (*scm.PullRequest, error)
ListPullRequestComments(string, string, int) ([]*scm.Comment, error)
GetPullRequestChanges(string, string, int) ([]*scm.Change, error)
Merge(string, string, int, MergeDetails) error
ReopenPR(string, string, int) error
ClosePR(string, string, int) error
ListAllPullRequestsForFullNameRepo(string, scm.PullRequestListOptions) ([]*scm.PullRequest, error)
// Functions implemented in repositories.go
GetRepoLabels(string, string) ([]*scm.Label, error)
IsCollaborator(string, string, string) (bool, error)
ListCollaborators(string, string) ([]scm.User, error)
CreateStatus(string, string, string, *scm.StatusInput) (*scm.Status, error)
CreateGraphQLStatus(string, string, string, *Status) (*scm.Status, error)
ListStatuses(string, string, string) ([]*scm.Status, error)
GetCombinedStatus(string, string, string) (*scm.CombinedStatus, error)
HasPermission(string, string, string, ...string) (bool, error)
GetUserPermission(string, string, string) (string, error)
IsMember(string, string) (bool, error)
GetRepositoryByFullName(string) (*scm.Repository, error)
// Functions implemented in reviews.go
ListReviews(string, string, int) ([]*scm.Review, error)
RequestReview(string, string, int, []string) error
UnrequestReview(string, string, int, []string) error
// Functions not yet implemented
ClearMilestone(string, string, int) error
SetMilestone(string, string, int, int) error
ListMilestones(string, string) ([]Milestone, error)
}
// Client represents an interface that prow plugins expect on top of go-scm
type Client struct {
client *scm.Client
botName string
}
// ClearMilestone clears milestone
func (c *Client) ClearMilestone(org, repo string, num int) error {
return scm.ErrNotSupported
}
// SetMilestone sets milestone
func (c *Client) SetMilestone(org, repo string, issueNum, milestoneNum int) error {
return scm.ErrNotSupported
}
// ListMilestones list milestones
func (c *Client) ListMilestones(org, repo string) ([]Milestone, error) {
return nil, scm.ErrNotSupported
}
// BotName returns the bot name
func (c *Client) BotName() (string, error) {
botName := c.botName
if botName == "" {
botName = os.Getenv("GIT_USER")
if botName == "" {
botName = "jenkins-x-bot"
}
c.botName = botName
}
return botName, nil
}
// SetBotName sets the bot name
func (c *Client) SetBotName(botName string) {
c.botName = botName
}
// SupportsPRLabels returns true if the underlying provider supports PR labels
func (c *Client) SupportsPRLabels() bool {
return !NoLabelProviders().Has(c.ProviderType())
}
// QuoteAuthorForComment will quote the author login for use in "@author" if appropriate for the provider.
func (c *Client) QuoteAuthorForComment(author string) string {
if c.ProviderType() == "stash" {
return `"` + author + `"`
}
return author
}
// ServerURL returns the server URL for the client
func (c *Client) ServerURL() *url.URL {
return c.client.BaseURL
}
// SupportsGraphQL returns true if the underlying provider supports our GraphQL queries
// Currently, that means it has to be GitHub.
func (c *Client) SupportsGraphQL() bool {
return c.client.Driver == scm.DriverGithub
}
// ProviderType returns the type of the underlying SCM provider
func (c *Client) ProviderType() string {
return c.client.Driver.String()
}
// PRRefFmt returns the "refs/(something)/%d/(something)" sprintf format used for constructing PR refs for this provider
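// For example (illustrative), fmt.Sprintf(c.PRRefFmt(), 42) yields
// "refs/pull/42/head" on GitHub, "refs/merge-requests/42/head" on GitLab and
// "refs/pull-requests/42/from" on Bitbucket Server (stash).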
func (c *Client) PRRefFmt() string {
switch c.client.Driver {
case scm.DriverStash:
return "refs/pull-requests/%d/from"
case scm.DriverGitlab:
return "refs/merge-requests/%d/head"
default:
return "refs/pull/%d/head"
}
}
func (c *Client) repositoryName(owner string, repo string) string {
return fmt.Sprintf("%s/%s", owner, repo)
}
func (c *Client) createListOptions() scm.ListOptions {
return scm.ListOptions{}
}
// FileNotFound happens when github cannot find the file requested by GetFile().
type FileNotFound struct {
org, repo, path, commit string
}
// Error formats a file not found error
func (e *FileNotFound) Error() string {
return fmt.Sprintf("%s/%s/%s @ %s not found", e.org, e.repo, e.path, e.commit)
}
// NoLabelProviders returns a set of provider names that don't support labels.
func NoLabelProviders() sets.String {
// "coding" is a placeholder provider name from go-scm that we'll use for testing the comment support for label logic.
return sets.NewString("stash", "coding")
}
| [
"\"GIT_USER\""
] | [] | [
"GIT_USER"
] | [] | ["GIT_USER"] | go | 1 | 0 | |
azure-samples/src/main/java/com/microsoft/azure/management/network/samples/ManageInternalLoadBalancer.java | /**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*/
package com.microsoft.azure.management.network.samples;
import com.microsoft.azure.management.Azure;
import com.microsoft.azure.management.compute.AvailabilitySet;
import com.microsoft.azure.management.compute.AvailabilitySetSkuTypes;
import com.microsoft.azure.management.compute.KnownLinuxVirtualMachineImage;
import com.microsoft.azure.management.compute.VirtualMachine;
import com.microsoft.azure.management.compute.VirtualMachineSizeTypes;
import com.microsoft.azure.management.network.LoadBalancer;
import com.microsoft.azure.management.network.Network;
import com.microsoft.azure.management.network.NetworkInterface;
import com.microsoft.azure.management.network.TransportProtocol;
import com.microsoft.azure.management.resources.fluentcore.arm.Region;
import com.microsoft.azure.management.resources.fluentcore.model.Creatable;
import com.microsoft.azure.management.resources.fluentcore.utils.SdkContext;
import com.microsoft.azure.management.samples.Utils;
import com.microsoft.rest.LogLevel;
import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.commons.lang3.time.StopWatch;
/**
* Azure Network sample for managing internal load balancers -
*
* High-level ...
*
* - Create an internal load balancer that receives network traffic on
* port 1521 (Oracle SQL Node Port) and sends load-balanced traffic
* to two virtual machines
*
* - Create NAT rules for SSH and TELNET access to virtual
* machines behind the load balancer
*
* - Create a health probe
*
* Details ...
*
* Create an internal facing load balancer with ...
* - A frontend private IP address
* - One backend address pool which contains network interfaces for the virtual
* machines to receive 1521 (Oracle SQL Node Port) network traffic from the load balancer
* - One load balancing rule to map port 1521 on the load balancer to
* ports in the backend address pool
* - One probe which contains HTTP health probe used to check availability
* of virtual machines in the backend address pool
* - Two inbound NAT rules which contain rules that map a public port on the load
* balancer to a port for a specific virtual machine in the backend address pool
* - this provides direct VM connectivity for SSH to port 22 and TELNET to port 23
*
* Create two network interfaces in the backend subnet ...
* - And associate network interfaces to backend pools and NAT rules
*
* Create two virtual machines in the backend subnet ...
* - And assign network interfaces
*
* Update an existing load balancer, configure TCP idle timeout
* Create another load balancer
* List load balancers
* Remove an existing load balancer.
*/
public final class ManageInternalLoadBalancer {
/**
* Main function which runs the actual sample.
* @param azure instance of the azure client
* @return true if sample runs successfully
*/
public static boolean runSample(Azure azure) {
final String rgName = SdkContext.randomResourceName("rgNEML", 15);
final String vnetName = SdkContext.randomResourceName("vnet", 24);
final String loadBalancerName3 = SdkContext.randomResourceName("intlb3" + "-", 18);
final String loadBalancerName4 = SdkContext.randomResourceName("intlb4" + "-", 18);
final String privateFrontEndName = loadBalancerName3 + "-BE";
final String backendPoolName3 = loadBalancerName3 + "-BAP3";
final int orcaleSQLNodePort = 1521;
final String httpProbe = "httpProbe";
final String tcpLoadBalancingRule = "tcpRule";
final String natRule6000to22forVM3 = "nat6000to22forVM3";
final String natRule6001to23forVM3 = "nat6001to23forVM3";
final String natRule6002to22forVM4 = "nat6002to22forVM4";
final String natRule6003to23forVM4 = "nat6003to23forVM4";
final String networkInterfaceName3 = SdkContext.randomResourceName("nic3", 24);
final String networkInterfaceName4 = SdkContext.randomResourceName("nic4", 24);
final String availSetName = SdkContext.randomResourceName("av2", 24);
final String vmName3 = SdkContext.randomResourceName("lVM3", 24);
final String vmName4 = SdkContext.randomResourceName("lVM4", 24);
final String userName = "tirekicker";
final String sshKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfSPC2K7LZcFKEO+/t3dzmQYtrJFZNxOsbVgOVKietqHyvmYGHEC0J2wPdAqQ/63g/hhAEFRoyehM+rbeDri4txB3YFfnOK58jqdkyXzupWqXzOrlKY4Wz9SKjjN765+dqUITjKRIaAip1Ri137szRg71WnrmdP3SphTRlCx1Bk2nXqWPsclbRDCiZeF8QOTi4JqbmJyK5+0UqhqYRduun8ylAwKKQJ1NJt85sYIHn9f1Rfr6Tq2zS0wZ7DHbZL+zB5rSlAr8QyUdg/GQD+cmSs6LvPJKL78d6hMGk84ARtFo4A79ovwX/Fj01znDQkU6nJildfkaolH2rWFG/qttD azjava@javalib.com";
try {
//=============================================================
// Create a virtual network with a frontend and a backend subnets
System.out.println("Creating virtual network with a frontend and a backend subnets...");
Network network = azure.networks().define(vnetName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAddressSpace("172.16.0.0/16")
.defineSubnet("Front-end")
.withAddressPrefix("172.16.1.0/24")
.attach()
.defineSubnet("Back-end")
.withAddressPrefix("172.16.3.0/24")
.attach()
.create();
System.out.println("Created a virtual network");
// Print the virtual network details
Utils.print(network);
//=============================================================
// Create an internal load balancer
// Create a frontend IP address
// Two backend address pools which contain network interfaces for the virtual
// machines to receive HTTP and HTTPS network traffic from the load balancer
// Two load balancing rules for HTTP and HTTPS to map public ports on the load
// balancer to ports in the backend address pool
// Two probes which contain HTTP and HTTPS health probes used to check availability
// of virtual machines in the backend address pool
// Two inbound NAT rules which contain rules that map a public port on the load
// balancer to a port for a specific virtual machine in the backend address pool
// - this provides direct VM connectivity for SSH to port 22 and TELNET to port 23
System.out.println("Creating an internal facing load balancer with ...");
System.out.println("- A private IP address");
System.out.println("- One backend address pool which contain network interfaces for the virtual\n"
+ " machines to receive 1521 network traffic from the load balancer");
System.out.println("- One load balancing rules for 1521 to map public ports on the load\n"
+ " balancer to ports in the backend address pool");
System.out.println("- One probe which contains HTTP health probe used to check availability\n"
+ " of virtual machines in the backend address pool");
System.out.println("- Two inbound NAT rules which contain rules that map a port on the load\n"
+ " balancer to a port for a specific virtual machine in the backend address pool\n"
+ " - this provides direct VM connectivity for SSH to port 22 and TELNET to port 23");
LoadBalancer loadBalancer3 = azure.loadBalancers().define(loadBalancerName3)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.definePrivateFrontend(privateFrontEndName)
.withExistingSubnet(network, "Back-end")
.withPrivateIPAddressStatic("172.16.3.5")
.attach()
// Add one backend - one per rule
.defineBackend(backendPoolName3)
.attach()
// Add one probes - one per rule
.defineHttpProbe("httpProbe")
.withRequestPath("/")
.attach()
// Add one rule that uses above backend and probe
.defineLoadBalancingRule(tcpLoadBalancingRule)
.withProtocol(TransportProtocol.TCP)
.withFrontend(privateFrontEndName)
.withFrontendPort(orcaleSQLNodePort)
.withProbe(httpProbe)
.withBackend(backendPoolName3)
.attach()
// Add two nat pools to enable direct VM connectivity for
// SSH to port 22 and TELNET to port 23
.defineInboundNatRule(natRule6000to22forVM3)
.withProtocol(TransportProtocol.TCP)
.withFrontend(privateFrontEndName)
.withFrontendPort(6000)
.withBackendPort(22)
.attach()
.defineInboundNatRule(natRule6001to23forVM3)
.withProtocol(TransportProtocol.TCP)
.withFrontend(privateFrontEndName)
.withFrontendPort(6001)
.withBackendPort(23)
.attach()
.defineInboundNatRule(natRule6002to22forVM4)
.withProtocol(TransportProtocol.TCP)
.withFrontend(privateFrontEndName)
.withFrontendPort(6002)
.withBackendPort(22)
.attach()
.defineInboundNatRule(natRule6003to23forVM4)
.withProtocol(TransportProtocol.TCP)
.withFrontend(privateFrontEndName)
.withFrontendPort(6003)
.withBackendPort(23)
.attach()
.create();
// Print load balancer details
System.out.println("Created an internal load balancer");
Utils.print(loadBalancer3);
//=============================================================
// Define two network interfaces in the backend subnet
// associate network interfaces to NAT rules, backend pools
Creatable<NetworkInterface> networkInterface3Creatable = azure.networkInterfaces().define(networkInterfaceName3)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("Back-end")
.withPrimaryPrivateIPAddressDynamic()
.withExistingLoadBalancerBackend(loadBalancer3, backendPoolName3)
.withExistingLoadBalancerInboundNatRule(loadBalancer3, natRule6000to22forVM3)
.withExistingLoadBalancerInboundNatRule(loadBalancer3, natRule6001to23forVM3);
Creatable<NetworkInterface> networkInterface4Creatable = azure.networkInterfaces().define(networkInterfaceName4)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withExistingPrimaryNetwork(network)
.withSubnet("Back-end")
.withPrimaryPrivateIPAddressDynamic()
.withExistingLoadBalancerBackend(loadBalancer3, backendPoolName3)
.withExistingLoadBalancerInboundNatRule(loadBalancer3, natRule6002to22forVM4)
.withExistingLoadBalancerInboundNatRule(loadBalancer3, natRule6003to23forVM4);
//=============================================================
// Define an availability set
Creatable<AvailabilitySet> availSet2Definition = azure.availabilitySets().define(availSetName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withFaultDomainCount(2)
.withUpdateDomainCount(4)
.withSku(AvailabilitySetSkuTypes.MANAGED);
//=============================================================
// Create two virtual machines and assign network interfaces
System.out.println("Creating two virtual machines in the frontend subnet ...");
System.out.println("- And assigning network interfaces");
List <Creatable<VirtualMachine>> virtualMachineCreateables2 = new ArrayList<Creatable<VirtualMachine>>();
Creatable<VirtualMachine> virtualMachine3Creatable = azure.virtualMachines().define(vmName3)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.withNewPrimaryNetworkInterface(networkInterface3Creatable)
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername(userName)
.withSsh(sshKey)
.withSize(VirtualMachineSizeTypes.STANDARD_D3_V2)
.withNewAvailabilitySet(availSet2Definition);
virtualMachineCreateables2.add(virtualMachine3Creatable);
Creatable<VirtualMachine> virtualMachine4Creatable = azure.virtualMachines().define(vmName4)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.withNewPrimaryNetworkInterface(networkInterface4Creatable)
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername(userName)
.withSsh(sshKey)
.withSize(VirtualMachineSizeTypes.STANDARD_D3_V2)
.withNewAvailabilitySet(availSet2Definition);
virtualMachineCreateables2.add(virtualMachine4Creatable);
StopWatch stopwatch = new StopWatch();
stopwatch.start();
Collection<VirtualMachine> virtualMachines = azure.virtualMachines().create(virtualMachineCreateables2).values();
stopwatch.stop();
System.out.println("Created 2 Linux VMs: (took " + (stopwatch.getTime() / 1000) + " seconds) ");
System.out.println();
// Print virtual machine details
for (VirtualMachine vm : virtualMachines) {
Utils.print(vm);
System.out.println();
}
//=============================================================
// Update a load balancer
// configure TCP idle timeout to 15 minutes
System.out.println("Updating the load balancer ...");
loadBalancer3.update()
.updateLoadBalancingRule(tcpLoadBalancingRule)
.withIdleTimeoutInMinutes(15)
.parent()
.apply();
System.out.println("Update the load balancer with a TCP idle timeout to 15 minutes");
//=============================================================
// Create another internal load balancer
// Create a frontend IP address
// Two backend address pools which contain network interfaces for the virtual
// machines to receive HTTP and HTTPS network traffic from the load balancer
// Two load balancing rules for HTTP and HTTPS to map public ports on the load
// balancer to ports in the backend address pool
// Two probes which contain HTTP and HTTPS health probes used to check availability
// of virtual machines in the backend address pool
// Two inbound NAT rules which contain rules that map a public port on the load
// balancer to a port for a specific virtual machine in the backend address pool
// - this provides direct VM connectivity for SSH to port 22 and TELNET to port 23
System.out.println("Creating another internal facing load balancer with ...");
System.out.println("- A private IP address");
System.out.println("- One backend address pool which contain network interfaces for the virtual\n"
+ " machines to receive 1521 network traffic from the load balancer");
System.out.println("- One load balancing rules for 1521 to map public ports on the load\n"
+ " balancer to ports in the backend address pool");
System.out.println("- One probe which contains HTTP health probe used to check availability\n"
+ " of virtual machines in the backend address pool");
System.out.println("- Two inbound NAT rules which contain rules that map a port on the load\n"
+ " balancer to a port for a specific virtual machine in the backend address pool\n"
+ " - this provides direct VM connectivity for SSH to port 22 and TELNET to port 23");
LoadBalancer loadBalancer4 = azure.loadBalancers().define(loadBalancerName4)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.definePrivateFrontend(privateFrontEndName)
.withExistingSubnet(network, "Back-end")
.withPrivateIPAddressStatic("172.16.3.15")
.attach()
// Add one backend - one per rule
.defineBackend(backendPoolName3)
.attach()
// Add one probes - one per rule
.defineHttpProbe("httpProbe")
.withRequestPath("/")
.attach()
// Add one rule that uses above backend and probe
.defineLoadBalancingRule(tcpLoadBalancingRule)
.withProtocol(TransportProtocol.TCP)
.withFrontend(privateFrontEndName)
.withFrontendPort(orcaleSQLNodePort)
.withProbe(httpProbe)
.withBackend(backendPoolName3)
.attach()
// Add two nat pools to enable direct VM connectivity for
// SSH to port 22 and TELNET to port 23
.defineInboundNatRule(natRule6000to22forVM3)
.withProtocol(TransportProtocol.TCP)
.withFrontend(privateFrontEndName)
.withFrontendPort(6000)
.withBackendPort(22)
.attach()
.defineInboundNatRule(natRule6001to23forVM3)
.withProtocol(TransportProtocol.TCP)
.withFrontend(privateFrontEndName)
.withFrontendPort(6001)
.withBackendPort(23)
.attach()
.defineInboundNatRule(natRule6002to22forVM4)
.withProtocol(TransportProtocol.TCP)
.withFrontend(privateFrontEndName)
.withFrontendPort(6002)
.withBackendPort(22)
.attach()
.defineInboundNatRule(natRule6003to23forVM4)
.withProtocol(TransportProtocol.TCP)
.withFrontend(privateFrontEndName)
.withFrontendPort(6003)
.withBackendPort(23)
.attach()
.create();
// Print load balancer details
System.out.println("Created an internal load balancer");
Utils.print(loadBalancer4);
//=============================================================
// List load balancers
List<LoadBalancer> loadBalancers = azure.loadBalancers().list();
System.out.println("Walking through the list of load balancers");
for (LoadBalancer loadBalancer : loadBalancers) {
Utils.print(loadBalancer);
System.out.println();
}
//=============================================================
// Remove a load balancer
System.out.println("Deleting load balancer " + loadBalancerName4
+ "(" + loadBalancer4.id() + ")");
azure.loadBalancers().deleteById(loadBalancer4.id());
System.out.println("Deleted load balancer" + loadBalancerName4);
return true;
} catch (Exception f) {
System.out.println(f.getMessage());
f.printStackTrace();
} finally {
try {
System.out.println("Deleting Resource Group: " + rgName);
azure.resourceGroups().deleteByName(rgName);
System.out.println("Deleted Resource Group: " + rgName);
} catch (NullPointerException npe) {
System.out.println("Did not create any resources in Azure. No clean up is necessary");
} catch (Exception g) {
g.printStackTrace();
}
}
return false;
}
/**
* Main entry point.
* @param args parameters.
*/
public static void main(String[] args) {
try {
//=============================================================
// Authenticate
final File credFile = new File(System.getenv("AZURE_AUTH_LOCATION"));
Azure azure = Azure.configure()
.withLogLevel(LogLevel.BODY)
.authenticate(credFile)
.withDefaultSubscription();
// Print selected subscription
System.out.println("Selected subscription: " + azure.subscriptionId());
runSample(azure);
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private ManageInternalLoadBalancer() {
}
}
| [
"\"AZURE_AUTH_LOCATION\""
] | [] | [
"AZURE_AUTH_LOCATION"
] | [] | ["AZURE_AUTH_LOCATION"] | java | 1 | 0 | |
boiler/__init__.py | import os
def load_dotenvs():
"""
Load dotenvs
Loads .env and .flaskenv files from project root directory.
:return:
"""
if not os.getenv('DOTENVS_LOADED'):
envs = ['.env', '.flaskenv']
for env in envs:
path = os.path.join(os.getcwd(), env)
if os.path.isfile(path):
dotenvs(path)
os.environ['DOTENVS_LOADED'] = 'yes'
# run immediately
dotenvs = True
try:
from dotenv import load_dotenv as dotenvs
load_dotenvs()
except ImportError:
pass
| [] | [] | [
"DOTENVS_LOADED"
] | [] | ["DOTENVS_LOADED"] | python | 1 | 0 | |
server/handleAnalytics.go | package server
import (
"net/http"
"os"
)
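// handleAnalytics serves a pre-rendered report file for a given domain and period.
// An illustrative request (the route name is an assumption, it is not defined in this file):
//
//	GET /analytics?domain=example.com&period=30
//
// resolves to the file os.Getenv("REPORTS_ROOT") + "example.com.month.html".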
func (s *Server) handleAnalytics() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
r.ParseForm()
d := r.FormValue("domain")
tp := r.FormValue("period")
if d == "" || tp == "" {
sendJSONError(w, http.StatusBadRequest, map[string]string{
"error": "the query params `domain` and `period` are both required",
})
return
}
switch tp {
case "1":
tp = ".day"
case "30":
tp = ".month"
default:
tp = ""
}
filename := os.Getenv("REPORTS_ROOT") + d + tp + ".html"
content, err := os.ReadFile(filename)
if err != nil {
sendJSONError(w, http.StatusInternalServerError, map[string]string{
"error": "There was an error reading the file: " + err.Error(),
})
return
}
w.Write(content)
}
}
| [
"\"REPORTS_ROOT\""
] | [] | [
"REPORTS_ROOT"
] | [] | ["REPORTS_ROOT"] | go | 1 | 0 | |
django_project/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
test_package/conanfile.py | from conans.model.conan_file import ConanFile
from conans import CMake
import os
# This easily allows copying the package to another user or channel
channel = os.getenv("CONAN_CHANNEL", "testing")
username = os.getenv("CONAN_USERNAME", "sunside")
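# e.g. (assumed workflow) to test against a different namespace, export these
# variables before running the package test:
# export CONAN_USERNAME=myuser
# export CONAN_CHANNEL=stable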
class DefaultNameConan(ConanFile):
name = "DefaultName"
version = "0.1"
settings = "os", "compiler", "arch", "build_type"
generators = "cmake"
requires = "lmdb/0.9.18@%s/%s" % (username, channel)
default_options = "lmdb:shared=False"
def build(self):
cmake = CMake(self.settings)
self.run('cmake %s %s' % (self.conanfile_directory, cmake.command_line))
self.run("cmake --build . %s" % cmake.build_config)
def test(self):
self.run("cd bin && .%smytest" % os.sep)
| [] | [] | [
"CONAN_USERNAME",
"CONAN_CHANNEL"
] | [] | ["CONAN_USERNAME", "CONAN_CHANNEL"] | python | 2 | 0 | |
core/cvt_00014.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
print(unicode("積雪メソッド","UTF-8"))
__author__ = "Arlo Emerson <arlo.emerson@sekisetsumethod.com>"
__status__ = "production"
__version__ = "14.0"
__date__ = "17 August 2018"
#--- LICENSE ------------------------------------------------------------------
# This code (cvt_[version number].py) and all software created by Sekisetsu Method and/or Arlo Emerson or other designated authors is covered by the MIT License.
# MIT License
# Copyright (c) 2017, 2018 Arlo Emerson, Sekisetsu Method
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#--- THE SEKISETSU METHOD EXPLAINED -------------------------------------------
'''
WHAT IT IS
Sekisetsu is a Japanese word for snowfall accumulation. The Sekisetsu Method (積雪メソッド) is a technical analysis tool combining price action geometry (candlesticks) with fluid dynamics to reveal otherwise hidden structure and market participant intention, and displays this information on a price chart. The method can also be termed "Price Action Fluid Dynamics". Many terms and ideas from fluid dynamics and chaotic systems are borrowed and used both in the code and as allegory in the training material. Regardless of terminology, the goal of the method is to empower the user to align trades with larger players.
HOW IT WORKS
OHLC price action data (in the form of a modified candlestick chart) creates the surfaces and boundaries within a control volume tank (CVT). The tank is filled with both heavy and light particles. The particles accumulate in the cavities and recesses of the price action surfaces. The pooling of particles results in three patterns: top-heavy, bottom-heavy and stagnant accumulations. These differences can be viewed directly on the chart, and are further expressed as an "imbalance ratio histogram". A standard deviation method is employed to locate relatively stagnant periods of price action. It is these periods of lower volatility coinciding with imbalances in particle accumulation where major entry signals emerge, revealing both the location and trading direction of large players, i.e. market makers.
The software is open source, highly configurable, and easily lends itself to integration with existing trading systems.
'''
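# A minimal sketch of the "stagnant period" idea described above (illustrative only;
# the actual implementation lives in paint_candle() below and uses
# lib.standard_deviation_function plus additional pi/phi scaling):
#
# closes = [110.51, 110.49, 110.50, 110.52, 110.48] # hypothetical closing prices
# mean = sum(closes) / len(closes)
# sigma = (sum((c - mean) ** 2 for c in closes) / len(closes)) ** 0.5
# # a relatively low sigma marks the stagnant periods where particle-imbalance signals are taken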
#--- USAGE --------------------------------------------------------------------
# • CSV files need to live in the "csv" folder at project root.
# • Histograms are created in the "histograms" folder at project root.
# • Simulations (frame by frame PNGs of the simulation) are created in the "simulations" folder at project root.
# To run the program:
# • Use PIP to install all requirements (see requirements.txt).
# • Add CSV files to the "csv" folder at project root.
# • If running native linux, comment out os.environ['SDL_VIDEODRIVER']='dummy'
# (this is a Windows workaround when running the Ubuntu shell on Windows)
# • from a shell, run:
# $ python cvt_[version_number].py
#------------------------------------------------------------------------------
#--- NOTES --------------------------------------------------------------------
# Project website with how-to and trading ideas: http://www.sekisetsumethod.com
# See the README file for detailed information on usage.
# See http://www.sekisetsumethod.com for charts, signals and training.
# Thanks to Maarten Baert's excellent physics engine (licensed under LGPL).
# More info: http://www.maartenbaert.be/extremephysics/
#------------------------------------------------------------------------------
import os as os
os.environ['SDL_VIDEODRIVER']='dummy' # Use this if running the Ubuntu bash on windows
import pygame, sys, math, random, csv, glob, subprocess, shutil, heapq, argparse, textwrap
import lib.standard_deviation_function as sdef
import lib.TextColors as TextColors
from lib.extremephysics import *
from numpy import interp
from PIL import Image, ImageDraw
target_dir = "../csv/"
file_type = '.csv'
particle_birth_count = 1280 # should match window width
# Particle/fluid simulations occur within a Control Volume Tank.
# The current settings in this version are tuned to USDJPY 15 and 30 minute chart data.
class ControlVolumeTank():
def __init__(self):
print(self.__class__.__name__, __version__)
print("Running " + TextColors.HEADERLEFT3 + TextColors.INVERTED + self.__class__.__name__ + " " + \
TextColors.ENDC + " version " + __version__ + " of Sekisetsu Method Star Eyes fork.")
self.dataset_file = '' # overridden
self.save_sequences = True
self.particles_birth_count = 0 # overridden
self.FRAME_LIMIT = 200 # 200 for production
self.render_frames_directory = "../simulations/"
self.render_histogram_directory = "../histograms/"
self.code_name = "star_eyes"
self.permutation_name = __version__
self.histogram_animation_directory = self.code_name + "_" + __version__ + "/"
self.PARTICLE_SHAPE_MODE = "CIRCLE"
self.PARTICLE_DIAMETER = .1
self.COEFFICIENT_RESTITUTION = 0.1 #0.01
self.FRICTION = 0.1
self.DATASET_HIGHEST_INDEX = 0
self.DATASET_LOWEST_INDEX = 0
self.draw = ImageDraw.Draw
self.previous_sdev_y = 900
self.standard_dev_start_y = 900
self.previous_money_flow_y = 900
self.previous_sdev_vol_y = 850
self.standard_dev_vol_start_y = 850
self.previous_sd_mfi_y = 800
self.standardDevMFI = 0
self.FRAME_RATE = 24
self.CANDLESTICK_WIDTH = 1
self.new_x_default_value = 10
self.CONTAINER_WALLS_WIDTH = 2
self.CANDLE_GUTTER = 3
self.run = True
self.DATASET_LOWEST = 107 # overridden, used for scaling the chart into this game window
self.DATASET_HIGHEST = 111 # overridden
self.DATASET_VOLUME_HIGHEST = 0 # overridden
self.DATASET_VOLUME_LOWEST = 0 # overridden
self.price_high = 0
self.price_low = 0
self.offset_index = 0 # used for cycling through the T axis
self.truncated_dataset_file_name = ""
self.PAINTABLE_LIMIT = 1268 # used as a canvas limit so there are some venting gaps on L and R side of chart
self.HEIGHT_SCALING_FACTOR = 1.1 # set to 1.2 initially. if things are getting truncated, lower this number to fit more into the screen
# note: set to negative number to do interesting head-on particle collisions.
random.seed()
pygame.display.init()
pygame.font.init()
self.fpsclock = pygame.time.Clock()
self.WINDOW_WIDTH = 1280
self.WINDOW_HEIGHT = 720
self.surf_window = pygame.display.set_mode((self.WINDOW_WIDTH, self.WINDOW_HEIGHT))
self.font = pygame.font.SysFont("Sans", 12)
self.font_large = pygame.font.SysFont("Sans", 24)
self.cx = self.WINDOW_WIDTH / 2
self.cy = self.WINDOW_HEIGHT / 2
self.mouse_x = 0
self.mouse_y = 0
self.color_static = pygame.Color(52, 30, 162)
self.COLOR_STANDARD_DEVIATION = pygame.Color("yellow")
self.COLOR_STANDARD_DEVIATION_VOL = pygame.Color("blue")
self.COLOR_HEAVY_PARTICLES = pygame.Color(0, 146, 255)
self.COLOR_LIGHT_PARTICLES = pygame.Color(255, 0, 255)
self.COLOR_HISTOGRAM_UP = (0, 146, 255)
self.COLOR_HISTOGRAM_DOWN = (255, 0, 255)
self.COLOR_ENTRY_SIGNAL = (0, 255, 100)
self.COLOR_MONEY_FLOW_INDEX = pygame.Color("green")
self.MOUSE_HINGE_JOINT = -1.0
self.edge_boxes = []
self.candlestick_boxes = []
self.heavy_particles = []
self.light_particles = []
self.standard_dev_list = []
self.standard_dev_list_vol = []
self.mfi = []
self.mfi_calc = []
self.mfi_standard_dev = []
self.new_x = self.new_x_default_value
self.index_counter = 0
self.verbose = False
self.debug = False
self.candleIndex = 0
self.highlight_sigma = True # can be overridden by passing in -highlight_sigma argument
self.sigma_period = 17 # can be overridden by passing in -sigma_period argument
self.show_histogram_ratio = True
self.show_histogram_standard_dev = False
self.show_MFI = False
self.histogram_standard_dev_period = 7
self.show_histogram_simple_average = False
self.histogram_simple_average_period = 9
self.sigma_sort_low = 315 # the number of sigma lines to draw
self.offset_index_override = 0 # the index of the candle to begin a simulation
self.sample_period_size = 0 # override this to e.g. 10, and set the offset_index_override to e.g. 55
self.permutation_index = 0 # the outer loop index, this will be appended to file name, and is useful for running multiple simulations on one dataset in order to observe variances in particle distribution
self.candlePlusGutterWidth = (self.CANDLESTICK_WIDTH + self.CANDLE_GUTTER)
helpMessage = 'See README.md and setup_instructions.md for specifics. Here are some commands to try: \n' + \
"โข Standard deviation of price (SD, yellow line) + Volume SD (blue line) + 100 lowest sigma values highlighted in green: " + TextColors.OKGREEN + 'python cvt_00014.py --sigma_period 17 -hrat 1 -v -ssl 100' + TextColors.ENDC + "\n" + \
"โข Price SD + lowest sigma values highlighted in green: " + TextColors.OKGREEN + 'python cvt_00014.py --sigma_period 23 --highlight_sigma True -v ' + TextColors.ENDC + "\n" + \
"โข Price SD + histogram SD of particle distribution: " + TextColors.OKGREEN + "python cvt_00014.py --sigma_period 19 -v -hrat False -hsd True -hsdp 34" + TextColors.ENDC + "\n" + \
"โข Price SD + histogram moving average (MA) of particle distribution: " + TextColors.OKGREEN + "python cvt_00014.py --sigma_period 17 -v -hrat False -hsa True -hsap 23" + TextColors.ENDC + "\n" + \
"โข Price SD + histogram MA with a larger set of low SD highlighted: " + TextColors.OKGREEN + "python cvt_00014.py --sigma_period 34 -v -hrat True -ssl 100" + TextColors.ENDC + "\n" + \
"โข Start at some other index in the dataset (e.g. 120 candles from latest): " + TextColors.OKGREEN + "python cvt_00014.py --sigma_period 34 -v -oo 120 -hrat 1" + TextColors.ENDC + "\n" + \
"โข Start at some other index and march forward N candles: " + TextColors.OKGREEN + "python cvt_00014.py --sigma_period 34 -v -oo 120 -sps 10 -hrat 1" + TextColors.ENDC + "\n" + \
"โข Run a series of simulations at the same index: " + TextColors.OKGREEN + "python cvt_00014.py --sigma_period 23 -v -oo 127 -hrat 1 -ssl 1" + TextColors.ENDC + "\n" + \
" "
parser = argparse.ArgumentParser(description=helpMessage, epilog=textwrap.dedent(""), formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-s', '--highlight_sigma', dest='highlight_sigma', required=False, help="Paint lines from low sigma regions to the top of the chart. This helps isolate important areas in the histogram.")
parser.add_argument('-p', '--sigma_period', dest='sigma_period', required=False, help="The sigma period used to calculate the standard deviation. Default is 17.")
parser.add_argument('-hrat', '--show_histo_ratio', dest='show_histo_ratio', required=False, help="Show the histogram ratio lines.")
parser.add_argument('-hsd', '--show_histo_sd', dest='show_histo_sd', required=False, help="Show a standard deviation line of the histogram.")
parser.add_argument('-hsdp', '--histo_sd_period', dest='histo_sd_period', required=False, help="Histogram standard deviation period. Default is 7.")
parser.add_argument('-hsa', '--show_histo_simple_average', dest='show_histo_simple_average', required=False, help="Show a simple average line of the histogram.")
parser.add_argument('-hsap', '--histo_simple_average_period', dest='histo_simple_average_period', required=False, help="Histogram simple average period. Default is 9.")
parser.add_argument('-ssl', '--sigma_sort_low', dest='sigma_sort_low', required=False, help="The number of samples to use for highlighting the low points in sigma. Default is 315. Higher numbers will add more lines and include a larger range.")
parser.add_argument('-oo', '--offset_index_override', dest='offset_index_override', required=False, help="The index of the current data set to begin at. This is helpful if you see a breakout candle somewhere in the past and want to run the simulation with that price being at the far right of the chart.")
parser.add_argument('-sps', '--sample_period_size', dest='sample_period_size', required=False, help="The size of the sample set of candles to run a simulation on. Use with offset index override -oo.")
parser.add_argument('-mfi', '--show_mfi', dest='show_mfi', required=False, help="Display both MFI over the chart and MFI standard deviation at bottom.")
parser.add_argument('-v','--verbose', dest='verbose', action='store_true', help="Explain what is being done.")
parser.add_argument('-d','--debug', dest='debug', action='store_true', help="Lower level messages for debugging.")
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
args = parser.parse_args()
if args.verbose:
self.verbose = True
if args.debug:
self.debug = True
if self.string_to_bool(args.highlight_sigma):
self.highlight_sigma = True
if self.string_to_bool(args.show_mfi):
self.show_MFI = True
if args.show_histo_ratio:
self.show_histogram_ratio = self.string_to_bool(args.show_histo_ratio)
if args.sigma_period:
self.sigma_period = int( args.sigma_period )
if args.sigma_sort_low:
self.sigma_sort_low = int( args.sigma_sort_low )
if self.string_to_bool(args.show_histo_sd):
self.show_histogram_standard_dev = True
if args.sample_period_size:
self.sample_period_size = int(args.sample_period_size)
if args.histo_sd_period:
self.histogram_standard_dev_period = int(args.histo_sd_period)
if self.string_to_bool(args.show_histo_simple_average):
self.show_histogram_simple_average = True
if args.histo_simple_average_period:
self.histogram_simple_average_period = int(args.histo_simple_average_period)
if args.offset_index_override:
self.offset_index_override = int(args.offset_index_override)
if args.debug and args.verbose:
self.print_debug("Running in verbose mode with debug messages.")
elif args.debug and not args.verbose:
self.print_debug("Running in debug mode.")
elif args.verbose and not args.debug:
self.print_verbose("Running in verbose mode.")
def string_to_bool(self, pArg):
if None == pArg:
return False
elif pArg.lower() in ("y", "yes", "true", "t", "1"):
return True
else:
return False
def set_dataset_file(self, pFileName):
self.dataset_file = pFileName
def draw_box(self, x, y, w, h, rot, color):
points = [[-w / 2.0, -h / 2.0], [w / 2.0, -h / 2.0], [w / 2.0, h / 2.0], [-w / 2.0, h / 2.0]]
for p in points:
p[:] = [x + p[0] * math.cos(rot) + p[1] * math.sin(rot), y - p[0] * math.sin(rot) + p[1] * math.cos(rot)]
pygame.draw.polygon(self.surf_window, color, points, 1)
def draw_circle(self, x, y, d, color):
points = [[-d / 2.0, -d / 2.0], [d / 2.0, -d / 2.0]]
pygame.draw.circle(self.surf_window, color, [x,y], d/2, 1)
# circle(Surface, color, pos, radius, width=0) -> Rect
# for drawing a progress bar
def draw_growing_rectangle(self, pInt):
points = (20,20,50+pInt, 30)
# TODO: make this grow automatically
pygame.draw.rect(self.surf_window, self.COLOR_STANDARD_DEVIATION, points, 1)
def draw_standard_dev_line(self, pCoords):
pygame.draw.line(self.surf_window, self.COLOR_STANDARD_DEVIATION, pCoords[0], pCoords[1], 1)
def draw_standard_dev_line_vol(self, pCoords):
pygame.draw.line(self.surf_window, self.COLOR_STANDARD_DEVIATION_VOL, pCoords[0], pCoords[1], 1)
def draw_mfi(self, pCoords, pIndex):
# self.new_x-candlePlusGutterWidth, self.previous_money_flow_y
# self.new_x, self.standard_dev_vol_start_y - newMfCalc
# priceHigh, priceLow
twoCandles = self.candlePlusGutterWidth * 2
# if self.mfi[pIndex][1][1] < self.mfi[pIndex-1][1][1] and self.mfi[pIndex-1][1][1] > self.mfi[pIndex-2][1][1]:
# # we have spiked up and down
# pygame.draw.line( self.surf_window, pygame.Color("red"), ( self.candlePlusGutterWidth * pIndex,pCoords[2][0]-20 ), ( self.candlePlusGutterWidth * pIndex,pCoords[2][0] + twoCandles) )
pygame.draw.line(self.surf_window, pygame.Color("gray"), pCoords[0], pCoords[1], 1)
def draw_sd_mfi(self, pCoords):
pygame.draw.line(self.surf_window, pygame.Color("gray"), pCoords[0], pCoords[1], 1)
def init_dataset(self):
csvfile = open(self.dataset_file, 'r')
lines = csvfile.readlines()
rowCount = 0
for row in lines:
rowCount += 1
tmpDataSet = []
# this reverse orders the orig data so we can paint from left to right with it
startIndex = rowCount - self.offset_index - 315
for i in range( startIndex, rowCount - self.offset_index ):
tmpDataSet.append(lines[i])
self.dataset = tmpDataSet
tmpList = []
tmpVolList = []
tmpCount = 0
tmpMFI = []
for row in self.dataset:
# if tmpCount > 0:
# this is to determine the min/max
# tmpTruncatedRow = row[1:4] # works for dukascopy
rowList = row.split(",")
# self.print_debug(rowList)
tmpTruncatedRow = rowList[2:6] # works for metatrader
if tmpTruncatedRow != []:
tmpList.append( max(tmpTruncatedRow) )
tmpList.append( min(tmpTruncatedRow) )
tmpTruncatedRow = rowList[6:7]
if tmpTruncatedRow != []:
tmpVolList.append( float( tmpTruncatedRow[0].strip() ) )
self.DATASET_LOWEST = int( round( float( min(tmpList)) ) ) -1
self.DATASET_HIGHEST = int( round( float( max(tmpList)) ) ) +1
self.DATASET_VOLUME_LOWEST = int( round( float( min(tmpVolList) * self.DATASET_LOWEST ) ) ) -1
self.DATASET_VOLUME_HIGHEST = int( round( float( max(tmpVolList) * self.DATASET_HIGHEST ) ) ) +1
self.DATASET_MFI_HIGHEST = 100 #self.DATASET_HIGHEST * self.DATASET_VOLUME_HIGHEST
self.DATASET_MFI_LOWEST = 0 #self.DATASET_LOWEST * self.DATASET_VOLUME_LOWEST
# firstRowRead = 0
for row in self.dataset:
self.paint_candle(row) # returns 0 if row volume is empty
self.candleIndex += 1
self.print_verbose( str(self.candleIndex) + " records in data set" )
slashLocation = self.dataset_file.rfind('/')
directory = self.dataset_file[slashLocation+1:]
self.truncated_dataset_file_name = directory[:-4] #trim off the '.csv'
self.print_verbose( self.truncated_dataset_file_name )
def game_start(self):
self.world = ep_world_create()
ep_world_set_sleeping(self.world, True, 30, 0, 0.002, 0.0001)
ep_world_set_settings(self.world, 1.0 / 4.0, 20, 10, 0.1, 0.5, 0, 0.5, 1)
self.init_dataset()
self.mouseParticleId = self.get_static_body_id()
self.MOUSE_HINGE_JOINT = -1.0
particlePosition_X = 10
# HOW TO SET THE FRICTIONS...
# ep_shape_set_material(global.world,body,shape1,0.5,0.4,0,0);
# 0.5: coefficient of restitution.
# 0.4: friction.
# 0: normal surface velocity.
# 0: tangential surface velocity.
# physics boundaries of the stage, AKA the Control Volume Tank.
# MAKE FLOOR
tmpW = self.WINDOW_WIDTH - self.CONTAINER_WALLS_WIDTH
tmpH = self.CONTAINER_WALLS_WIDTH
tmpX = self.WINDOW_WIDTH / 2
tmpY = self.WINDOW_HEIGHT - self.CONTAINER_WALLS_WIDTH
# ep_shape_create_box(world_id, body_id, w, h, x, y, rot, density)
tmpBodyId = self.get_static_body_id()
self.edge_boxes.append([tmpW, tmpH, tmpX, tmpY, math.radians(0)])
shape = ep_shape_create_box(self.world, tmpBodyId, tmpW, tmpH, tmpX, tmpY, math.radians(0), 1)
ep_shape_set_collision(self.world, tmpBodyId, shape, 1, 1, 0)
ep_shape_set_material(self.world, tmpBodyId, shape, self.COEFFICIENT_RESTITUTION, self.FRICTION, 0, 0)
# LEFT WALL
tmpW = self.CONTAINER_WALLS_WIDTH
tmpH = self.WINDOW_HEIGHT - self.CONTAINER_WALLS_WIDTH
tmpX = 0
tmpY = self.WINDOW_HEIGHT / 2
tmpBodyId = self.get_static_body_id()
self.edge_boxes.append([tmpW, tmpH, tmpX, tmpY, math.radians(0)])
shape = ep_shape_create_box(self.world, tmpBodyId, tmpW, tmpH, tmpX, tmpY, math.radians(0), 1)
ep_shape_set_collision(self.world, tmpBodyId, shape, 1, 1, 0)
ep_shape_set_material(self.world, tmpBodyId, shape, self.COEFFICIENT_RESTITUTION, self.FRICTION, 0, 0)
# RIGHT WALL
tmpW = self.CONTAINER_WALLS_WIDTH
tmpH = self.WINDOW_HEIGHT - self.CONTAINER_WALLS_WIDTH
tmpX = self.WINDOW_WIDTH - self.CONTAINER_WALLS_WIDTH
tmpY = self.WINDOW_HEIGHT / 2
tmpBodyId = self.get_static_body_id()
self.edge_boxes.append([tmpW, tmpH, tmpX, tmpY, math.radians(0)])
shape = ep_shape_create_box(self.world, tmpBodyId, tmpW, tmpH, tmpX, tmpY, math.radians(0), 1)
ep_shape_set_collision(self.world, tmpBodyId, shape, 1, 1, 0)
ep_shape_set_material(self.world, tmpBodyId, shape, self.COEFFICIENT_RESTITUTION, self.FRICTION, 0, 0)
# MAKE CEILING
tmpW = self.WINDOW_WIDTH - self.CONTAINER_WALLS_WIDTH
tmpH = self.CONTAINER_WALLS_WIDTH
tmpX = self.WINDOW_WIDTH / 2
tmpY = self.CONTAINER_WALLS_WIDTH
tmpBodyId = self.get_static_body_id()
self.edge_boxes.append([tmpW, tmpH, tmpX, tmpY, math.radians(0)])
shape = ep_shape_create_box(self.world, tmpBodyId, tmpW, tmpH, tmpX, tmpY, math.radians(0), 1)
ep_shape_set_collision(self.world, tmpBodyId, shape, 1, 1, 0)
ep_shape_set_material(self.world, tmpBodyId, shape, self.COEFFICIENT_RESTITUTION, self.FRICTION, 0, 0)
# GENERATE PARTICLES
particleCount = 0
for i in range(0, self.particles_birth_count):
# HEAVY PARTICLES
tmpId = self.get_dynamic_body_id()
shape = self.get_particle_shape(tmpId)
ep_shape_set_collision(self.world, tmpId, shape, 1, 1, 0)
ep_shape_set_material(self.world, tmpId, shape, self.COEFFICIENT_RESTITUTION, self.FRICTION, 0, 0)
ep_body_calculate_mass(self.world, tmpId)
if particlePosition_X >= self.WINDOW_WIDTH:
particlePosition_X = 0
else:
particlePosition_X += 1
tmpRadian = random.randrange(0,57)
ep_body_set_position(self.world, tmpId, particlePosition_X, 10, math.radians(tmpRadian))
ep_body_set_gravity(self.world, tmpId, 0, 1.0)
self.heavy_particles.append(tmpId)
particleCount += 1
# LIGHTWEIGHT PARTICLES
tmpId = self.get_dynamic_body_id()
shape = self.get_particle_shape(tmpId)
ep_shape_set_collision(self.world, tmpId, shape, 1, 1, 0)
ep_shape_set_material(self.world, tmpId, shape, self.COEFFICIENT_RESTITUTION, self.FRICTION, 0, 0)
ep_body_calculate_mass(self.world, tmpId)
tmpRadian = random.randrange(0,57)
ep_body_set_position(self.world, tmpId, particlePosition_X, self.WINDOW_HEIGHT-10, math.radians(tmpRadian))
ep_body_set_gravity(self.world, tmpId, 0, -1.0)
self.light_particles.append(tmpId)
particleCount += 1
def get_particle_shape(self, tmpId):
# ep_shape_create_circle method API...
# shape1 = ep_shape_create_circle(global.world,body,32,0,0,0,1);
# 32: the radius of the circle.
# 0,0,0: the relative coordinates of the shape (x,y,rot).
# 1: the density of the circle (not used for static bodies).
if self.PARTICLE_SHAPE_MODE == "CIRCLE":
return ep_shape_create_circle(self.world, tmpId, self.PARTICLE_DIAMETER,0,0,0,1);
else: #default square
return ep_shape_create_box(self.world, tmpId, self.PARTICLE_DIAMETER, self.PARTICLE_DIAMETER, 0, 0, 0, 1)
def paint_candle(self, pRow):
if self.new_x >= self.PAINTABLE_LIMIT: # no matter the record count, limit candles to window width
return 0
if pRow == []:
return 0
timestamp = pRow[0][0]
self.print_debug(timestamp)
# for dukascopy the rows are 1 thru 4
# for metatrader it's 2 through 5
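# e.g. a MetaTrader-style row (illustrative values only) is expected to look like:
# 2018.08.17,21:30,110.512,110.530,110.498,110.521,156
# i.e. [date, time, open, high, low, close, volume]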
priceOpen = self.interpolate(float(pRow.split(",")[2]))
priceHigh = self.interpolate(float(pRow.split(",")[3]))
priceLow = self.interpolate(float(pRow.split(",")[4]))
priceClose = self.interpolate(float(pRow.split(",")[5]))
volume = self.interpolate_volume(float(pRow.split(",")[6]))
'''
experiment: use open/close rather than high low
initial result seems to be high/low is more accurate
if priceOpen > priceClose:
priceHigh = priceOpen
priceLow = priceClose
else:
priceHigh = priceClose
priceLow = priceOpen
if priceOpen < priceClose:
priceLow = priceOpen
priceHigh = priceClose
else:
priceHigh = priceOpen
priceLow = priceClose
'''
if self.DATASET_HIGHEST == priceHigh:
self.DATASET_HIGHEST_INDEX = self.candleIndex
if self.DATASET_LOWEST == priceLow:
self.DATASET_LOWEST_INDEX = self.candleIndex
# PRICE STANDARD DEVIATION
sdSet = self.get_last_n_prices(self.candleIndex)
standardDev = sdef.getStandardDeviation(sdSet).real
standardDev *= (math.pow( math.pi*self.get_phi(), 4) )
self.standard_dev_list.append([[self.new_x-self.candlePlusGutterWidth, self.previous_sdev_y], [self.new_x, self.standard_dev_start_y-standardDev]])
self.previous_sdev_y = self.standard_dev_start_y-standardDev
# VOLUME SD
sdSetVol = self.get_last_n_volumes(self.candleIndex)
standardDevVol = sdef.getStandardDeviation(sdSetVol).real
standardDevVol *= (math.pow( math.pi*self.get_phi(), 2.5) )
self.standard_dev_list_vol.append([[self.new_x-self.candlePlusGutterWidth, self.previous_sdev_vol_y], [self.new_x, self.standard_dev_vol_start_y-standardDevVol]])
# MONEY FLOW INDEX
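# (Conventional MFI uses typical price (H+L+C)/3 times volume; this variant sums
# high-price * volume for up moves and low-price * volume for down moves over the
# same sigma_period window. The 100 * pos / (pos + neg) form used below is
# algebraically the same as the textbook 100 - 100 / (1 + pos/neg).)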
positive_money_flow = 0
negative_money_flow = 0
highPriceSet = self.get_last_n_high_prices(self.candleIndex)
lowPriceSet = self.get_last_n_low_prices(self.candleIndex)
# sdSet is a present to past ordered list
# so we need to loop it in reverse
# this example uses high and low
# magicNumber = 1/137
# for i, k in reversed( list(enumerate(sdSet)) ):
# if i > 0:
# if highPriceSet[i] > highPriceSet[i-1]:
# positive_money_flow += highPriceSet[i] * sdSetVol[i] # * (1+magicNumber)
# if lowPriceSet[i] < lowPriceSet[i-1]:
# negative_money_flow += lowPriceSet[i] * sdSetVol[i] # * (1+magicNumber)
for i, k in reversed( list(enumerate(sdSet)) ):
if i > 0:
if highPriceSet[i] > highPriceSet[i-1]:
positive_money_flow += highPriceSet[i] * sdSetVol[i]
if lowPriceSet[i] < lowPriceSet[i-1]:
negative_money_flow += lowPriceSet[i] * sdSetVol[i]
money_flow_index = 100 * ( positive_money_flow / ( positive_money_flow + negative_money_flow) )
newMfCalc = self.interpolate_mfi( money_flow_index )
# RAW MFI
self.mfi.append( [[self.new_x-self.candlePlusGutterWidth, self.previous_money_flow_y], [self.new_x, self.standard_dev_vol_start_y - newMfCalc], [priceHigh, priceLow]] )
self.previous_money_flow_y = self.standard_dev_vol_start_y - newMfCalc
# SD MFI
mfiSDAdjust = self.WINDOW_HEIGHT + 150
self.mfi_calc.append( newMfCalc )
if (self.candleIndex >= self.sigma_period):
sdSetMFI = self.mfi_calc[ -self.sigma_period:-1 ]
self.standardDevMFI = sdef.getStandardDeviation(sdSetMFI).real
self.standardDevMFI *= (math.pow( math.pi*self.get_phi(), (2.97)) )
self.mfi_standard_dev.append( [[self.new_x-self.candlePlusGutterWidth, self.previous_sd_mfi_y], [self.new_x, mfiSDAdjust - self.standardDevMFI]] )
self.previous_sd_mfi_y = mfiSDAdjust - self.standardDevMFI
# VOLUME SD
self.previous_sdev_vol_y = self.standard_dev_vol_start_y - standardDevVol
# experimental, use to filter out zero volume periods
# if volume == 0:
# return 0
candleHeight = 0
# DETERMINE CANDLE PRICE HEIGHT
candleHeight = priceHigh - priceLow
newY = ((candleHeight/2)) + priceLow
candleHeight = abs(candleHeight)
tmpBodyId = self.get_static_body_id()
self.edge_boxes.append([self.CANDLESTICK_WIDTH, candleHeight, self.new_x, newY, math.radians(0)])
shape = ep_shape_create_box(self.world, tmpBodyId, self.CANDLESTICK_WIDTH, candleHeight, self.new_x, newY, math.radians(0), 1)
# self.price_high = priceHigh + candleHeight/2
# self.price_low = newY
ep_shape_set_collision(self.world, tmpBodyId, shape, 1, 1, 0)
tmpCoef = 2
tmpFric = 1
ep_shape_set_material(self.world, tmpBodyId, shape, tmpCoef, tmpFric, 0, 0)
# advance the x
self.new_x += self.candlePlusGutterWidth
return 1
def get_x_location_of_candle(self, pIndex):
tmpAdd = self.new_x_default_value
for i in range(0, pIndex):
tmpAdd += (self.CANDLESTICK_WIDTH + self.CANDLE_GUTTER)
return tmpAdd
def get_last_n_prices(self, pIndex):
tmpList = []
returnList = []
dsSubset = []
lookback = self.sigma_period
dsSubset.append( self.dataset[pIndex] )
try:
for i in range(1, lookback):
dsSubset.append( self.dataset[pIndex-i] )
except Exception as e:
pass
for i in range(0, len(dsSubset)):
# priceOpen = float(dsSubset[i].split(",")[2])
# priceHigh = float(dsSubset[i].split(",")[3])
# priceLow = float(dsSubset[i].split(",")[4])
priceClose = float(dsSubset[i].split(",")[5])
# tmpList.append(priceOpen)
# tmpList.append(priceHigh)
# tmpList.append(priceLow)
tmpList.append(priceClose) # note: just using the close makes for a bit spikier, low notches are more defined
return tmpList
def get_last_n_high_prices(self, pIndex):
tmpList = []
returnList = []
dsSubset = []
lookback = self.sigma_period
dsSubset.append( self.dataset[pIndex] )
try:
for i in range(1, lookback):
dsSubset.append( self.dataset[pIndex-i] )
except Exception as e:
pass
for i in range(0, len(dsSubset)):
# priceOpen = float(dsSubset[i].split(",")[2])
priceHigh = float(dsSubset[i].split(",")[3])
# priceLow = float(dsSubset[i].split(",")[4])
# priceClose = float(dsSubset[i].split(",")[5])
# tmpList.append(priceOpen)
tmpList.append(priceHigh)
# tmpList.append(priceLow)
# tmpList.append(priceClose)
return tmpList
def get_last_n_low_prices(self, pIndex):
tmpList = []
returnList = []
dsSubset = []
lookback = self.sigma_period
dsSubset.append( self.dataset[pIndex] )
try:
for i in range(1, lookback):
dsSubset.append( self.dataset[pIndex-i] )
except Exception as e:
pass
for i in range(0, len(dsSubset)):
# priceOpen = float(dsSubset[i].split(",")[2])
# priceHigh = float(dsSubset[i].split(",")[3])
priceLow = float(dsSubset[i].split(",")[4])
# priceClose = float(dsSubset[i].split(",")[5])
# tmpList.append(priceOpen)
# tmpList.append(priceHigh)
tmpList.append(priceLow)
# tmpList.append(priceClose)
return tmpList
def get_last_n_volumes(self, pIndex):
tmpList = []
returnList = []
dsSubset = []
lookback = self.sigma_period
dsSubset.append( self.dataset[pIndex] )
try:
for i in range(1, lookback):
dsSubset.append( self.dataset[pIndex-i] )
except Exception as e:
pass
for i in range(0, len(dsSubset)):
# priceOpen = float(dsSubset[i].split(",")[2])
# priceHigh = float(dsSubset[i].split(",")[3])
# priceLow = float(dsSubset[i].split(",")[4])
# priceClose = float(dsSubset[i].split(",")[5])
volume = int(dsSubset[i].split(",")[6])
# tmpList.append(priceOpen)
# tmpList.append(priceHigh)
# tmpList.append(priceLow)
# tmpList.append(priceClose)
tmpList.append(volume)
return tmpList
def get_static_body_id(self):
return ep_body_create_static(self.world)
def get_dynamic_body_id(self):
return ep_body_create_dynamic(self.world, False)
def interpolate(self, pVal):
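# Maps a raw price into screen-y space; the output range is inverted
# (DATASET_HIGHEST -> 0, the top of the window) because pygame's y axis grows downward.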
newVal = interp(pVal, [self.DATASET_LOWEST, self.DATASET_HIGHEST ], [self.WINDOW_HEIGHT*self.HEIGHT_SCALING_FACTOR, 0])
return newVal
def interpolate_volume(self, pVal):
newVal = interp(pVal, [self.DATASET_VOLUME_LOWEST, self.DATASET_VOLUME_HIGHEST ], [self.WINDOW_HEIGHT*self.HEIGHT_SCALING_FACTOR, 0])
return newVal
def interpolate_mfi(self, pVal):
newVal = interp(pVal, [self.DATASET_MFI_LOWEST, self.DATASET_MFI_HIGHEST ], [self.WINDOW_HEIGHT, 0])
return newVal
def game_end(self):
ep_world_destroy(self.world)
def game_run(self):
self.game_start()
while self.run == True:
for event in pygame.event.get():
if event.type == "QUIT":
pygame.quit()
sys.exit()
elif event.type == "MOUSEMOTION":
self.mouse_x, self.mouse_y = event.pos
elif event.type == "MOUSEBUTTONDOWN":
self.mouse_x, self.mouse_y = event.pos
if ep_world_collision_test_circle(self.world, 0, self.mouse_x, self.mouse_y, 0, 1, 1, 0) > 0:
b = ep_world_get_collision_body(self.world, 0)
s = ep_world_get_collision_shape(self.world, 0)
if not ep_body_is_static(self.world, b):
xx = ep_body_coord_world_to_local_x(self.world, b, self.mouse_x, self.mouse_y)
yy = ep_body_coord_world_to_local_y(self.world, b, self.mouse_x, self.mouse_y)
# store the joint on self so MOUSEBUTTONUP and the velocity clamp can see it
self.MOUSE_HINGE_JOINT = ep_hingejoint_create(self.world, b, self.mouseParticleId, xx, yy, 0, 0, 0)
ep_hingejoint_set_max_force(self.world, self.MOUSE_HINGE_JOINT, 10000)
elif event.type == "MOUSEBUTTONUP":
self.mouse_x, self.mouse_y = event.pos
if self.MOUSE_HINGE_JOINT != -1.0:
ep_hingejoint_destroy(self.world, self.MOUSE_HINGE_JOINT)
self.MOUSE_HINGE_JOINT = -1.0
elif event.type == "KEYDOWN":
if event.key == "K_ESCAPE":
pygame.event.post(pygame.event.Event(QUIT))
elif event.key == "K_r":
self.game_end()
self.game_start()
vx = self.mouse_x - ep_body_get_x_center(self.world, self.mouseParticleId)
vy = self.mouse_y - ep_body_get_y_center(self.world, self.mouseParticleId)
if self.MOUSE_HINGE_JOINT != -1.0:
d = math.sqrt(vx * vx + vy * vy)
if d > 10:
vx *= 10 / d
vy *= 10 / d
ep_body_set_velocity_center(self.world, self.mouseParticleId, vx, vy, 0)
for i in range(4):
ep_world_update_contacts(self.world)
ep_world_simulate_step(self.world)
self.surf_window.lock()
self.surf_window.fill(pygame.Color(0, 0, 0))
for b in self.edge_boxes:
self.draw_box(b[2], b[3], b[0], b[1], b[4], self.color_static)
for b in self.heavy_particles:
self.draw_box(ep_body_get_x(self.world, b), \
ep_body_get_y(self.world, b), self.PARTICLE_DIAMETER, self.PARTICLE_DIAMETER, ep_body_get_rot(self.world, b), \
self.COLOR_HEAVY_PARTICLES)
for b in self.light_particles:
self.draw_box(ep_body_get_x(self.world, b), \
ep_body_get_y(self.world, b), self.PARTICLE_DIAMETER, self.PARTICLE_DIAMETER, ep_body_get_rot(self.world, b), \
self.COLOR_LIGHT_PARTICLES)
for b in self.candlestick_boxes:
self.draw_box(b[2], b[3], b[0], b[1], b[4], self.color_static)
for b in self.standard_dev_list:
self.draw_standard_dev_line(b)
for b in self.standard_dev_list_vol:
self.draw_standard_dev_line_vol(b)
for b in self.mfi:
if self.show_MFI == True:
tmpIndex = self.mfi.index(b)
self.draw_mfi(b, tmpIndex)
for b in self.mfi_standard_dev:
if self.show_MFI == True:
self.draw_sd_mfi(b)
pygame.display.set_caption(self.truncated_dataset_file_name + " ||| " + str( self.offset_index ) + " steps back " )
self.surf_window.unlock()
self.display_text_large(self.truncated_dataset_file_name, 10, 695, pygame.Color(255, 255, 255))
# chart labels
# text = "----" + str(self.DATASET_HIGHEST)
# self.displayText(text, self.interpolate(self.DATASET_HIGHEST + 2), self.get_x_location_of_candle(self.DATASET_HIGHEST_INDEX),\
# pygame.Color(255, 255, 0))
pygame.display.update()
self.fpsclock.tick(self.FRAME_RATE)
self.index_counter += 1
# make another frame for the animation
if self.save_sequences == True:
if not os.path.exists(self.render_frames_directory + self.truncated_dataset_file_name):
os.makedirs(self.render_frames_directory + self.truncated_dataset_file_name)
tmpDir = self.render_frames_directory + self.truncated_dataset_file_name + "/" + self.code_name + "_" + self.number_formatter( self.index_counter )
pygame.image.save(self.surf_window, tmpDir + ".png")
# make the histogram
if self.index_counter == self.FRAME_LIMIT:
tmpFileName = self.render_histogram_directory + self.truncated_dataset_file_name + ".png"
# make the histogram folder if it's absent
if not os.path.exists(self.render_histogram_directory):
os.makedirs(self.render_histogram_directory)
self.print_verbose( "Preparing final frame output to " + tmpFileName )
pygame.image.save(self.surf_window, tmpFileName)
self.make_histogram( tmpFileName )
# Delete the temp file
os.system( "rm " + tmpFileName )
self.make_video_from_sequence()
self.run = False
self.game_end()
def make_video_from_sequence(self):
tmpDir = self.render_frames_directory + self.truncated_dataset_file_name + "/"
files = sorted( glob.glob( tmpDir + '*.png') )
if len( files ) == 0:
print("nothing to convert in " + tmpDir)
return
# arg = "ffmpeg -framerate 30 -pattern_type glob -i '" + tmpDir + "*.png' -c:v libx264 -pix_fmt yuv420p -crf 23 -y " + self.render_frames_directory + "/" + self.truncated_dataset_file_name + ".mp4"
# os.system( arg )
# make an AVI so we can convert into GIF
arg = "ffmpeg -framerate 30 -pattern_type glob -i '" + tmpDir + "*.png' -c:v ffv1 -y " + self.render_frames_directory + "/temp.avi"
os.system( arg )
arg = "ffmpeg -i " + self.render_frames_directory + "/temp.avi -pix_fmt rgb8 -y " + self.render_frames_directory + "/" + \
self.truncated_dataset_file_name + "_" + self.number_formatter(self.offset_index) + "_sig" + str( self.sigma_period ) + ".gif"
os.system( arg )
os.system( "rm " + self.render_frames_directory + "temp.avi" )
# delete all PNGs from this location when done.
shutil.rmtree(tmpDir)
def number_formatter(self, pNum):
return "%03d" % (pNum,)
def displayText(self, pTxt, pPosLeft, pPosRight, pColor):
surf_text = self.font.render(pTxt, False, pColor)
rect = surf_text.get_rect()
rect.topleft = (pPosLeft, pPosRight)
self.surf_window.blit(surf_text, rect)
def display_text_large(self, pTxt, pPosLeft, pPosRight, pColor):
surf_text = self.font_large.render(pTxt, False, pColor)
rect = surf_text.get_rect()
rect.topleft = (pPosLeft, pPosRight)
self.surf_window.blit(surf_text, rect)
def special_number(self):
# Visual scaling constant for the histogram traces; an alternative
# golden-ratio-based value is kept below for reference but was unreachable dead code.
return math.pi
# return ((1+5 ** 0.5) / 2) * pow(math.pi, 4)
def get_phi(self):
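# Golden ratio phi = (1 + sqrt(5)) / 2 ~= 1.618, used as a scaling base above.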
return ((1+5 ** 0.5) / 2)
def make_histogram(self, pImg):
img = Image.open(pImg)
img_bbox = img.getbbox()
self.draw = ImageDraw.Draw(img)
pixels = img.load()
imbalanceRatioArray = []
offsetY = 80
for xx in range( img.size[0] ):
heavyParticleCounter = 0.0
lightParticleCounter = 0.0
tmpOffset = 12
for yy in range(tmpOffset, img.size[1] - tmpOffset): # filter out particle detritus from the histogram data
if pixels[ xx, yy ] == (0, 146, 255):
heavyParticleCounter += 1.0
elif pixels[ xx, yy ] == (255, 0, 255):
lightParticleCounter += 1.0
imbalanceRatio1 = (heavyParticleCounter+1.0)/(lightParticleCounter+1.0)
imbalanceRatio2 = (lightParticleCounter+1.0)/(heavyParticleCounter+1.0)
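# The +1.0 on both counts keeps the ratios finite when a pixel column
# contains no particles of one colour.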
imbalanceRatioArray.append( [-imbalanceRatio1, imbalanceRatio2] )
tmpParticleFlowIndex = [] # experimental
# Draw histogram at the top of the chart
if self.show_histogram_ratio == True:
for r in range(0, len(imbalanceRatioArray)):
self.draw.line(( r-1, 100+imbalanceRatioArray[r-1][0]*self.special_number(), r, 100+imbalanceRatioArray[r][0]*self.special_number()), \
fill=(self.COLOR_HISTOGRAM_UP), width=1 )
self.draw.line(( r-1, 100+imbalanceRatioArray[r-1][1]*self.special_number(), r, 100+imbalanceRatioArray[r][1]*self.special_number()), \
fill=(self.COLOR_HISTOGRAM_DOWN), width=1 )
# experimental
# particle_flow_index = 100 / ( (imbalanceRatioArray[r][0]+1) + (imbalanceRatioArray[r][1]+1) )
# tmpParticleFlowIndex.append( particle_flow_index )
# print(particle_flow_index)
# end experimental
# reducerFactor = 1
# for r in range(0, len( tmpParticleFlowIndex ) ):
# self.draw.line(( r-1, tmpParticleFlowIndex[r-1]*reducerFactor, r, tmpParticleFlowIndex[r]*reducerFactor), fill=(self.COLOR_HISTOGRAM_UP), width=2 )
# ---------------------------------------------------------------------------
# Draw a simple average of the ratio - this section draws for the blue side
# note: we are doing the averaging even if we don't show it,
# this is because we need the average to perform other work later on
tmpAvg1 = []
for r in range(0, len(imbalanceRatioArray)):
tmpAvg = 0
tmpthing = 0
for f in range(0, self.histogram_simple_average_period):
tmpthing += imbalanceRatioArray[r-f][0]
tmpAvg = tmpthing/self.histogram_simple_average_period
tmpAvg1.append(tmpAvg)
if self.show_histogram_simple_average == True:
for r in range(0, len( tmpAvg1 ) ):
self.draw.line(( r-1, offsetY+tmpAvg1[r-1]*self.special_number(), r, offsetY+tmpAvg1[r]*self.special_number()), fill=(self.COLOR_HISTOGRAM_UP), width=1 )
# Draw a simple average of the ratio - this section draws for the pink side
tmpAvg1 = []
for r in range(0, len(imbalanceRatioArray)):
tmpAvg = 0
tmpthing = 0
for f in range(0, self.histogram_simple_average_period):
tmpthing += imbalanceRatioArray[r-f][1]
tmpAvg = tmpthing/self.histogram_simple_average_period
tmpAvg1.append(tmpAvg)
if self.show_histogram_simple_average == True:
for r in range(0, len( tmpAvg1 ) ):
self.draw.line(( r-1, offsetY+tmpAvg1[r-1]*self.special_number(), r, offsetY+tmpAvg1[r]*self.special_number()), fill=(self.COLOR_HISTOGRAM_DOWN), width=1 )
if self.highlight_sigma == True:
# DRAW VERTICAL LINE AT POINT OF LOWEST STANDARD DEV
# find the low points in the standard dev
# put all the Y values of the standard deviation in a separate list
# an entry in the list looks like [[0, 900], [10, 639.1957450511611]]
# we want to look a the second nested list, and only the Y component
# the higher this number is, the lower it occurs on the chart, i.e. the lowest standard dev value
tmpList = []
for index in range(0, len(self.standard_dev_list)):
tmpList.append( self.standard_dev_list[index][1][1] )
# this works fine for the lowest, but only one result
# tmpX = self.standard_dev_list[tmpList.index( max(tmpList) )][1][0]
# tmpY = max(tmpList) # returns what represents the lowest standard dev value
# # print(tmpX, tmpY)
# self.draw.line(( tmpX, 0, tmpX, tmpY ), fill=(self.COLOR_ENTRY_SIGNAL), width=1 )
# ----- TEST AREA -----------------------------------------------------------------------
# TODO: determine if we can be smarter about how many lines to show per sigma low
largest = heapq.nlargest(self.sigma_sort_low, enumerate(tmpList), key=lambda x: x[1])
for item in largest:
# self.print_debug( item )
tmpX = self.standard_dev_list[item[0]][1][0]
tmpY = item[1]
buyers = abs( imbalanceRatioArray[ self.get_x_location_of_candle( item[0] ) ][0] )
sellers = abs( imbalanceRatioArray[ self.get_x_location_of_candle( item[0] ) ][1] )
tmpYIndicatorStart = self.standard_dev_list_vol[ item[0] ][0][1]
if ( buyers > sellers):
self.draw.line(( tmpX, tmpYIndicatorStart, tmpX, tmpY ), fill=(self.COLOR_ENTRY_SIGNAL), width=1 )
elif ( sellers > buyers):
self.draw.line(( tmpX, tmpYIndicatorStart, tmpX, tmpY ), fill=( (255,0,0) ), width=1 )
# orig
# self.draw.line(( tmpX, 150, tmpX, tmpY ), fill=(self.COLOR_ENTRY_SIGNAL), width=1 )
# test area
# new idea:
# while we are looping the low sigmas,
# get the histogram average based on which index we're on
# self.print_debug( imbalanceRatioArray[ self.get_x_location_of_candle( item[0] ) ][0] )
# compare this with what the volume is doing
# self.print_debug( self.standard_dev_list_vol[ item[0] ] )
# ----------------------------------------------------------------------------------------
# print("-------------------")
# print(len(imbalanceRatioArray))
# print(len(self.standard_dev_list))
if self.show_histogram_standard_dev == True:
# Draw a standard deviation line based on the particle counts
# histogram up - blue
sdevParticles = []
sigmaLookbackParticleCount = self.histogram_standard_dev_period
sdevParticlesAdjust = 2
offsetY = 125
for r in range(0, len(imbalanceRatioArray)):
topParticlesSet = []
for f in range(0, sigmaLookbackParticleCount):
topParticlesSet.append( imbalanceRatioArray[r-f][0] )
standardDev = sdef.getStandardDeviation(topParticlesSet).real
standardDev *= (math.pow( math.pi*self.get_phi(), sdevParticlesAdjust) )
standardDev *= -1 # negative adjustment to flip the projection
sdevParticles.append( standardDev )
for r in range(0, len( sdevParticles ) ):
self.draw.line(( r-1, offsetY+sdevParticles[r-1]*self.special_number(), r, offsetY+sdevParticles[r]*self.special_number()), fill=(self.COLOR_HISTOGRAM_UP), width=1 )
# histogram down - pink
sdevParticles = []
for r in range(0, len(imbalanceRatioArray)):
bottomParticlesSet = []
for f in range(0, sigmaLookbackParticleCount):
bottomParticlesSet.append( imbalanceRatioArray[r-f][1] )
standardDev = sdef.getStandardDeviation(bottomParticlesSet).real
standardDev *= (math.pow( math.pi*self.get_phi(), sdevParticlesAdjust) )
standardDev *= -1 # negative adjustment to flip the projection
sdevParticles.append( standardDev )
for r in range(0, len( sdevParticles ) ):
self.draw.line(( r-1, offsetY+sdevParticles[r-1]*self.special_number(), r, offsetY+sdevParticles[r]*self.special_number()), fill=(self.COLOR_HISTOGRAM_DOWN), width=1 )
# Build the histogram directory if it's not there
gif_animation_directory = self.render_histogram_directory + self.histogram_animation_directory + \
self.truncated_dataset_file_name + "_" + self.number_formatter(self.offset_index) + "_sig" + str( self.sigma_period )
if not os.path.exists( gif_animation_directory ):
os.makedirs( gif_animation_directory )
# TODO: consider putting local timestamp on histogram
local_current_time = "" # TBD
print(TextColors.HEADERLEFT + "โ " + TextColors.ENDC + TextColors.HEADERLEFT2 + "โ " + TextColors.ENDC + TextColors.HEADERLEFT3 + "โ" + TextColors.ENDC)
print(TextColors.HEADERLEFT3 + " โ" + TextColors.ENDC + TextColors.HEADERLEFT + " โ" + TextColors.ENDC + TextColors.HEADERLEFT2 + " โ" + TextColors.ENDC)
# Save the histogram
img.save(gif_animation_directory + "/" + \
self.truncated_dataset_file_name + "_" + \
# local_current_time + "_" + \
self.number_formatter(self.offset_index) + "_" + \
self.number_formatter(self.permutation_index) + \
"_sig" + str( self.sigma_period ) + \
".png", format='PNG')
# make a gif from available images
arg = "ffmpeg -pattern_type glob -i '" + gif_animation_directory + "/*.png' -y " + gif_animation_directory + "/temp.avi"
os.system( arg )
arg = "ffmpeg -i " + gif_animation_directory + "/temp.avi -pix_fmt rgb8 -y " + gif_animation_directory + "/" + \
self.truncated_dataset_file_name + "_" + self.number_formatter(self.offset_index) + "_sig" + str( self.sigma_period ) + ".gif"
os.system( arg )
os.system( "rm " + gif_animation_directory + "/temp.avi" )
self.print_verbose(self.dataset_file + " simulation done.")
# Automatically display the image
# img.show()
def set_permutation_name(self, pIterationNumber):
self.permutation_name = \
str(pIterationNumber) + "_" + \
str(self.dataset_file) + "_" + \
str(self.particles_birth_count) + "_" + \
str(self.CANDLESTICK_WIDTH) + "_" + \
str(self.PARTICLE_DIAMETER) + "_" + \
str(self.CANDLE_GUTTER)
def set_particles_birth_count(self, pParticleBirthCount):
self.particles_birth_count = pParticleBirthCount
def set_candlestick_width(self, pCandlestickWidth):
self.CANDLESTICK_WIDTH = pCandlestickWidth
def set_particles_diameter(self, pParticleDiameter):
self.PARTICLE_DIAMETER = pParticleDiameter
def set_candle_gutter(self, pCandleGutter):
self.CANDLE_GUTTER = pCandleGutter
def print_verbose(self, pMessage):
if (self.verbose == True):
print(pMessage)
def print_debug(self, pMessage):
if (self.debug == True):
print(pMessage)
#--- RUNTIME NOTES --------------------------------------------------------------------
# This particular flavor uses CSV files containing OHLC data. These files can be static or
# dynamically updated, provided they adhere to the structure as included in sample CSV.
# Place or write all CSV files in the directory specified in app.yaml.
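# Illustrative row only (the sample CSV shipped with the project is authoritative):
# columns 2-5 are open/high/low/close and column 6 is volume, as parsed above, e.g.
# 2020.01.06,00:00,1.11612,1.11835,1.11557,1.11696,25712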
app_yaml = open("../config/app.yaml", "r").readlines()
path_to_csv_files = app_yaml[0].split(":")[1] # TODO: make this a little smarter
arbitraryRunLimit = 99 # The number of times to run the simulation
for r in range(0, arbitraryRunLimit):
dataset_list = []
if r == 0: # only strip this the first time
path_to_csv_files = path_to_csv_files.strip() + "/*.csv"
files = glob.glob(path_to_csv_files) # Get all the CSV files
files.sort(key=os.path.getmtime) # Sort the files based on latest
for csvfile in reversed(files):
dataset_list.append(csvfile) # Add the files to a list
for dataset in dataset_list[:1]: # Loop up to [:N] datasets e.g. [:3]
lookback = 0 # Default is 1. To loop iterations within a dataset use following loop with lookback. e.g., setting this to 60 will use one dataset to create 60 simulations, each one starting a candle earlier. Useful for looking for patterns on old data. Set lookback to 1 when running in a production/trading mode, assuming your CSV file is being updated in real time.
i = 0
while i <= lookback:
cvt = ControlVolumeTank() # The ControlVolumeTank is the class running the simulation.
lookback = int(cvt.sample_period_size) # override if this was passed in
cvt.permutation_index = r
if lookback > 0:
cvt.offset_index = i # Sets an index based on where we are at in the lookback sequence. If lookback is 1 then we aren't running multiple simulations off the same dataset, but fresh ones every time.
if cvt.offset_index_override != 0:
cvt.offset_index = cvt.offset_index_override - i
print("Beginning at candle " + str( cvt.offset_index ))
cvt.dataset_file = dataset
print( "Current OHLC dataset: " + TextColors.HEADERLEFT2 + TextColors.INVERTED + dataset + TextColors.ENDC)
random.seed()
cvt.set_particles_diameter( 2 )
cvt.set_candlestick_width( 3 )
cvt.set_particles_birth_count( particle_birth_count )
cvt.set_candle_gutter( 1 )
cvt.game_run()
i += 1
| [] | [] | [
"SDL_VIDEODRIVER"
] | [] | ["SDL_VIDEODRIVER"] | python | 1 | 0 | |
docs/conf.py | # -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
autodoc_mock_imports = ["adafruit_minimqtt"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.4", None),
"CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Adafruit AWS_IOT Library"
copyright = "2019 Brent Rubell"
author = "Brent Rubell"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
except ImportError:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Output file base name for HTML help builder.
htmlhelp_basename = "AdafruitAws_iotLibrarydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"AdafruitAWS_IOTLibrary.tex",
"AdafruitAWS_IOT Library Documentation",
author,
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"AdafruitAWS_IOTlibrary",
"Adafruit AWS_IOT Library Documentation",
[author],
1,
)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AdafruitAWS_IOTLibrary",
"Adafruit AWS_IOT Library Documentation",
author,
"AdafruitAWS_IOTLibrary",
"One line description of project.",
"Miscellaneous",
),
]
| [] | [] | [
"READTHEDOCS"
] | [] | ["READTHEDOCS"] | python | 1 | 0 | |
Lesson08/Activity09/main.go | package main
import (
"os"
"time"
"github.com/sirupsen/logrus"
)
var logger = logrus.New()
func main() {
logger.SetFormatter(&logrus.JSONFormatter{})
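// LOG_LEVEL selects verbosity (DEBUG, WARNING or ERROR; anything else falls back to info)
// and ENABLE_CONNECTION must be "Yes" or "No"; e.g. LOG_LEVEL=DEBUG ENABLE_CONNECTION=Yes (illustrative).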
logLevel := os.Getenv("LOG_LEVEL")
if logLevel == "" {
logger.Warn("LOG_LEVEL is not set!")
}
switch logLevel {
case "DEBUG":
logger.Level = logrus.DebugLevel
break
case "WARNING":
logger.Level = logrus.WarnLevel
break
case "ERROR":
logger.Level = logrus.ErrorLevel
break
default:
logger.Level = logrus.InfoLevel
}
logger.Info("Application is starting..")
connEnabled := os.Getenv("ENABLE_CONNECTION")
if connEnabled == "" {
logger.Warn("ENABLE_CONNECTION is not set!")
} else if connEnabled != "Yes" && connEnabled != "No" {
logger.Error("ENABLE_CONNECTION can only be set to [Yes] or [No]!")
} else {
logger.Debugf("ENABLE_CONNECTION set to [%v]", connEnabled)
}
if connEnabled == "Yes" {
logger.Info("Application is running..")
time.Sleep(999999 * time.Second)
}
logger.Info("Application is exiting..")
time.Sleep(10 * time.Second)
}
| [
"\"LOG_LEVEL\"",
"\"ENABLE_CONNECTION\""
] | [] | [
"ENABLE_CONNECTION",
"LOG_LEVEL"
] | [] | ["ENABLE_CONNECTION", "LOG_LEVEL"] | go | 2 | 0 | |
app/main.py | from fastapi import FastAPI
import random
import os
app = FastAPI()
prng = random.Random()
dice_min = int(os.getenv('DICE_MIN') or '1')
dice_max = int(os.getenv('DICE_MAX') or '6')
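# Example (illustrative): DICE_MIN=1 DICE_MAX=20 uvicorn main:app would serve a d20 at GET /;
# any ASGI server works, and unset variables fall back to a standard d6.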
@app.get('/')
def roll_die():
return dict(result=prng.randint(dice_min, dice_max))
| [] | [] | [
"DICE_MIN",
"DICE_MAX"
] | [] | ["DICE_MIN", "DICE_MAX"] | python | 2 | 0 | |
engine/hatchery/swarm/swarm.go | package swarm
import (
"context"
"fmt"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/fsouza/go-dockerclient"
"github.com/moby/moby/pkg/namesgenerator"
"github.com/ovh/cds/sdk"
"github.com/ovh/cds/sdk/cdsclient"
"github.com/ovh/cds/sdk/hatchery"
"github.com/ovh/cds/sdk/log"
)
// New instanciates a new Hatchery Swarm
func New() *HatcherySwarm {
return new(HatcherySwarm)
}
// ApplyConfiguration apply an object of type HatcheryConfiguration after checking it
func (h *HatcherySwarm) ApplyConfiguration(cfg interface{}) error {
if err := h.CheckConfiguration(cfg); err != nil {
return err
}
var ok bool
h.Config, ok = cfg.(HatcheryConfiguration)
if !ok {
return fmt.Errorf("Invalid configuration")
}
return nil
}
// CheckConfiguration checks the validity of the configuration object
func (h *HatcherySwarm) CheckConfiguration(cfg interface{}) error {
hconfig, ok := cfg.(HatcheryConfiguration)
if !ok {
return fmt.Errorf("Invalid configuration")
}
if hconfig.API.HTTP.URL == "" {
return fmt.Errorf("API HTTP(s) URL is mandatory")
}
if hconfig.API.Token == "" {
return fmt.Errorf("API Token URL is mandatory")
}
if hconfig.MaxContainers <= 0 {
return fmt.Errorf("max-containers must be > 0")
}
if hconfig.WorkerTTL <= 0 {
return fmt.Errorf("worker-ttl must be > 0")
}
if hconfig.DefaultMemory <= 1 {
return fmt.Errorf("worker-memory must be > 1")
}
if hconfig.Name == "" {
return fmt.Errorf("please enter a name in your swarm hatchery configuration")
}
if os.Getenv("DOCKER_HOST") == "" {
return fmt.Errorf("Please export docker client env variables DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH")
}
return nil
}
// Serve start the HatcherySwarm server
func (h *HatcherySwarm) Serve(ctx context.Context) error {
hatchery.Create(h)
return nil
}
//Init connect the hatchery to the docker api
func (h *HatcherySwarm) Init() error {
h.hatch = &sdk.Hatchery{
Name: h.Configuration().Name,
Version: sdk.VERSION,
}
h.client = cdsclient.NewHatchery(
h.Configuration().API.HTTP.URL,
h.Configuration().API.Token,
h.Configuration().Provision.RegisterFrequency,
h.Configuration().API.HTTP.Insecure,
h.hatch.Name,
)
if err := hatchery.Register(h); err != nil {
return fmt.Errorf("Cannot register: %s", err)
}
var errc error
h.dockerClient, errc = docker.NewClientFromEnv()
if errc != nil {
log.Error("Unable to connect to a docker client:%s", errc)
return errc
}
if errPing := h.dockerClient.Ping(); errPing != nil {
log.Error("Unable to ping docker host:%s", errPing)
return errPing
}
go h.killAwolWorkerRoutine()
return nil
}
//This a embeded cache for containers list
var containersCache = struct {
mu sync.RWMutex
list []docker.APIContainers
}{
mu: sync.RWMutex{},
list: []docker.APIContainers{},
}
func (h *HatcherySwarm) getContainers() ([]docker.APIContainers, error) {
containersCache.mu.RLock()
nbServers := len(containersCache.list)
containersCache.mu.RUnlock()
if nbServers == 0 {
s, err := h.dockerClient.ListContainers(docker.ListContainersOptions{
All: true,
})
if err != nil {
return nil, sdk.WrapError(err, "getContainers> error: %s")
}
containersCache.mu.Lock()
containersCache.list = s
containersCache.mu.Unlock()
for _, v := range s {
log.Debug("getContainers> container ID:%s names:%+v image:%s created:%d state:%s, status:%s", v.ID, v.Names, v.Image, v.Created, v.State, v.Status)
}
//Remove data from the cache after 2 seconds
go func() {
time.Sleep(2 * time.Second)
containersCache.mu.Lock()
containersCache.list = []docker.APIContainers{}
containersCache.mu.Unlock()
}()
}
return containersCache.list, nil
}
func (h *HatcherySwarm) getContainer(name string) (*docker.APIContainers, error) {
containers, err := h.getContainers()
if err != nil {
return nil, sdk.WrapError(err, "getContainer> cannot getContainers")
}
for i := range containers {
if strings.Replace(containers[i].Names[0], "/", "", 1) == strings.Replace(name, "/", "", 1) {
return &containers[i], nil
}
}
return nil, nil
}
func (h *HatcherySwarm) killAndRemoveContainer(ID string) {
log.Info("killAndRemove>Remove container %s", ID)
if err := h.dockerClient.KillContainer(docker.KillContainerOptions{
ID: ID,
Signal: docker.SIGKILL,
}); err != nil {
if !strings.Contains(err.Error(), "is not running") && !strings.Contains(err.Error(), "No such container") {
log.Warning("killAndRemove> Unable to kill container %s", err)
}
}
if err := h.dockerClient.RemoveContainer(docker.RemoveContainerOptions{
ID: ID,
}); err != nil {
// container could be already removed by a previous call to docker
if !strings.Contains(err.Error(), "No such container") {
log.Warning("killAndRemove> Unable to remove container %s", err)
}
}
}
func (h *HatcherySwarm) killAndRemove(ID string) {
container, err := h.dockerClient.InspectContainer(ID)
if err != nil {
log.Info("killAndRemove> cannot InspectContainer: %v", err)
h.killAndRemoveContainer(ID)
return
}
for _, cnetwork := range container.NetworkSettings.Networks {
network, err := h.dockerClient.NetworkInfo(cnetwork.NetworkID)
if err != nil {
log.Info("killAndRemove> cannot NetworkInfo: %v", err)
h.killAndRemoveContainer(ID)
return
}
// If we succeed in getting the network, kill and remove all the containers on the network
if netname, ok := network.Labels["worker_net"]; ok {
log.Info("killAndRemove> Remove network %s", netname)
for id := range network.Containers {
h.killAndRemoveContainer(id)
}
}
}
}
//SpawnWorker start a new docker container
func (h *HatcherySwarm) SpawnWorker(spawnArgs hatchery.SpawnArguments) (string, error) {
//name is the name of the worker and the name of the container
name := fmt.Sprintf("swarmy-%s-%s", strings.ToLower(spawnArgs.Model.Name), strings.Replace(namesgenerator.GetRandomName(0), "_", "-", -1))
if spawnArgs.RegisterOnly {
name = "register-" + name
}
log.Info("SpawnWorker> Spawning worker %s - %s", name, spawnArgs.LogInfo)
//Create a network
network := name + "-net"
h.createNetwork(network)
//Memory for the worker
memory := int64(h.Config.DefaultMemory)
services := []string{}
if spawnArgs.JobID > 0 {
for _, r := range spawnArgs.Requirements {
if r.Type == sdk.MemoryRequirement {
var err error
memory, err = strconv.ParseInt(r.Value, 10, 64)
if err != nil {
log.Warning("SpawnWorker>Unable to parse memory requirement %s :s", memory, err)
return "", err
}
} else if r.Type == sdk.ServiceRequirement {
//name= <alias> => the name of the host put in /etc/hosts of the worker
//value= "postgres:latest env_1=blabla env_2=blabla"" => we can add env variables in requirement name
tuple := strings.Split(r.Value, " ")
img := tuple[0]
env := []string{}
serviceMemory := int64(1024)
if len(tuple) > 1 {
env = append(env, tuple[1:]...)
}
//option for power user : set the service memory with CDS_SERVICE_MEMORY=1024
for _, e := range env {
if strings.HasPrefix(e, "CDS_SERVICE_MEMORY=") {
m := strings.Replace(e, "CDS_SERVICE_MEMORY=", "", -1)
i, err := strconv.Atoi(m)
if err != nil {
log.Warning("SpawnWorker> Unable to parse service option %s : %s", e, err)
continue
}
serviceMemory = int64(i)
}
}
serviceName := r.Name + "-" + name
//labels are used to make container cleanup easier. We "link" the service to its worker this way.
labels := map[string]string{
"service_worker": name,
"service_name": serviceName,
"hatchery": h.Config.Name,
}
//Start the services
if err := h.createAndStartContainer(serviceName, img, network, r.Name, []string{}, env, labels, serviceMemory); err != nil {
log.Warning("SpawnWorker>Unable to start required container: %s", err)
return "", err
}
services = append(services, serviceName)
}
}
}
var registerCmd string
if spawnArgs.RegisterOnly {
registerCmd = " register"
}
//cmd is the command to start the worker (we need curl to download current version of the worker binary)
cmd := []string{"sh", "-c", fmt.Sprintf("curl %s/download/worker/`uname -m` -o worker && echo chmod worker && chmod +x worker && echo starting worker && ./worker%s", h.Client().APIURL(), registerCmd)}
//CDS env needed by the worker binary
env := []string{
"CDS_API" + "=" + h.Configuration().API.HTTP.URL,
"CDS_NAME" + "=" + name,
"CDS_TOKEN" + "=" + h.Configuration().API.Token,
"CDS_MODEL" + "=" + strconv.FormatInt(spawnArgs.Model.ID, 10),
"CDS_HATCHERY" + "=" + strconv.FormatInt(h.hatch.ID, 10),
"CDS_HATCHERY_NAME" + "=" + h.hatch.Name,
"CDS_TTL" + "=" + strconv.Itoa(h.Config.WorkerTTL),
"CDS_SINGLE_USE=1",
}
if h.Configuration().Provision.WorkerLogsOptions.Graylog.Host != "" {
env = append(env, "CDS_GRAYLOG_HOST"+"="+h.Configuration().Provision.WorkerLogsOptions.Graylog.Host)
}
if h.Configuration().Provision.WorkerLogsOptions.Graylog.Port > 0 {
env = append(env, fmt.Sprintf("CDS_GRAYLOG_PORT=%d", h.Configuration().Provision.WorkerLogsOptions.Graylog.Port))
}
if h.Configuration().Provision.WorkerLogsOptions.Graylog.ExtraKey != "" {
env = append(env, "CDS_GRAYLOG_EXTRA_KEY"+"="+h.Configuration().Provision.WorkerLogsOptions.Graylog.ExtraKey)
}
if h.Configuration().Provision.WorkerLogsOptions.Graylog.ExtraValue != "" {
env = append(env, "CDS_GRAYLOG_EXTRA_VALUE"+"="+h.Configuration().Provision.WorkerLogsOptions.Graylog.ExtraValue)
}
if h.Configuration().API.GRPC.URL != "" && spawnArgs.Model.Communication == sdk.GRPC {
env = append(env, fmt.Sprintf("CDS_GRPC_API=%s", h.Configuration().API.GRPC.URL))
env = append(env, fmt.Sprintf("CDS_GRPC_INSECURE=%t", h.Configuration().API.GRPC.Insecure))
}
if spawnArgs.JobID > 0 {
if spawnArgs.IsWorkflowJob {
env = append(env, fmt.Sprintf("CDS_BOOKED_WORKFLOW_JOB_ID=%d", spawnArgs.JobID))
} else {
env = append(env, fmt.Sprintf("CDS_BOOKED_PB_JOB_ID=%d", spawnArgs.JobID))
}
}
//labels are used to make container cleanup easier
labels := map[string]string{
"worker_model": strconv.FormatInt(spawnArgs.Model.ID, 10),
"worker_name": name,
"worker_requirements": strings.Join(services, ","),
"hatchery": h.Config.Name,
}
//start the worker
if err := h.createAndStartContainer(name, spawnArgs.Model.Image, network, "worker", cmd, env, labels, memory); err != nil {
log.Warning("SpawnWorker> Unable to start container named %s with image %s err:%s", name, spawnArgs.Model.Image, err)
}
return name, nil
}
//create the docker bridge
func (h *HatcherySwarm) createNetwork(name string) error {
log.Debug("createAndStartContainer> Create network %s", name)
_, err := h.dockerClient.CreateNetwork(docker.CreateNetworkOptions{
Name: name,
Driver: "bridge",
Internal: false,
CheckDuplicate: true,
EnableIPv6: false,
IPAM: docker.IPAMOptions{
Driver: "default",
},
Labels: map[string]string{
"worker_net": name,
},
})
return err
}
//shortcut to create+start(=run) a container
func (h *HatcherySwarm) createAndStartContainer(name, image, network, networkAlias string, cmd, env []string, labels map[string]string, memory int64) error {
//Memory is set to 1GB by default
if memory <= 4 {
memory = 1024
} else {
//Moaaaaar memory
memory = memory * 110 / 100
}
log.Info("createAndStartContainer> Create container %s from %s on network %s as %s (memory=%dMB)", name, image, network, networkAlias, memory)
opts := docker.CreateContainerOptions{
Name: name,
Config: &docker.Config{
Image: image,
Cmd: cmd,
Env: env,
Labels: labels,
Memory: memory * 1024 * 1024, //from MB to B
MemorySwap: -1,
},
NetworkingConfig: &docker.NetworkingConfig{
EndpointsConfig: map[string]*docker.EndpointConfig{
network: &docker.EndpointConfig{
Aliases: []string{networkAlias, name},
},
},
},
}
c, err := h.dockerClient.CreateContainer(opts)
if err != nil {
log.Warning("startAndCreateContainer> Unable to create container with opts: %+v err:%s", opts, err)
return err
}
if err := h.dockerClient.StartContainer(c.ID, nil); err != nil {
log.Warning("startAndCreateContainer> Unable to start container %s err:%s", c.ID, err)
return err
}
return nil
}
// ModelType returns type of hatchery
func (*HatcherySwarm) ModelType() string {
return sdk.Docker
}
// CanSpawn checks if the model can be spawned by this hatchery
func (h *HatcherySwarm) CanSpawn(model *sdk.Model, jobID int64, requirements []sdk.Requirement) bool {
//List all containers to check if we can spawn a new one
cs, errList := h.getContainers()
if errList != nil {
log.Error("CanSpawn> Unable to list containers: %s", errList)
return false
}
//List all workers
ws, errWList := h.getWorkersStarted(cs)
if errWList != nil {
log.Error("CanSpawn> Unable to list workers: %s", errWList)
return false
}
if len(cs) > h.Config.MaxContainers {
log.Warning("CanSpawn> max containers reached. current:%d max:%d", len(cs), h.Config.MaxContainers)
return false
}
//Get links from requirements
links := map[string]string{}
for _, r := range requirements {
if r.Type == sdk.ServiceRequirement {
links[r.Name] = strings.Split(r.Value, " ")[0]
}
}
// hatcherySwarm.ratioService: Percent reserved for spawning worker with service requirement
// if no link -> we need to check ratioService
if len(links) == 0 {
if h.Config.RatioService >= 100 {
log.Debug("CanSpawn> ratioService 100 by conf - no spawn worker without CDS Service")
return false
}
if len(cs) > 0 {
percentFree := 100 - (100 * len(ws) / h.Config.MaxContainers)
if percentFree <= h.Config.RatioService {
log.Debug("CanSpawn> ratio reached. percentFree:%d ratioService:%d", percentFree, h.Config.RatioService)
return false
}
}
}
log.Debug("CanSpawn> %s need %v", model.Name, links)
// If one image have a "latest" tag, we don't have to listImage
listImagesToDoForLinkedImages := true
for _, i := range links {
if strings.HasSuffix(i, ":latest") {
listImagesToDoForLinkedImages = false
break
}
}
var images []docker.APIImages
// if we don't need to force pull links, we check if model is "latest"
// if model is not "latest" tag too, ListImages to get images locally
if listImagesToDoForLinkedImages || !strings.HasSuffix(model.Image, ":latest") {
var errl error
images, errl = h.dockerClient.ListImages(docker.ListImagesOptions{})
if errl != nil {
log.Warning("CanSpawn> Unable to list images: %s", errl)
}
}
var imageFound bool
// model is not latest, check if image exists locally
if !strings.HasSuffix(model.Image, ":latest") {
checkImage:
for _, img := range images {
for _, t := range img.RepoTags {
if model.Image == t {
imageFound = true
break checkImage
}
}
}
}
if !imageFound {
//Pull the worker image
opts := docker.PullImageOptions{
Repository: model.Image,
OutputStream: nil,
}
auth := docker.AuthConfiguration{}
log.Info("CanSpawn> pulling image %s", model.Image)
if err := h.dockerClient.PullImage(opts, auth); err != nil {
log.Warning("CanSpawn> Unable to pull image %s : %s", model.Image, err)
return false
}
}
//Pull the service image
for _, i := range links {
var imageFound2 bool
// model is not latest for this link, check if image exists locally
if !strings.HasSuffix(i, ":latest") {
checkLink:
for _, img := range images {
for _, t := range img.RepoTags {
if i == t {
imageFound2 = true
break checkLink
}
}
}
}
if !imageFound2 {
opts := docker.PullImageOptions{
Repository: i,
OutputStream: nil,
}
auth := docker.AuthConfiguration{}
log.Info("CanSpawn> pulling image %s", i)
if err := h.dockerClient.PullImage(opts, auth); err != nil {
log.Warning("CanSpawn> Unable to pull image %s : %s", i, err)
return false
}
}
}
//Ready to spawn
log.Debug("CanSpawn> %s can be spawned", model.Name)
return true
}
func (h *HatcherySwarm) getWorkersStarted(containers []docker.APIContainers) ([]docker.APIContainers, error) {
if containers == nil {
var errList error
containers, errList = h.getContainers()
if errList != nil {
log.Error("WorkersStarted> Unable to list containers: %s", errList)
return nil, errList
}
}
res := []docker.APIContainers{}
//We only count worker
for _, c := range containers {
cont, err := h.getContainer(c.Names[0])
if err != nil {
log.Error("WorkersStarted> Unable to get worker %s: %v", c.Names[0], err)
continue
}
if _, ok := cont.Labels["worker_name"]; ok {
if hatch, ok := cont.Labels["hatchery"]; !ok || hatch == h.Config.Name {
res = append(res, *cont)
}
}
}
return res, nil
}
// WorkersStarted returns the number of instances started but
// not necessarily register on CDS yet
func (h *HatcherySwarm) WorkersStarted() int {
workers, _ := h.getWorkersStarted(nil)
return len(workers)
}
// WorkersStartedByModel returns the number of started workers
func (h *HatcherySwarm) WorkersStartedByModel(model *sdk.Model) int {
workers, errList := h.getWorkersStarted(nil)
if errList != nil {
log.Error("WorkersStartedByModel> Unable to list containers: %s", errList)
return 0
}
list := []string{}
for _, c := range workers {
log.Debug("Container : %s %s [%s]", c.ID, c.Image, c.Status)
if c.Image == model.Image {
list = append(list, c.ID)
}
}
log.Debug("WorkersStartedByModel> %s \t %d", model.Name, len(list))
return len(list)
}
// Hatchery returns Hatchery instances
func (h *HatcherySwarm) Hatchery() *sdk.Hatchery {
return h.hatch
}
//Client returns cdsclient instance
func (h *HatcherySwarm) Client() cdsclient.Interface {
return h.client
}
//Configuration returns Hatchery CommonConfiguration
func (h *HatcherySwarm) Configuration() hatchery.CommonConfiguration {
return h.Config.CommonConfiguration
}
// ID returns ID of the Hatchery
func (h *HatcherySwarm) ID() int64 {
if h.hatch == nil {
return 0
}
return h.hatch.ID
}
func (h *HatcherySwarm) killAwolWorkerRoutine() {
for {
time.Sleep(30 * time.Second)
h.killAwolWorker()
}
}
func (h *HatcherySwarm) killAwolWorker() {
apiworkers, err := h.Client().WorkerList()
if err != nil {
log.Warning("killAwolWorker> Cannot get workers: %s", err)
os.Exit(1)
}
containers, errList := h.getWorkersStarted(nil)
if errList != nil {
log.Warning("killAwolWorker> Cannot list containers: %s", errList)
os.Exit(1)
}
//Checking workers
oldContainers := []docker.APIContainers{}
for _, c := range containers {
//If there isn't any worker registered on the API. Kill the container
if len(apiworkers) == 0 {
oldContainers = append(oldContainers, c)
continue
}
//Loop on all worker registered on the API
//Try to find the worker matching this container
var found = false
for _, n := range apiworkers {
if n.Name == c.Names[0] || n.Name == strings.Replace(c.Names[0], "/", "", 1) {
found = true
// If worker is disabled, kill it
if n.Status == sdk.StatusDisabled {
log.Debug("killAwolWorker> Worker %s is disabled. Kill it with fire !", c.Names[0])
oldContainers = append(oldContainers, c)
break
}
}
}
//If the container doesn't match any worker : Kill it.
if !found {
oldContainers = append(oldContainers, c)
}
}
//Delete the workers
for _, c := range oldContainers {
log.Info("killAwolWorker> Delete worker %s", c.Names[0])
h.killAndRemove(c.ID)
}
var errC error
containers, errC = h.getContainers()
if errC != nil {
log.Warning("killAwolWorker> Cannot list containers: %s", errC)
os.Exit(1)
}
//Checking services
for _, c := range containers {
if c.Labels["service_worker"] == "" {
continue
}
//check if the service is linked to a worker which doesn't exist
if w, _ := h.getContainer(c.Labels["service_worker"]); w == nil {
oldContainers = append(oldContainers, c)
continue
}
}
for _, c := range oldContainers {
h.killAndRemove(c.ID)
log.Info("killAwolWorker> Delete worker %s", c.Names[0])
}
//Checking networks
nets, errLN := h.dockerClient.ListNetworks()
if errLN != nil {
log.Warning("killAwolWorker> Cannot get networks: %s", errLN)
return
}
for i := range nets {
n, err := h.dockerClient.NetworkInfo(nets[i].ID)
if err != nil {
log.Warning("killAwolWorker> Unable to get network info: %v", err)
continue
}
if n.Driver != "bridge" || n.Name == "docker0" || n.Name == "bridge" {
continue
}
if _, ok := n.Labels["worker_net"]; !ok {
continue
}
if len(n.Containers) > 0 {
continue
}
log.Info("killAwolWorker> Delete network %s", n.Name)
if err := h.dockerClient.RemoveNetwork(n.ID); err != nil {
log.Warning("killAwolWorker> Unable to delete network %s err:%s", n.Name, err)
}
}
}
// NeedRegistration returns true if the worker model needs registration
func (h *HatcherySwarm) NeedRegistration(m *sdk.Model) bool {
if m.NeedRegistration || m.LastRegistration.Unix() < m.UserLastModified.Unix() {
return true
}
return false
}
| [
"\"DOCKER_HOST\""
] | [] | [
"DOCKER_HOST"
] | [] | ["DOCKER_HOST"] | go | 1 | 0 | |
e2e/e2e_test.go | /*
Copyright 2021 The Cockroach Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"flag"
"os"
"path/filepath"
"testing"
api "github.com/cockroachdb/cockroach-operator/api/v1alpha1"
"github.com/cockroachdb/cockroach-operator/pkg/actor"
"github.com/cockroachdb/cockroach-operator/pkg/controller"
"github.com/cockroachdb/cockroach-operator/pkg/testutil"
testenv "github.com/cockroachdb/cockroach-operator/pkg/testutil/env"
"github.com/cockroachdb/cockroach-operator/pkg/testutil/exec"
"github.com/cockroachdb/cockroach-operator/pkg/testutil/paths"
"github.com/cockroachdb/cockroach-operator/pkg/utilfeature"
"github.com/go-logr/zapr"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"k8s.io/apimachinery/pkg/runtime"
)
var updateOpt = flag.Bool("update", false, "update the golden files of this test")
var env *testenv.ActiveEnv
type Step struct {
name string
test func(t *testing.T)
}
type Steps []Step
func (ss Steps) WithStep(s Step) Steps {
return append(ss, s)
}
func (ss Steps) Run(t *testing.T) {
for _, s := range ss {
if !t.Run(s.name, s.test) {
t.FailNow()
}
}
}
// TestMain wraps the unit tests. Set the TEST_DO_NOT_USE_KIND environment variable to any value
// if you do not want this test to start a k8s cluster using kind.
func TestMain(m *testing.M) {
flag.Parse()
// We are running in bazel so set up the directory for the test binaries
if os.Getenv("TEST_WORKSPACE") != "" {
// TODO create a toolchain for this
paths.MaybeSetEnv("PATH", "kubetest2-kind", "hack", "bin", "kubetest2-kind")
}
noKind := os.Getenv("TEST_DO_NOT_USE_KIND")
if noKind == "" {
os.Setenv("USE_EXISTING_CLUSTER", "true")
// TODO random name for server and also random open port
err := exec.StartKubeTest2("test")
if err != nil {
panic(err)
}
}
// TODO verify success of cluster start? Does kind do it?
e := testenv.NewEnv(runtime.NewSchemeBuilder(api.AddToScheme),
filepath.Join("..", "config", "crd", "bases"),
filepath.Join("..", "config", "rbac", "bases"))
env = e.Start()
code := m.Run()
e.Stop()
if noKind == "" {
err := exec.StopKubeTest2("test")
if err != nil {
panic(err)
}
}
os.Exit(code)
}
func TestCreatesInsecureCluster(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
testLog := zapr.NewLogger(zaptest.NewLogger(t))
actor.Log = testLog
sb := testenv.NewDiffingSandbox(t, env)
sb.StartManager(t, controller.InitClusterReconcilerWithLogger(testLog))
b := testutil.NewBuilder("crdb").WithNodeCount(3).WithEmptyDirDataStore()
create := Step{
name: "creates 3-node insecure cluster",
test: func(t *testing.T) {
require.NoError(t, sb.Create(b.Cr()))
requireClusterToBeReadyEventually(t, sb, b)
state, err := sb.Diff()
require.NoError(t, err)
expected := testutil.ReadOrUpdateGoldenFile(t, state, *updateOpt)
testutil.AssertDiff(t, expected, state)
},
}
steps := Steps{create}
steps.Run(t)
}
func TestCreatesSecureClusterWithGeneratedCert(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
testLog := zapr.NewLogger(zaptest.NewLogger(t))
actor.Log = testLog
sb := testenv.NewDiffingSandbox(t, env)
sb.StartManager(t, controller.InitClusterReconcilerWithLogger(testLog))
b := testutil.NewBuilder("crdb").WithNodeCount(1).WithTLS().WithEmptyDirDataStore()
create := Step{
name: "creates 1-node secure cluster",
test: func(t *testing.T) {
require.NoError(t, sb.Create(b.Cr()))
requireClusterToBeReadyEventually(t, sb, b)
state, err := sb.Diff()
require.NoError(t, err)
expected := testutil.ReadOrUpdateGoldenFile(t, state, *updateOpt)
testutil.AssertDiff(t, expected, state)
},
}
steps := Steps{create}
steps.Run(t)
}
func TestCreatesSecureClusterWithGeneratedCertCRv20(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
testLog := zapr.NewLogger(zaptest.NewLogger(t))
actor.Log = testLog
sb := testenv.NewDiffingSandbox(t, env)
sb.StartManager(t, controller.InitClusterReconcilerWithLogger(testLog))
builder := testutil.NewBuilder("crdb").WithNodeCount(3).WithTLS().
WithImage("cockroachdb/cockroach:v20.1.6").
WithPVDataStore("1Gi", "standard" /* default storage class in KIND */)
create := Step{
name: "creates 3-node secure cluster with v20.1.6",
test: func(t *testing.T) {
require.NoError(t, sb.Create(builder.Cr()))
requireClusterToBeReadyEventually(t, sb, builder)
},
}
steps := Steps{create}
steps.Run(t)
}
func TestUpgradesMinorVersion(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
testLog := zapr.NewLogger(zaptest.NewLogger(t))
actor.Log = testLog
sb := testenv.NewDiffingSandbox(t, env)
sb.StartManager(t, controller.InitClusterReconcilerWithLogger(testLog))
builder := testutil.NewBuilder("crdb").WithNodeCount(1).WithTLS().
WithImage("cockroachdb/cockroach:v19.2.5").
WithPVDataStore("1Gi", "standard" /* default storage class in KIND */)
steps := Steps{
{
name: "creates a 1-node secure cluster",
test: func(t *testing.T) {
require.NoError(t, sb.Create(builder.Cr()))
requireClusterToBeReadyEventually(t, sb, builder)
},
},
{
name: "upgrades the cluster to the next patch version",
test: func(t *testing.T) {
current := builder.Cr()
require.NoError(t, sb.Get(current))
current.Spec.Image.Name = "cockroachdb/cockroach:v19.2.6"
require.NoError(t, sb.Update(current))
requireClusterToBeReadyEventually(t, sb, builder)
requireDbContainersToUseImage(t, sb, current)
},
},
}
steps.Run(t)
}
func TestUpgradesMajorVersion19to20(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
testLog := zapr.NewLogger(zaptest.NewLogger(t))
actor.Log = testLog
sb := testenv.NewDiffingSandbox(t, env)
sb.StartManager(t, controller.InitClusterReconcilerWithLogger(testLog))
builder := testutil.NewBuilder("crdb").WithNodeCount(1).WithTLS().
WithImage("cockroachdb/cockroach:v19.2.6").
WithPVDataStore("1Gi", "standard" /* default storage class in KIND */)
steps := Steps{
{
name: "creates a 1-node secure cluster",
test: func(t *testing.T) {
require.NoError(t, sb.Create(builder.Cr()))
requireClusterToBeReadyEventually(t, sb, builder)
},
},
{
name: "upgrades the cluster to the next minor version",
test: func(t *testing.T) {
current := builder.Cr()
require.NoError(t, sb.Get(current))
current.Spec.Image.Name = "cockroachdb/cockroach:v20.1.1"
require.NoError(t, sb.Update(current))
requireClusterToBeReadyEventually(t, sb, builder)
requireDbContainersToUseImage(t, sb, current)
},
},
}
steps.Run(t)
}
func TestUpgradesMajorVersion19_1To19_2(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
testLog := zapr.NewLogger(zaptest.NewLogger(t))
actor.Log = testLog
sb := testenv.NewDiffingSandbox(t, env)
sb.StartManager(t, controller.InitClusterReconcilerWithLogger(testLog))
builder := testutil.NewBuilder("crdb").WithNodeCount(1).WithTLS().
WithImage("cockroachdb/cockroach:v19.1.4").
WithPVDataStore("1Gi", "standard" /* default storage class in KIND */)
steps := Steps{
{
name: "creates a 1-node secure cluster",
test: func(t *testing.T) {
require.NoError(t, sb.Create(builder.Cr()))
requireClusterToBeReadyEventually(t, sb, builder)
},
},
{
name: "upgrades the cluster to the next minor version",
test: func(t *testing.T) {
current := builder.Cr()
require.NoError(t, sb.Get(current))
current.Spec.Image.Name = "cockroachdb/cockroach:v19.2.1"
require.NoError(t, sb.Update(current))
requireClusterToBeReadyEventually(t, sb, builder)
requireDbContainersToUseImage(t, sb, current)
},
},
}
steps.Run(t)
}
// This test intermittently fails with "no inbound stream connection" (SQLSTATE XXUUU).
// Test the new partitioned upgrades
func TestParitionedUpgradesMajorVersion19to20(t *testing.T) {
if doNotTestFlakes(t) {
t.Log("This test is marked as a flake, not running test")
return
} else {
t.Log("Running this test, although this test is flakey")
}
if testing.Short() {
t.Skip("skipping test in short mode.")
}
testLog := zapr.NewLogger(zaptest.NewLogger(t))
actor.Log = testLog
require.NoError(t, utilfeature.DefaultMutableFeatureGate.Set("PartitionedUpdate=true"))
sb := testenv.NewDiffingSandbox(t, env)
sb.StartManager(t, controller.InitClusterReconcilerWithLogger(testLog))
builder := testutil.NewBuilder("crdb").WithNodeCount(3).WithTLS().
WithImage("cockroachdb/cockroach:v19.2.6").
WithPVDataStore("1Gi", "standard" /* default storage class in KIND */)
steps := Steps{
{
name: "creates a 3-node secure cluster for partitioned update",
test: func(t *testing.T) {
require.NoError(t, sb.Create(builder.Cr()))
requireClusterToBeReadyEventually(t, sb, builder)
},
},
{
name: "upgrades the cluster to the next minor version",
test: func(t *testing.T) {
current := builder.Cr()
require.NoError(t, sb.Get(current))
current.Spec.Image.Name = "cockroachdb/cockroach:v20.1.6"
require.NoError(t, sb.Update(current))
requireClusterToBeReadyEventually(t, sb, builder)
requireDbContainersToUseImage(t, sb, current)
// This value matches the WithImage value above, without patch
requireDownGradeOptionSet(t, sb, builder, "19.2")
requireDatabaseToFunction(t, sb, builder)
},
},
}
steps.Run(t)
// Disable the feature flag
require.NoError(t, utilfeature.DefaultMutableFeatureGate.Set("PartitionedUpdate=false"))
}
func TestDatabaseFunctionality(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
testLog := zapr.NewLogger(zaptest.NewLogger(t))
actor.Log = testLog
sb := testenv.NewDiffingSandbox(t, env)
sb.StartManager(t, controller.InitClusterReconcilerWithLogger(testLog))
builder := testutil.NewBuilder("crdb").WithNodeCount(3).WithTLS().
WithImage("cockroachdb/cockroach:v20.1.7").
WithPVDataStore("1Gi", "standard" /* default storage class in KIND */)
steps := Steps{
{
name: "creates a 3-node secure cluster and tests db",
test: func(t *testing.T) {
require.NoError(t, sb.Create(builder.Cr()))
requireClusterToBeReadyEventually(t, sb, builder)
requireDatabaseToFunction(t, sb, builder)
},
},
}
steps.Run(t)
}
func TestDecommissionFunctionality(t *testing.T) {
if doNotTestFlakes(t) {
t.Log("This test is marked as a flake, not running test")
return
} else {
t.Log("Running this test, although this test is flakey")
}
if testing.Short() {
t.Skip("skipping test in short mode.")
}
testLog := zapr.NewLogger(zaptest.NewLogger(t))
actor.Log = testLog
//Enable decommission feature gate
require.NoError(t, utilfeature.DefaultMutableFeatureGate.Set("UseDecommission=true"))
sb := testenv.NewDiffingSandbox(t, env)
sb.StartManager(t, controller.InitClusterReconcilerWithLogger(testLog))
builder := testutil.NewBuilder("crdb").WithNodeCount(4).WithTLS().
WithImage("cockroachdb/cockroach:v20.1.7").
WithPVDataStore("1Gi", "standard" /* default storage class in KIND */)
steps := Steps{
{
name: "creates a 4-node secure cluster and tests db",
test: func(t *testing.T) {
require.NoError(t, sb.Create(builder.Cr()))
requireClusterToBeReadyEventually(t, sb, builder)
},
},
{
name: "decommission a node",
test: func(t *testing.T) {
current := builder.Cr()
require.NoError(t, sb.Get(current))
current.Spec.Nodes = 3
require.NoError(t, sb.Update(current))
requireClusterToBeReadyEventually(t, sb, builder)
requireDecommissionNode(t, sb, builder)
requireDatabaseToFunction(t, sb, builder)
},
},
}
steps.Run(t)
//Disable decommission feature gate
require.NoError(t, utilfeature.DefaultMutableFeatureGate.Set("UseDecommission=false"))
}
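// doNotTestFlakes reports whether tests marked as flaky should be skipped.
// Setting the TEST_FLAKES environment variable to any non-empty value opts in
// to running them.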
func doNotTestFlakes(t *testing.T) bool {
if os.Getenv("TEST_FLAKES") != "" {
t.Log("running flakey tests")
return false
}
t.Log("not running flakey tests")
return true
}
| [
"\"TEST_WORKSPACE\"",
"\"TEST_DO_NOT_USE_KIND\"",
"\"TEST_FLAKES\""
] | [] | [
"TEST_FLAKES",
"TEST_WORKSPACE",
"TEST_DO_NOT_USE_KIND"
] | [] | ["TEST_FLAKES", "TEST_WORKSPACE", "TEST_DO_NOT_USE_KIND"] | go | 3 | 0 | |
vendor/github.com/Azure/azure-sdk-for-go/tools/profileBuilder/program.go | // +build go1.9
// Copyright 2017 Microsoft Corporation and contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// profileBuilder creates a series of packages filled entirely with alias types
// and functions supporting those alias types by directing traffic to the
// functions supporting the original types. This is useful for associating a series
// of packages in separate API Versions for easier/safer use.
//
// The Azure-SDK-for-Go team intends to use this tool to generate profiles
// that we will publish in this repository for general use. However, in the case
// that one has their own list of Services at given API Versions, this tool may
// prove to be useful as well.
package main
import (
"bytes"
"errors"
"flag"
"fmt"
"go/ast"
"go/parser"
"go/printer"
"go/token"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"time"
"github.com/marstr/collection"
goalias "github.com/marstr/goalias/model"
"github.com/marstr/randname"
)
var (
profileName string
outputLocation string
inputRoot string
inputList io.Reader
packageStrategy collection.Enumerable
outputLog *log.Logger
errLog *log.Logger
)
// WellKnownStrategy is an Enumerable which lists all known strategies for choosing packages for a profile.
type WellKnownStrategy string
// This block declares the definitive list of WellKnownStrategies
const (
WellKnownStrategyList WellKnownStrategy = "list"
WellKnownStrategyLatest WellKnownStrategy = "latest"
WellKnownStrategyPreview WellKnownStrategy = "preview"
)
const armPathModifier = "mgmt"
// If not the empty string, this string should be stamped into files generated by the profileBuilder.
// Note: This variable should be set by passing the argument "-X main.version=`{your value}`" to the Go linker. example: `go build -ldflags "-X main.version=f43d726b6e3f1e3eb7cbdba3982f0253000d5dc5"`
var version string
func main() {
var packages collection.Enumerator
type alias struct {
*goalias.AliasPackage
TargetPath string
}
// Find the names of all of the packages for inclusion in this profile.
packages = packageStrategy.Enumerate(nil).Select(func(x interface{}) interface{} {
if cast, ok := x.(string); ok {
return cast
}
return nil
})
// Parse the packages that were selected for inclusion in this profile.
packages = packages.SelectMany(func(x interface{}) collection.Enumerator {
results := make(chan interface{})
go func() {
defer close(results)
cast, ok := x.(string)
if !ok {
return
}
files := token.NewFileSet()
parsed, err := parser.ParseDir(files, cast, nil, 0)
if err != nil {
errLog.Printf("Couldn't open %q because: %v", cast, err)
return
}
for _, entry := range parsed {
results <- entry
}
}()
return results
})
// Generate the alias package from the originally parsed one.
packages = packages.ParallelSelect(func(x interface{}) interface{} {
var err error
var subject *goalias.AliasPackage
cast, ok := x.(*ast.Package)
if !ok {
return nil
}
var bundle alias
for filename := range cast.Files {
bundle.TargetPath = filepath.Dir(filename)
bundle.TargetPath = trimGoPath(bundle.TargetPath)
subject, err = goalias.NewAliasPackage(cast, bundle.TargetPath)
if err != nil {
errLog.Print(err)
return nil
}
bundle.TargetPath, err = getAliasPath(bundle.TargetPath, profileName)
if err != nil {
errLog.Print(err)
return nil
}
break
}
bundle.AliasPackage = subject
return &bundle
})
packages = packages.Where(func(x interface{}) bool {
return x != nil
})
// Update the "UserAgent" function in the generated profile, if it is present.
packages = packages.Select(func(x interface{}) interface{} {
cast := x.(*alias)
var userAgent *ast.FuncDecl
// Grab all functions in the alias package named "UserAgent"
userAgentCandidates := collection.Where(collection.AsEnumerable(cast.Files["models.go"].Decls), func(x interface{}) bool {
cast, ok := x.(*ast.FuncDecl)
return ok && cast.Name.Name == "UserAgent"
})
// There should really only be one of them, otherwise bailout because we don't understand the world anymore.
candidate, err := collection.Single(userAgentCandidates)
if err != nil {
return x
}
userAgent, ok := candidate.(*ast.FuncDecl)
if !ok {
return x
}
// Grab the expression being returned.
retResults := &userAgent.Body.List[0].(*ast.ReturnStmt).Results[0]
// Append a string literal to the result
updated := &ast.BinaryExpr{
Op: token.ADD,
X: *retResults,
Y: &ast.BasicLit{
Value: fmt.Sprintf("\" profiles/%s\"", profileName),
},
}
*retResults = updated
return x
})
// Add the MSFT Copyright Header, then write the alias package to disk.
products := packages.ParallelSelect(func(x interface{}) interface{} {
cast, ok := x.(*alias)
if !ok {
return false
}
files := token.NewFileSet()
outputPath := filepath.Join(outputLocation, cast.TargetPath, "models.go")
outputPath = strings.Replace(outputPath, `\`, `/`, -1)
err := os.MkdirAll(path.Dir(outputPath), os.ModePerm|os.ModeDir)
if err != nil {
errLog.Print("error creating directory:", err)
return false
}
outputFile, err := os.Create(outputPath)
if err != nil {
errLog.Print("error creating file: ", err)
return false
}
// TODO: This should really be added by the `goalias` package itself. Doing it here is a workaround
fmt.Fprintln(outputFile, "// +build go1.9")
fmt.Fprintln(outputFile)
generatorStampBuilder := new(bytes.Buffer)
fmt.Fprintf(generatorStampBuilder, "// Copyright %4d Microsoft Corporation\n", time.Now().Year())
fmt.Fprintln(generatorStampBuilder, `//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.`)
fmt.Fprintln(outputFile, generatorStampBuilder.String())
generatorStampBuilder.Reset()
fmt.Fprintln(generatorStampBuilder, "// This code was auto-generated by:")
fmt.Fprintln(generatorStampBuilder, "// github.com/Azure/azure-sdk-for-go/tools/profileBuilder")
if version != "" {
fmt.Fprintln(generatorStampBuilder, "// commit ID:", version)
}
fmt.Fprintln(generatorStampBuilder)
fmt.Fprint(outputFile, generatorStampBuilder.String())
outputLog.Printf("Writing File: %s", outputPath)
printer.Fprint(outputFile, files, cast.ModelFile())
return true
})
generated := 0
// Write each aliased package that was found
for entry := range products {
if entry.(bool) {
generated++
}
}
outputLog.Print(generated, " packages generated.")
if err := exec.Command("gofmt", "-w", outputLocation).Run(); err == nil {
outputLog.Print("Success formatting profile.")
} else {
errLog.Print("Trouble formatting profile: ", err)
}
}
func init() {
const defaultName = "{randomly generated}"
var selectedStrategy string
var inputListLocation string
var useVerbose bool
flag.StringVar(&profileName, "name", defaultName, "The name that should be given to the generated profile.")
flag.StringVar(&outputLocation, "o", defaultOutputLocation(), "The output location for the package generated as a profile.")
flag.StringVar(&inputRoot, "root", defaultInputRoot(), "The location of the Azure SDK for Go's service packages.")
flag.StringVar(&inputListLocation, "l", "", "If the `list` strategy is chosen, -l is the location of the file to read for said list. If not present, stdin is used.")
flag.StringVar(&selectedStrategy, "s", string(WellKnownStrategyLatest), "The strategy to employ for finding packages to put in a profile.")
flag.BoolVar(&useVerbose, "v", false, "Write status to stderr as the program progresses")
flag.Parse()
// Setup Verbose Status Log and Error Log
var logWriter io.Writer
if useVerbose {
logWriter = os.Stderr
} else {
logWriter = ioutil.Discard
}
outputLog = log.New(logWriter, "[STATUS] ", 0)
outputLog.Print("Status Logging Enabled")
errLog = log.New(logWriter, "[ERROR] ", 0)
if version != "" {
outputLog.Print("profileBuilder Version: ", version)
}
// Sort out the Profile Name to be used.
if profileName == defaultName {
profileName = randname.AdjNoun{}.Generate()
outputLog.Print("Profile Name Set to: ", profileName)
}
inputList = os.Stdin
if inputListLocation == "" {
outputLog.Print("Reading input from standard input")
} else {
var err error
outputLog.Print("Reading input from: ", inputListLocation)
inputList, err = os.Open(inputListLocation)
if err != nil {
errLog.Print(err)
os.Exit(1)
}
}
wellKnownStrategies := map[WellKnownStrategy]collection.Enumerable{
WellKnownStrategyList: ListStrategy{Reader: inputList},
WellKnownStrategyLatest: LatestStrategy{Root: inputRoot, Predicate: IgnorePreview, VerboseOutput: outputLog},
WellKnownStrategyPreview: LatestStrategy{Root: inputRoot, Predicate: AcceptAll},
}
if s, ok := wellKnownStrategies[WellKnownStrategy(selectedStrategy)]; ok {
packageStrategy = s
outputLog.Printf("Using Well Known Strategy: %s", selectedStrategy)
} else {
errLog.Printf("Unknown strategy for identifying packages: %s\n", selectedStrategy)
os.Exit(1)
}
}
// AzureSDKforGoLocation returns the default location for the Azure-SDK-for-Go to reside.
func AzureSDKforGoLocation() string {
return path.Join(
os.Getenv("GOPATH"),
"src",
"github.com",
"Azure",
"azure-sdk-for-go",
)
}
func defaultOutputLocation() string {
return path.Join(AzureSDKforGoLocation(), "profiles")
}
func defaultInputRoot() string {
return path.Join(AzureSDKforGoLocation(), "services")
}
// getAliasPath takes an existing API Version path and a package name, and converts the path
// to a path which uses the new profile layout.
func getAliasPath(subject, profile string) (transformed string, err error) {
subject = strings.TrimSuffix(subject, "/")
subject = trimGoPath(subject)
matches := packageName.FindAllStringSubmatch(subject, -1)
if matches == nil {
err = errors.New("path does not resemble a known package path")
return
}
output := []string{
profile,
matches[0][1],
}
if matches[0][2] == armPathModifier {
output = append(output, armPathModifier)
}
output = append(output, matches[0][4])
transformed = strings.Join(output, "/")
return
}
// trimGoPath removes the prefix defined in the environment variable GOPATH if it is present in the string provided.
var trimGoPath = func() func(string) string {
splitGo := strings.Split(os.Getenv("GOPATH"), string(os.PathSeparator))
splitGo = append(splitGo, "src")
return func(subject string) string {
splitPath := strings.Split(subject, string(os.PathSeparator))
for i, dir := range splitGo {
if splitPath[i] != dir {
return subject
}
}
packageIdentifier := splitPath[len(splitGo):]
return path.Join(packageIdentifier...)
}
}()
| [
"\"GOPATH\"",
"\"GOPATH\""
] | [] | [
"GOPATH"
] | [] | ["GOPATH"] | go | 1 | 0 | |
sdk/digitaltwins/azure-digitaltwins-core/src/samples/java/com/azure/digitaltwins/core/DigitalTwinsLifecycleAsyncSample.java | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.digitaltwins.core;
import com.azure.core.http.policy.HttpLogDetailLevel;
import com.azure.core.http.policy.HttpLogOptions;
import com.azure.digitaltwins.core.implementation.models.ErrorResponseException;
import com.azure.digitaltwins.core.implementation.serialization.BasicRelationship;
import com.azure.identity.ClientSecretCredentialBuilder;
import org.apache.http.HttpStatus;
import java.io.IOException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Map;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
/**
* This sample creates all the models in \DTDL\Models folder in the ADT service instance and creates the corresponding twins in \DTDL\DigitalTwins folder.
* The Diagram for the Hospital model looks like this:
*
* +------------+
* | Building +-----isEquippedWith-----+
* +------------+ |
* | v
* has +-----+
* | | HVAC|
* v +-----+
* +------------+ |
* | Floor +<--controlsTemperature--+
* +------------+
* |
* contains
* |
* v
* +------------+ +-----------------+
* | Room |-with component->| WifiAccessPoint |
* +------------+ +-----------------+
*
*/
public class DigitalTwinsLifecycleAsyncSample {
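// Connection settings for the sample are supplied through the TENANT_ID, CLIENT_ID,
// CLIENT_SECRET and DIGITAL_TWINS_ENDPOINT environment variables.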
private static final String tenantId = System.getenv("TENANT_ID");
private static final String clientId = System.getenv("CLIENT_ID");
private static final String clientSecret = System.getenv("CLIENT_SECRET");
private static final String endpoint = System.getenv("DIGITAL_TWINS_ENDPOINT");
private static final int MaxWaitTimeAsyncOperationsInSeconds = 10;
private static final URL DtdlDirectoryUrl = DigitalTwinsLifecycleAsyncSample.class.getClassLoader().getResource("DTDL");
private static final Path DtDlDirectoryPath;
private static final Path TwinsPath;
private static final Path ModelsPath;
private static final Path RelationshipsPath;
private static final DigitalTwinsAsyncClient client;
static {
try {
assert DtdlDirectoryUrl != null;
DtDlDirectoryPath = Paths.get(DtdlDirectoryUrl.toURI());
} catch (URISyntaxException e) {
throw new RuntimeException("Unable to convert the DTDL directory URL to URI", e);
}
TwinsPath = Paths.get(DtDlDirectoryPath.toString(), "DigitalTwins");
ModelsPath = Paths.get(DtDlDirectoryPath.toString(), "Models");
RelationshipsPath = Paths.get(DtDlDirectoryPath.toString(), "Relationships");
client = new DigitalTwinsClientBuilder()
.tokenCredential(
new ClientSecretCredentialBuilder()
.tenantId(tenantId)
.clientId(clientId)
.clientSecret(clientSecret)
.build()
)
.endpoint(endpoint)
.httpLogOptions(
new HttpLogOptions()
.setLogLevel(HttpLogDetailLevel.NONE))
.buildAsyncClient();
}
public static void main(String[] args) throws IOException, InterruptedException {
// Ensure existing twins with the same name are deleted first
deleteTwins();
// Create twin counterparts for all the models
createTwins();
}
/**
* Delete a twin, and any relationships it might have.
* @throws IOException If an I/O error is thrown when accessing the starting file.
* @throws InterruptedException If the current thread is interrupted while waiting to acquire permits on a semaphore.
*/
public static void deleteTwins() throws IOException, InterruptedException {
System.out.println("DELETE DIGITAL TWINS");
Map<String, String> twins = FileHelper.loadAllFilesInPath(TwinsPath);
final Semaphore deleteTwinsSemaphore = new Semaphore(0);
final Semaphore deleteRelationshipsSemaphore = new Semaphore(0);
// Call APIs to clean up any pre-existing resources that might be referenced by this sample. If a digital twin does not exist, ignore it.
twins
.forEach((twinId, twinContent) -> {
// Call APIs to delete all relationships.
client.listRelationships(twinId, BasicRelationship.class)
.doOnComplete(deleteRelationshipsSemaphore::release)
.doOnError(throwable -> {
if (throwable instanceof ErrorResponseException && ((ErrorResponseException) throwable).getResponse().getStatusCode() == HttpStatus.SC_NOT_FOUND) {
deleteRelationshipsSemaphore.release();
} else {
System.err.println("List relationships error: " + throwable);
}
})
.subscribe(
relationship -> client.deleteRelationship(twinId, relationship.getId())
.subscribe(
aVoid -> System.out.println("Found and deleted relationship: " + relationship.getId()),
throwable -> System.err.println("Delete relationship error: " + throwable)
));
// Call APIs to delete any incoming relationships.
client.listIncomingRelationships(twinId)
.doOnComplete(deleteRelationshipsSemaphore::release)
.doOnError(throwable -> {
if (throwable instanceof ErrorResponseException && ((ErrorResponseException) throwable).getResponse().getStatusCode() == HttpStatus.SC_NOT_FOUND) {
deleteRelationshipsSemaphore.release();
} else {
System.err.println("List incoming relationships error: " + throwable);
}
})
.subscribe(
incomingRelationship -> client.deleteRelationship(incomingRelationship.getSourceId(), incomingRelationship.getRelationshipId())
.subscribe(
aVoid -> System.out.println("Found and deleted incoming relationship: " + incomingRelationship.getRelationshipId()),
throwable -> System.err.println("Delete incoming relationship error: " + throwable)
));
try {
// Verify that the list relationships and list incoming relationships async operations have completed.
if (deleteRelationshipsSemaphore.tryAcquire(2, MaxWaitTimeAsyncOperationsInSeconds, TimeUnit.SECONDS)) {
// Now the digital twin should be safe to delete
// Call APIs to delete the twins.
client.deleteDigitalTwin(twinId)
.doOnSuccess(aVoid -> {
System.out.println("Deleted digital twin: " + twinId);
deleteTwinsSemaphore.release();
})
.doOnError(throwable -> {
if (throwable instanceof ErrorResponseException && ((ErrorResponseException) throwable).getResponse().getStatusCode() == HttpStatus.SC_NOT_FOUND) {
deleteTwinsSemaphore.release();
} else {
System.err.println("Could not delete digital twin " + twinId + " due to " + throwable);
}
})
.subscribe();
}
} catch (InterruptedException e) {
throw new RuntimeException("Could not cleanup the pre-existing resources: ", e);
}
});
// Verify that a semaphore has been released for each delete async operation, signifying that the async call has completed successfully.
boolean created = deleteTwinsSemaphore.tryAcquire(twins.size(), MaxWaitTimeAsyncOperationsInSeconds, TimeUnit.SECONDS);
System.out.println("Twins deleted: " + created);
}
/**
* Creates all twins specified in the DTDL->DigitalTwins directory.
* @throws IOException If an I/O error is thrown when accessing the starting file.
* @throws InterruptedException If the current thread is interrupted while waiting to acquire permits on a semaphore.
*/
public static void createTwins() throws IOException, InterruptedException {
System.out.println("CREATE DIGITAL TWINS");
Map<String, String> twins = FileHelper.loadAllFilesInPath(TwinsPath);
final Semaphore createTwinsSemaphore = new Semaphore(0);
// Call APIs to create the twins. For each async operation, once the operation is completed successfully, a semaphore is released.
twins
.forEach((twinId, twinContent) -> client.createDigitalTwinWithResponse(twinId, twinContent)
.subscribe(
response -> System.out.println("Created digital twin: " + twinId + "\n\t Body: " + response.getValue()),
throwable -> System.err.println("Could not create digital twin " + twinId + " due to " + throwable),
createTwinsSemaphore::release));
// Verify that a semaphore has been released for each async operation, signifying that the async call has completed successfully.
boolean created = createTwinsSemaphore.tryAcquire(twins.size(), MaxWaitTimeAsyncOperationsInSeconds, TimeUnit.SECONDS);
System.out.println("Twins created: " + created);
}
}
| [
"\"TENANT_ID\"",
"\"CLIENT_ID\"",
"\"CLIENT_SECRET\"",
"\"DIGITAL_TWINS_ENDPOINT\""
] | [] | [
"DIGITAL_TWINS_ENDPOINT",
"CLIENT_SECRET",
"TENANT_ID",
"CLIENT_ID"
] | [] | ["DIGITAL_TWINS_ENDPOINT", "CLIENT_SECRET", "TENANT_ID", "CLIENT_ID"] | java | 4 | 0 | |
src/os/env.go | // Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// General environment variables.
package os
import "syscall"
// Expand replaces ${var} or $var in the string based on the mapping function.
// For example, os.ExpandEnv(s) is equivalent to os.Expand(s, os.Getenv).
func Expand(s string, mapping func(string) string) string {
buf := make([]byte, 0, 2*len(s))
// ${} is all ASCII, so bytes are fine for this operation.
i := 0
for j := 0; j < len(s); j++ {
if s[j] == '$' && j+1 < len(s) {
buf = append(buf, s[i:j]...)
name, w := getShellName(s[j+1:])
buf = append(buf, mapping(name)...)
j += w
i = j + 1
}
}
return string(buf) + s[i:]
}
// ExpandEnv replaces ${var} or $var in the string according to the values
// of the current environment variables. References to undefined
// variables are replaced by the empty string.
func ExpandEnv(s string) string {
return Expand(s, Getenv)
}
// isShellSpecialVar reports whether the character identifies a special
// shell variable such as $*.
func isShellSpecialVar(c uint8) bool {
switch c {
case '*', '#', '$', '@', '!', '?', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
return true
}
return false
}
// isAlphaNum reports whether the byte is an ASCII letter, number, or underscore
func isAlphaNum(c uint8) bool {
return c == '_' || '0' <= c && c <= '9' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z'
}
// getShellName returns the name that begins the string and the number of bytes
// consumed to extract it. If the name is enclosed in {}, it's part of a ${}
// expansion and two more bytes are needed than the length of the name.
func getShellName(s string) (string, int) {
switch {
case s[0] == '{':
if len(s) > 2 && isShellSpecialVar(s[1]) && s[2] == '}' {
return s[1:2], 3
}
// Scan to closing brace
for i := 1; i < len(s); i++ {
if s[i] == '}' {
return s[1:i], i + 1
}
}
return "", 1 // Bad syntax; just eat the brace.
case isShellSpecialVar(s[0]):
return s[0:1], 1
}
// Scan alphanumerics.
var i int
for i = 0; i < len(s) && isAlphaNum(s[i]); i++ {
}
return s[:i], i
}
// Getenv retrieves the value of the environment variable named by the key.
// It returns the value, which will be empty if the variable is not present.
func Getenv(key string) string {
v, _ := syscall.Getenv(key)
return v
}
// LookupEnv retrieves the value of the environment variable named
// by the key. If the variable is present in the environment the
// value (which may be empty) is returned and the boolean is true.
// Otherwise the returned value will be empty and the boolean will
// be false.
func LookupEnv(key string) (string, bool) {
return syscall.Getenv(key)
}
// Setenv sets the value of the environment variable named by the key.
// It returns an error, if any.
func Setenv(key, value string) error {
err := syscall.Setenv(key, value)
if err != nil {
return NewSyscallError("setenv", err)
}
return nil
}
// Unsetenv unsets a single environment variable.
func Unsetenv(key string) error {
return syscall.Unsetenv(key)
}
// Clearenv deletes all environment variables.
func Clearenv() {
syscall.Clearenv()
}
// Environ returns a copy of strings representing the environment,
// in the form "key=value".
func Environ() []string {
return syscall.Environ()
}
| [] | [] | [] | [] | [] | go | 0 | 0 | |
python/paddle/fluid/tests/unittests/test_fleet_base_single.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES')
if cuda_visible_devices is None or cuda_visible_devices == "":
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
else:
os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices.split(',')[0]
import paddle
import paddle.distributed.fleet as fleet
import paddle.distributed.fleet.base.role_maker as role_maker
import paddle.fluid as fluid
import unittest
import paddle.nn as nn
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear1 = nn.Linear(10, 10)
self._linear2 = nn.Linear(10, 1)
def forward(self, x):
return self._linear2(self._linear1(x))
class TestFleetDygraphSingle(unittest.TestCase):
def setUp(self):
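# Configure a single local trainer so that fleet.init(is_collective=True)
# can run in-process without launching a real distributed job.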
os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36213"
os.environ["PADDLE_CURRENT_ENDPOINTS"] = "127.0.0.1:36213"
os.environ["PADDLE_TRAINERS_NUM"] = "1"
os.environ["PADDLE_TRAINER_ID"] = "0"
def test_dygraph_single(self):
paddle.disable_static()
fleet.init(is_collective=True)
layer = LinearNet()
loss_fn = nn.MSELoss()
adam = paddle.optimizer.Adam(
learning_rate=0.001, parameters=layer.parameters())
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
for step in range(2):
inputs = paddle.randn([10, 10], 'float32')
outputs = dp_layer(inputs)
labels = paddle.randn([10, 1], 'float32')
loss = loss_fn(outputs, labels)
loss = dp_layer.scale_loss(loss)
loss.backward()
adam.step()
adam.clear_grad()
class TestFleetBaseSingleRunCollective(unittest.TestCase):
def setUp(self):
pass
def gen_data(self):
return {
"x": np.random.random(size=(128, 32)).astype('float32'),
"y": np.random.randint(
2, size=(128, 1)).astype('int64')
}
def test_single_run_collective_minimize(self):
input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32')
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = fluid.layers.fc(input=input_x, size=64, act='tanh')
prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
avg_cost = paddle.mean(x=cost)
fleet.init(is_collective=True)
optimizer = fluid.optimizer.SGD(learning_rate=0.001)
optimizer = fleet.distributed_optimizer(optimizer)
optimizer.minimize(avg_cost)
place = fluid.CUDAPlace(0) if paddle.fluid.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(paddle.static.default_startup_program())
for i in range(10):
cost_val = exe.run(feed=self.gen_data(), fetch_list=[avg_cost.name])
print("cost of step[{}] = {}".format(i, cost_val))
class TestFleetBaseSingleRunPS(unittest.TestCase):
def setUp(self):
pass
def gen_data(self):
return {
"x": np.random.random(size=(128, 32)).astype('float32'),
"y": np.random.randint(
2, size=(128, 1)).astype('int64')
}
def test_single_run_ps_minimize(self):
input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32')
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = fluid.layers.fc(input=input_x, size=64, act='tanh')
prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
avg_cost = paddle.mean(x=cost)
fleet.init()
strategy = paddle.distributed.fleet.DistributedStrategy()
optimizer = fluid.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
optimizer.minimize(avg_cost)
if fleet.is_server():
fleet.init_server()
fleet.run_server()
elif fleet.is_worker():
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(paddle.static.default_startup_program())
step = 10
for i in range(step):
cost_val = exe.run(program=fluid.default_main_program(),
feed=self.gen_data(),
fetch_list=[avg_cost.name])
print("worker_index: %d, step%d cost = %f" %
(fleet.worker_index(), i, cost_val[0]))
if __name__ == "__main__":
unittest.main()
| [] | [] | [
"PADDLE_TRAINER_ID",
"PADDLE_TRAINER_ENDPOINTS",
"PADDLE_CURRENT_ENDPOINTS",
"CUDA_VISIBLE_DEVICES",
"PADDLE_TRAINERS_NUM"
] | [] | ["PADDLE_TRAINER_ID", "PADDLE_TRAINER_ENDPOINTS", "PADDLE_CURRENT_ENDPOINTS", "CUDA_VISIBLE_DEVICES", "PADDLE_TRAINERS_NUM"] | python | 5 | 0 | |
cli/src/main/java/com/liferay/blade/cli/util/BladeUtil.java | /**
* Copyright (c) 2000-present Liferay, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.liferay.blade.cli.util;
import com.liferay.blade.cli.BladeCLI;
import com.liferay.blade.cli.Extensions;
import com.liferay.blade.cli.command.SamplesCommand;
import com.liferay.blade.cli.command.validator.WorkspaceProductComparator;
import com.liferay.portal.tools.bundle.support.commands.DownloadCommand;
import com.liferay.project.templates.ProjectTemplates;
import com.liferay.project.templates.extensions.util.ProjectTemplatesUtil;
import groovy.json.JsonSlurper;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintStream;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.security.CodeSource;
import java.security.ProtectionDomain;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import java.util.Scanner;
import java.util.function.Predicate;
import java.util.jar.Attributes;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import org.gradle.internal.impldep.com.google.common.base.Strings;
import org.osgi.framework.Version;
/**
* @author Gregory Amerson
* @author David Truong
*/
public class BladeUtil {
public static final String APP_SERVER_PARENT_DIR_PROPERTY = "app.server.parent.dir";
public static final String APP_SERVER_TYPE_PROPERTY = "app.server.type";
public static void addGradleWrapper(File destinationDir) throws Exception {
InputStream inputStream = SamplesCommand.class.getResourceAsStream("/wrapper.zip");
FileUtil.unzip(inputStream, destinationDir);
File gradlewFile = new File(destinationDir, "gradlew");
gradlewFile.setExecutable(true);
}
public static boolean canConnect(String host, int port) {
InetSocketAddress localAddress = new InetSocketAddress(0);
InetSocketAddress remoteAddress = new InetSocketAddress(host, Integer.valueOf(port));
return _canConnect(localAddress, remoteAddress);
}
public static int compareVersions(Version v1, Version v2) {
if (v2 == v1) {
// quicktest
return 0;
}
int result = v1.getMajor() - v2.getMajor();
if (result != 0) {
return result;
}
result = v1.getMinor() - v2.getMinor();
if (result != 0) {
return result;
}
result = v1.getMicro() - v2.getMicro();
if (result != 0) {
return result;
}
String s1 = v1.getQualifier();
return s1.compareTo(v2.getQualifier());
}
public static void downloadGithubProject(String url, Path target) throws IOException {
String zipUrl = url + "/archive/master.zip";
downloadLink(zipUrl, target);
}
public static void downloadLink(String link, Path target) throws IOException {
if (_isURLAvailable(link)) {
LinkDownloader downloader = new LinkDownloader(link, target);
downloader.run();
}
else {
throw new RuntimeException("url '" + link + "' is not accessible.");
}
}
public static File findParentFile(File dir, String[] fileNames, boolean checkParents) {
if (dir == null) {
return null;
}
else if (Objects.equals(".", dir.toString()) || !dir.isAbsolute()) {
try {
dir = dir.getCanonicalFile();
}
catch (Exception exception) {
dir = dir.getAbsoluteFile();
}
}
for (String fileName : fileNames) {
File file = new File(dir, fileName);
if (file.exists()) {
return dir;
}
}
if (checkParents) {
return findParentFile(dir.getParentFile(), fileNames, checkParents);
}
return null;
}
public static List<Properties> getAppServerProperties(File dir) {
File projectRoot = findParentFile(dir, _APP_SERVER_PROPERTIES_FILE_NAMES, true);
List<Properties> properties = new ArrayList<>();
for (String fileName : _APP_SERVER_PROPERTIES_FILE_NAMES) {
File file = new File(projectRoot, fileName);
if (file.exists()) {
properties.add(getProperties(file));
}
}
return properties;
}
public static Path getBladeCachePath() {
File userHome = new File(System.getProperty("user.home"));
Path userHomePath = userHome.toPath();
return userHomePath.resolve(".blade" + File.separator + "cache");
}
public static Path getBladeJarPath() {
try {
ProtectionDomain protectionDomain = BladeCLI.class.getProtectionDomain();
CodeSource codeSource = protectionDomain.getCodeSource();
URL location = codeSource.getLocation();
File file = new File(location.toURI());
return file.toPath();
}
catch (URISyntaxException uriSyntaxException) {
throw new RuntimeException(uriSyntaxException);
}
}
public static String getBundleVersion(Path pathToJar) throws IOException {
return getManifestProperty(pathToJar, "Bundle-Version");
}
public static File getGradleWrapper(File dir) {
File gradleRoot = findParentFile(dir, new String[] {_GRADLEW_UNIX_FILE_NAME, _GRADLEW_WINDOWS_FILE_NAME}, true);
if (gradleRoot != null) {
if (isWindows()) {
return new File(gradleRoot, _GRADLEW_WINDOWS_FILE_NAME);
}
return new File(gradleRoot, _GRADLEW_UNIX_FILE_NAME);
}
return null;
}
public static Map<String, String> getInitTemplates(BladeCLI bladeCLI) throws IOException {
Map<String, String> initTemplates = new HashMap<>();
initTemplates.put("workspace", "Liferay Workspace built with Gradle or Maven.");
Path extensions = bladeCLI.getExtensionsPath();
try {
DirectoryStream<Path> directoryStream = Files.newDirectoryStream(
extensions, "*.project.templates.workspace*");
Iterator<Path> iterator = directoryStream.iterator();
while (iterator.hasNext()) {
Path path = iterator.next();
String fileName = String.valueOf(path.getFileName());
String template = ProjectTemplatesUtil.getTemplateName(fileName);
String bundleDescription = FileUtil.getManifestProperty(path.toFile(), "Bundle-Description");
initTemplates.put(template, bundleDescription);
}
}
catch (IOException ioException) {
}
return initTemplates;
}
public static String getManifestProperty(Path pathToJar, String propertyName) throws IOException {
File file = pathToJar.toFile();
try (JarFile jar = new JarFile(file)) {
Manifest manifest = jar.getManifest();
Attributes attributes = manifest.getMainAttributes();
return attributes.getValue(propertyName);
}
}
public static Map<String, Object> getProductInfos() {
return getProductInfos(false, null);
}
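/**
 * Returns the cached product info map, downloading .product_info.json from the
 * Liferay releases site on first use and falling back to the copy bundled with
 * the CLI when the download fails.
 */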
@SuppressWarnings("unchecked")
public static synchronized Map<String, Object> getProductInfos(boolean trace, PrintStream printStream) {
if (!_productInfoMap.isEmpty()) {
return _productInfoMap;
}
JsonSlurper jsonSlurper = new JsonSlurper();
try {
DownloadCommand downloadCommand = new DownloadCommand();
downloadCommand.setCacheDir(_workspaceCacheDir);
downloadCommand.setConnectionTimeout(5000);
downloadCommand.setPassword(null);
downloadCommand.setToken(false);
downloadCommand.setUrl(new URL(_PRODUCT_INFO_URL));
downloadCommand.setUserName(null);
downloadCommand.setQuiet(true);
downloadCommand.execute();
try (BufferedReader reader = Files.newBufferedReader(downloadCommand.getDownloadPath())) {
_productInfoMap = (Map<String, Object>)jsonSlurper.parse(reader);
}
}
catch (Exception exception1) {
if (trace && (printStream != null)) {
exception1.printStackTrace(printStream);
}
try (InputStream resourceAsStream = BladeUtil.class.getResourceAsStream("/.product_info.json")) {
_productInfoMap = (Map<String, Object>)jsonSlurper.parse(resourceAsStream);
}
catch (Exception exception2) {
if (trace && (printStream != null)) {
exception2.printStackTrace(printStream);
}
}
}
return _productInfoMap;
}
public static Properties getProperties(File file) {
try (InputStream inputStream = new FileInputStream(file)) {
Properties properties = new Properties();
properties.load(inputStream);
return properties;
}
catch (Exception exception) {
return null;
}
}
public static Collection<String> getTemplateNames(BladeCLI blade) throws Exception {
Map<String, String> templates = getTemplates(blade);
return templates.keySet();
}
public static Map<String, String> getTemplates(BladeCLI bladeCLI) throws Exception {
Path extensionsPath = bladeCLI.getExtensionsPath();
Collection<File> templatesFiles = new HashSet<>();
templatesFiles.add(extensionsPath.toFile());
Extensions extensions = bladeCLI.getExtensions();
Path extensionTemplates = extensions.getTemplatesPath();
templatesFiles.add(extensionTemplates.toFile());
return ProjectTemplates.getTemplates(templatesFiles);
}
@SuppressWarnings("unchecked")
public static List<String> getWorkspaceProductKeys(boolean promoted) {
Map<String, Object> productInfos = getProductInfos();
return productInfos.entrySet(
).stream(
).filter(
entry -> Objects.nonNull(productInfos.get(entry.getKey()))
).map(
entry -> new Pair<>(entry.getKey(), new ProductInfo((Map<String, String>)productInfos.get(entry.getKey())))
).filter(
pair -> {
ProductInfo productInfo = pair.second();
return Objects.nonNull(productInfo.getTargetPlatformVersion()) &&
(!promoted || (promoted && productInfo.isPromoted()));
}
).sorted(
new WorkspaceProductComparator()
).map(
Pair::first
).collect(
Collectors.toList()
);
}
public static boolean hasGradleWrapper(File dir) {
File gradlew = new File(dir, _GRADLEW_UNIX_FILE_NAME);
File gradlebat = new File(dir, _GRADLEW_WINDOWS_FILE_NAME);
if (gradlew.exists() && gradlebat.exists()) {
return true;
}
File parent = dir.getParentFile();
if ((parent != null) && parent.exists()) {
return hasGradleWrapper(parent);
}
return false;
}
public static boolean isDirEmpty(final Path directory) throws IOException {
try (DirectoryStream<Path> directoryStream = Files.newDirectoryStream(directory)) {
Iterator<Path> iterator = directoryStream.iterator();
return !iterator.hasNext();
}
}
public static boolean isEmpty(List<?> list) {
if ((list == null) || list.isEmpty()) {
return true;
}
return false;
}
public static boolean isEmpty(Object[] array) {
if ((array == null) || (array.length == 0)) {
return true;
}
return false;
}
public static boolean isEmpty(String string) {
if ((string == null) || string.isEmpty()) {
return true;
}
return false;
}
public static boolean isNotEmpty(List<?> list) {
return !isEmpty(list);
}
public static boolean isNotEmpty(Object[] array) {
return !isEmpty(array);
}
public static boolean isNotEmpty(String string) {
return !isEmpty(string);
}
public static boolean isSafelyRelative(File file, File destDir) {
Path destPath = destDir.toPath();
destPath = destPath.toAbsolutePath();
destPath = destPath.normalize();
Path path = file.toPath();
path = path.toAbsolutePath();
path = path.normalize();
return path.startsWith(destPath);
}
public static boolean isWindows() {
String osName = System.getProperty("os.name");
osName = osName.toLowerCase();
return osName.contains("windows");
}
public static boolean isZipValid(File file) {
try (ZipFile zipFile = new ZipFile(file)) {
return true;
}
catch (IOException ioException) {
return false;
}
}
public static String read(File file) throws IOException {
return new String(Files.readAllBytes(file.toPath()));
}
public static void readProcessStream(final InputStream inputStream, final PrintStream printStream) {
Thread thread = new Thread(
new Runnable() {
@Override
public void run() {
try (Scanner scanner = new Scanner(inputStream)) {
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
if (line != null) {
AnsiLinePrinter.println(printStream, line);
}
}
}
}
});
thread.setDaemon(true);
thread.start();
}
public static boolean searchZip(Path path, Predicate<String> test) {
if (Files.exists(path) && !Files.isDirectory(path)) {
try (ZipFile zipFile = new ZipFile(path.toFile())) {
Stream<? extends ZipEntry> stream = zipFile.stream();
Collection<ZipEntry> entryCollection = stream.collect(Collectors.toSet());
for (ZipEntry zipEntry : entryCollection) {
if (!zipEntry.isDirectory()) {
String entryName = zipEntry.getName();
if (test.test(entryName)) {
return true;
}
}
}
}
catch (Exception exception) {
exception.printStackTrace();
}
}
return false;
}
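/**
 * Configures the process builder to run the given command through the platform
 * shell: cmd.exe /c on Windows, otherwise sh -c with /bin and /usr/local/bin
 * appended to PATH.
 */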
public static void setShell(ProcessBuilder processBuilder, String cmd) {
Map<String, String> env = processBuilder.environment();
List<String> commands = new ArrayList<>();
if (isWindows()) {
commands.add("cmd.exe");
commands.add("/c");
}
else {
env.put("PATH", env.get("PATH") + ":/bin:/usr/local/bin");
commands.add("sh");
commands.add("-c");
}
commands.add(cmd);
processBuilder.command(commands);
}
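/**
 * Normalizes a target platform version string: a dash qualifier in the micro
 * segment is split into its own segment, and a trailing ga/sp/ep/fp qualifier
 * is reduced to its numeric part (for example "ga1" becomes "1").
 */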
public static String simplifyTargetPlatformVersion(String targetPlatformVersion) {
if (targetPlatformVersion == null) {
return null;
}
String[] segments = targetPlatformVersion.split("\\.");
StringBuilder sb = new StringBuilder();
sb.append(segments[0]);
sb.append('.');
sb.append(segments[1]);
sb.append('.');
String micro = segments[2];
int dashPosition = micro.indexOf("-");
if (dashPosition > 0) {
sb.append(micro.substring(0, dashPosition));
if (segments.length == 3) {
sb.append(".");
sb.append(micro.substring(dashPosition + 1));
}
}
else {
sb.append(micro);
}
if (segments.length > 3) {
sb.append(".");
String qualifier = segments[3];
Matcher matcher = _microPattern.matcher(qualifier);
if (matcher.matches() && (matcher.groupCount() >= 5)) {
qualifier = matcher.group(5);
}
if (!Strings.isNullOrEmpty(qualifier)) {
sb.append(qualifier);
}
}
return sb.toString();
}
public static Process startProcess(String command, File workingDir) throws Exception {
return startProcess(command, workingDir, null);
}
public static Process startProcess(String command, File dir, Map<String, String> environment) throws Exception {
ProcessBuilder processBuilder = _buildProcessBuilder(command, dir, environment, true);
Process process = processBuilder.start();
OutputStream outputStream = process.getOutputStream();
outputStream.close();
return process;
}
public static Process startProcess(
String command, File dir, Map<String, String> environment, PrintStream out, PrintStream err)
throws Exception {
ProcessBuilder processBuilder = _buildProcessBuilder(command, dir, environment, false);
Process process = processBuilder.start();
readProcessStream(process.getInputStream(), out);
readProcessStream(process.getErrorStream(), err);
OutputStream outputStream = process.getOutputStream();
outputStream.close();
return process;
}
public static Process startProcess(String command, File dir, PrintStream out, PrintStream err) throws Exception {
return startProcess(command, dir, null, out, err);
}
public static void tail(Path path, PrintStream printStream) throws IOException {
try (BufferedReader input = new BufferedReader(new FileReader(path.toFile()))) {
String currentLine = null;
while (true) {
if ((currentLine = input.readLine()) != null) {
printStream.println(currentLine);
continue;
}
try {
Thread.sleep(1000);
}
catch (InterruptedException interruptedException) {
Thread currentThread = Thread.currentThread();
currentThread.interrupt();
break;
}
}
}
}
public static void writePropertyValue(File propertyFile, String key, String value) throws Exception {
String property = System.lineSeparator() + key + "=" + value;
Files.write(propertyFile.toPath(), property.getBytes(), StandardOpenOption.APPEND);
}
private static ProcessBuilder _buildProcessBuilder(
String command, File dir, Map<String, String> environment, boolean inheritIO) {
ProcessBuilder processBuilder = new ProcessBuilder();
Map<String, String> env = processBuilder.environment();
if (environment != null) {
env.putAll(environment);
}
if ((dir != null) && dir.exists()) {
processBuilder.directory(dir);
}
setShell(processBuilder, command);
if (inheritIO) {
processBuilder.inheritIO();
}
return processBuilder;
}
private static boolean _canConnect(InetSocketAddress localAddress, InetSocketAddress remoteAddress) {
boolean connected = false;
try (Socket socket = new Socket()) {
socket.bind(localAddress);
socket.connect(remoteAddress, 3000);
socket.getInputStream();
connected = true;
}
catch (IOException ioException) {
}
if (connected) {
return true;
}
return false;
}
private static boolean _isURLAvailable(String urlString) throws IOException {
URL url = new URL(urlString);
HttpURLConnection.setFollowRedirects(false);
HttpURLConnection httpURLConnection = (HttpURLConnection)url.openConnection();
httpURLConnection.setRequestMethod("HEAD");
int responseCode = httpURLConnection.getResponseCode();
if ((responseCode == HttpURLConnection.HTTP_OK) || (responseCode == HttpURLConnection.HTTP_MOVED_PERM) ||
(responseCode == HttpURLConnection.HTTP_MOVED_TEMP)) {
return true;
}
return false;
}
private static final String[] _APP_SERVER_PROPERTIES_FILE_NAMES = {
"app.server." + System.getProperty("user.name") + ".properties",
"app.server." + System.getenv("COMPUTERNAME") + ".properties",
"app.server." + System.getenv("HOST") + ".properties",
"app.server." + System.getenv("HOSTNAME") + ".properties", "app.server.properties",
"build." + System.getProperty("user.name") + ".properties",
"build." + System.getenv("COMPUTERNAME") + ".properties", "build." + System.getenv("HOST") + ".properties",
"build." + System.getenv("HOSTNAME") + ".properties", "build.properties"
};
private static final String _DEFAULT_WORKSPACE_CACHE_DIR_NAME = ".liferay/workspace";
private static final String _GRADLEW_UNIX_FILE_NAME = "gradlew";
private static final String _GRADLEW_WINDOWS_FILE_NAME = "gradlew.bat";
private static final String _PRODUCT_INFO_URL = "https://releases.liferay.com/tools/workspace/.product_info.json";
private static final Pattern _microPattern = Pattern.compile("(((e|f|s)p)|(ga))([0-9]+)(-[0-9]+)?");
private static Map<String, Object> _productInfoMap = Collections.emptyMap();
private static File _workspaceCacheDir = new File(
System.getProperty("user.home"), _DEFAULT_WORKSPACE_CACHE_DIR_NAME);
} | [
"\"COMPUTERNAME\"",
"\"HOST\"",
"\"HOSTNAME\"",
"\"COMPUTERNAME\"",
"\"HOST\"",
"\"HOSTNAME\""
] | [] | [
"COMPUTERNAME",
"HOSTNAME",
"HOST"
] | [] | ["COMPUTERNAME", "HOSTNAME", "HOST"] | java | 3 | 0 | |
config/wsgi.py | """
WSGI config for teherangram project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# teherangram directory.
app_path = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'teherangram'))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [] | [] | [
"DJANGO_SETTINGS_MODULE"
] | [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
pattern-proxy/src/main/java/org/kwrx/proxy/FabbricaImmagini.java | /*
* MIT License
*
* Copyright (c) 2020 Antonino Natale
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
package org.kwrx.proxy;
public abstract class FabbricaImmagini {
private static FabbricaImmagini instance;
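// Lazily creates the singleton factory: when the NO_USE_PROXY environment variable
// is set, the factory producing plain images is used; otherwise the proxy-based
// factory is returned.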
public static FabbricaImmagini getInstance() {
if(instance == null) {
instance = (System.getenv("NO_USE_PROXY") != null)
? new FabbricaImmaginiNormali()
: new FabbricaImmaginiProxy();
}
return instance;
}
public abstract ImmagineBase createImage(String url);
}
| [
"\"NO_USE_PROXY\""
] | [] | [
"NO_USE_PROXY"
] | [] | ["NO_USE_PROXY"] | java | 1 | 0 | |
kie-ci/src/main/java/org/kie/scanner/embedder/MavenEmbedder.java | /*
* Copyright 2001-2005 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.scanner.embedder;
import org.apache.maven.DefaultMaven;
import org.apache.maven.Maven;
import org.apache.maven.artifact.InvalidRepositoryException;
import org.apache.maven.artifact.repository.ArtifactRepository;
import org.apache.maven.execution.DefaultMavenExecutionRequest;
import org.apache.maven.execution.DefaultMavenExecutionResult;
import org.apache.maven.execution.MavenExecutionRequest;
import org.apache.maven.execution.MavenExecutionRequestPopulationException;
import org.apache.maven.execution.MavenExecutionRequestPopulator;
import org.apache.maven.execution.MavenSession;
import org.apache.maven.model.Profile;
import org.apache.maven.model.building.ModelSource;
import org.apache.maven.plugin.LegacySupport;
import org.apache.maven.project.MavenProject;
import org.apache.maven.project.ProjectBuilder;
import org.apache.maven.project.ProjectBuildingException;
import org.apache.maven.project.ProjectBuildingRequest;
import org.apache.maven.project.ProjectBuildingResult;
import org.apache.maven.repository.RepositorySystem;
import org.apache.maven.settings.Settings;
import org.apache.maven.settings.building.DefaultSettingsBuildingRequest;
import org.apache.maven.settings.building.SettingsBuilder;
import org.apache.maven.settings.building.SettingsBuildingException;
import org.apache.maven.settings.building.SettingsBuildingRequest;
import org.codehaus.plexus.component.repository.exception.ComponentLookupException;
import org.codehaus.plexus.logging.Logger;
import org.codehaus.plexus.util.Os;
import org.eclipse.aether.RepositorySystemSession;
import org.kie.scanner.MavenRepositoryConfiguration;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map.Entry;
import java.util.Properties;
public class MavenEmbedder {
public static final File DEFAULT_GLOBAL_SETTINGS_FILE =
new File( System.getProperty( "maven.home", System.getProperty( "user.dir", "" ) ), "conf/settings.xml" );
private final MavenRequest mavenRequest;
private final ComponentProvider componentProvider;
private MavenExecutionRequest mavenExecutionRequest;
public MavenEmbedder( MavenRequest mavenRequest ) throws MavenEmbedderException {
this( Thread.currentThread().getContextClassLoader(), null, mavenRequest );
}
public MavenEmbedder( ClassLoader mavenClassLoader,
ClassLoader parent,
MavenRequest mavenRequest ) throws MavenEmbedderException {
this( mavenRequest, MavenEmbedderUtils.buildComponentProvider( mavenClassLoader, parent, mavenRequest ) );
}
private MavenEmbedder( MavenRequest mavenRequest,
ComponentProvider componentProvider ) throws MavenEmbedderException {
this.mavenRequest = mavenRequest;
this.componentProvider = componentProvider;
try {
this.mavenExecutionRequest = this.buildMavenExecutionRequest( mavenRequest );
RepositorySystemSession rss = ( (DefaultMaven) componentProvider.lookup( Maven.class ) ).newRepositorySession( mavenExecutionRequest );
MavenSession mavenSession = new MavenSession( componentProvider.getPlexusContainer(), rss, mavenExecutionRequest, new DefaultMavenExecutionResult() );
componentProvider.lookup( LegacySupport.class ).setSession( mavenSession );
} catch ( MavenEmbedderException e ) {
throw new MavenEmbedderException( e.getMessage(), e );
} catch ( ComponentLookupException e ) {
throw new MavenEmbedderException( e.getMessage(), e );
}
}
protected MavenExecutionRequest buildMavenExecutionRequest( MavenRequest mavenRequest )
throws MavenEmbedderException, ComponentLookupException {
MavenExecutionRequest mavenExecutionRequest = new DefaultMavenExecutionRequest();
if ( mavenRequest.getGlobalSettingsFile() != null ) {
mavenExecutionRequest.setGlobalSettingsFile( new File( mavenRequest.getGlobalSettingsFile() ) );
}
if ( mavenRequest.getUserSettingsFile() != null ) {
mavenExecutionRequest.setUserSettingsFile( new File( mavenRequest.getUserSettingsFile() ) );
}
try {
componentProvider.lookup( MavenExecutionRequestPopulator.class ).populateFromSettings( mavenExecutionRequest, getSettings() );
componentProvider.lookup( MavenExecutionRequestPopulator.class ).populateDefaults( mavenExecutionRequest );
} catch ( MavenExecutionRequestPopulationException e ) {
throw new MavenEmbedderException( e.getMessage(), e );
}
ArtifactRepository localRepository = getLocalRepository();
mavenExecutionRequest.setLocalRepository( localRepository );
mavenExecutionRequest.setLocalRepositoryPath( localRepository.getBasedir() );
mavenExecutionRequest.setOffline( mavenRequest.isOffline() );
mavenExecutionRequest.setUpdateSnapshots( mavenRequest.isUpdateSnapshots() );
// TODO check null and create a console one ?
mavenExecutionRequest.setTransferListener( mavenRequest.getTransferListener() );
mavenExecutionRequest.setCacheNotFound( mavenRequest.isCacheNotFound() );
mavenExecutionRequest.setCacheTransferError( true );
mavenExecutionRequest.setUserProperties( mavenRequest.getUserProperties() );
mavenExecutionRequest.getSystemProperties().putAll( System.getProperties() );
if ( mavenRequest.getSystemProperties() != null ) {
mavenExecutionRequest.getSystemProperties().putAll( mavenRequest.getSystemProperties() );
}
mavenExecutionRequest.getSystemProperties().putAll( getEnvVars() );
if ( mavenRequest.getProfiles() != null && !mavenRequest.getProfiles().isEmpty() ) {
for ( String id : mavenRequest.getProfiles() ) {
Profile p = new Profile();
p.setId( id );
p.setSource( "cli" );
mavenExecutionRequest.addProfile( p );
mavenExecutionRequest.addActiveProfile( id );
}
}
//DROOLS-899: Copy repositories defined in settings to execution request
for ( ArtifactRepository artifactRepository : getMavenRepositoryConfiguration().getArtifactRepositoriesForRequest() ) {
mavenExecutionRequest.addRemoteRepository( artifactRepository );
}
mavenExecutionRequest.setLoggingLevel( mavenRequest.getLoggingLevel() );
componentProvider.lookup( Logger.class ).setThreshold( mavenRequest.getLoggingLevel() );
mavenExecutionRequest.setExecutionListener( mavenRequest.getExecutionListener() )
.setInteractiveMode( mavenRequest.isInteractive() )
.setGlobalChecksumPolicy( mavenRequest.getGlobalChecksumPolicy() )
.setGoals( mavenRequest.getGoals() );
if ( mavenRequest.getPom() != null ) {
mavenExecutionRequest.setPom( new File( mavenRequest.getPom() ) );
}
if ( mavenRequest.getWorkspaceReader() != null ) {
mavenExecutionRequest.setWorkspaceReader( mavenRequest.getWorkspaceReader() );
}
return mavenExecutionRequest;
}
protected MavenRepositoryConfiguration getMavenRepositoryConfiguration() {
return MavenSettings.getMavenRepositoryConfiguration();
}
private Properties getEnvVars() {
Properties envVars = new Properties();
boolean caseSensitive = !Os.isFamily( Os.FAMILY_WINDOWS );
for ( Entry<String, String> entry : System.getenv().entrySet() ) {
String key = "env." + ( caseSensitive ? entry.getKey() : entry.getKey().toUpperCase( Locale.ENGLISH ) );
envVars.setProperty( key, entry.getValue() );
}
return envVars;
}
public Settings getSettings() throws MavenEmbedderException, ComponentLookupException {
SettingsBuildingRequest settingsBuildingRequest = new DefaultSettingsBuildingRequest();
if ( this.mavenRequest.getGlobalSettingsFile() != null ) {
settingsBuildingRequest.setGlobalSettingsFile( new File( this.mavenRequest.getGlobalSettingsFile() ) );
} else {
settingsBuildingRequest.setGlobalSettingsFile( DEFAULT_GLOBAL_SETTINGS_FILE );
}
if ( this.mavenRequest.getUserSettingsFile() != null ) {
settingsBuildingRequest.setUserSettingsFile( new File( this.mavenRequest.getUserSettingsFile() ) );
} else {
File userSettingsFile = MavenSettings.getUserSettingsFile();
if ( userSettingsFile != null ) {
settingsBuildingRequest.setUserSettingsFile( userSettingsFile );
}
}
settingsBuildingRequest.setUserProperties( this.mavenRequest.getUserProperties() );
settingsBuildingRequest.getSystemProperties().putAll( System.getProperties() );
settingsBuildingRequest.getSystemProperties().putAll( this.mavenRequest.getSystemProperties() );
settingsBuildingRequest.getSystemProperties().putAll( getEnvVars() );
try {
return componentProvider.lookup( SettingsBuilder.class ).build( settingsBuildingRequest ).getEffectiveSettings();
} catch ( SettingsBuildingException e ) {
throw new MavenEmbedderException( e.getMessage(), e );
}
}
public ArtifactRepository getLocalRepository() throws ComponentLookupException {
try {
String localRepositoryPath = getLocalRepositoryPath();
if ( localRepositoryPath != null ) {
return componentProvider.lookup( RepositorySystem.class ).createLocalRepository( new File( localRepositoryPath ) );
}
return componentProvider.lookup( RepositorySystem.class ).createLocalRepository( RepositorySystem.defaultUserLocalRepository );
} catch ( InvalidRepositoryException e ) {
// should never happen
throw new IllegalStateException( e );
}
}
public String getLocalRepositoryPath() {
String path = null;
try {
Settings settings = getSettings();
path = settings.getLocalRepository();
} catch ( MavenEmbedderException e ) {
// ignore
} catch ( ComponentLookupException e ) {
// ignore
}
if ( this.mavenRequest.getLocalRepositoryPath() != null ) {
path = this.mavenRequest.getLocalRepositoryPath();
}
if ( path == null ) {
path = RepositorySystem.defaultUserLocalRepository.getAbsolutePath();
}
return path;
}
// ----------------------------------------------------------------------
// Project
// ----------------------------------------------------------------------
public MavenProject readProject( final InputStream mavenProjectStream ) throws ProjectBuildingException, MavenEmbedderException {
ModelSource modelSource = new ModelSource() {
@Override
public InputStream getInputStream() throws IOException {
return mavenProjectStream;
}
@Override
public String getLocation() {
return "";
}
};
ClassLoader originalCl = Thread.currentThread().getContextClassLoader();
try {
Thread.currentThread().setContextClassLoader( componentProvider.getSystemClassLoader() );
ProjectBuilder projectBuilder = componentProvider.lookup( ProjectBuilder.class );
// BZ-1007894: Check if added dependencies are resolvable.
ProjectBuildingResult result = projectBuilder.build( modelSource, getProjectBuildingRequest() );
if ( result != null && result.getDependencyResolutionResult() != null && !result.getDependencyResolutionResult().getCollectionErrors().isEmpty() ) {
// A dependency resolution error has been produced. It can contain several errors; throw the first one to the client so the user can fix them one at a time.
Exception dependencyResolutionException = result.getDependencyResolutionResult().getCollectionErrors().get( 0 );
if ( dependencyResolutionException != null ) {
throw new MavenEmbedderException( dependencyResolutionException.getMessage(), dependencyResolutionException );
}
}
return result.getProject();
} catch ( ComponentLookupException e ) {
throw new MavenEmbedderException( e.getMessage(), e );
} finally {
Thread.currentThread().setContextClassLoader( originalCl );
try {
mavenProjectStream.close();
} catch ( IOException e ) {
}
}
}
public MavenProject readProject( File mavenProject ) throws ProjectBuildingException, MavenEmbedderException {
List<MavenProject> projects = readProjects( mavenProject, false );
return projects == null || projects.isEmpty() ? null : projects.get( 0 );
}
public List<MavenProject> readProjects( File mavenProject,
boolean recursive ) throws ProjectBuildingException, MavenEmbedderException {
ClassLoader originalCl = Thread.currentThread().getContextClassLoader();
try {
Thread.currentThread().setContextClassLoader( componentProvider.getSystemClassLoader() );
List<ProjectBuildingResult> results = buildProjects( mavenProject, recursive );
List<MavenProject> projects = new ArrayList<MavenProject>( results.size() );
for ( ProjectBuildingResult result : results ) {
projects.add( result.getProject() );
}
return projects;
} finally {
Thread.currentThread().setContextClassLoader( originalCl );
}
}
public List<ProjectBuildingResult> buildProjects( File mavenProject,
boolean recursive ) throws ProjectBuildingException, MavenEmbedderException {
ClassLoader originalCl = Thread.currentThread().getContextClassLoader();
try {
Thread.currentThread().setContextClassLoader( componentProvider.getSystemClassLoader() );
ProjectBuilder projectBuilder = componentProvider.lookup( ProjectBuilder.class );
return projectBuilder.build( Collections.singletonList( mavenProject ), recursive, getProjectBuildingRequest() );
} catch ( ComponentLookupException e ) {
throw new MavenEmbedderException( e.getMessage(), e );
} finally {
Thread.currentThread().setContextClassLoader( originalCl );
}
}
private ProjectBuildingRequest getProjectBuildingRequest() throws ComponentLookupException {
ProjectBuildingRequest projectBuildingRequest = this.mavenExecutionRequest.getProjectBuildingRequest();
projectBuildingRequest.setValidationLevel( this.mavenRequest.getValidationLevel() );
RepositorySystemSession repositorySystemSession = componentProvider.getRepositorySystemSession( mavenExecutionRequest );
projectBuildingRequest.setRepositorySession( repositorySystemSession );
projectBuildingRequest.setProcessPlugins( this.mavenRequest.isProcessPlugins() );
projectBuildingRequest.setResolveDependencies( this.mavenRequest.isResolveDependencies() );
return projectBuildingRequest;
}
}
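// Illustrative usage sketch (assumes MavenRequest exposes setters matching the getters used
// above; paths are placeholders, not part of this class):
//
//   MavenRequest request = new MavenRequest();
//   request.setPom("/path/to/pom.xml");
//   MavenEmbedder embedder = new MavenEmbedder(request);
//   MavenProject project = embedder.readProject(new File(request.getPom()));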
| [] | [] | [] | [] | [] | java | 0 | 0 | |
tools/test-proxy/sample-clients/python/pytest-httpsresponse/RecordedByProxy.py | import requests
import functools
import os
from contextlib import contextmanager
import pdb
PROXY_URL = "http://localhost:5000"
RECORDING_START_URL = "{}/record/start".format(PROXY_URL)
RECORDING_STOP_URL = "{}/record/stop".format(PROXY_URL)
PLAYBACK_START_URL = "{}/playback/start".format(PROXY_URL)
PLAYBACK_STOP_URL = "{}/playback/stop".format(PROXY_URL)
TEST_FILE_FORMAT = "recordings/{}.txt"
def write_recording_id(test_id, recording_id):
try:
os.makedirs("recordings")
except:
pass
with open(TEST_FILE_FORMAT.format(test_id), "w") as f:
f.write(recording_id)
def get_recording_id(test_id):
with open(TEST_FILE_FORMAT.format(test_id), "r") as f:
result = f.readline()
return result.strip()
# This is the specific patching implementation; it needs to be updated for whichever HTTP methodology is in use.
# Not everyone uses requests, and not everyone uses HttpResponse.
@contextmanager
def patch_requests_func(request_transform):
original_func = requests.get
def combined_call(*args, **kwargs):
adjusted_args, adjusted_kwargs = request_transform(*args, **kwargs)
return original_func(*adjusted_args, **adjusted_kwargs)
requests.get = combined_call
yield None
requests.get = original_func
def RecordedByProxy(func):
@functools.wraps(func)
def record_wrap(*args, **kwargs):
recording_id = ""
test_id = func.__name__
if os.getenv("AZURE_RECORD_MODE") == "record":
result = requests.post(
RECORDING_START_URL, headers={"x-recording-file": test_id}, verify=False
)
recording_id = result.headers["x-recording-id"]
elif os.getenv("AZURE_RECORD_MODE") == "playback":
result = requests.post(
PLAYBACK_START_URL,
headers={"x-recording-file": test_id, "x-recording-id": recording_id},
verify=False,
)
recording_id = result.headers["x-recording-id"]
def transform_args(*args, **kwargs):
copied_positional_args = list(args)
headers = {}
if "headers" in kwargs:
headers = kwargs["headers"]
else:
kwargs["headers"] = headers
# we do not want to verify, otherwise https to the local server will fail
kwargs["verify"] = False
# in recording, we want to forward the request with record mode of record
if os.getenv("AZURE_RECORD_MODE") == "record":
upstream_url = copied_positional_args[0]
headers["x-recording-upstream-base-uri"] = upstream_url
headers["x-recording-id"] = recording_id
headers["x-recording-mode"] = "record"
copied_positional_args[0] = PROXY_URL
# otherwise we want to forward the request with record mode of playback
elif os.getenv("AZURE_RECORD_MODE") == "playback":
upstream_url = copied_positional_args[0]
headers["x-recording-upstream-base-uri"] = upstream_url
headers["x-recording-id"] = recording_id
headers["x-recording-mode"] = "playback"
copied_positional_args[0] = PROXY_URL
return tuple(copied_positional_args), kwargs
with patch_requests_func(transform_args):
value = func(*args, **kwargs)
if os.getenv("AZURE_RECORD_MODE") == "record":
result = requests.post(
RECORDING_STOP_URL,
headers={"x-recording-file": test_id, "x-recording-id": recording_id, "x-recording-save": "true"},
verify=False,
)
write_recording_id(test_id, recording_id)
elif os.getenv("AZURE_RECORD_MODE") == "playback":
result = requests.post(
PLAYBACK_STOP_URL,
headers={"x-recording-file": test_id, "x-recording-id": recording_id},
verify=False,
)
return value
return record_wrap
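# Illustrative usage sketch (the test name and target URL below are assumptions, not part of
# this sample client): decorate a test, then run it once with AZURE_RECORD_MODE=record and
# again with AZURE_RECORD_MODE=playback.
#
#   @RecordedByProxy
#   def test_fetch_status():
#       response = requests.get("https://example.invalid/status")
#       assert response.status_code == 200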
| [] | [] | ["AZURE_RECORD_MODE"] | [] | ["AZURE_RECORD_MODE"] | python | 1 | 0 |
acceptance/openstack/networking/v2/extensions/quotas/quotas_test.go | //go:build acceptance || networking || quotas
// +build acceptance networking quotas
package quotas
import (
"log"
"os"
"reflect"
"testing"
"github.com/nexclipper/gophercloud/acceptance/clients"
"github.com/nexclipper/gophercloud/acceptance/tools"
"github.com/nexclipper/gophercloud/openstack/networking/v2/extensions/quotas"
th "github.com/nexclipper/gophercloud/testhelper"
)
func TestQuotasGet(t *testing.T) {
clients.RequireAdmin(t)
client, err := clients.NewNetworkV2Client()
th.AssertNoErr(t, err)
quotasInfo, err := quotas.Get(client, os.Getenv("OS_PROJECT_NAME")).Extract()
th.AssertNoErr(t, err)
tools.PrintResource(t, quotasInfo)
}
func TestQuotasUpdate(t *testing.T) {
clients.RequireAdmin(t)
client, err := clients.NewNetworkV2Client()
th.AssertNoErr(t, err)
originalQuotas, err := quotas.Get(client, os.Getenv("OS_PROJECT_NAME")).Extract()
th.AssertNoErr(t, err)
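	// The original test's updateOpts literal is not included in this excerpt, so the
	// definition below is an illustrative sketch: UpdateOpts takes *int fields (as the
	// restore block further down shows), and any small non-default limits will do.
	floatingIPLimit, networkLimit := 10, 15
	updateOpts := quotas.UpdateOpts{
		FloatingIP: &floatingIPLimit,
		Network:    &networkLimit,
	}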
newQuotas, err := quotas.Update(client, os.Getenv("OS_PROJECT_NAME"), updateOpts).Extract()
th.AssertNoErr(t, err)
tools.PrintResource(t, newQuotas)
if reflect.DeepEqual(originalQuotas, newQuotas) {
log.Fatal("Original and New Networking Quotas are the same")
}
// Restore original quotas.
restoredQuotas, err := quotas.Update(client, os.Getenv("OS_PROJECT_NAME"), quotas.UpdateOpts{
FloatingIP: &originalQuotas.FloatingIP,
Network: &originalQuotas.Network,
Port: &originalQuotas.Port,
RBACPolicy: &originalQuotas.RBACPolicy,
Router: &originalQuotas.Router,
SecurityGroup: &originalQuotas.SecurityGroup,
SecurityGroupRule: &originalQuotas.SecurityGroupRule,
Subnet: &originalQuotas.Subnet,
SubnetPool: &originalQuotas.SubnetPool,
}).Extract()
th.AssertNoErr(t, err)
th.AssertDeepEquals(t, originalQuotas, restoredQuotas)
tools.PrintResource(t, restoredQuotas)
}
| ["\"OS_PROJECT_NAME\"", "\"OS_PROJECT_NAME\"", "\"OS_PROJECT_NAME\"", "\"OS_PROJECT_NAME\""] | [] | ["OS_PROJECT_NAME"] | [] | ["OS_PROJECT_NAME"] | go | 1 | 0 |
replayer-agent/install/codeCov/main_test.go | package main
import (
"flag"
"os"
"os/signal"
"sync"
"syscall"
"testing"
"time"
_ "github.com/light-pan/sharingan"
)
const waitFlagParseTime = 10
var endRunning chan struct{}
var callback sync.Once
func stop() {
endRunning <- struct{}{}
}
func signalHandler() {
// Define and listen for kill signals: on ^C or SIGTERM
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-sigc
callback.Do(stop)
}()
}
// TestMain Test started when the test binary is started. Only calls main.
func TestMain(m *testing.M) {
if os.Getenv("BAN_SYSTEM_TEST") == "1" {
// Original test flow
os.Exit(m.Run())
return
}
endRunning = make(chan struct{}, 1)
signalHandler()
go func() {
main()
callback.Do(stop)
}()
// Maximum waiting time (10s) for flag.Parse.
// If flag.Parse has still not been executed after 10 seconds, check the logic in your main function.
checkTime := time.After(waitFlagParseTime * time.Second)
for {
if flag.Parsed() {
break
}
select {
case <-checkTime:
if !flag.Parsed() {
flag.Parse()
}
break
case <-endRunning:
os.Exit(m.Run())
return
default:
time.Sleep(200 * time.Millisecond)
}
}
<-endRunning
os.Exit(m.Run())
}
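// Illustrative usage sketch: build a coverage-instrumented binary and run it as the service,
// then collect the profile when the process receives SIGINT/SIGTERM. The flags below are
// standard `go test` flags; the output names and package pattern are assumptions.
//
//   go test -c -covermode=count -coverpkg=./... -o replayer-agent.test
//   ./replayer-agent.test -test.coverprofile=coverage.out
//
// Set BAN_SYSTEM_TEST=1 to skip the main() wrapper and run the unit tests directly.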
| ["\"BAN_SYSTEM_TEST\""] | [] | ["BAN_SYSTEM_TEST"] | [] | ["BAN_SYSTEM_TEST"] | go | 1 | 0 |
serial_scripts/floatingip/test_mx.py | from __future__ import absolute_import
# Need to import path to test/fixtures and test/scripts/
# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/'
#
# To run tests, you can do 'python -m testtools.run mx_tests'. To run specific tests,
# You can do 'python -m testtools.run -l mx_test'
# Set the env variable PARAMS_FILE to point to your ini file. Else it will try to pick params.ini in PWD
# Set the env variable MX_GW_TEST to 1 to run the test
#
from . import base
import os
from common.openstack_libs import nova_client as mynovaclient
from common.openstack_libs import nova_exception as novaException
import unittest
import fixtures
import testtools
import socket
import test
from common.contrail_test_init import ContrailTestInit
from vn_test import *
from quantum_test import *
from vnc_api_test import *
from nova_test import *
from vm_test import *
from common.connections import ContrailConnections
from floating_ip import *
from policy_test import *
from contrail_fixtures import *
from control_node import *
from tcutils.wrappers import preposttest_wrapper
class TestSerialSanity_MX(base.FloatingIpBaseTest):
@classmethod
def setUpClass(cls):
super(TestSerialSanity_MX, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestSerialSanity_MX, cls).tearDownClass()
def is_test_applicable(self):
if os.environ.get('MX_GW_TEST') != '1':
return (False, 'Skipping Test. Env variable MX_GW_TEST is not set')
return (True, None)
@test.attr(type=['mx_test', 'serial', 'sanity', 'vcenter', 'vcenter_compute'])
@preposttest_wrapper
def test_change_of_rt_in_vn(self):
'''
Verify the impact of change in route target of a vn
Test Steps:
1. Test configuration is similar to (test_mx_gateway)
2. In this test, first configure the public100 VN with a wrong route target value (mismatched with the MX)
3. Check that communication outside the virtual network cluster fails
4. Modify the route target value (matching the MX)
5.Communication should pass
Pass criteria: Step 3 and 5 should pass.
Maintainer: chhandak@juniper.net
'''
result = True
fip_pool_name = self.inputs.fip_pool_name
vm1_name = 'vm200'
vn1_name = get_random_name('vn200')
vn1_subnets = ['12.1.1.0/24']
mx_rt = self.inputs.mx_rt
mx_rt_wrong = '11111'
vn1_fixture = self.useFixture(
VNFixture(project_name=self.inputs.project_name,
connections=self.connections, vn_name=vn1_name, inputs=self.inputs, subnets=vn1_subnets))
assert vn1_fixture.verify_on_setup()
vm1_fixture = self.useFixture(
VMFixture(project_name=self.inputs.project_name,
connections=self.connections, vn_obj=vn1_fixture.obj, vm_name=vm1_name))
vm1_fixture.wait_till_vm_is_up()
assert vm1_fixture.verify_on_setup()
# Delete the correct RT value and add the wrong one.
routing_instance = self.public_vn_obj.public_vn_fixture.ri_name
self.public_vn_obj.public_vn_fixture.del_route_target(
routing_instance, self.inputs.router_asn, mx_rt)
sleep(2)
self.public_vn_obj.public_vn_fixture.add_route_target(
routing_instance, self.inputs.router_asn, mx_rt_wrong)
sleep(10)
# Adding further projects to floating IP.
self.logger.info('Adding project %s to FIP pool %s' %
(self.inputs.project_name, fip_pool_name))
project_obj = self.public_vn_obj.fip_fixture.assoc_project\
(self.inputs.project_name)
fip_id = self.public_vn_obj.fip_fixture.create_and_assoc_fip(
self.public_vn_obj.public_vn_fixture.vn_id, vm1_fixture.vm_id, project_obj)
self.addCleanup(self.public_vn_obj.fip_fixture.disassoc_and_delete_fip, fip_id)
assert self.public_vn_obj.fip_fixture.verify_fip(fip_id, vm1_fixture,
self.public_vn_obj.public_vn_fixture)
self.logger.info(
"BGP Peer configuraion done and trying to outside the VN cluster")
if not vm1_fixture.ping_to_ip('www-int.juniper.net'):
self.logger.info(
"Here ping should fail as VN is configured with wrong RT values" )
else:
self.logger.error(
"Ping should fail. But ping is successful even with wrong RT values")
result = result and False
# Change the RT value to correct one.
routing_instance = self.public_vn_obj.public_vn_fixture.ri_name
self.public_vn_obj.public_vn_fixture.del_route_target(
routing_instance, self.inputs.router_asn, mx_rt_wrong)
sleep(2)
self.public_vn_obj.public_vn_fixture.add_route_target(
routing_instance, self.inputs.router_asn, mx_rt)
sleep(10)
self.logger.info("Now trying to ping %s" % (self.inputs.public_host))
if not vm1_fixture.ping_with_certainty(self.inputs.public_host):
result = result and False
# Reverting the RT value for fixture cleanup.
self.public_vn_obj.public_vn_fixture.del_route_target(
routing_instance, self.inputs.router_asn, mx_rt)
sleep(2)
self.public_vn_obj.public_vn_fixture.add_route_target(
routing_instance, self.inputs.router_asn, mx_rt_wrong)
# Removing further projects from floating IP pool. For cleanup
self.logger.info('Removing project %s from FIP pool %s' %
(self.inputs.project_name, fip_pool_name))
project_obj = self.public_vn_obj.fip_fixture.deassoc_project\
(self.inputs.project_name)
if not result:
self.logger.error(
'Test ping outside VN cluster from VM %s failed' % (vm1_name))
assert result
return True
# end test_change_of_rt_in_vn
| [] | [] | ["MX_GW_TEST"] | [] | ["MX_GW_TEST"] | python | 1 | 0 |
batch/batch/cloud/azure/worker/worker_api.py | import abc
import os
from typing import Dict, Optional, Tuple
import aiohttp
from hailtop import httpx
from hailtop.utils import request_retry_transient_errors, time_msecs
from ....worker.worker_api import CloudWorkerAPI
from ..instance_config import AzureSlimInstanceConfig
from .credentials import AzureUserCredentials
from .disk import AzureDisk
class AzureWorkerAPI(CloudWorkerAPI):
@staticmethod
def from_env():
subscription_id = os.environ['SUBSCRIPTION_ID']
resource_group = os.environ['RESOURCE_GROUP']
acr_url = os.environ['DOCKER_PREFIX']
assert acr_url.endswith('azurecr.io'), acr_url
return AzureWorkerAPI(subscription_id, resource_group, acr_url)
def __init__(self, subscription_id: str, resource_group: str, acr_url: str):
self.subscription_id = subscription_id
self.resource_group = resource_group
self.acr_refresh_token = AcrRefreshToken(acr_url, AadAccessToken())
@property
def nameserver_ip(self):
return '168.63.129.16'
def create_disk(self, instance_name: str, disk_name: str, size_in_gb: int, mount_path: str) -> AzureDisk:
return AzureDisk(disk_name, instance_name, size_in_gb, mount_path)
def user_credentials(self, credentials: Dict[str, bytes]) -> AzureUserCredentials:
return AzureUserCredentials(credentials)
async def worker_access_token(self, session: httpx.ClientSession) -> Dict[str, str]:
# https://docs.microsoft.com/en-us/azure/container-registry/container-registry-authentication?tabs=azure-cli#az-acr-login-with---expose-token
return {
'username': '00000000-0000-0000-0000-000000000000',
'password': await self.acr_refresh_token.token(session),
}
def instance_config_from_config_dict(self, config_dict: Dict[str, str]) -> AzureSlimInstanceConfig:
return AzureSlimInstanceConfig.from_dict(config_dict)
def write_cloudfuse_credentials(self, root_dir: str, credentials: str, bucket: str) -> str:
path = f'{root_dir}/cloudfuse/{bucket}/credentials'
os.makedirs(os.path.dirname(path))
with open(path, 'w', encoding='utf-8') as f:
f.write(credentials)
return path
def _mount_cloudfuse(
self, fuse_credentials_path: str, mount_base_path_data: str, mount_base_path_tmp: str, config: dict
) -> str:
# https://docs.microsoft.com/en-us/azure/storage/blobs/storage-how-to-mount-container-linux#mount
bucket = config['bucket']
account, container = bucket.split('/', maxsplit=1)
assert account and container
options = ['allow_other']
if config['read_only']:
options.append('ro')
return f'''
blobfuse \
{mount_base_path_data} \
--tmp-path={mount_base_path_tmp} \
--config-file={fuse_credentials_path} \
--pre-mount-validate=true \
-o {",".join(options)} \
-o attr_timeout=240 \
-o entry_timeout=240 \
-o negative_timeout=120
'''
def _unmount_cloudfuse(self, mount_base_path: str) -> str:
return f'''
fusermount -u {mount_base_path} # blobfuse cleans up the temporary directory when unmounting
'''
def __str__(self):
return f'subscription_id={self.subscription_id} resource_group={self.resource_group}'
class LazyShortLivedToken(abc.ABC):
def __init__(self):
self._token: Optional[str] = None
self._expiration_time_ms: Optional[int] = None
async def token(self, session: httpx.ClientSession) -> str:
now = time_msecs()
if not self._expiration_time_ms or now >= self._expiration_time_ms:
self._token, self._expiration_time_ms = await self._fetch(session)
assert self._token
return self._token
@abc.abstractmethod
async def _fetch(self, session: httpx.ClientSession) -> Tuple[str, int]:
raise NotImplementedError()
class AadAccessToken(LazyShortLivedToken):
async def _fetch(self, session: httpx.ClientSession) -> Tuple[str, int]:
# https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#get-a-token-using-http
params = {'api-version': '2018-02-01', 'resource': 'https://management.azure.com/'}
async with await request_retry_transient_errors(
session,
'GET',
'http://169.254.169.254/metadata/identity/oauth2/token',
headers={'Metadata': 'true'},
params=params,
timeout=aiohttp.ClientTimeout(total=60), # type: ignore
) as resp:
resp_json = await resp.json()
access_token: str = resp_json['access_token']
expiration_time_ms = int(resp_json['expires_on']) * 1000
return access_token, expiration_time_ms
class AcrRefreshToken(LazyShortLivedToken):
def __init__(self, acr_url: str, aad_access_token: AadAccessToken):
super().__init__()
self.acr_url: str = acr_url
self.aad_access_token: AadAccessToken = aad_access_token
async def _fetch(self, session: httpx.ClientSession) -> Tuple[str, int]:
# https://github.com/Azure/acr/blob/main/docs/AAD-OAuth.md#calling-post-oauth2exchange-to-get-an-acr-refresh-token
data = {
'grant_type': 'access_token',
'service': self.acr_url,
'access_token': await self.aad_access_token.token(session),
}
async with await request_retry_transient_errors(
session,
'POST',
f'https://{self.acr_url}/oauth2/exchange',
headers={'Content-Type': 'application/x-www-form-urlencoded'},
data=data,
timeout=aiohttp.ClientTimeout(total=60), # type: ignore
) as resp:
refresh_token: str = (await resp.json())['refresh_token']
expiration_time_ms = time_msecs() + 60 * 60 * 1000 # token expires in 3 hours so we refresh after 1 hour
return refresh_token, expiration_time_ms
| [] | [] | ["RESOURCE_GROUP", "SUBSCRIPTION_ID", "DOCKER_PREFIX"] | [] | ["RESOURCE_GROUP", "SUBSCRIPTION_ID", "DOCKER_PREFIX"] | python | 3 | 0 |
cmd/pipelineCreateScanSummary_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type pipelineCreateScanSummaryOptions struct {
FailedOnly bool `json:"failedOnly,omitempty"`
OutputFilePath string `json:"outputFilePath,omitempty"`
PipelineLink string `json:"pipelineLink,omitempty"`
}
// PipelineCreateScanSummaryCommand Collect scan result information and create a summary report
func PipelineCreateScanSummaryCommand() *cobra.Command {
const STEP_NAME = "pipelineCreateScanSummary"
metadata := pipelineCreateScanSummaryMetadata()
var stepConfig pipelineCreateScanSummaryOptions
var startTime time.Time
var logCollector *log.CollectorHook
var createPipelineCreateScanSummaryCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Collect scan result information and create a summary report",
Long: `This step allows you to create a summary report of your scan results.
It is for example used to create a markdown file which can be used to create a GitHub issue.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Send(&telemetryData, logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
pipelineCreateScanSummary(stepConfig, &telemetryData)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addPipelineCreateScanSummaryFlags(createPipelineCreateScanSummaryCmd, &stepConfig)
return createPipelineCreateScanSummaryCmd
}
func addPipelineCreateScanSummaryFlags(cmd *cobra.Command, stepConfig *pipelineCreateScanSummaryOptions) {
cmd.Flags().BoolVar(&stepConfig.FailedOnly, "failedOnly", false, "Defines if only failed scans should be included into the summary.")
cmd.Flags().StringVar(&stepConfig.OutputFilePath, "outputFilePath", `scanSummary.md`, "Defines the filepath to the target file which will be created by the step.")
cmd.Flags().StringVar(&stepConfig.PipelineLink, "pipelineLink", os.Getenv("PIPER_pipelineLink"), "Link to the pipeline (e.g. Jenkins job url) for reference in the scan summary.")
}
// retrieve step metadata
func pipelineCreateScanSummaryMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "pipelineCreateScanSummary",
Aliases: []config.Alias{},
Description: "Collect scan result information anc create a summary report",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "failedOnly",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
Default: false,
},
{
Name: "outputFilePath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: `scanSummary.md`,
},
{
Name: "pipelineLink",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_pipelineLink"),
},
},
},
},
}
return theMetaData
}
| ["\"PIPER_pipelineLink\"", "\"PIPER_pipelineLink\""] | [] | ["PIPER_pipelineLink"] | [] | ["PIPER_pipelineLink"] | go | 1 | 0 |
java/yb-pgsql/src/test/java/org/yb/pgsql/BasePgSQLTest.java | // Copyright (c) YugaByte, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations
// under the License.
//
package org.yb.pgsql;
import static com.google.common.base.Preconditions.*;
import static org.yb.AssertionWrappers.*;
import static org.yb.util.BuildTypeUtil.isASAN;
import static org.yb.util.BuildTypeUtil.isTSAN;
import com.google.common.collect.ImmutableMap;
import com.google.common.net.HostAndPort;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import com.yugabyte.core.TransactionState;
import com.yugabyte.jdbc.PgArray;
import com.yugabyte.jdbc.PgConnection;
import com.yugabyte.util.PGobject;
import com.yugabyte.util.PSQLException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.yb.client.IsInitDbDoneResponse;
import org.yb.client.TestUtils;
import org.yb.minicluster.*;
import org.yb.minicluster.Metrics.YSQLStat;
import org.yb.util.EnvAndSysPropertyUtil;
import org.yb.util.MiscUtil.ThrowingCallable;
import org.yb.util.BuildTypeUtil;
import org.yb.util.YBBackupUtil;
import org.yb.util.YBBackupException;
import org.yb.master.MasterDdlOuterClass;
import java.io.File;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.StandardCharsets;
import java.sql.*;
import java.util.*;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
public class BasePgSQLTest extends BaseMiniClusterTest {
private static final Logger LOG = LoggerFactory.getLogger(BasePgSQLTest.class);
// Postgres settings.
protected static final String DEFAULT_PG_DATABASE = "yugabyte";
protected static final String DEFAULT_PG_USER = "yugabyte";
protected static final String DEFAULT_PG_PASS = "yugabyte";
protected static final String TEST_PG_USER = "yugabyte_test";
// Non-standard PSQL states defined in yb_pg_errcodes.h
protected static final String SERIALIZATION_FAILURE_PSQL_STATE = "40001";
protected static final String SNAPSHOT_TOO_OLD_PSQL_STATE = "72000";
// Postgres flags.
private static final String MASTERS_FLAG = "FLAGS_pggate_master_addresses";
private static final String YB_ENABLED_IN_PG_ENV_VAR_NAME = "YB_ENABLED_IN_POSTGRES";
// Metric names.
protected static final String METRIC_PREFIX = "handler_latency_yb_ysqlserver_SQLProcessor_";
protected static final String SELECT_STMT_METRIC = METRIC_PREFIX + "SelectStmt";
protected static final String INSERT_STMT_METRIC = METRIC_PREFIX + "InsertStmt";
protected static final String DELETE_STMT_METRIC = METRIC_PREFIX + "DeleteStmt";
protected static final String UPDATE_STMT_METRIC = METRIC_PREFIX + "UpdateStmt";
protected static final String BEGIN_STMT_METRIC = METRIC_PREFIX + "BeginStmt";
protected static final String COMMIT_STMT_METRIC = METRIC_PREFIX + "CommitStmt";
protected static final String ROLLBACK_STMT_METRIC = METRIC_PREFIX + "RollbackStmt";
protected static final String OTHER_STMT_METRIC = METRIC_PREFIX + "OtherStmts";
protected static final String SINGLE_SHARD_TRANSACTIONS_METRIC_DEPRECATED = METRIC_PREFIX
+ "Single_Shard_Transactions";
protected static final String SINGLE_SHARD_TRANSACTIONS_METRIC =
METRIC_PREFIX + "SingleShardTransactions";
protected static final String TRANSACTIONS_METRIC = METRIC_PREFIX + "Transactions";
protected static final String AGGREGATE_PUSHDOWNS_METRIC = METRIC_PREFIX + "AggregatePushdowns";
protected static final String CATALOG_CACHE_MISSES_METRICS = METRIC_PREFIX + "CatalogCacheMisses";
// CQL and Redis settings, will be reset before each test via resetSettings method.
protected boolean startCqlProxy = false;
protected boolean startRedisProxy = false;
protected static Connection connection;
protected File pgBinDir;
protected static final int DEFAULT_STATEMENT_TIMEOUT_MS = 30000;
protected static ConcurrentSkipListSet<Integer> stuckBackendPidsConcMap =
new ConcurrentSkipListSet<>();
protected static boolean pgInitialized = false;
public void runPgRegressTest(
File inputDir, String schedule, long maxRuntimeMillis, File executable) throws Exception {
final int tserverIndex = 0;
PgRegressRunner pgRegress = new PgRegressRunner(inputDir, schedule, maxRuntimeMillis);
ProcessBuilder procBuilder = new PgRegressBuilder(executable)
.setDirs(inputDir, pgRegress.outputDir())
.setSchedule(schedule)
.setHost(getPgHost(tserverIndex))
.setPort(getPgPort(tserverIndex))
.setUser(DEFAULT_PG_USER)
.setDatabase("yugabyte")
.setEnvVars(getPgRegressEnvVars())
.getProcessBuilder();
pgRegress.start(procBuilder);
pgRegress.stop();
}
public void runPgRegressTest(File inputDir, String schedule) throws Exception {
runPgRegressTest(
inputDir, schedule, 0 /* maxRuntimeMillis */,
PgRegressBuilder.PG_REGRESS_EXECUTABLE);
}
public void runPgRegressTest(String schedule, long maxRuntimeMillis) throws Exception {
runPgRegressTest(
PgRegressBuilder.PG_REGRESS_DIR /* inputDir */, schedule, maxRuntimeMillis,
PgRegressBuilder.PG_REGRESS_EXECUTABLE);
}
public void runPgRegressTest(String schedule) throws Exception {
runPgRegressTest(schedule, 0 /* maxRuntimeMillis */);
}
public static void perfAssertLessThan(double time1, double time2) {
if (TestUtils.isReleaseBuild()) {
assertLessThan(time1, time2);
}
}
public static void perfAssertEquals(double time1, double time2) {
if (TestUtils.isReleaseBuild()) {
assertLessThan(time1, time2 * 1.3);
assertLessThan(time2, time1 * 1.3);
}
}
protected static int getPerfMaxRuntime(int releaseRuntime,
int debugRuntime,
int asanRuntime,
int tsanRuntime,
int macRuntime) {
if (TestUtils.isReleaseBuild()) {
return releaseRuntime;
} else if (TestUtils.IS_LINUX) {
if (BuildTypeUtil.isASAN()) {
return asanRuntime;
} else if (BuildTypeUtil.isTSAN()) {
return tsanRuntime;
} else {
// Linux debug builds.
return debugRuntime;
}
} else {
// We get a lot of timeouts in macOS debug builds.
return macRuntime;
}
}
protected Integer getYsqlPrefetchLimit() {
return null;
}
protected Integer getYsqlRequestLimit() {
return null;
}
/**
* @return flags shared between tablet server and initdb
*/
@Override
protected Map<String, String> getTServerFlags() {
Map<String, String> flagMap = super.getTServerFlags();
if (isTSAN() || isASAN()) {
flagMap.put("pggate_rpc_timeout_secs", "120");
}
flagMap.put("start_cql_proxy", String.valueOf(startCqlProxy));
flagMap.put("start_redis_proxy", String.valueOf(startRedisProxy));
// Setup flag for postgres test on prefetch-limit when starting tserver.
if (getYsqlPrefetchLimit() != null) {
flagMap.put("ysql_prefetch_limit", getYsqlPrefetchLimit().toString());
}
if (getYsqlRequestLimit() != null) {
flagMap.put("ysql_request_limit", getYsqlRequestLimit().toString());
}
flagMap.put("ysql_beta_features", "true");
flagMap.put("ysql_sleep_before_retry_on_txn_conflict", "false");
flagMap.put("ysql_max_write_restart_attempts", "2");
return flagMap;
}
@Override
protected Map<String, String> getMasterFlags() {
Map<String, String> flagMap = super.getMasterFlags();
flagMap.put("client_read_write_timeout_ms",
String.valueOf(BuildTypeUtil.adjustTimeout(120000)));
flagMap.put("memory_limit_hard_bytes", String.valueOf(2L * 1024 * 1024 * 1024));
return flagMap;
}
@Override
protected int getNumShardsPerTServer() {
return 1;
}
@Override
protected int getReplicationFactor() {
return 3;
}
@Override
protected void customizeMiniClusterBuilder(MiniYBClusterBuilder builder) {
super.customizeMiniClusterBuilder(builder);
builder.enableYsql(true);
}
@Before
public void initYBBackupUtil() {
YBBackupUtil.setMasterAddresses(masterAddresses);
YBBackupUtil.setPostgresContactPoint(miniCluster.getPostgresContactPoints().get(0));
}
@Before
public void initPostgresBefore() throws Exception {
if (pgInitialized)
return;
LOG.info("Loading PostgreSQL JDBC driver");
Class.forName("com.yugabyte.Driver");
// Postgres bin directory.
pgBinDir = new File(TestUtils.getBuildRootDir(), "postgres/bin");
LOG.info("Waiting for initdb to complete on master");
TestUtils.waitFor(
() -> {
IsInitDbDoneResponse initdbStatusResp = miniCluster.getClient().getIsInitDbDone();
if (initdbStatusResp.hasError()) {
throw new RuntimeException(
"Could not request initdb status: " + initdbStatusResp.getServerError());
}
String initdbError = initdbStatusResp.getInitDbError();
if (initdbError != null && !initdbError.isEmpty()) {
throw new RuntimeException("initdb failed: " + initdbError);
}
return initdbStatusResp.isDone();
},
600000);
LOG.info("initdb has completed successfully on master");
if (connection != null) {
LOG.info("Closing previous connection");
connection.close();
connection = null;
}
// Create test role.
try (Connection initialConnection = getConnectionBuilder().withUser(DEFAULT_PG_USER).connect();
Statement statement = initialConnection.createStatement()) {
statement.execute(
"CREATE ROLE " + TEST_PG_USER + " SUPERUSER CREATEROLE CREATEDB BYPASSRLS LOGIN");
}
connection = getConnectionBuilder().connect();
pgInitialized = true;
}
public void restartClusterWithFlags(
Map<String, String> additionalMasterFlags,
Map<String, String> additionalTserverFlags) throws Exception {
destroyMiniCluster();
createMiniCluster(additionalMasterFlags, additionalTserverFlags);
pgInitialized = false;
initPostgresBefore();
}
public void restartCluster() throws Exception {
restartClusterWithFlags(
Collections.<String, String>emptyMap(),
Collections.<String, String>emptyMap());
}
@Override
protected void resetSettings() {
super.resetSettings();
startCqlProxy = false;
startRedisProxy = false;
}
protected ConnectionBuilder getConnectionBuilder() {
return new ConnectionBuilder(miniCluster);
}
public String getPgHost(int tserverIndex) {
return miniCluster.getPostgresContactPoints().get(tserverIndex).getHostName();
}
public int getPgPort(int tserverIndex) {
return miniCluster.getPostgresContactPoints().get(tserverIndex).getPort();
}
protected Map<String, String> getPgRegressEnvVars() {
Map<String, String> pgRegressEnvVars = new TreeMap<>();
pgRegressEnvVars.put(MASTERS_FLAG, masterAddresses);
pgRegressEnvVars.put(YB_ENABLED_IN_PG_ENV_VAR_NAME, "1");
for (Map.Entry<String, String> entry : System.getenv().entrySet()) {
String envVarName = entry.getKey();
if (envVarName.startsWith("postgres_FLAGS_")) {
String downstreamEnvVarName = envVarName.substring(9);
LOG.info("Found env var " + envVarName + ", setting " + downstreamEnvVarName + " for " +
"pg_regress to " + entry.getValue());
pgRegressEnvVars.put(downstreamEnvVarName, entry.getValue());
}
}
// A temporary workaround for a failure to look up a user name by uid in an LDAP environment.
pgRegressEnvVars.put("YB_PG_FALLBACK_SYSTEM_USER_NAME", "yugabyte");
return pgRegressEnvVars;
}
@After
public void cleanUpAfter() throws Exception {
LOG.info("Cleaning up after {}", getCurrentTestMethodName());
if (connection == null) {
LOG.warn("No connection created, skipping cleanup");
return;
}
// If root connection was closed, open a new one for cleaning.
if (connection.isClosed()) {
connection = getConnectionBuilder().connect();
}
try (Statement stmt = connection.createStatement()) {
stmt.execute("RESET SESSION AUTHORIZATION");
// TODO(dmitry): Workaround for #1721, remove after fix.
stmt.execute("ROLLBACK");
stmt.execute("DISCARD TEMP");
}
cleanUpCustomDatabases();
cleanUpCustomEntities();
if (isClusterNeedsRecreation()) {
pgInitialized = false;
}
}
/**
* Removes all databases excluding `postgres`, `yugabyte`, `system_platform`, `template0`, and
* `template1`. Any lower-priority cleaners should only clean objects in one of the remaining
* three databases, or cluster-wide objects (e.g. roles).
*/
private void cleanUpCustomDatabases() throws Exception {
LOG.info("Cleaning up custom databases");
try (Statement stmt = connection.createStatement()) {
for (int i = 0; i < 2; i++) {
try {
List<String> databases = getRowList(stmt,
"SELECT datname FROM pg_database" +
" WHERE datname <> 'template0'" +
" AND datname <> 'template1'" +
" AND datname <> 'postgres'" +
" AND datname <> 'yugabyte'" +
" AND datname <> 'system_platform'").stream().map(r -> r.getString(0))
.collect(Collectors.toList());
for (String database : databases) {
LOG.info("Dropping database '{}'", database);
stmt.execute("DROP DATABASE " + database);
}
} catch (Exception e) {
if (e.toString().contains("Catalog Version Mismatch: A DDL occurred while processing")) {
continue;
} else {
throw e;
}
}
}
}
}
/** Drop entities owned by non-system roles, and drop custom roles. */
private void cleanUpCustomEntities() throws Exception {
LOG.info("Cleaning up roles");
List<String> persistentUsers = Arrays.asList(DEFAULT_PG_USER, TEST_PG_USER);
try (Statement stmt = connection.createStatement()) {
for (int i = 0; i < 2; i++) {
try {
List<String> roles = getRowList(stmt, "SELECT rolname FROM pg_roles"
+ " WHERE rolname <> 'postgres'"
+ " AND rolname NOT LIKE 'pg_%'"
+ " AND rolname NOT LIKE 'yb_%'").stream()
.map(r -> r.getString(0))
.collect(Collectors.toList());
for (String role : roles) {
boolean isPersistent = persistentUsers.contains(role);
LOG.info("Cleaning up role {} (persistent? {})", role, isPersistent);
stmt.execute("DROP OWNED BY " + role + " CASCADE");
if (!isPersistent) {
stmt.execute("DROP ROLE " + role);
}
}
} catch (Exception e) {
if (e.toString().contains("Catalog Version Mismatch: A DDL occurred while processing")) {
continue;
} else {
throw e;
}
}
}
}
}
@AfterClass
public static void tearDownAfter() throws Exception {
// Close the root connection, which is not cleaned up after each test.
if (connection != null && !connection.isClosed()) {
connection.close();
}
pgInitialized = false;
LOG.info("Destroying mini-cluster");
if (miniCluster != null) {
destroyMiniCluster();
miniCluster = null;
}
}
protected void recreateWithYsqlVersion(YsqlSnapshotVersion version) throws Exception {
destroyMiniCluster();
pgInitialized = false;
markClusterNeedsRecreation();
createMiniCluster((builder) -> {
builder.ysqlSnapshotVersion(version);
});
initPostgresBefore();
}
/**
* Commit the current transaction on the given connection, catch and report the exception.
* @param conn connection to use
* @param extraMsg an extra part of the error message
* @return whether commit succeeded
*/
protected static boolean commitAndCatchException(Connection conn, String extraMsg) {
extraMsg = extraMsg.trim();
if (!extraMsg.isEmpty()) {
extraMsg = " (" + extraMsg + ")";
}
try {
conn.commit();
return true;
} catch (SQLException ex) {
// TODO: validate the exception message.
LOG.info("Error during commit" + extraMsg + ": " + ex.getMessage());
return false;
}
}
protected static PgConnection toPgConnection(Connection connection) {
return (PgConnection) connection;
}
protected static TransactionState getPgTxnState(Connection connection) {
return toPgConnection(connection).getTransactionState();
}
protected static int getPgBackendPid(Connection connection) {
return toPgConnection(connection).getBackendPID();
}
protected static class AggregatedValue {
long count;
double value;
long rows;
}
protected void resetStatementStat() throws Exception {
for (MiniYBDaemon ts : miniCluster.getTabletServers().values()) {
URL url = new URL(String.format("http://%s:%d/statements-reset",
ts.getLocalhostIP(),
ts.getPgsqlWebPort()));
Scanner scanner = new Scanner(url.openConnection().getInputStream());
scanner.close();
}
}
protected AggregatedValue getStatementStat(String statName) throws Exception {
AggregatedValue value = new AggregatedValue();
for (MiniYBDaemon ts : miniCluster.getTabletServers().values()) {
URL url = new URL(String.format("http://%s:%d/statements",
ts.getLocalhostIP(),
ts.getPgsqlWebPort()));
Scanner scanner = new Scanner(url.openConnection().getInputStream());
JsonParser parser = new JsonParser();
JsonElement tree = parser.parse(scanner.useDelimiter("\\A").next());
JsonObject obj = tree.getAsJsonObject();
YSQLStat ysqlStat = new Metrics(obj, true).getYSQLStat(statName);
if (ysqlStat != null) {
value.count += ysqlStat.calls;
value.value += ysqlStat.total_time;
value.rows += ysqlStat.rows;
}
scanner.close();
}
return value;
}
protected void verifyStatementStat(Statement stmt, String sql, String statName,
int stmtMetricDelta, boolean validStmt) throws Exception {
long oldValue = 0;
if (statName != null) {
oldValue = getStatementStat(statName).count;
}
if (validStmt) {
stmt.execute(sql);
} else {
runInvalidQuery(stmt, sql, "ERROR");
}
long newValue = 0;
if (statName != null) {
newValue = getStatementStat(statName).count;
}
assertEquals(oldValue + stmtMetricDelta, newValue);
}
protected void verifyStatementStats(Statement stmt, String sql, String statName,
long numLoops, long oldValue) throws Exception {
for (int i = 0; i < numLoops; i++) {
stmt.execute(sql);
}
long newValue = getStatementStat(statName).count;
assertEquals(oldValue + numLoops, newValue);
}
protected void verifyStatementStatWithReset(Statement stmt, String sql, String statName,
long numLoopsBeforeReset, long numLoopsAfterReset)
throws Exception {
long oldValue = getStatementStat(statName).count;
verifyStatementStats(stmt, sql, statName, numLoopsBeforeReset, oldValue);
resetStatementStat();
oldValue = 0;
verifyStatementStats(stmt, sql, statName, numLoopsAfterReset, oldValue);
}
private JsonArray[] getRawMetric(
Function<MiniYBDaemon, Integer> portFetcher) throws Exception {
Collection<MiniYBDaemon> servers = miniCluster.getTabletServers().values();
JsonArray[] result = new JsonArray[servers.size()];
int index = 0;
for (MiniYBDaemon ts : servers) {
URLConnection connection = new URL(String.format("http://%s:%d/metrics",
ts.getLocalhostIP(),
portFetcher.apply(ts))).openConnection();
connection.setUseCaches(false);
Scanner scanner = new Scanner(connection.getInputStream());
result[index++] =
new JsonParser().parse(scanner.useDelimiter("\\A").next()).getAsJsonArray();
scanner.close();
}
return result;
}
protected JsonArray[] getRawTSMetric() throws Exception {
return getRawMetric((ts) -> ts.getWebPort());
}
protected JsonArray[] getRawYSQLMetric() throws Exception {
return getRawMetric((ts) -> ts.getPgsqlWebPort());
}
protected AggregatedValue getMetric(String metricName) throws Exception {
AggregatedValue value = new AggregatedValue();
for (JsonArray rawMetric : getRawYSQLMetric()) {
JsonObject obj = rawMetric.get(0).getAsJsonObject();
assertEquals(obj.get("type").getAsString(), "server");
assertEquals(obj.get("id").getAsString(), "yb.ysqlserver");
Metrics.YSQLMetric metric = new Metrics(obj).getYSQLMetric(metricName);
value.count += metric.count;
value.value += metric.sum;
value.rows += metric.rows;
}
return value;
}
protected Long getTserverMetricCountForTable(String metricName, String tableName)
throws Exception {
long count = 0;
for (JsonArray rawMetric : getRawTSMetric()) {
for (JsonElement elem : rawMetric.getAsJsonArray()) {
JsonObject obj = elem.getAsJsonObject();
if (obj.get("type").getAsString().equals("tablet") &&
obj.getAsJsonObject("attributes").get("table_name").getAsString().equals(tableName)) {
for (JsonElement subelem : obj.getAsJsonArray("metrics")) {
if (!subelem.isJsonObject()) {
continue;
}
JsonObject metric = subelem.getAsJsonObject();
if (metric.has("name") && metric.get("name").getAsString().equals(metricName)) {
count += metric.get("value").getAsLong();
}
}
}
}
}
return count;
}
protected AggregatedValue getTServerMetric(String metricName) throws Exception {
AggregatedValue value = new AggregatedValue();
for (JsonArray rawMetric : getRawTSMetric()) {
for (JsonElement elem : rawMetric.getAsJsonArray()) {
JsonObject obj = elem.getAsJsonObject();
if (obj.get("type").getAsString().equals("server")) {
assertEquals(obj.get("id").getAsString(), "yb.tabletserver");
Metrics.Histogram histogram = new Metrics(obj).getHistogram(metricName);
value.count += histogram.totalCount;
value.value += histogram.totalSum;
}
}
}
return value;
}
protected List<String> getTabletsForTable(
String database, String tableName) throws Exception {
try {
return YBBackupUtil.getTabletsForTable("ysql." + database, tableName);
} catch (YBBackupException e) {
return new ArrayList<>();
}
}
protected String getOwnerForTable(Statement stmt, String tableName) throws Exception {
return getSingleRow(stmt, "SELECT pg_get_userbyid(relowner) FROM pg_class WHERE relname = '" +
tableName + "'").getString(0);
}
protected String getTablespaceForTable(Statement stmt, String tableName) throws Exception {
ResultSet rs = stmt.executeQuery(
"SELECT ts.spcname FROM pg_tablespace ts INNER JOIN pg_class c " +
"ON ts.oid = c.reltablespace WHERE c.oid = '" + tableName + "'::regclass");
if (!rs.next()) {
return null; // No tablespace for the table.
}
Row row = Row.fromResultSet(rs);
assertFalse("Result set has more than one row", rs.next());
return row.getString(0);
}
protected long getMetricCounter(String metricName) throws Exception {
return getMetric(metricName).count;
}
private interface MetricFetcher {
AggregatedValue fetch(String name) throws Exception;
}
private static abstract class QueryExecutionMetricChecker {
private MetricFetcher fetcher;
private String metricName;
private AggregatedValue oldValue;
public QueryExecutionMetricChecker(String metricName, MetricFetcher fetcher) {
this.fetcher = fetcher;
this.metricName = metricName;
}
public void beforeQueryExecution() throws Exception {
oldValue = fetcher.fetch(metricName);
}
public void afterQueryExecution(String query) throws Exception {
check(query, metricName, oldValue, fetcher.fetch(metricName));
}
protected abstract void check(
String query, String metricName, AggregatedValue oldValue, AggregatedValue newValue);
}
private class MetricCountChecker extends QueryExecutionMetricChecker {
private long countDelta;
public MetricCountChecker(String name, MetricFetcher fetcher, long countDelta) {
super(name, fetcher);
this.countDelta = countDelta;
}
@Override
public void check(
String query, String metric, AggregatedValue oldValue, AggregatedValue newValue) {
assertEquals(
String.format("'%s' count delta assertion failed for query '%s'", metric, query),
countDelta, newValue.count - oldValue.count);
}
}
private class MetricRowsChecker extends MetricCountChecker {
private long rowsDelta;
public MetricRowsChecker(String name, MetricFetcher fetcher, long countDelta, long rowsDelta) {
super(name, fetcher, countDelta);
this.rowsDelta = rowsDelta;
}
@Override
public void check(
String query, String metric, AggregatedValue oldValue, AggregatedValue newValue) {
super.check(query, metric, oldValue, newValue);
assertEquals(
String.format("'%s' row count delta assertion failed for query '%s'", metric, query),
rowsDelta, newValue.rows - oldValue.rows);
}
}
/** Time execution of a query. */
private long verifyQuery(Statement statement,
String query,
boolean validStmt,
QueryExecutionMetricChecker... checkers) throws Exception {
for (QueryExecutionMetricChecker checker : checkers) {
checker.beforeQueryExecution();
}
final long startTimeMillis = System.currentTimeMillis();
if (validStmt) {
statement.execute(query);
} else {
runInvalidQuery(statement, query, "ERROR");
}
// Check the elapsed time.
final long result = System.currentTimeMillis() - startTimeMillis;
for (QueryExecutionMetricChecker checker : checkers) {
checker.afterQueryExecution(query);
}
return result;
}
/** Time execution of a query. */
protected long verifyStatementMetric(
Statement statement, String query, String metricName, int queryMetricDelta,
int singleShardTxnMetricDelta, int txnMetricDelta, boolean validStmt) throws Exception {
return verifyQuery(
statement, query, validStmt,
new MetricCountChecker(
SINGLE_SHARD_TRANSACTIONS_METRIC_DEPRECATED, this::getMetric,
singleShardTxnMetricDelta),
new MetricCountChecker(
SINGLE_SHARD_TRANSACTIONS_METRIC, this::getMetric, singleShardTxnMetricDelta),
new MetricCountChecker(TRANSACTIONS_METRIC, this::getMetric, txnMetricDelta),
new MetricCountChecker(metricName, this::getMetric, queryMetricDelta));
}
protected void verifyStatementTxnMetric(
Statement statement, String query, int singleShardTxnMetricDelta) throws Exception {
verifyQuery(
        statement, query, true,
new MetricCountChecker(
SINGLE_SHARD_TRANSACTIONS_METRIC_DEPRECATED, this::getMetric,
singleShardTxnMetricDelta),
new MetricCountChecker(
SINGLE_SHARD_TRANSACTIONS_METRIC, this::getMetric, singleShardTxnMetricDelta));
}
protected void verifyStatementMetricRows(
Statement statement, String query, String metricName,
int countDelta, int rowsDelta) throws Exception {
verifyQuery(statement, query, true,
new MetricRowsChecker(metricName, this::getMetric, countDelta, rowsDelta));
}
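  /**
   * Executes a statement with {@code DEFAULT_STATEMENT_TIMEOUT_MS}; on timeout, records the
   * backend pid as potentially stuck before rethrowing. Any SQLException is propagated unless a
   * TimeoutException takes precedence.
   */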
protected void executeWithTimeout(Statement statement, String sql)
throws SQLException, TimeoutException, InterruptedException {
// Maintain our map saying how many statements are being run by each backend pid.
// Later we can determine (possibly) stuck backends based on this.
final int backendPid = getPgBackendPid(statement.getConnection());
AtomicReference<SQLException> sqlExceptionWrapper = new AtomicReference<>();
boolean timedOut = false;
try {
String taskDescription = "SQL statement (PG backend pid: " + backendPid + "): " + sql;
runWithTimeout(DEFAULT_STATEMENT_TIMEOUT_MS, taskDescription, () -> {
try {
statement.execute(sql);
} catch (SQLException e) {
sqlExceptionWrapper.set(e);
}
});
} catch (TimeoutException ex) {
// Record that this backend is possibly "stuck" so we can force a core dump and examine it.
stuckBackendPidsConcMap.add(backendPid);
timedOut = true;
throw ex;
} finally {
// Make sure we propagate the SQLException. But TimeoutException takes precedence.
if (!timedOut && sqlExceptionWrapper.get() != null) {
throw sqlExceptionWrapper.get();
}
}
}
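  /**
   * Tracks the lifecycle of a transaction on a single connection: whether the statement executed,
   * the transaction state before and after commit, and whether the commit succeeded.
   */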
public class PgTxnState {
public PgTxnState(Connection connection, String connectionName) {
this.connection = connection;
this.connectionName = connectionName;
}
boolean isFinished() {
return stmtExecuted != null &&
beforeCommitState != null &&
afterCommitState != null &&
committed != null;
}
public boolean isSuccess() {
return isFinished() &&
stmtExecuted &&
TransactionState.OPEN == beforeCommitState &&
committed &&
TransactionState.IDLE == afterCommitState;
}
public boolean isFailure() {
if (!isFinished()) {
return false;
}
// We have two cases:
// 1. If stmt execution succeeded but commit failed.
// 2. If stmt exec failed. Then txn should be in failed state and commit should succeed (but
// effectively do a rollback/abort).
if (stmtExecuted) {
return TransactionState.OPEN == beforeCommitState &&
!committed &&
TransactionState.IDLE == afterCommitState;
} else {
return TransactionState.FAILED == beforeCommitState &&
committed &&
TransactionState.IDLE == afterCommitState;
}
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("PgTxnState: ").append(connectionName).append("\n");
sb.append("{\n");
sb.append(" stmtExecuted: ").append(String.valueOf(stmtExecuted)).append("\n");
sb.append(" beforeCommitState: ").append(String.valueOf(beforeCommitState)).append("\n");
sb.append(" committed: ").append(String.valueOf(committed)).append("\n");
sb.append(" afterCommitState: ").append(String.valueOf(afterCommitState)).append("\n");
sb.append("}\n");
return sb.toString();
}
private Statement getStatement() throws SQLException {
if (statement != null) {
return statement;
}
return connection.createStatement();
}
private String connectionName;
private Connection connection;
private Statement statement = null;
private Boolean stmtExecuted = null;
private TransactionState beforeCommitState = null;
private Boolean committed = null;
private TransactionState afterCommitState = null;
}
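  /** Executes a statement on the transaction's connection, recording success in {@code txnState}. */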
protected void executeWithTxnState(PgTxnState txnState, String sql) throws Exception {
boolean previousStmtFailed = Boolean.FALSE.equals(txnState.stmtExecuted);
txnState.stmtExecuted = false;
try {
executeWithTimeout(txnState.getStatement(), sql);
txnState.stmtExecuted = !previousStmtFailed;
} catch (PSQLException ex) {
// TODO: validate the exception message.
// Not reporting a stack trace here on purpose, because this will happen a lot in a test.
LOG.info("Error while inserting on the second connection:" + ex.getMessage());
}
}
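  /** Commits the transaction, recording the PG transaction state before and after the commit. */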
protected void commitWithTxnState(PgTxnState txnState) {
txnState.beforeCommitState = getPgTxnState(txnState.connection);
txnState.committed = commitAndCatchException(txnState.connection, txnState.connectionName);
txnState.afterCommitState = getPgTxnState(txnState.connection);
}
//------------------------------------------------------------------------------------------------
// Test Utilities
protected static class Row implements Comparable<Row>, Cloneable {
static Row fromResultSet(ResultSet rs) throws SQLException {
List<Object> elems = new ArrayList<>();
List<String> columnNames = new ArrayList<>();
for (int i = 1; i <= rs.getMetaData().getColumnCount(); i++) {
elems.add(rs.getObject(i));
columnNames.add(rs.getMetaData().getColumnLabel(i));
}
// Pre-initialize stuff while connection is still available
for (Object el : elems) {
if (el instanceof PgArray)
((PgArray) el).getArray();
}
return new Row(elems, columnNames);
}
List<Object> elems = new ArrayList<>();
/**
* List of column names, should have the same size as {@link #elems}.
* <p>
* Not used for equality, hash code and comparison.
*/
List<String> columnNames = new ArrayList<>();
Row(Object... elems) {
Collections.addAll(this.elems, elems);
}
Row(List<Object> elems, List<String> columnNames) {
checkArgument(elems.size() == columnNames.size());
this.elems = elems;
this.columnNames = columnNames;
}
/** Returns a column name if available, or {@code null} otherwise. */
String getColumnName(int index) {
return columnNames.size() > 0 ? columnNames.get(index) : null;
}
Object get(int index) {
return elems.get(index);
}
Boolean getBoolean(int index) {
return (Boolean) elems.get(index);
}
Integer getInt(int index) {
return (Integer) elems.get(index);
}
Long getLong(int index) {
return (Long) elems.get(index);
}
Double getDouble(int index) {
return (Double) elems.get(index);
}
String getString(int index) {
return (String) elems.get(index);
}
public boolean elementEquals(int idx, Object value) {
return compare(elems.get(idx), value) == 0;
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof Row)) {
return false;
}
Row other = (Row)obj;
return compareTo(other) == 0;
}
@Override
public int compareTo(Row that) {
// In our test, if selected Row has different number of columns from expected row, something
// must be very wrong. Stop the test here.
assertEquals("Row width mismatch between " + this + " and " + that,
this.elems.size(), that.elems.size());
return compare(this.elems, that.elems);
}
@Override
public int hashCode() {
return elems.hashCode();
}
@Override
public String toString() {
return toString(false /* printColumnNames */);
}
public String toString(boolean printColumnNames) {
StringBuilder sb = new StringBuilder();
sb.append("Row[");
for (int i = 0; i < elems.size(); i++) {
if (i > 0) sb.append(',');
if (printColumnNames) {
String columnNameOrNull = getColumnName(i);
sb.append((columnNameOrNull != null ? columnNameOrNull : i) + "=");
}
if (elems.get(i) == null) {
sb.append("null");
} else {
sb.append(elems.get(i).getClass().getName() + "::");
sb.append(elems.get(i).toString());
}
}
sb.append(']');
return sb.toString();
}
@Override
public Row clone() {
try {
Row clone = (Row) super.clone();
clone.elems = new ArrayList<>(this.elems);
clone.columnNames = new ArrayList<>(this.columnNames);
return clone;
} catch (CloneNotSupportedException ex) {
// Not possible
throw new RuntimeException(ex);
}
}
//
// Helpers
//
/**
* Compare two objects if possible. Is able to compare:
* <ul>
* <li>Primitives
* <li>Comparables (including {@code String}) - but cannot handle the case of two unrelated
* Comparables
* <li>{@code PGobject}s wrapping {@code Comparable}s
* <li>Arrays, {@code PgArray}s or lists of the above types
* </ul>
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
private static int compare(Object o1, Object o2) {
if (o1 == null || o2 == null) {
if (o1 != o2) {
return o1 == null ? -1 : 1;
}
return 0;
} else {
Object promoted1 = promoteType(o1);
Object promoted2 = promoteType(o2);
if (promoted1 instanceof Long && promoted2 instanceof Long) {
return ((Long) promoted1).compareTo((Long) promoted2);
} else if (promoted1 instanceof Double && promoted2 instanceof Double) {
return ((Double) promoted1).compareTo((Double) promoted2);
} else if (promoted1 instanceof Number && promoted2 instanceof Number) {
return Double.compare(
((Number) promoted1).doubleValue(),
((Number) promoted2).doubleValue());
} else if (promoted1 instanceof Comparable && promoted2 instanceof Comparable) {
        // This is unsafe but we don't expect arbitrary types here.
return ((Comparable) promoted1).compareTo((Comparable) promoted2);
} else if (promoted1 instanceof List && promoted2 instanceof List) {
List list1 = (List) promoted1;
List list2 = (List) promoted2;
if (list1.size() != list2.size()) {
return Integer.compare(list1.size(), list2.size());
}
for (int i = 0; i < list1.size(); ++i) {
int comparisonResult = compare(list1.get(i), list2.get(i));
if (comparisonResult != 0) {
return comparisonResult;
}
}
return 0;
} else {
throw new IllegalArgumentException("Cannot compare "
+ o1 + " (of class " + o1.getClass().getCanonicalName() + ") with "
+ o2 + " (of class " + o1.getClass().getCanonicalName() + ")");
}
}
}
/** Converts the value to a widest one of the same type for comparison */
@SuppressWarnings({ "rawtypes", "unchecked" })
private static Object promoteType(Object v) {
if (v instanceof Byte || v instanceof Short || v instanceof Integer) {
return ((Number)v).longValue();
} else if (v instanceof Float) {
return ((Float)v).doubleValue();
} else if (v instanceof Comparable) {
return v;
} else if (v instanceof List) {
return v;
} else if (v instanceof PGobject) {
return promoteType(((PGobject) v).getValue()); // For PG_LSN type.
} else if (v instanceof PgArray) {
try {
return promoteType(((PgArray) v).getArray());
} catch (SQLException ex) {
throw new RuntimeException("SQL exception during type promotion", ex);
}
} else if (v.getClass().isArray()) {
List list = new ArrayList<>();
// Unfortunately there's no easy way to automate that, we have to enumerate all array types
// explicitly.
if (v instanceof byte[]) {
for (byte ve : (byte[]) v) {
list.add(promoteType(ve));
}
} else if (v instanceof short[]) {
for (short ve : (short[]) v) {
list.add(promoteType(ve));
}
} else if (v instanceof int[]) {
for (int ve : (int[]) v) {
list.add(promoteType(ve));
}
} else if (v instanceof long[]) {
for (long ve : (long[]) v) {
list.add(promoteType(ve));
}
} else if (v instanceof float[]) {
for (float ve : (float[]) v) {
list.add(promoteType(ve));
}
} else if (v instanceof double[]) {
for (double ve : (double[]) v) {
list.add(promoteType(ve));
}
} else if (v instanceof boolean[]) {
for (boolean ve : (boolean[]) v) {
list.add(promoteType(ve));
}
} else if (v instanceof char[]) {
for (char ve : (char[]) v) {
list.add(promoteType(ve));
}
} else if (v instanceof Object[]) {
for (Object ve : (Object[]) v) {
list.add(promoteType(ve));
}
}
return list;
} else {
throw new IllegalArgumentException(v + " (of class " + v.getClass().getSimpleName() + ")"
+ " cannot be promoted for comparison!");
}
}
}
protected List<Row> deepCopyRows(List<Row> rows) {
List<Row> copy = new ArrayList<>();
for (Row row : rows) {
copy.add(row.clone());
}
return copy;
}
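  /** Drains a result set into an unordered set of rows. */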
protected Set<Row> getRowSet(ResultSet rs) throws SQLException {
Set<Row> rows = new HashSet<>();
while (rs.next()) {
rows.add(Row.fromResultSet(rs));
}
return rows;
}
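  /** Asserts that the result set contains exactly one row and returns it. */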
protected Row getSingleRow(ResultSet rs) throws SQLException {
assertTrue("Result set has no rows", rs.next());
Row row = Row.fromResultSet(rs);
assertFalse("Result set has more than one row", rs.next());
return row;
}
protected List<Row> getRowList(Statement stmt, String query) throws SQLException {
try (ResultSet rs = stmt.executeQuery(query)) {
return getRowList(rs);
}
}
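  /** Executes a DML statement with yb_non_ddl_txn_for_sys_tables_allowed temporarily enabled. */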
protected static int executeSystemTableDml(
Statement stmt, String dml) throws SQLException {
return systemTableQueryHelper(stmt, () -> stmt.executeUpdate(dml));
}
protected List<Row> getSystemTableRowsList(
Statement stmt, String query) throws SQLException {
return systemTableQueryHelper(stmt, () -> {
try (ResultSet result = stmt.executeQuery(query)) {
return getRowList(result);
}
});
}
private static <T> T systemTableQueryHelper(
Statement stmt, ThrowingCallable<T, SQLException> callable) throws SQLException {
String allow_non_ddl_pattern = "SET yb_non_ddl_txn_for_sys_tables_allowed=%d";
stmt.execute(String.format(allow_non_ddl_pattern, 1));
try {
return callable.call();
} finally {
stmt.execute(String.format(allow_non_ddl_pattern, 0));
}
}
protected Row getSingleRow(Statement stmt, String query) throws SQLException {
try (ResultSet rs = stmt.executeQuery(query)) {
return getSingleRow(rs);
}
}
protected List<Row> getRowList(ResultSet rs) throws SQLException {
List<Row> rows = new ArrayList<>();
while (rs.next()) {
rows.add(Row.fromResultSet(rs));
}
return rows;
}
protected List<Row> getSortedRowList(ResultSet rs) throws SQLException {
// Sort all rows and return.
List<Row> rows = getRowList(rs);
Collections.sort(rows);
return rows;
}
/**
* Checks that collections are of the same sizes, printing unexpected and missing rows otherwise.
*/
protected <T> void assertCollectionSizes(
String errorPrefix,
Collection<T> expected,
Collection<T> actual) {
if (expected.size() != actual.size()) {
List<T> unexpected = new ArrayList<>(actual);
unexpected.removeAll(expected);
List<T> missing = new ArrayList<>(expected);
missing.removeAll(actual);
fail(errorPrefix + "Collection length mismatch: expected<" + expected.size()
+ "> but was:<" + actual.size() + ">"
+ "\nUnexpected rows: " + unexpected
+ "\nMissing rows: " + missing);
}
}
/** Better alternative to assertEquals that provides more mismatch details. */
protected void assertRows(List<Row> expected, List<Row> actual) {
assertRows(null, expected, actual);
}
/** Better alternative to assertEquals that provides more mismatch details. */
protected void assertRows(String messagePrefix, List<Row> expected, List<Row> actual) {
String fullPrefix = StringUtils.isEmpty(messagePrefix) ? "" : (messagePrefix + ": ");
assertCollectionSizes(fullPrefix, expected, actual);
for (int i = 0; i < expected.size(); ++i) {
assertRow(fullPrefix + "Mismatch at row " + (i + 1) + ": ", expected.get(i), actual.get(i));
}
}
/** Better alternative to assertEquals, which provides more mismatch details. */
protected void assertRow(String messagePrefix, Row expected, Row actual) {
assertEquals(messagePrefix
+ "Expected row width mismatch: expected:<" + expected.elems.size()
+ "> but was:<" + actual.elems.size() + ">"
+ "\nExpected row: " + expected
+ "\nActual row: " + actual,
expected.elems.size(), actual.elems.size());
for (int i = 0; i < expected.elems.size(); ++i) {
String columnNameOrNull = expected.getColumnName(i);
assertTrue(messagePrefix
+ "Column #" + (i + 1)
+ (columnNameOrNull != null ? " (" + columnNameOrNull + ") " : " ")
+ "mismatch: expected:<" + expected.elems.get(i)
+ "> but was:<" + actual.elems.get(i) + ">"
+ "\nExpected row: " + expected.toString(true /* printColumnNames */)
+ "\nActual row: " + actual.toString(true /* printColumnNames */),
expected.elementEquals(i, actual.elems.get(i)));
}
}
protected void assertRow(Row expected, Row actual) {
assertRow("", expected, actual);
}
protected void assertQuery(Statement stmt, String query, Row... expectedRows)
throws SQLException {
List<Row> actualRows = getRowList(stmt.executeQuery(query));
assertEquals(
"Expected " + expectedRows.length + " rows, got " + actualRows.size() + ": " + actualRows,
expectedRows.length, actualRows.size());
assertRows(Arrays.asList(expectedRows), actualRows);
}
protected void assertQuery(PreparedStatement stmt, Row... expectedRows)
throws SQLException {
List<Row> actualRows = getRowList(stmt.executeQuery());
assertEquals(
"Expected " + expectedRows.length + " rows, got " + actualRows.size() + ": " + actualRows,
expectedRows.length, actualRows.size());
assertRows(Arrays.asList(expectedRows), actualRows);
}
protected void assertNoRows(Statement stmt, String query) throws SQLException {
List<Row> actualRows = getRowList(stmt.executeQuery(query));
assertTrue("Expected no results, got " + actualRows, actualRows.isEmpty());
}
protected void assertNextRow(ResultSet rs, Object... values) throws SQLException {
assertTrue(rs.next());
Row expected = new Row(values);
Row actual = Row.fromResultSet(rs);
assertRow(expected, actual);
}
protected void assertOneRow(Statement statement,
String query,
Object... values) throws SQLException {
try (ResultSet rs = statement.executeQuery(query)) {
assertNextRow(rs, values);
assertFalse(rs.next());
}
}
protected void assertRowSet(Statement statement,
String query,
Set<Row> expectedRows) throws SQLException {
try (ResultSet rs = statement.executeQuery(query)) {
assertEquals(expectedRows, getRowSet(rs));
}
}
@SafeVarargs
protected final <T> Set<T> asSet(T... ts) {
return Stream.of(ts).collect(Collectors.toSet());
}
protected void assertRowList(Statement statement,
String query,
List<Row> expectedRows) throws SQLException {
try (ResultSet rs = statement.executeQuery(query)) {
assertRows(expectedRows, getRowList(rs));
}
}
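  /** Returns whether the EXPLAIN output for {@code query} contains the given substring. */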
private boolean doesQueryPlanContainsSubstring(Statement stmt, String query, String substring)
throws SQLException {
return getQueryPlanString(stmt, query).contains(substring);
}
protected String getQueryPlanString(Statement stmt, String query) throws SQLException {
LOG.info("EXPLAIN " + query);
StringBuilder builder = new StringBuilder();
try (ResultSet rs = stmt.executeQuery("EXPLAIN " + query)) {
assert (rs.getMetaData().getColumnCount() == 1); // Expecting one string column.
while (rs.next()) {
builder.append(rs.getString(1) + "\n");
}
}
LOG.info(builder.toString());
return builder.toString();
}
/** Whether or not this select query uses Index Scan with a given index. */
protected boolean isIndexScan(Statement stmt, String query, String index)
throws SQLException {
return doesQueryPlanContainsSubstring(stmt, query, "Index Scan using " + index);
}
/** Whether or not this select query uses Index Only Scan with a given index. */
protected boolean isIndexOnlyScan(Statement stmt, String query, String index)
throws SQLException {
return doesQueryPlanContainsSubstring(stmt, query, "Index Only Scan using " + index);
}
/**
* Whether or not this select query requires filtering by Postgres (i.e. not all
* conditions can be pushed down to YugaByte).
*/
protected boolean doesNeedPgFiltering(Statement stmt, String query) throws SQLException {
return doesQueryPlanContainsSubstring(stmt, query, "Filter:");
}
/**
* Return whether this select query uses a partitioned index in an IndexScan for ordering.
*/
protected void isPartitionedOrderedIndexScan(Statement stmt,
String query,
String index)
throws SQLException {
String query_plan = getQueryPlanString(stmt, query);
assertTrue(query_plan.contains("Merge Append"));
assertTrue(query_plan.contains("Index Scan using " + index));
}
/**
* Return whether this select query uses a partitioned index in an
* Index Only Scan for ordering.
*/
protected void isPartitionedOrderedIndexOnlyScan(Statement stmt,
String query,
String index)
throws SQLException {
String query_plan = getQueryPlanString(stmt, query);
assertTrue(query_plan.contains("Merge Append"));
assertTrue(query_plan.contains("Index Only Scan using " + index));
}
/**
* Return whether this select query uses the given index in an
* Index Scan for ordering.
*/
protected void isOrderedIndexScan(Statement stmt,
String query,
String index)
throws SQLException {
String query_plan = getQueryPlanString(stmt, query);
assertFalse(query_plan.contains("Sort"));
assertTrue(query_plan.contains("Index Scan using " + index));
}
/**
* Return whether this select query uses the given index in an
* Index Only Scan for ordering.
*/
protected void isOrderedIndexOnlyScan(Statement stmt,
String query,
String index)
throws SQLException {
String query_plan = getQueryPlanString(stmt, query);
assertFalse(query_plan.contains("Sort"));
assertTrue(query_plan.contains("Index Only Scan using " + index));
}
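  /** Creates a table with a single-column primary key: (h bigint PRIMARY KEY, r float, vi int, vs text). */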
protected void createSimpleTableWithSingleColumnKey(String tableName) throws SQLException {
try (Statement statement = connection.createStatement()) {
String sql =
"CREATE TABLE " + tableName + "(h bigint PRIMARY KEY, r float, vi int, vs text)";
LOG.info("Creating table " + tableName + ", SQL statement: " + sql);
statement.execute(sql);
LOG.info("Table creation finished: " + tableName);
}
}
protected void createSimpleTable(String tableName) throws SQLException {
try (Statement statement = connection.createStatement()) {
createSimpleTable(statement, tableName);
}
}
protected void createSimpleTable(Statement statement, String tableName) throws SQLException {
String sql =
"CREATE TABLE " + tableName + "(h bigint, r float, vi int, vs text, PRIMARY KEY (h, r))";
LOG.info("Creating table " + tableName + ", SQL statement: " + sql);
statement.execute(sql);
LOG.info("Table creation finished: " + tableName);
}
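  /**
   * Creates a table partitioned by k1 using the given partitioning mode, with optional PRIMARY KEY
   * and UNIQUE (+ INCLUDE) constraints.
   */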
protected void createPartitionedTable(Statement stmt,
String tablename,
YSQLPartitionType mode) throws SQLException {
createPartitionedTable(stmt, tablename, mode, "", "", "");
}
protected void createPartitionedTable(Statement stmt,
String tablename,
YSQLPartitionType mode,
String primary_keys) throws SQLException {
createPartitionedTable(stmt, tablename, mode, primary_keys, "", "");
}
protected void createPartitionedTable(Statement stmt,
String tablename,
YSQLPartitionType mode,
String unique_keys,
String unique_includes) throws SQLException {
createPartitionedTable(stmt, tablename, mode, "", unique_keys, unique_includes);
}
protected void createPartitionedTable(Statement stmt,
String tablename,
YSQLPartitionType mode,
String primary_keys,
String unique_keys,
String unique_includes)
throws SQLException {
String pk_constraint = primary_keys.isEmpty() ? "" : (", PRIMARY KEY(" + primary_keys + ")");
String unique_constraint = "";
if (!unique_keys.isEmpty()) {
unique_constraint = ", UNIQUE(" + unique_keys + ")";
if (!unique_includes.isEmpty()) {
unique_constraint += " INCLUDE (" + unique_includes + ")";
}
}
unique_constraint += ")";
final String create_sql = "CREATE TABLE " + tablename +
"(k1 int, k2 text, k3 int, v1 int, v2 text" + pk_constraint +
unique_constraint + " PARTITION BY " + mode + " (k1)";
LOG.info("Creating table " + tablename + ", SQL statement: " + create_sql);
stmt.execute(create_sql);
LOG.info("Table creation finished: " + tablename);
}
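  /** Creates partition number {@code partIndex} of {@code tablename} for the given partitioning mode. */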
protected void createPartition(Statement stmt,
String tablename,
YSQLPartitionType mode,
int partIndex) throws SQLException {
createPartition(stmt, tablename, mode, partIndex, "", "", "");
}
protected void createPartition(Statement stmt,
String tablename,
YSQLPartitionType mode,
int partIndex,
String primary_keys) throws SQLException {
createPartition(stmt, tablename, mode, partIndex, primary_keys, "", "");
}
protected void createPartition(Statement stmt,
String tablename,
YSQLPartitionType mode,
int partIndex,
String unique_keys,
String unique_includes) throws SQLException {
createPartition(stmt, tablename, mode, partIndex, "", unique_keys, unique_includes);
}
protected void createPartition(Statement stmt,
String tablename,
YSQLPartitionType mode,
int partIndex,
String primary_keys,
String unique_keys,
String unique_includes) throws SQLException {
String partition_clause = "";
if (mode.equals(YSQLPartitionType.HASH)) {
partition_clause = "WITH (modulus 2, remainder " + (partIndex - 1) + ")";
} else if (mode.equals(YSQLPartitionType.LIST)) {
partition_clause = "IN (" + partIndex + ")";
} else {
partition_clause = "FROM (" + partIndex + ") TO (" + (partIndex + 1) + ")";
}
String pk_constraint = primary_keys.isEmpty() ? "" : (", PRIMARY KEY(" + primary_keys + ")");
String unique_constraint = "";
if (!unique_keys.isEmpty()) {
unique_constraint = ", UNIQUE(" + unique_keys + ")";
if (!unique_includes.isEmpty()) {
unique_constraint += " INCLUDE (" + unique_includes + ")";
}
}
unique_constraint += ")";
final String create_sql = "CREATE TABLE " + tablename + "_" + partIndex +
" PARTITION OF " + tablename + "(k1, k2, k3, v1, v2" + pk_constraint +
unique_constraint + " FOR VALUES " + partition_clause;
LOG.info("Creating table " + tablename + ", SQL statement: " + create_sql);
stmt.execute(create_sql);
LOG.info("Table creation finished: " + tablename);
}
/**
* @param statement The statement used to execute the query.
* @param query The query string.
* @param errorSubstring A (case-insensitive) substring of the expected error message.
*/
protected void runInvalidQuery(Statement statement, String query, String errorSubstring) {
try {
statement.execute(query);
fail(String.format("Statement did not fail: %s", query));
} catch (SQLException e) {
if (StringUtils.containsIgnoreCase(e.getMessage(), errorSubstring)) {
LOG.info("Expected exception", e);
} else {
fail(String.format("Unexpected Error Message. Got: '%s', Expected to contain: '%s'",
e.getMessage(), errorSubstring));
}
}
}
protected void runInvalidSystemQuery(Statement stmt, String query, String errorSubstring)
throws Exception {
systemTableQueryHelper(stmt, () -> {
runInvalidQuery(stmt, query, errorSubstring);
return 0;
});
}
/**
* Verify that a (write) query succeeds with a warning matching the given substring.
* @param statement The statement used to execute the query.
* @param query The query string.
* @param warningSubstring A (case-insensitive) substring of the expected warning message.
*/
protected void verifyStatementWarning(Statement statement,
String query,
String warningSubstring) throws SQLException {
statement.execute(query);
SQLWarning warning = statement.getWarnings();
assertNotEquals("Expected (at least) one warning", null, warning);
assertTrue(String.format("Unexpected Warning Message. Got: '%s', expected to contain : '%s",
warning.getMessage(), warningSubstring),
StringUtils.containsIgnoreCase(warning.getMessage(), warningSubstring));
assertEquals("Expected (at most) one warning", null, warning.getNextWarning());
}
protected String getSimpleTableCreationStatement(
String tableName,
String valueColumnName,
PartitioningMode partitioningMode) {
String firstColumnIndexMode;
if (partitioningMode == PartitioningMode.HASH) {
firstColumnIndexMode = "HASH";
} else {
firstColumnIndexMode = "ASC";
}
return "CREATE TABLE " + tableName + "(h int, r int, " + valueColumnName + " int, " +
"PRIMARY KEY (h " + firstColumnIndexMode + ", r))";
}
protected void createSimpleTable(
String tableName,
String valueColumnName,
PartitioningMode partitioningMode) throws SQLException {
try (Statement statement = connection.createStatement()) {
String sql = getSimpleTableCreationStatement(tableName, valueColumnName, partitioningMode);
LOG.info("Creating table " + tableName + ", SQL statement: " + sql);
statement.execute(sql);
LOG.info("Table creation finished: " + tableName);
}
}
protected void createSimpleTable(String tableName, String valueColumnName) throws SQLException {
createSimpleTable(tableName, valueColumnName, PartitioningMode.HASH);
}
protected List<Row> setupSimpleTable(String tableName) throws SQLException {
List<Row> allRows = new ArrayList<>();
try (Statement statement = connection.createStatement()) {
createSimpleTable(statement, tableName);
String insertTemplate = "INSERT INTO %s(h, r, vi, vs) VALUES (%d, %f, %d, '%s')";
for (int h = 0; h < 10; h++) {
for (int r = 0; r < 10; r++) {
statement.execute(String.format(insertTemplate, tableName,
h, r + 0.5, h * 10 + r, "v" + h + r));
allRows.add(new Row((long) h,
r + 0.5,
h * 10 + r,
"v" + h + r));
}
}
}
// Sort inserted rows and return.
Collections.sort(allRows);
return allRows;
}
@Override
public int getTestMethodTimeoutSec() {
// initdb takes a really long time on macOS in debug mode.
return 1200;
}
void waitForTServerHeartbeat() throws InterruptedException {
// Wait an extra heartbeat interval to avoid race conditions due to deviations
// in the real heartbeat frequency (due to latency, scheduling, etc.).
Thread.sleep(MiniYBCluster.TSERVER_HEARTBEAT_INTERVAL_MS * 2);
}
/** Run a query and check row-count. */
public int runQueryWithRowCount(Statement stmt, String query, int expectedRowCount)
throws Exception {
// Query and check row count.
int rowCount = 0;
try (ResultSet rs = stmt.executeQuery(query)) {
while (rs.next()) {
rowCount++;
}
}
if (expectedRowCount >= 0) {
// Caller wants to assert row-count.
assertEquals(expectedRowCount, rowCount);
} else {
LOG.info(String.format("Exec query: row count = %d", rowCount));
}
return rowCount;
}
/** Run a query and check row-count. */
private void runQueryWithRowCount(PreparedStatement pstmt, int expectedRowCount)
throws Exception {
// Query and check row count.
int rowCount = 0;
try (ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
rowCount++;
}
}
if (expectedRowCount >= 0) {
// Caller wants to assert row-count.
assertEquals(expectedRowCount, rowCount);
} else {
LOG.info(String.format("Exec query: row count = %d", rowCount));
}
}
/** Time execution of a query. */
protected long timeQueryWithRowCount(Statement stmt,
String query,
int expectedRowCount,
int numberOfRuns) throws Exception {
LOG.info(String.format("Exec statement: %s", stmt));
// Not timing the first query run as its result is not predictable.
runQueryWithRowCount(stmt, query, expectedRowCount);
    // Seek average run-time over a few different runs.
final long startTimeMillis = System.currentTimeMillis();
for (int qrun = 0; qrun < numberOfRuns; qrun++) {
runQueryWithRowCount(stmt, query, expectedRowCount);
}
// Check the elapsed time.
long result = System.currentTimeMillis() - startTimeMillis;
LOG.info(String.format("Ran query %d times. Total elapsed time = %d msecs",
numberOfRuns, result));
return result;
}
/** Time execution of a query. */
protected long timeQueryWithRowCount(
PreparedStatement pstmt,
int expectedRowCount,
int numberOfRuns) throws Exception {
LOG.info("Exec prepared statement");
// Not timing the first query run as its result is not predictable.
runQueryWithRowCount(pstmt, expectedRowCount);
    // Seek average run-time over a few different runs.
final long startTimeMillis = System.currentTimeMillis();
for (int qrun = 0; qrun < numberOfRuns; qrun++) {
runQueryWithRowCount(pstmt, expectedRowCount);
}
// Check the elapsed time.
long result = System.currentTimeMillis() - startTimeMillis;
LOG.info(String.format("Ran statement %d times. Total elapsed time = %d msecs",
numberOfRuns, result));
return result;
}
/** Time execution of a statement. */
protected long timeStatement(String sql, int numberOfRuns)
throws Exception {
LOG.info(String.format("Exec statement: %s", sql));
// Not timing the first query run as its result is not predictable.
try (Statement statement = connection.createStatement()) {
statement.executeUpdate(sql);
}
    // Seek average run-time over a few different runs.
final long startTimeMillis = System.currentTimeMillis();
try (Statement statement = connection.createStatement()) {
for (int qrun = 0; qrun < numberOfRuns; qrun++) {
statement.executeUpdate(sql);
}
}
// Check the elapsed time.
long result = System.currentTimeMillis() - startTimeMillis;
LOG.info(String.format("Ran statement %d times. Total elapsed time = %d msecs",
numberOfRuns, result));
return result;
}
/** Time execution of a statement. */
protected long timeStatement(PreparedStatement pstmt, int numberOfRuns)
throws Exception {
LOG.info("Exec prepared statement");
// Not timing the first query run as its result is not predictable.
pstmt.executeUpdate();
    // Seek average run-time over a few different runs.
final long startTimeMillis = System.currentTimeMillis();
for (int qrun = 0; qrun < numberOfRuns; qrun++) {
pstmt.executeUpdate();
}
// Check the elapsed time.
long result = System.currentTimeMillis() - startTimeMillis;
LOG.info(String.format("Ran statement %d times. Total elapsed time = %d msecs",
numberOfRuns, result));
return result;
}
/** Time execution of a query. */
protected long assertQueryRuntimeWithRowCount(
Statement stmt,
String query,
int expectedRowCount,
int numberOfRuns,
long maxTotalMillis) throws Exception {
long elapsedMillis = timeQueryWithRowCount(stmt, query, expectedRowCount, numberOfRuns);
assertTrue(
String.format("Query '%s' took %d ms! Expected %d ms at most", stmt, elapsedMillis,
maxTotalMillis),
elapsedMillis <= maxTotalMillis);
return elapsedMillis;
}
/** Time execution of a query. */
protected long assertQueryRuntimeWithRowCount(
PreparedStatement pstmt,
int expectedRowCount,
int numberOfRuns,
long maxTotalMillis) throws Exception {
long elapsedMillis = timeQueryWithRowCount(pstmt, expectedRowCount, numberOfRuns);
assertTrue(
String.format("Query took %d ms! Expected %d ms at most", elapsedMillis, maxTotalMillis),
elapsedMillis <= maxTotalMillis);
return elapsedMillis;
}
/** Time execution of a statement. */
protected long assertStatementRuntime(
String sql,
int numberOfRuns,
long maxTotalMillis) throws Exception {
long elapsedMillis = timeStatement(sql, numberOfRuns);
assertTrue(
String.format("Statement '%s' took %d ms! Expected %d ms at most", sql, elapsedMillis,
maxTotalMillis),
elapsedMillis <= maxTotalMillis);
return elapsedMillis;
}
/** Time execution of a statement. */
protected long assertStatementRuntime(
PreparedStatement pstmt,
int numberOfRuns,
long maxTotalMillis) throws Exception {
long elapsedMillis = timeStatement(pstmt, numberOfRuns);
assertTrue(
String.format("Statement took %d ms! Expected %d ms at most", elapsedMillis,
maxTotalMillis),
elapsedMillis <= maxTotalMillis);
return elapsedMillis;
}
/** UUID of the first table with specified name. **/
private String getTableUUID(String tableName) throws Exception {
for (MasterDdlOuterClass.ListTablesResponsePB.TableInfo table :
miniCluster.getClient().getTablesList().getTableInfoList()) {
if (table.getName().equals(tableName)) {
return table.getId().toStringUtf8();
}
}
throw new Exception(String.format("YSQL table ''%s' not found", tableName));
}
protected RocksDBMetrics getRocksDBMetric(String tableName) throws Exception {
return getRocksDBMetricByTableUUID(getTableUUID(tableName));
}
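  /** Returns the value of a counter metric for the first table with the given name. */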
protected int getTableCounterMetric(String tableName,
String metricName) throws Exception {
return getTableCounterMetricByTableUUID(getTableUUID(tableName), metricName);
}
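  /** Runs EXPLAIN ANALYZE on the query and returns the full plan output as a single string. */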
protected String getExplainAnalyzeOutput(Statement stmt, String query) throws Exception {
try (ResultSet rs = stmt.executeQuery("EXPLAIN ANALYZE " + query)) {
StringBuilder sb = new StringBuilder();
while (rs.next()) {
sb.append(rs.getString(1)).append("\n");
}
return sb.toString().trim();
}
}
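  /** Starts an extra tserver with the cluster's flags plus {@code additionalFlags}; returns its index. */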
protected int spawnTServerWithFlags(Map<String, String> additionalFlags) throws Exception {
Map<String, String> tserverFlags = getTServerFlags();
tserverFlags.putAll(additionalFlags);
int tserver = miniCluster.getNumTServers();
miniCluster.startTServer(tserverFlags);
return tserver;
}
/**
* Simple helper for {@link #spawnTServerWithFlags(Map)}.
* <p>
* Please use {@code ImmutableMap.of} for more arguments!
*/
protected int spawnTServerWithFlags(
String additionalFlagKey, String additionalFlagValue) throws Exception {
return spawnTServerWithFlags(ImmutableMap.of(additionalFlagKey, additionalFlagValue));
}
/** Run a process, returning output lines. */
protected List<String> runProcess(String... args) throws Exception {
return runProcess(new ProcessBuilder(args));
}
/** Run a process, returning output lines. */
protected List<String> runProcess(ProcessBuilder procBuilder) throws Exception {
Process proc = procBuilder.start();
int code = proc.waitFor();
if (code != 0) {
String err = IOUtils.toString(proc.getErrorStream(), StandardCharsets.UTF_8);
fail("Process exited with code " + code + ", message: <" + err.trim() + ">");
}
String output = IOUtils.toString(proc.getInputStream(), StandardCharsets.UTF_8);
return Arrays.asList(output.split("\n"));
}
protected HostAndPort getMasterLeaderAddress() {
return miniCluster.getClient().getLeaderMasterHostAndPort();
}
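  /** Sets a runtime flag on the given server via yb-ts-cli. */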
protected void setServerFlag(HostAndPort server, String flag, String value) throws Exception {
runProcess(TestUtils.findBinary("yb-ts-cli"),
"--server_address",
server.toString(),
"set_flag",
"-force",
flag,
value);
}
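  /**
   * Builder for JDBC connections to the mini cluster. Each {@code with*} method returns a modified
   * copy, so a builder instance can be reused safely.
   */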
public static class ConnectionBuilder implements Cloneable {
private static final int MAX_CONNECTION_ATTEMPTS = 15;
private static final int INITIAL_CONNECTION_DELAY_MS = 500;
private final MiniYBCluster miniCluster;
private boolean loadBalance;
private int tserverIndex = 0;
private String database = DEFAULT_PG_DATABASE;
private String user = TEST_PG_USER;
private String password = null;
private String preferQueryMode = null;
private String sslmode = null;
private String sslcert = null;
private String sslkey = null;
private String sslrootcert = null;
private IsolationLevel isolationLevel = IsolationLevel.DEFAULT;
private AutoCommit autoCommit = AutoCommit.DEFAULT;
ConnectionBuilder(MiniYBCluster miniCluster) {
this.miniCluster = checkNotNull(miniCluster);
}
ConnectionBuilder withTServer(int tserverIndex) {
ConnectionBuilder copy = clone();
copy.tserverIndex = tserverIndex;
return copy;
}
ConnectionBuilder withDatabase(String database) {
ConnectionBuilder copy = clone();
copy.database = database;
return copy;
}
ConnectionBuilder withUser(String user) {
ConnectionBuilder copy = clone();
copy.user = user;
return copy;
}
ConnectionBuilder withPassword(String password) {
ConnectionBuilder copy = clone();
copy.password = password;
return copy;
}
ConnectionBuilder withIsolationLevel(IsolationLevel isolationLevel) {
ConnectionBuilder copy = clone();
copy.isolationLevel = isolationLevel;
return copy;
}
ConnectionBuilder withAutoCommit(AutoCommit autoCommit) {
ConnectionBuilder copy = clone();
copy.autoCommit = autoCommit;
return copy;
}
ConnectionBuilder withPreferQueryMode(String preferQueryMode) {
ConnectionBuilder copy = clone();
copy.preferQueryMode = preferQueryMode;
return copy;
}
ConnectionBuilder withSslMode(String sslmode) {
ConnectionBuilder copy = clone();
copy.sslmode = sslmode;
return copy;
}
ConnectionBuilder withSslCert(String sslcert) {
ConnectionBuilder copy = clone();
copy.sslcert = sslcert;
return copy;
}
ConnectionBuilder withSslKey(String sslkey) {
ConnectionBuilder copy = clone();
copy.sslkey = sslkey;
return copy;
}
ConnectionBuilder withSslRootCert(String sslrootcert) {
ConnectionBuilder copy = clone();
copy.sslrootcert = sslrootcert;
return copy;
}
@Override
protected ConnectionBuilder clone() {
try {
return (ConnectionBuilder) super.clone();
} catch (CloneNotSupportedException ex) {
throw new RuntimeException("This can't happen, but to keep compiler happy", ex);
}
}
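    /**
     * Opens a connection to the selected tserver's postgres endpoint, retrying while the backend
     * is still starting up or in recovery mode.
     */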
Connection connect() throws Exception {
final InetSocketAddress postgresAddress = miniCluster.getPostgresContactPoints()
.get(tserverIndex);
String url = String.format(
"jdbc:yugabytedb://%s:%d/%s",
postgresAddress.getHostName(),
postgresAddress.getPort(),
database
);
Properties props = new Properties();
props.setProperty("user", user);
if (password != null) {
props.setProperty("password", password);
}
if (preferQueryMode != null) {
props.setProperty("preferQueryMode", preferQueryMode);
}
if (sslmode != null) {
props.setProperty("sslmode", sslmode);
}
if (sslcert != null) {
props.setProperty("sslcert", sslcert);
}
if (sslkey != null) {
props.setProperty("sslkey", sslkey);
}
if (sslrootcert != null) {
props.setProperty("sslrootcert", sslrootcert);
}
if (EnvAndSysPropertyUtil.isEnvVarOrSystemPropertyTrue("YB_PG_JDBC_TRACE_LOGGING")) {
props.setProperty("loggerLevel", "TRACE");
}
boolean loadBalance = getLoadBalance();
String lbValue = loadBalance ? "true" : "false";
props.setProperty("load-balance", lbValue);
int delayMs = INITIAL_CONNECTION_DELAY_MS;
for (int attempt = 1; attempt <= MAX_CONNECTION_ATTEMPTS; ++attempt) {
Connection connection = null;
try {
connection = checkNotNull(DriverManager.getConnection(url, props));
if (isolationLevel != null) {
connection.setTransactionIsolation(isolationLevel.pgIsolationLevel);
}
if (autoCommit != null) {
connection.setAutoCommit(autoCommit.enabled);
}
return connection;
} catch (SQLException sqlEx) {
// Close the connection now if we opened it, instead of waiting until the end of the test.
if (connection != null) {
try {
connection.close();
} catch (SQLException closingError) {
LOG.error("Failure to close connection during failure cleanup before a retry:",
closingError);
LOG.error("When handling this exception when opening/setting up connection:", sqlEx);
}
}
boolean retry = false;
if (attempt < MAX_CONNECTION_ATTEMPTS) {
if (sqlEx.getMessage().contains("FATAL: the database system is starting up")
|| sqlEx.getMessage().contains("refused. Check that the hostname and port are " +
"correct and that the postmaster is accepting")) {
retry = true;
LOG.info("Postgres is still starting up, waiting for " + delayMs + " ms. " +
"Got message: " + sqlEx.getMessage());
} else if (sqlEx.getMessage().contains("the database system is in recovery mode")) {
retry = true;
LOG.info("Postgres is in recovery mode, waiting for " + delayMs + " ms. " +
"Got message: " + sqlEx.getMessage());
}
}
if (retry) {
Thread.sleep(delayMs);
delayMs = Math.min(delayMs + 500, 10000);
} else {
LOG.error("Exception while trying to create connection (after " + attempt +
" attempts): " + sqlEx.getMessage());
throw sqlEx;
}
}
}
throw new IllegalStateException("Should not be able to reach here");
}
public boolean getLoadBalance() {
return loadBalance;
}
public void setLoadBalance(boolean lb) {
loadBalance = lb;
}
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
tests/integration/test_keycloak.py | # Copyright (C) 2020 Extreme Networks, Inc - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest2
from st2auth_oidc_backend import oidc_backend
class KeycloakAuthenticationTest(unittest2.TestCase):
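    """Integration tests for the st2auth OIDC backend against a running Keycloak instance."""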
@classmethod
def setUpClass(cls):
super(KeycloakAuthenticationTest, cls).setUpClass()
cls.base_url = os.environ.get('ST2_OIDC_URL', 'https://127.0.0.1:8443')
cls.realm = os.environ.get('ST2_OIDC_REALM', 'Test')
cls.client_name = os.environ.get('ST2_OIDC_CLIENT_NAME', 'spring-boot')
cls.client_secret = os.environ.get('ST2_OIDC_CLIENT_SECRET', '45892b6')
cls.verify_ssl = False
def test_authenticate(self):
username = 'developer'
passwd = 'developer_pass'
backend = oidc_backend.OIDCAuthenticationBackend(self.base_url, self.realm, self.client_name,
self.client_secret, verify_ssl=self.verify_ssl)
authenticated = backend.authenticate(username, passwd)
self.assertTrue(authenticated)
def test_user(self):
username = 'developer'
backend = oidc_backend.OIDCAuthenticationBackend(self.base_url, self.realm, self.client_name,
self.client_secret, verify_ssl=self.verify_ssl)
user = backend.get_user(username)
self.assertIsNotNone(user)
self.assertIn('id', user)
self.assertIn('username', user)
self.assertIn('firstName', user)
self.assertIn('lastName', user)
def test_groups_client_roles(self):
username = 'developer'
roles = ['st2-read', 'st2-execute']
backend = oidc_backend.OIDCAuthenticationBackend(self.base_url, self.realm, self.client_name,
self.client_secret, verify_ssl=self.verify_ssl)
groups = backend.get_user_groups(username)
self.assertIsNotNone(groups)
for role in roles:
self.assertIn(role, groups)
def test_groups_realm_roles(self):
username = 'developer'
roles = ['St2-developers']
backend = oidc_backend.OIDCAuthenticationBackend(self.base_url, self.realm, self.client_name,
self.client_secret, use_client_roles=False,
verify_ssl=self.verify_ssl)
groups = backend.get_user_groups(username)
self.assertIsNotNone(groups)
for role in roles:
self.assertIn(role, groups)
| [] | [] | [
"ST2_OIDC_REALM",
"ST2_OIDC_CLIENT_NAME",
"ST2_OIDC_CLIENT_SECRET",
"ST2_OIDC_URL"
] | [] | ["ST2_OIDC_REALM", "ST2_OIDC_CLIENT_NAME", "ST2_OIDC_CLIENT_SECRET", "ST2_OIDC_URL"] | python | 4 | 0 | |
pkg/crd/client.go | /*
Copyright 2021 The Fission Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package crd
import (
"errors"
"fmt"
"os"
"path"
"time"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
genClientset "github.com/fnlize/fnlize/pkg/apis/genclient/clientset/versioned"
)
type (
FissionClient struct {
genClientset.Interface
}
)
// GetKubeconfig gets the kube config file path from the environment variable
// $KUBECONFIG, or falls back to $HOME/.kube/config.
func GetKubeconfig() (kubeconfig string, err error) {
kubeconfig = os.Getenv("KUBECONFIG")
if len(kubeconfig) != 0 {
return
}
var homeDir string
if homeDir, err = os.UserHomeDir(); err != nil {
err = fmt.Errorf("cannot get kube config file")
return
}
var fileInfo os.FileInfo
if fileInfo, err = os.Stat(path.Join(homeDir, ".kube", "config")); err != nil {
err = fmt.Errorf("cannot get kube config file")
return
}
if fileInfo.IsDir() {
err = fmt.Errorf("cannot get kube config file")
return
}
kubeconfig = path.Join(homeDir, ".kube", "config")
return
}
// GetClientset gets a kubernetes REST config from the kubeconfig file at the
// environment var $KUBECONFIG, or an in-cluster config if that's undefined.
func GetClientset() (config *rest.Config, err error) {
var kubeConfig string
if kubeConfig, err = GetKubeconfig(); err != nil {
fmt.Printf("get kube config file with error: %v", err)
err = nil // clean errors
if config, err = rest.InClusterConfig(); err != nil {
return
}
} else {
if config, err = clientcmd.BuildConfigFromFlags("", kubeConfig); err != nil {
return
}
}
return
}
// GetKubernetesClient gets the kubernetes and apiextensions clients using the kubeconfig
// file at the environment var $KUBECONFIG, or an in-cluster config if that's undefined.
func GetKubernetesClient() (*rest.Config, *kubernetes.Clientset, *apiextensionsclient.Clientset, error) {
var config *rest.Config
var err error
if config, err = GetClientset(); err != nil {
return nil, nil, nil, err
}
// creates the client set
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, nil, nil, err
}
apiExtClientset, err := apiextensionsclient.NewForConfig(config)
if err != nil {
return nil, nil, nil, err
}
return config, clientset, apiExtClientset, nil
}
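// MakeFissionClient creates the Fission CRD client along with the core Kubernetes and
// apiextensions clientsets, all built from the same rest config.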
func MakeFissionClient() (*FissionClient, *kubernetes.Clientset, *apiextensionsclient.Clientset, error) {
config, kubeClient, apiExtClient, err := GetKubernetesClient()
if err != nil {
return nil, nil, nil, err
}
// make a CRD REST client with the config
crdClient, err := genClientset.NewForConfig(config)
if err != nil {
return nil, nil, nil, err
}
fc := &FissionClient{
Interface: crdClient,
}
return fc, kubeClient, apiExtClient, nil
}
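// WaitForCRDs blocks until the Fission CRDs are queryable, returning an error after a
// 30 second timeout.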
func (fc *FissionClient) WaitForCRDs() error {
start := time.Now()
for {
fi := fc.CoreV1().Functions(metav1.NamespaceDefault)
_, err := fi.List(metav1.ListOptions{})
if err != nil {
time.Sleep(100 * time.Millisecond)
} else {
return nil
}
if time.Since(start) > 30*time.Second {
return errors.New("timeout waiting for CRDs")
}
}
}
// GetDynamicClient creates and returns a new dynamic client, or an error.
func GetDynamicClient() (dynamic.Interface, error) {
var config *rest.Config
var err error
if config, err = GetClientset(); err != nil {
return nil, err
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
return nil, err
}
return dynamicClient, nil
}
| [
"\"KUBECONFIG\""
] | [] | [
"KUBECONFIG"
] | [] | ["KUBECONFIG"] | go | 1 | 0 | |
freeport.go | package freeport
import (
"errors"
"fmt"
"net"
"os"
"strconv"
)
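// Options controls the interface address and port range scanned when looking for a free port.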
type Options struct {
Address string
Min int
Max int
}
// MakeOptions makes options to get non-privileged port [1024:49151]. It will use
// env var PORT, if set, as min value.
func MakeOptions(isAllInterfaces bool) (*Options, error) {
min := 0
s := os.Getenv("PORT")
if s != "" {
min, _ = strconv.Atoi(s)
}
if min == 0 {
min = 1024
}
address := "127.0.0.1"
if isAllInterfaces {
address = ""
}
// limit to non-privileged ports, sys apps should not be using FreePort
return &Options{Address: address, Min: min, Max: 49151}, nil
}
var ErrPortNotFound = errors.New("port not found")
// GetFreePortEx asks the kernel for a free open port that is ready to use. If options is nil, then
// default options are used.
func GetFreePortEx(options *Options) (int, error) {
var err error
if options == nil {
options, err = MakeOptions(false)
if err != nil {
return 0, err
}
}
for port := options.Min; port <= options.Max; port++ {
pingAddr := fmt.Sprintf("%s:%d", options.Address, port)
addr, err := net.ResolveTCPAddr("tcp", pingAddr)
if err != nil {
continue
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
continue
}
defer l.Close()
return l.Addr().(*net.TCPAddr).Port, nil
}
return 0, ErrPortNotFound
}
// GetFreePort gets non-privileged open port that is ready to use.
func GetFreePort() (int, error) {
return GetFreePortEx(nil)
}
// MustGetFreePort calls GetFreePort and panics on error
func MustGetFreePort() int {
port, err := GetFreePortEx(nil)
if err != nil {
panic(err)
}
return port
}
// GetFreePorts gets an array of non-privileged open ports that are ready to use.
func GetFreePorts(count int) ([]int, error) {
ports := make([]int, count)
options, err := MakeOptions(false)
if err != nil {
return nil, err
}
for i := 0; i < count; i++ {
port, err := GetFreePortEx(options)
if err != nil && err != ErrPortNotFound {
return nil, err
}
ports[i] = port
options.Min = port + 1
}
return ports, nil
}
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
cmd/gateway/gcs/gateway-gcs.go | /*
* MinIO Cloud Storage, (C) 2017-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gcs
import (
"context"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"os"
"path"
"strconv"
"regexp"
"strings"
"time"
"cloud.google.com/go/storage"
humanize "github.com/dustin/go-humanize"
"github.com/minio/cli"
miniogopolicy "github.com/minio/minio-go/v7/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/policy/condition"
"github.com/minio/minio/pkg/env"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
minio "github.com/minio/minio/cmd"
)
var (
// Project ID format is not valid.
errGCSInvalidProjectID = fmt.Errorf("GCS project id is either empty or invalid")
// Project ID not found
errGCSProjectIDNotFound = fmt.Errorf("Unknown project id")
// Invalid format.
errGCSFormat = fmt.Errorf("Unknown format")
)
const (
// Path where multipart objects are saved.
// If we change the backend format we will use a different url path like /multipart/v2
// but we will not migrate old data.
gcsMinioMultipartPathV1 = minio.GatewayMinioSysTmp + "multipart/v1"
// Multipart meta file.
gcsMinioMultipartMeta = "gcs.json"
// gcs.json version number
gcsMinioMultipartMetaCurrentVersion = "1"
// token prefixed with GCS returned marker to differentiate
// from user supplied marker.
gcsTokenPrefix = "{minio}"
// Maximum component object count to create a composite object.
// Refer https://cloud.google.com/storage/docs/composite-objects
gcsMaxComponents = 32
// Every 24 hours we scan minio.sys.tmp to delete expired multiparts in minio.sys.tmp
gcsCleanupInterval = time.Hour * 24
// The cleanup routine deletes files older than 2 weeks in minio.sys.tmp
gcsMultipartExpiry = time.Hour * 24 * 14
// Project ID key in credentials.json
gcsProjectIDKey = "project_id"
)
func init() {
const gcsGatewayTemplate = `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [PROJECTID]
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
PROJECTID:
  optional GCS project-id, expected when the GOOGLE_APPLICATION_CREDENTIALS env is not set
GOOGLE_APPLICATION_CREDENTIALS:
  path to credentials.json; generate it from https://developers.google.com/identity/protocols/application-default-credentials
EXAMPLES:
1. Start minio gateway server for GCS backend
{{.Prompt}} {{.EnvVarSetCommand}} GOOGLE_APPLICATION_CREDENTIALS{{.AssignmentOperator}}/path/to/credentials.json
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
{{.Prompt}} {{.HelpName}} mygcsprojectid
2. Start minio gateway server for GCS backend with edge caching enabled
{{.Prompt}} {{.EnvVarSetCommand}} GOOGLE_APPLICATION_CREDENTIALS{{.AssignmentOperator}}/path/to/credentials.json
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*;*.png"
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90
{{.Prompt}} {{.HelpName}} mygcsprojectid
`
minio.RegisterGatewayCommand(cli.Command{
Name: minio.GCSBackendGateway,
Usage: "Google Cloud Storage",
Action: gcsGatewayMain,
CustomHelpTemplate: gcsGatewayTemplate,
HideHelpCommand: true,
})
}
// Handler for 'minio gateway gcs' command line.
func gcsGatewayMain(ctx *cli.Context) {
projectID := ctx.Args().First()
if projectID == "" && os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" {
logger.LogIf(minio.GlobalContext, errGCSProjectIDNotFound, logger.Application)
cli.ShowCommandHelpAndExit(ctx, minio.GCSBackendGateway, 1)
}
if projectID != "" && !isValidGCSProjectIDFormat(projectID) {
reqInfo := (&logger.ReqInfo{}).AppendTags("projectID", ctx.Args().First())
contxt := logger.SetReqInfo(minio.GlobalContext, reqInfo)
logger.LogIf(contxt, errGCSInvalidProjectID, logger.Application)
cli.ShowCommandHelpAndExit(ctx, minio.GCSBackendGateway, 1)
}
minio.StartGateway(ctx, &GCS{projectID})
}
// GCS implements the MinIO Gateway for Google Cloud Storage.
type GCS struct {
projectID string
}
// Name returns the name of gcs ObjectLayer.
func (g *GCS) Name() string {
return minio.GCSBackendGateway
}
// NewGatewayLayer returns gcs ObjectLayer.
func (g *GCS) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
ctx := minio.GlobalContext
var err error
if g.projectID == "" {
// If project ID is not provided on command line, we figure it out
// from the credentials.json file.
g.projectID, err = gcsParseProjectID(env.Get("GOOGLE_APPLICATION_CREDENTIALS", ""))
if err != nil {
return nil, err
}
}
metrics := minio.NewMetrics()
t := &minio.MetricsTransport{
Transport: minio.NewGatewayHTTPTransport(),
Metrics: metrics,
}
// Initialize a GCS client.
// Send user-agent in this format for Google to obtain usage insights while participating in the
// Google Cloud Technology Partners (https://cloud.google.com/partners/)
client, err := storage.NewClient(ctx, option.WithUserAgent(fmt.Sprintf("MinIO/%s (GPN:MinIO;)", minio.Version)))
if err != nil {
return nil, err
}
gcs := &gcsGateway{
client: client,
projectID: g.projectID,
metrics: metrics,
httpClient: &http.Client{
Transport: t,
},
}
// Start background process to cleanup old files in minio.sys.tmp
go gcs.CleanupGCSMinioSysTmp(ctx)
return gcs, nil
}
// Production - GCS gateway is production ready.
func (g *GCS) Production() bool {
return true
}
// Stored in gcs.json - Contents of this file is not used anywhere. It can be
// used for debugging purposes.
type gcsMultipartMetaV1 struct {
Version string `json:"version"` // Version number
Bucket string `json:"bucket"` // Bucket name
Object string `json:"object"` // Object name
}
// Returns name of the multipart meta object.
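// The layout matches the path referenced in ListMultipartUploads below,
// e.g. "minio.sys.tmp/multipart/v1/<uploadID>/gcs.json".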
func gcsMultipartMetaName(uploadID string) string {
return fmt.Sprintf("%s/%s/%s", gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta)
}
// Returns name of the part object.
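// e.g. "minio.sys.tmp/multipart/v1/<uploadID>/00001.<etag>" for part number 1.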
func gcsMultipartDataName(uploadID string, partNumber int, etag string) string {
return fmt.Sprintf("%s/%s/%05d.%s", gcsMinioMultipartPathV1, uploadID, partNumber, etag)
}
// Convert MinIO errors to minio object layer errors.
func gcsToObjectError(err error, params ...string) error {
if err == nil {
return nil
}
bucket := ""
object := ""
uploadID := ""
if len(params) >= 1 {
bucket = params[0]
}
if len(params) == 2 {
object = params[1]
}
if len(params) == 3 {
uploadID = params[2]
}
// in some cases just a plain error is being returned
switch err.Error() {
case "storage: bucket doesn't exist":
err = minio.BucketNotFound{
Bucket: bucket,
}
return err
case "storage: object doesn't exist":
if uploadID != "" {
err = minio.InvalidUploadID{
UploadID: uploadID,
}
} else {
err = minio.ObjectNotFound{
Bucket: bucket,
Object: object,
}
}
return err
}
googleAPIErr, ok := err.(*googleapi.Error)
if !ok {
// We don't interpret non-Google-API errors here. Google API errors
// carry a Reason code that helps convert them to object-layer errors.
return err
}
if len(googleAPIErr.Errors) == 0 {
return err
}
reason := googleAPIErr.Errors[0].Reason
message := googleAPIErr.Errors[0].Message
switch reason {
case "required":
// Anonymous users do not have storage.xyz access to project 123.
fallthrough
case "keyInvalid":
fallthrough
case "forbidden":
err = minio.PrefixAccessDenied{
Bucket: bucket,
Object: object,
}
case "invalid":
err = minio.BucketNameInvalid{
Bucket: bucket,
}
case "notFound":
if object != "" {
err = minio.ObjectNotFound{
Bucket: bucket,
Object: object,
}
break
}
err = minio.BucketNotFound{Bucket: bucket}
case "conflict":
if message == "You already own this bucket. Please select another name." {
err = minio.BucketAlreadyOwnedByYou{Bucket: bucket}
break
}
if message == "Sorry, that name is not available. Please try a different one." {
err = minio.BucketAlreadyExists{Bucket: bucket}
break
}
err = minio.BucketNotEmpty{Bucket: bucket}
}
return err
}
// gcsProjectIDRegex defines a valid gcs project id format
var gcsProjectIDRegex = regexp.MustCompile("^[a-z][a-z0-9-]{5,29}$")
// isValidGCSProjectIDFormat - checks if a given project id format is valid or not.
// Project IDs must start with a lowercase letter and can have lowercase ASCII letters,
// digits or hyphens. Project IDs must be between 6 and 30 characters.
// Ref: https://cloud.google.com/resource-manager/reference/rest/v1/projects#Project (projectId section)
func isValidGCSProjectIDFormat(projectID string) bool {
// Checking projectID format
return gcsProjectIDRegex.MatchString(projectID)
}
// gcsGateway - Implements gateway for MinIO and GCS compatible object storage servers.
type gcsGateway struct {
minio.GatewayUnsupported
client *storage.Client
httpClient *http.Client
metrics *minio.Metrics
projectID string
}
// Returns projectID from the GOOGLE_APPLICATION_CREDENTIALS file.
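// The credentials file is parsed as a flat JSON object and only the
// gcsProjectIDKey entry (presumably the service account's "project_id"
// field) is used; all other keys are ignored.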
func gcsParseProjectID(credsFile string) (projectID string, err error) {
contents, err := ioutil.ReadFile(credsFile)
if err != nil {
return projectID, err
}
googleCreds := make(map[string]string)
if err = json.Unmarshal(contents, &googleCreds); err != nil {
return projectID, err
}
return googleCreds[gcsProjectIDKey], err
}
// GetMetrics returns this gateway's metrics
func (l *gcsGateway) GetMetrics(ctx context.Context) (*minio.Metrics, error) {
return l.metrics, nil
}
// Cleanup old files in minio.sys.tmp of the given bucket.
func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(ctx context.Context, bucket string) {
it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: minio.GatewayMinioSysTmp, Versions: false})
for {
attrs, err := it.Next()
if err != nil {
if err != iterator.Done {
reqInfo := &logger.ReqInfo{BucketName: bucket}
ctx := logger.SetReqInfo(minio.GlobalContext, reqInfo)
logger.LogIf(ctx, err)
}
return
}
if time.Since(attrs.Updated) > gcsMultipartExpiry {
// Delete files older than 2 weeks.
err := l.client.Bucket(bucket).Object(attrs.Name).Delete(ctx)
if err != nil {
reqInfo := &logger.ReqInfo{BucketName: bucket, ObjectName: attrs.Name}
ctx := logger.SetReqInfo(minio.GlobalContext, reqInfo)
logger.LogIf(ctx, err)
return
}
}
}
}
// Cleanup old files in minio.sys.tmp of all buckets.
func (l *gcsGateway) CleanupGCSMinioSysTmp(ctx context.Context) {
for {
it := l.client.Buckets(ctx, l.projectID)
for {
attrs, err := it.Next()
if err != nil {
break
}
l.CleanupGCSMinioSysTmpBucket(ctx, attrs.Name)
}
// Run the cleanup loop every 1 day.
time.Sleep(gcsCleanupInterval)
}
}
// Shutdown - save any gateway metadata to disk
// if necessary and reload upon next restart.
func (l *gcsGateway) Shutdown(ctx context.Context) error {
return nil
}
// StorageInfo - Not relevant to GCS backend.
func (l *gcsGateway) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, _ []error) {
si.Backend.Type = minio.BackendGateway
si.Backend.GatewayOnline = minio.IsBackendOnline(ctx, l.httpClient, "https://storage.googleapis.com")
return si, nil
}
// MakeBucketWithLocation - Create a new container on GCS backend.
func (l *gcsGateway) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error {
if opts.LockEnabled || opts.VersioningEnabled {
return minio.NotImplemented{}
}
bkt := l.client.Bucket(bucket)
// we'll default to the us multi-region in case of us-east-1
location := opts.Location
if location == "us-east-1" {
location = "us"
}
err := bkt.Create(ctx, l.projectID, &storage.BucketAttrs{
Location: location,
})
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket)
}
// GetBucketInfo - Get bucket metadata.
func (l *gcsGateway) GetBucketInfo(ctx context.Context, bucket string) (minio.BucketInfo, error) {
attrs, err := l.client.Bucket(bucket).Attrs(ctx)
if err != nil {
logger.LogIf(ctx, err)
return minio.BucketInfo{}, gcsToObjectError(err, bucket)
}
return minio.BucketInfo{
Name: attrs.Name,
Created: attrs.Created,
}, nil
}
// ListBuckets lists all buckets under your project-id on GCS.
func (l *gcsGateway) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
it := l.client.Buckets(ctx, l.projectID)
// Iterate and capture all the buckets.
for {
attrs, ierr := it.Next()
if ierr == iterator.Done {
break
}
if ierr != nil {
return buckets, gcsToObjectError(ierr)
}
buckets = append(buckets, minio.BucketInfo{
Name: attrs.Name,
Created: attrs.Created,
})
}
return buckets, nil
}
// DeleteBucket delete a bucket on GCS.
func (l *gcsGateway) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
itObject := l.client.Bucket(bucket).Objects(ctx, &storage.Query{
Delimiter: minio.SlashSeparator,
Versions: false,
})
// We list the bucket and if we find any objects we return BucketNotEmpty error. If we
// find only "minio.sys.tmp/" then we remove it before deleting the bucket.
gcsMinioPathFound := false
nonGCSMinioPathFound := false
for {
objAttrs, err := itObject.Next()
if err == iterator.Done {
break
}
if err != nil {
logger.LogIf(ctx, err)
return gcsToObjectError(err)
}
if objAttrs.Prefix == minio.GatewayMinioSysTmp {
gcsMinioPathFound = true
continue
}
nonGCSMinioPathFound = true
break
}
if nonGCSMinioPathFound {
logger.LogIf(ctx, minio.BucketNotEmpty{})
return gcsToObjectError(minio.BucketNotEmpty{})
}
if gcsMinioPathFound {
// Remove minio.sys.tmp before deleting the bucket.
itObject = l.client.Bucket(bucket).Objects(ctx, &storage.Query{Versions: false, Prefix: minio.GatewayMinioSysTmp})
for {
objAttrs, err := itObject.Next()
if err == iterator.Done {
break
}
if err != nil {
logger.LogIf(ctx, err)
return gcsToObjectError(err)
}
err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(ctx)
if err != nil {
logger.LogIf(ctx, err)
return gcsToObjectError(err)
}
}
}
err := l.client.Bucket(bucket).Delete(ctx)
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket)
}
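// toGCSPageToken converts an object name into a GCS-style page token so an
// S3 marker can be used to resume a listing. The token is the base64 encoding
// of the byte 0x0a, a compact length encoding (one byte, or two for names
// longer than 127 bytes), and the raw object name; for example, the name
// "obj3" encodes to "CgRvYmoz".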
func toGCSPageToken(name string) string {
length := uint16(len(name))
b := []byte{
0xa,
byte(length & 0xFF),
}
length = length >> 7
if length > 0 {
b = append(b, byte(length&0xFF))
}
b = append(b, []byte(name)...)
return base64.StdEncoding.EncodeToString(b)
}
// Returns true if the marker was returned by GCS, i.e. prefixed with
// gcsTokenPrefix by the MinIO GCS gateway.
func isGCSMarker(marker string) bool {
return strings.HasPrefix(marker, gcsTokenPrefix)
}
// ListObjects - lists all blobs in GCS bucket filtered by prefix
func (l *gcsGateway) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (minio.ListObjectsInfo, error) {
if maxKeys == 0 {
return minio.ListObjectsInfo{}, nil
}
it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{
Delimiter: delimiter,
Prefix: prefix,
Versions: false,
})
// To accommodate S3-compatible applications using
// ListObjectsV1 to use object keys as markers to control the
// listing of objects, we use the following encoding scheme to
// distinguish between GCS continuation tokens and application
// supplied markers.
//
// - NextMarker in ListObjectsV1 response is constructed by
// prefixing "{minio}" to the GCS continuation token,
// e.g. "{minio}CgRvYmoz"
//
// - Application supplied markers are transformed to a
// GCS continuation token.
// If application is using GCS continuation token we should
// strip the gcsTokenPrefix we added.
token := ""
if marker != "" {
if isGCSMarker(marker) {
token = strings.TrimPrefix(marker, gcsTokenPrefix)
} else {
token = toGCSPageToken(marker)
}
}
nextMarker := ""
var prefixes []string
var objects []minio.ObjectInfo
var nextPageToken string
var err error
pager := iterator.NewPager(it, maxKeys, token)
for {
gcsObjects := make([]*storage.ObjectAttrs, 0)
nextPageToken, err = pager.NextPage(&gcsObjects)
if err != nil {
logger.LogIf(ctx, err)
return minio.ListObjectsInfo{}, gcsToObjectError(err, bucket, prefix)
}
for _, attrs := range gcsObjects {
// Due to minio.GatewayMinioSysTmp keys being skipped, the number of objects + prefixes
// returned may not total maxKeys. This behavior is compatible with the S3 spec which
// allows the response to include fewer keys than maxKeys.
if attrs.Prefix == minio.GatewayMinioSysTmp {
// We don't return our metadata prefix.
continue
}
if !strings.HasPrefix(prefix, minio.GatewayMinioSysTmp) {
// If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries.
// But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/
// which will be helpful to observe the "directory structure" for debugging purposes.
if strings.HasPrefix(attrs.Prefix, minio.GatewayMinioSysTmp) ||
strings.HasPrefix(attrs.Name, minio.GatewayMinioSysTmp) {
continue
}
}
if attrs.Prefix != "" {
prefixes = append(prefixes, attrs.Prefix)
} else {
objects = append(objects, fromGCSAttrsToObjectInfo(attrs))
}
// The NextMarker property should only be set in the response if a delimiter is used
if delimiter != "" {
if attrs.Prefix > nextMarker {
nextMarker = attrs.Prefix
} else if attrs.Name > nextMarker {
nextMarker = attrs.Name
}
}
}
// Exit the loop if at least one item can be returned from
// the current page or there are no more pages available
if nextPageToken == "" || len(prefixes)+len(objects) > 0 {
break
}
}
if nextPageToken == "" {
nextMarker = ""
} else if nextMarker != "" {
nextMarker = gcsTokenPrefix + toGCSPageToken(nextMarker)
}
return minio.ListObjectsInfo{
IsTruncated: nextPageToken != "",
NextMarker: nextMarker,
Prefixes: prefixes,
Objects: objects,
}, nil
}
// ListObjectsV2 - lists all blobs in GCS bucket filtered by prefix
func (l *gcsGateway) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (minio.ListObjectsV2Info, error) {
if maxKeys == 0 {
return minio.ListObjectsV2Info{ContinuationToken: continuationToken}, nil
}
it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{
Delimiter: delimiter,
Prefix: prefix,
Versions: false,
})
token := continuationToken
if token == "" && startAfter != "" {
token = toGCSPageToken(startAfter)
}
var prefixes []string
var objects []minio.ObjectInfo
var nextPageToken string
var err error
pager := iterator.NewPager(it, maxKeys, token)
for {
gcsObjects := make([]*storage.ObjectAttrs, 0)
nextPageToken, err = pager.NextPage(&gcsObjects)
if err != nil {
logger.LogIf(ctx, err)
return minio.ListObjectsV2Info{}, gcsToObjectError(err, bucket, prefix)
}
for _, attrs := range gcsObjects {
// Due to minio.GatewayMinioSysTmp keys being skipped, the number of objects + prefixes
// returned may not total maxKeys. This behavior is compatible with the S3 spec which
// allows the response to include fewer keys than maxKeys.
if attrs.Prefix == minio.GatewayMinioSysTmp {
// We don't return our metadata prefix.
continue
}
if !strings.HasPrefix(prefix, minio.GatewayMinioSysTmp) {
// If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries.
// But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/
// which will be helpful to observe the "directory structure" for debugging purposes.
if strings.HasPrefix(attrs.Prefix, minio.GatewayMinioSysTmp) ||
strings.HasPrefix(attrs.Name, minio.GatewayMinioSysTmp) {
continue
}
}
if attrs.Prefix != "" {
prefixes = append(prefixes, attrs.Prefix)
} else {
objects = append(objects, fromGCSAttrsToObjectInfo(attrs))
}
}
// Exit the loop if at least one item can be returned from
// the current page or there are no more pages available
if nextPageToken == "" || len(prefixes)+len(objects) > 0 {
break
}
}
return minio.ListObjectsV2Info{
IsTruncated: nextPageToken != "",
ContinuationToken: continuationToken,
NextContinuationToken: nextPageToken,
Prefixes: prefixes,
Objects: objects,
}, nil
}
// GetObjectNInfo - returns object info and locked object ReadCloser
func (l *gcsGateway) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) {
var objInfo minio.ObjectInfo
objInfo, err = l.GetObjectInfo(ctx, bucket, object, opts)
if err != nil {
return nil, err
}
var startOffset, length int64
startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
if err != nil {
return nil, err
}
pr, pw := io.Pipe()
go func() {
err := l.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts)
pw.CloseWithError(err)
}()
// Setup cleanup function to cause the above go-routine to
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser)
}
// GetObject - reads an object from GCS. Supports additional
// parameters like offset and length which are synonymous with
// HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (l *gcsGateway) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
// To mimic S3 behavior exactly we first verify that the bucket exists;
// otherwise GCS would simply report that the object does not exist even when the bucket itself is missing.
if _, err := l.client.Bucket(bucket).Attrs(ctx); err != nil {
logger.LogIf(ctx, err, logger.Application)
return gcsToObjectError(err, bucket)
}
// GCS storage decompresses a gzipped object by default and returns the data.
// Refer to https://cloud.google.com/storage/docs/transcoding#decompressive_transcoding
// Need to set `Accept-Encoding` header to `gzip` when issuing a GetObject call, to be able
// to download the object in compressed state.
// Calling ReadCompressed with true accomplishes that.
object := l.client.Bucket(bucket).Object(key).ReadCompressed(true)
r, err := object.NewRangeReader(ctx, startOffset, length)
if err != nil {
logger.LogIf(ctx, err, logger.Application)
return gcsToObjectError(err, bucket, key)
}
defer r.Close()
if _, err := io.Copy(writer, r); err != nil {
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket, key)
}
return nil
}
// fromGCSAttrsToObjectInfo converts GCS BucketAttrs to gateway ObjectInfo
func fromGCSAttrsToObjectInfo(attrs *storage.ObjectAttrs) minio.ObjectInfo {
// All google cloud storage objects have a CRC32c hash, whereas composite objects may not have a MD5 hash
// Refer https://cloud.google.com/storage/docs/hashes-etags. Use CRC32C for ETag
metadata := make(map[string]string)
var (
expiry time.Time
e error
)
for k, v := range attrs.Metadata {
k = http.CanonicalHeaderKey(k)
// Translate the GCS custom metadata prefix
if strings.HasPrefix(k, "X-Goog-Meta-") {
k = strings.Replace(k, "X-Goog-Meta-", "X-Amz-Meta-", 1)
}
if k == "Expires" {
if expiry, e = time.Parse(http.TimeFormat, v); e == nil {
expiry = expiry.UTC()
}
continue
}
metadata[k] = v
}
if attrs.ContentType != "" {
metadata["Content-Type"] = attrs.ContentType
}
if attrs.ContentEncoding != "" {
metadata["Content-Encoding"] = attrs.ContentEncoding
}
if attrs.CacheControl != "" {
metadata["Cache-Control"] = attrs.CacheControl
}
if attrs.ContentDisposition != "" {
metadata["Content-Disposition"] = attrs.ContentDisposition
}
if attrs.ContentLanguage != "" {
metadata["Content-Language"] = attrs.ContentLanguage
}
etag := hex.EncodeToString(attrs.MD5)
if etag == "" {
etag = minio.ToS3ETag(fmt.Sprintf("%d", attrs.CRC32C))
}
return minio.ObjectInfo{
Name: attrs.Name,
Bucket: attrs.Bucket,
ModTime: attrs.Updated,
Size: attrs.Size,
ETag: etag,
UserDefined: metadata,
ContentType: attrs.ContentType,
ContentEncoding: attrs.ContentEncoding,
Expires: expiry,
}
}
// applyMetadataToGCSAttrs applies metadata to a GCS ObjectAttrs instance
func applyMetadataToGCSAttrs(metadata map[string]string, attrs *storage.ObjectAttrs) {
attrs.Metadata = make(map[string]string)
for k, v := range metadata {
k = http.CanonicalHeaderKey(k)
switch {
case strings.HasPrefix(k, "X-Amz-Meta-"):
// Translate the S3 user-defined metadata prefix
k = strings.Replace(k, "X-Amz-Meta-", "x-goog-meta-", 1)
attrs.Metadata[k] = v
case k == "Content-Type":
attrs.ContentType = v
case k == "Content-Encoding":
attrs.ContentEncoding = v
case k == "Cache-Control":
attrs.CacheControl = v
case k == "Content-Disposition":
attrs.ContentDisposition = v
case k == "Content-Language":
attrs.ContentLanguage = v
}
}
}
// GetObjectInfo - reads object info and replies back ObjectInfo
func (l *gcsGateway) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
// To mimic S3 behavior exactly we first verify that the bucket exists;
// otherwise GCS would simply report that the object does not exist even when the bucket itself is missing.
if _, err := l.client.Bucket(bucket).Attrs(ctx); err != nil {
logger.LogIf(ctx, err, logger.Application)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket)
}
attrs, err := l.client.Bucket(bucket).Object(object).Attrs(ctx)
if err != nil {
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, object)
}
return fromGCSAttrsToObjectInfo(attrs), nil
}
// PutObject - Create a new object with the incoming data,
func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r *minio.PutObjReader, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
data := r.Reader
nctx, cancel := context.WithCancel(ctx)
defer cancel()
// To mimic S3 behavior exactly we first verify that the bucket exists;
// otherwise GCS would simply report that the object does not exist even when the bucket itself is missing.
if _, err := l.client.Bucket(bucket).Attrs(nctx); err != nil {
logger.LogIf(ctx, err, logger.Application)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket)
}
object := l.client.Bucket(bucket).Object(key)
w := object.NewWriter(nctx)
// Disable "chunked" uploading in GCS client if the size of the data to be uploaded is below
// the current chunk-size of the writer. This avoids an unnecessary memory allocation.
if data.Size() < int64(w.ChunkSize) {
w.ChunkSize = 0
}
applyMetadataToGCSAttrs(opts.UserDefined, &w.ObjectAttrs)
if _, err := io.Copy(w, data); err != nil {
// Close the object writer upon error.
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
// Close the object writer upon success.
if err := w.Close(); err != nil {
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
return fromGCSAttrsToObjectInfo(w.Attrs()), nil
}
// CopyObject - Copies a blob from source container to destination container.
func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) {
if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
return minio.ObjectInfo{}, minio.PreConditionFailed{}
}
src := l.client.Bucket(srcBucket).Object(srcObject)
dst := l.client.Bucket(destBucket).Object(destObject)
copier := dst.CopierFrom(src)
applyMetadataToGCSAttrs(srcInfo.UserDefined, &copier.ObjectAttrs)
attrs, err := copier.Run(ctx)
if err != nil {
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, destBucket, destObject)
}
return fromGCSAttrsToObjectInfo(attrs), nil
}
// DeleteObject - Deletes a blob in bucket
func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
err := l.client.Bucket(bucket).Object(object).Delete(ctx)
if err != nil {
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, object)
}
return minio.ObjectInfo{
Bucket: bucket,
Name: object,
}, nil
}
func (l *gcsGateway) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) {
errs := make([]error, len(objects))
dobjects := make([]minio.DeletedObject, len(objects))
for idx, object := range objects {
_, errs[idx] = l.DeleteObject(ctx, bucket, object.ObjectName, opts)
if errs[idx] == nil {
dobjects[idx] = minio.DeletedObject{
ObjectName: object.ObjectName,
}
}
}
return dobjects, errs
}
// NewMultipartUpload - upload object in multiple parts
func (l *gcsGateway) NewMultipartUpload(ctx context.Context, bucket string, key string, o minio.ObjectOptions) (uploadID string, err error) {
// generate new uploadid
uploadID = minio.MustGetUUID()
// generate name for part zero
meta := gcsMultipartMetaName(uploadID)
w := l.client.Bucket(bucket).Object(meta).NewWriter(ctx)
defer w.Close()
applyMetadataToGCSAttrs(o.UserDefined, &w.ObjectAttrs)
if err = json.NewEncoder(w).Encode(gcsMultipartMetaV1{
gcsMinioMultipartMetaCurrentVersion,
bucket,
key,
}); err != nil {
logger.LogIf(ctx, err)
return "", gcsToObjectError(err, bucket, key)
}
return uploadID, nil
}
// ListMultipartUploads - lists multipart uploads for an object matched
// _exactly_ by the prefix
func (l *gcsGateway) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (minio.ListMultipartsInfo, error) {
// List objects under <bucket>/gcsMinioMultipartPathV1
it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{
Prefix: gcsMinioMultipartPathV1,
})
var uploads []minio.MultipartInfo
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
logger.LogIf(ctx, err)
return minio.ListMultipartsInfo{
KeyMarker: keyMarker,
UploadIDMarker: uploadIDMarker,
MaxUploads: maxUploads,
Prefix: prefix,
Delimiter: delimiter,
}, gcsToObjectError(err)
}
// Skip entries other than gcs.json
if !strings.HasSuffix(attrs.Name, gcsMinioMultipartMeta) {
continue
}
// Extract multipart upload information from gcs.json
obj := l.client.Bucket(bucket).Object(attrs.Name)
objReader, rErr := obj.NewReader(ctx)
if rErr != nil {
logger.LogIf(ctx, rErr)
return minio.ListMultipartsInfo{}, rErr
}
defer objReader.Close()
var mpMeta gcsMultipartMetaV1
dec := json.NewDecoder(objReader)
decErr := dec.Decode(&mpMeta)
if decErr != nil {
logger.LogIf(ctx, decErr)
return minio.ListMultipartsInfo{}, decErr
}
if prefix == mpMeta.Object {
// Extract uploadId
// E.g minio.sys.tmp/multipart/v1/d063ad89-fdc4-4ea3-a99e-22dba98151f5/gcs.json
components := strings.SplitN(attrs.Name, minio.SlashSeparator, 5)
if len(components) != 5 {
compErr := errors.New("Invalid multipart upload format")
logger.LogIf(ctx, compErr)
return minio.ListMultipartsInfo{}, compErr
}
upload := minio.MultipartInfo{
Object: mpMeta.Object,
UploadID: components[3],
Initiated: attrs.Created,
}
uploads = append(uploads, upload)
}
}
return minio.ListMultipartsInfo{
KeyMarker: keyMarker,
UploadIDMarker: uploadIDMarker,
MaxUploads: maxUploads,
Prefix: prefix,
Delimiter: delimiter,
Uploads: uploads,
NextKeyMarker: "",
NextUploadIDMarker: "",
IsTruncated: false,
}, nil
}
// Checks if minio.sys.tmp/multipart/v1/<upload-id>/gcs.json exists, returns
// an object layer compatible error upon any error.
func (l *gcsGateway) checkUploadIDExists(ctx context.Context, bucket string, key string, uploadID string) error {
_, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(ctx)
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket, key, uploadID)
}
// PutObjectPart puts a part of object in bucket
func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key string, uploadID string, partNumber int, r *minio.PutObjReader, opts minio.ObjectOptions) (minio.PartInfo, error) {
data := r.Reader
if err := l.checkUploadIDExists(ctx, bucket, key, uploadID); err != nil {
return minio.PartInfo{}, err
}
etag := data.MD5HexString()
if etag == "" {
// Generate random ETag.
etag = minio.GenETag()
}
object := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, partNumber, etag))
w := object.NewWriter(ctx)
// Disable "chunked" uploading in GCS client. If enabled, it can cause a corner case
// where it tries to upload 0 bytes in the last chunk and gets an error from the server.
w.ChunkSize = 0
if _, err := io.Copy(w, data); err != nil {
// Make sure to close object writer upon error.
w.Close()
logger.LogIf(ctx, err)
return minio.PartInfo{}, gcsToObjectError(err, bucket, key)
}
// Make sure to close the object writer upon success.
if err := w.Close(); err != nil {
logger.LogIf(ctx, err)
return minio.PartInfo{}, gcsToObjectError(err, bucket, key)
}
return minio.PartInfo{
PartNumber: partNumber,
ETag: etag,
LastModified: minio.UTCNow(),
Size: data.Size(),
}, nil
}
// gcsGetPartInfo returns PartInfo of a given object part
func gcsGetPartInfo(ctx context.Context, attrs *storage.ObjectAttrs) (minio.PartInfo, error) {
components := strings.SplitN(attrs.Name, minio.SlashSeparator, 5)
if len(components) != 5 {
logger.LogIf(ctx, errors.New("Invalid multipart upload format"))
return minio.PartInfo{}, errors.New("Invalid multipart upload format")
}
partComps := strings.SplitN(components[4], ".", 2)
if len(partComps) != 2 {
logger.LogIf(ctx, errors.New("Invalid multipart part format"))
return minio.PartInfo{}, errors.New("Invalid multipart part format")
}
partNum, pErr := strconv.Atoi(partComps[0])
if pErr != nil {
logger.LogIf(ctx, pErr)
return minio.PartInfo{}, errors.New("Invalid part number")
}
return minio.PartInfo{
PartNumber: partNum,
LastModified: attrs.Updated,
Size: attrs.Size,
ETag: partComps[1],
}, nil
}
// GetMultipartInfo returns multipart info of the uploadId of the object
func (l *gcsGateway) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts minio.ObjectOptions) (result minio.MultipartInfo, err error) {
result.Bucket = bucket
result.Object = object
result.UploadID = uploadID
return result, nil
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (l *gcsGateway) ListObjectParts(ctx context.Context, bucket string, key string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (minio.ListPartsInfo, error) {
it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{
Prefix: path.Join(gcsMinioMultipartPathV1, uploadID),
})
var (
count int
partInfos []minio.PartInfo
)
isTruncated := true
for count < maxParts {
attrs, err := it.Next()
if err == iterator.Done {
isTruncated = false
break
}
if err != nil {
logger.LogIf(ctx, err)
return minio.ListPartsInfo{}, gcsToObjectError(err)
}
if strings.HasSuffix(attrs.Name, gcsMinioMultipartMeta) {
continue
}
partInfo, pErr := gcsGetPartInfo(ctx, attrs)
if pErr != nil {
logger.LogIf(ctx, pErr)
return minio.ListPartsInfo{}, pErr
}
if partInfo.PartNumber <= partNumberMarker {
continue
}
partInfos = append(partInfos, partInfo)
count++
}
nextPartNumberMarker := 0
if isTruncated {
nextPartNumberMarker = partInfos[maxParts-1].PartNumber
}
return minio.ListPartsInfo{
Bucket: bucket,
Object: key,
UploadID: uploadID,
PartNumberMarker: partNumberMarker,
NextPartNumberMarker: nextPartNumberMarker,
MaxParts: maxParts,
Parts: partInfos,
IsTruncated: isTruncated,
}, nil
}
// Called by AbortMultipartUpload and CompleteMultipartUpload for cleaning up.
func (l *gcsGateway) cleanupMultipartUpload(ctx context.Context, bucket, key, uploadID string) error {
prefix := fmt.Sprintf("%s/%s/", gcsMinioMultipartPathV1, uploadID)
// iterate through all parts and delete them
it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: prefix, Versions: false})
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket, key)
}
object := l.client.Bucket(bucket).Object(attrs.Name)
// Ignore the error as parallel AbortMultipartUpload might have deleted it.
object.Delete(ctx)
}
return nil
}
// AbortMultipartUpload aborts an ongoing multipart upload
func (l *gcsGateway) AbortMultipartUpload(ctx context.Context, bucket string, key string, uploadID string) error {
if err := l.checkUploadIDExists(ctx, bucket, key, uploadID); err != nil {
return err
}
return l.cleanupMultipartUpload(ctx, bucket, key, uploadID)
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
// Note that there is a limit (currently 32) to the number of components that can
// be composed in a single operation. There is a per-project rate limit (currently 200)
// to the number of source objects you can compose per second.
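// When more than gcsMaxComponents parts were uploaded, the implementation
// below first composes them in batches into intermediate "composed-object-N"
// objects and then composes those intermediates into the final object.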
func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string, key string, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
meta := gcsMultipartMetaName(uploadID)
object := l.client.Bucket(bucket).Object(meta)
partZeroAttrs, err := object.Attrs(ctx)
if err != nil {
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key, uploadID)
}
r, err := object.NewReader(ctx)
if err != nil {
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
defer r.Close()
// Check version compatibility of the meta file before compose()
multipartMeta := gcsMultipartMetaV1{}
if err = json.NewDecoder(r).Decode(&multipartMeta); err != nil {
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
if multipartMeta.Version != gcsMinioMultipartMetaCurrentVersion {
logger.LogIf(ctx, errGCSFormat)
return minio.ObjectInfo{}, gcsToObjectError(errGCSFormat, bucket, key)
}
// Validate if the gcs.json stores valid entries for the bucket and key.
if multipartMeta.Bucket != bucket || multipartMeta.Object != key {
return minio.ObjectInfo{}, gcsToObjectError(minio.InvalidUploadID{
UploadID: uploadID,
}, bucket, key)
}
var parts []*storage.ObjectHandle
partSizes := make([]int64, len(uploadedParts))
for i, uploadedPart := range uploadedParts {
parts = append(parts, l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID,
uploadedPart.PartNumber, uploadedPart.ETag)))
partAttr, pErr := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, uploadedPart.PartNumber, uploadedPart.ETag)).Attrs(ctx)
if pErr != nil {
logger.LogIf(ctx, pErr)
return minio.ObjectInfo{}, gcsToObjectError(pErr, bucket, key, uploadID)
}
partSizes[i] = partAttr.Size
}
// Error out if any part other than the last is smaller than 5MiB.
for i, size := range partSizes[:len(partSizes)-1] {
if size < 5*humanize.MiByte {
logger.LogIf(ctx, minio.PartTooSmall{
PartNumber: uploadedParts[i].PartNumber,
PartSize: size,
PartETag: uploadedParts[i].ETag,
})
return minio.ObjectInfo{}, minio.PartTooSmall{
PartNumber: uploadedParts[i].PartNumber,
PartSize: size,
PartETag: uploadedParts[i].ETag,
}
}
}
// Returns name of the composed object.
gcsMultipartComposeName := func(uploadID string, composeNumber int) string {
return fmt.Sprintf("%s/tmp/%s/composed-object-%05d", minio.GatewayMinioSysTmp, uploadID, composeNumber)
}
composeCount := int(math.Ceil(float64(len(parts)) / float64(gcsMaxComponents)))
if composeCount > 1 {
// Create composes of every 32 parts.
composeParts := make([]*storage.ObjectHandle, composeCount)
for i := 0; i < composeCount; i++ {
// Create 'composed-object-N' using next 32 parts.
composeParts[i] = l.client.Bucket(bucket).Object(gcsMultipartComposeName(uploadID, i))
start := i * gcsMaxComponents
end := start + gcsMaxComponents
if end > len(parts) {
end = len(parts)
}
composer := composeParts[i].ComposerFrom(parts[start:end]...)
composer.ContentType = partZeroAttrs.ContentType
composer.Metadata = partZeroAttrs.Metadata
if _, err = composer.Run(ctx); err != nil {
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
}
// As composes are successfully created, final object needs to be created using composes.
parts = composeParts
}
composer := l.client.Bucket(bucket).Object(key).ComposerFrom(parts...)
composer.ContentType = partZeroAttrs.ContentType
composer.ContentEncoding = partZeroAttrs.ContentEncoding
composer.CacheControl = partZeroAttrs.CacheControl
composer.ContentDisposition = partZeroAttrs.ContentDisposition
composer.ContentLanguage = partZeroAttrs.ContentLanguage
composer.Metadata = partZeroAttrs.Metadata
attrs, err := composer.Run(ctx)
if err != nil {
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
if err = l.cleanupMultipartUpload(ctx, bucket, key, uploadID); err != nil {
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
return fromGCSAttrsToObjectInfo(attrs), nil
}
// SetBucketPolicy - Set policy on bucket
func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error {
policyInfo, err := minio.PolicyToBucketAccessPolicy(bucketPolicy)
if err != nil {
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket)
}
var policies []minio.BucketAccessPolicy
for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, bucket, "") {
policies = append(policies, minio.BucketAccessPolicy{
Prefix: prefix,
Policy: policy,
})
}
prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 {
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
if policies[0].Prefix != prefix {
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
acl := l.client.Bucket(bucket).ACL()
if policies[0].Policy == miniogopolicy.BucketPolicyNone {
if err := acl.Delete(ctx, storage.AllUsers); err != nil {
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket)
}
return nil
}
var role storage.ACLRole
switch policies[0].Policy {
case miniogopolicy.BucketPolicyReadOnly:
role = storage.RoleReader
case miniogopolicy.BucketPolicyWriteOnly:
role = storage.RoleWriter
default:
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
if err := acl.Set(ctx, storage.AllUsers, role); err != nil {
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket)
}
return nil
}
// GetBucketPolicy - Get policy on bucket
func (l *gcsGateway) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) {
rules, err := l.client.Bucket(bucket).ACL().List(ctx)
if err != nil {
return nil, gcsToObjectError(err, bucket)
}
var readOnly, writeOnly bool
for _, r := range rules {
if r.Entity != storage.AllUsers || r.Role == storage.RoleOwner {
continue
}
switch r.Role {
case storage.RoleReader:
readOnly = true
case storage.RoleWriter:
writeOnly = true
}
}
actionSet := policy.NewActionSet()
if readOnly {
actionSet.Add(policy.GetBucketLocationAction)
actionSet.Add(policy.ListBucketAction)
actionSet.Add(policy.GetObjectAction)
}
if writeOnly {
actionSet.Add(policy.GetBucketLocationAction)
actionSet.Add(policy.ListBucketMultipartUploadsAction)
actionSet.Add(policy.AbortMultipartUploadAction)
actionSet.Add(policy.DeleteObjectAction)
actionSet.Add(policy.ListMultipartUploadPartsAction)
actionSet.Add(policy.PutObjectAction)
}
// Return NoSuchBucketPolicy error, when policy is not set
if len(actionSet) == 0 {
return nil, gcsToObjectError(minio.BucketPolicyNotFound{}, bucket)
}
return &policy.Policy{
Version: policy.DefaultVersion,
Statements: []policy.Statement{
policy.NewStatement(
policy.Allow,
policy.NewPrincipal("*"),
actionSet,
policy.NewResourceSet(
policy.NewResource(bucket, ""),
policy.NewResource(bucket, "*"),
),
condition.NewFunctions(),
),
},
}, nil
}
// DeleteBucketPolicy - Delete all policies on bucket
func (l *gcsGateway) DeleteBucketPolicy(ctx context.Context, bucket string) error {
// This only removes the storage.AllUsers policies
if err := l.client.Bucket(bucket).ACL().Delete(ctx, storage.AllUsers); err != nil {
return gcsToObjectError(err, bucket)
}
return nil
}
// IsCompressionSupported returns whether compression is applicable for this layer.
func (l *gcsGateway) IsCompressionSupported() bool {
return false
}
| [
"\"GOOGLE_APPLICATION_CREDENTIALS\""
] | [] | [
"GOOGLE_APPLICATION_CREDENTIALS"
] | [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | go | 1 | 0 | |
sliderule/ipysliderule.py | # Copyright (c) 2021, University of Washington
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the University of Washington nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF WASHINGTON AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF WASHINGTON OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import copy
import datetime
import numpy as np
from traitlets.utils.bunch import Bunch
import sliderule.io
# imports with warnings if not present
try:
import ipywidgets
except ModuleNotFoundError as e:
sys.stderr.write("Warning: missing packages, some functions will throw an exception if called. (%s)\n" % (str(e)))
try:
import tkinter.filedialog
except ModuleNotFoundError as e:
sys.stderr.write("Warning: missing packages, some functions will throw an exception if called. (%s)\n" % (str(e)))
try:
import IPython.display
except ModuleNotFoundError as e:
sys.stderr.write("Warning: missing packages, some functions will throw an exception if called. (%s)\n" % (str(e)))
# imports that raise error if not present
try:
import ipyleaflet
except ModuleNotFoundError as e:
sys.stderr.write("Error: missing required packages. (%s)\n" % (str(e)))
raise
try:
import xyzservices
except ModuleNotFoundError as e:
sys.stderr.write("Error: missing required packages. (%s)\n" % (str(e)))
raise
class widgets:
def __init__(self):
# dropdown menu for setting asset
self.asset = ipywidgets.Dropdown(
options=['atlas-local', 'atlas-s3', 'nsidc-s3'],
value='nsidc-s3',
description='Asset:',
disabled=False,
)
# dropdown menu for setting data release
self.release = ipywidgets.Dropdown(
options=['003', '004'],
value='004',
description='Release:',
disabled=False,
)
# dropdown menu for setting surface type
# 0-land, 1-ocean, 2-sea ice, 3-land ice, 4-inland water
surface_type_options = [
'Land',
'Ocean',
'Sea ice',
'Land ice',
'Inland water'
]
self.surface_type = ipywidgets.Dropdown(
options=surface_type_options,
value='Land',
description='Surface Type:',
disabled=False,
)
# slider for setting length of ATL06-SR segment in meters
self.length = ipywidgets.IntSlider(
value=40,
min=5,
max=200,
step=5,
description='Length:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
# slider for setting step distance for successive segments in meters
self.step = ipywidgets.IntSlider(
value=20,
min=5,
max=200,
step=5,
description='Step:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
# slider for setting confidence level for PE selection
# eventually it would be good to switch this to an IntRangeSlider with value=[0,4]
self.confidence = ipywidgets.IntSlider(
value=4,
min=0,
max=4,
step=1,
description='Confidence:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
# selection for land surface classifications
land_options = [
'atl08_noise',
'atl08_ground',
'atl08_canopy',
'atl08_top_of_canopy',
'atl08_unclassified'
]
self.land_class = ipywidgets.SelectMultiple(
options=land_options,
description='Land Class:',
disabled=False
)
# slider for setting maximum number of iterations
# (not including initial least-squares-fit selection)
self.iteration = ipywidgets.IntSlider(
value=1,
min=0,
max=20,
step=1,
description='Iterations:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
# slider for setting minimum along track spread
self.spread = ipywidgets.FloatSlider(
value=20,
min=1,
max=100,
step=0.1,
description='Spread:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='0.1f'
)
# slider for setting minimum photon event (PE) count
self.count = ipywidgets.IntSlider(
value=10,
min=1,
max=50,
step=1,
description='PE Count:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
# slider for setting minimum height of PE window in meters
self.window = ipywidgets.FloatSlider(
value=3,
min=0.5,
max=10,
step=0.1,
description='Window:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='0.1f'
)
# slider for setting maximum robust dispersion in meters
self.sigma = ipywidgets.FloatSlider(
value=5,
min=1,
max=10,
step=0.1,
description='Sigma:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='0.1f'
)
# dropdown menu for setting map projection for polygons
# Global: Web Mercator (EPSG:3857)
# North: Alaska Polar Stereographic (EPSG:5936)
# South: Polar Stereographic South (EPSG:3031)
projection_list = ['Global','North','South']
self.projection = ipywidgets.Dropdown(
options=projection_list,
value='Global',
description='Projection:',
disabled=False,
)
# button and label for output file selection
self.file = copy.copy(self.filename)
self.savebutton = ipywidgets.Button(
description="Save As"
)
self.savelabel = ipywidgets.Text(
value=self.file,
disabled=False
)
# connect fileselect button with action
self.savebutton.on_click(self.saveas_file)
self.savelabel.observe(self.set_savefile)
# create hbox of file selection
if os.environ.get("DISPLAY"):
self.filesaver = ipywidgets.HBox([
self.savebutton,
self.savelabel
])
else:
self.filesaver = copy.copy(self.savelabel)
# button and label for input file selection
self.loadbutton = ipywidgets.Button(
description="File select"
)
self.loadlabel = ipywidgets.Text(
value='',
disabled=False
)
# connect fileselect button with action
self.loadbutton.on_click(self.select_file)
self.loadlabel.observe(self.set_loadfile)
# create hbox of file selection
if os.environ.get("DISPLAY"):
self.fileloader = ipywidgets.HBox([
self.loadbutton,
self.loadlabel
])
else:
self.fileloader = copy.copy(self.loadlabel)
def saveas_file(self, b):
"""function for file save
"""
IPython.display.clear_output()
root = tkinter.Tk()
root.withdraw()
root.call('wm', 'attributes', '.', '-topmost', True)
filetypes = (("HDF5 file", "*.h5"),
("netCDF file", "*.nc"),
("All Files", "*.*"))
b.files = tkinter.filedialog.asksaveasfilename(
initialfile=self.file,
defaultextension='h5',
filetypes=filetypes)
self.savelabel.value = b.files
self.file = b.files
return self
def set_savefile(self, sender):
self.file = self.savelabel.value
def select_file(self, b):
"""function for file selection
"""
IPython.display.clear_output()
root = tkinter.Tk()
root.withdraw()
root.call('wm', 'attributes', '.', '-topmost', True)
filetypes = (("HDF5 file", "*.h5"),
("netCDF file", "*.nc"),
("All Files", "*.*"))
b.files = tkinter.filedialog.askopenfilename(
defaultextension='h5',
filetypes=filetypes,
multiple=False)
self.loadlabel.value = b.files
self.file = b.files
return self
def set_loadfile(self, sender):
self.file = self.loadlabel.value
@property
def filename(self):
"""default input and output file string
"""
# get sliderule submission time
now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
args = (now, self.release.value)
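# e.g. "ATL06-SR_20220101120000_004.h5" for a request submitted
# 2022-01-01 12:00:00 with release '004'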
return "ATL06-SR_{0}_{1}.h5".format(*args)
@property
def format(self):
"""return the file format from file string
"""
hdf = ('h5','hdf5','hdf')
netcdf = ('nc','netcdf','nc3')
if self.file.endswith(hdf):
return 'hdf'
elif self.file.endswith(netcdf):
return 'netcdf'
else:
return ''
# define projections for ipyleaflet tiles
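# each entry mirrors the fields ipyleaflet expects for a custom CRS
# (name, custom, proj4def, origin, resolutions, bounds) and is passed as
# the crs= argument when building the polar maps in the leaflet class below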
projections = Bunch(
# Alaska Polar Stereographic (WGS84)
EPSG5936=dict(
name='EPSG5936',
custom=True,
proj4def="""+proj=stere +lat_0=90 +lat_ts=90 +lon_0=-150 +k=0.994
+x_0=2000000 +y_0=2000000 +datum=WGS84 +units=m +no_defs""",
origin=[-2.8567784109255e+07, 3.2567784109255e+07],
resolutions=[
238810.813354,
119405.406677,
59702.7033384999,
29851.3516692501,
14925.675834625,
7462.83791731252,
3731.41895865639,
1865.70947932806,
932.854739664032,
466.427369832148,
233.213684916074,
116.60684245803701,
58.30342122888621,
29.151710614575396,
14.5758553072877,
7.28792765351156,
3.64396382688807,
1.82198191331174,
0.910990956788164,
0.45549547826179,
0.227747739130895,
0.113873869697739,
0.05693693484887,
0.028468467424435
],
bounds=[
[-2623285.8808999992907047,-2623285.8808999992907047],
[6623285.8803000003099442,6623285.8803000003099442]
]
)
,
# Polar Stereographic South (WGS84)
EPSG3031=dict(
name='EPSG3031',
custom=True,
proj4def="""+proj=stere +lat_0=-90 +lat_ts=-71 +lon_0=0 +k=1
+x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs""",
origin=[-3.06361E7, 3.0636099999999993E7],
resolutions=[
67733.46880027094,
33866.73440013547,
16933.367200067736,
8466.683600033868,
4233.341800016934,
2116.670900008467,
1058.3354500042335,
529.1677250021168,
264.5838625010584,
],
bounds=[
[-4524583.19363305,-4524449.487765655],
[4524449.4877656475,4524583.193633042]
]
)
)
# attributions for the different basemaps
glims_attribution = """
Imagery reproduced from GLIMS and NSIDC (2005, updated 2018):
Global Land Ice Measurements from Space glacier database. (doi:10.7265/N5V98602)
"""
esri_attribution = """
Tiles © Esri - Esri, DeLorme, NAVTEQ, TomTom, Intermap, iPC,
USGS, FAO, NPS, NRCAN, GeoBase, Kadaster NL, Ordnance Survey, Esri Japan,
METI, Esri China (Hong Kong), and the GIS User Community
"""
noaa_attribution = """
Imagery provided by NOAA National Centers for Environmental Information (NCEI);
International Bathymetric Chart of the Southern Ocean (IBCSO);
General Bathymetric Chart of the Oceans (GEBCO).
"""
# define background ipyleaflet tiles
basemaps = {
"Esri": {
"ArcticOceanBase": {
"name": 'Esri.ArcticOceanBase',
"crs": projections.EPSG5936,
"attribution": esri_attribution,
"url": 'http://server.arcgisonline.com/ArcGIS/rest/services/Polar/Arctic_Ocean_Base/MapServer/tile/{z}/{y}/{x}'
},
"ArcticOceanReference": {
"name": 'Esri.ArcticOceanReference',
"crs": projections.EPSG5936,
"attribution": esri_attribution,
"url": 'http://server.arcgisonline.com/ArcGIS/rest/services/Polar/Arctic_Ocean_Reference/MapServer/tile/{z}/{y}/{x}'
},
"AntarcticBasemap": {
"name": 'Esri.AntarcticBasemap',
"crs": projections.EPSG3031,
"attribution":noaa_attribution,
"url": 'https://tiles.arcgis.com/tiles/C8EMgrsFcRFL6LrL/arcgis/rest/services/Antarctic_Basemap/MapServer/tile/{z}/{y}/{x}'
}
}
}
# define background ipyleaflet WMS layers
layers = Bunch(
GLIMS = Bunch(
glaciers = ipyleaflet.WMSLayer(
attribution=glims_attribution,
layers='GLIMS_GLACIERS',
format='image/png',
url='https://www.glims.org/mapservice'
)
)
)
# load basemap providers from dict
# https://github.com/geopandas/xyzservices/blob/main/xyzservices/lib.py
def _load_dict(data):
providers = Bunch()
for provider_name in data.keys():
provider = data[provider_name]
if "url" in provider.keys():
providers[provider_name] = xyzservices.lib.TileProvider(provider)
else:
providers[provider_name] = Bunch(
{i: xyzservices.lib.TileProvider(provider[i]) for i in provider.keys()}
)
return providers
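# e.g. _load_dict(basemaps).Esri.ArcticOceanBase is a TileProvider that can
# be passed directly as an ipyleaflet basemap (see the leaflet class below)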
# draw ipyleaflet map
class leaflet:
def __init__(self, projection, **kwargs):
# set default keyword arguments
kwargs.setdefault('zoom',False)
kwargs.setdefault('scale',True)
kwargs.setdefault('cursor',True)
kwargs.setdefault('center',(39,-108))
kwargs.setdefault('color','green')
providers = _load_dict(basemaps)
# create basemap in projection
if (projection == 'Global'):
self.map = ipyleaflet.Map(center=kwargs['center'],
zoom=9, max_zoom=15,
basemap=ipyleaflet.basemaps.Esri.WorldTopoMap)
self.map.add_layer(layers.GLIMS.glaciers)
elif (projection == 'North'):
self.map = ipyleaflet.Map(center=(90,0),
zoom=5, max_zoom=24,
basemap=providers.Esri.ArcticOceanBase,
crs=projections.EPSG5936)
self.map.add_layer(providers.Esri.ArcticOceanReference)
elif (projection == 'South'):
self.map = ipyleaflet.Map(center=(-90,0),
zoom=2, max_zoom=9,
basemap=providers.Esri.AntarcticBasemap,
crs=projections.EPSG3031)
# add control for zoom
if kwargs['zoom']:
zoom_slider = ipywidgets.IntSlider(description='Zoom level:',
min=self.map.min_zoom, max=self.map.max_zoom, value=self.map.zoom)
ipywidgets.jslink((zoom_slider, 'value'), (self.map, 'zoom'))
zoom_control = ipyleaflet.WidgetControl(widget=zoom_slider,
position='topright')
self.map.add_control(zoom_control)
# add scale bar
if kwargs['scale']:
scale_control = ipyleaflet.ScaleControl(position='topright')
self.map.add_control(scale_control)
# add label for cursor position
if kwargs['cursor']:
self.cursor = ipywidgets.Label()
label_control = ipyleaflet.WidgetControl(widget=self.cursor,
position='bottomright')
self.map.add_control(label_control)
# keep track of cursor position
self.map.on_interaction(self.handle_interaction)
# add control for drawing polygons or bounding boxes
draw_control = ipyleaflet.DrawControl(polyline={},circlemarker={},
edit=False)
shapeOptions = {'color':kwargs['color'],'fill_color':kwargs['color']}
draw_control.rectangle = dict(shapeOptions=shapeOptions,
metric=['km','m'])
draw_control.polygon = dict(shapeOptions=shapeOptions,
allowIntersection=False,showArea=True,metric=['km','m'])
# create regions
self.regions = []
draw_control.on_draw(self.handle_draw)
self.map.add_control(draw_control)
# handle cursor movements for label
def handle_interaction(self, **kwargs):
if (kwargs.get('type') == 'mousemove'):
lat,lon = kwargs.get('coordinates')
lon = sliderule.io.wrap_longitudes(lon)
self.cursor.value = u"""Latitude: {d[0]:8.4f}\u00B0,
Longitude: {d[1]:8.4f}\u00B0""".format(d=[lat,lon])
# keep track of rectangles and polygons drawn on map
def handle_draw(self, obj, action, geo_json):
lon,lat = np.transpose(geo_json['geometry']['coordinates'])
lon = sliderule.io.wrap_longitudes(lon)
cx,cy = sliderule.io.centroid(lon,lat)
wind = sliderule.io.winding(lon,lat)
# set winding to counter-clockwise
if (wind > 0):
lon = lon[::-1]
lat = lat[::-1]
# create sliderule region from list
region = sliderule.io.to_region(lon,lat)
# append coordinates to list
if (action == 'created'):
self.regions.append(region)
elif (action == 'deleted'):
self.regions.remove(region)
return self
| [] | [] | [
"DISPLAY"
] | [] | ["DISPLAY"] | python | 1 | 0 | |
tpincrement.py | #!/usr/bin/env python3
"""Cyberjunky's 3Commas bot helpers."""
import argparse
import configparser
import json
import os
import sqlite3
import sys
import time
from pathlib import Path
from helpers.logging import Logger, NotificationHandler
from helpers.misc import check_deal, wait_time_interval
from helpers.threecommas import init_threecommas_api
def load_config():
"""Create default or load existing config file."""
cfg = configparser.ConfigParser()
if cfg.read(f"{datadir}/{program}.ini"):
return cfg
cfg["settings"] = {
"timezone": "Europe/Amsterdam",
"timeinterval": 3600,
"debug": False,
"logrotate": 7,
"botids": [12345, 67890],
"increment-step-scale": [0.10, 0.05, 0.05, 0.05, 0.05, 0.05],
"3c-apikey": "Your 3Commas API Key",
"3c-apisecret": "Your 3Commas API Secret",
"notifications": False,
"notify-urls": ["notify-url1"],
}
with open(f"{datadir}/{program}.ini", "w") as cfgfile:
cfg.write(cfgfile)
return None
def upgrade_config(thelogger, cfg):
"""Upgrade config file if needed."""
try:
cfg.get("settings", "increment-step-scale")
except configparser.NoOptionError:
cfg.set(
"settings", "increment-step-scale", "[0.10, 0.05, 0.05, 0.05, 0.05, 0.05]"
)
cfg.remove_option("settings", "increment-percentage")
with open(f"{datadir}/{program}.ini", "w+") as cfgfile:
cfg.write(cfgfile)
thelogger.info("Upgraded the configuration file")
return cfg
def update_deal(thebot, deal, to_increment, new_percentage):
"""Update deal with new take profit percentage."""
bot_name = thebot["name"]
deal_id = deal["id"]
error, data = api.request(
entity="deals",
action="update_deal",
action_id=str(deal_id),
payload={
"deal_id": thebot["id"],
"take_profit": new_percentage,
},
)
if data:
logger.info(
f"Incremented TP for deal {deal_id}/{deal['pair']} and bot \"{bot_name}\"\n"
f"Changed TP from {deal['take_profit']}% to {new_percentage}% (+{to_increment}%)",
True,
)
else:
if error and "msg" in error:
logger.error(
"Error occurred updating bot with new take profit values: %s"
% error["msg"]
)
else:
logger.error("Error occurred updating bot with new take profit values")
def increment_takeprofit(thebot):
"""Check deals from bot and compare safety orders against the database."""
deals_count = 0
deals = thebot["active_deals"]
if deals:
for deal in deals:
deal_id = deal["id"]
completed_safety_orders_count = int(deal["completed_safety_orders_count"])
to_increment = 0
deals_count += 1
existing_deal = check_deal(cursor, deal_id)
if existing_deal is not None:
db.execute(
f"UPDATE deals SET safety_count = {completed_safety_orders_count} "
f"WHERE dealid = {deal_id}"
)
else:
db.execute(
f"INSERT INTO deals (dealid, safety_count) VALUES ({deal_id}, "
f"{completed_safety_orders_count})"
)
existing_deal_safety_count = (
0 if existing_deal is None else existing_deal["safety_count"]
)
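# add the configured step for each safety order completed since the last
# run; orders beyond the length of increment-step-scale add nothing
# (the IndexError below is deliberately ignored)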
for cnt in range(
existing_deal_safety_count + 1, completed_safety_orders_count + 1
):
try:
to_increment += float(increment_step_scale[cnt - 1])
except IndexError:
pass
if to_increment != 0.0:
new_percentage = round(float(deal["take_profit"]) + to_increment, 2)
update_deal(thebot, deal, round(to_increment, 2), new_percentage)
logger.info(
f"Finished updating {deals_count} deals for bot \"{thebot['name']}\""
)
db.commit()
def init_tpincrement_db():
"""Create or open database to store bot and deals data."""
try:
dbname = f"{program}.sqlite3"
dbpath = f"file:{datadir}/{dbname}?mode=rw"
dbconnection = sqlite3.connect(dbpath, uri=True)
dbconnection.row_factory = sqlite3.Row
logger.info(f"Database '{datadir}/{dbname}' opened successfully")
except sqlite3.OperationalError:
dbconnection = sqlite3.connect(f"{datadir}/{dbname}")
dbconnection.row_factory = sqlite3.Row
dbcursor = dbconnection.cursor()
logger.info(f"Database '{datadir}/{dbname}' created successfully")
dbcursor.execute(
"CREATE TABLE deals (dealid INT Primary Key, safety_count INT)"
)
logger.info("Database tables created successfully")
return dbconnection
def upgrade_tpincrement_db():
"""Upgrade database if needed."""
try:
cursor.execute("ALTER TABLE deals DROP COLUMN increment")
logger.info("Database schema upgraded")
except sqlite3.OperationalError:
logger.debug("Database schema is up-to-date")
# Start application
program = Path(__file__).stem
# Parse and interpret options.
parser = argparse.ArgumentParser(description="Cyberjunky's 3Commas bot helper.")
parser.add_argument("-d", "--datadir", help="data directory to use", type=str)
args = parser.parse_args()
if args.datadir:
datadir = args.datadir
else:
datadir = os.getcwd()
# Create or load configuration file
config = load_config()
if not config:
# Initialise temp logging
logger = Logger(datadir, program, None, 7, False, False)
logger.info(
f"Created example config file '{datadir}/{program}.ini', edit it and restart the program"
)
sys.exit(0)
else:
# Handle timezone
if hasattr(time, "tzset"):
os.environ["TZ"] = config.get(
"settings", "timezone", fallback="Europe/Amsterdam"
)
time.tzset()
# Init notification handler
notification = NotificationHandler(
program,
config.getboolean("settings", "notifications"),
config.get("settings", "notify-urls"),
)
# Initialise logging
logger = Logger(
datadir,
program,
notification,
int(config.get("settings", "logrotate", fallback=7)),
config.getboolean("settings", "debug"),
config.getboolean("settings", "notifications"),
)
# Upgrade config file if needed
config = upgrade_config(logger, config)
logger.info(f"Loaded configuration from '{datadir}/{program}.ini'")
# Initialize 3Commas API
api = init_threecommas_api(config)
# Initialize or open the database
db = init_tpincrement_db()
cursor = db.cursor()
# Upgrade the database if needed
upgrade_tpincrement_db()
# Auto increment TakeProfit %
while True:
config = load_config()
logger.info(f"Reloaded configuration from '{datadir}/{program}.ini'")
# Configuration settings
botids = json.loads(config.get("settings", "botids"))
timeint = int(config.get("settings", "timeinterval"))
increment_step_scale = json.loads(config.get("settings", "increment-step-scale"))
# Walk through all bots configured
for bot in botids:
boterror, botdata = api.request(
entity="bots",
action="show",
action_id=str(bot),
)
if botdata:
increment_takeprofit(botdata)
else:
if boterror and "msg" in boterror:
logger.error("Error occurred updating bots: %s" % boterror["msg"])
else:
logger.error("Error occurred updating bots")
if not wait_time_interval(logger, notification, timeint):
break
| [] | [] | ["TZ"] | [] | ["TZ"] | python | 1 | 0 |
tools/get-token/main.go | // Copyright 2020 the Exposure Notifications Verification Server authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Exchanges a verification code for a verification token.
package main
import (
"context"
"flag"
"os"
"os/signal"
"syscall"
"time"
"github.com/google/exposure-notifications-verification-server/internal/clients"
"github.com/google/exposure-notifications-verification-server/pkg/api"
"github.com/google/exposure-notifications-server/pkg/logging"
)
var (
userAgent = flag.String("user-agent", "", "if present, will set as the user agent on the HTTP request")
codeFlag = flag.String("code", "", "verification code to exchange")
apikeyFlag = flag.String("apikey", "", "API Key to use")
addrFlag = flag.String("addr", "http://localhost:8080", "protocol, address and port on which to make the API call")
nonceFlag = flag.String("nonce", "", "optional, nonce to pass on verify call, base64 encoded")
timeoutFlag = flag.Duration("timeout", 5*time.Second, "request time out duration in the format: 0h0m0s")
)
func main() {
flag.Parse()
ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
if os.Getenv("LOG_LEVEL") == "" {
os.Setenv("LOG_LEVEL", "DEBUG")
}
logger := logging.NewLoggerFromEnv().Named("get-token")
ctx = logging.WithLogger(ctx, logger)
err := realMain(ctx)
done()
if err != nil {
logger.Fatal(err)
}
}
func realMain(ctx context.Context) error {
logger := logging.FromContext(ctx)
opts := make([]clients.Option, 0, 2)
opts = append(opts, clients.WithTimeout(*timeoutFlag))
if ua := *userAgent; ua != "" {
opts = append(opts, clients.WithUserAgent(ua))
}
client, err := clients.NewAPIServerClient(*addrFlag, *apikeyFlag, opts...)
if err != nil {
return err
}
request := &api.VerifyCodeRequest{
VerificationCode: *codeFlag,
AcceptTestTypes: []string{api.TestTypeConfirmed, api.TestTypeLikely, api.TestTypeNegative, api.TestTypeUserReport},
}
if len(*nonceFlag) > 0 {
request.Nonce = *nonceFlag
}
resp, err := client.Verify(ctx, request)
if err != nil {
return err
}
logger.Infow("success", "response", resp)
return nil
}
| ["\"LOG_LEVEL\""] | [] | ["LOG_LEVEL"] | [] | ["LOG_LEVEL"] | go | 1 | 0 |
mrrt/mri/operators/tests/test_mri_noncartesian.py | """Tests related to Non-Cartesian MRI reconstruction."""
from itertools import product
import time
import numpy as np
from numpy.testing import assert_, assert_equal
import pytest
from mrrt.operators.LinOp import DiagonalOperatorMulti
from mrrt.mri.operators.tests._generate_testdata import generate_sim_data
from mrrt.nufft import dtft, dtft_adj
from mrrt.utils import embed, have_cupy, profile
import os
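# Allow skipping the CPU-only or GPU-only cases via the OMIT_CPU / OMIT_GPU environment variables.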
OMIT_CPU = int(os.environ.get("OMIT_CPU", False))
OMIT_GPU = int(os.environ.get("OMIT_GPU", False))
all_xp = [np]
cpu_cases = ["CPU,Tab0", "CPU,Tab", "CPU,Sp"] if not OMIT_CPU else []
all_cases = cpu_cases
if have_cupy:
import cupy
if cupy.cuda.runtime.getDeviceCount() > 0 and not OMIT_GPU:
gpu_cases = ["GPU,Tab0", "GPU,Tab", "GPU,Sp"]
all_cases += gpu_cases
all_xp += [cupy]
# To ignore PendingDeprecationWarning related to scipy.sparse we use:
# @pytest.mark.filterwarnings("ignore:the matrix subclass is not")
#
# This class of warnings could also be ignored via the command line, e.g.:
# pytest -W ignore::PendingDeprecationWarning test_mri_reconstruction.py
@profile
def _test_mri_multi(
ndim=3,
N0=8,
grid_os_factor=1.5,
J0=4,
Ld=4096,
n_coils=1,
fieldmap_segments=None,
precisions=["single", "double"],
phasings=["real", "complex"],
recon_cases=["CPU,Tab0", "CPU,Tab", "CPU,Sp"],
rtol=1e-3,
compare_to_exact=False,
show_figures=False,
nufft_kwargs={},
navg_time=1,
n_creation=1,
return_errors=False,
gpu_memflags=None,
verbose=False,
return_operator=False,
spectral_offsets=None,
):
"""Run a batch of NUFFT tests."""
all_err_forward = np.zeros(
(len(recon_cases), len(precisions), len(phasings))
)
all_err_adj = np.zeros((len(recon_cases), len(precisions), len(phasings)))
alltimes = {}
if not np.isscalar(navg_time):
navg_time_cpu, navg_time_gpu = navg_time
else:
navg_time_cpu = navg_time_gpu = navg_time
for i, recon_case in enumerate(recon_cases):
if "CPU" in recon_case:
navg_time = navg_time_cpu
else:
navg_time = navg_time_gpu
for j, precision in enumerate(precisions):
for k, phasing in enumerate(phasings):
if verbose:
print(
"phasing={}, precision={}, type={}".format(
phasing, precision, recon_case
)
)
if "Tab" in recon_case:
# may want to create twice when benchmarking GPU case
# because the custom kernels are compiled the first time
ncr_max = n_creation
else:
ncr_max = 1
# on_gpu = ('GPU' in recon_case)
for ncr in range(ncr_max):
(
Gn,
wi_full,
xTrue,
ig,
data_true,
times,
) = generate_sim_data(
recon_case=recon_case,
ndim=ndim,
N0=N0,
J0=J0,
grid_os_factor=grid_os_factor,
fieldmap_segments=fieldmap_segments,
Ld=Ld,
n_coils=n_coils,
precision=precision,
phasing=phasing,
nufft_kwargs=nufft_kwargs,
MRI_object_kwargs=dict(gpu_memflags=gpu_memflags),
spectral_offsets=spectral_offsets,
)
xp = Gn.xp
# time the forward operator
sim_data = Gn * xTrue # dry run
tstart = time.time()
for nt in range(navg_time):
sim_data = Gn * xTrue
sim_data += 0.0
sim_data = xp.squeeze(sim_data) # TODO: should be 1D already?
# print("type(xTrue) = {}".format(type(xTrue)))
# print("type(sim_data) = {}".format(type(sim_data)))
t_for = (time.time() - tstart) / navg_time
times["MRI: forward"] = t_for
# time the norm operator
Gn.norm(xTrue) # dry run
tstart = time.time()
for nt in range(navg_time):
Gn.norm(xTrue)
t_norm = (time.time() - tstart) / navg_time
times["MRI: norm"] = t_norm
if precision == "single":
dtype_real = np.float32
dtype_cplx = np.complex64
else:
dtype_real = np.float64
dtype_cplx = np.complex128
if "Tab" in recon_case:
if phasing == "complex":
assert_equal(Gn.Gnufft.h[0].dtype, dtype_cplx)
else:
assert_equal(Gn.Gnufft.h[0].dtype, dtype_real)
else:
if phasing == "complex":
assert_equal(Gn.Gnufft.p.dtype, dtype_cplx)
else:
assert_equal(Gn.Gnufft.p.dtype, dtype_real)
assert_equal(sim_data.dtype, dtype_cplx)
if compare_to_exact:
# compare_to_exact only currently for single-coil,
# no fieldmap case
if spectral_offsets is not None:
raise NotImplementedError(
"compare_to_exact doesn't currently support "
"spectral offsets"
)
nshift_exact = tuple(s / 2 for s in Gn.Nd)
sim_data2 = dtft(
xTrue, Gn.omega, shape=Gn.Nd, n_shift=nshift_exact
)
sd2_norm = xp.linalg.norm(sim_data2)
rel_err = xp.linalg.norm(sim_data - sim_data2) / sd2_norm
if "GPU" in recon_case:
if hasattr(rel_err, "get"):
rel_err = rel_err.get()
all_err_forward[i, j, k] = rel_err
print(
"{},{},{}: forward error = {}".format(
recon_case, precision, phasing, rel_err
)
)
rel_err_mag = (
xp.linalg.norm(np.abs(sim_data) - np.abs(sim_data2))
/ sd2_norm
)
print(
f"{recon_case},{precision},{phasing}: "
f"forward mag diff error = {rel_err_mag}"
)
assert rel_err < rtol
# TODO: update DiagonalOperatorMulti to auto-set loc_in,
# loc_out appropriately
if xp is np:
diag_args = dict(loc_in="cpu", loc_out="cpu")
else:
diag_args = dict(loc_in="gpu", loc_out="gpu")
diag_op = DiagonalOperatorMulti(wi_full, **diag_args)
if n_coils == 1:
data_dcf = diag_op * data_true
else:
data_dcf = diag_op * sim_data
# time the adjoint operation
im_est = Gn.H * data_dcf # dry run
tstart = time.time()
for nt in range(navg_time):
im_est = Gn.H * data_dcf
t_adj = (time.time() - tstart) / navg_time
times["MRI: adjoint"] = t_adj
if hasattr(Gn, "mask") and Gn.mask is not None:
im_est = embed(im_est, Gn.mask)
else:
if spectral_offsets is None:
im_est = im_est.reshape(Gn.Nd, order=Gn.order)
else:
im_est = im_est.reshape(
tuple(Gn.Nd) + (len(spectral_offsets),),
order=Gn.order,
)
if compare_to_exact:
im_est_exact = dtft_adj(
data_dcf, Gn.omega, shape=Gn.Nd, n_shift=nshift_exact
)
ex_norm = xp.linalg.norm(im_est_exact)
rel_err = xp.linalg.norm(im_est - im_est_exact) / ex_norm
all_err_adj[i, j, k] = rel_err
if verbose:
print(
"{},{},{}: adjoint error = {}".format(
recon_case, precision, phasing, rel_err
)
)
rel_err_mag = (
xp.linalg.norm(np.abs(im_est) - np.abs(im_est_exact))
/ ex_norm
)
if verbose:
print(
"{},{},{}: adjoint mag diff error = {}".format(
recon_case, precision, phasing, rel_err
)
)
assert_(rel_err < rtol)
title = ", ".join([recon_case, precision, phasing])
if show_figures:
from matplotlib import pyplot as plt
from pyvolplot import volshow
if compare_to_exact:
volshow(
[
im_est_exact,
im_est,
im_est_exact - im_est,
xp.abs(im_est_exact) - xp.abs(im_est),
]
)
else:
volshow(im_est)
plt.title(title)
alltimes[title] = times
if return_operator:
if return_errors:
return Gn, alltimes, all_err_forward, all_err_adj
return Gn, alltimes
if return_errors:
return alltimes, all_err_forward, all_err_adj
return alltimes
@pytest.mark.parametrize(
"recon_case, precision, phasing",
product(all_cases, ["single", "double"], ["complex", "real"]),
)
@pytest.mark.filterwarnings("ignore:the matrix subclass is not")
def test_mri_2d_nocoils_nofieldmap_nocompare(
recon_case, precision, phasing, show_figures=False, verbose=False
):
_test_mri_multi(
ndim=2,
N0=16,
grid_os_factor=1.5,
J0=6,
Ld=4096,
n_coils=1,
fieldmap_segments=None,
precisions=[precision],
phasings=[phasing],
recon_cases=[recon_case],
show_figures=show_figures,
verbose=verbose,
compare_to_exact=False,
nufft_kwargs={},
rtol=1e-4,
)
# @dec.slow
@pytest.mark.parametrize(
"recon_case, precision, phasing",
product(all_cases, ["single", "double"], ["complex", "real"]),
)
@pytest.mark.filterwarnings("ignore:the matrix subclass is not")
def test_mri_2d_nocoils_nofieldmap(
recon_case, precision, phasing, show_figures=False, verbose=False
):
_test_mri_multi(
ndim=2,
N0=16,
grid_os_factor=1.5,
J0=6,
Ld=4096,
n_coils=1,
fieldmap_segments=None,
precisions=[precision],
phasings=[phasing],
recon_cases=[recon_case],
show_figures=show_figures,
verbose=verbose,
compare_to_exact=True,
nufft_kwargs={},
rtol=1e-3,
)
# @dec.slow
@pytest.mark.parametrize(
"recon_case, precision, phasing",
product(all_cases, ["single", "double"], ["complex", "real"]),
)
@pytest.mark.filterwarnings("ignore:the matrix subclass is not")
def test_mri_2d_nocoils_nofieldmap_kernels(
recon_case, precision, phasing, show_figures=False, verbose=False
):
N = 32
grid_os_factor = 2
J = 6
t, ef, ea = _test_mri_multi(
ndim=2,
N0=N,
grid_os_factor=grid_os_factor,
J0=J,
Ld=512,
n_coils=1,
fieldmap_segments=None,
precisions=[precision],
phasings=[phasing],
recon_cases=[recon_case],
show_figures=show_figures,
verbose=verbose,
compare_to_exact=True,
nufft_kwargs={},
rtol=1e-2,
return_errors=True,
)
# @dec.slow
@pytest.mark.parametrize(
"recon_case, precision, phasing",
product(all_cases, ["single", "double"], ["complex", "real"]),
)
@pytest.mark.filterwarnings("ignore:the matrix subclass is not")
def test_mri_2d_multicoils_nofieldmap(
recon_case, precision, phasing, show_figures=False, verbose=False
):
_test_mri_multi(
ndim=2,
N0=16,
grid_os_factor=1.5,
J0=6,
Ld=4096,
n_coils=4,
fieldmap_segments=None,
precisions=[precision],
phasings=[phasing],
recon_cases=[recon_case],
compare_to_exact=False,
rtol=1e-3,
show_figures=show_figures,
verbose=verbose,
)
# @dec.slow
@pytest.mark.parametrize(
"recon_case, precision, phasing",
product(all_cases, ["single", "double"], ["complex", "real"]),
)
@pytest.mark.filterwarnings("ignore:the matrix subclass is not")
def test_mri_2d_multicoils_fieldmap(
recon_case, precision, phasing, show_figures=False, verbose=False
):
_test_mri_multi(
ndim=2,
N0=16,
grid_os_factor=1.5,
J0=6,
Ld=4096,
n_coils=4,
fieldmap_segments=6,
precisions=[precision],
phasings=[phasing],
recon_cases=[recon_case],
compare_to_exact=False,
show_figures=show_figures,
verbose=verbose,
)
# @dec.slow
@pytest.mark.parametrize(
"recon_case, precision, phasing",
product(all_cases, ["single", "double"], ["complex", "real"]),
)
@pytest.mark.filterwarnings("ignore:the matrix subclass is not")
def test_mri_3d_nocoils_nofieldmap(
recon_case, precision, phasing, show_figures=False, verbose=False
):
if "Tab0" in recon_case:
rtol = 1e-2
else:
rtol = 1e-3
_test_mri_multi(
ndim=3,
N0=12,
grid_os_factor=1.5,
J0=4,
Ld=4096,
n_coils=1,
fieldmap_segments=None,
precisions=[precision],
phasings=[phasing],
recon_cases=[recon_case],
compare_to_exact=True,
show_figures=show_figures,
rtol=rtol,
verbose=verbose,
)
# @dec.slow
@pytest.mark.parametrize(
"recon_case, precision, phasing",
product(all_cases, ["single", "double"], ["complex", "real"]),
)
@pytest.mark.filterwarnings("ignore:the matrix subclass is not")
def test_mri_3d_multicoils_nofieldmap(
recon_case, precision, phasing, show_figures=False, verbose=False
):
_test_mri_multi(
ndim=3,
N0=12,
grid_os_factor=1.5,
J0=4,
Ld=4096,
n_coils=4,
fieldmap_segments=None,
precisions=[precision],
phasings=[phasing],
recon_cases=[recon_case],
compare_to_exact=False,
show_figures=show_figures,
verbose=verbose,
)
# @dec.slow
@pytest.mark.parametrize(
"recon_case, precision, phasing",
product(all_cases, ["single", "double"], ["complex", "real"]),
)
@pytest.mark.filterwarnings("ignore:the matrix subclass is not")
def test_mri_3d_multicoils_fieldmap(
recon_case, precision, phasing, show_figures=False, verbose=False
):
_test_mri_multi(
ndim=3,
N0=12,
grid_os_factor=1.5,
J0=4,
Ld=4096,
n_coils=4,
fieldmap_segments=6,
precisions=[precision],
phasings=[phasing],
recon_cases=[recon_case],
compare_to_exact=False,
show_figures=show_figures,
verbose=verbose,
)
| [] | [] | ["OMIT_GPU", "OMIT_CPU"] | [] | ["OMIT_GPU", "OMIT_CPU"] | python | 2 | 0 |
kayak/mock_kayak_test.go | /*
* Copyright 2018 The CovenantSQL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kayak
import (
"context"
"crypto/rand"
"errors"
"fmt"
"os"
"reflect"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
"github.com/CovenantSQL/CovenantSQL/proto"
"github.com/CovenantSQL/CovenantSQL/twopc"
"github.com/CovenantSQL/CovenantSQL/utils/log"
. "github.com/smartystreets/goconvey/convey"
)
// common mocks
type MockTransportRouter struct {
reqSeq uint64
transports map[proto.NodeID]*MockTransport
transportLock sync.Mutex
}
type MockTransport struct {
nodeID proto.NodeID
router *MockTransportRouter
queue chan Request
waitQueue chan *MockResponse
giveUp map[uint64]bool
}
type MockRequest struct {
transport *MockTransport
ctx context.Context
RequestID uint64
NodeID proto.NodeID
Method string
Log *Log
}
type MockResponse struct {
ResponseID uint64
Data []byte
Error error
}
type MockTwoPCWorker struct {
nodeID proto.NodeID
state string
data int64
total int64
}
var (
_ twopc.Worker = &MockTwoPCWorker{}
)
func (m *MockTransportRouter) getTransport(nodeID proto.NodeID) *MockTransport {
m.transportLock.Lock()
defer m.transportLock.Unlock()
if _, ok := m.transports[nodeID]; !ok {
m.transports[nodeID] = &MockTransport{
nodeID: nodeID,
router: m,
queue: make(chan Request, 1000),
waitQueue: make(chan *MockResponse, 1000),
giveUp: make(map[uint64]bool),
}
}
return m.transports[nodeID]
}
func (m *MockTransportRouter) ResetTransport(nodeID proto.NodeID) {
m.transportLock.Lock()
defer m.transportLock.Unlock()
if _, ok := m.transports[nodeID]; ok {
// reset
delete(m.transports, nodeID)
}
}
func (m *MockTransportRouter) ResetAll() {
m.transportLock.Lock()
defer m.transportLock.Unlock()
m.transports = make(map[proto.NodeID]*MockTransport)
}
func (m *MockTransportRouter) getReqID() uint64 {
return atomic.AddUint64(&m.reqSeq, 1)
}
func (m *MockTransport) Init() error {
return nil
}
func (m *MockTransport) Request(ctx context.Context, nodeID proto.NodeID, method string, log *Log) ([]byte, error) {
return m.router.getTransport(nodeID).sendRequest(&MockRequest{
RequestID: m.router.getReqID(),
NodeID: m.nodeID,
Method: method,
Log: log,
ctx: ctx,
})
}
func (m *MockTransport) Process() <-chan Request {
return m.queue
}
func (m *MockTransport) Shutdown() error {
return nil
}
func (m *MockTransport) sendRequest(req Request) ([]byte, error) {
r := req.(*MockRequest)
r.transport = m
if log.GetLevel() >= log.DebugLevel {
fmt.Println()
}
log.Debugf("[%v] [%v] -> [%v] request %v", r.RequestID, r.NodeID, req.GetPeerNodeID(), r.GetLog())
m.queue <- r
for {
select {
case <-r.ctx.Done():
// deadline reached
log.Debugf("[%v] [%v] -> [%v] request timeout",
r.RequestID, r.NodeID, req.GetPeerNodeID())
m.giveUp[r.RequestID] = true
return nil, r.ctx.Err()
case res := <-m.waitQueue:
if res.ResponseID != r.RequestID {
// put back to queue
if !m.giveUp[res.ResponseID] {
m.waitQueue <- res
} else {
delete(m.giveUp, res.ResponseID)
}
} else {
log.Debugf("[%v] [%v] -> [%v] response %v: %v",
r.RequestID, req.GetPeerNodeID(), r.NodeID, res.Data, res.Error)
return res.Data, res.Error
}
}
}
}
func (m *MockRequest) GetPeerNodeID() proto.NodeID {
return m.NodeID
}
func (m *MockRequest) GetMethod() string {
return m.Method
}
func (m *MockRequest) GetLog() *Log {
return m.Log
}
func (m *MockRequest) SendResponse(v []byte, err error) error {
m.transport.waitQueue <- &MockResponse{
ResponseID: m.RequestID,
Data: v,
Error: err,
}
return nil
}
func (w *MockTwoPCWorker) Prepare(ctx context.Context, wb twopc.WriteBatch) error {
// test prepare
if w.state != "" {
return errors.New("invalid state")
}
value, ok := wb.(int64)
if !ok {
return errors.New("invalid data")
}
w.state = "prepared"
w.data = value
return nil
}
func (w *MockTwoPCWorker) Commit(ctx context.Context, wb twopc.WriteBatch) error {
// test commit
if w.state != "prepared" {
return errors.New("invalid state")
}
if !reflect.DeepEqual(wb, w.data) {
return errors.New("commit data not same as last")
}
w.total += w.data
w.state = ""
return nil
}
func (w *MockTwoPCWorker) Rollback(ctx context.Context, wb twopc.WriteBatch) error {
// test rollback
if w.state != "prepared" {
return errors.New("invalid state")
}
if !reflect.DeepEqual(wb, w.data) {
return errors.New("commit data not same as last")
}
w.data = 0
w.state = ""
return nil
}
func (w *MockTwoPCWorker) GetTotal() int64 {
return w.total
}
func (w *MockTwoPCWorker) GetState() string {
return w.state
}
type CallCollector struct {
l sync.Mutex
callOrder []string
}
func (c *CallCollector) Append(call string) {
c.l.Lock()
defer c.l.Unlock()
c.callOrder = append(c.callOrder, call)
}
func (c *CallCollector) Get() []string {
c.l.Lock()
defer c.l.Unlock()
return c.callOrder[:]
}
func (c *CallCollector) Reset() {
c.l.Lock()
defer c.l.Unlock()
c.callOrder = c.callOrder[:0]
}
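// testPeersFixture builds a Peers structure for the given term and servers, signed with a fixed test private key.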
func testPeersFixture(term uint64, servers []*Server) *Peers {
testPriv := []byte{
0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6,
0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c,
0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9,
0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94,
}
privKey, pubKey := asymmetric.PrivKeyFromBytes(testPriv)
newServers := make([]*Server, 0, len(servers))
var leaderNode *Server
for _, s := range servers {
newS := &Server{
Role: s.Role,
ID: s.ID,
PubKey: pubKey,
}
newServers = append(newServers, newS)
if newS.Role == proto.Leader {
leaderNode = newS
}
}
peers := &Peers{
Term: term,
Leader: leaderNode,
		Servers: newServers,
PubKey: pubKey,
}
peers.Sign(privKey)
return peers
}
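// testLogFixture builds a Log entry with index and term 1 wrapping the given data, and computes its hash.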
func testLogFixture(data []byte) (log *Log) {
log = &Log{
Index: uint64(1),
Term: uint64(1),
Data: data,
}
log.ComputeHash()
return
}
// test mock library itself
func TestMockTransport(t *testing.T) {
Convey("test transport with request timeout", t, func() {
mockRouter := &MockTransportRouter{
transports: make(map[proto.NodeID]*MockTransport),
}
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50)
defer cancel()
var err error
var response []byte
response, err = mockRouter.getTransport("a").Request(
ctx, "b", "Test", testLogFixture([]byte("happy")))
So(response, ShouldBeNil)
So(err, ShouldNotBeNil)
})
Convey("test transport with successful request", t, func(c C) {
mockRouter := &MockTransportRouter{
transports: make(map[proto.NodeID]*MockTransport),
}
testLog := testLogFixture([]byte("happy"))
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
select {
case req := <-mockRouter.getTransport("d").Process():
c.So(req.GetPeerNodeID(), ShouldEqual, proto.NodeID("c"))
c.So(req.GetMethod(), ShouldEqual, "Test")
c.So(req.GetLog(), ShouldResemble, testLog)
req.SendResponse([]byte("happy too"), nil)
}
}()
wg.Add(1)
go func() {
defer wg.Done()
var err error
var response []byte
response, err = mockRouter.getTransport("c").Request(
context.Background(), "d", "Test", testLog)
c.So(err, ShouldBeNil)
c.So(response, ShouldResemble, []byte("happy too"))
}()
wg.Wait()
})
Convey("test transport with concurrent request", t, FailureContinues, func(c C) {
mockRouter := &MockTransportRouter{
transports: make(map[proto.NodeID]*MockTransport),
}
testLog := testLogFixture([]byte("happy"))
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
var err error
var response []byte
response, err = mockRouter.getTransport("e").Request(
context.Background(), "g", "test1", testLog)
c.So(err, ShouldBeNil)
c.So(response, ShouldResemble, []byte("happy e test1"))
}()
wg.Add(1)
go func() {
defer wg.Done()
var err error
var response []byte
response, err = mockRouter.getTransport("f").Request(
context.Background(), "g", "test2", testLog)
c.So(err, ShouldBeNil)
c.So(response, ShouldResemble, []byte("happy f test2"))
}()
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 2; i++ {
select {
case req := <-mockRouter.getTransport("g").Process():
c.So(req.GetPeerNodeID(), ShouldBeIn, []proto.NodeID{"e", "f"})
c.So(req.GetMethod(), ShouldBeIn, []string{"test1", "test2"})
c.So(req.GetLog(), ShouldResemble, testLog)
req.SendResponse([]byte(fmt.Sprintf("happy %s %s", req.GetPeerNodeID(), req.GetMethod())), nil)
}
}
}()
wg.Wait()
})
Convey("test transport with piped request", t, FailureContinues, func(c C) {
mockRouter := &MockTransportRouter{
transports: make(map[proto.NodeID]*MockTransport),
}
var wg sync.WaitGroup
randReq := testLogFixture([]byte("happy"))
randResp := make([]byte, 4)
rand.Read(randResp)
t.Logf("test with request %d, response %d", randReq, randResp)
wg.Add(1)
go func() {
defer wg.Done()
var err error
var response []byte
var req Request
select {
case req = <-mockRouter.getTransport("j").Process():
c.So(req.GetPeerNodeID(), ShouldEqual, proto.NodeID("i"))
c.So(req.GetMethod(), ShouldEqual, "pass1")
}
response, err = mockRouter.getTransport("j").Request(
context.Background(), "k", "pass2", req.GetLog())
c.So(err, ShouldBeNil)
req.SendResponse(response, nil)
}()
wg.Add(1)
go func() {
defer wg.Done()
select {
case req := <-mockRouter.getTransport("k").Process():
c.So(req.GetPeerNodeID(), ShouldEqual, proto.NodeID("j"))
c.So(req.GetMethod(), ShouldEqual, "pass2")
c.So(req.GetLog(), ShouldResemble, randReq)
req.SendResponse(randResp, nil)
}
}()
wg.Add(1)
go func() {
defer wg.Done()
var err error
var response []byte
response, err = mockRouter.getTransport("i").Request(
context.Background(), "j", "pass1", randReq)
c.So(err, ShouldBeNil)
c.So(response, ShouldResemble, randResp)
}()
wg.Wait()
})
}
func init() {
// set logger level by env
if os.Getenv("DEBUG") != "" {
log.SetLevel(log.DebugLevel)
}
}
| ["\"DEBUG\""] | [] | ["DEBUG"] | [] | ["DEBUG"] | go | 1 | 0 |
experiment/autobumper/main.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/json"
"flag"
"fmt"
"net/http"
"os"
"os/exec"
"strings"
"github.com/sirupsen/logrus"
"k8s.io/test-infra/experiment/autobumper/bumper"
"k8s.io/test-infra/prow/config/secret"
"k8s.io/test-infra/prow/github"
)
const (
oncallAddress = "https://storage.googleapis.com/kubernetes-jenkins/oncall.json"
githubOrg = "kubernetes"
githubRepo = "test-infra"
)
var extraFiles = map[string]bool{
"experiment/generate_tests.py": true,
}
func cdToRootDir() error {
if bazelWorkspace := os.Getenv("BUILD_WORKSPACE_DIRECTORY"); bazelWorkspace != "" {
if err := os.Chdir(bazelWorkspace); err != nil {
return fmt.Errorf("failed to chdir to bazel workspace (%s): %v", bazelWorkspace, err)
}
return nil
}
cmd := exec.Command("git", "rev-parse", "--show-toplevel")
output, err := cmd.Output()
if err != nil {
return err
}
d := strings.TrimSpace(string(output))
logrus.Infof("Changing working directory to %s...", d)
return os.Chdir(d)
}
type options struct {
githubLogin string
githubToken string
gitName string
gitEmail string
}
func parseOptions() options {
var o options
flag.StringVar(&o.githubLogin, "github-login", "", "The GitHub username to use.")
flag.StringVar(&o.githubToken, "github-token", "", "The path to the GitHub token file.")
flag.StringVar(&o.gitName, "git-name", "", "The name to use on the git commit. Requires --git-email. If not specified, uses values from the user associated with the access token.")
flag.StringVar(&o.gitEmail, "git-email", "", "The email to use on the git commit. Requires --git-name. If not specified, uses values from the user associated with the access token.")
flag.Parse()
return o
}
func validateOptions(o options) error {
if o.githubToken == "" {
return fmt.Errorf("--github-token is mandatory")
}
if (o.gitEmail == "") != (o.gitName == "") {
return fmt.Errorf("--git-name and --git-email must be specified together")
}
return nil
}
func updateConfig() error {
// Try to regenerate security job configs which use an explicit podutils image config
// TODO(krzyzacy): workaround before we resolve https://github.com/kubernetes/test-infra/issues/9783
logrus.Info("Updating generated config...")
return bumper.Call("bazel", "run", "//hack:update-config")
}
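// getOncaller fetches the name of the current test-infra oncaller from the public oncall JSON endpoint.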
func getOncaller() (string, error) {
req, err := http.Get(oncallAddress)
if err != nil {
return "", err
}
defer req.Body.Close()
if req.StatusCode != http.StatusOK {
return "", fmt.Errorf("HTTP error %d (%q) fetching current oncaller", req.StatusCode, req.Status)
}
oncall := struct {
Oncall struct {
TestInfra string `json:"testinfra"`
} `json:"Oncall"`
}{}
if err := json.NewDecoder(req.Body).Decode(&oncall); err != nil {
return "", err
}
return oncall.Oncall.TestInfra, nil
}
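// getAssignment builds the PR assignment line, cc'ing the current oncaller and falling back to Blunderbuss when nobody is oncall or the lookup fails.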
func getAssignment() string {
oncaller, err := getOncaller()
if err == nil {
if oncaller != "" {
return "/cc @" + oncaller
} else {
return "Nobody is currently oncall, so falling back to Blunderbuss."
}
} else {
return fmt.Sprintf("An error occurred while finding an assignee: `%s`.\nFalling back to Blunderbuss.", err)
}
}
func main() {
o := parseOptions()
if err := validateOptions(o); err != nil {
logrus.WithError(err).Fatal("Invalid arguments.")
}
sa := &secret.Agent{}
if err := sa.Start([]string{o.githubToken}); err != nil {
logrus.WithError(err).Fatal("Failed to start secrets agent")
}
gc := github.NewClient(sa.GetTokenGenerator(o.githubToken), sa.Censor, github.DefaultGraphQLEndpoint, github.DefaultAPIEndpoint)
if o.githubLogin == "" || o.gitName == "" || o.gitEmail == "" {
user, err := gc.BotUser()
if err != nil {
logrus.WithError(err).Fatal("Failed to get the user data for the provided GH token.")
}
if o.githubLogin == "" {
o.githubLogin = user.Login
}
if o.gitName == "" {
o.gitName = user.Name
}
if o.gitEmail == "" {
o.gitEmail = user.Email
}
}
if err := cdToRootDir(); err != nil {
logrus.WithError(err).Fatal("Failed to change to root dir")
}
images, err := bumper.UpdateReferences([]string{"."}, extraFiles)
if err != nil {
logrus.WithError(err).Fatal("Failed to update references.")
}
if err := updateConfig(); err != nil {
logrus.WithError(err).Fatal("Failed to update generated config.")
}
remoteBranch := "autobump"
if err := bumper.MakeGitCommit(fmt.Sprintf("git@github.com:%s/test-infra.git", o.githubLogin), remoteBranch, o.gitName, o.gitEmail, images); err != nil {
logrus.WithError(err).Fatal("Failed to push changes.")
}
if err := bumper.UpdatePR(gc, githubOrg, githubRepo, images, getAssignment(), "Update prow to", o.githubLogin+":"+remoteBranch, "master"); err != nil {
logrus.WithError(err).Fatal("PR creation failed.")
}
}
| ["\"BUILD_WORKSPACE_DIRECTORY\""] | [] | ["BUILD_WORKSPACE_DIRECTORY"] | [] | ["BUILD_WORKSPACE_DIRECTORY"] | go | 1 | 0 |
test/unit/test_dashy_things.py | import pytest
import sys
import os
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../lib')))
@pytest.fixture
def valid_ukkey_address(network='mainnet'):
return 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui' if (network == 'testnet') else 'XpjStRH8SgA6PjgebtPZqCa9y7hLXP767n'
@pytest.fixture
def invalid_ukkey_address(network='mainnet'):
return 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Uj' if (network == 'testnet') else 'XpjStRH8SgA6PjgebtPZqCa9y7hLXP767m'
@pytest.fixture
def current_block_hash():
return '000001c9ba1df5a1c58a4e458fb6febfe9329b1947802cd60a4ae90dd754b534'
@pytest.fixture
def mn_list():
from masternode import Masternode
masternodelist_full = {
u'701854b26809343704ab31d1c45abc08f9f83c5c2bd503a9d5716ef3c0cda857-1': u' ENABLED 70201 yjaFS6dudxUTxYPTDB9BYd1Nv4vMJXm3vK 1474157572 82842 1474152618 71111 52.90.74.124:19999',
u'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1': u' ENABLED 70201 yUuAsYCnG5XrjgsGvRwcDqPhgLUnzNfe8L 1474157732 1590425 1474155175 71122 [2604:a880:800:a1::9b:0]:19999',
u'656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1': u' ENABLED 70201 yepN97UoBLoP2hzWnwWGRVTcWtw1niKwcB 1474157704 824622 1474152571 71110 178.62.203.249:19999',
}
mnlist = [Masternode(vin, mnstring) for (vin, mnstring) in masternodelist_full.items()]
return mnlist
@pytest.fixture
def mn_status_good():
# valid masternode status enabled & running
status = {
"vin": "CTxIn(COutPoint(f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56, 1), scriptSig=)",
"service": "[2604:a880:800:a1::9b:0]:19999",
"pubkey": "yUuAsYCnG5XrjgsGvRwcDqPhgLUnzNfe8L",
"status": "Masternode successfully started"
}
return status
@pytest.fixture
def mn_status_bad():
# valid masternode but not running/waiting
status = {
"vin": "CTxIn(COutPoint(0000000000000000000000000000000000000000000000000000000000000000, 4294967295), coinbase )",
"service": "[::]:0",
"status": "Node just started, not yet activated"
}
return status
# ========================================================================
def test_valid_ukkey_address():
from ukkeylib import is_valid_ukkey_address
main = valid_ukkey_address()
test = valid_ukkey_address('testnet')
assert is_valid_ukkey_address(main) is True
assert is_valid_ukkey_address(main, 'mainnet') is True
assert is_valid_ukkey_address(main, 'testnet') is False
assert is_valid_ukkey_address(test) is False
assert is_valid_ukkey_address(test, 'mainnet') is False
assert is_valid_ukkey_address(test, 'testnet') is True
def test_invalid_ukkey_address():
from ukkeylib import is_valid_ukkey_address
main = invalid_ukkey_address()
test = invalid_ukkey_address('testnet')
assert is_valid_ukkey_address(main) is False
assert is_valid_ukkey_address(main, 'mainnet') is False
assert is_valid_ukkey_address(main, 'testnet') is False
assert is_valid_ukkey_address(test) is False
assert is_valid_ukkey_address(test, 'mainnet') is False
assert is_valid_ukkey_address(test, 'testnet') is False
def test_deterministic_masternode_elections(current_block_hash, mn_list):
    from ukkeylib import elect_mn
    winner = elect_mn(block_hash=current_block_hash, mnlist=mn_list)
    assert winner == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
    winner = elect_mn(block_hash='00000056bcd579fa3dc9a1ee41e8124a4891dcf2661aa3c07cc582bfb63b52b9', mnlist=mn_list)
    assert winner == '656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1'
def test_parse_masternode_status_vin():
from ukkeylib import parse_masternode_status_vin
status = mn_status_good()
vin = parse_masternode_status_vin(status['vin'])
assert vin == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
status = mn_status_bad()
vin = parse_masternode_status_vin(status['vin'])
assert vin is None
def test_hash_function():
import ukkeylib
sb_data_hex = '7b226576656e745f626c6f636b5f686569676874223a2037323639362c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e7473223a202232352e37353030303030307c32352e3735303030303030222c202274797065223a20327d'
sb_hash = '7ae8b02730113382ea75cbb1eecc497c3aa1fdd9e76e875e38617e07fb2cb21a'
hex_hash = "%x" % ukkeylib.hashit(sb_data_hex)
assert hex_hash == sb_hash
def test_blocks_to_seconds():
import ukkeylib
from decimal import Decimal
precision = Decimal('0.001')
assert Decimal(ukkeylib.blocks_to_seconds(0)) == Decimal(0.0)
assert Decimal(ukkeylib.blocks_to_seconds(2)).quantize(precision) \
== Decimal(314.4).quantize(precision)
assert int(ukkeylib.blocks_to_seconds(16616)) == 2612035
| [] | [] | ["SENTINEL_CONFIG"] | [] | ["SENTINEL_CONFIG"] | python | 1 | 0 |
subcmds/forall.py | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import fcntl
import re
import os
import select
import sys
import subprocess
from color import Coloring
from command import Command, MirrorSafeCommand
_CAN_COLOR = [
'branch',
'diff',
'grep',
'log',
]
class ForallColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'forall')
self.project = self.printer('project', attr='bold')
class Forall(Command, MirrorSafeCommand):
common = False
helpSummary = "Run a shell command in each project"
helpUsage = """
%prog [<project>...] -c <command> [<arg>...]
%prog -r str1 [str2] ... -c <command> [<arg>...]
"""
helpDescription = """
Executes the same shell command in each project.
The -r option allows running the command only on projects matching
regex or wildcard expression.
Output Formatting
-----------------
The -p option causes '%prog' to bind pipes to the command's stdin,
stdout and stderr streams, and pipe all output into a continuous
stream that is displayed in a single pager session. Project headings
are inserted before the output of each command is displayed. If the
command produces no output in a project, no heading is displayed.
The formatting convention used by -p is very suitable for some
types of searching, e.g. `repo forall -p -c git log -SFoo` will
print all commits that add or remove references to Foo.
The -v option causes '%prog' to display stderr messages if a
command produces output only on stderr. Normally the -p option
causes command output to be suppressed until the command produces
at least one byte of output on stdout.
Environment
-----------
pwd is the project's working directory. If the current client is
a mirror client, then pwd is the Git repository.
REPO_PROJECT is set to the unique name of the project.
REPO_PATH is the path relative to the root of the client.
REPO_REMOTE is the name of the remote system from the manifest.
REPO_LREV is the name of the revision from the manifest, translated
to a local tracking branch. If you need to pass the manifest
revision to a locally executed git command, use REPO_LREV.
REPO_RREV is the name of the revision from the manifest, exactly
as written in the manifest.
REPO_COUNT is the total number of projects being iterated.
REPO_I is the current (1-based) iteration count. Can be used in
conjunction with REPO_COUNT to add a simple progress indicator to your
command.
REPO__* are any extra environment variables, specified by the
"annotation" element under any project element. This can be useful
for differentiating trees based on user-specific criteria, or simply
annotating tree details.
shell positional arguments ($1, $2, .., $#) are set to any arguments
following <command>.
Unless -p is used, stdin, stdout, stderr are inherited from the
terminal and are not redirected.
If -e is used, when a command exits unsuccessfully, '%prog' will abort
without iterating through the remaining projects.
"""
def _Options(self, p):
def cmd(option, opt_str, value, parser):
setattr(parser.values, option.dest, list(parser.rargs))
while parser.rargs:
del parser.rargs[0]
p.add_option('-r', '--regex',
dest='regex', action='store_true',
help="Execute the command only on projects matching regex or wildcard expression")
p.add_option('-c', '--command',
help='Command (and arguments) to execute',
dest='command',
action='callback',
callback=cmd)
p.add_option('-e', '--abort-on-errors',
dest='abort_on_errors', action='store_true',
help='Abort if a command exits unsuccessfully')
g = p.add_option_group('Output')
g.add_option('-p',
dest='project_header', action='store_true',
help='Show project headers before output')
g.add_option('-v', '--verbose',
dest='verbose', action='store_true',
help='Show command error messages')
def WantPager(self, opt):
return opt.project_header
def Execute(self, opt, args):
if not opt.command:
self.Usage()
cmd = [opt.command[0]]
shell = True
if re.compile(r'^[a-z0-9A-Z_/\.-]+$').match(cmd[0]):
shell = False
if shell:
cmd.append(cmd[0])
cmd.extend(opt.command[1:])
if opt.project_header \
and not shell \
and cmd[0] == 'git':
# If this is a direct git command that can enable colorized
# output and the user prefers coloring, add --color into the
# command line because we are going to wrap the command into
# a pipe and git won't know coloring should activate.
#
for cn in cmd[1:]:
if not cn.startswith('-'):
break
else:
cn = None
# pylint: disable=W0631
if cn and cn in _CAN_COLOR:
class ColorCmd(Coloring):
def __init__(self, config, cmd):
Coloring.__init__(self, config, cmd)
if ColorCmd(self.manifest.manifestProject.config, cn).is_on:
cmd.insert(cmd.index(cn) + 1, '--color')
# pylint: enable=W0631
mirror = self.manifest.IsMirror
out = ForallColoring(self.manifest.manifestProject.config)
out.redirect(sys.stdout)
rc = 0
first = True
if not opt.regex:
projects = self.GetProjects(args)
else:
projects = self.FindProjects(args)
os.environ['REPO_COUNT'] = str(len(projects))
for (cnt, project) in enumerate(projects):
env = os.environ.copy()
def setenv(name, val):
if val is None:
val = ''
env[name] = val.encode()
setenv('REPO_PROJECT', project.name)
setenv('REPO_PATH', project.relpath)
setenv('REPO_REMOTE', project.remote.name)
setenv('REPO_LREV', project.GetRevisionId())
setenv('REPO_RREV', project.revisionExpr)
setenv('REPO_I', str(cnt + 1))
for a in project.annotations:
setenv("REPO__%s" % (a.name), a.value)
if mirror:
setenv('GIT_DIR', project.gitdir)
cwd = project.gitdir
else:
cwd = project.worktree
if not os.path.exists(cwd):
if (opt.project_header and opt.verbose) \
or not opt.project_header:
print('skipping %s/' % project.relpath, file=sys.stderr)
continue
if opt.project_header:
stdin = subprocess.PIPE
stdout = subprocess.PIPE
stderr = subprocess.PIPE
else:
stdin = None
stdout = None
stderr = None
p = subprocess.Popen(cmd,
cwd = cwd,
shell = shell,
env = env,
stdin = stdin,
stdout = stdout,
stderr = stderr)
if opt.project_header:
class sfd(object):
def __init__(self, fd, dest):
self.fd = fd
self.dest = dest
def fileno(self):
return self.fd.fileno()
empty = True
errbuf = ''
p.stdin.close()
s_in = [sfd(p.stdout, sys.stdout),
sfd(p.stderr, sys.stderr)]
for s in s_in:
flags = fcntl.fcntl(s.fd, fcntl.F_GETFL)
fcntl.fcntl(s.fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
while s_in:
in_ready, _out_ready, _err_ready = select.select(s_in, [], [])
for s in in_ready:
buf = s.fd.read(4096)
if not buf:
s.fd.close()
s_in.remove(s)
continue
if not opt.verbose:
if s.fd != p.stdout:
errbuf += buf
continue
if empty:
if first:
first = False
else:
out.nl()
if mirror:
project_header_path = project.name
else:
project_header_path = project.relpath
out.project('project %s/', project_header_path)
out.nl()
out.flush()
if errbuf:
sys.stderr.write(errbuf)
sys.stderr.flush()
errbuf = ''
empty = False
s.dest.write(buf)
s.dest.flush()
r = p.wait()
if r != 0:
if r != rc:
rc = r
if opt.abort_on_errors:
print("error: %s: Aborting due to previous error" % project.relpath,
file=sys.stderr)
sys.exit(r)
if rc != 0:
sys.exit(rc)
| [] | [] | ["REPO_COUNT"] | [] | ["REPO_COUNT"] | python | 1 | 0 |
cla-backend-go/utils/auth_user.go | // Copyright The Linux Foundation and each contributor to CommunityBridge.
// SPDX-License-Identifier: MIT
package utils
import (
"os"
"strconv"
"github.com/LF-Engineering/lfx-kit/auth"
log "github.com/communitybridge/easycla/cla-backend-go/logging"
"github.com/sirupsen/logrus"
)
// SetAuthUserProperties adds username and email to auth user
func SetAuthUserProperties(authUser *auth.User, xUserName *string, xEmail *string) {
f := logrus.Fields{
"functionName": "utils.SetAuthUserProperties",
"userName": authUser.UserName,
"userEmail": authUser.Email,
}
if xUserName != nil {
authUser.UserName = *xUserName
}
if xEmail != nil {
authUser.Email = *xEmail
}
tracingEnabled, conversionErr := strconv.ParseBool(os.Getenv("USER_AUTH_TRACING"))
if conversionErr == nil && tracingEnabled {
log.WithFields(f).Debugf("Auth User: %+v", authUser)
}
log.WithFields(f).Debugf("set authuser x-username:%s and x-email:%s", authUser.UserName, authUser.Email)
}
| ["\"USER_AUTH_TRACING\""] | [] | ["USER_AUTH_TRACING"] | [] | ["USER_AUTH_TRACING"] | go | 1 | 0 |
todoproject/wsgi.py | """
WSGI config for todoproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'todoproject.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pkg/ansi/ansi.go | package ansi
import (
"fmt"
"io"
"os"
"runtime"
"time"
"github.com/briandowns/spinner"
"github.com/logrusorgru/aurora"
"github.com/tidwall/pretty"
"golang.org/x/term"
)
var darkTerminalStyle = &pretty.Style{
Key: [2]string{"\x1B[34m", "\x1B[0m"},
String: [2]string{"\x1B[30m", "\x1B[0m"},
Number: [2]string{"\x1B[94m", "\x1B[0m"},
True: [2]string{"\x1B[35m", "\x1B[0m"},
False: [2]string{"\x1B[35m", "\x1B[0m"},
Null: [2]string{"\x1B[31m", "\x1B[0m"},
}
//
// Public variables
//
// ForceColors forces the use of colors and other ANSI sequences.
var ForceColors = false
// DisableColors disables all colors and other ANSI sequences.
var DisableColors = false
// EnvironmentOverrideColors overrides coloring based on `CLICOLOR` and
// `CLICOLOR_FORCE`. Cf. https://bixense.com/clicolors/
var EnvironmentOverrideColors = true
//
// Public functions
//
// Bold returns bolded text if the writer supports colors
func Bold(text string) string {
color := Color(os.Stdout)
return color.Sprintf(color.Bold(text))
}
// Color returns an aurora.Aurora instance with colors enabled or disabled
// depending on whether the writer supports colors.
func Color(w io.Writer) aurora.Aurora {
return aurora.NewAurora(shouldUseColors(w))
}
// ColorizeJSON returns a colorized version of the input JSON, if the writer
// supports colors.
func ColorizeJSON(json string, darkStyle bool, w io.Writer) string {
if !shouldUseColors(w) {
return json
}
style := (*pretty.Style)(nil)
if darkStyle {
style = darkTerminalStyle
}
return string(pretty.Color([]byte(json), style))
}
// ColorizeStatus returns a colorized number for HTTP status code
func ColorizeStatus(status int) aurora.Value {
color := Color(os.Stdout)
switch {
case status >= 500:
return color.Red(status).Bold()
case status >= 300:
return color.Yellow(status).Bold()
default:
return color.Green(status).Bold()
}
}
// Faint returns slightly offset color text if the writer supports it
func Faint(text string) string {
color := Color(os.Stdout)
return color.Sprintf(color.Faint(text))
}
// Italic returns italicized text if the writer supports it.
func Italic(text string) string {
color := Color(os.Stdout)
return color.Sprintf(color.Italic(text))
}
// Linkify returns an ANSI escape sequence with an hyperlink, if the writer
// supports colors.
func Linkify(text, url string, w io.Writer) string {
if !shouldUseColors(w) {
return text
}
// See https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda
// for more information about this escape sequence.
return fmt.Sprintf("\x1b]8;;%s\x1b\\%s\x1b]8;;\x1b\\", url, text)
}
type charset = []string
func getCharset() charset {
// See https://github.com/briandowns/spinner#available-character-sets for
// list of available charsets
if runtime.GOOS == "windows" {
// Less fancy, but uses ASCII characters so works with Windows default
// console.
return spinner.CharSets[8]
}
return spinner.CharSets[11]
}
const duration = time.Duration(100) * time.Millisecond
// StartNewSpinner starts a new spinner with the given message. If the writer is not
// a terminal or doesn't support colors, it simply prints the message.
func StartNewSpinner(msg string, w io.Writer) *spinner.Spinner {
if !isTerminal(w) || !shouldUseColors(w) {
fmt.Fprintln(w, msg)
return nil
}
s := spinner.New(getCharset(), duration)
s.Writer = w
if msg != "" {
s.Suffix = " " + msg
}
s.Start()
return s
}
// StartSpinner updates an existing spinner's message, and starts it if it was stopped
func StartSpinner(s *spinner.Spinner, msg string, w io.Writer) {
if s == nil {
fmt.Fprintln(w, msg)
return
}
if msg != "" {
s.Suffix = " " + msg
}
if !s.Active() {
s.Start()
}
}
// StopSpinner stops a spinner with the given message. If the writer is not
// a terminal or doesn't support colors, it simply prints the message.
func StopSpinner(s *spinner.Spinner, msg string, w io.Writer) {
if !isTerminal(w) || !shouldUseColors(w) {
fmt.Fprintln(w, msg)
return
}
if msg != "" {
s.FinalMSG = "> " + msg + "\n"
}
s.Stop()
}
// StrikeThrough returns struck though text if the writer supports colors
func StrikeThrough(text string) string {
color := Color(os.Stdout)
return color.Sprintf(color.StrikeThrough(text))
}
//
// Private functions
//
func isTerminal(w io.Writer) bool {
switch v := w.(type) {
case *os.File:
return term.IsTerminal(int(v.Fd()))
default:
return false
}
}
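// isPlugin reports whether the command is running as a CLI plugin, based on the presence of the CLIPLUGIN environment variable.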
func isPlugin() bool {
_, isSet := os.LookupEnv("CLIPLUGIN")
return isSet
}
func shouldUseColors(w io.Writer) bool {
useColors := ForceColors || isTerminal(w) || isPlugin()
if EnvironmentOverrideColors {
force, ok := os.LookupEnv("CLICOLOR_FORCE")
switch {
case ok && force != "0":
useColors = true
case ok && force == "0":
useColors = false
case os.Getenv("CLICOLOR") == "0":
useColors = false
}
}
return useColors && !DisableColors
}
| ["\"CLICOLOR\""] | [] | ["CLICOLOR"] | [] | ["CLICOLOR"] | go | 1 | 0 |
src/integration_runner/main.go | //
// Copyright 2020 New Relic Corporation. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"sync"
"time"
"newrelic"
"newrelic/collector"
"newrelic/integration"
"newrelic/log"
"newrelic/secrets"
"newrelic/utilization"
)
var (
flagAgent = flag.String("agent", "", "")
flagCGI = flag.String("cgi", "", "")
flagCollector = flag.String("collector", "", "the collector host")
flagLoglevel = flag.String("loglevel", "", "agent log level")
flagOutputDir = flag.String("output-dir", ".", "")
flagPattern = flag.String("pattern", "test_*", "shell pattern describing tests to run")
flagPHP = flag.String("php", "", "")
flagPort = flag.String("port", defaultPort(), "")
flagRetry = flag.Int("retry", 0, "maximum retry attempts")
flagTimeout = flag.Duration("timeout", 10*time.Second, "")
flagValgrind = flag.String("valgrind", "", "if given, this is the path to valgrind")
flagWorkers = flag.Int("threads", 1, "")
flagTime = flag.Bool("time", false, "time each test")
// externalPort is the port on which we start a server to handle
// external calls.
flagExternalPort = flag.Int("external_port", 0, "")
// Allows an end user to change the hard coded license key the integration runner
// uses. Useful for running a set of integration tests that are separate from
// the main suite.
//
// Supports an @license.txt format to allow reading the license in from a file.
//
// Expected format:
//
// abcdefghij1234567890abcdefghij1234567890
//
flagLicense FlagStringOrFile
// Allows an end user to set a security policy token to use when connecting. In the
// real world daemon, this value comes from the application's php.ini configuration.
// However, since the integration runner handles agent/daemon communication in its
// own specialized way, we need to allow integration runner users to set this value
//
// Supports a @security-token.txt format to allow reading the token in from
// a file.
//
// Expected format:
//
// ffff-fffb-ffff-ffff
//
flagSecurityToken FlagStringOrFile
// Allows an end user to pass in a set of supported security policies to use when
// connecting. In the real world daemon, this value comes from values hard coded in
// the agent source code. However, since the integration runner handles agent/daemon
// communication in its own specialized way, we need to allow integration runner users
// to set this value.
//
// Flag supports an @file-name.json format to allow reading supported policies in
// from a file.
//
// Expected format:
//
// {
// "record_sql": {
// "enabled": false,
// "supported": true
// },
// "allow_raw_exception_messages": {
// "enabled": false,
// "supported": true
// },
// "custom_events": {
// "enabled": false,
// "supported": true
// },
// "custom_parameters": {
// "enabled": false,
// "supported": true
// }
// }
flagSecuityPolicies FlagStringOrFile
// Header names for headers that are ignored when conducting response
// header checks.
ignoreResponseHeaders = map[string]bool{"Content-Type": true, "X-Powered-By": true}
// Global storage for response headers that are sent by the CAT
// endpoint during a test run.
responseHeaders http.Header
// Lock for protecting against concurrent writes of response headers
responseHeadersLock sync.Mutex
)
// Default directories to search for tests.
var defaultArgs = []string{
"tests/integration",
"tests/library",
"tests/regression",
}
// PHP Test Account 1
var (
TestApp = newrelic.AppInfo{
Appname: "Agent Integration Tests",
RedirectCollector: "collector.newrelic.com",
AgentVersion: "0",
AgentLanguage: "php",
HighSecurity: false,
Environment: nil,
Labels: nil,
Settings:
// Ensure that we get Javascript agent code in the reply
map[string]interface{}{"newrelic.browser_monitoring.debug": false, "newrelic.browser_monitoring.loader": "rum"},
SecurityPolicyToken: "",
}
// Integration tests have this mock cross process id hard coded into
// metric assertions
MockCrossProcessId = fmt.Sprintf("%s#%s", secrets.NewrelicAccountId, secrets.NewrelicAppId)
)
var ctx *integration.Context
func defaultPort() string {
name := fmt.Sprintf("newrelic-daemon-%d.sock", os.Getpid())
name = filepath.Join(os.TempDir(), name)
if runtime.GOOS == "linux" {
// Use an abstract domain socket.
name = "@" + name
}
return name
}
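// merge returns a new map containing the union of a and b; on key conflicts the value from b wins.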
func merge(a, b map[string]string) map[string]string {
merged := make(map[string]string)
for k, v := range a {
merged[k] = v
}
for k, v := range b {
merged[k] = v
}
return merged
}
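// catRequest runs the CAT test script named by the "file" query parameter through the CGI context and relays its headers and body.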
func catRequest(w http.ResponseWriter, r *http.Request) {
catFile := r.URL.Query().Get("file")
if "" == catFile {
http.Error(w, "cat failure: no file provided", http.StatusBadRequest)
return
}
env := merge(ctx.Env, nil)
settings := merge(ctx.Settings, nil)
settings["newrelic.appname"] = "ignore"
tx, err := integration.CgiTx(integration.ScriptFile(catFile), env, settings, r.Header, ctx)
if nil != err {
http.Error(w, "cat failure: "+err.Error(), http.StatusInternalServerError)
return
}
headers, body, err := tx.Execute()
if nil != err {
http.Error(w, "cat failure: "+err.Error(), http.StatusInternalServerError)
return
}
// Copy response headers
h := w.Header()
for key, vals := range headers {
for _, val := range vals {
h.Add(key, val)
if true != ignoreResponseHeaders[key] && responseHeaders != nil {
responseHeadersLock.Lock()
responseHeaders.Add(key, val)
responseHeadersLock.Unlock()
}
}
}
w.Write(body)
}
func init() {
//setup typed flags
flag.Var(&flagLicense, "license", "use a license key other than the hard coded default. Supports @filename syntax for loading from files.")
flag.Var(&flagSecurityToken, "security_token", "if given, the integration runner will connect with this security token. Supports @filename syntax for loading from files.")
flag.Var(&flagSecuityPolicies, "supported_policies", "if given, the integration runner will connect with the provided supported policies. Supports @filename syntax for loading from files.")
}
func main() {
// Respect GOMAXPROCS if set; otherwise, use all available CPUs.
if os.Getenv("GOMAXPROCS") == "" {
runtime.GOMAXPROCS(runtime.NumCPU())
}
flag.Parse()
// The license key and collector can be set via make variables in make/secrets.mk
TestApp.RedirectCollector = secrets.NewrelicCollectorHost
if TestApp.RedirectCollector == "" {
TestApp.RedirectCollector = "collector.newrelic.com"
}
TestApp.License = collector.LicenseKey(secrets.NewrelicLicenseKey)
// Set the redirect collector from the flag, if given.
if *flagCollector != "" {
TestApp.RedirectCollector = *flagCollector
}
if *flagPHP == "" {
*flagPHP = "php"
}
if flagLicense != "" {
TestApp.License = collector.LicenseKey(flagLicense)
}
if len(*flagCGI) == 0 {
if len(*flagPHP) > 0 {
*flagCGI = stringReplaceLast(*flagPHP, "php", "php-cgi", 1)
} else {
*flagCGI = "php-cgi"
}
}
var err error
*flagCGI, err = exec.LookPath(*flagCGI)
if nil != err {
fmt.Fprintf(os.Stderr, "WARNING: unable to find cgi: %v\n", err)
}
// Start server for external requests.
mux := http.NewServeMux()
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, "Hello world!")
})
mux.HandleFunc("/cat", catRequest)
addr := "127.0.0.1:" + strconv.Itoa(*flagExternalPort)
srv := &http.Server{Addr: addr, Handler: mux}
ln, err := net.Listen("tcp", addr)
if err != nil {
fmt.Fprintf(os.Stderr, "unable start external server: %v\n", err)
os.Exit(1)
}
externalHost := ln.Addr().String()
go func() {
err := srv.Serve(ln)
if nil != err {
fmt.Fprintf(os.Stderr, "unable serve external server: %v\n", err)
os.Exit(1)
}
}()
if len(*flagPort) == 0 {
*flagPort = defaultPort()
}
*flagOutputDir, _ = filepath.Abs(*flagOutputDir)
daemonLog := filepath.Join(*flagOutputDir, "integration-tests.log")
agentLog := filepath.Join(*flagOutputDir, "php_agent.log")
os.Remove(daemonLog)
os.Remove(agentLog)
log.Init(log.LogDebug, daemonLog)
ctx = integration.NewContext(*flagPHP, *flagCGI)
ctx.Valgrind = *flagValgrind
ctx.Timeout = *flagTimeout
// Settings common to all tests.
ctx.Settings = make(map[string]string)
ctx.Settings["newrelic.license"] = string(TestApp.License)
ctx.Settings["newrelic.logfile"] = agentLog
ctx.Settings["newrelic.daemon.port"] = `"` + *flagPort + `"`
ctx.Settings["newrelic.daemon.dont_launch"] = "3"
ctx.Settings["newrelic.special"] = "debug_cat"
if *flagLoglevel != "" {
ctx.Settings["newrelic.loglevel"] = *flagLoglevel
}
// If the user provided a custom agent extension, use it.
if len(*flagAgent) > 0 {
ctx.Settings["extension"], _ = filepath.Abs(*flagAgent)
}
// Env vars common to all tests.
ctx.Env["EXTERNAL_HOST"] = externalHost
handler, err := startDaemon("unix", *flagPort, flagSecurityToken.String(), flagSecuityPolicies.String())
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
args := flag.Args()
if 0 == len(args) {
args = defaultArgs
}
testFiles := discoverTests(*flagPattern, args)
tests := make([]*integration.Test, 0, len(testFiles))
testsToRun := make(chan *integration.Test, len(testFiles))
for _, filename := range testFiles {
if test := integration.ParseTestFile(filename); test != nil {
tests = append(tests, test)
testsToRun <- test
}
}
runTests(testsToRun, *flagWorkers)
// Now wait for all data to be flushed, then delete the sock file.
time.Sleep(50 * time.Millisecond)
for i := 0; i < *flagRetry; i++ {
testsToRetry := make(chan *integration.Test, len(testFiles))
handler.Lock()
for _, tc := range tests {
if !tc.Failed && !tc.Skipped {
if handler.harvests[tc.Name] == nil {
testsToRetry <- tc
}
}
}
handler.Unlock()
if len(testsToRetry) == 0 {
break
}
retryTests(testsToRetry, *flagWorkers)
time.Sleep(50 * time.Millisecond)
}
deleteSockfile("unix", *flagPort)
var numFailed int
// Compare the output
handler.Lock()
for _, tc := range tests {
if !tc.Failed && !tc.Skipped {
tc.Compare(handler.harvests[tc.Name])
}
if tc.Failed && tc.Xfail == "" {
numFailed++
}
}
tapOutput(tests)
if numFailed > 0 {
os.Exit(1)
}
}
var (
skipRE = regexp.MustCompile(`^(?i)\s*skip`)
xfailRE = regexp.MustCompile(`^(?i)\s*xfail`)
)
func runTests(testsToRun chan *integration.Test, numWorkers int) {
var wg sync.WaitGroup
for i := 0; i < numWorkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case tc := <-testsToRun:
fmt.Println("running", tc.Name)
// Note that runTest will modify test
// fields. These will be visible to the
// main goroutine because of the
// wg.Done() call.
runTest(tc)
default:
return
}
}
}()
}
wg.Wait()
}
func retryTests(testsToRun chan *integration.Test, numWorkers int) {
var wg sync.WaitGroup
for i := 0; i < numWorkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case tc := <-testsToRun:
fmt.Println("retrying", tc.Name)
// Note that runTest will modify test
// fields. These will be visible to the
// main goroutine because of the
// wg.Done() call.
tc.Reset()
runTest(tc)
default:
return
}
}
}()
}
wg.Wait()
}
func runTest(t *integration.Test) {
if nil != t.Err {
return
}
skipIf, _ := t.MakeSkipIf(ctx)
if skipIf != nil {
_, body, err := skipIf.Execute()
if err != nil {
t.Output = body
t.Fatal(fmt.Errorf("error executing skipif: %v", err))
return
}
if skipRE.Match(body) {
reason := string(bytes.TrimSpace(head(body)))
t.Skip(reason)
return
}
}
// Reset global response headers before the test is run. This feature
// only works for sequential test runs.
responseHeaders = make(http.Header)
run, _ := t.MakeRun(ctx)
start := time.Now()
_, body, err := run.Execute()
// Set the duration on the test if --time was given. A zero duration
// will not be printed.
if *flagTime {
t.Duration = time.Since(start)
} else {
t.Duration = 0
}
// Always save the test output. If an error occurred it may contain
// critical information regarding the cause. Currently, it may also
// contain valgrind commentary which we want to display.
t.Output = body
if *flagWorkers == 1 && t.ShouldCheckResponseHeaders() {
// Response header test active.
t.ResponseHeaders = responseHeaders
} else if t.ShouldCheckResponseHeaders() {
// Response headers expected but disabled because of parallel
// test runs.
fmt.Println("SKIPPING response header test for ", t.Name)
}
if err != nil {
if _, ok := err.(*exec.ExitError); !ok {
t.Fatal(fmt.Errorf("error executing script: %v", err))
return
}
}
if skipRE.Match(body) {
reason := string(bytes.TrimSpace(head(body)))
t.Skip(reason)
return
}
if xfailRE.Match(body) {
// Strip xfail message from body so it does not affect expectations.
tmp := bytes.SplitN(body, []byte("\n"), 2)
t.XFail(string(bytes.TrimSpace(tmp[0])))
if len(tmp) == 2 {
body = tmp[1]
}
}
}
// head returns the first line of s.
func head(s []byte) []byte {
if i := bytes.IndexByte(s, '\n'); i >= 0 {
return s[:i+1]
}
return s
}
// discoverTests recursively searches the paths in searchPaths and
// returns the paths of each file that matches pattern.
func discoverTests(pattern string, searchPaths []string) []string {
testFiles := make([]string, 0, 100)
for _, root := range searchPaths {
if info, err := os.Stat(root); err == nil && info.Mode().IsRegular() {
testFiles = append(testFiles, root)
continue
}
filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
if err != nil {
return nil
}
if info.Mode().IsRegular() {
if ok, _ := filepath.Match(pattern, info.Name()); ok {
testFiles = append(testFiles, path)
}
}
return nil
})
}
return testFiles
}
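// injectIntoConnectReply rewrites the collector connect reply, overriding
// agent_run_id and cross_process_id before re-marshalling it. Each test is
// given its own run id (its name), which is what lets the harvests of
// parallel tests be told apart later.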
func injectIntoConnectReply(reply []byte, newRunID, crossProcessId string) []byte {
var x map[string]interface{}
json.Unmarshal(reply, &x)
x["agent_run_id"] = newRunID
x["cross_process_id"] = crossProcessId
out, _ := json.Marshal(x)
return out
}
type IntegrationDataHandler struct {
sync.Mutex // Protects harvests
harvests map[string]*newrelic.Harvest // Keyed by tc.Name (which is used as AgentRunID)
reply []byte // Constant after creation
rawSecurityPolicies []byte // policies from connection attempt, needed for AppInfo reply
}
func (h *IntegrationDataHandler) IncomingTxnData(id newrelic.AgentRunID, sample newrelic.AggregaterInto) {
h.Lock()
defer h.Unlock()
harvest := h.harvests[string(id)]
if nil == harvest {
harvest = newrelic.NewHarvest(time.Now(), collector.NewHarvestLimits())
// Save a little memory by reducing the event pools.
harvest.TxnEvents = newrelic.NewTxnEvents(50)
harvest.CustomEvents = newrelic.NewCustomEvents(50)
h.harvests[string(id)] = harvest
}
sample.AggregateInto(harvest)
}
func (h *IntegrationDataHandler) IncomingSpanBatch(batch newrelic.SpanBatch) {}
func (h *IntegrationDataHandler) IncomingAppInfo(id *newrelic.AgentRunID, info *newrelic.AppInfo) newrelic.AppInfoReply {
return newrelic.AppInfoReply{
State: newrelic.AppStateConnected,
// Use the appname (which has been set to the filename) as
// the agent run id to enable running the tests in
// parallel.
ConnectReply: injectIntoConnectReply(h.reply, info.Appname, MockCrossProcessId),
SecurityPolicies: h.rawSecurityPolicies,
ConnectTimestamp: uint64(time.Now().Unix()),
HarvestFrequency: 60,
SamplingTarget: 10,
}
}
func deleteSockfile(network, address string) {
if network == "unix" && !(address[0] == '@') {
err := os.Remove(address)
if err != nil && !os.IsNotExist(err) {
fmt.Fprintln(os.Stderr, "unable to remove stale sock file: %v"+
" - another daemon may already be running?", err)
}
}
}
// startDaemon bootstraps the daemon components required to run the
// tests. There are two types of messages an agent can send that affect
// the integration tests: appinfo queries, and txndata. Normally, these
// would be handled by newrelic.Process. We do not do so here, instead
// the test runner intercepts these messages for inspection. This has
// the side-effect of disabling the harvest.
//
// Note: with a little refactoring in the daemon we could continue to
// stub out appinfo queries and inspect txndata while preserving the
// harvest.
func startDaemon(network, address string, securityToken string, securityPolicies string) (*IntegrationDataHandler, error) {
// Gathering utilization data during integration tests.
client, _ := newrelic.NewClient(&newrelic.ClientConfig{})
connectPayload := TestApp.ConnectPayload(utilization.Gather(
utilization.Config{
DetectAWS: true,
DetectAzure: true,
DetectGCP: true,
DetectPCF: true,
DetectDocker: true,
}))
policies := newrelic.AgentPolicies{}
json.Unmarshal([]byte(securityPolicies), &policies.Policies)
connectAttempt := newrelic.ConnectApplication(&newrelic.ConnectArgs{
RedirectCollector: TestApp.RedirectCollector,
PayloadRaw: connectPayload,
License: TestApp.License,
Client: client,
SecurityPolicyToken: securityToken,
AppSupportedSecurityPolicies: policies,
})
if nil != connectAttempt.Err {
return nil, fmt.Errorf("unable to connect application: %v", connectAttempt.Err)
}
handler := &IntegrationDataHandler{
reply: connectAttempt.RawReply,
harvests: make(map[string]*newrelic.Harvest),
rawSecurityPolicies: connectAttempt.RawSecurityPolicies,
}
go func() {
deleteSockfile(network, address) // in case there's a stale one hanging around.
list, err := newrelic.Listen(network, address)
if err != nil {
deleteSockfile(network, address)
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
defer list.Close()
if err = list.Serve(newrelic.CommandsHandler{Processor: handler}); err != nil {
deleteSockfile(network, address)
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}()
// Grace period for the listener to come up.
time.Sleep(50 * time.Millisecond)
return handler, nil
}
| [
"\"GOMAXPROCS\""
] | [] | [
"GOMAXPROCS"
] | [] | ["GOMAXPROCS"] | go | 1 | 0 | |
src/cmd/cgo/out.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"debug/elf"
"debug/macho"
"debug/pe"
"fmt"
"go/ast"
"go/printer"
"go/token"
"github.com/c12o16h1/go/src/internal/xcoff"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"sort"
"strings"
)
var (
conf = printer.Config{Mode: printer.SourcePos, Tabwidth: 8}
noSourceConf = printer.Config{Tabwidth: 8}
)
// writeDefs creates output files to be compiled by gc and gcc.
func (p *Package) writeDefs() {
var fgo2, fc io.Writer
f := creat(*objDir + "_cgo_gotypes.go")
defer f.Close()
fgo2 = f
if *gccgo {
f := creat(*objDir + "_cgo_defun.c")
defer f.Close()
fc = f
}
fm := creat(*objDir + "_cgo_main.c")
var gccgoInit bytes.Buffer
fflg := creat(*objDir + "_cgo_flags")
for k, v := range p.CgoFlags {
fmt.Fprintf(fflg, "_CGO_%s=%s\n", k, strings.Join(v, " "))
if k == "LDFLAGS" && !*gccgo {
for _, arg := range v {
fmt.Fprintf(fgo2, "//go:cgo_ldflag %q\n", arg)
}
}
}
fflg.Close()
// Write C main file for using gcc to resolve imports.
fmt.Fprintf(fm, "int main() { return 0; }\n")
if *importRuntimeCgo {
fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int, __SIZE_TYPE__), void *a, int c, __SIZE_TYPE__ ctxt) { }\n")
fmt.Fprintf(fm, "__SIZE_TYPE__ _cgo_wait_runtime_init_done(void) { return 0; }\n")
fmt.Fprintf(fm, "void _cgo_release_context(__SIZE_TYPE__ ctxt) { }\n")
fmt.Fprintf(fm, "char* _cgo_topofstack(void) { return (char*)0; }\n")
} else {
// If we're not importing runtime/cgo, we *are* runtime/cgo,
// which provides these functions. We just need a prototype.
fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int, __SIZE_TYPE__), void *a, int c, __SIZE_TYPE__ ctxt);\n")
fmt.Fprintf(fm, "__SIZE_TYPE__ _cgo_wait_runtime_init_done(void);\n")
fmt.Fprintf(fm, "void _cgo_release_context(__SIZE_TYPE__);\n")
}
fmt.Fprintf(fm, "void _cgo_allocate(void *a, int c) { }\n")
fmt.Fprintf(fm, "void _cgo_panic(void *a, int c) { }\n")
fmt.Fprintf(fm, "void _cgo_reginit(void) { }\n")
// Write second Go output: definitions of _C_xxx.
// In a separate file so that the import of "unsafe" does not
// pollute the original file.
fmt.Fprintf(fgo2, "// Code generated by cmd/cgo; DO NOT EDIT.\n\n")
fmt.Fprintf(fgo2, "package %s\n\n", p.PackageName)
fmt.Fprintf(fgo2, "import \"unsafe\"\n\n")
if !*gccgo && *importRuntimeCgo {
fmt.Fprintf(fgo2, "import _ \"runtime/cgo\"\n\n")
}
if *importSyscall {
fmt.Fprintf(fgo2, "import \"syscall\"\n\n")
fmt.Fprintf(fgo2, "var _ syscall.Errno\n")
}
fmt.Fprintf(fgo2, "func _Cgo_ptr(ptr unsafe.Pointer) unsafe.Pointer { return ptr }\n\n")
if !*gccgo {
fmt.Fprintf(fgo2, "//go:linkname _Cgo_always_false runtime.cgoAlwaysFalse\n")
fmt.Fprintf(fgo2, "var _Cgo_always_false bool\n")
fmt.Fprintf(fgo2, "//go:linkname _Cgo_use runtime.cgoUse\n")
fmt.Fprintf(fgo2, "func _Cgo_use(interface{})\n")
}
typedefNames := make([]string, 0, len(typedef))
for name := range typedef {
typedefNames = append(typedefNames, name)
}
sort.Strings(typedefNames)
for _, name := range typedefNames {
def := typedef[name]
fmt.Fprintf(fgo2, "type %s ", name)
// We don't have source info for these types, so write them out without source info.
// Otherwise types would look like:
//
// type _Ctype_struct_cb struct {
// //line :1
// on_test *[0]byte
// //line :1
// }
//
		// That output is not useful. Moreover, we never override source info,
		// so subsequent source code uses the same source info.
		// An empty file name also makes the compiler emit no source debug info at all.
var buf bytes.Buffer
noSourceConf.Fprint(&buf, fset, def.Go)
if bytes.HasPrefix(buf.Bytes(), []byte("_Ctype_")) {
// This typedef is of the form `typedef a b` and should be an alias.
fmt.Fprintf(fgo2, "= ")
}
fmt.Fprintf(fgo2, "%s", buf.Bytes())
fmt.Fprintf(fgo2, "\n\n")
}
if *gccgo {
fmt.Fprintf(fgo2, "type _Ctype_void byte\n")
} else {
fmt.Fprintf(fgo2, "type _Ctype_void [0]byte\n")
}
if *gccgo {
fmt.Fprint(fgo2, gccgoGoProlog)
fmt.Fprint(fc, p.cPrologGccgo())
} else {
fmt.Fprint(fgo2, goProlog)
}
if fc != nil {
fmt.Fprintf(fc, "#line 1 \"cgo-generated-wrappers\"\n")
}
if fm != nil {
fmt.Fprintf(fm, "#line 1 \"cgo-generated-wrappers\"\n")
}
gccgoSymbolPrefix := p.gccgoSymbolPrefix()
cVars := make(map[string]bool)
for _, key := range nameKeys(p.Name) {
n := p.Name[key]
if !n.IsVar() {
continue
}
if !cVars[n.C] {
if *gccgo {
fmt.Fprintf(fc, "extern byte *%s;\n", n.C)
} else {
fmt.Fprintf(fm, "extern char %s[];\n", n.C)
fmt.Fprintf(fm, "void *_cgohack_%s = %s;\n\n", n.C, n.C)
fmt.Fprintf(fgo2, "//go:linkname __cgo_%s %s\n", n.C, n.C)
fmt.Fprintf(fgo2, "//go:cgo_import_static %s\n", n.C)
fmt.Fprintf(fgo2, "var __cgo_%s byte\n", n.C)
}
cVars[n.C] = true
}
var node ast.Node
if n.Kind == "var" {
node = &ast.StarExpr{X: n.Type.Go}
} else if n.Kind == "fpvar" {
node = n.Type.Go
} else {
panic(fmt.Errorf("invalid var kind %q", n.Kind))
}
if *gccgo {
fmt.Fprintf(fc, `extern void *%s __asm__("%s.%s");`, n.Mangle, gccgoSymbolPrefix, n.Mangle)
fmt.Fprintf(&gccgoInit, "\t%s = &%s;\n", n.Mangle, n.C)
fmt.Fprintf(fc, "\n")
}
fmt.Fprintf(fgo2, "var %s ", n.Mangle)
conf.Fprint(fgo2, fset, node)
if !*gccgo {
fmt.Fprintf(fgo2, " = (")
conf.Fprint(fgo2, fset, node)
fmt.Fprintf(fgo2, ")(unsafe.Pointer(&__cgo_%s))", n.C)
}
fmt.Fprintf(fgo2, "\n")
}
if *gccgo {
fmt.Fprintf(fc, "\n")
}
for _, key := range nameKeys(p.Name) {
n := p.Name[key]
if n.Const != "" {
fmt.Fprintf(fgo2, "const %s = %s\n", n.Mangle, n.Const)
}
}
fmt.Fprintf(fgo2, "\n")
callsMalloc := false
for _, key := range nameKeys(p.Name) {
n := p.Name[key]
if n.FuncType != nil {
p.writeDefsFunc(fgo2, n, &callsMalloc)
}
}
fgcc := creat(*objDir + "_cgo_export.c")
fgcch := creat(*objDir + "_cgo_export.h")
if *gccgo {
p.writeGccgoExports(fgo2, fm, fgcc, fgcch)
} else {
p.writeExports(fgo2, fm, fgcc, fgcch)
}
if callsMalloc && !*gccgo {
fmt.Fprint(fgo2, strings.Replace(cMallocDefGo, "PREFIX", cPrefix, -1))
fmt.Fprint(fgcc, strings.Replace(strings.Replace(cMallocDefC, "PREFIX", cPrefix, -1), "PACKED", p.packedAttribute(), -1))
}
if err := fgcc.Close(); err != nil {
fatalf("%s", err)
}
if err := fgcch.Close(); err != nil {
fatalf("%s", err)
}
if *exportHeader != "" && len(p.ExpFunc) > 0 {
fexp := creat(*exportHeader)
fgcch, err := os.Open(*objDir + "_cgo_export.h")
if err != nil {
fatalf("%s", err)
}
_, err = io.Copy(fexp, fgcch)
if err != nil {
fatalf("%s", err)
}
if err = fexp.Close(); err != nil {
fatalf("%s", err)
}
}
init := gccgoInit.String()
if init != "" {
// The init function does nothing but simple
// assignments, so it won't use much stack space, so
// it's OK to not split the stack. Splitting the stack
// can run into a bug in clang (as of 2018-11-09):
// this is a leaf function, and when clang sees a leaf
// function it won't emit the split stack prologue for
// the function. However, if this function refers to a
// non-split-stack function, which will happen if the
// cgo code refers to a C function not compiled with
// -fsplit-stack, then the linker will think that it
// needs to adjust the split stack prologue, but there
// won't be one. Marking the function explicitly
// no_split_stack works around this problem by telling
// the linker that it's OK if there is no split stack
// prologue.
fmt.Fprintln(fc, "static void init(void) __attribute__ ((constructor, no_split_stack));")
fmt.Fprintln(fc, "static void init(void) {")
fmt.Fprint(fc, init)
fmt.Fprintln(fc, "}")
}
}
// elfImportedSymbols is like elf.File.ImportedSymbols, but it
// includes weak symbols.
//
// A bug in some versions of LLD (at least LLD 8) causes it to emit
// several pthreads symbols as weak, but we need to import those. See
// issue #31912 or https://bugs.llvm.org/show_bug.cgi?id=42442.
//
// When doing external linking, we hand everything off to the external
// linker, which will create its own dynamic symbol tables. For
// internal linking, this may turn weak imports into strong imports,
// which could cause dynamic linking to fail if a symbol really isn't
// defined. However, the standard library depends on everything it
// imports, and this is the primary use of dynamic symbol tables with
// internal linking.
func elfImportedSymbols(f *elf.File) []elf.ImportedSymbol {
syms, _ := f.DynamicSymbols()
var imports []elf.ImportedSymbol
for _, s := range syms {
if (elf.ST_BIND(s.Info) == elf.STB_GLOBAL || elf.ST_BIND(s.Info) == elf.STB_WEAK) && s.Section == elf.SHN_UNDEF {
imports = append(imports, elf.ImportedSymbol{
Name: s.Name,
Library: s.Library,
Version: s.Version,
})
}
}
return imports
}
func dynimport(obj string) {
stdout := os.Stdout
if *dynout != "" {
f, err := os.Create(*dynout)
if err != nil {
fatalf("%s", err)
}
stdout = f
}
fmt.Fprintf(stdout, "package %s\n", *dynpackage)
if f, err := elf.Open(obj); err == nil {
if *dynlinker {
// Emit the cgo_dynamic_linker line.
if sec := f.Section(".interp"); sec != nil {
if data, err := sec.Data(); err == nil && len(data) > 1 {
// skip trailing \0 in data
fmt.Fprintf(stdout, "//go:cgo_dynamic_linker %q\n", string(data[:len(data)-1]))
}
}
}
sym := elfImportedSymbols(f)
for _, s := range sym {
targ := s.Name
if s.Version != "" {
targ += "#" + s.Version
}
fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", s.Name, targ, s.Library)
}
lib, _ := f.ImportedLibraries()
for _, l := range lib {
fmt.Fprintf(stdout, "//go:cgo_import_dynamic _ _ %q\n", l)
}
return
}
if f, err := macho.Open(obj); err == nil {
sym, _ := f.ImportedSymbols()
for _, s := range sym {
if len(s) > 0 && s[0] == '_' {
s = s[1:]
}
fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", s, s, "")
}
lib, _ := f.ImportedLibraries()
for _, l := range lib {
fmt.Fprintf(stdout, "//go:cgo_import_dynamic _ _ %q\n", l)
}
return
}
if f, err := pe.Open(obj); err == nil {
sym, _ := f.ImportedSymbols()
for _, s := range sym {
ss := strings.Split(s, ":")
name := strings.Split(ss[0], "@")[0]
fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", name, ss[0], strings.ToLower(ss[1]))
}
return
}
if f, err := xcoff.Open(obj); err == nil {
sym, err := f.ImportedSymbols()
if err != nil {
fatalf("cannot load imported symbols from XCOFF file %s: %v", obj, err)
}
for _, s := range sym {
if s.Name == "runtime_rt0_go" || s.Name == "_rt0_ppc64_aix_lib" {
// These symbols are imported by runtime/cgo but
				// must not be added to _cgo_import.go as they are
				// Go symbols.
continue
}
fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", s.Name, s.Name, s.Library)
}
lib, err := f.ImportedLibraries()
if err != nil {
fatalf("cannot load imported libraries from XCOFF file %s: %v", obj, err)
}
for _, l := range lib {
fmt.Fprintf(stdout, "//go:cgo_import_dynamic _ _ %q\n", l)
}
return
}
fatalf("cannot parse %s as ELF, Mach-O, PE or XCOFF", obj)
}
// Construct a gcc struct matching the gc argument frame.
// Assumes that in gcc, char is 1 byte, short 2 bytes, int 4 bytes, long long 8 bytes.
// These assumptions are checked by the gccProlog.
// Also assumes that gc convention is to word-align the
// input and output parameters.
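// As a sketch (assuming a 64-bit target and a hypothetical C function
// "double f(char, int)"), the generated frame struct would look roughly like
//
//	struct {
//		char p0;
//		char __pad1[3];
//		int p1;
//		double r;
//	}
//
// with padding inserted so each field meets its alignment and the result
// placed at the next pointer-aligned offset after the arguments.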
func (p *Package) structType(n *Name) (string, int64) {
var buf bytes.Buffer
fmt.Fprint(&buf, "struct {\n")
off := int64(0)
for i, t := range n.FuncType.Params {
if off%t.Align != 0 {
pad := t.Align - off%t.Align
fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad)
off += pad
}
c := t.Typedef
if c == "" {
c = t.C.String()
}
fmt.Fprintf(&buf, "\t\t%s p%d;\n", c, i)
off += t.Size
}
if off%p.PtrSize != 0 {
pad := p.PtrSize - off%p.PtrSize
fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad)
off += pad
}
if t := n.FuncType.Result; t != nil {
if off%t.Align != 0 {
pad := t.Align - off%t.Align
fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad)
off += pad
}
fmt.Fprintf(&buf, "\t\t%s r;\n", t.C)
off += t.Size
}
if off%p.PtrSize != 0 {
pad := p.PtrSize - off%p.PtrSize
fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad)
off += pad
}
if off == 0 {
fmt.Fprintf(&buf, "\t\tchar unused;\n") // avoid empty struct
}
fmt.Fprintf(&buf, "\t}")
return buf.String(), off
}
func (p *Package) writeDefsFunc(fgo2 io.Writer, n *Name, callsMalloc *bool) {
name := n.Go
gtype := n.FuncType.Go
void := gtype.Results == nil || len(gtype.Results.List) == 0
if n.AddError {
// Add "error" to return type list.
// Type list is known to be 0 or 1 element - it's a C function.
err := &ast.Field{Type: ast.NewIdent("error")}
l := gtype.Results.List
if len(l) == 0 {
l = []*ast.Field{err}
} else {
l = []*ast.Field{l[0], err}
}
t := new(ast.FuncType)
*t = *gtype
t.Results = &ast.FieldList{List: l}
gtype = t
}
// Go func declaration.
d := &ast.FuncDecl{
Name: ast.NewIdent(n.Mangle),
Type: gtype,
}
// Builtins defined in the C prolog.
inProlog := builtinDefs[name] != ""
cname := fmt.Sprintf("_cgo%s%s", cPrefix, n.Mangle)
paramnames := []string(nil)
if d.Type.Params != nil {
for i, param := range d.Type.Params.List {
paramName := fmt.Sprintf("p%d", i)
param.Names = []*ast.Ident{ast.NewIdent(paramName)}
paramnames = append(paramnames, paramName)
}
}
if *gccgo {
// Gccgo style hooks.
fmt.Fprint(fgo2, "\n")
conf.Fprint(fgo2, fset, d)
fmt.Fprint(fgo2, " {\n")
if !inProlog {
fmt.Fprint(fgo2, "\tdefer syscall.CgocallDone()\n")
fmt.Fprint(fgo2, "\tsyscall.Cgocall()\n")
}
if n.AddError {
fmt.Fprint(fgo2, "\tsyscall.SetErrno(0)\n")
}
fmt.Fprint(fgo2, "\t")
if !void {
fmt.Fprint(fgo2, "r := ")
}
fmt.Fprintf(fgo2, "%s(%s)\n", cname, strings.Join(paramnames, ", "))
if n.AddError {
fmt.Fprint(fgo2, "\te := syscall.GetErrno()\n")
fmt.Fprint(fgo2, "\tif e != 0 {\n")
fmt.Fprint(fgo2, "\t\treturn ")
if !void {
fmt.Fprint(fgo2, "r, ")
}
fmt.Fprint(fgo2, "e\n")
fmt.Fprint(fgo2, "\t}\n")
fmt.Fprint(fgo2, "\treturn ")
if !void {
fmt.Fprint(fgo2, "r, ")
}
fmt.Fprint(fgo2, "nil\n")
} else if !void {
fmt.Fprint(fgo2, "\treturn r\n")
}
fmt.Fprint(fgo2, "}\n")
// declare the C function.
fmt.Fprintf(fgo2, "//extern %s\n", cname)
d.Name = ast.NewIdent(cname)
if n.AddError {
l := d.Type.Results.List
d.Type.Results.List = l[:len(l)-1]
}
conf.Fprint(fgo2, fset, d)
fmt.Fprint(fgo2, "\n")
return
}
if inProlog {
fmt.Fprint(fgo2, builtinDefs[name])
if strings.Contains(builtinDefs[name], "_cgo_cmalloc") {
*callsMalloc = true
}
return
}
// Wrapper calls into gcc, passing a pointer to the argument frame.
fmt.Fprintf(fgo2, "//go:cgo_import_static %s\n", cname)
fmt.Fprintf(fgo2, "//go:linkname __cgofn_%s %s\n", cname, cname)
fmt.Fprintf(fgo2, "var __cgofn_%s byte\n", cname)
fmt.Fprintf(fgo2, "var %s = unsafe.Pointer(&__cgofn_%s)\n", cname, cname)
nret := 0
if !void {
d.Type.Results.List[0].Names = []*ast.Ident{ast.NewIdent("r1")}
nret = 1
}
if n.AddError {
d.Type.Results.List[nret].Names = []*ast.Ident{ast.NewIdent("r2")}
}
fmt.Fprint(fgo2, "\n")
fmt.Fprint(fgo2, "//go:cgo_unsafe_args\n")
conf.Fprint(fgo2, fset, d)
fmt.Fprint(fgo2, " {\n")
// NOTE: Using uintptr to hide from escape analysis.
arg := "0"
if len(paramnames) > 0 {
arg = "uintptr(unsafe.Pointer(&p0))"
} else if !void {
arg = "uintptr(unsafe.Pointer(&r1))"
}
prefix := ""
if n.AddError {
prefix = "errno := "
}
fmt.Fprintf(fgo2, "\t%s_cgo_runtime_cgocall(%s, %s)\n", prefix, cname, arg)
if n.AddError {
fmt.Fprintf(fgo2, "\tif errno != 0 { r2 = syscall.Errno(errno) }\n")
}
fmt.Fprintf(fgo2, "\tif _Cgo_always_false {\n")
if d.Type.Params != nil {
for i := range d.Type.Params.List {
fmt.Fprintf(fgo2, "\t\t_Cgo_use(p%d)\n", i)
}
}
fmt.Fprintf(fgo2, "\t}\n")
fmt.Fprintf(fgo2, "\treturn\n")
fmt.Fprintf(fgo2, "}\n")
}
// writeOutput creates stubs for a specific source file to be compiled by gc
func (p *Package) writeOutput(f *File, srcfile string) {
base := srcfile
if strings.HasSuffix(base, ".go") {
base = base[0 : len(base)-3]
}
base = filepath.Base(base)
fgo1 := creat(*objDir + base + ".cgo1.go")
fgcc := creat(*objDir + base + ".cgo2.c")
p.GoFiles = append(p.GoFiles, base+".cgo1.go")
p.GccFiles = append(p.GccFiles, base+".cgo2.c")
// Write Go output: Go input with rewrites of C.xxx to _C_xxx.
fmt.Fprintf(fgo1, "// Code generated by cmd/cgo; DO NOT EDIT.\n\n")
fmt.Fprintf(fgo1, "//line %s:1:1\n", srcfile)
fgo1.Write(f.Edit.Bytes())
// While we process the vars and funcs, also write gcc output.
// Gcc output starts with the preamble.
fmt.Fprintf(fgcc, "%s\n", builtinProlog)
fmt.Fprintf(fgcc, "%s\n", f.Preamble)
fmt.Fprintf(fgcc, "%s\n", gccProlog)
fmt.Fprintf(fgcc, "%s\n", tsanProlog)
fmt.Fprintf(fgcc, "%s\n", msanProlog)
for _, key := range nameKeys(f.Name) {
n := f.Name[key]
if n.FuncType != nil {
p.writeOutputFunc(fgcc, n)
}
}
fgo1.Close()
fgcc.Close()
}
// fixGo converts the internal Name.Go field into the name we should show
// to users in error messages. There's only one for now: on input we rewrite
// C.malloc into C._CMalloc, so change it back here.
func fixGo(name string) string {
if name == "_CMalloc" {
return "malloc"
}
return name
}
var isBuiltin = map[string]bool{
"_Cfunc_CString": true,
"_Cfunc_CBytes": true,
"_Cfunc_GoString": true,
"_Cfunc_GoStringN": true,
"_Cfunc_GoBytes": true,
"_Cfunc__CMalloc": true,
}
func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) {
name := n.Mangle
if isBuiltin[name] || p.Written[name] {
// The builtins are already defined in the C prolog, and we don't
// want to duplicate function definitions we've already done.
return
}
p.Written[name] = true
if *gccgo {
p.writeGccgoOutputFunc(fgcc, n)
return
}
ctype, _ := p.structType(n)
// Gcc wrapper unpacks the C argument struct
// and calls the actual C function.
fmt.Fprintf(fgcc, "CGO_NO_SANITIZE_THREAD\n")
if n.AddError {
fmt.Fprintf(fgcc, "int\n")
} else {
fmt.Fprintf(fgcc, "void\n")
}
fmt.Fprintf(fgcc, "_cgo%s%s(void *v)\n", cPrefix, n.Mangle)
fmt.Fprintf(fgcc, "{\n")
if n.AddError {
fmt.Fprintf(fgcc, "\tint _cgo_errno;\n")
}
// We're trying to write a gcc struct that matches gc's layout.
// Use packed attribute to force no padding in this struct in case
// gcc has different packing requirements.
fmt.Fprintf(fgcc, "\t%s %v *_cgo_a = v;\n", ctype, p.packedAttribute())
if n.FuncType.Result != nil {
// Save the stack top for use below.
fmt.Fprintf(fgcc, "\tchar *_cgo_stktop = _cgo_topofstack();\n")
}
tr := n.FuncType.Result
if tr != nil {
fmt.Fprintf(fgcc, "\t__typeof__(_cgo_a->r) _cgo_r;\n")
}
fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
if n.AddError {
fmt.Fprintf(fgcc, "\terrno = 0;\n")
}
fmt.Fprintf(fgcc, "\t")
if tr != nil {
fmt.Fprintf(fgcc, "_cgo_r = ")
if c := tr.C.String(); c[len(c)-1] == '*' {
fmt.Fprint(fgcc, "(__typeof__(_cgo_a->r)) ")
}
}
if n.Kind == "macro" {
fmt.Fprintf(fgcc, "%s;\n", n.C)
} else {
fmt.Fprintf(fgcc, "%s(", n.C)
for i := range n.FuncType.Params {
if i > 0 {
fmt.Fprintf(fgcc, ", ")
}
fmt.Fprintf(fgcc, "_cgo_a->p%d", i)
}
fmt.Fprintf(fgcc, ");\n")
}
if n.AddError {
fmt.Fprintf(fgcc, "\t_cgo_errno = errno;\n")
}
fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
if n.FuncType.Result != nil {
// The cgo call may have caused a stack copy (via a callback).
// Adjust the return value pointer appropriately.
fmt.Fprintf(fgcc, "\t_cgo_a = (void*)((char*)_cgo_a + (_cgo_topofstack() - _cgo_stktop));\n")
// Save the return value.
fmt.Fprintf(fgcc, "\t_cgo_a->r = _cgo_r;\n")
// The return value is on the Go stack. If we are using msan,
// and if the C value is partially or completely uninitialized,
// the assignment will mark the Go stack as uninitialized.
// The Go compiler does not update msan for changes to the
// stack. It is possible that the stack will remain
// uninitialized, and then later be used in a way that is
// visible to msan, possibly leading to a false positive.
// Mark the stack space as written, to avoid this problem.
// See issue 26209.
fmt.Fprintf(fgcc, "\t_cgo_msan_write(&_cgo_a->r, sizeof(_cgo_a->r));\n")
}
if n.AddError {
fmt.Fprintf(fgcc, "\treturn _cgo_errno;\n")
}
fmt.Fprintf(fgcc, "}\n")
fmt.Fprintf(fgcc, "\n")
}
// Write out a wrapper for a function when using gccgo. This is a
// simple wrapper that just calls the real function. We only need a
// wrapper to support static functions in the prologue--without a
// wrapper, we can't refer to the function, since the reference is in
// a different file.
func (p *Package) writeGccgoOutputFunc(fgcc *os.File, n *Name) {
fmt.Fprintf(fgcc, "CGO_NO_SANITIZE_THREAD\n")
if t := n.FuncType.Result; t != nil {
fmt.Fprintf(fgcc, "%s\n", t.C.String())
} else {
fmt.Fprintf(fgcc, "void\n")
}
fmt.Fprintf(fgcc, "_cgo%s%s(", cPrefix, n.Mangle)
for i, t := range n.FuncType.Params {
if i > 0 {
fmt.Fprintf(fgcc, ", ")
}
c := t.Typedef
if c == "" {
c = t.C.String()
}
fmt.Fprintf(fgcc, "%s p%d", c, i)
}
fmt.Fprintf(fgcc, ")\n")
fmt.Fprintf(fgcc, "{\n")
if t := n.FuncType.Result; t != nil {
fmt.Fprintf(fgcc, "\t%s _cgo_r;\n", t.C.String())
}
fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
fmt.Fprintf(fgcc, "\t")
if t := n.FuncType.Result; t != nil {
fmt.Fprintf(fgcc, "_cgo_r = ")
// Cast to void* to avoid warnings due to omitted qualifiers.
if c := t.C.String(); c[len(c)-1] == '*' {
fmt.Fprintf(fgcc, "(void*)")
}
}
if n.Kind == "macro" {
fmt.Fprintf(fgcc, "%s;\n", n.C)
} else {
fmt.Fprintf(fgcc, "%s(", n.C)
for i := range n.FuncType.Params {
if i > 0 {
fmt.Fprintf(fgcc, ", ")
}
fmt.Fprintf(fgcc, "p%d", i)
}
fmt.Fprintf(fgcc, ");\n")
}
fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
if t := n.FuncType.Result; t != nil {
fmt.Fprintf(fgcc, "\treturn ")
// Cast to void* to avoid warnings due to omitted qualifiers
// and explicit incompatible struct types.
if c := t.C.String(); c[len(c)-1] == '*' {
fmt.Fprintf(fgcc, "(void*)")
}
fmt.Fprintf(fgcc, "_cgo_r;\n")
}
fmt.Fprintf(fgcc, "}\n")
fmt.Fprintf(fgcc, "\n")
}
// packedAttribute returns the host compiler struct attribute that will be
// used to match gc's struct layout. For example, on 386 Windows,
// gcc wants to 8-align int64s, but gc does not.
// Use __gcc_struct__ to work around https://gcc.gnu.org/PR52991 on x86,
// and https://golang.org/issue/5603.
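// For example, on linux/amd64 with gcc this returns
// "__attribute__((__packed__, __gcc_struct__))", while with clang (or on
// non-x86 targets) it returns just "__attribute__((__packed__))".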
func (p *Package) packedAttribute() string {
s := "__attribute__((__packed__"
if !p.GccIsClang && (goarch == "amd64" || goarch == "386") {
s += ", __gcc_struct__"
}
return s + "))"
}
// Write out the various stubs we need to support functions exported
// from Go so that they are callable from C.
func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
p.writeExportHeader(fgcch)
fmt.Fprintf(fgcc, "/* Code generated by cmd/cgo; DO NOT EDIT. */\n\n")
fmt.Fprintf(fgcc, "#include <stdlib.h>\n")
fmt.Fprintf(fgcc, "#include \"_cgo_export.h\"\n\n")
// We use packed structs, but they are always aligned.
// The pragmas and address-of-packed-member are only recognized as
// warning groups in clang 4.0+, so ignore unknown pragmas first.
fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Wunknown-pragmas\"\n")
fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Wpragmas\"\n")
fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Waddress-of-packed-member\"\n")
fmt.Fprintf(fgcc, "extern void crosscall2(void (*fn)(void *, int, __SIZE_TYPE__), void *, int, __SIZE_TYPE__);\n")
fmt.Fprintf(fgcc, "extern __SIZE_TYPE__ _cgo_wait_runtime_init_done(void);\n")
fmt.Fprintf(fgcc, "extern void _cgo_release_context(__SIZE_TYPE__);\n\n")
fmt.Fprintf(fgcc, "extern char* _cgo_topofstack(void);")
fmt.Fprintf(fgcc, "%s\n", tsanProlog)
fmt.Fprintf(fgcc, "%s\n", msanProlog)
for _, exp := range p.ExpFunc {
fn := exp.Func
// Construct a gcc struct matching the gc argument and
// result frame. The gcc struct will be compiled with
// __attribute__((packed)) so all padding must be accounted
// for explicitly.
ctype := "struct {\n"
off := int64(0)
npad := 0
if fn.Recv != nil {
t := p.cgoType(fn.Recv.List[0].Type)
ctype += fmt.Sprintf("\t\t%s recv;\n", t.C)
off += t.Size
}
fntype := fn.Type
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
t := p.cgoType(atype)
if off%t.Align != 0 {
pad := t.Align - off%t.Align
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
ctype += fmt.Sprintf("\t\t%s p%d;\n", t.C, i)
off += t.Size
})
if off%p.PtrSize != 0 {
pad := p.PtrSize - off%p.PtrSize
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
t := p.cgoType(atype)
if off%t.Align != 0 {
pad := t.Align - off%t.Align
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
ctype += fmt.Sprintf("\t\t%s r%d;\n", t.C, i)
off += t.Size
})
if off%p.PtrSize != 0 {
pad := p.PtrSize - off%p.PtrSize
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
if ctype == "struct {\n" {
ctype += "\t\tchar unused;\n" // avoid empty struct
}
ctype += "\t}"
// Get the return type of the wrapper function
// compiled by gcc.
gccResult := ""
if fntype.Results == nil || len(fntype.Results.List) == 0 {
gccResult = "void"
} else if len(fntype.Results.List) == 1 && len(fntype.Results.List[0].Names) <= 1 {
gccResult = p.cgoType(fntype.Results.List[0].Type).C.String()
} else {
fmt.Fprintf(fgcch, "\n/* Return type for %s */\n", exp.ExpName)
fmt.Fprintf(fgcch, "struct %s_return {\n", exp.ExpName)
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
fmt.Fprintf(fgcch, "\t%s r%d;", p.cgoType(atype).C, i)
if len(aname) > 0 {
fmt.Fprintf(fgcch, " /* %s */", aname)
}
fmt.Fprint(fgcch, "\n")
})
fmt.Fprintf(fgcch, "};\n")
gccResult = "struct " + exp.ExpName + "_return"
}
// Build the wrapper function compiled by gcc.
s := fmt.Sprintf("%s %s(", gccResult, exp.ExpName)
if fn.Recv != nil {
s += p.cgoType(fn.Recv.List[0].Type).C.String()
s += " recv"
}
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if i > 0 || fn.Recv != nil {
s += ", "
}
s += fmt.Sprintf("%s p%d", p.cgoType(atype).C, i)
})
s += ")"
if len(exp.Doc) > 0 {
fmt.Fprintf(fgcch, "\n%s", exp.Doc)
}
fmt.Fprintf(fgcch, "\nextern %s;\n", s)
fmt.Fprintf(fgcc, "extern void _cgoexp%s_%s(void *, int, __SIZE_TYPE__);\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgcc, "\nCGO_NO_SANITIZE_THREAD")
fmt.Fprintf(fgcc, "\n%s\n", s)
fmt.Fprintf(fgcc, "{\n")
fmt.Fprintf(fgcc, "\t__SIZE_TYPE__ _cgo_ctxt = _cgo_wait_runtime_init_done();\n")
fmt.Fprintf(fgcc, "\t%s %v a;\n", ctype, p.packedAttribute())
if gccResult != "void" && (len(fntype.Results.List) > 1 || len(fntype.Results.List[0].Names) > 1) {
fmt.Fprintf(fgcc, "\t%s r;\n", gccResult)
}
if fn.Recv != nil {
fmt.Fprintf(fgcc, "\ta.recv = recv;\n")
}
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
fmt.Fprintf(fgcc, "\ta.p%d = p%d;\n", i, i)
})
fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
fmt.Fprintf(fgcc, "\tcrosscall2(_cgoexp%s_%s, &a, %d, _cgo_ctxt);\n", cPrefix, exp.ExpName, off)
fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
fmt.Fprintf(fgcc, "\t_cgo_release_context(_cgo_ctxt);\n")
if gccResult != "void" {
if len(fntype.Results.List) == 1 && len(fntype.Results.List[0].Names) <= 1 {
fmt.Fprintf(fgcc, "\treturn a.r0;\n")
} else {
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
fmt.Fprintf(fgcc, "\tr.r%d = a.r%d;\n", i, i)
})
fmt.Fprintf(fgcc, "\treturn r;\n")
}
}
fmt.Fprintf(fgcc, "}\n")
// Build the wrapper function compiled by cmd/compile.
goname := "_cgoexpwrap" + cPrefix + "_"
if fn.Recv != nil {
goname += fn.Recv.List[0].Names[0].Name + "_"
}
goname += exp.Func.Name.Name
fmt.Fprintf(fgo2, "//go:cgo_export_dynamic %s\n", exp.ExpName)
fmt.Fprintf(fgo2, "//go:linkname _cgoexp%s_%s _cgoexp%s_%s\n", cPrefix, exp.ExpName, cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "//go:cgo_export_static _cgoexp%s_%s\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "//go:nosplit\n") // no split stack, so no use of m or g
fmt.Fprintf(fgo2, "//go:norace\n") // must not have race detector calls inserted
fmt.Fprintf(fgo2, "func _cgoexp%s_%s(a unsafe.Pointer, n int32, ctxt uintptr) {\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "\tfn := %s\n", goname)
// The indirect here is converting from a Go function pointer to a C function pointer.
fmt.Fprintf(fgo2, "\t_cgo_runtime_cgocallback(**(**unsafe.Pointer)(unsafe.Pointer(&fn)), a, uintptr(n), ctxt);\n")
fmt.Fprintf(fgo2, "}\n")
fmt.Fprintf(fm, "int _cgoexp%s_%s;\n", cPrefix, exp.ExpName)
// This code uses printer.Fprint, not conf.Fprint,
// because we don't want //line comments in the middle
// of the function types.
fmt.Fprintf(fgo2, "\n")
fmt.Fprintf(fgo2, "func %s(", goname)
comma := false
if fn.Recv != nil {
fmt.Fprintf(fgo2, "recv ")
printer.Fprint(fgo2, fset, fn.Recv.List[0].Type)
comma = true
}
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if comma {
fmt.Fprintf(fgo2, ", ")
}
fmt.Fprintf(fgo2, "p%d ", i)
printer.Fprint(fgo2, fset, atype)
comma = true
})
fmt.Fprintf(fgo2, ")")
if gccResult != "void" {
fmt.Fprint(fgo2, " (")
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
if i > 0 {
fmt.Fprint(fgo2, ", ")
}
fmt.Fprintf(fgo2, "r%d ", i)
printer.Fprint(fgo2, fset, atype)
})
fmt.Fprint(fgo2, ")")
}
fmt.Fprint(fgo2, " {\n")
if gccResult == "void" {
fmt.Fprint(fgo2, "\t")
} else {
// Verify that any results don't contain any
// Go pointers.
addedDefer := false
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
if !p.hasPointer(nil, atype, false) {
return
}
if !addedDefer {
fmt.Fprint(fgo2, "\tdefer func() {\n")
addedDefer = true
}
fmt.Fprintf(fgo2, "\t\t_cgoCheckResult(r%d)\n", i)
})
if addedDefer {
fmt.Fprint(fgo2, "\t}()\n")
}
fmt.Fprint(fgo2, "\treturn ")
}
if fn.Recv != nil {
fmt.Fprintf(fgo2, "recv.")
}
fmt.Fprintf(fgo2, "%s(", exp.Func.Name)
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if i > 0 {
fmt.Fprint(fgo2, ", ")
}
fmt.Fprintf(fgo2, "p%d", i)
})
fmt.Fprint(fgo2, ")\n")
fmt.Fprint(fgo2, "}\n")
}
fmt.Fprintf(fgcch, "%s", gccExportHeaderEpilog)
}
// Write out the C header allowing C code to call exported gccgo functions.
func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
gccgoSymbolPrefix := p.gccgoSymbolPrefix()
p.writeExportHeader(fgcch)
fmt.Fprintf(fgcc, "/* Code generated by cmd/cgo; DO NOT EDIT. */\n\n")
fmt.Fprintf(fgcc, "#include \"_cgo_export.h\"\n")
fmt.Fprintf(fgcc, "%s\n", gccgoExportFileProlog)
fmt.Fprintf(fgcc, "%s\n", tsanProlog)
fmt.Fprintf(fgcc, "%s\n", msanProlog)
for _, exp := range p.ExpFunc {
fn := exp.Func
fntype := fn.Type
cdeclBuf := new(bytes.Buffer)
resultCount := 0
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) { resultCount++ })
switch resultCount {
case 0:
fmt.Fprintf(cdeclBuf, "void")
case 1:
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
t := p.cgoType(atype)
fmt.Fprintf(cdeclBuf, "%s", t.C)
})
default:
// Declare a result struct.
fmt.Fprintf(fgcch, "\n/* Return type for %s */\n", exp.ExpName)
fmt.Fprintf(fgcch, "struct %s_return {\n", exp.ExpName)
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
t := p.cgoType(atype)
fmt.Fprintf(fgcch, "\t%s r%d;", t.C, i)
if len(aname) > 0 {
fmt.Fprintf(fgcch, " /* %s */", aname)
}
fmt.Fprint(fgcch, "\n")
})
fmt.Fprintf(fgcch, "};\n")
fmt.Fprintf(cdeclBuf, "struct %s_return", exp.ExpName)
}
cRet := cdeclBuf.String()
cdeclBuf = new(bytes.Buffer)
fmt.Fprintf(cdeclBuf, "(")
if fn.Recv != nil {
fmt.Fprintf(cdeclBuf, "%s recv", p.cgoType(fn.Recv.List[0].Type).C.String())
}
// Function parameters.
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if i > 0 || fn.Recv != nil {
fmt.Fprintf(cdeclBuf, ", ")
}
t := p.cgoType(atype)
fmt.Fprintf(cdeclBuf, "%s p%d", t.C, i)
})
fmt.Fprintf(cdeclBuf, ")")
cParams := cdeclBuf.String()
if len(exp.Doc) > 0 {
fmt.Fprintf(fgcch, "\n%s", exp.Doc)
}
fmt.Fprintf(fgcch, "extern %s %s%s;\n", cRet, exp.ExpName, cParams)
// We need to use a name that will be exported by the
// Go code; otherwise gccgo will make it static and we
// will not be able to link against it from the C
// code.
goName := "Cgoexp_" + exp.ExpName
fmt.Fprintf(fgcc, `extern %s %s %s __asm__("%s.%s");`, cRet, goName, cParams, gccgoSymbolPrefix, goName)
fmt.Fprint(fgcc, "\n")
fmt.Fprint(fgcc, "\nCGO_NO_SANITIZE_THREAD\n")
fmt.Fprintf(fgcc, "%s %s %s {\n", cRet, exp.ExpName, cParams)
if resultCount > 0 {
fmt.Fprintf(fgcc, "\t%s r;\n", cRet)
}
fmt.Fprintf(fgcc, "\tif(_cgo_wait_runtime_init_done)\n")
fmt.Fprintf(fgcc, "\t\t_cgo_wait_runtime_init_done();\n")
fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
fmt.Fprint(fgcc, "\t")
if resultCount > 0 {
fmt.Fprint(fgcc, "r = ")
}
fmt.Fprintf(fgcc, "%s(", goName)
if fn.Recv != nil {
fmt.Fprint(fgcc, "recv")
}
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if i > 0 || fn.Recv != nil {
fmt.Fprintf(fgcc, ", ")
}
fmt.Fprintf(fgcc, "p%d", i)
})
fmt.Fprint(fgcc, ");\n")
fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
if resultCount > 0 {
fmt.Fprint(fgcc, "\treturn r;\n")
}
fmt.Fprint(fgcc, "}\n")
// Dummy declaration for _cgo_main.c
fmt.Fprintf(fm, `char %s[1] __asm__("%s.%s");`, goName, gccgoSymbolPrefix, goName)
fmt.Fprint(fm, "\n")
// For gccgo we use a wrapper function in Go, in order
// to call CgocallBack and CgocallBackDone.
// This code uses printer.Fprint, not conf.Fprint,
// because we don't want //line comments in the middle
// of the function types.
fmt.Fprint(fgo2, "\n")
fmt.Fprintf(fgo2, "func %s(", goName)
if fn.Recv != nil {
fmt.Fprint(fgo2, "recv ")
printer.Fprint(fgo2, fset, fn.Recv.List[0].Type)
}
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if i > 0 || fn.Recv != nil {
fmt.Fprintf(fgo2, ", ")
}
fmt.Fprintf(fgo2, "p%d ", i)
printer.Fprint(fgo2, fset, atype)
})
fmt.Fprintf(fgo2, ")")
if resultCount > 0 {
fmt.Fprintf(fgo2, " (")
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
if i > 0 {
fmt.Fprint(fgo2, ", ")
}
printer.Fprint(fgo2, fset, atype)
})
fmt.Fprint(fgo2, ")")
}
fmt.Fprint(fgo2, " {\n")
fmt.Fprint(fgo2, "\tsyscall.CgocallBack()\n")
fmt.Fprint(fgo2, "\tdefer syscall.CgocallBackDone()\n")
fmt.Fprint(fgo2, "\t")
if resultCount > 0 {
fmt.Fprint(fgo2, "return ")
}
if fn.Recv != nil {
fmt.Fprint(fgo2, "recv.")
}
fmt.Fprintf(fgo2, "%s(", exp.Func.Name)
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if i > 0 {
fmt.Fprint(fgo2, ", ")
}
fmt.Fprintf(fgo2, "p%d", i)
})
fmt.Fprint(fgo2, ")\n")
fmt.Fprint(fgo2, "}\n")
}
fmt.Fprintf(fgcch, "%s", gccExportHeaderEpilog)
}
// writeExportHeader writes out the start of the _cgo_export.h file.
func (p *Package) writeExportHeader(fgcch io.Writer) {
fmt.Fprintf(fgcch, "/* Code generated by cmd/cgo; DO NOT EDIT. */\n\n")
pkg := *importPath
if pkg == "" {
pkg = p.PackagePath
}
fmt.Fprintf(fgcch, "/* package %s */\n\n", pkg)
fmt.Fprintf(fgcch, "%s\n", builtinExportProlog)
// Remove absolute paths from #line comments in the preamble.
// They aren't useful for people using the header file,
// and they mean that the header files change based on the
// exact location of GOPATH.
re := regexp.MustCompile(`(?m)^(#line\s+[0-9]+\s+")[^"]*[/\\]([^"]*")`)
preamble := re.ReplaceAllString(p.Preamble, "$1$2")
fmt.Fprintf(fgcch, "/* Start of preamble from import \"C\" comments. */\n\n")
fmt.Fprintf(fgcch, "%s\n", preamble)
fmt.Fprintf(fgcch, "\n/* End of preamble from import \"C\" comments. */\n\n")
fmt.Fprintf(fgcch, "%s\n", p.gccExportHeaderProlog())
}
// gccgoUsesNewMangling reports whether gccgo uses the new collision-free
// packagepath mangling scheme (see determineGccgoManglingScheme for more
// info).
func gccgoUsesNewMangling() bool {
if !gccgoMangleCheckDone {
gccgoNewmanglingInEffect = determineGccgoManglingScheme()
gccgoMangleCheckDone = true
}
return gccgoNewmanglingInEffect
}
const mangleCheckCode = `
package lรคufer
func Run(x int) int {
return 1
}
`
// determineGccgoManglingScheme performs a runtime test to see which
// flavor of packagepath mangling gccgo is using. Older versions of
// gccgo use a simple mangling scheme where there can be collisions
// between packages whose paths are different but mangle to the same
// string. More recent versions of gccgo use a new mangler that avoids
// these collisions. Return value is whether gccgo uses the new mangling.
func determineGccgoManglingScheme() bool {
// Emit a small Go file for gccgo to compile.
filepat := "*_gccgo_manglecheck.go"
var f *os.File
var err error
if f, err = ioutil.TempFile(*objDir, filepat); err != nil {
fatalf("%v", err)
}
gofilename := f.Name()
defer os.Remove(gofilename)
if err = ioutil.WriteFile(gofilename, []byte(mangleCheckCode), 0666); err != nil {
fatalf("%v", err)
}
// Compile with gccgo, capturing generated assembly.
gccgocmd := os.Getenv("GCCGO")
if gccgocmd == "" {
gpath, gerr := exec.LookPath("gccgo")
if gerr != nil {
fatalf("unable to locate gccgo: %v", gerr)
}
gccgocmd = gpath
}
cmd := exec.Command(gccgocmd, "-S", "-o", "-", gofilename)
buf, cerr := cmd.CombinedOutput()
if cerr != nil {
fatalf("%s", cerr)
}
// New mangling: expect go.l..u00e4ufer.Run
// Old mangling: expect go.l__ufer.Run
return regexp.MustCompile(`go\.l\.\.u00e4ufer\.Run`).Match(buf)
}
// gccgoPkgpathToSymbolNew converts a package path to a gccgo-style
// package symbol.
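// For illustration, a path such as "example.com/foo-bar" would mangle to
// "example.x2ecom..z2ffoo..z2dbar": '.' becomes ".x2e" and any other
// non-identifier byte becomes ".." followed by 'z' and its hex value.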
func gccgoPkgpathToSymbolNew(ppath string) string {
bsl := []byte{}
changed := false
for _, c := range []byte(ppath) {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z',
'0' <= c && c <= '9', c == '_':
bsl = append(bsl, c)
case c == '.':
bsl = append(bsl, ".x2e"...)
default:
changed = true
encbytes := []byte(fmt.Sprintf("..z%02x", c))
bsl = append(bsl, encbytes...)
}
}
if !changed {
return ppath
}
return string(bsl)
}
// gccgoPkgpathToSymbolOld converts a package path to a gccgo-style
// package symbol using the older mangling scheme.
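// For illustration, "example.com/foo-bar" would become
// "example_com_foo_bar" under this scheme.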
func gccgoPkgpathToSymbolOld(ppath string) string {
clean := func(r rune) rune {
switch {
case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z',
'0' <= r && r <= '9':
return r
}
return '_'
}
return strings.Map(clean, ppath)
}
// gccgoPkgpathToSymbol converts a package path to a mangled packagepath
// symbol.
func gccgoPkgpathToSymbol(ppath string) string {
if gccgoUsesNewMangling() {
return gccgoPkgpathToSymbolNew(ppath)
} else {
return gccgoPkgpathToSymbolOld(ppath)
}
}
// Return the package prefix when using gccgo.
func (p *Package) gccgoSymbolPrefix() string {
if !*gccgo {
return ""
}
if *gccgopkgpath != "" {
return gccgoPkgpathToSymbol(*gccgopkgpath)
}
if *gccgoprefix == "" && p.PackageName == "main" {
return "main"
}
prefix := gccgoPkgpathToSymbol(*gccgoprefix)
if prefix == "" {
prefix = "go"
}
return prefix + "." + p.PackageName
}
// Call a function for each entry in an ast.FieldList, passing the
// index into the list, the name if any, and the type.
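// For example, a field list such as (a, b int, s string) would produce the
// calls fn(0, "a", int), fn(1, "b", int) and fn(2, "s", string), where the
// last argument is the corresponding type expression.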
func forFieldList(fl *ast.FieldList, fn func(int, string, ast.Expr)) {
if fl == nil {
return
}
i := 0
for _, r := range fl.List {
if r.Names == nil {
fn(i, "", r.Type)
i++
} else {
for _, n := range r.Names {
fn(i, n.Name, r.Type)
i++
}
}
}
}
func c(repr string, args ...interface{}) *TypeRepr {
return &TypeRepr{repr, args}
}
// Map predeclared Go types to Type.
var goTypes = map[string]*Type{
"bool": {Size: 1, Align: 1, C: c("GoUint8")},
"byte": {Size: 1, Align: 1, C: c("GoUint8")},
"int": {Size: 0, Align: 0, C: c("GoInt")},
"uint": {Size: 0, Align: 0, C: c("GoUint")},
"rune": {Size: 4, Align: 4, C: c("GoInt32")},
"int8": {Size: 1, Align: 1, C: c("GoInt8")},
"uint8": {Size: 1, Align: 1, C: c("GoUint8")},
"int16": {Size: 2, Align: 2, C: c("GoInt16")},
"uint16": {Size: 2, Align: 2, C: c("GoUint16")},
"int32": {Size: 4, Align: 4, C: c("GoInt32")},
"uint32": {Size: 4, Align: 4, C: c("GoUint32")},
"int64": {Size: 8, Align: 8, C: c("GoInt64")},
"uint64": {Size: 8, Align: 8, C: c("GoUint64")},
"float32": {Size: 4, Align: 4, C: c("GoFloat32")},
"float64": {Size: 8, Align: 8, C: c("GoFloat64")},
"complex64": {Size: 8, Align: 4, C: c("GoComplex64")},
"complex128": {Size: 16, Align: 8, C: c("GoComplex128")},
}
// Map an ast type to a Type.
func (p *Package) cgoType(e ast.Expr) *Type {
switch t := e.(type) {
case *ast.StarExpr:
x := p.cgoType(t.X)
return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("%s*", x.C)}
case *ast.ArrayType:
if t.Len == nil {
// Slice: pointer, len, cap.
return &Type{Size: p.PtrSize * 3, Align: p.PtrSize, C: c("GoSlice")}
}
// Non-slice array types are not supported.
case *ast.StructType:
// Not supported.
case *ast.FuncType:
return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("void*")}
case *ast.InterfaceType:
return &Type{Size: 2 * p.PtrSize, Align: p.PtrSize, C: c("GoInterface")}
case *ast.MapType:
return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("GoMap")}
case *ast.ChanType:
return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("GoChan")}
case *ast.Ident:
// Look up the type in the top level declarations.
// TODO: Handle types defined within a function.
for _, d := range p.Decl {
gd, ok := d.(*ast.GenDecl)
if !ok || gd.Tok != token.TYPE {
continue
}
for _, spec := range gd.Specs {
ts, ok := spec.(*ast.TypeSpec)
if !ok {
continue
}
if ts.Name.Name == t.Name {
return p.cgoType(ts.Type)
}
}
}
if def := typedef[t.Name]; def != nil {
return def
}
if t.Name == "uintptr" {
return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("GoUintptr")}
}
if t.Name == "string" {
// The string data is 1 pointer + 1 (pointer-sized) int.
return &Type{Size: 2 * p.PtrSize, Align: p.PtrSize, C: c("GoString")}
}
if t.Name == "error" {
return &Type{Size: 2 * p.PtrSize, Align: p.PtrSize, C: c("GoInterface")}
}
if r, ok := goTypes[t.Name]; ok {
if r.Size == 0 { // int or uint
rr := new(Type)
*rr = *r
rr.Size = p.IntSize
rr.Align = p.IntSize
r = rr
}
if r.Align > p.PtrSize {
r.Align = p.PtrSize
}
return r
}
error_(e.Pos(), "unrecognized Go type %s", t.Name)
return &Type{Size: 4, Align: 4, C: c("int")}
case *ast.SelectorExpr:
id, ok := t.X.(*ast.Ident)
if ok && id.Name == "unsafe" && t.Sel.Name == "Pointer" {
return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("void*")}
}
}
error_(e.Pos(), "Go type not supported in export: %s", gofmt(e))
return &Type{Size: 4, Align: 4, C: c("int")}
}
const gccProlog = `
#line 1 "cgo-gcc-prolog"
/*
If x and y are not equal, the type will be invalid
(have a negative array count) and an inscrutable error will come
out of the compiler and hopefully mention "name".
*/
#define __cgo_compile_assert_eq(x, y, name) typedef char name[(x-y)*(x-y)*-2+1];
/* Check at compile time that the sizes we use match our expectations. */
#define __cgo_size_assert(t, n) __cgo_compile_assert_eq(sizeof(t), n, _cgo_sizeof_##t##_is_not_##n)
__cgo_size_assert(char, 1)
__cgo_size_assert(short, 2)
__cgo_size_assert(int, 4)
typedef long long __cgo_long_long;
__cgo_size_assert(__cgo_long_long, 8)
__cgo_size_assert(float, 4)
__cgo_size_assert(double, 8)
extern char* _cgo_topofstack(void);
/*
We use packed structs, but they are always aligned.
The pragmas and address-of-packed-member are only recognized as warning
groups in clang 4.0+, so ignore unknown pragmas first.
*/
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
#include <errno.h>
#include <string.h>
`
// Prologue defining TSAN functions in C.
const noTsanProlog = `
#define CGO_NO_SANITIZE_THREAD
#define _cgo_tsan_acquire()
#define _cgo_tsan_release()
`
// This must match the TSAN code in runtime/cgo/libcgo.h.
// This is used when the code is built with the C/C++ Thread SANitizer,
// which is not the same as the Go race detector.
// __tsan_acquire tells TSAN that we are acquiring a lock on a variable,
// in this case _cgo_sync. __tsan_release releases the lock.
// (There is no actual lock, we are just telling TSAN that there is.)
//
// When we call from Go to C we call _cgo_tsan_acquire.
// When the C function returns we call _cgo_tsan_release.
// Similarly, when C calls back into Go we call _cgo_tsan_release
// and then call _cgo_tsan_acquire when we return to C.
// These calls tell TSAN that there is a serialization point at the C call.
//
// This is necessary because TSAN, which is a C/C++ tool, can not see
// the synchronization in the Go code. Without these calls, when
// multiple goroutines call into C code, TSAN does not understand
// that the calls are properly synchronized on the Go side.
//
// To be clear, if the calls are not properly synchronized on the Go side,
// we will be hiding races. But when using TSAN on mixed Go C/C++ code
// it is more important to avoid false positives, which reduce confidence
// in the tool, than to avoid false negatives.
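// As a sketch, a single call from Go into a C function f is bracketed as:
//
//	_cgo_tsan_acquire();
//	f(...);
//	_cgo_tsan_release();
//
// so that TSAN sees every crossing between Go and C as an acquire/release
// pair on _cgo_sync.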
const yesTsanProlog = `
#line 1 "cgo-tsan-prolog"
#define CGO_NO_SANITIZE_THREAD __attribute__ ((no_sanitize_thread))
long long _cgo_sync __attribute__ ((common));
extern void __tsan_acquire(void*);
extern void __tsan_release(void*);
__attribute__ ((unused))
static void _cgo_tsan_acquire() {
__tsan_acquire(&_cgo_sync);
}
__attribute__ ((unused))
static void _cgo_tsan_release() {
__tsan_release(&_cgo_sync);
}
`
// Set to yesTsanProlog if we see -fsanitize=thread in the flags for gcc.
var tsanProlog = noTsanProlog
// noMsanProlog is a prologue defining an MSAN function in C.
// This is used when not compiling with -fsanitize=memory.
const noMsanProlog = `
#define _cgo_msan_write(addr, sz)
`
// yesMsanProlog is a prologue defining an MSAN function in C.
// This is used when compiling with -fsanitize=memory.
// See the comment above where _cgo_msan_write is called.
const yesMsanProlog = `
extern void __msan_unpoison(const volatile void *, size_t);
#define _cgo_msan_write(addr, sz) __msan_unpoison((addr), (sz))
`
// msanProlog is set to yesMsanProlog if we see -fsanitize=memory in the flags
// for the C compiler.
var msanProlog = noMsanProlog
const builtinProlog = `
#line 1 "cgo-builtin-prolog"
#include <stddef.h> /* for ptrdiff_t and size_t below */
/* Define intgo when compiling with GCC. */
typedef ptrdiff_t intgo;
#define GO_CGO_GOSTRING_TYPEDEF
typedef struct { const char *p; intgo n; } _GoString_;
typedef struct { char *p; intgo n; intgo c; } _GoBytes_;
_GoString_ GoString(char *p);
_GoString_ GoStringN(char *p, int l);
_GoBytes_ GoBytes(void *p, int n);
char *CString(_GoString_);
void *CBytes(_GoBytes_);
void *_CMalloc(size_t);
__attribute__ ((unused))
static size_t _GoStringLen(_GoString_ s) { return (size_t)s.n; }
__attribute__ ((unused))
static const char *_GoStringPtr(_GoString_ s) { return s.p; }
`
const goProlog = `
//go:linkname _cgo_runtime_cgocall runtime.cgocall
func _cgo_runtime_cgocall(unsafe.Pointer, uintptr) int32
//go:linkname _cgo_runtime_cgocallback runtime.cgocallback
func _cgo_runtime_cgocallback(unsafe.Pointer, unsafe.Pointer, uintptr, uintptr)
//go:linkname _cgoCheckPointer runtime.cgoCheckPointer
func _cgoCheckPointer(interface{}, interface{})
//go:linkname _cgoCheckResult runtime.cgoCheckResult
func _cgoCheckResult(interface{})
`
const gccgoGoProlog = `
func _cgoCheckPointer(interface{}, interface{})
func _cgoCheckResult(interface{})
`
const goStringDef = `
//go:linkname _cgo_runtime_gostring runtime.gostring
func _cgo_runtime_gostring(*_Ctype_char) string
func _Cfunc_GoString(p *_Ctype_char) string {
return _cgo_runtime_gostring(p)
}
`
const goStringNDef = `
//go:linkname _cgo_runtime_gostringn runtime.gostringn
func _cgo_runtime_gostringn(*_Ctype_char, int) string
func _Cfunc_GoStringN(p *_Ctype_char, l _Ctype_int) string {
return _cgo_runtime_gostringn(p, int(l))
}
`
const goBytesDef = `
//go:linkname _cgo_runtime_gobytes runtime.gobytes
func _cgo_runtime_gobytes(unsafe.Pointer, int) []byte
func _Cfunc_GoBytes(p unsafe.Pointer, l _Ctype_int) []byte {
return _cgo_runtime_gobytes(p, int(l))
}
`
const cStringDef = `
func _Cfunc_CString(s string) *_Ctype_char {
p := _cgo_cmalloc(uint64(len(s)+1))
pp := (*[1<<30]byte)(p)
copy(pp[:], s)
pp[len(s)] = 0
return (*_Ctype_char)(p)
}
`
const cBytesDef = `
func _Cfunc_CBytes(b []byte) unsafe.Pointer {
p := _cgo_cmalloc(uint64(len(b)))
pp := (*[1<<30]byte)(p)
copy(pp[:], b)
return p
}
`
const cMallocDef = `
func _Cfunc__CMalloc(n _Ctype_size_t) unsafe.Pointer {
return _cgo_cmalloc(uint64(n))
}
`
var builtinDefs = map[string]string{
"GoString": goStringDef,
"GoStringN": goStringNDef,
"GoBytes": goBytesDef,
"CString": cStringDef,
"CBytes": cBytesDef,
"_CMalloc": cMallocDef,
}
// Definitions for C.malloc in Go and in C. We define it ourselves
// since we call it from functions we define, such as C.CString.
// Also, we have historically ensured that C.malloc does not return
// nil even for an allocation of 0.
const cMallocDefGo = `
//go:cgo_import_static _cgoPREFIX_Cfunc__Cmalloc
//go:linkname __cgofn__cgoPREFIX_Cfunc__Cmalloc _cgoPREFIX_Cfunc__Cmalloc
var __cgofn__cgoPREFIX_Cfunc__Cmalloc byte
var _cgoPREFIX_Cfunc__Cmalloc = unsafe.Pointer(&__cgofn__cgoPREFIX_Cfunc__Cmalloc)
//go:linkname runtime_throw runtime.throw
func runtime_throw(string)
//go:cgo_unsafe_args
func _cgo_cmalloc(p0 uint64) (r1 unsafe.Pointer) {
_cgo_runtime_cgocall(_cgoPREFIX_Cfunc__Cmalloc, uintptr(unsafe.Pointer(&p0)))
if r1 == nil {
runtime_throw("runtime: C malloc failed")
}
return
}
`
// cMallocDefC defines the C version of C.malloc for the gc compiler.
// It is defined here because C.CString and friends need a definition.
// We define it by hand, rather than simply inventing a reference to
// C.malloc, because <stdlib.h> may not have been included.
// This is approximately what writeOutputFunc would generate, but
// skips the cgo_topofstack code (which is only needed if the C code
// calls back into Go). This also avoids returning nil for an
// allocation of 0 bytes.
const cMallocDefC = `
CGO_NO_SANITIZE_THREAD
void _cgoPREFIX_Cfunc__Cmalloc(void *v) {
struct {
unsigned long long p0;
void *r1;
} PACKED *a = v;
void *ret;
_cgo_tsan_acquire();
ret = malloc(a->p0);
if (ret == 0 && a->p0 == 0) {
ret = malloc(1);
}
a->r1 = ret;
_cgo_tsan_release();
}
`
func (p *Package) cPrologGccgo() string {
return strings.Replace(strings.Replace(cPrologGccgo, "PREFIX", cPrefix, -1),
"GCCGOSYMBOLPREF", p.gccgoSymbolPrefix(), -1)
}
const cPrologGccgo = `
#line 1 "cgo-c-prolog-gccgo"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
typedef unsigned char byte;
typedef intptr_t intgo;
struct __go_string {
const unsigned char *__data;
intgo __length;
};
typedef struct __go_open_array {
void* __values;
intgo __count;
intgo __capacity;
} Slice;
struct __go_string __go_byte_array_to_string(const void* p, intgo len);
struct __go_open_array __go_string_to_byte_array (struct __go_string str);
const char *_cgoPREFIX_Cfunc_CString(struct __go_string s) {
char *p = malloc(s.__length+1);
memmove(p, s.__data, s.__length);
p[s.__length] = 0;
return p;
}
void *_cgoPREFIX_Cfunc_CBytes(struct __go_open_array b) {
char *p = malloc(b.__count);
memmove(p, b.__values, b.__count);
return p;
}
struct __go_string _cgoPREFIX_Cfunc_GoString(char *p) {
intgo len = (p != NULL) ? strlen(p) : 0;
return __go_byte_array_to_string(p, len);
}
struct __go_string _cgoPREFIX_Cfunc_GoStringN(char *p, int32_t n) {
return __go_byte_array_to_string(p, n);
}
Slice _cgoPREFIX_Cfunc_GoBytes(char *p, int32_t n) {
struct __go_string s = { (const unsigned char *)p, n };
return __go_string_to_byte_array(s);
}
extern void runtime_throw(const char *);
void *_cgoPREFIX_Cfunc__CMalloc(size_t n) {
void *p = malloc(n);
if(p == NULL && n == 0)
p = malloc(1);
if(p == NULL)
runtime_throw("runtime: C malloc failed");
return p;
}
struct __go_type_descriptor;
typedef struct __go_empty_interface {
const struct __go_type_descriptor *__type_descriptor;
void *__object;
} Eface;
extern void runtimeCgoCheckPointer(Eface, Eface)
__asm__("runtime.cgoCheckPointer")
__attribute__((weak));
extern void localCgoCheckPointer(Eface, Eface)
__asm__("GCCGOSYMBOLPREF._cgoCheckPointer");
void localCgoCheckPointer(Eface ptr, Eface arg) {
if(runtimeCgoCheckPointer) {
runtimeCgoCheckPointer(ptr, arg);
}
}
extern void runtimeCgoCheckResult(Eface)
__asm__("runtime.cgoCheckResult")
__attribute__((weak));
extern void localCgoCheckResult(Eface)
__asm__("GCCGOSYMBOLPREF._cgoCheckResult");
void localCgoCheckResult(Eface val) {
if(runtimeCgoCheckResult) {
runtimeCgoCheckResult(val);
}
}
`
// builtinExportProlog is a shorter version of builtinProlog,
// to be put into the _cgo_export.h file.
// For historical reasons we can't use builtinProlog in _cgo_export.h,
// because _cgo_export.h defines GoString as a struct while builtinProlog
// defines it as a function. We don't change this to avoid unnecessarily
// breaking existing code.
// The test of GO_CGO_GOSTRING_TYPEDEF avoids a duplicate definition
// error if a Go file with a cgo comment #include's the export header
// generated by a different package.
const builtinExportProlog = `
#line 1 "cgo-builtin-export-prolog"
#include <stddef.h> /* for ptrdiff_t below */
#ifndef GO_CGO_EXPORT_PROLOGUE_H
#define GO_CGO_EXPORT_PROLOGUE_H
#ifndef GO_CGO_GOSTRING_TYPEDEF
typedef struct { const char *p; ptrdiff_t n; } _GoString_;
#endif
#endif
`
func (p *Package) gccExportHeaderProlog() string {
return strings.Replace(gccExportHeaderProlog, "GOINTBITS", fmt.Sprint(8*p.IntSize), -1)
}
// gccExportHeaderProlog is written to the exported header, after the
// import "C" comment preamble but before the generated declarations
// of exported functions. This permits the generated declarations to
// use the type names that appear in goTypes, above.
//
// The test of GO_CGO_GOSTRING_TYPEDEF avoids a duplicate definition
// error if a Go file with a cgo comment #include's the export header
// generated by a different package. Unfortunately GoString means two
// different things: in this prolog it means a C name for the Go type,
// while in the prolog written into the start of the C code generated
// from a cgo-using Go file it means the C.GoString function. There is
// no way to resolve this conflict, but it also doesn't make much
// difference, as Go code never wants to refer to the latter meaning.
const gccExportHeaderProlog = `
/* Start of boilerplate cgo prologue. */
#line 1 "cgo-gcc-export-header-prolog"
#ifndef GO_CGO_PROLOGUE_H
#define GO_CGO_PROLOGUE_H
typedef signed char GoInt8;
typedef unsigned char GoUint8;
typedef short GoInt16;
typedef unsigned short GoUint16;
typedef int GoInt32;
typedef unsigned int GoUint32;
typedef long long GoInt64;
typedef unsigned long long GoUint64;
typedef GoIntGOINTBITS GoInt;
typedef GoUintGOINTBITS GoUint;
typedef __SIZE_TYPE__ GoUintptr;
typedef float GoFloat32;
typedef double GoFloat64;
typedef float _Complex GoComplex64;
typedef double _Complex GoComplex128;
/*
static assertion to make sure the file is being used on architecture
at least with matching size of GoInt.
*/
typedef char _check_for_GOINTBITS_bit_pointer_matching_GoInt[sizeof(void*)==GOINTBITS/8 ? 1:-1];
#ifndef GO_CGO_GOSTRING_TYPEDEF
typedef _GoString_ GoString;
#endif
typedef void *GoMap;
typedef void *GoChan;
typedef struct { void *t; void *v; } GoInterface;
typedef struct { void *data; GoInt len; GoInt cap; } GoSlice;
#endif
/* End of boilerplate cgo prologue. */
#ifdef __cplusplus
extern "C" {
#endif
`
// gccExportHeaderEpilog goes at the end of the generated header file.
const gccExportHeaderEpilog = `
#ifdef __cplusplus
}
#endif
`
// gccgoExportFileProlog is written to the _cgo_export.c file when
// using gccgo.
// We use weak declarations, and test the addresses, so that this code
// works with older versions of gccgo.
const gccgoExportFileProlog = `
#line 1 "cgo-gccgo-export-file-prolog"
extern _Bool runtime_iscgo __attribute__ ((weak));
static void GoInit(void) __attribute__ ((constructor));
static void GoInit(void) {
if(&runtime_iscgo)
runtime_iscgo = 1;
}
extern __SIZE_TYPE__ _cgo_wait_runtime_init_done(void) __attribute__ ((weak));
`
| ["\"GCCGO\""] | [] | ["GCCGO"] | [] | ["GCCGO"] | go | 1 | 0 | |
qa/rpc-tests/util.py | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2019 The Bluecoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
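# Worked example (illustrative pid only): with os.getpid() == 4242,
# 4242 % 999 == 246, so node 1 gets p2p port 11000 + 1 + 246 == 11247 and
# rpc port 12000 + 1 + 246 == 12247; deriving both from the pid keeps
# parallel test runs on the same machine from colliding.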
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "bluecoin.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
bluecoind and bluecoin-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run bluecoind:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "bluecoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "bluecoin-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bluecoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
    match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
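# Usage sketch (made-up host value): _rpchost_to_args("[::1]:19001") returns
# ['-rpcconnect=::1', '-rpcport=19001'], while _rpchost_to_args(None) returns
# [], so start_node below can splice the result into the bluecoin-cli command
# line unconditionally.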
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a bluecoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "bluecoind"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "bluecoin-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple bluecoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
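# Worked example (illustrative Decimal amounts): amount_in=10, amount_out=2,
# fee=0.01 gives amount=2.01 and change=7.99; since 7.99 > 2*2.01 the change
# is split, so one fresh address receives half rounded down to 8 decimal
# places (3.99500000) and a second change output gets the remaining 3.995.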
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
| [] | [] | ["BITCOINCLI", "BITCOIND"] | [] | ["BITCOINCLI", "BITCOIND"] | python | 2 | 0 | |
train.py | import os
import gc
import json
import time
import tqdm
import argparse
import datetime
import torch.distributed as dist
import torch.utils.data.distributed
from warpctc_pytorch import CTCLoss
from novograd import (AdamW,
Novograd)
from linknet import (SemsegLoss,
MaskSimilarity)
from decoder import GreedyDecoder
from model import DeepSpeech, supported_rnns
from data.utils import reduce_tensor, get_cer_wer
from data.data_loader_aug import (SpectrogramDataset,
BucketingSampler,
BucketingLenSampler,
DistributedBucketingSampler)
import torch
import warnings
from torch._six import inf
tq = tqdm.tqdm
# fall back to ['0'] when CUDA_VISIBLE_DEVICES is unset or empty
VISIBLE_DEVICES = os.environ['CUDA_VISIBLE_DEVICES'].split(',') if os.environ.get('CUDA_VISIBLE_DEVICES') else ['0']
parser = argparse.ArgumentParser(description='DeepSpeech training')
parser.add_argument('--train-manifest', metavar='DIR',
help='path to train manifest csv', default='data/train_manifest.csv')
parser.add_argument('--cache-dir', metavar='DIR',
help='path to save temp audio', default='data/cache/')
parser.add_argument('--train-val-manifest', metavar='DIR',
help='path to train validation manifest csv', default='')
parser.add_argument('--val-manifest', metavar='DIR',
help='path to validation manifest csv', default='data/val_manifest.csv')
parser.add_argument('--curriculum', metavar='DIR',
help='path to curriculum file', default='')
parser.add_argument('--use-curriculum', action='store_true', default=False)
parser.add_argument('--curriculum-ratio', default=0.5, type=float)
parser.add_argument('--cl-point', default=0.1, type=float)
parser.add_argument('--sample-rate', default=16000, type=int, help='Sample rate')
parser.add_argument('--batch-size', default=20, type=int, help='Batch size for training')
parser.add_argument('--val-batch-size', default=20, type=int, help='Batch size for training')
parser.add_argument('--num-workers', default=4, type=int, help='Number of workers used in data-loading')
parser.add_argument('--labels-path', default='labels.json', help='Contains all characters for transcription')
parser.add_argument('--phonemes-path', default='phonemes_ru.json', help='Contains all phonemes for the Russian language')
parser.add_argument('--use-bpe', dest='use_bpe', action='store_true', help='Use sentencepiece BPE tokens')
parser.add_argument('--sp-model', dest='sp_model', default='data/spm_train_v05_cleaned_asr_10s_phoneme.model',
type=str, help='Pre-trained sentencepiece model')
parser.add_argument('--use-phonemes', action='store_true', default=False)
parser.add_argument('--phonemes-only', action='store_true', default=False)
parser.add_argument('--omit-spaces', action='store_true', default=False)
parser.add_argument('--subword-regularization', action='store_true', default=False)
parser.add_argument('--batch-similar-lens', dest='batch_similar_lens', action='store_true',
help='Force usage of sampler that batches items with similar duration together')
parser.add_argument('--pytorch-mel', action='store_true', help='Use pytorch based STFT + MEL')
parser.add_argument('--pytorch-stft', action='store_true', help='Use pytorch based STFT')
parser.add_argument('--denoise', action='store_true', help='Train a denoising head')
parser.add_argument('--use-attention', action='store_true', help='Use attention based decoder instead of CTC')
parser.add_argument('--double-supervision', action='store_true', help='Use both CTC and attention in sequence')
parser.add_argument('--naive-split', action='store_true', help='Use a naive DS2 inspired syllable split')
parser.add_argument('--grapheme-phoneme', action='store_true', help='Use both phonemes and graphemes with BPE to train from scratch')
parser.add_argument('--window-size', default=.02, type=float, help='Window size for spectrogram in seconds')
parser.add_argument('--window-stride', default=.01, type=float, help='Window stride for spectrogram in seconds')
parser.add_argument('--window', default='hamming', help='Window type for spectrogram generation')
parser.add_argument('--hidden-size', default=800, type=int, help='Hidden size of RNNs')
parser.add_argument('--cnn-width', default=256, type=int, help='w2l-like network width')
parser.add_argument('--kernel-size', default=7, type=int, help='cnn kernel size')
parser.add_argument('--hidden-layers', default=6, type=int, help='Number of RNN layers')
parser.add_argument('--rnn-type', default='gru', help='Type of the RNN. rnn|gru|lstm are supported')
parser.add_argument('--decoder-layers', default=4, type=int)
parser.add_argument('--decoder-girth', default=1, type=int)
parser.add_argument('--dropout', default=0, type=float, help='Fixed dropout for CNN based models')
parser.add_argument('--epochs', default=70, type=int, help='Number of training epochs')
parser.add_argument('--cuda', dest='cuda', action='store_true', help='Use cuda to train model')
parser.add_argument('--lr', '--learning-rate', default=3e-4, type=float, help='initial learning rate')
parser.add_argument('--optimizer', default='sgd', help='Optimizer - sgd or adam')
parser.add_argument('--weight-decay', default=0, help='Weight decay for SGD', type=float)
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--batch-norm-momentum', default=0.1, type=float, help='BatchNorm momentum')
parser.add_argument('--max-norm', default=100, type=int, help='Norm cutoff to prevent explosion of gradients')
parser.add_argument('--norm-warmup-epochs', default=1000, type=int, help='Do gradient clipping only before some epoch')
parser.add_argument('--gradient-accumulation-steps', default=1, type=int, help='Accumulate gradients for some time first')
parser.add_argument('--learning-anneal', default=1.1, type=float, help='Annealing applied to learning rate every epoch')
parser.add_argument('--checkpoint-anneal', default=1.0, type=float,
help='Annealing applied to learning rate every checkpoint')
parser.add_argument('--silent', dest='silent', action='store_true', help='Turn off progress tracking per iteration')
parser.add_argument('--checkpoint', dest='checkpoint', action='store_true', help='Enables checkpoint saving of model')
parser.add_argument('--checkpoint-per-samples', default=0, type=int, help='Save checkpoint per samples. 0 means never save')
parser.add_argument('--visdom', dest='visdom', action='store_true', help='Turn on visdom graphing')
parser.add_argument('--enorm', dest='enorm', action='store_true', help='Turn on enorm ( https://github.com/facebookresearch/enorm )')
parser.add_argument('--tensorboard', dest='tensorboard', action='store_true', help='Turn on tensorboard graphing')
parser.add_argument('--log-dir', default='visualize/deepspeech_final', help='Location of tensorboard log')
parser.add_argument('--log-params', dest='log_params', action='store_true', help='Log parameter values and gradients')
parser.add_argument('--id', default='Deepspeech training', help='Identifier for visdom/tensorboard run')
parser.add_argument('--save-folder', default='models/', help='Location to save epoch models')
parser.add_argument('--continue-from', default='', help='Continue from checkpoint model')
parser.add_argument('--norm', default='max_frame', action="store",
help='Normalize sounds. Choices: "mean", "frame", "max_frame", "none"')
parser.add_argument('--finetune', dest='finetune', action='store_true',
help='Finetune the model from checkpoint "continue_from"')
parser.add_argument('--augment', dest='augment', action='store_true', help='Use random tempo and gain perturbations.')
parser.add_argument('--noise-dir', default=None,
help='Directory to inject noise into audio. If default, noise Inject not added')
parser.add_argument('--noise-prob', default=0.4, type=float, help='Probability of noise being added per sample')
parser.add_argument('--aug-type', default=0, type=int, help='Type of augs to use')
parser.add_argument('--aug-prob-8khz', default=0, type=float, help='Probability of dropping half of stft frequencies, robustness to 8kHz audio')
parser.add_argument('--aug-prob-spect', default=0, type=float, help='Probability of applying spectrogram based augmentations')
parser.add_argument('--noise-min', default=0.0,
help='Minimum noise level to sample from. (1.0 means all noise, not original signal)', type=float)
parser.add_argument('--noise-max', default=0.5,
help='Maximum noise levels to sample from. Maximum 1.0', type=float)
parser.add_argument('--no-shuffle', dest='no_shuffle', action='store_true',
help='Turn off shuffling and sample from dataset based on sequence length (smallest to largest)')
parser.add_argument('--no-sortaGrad', dest='no_sorta_grad', action='store_true',
help='Turn off ordering of dataset on sequence length for the first epoch.')
parser.add_argument('--reverse-sort', dest='reverse_sort', action='store_true',
help='Turn on reverse ordering of dataset on sequence length for the first epoch.')
parser.add_argument('--no-bidirectional', dest='bidirectional', action='store_false', default=True,
help='Turn off bi-directional RNNs, introduces lookahead convolution')
parser.add_argument('--dist-url', default='tcp://127.0.0.1:1550', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str, help='distributed backend')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--rank', default=0, type=int,
help='The rank of this process')
parser.add_argument('--gpu-rank', default=None,
help='If using distributed parallel for multi-gpu, sets the GPU for the process')
parser.add_argument('--data-parallel', dest='data_parallel', action='store_true',
help='Use data parallel')
parser.add_argument('--use-lookahead', dest='use_lookahead', action='store_true',
help='Use look ahead optimizer')
torch.manual_seed(123456)
torch.cuda.manual_seed_all(123456)
def to_np(x):
return x.data.cpu().numpy()
def clip_grad_norm_(parameters, max_norm, norm_type=2):
r"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
total_norm = 0
for p in parameters:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item() ** norm_type
total_norm = total_norm ** (1. / norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
# print(clip_coef)
if clip_coef < 1:
for p in parameters:
p.grad.data.mul_(clip_coef)
return total_norm
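# Minimal usage sketch (same calling convention as torch.nn.utils.clip_grad_norm_):
# apply it between backward() and the optimizer step so the rescaling acts on
# the freshly computed gradients, e.g.
#   loss.backward()
#   clip_grad_norm_(model.parameters(), args.max_norm)
#   optimizer.step()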
def calc_grad_norm(parameters, max_norm, norm_type=2):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
total_norm = 0
for p in parameters:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item() ** norm_type
total_norm = total_norm ** (1. / norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
return clip_coef
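# Unlike clip_grad_norm_ above, calc_grad_norm leaves the gradients untouched
# and only reports max_norm / (total_norm + 1e-6); a value below 1 means the
# gradients exceed max_norm, and the caller in train_batch can either multiply
# the gradients by it or scale the learning rate by the same factor
# ("lr reduction instead of gradient clipping").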
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
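# Usage sketch: the loss/CER meters below are updated with the batch value and
# the batch size, so `avg` is a sample-weighted running mean, e.g.
#   meter = AverageMeter()
#   meter.update(0.5, n=32)   # avg == 0.5
#   meter.update(1.0, n=32)   # avg == (0.5*32 + 1.0*32) / 64 == 0.75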
class MultipleOptimizer(object):
def __init__(self, op):
self.optimizers = op
def zero_grad(self):
for op in self.optimizers:
op.zero_grad()
def step(self):
for op in self.optimizers:
op.step()
def state_dict(self):
out = [op.state_dict() for op in self.optimizers]
return out
def load_state_dict(self,
states):
assert len(states) == len(self.optimizers)
for i in range(len(self.optimizers)):
self.optimizers[i].load_state_dict(states[i])
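# Usage sketch matching build_optimizer below: one SGD optimizer drives the
# CTC branch and one Adam optimizer drives the s2s decoder, while the wrapper
# lets the training loop keep a single zero_grad()/step() call site:
#   optimizer = MultipleOptimizer([ctc_optimizer, s2s_optimizer])
#   optimizer.zero_grad(); loss.backward(); optimizer.step()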
def build_optimizer(args_,
parameters_=None,
model=None):
# import aggmo
# return aggmo.AggMo(model.parameters(), args_.lr, betas=[0, 0.6, 0.9])
if args_.weight_decay > 0:
print('Using weight decay {} for SGD'.format(args_.weight_decay))
if args.double_supervision or 'transformer' in args.rnn_type or args.grapheme_phoneme:
import itertools
adam_lr = 1e-4 # / 10
sgd_lr = args_.lr
print('Using double supervision, SGD with clipping for CTC, ADAM for s2s')
print('SGD LR {} / ADAM LR {}'.format(sgd_lr, adam_lr))
if 'transformer' in args.rnn_type:
print('Using transformer-type double optimizer')
params_ctc = [model.rnns.layers.parameters()]
params_adam = [model.rnns.decoder.parameters(),
model.fc.parameters()]
else:
params_ctc = [model.rnns.layers.parameters(),
model.rnns.ctc_decoder.parameters(),
model.rnns.ctc_fc.parameters()]
params_adam = [model.rnns.s2s_decoder.parameters()]
ctc_optimizer = torch.optim.SGD(itertools.chain(*params_ctc),
lr=args_.lr,
momentum=args_.momentum,
nesterov=True)
s2s_optimizer = torch.optim.Adam(itertools.chain(*params_adam),
lr=adam_lr)
return MultipleOptimizer([ctc_optimizer, s2s_optimizer])
elif args_.optimizer == 'sgd':
print('Using SGD')
try:
base_optimizer = torch.optim.SGD(parameters_, lr=args_.lr,
momentum=args_.momentum, nesterov=True,
weight_decay=args_.weight_decay)
if args_.use_lookahead:
print('Using SGD + Lookahead')
from lookahead import Lookahead
return Lookahead(base_optimizer=base_optimizer,
k=5,
alpha=0.5)
return base_optimizer
except:
# wo nesterov
return torch.optim.SGD(parameters_, lr=args_.lr,
momentum=args_.momentum, nesterov=False,
weight_decay=args_.weight_decay)
elif args_.optimizer=='adam':
print('Using ADAM')
return torch.optim.Adam(parameters_, lr=args_.lr)
elif args_.optimizer=='novograd':
print('Using Novograd')
return Novograd(parameters_, lr=args_.lr)
elif args_.optimizer=='adamw':
print('Using ADAMW')
return AdamW(parameters_, lr=args_.lr)
viz = None
tensorboard_writer = None
class PlotWindow:
def __init__(self, title, suffix, log_x=False, log_y=False):
self.loss_results = torch.Tensor(10000)
self.cer_results = torch.Tensor(10000)
self.wer_results = torch.Tensor(10000)
self.epochs = torch.arange(1, 10000)
self.viz_window = None
self.tb_subplot='/'+suffix
global viz, tensorboard_writer
hour_now = str(datetime.datetime.now()).split('.', 1)[0][:-3]
self.opts = dict(title=title + ': ' + hour_now, ylabel='', xlabel=suffix, legend=['Loss', 'WER', 'CER'])
self.opts['layoutopts'] = {'plotly': {}}
if log_x:
self.opts['layoutopts']['plotly'] = {'xaxis': {'type': 'log'}}
if log_y:
self.opts['layoutopts']['plotly'] = {'yaxis': {'type': 'log'}}
if args.visdom and is_leader:
if viz is None:
from visdom import Visdom
viz = Visdom()
if args.tensorboard and is_leader:
os.makedirs(args.log_dir, exist_ok=True)
if tensorboard_writer is None:
from tensorboardX import SummaryWriter
tensorboard_writer = SummaryWriter(args.log_dir)
def plot_history(self, position):
global viz, tensorboard_writer
if is_leader and args.visdom:
# Add previous scores to visdom graph
x_axis = self.epochs[0:position]
y_axis = torch.stack(
(self.loss_results[0:position],
self.wer_results[0:position],
self.cer_results[0:position]),
dim=1)
self.viz_window = viz.line(
X=x_axis,
Y=y_axis,
opts=self.opts,
)
if is_leader and args.tensorboard:
# Previous scores to tensorboard logs
for i in range(position):
values = {
'Avg Train Loss': self.loss_results[i],
'Avg WER': self.wer_results[i],
'Avg CER': self.cer_results[i]
}
tensorboard_writer.add_scalars(args.id+self.tb_subplot,
values, i + 1)
def plot_progress(self, epoch, avg_loss, cer_avg, wer_avg):
global viz, tensorboard_writer
if args.visdom and is_leader:
x_axis = self.epochs[0:epoch + 1]
y_axis = torch.stack(
(self.loss_results[0:epoch + 1],
self.wer_results[0:epoch + 1],
self.cer_results[0:epoch + 1]), dim=1)
if self.viz_window is None:
self.viz_window = viz.line(
X=x_axis,
Y=y_axis,
opts=self.opts,
)
else:
viz.line(
X=x_axis.unsqueeze(0).expand(y_axis.size(1), x_axis.size(0)).transpose(0, 1), # Visdom fix
Y=y_axis,
win=self.viz_window,
update='replace',
)
if args.tensorboard and is_leader:
values = {
'Avg Train Loss': avg_loss,
'Avg WER': wer_avg,
'Avg CER': cer_avg
}
tensorboard_writer.add_scalars(args.id+self.tb_subplot,
values,
epoch + 1)
if args.log_params:
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
tensorboard_writer.add_histogram(tag, to_np(value), epoch + 1)
tensorboard_writer.add_histogram(tag + '/grad', to_np(value.grad), epoch + 1)
class LRPlotWindow:
def __init__(self, title, suffix, log_x=False, log_y=False):
self.loss_results = torch.Tensor(10000)
self.epochs = torch.Tensor(10000)
self.viz_window = None
self.suffix = suffix
self.tb_subplot='/'+suffix
global viz, tensorboard_writer
hour_now = str(datetime.datetime.now()).split('.', 1)[0][:-3]
self.opts = dict(title=title + ': ' + hour_now, ylabel='', xlabel=suffix, legend=['Loss'])
self.opts['layoutopts'] = {'plotly': {}}
if log_x:
self.opts['layoutopts']['plotly'] = {'xaxis': {'type': 'log'}}
if log_y:
self.opts['layoutopts']['plotly'] = {'yaxis': {'type': 'log'}}
if args.visdom and is_leader:
if viz is None:
from visdom import Visdom
viz = Visdom()
if args.tensorboard and is_leader:
os.makedirs(args.log_dir, exist_ok=True)
if tensorboard_writer is None:
from tensorboardX import SummaryWriter
tensorboard_writer = SummaryWriter(args.log_dir)
def plot_progress(self, epoch, avg_loss, cer_avg, wer_avg):
global viz, tensorboard_writer
if args.visdom and is_leader:
x_axis = self.epochs[0:epoch + 1]
y_axis = torch.stack((
self.loss_results[0:epoch + 1],
), dim=1)
if self.viz_window is None:
self.viz_window = viz.line(
X=x_axis,
Y=y_axis,
opts=self.opts,
)
else:
viz.line(
X=x_axis,
Y=y_axis,
win=self.viz_window,
update='replace',
)
if args.tensorboard and is_leader:
values = {
'Avg Train Loss': avg_loss,
}
tensorboard_writer.add_scalars(args.id+self.tb_subplot,
values, epoch + 1)
if args.log_params:
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
tensorboard_writer.add_histogram(tag, to_np(value), epoch + 1)
tensorboard_writer.add_histogram(tag + '/grad', to_np(value.grad), epoch + 1)
def get_lr():
if args.use_lookahead:
return optimizer.optimizer.state_dict()['param_groups'][0]['lr']
if args.double_supervision or 'transformer' in args.rnn_type or args.grapheme_phoneme:
# SGD state
optim_state = optimizer.optimizers[0].state_dict()
else:
optim_state = optimizer.state_dict()
return optim_state['param_groups'][0]['lr']
def set_lr(lr):
print('Learning rate annealed to: {lr:.6g}'.format(lr=lr))
if args.double_supervision or 'transformer' in args.rnn_type or args.grapheme_phoneme:
# ADAM's LR typically is set 10x lower than SGD
sgd_optim_state = optimizer.optimizers[0].state_dict()
sgd_optim_state['param_groups'][0]['lr'] = lr
optimizer.optimizers[0].load_state_dict(sgd_optim_state)
adam_optim_state = optimizer.optimizers[1].state_dict()
# always fixed for adam
adam_optim_state['param_groups'][0]['lr'] = 1e-4
optimizer.optimizers[1].load_state_dict(adam_optim_state)
elif args.use_lookahead:
optim_state = optimizer.optimizer.state_dict()
optim_state['param_groups'][0]['lr'] = lr
optimizer.optimizer.load_state_dict(optim_state)
else:
optim_state = optimizer.state_dict()
optim_state['param_groups'][0]['lr'] = lr
optimizer.load_state_dict(optim_state)
def check_model_quality(epoch, checkpoint, train_loss, train_cer, train_wer):
gc.collect()
torch.cuda.empty_cache()
val_cer_sum, val_wer_sum, val_loss_sum = 0, 0, 0
num_chars, num_words, num_losses = 0, 0, 0
model.eval()
with torch.no_grad():
for i, data in tq(enumerate(test_loader), total=len(test_loader)):
# use if full phoneme decoding will be required
if False:
(inputs,
targets,
filenames,
input_percentages,
target_sizes,
phoneme_targets,
phoneme_target_sizes) = data
elif args.denoise:
(inputs,
targets,
filenames,
input_percentages,
target_sizes,
mask_targets) = data
else:
inputs, targets, filenames, input_percentages, target_sizes = data
input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
# unflatten targets
split_targets = []
offset = 0
for size in target_sizes:
split_targets.append(targets[offset:offset + size])
offset += size
if args.use_attention:
batch_size = inputs.size(0)
max_len = max(target_sizes)
# use CTC blank as pad token
# ctc blank has an index of zero
trg = torch.zeros(batch_size,
max_len)
assert len(target_sizes) == batch_size
for _, split_target in enumerate(split_targets):
trg[_, :target_sizes[_]] = split_target
trg = trg.long().to(device)
# trg_teacher_forcing = trg[:, :-1]
trg_val = trg
inputs = inputs.to(device)
if args.use_phonemes or args.grapheme_phoneme:
(logits, probs,
output_sizes,
phoneme_logits, phoneme_probs) = model(inputs, input_sizes)
elif args.denoise:
logits, probs, output_sizes, mask_logits = model(inputs, input_sizes)
elif args.use_attention:
logits, output_sizes = model(inputs,
lengths=input_sizes)
# for our purposes they are the same
probs = logits
elif args.double_supervision:
ctc_logits, s2s_logits, output_sizes = model(inputs,
lengths=input_sizes)
# s2s decoder is the final decoder
probs = s2s_logits
else:
logits, probs, output_sizes = model(inputs, input_sizes)
if args.use_attention:
# this is kind of murky
# you can calculate this using teacher forcing unrolling
# or you can just assume
# that the smart network will produce outputs of similar length to gt
short_logits = logits[:, :trg_val.size(1), :].contiguous()
loss = criterion(short_logits.view(-1,
short_logits.size(-1)),
trg_val.contiguous().view(-1))
loss = loss / sum(target_sizes) # average the loss by number of tokens
loss = loss.to(device)
elif args.double_supervision:
# do not bother with loss here
loss = 0
loss_value = 0
else:
loss = criterion(logits.transpose(0, 1), targets, output_sizes.cpu(), target_sizes)
loss = loss / inputs.size(0) # average the loss by minibatch
inf = float("inf")
if args.distributed:
loss_value = reduce_tensor(loss, args.world_size).item()
elif args.double_supervision:
pass
else:
loss_value = loss.item()
if loss_value == inf or loss_value == -inf:
print("WARNING: received an inf loss, setting loss value to 1000")
loss_value = 1000
loss_value = float(loss_value)
# plain sum of batch losses; val_loss below is this divided by num_losses
val_loss_sum += loss_value
num_losses += 1
decoded_output, _ = decoder.decode(probs, output_sizes,
use_attention=args.use_attention or args.double_supervision)
target_strings = decoder.convert_to_strings(split_targets)
for x in range(len(target_strings)):
transcript, reference = decoded_output[x][0], target_strings[x][0]
wer, cer, wer_ref, cer_ref = get_cer_wer(decoder, transcript, reference)
if x < 1:
print("CER: {:6.2f}% WER: {:6.2f}% Filename: {}".format(cer/cer_ref*100, wer/wer_ref*100, filenames[x]))
print('Reference:', reference, '\nTranscript:', transcript)
times_used = test_dataset.curriculum[filenames[x]]['times_used']+1
test_dataset.update_curriculum(filenames[x],
reference, transcript,
None,
cer / cer_ref, wer / wer_ref,
times_used=times_used)
val_wer_sum += wer
val_cer_sum += cer
num_words += wer_ref
num_chars += cer_ref
if args.double_supervision:
del inputs, targets, input_percentages, input_sizes
del probs, output_sizes, target_sizes, loss
del ctc_logits, s2s_logits
del split_targets
else:
del inputs, targets, input_percentages, input_sizes
del logits, probs, output_sizes, target_sizes, loss
del split_targets
if args.cuda:
torch.cuda.synchronize()
val_wer = 100 * val_wer_sum / num_words
val_cer = 100 * val_cer_sum / num_chars
print('Validation Summary Epoch: [{0}]\t'
'Average WER {wer:.3f}\t'
'Average CER {cer:.3f}\t'.format(epoch + 1, wer=val_wer, cer=val_cer))
val_loss = val_loss_sum / num_losses
plots.loss_results[epoch] = train_loss
plots.wer_results[epoch] = train_wer
plots.cer_results[epoch] = train_cer
plots.epochs[epoch] = epoch + 1
checkpoint_plots.loss_results[checkpoint] = val_loss
checkpoint_plots.wer_results[checkpoint] = val_wer
checkpoint_plots.cer_results[checkpoint] = val_cer
checkpoint_plots.epochs[checkpoint] = checkpoint + 1
plots.plot_progress(epoch, train_loss, train_cer, train_wer)
checkpoint_plots.plot_progress(checkpoint, val_loss, val_cer, val_wer)
if args.checkpoint_anneal != 1.0:
global lr_plots
lr_plots.loss_results[checkpoint] = val_loss
lr_plots.epochs[checkpoint] = get_lr()
zero_loss = lr_plots.loss_results == 0
lr_plots.loss_results[zero_loss] = val_loss
lr_plots.epochs[zero_loss] = get_lr()
lr_plots.plot_progress(checkpoint, val_loss, val_cer, val_wer)
# only if trainval manifest provided
# separate scope not to mess with general flow too much
if args.train_val_manifest != '':
calculate_trainval_quality_metrics(checkpoint,
epoch,
trainval_loader,
trainval_checkpoint_plots)
return val_wer, val_cer
def calculate_trainval_quality_metrics(checkpoint,
epoch,
loader,
plots_handle):
val_cer_sum, val_wer_sum, val_loss_sum = 0, 0, 0
num_chars, num_words, num_losses = 0, 0, 0
model.eval()
with torch.no_grad():
for i, data in tq(enumerate(loader), total=len(loader)):
# use if full phoneme decoding will be required
if False:
(inputs,
targets,
filenames,
input_percentages,
target_sizes,
phoneme_targets,
phoneme_target_sizes) = data
elif args.denoise:
(inputs,
targets,
filenames,
input_percentages,
target_sizes,
mask_targets) = data
else:
inputs, targets, filenames, input_percentages, target_sizes = data
input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
# unflatten targets
split_targets = []
offset = 0
for size in target_sizes:
split_targets.append(targets[offset:offset + size])
offset += size
if args.use_attention:
batch_size = inputs.size(0)
max_len = max(target_sizes)
# use CTC blank as pad token
# ctc blank has an index of zero
trg = torch.zeros(batch_size,
max_len)
assert len(target_sizes) == batch_size
for _, split_target in enumerate(split_targets):
trg[_, :target_sizes[_]] = split_target
trg = trg.long().to(device)
# trg_teacher_forcing = trg[:, :-1]
trg_val = trg
inputs = inputs.to(device)
if args.use_phonemes:
(logits, probs,
output_sizes,
phoneme_logits, phoneme_probs) = model(inputs, input_sizes)
elif args.denoise:
logits, probs, output_sizes, mask_logits = model(inputs, input_sizes)
elif args.use_attention:
logits, output_sizes = model(inputs,
lengths=input_sizes)
# for our purposes they are the same
probs = logits
elif args.double_supervision:
ctc_logits, s2s_logits, output_sizes = model(inputs,
lengths=input_sizes)
# s2s decoder is the final decoder
probs = s2s_logits
else:
logits, probs, output_sizes = model(inputs, input_sizes)
if args.use_attention:
# this is kind of murky
# you can calculate this using teacher forcing unrolling
# or you can just assume
# that the smart network will produce outputs of similar length to gt
# some edge cases in annotation also may cause this to fail miserably
# hence a failsafe
max_loss_len = min(trg_val.size(1),
logits.size(1))
short_logits = logits[:, :max_loss_len, :].contiguous()
short_trg = trg_val[:, :max_loss_len].contiguous()
loss = criterion(short_logits.view(-1,
short_logits.size(-1)),
short_trg.view(-1))
loss = loss / sum(target_sizes) # average the loss by number of tokens
loss = loss.to(device)
elif args.double_supervision:
# do not bother with loss here
loss = 0
loss_value = 0
else:
loss = criterion(logits.transpose(0, 1), targets, output_sizes.cpu(), target_sizes)
loss = loss / inputs.size(0) # average the loss by minibatch
inf = float("inf")
if args.distributed:
loss_value = reduce_tensor(loss, args.world_size).item()
elif args.double_supervision:
pass
else:
loss_value = loss.item()
if loss_value == inf or loss_value == -inf:
print("WARNING: received an inf loss, setting loss value to 1000")
loss_value = 1000
loss_value = float(loss_value)
# plain sum of batch losses; val_loss below is this divided by num_losses
val_loss_sum += loss_value
num_losses += 1
decoded_output, _ = decoder.decode(probs, output_sizes,
use_attention=args.use_attention or args.double_supervision)
target_strings = decoder.convert_to_strings(split_targets)
for x in range(len(target_strings)):
transcript, reference = decoded_output[x][0], target_strings[x][0]
wer, cer, wer_ref, cer_ref = get_cer_wer(decoder, transcript, reference)
if x < 1:
print("CER: {:6.2f}% WER: {:6.2f}% Filename: {}".format(cer/cer_ref*100, wer/wer_ref*100, filenames[x]))
print('Reference:', reference, '\nTranscript:', transcript)
times_used = trainval_dataset.curriculum[filenames[x]]['times_used']+1
trainval_dataset.update_curriculum(filenames[x],
reference, transcript,
None,
cer / cer_ref, wer / wer_ref,
times_used=times_used)
val_wer_sum += wer
val_cer_sum += cer
num_words += wer_ref
num_chars += cer_ref
if args.double_supervision:
del inputs, targets, input_percentages, input_sizes
del probs, output_sizes, target_sizes, loss
del ctc_logits, s2s_logits
del split_targets
else:
del inputs, targets, input_percentages, input_sizes
del logits, probs, output_sizes, target_sizes, loss
del split_targets
if args.cuda:
torch.cuda.synchronize()
val_wer = 100 * val_wer_sum / num_words
val_cer = 100 * val_cer_sum / num_chars
print('TrainVal Summary Epoch: [{0}]\t'
'Average WER {wer:.3f}\t'
'Average CER {cer:.3f}\t'.format(epoch + 1, wer=val_wer, cer=val_cer))
val_loss = val_loss_sum / num_losses
plots_handle.loss_results[checkpoint] = val_loss
plots_handle.wer_results[checkpoint] = val_wer
plots_handle.cer_results[checkpoint] = val_cer
plots_handle.epochs[checkpoint] = checkpoint + 1
plots_handle.plot_progress(checkpoint, val_loss, val_cer, val_wer)
def save_validation_curriculums(save_folder,
checkpoint,
epoch,
iteration=0):
if iteration>0:
test_path = '%s/test_checkpoint_%04d_epoch_%02d_iter_%05d.csv' % (save_folder, checkpoint + 1, epoch + 1, iteration + 1)
else:
test_path = '%s/test_checkpoint_%04d_epoch_%02d.csv' % (save_folder, checkpoint + 1, epoch + 1)
print("Saving test curriculum to {}".format(test_path))
test_dataset.save_curriculum(test_path)
if args.train_val_manifest != '':
if iteration>0:
trainval_path = '%s/trainval_checkpoint_%04d_epoch_%02d_iter_%05d.csv' % (save_folder, checkpoint + 1, epoch + 1, iteration + 1)
else:
trainval_path = '%s/trainval_checkpoint_%04d_epoch_%02d.csv' % (save_folder, checkpoint + 1, epoch + 1)
print("Saving trainval curriculum to {}".format(trainval_path))
trainval_dataset.save_curriculum(trainval_path)
class Trainer:
def __init__(self):
self.end = time.time()
self.train_wer = 0
self.train_cer = 0
self.num_words = 0
self.num_chars = 0
def reset_scores(self):
self.train_wer = 0
self.train_cer = 0
self.num_words = 0
self.num_chars = 0
def get_cer(self):
return 100. * self.train_cer / (self.num_chars or 1)
def get_wer(self):
return 100. * self.train_wer / (self.num_words or 1)
def train_batch(self, epoch, batch_id, data):
if args.use_phonemes:
(inputs,
targets,
filenames,
input_percentages,
target_sizes,
phoneme_targets,
phoneme_target_sizes) = data
elif args.denoise:
(inputs,
targets,
filenames,
input_percentages,
target_sizes,
mask_targets) = data
mask_targets = mask_targets.squeeze(1).to(device)
elif args.double_supervision:
(inputs,
targets, s2s_targets,
filenames, input_percentages,
target_sizes, s2s_target_sizes) = data
else:
inputs, targets, filenames, input_percentages, target_sizes = data
input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
# measure data loading time
data_time.update(time.time() - self.end)
inputs = inputs.to(device)
input_sizes = input_sizes.to(device)
split_targets = []
offset = 0
for size in target_sizes:
split_targets.append(targets[offset:offset + size])
offset += size
if args.double_supervision:
split_s2s_targets = []
offset = 0
for size in s2s_target_sizes:
split_s2s_targets.append(s2s_targets[offset:offset + size])
offset += size
batch_size = inputs.size(0)
max_len = max(s2s_target_sizes)
# use CTC blank as pad token
# ctc blank has an index of zero
trg = torch.zeros(batch_size,
max_len)
assert len(s2s_target_sizes) == batch_size
for _, split_target in enumerate(split_s2s_targets):
trg[_,:s2s_target_sizes[_]] = split_target
trg = trg.long().to(device)
trg_teacher_forcing = trg[:, :-1]
trg_y = trg[:, 1:]
if args.use_attention:
batch_size = inputs.size(0)
max_len = max(target_sizes)
# use CTC blank as pad token
# ctc blank has an index of zero
trg = torch.zeros(batch_size,
max_len)
assert len(target_sizes) == batch_size
for _, split_target in enumerate(split_targets):
trg[_,:target_sizes[_]] = split_target
trg = trg.long().to(device)
trg_teacher_forcing = trg[:, :-1]
trg_y = trg[:, 1:]
if args.use_phonemes:
(logits, probs,
output_sizes,
phoneme_logits, phoneme_probs) = model(inputs, input_sizes)
elif args.denoise:
logits, probs, output_sizes, mask_logits = model(inputs, input_sizes)
elif args.use_attention:
logits, output_sizes = model(inputs,
lengths=input_sizes,
trg=trg_teacher_forcing)
# for our purposes they are the same
probs = logits
elif args.double_supervision:
ctc_logits, s2s_logits, output_sizes = model(inputs,
lengths=input_sizes,
trg=trg_teacher_forcing)
# s2s decoder is the final decoder
probs = s2s_logits
# (batch x sequence x channels) => (seqLength x batch x outputDim)
ctc_logits = ctc_logits.transpose(0, 1)
else:
logits, probs, output_sizes = model(inputs, input_sizes)
if args.double_supervision:
assert ctc_logits.is_cuda
assert s2s_logits.is_cuda
else:
assert logits.is_cuda
assert probs.is_cuda
assert output_sizes.is_cuda
decoded_output, _ = decoder.decode(probs, output_sizes,
use_attention=args.use_attention or args.double_supervision)
if args.double_supervision:
target_strings = decoder.convert_to_strings(split_s2s_targets)
else:
target_strings = decoder.convert_to_strings(split_targets)
for x in range(len(target_strings)):
transcript, reference = decoded_output[x][0], target_strings[x][0]
wer, cer, wer_ref, cer_ref = get_cer_wer(decoder, transcript, reference)
# accessing dict should be fast
times_used = train_dataset.curriculum[filenames[x]]['times_used']+1
train_dataset.update_curriculum(filenames[x],
reference, transcript,
None,
cer / cer_ref, wer / wer_ref,
times_used=times_used)
self.train_wer += wer
self.train_cer += cer
self.num_words += wer_ref
self.num_chars += cer_ref
if args.use_phonemes:
phoneme_logits = phoneme_logits.transpose(0, 1) # TxNxH
if not args.use_attention and not args.double_supervision:
logits = logits.transpose(0, 1) # TxNxH
if not args.double_supervision:
if torch.isnan(logits).any(): # and args.nan == 'zero':
# work around bad data
print("WARNING: Working around NaNs in data")
logits[torch.isnan(logits)] = 0
if args.use_phonemes:
# output_sizes should be the same
# for phoneme and non-phonemes
loss = criterion(logits,
targets,
output_sizes.cpu(),
target_sizes) + criterion(phoneme_logits,
phoneme_targets,
output_sizes.cpu(),
phoneme_target_sizes)
loss = loss / inputs.size(0) # average the loss by minibatch
loss = loss.to(device)
elif args.denoise:
ctc_loss = 0
"""
ctc_loss = criterion(logits,
targets,
output_sizes.cpu(),
target_sizes).to(device) / inputs.size(0)
"""
mask_loss = 50.0 * mask_criterion(mask_logits,
mask_targets).to(device)
if torch.isnan(mask_loss):
print('Nan loss detected')
return 102
loss = ctc_loss + mask_loss
inf = float("inf")
if args.distributed:
loss_value = reduce_tensor(loss, args.world_size).item()
else:
loss_value = loss.item() * args.gradient_accumulation_steps
ctc_loss_value = ctc_loss # .item()
if ctc_loss_value == inf or ctc_loss_value == -inf:
print("WARNING: received an inf CTC loss, setting loss value to 1000")
ctc_loss_value = 1000
loss_value = 1000
elif args.use_attention:
loss = criterion(logits.contiguous().view(-1,
logits.size(-1)),
trg_y.contiguous().view(-1))
loss = loss / sum(target_sizes) # average the loss by number of tokens
if args.gradient_accumulation_steps > 1: # average loss by accumulation steps
loss = loss / args.gradient_accumulation_steps
loss = loss.to(device)
elif args.double_supervision:
ctc_loss = ctc_criterion(ctc_logits,
targets,
output_sizes.cpu(),
target_sizes)
ctc_loss = ctc_loss / inputs.size(0) # average the loss by minibatch
ctc_loss = ctc_loss.to(device)
s2s_loss = s2s_criterion(s2s_logits.contiguous().view(-1,
s2s_logits.size(-1)),
trg_y.contiguous().view(-1))
# average the loss by number of tokens
# multiply by 10 for weight
s2s_loss = 10 * s2s_loss / sum(s2s_target_sizes)
s2s_loss = s2s_loss.to(device)
loss = ctc_loss + s2s_loss
inf = float("inf")
if args.distributed:
loss_value = reduce_tensor(loss, args.world_size).item()
else:
loss_value = loss.item() * args.gradient_accumulation_steps
ctc_loss_value = ctc_loss.item()
if ctc_loss_value == inf or ctc_loss_value == -inf:
print("WARNING: received an inf CTC loss, setting loss value to 1000")
ctc_loss_value = 1000
loss_value = 1000
else:
loss = criterion(logits, targets, output_sizes.cpu(), target_sizes)
loss = loss / inputs.size(0) # average the loss by minibatch
if args.gradient_accumulation_steps > 1: # average loss by accumulation steps
loss = loss / args.gradient_accumulation_steps
loss = loss.to(device)
if not args.denoise:
inf = float("inf")
if args.distributed:
loss_value = reduce_tensor(loss, args.world_size).item()
else:
loss_value = loss.item() * args.gradient_accumulation_steps
if loss_value == inf or loss_value == -inf:
print("WARNING: received an inf loss, setting loss value to 1000")
loss_value = 1000
loss_value = float(loss_value)
losses.update(loss_value, inputs.size(0))
if args.denoise:
mask_accuracy.update(mask_metric(mask_logits, mask_targets).item(),
inputs.size(0))
mask_losses.update(mask_loss.item(),
inputs.size(0))
ctc_losses.update(ctc_loss_value,
inputs.size(0))
elif args.double_supervision:
ctc_losses.update(ctc_loss_value,
inputs.size(0))
s2s_losses.update(s2s_loss.item(),
inputs.size(0))
# update_curriculum
if (batch_id + 1) % args.gradient_accumulation_steps == 0:
# compute gradient
optimizer.zero_grad()
loss.backward()
# try just lr reduction
# instead of gradient clipping
lr_clipping = False
# spare time by doing clipping
# only once each N epochs
if args.max_norm > 0:
if epoch < args.norm_warmup_epochs:
if lr_clipping:
raise ValueError('LEGACY')
clip_coef = calc_grad_norm(model.parameters(),
args.max_norm)
underlying_lr = get_lr()
set_lr(underlying_lr * clip_coef)
else:
clip_grad_norm_(model.parameters(),
args.max_norm)
else:
raise ValueError('LEGACY')
# clip only when gradients explode
if loss_value == inf or loss_value == -inf:
clip_grad_norm_(model.parameters(),
args.max_norm)
# if torch.isnan(logits).any():
# # work around bad data
# print("WARNING: Skipping NaNs in backward step")
# SGD step
optimizer.step()
if lr_clipping:
set_lr(underlying_lr)
if args.enorm:
enorm.step()
# measure elapsed time
batch_time.update(time.time() - self.end)
if not args.silent:
if args.denoise:
print('GPU-{0} Epoch {1} [{2}/{3}]\t'
'Time {batch_time.val:.2f} ({batch_time.avg:.2f})\t'
'Data {data_time.val:.2f} ({data_time.avg:.2f})\t'
'Loss {loss.val:.2f} ({loss.avg:.2f})\t'
'CTC Loss {ctc_losses.val:.2f} ({ctc_losses.avg:.2f})\t'
'Mask Loss {mask_losses.val:.2f} ({mask_losses.avg:.2f})\t'
'Mask {mask_accuracy.val:.2f} ({mask_accuracy.avg:.2f})\t'.format(
args.gpu_rank or VISIBLE_DEVICES[0],
epoch + 1, batch_id + 1, len(train_sampler),
batch_time=batch_time, data_time=data_time, loss=losses,
mask_losses=mask_losses, ctc_losses=ctc_losses,
mask_accuracy=mask_accuracy))
elif args.double_supervision:
print('GPU-{0} Epoch {1} [{2}/{3}]\t'
'Time {batch_time.val:.2f} ({batch_time.avg:.2f})\t'
'Data {data_time.val:.2f} ({data_time.avg:.2f})\t'
'Loss {loss.val:.2f} ({loss.avg:.2f})\t'
'CTC Loss {ctc_losses.val:.2f} ({ctc_losses.avg:.2f})\t'
'S2S Loss {s2s_losses.val:.2f} ({s2s_losses.avg:.2f})\t'.format(
args.gpu_rank or VISIBLE_DEVICES[0],
epoch + 1, batch_id + 1, len(train_sampler),
batch_time=batch_time, data_time=data_time, loss=losses,
ctc_losses=ctc_losses, s2s_losses=s2s_losses))
else:
print('GPU-{0} Epoch {1} [{2}/{3}]\t'
'Time {batch_time.val:.2f} ({batch_time.avg:.2f})\t'
'Data {data_time.val:.2f} ({data_time.avg:.2f})\t'
'Loss {loss.val:.2f} ({loss.avg:.2f})\t'.format(
args.gpu_rank or VISIBLE_DEVICES[0],
epoch + 1, batch_id + 1, len(train_sampler),
batch_time=batch_time, data_time=data_time, loss=losses))
if args.double_supervision:
del inputs, targets, input_percentages, input_sizes
del probs, output_sizes, target_sizes, loss, ctc_loss, s2s_loss
del s2s_targets, s2s_target_sizes
del ctc_logits, s2s_logits
else:
del inputs, targets, input_percentages, input_sizes
del logits, probs, output_sizes, target_sizes, loss
return loss_value
def init_train_set(epoch, from_iter):
#train_dataset.set_curriculum_epoch(epoch, sample=True)
train_dataset.set_curriculum_epoch(epoch,
sample=args.use_curriculum,
sample_size=args.curriculum_ratio,
cl_point=args.cl_point)
global train_loader, train_sampler
if not args.distributed:
if args.batch_similar_lens:
print('Using BucketingLenSampler')
train_sampler = BucketingLenSampler(train_dataset, batch_size=args.batch_size)
else:
train_sampler = BucketingSampler(train_dataset, batch_size=args.batch_size)
train_sampler.bins = train_sampler.bins[from_iter:]
else:
train_sampler = DistributedBucketingSampler(train_dataset,
batch_size=args.batch_size,
num_replicas=args.world_size,
rank=args.rank)
train_loader = AudioDataLoader(train_dataset,
num_workers=args.num_workers,
batch_sampler=train_sampler,
pin_memory=True)
if (not args.no_shuffle and epoch != 0) or args.no_sorta_grad:
print("Shuffling batches for the following epochs")
train_sampler.shuffle(epoch)
def train(from_epoch, from_iter, from_checkpoint):
print('Starting training with id="{}" at GPU="{}" with lr={}'.format(args.id, args.gpu_rank or VISIBLE_DEVICES[0],
get_lr()))
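# convert the per-sample checkpoint interval into a number of batches (ceiling division)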
checkpoint_per_batch = 1+(args.checkpoint_per_samples-1) // args.batch_size if args.checkpoint_per_samples > 0 else 0
trainer = Trainer()
checkpoint = from_checkpoint
best_score = None
for epoch in range(from_epoch, args.epochs):
init_train_set(epoch, from_iter=from_iter)
trainer.reset_scores()
total_loss = 0
num_losses = 1
model.train()
trainer.end = time.time()
start_epoch_time = time.time()
for i, data in enumerate(train_loader, start=from_iter):
if i >= len(train_sampler) + start_iter:
break
total_loss += trainer.train_batch(epoch, i, data)
num_losses += 1
if (i + 1) % 50 == 0:
# deal with GPU memory fragmentation
gc.collect()
torch.cuda.empty_cache()
if checkpoint_per_batch > 0 and is_leader:
if (i + 1) % checkpoint_per_batch == 0:
file_path = '%s/checkpoint_%04d_epoch_%02d_iter_%05d.model' % (save_folder, checkpoint + 1, epoch + 1, i + 1)
print("Saving checkpoint model to %s" % file_path)
if args.use_lookahead:
_optimizer = optimizer.optimizer
else:
_optimizer = optimizer
torch.save(DeepSpeech.serialize(model, optimizer=_optimizer, epoch=epoch,
iteration=i,
loss_results=plots.loss_results,
wer_results=plots.wer_results,
cer_results=plots.cer_results,
checkpoint=checkpoint,
checkpoint_loss_results=checkpoint_plots.loss_results,
checkpoint_wer_results=checkpoint_plots.wer_results,
checkpoint_cer_results=checkpoint_plots.cer_results,
trainval_checkpoint_loss_results=trainval_checkpoint_plots.loss_results,
trainval_checkpoint_wer_results=trainval_checkpoint_plots.wer_results,
trainval_checkpoint_cer_results=trainval_checkpoint_plots.cer_results,
avg_loss=total_loss / num_losses), file_path)
train_dataset.save_curriculum(file_path + '.csv')
del _optimizer
check_model_quality(epoch, checkpoint, total_loss / num_losses, trainer.get_cer(), trainer.get_wer())
save_validation_curriculums(save_folder, checkpoint + 1, epoch + 1, i + 1)
checkpoint += 1
gc.collect()
torch.cuda.empty_cache()
model.train()
if args.checkpoint_anneal != 1:
print("Checkpoint:", checkpoint)
set_lr(get_lr() / args.checkpoint_anneal)
trainer.end = time.time()
epoch_time = time.time() - start_epoch_time
print('Training Summary Epoch: [{0}]\t'
'Time taken (s): {epoch_time:.0f}\t'
'Average Loss {loss:.3f}\t'.format(epoch + 1, epoch_time=epoch_time, loss=total_loss / num_losses))
from_iter = 0 # Reset start iteration for next epoch
if trainer.num_chars == 0:
continue
wer_avg, cer_avg = check_model_quality(epoch, checkpoint, total_loss / num_losses, trainer.get_cer(), trainer.get_wer())
new_score = wer_avg + cer_avg
checkpoint += 1
if args.checkpoint and is_leader: # checkpoint after the end of each epoch
file_path = '%s/model_checkpoint_%04d_epoch_%02d.model' % (save_folder, checkpoint+1, epoch + 1)
if args.use_lookahead:
_optimizer = optimizer.optimizer
else:
_optimizer = optimizer
torch.save(DeepSpeech.serialize(model,
optimizer=_optimizer,
epoch=epoch,
loss_results=plots.loss_results,
wer_results=plots.wer_results,
cer_results=plots.cer_results,
checkpoint=checkpoint,
checkpoint_loss_results=checkpoint_plots.loss_results,
checkpoint_wer_results=checkpoint_plots.wer_results,
checkpoint_cer_results=checkpoint_plots.cer_results,
trainval_checkpoint_loss_results=trainval_checkpoint_plots.loss_results,
trainval_checkpoint_wer_results=trainval_checkpoint_plots.wer_results,
trainval_checkpoint_cer_results=trainval_checkpoint_plots.cer_results,
), file_path)
train_dataset.save_curriculum(file_path + '.csv')
save_validation_curriculums(save_folder, checkpoint + 1, epoch + 1, 0)
del _optimizer
# anneal lr
print("Checkpoint:", checkpoint)
set_lr(get_lr() / args.learning_anneal)
if (best_score is None or new_score < best_score) and is_leader:
print("Found better validated model, saving to %s" % args.model_path)
if args.use_lookahead:
_optimizer = optimizer.optimizer
else:
_optimizer = optimizer
torch.save(DeepSpeech.serialize(model,
optimizer=_optimizer,
epoch=epoch,
loss_results=plots.loss_results,
wer_results=plots.wer_results,
cer_results=plots.cer_results,
checkpoint=checkpoint,
checkpoint_loss_results=checkpoint_plots.loss_results,
checkpoint_wer_results=checkpoint_plots.wer_results,
checkpoint_cer_results=checkpoint_plots.cer_results,
trainval_checkpoint_loss_results=trainval_checkpoint_plots.loss_results,
trainval_checkpoint_wer_results=trainval_checkpoint_plots.wer_results,
trainval_checkpoint_cer_results=trainval_checkpoint_plots.cer_results,
),
args.model_path)
train_dataset.save_curriculum(args.model_path + '.csv')
del _optimizer
best_score = new_score
if __name__ == '__main__':
args = parser.parse_args()
assert args.use_phonemes + args.denoise + args.grapheme_phoneme < 2
assert args.double_supervision + args.use_attention < 2
# ... I'm an idiot, somebody kill me
if args.use_phonemes:
from data.data_loader_aug import AudioDataLoaderPhoneme as AudioDataLoader
elif args.denoise:
from data.data_loader_aug import AudioDataLoaderDenoise as AudioDataLoader
elif args.double_supervision:
from data.data_loader_aug import AudioDataLoaderDouble as AudioDataLoader
else:
from data.data_loader_aug import AudioDataLoader
if args.double_supervision:
from data.data_loader_aug import AudioDataLoader as AudioDataLoaderVal
else:
AudioDataLoaderVal = AudioDataLoader
args.distributed = args.world_size > 1
args.model_path = os.path.join(args.save_folder, 'best.model')
is_leader = True
device = torch.device("cuda" if args.cuda else "cpu")
if args.distributed:
if args.gpu_rank:
torch.cuda.set_device(int(args.gpu_rank))
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
is_leader = args.rank == 0 # Only the first proc should save models
save_folder = args.save_folder
os.makedirs(save_folder, exist_ok=True)
plots = PlotWindow(args.id, 'train_loss_epochs', log_y=True)
checkpoint_plots = PlotWindow(args.id, 'test_loss_checks', log_y=True)
if args.train_val_manifest != '':
trainval_checkpoint_plots = PlotWindow(args.id, 'val_loss_checks', log_y=True)
else:
# set all properties to None for easy backwards compatibility
trainval_checkpoint_plots = t = type('test', (object,), {})()
trainval_checkpoint_plots.loss_results = None
trainval_checkpoint_plots.wer_results = None
trainval_checkpoint_plots.cer_results = None
lr_plots = LRPlotWindow(args.id, 'lr_finder', log_x=True)
total_avg_loss, start_epoch, start_iter, start_checkpoint = 0, 0, 0, 0
if args.use_phonemes:
with open(args.phonemes_path) as phoneme_file:
phoneme_map = {l: i for i, l
in enumerate(json.load(phoneme_file))}
if args.continue_from: # Starting from previous model
print("Loading checkpoint model %s" % args.continue_from)
package = torch.load(args.continue_from, map_location=lambda storage, loc: storage)
# package['dropout']=0.2
model = DeepSpeech.load_model_package(package)
# start with non-phoneme model, continue with phonemes
labels = DeepSpeech.get_labels(model)
audio_conf = DeepSpeech.get_audio_conf(model)
# in case you need to resume and change audio conf manually
"""
audio_conf = dict(sample_rate=args.sample_rate,
window_size=args.window_size,
window_stride=args.window_stride,
window=args.window,
noise_dir=args.noise_dir,
noise_prob=args.noise_prob,
noise_levels=(args.noise_min, args.noise_max),
aug_prob_8khz=args.aug_prob_8khz,
aug_prob_spect=args.aug_prob_spect)
if args.use_phonemes:
audio_conf['phoneme_count'] = len(phoneme_map)
audio_conf['phoneme_map'] = phoneme_map
"""
if args.use_phonemes and package.get('phoneme_count', 0) == 0:
model = DeepSpeech.add_phonemes_to_model(model,
len(phoneme_map))
audio_conf['phoneme_count'] = len(phoneme_map)
audio_conf['phoneme_map'] = phoneme_map
model.phoneme_count = len(phoneme_map)
if args.denoise and package.get('denoise', False) == False:
model = DeepSpeech.add_denoising_to_model(model)
print('Model transformed to a denoising one')
audio_conf['denoise'] = True
audio_conf['noise_prob'] = args.noise_prob
audio_conf['aug_type'] = args.aug_type
audio_conf['pytorch_stft'] = True
print('Changed audio conf params')
if args.use_attention:
if args.use_bpe:
from data.bpe_labels import Labels as BPELabels
labels = BPELabels(sp_model=args.sp_model,
use_phonemes=False,
s2s_decoder=args.use_attention)
# list instead of string
labels = labels.label_list
model = DeepSpeech.add_s2s_decoder_to_model(model,
labels=labels)
print('Model transformed to a model with full s2s decoder')
# REMOVE LATER
# audio_conf['noise_dir'] = '../data/augs/*.wav'
# audio_conf['noise_prob'] = 0.1
if args.double_supervision or 'transformer' in args.rnn_type:
optimizer = build_optimizer(args,
model=model)
else:
parameters = model.parameters()
optimizer = build_optimizer(args,
parameters_=parameters)
if not args.finetune: # Don't want to restart training
model = model.to(device)
# when adding phonemes, optimizer state is not full
try:
optimizer.load_state_dict(package['optim_dict'])
# set_lr(args.lr)
print('Current LR {}'.format(
optimizer.state_dict()['param_groups'][0]['lr']
))
except:
if args.double_supervision or 'transformer' in args.rnn_type or args.grapheme_phoneme:
optim_state = package['optim_dict'][0]
lr = optim_state['param_groups'][0]['lr']
print('Just setting the SGD LR {}'.format(lr))
set_lr(lr)
else:
print('Just changing the LR in the optimizer')
# set_lr(package['optim_dict']['param_groups'][0]['lr'])
set_lr(args.lr)
start_epoch = int(package.get('epoch', 1)) - 1 # Index start at 0 for training
start_iter = package.get('iteration', None)
start_checkpoint = package.get('checkpoint', 0) or 0
if start_iter is None:
start_epoch += 1 # We saved model after epoch finished, start at the next epoch.
start_iter = 0
else:
start_iter += 1
total_avg_loss = int(package.get('avg_loss', 0))
plots.loss_results = package['loss_results']
plots.cer_results = package['cer_results']
plots.wer_results = package['wer_results']
if package.get('checkpoint_cer_results') is not None:
checkpoint_plots.loss_results = package.get('checkpoint_loss_results', torch.Tensor(10000))
checkpoint_plots.cer_results = package.get('checkpoint_cer_results', torch.Tensor(10000))
checkpoint_plots.wer_results = package.get('checkpoint_wer_results', torch.Tensor(10000))
if package['cer_results'] is not None and start_epoch > 0:
plots.plot_history(start_epoch)
if package.get('checkpoint_cer_results') is not None and start_checkpoint > 0:
checkpoint_plots.plot_history(start_checkpoint)
if args.train_val_manifest != '':
if package.get('trainval_checkpoint_cer_results') is not None:
trainval_checkpoint_plots.loss_results = package.get('trainval_checkpoint_loss_results', torch.Tensor(10000))
trainval_checkpoint_plots.cer_results = package.get('trainval_checkpoint_cer_results', torch.Tensor(10000))
trainval_checkpoint_plots.wer_results = package.get('trainval_checkpoint_wer_results', torch.Tensor(10000))
if package.get('trainval_checkpoint_cer_results') is not None and start_checkpoint > 0:
trainval_checkpoint_plots.plot_history(start_checkpoint)
else:
if args.use_bpe:
from data.bpe_labels import Labels as BPELabels
labels = BPELabels(sp_model=args.sp_model,
use_phonemes=args.phonemes_only,
s2s_decoder=args.use_attention or args.double_supervision,
double_supervision=False,
naive_split=args.naive_split,
omit_spaces=args.omit_spaces,
subword_regularization=args.subword_regularization)
# list instead of string
labels = labels.label_list
# in case of double supervision just use the longer
# i.e. s2s = blank(pad) + base_num + space + eos + sos
# ctc = blank(pad) + base_num + space + 2
# len(ctc) = len(s2s) - 1
else:
with open(args.labels_path) as label_file:
# labels is a string
labels = str(''.join(json.load(label_file)))
assert args.pytorch_stft != args.pytorch_mel
audio_conf = dict(sample_rate=args.sample_rate,
window_size=args.window_size,
window_stride=args.window_stride,
window=args.window,
noise_dir=args.noise_dir,
noise_prob=args.noise_prob,
noise_levels=(args.noise_min, args.noise_max),
aug_prob_8khz=args.aug_prob_8khz,
aug_prob_spect=args.aug_prob_spect,
use_bpe=args.use_bpe,
sp_model=args.sp_model,
aug_type=args.aug_type,
pytorch_mel=args.pytorch_mel,
pytorch_stft=args.pytorch_stft,
denoise=args.denoise)
if args.use_phonemes:
audio_conf['phoneme_count'] = len(phoneme_map)
audio_conf['phoneme_map'] = phoneme_map
rnn_type = args.rnn_type.lower()
assert rnn_type in supported_rnns, "rnn_type should be either lstm, rnn or gru"
model = DeepSpeech(rnn_hidden_size=args.hidden_size,
cnn_width=args.cnn_width,
nb_layers=args.hidden_layers,
labels=labels,
rnn_type=rnn_type,
audio_conf=audio_conf,
bidirectional=args.bidirectional,
bnm=args.batch_norm_momentum,
dropout=args.dropout,
phoneme_count=len(phoneme_map) if args.use_phonemes else 0,
decoder_layers=args.decoder_layers,
kernel_size=args.kernel_size,
decoder_girth=args.decoder_girth)
if args.use_lookahead:
model = model.to(device)
if args.double_supervision or 'transformer' in args.rnn_type:
optimizer = build_optimizer(args,
model=model)
else:
parameters = model.parameters()
optimizer = build_optimizer(args,
parameters_=parameters)
# enorm = ENorm(model.named_parameters(), optimizer, c=1)
if args.use_attention:
criterion = torch.nn.NLLLoss(reduction='sum',
ignore_index=0) # use ctc blank token as pad token
elif args.double_supervision:
ctc_criterion = CTCLoss()
s2s_criterion = torch.nn.NLLLoss(reduction='sum',
ignore_index=0) # use ctc blank token as pad token
else:
criterion = CTCLoss()
if args.denoise:
mask_criterion = SemsegLoss(bce_weight=1.0,
dice_weight=0.0,
mse_weight=0.0)
mask_metric = MaskSimilarity(thresholds=[0.05, 0.1, 0.15])
# if double supervision used, s2s head is the last one
# and actually partakes in the decoding
decoder = GreedyDecoder(labels,
cut_after_eos_token=args.use_attention or args.double_supervision,
eos_token=']')
print('Label length {}'.format(len(labels)))
print(labels)
print('Audio conf')
print(audio_conf)
train_dataset = SpectrogramDataset(audio_conf=audio_conf, cache_path=args.cache_dir,
manifest_filepath=args.train_manifest,
labels=labels, normalize=args.norm, augment=args.augment,
curriculum_filepath=args.curriculum,
use_attention=args.use_attention,
double_supervision=args.double_supervision,
naive_split=args.naive_split,
phonemes_only=args.phonemes_only,
omit_spaces=args.omit_spaces,
subword_regularization=args.subword_regularization)
test_audio_conf = {**audio_conf,
'noise_prob': 0,
'aug_prob_8khz':0,
'aug_prob_spect':0,
'phoneme_count':0,
'phoneme_map':None}
print('Test audio conf')
print(test_audio_conf)
# no augs on test
# on test, even in case of double supervision
# we just need s2s data to validate
test_dataset = SpectrogramDataset(audio_conf=test_audio_conf,
cache_path=args.cache_dir,
manifest_filepath=args.val_manifest,
labels=labels, normalize=args.norm, augment=False,
use_attention=args.use_attention or args.double_supervision,
double_supervision=False,
naive_split=args.naive_split,
phonemes_only=args.phonemes_only,
omit_spaces=args.omit_spaces,
subword_regularization=False) # turn off augs on val
# if file is specified
# separate train validation wo domain shift
# also wo augs
# on test, even in case of double supervision
# we just need s2s data to validate
if args.train_val_manifest != '':
trainval_dataset = SpectrogramDataset(audio_conf=test_audio_conf,
cache_path=args.cache_dir,
manifest_filepath=args.train_val_manifest,
labels=labels, normalize=args.norm, augment=False,
use_attention=args.use_attention or args.double_supervision,
double_supervision=False,
naive_split=args.naive_split,
phonemes_only=args.phonemes_only,
omit_spaces=args.omit_spaces,
subword_regularization=False) # turn off augs on val
if args.reverse_sort:
# XXX: A hack to test max memory load.
train_dataset.ids.reverse()
test_loader = AudioDataLoaderVal(test_dataset,
batch_size=args.val_batch_size,
num_workers=args.num_workers)
if args.train_val_manifest != '':
trainval_loader = AudioDataLoaderVal(trainval_dataset,
batch_size=args.val_batch_size,
num_workers=args.num_workers)
if not args.use_lookahead:
model = model.to(device)
if args.distributed:
device_id = [int(args.gpu_rank)] if args.rank else None
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=device_id)
elif args.data_parallel:
model = torch.nn.DataParallel(model).to(device)
print('Using DP')
print(model)
print("Number of parameters: %d" % DeepSpeech.get_param_size(model))
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
if args.denoise:
mask_accuracy = AverageMeter()
mask_losses = AverageMeter()
ctc_losses = AverageMeter()
if args.double_supervision:
ctc_losses = AverageMeter()
s2s_losses = AverageMeter()
train(start_epoch, start_iter, start_checkpoint)
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
uncoverml/__init__.py | import os
import pkg_resources
import uncoverml
__author__ = 'Geoscience Australia, Mineral Systems Branch, ' \
'NICTA Spatial Inference Systems Team (now Data 61)'
__email__ = 'daniel.steinberg@nicta.com.au, basaks@gmail.com, brenainn.moushall@ga.gov.au'
__version__ = pkg_resources.get_distribution('uncover-ml').version
# Turn off MPI warning about network interface
os.environ['OMPI_MCA_btl_base_warn_component_unused'] = '0'
| [] | [] | ["OMPI_MCA_btl_base_warn_component_unused"] | [] | ["OMPI_MCA_btl_base_warn_component_unused"] | python | 1 | 0 | |
qa/rpc-tests/test_framework/util.py | # Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from authproxy import AuthServiceProxy, JSONRPCException
from util import *
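# Ports are offset by the process id so several test runs can share one machine without colliding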
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
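# Map of node index -> Popen handle for each running lambocoind; used by stop_node() and wait_bitcoinds()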
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "lambocoin.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 120-block-long chain and
4 wallets.
lambocoind and lambocoin-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run bitcoinds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("LAMBOCOIND", "lambocoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: lambocoind started, calling lambocoin-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("LAMBOCOINCLI", "lambocoin-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: lambocoin-cli -rpcwait getblockcount completed"
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 120-block-long chain; each of the 4 nodes
# gets 30 blocks (two passes of 15).
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(15):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a bitcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("LAMBOCOIND", "lambocoind")
args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: lambocoind started, calling lambocoin-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("LAMBOCOINCLI", "lambocoin-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: calling lambocoin-cli -rpcwait getblockcount returned"
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
if timewait is not None:
proxy = AuthServiceProxy(url, timeout=timewait)
else:
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple bitcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
if binary is None: binary = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
| [] | [] | ["PYTHON_DEBUG", "LAMBOCOINCLI", "LAMBOCOIND"] | [] | ["PYTHON_DEBUG", "LAMBOCOINCLI", "LAMBOCOIND"] | python | 3 | 0 | |
kicost/distributors/api_octopart.py | # MIT license
#
# Copyright (C) 2018 by XESS Corporation / Max Maisel / Hildo Guillardi Jรบnior
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Libraries.
import json
import requests
import tqdm
import re
import os
import sys
from collections import Counter
if sys.version_info[0] < 3:
from urllib import quote_plus
else:
from urllib.parse import quote_plus
# KiCost definitions.
from ..global_vars import DEBUG_OVERVIEW
# Distributors definitions.
from .distributor import distributor_class
# Author information.
__author__ = 'XESS Corporation'
__webpage__ = 'info@xess.com'
# Python2/3 compatibility.
# from __future__ import (unicode_literals, print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
OCTOPART_MAX_PARTBYQUERY = 20 # Maximum part list length to one single query.
__all__ = ['api_octopart']
class api_octopart(distributor_class):
name = 'Octopart'
type = 'api'
enabled = False
url = 'https://octopart.com/' # Web site API information.
API_KEY = None
API_DISTRIBUTORS = ['arrow', 'digikey', 'farnell', 'mouser', 'newark', 'rs', 'tme']
DIST_TRANSLATION = { # Distributor translation.
'arrow': 'Arrow Electronics, Inc.',
'digikey': 'Digi-Key',
'farnell': 'Farnell',
'mouser': 'Mouser',
'newark': 'Newark',
'rs': 'RS Components',
'tme': 'TME',
'lcsc': 'LCSC',
}
@staticmethod
def init_dist_dict():
if api_octopart.enabled:
distributor_class.add_distributors(api_octopart.API_DISTRIBUTORS)
def query(query):
"""Send query to Octopart and return results."""
# url = 'http://octopart.com/api/v3/parts/match'
# payload = {'queries': json.dumps(query), 'include\[\]': 'specs', 'apikey': token}
# response = requests.get(url, params=payload)
if api_octopart.API_KEY:
url = 'http://octopart.com/api/v3/parts/match'
data = 'queries=%s' % json.dumps(query)
data += '&apikey=' + api_octopart.API_KEY
else: # Not working 2021/04/28:
url = 'https://temp-octopart-proxy.kitspace.org/parts/match'
data = 'queries=%s' % json.dumps(query)
data += '&include[]=specs'
data += '&include[]=datasheets'
distributor_class.log_request(url, data)
response = requests.get(url + '?' + data)
distributor_class.log_response(response.text)
if response.status_code == requests.codes['ok']:
results = json.loads(response.text).get('results')
return results
elif response.status_code == requests.codes['not_found']: # 404
raise Exception('Octopart server not found.')
elif response.status_code == 403 or 'Invalid API key' in response.text:
raise Exception('Octopart KEY invalid, register one at "https://www.octopart.com".')
else:
raise Exception('Octopart error: ' + str(response.status_code))
def sku_to_mpn(sku):
"""Find manufacturer part number associated with a distributor SKU."""
part_query = [{'reference': 1, 'sku': quote_plus(sku)}]
results = api_octopart.query(part_query)
if not results:
return None
result = results[0]
mpns = [item['mpn'] for item in result['items']]
if not mpns:
return None
if len(mpns) == 1:
return mpns[0]
mpn_cnts = Counter(mpns)
return mpn_cnts.most_common(1)[0][0] # Return the most common MPN.
def skus_to_mpns(parts, distributors):
"""Find manufaturer's part number for all parts with just distributor SKUs."""
for i, part in enumerate(parts):
# Skip parts that already have a manufacturer's part number.
if part.fields.get('manf#'):
continue
# Get all the SKUs for this part.
skus = list(
set([part.fields.get(dist + '#', '') for dist in distributors]))
skus = [sku for sku in skus
if sku not in ('', None)] # Remove null SKUs.
# Skip this part if there are no SKUs.
if not skus:
continue
# Convert the SKUs to manf. part numbers.
mpns = [api_octopart.sku_to_mpn(sku) for sku in skus]
mpns = [mpn for mpn in mpns
if mpn not in ('', None)] # Remove null manf#.
# Skip assigning manf. part number to this part if there aren't any to assign.
if not mpns:
continue
# Assign the most common manf. part number to this part.
mpn_cnts = Counter(mpns)
part.fields['manf#'] = mpn_cnts.most_common(1)[0][0]
def query_part_info(parts, distributors, currency):
"""Fill-in the parts with price/qty/etc info from Octopart."""
distributor_class.logger.log(DEBUG_OVERVIEW, '# Getting part data from Octopart...')
# Setup progress bar to track progress of Octopart queries.
progress = tqdm.tqdm(desc='Progress', total=len(parts), unit='part', miniters=1)
# Translate from Octopart distributor names to the names used internally by kicost.
dist_xlate = api_octopart.DIST_TRANSLATION
def get_part_info(query, parts, currency='USD'):
"""Query Octopart for quantity/price info and place it into the parts list."""
results = api_octopart.query(query)
# Loop through the response to the query and enter info into the parts list.
for result in results:
i = int(result['reference']) # Get the index into the part dict.
part = parts[i]
# Loop through the offers from various dists for this particular part.
for item in result['items']:
# Record the lifecycle status when it is 'obsolete' (other values such as
# 'active' and 'not recommended for new designs' exist but are not used).
if 'lifecycle_status' in item['specs']:
lifecycle_status = item['specs']['lifecycle_status']['value'][0].lower()
if lifecycle_status == 'obsolete':
part.lifecycle = lifecycle_status
# Take the datasheet provided by the distributor. It will be used
# in the output spreadsheet if none is provided in the BOM/schematic.
# This is recorded in the output file.
if item['datasheets']:
part.datasheet = item['datasheets'][0]['url']
for offer in item['offers']:
# Get the distributor who made the offer and add their
# price/qty info to the parts list if its one of the accepted distributors.
dist = dist_xlate.get(offer['seller']['name'], '')
if dist in distributors:
# Get pricing information from this distributor.
try:
price_tiers = {} # Empty dict in case of exception.
dist_currency = list(offer['prices'].keys())
parts.currency[dist] = dist_currency[0]
price_tiers = {qty: float(price) for qty, price in list(offer['prices'].values())[0]}
# Combine price lists for multiple offers from the same distributor
# to build a complete list of cut-tape and reeled components.
if dist not in part.price_tiers:
part.price_tiers[dist] = {}
part.price_tiers[dist].update(price_tiers)
except Exception:
pass # Price list is probably missing so leave empty default dict in place.
# Compute the quantity increment between the lowest two prices.
# This will be used to distinguish the cut-tape from the reeled components.
try:
part_break_qtys = sorted(price_tiers.keys())
part_qty_increment = part_break_qtys[1] - part_break_qtys[0]
except Exception:
# This will happen if there are not enough entries in the price/qty list.
# As a stop-gap measure, just assign infinity to the part increment.
# A better alternative may be to examine the packaging field of the offer.
part_qty_increment = float("inf")
# Select the part SKU, web page, and available quantity.
# Each distributor can have different stock codes for the same part in different
# quantities / delivery package styles: cut-tape, reel, ...
# Therefore we select and overwrite a previous selection if one of the
# following conditions is met:
# 1. We don't have a selection for this part from this distributor yet.
# 2. The MOQ is smaller than for the current selection.
# 3. The part_qty_increment for this offer smaller than that of the existing selection.
# (we prefer cut-tape style packaging over reels)
# 4. For DigiKey, we can't use part_qty_increment to distinguish between
# reel and cut-tape, so we need to look at the actual DigiKey part number.
# This is handled by the `distributors_info[dist]['ignore_cat#_re']` definition
# in the distributor profile.
if not part.part_num.get(dist):
qty_avail = part.qty_avail.get(dist)
if not qty_avail or (offer.get('in_stock_quantity') and qty_avail < offer.get('in_stock_quantity')):
# Keep the offer with the larger available quantity.
part.qty_avail[dist] = offer.get('in_stock_quantity')
moq = part.moq.get(dist)
if not moq or (offer.get('moq') and moq > offer.get('moq')):
# Save the link, stock code, ... of the page for minimum purchase.
part.moq[dist] = offer.get('moq') # Minimum order qty.
part.part_num[dist] = offer.get('sku')
part.url[dist] = offer.get('product_url')
part.qty_increment[dist] = part_qty_increment
# Otherwise, check qty increment and see if its the smallest for this part & dist.
elif part_qty_increment < part.qty_increment.get(dist):
# This part looks more like a cut-tape version, so
# update the SKU, web page, and available quantity.
qty_avail = part.qty_avail.get(dist)
if not qty_avail or (offer.get('in_stock_quantity') and qty_avail < offer.get('in_stock_quantity')):
# Keep the offer with the larger available quantity.
part.qty_avail[dist] = offer.get('in_stock_quantity')
ign_stock_code = distributor_class.get_distributor_info(dist).ignore_cat
# TODO dist_part_num wasn't defined, I copied it from KitSpace API
dist_part_num = offer.get('sku', '').get('part', '')
valid_part = not (ign_stock_code and re.match(ign_stock_code, dist_part_num))
if valid_part and \
(not part.part_num.get(dist) or
(part_qty_increment < part.qty_increment.get(dist)) or
(not part.moq.get(dist) or (offer.get('moq') and part.moq.get(dist) > offer.get('moq')))):
# Save the link, stock code, ... of the page for minimum purchase.
part.moq[dist] = offer.get('moq') # Minimum order qty.
part.part_num[dist] = offer.get('sku')
part.url[dist] = offer.get('product_url')
part.qty_increment[dist] = part_qty_increment
# Don't bother with any extra info from the distributor.
part.info_dist[dist] = {}
# Get the valid distributor names used by the part catalog that may be
# indexed by Octopart. This is used to filter out local distributors and
# ones not implemented in the Octopart definition.
# Note: The user can use --exclude and define it with fields.
distributors_octopart = [d for d in distributors if distributor_class.get_distributor_info(d).is_web()
and d in api_octopart.API_DISTRIBUTORS]
# Break list of parts into smaller pieces and get price/quantities from Octopart.
octopart_query = []
prev_i = 0 # Used to record index where parts query occurs.
for i, part in enumerate(parts):
# Create an Octopart query using the manufacturer's part number or
# distributor SKU.
manf_code = part.fields.get('manf#')
if manf_code:
part_query = {'reference': i, 'mpn': quote_plus(manf_code)}
else:
try:
# No MPN, so use the first distributor SKU that's found.
# skus = [part.fields.get(d + '#', '') for d in distributors_octopart
# if part.fields.get(d + '#') ]
for octopart_dist_sku in distributors_octopart:
sku = part.fields.get(octopart_dist_sku + '#', '')
if sku:
break
# Create the part query using SKU matching.
part_query = {'reference': i, 'sku': quote_plus(sku)}
# Because a distributor SKU (from the Octopart-enrolled distributors) was
# used instead of the normal 'manf#' code, take its sub-quantity as the
# general sub-quantity of the current part.
try:
part.fields['manf#_qty'] = part.fields[octopart_dist_sku + '#_qty']
distributor_class.logger.warning("Associated {q} quantity to '{r}' due \"{f}#={q}:{c}\".".format(
q=part.fields[octopart_dist_sku + '#_qty'], r=part.refs,
f=octopart_dist_sku, c=part.fields[octopart_dist_sku+'#']))
except KeyError:
pass
except IndexError:
# No MPN or SKU, so skip this part.
continue
# Add query for this part to the list of part queries.
octopart_query.append(part_query)
# Once there are enough (but not too many) part queries, make a query request to Octopart.
if len(octopart_query) == OCTOPART_MAX_PARTBYQUERY:
get_part_info(octopart_query, parts)
progress.update(i - prev_i) # Update progress bar.
prev_i = i
octopart_query = [] # Get ready for next batch.
# Query Octopart for the last batch of parts.
if octopart_query:
get_part_info(octopart_query, parts)
progress.update(len(parts)-prev_i) # This will indicate final progress of 100%.
# Done with the scraping progress bar so delete it or else we get an
# error when the program terminates.
del progress
key = os.environ.get('KICOST_OCTOPART_KEY_V3')
if key:
api_octopart.API_KEY = key
api_octopart.enabled = True
elif os.environ.get('KICOST_OCTOPART'):
api_octopart.enabled = True
distributor_class.register(api_octopart, 60)
| [] | [] | ["KICOST_OCTOPART", "KICOST_OCTOPART_KEY_V3"] | [] | ["KICOST_OCTOPART", "KICOST_OCTOPART_KEY_V3"] | python | 2 | 0 | |
peer/common/common.go | /*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package common
import (
"context"
"fmt"
"io/ioutil"
"os"
"runtime"
"strings"
"time"
"github.com/hyperledger/fabric-gm/bccsp/factory"
"github.com/hyperledger/fabric-gm/common/channelconfig"
"github.com/hyperledger/fabric-gm/common/flogging"
"github.com/hyperledger/fabric-gm/common/viperutil"
"github.com/hyperledger/fabric-gm/core/comm"
"github.com/hyperledger/fabric-gm/core/config"
"github.com/hyperledger/fabric-gm/core/scc/cscc"
"github.com/hyperledger/fabric-gm/msp"
mspmgmt "github.com/hyperledger/fabric-gm/msp/mgmt"
"github.com/hyperledger/fabric-gm/peer/common/api"
pcommon "github.com/hyperledger/fabric-gm/protos/common"
pb "github.com/hyperledger/fabric-gm/protos/peer"
putils "github.com/hyperledger/fabric-gm/protos/utils"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/viper"
tls "github.com/tjfoc/gmtls"
)
// UndefinedParamValue defines what undefined parameters in the command line will initialise to
const UndefinedParamValue = ""
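// CmdRoot is the base name of the configuration file (core.yaml) loaded by InitConfig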
const CmdRoot = "core"
var mainLogger = flogging.MustGetLogger("main")
var logOutput = os.Stderr
var (
defaultConnTimeout = 3 * time.Second
// These function variables (xyzFnc) can be used to invoke the corresponding xyz function;
// this allows the invoking packages to mock these functions in their unit test cases
// GetEndorserClientFnc is a function that returns a new endorser client connection
// to the provided peer address using the TLS root cert file,
// by default it is set to GetEndorserClient function
GetEndorserClientFnc func(address, tlsRootCertFile string) (pb.EndorserClient, error)
// GetPeerDeliverClientFnc is a function that returns a new deliver client connection
// to the provided peer address using the TLS root cert file,
// by default it is set to GetPeerDeliverClient function
GetPeerDeliverClientFnc func(address, tlsRootCertFile string) (api.PeerDeliverClient, error)
// GetDeliverClientFnc is a function that returns a new deliver client connection
// to the provided peer address using the TLS root cert file,
// by default it is set to GetDeliverClient function
GetDeliverClientFnc func(address, tlsRootCertFile string) (pb.Deliver_DeliverClient, error)
// GetDefaultSignerFnc is a function that returns a default Signer (Default/PEER)
// by default it is set to GetDefaultSigner function
GetDefaultSignerFnc func() (msp.SigningIdentity, error)
// GetBroadcastClientFnc returns an instance of the BroadcastClient interface
// by default it is set to GetBroadcastClient function
GetBroadcastClientFnc func() (BroadcastClient, error)
// GetOrdererEndpointOfChainFnc returns orderer endpoints of given chain
// by default it is set to GetOrdererEndpointOfChain function
GetOrdererEndpointOfChainFnc func(chainID string, signer msp.SigningIdentity,
endorserClient pb.EndorserClient) ([]string, error)
// GetCertificateFnc is a function that returns the client TLS certificate
GetCertificateFnc func() (tls.Certificate, error)
)
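// commonClient bundles a gRPC client with the endpoint address and server name it was created for.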
type commonClient struct {
*comm.GRPCClient
address string
sn string
}
func init() {
GetEndorserClientFnc = GetEndorserClient
GetDefaultSignerFnc = GetDefaultSigner
GetBroadcastClientFnc = GetBroadcastClient
GetOrdererEndpointOfChainFnc = GetOrdererEndpointOfChain
GetDeliverClientFnc = GetDeliverClient
GetPeerDeliverClientFnc = GetPeerDeliverClient
GetCertificateFnc = GetCertificate
}
// InitConfig initializes viper config
func InitConfig(cmdRoot string) error {
err := config.InitViper(nil, cmdRoot)
if err != nil {
return err
}
err = viper.ReadInConfig() // Find and read the config file
if err != nil { // Handle errors reading the config file
// The version of Viper we use claims the config type isn't supported when in fact the file hasn't been found
// Display a more helpful message to avoid confusing the user.
if strings.Contains(fmt.Sprint(err), "Unsupported Config Type") {
return errors.New(fmt.Sprintf("Could not find config file. "+
"Please make sure that FABRIC_CFG_PATH is set to a path "+
"which contains %s.yaml", cmdRoot))
} else {
return errors.WithMessage(err, fmt.Sprintf("error when reading %s config file", cmdRoot))
}
}
return nil
}
// InitCrypto initializes crypto for this peer
func InitCrypto(mspMgrConfigDir, localMSPID, localMSPType string) error {
var err error
// Check whether msp folder exists
fi, err := os.Stat(mspMgrConfigDir)
if os.IsNotExist(err) || !fi.IsDir() {
// No need to try to load MSP from folder which is not available
return errors.Errorf("cannot init crypto, folder \"%s\" does not exist", mspMgrConfigDir)
}
// Check whether localMSPID exists
if localMSPID == "" {
return errors.New("the local MSP must have an ID")
}
// Init the BCCSP
SetBCCSPKeystorePath()
var bccspConfig *factory.FactoryOpts
err = viperutil.EnhancedExactUnmarshalKey("peer.BCCSP", &bccspConfig)
if err != nil {
return errors.WithMessage(err, "could not parse YAML config")
}
err = mspmgmt.LoadLocalMspWithType(mspMgrConfigDir, bccspConfig, localMSPID, localMSPType)
if err != nil {
return errors.WithMessage(err, fmt.Sprintf("error when setting up MSP of type %s from directory %s", localMSPType, mspMgrConfigDir))
}
return nil
}
// SetBCCSPKeystorePath sets the file keystore path for the SW BCCSP provider
// to an absolute path relative to the config file
func SetBCCSPKeystorePath() {
viper.Set("peer.BCCSP.SW.FileKeyStore.KeyStore",
config.GetPath("peer.BCCSP.SW.FileKeyStore.KeyStore"))
}
// GetDefaultSigner returns a default Signer (Default/PEER) for the CLI
func GetDefaultSigner() (msp.SigningIdentity, error) {
signer, err := mspmgmt.GetLocalMSP().GetDefaultSigningIdentity()
if err != nil {
return nil, errors.WithMessage(err, "error obtaining the default signing identity")
}
return signer, err
}
// GetOrdererEndpointOfChain returns orderer endpoints of given chain
func GetOrdererEndpointOfChain(chainID string, signer msp.SigningIdentity, endorserClient pb.EndorserClient) ([]string, error) {
// query cscc for chain config block
invocation := &pb.ChaincodeInvocationSpec{
ChaincodeSpec: &pb.ChaincodeSpec{
Type: pb.ChaincodeSpec_Type(pb.ChaincodeSpec_Type_value["GOLANG"]),
ChaincodeId: &pb.ChaincodeID{Name: "cscc"},
Input: &pb.ChaincodeInput{Args: [][]byte{[]byte(cscc.GetConfigBlock), []byte(chainID)}},
},
}
creator, err := signer.Serialize()
if err != nil {
return nil, errors.WithMessage(err, fmt.Sprintf("error serializing identity for %s", signer.GetIdentifier()))
}
prop, _, err := putils.CreateProposalFromCIS(pcommon.HeaderType_CONFIG, "", invocation, creator)
if err != nil {
return nil, errors.WithMessage(err, "error creating GetConfigBlock proposal")
}
signedProp, err := putils.GetSignedProposal(prop, signer)
if err != nil {
return nil, errors.WithMessage(err, "error creating signed GetConfigBlock proposal")
}
proposalResp, err := endorserClient.ProcessProposal(context.Background(), signedProp)
if err != nil {
return nil, errors.WithMessage(err, "error endorsing GetConfigBlock")
}
if proposalResp == nil {
return nil, errors.WithMessage(err, "error nil proposal response")
}
if proposalResp.Response.Status != 0 && proposalResp.Response.Status != 200 {
return nil, errors.Errorf("error bad proposal response %d: %s", proposalResp.Response.Status, proposalResp.Response.Message)
}
// parse config block
block, err := putils.GetBlockFromBlockBytes(proposalResp.Response.Payload)
if err != nil {
return nil, errors.WithMessage(err, "error unmarshaling config block")
}
envelopeConfig, err := putils.ExtractEnvelope(block, 0)
if err != nil {
return nil, errors.WithMessage(err, "error extracting config block envelope")
}
bundle, err := channelconfig.NewBundleFromEnvelope(envelopeConfig)
if err != nil {
return nil, errors.WithMessage(err, "error loading config block")
}
return bundle.ChannelConfig().OrdererAddresses(), nil
}
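// Illustrative use only; the channel name and client values below are assumed
// for the example, not taken from this file:
//
//	endpoints, err := GetOrdererEndpointOfChain("mychannel", signer, endorserClient)
//	if err != nil {
//		return err
//	}
//	// endpoints now holds the orderer addresses recorded in the channel config.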
// CheckLogLevel checks that a given log level string is valid
func CheckLogLevel(level string) error {
if !flogging.IsValidLevel(level) {
return errors.Errorf("invalid log level provided - %s", level)
}
return nil
}
func configFromEnv(prefix string) (address, override string, clientConfig comm.ClientConfig, err error) {
address = viper.GetString(prefix + ".address")
override = viper.GetString(prefix + ".tls.serverhostoverride")
clientConfig = comm.ClientConfig{}
connTimeout := viper.GetDuration(prefix + ".client.connTimeout")
if connTimeout == time.Duration(0) {
connTimeout = defaultConnTimeout
}
clientConfig.Timeout = connTimeout
secOpts := &comm.SecureOptions{
UseTLS: viper.GetBool(prefix + ".tls.enabled"),
RequireClientCert: viper.GetBool(prefix + ".tls.clientAuthRequired")}
if secOpts.UseTLS {
caPEM, res := ioutil.ReadFile(config.GetPath(prefix + ".tls.rootcert.file"))
if res != nil {
err = errors.WithMessage(res,
fmt.Sprintf("unable to load %s.tls.rootcert.file", prefix))
return
}
secOpts.ServerRootCAs = [][]byte{caPEM}
}
if secOpts.RequireClientCert {
keyPEM, res := ioutil.ReadFile(config.GetPath(prefix + ".tls.clientKey.file"))
if res != nil {
err = errors.WithMessage(res,
fmt.Sprintf("unable to load %s.tls.clientKey.file", prefix))
return
}
secOpts.Key = keyPEM
certPEM, res := ioutil.ReadFile(config.GetPath(prefix + ".tls.clientCert.file"))
if res != nil {
err = errors.WithMessage(res,
fmt.Sprintf("unable to load %s.tls.clientCert.file", prefix))
return
}
secOpts.Certificate = certPEM
}
clientConfig.SecOpts = secOpts
return
}
func InitCmd(cmd *cobra.Command, args []string) {
err := InitConfig(CmdRoot)
if err != nil { // Handle errors reading the config file
mainLogger.Errorf("Fatal error when initializing %s config : %s", CmdRoot, err)
os.Exit(1)
}
// read in the legacy logging level settings and, if set,
// notify users of the FABRIC_LOGGING_SPEC env variable
var loggingLevel string
if viper.GetString("logging_level") != "" {
loggingLevel = viper.GetString("logging_level")
} else {
loggingLevel = viper.GetString("logging.level")
}
if loggingLevel != "" {
mainLogger.Warning("CORE_LOGGING_LEVEL is no longer supported, please use the FABRIC_LOGGING_SPEC environment variable")
}
loggingSpec := os.Getenv("FABRIC_LOGGING_SPEC")
loggingFormat := os.Getenv("FABRIC_LOGGING_FORMAT")
flogging.Init(flogging.Config{
Format: loggingFormat,
Writer: logOutput,
LogSpec: loggingSpec,
})
// Init the MSP
var mspMgrConfigDir = config.GetPath("peer.mspConfigPath")
var mspID = viper.GetString("peer.localMspId")
var mspType = viper.GetString("peer.localMspType")
if mspType == "" {
mspType = msp.ProviderTypeToString(msp.FABRIC)
}
err = InitCrypto(mspMgrConfigDir, mspID, mspType)
	if err != nil { // Handle errors initializing the local MSP / crypto material
mainLogger.Errorf("Cannot run peer because %s", err.Error())
os.Exit(1)
}
runtime.GOMAXPROCS(viper.GetInt("peer.gomaxprocs"))
}
| [
"\"FABRIC_LOGGING_SPEC\"",
"\"FABRIC_LOGGING_FORMAT\""
] | [] | [
"FABRIC_LOGGING_SPEC",
"FABRIC_LOGGING_FORMAT"
] | [] | ["FABRIC_LOGGING_SPEC", "FABRIC_LOGGING_FORMAT"] | go | 2 | 0 | |
pkg/sources/adapter/awscloudwatchsource/adapter.go | /*
Copyright 2022 TriggerMesh Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package awscloudwatchsource
import (
"context"
"encoding/json"
"fmt"
"time"
"go.uber.org/zap"
cloudevents "github.com/cloudevents/sdk-go/v2"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface"
pkgadapter "knative.dev/eventing/pkg/adapter/v2"
"knative.dev/pkg/logging"
"github.com/triggermesh/triggermesh/pkg/apis/sources"
"github.com/triggermesh/triggermesh/pkg/apis/sources/v1alpha1"
"github.com/triggermesh/triggermesh/pkg/sources/adapter/common/health"
)
// envConfig is a set of parameters sourced from the environment for the source's
// adapter.
type envConfig struct {
pkgadapter.EnvConfig
Region string `envconfig:"AWS_REGION"`
Query string `envconfig:"QUERIES" required:"true"` // JSON based array of name/query pairs
PollingInterval string `envconfig:"POLLING_INTERVAL" required:"true"` // free tier is 5m
// The environment variables below aren't read from the envConfig struct
// by the AWS SDK, but rather directly using os.Getenv().
// They are nevertheless listed here for documentation purposes.
_ string `envconfig:"AWS_ACCESS_KEY_ID"`
_ string `envconfig:"AWS_SECRET_ACCESS_KEY"`
}
// adapter implements the source's adapter.
type adapter struct {
logger *zap.SugaredLogger
mt *pkgadapter.MetricTag
eventsource string
cwClient cloudwatchiface.CloudWatchAPI
ceClient cloudevents.Client
metricQueries []*cloudwatch.MetricDataQuery
pollingInterval time.Duration
}
// NewEnvConfig satisfies pkgadapter.EnvConfigConstructor.
func NewEnvConfig() pkgadapter.EnvConfigAccessor {
return &envConfig{}
}
// NewAdapter satisfies pkgadapter.AdapterConstructor.
func NewAdapter(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter {
logger := logging.FromContext(ctx)
mt := &pkgadapter.MetricTag{
ResourceGroup: sources.AWSCloudWatchSourceResource.String(),
Namespace: envAcc.GetNamespace(),
Name: envAcc.GetName(),
}
eventsource := v1alpha1.AWSCloudWatchSourceName(envAcc.GetNamespace(), envAcc.GetName())
env := envAcc.(*envConfig)
cfg := session.Must(session.NewSession(aws.NewConfig().
WithRegion(env.Region),
))
interval, err := time.ParseDuration(env.PollingInterval)
if err != nil {
logger.Panicf("Unable to parse interval duration: %v", zap.Error(err))
}
metricQueries, err := parseQueries(env.Query)
if err != nil {
logger.Panicf("unable to parse metric queries: %v", zap.Error(err))
}
return &adapter{
logger: logger,
mt: mt,
eventsource: eventsource,
cwClient: cloudwatch.New(cfg),
ceClient: ceClient,
pollingInterval: interval,
metricQueries: metricQueries,
}
}
// parseQueries takes the JSON representation of the queries, as passed in via
// the QUERIES environment variable, and converts it into CloudWatch metric data queries.
func parseQueries(rawQuery string) ([]*cloudwatch.MetricDataQuery, error) {
queries := make([]*cloudwatch.MetricDataQuery, 0)
rawQueries := make([]v1alpha1.AWSCloudWatchMetricQuery, 0)
err := json.Unmarshal([]byte(rawQuery), &rawQueries)
if err != nil {
return nil, err
}
for _, v := range rawQueries {
name := v.Name
if v.Expression != nil {
queries = append(queries, &cloudwatch.MetricDataQuery{
Expression: v.Expression,
Id: &name,
})
} else if v.Metric != nil {
queries = append(queries, &cloudwatch.MetricDataQuery{
Id: &name,
MetricStat: transformQuery(v.Metric),
})
}
}
return queries, nil
}
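// Illustrative QUERIES payload that parseQueries can unmarshal. The JSON keys
// shown here are assumptions inferred from the v1alpha1.AWSCloudWatchMetricQuery
// fields used above; the authoritative JSON tags live in the API types.
//
//	[
//	  {"name": "error_rate", "expression": "SUM(METRICS())"},
//	  {"name": "cpu", "metric": {"metric": {"metricName": "CPUUtilization", "namespace": "AWS/EC2"},
//	                             "period": 300, "stat": "Average"}}
//	]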
func transformQuery(q *v1alpha1.AWSCloudWatchMetricStat) *cloudwatch.MetricStat {
dimensions := make([]*cloudwatch.Dimension, 0)
for _, v := range q.Metric.Dimensions {
dimensions = append(dimensions, &cloudwatch.Dimension{
Name: &v.Name,
Value: &v.Value,
})
}
ms := cloudwatch.MetricStat{
Metric: &cloudwatch.Metric{
MetricName: &q.Metric.MetricName,
Namespace: &q.Metric.Namespace,
Dimensions: dimensions,
},
Period: &q.Period,
Stat: &q.Stat,
}
if q.Unit != "" {
ms.SetUnit(q.Unit)
}
return &ms
}
// Start implements adapter.Adapter.
func (a *adapter) Start(ctx context.Context) error {
go health.Start(ctx)
if err := peekMetrics(ctx, a.cwClient); err != nil {
return fmt.Errorf("unable to read metrics: %w", err)
}
health.MarkReady()
a.logger.Info("Enabling CloudWatch")
ctx = pkgadapter.ContextWithMetricTag(ctx, a.mt)
// Setup polling to retrieve metrics
poll := time.NewTicker(a.pollingInterval)
defer poll.Stop()
// Wake up every pollingInterval, and retrieve the logs
var priorTime *time.Time
for {
select {
case <-ctx.Done():
return nil
case t := <-poll.C:
go a.CollectMetrics(ctx, priorTime, t)
priorTime = &t
}
}
}
func (a *adapter) CollectMetrics(ctx context.Context, priorTime *time.Time, currentTime time.Time) {
a.logger.Debug("Firing metrics")
startInterval := currentTime.Add(-a.pollingInterval)
if priorTime != nil {
startInterval = *priorTime
}
metricInput := cloudwatch.GetMetricDataInput{
EndTime: ¤tTime,
StartTime: &startInterval,
MetricDataQueries: a.metricQueries,
}
err := a.cwClient.GetMetricDataPages(&metricInput, func(output *cloudwatch.GetMetricDataOutput, b bool) bool {
err := a.SendMetricEvent(ctx, output)
if err != nil {
a.logger.Errorf("error sending metrics: %v", zap.Error(err))
return false
}
// Ensure that we indicate if there's more work to do
return !b
})
if err != nil {
a.logger.Errorf("error retrieving metrics: %v", zap.Error(err))
return
}
}
func (a *adapter) SendMetricEvent(ctx context.Context, metricOutput *cloudwatch.GetMetricDataOutput) error {
	// The output may contain multiple messages as well as metric data results;
	// each entry is emitted as its own CloudEvent.
for _, v := range metricOutput.Messages {
event := cloudevents.NewEvent(cloudevents.VersionV1)
event.SetType(v1alpha1.AWSEventType(v1alpha1.ServiceCloudWatch, v1alpha1.AWSCloudWatchMessageEventType))
event.SetSource(a.eventsource)
err := event.SetData(cloudevents.ApplicationJSON, v)
if err != nil {
return fmt.Errorf("failed to set event data: %w", err)
}
if result := a.ceClient.Send(ctx, event); !cloudevents.IsACK(result) {
return result
}
}
for _, v := range metricOutput.MetricDataResults {
event := cloudevents.NewEvent(cloudevents.VersionV1)
event.SetType(v1alpha1.AWSEventType(v1alpha1.ServiceCloudWatch, v1alpha1.AWSCloudWatchMetricEventType))
event.SetSource(a.eventsource)
err := event.SetData(cloudevents.ApplicationJSON, v)
if err != nil {
return fmt.Errorf("failed to set event data: %w", err)
}
		if result := a.ceClient.Send(ctx, event); !cloudevents.IsACK(result) {
return result
}
}
return nil
}
// peekMetrics verifies that the provided client can read metrics from CloudWatch.
func peekMetrics(ctx context.Context, cli cloudwatchiface.CloudWatchAPI) error {
const oneHourInSeconds = 3600
_, err := cli.GetMetricDataWithContext(ctx, &cloudwatch.GetMetricDataInput{
StartTime: aws.Time(time.Unix(0, 0)),
EndTime: aws.Time(time.Unix(1, 0)),
MetricDataQueries: []*cloudwatch.MetricDataQuery{
// This query is technically valid but we don't need it
// to return any result.
{
Id: aws.String("peek"),
MetricStat: &cloudwatch.MetricStat{
Metric: &cloudwatch.Metric{
MetricName: aws.String("Peak"),
Namespace: aws.String("TriggerMesh"),
},
Period: aws.Int64(oneHourInSeconds),
Stat: aws.String(cloudwatch.StatisticSum),
},
},
},
})
return err
}
| [] | [] | [] | [] | [] | go | 0 | 0 | |
porta/wsgi.py | """
WSGI config for porta project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'porta.settings')
application = get_wsgi_application()
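# Illustrative deployment command (assumed; the WSGI server and bind address
# are not part of this project's configuration):
#   gunicorn porta.wsgi:application --bind 0.0.0.0:8000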
| [] | [] | [] | [] | [] | python | 0 | 0 | |
build/parse_test.go | /*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package build
import (
"encoding/json"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"testing"
)
func TestParse(t *testing.T) {
for i, tt := range parseTests {
p, err := Parse("test", []byte(tt.in))
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
if tt.out != nil {
compare(t, p, tt.out)
}
}
}
func TestParseTestdata(t *testing.T) {
// Test that files in the testdata directory can all be parsed.
// For this test we don't bother checking what the tree looks like.
// The printing tests will exercise that information.
testdata := os.Getenv("TEST_SRCDIR") + "/" + os.Getenv("TEST_WORKSPACE") + "/build/testdata"
outs, err := filepath.Glob(testdata + "/*")
if err != nil {
t.Fatal(err)
}
if len(outs) == 0 {
t.Fatal("Data set is empty:", testdata)
}
for _, out := range outs {
data, err := ioutil.ReadFile(out)
if err != nil {
t.Error(err)
continue
}
_, err = Parse(filepath.Base(out), data)
if err != nil {
t.Error(err)
}
}
}
// toJSON returns human-readable json for the given syntax tree.
// It is used as input to diff for comparing the actual syntax tree with the expected one.
func toJSON(v interface{}) string {
s, _ := json.MarshalIndent(v, "", "\t")
s = append(s, '\n')
return string(s)
}
// diff returns the output of running diff on b1 and b2.
func diff(b1, b2 []byte) (data []byte, err error) {
f1, err := ioutil.TempFile("", "testdiff")
if err != nil {
return nil, err
}
defer os.Remove(f1.Name())
defer f1.Close()
f2, err := ioutil.TempFile("", "testdiff")
if err != nil {
return nil, err
}
defer os.Remove(f2.Name())
defer f2.Close()
f1.Write(b1)
f2.Write(b2)
data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
if len(data) > 0 {
// diff exits with a non-zero status when the files don't match.
// Ignore that failure as long as we get output.
err = nil
}
return
}
// tdiff logs the diff output to t.Error.
func tdiff(t *testing.T, a, b string) {
data, err := diff([]byte(a), []byte(b))
if err != nil {
t.Error(err)
return
}
t.Error(string(data))
}
// Compare expected and actual values, failing and outputting a diff of the two values if they are not deeply equal
func compare(t *testing.T, actual, expected interface{}) {
if !reflect.DeepEqual(expected, actual) {
tdiff(t, toJSON(expected), toJSON(actual))
}
}
// Small tests checking that the parser returns exactly the right syntax tree.
// If out is nil, we only check that the parser accepts the file.
var parseTests = []struct {
in string
out *File
}{
{
in: `go_binary(name = "x"
)
`,
out: &File{
Path: "test",
Stmt: []Expr{
&CallExpr{
X: &LiteralExpr{
Start: Position{1, 1, 0},
Token: "go_binary",
},
ListStart: Position{1, 10, 9},
List: []Expr{
&BinaryExpr{
X: &LiteralExpr{
Start: Position{1, 11, 10},
Token: "name",
},
OpStart: Position{1, 16, 15},
Op: "=",
Y: &StringExpr{
Start: Position{1, 18, 17},
Value: "x",
End: Position{1, 21, 20},
Token: `"x"`,
},
},
},
End: End{Pos: Position{2, 1, 21}},
ForceMultiLine: true,
},
},
},
},
{
in: `foo.bar.baz(name = "x")`,
out: &File{
Path: "test",
Stmt: []Expr{
&CallExpr{
X: &DotExpr{
X: &DotExpr{
X: &LiteralExpr{
Start: Position{1, 1, 0},
Token: "foo",
},
Dot: Position{1, 4, 3},
NamePos: Position{1, 5, 4},
Name: "bar",
},
Dot: Position{1, 8, 7},
NamePos: Position{1, 9, 8},
Name: "baz",
},
ListStart: Position{1, 12, 11},
List: []Expr{
&BinaryExpr{
X: &LiteralExpr{
Start: Position{1, 13, 12},
Token: "name",
},
OpStart: Position{1, 18, 17},
Op: "=",
Y: &StringExpr{
Start: Position{1, 20, 19},
Value: "x",
End: Position{1, 23, 22},
Token: `"x"`,
},
},
},
End: End{Pos: Position{1, 23, 22}},
},
},
},
},
{
in: `package(default_visibility = ["//visibility:legacy_public"])
`,
},
{
in: `__unused__ = [ foo_binary(
name = "signed_release_%sdpi" % dpi,
srcs = [
":aps_release_%s" % dpi, # all of Maps, obfuscated, w/o NLP
":qlp_release_%s" % dpi, # the NLP
":check_binmode_release",
":check_remote_strings_release",
],
debug_key = "//foo:bar.baz",
resources = ":R_src_release_%sdpi" % dpi)
for dpi in dpis ]
`,
},
}
| [
"\"TEST_SRCDIR\"",
"\"TEST_WORKSPACE\""
] | [] | [
"TEST_WORKSPACE",
"TEST_SRCDIR"
] | [] | ["TEST_WORKSPACE", "TEST_SRCDIR"] | go | 2 | 0 | |
Pytorch/train.py | from __future__ import print_function
import time
import os
import argparse
import numpy as np
import cv2
from subprocess import Popen, PIPE
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader, Dataset
from tensorboardX import SummaryWriter
from augmentations import Augmentation_traininig
from loss import FocalLoss, OHEM_loss
from retinanet import RetinaNet
from datagen import ListDataset
from encoder import DataEncoder
from torch.autograd import Variable
# Indicate visible gpu device
device_ids = [2,3,4,6]
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, device_ids))
# Convert a command-line string flag (e.g. "yes", "true", "1") to a boolean
def str2bool(v):
    return v.lower() in ("yes", "y", "true", "t", "1")
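# e.g. str2bool("Yes") -> True, while other values such as "0" or "no" -> False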
# Dynamically adjust the learning rate: lr = cur_lr * gamma**step
def adjust_learning_rate(cur_lr, optimizer, gamma, step):
lr = cur_lr * (gamma ** (step))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
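# e.g. adjust_learning_rate(1e-3, optimizer, 0.5, 2) sets every param group's
# lr to 1e-3 * 0.5**2 == 2.5e-4 and returns that value.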
# usage:
# CUDA_VISIBLE_DEVICES= python train.py --root=./DB/ --dataset=PLATE --batch_size=8 --multi_scale=True --logdir=./logs --save_folder=./models --num_workers=6 --input_size=960 --resume=./models/ckpt_40000.pth
parser = argparse.ArgumentParser(description='PyTorch RetinaTextBoxes++ Training')
parser.add_argument('--root', default='./DB/',
type=str, help='root of the dataset dir')
parser.add_argument('--lr', default=1e-3,
type=float, help='learning rate')
parser.add_argument('--input_size', default=768,
type=int, help='Input size for training')
parser.add_argument('--batch_size', default=8,
type=int, help='Batch size for training')
parser.add_argument('--num_workers', default=8,
type=int, help='Number of workers used in data loading')
parser.add_argument('--resume',
type=str, help='resume from checkpoint')
parser.add_argument('--dataset', default='ICDAR2015',
type=str, help='select training dataset')
parser.add_argument('--multi_scale', default=False,
type=str2bool, help='Use multi-scale training')
parser.add_argument('--focal_loss', default=True,
type=str2bool, help='Use Focal loss or OHEM loss')
parser.add_argument('--logdir', default='./logs/',
type=str, help='Tensorboard log dir')
parser.add_argument('--max_iter', default=40000,
type=int, help='Number of training iterations')
parser.add_argument('--gamma', default=0.5,
type=float, help='Gamma update for SGD')
parser.add_argument('--save_interval', default=5000,
type=int, help='Frequency for saving checkpoint models')
parser.add_argument('--save_folder', default='./models/',
type=str, help='Location to save checkpoint models')
parser.add_argument('--evaluation', default=False,
type=str2bool, help='Evaluation during training')
parser.add_argument('--eval_step', default=1000,
type=int, help='Evaluation step')
parser.add_argument('--eval_device', default=2,
type=int, help='GPU device for evaluation')
parser.add_argument('--cls_thresh', default=0.5,
type=int, help='classification thresh')
parser.add_argument('--nms_thresh', default=0.25,
type=int, help='nms thresh')
args = parser.parse_args()
# confirm GPU & Focal loss use
assert torch.cuda.is_available(), 'Error: CUDA not found!'
assert args.focal_loss, "OHEM + ce_loss is not working... :("
# confirm existence of the folder for saving model and log if there are not exist, if not create them.
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
if not os.path.exists(args.logdir):
os.mkdir(args.logdir)
# Data load
print('==> Preparing data..')
trainset = ListDataset(root=args.root, dataset=args.dataset, train=True,
transform=Augmentation_traininig, input_size=args.input_size, multi_scale=args.multi_scale)
trainloader = DataLoader(trainset, batch_size=args.batch_size,
shuffle=True, collate_fn=trainset.collate_fn, num_workers=args.num_workers)
# Set model (focal_loss vs OHEM_CE loss)
# Backbone - se-resnet50
print('==>loss initializing...\n')
if args.focal_loss:
imagenet_pretrain = 'weights/retinanet_se50.pth'
criterion = FocalLoss()
num_classes = 1
else:
imagenet_pretrain = 'weights/retinanet_se50_OHEM.pth'
criterion = OHEM_loss()
num_classes = 2
# Training Detail option
stepvalues = (10000, 20000, 30000, 40000, 50000) if args.dataset in ["SynthText"] else (2000, 4000, 6000, 8000, 10000)
best_loss = float('inf') # best test loss
start_epoch = 0 # start from epoch 0 or last epoch
iteration = 0
cur_lr = args.lr
mean=(0.485,0.456,0.406)
var=(0.229,0.224,0.225)
step_index = 0
pEval = None
# Model
print('==>network establishing...\n')
net = RetinaNet(num_classes)
net.load_state_dict(torch.load(imagenet_pretrain))
# Resume training if there are any break off
if args.resume:
print('==> Resuming from checkpoint..', args.resume)
checkpoint = torch.load(args.resume)
net.load_state_dict(checkpoint['net'])
start_epoch = checkpoint['epoch']
iteration = checkpoint['iteration']
cur_lr = checkpoint['lr']
step_index = checkpoint['step_index']
print(" net: {0}\n start_epoch: {1}\n iteration: {2}\n current_lr: {3}\n step_index: {4}\n".format(net, start_epoch, iteration, cur_lr, step_index))
#optimizer.load_state_dict(state["optimizer"])
print('==>training detail...\n')
print("multi_scale : ", args.multi_scale)
print("input_size : ", args.input_size)
print("stepvalues : ", stepvalues)
print("start_epoch : ", start_epoch)
print("iteration : ", iteration)
print("cur_lr : ", cur_lr)
print("step_index : ", step_index)
print("gpu available : ", torch.cuda.is_available())
print("num_gpus : ", torch.cuda.device_count())
# Set data parallel training
net = torch.nn.DataParallel(net, device_ids=[0,1,2,3])
net.cuda()
# Training
print("==>training start...")
net.train()
# Freeze BN layer for pre-trained backbone
net.module.freeze_bn()
# Set optimizer -- SGD or Adam
optimizer = optim.SGD(net.parameters(), lr=cur_lr, momentum=0.9, weight_decay=1e-4) #optim.Adam(net.parameters(), lr=cur_lr)
# Anchor encoder/decoder for each feature map (used to decode predictions into boxes)
encoder = DataEncoder(cls_thresh=0.5, nms_thresh=0.2)
# Tensorboard visualize recorder
writer = SummaryWriter(logdir=args.logdir)
lossest = 1
save_lossest = False
t0 = time.time()
for epoch in range(start_epoch, 10000):
if iteration > args.max_iter:
break
for inputs, loc_targets, cls_targets in trainloader:
# prepare data and cls & loc label
inputs = Variable(inputs.cuda())
loc_targets = Variable(loc_targets.cuda())
cls_targets = Variable(cls_targets.cuda())
optimizer.zero_grad()
# predict result
loc_preds, cls_preds = net(inputs)
# get the loss between prediction and ground truth
loc_loss, cls_loss = criterion(loc_preds, loc_targets, cls_preds, cls_targets)
# total loss
loss = loc_loss + cls_loss
# bp
loss.backward()
# optimizing - stochastic gradient descendent
optimizer.step()
# Recording intermediate log
if iteration % 20 == 0:
t1 = time.time()
print('iter ' + repr(iteration) + ' (epoch ' + repr(epoch) + ') || loss: %.4f || loc_loss: %.4f || cls_loss: %.4f (Time : %.1f)'\
% (loss.sum().item(), loc_loss.sum().item(), cls_loss.sum().item(), (t1 - t0)))
t0 = time.time()
# record log and visualization by tensorboard
writer.add_scalar('loc_loss', loc_loss.sum().item(), iteration)
writer.add_scalar('cls_loss', cls_loss.sum().item(), iteration)
writer.add_scalar('loss', loss.sum().item(), iteration)
# show inference image in tensorboard
infer_img = np.transpose(inputs[0].cpu().numpy(), (1,2,0))
infer_img *= var
infer_img += mean
infer_img *= 255.
infer_img = np.clip(infer_img, 0, 255)
infer_img = infer_img.astype(np.uint8)
h, w, _ = infer_img.shape
boxes, labels, scores = encoder.decode(loc_preds[0], cls_preds[0], (w,h))
boxes = boxes.reshape(-1, 4, 2).astype(np.int32)
            if boxes.shape[0] != 0:
infer_img = cv2.polylines(infer_img, boxes, True, (0,255,0), 2)
writer.add_image('image', img_tensor=infer_img, global_step=iteration, dataformats='HWC')
writer.add_scalar('input_size', h, iteration)
writer.add_scalar('learning_rate', cur_lr, iteration)
t0 = time.time()
if loss.sum().item() < lossest:
lossest = loss.sum().item()
save_lossest = True
# Saving intermediate model
if iteration % args.save_interval == 0 and iteration > 0 or save_lossest == True:
print('Saving model state at iteration : ', iteration)
state = {
'net': net.module.state_dict(),
"optimizer": optimizer.state_dict(),
'iteration' : iteration,
'epoch': epoch,
'lr' : cur_lr,
'step_index' : step_index
}
model_file = "{0}/ckpt_{1}_loss_{2}.pth".format(args.save_folder, repr(iteration), lossest)
torch.save(state, model_file)
save_lossest = False
if iteration in stepvalues:
step_index += 1
cur_lr = adjust_learning_rate(cur_lr, optimizer, args.gamma, step_index)
if iteration > args.max_iter:
break
# Evaluation while training
if args.evaluation and iteration % args.eval_step == 0:
try:
if pEval is None:
print("Evaluation started at iteration {0} on {1}...".format(iteration, args.dataset))
eval_cmd = "CUDA_VISIBLE_DEVICES=" + str(args.eval_device) + \
" python eval.py" + \
" --tune_from=" + args.save_folder + 'ckpt_' + repr(iteration) + '.pth' + \
" --input_size=1024" + \
" --output_zip=result_temp1"
pEval = Popen(eval_cmd, shell=True, stdout=PIPE, stderr=PIPE)
elif pEval.poll() is not None:
(scoreString, stdErrData) = pEval.communicate()
hmean = float(str(scoreString).strip().split(":")[3].split(",")[0].split("}")[0].strip())
writer.add_scalar('test_hmean', hmean, iteration)
print("test_hmean for {}-th iter : {:.4f}".format(iteration, hmean))
if pEval is not None:
pEval.kill()
pEval = None
except Exception as e:
print("exception happened in evaluation ", e)
if pEval is not None:
pEval.kill()
pEval = None
iteration += 1 | [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
tests/test_fsl_utils_path.py | #!/usr/bin/env python
#
# test_fsl_utils_path.py - Tests functions in the fsl.utils.path module.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
from __future__ import print_function
import os
import os.path as op
import shutil
import tempfile
import pytest
import mock
import fsl.utils.path as fslpath
import fsl.data.image as fslimage
from . import testdir
def make_dummy_file(path):
with open(path, 'wt') as f:
f.write('{}\n'.format(op.basename(path)))
def make_dummy_image_file(path):
if path.endswith('.nii'): paths = [path]
elif path.endswith('.nii.gz'): paths = [path]
elif path.endswith('.img'): paths = [path, path[:-4] + '.hdr']
elif path.endswith('.hdr'): paths = [path, path[:-4] + '.img']
elif path.endswith('.img.gz'): paths = [path, path[:-7] + '.hdr.gz']
elif path.endswith('.hdr.gz'): paths = [path, path[:-7] + '.img.gz']
else: raise RuntimeError()
for path in paths:
make_dummy_file(path)
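# e.g. make_dummy_image_file('/tmp/image.img') writes both /tmp/image.img and
# /tmp/image.hdr, mimicking how ANALYZE images are stored as file pairs.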
def cleardir(dir):
for f in os.listdir(dir):
f = op.join(dir, f)
if op.isfile(f):
os.remove(f)
def test_deepest():
# path, suffixes, output
tests = [
('/blah.feat/foo.ica/fum.gfeat/moo.ica', ['.feat'], '/blah.feat'),
('/blah.feat/foo.ica/fum.gfeat/moo.ica', ['.feat', '.gfeat'], '/blah.feat/foo.ica/fum.gfeat'),
('/blah.feat/foo.ica/fum.gfeat/moo.ica', ['.gfeat'], '/blah.feat/foo.ica/fum.gfeat'),
('/blah.feat/foo.ica/fum.gfeat/moo.ica', ['.ica'], '/blah.feat/foo.ica/fum.gfeat/moo.ica'),
('/blah.feat/foo.ica/fum.gfeat/moo.ica', ['.bob'], None),
('/blah.feat/foo.ica/fum.gfeat/moo.bob', ['.ica'], '/blah.feat/foo.ica'),
('/blah.feat/foo.ica/fum.gfeat/moo.bob', ['.bob'], '/blah.feat/foo.ica/fum.gfeat/moo.bob'),
( 'blah.feat/foo.ica/fum.gfeat/moo.ica', ['.feat'], 'blah.feat'),
( 'blah.feat/foo.ica/fum.gfeat/moo.ica', ['.feat', '.gfeat'], 'blah.feat/foo.ica/fum.gfeat'),
( 'blah.feat/foo.ica/fum.gfeat/moo.ica', ['.gfeat'], 'blah.feat/foo.ica/fum.gfeat'),
( 'blah.feat/foo.ica/fum.gfeat/moo.ica', ['.ica'], 'blah.feat/foo.ica/fum.gfeat/moo.ica'),
( 'blah.feat/foo.ica/fum.gfeat/moo.ica', ['.bob'], None),
( 'blah.feat/foo.ica/fum.gfeat/moo.bob', ['.ica'], 'blah.feat/foo.ica'),
( 'blah.feat/foo.ica/fum.gfeat/moo.bob', ['.ica', '.bob'], 'blah.feat/foo.ica/fum.gfeat/moo.bob'),
('/', [], None),
('', [], None),
('///', [], None),
('/', ['blah'], None),
('', ['blah'], None),
('///', ['blah'], None),
]
for path, suffixes, output in tests:
path = op.join(*path.split('/'))
if output is not None:
output = op.join(*output.split('/'))
assert fslpath.deepest(path, suffixes) == output
def test_shallowest():
# path, suffixes, output
tests = [
('/blah.feat/foo.ica/fum.gfeat/moo.ica', ['.feat'], '/blah.feat'),
('/blah.feat/foo.ica/fum.gfeat/moo.ica', ['.feat', '.gfeat'], '/blah.feat'),
('/blah.feat/foo.ica/fum.gfeat/moo.ica', ['.gfeat'], '/blah.feat/foo.ica/fum.gfeat'),
('/blah.feat/foo.ica/fum.gfeat/moo.ica', ['.ica'], '/blah.feat/foo.ica'),
('/blah.feat/foo.ica/fum.gfeat/moo.ica', ['.bob'], None),
('/blah.feat/foo.ica/fum.gfeat/moo.bob', ['.ica'], '/blah.feat/foo.ica'),
('/blah.feat/foo.ica/fum.gfeat/moo.bob', ['.bob'], '/blah.feat/foo.ica/fum.gfeat/moo.bob'),
( 'blah.feat/foo.ica/fum.gfeat/moo.ica', ['.feat'], 'blah.feat'),
( 'blah.feat/foo.ica/fum.gfeat/moo.ica', ['.feat', '.gfeat'], 'blah.feat'),
( 'blah.feat/foo.ica/fum.gfeat/moo.ica', ['.gfeat'], 'blah.feat/foo.ica/fum.gfeat'),
( 'blah.feat/foo.ica/fum.gfeat/moo.ica', ['.ica'], 'blah.feat/foo.ica'),
( 'blah.feat/foo.ica/fum.gfeat/moo.ica', ['.bob'], None),
( 'blah.feat/foo.ica/fum.gfeat/moo.bob', ['.ica'], 'blah.feat/foo.ica'),
( 'blah.feat/foo.ica/fum.gfeat/moo.bob', ['.ica', '.bob'], 'blah.feat/foo.ica'),
(' blah.feat/foo.ica/fum.gfeat/moo.bob', ['.ica', '.bob'], 'blah.feat/foo.ica'),
('/', [], None),
('', [], None),
('///', [], None),
('/', ['blah'], None),
('', ['blah'], None),
('///', ['blah'], None),
]
for path, suffixes, output in tests:
path = op.join(*path.split('/'))
if output is not None:
output = op.join(*output.split('/'))
assert fslpath.shallowest(path, suffixes) == output
def test_allFiles():
create = [
'a/1',
'a/2',
'a/b/1',
'a/b/2',
'a/b/c/1',
'a/b/d/1',
]
create = [op.join(*c.split('/')) for c in create]
with testdir(create) as td:
assert (sorted(fslpath.allFiles('.')) ==
sorted([op.join('.', c) for c in create]))
assert (sorted(fslpath.allFiles(td)) ==
sorted([op.join(td, c) for c in create]))
def test_hasExt():
tests = [
('file.nii', ['.nii', '.gz'], True),
('file.nii.gz', ['.nii'], False),
('file.nii.gz', ['.nii', '.gz'], True),
('file.nii.gz', ['.nii.gz'], True),
('file.txt', ['.nii', '.gz'], False),
]
for path, aexts, expected in tests:
assert fslpath.hasExt(path, aexts) == expected
def test_addExt_imageFiles_mustExist_shouldPass():
"""Tests the addExt function where the path exists, and the inputs
are valid.
"""
groups = fslimage.FILE_GROUPS
allowedExts = fslimage.ALLOWED_EXTENSIONS
# (files_to_create, path, expected)
tests = [
# Single files
('file.nii', 'file', 'file.nii'),
('file.nii', 'file.nii', 'file.nii'),
('file.nii.gz', 'file', 'file.nii.gz'),
('file.nii.gz', 'file.nii.gz', 'file.nii.gz'),
('file.img', 'file', 'file.hdr'),
('file.img', 'file.hdr', 'file.hdr'),
('file.img', 'file.img', 'file.img'),
('file.img.gz', 'file', 'file.hdr.gz'),
('file.img.gz', 'file.hdr.gz', 'file.hdr.gz'),
('file.img.gz', 'file.img.gz', 'file.img.gz'),
# Multiple suffixes should be handled
('file.blob.nii', 'file.blob', 'file.blob.nii'),
('file.blob.nii', 'file.blob.nii', 'file.blob.nii'),
('file.blob.nii.gz', 'file.blob', 'file.blob.nii.gz'),
('file.blob.nii.gz', 'file.blob.nii.gz', 'file.blob.nii.gz'),
('file.blob.img', 'file.blob', 'file.blob.hdr'),
('file.blob.hdr', 'file.blob.hdr', 'file.blob.hdr'),
('file.blob.img', 'file.blob.img', 'file.blob.img'),
('file.blob.img.gz', 'file.blob', 'file.blob.hdr.gz'),
('file.blob.hdr.gz', 'file.blob.hdr.gz', 'file.blob.hdr.gz'),
('file.blob.img.gz', 'file.blob.img.gz', 'file.blob.img.gz'),
# Even if that suffix is a itself supported
# suffix (as long as the path is unambiguous)
('file.img.nii', 'file.img.nii', 'file.img.nii'),
('file.img.nii.gz', 'file.img.nii.gz', 'file.img.nii.gz'),
('file.img.hdr', 'file.img.hdr', 'file.img.hdr'),
('file.img.img', 'file.img.img', 'file.img.img'),
('file.img.hdr.gz', 'file.img.hdr.gz', 'file.img.hdr.gz'),
('file.img.img.gz', 'file.img.img.gz', 'file.img.img.gz'),
# Multiple files exist, but prefix is unambiguous
('file.nii file.nii.gz', 'file.nii', 'file.nii'),
('file.nii file.nii.gz', 'file.nii.gz', 'file.nii.gz'),
('file1.nii file2.nii.gz', 'file1', 'file1.nii'),
('file1.nii file2.nii.gz', 'file1.nii', 'file1.nii'),
('file1.nii file2.nii.gz', 'file2', 'file2.nii.gz'),
('file1.nii file2.nii.gz', 'file2.nii.gz', 'file2.nii.gz'),
('file.nii file.img', 'file.nii', 'file.nii'),
('file.nii file.img', 'file.img', 'file.img'),
('file.nii file.img', 'file.hdr', 'file.hdr'),
('file.img.gz file.img', 'file.img', 'file.img'),
('file.img.gz file.img', 'file.hdr', 'file.hdr'),
('file.img.gz file.img', 'file.img.gz', 'file.img.gz'),
('file.img.gz file.img', 'file.hdr.gz', 'file.hdr.gz'),
('file1.img.gz file2.img', 'file2', 'file2.hdr'),
('file1.img.gz file2.img', 'file2.img', 'file2.img'),
('file1.img.gz file2.img', 'file2.hdr', 'file2.hdr'),
('file1.img.gz file2.img', 'file1', 'file1.hdr.gz'),
('file1.img.gz file2.img', 'file1.img.gz', 'file1.img.gz'),
('file1.img.gz file2.img', 'file1.hdr.gz', 'file1.hdr.gz'),
('file1.nii file2.img', 'file1', 'file1.nii'),
('file1.nii file2.img', 'file1.nii', 'file1.nii'),
('file1.nii file2.img', 'file2', 'file2.hdr'),
('file1.nii file2.img', 'file2.hdr', 'file2.hdr'),
('file1.nii file2.img', 'file2.img', 'file2.img'),
('file1.img file2.img', 'file1', 'file1.hdr'),
('file1.img file2.img', 'file1.hdr', 'file1.hdr'),
('file1.img file2.img', 'file1.img', 'file1.img'),
('file1.img file2.img', 'file2', 'file2.hdr'),
('file1.img file2.img', 'file2.hdr', 'file2.hdr'),
('file1.img file2.img', 'file2.img', 'file2.img'),
]
workdir = tempfile.mkdtemp()
try:
for files_to_create, prefix, expected in tests:
files_to_create = files_to_create.split()
for f in files_to_create:
make_dummy_image_file(op.join(workdir, f))
print('files_to_create: ', files_to_create)
print('workdir: ', os.listdir(workdir))
print('prefix: ', prefix)
print('expected: ', expected)
result = fslpath.addExt(op.join(workdir, prefix),
allowedExts,
mustExist=True,
fileGroups=groups)
print('result: ', result)
assert result == op.join(workdir, expected)
cleardir(workdir)
finally:
shutil.rmtree(workdir)
def test_addExt_otherFiles_mustExist_shouldPass():
workdir = tempfile.mkdtemp()
# (files_to_create, path, allowedExts, filegroups, expected)
tests = [
# allowedExts is None, fileGroups is None
('file.a', 'file.a', '', [], 'file.a'),
('file.a file.b', 'file.a', '', [], 'file.a'),
('file.a file.b', 'file.b', '', [], 'file.b'),
# fileGroups is None
('file.a', 'file', '.a', [], 'file.a'),
('file.a', 'file.a', '.a', [], 'file.a'),
('file.a file.b', 'file', '.a', [], 'file.a'),
('file.a file.b', 'file.a', '.a', [], 'file.a'),
('file.a file.b', 'file.b', '.a .b', [], 'file.b'),
('file1.a file2.b', 'file1', '.a .b', [], 'file1.a'),
('file1.a file2.b', 'file1.a', '.a .b', [], 'file1.a'),
('file1.a file2.b', 'file2.b', '.a .b', [], 'file2.b'),
('file1.a file2.b', 'file2.b', '.a .b', [], 'file2.b'),
('file.a file.b', 'file', '.a .b', ['.a .b'], 'file.a'),
('file.a file.b', 'file', '.a .b', ['.b .a'], 'file.b'),
('file.a file.b', 'file.a', '.a .b', ['.a .b'], 'file.a'),
('file.a file.b', 'file.b', '.a .b', ['.a .b'], 'file.b'),
('file.a file.b', 'file.a', '.a .b', ['.b .a'], 'file.a'),
('file.a file.b', 'file.b', '.a .b', ['.b .a'], 'file.b'),
('file.a file.b file.c file.d', 'file', '.a .b', ['.a .b'], 'file.a'),
('file.a file.b file.c file.d', 'file', '.a .b', ['.b .a'], 'file.b'),
('file.a file.b file.c file.d', 'file.a', '.a .b', ['.a .b'], 'file.a'),
('file.a file.b file.c file.d', 'file.b', '.a .b', ['.a .b'], 'file.b'),
('file1.a file1.b file2.a file2.b', 'file1', '.a .b', ['.a .b'], 'file1.a'),
('file1.a file1.b file2.a file2.b', 'file1.a', '.a .b', ['.a .b'], 'file1.a'),
('file1.a file1.b file2.a file2.b', 'file1.b', '.a .b', ['.a .b'], 'file1.b'),
('file1.a file1.b file2.a file2.b', 'file2', '.a .b', ['.a .b'], 'file2.a'),
('file1.a file1.b file2.a file2.b', 'file2.a', '.a .b', ['.a .b'], 'file2.a'),
('file1.a file1.b file2.a file2.b', 'file2.b', '.a .b', ['.a .b'], 'file2.b'),
('file1.a file1.b file2.c file2.d', 'file1', '.a .b .c .d', ['.a .b', '.c .d'], 'file1.a'),
('file1.a file1.b file2.c file2.d', 'file1.a', '.a .b .c .d', ['.a .b', '.c .d'], 'file1.a'),
('file1.a file1.b file2.c file2.d', 'file1.b', '.a .b .c .d', ['.a .b', '.c .d'], 'file1.b'),
('file1.a file1.b file2.c file2.d', 'file2', '.a .b .c .d', ['.a .b', '.c .d'], 'file2.c'),
('file1.a file1.b file2.c file2.d', 'file2.c', '.a .b .c .d', ['.a .b', '.c .d'], 'file2.c'),
('file1.a file1.b file2.c file2.d', 'file2.d', '.a .b .c .d', ['.a .b', '.c .d'], 'file2.d'),
]
try:
for files_to_create, prefix, allowedExts, fileGroups, expected in tests:
files_to_create = files_to_create.split()
allowedExts = allowedExts.split()
fileGroups = [g.split() for g in fileGroups]
if len(allowedExts) == 0: allowedExts = None
if len(fileGroups) == 0: fileGroups = None
for f in files_to_create:
make_dummy_file(op.join(workdir, f))
print('files_to_create: ', files_to_create)
print('prefix: ', prefix)
print('allowedExts: ', allowedExts)
print('fileGroups: ', fileGroups)
print('workdir: ', os.listdir(workdir))
print('expected: ', expected)
result = fslpath.addExt(op.join(workdir, prefix),
allowedExts=allowedExts,
mustExist=True,
fileGroups=fileGroups)
print('result: ', result)
assert result == op.join(workdir, expected)
cleardir(workdir)
finally:
shutil.rmtree(workdir)
def test_addExt_imageFiles_mustExist_shouldFail():
"""Tests the addExt function with inputs that should cause it to raise an
error.
"""
fileGroups = fslimage.FILE_GROUPS
allowedExts = fslimage.ALLOWED_EXTENSIONS
# All of these should raise an error
# (files_to_create, path)
tests = [
# Invalid path
('', 'file.img'),
('file.hdr file.img', 'blob'),
('file.hdr.gz file.img.gz', 'file.img'),
('file.hdr file.img', 'file1'),
('file.hdr file.img', 'file1.im'),
('file.hdr file.img', 'filehdr'),
('file.hdr file.img', 'fileimg'),
('filehdr fileimg', 'file.hdr'),
('filehdr fileimg', 'file.img'),
('file.hdr fileimg', 'filehdr'),
('file.hdr fileimg', 'file.img'),
('filehdr file.img', 'fileimg'),
('filehdr file.img', 'file.hdr'),
# Unsupported type/invalid path
('file.blob', 'file'),
('file.blob', 'file.img'),
('file.blob', 'file.nii'),
('file.blob', 'file.blob'),
# Ambiguous path
('file.hdr file.img file.nii', 'file'),
('file.hdr file.img file.hdr.gz file.img.gz', 'file'),
# Incomplete file pairs
('file.hdr', 'file.img'),
('file.img', 'file.hdr'),
('file1.hdr file2.img', 'file1.img'),
('file1.hdr file2.img', 'file2.hdr'),
# Stupid file names
('file.img.nii.gz', 'file.img'),
('file.img.nii', 'file.img'),
('file.img.img', 'file.img'),
('file.img.img.gz', 'file.img'),
]
workdir = tempfile.mkdtemp()
try:
for files_to_create, prefix in tests:
cleardir(workdir)
files_to_create = files_to_create.split()
for f in files_to_create:
make_dummy_file(op.join(workdir, f))
print('files_to_create: ', files_to_create)
print('prefix: ', prefix)
print('workdir: ', os.listdir(workdir))
with pytest.raises(fslpath.PathError):
result = fslpath.addExt(op.join(workdir, prefix),
allowedExts=allowedExts,
mustExist=True,
fileGroups=fileGroups)
print('result: ', result)
finally:
shutil.rmtree(workdir)
def test_addExt_otherFiles_mustExist_shouldFail():
workdir = tempfile.mkdtemp()
# Invalid path
# Unsupported suffix
# (files_to_create, path, allowedExts, fileGroups)
tests = [
# Invalid path
('', 'file.a', '', []),
('file.b', 'file.a', '.a', []),
('file.b', 'file.a', '.a', []),
# No supported extensions/ambiguous
('file.a', 'file', '', []),
('file.a file.b', 'file', '', []),
('file.a file.b', 'file', '.a .b', []),
# Weird group
('file.a file.b', 'file', '.a .b', ['.a']),
# Multiple groups, ambiguous path
('file.a file.b file.c file.d', 'file', '.a .b .c .d', ['.a .b', '.c .d']),
]
try:
for files_to_create, prefix, allowedExts, fileGroups in tests:
cleardir(workdir)
files_to_create = files_to_create.split()
allowedExts = allowedExts.split()
fileGroups = [g.split() for g in fileGroups]
if len(allowedExts) == 0: allowedExts = None
if len(fileGroups) == 0: fileGroups = None
for f in files_to_create:
make_dummy_file(op.join(workdir, f))
print('files_to_create: ', files_to_create)
print('prefix: ', prefix)
print('workdir: ', os.listdir(workdir))
with pytest.raises(fslpath.PathError):
result = fslpath.addExt(op.join(workdir, prefix),
allowedExts=allowedExts,
mustExist=True,
fileGroups=fileGroups)
print('result: ', result)
finally:
shutil.rmtree(workdir)
pass
def test_addExt_noExist():
allowedExts = fslimage.ALLOWED_EXTENSIONS
# When mustExist=False, the addExt
# function does not consult fileGroups.
# So we are not bothering with them
# here.
# Prefix, defaultExt, allowedExts, expected
tests = [
# If the prefix already has a supported extension,
# it should be returned unchanged.
('file.img', None, allowedExts, 'file.img'),
('file.hdr', None, allowedExts, 'file.hdr'),
('file.nii', None, allowedExts, 'file.nii'),
('file.nii.gz', None, allowedExts, 'file.nii.gz'),
('file.img.gz', None, allowedExts, 'file.img.gz'),
('file.hdr.gz', None, allowedExts, 'file.hdr.gz'),
('file.blob.img', '.img', allowedExts, 'file.blob.img'),
('file.blob.img', '.img', None, 'file.blob.img'),
# If the file does not have a prefix,
# it should be given the default prefix
('file', 'img', allowedExts, 'fileimg'),
('file', '.img', allowedExts, 'file.img'),
('file', 'img', None, 'fileimg'),
('file', '.img', None, 'file.img'),
# Unrecognised prefixes should be ignored
('file.blob', 'img', allowedExts, 'file.blobimg'),
('file.blob', '.img', allowedExts, 'file.blob.img'),
('file.blob', 'img', None, 'file.blobimg'),
('file.blob', '.img', None, 'file.blob.img'),
# If no defaultExt or allowedExts, the
# prefix should be returned unchanged
('file', None, None, 'file'),
('file.blob', None, None, 'file.blob'),
]
for prefix, defaultExt, allowedExts, expected in tests:
assert fslpath.addExt(prefix,
allowedExts,
defaultExt=defaultExt,
mustExist=False) == expected
def test_addExt_unambiguous():
allowedExts = fslimage.ALLOWED_EXTENSIONS
fileGroups = fslimage.FILE_GROUPS
# files to create, prefix, file groups, allowedExts, defaultExt, expected,
tests = [
('file.img file.hdr', 'file.img', None, None, None, 'file.img'),
('file.img file.hdr', 'file.hdr', None, None, None, 'file.hdr'),
('file.img file.hdr', 'file.hdr', None, None, None, 'file.hdr'),
('file.img file.hdr', 'file', None, None, '.hdr', 'file.hdr'),
('file.img file.hdr', 'file', None, None, '.img', 'file.img'),
('file.img file.hdr', 'file', None, allowedExts, None, 'file.hdr file.img'),
('file.img file.hdr', 'file.img', None, allowedExts, None, 'file.img'),
('file.img file.hdr', 'file', fileGroups, allowedExts, None, 'file.hdr file.img'),
('file.img file.hdr', 'file.img', fileGroups, allowedExts, None, 'file.img'),
('file.img file.hdr', 'file', None, allowedExts, '.img', 'file.hdr file.img'),
('file.img file.hdr', 'file.img', None, allowedExts, '.img', 'file.img'),
('file.img file.hdr', 'file', fileGroups, allowedExts, '.img', 'file.hdr file.img'),
('file.img file.hdr', 'file.img', fileGroups, allowedExts, '.img', 'file.img'),
]
for create, prefix, groups, exts, defaultExt, expected in tests:
create = create .split()
expected = expected.split()
with testdir(create) as td:
result = fslpath.addExt(prefix,
allowedExts=exts,
fileGroups=groups,
defaultExt=defaultExt,
unambiguous=False)
assert sorted(expected) == sorted(result)
def test_removeExt():
allowedExts = fslimage.ALLOWED_EXTENSIONS
# If len(test) == 2, allowedExts is set from above
# Otherwise, it is set from the test tuple
tests = [
('blah', 'blah'),
('blah.blah', 'blah.blah'),
('blah.blah', 'blah', ['.blah']),
('blah.blah', 'blah.', ['blah']),
('blah.nii', 'blah'),
('blah.nii.gz', 'blah'),
('blah.img', 'blah'),
('blah.hdr', 'blah'),
('blah.img.gz', 'blah'),
('blah.nii.gz', 'blah.nii.gz', []),
('blah.nii.gz', 'blah.nii', ['.gz']),
('blah.nii.gz', 'blah.nii.gz', ['.nii']),
('blah.nii.gz', 'blah', ['.nii.gz']),
('blah.nii.gz', 'blah.', ['nii.gz']),
]
for test in tests:
path = test[0]
output = test[1]
if len(test) == 2: allowed = allowedExts
else: allowed = test[2]
assert fslpath.removeExt(path, allowed) == output
def test_getExt():
allowedExts = fslimage.ALLOWED_EXTENSIONS
# len(test) == 2 -> allowedExts set from above
# Otherwise, allowedExts set from test tuple
tests = [
('blah.blah', '.blah', None),
('blah.blah', '.blah', ['.blah']),
('blah.blah', 'blah', ['blah']),
('blah', '', None),
('blah.nii', '.nii', None),
('blah.nii.gz', '.gz', None),
('blah.nii', '.nii'),
('blah.nii.gz', '.nii.gz'),
('blah.hdr', '.hdr'),
('blah.img', '.img'),
('blah.img.gz', '.img.gz'),
('blah', ''),
('blah.blah', ''),
('blah.blah', '', ['bla']),
('blah.nii.gz', '', ['.nii']),
]
for test in tests:
filename = test[0]
output = test[1]
if len(test) == 2: allowed = allowedExts
else: allowed = test[2]
print(filename, '==', output)
assert fslpath.getExt(filename, allowed) == output
def test_splitExt():
allowedExts = fslimage.ALLOWED_EXTENSIONS
# len(test) == 2 -> allowedExts set from above
# Otherwise, allowedExts set from test tuple
tests = [
('blah', ('blah', '')),
('blah.blah', ('blah.blah', '')),
('blah.blah', ('blah', '.blah'), ['.blah']),
('blah.blah', ('blah.', 'blah'), ['blah']),
('blah.nii', ('blah', '.nii')),
('blah.nii.gz', ('blah', '.nii.gz')),
('blah.img', ('blah', '.img')),
('blah.hdr', ('blah', '.hdr')),
('blah.img.gz', ('blah', '.img.gz')),
('blah.nii.gz', ('blah.nii.gz', ''), []),
('blah.nii.gz', ('blah.nii', '.gz'), ['.gz']),
('blah.nii.gz', ('blah.nii.gz', ''), ['.nii']),
('blah.nii.gz', ('blah', '.nii.gz'), ['.nii.gz']),
('blah.nii.gz', ('blah.', 'nii.gz'), ['nii.gz']),
('blah.blah', ('blah', '.blah'), None),
('blah.blah', ('blah', '.blah'), ['.blah']),
('blah.blah', ('blah.', 'blah'), ['blah']),
('blah', ('blah', ''), None),
('blah.nii', ('blah', '.nii'), None),
('blah.nii.gz', ('blah.nii', '.gz'), None),
('blah.nii', ('blah', '.nii')),
('blah.nii.gz', ('blah', '.nii.gz')),
('blah.hdr', ('blah', '.hdr')),
('blah.img', ('blah', '.img')),
('blah.img.gz', ('blah', '.img.gz')),
('blah', ('blah', '')),
('blah.blah', ('blah.blah', '')),
('blah.blah', ('blah.blah', ''), ['bla']),
('blah.nii.gz', ('blah.nii.gz', ''), ['.nii']),
]
for test in tests:
filename = test[0]
outbase, outext = test[1]
if len(test) == 2: allowed = allowedExts
else: allowed = test[2]
print(filename, '==', (outbase, outext))
assert fslpath.splitExt(filename, allowed) == (outbase, outext)
# firstDot=True
tests = [
('blah', ('blah', '')),
('blah.blah', ('blah', '.blah')),
('blah.one.two', ('blah', '.one.two')),
('blah.one.two.three', ('blah', '.one.two.three')),
]
for f, exp in tests:
assert fslpath.splitExt(f, firstDot=True) == exp
def test_getFileGroup_imageFiles_shouldPass():
allowedExts = fslimage.ALLOWED_EXTENSIONS
groups = fslimage.FILE_GROUPS
# [(files_to_create, path, files_to_expect [, unambiguous]),
# ...
# ]
#
tests = [
('file.hdr file.img', 'file', 'file.hdr file.img'),
('file.hdr file.img', 'file.img', 'file.hdr file.img'),
('file.hdr file.img', 'file.hdr', 'file.hdr file.img'),
('file.hdr.gz file.img.gz', 'file', 'file.hdr.gz file.img.gz'),
('file.hdr.gz file.img.gz', 'file.img.gz', 'file.hdr.gz file.img.gz'),
('file.hdr.gz file.img.gz', 'file.hdr.gz', 'file.hdr.gz file.img.gz'),
('file.hdr file.img file.hdr.gz file.img.gz', 'file.hdr', 'file.hdr file.img'),
('file.hdr file.img file.hdr.gz file.img.gz', 'file.img', 'file.hdr file.img'),
('file.hdr file.img file.hdr.gz file.img.gz', 'file.hdr.gz', 'file.hdr.gz file.img.gz'),
('file.hdr file.img file.hdr.gz file.img.gz', 'file.hdr.gz', 'file.hdr.gz file.img.gz'),
('file.hdr file.img file.nii', 'file.img', 'file.hdr file.img'),
('file.hdr file.img file.nii', 'file.hdr', 'file.hdr file.img'),
('file.hdr file.img file.blob', 'file', 'file.hdr file.img'),
('file.hdr file.img file.blob', 'file.hdr', 'file.hdr file.img'),
('file.hdr file.img file.blob', 'file.img', 'file.hdr file.img'),
('file.nii', 'file', 'file.nii'),
('file.nii', 'file.nii', 'file.nii'),
('file.nii file.hdr file.img', 'file.nii', 'file.nii'),
('file.nii file.blob', 'file', 'file.nii'),
('file.nii file.blob', 'file.nii', 'file.nii'),
# The unambiguous arg defaults to
# False, so paths to incomplete
# file groups should still work.
('file.hdr', 'file', 'file.hdr'),
('file.hdr', 'file.hdr', 'file.hdr'),
# Unambigiuous paths, when
# unambiguous = True,
# should be ok.
('file.hdr file.img file.nii', 'file.nii', 'file.nii', True),
('file.hdr file.img file.nii', 'file.hdr', 'file.hdr file.img', True),
('file.hdr file.img file.nii', 'file.img', 'file.hdr file.img', True),
]
# TODO You need to add passing tests for unambiguous=True
workdir = tempfile.mkdtemp()
try:
for test in tests:
files_to_create = test[0]
path = test[1]
files_to_expect = test[2]
if len(test) == 4: unambiguous = test[3]
else: unambiguous = False
files_to_create = files_to_create.split()
files_to_expect = files_to_expect.split()
for fn in files_to_create:
with open(op.join(workdir, fn), 'wt') as f:
f.write('{}\n'.format(fn))
print()
print('files_to_create: ', files_to_create)
print('path: ', path)
print('files_to_expect: ', files_to_expect)
fullPaths = fslpath.getFileGroup(
op.join(workdir, path),
allowedExts=allowedExts,
fileGroups=groups,
fullPaths=True,
unambiguous=unambiguous)
exts = fslpath.getFileGroup(
op.join(workdir, path),
allowedExts=allowedExts,
fileGroups=groups,
fullPaths=False,
unambiguous=unambiguous)
assert sorted(fullPaths) == sorted([op.join(workdir, e) for e in files_to_expect])
assert sorted(exts) == sorted([fslpath.getExt(e, allowedExts) for e in files_to_expect])
cleardir(workdir)
finally:
shutil.rmtree(workdir)
def test_getFileGroup_otherFiles_shouldPass():
# (files_to_create, allowedExts, fileGroups, path, files_to_expect [, unambiguous])
tests = [
# allowedExts is None - incomplete paths are not allowed
('file.a', '', '', 'file.a', 'file.a'),
('file.a file.b', '', '', 'file.a', 'file.a'),
('file.a file.b', '', '', 'file.b', 'file.b'),
('file.a file.b', '', ['.a .b'], 'file.a', 'file.a file.b'),
('file.a file.b', '', ['.a .b'], 'file.b', 'file.a file.b'),
('file.a file.b file.c', '', ['.a .b .c'], 'file.a', 'file.a file.b file.c'),
('file.a file.b file.c', '', ['.a .b .c'], 'file.b', 'file.a file.b file.c'),
('file.a file.b file.c', '', ['.a .b .c'], 'file.c', 'file.a file.b file.c'),
('file.a file.b file.c file.d', '', ['.a .b', '.c .d'], 'file.a', 'file.a file.b'),
('file.a file.b file.c file.d', '', ['.a .b', '.c .d'], 'file.b', 'file.a file.b'),
('file.a file.b file.c file.d', '', ['.a .b', '.c .d'], 'file.c', 'file.c file.d'),
('file.a file.b file.c file.d', '', ['.a .b', '.c .d'], 'file.d', 'file.c file.d'),
# allowedExts != None - incomplete paths
# allowed, but must be unambiguous
('file.a', '.a', '', 'file', 'file.a'),
('file.a', '.a', '', 'file.a', 'file.a'),
('file.a file.b', '.a .b', '', 'file.a', 'file.a'),
('file.a file.b', '.a .b', '', 'file.b', 'file.b'),
('file1.a file2.b', '.a .b', '', 'file1', 'file1.a'),
('file1.a file2.b', '.a .b', '', 'file1.a', 'file1.a'),
('file1.a file2.b', '.a .b', '', 'file2', 'file2.b'),
('file1.a file2.b', '.a .b', '', 'file2.b', 'file2.b'),
('file.a file.b', '.a .b', ['.a .b'], 'file', 'file.a file.b'),
('file.a file.b', '.a .b', ['.a .b'], 'file.a', 'file.a file.b'),
('file.a file.b', '.a .b', ['.a .b'], 'file.b', 'file.a file.b'),
('file.a file.b file.c', '.a .b .c', ['.a .b .c'], 'file', 'file.a file.b file.c'),
('file.a file.b file.c', '.a .b .c', ['.a .b .c'], 'file.a', 'file.a file.b file.c'),
('file.a file.b file.c', '.a .b .c', ['.a .b .c'], 'file.b', 'file.a file.b file.c'),
('file.a file.b file.c', '.a .b .c', ['.a .b .c'], 'file.c', 'file.a file.b file.c'),
('file.a file.b file.c file.d', '.a .b .c .d', ['.a .b', '.c .d'], 'file.a', 'file.a file.b'),
('file.a file.b file.c file.d', '.a .b .c .d', ['.a .b', '.c .d'], 'file.b', 'file.a file.b'),
('file.a file.b file.c file.d', '.a .b .c .d', ['.a .b', '.c .d'], 'file.c', 'file.c file.d'),
('file.a file.b file.c file.d', '.a .b .c .d', ['.a .b', '.c .d'], 'file.d', 'file.c file.d'),
('file1.a file1.b file2.c file2.d', '.a .b .c .d', ['.a .b', '.c .d'], 'file1', 'file1.a file1.b'),
('file1.a file1.b file2.c file2.d', '.a .b .c .d', ['.a .b', '.c .d'], 'file1.a', 'file1.a file1.b'),
('file1.a file1.b file2.c file2.d', '.a .b .c .d', ['.a .b', '.c .d'], 'file1.b', 'file1.a file1.b'),
('file1.a file1.b file2.c file2.d', '.a .b .c .d', ['.a .b', '.c .d'], 'file2', 'file2.c file2.d'),
('file1.a file1.b file2.c file2.d', '.a .b .c .d', ['.a .b', '.c .d'], 'file2.c', 'file2.c file2.d'),
('file1.a file1.b file2.c file2.d', '.a .b .c .d', ['.a .b', '.c .d'], 'file2.d', 'file2.c file2.d'),
# incomplete group should be ok when
# unambiguous = False (the default)
('file.a', '.a .b', ['.a .b'], 'file', 'file.a'),
('file.a', '.a .b', ['.a .b'], 'file.a', 'file.a'),
# Unambiguous/complete group should
# be ok when unambiguous = True
('file.a file.b file.c', '.a .b .c', ['.a .b'], 'file.a', 'file.a file.b', True),
('file.a file.b file.c', '.a .b .c', ['.a .b'], 'file.b', 'file.a file.b', True),
('file.a file.b file.c', '.a .b .c', ['.a .b'], 'file.c', 'file.c', True),
]
workdir = tempfile.mkdtemp()
try:
for test in tests:
files_to_create = test[0]
allowedExts = test[1]
fileGroups = test[2]
path = test[3]
files_to_expect = test[4]
if len(test) == 6: unambiguous = test[5]
else: unambiguous = False
files_to_create = files_to_create.split()
allowedExts = allowedExts.split()
fileGroups = [g.split() for g in fileGroups]
files_to_expect = files_to_expect.split()
if len(allowedExts) == 0: allowedExts = None
if len(fileGroups) == 0: fileGroups = None
for fn in files_to_create:
with open(op.join(workdir, fn), 'wt') as f:
f.write('{}\n'.format(fn))
print()
print('files_to_create: ', files_to_create)
print('path: ', path)
print('allowedExts: ', allowedExts)
print('fileGroups: ', fileGroups)
print('files_to_expect: ', files_to_expect)
fullPaths = fslpath.getFileGroup(
op.join(workdir, path),
allowedExts=allowedExts,
fileGroups=fileGroups,
fullPaths=True,
unambiguous=unambiguous)
exts = fslpath.getFileGroup(
op.join(workdir, path),
allowedExts=allowedExts,
fileGroups=fileGroups,
fullPaths=False,
unambiguous=unambiguous)
assert sorted(fullPaths) == sorted([op.join(workdir, e) for e in files_to_expect])
assert sorted(exts) == sorted([fslpath.getExt(e, allowedExts) for e in files_to_expect])
cleardir(workdir)
finally:
shutil.rmtree(workdir)
def test_getFileGroup_shouldFail():
# All of these tests should raise an error
allowedExts = ' '.join(fslimage.ALLOWED_EXTENSIONS)
fileGroups = [' '.join(g) for g in fslimage.FILE_GROUPS]
# (files_to_create, path, allowedExts, fileGroups[, unambiguous])
tests = [
# Unsupported extension
('file.a', 'file.a', '.b', []),
# Incomplete path, and allowedExts is None
('file.a', 'file', '', []),
# non existent path
('file.a', 'file.b', '.a', []),
# ambigiuous
('file.a file.b file.c file.d', 'file', '.a .b .c .d', ['.a .b', '.c .d']),
# Incomplete group, when unambiguous is set to True
('file.a', 'file', '.a .b', ['.a .b'], True),
('file.a', 'file.a', '.a .b', ['.a .b'], True),
('file.hdr', 'file', allowedExts, fileGroups, True),
('file.hdr', 'file.hdr', allowedExts, fileGroups, True),
('file.img', 'file', allowedExts, fileGroups, True),
('file.img', 'file.img', allowedExts, fileGroups, True),
# Part of more than one group, when unambiguous is True
('file.a file.b file.c', 'file.a', '.a .b', ['.a .b', '.a .c'], True),
]
workdir = tempfile.mkdtemp()
try:
for test in tests:
files_to_create = test[0]
path = test[1]
allowedExts = test[2]
fileGroups = test[3]
if len(test) > 4: unambiguous = test[4]
else: unambiguous = False
files_to_create = files_to_create.split()
allowedExts = allowedExts.split()
fileGroups = [g.split() for g in fileGroups]
if len(allowedExts) == 0: allowedExts = None
if len(fileGroups) == 0: fileGroups = None
for fn in files_to_create:
with open(op.join(workdir, fn), 'wt') as f:
f.write('{}\n'.format(fn))
print()
print('files_to_create: ', files_to_create)
print('path: ', path)
print('allowedExts: ', allowedExts)
print('fileGroups: ', fileGroups)
with pytest.raises(fslpath.PathError):
fullPaths = fslpath.getFileGroup(
op.join(workdir, path),
allowedExts=allowedExts,
fileGroups=fileGroups,
fullPaths=True,
unambiguous=unambiguous)
print('fullPaths: ', fullPaths)
with pytest.raises(fslpath.PathError):
exts = fslpath.getFileGroup(
op.join(workdir, path),
allowedExts=allowedExts,
fileGroups=fileGroups,
fullPaths=False,
unambiguous=unambiguous)
print('exts: ', exts)
cleardir(workdir)
finally:
shutil.rmtree(workdir)
def test_removeDuplicates_imageFiles_shouldPass():
allowedExts = fslimage.ALLOWED_EXTENSIONS
groups = fslimage.FILE_GROUPS
# [(files_to_create,
# [(paths, expected),
# ...
# ]),
# ...
# ]
allTests = [
('file.hdr file.img', [
('file', 'file.hdr'),
('file file', 'file.hdr'),
('file file.hdr', 'file.hdr'),
('file file.img', 'file.hdr'),
('file.hdr file', 'file.hdr'),
('file.hdr file.hdr', 'file.hdr'),
('file.hdr file.img', 'file.hdr'),
('file.img file', 'file.hdr'),
('file.img file.hdr', 'file.hdr'),
('file.img file.img', 'file.hdr'),
('file.hdr', 'file.hdr'),
('file.img', 'file.hdr'),
('file.hdr file.img', 'file.hdr'),
('file.img file.hdr', 'file.hdr'),
]),
('file.hdr file.img file.blob', [
('file', 'file.hdr'),
('file.hdr', 'file.hdr'),
('file.img', 'file.hdr'),
('file.hdr file.img', 'file.hdr'),
('file.img file.hdr', 'file.hdr'),
]),
('file.hdr file.img file.nii', [
('file.hdr', 'file.hdr'),
('file.img', 'file.hdr'),
('file.hdr file.nii', 'file.hdr file.nii'),
('file.img file.nii', 'file.hdr file.nii'),
('file.hdr file.img', 'file.hdr'),
('file.img file.hdr', 'file.hdr'),
('file.img file.hdr', 'file.hdr'),
('file.hdr file.img file.nii', 'file.hdr file.nii'),
('file.img file.hdr file.nii', 'file.hdr file.nii'),
('file.img file.hdr file.nii', 'file.hdr file.nii'),
]),
('001.hdr 001.img 002.hdr 002.img 003.hdr 003.img', [
('001 002 003', '001.hdr 002.hdr 003.hdr'),
('001.hdr 002.hdr 003.hdr', '001.hdr 002.hdr 003.hdr'),
('001.img 002.img 003.img', '001.hdr 002.hdr 003.hdr'),
('001.hdr 001.img 002.hdr 002.img 003.img', '001.hdr 002.hdr 003.hdr'),
('001.hdr 001.img 002.hdr 002.img 003.hdr 003.img', '001.hdr 002.hdr 003.hdr'),
('001.img 001.hdr 002.img 002.hdr 003.img 003.hdr', '001.hdr 002.hdr 003.hdr'),
])
]
workdir = tempfile.mkdtemp()
try:
for files_to_create, tests in allTests:
files_to_create = files_to_create.split()
for fn in files_to_create:
with open(op.join(workdir, fn), 'wt') as f:
f.write('{}\n'.format(fn))
for paths, expected in tests:
paths = paths.split()
expected = expected.split()
print()
print('files_to_create: ', files_to_create)
print('paths: ', paths)
print('expected: ', expected)
paths = [op.join(workdir, p) for p in paths]
result = fslpath.removeDuplicates(paths, allowedExts, groups)
print('result: ', result)
assert result == [op.join(workdir, e) for e in expected]
cleardir(workdir)
finally:
shutil.rmtree(workdir)
def test_removeDuplicates_otherFiles_shouldPass():
# files_to_create, paths, allowedExts, fileGroups, expected
tests = [
# allowedExts is None, but paths are unambiguous
('file.a file.b', 'file.a file.b', '', [], 'file.a file.b'),
# Returned path should be the first in the group
('file.a file.b', 'file.a file.b', '', ['.a .b'], 'file.a'),
('file.a file.b', 'file.a file.b', '', ['.b .a'], 'file.b'),
('file.a file.b file.c', 'file.a file.b file.c', '', ['.a .b'], 'file.a file.c'),
('file.a file.b file.c', 'file.a file.b file.c', '', ['.b .a'], 'file.b file.c'),
('file.a file.b file.c', 'file.a file.b file.c', '', ['.a .b .c'], 'file.a'),
('file.a file.b file.c', 'file.a file.b file.c', '', ['.a .b .c'], 'file.a'),
('file.a file.b file.c', 'file.a file.b file.c', '', ['.a .b .c'], 'file.a'),
('file.a file.b file.c file.d', 'file.a file.b file.c file.d', '', ['.a .b', '.c .d'], 'file.a file.c'),
('file1.a file1.b file2.a file2.b', 'file1.a file1.b file2.a file2.b', '', ['.a .b'], 'file1.a file2.a'),
# Incomplete paths (but are unambiguous because of allowedExts)
('file.a' , 'file', '.a', [], 'file.a'),
('file.a' , 'file.a', '.a', [], 'file.a'),
('file.a file.b', 'file.a', '.a', [], 'file.a'),
('file.a file.b', 'file.a file.b', '.a .b', [], 'file.a file.b'),
('file.a file.b', 'file', '.a .b', ['.a .b'], 'file.a'),
('file.a file.b', 'file file', '.a .b', ['.a .b'], 'file.a'),
('file.a file.b', 'file file.a file.b', '.a .b', ['.a .b'], 'file.a'),
('file1.a file1.b file2.a file2.b', 'file1 file1.a file2 file2.a', '.a .b', ['.a .b'], 'file1.a file2.a'),
('file1.a file1.b file2.a file2.b', 'file1 file2', '.a .b', ['.a .b'], 'file1.a file2.a'),
('file1.a file1.b file2.a file2.b', 'file1 file1.a file2', '.a .b', ['.a .b'], 'file1.a file2.a'),
# no file groups - should still work
('file1.a file1.b file2.a file2.b', 'file1.a', '', [], 'file1.a'),
('file1.a file1.b file2.a file2.b', 'file1.a file1.b', '', [], 'file1.a file1.b'),
('file1.a file1.b file2.a file2.b', 'file1.a file2.a', '', [], 'file1.a file2.a'),
]
workdir = tempfile.mkdtemp()
try:
for files_to_create, paths, allowedExts, fileGroups, expected in tests:
files_to_create = files_to_create.split()
paths = paths.split()
allowedExts = allowedExts.split()
fileGroups = [g.split() for g in fileGroups]
expected = expected.split()
if len(allowedExts) == 0: allowedExts = None
if len(fileGroups) == 0: fileGroups = None
for f in files_to_create:
make_dummy_file(op.join(workdir, f))
print('files_to_create: {}'.format(files_to_create))
print('paths: {}'.format(paths))
print('allowedExts: {}'.format(allowedExts))
print('fileGroups: {}'.format(fileGroups))
print('workdir: {}'.format(os.listdir(workdir)))
print('expected: {}'.format(expected))
result = fslpath.removeDuplicates([op.join(workdir, p) for p in paths],
allowedExts=allowedExts,
fileGroups=fileGroups)
print('result: {}'.format(result))
assert result == [op.join(workdir, e) for e in expected]
cleardir(workdir)
finally:
shutil.rmtree(workdir)
def test_removeDuplicates_shouldFail():
# (files_to_create, paths, allowedExts, fileGroups)
tests = [
# Invalid path(s)
('', 'file.a', '', []),
('file.a', 'file.b', '', []),
('file.a', 'file.b file.c', '', []),
('file.a', 'file', '', []),
('file.a', 'file.b', '.a .b', ['.a .b']),
# Unsupported extension
('file.a', 'file.a', '.b', []),
# Ambiguous
('file.a file.b', 'file', '.a .b', []),
('file.a file.b file.c', 'file file.c', '.a .b .c', ['.a .b']),
]
workdir = tempfile.mkdtemp()
try:
for files_to_create, path, allowedExts, fileGroups in tests:
cleardir(workdir)
files_to_create = files_to_create.split()
allowedExts = allowedExts.split()
fileGroups = [g.split() for g in fileGroups]
if len(allowedExts) == 0: allowedExts = None
if len(fileGroups) == 0: fileGroups = None
for fn in files_to_create:
with open(op.join(workdir, fn), 'wt') as f:
f.write('{}\n'.format(fn))
print()
print('files_to_create: ', files_to_create)
print('path: ', path)
print('allowedExts: ', allowedExts)
print('fileGroups: ', fileGroups)
with pytest.raises(fslpath.PathError):
result = fslpath.removeDuplicates(path,
allowedExts=allowedExts,
fileGroups=fileGroups)
print('result: ', result)
finally:
shutil.rmtree(workdir)
def test_uniquePrefix():
contents = """
100307.32k_fs_LR.wb.spec
100307.ArealDistortion_FS.32k_fs_LR.dscalar.nii
100307.ArealDistortion_MSMSulc.32k_fs_LR.dscalar.nii
100307.BA.32k_fs_LR.dlabel.nii
100307.L.ArealDistortion_FS.32k_fs_LR.shape.gii
100307.L.ArealDistortion_MSMSulc.32k_fs_LR.shape.gii
100307.L.BA.32k_fs_LR.label.gii
100307.L.MyelinMap.32k_fs_LR.func.gii
100307.L.MyelinMap_BC.32k_fs_LR.func.gii
100307.L.SmoothedMyelinMap.32k_fs_LR.func.gii
100307.L.SmoothedMyelinMap_BC.32k_fs_LR.func.gii
100307.L.aparc.32k_fs_LR.label.gii
100307.L.aparc.a2009s.32k_fs_LR.label.gii
100307.L.atlasroi.32k_fs_LR.shape.gii
100307.L.corrThickness.32k_fs_LR.shape.gii
100307.L.curvature.32k_fs_LR.shape.gii
100307.L.flat.32k_fs_LR.surf.gii
100307.L.inflated.32k_fs_LR.surf.gii
100307.L.midthickness.32k_fs_LR.surf.gii
100307.L.pial.32k_fs_LR.surf.gii
100307.L.sphere.32k_fs_LR.surf.gii
100307.L.sulc.32k_fs_LR.shape.gii
100307.L.thickness.32k_fs_LR.shape.gii
100307.L.very_inflated.32k_fs_LR.surf.gii
100307.L.white.32k_fs_LR.surf.gii
100307.MyelinMap.32k_fs_LR.dscalar.nii
100307.MyelinMap_BC.32k_fs_LR.dscalar.nii
100307.R.ArealDistortion_FS.32k_fs_LR.shape.gii
100307.R.ArealDistortion_MSMSulc.32k_fs_LR.shape.gii
100307.R.BA.32k_fs_LR.label.gii
100307.R.MyelinMap.32k_fs_LR.func.gii
100307.R.MyelinMap_BC.32k_fs_LR.func.gii
100307.R.SmoothedMyelinMap.32k_fs_LR.func.gii
100307.R.SmoothedMyelinMap_BC.32k_fs_LR.func.gii
100307.R.aparc.32k_fs_LR.label.gii
100307.R.aparc.a2009s.32k_fs_LR.label.gii
100307.R.atlasroi.32k_fs_LR.shape.gii
100307.R.corrThickness.32k_fs_LR.shape.gii
100307.R.curvature.32k_fs_LR.shape.gii
100307.R.flat.32k_fs_LR.surf.gii
100307.R.inflated.32k_fs_LR.surf.gii
100307.R.midthickness.32k_fs_LR.surf.gii
100307.R.pial.32k_fs_LR.surf.gii
100307.R.sphere.32k_fs_LR.surf.gii
100307.R.sulc.32k_fs_LR.shape.gii
100307.R.thickness.32k_fs_LR.shape.gii
100307.R.very_inflated.32k_fs_LR.surf.gii
100307.R.white.32k_fs_LR.surf.gii
100307.SmoothedMyelinMap.32k_fs_LR.dscalar.nii
100307.SmoothedMyelinMap_BC.32k_fs_LR.dscalar.nii
100307.aparc.32k_fs_LR.dlabel.nii
100307.aparc.a2009s.32k_fs_LR.dlabel.nii
100307.corrThickness.32k_fs_LR.dscalar.nii
100307.curvature.32k_fs_LR.dscalar.nii
100307.sulc.32k_fs_LR.dscalar.nii
100307.thickness.32k_fs_LR.dscalar.nii
""".split()
# (filename, expected_result)
tests = [
('100307.32k_fs_LR.wb.spec', '100307.3'),
('100307.ArealDistortion_FS.32k_fs_LR.dscalar.nii', '100307.ArealDistortion_F'),
('100307.L.ArealDistortion_FS.32k_fs_LR.shape.gii', '100307.L.ArealDistortion_F'),
('100307.L.flat.32k_fs_LR.surf.gii', '100307.L.f'),
('100307.R.flat.32k_fs_LR.surf.gii', '100307.R.f'),
('100307.MyelinMap.32k_fs_LR.dscalar.nii', '100307.MyelinMap.'),
('100307.SmoothedMyelinMap.32k_fs_LR.dscalar.nii', '100307.SmoothedMyelinMap.'),
('100307.sulc.32k_fs_LR.dscalar.nii', '100307.s'),
]
workdir = tempfile.mkdtemp()
try:
for fname in contents:
with open(op.join(workdir, fname), 'wt') as f:
f.write(fname)
for filename, expected in tests:
expected = op.join(workdir, expected)
result = fslpath.uniquePrefix(op.join(workdir, filename))
assert result == expected
# test that an error is raised on an invalid path
with pytest.raises(fslpath.PathError):
fslpath.uniquePrefix(op.join(workdir, 'not-a-valid-path'))
with pytest.raises(fslpath.PathError):
fslpath.uniquePrefix(op.join(workdir, '100307'))
finally:
shutil.rmtree(workdir)
def test_commonBase():
tests = [
('/',
['/a/b/c',
'/d/e',
'/f/g/h/i']),
('/a',
['/a/b/c',
'/a/d/e/f/g',
'/a/d/h/g/h/i']),
('a',
['a/b/c/d',
'a/e/f/g/h',
'a/i/j/k/'])
]
for exp, paths in tests:
assert fslpath.commonBase(paths) == exp
failtests = [
['a/b/c', 'd/e/f'],
['/a/b/c', 'd/e/f'],
['a', 'b/c/d']]
for ft in failtests:
with pytest.raises(fslpath.PathError):
fslpath.commonBase(ft)
def test_wslpath():
assert fslpath.wslpath('c:\\Users\\Fishcake\\image.nii.gz') == '/mnt/c/Users/Fishcake/image.nii.gz'
assert fslpath.wslpath('--input=x:\\transfers\\scratch\\image_2.nii') == '--input=/mnt/x/transfers/scratch/image_2.nii'
assert fslpath.wslpath('\\\\wsl$\\centos 7\\users\\fsl\\file.nii') == '/users/fsl/file.nii'
assert fslpath.wslpath('--file=\\\\wsl$\\centos 7\\home\\fsl\\img.nii.gz') == '--file=/home/fsl/img.nii.gz'
assert fslpath.wslpath('\\\\wsl$/centos 7/users\\fsl\\file.nii') == '/users/fsl/file.nii'
def test_winpath():
"""
See comment for ``test_fslwsl`` for why we are overwriting FSLDIR
"""
with mock.patch.dict('os.environ', **{ 'FSLDIR' : '\\\\wsl$\\my cool linux distro v2.0\\usr\\local\\fsl'}):
assert fslpath.winpath("/home/fsl/myfile.dat") == '\\\\wsl$\\my cool linux distro v2.0\\home\\fsl\\myfile.dat'
with mock.patch.dict('os.environ', **{ 'FSLDIR' : '/opt/fsl'}):
assert fslpath.winpath("/home/fsl/myfile.dat") == '/home/fsl/myfile.dat'
| [] | [] | [] | [] | [] | python | 0 | 0 | |
dynamodb/dynamodb.go | package dynamodb
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodbstreams"
"github.com/guregu/dynamo"
"github.com/mumoshu/crdb/api"
"github.com/mumoshu/crdb/dynamodb/awssession"
"github.com/mumoshu/crdb/dynamodb/stream"
"github.com/mumoshu/crdb/framework"
"os"
"time"
)
const HashKeyName = "name_hash_key"
type SingleResourceDB interface {
Get(resource, name string, selectors []string, output string, watch bool) (api.Resources, error)
Wait(resource, name, query, output string, timeout time.Duration, logs bool) error
Apply(file string) error
Delete(resource, name string) error
}
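// Usage sketch (illustrative only, not part of the original file): the config
// file name "crdb.yaml", the namespace "default" and the resource/name values
// below are assumptions for demonstration.
//
//	db, err := NewDB("crdb.yaml", "default")
//	if err != nil {
//		// handle error
//	}
//	resources, err := db.Get("users", "alice", nil, "yaml", false)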
type dynamoResourceDB struct {
databaseName string
db *dynamo.DB
logs *cwlogs
session *session.Session
namespace string
resourceDefs []api.CustomResourceDefinition
}
func (p *dynamoResourceDB) tablePrefix() string {
return fmt.Sprintf("%s%s", databasePrefix, p.databaseName)
}
func (p *dynamoResourceDB) tableNameForResourceNamed(resource string) string {
if resource == crdName {
return p.globalTableName(resource)
}
return p.namespacedTableName(resource)
}
func (p *dynamoResourceDB) namespacedTableName(resource string) string {
return fmt.Sprintf("%s-%s-%s", p.tablePrefix(), p.namespace, resource)
}
func (p *dynamoResourceDB) tableForResourceNamed(resourceName string) dynamo.Table {
return p.db.Table(p.tableNameForResourceNamed(resourceName))
}
func (p *dynamoResourceDB) namespacedTable(resource *api.CustomResourceDefinition) dynamo.Table {
return p.tableForResourceNamed(resource.Metadata.Name)
}
func (p *dynamoResourceDB) globalTableName(resource string) string {
return globalTableName(p.databaseName, resource)
}
func globalTableName(database, resource string) string {
return fmt.Sprintf("%s%s-%s", databasePrefix, database, resource)
}
func partitionKey(name string) string {
// We split tables by namespaces and resources rather than partitioning,
// so that the cost of listing all the resources within the namespace is lowest, and the isolation level is maximum.
// Also, we aren't write-heavy so not adding random suffixes.
// See https://aws.amazon.com/jp/blogs/database/choosing-the-right-dynamodb-partition-key/
return name
}
func newDefaultDynamoDBClient() (*dynamo.DB, error) {
sess, err := awssession.New(os.Getenv("AWSDEBUG") != "")
if err != nil {
return nil, err
}
return dynamo.New(sess), nil
}
func newDynamoDBClient() (*dynamo.DB, error) {
sess, err := awssession.New(os.Getenv("AWSDEBUG") != "")
if err != nil {
return nil, err
}
return dynamo.New(sess), nil
}
func (p *dynamoResourceDB) streamSubscriberForTable(table string) (*stream.StreamSubscriber, error) {
cfg := aws.NewConfig()
streamSvc := dynamodbstreams.New(p.session, cfg)
dynamoSvc := dynamodb.New(p.session, cfg)
return stream.NewStreamSubscriber(dynamoSvc, streamSvc, table), nil
}
func (p *dynamoResourceDB) streamForTable(table string) (<-chan *dynamodbstreams.Record, <-chan error, error) {
subscriber, err := p.streamSubscriberForTable(table)
if err != nil {
return nil, nil, err
}
ch, errch := subscriber.GetStreamDataAsync()
return ch, errch, nil
}
func (p *dynamoResourceDB) streamForResourceNamed(resourceName string) (<-chan *dynamodbstreams.Record, <-chan error, error) {
return p.streamForTable(p.tableNameForResourceNamed(resourceName))
}
func NewDB(configFile string, namespace string) (SingleResourceDB, error) {
config, err := framework.LoadConfigFromYamlFile(configFile)
if err != nil {
return nil, err
}
sess, err := awssession.New(os.Getenv("AWSDEBUG") != "")
if err != nil {
return nil, err
}
db := dynamo.New(sess)
logs, err := newLogs(config, namespace, sess)
if err != nil {
return nil, err
}
//fmt.Fprintf(os.Stderr, "%+v\n", config)
return &dynamoResourceDB{
databaseName: config.Metadata.Name,
db: db,
logs: logs,
session: sess,
namespace: namespace,
resourceDefs: config.Spec.CustomResourceDefinitions,
}, nil
}
| [
"\"AWSDEBUG\"",
"\"AWSDEBUG\"",
"\"AWSDEBUG\""
] | [] | [
"AWSDEBUG"
] | [] | ["AWSDEBUG"] | go | 1 | 0 | |
bootstrap.py | #!/usr/bin/env python3
# coding: utf8
"""
Bootstrap helps you to test scripts without installing them
by patching your PYTHONPATH on the fly
example: ./bootstrap.py ipython
"""
__authors__ = ["Frรฉdรฉric-Emmanuel Picca", "Jรฉrรดme Kieffer"]
__contact__ = "jerome.kieffer@esrf.eu"
__license__ = "MIT"
__date__ = "30/09/2020"
import argparse
import distutils.util
import logging
import os
import subprocess
import sys
import tempfile
logging.basicConfig()
logger = logging.getLogger("bootstrap")
def is_debug_python():
"""Returns true if the Python interpreter is in debug mode."""
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
if sysconfig.get_config_var("Py_DEBUG"):
return True
return hasattr(sys, "gettotalrefcount")
def _distutils_dir_name(dname="lib"):
"""
Returns the name of a distutils build directory
"""
platform = distutils.util.get_platform()
architecture = "%s.%s-%i.%i" % (dname, platform,
sys.version_info[0], sys.version_info[1])
if is_debug_python():
architecture += "-pydebug"
return architecture
def _distutils_scripts_name():
"""Return the name of the distrutils scripts sirectory"""
f = "scripts-{version[0]}.{version[1]}"
return f.format(version=sys.version_info)
def _get_available_scripts(path):
res = []
try:
res = " ".join([s.rstrip('.py') for s in os.listdir(path)])
except OSError:
res = ["no script available, did you ran "
"'python setup.py build' before bootstrapping ?"]
return res
if sys.version_info[0] >= 3: # Python3
def execfile(fullpath, globals=None, locals=None):
"Python3 implementation for execfile"
with open(fullpath) as f:
try:
data = f.read()
except UnicodeDecodeError:
raise SyntaxError("Not a Python script")
code = compile(data, fullpath, 'exec')
exec(code, globals, locals)
def run_file(filename, argv):
"""
Execute a script trying first to use execfile, then a subprocess
:param str filename: Script to execute
:param list[str] argv: Arguments passed to the filename
"""
full_args = [filename]
full_args.extend(argv)
try:
logger.info("Execute target using exec")
# execfile is considered as a local call.
# Providing globals() as locals will force to feed the file into
# globals() (for examples imports).
# Without this any function call from the executed file loses imports
try:
old_argv = sys.argv
sys.argv = full_args
logger.info("Patch the sys.argv: %s", sys.argv)
logger.info("Executing %s.main()", filename)
print("########### EXECFILE ###########")
module_globals = globals().copy()
module_globals['__file__'] = filename
execfile(filename, module_globals, module_globals)
finally:
sys.argv = old_argv
except SyntaxError as error:
logger.error(error)
logger.info("Execute target using subprocess")
env = os.environ.copy()
env.update({"PYTHONPATH": LIBPATH + os.pathsep + os.environ.get("PYTHONPATH", ""),
"PATH": os.environ.get("PATH", "")})
print("########### SUBPROCESS ###########")
run = subprocess.Popen(full_args, shell=False, env=env)
run.wait()
def run_entry_point(entry_point, argv):
"""
Execute an entry_point using the current python context
(http://setuptools.readthedocs.io/en/latest/setuptools.html#automatic-script-creation)
:param str entry_point: A string identifying a function from a module
(NAME = PACKAGE.MODULE:FUNCTION [EXTRA])
"""
import importlib
elements = entry_point.split("=")
target_name = elements[0].strip()
elements = elements[1].split(":")
module_name = elements[0].strip()
# Take care of entry_point optional "extra" requirements declaration
function_name = elements[1].split()[0].strip()
logger.info("Execute target %s (function %s from module %s) using importlib", target_name, function_name, module_name)
full_args = [target_name]
full_args.extend(argv)
try:
old_argv = sys.argv
sys.argv = full_args
print("########### IMPORTLIB ###########")
module = importlib.import_module(module_name)
if hasattr(module, function_name):
func = getattr(module, function_name)
func()
else:
logger.info("Function %s not found", function_name)
finally:
sys.argv = old_argv
def find_executable(target):
"""Find a filename from a script name.
- Check the script name as file path,
- Then checks if the name is a target of the setup.py
- Then search the script from the PATH environment variable.
:param str target: Name of the script
:returns: Returns a tuple: kind, name.
"""
if os.path.isfile(target):
return ("path", os.path.abspath(target))
# search the file from setup.py
import setup
config = setup.get_project_configuration(dry_run=True)
# scripts from project configuration
if "scripts" in config:
for script_name in config["scripts"]:
if os.path.basename(script_name) == target:
return ("path", os.path.abspath(script_name))
# entry-points from project configuration
if "entry_points" in config:
for kind in config["entry_points"]:
for entry_point in config["entry_points"][kind]:
elements = entry_point.split("=")
name = elements[0].strip()
if name == target:
return ("entry_point", entry_point)
# search the file from env PATH
for dirname in os.environ.get("PATH", "").split(os.pathsep):
path = os.path.join(dirname, target)
if os.path.isfile(path):
return ("path", path)
return None, None
def main(argv):
parser = argparse.ArgumentParser(
prog="bootstrap", usage="./bootstrap.py <script>", description=__doc__)
parser.add_argument("script", nargs=argparse.REMAINDER)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-m", nargs=argparse.REMAINDER, dest='module',
help="run library module as a script (terminates option list)")
group.add_argument(
"-j", "--jupyter", action='store_true',
help="Start jupyter notebook rather than IPython console")
options = parser.parse_args()
if options.jupyter:
if options.script:
logger.error("-j, --jupyter is mutually exclusive with other options")
parser.print_help()
return
logger.info("Start Jupyter notebook")
from notebook.notebookapp import main as notebook_main
os.environ["PYTHONPATH"] = LIBPATH + os.pathsep + os.environ.get("PYTHONPATH", "")
filename = os.path.join(LIBPATH, '.__bootstrap_pythonstartup.py')
with open(filename, 'w') as fp:
fp.write('import sys; sys.path.pop(0)')
os.environ["PYTHONSTARTUP"] = filename
notebook_main(argv=[])
try:
os.remove(filename)
except OSError:
logger.error("Cannot delete temporary file: %s", filename)
elif options.script:
logger.info("Executing %s from source checkout", options.script)
script = options.script[0]
argv = options.script[1:]
kind, target = find_executable(script)
if kind == "path":
run_file(target, argv)
elif kind == "entry_point":
run_entry_point(target, argv)
else:
logger.error("Script %s not found", options.script)
elif options.module:
logging.info("Running module %s", options.module)
import runpy
module = options.module[0]
try:
old = sys.argv
sys.argv = [None] + options.module[1:]
runpy.run_module(module, run_name="__main__", alter_sys=True)
finally:
sys.argv = old
else:
logging.info("Running IPython by default")
logger.info("Patch the sys.argv: %s", sys.argv)
sys.path.insert(2, "")
try:
from IPython import start_ipython
except Exception as err:
logger.error("Unable to execute iPython, using normal Python")
logger.error(err)
import code
code.interact()
else:
start_ipython(argv=[])
if __name__ == "__main__":
home = os.path.dirname(os.path.abspath(__file__))
LIBPATH = os.path.join(home, 'build', _distutils_dir_name('lib'))
cwd = os.getcwd()
os.chdir(home)
build = subprocess.Popen([sys.executable, "setup.py", "build"], shell=False)
build_rc = build.wait()
if not os.path.exists(LIBPATH):
logger.warning("`lib` directory does not exist, trying common Python3 lib")
LIBPATH = os.path.join(os.path.split(LIBPATH)[0], "lib")
os.chdir(cwd)
if build_rc == 0:
logger.info("Build process ended.")
else:
logger.error("Build process ended with rc=%s", build_rc)
sys.exit(-1)
sys.path.insert(0, LIBPATH)
logger.info("Patched sys.path with %s", LIBPATH)
main(sys.argv)
| [] | [] | [
"PYTHONSTARTUP",
"PATH",
"PYTHONPATH"
] | [] | ["PYTHONSTARTUP", "PATH", "PYTHONPATH"] | python | 3 | 0 | |
docker-plugin/main.go | /*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/go-plugins-helpers/volume"
"github.com/minio/minio-go"
)
// Used for Plugin discovery.
// Docker identifies the existence of an active plugin process by searching for
// a unix socket file (.sock) in /run/docker/plugins/.
// A unix server is started at the `socketAddress` to enable discovery of this plugin by docker.
const (
socketAddress = "/run/docker/plugins/minfs.sock"
defaultLocation = "us-east-1"
)
// `serverconfig` struct is used to store configuration values of the remote Minio server.
// Minfs uses this info to the mount the remote bucket.
// The server info (endpoint, accessKey and secretKey) is passed when creating a docker volume.
// Here is how to do it,
// $ docker volume create -d minfs \
// --name medical-imaging-store \
// -o endpoint=https://play.minio.io:9000 -o access-key=Q3AM3UQ867SPQQA43P2F\
// -o secret-key=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG -o bucket=test-bucket
type serverConfig struct {
// Endpoint of the remote Minio server.
endpoint string
// `minfs` mounts the remote bucket to the local `mountpoint`.
bucket string
// accessKey of the remote minio server.
accessKey string
// secretKey of the remote Minio server.
secretKey string
// Additional opts like custom uid,gid etc.
opts string
}
// Represents an instance of `minfs` mount of remote Minio bucket.
// Its defined by
// - The server info of the mount.
// - The local mountpoint.
// - The number of connections alive for the mount (No.Of.Services still using the mount point).
type mountInfo struct {
config serverConfig
mountPoint string
// the number of containers using the mount.
// an active mount is done when the count is 0.
// unmount is done only if the number of connections is 0.
// otherwise just the count is decreased.
connections int
}
// minfsDriver - The struct implements the `github.com/docker/go-plugins-helpers/volume.Driver` interface.
// Here are the sequence of events that defines the interaction between docker and the plugin server.
// 1. The driver implements the interface defined in `github.com/docker/go-plugins-helpers/volume.Driver`.
// In our case the struct `minfsDriver` implements the interface.
// 2. Create a new instance of `minfsDriver` and register it with the `go-plugin-helper`.
// `go-plugin-helper` is a tool built to make development of docker plugins easier, visit https://github.com/docker/go-plugins-helpers/.
// The registration is done using https://godoc.org/github.com/docker/go-plugins-helpers/volume#NewHandler .
// 3. Docker interacts with the plugin server via HTTP endpoints whose
// protocols defined here https://docs.docker.com/engine/extend/plugins_volume/#/volumedrivercreate.
// 4. Once registered the implemented methods on `minfsDriver` are called whenever docker
// interacts with the plugin via HTTP requests. These methods are responsible for responding to docker with
// success or error messages.
type minfsDriver struct {
// used for atomic access to the fields.
sync.RWMutex
mountRoot string
// config of the remote Minio server.
config serverConfig
// the local path to which the remote Minio bucket is mounted to.
// An active volume driver server can be used to mount multiple
// remote buckets possibly even referring to different Minio server
// instances or buckets.
// The state info of these mounts are maintained here.
mounts map[string]*mountInfo
}
// return a new instance of minfsDriver.
func newMinfsDriver(mountRoot string) *minfsDriver {
logrus.WithField("method", "new minfs driver").Debug(mountRoot)
d := &minfsDriver{
mountRoot: mountRoot,
config: serverConfig{},
mounts: make(map[string]*mountInfo),
}
return d
}
// *minfsDriver.Create - This method is called by docker when a volume is created
// using `$docker volume create -d <plugin-name> --name <volume-name>`.
// the name (--name) of the plugin uniquely identifies the mount.
// The name of the plugin is passed by docker to the plugin during the HTTP call, check
// https://docs.docker.com/engine/extend/plugins_volume/#/volumedrivercreate for more details.
// Additional options can be passed only during call to `Create`,
// $ docker volume create -d <plugin-name> --name <volume-name> -o <option-key>=<option-value>
// The name of the volume uniquely identifies the mount.
// The remote bucket will be mounted at `mountRoot + volumeName`.
// mountRoot is passed as `--mountroot` flag when starting the plugin server.
func (d *minfsDriver) Create(r volume.Request) volume.Response {
logrus.WithField("method", "Create").Debugf("%#v", r)
// hold lock for safe access.
d.Lock()
defer d.Unlock()
// validate the inputs.
// verify that the name of the volume is not empty.
if r.Name == "" {
return errorResponse("Name of the driver cannot be empty.Use `$ docker volume create -d <plugin-name> --name <volume-name>`")
}
// if the volume is already created verify that the server configs match.
// If not return with error.
// Since the plugin system identifies a mount uniquely by its name,
// it's not possible to create a duplicate volume pointing to a different Minio server or bucket.
if mntInfo, ok := d.mounts[r.Name]; ok {
// Since the volume by the given name already exists,
// match to see whether the endpoint, bucket, accessKey
// and secretKey of the new request and the existing entry
// match. return error on mismatch. else return with success message,
// Since the volume already exists no need to proceed further.
err := matchServerConfig(mntInfo.config, r)
if err != nil {
return errorResponse(err.Error())
}
// return success since the volume exists and the configs match.
return volume.Response{}
}
// verify that all the options are set when the volume is created.
if r.Options == nil {
return errorResponse("No options provided. Please refer example usage.")
}
if r.Options["endpoint"] == "" {
return errorResponse("endpoint option cannot be empty.")
}
if r.Options["bucket"] == "" {
return errorResponse("bucket option cannot be empty.")
}
if r.Options["access-key"] == "" {
return errorResponse("access-key option cannot be empty")
}
if r.Options["secret-key"] == "" {
return errorResponse("secret-key cannot be empty.")
}
mntInfo := &mountInfo{}
config := serverConfig{}
// Additional options passed with `-o` option are parsed here.
config.endpoint = r.Options["endpoint"]
config.bucket = r.Options["bucket"]
config.secretKey = r.Options["secret-key"]
config.accessKey = r.Options["access-key"]
config.opts = r.Options["opts"]
// find out whether the scheme of the URL is HTTPS.
enableSSL, err := isSSL(config.endpoint)
if err != nil {
logrus.Error("Please send a valid URL of form http(s)://my-minio.com:9000 <ERROR> ", err.Error())
return errorResponse(err.Error())
}
minioHost, err := getHost(config.endpoint)
if err != nil {
logrus.Error("Please send a valid URL of form http(s)://my-minio.com:9000 <ERROR> ", err.Error())
return errorResponse(err.Error())
}
// Verify if the bucket exists.
// If it doesn't exist, create the bucket on the remote Minio server.
// Initialize minio client object.
minioClient, err := minio.New(minioHost, config.accessKey, config.secretKey, enableSSL)
if err != nil {
logrus.Errorf("Error creating new Minio client. <Error> %s", err.Error())
return errorResponse(err.Error())
}
// Create a bucket.
err = minioClient.MakeBucket(config.bucket, defaultLocation)
if err != nil {
// Check to see if we already own this bucket.
if minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
// return with error response to docker daemon.
logrus.WithFields(logrus.Fields{
"endpoint": config.endpoint,
"bucket": config.bucket,
"opts": config.opts,
}).Fatal(err.Error())
return errorResponse(err.Error())
}
// bucket already exists log and return with success.
logrus.WithFields(logrus.Fields{
"endpoint": config.endpoint,
"bucket": config.bucket,
"opts": config.opts,
}).Info("Bucket already exisits.")
}
// mountpoint is the local path where the remote bucket is mounted.
// `mountroot` is passed as an argument while starting the server with `--mountroot` option.
// the given bucket is mounted locally at path `mountroot + volume` (r.Name is the name of
// the volume passed by docker when a volume is created).
mountpoint := filepath.Join(d.mountRoot, r.Name)
// Cache the info.
mntInfo.mountPoint = mountpoint
// `Create` is the only function which has the ability to pass additional options.
// Protocol doc: https://docs.docker.com/engine/extend/plugins_volume/#/volumedrivercreate
// the server config info which is required for the mount later is also passed as an option during create.
// This has to be cached for further usage.
mntInfo.config = config
// `r.Name` contains the plugin name passed with `--name` in
// `$ docker volume create -d <plugin-name> --name <volume-name>`.
// Name of the volume uniquely identifies the mount.
d.mounts[r.Name] = mntInfo
// ..
return volume.Response{}
}
// minfsDriver.Remove - Delete the specified volume from disk.
// This request is issued when a user invokes `docker rm -v` to remove volumes associated with a container.
// Protocol doc: https://docs.docker.com/engine/extend/plugins_volume/#/volumedriverremove
func (d *minfsDriver) Remove(r volume.Request) volume.Response {
logrus.WithField("method", "remove").Debugf("%#v", r)
d.Lock()
defer d.Unlock()
v, ok := d.mounts[r.Name]
// volume doesn't exist in the entry.
// log and return error to docker daemon.
if !ok {
logrus.WithFields(logrus.Fields{
"operation": "Remove",
"volume": r.Name,
}).Error("Volume not found.")
return errorResponse(fmt.Sprintf("volume %s not found", r.Name))
}
// The volume should not be in use by any other containers.
// verify if the number of connections is 0.
if v.connections == 0 {
// if the count of existing connections is 0, delete the entry for the volume.
if err := os.RemoveAll(v.mountPoint); err != nil {
return errorResponse(err.Error())
}
// Delete the entry for the mount.
delete(d.mounts, r.Name)
return volume.Response{}
}
// volume is being used by one or more containers.
// log and return error to docker daemon.
logrus.WithFields(logrus.Fields{
"volume": r.Name,
}).Errorf("Volume is currently used by %d containers. ", v.connections)
return errorResponse(fmt.Sprintf("volume %s is currently under use.", r.Name))
}
// *minfsDriver.Path - Respond with the path on the host filesystem where the bucket mount has been made available.
// protocol doc: https://docs.docker.com/engine/extend/plugins_volume/#/volumedriverpath
func (d *minfsDriver) Path(r volume.Request) volume.Response {
logrus.WithField("method", "path").Debugf("%#v", r)
d.RLock()
defer d.RUnlock()
v, ok := d.mounts[r.Name]
if !ok {
logrus.WithFields(logrus.Fields{
"operation": "path",
"volume": r.Name,
}).Error("Volume not found.")
return errorResponse(fmt.Sprintf("volume %s not found", r.Name))
}
return volume.Response{Mountpoint: v.mountPoint}
}
// *minfsDriver.Mount - Does mounting of `minfs`.
// protocol doc: https://docs.docker.com/engine/extend/plugins_volume/#/volumedrivermount
// If the mount already exists, just increment the number of connections and return.
// Mount is called only when another container shares the created volume.
// Step 1: Create volume.
//
// $ docker volume create -d minfs-plugin \
// --name my-test-store \
// -o endpoint=https://play.minio.io:9000/rao -o access_key=Q3AM3UQ867SPQQA43P2F\
// -o secret-key=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG -o bucket=test-bucket
//
// Step 2: Attach the new volume to a new container.
//
// $ docker run -it --rm -v my-test-store:/data busybox /bin/sh
// # ls /data
//
// The above set of operations create a mount of remote bucket `test-bucket`,
// in the local path of `mountroot + my-test-store`.
// Note: mountroot passed as --mountroot flag while starting the plugin server.
func (d *minfsDriver) Mount(r volume.MountRequest) volume.Response {
logrus.WithField("method", "mount").Debugf("%#v", r)
d.Lock()
defer d.Unlock()
// verify if the volume exists.
// Mount operation should be performed only after creating the bucket.
v, ok := d.mounts[r.Name]
if !ok {
logrus.WithFields(logrus.Fields{
"operation": "mount",
"volume": r.Name,
}).Error("method:mount: Volume not found.")
return errorResponse(fmt.Sprintf("method:mount: volume %s not found", r.Name))
}
// create the directory for the mountpoint.
// This will be the directory at which the remote bucket will be mounted.
err := createDir(v.mountPoint)
if err != nil {
logrus.WithFields(logrus.Fields{
"mountpount": v.mountPoint,
}).Fatalf("Error creating directory for the mountpoint. <ERROR> %v.", err)
return errorResponse(err.Error())
}
// If the mountpoint is already in use, just increment the usage counter and return to the docker daemon.
if v.connections > 0 {
v.connections++
return volume.Response{Mountpoint: v.mountPoint}
}
// set access-key and secret-key as env variables.
os.Setenv("MINFS_ACCESS_KEY", v.config.accessKey)
os.Setenv("MINFS_SECRET_KEY", v.config.secretKey)
// Mount the remote Minio bucket to the local mountpoint.
if err := d.mountVolume(*v); err != nil {
logrus.WithFields(logrus.Fields{
"mountpount": v.mountPoint,
"endpoint": v.config.endpoint,
"bucket": v.config.bucket,
"opts": v.config.opts,
}).Fatalf("Mount failed: <ERROR> %v", err)
return errorResponse(err.Error())
}
// Mount succeeds, increment the count for number of connections and return.
v.connections++
return volume.Response{Mountpoint: v.mountPoint}
}
// *minfsDriver.Unmount - unmounts the mount at `mountpoint`.
// protocol doc: https://docs.docker.com/engine/extend/plugins_volume/#/volumedriverunmount
// Unmount is called when a container using the mounted volume is stopped.
func (d *minfsDriver) Unmount(r volume.UnmountRequest) volume.Response {
logrus.WithField("method", "unmount").Debugf("%#v", r)
d.Lock()
defer d.Unlock()
// verify if the mount exists.
v, ok := d.mounts[r.Name]
if !ok {
// mount doesn't exist, return error.
logrus.WithFields(logrus.Fields{
"operation": "unmount",
"volume": r.Name,
}).Error("Volume not found.")
return errorResponse(fmt.Sprintf("volume %s not found", r.Name))
}
// Unmount is done only if no other containers are using the mounted volume.
if v.connections <= 1 {
// unmount.
if err := d.unmountVolume(v.mountPoint); err != nil {
return errorResponse(err.Error())
}
v.connections = 0
} else {
// If the count is > 1, that is if the mounted volume is already being used by
// another container, don't unmount; just decrease the count and return.
v.connections--
}
return volume.Response{}
}
// *minfsDriver.Get - Get the mount info.
// protocol doc: https://docs.docker.com/engine/extend/plugins_volume/#/volumedriverget
func (d *minfsDriver) Get(r volume.Request) volume.Response {
logrus.WithField("method", "get").Debugf("%#v", r)
d.Lock()
defer d.Unlock()
// verify if the mount exists.
v, ok := d.mounts[r.Name]
if !ok {
// mount doesn't exist, return error.
logrus.WithFields(logrus.Fields{
"operation": "unmount",
"volume": r.Name,
}).Error("Volume not found.")
return errorResponse(fmt.Sprintf("volume %s not found", r.Name))
}
return volume.Response{Volume: &volume.Volume{Name: r.Name, Mountpoint: v.mountPoint}}
}
// *minfsDriver.List - Get the list of existing volumes.
// protocol doc: https://docs.docker.com/engine/extend/plugins_volume/#/volumedriverlist
func (d *minfsDriver) List(r volume.Request) volume.Response {
logrus.WithField("method", "list").Debugf("%#v", r)
d.Lock()
defer d.Unlock()
var vols []*volume.Volume
for name, v := range d.mounts {
vols = append(vols, &volume.Volume{Name: name, Mountpoint: v.mountPoint})
}
return volume.Response{Volumes: vols}
}
// *minfsDriver.Capabilities - Takes values "local" or "global", more info in protocol doc below.
// protocol doc: https://docs.docker.com/engine/extend/plugins_volume/#/volumedrivercapabilities
func (d *minfsDriver) Capabilities(r volume.Request) volume.Response {
logrus.WithField("method", "capabilities").Debugf("%#v", r)
return volume.Response{Capabilities: volume.Capability{Scope: "local"}}
}
// mounts minfs to the local mountpoint.
func (d *minfsDriver) mountVolume(v mountInfo) error {
// URL for the bucket (ex: https://play.minio.io:9000/mybucket).
var bucketPath string
if strings.HasSuffix(v.config.endpoint, "/") {
bucketPath = v.config.endpoint + v.config.bucket
} else {
bucketPath = v.config.endpoint + "/" + v.config.bucket
}
cmd := fmt.Sprintf("mount -t minfs %s %s", bucketPath, v.mountPoint)
if v.config.opts != "" {
// mount command for minfs.
// ex: mount -t minfs https://play.minio.io:9000/testbucket /testbucket
cmd = fmt.Sprintf("mount -t minfs -o %s %s %s", v.config.opts, bucketPath, v.mountPoint)
}
logrus.Debug(cmd)
return exec.Command("sh", "-c", cmd).Run()
}
// executes `unmount` on the specified volume.
func (d *minfsDriver) unmountVolume(target string) error {
// Unmount the volume.
cmd := fmt.Sprintf("umount %s", target)
logrus.Debug(cmd)
return exec.Command("sh", "-c", cmd).Run()
}
func main() {
// --mountroot flag defines the root folder where are the volumes are mounted.
// If the option is not specified '/mnt' is taken as default mount root.
mountRoot := flag.String("mountroot", "/mnt", "root for mounting Minio buckets.")
flag.Parse()
// check if the mount root exists.
// create if it doesn't exist.
err := createDir(*mountRoot)
if err != nil {
logrus.WithFields(logrus.Fields{
"mountroot": mountRoot,
}).Fatalf("Unable to create mountroot.")
return
}
// if `export DEBUG=1` is set, debug logs will be printed.
debug := os.Getenv("DEBUG")
if ok, _ := strconv.ParseBool(debug); ok {
logrus.SetLevel(logrus.DebugLevel)
}
// Create a new instance MinfsDriver.
// The struct implements the `github.com/docker/go-plugins-helpers/volume.Driver` interface.
d := newMinfsDriver(*mountRoot)
// register it with the `go-plugin-helper`.
// `go-plugin-helper` is a tool built to make development of docker plugins easier,
// visit https://github.com/docker/go-plugins-helpers/.
// The registration is done using https://godoc.org/github.com/docker/go-plugins-helpers/volume#NewHandler .
h := volume.NewHandler(d)
// create a server on unix socket.
logrus.Infof("listening on %s", socketAddress)
logrus.Error(h.ServeUnix(socketAddress, 0))
}
| [
"\"DEBUG\""
] | [] | [
"DEBUG"
] | [] | ["DEBUG"] | go | 1 | 0 | |
pkg/kubectl/cmd/util/editor/editor.go | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package editor
import (
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/term"
"github.com/golang/glog"
)
const (
// sorry, blame Git
defaultEditor = "vi"
defaultShell = "/bin/bash"
)
type Editor struct {
Args []string
Shell bool
}
// NewDefaultEditor creates a struct Editor that uses the OS environment to
// locate the editor program, looking at EDITOR environment variable to find
// the proper command line. If the provided editor has no spaces, or no quotes,
// it is treated as a bare command to be loaded. Otherwise, the string will
// be passed to the user's shell for execution.
func NewDefaultEditor() Editor {
args, shell := defaultEnvEditor()
return Editor{
Args: args,
Shell: shell,
}
}
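// For illustration (behaviour inferred from defaultEnvEditor below; the values are
// examples only): with EDITOR unset the command is ["vi"]; with EDITOR set to
// `nano -w` it becomes ["nano", "-w"]; and with EDITOR containing quotes, e.g.
// `'my editor' --wait`, the whole string is handed to $SHELL -c and Shell is true.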
func defaultEnvShell() []string {
shell := os.Getenv("SHELL")
if len(shell) == 0 {
shell = defaultShell
}
return []string{shell, "-c"}
}
func defaultEnvEditor() ([]string, bool) {
editor := os.Getenv("EDITOR")
if len(editor) == 0 {
editor = defaultEditor
}
if !strings.Contains(editor, " ") {
return []string{editor}, false
}
if !strings.ContainsAny(editor, "\"'\\") {
return strings.Split(editor, " "), false
}
// rather than parse the shell arguments ourselves, punt to the shell
shell := defaultEnvShell()
return append(shell, editor), true
}
func (e Editor) args(path string) []string {
args := make([]string, len(e.Args))
copy(args, e.Args)
if e.Shell {
last := args[len(args)-1]
args[len(args)-1] = fmt.Sprintf("%s %q", last, path)
} else {
args = append(args, path)
}
return args
}
// Launch opens the described path in the editor or returns an error. The TTY will be protected, and
// SIGQUIT, SIGTERM, and SIGINT will all be trapped.
func (e Editor) Launch(path string) error {
if len(e.Args) == 0 {
return fmt.Errorf("no editor defined, can't open %s", path)
}
abs, err := filepath.Abs(path)
if err != nil {
return err
}
args := e.args(abs)
cmd := exec.Command(args[0], args[1:]...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
glog.V(5).Infof("Opening file with editor %v", args)
if err := withSafeTTYAndInterrupts(cmd.Run); err != nil {
if err, ok := err.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
return fmt.Errorf("unable to launch the editor %q", strings.Join(e.Args, " "))
}
}
return fmt.Errorf("there was a problem with the editor %q", strings.Join(e.Args, " "))
}
return nil
}
// LaunchTempFile reads the provided stream into a temporary file in the given directory
// and file prefix, and then invokes Launch with the path of that file. It will return
// the contents of the file after launch, any errors that occur, and the path of the
// temporary file so the caller can clean it up as needed.
func (e Editor) LaunchTempFile(prefix, suffix string, r io.Reader) ([]byte, string, error) {
f, err := tempFile(prefix, suffix)
if err != nil {
return nil, "", err
}
defer f.Close()
path := f.Name()
if _, err := io.Copy(f, r); err != nil {
os.Remove(path)
return nil, path, err
}
if err := e.Launch(path); err != nil {
return nil, path, err
}
bytes, err := ioutil.ReadFile(path)
return bytes, path, err
}
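// Usage sketch (illustrative only; the prefix, suffix and buffer contents are
// assumptions for demonstration):
//
//	edit := NewDefaultEditor()
//	edited, path, err := edit.LaunchTempFile("edit-", ".yaml", strings.NewReader("kind: Pod\n"))
//	// edited holds the buffer after the user's editor exits; the caller is
//	// responsible for removing the temporary file at path.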
// withSafeTTYAndInterrupts invokes the provided function after the terminal
// state has been stored, and then on any error or termination attempts to
// restore the terminal state to its prior behavior. It also eats signals
// for the duration of the function.
func withSafeTTYAndInterrupts(fn func() error) error {
ch := make(chan os.Signal, 1)
signal.Notify(ch, childSignals...)
defer signal.Stop(ch)
inFd := os.Stdin.Fd()
if !term.IsTerminal(inFd) {
if f, err := os.Open("/dev/tty"); err == nil {
defer f.Close()
inFd = f.Fd()
}
}
if term.IsTerminal(inFd) {
state, err := term.SaveState(inFd)
if err != nil {
return err
}
go func() {
if _, ok := <-ch; !ok {
return
}
term.RestoreTerminal(inFd, state)
}()
defer term.RestoreTerminal(inFd, state)
return fn()
}
return fn()
}
func tempFile(prefix, suffix string) (f *os.File, err error) {
dir := os.TempDir()
for i := 0; i < 10000; i++ {
name := filepath.Join(dir, prefix+randSeq(5)+suffix)
f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if os.IsExist(err) {
continue
}
break
}
return
}
var letters = []rune("abcdefghijklmnopqrstuvwxyz0123456789")
func randSeq(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
| [
"\"SHELL\"",
"\"EDITOR\""
] | [] | [
"SHELL",
"EDITOR"
] | [] | ["SHELL", "EDITOR"] | go | 2 | 0 | |
pytools/requester.py | import os
import time
import requests
assert os.getenv("NYCKEL_CLIENT_ID"), "NYCKEL_CLIENT_ID env variable not set; can't setup connection."
assert os.getenv("NYCKEL_CLIENT_SECRET"), "NYCKEL_CLIENT_SECRET env variable not set; can't setup connection."
class Requester:
"""Class to talk to the Server. Manages the OAuth flow and retries in case connection is down."""
def __init__(
self,
client_id: str,
client_secret: str,
host: str,
api_version: str,
nbr_max_attempts=5,
attempt_wait_sec=5,
):
self.client_id = client_id
self.client_secret = client_secret
self.host = host
self.api_version = api_version
self._access_token = "Placeholder"
self.nbr_max_attempts = nbr_max_attempts
self.attempt_wait_sec = attempt_wait_sec
def __call__(self, request, endpoint: str, **kwargs):
url = self._get_full_url(endpoint)
attempt_counter = 0
resp = None
while not resp and attempt_counter < self.nbr_max_attempts:
try:
resp = self._request_with_renewal(request, url, **kwargs)
except requests.exceptions.RequestException as err:
print(f"Can not access {url} with {request.__name__.upper()} {kwargs}. Err: {err}.")
time.sleep(self.attempt_wait_sec)
attempt_counter += 1
return resp
def _request_with_renewal(self, request, url, **kwargs):
kwargs["headers"] = {"authorization": "Bearer " + self._access_token}
resp = request(url, **kwargs)
if resp.status_code == 401:
self._renew_access_token()
kwargs["headers"] = {"authorization": "Bearer " + self._access_token}
resp = request(url, **kwargs)
if resp.status_code == 200:
return resp
else:
raise RuntimeError(f"Call failed with {resp.status_code}: {resp.text}")
def _renew_access_token(self):
payload = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"grant_type": "client_credentials",
}
token_url = self.host.rstrip("/") + "/connect/token"
resp = requests.post(token_url, data=payload)
if "access_token" not in resp.json():
raise RuntimeError(f"Renewing access token failed with {resp.status_code}: {resp.text}")
self._access_token = resp.json()["access_token"]
def _get_full_url(self, endpoint):
return self.host.rstrip("/") + "/v" + self.api_version.lstrip("v").rstrip("/") + "/" + endpoint.lstrip("/")
def requester_factory():
assert os.getenv("NYCKEL_CLIENT_ID"), "NYCKEL_CLIENT_ID env variable not set; can't setup connection."
assert os.getenv("NYCKEL_CLIENT_SECRET"), "NYCKEL_CLIENT_SECRET env variable not set; can't setup connection."
return Requester(os.getenv("NYCKEL_CLIENT_ID"), os.getenv("NYCKEL_CLIENT_SECRET"), "https://www.nyckel.com/", "0.9")
def repeated_get(requester: Requester, endpoint: str):
resp = requester(requests.get, endpoint)
resource_list = resp.json()
while "next" in resp.links:
endpoint = resp.links["next"]["url"]
resp = requester(requests.get, endpoint)
resource_list.extend(resp.json())
return resource_list
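# Illustrative usage sketch (not part of the original module); the "functions"
# endpoint name below is an assumption for demonstration only:
#
#     requester = requester_factory()
#     items = repeated_get(requester, "functions")
#     print(f"fetched {len(items)} resources")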
| [] | [] | [
"NYCKEL_CLIENT_ID",
"NYCKEL_CLIENT_SECRET"
] | [] | ["NYCKEL_CLIENT_ID", "NYCKEL_CLIENT_SECRET"] | python | 2 | 0 | |
core/configs.go | package core
import (
"fmt"
"io"
"net/url"
"strconv"
)
// Telegram constants
const (
// APIEndpoint is the endpoint for all API methods,
// with formatting for Sprintf.
APIEndpoint = "https://api.telegram.org/bot%s/%s"
// FileEndpoint is the endpoint for downloading a file from Telegram.
FileEndpoint = "https://api.telegram.org/file/bot%s/%s"
)
// Constant values for ChatActions
const (
ChatTyping = "typing"
ChatUploadPhoto = "upload_photo"
ChatRecordVideo = "record_video"
ChatUploadVideo = "upload_video"
ChatRecordVoice = "record_voice"
ChatUploadVoice = "upload_voice"
// Deprecated: use ChatRecordVoice instead.
ChatRecordAudio = "record_audio"
// Deprecated: use ChatUploadVoice instead.
ChatUploadAudio = "upload_audio"
ChatUploadDocument = "upload_document"
ChatFindLocation = "find_location"
ChatRecordVideoNote = "record_video_note"
ChatUploadVideoNote = "upload_video_note"
)
// API errors
const (
// ErrAPIForbidden happens when a token is bad
ErrAPIForbidden = "forbidden"
)
// Constant values for ParseMode in MessageConfig
const (
ModeMarkdown = "Markdown"
ModeMarkdownV2 = "MarkdownV2"
ModeHTML = "HTML"
)
// Constant values for update types
const (
// New incoming message of any kind - text, photo, sticker, etc.
UpdateTypeMessage = "message"
// New version of a message that is known to the bot and was edited
UpdateTypeEditedMessage = "edited_message"
// New incoming channel post of any kind - text, photo, sticker, etc.
UpdateTypeChannelPost = "channel_post"
// New version of a channel post that is known to the bot and was edited
UpdateTypeEditedChannelPost = "edited_channel_post"
// New incoming inline query
UpdateTypeInlineQuery = "inline_query"
// The result of an inline query that was chosen by a user and sent to their
// chat partner. Please see the documentation on the feedback collecting for
// details on how to enable these updates for your bot.
UpdateTypeChosenInlineResult = "chosen_inline_result"
// New incoming callback query
UpdateTypeCallbackQuery = "callback_query"
// New incoming shipping query. Only for invoices with flexible price
UpdateTypeShippingQuery = "shipping_query"
// New incoming pre-checkout query. Contains full information about checkout
UpdateTypePreCheckoutQuery = "pre_checkout_query"
// New poll state. Bots receive only updates about stopped polls and polls
// which are sent by the bot
UpdateTypePoll = "poll"
// A user changed their answer in a non-anonymous poll. Bots receive new votes
// only in polls that were sent by the bot itself.
UpdateTypePollAnswer = "poll_answer"
// The bot's chat member status was updated in a chat. For private chats, this
// update is received only when the bot is blocked or unblocked by the user.
UpdateTypeMyChatMember = "my_chat_member"
// The bot must be an administrator in the chat and must explicitly specify
// this update in the list of allowed_updates to receive these updates.
UpdateTypeChatMember = "chat_member"
)
// Library errors
const (
// ErrBadFileType happens when you pass an unknown type
ErrBadFileType = "bad file type"
ErrBadURL = "bad or empty url"
)
// Chattable is any config type that can be sent.
type Chattable interface {
params() (Params, error)
method() string
}
// RequestFile represents a file associated with a request. May involve
// uploading a file, or passing an existing ID.
type RequestFile struct {
// The multipart upload field name.
Name string
// The file to upload.
File interface{}
}
// Fileable is any config type that can be sent that includes a file.
type Fileable interface {
Chattable
files() []RequestFile
}
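// Illustrative note (not part of the original file): a config satisfies
// Chattable by providing method() and params(); for example, MessageConfig
// below reports "sendMessage" as its method and serializes chat_id, text and
// the optional fields into Params. The ChatID value here is an example only:
//
//	msg := MessageConfig{BaseChat: BaseChat{ChatID: 12345}, Text: "hello"}
//	// msg.method() == "sendMessage"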
// LogOutConfig is a request to log out of the cloud Bot API server.
//
// Note that you may not log back in for at least 10 minutes.
type LogOutConfig struct{}
func (LogOutConfig) method() string {
return "logOut"
}
func (LogOutConfig) params() (Params, error) {
return nil, nil
}
// CloseConfig is a request to close the bot instance on a local server.
//
// Note that you may not close an instance for the first 10 minutes after the
// bot has started.
type CloseConfig struct{}
func (CloseConfig) method() string {
return "close"
}
func (CloseConfig) params() (Params, error) {
return nil, nil
}
// BaseChat is base type for all chat config types.
type BaseChat struct {
ChatID int64 // required
ChannelUsername string
ReplyToMessageID int
ReplyMarkup interface{}
DisableNotification bool
AllowSendingWithoutReply bool
}
func (chat *BaseChat) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", chat.ChatID, chat.ChannelUsername)
params.AddNonZero("reply_to_message_id", chat.ReplyToMessageID)
params.AddBool("disable_notification", chat.DisableNotification)
params.AddBool("allow_sending_without_reply", chat.AllowSendingWithoutReply)
err := params.AddInterface("reply_markup", chat.ReplyMarkup)
return params, err
}
// BaseFile is a base type for all file config types.
type BaseFile struct {
BaseChat
File interface{}
}
func (file BaseFile) params() (Params, error) {
return file.BaseChat.params()
}
// BaseEdit is base type of all chat edits.
type BaseEdit struct {
ChatID int64
ChannelUsername string
MessageID int
InlineMessageID string
ReplyMarkup *InlineKeyboardMarkup
}
func (edit BaseEdit) params() (Params, error) {
params := make(Params)
if edit.InlineMessageID != "" {
params["inline_message_id"] = edit.InlineMessageID
} else {
_ = params.AddFirstValid("chat_id", edit.ChatID, edit.ChannelUsername)
params.AddNonZero("message_id", edit.MessageID)
}
err := params.AddInterface("reply_markup", edit.ReplyMarkup)
return params, err
}
// MessageConfig contains information about a SendMessage request.
type MessageConfig struct {
BaseChat
Text string
ParseMode string
Entities []MessageEntity
DisableWebPagePreview bool
}
func (config MessageConfig) params() (Params, error) {
params, err := config.BaseChat.params()
if err != nil {
return params, err
}
params.AddNonEmpty("text", config.Text)
params.AddBool("disable_web_page_preview", config.DisableWebPagePreview)
params.AddNonEmpty("parse_mode", config.ParseMode)
err = params.AddInterface("entities", config.Entities)
return params, err
}
func (config MessageConfig) method() string {
return "sendMessage"
}
// ForwardConfig contains information about a ForwardMessage request.
type ForwardConfig struct {
BaseChat
FromChatID int64 // required
FromChannelUsername string
MessageID int // required
}
func (config ForwardConfig) params() (Params, error) {
params, err := config.BaseChat.params()
if err != nil {
return params, err
}
params.AddNonZero64("from_chat_id", config.FromChatID)
params.AddNonZero("message_id", config.MessageID)
return params, nil
}
func (config ForwardConfig) method() string {
return "forwardMessage"
}
// CopyMessageConfig contains information about a copyMessage request.
type CopyMessageConfig struct {
BaseChat
FromChatID int64
FromChannelUsername string
MessageID int
Caption string
ParseMode string
CaptionEntities []MessageEntity
}
func (config CopyMessageConfig) params() (Params, error) {
params, err := config.BaseChat.params()
if err != nil {
return params, err
}
params.AddFirstValid("from_chat_id", config.FromChatID, config.FromChannelUsername)
params.AddNonZero("message_id", config.MessageID)
params.AddNonEmpty("caption", config.Caption)
params.AddNonEmpty("parse_mode", config.ParseMode)
err = params.AddInterface("caption_entities", config.CaptionEntities)
return params, err
}
func (config CopyMessageConfig) method() string {
return "copyMessage"
}
// PhotoConfig contains information about a SendPhoto request.
type PhotoConfig struct {
BaseFile
Thumb interface{}
Caption string
ParseMode string
CaptionEntities []MessageEntity
}
func (config PhotoConfig) params() (Params, error) {
params, err := config.BaseFile.params()
if err != nil {
return params, err
}
params.AddNonEmpty("caption", config.Caption)
params.AddNonEmpty("parse_mode", config.ParseMode)
err = params.AddInterface("caption_entities", config.CaptionEntities)
return params, err
}
func (config PhotoConfig) method() string {
return "sendPhoto"
}
func (config PhotoConfig) files() []RequestFile {
files := []RequestFile{{
Name: "photo",
File: config.File,
}}
if config.Thumb != nil {
files = append(files, RequestFile{
Name: "thumb",
File: config.Thumb,
})
}
return files
}
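// examplePhotoWithThumb is an illustrative sketch, not part of the original
// file. It shows that File and Thumb accept the wrapper types declared later
// in this file (FileID, FileBytes, FileReader, FileURL); the ID and bytes
// below are placeholders.
func examplePhotoWithThumb() PhotoConfig {
	cfg := PhotoConfig{
		BaseFile: BaseFile{
			BaseChat: BaseChat{ChatID: 12345},
			File:     FileID("AgACAgQAAxkBAAIB"),
		},
		Caption: "example caption",
	}
	cfg.Thumb = FileBytes{Name: "thumb.jpg", Bytes: []byte{0xFF, 0xD8}}
	return cfg
}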
// AudioConfig contains information about a SendAudio request.
type AudioConfig struct {
BaseFile
Thumb interface{}
Caption string
ParseMode string
CaptionEntities []MessageEntity
Duration int
Performer string
Title string
}
func (config AudioConfig) params() (Params, error) {
params, err := config.BaseChat.params()
if err != nil {
return params, err
}
params.AddNonZero("duration", config.Duration)
params.AddNonEmpty("performer", config.Performer)
params.AddNonEmpty("title", config.Title)
params.AddNonEmpty("caption", config.Caption)
params.AddNonEmpty("parse_mode", config.ParseMode)
err = params.AddInterface("caption_entities", config.CaptionEntities)
return params, err
}
func (config AudioConfig) method() string {
return "sendAudio"
}
func (config AudioConfig) files() []RequestFile {
files := []RequestFile{{
Name: "audio",
File: config.File,
}}
if config.Thumb != nil {
files = append(files, RequestFile{
Name: "thumb",
File: config.Thumb,
})
}
return files
}
// DocumentConfig contains information about a SendDocument request.
type DocumentConfig struct {
BaseFile
Thumb interface{}
Caption string
ParseMode string
CaptionEntities []MessageEntity
DisableContentTypeDetection bool
}
func (config DocumentConfig) params() (Params, error) {
params, err := config.BaseFile.params()
params.AddNonEmpty("caption", config.Caption)
params.AddNonEmpty("parse_mode", config.ParseMode)
params.AddBool("disable_content_type_detection", config.DisableContentTypeDetection)
return params, err
}
func (config DocumentConfig) method() string {
return "sendDocument"
}
func (config DocumentConfig) files() []RequestFile {
files := []RequestFile{{
Name: "document",
File: config.File,
}}
if config.Thumb != nil {
files = append(files, RequestFile{
Name: "thumb",
File: config.Thumb,
})
}
return files
}
// StickerConfig contains information about a SendSticker request.
type StickerConfig struct {
BaseFile
}
func (config StickerConfig) params() (Params, error) {
return config.BaseChat.params()
}
func (config StickerConfig) method() string {
return "sendSticker"
}
func (config StickerConfig) files() []RequestFile {
return []RequestFile{{
Name: "sticker",
File: config.File,
}}
}
// VideoConfig contains information about a SendVideo request.
type VideoConfig struct {
BaseFile
Thumb interface{}
Duration int
Caption string
ParseMode string
CaptionEntities []MessageEntity
SupportsStreaming bool
}
func (config VideoConfig) params() (Params, error) {
params, err := config.BaseChat.params()
if err != nil {
return params, err
}
params.AddNonZero("duration", config.Duration)
params.AddNonEmpty("caption", config.Caption)
params.AddNonEmpty("parse_mode", config.ParseMode)
params.AddBool("supports_streaming", config.SupportsStreaming)
err = params.AddInterface("caption_entities", config.CaptionEntities)
return params, err
}
func (config VideoConfig) method() string {
return "sendVideo"
}
func (config VideoConfig) files() []RequestFile {
files := []RequestFile{{
Name: "video",
File: config.File,
}}
if config.Thumb != nil {
files = append(files, RequestFile{
Name: "thumb",
File: config.Thumb,
})
}
return files
}
// AnimationConfig contains information about a SendAnimation request.
type AnimationConfig struct {
BaseFile
Duration int
Thumb interface{}
Caption string
ParseMode string
CaptionEntities []MessageEntity
}
func (config AnimationConfig) params() (Params, error) {
params, err := config.BaseChat.params()
if err != nil {
return params, err
}
params.AddNonZero("duration", config.Duration)
params.AddNonEmpty("caption", config.Caption)
params.AddNonEmpty("parse_mode", config.ParseMode)
err = params.AddInterface("caption_entities", config.CaptionEntities)
return params, err
}
func (config AnimationConfig) method() string {
return "sendAnimation"
}
func (config AnimationConfig) files() []RequestFile {
files := []RequestFile{{
Name: "animation",
File: config.File,
}}
if config.Thumb != nil {
files = append(files, RequestFile{
Name: "thumb",
File: config.Thumb,
})
}
return files
}
// VideoNoteConfig contains information about a SendVideoNote request.
type VideoNoteConfig struct {
BaseFile
Thumb interface{}
Duration int
Length int
}
func (config VideoNoteConfig) params() (Params, error) {
params, err := config.BaseChat.params()
params.AddNonZero("duration", config.Duration)
params.AddNonZero("length", config.Length)
return params, err
}
func (config VideoNoteConfig) method() string {
return "sendVideoNote"
}
func (config VideoNoteConfig) files() []RequestFile {
files := []RequestFile{{
Name: "video_note",
File: config.File,
}}
if config.Thumb != nil {
files = append(files, RequestFile{
Name: "thumb",
File: config.Thumb,
})
}
return files
}
// VoiceConfig contains information about a SendVoice request.
type VoiceConfig struct {
BaseFile
Thumb interface{}
Caption string
ParseMode string
CaptionEntities []MessageEntity
Duration int
}
func (config VoiceConfig) params() (Params, error) {
params, err := config.BaseChat.params()
if err != nil {
return params, err
}
params.AddNonZero("duration", config.Duration)
params.AddNonEmpty("caption", config.Caption)
params.AddNonEmpty("parse_mode", config.ParseMode)
err = params.AddInterface("caption_entities", config.CaptionEntities)
return params, err
}
func (config VoiceConfig) method() string {
return "sendVoice"
}
func (config VoiceConfig) files() []RequestFile {
files := []RequestFile{{
Name: "voice",
File: config.File,
}}
if config.Thumb != nil {
files = append(files, RequestFile{
Name: "thumb",
File: config.Thumb,
})
}
return files
}
// LocationConfig contains information about a SendLocation request.
type LocationConfig struct {
BaseChat
Latitude float64 // required
Longitude float64 // required
HorizontalAccuracy float64 // optional
LivePeriod int // optional
Heading int // optional
ProximityAlertRadius int // optional
}
func (config LocationConfig) params() (Params, error) {
params, err := config.BaseChat.params()
params.AddNonZeroFloat("latitude", config.Latitude)
params.AddNonZeroFloat("longitude", config.Longitude)
params.AddNonZeroFloat("horizontal_accuracy", config.HorizontalAccuracy)
params.AddNonZero("live_period", config.LivePeriod)
params.AddNonZero("heading", config.Heading)
params.AddNonZero("proximity_alert_radius", config.ProximityAlertRadius)
return params, err
}
func (config LocationConfig) method() string {
return "sendLocation"
}
// EditMessageLiveLocationConfig allows you to update a live location.
type EditMessageLiveLocationConfig struct {
BaseEdit
Latitude float64 // required
Longitude float64 // required
HorizontalAccuracy float64 // optional
Heading int // optional
ProximityAlertRadius int // optional
}
func (config EditMessageLiveLocationConfig) params() (Params, error) {
params, err := config.BaseEdit.params()
params.AddNonZeroFloat("latitude", config.Latitude)
params.AddNonZeroFloat("longitude", config.Longitude)
params.AddNonZeroFloat("horizontal_accuracy", config.HorizontalAccuracy)
params.AddNonZero("heading", config.Heading)
params.AddNonZero("proximity_alert_radius", config.ProximityAlertRadius)
return params, err
}
func (config EditMessageLiveLocationConfig) method() string {
return "editMessageLiveLocation"
}
// StopMessageLiveLocationConfig stops updating a live location.
type StopMessageLiveLocationConfig struct {
BaseEdit
}
func (config StopMessageLiveLocationConfig) params() (Params, error) {
return config.BaseEdit.params()
}
func (config StopMessageLiveLocationConfig) method() string {
return "stopMessageLiveLocation"
}
// VenueConfig contains information about a SendVenue request.
type VenueConfig struct {
BaseChat
Latitude float64 // required
Longitude float64 // required
Title string // required
Address string // required
FoursquareID string
FoursquareType string
GooglePlaceID string
GooglePlaceType string
}
func (config VenueConfig) params() (Params, error) {
params, err := config.BaseChat.params()
params.AddNonZeroFloat("latitude", config.Latitude)
params.AddNonZeroFloat("longitude", config.Longitude)
params["title"] = config.Title
params["address"] = config.Address
params.AddNonEmpty("foursquare_id", config.FoursquareID)
params.AddNonEmpty("foursquare_type", config.FoursquareType)
params.AddNonEmpty("google_place_id", config.GooglePlaceID)
params.AddNonEmpty("google_place_type", config.GooglePlaceType)
return params, err
}
func (config VenueConfig) method() string {
return "sendVenue"
}
// ContactConfig allows you to send a contact.
type ContactConfig struct {
BaseChat
PhoneNumber string
FirstName string
LastName string
VCard string
}
func (config ContactConfig) params() (Params, error) {
params, err := config.BaseChat.params()
params["phone_number"] = config.PhoneNumber
params["first_name"] = config.FirstName
params.AddNonEmpty("last_name", config.LastName)
params.AddNonEmpty("vcard", config.VCard)
return params, err
}
func (config ContactConfig) method() string {
return "sendContact"
}
// SendPollConfig allows you to send a poll.
type SendPollConfig struct {
BaseChat
Question string
Options []string
IsAnonymous bool
Type string
AllowsMultipleAnswers bool
CorrectOptionID int64
Explanation string
ExplanationParseMode string
ExplanationEntities []MessageEntity
OpenPeriod int
CloseDate int
IsClosed bool
}
func (config SendPollConfig) params() (Params, error) {
params, err := config.BaseChat.params()
if err != nil {
return params, err
}
params["question"] = config.Question
if err = params.AddInterface("options", config.Options); err != nil {
return params, err
}
params["is_anonymous"] = strconv.FormatBool(config.IsAnonymous)
params.AddNonEmpty("type", config.Type)
params["allows_multiple_answers"] = strconv.FormatBool(config.AllowsMultipleAnswers)
params["correct_option_id"] = strconv.FormatInt(config.CorrectOptionID, 10)
params.AddBool("is_closed", config.IsClosed)
params.AddNonEmpty("explanation", config.Explanation)
params.AddNonEmpty("explanation_parse_mode", config.ExplanationParseMode)
params.AddNonZero("open_period", config.OpenPeriod)
params.AddNonZero("close_date", config.CloseDate)
err = params.AddInterface("explanation_entities", config.ExplanationEntities)
return params, err
}
func (SendPollConfig) method() string {
return "sendPoll"
}
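// exampleQuizPoll is an illustrative sketch, not part of the original file. It
// fills in a quiz-type poll; the question, options, and correct option index
// are placeholder values.
func exampleQuizPoll() SendPollConfig {
	return SendPollConfig{
		BaseChat:        BaseChat{ChatID: 12345},
		Question:        "2 + 2 = ?",
		Options:         []string{"3", "4", "5"},
		Type:            "quiz",
		CorrectOptionID: 1,
		IsAnonymous:     true,
	}
}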
// GameConfig allows you to send a game.
type GameConfig struct {
BaseChat
GameShortName string
}
func (config GameConfig) params() (Params, error) {
params, err := config.BaseChat.params()
params["game_short_name"] = config.GameShortName
return params, err
}
func (config GameConfig) method() string {
return "sendGame"
}
// SetGameScoreConfig allows you to update the game score in a chat.
type SetGameScoreConfig struct {
UserID int64
Score int
Force bool
DisableEditMessage bool
ChatID int64
ChannelUsername string
MessageID int
InlineMessageID string
}
func (config SetGameScoreConfig) params() (Params, error) {
params := make(Params)
params.AddNonZero64("user_id", config.UserID)
params.AddNonZero("scrore", config.Score)
params.AddBool("disable_edit_message", config.DisableEditMessage)
if config.InlineMessageID != "" {
params["inline_message_id"] = config.InlineMessageID
} else {
params.AddFirstValid("chat_id", config.ChatID, config.ChannelUsername)
params.AddNonZero("message_id", config.MessageID)
}
return params, nil
}
func (config SetGameScoreConfig) method() string {
return "setGameScore"
}
// GetGameHighScoresConfig allows you to fetch the high scores for a game.
type GetGameHighScoresConfig struct {
UserID int64
ChatID int64
ChannelUsername string
MessageID int
InlineMessageID string
}
func (config GetGameHighScoresConfig) params() (Params, error) {
params := make(Params)
params.AddNonZero64("user_id", config.UserID)
if config.InlineMessageID != "" {
params["inline_message_id"] = config.InlineMessageID
} else {
params.AddFirstValid("chat_id", config.ChatID, config.ChannelUsername)
params.AddNonZero("message_id", config.MessageID)
}
return params, nil
}
func (config GetGameHighScoresConfig) method() string {
return "getGameHighScores"
}
// ChatActionConfig contains information about a SendChatAction request.
type ChatActionConfig struct {
BaseChat
Action string // required
}
func (config ChatActionConfig) params() (Params, error) {
params, err := config.BaseChat.params()
params["action"] = config.Action
return params, err
}
func (config ChatActionConfig) method() string {
return "sendChatAction"
}
// EditMessageTextConfig allows you to modify the text in a message.
type EditMessageTextConfig struct {
BaseEdit
Text string
ParseMode string
Entities []MessageEntity
DisableWebPagePreview bool
}
func (config EditMessageTextConfig) params() (Params, error) {
params, err := config.BaseEdit.params()
if err != nil {
return params, err
}
params["text"] = config.Text
params.AddNonEmpty("parse_mode", config.ParseMode)
params.AddBool("disable_web_page_preview", config.DisableWebPagePreview)
err = params.AddInterface("entities", config.Entities)
return params, err
}
func (config EditMessageTextConfig) method() string {
return "editMessageText"
}
// EditMessageCaptionConfig allows you to modify the caption of a message.
type EditMessageCaptionConfig struct {
BaseEdit
Caption string
ParseMode string
CaptionEntities []MessageEntity
}
func (config EditMessageCaptionConfig) params() (Params, error) {
params, err := config.BaseEdit.params()
if err != nil {
return params, err
}
params["caption"] = config.Caption
params.AddNonEmpty("parse_mode", config.ParseMode)
err = params.AddInterface("caption_entities", config.CaptionEntities)
return params, err
}
func (config EditMessageCaptionConfig) method() string {
return "editMessageCaption"
}
// EditMessageMediaConfig allows you to make an editMessageMedia request.
type EditMessageMediaConfig struct {
BaseEdit
Media interface{}
}
func (EditMessageMediaConfig) method() string {
return "editMessageMedia"
}
func (config EditMessageMediaConfig) params() (Params, error) {
params, err := config.BaseEdit.params()
if err != nil {
return params, err
}
err = params.AddInterface("media", prepareInputMediaParam(config.Media, 0))
return params, err
}
func (config EditMessageMediaConfig) files() []RequestFile {
return prepareInputMediaFile(config.Media, 0)
}
// EditMessageReplyMarkupConfig allows you to modify the reply markup
// of a message.
type EditMessageReplyMarkupConfig struct {
BaseEdit
}
func (config EditMessageReplyMarkupConfig) params() (Params, error) {
return config.BaseEdit.params()
}
func (config EditMessageReplyMarkupConfig) method() string {
return "editMessageReplyMarkup"
}
// StopPollConfig allows you to stop a poll sent by the bot.
type StopPollConfig struct {
BaseEdit
}
func (config StopPollConfig) params() (Params, error) {
return config.BaseEdit.params()
}
func (StopPollConfig) method() string {
return "stopPoll"
}
// UserProfilePhotosConfig contains information about a
// GetUserProfilePhotos request.
type UserProfilePhotosConfig struct {
UserID int64
Offset int
Limit int
}
func (UserProfilePhotosConfig) method() string {
return "getUserProfilePhotos"
}
func (config UserProfilePhotosConfig) params() (Params, error) {
params := make(Params)
params.AddNonZero64("user_id", config.UserID)
params.AddNonZero("offset", config.Offset)
params.AddNonZero("limit", config.Limit)
return params, nil
}
// FileConfig has information about a file hosted on Telegram.
type FileConfig struct {
FileID string
}
func (FileConfig) method() string {
return "getFile"
}
func (config FileConfig) params() (Params, error) {
params := make(Params)
params["file_id"] = config.FileID
return params, nil
}
// UpdateConfig contains information about a GetUpdates request.
type UpdateConfig struct {
Offset int
Limit int
Timeout int
AllowedUpdates []string
}
func (UpdateConfig) method() string {
return "getUpdates"
}
func (config UpdateConfig) params() (Params, error) {
params := make(Params)
params.AddNonZero("offset", config.Offset)
params.AddNonZero("limit", config.Limit)
params.AddNonZero("timeout", config.Timeout)
params.AddInterface("allowed_updates", config.AllowedUpdates)
return params, nil
}
// WebhookConfig contains information about a SetWebhook request.
type WebhookConfig struct {
URL *url.URL
Certificate interface{}
IPAddress string
MaxConnections int
AllowedUpdates []string
DropPendingUpdates bool
}
func (config WebhookConfig) method() string {
return "setWebhook"
}
func (config WebhookConfig) params() (Params, error) {
params := make(Params)
if config.URL != nil {
params["url"] = config.URL.String()
}
params.AddNonEmpty("ip_address", config.IPAddress)
params.AddNonZero("max_connections", config.MaxConnections)
err := params.AddInterface("allowed_updates", config.AllowedUpdates)
params.AddBool("drop_pending_updates", config.DropPendingUpdates)
return params, err
}
func (config WebhookConfig) files() []RequestFile {
if config.Certificate != nil {
return []RequestFile{{
Name: "certificate",
File: config.Certificate,
}}
}
return nil
}
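// exampleWebhook is an illustrative sketch, not part of the original file. It
// assembles a minimal webhook registration; the URL and allowed update types
// are placeholder values. For self-signed setups, Certificate may additionally
// be set to an uploadable file.
func exampleWebhook() (WebhookConfig, error) {
	u, err := url.Parse("https://example.com/telegram-webhook")
	if err != nil {
		return WebhookConfig{}, err
	}
	return WebhookConfig{
		URL:            u,
		MaxConnections: 40,
		AllowedUpdates: []string{"message", "callback_query"},
	}, nil
}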
// DeleteWebhookConfig is a helper to delete a webhook.
type DeleteWebhookConfig struct {
DropPendingUpdates bool
}
func (config DeleteWebhookConfig) method() string {
return "deleteWebhook"
}
func (config DeleteWebhookConfig) params() (Params, error) {
params := make(Params)
params.AddBool("drop_pending_updates", config.DropPendingUpdates)
return params, nil
}
// FileBytes contains information about a set of bytes to upload
// as a File.
type FileBytes struct {
Name string
Bytes []byte
}
// FileReader contains information about a reader to upload as a File.
type FileReader struct {
Name string
Reader io.Reader
}
// FileURL is a URL to use as a file for a request.
type FileURL string
// FileID is an ID of a file already uploaded to Telegram.
type FileID string
// InlineConfig contains information on making an InlineQuery response.
type InlineConfig struct {
InlineQueryID string `json:"inline_query_id"`
Results []interface{} `json:"results"`
CacheTime int `json:"cache_time"`
IsPersonal bool `json:"is_personal"`
NextOffset string `json:"next_offset"`
SwitchPMText string `json:"switch_pm_text"`
SwitchPMParameter string `json:"switch_pm_parameter"`
}
func (config InlineConfig) method() string {
return "answerInlineQuery"
}
func (config InlineConfig) params() (Params, error) {
params := make(Params)
params["inline_query_id"] = config.InlineQueryID
params.AddNonZero("cache_time", config.CacheTime)
params.AddBool("is_personal", config.IsPersonal)
params.AddNonEmpty("next_offset", config.NextOffset)
params.AddNonEmpty("switch_pm_text", config.SwitchPMText)
params.AddNonEmpty("switch_pm_parameter", config.SwitchPMParameter)
err := params.AddInterface("results", config.Results)
return params, err
}
// CallbackConfig contains information on making a CallbackQuery response.
type CallbackConfig struct {
CallbackQueryID string `json:"callback_query_id"`
Text string `json:"text"`
ShowAlert bool `json:"show_alert"`
URL string `json:"url"`
CacheTime int `json:"cache_time"`
}
func (config CallbackConfig) method() string {
return "answerCallbackQuery"
}
func (config CallbackConfig) params() (Params, error) {
params := make(Params)
params["callback_query_id"] = config.CallbackQueryID
params.AddNonEmpty("text", config.Text)
params.AddBool("show_alert", config.ShowAlert)
params.AddNonEmpty("url", config.URL)
params.AddNonZero("cache_time", config.CacheTime)
return params, nil
}
// ChatMemberConfig contains information about a user in a chat for use
// with administrative functions such as kicking or unbanning a user.
type ChatMemberConfig struct {
ChatID int64
SuperGroupUsername string
ChannelUsername string
UserID int64
}
// UnbanChatMemberConfig allows you to unban a user.
type UnbanChatMemberConfig struct {
ChatMemberConfig
OnlyIfBanned bool
}
func (config UnbanChatMemberConfig) method() string {
return "unbanChatMember"
}
func (config UnbanChatMemberConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.SuperGroupUsername, config.ChannelUsername)
params.AddNonZero64("user_id", config.UserID)
params.AddBool("only_if_banned", config.OnlyIfBanned)
return params, nil
}
// KickChatMemberConfig contains extra fields to kick a user.
type KickChatMemberConfig struct {
ChatMemberConfig
UntilDate int64
RevokeMessages bool
}
func (config KickChatMemberConfig) method() string {
return "kickChatMember"
}
func (config KickChatMemberConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.SuperGroupUsername)
params.AddNonZero64("user_id", config.UserID)
params.AddNonZero64("until_date", config.UntilDate)
params.AddBool("revoke_messages", config.RevokeMessages)
return params, nil
}
// RestrictChatMemberConfig contains fields to restrict members of a chat.
type RestrictChatMemberConfig struct {
ChatMemberConfig
UntilDate int64
Permissions *ChatPermissions
}
func (config RestrictChatMemberConfig) method() string {
return "restrictChatMember"
}
func (config RestrictChatMemberConfig) params() (Params, error) {
params := make(Params)
_ = params.AddFirstValid("chat_id", config.ChatID, config.SuperGroupUsername, config.ChannelUsername)
params.AddNonZero64("user_id", config.UserID)
err := params.AddInterface("permissions", config.Permissions)
params.AddNonZero64("until_date", config.UntilDate)
return params, err
}
// PromoteChatMemberConfig contains fields to promote members of a chat.
type PromoteChatMemberConfig struct {
ChatMemberConfig
IsAnonymous bool
CanManageChat bool
CanChangeInfo bool
CanPostMessages bool
CanEditMessages bool
CanDeleteMessages bool
CanManageVoiceChats bool
CanInviteUsers bool
CanRestrictMembers bool
CanPinMessages bool
CanPromoteMembers bool
}
func (config PromoteChatMemberConfig) method() string {
return "promoteChatMember"
}
func (config PromoteChatMemberConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.SuperGroupUsername, config.ChannelUsername)
params.AddNonZero64("user_id", config.UserID)
params.AddBool("is_anonymous", config.IsAnonymous)
params.AddBool("can_manage_chat", config.CanManageChat)
params.AddBool("can_change_info", config.CanChangeInfo)
params.AddBool("can_post_messages", config.CanPostMessages)
params.AddBool("can_edit_messages", config.CanEditMessages)
params.AddBool("can_delete_messages", config.CanDeleteMessages)
params.AddBool("can_manage_voice_chats", config.CanManageVoiceChats)
params.AddBool("can_invite_users", config.CanInviteUsers)
params.AddBool("can_restrict_members", config.CanRestrictMembers)
params.AddBool("can_pin_messages", config.CanPinMessages)
params.AddBool("can_promote_members", config.CanPromoteMembers)
return params, nil
}
// SetChatAdministratorCustomTitle sets the title of an administrative user
// promoted by the bot for a chat.
type SetChatAdministratorCustomTitle struct {
ChatMemberConfig
CustomTitle string
}
func (SetChatAdministratorCustomTitle) method() string {
return "setChatAdministratorCustomTitle"
}
func (config SetChatAdministratorCustomTitle) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.SuperGroupUsername, config.ChannelUsername)
params.AddNonZero64("user_id", config.UserID)
params.AddNonEmpty("custom_title", config.CustomTitle)
return params, nil
}
// ChatConfig contains information about getting information on a chat.
type ChatConfig struct {
ChatID int64
SuperGroupUsername string
}
func (config ChatConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.SuperGroupUsername)
return params, nil
}
// ChatInfoConfig contains information about getting chat information.
type ChatInfoConfig struct {
ChatConfig
}
func (ChatInfoConfig) method() string {
return "getChat"
}
// ChatMemberCountConfig contains information about getting the number of users in a chat.
type ChatMemberCountConfig struct {
ChatConfig
}
func (ChatMemberCountConfig) method() string {
return "getChatMembersCount"
}
// ChatAdministratorsConfig contains information about getting chat administrators.
type ChatAdministratorsConfig struct {
ChatConfig
}
func (ChatAdministratorsConfig) method() string {
return "getChatAdministrators"
}
// SetChatPermissionsConfig allows you to set default permissions for the
// members in a group. The bot must be an administrator and have rights to
// restrict members.
type SetChatPermissionsConfig struct {
ChatConfig
Permissions *ChatPermissions
}
func (SetChatPermissionsConfig) method() string {
return "setChatPermissions"
}
func (config SetChatPermissionsConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.SuperGroupUsername)
err := params.AddInterface("permissions", config.Permissions)
return params, err
}
// ChatInviteLinkConfig contains information about getting a chat link.
//
// Note that generating a new link will revoke any previous links.
type ChatInviteLinkConfig struct {
ChatConfig
}
func (ChatInviteLinkConfig) method() string {
return "exportChatInviteLink"
}
func (config ChatInviteLinkConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.SuperGroupUsername)
return params, nil
}
// CreateChatInviteLinkConfig allows you to create an additional invite link for
// a chat. The bot must be an administrator in the chat for this to work and
// must have the appropriate admin rights. The link can be revoked using the
// RevokeChatInviteLinkConfig.
type CreateChatInviteLinkConfig struct {
ChatConfig
ExpireDate int
MemberLimit int
}
func (CreateChatInviteLinkConfig) method() string {
return "createChatInviteLink"
}
func (config CreateChatInviteLinkConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.SuperGroupUsername)
params.AddNonZero("expire_date", config.ExpireDate)
params.AddNonZero("member_limit", config.MemberLimit)
return params, nil
}
// EditChatInviteLinkConfig allows you to edit a non-primary invite link created
// by the bot. The bot must be an administrator in the chat for this to work and
// must have the appropriate admin rights.
type EditChatInviteLinkConfig struct {
ChatConfig
InviteLink string
ExpireDate int
MemberLimit int
}
func (EditChatInviteLinkConfig) method() string {
return "editChatInviteLink"
}
func (config EditChatInviteLinkConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.SuperGroupUsername)
params["invite_link"] = config.InviteLink
params.AddNonZero("expire_date", config.ExpireDate)
params.AddNonZero("member_limit", config.MemberLimit)
return params, nil
}
// RevokeChatInviteLinkConfig allows you to revoke an invite link created by the
// bot. If the primary link is revoked, a new link is automatically generated.
// The bot must be an administrator in the chat for this to work and must have
// the appropriate admin rights.
type RevokeChatInviteLinkConfig struct {
ChatConfig
InviteLink string
}
func (RevokeChatInviteLinkConfig) method() string {
return "revokeChatInviteLink"
}
func (config RevokeChatInviteLinkConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.SuperGroupUsername)
params["invite_link"] = config.InviteLink
return params, nil
}
// LeaveChatConfig allows you to leave a chat.
type LeaveChatConfig struct {
ChatID int64
ChannelUsername string
}
func (config LeaveChatConfig) method() string {
return "leaveChat"
}
func (config LeaveChatConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.ChannelUsername)
return params, nil
}
// ChatConfigWithUser contains information about a chat and a user.
type ChatConfigWithUser struct {
ChatID int64
SuperGroupUsername string
UserID int64
}
func (config ChatConfigWithUser) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.SuperGroupUsername)
params.AddNonZero64("user_id", config.UserID)
return params, nil
}
// GetChatMemberConfig is information about getting a specific member in a chat.
type GetChatMemberConfig struct {
ChatConfigWithUser
}
func (GetChatMemberConfig) method() string {
return "getChatMember"
}
// InvoiceConfig contains information for sendInvoice request.
type InvoiceConfig struct {
BaseChat
Title string // required
Description string // required
Payload string // required
ProviderToken string // required
Currency string // required
Prices []LabeledPrice // required
MaxTipAmount int
SuggestedTipAmounts []int
StartParameter string
ProviderData string
PhotoURL string
PhotoSize int
PhotoWidth int
PhotoHeight int
NeedName bool
NeedPhoneNumber bool
NeedEmail bool
NeedShippingAddress bool
SendPhoneNumberToProvider bool
SendEmailToProvider bool
IsFlexible bool
}
func (config InvoiceConfig) params() (Params, error) {
params, err := config.BaseChat.params()
if err != nil {
return params, err
}
params["title"] = config.Title
params["description"] = config.Description
params["payload"] = config.Payload
params["provider_token"] = config.ProviderToken
params["currency"] = config.Currency
if err = params.AddInterface("prices", config.Prices); err != nil {
return params, err
}
params.AddNonZero("max_tip_amount", config.MaxTipAmount)
err = params.AddInterface("suggested_tip_amounts", config.SuggestedTipAmounts)
params.AddNonEmpty("start_parameter", config.StartParameter)
params.AddNonEmpty("provider_data", config.ProviderData)
params.AddNonEmpty("photo_url", config.PhotoURL)
params.AddNonZero("photo_size", config.PhotoSize)
params.AddNonZero("photo_width", config.PhotoWidth)
params.AddNonZero("photo_height", config.PhotoHeight)
params.AddBool("need_name", config.NeedName)
params.AddBool("need_phone_number", config.NeedPhoneNumber)
params.AddBool("need_email", config.NeedEmail)
params.AddBool("need_shipping_address", config.NeedShippingAddress)
params.AddBool("is_flexible", config.IsFlexible)
params.AddBool("send_phone_number_to_provider", config.SendPhoneNumberToProvider)
params.AddBool("send_email_to_provider", config.SendEmailToProvider)
return params, err
}
func (config InvoiceConfig) method() string {
return "sendInvoice"
}
// ShippingConfig contains information for answerShippingQuery request.
type ShippingConfig struct {
ShippingQueryID string // required
OK bool // required
ShippingOptions []ShippingOption
ErrorMessage string
}
func (config ShippingConfig) method() string {
return "answerShippingQuery"
}
func (config ShippingConfig) params() (Params, error) {
params := make(Params)
params["shipping_query_id"] = config.ShippingQueryID
params.AddBool("ok", config.OK)
err := params.AddInterface("shipping_options", config.ShippingOptions)
params.AddNonEmpty("error_message", config.ErrorMessage)
return params, err
}
// PreCheckoutConfig contains information for answerPreCheckoutQuery request.
type PreCheckoutConfig struct {
PreCheckoutQueryID string // required
OK bool // required
ErrorMessage string
}
func (config PreCheckoutConfig) method() string {
return "answerPreCheckoutQuery"
}
func (config PreCheckoutConfig) params() (Params, error) {
params := make(Params)
params["pre_checkout_query_id"] = config.PreCheckoutQueryID
params.AddBool("ok", config.OK)
params.AddNonEmpty("error_message", config.ErrorMessage)
return params, nil
}
// DeleteMessageConfig contains information of a message in a chat to delete.
type DeleteMessageConfig struct {
ChannelUsername string
ChatID int64
MessageID int
}
func (config DeleteMessageConfig) method() string {
return "deleteMessage"
}
func (config DeleteMessageConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.ChannelUsername)
params.AddNonZero("message_id", config.MessageID)
return params, nil
}
// PinChatMessageConfig contains information of a message in a chat to pin.
type PinChatMessageConfig struct {
ChatID int64
ChannelUsername string
MessageID int
DisableNotification bool
}
func (config PinChatMessageConfig) method() string {
return "pinChatMessage"
}
func (config PinChatMessageConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.ChannelUsername)
params.AddNonZero("message_id", config.MessageID)
params.AddBool("disable_notification", config.DisableNotification)
return params, nil
}
// UnpinChatMessageConfig contains information of a chat message to unpin.
//
// If MessageID is not specified, it will unpin the most recent pin.
type UnpinChatMessageConfig struct {
ChatID int64
ChannelUsername string
MessageID int
}
func (config UnpinChatMessageConfig) method() string {
return "unpinChatMessage"
}
func (config UnpinChatMessageConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.ChannelUsername)
params.AddNonZero("message_id", config.MessageID)
return params, nil
}
// UnpinAllChatMessagesConfig contains information of all messages to unpin in
// a chat.
type UnpinAllChatMessagesConfig struct {
ChatID int64
ChannelUsername string
}
func (config UnpinAllChatMessagesConfig) method() string {
return "unpinAllChatMessages"
}
func (config UnpinAllChatMessagesConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.ChannelUsername)
return params, nil
}
// SetChatPhotoConfig allows you to set a group, supergroup, or channel's photo.
type SetChatPhotoConfig struct {
BaseFile
}
func (config SetChatPhotoConfig) method() string {
return "setChatPhoto"
}
func (config SetChatPhotoConfig) files() []RequestFile {
return []RequestFile{{
Name: "photo",
File: config.File,
}}
}
// DeleteChatPhotoConfig allows you to delete a group, supergroup, or channel's photo.
type DeleteChatPhotoConfig struct {
ChatID int64
ChannelUsername string
}
func (config DeleteChatPhotoConfig) method() string {
return "deleteChatPhoto"
}
func (config DeleteChatPhotoConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.ChannelUsername)
return params, nil
}
// SetChatTitleConfig allows you to set the title of something other than a private chat.
type SetChatTitleConfig struct {
ChatID int64
ChannelUsername string
Title string
}
func (config SetChatTitleConfig) method() string {
return "setChatTitle"
}
func (config SetChatTitleConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.ChannelUsername)
params["title"] = config.Title
return params, nil
}
// SetChatDescriptionConfig allows you to set the description of a supergroup or channel.
type SetChatDescriptionConfig struct {
ChatID int64
ChannelUsername string
Description string
}
func (config SetChatDescriptionConfig) method() string {
return "setChatDescription"
}
func (config SetChatDescriptionConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.ChannelUsername)
params["description"] = config.Description
return params, nil
}
// GetStickerSetConfig allows you to get the stickers in a set.
type GetStickerSetConfig struct {
Name string
}
func (config GetStickerSetConfig) method() string {
return "getStickerSet"
}
func (config GetStickerSetConfig) params() (Params, error) {
params := make(Params)
params["name"] = config.Name
return params, nil
}
// UploadStickerConfig allows you to upload a sticker for use in a set later.
type UploadStickerConfig struct {
UserID int64
PNGSticker interface{}
}
func (config UploadStickerConfig) method() string {
return "uploadStickerFile"
}
func (config UploadStickerConfig) params() (Params, error) {
params := make(Params)
params.AddNonZero64("user_id", config.UserID)
return params, nil
}
func (config UploadStickerConfig) files() []RequestFile {
return []RequestFile{{
Name: "png_sticker",
File: config.PNGSticker,
}}
}
// NewStickerSetConfig allows creating a new sticker set.
//
// You must set either PNGSticker or TGSSticker.
type NewStickerSetConfig struct {
UserID int64
Name string
Title string
PNGSticker interface{}
TGSSticker interface{}
Emojis string
ContainsMasks bool
MaskPosition *MaskPosition
}
func (config NewStickerSetConfig) method() string {
return "createNewStickerSet"
}
func (config NewStickerSetConfig) params() (Params, error) {
params := make(Params)
params.AddNonZero64("user_id", config.UserID)
params["name"] = config.Name
params["title"] = config.Title
params["emojis"] = config.Emojis
params.AddBool("contains_masks", config.ContainsMasks)
err := params.AddInterface("mask_position", config.MaskPosition)
return params, err
}
func (config NewStickerSetConfig) files() []RequestFile {
if config.PNGSticker != nil {
return []RequestFile{{
Name: "png_sticker",
File: config.PNGSticker,
}}
}
return []RequestFile{{
Name: "tgs_sticker",
File: config.TGSSticker,
}}
}
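// exampleNewStickerSet is an illustrative sketch, not part of the original
// file. Per the comment above, exactly one of PNGSticker or TGSSticker should
// be set; here a PNG path is used. The user ID, set name, and emoji are
// placeholder values.
func exampleNewStickerSet() NewStickerSetConfig {
	return NewStickerSetConfig{
		UserID:     12345,
		Name:       "example_pack_by_examplebot",
		Title:      "Example Pack",
		PNGSticker: "sticker.png",
		Emojis:     "\U0001F600",
	}
}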
// AddStickerConfig allows you to add a sticker to a set.
type AddStickerConfig struct {
UserID int64
Name string
PNGSticker interface{}
TGSSticker interface{}
Emojis string
MaskPosition *MaskPosition
}
func (config AddStickerConfig) method() string {
return "addStickerToSet"
}
func (config AddStickerConfig) params() (Params, error) {
params := make(Params)
params.AddNonZero64("user_id", config.UserID)
params["name"] = config.Name
params["emojis"] = config.Emojis
err := params.AddInterface("mask_position", config.MaskPosition)
return params, err
}
func (config AddStickerConfig) files() []RequestFile {
if config.PNGSticker != nil {
return []RequestFile{{
Name: "png_sticker",
File: config.PNGSticker,
}}
}
return []RequestFile{{
Name: "tgs_sticker",
File: config.TGSSticker,
}}
}
// SetStickerPositionConfig allows you to change the position of a sticker in a set.
type SetStickerPositionConfig struct {
Sticker string
Position int
}
func (config SetStickerPositionConfig) method() string {
return "setStickerPositionInSet"
}
func (config SetStickerPositionConfig) params() (Params, error) {
params := make(Params)
params["sticker"] = config.Sticker
params.AddNonZero("position", config.Position)
return params, nil
}
// DeleteStickerConfig allows you to delete a sticker from a set.
type DeleteStickerConfig struct {
Sticker string
}
func (config DeleteStickerConfig) method() string {
return "deleteStickerFromSet"
}
func (config DeleteStickerConfig) params() (Params, error) {
params := make(Params)
params["sticker"] = config.Sticker
return params, nil
}
// SetStickerSetThumbConfig allows you to set the thumbnail for a sticker set.
type SetStickerSetThumbConfig struct {
Name string
UserID int64
Thumb interface{}
}
func (config SetStickerSetThumbConfig) method() string {
return "setStickerSetThumb"
}
func (config SetStickerSetThumbConfig) params() (Params, error) {
params := make(Params)
params["name"] = config.Name
params.AddNonZero64("user_id", config.UserID)
return params, nil
}
func (config SetStickerSetThumbConfig) files() []RequestFile {
return []RequestFile{{
Name: "thumb",
File: config.Thumb,
}}
}
// SetChatStickerSetConfig allows you to set the sticker set for a supergroup.
type SetChatStickerSetConfig struct {
ChatID int64
SuperGroupUsername string
StickerSetName string
}
func (config SetChatStickerSetConfig) method() string {
return "setChatStickerSet"
}
func (config SetChatStickerSetConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.SuperGroupUsername)
params["sticker_set_name"] = config.StickerSetName
return params, nil
}
// DeleteChatStickerSetConfig allows you to remove a supergroup's sticker set.
type DeleteChatStickerSetConfig struct {
ChatID int64
SuperGroupUsername string
}
func (config DeleteChatStickerSetConfig) method() string {
return "deleteChatStickerSet"
}
func (config DeleteChatStickerSetConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.SuperGroupUsername)
return params, nil
}
// MediaGroupConfig allows you to send a group of media.
//
// Media consists of InputMedia items (InputMediaPhoto, InputMediaVideo).
type MediaGroupConfig struct {
ChatID int64
ChannelUsername string
Media []interface{}
DisableNotification bool
ReplyToMessageID int
}
func (config MediaGroupConfig) method() string {
return "sendMediaGroup"
}
func (config MediaGroupConfig) params() (Params, error) {
params := make(Params)
params.AddFirstValid("chat_id", config.ChatID, config.ChannelUsername)
params.AddBool("disable_notification", config.DisableNotification)
params.AddNonZero("reply_to_message_id", config.ReplyToMessageID)
err := params.AddInterface("media", prepareInputMediaForParams(config.Media))
return params, err
}
func (config MediaGroupConfig) files() []RequestFile {
return prepareInputMediaForFiles(config.Media)
}
// DiceConfig contains information about a sendDice request.
type DiceConfig struct {
BaseChat
// Emoji on which the dice throw animation is based.
// Currently, must be one of 🎲, 🎯, 🏀, ⚽, 🎳, or 🎰.
// Dice can have values 1-6 for 🎲, 🎯, and 🎳, values 1-5 for 🏀 and ⚽,
// and values 1-64 for 🎰.
// Defaults to "🎲"
Emoji string
}
func (config DiceConfig) method() string {
return "sendDice"
}
func (config DiceConfig) params() (Params, error) {
params, err := config.BaseChat.params()
if err != nil {
return params, err
}
params.AddNonEmpty("emoji", config.Emoji)
return params, err
}
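// exampleDice is an illustrative sketch, not part of the original file. It
// requests the slot-machine animation (U+1F3B0) instead of the default die;
// the chat ID is a placeholder.
func exampleDice() DiceConfig {
	return DiceConfig{
		BaseChat: BaseChat{ChatID: 12345},
		Emoji:    "\U0001F3B0",
	}
}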
// GetMyCommandsConfig gets a list of the currently registered commands.
type GetMyCommandsConfig struct{}
func (config GetMyCommandsConfig) method() string {
return "getMyCommands"
}
func (config GetMyCommandsConfig) params() (Params, error) {
return nil, nil
}
// SetMyCommandsConfig sets a list of commands the bot understands.
type SetMyCommandsConfig struct {
commands []BotCommand
}
func (config SetMyCommandsConfig) method() string {
return "setMyCommands"
}
func (config SetMyCommandsConfig) params() (Params, error) {
params := make(Params)
err := params.AddInterface("commands", config.commands)
return params, err
}
// prepareInputMediaParam evaluates a single InputMedia and determines if it
// needs to be modified for a successful upload. If it returns nil, then the
// value does not need to be included in the params. Otherwise, it will return
// the same type as was originally provided.
//
// The idx is used to calculate the file field name. If you only have a single
// file, 0 may be used. It is formatted into "attach://file-%d" for the primary
// media and "attach://file-%d-thumb" for thumbnails.
//
// It is expected to be used in conjunction with prepareInputMediaFile.
func prepareInputMediaParam(inputMedia interface{}, idx int) interface{} {
switch m := inputMedia.(type) {
case InputMediaPhoto:
switch m.Media.(type) {
case string, FileBytes, FileReader:
m.Media = fmt.Sprintf("attach://file-%d", idx)
}
return m
case InputMediaVideo:
switch m.Media.(type) {
case string, FileBytes, FileReader:
m.Media = fmt.Sprintf("attach://file-%d", idx)
}
switch m.Thumb.(type) {
case string, FileBytes, FileReader:
m.Thumb = fmt.Sprintf("attach://file-%d-thumb", idx)
}
return m
case InputMediaAudio:
switch m.Media.(type) {
case string, FileBytes, FileReader:
m.Media = fmt.Sprintf("attach://file-%d", idx)
}
switch m.Thumb.(type) {
case string, FileBytes, FileReader:
m.Thumb = fmt.Sprintf("attach://file-%d-thumb", idx)
}
return m
case InputMediaDocument:
switch m.Media.(type) {
case string, FileBytes, FileReader:
m.Media = fmt.Sprintf("attach://file-%d", idx)
}
switch m.Thumb.(type) {
case string, FileBytes, FileReader:
m.Thumb = fmt.Sprintf("attach://file-%d-thumb", idx)
}
return m
}
return nil
}
// prepareInputMediaFile generates an array of RequestFile to provide for
// Fileable's files method. It returns an array as a single InputMedia may have
// multiple files, for the primary media and a thumbnail.
//
// The idx parameter is used to generate file field names. It uses the names
// "file-%d" for the main file and "file-%d-thumb" for the thumbnail.
//
// It is expected to be used in conjunction with prepareInputMediaParam.
func prepareInputMediaFile(inputMedia interface{}, idx int) []RequestFile {
files := []RequestFile{}
switch m := inputMedia.(type) {
case InputMediaPhoto:
switch f := m.Media.(type) {
case string, FileBytes, FileReader:
files = append(files, RequestFile{
Name: fmt.Sprintf("file-%d", idx),
File: f,
})
}
case InputMediaVideo:
switch f := m.Media.(type) {
case string, FileBytes, FileReader:
files = append(files, RequestFile{
Name: fmt.Sprintf("file-%d", idx),
File: f,
})
}
switch f := m.Thumb.(type) {
case string, FileBytes, FileReader:
files = append(files, RequestFile{
Name: fmt.Sprintf("file-%d-thumb", idx),
File: f,
})
}
case InputMediaDocument:
switch f := m.Media.(type) {
case string, FileBytes, FileReader:
files = append(files, RequestFile{
Name: fmt.Sprintf("file-%d", idx),
File: f,
})
}
switch f := m.Thumb.(type) {
case string, FileBytes, FileReader:
files = append(files, RequestFile{
Name: fmt.Sprintf("file-%d-thumb", idx),
File: f,
})
}
case InputMediaAudio:
switch f := m.Media.(type) {
case string, FileBytes, FileReader:
files = append(files, RequestFile{
Name: fmt.Sprintf("file-%d", idx),
File: f,
})
}
switch f := m.Thumb.(type) {
case string, FileBytes, FileReader:
files = append(files, RequestFile{
Name: fmt.Sprintf("file-%d-thumb", idx),
File: f,
})
}
}
return files
}
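// exampleAttachRewrite is an illustrative sketch, not part of the original
// file. It shows the pairing described in the comments above: for media backed
// by raw bytes, prepareInputMediaParam rewrites Media to "attach://file-0"
// while prepareInputMediaFile returns the matching multipart file named
// "file-0". The JPEG bytes are placeholders.
func exampleAttachRewrite() (interface{}, []RequestFile) {
	var photo InputMediaPhoto
	photo.Media = FileBytes{Name: "pic.jpg", Bytes: []byte{0xFF, 0xD8}}

	param := prepareInputMediaParam(photo, 0) // Media now reads "attach://file-0"
	files := prepareInputMediaFile(photo, 0)  // one RequestFile named "file-0"
	return param, files
}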
// prepareInputMediaForParams calls prepareInputMediaParam for each item
// provided and returns a new array with the correct params for a request.
//
// It is expected that files will get data from the associated function,
// prepareInputMediaForFiles.
func prepareInputMediaForParams(inputMedia []interface{}) []interface{} {
newMedia := make([]interface{}, len(inputMedia))
copy(newMedia, inputMedia)
for idx, media := range inputMedia {
if param := prepareInputMediaParam(media, idx); param != nil {
newMedia[idx] = param
}
}
return newMedia
}
// prepareInputMediaForFiles calls prepareInputMediaFile for each item
// provided and returns a new array with the correct files for a request.
//
// It is expected that params will get data from the associated function,
// prepareInputMediaForParams.
func prepareInputMediaForFiles(inputMedia []interface{}) []RequestFile {
files := []RequestFile{}
for idx, media := range inputMedia {
if file := prepareInputMediaFile(media, idx); file != nil {
files = append(files, file...)
}
}
return files
}
| [] | [] | [] | [] | [] | go | null | null | null |
geequery-commons/src/main/java/com/github/geequery/tools/resource/UrlResourceLoader.java | package com.github.geequery.tools.resource;
import java.io.File;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.List;
import java.util.Set;
import com.github.geequery.tools.reflect.ClassLoaderUtil;
import org.apache.commons.lang.StringUtils;
public class UrlResourceLoader extends AResourceLoader {
/**
* Preferred URLs to search within.
*/
// private URL[] perferUrl;
private java.net.URLClassLoader ul;
/**
* Only search within the specified directories.
*/
private boolean directoryOnly = false;
public UrlResourceLoader() {
}
public static enum EnvURL {
PATH, JAVA_LIBRARY_PATH, JAVA_HOME, USER_DIR, TEMP_DIR, USER_HOME, JAVA_CLASS_PATH, SYSTEM_TMP, WINDIR
}
public UrlResourceLoader(EnvURL... envs) {
Set<URL> u = new java.util.LinkedHashSet<URL>();
String pathSp = System.getProperty("path.separator");
for (EnvURL type : envs) {
switch (type) {
case JAVA_CLASS_PATH:
for (String s : StringUtils.split(System.getProperty("java.class.path"), pathSp)) {
add(u,s);
}
break;
case JAVA_HOME: {
add(u,System.getProperty("java.home"));
break;
}
case JAVA_LIBRARY_PATH:
for (String s : StringUtils.split(System.getProperty("java.library.path"), pathSp)) {
add(u,s);
}
break;
case PATH:
for (String s : StringUtils.split(System.getenv("PATH"), pathSp)) {
add(u,s);
}
break;
case SYSTEM_TMP:
add(u,System.getenv("TEMP"));
break;
case TEMP_DIR:
add(u,System.getProperty("java.io.tmpdir"));
break;
case USER_DIR:
add(u,System.getProperty("user.dir"));
break;
case USER_HOME:
add(u,System.getProperty("user.home"));
break;
case WINDIR:
add(u,System.getenv("windir"));
break;
}
}
setPerferUrl(u.toArray(new URL[u.size()]));
}
private final void add(Set<URL> u, String s) {
if(s==null || s.length()==0)return;
File f = new File(s);
try{
u.add(f.toURI().toURL());
} catch (MalformedURLException e) {
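// Paths that cannot be converted to a URL are silently skipped.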
}
}
private void setPerferUrl(URL[] array) {
this.ul = new URLClassLoader(array);
}
public UrlResourceLoader(Class<?> c) {
this(c, false);
}
public UrlResourceLoader(Class<?> c, boolean dirOnly) {
setSearchURLByClass(c);
this.directoryOnly = dirOnly;
}
public boolean isDirectoryOnly() {
return directoryOnly;
}
public void setDirectoryOnly(boolean directoryOnly) {
this.directoryOnly = directoryOnly;
}
public void setSearchURL(URL... urls) {
setPerferUrl(urls);
}
public void setSearchURLByClass(Class<?> clz) {
URL u = ClassLoaderUtil.getCodeSource(clz);
if (u != null) {
setPerferUrl(new URL[] { u });
}
}
public URL getResource(String name) {
if (name.startsWith("//")){
File file=new File(name.substring(1));
if(file.exists())return super.toURL(file);
name = name.substring(2);
}else if(name.startsWith("/")){
name = name.substring(1);
}
if (directoryOnly)
name = "./" + name;
if (ul != null) {
return ul.findResource(name);
}
return null;
}
public List<URL> getResources(String name) {
if (name.startsWith("//")){
File file=new File(name.substring(1));
if(file.exists())return Arrays.asList(super.toURL(file));
name = name.substring(2);
}else if(name.startsWith("/")){
name = name.substring(1);
}
if (directoryOnly)
name = "./" + name;
List<URL> result = new ArrayList<URL>();
try {
if (ul != null) {
for (Enumeration<URL> e = ul.findResources(name); e.hasMoreElements();) {
result.add(e.nextElement());
}
}
} catch (IOException e) {
throw new IllegalStateException(e.getMessage());
}
return result;
}
}
| [
"\"PATH\"",
"\"TEMP\"",
"\"windir\""
] | [] | [
"windir",
"PATH",
"TEMP"
] | [] | ["windir", "PATH", "TEMP"] | java | 3 | 0 | |
infrastructure-provisioning/src/general/scripts/gcp/common_create_notebook_image.py | #!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import datalab.fab
import datalab.actions_lib
import datalab.meta_lib
import json
import os
import sys
if __name__ == "__main__":
try:
image_conf = dict()
GCPMeta = datalab.meta_lib.GCPMeta()
GCPActions = datalab.actions_lib.GCPActions()
try:
image_conf['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
except:
image_conf['exploratory_name'] = ''
image_conf['service_base_name'] = os.environ['conf_service_base_name'] = datalab.fab.replace_multi_symbols(
os.environ['conf_service_base_name'][:20], '-', True).lower()
image_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
image_conf['endpoint_tag'] = image_conf['endpoint_name']
image_conf['project_name'] = os.environ['project_name'].lower()
image_conf['project_tag'] = image_conf['project_name']
image_conf['instance_name'] = os.environ['notebook_instance_name']
image_conf['instance_tag'] = '{}-tag'.format(image_conf['service_base_name'])
image_conf['application'] = os.environ['application']
image_conf['image_name'] = os.environ['notebook_image_name'].replace('_', '-').lower()
image_conf['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image-{}'.format(
image_conf['service_base_name'], image_conf['project_name'], image_conf['endpoint_name'],
os.environ['application'], image_conf['image_name'])
image_conf['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image-{}'.format(
image_conf['service_base_name'], image_conf['project_name'], image_conf['endpoint_name'],
os.environ['application'], image_conf['image_name'])
image_conf['image_labels'] = {"sbn": image_conf['service_base_name'],
"endpoint_tag": image_conf['endpoint_tag'],
"project_tag": image_conf['project_tag'],
"image": image_conf['image_name'],
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
image_conf['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(image_conf['service_base_name'],
image_conf['project_name'],
image_conf['endpoint_name'],
image_conf['exploratory_name'])
image_conf['zone'] = os.environ['gcp_zone']
print('[CREATING IMAGE]')
primary_image_id = GCPMeta.get_image_by_name(image_conf['expected_primary_image_name'])
if primary_image_id == '':
image_id_list = GCPActions.create_image_from_instance_disks(
image_conf['expected_primary_image_name'], image_conf['expected_secondary_image_name'],
image_conf['instance_name'], image_conf['zone'], image_conf['image_labels'])
if image_id_list and image_id_list[0] != '':
print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
else:
print("Looks like another image creating operation for your template have been started a "
"moment ago.")
if image_id_list and image_id_list[1] != '':
print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
with open("/root/result.json", 'w') as result:
res = {"primary_image_name": image_conf['expected_primary_image_name'],
"secondary_image_name": image_conf['expected_secondary_image_name'],
"project_name": image_conf['project_name'],
"application": image_conf['application'],
"status": "created",
"Action": "Create image from notebook"}
result.write(json.dumps(res))
except Exception as err:
datalab.fab.append_result("Failed to create image from notebook", str(err))
sys.exit(1)
| [] | [] | [
"notebook_instance_name",
"exploratory_name",
"gcp_zone",
"notebook_image_name",
"application",
"endpoint_name",
"project_name",
"conf_service_base_name",
"conf_billing_tag_key",
"conf_billing_tag_value"
] | [] | ["notebook_instance_name", "exploratory_name", "gcp_zone", "notebook_image_name", "application", "endpoint_name", "project_name", "conf_service_base_name", "conf_billing_tag_key", "conf_billing_tag_value"] | python | 10 | 0 | |
unix.go | // +build darwin linux
package main
import (
"os"
"syscall"
)
func sysExec(path string, args []string, env []string) error {
return syscall.Exec(path, args, env)
}
func homePath() string {
return os.Getenv("HOME")
}
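// exampleExecShell is an illustrative sketch, not part of the original file.
// It shows the intended use of the two helpers above: replace the current
// process with the user's shell, passing the environment through unchanged.
// The $SHELL lookup and /bin/sh fallback are assumptions for the example.
func exampleExecShell() error {
	shell := os.Getenv("SHELL")
	if shell == "" {
		shell = "/bin/sh"
	}
	return sysExec(shell, []string{shell}, os.Environ())
}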
| [
"\"HOME\""
] | [] | [
"HOME"
] | [] | ["HOME"] | go | 1 | 0 | |
cmd/frontend/graphqlbackend/site.go | package graphqlbackend
import (
"context"
"os"
"strconv"
"strings"
"github.com/cockroachdb/errors"
"github.com/graph-gophers/graphql-go"
"github.com/graph-gophers/graphql-go/relay"
"github.com/sourcegraph/sourcegraph/cmd/frontend/backend"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/siteid"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/version"
"github.com/sourcegraph/sourcegraph/cmd/frontend/globals"
)
const singletonSiteGQLID = "site"
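// siteByGQLID resolves the GraphQL node for the singleton site instance.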
func (r *schemaResolver) siteByGQLID(ctx context.Context, id graphql.ID) (Node, error) {
siteGQLID, err := unmarshalSiteGQLID(id)
if err != nil {
return nil, err
}
if siteGQLID != singletonSiteGQLID {
return nil, errors.Errorf("site not found: %q", siteGQLID)
}
return &siteResolver{db: r.db, gqlID: siteGQLID}, nil
}
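// marshalSiteGQLID encodes a site ID as an opaque GraphQL ID.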
func marshalSiteGQLID(siteID string) graphql.ID { return relay.MarshalID("Site", siteID) }
// SiteGQLID is the GraphQL ID of the Sourcegraph site. It is a constant across all Sourcegraph
// instances.
func SiteGQLID() graphql.ID { return (&siteResolver{gqlID: singletonSiteGQLID}).ID() }
func unmarshalSiteGQLID(id graphql.ID) (siteID string, err error) {
err = relay.UnmarshalSpec(id, &siteID)
return
}
func (r *schemaResolver) Site() *siteResolver {
return &siteResolver{db: r.db, gqlID: singletonSiteGQLID}
}
type siteResolver struct {
db database.DB
gqlID string // == singletonSiteGQLID, not the site ID
}
func (r *siteResolver) ID() graphql.ID { return marshalSiteGQLID(r.gqlID) }
func (r *siteResolver) SiteID() string { return siteid.Get() }
func (r *siteResolver) Configuration(ctx context.Context) (*siteConfigurationResolver, error) {
// ๐จ SECURITY: The site configuration contains secret tokens and credentials,
// so only admins may view it.
if err := backend.CheckCurrentUserIsSiteAdmin(ctx, r.db); err != nil {
return nil, err
}
return &siteConfigurationResolver{db: r.db}, nil
}
func (r *siteResolver) ViewerCanAdminister(ctx context.Context) (bool, error) {
if err := backend.CheckCurrentUserIsSiteAdmin(ctx, r.db); err == backend.ErrMustBeSiteAdmin || err == backend.ErrNotAuthenticated {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
func (r *siteResolver) settingsSubject() api.SettingsSubject {
return api.SettingsSubject{Site: true}
}
func (r *siteResolver) LatestSettings(ctx context.Context) (*settingsResolver, error) {
settings, err := database.Settings(r.db).GetLatest(ctx, r.settingsSubject())
if err != nil {
return nil, err
}
if settings == nil {
return nil, nil
}
return &settingsResolver{r.db, &settingsSubject{site: r}, settings, nil}, nil
}
func (r *siteResolver) SettingsCascade() *settingsCascade {
return &settingsCascade{db: r.db, subject: &settingsSubject{site: r}}
}
func (r *siteResolver) ConfigurationCascade() *settingsCascade { return r.SettingsCascade() }
func (r *siteResolver) SettingsURL() *string { return strptr("/site-admin/global-settings") }
func (r *siteResolver) CanReloadSite(ctx context.Context) bool {
err := backend.CheckCurrentUserIsSiteAdmin(ctx, r.db)
return canReloadSite && err == nil
}
func (r *siteResolver) BuildVersion() string { return version.Version() }
func (r *siteResolver) ProductVersion() string { return version.Version() }
func (r *siteResolver) HasCodeIntelligence() bool {
// BACKCOMPAT: Always return true.
return true
}
func (r *siteResolver) ProductSubscription() *productSubscriptionStatus {
return &productSubscriptionStatus{}
}
func (r *siteResolver) AllowSiteSettingsEdits() bool {
return canUpdateSiteConfiguration()
}
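// siteConfigurationResolver resolves the site configuration; all of its fields are restricted to site admins.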
type siteConfigurationResolver struct {
db database.DB
}
func (r *siteConfigurationResolver) ID(ctx context.Context) (int32, error) {
// ๐จ SECURITY: The site configuration contains secret tokens and credentials,
// so only admins may view it.
if err := backend.CheckCurrentUserIsSiteAdmin(ctx, r.db); err != nil {
return 0, err
}
return 0, nil // TODO(slimsag): future: return the real ID here to prevent races
}
func (r *siteConfigurationResolver) EffectiveContents(ctx context.Context) (JSONCString, error) {
// ๐จ SECURITY: The site configuration contains secret tokens and credentials,
// so only admins may view it.
if err := backend.CheckCurrentUserIsSiteAdmin(ctx, r.db); err != nil {
return "", err
}
siteConfig, err := conf.RedactSecrets(globals.ConfigurationServerFrontendOnly.Raw())
return JSONCString(siteConfig.Site), err
}
func (r *siteConfigurationResolver) ValidationMessages(ctx context.Context) ([]string, error) {
contents, err := r.EffectiveContents(ctx)
if err != nil {
return nil, err
}
return conf.ValidateSite(string(contents))
}
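// UpdateSiteConfiguration validates the provided JSONC and writes it as the new site configuration; it is restricted to site admins and rejected when the configuration is managed through SITE_CONFIG_FILE.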
func (r *schemaResolver) UpdateSiteConfiguration(ctx context.Context, args *struct {
LastID int32
Input string
}) (bool, error) {
// ๐จ SECURITY: The site configuration contains secret tokens and credentials,
// so only admins may view it.
if err := backend.CheckCurrentUserIsSiteAdmin(ctx, r.db); err != nil {
return false, err
}
if !canUpdateSiteConfiguration() {
return false, errors.New("updating site configuration not allowed when using SITE_CONFIG_FILE")
}
if strings.TrimSpace(args.Input) == "" {
return false, errors.Errorf("blank site configuration is invalid (you can clear the site configuration by entering an empty JSON object: {})")
}
if problems, err := conf.ValidateSite(args.Input); err != nil {
return false, errors.Errorf("failed to validate site configuration: %w", err)
} else if len(problems) > 0 {
return false, errors.Errorf("site configuration is invalid: %s", strings.Join(problems, ","))
}
prev := globals.ConfigurationServerFrontendOnly.Raw()
unredacted, err := conf.UnredactSecrets(args.Input, prev)
if err != nil {
return false, errors.Errorf("error unredacting secrets: %s", err)
}
prev.Site = unredacted
// TODO(slimsag): future: actually pass lastID through to prevent race conditions
if err := globals.ConfigurationServerFrontendOnly.Write(ctx, prev); err != nil {
return false, err
}
return globals.ConfigurationServerFrontendOnly.NeedServerRestart(), nil
}
var siteConfigAllowEdits, _ = strconv.ParseBool(env.Get("SITE_CONFIG_ALLOW_EDITS", "false", "When SITE_CONFIG_FILE is in use, allow edits in the application to be made which will be overwritten on next process restart"))
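// canUpdateSiteConfiguration reports whether the site configuration may be edited at runtime: always when SITE_CONFIG_FILE is unset, otherwise only if SITE_CONFIG_ALLOW_EDITS is enabled.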
func canUpdateSiteConfiguration() bool {
return os.Getenv("SITE_CONFIG_FILE") == "" || siteConfigAllowEdits
}
| [
"\"SITE_CONFIG_FILE\""
] | [] | [
"SITE_CONFIG_FILE"
] | [] | ["SITE_CONFIG_FILE"] | go | 1 | 0 | |
pkg/test-infra/tests/client.go | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tests
import (
"context"
"fmt"
"os"
"github.com/ngaut/log"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
_ "k8s.io/client-go/plugin/pkg/client/auth" // auth in cluster
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"github.com/pingcap/tipocket/pkg/test-infra/fixture"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// TestClient encapsulates many kinds of clients
var TestClient *TestCli
// TestCli contains clients
type TestCli struct {
Cli client.Client
}
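// newTestCli wraps a generic Kubernetes client built from the given rest config; a construction error is logged rather than returned.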
func newTestCli(conf *rest.Config) *TestCli {
kubeCli, err := fixture.BuildGenericKubeClient(conf)
if err != nil {
log.Warnf("error creating kube-client: %v", err)
}
return &TestCli{
Cli: kubeCli,
}
}
// GetNodes gets physical nodes
func (e *TestCli) GetNodes() (*corev1.NodeList, error) {
nodes := &corev1.NodeList{}
err := e.Cli.List(context.TODO(), nodes)
if err != nil {
return nil, err
}
return nodes, nil
}
// CreateNamespace creates the specified namespace if it does not exist
// and enables the admission webhook label on it.
func (e *TestCli) CreateNamespace(name string) error {
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}
if _, err := controllerutil.CreateOrUpdate(context.TODO(), e.Cli, ns, func() error {
if ns.Labels != nil {
ns.Labels["admission-webhook"] = "enabled"
} else {
ns.Labels = map[string]string{
"admission-webhook": "enabled",
}
}
return nil
}); err != nil {
return err
}
return nil
}
// DeleteNamespace delete the specified namespace
func (e *TestCli) DeleteNamespace(name string) error {
ns := &corev1.Namespace{}
if err := e.Cli.Get(context.TODO(), types.NamespacedName{Name: name}, ns); err != nil {
if errors.IsNotFound(err) {
return nil
}
return fmt.Errorf("get namespace %s failed: %+v", name, err)
}
if err := e.Cli.Delete(context.TODO(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}); err != nil {
return fmt.Errorf("delete namespace %s failed: %+v", name, err)
}
return nil
}
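// init builds the shared TestClient from the kubeconfig file referenced by the KUBECONFIG environment variable.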
func init() {
conf, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
if err != nil {
log.Warnf("build config failed: %+v", err)
}
TestClient = newTestCli(conf)
}
| [
"\"KUBECONFIG\""
] | [] | [
"KUBECONFIG"
] | [] | ["KUBECONFIG"] | go | 1 | 0 |