author | date | timezone | hash | message | mods | language | license | repo | original_message |
---|---|---|---|---|---|---|---|---|---|
259,858 | 03.02.2022 22:24:31 | 28,800 | dce4528a1a95e172c69eefb76cffe0736c2730d9 | Clean documentation and add go vet support for checklocks.
This makes it easier to iterate with checklocks. This pattern will be
duplicated with more complex analyzers.
Updates | [
{
"change_type": "MODIFY",
"old_path": "BUILD",
"new_path": "BUILD",
"diff": "@@ -118,6 +118,7 @@ go_path(\n\"//runsc/cli\",\n\"//shim/cli\",\n\"//webhook/pkg/cli\",\n+ \"//tools/checklocks\",\n# Packages that are not dependencies of the above.\n\"//pkg/sentry/kernel/memevent\",\n"
},
{
"change_type": "MODIFY",
"old_path": "go.mod",
"new_path": "go.mod",
"diff": "@@ -24,7 +24,7 @@ require (\ngithub.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2\ngithub.com/vishvananda/netlink v1.0.1-0.20190930145447-2ec5bdc52b86\ngolang.org/x/sync v0.0.0-20210220032951-036812b2e83c\n- golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac\n+ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654\ngolang.org/x/time v0.0.0-20191024005414-555d28b269f0\ngoogle.golang.org/grpc v1.42.0-dev.0.20211020220737-f00baa6c3c84\ngoogle.golang.org/protobuf v1.27.1\n@@ -57,11 +57,15 @@ require (\ngithub.com/opencontainers/runc v1.0.0-rc90 // indirect\ngithub.com/pkg/errors v0.9.1 // indirect\ngithub.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect\n+ github.com/yuin/goldmark v1.4.1 // indirect\ngo.opencensus.io v0.23.0 // indirect\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect\n- golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 // indirect\n+ golang.org/x/mod v0.5.1 // indirect\n+ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f // indirect\ngolang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 // indirect\n- golang.org/x/text v0.3.6 // indirect\n+ golang.org/x/text v0.3.7 // indirect\n+ golang.org/x/tools v0.1.9 // indirect\n+ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect\ngoogle.golang.org/appengine v1.6.7 // indirect\ngoogle.golang.org/genproto v0.0.0-20210722135532-667f2b7c528f // indirect\ngopkg.in/inf.v0 v0.9.1 // indirect\n"
},
{
"change_type": "MODIFY",
"old_path": "go.sum",
"new_path": "go.sum",
"diff": "@@ -377,6 +377,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de\ngithub.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=\n+github.com/yuin/goldmark v1.4.1 h1:/vn0k+RBvwlxEmP5E7SZMqNxPhfMVFEJiykr15/0XKM=\n+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=\ngo.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=\ngo.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=\ngo.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=\n@@ -430,6 +432,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB\ngolang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\n+golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=\n+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=\ngolang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\n@@ -467,6 +471,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY\ngolang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=\ngolang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk=\ngolang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\n+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY=\n+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\n@@ -535,6 +541,8 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w\ngolang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw=\ngolang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\n+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0=\n+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=\ngolang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n@@ -545,6 +553,8 @@ 
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=\ngolang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\n+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=\n+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=\ngolang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=\n@@ -595,6 +605,8 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc\ngolang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=\ngolang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\n+golang.org/x/tools v0.1.9 h1:j9KsMiaP1c3B0OTQGth0/k+miLGTgLsAFUCrF2vLcF8=\n+golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=\ngolang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/checklocks/BUILD",
"new_path": "tools/checklocks/BUILD",
"diff": "@@ -12,7 +12,11 @@ go_library(\n\"state.go\",\n],\nnogo = False,\n- visibility = [\"//tools/nogo:__subpackages__\"],\n+ visibility = [\n+ \"//:__pkg__\",\n+ \"//tools/checklocks/cmd:__subpackages__\",\n+ \"//tools/nogo:__subpackages__\",\n+ ],\ndeps = [\n\"@org_golang_x_tools//go/analysis:go_default_library\",\n\"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library\",\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/checklocks/README.md",
"new_path": "tools/checklocks/README.md",
"diff": "# CheckLocks Analyzer\n-<!--* freshness: { owner: 'gvisor-eng' reviewed: '2021-10-20' } *-->\n+<!--* freshness: { owner: 'gvisor-eng' reviewed: '2022-02-02' } *-->\nChecklocks is an analyzer for lock and atomic constraints. The analyzer relies\non explicit annotations to identify fields that should be checked for access.\n-## Atomic annotations\n+## Installation and Usage\n+\n+The analyzer is integrated into the gVisor `nogo` framework. It automatically\n+applies to all code in this repository.\n+\n+For external usage and to iterate quickly, it may be used as part of `go vet`.\n+You can install the tool separately via:\n+\n+```sh\n+go install gvisor.dev/gvisor/tools/checklocks/cmd/checklocks@go\n+```\n+\n+And, if installed to the default path, run it via:\n+\n+```sh\n+go vet -vettool=$HOME/go/bin/checklocks ./...\n+```\n+\n+## Annotations\n+\n+This analyzer supports annotations for atomic access and lock enforcement, in\n+order to allow for mixed semantics. These are first described separately, then\n+the combination is discussed.\n+\n+### Atomic Access Enforcement\nIndividual struct members may be noted as requiring atomic access. These\n-annotations are of the form:\n+annotations are of the form `+checkatomic`, for example:\n```go\ntype foo struct {\n@@ -18,22 +42,24 @@ type foo struct {\n```\nThis will ensure that all accesses to bar are atomic, with the exception of\n-operations on newly allocated objects.\n+operations on newly allocated objects (when detectable).\n-## Lock annotations\n+## Lock Enforcement\nIndividual struct members may be protected by annotations that indicate locking\n-requirements for accessing members. These annotations are of the form:\n+requirements for accessing members. These annotations are of the form\n+`+checklocks`, for example:\n```go\ntype foo struct {\nmu sync.Mutex\n+\n// +checklocks:mu\nbar int\nfoo int // No annotation on foo means it's not guarded by mu.\n- secondMu sync.Mutex\n+ secondMu sync.RWMutex\n// Multiple annotations indicate that both must be held but the\n// checker does not assert any lock ordering.\n@@ -43,89 +69,107 @@ type foo struct {\n}\n```\n-The checklocks annotation may also apply to functions. For example:\n+These semantics are enforcable on `sync.Mutex`, `sync.RWMutex` and `sync.Locker`\n+fields. Semantics with respect to reading and writing are automatically detected\n+and enforced. If an access is read-only, then the lock need only be held as a\n+read lock, in the case of an `sync.RWMutex`.\n+\n+The locks must be resolvable within the scope of the declaration. This means the\n+lock must refer to one of:\n+\n+* A struct-local lock (e.g. mu).\n+* A lock resolvable from the local struct (e.g. fieldX.mu).\n+* A global lock (e.g. globalMu).\n+* A lock resolvable from a global struct (e.g. globalX.mu).\n+\n+Like atomic access enforcement, checks may be elided on newly allocated objects.\n+\n+### Function Annotations\n+\n+The `+checklocks` annotation may apply to functions. For example:\n```go\n// +checklocks:f.mu\nfunc (f *foo) doThingLocked() { }\n```\n-This will check that the \"f.mu\" is locked for any calls, where possible.\n+The field provided in the `+checklocks` annotation must be resolvable as one of:\n-In case of functions which initialize structs that may have annotations one can\n-use the following annotation on the function to disable reporting by the lock\n-checker. 
The lock checker will still track any mutexes acquired or released but\n-won't report any failures for this function for unguarded field access.\n+* A parameter, receiver or return value (e.g. mu).\n+* A lock resolvable from a parameter, receiver or return value (e.g. f.mu).\n+* A global lock (e.g. globalMu).\n+* A lock resolvable from a global struct (e.g. globalX.mu).\n-```go\n-// +checklocks:ignore\n-func newXXX() *X {\n-...\n-}\n-```\n+This annotation will ensure that the given lock is held for all calls, and all\n+analysis of this function will assume that this is the case.\n-***The checker treats both 'sync.Mutex' and 'sync.RWMutex' identically, i.e, as\n-a sync.Mutex. The checker does not distinguish between read locks vs. exclusive\n-locks and treats all locks as exclusive locks***.\n+Additional variants of the `+checklocks` annotation are supported for functions:\n-For cases the checker is able to correctly handle today please see test/test.go.\n+* `+checklocksread`: This enforces that at least a read lock is held. Note\n+ that this assumption will apply locally, so accesses and function calls will\n+ assume that only a read lock is available.\n+* `+checklocksacquire`: This enforces that the given lock is *not* held on\n+ entry, but it will be held on exit. This assertion will be checked locally\n+ and applied to the caller's lock state.\n+* `+checklocksrelease`: This enforces that the given lock is held on entry,\n+ and will be release on exit. This assertion is checked locally and applied\n+ to the caller's lock state.\n+* `+checklocksacquireread`: A read variant of `+checklocksacquire`.\n+* `+checklocksreleaseread`: A read variant of `+checklocksrelease`.\n+* `+checklocksalias:a.b.c=x.y`: For parameters with complex relationships,\n+ this annotation can be used to specify that the `a.b.c` lock is equivalent\n+ to the `x.y` state. That is, any operation on either of these locks applies\n+ to both, and any assertions that can be made about either applies to both.\n-The checklocks check also flags any invalid annotations where the mutex\n-annotation refers either to something that is not a 'sync.Mutex' or\n-'sync.RWMutex' or where the field does not exist at all. This will prevent the\n-annotations from becoming stale over time as fields are renamed, etc.\n+For examples of these cases see the tests.\n-## Lock suggestions\n+#### Anonymous Functions and Closures\n-Based on locks held during field access, the analyzer will suggest annotations.\n-These can be ignored with the standard `+checklocksignore` annotation.\n+Anonymous functions and closures cannot be annotated.\n-The annotation will be generated when the lock is held the vast majority of the\n-time the field is accessed. Note that it is possible for this frequency to be\n-greater than 100%, if the lock is held multiple times. For example:\n+If anonymous functions and closures are bound and invoked within a single scope,\n+the analysis will happen with the available lock state. 
For example, the\n+following will not report any violations:\n```go\n-func foo(ts1 *testStruct, ts2 *testStruct) {\n- ts1.Lock()\n- ts2.Lock()\n- ts1.guardedField = 1 // 200% locks held.\n- ts1.Unlock()\n- ts2.Unlock()\n+func foo(ts *testStruct) {\n+ x := func() {\n+ ts.guardedField = 1\n+ }\n+ ts.mu.Lock()\n+ x() // We know the context x is being invoked.\n+ ts.mu.Unlock()\n}\n```\n-## Currently not supported\n+This pattern often applies to defer usage, which allows defered functions to be\n+fully analyzed with the lock state at time of execution.\n-1. Anonymous functions are not correctly evaluated. The analyzer does not\n- currently support specifying annotations on anonymous functions as a result\n- evaluation of a function that accesses protected fields will fail.\n+However, if a closure is passed to another function, the anonymous function\n+backing that closure will be analyzed assuming no available lock state. For\n+example, the following will report violations:\n```go\n-type A struct {\n- mu sync.Mutex\n-\n- // +checklocks:mu\n- x int\n+func runFunc(f func()) {\n+ f()\n}\n-func abc() {\n- var a A\n- f := func() { a.x = 1 } <=== This line will be flagged by analyzer\n- a.mu.Lock()\n- f()\n- a.mu.Unlock()\n+func foo(ts *testStruct) {\n+ x := func() {\n+ ts.guardedField = 1\n+ }\n+ ts.mu.Lock()\n+ runFunc(x) // We can't know what will happen with x.\n+ ts.mu.Unlock()\n}\n```\n-### Explicitly Not Supported\n-\n-1. The checker will not support guards on anything other than the cases\n- described above. For example, global mutexes cannot be referred to by\n- checklocks. Only struct members can be used.\n-\n-2. The checker will not support checking for lock ordering violations.\n+Since x cannot be annotated, this may require use of the force annotation used\n+below. However, if anonymous functions and closures require annotations, there\n+may be an opportunity to split them into named functions for improved analysis\n+and debuggability, and avoid the need to use force annotations.\n-## Mixed mode\n+### Mixed Atomic Access and Lock Enforcement\nSome members may allow read-only atomic access, but be protected against writes\nby a mutex. Generally, this imposes the following requirements:\n@@ -151,3 +195,86 @@ type foo struct {\nbar int32\n}\n```\n+\n+This enforces that the preconditions above are upheld.\n+\n+## Ignoring and Forcing\n+\n+From time to time, it may be necessary to ignore results produced by the\n+analyzer. These can be disabled on a per-field, per-function or per-line basis.\n+\n+For fields, only lock suggestions may be ignored. See below for details.\n+\n+For functions, the `+checklocksignore` annotation can be applied. This prevents\n+any local analysis from taking place. Note that the other annotations can still\n+be applied to the function, which will enforce assertions in caller analysis.\n+For example:\n+\n+```go\n+// +checklocks:ts.mu\n+// +checklocksignore\n+func foo(ts *testStruct) {\n+ ts.guardedField = 1\n+}\n+```\n+\n+For individual lines, the `+checklocksforce` annotation can be applied after the\n+statement. This does not simply ignore the line, rather it *forces* the\n+necessary assertion to become true. For example, if a lock must be held, this\n+annotation will mark that lock as held for all subsequent lines. For example:\n+\n+```go\n+func foo(ts *testStruct) {\n+ ts.guardedField = 1 // +checklocksforce: don't care about locking.\n+}\n+```\n+\n+In general, both annotations should be highly discouraged. 
It should be possible\n+to avoid their use by factoring functions in such a way that annotations can be\n+applied consistently and without the need for ignoring and forcing.\n+\n+## Testing\n+\n+Tests can be built using the `+checklocksfail` annotation. When applied after a\n+statement, these will generate a report if the line does *not* fail an\n+assertion. For example:\n+\n+```go\n+func foo(ts *testStruct) {\n+ ts.guardedField = 1 // +checklocksfail: violation.\n+}\n+```\n+\n+These annotations are primarily useful for analyzer development and testing.\n+\n+## Suggestions\n+\n+Based on locks held during field access, the analyzer may suggest annotations.\n+These can be ignored with the `+checklocksignore` annotation on fields.\n+\n+```go\n+type foo struct {\n+ mu sync.Mutex\n+ // +checklocksignore: mu is not required, it just happens to be held always.\n+ bar int32\n+}\n+```\n+\n+The annotation will be generated when the lock is held the vast majority of the\n+time the field is accessed. Note that it is possible for this frequency to be\n+greater than 100%, if the lock is held multiple times. For example:\n+\n+```go\n+func foo(ts1 *testStruct, ts2 *testStruct) {\n+ ts1.Lock()\n+ ts2.Lock()\n+ ts1.guardedField = 1 // 200% locks held.\n+ ts1.Unlock()\n+ ts2.Unlock()\n+}\n+```\n+\n+It should be expected that this annotation is also rare. If the field is not\n+protected by the mutex, it suggests that the critical section could be made\n+smaller by restructuring the code or the structure instead of applying the\n+ignore annotation.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tools/checklocks/cmd/checklocks/BUILD",
"diff": "+load(\"//tools:defs.bzl\", \"go_binary\")\n+\n+go_binary(\n+ name = \"checklocks\",\n+ srcs = [\"main.go\"],\n+ deps = [\n+ \"//tools/checklocks\",\n+ \"@org_golang_x_tools//go/analysis/singlechecker:go_default_library\",\n+ ],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tools/checklocks/cmd/checklocks/main.go",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Binary checklocks is a `vettool` for `go vet`.\n+package main\n+\n+import (\n+ \"golang.org/x/tools/go/analysis/singlechecker\"\n+ \"gvisor.dev/gvisor/tools/checklocks\"\n+)\n+\n+func main() { singlechecker.Main(checklocks.Analyzer) }\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/go_branch.sh",
"new_path": "tools/go_branch.sh",
"diff": "@@ -116,7 +116,7 @@ EOF\n# There are a few solitary files that can get left behind due to the way bazel\n# constructs the gopath target. Note that we don't find all Go files here\n# because they may correspond to unused templates, etc.\n-declare -ar binaries=( \"runsc\" \"shim\" \"webhook\" )\n+declare -ar binaries=( \"runsc\" \"shim\" \"webhook\" \"tools/checklocks/cmd/checklocks\" )\nfor target in \"${binaries[@]}\"; do\nmkdir -p \"${target}\"\ncp \"${repo_orig}/${target}\"/*.go \"${target}/\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Clean documentation and add go vet support for checklocks.
This makes it easier to iterate with checklocks. This pattern will be
duplicated with more complex analyzers.
Updates #7038
PiperOrigin-RevId: 426317618 |
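The record above rewrites the checklocks README and adds a `go vet` entry point. As a minimal, hypothetical sketch of the annotations that README describes (the `counter` type, its fields, and its methods are invented for illustration, not taken from the repository), a guarded field and an annotated function look like this:

```go
package example

import "sync"

type counter struct {
	mu sync.Mutex

	// +checklocks:mu
	val int // Written only with mu held; the analyzer enforces this.
}

// +checklocks:c.mu
// incLocked may only be called with c.mu held.
func (c *counter) incLocked() {
	c.val++
}

func (c *counter) Inc() {
	c.mu.Lock()
	c.incLocked() // OK: the analyzer sees c.mu held at this call site.
	c.mu.Unlock()
}
```

After building the `tools/checklocks/cmd/checklocks` binary added in this change, such a package can be checked outside the nogo framework with `go vet -vettool=/path/to/checklocks ./...`, as the updated README describes.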
259,978 | 04.02.2022 10:26:35 | 28,800 | 518535de9b0e27cb13ac22b1bf8e6d2a8104d8a4 | Expose address deprecation on Stack
Add a method for setting an address to be deprecated/preferred to
AddressableEndpoint, so that addresses added from outside the stack
can be deprecated/renewed respectively, e.g. by a DHCPv6 client. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -1053,6 +1053,13 @@ func (e *endpoint) RemovePermanentAddress(addr tcpip.Address) tcpip.Error {\nreturn e.addressableEndpointState.RemovePermanentAddress(addr)\n}\n+// SetDeprecated implements stack.AddressableEndpoint.\n+func (e *endpoint) SetDeprecated(addr tcpip.Address, deprecated bool) tcpip.Error {\n+ e.mu.RLock()\n+ defer e.mu.RUnlock()\n+ return e.addressableEndpointState.SetDeprecated(addr, deprecated)\n+}\n+\n// MainAddress implements stack.AddressableEndpoint.\nfunc (e *endpoint) MainAddress() tcpip.AddressWithPrefix {\ne.mu.RLock()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ipv6.go",
"new_path": "pkg/tcpip/network/ipv6/ipv6.go",
"diff": "@@ -1681,6 +1681,13 @@ func (e *endpoint) getAddressRLocked(localAddr tcpip.Address) stack.AddressEndpo\nreturn e.mu.addressableEndpointState.GetAddress(localAddr)\n}\n+// SetDeprecated implements stack.AddressableEndpoint.\n+func (e *endpoint) SetDeprecated(addr tcpip.Address, deprecated bool) tcpip.Error {\n+ e.mu.RLock()\n+ defer e.mu.RUnlock()\n+ return e.mu.addressableEndpointState.SetDeprecated(addr, deprecated)\n+}\n+\n// MainAddress implements stack.AddressableEndpoint.\nfunc (e *endpoint) MainAddress() tcpip.AddressWithPrefix {\ne.mu.RLock()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/addressable_endpoint_state.go",
"new_path": "pkg/tcpip/stack/addressable_endpoint_state.go",
"diff": "@@ -374,6 +374,19 @@ func (a *AddressableEndpointState) decAddressRefLocked(addrState *addressState)\na.releaseAddressStateLocked(addrState)\n}\n+// SetDeprecated implements stack.AddressableEndpoint.\n+func (a *AddressableEndpointState) SetDeprecated(addr tcpip.Address, deprecated bool) tcpip.Error {\n+ a.mu.Lock()\n+ defer a.mu.Unlock()\n+\n+ addrState, ok := a.mu.endpoints[addr]\n+ if !ok {\n+ return &tcpip.ErrBadLocalAddress{}\n+ }\n+ addrState.SetDeprecated(deprecated)\n+ return nil\n+}\n+\n// MainAddress implements AddressableEndpoint.\nfunc (a *AddressableEndpointState) MainAddress() tcpip.AddressWithPrefix {\na.mu.RLock()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -591,6 +591,24 @@ func (n *nic) removeAddress(addr tcpip.Address) tcpip.Error {\nreturn &tcpip.ErrBadLocalAddress{}\n}\n+func (n *nic) setAddressDeprecated(addr tcpip.Address, deprecated bool) tcpip.Error {\n+ for _, ep := range n.networkEndpoints {\n+ ep, ok := ep.(AddressableEndpoint)\n+ if !ok {\n+ continue\n+ }\n+\n+ switch err := ep.SetDeprecated(addr, deprecated); err.(type) {\n+ case *tcpip.ErrBadLocalAddress:\n+ continue\n+ default:\n+ return err\n+ }\n+ }\n+\n+ return &tcpip.ErrBadLocalAddress{}\n+}\n+\nfunc (n *nic) getLinkAddress(addr, localAddr tcpip.Address, protocol tcpip.NetworkProtocolNumber, onResolve func(LinkResolutionResult)) tcpip.Error {\nlinkRes, ok := n.linkAddrResolvers[protocol]\nif !ok {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/registration.go",
"new_path": "pkg/tcpip/stack/registration.go",
"diff": "@@ -491,6 +491,12 @@ type AddressableEndpoint interface {\n// permanent address.\nRemovePermanentAddress(addr tcpip.Address) tcpip.Error\n+ // SetDeprecated sets whether the address should be deprecated or not.\n+ //\n+ // Returns *tcpip.ErrBadLocalAddress if the endpoint does not have the passed\n+ // address.\n+ SetDeprecated(addr tcpip.Address, deprecated bool) tcpip.Error\n+\n// MainAddress returns the endpoint's primary permanent address.\nMainAddress() tcpip.AddressWithPrefix\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -943,6 +943,18 @@ func (s *Stack) RemoveAddress(id tcpip.NICID, addr tcpip.Address) tcpip.Error {\nreturn &tcpip.ErrUnknownNICID{}\n}\n+// SetAddressDeprecated sets an address to be deprecated or preferred.\n+func (s *Stack) SetAddressDeprecated(id tcpip.NICID, addr tcpip.Address, deprecated bool) tcpip.Error {\n+ s.mu.RLock()\n+ defer s.mu.RUnlock()\n+\n+ if nic, ok := s.nics[id]; ok {\n+ return nic.setAddressDeprecated(addr, deprecated)\n+ }\n+\n+ return &tcpip.ErrUnknownNICID{}\n+}\n+\n// AllAddresses returns a map of NICIDs to their protocol addresses (primary\n// and non-primary).\nfunc (s *Stack) AllAddresses() map[tcpip.NICID][]tcpip.ProtocolAddress {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack_test.go",
"new_path": "pkg/tcpip/stack/stack_test.go",
"diff": "@@ -2980,7 +2980,7 @@ func TestIPv6SourceAddressSelectionScopeAndSameAddress(t *testing.T) {\nproperties stack.AddressProperties\n}\n- // Rule 3 is not tested here, and is instead tested by NDP's AutoGenAddr test.\n+ // Rule 3 is also tested by NDP's AutoGenAddr test.\ntests := []struct {\nname string\nslaacPrefixForTempAddrBeforeNICAddrAdd tcpip.AddressWithPrefix\n@@ -3101,6 +3101,35 @@ func TestIPv6SourceAddressSelectionScopeAndSameAddress(t *testing.T) {\nexpectedLocalAddr: linkLocalAddr1,\n},\n+ // Test Rule 3 of RFC 6724 section 5 (avoid deprecated addresses).\n+ {\n+ name: \"Deprecated least preferred (last address)\",\n+ nicAddrs: []addressWithProperties{\n+ {addr: globalAddr1},\n+ {\n+ addr: globalAddr2,\n+ properties: stack.AddressProperties{\n+ Deprecated: true,\n+ },\n+ },\n+ },\n+ remoteAddr: globalAddr3,\n+ expectedLocalAddr: globalAddr1,\n+ },\n+ {\n+ name: \"Deprecated least preferred (first address)\",\n+ nicAddrs: []addressWithProperties{\n+ {\n+ addr: globalAddr2,\n+ properties: stack.AddressProperties{\n+ Deprecated: true,\n+ },\n+ },\n+ {addr: globalAddr1},\n+ },\n+ remoteAddr: globalAddr3,\n+ expectedLocalAddr: globalAddr1,\n+ },\n// Test Rule 6 of 6724 section 5 (prefer matching label).\n{\nname: \"Unique Local most preferred (last address)\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Expose address deprecation on Stack
Add a method for setting an address to be deprecated/preferred to
AddressableEndpoint, so that addresses added from outside the stack
can be deprecated/renewed respectively, e.g. by a DHCPv6 client.
PiperOrigin-RevId: 426435598 |
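To make the new API in the record above concrete, the sketch below shows how an out-of-stack user (for example a DHCPv6 client, as the commit message suggests) might call `Stack.SetAddressDeprecated`; the package name, function, and error handling here are assumptions, while the method signature and error types come directly from the diff:

```go
// Package dhcpclient is hypothetical; only SetAddressDeprecated and the
// error types below are taken from the change itself.
package dhcpclient

import (
	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

// onPreferredLifetimeExpired deprecates an address this client added once
// its preferred lifetime runs out, so source address selection (RFC 6724
// rule 3) avoids it for new connections.
func onPreferredLifetimeExpired(s *stack.Stack, nicID tcpip.NICID, addr tcpip.Address) {
	switch err := s.SetAddressDeprecated(nicID, addr, true /* deprecated */); err.(type) {
	case nil:
		// Address is now deprecated but still usable by existing connections.
	case *tcpip.ErrUnknownNICID, *tcpip.ErrBadLocalAddress:
		// The NIC or address was removed in the meantime; nothing to do.
	default:
		panic(err) // Unexpected; real code would log or propagate this.
	}
}
```

Calling the same method with `deprecated` set to `false` would mark the address preferred again, e.g. after a successful lease renewal.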
259,868 | 04.02.2022 14:08:51 | 28,800 | e219f75d8b3c28d4d41f8b232b9879391688ceb2 | Fuse: Cache `maxActiveRequests` in `connection` to avoid reading it from `fs`.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/connection.go",
"new_path": "pkg/sentry/fsimpl/fuse/connection.go",
"diff": "@@ -148,6 +148,11 @@ type connection struct {\n// Negotiated in FUSE_INIT.\nmaxPages uint16\n+ // maxActiveRequests specifies the maximum number of active requests that can\n+ // exist at any time. Any further requests will block when trying to CAll\n+ // the server.\n+ maxActiveRequests uint64\n+\n// minor version of the FUSE protocol.\n// Negotiated and only set in INIT.\nminor uint32\n@@ -217,6 +222,7 @@ func newFUSEConnection(_ context.Context, fuseFD *DeviceFD, opts *filesystemOpti\nasyncCongestionThreshold: fuseDefaultCongestionThreshold,\nmaxRead: opts.maxRead,\nmaxPages: fuseDefaultMaxPagesPerReq,\n+ maxActiveRequests: opts.maxActiveRequests,\ninitializedChan: make(chan struct{}),\nconnected: true,\n}, nil\n@@ -296,7 +302,7 @@ func (conn *connection) callFuture(t *kernel.Task, r *Request) (*futureResponse,\n// This can potentially starve a request forever but this can only happen\n// if there are always too many ongoing requests all the time. The\n// supported maxActiveRequests setting should be really high to avoid this.\n- for conn.fd.numActiveRequests == conn.fd.fs.opts.maxActiveRequests {\n+ for conn.fd.numActiveRequests == conn.maxActiveRequests {\nlog.Infof(\"Blocking request %v from being queued. Too many active requests: %v\",\nr.id, conn.fd.numActiveRequests)\nconn.fd.mu.Unlock()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fuse: Cache `maxActiveRequests` in `connection` to avoid reading it from `fs`.
Reported-by: syzbot+6a1f1b6e126622f61c0e@syzkaller.appspotmail.com
PiperOrigin-RevId: 426488251 |
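The change above caches a configuration value at construction time. As a generic sketch of that pattern (all names below are hypothetical, not the actual FUSE types), copying the option into the consuming struct lets the request path read a local, effectively immutable field instead of reaching back through the filesystem object:

```go
package fusepattern

// options mirrors the idea of filesystemOptions: set once, then read-only.
type options struct {
	maxActiveRequests uint64
}

// connection keeps its own copy so hot-path code never dereferences
// another object just to read a fixed setting.
type connection struct {
	maxActiveRequests uint64
}

func newConnection(opts *options) *connection {
	return &connection{maxActiveRequests: opts.maxActiveRequests}
}
```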
259,858 | 04.02.2022 14:42:08 | 28,800 | 1ebb4fd3670e1e093bcadc9fc2fa1e6837b532e3 | Fix build configurations.
This standardizes some build configurations and simplifies the structure
of packages intended to avoid fighting with automated formatting tools.
Updates | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/platforms/BUILD",
"new_path": "runsc/boot/platforms/BUILD",
"diff": "-load(\"//tools:defs.bzl\", \"go_library\", \"select_system\")\n+# The go_library rule is imported as a different name in this case,\n+# in order to avoid automated tooling doing the wrong thing with the\n+# operating-specific dependencies listed below.\n+load(\"//tools:defs.bzl\", \"platforms\", \"select_system\", exempt_go_library = \"go_library\")\npackage(licenses = [\"notice\"])\n-# Don't rewrite the deps attribute of :platforms.\n-# @unused\n-glaze_ignore = [\n- \"platforms.go\",\n- \"platforms_darwin.go\",\n-]\n-\n-go_library(\n+exempt_go_library(\nname = \"platforms\",\nsrcs = [\n\"platforms.go\",\n@@ -24,9 +20,8 @@ go_library(\ndeps = select_system(\ndarwin = [],\nlinux = [\n- \"//pkg/sentry/platform/kvm\",\n- \"//pkg/sentry/platform/ptrace\",\n- \"//runsc/boot/platforms/nonstandard\",\n+ \"//pkg/sentry/platform/%s\" % platform\n+ for platform in platforms\n],\n),\n)\n"
},
{
"change_type": "DELETE",
"old_path": "runsc/boot/platforms/nonstandard/BUILD",
"new_path": null,
"diff": "-load(\"//tools:defs.bzl\", \"go_library\")\n-\n-package(licenses = [\"notice\"])\n-\n-go_library(\n- name = \"nonstandard\",\n- srcs = [\"nonstandard.go\"],\n- visibility = [\n- \"//runsc:__subpackages__\",\n- ],\n-)\n"
},
{
"change_type": "DELETE",
"old_path": "runsc/boot/platforms/nonstandard/nonstandard.go",
"new_path": null,
"diff": "-// Copyright 2021 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-// Package nonstandard provides a place for nonstandard platforms.\n-package nonstandard\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/platforms/platforms.go",
"new_path": "runsc/boot/platforms/platforms.go",
"diff": "@@ -22,13 +22,4 @@ import (\n// Import platforms that runsc might use.\n_ \"gvisor.dev/gvisor/pkg/sentry/platform/kvm\"\n_ \"gvisor.dev/gvisor/pkg/sentry/platform/ptrace\"\n- _ \"gvisor.dev/gvisor/runsc/boot/platforms/nonstandard\"\n-)\n-\n-const (\n- // Ptrace runs the sandbox with the ptrace platform.\n- Ptrace = \"ptrace\"\n-\n- // KVM runs the sandbox with the KVM platform.\n- KVM = \"kvm\"\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/BUILD",
"new_path": "runsc/container/BUILD",
"diff": "-load(\"//tools:defs.bzl\", \"go_library\", \"go_test\", \"more_shards\")\n+load(\"//tools:defs.bzl\", \"default_platform\", \"go_library\", \"go_test\", \"more_shards\", \"platforms\")\npackage(licenses = [\"notice\"])\n@@ -39,12 +39,18 @@ go_test(\nsize = \"large\",\nsrcs = [\n\"console_test.go\",\n- \"container_norace_test.go\",\n- \"container_race_test.go\",\n\"container_test.go\",\n\"multi_container_test.go\",\n\"shared_volume_test.go\",\n],\n+ # Only run the default platform for the tsan test, which should\n+ # be compatible. For non-tsan builds, run all platforms.\n+ args = select({\n+ \"//conditions:default\": [\"-test_platforms=%s\" % \",\".join(\n+ [p for (p, tags) in platforms.items() if \"manual\" not in tags],\n+ )],\n+ \"//tools:gotsan\": [\"-test_platforms=%s\" % default_platform],\n+ }),\ndata = [\n\"//runsc\",\n\"//test/cmd/test_app\",\n@@ -60,13 +66,14 @@ go_test(\n\"//pkg/sentry/control\",\n\"//pkg/sentry/kernel\",\n\"//pkg/sentry/kernel/auth\",\n+ \"//pkg/sentry/platform\",\n\"//pkg/sync\",\n\"//pkg/test/testutil\",\n\"//pkg/unet\",\n\"//pkg/urpc\",\n\"//runsc/boot\",\n- \"//runsc/boot/platforms\",\n\"//runsc/config\",\n+ \"//runsc/flag\",\n\"//runsc/specutils\",\n\"@com_github_cenkalti_backoff//:go_default_library\",\n\"@com_github_kr_pty//:go_default_library\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/console_test.go",
"new_path": "runsc/container/console_test.go",
"diff": "@@ -121,7 +121,7 @@ func receiveConsolePTY(srv *unet.ServerSocket) (*os.File, error) {\n// Test that an pty FD is sent over the console socket if one is provided.\nfunc TestConsoleSocket(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nspec := testutil.NewSpecWithArgs(\"true\")\nspec.Process.Terminal = true\n@@ -163,7 +163,7 @@ func TestConsoleSocket(t *testing.T) {\n// Test that an pty FD is sent over the console socket if one is provided.\nfunc TestMultiContainerConsoleSocket(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n@@ -500,7 +500,7 @@ func TestJobControlSignalRootContainer(t *testing.T) {\n// Test that terminal works with root and sub-containers.\nfunc TestMultiContainerTerminal(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n"
},
{
"change_type": "DELETE",
"old_path": "runsc/container/container_norace_test.go",
"new_path": null,
"diff": "-// Copyright 2018 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-//go:build !race\n-// +build !race\n-\n-package container\n-\n-// Allow both kvm and ptrace for non-race builds.\n-var platformOptions = []configOption{ptrace, kvm}\n"
},
{
"change_type": "DELETE",
"old_path": "runsc/container/container_race_test.go",
"new_path": null,
"diff": "-// Copyright 2018 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-//go:build race\n-// +build race\n-\n-package container\n-\n-// Only enabled ptrace with race builds.\n-var platformOptions = []configOption{ptrace}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -16,7 +16,6 @@ package container\nimport (\n\"bytes\"\n- \"flag\"\n\"fmt\"\n\"io\"\n\"io/ioutil\"\n@@ -39,17 +38,17 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/control\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n+ \"gvisor.dev/gvisor/pkg/sentry/platform\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/test/testutil\"\n\"gvisor.dev/gvisor/pkg/urpc\"\n- \"gvisor.dev/gvisor/runsc/boot/platforms\"\n\"gvisor.dev/gvisor/runsc/config\"\n+ \"gvisor.dev/gvisor/runsc/flag\"\n\"gvisor.dev/gvisor/runsc/specutils\"\n)\nfunc TestMain(m *testing.M) {\nlog.SetLevel(log.Debug)\n- flag.Parse()\nif err := testutil.ConfigureExePath(); err != nil {\npanic(err.Error())\n}\n@@ -390,56 +389,44 @@ func run(spec *specs.Spec, conf *config.Config) error {\nreturn nil\n}\n-type configOption int\n+// platforms must be provided by the BUILD rule, or all platforms are included.\n+var platforms = flag.String(\"test_platforms\", strings.Join(platform.List(), \",\"), \"Platforms to test with.\")\n-const (\n- overlay configOption = iota\n- ptrace\n- kvm\n- nonExclusiveFS\n-)\n+// configs generates different configurations to run tests.\n+//\n+// TODO(gvisor.dev/issue/1624): Remove VFS1 dimension.\n+func configs(t *testing.T, noOverlay bool) map[string]*config.Config {\n+ cs := make(map[string]*config.Config)\n+ ps := strings.Split(*platforms, \",\")\n-var (\n- noOverlay = append(platformOptions, nonExclusiveFS)\n- all = append(noOverlay, overlay)\n-)\n+ // Non-overlay versions.\n+ for _, p := range ps {\n+ c := testutil.TestConfig(t)\n+ c.Platform = p\n+ c.VFS2 = true\n+ cs[p] = c\n+ }\n-func configsHelper(t *testing.T, opts ...configOption) map[string]*config.Config {\n- // Always load the default config.\n- cs := make(map[string]*config.Config)\n- for _, o := range opts {\n+ // Overlay versions.\n+ if !noOverlay {\n+ for _, p := range ps {\nc := testutil.TestConfig(t)\n- c.VFS2 = false\n- switch o {\n- case overlay:\n+ c.Platform = p\nc.Overlay = true\n- cs[\"overlay\"] = c\n- case ptrace:\n- c.Platform = platforms.Ptrace\n- cs[\"ptrace\"] = c\n- case kvm:\n- c.Platform = platforms.KVM\n- cs[\"kvm\"] = c\n- case nonExclusiveFS:\n- c.FileAccess = config.FileAccessShared\n- cs[\"non-exclusive\"] = c\n- default:\n- panic(fmt.Sprintf(\"unknown config option %v\", o))\n- }\n+ c.VFS2 = true\n+ cs[p+\"-overlay\"] = c\n}\n- return cs\n}\n-// configs generates different configurations to run tests.\n-//\n-// TODO(gvisor.dev/issue/1624): Remove VFS1 dimension.\n-func configs(t *testing.T, opts ...configOption) map[string]*config.Config {\n- all := configsHelper(t, opts...)\n- for key, value := range configsHelper(t, opts...) {\n- value.VFS2 = true\n- all[key+\"VFS2\"] = value\n+ // FIXME(b/148134013): Delete with VFS1.\n+ for _, p := range ps {\n+ c := testutil.TestConfig(t)\n+ c.Platform = p\n+ c.VFS2 = false\n+ cs[p+\"-vfs1\"] = c\n}\n- return all\n+\n+ return cs\n}\n// sleepSpec generates a spec with sleep 1000 and a conf.\n@@ -456,7 +443,7 @@ func TestLifecycle(t *testing.T) {\nchildReaper.Start()\ndefer childReaper.Stop()\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\n// The container will just sleep for a long time. We will kill it before\n// it finishes sleeping.\n@@ -629,7 +616,7 @@ func TestExePath(t *testing.T) {\nt.Fatalf(\"error making directory: %v\", err)\n}\n- for name, conf := range configs(t, all...) 
{\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nfor _, test := range []struct {\npath string\n@@ -754,7 +741,7 @@ func doAppExitStatus(t *testing.T, vfs2 bool) {\n// TestExec verifies that a container can exec a new program.\nfunc TestExec(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\ndir, err := ioutil.TempDir(testutil.TmpDir(), \"exec-test\")\nif err != nil {\n@@ -905,7 +892,7 @@ func TestExec(t *testing.T) {\n// TestExecProcList verifies that a container can exec a new program and it\n// shows correcly in the process list.\nfunc TestExecProcList(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nconst uid = 343\nspec, _ := sleepSpecConf(t)\n@@ -976,7 +963,7 @@ func TestExecProcList(t *testing.T) {\n// TestKillPid verifies that we can signal individual exec'd processes.\nfunc TestKillPid(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\napp, err := testutil.FindFile(\"test/cmd/test_app/test_app\")\nif err != nil {\n@@ -1054,7 +1041,7 @@ func TestKillPid(t *testing.T) {\n// number after the last number from the checkpointed container.\nfunc TestCheckpointRestore(t *testing.T) {\n// Skip overlay because test requires writing to host file.\n- for name, conf := range configs(t, noOverlay...) {\n+ for name, conf := range configs(t, true /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\ndir, err := ioutil.TempDir(testutil.TmpDir(), \"checkpoint-test\")\nif err != nil {\n@@ -1215,7 +1202,7 @@ func TestCheckpointRestore(t *testing.T) {\n// with filesystem Unix Domain Socket use.\nfunc TestUnixDomainSockets(t *testing.T) {\n// Skip overlay because test requires writing to host file.\n- for name, conf := range configs(t, noOverlay...) {\n+ for name, conf := range configs(t, true /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\n// UDS path is limited to 108 chars for compatibility with older systems.\n// Use '/tmp' (instead of testutil.TmpDir) to ensure the size limit is\n@@ -1352,7 +1339,7 @@ func TestUnixDomainSockets(t *testing.T) {\n// recreated. Then it resumes the container, verify that the file gets created\n// again.\nfunc TestPauseResume(t *testing.T) {\n- for name, conf := range configs(t, noOverlay...) {\n+ for name, conf := range configs(t, true /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\ntmpDir, err := ioutil.TempDir(testutil.TmpDir(), \"lock\")\nif err != nil {\n@@ -1494,7 +1481,7 @@ func TestCapabilities(t *testing.T) {\nuid := auth.KUID(os.Getuid() + 1)\ngid := auth.KGID(os.Getgid() + 1)\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nspec, _ := sleepSpecConf(t)\nrootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\n@@ -1569,7 +1556,7 @@ func TestCapabilities(t *testing.T) {\n// TestRunNonRoot checks that sandbox can be configured when running as\n// non-privileged user.\nfunc TestRunNonRoot(t *testing.T) {\n- for name, conf := range configs(t, noOverlay...) 
{\n+ for name, conf := range configs(t, true /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nspec := testutil.NewSpecWithArgs(\"/bin/true\")\n@@ -1613,7 +1600,7 @@ func TestRunNonRoot(t *testing.T) {\n// TestMountNewDir checks that runsc will create destination directory if it\n// doesn't exit.\nfunc TestMountNewDir(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nroot, err := ioutil.TempDir(testutil.TmpDir(), \"root\")\nif err != nil {\n@@ -1644,7 +1631,7 @@ func TestMountNewDir(t *testing.T) {\n}\nfunc TestReadonlyRoot(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nspec, _ := sleepSpecConf(t)\nspec.Root.Readonly = true\n@@ -1692,7 +1679,7 @@ func TestReadonlyRoot(t *testing.T) {\n}\nfunc TestReadonlyMount(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\ndir, err := ioutil.TempDir(testutil.TmpDir(), \"ro-mount\")\nif err != nil {\n@@ -1751,7 +1738,7 @@ func TestReadonlyMount(t *testing.T) {\n}\nfunc TestUIDMap(t *testing.T) {\n- for name, conf := range configs(t, noOverlay...) {\n+ for name, conf := range configs(t, true /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\ntestDir, err := ioutil.TempDir(testutil.TmpDir(), \"test-mount\")\nif err != nil {\n@@ -2039,7 +2026,7 @@ func TestUserLog(t *testing.T) {\n}\nfunc TestWaitOnExitedSandbox(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\n// Run a shell that sleeps for 1 second and then exits with a\n// non-zero code.\n@@ -2181,7 +2168,7 @@ func doDestroyStartingTest(t *testing.T, vfs2 bool) {\n}\nfunc TestCreateWorkingDir(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\ntmpDir, err := ioutil.TempDir(testutil.TmpDir(), \"cwd-create\")\nif err != nil {\n@@ -2295,7 +2282,7 @@ func TestMountPropagation(t *testing.T) {\n}\nfunc TestMountSymlink(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\ndir, err := ioutil.TempDir(testutil.TmpDir(), \"mount-symlink\")\nif err != nil {\n@@ -2518,7 +2505,7 @@ func TestCreateWithCorruptedStateFile(t *testing.T) {\n}\nfunc TestBindMountByOption(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\ndir, err := ioutil.TempDir(testutil.TmpDir(), \"bind-mount\")\nspec := testutil.NewSpecWithArgs(\"/bin/touch\", path.Join(dir, \"file\"))\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/multi_container_test.go",
"new_path": "runsc/container/multi_container_test.go",
"diff": "@@ -137,7 +137,7 @@ func createSharedMount(mount specs.Mount, name string, pod ...*specs.Spec) {\n// TestMultiContainerSanity checks that it is possible to run 2 dead-simple\n// containers in the same sandbox.\nfunc TestMultiContainerSanity(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n@@ -175,7 +175,7 @@ func TestMultiContainerSanity(t *testing.T) {\n// TestMultiPIDNS checks that it is possible to run 2 dead-simple containers in\n// the same sandbox with different pidns.\nfunc TestMultiPIDNS(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n@@ -247,7 +247,7 @@ func TestMultiPIDNS(t *testing.T) {\n// TestMultiPIDNSPath checks the pidns path.\nfunc TestMultiPIDNSPath(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n@@ -365,7 +365,7 @@ func TestMultiPIDNSKill(t *testing.T) {\nt.Fatal(\"error finding test_app:\", err)\n}\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n@@ -619,7 +619,7 @@ func TestMultiContainerMount(t *testing.T) {\n// TestMultiContainerSignal checks that it is possible to signal individual\n// containers without killing the entire sandbox.\nfunc TestMultiContainerSignal(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n@@ -719,7 +719,7 @@ func TestMultiContainerDestroy(t *testing.T) {\nt.Fatal(\"error finding test_app:\", err)\n}\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n@@ -1216,7 +1216,7 @@ func TestMultiContainerContainerDestroyStress(t *testing.T) {\n// Test that pod shared mounts are properly mounted in 2 containers and that\n// changes from one container is reflected in the other.\nfunc TestMultiContainerSharedMount(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n@@ -1329,7 +1329,7 @@ func TestMultiContainerSharedMount(t *testing.T) {\n// Test that pod mounts are mounted as readonly when requested.\nfunc TestMultiContainerSharedMountReadonly(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n@@ -1463,7 +1463,7 @@ func TestMultiContainerSharedMountCompatible(t *testing.T) {\n// Test that shared pod mounts continue to work after container is restarted.\nfunc TestMultiContainerSharedMountRestart(t *testing.T) {\n- for name, conf := range configs(t, all...) 
{\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n@@ -1572,7 +1572,7 @@ func TestMultiContainerSharedMountRestart(t *testing.T) {\n// Test that unsupported pod mounts options are ignored when matching master and\n// replica mounts.\nfunc TestMultiContainerSharedMountUnsupportedOptions(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configs(t, false /* noOverlay */) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n@@ -1907,7 +1907,7 @@ func TestMultiContainerRunNonRoot(t *testing.T) {\nfunc TestMultiContainerHomeEnvDir(t *testing.T) {\n// NOTE: Don't use overlay since we need changes to persist to the temp dir\n// outside the sandbox.\n- for testName, conf := range configs(t, noOverlay...) {\n+ for testName, conf := range configs(t, true /* noOverlay */) {\nt.Run(testName, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/BUILD",
"new_path": "runsc/sandbox/BUILD",
"diff": "@@ -28,7 +28,6 @@ go_library(\n\"//pkg/unet\",\n\"//pkg/urpc\",\n\"//runsc/boot\",\n- \"//runsc/boot/platforms\",\n\"//runsc/cgroup\",\n\"//runsc/config\",\n\"//runsc/console\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -45,7 +45,6 @@ import (\n\"gvisor.dev/gvisor/pkg/unet\"\n\"gvisor.dev/gvisor/pkg/urpc\"\n\"gvisor.dev/gvisor/runsc/boot\"\n- \"gvisor.dev/gvisor/runsc/boot/platforms\"\n\"gvisor.dev/gvisor/runsc/cgroup\"\n\"gvisor.dev/gvisor/runsc/config\"\n\"gvisor.dev/gvisor/runsc/console\"\n@@ -1426,7 +1425,7 @@ func deviceFileForPlatform(name string) (*os.File, error) {\nfunc checkBinaryPermissions(conf *config.Config) error {\n// All platforms need the other exe bit\nneededBits := os.FileMode(0001)\n- if conf.Platform == platforms.Ptrace {\n+ if conf.Platform == \"ptrace\" {\n// Ptrace needs the other read bit\nneededBits |= os.FileMode(0004)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/BUILD",
"new_path": "tools/BUILD",
"diff": "-load(\"//tools:defs.bzl\", \"bzl_library\")\n+load(\"//tools:defs.bzl\", \"bzl_library\", \"gotsan_flag_values\", \"gotsan_values\")\npackage(licenses = [\"notice\"])\n+config_setting(\n+ name = \"debug\",\n+ values = {\"compilation_mode\": \"dbg\"},\n+ visibility = [\n+ \"//:sandbox\",\n+ ],\n+)\n+\n+config_setting(\n+ name = \"gotsan\",\n+ flag_values = gotsan_flag_values,\n+ values = gotsan_values,\n+ visibility = [\n+ \"//:sandbox\",\n+ ],\n+)\n+\nbzl_library(\nname = \"defs_bzl\",\nsrcs = [\"defs.bzl\"],\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/bazeldefs/go.bzl",
"new_path": "tools/bazeldefs/go.bzl",
"diff": "@@ -57,9 +57,14 @@ def go_binary(name, static = False, pure = False, x_defs = None, system_malloc =\nkwargs[\"static\"] = \"on\"\nif pure:\nkwargs[\"pure\"] = \"on\"\n+ gc_goopts = select({\n+ \"//conditions:default\": kwargs.pop(\"gc_goopts\", []),\n+ \"//tools:debug\": kwargs.pop(\"gc_goopts\", []) + [\"-all=-N -l\"],\n+ })\n_go_binary(\nname = name,\nx_defs = x_defs,\n+ gc_goopts = gc_goopts,\n**kwargs\n)\n@@ -154,3 +159,7 @@ def select_goos():\nlinux = \"linux\",\ndarwin = \"darwin\",\n)\n+\n+# Defined by rules_go.\n+gotsan_values = None\n+gotsan_flag_values = {\"@io_bazel_rules_go//go/config:race\": \"true\"}\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/defs.bzl",
"new_path": "tools/defs.bzl",
"diff": "@@ -10,7 +10,7 @@ load(\"//tools/go_marshal:defs.bzl\", \"go_marshal\", \"marshal_deps\", \"marshal_test_\nload(\"//tools/nogo:defs.bzl\", \"nogo_test\")\nload(\"//tools/bazeldefs:defs.bzl\", _amd64_config = \"amd64_config\", _arch_config = \"arch_config\", _arm64_config = \"arm64_config\", _build_test = \"build_test\", _bzl_library = \"bzl_library\", _coreutil = \"coreutil\", _default_net_util = \"default_net_util\", _more_shards = \"more_shards\", _most_shards = \"most_shards\", _proto_library = \"proto_library\", _select_arch = \"select_arch\", _select_system = \"select_system\", _short_path = \"short_path\", _transition_allowlist = \"transition_allowlist\", _version = \"version\")\nload(\"//tools/bazeldefs:cc.bzl\", _cc_binary = \"cc_binary\", _cc_flags_supplier = \"cc_flags_supplier\", _cc_grpc_library = \"cc_grpc_library\", _cc_library = \"cc_library\", _cc_proto_library = \"cc_proto_library\", _cc_test = \"cc_test\", _cc_toolchain = \"cc_toolchain\", _gbenchmark = \"gbenchmark\", _gbenchmark_internal = \"gbenchmark_internal\", _grpcpp = \"grpcpp\", _gtest = \"gtest\", _vdso_linker_option = \"vdso_linker_option\")\n-load(\"//tools/bazeldefs:go.bzl\", _gazelle = \"gazelle\", _go_binary = \"go_binary\", _go_embed_data = \"go_embed_data\", _go_grpc_and_proto_libraries = \"go_grpc_and_proto_libraries\", _go_library = \"go_library\", _go_path = \"go_path\", _go_proto_library = \"go_proto_library\", _go_rule = \"go_rule\", _go_test = \"go_test\", _select_goarch = \"select_goarch\", _select_goos = \"select_goos\")\n+load(\"//tools/bazeldefs:go.bzl\", _gazelle = \"gazelle\", _go_binary = \"go_binary\", _go_embed_data = \"go_embed_data\", _go_grpc_and_proto_libraries = \"go_grpc_and_proto_libraries\", _go_library = \"go_library\", _go_path = \"go_path\", _go_proto_library = \"go_proto_library\", _go_rule = \"go_rule\", _go_test = \"go_test\", _gotsan_flag_values = \"gotsan_flag_values\", _gotsan_values = \"gotsan_values\", _select_goarch = \"select_goarch\", _select_goos = \"select_goos\")\nload(\"//tools/bazeldefs:pkg.bzl\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\")\nload(\"//tools/bazeldefs:platforms.bzl\", _default_platform = \"default_platform\", _platforms = \"platforms\")\nload(\"//tools/bazeldefs:tags.bzl\", \"go_suffixes\")\n@@ -47,6 +47,8 @@ select_goos = _select_goos\nselect_goarch = _select_goarch\ngo_embed_data = _go_embed_data\ngo_proto_library = _go_proto_library\n+gotsan_values = _gotsan_values\n+gotsan_flag_values = _gotsan_flag_values\n# Packaging rules.\npkg_deb = _pkg_deb\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix build configurations.
This standardizes some build configurations and simplifies the structure
of packages intended to avoid fighting with automated formatting tools.
Updates #6938
PiperOrigin-RevId: 426495825 |
259,858 | 04.02.2022 22:38:53 | 28,800 | 6c2d9a2359e30c2c5316bc21903f07f52c122872 | Allow disabling nogo stdlib facts.
Rebuilding the set of standard library facts can be time consuming. Allow this
to be disabled for faster iteration. This is done by providing a build setting
which can be toggled directly, for example:
bazel build --//tools/nogo:fast ... | [
{
"change_type": "MODIFY",
"old_path": "tools/bazeldefs/defs.bzl",
"new_path": "tools/bazeldefs/defs.bzl",
"diff": "load(\"@bazel_skylib//rules:build_test.bzl\", _build_test = \"build_test\")\nload(\"@bazel_skylib//:bzl_library.bzl\", _bzl_library = \"bzl_library\")\n+load(\"@bazel_skylib//rules:common_settings.bzl\", _BuildSettingInfo = \"BuildSettingInfo\", _bool_flag = \"bool_flag\")\nbuild_test = _build_test\nbzl_library = _bzl_library\n+bool_flag = _bool_flag\n+BuildSettingInfo = _BuildSettingInfo\nmore_shards = 4\nmost_shards = 8\nversion = \"//tools/bazeldefs:version\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/defs.bzl",
"new_path": "tools/defs.bzl",
"diff": "@@ -8,7 +8,7 @@ change for Google-internal and bazel-compatible rules.\nload(\"//tools/go_stateify:defs.bzl\", \"go_stateify\")\nload(\"//tools/go_marshal:defs.bzl\", \"go_marshal\", \"marshal_deps\", \"marshal_test_deps\")\nload(\"//tools/nogo:defs.bzl\", \"nogo_test\")\n-load(\"//tools/bazeldefs:defs.bzl\", _amd64_config = \"amd64_config\", _arch_config = \"arch_config\", _arm64_config = \"arm64_config\", _build_test = \"build_test\", _bzl_library = \"bzl_library\", _coreutil = \"coreutil\", _default_net_util = \"default_net_util\", _more_shards = \"more_shards\", _most_shards = \"most_shards\", _proto_library = \"proto_library\", _select_arch = \"select_arch\", _select_system = \"select_system\", _short_path = \"short_path\", _transition_allowlist = \"transition_allowlist\", _version = \"version\")\n+load(\"//tools/bazeldefs:defs.bzl\", _BuildSettingInfo = \"BuildSettingInfo\", _amd64_config = \"amd64_config\", _arch_config = \"arch_config\", _arm64_config = \"arm64_config\", _bool_flag = \"bool_flag\", _build_test = \"build_test\", _bzl_library = \"bzl_library\", _coreutil = \"coreutil\", _default_net_util = \"default_net_util\", _more_shards = \"more_shards\", _most_shards = \"most_shards\", _proto_library = \"proto_library\", _select_arch = \"select_arch\", _select_system = \"select_system\", _short_path = \"short_path\", _transition_allowlist = \"transition_allowlist\", _version = \"version\")\nload(\"//tools/bazeldefs:cc.bzl\", _cc_binary = \"cc_binary\", _cc_flags_supplier = \"cc_flags_supplier\", _cc_grpc_library = \"cc_grpc_library\", _cc_library = \"cc_library\", _cc_proto_library = \"cc_proto_library\", _cc_test = \"cc_test\", _cc_toolchain = \"cc_toolchain\", _gbenchmark = \"gbenchmark\", _gbenchmark_internal = \"gbenchmark_internal\", _grpcpp = \"grpcpp\", _gtest = \"gtest\", _vdso_linker_option = \"vdso_linker_option\")\nload(\"//tools/bazeldefs:go.bzl\", _gazelle = \"gazelle\", _go_binary = \"go_binary\", _go_embed_data = \"go_embed_data\", _go_grpc_and_proto_libraries = \"go_grpc_and_proto_libraries\", _go_library = \"go_library\", _go_path = \"go_path\", _go_proto_library = \"go_proto_library\", _go_rule = \"go_rule\", _go_test = \"go_test\", _gotsan_flag_values = \"gotsan_flag_values\", _gotsan_values = \"gotsan_values\", _select_goarch = \"select_goarch\", _select_goos = \"select_goos\")\nload(\"//tools/bazeldefs:pkg.bzl\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\")\n@@ -18,6 +18,8 @@ load(\"//tools/bazeldefs:tags.bzl\", \"go_suffixes\")\n# Core rules.\nbuild_test = _build_test\nbzl_library = _bzl_library\n+bool_flag = _bool_flag\n+BuildSettingInfo = _BuildSettingInfo\ndefault_net_util = _default_net_util\nselect_arch = _select_arch\nselect_system = _select_system\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/BUILD",
"new_path": "tools/nogo/BUILD",
"diff": "-load(\"//tools:defs.bzl\", \"bzl_library\", \"go_binary\", \"select_goarch\", \"select_goos\")\n+load(\"//tools:defs.bzl\", \"bool_flag\", \"bzl_library\", \"go_binary\", \"select_goarch\", \"select_goos\")\nload(\"//tools/nogo:defs.bzl\", \"nogo_stdlib\", \"nogo_target\")\npackage(licenses = [\"notice\"])\n+bool_flag(\n+ name = \"fast\",\n+ build_setting_default = False,\n+)\n+\nnogo_target(\nname = \"target\",\ngoarch = select_goarch(),\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/defs.bzl",
"new_path": "tools/nogo/defs.bzl",
"diff": "\"\"\"Nogo rules.\"\"\"\n+load(\"//tools/bazeldefs:defs.bzl\", \"BuildSettingInfo\")\nload(\"//tools/bazeldefs:go.bzl\", \"go_context\", \"go_embed_libraries\", \"go_importpath\", \"go_rule\")\nNogoConfigInfo = provider(\n@@ -63,6 +64,13 @@ NogoStdlibInfo = provider(\n)\ndef _nogo_stdlib_impl(ctx):\n+ # If this is disabled, return nothing.\n+ if ctx.attr._fast[BuildSettingInfo].value:\n+ return [NogoStdlibInfo(\n+ facts = None,\n+ raw_findings = [],\n+ )]\n+\n# Build the configuration for the stdlib.\ngo_ctx, args, inputs, raw_findings = _nogo_config(ctx, deps = [])\n@@ -109,12 +117,16 @@ nogo_stdlib = go_rule(\nattrs = {\n\"_nogo\": attr.label(\ndefault = \"//tools/nogo:nogo\",\n- cfg = \"exec\",\n+ cfg = \"host\",\n),\n\"_target\": attr.label(\ndefault = \"//tools/nogo:target\",\ncfg = \"target\",\n),\n+ \"_fast\": attr.label(\n+ default = \"//tools/nogo:fast\",\n+ cfg = \"host\",\n+ ),\n},\n)\n@@ -216,6 +228,7 @@ def _nogo_package_config(ctx, deps, importpath = None, target = None):\n# Add the standard library facts.\nstdlib_info = ctx.attr._nogo_stdlib[NogoStdlibInfo]\nstdlib_facts = stdlib_info.facts\n+ if stdlib_facts:\ninputs.append(stdlib_facts)\nargs.append(\"-bundle=%s\" % stdlib_facts.path)\n@@ -314,7 +327,7 @@ nogo_aspect = go_rule(\nattrs = {\n\"_nogo\": attr.label(\ndefault = \"//tools/nogo:nogo\",\n- cfg = \"exec\",\n+ cfg = \"host\",\n),\n\"_target\": attr.label(\ndefault = \"//tools/nogo:target\",\n@@ -400,7 +413,7 @@ nogo_test = rule(\n),\n\"_nogo\": attr.label(\ndefault = \"//tools/nogo:nogo\",\n- cfg = \"exec\",\n+ cfg = \"host\",\n),\n\"_target\": attr.label(\ndefault = \"//tools/nogo:target\",\n@@ -478,7 +491,7 @@ nogo_facts = go_rule(\n),\n\"_nogo\": attr.label(\ndefault = \"//tools/nogo:nogo\",\n- cfg = \"exec\",\n+ cfg = \"host\",\n),\n# See _nogo_aspect, above.\n\"_nogo_stdlib\": attr.label(\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow disabling nogo stdlib facts.
Rebuilding the set of standard library facts can be time consuming. Allow this
to be disabled for faster iteration. This is done by providing a build setting
which can be toggled directly, for example:
bazel build --//tools/nogo:fast ...
PiperOrigin-RevId: 426562548 |
259,853 | 08.02.2022 17:24:47 | 28,800 | a15231dabe4702f166529f5e360358562caeeebc | kvm: fix CPU offsets for arm64
We have to calculate offsets in the CPU structure,
but we calculate offsets in the CPUArchState structure.
Fixes: ("Refactor nogo and provide facts render.") | [
{
"change_type": "MODIFY",
"old_path": "pkg/ring0/entry_arm64.s",
"new_path": "pkg/ring0/entry_arm64.s",
"diff": "#include \"funcdata.h\"\n#include \"textflag.h\"\n-{{ with .CPU }}\n-#define CPU_SELF {{ .self.Offset }}\n-#define CPU_REGISTERS {{ .registers.Offset }}\n-{{ end }}\n-{{ with .CPUArchState }}\n-#define CPU_STACK_TOP ({{ .stack.Offset }} + {{ .stack.Size }})\n-#define CPU_ERROR_CODE {{ .errorCode.Offset }}\n-#define CPU_ERROR_TYPE {{ .errorType.Offset }}\n-#define CPU_FAULT_ADDR {{ .faultAddr.Offset }}\n-#define CPU_FPSTATE_EL0 {{ .el0Fp.Offset }}\n-#define CPU_TTBR0_KVM {{ .ttbr0Kvm.Offset }}\n-#define CPU_TTBR0_APP {{ .ttbr0App.Offset }}\n-#define CPU_VECTOR_CODE {{ .vecCode.Offset }}\n-#define CPU_APP_ADDR {{ .appAddr.Offset }}\n-#define CPU_LAZY_VFP {{ .lazyVFP.Offset }}\n-#define CPU_APP_ASID {{ .appASID.Offset }}\n-{{ end }}\n+#define CPU_SELF {{ .CPU.self.Offset }}\n+#define CPU_REGISTERS {{ .CPU.registers.Offset }}\n+#define CPU_STACK_TOP ({{ .CPU.CPUArchState.Offset }}+{{ .CPUArchState.stack.Offset }} + {{ .CPUArchState.stack.Size }})\n+#define CPU_ERROR_CODE ({{ .CPU.CPUArchState.Offset }}+{{ .CPUArchState.errorCode.Offset }})\n+#define CPU_ERROR_TYPE ({{ .CPU.CPUArchState.Offset }}+{{ .CPUArchState.errorType.Offset }})\n+#define CPU_FAULT_ADDR ({{ .CPU.CPUArchState.Offset }}+{{ .CPUArchState.faultAddr.Offset }})\n+#define CPU_FPSTATE_EL0 ({{ .CPU.CPUArchState.Offset }}+{{ .CPUArchState.el0Fp.Offset }})\n+#define CPU_TTBR0_KVM ({{ .CPU.CPUArchState.Offset }}+{{ .CPUArchState.ttbr0Kvm.Offset }})\n+#define CPU_TTBR0_APP ({{ .CPU.CPUArchState.Offset }}+{{ .CPUArchState.ttbr0App.Offset }})\n+#define CPU_VECTOR_CODE ({{ .CPU.CPUArchState.Offset }}+{{ .CPUArchState.vecCode.Offset }})\n+#define CPU_APP_ADDR ({{ .CPU.CPUArchState.Offset }}+{{ .CPUArchState.appAddr.Offset }})\n+#define CPU_LAZY_VFP ({{ .CPU.CPUArchState.Offset }}+{{ .CPUArchState.lazyVFP.Offset }})\n+#define CPU_APP_ASID ({{ .CPU.CPUArchState.Offset }}+{{ .CPUArchState.appASID.Offset }})\n// Bits.\n#define _KERNEL_FLAGS {{ .KernelFlagsSet.Value }}\n"
}
] | Go | Apache License 2.0 | google/gvisor | kvm: fix CPU offsets for arm64
We have to calculate offsets in the CPU structure,
but we calculate offsets in the CPUArchState structure.
Fixes: 0e492a2b5e6d ("Refactor nogo and provide facts render.")
PiperOrigin-RevId: 427336023 |
259,853 | 09.02.2022 16:26:39 | 28,800 | cd8ba85c624d3e10b63685b4ea841525c501179c | kvm/arm64: exclude PROT_NONE mappings from the read-only list
The Go runtime uses PROT_NONE mappings as placeholders for future read-write
mappings.
Access mode has to be a property of physicalRegion. It makes the code cleaner. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/address_space.go",
"new_path": "pkg/sentry/platform/kvm/address_space.go",
"diff": "@@ -123,7 +123,7 @@ func (as *addressSpace) mapLocked(addr hostarch.Addr, m hostMapEntry, at hostarc\n// not have physical mappings, the KVM module may inject\n// spurious exceptions when emulation fails (i.e. it tries to\n// emulate because the RIP is pointed at those pages).\n- as.machine.mapPhysical(physical, length, physicalRegions, _KVM_MEM_FLAGS_NONE)\n+ as.machine.mapPhysical(physical, length, physicalRegions)\n// Install the page table mappings. Note that the ordering is\n// important; if the pagetable mappings were installed before\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/bluepill_allocator.go",
"new_path": "pkg/sentry/platform/kvm/bluepill_allocator.go",
"diff": "@@ -73,8 +73,8 @@ func (a *allocator) PhysicalFor(ptes *pagetables.PTEs) uintptr {\n//\n//go:nosplit\nfunc (a *allocator) LookupPTEs(physical uintptr) *pagetables.PTEs {\n- virtualStart, physicalStart, _, ok := calculateBluepillFault(physical, physicalRegions)\n- if !ok {\n+ virtualStart, physicalStart, _, pr := calculateBluepillFault(physical, physicalRegions)\n+ if pr == nil {\npanic(fmt.Sprintf(\"LookupPTEs failed for 0x%x\", physical)) // escapes: panic.\n}\nreturn a.base.LookupPTEs(virtualStart + (physical - physicalStart))\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/bluepill_fault.go",
"new_path": "pkg/sentry/platform/kvm/bluepill_fault.go",
"diff": "@@ -46,9 +46,9 @@ func yield() {\n// calculateBluepillFault calculates the fault address range.\n//\n//go:nosplit\n-func calculateBluepillFault(physical uintptr, phyRegions []physicalRegion) (virtualStart, physicalStart, length uintptr, ok bool) {\n+func calculateBluepillFault(physical uintptr, phyRegions []physicalRegion) (virtualStart, physicalStart, length uintptr, pr *physicalRegion) {\nalignedPhysical := physical &^ uintptr(hostarch.PageSize-1)\n- for _, pr := range phyRegions {\n+ for i, pr := range phyRegions {\nend := pr.physical + pr.length\nif physical < pr.physical || physical >= end {\ncontinue\n@@ -62,10 +62,10 @@ func calculateBluepillFault(physical uintptr, phyRegions []physicalRegion) (virt\nphysicalEnd = end\n}\nlength = physicalEnd - physicalStart\n- return virtualStart, physicalStart, length, true\n+ return virtualStart, physicalStart, length, &phyRegions[i]\n}\n- return 0, 0, 0, false\n+ return 0, 0, 0, nil\n}\n// handleBluepillFault handles a physical fault.\n@@ -73,13 +73,13 @@ func calculateBluepillFault(physical uintptr, phyRegions []physicalRegion) (virt\n// The corresponding virtual address is returned. This may throw on error.\n//\n//go:nosplit\n-func handleBluepillFault(m *machine, physical uintptr, phyRegions []physicalRegion, flags uint32) (uintptr, bool) {\n+func handleBluepillFault(m *machine, physical uintptr, phyRegions []physicalRegion) (uintptr, bool) {\n// Paging fault: we need to map the underlying physical pages for this\n// fault. This all has to be done in this function because we're in a\n// signal handler context. (We can't call any functions that might\n// split the stack.)\n- virtualStart, physicalStart, length, ok := calculateBluepillFault(physical, phyRegions)\n- if !ok {\n+ virtualStart, physicalStart, length, pr := calculateBluepillFault(physical, phyRegions)\n+ if pr == nil {\nreturn 0, false\n}\n@@ -92,6 +92,10 @@ func handleBluepillFault(m *machine, physical uintptr, phyRegions []physicalRegi\nyield() // Race with another call.\nslot = atomic.SwapUint32(&m.nextSlot, ^uint32(0))\n}\n+ flags := _KVM_MEM_FLAGS_NONE\n+ if pr.readOnly {\n+ flags |= _KVM_MEM_READONLY\n+ }\nerrno := m.setMemoryRegion(int(slot), physicalStart, length, virtualStart, flags)\nif errno == 0 {\n// Store the physical address in the slot. This is used to\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm_const.go",
"new_path": "pkg/sentry/platform/kvm/kvm_const.go",
"diff": "@@ -81,7 +81,7 @@ const (\nconst (\n_KVM_MEM_LOG_DIRTY_PAGES = uint32(1) << 0\n_KVM_MEM_READONLY = uint32(1) << 1\n- _KVM_MEM_FLAGS_NONE = 0\n+ _KVM_MEM_FLAGS_NONE = uint32(0)\n)\n// KVM hypercall list.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine.go",
"new_path": "pkg/sentry/platform/kvm/machine.go",
"diff": "@@ -196,10 +196,6 @@ func (m *machine) createVCPU(id int) *vCPU {\nreturn c // Done.\n}\n-// readOnlyGuestRegions contains regions that have to be mapped read-only into\n-// the guest physical address space. Right now, it is used on arm64 only.\n-var readOnlyGuestRegions []virtualRegion\n-\n// newMachine returns a new VM context.\nfunc newMachine(vm int) (*machine, error) {\n// Create the machine.\n@@ -282,24 +278,15 @@ func newMachine(vm int) (*machine, error) {\n}\n// Ensure the physical range is mapped.\n- m.mapPhysical(physical, length, physicalRegions, flags)\n+ m.mapPhysical(physical, length, physicalRegions)\nvirtual += length\n}\n}\n- for _, vr := range readOnlyGuestRegions {\n- mapRegion(vr, _KVM_MEM_READONLY)\n- }\n-\napplyVirtualRegions(func(vr virtualRegion) {\nif excludeVirtualRegion(vr) {\nreturn // skip region.\n}\n- for _, r := range readOnlyGuestRegions {\n- if vr.virtual == r.virtual {\n- return\n- }\n- }\n// Take into account that the stack can grow down.\nif vr.filename == \"[stack]\" {\nvr.virtual -= 1 << 20\n@@ -348,17 +335,17 @@ func (m *machine) hasSlot(physical uintptr) bool {\n// This panics on error.\n//\n//go:nosplit\n-func (m *machine) mapPhysical(physical, length uintptr, phyRegions []physicalRegion, flags uint32) {\n+func (m *machine) mapPhysical(physical, length uintptr, phyRegions []physicalRegion) {\nfor end := physical + length; physical < end; {\n- _, physicalStart, length, ok := calculateBluepillFault(physical, phyRegions)\n- if !ok {\n+ _, physicalStart, length, pr := calculateBluepillFault(physical, phyRegions)\n+ if pr == nil {\n// Should never happen.\npanic(\"mapPhysical on unknown physical address\")\n}\n// Is this already mapped? Check the usedSlots.\nif !m.hasSlot(physicalStart) {\n- if _, ok := handleBluepillFault(m, physical, phyRegions, flags); !ok {\n+ if _, ok := handleBluepillFault(m, physical, phyRegions); !ok {\npanic(\"handleBluepillFault failed\")\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine_amd64.go",
"new_path": "pkg/sentry/platform/kvm/machine_amd64.go",
"diff": "@@ -436,16 +436,6 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *linux.SignalInfo)\n}\n}\n-// On x86 platform, the flags for \"setMemoryRegion\" can always be set as 0.\n-// There is no need to return read-only physicalRegions.\n-func rdonlyRegionsForSetMem() (phyRegions []physicalRegion) {\n- return nil\n-}\n-\n-func availableRegionsForSetMem() (phyRegions []physicalRegion) {\n- return physicalRegions\n-}\n-\nfunc (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {\n// Map all the executable regions so that all the entry functions\n// are mapped in the upper half.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine_arm64.go",
"new_path": "pkg/sentry/platform/kvm/machine_arm64.go",
"diff": "@@ -61,57 +61,21 @@ func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {\n})\n}\n-// Get all read-only physicalRegions.\n-func rdonlyRegionsForSetMem() (phyRegions []physicalRegion) {\n- var rdonlyRegions []region\n-\n- applyVirtualRegions(func(vr virtualRegion) {\n- if excludeVirtualRegion(vr) {\n- return\n- }\n-\n- if !vr.accessType.Write && vr.accessType.Read {\n- rdonlyRegions = append(rdonlyRegions, vr.region)\n- }\n-\n- // TODO(gvisor.dev/issue/2686): PROT_NONE should be specially treated.\n- // Workaround: treated as rdonly temporarily.\n- if !vr.accessType.Write && !vr.accessType.Read && !vr.accessType.Execute {\n- rdonlyRegions = append(rdonlyRegions, vr.region)\n- }\n- })\n-\n- for _, r := range rdonlyRegions {\n- physical, _, ok := translateToPhysical(r.virtual)\n- if !ok {\n- continue\n- }\n-\n- phyRegions = append(phyRegions, physicalRegion{\n- region: region{\n- virtual: r.virtual,\n- length: r.length,\n- },\n- physical: physical,\n- })\n- }\n-\n- return phyRegions\n-}\n-\n// archPhysicalRegions fills readOnlyGuestRegions and allocates separate\n// physical regions form them.\nfunc archPhysicalRegions(physicalRegions []physicalRegion) []physicalRegion {\n+ rdRegions := []virtualRegion{}\napplyVirtualRegions(func(vr virtualRegion) {\nif excludeVirtualRegion(vr) {\nreturn // skip region.\n}\n- if !vr.accessType.Write {\n- readOnlyGuestRegions = append(readOnlyGuestRegions, vr)\n+ // Skip PROT_NONE mappings. Go-runtime uses them as place\n+ // holders for future read-write mappings.\n+ if !vr.accessType.Write && vr.accessType.Read {\n+ rdRegions = append(rdRegions, vr)\n}\n})\n- rdRegions := readOnlyGuestRegions[:]\n// Add an unreachable region.\nrdRegions = append(rdRegions, virtualRegion{\n@@ -122,7 +86,7 @@ func archPhysicalRegions(physicalRegions []physicalRegion) []physicalRegion {\n})\nvar regions []physicalRegion\n- addValidRegion := func(r *physicalRegion, virtual, length uintptr) {\n+ addValidRegion := func(r *physicalRegion, virtual, length uintptr, readOnly bool) {\nif length == 0 {\nreturn\n}\n@@ -132,6 +96,7 @@ func archPhysicalRegions(physicalRegions []physicalRegion) []physicalRegion {\nlength: length,\n},\nphysical: r.physical + (virtual - r.virtual),\n+ readOnly: readOnly,\n})\n}\ni := 0\n@@ -151,16 +116,16 @@ func archPhysicalRegions(physicalRegions []physicalRegion) []physicalRegion {\nif end < rdStart {\nnewEnd = end\n}\n- addValidRegion(&pr, start, newEnd-start)\n+ addValidRegion(&pr, start, newEnd-start, false)\nstart = rdStart\ncontinue\n}\nif rdEnd < end {\n- addValidRegion(&pr, start, rdEnd-start)\n+ addValidRegion(&pr, start, rdEnd-start, true)\nstart = rdEnd\ncontinue\n}\n- addValidRegion(&pr, start, end-start)\n+ addValidRegion(&pr, start, end-start, start >= rdStart && end <= rdEnd)\nstart = end\n}\n}\n@@ -168,63 +133,6 @@ func archPhysicalRegions(physicalRegions []physicalRegion) []physicalRegion {\nreturn regions\n}\n-// Get all available physicalRegions.\n-func availableRegionsForSetMem() []physicalRegion {\n- var excludedRegions []region\n- applyVirtualRegions(func(vr virtualRegion) {\n- if !vr.accessType.Write {\n- excludedRegions = append(excludedRegions, vr.region)\n- }\n- })\n-\n- // Add an unreachable region.\n- excludedRegions = append(excludedRegions, region{\n- virtual: 0xffffffffffffffff,\n- length: 0,\n- })\n-\n- var regions []physicalRegion\n- addValidRegion := func(r *physicalRegion, virtual, length uintptr) {\n- if length == 0 {\n- return\n- }\n- regions = append(regions, physicalRegion{\n- region: region{\n- 
virtual: virtual,\n- length: length,\n- },\n- physical: r.physical + (virtual - r.virtual),\n- })\n- }\n- i := 0\n- for _, pr := range physicalRegions {\n- start := pr.virtual\n- end := pr.virtual + pr.length\n- for start < end {\n- er := excludedRegions[i]\n- excludeEnd := er.virtual + er.length\n- excludeStart := er.virtual\n- if excludeEnd < start {\n- i++\n- continue\n- }\n- if excludeStart < start {\n- start = excludeEnd\n- i++\n- continue\n- }\n- rend := excludeStart\n- if rend > end {\n- rend = end\n- }\n- addValidRegion(&pr, start, rend-start)\n- start = excludeEnd\n- }\n- }\n-\n- return regions\n-}\n-\n// nonCanonical generates a canonical address return.\n//\n//go:nosplit\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine_unsafe.go",
"new_path": "pkg/sentry/platform/kvm/machine_unsafe.go",
"diff": "@@ -231,7 +231,7 @@ func seccompMmapHandler(context unsafe.Pointer) {\n}\n// Ensure the physical range is mapped.\n- m.mapPhysical(physical, length, physicalRegions, _KVM_MEM_FLAGS_NONE)\n+ m.mapPhysical(physical, length, physicalRegions)\nvirtual += length\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/physical_map.go",
"new_path": "pkg/sentry/platform/kvm/physical_map.go",
"diff": "@@ -32,6 +32,7 @@ type region struct {\ntype physicalRegion struct {\nregion\nphysical uintptr\n+ readOnly bool\n}\n// physicalRegions contains a list of available physical regions.\n"
}
] | Go | Apache License 2.0 | google/gvisor | kvm/arm64: exclude PROT_NONE mappings from the read-only list
The Go runtime uses PROT_NONE mappings as placeholders for future read-write
mappings.
Access mode has to be a property of physicalRegion. It makes the code cleaner.
Signed-off-by: Andrei Vagin <avagin@google.com> |
259,992 | 09.02.2022 17:29:31 | 28,800 | 974c0c2c9a108e2e90bf80b5acd4e3136bb1e97d | Allow safe flags to be set from annotations
runsc flags can be set through annotations only when `--allow-flag-override`
is set. However, certain flags can be safely set and should not require
an admin to set the override flag. | [
{
"change_type": "MODIFY",
"old_path": "runsc/config/config.go",
"new_path": "runsc/config/config.go",
"diff": "@@ -31,9 +31,11 @@ import (\n// Follow these steps to add a new flag:\n// 1. Create a new field in Config.\n// 2. Add a field tag with the flag name\n-// 3. Register a new flag in flags.go, with name and description\n+// 3. Register a new flag in flags.go, with same name and add a description\n// 4. Add any necessary validation into validate()\n// 5. If adding an enum, follow the same pattern as FileAccessType\n+// 6. Evaluate if the flag can be changed with OCI annotations. See\n+// overrideAllowlist for more details\n//\ntype Config struct {\n// RootDir is the runtime root directory.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/config_test.go",
"new_path": "runsc/config/config_test.go",
"diff": "@@ -305,3 +305,60 @@ func TestOverrideError(t *testing.T) {\n})\n}\n}\n+\n+func TestOverrideAllowlist(t *testing.T) {\n+ c, err := NewFromFlags()\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+ for _, tc := range []struct {\n+ flag string\n+ value string\n+ error string\n+ }{\n+ {\n+ flag: \"debug\",\n+ value: \"true\",\n+ },\n+ {\n+ flag: \"debug\",\n+ value: \"123\",\n+ error: \"error setting flag\",\n+ },\n+ {\n+ flag: \"oci-seccomp\",\n+ value: \"true\",\n+ },\n+ {\n+ flag: \"oci-seccomp\",\n+ value: \"false\",\n+ error: `disabling \"oci-seccomp\" requires flag`,\n+ },\n+ {\n+ flag: \"oci-seccomp\",\n+ value: \"123\",\n+ error: \"invalid syntax\",\n+ },\n+ {\n+ flag: \"profile\",\n+ value: \"true\",\n+ error: \"flag override disabled\",\n+ },\n+ {\n+ flag: \"profile\",\n+ value: \"123\",\n+ error: \"flag override disabled\",\n+ },\n+ } {\n+ t.Run(tc.flag, func(t *testing.T) {\n+ err := c.Override(tc.flag, tc.value)\n+ if len(tc.error) == 0 {\n+ if err != nil {\n+ t.Errorf(\"Unexpected error: %v\", err)\n+ }\n+ } else if err == nil || !strings.Contains(err.Error(), tc.error) {\n+ t.Errorf(\"Override(%q, %q) wrong error: %v\", tc.flag, tc.value, err)\n+ }\n+ })\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/flags.go",
"new_path": "runsc/config/flags.go",
"diff": "@@ -81,8 +81,8 @@ func RegisterFlags() {\nflag.Bool(\"verity\", false, \"specifies whether a verity file system will be mounted.\")\nflag.Bool(\"fsgofer-host-uds\", false, \"allow the gofer to mount Unix Domain Sockets.\")\nflag.Bool(\"vfs2\", true, \"enables VFSv2. This uses the new VFS layer that is faster than the previous one.\")\n- flag.Bool(\"fuse\", false, \"TEST ONLY; use while FUSE in VFSv2 is landing. This allows the use of the new experimental FUSE filesystem.\")\n- flag.Bool(\"lisafs\", false, \"Enables lisafs protocol instead of 9P. This is only effective with VFS2.\")\n+ flag.Bool(\"fuse\", false, \"TEST ONLY; This allows the use of the new experimental FUSE filesystem. Only works with VFS2.\")\n+ flag.Bool(\"lisafs\", false, \"Enables lisafs protocol instead of 9P. Only works with VFS2.\")\nflag.Bool(\"cgroupfs\", false, \"Automatically mount cgroupfs.\")\nflag.Bool(\"ignore-cgroups\", false, \"don't configure cgroups.\")\n@@ -103,6 +103,33 @@ func RegisterFlags() {\n})\n}\n+// overrideAllowlist lists all flags that can be changed using OCI\n+// annotations without an administrator setting `--allow-flag-override` on the\n+// runtime. Flags in this list can be set by container authors and should not\n+// make the sandbox less secure.\n+var overrideAllowlist = map[string]struct {\n+ check func(name string, value string) error\n+}{\n+ \"debug\": {},\n+ \"strace\": {},\n+ \"strace-syscalls\": {},\n+ \"strace-log-size\": {},\n+\n+ \"oci-seccomp\": {check: checkOciSeccomp},\n+}\n+\n+// checkOciSeccomp ensures that seccomp can be enabled but not disabled.\n+func checkOciSeccomp(name string, value string) error {\n+ enable, err := strconv.ParseBool(value)\n+ if err != nil {\n+ return err\n+ }\n+ if !enable {\n+ return fmt.Errorf(\"disabling %q requires flag %q to be enabled\", name, \"allow-flag-override\")\n+ }\n+ return nil\n+}\n+\n// NewFromFlags creates a new Config with values coming from command line flags.\nfunc NewFromFlags() (*Config, error) {\nconf := &Config{}\n@@ -168,10 +195,6 @@ func (c *Config) ToFlags() []string {\n// Override writes a new value to a flag.\nfunc (c *Config) Override(name string, value string) error {\n- if !c.AllowFlagOverride {\n- return fmt.Errorf(\"flag override disabled, use --allow-flag-override to enable it\")\n- }\n-\nobj := reflect.ValueOf(c).Elem()\nst := obj.Type()\nfor i := 0; i < st.NumField(); i++ {\n@@ -186,6 +209,9 @@ func (c *Config) Override(name string, value string) error {\n// Flag must exist if there is a field match above.\npanic(fmt.Sprintf(\"Flag %q not found\", name))\n}\n+ if err := c.isOverrideAllowed(name, value); err != nil {\n+ return fmt.Errorf(\"error setting flag %s=%q: %w\", name, value, err)\n+ }\n// Use flag to convert the string value to the underlying flag type, using\n// the same rules as the command-line for consistency.\n@@ -201,6 +227,23 @@ func (c *Config) Override(name string, value string) error {\nreturn fmt.Errorf(\"flag %q not found. 
Cannot set it to %q\", name, value)\n}\n+func (c *Config) isOverrideAllowed(name string, value string) error {\n+ if c.AllowFlagOverride {\n+ return nil\n+ }\n+ // If the global override flag is not enabled, check if individual flag is\n+ // safe to apply.\n+ if allow, ok := overrideAllowlist[name]; ok {\n+ if allow.check != nil {\n+ if err := allow.check(name, value); err != nil {\n+ return err\n+ }\n+ }\n+ return nil\n+ }\n+ return fmt.Errorf(\"flag override disabled, use --allow-flag-override to enable it\")\n+}\n+\nfunc getVal(field reflect.Value) string {\nif str, ok := field.Addr().Interface().(fmt.Stringer); ok {\nreturn str.String()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow safe flags to be set from annotations
runsc flags can be set through annotations only when `--allow-flag-override`
is set. However, certain flags can be safely set and should not require
an admin to set the override flag.
PiperOrigin-RevId: 427613132 |
259,962 | 09.02.2022 22:58:41 | 28,800 | 5a52fcc5466a7da26994ac168d56ee5e9cd21ae9 | Fix buffer aliasing issue when Merging views. | [
{
"change_type": "MODIFY",
"old_path": "pkg/buffer/view.go",
"new_path": "pkg/buffer/view.go",
"diff": "@@ -385,6 +385,10 @@ func (v *View) Clone() View {\nsize: v.size,\n}\nfor buf := v.data.Front(); buf != nil; buf = buf.Next() {\n+ // Copy the buffer structs itself as they are stateful and\n+ // should not be shared between Views.\n+ //\n+ // TODO(gvisor.dev/issue/7158): revisit need for View.pool.\nnewBuf := other.pool.getNoInit()\n*newBuf = *buf\nother.data.PushBack(newBuf)\n@@ -428,7 +432,13 @@ func (v *View) Merge(other *View) {\n// Copy over all buffers.\nfor buf := other.data.Front(); buf != nil; buf = other.data.Front() {\nother.data.Remove(buf)\n- v.data.PushBack(buf)\n+ // Copy the buffer structs itself as they are stateful and\n+ // should not be shared between Views.\n+ //\n+ // TODO(gvisor.dev/issue/7158): revisit need for View.pool.\n+ newBuf := v.pool.getNoInit()\n+ *newBuf = *buf\n+ v.data.PushBack(newBuf)\n}\n// Adjust sizes.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix buffer aliasing issue when Merging views.
PiperOrigin-RevId: 427662565 |
259,907 | 11.02.2022 11:12:48 | 28,800 | 697d3d52eb9494cee096eeb5d831c9767e02d15e | Fix walking to attach path logic in lisafs.
The attach path (if one exists) is always absolute. This is checked while
setting that option. strings.Split(aname, "/") creates an extra empty string in
the beginning of the slice. Exclude that while making Walk RPC. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"new_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"diff": "@@ -541,9 +541,9 @@ func (fs *filesystem) initClientLisa(ctx context.Context) (lisafs.Inode, error)\nreturn rootInode, nil\n}\n- // Walk to the attach point from root inode.\n+ // Walk to the attach point from root inode. aname is always absolute.\nrootFD := fs.clientLisa.NewFD(rootInode.ControlFD)\n- status, inodes, err := rootFD.WalkMultiple(ctx, strings.Split(fs.opts.aname, \"/\"))\n+ status, inodes, err := rootFD.WalkMultiple(ctx, strings.Split(fs.opts.aname, \"/\")[1:])\nrootFD.CloseBatched(ctx)\nif err != nil {\nreturn lisafs.Inode{}, err\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix walking to attach path logic in lisafs.
The attach path (if one exists) is always absolute. This is checked while
setting that option. strings.Split(aname, "/") creates an extra empty string in
the beginning of the slice. Exclude that while making Walk RPC.
PiperOrigin-RevId: 428046171 |
259,885 | 11.02.2022 11:43:37 | 28,800 | da30d5dd1c3d2366bdc9a094d3ea1dd9b6f48774 | Avoid lock recursion when gofer revalidation invalidates a mount point. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/revalidate.go",
"new_path": "pkg/sentry/fsimpl/gofer/revalidate.go",
"diff": "@@ -291,10 +291,18 @@ func (fs *filesystem) revalidateHelper(ctx context.Context, vfsObj *vfs.VirtualF\nreturn nil\n}\n// The file at this path has changed or no longer exists. Mark the\n- // dentry invalidated, and re-evaluate its caching status (i.e. if it\n- // has 0 references, drop it). The dentry will be reloaded next time it's\n- // accessed.\n+ // dentry invalidated, and re-evaluate its caching status (i.e. if\n+ // it has 0 references, drop it). The dentry will be reloaded next\n+ // time it's accessed.\n+ //\n+ // If the dentry is a mountpoint, InvalidateDentry may drop the\n+ // last reference on it, resulting in lock recursion. To avoid\n+ // this, take a dentry reference first, then drop it while\n+ // deferring the call to dentry.checkCachingLocked().\n+ d.IncRef()\nvfsObj.InvalidateDentry(ctx, &d.vfsd)\n+ d.decRefNoCaching()\n+ *ds = appendDentry(*ds, d)\nname := state.names[i]\nd.parent.dirMu.Lock()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Avoid lock recursion when gofer revalidation invalidates a mount point.
PiperOrigin-RevId: 428053753 |
260,004 | 11.02.2022 11:55:38 | 28,800 | 029554dc3ca1360c91252648f9ef8c0fe24cc2a9 | Support SO_BINDTODEVICE on raw endpoints | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/endpoint.go",
"new_path": "pkg/tcpip/transport/raw/endpoint.go",
"diff": "@@ -160,6 +160,11 @@ func newEndpoint(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, transProt\nreturn e, nil\n}\n+// HasNIC implements tcpip.SocketOptionsHandler.\n+func (e *endpoint) HasNIC(id int32) bool {\n+ return e.stack.HasNIC(tcpip.NICID(id))\n+}\n+\n// Abort implements stack.TransportEndpoint.Abort.\nfunc (e *endpoint) Abort() {\ne.Close()\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/raw_socket.cc",
"new_path": "test/syscalls/linux/raw_socket.cc",
"diff": "// limitations under the License.\n#include <arpa/inet.h>\n+#include <net/if.h>\n#include <netinet/in.h>\n#include <netinet/ip.h>\n#include <netinet/ip6.h>\n@@ -875,6 +876,20 @@ TEST_P(RawSocketTest, GetSocketDetachFilter) {\nSyscallFailsWithErrno(ENOPROTOOPT));\n}\n+TEST_P(RawSocketTest, BindToDevice) {\n+ constexpr char kLoopbackDeviceName[] = \"lo\";\n+ ASSERT_THAT(setsockopt(s_, SOL_SOCKET, SO_BINDTODEVICE, &kLoopbackDeviceName,\n+ sizeof(kLoopbackDeviceName)),\n+ SyscallSucceeds());\n+\n+ char got[IFNAMSIZ];\n+ socklen_t got_len = sizeof(got);\n+ ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_BINDTODEVICE, &got, &got_len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(got_len, sizeof(kLoopbackDeviceName));\n+ EXPECT_EQ(strcmp(kLoopbackDeviceName, got), 0);\n+}\n+\n// AF_INET6+SOCK_RAW+IPPROTO_RAW sockets can be created, but not written to.\nTEST(RawSocketTest, IPv6ProtoRaw) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveRawIPSocketCapability()));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support SO_BINDTODEVICE on raw endpoints
PiperOrigin-RevId: 428056402 |
259,907 | 12.02.2022 16:14:28 | 28,800 | 0255d313a4c825c62634ee41d73a4eef060872d7 | Remove VFS1 continuous runtime tests.
Updates | [
{
"change_type": "MODIFY",
"old_path": ".buildkite/pipeline.yaml",
"new_path": ".buildkite/pipeline.yaml",
"diff": "@@ -214,33 +214,6 @@ steps:\ncommand: make python3.7.3-runtime-tests_vfs2\nparallelism: 20\n- # Runtime tests (VFS1).\n- - <<: *common\n- label: \":php: PHP runtime tests (VFS1)\"\n- command: make php7.3.6-runtime-tests\n- parallelism: 20\n- if: build.message =~ /VFS1/ || build.branch == \"master\"\n- - <<: *common\n- label: \":java: Java runtime tests (VFS1)\"\n- command: make java11-runtime-tests\n- parallelism: 120\n- if: build.message =~ /VFS1/ || build.branch == \"master\"\n- - <<: *common\n- label: \":golang: Go runtime tests (VFS1)\"\n- command: make go1.12-runtime-tests\n- parallelism: 20\n- if: build.message =~ /VFS1/ || build.branch == \"master\"\n- - <<: *common\n- label: \":node: NodeJS runtime tests (VFS1)\"\n- command: make nodejs12.4.0-runtime-tests\n- parallelism: 10\n- if: build.message =~ /VFS1/ || build.branch == \"master\"\n- - <<: *common\n- label: \":python: Python runtime tests (VFS1)\"\n- command: make python3.7.3-runtime-tests\n- parallelism: 20\n- if: build.message =~ /VFS1/ || build.branch == \"master\"\n-\n# ARM tests.\n- <<: *common\nlabel: \":mechanical_arm: ARM\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove VFS1 continuous runtime tests.
Updates #1624
PiperOrigin-RevId: 428255277 |
259,951 | 14.02.2022 13:49:24 | 28,800 | 4a94302bafb8cb3f95e740a13c37659e80cf04bf | Support receiving PKTINFO on icmp endpoints | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/control/control.go",
"new_path": "pkg/sentry/socket/control/control.go",
"diff": "@@ -600,6 +600,16 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)\ntclass.UnmarshalUnsafe(buf)\ncmsgs.IP.TClass = uint32(tclass)\n+ case linux.IPV6_PKTINFO:\n+ if length < linux.SizeOfControlMessageIPv6PacketInfo {\n+ return socket.ControlMessages{}, linuxerr.EINVAL\n+ }\n+\n+ cmsgs.IP.HasIPv6PacketInfo = true\n+ var packetInfo linux.ControlMessageIPv6PacketInfo\n+ packetInfo.UnmarshalUnsafe(buf)\n+ cmsgs.IP.IPv6PacketInfo = packetInfo\n+\ncase linux.IPV6_RECVORIGDSTADDR:\nvar addr linux.SockAddrInet6\nif length < addr.SizeBytes() {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/socket.go",
"new_path": "pkg/sentry/socket/hostinet/socket.go",
"diff": "@@ -392,7 +392,7 @@ func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, optVal\n}\ncase linux.SOL_IPV6:\nswitch name {\n- case linux.IPV6_TCLASS, linux.IPV6_RECVTCLASS, linux.IPV6_RECVERR, linux.IPV6_V6ONLY, linux.IPV6_RECVORIGDSTADDR:\n+ case linux.IPV6_TCLASS, linux.IPV6_RECVTCLASS, linux.IPV6_RECVPKTINFO, linux.IPV6_RECVERR, linux.IPV6_V6ONLY, linux.IPV6_RECVORIGDSTADDR:\noptlen = sizeofInt32\n}\ncase linux.SOL_SOCKET:\n@@ -449,7 +449,7 @@ func (s *socketOpsCommon) SetSockOpt(t *kernel.Task, level int, name int, opt []\n}\ncase linux.SOL_IPV6:\nswitch name {\n- case linux.IPV6_TCLASS, linux.IPV6_RECVTCLASS, linux.IPV6_RECVERR, linux.IPV6_V6ONLY, linux.IPV6_RECVORIGDSTADDR:\n+ case linux.IPV6_TCLASS, linux.IPV6_RECVTCLASS, linux.IPV6_RECVPKTINFO, linux.IPV6_RECVERR, linux.IPV6_V6ONLY, linux.IPV6_RECVORIGDSTADDR:\noptlen = sizeofInt32\n}\ncase linux.SOL_SOCKET:\n@@ -632,6 +632,12 @@ func parseUnixControlMessages(unixControlMessages []unix.SocketControlMessage) s\ntclass.UnmarshalUnsafe(unixCmsg.Data)\ncontrolMessages.IP.TClass = uint32(tclass)\n+ case linux.IPV6_PKTINFO:\n+ controlMessages.IP.HasIPv6PacketInfo = true\n+ var packetInfo linux.ControlMessageIPv6PacketInfo\n+ packetInfo.UnmarshalUnsafe(unixCmsg.Data)\n+ controlMessages.IP.IPv6PacketInfo = packetInfo\n+\ncase linux.IPV6_RECVORIGDSTADDR:\nvar addr linux.SockAddrInet6\naddr.UnmarshalUnsafe(unixCmsg.Data)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/checker/checker.go",
"new_path": "pkg/tcpip/checker/checker.go",
"diff": "@@ -346,6 +346,17 @@ func ReceiveIPPacketInfo(want tcpip.IPPacketInfo) ControlMessagesChecker {\n}\n}\n+// NoIPPacketInfoReceived creates a checker that checks the PacketInfo field in\n+// ControlMessages.\n+func NoIPPacketInfoReceived() ControlMessagesChecker {\n+ return func(t *testing.T, cm tcpip.ControlMessages) {\n+ t.Helper()\n+ if cm.HasIPPacketInfo {\n+ t.Error(\"got cm.HasIPPacketInfo = true, want = false\")\n+ }\n+ }\n+}\n+\n// ReceiveIPv6PacketInfo creates a checker that checks the IPv6PacketInfo field\n// in ControlMessages.\nfunc ReceiveIPv6PacketInfo(want tcpip.IPv6PacketInfo) ControlMessagesChecker {\n@@ -359,6 +370,17 @@ func ReceiveIPv6PacketInfo(want tcpip.IPv6PacketInfo) ControlMessagesChecker {\n}\n}\n+// NoIPv6PacketInfoReceived creates a checker that checks the PacketInfo field\n+// in ControlMessages.\n+func NoIPv6PacketInfoReceived() ControlMessagesChecker {\n+ return func(t *testing.T, cm tcpip.ControlMessages) {\n+ t.Helper()\n+ if cm.HasIPv6PacketInfo {\n+ t.Error(\"got cm.HasIPv6PacketInfo = true, want = false\")\n+ }\n+ }\n+}\n+\n// ReceiveOriginalDstAddr creates a checker that checks the OriginalDstAddress\n// field in ControlMessages.\nfunc ReceiveOriginalDstAddr(want tcpip.FullAddress) ControlMessagesChecker {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/endpoint.go",
"new_path": "pkg/tcpip/transport/icmp/endpoint.go",
"diff": "@@ -35,6 +35,7 @@ import (\ntype icmpPacket struct {\nicmpPacketEntry\nsenderAddress tcpip.FullAddress\n+ packetInfo tcpip.IPPacketInfo\ndata buffer.VectorisedView `state:\".(buffer.VectorisedView)\"`\nreceivedAt time.Time `state:\".(int64)\"`\n@@ -194,12 +195,23 @@ func (e *endpoint) Read(dst io.Writer, opts tcpip.ReadOptions) (tcpip.ReadResult\ncm.HasTOS = true\ncm.TOS = p.tosOrTClass\n}\n+ if e.ops.GetReceivePacketInfo() {\n+ cm.HasIPPacketInfo = true\n+ cm.PacketInfo = p.packetInfo\n+ }\ncase header.IPv6ProtocolNumber:\nif e.ops.GetReceiveTClass() {\ncm.HasTClass = true\n// Although TClass is an 8-bit value it's read in the CMsg as a uint32.\ncm.TClass = uint32(p.tosOrTClass)\n}\n+ if e.ops.GetIPv6ReceivePacketInfo() {\n+ cm.HasIPv6PacketInfo = true\n+ cm.IPv6PacketInfo = tcpip.IPv6PacketInfo{\n+ NIC: p.packetInfo.NIC,\n+ Addr: p.packetInfo.DestinationAddr,\n+ }\n+ }\ndefault:\npanic(fmt.Sprintf(\"unrecognized network protocol = %d\", netProto))\n}\n@@ -696,16 +708,28 @@ func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketB\nwasEmpty := e.rcvBufSize == 0\n+ net := pkt.Network()\n+ dstAddr := net.DestinationAddress()\n// Push new packet into receive list and increment the buffer size.\npacket := &icmpPacket{\nsenderAddress: tcpip.FullAddress{\nNIC: pkt.NICID,\nAddr: id.RemoteAddress,\n},\n+ packetInfo: tcpip.IPPacketInfo{\n+ // Linux does not 'prepare' [1] in_pktinfo on socket buffers destined to\n+ // ping sockets (unlike UDP/RAW sockets). However the interface index [2]\n+ // and the Header Destination Address [3] are always filled.\n+ // [1] https://github.com/torvalds/linux/blob/dcb85f85fa6/net/ipv4/ip_sockglue.c#L1392\n+ // [2] https://github.com/torvalds/linux/blob/dcb85f85fa6/net/ipv4/ip_input.c#L510\n+ // [3] https://github.com/torvalds/linux/blob/dcb85f85fa6/net/ipv4/ip_sockglue.c#L60\n+ NIC: pkt.NICID,\n+ DestinationAddr: dstAddr,\n+ },\n}\n// Save any useful information from the network header to the packet.\n- packet.tosOrTClass, _ = pkt.Network().TOS()\n+ packet.tosOrTClass, _ = net.TOS()\n// ICMP socket's data includes ICMP header.\npacket.data = pkt.TransportHeader().View().ToVectorisedView()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/icmp_test.go",
"new_path": "pkg/tcpip/transport/icmp/icmp_test.go",
"diff": "@@ -320,6 +320,8 @@ func buildEchoReplyPacket(payload []byte, flow context.TestFlow) (buffer.View, b\nfunc TestReceiveControlMessages(t *testing.T) {\nvar payload = [...]byte{0, 1, 2, 3, 4, 5}\n+ for _, flow := range []context.TestFlow{context.UnicastV4, context.UnicastV6, context.UnicastV6Only, context.MulticastV4, context.MulticastV6, context.MulticastV6Only, context.Broadcast} {\n+ t.Run(flow.String(), func(t *testing.T) {\nfor _, test := range []struct {\nname string\noptionProtocol tcpip.NetworkProtocolNumber\n@@ -344,10 +346,39 @@ func TestReceiveControlMessages(t *testing.T) {\npresenceChecker: checker.ReceiveTClass(testTOS),\nabsenceChecker: checker.NoTClassReceived(),\n},\n+ {\n+ name: \"IPPacketInfo\",\n+ optionProtocol: header.IPv4ProtocolNumber,\n+ getReceiveOption: func(ep tcpip.Endpoint) bool { return ep.SocketOptions().GetReceivePacketInfo() },\n+ setReceiveOption: func(ep tcpip.Endpoint, value bool) { ep.SocketOptions().SetReceivePacketInfo(value) },\n+ presenceChecker: func() checker.ControlMessagesChecker {\n+ h := flow.MakeHeader4Tuple(context.Incoming)\n+ return checker.ReceiveIPPacketInfo(tcpip.IPPacketInfo{\n+ NIC: context.NICID,\n+ // TODO(https://gvisor.dev/issue/3556): Expect the NIC's address\n+ // instead of the header destination address for the LocalAddr\n+ // field.\n+ DestinationAddr: h.Dst.Addr,\n+ })\n+ }(),\n+ absenceChecker: checker.NoIPPacketInfoReceived(),\n+ },\n+ {\n+ name: \"IPv6PacketInfo\",\n+ optionProtocol: header.IPv6ProtocolNumber,\n+ getReceiveOption: func(ep tcpip.Endpoint) bool { return ep.SocketOptions().GetIPv6ReceivePacketInfo() },\n+ setReceiveOption: func(ep tcpip.Endpoint, value bool) { ep.SocketOptions().SetIPv6ReceivePacketInfo(value) },\n+ presenceChecker: func() checker.ControlMessagesChecker {\n+ h := flow.MakeHeader4Tuple(context.Incoming)\n+ return checker.ReceiveIPv6PacketInfo(tcpip.IPv6PacketInfo{\n+ NIC: context.NICID,\n+ Addr: h.Dst.Addr,\n+ })\n+ }(),\n+ absenceChecker: checker.NoIPv6PacketInfoReceived(),\n+ },\n} {\nt.Run(test.name, func(t *testing.T) {\n- for _, flow := range []context.TestFlow{context.UnicastV4, context.UnicastV6, context.UnicastV6Only, context.MulticastV4, context.MulticastV6, context.MulticastV6Only, context.Broadcast} {\n- t.Run(flow.String(), func(t *testing.T) {\nc := context.New(t, []stack.TransportProtocolFactory{icmp.NewProtocol4, icmp.NewProtocol6})\ndefer c.Cleanup()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/raw_test.go",
"new_path": "pkg/tcpip/transport/raw/raw_test.go",
"diff": "@@ -36,6 +36,8 @@ const (\nfunc TestReceiveControlMessage(t *testing.T) {\nvar payload = [...]byte{0, 1, 2, 3, 4, 5}\n+ for _, flow := range []context.TestFlow{context.UnicastV4, context.UnicastV6, context.UnicastV6Only, context.MulticastV4, context.MulticastV6, context.MulticastV6Only, context.Broadcast} {\n+ t.Run(flow.String(), func(t *testing.T) {\nfor _, test := range []struct {\nname string\noptionProtocol tcpip.NetworkProtocolNumber\n@@ -60,11 +62,40 @@ func TestReceiveControlMessage(t *testing.T) {\npresenceChecker: checker.ReceiveTClass(testTOS),\nabsenceChecker: checker.NoTClassReceived(),\n},\n+ {\n+ name: \"IPPacketInfo\",\n+ optionProtocol: header.IPv4ProtocolNumber,\n+ getReceiveOption: func(ep tcpip.Endpoint) bool { return ep.SocketOptions().GetReceivePacketInfo() },\n+ setReceiveOption: func(ep tcpip.Endpoint, value bool) { ep.SocketOptions().SetReceivePacketInfo(value) },\n+ presenceChecker: func() checker.ControlMessagesChecker {\n+ h := flow.MakeHeader4Tuple(context.Incoming)\n+ return checker.ReceiveIPPacketInfo(tcpip.IPPacketInfo{\n+ NIC: context.NICID,\n+ // TODO(https://gvisor.dev/issue/3556): Expect the NIC's address\n+ // instead of the header destination address for the LocalAddr\n+ // field.\n+ LocalAddr: h.Dst.Addr,\n+ DestinationAddr: h.Dst.Addr,\n+ })\n+ }(),\n+ absenceChecker: checker.NoIPPacketInfoReceived(),\n+ },\n+ {\n+ name: \"IPv6PacketInfo\",\n+ optionProtocol: header.IPv6ProtocolNumber,\n+ getReceiveOption: func(ep tcpip.Endpoint) bool { return ep.SocketOptions().GetIPv6ReceivePacketInfo() },\n+ setReceiveOption: func(ep tcpip.Endpoint, value bool) { ep.SocketOptions().SetIPv6ReceivePacketInfo(value) },\n+ presenceChecker: func() checker.ControlMessagesChecker {\n+ h := flow.MakeHeader4Tuple(context.Incoming)\n+ return checker.ReceiveIPv6PacketInfo(tcpip.IPv6PacketInfo{\n+ NIC: context.NICID,\n+ Addr: h.Dst.Addr,\n+ })\n+ }(),\n+ absenceChecker: checker.NoIPv6PacketInfoReceived(),\n+ },\n} {\nt.Run(test.name, func(t *testing.T) {\n- for _, flow := range []context.TestFlow{context.UnicastV4, context.UnicastV6, context.UnicastV6Only, context.MulticastV4, context.MulticastV6, context.MulticastV6Only, context.Broadcast} {\n- t.Run(flow.String(), func(t *testing.T) {\n-\nc := context.New(t, []stack.TransportProtocolFactory{udp.NewProtocol})\ndefer c.Cleanup()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/udp_test.go",
"new_path": "pkg/tcpip/transport/udp/udp_test.go",
"diff": "@@ -844,112 +844,6 @@ func TestReadIncrementsPacketsReceived(t *testing.T) {\n}\n}\n-func TestReadIPPacketInfo(t *testing.T) {\n- tests := []struct {\n- name string\n- proto tcpip.NetworkProtocolNumber\n- flow context.TestFlow\n- checker func(tcpip.NICID) checker.ControlMessagesChecker\n- }{\n- {\n- name: \"IPv4 unicast\",\n- proto: header.IPv4ProtocolNumber,\n- flow: context.UnicastV4,\n- checker: func(id tcpip.NICID) checker.ControlMessagesChecker {\n- return checker.ReceiveIPPacketInfo(tcpip.IPPacketInfo{\n- NIC: id,\n- LocalAddr: context.StackAddr,\n- DestinationAddr: context.StackAddr,\n- })\n- },\n- },\n- {\n- name: \"IPv4 multicast\",\n- proto: header.IPv4ProtocolNumber,\n- flow: context.MulticastV4,\n- checker: func(id tcpip.NICID) checker.ControlMessagesChecker {\n- return checker.ReceiveIPPacketInfo(tcpip.IPPacketInfo{\n- NIC: id,\n- // TODO(gvisor.dev/issue/3556): Check for a unicast address.\n- LocalAddr: context.MulticastAddr,\n- DestinationAddr: context.MulticastAddr,\n- })\n- },\n- },\n- {\n- name: \"IPv4 broadcast\",\n- proto: header.IPv4ProtocolNumber,\n- flow: context.Broadcast,\n- checker: func(id tcpip.NICID) checker.ControlMessagesChecker {\n- return checker.ReceiveIPPacketInfo(tcpip.IPPacketInfo{\n- NIC: id,\n- // TODO(gvisor.dev/issue/3556): Check for a unicast address.\n- LocalAddr: context.BroadcastAddr,\n- DestinationAddr: context.BroadcastAddr,\n- })\n- },\n- },\n- {\n- name: \"IPv6 unicast\",\n- proto: header.IPv6ProtocolNumber,\n- flow: context.UnicastV6,\n- checker: func(id tcpip.NICID) checker.ControlMessagesChecker {\n- return checker.ReceiveIPv6PacketInfo(tcpip.IPv6PacketInfo{\n- NIC: id,\n- Addr: context.StackV6Addr,\n- })\n- },\n- },\n- {\n- name: \"IPv6 multicast\",\n- proto: header.IPv6ProtocolNumber,\n- flow: context.MulticastV6,\n- checker: func(id tcpip.NICID) checker.ControlMessagesChecker {\n- return checker.ReceiveIPv6PacketInfo(tcpip.IPv6PacketInfo{\n- NIC: id,\n- Addr: context.MulticastV6Addr,\n- })\n- },\n- },\n- }\n-\n- for _, test := range tests {\n- t.Run(test.name, func(t *testing.T) {\n- c := context.New(t, []stack.TransportProtocolFactory{udp.NewProtocol, icmp.NewProtocol6, icmp.NewProtocol4})\n- defer c.Cleanup()\n-\n- c.CreateEndpoint(test.proto, udp.ProtocolNumber)\n-\n- bindAddr := tcpip.FullAddress{Port: context.StackPort}\n- if err := c.EP.Bind(bindAddr); err != nil {\n- t.Fatalf(\"Bind(%+v): %s\", bindAddr, err)\n- }\n-\n- if test.flow.IsMulticast() {\n- ifoptSet := tcpip.AddMembershipOption{NIC: context.NICID, MulticastAddr: test.flow.GetMulticastAddr()}\n- if err := c.EP.SetSockOpt(&ifoptSet); err != nil {\n- c.T.Fatalf(\"SetSockOpt(&%#v): %s:\", ifoptSet, err)\n- }\n- }\n-\n- switch f := test.flow.NetProto(); f {\n- case header.IPv4ProtocolNumber:\n- c.EP.SocketOptions().SetReceivePacketInfo(true)\n- case header.IPv6ProtocolNumber:\n- c.EP.SocketOptions().SetIPv6ReceivePacketInfo(true)\n- default:\n- t.Fatalf(\"unhandled protocol number = %d\", f)\n- }\n-\n- testRead(c, test.flow, test.checker(context.NICID))\n-\n- if got := c.Stack.Stats().UDP.PacketsReceived.Value(); got != 1 {\n- t.Fatalf(\"Read did not increment PacketsReceived: got = %d, want = 1\", got)\n- }\n- })\n- }\n-}\n-\nfunc TestReadRecvOriginalDstAddr(t *testing.T) {\ntests := []struct {\nname string\n@@ -1246,6 +1140,8 @@ func TestSetTClass(t *testing.T) {\n}\nfunc TestReceiveControlMessage(t *testing.T) {\n+ for _, flow := range []context.TestFlow{context.UnicastV4, context.UnicastV6, context.UnicastV6Only, context.MulticastV4, context.MulticastV6, 
context.MulticastV6Only, context.Broadcast} {\n+ t.Run(flow.String(), func(t *testing.T) {\nfor _, test := range []struct {\nname string\noptionProtocol tcpip.NetworkProtocolNumber\n@@ -1270,10 +1166,40 @@ func TestReceiveControlMessage(t *testing.T) {\npresenceChecker: checker.ReceiveTClass(testTOS),\nabsenceChecker: checker.NoTClassReceived(),\n},\n+ {\n+ name: \"PacketInfo\",\n+ optionProtocol: header.IPv4ProtocolNumber,\n+ getReceiveOption: func(ep tcpip.Endpoint) bool { return ep.SocketOptions().GetReceivePacketInfo() },\n+ setReceiveOption: func(ep tcpip.Endpoint, value bool) { ep.SocketOptions().SetReceivePacketInfo(value) },\n+ presenceChecker: func() checker.ControlMessagesChecker {\n+ h := flow.MakeHeader4Tuple(context.Incoming)\n+ return checker.ReceiveIPPacketInfo(tcpip.IPPacketInfo{\n+ NIC: context.NICID,\n+ // TODO(https://gvisor.dev/issue/3556): Expect the NIC's address\n+ // instead of the header destination address for the LocalAddr\n+ // field.\n+ LocalAddr: h.Dst.Addr,\n+ DestinationAddr: h.Dst.Addr,\n+ })\n+ }(),\n+ absenceChecker: checker.NoIPPacketInfoReceived(),\n+ },\n+ {\n+ name: \"IPv6PacketInfo\",\n+ optionProtocol: header.IPv6ProtocolNumber,\n+ getReceiveOption: func(ep tcpip.Endpoint) bool { return ep.SocketOptions().GetIPv6ReceivePacketInfo() },\n+ setReceiveOption: func(ep tcpip.Endpoint, value bool) { ep.SocketOptions().SetIPv6ReceivePacketInfo(value) },\n+ presenceChecker: func() checker.ControlMessagesChecker {\n+ h := flow.MakeHeader4Tuple(context.Incoming)\n+ return checker.ReceiveIPv6PacketInfo(tcpip.IPv6PacketInfo{\n+ NIC: context.NICID,\n+ Addr: h.Dst.Addr,\n+ })\n+ }(),\n+ absenceChecker: checker.NoIPv6PacketInfoReceived(),\n+ },\n} {\nt.Run(test.name, func(t *testing.T) {\n- for _, flow := range []context.TestFlow{context.UnicastV4, context.UnicastV6, context.UnicastV6Only, context.MulticastV4, context.MulticastV6, context.MulticastV6Only, context.Broadcast} {\n- t.Run(flow.String(), func(t *testing.T) {\nc := context.New(t, []stack.TransportProtocolFactory{udp.NewProtocol})\ndefer c.Cleanup()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/config.go",
"new_path": "runsc/boot/filter/config.go",
"diff": "@@ -389,6 +389,11 @@ func hostInetFilters() seccomp.SyscallRules {\nseccomp.EqualTo(unix.SOL_IPV6),\nseccomp.EqualTo(unix.IPV6_RECVTCLASS),\n},\n+ {\n+ seccomp.MatchAny{},\n+ seccomp.EqualTo(unix.SOL_IPV6),\n+ seccomp.EqualTo(unix.IPV6_RECVPKTINFO),\n+ },\n{\nseccomp.MatchAny{},\nseccomp.EqualTo(unix.SOL_IPV6),\n@@ -548,6 +553,13 @@ func hostInetFilters() seccomp.SyscallRules {\nseccomp.MatchAny{},\nseccomp.EqualTo(4),\n},\n+ {\n+ seccomp.MatchAny{},\n+ seccomp.EqualTo(unix.SOL_IPV6),\n+ seccomp.EqualTo(unix.IPV6_RECVPKTINFO),\n+ seccomp.MatchAny{},\n+ seccomp.EqualTo(4),\n+ },\n{\nseccomp.MatchAny{},\nseccomp.EqualTo(unix.SOL_IP),\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/ip_socket_test_util.cc",
"new_path": "test/syscalls/linux/ip_socket_test_util.cc",
"diff": "namespace gvisor {\nnamespace testing {\n+using ::testing::IsNull;\nusing ::testing::NotNull;\nuint32_t IPFromInetSockaddr(const struct sockaddr* addr) {\n@@ -270,10 +271,11 @@ void RecvCmsg(int sock, int cmsg_level, int cmsg_type, char buf[],\nASSERT_EQ(msg.msg_controllen, CMSG_SPACE(sizeof(*out_cmsg_value)));\nstruct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);\n- ASSERT_NE(cmsg, nullptr);\n+ ASSERT_THAT(cmsg, NotNull());\nASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(*out_cmsg_value)));\nASSERT_EQ(cmsg->cmsg_level, cmsg_level);\nASSERT_EQ(cmsg->cmsg_type, cmsg_type);\n+ ASSERT_THAT(CMSG_NXTHDR(&msg, cmsg), IsNull());\nstd::copy_n(CMSG_DATA(cmsg), sizeof(*out_cmsg_value),\nreinterpret_cast<uint8_t*>(out_cmsg_value));\n@@ -325,5 +327,15 @@ void SendTClass(int sock, char buf[], size_t buf_size, int tclass) {\nSendCmsg(sock, SOL_IPV6, IPV6_TCLASS, buf, buf_size, tclass);\n}\n+void RecvPktInfo(int sock, char buf[], size_t* buf_size,\n+ in_pktinfo* out_pktinfo) {\n+ RecvCmsg(sock, SOL_IP, IP_PKTINFO, buf, buf_size, out_pktinfo);\n+}\n+\n+void RecvIPv6PktInfo(int sock, char buf[], size_t* buf_size,\n+ in6_pktinfo* out_pktinfo) {\n+ RecvCmsg(sock, SOL_IPV6, IPV6_PKTINFO, buf, buf_size, out_pktinfo);\n+}\n+\n} // namespace testing\n} // namespace gvisor\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/ip_socket_test_util.h",
"new_path": "test/syscalls/linux/ip_socket_test_util.h",
"diff": "@@ -144,6 +144,20 @@ void RecvTClass(int sock, char buf[], size_t* buf_size, int* out_tclass);\n// message.\nvoid SendTClass(int sock, char buf[], size_t buf_size, int tclass);\n+// RecvPktInfo attempts to read buf_size bytes into buf, and then update\n+// buf_size with the numbers of bytes actually read. It expects the\n+// IP_PKTINFO cmsg to be received. The buffer must already be allocated with\n+// at least buf_size size.\n+void RecvPktInfo(int sock, char buf[], size_t* buf_size,\n+ in_pktinfo* out_pktinfo);\n+\n+// RecvIPv6PktInfo attempts to read buf_size bytes into buf, and then update\n+// buf_size with the numbers of bytes actually read. It expects the\n+// IPV6_PKTINFO cmsg to be received. The buffer must already be allocated with\n+// at least buf_size size.\n+void RecvIPv6PktInfo(int sock, char buf[], size_t* buf_size,\n+ in6_pktinfo* out_pktinfo);\n+\n} // namespace testing\n} // namespace gvisor\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/ping_socket.cc",
"new_path": "test/syscalls/linux/ping_socket.cc",
"diff": "@@ -112,9 +112,8 @@ TEST(PingSocket, ReceiveTOS) {\nSyscallSucceedsWithValue(sizeof(kSendIcmp)));\n// Register to receive TOS.\n- constexpr int kOne = 1;\n- ASSERT_THAT(\n- setsockopt(ping.get(), IPPROTO_IP, IP_RECVTOS, &kOne, sizeof(kOne)),\n+ ASSERT_THAT(setsockopt(ping.get(), IPPROTO_IP, IP_RECVTOS, &kSockOptOn,\n+ sizeof(kSockOptOn)),\nSyscallSucceeds());\nstruct {\n@@ -167,9 +166,8 @@ TEST(PingSocket, ReceiveTClass) {\nSyscallSucceedsWithValue(sizeof(kSendIcmp)));\n// Register to receive TCLASS.\n- constexpr int kOne = 1;\n- ASSERT_THAT(setsockopt(ping.get(), IPPROTO_IPV6, IPV6_RECVTCLASS, &kOne,\n- sizeof(kOne)),\n+ ASSERT_THAT(setsockopt(ping.get(), IPPROTO_IPV6, IPV6_RECVTCLASS, &kSockOptOn,\n+ sizeof(kSockOptOn)),\nSyscallSucceeds());\nstruct {\n@@ -191,6 +189,111 @@ TEST(PingSocket, ReceiveTClass) {\nEXPECT_EQ(received_tclass, kArbitraryTClass);\n}\n+TEST(PingSocket, ReceiveIPPacketInfo) {\n+ PosixErrorOr<FileDescriptor> result =\n+ Socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);\n+ if (!result.ok()) {\n+ int errno_value = result.error().errno_value();\n+ ASSERT_EQ(errno_value, EACCES) << strerror(errno_value);\n+ GTEST_SKIP() << \"ping socket not supported\";\n+ }\n+ FileDescriptor& ping = result.ValueOrDie();\n+\n+ const sockaddr_in kAddr = {\n+ .sin_family = AF_INET,\n+ .sin_addr = {.s_addr = htonl(INADDR_LOOPBACK)},\n+ };\n+ ASSERT_THAT(bind(ping.get(), reinterpret_cast<const sockaddr*>(&kAddr),\n+ sizeof(kAddr)),\n+ SyscallSucceeds());\n+\n+ constexpr icmphdr kSendIcmp = {\n+ .type = ICMP_ECHO,\n+ };\n+ ASSERT_THAT(sendto(ping.get(), &kSendIcmp, sizeof(kSendIcmp), 0,\n+ reinterpret_cast<const sockaddr*>(&kAddr), sizeof(kAddr)),\n+ SyscallSucceedsWithValue(sizeof(kSendIcmp)));\n+\n+ // Register to receive PKTINFO.\n+ ASSERT_THAT(setsockopt(ping.get(), IPPROTO_IP, IP_PKTINFO, &kSockOptOn,\n+ sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+\n+ struct {\n+ icmphdr icmp;\n+\n+ // Add an extra byte to confirm we did not read unexpected bytes.\n+ char unused;\n+ } ABSL_ATTRIBUTE_PACKED recv_buf;\n+ size_t recv_buf_len = sizeof(recv_buf);\n+ in_pktinfo received_pktinfo;\n+ ASSERT_NO_FATAL_FAILURE(RecvPktInfo(ping.get(),\n+ reinterpret_cast<char*>(&recv_buf),\n+ &recv_buf_len, &received_pktinfo));\n+ ASSERT_EQ(recv_buf_len, sizeof(icmphdr));\n+\n+ EXPECT_EQ(recv_buf.icmp.type, ICMP_ECHOREPLY);\n+ EXPECT_EQ(recv_buf.icmp.code, 0);\n+\n+ EXPECT_EQ(received_pktinfo.ipi_ifindex,\n+ ASSERT_NO_ERRNO_AND_VALUE(GetLoopbackIndex()));\n+ EXPECT_EQ(ntohl(received_pktinfo.ipi_spec_dst.s_addr), INADDR_ANY);\n+ EXPECT_EQ(ntohl(received_pktinfo.ipi_addr.s_addr), INADDR_LOOPBACK);\n+}\n+\n+TEST(PingSocket, ReceiveIPv6PktInfo) {\n+ PosixErrorOr<FileDescriptor> result =\n+ Socket(AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6);\n+ if (!result.ok()) {\n+ int errno_value = result.error().errno_value();\n+ ASSERT_EQ(errno_value, EACCES) << strerror(errno_value);\n+ GTEST_SKIP() << \"ping socket not supported\";\n+ }\n+ FileDescriptor& ping = result.ValueOrDie();\n+\n+ const sockaddr_in6 kAddr = {\n+ .sin6_family = AF_INET6,\n+ .sin6_addr = in6addr_loopback,\n+ };\n+ ASSERT_THAT(bind(ping.get(), reinterpret_cast<const sockaddr*>(&kAddr),\n+ sizeof(kAddr)),\n+ SyscallSucceeds());\n+\n+ constexpr icmp6_hdr kSendIcmp = {\n+ .icmp6_type = ICMP6_ECHO_REQUEST,\n+ };\n+ ASSERT_THAT(sendto(ping.get(), &kSendIcmp, sizeof(kSendIcmp), 0,\n+ reinterpret_cast<const sockaddr*>(&kAddr), sizeof(kAddr)),\n+ SyscallSucceedsWithValue(sizeof(kSendIcmp)));\n+\n+ // Register to receive PKTINFO.\n+ ASSERT_THAT(setsockopt(ping.get(), 
IPPROTO_IPV6, IPV6_RECVPKTINFO,\n+ &kSockOptOn, sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+\n+ struct {\n+ icmp6_hdr icmpv6;\n+\n+ // Add an extra byte to confirm we did not read unexpected bytes.\n+ char unused;\n+ } ABSL_ATTRIBUTE_PACKED recv_buf;\n+ size_t recv_buf_len = sizeof(recv_buf);\n+ in6_pktinfo received_pktinfo;\n+ ASSERT_NO_FATAL_FAILURE(RecvIPv6PktInfo(ping.get(),\n+ reinterpret_cast<char*>(&recv_buf),\n+ &recv_buf_len, &received_pktinfo));\n+ ASSERT_EQ(recv_buf_len, sizeof(kSendIcmp));\n+\n+ EXPECT_EQ(recv_buf.icmpv6.icmp6_type, ICMP6_ECHO_REPLY);\n+ EXPECT_EQ(recv_buf.icmpv6.icmp6_code, 0);\n+\n+ EXPECT_EQ(received_pktinfo.ipi6_ifindex,\n+ ASSERT_NO_ERRNO_AND_VALUE(GetLoopbackIndex()));\n+ ASSERT_EQ(memcmp(&received_pktinfo.ipi6_addr, &in6addr_loopback,\n+ sizeof(in6addr_loopback)),\n+ 0);\n+}\n+\nstruct BindTestCase {\nTestAddress bind_to;\nint want = 0;\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/raw_socket.cc",
"new_path": "test/syscalls/linux/raw_socket.cc",
"diff": "@@ -1091,12 +1091,12 @@ TEST(RawSocketTest, ReceiveIPPacketInfo) {\nSyscallSucceeds());\n// Register to receive IP packet info.\n- constexpr int one = 1;\n- ASSERT_THAT(setsockopt(raw.get(), IPPROTO_IP, IP_PKTINFO, &one, sizeof(one)),\n+ ASSERT_THAT(setsockopt(raw.get(), IPPROTO_IP, IP_PKTINFO, &kSockOptOn,\n+ sizeof(kSockOptOn)),\nSyscallSucceeds());\nconstexpr char send_buf[] = \"malformed UDP\";\n- ASSERT_THAT(sendto(raw.get(), send_buf, sizeof(send_buf), 0 /* flags */,\n+ ASSERT_THAT(sendto(raw.get(), send_buf, sizeof(send_buf), /*flags=*/0,\nreinterpret_cast<const sockaddr*>(&addr_), sizeof(addr_)),\nSyscallSucceedsWithValue(sizeof(send_buf)));\n@@ -1107,20 +1107,14 @@ TEST(RawSocketTest, ReceiveIPPacketInfo) {\n// Extra space in the receive buffer should be unused.\nchar unused_space;\n} ABSL_ATTRIBUTE_PACKED recv_buf;\n- iovec recv_iov = {\n- .iov_base = &recv_buf,\n- .iov_len = sizeof(recv_buf),\n- };\n+\n+ size_t recv_buf_len = sizeof(recv_buf);\nin_pktinfo received_pktinfo;\n- char recv_cmsg_buf[CMSG_SPACE(sizeof(received_pktinfo))];\n- msghdr recv_msg = {\n- .msg_iov = &recv_iov,\n- .msg_iovlen = 1,\n- .msg_control = recv_cmsg_buf,\n- .msg_controllen = CMSG_LEN(sizeof(received_pktinfo)),\n- };\n- ASSERT_THAT(RetryEINTR(recvmsg)(raw.get(), &recv_msg, 0),\n- SyscallSucceedsWithValue(sizeof(iphdr) + sizeof(send_buf)));\n+ ASSERT_NO_FATAL_FAILURE(RecvPktInfo(raw.get(),\n+ reinterpret_cast<char*>(&recv_buf),\n+ &recv_buf_len, &received_pktinfo));\n+\n+ EXPECT_EQ(recv_buf_len, sizeof(iphdr) + sizeof(send_buf));\nEXPECT_EQ(memcmp(send_buf, &recv_buf.data, sizeof(send_buf)), 0);\nEXPECT_EQ(recv_buf.ip.version, static_cast<unsigned int>(IPVERSION));\n// IHL holds the number of header bytes in 4 byte units.\n@@ -1130,18 +1124,10 @@ TEST(RawSocketTest, ReceiveIPPacketInfo) {\nEXPECT_EQ(ntohl(recv_buf.ip.saddr), INADDR_LOOPBACK);\nEXPECT_EQ(ntohl(recv_buf.ip.daddr), INADDR_LOOPBACK);\n- cmsghdr* cmsg = CMSG_FIRSTHDR(&recv_msg);\n- ASSERT_THAT(cmsg, NotNull());\n- EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(received_pktinfo)));\n- EXPECT_EQ(cmsg->cmsg_level, IPPROTO_IP);\n- EXPECT_EQ(cmsg->cmsg_type, IP_PKTINFO);\n- memcpy(&received_pktinfo, CMSG_DATA(cmsg), sizeof(received_pktinfo));\nEXPECT_EQ(received_pktinfo.ipi_ifindex,\nASSERT_NO_ERRNO_AND_VALUE(GetLoopbackIndex()));\nEXPECT_EQ(ntohl(received_pktinfo.ipi_spec_dst.s_addr), INADDR_LOOPBACK);\nEXPECT_EQ(ntohl(received_pktinfo.ipi_addr.s_addr), INADDR_LOOPBACK);\n-\n- EXPECT_THAT(CMSG_NXTHDR(&recv_msg, cmsg), IsNull());\n}\nTEST(RawSocketTest, ReceiveIPv6PacketInfo) {\n@@ -1159,46 +1145,28 @@ TEST(RawSocketTest, ReceiveIPv6PacketInfo) {\nSyscallSucceeds());\n// Register to receive IPv6 packet info.\n- constexpr int one = 1;\n- ASSERT_THAT(\n- setsockopt(raw.get(), IPPROTO_IPV6, IPV6_RECVPKTINFO, &one, sizeof(one)),\n+ ASSERT_THAT(setsockopt(raw.get(), IPPROTO_IPV6, IPV6_RECVPKTINFO, &kSockOptOn,\n+ sizeof(kSockOptOn)),\nSyscallSucceeds());\nconstexpr char send_buf[] = \"malformed UDP\";\n- ASSERT_THAT(sendto(raw.get(), send_buf, sizeof(send_buf), 0 /* flags */,\n+ ASSERT_THAT(sendto(raw.get(), send_buf, sizeof(send_buf), /*flags=*/0,\nreinterpret_cast<const sockaddr*>(&addr_), sizeof(addr_)),\nSyscallSucceedsWithValue(sizeof(send_buf)));\nchar recv_buf[sizeof(send_buf) + 1];\n- iovec recv_iov = {\n- .iov_base = recv_buf,\n- .iov_len = sizeof(recv_buf),\n- };\n+ size_t recv_buf_len = sizeof(recv_buf);\nin6_pktinfo received_pktinfo;\n- char recv_cmsg_buf[CMSG_SPACE(sizeof(received_pktinfo))];\n- msghdr recv_msg = {\n- .msg_iov = 
&recv_iov,\n- .msg_iovlen = 1,\n- .msg_control = recv_cmsg_buf,\n- .msg_controllen = CMSG_LEN(sizeof(received_pktinfo)),\n- };\n- ASSERT_THAT(RetryEINTR(recvmsg)(raw.get(), &recv_msg, 0),\n- SyscallSucceedsWithValue(sizeof(send_buf)));\n+ ASSERT_NO_FATAL_FAILURE(RecvIPv6PktInfo(raw.get(),\n+ reinterpret_cast<char*>(&recv_buf),\n+ &recv_buf_len, &received_pktinfo));\n+ EXPECT_EQ(recv_buf_len, sizeof(send_buf));\nEXPECT_EQ(memcmp(send_buf, recv_buf, sizeof(send_buf)), 0);\n-\n- cmsghdr* cmsg = CMSG_FIRSTHDR(&recv_msg);\n- ASSERT_THAT(cmsg, NotNull());\n- EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(received_pktinfo)));\n- EXPECT_EQ(cmsg->cmsg_level, IPPROTO_IPV6);\n- EXPECT_EQ(cmsg->cmsg_type, IPV6_PKTINFO);\n- memcpy(&received_pktinfo, CMSG_DATA(cmsg), sizeof(received_pktinfo));\nEXPECT_EQ(received_pktinfo.ipi6_ifindex,\nASSERT_NO_ERRNO_AND_VALUE(GetLoopbackIndex()));\nASSERT_EQ(memcmp(&received_pktinfo.ipi6_addr, &in6addr_loopback,\nsizeof(in6addr_loopback)),\n0);\n-\n- EXPECT_THAT(CMSG_NXTHDR(&recv_msg, cmsg), IsNull());\n}\nTEST(RawSocketTest, ReceiveTOS) {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/udp_socket.cc",
"new_path": "test/syscalls/linux/udp_socket.cc",
"diff": "@@ -2142,6 +2142,126 @@ TEST_P(UdpSocketControlMessagesTest, SendAndReceiveTOSorTClass) {\nEXPECT_EQ(recv_data_len, sizeof(sent_data));\n}\n+TEST_P(UdpSocketControlMessagesTest, SetAndReceivePktInfo) {\n+ // Enable receiving IP_PKTINFO and maybe IPV6_PKTINFO on the receiver.\n+ ASSERT_THAT(setsockopt(server_.get(), SOL_IP, IP_PKTINFO, &kSockOptOn,\n+ sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+ if (ServerAddressFamily() == AF_INET6) {\n+ ASSERT_THAT(setsockopt(server_.get(), SOL_IPV6, IPV6_RECVPKTINFO,\n+ &kSockOptOn, sizeof(kSockOptOn)),\n+ SyscallSucceeds());\n+ }\n+\n+ constexpr size_t kArbitrarySendSize = 1042;\n+ constexpr char sent_data[kArbitrarySendSize] = {};\n+ ASSERT_THAT(RetryEINTR(send)(client_.get(), sent_data, sizeof(sent_data), 0),\n+ SyscallSucceedsWithValue(sizeof(sent_data)));\n+\n+ char recv_data[sizeof(sent_data) + 1];\n+ size_t recv_data_len = sizeof(recv_data);\n+ switch (GetParam()) {\n+ case AddressFamily::kIpv4: {\n+ in_pktinfo received_pktinfo;\n+ ASSERT_NO_FATAL_FAILURE(RecvPktInfo(server_.get(), recv_data,\n+ &recv_data_len, &received_pktinfo));\n+ EXPECT_EQ(recv_data_len, sizeof(sent_data));\n+ EXPECT_EQ(received_pktinfo.ipi_ifindex,\n+ ASSERT_NO_ERRNO_AND_VALUE(GetLoopbackIndex()));\n+ EXPECT_EQ(ntohl(received_pktinfo.ipi_spec_dst.s_addr), INADDR_LOOPBACK);\n+ EXPECT_EQ(ntohl(received_pktinfo.ipi_addr.s_addr), INADDR_LOOPBACK);\n+ break;\n+ }\n+\n+ case AddressFamily::kIpv6: {\n+ in6_pktinfo received_pktinfo;\n+ ASSERT_NO_FATAL_FAILURE(RecvIPv6PktInfo(\n+ server_.get(), recv_data, &recv_data_len, &received_pktinfo));\n+ EXPECT_EQ(recv_data_len, sizeof(sent_data));\n+ EXPECT_EQ(received_pktinfo.ipi6_ifindex,\n+ ASSERT_NO_ERRNO_AND_VALUE(GetLoopbackIndex()));\n+ ASSERT_EQ(memcmp(&received_pktinfo.ipi6_addr, &in6addr_loopback,\n+ sizeof(in6addr_loopback)),\n+ 0);\n+ break;\n+ }\n+\n+ case AddressFamily::kDualStack: {\n+ // TODO(https://gvisor.dev/issue/7144): On dual stack sockets, Linux can\n+ // receive both the IPv4 and IPv6 packet info. 
gVisor should do the same.\n+ iovec iov = {\n+ iov.iov_base = recv_data,\n+ iov.iov_len = recv_data_len,\n+ };\n+ // Add an extra byte to confirm we only read what we expected.\n+ char control[CMSG_SPACE(sizeof(in_pktinfo)) +\n+ CMSG_SPACE(sizeof(in6_pktinfo)) + 1];\n+ msghdr msg = {\n+ .msg_iov = &iov,\n+ .msg_iovlen = 1,\n+ .msg_control = control,\n+ .msg_controllen = sizeof(control),\n+ };\n+\n+ ASSERT_THAT(\n+ recv_data_len = RetryEINTR(recvmsg)(server_.get(), &msg, /*flags=*/0),\n+ SyscallSucceeds());\n+ EXPECT_EQ(recv_data_len, sizeof(sent_data));\n+ size_t expected_controllen = CMSG_SPACE(sizeof(in_pktinfo));\n+ if (!IsRunningOnGvisor() || IsRunningWithHostinet()) {\n+ expected_controllen += CMSG_SPACE(sizeof(in6_pktinfo));\n+ }\n+ EXPECT_EQ(msg.msg_controllen, expected_controllen);\n+\n+ std::pair<in_pktinfo, bool> received_pktinfo;\n+ std::pair<in6_pktinfo, bool> received_pktinfo6;\n+\n+ struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);\n+ while (cmsg != nullptr) {\n+ ASSERT_TRUE(cmsg->cmsg_level == SOL_IP || cmsg->cmsg_level == SOL_IPV6);\n+ if (cmsg->cmsg_level == SOL_IP) {\n+ ASSERT_FALSE(received_pktinfo.second);\n+ ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(in_pktinfo)));\n+ ASSERT_EQ(cmsg->cmsg_type, IP_PKTINFO);\n+ received_pktinfo.second = true;\n+ std::copy_n(CMSG_DATA(cmsg), sizeof(received_pktinfo.first),\n+ reinterpret_cast<uint8_t*>(&received_pktinfo.first));\n+ } else { // SOL_IPV6\n+ ASSERT_FALSE(received_pktinfo6.second);\n+ ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(in6_pktinfo)));\n+ ASSERT_EQ(cmsg->cmsg_type, IPV6_PKTINFO);\n+ received_pktinfo6.second = true;\n+ std::copy_n(CMSG_DATA(cmsg), sizeof(received_pktinfo6.first),\n+ reinterpret_cast<uint8_t*>(&received_pktinfo6.first));\n+ }\n+ cmsg = CMSG_NXTHDR(&msg, cmsg);\n+ }\n+\n+ ASSERT_TRUE(received_pktinfo.second);\n+ EXPECT_EQ(received_pktinfo.first.ipi_ifindex,\n+ ASSERT_NO_ERRNO_AND_VALUE(GetLoopbackIndex()));\n+ EXPECT_EQ(ntohl(received_pktinfo.first.ipi_spec_dst.s_addr),\n+ INADDR_LOOPBACK);\n+ EXPECT_EQ(ntohl(received_pktinfo.first.ipi_addr.s_addr), INADDR_LOOPBACK);\n+\n+ if (!IsRunningOnGvisor() || IsRunningWithHostinet()) {\n+ ASSERT_TRUE(received_pktinfo6.second);\n+ EXPECT_EQ(received_pktinfo6.first.ipi6_ifindex,\n+ ASSERT_NO_ERRNO_AND_VALUE(GetLoopbackIndex()));\n+ struct in6_addr expected;\n+ inet_pton(AF_INET6, \"::ffff:127.0.0.1\", &expected);\n+ EXPECT_EQ(memcmp(&received_pktinfo6.first.ipi6_addr, &expected,\n+ sizeof(expected)),\n+ 0);\n+ } else {\n+ ASSERT_FALSE(received_pktinfo6.second);\n+ }\n+\n+ break;\n+ }\n+ }\n+}\n+\nINSTANTIATE_TEST_SUITE_P(AllInetTests, UdpSocketControlMessagesTest,\n::testing::Values(AddressFamily::kIpv4,\nAddressFamily::kIpv6,\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support receiving PKTINFO on icmp endpoints
PiperOrigin-RevId: 428599075 |
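The record above teaches gVisor's datagram ICMP ("ping") endpoints to deliver IP_PKTINFO / IPV6_PKTINFO control messages, and its tests exercise that through setsockopt plus recvmsg. For orientation, here is a hedged Go sketch of a client consuming the same metadata via golang.org/x/net; the loopback target, the echo payload, and the assumption that ipv4.FlagDst/FlagInterface map onto IP_PKTINFO on Linux are illustrative and not taken from the commit itself.

```go
// Hypothetical sketch: read packet-info metadata on an unprivileged ICMP
// ("ping") socket, roughly what the new ping_socket tests exercise.
package main

import (
	"log"
	"net"
	"os"

	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
)

func main() {
	// "udp4" gives a datagram-oriented (unprivileged) ICMP socket on Linux,
	// the same kind of endpoint the tests open with
	// socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP).
	c, err := icmp.ListenPacket("udp4", "127.0.0.1")
	if err != nil {
		log.Fatal(err) // e.g. EACCES when ping sockets are disallowed
	}
	defer c.Close()

	p := c.IPv4PacketConn()
	// On Linux these flags are delivered through IP_PKTINFO.
	if err := p.SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true); err != nil {
		log.Fatal(err)
	}

	echo := icmp.Message{
		Type: ipv4.ICMPTypeEcho,
		Body: &icmp.Echo{ID: os.Getpid() & 0xffff, Seq: 1, Data: []byte("ping")},
	}
	wb, err := echo.Marshal(nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := p.WriteTo(wb, nil, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}); err != nil {
		log.Fatal(err)
	}

	rb := make([]byte, 1500)
	n, cm, peer, err := p.ReadFrom(rb)
	if err != nil {
		log.Fatal(err)
	}
	// cm.Dst and cm.IfIndex carry the packet-info data the commit's tests
	// assert on (loopback address and loopback interface index).
	log.Printf("read %d bytes from %v, dst=%v ifindex=%d", n, peer, cm.Dst, cm.IfIndex)
}
```

In the commit's C++ tests the same information arrives as an in_pktinfo / in6_pktinfo control message and is compared against the loopback interface index and address.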
259,962 | 15.02.2022 18:26:42 | 28,800 | 0917380792ac17eeb27784104bc46704ce7bc126 | Clear pending packet queue when nic is removed.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -316,9 +316,13 @@ func (n *nic) remove() tcpip.Error {\nep.Close()\n}\n+ // drain and drop any packets pending link resolution.\n+ n.linkResQueue.cancel()\n+\n// Prevent packets from going down to the link before shutting the link down.\nn.qDisc.Close()\nn.NetworkLinkEndpoint.Attach(nil)\n+\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/pending_packets.go",
"new_path": "pkg/tcpip/stack/pending_packets.go",
"diff": "@@ -70,6 +70,20 @@ func (f *packetsPendingLinkResolution) init(nic *nic) {\nf.mu.packets = make(map[<-chan struct{}][]pendingPacket)\n}\n+// cancel drains all pending packet queues and release all packet\n+// references.\n+func (f *packetsPendingLinkResolution) cancel() {\n+ f.mu.Lock()\n+ defer f.mu.Unlock()\n+ for ch, pendingPackets := range f.mu.packets {\n+ for _, p := range pendingPackets {\n+ p.pkt.DecRef()\n+ }\n+ delete(f.mu.packets, ch)\n+ }\n+ f.mu.cancelChans = nil\n+}\n+\n// dequeue any pending packets associated with ch.\n//\n// If err is nil, packets will be written and sent to the given remote link\n"
}
] | Go | Apache License 2.0 | google/gvisor | Clear pending packet queue when nic is removed.
Updates #6910
PiperOrigin-RevId: 428925334 |
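The cancel() added above drains every per-channel queue of packets still waiting on link resolution and drops their references before the qdisc and link endpoint are shut down, so removing a NIC no longer strands queued PacketBuffers. A minimal stand-alone sketch of that drain-and-release pattern follows; the generic queue type is hypothetical and only mirrors the shape of packetsPendingLinkResolution.

```go
// Hypothetical drain-and-release pattern: each queued item carries a
// reference that must be dropped exactly once when the queue is abandoned.
package pending

import "sync"

type refCounted interface{ DecRef() }

type queue[T refCounted] struct {
	mu      sync.Mutex
	packets map[<-chan struct{}][]T
}

// cancel drops every queued item and its reference under the lock, leaving
// the map empty for any concurrent enqueuer to observe.
func (q *queue[T]) cancel() {
	q.mu.Lock()
	defer q.mu.Unlock()
	for ch, items := range q.packets {
		for _, it := range items {
			it.DecRef()
		}
		delete(q.packets, ch)
	}
}
```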
259,962 | 15.02.2022 20:50:46 | 28,800 | ae089adbb223bc1600321ac0563c6fd4c26db97c | Add Leak checking to ipv4 tests. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/fragmentation/fragmentation.go",
"new_path": "pkg/tcpip/network/internal/fragmentation/fragmentation.go",
"diff": "@@ -19,9 +19,9 @@ package fragmentation\nimport (\n\"errors\"\n\"fmt\"\n- \"log\"\n\"time\"\n+ \"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n@@ -219,6 +219,16 @@ func (f *Fragmentation) Process(\nreturn resPkt, firstFragmentProto, done, nil\n}\n+// Release releases all underlying resources.\n+func (f *Fragmentation) Release() {\n+ f.mu.Lock()\n+ defer f.mu.Unlock()\n+ for _, r := range f.reassemblers {\n+ f.release(r, false /* timedOut */)\n+ }\n+ f.reassemblers = nil\n+}\n+\nfunc (f *Fragmentation) release(r *reassembler, timedOut bool) {\n// Before releasing a fragment we need to check if r is already marked as done.\n// Otherwise, we would delete it twice.\n@@ -230,7 +240,7 @@ func (f *Fragmentation) release(r *reassembler, timedOut bool) {\nf.rList.Remove(r)\nf.memSize -= r.memSize\nif f.memSize < 0 {\n- log.Printf(\"memory counter < 0 (%d), this is an accounting bug that requires investigation\", f.memSize)\n+ log.Warningf(\"memory counter < 0 (%d), this is an accounting bug that requires investigation\", f.memSize)\nf.memSize = 0\n}\n@@ -239,12 +249,15 @@ func (f *Fragmentation) release(r *reassembler, timedOut bool) {\n}\nif r.pkt != nil {\nr.pkt.DecRef()\n+ r.pkt = nil\n}\nfor _, h := range r.holes {\nif h.pkt != nil {\nh.pkt.DecRef()\n+ h.pkt = nil\n}\n}\n+ r.holes = nil\n}\n// releaseReassemblersLocked releases already-expired reassemblers, then\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/fragmentation/reassembler.go",
"new_path": "pkg/tcpip/network/internal/fragmentation/reassembler.go",
"diff": "@@ -174,7 +174,7 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *s\nfor i := 1; i < len(r.holes); i++ {\nstack.MergeFragment(resPkt, r.holes[i].pkt)\n}\n- return resPkt, r.proto, true, memConsumed, nil\n+ return resPkt, r.proto, true /* done */, memConsumed, nil\n}\nfunc (r *reassembler) checkDoneOrMark() bool {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/testutil/testutil.go",
"new_path": "pkg/tcpip/network/internal/testutil/testutil.go",
"diff": "@@ -69,6 +69,7 @@ func (ep *MockLinkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpi\nreturn n, ep.err\n}\nep.allowPackets--\n+ pkt.IncRef()\nep.WrittenPackets = append(ep.WrittenPackets, pkt)\nn++\n}\n@@ -90,6 +91,14 @@ func (*MockLinkEndpoint) ARPHardwareType() header.ARPHardwareType { return heade\n// AddHeader implements LinkEndpoint.AddHeader.\nfunc (*MockLinkEndpoint) AddHeader(*stack.PacketBuffer) {}\n+// Close releases all resources.\n+func (ep *MockLinkEndpoint) Close() {\n+ for _, pkt := range ep.WrittenPackets {\n+ pkt.DecRef()\n+ }\n+ ep.WrittenPackets = nil\n+}\n+\n// MakeRandPkt generates a randomized packet. transportHeaderLength indicates\n// how many random bytes will be copied in the Transport Header.\n// extraHeaderReserveLength indicates how much extra space will be reserved for\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/BUILD",
"new_path": "pkg/tcpip/network/ipv4/BUILD",
"diff": "@@ -30,8 +30,11 @@ go_test(\nsrcs = [\n\"igmp_test.go\",\n\"ipv4_test.go\",\n+ \"main_test.go\",\n],\ndeps = [\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/sync\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/igmp_test.go",
"new_path": "pkg/tcpip/network/ipv4/igmp_test.go",
"diff": "@@ -63,7 +63,18 @@ func validateIgmpPacket(t *testing.T, pkt *stack.PacketBuffer, igmpType header.I\n)\n}\n-func createStack(t *testing.T, igmpEnabled bool) (*channel.Endpoint, *stack.Stack, *faketime.ManualClock) {\n+type igmpTestContext struct {\n+ s *stack.Stack\n+ ep *channel.Endpoint\n+ clock *faketime.ManualClock\n+}\n+\n+func (ctx igmpTestContext) cleanup() {\n+ ctx.s.Close()\n+ ctx.s.Wait()\n+}\n+\n+func newIGMPTestContext(t *testing.T, igmpEnabled bool) igmpTestContext {\nt.Helper()\n// Create an endpoint of queue size 1, since no more than 1 packets are ever\n@@ -81,7 +92,12 @@ func createStack(t *testing.T, igmpEnabled bool) (*channel.Endpoint, *stack.Stac\nif err := s.CreateNIC(nicID, e); err != nil {\nt.Fatalf(\"CreateNIC(%d, _) = %s\", nicID, err)\n}\n- return e, s, clock\n+\n+ return igmpTestContext{\n+ ep: e,\n+ s: s,\n+ clock: clock,\n+ }\n}\nfunc createAndInjectIGMPPacket(e *channel.Endpoint, igmpType header.IGMPType, maxRespTime byte, ttl uint8, srcAddr, dstAddr, groupAddress tcpip.Address, hasRouterAlertOption bool) {\n@@ -109,17 +125,22 @@ func createAndInjectIGMPPacket(e *channel.Endpoint, igmpType header.IGMPType, ma\nigmp.SetMaxRespTime(maxRespTime)\nigmp.SetGroupAddress(groupAddress)\nigmp.SetChecksum(header.IGMPCalculateChecksum(igmp))\n-\n- e.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n- }))\n+ })\n+ e.InjectInbound(ipv4.ProtocolNumber, pkt)\n+ pkt.DecRef()\n}\n// TestIGMPV1Present tests the node's ability to fallback to V1 when a V1\n// router is detected. V1 present status is expected to be reset when the NIC\n// cycles.\nfunc TestIGMPV1Present(t *testing.T) {\n- e, s, clock := createStack(t, true)\n+ ctx := newIGMPTestContext(t, true /* igmpEnabled */)\n+ defer ctx.cleanup()\n+ s := ctx.s\n+ e := ctx.ep\n+\nprotocolAddr := tcpip.ProtocolAddress{\nProtocol: ipv4.ProtocolNumber,\nAddressWithPrefix: tcpip.AddressWithPrefix{Address: stackAddr, PrefixLen: defaultPrefixLength},\n@@ -168,7 +189,7 @@ func TestIGMPV1Present(t *testing.T) {\nif p := e.Read(); p != nil {\nt.Fatalf(\"sent unexpected packet, expected V1MembershipReport only after advancing the clock = %+v\", p)\n}\n- clock.Advance(ipv4.UnsolicitedReportIntervalMax)\n+ ctx.clock.Advance(ipv4.UnsolicitedReportIntervalMax)\n{\np := e.Read()\nif p == nil {\n@@ -200,7 +221,11 @@ func TestIGMPV1Present(t *testing.T) {\n}\nfunc TestSendQueuedIGMPReports(t *testing.T) {\n- e, s, clock := createStack(t, true)\n+ ctx := newIGMPTestContext(t, true /* igmpEnabled */)\n+ defer ctx.cleanup()\n+ s := ctx.s\n+ e := ctx.ep\n+ clock := ctx.clock\n// Joining a group without an assigned address should queue IGMP packets; none\n// should be sent without an assigned address.\n@@ -358,7 +383,11 @@ func TestIGMPPacketValidation(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- e, s, _ := createStack(t, true)\n+ ctx := newIGMPTestContext(t, true /* igmpEnabled */)\n+ defer ctx.cleanup()\n+ s := ctx.s\n+ e := ctx.ep\n+\nfor _, address := range test.stackAddresses {\nprotocolAddr := tcpip.ProtocolAddress{\nProtocol: ipv4.ProtocolNumber,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -417,13 +417,13 @@ func (e *endpoint) handleFragments(_ *stack.Route, networkMTU uint32, pkt *stack\nvar n int\nfor {\nfragPkt, more := buildNextFragment(&pf, networkHeader)\n- if err := handler(fragPkt); err != nil {\n+ err := handler(fragPkt)\nfragPkt.DecRef()\n+ if err != nil {\nreturn n, pf.RemainingFragmentCount() + 1, err\n}\nn++\nif !more {\n- fragPkt.DecRef()\nreturn n, pf.RemainingFragmentCount(), nil\n}\n}\n@@ -1239,7 +1239,9 @@ func (p *protocol) DefaultTTL() uint8 {\n}\n// Close implements stack.TransportProtocol.\n-func (*protocol) Close() {}\n+func (p *protocol) Close() {\n+ p.fragmentation.Release()\n+}\n// Wait implements stack.TransportProtocol.\nfunc (*protocol) Wait() {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4_test.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4_test.go",
"diff": "@@ -50,11 +50,31 @@ const (\ndefaultMTU = 65536\n)\n-func TestExcludeBroadcast(t *testing.T) {\n+type testContext struct {\n+ s *stack.Stack\n+ clock *faketime.ManualClock\n+}\n+\n+func newTestContext() testContext {\n+ clock := faketime.NewManualClock()\ns := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},\n+ NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol},\nTransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},\n+ Clock: clock,\n+ RawFactory: raw.EndpointFactory{},\n})\n+ return testContext{s: s, clock: clock}\n+}\n+\n+func (ctx testContext) cleanup() {\n+ ctx.s.Close()\n+ ctx.s.Wait()\n+}\n+\n+func TestExcludeBroadcast(t *testing.T) {\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\nep := stack.LinkEndpoint(channel.New(256, defaultMTU, \"\"))\nif testing.Verbose() {\n@@ -342,13 +362,10 @@ func TestForwarding(t *testing.T) {\n}\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- clock := faketime.NewManualClock()\n-\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},\n- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol4},\n- Clock: clock,\n- })\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+ clock := ctx.clock\n// Advance the clock by some unimportant amount to make\n// it give a more recognisable signature than 00,00,00,00.\n@@ -429,9 +446,9 @@ func TestForwarding(t *testing.T) {\nrequestPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: hdr.View().ToVectorisedView(),\n})\n+ defer requestPkt.DecRef()\nrequestPkt.NetworkProtocolNumber = header.IPv4ProtocolNumber\nincomingEndpoint.InjectInbound(header.IPv4ProtocolNumber, requestPkt)\n-\nreply := incomingEndpoint.Read()\nif test.expectErrorICMP {\n@@ -1172,12 +1189,11 @@ func TestIPv4Sanity(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- clock := faketime.NewManualClock()\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},\n- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol4},\n- Clock: clock,\n- })\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+ clock := ctx.clock\n+\n// Advance the clock by some unimportant amount to make\n// it give a more recognisable signature than 00,00,00,00.\nclock.Advance(time.Millisecond * randomTimeOffset)\n@@ -1252,6 +1268,7 @@ func TestIPv4Sanity(t *testing.T) {\nrequestPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: hdr.View().ToVectorisedView(),\n})\n+ defer requestPkt.DecRef()\ne.InjectInbound(header.IPv4ProtocolNumber, requestPkt)\nreply := e.Read()\nif reply == nil {\n@@ -1544,10 +1561,16 @@ func TestFragmentationWritePacket(t *testing.T) {\nfor _, ft := range fragmentationTests {\nt.Run(ft.description, func(t *testing.T) {\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+\nep := iptestutil.NewMockLinkEndpoint(ft.mtu, nil, math.MaxInt32)\n- r := buildRoute(t, ep)\n+ defer ep.Close()\n+ r := buildRoute(t, ctx, ep)\npkt := iptestutil.MakeRandPkt(ft.transportHeaderLength, extraHeaderReserve+header.IPv4MinimumSize, []int{ft.payloadSize}, header.IPv4ProtocolNumber)\n+ defer pkt.DecRef()\nsource := pkt.Clone()\n+ defer source.DecRef()\nerr := r.WritePacket(stack.NetworkHeaderParams{\nProtocol: tcp.ProtocolNumber,\nTTL: ttl,\n@@ -1641,9 +1664,14 @@ func TestFragmentationErrors(t *testing.T) {\nfor _, ft := range tests 
{\nt.Run(ft.description, func(t *testing.T) {\n- pkt := iptestutil.MakeRandPkt(ft.transportHeaderLength, extraHeaderReserve+header.IPv4MinimumSize, []int{ft.payloadSize}, header.IPv4ProtocolNumber)\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+\nep := iptestutil.NewMockLinkEndpoint(ft.mtu, ft.mockError, ft.allowPackets)\n- r := buildRoute(t, ep)\n+ defer ep.Close()\n+ r := buildRoute(t, ctx, ep)\n+ pkt := iptestutil.MakeRandPkt(ft.transportHeaderLength, extraHeaderReserve+header.IPv4MinimumSize, []int{ft.payloadSize}, header.IPv4ProtocolNumber)\n+ defer pkt.DecRef()\nerr := r.WritePacket(stack.NetworkHeaderParams{\nProtocol: tcp.ProtocolNumber,\nTTL: ttl,\n@@ -1924,11 +1952,10 @@ func TestInvalidFragments(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{\n- ipv4.NewProtocol,\n- },\n- })\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+\ne := channel.New(0, 1500, linkAddr)\nif err := s.CreateNIC(nicID, e); err != nil {\nt.Fatalf(\"CreateNIC(%d, _) = %s\", nicID, err)\n@@ -1967,9 +1994,11 @@ func TestInvalidFragments(t *testing.T) {\n}\nvv := hdr.View().ToVectorisedView()\n- e.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: vv,\n- }))\n+ })\n+ e.InjectInbound(header.IPv4ProtocolNumber, pkt)\n+ pkt.DecRef()\n}\nif got, want := s.Stats().IP.MalformedPacketsReceived.Value(), test.wantMalformedIPPackets; got != want {\n@@ -2151,13 +2180,11 @@ func TestFragmentReassemblyTimeout(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- clock := faketime.NewManualClock()\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{\n- ipv4.NewProtocol,\n- },\n- Clock: clock,\n- })\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+ clock := ctx.clock\n+\ne := channel.New(1, 1500, linkAddr)\nif err := s.CreateNIC(nicID, e); err != nil {\nt.Fatalf(\"CreateNIC(%d, _) = %s\", nicID, err)\n@@ -2197,6 +2224,7 @@ func TestFragmentReassemblyTimeout(t *testing.T) {\n}\ne.InjectInbound(header.IPv4ProtocolNumber, pkt)\n+ pkt.DecRef()\n}\nclock.Advance(ipv4.ReassembleTimeout)\n@@ -2622,12 +2650,10 @@ func TestReceiveFragments(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- // Setup a stack and endpoint.\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},\n- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},\n- RawFactory: raw.EndpointFactory{},\n- })\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+\ne := channel.New(0, 1280, \"\\xf0\\x00\")\nif err := s.CreateNIC(nicID, e); err != nil {\nt.Fatalf(\"CreateNIC(%d, _) = %s\", nicID, err)\n@@ -2683,10 +2709,11 @@ func TestReceiveFragments(t *testing.T) {\nvv := hdr.View().ToVectorisedView()\nvv.AppendView(frag.payload)\n-\n- e.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: vv,\n- }))\n+ })\n+ e.InjectInbound(header.IPv4ProtocolNumber, pkt)\n+ pkt.DecRef()\n}\nif got, want := s.Stats().UDP.PacketsReceived.Value(), uint64(len(test.expectedPayloads)); got != want {\n@@ -2840,8 +2867,12 @@ func TestWriteStats(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n+ ctx := 
newTestContext()\n+ defer ctx.cleanup()\n+\nep := iptestutil.NewMockLinkEndpoint(header.IPv4MinimumMTU, &tcpip.ErrInvalidEndpointState{}, test.allowPackets)\n- rt := buildRoute(t, ep)\n+ defer ep.Close()\n+ rt := buildRoute(t, ctx, ep)\ntest.setup(t, rt.Stack())\nnWritten := 0\n@@ -2850,6 +2881,7 @@ func TestWriteStats(t *testing.T) {\nReserveHeaderBytes: header.UDPMinimumSize + int(rt.MaxHeaderLength()),\nData: buffer.NewView(0).ToVectorisedView(),\n})\n+ defer pkt.DecRef()\npkt.TransportHeader().Push(header.UDPMinimumSize)\nif err := rt.WritePacket(stack.NetworkHeaderParams{}, pkt); err != nil {\nbreak\n@@ -2873,10 +2905,8 @@ func TestWriteStats(t *testing.T) {\n}\n}\n-func buildRoute(t *testing.T, ep stack.LinkEndpoint) *stack.Route {\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},\n- })\n+func buildRoute(t *testing.T, c testContext, ep stack.LinkEndpoint) *stack.Route {\n+ s := c.s\nif err := s.CreateNIC(1, ep); err != nil {\nt.Fatalf(\"CreateNIC(1, _) failed: %s\", err)\n}\n@@ -2979,9 +3009,11 @@ func TestPacketQueuing(t *testing.T) {\nDstAddr: host1IPv4Addr.AddressWithPrefix.Address,\n})\nip.SetChecksum(^ip.CalculateChecksum())\n- e.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: hdr.View().ToVectorisedView(),\n- }))\n+ })\n+ defer pkt.DecRef()\n+ e.InjectInbound(ipv4.ProtocolNumber, pkt)\n},\ncheckResp: func(t *testing.T, e *channel.Endpoint) {\np := e.Read()\n@@ -3022,9 +3054,11 @@ func TestPacketQueuing(t *testing.T) {\nDstAddr: host1IPv4Addr.AddressWithPrefix.Address,\n})\nip.SetChecksum(^ip.CalculateChecksum())\n- e.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ echoPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: hdr.View().ToVectorisedView(),\n- }))\n+ })\n+ defer echoPkt.DecRef()\n+ e.InjectInbound(header.IPv4ProtocolNumber, echoPkt)\n},\ncheckResp: func(t *testing.T, e *channel.Endpoint) {\np := e.Read()\n@@ -3049,15 +3083,13 @@ func TestPacketQueuing(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+ clock := ctx.clock\n+\ne := channel.New(1, defaultMTU, host1NICLinkAddr)\ne.LinkEPCapabilities |= stack.CapabilityResolutionRequired\n- clock := faketime.NewManualClock()\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol},\n- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},\n- Clock: clock,\n- })\n-\nif err := s.CreateNIC(nicID, e); err != nil {\nt.Fatalf(\"s.CreateNIC(%d, _): %s\", nicID, err)\n}\n@@ -3114,9 +3146,11 @@ func TestPacketQueuing(t *testing.T) {\ncopy(packet.ProtocolAddressSender(), host2IPv4Addr.AddressWithPrefix.Address)\ncopy(packet.HardwareAddressTarget(), host1NICLinkAddr)\ncopy(packet.ProtocolAddressTarget(), host1IPv4Addr.AddressWithPrefix.Address)\n- e.InjectInbound(arp.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: hdr.ToVectorisedView(),\n- }))\n+ })\n+ e.InjectInbound(arp.ProtocolNumber, pkt)\n+ pkt.DecRef()\n}\n// Expect the response now that the link address has resolved.\n@@ -3146,10 +3180,9 @@ func TestCloseLocking(t *testing.T) {\ndst = testutil.MustParse4(\"16.0.0.2\")\n)\n- s := stack.New(stack.Options{\n- NetworkProtocols: 
[]stack.NetworkProtocolFactory{ipv4.NewProtocol},\n- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},\n- })\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n// Perform NAT so that the endpoint tries to search for a sibling endpoint\n// which ends up taking the protocol and endpoint lock (in that order).\n@@ -3269,15 +3302,14 @@ func TestIcmpRateLimit(t *testing.T) {\n},\n}\n)\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+\nconst icmpBurst = 5\n- e := channel.New(1, defaultMTU, tcpip.LinkAddress(\"\"))\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol},\n- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},\n- Clock: faketime.NewManualClock(),\n- })\ns.SetICMPBurst(icmpBurst)\n+ e := channel.New(1, defaultMTU, tcpip.LinkAddress(\"\"))\nif err := s.CreateNIC(nicID, e); err != nil {\nt.Fatalf(\"s.CreateNIC(%d, _): %s\", nicID, err)\n}\n@@ -3379,9 +3411,11 @@ func TestIcmpRateLimit(t *testing.T) {\nfor _, testCase := range tests {\nt.Run(testCase.name, func(t *testing.T) {\nfor round := 0; round < icmpBurst+1; round++ {\n- e.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: testCase.createPacket().ToVectorisedView(),\n- }))\n+ })\n+ e.InjectInbound(header.IPv4ProtocolNumber, pkt)\n+ pkt.DecRef()\ntestCase.check(t, e, round)\n}\n})\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/network/ipv4/main_test.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package ipv4_test\n+\n+import (\n+ \"os\"\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n+)\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/stats_test.go",
"new_path": "pkg/tcpip/network/ipv4/stats_test.go",
"diff": "@@ -45,10 +45,27 @@ func knownNICIDs(proto *protocol) []tcpip.NICID {\nreturn nicIDs\n}\n-func TestClearEndpointFromProtocolOnClose(t *testing.T) {\n+type statsTestContext struct {\n+ s *stack.Stack\n+}\n+\n+func newStatsTestContext() statsTestContext {\ns := stack.New(stack.Options{\nNetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},\n})\n+ return statsTestContext{s: s}\n+}\n+\n+func (ctx statsTestContext) cleanup() {\n+ ctx.s.Close()\n+ ctx.s.Wait()\n+}\n+\n+func TestClearEndpointFromProtocolOnClose(t *testing.T) {\n+ ctx := newStatsTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+\nproto := s.NetworkProtocolInstance(ProtocolNumber).(*protocol)\nnic := testInterface{nicID: 1}\nep := proto.NewEndpoint(&nic, nil).(*endpoint)\n@@ -78,9 +95,10 @@ func TestClearEndpointFromProtocolOnClose(t *testing.T) {\n}\nfunc TestMultiCounterStatsInitialization(t *testing.T) {\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},\n- })\n+ ctx := newStatsTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+\nproto := s.NetworkProtocolInstance(ProtocolNumber).(*protocol)\nvar nic testInterface\nep := proto.NewEndpoint(&nic, nil).(*endpoint)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add Leak checking to ipv4 tests.
PiperOrigin-RevId: 428946053 |
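The test refactor above (and the network-test record later in this file) converge on one harness shape: a TestMain that turns reference leaks into panics, plus per-test contexts that close and wait for the stack and DecRef every injected PacketBuffer. Below is a condensed, hedged sketch of that shape using only APIs that appear in the diffs; the package name and the trivial test are placeholders.

```go
// Hypothetical leak-checked test harness mirroring the pattern in the diffs.
package example_test

import (
	"os"
	"testing"

	"gvisor.dev/gvisor/pkg/refs"
	"gvisor.dev/gvisor/pkg/refsvfs2"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

func TestMain(m *testing.M) {
	refs.SetLeakMode(refs.LeaksPanic)
	code := m.Run()
	refsvfs2.DoLeakCheck() // reports any object whose reference is still held
	os.Exit(code)
}

func TestStackCleanup(t *testing.T) {
	s := stack.New(stack.Options{
		NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
	})
	// Without Close+Wait, state such as the fragmentation reassemblers
	// (now released by protocol.Close above) would show up as leaked.
	defer func() {
		s.Close()
		s.Wait()
	}()
}
```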
259,907 | 16.02.2022 00:40:31 | 28,800 | 12a9efa29cd4adb79fc38ce03f7fef25ca655692 | Do not attempt to clear release dentries if fs.root is nil.
When initializing the fs.client and fs.root fails, the filesystem is released.
The release implementation panics when leak checking is enabled (in tests)
because it lacks nil-checks for fs.root. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"new_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"diff": "@@ -697,8 +697,9 @@ func (fs *filesystem) Release(ctx context.Context) {\n// If leak checking is enabled, release all outstanding references in the\n// filesystem. We deliberately avoid doing this outside of leak checking; we\n// have released all external resources above rather than relying on dentry\n- // destructors.\n- if refs_vfs1.GetLeakMode() != refs_vfs1.NoLeakChecking {\n+ // destructors. fs.root may be nil if creating the client or initializing the\n+ // root dentry failed in GetFilesystem.\n+ if refs_vfs1.GetLeakMode() != refs_vfs1.NoLeakChecking && fs.root != nil {\nfs.renameMu.Lock()\nfs.root.releaseSyntheticRecursiveLocked(ctx)\nfs.evictAllCachedDentriesLocked(ctx)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Do not attempt to clear release dentries if fs.root is nil.
When initializing the fs.client and fs.root fails, the filesystem is released.
The release implementation panics when leak checking is enabled (in tests)
because it lacks nil-checks for fs.root.
PiperOrigin-RevId: 428976387 |
259,907 | 16.02.2022 10:39:41 | 28,800 | ca697cfd2a7a66714dca372de2d5d2a3b8e99f10 | Do not generate VFS1 tests.
Updates | [
{
"change_type": "MODIFY",
"old_path": "test/perf/BUILD",
"new_path": "test/perf/BUILD",
"diff": "@@ -151,33 +151,28 @@ syscall_test(\nsize = \"large\",\ndebug = False,\ntest = \"//test/perf/linux:verity_open_benchmark\",\n- vfs1 = False,\n)\nsyscall_test(\nsize = \"large\",\ndebug = False,\ntest = \"//test/perf/linux:verity_read_benchmark\",\n- vfs1 = False,\n)\nsyscall_test(\nsize = \"large\",\ndebug = False,\ntest = \"//test/perf/linux:verity_randread_benchmark\",\n- vfs1 = False,\n)\nsyscall_test(\nsize = \"large\",\ndebug = False,\ntest = \"//test/perf/linux:verity_open_read_close_benchmark\",\n- vfs1 = False,\n)\nsyscall_test(\nsize = \"large\",\ndebug = False,\ntest = \"//test/perf/linux:verity_stat_benchmark\",\n- vfs1 = False,\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/perf/linux/verity_open_benchmark.cc",
"new_path": "test/perf/linux/verity_open_benchmark.cc",
"diff": "@@ -36,6 +36,12 @@ namespace testing {\nnamespace {\nvoid BM_Open(benchmark::State& state) {\n+ // CAP_SYS_ADMIN is needed for making mount(2) syscall.\n+ if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN))) {\n+ state.SkipWithError(\"CAP_SYS_ADMIN missing. Skipping benchmark.\");\n+ return;\n+ }\n+\nconst int size = state.range(0);\nstd::vector<TempPath> cache;\nstd::vector<EnableTarget> targets;\n"
},
{
"change_type": "MODIFY",
"old_path": "test/perf/linux/verity_open_read_close_benchmark.cc",
"new_path": "test/perf/linux/verity_open_read_close_benchmark.cc",
"diff": "@@ -36,6 +36,12 @@ namespace testing {\nnamespace {\nvoid BM_VerityOpenReadClose(benchmark::State& state) {\n+ // CAP_SYS_ADMIN is needed for making mount(2) syscall.\n+ if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN))) {\n+ state.SkipWithError(\"CAP_SYS_ADMIN missing. Skipping benchmark.\");\n+ return;\n+ }\n+\nconst int size = state.range(0);\n// Mount a tmpfs file system to be wrapped by a verity fs.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/perf/linux/verity_randread_benchmark.cc",
"new_path": "test/perf/linux/verity_randread_benchmark.cc",
"diff": "// limitations under the License.\n#include <fcntl.h>\n+#include <linux/capability.h>\n#include <stdlib.h>\n#include <sys/mount.h>\n#include <sys/stat.h>\n#include \"gtest/gtest.h\"\n#include \"benchmark/benchmark.h\"\n+#include \"test/util/linux_capability_util.h\"\n#include \"test/util/logging.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n@@ -83,6 +85,12 @@ GlobalState& GetGlobalState() {\n}\nvoid BM_VerityRandRead(benchmark::State& state) {\n+ // CAP_SYS_ADMIN is needed for making mount(2) syscall.\n+ if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN))) {\n+ state.SkipWithError(\"CAP_SYS_ADMIN missing. Skipping benchmark.\");\n+ return;\n+ }\n+\nconst int size = state.range(0);\nGlobalState& global_state = GetGlobalState();\n"
},
{
"change_type": "MODIFY",
"old_path": "test/perf/linux/verity_read_benchmark.cc",
"new_path": "test/perf/linux/verity_read_benchmark.cc",
"diff": "@@ -36,6 +36,12 @@ namespace testing {\nnamespace {\nvoid BM_VerityRead(benchmark::State& state) {\n+ // CAP_SYS_ADMIN is needed for making mount(2) syscall.\n+ if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN))) {\n+ state.SkipWithError(\"CAP_SYS_ADMIN missing. Skipping benchmark.\");\n+ return;\n+ }\n+\nconst int size = state.range(0);\nconst std::string contents(size, 0);\n"
},
{
"change_type": "MODIFY",
"old_path": "test/perf/linux/verity_stat_benchmark.cc",
"new_path": "test/perf/linux/verity_stat_benchmark.cc",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+#include <linux/capability.h>\n#include <sys/mount.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include \"gtest/gtest.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"benchmark/benchmark.h\"\n+#include \"test/util/capability_util.h\"\n#include \"test/util/fs_util.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n@@ -36,6 +38,12 @@ namespace {\n// Creates a file in a nested directory hierarchy at least `depth` directories\n// deep, and stats that file multiple times.\nvoid BM_VerityStat(benchmark::State& state) {\n+ // CAP_SYS_ADMIN is needed for making mount(2) syscall.\n+ if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN))) {\n+ state.SkipWithError(\"CAP_SYS_ADMIN missing. Skipping benchmark.\");\n+ return;\n+ }\n+\n// Create nested directories with given depth.\nint depth = state.range(0);\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/defs.bzl",
"new_path": "test/runner/defs.bzl",
"diff": "@@ -60,7 +60,6 @@ def _syscall_test(\nfile_access = \"exclusive\",\noverlay = False,\nadd_uds_tree = False,\n- vfs2 = False,\nfuse = False,\n**kwargs):\n# Prepend \"runsc\" to non-native platform names.\n@@ -72,8 +71,6 @@ def _syscall_test(\nname += \"_shared\"\nif overlay:\nname += \"_overlay\"\n- if vfs2:\n- name += \"_vfs2\"\nif fuse:\nname += \"_fuse\"\nif network != \"none\":\n@@ -113,7 +110,6 @@ def _syscall_test(\n\"--file-access=\" + file_access,\n\"--overlay=\" + str(overlay),\n\"--add-uds-tree=\" + str(add_uds_tree),\n- \"--vfs2=\" + str(vfs2),\n\"--fuse=\" + str(fuse),\n\"--strace=\" + str(debug),\n\"--debug=\" + str(debug),\n@@ -135,8 +131,6 @@ def syscall_test(\nadd_overlay = False,\nadd_uds_tree = False,\nadd_hostinet = False,\n- vfs1 = True,\n- vfs2 = True,\nfuse = False,\ndebug = True,\ntags = None,\n@@ -149,8 +143,6 @@ def syscall_test(\nadd_overlay: add an overlay test.\nadd_uds_tree: add a UDS test.\nadd_hostinet: add a hostinet test.\n- vfs1: enable VFS1 tests. Could be false only if vfs2 is true.\n- vfs2: enable VFS2 support.\nfuse: enable FUSE support.\ndebug: enable debug output.\ntags: starting test tags.\n@@ -159,21 +151,7 @@ def syscall_test(\nif not tags:\ntags = []\n- if vfs2 and vfs1 and not fuse:\n- # Generate a vfs1 plain test. Most testing will now be\n- # biased towards vfs2, with only a single vfs1 case.\n- _syscall_test(\n- test = test,\n- platform = default_platform,\n- use_tmpfs = use_tmpfs,\n- add_uds_tree = add_uds_tree,\n- tags = tags + platforms[default_platform],\n- debug = debug,\n- vfs2 = False,\n- **kwargs\n- )\n-\n- if vfs1 and not fuse:\n+ if not fuse:\n# Generate a native test if fuse is not required.\n_syscall_test(\ntest = test,\n@@ -193,7 +171,6 @@ def syscall_test(\nadd_uds_tree = add_uds_tree,\ntags = platform_tags + tags,\nfuse = fuse,\n- vfs2 = vfs2,\ndebug = debug,\n**kwargs\n)\n@@ -207,7 +184,6 @@ def syscall_test(\ntags = platforms[default_platform] + tags,\ndebug = debug,\nfuse = fuse,\n- vfs2 = vfs2,\noverlay = True,\n**kwargs\n)\n@@ -221,7 +197,6 @@ def syscall_test(\ntags = platforms[default_platform] + tags,\ndebug = debug,\nfuse = fuse,\n- vfs2 = vfs2,\n**kwargs\n)\nif not use_tmpfs:\n@@ -235,6 +210,5 @@ def syscall_test(\ndebug = debug,\nfile_access = \"shared\",\nfuse = fuse,\n- vfs2 = vfs2,\n**kwargs\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/main.go",
"new_path": "test/runner/main.go",
"diff": "@@ -47,7 +47,6 @@ var (\nuseTmpfs = flag.Bool(\"use-tmpfs\", false, \"mounts tmpfs for /tmp\")\nfileAccess = flag.String(\"file-access\", \"exclusive\", \"mounts root in exclusive or shared mode\")\noverlay = flag.Bool(\"overlay\", false, \"wrap filesystem mounts with writable tmpfs overlay\")\n- vfs2 = flag.Bool(\"vfs2\", false, \"enable VFS2\")\nfuse = flag.Bool(\"fuse\", false, \"enable FUSE\")\ncontainer = flag.Bool(\"container\", false, \"run tests in their own namespaces (user ns, network ns, etc), pretending to be root\")\nsetupContainerPath = flag.String(\"setup-container\", \"\", \"path to setup_container binary (for use with --container)\")\n@@ -179,8 +178,7 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {\nif *overlay {\nargs = append(args, \"-overlay\")\n}\n- args = append(args, fmt.Sprintf(\"-vfs2=%t\", *vfs2))\n- if *vfs2 && *fuse {\n+ if *fuse {\nargs = append(args, \"-fuse\")\n}\nif *debug {\n@@ -393,18 +391,13 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {\nplatformVar := \"TEST_ON_GVISOR\"\nnetworkVar := \"GVISOR_NETWORK\"\nenv := append(os.Environ(), platformVar+\"=\"+*platform, networkVar+\"=\"+*network)\n- vfsVar := \"GVISOR_VFS\"\n- if *vfs2 {\n- env = append(env, vfsVar+\"=VFS2\")\n+ env = append(env, \"GVISOR_VFS=VFS2\")\nfuseVar := \"FUSE_ENABLED\"\nif *fuse {\nenv = append(env, fuseVar+\"=TRUE\")\n} else {\nenv = append(env, fuseVar+\"=FALSE\")\n}\n- } else {\n- env = append(env, vfsVar+\"=VFS1\")\n- }\n// Remove shard env variables so that the gunit binary does not try to\n// interpret them.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -605,7 +605,7 @@ syscall_test(\n)\n# TODO(b/119826902): Enable once the test passes in runsc.\n-# syscall_test(vfs2=\"True\",test = \"//test/syscalls/linux:sigaltstack_test\")\n+# syscall_test(test = \"//test/syscalls/linux:sigaltstack_test\")\nsyscall_test(\ntest = \"//test/syscalls/linux:sigreturn_test\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Do not generate VFS1 tests.
Updates #1624
PiperOrigin-RevId: 429085588 |
259,885 | 16.02.2022 12:45:12 | 28,800 | 03d7c4895153dbe446447643ceacccc74d12997f | Don't copy up event registrations in VFS2 overlay.
Doing so can deadlock, and regular files and symlinks usually shouldn't be
event-registerable anyway (especially after cl/426516504). | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/overlay/regular_file.go",
"new_path": "pkg/sentry/fsimpl/overlay/regular_file.go",
"diff": "@@ -60,10 +60,6 @@ type regularFileFD struct {\ncopiedUp bool\ncachedFD *vfs.FileDescription\ncachedFlags uint32\n-\n- // If copiedUp is false, lowerWaiters contains all waiter.Entries\n- // registered with cachedFD. lowerWaiters is protected by mu.\n- lowerWaiters map[*waiter.Entry]struct{}\n}\nfunc (fd *regularFileFD) getCurrentFD(ctx context.Context) (*vfs.FileDescription, error) {\n@@ -99,21 +95,10 @@ func (fd *regularFileFD) currentFDLocked(ctx context.Context) (*vfs.FileDescript\nreturn nil, err\n}\n}\n- if len(fd.lowerWaiters) != 0 {\n- ready := upperFD.Readiness(^waiter.EventMask(0))\n- for e := range fd.lowerWaiters {\n- fd.cachedFD.EventUnregister(e)\n- if err := upperFD.EventRegister(e); err != nil {\n- return nil, err\n- }\n- e.NotifyEvent(ready)\n- }\n- }\nfd.cachedFD.DecRef(ctx)\nfd.copiedUp = true\nfd.cachedFD = upperFD\nfd.cachedFlags = statusFlags\n- fd.lowerWaiters = nil\n} else if fd.cachedFlags != statusFlags {\nif err := fd.cachedFD.SetStatusFlags(ctx, d.fs.creds, statusFlags); err != nil {\nreturn nil, err\n@@ -261,22 +246,12 @@ func (fd *regularFileFD) EventRegister(e *waiter.Entry) error {\ndefer fd.mu.Unlock()\nwrappedFD, err := fd.currentFDLocked(context.Background())\nif err != nil {\n- // TODO(b/171089913): Just use fd.cachedFD since EventRegister can't\n- // return an error. This is obviously wrong, but at least consistent\n+ // TODO(b/171089913): Just use fd.cachedFD for backward compatibility\n// with VFS1.\nlog.Warningf(\"overlay.regularFileFD.EventRegister: currentFDLocked failed: %v\", err)\nwrappedFD = fd.cachedFD\n}\n- if err := wrappedFD.EventRegister(e); err != nil {\n- return err\n- }\n- if !fd.copiedUp {\n- if fd.lowerWaiters == nil {\n- fd.lowerWaiters = make(map[*waiter.Entry]struct{})\n- }\n- fd.lowerWaiters[e] = struct{}{}\n- }\n- return nil\n+ return wrappedFD.EventRegister(e)\n}\n// EventUnregister implements waiter.Waitable.EventUnregister.\n@@ -284,9 +259,6 @@ func (fd *regularFileFD) EventUnregister(e *waiter.Entry) {\nfd.mu.Lock()\ndefer fd.mu.Unlock()\nfd.cachedFD.EventUnregister(e)\n- if !fd.copiedUp {\n- delete(fd.lowerWaiters, e)\n- }\n}\n// PRead implements vfs.FileDescriptionImpl.PRead.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't copy up event registrations in VFS2 overlay.
Doing so can deadlock, and regular files and symlinks usually shouldn't be
event-registerable anyway (especially after cl/426516504).
PiperOrigin-RevId: 429117155 |
259,962 | 16.02.2022 13:33:28 | 28,800 | 55aa2b2b230025dc666c73cb7916a6fad36dd4da | Enable leak checking for network tests.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/BUILD",
"new_path": "pkg/tcpip/network/BUILD",
"diff": "@@ -7,9 +7,12 @@ go_test(\nsize = \"small\",\nsrcs = [\n\"ip_test.go\",\n+ \"main_test.go\",\n\"multicast_group_test.go\",\n],\ndeps = [\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/sync\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ip_test.go",
"new_path": "pkg/tcpip/network/ip_test.go",
"diff": "@@ -222,11 +222,26 @@ func (*testObject) AddHeader(*stack.PacketBuffer) {\npanic(\"not implemented\")\n}\n-func buildIPv4Route(local, remote tcpip.Address) (*stack.Route, tcpip.Error) {\n+type testContext struct {\n+ s *stack.Stack\n+}\n+\n+func newTestContext() *testContext {\ns := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},\n+ NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},\nTransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol, tcp.NewProtocol},\n+ RawFactory: raw.EndpointFactory{},\n})\n+ return &testContext{s: s}\n+}\n+\n+func (ctx *testContext) cleanup() {\n+ ctx.s.Close()\n+ ctx.s.Wait()\n+}\n+\n+func buildIPv4Route(ctx *testContext, local, remote tcpip.Address) (*stack.Route, tcpip.Error) {\n+ s := ctx.s\ns.CreateNIC(nicID, loopback.New())\nprotocolAddr := tcpip.ProtocolAddress{\nProtocol: ipv4.ProtocolNumber,\n@@ -244,11 +259,8 @@ func buildIPv4Route(local, remote tcpip.Address) (*stack.Route, tcpip.Error) {\nreturn s.FindRoute(nicID, local, remote, ipv4.ProtocolNumber, false /* multicastLoop */)\n}\n-func buildIPv6Route(local, remote tcpip.Address) (*stack.Route, tcpip.Error) {\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},\n- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol, tcp.NewProtocol},\n- })\n+func buildIPv6Route(ctx *testContext, local, remote tcpip.Address) (*stack.Route, tcpip.Error) {\n+ s := ctx.s\ns.CreateNIC(nicID, loopback.New())\nprotocolAddr := tcpip.ProtocolAddress{\nProtocol: ipv6.ProtocolNumber,\n@@ -266,13 +278,8 @@ func buildIPv6Route(local, remote tcpip.Address) (*stack.Route, tcpip.Error) {\nreturn s.FindRoute(nicID, local, remote, ipv6.ProtocolNumber, false /* multicastLoop */)\n}\n-func buildDummyStackWithLinkEndpoint(t *testing.T, mtu uint32) (*stack.Stack, *channel.Endpoint) {\n+func addLinkEndpointToStackWithMTU(t *testing.T, s *stack.Stack, mtu uint32) *channel.Endpoint {\nt.Helper()\n-\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},\n- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol, tcp.NewProtocol},\n- })\ne := channel.New(1, mtu, \"\")\nif err := s.CreateNIC(nicID, e); err != nil {\nt.Fatalf(\"CreateNIC(%d, _) = %s\", nicID, err)\n@@ -288,14 +295,12 @@ func buildDummyStackWithLinkEndpoint(t *testing.T, mtu uint32) (*stack.Stack, *c\nt.Fatalf(\"AddProtocolAddress(%d, %+v, {}) = %s\", nicID, v6Addr, err)\n}\n- return s, e\n+ return e\n}\n-func buildDummyStack(t *testing.T) *stack.Stack {\n+func addLinkEndpointToStack(t *testing.T, s *stack.Stack) *channel.Endpoint {\nt.Helper()\n-\n- s, _ := buildDummyStackWithLinkEndpoint(t, header.IPv6MinimumMTU)\n- return s\n+ return addLinkEndpointToStackWithMTU(t, s, header.IPv6MinimumMTU)\n}\nvar _ stack.NetworkInterface = (*testInterface)(nil)\n@@ -380,9 +385,11 @@ func TestSourceAddressValidation(t *testing.T) {\n})\nip.SetChecksum(^ip.CalculateChecksum())\n- e.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pktBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: hdr.View().ToVectorisedView(),\n- }))\n+ })\n+ e.InjectInbound(header.IPv4ProtocolNumber, pktBuf)\n+ pktBuf.DecRef()\n}\nrxIPv6ICMP := func(e *channel.Endpoint, src tcpip.Address) {\n@@ -405,9 +412,11 @@ func TestSourceAddressValidation(t *testing.T) {\nSrcAddr: src,\nDstAddr: localIPv6Addr,\n})\n- 
e.InjectInbound(header.IPv6ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pktBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: hdr.View().ToVectorisedView(),\n- }))\n+ })\n+ e.InjectInbound(header.IPv6ProtocolNumber, pktBuf)\n+ pktBuf.DecRef()\n}\ntests := []struct {\n@@ -471,7 +480,11 @@ func TestSourceAddressValidation(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- s, e := buildDummyStackWithLinkEndpoint(t, header.IPv6MinimumMTU)\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+\n+ e := addLinkEndpointToStack(t, s)\ntest.rxICMP(e, test.srcAddress)\nvar wantValid uint64\n@@ -515,6 +528,11 @@ func TestEnableWhenNICDisabled(t *testing.T) {\ns := stack.New(stack.Options{\nNetworkProtocols: []stack.NetworkProtocolFactory{test.protocolFactory},\n})\n+ defer func() {\n+ s.Close()\n+ s.Wait()\n+ }()\n+\np := s.NetworkProtocolInstance(test.protoNum)\n// We pass nil for all parameters except the NetworkInterface and Stack\n@@ -573,7 +591,10 @@ func TestEnableWhenNICDisabled(t *testing.T) {\n}\nfunc TestIPv4Send(t *testing.T) {\n- s := buildDummyStack(t)\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+\nproto := s.NetworkProtocolInstance(ipv4.ProtocolNumber)\nnic := testInterface{\ntestObject: testObject{\n@@ -595,6 +616,7 @@ func TestIPv4Send(t *testing.T) {\nReserveHeaderBytes: int(ep.MaxHeaderLength()),\nData: payload.ToVectorisedView(),\n})\n+ defer pkt.DecRef()\n// Issue the write.\nnic.testObject.protocol = 123\n@@ -602,7 +624,7 @@ func TestIPv4Send(t *testing.T) {\nnic.testObject.dstAddr = remoteIPv4Addr\nnic.testObject.contents = payload\n- r, err := buildIPv4Route(localIPv4Addr, remoteIPv4Addr)\n+ r, err := buildIPv4Route(ctx, localIPv4Addr, remoteIPv4Addr)\nif err != nil {\nt.Fatalf(\"could not find route: %v\", err)\n}\n@@ -659,6 +681,7 @@ func TestReceive(t *testing.T) {\nData: view.ToVectorisedView(),\n})\nep.HandlePacket(pkt)\n+ pkt.DecRef()\n},\n},\n{\n@@ -694,6 +717,7 @@ func TestReceive(t *testing.T) {\nData: view.ToVectorisedView(),\n})\nep.HandlePacket(pkt)\n+ pkt.DecRef()\n},\n},\n}\n@@ -703,6 +727,11 @@ func TestReceive(t *testing.T) {\ns := stack.New(stack.Options{\nNetworkProtocols: []stack.NetworkProtocolFactory{test.protoFactory},\n})\n+ defer func() {\n+ s.Close()\n+ s.Wait()\n+ }()\n+\nnic := testInterface{\ntestObject: testObject{\nt: t,\n@@ -823,7 +852,10 @@ func TestIPv4ReceiveControl(t *testing.T) {\n}\nfor _, c := range cases {\nt.Run(c.name, func(t *testing.T) {\n- s := buildDummyStack(t)\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+\nproto := s.NetworkProtocolInstance(ipv4.ProtocolNumber)\nnic := testInterface{\ntestObject: testObject{\n@@ -900,6 +932,7 @@ func TestIPv4ReceiveControl(t *testing.T) {\npkt := truncatedPacket(view, c.trunc, header.IPv4MinimumSize)\nep.HandlePacket(pkt)\n+ pkt.DecRef()\nif want := c.expectedCount; nic.testObject.controlCalls != want {\nt.Fatalf(\"Bad number of control calls for %q case: got %v, want %v\", c.name, nic.testObject.controlCalls, want)\n}\n@@ -908,9 +941,10 @@ func TestIPv4ReceiveControl(t *testing.T) {\n}\nfunc TestIPv4FragmentationReceive(t *testing.T) {\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},\n- })\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+\nproto := s.NetworkProtocolInstance(ipv4.ProtocolNumber)\nnic := testInterface{\ntestObject: testObject{\n@@ -968,11 +1002,6 @@ func TestIPv4FragmentationReceive(t 
*testing.T) {\nnic.testObject.dstAddr = localIPv4Addr\nnic.testObject.contents = append(frag1[header.IPv4MinimumSize:totalLen], frag2[header.IPv4MinimumSize:totalLen]...)\n- // Send first segment.\n- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n- Data: frag1.ToVectorisedView(),\n- })\n-\naddressableEndpoint, ok := ep.(stack.AddressableEndpoint)\nif !ok {\nt.Fatal(\"expected IPv4 network endpoint to implement stack.AddressableEndpoint\")\n@@ -984,7 +1013,13 @@ func TestIPv4FragmentationReceive(t *testing.T) {\nep.DecRef()\n}\n+ // Send first segment.\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: frag1.ToVectorisedView(),\n+ })\nep.HandlePacket(pkt)\n+ pkt.DecRef()\n+\nif nic.testObject.dataCalls != 0 {\nt.Fatalf(\"Bad number of data calls: got %d, want 0\", nic.testObject.dataCalls)\n}\n@@ -997,6 +1032,8 @@ func TestIPv4FragmentationReceive(t *testing.T) {\nData: frag2.ToVectorisedView(),\n})\nep.HandlePacket(pkt)\n+ pkt.DecRef()\n+\nif nic.testObject.dataCalls != 1 {\nt.Fatalf(\"Bad number of data calls: got %d, want 1\", nic.testObject.dataCalls)\n}\n@@ -1006,7 +1043,10 @@ func TestIPv4FragmentationReceive(t *testing.T) {\n}\nfunc TestIPv6Send(t *testing.T) {\n- s := buildDummyStack(t)\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+\nproto := s.NetworkProtocolInstance(ipv6.ProtocolNumber)\nnic := testInterface{\ntestObject: testObject{\n@@ -1031,14 +1071,14 @@ func TestIPv6Send(t *testing.T) {\nReserveHeaderBytes: int(ep.MaxHeaderLength()),\nData: payload.ToVectorisedView(),\n})\n-\n+ defer pkt.DecRef()\n// Issue the write.\nnic.testObject.protocol = 123\nnic.testObject.srcAddr = localIPv6Addr\nnic.testObject.dstAddr = remoteIPv6Addr\nnic.testObject.contents = payload\n- r, err := buildIPv6Route(localIPv6Addr, remoteIPv6Addr)\n+ r, err := buildIPv6Route(ctx, localIPv6Addr, remoteIPv6Addr)\nif err != nil {\nt.Fatalf(\"could not find route: %v\", err)\n}\n@@ -1161,7 +1201,10 @@ func TestIPv6ReceiveControl(t *testing.T) {\n}\nfor _, c := range cases {\nt.Run(c.name, func(t *testing.T) {\n- s := buildDummyStack(t)\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+\nproto := s.NetworkProtocolInstance(ipv6.ProtocolNumber)\nnic := testInterface{\ntestObject: testObject{\n@@ -1251,6 +1294,7 @@ func TestIPv6ReceiveControl(t *testing.T) {\n}\npkt := truncatedPacket(view, c.trunc, header.IPv6MinimumSize)\nep.HandlePacket(pkt)\n+ pkt.DecRef()\nif want := c.expectedCount; nic.testObject.controlCalls != want {\nt.Fatalf(\"Bad number of control calls for %q case: got %v, want %v\", c.name, nic.testObject.controlCalls, want)\n}\n@@ -1683,6 +1727,11 @@ func TestWriteHeaderIncludedPacket(t *testing.T) {\ns := stack.New(stack.Options{\nNetworkProtocols: []stack.NetworkProtocolFactory{test.protoFactory},\n})\n+ defer func() {\n+ s.Close()\n+ s.Wait()\n+ }()\n+\ne := channel.New(1, header.IPv6MinimumMTU, \"\")\nif err := s.CreateNIC(nicID, e); err != nil {\nt.Fatalf(\"s.CreateNIC(%d, _): %s\", nicID, err)\n@@ -1704,9 +1753,11 @@ func TestWriteHeaderIncludedPacket(t *testing.T) {\ndefer r.Release()\n{\n- err := r.WriteHeaderIncludedPacket(stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: test.pktGen(t, subTest.srcAddr),\n- }))\n+ })\n+ err := r.WriteHeaderIncludedPacket(pkt)\n+ pkt.DecRef()\nif diff := cmp.Diff(test.expectedErr, err); diff != \"\" {\nt.Fatalf(\"unexpected error from r.WriteHeaderIncludedPacket(_), (-want, +got):\\n%s\", diff)\n}\n@@ -1759,9 +1810,11 @@ func 
TestICMPInclusionSize(t *testing.T) {\n// Take a copy before InjectInbound takes ownership of vv\n// as vv may be changed during the call.\nv := vv.ToView()\n- e.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: vv,\n- }))\n+ })\n+ e.InjectInbound(header.IPv4ProtocolNumber, pkt)\n+ pkt.DecRef()\nreturn v\n}\n@@ -1786,9 +1839,11 @@ func TestICMPInclusionSize(t *testing.T) {\n// as vv may be changed during the call.\nv := vv.ToView()\n- e.InjectInbound(header.IPv6ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: vv,\n- }))\n+ })\n+ e.InjectInbound(header.IPv6ProtocolNumber, pkt)\n+ pkt.DecRef()\nreturn v\n}\n@@ -1939,7 +1994,11 @@ func TestICMPInclusionSize(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- s, e := buildDummyStackWithLinkEndpoint(t, test.linkMTU)\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+\n+ e := addLinkEndpointToStackWithMTU(t, s, test.linkMTU)\n// Allocate and initialize the payload view.\npayload := buffer.NewView(test.payloadLength)\nfor i := 0; i < len(payload); i++ {\n@@ -2009,10 +2068,10 @@ func TestJoinLeaveAllRoutersGroup(t *testing.T) {\nt.Run(test.name, func(t *testing.T) {\nfor _, nicDisabled := range [...]bool{true, false} {\nt.Run(fmt.Sprintf(\"NIC Disabled = %t\", nicDisabled), func(t *testing.T) {\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},\n- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol, tcp.NewProtocol},\n- })\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+\nopts := stack.NICOptions{Disabled: nicDisabled}\nif err := s.CreateNICWithOptions(nicID, channel.New(0, 0, \"\"), opts); err != nil {\nt.Fatalf(\"CreateNICWithOptions(%d, _, %#v) = %s\", nicID, opts, err)\n@@ -2072,14 +2131,10 @@ func TestSetNICIDBeforeDeliveringToRawEndpoint(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{\n- ipv4.NewProtocol,\n- ipv6.NewProtocol,\n- },\n- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},\n- RawFactory: raw.EndpointFactory{},\n- })\n+ ctx := newTestContext()\n+ defer ctx.cleanup()\n+ s := ctx.s\n+\nif err := s.CreateNIC(nicID, loopback.New()); err != nil {\nt.Fatalf(\"CreateNIC(%d, _): %s\", nicID, err)\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/network/main_test.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package ip_test\n+\n+import (\n+ \"os\"\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n+)\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/multicast_group_test.go",
"new_path": "pkg/tcpip/network/multicast_group_test.go",
"diff": "@@ -118,12 +118,27 @@ func validateIGMPPacket(t *testing.T, p *stack.PacketBuffer, remoteAddress tcpip\n)\n}\n-func createStack(t *testing.T, v4, mgpEnabled bool) (*channel.Endpoint, *stack.Stack, *faketime.ManualClock) {\n+type multicastTestContext struct {\n+ s *stack.Stack\n+ e *channel.Endpoint\n+ clock *faketime.ManualClock\n+}\n+\n+func newMulticastTestContext(t *testing.T, v4, mgpEnabled bool) *multicastTestContext {\nt.Helper()\ne := channel.New(maxUnsolicitedReports, header.IPv6MinimumMTU, linkAddr)\ns, clock := createStackWithLinkEndpoint(t, v4, mgpEnabled, e)\n- return e, s, clock\n+ return &multicastTestContext{\n+ s: s,\n+ e: e,\n+ clock: clock,\n+ }\n+}\n+\n+func (ctx *multicastTestContext) cleanup() {\n+ ctx.s.Close()\n+ ctx.s.Wait()\n}\nfunc createStackWithLinkEndpoint(t *testing.T, v4, mgpEnabled bool, e stack.LinkEndpoint) (*stack.Stack, *faketime.ManualClock) {\n@@ -241,9 +256,11 @@ func createAndInjectIGMPPacket(e *channel.Endpoint, igmpType byte, maxRespTime b\nigmp.SetGroupAddress(groupAddress)\nigmp.SetChecksum(header.IGMPCalculateChecksum(igmp))\n- e.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n- }))\n+ })\n+ e.InjectInbound(ipv4.ProtocolNumber, pkt)\n+ pkt.DecRef()\n}\n// createAndInjectMLDPacket creates and injects an MLD packet with the\n@@ -280,9 +297,11 @@ func createAndInjectMLDPacket(e *channel.Endpoint, mldType uint8, maxRespDelay b\nDst: header.IPv6AllNodesMulticastAddress,\n}))\n- e.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n- }))\n+ })\n+ e.InjectInbound(ipv6.ProtocolNumber, pkt)\n+ pkt.DecRef()\n}\n// TestMGPDisabled tests that the multicast group protocol is not enabled by\n@@ -328,7 +347,11 @@ func TestMGPDisabled(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- e, s, clock := createStack(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, false /* mgpEnabled */)\n+ ctx := newMulticastTestContext(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, false /* mgpEnabled */)\n+ defer ctx.cleanup()\n+ s := ctx.s\n+ e := ctx.e\n+ clock := ctx.clock\n// This NIC may join multicast groups when it is enabled but since MGP is\n// disabled, no reports should be sent.\n@@ -451,10 +474,11 @@ func TestMGPReceiveCounters(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- e, s, _ := createStack(t, len(test.groupAddress) == header.IPv4AddressSize /* v4 */, true /* mgpEnabled */)\n+ ctx := newMulticastTestContext(t, len(test.groupAddress) == header.IPv4AddressSize /* v4 */, true /* mgpEnabled */)\n+ defer ctx.cleanup()\n- test.rxMGPkt(e, test.headerType, test.maxRespTime, test.groupAddress)\n- if got := test.statCounter(s).Value(); got != 1 {\n+ test.rxMGPkt(ctx.e, test.headerType, test.maxRespTime, test.groupAddress)\n+ if got := test.statCounter(ctx.s).Value(); got != 1 {\nt.Fatalf(\"got %s received = %d, want = 1\", test.name, got)\n}\n})\n@@ -513,7 +537,9 @@ func TestMGPJoinGroup(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- e, s, clock := createStack(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)\n+ ctx := newMulticastTestContext(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)\n+ defer ctx.cleanup()\n+ s, e, clock := ctx.s, ctx.e, 
ctx.clock\nvar reportCounter uint64\nif test.checkInitialGroups != nil {\n@@ -625,7 +651,9 @@ func TestMGPLeaveGroup(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- e, s, clock := createStack(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)\n+ ctx := newMulticastTestContext(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)\n+ defer ctx.cleanup()\n+ s, e, clock := ctx.s, ctx.e, ctx.clock\nvar reportCounter uint64\nvar leaveCounter uint64\n@@ -764,7 +792,9 @@ func TestMGPQueryMessages(t *testing.T) {\nfor _, subTest := range subTests {\nt.Run(subTest.name, func(t *testing.T) {\n- e, s, clock := createStack(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)\n+ ctx := newMulticastTestContext(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)\n+ defer ctx.cleanup()\n+ s, e, clock := ctx.s, ctx.e, ctx.clock\nvar reportCounter uint64\nif test.checkInitialGroups != nil {\n@@ -892,7 +922,9 @@ func TestMGPReportMessages(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- e, s, clock := createStack(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)\n+ ctx := newMulticastTestContext(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)\n+ defer ctx.cleanup()\n+ s, e, clock := ctx.s, ctx.e, ctx.clock\nvar reportCounter uint64\nvar leaveCounter uint64\n@@ -1076,7 +1108,9 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- e, s, clock := createStack(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)\n+ ctx := newMulticastTestContext(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)\n+ defer ctx.cleanup()\n+ s, e, clock := ctx.s, ctx.e, ctx.clock\nvar reportCounter uint64\nvar leaveCounter uint64\n@@ -1258,7 +1292,10 @@ func TestMGPDisabledOnLoopback(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\ns, clock := createStackWithLinkEndpoint(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */, loopback.New())\n-\n+ defer func() {\n+ s.Close()\n+ s.Wait()\n+ }()\nsentReportStat := test.sentReportStat(s)\nif got := sentReportStat.Value(); got != 0 {\nt.Fatalf(\"got sentReportStat.Value() = %d, want = 0\", got)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable leak checking for network tests.
Updates #6910
PiperOrigin-RevId: 429127828 |
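A minimal illustration of the ownership discipline the record above enforces in the network tests: whoever creates a packet buffer holds one reference and must DecRef it after handing the packet to the stack, and a TestMain leak check catches any miss. The packetBuffer type below is a toy stand-in, not netstack's stack.PacketBuffer.

    package main

    import "fmt"

    // packetBuffer is a toy reference-counted packet, used only to show the
    // create -> inject -> DecRef pattern the tests now follow.
    type packetBuffer struct{ refs int }

    func newPacketBuffer() *packetBuffer { return &packetBuffer{refs: 1} }

    func (p *packetBuffer) IncRef() { p.refs++ }

    func (p *packetBuffer) DecRef() {
        p.refs--
        if p.refs == 0 {
            fmt.Println("packet released")
        }
    }

    // injectInbound models the stack taking (and later dropping) its own
    // reference while it processes the packet.
    func injectInbound(p *packetBuffer) {
        p.IncRef()
        defer p.DecRef()
        fmt.Println("packet delivered")
    }

    func main() {
        pkt := newPacketBuffer()
        injectInbound(pkt)
        pkt.DecRef() // the creator drops its reference, so nothing leaks
    }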
259,868 | 16.02.2022 14:17:35 | 28,800 | d8500d0467d3d4cede2034f0f38389608b81a5ce | gVisor KVM: Use the KVM device path that was actually opened in error message. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm.go",
"new_path": "pkg/sentry/platform/kvm/kvm.go",
"diff": "@@ -84,7 +84,7 @@ func OpenDevice() (*os.File, error) {\n}\nf, err := os.OpenFile(dev, unix.O_RDWR, 0)\nif err != nil {\n- return nil, fmt.Errorf(\"error opening /dev/kvm: %v\", err)\n+ return nil, fmt.Errorf(\"error opening KVM device file (%s): %v\", dev, err)\n}\nreturn f, nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | gVisor KVM: Use the KVM device path that was actually opened in error message.
PiperOrigin-RevId: 429138394 |
259,898 | 16.02.2022 15:07:11 | 28,800 | d96b1860ba05238d0f5196e5e8611ea5bdb137c5 | Add a PacketImpact test for Ack behavior in SYN-RCVD
Updates | [
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/runner/defs.bzl",
"new_path": "test/packetimpact/runner/defs.bzl",
"diff": "@@ -307,6 +307,10 @@ ALL_TESTS = [\nname = \"generic_dgram_socket_send_recv\",\ntimeout = \"long\",\n),\n+ PacketimpactTestInfo(\n+ name = \"tcp_acceptable_ack_syn_rcvd\",\n+ expect_netstack_failure = True,\n+ ),\n]\ndef validate_all_tests():\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/tests/BUILD",
"new_path": "test/packetimpact/tests/BUILD",
"diff": "@@ -15,6 +15,16 @@ packetimpact_testbench(\n],\n)\n+packetimpact_testbench(\n+ name = \"tcp_acceptable_ack_syn_rcvd\",\n+ srcs = [\"tcp_acceptable_ack_syn_rcvd_test.go\"],\n+ deps = [\n+ \"//pkg/tcpip/header\",\n+ \"//test/packetimpact/testbench\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n+ ],\n+)\n+\npacketimpact_testbench(\nname = \"ipv4_id_uniqueness\",\nsrcs = [\"ipv4_id_uniqueness_test.go\"],\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/packetimpact/tests/tcp_acceptable_ack_syn_rcvd_test.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+package tcp_acceptable_ack_syn_rcvd_test\n+\n+import (\n+ \"flag\"\n+ \"fmt\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ \"gvisor.dev/gvisor/test/packetimpact/testbench\"\n+)\n+\n+func init() {\n+ testbench.Initialize(flag.CommandLine)\n+}\n+\n+func TestAcceptableAckInSynRcvd(t *testing.T) {\n+ for _, tt := range []struct {\n+ offset uint32\n+ expectRst bool\n+ }{\n+ {offset: 0, expectRst: true},\n+ // The ACK holds the next expected SEQ so valid segments must hold an ACK\n+ // that is 1 larger than the last SEQ value.\n+ {offset: 1, expectRst: false},\n+ {offset: 2, expectRst: true},\n+ } {\n+ t.Run(fmt.Sprintf(\"offset=%d, expectRst=%t\", tt.offset, tt.expectRst), func(t *testing.T) {\n+ dut := testbench.NewDUT(t)\n+ listenFd, listenerPort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)\n+ defer dut.Close(t, listenFd)\n+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &listenerPort}, testbench.TCP{SrcPort: &listenerPort})\n+ defer conn.Close(t)\n+\n+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})\n+\n+ synAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)\n+ if err != nil {\n+ t.Fatalf(\"didn't get synack during handshake: %s\", err)\n+ }\n+\n+ // Calculate the ACK number.\n+ ackNum := *synAck.SeqNum + tt.offset\n+ conn.Send(t, testbench.TCP{AckNum: &ackNum, Flags: testbench.TCPFlags(header.TCPFlagAck)})\n+\n+ if tt.expectRst {\n+ if _, err := conn.Expect(t, testbench.TCP{SeqNum: &ackNum, Flags: testbench.TCPFlags(header.TCPFlagRst)}, time.Second); err != nil {\n+ t.Fatalf(\"failed to receive rst for an unacceptable ack: %s\", err)\n+ }\n+ } else {\n+ acceptFd, _ := dut.Accept(t, listenFd)\n+ dut.Close(t, acceptFd)\n+ }\n+ })\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add a PacketImpact test for Ack behavior in SYN-RCVD
Updates #7199.
PiperOrigin-RevId: 429150157 |
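For context on the test above: in SYN-RCVD an incoming ACK is acceptable only if SND.UNA < SEG.ACK <= SND.NXT (RFC 793), and since the listener has sent just its SYN-ACK, the only acceptable value is ISS+1; other offsets should draw a RST. The sketch below is a hedged illustration of that predicate with hypothetical sequence numbers, not gVisor's actual TCP state machine code.

    package main

    import "fmt"

    // acceptableAck reports whether an ACK arriving in SYN-RCVD satisfies
    // SND.UNA < SEG.ACK <= SND.NXT, using modulo-2^32 sequence arithmetic.
    func acceptableAck(sndUna, sndNxt, segAck uint32) bool {
        return int32(segAck-sndUna) > 0 && int32(segAck-sndNxt) <= 0
    }

    func main() {
        var iss uint32 = 1000        // hypothetical ISS carried in the SYN-ACK
        sndUna, sndNxt := iss, iss+1 // only the SYN has been sent so far
        for _, off := range []uint32{0, 1, 2} {
            segAck := iss + off
            // Only off == 1 is acceptable; the others should be answered with a RST.
            fmt.Printf("ack offset %d acceptable: %t\n", off, acceptableAck(sndUna, sndNxt, segAck))
        }
    }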
259,885 | 16.02.2022 17:21:14 | 28,800 | 44644d9c19499b9e32bedbf5ddca223e8f18002f | Save/restore floating point state in amd64 signal frames. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/arch/arch.go",
"new_path": "pkg/sentry/arch/arch.go",
"diff": "@@ -148,7 +148,9 @@ type Context interface {\n// stack is not going to be used).\n//\n// sigset is the signal mask before entering the signal handler.\n- SignalSetup(st *Stack, act *linux.SigAction, info *linux.SignalInfo, alt *linux.SignalStack, sigset linux.SignalSet) error\n+ //\n+ // featureSet is the application CPU feature set.\n+ SignalSetup(st *Stack, act *linux.SigAction, info *linux.SignalInfo, alt *linux.SignalStack, sigset linux.SignalSet, featureSet cpuid.FeatureSet) error\n// SignalRestore restores context after returning from a signal\n// handler.\n@@ -157,8 +159,11 @@ type Context interface {\n//\n// rt is true if SignalRestore is being entered from rt_sigreturn and\n// false if SignalRestore is being entered from sigreturn.\n+ //\n+ // featureSet is the application CPU feature set.\n+ //\n// SignalRestore returns the thread's new signal mask.\n- SignalRestore(st *Stack, rt bool) (linux.SignalSet, linux.SignalStack, error)\n+ SignalRestore(st *Stack, rt bool, featureSet cpuid.FeatureSet) (linux.SignalSet, linux.SignalStack, error)\n// SingleStep returns true if single stepping is enabled.\nSingleStep() bool\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/arch/arch_amd64.go",
"new_path": "pkg/sentry/arch/arch_amd64.go",
"diff": "@@ -106,7 +106,6 @@ const (\n// +stateify savable\ntype context64 struct {\nState\n- sigFPState []fpu.State // fpstate to be restored on sigreturn.\n}\n// Arch implements Context.Arch.\n@@ -114,14 +113,6 @@ func (c *context64) Arch() Arch {\nreturn AMD64\n}\n-func (c *context64) copySigFPState() []fpu.State {\n- var sigfps []fpu.State\n- for _, s := range c.sigFPState {\n- sigfps = append(sigfps, s.Fork())\n- }\n- return sigfps\n-}\n-\nfunc (c *context64) FloatingPointData() *fpu.State {\nreturn &c.State.fpState\n}\n@@ -130,7 +121,6 @@ func (c *context64) FloatingPointData() *fpu.State {\nfunc (c *context64) Fork() Context {\nreturn &context64{\nState: c.State.Fork(),\n- sigFPState: c.copySigFPState(),\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/arch/arch_x86.go",
"new_path": "pkg/sentry/arch/arch_x86.go",
"diff": "@@ -397,7 +397,6 @@ func New(arch Arch) Context {\nState{\nfpState: fpu.NewState(),\n},\n- []fpu.State(nil),\n}\n}\npanic(fmt.Sprintf(\"unknown architecture %v\", arch))\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/arch/fpu/fpu_amd64.go",
"new_path": "pkg/sentry/arch/fpu/fpu_amd64.go",
"diff": "@@ -63,6 +63,15 @@ func (s *State) Fork() State {\nreturn n\n}\n+// Reset resets s to its initial state.\n+func (s *State) Reset() {\n+ f := *s\n+ for i := range f {\n+ f[i] = 0\n+ }\n+ initX86FPState(&f[0], cpuid.HostFeatureSet().UseXsave())\n+}\n+\n// ptraceFPRegsSize is the size in bytes of Linux's user_i387_struct, the type\n// manipulated by PTRACE_GETFPREGS and PTRACE_SETFPREGS on x86. Equivalently,\n// ptraceFPRegsSize is the size in bytes of the x86 FXSAVE area.\n@@ -107,11 +116,6 @@ const (\nmxcsrMaskOffset = 28\n)\n-var (\n- mxcsrMask uint32\n- initMXCSRMask sync.Once\n-)\n-\nconst (\n// minXstateBytes is the minimum size in bytes of an x86 XSAVE area, equal\n// to the size of the XSAVE legacy area (512 bytes) plus the size of the\n@@ -142,28 +146,6 @@ const (\nxsaveHeaderZeroedBytes = 64 - 8\n)\n-// sanitizeMXCSR coerces reserved bits in the MXCSR field of f to 0. (\"FXRSTOR\n-// generates a general-protection fault (#GP) in response to an attempt to set\n-// any of the reserved bits of the MXCSR register.\" - Intel SDM Vol. 1, Section\n-// 10.5.1.2 \"SSE State\")\n-func sanitizeMXCSR(f State) {\n- mxcsr := hostarch.ByteOrder.Uint32(f[mxcsrOffset:])\n- initMXCSRMask.Do(func() {\n- temp := State(alignedBytes(uint(ptraceFPRegsSize), 16))\n- initX86FPState(&temp[0], false /* useXsave */)\n- mxcsrMask = hostarch.ByteOrder.Uint32(temp[mxcsrMaskOffset:])\n- if mxcsrMask == 0 {\n- // \"If the value of the MXCSR_MASK field is 00000000H, then the\n- // MXCSR_MASK value is the default value of 0000FFBFH.\" - Intel SDM\n- // Vol. 1, Section 11.6.6 \"Guidelines for Writing to the MXCSR\n- // Register\"\n- mxcsrMask = 0xffbf\n- }\n- })\n- mxcsr &= mxcsrMask\n- hostarch.ByteOrder.PutUint32(f[mxcsrOffset:], mxcsr)\n-}\n-\n// PtraceGetXstateRegs implements ptrace(PTRACE_GETREGS, NT_X86_XSTATE) by\n// writing the floating point registers from this state to dst and returning the\n// number of bytes written, which must be less than or equal to maxlen.\n@@ -206,8 +188,19 @@ func (s *State) PtraceSetXstateRegs(src io.Reader, maxlen int, featureSet cpuid.\nif _, err := io.ReadFull(src, f); err != nil {\nreturn 0, err\n}\n+ n := copy(*s, f)\n+ s.SanitizeUser(featureSet)\n+ return n, nil\n+}\n+\n+// SanitizeUser mutates s to ensure that restoring it is safe.\n+func (s *State) SanitizeUser(featureSet cpuid.FeatureSet) {\n+ f := *s\n+\n// Force reserved bits in MXCSR to 0. This is consistent with Linux.\n- sanitizeMXCSR(State(f))\n+ sanitizeMXCSR(f)\n+\n+ if len(f) >= minXstateBytes {\n// Users can't enable *more* XCR0 bits than what we, and the CPU, support.\nxstateBV := hostarch.ByteOrder.Uint64(f[xstateBVOffset:])\nxstateBV &= featureSet.ValidXCR0Mask()\n@@ -217,7 +210,34 @@ func (s *State) PtraceSetXstateRegs(src io.Reader, maxlen int, featureSet cpuid.\nfor i := range reserved {\nreserved[i] = 0\n}\n- return copy(*s, f), nil\n+ }\n+}\n+\n+var (\n+ mxcsrMask uint32\n+ initMXCSRMask sync.Once\n+)\n+\n+// sanitizeMXCSR coerces reserved bits in the MXCSR field of f to 0. (\"FXRSTOR\n+// generates a general-protection fault (#GP) in response to an attempt to set\n+// any of the reserved bits of the MXCSR register.\" - Intel SDM Vol. 
1, Section\n+// 10.5.1.2 \"SSE State\")\n+func sanitizeMXCSR(f State) {\n+ mxcsr := hostarch.ByteOrder.Uint32(f[mxcsrOffset:])\n+ initMXCSRMask.Do(func() {\n+ temp := State(alignedBytes(uint(ptraceFPRegsSize), 16))\n+ initX86FPState(&temp[0], false /* useXsave */)\n+ mxcsrMask = hostarch.ByteOrder.Uint32(temp[mxcsrMaskOffset:])\n+ if mxcsrMask == 0 {\n+ // \"If the value of the MXCSR_MASK field is 00000000H, then the\n+ // MXCSR_MASK value is the default value of 0000FFBFH.\" - Intel SDM\n+ // Vol. 1, Section 11.6.6 \"Guidelines for Writing to the MXCSR\n+ // Register\"\n+ mxcsrMask = 0xffbf\n+ }\n+ })\n+ mxcsr &= mxcsrMask\n+ hostarch.ByteOrder.PutUint32(f[mxcsrOffset:], mxcsr)\n}\n// SetMXCSR sets the MXCSR control/status register in the state.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/arch/signal_amd64.go",
"new_path": "pkg/sentry/arch/signal_amd64.go",
"diff": "@@ -22,10 +22,11 @@ import (\n\"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/cpuid\"\n\"gvisor.dev/gvisor/pkg/hostarch\"\n- \"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/marshal/primitive\"\n- \"gvisor.dev/gvisor/pkg/sentry/arch/fpu\"\n+ \"gvisor.dev/gvisor/pkg/usermem\"\n)\n// SignalContext64 is equivalent to struct sigcontext, the type passed as the\n@@ -59,7 +60,7 @@ type SignalContext64 struct {\nTrapno uint64\nOldmask linux.SignalSet\nCr2 uint64\n- // Pointer to a struct _fpstate. See b/33003106#comment8.\n+ // Pointer to a struct _fpstate.\nFpstate uint64\nReserved [8]uint64\n}\n@@ -82,28 +83,38 @@ type UContext64 struct {\nSigset linux.SignalSet\n}\n-// From Linux 'arch/x86/include/uapi/asm/sigcontext.h' the following is the\n-// size of the magic cookie at the end of the xsave frame.\n+// FPSoftwareFrame is equivalent to struct _fpx_sw_bytes, the data stored by\n+// Linux in bytes 464:511 of the fxsave/xsave frame.\n//\n-// NOTE(b/33003106#comment11): Currently we don't actually populate the fpstate\n-// on the signal stack.\n-const _FP_XSTATE_MAGIC2_SIZE = 4\n-\n-func (c *context64) fpuFrameSize() (size int, useXsave bool) {\n- size = len(c.fpState)\n- if size > 512 {\n- // Make room for the magic cookie at the end of the xsave frame.\n- size += _FP_XSTATE_MAGIC2_SIZE\n- useXsave = true\n- }\n- return size, useXsave\n+// +marshal\n+type FPSoftwareFrame struct {\n+ Magic1 uint32\n+ ExtendedSize uint32\n+ Xfeatures uint64\n+ XstateSize uint32\n+ Padding [7]uint32\n}\n+// From Linux's arch/x86/include/uapi/asm/sigcontext.h.\n+const (\n+ // Value of FPSoftwareFrame.Magic1.\n+ _FP_XSTATE_MAGIC1 = 0x46505853\n+\n+ // Value written to the 4 bytes inserted by Linux after the fxsave/xsave\n+ // area in the signal frame.\n+ _FP_XSTATE_MAGIC2 = 0x46505845\n+ _FP_XSTATE_MAGIC2_SIZE = 4\n+)\n+\n+// From Linux's arch/x86/include/asm/fpu/types.h.\n+const (\n+ // xsave features that are always enabled in signal frame fpstate.\n+ _XFEATURE_MASK_FPSSE = 0x3\n+)\n+\n// SignalSetup implements Context.SignalSetup. (Compare to Linux's\n// arch/x86/kernel/signal.c:__setup_rt_frame().)\n-func (c *context64) SignalSetup(st *Stack, act *linux.SigAction, info *linux.SignalInfo, alt *linux.SignalStack, sigset linux.SignalSet) error {\n- sp := st.Bottom\n-\n+func (c *context64) SignalSetup(st *Stack, act *linux.SigAction, info *linux.SignalInfo, alt *linux.SignalStack, sigset linux.SignalSet, featureSet cpuid.FeatureSet) error {\n// \"The 128-byte area beyond the location pointed to by %rsp is considered\n// to be reserved and shall not be modified by signal or interrupt\n// handlers. ... leaf functions may use this area for their entire stack\n@@ -112,23 +123,22 @@ func (c *context64) SignalSetup(st *Stack, act *linux.SigAction, info *linux.Sig\n//\n// (But this doesn't apply if we're starting at the top of the signal\n// stack, in which case there is no following stack frame.)\n+ sp := st.Bottom\nif !(alt.IsEnabled() && sp == alt.Top()) {\nsp -= 128\n}\n// Allocate space for floating point state on the stack.\n- //\n- // This isn't strictly necessary because we don't actually populate\n- // the fpstate. However we do store the floating point state of the\n- // interrupted thread inside the sentry. 
Simply accounting for this\n- // space on the user stack naturally caps the amount of memory the\n- // sentry will allocate for this purpose.\n- fpSize, _ := c.fpuFrameSize()\n- sp = (sp - hostarch.Addr(fpSize)) & ^hostarch.Addr(63)\n+ fpSize, fpAlign := featureSet.ExtendedStateSize()\n+ if fpSize < 512 {\n+ // We expect support for at least FXSAVE.\n+ fpSize = 512\n+ }\n+ fpSize += _FP_XSTATE_MAGIC2_SIZE\n+ fpStart := (sp - hostarch.Addr(fpSize)) & ^hostarch.Addr(fpAlign-1)\n// Construct the UContext64 now since we need its size.\nuc := &UContext64{\n- // No _UC_FP_XSTATE: see Fpstate above.\n// No _UC_STRICT_RESTORE_SS: we don't allow SS changes.\nFlags: _UC_SIGCONTEXT_SS,\nStack: *alt,\n@@ -154,9 +164,13 @@ func (c *context64) SignalSetup(st *Stack, act *linux.SigAction, info *linux.Sig\nCs: uint16(c.Regs.Cs),\nSs: uint16(c.Regs.Ss),\nOldmask: sigset,\n+ Fpstate: uint64(fpStart),\n},\nSigset: sigset,\n}\n+ if featureSet.UseXsave() {\n+ uc.Flags |= _UC_FP_XSTATE\n+ }\n// TODO(gvisor.dev/issue/159): Set SignalContext64.Err, Trapno, and Cr2\n// based on the fault that caused the signal. For now, leave Err and\n@@ -171,21 +185,46 @@ func (c *context64) SignalSetup(st *Stack, act *linux.SigAction, info *linux.Sig\nucSize := uc.SizeBytes()\n// st.Arch.Width() is for the restorer address. sizeof(siginfo) == 128.\nframeSize := int(st.Arch.Width()) + ucSize + 128\n- frameBottom := (sp-hostarch.Addr(frameSize)) & ^hostarch.Addr(15) - 8\n- sp = frameBottom + hostarch.Addr(frameSize)\n- st.Bottom = sp\n+ frameStart := (fpStart-hostarch.Addr(frameSize)) & ^hostarch.Addr(15) - 8\n+ frameEnd := frameStart + hostarch.Addr(frameSize)\n// Prior to proceeding, figure out if the frame will exhaust the range\n// for the signal stack. This is not allowed, and should immediately\n// force signal delivery (reverting to the default handler).\n- if act.Flags&linux.SA_ONSTACK != 0 && alt.IsEnabled() && !alt.Contains(frameBottom) {\n+ if act.Flags&linux.SA_ONSTACK != 0 && alt.IsEnabled() && !alt.Contains(frameStart) {\nreturn unix.EFAULT\n}\n+ // Set up floating point state on the stack. 
Compare Linux's\n+ // arch/x86/kernel/fpu/signal.c:copy_fpstate_to_sigframe().\n+ if _, err := st.IO.CopyOut(context.Background(), fpStart, c.fpState[:464], usermem.IOOpts{}); err != nil {\n+ return err\n+ }\n+ fpsw := FPSoftwareFrame{\n+ Magic1: _FP_XSTATE_MAGIC1,\n+ ExtendedSize: uint32(fpSize),\n+ Xfeatures: _XFEATURE_MASK_FPSSE | featureSet.ValidXCR0Mask(),\n+ XstateSize: uint32(fpSize) - _FP_XSTATE_MAGIC2_SIZE,\n+ }\n+ st.Bottom = fpStart + 512\n+ if _, err := fpsw.CopyOut(st, StackBottomMagic); err != nil {\n+ return err\n+ }\n+ if len(c.fpState) > 512 {\n+ if _, err := st.IO.CopyOut(context.Background(), fpStart+512, c.fpState[512:], usermem.IOOpts{}); err != nil {\n+ return err\n+ }\n+ }\n+ st.Bottom = fpStart + hostarch.Addr(fpSize)\n+ if _, err := primitive.CopyUint32Out(st, StackBottomMagic, _FP_XSTATE_MAGIC2); err != nil {\n+ return err\n+ }\n+\n// Adjust the code.\ninfo.FixSignalCodeForUser()\n// Set up the stack frame.\n+ st.Bottom = frameEnd\nif _, err := info.CopyOut(st, StackBottomMagic); err != nil {\nreturn err\n}\n@@ -212,23 +251,21 @@ func (c *context64) SignalSetup(st *Stack, act *linux.SigAction, info *linux.Sig\nc.Regs.Rsi = uint64(infoAddr)\nc.Regs.Rdx = uint64(ucAddr)\nc.Regs.Rax = 0\n+ c.Regs.Eflags &^= eflagsDF | eflagsRF | eflagsTF\nc.Regs.Ds = userDS\nc.Regs.Es = userDS\nc.Regs.Cs = userCS\nc.Regs.Ss = userDS\n- // Save the thread's floating point state.\n- c.sigFPState = append(c.sigFPState, c.fpState)\n-\n- // Signal handler gets a clean floating point state.\n- c.fpState = fpu.NewState()\n+ // Clear floating point registers.\n+ c.fpState.Reset()\nreturn nil\n}\n// SignalRestore implements Context.SignalRestore. (Compare to Linux's\n// arch/x86/kernel/signal.c:sys_rt_sigreturn().)\n-func (c *context64) SignalRestore(st *Stack, rt bool) (linux.SignalSet, linux.SignalStack, error) {\n+func (c *context64) SignalRestore(st *Stack, rt bool, featureSet cpuid.FeatureSet) (linux.SignalSet, linux.SignalStack, error) {\n// Copy out the stack frame.\nvar uc UContext64\nif _, err := uc.CopyIn(st, StackBottomMagic); err != nil {\n@@ -262,20 +299,18 @@ func (c *context64) SignalRestore(st *Stack, rt bool) (linux.SignalSet, linux.Si\n// N.B. _UC_STRICT_RESTORE_SS not supported.\nc.Regs.Orig_rax = math.MaxUint64\n- // Restore floating point state.\n- l := len(c.sigFPState)\n- if l > 0 {\n- c.fpState = c.sigFPState[l-1]\n- // NOTE(cl/133042258): State save requires that any slice\n- // elements from '[len:cap]' to be zero value.\n- c.sigFPState[l-1] = nil\n- c.sigFPState = c.sigFPState[0 : l-1]\n+ // Restore floating point state. Compare Linux's\n+ // arch/x86/kernel/fpu/signal.c:fpu__restore_sig().\n+ if uc.MContext.Fpstate == 0 {\n+ c.fpState.Reset()\n} else {\n- // This might happen if sigreturn(2) calls are unbalanced with\n- // respect to signal handler entries. This is not expected so\n- // don't bother to do anything fancy with the floating point\n- // state.\n- log.Infof(\"sigreturn unable to restore application fpstate\")\n+ fpSize, _ := featureSet.ExtendedStateSize()\n+ f := make([]byte, fpSize)\n+ if _, err := st.IO.CopyIn(context.Background(), hostarch.Addr(uc.MContext.Fpstate), f, usermem.IOOpts{}); err != nil {\n+ return 0, linux.SignalStack{}, err\n+ }\n+ copy(c.fpState, f)\n+ c.fpState.SanitizeUser(featureSet)\n}\nreturn uc.Sigset, uc.Stack, nil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/arch/signal_arm64.go",
"new_path": "pkg/sentry/arch/signal_arm64.go",
"diff": "@@ -20,6 +20,7 @@ package arch\nimport (\n\"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/cpuid\"\n\"gvisor.dev/gvisor/pkg/hostarch\"\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/sentry/arch/fpu\"\n@@ -73,7 +74,7 @@ type UContext64 struct {\n}\n// SignalSetup implements Context.SignalSetup.\n-func (c *context64) SignalSetup(st *Stack, act *linux.SigAction, info *linux.SignalInfo, alt *linux.SignalStack, sigset linux.SignalSet) error {\n+func (c *context64) SignalSetup(st *Stack, act *linux.SigAction, info *linux.SignalInfo, alt *linux.SignalStack, sigset linux.SignalSet, featureSet cpuid.FeatureSet) error {\nsp := st.Bottom\n// Construct the UContext64 now since we need its size.\n@@ -138,7 +139,7 @@ func (c *context64) SignalSetup(st *Stack, act *linux.SigAction, info *linux.Sig\n}\n// SignalRestore implements Context.SignalRestore.\n-func (c *context64) SignalRestore(st *Stack, rt bool) (linux.SignalSet, linux.SignalStack, error) {\n+func (c *context64) SignalRestore(st *Stack, rt bool, featureSet cpuid.FeatureSet) (linux.SignalSet, linux.SignalStack, error) {\n// Copy out the stack frame.\nvar uc UContext64\nif _, err := uc.CopyIn(st, StackBottomMagic); err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_signals.go",
"new_path": "pkg/sentry/kernel/task_signals.go",
"diff": "@@ -282,7 +282,7 @@ func (t *Task) deliverSignalToHandler(info *linux.SignalInfo, act linux.SigActio\nact.Restorer = mm.VDSOSigReturn()\n}\n- if err := t.Arch().SignalSetup(st, &act, info, &alt, mask); err != nil {\n+ if err := t.Arch().SignalSetup(st, &act, info, &alt, mask, t.k.featureSet); err != nil {\nreturn err\n}\nt.p.FullStateChanged()\n@@ -304,7 +304,7 @@ var ctrlResume = &SyscallControl{ignoreReturn: true}\n// rt is true).\nfunc (t *Task) SignalReturn(rt bool) (*SyscallControl, error) {\nst := t.Stack()\n- sigset, alt, err := t.Arch().SignalRestore(st, rt)\n+ sigset, alt, err := t.Arch().SignalRestore(st, rt, t.k.featureSet)\nif err != nil {\nreturn nil, err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -193,6 +193,10 @@ syscall_test(\ntest = \"//test/syscalls/linux:fpsig_fork_test\",\n)\n+syscall_test(\n+ test = \"//test/syscalls/linux:fpsig_mut_test\",\n+)\n+\nsyscall_test(\ntest = \"//test/syscalls/linux:fpsig_nested_test\",\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -888,6 +888,22 @@ cc_binary(\n],\n)\n+cc_binary(\n+ name = \"fpsig_mut_test\",\n+ testonly = 1,\n+ srcs = select_arch(\n+ amd64 = [\"fpsig_mut_amd64.cc\"],\n+ arm64 = [],\n+ ),\n+ linkstatic = 1,\n+ deps = [\n+ gtest,\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ \"//test/util:thread_util\",\n+ ],\n+)\n+\ncc_binary(\nname = \"fpsig_nested_test\",\ntestonly = 1,\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/syscalls/linux/fpsig_mut_amd64.cc",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// This program verifies that application floating point state is visible in\n+// signal frames, and that changes to said state is visible after the signal\n+// handler returns.\n+#include <sys/time.h>\n+#include <sys/ucontext.h>\n+\n+#include \"gtest/gtest.h\"\n+#include \"test/util/test_util.h\"\n+#include \"test/util/thread_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+namespace {\n+\n+#define GET_XMM(__var, __xmm) \\\n+ asm volatile(\"movq %%\" #__xmm \", %0\" : \"=r\"(__var))\n+#define SET_XMM(__var, __xmm) asm volatile(\"movq %0, %%\" #__xmm : : \"r\"(__var))\n+\n+int pid;\n+int tid;\n+\n+volatile uint64_t handlerxmm = ~0UL;\n+volatile uint64_t framexmm = ~0UL;\n+\n+constexpr uint64_t kOldFPRegValue = 0xdeadbeeffacefeed;\n+constexpr uint64_t kNewFPRegValue = 0xfacefeedbaad1dea;\n+\n+void sigusr1(int s, siginfo_t* siginfo, void* _uc) {\n+ uint64_t val = SIGUSR1;\n+\n+ // Record the value of %xmm0 on entry and then clobber it.\n+ GET_XMM(handlerxmm, xmm0);\n+ SET_XMM(val, xmm0);\n+\n+ // Record the value of %xmm0 stored in _uc and then replace it.\n+ ucontext_t* uc = reinterpret_cast<ucontext_t*>(_uc);\n+ auto* uc_xmm0 = &uc->uc_mcontext.fpregs->_xmm[0];\n+ framexmm = (static_cast<uint64_t>(uc_xmm0->element[1]) << 32) |\n+ static_cast<uint64_t>(uc_xmm0->element[0]);\n+ uc_xmm0->element[1] = static_cast<uint32_t>(kNewFPRegValue >> 32);\n+ uc_xmm0->element[0] = static_cast<uint32_t>(kNewFPRegValue);\n+}\n+\n+TEST(FPSigTest, StateInFrame) {\n+ pid = getpid();\n+ tid = gettid();\n+\n+ struct sigaction sa = {};\n+ sigemptyset(&sa.sa_mask);\n+ sa.sa_flags = SA_SIGINFO;\n+ sa.sa_sigaction = sigusr1;\n+ ASSERT_THAT(sigaction(SIGUSR1, &sa, nullptr), SyscallSucceeds());\n+\n+ // The amd64 ABI specifies that the XMM register set is caller-saved. This\n+ // implies that if there is any function call between SET_XMM and GET_XMM the\n+ // compiler might save/restore xmm0 implicitly. 
This defeats the entire\n+ // purpose of the test which is to verify that fpstate is restored by\n+ // sigreturn(2).\n+ //\n+ // This is the reason why 'tgkill(getpid(), gettid(), SIGUSR1)' is implemented\n+ // in inline assembly below.\n+ //\n+ // If the OS is broken and registers are clobbered by the signal, using tgkill\n+ // to signal the current thread ensures that this is the clobbered thread.\n+ SET_XMM(kOldFPRegValue, xmm0);\n+\n+ asm volatile(\n+ \"movl %[killnr], %%eax;\"\n+ \"movl %[pid], %%edi;\"\n+ \"movl %[tid], %%esi;\"\n+ \"movl %[sig], %%edx;\"\n+ \"syscall;\"\n+ :\n+ : [killnr] \"i\"(__NR_tgkill), [pid] \"rm\"(pid), [tid] \"rm\"(tid),\n+ [sig] \"i\"(SIGUSR1)\n+ : \"rax\", \"rdi\", \"rsi\", \"rdx\",\n+ // Clobbered by syscall.\n+ \"rcx\", \"r11\");\n+\n+ uint64_t got;\n+ GET_XMM(got, xmm0);\n+\n+ //\n+ // The checks below verifies the following:\n+ // - signal handlers must called with a clean fpu state.\n+ // - sigreturn(2) must restore fpstate of the interrupted context.\n+ //\n+ EXPECT_EQ(handlerxmm, 0);\n+ EXPECT_EQ(framexmm, kOldFPRegValue);\n+ EXPECT_EQ(got, kNewFPRegValue);\n+}\n+\n+} // namespace\n+\n+} // namespace testing\n+} // namespace gvisor\n"
}
] | Go | Apache License 2.0 | google/gvisor | Save/restore floating point state in amd64 signal frames.
PiperOrigin-RevId: 429178859 |
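The change above begins writing the fxsave/xsave area into the user signal frame. As a rough guide to the layout arithmetic involved — reserving the state area plus 4 trailing bytes for the FP_XSTATE_MAGIC2 marker and aligning the start downward — here is a standalone sketch; the sizes, addresses, and helper name are hypothetical and this is not the sentry's code.

    package main

    import "fmt"

    // fpFrameLayout places an fpSize-byte xsave area plus a 4-byte trailing
    // magic below the stack pointer, aligned down to fpAlign (a power of two).
    func fpFrameLayout(sp, fpSize, fpAlign uint64) (start, end uint64) {
        const magic2Size = 4
        total := fpSize + magic2Size
        start = (sp - total) &^ (fpAlign - 1)
        return start, start + total
    }

    func main() {
        // Hypothetical values: a 2688-byte xsave area with 64-byte alignment.
        start, end := fpFrameLayout(0x7fffffffe000, 2688, 64)
        fmt.Printf("fpstate occupies [%#x, %#x)\n", start, end)
    }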
259,992 | 16.02.2022 17:45:24 | 28,800 | 0b86ae090e00b788cec15a6819c8c925cb67102a | Add tests for containerd v1.6.0-rc.4 | [
{
"change_type": "MODIFY",
"old_path": ".buildkite/pipeline.yaml",
"new_path": ".buildkite/pipeline.yaml",
"diff": "@@ -175,6 +175,14 @@ steps:\ncommand: make containerd-test-1.5.4\nagents:\nqueue: \"cgroupv2\"\n+ - <<: *common\n+ label: \":docker: Containerd 1.6.0-rc.4 tests\"\n+ command: make containerd-test-1.6.0-rc.4\n+ - <<: *common\n+ label: \":docker: Containerd 1.6.0-rc.4 tests (cgroupv2)\"\n+ command: make containerd-test-1.6.0-rc.4\n+ agents:\n+ queue: \"cgroupv2\"\n# Check the website builds.\n- <<: *common\n"
},
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -337,6 +337,7 @@ containerd-tests: containerd-test-1.3.9\nendif\ncontainerd-tests: containerd-test-1.4.3\ncontainerd-tests: containerd-test-1.5.4\n+containerd-tests: containerd-test-1.6.0-rc.4\n##\n## Benchmarks.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add tests for containerd v1.6.0-rc.4
PiperOrigin-RevId: 429182915 |
259,992 | 16.02.2022 17:49:02 | 28,800 | 81ea55d8fc96150f8a79e8d54ade92f579fbe385 | Misc changes
Make a few nit changes to cgroups
Better OOM watcher debug logging | [
{
"change_type": "MODIFY",
"old_path": "pkg/shim/oom_v2.go",
"new_path": "pkg/shim/oom_v2.go",
"diff": "@@ -65,15 +65,17 @@ func (w *watcherV2) run(ctx context.Context) {\nreturn\ncase i := <-w.itemCh:\nif i.err != nil {\n+ logrus.WithError(i.err).Debugf(\"Error listening for OOM, id: %q\", i.id)\ndelete(lastOOMMap, i.id)\ncontinue\n}\n+ logrus.Debugf(\"Received OOM event, id: %q, event: %+v\", i.id, i.ev)\nlastOOM := lastOOMMap[i.id]\nif i.ev.OOM > lastOOM {\nif err := w.publisher.Publish(ctx, runtime.TaskOOMEventTopic, &TaskOOM{\nContainerID: i.id,\n}); err != nil {\n- logrus.WithError(err).Error(\"publish OOM event\")\n+ logrus.WithError(err).Error(\"Publish OOM event\")\n}\n}\nif i.ev.OOM > 0 {\n@@ -89,8 +91,9 @@ func (w *watcherV2) add(id string, cgx interface{}) error {\nif !ok {\nreturn fmt.Errorf(\"expected *cgroupsv2.Manager, got: %T\", cgx)\n}\n- // NOTE: containerd/cgroups/v2 does not support closing eventCh routine currently.\n- // The routine shuts down when an error happens, mostly when the cgroup is deleted.\n+ // NOTE: containerd/cgroups/v2 does not support closing eventCh routine\n+ // currently. The routine shuts down when an error happens, mostly when the\n+ // cgroup is deleted.\neventCh, errCh := cg.EventChan()\ngo func() {\nfor {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cgroup/cgroup_v2.go",
"new_path": "runsc/cgroup/cgroup_v2.go",
"diff": "@@ -74,29 +74,16 @@ type cgroupV2 struct {\n}\nfunc newCgroupV2(mountpoint string, group string) (*cgroupV2, error) {\n- cg := &cgroupV2{\n- Mountpoint: mountpoint,\n- Path: group,\n- }\n- err := cg.setupControllers()\n- return cg, err\n-}\n-\n-// setupControllers setup all supported controllers based on cgroup.controllers\n-// in the unified cgroup mount point\n-func (c *cgroupV2) setupControllers() error {\n- if c.Controllers != nil {\n- return nil\n- }\n-\n- data, err := ioutil.ReadFile(filepath.Join(c.Mountpoint, \"cgroup.controllers\"))\n+ data, err := ioutil.ReadFile(filepath.Join(mountpoint, \"cgroup.controllers\"))\nif err != nil {\n- return err\n+ return nil, err\n}\n- fields := strings.Fields(string(data))\n- c.Controllers = fields\n- return nil\n+ return &cgroupV2{\n+ Mountpoint: mountpoint,\n+ Path: group,\n+ Controllers: strings.Fields(string(data)),\n+ }, nil\n}\n// Install creates and configures cgroups.\n@@ -179,7 +166,7 @@ func (c *cgroupV2) Uninstall() error {\ndefer cancel()\nb := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)\n- // delete last entry in owned first\n+ // Deletion must occur reverse order, because they may contain ancestors.\nfor i := len(c.Own) - 1; i >= 0; i-- {\ncurrent := c.Own[i]\nlog.Debugf(\"Removing cgroup for path=%q\", current)\n@@ -207,7 +194,8 @@ func (c *cgroupV2) Join() (func(), error) {\nif err != nil {\nreturn nil, err\n}\n- // since this is unified, get the first path of current process's cgroup is enough\n+ // Since this is unified, get the first path of current process's cgroup is\n+ // enough.\nundoPath := filepath.Join(c.Mountpoint, paths[cgroup2Key])\ncu := cleanup.Make(func() {\n@@ -390,8 +378,8 @@ func (*memory2) set(spec *specs.LinuxResources, path string) error {\nif spec.Memory.Swap != nil {\n// in cgroup v2, we set memory and swap separately, but the spec specifies\n- // Swap field as memory+swap, so we need memory limit here to be set in order\n- // to get the correct swap value\n+ // Swap field as memory+swap, so we need memory limit here to be set in\n+ // order to get the correct swap value.\nif spec.Memory.Limit == nil {\nreturn errors.New(\"cgroup: Memory.Swap is set without Memory.Limit\")\n}\n@@ -571,8 +559,9 @@ func convertCPUSharesToCgroupV2Value(cpuShares uint64) uint64 {\n}\n// convertMemorySwapToCgroupV2Value converts MemorySwap value from OCI spec\n-// for use by cgroup v2 drivers. A conversion is needed since Resources.MemorySwap\n-// is defined as memory+swap combined, while in cgroup v2 swap is a separate value.\n+// for use by cgroup v2 drivers. A conversion is needed since\n+// Resources.MemorySwap is defined as memory+swap combined, while in cgroup v2\n+// swap is a separate value.\nfunc convertMemorySwapToCgroupV2Value(memorySwap, memory int64) (int64, error) {\n// for compatibility with cgroup1 controller, set swap to unlimited in\n// case the memory is set to unlimited, and swap is not explicitly set,\n@@ -640,7 +629,8 @@ func bfqDeviceWeightSupported(bfq *os.File) bool {\nif _, err := bfq.Read(buf); err != nil {\nreturn false\n}\n- // If only a single number (default weight) if read back, we have older kernel.\n+ // If only a single number (default weight) if read back, we have older\n+ // kernel.\n_, err := strconv.ParseInt(string(bytes.TrimSpace(buf)), 10, 64)\nreturn err != nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Misc changes
Make a few nit changes to cgroups
Better OOM watcher debug logging
PiperOrigin-RevId: 429183470 |
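One of the nits above is clearer logging around the cgroup v2 OOM watcher; the heart of that watcher is a per-container dedup on the cumulative OOM counter so each kill is published once. Below is a toy version of that decision, assuming a stripped-down event type (the real code consumes containerd/cgroups/v2 memory events).

    package main

    import "fmt"

    // oomEvent is a trimmed stand-in for the memory.events data the watcher
    // receives; only the cumulative OOM counter matters here.
    type oomEvent struct{ OOM uint64 }

    // shouldPublish mirrors the watcher's logic: publish only when the counter
    // has advanced past the last value recorded for this container.
    func shouldPublish(lastOOM map[string]uint64, id string, ev oomEvent) bool {
        publish := ev.OOM > lastOOM[id]
        if ev.OOM > 0 {
            lastOOM[id] = ev.OOM
        }
        return publish
    }

    func main() {
        last := map[string]uint64{}
        for _, ev := range []oomEvent{{OOM: 1}, {OOM: 1}, {OOM: 3}} {
            fmt.Println(shouldPublish(last, "container-1", ev)) // true, false, true
        }
    }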
259,992 | 16.02.2022 17:49:15 | 28,800 | 9b93c12a1395a4c291a51f6b9cf8d909852d8753 | Implement CreateRuntime hook
Closes | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -329,7 +329,28 @@ func New(conf *config.Config, args Args) (*Container, error) {\nreturn nil, err\n}\n- // Write the PID file. Containerd considers the create complete after\n+ // \"If any prestart hook fails, the runtime MUST generate an error,\n+ // stop and destroy the container\" -OCI spec.\n+ if c.Spec.Hooks != nil {\n+ // Even though the hook name is Prestart, runc used to call it from create.\n+ // For this reason, it's now deprecated, but the spec requires it to be\n+ // called *before* CreateRuntime and CreateRuntime must be called in create.\n+ //\n+ // \"For runtimes that implement the deprecated prestart hooks as\n+ // createRuntime hooks, createRuntime hooks MUST be called after the\n+ // prestart hooks.\"\n+ if err := executeHooks(c.Spec.Hooks.Prestart, c.State()); err != nil {\n+ return nil, err\n+ }\n+ if err := executeHooks(c.Spec.Hooks.CreateRuntime, c.State()); err != nil {\n+ return nil, err\n+ }\n+ if len(c.Spec.Hooks.CreateContainer) > 0 {\n+ log.Warningf(\"CreateContainer hook skipped because running inside container namespace is not supported\")\n+ }\n+ }\n+\n+ // Write the PID file. Containerd considers the call to create complete after\n// this file is created, so it must be the last thing we do.\nif args.PIDFile != \"\" {\nif err := ioutil.WriteFile(args.PIDFile, []byte(strconv.Itoa(c.SandboxPid())), 0644); err != nil {\n@@ -357,10 +378,8 @@ func (c *Container) Start(conf *config.Config) error {\n// \"If any prestart hook fails, the runtime MUST generate an error,\n// stop and destroy the container\" -OCI spec.\n- if c.Spec.Hooks != nil {\n- if err := executeHooks(c.Spec.Hooks.Prestart, c.State()); err != nil {\n- return err\n- }\n+ if c.Spec.Hooks != nil && len(c.Spec.Hooks.StartContainer) > 0 {\n+ log.Warningf(\"StartContainer hook skipped because running inside container namespace is not supported\")\n}\nif isRoot(c.Spec) {\n@@ -442,10 +461,8 @@ func (c *Container) Restore(spec *specs.Spec, conf *config.Config, restoreFile s\n// \"If any prestart hook fails, the runtime MUST generate an error,\n// stop and destroy the container\" -OCI spec.\n- if c.Spec.Hooks != nil {\n- if err := executeHooks(c.Spec.Hooks.Prestart, c.State()); err != nil {\n- return err\n- }\n+ if c.Spec.Hooks != nil && len(c.Spec.Hooks.StartContainer) > 0 {\n+ log.Warningf(\"StartContainer hook skipped because running inside container namespace is not supported\")\n}\nif err := c.Sandbox.Restore(c.ID, spec, conf, restoreFile); err != nil {\n@@ -692,6 +709,7 @@ func (c *Container) State() specs.State {\nStatus: c.Status,\nPid: c.SandboxPid(),\nBundle: c.BundleDir,\n+ Annotations: c.Spec.Annotations,\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement CreateRuntime hook
Closes #7149
PiperOrigin-RevId: 429183505 |
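The commit above runs the deprecated Prestart hooks and then the CreateRuntime hooks during create, following the OCI spec's ordering, and only warns for CreateContainer/StartContainer hooks since those would have to execute inside the container's namespaces. A simplified, hypothetical sketch of that ordering — hook and executeHooks below are stand-ins, not runsc's real types.

    package main

    import "fmt"

    type hook struct{ Path string }

    // executeHooks pretends to run each hook binary for the named phase.
    func executeHooks(hooks []hook, phase string) error {
        for _, h := range hooks {
            fmt.Printf("running %s hook: %s\n", phase, h.Path)
        }
        return nil
    }

    // runCreateHooks keeps the spec-mandated order: Prestart first, then
    // CreateRuntime; CreateContainer hooks are skipped with a warning.
    func runCreateHooks(prestart, createRuntime, createContainer []hook) error {
        if err := executeHooks(prestart, "Prestart"); err != nil {
            return err
        }
        if err := executeHooks(createRuntime, "CreateRuntime"); err != nil {
            return err
        }
        if len(createContainer) > 0 {
            fmt.Println("CreateContainer hooks skipped: running in the container namespace is not supported")
        }
        return nil
    }

    func main() {
        _ = runCreateHooks(
            []hook{{Path: "/usr/local/bin/netns-setup"}}, // hypothetical hook binaries
            []hook{{Path: "/usr/local/bin/register-runtime"}},
            nil,
        )
    }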
259,907 | 16.02.2022 19:25:35 | 28,800 | ab3728508f021757c0ebe2a1285b0c74d220d74c | Add syscall test for rename when a writable FD exists and file perms changed.
The gofer server should not attempt to re-open the writable FD on the server on
rename. runsc/fsgofer works as expected. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -2148,6 +2148,7 @@ cc_binary(\n\"//test/util:fs_util\",\n\"@com_google_absl//absl/strings\",\ngtest,\n+ \"//test/util:save_util\",\n\"//test/util:temp_path\",\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/rename.cc",
"new_path": "test/syscalls/linux/rename.cc",
"diff": "#include \"test/util/cleanup.h\"\n#include \"test/util/file_descriptor.h\"\n#include \"test/util/fs_util.h\"\n+#include \"test/util/save_util.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n@@ -87,6 +88,19 @@ TEST(RenameTest, FileToSameDirectory) {\nEXPECT_THAT(Exists(newpath), IsPosixErrorOkAndHolds(true));\n}\n+TEST(RenameTest, RenameAfterWritableFDAndChmod) {\n+ // Restore will require re-opening the writable FD which will fail.\n+ const DisableSave ds;\n+ const std::string data = \"hello world\\n\";\n+ auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+ auto wfd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_WRONLY));\n+ ASSERT_THAT(chmod(f.path().c_str(), 0444), SyscallSucceeds());\n+ std::string const newpath = NewTempAbsPath();\n+ ASSERT_THAT(rename(f.path().c_str(), newpath.c_str()), SyscallSucceeds());\n+ EXPECT_THAT(WriteFd(wfd.get(), data.c_str(), data.size()),\n+ SyscallSucceedsWithValue(data.size()));\n+}\n+\nTEST(RenameTest, DirectoryToSameDirectory) {\nauto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\nstd::string const newpath = NewTempAbsPath();\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add syscall test for rename when a writable FD exists and file perms changed.
The gofer server should not attempt to re-open the writable FD on the server on
rename. runsc/fsgofer works as expected.
PiperOrigin-RevId: 429197853 |
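The scenario the new test covers can be reproduced as a small standalone program: open a file for writing, drop its write permission, rename it, and confirm the already-open descriptor still accepts writes — which is why the gofer must not try to re-open the FD on rename. This sketch is in Go rather than the test's C++, uses throwaway paths, and only approximates the test's intent.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        dir, err := os.MkdirTemp("", "rename-demo")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(dir)

        oldPath := filepath.Join(dir, "f")
        f, err := os.OpenFile(oldPath, os.O_CREATE|os.O_WRONLY, 0o644)
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // Make the file read-only and rename it; neither step should
        // invalidate the descriptor that is already open for writing.
        if err := os.Chmod(oldPath, 0o444); err != nil {
            panic(err)
        }
        if err := os.Rename(oldPath, filepath.Join(dir, "g")); err != nil {
            panic(err)
        }
        if _, err := f.WriteString("hello world\n"); err != nil {
            panic(err)
        }
        fmt.Println("write through the pre-existing FD succeeded")
    }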
259,962 | 17.02.2022 09:14:16 | 28,800 | 2822b56f303a59c43620f650c247b227e241ea00 | Add Leak checking to arp tests.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/arp/BUILD",
"new_path": "pkg/tcpip/network/arp/BUILD",
"diff": "@@ -26,6 +26,8 @@ go_test(\nsrcs = [\"arp_test.go\"],\ndeps = [\n\":arp\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n\"//pkg/tcpip/faketime\",\n@@ -46,6 +48,8 @@ go_test(\nsrcs = [\"stats_test.go\"],\nlibrary = \":arp\",\ndeps = [\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/stack\",\n\"//pkg/tcpip/testutil\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/arp/arp_test.go",
"new_path": "pkg/tcpip/network/arp/arp_test.go",
"diff": "@@ -16,10 +16,13 @@ package arp_test\nimport (\n\"fmt\"\n+ \"os\"\n\"testing\"\n\"github.com/google/go-cmp/cmp\"\n\"github.com/google/go-cmp/cmp/cmpopts\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/faketime\"\n@@ -171,6 +174,8 @@ func makeTestContext(t *testing.T, eventDepth int, packetDepth int) testContext\nfunc (c *testContext) cleanup() {\nc.linkEP.Close()\n+ c.s.Close()\n+ c.s.Wait()\n}\nfunc TestMalformedPacket(t *testing.T) {\n@@ -183,6 +188,7 @@ func TestMalformedPacket(t *testing.T) {\n})\nc.linkEP.InjectInbound(arp.ProtocolNumber, pkt)\n+ pkt.DecRef()\nif got := c.s.Stats().ARP.PacketsReceived.Value(); got != 1 {\nt.Errorf(\"got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = 1\", got)\n@@ -208,6 +214,7 @@ func TestDisabledEndpoint(t *testing.T) {\n})\nc.linkEP.InjectInbound(arp.ProtocolNumber, pkt)\n+ pkt.DecRef()\nif got := c.s.Stats().ARP.PacketsReceived.Value(); got != 1 {\nt.Errorf(\"got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = 1\", got)\n@@ -239,6 +246,7 @@ func TestDirectReply(t *testing.T) {\n})\nc.linkEP.InjectInbound(arp.ProtocolNumber, pkt)\n+ pkt.DecRef()\nif got := c.s.Stats().ARP.PacketsReceived.Value(); got != 1 {\nt.Errorf(\"got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = 1\", got)\n@@ -249,9 +257,6 @@ func TestDirectReply(t *testing.T) {\n}\nfunc TestDirectRequest(t *testing.T) {\n- c := makeTestContext(t, 1, 1)\n- defer c.cleanup()\n-\ntests := []struct {\nname string\nsenderAddr tcpip.Address\n@@ -284,6 +289,9 @@ func TestDirectRequest(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n+ c := makeTestContext(t, 1, 1)\n+ defer c.cleanup()\n+\npacketsRecv := c.s.Stats().ARP.PacketsReceived.Value()\nrequestsRecv := c.s.Stats().ARP.RequestsReceived.Value()\nrequestsRecvUnknownAddr := c.s.Stats().ARP.RequestsReceivedUnknownTargetAddress.Value()\n@@ -297,9 +305,11 @@ func TestDirectRequest(t *testing.T) {\ncopy(h.HardwareAddressSender(), test.senderLinkAddr)\ncopy(h.ProtocolAddressSender(), test.senderAddr)\ncopy(h.ProtocolAddressTarget(), test.targetAddr)\n- c.linkEP.InjectInbound(arp.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: v.ToVectorisedView(),\n- }))\n+ })\n+ c.linkEP.InjectInbound(arp.ProtocolNumber, pkt)\n+ pkt.DecRef()\nif got, want := c.s.Stats().ARP.PacketsReceived.Value(), packetsRecv+1; got != want {\nt.Errorf(\"got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = %d\", got, want)\n@@ -558,7 +568,12 @@ func TestLinkAddressRequest(t *testing.T) {\ns := stack.New(stack.Options{\nNetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol},\n})\n+ defer func() {\n+ s.Close()\n+ s.Wait()\n+ }()\nlinkEP := channel.New(1, header.IPv4MinimumMTU, stackLinkAddr)\n+ defer linkEP.Close()\nif err := s.CreateNIC(nicID, &testLinkEndpoint{LinkEndpoint: linkEP, writeErr: test.linkErr}); err != nil {\nt.Fatalf(\"s.CreateNIC(%d, _): %s\", nicID, err)\n}\n@@ -645,7 +660,12 @@ func TestDADARPRequestPacket(t *testing.T) {\n}), ipv4.NewProtocol},\nClock: clock,\n})\n+ defer func() {\n+ s.Close()\n+ s.Wait()\n+ }()\ne := channel.New(1, header.IPv4MinimumMTU, stackLinkAddr)\n+ defer e.Close()\nif err := s.CreateNIC(nicID, e); err != nil {\nt.Fatalf(\"s.CreateNIC(%d, _): %s\", nicID, err)\n}\n@@ -686,3 +706,10 @@ func TestDADARPRequestPacket(t *testing.T) 
{\nt.Errorf(\"got req.ProtocolAddressTarget() = %s, want = %s\", got, remoteAddr)\n}\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/arp/stats_test.go",
"new_path": "pkg/tcpip/network/arp/stats_test.go",
"diff": "package arp\nimport (\n+ \"os\"\n\"reflect\"\n\"testing\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n\"gvisor.dev/gvisor/pkg/tcpip/testutil\"\n@@ -52,3 +55,10 @@ func TestMultiCounterStatsInitialization(t *testing.T) {\nt.Error(err)\n}\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add Leak checking to arp tests.
Updates #6910
PiperOrigin-RevId: 429325291 |
259,853 | 18.02.2022 01:45:58 | 28,800 | f51097051ac5707065e262dacb6d9f8f05ff55fb | tun: reject packets larger than the MTU
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/devices/tundev/tundev.go",
"new_path": "pkg/sentry/devices/tundev/tundev.go",
"diff": "@@ -143,6 +143,13 @@ func (fd *tunFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.Wri\nif src.NumBytes() == 0 {\nreturn 0, unix.EINVAL\n}\n+ mtu, err := fd.device.MTU()\n+ if err != nil {\n+ return 0, err\n+ }\n+ if int64(mtu) < src.NumBytes() {\n+ return 0, unix.EMSGSIZE\n+ }\ndata := make([]byte, src.NumBytes())\nif _, err := src.CopyIn(ctx, data); err != nil {\nreturn 0, err\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/tun/device.go",
"new_path": "pkg/tcpip/link/tun/device.go",
"diff": "@@ -166,6 +166,20 @@ func attachOrCreateNIC(s *stack.Stack, name, prefix string, linkCaps stack.LinkE\n}\n}\n+// MTU returns the tun enpoint MTU (maximum transmission unit).\n+func (d *Device) MTU() (uint32, error) {\n+ d.mu.RLock()\n+ endpoint := d.endpoint\n+ d.mu.RUnlock()\n+ if endpoint == nil {\n+ return 0, linuxerr.EBADFD\n+ }\n+ if !endpoint.IsAttached() {\n+ return 0, linuxerr.EIO\n+ }\n+ return endpoint.MTU(), nil\n+}\n+\n// Write inject one inbound packet to the network interface.\nfunc (d *Device) Write(data []byte) (int64, error) {\nd.mu.RLock()\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/tuntap.cc",
"new_path": "test/syscalls/linux/tuntap.cc",
"diff": "// limitations under the License.\n#include <arpa/inet.h>\n+#include <asm-generic/errno.h>\n#include <linux/capability.h>\n#include <linux/if_arp.h>\n#include <linux/if_ether.h>\n@@ -414,6 +415,34 @@ TEST_F(TuntapTest, PingKernel) {\n}\n}\n+TEST_F(TuntapTest, LargeWritesFailWithEMSGSIZE) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));\n+\n+ const auto& [fd, link] = ASSERT_NO_ERRNO_AND_VALUE(OpenAndAttachTunTap(\n+ kTapName, kTapIPAddr, true /* tap */, false /* no_pi */));\n+\n+ ping_pkt ping_req =\n+ CreatePingPacket(kMacB, kTapPeerIPAddr, kMacA, kTapIPAddr);\n+ std::string arp_rep =\n+ CreateArpPacket(kMacB, kTapPeerIPAddr, kMacA, kTapIPAddr);\n+\n+ constexpr int kBufSize = 4096;\n+ std::vector<char> buf(kBufSize);\n+ struct iovec iov[2] = {\n+ {\n+ .iov_base = &ping_req,\n+ .iov_len = sizeof(ping_req),\n+ },\n+ {\n+ .iov_base = buf.data(),\n+ .iov_len = kBufSize,\n+ },\n+ };\n+\n+ // A packet is large than MTU which is 1500 by default..\n+ EXPECT_THAT(writev(fd.get(), iov, 2), SyscallFailsWithErrno(EMSGSIZE));\n+}\n+\nTEST_F(TuntapTest, SendUdpTriggersArpResolution) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));\n"
}
] | Go | Apache License 2.0 | google/gvisor | tun: reject packets larger than the MTU
Reported-by: syzbot+d82dbadde2cbe70eb6dd@syzkaller.appspotmail.com
PiperOrigin-RevId: 429508995 |
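The guard added above is a size comparison against the device MTU before any copy from user memory, with oversized writes rejected as EMSGSIZE. Below is a hedged standalone sketch of that check — errMsgSize stands in for unix.EMSGSIZE, and 1500 is the default MTU the new test assumes.

    package main

    import (
        "errors"
        "fmt"
    )

    // errMsgSize is a placeholder for the EMSGSIZE errno the device returns.
    var errMsgSize = errors.New("EMSGSIZE")

    // checkWriteSize rejects a write larger than the attached endpoint's MTU.
    func checkWriteSize(mtu uint32, n int64) error {
        if int64(mtu) < n {
            return errMsgSize
        }
        return nil
    }

    func main() {
        const mtu = 1500
        for _, n := range []int64{512, 1500, 1501, 4096} {
            fmt.Printf("write of %d bytes: %v\n", n, checkWriteSize(mtu, n))
        }
    }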
259,985 | 18.02.2022 14:15:07 | 28,800 | 155ac7c193a036a2d1f3fc1bf1846ad01c77f31c | cgroupfs: Push controller state to inodes.
This is necessary for subcontainers since each cgroup needs to carry
independent state. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/base.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/base.go",
"diff": "@@ -45,6 +45,11 @@ func (c *controllerCommon) init(ty kernel.CgroupControllerType, fs *filesystem)\nc.fs = fs\n}\n+func (c *controllerCommon) cloneFrom(other *controllerCommon) {\n+ c.ty = other.ty\n+ c.fs = other.fs\n+}\n+\n// Type implements kernel.CgroupController.Type.\nfunc (c *controllerCommon) Type() kernel.CgroupControllerType {\nreturn kernel.CgroupControllerType(c.ty)\n@@ -78,6 +83,11 @@ func (c *controllerCommon) RootCgroup() kernel.Cgroup {\ntype controller interface {\nkernel.CgroupController\n+ // Clone creates a new controller based on the internal state of the current\n+ // controller. This is used to initialize a sub-cgroup based on the state of\n+ // the parent.\n+ Clone() controller\n+\n// AddControlFiles should extend the contents map with inodes representing\n// control files defined by this controller.\nAddControlFiles(ctx context.Context, creds *auth.Credentials, c *cgroupInode, contents map[string]kernfs.Inode)\n@@ -89,6 +99,12 @@ type controller interface {\ntype cgroupInode struct {\ndir\n+ // controllers is the set of controllers for this cgroup. This is used to\n+ // store controller-specific state per cgroup. The set of controllers should\n+ // match the controllers for this hierarchy as tracked by the filesystem\n+ // object. Immutable.\n+ controllers map[kernel.CgroupControllerType]controller\n+\n// ts is the list of tasks in this cgroup. The kernel is responsible for\n// removing tasks from this list before they're destroyed, so any tasks on\n// this list are always valid.\n@@ -99,10 +115,11 @@ type cgroupInode struct {\nvar _ kernel.CgroupImpl = (*cgroupInode)(nil)\n-func (fs *filesystem) newCgroupInode(ctx context.Context, creds *auth.Credentials) kernfs.Inode {\n+func (fs *filesystem) newCgroupInode(ctx context.Context, creds *auth.Credentials, parent *cgroupInode) kernfs.Inode {\nc := &cgroupInode{\ndir: dir{fs: fs},\nts: make(map[*kernel.Task]struct{}),\n+ controllers: make(map[kernel.CgroupControllerType]controller),\n}\nc.dir.cgi = c\n@@ -110,8 +127,19 @@ func (fs *filesystem) newCgroupInode(ctx context.Context, creds *auth.Credential\ncontents[\"cgroup.procs\"] = fs.newControllerFile(ctx, creds, &cgroupProcsData{c})\ncontents[\"tasks\"] = fs.newControllerFile(ctx, creds, &tasksData{c})\n+ if parent != nil {\n+ for ty, ctl := range parent.controllers {\n+ new := ctl.Clone()\n+ c.controllers[ty] = new\n+ new.AddControlFiles(ctx, creds, c, contents)\n+ }\n+ } else {\nfor _, ctl := range fs.controllers {\n- ctl.AddControlFiles(ctx, creds, c, contents)\n+ new := ctl.Clone()\n+ // Uniqueness of controllers enforced by the filesystem on creation.\n+ c.controllers[ctl.Type()] = new\n+ new.AddControlFiles(ctx, creds, c, contents)\n+ }\n}\nc.dir.InodeAttrs.Init(ctx, creds, linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), linux.ModeDirectory|linux.FileMode(0555))\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go",
"diff": "@@ -294,7 +294,7 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nfs.kcontrollers = append(fs.kcontrollers, c)\n}\n- root := fs.newCgroupInode(ctx, creds)\n+ root := fs.newCgroupInode(ctx, creds, nil)\nvar rootD kernfs.Dentry\nrootD.InitRoot(&fs.Filesystem, root)\nfs.root = &rootD\n@@ -451,7 +451,7 @@ func (d *dir) NewDir(ctx context.Context, name string, opts vfs.MkdirOptions) (k\n}\nreturn d.OrderedChildren.Inserter(name, func() kernfs.Inode {\nd.IncLinks(1)\n- return d.fs.newCgroupInode(ctx, auth.CredentialsFromContext(ctx))\n+ return d.fs.newCgroupInode(ctx, auth.CredentialsFromContext(ctx), d.cgi)\n})\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/cpu.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/cpu.go",
"diff": "@@ -62,6 +62,17 @@ func newCPUController(fs *filesystem, defaults map[string]int64) *cpuController\nreturn c\n}\n+// Clone implements controller.Clone.\n+func (c *cpuController) Clone() controller {\n+ new := &cpuController{\n+ cfsPeriod: c.cfsPeriod,\n+ cfsQuota: c.cfsQuota,\n+ shares: c.shares,\n+ }\n+ new.controllerCommon.cloneFrom(&c.controllerCommon)\n+ return new\n+}\n+\n// AddControlFiles implements controller.AddControlFiles.\nfunc (c *cpuController) AddControlFiles(ctx context.Context, creds *auth.Credentials, _ *cgroupInode, contents map[string]kernfs.Inode) {\ncontents[\"cpu.cfs_period_us\"] = c.fs.newStaticControllerFile(ctx, creds, linux.FileMode(0644), fmt.Sprintf(\"%d\\n\", c.cfsPeriod))\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/cpuacct.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/cpuacct.go",
"diff": "@@ -38,6 +38,13 @@ func newCPUAcctController(fs *filesystem) *cpuacctController {\nreturn c\n}\n+// Clone implements controller.Clone.\n+func (c *cpuacctController) Clone() controller {\n+ new := &cpuacctController{}\n+ new.controllerCommon.cloneFrom(&new.controllerCommon)\n+ return c\n+}\n+\n// AddControlFiles implements controller.AddControlFiles.\nfunc (c *cpuacctController) AddControlFiles(ctx context.Context, creds *auth.Credentials, cg *cgroupInode, contents map[string]kernfs.Inode) {\ncpuacctCG := &cpuacctCgroup{cg}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/cpuset.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/cpuset.go",
"diff": "@@ -61,6 +61,20 @@ func newCPUSetController(k *kernel.Kernel, fs *filesystem) *cpusetController {\nreturn c\n}\n+// Clone implements controller.Clone.\n+func (c *cpusetController) Clone() controller {\n+ cpus := c.cpus.Clone()\n+ mems := c.mems.Clone()\n+ new := &cpusetController{\n+ maxCpus: c.maxCpus,\n+ maxMems: c.maxMems,\n+ cpus: &cpus,\n+ mems: &mems,\n+ }\n+ new.controllerCommon.cloneFrom(&c.controllerCommon)\n+ return new\n+}\n+\n// AddControlFiles implements controller.AddControlFiles.\nfunc (c *cpusetController) AddControlFiles(ctx context.Context, creds *auth.Credentials, _ *cgroupInode, contents map[string]kernfs.Inode) {\ncontents[\"cpuset.cpus\"] = c.fs.newControllerWritableFile(ctx, creds, &cpusData{c: c})\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/job.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/job.go",
"diff": "@@ -38,6 +38,15 @@ func newJobController(fs *filesystem) *jobController {\nreturn c\n}\n+// Clone implements controller.Clone.\n+func (c *jobController) Clone() controller {\n+ new := &jobController{\n+ id: c.id,\n+ }\n+ new.controllerCommon.cloneFrom(&c.controllerCommon)\n+ return new\n+}\n+\nfunc (c *jobController) AddControlFiles(ctx context.Context, creds *auth.Credentials, _ *cgroupInode, contents map[string]kernfs.Inode) {\ncontents[\"job.id\"] = c.fs.newControllerWritableFile(ctx, creds, &jobIDData{c: c})\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/memory.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/memory.go",
"diff": "@@ -63,6 +63,17 @@ func newMemoryController(fs *filesystem, defaults map[string]int64) *memoryContr\nreturn c\n}\n+// Clone implements controller.Clone.\n+func (c *memoryController) Clone() controller {\n+ new := &memoryController{\n+ limitBytes: c.limitBytes,\n+ softLimitBytes: c.softLimitBytes,\n+ moveChargeAtImmigrate: c.moveChargeAtImmigrate,\n+ }\n+ new.controllerCommon.cloneFrom(&c.controllerCommon)\n+ return new\n+}\n+\n// AddControlFiles implements controller.AddControlFiles.\nfunc (c *memoryController) AddControlFiles(ctx context.Context, creds *auth.Credentials, _ *cgroupInode, contents map[string]kernfs.Inode) {\ncontents[\"memory.usage_in_bytes\"] = c.fs.newControllerFile(ctx, creds, &memoryUsageInBytesData{})\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/cgroup.cc",
"new_path": "test/syscalls/linux/cgroup.cc",
"diff": "@@ -298,6 +298,34 @@ TEST(Cgroup, SubcontainerInitiallyEmpty) {\nEXPECT_TRUE(procs.empty());\n}\n+TEST(Cgroup, SubcontainersHaveIndependentState) {\n+ SKIP_IF(!CgroupsAvailable());\n+ Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));\n+ // Use the job cgroup as a simple cgroup with state we can modify.\n+ Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"job\"));\n+\n+ // Initially job.id should be the default value of 0.\n+ EXPECT_THAT(c.ReadIntegerControlFile(\"job.id\"), IsPosixErrorOkAndHolds(0));\n+\n+ // Set id so it is no longer the default.\n+ ASSERT_NO_ERRNO(c.WriteIntegerControlFile(\"job.id\", 1234));\n+\n+ // Create a child. The child should inherit the value from the parent, and not\n+ // the default value of 0.\n+ Cgroup child = ASSERT_NO_ERRNO_AND_VALUE(c.CreateChild(\"child1\"));\n+ EXPECT_THAT(child.ReadIntegerControlFile(\"job.id\"),\n+ IsPosixErrorOkAndHolds(1234));\n+\n+ // Setting the parent doesn't change the child.\n+ ASSERT_NO_ERRNO(c.WriteIntegerControlFile(\"job.id\", 5678));\n+ EXPECT_THAT(child.ReadIntegerControlFile(\"job.id\"),\n+ IsPosixErrorOkAndHolds(1234));\n+\n+ // Likewise, setting the child doesn't change the parent.\n+ ASSERT_NO_ERRNO(child.WriteIntegerControlFile(\"job.id\", 9012));\n+ EXPECT_THAT(c.ReadIntegerControlFile(\"job.id\"), IsPosixErrorOkAndHolds(5678));\n+}\n+\nTEST(MemoryCgroup, MemoryUsageInBytes) {\nSKIP_IF(!CgroupsAvailable());\n"
}
] | Go | Apache License 2.0 | google/gvisor | cgroupfs: Push controller state to inodes.
This is necessary for subcontainers since each cgroup needs to carry
independent state.
PiperOrigin-RevId: 429646074 |
260,004 | 18.02.2022 16:29:17 | 28,800 | ec0db35a80230f4347b8de95e5d342ce3cbdf090 | Test all ones UDP checksum is valid
...after observing the all ones checksum in an outgoing packet. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/udp_test.go",
"new_path": "pkg/tcpip/transport/udp/udp_test.go",
"diff": "@@ -2049,7 +2049,15 @@ func TestChecksumWithZeroValueOnesComplementSum(t *testing.T) {\nt.Fatal(\"Packet wasn't written out\")\n}\n+ v := stack.PayloadSince(pkt.NetworkHeader())\nchecker.IPv6(t, stack.PayloadSince(pkt.NetworkHeader()), checker.UDP(checker.TransportChecksum(math.MaxUint16)))\n+\n+ // Make sure the all ones checksum is valid.\n+ hdr := header.IPv6(v)\n+ udp := header.UDP(hdr.Payload())\n+ if src, dst, payloadXsum := hdr.SourceAddress(), hdr.DestinationAddress(), header.Checksum(udp.Payload(), 0); !udp.IsChecksumValid(src, dst, payloadXsum) {\n+ t.Errorf(\"got udp.IsChecksumValid(%s, %s, %d) = false, want = true\", src, dst, payloadXsum)\n+ }\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Test all ones UDP checksum is valid
...after observing the all ones checksum in an outgoing packet.
PiperOrigin-RevId: 429671893 |
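Some background on why the all-ones checksum must be accepted: RFC 768 reserves a transmitted UDP checksum of zero to mean "no checksum was computed", so a sender whose computed checksum would be zero puts 0xffff on the wire instead, and a receiver that folds the received field into its own one's-complement sum still ends up with all ones. A small self-contained sketch of that arithmetic (illustrative helpers, not netstack's header package):

```go
// Sketch of RFC 768 checksum finalization and verification.
package udpsum

// onesAdd adds two 16-bit values in one's-complement arithmetic
// (with end-around carry).
func onesAdd(a, b uint16) uint16 {
	v := uint32(a) + uint32(b)
	return uint16(v&0xffff) + uint16(v>>16)
}

// finalize converts the one's-complement sum of the packet (checksum field
// treated as zero) into the value written to the header. A computed value of
// zero is sent as 0xffff, because zero is reserved for "no checksum".
func finalize(sum uint16) uint16 {
	if csum := ^sum; csum != 0 {
		return csum
	}
	return 0xffff
}

// valid reports whether a received checksum matches: folding the received
// field into the same sum must produce all ones. (A received value of zero
// means the sender skipped the checksum and is handled separately.)
func valid(sum, received uint16) bool {
	return onesAdd(sum, received) == 0xffff
}
```

In the scenario the test covers, the complemented sum comes out to zero, so the emitted field is 0xffff — and onesAdd(sum, 0xffff) still yields all ones, which is exactly what the new assertion verifies.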
259,907 | 18.02.2022 21:03:18 | 28,800 | e4f093f7b1739b0872774057932704613343db77 | Fix lisafs client bugs for S/R support.
There was a bug in checking for the availability of STATX_SIZE on restore.
Make the gofer client resilient to bad open(flags). Like 9P, make lisafs
default to O_RDONLY when a valid open mode is not available. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/handle.go",
"new_path": "pkg/sentry/fsimpl/gofer/handle.go",
"diff": "@@ -18,6 +18,7 @@ import (\n\"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/lisafs\"\n+ \"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/p9\"\n\"gvisor.dev/gvisor/pkg/safemem\"\n\"gvisor.dev/gvisor/pkg/sentry/hostfd\"\n@@ -42,7 +43,7 @@ func openHandle(ctx context.Context, file p9file, read, write, trunc bool) (hand\nif err != nil {\nreturn handle{fd: -1}, err\n}\n- var flags p9.OpenFlags\n+ flags := p9.ReadOnly\nswitch {\ncase read && !write:\nflags = p9.ReadOnly\n@@ -50,6 +51,8 @@ func openHandle(ctx context.Context, file p9file, read, write, trunc bool) (hand\nflags = p9.WriteOnly\ncase read && write:\nflags = p9.ReadWrite\n+ default:\n+ log.Debugf(\"openHandle called with read = write = false. Falling back to read only FD.\")\n}\nif trunc {\nflags |= p9.OpenTruncate\n@@ -71,7 +74,7 @@ func openHandle(ctx context.Context, file p9file, read, write, trunc bool) (hand\n// Preconditions: read || write.\nfunc openHandleLisa(ctx context.Context, fdLisa lisafs.ClientFD, read, write, trunc bool) (handle, error) {\n- var flags uint32\n+ flags := uint32(unix.O_RDONLY)\nswitch {\ncase read && write:\nflags = unix.O_RDWR\n@@ -80,7 +83,7 @@ func openHandleLisa(ctx context.Context, fdLisa lisafs.ClientFD, read, write, tr\ncase write:\nflags = unix.O_WRONLY\ndefault:\n- panic(\"tried to open unreadable and unwritable handle\")\n+ log.Debugf(\"openHandleLisa called with read = write = false. Falling back to read only FD.\")\n}\nif trunc {\nflags |= unix.O_TRUNC\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/save_restore.go",
"new_path": "pkg/sentry/fsimpl/gofer/save_restore.go",
"diff": "@@ -325,7 +325,7 @@ func (d *dentry) restoreFileLisa(ctx context.Context, inode *lisafs.Inode, opts\ndefer d.metadataMu.Unlock()\nif d.isRegularFile() {\nif opts.ValidateFileSizes {\n- if inode.Stat.Mask&linux.STATX_SIZE != 0 {\n+ if inode.Stat.Mask&linux.STATX_SIZE == 0 {\nreturn vfs.ErrCorruption{fmt.Errorf(\"gofer.dentry(%q).restoreFile: file size validation failed: file size not available\", genericDebugPathname(d))}\n}\nif d.size != inode.Stat.Size {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix lisafs client bugs for S/R support.
- There was a bug in checking for the availability of STATX_SIZE on restore.
- Make the gofer client resilient to bad open(flags). Like 9P, make lisafs
default to O_RDONLY when a valid open mode is not available.
PiperOrigin-RevId: 429705352 |
259,885 | 22.02.2022 12:22:01 | 28,800 | 377cfd813d4c36fed538274136b85f06fe364613 | Add sync.SeqCount.BeginWriteOk. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sync/seqcount.go",
"new_path": "pkg/sync/seqcount.go",
"diff": "@@ -117,7 +117,17 @@ func (s *SeqCount) BeginWrite() {\n}\n}\n-// EndWrite ends the effect of a preceding BeginWrite.\n+// BeginWriteOk combines the semantics of ReadOk and BeginWrite. If the reader\n+// critical section initiated by a previous call to BeginRead() that returned\n+// epoch did not race with any writer critical sections, it begins a writer\n+// critical section and returns true. Otherwise it does nothing and returns\n+// false.\n+func (s *SeqCount) BeginWriteOk(epoch SeqCountEpoch) bool {\n+ return atomic.CompareAndSwapUint32(&s.epoch, uint32(epoch), uint32(epoch)+1)\n+}\n+\n+// EndWrite ends the effect of a preceding BeginWrite or successful\n+// BeginWriteOk.\nfunc (s *SeqCount) EndWrite() {\nif epoch := atomic.AddUint32(&s.epoch, 1); epoch&1 != 0 {\npanic(\"SeqCount.EndWrite outside writer critical section\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add sync.SeqCount.BeginWriteOk.
PiperOrigin-RevId: 430268914 |
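BeginWriteOk is the upgrade path for an optimistic reader: if the epoch returned by BeginRead is still current, the caller atomically enters a writer critical section; otherwise it must retry. A minimal usage sketch follows — the counter type is made up, and real gVisor code typically reads shared state through atomic or generated seqatomic helpers rather than the plain load shown here:

```go
// Illustrative optimistic-read / validated-write pattern.
package seqsketch

import "gvisor.dev/gvisor/pkg/sync"

type counter struct {
	mu  sync.Mutex    // serializes writers
	seq sync.SeqCount // lets readers avoid the lock
	val int64
}

// addIfUnchanged computes a new value from a lock-free read and commits it
// only if no writer ran in between; otherwise it retries with fresh state.
func (c *counter) addIfUnchanged(delta int64) {
	for {
		epoch := c.seq.BeginRead()
		next := c.val + delta // optimistic read (plain load for brevity)

		c.mu.Lock()
		if c.seq.BeginWriteOk(epoch) {
			// No writer intervened since BeginRead; safe to publish.
			c.val = next
			c.seq.EndWrite()
			c.mu.Unlock()
			return
		}
		// Raced with a writer: drop the stale value and retry.
		c.mu.Unlock()
	}
}
```

The pivot_root change later in this log uses exactly this shape: BeginRead before the mount checks, then BeginWriteOk under mountMu with a retry when validation fails.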
259,951 | 22.02.2022 16:16:33 | 28,800 | 9d5239e7147d4385064a06ed159ae709c2006deb | Rename ControlMessages type to indicate the cmsg direction
With the introduction of sendable control messages, the original cmsg type
is now renamed to ReceivableControlMessages. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -2897,7 +2897,7 @@ func (s *socketOpsCommon) nonBlockingRead(ctx context.Context, dst usermem.IOSeq\nreturn res.Count, 0, nil, 0, cmsg, syserr.TranslateNetstackError(err)\n}\n-func (s *socketOpsCommon) controlMessages(cm tcpip.ControlMessages) socket.ControlMessages {\n+func (s *socketOpsCommon) controlMessages(cm tcpip.ReceivableControlMessages) socket.ControlMessages {\nreadCM := socket.NewIPControlMessages(s.family, cm)\nreturn socket.ControlMessages{\nIP: socket.IPControlMessages{\n@@ -2927,7 +2927,7 @@ func (s *socketOpsCommon) controlMessages(cm tcpip.ControlMessages) socket.Contr\n// successfully writing packet data out to userspace.\n//\n// Precondition: s.readMu must be locked.\n-func (s *socketOpsCommon) updateTimestamp(cm tcpip.ControlMessages) {\n+func (s *socketOpsCommon) updateTimestamp(cm tcpip.ReceivableControlMessages) {\n// Save the SIOCGSTAMP timestamp only if SO_TIMESTAMP is disabled.\nif !s.sockOptTimestamp {\ns.timestampValid = true\n@@ -2984,7 +2984,7 @@ func (s *socketOpsCommon) recvErr(t *kernel.Task, dst usermem.IOSequence) (int,\n// The original destination address of the datagram that caused the error is\n// supplied via msg_name. -- recvmsg(2)\ndstAddr, dstAddrLen := socket.ConvertAddress(addrFamilyFromNetProto(sockErr.NetProto), sockErr.Dst)\n- cmgs := socket.ControlMessages{IP: socket.NewIPControlMessages(s.family, tcpip.ControlMessages{SockErr: sockErr})}\n+ cmgs := socket.ControlMessages{IP: socket.NewIPControlMessages(s.family, tcpip.ReceivableControlMessages{SockErr: sockErr})}\nreturn n, msgFlags, dstAddr, dstAddrLen, cmgs, syserr.FromError(err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/socket.go",
"new_path": "pkg/sentry/socket/socket.go",
"diff": "@@ -119,9 +119,9 @@ func sockErrCmsgToLinux(sockErr *tcpip.SockError) linux.SockErrCMsg {\n}\n}\n-// NewIPControlMessages converts the tcpip ControlMessages (which does not have\n-// Linux specific format) to Linux format.\n-func NewIPControlMessages(family int, cmgs tcpip.ControlMessages) IPControlMessages {\n+// NewIPControlMessages converts the tcpip.ReceivableControlMessages (which does\n+// not have Linux specific format) to Linux format.\n+func NewIPControlMessages(family int, cmgs tcpip.ReceivableControlMessages) IPControlMessages {\nvar orgDstAddr linux.SockAddr\nif cmgs.HasOriginalDstAddress {\norgDstAddr, _ = ConvertAddress(family, cmgs.OriginalDstAddress)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/checker/checker.go",
"new_path": "pkg/tcpip/checker/checker.go",
"diff": "@@ -36,7 +36,7 @@ type NetworkChecker func(*testing.T, []header.Network)\ntype TransportChecker func(*testing.T, header.Transport)\n// ControlMessagesChecker is a function to check a property of ancillary data.\n-type ControlMessagesChecker func(*testing.T, tcpip.ControlMessages)\n+type ControlMessagesChecker func(*testing.T, tcpip.ReceivableControlMessages)\n// IPv4 checks the validity and properties of the given IPv4 packet. It is\n// expected to be used in conjunction with other network checkers for specific\n@@ -289,7 +289,7 @@ func FragmentFlags(flags uint8) NetworkChecker {\n// ReceiveTClass creates a checker that checks the TCLASS field in\n// ControlMessages.\nfunc ReceiveTClass(want uint32) ControlMessagesChecker {\n- return func(t *testing.T, cm tcpip.ControlMessages) {\n+ return func(t *testing.T, cm tcpip.ReceivableControlMessages) {\nt.Helper()\nif !cm.HasTClass {\nt.Error(\"got cm.HasTClass = false, want = true\")\n@@ -302,7 +302,7 @@ func ReceiveTClass(want uint32) ControlMessagesChecker {\n// NoTClassReceived creates a checker that checks the absence of the TCLASS\n// field in ControlMessages.\nfunc NoTClassReceived() ControlMessagesChecker {\n- return func(t *testing.T, cm tcpip.ControlMessages) {\n+ return func(t *testing.T, cm tcpip.ReceivableControlMessages) {\nt.Helper()\nif cm.HasTClass {\nt.Error(\"got cm.HasTClass = true, want = false\")\n@@ -312,7 +312,7 @@ func NoTClassReceived() ControlMessagesChecker {\n// ReceiveTOS creates a checker that checks the TOS field in ControlMessages.\nfunc ReceiveTOS(want uint8) ControlMessagesChecker {\n- return func(t *testing.T, cm tcpip.ControlMessages) {\n+ return func(t *testing.T, cm tcpip.ReceivableControlMessages) {\nt.Helper()\nif !cm.HasTOS {\nt.Error(\"got cm.HasTOS = false, want = true\")\n@@ -325,7 +325,7 @@ func ReceiveTOS(want uint8) ControlMessagesChecker {\n// NoTOSReceived creates a checker that checks the absence of the TOS field in\n// ControlMessages.\nfunc NoTOSReceived() ControlMessagesChecker {\n- return func(t *testing.T, cm tcpip.ControlMessages) {\n+ return func(t *testing.T, cm tcpip.ReceivableControlMessages) {\nt.Helper()\nif cm.HasTOS {\nt.Error(\"got cm.HasTOS = true, want = false\")\n@@ -336,7 +336,7 @@ func NoTOSReceived() ControlMessagesChecker {\n// ReceiveTTL creates a checker that checks the TTL field in\n// ControlMessages.\nfunc ReceiveTTL(want uint8) ControlMessagesChecker {\n- return func(t *testing.T, cm tcpip.ControlMessages) {\n+ return func(t *testing.T, cm tcpip.ReceivableControlMessages) {\nt.Helper()\nif !cm.HasTTL {\nt.Errorf(\"got cm.HasTTL = %t, want = true\", cm.HasTTL)\n@@ -349,7 +349,7 @@ func ReceiveTTL(want uint8) ControlMessagesChecker {\n// NoTTLReceived creates a checker that checks the absence of the TTL field in\n// ControlMessages.\nfunc NoTTLReceived() ControlMessagesChecker {\n- return func(t *testing.T, cm tcpip.ControlMessages) {\n+ return func(t *testing.T, cm tcpip.ReceivableControlMessages) {\nt.Helper()\nif cm.HasTTL {\nt.Error(\"got cm.HasTTL = true, want = false\")\n@@ -360,7 +360,7 @@ func NoTTLReceived() ControlMessagesChecker {\n// ReceiveHopLimit creates a checker that checks the HopLimit field in\n// ControlMessages.\nfunc ReceiveHopLimit(want uint8) ControlMessagesChecker {\n- return func(t *testing.T, cm tcpip.ControlMessages) {\n+ return func(t *testing.T, cm tcpip.ReceivableControlMessages) {\nt.Helper()\nif !cm.HasHopLimit {\nt.Errorf(\"got cm.HasHopLimit = %t, want = true\", cm.HasHopLimit)\n@@ -373,7 +373,7 @@ func ReceiveHopLimit(want 
uint8) ControlMessagesChecker {\n// NoHopLimitReceived creates a checker that checks the absence of the HopLimit\n// field in ControlMessages.\nfunc NoHopLimitReceived() ControlMessagesChecker {\n- return func(t *testing.T, cm tcpip.ControlMessages) {\n+ return func(t *testing.T, cm tcpip.ReceivableControlMessages) {\nt.Helper()\nif cm.HasHopLimit {\nt.Error(\"got cm.HasHopLimit = true, want = false\")\n@@ -384,7 +384,7 @@ func NoHopLimitReceived() ControlMessagesChecker {\n// ReceiveIPPacketInfo creates a checker that checks the PacketInfo field in\n// ControlMessages.\nfunc ReceiveIPPacketInfo(want tcpip.IPPacketInfo) ControlMessagesChecker {\n- return func(t *testing.T, cm tcpip.ControlMessages) {\n+ return func(t *testing.T, cm tcpip.ReceivableControlMessages) {\nt.Helper()\nif !cm.HasIPPacketInfo {\nt.Error(\"got cm.HasIPPacketInfo = false, want = true\")\n@@ -397,7 +397,7 @@ func ReceiveIPPacketInfo(want tcpip.IPPacketInfo) ControlMessagesChecker {\n// NoIPPacketInfoReceived creates a checker that checks the PacketInfo field in\n// ControlMessages.\nfunc NoIPPacketInfoReceived() ControlMessagesChecker {\n- return func(t *testing.T, cm tcpip.ControlMessages) {\n+ return func(t *testing.T, cm tcpip.ReceivableControlMessages) {\nt.Helper()\nif cm.HasIPPacketInfo {\nt.Error(\"got cm.HasIPPacketInfo = true, want = false\")\n@@ -408,7 +408,7 @@ func NoIPPacketInfoReceived() ControlMessagesChecker {\n// ReceiveIPv6PacketInfo creates a checker that checks the IPv6PacketInfo field\n// in ControlMessages.\nfunc ReceiveIPv6PacketInfo(want tcpip.IPv6PacketInfo) ControlMessagesChecker {\n- return func(t *testing.T, cm tcpip.ControlMessages) {\n+ return func(t *testing.T, cm tcpip.ReceivableControlMessages) {\nt.Helper()\nif !cm.HasIPv6PacketInfo {\nt.Error(\"got cm.HasIPv6PacketInfo = false, want = true\")\n@@ -421,7 +421,7 @@ func ReceiveIPv6PacketInfo(want tcpip.IPv6PacketInfo) ControlMessagesChecker {\n// NoIPv6PacketInfoReceived creates a checker that checks the PacketInfo field\n// in ControlMessages.\nfunc NoIPv6PacketInfoReceived() ControlMessagesChecker {\n- return func(t *testing.T, cm tcpip.ControlMessages) {\n+ return func(t *testing.T, cm tcpip.ReceivableControlMessages) {\nt.Helper()\nif cm.HasIPv6PacketInfo {\nt.Error(\"got cm.HasIPv6PacketInfo = true, want = false\")\n@@ -432,7 +432,7 @@ func NoIPv6PacketInfoReceived() ControlMessagesChecker {\n// ReceiveOriginalDstAddr creates a checker that checks the OriginalDstAddress\n// field in ControlMessages.\nfunc ReceiveOriginalDstAddr(want tcpip.FullAddress) ControlMessagesChecker {\n- return func(t *testing.T, cm tcpip.ControlMessages) {\n+ return func(t *testing.T, cm tcpip.ReceivableControlMessages) {\nt.Helper()\nif !cm.HasOriginalDstAddress {\nt.Error(\"got cm.HasOriginalDstAddress = false, want = true\")\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -416,10 +416,10 @@ func (l *LimitedWriter) Write(p []byte) (int, error) {\nreturn n, err\n}\n-// A ControlMessages contains socket control messages for IP sockets.\n+// ReceivableControlMessages holds control messages that can be received.\n//\n// +stateify savable\n-type ControlMessages struct {\n+type ReceivableControlMessages struct {\n// HasTimestamp indicates whether Timestamp is valid/set.\nHasTimestamp bool\n@@ -514,7 +514,7 @@ type ReadResult struct {\nTotal int\n// ControlMessages is the control messages received.\n- ControlMessages ControlMessages\n+ ControlMessages ReceivableControlMessages\n// RemoteAddr is the remote address if ReadOptions.NeedAddr is true.\nRemoteAddr FullAddress\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip_state.go",
"new_path": "pkg/tcpip/tcpip_state.go",
"diff": "@@ -18,10 +18,10 @@ import (\n\"time\"\n)\n-func (c *ControlMessages) saveTimestamp() int64 {\n+func (c *ReceivableControlMessages) saveTimestamp() int64 {\nreturn c.Timestamp.UnixNano()\n}\n-func (c *ControlMessages) loadTimestamp(nsec int64) {\n+func (c *ReceivableControlMessages) loadTimestamp(nsec int64) {\nc.Timestamp = time.Unix(0, nsec)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/endpoint.go",
"new_path": "pkg/tcpip/transport/icmp/endpoint.go",
"diff": "@@ -187,7 +187,7 @@ func (e *endpoint) Read(dst io.Writer, opts tcpip.ReadOptions) (tcpip.ReadResult\n// Control Messages\n// TODO(https://gvisor.dev/issue/7012): Share control message code with other\n// network endpoints.\n- cm := tcpip.ControlMessages{\n+ cm := tcpip.ReceivableControlMessages{\nHasTimestamp: true,\nTimestamp: p.receivedAt,\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/packet/endpoint.go",
"new_path": "pkg/tcpip/transport/packet/endpoint.go",
"diff": "@@ -180,7 +180,7 @@ func (ep *endpoint) Read(dst io.Writer, opts tcpip.ReadOptions) (tcpip.ReadResul\nres := tcpip.ReadResult{\nTotal: packet.data.Size(),\n- ControlMessages: tcpip.ControlMessages{\n+ ControlMessages: tcpip.ReceivableControlMessages{\nHasTimestamp: true,\nTimestamp: packet.receivedAt,\n},\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/endpoint.go",
"new_path": "pkg/tcpip/transport/raw/endpoint.go",
"diff": "@@ -236,7 +236,7 @@ func (e *endpoint) Read(dst io.Writer, opts tcpip.ReadOptions) (tcpip.ReadResult\n// Control Messages\n// TODO(https://gvisor.dev/issue/7012): Share control message code with other\n// network endpoints.\n- cm := tcpip.ControlMessages{\n+ cm := tcpip.ReceivableControlMessages{\nHasTimestamp: true,\nTimestamp: pkt.receivedAt,\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/endpoint.go",
"new_path": "pkg/tcpip/transport/udp/endpoint.go",
"diff": "@@ -239,7 +239,7 @@ func (e *endpoint) Read(dst io.Writer, opts tcpip.ReadOptions) (tcpip.ReadResult\n// Control Messages\n// TODO(https://gvisor.dev/issue/7012): Share control message code with other\n// network endpoints.\n- cm := tcpip.ControlMessages{\n+ cm := tcpip.ReceivableControlMessages{\nHasTimestamp: true,\nTimestamp: p.receivedAt,\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Rename ControlMessages type to indicate the cmsg direction
With the introduction of sendable control messages, the original cmsg type
is now renamed to ReceivableControlMessages.
PiperOrigin-RevId: 430319598 |
259,907 | 22.02.2022 17:05:47 | 28,800 | 395c38be75d1757d3eff9241602eec73d658b40c | Add ECONNABORTED to the partial result error list.
Allow the application to receive a partial result due to ECONNABORTED, similar
to how ECONNRESET and ETIMEDOUT are handled.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/error.go",
"new_path": "pkg/sentry/syscalls/linux/error.go",
"diff": "@@ -156,9 +156,12 @@ func handleIOErrorImpl(ctx context.Context, partialResult bool, errOrig, intr er\nreturn true, nil\ncase linuxerr.Equals(linuxerr.ECONNRESET, translatedErr):\nfallthrough\n+ case linuxerr.Equals(linuxerr.ECONNABORTED, translatedErr):\n+ fallthrough\ncase linuxerr.Equals(linuxerr.ETIMEDOUT, translatedErr):\n- // For TCP sendfile connections, we may have a reset or timeout. But we\n- // should just return n as the result.\n+ // For TCP sendfile connections, we may have a reset, abort or timeout. But\n+ // we should just return the partial result. The next call will return the\n+ // error without a partial IO operation.\nreturn true, nil\ncase linuxerr.Equals(linuxerr.EWOULDBLOCK, translatedErr):\n// Syscall would block, but completed a partial read/write.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add ECONNABORTED to the partial result error list.
Allow the application to receive a partial result due to ECONNABORTED, similar
to how ECONNRESET and ETIMEDOUT are handled.
Reported-by: syzbot+a0029790ed0bda86356e@syzkaller.appspotmail.com
PiperOrigin-RevId: 430328305 |
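The pattern this record extends is: if a read or write already moved some bytes before the connection failed, report the partial count and let the next syscall on the descriptor surface the error on its own. A condensed sketch of that decision — a hypothetical helper, not the sentry's actual handleIOErrorImpl signature:

```go
// Sketch: keep the partial byte count for "connection went away" errors.
package partialio

import (
	"errors"

	"golang.org/x/sys/unix"
)

// ioResult returns the partial count and drops the error when some bytes were
// transferred before a reset, abort, or timeout; other errors pass through.
func ioResult(n int, err error) (int, error) {
	if n == 0 || err == nil {
		return n, err
	}
	switch {
	case errors.Is(err, unix.ECONNRESET),
		errors.Is(err, unix.ECONNABORTED),
		errors.Is(err, unix.ETIMEDOUT):
		return n, nil // swallow the error; the partial I/O still counts
	default:
		return n, err
	}
}
```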
259,898 | 23.02.2022 00:41:50 | 28,800 | 45835a60fd09e083cdd39058ab30ca3d5a6bac47 | Fix the RST response to unacceptable ACK in SYN-RCVD
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/connect.go",
"new_path": "pkg/tcpip/transport/tcp/connect.go",
"diff": "@@ -214,12 +214,11 @@ func (h *handshake) resetToSynRcvd(iss seqnum.Value, irs seqnum.Value, opts head\n// response.\nfunc (h *handshake) checkAck(s *segment) bool {\nif s.flags.Contains(header.TCPFlagAck) && s.ackNumber != h.iss+1 {\n- // RFC 793, page 36, states that a reset must be generated when\n- // the connection is in any non-synchronized state and an\n- // incoming segment acknowledges something not yet sent. The\n- // connection remains in the same state.\n- ack := s.sequenceNumber.Add(s.logicalLen())\n- h.ep.sendRaw(buffer.VectorisedView{}, header.TCPFlagRst|header.TCPFlagAck, s.ackNumber, ack, 0)\n+ // RFC 793, page 72 (https://datatracker.ietf.org/doc/html/rfc793#page-72):\n+ // If the segment acknowledgment is not acceptable, form a reset segment,\n+ // <SEQ=SEG.ACK><CTL=RST>\n+ // and send it.\n+ h.ep.sendRaw(buffer.VectorisedView{}, header.TCPFlagRst, s.ackNumber, 0, 0)\nreturn false\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/runner/defs.bzl",
"new_path": "test/packetimpact/runner/defs.bzl",
"diff": "@@ -309,7 +309,6 @@ ALL_TESTS = [\n),\nPacketimpactTestInfo(\nname = \"tcp_acceptable_ack_syn_rcvd\",\n- expect_netstack_failure = True,\n),\n]\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix the RST response to unacceptable ACK in SYN-RCVD
Fixes #7199.
PiperOrigin-RevId: 430388697 |
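For reference, the two response shapes differ in both the flags and the sequence numbers: before this change the stack answered an unacceptable ACK in SYN-RCVD with RST|ACK and an ACK number derived from the offending segment, whereas RFC 793 page 72 asks for a bare RST whose sequence number is the segment's ACK. A toy illustration with hypothetical types (not netstack's segment):

```go
// Toy types showing the RFC 793 p.72 reply for a bad ACK in SYN-RCVD.
package rfc793sketch

type segment struct{ seqNum, ackNum uint32 }

type tcpReply struct {
	rst, ack       bool
	seqNum, ackNum uint32
}

// rstForBadAck builds <SEQ=SEG.ACK><CTL=RST>: the ACK bit is clear and the
// ACK number is unused, unlike the previous RST|ACK response.
func rstForBadAck(s segment) tcpReply {
	return tcpReply{rst: true, ack: false, seqNum: s.ackNum, ackNum: 0}
}
```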
259,951 | 23.02.2022 15:38:20 | 28,800 | 21dffa8f4c8a6d9c744f3629782eab26890d7187 | Support sending TTL and HopLimit | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/control/control.go",
"new_path": "pkg/sentry/socket/control/control.go",
"diff": "package control\nimport (\n+ \"math\"\n\"time\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n@@ -522,6 +523,10 @@ func CmsgsSpace(t *kernel.Task, cmsgs socket.ControlMessages) int {\n}\n// Parse parses a raw socket control message into portable objects.\n+// TODO(https://gvisor.dev/issue/7188): Parse is only called on raw cmsg that\n+// are used when sending a messages. We should fail with EINVAL when we find a\n+// non-sendable control messages (such as IP_RECVERR). And the function should\n+// be renamed to reflect that.\nfunc Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) (socket.ControlMessages, error) {\nvar (\ncmsgs socket.ControlMessages\n@@ -601,10 +606,13 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)\nif length < linux.SizeOfControlMessageTTL {\nreturn socket.ControlMessages{}, linuxerr.EINVAL\n}\n- cmsgs.IP.HasTTL = true\nvar ttl primitive.Uint32\nttl.UnmarshalUnsafe(buf)\n+ if ttl == 0 || ttl > math.MaxUint8 {\n+ return socket.ControlMessages{}, linuxerr.EINVAL\n+ }\ncmsgs.IP.TTL = uint32(ttl)\n+ cmsgs.IP.HasTTL = true\ncase linux.IP_PKTINFO:\nif length < linux.SizeOfControlMessageIPPacketInfo {\n@@ -661,9 +669,12 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)\nif length < linux.SizeOfControlMessageHopLimit {\nreturn socket.ControlMessages{}, linuxerr.EINVAL\n}\n- cmsgs.IP.HasHopLimit = true\nvar hoplimit primitive.Uint32\nhoplimit.UnmarshalUnsafe(buf)\n+ if hoplimit > math.MaxUint8 {\n+ return socket.ControlMessages{}, linuxerr.EINVAL\n+ }\n+ cmsgs.IP.HasHopLimit = true\ncmsgs.IP.HopLimit = uint32(hoplimit)\ncase linux.IPV6_RECVORIGDSTADDR:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -2869,7 +2869,7 @@ func (s *socketOpsCommon) nonBlockingRead(ctx context.Context, dst usermem.IOSeq\nflags |= linux.MSG_TRUNC\n}\n- return msgLen, flags, addr, addrLen, s.controlMessages(res.ControlMessages), nil\n+ return msgLen, flags, addr, addrLen, s.netstackToLinuxControlMessages(res.ControlMessages), nil\n}\nif peek {\n@@ -2892,12 +2892,12 @@ func (s *socketOpsCommon) nonBlockingRead(ctx context.Context, dst usermem.IOSeq\ns.Endpoint.ModerateRecvBuf(n)\n}\n- cmsg := s.controlMessages(res.ControlMessages)\n+ cmsg := s.netstackToLinuxControlMessages(res.ControlMessages)\ns.fillCmsgInq(&cmsg)\nreturn res.Count, 0, nil, 0, cmsg, syserr.TranslateNetstackError(err)\n}\n-func (s *socketOpsCommon) controlMessages(cm tcpip.ReceivableControlMessages) socket.ControlMessages {\n+func (s *socketOpsCommon) netstackToLinuxControlMessages(cm tcpip.ReceivableControlMessages) socket.ControlMessages {\nreadCM := socket.NewIPControlMessages(s.family, cm)\nreturn socket.ControlMessages{\nIP: socket.IPControlMessages{\n@@ -2923,6 +2923,15 @@ func (s *socketOpsCommon) controlMessages(cm tcpip.ReceivableControlMessages) so\n}\n}\n+func (s *socketOpsCommon) linuxToNetstackControlMessages(cm socket.ControlMessages) tcpip.SendableControlMessages {\n+ return tcpip.SendableControlMessages{\n+ HasTTL: cm.IP.HasTTL,\n+ TTL: uint8(cm.IP.TTL),\n+ HasHopLimit: cm.IP.HasHopLimit,\n+ HopLimit: uint8(cm.IP.HopLimit),\n+ }\n+}\n+\n// updateTimestamp sets the timestamp for SIOCGSTAMP. It should be called after\n// successfully writing packet data out to userspace.\n//\n@@ -3086,6 +3095,7 @@ func (s *socketOpsCommon) SendMsg(t *kernel.Task, src usermem.IOSequence, to []b\nTo: addr,\nMore: flags&linux.MSG_MORE != 0,\nEndOfRecord: flags&linux.MSG_EOR != 0,\n+ ControlMessages: s.linuxToNetstackControlMessages(controlMessages),\n}\nr := src.Reader(t)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -416,13 +416,28 @@ func (l *LimitedWriter) Write(p []byte) (int, error) {\nreturn n, err\n}\n-// ReceivableControlMessages holds control messages that can be received.\n+// SendableControlMessages contains socket control messages that can be written.\n//\n// +stateify savable\n-type ReceivableControlMessages struct {\n- // HasTimestamp indicates whether Timestamp is valid/set.\n- HasTimestamp bool\n+type SendableControlMessages struct {\n+ // HasTTL indicates whether TTL is valid/set.\n+ HasTTL bool\n+\n+ // TTL is the IPv4 Time To Live of the associated packet.\n+ TTL uint8\n+\n+ // HasHopLimit indicates whether HopLimit is valid/set.\n+ HasHopLimit bool\n+ // HopLimit is the IPv6 Hop Limit of the associated packet.\n+ HopLimit uint8\n+}\n+\n+// ReceivableControlMessages contains socket control messages that can be\n+// received.\n+//\n+// +stateify savable\n+type ReceivableControlMessages struct {\n// Timestamp is the time that the last packet used to create the read data\n// was received.\nTimestamp time.Time `state:\".(int64)\"`\n@@ -451,6 +466,9 @@ type ReceivableControlMessages struct {\n// HopLimit is the IPv6 Hop Limit of the associated packet.\nHopLimit uint8\n+ // HasTimestamp indicates whether Timestamp is valid/set.\n+ HasTimestamp bool\n+\n// HasTClass indicates whether TClass is valid/set.\nHasTClass bool\n@@ -699,6 +717,9 @@ type WriteOptions struct {\n// endpoint. If Atomic is false, then data fetched from the Payloader may be\n// discarded if available endpoint buffer space is unsufficient.\nAtomic bool\n+\n+ // ControlMessages contains optional overrides used when writing a packet.\n+ ControlMessages SendableControlMessages\n}\n// SockOptInt represents socket options which values have the int type.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/internal/network/endpoint.go",
"new_path": "pkg/tcpip/transport/internal/network/endpoint.go",
"diff": "@@ -329,11 +329,22 @@ func (e *Endpoint) AcquireContextForWrite(opts tcpip.WriteOptions) (WriteContext\n}\nvar tos uint8\n+ var ttl uint8\nswitch netProto := route.NetProto(); netProto {\ncase header.IPv4ProtocolNumber:\ntos = e.ipv4TOS\n+ if opts.ControlMessages.HasTTL {\n+ ttl = opts.ControlMessages.TTL\n+ } else {\n+ ttl = e.calculateTTL(route)\n+ }\ncase header.IPv6ProtocolNumber:\ntos = e.ipv6TClass\n+ if opts.ControlMessages.HasHopLimit {\n+ ttl = opts.ControlMessages.HopLimit\n+ } else {\n+ ttl = e.calculateTTL(route)\n+ }\ndefault:\npanic(fmt.Sprintf(\"invalid protocol number = %d\", netProto))\n}\n@@ -341,7 +352,7 @@ func (e *Endpoint) AcquireContextForWrite(opts tcpip.WriteOptions) (WriteContext\nreturn WriteContext{\ntransProto: e.transProto,\nroute: route,\n- ttl: e.calculateTTL(route),\n+ ttl: ttl,\ntos: tos,\nowner: e.owner,\n}, nil\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/raw_socket.cc",
"new_path": "test/syscalls/linux/raw_socket.cc",
"diff": "@@ -1300,17 +1300,17 @@ TEST(RawSocketTest, ReceiveTTL) {\nASSERT_THAT(\nbind(raw.get(), reinterpret_cast<const sockaddr*>(&kAddr), sizeof(kAddr)),\nSyscallSucceeds());\n+ ASSERT_THAT(connect(raw.get(), reinterpret_cast<const sockaddr*>(&kAddr),\n+ sizeof(kAddr)),\n+ SyscallSucceeds());\nconstexpr int kArbitraryTTL = 42;\nASSERT_THAT(setsockopt(raw.get(), IPPROTO_IP, IP_TTL, &kArbitraryTTL,\nsizeof(kArbitraryTTL)),\nSyscallSucceeds());\n- constexpr char kSendBuf[] = \"malformed UDP\";\n- ASSERT_THAT(sendto(raw.get(), kSendBuf, sizeof(kSendBuf), 0 /* flags */,\n- reinterpret_cast<const sockaddr*>(&kAddr), sizeof(kAddr)),\n- SyscallSucceedsWithValue(sizeof(kSendBuf)));\n-\n+ char send_buf[] = \"malformed UDP\";\n+ auto test_recv_ttl = [&](int expected_ttl) {\n// Register to receive TTL.\nconstexpr int kOne = 1;\nASSERT_THAT(\n@@ -1319,25 +1319,44 @@ TEST(RawSocketTest, ReceiveTTL) {\nstruct {\niphdr ip;\n- char data[sizeof(kSendBuf)];\n+ char data[sizeof(send_buf)];\n} ABSL_ATTRIBUTE_PACKED recv_buf;\n+\nint recv_ttl;\nsize_t recv_buf_len = sizeof(recv_buf);\n- ASSERT_NO_FATAL_FAILURE(RecvTTL(raw.get(), reinterpret_cast<char*>(&recv_buf),\n+ ASSERT_NO_FATAL_FAILURE(RecvTTL(raw.get(),\n+ reinterpret_cast<char*>(&recv_buf),\n&recv_buf_len, &recv_ttl));\n- ASSERT_EQ(recv_buf_len, sizeof(iphdr) + sizeof(kSendBuf));\n+ ASSERT_EQ(recv_buf_len, sizeof(iphdr) + sizeof(send_buf));\nEXPECT_EQ(recv_buf.ip.version, static_cast<unsigned int>(IPVERSION));\n// IHL holds the number of header bytes in 4 byte units.\nEXPECT_EQ(recv_buf.ip.ihl, sizeof(iphdr) / 4);\n- EXPECT_EQ(ntohs(recv_buf.ip.tot_len), sizeof(iphdr) + sizeof(kSendBuf));\n+ EXPECT_EQ(ntohs(recv_buf.ip.tot_len), sizeof(iphdr) + sizeof(send_buf));\nEXPECT_EQ(recv_buf.ip.protocol, IPPROTO_UDP);\nEXPECT_EQ(ntohl(recv_buf.ip.saddr), INADDR_LOOPBACK);\nEXPECT_EQ(ntohl(recv_buf.ip.daddr), INADDR_LOOPBACK);\n+ EXPECT_EQ(recv_buf.ip.ttl, static_cast<uint8_t>(expected_ttl));\n- EXPECT_EQ(memcmp(kSendBuf, &recv_buf.data, sizeof(kSendBuf)), 0);\n+ EXPECT_EQ(memcmp(send_buf, &recv_buf.data, sizeof(send_buf)), 0);\n+\n+ EXPECT_EQ(recv_ttl, expected_ttl);\n+ };\n+\n+ ASSERT_THAT(send(raw.get(), send_buf, sizeof(send_buf), /*flags=*/0),\n+ SyscallSucceedsWithValue(sizeof(send_buf)));\n+ {\n+ SCOPED_TRACE(\"receive ttl set by option\");\n+ ASSERT_NO_FATAL_FAILURE(test_recv_ttl(kArbitraryTTL));\n+ }\n- EXPECT_EQ(recv_ttl, kArbitraryTTL);\n+ constexpr int kArbitrarySendmsgTTL = kArbitraryTTL + 1;\n+ ASSERT_NO_FATAL_FAILURE(SendTTL(raw.get(), send_buf, size_t(sizeof(send_buf)),\n+ kArbitrarySendmsgTTL));\n+ {\n+ SCOPED_TRACE(\"receive ttl set by cmsg\");\n+ ASSERT_NO_FATAL_FAILURE(test_recv_ttl(kArbitrarySendmsgTTL));\n+ }\n}\nTEST(RawSocketTest, ReceiveHopLimit) {\n@@ -1353,24 +1372,24 @@ TEST(RawSocketTest, ReceiveHopLimit) {\nASSERT_THAT(\nbind(raw.get(), reinterpret_cast<const sockaddr*>(&kAddr), sizeof(kAddr)),\nSyscallSucceeds());\n+ ASSERT_THAT(connect(raw.get(), reinterpret_cast<const sockaddr*>(&kAddr),\n+ sizeof(kAddr)),\n+ SyscallSucceeds());\nconstexpr int kArbitraryHopLimit = 42;\nASSERT_THAT(setsockopt(raw.get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS,\n&kArbitraryHopLimit, sizeof(kArbitraryHopLimit)),\nSyscallSucceeds());\n- constexpr char send_buf[] = \"malformed UDP\";\n- ASSERT_THAT(sendto(raw.get(), send_buf, sizeof(send_buf), 0 /* flags */,\n- reinterpret_cast<const sockaddr*>(&kAddr), sizeof(kAddr)),\n- SyscallSucceedsWithValue(sizeof(send_buf)));\n-\n// Register to receive HOPLIMIT.\nconstexpr int kOne = 
1;\nASSERT_THAT(setsockopt(raw.get(), IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &kOne,\nsizeof(kOne)),\nSyscallSucceeds());\n- char recv_buf[sizeof(send_buf) + 1];\n+ char send_buf[] = \"malformed UDP\";\n+ auto test_recv_hoplimit = [&](int expected_hoplimit) {\n+ char recv_buf[sizeof(send_buf)];\nsize_t recv_buf_len = sizeof(recv_buf);\nint recv_hoplimit;\nASSERT_NO_FATAL_FAILURE(\n@@ -1378,7 +1397,24 @@ TEST(RawSocketTest, ReceiveHopLimit) {\nASSERT_EQ(recv_buf_len, sizeof(send_buf));\nEXPECT_EQ(memcmp(send_buf, recv_buf, sizeof(send_buf)), 0);\n- EXPECT_EQ(recv_hoplimit, kArbitraryHopLimit);\n+ EXPECT_EQ(recv_hoplimit, expected_hoplimit);\n+ };\n+\n+ ASSERT_THAT(send(raw.get(), send_buf, sizeof(send_buf), /*flags=*/0),\n+ SyscallSucceedsWithValue(sizeof(send_buf)));\n+ {\n+ SCOPED_TRACE(\"receive hoplimit set by option\");\n+ ASSERT_NO_FATAL_FAILURE(test_recv_hoplimit(kArbitraryHopLimit));\n+ }\n+\n+ constexpr int kArbitrarySendmsgHopLimit = kArbitraryHopLimit + 1;\n+ ASSERT_NO_FATAL_FAILURE(SendHopLimit(raw.get(), send_buf,\n+ size_t(sizeof(send_buf)),\n+ kArbitrarySendmsgHopLimit));\n+ {\n+ SCOPED_TRACE(\"receive hoplimit set by cmsg\");\n+ ASSERT_NO_FATAL_FAILURE(test_recv_hoplimit(kArbitrarySendmsgHopLimit));\n+ }\n}\nTEST(RawSocketTest, SetIPv6ChecksumError_MultipleOf2) {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/udp_socket.cc",
"new_path": "test/syscalls/linux/udp_socket.cc",
"diff": "@@ -2189,10 +2189,6 @@ TEST_P(UdpSocketControlMessagesTest, SetAndReceiveTTLOrHopLimit) {\n}\nTEST_P(UdpSocketControlMessagesTest, SendAndReceiveTTLOrHopLimit) {\n- // TODO(b/146661005): Setting TTL/HopLimit via sendmsg is not supported by\n- // netstack.\n- SKIP_IF(IsRunningOnGvisor() && !IsRunningWithHostinet());\n-\n// Enable receiving TTL and maybe HOPLIMIT on the receiver.\nASSERT_THAT(setsockopt(server_.get(), SOL_IP, IP_RECVTTL, &kSockOptOn,\nsizeof(kSockOptOn)),\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support sending TTL and HopLimit
PiperOrigin-RevId: 430554985 |
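On the netstack side, the per-message override travels in tcpip.WriteOptions.ControlMessages, which AcquireContextForWrite now consults before falling back to the endpoint's default TTL or hop limit. A sketch of a caller supplying the override, assuming a bound or connected datagram tcpip.Endpoint and the Write signature of this era of the tcpip package:

```go
// Sketch: per-packet TTL override via WriteOptions, as now consumed by
// AcquireContextForWrite.
package cmsgsketch

import "gvisor.dev/gvisor/pkg/tcpip"

// writeWithTTL sends one datagram with a per-packet TTL override — the same
// override SendMsg now derives from an IP_TTL control message.
func writeWithTTL(ep tcpip.Endpoint, payload tcpip.Payloader, dst tcpip.FullAddress, ttl uint8) (int64, tcpip.Error) {
	return ep.Write(payload, tcpip.WriteOptions{
		To: &dst,
		ControlMessages: tcpip.SendableControlMessages{
			HasTTL: true, // use the TTL below instead of the endpoint default
			TTL:    ttl,
		},
	})
}
```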
259,909 | 24.02.2022 11:27:11 | 28,800 | 28688eb1a5ab1531eedf7561bc072b3376923dfb | Support cgroup Install() through systemd.
This change implements Install() through systemd and adds associated tests.
The properties saved by the systemdCgroup object will be applied during
Join(), which will be added in a later change. | [
{
"change_type": "MODIFY",
"old_path": "runsc/cgroup/BUILD",
"new_path": "runsc/cgroup/BUILD",
"diff": "@@ -7,12 +7,15 @@ go_library(\nsrcs = [\n\"cgroup.go\",\n\"cgroup_v2.go\",\n+ \"systemd.go\",\n],\nvisibility = [\"//:sandbox\"],\ndeps = [\n\"//pkg/cleanup\",\n\"//pkg/log\",\n\"@com_github_cenkalti_backoff//:go_default_library\",\n+ \"@com_github_coreos_go_systemd_v22//dbus:go_default_library\",\n+ \"@com_github_godbus_dbus_v5//:go_default_library\",\n\"@com_github_opencontainers_runtime_spec//specs-go:go_default_library\",\n\"@org_golang_x_sync//errgroup:go_default_library\",\n\"@org_golang_x_sys//unix:go_default_library\",\n@@ -25,11 +28,16 @@ go_test(\nsrcs = [\n\"cgroup_test.go\",\n\"cgroup_v2_test.go\",\n+ \"systemd_test.go\",\n],\nlibrary = \":cgroup\",\ntags = [\"local\"],\ndeps = [\n\"//pkg/test/testutil\",\n+ \"@com_github_coreos_go_systemd_v22//dbus:go_default_library\",\n+ \"@com_github_godbus_dbus_v5//:go_default_library\",\n+ \"@com_github_google_go_cmp//cmp:go_default_library\",\n+ \"@com_github_google_go_cmp//cmp/cmpopts:go_default_library\",\n\"@com_github_opencontainers_runtime_spec//specs-go:go_default_library\",\n],\n)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/cgroup/systemd.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// https://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package cgroup\n+\n+import (\n+ \"errors\"\n+ \"fmt\"\n+ \"os\"\n+ \"path\"\n+ \"path/filepath\"\n+ \"strconv\"\n+\n+ systemdDbus \"github.com/coreos/go-systemd/v22/dbus\"\n+ dbus \"github.com/godbus/dbus/v5\"\n+ specs \"github.com/opencontainers/runtime-spec/specs-go\"\n+)\n+\n+// ErrBadResourceSpec indicates that a cgroupSystemd function was\n+// passed a specs.LinuxResources object that is impossible or illegal\n+// to process.\n+var ErrBadResourceSpec = errors.New(\"misconfigured resource spec\")\n+\n+// cgroupSystemd represents a cgroup managed by systemd.\n+type cgroupSystemd struct {\n+ // Name is the name of the of the systemd scope that controls the cgroups.\n+ Name string `json:\"name\"`\n+ // Mountpoint is the unified mount point of cgroupV2.\n+ Mountpoint string `json:\"mountpoint\"`\n+ // Path is the relative path to the unified mountpoint.\n+ Path string `json:\"path\"`\n+ // Controllers is the list of supported controllers.\n+ Controllers []string `json:\"controllers\"`\n+ // OwnedPaths is the list of owned paths created when installing this cgroup.\n+ OwnedPaths []string `json:\"owned_paths\"`\n+\n+ properties []systemdDbus.Property\n+ dbusConn *systemdDbus.Conn\n+}\n+\n+// Install creates and configures a scope unit with the specified resource\n+// limits.\n+func (c *cgroupSystemd) Install(res *specs.LinuxResources) error {\n+ slice := path.Base(c.Path)\n+ ext := path.Ext(slice)\n+ if ext != \".slice\" {\n+ return fmt.Errorf(\"invalid systemd path %s does not end in a parent slice: %w\", c.Path, ErrInvalidGroupPath)\n+ }\n+ c.properties = append(c.properties, systemdDbus.PropSlice(slice))\n+ c.properties = append(c.properties, systemdDbus.PropDescription(\"runsc container \"+c.Name))\n+ pid := os.Getpid()\n+ c.properties = append(c.properties, systemdDbus.PropPids(uint32(pid)))\n+ // We always want proper accounting for the container for reporting resource\n+ // usage.\n+ c.addProp(\"MemoryAccounting\", true)\n+ c.addProp(\"CPUAccounting\", true)\n+ c.addProp(\"TasksAccounting\", true)\n+ c.addProp(\"IOAccounting\", true)\n+ // Delegate must be true so that the container can manage its own cgroups.\n+ c.addProp(\"Delegate\", true)\n+ return c.genResourceControl(res)\n+}\n+\n+// MakePath builds a path to the given controller.\n+func (c *cgroupSystemd) MakePath(string) string {\n+ return filepath.Join(c.Mountpoint, c.Path)\n+}\n+\n+func (c *cgroupSystemd) genResourceControl(res *specs.LinuxResources) error {\n+ if res == nil {\n+ return nil\n+ }\n+ var (\n+ mem = res.Memory\n+ cpu = res.CPU\n+ io = res.BlockIO\n+ )\n+ if res.Pids != nil {\n+ c.addProp(\"TasksMax\", res.Pids.Limit)\n+ }\n+ if mem != nil {\n+ if mem.Swap != nil {\n+ if mem.Limit == nil {\n+ return ErrBadResourceSpec\n+ }\n+ swap, err := convertMemorySwapToCgroupV2Value(*mem.Swap, *mem.Limit)\n+ if err != nil {\n+ return err\n+ }\n+ c.addProp(\"MemorySwapMax\", strconv.FormatInt(swap, 
10))\n+ }\n+ if mem.Limit != nil {\n+ c.addProp(\"MemoryMax\", *mem.Limit)\n+ }\n+ if mem.Reservation != nil {\n+ c.addProp(\"MemoryLow\", *mem.Reservation)\n+ }\n+ }\n+ if cpu != nil {\n+ if cpu.Shares != nil {\n+ weight := convertCPUSharesToCgroupV2Value(*cpu.Shares)\n+ if weight != 0 {\n+ c.addProp(\"CPUShares\", weight)\n+ }\n+ }\n+\n+ if cpu.Quota != nil && *cpu.Quota > 0 {\n+ c.addProp(\"CPUQuota\", strconv.FormatInt(*cpu.Quota, 10)+\"%\")\n+ }\n+ var period uint64\n+ if cpu.Period != nil && *cpu.Period != 0 {\n+ period = *cpu.Period\n+ } else {\n+ period = defaultPeriod\n+ }\n+ // period is in microseconds, so we have to divide by 10 to convert\n+ // to the milliseconds that systemd expects.\n+ c.addProp(\"CPUQuotaPeriodSec\", strconv.FormatUint(period/10, 10)+\"ms\")\n+ if cpu.Cpus != \"\" {\n+ c.addProp(\"AllowedCPUs\", cpu.Cpus)\n+ }\n+ if cpu.Mems != \"\" {\n+ c.addProp(\"AllowedMemoryNodes\", cpu.Mems)\n+ }\n+ }\n+ if io != nil {\n+ if io.Weight != nil {\n+ c.addProp(\"IOWeight\", *io.Weight)\n+ }\n+ for _, dev := range io.WeightDevice {\n+ val := fmt.Sprintf(\"%d:%d %d\", dev.Major, dev.Minor, *dev.Weight)\n+ c.addProp(\"IODevice\", val)\n+ }\n+ c.addIOProps(\"IOReadBandwidth\", io.ThrottleReadBpsDevice)\n+ c.addIOProps(\"IOWriteBandwidth\", io.ThrottleWriteBpsDevice)\n+ c.addIOProps(\"IOReadIOPS\", io.ThrottleReadIOPSDevice)\n+ c.addIOProps(\"IOWriteIOPS\", io.ThrottleWriteIOPSDevice)\n+ }\n+ return nil\n+}\n+\n+func (c *cgroupSystemd) addIOProps(name string, devs []specs.LinuxThrottleDevice) {\n+ for _, dev := range devs {\n+ val := fmt.Sprintf(\"%d:%d %d\", dev.Major, dev.Minor, dev.Rate)\n+ c.addProp(name, val)\n+ }\n+}\n+\n+func (c *cgroupSystemd) addProp(name string, value interface{}) {\n+ if value == nil {\n+ return\n+ }\n+ c.properties = append(c.properties, newProp(name, value))\n+}\n+\n+func newProp(name string, units interface{}) systemdDbus.Property {\n+ return systemdDbus.Property{\n+ Name: name,\n+ Value: dbus.MakeVariant(units),\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/cgroup/systemd_test.go",
"diff": "+// Copyright The runc Authors.\n+// Copyright The containerd Authors.\n+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// https://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package cgroup\n+\n+import (\n+ \"errors\"\n+ \"path/filepath\"\n+ \"strconv\"\n+ \"testing\"\n+\n+ systemdDbus \"github.com/coreos/go-systemd/v22/dbus\"\n+ dbus \"github.com/godbus/dbus/v5\"\n+ \"github.com/google/go-cmp/cmp\"\n+ \"github.com/google/go-cmp/cmp/cmpopts\"\n+ specs \"github.com/opencontainers/runtime-spec/specs-go\"\n+ \"gvisor.dev/gvisor/pkg/test/testutil\"\n+)\n+\n+var defaultProps = []systemdDbus.Property{}\n+\n+func TestInstall(t *testing.T) {\n+ for _, tc := range []struct {\n+ name string\n+ res *specs.LinuxResources\n+ cgroupPath string\n+ wantProps []systemdDbus.Property\n+ err error\n+ }{\n+ {\n+ name: \"bad parent\",\n+ res: nil,\n+ cgroupPath: \"not_a_slice\",\n+ err: ErrInvalidGroupPath,\n+ },\n+ {\n+ name: \"no limits\",\n+ res: nil,\n+ wantProps: []systemdDbus.Property{\n+ {\"Slice\", dbus.MakeVariant(\"parent.slice\")},\n+ {Name: \"Description\", Value: dbus.MakeVariant(\"runsc container \")},\n+ {Name: \"MemoryAccounting\", Value: dbus.MakeVariant(true)},\n+ {Name: \"CPUAccounting\", Value: dbus.MakeVariant(true)},\n+ {Name: \"TasksAccounting\", Value: dbus.MakeVariant(true)},\n+ {Name: \"IOAccounting\", Value: dbus.MakeVariant(true)},\n+ {Name: \"Delegate\", Value: dbus.MakeVariant(true)},\n+ },\n+ cgroupPath: \"parent.slice\",\n+ },\n+ {\n+ name: \"memory\",\n+ res: &specs.LinuxResources{\n+ Memory: &specs.LinuxMemory{\n+ Limit: int64Ptr(1),\n+ Swap: int64Ptr(2),\n+ Reservation: int64Ptr(3),\n+ },\n+ },\n+ cgroupPath: \"parent.slice\",\n+ wantProps: []systemdDbus.Property{\n+ {\"MemoryMax\", dbus.MakeVariant(int64(1))},\n+ {\"MemoryLow\", dbus.MakeVariant(int64(3))},\n+ {\"MemorySwapMax\", dbus.MakeVariant(\"1\")},\n+ },\n+ },\n+ {\n+ name: \"memory no limit\",\n+ res: &specs.LinuxResources{\n+ Memory: &specs.LinuxMemory{\n+ Swap: int64Ptr(1),\n+ },\n+ },\n+ err: ErrBadResourceSpec,\n+ cgroupPath: \"parent.slice\",\n+ },\n+ {\n+ name: \"cpu defaults\",\n+ res: &specs.LinuxResources{\n+ CPU: &specs.LinuxCPU{\n+ Shares: uint64Ptr(0),\n+ Quota: int64Ptr(0),\n+ Period: uint64Ptr(0),\n+ },\n+ },\n+ cgroupPath: \"parent.slice\",\n+ wantProps: []systemdDbus.Property{\n+ {\"CPUQuotaPeriodSec\", dbus.MakeVariant(strconv.FormatUint(defaultPeriod/10, 10) + \"ms\")},\n+ },\n+ },\n+ {\n+ name: \"cpu\",\n+ res: &specs.LinuxResources{\n+ CPU: &specs.LinuxCPU{\n+ Shares: uint64Ptr(1),\n+ Period: uint64Ptr(20),\n+ Quota: int64Ptr(3),\n+ Cpus: \"4\",\n+ Mems: \"5\",\n+ },\n+ },\n+ cgroupPath: \"parent.slice\",\n+ wantProps: []systemdDbus.Property{\n+ {\"CPUShares\", dbus.MakeVariant(convertCPUSharesToCgroupV2Value(1))},\n+ {\"CPUQuotaPeriodSec\", dbus.MakeVariant(\"2ms\")},\n+ {\"CPUQuota\", dbus.MakeVariant(\"3%\")},\n+ {\"AllowedCPUs\", dbus.MakeVariant(\"4\")},\n+ {\"AllowedMemoryNodes\", dbus.MakeVariant(\"5\")},\n+ },\n+ },\n+ {\n+ name: \"io\",\n+ res: 
&specs.LinuxResources{\n+ BlockIO: &specs.LinuxBlockIO{\n+ Weight: uint16Ptr(1),\n+ WeightDevice: []specs.LinuxWeightDevice{\n+ makeLinuxWeightDevice(2, 3, uint16Ptr(4), uint16Ptr(0)),\n+ makeLinuxWeightDevice(5, 6, uint16Ptr(7), uint16Ptr(0)),\n+ },\n+ ThrottleReadBpsDevice: []specs.LinuxThrottleDevice{\n+ makeLinuxThrottleDevice(8, 9, 10),\n+ makeLinuxThrottleDevice(11, 12, 13),\n+ },\n+ ThrottleWriteBpsDevice: []specs.LinuxThrottleDevice{\n+ makeLinuxThrottleDevice(14, 15, 16),\n+ },\n+ ThrottleReadIOPSDevice: []specs.LinuxThrottleDevice{\n+ makeLinuxThrottleDevice(17, 18, 19),\n+ },\n+ ThrottleWriteIOPSDevice: []specs.LinuxThrottleDevice{\n+ makeLinuxThrottleDevice(20, 21, 22),\n+ },\n+ },\n+ },\n+ cgroupPath: \"parent.slice\",\n+ wantProps: []systemdDbus.Property{\n+ {\"IOWeight\", dbus.MakeVariant(uint16(1))},\n+ {\"IODevice\", dbus.MakeVariant(\"2:3 4\")},\n+ {\"IODevice\", dbus.MakeVariant(\"5:6 7\")},\n+ {\"IOReadBandwidth\", dbus.MakeVariant(\"8:9 10\")},\n+ {\"IOReadBandwidth\", dbus.MakeVariant(\"11:12 13\")},\n+ {\"IOWriteBandwidth\", dbus.MakeVariant(\"14:15 16\")},\n+ {\"IOReadIOPS\", dbus.MakeVariant(\"17:18 19\")},\n+ {\"IOWriteIOPS\", dbus.MakeVariant(\"20:21 22\")},\n+ },\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ dir := testutil.TmpDir()\n+ testPath := filepath.Join(dir, tc.cgroupPath)\n+\n+ cg := cgroupSystemd{\n+ Path: testPath,\n+ }\n+ err := cg.Install(tc.res)\n+ if !errors.Is(err, tc.err) {\n+ t.Fatalf(\"Wrong error, got: %s, want: %s\", tc.err, err)\n+ }\n+ cmper := cmp.Comparer(func(a dbus.Variant, b dbus.Variant) bool {\n+ return a.String() == b.String()\n+ })\n+ sorter := cmpopts.SortSlices(func(a systemdDbus.Property, b systemdDbus.Property) bool {\n+ return (a.Name + a.Value.String()) > (b.Name + b.Value.String())\n+ })\n+ filteredProps := filterProperties(cg.properties, tc.wantProps)\n+ if diff := cmp.Diff(filteredProps, tc.wantProps, cmper, sorter); diff != \"\" {\n+ t.Errorf(\"cgroup properties list diff %s\", diff)\n+ }\n+ })\n+ }\n+}\n+\n+// filterProperties filters the list of properties in got to ones with\n+// the names of properties specified in want.\n+func filterProperties(got []systemdDbus.Property, want []systemdDbus.Property) []systemdDbus.Property {\n+ if want == nil {\n+ return nil\n+ }\n+ filterMap := map[string]interface{}{}\n+ for _, prop := range want {\n+ filterMap[prop.Name] = nil\n+ }\n+ filtered := []systemdDbus.Property{}\n+ for _, prop := range got {\n+ if _, ok := filterMap[prop.Name]; ok {\n+ filtered = append(filtered, prop)\n+ }\n+ }\n+ return filtered\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support cgroup Install() through systemd.
This change implements Install() through systemd and adds associated tests.
The properties saved by the systemdCgroup object will be applied during
Join(), which will be added in a later change.
PiperOrigin-RevId: 430751223 |
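A sketch of how the new type is meant to be driven, using only the fields and methods visible in this diff. It would have to live inside package cgroup (cgroupSystemd is unexported), and the values are arbitrary; as the commit message notes, Install only records the desired systemd properties — nothing is sent to systemd until the later Join() change:

```go
// Hypothetical in-package example of Install().
package cgroup

import specs "github.com/opencontainers/runtime-spec/specs-go"

func exampleInstall() error {
	limit := int64(512 << 20) // 512 MiB
	res := &specs.LinuxResources{
		Memory: &specs.LinuxMemory{Limit: &limit},
		CPU:    &specs.LinuxCPU{Cpus: "0-1"},
	}
	cg := cgroupSystemd{
		Name:       "abcd1234",       // container ID (made up)
		Mountpoint: "/sys/fs/cgroup", // unified cgroup v2 mount point
		Path:       "runsc.slice",    // must end in a parent .slice
	}
	// Records Slice/Delegate/accounting plus MemoryMax and AllowedCPUs in
	// cg.properties; they are applied later, when Join() starts the unit.
	return cg.Install(res)
}
```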
259,885 | 24.02.2022 12:26:32 | 28,800 | 6ca818990a7778c9a6e63f53a489f9f8d51ac3b2 | Ensure that open(O_RDWR) doesn't copy-up on read-only VFS2 overlay mounts. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/overlay/filesystem.go",
"new_path": "pkg/sentry/fsimpl/overlay/filesystem.go",
"diff": "@@ -797,6 +797,10 @@ func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\nreturn nil, linuxerr.EEXIST\n}\nif start.isRegularFile() && mayWrite {\n+ if err := rp.Mount().CheckBeginWrite(); err != nil {\n+ return nil, err\n+ }\n+ defer rp.Mount().EndWrite()\nif err := start.copyUpLocked(ctx); err != nil {\nreturn nil, err\n}\n@@ -851,6 +855,10 @@ afterTrailingSymlink:\nreturn nil, linuxerr.ENOTDIR\n}\nif child.isRegularFile() && mayWrite {\n+ if err := rp.Mount().CheckBeginWrite(); err != nil {\n+ return nil, err\n+ }\n+ defer rp.Mount().EndWrite()\nif err := child.copyUpLocked(ctx); err != nil {\nreturn nil, err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Ensure that open(O_RDWR) doesn't copy-up on read-only VFS2 overlay mounts.
PiperOrigin-RevId: 430765773 |
259,909 | 24.02.2022 15:55:07 | 28,800 | 510cc2f7fca91b42cf3ed4813631233e478b566a | Fix pivot_root lock inversion.
PivotRoot takes vfs.mountMu then calls PrependPath which calls fs.mu.
RmdirAt() takes fs.mu and then calls PrepareDeleteDentry which takes
vfs.mountMu. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/mount.go",
"new_path": "pkg/sentry/vfs/mount.go",
"diff": "@@ -727,9 +727,8 @@ func (vfs *VirtualFilesystem) PivotRoot(ctx context.Context, creds *auth.Credent\nrootVd := RootFromContext(ctx)\ndefer rootVd.DecRef(ctx)\n- vfs.mountMu.Lock()\n- defer vfs.mountMu.Unlock()\n-\n+retry:\n+ epoch := vfs.mounts.seq.BeginRead()\n// Neither new_root nor put_old can be on the same mount as the current\n//root mount.\nif newRootVd.mount == rootVd.mount || putOldVd.mount == rootVd.mount {\n@@ -763,7 +762,13 @@ func (vfs *VirtualFilesystem) PivotRoot(ctx context.Context, creds *auth.Credent\n// pivot_root-ing new_root/put_old mounts with MS_SHARED propagation once it\n// is implemented in gVisor.\n- vfs.mounts.seq.BeginWrite()\n+ vfs.mountMu.Lock()\n+ if !vfs.mounts.seq.BeginWriteOk(epoch) {\n+ // Checks above raced with a mount change.\n+ vfs.mountMu.Unlock()\n+ goto retry\n+ }\n+ defer vfs.mountMu.Unlock()\nmp := vfs.disconnectLocked(newRootVd.mount)\nmp.DecRef(ctx)\nrootMp := vfs.disconnectLocked(rootVd.mount)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix pivot_root lock inversion.
PivotRoot takes vfs.mountMu then calls PrependPath which calls fs.mu.
RmdirAt() takes fs.mu and then calls PrepareDeleteDentry which takes
vfs.mountMu.
PiperOrigin-RevId: 430813050 |
259,962 | 25.02.2022 11:08:46 | 28,800 | f461bb7b64b6cc3bc606a92256651c9a431bc765 | Fix MTU advertised by sharedmem to exclude header size. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem.go",
"diff": "@@ -298,7 +298,7 @@ func (e *endpoint) IsAttached() bool {\n// MTU implements stack.LinkEndpoint.MTU. It returns the value initialized\n// during construction.\nfunc (e *endpoint) MTU() uint32 {\n- return e.mtu - e.hdrSize\n+ return e.mtu\n}\n// Capabilities implements stack.LinkEndpoint.Capabilities.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem_server.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem_server.go",
"diff": "@@ -184,7 +184,7 @@ func (e *serverEndpoint) IsAttached() bool {\n// MTU implements stack.LinkEndpoint.MTU. It returns the value initialized\n// during construction.\nfunc (e *serverEndpoint) MTU() uint32 {\n- return e.mtu - e.hdrSize\n+ return e.mtu\n}\n// Capabilities implements stack.LinkEndpoint.Capabilities.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix MTU advertised by sharedmem to exclude header size.
PiperOrigin-RevId: 430986440 |
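For context on the one-line change: a link endpoint's MTU() is the payload budget it reports to the network layer, while the link-layer header is accounted for separately when frames are sized. If the endpoint's stored mtu already excludes the header (which the fix suggests), subtracting hdrSize in MTU() double-counts it and under-reports the usable MTU. A toy illustration of that accounting, with made-up sizes rather than sharedmem's real ones:

package main

import "fmt"

const (
	frameSize = 1514 // hypothetical on-wire frame budget
	hdrSize   = 14   // hypothetical link-layer header size
)

// endpoint stores mtu as the network-layer payload budget, i.e. the
// link-layer header has already been subtracted once at construction.
type endpoint struct {
	mtu     uint32
	hdrSize uint32
}

func newEndpoint() *endpoint {
	return &endpoint{mtu: frameSize - hdrSize, hdrSize: hdrSize}
}

// MTU returns the payload budget as-is; subtracting hdrSize again here
// would double-count the header.
func (e *endpoint) MTU() uint32 { return e.mtu }

// maxFrameSize is where the header size actually matters.
func (e *endpoint) maxFrameSize() uint32 { return e.mtu + e.hdrSize }

func main() {
	e := newEndpoint()
	fmt.Println(e.MTU(), e.maxFrameSize()) // 1500 1514
}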
259,907 | 25.02.2022 12:36:11 | 28,800 | 7e8f4c3b3fb8ed2fd42df95880ac68b9fb406052 | Do not generate native variants for verity benchmarks. | [
{
"change_type": "MODIFY",
"old_path": "test/perf/BUILD",
"new_path": "test/perf/BUILD",
"diff": "@@ -149,30 +149,35 @@ syscall_test(\nsyscall_test(\nsize = \"large\",\n+ allow_native = False,\ndebug = False,\ntest = \"//test/perf/linux:verity_open_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\n+ allow_native = False,\ndebug = False,\ntest = \"//test/perf/linux:verity_read_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\n+ allow_native = False,\ndebug = False,\ntest = \"//test/perf/linux:verity_randread_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\n+ allow_native = False,\ndebug = False,\ntest = \"//test/perf/linux:verity_open_read_close_benchmark\",\n)\nsyscall_test(\nsize = \"large\",\n+ allow_native = False,\ndebug = False,\ntest = \"//test/perf/linux:verity_stat_benchmark\",\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/perf/linux/verity_open_benchmark.cc",
"new_path": "test/perf/linux/verity_open_benchmark.cc",
"diff": "@@ -36,12 +36,6 @@ namespace testing {\nnamespace {\nvoid BM_Open(benchmark::State& state) {\n- // CAP_SYS_ADMIN is needed for making mount(2) syscall.\n- if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN))) {\n- state.SkipWithError(\"CAP_SYS_ADMIN missing. Skipping benchmark.\");\n- return;\n- }\n-\nconst int size = state.range(0);\nstd::vector<TempPath> cache;\nstd::vector<EnableTarget> targets;\n"
},
{
"change_type": "MODIFY",
"old_path": "test/perf/linux/verity_open_read_close_benchmark.cc",
"new_path": "test/perf/linux/verity_open_read_close_benchmark.cc",
"diff": "@@ -36,12 +36,6 @@ namespace testing {\nnamespace {\nvoid BM_VerityOpenReadClose(benchmark::State& state) {\n- // CAP_SYS_ADMIN is needed for making mount(2) syscall.\n- if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN))) {\n- state.SkipWithError(\"CAP_SYS_ADMIN missing. Skipping benchmark.\");\n- return;\n- }\n-\nconst int size = state.range(0);\n// Mount a tmpfs file system to be wrapped by a verity fs.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/perf/linux/verity_randread_benchmark.cc",
"new_path": "test/perf/linux/verity_randread_benchmark.cc",
"diff": "// limitations under the License.\n#include <fcntl.h>\n-#include <linux/capability.h>\n#include <stdlib.h>\n#include <sys/mount.h>\n#include <sys/stat.h>\n#include \"gtest/gtest.h\"\n#include \"benchmark/benchmark.h\"\n-#include \"test/util/linux_capability_util.h\"\n#include \"test/util/logging.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n@@ -85,12 +83,6 @@ GlobalState& GetGlobalState() {\n}\nvoid BM_VerityRandRead(benchmark::State& state) {\n- // CAP_SYS_ADMIN is needed for making mount(2) syscall.\n- if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN))) {\n- state.SkipWithError(\"CAP_SYS_ADMIN missing. Skipping benchmark.\");\n- return;\n- }\n-\nconst int size = state.range(0);\nGlobalState& global_state = GetGlobalState();\n"
},
{
"change_type": "MODIFY",
"old_path": "test/perf/linux/verity_read_benchmark.cc",
"new_path": "test/perf/linux/verity_read_benchmark.cc",
"diff": "@@ -36,12 +36,6 @@ namespace testing {\nnamespace {\nvoid BM_VerityRead(benchmark::State& state) {\n- // CAP_SYS_ADMIN is needed for making mount(2) syscall.\n- if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN))) {\n- state.SkipWithError(\"CAP_SYS_ADMIN missing. Skipping benchmark.\");\n- return;\n- }\n-\nconst int size = state.range(0);\nconst std::string contents(size, 0);\n"
},
{
"change_type": "MODIFY",
"old_path": "test/perf/linux/verity_stat_benchmark.cc",
"new_path": "test/perf/linux/verity_stat_benchmark.cc",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-#include <linux/capability.h>\n#include <sys/mount.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include \"gtest/gtest.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"benchmark/benchmark.h\"\n-#include \"test/util/capability_util.h\"\n#include \"test/util/fs_util.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n@@ -38,12 +36,6 @@ namespace {\n// Creates a file in a nested directory hierarchy at least `depth` directories\n// deep, and stats that file multiple times.\nvoid BM_VerityStat(benchmark::State& state) {\n- // CAP_SYS_ADMIN is needed for making mount(2) syscall.\n- if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN))) {\n- state.SkipWithError(\"CAP_SYS_ADMIN missing. Skipping benchmark.\");\n- return;\n- }\n-\n// Create nested directories with given depth.\nint depth = state.range(0);\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/defs.bzl",
"new_path": "test/runner/defs.bzl",
"diff": "@@ -132,6 +132,7 @@ def syscall_test(\nadd_uds_tree = False,\nadd_hostinet = False,\nfuse = False,\n+ allow_native = True,\ndebug = True,\ntags = None,\n**kwargs):\n@@ -144,6 +145,7 @@ def syscall_test(\nadd_uds_tree: add a UDS test.\nadd_hostinet: add a hostinet test.\nfuse: enable FUSE support.\n+ allow_native: generate a native test variant.\ndebug: enable debug output.\ntags: starting test tags.\n**kwargs: additional test arguments.\n@@ -151,8 +153,8 @@ def syscall_test(\nif not tags:\ntags = []\n- if not fuse:\n- # Generate a native test if fuse is not required.\n+ if not fuse and allow_native:\n+ # Generate a native test if fuse is not required and if it is allowed.\n_syscall_test(\ntest = test,\nplatform = \"native\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Do not generate native variants for verity benchmarks.
PiperOrigin-RevId: 431006942 |
260,004 | 25.02.2022 18:03:08 | 28,800 | 65d8057067bbf8cdf03a98c9522c6010fc0eeec3 | Return previous config when setting NIC forwarding
...so that callers can determine if forwarding changed as a consequence
of their call. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -190,23 +190,24 @@ func (e *endpoint) Forwarding() bool {\n// setForwarding sets the forwarding status for the endpoint.\n//\n-// Returns true if the forwarding status was updated.\n+// Returns the previous forwarding status.\nfunc (e *endpoint) setForwarding(v bool) bool {\nforwarding := uint32(forwardingDisabled)\nif v {\nforwarding = forwardingEnabled\n}\n- return atomic.SwapUint32(&e.forwarding, forwarding) != forwarding\n+ return atomic.SwapUint32(&e.forwarding, forwarding) != forwardingDisabled\n}\n// SetForwarding implements stack.ForwardingNetworkEndpoint.\n-func (e *endpoint) SetForwarding(forwarding bool) {\n+func (e *endpoint) SetForwarding(forwarding bool) bool {\ne.mu.Lock()\ndefer e.mu.Unlock()\n- if !e.setForwarding(forwarding) {\n- return\n+ prevForwarding := e.setForwarding(forwarding)\n+ if prevForwarding == forwarding {\n+ return prevForwarding\n}\nif forwarding {\n@@ -221,7 +222,7 @@ func (e *endpoint) SetForwarding(forwarding bool) {\npanic(fmt.Sprintf(\"e.joinGroupLocked(%s): %s\", header.IPv4AllRoutersGroup, err))\n}\n- return\n+ return prevForwarding\n}\nswitch err := e.leaveGroupLocked(header.IPv4AllRoutersGroup).(type) {\n@@ -231,6 +232,8 @@ func (e *endpoint) SetForwarding(forwarding bool) {\ndefault:\npanic(fmt.Sprintf(\"e.leaveGroupLocked(%s): %s\", header.IPv4AllRoutersGroup, err))\n}\n+\n+ return prevForwarding\n}\n// Enable implements stack.NetworkEndpoint.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ipv6.go",
"new_path": "pkg/tcpip/network/ipv6/ipv6.go",
"diff": "@@ -439,23 +439,24 @@ func (e *endpoint) Forwarding() bool {\n// setForwarding sets the forwarding status for the endpoint.\n//\n-// Returns true if the forwarding status was updated.\n+// Returns the previous forwarding status.\nfunc (e *endpoint) setForwarding(v bool) bool {\nforwarding := uint32(forwardingDisabled)\nif v {\nforwarding = forwardingEnabled\n}\n- return atomic.SwapUint32(&e.forwarding, forwarding) != forwarding\n+ return atomic.SwapUint32(&e.forwarding, forwarding) != forwardingDisabled\n}\n// SetForwarding implements stack.ForwardingNetworkEndpoint.\n-func (e *endpoint) SetForwarding(forwarding bool) {\n+func (e *endpoint) SetForwarding(forwarding bool) bool {\ne.mu.Lock()\ndefer e.mu.Unlock()\n- if !e.setForwarding(forwarding) {\n- return\n+ prevForwarding := e.setForwarding(forwarding)\n+ if prevForwarding == forwarding {\n+ return prevForwarding\n}\nallRoutersGroups := [...]tcpip.Address{\n@@ -501,6 +502,7 @@ func (e *endpoint) SetForwarding(forwarding bool) {\n}\ne.mu.ndp.forwardingChanged(forwarding)\n+ return prevForwarding\n}\n// Enable implements stack.NetworkEndpoint.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/forwarding_test.go",
"new_path": "pkg/tcpip/stack/forwarding_test.go",
"diff": "@@ -241,10 +241,12 @@ func (f *fwdTestNetworkEndpoint) Forwarding() bool {\n}\n// SetForwarding implements stack.ForwardingNetworkEndpoint.\n-func (f *fwdTestNetworkEndpoint) SetForwarding(v bool) {\n+func (f *fwdTestNetworkEndpoint) SetForwarding(v bool) bool {\nf.mu.Lock()\ndefer f.mu.Unlock()\n+ prev := f.mu.forwarding\nf.mu.forwarding = v\n+ return prev\n}\nvar _ LinkEndpoint = (*fwdTestLinkEndpoint)(nil)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -1013,19 +1013,18 @@ func (n *nic) checkDuplicateAddress(protocol tcpip.NetworkProtocolNumber, addr t\nreturn d.CheckDuplicateAddress(addr, h), nil\n}\n-func (n *nic) setForwarding(protocol tcpip.NetworkProtocolNumber, enable bool) tcpip.Error {\n+func (n *nic) setForwarding(protocol tcpip.NetworkProtocolNumber, enable bool) (bool, tcpip.Error) {\nep := n.getNetworkEndpoint(protocol)\nif ep == nil {\n- return &tcpip.ErrUnknownProtocol{}\n+ return false, &tcpip.ErrUnknownProtocol{}\n}\nforwardingEP, ok := ep.(ForwardingNetworkEndpoint)\nif !ok {\n- return &tcpip.ErrNotSupported{}\n+ return false, &tcpip.ErrNotSupported{}\n}\n- forwardingEP.SetForwarding(enable)\n- return nil\n+ return forwardingEP.SetForwarding(enable), nil\n}\nfunc (n *nic) forwarding(protocol tcpip.NetworkProtocolNumber) (bool, tcpip.Error) {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/registration.go",
"new_path": "pkg/tcpip/stack/registration.go",
"diff": "@@ -686,7 +686,9 @@ type ForwardingNetworkEndpoint interface {\nForwarding() bool\n// SetForwarding sets the forwarding configuration.\n- SetForwarding(bool)\n+ //\n+ // Returns the previous forwarding configuration.\n+ SetForwarding(bool) bool\n}\n// NetworkProtocol is the interface that needs to be implemented by network\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -506,13 +506,15 @@ func (s *Stack) Stats() tcpip.Stats {\n// SetNICForwarding enables or disables packet forwarding on the specified NIC\n// for the passed protocol.\n-func (s *Stack) SetNICForwarding(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber, enable bool) tcpip.Error {\n+//\n+// Returns the previous configuration on the NIC.\n+func (s *Stack) SetNICForwarding(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber, enable bool) (bool, tcpip.Error) {\ns.mu.RLock()\ndefer s.mu.RUnlock()\nnic, ok := s.nics[id]\nif !ok {\n- return &tcpip.ErrUnknownNICID{}\n+ return false, &tcpip.ErrUnknownNICID{}\n}\nreturn nic.setForwarding(protocol, enable)\n@@ -539,7 +541,7 @@ func (s *Stack) SetForwardingDefaultAndAllNICs(protocol tcpip.NetworkProtocolNum\ndoneOnce := false\nfor id, nic := range s.nics {\n- if err := nic.setForwarding(protocol, enable); err != nil {\n+ if _, err := nic.setForwarding(protocol, enable); err != nil {\n// Expect forwarding to be settable on all interfaces if it was set on\n// one.\nif doneOnce {\n@@ -699,7 +701,7 @@ func (s *Stack) CreateNICWithOptions(id tcpip.NICID, ep LinkEndpoint, opts NICOp\nn := newNIC(s, id, ep, opts)\nfor proto := range s.defaultForwardingEnabled {\n- if err := n.setForwarding(proto, true); err != nil {\n+ if _, err := n.setForwarding(proto, true); err != nil {\npanic(fmt.Sprintf(\"newNIC(%d, ...).setForwarding(%d, true): %s\", id, proto, err))\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack_test.go",
"new_path": "pkg/tcpip/stack/stack_test.go",
"diff": "@@ -305,10 +305,12 @@ func (f *fakeNetworkEndpoint) Forwarding() bool {\n}\n// SetForwarding implements stack.ForwardingNetworkEndpoint.\n-func (f *fakeNetworkEndpoint) SetForwarding(v bool) {\n+func (f *fakeNetworkEndpoint) SetForwarding(v bool) bool {\nf.mu.Lock()\ndefer f.mu.Unlock()\n+ prev := f.mu.forwarding\nf.mu.forwarding = v\n+ return prev\n}\nfunc fakeNetFactory(s *stack.Stack) stack.NetworkProtocol {\n@@ -4642,6 +4644,80 @@ func TestFindRouteWithForwarding(t *testing.T) {\n}\n}\n+func TestNICForwarding(t *testing.T) {\n+ const nicID = 1\n+\n+ tests := []struct {\n+ name string\n+ factory stack.NetworkProtocolFactory\n+ netProto tcpip.NetworkProtocolNumber\n+ }{\n+ {\n+ name: \"Fake Network\",\n+ factory: fakeNetFactory,\n+ netProto: fakeNetNumber,\n+ },\n+ {\n+ name: \"IPv4\",\n+ factory: ipv4.NewProtocol,\n+ netProto: ipv4.ProtocolNumber,\n+ },\n+ {\n+ name: \"IPv6\",\n+ factory: ipv6.NewProtocol,\n+ netProto: ipv6.ProtocolNumber,\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{test.factory},\n+ })\n+ if err := s.CreateNIC(nicID, channel.New(0, defaultMTU, \"\")); err != nil {\n+ t.Fatalf(\"CreateNIC(%d, _): %s\", nicID, err)\n+ }\n+\n+ // Forwarding should initially be disabled.\n+ if forwarding, err := s.NICForwarding(nicID, test.netProto); err != nil {\n+ t.Fatalf(\"s.NICForwarding(%d, %d): %s\", nicID, test.netProto, err)\n+ } else if forwarding {\n+ t.Errorf(\"got s.NICForwarding(%d, %d) = true, want = false\", nicID, test.netProto)\n+ }\n+\n+ // Setting forwarding to be enabled should return the previous\n+ // configuration of false. Enabling it a second time should be a no-op.\n+ for _, wantPrevForwarding := range [...]bool{false, true} {\n+ if prevForwarding, err := s.SetNICForwarding(nicID, test.netProto, true); err != nil {\n+ t.Fatalf(\"s.SetNICForwarding(%d, %d, true): %s\", nicID, test.netProto, err)\n+ } else if prevForwarding != wantPrevForwarding {\n+ t.Errorf(\"got s.SetNICForwarding(%d, %d, true) = %t, want = %t\", nicID, test.netProto, prevForwarding, wantPrevForwarding)\n+ }\n+ if forwarding, err := s.NICForwarding(nicID, test.netProto); err != nil {\n+ t.Fatalf(\"s.NICForwarding(%d, %d): %s\", nicID, test.netProto, err)\n+ } else if !forwarding {\n+ t.Errorf(\"got s.NICForwarding(%d, %d) = false, want = true\", nicID, test.netProto)\n+ }\n+ }\n+\n+ // Setting forwarding to be disabled should return the previous\n+ // configuration of true. Disabling it a second time should be a no-op.\n+ for _, wantPrevForwarding := range [...]bool{true, false} {\n+ if prevForwarding, err := s.SetNICForwarding(nicID, test.netProto, false); err != nil {\n+ t.Fatalf(\"s.SetNICForwarding(%d, %d, false): %s\", nicID, test.netProto, err)\n+ } else if prevForwarding != wantPrevForwarding {\n+ t.Errorf(\"got s.SetNICForwarding(%d, %d, false) = %t, want = %t\", nicID, test.netProto, prevForwarding, wantPrevForwarding)\n+ }\n+ if forwarding, err := s.NICForwarding(nicID, test.netProto); err != nil {\n+ t.Fatalf(\"s.NICForwarding(%d, %d): %s\", nicID, test.netProto, err)\n+ } else if forwarding {\n+ t.Errorf(\"got s.NICForwarding(%d, %d) = true, want = false\", nicID, test.netProto)\n+ }\n+ }\n+ })\n+ }\n+}\n+\nfunc TestWritePacketToRemote(t *testing.T) {\nconst nicID = 1\nconst MTU = 1280\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tests/integration/forward_test.go",
"new_path": "pkg/tcpip/tests/integration/forward_test.go",
"diff": "@@ -628,7 +628,7 @@ func TestPerInterfaceForwarding(t *testing.T) {\n// Only enable forwarding on NIC1 and make sure that only packets arriving\n// on NIC1 are forwarded.\nfor _, netProto := range netProtos {\n- if err := s.SetNICForwarding(nicID1, netProto, true); err != nil {\n+ if _, err := s.SetNICForwarding(nicID1, netProto, true); err != nil {\nt.Fatalf(\"s.SetNICForwarding(%d, %d, true): %s\", nicID1, netProtos, err)\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Return previous config when setting NIC forwarding
...so that callers can determine if forwarding changed as a consequence
of their call.
PiperOrigin-RevId: 431069840 |
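The recurring idiom in this change is a setter that reports the previous value, implemented with atomic.SwapUint32 compared against the "disabled" sentinel; callers can then treat repeated enables or disables as no-ops and skip side effects such as joining or leaving the all-routers groups. A stripped-down sketch of the idiom outside the netstack types:

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	disabled uint32 = 0
	enabled  uint32 = 1
)

type nic struct {
	forwarding uint32 // accessed atomically
}

// setForwarding stores the new setting and reports the previous one, so a
// caller can detect "no change" and skip any associated side effects.
func (n *nic) setForwarding(v bool) (prev bool) {
	val := disabled
	if v {
		val = enabled
	}
	return atomic.SwapUint32(&n.forwarding, val) != disabled
}

func main() {
	var n nic
	fmt.Println(n.setForwarding(true))  // false: it was disabled before
	fmt.Println(n.setForwarding(true))  // true: second enable is a no-op
	fmt.Println(n.setForwarding(false)) // true
}

Returning the previous value (rather than a "changed" boolean) also lets tests assert idempotence directly, which is what the new TestNICForwarding in the diff does.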
259,868 | 25.02.2022 18:27:15 | 28,800 | 488841f73ae16a2a1e9224ac402aa6a0b183a92d | Replace use of undocumented `GVISOR_KVM_DEV` environment variable with flags.
This allows overriding the path used to access the kernel's KVM device file,
typically at `/dev/kvm`, with a flag-controlled path instead. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/testutil/kernel.go",
"new_path": "pkg/sentry/fsimpl/testutil/kernel.go",
"diff": "@@ -45,6 +45,7 @@ import (\nvar (\nplatformFlag = flag.String(\"platform\", \"ptrace\", \"specify which platform to use\")\n+ platformDevicePathFlag = flag.String(\"platform_device_path\", \"\", \"path to a platform-specific device file (e.g. /dev/kvm for KVM platform). If unset, will use a sane platform-specific default.\")\n)\n// Boot initializes a new bare bones kernel for test.\n@@ -53,7 +54,7 @@ func Boot() (*kernel.Kernel, error) {\nif err != nil {\nreturn nil, fmt.Errorf(\"platform not found: %v\", err)\n}\n- deviceFile, err := platformCtr.OpenDevice()\n+ deviceFile, err := platformCtr.OpenDevice(*platformDevicePathFlag)\nif err != nil {\nreturn nil, fmt.Errorf(\"creating platform: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm.go",
"new_path": "pkg/sentry/platform/kvm/kvm.go",
"diff": "@@ -76,15 +76,15 @@ var (\nglobalErr error\n)\n-// OpenDevice opens the KVM device at /dev/kvm and returns the File.\n-func OpenDevice() (*os.File, error) {\n- dev, ok := os.LookupEnv(\"GVISOR_KVM_DEV\")\n- if !ok {\n- dev = \"/dev/kvm\"\n+// OpenDevice opens the KVM device and returns the File.\n+// If the devicePath is empty, it will default to /dev/kvm.\n+func OpenDevice(devicePath string) (*os.File, error) {\n+ if devicePath == \"\" {\n+ devicePath = \"/dev/kvm\"\n}\n- f, err := os.OpenFile(dev, unix.O_RDWR, 0)\n+ f, err := os.OpenFile(devicePath, unix.O_RDWR, 0)\nif err != nil {\n- return nil, fmt.Errorf(\"error opening KVM device file (%s): %v\", dev, err)\n+ return nil, fmt.Errorf(\"error opening KVM device file (%s): %v\", devicePath, err)\n}\nreturn f, nil\n}\n@@ -186,8 +186,8 @@ func (*constructor) New(f *os.File) (platform.Platform, error) {\nreturn New(f)\n}\n-func (*constructor) OpenDevice() (*os.File, error) {\n- return OpenDevice()\n+func (*constructor) OpenDevice(devicePath string) (*os.File, error) {\n+ return OpenDevice(devicePath)\n}\n// Flags implements platform.Constructor.Flags().\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm_test.go",
"new_path": "pkg/sentry/platform/kvm/kvm_test.go",
"diff": "@@ -42,7 +42,7 @@ type testHarness interface {\nfunc kvmTest(t testHarness, setup func(*KVM), fn func(*vCPU) bool) {\n// Create the machine.\n- deviceFile, err := OpenDevice()\n+ deviceFile, err := OpenDevice(\"\")\nif err != nil {\nt.Fatalf(\"error opening device file: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/platform.go",
"new_path": "pkg/sentry/platform/platform.go",
"diff": "@@ -426,7 +426,11 @@ type Constructor interface {\n//\n// * deviceFile - the device file (e.g. /dev/kvm for the KVM platform).\nNew(deviceFile *os.File) (Platform, error)\n- OpenDevice() (*os.File, error)\n+\n+ // OpenDevice opens the path to the device used by the platform.\n+ // Passing in an empty string will use the default path for the device,\n+ // e.g. \"/dev/kvm\" for the KVM platform.\n+ OpenDevice(devicePath string) (*os.File, error)\n// Requirements returns platform specific requirements.\nRequirements() Requirements\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/ptrace.go",
"new_path": "pkg/sentry/platform/ptrace/ptrace.go",
"diff": "@@ -262,7 +262,7 @@ func (*constructor) New(*os.File) (platform.Platform, error) {\nreturn New()\n}\n-func (*constructor) OpenDevice() (*os.File, error) {\n+func (*constructor) OpenDevice(_ string) (*os.File, error) {\nreturn nil, nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/install.go",
"new_path": "runsc/cmd/install.go",
"diff": "@@ -78,7 +78,7 @@ func (i *Install) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{})\nif err != nil {\nlog.Fatalf(\"invalid platform: %v\", err)\n}\n- deviceFile, err := p.OpenDevice()\n+ deviceFile, err := p.OpenDevice(conf.PlatformDevicePath)\nif err != nil {\nlog.Printf(\"WARNING: unable to open platform, runsc may fail to start: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/config.go",
"new_path": "runsc/config/config.go",
"diff": "@@ -114,6 +114,11 @@ type Config struct {\n// Platform is the platform to run on.\nPlatform string `flag:\"platform\"`\n+ // PlatformDevicePath is the path to the device file used by the platform.\n+ // e.g. \"/dev/kvm\" for the KVM platform.\n+ // If unset, a sane platform-specific default will be used.\n+ PlatformDevicePath string `flag:\"platform_device_path\"`\n+\n// Strace indicates that strace should be enabled.\nStrace bool `flag:\"strace\"`\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/flags.go",
"new_path": "runsc/config/flags.go",
"diff": "@@ -56,6 +56,7 @@ func RegisterFlags(flagSet *flag.FlagSet) {\n// Flags that control sandbox runtime behavior.\nflagSet.String(\"platform\", \"ptrace\", \"specifies which platform to use: ptrace (default), kvm.\")\n+ flagSet.String(\"platform_device_path\", \"\", \"path to a platform-specific device file (e.g. /dev/kvm for KVM platform). If unset, will use a sane platform-specific default.\")\nflagSet.Var(watchdogActionPtr(watchdog.LogWarning), \"watchdog-action\", \"sets what action the watchdog takes when triggered: log (default), panic.\")\nflagSet.Int(\"panic-signal\", -1, \"register signal handling that panics. Usually set to SIGUSR2(12) to troubleshoot hangs. -1 disables it.\")\nflagSet.Bool(\"profile\", false, \"prepares the sandbox to use Golang profiler. Note that enabling profiler loosens the seccomp protection added to the sandbox (DO NOT USE IN PRODUCTION).\")\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -322,7 +322,7 @@ func (s *Sandbox) Restore(cid string, spec *specs.Spec, conf *config.Config, fil\n}\n// If the platform needs a device FD we must pass it in.\n- if deviceFile, err := deviceFileForPlatform(conf.Platform); err != nil {\n+ if deviceFile, err := deviceFileForPlatform(conf.Platform, conf.PlatformDevicePath); err != nil {\nreturn err\n} else if deviceFile != nil {\ndefer deviceFile.Close()\n@@ -596,7 +596,7 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn\nreturn err\n}\n- if deviceFile, err := gPlatform.OpenDevice(); err != nil {\n+ if deviceFile, err := gPlatform.OpenDevice(conf.PlatformDevicePath); err != nil {\nreturn fmt.Errorf(\"opening device file for platform %q: %v\", conf.Platform, err)\n} else if deviceFile != nil {\ndefer deviceFile.Close()\n@@ -1407,13 +1407,14 @@ func (s *Sandbox) configureStdios(conf *config.Config, stdios []*os.File) error\n// deviceFileForPlatform opens the device file for the given platform. If the\n// platform does not need a device file, then nil is returned.\n-func deviceFileForPlatform(name string) (*os.File, error) {\n+// devicePath may be empty to use a sane platform-specific default.\n+func deviceFileForPlatform(name, devicePath string) (*os.File, error) {\np, err := platform.Lookup(name)\nif err != nil {\nreturn nil, err\n}\n- f, err := p.OpenDevice()\n+ f, err := p.OpenDevice(devicePath)\nif err != nil {\nreturn nil, fmt.Errorf(\"opening device file for platform %q: %w\", name, err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Replace use of undocumented `GVISOR_KVM_DEV` environment variable with flags.
This allows overriding the path used to access the kernel's KVM device file,
typically at `/dev/kvm`, with a flag-controlled path instead.
PiperOrigin-RevId: 431073099 |
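The flag plumbing above reduces to a single convention: an empty device path means "use the platform default". A minimal sketch of that convention with a hypothetical flag name and default path; this is not runsc's actual flag registration, which goes through runsc/config:

package main

import (
	"flag"
	"fmt"
	"os"
)

var devicePath = flag.String("platform_device_path", "", "override the platform device file; empty selects the default")

// openDevice falls back to a built-in default when no override is given,
// the same shape as OpenDevice in the diff above.
func openDevice(path string) (*os.File, error) {
	if path == "" {
		path = "/dev/kvm" // hypothetical default for a KVM-like platform
	}
	return os.OpenFile(path, os.O_RDWR, 0)
}

func main() {
	flag.Parse()
	f, err := openDevice(*devicePath)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer f.Close()
	fmt.Println("opened", f.Name())
}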
259,924 | 25.02.2022 21:48:16 | 28,800 | 7f3fdc910f3722b6ca9ca54c77c5703a7b81c4d8 | Implement core tagging
Links to some possibly useful reference material on core tagging:
* LWN article:
* Kernel docs: | [
{
"change_type": "MODIFY",
"old_path": "WORKSPACE",
"new_path": "WORKSPACE",
"diff": "@@ -88,8 +88,8 @@ go_repository(\ngo_repository(\nname = \"org_golang_x_sys\",\nimportpath = \"golang.org/x/sys\",\n- sum = \"h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw=\",\n- version = \"v0.0.0-20211007075335-d3039528d8ac\",\n+ sum = \"h1:BXxu8t6QN0G1uff4bzZzSkpsax8+ALqTGUtz08QrV00=\",\n+ version = \"v0.0.0-20220224120231-95c6836cb0e7\",\n)\ngo_repository(\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/prctl.go",
"new_path": "pkg/abi/linux/prctl.go",
"diff": "@@ -145,6 +145,10 @@ const (\n// Protection eXtensions (MPX) bounds tables.\nPR_MPX_DISABLE_MANAGEMENT = 44\n+ // The following constants are used to control thread scheduling on cores.\n+ PR_SCHED_CORE_SCOPE_THREAD = 0\n+ PR_SCHED_CORE_SCOPE_THREAD_GROUP = 1\n+\n// PR_SET_PTRACER allows a specific process (or any, if PR_SET_PTRACER_ANY is\n// specified) to ptrace the current task.\nPR_SET_PTRACER = 0x59616d61\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/coretag/BUILD",
"diff": "+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"coretag\",\n+ srcs = [\n+ \"coretag.go\",\n+ \"coretag_unsafe.go\",\n+ ],\n+ visibility = [\"//:sandbox\"],\n+ deps = [\n+ \"//pkg/abi/linux\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n+ ],\n+)\n+\n+go_test(\n+ name = \"coretag_test\",\n+ size = \"small\",\n+ srcs = [\n+ \"coretag_test.go\",\n+ ],\n+ library = \":coretag\",\n+ deps = [\n+ \"//pkg/hostos\",\n+ ],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/coretag/coretag.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package coretag implements core tagging.\n+package coretag\n+\n+import (\n+ \"fmt\"\n+ \"io/ioutil\"\n+ \"strconv\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+)\n+\n+// Enable core tagging. If this returns with no error, all threads in the\n+// current thread group will be run in a core tagged thread. Only available on\n+// linux kernel >= 5.14.\n+func Enable() error {\n+ // Set core tag on current thread group.\n+ // prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, pid=0,\n+ // PR_SCHED_CORE_SCOPE_THREAD_GROUP, cookie=nullptr)\n+ // pid=0 means current pid.\n+ // cookie=nullptr is required for PR_SCHED_CORE_CREATE.\n+ if _, _, errno := unix.Syscall6(unix.SYS_PRCTL, unix.PR_SCHED_CORE,\n+ unix.PR_SCHED_CORE_CREATE, 0 /*pid*/, linux.PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0, 0); errno != 0 {\n+ return fmt.Errorf(\"failed to core tag sentry: %w\", errno)\n+ }\n+ return nil\n+}\n+\n+// GetAllCoreTags returns the core tag of all the threads in the thread group.\n+func GetAllCoreTags(pid int) ([]uint64, error) {\n+ // prctl(PR_SCHED_CORE_GET, PR_SCHED_CORE_SCOPE_THREAD_GROUP, ...) is not supported\n+ // in linux. So instead we get all threads from /proc/<pid>/task and get all the\n+ // core tags individually.\n+ tagSet := make(map[uint64]struct{})\n+ // Get current pid core tag.\n+ tag, err := getCoreTag(pid)\n+ if err != nil {\n+ return nil, err\n+ }\n+ tagSet[tag] = struct{}{}\n+\n+ // Get core tags of tids.\n+ tids, err := getTids(pid)\n+ if err != nil {\n+ return nil, err\n+ }\n+ for tid := range tids {\n+ tag, err := getCoreTag(tid)\n+ if err != nil {\n+ return nil, err\n+ }\n+ tagSet[tag] = struct{}{}\n+ }\n+\n+ // Return set of tags as a slice.\n+ tags := make([]uint64, 0, len(tagSet))\n+ for t := range tagSet {\n+ tags = append(tags, t)\n+ }\n+ return tags, nil\n+}\n+\n+// getTids returns set of tids as reported by /proc/<pid>/task.\n+func getTids(pid int) (map[int]struct{}, error) {\n+ tids := make(map[int]struct{})\n+ files, err := ioutil.ReadDir(\"/proc/\" + strconv.Itoa(pid) + \"/task\")\n+ if err != nil {\n+ return nil, err\n+ }\n+ for _, file := range files {\n+ tid, err := strconv.Atoi(file.Name())\n+ if err != nil {\n+ return nil, err\n+ }\n+ tids[tid] = struct{}{}\n+ }\n+\n+ return tids, nil\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/coretag/coretag_test.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package coretag\n+\n+import (\n+ \"os\"\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/pkg/hostos\"\n+)\n+\n+func TestEnable(t *testing.T) {\n+ major, minor, err := hostos.KernelVersion()\n+ if err != nil {\n+ t.Fatalf(\"Unable to parse kernel version: %v\", err)\n+ }\n+ // Skip running test when running on Linux kernel < 5.14 because core tagging\n+ // is not available.\n+ if major < 5 && minor < 14 {\n+ t.Skipf(\"Running on Linux kernel: %d.%d < 5.14. Core tagging not available. Skipping test.\", major, minor)\n+ return\n+ }\n+ if err := Enable(); err != nil {\n+ t.Fatalf(\"Enable() got error %v, wanted nil\", err)\n+ }\n+\n+ coreTags, err := GetAllCoreTags(os.Getpid())\n+ if err != nil {\n+ t.Fatalf(\"GetAllCoreTags() got error %v, wanted nil\", err)\n+ }\n+ if len(coreTags) != 1 {\n+ t.Fatalf(\"Got coreTags %v, wanted len(coreTags)=1\", coreTags)\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/coretag/coretag_unsafe.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package coretag\n+\n+import (\n+ \"fmt\"\n+ \"unsafe\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+)\n+\n+// getCoreTag returns the core tag of the tid. Only available on linux kernel >= 5.14.\n+func getCoreTag(tid int) (uint64, error) {\n+ var cookie uint64\n+ if _, _, errno := unix.Syscall6(unix.SYS_PRCTL, unix.PR_SCHED_CORE,\n+ unix.PR_SCHED_CORE_GET, uintptr(tid), linux.PR_SCHED_CORE_SCOPE_THREAD,\n+ uintptr(unsafe.Pointer(&cookie)), 0); errno != 0 {\n+ return 0, fmt.Errorf(\"prctl(PR_SCHED_CORE, PR_SCHED_CORE_GET, %d, PR_SCHED_CORE_SCOPE_THREAD) (errno=%d)\", tid, errno)\n+ }\n+ return cookie, nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/cpuid/BUILD",
"new_path": "pkg/cpuid/BUILD",
"diff": "@@ -42,5 +42,5 @@ go_test(\n],\nlibrary = \":cpuid\",\ntags = [\"manual\"],\n- deps = [\"@org_golang_x_sys//unix:go_default_library\"],\n+ deps = [\"//pkg/hostos\"],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/cpuid/cpuid_parse_test.go",
"new_path": "pkg/cpuid/cpuid_parse_test.go",
"diff": "package cpuid\nimport (\n- \"fmt\"\n\"io/ioutil\"\n\"regexp\"\n- \"strconv\"\n\"strings\"\n\"testing\"\n- \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/hostos\"\n)\n-func kernelVersion() (int, int, error) {\n- var u unix.Utsname\n- if err := unix.Uname(&u); err != nil {\n- return 0, 0, err\n- }\n-\n- var sb strings.Builder\n- for _, b := range u.Release {\n- if b == 0 {\n- break\n- }\n- sb.WriteByte(byte(b))\n- }\n-\n- s := strings.Split(sb.String(), \".\")\n- if len(s) < 2 {\n- return 0, 0, fmt.Errorf(\"kernel release missing major and minor component: %s\", sb.String())\n- }\n-\n- major, err := strconv.Atoi(s[0])\n- if err != nil {\n- return 0, 0, fmt.Errorf(\"error parsing major version %q in %q: %w\", s[0], sb.String(), err)\n- }\n-\n- minor, err := strconv.Atoi(s[1])\n- if err != nil {\n- return 0, 0, fmt.Errorf(\"error parsing minor version %q in %q: %w\", s[1], sb.String(), err)\n- }\n-\n- return major, minor, nil\n-}\n-\n// TestHostFeatureFlags tests that all features detected by HostFeatureSet are\n// on the host.\n//\n@@ -65,7 +31,7 @@ func kernelVersion() (int, int, error) {\n// analog in the actual CPUID feature set.\nfunc TestHostFeatureFlags(t *testing.T) {\n// Extract the kernel version.\n- major, minor, err := kernelVersion()\n+ major, minor, err := hostos.KernelVersion()\nif err != nil {\nt.Fatalf(\"Unable to parse kernel version: %v\", err)\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/hostos/BUILD",
"diff": "+load(\"//tools:defs.bzl\", \"go_library\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"hostos\",\n+ srcs = [\"hostos.go\"],\n+ visibility = [\"//:sandbox\"],\n+ deps = [\"@org_golang_x_sys//unix:go_default_library\"],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/hostos/hostos.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package hostos contains utility functions for getting information about the host OS.\n+package hostos\n+\n+import (\n+ \"fmt\"\n+ \"strconv\"\n+ \"strings\"\n+\n+ \"golang.org/x/sys/unix\"\n+)\n+\n+// KernelVersion returns the major and minor release version of the kernel using uname().\n+func KernelVersion() (int, int, error) {\n+ var u unix.Utsname\n+ if err := unix.Uname(&u); err != nil {\n+ return 0, 0, err\n+ }\n+\n+ var sb strings.Builder\n+ for _, b := range u.Release {\n+ if b == 0 {\n+ break\n+ }\n+ sb.WriteByte(byte(b))\n+ }\n+\n+ s := strings.Split(sb.String(), \".\")\n+ if len(s) < 2 {\n+ return 0, 0, fmt.Errorf(\"kernel release missing major and minor component: %s\", sb.String())\n+ }\n+\n+ major, err := strconv.Atoi(s[0])\n+ if err != nil {\n+ return 0, 0, fmt.Errorf(\"error parsing major version %q in %q: %w\", s[0], sb.String(), err)\n+ }\n+\n+ minor, err := strconv.Atoi(s[1])\n+ if err != nil {\n+ return 0, 0, fmt.Errorf(\"error parsing minor version %q in %q: %w\", s[1], sb.String(), err)\n+ }\n+\n+ return major, minor, nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/BUILD",
"new_path": "runsc/cmd/BUILD",
"diff": "@@ -45,6 +45,7 @@ go_library(\n\"//runsc:__subpackages__\",\n],\ndeps = [\n+ \"//pkg/coretag\",\n\"//pkg/coverage\",\n\"//pkg/log\",\n\"//pkg/p9\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/boot.go",
"new_path": "runsc/cmd/boot.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"github.com/google/subcommands\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/coretag\"\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/sentry/platform\"\n\"gvisor.dev/gvisor/runsc/boot\"\n@@ -236,6 +237,23 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nmountsFile.Close()\nspec.Mounts = cleanMounts\n+ if conf.EnableCoreTags {\n+ if err := coretag.Enable(); err != nil {\n+ Fatalf(\"Failed to core tag sentry: %v\", err)\n+ }\n+\n+ // Verify that all sentry threads are properly core tagged, and log\n+ // current core tag.\n+ coreTags, err := coretag.GetAllCoreTags(os.Getpid())\n+ if err != nil {\n+ Fatalf(\"Failed read current core tags: %v\", err)\n+ }\n+ if len(coreTags) != 1 {\n+ Fatalf(\"Not all child threads were core tagged the same. Tags=%v\", coreTags)\n+ }\n+ log.Infof(\"Core tag enabled (core tag=%d)\", coreTags[0])\n+ }\n+\n// Create the loader.\nbootArgs := boot.Args{\nID: f.Arg(0),\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/config.go",
"new_path": "runsc/config/config.go",
"diff": "@@ -138,6 +138,13 @@ type Config struct {\n// disabled. Pardon the double negation, but default to enabled is important.\nDisableSeccomp bool\n+ // EnableCoreTags indicates whether the Sentry process and children will be\n+ // run in a core tagged process. This isolates the sentry from sharing\n+ // physical cores with other core tagged processes. This is useful as a\n+ // mitigation for hyperthreading side channel based attacks. Requires host\n+ // linux kernel >= 5.14.\n+ EnableCoreTags bool `flag:\"enable-core-tags\"`\n+\n// WatchdogAction sets what action the watchdog takes when triggered.\nWatchdogAction watchdog.Action `flag:\"watchdog-action\"`\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/flags.go",
"new_path": "runsc/config/flags.go",
"diff": "@@ -70,6 +70,7 @@ func RegisterFlags(flagSet *flag.FlagSet) {\nflagSet.Bool(\"cpu-num-from-quota\", false, \"set cpu number to cpu quota (least integer greater or equal to quota value, but not less than 2)\")\nflagSet.Bool(\"oci-seccomp\", false, \"Enables loading OCI seccomp filters inside the sandbox.\")\nflagSet.Var(defaultControlConfig(), \"controls\", \"Sentry control endpoints.\")\n+ flagSet.Bool(\"enable-core-tags\", false, \"enables core tagging. Requires host linux kernel >= 5.14.\")\n// Flags that control sandbox runtime behavior: FS related.\nflagSet.Var(fileAccessTypePtr(FileAccessExclusive), \"file-access\", \"specifies which filesystem validation to use for the root mount: exclusive (default), shared.\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement core tagging
Links to some possibly useful reference material on core tagging:
* LWN article: https://lwn.net/Articles/861251/
* Kernel docs: https://www.kernel.org/doc/Documentation/admin-guide/hw-vuln/core-scheduling.rst
PiperOrigin-RevId: 431093418 |
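The new coretag package reduces to two prctl(2) operations available on Linux 5.14 and later: PR_SCHED_CORE_CREATE assigns a fresh core-scheduling cookie to the calling thread group, and PR_SCHED_CORE_GET reads a thread's cookie back; per the kernel's core-scheduling documentation, only tasks with matching cookies may run concurrently on SMT siblings of a core. A standalone, Linux-only illustration of the same two calls outside the package (the scope constants are the kernel's documented values, mirrored by the abi/linux additions above):

package main

import (
	"fmt"
	"os"
	"unsafe"

	"golang.org/x/sys/unix"
)

const (
	scopeThread      = 0 // PR_SCHED_CORE_SCOPE_THREAD
	scopeThreadGroup = 1 // PR_SCHED_CORE_SCOPE_THREAD_GROUP
)

func main() {
	// Tag every thread in this process with a new core-scheduling cookie.
	if _, _, errno := unix.Syscall6(unix.SYS_PRCTL, unix.PR_SCHED_CORE,
		unix.PR_SCHED_CORE_CREATE, 0 /* pid 0 = self */, scopeThreadGroup, 0, 0); errno != 0 {
		fmt.Fprintln(os.Stderr, "PR_SCHED_CORE_CREATE:", errno)
		return
	}

	// Read the cookie back for the current process.
	var cookie uint64
	if _, _, errno := unix.Syscall6(unix.SYS_PRCTL, unix.PR_SCHED_CORE,
		unix.PR_SCHED_CORE_GET, uintptr(os.Getpid()), scopeThread,
		uintptr(unsafe.Pointer(&cookie)), 0); errno != 0 {
		fmt.Fprintln(os.Stderr, "PR_SCHED_CORE_GET:", errno)
		return
	}
	fmt.Printf("core tag: %#x\n", cookie)
}

In the runsc wiring above, Enable() is called from the boot command when --enable-core-tags is set, and GetAllCoreTags() then verifies that every sentry thread carries the same cookie.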
259,975 | 28.02.2022 09:37:43 | 28,800 | 5240904367dfc7a634cd7a140fb4898ca58ed419 | Fix issue with parsing flags in root_test. | [
{
"change_type": "MODIFY",
"old_path": "test/root/main_test.go",
"new_path": "test/root/main_test.go",
"diff": "@@ -31,7 +31,9 @@ import (\n// path for runsc.\nfunc TestMain(m *testing.M) {\nconfig.RegisterFlags(flag.CommandLine)\n+ if !flag.CommandLine.Parsed() {\nflag.Parse()\n+ }\nif !specutils.HasCapabilities(capability.CAP_SYS_ADMIN, capability.CAP_DAC_OVERRIDE) {\nfmt.Println(\"Test requires sysadmin privileges to run. Try again with sudo.\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix issue with parsing flags in root_test.
PiperOrigin-RevId: 431455723 |
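The guard added above is a common idiom for making flag parsing idempotent when a TestMain (or a library init) may run after the testing framework has already parsed the command line. In isolation it looks like this:

package main

import (
	"flag"
	"fmt"
)

var verbose = flag.Bool("verbose", false, "enable verbose output")

// parseOnce parses the command line only if nothing has parsed it yet,
// the same guard added to TestMain in the diff above.
func parseOnce() {
	if !flag.CommandLine.Parsed() {
		flag.Parse()
	}
}

func main() {
	parseOnce()
	parseOnce() // safe: the second call is a no-op
	fmt.Println("verbose:", *verbose)
}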
259,858 | 28.02.2022 14:56:09 | 28,800 | 711770642a96af6525c059dca4f7269d946a5fb4 | Add release commands to BuildKite. | [
{
"change_type": "MODIFY",
"old_path": ".buildkite/hooks/post-command",
"new_path": ".buildkite/hooks/post-command",
"diff": "+# Clear any downloaded credentials.\n+rm -f repo.key\n+\n# Upload all relevant test failures.\nmake -s testlogs 2>/dev/null | grep // | sort | uniq | (\ndeclare log_count=0\n"
},
{
"change_type": "MODIFY",
"old_path": ".buildkite/hooks/pre-command",
"new_path": ".buildkite/hooks/pre-command",
"diff": "@@ -39,3 +39,15 @@ fi\n# Clear existing profiles.\nsudo rm -rf /tmp/profile\n+\n+# Download credentials, if a release agent.\n+if test \"${BUILDKITE_AGENT_META_DATA_QUEUE}\" = \"release\"; then\n+ # Update gcloud components.\n+ gcloud components update -q\n+\n+ # Pull down secrets.\n+ gcloud secrets versions access --secret=\"repo-key\" > repo.key\n+\n+ # Configure the Docker credential helper (to push images).\n+ gcloud auth configure-docker -q\n+fi\n"
},
{
"change_type": "MODIFY",
"old_path": ".buildkite/pipeline.yaml",
"new_path": ".buildkite/pipeline.yaml",
"diff": "@@ -46,7 +46,9 @@ steps:\nlabel: \":fire: Smoke race tests\"\ncommand: make smoke-race-tests\n- # Check that the Go branch builds.\n+ # Check that the Go branch builds. This is not technically required, as this build is maintained\n+ # as a GitHub action in order to preserve this maintaince across forks. However, providing the\n+ # action here may provide easier debuggability and diagnosis on failure.\n- <<: *common\nlabel: \":golang: Go branch\"\ncommands:\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": ".buildkite/release.yaml",
"diff": "+agents:\n+ queue: release\n+_templates:\n+ common: &common\n+ timeout_in_minutes: 120\n+ retry:\n+ automatic:\n+ - exit_status: -1\n+ limit: 10\n+ - exit_status: \"*\"\n+ limit: 2\n+ # This is enforced by the environment hooks on the release agents\n+ # as well, only the master branch may be built there.\n+ if: build.branch == \"master\" || build.tag != null\n+\n+steps:\n+ - <<: *common\n+ label: \":ship: Push all images (x86_64)\"\n+ commands:\n+ - make push-all-images\n+ - <<: *common\n+ label: \"ship: Push all images (aarch64)\"\n+ commands:\n+ - make ARCH=aarch64 push-all-images\n+ - <<: *common\n+ label: \":ship: Release\"\n+ commands:\n+ - make artifacts/x86_64\n+ - make BAZEL_OPTIONS=--config=cross-aarch64 artifacts/aarch64\n+ - make release RELEASE_NIGHTLY=$$RELEASE_NIGHTLY\n+ - cd repo && gsutil cp -r . gs://gvisor/releases/\n+ - <<: *common\n+ label: \":ship: Website Deploy\"\n+ commands:\n+ - make website-deploy\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add release commands to BuildKite.
PiperOrigin-RevId: 431532954 |
259,858 | 28.02.2022 17:43:47 | 28,800 | d79504d2723621ab1e35903ac17c346d4a5e19ac | Fix minor typo in emoji and gcloud error. | [
{
"change_type": "MODIFY",
"old_path": ".buildkite/hooks/pre-command",
"new_path": ".buildkite/hooks/pre-command",
"diff": "@@ -42,11 +42,8 @@ sudo rm -rf /tmp/profile\n# Download credentials, if a release agent.\nif test \"${BUILDKITE_AGENT_META_DATA_QUEUE}\" = \"release\"; then\n- # Update gcloud components.\n- gcloud components update -q\n-\n# Pull down secrets.\n- gcloud secrets versions access --secret=\"repo-key\" > repo.key\n+ gcloud secrets versions access --secret=\"repo-key\" latest > repo.key\n# Configure the Docker credential helper (to push images).\ngcloud auth configure-docker -q\n"
},
{
"change_type": "MODIFY",
"old_path": ".buildkite/release.yaml",
"new_path": ".buildkite/release.yaml",
"diff": "@@ -19,7 +19,7 @@ steps:\ncommands:\n- make push-all-images\n- <<: *common\n- label: \"ship: Push all images (aarch64)\"\n+ label: \":ship: Push all images (aarch64)\"\ncommands:\n- make ARCH=aarch64 push-all-images\n- <<: *common\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix minor typo in emoji and gcloud error.
PiperOrigin-RevId: 431566908 |
259,992 | 01.03.2022 18:57:15 | 28,800 | c6bb9ceb66d88d2dc83e9fd1157b1f8aa328ce8a | Remove VFS2 test dimension
The default is VFS2 enabled, so the tests setting VFS2 are redundant.
Updates | [
{
"change_type": "MODIFY",
"old_path": ".buildkite/pipeline.yaml",
"new_path": ".buildkite/pipeline.yaml",
"diff": "@@ -205,23 +205,23 @@ steps:\n# Runtime tests.\n- <<: *common\nlabel: \":php: PHP runtime tests\"\n- command: make php7.3.6-runtime-tests_vfs2\n+ command: make php7.3.6-runtime-tests\nparallelism: 10\n- <<: *common\nlabel: \":java: Java runtime tests\"\n- command: make java11-runtime-tests_vfs2\n+ command: make java11-runtime-tests\nparallelism: 40\n- <<: *common\nlabel: \":golang: Go runtime tests\"\n- command: make go1.12-runtime-tests_vfs2\n+ command: make go1.12-runtime-tests\nparallelism: 10\n- <<: *common\nlabel: \":node: NodeJS runtime tests\"\n- command: make nodejs12.4.0-runtime-tests_vfs2\n+ command: make nodejs12.4.0-runtime-tests\nparallelism: 10\n- <<: *common\nlabel: \":python: Python runtime tests\"\n- command: make python3.7.3-runtime-tests_vfs2\n+ command: make python3.7.3-runtime-tests\nparallelism: 10\n# ARM tests.\n"
},
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -158,9 +158,8 @@ dev: $(RUNTIME_BIN) ## Installs a set of local runtimes. Requires sudo.\n@$(call configure_noreload,$(RUNTIME),--net-raw)\n@$(call configure_noreload,$(RUNTIME)-d,--net-raw --debug --strace --log-packets)\n@$(call configure_noreload,$(RUNTIME)-p,--net-raw --profile)\n- @$(call configure_noreload,$(RUNTIME)-vfs2-d,--net-raw --debug --strace --log-packets --vfs2)\n- @$(call configure_noreload,$(RUNTIME)-vfs2-fuse-d,--net-raw --debug --strace --log-packets --vfs2 --fuse)\n- @$(call configure_noreload,$(RUNTIME)-vfs2-cgroup-d,--net-raw --debug --strace --log-packets --vfs2 --cgroupfs)\n+ @$(call configure_noreload,$(RUNTIME)-fuse-d,--net-raw --debug --strace --log-packets --fuse)\n+ @$(call configure_noreload,$(RUNTIME)-cgroup-d,--net-raw --debug --strace --log-packets --cgroupfs)\n@$(call reload_docker)\n.PHONY: dev\n@@ -235,10 +234,6 @@ packetimpact-tests:\n@$(call install_runtime,$(RUNTIME),) # Ensure flags are cleared.\n@$(call test_runtime,$(RUNTIME),--test_timeout=10800 //test/runtimes:$*)\n-%-runtime-tests_vfs2: load-runtimes_% $(RUNTIME_BIN)\n- @$(call install_runtime,$(RUNTIME),--vfs2)\n- @$(call test_runtime,$(RUNTIME),--test_timeout=10800 //test/runtimes:$*)\n-\ndo-tests: $(RUNTIME_BIN)\n@$(RUNTIME_BIN) --rootless do true\n@$(RUNTIME_BIN) --rootless -network=none do true\n@@ -262,8 +257,6 @@ INTEGRATION_TARGETS := //test/image:image_test //test/e2e:integration_test\ndocker-tests: load-basic $(RUNTIME_BIN)\n@$(call install_runtime,$(RUNTIME),) # Clear flags.\n@$(call test_runtime,$(RUNTIME),$(INTEGRATION_TARGETS))\n- @$(call install_runtime,$(RUNTIME),--vfs2)\n- @$(call test_runtime,$(RUNTIME),$(INTEGRATION_TARGETS))\n.PHONY: docker-tests\noverlay-tests: load-basic $(RUNTIME_BIN)\n@@ -306,7 +299,7 @@ packetdrill-tests: load-packetdrill $(RUNTIME_BIN)\n.PHONY: packetdrill-tests\nfsstress-test: load-basic $(RUNTIME_BIN)\n- @$(call install_runtime,$(RUNTIME),--vfs2)\n+ @$(call install_runtime,$(RUNTIME))\n@$(call test_runtime,$(RUNTIME),//test/fsstress:fsstress_test)\n.PHONY: fsstress-test\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/test/dockerutil/dockerutil.go",
"new_path": "pkg/test/dockerutil/dockerutil.go",
"diff": "@@ -22,6 +22,7 @@ import (\n\"io\"\n\"io/ioutil\"\n\"log\"\n+ \"os\"\n\"os/exec\"\n\"regexp\"\n\"strconv\"\n@@ -34,7 +35,7 @@ var (\n// runtime is the runtime to use for tests. This will be applied to all\n// containers. Note that the default here (\"runsc\") corresponds to the\n// default used by the installations.\n- runtime = flag.String(\"runtime\", \"runsc\", \"specify which runtime to use\")\n+ runtime = flag.String(\"runtime\", os.Getenv(\"RUNTIME\"), \"specify which runtime to use\")\n// config is the default Docker daemon configuration path.\nconfig = flag.String(\"config_path\", \"/etc/docker/daemon.json\", \"configuration file for reading paths\")\n@@ -91,27 +92,6 @@ func RuntimePath() (string, error) {\nreturn p, nil\n}\n-// UsingVFS2 returns true if the 'runtime' has the vfs2 flag set.\n-// TODO(gvisor.dev/issue/1624): Remove.\n-func UsingVFS2() (bool, error) {\n- rMap, err := runtimeMap()\n- if err != nil {\n- return false, err\n- }\n-\n- list, ok := rMap[\"runtimeArgs\"].([]interface{})\n- if !ok {\n- return false, fmt.Errorf(\"unexpected format: %v\", rMap)\n- }\n-\n- for _, element := range list {\n- if element == \"--vfs2\" {\n- return true, nil\n- }\n- }\n- return false, nil\n-}\n-\nfunc runtimeMap() (map[string]interface{}, error) {\n// Read the configuration data; the file must exist.\nconfigBytes, err := ioutil.ReadFile(*config)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/e2e/integration_test.go",
"new_path": "test/e2e/integration_test.go",
"diff": "@@ -469,12 +469,6 @@ func TestTmpMount(t *testing.T) {\n// Test that it is allowed to mount a file on top of /dev files, e.g.\n// /dev/random.\nfunc TestMountOverDev(t *testing.T) {\n- if vfs2, err := dockerutil.UsingVFS2(); err != nil {\n- t.Fatalf(\"Failed to read config for runtime %s: %v\", dockerutil.Runtime(), err)\n- } else if !vfs2 {\n- t.Skip(\"VFS1 doesn't allow /dev/random to be mounted.\")\n- }\n-\nrandom, err := ioutil.TempFile(testutil.TmpDir(), \"random\")\nif err != nil {\nt.Fatal(\"ioutil.TempFile() failed:\", err)\n@@ -603,22 +597,10 @@ func TestPing6Loopback(t *testing.T) {\n// can always delete its file when the file is inside a sticky directory owned\n// by another user.\nfunc TestStickyDir(t *testing.T) {\n- if vfs2Used, err := dockerutil.UsingVFS2(); err != nil {\n- t.Fatalf(\"failed to read config for runtime %s: %v\", dockerutil.Runtime(), err)\n- } else if !vfs2Used {\n- t.Skip(\"sticky bit test fails on VFS1.\")\n- }\n-\nrunIntegrationTest(t, nil, \"./test_sticky\")\n}\nfunc TestHostFD(t *testing.T) {\n- if vfs2Used, err := dockerutil.UsingVFS2(); err != nil {\n- t.Fatalf(\"failed to read config for runtime %s: %v\", dockerutil.Runtime(), err)\n- } else if !vfs2Used {\n- t.Skip(\"test fails on VFS1.\")\n- }\n-\nrunIntegrationTest(t, nil, \"./host_fd\")\n}\n@@ -664,12 +646,6 @@ func TestBindOverlay(t *testing.T) {\n}\nfunc TestStdios(t *testing.T) {\n- if vfs2, err := dockerutil.UsingVFS2(); err != nil {\n- t.Fatalf(\"Failed to read config for runtime %s: %v\", dockerutil.Runtime(), err)\n- } else if !vfs2 {\n- t.Skip(\"VFS1 doesn't adjust stdios user\")\n- }\n-\nctx := context.Background()\nd := dockerutil.MakeContainer(ctx, t)\ndefer d.CleanUp(ctx)\n@@ -685,12 +661,6 @@ func TestStdios(t *testing.T) {\n}\nfunc TestStdiosExec(t *testing.T) {\n- if vfs2, err := dockerutil.UsingVFS2(); err != nil {\n- t.Fatalf(\"Failed to read config for runtime %s: %v\", dockerutil.Runtime(), err)\n- } else if !vfs2 {\n- t.Skip(\"VFS1 doesn't adjust stdios user\")\n- }\n-\nctx := context.Background()\nd := dockerutil.MakeContainer(ctx, t)\ndefer d.CleanUp(ctx)\n@@ -748,12 +718,6 @@ func testStdios(t *testing.T, run func(string, ...string) (string, error)) {\n}\nfunc TestStdiosChown(t *testing.T) {\n- if vfs2, err := dockerutil.UsingVFS2(); err != nil {\n- t.Fatalf(\"Failed to read config for runtime %s: %v\", dockerutil.Runtime(), err)\n- } else if !vfs2 {\n- t.Skip(\"VFS1 doesn't adjust stdios user\")\n- }\n-\nctx := context.Background()\nd := dockerutil.MakeContainer(ctx, t)\ndefer d.CleanUp(ctx)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove VFS2 test dimension
The default is VFS2 enabled, so the tests setting VFS2 are redundant.
Updates #1624
PiperOrigin-RevId: 431827013 |
259,858 | 01.03.2022 23:32:28 | 28,800 | 5cfaa79a1a20b68941babf87f44889d8d7b65a68 | Fix gcloud secret access.
The secret access command apparently returns JSON (why!??) and the
payload is base64 encoded. Decode per the example provided in the
documentation [1].
[1] | [
{
"change_type": "MODIFY",
"old_path": ".buildkite/hooks/pre-command",
"new_path": ".buildkite/hooks/pre-command",
"diff": "@@ -43,7 +43,7 @@ sudo rm -rf /tmp/profile\n# Download credentials, if a release agent.\nif test \"${BUILDKITE_AGENT_META_DATA_QUEUE}\" = \"release\"; then\n# Pull down secrets.\n- gcloud secrets versions access --secret=\"repo-key\" latest > repo.key\n+ gcloud secrets versions access --secret=\"repo-key\" --format='get(payload.data)' latest | tr '_-' '/+' | base64 -d > repo.key\n# Configure the Docker credential helper (to push images).\ngcloud auth configure-docker -q\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix gcloud secret access.
The secret access command apparently returns JSON (why!??) and the
payload is base64 encoded. Decode per the example provided in the
documentation [1].
[1] https://cloud.google.com/sdk/gcloud/reference/secrets/versions/access
PiperOrigin-RevId: 431862918 |
259,992 | 02.03.2022 11:52:23 | 28,800 | 25c65171055e267d00df10a36ede442fa2e3f003 | Drop support for containerd 1.3
Update containerd versions used in test to the latest of each
minor version. | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -315,22 +315,18 @@ else\nendif\n@$(call sudo,test/root:root_test,--runtime=$(RUNTIME) -test.v)\n-ifeq ($(CGROUPV2),false)\n-containerd-tests-min: containerd-test-1.3.9\n-else\n-containerd-tests-min: containerd-test-1.4.3\n-endif\n+containerd-tests-min: containerd-test-1.4.12\n-# The shim builds with containerd 1.3.9 and it's not backward compatible. Test\n-# with 1.3.9 and newer versions.\n-# When run under cgroupv2 environment, skip 1.3.9 as it does not support cgroupv2\n-containerd-tests: ## Runs all supported containerd version tests.\n-ifeq ($(CGROUPV2),false)\n-containerd-tests: containerd-test-1.3.9\n-endif\n-containerd-tests: containerd-test-1.4.3\n-containerd-tests: containerd-test-1.5.4\n-containerd-tests: containerd-test-1.6.0-rc.4\n+##\n+## Containerd tests.\n+##\n+## Runs all supported containerd version tests. Update as new versions become\n+## available.\n+##\n+containerd-tests:\n+containerd-tests: containerd-test-1.4.12\n+containerd-tests: containerd-test-1.5.9\n+containerd-tests: containerd-test-1.6.0\n##\n## Benchmarks.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Drop support for containerd 1.3
Update containerd versions used in test to the latest of each
minor version.
PiperOrigin-RevId: 431993047 |
259,858 | 02.03.2022 12:48:20 | 28,800 | 137468a8ebc7312755a09a2251935cf8cd577160 | Make release pipeline architecture-independent. | [
{
"change_type": "MODIFY",
"old_path": ".bazelrc",
"new_path": ".bazelrc",
"diff": "@@ -21,7 +21,12 @@ build --cxxopt=-std=c++17\n# Display the current git revision in the info block.\nbuild --stamp --workspace_status_command tools/workspace_status.sh\n+# Set flags for x86_64.\n+build:x86_64 --crosstool_top=@crosstool//:toolchains\n+build:x86_64 --cpu=k8\n+build:x86_64 --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64\n+\n# Set flags for aarch64.\n-build:cross-aarch64 --crosstool_top=@crosstool//:toolchains --compiler=gcc\n-build:cross-aarch64 --cpu=aarch64\n-build:cross-aarch64 --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64\n+build:aarch64 --crosstool_top=@crosstool//:toolchains\n+build:aarch64 --cpu=aarch64\n+build:aarch64 --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64\n"
},
{
"change_type": "MODIFY",
"old_path": ".buildkite/pipeline.yaml",
"new_path": ".buildkite/pipeline.yaml",
"diff": "@@ -97,8 +97,8 @@ steps:\n- <<: *common\nlabel: \":ship: Release tests\"\ncommands:\n- - make artifacts/x86_64\n- - make BAZEL_OPTIONS=--config=cross-aarch64 artifacts/aarch64\n+ - make BAZEL_OPTIONS=--config=x86_64 artifacts/x86_64\n+ - make BAZEL_OPTIONS=--config=aarch64 artifacts/aarch64\n- make release\n# Images tests.\n"
},
{
"change_type": "MODIFY",
"old_path": ".buildkite/release.yaml",
"new_path": ".buildkite/release.yaml",
"diff": "@@ -17,7 +17,7 @@ steps:\n- <<: *common\nlabel: \":ship: Push all images (x86_64)\"\ncommands:\n- - make push-all-images\n+ - make ARCH=x86_64 push-all-images\n- <<: *common\nlabel: \":ship: Push all images (aarch64)\"\ncommands:\n@@ -25,11 +25,12 @@ steps:\n- <<: *common\nlabel: \":ship: Release\"\ncommands:\n- - make artifacts/x86_64\n- - make BAZEL_OPTIONS=--config=cross-aarch64 artifacts/aarch64\n+ - make BAZEL_OPTIONS=--config=x86_64 artifacts/x86_64\n+ - make BAZEL_OPTIONS=--config=aarch64 artifacts/aarch64\n- make release RELEASE_NIGHTLY=$$RELEASE_NIGHTLY\n- cd repo && gsutil cp -r . gs://gvisor/releases/\n- <<: *common\nlabel: \":ship: Website Deploy\"\ncommands:\n- - make website-deploy\n+ # The built website image must be x86_64.\n+ - make BAZEL_OPTIONS=--config=x86_64 website-deploy\n"
},
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -240,7 +240,7 @@ do-tests: $(RUNTIME_BIN)\n@sudo $(RUNTIME_BIN) do true\n.PHONY: do-tests\n-arm-qemu-smoke-test: BAZEL_OPTIONS=--config=cross-aarch64\n+arm-qemu-smoke-test: BAZEL_OPTIONS=--config=aarch64\narm-qemu-smoke-test: $(RUNTIME_BIN) load-arm-qemu\nexport T=$$(mktemp -d --tmpdir release.XXXXXX); \\\nmkdir -p $$T/bin/arm64/ && \\\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make release pipeline architecture-independent.
PiperOrigin-RevId: 432006218 |
259,858 | 02.03.2022 13:04:04 | 28,800 | 6d66109a530277cb3e4864522b6401ac6b68bfa3 | Fix repository.
Some versions of gpg don't select the key as the default after import,
and it must be specified explicitly. Select the last imported key. | [
{
"change_type": "MODIFY",
"old_path": "tools/make_apt.sh",
"new_path": "tools/make_apt.sh",
"diff": "@@ -84,6 +84,12 @@ trap cleanup EXIT\ngpg \"${gpg_opts[@]}\" --import \"${private_key}\" || \\\ngpg \"${gpg_opts[@]}\" --import \"${private_key}\"\n+# Select the private key version. For some versions of gpg, it seems like some\n+# will fail with the \"no default secret\" error.\n+declare keyid\n+keyid=\"$(gpg --no-default-keyring --secret-keyring \"${keyring}\" --list-secret-keys | grep -E '^ ' | tail -1)\"\n+readonly keyid\n+\n# Copy the packages into the root.\nfor pkg in \"$@\"; do\nif ! [[ -f \"${pkg}\" ]]; then\n@@ -105,12 +111,14 @@ for pkg in \"$@\"; do\n# Copy & sign the package.\nmkdir -p \"${destdir}\"\ncp -a -L \"$(dirname \"${pkg}\")/${name}.deb\" \"${destdir}\"\n+ if [[ -f \"$(dirname \"${pkg}\")/${name}.changes\" ]]; then\ncp -a -L \"$(dirname \"${pkg}\")/${name}.changes\" \"${destdir}\"\n+ fi\nchmod 0644 \"${destdir}\"/\"${name}\".*\n# Sign a package only if it isn't signed yet.\n# We use [*] here to expand the gpg_opts array into a single shell-word.\ndpkg-sig -g \"${gpg_opts[*]}\" --verify \"${destdir}/${name}.deb\" ||\n- dpkg-sig -g \"${gpg_opts[*]}\" --sign builder \"${destdir}/${name}.deb\"\n+ dpkg-sig -g \"${gpg_opts[*]}\" --sign builder -k \"${keyid}\" \"${destdir}/${name}.deb\"\ndone\n# Build the package list.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix repository.
Some versions of gpg don't select the key as the default after import,
and it must be specified explicitly. Select the last imported key.
PiperOrigin-RevId: 432009606 |
259,858 | 02.03.2022 17:23:35 | 28,800 | 17b93a84fae9c92cbc7c0dbb3748eb80484e3b1e | Attempt to fix gpg package signing (redux).
The semantics for gpg seem to vary in subtle ways across systems. This
attempts to identify the key fingerprint on import, since listing the
secret keys afterwards does not appear to have the intended effect. | [
{
"change_type": "MODIFY",
"old_path": "tools/make_apt.sh",
"new_path": "tools/make_apt.sh",
"diff": "@@ -81,13 +81,14 @@ trap cleanup EXIT\n# is not found. This isn't actually a failure for us, because we don't require\n# the public key (this may be stored separately). The second import will succeed\n# because, in reality, the first import succeeded and it's a no-op.\n-gpg \"${gpg_opts[@]}\" --import \"${private_key}\" || \\\n- gpg \"${gpg_opts[@]}\" --import \"${private_key}\"\n-\n-# Select the private key version. For some versions of gpg, it seems like some\n-# will fail with the \"no default secret\" error.\ndeclare keyid\n-keyid=\"$(gpg --no-default-keyring --secret-keyring \"${keyring}\" --list-secret-keys | grep -E '^ ' | tail -1)\"\n+keyid=$(\n+ (gpg \"${gpg_opts[@]}\" --import \"${private_key}\" 2>&1 ||\n+ gpg \"${gpg_opts[@]}\" --import \"${private_key}\" 2>&1) |\n+ grep \"secret key imported\" |\n+ head -1 |\n+ cut -d':' -f2 |\n+ awk '{print $2;}')\nreadonly keyid\n# Copy the packages into the root.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Attempt to fix gpg package signing (redux).
The semantics for gpg seem to vary in subtle ways across systems. This
attempts to identify the key fingerprint on import, since listing the
secret keys afterwards does not appear to have the intended effect.
PiperOrigin-RevId: 432067329 |
259,977 | 03.03.2022 12:52:35 | 28,800 | 15898a341f036322aa7f66027fd285066a1278d8 | Move erroneously placed comment in UDP endpoint | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/endpoint.go",
"new_path": "pkg/tcpip/transport/udp/endpoint.go",
"diff": "@@ -922,7 +922,7 @@ func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketB\ne.stats.PacketsReceived.Increment()\ne.rcvMu.Lock()\n- // Drop the packet if our buffer is currently full.\n+ // Drop the packet if our buffer is not ready to receive packets.\nif !e.rcvReady || e.rcvClosed {\ne.rcvMu.Unlock()\ne.stack.Stats().UDP.ReceiveBufferErrors.Increment()\n@@ -931,6 +931,7 @@ func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketB\n}\nrcvBufSize := e.ops.GetReceiveBufferSize()\n+ // Drop the packet if our buffer is currently full.\nif e.frozen || e.rcvBufSize >= int(rcvBufSize) {\ne.rcvMu.Unlock()\ne.stack.Stats().UDP.ReceiveBufferErrors.Increment()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Move erroneously placed comment in UDP endpoint
PiperOrigin-RevId: 432261816 |
259,858 | 03.03.2022 20:21:50 | 28,800 | c8b0aadfa9c3698b595dbcfc1e259af21e32dc8f | Force apt to not bring up an ncurses menu. | [
{
"change_type": "MODIFY",
"old_path": ".buildkite/hooks/pre-command",
"new_path": ".buildkite/hooks/pre-command",
"diff": "function install_pkgs() {\nexport DEBIAN_FRONTEND=noninteractive\nwhile true; do\n- if sudo -E apt-get update && sudo -E apt-get install -y \"$@\"; then\n+ if sudo -E apt-get update && \\\n+ sudo -E apt-get install -y \\\n+ -o Dpkg::Options::=--force-confold \\\n+ -o Dpkg::Options::=--force-confdef \\\n+ --allow-downgrades \\\n+ --allow-remove-essential \\\n+ --allow-change-held-packages \\\n+ \"$@\"; then\nbreak\nfi\ndone\n"
}
] | Go | Apache License 2.0 | google/gvisor | Force apt to not bring up an ncurses menu.
PiperOrigin-RevId: 432347975 |
259,885 | 04.03.2022 11:40:40 | 28,800 | 0b81a0d7b2304d23ba60ba9453d8468d63dc16a2 | Don't panic in overlay.filesystem.UnlinkAt unless necessary. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/overlay/filesystem.go",
"new_path": "pkg/sentry/fsimpl/overlay/filesystem.go",
"diff": "@@ -1215,8 +1215,8 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa\nStart: replaced.upperVD,\nPath: fspath.Parse(whiteoutName),\n}); err != nil {\n- cleanupRecreateWhiteouts()\nvfsObj.AbortRenameDentry(&renamed.vfsd, replacedVFSD)\n+ cleanupRecreateWhiteouts()\nreturn err\n}\n}\n@@ -1241,8 +1241,8 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa\nPath: fspath.Parse(oldName),\n}\nif err := vfsObj.RenameAt(ctx, creds, &oldpop, &newpop, &opts); err != nil {\n- cleanupRecreateWhiteouts()\nvfsObj.AbortRenameDentry(&renamed.vfsd, replacedVFSD)\n+ cleanupRecreateWhiteouts()\nreturn err\n}\n@@ -1379,23 +1379,27 @@ func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error\nStart: child.upperVD,\nPath: fspath.Parse(whiteoutName),\n}); err != nil {\n- cleanupRecreateWhiteouts()\nvfsObj.AbortDeleteDentry(&child.vfsd)\n+ cleanupRecreateWhiteouts()\nreturn err\n}\n}\n// Remove the existing directory on the upper layer.\nif err := vfsObj.RmdirAt(ctx, fs.creds, &pop); err != nil {\n- cleanupRecreateWhiteouts()\nvfsObj.AbortDeleteDentry(&child.vfsd)\n+ cleanupRecreateWhiteouts()\nreturn err\n}\n}\nif err := fs.createWhiteout(ctx, vfsObj, &pop); err != nil {\n+ vfsObj.AbortDeleteDentry(&child.vfsd)\n+ if child.upperVD.Ok() {\n// Don't attempt to recover from this: the original directory is\n// already gone, so any dentries representing it are invalid, and\n// creating a new directory won't undo that.\n- panic(fmt.Sprintf(\"unrecoverable overlayfs inconsistency: failed to create whiteout during RmdirAt: %v\", err))\n+ panic(fmt.Sprintf(\"unrecoverable overlayfs inconsistency: failed to create whiteout after removing upper layer directory during RmdirAt: %v\", err))\n+ }\n+ return err\n}\nvfsObj.CommitDeleteDentry(ctx, &child.vfsd)\n@@ -1597,24 +1601,22 @@ func (fs *filesystem) UnlinkAt(ctx context.Context, rp *vfs.ResolvingPath) error\nif childLayer == lookupLayerUpper {\n// Remove the existing file on the upper layer.\nif err := vfsObj.UnlinkAt(ctx, fs.creds, &pop); err != nil {\n- if child != nil {\nvfsObj.AbortDeleteDentry(&child.vfsd)\n- }\nreturn err\n}\n}\nif err := fs.createWhiteout(ctx, vfsObj, &pop); err != nil {\n- panic(fmt.Sprintf(\"unrecoverable overlayfs inconsistency: failed to create whiteout during UnlinkAt: %v\", err))\n+ vfsObj.AbortDeleteDentry(&child.vfsd)\n+ if childLayer == lookupLayerUpper {\n+ panic(fmt.Sprintf(\"unrecoverable overlayfs inconsistency: failed to create whiteout after unlinking upper layer file during UnlinkAt: %v\", err))\n+ }\n+ return err\n}\n- var cw *vfs.Watches\n- if child != nil {\nvfsObj.CommitDeleteDentry(ctx, &child.vfsd)\ndelete(parent.children, name)\nds = appendDentry(ds, child)\n- cw = &child.watches\n- }\n- vfs.InotifyRemoveChild(ctx, cw, &parent.watches, name)\n+ vfs.InotifyRemoveChild(ctx, &child.watches, &parent.watches, name)\nparent.dirents = nil\nreturn nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't panic in overlay.filesystem.UnlinkAt unless necessary.
PiperOrigin-RevId: 432496516 |
259,907 | 05.03.2022 11:14:41 | 28,800 | e3f424c5c51c2d8c6a91624326cf55c372391516 | Set DEBIAN_FRONTEND to be noninteractive when installing containerd.
This was causing annoying timeout issues on BuildKite, where the cgroupsv2
agents on the ubuntu-2110 image would get stuck indefinitely.
Also roll back the experimental changes in the pre-command hook to simplify
scripts and make things consistent. | [
{
"change_type": "MODIFY",
"old_path": ".buildkite/hooks/pre-command",
"new_path": ".buildkite/hooks/pre-command",
"diff": "@@ -4,13 +4,7 @@ function install_pkgs() {\nexport DEBIAN_FRONTEND=noninteractive\nwhile true; do\nif sudo -E apt-get update && \\\n- sudo -E apt-get install -y \\\n- -o Dpkg::Options::=--force-confold \\\n- -o Dpkg::Options::=--force-confdef \\\n- --allow-downgrades \\\n- --allow-remove-essential \\\n- --allow-change-held-packages \\\n- \"$@\"; then\n+ sudo -E apt-get install -y \"$@\"; then\nbreak\nfi\ndone\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/install_containerd.sh",
"new_path": "tools/install_containerd.sh",
"diff": "@@ -62,6 +62,7 @@ fi\nreadonly BTRFS_DEV\n# Install dependencies for the crictl tests.\n+export DEBIAN_FRONTEND=noninteractive\nwhile true; do\nif (apt-get update && apt-get install -y \\\n\"${BTRFS_DEV}\" \\\n"
}
] | Go | Apache License 2.0 | google/gvisor | Set DEBIAN_FRONTEND to be noninteractive when installing containerd.
This was causing annoying timeout issues on BuildKite, where the cgroupsv2
agents on the ubuntu-2110 image would get stuck indefinitely.
Also roll back the experimental changes in the pre-command hook to simplify
scripts and make things consistent.
PiperOrigin-RevId: 432671125 |
259,885 | 07.03.2022 10:40:14 | 28,800 | 81d384cfe9d38e940e73ad7dad3a8e4de5f06086 | Fix race between epoll readiness check and re-readying. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/epoll/epoll.go",
"new_path": "pkg/sentry/kernel/epoll/epoll.go",
"diff": "@@ -77,6 +77,8 @@ type pollEntry struct {\n// in-struct pointers. Instead, EventPoll will properly set this field\n// in its loading logic.\ncurList *pollEntryList `state:\"nosave\"`\n+\n+ readySeq uint32\n}\n// WeakRefGone implements refs.WeakRefUser.WeakRefGone.\n@@ -130,6 +132,12 @@ type EventPoll struct {\nreadyList pollEntryList\nwaitingList pollEntryList\ndisabledList pollEntryList\n+\n+ // readySeq is used to detect calls to pollEntry.NotifyEvent() while\n+ // eventsAvailable() or ReadEvents() are running with listsMu unlocked.\n+ // readySeq is protected by both mu and listsMu; reading requires either\n+ // mutex to be locked, but mutation requires both mutexes to be locked.\n+ readySeq uint32\n}\n// cycleMu is used to serialize all the cycle checks. This is only used when\n@@ -198,6 +206,7 @@ func (e *EventPoll) eventsAvailable() bool {\n)\ne.listsMu.Lock()\nreadyList.PushBackList(&e.readyList)\n+ e.readySeq++\ne.listsMu.Unlock()\nif readyList.Empty() {\nreturn false\n@@ -205,9 +214,17 @@ func (e *EventPoll) eventsAvailable() bool {\ndefer func() {\ne.listsMu.Lock()\ne.readyList.PushFrontList(&readyList)\n- for entry := waitingList.Front(); entry != nil; entry = entry.Next() {\n+ var next *pollEntry\n+ for entry := waitingList.Front(); entry != nil; entry = next {\n+ next = entry.Next()\n+ if entry.readySeq == e.readySeq {\n+ // entry.NotifyEvent() was called while we were running.\n+ waitingList.Remove(entry)\n+ e.readyList.PushBack(entry)\n+ } else {\nentry.curList = &e.waitingList\n}\n+ }\ne.waitingList.PushBackList(&waitingList)\ne.listsMu.Unlock()\n}()\n@@ -257,13 +274,14 @@ func (e *EventPoll) ReadEvents(max int) []linux.EpollEvent {\n// pollEntry.NotifyEvent() doesn't touch pollEntryEntry.\nvar (\nreadyList pollEntryList\n- requeueList pollEntryList\nwaitingList pollEntryList\n+ requeueList pollEntryList\ndisabledList pollEntryList\nret []linux.EpollEvent\n)\ne.listsMu.Lock()\nreadyList.PushBackList(&e.readyList)\n+ e.readySeq++\ne.listsMu.Unlock()\nif readyList.Empty() {\nreturn nil\n@@ -271,10 +289,18 @@ func (e *EventPoll) ReadEvents(max int) []linux.EpollEvent {\ndefer func() {\ne.listsMu.Lock()\ne.readyList.PushFrontList(&readyList)\n- e.readyList.PushBackList(&requeueList)\n- for entry := waitingList.Front(); entry != nil; entry = entry.Next() {\n+ var next *pollEntry\n+ for entry := waitingList.Front(); entry != nil; entry = next {\n+ next = entry.Next()\n+ if entry.readySeq == e.readySeq {\n+ // entry.NotifyEvent() was called while we were running.\n+ waitingList.Remove(entry)\n+ e.readyList.PushBack(entry)\n+ } else {\nentry.curList = &e.waitingList\n}\n+ }\n+ e.readyList.PushBackList(&requeueList)\ne.waitingList.PushBackList(&waitingList)\nfor entry := disabledList.Front(); entry != nil; entry = entry.Next() {\nentry.curList = &e.disabledList\n@@ -332,6 +358,8 @@ func (p *pollEntry) NotifyEvent(waiter.EventMask) {\ne.listsMu.Lock()\n+ p.readySeq = e.readySeq\n+\nif p.curList == &e.waitingList {\ne.waitingList.Remove(p)\ne.readyList.PushBack(p)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/epoll.go",
"new_path": "pkg/sentry/vfs/epoll.go",
"diff": "@@ -47,7 +47,7 @@ type EpollInstance struct {\n// EpollInstance for monitoring.\ninterest map[epollInterestKey]*epollInterest\n- // readyMu protects ready, epollInterest.ready, and\n+ // readyMu protects ready, readySeq, epollInterest.ready, and\n// epollInterest.epollInterestEntry. ready is analogous to Linux's struct\n// eventpoll::lock.\nreadyMu sync.Mutex `state:\"nosave\"`\n@@ -61,6 +61,12 @@ type EpollInstance struct {\n// because it focuses on a set of file descriptors that are already known\n// to be ready.\" - epoll_wait(2)\nready epollInterestList\n+\n+ // readySeq is used to detect calls to epollInterest.NotifyEvent() while\n+ // Readiness() or ReadEvents() are running with readyMu unlocked. readySeq\n+ // is protected by both interestMu and readyMu; reading requires either\n+ // mutex to be locked, but mutation requires both mutexes to be locked.\n+ readySeq uint32\n}\n// +stateify savable\n@@ -93,10 +99,12 @@ type epollInterest struct {\n// flags EPOLLET and EPOLLONESHOT. mask is protected by epoll.interestMu.\nmask uint32\n- // ready is true if epollInterestEntry is linked into epoll.ready. ready\n- // and epollInterestEntry are protected by epoll.readyMu.\n+ // ready is true if epollInterestEntry is linked into epoll.ready. readySeq\n+ // is the value of epoll.readySeq when NotifyEvent() was last called.\n+ // ready, epollInterestEntry, and readySeq are protected by epoll.readyMu.\nready bool\nepollInterestEntry\n+ readySeq uint32\n// userData is the struct epoll_event::data associated with this\n// epollInterest. userData is protected by epoll.interestMu.\n@@ -156,6 +164,7 @@ func (ep *EpollInstance) Readiness(mask waiter.EventMask) waiter.EventMask {\n)\nep.readyMu.Lock()\nready.PushBackList(&ep.ready)\n+ ep.readySeq++\nep.readyMu.Unlock()\nif ready.Empty() {\nreturn 0\n@@ -163,9 +172,17 @@ func (ep *EpollInstance) Readiness(mask waiter.EventMask) waiter.EventMask {\ndefer func() {\nep.readyMu.Lock()\nep.ready.PushFrontList(&ready)\n- for epi := notReady.Front(); epi != nil; epi = epi.Next() {\n+ var next *epollInterest\n+ for epi := notReady.Front(); epi != nil; epi = next {\n+ next = epi.Next()\n+ if epi.readySeq == ep.readySeq {\n+ // epi.NotifyEvent() was called while we were running.\n+ notReady.Remove(epi)\n+ ep.ready.PushBack(epi)\n+ } else {\nepi.ready = false\n}\n+ }\nep.readyMu.Unlock()\n}()\n@@ -369,6 +386,7 @@ func (epi *epollInterest) NotifyEvent(waiter.EventMask) {\nepi.ready = true\nepi.epoll.ready.PushBack(epi)\n}\n+ epi.readySeq = epi.epoll.readySeq\nepi.epoll.readyMu.Unlock()\nif newReady {\nepi.epoll.q.Notify(waiter.ReadableEvents)\n@@ -399,11 +417,12 @@ func (ep *EpollInstance) ReadEvents(events []linux.EpollEvent, maxEvents int) []\ndefer ep.interestMu.Unlock()\nvar (\nready epollInterestList\n- requeue epollInterestList\nnotReady epollInterestList\n+ requeue epollInterestList\n)\nep.readyMu.Lock()\nready.PushBackList(&ep.ready)\n+ ep.readySeq++\nep.readyMu.Unlock()\nif ready.Empty() {\nreturn nil\n@@ -414,10 +433,18 @@ func (ep *EpollInstance) ReadEvents(events []linux.EpollEvent, maxEvents int) []\n// ep.ready. 
epollInterests that were ready are re-inserted at the end\n// for reasons described by EpollInstance.ready.\nep.ready.PushFrontList(&ready)\n- ep.ready.PushBackList(&requeue)\n- for epi := notReady.Front(); epi != nil; epi = epi.Next() {\n+ var next *epollInterest\n+ for epi := notReady.Front(); epi != nil; epi = next {\n+ next = epi.Next()\n+ if epi.readySeq == ep.readySeq {\n+ // epi.NotifyEvent() was called while we were running.\n+ notReady.Remove(epi)\n+ ep.ready.PushBack(epi)\n+ } else {\nepi.ready = false\n}\n+ }\n+ ep.ready.PushBackList(&requeue)\nep.readyMu.Unlock()\n}()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix race between epoll readiness check and re-readying.
PiperOrigin-RevId: 432982236 |
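The fix above hinges on a sequence counter: a readiness scan bumps readySeq before dropping the lock, each notification stamps its entry with the counter it observed, and the scan re-queues any entry stamped during its unlocked window instead of demoting it. What follows is a minimal, self-contained Go sketch of that pattern, assuming simplified types and names (entry, queue, notify, scan) rather than gVisor's actual epoll structures.

package main

import (
    "fmt"
    "sync"
)

// entry is one monitored item. ready is true while the item is considered
// ready; readySeq records the queue's readySeq seen by the last notify().
type entry struct {
    ready    bool
    readySeq uint32
}

// queue mimics the ready-list bookkeeping in the epoll fix above.
type queue struct {
    mu       sync.Mutex
    ready    []*entry
    readySeq uint32
}

// notify marks e ready and stamps it with the current scan sequence.
func (q *queue) notify(e *entry) {
    q.mu.Lock()
    defer q.mu.Unlock()
    e.readySeq = q.readySeq
    if !e.ready {
        e.ready = true
        q.ready = append(q.ready, e)
    }
}

// scan drains the ready list, evaluates entries with the lock dropped, and
// only demotes an entry if no notify() raced with the evaluation.
func (q *queue) scan(stillReady func(*entry) bool) (triggered []*entry) {
    q.mu.Lock()
    drained := q.ready
    q.ready = nil
    q.readySeq++
    seq := q.readySeq
    q.mu.Unlock()

    for _, e := range drained {
        if stillReady(e) { // lock is not held here; notify() may run now
            triggered = append(triggered, e)
        }
    }

    q.mu.Lock()
    for _, e := range drained {
        if e.readySeq == seq {
            // notify() fired during the unlocked window: keep the entry queued.
            q.ready = append(q.ready, e)
        } else {
            e.ready = false
        }
    }
    q.mu.Unlock()
    return triggered
}

func main() {
    q := &queue{}
    e := &entry{}
    q.notify(e)
    // Simulate a notification racing with the scan's unlocked window.
    got := q.scan(func(e *entry) bool {
        q.notify(e)
        return false
    })
    fmt.Printf("triggered=%d stillQueued=%v\n", len(got), e.ready) // triggered=0 stillQueued=true
}

The same idea is applied twice in the diffs above, once in the legacy epoll implementation and once in VFS2's EpollInstance.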
259,951 | 07.03.2022 11:53:51 | 28,800 | 90bf1c1ff7a22142bcf5c22fd5901d5ab33a8a7c | Expose endpoint's state for raw and icmp sockets
The datagram endpoint state is valid and set for these endpoints, so we
can expose it (as we do for udp endpoints). | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/BUILD",
"new_path": "pkg/tcpip/transport/BUILD",
"diff": "-load(\"//tools:defs.bzl\", \"go_library\")\n+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\npackage(licenses = [\"notice\"])\n@@ -11,3 +11,22 @@ go_library(\nvisibility = [\"//visibility:public\"],\ndeps = [\"//pkg/tcpip\"],\n)\n+\n+go_test(\n+ name = \"datagram_test\",\n+ size = \"small\",\n+ srcs = [\"datagram_test.go\"],\n+ deps = [\n+ \":transport\",\n+ \"//pkg/tcpip\",\n+ \"//pkg/tcpip/header\",\n+ \"//pkg/tcpip/link/loopback\",\n+ \"//pkg/tcpip/network/ipv4\",\n+ \"//pkg/tcpip/stack\",\n+ \"//pkg/tcpip/testutil\",\n+ \"//pkg/tcpip/transport/icmp\",\n+ \"//pkg/tcpip/transport/raw\",\n+ \"//pkg/tcpip/transport/udp\",\n+ \"//pkg/waiter\",\n+ ],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/transport/datagram_test.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package datagram_test has tests shared by datagram-based transport endpoints.\n+package datagram_test\n+\n+import (\n+ \"fmt\"\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/pkg/tcpip\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/link/loopback\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/network/ipv4\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/stack\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/testutil\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/transport\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/transport/icmp\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/transport/raw\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/transport/udp\"\n+ \"gvisor.dev/gvisor/pkg/waiter\"\n+)\n+\n+func TestStateUpdates(t *testing.T) {\n+ const nicID = 1\n+\n+ for _, test := range []struct {\n+ name string\n+ createEndpoint func(*stack.Stack) (tcpip.Endpoint, error)\n+ }{\n+ {\n+ name: \"UDP\",\n+ createEndpoint: func(s *stack.Stack) (tcpip.Endpoint, error) {\n+ ep, err := s.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})\n+ if err != nil {\n+ return nil, fmt.Errorf(\"s.NewEndpoint(%d, %d, _) failed: %s\", udp.ProtocolNumber, ipv4.ProtocolNumber, err)\n+ }\n+ return ep, nil\n+ },\n+ },\n+ {\n+ name: \"ICMP\",\n+ createEndpoint: func(s *stack.Stack) (tcpip.Endpoint, error) {\n+ ep, err := s.NewEndpoint(icmp.ProtocolNumber4, ipv4.ProtocolNumber, &waiter.Queue{})\n+ if err != nil {\n+ return nil, fmt.Errorf(\"s.NewEndpoint(%d, %d, _) failed: %s\", icmp.ProtocolNumber4, ipv4.ProtocolNumber, err)\n+ }\n+ return ep, nil\n+ },\n+ },\n+ {\n+ name: \"RAW\",\n+ createEndpoint: func(s *stack.Stack) (tcpip.Endpoint, error) {\n+ ep, err := s.NewRawEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{}, true /* associated */)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"s.NewRawEndpoint(%d, %d, _, true) failed: %s\", udp.ProtocolNumber, ipv4.ProtocolNumber, err)\n+ }\n+ return ep, nil\n+ },\n+ },\n+ } {\n+ t.Run(test.name, func(t *testing.T) {\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},\n+ TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol, icmp.NewProtocol4},\n+ RawFactory: &raw.EndpointFactory{},\n+ })\n+ if err := s.CreateNIC(nicID, loopback.New()); err != nil {\n+ t.Fatalf(\"s.CreateNIC(%d, loopback.New()) failed: %s\", nicID, err)\n+ }\n+ ep, err := test.createEndpoint(s)\n+ if err != nil {\n+ t.Fatalf(\"test.createEndpoint(_) failed: %s\", err)\n+ }\n+ // The endpoint may be closed during the test, but closing twice is\n+ // expected to be a no-op.\n+ defer ep.Close()\n+\n+ if got, want := transport.DatagramEndpointState(ep.State()), transport.DatagramEndpointStateInitial; got != want {\n+ t.Errorf(\"got ep.State() = %s, want = %s\", got, want)\n+ }\n+\n+ addr := tcpip.ProtocolAddress{\n+ Protocol: ipv4.ProtocolNumber,\n+ AddressWithPrefix: testutil.MustParse4(\"1.2.3.4\").WithPrefix(),\n+ }\n+ if err := 
s.AddProtocolAddress(nicID, addr, stack.AddressProperties{}); err != nil {\n+ t.Fatalf(\"AddProtocolAddress(%d, %#v, {}): %s\", nicID, addr, err)\n+ }\n+ s.SetRouteTable([]tcpip.Route{\n+ {\n+ Destination: header.IPv4EmptySubnet,\n+ NIC: nicID,\n+ },\n+ })\n+\n+ if err := ep.Bind(tcpip.FullAddress{}); err != nil {\n+ t.Fatalf(\"ep.Bind(...) failed: %s\", err)\n+ }\n+ if got, want := transport.DatagramEndpointState(ep.State()), transport.DatagramEndpointStateBound; got != want {\n+ t.Errorf(\"got ep.State() = %s, want = %s\", got, want)\n+ }\n+\n+ if err := ep.Connect(tcpip.FullAddress{NIC: nicID, Addr: testutil.MustParse4(\"1.0.0.1\"), Port: 12345}); err != nil {\n+ t.Fatalf(\"ep.Connect(...) failed: %s\", err)\n+ }\n+ if got, want := transport.DatagramEndpointState(ep.State()), transport.DatagramEndpointStateConnected; got != want {\n+ t.Errorf(\"got ep.State() = %s, want = %s\", got, want)\n+ }\n+\n+ ep.Close()\n+ if got, want := transport.DatagramEndpointState(ep.State()), transport.DatagramEndpointStateClosed; got != want {\n+ t.Errorf(\"got ep.State() = %s, want = %s\", got, want)\n+ }\n+ })\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/endpoint.go",
"new_path": "pkg/tcpip/transport/icmp/endpoint.go",
"diff": "@@ -770,7 +770,7 @@ func (*endpoint) HandleError(stack.TransportError, *stack.PacketBuffer) {}\n// State implements tcpip.Endpoint.State. The ICMP endpoint currently doesn't\n// expose internal socket state.\nfunc (e *endpoint) State() uint32 {\n- return 0\n+ return uint32(e.net.State())\n}\n// Info returns a copy of the endpoint info.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/endpoint.go",
"new_path": "pkg/tcpip/transport/raw/endpoint.go",
"diff": "@@ -713,7 +713,7 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\n// State implements socket.Socket.State.\nfunc (e *endpoint) State() uint32 {\n- return 0\n+ return uint32(e.net.State())\n}\n// Info returns a copy of the endpoint info.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Expose endpoint's state for raw and icmp sockets
The datagram endpoint state is valid and set for these endpoints, so we
can expose it (as we do for udp endpoints).
PiperOrigin-RevId: 433001691 |
259,907 | 07.03.2022 13:21:32 | 28,800 | 21a689b36fb0188162187adb4d79122c3b5ba7b1 | Tolerate channel creation error in lisafs.
We might get a cranky server which does not want to allocate too many channels.
On the server, the number of channels allowed is controlled by GOMAXPROCS and
might vary from 2 to 4.
Updates #6313 | [
{
"change_type": "MODIFY",
"old_path": "pkg/lisafs/client.go",
"new_path": "pkg/lisafs/client.go",
"diff": "@@ -122,16 +122,13 @@ func NewClient(sock *unet.Socket) (*Client, Inode, error) {\n// channels and costly initialization like flipcall.Endpoint.Connect can\n// proceed parallely.\nvar channelsWg sync.WaitGroup\n- channelErrs := make([]error, maxChans)\nfor i := 0; i < maxChans; i++ {\nchannelsWg.Add(1)\n- curChanID := i\ngo func() {\ndefer channelsWg.Done()\nch, err := c.createChannel()\nif err != nil {\nlog.Warningf(\"channel creation failed: %v\", err)\n- channelErrs[curChanID] = err\nreturn\n}\nc.channelsMu.Lock()\n@@ -142,14 +139,7 @@ func NewClient(sock *unet.Socket) (*Client, Inode, error) {\n}\nchannelsWg.Wait()\n- for _, channelErr := range channelErrs {\n- // Return the first non-nil channel creation error.\n- if channelErr != nil {\n- return nil, Inode{}, channelErr\n- }\n- }\ncu.Release()\n-\nreturn c, mountResp.Root, nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Tolerate channel creation error in lisafs.
We might get a cranky server which does not want to allocate too many channels.
On the server, the number of channels allowed is controlled by GOMAXPROCS and might
vary from 2 to 4.
Updates #6313
PiperOrigin-RevId: 433023166 |
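The change above follows a small pattern: start the channel RPCs in parallel, tolerate individual failures, and fail only if nothing usable was created. Here is a hedged Go sketch of that shape, assuming simplified stand-ins (createChannel, setupChannels) rather than the real lisafs client API.

package main

import (
    "errors"
    "fmt"
    "sync"
)

var errServerBusy = errors.New("server hit max channels limit")

// createChannel stands in for the real channel RPC; here odd slots fail so the
// partial-failure path is exercised.
func createChannel(i int) (int, error) {
    if i%2 == 1 {
        return 0, errServerBusy
    }
    return i, nil
}

// setupChannels creates up to maxChans channels in parallel. Individual
// failures are tolerated; it errors only if every attempt failed.
func setupChannels(maxChans int) ([]int, error) {
    var (
        mu       sync.Mutex
        channels []int
        wg       sync.WaitGroup
    )
    for i := 0; i < maxChans; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            ch, err := createChannel(i)
            if err != nil {
                // Log and move on; the server may legitimately refuse some channels.
                fmt.Printf("channel %d creation failed: %v\n", i, err)
                return
            }
            mu.Lock()
            channels = append(channels, ch)
            mu.Unlock()
        }(i)
    }
    wg.Wait()
    if maxChans > 0 && len(channels) == 0 {
        return nil, errors.New("all channel RPCs failed")
    }
    return channels, nil
}

func main() {
    chans, err := setupChannels(4)
    fmt.Println(len(chans), err) // prints: 2 <nil> (two attempts failed, which is fine)
}

The aggregate "all channel RPCs failed" check in the sketch mirrors the one added in the follow-up lisafs ENOMEM commit further down in this log.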
259,907 | 07.03.2022 17:25:06 | 28,800 | 3d5201528603693b3383adc656acee0fe460b099 | Get rid of FSync RPC batching in gofer client with lisafs.
This optimization was not used correctly, as described in the change. We could
use it correctly to optimize sync(2) and syncfs(2). But that substantially
increases code complexity. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"new_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"diff": "@@ -56,15 +56,16 @@ func (fs *filesystem) Sync(ctx context.Context) error {\nvar retErr error\nif fs.opts.lisaEnabled {\n- // Try accumulating all FDIDs to fsync and fsync then via one RPC as\n- // opposed to making an RPC per FDID. Passing a non-nil accFsyncFDIDs to\n- // dentry.syncCachedFile() and specialFileFD.sync() will cause them to not\n- // make an RPC, instead accumulate syncable FDIDs in the passed slice.\n- accFsyncFDIDs := make([]lisafs.FDID, 0, len(ds)+len(sffds))\n+ // Note that lisafs is capable of batching FSync RPCs. However, we can not\n+ // batch all the FDIDs to be synced from ds and sffds. Because the error\n+ // handling varies based on file type. FSync errors are only considered for\n+ // regular file FDIDs that were opened for writing. We could do individual\n+ // RPCs for such FDIDs and batch the rest, but it increases code complexity\n+ // substantially. We could implement it in the future if need be.\n// Sync syncable dentries.\nfor _, d := range ds {\n- if err := d.syncCachedFile(ctx, true /* forFilesystemSync */, &accFsyncFDIDs); err != nil {\n+ if err := d.syncCachedFile(ctx, true /* forFilesystemSync */); err != nil {\nctx.Infof(\"gofer.filesystem.Sync: dentry.syncCachedFile failed: %v\", err)\nif retErr == nil {\nretErr = err\n@@ -75,7 +76,7 @@ func (fs *filesystem) Sync(ctx context.Context) error {\n// Sync special files, which may be writable but do not use dentry shared\n// handles (so they won't be synced by the above).\nfor _, sffd := range sffds {\n- if err := sffd.sync(ctx, true /* forFilesystemSync */, &accFsyncFDIDs); err != nil {\n+ if err := sffd.sync(ctx, true /* forFilesystemSync */); err != nil {\nctx.Infof(\"gofer.filesystem.Sync: specialFileFD.sync failed: %v\", err)\nif retErr == nil {\nretErr = err\n@@ -83,19 +84,12 @@ func (fs *filesystem) Sync(ctx context.Context) error {\n}\n}\n- if err := fs.clientLisa.SyncFDs(ctx, accFsyncFDIDs); err != nil {\n- ctx.Infof(\"gofer.filesystem.Sync: fs.fsyncMultipleFDLisa failed: %v\", err)\n- if retErr == nil {\n- retErr = err\n- }\n- }\n-\nreturn retErr\n}\n// Sync syncable dentries.\nfor _, d := range ds {\n- if err := d.syncCachedFile(ctx, true /* forFilesystemSync */, nil /* accFsyncFDIDsLisa */); err != nil {\n+ if err := d.syncCachedFile(ctx, true /* forFilesystemSync */); err != nil {\nctx.Infof(\"gofer.filesystem.Sync: dentry.syncCachedFile failed: %v\", err)\nif retErr == nil {\nretErr = err\n@@ -106,7 +100,7 @@ func (fs *filesystem) Sync(ctx context.Context) error {\n// Sync special files, which may be writable but do not use dentry shared\n// handles (so they won't be synced by the above).\nfor _, sffd := range sffds {\n- if err := sffd.sync(ctx, true /* forFilesystemSync */, nil /* accFsyncFDIDsLisa */); err != nil {\n+ if err := sffd.sync(ctx, true /* forFilesystemSync */); err != nil {\nctx.Infof(\"gofer.filesystem.Sync: specialFileFD.sync failed: %v\", err)\nif retErr == nil {\nretErr = err\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"new_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"diff": "@@ -2384,11 +2384,11 @@ func (d *dentry) writeHandleLocked() handle {\nfunc (d *dentry) syncRemoteFile(ctx context.Context) error {\nd.handleMu.RLock()\ndefer d.handleMu.RUnlock()\n- return d.syncRemoteFileLocked(ctx, nil /* accFsyncFDIDsLisa */)\n+ return d.syncRemoteFileLocked(ctx)\n}\n// Preconditions: d.handleMu must be locked.\n-func (d *dentry) syncRemoteFileLocked(ctx context.Context, accFsyncFDIDsLisa *[]lisafs.FDID) error {\n+func (d *dentry) syncRemoteFileLocked(ctx context.Context) error {\n// If we have a host FD, fsyncing it is likely to be faster than an fsync\n// RPC. Prefer syncing write handles over read handles, since some remote\n// filesystem implementations may not sync changes made through write\n@@ -2400,10 +2400,6 @@ func (d *dentry) syncRemoteFileLocked(ctx context.Context, accFsyncFDIDsLisa *[]\nreturn err\n}\nif d.fs.opts.lisaEnabled && d.writeFDLisa.Ok() {\n- if accFsyncFDIDsLisa != nil {\n- *accFsyncFDIDsLisa = append(*accFsyncFDIDsLisa, d.writeFDLisa.ID())\n- return nil\n- }\nreturn d.writeFDLisa.Sync(ctx)\n} else if !d.fs.opts.lisaEnabled && !d.writeFile.isNil() {\nreturn d.writeFile.fsync(ctx)\n@@ -2415,10 +2411,6 @@ func (d *dentry) syncRemoteFileLocked(ctx context.Context, accFsyncFDIDsLisa *[]\nreturn err\n}\nif d.fs.opts.lisaEnabled && d.readFDLisa.Ok() {\n- if accFsyncFDIDsLisa != nil {\n- *accFsyncFDIDsLisa = append(*accFsyncFDIDsLisa, d.readFDLisa.ID())\n- return nil\n- }\nreturn d.readFDLisa.Sync(ctx)\n} else if !d.fs.opts.lisaEnabled && !d.readFile.isNil() {\nreturn d.readFile.fsync(ctx)\n@@ -2426,7 +2418,7 @@ func (d *dentry) syncRemoteFileLocked(ctx context.Context, accFsyncFDIDsLisa *[]\nreturn nil\n}\n-func (d *dentry) syncCachedFile(ctx context.Context, forFilesystemSync bool, accFsyncFDIDsLisa *[]lisafs.FDID) error {\n+func (d *dentry) syncCachedFile(ctx context.Context, forFilesystemSync bool) error {\nd.handleMu.RLock()\ndefer d.handleMu.RUnlock()\nh := d.writeHandleLocked()\n@@ -2439,7 +2431,7 @@ func (d *dentry) syncCachedFile(ctx context.Context, forFilesystemSync bool, acc\nreturn err\n}\n}\n- if err := d.syncRemoteFileLocked(ctx, accFsyncFDIDsLisa); err != nil {\n+ if err := d.syncRemoteFileLocked(ctx); err != nil {\nif !forFilesystemSync {\nreturn err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/regular_file.go",
"new_path": "pkg/sentry/fsimpl/gofer/regular_file.go",
"diff": "@@ -697,7 +697,7 @@ func regularFileSeekLocked(ctx context.Context, d *dentry, fdOffset, offset int6\n// Sync implements vfs.FileDescriptionImpl.Sync.\nfunc (fd *regularFileFD) Sync(ctx context.Context) error {\n- return fd.dentry().syncCachedFile(ctx, false /* forFilesystemSync */, nil /* accFsyncFDIDsLisa */)\n+ return fd.dentry().syncCachedFile(ctx, false /* forFilesystemSync */)\n}\n// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/special_file.go",
"new_path": "pkg/sentry/fsimpl/gofer/special_file.go",
"diff": "@@ -24,7 +24,6 @@ import (\n\"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n\"gvisor.dev/gvisor/pkg/fdnotifier\"\n\"gvisor.dev/gvisor/pkg/hostarch\"\n- \"gvisor.dev/gvisor/pkg/lisafs\"\n\"gvisor.dev/gvisor/pkg/metric\"\n\"gvisor.dev/gvisor/pkg/p9\"\n\"gvisor.dev/gvisor/pkg/safemem\"\n@@ -392,10 +391,10 @@ func (fd *specialFileFD) Seek(ctx context.Context, offset int64, whence int32) (\n// Sync implements vfs.FileDescriptionImpl.Sync.\nfunc (fd *specialFileFD) Sync(ctx context.Context) error {\n- return fd.sync(ctx, false /* forFilesystemSync */, nil /* accFsyncFDIDsLisa */)\n+ return fd.sync(ctx, false /* forFilesystemSync */)\n}\n-func (fd *specialFileFD) sync(ctx context.Context, forFilesystemSync bool, accFsyncFDIDsLisa *[]lisafs.FDID) error {\n+func (fd *specialFileFD) sync(ctx context.Context, forFilesystemSync bool) error {\n// Locks to ensure it didn't race with fd.Release().\nfd.releaseMu.RLock()\ndefer fd.releaseMu.RUnlock()\n@@ -413,10 +412,6 @@ func (fd *specialFileFD) sync(ctx context.Context, forFilesystemSync bool, accFs\nreturn err\n}\nif fs := fd.filesystem(); fs.opts.lisaEnabled {\n- if accFsyncFDIDsLisa != nil {\n- *accFsyncFDIDsLisa = append(*accFsyncFDIDsLisa, fd.handle.fdLisa.ID())\n- return nil\n- }\nreturn fd.handle.fdLisa.Sync(ctx)\n}\nreturn fd.handle.file.fsync(ctx)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Get rid of FSync RPC batching in gofer client with lisafs.
This optimization was not used correctly, as described in the change. We could
use it correctly to optimize sync(2) and syncfs(2). But that substantially
increases code complexity.
PiperOrigin-RevId: 433080982 |
259,853 | 07.03.2022 20:00:47 | 28,800 | 9851f1f320065ed83a72c8f4ea2b10e4cc7d1411 | tools/make_release.sh: don't overwrite global variables. | [
{
"change_type": "MODIFY",
"old_path": "tools/make_release.sh",
"new_path": "tools/make_release.sh",
"diff": "@@ -41,6 +41,7 @@ done\n# install_raw installs raw artifacts.\ninstall_raw() {\nfor binary in \"${binaries[@]}\"; do\n+ local arch name\n# Copy the raw file & generate a sha512sum, sorted by architecture.\narch=$(file \"${binary}\" | cut -d',' -f2 | awk '{print $NF}' | tr '-' '_')\nname=$(basename \"${binary}\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | tools/make_release.sh: don't overwrite global variables.
PiperOrigin-RevId: 433105561 |
259,907 | 08.03.2022 10:59:00 | 28,800 | bbf4a590868eba1d343c7edab46f60f8b4c9da18 | Update lisafs to return ENOMEM on hitting max channel limit.
Also add a check to ensure that at least one channel is created. | [
{
"change_type": "MODIFY",
"old_path": "pkg/lisafs/channel.go",
"new_path": "pkg/lisafs/channel.go",
"diff": "@@ -102,9 +102,13 @@ func (c *Connection) createChannel(maxMessageSize uint32) (*channel, flipcall.Pa\nc.channelsMu.Lock()\ndefer c.channelsMu.Unlock()\n// If c.channels is nil, the connection has closed.\n- if c.channels == nil || len(c.channels) >= maxChannels() {\n+ if c.channels == nil {\nreturn nil, flipcall.PacketWindowDescriptor{}, -1, unix.ENOSYS\n}\n+ // Return ENOMEM to indicate that the server has hit its max channels limit.\n+ if len(c.channels) >= maxChannels() {\n+ return nil, flipcall.PacketWindowDescriptor{}, -1, unix.ENOMEM\n+ }\nch := &channel{}\n// Set up data channel.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/lisafs/client.go",
"new_path": "pkg/lisafs/client.go",
"diff": "@@ -128,7 +128,11 @@ func NewClient(sock *unet.Socket) (*Client, Inode, error) {\ndefer channelsWg.Done()\nch, err := c.createChannel()\nif err != nil {\n+ if err == unix.ENOMEM {\n+ log.Debugf(\"channel creation failed because server hit max channels limit\")\n+ } else {\nlog.Warningf(\"channel creation failed: %v\", err)\n+ }\nreturn\n}\nc.channelsMu.Lock()\n@@ -139,6 +143,16 @@ func NewClient(sock *unet.Socket) (*Client, Inode, error) {\n}\nchannelsWg.Wait()\n+ // Check that atleast 1 channel is created. This is not required by lisafs\n+ // protocol. It exists to flag server side issues in channel creation.\n+ c.channelsMu.Lock()\n+ numChannels := len(c.channels)\n+ c.channelsMu.Unlock()\n+ if maxChans > 0 && numChannels == 0 {\n+ log.Warningf(\"all channel RPCs failed\")\n+ return nil, Inode{}, unix.ENOMEM\n+ }\n+\ncu.Release()\nreturn c, mountResp.Root, nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update lisafs to return ENOMEM on hitting max channel limit.
Also add a check to ensure that at least one channel is created.
PiperOrigin-RevId: 433256702 |
259,881 | 09.03.2022 10:01:36 | 28,800 | 3cf58092122ae723b6e070b6e655ad652e98f832 | Bump Go branch workflow to 1.17
This matches the version in WORKSPACE and go.mod. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/go.yml",
"new_path": ".github/workflows/go.yml",
"diff": "@@ -45,7 +45,7 @@ jobs:\nfetch-depth: 0\n- uses: actions/setup-go@v2\nwith:\n- go-version: 1.15\n+ go-version: 1.17\n- run: tools/go_branch.sh\n- run: git checkout go && git clean -xf . && go build ./...\n- if: github.event_name == 'push'\n"
}
] | Go | Apache License 2.0 | google/gvisor | Bump Go branch workflow to 1.17
This matches the version in WORKSPACE and go.mod.
PiperOrigin-RevId: 433506784 |
259,962 | 09.03.2022 17:28:38 | 28,800 | d38b0b3efe411a6baee2dd4e46045fba0c3e0c2b | Add a bulk transfer test for sharedmem. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem_server_test.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem_server_test.go",
"diff": "@@ -125,7 +125,7 @@ func newClientStack(t *testing.T, qPair *sharedmem.QueuePair, peerFD int) (*stac\nif err != nil {\nreturn nil, fmt.Errorf(\"failed to create sharedmem endpoint: %s\", err)\n}\n- st, err := newStackWithOptions(stackOptions{ep: ep, addr: localIPv4Address, enablePacketLogs: true})\n+ st, err := newStackWithOptions(stackOptions{ep: ep, addr: localIPv4Address, enablePacketLogs: false})\nif err != nil {\nreturn nil, fmt.Errorf(\"failed to create client stack: %s\", err)\n}\n@@ -144,7 +144,7 @@ func newServerStack(t *testing.T, qPair *sharedmem.QueuePair, peerFD int) (*stac\nif err != nil {\nreturn nil, fmt.Errorf(\"failed to create sharedmem endpoint: %s\", err)\n}\n- st, err := newStackWithOptions(stackOptions{ep: ep, addr: remoteIPv4Address, enablePacketLogs: true})\n+ st, err := newStackWithOptions(stackOptions{ep: ep, addr: remoteIPv4Address, enablePacketLogs: false})\nif err != nil {\nreturn nil, fmt.Errorf(\"failed to create client stack: %s\", err)\n}\n@@ -195,12 +195,13 @@ func (ctx *testContext) cleanup() {\nunix.Close(ctx.peerFDs[1])\nctx.clientStk.Close()\nctx.serverStk.Close()\n+ ctx.clientStk.Wait()\n+ ctx.serverStk.Wait()\n}\n-func makeRequest(ctx *testContext) (*http.Response, error) {\n- listenAddr := tcpip.FullAddress{Addr: remoteIPv4Address, Port: serverPort}\n+func makeRequest(serverAddr tcpip.FullAddress, clientStk *stack.Stack) (*http.Response, error) {\ndialFunc := func(address, protocol string) (net.Conn, error) {\n- return gonet.DialTCP(ctx.clientStk, listenAddr, ipv4.ProtocolNumber)\n+ return gonet.DialTCP(clientStk, serverAddr, ipv4.ProtocolNumber)\n}\nhttpClient := &http.Client{\nTransport: &http.Transport{\n@@ -210,7 +211,7 @@ func makeRequest(ctx *testContext) (*http.Response, error) {\n// Close idle \"keep alive\" connections. 
If any connections remain open after\n// a test ends, DoLeakCheck() will erroneously detect leaked packets.\ndefer httpClient.CloseIdleConnections()\n- serverURL := fmt.Sprintf(\"http://[%s]:%d/\", net.IP(remoteIPv4Address), serverPort)\n+ serverURL := fmt.Sprintf(\"http://[%s]:%d/\", net.IP(serverAddr.Addr), serverAddr.Port)\nresponse, err := httpClient.Get(serverURL)\nreturn response, err\n}\n@@ -231,7 +232,7 @@ func TestServerRoundTrip(t *testing.T) {\n}))\n}()\n- response, err := makeRequest(ctx)\n+ response, err := makeRequest(listenAddr, ctx.clientStk)\nif err != nil {\nt.Fatalf(\"httpClient.Get(\\\"/\\\") failed: %s\", err)\n}\n@@ -267,7 +268,7 @@ func TestServerRoundTripStress(t *testing.T) {\nvar errs errgroup.Group\nfor i := 0; i < 1000; i++ {\nerrs.Go(func() error {\n- response, err := makeRequest(ctx)\n+ response, err := makeRequest(listenAddr, ctx.clientStk)\nif err != nil {\nreturn fmt.Errorf(\"httpClient.Get(\\\"/\\\") failed: %s\", err)\n}\n@@ -282,7 +283,7 @@ func TestServerRoundTripStress(t *testing.T) {\nif got, want := string(body), responseString; got != want {\nreturn fmt.Errorf(\"unexpected response got: %s, want: %s\", got, want)\n}\n- log.Infof(\"worker: %d read %d bytes\", len(body))\n+ log.Infof(\"worker: read %d bytes\", len(body))\nreturn nil\n})\n}\n@@ -291,6 +292,119 @@ func TestServerRoundTripStress(t *testing.T) {\n}\n}\n+func TestServerBulkTransfer(t *testing.T) {\n+ var payloadSizes = []int{\n+ 512 << 20, // 512 MiB\n+ 1024 << 20, // 1 GiB\n+ 2048 << 20, // 2 GiB\n+ 4096 << 20, // 4 GiB\n+ 8192 << 20, // 8 GiB\n+ }\n+\n+ for _, payloadSize := range payloadSizes {\n+ t.Run(fmt.Sprintf(\"%d bytes\", payloadSize), func(t *testing.T) {\n+ ctx := newTestContext(t)\n+ defer ctx.cleanup()\n+ listenAddr := tcpip.FullAddress{Addr: remoteIPv4Address, Port: serverPort}\n+ l, err := gonet.ListenTCP(ctx.serverStk, listenAddr, ipv4.ProtocolNumber)\n+ if err != nil {\n+ t.Fatalf(\"failed to start TCP Listener: %s\", err)\n+ }\n+ defer l.Close()\n+\n+ const chunkSize = 4 << 20 // 4 MiB\n+ var responseString = strings.Repeat(\"r\", chunkSize)\n+ go func() {\n+ http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n+ for done := 0; done < payloadSize; {\n+ n, err := w.Write([]byte(responseString))\n+ if err != nil {\n+ log.Infof(\"failed to write response : %s\", err)\n+ return\n+ }\n+ done += n\n+ }\n+ }))\n+ }()\n+\n+ response, err := makeRequest(listenAddr, ctx.clientStk)\n+ if err != nil {\n+ t.Fatalf(\"httpClient.Get(\\\"/\\\") failed: %s\", err)\n+ }\n+ if got, want := response.StatusCode, http.StatusOK; got != want {\n+ t.Fatalf(\"unexpected status code got: %d, want: %d\", got, want)\n+ }\n+ n, err := io.Copy(io.Discard, response.Body)\n+ if err != nil {\n+ t.Fatalf(\"io.Copy(io.Discard, response.Body) failed: %s\", err)\n+ }\n+ response.Body.Close()\n+ if got, want := int(n), payloadSize; got != want {\n+ t.Fatalf(\"unexpected resposne size got: %d, want: %d\", got, want)\n+ }\n+ log.Infof(\"read %d bytes\", n)\n+ })\n+ }\n+\n+}\n+\n+func TestClientBulkTransfer(t *testing.T) {\n+ var payloadSizes = []int{\n+ 512 << 20, // 512 MiB\n+ 1024 << 20, // 1 GiB\n+ 2048 << 20, // 2 GiB\n+ 4096 << 20, // 4 GiB\n+ 8192 << 20, // 8 GiB\n+ }\n+\n+ for _, payloadSize := range payloadSizes {\n+ t.Run(fmt.Sprintf(\"%d bytes\", payloadSize), func(t *testing.T) {\n+ ctx := newTestContext(t)\n+ defer ctx.cleanup()\n+ listenAddr := tcpip.FullAddress{Addr: localIPv4Address, Port: serverPort}\n+ l, err := gonet.ListenTCP(ctx.clientStk, listenAddr, 
ipv4.ProtocolNumber)\n+ if err != nil {\n+ t.Fatalf(\"failed to start TCP Listener: %s\", err)\n+ }\n+ defer l.Close()\n+ const chunkSize = 4 << 20 // 4 MiB\n+ var responseString = strings.Repeat(\"r\", chunkSize)\n+ go func() {\n+ http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n+ for done := 0; done < payloadSize; {\n+ n, err := w.Write([]byte(responseString))\n+ if err != nil {\n+ log.Infof(\"failed to write response : %s\", err)\n+ return\n+ }\n+ done += n\n+ }\n+ }))\n+ }()\n+\n+ response, err := makeRequest(listenAddr, ctx.serverStk)\n+ if err != nil {\n+ t.Fatalf(\"httpClient.Get(\\\"/\\\") failed: %s\", err)\n+ }\n+ if err != nil {\n+ t.Fatalf(\"httpClient.Get(\\\"/\\\") failed: %s\", err)\n+ }\n+ if got, want := response.StatusCode, http.StatusOK; got != want {\n+ t.Fatalf(\"unexpected status code got: %d, want: %d\", got, want)\n+ }\n+ n, err := io.Copy(io.Discard, response.Body)\n+ if err != nil {\n+ t.Fatalf(\"io.Copy(io.Discard, response.Body) failed: %s\", err)\n+ }\n+ response.Body.Close()\n+ if got, want := int(n), payloadSize; got != want {\n+ t.Fatalf(\"unexpected resposne size got: %d, want: %d\", got, want)\n+ }\n+ log.Infof(\"read %d bytes\", n)\n+ })\n+ }\n+}\n+\nfunc TestMain(m *testing.M) {\nrefs.SetLeakMode(refs.LeaksPanic)\ncode := m.Run()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add a bulk transfer test for sharedmem.
PiperOrigin-RevId: 433618153 |
259,962 | 09.03.2022 20:00:54 | 28,800 | 00a4ea9bfbfea6047f451e26c6e02d0254090e7e | Minor code cleanups, no functionality change. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/pipe/rx.go",
"new_path": "pkg/tcpip/link/sharedmem/pipe/rx.go",
"diff": "@@ -54,7 +54,15 @@ func (r *Rx) Pull() []byte {\n// Check if this is a wrapping slot. If that's the case, it carries no\n// data, so we just skip it and try again from the first slot.\nif int64(newHead-headWrap) >= 0 {\n- if int64(newHead-headWrap) > int64(jump) || newHead&offsetMask != 0 {\n+ // If newHead passes the tail, the pipe is either damaged or the\n+ // RX view of the pipe has completely wrapped without an\n+ // intervening flush.\n+ if int64(newHead-(r.tail+jump)) > 0 {\n+ return nil\n+ }\n+ // The pipe is damaged if newHead doesn't point to the start of\n+ // the ring.\n+ if newHead&offsetMask != 0 {\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/pipe/tx.go",
"new_path": "pkg/tcpip/link/sharedmem/pipe/tx.go",
"diff": "@@ -61,6 +61,9 @@ func (t *Tx) Push(payloadSize uint64) []byte {\nreturn nil\n}\n+ // True if TxPipe currently has a pushed message, i.e., it is not\n+ // Flush()'ed.\n+ messageAhead := t.next != t.tail\ntotalLen := payloadToSlotSize(payloadSize)\nnewNext := t.next + totalLen\nnextWrap := (t.next & revolutionMask) | uint64(len(t.p.buffer))\n@@ -69,21 +72,19 @@ func (t *Tx) Push(payloadSize uint64) []byte {\n// slot, then try to add the actual slot to the front of the\n// pipe.\nnewNext = (newNext & revolutionMask) + jump\n- wrappingPayloadSize := slotToPayloadSize(newNext - t.next)\nif !t.reclaim(newNext) {\nreturn nil\n}\n-\n+ wrappingPayloadSize := slotToPayloadSize(newNext - t.next)\noldNext := t.next\nt.next = newNext\n- if oldNext != t.tail {\n+ if messageAhead {\nt.p.write(oldNext, wrappingPayloadSize)\n} else {\nt.tailHeader = wrappingPayloadSize\nt.Flush()\n}\n-\n- newNext += totalLen\n+ return t.Push(payloadSize)\n}\n// Check that we have enough room for the buffer.\n@@ -91,7 +92,7 @@ func (t *Tx) Push(payloadSize uint64) []byte {\nreturn nil\n}\n- if t.next != t.tail {\n+ if messageAhead {\nt.p.write(t.next, payloadSize)\n} else {\nt.tailHeader = payloadSize\n"
}
] | Go | Apache License 2.0 | google/gvisor | Minor code cleanups, no functionality change.
PiperOrigin-RevId: 433640207 |
259,907 | 09.03.2022 21:44:30 | 28,800 | 1fcaa119a53ada26ade9fb1405cd593204699adc | Make watchdog panic for runtime tests.
This will make deadlocks more apparent in test results. Currently runtime tests
just time out after 30 minutes if there is some deadlock. | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -231,8 +231,8 @@ packetimpact-tests:\n.PHONY: packetimpact-tests\n%-runtime-tests: load-runtimes_% $(RUNTIME_BIN)\n- @$(call install_runtime,$(RUNTIME),) # Ensure flags are cleared.\n- @$(call test_runtime,$(RUNTIME),--test_timeout=10800 //test/runtimes:$*)\n+ @$(call install_runtime,$(RUNTIME),--watchdog-action=panic)\n+ @$(call test_runtime,$(RUNTIME),--test_timeout=1800 //test/runtimes:$*)\ndo-tests: $(RUNTIME_BIN)\n@$(RUNTIME_BIN) --rootless do true\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make watchdog panic for runtime tests.
This will make deadlocks more apparent in test results. Currently runtime tests
just time out after 30 minutes if there is some deadlock.
PiperOrigin-RevId: 433654750 |
260,009 | 10.03.2022 14:51:25 | 28,800 | 4503ba3f5efdf74079a9dba2242a38d51d0cf58d | Fix data race when using UNSHARE in close_range.
Also add a test that fails under gotsan without the fix. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/fd_table.go",
"new_path": "pkg/sentry/kernel/fd_table.go",
"diff": "@@ -760,6 +760,9 @@ func (f *FDTable) RemoveNextInRange(ctx context.Context, startFd int32, endFd in\n// GetLastFd returns the last set FD in the FDTable bitmap.\nfunc (f *FDTable) GetLastFd() int32 {\n+ f.mu.Lock()\n+ defer f.mu.Unlock()\n+\nlast := f.fdBitmap.Maximum()\nif last > bitmap.MaxBitEntryLimit {\nreturn MaxFdLimit\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -4542,5 +4542,6 @@ cc_binary(\n\"//test/util:temp_path\",\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n+ \"//test/util:thread_util\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/close_range.cc",
"new_path": "test/syscalls/linux/close_range.cc",
"diff": "#include \"test/util/posix_error.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n+#include \"test/util/thread_util.h\"\nnamespace gvisor {\nnamespace testing {\n@@ -367,6 +368,37 @@ TEST_F(CloseRangeTest, InvalidFlags) {\nSyscallFailsWithErrno(EINVAL));\n}\n+// Test that calling close_range concurrently while creating new files yields\n+// expected results.\n+TEST_F(CloseRangeTest, ConcurrentCalls) {\n+ SKIP_IF(!IsRunningOnGvisor() && close_range(1, 0, 0) < 0 && errno == ENOSYS);\n+ const int num_files_in_range = 10;\n+ const unsigned int flags = CLOSE_RANGE_UNSHARE;\n+ const int num_threads = 100;\n+ std::unique_ptr<ScopedThread> threads[num_threads];\n+\n+ CreateFiles(num_files_in_range);\n+ OpenFilesRdwr();\n+\n+ auto cr_call = []() {\n+ EXPECT_THAT(close_range(num_files_in_range / 2,\n+ num_files_in_range + num_threads, flags),\n+ SyscallSucceeds());\n+ };\n+ auto open_file_call = []() {\n+ auto file = NewTempAbsPath();\n+ EXPECT_THAT(open(file.c_str(), O_CREAT, 0644), SyscallSucceeds());\n+ };\n+\n+ for (int i = 0; i < num_threads; i++) {\n+ if (i % 2 == 0) {\n+ threads[i] = std::make_unique<ScopedThread>(cr_call);\n+ } else {\n+ threads[i] = std::make_unique<ScopedThread>(open_file_call);\n+ }\n+ }\n+}\n+\n} // namespace\n} // namespace testing\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix data race when using UNSHARE in close_range.
Also add a test that fails under gotsan without the fix.
PiperOrigin-RevId: 433857741 |
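The race itself has the familiar unguarded-read shape: GetLastFd read the FD bitmap's maximum without holding the table mutex that concurrent close_range(CLOSE_RANGE_UNSHARE) and open paths take while mutating it. Below is a tiny Go sketch of that shape, assuming an illustrative table type rather than the real FDTable; the race detector flags it if the locking in lastFD is removed.

package main

import (
    "fmt"
    "sync"
)

// table mimics an FD table whose bitmap is mutated by concurrent callers.
type table struct {
    mu   sync.Mutex
    bits map[int]bool
    max  int
}

// set records an FD under the mutex, as the open()/close_range() paths do.
func (t *table) set(fd int) {
    t.mu.Lock()
    defer t.mu.Unlock()
    t.bits[fd] = true
    if fd > t.max {
        t.max = fd
    }
}

// lastFD must take the same mutex as set(); reading t.max without it is the
// data race the commit above fixes.
func (t *table) lastFD() int {
    t.mu.Lock()
    defer t.mu.Unlock()
    return t.max
}

func main() {
    t := &table{bits: make(map[int]bool)}
    var wg sync.WaitGroup
    for i := 0; i < 100; i++ {
        wg.Add(1)
        go func(fd int) {
            defer wg.Done()
            t.set(fd)
            _ = t.lastFD()
        }(i)
    }
    wg.Wait()
    fmt.Println(t.lastFD()) // 99
}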
260,009 | 10.03.2022 17:38:39 | 28,800 | ee133dbcf204c8518d9b272d2f91977f3ee1971c | Check UDP packet size before allocation.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/endpoint.go",
"new_path": "pkg/tcpip/transport/udp/endpoint.go",
"diff": "@@ -396,28 +396,32 @@ func (e *endpoint) prepareForWrite(p tcpip.Payloader, opts tcpip.WriteOptions) (\nreturn udpPacketInfo{}, err\n}\n- // TODO(https://gvisor.dev/issue/6538): Avoid this allocation.\n- v := make([]byte, p.Len())\n- if _, err := io.ReadFull(p, v); err != nil {\n- ctx.Release()\n- return udpPacketInfo{}, &tcpip.ErrBadBuffer{}\n- }\n- if len(v) > header.UDPMaximumPacketSize {\n- // Payload can't possibly fit in a packet.\n+ if p.Len() > header.UDPMaximumPacketSize {\n+ // Native linux behaviour differs for IPv4 and IPv6 packets; IPv4 packet\n+ // errors aren't report to the error queue at all.\n+ if ctx.PacketInfo().NetProto == header.IPv6ProtocolNumber {\nso := e.SocketOptions()\nif so.GetRecvError() {\nso.QueueLocalErr(\n&tcpip.ErrMessageTooLong{},\ne.net.NetProto(),\n- header.UDPMaximumPacketSize,\n+ uint32(p.Len()),\ndst,\n- v,\n+ nil,\n)\n}\n+ }\nctx.Release()\nreturn udpPacketInfo{}, &tcpip.ErrMessageTooLong{}\n}\n+ // TODO(https://gvisor.dev/issue/6538): Avoid this allocation.\n+ v := make([]byte, p.Len())\n+ if _, err := io.ReadFull(p, v); err != nil {\n+ ctx.Release()\n+ return udpPacketInfo{}, &tcpip.ErrBadBuffer{}\n+ }\n+\nreturn udpPacketInfo{\nctx: ctx,\ndata: v,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/udp_test.go",
"new_path": "pkg/tcpip/transport/udp/udp_test.go",
"diff": "@@ -423,7 +423,7 @@ func TestV4ReadBroadcastOnBoundToWildcard(t *testing.T) {\n// and verifies it fails with the provided error code.\n// TODO(https://gvisor.dev/issue/5623): Extract the test write methods in the\n// testing context.\n-func testFailingWrite(c *context.Context, flow context.TestFlow, wantErr tcpip.Error) {\n+func testFailingWrite(c *context.Context, flow context.TestFlow, payloadSize int, wantErr tcpip.Error) {\nc.T.Helper()\n// Take a snapshot of the stats to validate them at the end of the test.\nepstats := c.EP.Stats().(*tcpip.TransportEndpointStats).Clone()\n@@ -431,7 +431,7 @@ func testFailingWrite(c *context.Context, flow context.TestFlow, wantErr tcpip.E\nwriteDstAddr := flow.MapAddrIfApplicable(h.Dst.Addr)\nvar r bytes.Reader\n- r.Reset(newRandomPayload(arbitraryPayloadSize))\n+ r.Reset(newRandomPayload(payloadSize))\n_, gotErr := c.EP.Write(&r, tcpip.WriteOptions{\nTo: &tcpip.FullAddress{Addr: writeDstAddr, Port: h.Dst.Port},\n})\n@@ -590,7 +590,7 @@ func TestDualWriteConnectedToV6(t *testing.T) {\ntestWrite(c, context.UnicastV6)\n// Write to V4 mapped address.\n- testFailingWrite(c, context.UnicastV4in6, &tcpip.ErrNetworkUnreachable{})\n+ testFailingWrite(c, context.UnicastV4in6, arbitraryPayloadSize, &tcpip.ErrNetworkUnreachable{})\nconst want = 1\nif got := c.EP.Stats().(*tcpip.TransportEndpointStats).SendErrors.NoRoute.Value(); got != want {\nc.T.Fatalf(\"Endpoint stat not updated. got %d want %d\", got, want)\n@@ -611,7 +611,7 @@ func TestDualWriteConnectedToV4Mapped(t *testing.T) {\ntestWrite(c, context.UnicastV4in6)\n// Write to v6 address.\n- testFailingWrite(c, context.UnicastV6, &tcpip.ErrInvalidEndpointState{})\n+ testFailingWrite(c, context.UnicastV6, arbitraryPayloadSize, &tcpip.ErrInvalidEndpointState{})\n}\nfunc TestV4WriteOnV6Only(t *testing.T) {\n@@ -621,7 +621,7 @@ func TestV4WriteOnV6Only(t *testing.T) {\nc.CreateEndpointForFlow(context.UnicastV6Only, udp.ProtocolNumber)\n// Write to V4 mapped address.\n- testFailingWrite(c, context.UnicastV4in6, &tcpip.ErrNoRoute{})\n+ testFailingWrite(c, context.UnicastV4in6, arbitraryPayloadSize, &tcpip.ErrNoRoute{})\n}\nfunc TestV6WriteOnBoundToV4Mapped(t *testing.T) {\n@@ -636,7 +636,7 @@ func TestV6WriteOnBoundToV4Mapped(t *testing.T) {\n}\n// Write to v6 address.\n- testFailingWrite(c, context.UnicastV6, &tcpip.ErrInvalidEndpointState{})\n+ testFailingWrite(c, context.UnicastV6, arbitraryPayloadSize, &tcpip.ErrInvalidEndpointState{})\n}\nfunc TestV6WriteOnConnected(t *testing.T) {\n@@ -1772,7 +1772,7 @@ func TestShutdownWrite(t *testing.T) {\nt.Fatalf(\"Shutdown failed: %s\", err)\n}\n- testFailingWrite(c, context.UnicastV6, &tcpip.ErrClosedForSend{})\n+ testFailingWrite(c, context.UnicastV6, arbitraryPayloadSize, &tcpip.ErrClosedForSend{})\n}\nfunc TestOutgoingSubnetBroadcast(t *testing.T) {\n@@ -2067,6 +2067,22 @@ func TestChecksumWithZeroValueOnesComplementSum(t *testing.T) {\n}\n}\n+// TestWritePayloadSizeTooBig verifies that writing anything bigger than\n+// header.UDPMaximumPacketSize fails.\n+func TestWritePayloadSizeTooBig(t *testing.T) {\n+ c := context.New(t, []stack.TransportProtocolFactory{udp.NewProtocol, icmp.NewProtocol6, icmp.NewProtocol4})\n+ defer c.Cleanup()\n+\n+ c.CreateEndpoint(ipv6.ProtocolNumber, udp.ProtocolNumber)\n+\n+ if err := c.EP.Connect(tcpip.FullAddress{Addr: context.TestV6Addr, Port: context.TestPort}); err != nil {\n+ c.T.Fatalf(\"Connect failed: %s\", err)\n+ }\n+\n+ testWrite(c, context.UnicastV6)\n+ testFailingWrite(c, context.UnicastV6, 
header.UDPMaximumPacketSize+1, &tcpip.ErrMessageTooLong{})\n+}\n+\nfunc TestMain(m *testing.M) {\nrefs.SetLeakMode(refs.LeaksPanic)\ncode := m.Run()\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_ip_udp_generic.cc",
"new_path": "test/syscalls/linux/socket_ip_udp_generic.cc",
"diff": "#include <errno.h>\n#ifdef __linux__\n+#include <linux/errqueue.h>\n#include <linux/in6.h>\n#endif // __linux__\n#include <netinet/in.h>\n@@ -541,5 +542,107 @@ TEST_P(UDPSocketPairTest, GetSocketAcceptConn) {\nEXPECT_EQ(got, 0);\n}\n+#ifdef __linux__\n+TEST_P(UDPSocketPairTest, PayloadTooBig) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ // Set IP_RECVERR socket option to enable error queueing.\n+ int v = kSockOptOn;\n+ socklen_t optlen = sizeof(v);\n+ int opt_level = SOL_IP;\n+ int opt_type = IP_RECVERR;\n+ if (sockets->first_addr()->sa_family == AF_INET6) {\n+ opt_level = SOL_IPV6;\n+ opt_type = IPV6_RECVERR;\n+ }\n+ ASSERT_THAT(setsockopt(sockets->first_fd(), opt_level, opt_type, &v, optlen),\n+ SyscallSucceeds());\n+\n+ // Buffers bigger than 0xffff should receive an error.\n+ const int kBufLen = 0x10000;\n+ char buf[kBufLen];\n+ RandomizeBuffer(buf, sizeof(buf));\n+\n+ EXPECT_THAT(send(sockets->first_fd(), buf, sizeof(buf), 0),\n+ SyscallFailsWithErrno(EMSGSIZE));\n+\n+ // Dequeue error using recvmsg(MSG_ERRQUEUE). Give a buffer big-enough for\n+ // the original message just in case.\n+ char got[kBufLen];\n+ struct iovec iov;\n+ iov.iov_base = reinterpret_cast<void*>(got);\n+ iov.iov_len = kBufLen;\n+\n+ const int addrlen_ = sockets->second_addr_size();\n+ size_t control_buf_len = CMSG_SPACE(sizeof(sock_extended_err) + addrlen_);\n+ std::vector<char> control_buf(control_buf_len);\n+ struct sockaddr_storage remote;\n+ memset(&remote, 0, sizeof(remote));\n+ struct msghdr msg = {};\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+ msg.msg_flags = 0;\n+ msg.msg_control = control_buf.data();\n+ msg.msg_controllen = control_buf_len;\n+ msg.msg_name = reinterpret_cast<void*>(&remote);\n+ msg.msg_namelen = addrlen_;\n+\n+ struct sockaddr_storage addr;\n+ optlen = sizeof(addr);\n+ EXPECT_THAT(getpeername(sockets->first_fd(), AsSockAddr(&addr), &optlen),\n+ SyscallSucceeds());\n+ bool ipv6 = false;\n+ if (addr.ss_family == AF_INET6) {\n+ auto ipv6addr = reinterpret_cast<struct sockaddr_in6*>(&addr);\n+\n+ // Exclude IPv4-mapped addresses.\n+ uint8_t v4MappedPrefix[12] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff};\n+ ipv6 = memcmp(&ipv6addr->sin6_addr.s6_addr[0], v4MappedPrefix,\n+ sizeof(v4MappedPrefix)) != 0;\n+ }\n+ // Native behaviour for IPv4 packets is to not report to ERRQUEUE.\n+ if (!ipv6) {\n+ EXPECT_THAT(recvmsg(sockets->first_fd(), &msg, MSG_ERRQUEUE),\n+ SyscallFailsWithErrno(EAGAIN));\n+ return;\n+ }\n+\n+ ASSERT_THAT(recvmsg(sockets->first_fd(), &msg, MSG_ERRQUEUE),\n+ SyscallSucceedsWithValue(0));\n+\n+ EXPECT_NE(msg.msg_flags & MSG_ERRQUEUE, 0);\n+ EXPECT_EQ(memcmp(&remote, sockets->second_addr(), addrlen_), 0);\n+\n+ // Check the contents of the control message.\n+ struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);\n+ ASSERT_NE(cmsg, nullptr);\n+ EXPECT_EQ(CMSG_NXTHDR(&msg, cmsg), nullptr);\n+ EXPECT_EQ(cmsg->cmsg_level, opt_level);\n+ EXPECT_EQ(cmsg->cmsg_type, opt_type);\n+ EXPECT_EQ(cmsg->cmsg_len,\n+ sizeof(sock_extended_err) + addrlen_ + sizeof(cmsghdr));\n+\n+ // Check the contents of socket error.\n+ struct sock_extended_err* sock_err =\n+ reinterpret_cast<sock_extended_err*>(CMSG_DATA(cmsg));\n+ EXPECT_EQ(sock_err->ee_errno, EMSGSIZE);\n+ EXPECT_EQ(sock_err->ee_origin, SO_EE_ORIGIN_LOCAL);\n+ EXPECT_EQ(sock_err->ee_type, ICMP_ECHOREPLY);\n+ EXPECT_EQ(sock_err->ee_code, ICMP_NET_UNREACH);\n+ EXPECT_EQ(sock_err->ee_info, kBufLen);\n+ EXPECT_EQ(sock_err->ee_data, 0);\n+\n+ // Verify that no socket error was put on 
the queue.\n+ int err;\n+ optlen = sizeof(err);\n+ ASSERT_THAT(\n+ getsockopt(sockets->first_fd(), SOL_SOCKET, SO_ERROR, &err, &optlen),\n+ SyscallSucceeds());\n+ ASSERT_EQ(err, 0);\n+ ASSERT_EQ(optlen, sizeof(err));\n+}\n+#endif // __linux__\n+\n} // namespace testing\n} // namespace gvisor\n"
}
] | Go | Apache License 2.0 | google/gvisor | Check UDP packet size before allocation.
Reported-by: syzbot+66a702d22e8ec6757346@syzkaller.appspotmail.com
PiperOrigin-RevId: 433892548 |
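The record above threads a payloadSize argument through testFailingWrite and adds TestWritePayloadSizeTooBig, which expects a write larger than header.UDPMaximumPacketSize to fail with ErrMessageTooLong. As a rough standalone sketch of the guard being exercised — rejecting an oversized payload before any packet buffer is allocated — the snippet below uses an assumed 65535-byte limit and hypothetical names rather than the sentry's real types:

package main

import (
	"errors"
	"fmt"
)

// udpMaximumPacketSize is the largest datagram a 16-bit UDP length field can
// describe; the exact constant used by the sentry lives in its header package.
const udpMaximumPacketSize = 0xffff

var errMessageTooLong = errors.New("message too long")

// preparePayload rejects oversized writes up front, before allocating the
// packet buffer, which is the behavior the new regression test checks.
func preparePayload(payload []byte) ([]byte, error) {
	if len(payload) > udpMaximumPacketSize {
		return nil, errMessageTooLong
	}
	buf := make([]byte, len(payload))
	copy(buf, payload)
	return buf, nil
}

func main() {
	if _, err := preparePayload(make([]byte, udpMaximumPacketSize+1)); err != nil {
		fmt.Println("oversized write rejected:", err)
	}
}
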
259,975 | 14.03.2022 11:52:34 | 25,200 | 9118171bef5de95f33f0888a888009bcfb8dc7ac | [benchmarks] Set smoke test to Startup benchmark and shorten long running benchmarks.
The smoke test runs ffmpeg, which is a few orders of magnitude longer than the startup benchmark.
Adding --test.benchtime=1ns ensures long running benchmarks only run once when benchmarks are calculating
b.N. | [
{
"change_type": "MODIFY",
"old_path": ".buildkite/pipeline.yaml",
"new_path": ".buildkite/pipeline.yaml",
"diff": "@@ -264,20 +264,20 @@ steps:\n# Run basic benchmarks smoke tests (no upload).\n- <<: *common\nlabel: \":fire: Benchmarks smoke test\"\n- command: make benchmark-platforms\n+ command: make benchmark-platforms BENCHMARKS_TARGETS=test/benchmarks/base:startup_test BENCHMARKS_FILTER=BenchmarkStartupEmpty BENCHMARKS_OPTIONS=-test.benchtime=1ns\n# Use the opposite of the benchmarks filter.\nif: build.branch != \"master\"\n# Run all benchmarks.\n- <<: *benchmarks\nlabel: \":bazel: ABSL build benchmarks\"\n- command: make -i benchmark-platforms BENCHMARKS_FILTER=\"ABSL/page_cache.clean\" BENCHMARKS_SUITE=absl BENCHMARKS_TARGETS=test/benchmarks/fs:bazel_test\n+ command: make -i benchmark-platforms BENCHMARKS_FILTER=\"ABSL/page_cache.clean\" BENCHMARKS_SUITE=absl BENCHMARKS_TARGETS=test/benchmarks/fs:bazel_test BENCHMARKS_OPTIONS=-test.benchtime=1ns\n- <<: *benchmarks\nlabel: \":go: runsc build benchmarks\"\n- command: make -i benchmark-platforms BENCHMARKS_FILTER=\"Runsc/page_cache.clean/filesystem.bind\" BENCHMARKS_SUITE=runsc BENCHMARKS_TARGETS=test/benchmarks/fs:bazel_test\n+ command: make -i benchmark-platforms BENCHMARKS_FILTER=\"Runsc/page_cache.clean/filesystem.bind\" BENCHMARKS_SUITE=runsc BENCHMARKS_TARGETS=test/benchmarks/fs:bazel_test BENCHMARKS_OPTIONS=-test.benchtime=1ns\n- <<: *benchmarks\nlabel: \":metal: FFMPEG benchmarks\"\n- command: make -i benchmark-platforms BENCHMARKS_SUITE=ffmpeg BENCHMARKS_TARGETS=test/benchmarks/media:ffmpeg_test\n+ command: make -i benchmark-platforms BENCHMARKS_SUITE=ffmpeg BENCHMARKS_TARGETS=test/benchmarks/media:ffmpeg_test BENCHMARKS_OPTIONS=-test.benchtime=1ns\n# For fio, running with --test.benchtime=Xs scales the written/read\n# bytes to several GB. This is not a problem for root/bind/volume mounts,\n# but for tmpfs mounts, the size can grow to more memory than the machine\n@@ -318,4 +318,4 @@ steps:\ncommand: make -i benchmark-platforms BENCHMARKS_SUITE=sysbench BENCHMARKS_TARGETS=test/benchmarks/base:sysbench_test\n- <<: *benchmarks\nlabel: \":tensorflow: TensorFlow benchmarks\"\n- command: make -i benchmark-platforms BENCHMARKS_SUITE=tensorflow BENCHMARKS_TARGETS=test/benchmarks/ml:tensorflow_test\n+ command: make -i benchmark-platforms BENCHMARKS_SUITE=tensorflow BENCHMARKS_TARGETS=test/benchmarks/ml:tensorflow_test BENCHMARKS_OPTIONS=-test.benchtime=1ns\n"
}
] | Go | Apache License 2.0 | google/gvisor | [benchmarks] Set smoke test to Startup benchmark and shorten long running benchmarks.
The smoke test runs ffmpeg, which is a few orders of magnitude longer than the startup benchmark.
Adding --test.benchtime=1ns ensures long running benchmarks only run once when benchmarks are calculating
b.N.
PiperOrigin-RevId: 434526626 |
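The b.N behaviour described above is easy to reproduce with any toy Go benchmark: the framework keeps raising b.N until a timed run fills the -test.benchtime budget, so a 1ns budget is already satisfied by the first single-iteration run. The benchmark below (say, demo_test.go) is a hypothetical stand-in, not one of the gVisor benchmark targets:

package demo

import (
	"testing"
	"time"
)

// BenchmarkSlowOperation simulates a long-running workload. With the default
// benchtime of 1s, b.N is raised until the loop runs for about a second; with
// -test.benchtime=1ns the first run (b.N == 1) already exceeds the budget, so
// the benchmark executes exactly one iteration.
func BenchmarkSlowOperation(b *testing.B) {
	for i := 0; i < b.N; i++ {
		time.Sleep(100 * time.Millisecond)
	}
}

Running it with go test -bench=SlowOperation -benchtime=1ns (or passing -test.benchtime=1ns directly to a prebuilt test binary, as the pipeline change does) reports a single iteration.
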
259,884 | 15.03.2022 12:52:14 | 25,200 | dddbb7d7a944ec7cac77b65e500f0715095810be | Redirect new issues to Github chooser menu. | [
{
"change_type": "MODIFY",
"old_path": "website/cmd/server/main.go",
"new_path": "website/cmd/server/main.go",
"diff": "@@ -34,7 +34,7 @@ var redirects = map[string]string{\n\"/change\": \"https://github.com/google/gvisor\",\n\"/issue\": \"https://github.com/google/gvisor/issues\",\n\"/issues\": \"https://github.com/google/gvisor/issues\",\n- \"/issue/new\": \"https://github.com/google/gvisor/issues/new\",\n+ \"/issue/new\": \"https://github.com/google/gvisor/issues/new/choose\",\n\"/pr\": \"https://github.com/google/gvisor/pulls\",\n// For links.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Redirect new issues to Github chooser menu.
PiperOrigin-RevId: 434831027 |
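The change above only retargets one entry of the website server's redirects map. For context, a table of that shape is usually served by a very small handler along these lines; this is a hypothetical sketch, not the actual website/cmd/server implementation:

package main

import (
	"log"
	"net/http"
)

// redirects maps request paths to absolute destination URLs, in the spirit of
// the map edited above; the entries here are examples only.
var redirects = map[string]string{
	"/issue/new": "https://github.com/google/gvisor/issues/new/choose",
	"/issues":    "https://github.com/google/gvisor/issues",
}

// redirectHandler answers known paths with an HTTP redirect and everything
// else with 404.
func redirectHandler(w http.ResponseWriter, r *http.Request) {
	if target, ok := redirects[r.URL.Path]; ok {
		http.Redirect(w, r, target, http.StatusFound)
		return
	}
	http.NotFound(w, r)
}

func main() {
	http.HandleFunc("/", redirectHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
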
259,975 | 15.03.2022 13:19:32 | 25,200 | 334090575d9f83329d15bf4e858630de32bcc634 | [benchmarks] Fix issue with run-benchmark.
make run-benchmark should not install a new runtime. Instead, just run the
benchmark with the given RUNTIME.
Move all of the logic to install the runtime to benchmark-platforms. | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -367,14 +367,13 @@ init-benchmark-table: ## Initializes a BigQuery table with the benchmark schema.\n@$(call run,//tools/parsers:parser,init --project=$(BENCHMARKS_PROJECT) --dataset=$(BENCHMARKS_DATASET) --table=$(BENCHMARKS_TABLE))\n.PHONY: init-benchmark-table\n-# $(1) is the runtime name, $(2) are the arguments.\n+# $(1) is the runtime name.\nrun_benchmark = \\\n- ($(call header,BENCHMARK $(1) $(2)); \\\n+ ($(call header,BENCHMARK $(1)); \\\nset -euo pipefail; \\\nexport T=$$(mktemp --tmpdir logs.$(1).XXXXXX); \\\nif test \"$(1)\" = \"runc\"; then $(call sudo,$(BENCHMARKS_TARGETS),-runtime=$(1) $(BENCHMARKS_ARGS)) | tee $$T; fi; \\\n- if test \"$(1)\" != \"runc\"; then $(call install_runtime,$(1),--profile $(2)); \\\n- $(call sudo,$(BENCHMARKS_TARGETS),-runtime=$(1) $(BENCHMARKS_ARGS) $(BENCHMARKS_PROFILE)) | tee $$T; fi; \\\n+ if test \"$(1)\" != \"runc\"; then $(call sudo,$(BENCHMARKS_TARGETS),-runtime=$(1) $(BENCHMARKS_ARGS) $(BENCHMARKS_PROFILE)) | tee $$T; fi; \\\nif test \"$(BENCHMARKS_UPLOAD)\" = \"true\"; then \\\n$(call run,tools/parsers:parser,parse --debug --file=$$T --runtime=$(1) --suite_name=$(BENCHMARKS_SUITE) --project=$(BENCHMARKS_PROJECT) --dataset=$(BENCHMARKS_DATASET) --table=$(BENCHMARKS_TABLE) --official=$(BENCHMARKS_OFFICIAL)); \\\nfi; \\\n@@ -383,12 +382,13 @@ run_benchmark = \\\nbenchmark-platforms: load-benchmarks $(RUNTIME_BIN) ## Runs benchmarks for runc and all platforms.\n@set -xe; for PLATFORM in $$($(RUNTIME_BIN) help platforms); do \\\nexport PLATFORM; \\\n- $(call run_benchmark,$${PLATFORM},--platform=$${PLATFORM}); \\\n+ $(call install_runtime,$${PLATFORM},--platform $${PLATFORM}); \\\n+ $(call run_benchmark,$${PLATFORM}); \\\ndone\n@$(call run_benchmark,runc)\n.PHONY: benchmark-platforms\n-run-benchmark: load-benchmarks $(RUNTIME_BIN) ## Runs single benchmark and optionally sends data to BigQuery.\n+run-benchmark: load-benchmarks ## Runs single benchmark and optionally sends data to BigQuery.\n@$(call run_benchmark,$(RUNTIME))\n.PHONY: run-benchmark\n"
}
] | Go | Apache License 2.0 | google/gvisor | [benchmarks] Fix issue with run-benchmark.
make run-benchmark should not install a new runtime. Instead, just run the
benchmark with the given RUNTIME.
Move all of the logic to install the runtime to benchmark-platforms.
PiperOrigin-RevId: 434838884 |
259,992 | 17.03.2022 14:48:06 | 25,200 | 49896299a13934364c35563cfde04d53b6d97fa5 | Add product_name to sysfs
Some applications use product_name to find out if they are running inside
cloud providers and behave differently, like changing network settings or
expecting services to be available to the VM.
More details here:
This is also needed for: go/auth-library-noping | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/sys/sys.go",
"new_path": "pkg/sentry/fsimpl/sys/sys.go",
"diff": "@@ -34,6 +34,7 @@ import (\nconst (\n// Name is the default filesystem name.\nName = \"sysfs\"\n+ defaultSysMode = linux.FileMode(0444)\ndefaultSysDirMode = linux.FileMode(0755)\ndefaultMaxCachedDentries = uint64(1000)\n)\n@@ -43,6 +44,15 @@ const (\n// +stateify savable\ntype FilesystemType struct{}\n+// InternalData contains internal data passed in via\n+// vfs.GetFilesystemOptions.InternalData.\n+//\n+// +stateify savable\n+type InternalData struct {\n+ // ProductName is the value to be set to devices/virtual/dmi/id/product_name.\n+ ProductName string\n+}\n+\n// filesystem implements vfs.FilesystemImpl.\n//\n// +stateify savable\n@@ -96,18 +106,38 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nfsDirChildren[\"cgroup\"] = fs.newDir(ctx, creds, defaultSysDirMode, nil)\n}\n- root := fs.newDir(ctx, creds, defaultSysDirMode, map[string]kernfs.Inode{\n- \"block\": fs.newDir(ctx, creds, defaultSysDirMode, nil),\n- \"bus\": fs.newDir(ctx, creds, defaultSysDirMode, nil),\n- \"class\": fs.newDir(ctx, creds, defaultSysDirMode, map[string]kernfs.Inode{\n+ classSub := map[string]kernfs.Inode{\n\"power_supply\": fs.newDir(ctx, creds, defaultSysDirMode, nil),\n- }),\n- \"dev\": fs.newDir(ctx, creds, defaultSysDirMode, nil),\n- \"devices\": fs.newDir(ctx, creds, defaultSysDirMode, map[string]kernfs.Inode{\n+ }\n+ devicesSub := map[string]kernfs.Inode{\n\"system\": fs.newDir(ctx, creds, defaultSysDirMode, map[string]kernfs.Inode{\n\"cpu\": cpuDir(ctx, fs, creds),\n}),\n+ }\n+ productName := \"\"\n+ if opts.InternalData != nil {\n+ data := opts.InternalData.(*InternalData)\n+ productName = data.ProductName\n+ }\n+ if len(productName) > 0 {\n+ log.Debugf(\"Setting product_name: %q\", productName)\n+ classSub[\"dmi\"] = fs.newDir(ctx, creds, defaultSysDirMode, map[string]kernfs.Inode{\n+ \"id\": kernfs.NewStaticSymlink(ctx, creds, linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), \"../../devices/virtual/dmi/id\"),\n+ })\n+ devicesSub[\"virtual\"] = fs.newDir(ctx, creds, defaultSysDirMode, map[string]kernfs.Inode{\n+ \"dmi\": fs.newDir(ctx, creds, defaultSysDirMode, map[string]kernfs.Inode{\n+ \"id\": fs.newDir(ctx, creds, defaultSysDirMode, map[string]kernfs.Inode{\n+ \"product_name\": fs.newStaticFile(ctx, creds, defaultSysMode, productName+\"\\n\"),\n+ }),\n}),\n+ })\n+ }\n+ root := fs.newDir(ctx, creds, defaultSysDirMode, map[string]kernfs.Inode{\n+ \"block\": fs.newDir(ctx, creds, defaultSysDirMode, nil),\n+ \"bus\": fs.newDir(ctx, creds, defaultSysDirMode, nil),\n+ \"class\": fs.newDir(ctx, creds, defaultSysDirMode, classSub),\n+ \"dev\": fs.newDir(ctx, creds, defaultSysDirMode, nil),\n+ \"devices\": fs.newDir(ctx, creds, defaultSysDirMode, devicesSub),\n\"firmware\": fs.newDir(ctx, creds, defaultSysDirMode, nil),\n\"fs\": fs.newDir(ctx, creds, defaultSysDirMode, fsDirChildren),\n\"kernel\": kernelDir(ctx, fs, creds),\n@@ -239,3 +269,15 @@ type implStatFS struct{}\nfunc (*implStatFS) StatFS(context.Context, *vfs.Filesystem) (linux.Statfs, error) {\nreturn vfs.GenericStatFS(linux.SYSFS_MAGIC), nil\n}\n+\n+// +stateify savable\n+type staticFile struct {\n+ kernfs.DynamicBytesFile\n+ vfs.StaticData\n+}\n+\n+func (fs *filesystem) newStaticFile(ctx context.Context, creds *auth.Credentials, mode linux.FileMode, data string) kernfs.Inode {\n+ s := &staticFile{StaticData: vfs.StaticData{Data: data}}\n+ s.Init(ctx, creds, linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), s, mode)\n+ return s\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -432,7 +432,7 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\n// Set up the restore environment.\nctx := k.SupervisorContext()\n- mntr := newContainerMounter(&cm.l.root, cm.l.k, cm.l.mountHints, kernel.VFS2Enabled)\n+ mntr := newContainerMounter(&cm.l.root, cm.l.k, cm.l.mountHints, kernel.VFS2Enabled, cm.l.productName)\nif kernel.VFS2Enabled {\nctx, err = mntr.configureRestore(ctx)\nif err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -622,15 +622,20 @@ type containerMounter struct {\nk *kernel.Kernel\nhints *podMountHints\n+\n+ // productName is the value to show in\n+ // /sys/devices/virtual/dmi/id/product_name.\n+ productName string\n}\n-func newContainerMounter(info *containerInfo, k *kernel.Kernel, hints *podMountHints, vfs2Enabled bool) *containerMounter {\n+func newContainerMounter(info *containerInfo, k *kernel.Kernel, hints *podMountHints, vfs2Enabled bool, productName string) *containerMounter {\nreturn &containerMounter{\nroot: info.spec.Root,\nmounts: compileMounts(info.spec, info.conf, vfs2Enabled),\nfds: fdDispenser{fds: info.goferFDs},\nk: k,\nhints: hints,\n+ productName: productName,\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -144,6 +144,10 @@ type Loader struct {\n// mountHints provides extra information about mounts for containers that\n// apply to the entire pod.\nmountHints *podMountHints\n+\n+ // productName is the value to show in\n+ // /sys/devices/virtual/dmi/id/product_name.\n+ productName string\n}\n// execID uniquely identifies a sentry process that is executed in a container.\n@@ -219,6 +223,9 @@ type Args struct {\n// TraceFD is the file descriptor to write a Go execution trace to.\n// Valid if >=0.\nTraceFD int\n+ // ProductName is the value to show in\n+ // /sys/devices/virtual/dmi/id/product_name.\n+ ProductName string\n}\n// make sure stdioFDs are always the same on initial start and on restore\n@@ -424,6 +431,7 @@ func New(args Args) (*Loader, error) {\nmountHints: mountHints,\nroot: info,\nstopProfiling: stopProfiling,\n+ productName: args.ProductName,\n}\n// We don't care about child signals; some platforms can generate a\n@@ -769,7 +777,7 @@ func (l *Loader) createContainerProcess(root bool, cid string, info *containerIn\n}\nl.startGoferMonitor(cid, int32(info.goferFDs[0].FD()))\n- mntr := newContainerMounter(info, l.k, l.mountHints, kernel.VFS2Enabled)\n+ mntr := newContainerMounter(info, l.k, l.mountHints, kernel.VFS2Enabled, l.productName)\nif root {\nif err := mntr.processHints(info.conf, info.procArgs.Credentials); err != nil {\nreturn nil, nil, nil, err\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader_test.go",
"new_path": "runsc/boot/loader_test.go",
"diff": "@@ -447,7 +447,7 @@ func TestCreateMountNamespace(t *testing.T) {\ngoferFDs: []*fd.FD{fd.New(sandEnd)},\n}\n- mntr := newContainerMounter(&info, nil, &podMountHints{}, false /* vfs2Enabled */)\n+ mntr := newContainerMounter(&info, nil, &podMountHints{}, false /* vfs2Enabled */, \"\")\nmns, err := mntr.createMountNamespace(ctx, conf)\nif err != nil {\nt.Fatalf(\"failed to create mount namespace: %v\", err)\n@@ -487,7 +487,7 @@ func TestCreateMountNamespaceVFS2(t *testing.T) {\ndefer l.Destroy()\ndefer loaderCleanup()\n- mntr := newContainerMounter(&l.root, l.k, l.mountHints, true /* vfs2Enabled */)\n+ mntr := newContainerMounter(&l.root, l.k, l.mountHints, true /* vfs2Enabled */, \"\")\nif err := mntr.processHints(l.root.conf, l.root.procArgs.Credentials); err != nil {\nt.Fatalf(\"failed process hints: %v\", err)\n}\n@@ -716,7 +716,7 @@ func TestRestoreEnvironment(t *testing.T) {\nspec: tc.spec,\ngoferFDs: ioFDs,\n}\n- mntr := newContainerMounter(&info, nil, &podMountHints{}, conf.VFS2)\n+ mntr := newContainerMounter(&info, nil, &podMountHints{}, conf.VFS2, \"\")\nactualRenv, err := mntr.createRestoreEnvironment(conf)\nif !tc.errorExpected && err != nil {\nt.Fatalf(\"could not create restore environment for test:%s\", tc.name)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/vfs.go",
"new_path": "runsc/boot/vfs.go",
"diff": "@@ -500,12 +500,17 @@ func (c *containerMounter) getMountNameAndOptionsVFS2(conf *config.Config, m *mo\n// Find filesystem name and FS specific data field.\nswitch m.mount.Type {\n- case devpts.Name, devtmpfs.Name, proc.Name, sys.Name:\n+ case devpts.Name, devtmpfs.Name, proc.Name:\n// Nothing to do.\ncase nonefs:\nfsName = sys.Name\n+ case sys.Name:\n+ if len(c.productName) > 0 {\n+ internalData = &sys.InternalData{ProductName: c.productName}\n+ }\n+\ncase tmpfs.Name:\nvar err error\ndata, err = parseAndFilterOptions(m.mount.Options, tmpfsAllowedData...)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/boot.go",
"new_path": "runsc/cmd/boot.go",
"diff": "@@ -16,6 +16,7 @@ package cmd\nimport (\n\"context\"\n+ \"io/ioutil\"\n\"os\"\n\"runtime/debug\"\n\"strings\"\n@@ -107,6 +108,10 @@ type Boot struct {\n// terminates. This flag is set when the command execve's itself because\n// parent death signal doesn't propagate through execve when uid/gid changes.\nattached bool\n+\n+ // productName is the value to show in\n+ // /sys/devices/virtual/dmi/id/product_name.\n+ productName string\n}\n// Name implements subcommands.Command.Name.\n@@ -146,6 +151,7 @@ func (b *Boot) SetFlags(f *flag.FlagSet) {\nf.IntVar(&b.profileMutexFD, \"profile-mutex-fd\", -1, \"file descriptor to write mutex profile to. -1 disables profiling.\")\nf.IntVar(&b.traceFD, \"trace-fd\", -1, \"file descriptor to write Go execution trace to. -1 disables tracing.\")\nf.BoolVar(&b.attached, \"attached\", false, \"if attached is true, kills the sandbox process when the parent process terminates\")\n+ f.StringVar(&b.productName, \"product-name\", \"\", \"value to show in /sys/devices/virtual/dmi/id/product_name\")\n}\n// Execute implements subcommands.Command.Execute. It starts a sandbox in a\n@@ -161,6 +167,16 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n// Set traceback level\ndebug.SetTraceback(conf.Traceback)\n+ if len(b.productName) == 0 {\n+ // Do this before chroot takes effect, otherwise we can't read /sys.\n+ if product, err := ioutil.ReadFile(\"/sys/devices/virtual/dmi/id/product_name\"); err != nil {\n+ log.Warningf(\"Not setting product_name: %v\", err)\n+ } else {\n+ b.productName = strings.TrimSpace(string(product))\n+ log.Infof(\"Setting product_name: %q\", b.productName)\n+ }\n+ }\n+\nif b.attached {\n// Ensure this process is killed after parent process terminates when\n// attached mode is enabled. In the unfortunate event that the parent\n@@ -177,7 +193,7 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nif !b.applyCaps && !conf.Rootless {\n// Remove --apply-caps arg to call myself. It has already been done.\n- args := prepareArgs(b.attached, \"setup-root\")\n+ args := b.prepareArgs(\"setup-root\")\n// Note that we've already read the spec from the spec FD, and\n// we will read it again after the exec call. This works\n@@ -217,7 +233,7 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n// Remove --apply-caps and --setup-root arg to call myself. Both have\n// already been done.\n- args := prepareArgs(b.attached, \"setup-root\", \"apply-caps\")\n+ args := b.prepareArgs(\"setup-root\", \"apply-caps\")\n// Note that we've already read the spec from the spec FD, and\n// we will read it again after the exec call. This works\n@@ -271,6 +287,7 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nProfileHeapFD: b.profileHeapFD,\nProfileMutexFD: b.profileMutexFD,\nTraceFD: b.traceFD,\n+ ProductName: b.productName,\n}\nl, err := boot.New(bootArgs)\nif err != nil {\n@@ -308,7 +325,7 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nreturn subcommands.ExitSuccess\n}\n-func prepareArgs(attached bool, exclude ...string) []string {\n+func (b *Boot) prepareArgs(exclude ...string) []string {\nvar args []string\nfor _, arg := range os.Args {\nfor _, excl := range exclude {\n@@ -317,11 +334,18 @@ func prepareArgs(attached bool, exclude ...string) []string {\n}\n}\nargs = append(args, arg)\n- if attached && arg == \"boot\" {\n- // Strategicaly place \"--attached\" after the command. 
This is needed\n- // to ensure the new process is killed when the parent process terminates.\n+ // Strategically add parameters after the command and before the container\n+ // ID at the end.\n+ if arg == \"boot\" {\n+ if b.attached {\n+ // This is needed to ensure the new process is killed when the parent\n+ // process terminates.\nargs = append(args, \"--attached\")\n}\n+ if len(b.productName) > 0 {\n+ args = append(args, \"--product-name\", b.productName)\n+ }\n+ }\nskip:\n}\nreturn args\n"
},
{
"change_type": "MODIFY",
"old_path": "test/e2e/integration_test.go",
"new_path": "test/e2e/integration_test.go",
"diff": "@@ -798,3 +798,23 @@ func TestDeleteInterface(t *testing.T) {\nt.Fatalf(\"loopback interface is removed\")\n}\n}\n+\n+func TestProductName(t *testing.T) {\n+ want, err := ioutil.ReadFile(\"/sys/devices/virtual/dmi/id/product_name\")\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+\n+ ctx := context.Background()\n+ d := dockerutil.MakeContainer(ctx, t)\n+ defer d.CleanUp(ctx)\n+\n+ opts := dockerutil.RunOpts{Image: \"basic/alpine\"}\n+ got, err := d.Run(ctx, opts, \"cat\", \"/sys/devices/virtual/dmi/id/product_name\")\n+ if err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n+ if string(want) != got {\n+ t.Errorf(\"invalid product name, want: %q, got: %q\", want, got)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add product_name to sysfs
Some applications use product_name to find out if they are running inside
cloud providers and behave differently, like changing network settings or
expecting services to be available to the VM.
More details here: https://docs.google.com/document/d/1xQXE27x9wTvwPsgiX9Hn0o8mcq5z3SKi-1jwscQsCAk
This is also needed for: go/auth-library-noping
PiperOrigin-RevId: 435454155 |
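The motivation above is that guest applications read the DMI product name to decide which cloud they are running on. A hedged illustration of that detection pattern from the application side — the matched substrings are examples only, not an authoritative list:

package main

import (
	"fmt"
	"os"
	"strings"
)

// cloudFromProductName guesses the hosting environment from
// /sys/devices/virtual/dmi/id/product_name, the file the sandbox now exposes.
// The substrings checked here are illustrative, not exhaustive.
func cloudFromProductName() (string, error) {
	b, err := os.ReadFile("/sys/devices/virtual/dmi/id/product_name")
	if err != nil {
		return "", err
	}
	name := strings.TrimSpace(string(b))
	switch {
	case strings.Contains(name, "Google"):
		return "gce", nil
	case strings.Contains(name, "amazon"):
		return "ec2", nil
	default:
		return "unknown (" + name + ")", nil
	}
}

func main() {
	env, err := cloudFromProductName()
	if err != nil {
		fmt.Println("could not read product_name:", err)
		return
	}
	fmt.Println("detected environment:", env)
}
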
259,977 | 17.03.2022 16:47:32 | 25,200 | 438c84460d6ed2dc085c407b05f15eed9e97d608 | Poll for POLLERR before validating ICMP error
...since ICMP errors are returned asynchronously. This also matches the
behavior of existing tests. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/udp_socket.cc",
"new_path": "test/syscalls/linux/udp_socket.cc",
"diff": "@@ -382,6 +382,11 @@ TEST_P(UdpSocketTest, ConnectWriteToInvalidPort) {\nASSERT_THAT(sendto(sock_.get(), buf, sizeof(buf), 0, addr, addrlen_),\nSyscallSucceedsWithValue(sizeof(buf)));\n+ // Poll to make sure we get the ICMP error back.\n+ constexpr int kTimeout = 1000;\n+ struct pollfd pfd = {sock_.get(), POLLERR, 0};\n+ ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));\n+\n// Now verify that we got an ICMP error back of ECONNREFUSED.\nint err;\nsocklen_t optlen = sizeof(err);\n"
}
] | Go | Apache License 2.0 | google/gvisor | Poll for POLLERR before validating ICMP error
...since ICMP errors are returned asynchronously. This also matches the
behavior of existing tests.
PiperOrigin-RevId: 435484650 |
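The asynchronous-delivery point above is the crux: SO_ERROR may still be zero immediately after the offending send, so the test now waits for POLLERR first. A Linux-only Go sketch of the same ordering, using golang.org/x/sys/unix (the destination port is simply assumed to be closed):

package main

import (
	"fmt"
	"log"
	"syscall"

	"golang.org/x/sys/unix"
)

// awaitSocketError polls a connected UDP socket for POLLERR and only then
// reads the pending error with SO_ERROR, since the ICMP error is delivered
// asynchronously after the send that triggered it.
func awaitSocketError(fd int, timeoutMs int) error {
	pfd := []unix.PollFd{{Fd: int32(fd), Events: unix.POLLERR}}
	n, err := unix.Poll(pfd, timeoutMs)
	if err != nil {
		return err
	}
	if n == 0 {
		return fmt.Errorf("timed out waiting for POLLERR")
	}
	soErr, err := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_ERROR)
	if err != nil {
		return err
	}
	if soErr == 0 {
		return nil
	}
	return syscall.Errno(soErr)
}

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// Send to a loopback port that is assumed to be closed, so the kernel
	// queues an ICMP port-unreachable error against the socket.
	dst := &unix.SockaddrInet4{Port: 1, Addr: [4]byte{127, 0, 0, 1}}
	if err := unix.Connect(fd, dst); err != nil {
		log.Fatal(err)
	}
	if _, err := unix.Write(fd, []byte("ping")); err != nil {
		log.Fatal(err)
	}
	fmt.Println("pending socket error:", awaitSocketError(fd, 1000))
}

Waiting on POLLERR with a timeout, rather than reading SO_ERROR right away, is what removes the race and the resulting flakiness.
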
259,907 | 18.03.2022 00:03:55 | 25,200 | aaf0574af7b4ca4e1974c1e5a8a95e0a025b563b | Set DEBIAN_FRONTEND to be noninteractive in make_apt.sh.
This would prevent timeout issues where apt-get gets stuck on some prompt. | [
{
"change_type": "MODIFY",
"old_path": "tools/make_apt.sh",
"new_path": "tools/make_apt.sh",
"diff": "@@ -30,6 +30,7 @@ readonly root\nshift; shift; shift # For \"$@\" below.\n# Ensure that we have the correct packages installed.\n+export DEBIAN_FRONTEND=noninteractive\nfunction apt_install() {\nwhile true; do\nsudo apt-get update &&\n"
}
] | Go | Apache License 2.0 | google/gvisor | Set DEBIAN_FRONTEND to be noninteractive in make_apt.sh.
This would prevent timeout issues where apt-get gets stuck on some prompt.
PiperOrigin-RevId: 435558241 |
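Exporting DEBIAN_FRONTEND=noninteractive, as above, keeps apt-get from stalling on a configuration prompt during unattended runs. The same precaution applies when apt-get is driven from Go tooling; a small hypothetical sketch (the package name is a placeholder):

package main

import (
	"log"
	"os"
	"os/exec"
)

// aptGetInstall runs apt-get with DEBIAN_FRONTEND=noninteractive so package
// configuration prompts cannot block an unattended run.
func aptGetInstall(pkg string) error {
	cmd := exec.Command("apt-get", "install", "-y", pkg)
	cmd.Env = append(os.Environ(), "DEBIAN_FRONTEND=noninteractive")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	if err := aptGetInstall("apt-utils"); err != nil {
		log.Fatal(err)
	}
}
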
259,907 | 18.03.2022 00:15:32 | 25,200 | cf0bdacdd5228f920023f713715589df90b03846 | Revive VFS1-only failing runtime tests. | [
{
"change_type": "MODIFY",
"old_path": "test/runtimes/exclude/nodejs12.4.0.csv",
"new_path": "test/runtimes/exclude/nodejs12.4.0.csv",
"diff": "@@ -11,10 +11,6 @@ parallel/test-dgram-bind-fd.js,b/132447356,\nparallel/test-dgram-socket-buffer-size.js,b/68847921,\nparallel/test-dns-channel-timeout.js,b/161893056,\nparallel/test-fs-access.js,,Broken test\n-parallel/test-fs-watchfile.js,b/166819807,Flaky - VFS1 only\n-parallel/test-fs-write-stream.js,b/166819807,Flaky - VFS1 only\n-parallel/test-fs-write-stream-double-close.js,b/166819807,Flaky - VFS1 only\n-parallel/test-fs-write-stream-throw-type-error.js,b/166819807,Flaky - VFS1 only\nparallel/test-http-writable-true-after-close.js,b/171301436,Flaky - Mismatched <anonymous> function calls. Expected exactly 1 actual 2\nparallel/test-os.js,b/63997097,\nparallel/test-process-uid-gid.js,,Does not work inside Docker with gid nobody\n@@ -42,5 +38,5 @@ pseudo-tty/test-tty-window-size.js,b/162801321,\npseudo-tty/test-tty-wrap.js,b/162801321,\npummel/test-net-pingpong.js,,Broken test\npummel/test-vm-memleak.js,b/162799436,\n-pummel/test-watch-file.js,,Flaky - VFS1 only\n+pummel/test-watch-file.js,,Flaky test which was fixed in https://github.com/nodejs/node/commit/b0b52b2023f5cd0df4ae921850815586b4313dca. Unexclude when bumping nodejs version.\ntick-processor/test-tick-processor-builtin.js,,Broken test\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runtimes/exclude/php7.3.6.csv",
"new_path": "test/runtimes/exclude/php7.3.6.csv",
"diff": "@@ -27,10 +27,8 @@ ext/standard/tests/file/php_fd_wrapper_01.phpt,,\next/standard/tests/file/php_fd_wrapper_02.phpt,,\next/standard/tests/file/php_fd_wrapper_03.phpt,,\next/standard/tests/file/php_fd_wrapper_04.phpt,,\n-ext/standard/tests/file/realpath_bug77484.phpt,b/162894969,VFS1 only failure\next/standard/tests/file/rename_variation.phpt,b/68717309,\next/standard/tests/file/symlink_link_linkinfo_is_link_variation4.phpt,b/162895341,\n-ext/standard/tests/file/symlink_link_linkinfo_is_link_variation8.phpt,b/162896223,VFS1 only failure\next/standard/tests/general_functions/escapeshellarg_bug71270.phpt,,\next/standard/tests/general_functions/escapeshellcmd_bug71270.phpt,,\next/standard/tests/streams/proc_open_bug60120.phpt,,Flaky until php-src 3852a35fdbcb\n"
}
] | Go | Apache License 2.0 | google/gvisor | Revive VFS1-only failing runtime tests.
PiperOrigin-RevId: 435560141 |
259,985 | 18.03.2022 01:15:27 | 25,200 | 24cef0a622bb99fdfafe7898679a95a2a974b499 | cgroupfs: Stub control files to allow subcontainer creation with gcontain. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go",
"diff": "package cgroupfs\nimport (\n+ \"bytes\"\n\"fmt\"\n\"sort\"\n\"strconv\"\n\"strings\"\n+ \"sync/atomic\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n@@ -71,6 +73,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/sync\"\n+ \"gvisor.dev/gvisor/pkg/usermem\"\n)\nconst (\n@@ -546,6 +549,11 @@ type controllerFile struct {\nkernfs.DynamicBytesFile\n}\n+// SetStat implements kernfs.Inode.SetStat.\n+func (f *controllerFile) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *auth.Credentials, opts vfs.SetStatOptions) error {\n+ return f.InodeAttrs.SetStat(ctx, fs, creds, opts)\n+}\n+\nfunc (fs *filesystem) newControllerFile(ctx context.Context, creds *auth.Credentials, data vfs.DynamicBytesSource) kernfs.Inode {\nf := &controllerFile{}\nf.Init(ctx, creds, linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), data, readonlyFileMode)\n@@ -568,6 +576,11 @@ type staticControllerFile struct {\nvfs.StaticData\n}\n+// SetStat implements kernfs.Inode.SetStat.\n+func (f *staticControllerFile) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *auth.Credentials, opts vfs.SetStatOptions) error {\n+ return f.InodeAttrs.SetStat(ctx, fs, creds, opts)\n+}\n+\n// Note: We let the caller provide the mode so that static files may be used to\n// fake both readable and writable control files. However, static files are\n// effectively readonly, as attempting to write to them will return EIO\n@@ -577,3 +590,40 @@ func (fs *filesystem) newStaticControllerFile(ctx context.Context, creds *auth.C\nf.Init(ctx, creds, linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), f, mode)\nreturn f\n}\n+\n+// stubControllerFile is a writable control file that remembers the control\n+// value written to it.\n+//\n+// +stateify savable\n+type stubControllerFile struct {\n+ controllerFile\n+\n+ // data is accessed through atomic ops.\n+ data *int64\n+}\n+\n+// Generate implements vfs.DynamicBytesSource.Generate.\n+func (f *stubControllerFile) Generate(ctx context.Context, buf *bytes.Buffer) error {\n+ fmt.Fprintf(buf, \"%d\\n\", atomic.LoadInt64(f.data))\n+ return nil\n+}\n+\n+// Write implements vfs.WritableDynamicBytesSource.Write.\n+func (f *stubControllerFile) Write(ctx context.Context, _ *vfs.FileDescription, src usermem.IOSequence, offset int64) (int64, error) {\n+ val, n, err := parseInt64FromString(ctx, src)\n+ if err != nil {\n+ return 0, err\n+ }\n+ atomic.StoreInt64(f.data, val)\n+ return n, nil\n+}\n+\n+// newStubControllerFile creates a new stub controller file tbat loads and\n+// stores a control value from data.\n+func (fs *filesystem) newStubControllerFile(ctx context.Context, creds *auth.Credentials, data *int64) kernfs.Inode {\n+ f := &stubControllerFile{\n+ data: data,\n+ }\n+ f.Init(ctx, creds, linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), f, writableFileMode)\n+ return f\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/cpu.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/cpu.go",
"diff": "package cgroupfs\nimport (\n- \"fmt\"\n-\n- \"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n@@ -76,7 +73,7 @@ func (c *cpuController) Clone() controller {\n// AddControlFiles implements controller.AddControlFiles.\nfunc (c *cpuController) AddControlFiles(ctx context.Context, creds *auth.Credentials, _ *cgroupInode, contents map[string]kernfs.Inode) {\n- contents[\"cpu.cfs_period_us\"] = c.fs.newStaticControllerFile(ctx, creds, linux.FileMode(0644), fmt.Sprintf(\"%d\\n\", c.cfsPeriod))\n- contents[\"cpu.cfs_quota_us\"] = c.fs.newStaticControllerFile(ctx, creds, linux.FileMode(0644), fmt.Sprintf(\"%d\\n\", c.cfsQuota))\n- contents[\"cpu.shares\"] = c.fs.newStaticControllerFile(ctx, creds, linux.FileMode(0644), fmt.Sprintf(\"%d\\n\", c.shares))\n+ contents[\"cpu.cfs_period_us\"] = c.fs.newStubControllerFile(ctx, creds, &c.cfsPeriod)\n+ contents[\"cpu.cfs_quota_us\"] = c.fs.newStubControllerFile(ctx, creds, &c.cfsQuota)\n+ contents[\"cpu.shares\"] = c.fs.newStubControllerFile(ctx, creds, &c.shares)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/job.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/job.go",
"diff": "package cgroupfs\nimport (\n- \"bytes\"\n- \"fmt\"\n-\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n- \"gvisor.dev/gvisor/pkg/sentry/vfs\"\n- \"gvisor.dev/gvisor/pkg/usermem\"\n)\n// +stateify savable\n@@ -51,26 +46,5 @@ func (c *jobController) Clone() controller {\n}\nfunc (c *jobController) AddControlFiles(ctx context.Context, creds *auth.Credentials, _ *cgroupInode, contents map[string]kernfs.Inode) {\n- contents[\"job.id\"] = c.fs.newControllerWritableFile(ctx, creds, &jobIDData{c: c})\n-}\n-\n-// +stateify savable\n-type jobIDData struct {\n- c *jobController\n-}\n-\n-// Generate implements vfs.DynamicBytesSource.Generate.\n-func (d *jobIDData) Generate(ctx context.Context, buf *bytes.Buffer) error {\n- fmt.Fprintf(buf, \"%d\\n\", d.c.id)\n- return nil\n-}\n-\n-// Write implements vfs.WritableDynamicBytesSource.Write.\n-func (d *jobIDData) Write(ctx context.Context, _ *vfs.FileDescription, src usermem.IOSequence, offset int64) (int64, error) {\n- val, n, err := parseInt64FromString(ctx, src)\n- if err != nil {\n- return n, err\n- }\n- d.c.id = val\n- return n, nil\n+ contents[\"job.id\"] = c.fs.newStubControllerFile(ctx, creds, &c.id)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/memory.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/memory.go",
"diff": "@@ -35,6 +35,7 @@ type memoryController struct {\nlimitBytes int64\nsoftLimitBytes int64\nmoveChargeAtImmigrate int64\n+ pressureLevel int64\n}\nvar _ controller = (*memoryController)(nil)\n@@ -78,9 +79,10 @@ func (c *memoryController) Clone() controller {\n// AddControlFiles implements controller.AddControlFiles.\nfunc (c *memoryController) AddControlFiles(ctx context.Context, creds *auth.Credentials, _ *cgroupInode, contents map[string]kernfs.Inode) {\ncontents[\"memory.usage_in_bytes\"] = c.fs.newControllerFile(ctx, creds, &memoryUsageInBytesData{})\n- contents[\"memory.limit_in_bytes\"] = c.fs.newStaticControllerFile(ctx, creds, linux.FileMode(0644), fmt.Sprintf(\"%d\\n\", c.limitBytes))\n- contents[\"memory.soft_limit_in_bytes\"] = c.fs.newStaticControllerFile(ctx, creds, linux.FileMode(0644), fmt.Sprintf(\"%d\\n\", c.softLimitBytes))\n- contents[\"memory.move_charge_at_immigrate\"] = c.fs.newStaticControllerFile(ctx, creds, linux.FileMode(0644), fmt.Sprintf(\"%d\\n\", c.moveChargeAtImmigrate))\n+ contents[\"memory.limit_in_bytes\"] = c.fs.newStubControllerFile(ctx, creds, &c.limitBytes)\n+ contents[\"memory.soft_limit_in_bytes\"] = c.fs.newStubControllerFile(ctx, creds, &c.softLimitBytes)\n+ contents[\"memory.move_charge_at_immigrate\"] = c.fs.newStubControllerFile(ctx, creds, &c.moveChargeAtImmigrate)\n+ contents[\"memory.pressure_level\"] = c.fs.newStaticControllerFile(ctx, creds, linux.FileMode(0644), fmt.Sprintf(\"%d\\n\", c.pressureLevel))\n}\n// +stateify savable\n"
}
] | Go | Apache License 2.0 | google/gvisor | cgroupfs: Stub control files to allow subcontainer creation with gcontain.
PiperOrigin-RevId: 435569386 |
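The stubControllerFile introduced above is, at its core, a control file backed by a shared int64 that is read and written with atomic operations. A stripped-down standalone illustration of that load/parse/store pattern, with none of the kernfs or vfs types involved:

package main

import (
	"fmt"
	"strconv"
	"strings"
	"sync/atomic"
)

// stubValue stands in for the *int64 a stub control file wraps: reads format
// the current value, writes parse and store a new one.
type stubValue struct {
	data int64
}

// Read returns the control value formatted the way cgroupfs files are, one
// decimal integer followed by a newline.
func (v *stubValue) Read() string {
	return fmt.Sprintf("%d\n", atomic.LoadInt64(&v.data))
}

// Write parses the incoming string and remembers the value atomically.
func (v *stubValue) Write(s string) error {
	val, err := strconv.ParseInt(strings.TrimSpace(s), 10, 64)
	if err != nil {
		return err
	}
	atomic.StoreInt64(&v.data, val)
	return nil
}

func main() {
	shares := &stubValue{data: 1024}
	fmt.Print("cpu.shares: ", shares.Read())
	if err := shares.Write("512\n"); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	fmt.Print("cpu.shares: ", shares.Read())
}
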
259,864 | 18.03.2022 09:15:08 | -3,600 | 49c97d8b25addb82150a566545720299ec991a9d | Configuration needs to use `options` not `option` | [
{
"change_type": "MODIFY",
"old_path": "g3doc/user_guide/containerd/configuration.md",
"new_path": "g3doc/user_guide/containerd/configuration.md",
"diff": "@@ -68,7 +68,7 @@ version = 2\nruntime_type = \"io.containerd.runc.v2\"\n[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runsc]\nruntime_type = \"io.containerd.runsc.v1\"\n-[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runsc.option]\n+[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runsc.options]\nTypeUrl = \"io.containerd.runsc.v1.options\"\nConfigPath = \"/etc/containerd/runsc.toml\"\nEOF\n"
}
] | Go | Apache License 2.0 | google/gvisor | Configuration needs to use `options` not `option` |
259,992 | 22.03.2022 13:50:29 | 25,200 | f7712ab50c593baf4f372d163a7b553081c1857e | Revalidate symlink chain
When there is more than one symlink pointing to another symlink,
the revalidation step was being skipped because stepLocked() would
process all symlinks before returning.
Closes | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"new_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"diff": "@@ -191,7 +191,9 @@ func (fs *filesystem) renameMuUnlockAndCheckCaching(ctx context.Context, ds **[]\n}\n// stepLocked resolves rp.Component() to an existing file, starting from the\n-// given directory.\n+// given directory. If the file at rp.Component is a symlink and\n+// mayFollowSymlinks is set, the symlink is resolved and the result returned to\n+// the caller (single step).\n//\n// Dentries which may become cached as a result of the traversal are appended\n// to *ds.\n@@ -202,8 +204,6 @@ func (fs *filesystem) renameMuUnlockAndCheckCaching(ctx context.Context, ds **[]\n// * !rp.Done().\n// * If !d.cachedMetadataAuthoritative(), then d and all children that are\n// part of rp must have been revalidated.\n-//\n-// Postconditions: The returned dentry's cached metadata is up to date.\nfunc (fs *filesystem) stepLocked(ctx context.Context, rp *vfs.ResolvingPath, d *dentry, mayFollowSymlinks bool, ds **[]*dentry) (*dentry, bool, error) {\nif !d.isDir() {\nreturn nil, false, linuxerr.ENOTDIR\n@@ -211,25 +211,23 @@ func (fs *filesystem) stepLocked(ctx context.Context, rp *vfs.ResolvingPath, d *\nif err := d.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {\nreturn nil, false, err\n}\n- followedSymlink := false\n-afterSymlink:\nname := rp.Component()\nif name == \".\" {\nrp.Advance()\n- return d, followedSymlink, nil\n+ return d, false, nil\n}\nif name == \"..\" {\nif isRoot, err := rp.CheckRoot(ctx, &d.vfsd); err != nil {\nreturn nil, false, err\n} else if isRoot || d.parent == nil {\nrp.Advance()\n- return d, followedSymlink, nil\n+ return d, false, nil\n}\nif err := rp.CheckMount(ctx, &d.parent.vfsd); err != nil {\nreturn nil, false, err\n}\nrp.Advance()\n- return d.parent, followedSymlink, nil\n+ return d.parent, false, nil\n}\nvar child *dentry\nvar err error\n@@ -252,11 +250,10 @@ afterSymlink:\nif err := rp.HandleSymlink(target); err != nil {\nreturn nil, false, err\n}\n- followedSymlink = true\n- goto afterSymlink // don't check the current directory again\n+ return d, true, nil\n}\nrp.Advance()\n- return child, followedSymlink, nil\n+ return child, false, nil\n}\n// Preconditions:\n@@ -933,7 +930,7 @@ func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\n// to creating a synthetic one, i.e. one that is kept entirely in memory.\n// Check that we're not overriding an existing file with a synthetic one.\n- _, _, err = fs.stepLocked(ctx, rp, parent, true, ds)\n+ _, _, err = fs.stepLocked(ctx, rp, parent, false, ds)\nswitch {\ncase err == nil:\n// Step succeeded, another file exists.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/e2e/integration_test.go",
"new_path": "test/e2e/integration_test.go",
"diff": "@@ -818,3 +818,96 @@ func TestProductName(t *testing.T) {\nt.Errorf(\"invalid product name, want: %q, got: %q\", want, got)\n}\n}\n+\n+// TestRevalidateSymlinkChain tests that when a symlink in the middle of chain\n+// gets updated externally, the change is noticed and the internal cache is\n+// updated accordingly.\n+func TestRevalidateSymlinkChain(t *testing.T) {\n+ ctx := context.Background()\n+ d := dockerutil.MakeContainer(ctx, t)\n+ defer d.CleanUp(ctx)\n+\n+ // Create the following structure:\n+ // dir\n+ // + gen1\n+ // | + file [content: 123]\n+ // |\n+ // + gen2\n+ // | + file [content: 456]\n+ // |\n+ // + file -> sym1/file\n+ // + sym1 -> sym2\n+ // + sym2 -> gen1\n+ //\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"sub-mount\")\n+ if err != nil {\n+ t.Fatalf(\"TempDir(): %v\", err)\n+ }\n+ if err := os.Mkdir(filepath.Join(dir, \"gen1\"), 0777); err != nil {\n+ t.Fatal(err)\n+ }\n+ if err := os.Mkdir(filepath.Join(dir, \"gen2\"), 0777); err != nil {\n+ t.Fatal(err)\n+ }\n+ if err := os.WriteFile(filepath.Join(dir, \"gen1\", \"file\"), []byte(\"123\"), 0666); err != nil {\n+ t.Fatal(err)\n+ }\n+ if err := os.WriteFile(filepath.Join(dir, \"gen2\", \"file\"), []byte(\"456\"), 0666); err != nil {\n+ t.Fatal(err)\n+ }\n+ if err := os.Symlink(\"sym1/file\", filepath.Join(dir, \"file\")); err != nil {\n+ t.Fatal(err)\n+ }\n+ if err := os.Symlink(\"sym2\", filepath.Join(dir, \"sym1\")); err != nil {\n+ t.Fatal(err)\n+ }\n+ if err := os.Symlink(\"gen1\", filepath.Join(dir, \"sym2\")); err != nil {\n+ t.Fatal(err)\n+ }\n+\n+ // Mount dir inside the container so that external changes are propagated to\n+ // the container.\n+ opts := dockerutil.RunOpts{\n+ Image: \"basic/alpine\",\n+ Privileged: true, // Required for umount\n+ Mounts: []mount.Mount{\n+ {\n+ Type: mount.TypeBind,\n+ Source: dir,\n+ Target: \"/foo\",\n+ },\n+ },\n+ }\n+ if err := d.Create(ctx, opts, \"sleep\", \"1000\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n+ if err := d.Start(ctx); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n+\n+ // Read and cache symlinks pointing to gen1/file.\n+ got, err := d.Exec(ctx, dockerutil.ExecOpts{}, \"cat\", \"/foo/file\")\n+ if err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n+ if want := \"123\"; got != want {\n+ t.Fatalf(\"Read wrong file, want: %q, got: %q\", want, got)\n+ }\n+\n+ // Change the symlink to point to gen2 file.\n+ if err := os.Remove(filepath.Join(dir, \"sym2\")); err != nil {\n+ t.Fatal(err)\n+ }\n+ if err := os.Symlink(\"gen2\", filepath.Join(dir, \"sym2\")); err != nil {\n+ t.Fatal(err)\n+ }\n+\n+ // Read symlink chain again and check that it got updated to gen2/file.\n+ got, err = d.Exec(ctx, dockerutil.ExecOpts{}, \"cat\", \"/foo/file\")\n+ if err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n+ if want := \"456\"; got != want {\n+ t.Fatalf(\"Read wrong file, want: %q, got: %q\", want, got)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Revalidate symlink chain
When there is more than one symlink pointing to another symlink,
the revalidation step was being skipped because stepLocked() would
process all symlinks before returning.
Closes #7297
PiperOrigin-RevId: 436562314 |
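The scenario described above — a symlink chain whose middle link is repointed externally — can be reproduced in miniature on a plain host filesystem, where the second read naturally observes the new target; the fix makes the sentry's cached dentries behave the same way. The sketch below mirrors the layout used by the new integration test:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "symchain")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	must := func(err error) {
		if err != nil {
			panic(err)
		}
	}

	// Layout: dir/file -> sym1/file, sym1 -> sym2, sym2 -> gen1 (later gen2).
	must(os.Mkdir(filepath.Join(dir, "gen1"), 0o777))
	must(os.Mkdir(filepath.Join(dir, "gen2"), 0o777))
	must(os.WriteFile(filepath.Join(dir, "gen1", "file"), []byte("123"), 0o666))
	must(os.WriteFile(filepath.Join(dir, "gen2", "file"), []byte("456"), 0o666))
	must(os.Symlink("sym1/file", filepath.Join(dir, "file")))
	must(os.Symlink("sym2", filepath.Join(dir, "sym1")))
	must(os.Symlink("gen1", filepath.Join(dir, "sym2")))

	read := func() string {
		b, err := os.ReadFile(filepath.Join(dir, "file"))
		must(err)
		return string(b)
	}
	fmt.Println("before:", read()) // "123", resolved through gen1

	// Repoint the middle of the chain, as the integration test does from
	// outside the container.
	must(os.Remove(filepath.Join(dir, "sym2")))
	must(os.Symlink("gen2", filepath.Join(dir, "sym2")))
	fmt.Println("after: ", read()) // "456", resolved through gen2
}
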
259,992 | 22.03.2022 16:42:06 | 25,200 | e5c86191d1529ae4b15745b944b45311d1aaec9a | Group FD flags in cmd.Boot/Gofer | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/boot.go",
"new_path": "runsc/cmd/boot.go",
"diff": "@@ -132,16 +132,20 @@ func (*Boot) Usage() string {\n// SetFlags implements subcommands.Command.SetFlags.\nfunc (b *Boot) SetFlags(f *flag.FlagSet) {\nf.StringVar(&b.bundleDir, \"bundle\", \"\", \"required path to the root of the bundle directory\")\n- f.IntVar(&b.specFD, \"spec-fd\", -1, \"required fd with the container spec\")\n- f.IntVar(&b.controllerFD, \"controller-fd\", -1, \"required FD of a stream socket for the control server that must be donated to this process\")\n- f.IntVar(&b.deviceFD, \"device-fd\", -1, \"FD for the platform device file\")\n- f.Var(&b.ioFDs, \"io-fds\", \"list of FDs to connect 9P clients. They must follow this order: root first, then mounts as defined in the spec\")\n- f.Var(&b.stdioFDs, \"stdio-fds\", \"list of FDs containing sandbox stdin, stdout, and stderr in that order\")\nf.BoolVar(&b.applyCaps, \"apply-caps\", false, \"if true, apply capabilities defined in the spec to the process\")\nf.BoolVar(&b.setUpRoot, \"setup-root\", false, \"if true, set up an empty root for the process\")\nf.BoolVar(&b.pidns, \"pidns\", false, \"if true, the sandbox is in its own PID namespace\")\nf.IntVar(&b.cpuNum, \"cpu-num\", 0, \"number of CPUs to create inside the sandbox\")\nf.Uint64Var(&b.totalMem, \"total-memory\", 0, \"sets the initial amount of total memory to report back to the container\")\n+ f.BoolVar(&b.attached, \"attached\", false, \"if attached is true, kills the sandbox process when the parent process terminates\")\n+ f.StringVar(&b.productName, \"product-name\", \"\", \"value to show in /sys/devices/virtual/dmi/id/product_name\")\n+\n+ // Open FDs that are donated to the sandbox.\n+ f.IntVar(&b.specFD, \"spec-fd\", -1, \"required fd with the container spec\")\n+ f.IntVar(&b.controllerFD, \"controller-fd\", -1, \"required FD of a stream socket for the control server that must be donated to this process\")\n+ f.IntVar(&b.deviceFD, \"device-fd\", -1, \"FD for the platform device file\")\n+ f.Var(&b.ioFDs, \"io-fds\", \"list of FDs to connect 9P clients. They must follow this order: root first, then mounts as defined in the spec\")\n+ f.Var(&b.stdioFDs, \"stdio-fds\", \"list of FDs containing sandbox stdin, stdout, and stderr in that order\")\nf.IntVar(&b.userLogFD, \"user-log-fd\", 0, \"file descriptor to write user logs to. 0 means no logging.\")\nf.IntVar(&b.startSyncFD, \"start-sync-fd\", -1, \"required FD to used to synchronize sandbox startup\")\nf.IntVar(&b.mountsFD, \"mounts-fd\", -1, \"mountsFD is the file descriptor to read list of mounts after they have been resolved (direct paths, no symlinks).\")\n@@ -150,8 +154,6 @@ func (b *Boot) SetFlags(f *flag.FlagSet) {\nf.IntVar(&b.profileHeapFD, \"profile-heap-fd\", -1, \"file descriptor to write heap profile to. -1 disables profiling.\")\nf.IntVar(&b.profileMutexFD, \"profile-mutex-fd\", -1, \"file descriptor to write mutex profile to. -1 disables profiling.\")\nf.IntVar(&b.traceFD, \"trace-fd\", -1, \"file descriptor to write Go execution trace to. -1 disables tracing.\")\n- f.BoolVar(&b.attached, \"attached\", false, \"if attached is true, kills the sandbox process when the parent process terminates\")\n- f.StringVar(&b.productName, \"product-name\", \"\", \"value to show in /sys/devices/virtual/dmi/id/product_name\")\n}\n// Execute implements subcommands.Command.Execute. It starts a sandbox in a\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/cmd.go",
"new_path": "runsc/cmd/cmd.go",
"diff": "@@ -50,8 +50,8 @@ func (i *intFlags) Set(s string) error {\nif err != nil {\nreturn fmt.Errorf(\"invalid flag value: %v\", err)\n}\n- if fd < 0 {\n- return fmt.Errorf(\"flag value must be greater than 0: %d\", fd)\n+ if fd < -1 {\n+ return fmt.Errorf(\"flag value must be >= -1: %d\", fd)\n}\n*i = append(*i, fd)\nreturn nil\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/gofer.go",
"new_path": "runsc/cmd/gofer.go",
"diff": "@@ -83,9 +83,11 @@ func (*Gofer) Usage() string {\n// SetFlags implements subcommands.Command.\nfunc (g *Gofer) SetFlags(f *flag.FlagSet) {\nf.StringVar(&g.bundleDir, \"bundle\", \"\", \"path to the root of the bundle directory, defaults to the current directory\")\n- f.Var(&g.ioFDs, \"io-fds\", \"list of FDs to connect gofer servers. They must follow this order: root first, then mounts as defined in the spec\")\nf.BoolVar(&g.applyCaps, \"apply-caps\", true, \"if true, apply capabilities to restrict what the Gofer process can do\")\nf.BoolVar(&g.setUpRoot, \"setup-root\", true, \"if true, set up an empty root for the process\")\n+\n+ // Open FDs that are donated to the gofer.\n+ f.Var(&g.ioFDs, \"io-fds\", \"list of FDs to connect gofer servers. They must follow this order: root first, then mounts as defined in the spec\")\nf.IntVar(&g.specFD, \"spec-fd\", -1, \"required fd with the container spec\")\nf.IntVar(&g.mountsFD, \"mounts-fd\", -1, \"mountsFD is the file descriptor to write list of mounts after they have been resolved (direct paths, no symlinks).\")\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Group FD flags in cmd.Boot/Gofer
PiperOrigin-RevId: 436602364 |
259,858 | 22.03.2022 21:30:25 | 25,200 | 1e5014d65723a8dc535f985525cceddf02088d11 | Don't mark all Copy* functions as go:nosplit.
This relaxes the checkescape annotation in the test slightly, but allowing
stack splits should not cause issues.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go",
"new_path": "tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go",
"diff": "@@ -107,7 +107,6 @@ func (g *interfaceGenerator) emitMarshallableForArrayNewtype(n *ast.Ident, a *as\ng.emit(\"}\\n\\n\")\ng.emit(\"// CopyOutN implements marshal.Marshallable.CopyOutN.\\n\")\n- g.emit(\"//go:nosplit\\n\")\ng.emit(\"func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\\n\", g.r, g.typeName())\ng.inIndent(func() {\ng.emitCastToByteSlice(g.r, \"buf\", fmt.Sprintf(\"%s.SizeBytes()\", g.r))\n@@ -119,7 +118,6 @@ func (g *interfaceGenerator) emitMarshallableForArrayNewtype(n *ast.Ident, a *as\ng.emit(\"}\\n\\n\")\ng.emit(\"// CopyOut implements marshal.Marshallable.CopyOut.\\n\")\n- g.emit(\"//go:nosplit\\n\")\ng.emit(\"func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\\n\", g.r, g.typeName())\ng.inIndent(func() {\ng.emit(\"return %s.CopyOutN(cc, addr, %s.SizeBytes())\\n\", g.r, g.r)\n@@ -127,7 +125,6 @@ func (g *interfaceGenerator) emitMarshallableForArrayNewtype(n *ast.Ident, a *as\ng.emit(\"}\\n\\n\")\ng.emit(\"// CopyIn implements marshal.Marshallable.CopyIn.\\n\")\n- g.emit(\"//go:nosplit\\n\")\ng.emit(\"func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\\n\", g.r, g.typeName())\ng.inIndent(func() {\ng.emitCastToByteSlice(g.r, \"buf\", fmt.Sprintf(\"%s.SizeBytes()\", g.r))\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/go_marshal/gomarshal/generator_interfaces_dynamic.go",
"new_path": "tools/go_marshal/gomarshal/generator_interfaces_dynamic.go",
"diff": "@@ -57,7 +57,6 @@ func (g *interfaceGenerator) emitMarshallableForDynamicType() {\ng.emit(\"}\\n\\n\")\ng.emit(\"// CopyOut implements marshal.Marshallable.CopyOut.\\n\")\n- g.emit(\"//go:nosplit\\n\")\ng.recordUsedImport(\"marshal\")\ng.recordUsedImport(\"hostarch\")\ng.emit(\"func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\\n\", g.r, g.typeName())\n@@ -67,7 +66,6 @@ func (g *interfaceGenerator) emitMarshallableForDynamicType() {\ng.emit(\"}\\n\\n\")\ng.emit(\"// CopyIn implements marshal.Marshallable.CopyIn.\\n\")\n- g.emit(\"//go:nosplit\\n\")\ng.recordUsedImport(\"marshal\")\ng.recordUsedImport(\"hostarch\")\ng.emit(\"func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\\n\", g.r, g.typeName())\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go",
"new_path": "tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go",
"diff": "@@ -167,7 +167,6 @@ func (g *interfaceGenerator) emitMarshallableForPrimitiveNewtype(nt *ast.Ident)\ng.emit(\"}\\n\\n\")\ng.emit(\"// CopyOutN implements marshal.Marshallable.CopyOutN.\\n\")\n- g.emit(\"//go:nosplit\\n\")\ng.emit(\"func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\\n\", g.r, g.typeName())\ng.inIndent(func() {\ng.emitCastToByteSlice(g.r, \"buf\", fmt.Sprintf(\"%s.SizeBytes()\", g.r))\n@@ -179,7 +178,6 @@ func (g *interfaceGenerator) emitMarshallableForPrimitiveNewtype(nt *ast.Ident)\ng.emit(\"}\\n\\n\")\ng.emit(\"// CopyOut implements marshal.Marshallable.CopyOut.\\n\")\n- g.emit(\"//go:nosplit\\n\")\ng.emit(\"func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\\n\", g.r, g.typeName())\ng.inIndent(func() {\ng.emit(\"return %s.CopyOutN(cc, addr, %s.SizeBytes())\\n\", g.r, g.r)\n@@ -187,7 +185,6 @@ func (g *interfaceGenerator) emitMarshallableForPrimitiveNewtype(nt *ast.Ident)\ng.emit(\"}\\n\\n\")\ng.emit(\"// CopyIn implements marshal.Marshallable.CopyIn.\\n\")\n- g.emit(\"//go:nosplit\\n\")\ng.emit(\"func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\\n\", g.r, g.typeName())\ng.inIndent(func() {\ng.emitCastToByteSlice(g.r, \"buf\", fmt.Sprintf(\"%s.SizeBytes()\", g.r))\n@@ -254,7 +251,6 @@ func (g *interfaceGenerator) emitMarshallableSliceForPrimitiveNewtype(nt *ast.Id\n}\ng.emit(\"// Copy%sIn copies in a slice of %s objects from the task's memory.\\n\", slice.ident, eltType)\n- g.emit(\"//go:nosplit\\n\")\ng.emit(\"func Copy%sIn(cc marshal.CopyContext, addr hostarch.Addr, dst []%s) (int, error) {\\n\", slice.ident, eltType)\ng.inIndent(func() {\ng.emit(\"count := len(dst)\\n\")\n@@ -274,7 +270,6 @@ func (g *interfaceGenerator) emitMarshallableSliceForPrimitiveNewtype(nt *ast.Id\ng.emit(\"}\\n\\n\")\ng.emit(\"// Copy%sOut copies a slice of %s objects to the task's memory.\\n\", slice.ident, eltType)\n- g.emit(\"//go:nosplit\\n\")\ng.emit(\"func Copy%sOut(cc marshal.CopyContext, addr hostarch.Addr, src []%s) (int, error) {\\n\", slice.ident, eltType)\ng.inIndent(func() {\ng.emit(\"count := len(src)\\n\")\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/go_marshal/gomarshal/generator_interfaces_struct.go",
"new_path": "tools/go_marshal/gomarshal/generator_interfaces_struct.go",
"diff": "@@ -324,7 +324,6 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {\n})\ng.emit(\"}\\n\\n\")\ng.emit(\"// CopyOutN implements marshal.Marshallable.CopyOutN.\\n\")\n- g.emit(\"//go:nosplit\\n\")\ng.recordUsedImport(\"marshal\")\ng.recordUsedImport(\"hostarch\")\ng.emit(\"func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\\n\", g.r, g.typeName())\n@@ -357,7 +356,6 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {\ng.emit(\"}\\n\\n\")\ng.emit(\"// CopyOut implements marshal.Marshallable.CopyOut.\\n\")\n- g.emit(\"//go:nosplit\\n\")\ng.recordUsedImport(\"marshal\")\ng.recordUsedImport(\"hostarch\")\ng.emit(\"func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\\n\", g.r, g.typeName())\n@@ -367,7 +365,6 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {\ng.emit(\"}\\n\\n\")\ng.emit(\"// CopyIn implements marshal.Marshallable.CopyIn.\\n\")\n- g.emit(\"//go:nosplit\\n\")\ng.recordUsedImport(\"marshal\")\ng.recordUsedImport(\"hostarch\")\ng.emit(\"func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\\n\", g.r, g.typeName())\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/go_marshal/test/escape/escape.go",
"new_path": "tools/go_marshal/test/escape/escape.go",
"diff": "@@ -49,14 +49,14 @@ func (t *dummyCopyContext) MarshalUnsafe(addr hostarch.Addr, marshallable marsha\nt.CopyOutBytes(addr, buf)\n}\n-// +checkescape:all\n+// +checkescape:hard\n//go:nosplit\nfunc doCopyIn(t *dummyCopyContext) {\nvar stat test.Stat\nstat.CopyIn(t, hostarch.Addr(0xf000ba12))\n}\n-// +checkescape:all\n+// +checkescape:hard\n//go:nosplit\nfunc doCopyOut(t *dummyCopyContext) {\nvar stat test.Stat\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't mark all Copy* functions as go:nosplit.
This relaxes the checkescape annotation in the test slightly, but allowing
stack splits should not cause issues.
Fixes #7309
PiperOrigin-RevId: 436645425 |
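For context, a minimal sketch (invented names, not gVisor source) of the //go:nosplit directive that the generator above stops emitting: the directive suppresses the stack-split check in a function's prologue, so dropping it simply lets the generated Copy* methods grow the goroutine stack when needed.

package example

// addNosplit must not grow the goroutine stack; the compiler omits the
// stack-split check from its prologue because of the directive below.
//go:nosplit
func addNosplit(a, b int) int {
	return a + b
}

// addRegular carries no directive, so the compiler may insert a stack-growth
// check and the function's stack may be grown on entry.
func addRegular(a, b int) int {
	return a + b
}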
259,985 | 23.03.2022 17:23:57 | 25,200 | 9085d334deed528d3f9336c9fd98cf3282e0eee0 | kernfs: Handle duplicate unlink on orphaned directories.
Also don't print kernfs inode internals on panic, when we can't
acquire the necessary locks.
Reported-by:
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/kernfs/filesystem.go",
"new_path": "pkg/sentry/fsimpl/kernfs/filesystem.go",
"diff": "@@ -242,6 +242,15 @@ func checkDeleteLocked(ctx context.Context, rp *vfs.ResolvingPath, d *Dentry) er\nif parent.vfsd.IsDead() {\nreturn linuxerr.ENOENT\n}\n+ if d.vfsd.IsDead() {\n+ // This implies a duplicate unlink on an orphaned dentry, where the path\n+ // resolution was successful. This is possible when the orphan is\n+ // replaced by a new node of the same name (so the path resolution\n+ // succeeds), and the orphan is unlinked again through a dirfd using\n+ // unlinkat(2) (so the unlink refers to the orphan and not the new\n+ // node). See Linux, fs/namei.c:do_rmdir().\n+ return linuxerr.EINVAL\n+ }\nif err := parent.inode.CheckPermissions(ctx, rp.Credentials(), vfs.MayWrite|vfs.MayExec); err != nil {\nreturn err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/kernfs/inode_impl_util.go",
"new_path": "pkg/sentry/fsimpl/kernfs/inode_impl_util.go",
"diff": "@@ -590,7 +590,7 @@ func (o *OrderedChildren) checkExistingLocked(name string, child Inode) error {\nreturn linuxerr.ENOENT\n}\nif s.inode != child {\n- panic(fmt.Sprintf(\"Inode doesn't match what kernfs thinks! OrderedChild: %+v, kernfs: %+v\", s.inode, child))\n+ panic(fmt.Sprintf(\"Inode doesn't match what kernfs thinks! Name: %q, OrderedChild: %p, kernfs: %p\", name, s.inode, child))\n}\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/cgroup.cc",
"new_path": "test/syscalls/linux/cgroup.cc",
"diff": "@@ -403,6 +403,26 @@ TEST(Cgroup, MigrateToSubcontainerThread) {\nEXPECT_FALSE(tasks.contains(tid));\n}\n+// Regression test for b/222278194.\n+TEST(Cgroup, DuplicateUnlinkOnDirFD) {\n+ SKIP_IF(!CgroupsAvailable());\n+ Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));\n+ Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"\"));\n+ Cgroup child = ASSERT_NO_ERRNO_AND_VALUE(c.CreateChild(\"child\"));\n+\n+ // Orphan child directory by opening FD to it then deleting it.\n+ const FileDescriptor dirfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(child.Path(), 0, 0));\n+ ASSERT_NO_ERRNO(child.Delete());\n+\n+ // Replace orphan with new directory of same name, so path resolution\n+ // succeeds.\n+ Cgroup child_new = ASSERT_NO_ERRNO_AND_VALUE(c.CreateChild(\"child\"));\n+\n+ // Attempt to delete orphaned child again through dirfd.\n+ EXPECT_THAT(UnlinkAt(dirfd, \".\", AT_REMOVEDIR), PosixErrorIs(EINVAL));\n+}\n+\nTEST(MemoryCgroup, MemoryUsageInBytes) {\nSKIP_IF(!CgroupsAvailable());\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/cgroup_util.cc",
"new_path": "test/util/cgroup_util.cc",
"diff": "@@ -41,6 +41,8 @@ PosixErrorOr<Cgroup> Cgroup::Create(absl::string_view path) {\nreturn Cgroup(path);\n}\n+PosixError Cgroup::Delete() { return Rmdir(cgroup_path_); }\n+\nPosixErrorOr<Cgroup> Cgroup::CreateChild(absl::string_view name) const {\nreturn Cgroup::Create(JoinPath(Path(), name));\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/cgroup_util.h",
"new_path": "test/util/cgroup_util.h",
"diff": "@@ -43,6 +43,9 @@ class Cgroup {\n// cgroupfs directory.\nstatic PosixErrorOr<Cgroup> Create(std::string_view path);\n+ // Deletes the current cgroup represented by this object.\n+ PosixError Delete();\n+\nconst std::string& Path() const { return cgroup_path_; }\n// Creates a child cgroup under this cgroup with the given name.\n"
}
] | Go | Apache License 2.0 | google/gvisor | kernfs: Handle duplicate unlink on orphaned directories.
Also don't print kernfs inode internals on panic, when we can't
acquire the necessary locks.
Reported-by: syzbot+101505e52936904e7d9f@syzkaller.appspotmail.com
Reported-by: syzbot+b9fe0fa83736b77030d2@syzkaller.appspotmail.com
PiperOrigin-RevId: 436869482 |
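For illustration only, a user-space sketch of the scenario the fix guards against, mirroring the regression test added in the commit; the /tmp/child path and the error handling are assumptions of the example, not part of the change.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	const dir = "/tmp/child" // assumed scratch directory
	if err := unix.Mkdir(dir, 0o755); err != nil {
		panic(err)
	}
	// Keep an open FD to the directory so it can become an orphan.
	dirfd, err := unix.Open(dir, unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(dirfd)

	// Unlink the directory, then recreate one with the same name so that
	// path resolution still succeeds afterwards.
	if err := unix.Rmdir(dir); err != nil {
		panic(err)
	}
	if err := unix.Mkdir(dir, 0o755); err != nil {
		panic(err)
	}

	// The second removal goes through the dirfd and therefore refers to the
	// orphan; with the fix it fails with EINVAL instead of hitting the old
	// error path.
	fmt.Println("unlinkat on orphan:", unix.Unlinkat(dirfd, ".", unix.AT_REMOVEDIR))
}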
259,868 | 23.03.2022 18:52:55 | 25,200 | 7da1c59e773fad3ff7eab51d04601c92932e2a3b | Fix broken `+checkescape` annotation in sleep package.
Make `checkescape` check for variants of `+checkescape` that would silently be
ignored. #codehealth | [
{
"change_type": "MODIFY",
"old_path": "pkg/sleep/sleep_unsafe.go",
"new_path": "pkg/sleep/sleep_unsafe.go",
"diff": "@@ -166,6 +166,7 @@ func (s *Sleeper) AddWaker(w *Waker) {\n// block, then we will need to explicitly wake a runtime P.\n//\n// Precondition: wakepOrSleep may be true iff block is true.\n+//go:nosplit\nfunc (s *Sleeper) nextWaker(block, wakepOrSleep bool) *Waker {\n// Attempt to replenish the local list if it's currently empty.\nif s.localList == nil {\n@@ -282,7 +283,7 @@ func (s *Sleeper) Fetch(block bool) *Waker {\n//\n// N.B. Like Fetch, this method is *not* thread-safe. This will also yield the current\n// P to the next goroutine, avoiding associated scheduled overhead.\n-//+checkescapes:all\n+// +checkescape:all\n//go:nosplit\nfunc (s *Sleeper) AssertAndFetch(n *Waker) *Waker {\nn.assert(false /* wakep */)\n@@ -325,6 +326,7 @@ func (s *Sleeper) Done() {\n// enqueueAssertedWaker enqueues an asserted waker to the \"ready\" circular list\n// of wakers that want to notify the sleeper.\n+//go:nosplit\nfunc (s *Sleeper) enqueueAssertedWaker(w *Waker, wakep bool) {\n// Add the new waker to the front of the list.\nfor {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sync/runtime_amd64.go",
"new_path": "pkg/sync/runtime_amd64.go",
"diff": "@@ -20,10 +20,12 @@ func addrOfSpinning() *int32\n// nmspinning caches addrOfSpinning.\nvar nmspinning = addrOfSpinning()\n+//go:nosplit\nfunc preGoReadyWakeSuppression() {\natomic.AddInt32(nmspinning, 1)\n}\n+//go:nosplit\nfunc postGoReadyWakeSuppression() {\natomic.AddInt32(nmspinning, -1)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/checkescape/checkescape.go",
"new_path": "tools/checkescape/checkescape.go",
"diff": "@@ -84,6 +84,10 @@ const (\n// magic is the magic annotation.\nmagic = \"// +checkescape\"\n+ // Bad versions of `magic` observed in the wilderness of the codebase.\n+ badMagicNoSpace = \"//+checkescape\"\n+ badMagicPlural = \"// +checkescapes\"\n+\n// magicParams is the magic annotation with specific parameters.\nmagicParams = magic + \":\"\n@@ -573,6 +577,10 @@ func findReasons(pass *analysis.Pass, fdecl *ast.FuncDecl) ([]EscapeReason, bool\n// Scan all lines.\nfound := false\nfor _, c := range fdecl.Doc.List {\n+ if strings.HasPrefix(c.Text, badMagicNoSpace) || strings.HasPrefix(c.Text, badMagicPlural) {\n+ pass.Reportf(fdecl.Pos(), \"misspelled checkescape prefix: please use %q instead\", magic)\n+ continue\n+ }\n// Does the comment contain a +checkescape line?\nif !strings.HasPrefix(c.Text, magic) && !strings.HasPrefix(c.Text, testMagic) {\ncontinue\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix broken `+checkescape` annotation in sleep package.
Make `checkescape` check for variants of `+checkescape` that would silently be
ignored. #codehealth
PiperOrigin-RevId: 436882400 |
259,985 | 24.03.2022 15:07:06 | 25,200 | 5835bc8c3a4bd1b2c41a40ff706282f6224aaffc | cgroupfs: Handle invalid PID/PGID on migration.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/base.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/base.go",
"diff": "@@ -302,6 +302,9 @@ func (d *cgroupProcsData) Write(ctx context.Context, fd *vfs.FileDescription, sr\nt := kernel.TaskFromContext(ctx)\ncurrPidns := t.ThreadGroup().PIDNamespace()\ntargetTG := currPidns.ThreadGroupWithID(kernel.ThreadID(tgid))\n+ if targetTG == nil {\n+ return 0, linuxerr.EINVAL\n+ }\nreturn n, targetTG.MigrateCgroup(d.Cgroup(fd))\n}\n@@ -341,6 +344,9 @@ func (d *tasksData) Write(ctx context.Context, fd *vfs.FileDescription, src user\nt := kernel.TaskFromContext(ctx)\ncurrPidns := t.ThreadGroup().PIDNamespace()\ntargetTask := currPidns.TaskWithID(kernel.ThreadID(tid))\n+ if targetTask == nil {\n+ return 0, linuxerr.EINVAL\n+ }\nreturn n, targetTask.MigrateCgroup(d.Cgroup(fd))\n}\n@@ -362,7 +368,7 @@ func parseInt64FromString(ctx context.Context, src usermem.IOSequence) (val, len\nif err != nil {\n// Note: This also handles zero-len writes if offset is beyond the end\n// of src, or src is empty.\n- ctx.Warningf(\"cgroupfs.parseInt64FromString: failed to parse %q: %v\", str, err)\n+ ctx.Debugf(\"cgroupfs.parseInt64FromString: failed to parse %q: %v\", str, err)\nreturn 0, int64(n), linuxerr.EINVAL\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/cgroup.cc",
"new_path": "test/syscalls/linux/cgroup.cc",
"diff": "@@ -403,6 +403,20 @@ TEST(Cgroup, MigrateToSubcontainerThread) {\nEXPECT_FALSE(tasks.contains(tid));\n}\n+TEST(Cgroup, MigrateInvalidPID) {\n+ SKIP_IF(!CgroupsAvailable());\n+ Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));\n+ Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"\"));\n+\n+ EXPECT_THAT(c.WriteControlFile(\"cgroup.procs\", \"-1\"), PosixErrorIs(EINVAL));\n+ EXPECT_THAT(c.WriteControlFile(\"cgroup.procs\", \"not-a-number\"),\n+ PosixErrorIs(EINVAL));\n+\n+ EXPECT_THAT(c.WriteControlFile(\"tasks\", \"-1\"), PosixErrorIs(EINVAL));\n+ EXPECT_THAT(c.WriteControlFile(\"tasks\", \"not-a-number\"),\n+ PosixErrorIs(EINVAL));\n+}\n+\n// Regression test for b/222278194.\nTEST(Cgroup, DuplicateUnlinkOnDirFD) {\nSKIP_IF(!CgroupsAvailable());\n"
}
] | Go | Apache License 2.0 | google/gvisor | cgroupfs: Handle invalid PID/PGID on migration.
Reported-by: syzbot+670d686c42a0a8d7f8a6@syzkaller.appspotmail.com
PiperOrigin-RevId: 437096386 |
259,881 | 28.03.2022 11:22:25 | 25,200 | 34623f4d753169d8af72a75c8e72bbd27e64c01b | Skip analysis of sync/atomic
is adding Pointer[T] to sync/atomic,
which will trip up analyzers that don't handle generics. Skip
it for now.
Drop constraints, maps, slices, as they were dropped from the
standard library. | [
{
"change_type": "MODIFY",
"old_path": "tools/nogo/check/check.go",
"new_path": "tools/nogo/check/check.go",
"diff": "@@ -746,9 +746,7 @@ func SplitPackages(srcs []string, srcRootPrefix string) map[string][]string {\n//\n// TODO(b/201686256): remove once tooling can handle type parameters.\nvar usesTypeParams = map[string]struct{}{\n- \"constraints\": struct{}{}, // golang.org/issue/45458\n- \"maps\": struct{}{}, // golang.org/issue/47649\n- \"slices\": struct{}{}, // golang.org/issue/45955\n+ \"sync/atomic\": struct{}{}, // https://go.dev/issue/50860\n}\n// Bundle checks a bundle of files (typically the standard library).\n"
}
] | Go | Apache License 2.0 | google/gvisor | Skip analysis of sync/atomic
https://go.dev/issue/50860 is adding Pointer[T] to sync/atomic,
which will trip up analyzers that don't handle generics. Skip
it for now.
Drop constraints, maps, slices, as they were dropped from the
standard library.
PiperOrigin-RevId: 437808952 |
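As a small aside, this is the kind of generic sync/atomic API (Pointer[T], added by go.dev/issue/50860 and shipped in Go 1.19) that pre-generics analyzers stumble over; the node type below is invented for the example.

package example

import "sync/atomic"

type node struct {
	value int
}

// head is a type-parameterized atomic pointer; tooling that predates generics
// cannot type-check this declaration.
var head atomic.Pointer[node]

func publish(n *node) {
	head.Store(n)
}

func snapshot() *node {
	return head.Load()
}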
259,896 | 29.03.2022 15:46:02 | 25,200 | 007a91a911084e46efffc4e2531ff8dee5cf1917 | Silence the error log message for SO_RCVLOWAT option.
Removed the unimplemented syscall message for SO_RCVLOWAT option and added a
test for {g,s}etsockopt. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -1064,6 +1064,14 @@ func getSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, fam\nvP := primitive.Int32(boolToInt32(v))\nreturn &vP, nil\n+ case linux.SO_RCVLOWAT:\n+ if outLen < sizeOfInt32 {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ v := primitive.Int32(ep.SocketOptions().GetRcvlowat())\n+ return &v, nil\n+\ndefault:\nsocket.GetSockOptEmitUnimplementedEvent(t, name)\n}\n@@ -2002,6 +2010,17 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam\nvar v tcpip.SocketDetachFilterOption\nreturn syserr.TranslateNetstackError(ep.SetSockOpt(&v))\n+ // TODO(b/226603727): Add support for SO_RCVLOWAT option. For now, only\n+ // the unsupported syscall message is removed.\n+ case linux.SO_RCVLOWAT:\n+ if len(optVal) < sizeOfInt32 {\n+ return syserr.ErrInvalidArgument\n+ }\n+\n+ v := hostarch.ByteOrder.Uint32(optVal)\n+ ep.SocketOptions().SetRcvlowat(int32(v))\n+ return nil\n+\ndefault:\nsocket.SetSockOptEmitUnimplementedEvent(t, name)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/socket.go",
"new_path": "pkg/sentry/socket/socket.go",
"diff": "@@ -583,7 +583,6 @@ func emitUnimplementedEvent(t *kernel.Task, name int) {\nlinux.SO_PEEK_OFF,\nlinux.SO_PRIORITY,\nlinux.SO_RCVBUF,\n- linux.SO_RCVLOWAT,\nlinux.SO_RCVTIMEO,\nlinux.SO_REUSEADDR,\nlinux.SO_REUSEPORT,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/socketops.go",
"new_path": "pkg/tcpip/socketops.go",
"diff": "@@ -245,6 +245,10 @@ type SocketOptions struct {\n// linger determines the amount of time the socket should linger before\n// close. We currently implement this option for TCP socket only.\nlinger LingerOption\n+\n+ // rcvlowat specifies the minimum number of bytes which should be\n+ // received to indicate the socket as readable.\n+ rcvlowat int32\n}\n// InitHandler initializes the handler. This must be called before using the\n@@ -702,3 +706,17 @@ func (so *SocketOptions) SetReceiveBufferSize(receiveBufferSize int64, notify bo\n}\nso.receiveBufferSize.Store(receiveBufferSize)\n}\n+\n+// GetRcvlowat gets value for SO_RCVLOWAT option.\n+func (so *SocketOptions) GetRcvlowat() int32 {\n+ // TODO(b/226603727): Return so.rcvlowat after adding complete support\n+ // for SO_RCVLOWAT option. For now, return the default value of 1.\n+ defaultRcvlowat := int32(1)\n+ return defaultRcvlowat\n+}\n+\n+// SetRcvlowat sets value for SO_RCVLOWAT option.\n+func (so *SocketOptions) SetRcvlowat(rcvlowat int32) Error {\n+ atomic.StoreInt32(&so.rcvlowat, rcvlowat)\n+ return nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_generic_test_cases.cc",
"new_path": "test/syscalls/linux/socket_generic_test_cases.cc",
"diff": "@@ -968,5 +968,35 @@ TEST_P(AllSocketPairTest, GetSocketRcvbufOption) {\nEXPECT_EQ(opt, minRcvBufSizeLinux);\n}\n}\n+\n+TEST_P(AllSocketPairTest, GetSetSocketRcvlowatOption) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ int opt = 0;\n+ socklen_t opt_len = sizeof(opt);\n+ constexpr int defaultSz = 1;\n+ ASSERT_THAT(\n+ getsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVLOWAT, &opt, &opt_len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(opt_len, sizeof(opt));\n+ EXPECT_EQ(opt, defaultSz);\n+\n+ int rcvlowatSz = 100;\n+ ASSERT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVLOWAT,\n+ &rcvlowatSz, sizeof(rcvlowatSz)),\n+ SyscallSucceeds());\n+\n+ ASSERT_THAT(\n+ getsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVLOWAT, &opt, &opt_len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(opt_len, sizeof(opt));\n+\n+ if (IsRunningOnGvisor()) {\n+ // TODO(b/226603727): Add support for setting SO_RCVLOWAT option in gVisor.\n+ EXPECT_EQ(opt, defaultSz);\n+ } else {\n+ EXPECT_EQ(opt, rcvlowatSz);\n+ }\n+}\n} // namespace testing\n} // namespace gvisor\n"
}
] | Go | Apache License 2.0 | google/gvisor | Silence the error log message for SO_RCVLOWAT option.
Removed the unimplemented syscall message for SO_RCVLOWAT option and added a
test for {g,s}etsockopt.
PiperOrigin-RevId: 438145815 |
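A hedged user-space example of the option in question, written against golang.org/x/sys/unix rather than gVisor internals; the value 100 is arbitrary, and the default of 1 matches the test added above.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// SO_RCVLOWAT defaults to 1: the socket is readable as soon as one byte
	// is queued.
	v, err := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVLOWAT)
	fmt.Println("default rcvlowat:", v, err)

	// Raise the threshold so readability is only reported once 100 bytes are
	// available (the behaviour gVisor does not implement yet, per the commit).
	if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVLOWAT, 100); err != nil {
		panic(err)
	}
	v, _ = unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVLOWAT)
	fmt.Println("updated rcvlowat:", v)
}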
259,891 | 29.03.2022 17:45:26 | 25,200 | c2bd153760b0ae24b1cd0183741742ae82f4c4da | don't check refcount in packet buffer's destroy function
The function is only called when refcount is zero,
so we don't have to check again. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/packet_buffer.go",
"new_path": "pkg/tcpip/stack/packet_buffer.go",
"diff": "@@ -188,9 +188,7 @@ func NewPacketBuffer(opts PacketBufferOptions) *PacketBuffer {\n// pool.\nfunc (pk *PacketBuffer) DecRef() {\npk.packetBufferRefs.DecRef(func() {\n- if pk.packetBufferRefs.refCount == 0 {\npkPool.Put(pk)\n- }\n})\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | don't check refcount in packet buffer's destroy function
The function is only called when refcount is zero,
so we don't have to check again.
PiperOrigin-RevId: 438171100 |
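Illustrative sketch (invented type, far simpler than gVisor's generated refs package) of the invariant the change relies on: the destroy callback passed to DecRef only runs once the count has reached zero, so re-checking inside the callback is redundant.

package example

import "sync/atomic"

type ref struct {
	count int64
}

func (r *ref) IncRef() {
	atomic.AddInt64(&r.count, 1)
}

// DecRef invokes destroy only when the count drops to zero, which is why the
// callback itself never needs to re-inspect the count.
func (r *ref) DecRef(destroy func()) {
	if atomic.AddInt64(&r.count, -1) == 0 {
		destroy()
	}
}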
259,985 | 29.03.2022 20:00:11 | 25,200 | 5bb1f5086ed69f0d77b217cec5c10e94a1f00498 | cgroupfs: Implement hierarchical accounting for cpuacct controller. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/base.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/base.go",
"diff": "@@ -40,6 +40,11 @@ import (\ntype controllerCommon struct {\nty kernel.CgroupControllerType\nfs *filesystem\n+ // parent is the parent controller if any. Immutable.\n+ //\n+ // Note that we don't have to update this on renames, since cgroup\n+ // directories can't be moved to a different parent directory.\n+ parent controller\n}\nfunc (c *controllerCommon) init(ty kernel.CgroupControllerType, fs *filesystem) {\n@@ -47,9 +52,15 @@ func (c *controllerCommon) init(ty kernel.CgroupControllerType, fs *filesystem)\nc.fs = fs\n}\n-func (c *controllerCommon) cloneFrom(other *controllerCommon) {\n- c.ty = other.ty\n- c.fs = other.fs\n+func (c *controllerCommon) cloneFromParent(parent controller) {\n+ c.ty = parent.Type()\n+ c.fs = parent.Filesystem()\n+ c.parent = parent\n+}\n+\n+// Filesystem implements controller.Filesystem.\n+func (c *controllerCommon) Filesystem() *filesystem {\n+ return c.fs\n}\n// Type implements kernel.CgroupController.Type.\n@@ -85,7 +96,10 @@ func (c *controllerCommon) RootCgroup() kernel.Cgroup {\ntype controller interface {\nkernel.CgroupController\n- // Clone creates a new controller based on the internal state of the current\n+ // Filesystem returns the cgroupfs filesystem backing this controller.\n+ Filesystem() *filesystem\n+\n+ // Clone creates a new controller based on the internal state of this\n// controller. This is used to initialize a sub-cgroup based on the state of\n// the parent.\nClone() controller\n@@ -94,6 +108,18 @@ type controller interface {\n// control files defined by this controller.\nAddControlFiles(ctx context.Context, creds *auth.Credentials, c *cgroupInode, contents map[string]kernfs.Inode)\n+ // Enter is called when a task initially moves into a cgroup. This is\n+ // distinct from migration because the task isn't migrating away from a\n+ // cgroup. Enter is called when a task is created and joins its initial\n+ // cgroup, or when cgroupfs is mounted and existing tasks are moved into\n+ // cgroups.\n+ Enter(t *kernel.Task)\n+\n+ // Leave is called when a task leaves a cgroup. This is distinct from\n+ // migration because the task isn't migrating to another cgroup. Leave is\n+ // called when a task exits.\n+ Leave(t *kernel.Task)\n+\n// PrepareMigrate signals the controller that a migration is about to\n// happen. The controller should check for any conditions that would prevent\n// the migration. 
If PrepareMigrate succeeds, the controller must\n@@ -186,6 +212,7 @@ func (c *cgroupInode) Controllers() []kernel.CgroupController {\nfunc (c *cgroupInode) tasks() []*kernel.Task {\nc.fs.tasksMu.RLock()\ndefer c.fs.tasksMu.RUnlock()\n+\nts := make([]*kernel.Task, 0, len(c.ts))\nfor t := range c.ts {\nts = append(ts, t)\n@@ -196,15 +223,23 @@ func (c *cgroupInode) tasks() []*kernel.Task {\n// Enter implements kernel.CgroupImpl.Enter.\nfunc (c *cgroupInode) Enter(t *kernel.Task) {\nc.fs.tasksMu.Lock()\n+ defer c.fs.tasksMu.Unlock()\n+\nc.ts[t] = struct{}{}\n- c.fs.tasksMu.Unlock()\n+ for _, ctl := range c.controllers {\n+ ctl.Enter(t)\n+ }\n}\n// Leave implements kernel.CgroupImpl.Leave.\nfunc (c *cgroupInode) Leave(t *kernel.Task) {\nc.fs.tasksMu.Lock()\n+ defer c.fs.tasksMu.Unlock()\n+\n+ for _, ctl := range c.controllers {\n+ ctl.Leave(t)\n+ }\ndelete(c.ts, t)\n- c.fs.tasksMu.Unlock()\n}\n// PrepareMigrate implements kernel.CgroupImpl.PrepareMigrate.\n@@ -229,14 +264,14 @@ func (c *cgroupInode) PrepareMigrate(t *kernel.Task, src *kernel.Cgroup) error {\n// CommitMigrate implements kernel.CgroupImpl.CommitMigrate.\nfunc (c *cgroupInode) CommitMigrate(t *kernel.Task, src *kernel.Cgroup) {\n+ c.fs.tasksMu.Lock()\n+ defer c.fs.tasksMu.Unlock()\n+\nfor srcType, srcCtl := range src.CgroupImpl.(*cgroupInode).controllers {\nc.controllers[srcType].CommitMigrate(t, srcCtl)\n}\nsrcI := src.CgroupImpl.(*cgroupInode)\n- c.fs.tasksMu.Lock()\n- defer c.fs.tasksMu.Unlock()\n-\ndelete(srcI.ts, t)\nc.ts[t] = struct{}{}\n}\n@@ -375,17 +410,23 @@ func parseInt64FromString(ctx context.Context, src usermem.IOSequence) (val, len\nreturn val, int64(n), nil\n}\n-// controllerNoopMigrate partially implements controller. It stubs the migration\n+// controllerStateless partially implements controller. It stubs the migration\n// methods with noops for a stateless controller.\n-type controllerNoopMigrate struct{}\n+type controllerStateless struct{}\n+\n+// Enter implements controller.Enter.\n+func (*controllerStateless) Enter(t *kernel.Task) {}\n+\n+// Leave implements controller.Leave.\n+func (*controllerStateless) Leave(t *kernel.Task) {}\n// PrepareMigrate implements controller.PrepareMigrate.\n-func (*controllerNoopMigrate) PrepareMigrate(t *kernel.Task, src controller) error {\n+func (*controllerStateless) PrepareMigrate(t *kernel.Task, src controller) error {\nreturn nil\n}\n// CommitMigrate implements controller.CommitMigrate.\n-func (*controllerNoopMigrate) CommitMigrate(t *kernel.Task, src controller) {}\n+func (*controllerStateless) CommitMigrate(t *kernel.Task, src controller) {}\n// AbortMigrate implements controller.AbortMigrate.\n-func (*controllerNoopMigrate) AbortMigrate(t *kernel.Task, src controller) {}\n+func (*controllerStateless) AbortMigrate(t *kernel.Task, src controller) {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go",
"diff": "@@ -541,6 +541,14 @@ func (d *dir) RmDir(ctx context.Context, name string, child kernfs.Inode) error\nreturn err\n}\n+func (d *dir) forEachChildDir(fn func(*dir)) {\n+ d.OrderedChildren.ForEachChild(func(_ string, i kernfs.Inode) {\n+ if childI, ok := i.(*cgroupInode); ok {\n+ fn(&childI.dir)\n+ }\n+ })\n+}\n+\n// controllerFile represents a generic control file that appears within a cgroup\n// directory.\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/cpu.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/cpu.go",
"diff": "@@ -23,7 +23,7 @@ import (\n// +stateify savable\ntype cpuController struct {\ncontrollerCommon\n- controllerNoopMigrate\n+ controllerStateless\n// CFS bandwidth control parameters, values in microseconds.\ncfsPeriod int64\n@@ -67,7 +67,7 @@ func (c *cpuController) Clone() controller {\ncfsQuota: c.cfsQuota,\nshares: c.shares,\n}\n- new.controllerCommon.cloneFrom(&c.controllerCommon)\n+ new.controllerCommon.cloneFromParent(c)\nreturn new\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/cpuacct.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/cpuacct.go",
"diff": "@@ -21,29 +21,61 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/sentry/usage\"\n+ \"gvisor.dev/gvisor/pkg/sync\"\n)\n+// cpuacctController tracks CPU usage for tasks managed by the controller. The\n+// sentry already tracks CPU usage per task; the controller tries to avoid\n+// duplicate bookkeeping. When a task moves into a cpuacct cgroup, for currently\n+// running tasks we simple refer to the tasks themselves when asked to report\n+// usage. Things get more interesting when tasks leave the cgroup, since we need\n+// to attribute the usage across multiple cgroups.\n+//\n+// On migration, we attribute the task's usage up to the point of migration to\n+// the src cgroup, and keep track of how much of the overall usage to discount\n+// at the dst cgroup.\n+//\n+// On task exit, we attribute all unaccounted usage to the current cgroup and\n+// stop tracking the task.\n+//\n// +stateify savable\ntype cpuacctController struct {\ncontrollerCommon\n- controllerNoopMigrate\n+ controllerStateless\n+\n+ mu sync.Mutex `state:\"nosave\"`\n+\n+ // taskCommittedCharges tracks charges for a task already attributed to this\n+ // cgroup. This is used to avoid double counting usage for live\n+ // tasks. Protected by mu.\n+ taskCommittedCharges map[*kernel.Task]usage.CPUStats\n+\n+ // usage is the cumulative CPU time used by past tasks in this cgroup. Note\n+ // that this doesn't include usage by live tasks currently in the\n+ // cgroup. Protected by mu.\n+ usage usage.CPUStats\n}\nvar _ controller = (*cpuacctController)(nil)\nfunc newCPUAcctController(fs *filesystem) *cpuacctController {\n- c := &cpuacctController{}\n+ c := &cpuacctController{\n+ taskCommittedCharges: make(map[*kernel.Task]usage.CPUStats),\n+ }\nc.controllerCommon.init(controllerCPUAcct, fs)\nreturn c\n}\n// Clone implements controller.Clone.\nfunc (c *cpuacctController) Clone() controller {\n- new := &cpuacctController{}\n- new.controllerCommon.cloneFrom(&new.controllerCommon)\n- return c\n+ new := &cpuacctController{\n+ taskCommittedCharges: make(map[*kernel.Task]usage.CPUStats),\n+ }\n+ new.controllerCommon.cloneFromParent(c)\n+ return new\n}\n// AddControlFiles implements controller.AddControlFiles.\n@@ -55,20 +87,81 @@ func (c *cpuacctController) AddControlFiles(ctx context.Context, creds *auth.Cre\ncontents[\"cpuacct.usage_sys\"] = c.fs.newControllerFile(ctx, creds, &cpuacctUsageSysData{cpuacctCG})\n}\n+// Enter implements controller.Enter.\n+func (c *cpuacctController) Enter(t *kernel.Task) {}\n+\n+// Leave implements controller.Leave.\n+func (c *cpuacctController) Leave(t *kernel.Task) {\n+ charge := t.CPUStats()\n+ c.mu.Lock()\n+ outstandingCharge := charge.DifferenceSince(c.taskCommittedCharges[t])\n+ c.usage.Accumulate(outstandingCharge)\n+ delete(c.taskCommittedCharges, t)\n+ c.mu.Unlock()\n+}\n+\n+// PrepareMigrate implements controller.PrepareMigrate.\n+func (c *cpuacctController) PrepareMigrate(t *kernel.Task, src controller) error {\n+ return nil\n+}\n+\n+// CommitMigrate implements controller.CommitMigrate.\n+func (c *cpuacctController) CommitMigrate(t *kernel.Task, src controller) {\n+ charge := t.CPUStats()\n+\n+ // Commit current charge to src and stop tracking t at src.\n+ srcCtl := src.(*cpuacctController)\n+ srcCtl.mu.Lock()\n+ srcTaskCharge := srcCtl.taskCommittedCharges[t]\n+ outstandingCharge := 
charge.DifferenceSince(srcTaskCharge)\n+ srcCtl.usage.Accumulate(outstandingCharge)\n+ delete(srcCtl.taskCommittedCharges, t)\n+ srcCtl.mu.Unlock()\n+\n+ // Start tracking charge at dst, excluding the charge at src.\n+ c.mu.Lock()\n+ c.taskCommittedCharges[t] = charge\n+ c.mu.Unlock()\n+}\n+\n+// AbortMigrate implements controller.AbortMigrate.\n+func (c *cpuacctController) AbortMigrate(t *kernel.Task, src controller) {}\n+\n// +stateify savable\ntype cpuacctCgroup struct {\n*cgroupInode\n}\n-func (c *cpuacctCgroup) collectCPUStats() usage.CPUStats {\n- var cs usage.CPUStats\n- c.fs.tasksMu.RLock()\n- // Note: This isn't very accurate, since the tasks are potentially\n- // still running as we accumulate their stats.\n+func (c *cpuacctCgroup) cpuacctController() *cpuacctController {\n+ return c.controllers[controllerCPUAcct].(*cpuacctController)\n+}\n+\n+// checklocks:c.fs.tasksMu\n+func (c *cpuacctCgroup) collectCPUStatsLocked(acc *usage.CPUStats) {\n+ ctl := c.cpuacctController()\nfor t := range c.ts {\n- cs.Accumulate(t.CPUStats())\n+ charge := t.CPUStats()\n+ ctl.mu.Lock()\n+ outstandingCharge := charge.DifferenceSince(ctl.taskCommittedCharges[t])\n+ ctl.mu.Unlock()\n+ acc.Accumulate(outstandingCharge)\n+ }\n+ ctl.mu.Lock()\n+ acc.Accumulate(ctl.usage)\n+ ctl.mu.Unlock()\n+\n+ c.forEachChildDir(func(d *dir) {\n+ cg := cpuacctCgroup{d.cgi}\n+ cg.collectCPUStatsLocked(acc)\n+ })\n}\n- c.fs.tasksMu.RUnlock()\n+\n+func (c *cpuacctCgroup) collectCPUStats() usage.CPUStats {\n+ c.fs.tasksMu.RLock()\n+ defer c.fs.tasksMu.RUnlock()\n+\n+ var cs usage.CPUStats\n+ c.collectCPUStatsLocked(&cs)\nreturn cs\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/cpuset.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/cpuset.go",
"diff": "@@ -34,7 +34,7 @@ import (\n// +stateify savable\ntype cpusetController struct {\ncontrollerCommon\n- controllerNoopMigrate\n+ controllerStateless\nmaxCpus uint32\nmaxMems uint32\n@@ -73,7 +73,7 @@ func (c *cpusetController) Clone() controller {\ncpus: &cpus,\nmems: &mems,\n}\n- new.controllerCommon.cloneFrom(&c.controllerCommon)\n+ new.controllerCommon.cloneFromParent(c)\nreturn new\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/job.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/job.go",
"diff": "@@ -23,7 +23,7 @@ import (\n// +stateify savable\ntype jobController struct {\ncontrollerCommon\n- controllerNoopMigrate\n+ controllerStateless\nid int64\n}\n@@ -41,7 +41,7 @@ func (c *jobController) Clone() controller {\nnew := &jobController{\nid: c.id,\n}\n- new.controllerCommon.cloneFrom(&c.controllerCommon)\n+ new.controllerCommon.cloneFromParent(c)\nreturn new\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/memory.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/memory.go",
"diff": "@@ -30,7 +30,7 @@ import (\n// +stateify savable\ntype memoryController struct {\ncontrollerCommon\n- controllerNoopMigrate\n+ controllerStateless\nlimitBytes int64\nsoftLimitBytes int64\n@@ -72,7 +72,7 @@ func (c *memoryController) Clone() controller {\nsoftLimitBytes: c.softLimitBytes,\nmoveChargeAtImmigrate: c.moveChargeAtImmigrate,\n}\n- new.controllerCommon.cloneFrom(&c.controllerCommon)\n+ new.controllerCommon.cloneFromParent(c)\nreturn new\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/kernfs/inode_impl_util.go",
"new_path": "pkg/sentry/fsimpl/kernfs/inode_impl_util.go",
"diff": "@@ -486,6 +486,16 @@ func (o *OrderedChildren) Lookup(ctx context.Context, name string) (Inode, error\nreturn s.inode, nil\n}\n+// ForEachChild calls fn on all childrens tracked by this ordered children.\n+func (o *OrderedChildren) ForEachChild(fn func(string, Inode)) {\n+ o.mu.RLock()\n+ defer o.mu.RUnlock()\n+\n+ for name, slot := range o.set {\n+ fn(name, slot.inode)\n+ }\n+}\n+\n// IterDirents implements Inode.IterDirents.\nfunc (o *OrderedChildren) IterDirents(ctx context.Context, mnt *vfs.Mount, cb vfs.IterDirentsCallback, offset, relOffset int64) (newOffset int64, err error) {\n// All entries from OrderedChildren have already been handled in\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/cgroup.go",
"new_path": "pkg/sentry/kernel/cgroup.go",
"diff": "@@ -102,6 +102,13 @@ func (ctx *CgroupMigrationContext) Abort() {\n// Commit completes a migration.\nfunc (ctx *CgroupMigrationContext) Commit() {\nctx.dst.CommitMigrate(ctx.t, &ctx.src)\n+\n+ ctx.t.mu.Lock()\n+ delete(ctx.t.cgroups, ctx.src)\n+ ctx.src.DecRef(ctx.t)\n+ ctx.dst.IncRef()\n+ ctx.t.cgroups[ctx.dst] = struct{}{}\n+ ctx.t.mu.Unlock()\n}\n// CgroupImpl is the common interface to cgroups.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/usage/cpu.go",
"new_path": "pkg/sentry/usage/cpu.go",
"diff": "@@ -44,3 +44,14 @@ func (s *CPUStats) Accumulate(s2 CPUStats) {\ns.SysTime += s2.SysTime\ns.VoluntarySwitches += s2.VoluntarySwitches\n}\n+\n+// DifferenceSince computes s - earlierSample.\n+//\n+// Precondition: s >= earlierSample.\n+func (s *CPUStats) DifferenceSince(earlierSample CPUStats) CPUStats {\n+ return CPUStats{\n+ UserTime: s.UserTime - earlierSample.UserTime,\n+ SysTime: s.SysTime - earlierSample.SysTime,\n+ VoluntarySwitches: s.VoluntarySwitches - earlierSample.VoluntarySwitches,\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/cgroup.cc",
"new_path": "test/syscalls/linux/cgroup.cc",
"diff": "@@ -508,6 +508,143 @@ TEST(CPUAcctCgroup, CPUAcctStat) {\nEXPECT_THAT(Atoi<int64_t>(sys_tokens[1]), IsPosixErrorOkAndHolds(Ge(0)));\n}\n+TEST(CPUAcctCgroup, HierarchicalAccounting) {\n+ SKIP_IF(!CgroupsAvailable());\n+\n+ Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));\n+ Cgroup root = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"cpuacct\"));\n+ Cgroup child = ASSERT_NO_ERRNO_AND_VALUE(root.CreateChild(\"child1\"));\n+\n+ // Root should have non-zero CPU usage since the test itself will be running\n+ // in the root cgroup.\n+ EXPECT_THAT(root.ReadIntegerControlFile(\"cpuacct.usage\"),\n+ IsPosixErrorOkAndHolds(Gt(0)));\n+\n+ // Child should have zero usage since it is initially empty.\n+ EXPECT_THAT(child.ReadIntegerControlFile(\"cpuacct.usage\"),\n+ IsPosixErrorOkAndHolds(Eq(0)));\n+\n+ // Move test into child and confirm child starts incurring usage.\n+ const int64_t before_move =\n+ ASSERT_NO_ERRNO_AND_VALUE(root.ReadIntegerControlFile(\"cpuacct.usage\"));\n+ ASSERT_NO_ERRNO(child.Enter(getpid()));\n+ ASSERT_NO_ERRNO(\n+ child.PollControlFileForChange(\"cpuacct.usage\", absl::Seconds(30)));\n+\n+ EXPECT_THAT(child.ReadIntegerControlFile(\"cpuacct.usage\"),\n+ IsPosixErrorOkAndHolds(Gt(0)));\n+\n+ // Root shouldn't lose usage due to the migration.\n+ const int64_t after_move =\n+ ASSERT_NO_ERRNO_AND_VALUE(root.ReadIntegerControlFile(\"cpuacct.usage\"));\n+ EXPECT_GE(after_move, before_move);\n+\n+ // Root should continue to gain usage after the move since child is a\n+ // subcgroup.\n+ ASSERT_NO_ERRNO(\n+ child.PollControlFileForChange(\"cpuacct.usage\", absl::Seconds(30)));\n+ EXPECT_THAT(root.ReadIntegerControlFile(\"cpuacct.usage\"),\n+ IsPosixErrorOkAndHolds(Ge(after_move)));\n+}\n+\n+TEST(CPUAcctCgroup, IndirectCharge) {\n+ SKIP_IF(!CgroupsAvailable());\n+\n+ Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));\n+ Cgroup root = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"cpuacct\"));\n+ Cgroup child1 = ASSERT_NO_ERRNO_AND_VALUE(root.CreateChild(\"child1\"));\n+ Cgroup child2 = ASSERT_NO_ERRNO_AND_VALUE(root.CreateChild(\"child2\"));\n+ Cgroup child2a = ASSERT_NO_ERRNO_AND_VALUE(child2.CreateChild(\"child2a\"));\n+\n+ ASSERT_NO_ERRNO(child1.Enter(getpid()));\n+ ASSERT_NO_ERRNO(\n+ child1.PollControlFileForChange(\"cpuacct.usage\", absl::Seconds(30)));\n+\n+ // Only root and child1 should have usage.\n+ for (auto const& cg : {root, child1}) {\n+ EXPECT_THAT(cg.ReadIntegerControlFile(\"cpuacct.usage\"),\n+ IsPosixErrorOkAndHolds(Gt(0)));\n+ }\n+ for (auto const& cg : {child2, child2a}) {\n+ EXPECT_THAT(cg.ReadIntegerControlFile(\"cpuacct.usage\"),\n+ IsPosixErrorOkAndHolds(Eq(0)));\n+ }\n+\n+ ASSERT_NO_ERRNO(child2a.Enter(getpid()));\n+ ASSERT_NO_ERRNO(\n+ child2a.PollControlFileForChange(\"cpuacct.usage\", absl::Seconds(30)));\n+\n+ const int64_t snapshot_root =\n+ ASSERT_NO_ERRNO_AND_VALUE(root.ReadIntegerControlFile(\"cpuacct.usage\"));\n+ const int64_t snapshot_child1 =\n+ ASSERT_NO_ERRNO_AND_VALUE(child1.ReadIntegerControlFile(\"cpuacct.usage\"));\n+ const int64_t snapshot_child2 =\n+ ASSERT_NO_ERRNO_AND_VALUE(child2.ReadIntegerControlFile(\"cpuacct.usage\"));\n+ const int64_t snapshot_child2a = ASSERT_NO_ERRNO_AND_VALUE(\n+ child2a.ReadIntegerControlFile(\"cpuacct.usage\"));\n+\n+ ASSERT_NO_ERRNO(\n+ child2a.PollControlFileForChange(\"cpuacct.usage\", absl::Seconds(30)));\n+\n+ // Root, child2 and child2a should've accumulated new usage. 
Child1 should\n+ // not.\n+ const int64_t now_root =\n+ ASSERT_NO_ERRNO_AND_VALUE(root.ReadIntegerControlFile(\"cpuacct.usage\"));\n+ const int64_t now_child1 =\n+ ASSERT_NO_ERRNO_AND_VALUE(child1.ReadIntegerControlFile(\"cpuacct.usage\"));\n+ const int64_t now_child2 =\n+ ASSERT_NO_ERRNO_AND_VALUE(child2.ReadIntegerControlFile(\"cpuacct.usage\"));\n+ const int64_t now_child2a = ASSERT_NO_ERRNO_AND_VALUE(\n+ child2a.ReadIntegerControlFile(\"cpuacct.usage\"));\n+\n+ EXPECT_GT(now_root, snapshot_root);\n+ EXPECT_GT(now_child2, snapshot_child2);\n+ EXPECT_GT(now_child2a, snapshot_child2a);\n+ EXPECT_EQ(now_child1, snapshot_child1);\n+}\n+\n+TEST(CPUAcctCgroup, NoDoubleAccounting) {\n+ SKIP_IF(!CgroupsAvailable());\n+\n+ Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));\n+ Cgroup root = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"cpuacct\"));\n+ Cgroup parent = ASSERT_NO_ERRNO_AND_VALUE(root.CreateChild(\"parent\"));\n+ Cgroup a = ASSERT_NO_ERRNO_AND_VALUE(parent.CreateChild(\"a\"));\n+ Cgroup b = ASSERT_NO_ERRNO_AND_VALUE(parent.CreateChild(\"b\"));\n+\n+ ASSERT_NO_ERRNO(a.Enter(getpid()));\n+ ASSERT_NO_ERRNO(\n+ a.PollControlFileForChange(\"cpuacct.usage\", absl::Seconds(30)));\n+\n+ ASSERT_NO_ERRNO(b.Enter(getpid()));\n+ ASSERT_NO_ERRNO(\n+ b.PollControlFileForChange(\"cpuacct.usage\", absl::Seconds(30)));\n+\n+ ASSERT_NO_ERRNO(root.Enter(getpid()));\n+ ASSERT_NO_ERRNO(\n+ root.PollControlFileForChange(\"cpuacct.usage\", absl::Seconds(30)));\n+\n+ // The usage for parent, a & b should now be frozen, since they no longer have\n+ // any tasks. Root will continue to accumulate usage.\n+ const int64_t usage_root =\n+ ASSERT_NO_ERRNO_AND_VALUE(root.ReadIntegerControlFile(\"cpuacct.usage\"));\n+ const int64_t usage_parent =\n+ ASSERT_NO_ERRNO_AND_VALUE(parent.ReadIntegerControlFile(\"cpuacct.usage\"));\n+ const int64_t usage_a =\n+ ASSERT_NO_ERRNO_AND_VALUE(a.ReadIntegerControlFile(\"cpuacct.usage\"));\n+ const int64_t usage_b =\n+ ASSERT_NO_ERRNO_AND_VALUE(b.ReadIntegerControlFile(\"cpuacct.usage\"));\n+\n+ EXPECT_GT(usage_root, 0);\n+ EXPECT_GT(usage_parent, 0);\n+ EXPECT_GT(usage_a, 0);\n+ EXPECT_GT(usage_b, 0);\n+ EXPECT_EQ(usage_parent, usage_a + usage_b);\n+ EXPECT_GE(usage_parent, usage_a);\n+ EXPECT_GE(usage_parent, usage_b);\n+ EXPECT_GE(usage_root, usage_parent);\n+}\n+\n// WriteAndVerifyControlValue attempts to write val to a cgroup file at path,\n// and verify the value by reading it afterwards.\nPosixError WriteAndVerifyControlValue(const Cgroup& c, std::string_view path,\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/cgroup_util.cc",
"new_path": "test/util/cgroup_util.cc",
"diff": "@@ -92,6 +92,37 @@ PosixErrorOr<absl::flat_hash_set<pid_t>> Cgroup::Tasks() const {\nreturn ParsePIDList(buf);\n}\n+PosixError Cgroup::PollControlFileForChange(absl::string_view name,\n+ absl::Duration timeout) const {\n+ const absl::Duration poll_interval = absl::Milliseconds(10);\n+ const absl::Time deadline = absl::Now() + timeout;\n+ const std::string alias_path = absl::StrFormat(\"[cg#%d]/%s\", id_, name);\n+\n+ ASSIGN_OR_RETURN_ERRNO(const int64_t initial_value,\n+ ReadIntegerControlFile(name));\n+\n+ while (true) {\n+ ASSIGN_OR_RETURN_ERRNO(const int64_t current_value,\n+ ReadIntegerControlFile(name));\n+ if (current_value != initial_value) {\n+ std::cerr << absl::StreamFormat(\n+ \"Control file '%s' changed from '%d' to '%d'\",\n+ alias_path, initial_value, current_value)\n+ << std::endl;\n+ return NoError();\n+ }\n+ if (absl::Now() >= deadline) {\n+ return PosixError(ETIME, absl::StrCat(alias_path, \" didn't change in \",\n+ absl::FormatDuration(timeout)));\n+ }\n+ std::cerr << absl::StreamFormat(\n+ \"Waiting for control file '%s' to change from '%d'...\",\n+ alias_path, initial_value)\n+ << std::endl;\n+ absl::SleepFor(poll_interval);\n+ }\n+}\n+\nPosixError Cgroup::ContainsCallingProcess() const {\nASSIGN_OR_RETURN_ERRNO(const absl::flat_hash_set<pid_t> procs, Procs());\nASSIGN_OR_RETURN_ERRNO(const absl::flat_hash_set<pid_t> tasks, Tasks());\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/cgroup_util.h",
"new_path": "test/util/cgroup_util.h",
"diff": "@@ -70,6 +70,10 @@ class Cgroup {\nPosixError WriteIntegerControlFile(absl::string_view name,\nint64_t value) const;\n+ // Waits for a control file's value to change.\n+ PosixError PollControlFileForChange(absl::string_view name,\n+ absl::Duration timeout) const;\n+\n// Returns the thread ids of the leaders of thread groups managed by this\n// cgroup.\nPosixErrorOr<absl::flat_hash_set<pid_t>> Procs() const;\n"
}
] | Go | Apache License 2.0 | google/gvisor | cgroupfs: Implement hierarchical accounting for cpuacct controller.
PiperOrigin-RevId: 438193226 |
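A simplified sketch of the migration bookkeeping this commit describes, with invented types (the real controller also locks, walks the hierarchy, and tracks voluntary switches): usage accrued so far is committed to the source cgroup, and the destination remembers the task's charge at migration time so it is not double counted.

package example

type cpuStats struct {
	user, sys uint64
}

func (s cpuStats) differenceSince(earlier cpuStats) cpuStats {
	return cpuStats{user: s.user - earlier.user, sys: s.sys - earlier.sys}
}

func (s *cpuStats) accumulate(other cpuStats) {
	s.user += other.user
	s.sys += other.sys
}

type cpuacct struct {
	usage     cpuStats            // usage by tasks that already left this cgroup
	committed map[string]cpuStats // per-task usage already attributed here
}

func newCPUAcct() *cpuacct {
	return &cpuacct{committed: make(map[string]cpuStats)}
}

// migrate moves task t, whose lifetime usage is charge, from src to dst.
func migrate(t string, charge cpuStats, src, dst *cpuacct) {
	// Attribute everything not yet committed at src to src's own usage.
	src.usage.accumulate(charge.differenceSince(src.committed[t]))
	delete(src.committed, t)
	// dst discounts everything the task accrued before the move.
	dst.committed[t] = charge
}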
259,853 | 30.03.2022 10:50:36 | 25,200 | be49295381b41b4d50f597e12999b25b913c2eb0 | make: set --//tools/nogo:fast by default
nogo:fast is converted to nogo:full with the opposite meaning.
All nogo tests always have to be executed with this option.
Analyzing the Go standard library takes about 10 minutes on Buildkite,
but it is required only to run nogo tests. | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -197,7 +197,7 @@ smoke-race-tests: ## Runs a smoke test after build building runsc in race config\n.PHONY: smoke-race-tests\nnogo-tests:\n- @$(call test,--build_tag_filters=nogo --test_tag_filters=nogo //:all pkg/... tools/...)\n+ @$(call test,--build_tag_filters=nogo --test_tag_filters=nogo --//tools/nogo:full //:all pkg/... tools/...)\n.PHONY: nogo-tests\n# For unit tests, we take everything in the root, pkg/... and tools/..., and\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/BUILD",
"new_path": "tools/nogo/BUILD",
"diff": "@@ -4,8 +4,9 @@ load(\"//tools/nogo:defs.bzl\", \"nogo_stdlib\", \"nogo_target\")\npackage(licenses = [\"notice\"])\nbool_flag(\n- name = \"fast\",\n+ name = \"full\",\nbuild_setting_default = False,\n+ visibility = [\"//visibility:public\"],\n)\nnogo_target(\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/defs.bzl",
"new_path": "tools/nogo/defs.bzl",
"diff": "@@ -65,7 +65,7 @@ NogoStdlibInfo = provider(\ndef _nogo_stdlib_impl(ctx):\n# If this is disabled, return nothing.\n- if ctx.attr._fast[BuildSettingInfo].value:\n+ if not ctx.attr._nogo_full[BuildSettingInfo].value:\nreturn [NogoStdlibInfo(\nfacts = None,\nraw_findings = [],\n@@ -123,8 +123,8 @@ nogo_stdlib = go_rule(\ndefault = \"//tools/nogo:target\",\ncfg = \"target\",\n),\n- \"_fast\": attr.label(\n- default = \"//tools/nogo:fast\",\n+ \"_nogo_full\": attr.label(\n+ default = \"//tools/nogo:full\",\ncfg = \"host\",\n),\n},\n@@ -419,6 +419,10 @@ nogo_test = rule(\ndefault = \"//tools/nogo:target\",\ncfg = \"target\",\n),\n+ \"_nogo_full\": attr.label(\n+ default = \"//tools/nogo:full\",\n+ cfg = \"exec\",\n+ ),\n},\ntest = True,\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | make: set --//tools/nogo:fast by default
nogo:fast is converted to nogo:full with the opposite meaning.
All nogo tests always have to be executed with this option.
Analyzing the Go standard library takes about 10 minutes on Buildkite,
but it is required only to run nogo tests.
PiperOrigin-RevId: 438343203 |
259,868 | 30.03.2022 16:45:10 | 25,200 | 8e55783cd36c391a4ea02383a3d5b83cf5099aac | gvisor.dev: Fix chart axis precision to one decimal point to avoid overflow. | [
{
"change_type": "MODIFY",
"old_path": "website/_includes/graph.html",
"new_path": "website/_includes/graph.html",
"diff": "@@ -90,7 +90,7 @@ d3.csv(\"{{ include.url }}\", function(d, i, columns) {\nvar x1_domain = runtimes;\n}\n- // Determine varaible margins.\n+ // Determine variable margins.\nvar x0_domain = data.map(d => d[x0_key]);\nvar margin_bottom_pad = 0;\nif (x0_domain.length > 8) {\n@@ -170,7 +170,7 @@ d3.csv(\"{{ include.url }}\", function(d, i, columns) {\n}\ng.append(\"g\")\n.attr(\"class\", \"axis\")\n- .call(d3.axisLeft(y).ticks(null, \"s\"))\n+ .call(d3.axisLeft(y).ticks(null, \".1s\"))\n.append(\"text\")\n.attr(\"x\", -30.0)\n.attr(\"y\", y(y.ticks().pop()) - 10.0)\n"
}
] | Go | Apache License 2.0 | google/gvisor | gvisor.dev: Fix chart axis precision to one decimal point to avoid overflow.
PiperOrigin-RevId: 438428560 |
259,853 | 30.03.2022 17:48:40 | 25,200 | 5d9d852f375fa6b5524d6806a4f6252ecda107d2 | test/runtimes: move all runner actions under timeout | [
{
"change_type": "MODIFY",
"old_path": "test/runtimes/runner/lib/lib.go",
"new_path": "test/runtimes/runner/lib/lib.go",
"diff": "@@ -56,10 +56,14 @@ func RunTests(lang, image, excludeFile string, batchSize int, timeout time.Durat\nreturn 1\n}\n+ timeoutChan := make(chan struct{})\n+ // Add one minute to let proctor handle timeout.\n+ timer := time.AfterFunc(timeout+time.Minute, func() { close(timeoutChan) })\n+ defer timer.Stop()\n// Get a slice of tests to run. This will also start a single Docker\n// container that will be used to run each test. The final test will\n// stop the Docker container.\n- tests, err := getTests(ctx, d, lang, image, batchSize, timeout, excludes)\n+ tests, err := getTests(ctx, d, lang, image, batchSize, timeoutChan, timeout, excludes)\nif err != nil {\nfmt.Fprintf(os.Stderr, \"%s\\n\", err.Error())\nreturn 1\n@@ -69,7 +73,9 @@ func RunTests(lang, image, excludeFile string, batchSize int, timeout time.Durat\n}\n// getTests executes all tests as table tests.\n-func getTests(ctx context.Context, d *dockerutil.Container, lang, image string, batchSize int, timeout time.Duration, excludes map[string]struct{}) ([]testing.InternalTest, error) {\n+func getTests(ctx context.Context, d *dockerutil.Container, lang, image string, batchSize int, timeoutChan chan struct{}, timeout time.Duration, excludes map[string]struct{}) ([]testing.InternalTest, error) {\n+ startTime := time.Now()\n+\n// Start the container.\nopts := dockerutil.RunOpts{\nImage: fmt.Sprintf(\"runtimes/%s\", image),\n@@ -79,11 +85,23 @@ func getTests(ctx context.Context, d *dockerutil.Container, lang, image string,\nreturn nil, fmt.Errorf(\"docker run failed: %v\", err)\n}\n+ done := make(chan struct{})\n+ go func() {\n+ select {\n+ case <-done:\n+ return\n+ // Make sure that the useful load takes 2/3 of timeout.\n+ case <-time.After((timeout - time.Since(startTime)) / 3):\n+ case <-timeoutChan:\n+ }\n+ panic(\"TIMEOUT: Unable to get a list of tests\")\n+ }()\n// Get a list of all tests in the image.\nlist, err := d.Exec(ctx, dockerutil.ExecOpts{}, \"/proctor/proctor\", \"--runtime\", lang, \"--list\")\nif err != nil {\nreturn nil, fmt.Errorf(\"docker exec failed: %v\", err)\n}\n+ close(done)\n// Calculate a subset of tests.\ntests := strings.Fields(list)\n@@ -131,7 +149,12 @@ func getTests(ctx context.Context, d *dockerutil.Container, lang, image string,\n}\ngo func() {\n- output, err = d.Exec(ctx, dockerutil.ExecOpts{}, \"/proctor/proctor\", \"--runtime\", lang, \"--tests\", strings.Join(tcs, \",\"), fmt.Sprintf(\"--timeout=%s\", timeout))\n+ output, err = d.Exec(\n+ ctx, dockerutil.ExecOpts{},\n+ \"/proctor/proctor\", \"--runtime\", lang,\n+ \"--tests\", strings.Join(tcs, \",\"),\n+ fmt.Sprintf(\"--timeout=%s\", timeout-time.Since(startTime)),\n+ )\nclose(done)\n}()\n@@ -143,7 +166,7 @@ func getTests(ctx context.Context, d *dockerutil.Container, lang, image string,\n}\nt.Fatalf(\"FAIL: (%v):\\nBatch:\\n%s\\nOutput:\\n%s\\n\", time.Since(now), strings.Join(tcs, \"\\n\"), output)\n// Add one minute to let proctor handle timeout.\n- case <-time.After(timeout + time.Minute):\n+ case <-timeoutChan:\nt.Fatalf(\"TIMEOUT: (%v):\\nBatch:\\n%s\\nOutput:\\n%s\\n\", time.Since(now), strings.Join(tcs, \"\\n\"), output)\n}\n},\n"
}
] | Go | Apache License 2.0 | google/gvisor | test/runtimes: move all runner actions under timeout
PiperOrigin-RevId: 438441356 |
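Illustrative pattern only, not the runner's actual code: a single timer closes a shared channel, and each stage derives its budget from the time already spent, which is roughly what the change above applies to the list and exec steps.

package example

import "time"

func runUnderDeadline(timeout time.Duration, stages []func(remaining time.Duration, timedOut <-chan struct{})) {
	start := time.Now()
	timedOut := make(chan struct{})
	timer := time.AfterFunc(timeout, func() { close(timedOut) })
	defer timer.Stop()

	for _, stage := range stages {
		// Every stage sees the same timeout channel and whatever budget is
		// left on the overall deadline.
		stage(timeout-time.Since(start), timedOut)
	}
}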
259,909 | 31.03.2022 09:14:46 | 25,200 | d9cffce328f9e2ada874cfc4df21890086bceb48 | Parse cgroupv2 path only once while scanning mountinfo.
Some systems have multiple cgroup2 mounts. In these cases, loadPathsHelper
incorrectly attempts to overwrite `paths[cgroup2Key]` with an already
processed value, which causes an error in filepath.Rel.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "runsc/cgroup/cgroup.go",
"new_path": "runsc/cgroup/cgroup.go",
"diff": "@@ -40,6 +40,8 @@ import (\nconst (\ncgroupRoot = \"/sys/fs/cgroup\"\n+ cgroupv1FsName = \"cgroup\"\n+ cgroupv2FsName = \"cgroup2\"\n)\nvar controllers = map[string]controller{\n@@ -255,6 +257,7 @@ func loadPathsHelper(cgroup, mountinfo io.Reader, unified bool) (map[string]stri\n// which don't exist in container, so recover the container paths here by\n// double-checking with /proc/[pid]/mountinfo\nmountScanner := bufio.NewScanner(mountinfo)\n+ haveCg2Path := false\nfor mountScanner.Scan() {\n// Format: ID parent major:minor root mount-point options opt-fields - fs-type source super-options\n// Example: 39 32 0:34 / /sys/fs/cgroup/devices rw,noexec shared:18 - cgroup cgroup rw,devices\n@@ -264,7 +267,7 @@ func loadPathsHelper(cgroup, mountinfo io.Reader, unified bool) (map[string]stri\ncontinue\n}\nswitch fields[len(fields)-3] {\n- case \"cgroup\":\n+ case cgroupv1FsName:\n// Cgroup controller type is in the super-options field.\nsuperOptions := strings.Split(fields[len(fields)-1], \",\")\nfor _, opt := range superOptions {\n@@ -286,13 +289,14 @@ func loadPathsHelper(cgroup, mountinfo io.Reader, unified bool) (map[string]stri\n}\n}\n}\n- case \"cgroup2\":\n- if cgroupPath, ok := paths[cgroup2Key]; ok {\n+ case cgroupv2FsName:\n+ if cgroupPath, ok := paths[cgroup2Key]; !haveCg2Path && ok {\nroot := fields[3]\nrelCgroupPath, err := filepath.Rel(root, cgroupPath)\nif err != nil {\nreturn nil, err\n}\n+ haveCg2Path = true\npaths[cgroup2Key] = relCgroupPath\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cgroup/cgroup_v2_test.go",
"new_path": "runsc/cgroup/cgroup_v2_test.go",
"diff": "@@ -26,7 +26,12 @@ import (\n\"gvisor.dev/gvisor/pkg/test/testutil\"\n)\n-var cgroupv2MountInfo = `29 22 0:26 / /sys/fs/cgroup rw shared:4 - cgroup2 cgroup2 rw,seclabel,nsdelegate`\n+var (\n+ cgroupv2MountInfo = `29 22 0:26 / /sys/fs/cgroup rw shared:4 - cgroup2 cgroup2 rw,seclabel,nsdelegate`\n+ multipleCg2MountInfo = `34 28 0:29 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime shared:8 - cgroup2 cgroup2 rw\n+1479 28 0:29 / /run/some/module/cgroupv2 rw,relatime shared:650 - cgroup2 none rw\n+`\n+)\nfunc TestIO(t *testing.T) {\nfor _, tc := range []struct {\n@@ -149,6 +154,14 @@ func TestLoadPathsCgroupv2(t *testing.T) {\n\"cgroup2\": \".\",\n},\n},\n+ {\n+ name: \"multiple-cgv2\",\n+ cgroups: \"0::/system.slice/containerd.service\\n\",\n+ mountinfo: multipleCg2MountInfo,\n+ want: map[string]string{\n+ \"cgroup2\": \"system.slice/containerd.service\",\n+ },\n+ },\n} {\nt.Run(tc.name, func(t *testing.T) {\nr := strings.NewReader(tc.cgroups)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Parse cgroupv2 path only once while scanning mountinfo.
Some systems have multiple cgroup2 mounts. In these cases, loadPathsHelper
incorrectly attempts to overwrite `paths[cgroup2Key]` with an already
processed value, which causes an error in filepath.Rel.
Fixes #7334
PiperOrigin-RevId: 438583982 |
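A hedged sketch of the rule the fix enforces: when /proc/[pid]/mountinfo contains several cgroup2 mounts, only the first one should be consulted. Field positions follow the mountinfo format quoted in the changed code; the function name is made up for the example.

package example

import (
	"bufio"
	"strings"
)

// firstCgroup2Root returns the root field of the first cgroup2 mount in the
// given mountinfo text, ignoring any later cgroup2 entries.
func firstCgroup2Root(mountinfo string) (string, bool) {
	s := bufio.NewScanner(strings.NewReader(mountinfo))
	for s.Scan() {
		fields := strings.Fields(s.Text())
		if len(fields) < 10 {
			continue
		}
		if fields[len(fields)-3] == "cgroup2" {
			return fields[3], true
		}
	}
	return "", false
}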
259,853 | 31.03.2022 14:20:46 | 25,200 | 3b3376ce7d7bc926542b304f23b261c84061fc0f | platform/kvm: don't call mmap for less than one page
mmap can't allocate less than one page.
In addition, we can check whether a new region can be merged with a previous one
and avoid the overhead of a hole between them. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/physical_map.go",
"new_path": "pkg/sentry/platform/kvm/physical_map.go",
"diff": "@@ -99,6 +99,11 @@ func fillAddressSpace() (excludedRegions []region) {\nunix.MAP_ANONYMOUS|unix.MAP_PRIVATE|unix.MAP_NORESERVE,\n0, 0)\nif errno != 0 {\n+ // One page is the smallest mapping that can be allocated.\n+ if current == hostarch.PageSize {\n+ current = 0\n+ break\n+ }\n// Attempt half the size; overflow not possible.\ncurrentAddr, _ := hostarch.Addr(current >> 1).RoundUp()\ncurrent = uintptr(currentAddr)\n@@ -106,6 +111,20 @@ func fillAddressSpace() (excludedRegions []region) {\n}\n// We filled a block.\nfilled += current\n+ // Check whether a new region is merged with a previous one.\n+ for i := range excludedRegions {\n+ if excludedRegions[i].virtual == addr+current {\n+ excludedRegions[i].virtual = addr\n+ addr = 0\n+ break\n+ }\n+ if excludedRegions[i].virtual+excludedRegions[i].length == addr {\n+ excludedRegions[i].length += current\n+ addr = 0\n+ break\n+ }\n+ }\n+ if addr != 0 {\nexcludedRegions = append(excludedRegions, region{\nvirtual: addr,\nlength: current,\n@@ -115,6 +134,7 @@ func fillAddressSpace() (excludedRegions []region) {\nrequired += faultBlockSize\n}\n}\n+ }\nif current == 0 {\npanic(\"filling address space failed\")\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | platform/kvm: don't call mmap for less than one page
mmap can't allocate less than one page.
In addition, we can check whether a new region can be merged with a previous one
and avoid the overhead of a hole between them.
PiperOrigin-RevId: 438660167 |
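Simplified sketch, with invented types, of the merging idea from the message: when a newly reserved block is adjacent to an existing region, extend that region instead of recording a separate one, so no hole is left between them.

package example

type region struct {
	start, length uintptr
}

// addRegion records a reserved [start, start+length) block, merging it into an
// adjacent existing region when possible.
func addRegion(regions []region, start, length uintptr) []region {
	for i := range regions {
		if regions[i].start == start+length { // new block ends where an existing one starts
			regions[i].start = start
			regions[i].length += length
			return regions
		}
		if regions[i].start+regions[i].length == start { // new block starts where an existing one ends
			regions[i].length += length
			return regions
		}
	}
	return append(regions, region{start: start, length: length})
}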
259,975 | 31.03.2022 14:35:31 | 25,200 | c86b049468e0884e4506bb291afb563c8ddafce4 | [benchmarks] Fix benchmarks readme after feedback. | [
{
"change_type": "MODIFY",
"old_path": "test/benchmarks/README.md",
"new_path": "test/benchmarks/README.md",
"diff": "@@ -21,12 +21,11 @@ To run, use the Makefile:\n- The above command will place several configurations of runsc in your\n/etc/docker/daemon.json file. Choose one without the debug option set.\n- Run your benchmark: `make run-benchmark\n- RUNTIME=[RUNTIME_FROM_DAEMON.JSON/runc]\n- BENCHMARKS_TARGETS=//path/to/target\"`\n+ RUNTIME=[RUNTIME_FROM_DAEMON.JSON/runc] BENCHMARKS_TARGETS=path/to/target`\n- Additionally, you can benchmark several platforms in one command:\n```\n-make benchmark-platforms BENCHMARKS_TARGET=//path/to/target\"\n+make benchmark-platforms BENCHMARKS_TARGET=path/to/target\n```\nThe above command will install runtimes/run benchmarks on ptrace and kvm as well\n@@ -122,7 +121,7 @@ To profile, simply run the `benchmark-platforms` command from above and profiles\nwill be in /tmp/profile.\nOr run with: `make run-benchmark RUNTIME=[RUNTIME_UNDER_TEST]\n-BENCHMARKS_TARGETS=//path/to/target`\n+BENCHMARKS_TARGETS=path/to/target`\nProfiles will be in /tmp/profile. Note: runtimes must have the `--profile` flag\nset in /etc/docker/daemon.conf and profiling will not work on runc.\n"
}
] | Go | Apache License 2.0 | google/gvisor | [benchmarks] Fix benchmarks readme after feedback.
PiperOrigin-RevId: 438663626 |
259,853 | 31.03.2022 14:57:15 | 25,200 | 973f8636d78e947da65881bcbfd627bb0035f29d | platform/kvm: limit virtual address space size to 256TB
This is the maximum that can be addressed with 4-level paging. | [
{
"change_type": "MODIFY",
"old_path": "pkg/ring0/lib_amd64.go",
"new_path": "pkg/ring0/lib_amd64.go",
"diff": "@@ -84,6 +84,12 @@ var (\nfunc Init(fs cpuid.FeatureSet) {\n// Initialize all sizes.\nVirtualAddressBits = uintptr(fs.VirtualAddressBits())\n+ // TODO(gvisor.dev/issue/7349): introduce support for 5-level paging.\n+ // Four-level page tables allows to address up to 48-bit virtual\n+ // addresses.\n+ if VirtualAddressBits > 48 {\n+ VirtualAddressBits = 48\n+ }\nPhysicalAddressBits = uintptr(fs.PhysicalAddressBits())\nUserspaceSize = uintptr(1) << (VirtualAddressBits - 1)\nMaximumUserAddress = (UserspaceSize - 1) & ^uintptr(hostarch.PageSize-1)\n"
}
] | Go | Apache License 2.0 | google/gvisor | platform/kvm: limit virtual address space size to 256TB
This is the maximum that can be addressed with 4-level paging.
PiperOrigin-RevId: 438668768 |
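For the record, the arithmetic behind the 256TB figure as a self-contained constant block: 4-level x86-64 page tables translate 48 virtual-address bits, and 2^48 bytes is 256 TiB.

package example

const (
	virtualAddressBits = 48
	addressableBytes   = uint64(1) << virtualAddressBits // 281,474,976,710,656 bytes
	tib                = uint64(1) << 40
	addressableTiB     = addressableBytes / tib // 256
)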
259,985 | 31.03.2022 15:30:47 | 25,200 | 8a8349e1bca4d116806f0aac3587cf0771d0c5cd | cgroupfs: Return the correct cgroup path for tasks from procfs.
Previously we were returning the path to the control file used to
migrate a task, rather than the path to the cgroup directory. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/base.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/base.go",
"diff": "@@ -283,9 +283,15 @@ func (c *cgroupInode) AbortMigrate(t *kernel.Task, src *kernel.Cgroup) {\n}\n}\n-func (c *cgroupInode) Cgroup(fd *vfs.FileDescription) kernel.Cgroup {\n+func (c *cgroupInode) CgroupFromControlFileFD(fd *vfs.FileDescription) kernel.Cgroup {\n+ controlFileDentry := fd.Dentry().Impl().(*kernfs.Dentry)\n+ // The returned parent dentry remains valid without holding locks because in\n+ // cgroupfs, the parent directory relationship of a control file is\n+ // effectively immutable. Control files cannot be unlinked, renamed or\n+ // destroyed independently from their parent directory.\n+ parentD := controlFileDentry.Parent()\nreturn kernel.Cgroup{\n- Dentry: fd.Dentry().Impl().(*kernfs.Dentry),\n+ Dentry: parentD,\nCgroupImpl: c,\n}\n}\n@@ -340,7 +346,7 @@ func (d *cgroupProcsData) Write(ctx context.Context, fd *vfs.FileDescription, sr\nif targetTG == nil {\nreturn 0, linuxerr.EINVAL\n}\n- return n, targetTG.MigrateCgroup(d.Cgroup(fd))\n+ return n, targetTG.MigrateCgroup(d.CgroupFromControlFileFD(fd))\n}\n// +stateify savable\n@@ -382,7 +388,7 @@ func (d *tasksData) Write(ctx context.Context, fd *vfs.FileDescription, src user\nif targetTask == nil {\nreturn 0, linuxerr.EINVAL\n}\n- return n, targetTask.MigrateCgroup(d.Cgroup(fd))\n+ return n, targetTask.MigrateCgroup(d.CgroupFromControlFileFD(fd))\n}\n// parseInt64FromString interprets src as string encoding a int64 value, and\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/kernfs/kernfs.go",
"new_path": "pkg/sentry/fsimpl/kernfs/kernfs.go",
"diff": "@@ -600,6 +600,13 @@ func (d *Dentry) WalkDentryTree(ctx context.Context, vfsObj *vfs.VirtualFilesyst\nreturn target, nil\n}\n+// Parent returns the parent of this Dentry. This is not safe in general, the\n+// filesystem may concurrently move d elsewhere. The caller is responsible for\n+// ensuring the returned result remains valid while it is used.\n+func (d *Dentry) Parent() *Dentry {\n+ return d.parent\n+}\n+\n// The Inode interface maps filesystem-level operations that operate on paths to\n// equivalent operations on specific filesystem nodes.\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/cgroup.cc",
"new_path": "test/syscalls/linux/cgroup.cc",
"diff": "@@ -258,7 +258,7 @@ TEST(Cgroup, MountRace) {\n}\n});\n- Cgroup c = Cgroup(mountpoint.path());\n+ Cgroup c = Cgroup::RootCgroup(mountpoint.path());\n// c should be a valid cgroup.\nEXPECT_NO_ERRNO(c.ContainsCallingProcess());\n}\n@@ -960,6 +960,67 @@ TEST(ProcCgroup, ProcfsReportsCgroupfsMountOptions) {\n}\n}\n+TEST(ProcCgroups, ProcfsRreportsHierarchyID) {\n+ SKIP_IF(!CgroupsAvailable());\n+\n+ Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));\n+ Cgroup h1 = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"memory,cpuacct\"));\n+ Cgroup h2 = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"cpu\"));\n+\n+ absl::flat_hash_map<std::string, CgroupsEntry> entries =\n+ ASSERT_NO_ERRNO_AND_VALUE(ProcCgroupsEntries());\n+\n+ EXPECT_EQ(entries[\"memory\"].hierarchy, entries[\"cpuacct\"].hierarchy);\n+\n+ // Hierarhcy IDs are allocated sequentially, starting at 1.\n+ EXPECT_EQ(entries[\"memory\"].hierarchy, 1);\n+ EXPECT_EQ(entries[\"cpu\"].hierarchy, 2);\n+}\n+\n+TEST(ProcCgroups, ProcfsReportsTasksCgroup) {\n+ SKIP_IF(!CgroupsAvailable());\n+\n+ Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));\n+ Cgroup h1 = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"memory\"));\n+ Cgroup h2 = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"cpu,cpuacct\"));\n+\n+ Cgroup h1c1 = ASSERT_NO_ERRNO_AND_VALUE(h1.CreateChild(\"memory_child1\"));\n+ Cgroup h1c2 = ASSERT_NO_ERRNO_AND_VALUE(h1.CreateChild(\"memory_child2\"));\n+\n+ Cgroup h2c1 = ASSERT_NO_ERRNO_AND_VALUE(h2.CreateChild(\"cpu_child1\"));\n+ Cgroup h2c2 = ASSERT_NO_ERRNO_AND_VALUE(h2.CreateChild(\"cpu_child2\"));\n+\n+ // Test should initially be in the hierarchy roots.\n+ auto entries = ASSERT_NO_ERRNO_AND_VALUE(ProcPIDCgroupEntries(getpid()));\n+ EXPECT_EQ(h1.CanonicalPath(), entries[\"memory\"].path);\n+ EXPECT_EQ(h2.CanonicalPath(), entries[\"cpu,cpuacct\"].path);\n+\n+ // Move to child for hierarchy #1 and check paths. Note that we haven't moved\n+ // in hierarchy #2.\n+ ASSERT_NO_ERRNO(h1c1.Enter(getpid()));\n+ entries = ASSERT_NO_ERRNO_AND_VALUE(ProcPIDCgroupEntries(getpid()));\n+ EXPECT_EQ(h1c1.CanonicalPath(), entries[\"memory\"].path);\n+ EXPECT_EQ(h2.CanonicalPath(), entries[\"cpu,cpuacct\"].path);\n+\n+ // Move h2; h1 should remain unchanged.\n+ ASSERT_NO_ERRNO(h2c1.Enter(getpid()));\n+ entries = ASSERT_NO_ERRNO_AND_VALUE(ProcPIDCgroupEntries(getpid()));\n+ EXPECT_EQ(h1c1.CanonicalPath(), entries[\"memory\"].path);\n+ EXPECT_EQ(h2c1.CanonicalPath(), entries[\"cpu,cpuacct\"].path);\n+\n+ // Move the thread rather than process group.\n+ const pid_t tid = syscall(SYS_gettid);\n+ ASSERT_NO_ERRNO(h1c2.EnterThread(tid));\n+ entries = ASSERT_NO_ERRNO_AND_VALUE(ProcPIDCgroupEntries(tid));\n+ EXPECT_EQ(h1c2.CanonicalPath(), entries[\"memory\"].path);\n+ EXPECT_EQ(h2c1.CanonicalPath(), entries[\"cpu,cpuacct\"].path);\n+\n+ ASSERT_NO_ERRNO(h2c2.EnterThread(tid));\n+ entries = ASSERT_NO_ERRNO_AND_VALUE(ProcPIDCgroupEntries(tid));\n+ EXPECT_EQ(h1c2.CanonicalPath(), entries[\"memory\"].path);\n+ EXPECT_EQ(h2c2.CanonicalPath(), entries[\"cpu,cpuacct\"].path);\n+}\n+\n} // namespace\n} // namespace testing\n} // namespace gvisor\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/cgroup_util.cc",
"new_path": "test/util/cgroup_util.cc",
"diff": "namespace gvisor {\nnamespace testing {\n-Cgroup::Cgroup(absl::string_view path) : cgroup_path_(path) {\n+Cgroup::Cgroup(absl::string_view path, absl::string_view mountpoint)\n+ : cgroup_path_(path), mountpoint_(mountpoint) {\nid_ = ++Cgroup::next_id_;\nstd::cerr << absl::StreamFormat(\"[cg#%d] <= %s\", id_, cgroup_path_)\n<< std::endl;\n}\n-PosixErrorOr<Cgroup> Cgroup::RecursivelyCreate(absl::string_view path) {\n- RETURN_IF_ERRNO(RecursivelyCreateDir(path));\n- return Cgroup(path);\n-}\n-\n-PosixErrorOr<Cgroup> Cgroup::Create(absl::string_view path) {\n- RETURN_IF_ERRNO(Mkdir(path));\n- return Cgroup(path);\n-}\n-\nPosixError Cgroup::Delete() { return Rmdir(cgroup_path_); }\nPosixErrorOr<Cgroup> Cgroup::CreateChild(absl::string_view name) const {\n- return Cgroup::Create(JoinPath(Path(), name));\n+ std::string path = JoinPath(Path(), name);\n+ RETURN_IF_ERRNO(Mkdir(path));\n+ return Cgroup(path, mountpoint_);\n}\nPosixErrorOr<std::string> Cgroup::ReadControlFile(\n@@ -183,7 +176,7 @@ PosixErrorOr<Cgroup> Mounter::MountCgroupfs(std::string mopts) {\n\"Mount(\\\"none\\\", \\\"%s\\\", \\\"cgroup\\\", 0, \\\"%s\\\", 0) => OK\",\nmountpath, mopts)\n<< std::endl;\n- Cgroup cg = Cgroup(mountpath);\n+ Cgroup cg = Cgroup::RootCgroup(mountpath);\nmountpoints_[cg.id()] = std::move(mountpoint);\nmounts_[cg.id()] = std::move(mount);\nreturn cg;\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/cgroup_util.h",
"new_path": "test/util/cgroup_util.h",
"diff": "@@ -30,24 +30,28 @@ namespace testing {\n// Cgroup represents a cgroup directory on a mounted cgroupfs.\nclass Cgroup {\npublic:\n- Cgroup(std::string_view path);\n+ static Cgroup RootCgroup(absl::string_view path) {\n+ return Cgroup(path, path);\n+ }\nuint64_t id() const { return id_; }\n- // RecursivelyCreate creates cgroup specified by path, including all\n- // components leading up to path. Path should end inside a cgroupfs mount. If\n- // path already exists, RecursivelyCreate does nothing and silently succeeds.\n- static PosixErrorOr<Cgroup> RecursivelyCreate(std::string_view path);\n-\n- // Creates a new cgroup at path. The parent directory must exist and be a\n- // cgroupfs directory.\n- static PosixErrorOr<Cgroup> Create(std::string_view path);\n-\n// Deletes the current cgroup represented by this object.\nPosixError Delete();\nconst std::string& Path() const { return cgroup_path_; }\n+ // Returns the canonical path for this cgroup, which is the absolute path\n+ // starting at the hierarchy root.\n+ const std::string CanonicalPath() const {\n+ std::string relpath =\n+ GetRelativePath(mountpoint_, cgroup_path_).ValueOrDie();\n+ if (relpath == \".\") {\n+ return \"/\";\n+ }\n+ return \"/\" + relpath;\n+ }\n+\n// Creates a child cgroup under this cgroup with the given name.\nPosixErrorOr<Cgroup> CreateChild(std::string_view name) const;\n@@ -95,12 +99,15 @@ class Cgroup {\nPosixError EnterThread(pid_t pid) const;\nprivate:\n+ Cgroup(std::string_view path, std::string_view mountpoint);\n+\nPosixErrorOr<absl::flat_hash_set<pid_t>> ParsePIDList(\nabsl::string_view data) const;\nstatic int64_t next_id_;\nint64_t id_;\nconst std::string cgroup_path_;\n+ const std::string mountpoint_;\n};\n// Mounter is a utility for creating cgroupfs mounts. It automatically manages\n"
}
] | Go | Apache License 2.0 | google/gvisor | cgroupfs: Return the correct cgroup path for tasks from procfs.
Previously we were returning the path to the control file used to
migrate a task, rather than the path to the cgroup directory.
PiperOrigin-RevId: 438676422 |
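The effect of this fix is visible from userspace in `/proc/<pid>/cgroup`. The following sketch (not part of the commit) just reads and parses that file for the current process; after the change, the path column names the cgroup directory (for example "/memory_child1", a name taken from the new tests) rather than the control file that was written to migrate the task.

```go
// Illustrative sketch: print each hierarchy's ID, controllers, and cgroup
// path as reported by procfs.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("/proc/self/cgroup")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	// Each line has the form "hierarchy-ID:controller-list:cgroup-path".
	s := bufio.NewScanner(f)
	for s.Scan() {
		parts := strings.SplitN(s.Text(), ":", 3)
		if len(parts) != 3 {
			continue
		}
		fmt.Printf("hierarchy %s controllers=%q path=%q\n", parts[0], parts[1], parts[2])
	}
}
```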
259,985 | 31.03.2022 16:27:37 | 25,200 | 78d2200a17231f62eef733cae44c0883605eb795 | cgroupfs: Directories should support setstat. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go",
"diff": "@@ -429,9 +429,9 @@ func (*dir) Keep() bool {\nreturn true\n}\n-// SetStat implements kernfs.Inode.SetStat not allowing inode attributes to be changed.\n-func (*dir) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {\n- return linuxerr.EPERM\n+// SetStat implements kernfs.Inode.SetStat.\n+func (d *dir) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *auth.Credentials, opts vfs.SetStatOptions) error {\n+ return d.InodeAttrs.SetStat(ctx, fs, creds, opts)\n}\n// Open implements kernfs.Inode.Open.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/cgroup.cc",
"new_path": "test/syscalls/linux/cgroup.cc",
"diff": "@@ -437,6 +437,37 @@ TEST(Cgroup, DuplicateUnlinkOnDirFD) {\nEXPECT_THAT(UnlinkAt(dirfd, \".\", AT_REMOVEDIR), PosixErrorIs(EINVAL));\n}\n+TEST(Cgroup, DirSetStat) {\n+ SKIP_IF(!CgroupsAvailable());\n+ Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));\n+ Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"\"));\n+\n+ const struct stat before = ASSERT_NO_ERRNO_AND_VALUE(Stat(c.Path()));\n+ EXPECT_TRUE(S_ISDIR(before.st_mode));\n+\n+ // Cgroup directories default to 0555.\n+ EXPECT_THAT(before.st_mode, PermissionIs(0555));\n+\n+ // Change permissions and verify they're accepted.\n+ ASSERT_NO_ERRNO(Chmod(c.Path(), 0755));\n+ const struct stat after = ASSERT_NO_ERRNO_AND_VALUE(Stat(c.Path()));\n+ EXPECT_THAT(after.st_mode, PermissionIs(0755));\n+\n+ // Try the same with non-root directory.\n+ Cgroup child = ASSERT_NO_ERRNO_AND_VALUE(c.CreateChild(\"child\"));\n+ const struct stat child_before =\n+ ASSERT_NO_ERRNO_AND_VALUE(Stat(child.Path()));\n+ EXPECT_THAT(child_before.st_mode, PermissionIs(0555)); // Default.\n+\n+ ASSERT_NO_ERRNO(Chmod(child.Path(), 0757));\n+ const struct stat child_after = ASSERT_NO_ERRNO_AND_VALUE(Stat(child.Path()));\n+ EXPECT_THAT(child_after.st_mode, PermissionIs(0757));\n+\n+ // Child chmod didn't affect parent.\n+ const struct stat parent_after = ASSERT_NO_ERRNO_AND_VALUE(Stat(c.Path()));\n+ EXPECT_THAT(parent_after.st_mode, PermissionIs(0755));\n+}\n+\nTEST(MemoryCgroup, MemoryUsageInBytes) {\nSKIP_IF(!CgroupsAvailable());\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/fs_util.cc",
"new_path": "test/util/fs_util.cc",
"diff": "@@ -732,5 +732,10 @@ PosixError CheckSameFile(const FileDescriptor& fd1, const FileDescriptor& fd2) {\nreturn NoError();\n}\n+\n+::testing::Matcher<mode_t> PermissionIs(mode_t want) {\n+ return MakeMatcher(new ModePermissionMatcher(want));\n+}\n+\n} // namespace testing\n} // namespace gvisor\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/fs_util.h",
"new_path": "test/util/fs_util.h",
"diff": "@@ -233,6 +233,38 @@ inline std::string JoinPath(absl::string_view path1, absl::string_view path2,\nabsl::string_view path3, const T&... args) {\nreturn internal::JoinPathImpl({path1, path2, path3, args...});\n}\n+\n+// A matcher which checks whether the file permissions bits for a mode value\n+// matches an expected value.\n+class ModePermissionMatcher : public ::testing::MatcherInterface<mode_t> {\n+ public:\n+ explicit ModePermissionMatcher(mode_t want) : want_(want) {}\n+\n+ bool MatchAndExplain(\n+ mode_t got,\n+ ::testing::MatchResultListener* const listener) const override {\n+ const mode_t masked = got & (S_IRWXU | S_IRWXG | S_IRWXO);\n+ if (masked == want_) {\n+ return true;\n+ }\n+ *listener << \"Permission 0\" << std::oct << masked;\n+ return false;\n+ }\n+\n+ void DescribeTo(std::ostream* const os) const override {\n+ *os << \"File permission is 0\" << std::oct << want_;\n+ }\n+\n+ void DescribeNegationTo(std::ostream* const os) const override {\n+ *os << \"File permission is not 0\" << std::oct << want_;\n+ }\n+\n+ private:\n+ mode_t want_;\n+};\n+\n+::testing::Matcher<mode_t> PermissionIs(mode_t want);\n+\n} // namespace testing\n} // namespace gvisor\n#endif // GVISOR_TEST_UTIL_FS_UTIL_H_\n"
}
] | Go | Apache License 2.0 | google/gvisor | cgroupfs: Directories should support setstat.
PiperOrigin-RevId: 438688088 |
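Because `SetStat` is now delegated to `InodeAttrs` instead of returning EPERM, a plain chmod on a cgroupfs directory succeeds. The sketch below shows that from userspace; it is illustrative only, and "/tmp/cg" is a hypothetical path where a cgroupfs is assumed to already be mounted (cgroup directories default to 0555, as in the new test).

```go
// Illustrative sketch: change and read back the permissions of a cgroupfs
// directory.
package main

import (
	"fmt"
	"os"
)

func main() {
	const dir = "/tmp/cg" // hypothetical cgroupfs mount point
	if err := os.Chmod(dir, 0o755); err != nil {
		fmt.Fprintln(os.Stderr, "chmod:", err)
		os.Exit(1)
	}
	fi, err := os.Stat(dir)
	if err != nil {
		fmt.Fprintln(os.Stderr, "stat:", err)
		os.Exit(1)
	}
	// Expect 0755 here after the chmod above; before this change the chmod
	// would have failed with EPERM.
	fmt.Printf("mode: %#o\n", fi.Mode().Perm())
}
```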
259,909 | 31.03.2022 16:45:36 | 25,200 | 28549d8578ee1aad937592eee523016d35f4e733 | Transition to closed state before releasing a tcp endpoint. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -1003,6 +1003,8 @@ func (e *endpoint) notifyProtocolGoroutine(n uint32) {\nfunc (e *endpoint) Release() {\ne.LockUser()\ndefer e.UnlockUser()\n+ e.transitionToStateCloseLocked()\n+ e.notifyProtocolGoroutine(notifyTickleWorker)\ne.releaseLocked()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_test.go",
"new_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_test.go",
"diff": "@@ -8661,6 +8661,49 @@ func TestReleaseAfterClose(t *testing.T) {\nc.EP.Release()\n}\n+func TestReleaseDanglingEndpoints(t *testing.T) {\n+ c := context.New(t, e2e.DefaultMTU)\n+ defer c.Cleanup()\n+\n+ c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)\n+ ep := c.EP\n+ c.EP = nil\n+\n+ // Close the endpoint, make sure we get a FIN segment. The endpoint should be\n+ // dangling.\n+ ep.Close()\n+ iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)\n+ checker.IPv4(t, c.GetPacket(),\n+ checker.TCP(\n+ checker.DstPort(context.TestPort),\n+ checker.TCPSeqNum(uint32(c.IRS)+1),\n+ checker.TCPAckNum(uint32(iss)),\n+ checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),\n+ ),\n+ )\n+ tcpip.ReleaseDanglingEndpoints()\n+\n+ // Now send an ACK and it should trigger a RST as Release should Close the\n+ // endpoint.\n+ c.SendPacket(nil, &context.Headers{\n+ SrcPort: context.TestPort,\n+ DstPort: c.Port,\n+ Flags: header.TCPFlagAck,\n+ SeqNum: iss,\n+ AckNum: c.IRS.Add(2),\n+ RcvWnd: 30000,\n+ })\n+\n+ checker.IPv4(t, c.GetPacket(),\n+ checker.TCP(\n+ checker.DstPort(context.TestPort),\n+ checker.TCPSeqNum(uint32(c.IRS)+2),\n+ checker.TCPAckNum(0),\n+ checker.TCPFlags(header.TCPFlagRst),\n+ ),\n+ )\n+}\n+\nfunc TestMain(m *testing.M) {\nrefs.SetLeakMode(refs.LeaksPanic)\ncode := m.Run()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Transition to closed state before releasing a tcp endpoint.
PiperOrigin-RevId: 438691888 |
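The point of this change is ordering: the endpoint is moved to the closed state and the protocol goroutine is tickled before the final reference is dropped, so a dangling endpoint that gets released behaves like a closed one and later segments elicit a RST (as the new test checks). The sketch below only illustrates that ordering; the types and method names are simplified stand-ins, not the real `tcp.endpoint` API.

```go
// Illustrative sketch of the Release ordering: close first, notify the
// worker, then release.
package main

import "fmt"

type endpoint struct {
	state    string
	notified []string
}

func (e *endpoint) transitionToStateCloseLocked()     { e.state = "CLOSED" }
func (e *endpoint) notifyProtocolGoroutine(n string)  { e.notified = append(e.notified, n) }
func (e *endpoint) releaseLocked()                    { fmt.Println("released in state", e.state) }

// Release mirrors the fixed ordering from the diff above.
func (e *endpoint) Release() {
	e.transitionToStateCloseLocked()
	e.notifyProtocolGoroutine("tickleWorker")
	e.releaseLocked()
}

func main() {
	e := &endpoint{state: "FIN-WAIT-1"}
	e.Release()
}
```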
259,982 | 01.04.2022 14:59:38 | 25,200 | 5b538f75e447d934ac81a1bfc167306ef8f55814 | Created tests for mounting tmpfs with size option enabled.
Tests include:
Allocating memory below and above the range for a single file.
Allocating memory above the range for multiple files. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/mount.cc",
"new_path": "test/syscalls/linux/mount.cc",
"diff": "#include <errno.h>\n#include <fcntl.h>\n#include <stdio.h>\n+#include <sys/eventfd.h>\n#include <sys/mount.h>\n+#include <sys/resource.h>\n+#include <sys/signalfd.h>\n#include <sys/stat.h>\n#include <unistd.h>\n#include <functional>\n+#include <iostream>\n#include <memory>\n+#include <ostream>\n#include <string>\n+#include <tuple>\n+#include <utility>\n#include <vector>\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n+#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/time/time.h\"\n@@ -433,6 +441,89 @@ TEST(MountTest, MountInfo) {\n}\n}\n+// TODO(b/29637826): Enable this test on gVisor once tmpfs supports size option.\n+TEST(MountTest, TmpfsSizeRoundUpSinglePageSize) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)) ||\n+ IsRunningOnGvisor());\n+ auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ auto tmpfs_size_opt = absl::StrCat(\"size=\", kPageSize / 2);\n+ auto const mount = ASSERT_NO_ERRNO_AND_VALUE(\n+ Mount(\"\", dir.path(), \"tmpfs\", 0, tmpfs_size_opt, 0));\n+ auto fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Open(JoinPath(dir.path(), \"foo\"), O_CREAT | O_RDWR, 0777));\n+\n+ // Check that it starts at size zero.\n+ struct stat buf;\n+ ASSERT_THAT(fstat(fd.get(), &buf), SyscallSucceeds());\n+ EXPECT_EQ(buf.st_size, 0);\n+\n+ // Grow to 1 Page Size.\n+ ASSERT_THAT(fallocate(fd.get(), 0, 0, kPageSize), SyscallSucceeds());\n+ ASSERT_THAT(fstat(fd.get(), &buf), SyscallSucceeds());\n+ EXPECT_EQ(buf.st_size, kPageSize);\n+\n+ // Grow to size beyond tmpfs allocated bytes.\n+ ASSERT_THAT(fallocate(fd.get(), 0, 0, kPageSize + 1),\n+ SyscallFailsWithErrno(ENOSPC));\n+ ASSERT_THAT(fstat(fd.get(), &buf), SyscallSucceeds());\n+ EXPECT_EQ(buf.st_size, kPageSize);\n+}\n+\n+TEST(MountTest, TmpfsSizeRoundUpMultiplePages) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)) ||\n+ IsRunningOnGvisor());\n+ auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ auto page_multiple = 2;\n+ auto size = kPageSize * page_multiple;\n+ auto tmpfs_size_opt = absl::StrCat(\"size=\", size);\n+ auto const mount = ASSERT_NO_ERRNO_AND_VALUE(\n+ Mount(\"\", dir.path(), \"tmpfs\", 0, tmpfs_size_opt, 0));\n+ auto fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Open(JoinPath(dir.path(), \"foo\"), O_CREAT | O_RDWR, 0777));\n+\n+ // Check that it starts at size zero.\n+ struct stat buf;\n+ ASSERT_THAT(fstat(fd.get(), &buf), SyscallSucceeds());\n+ EXPECT_EQ(buf.st_size, 0);\n+\n+ // Grow to multiple of page size.\n+ ASSERT_THAT(fallocate(fd.get(), 0, 0, size), SyscallSucceeds());\n+ ASSERT_THAT(fstat(fd.get(), &buf), SyscallSucceeds());\n+ EXPECT_EQ(buf.st_size, size);\n+\n+ // Grow to beyond tmpfs size bytes.\n+ ASSERT_THAT(fallocate(fd.get(), 0, 0, size + 1),\n+ SyscallFailsWithErrno(ENOSPC));\n+ ASSERT_THAT(fstat(fd.get(), &buf), SyscallSucceeds());\n+ EXPECT_EQ(buf.st_size, size);\n+}\n+\n+TEST(MountTest, TmpfsSizeMoreThanSinglePgSZMultipleFiles) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)) ||\n+ IsRunningOnGvisor());\n+ auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ auto const page_multiple = 10;\n+ auto const size = kPageSize * page_multiple;\n+ auto tmpfs_size_opt = absl::StrCat(\"size=\", size);\n+ auto const mount = ASSERT_NO_ERRNO_AND_VALUE(\n+ Mount(\"\", dir.path(), \"tmpfs\", 0, tmpfs_size_opt, 0));\n+ for (int i = 0; i < page_multiple; i++) {\n+ auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(\n+ JoinPath(dir.path(), 
absl::StrCat(\"foo_\", i)), O_CREAT | O_RDWR, 0777));\n+ // Create buffer & Grow to 100 bytes.\n+ struct stat buf;\n+ ASSERT_THAT(fstat(fd.get(), &buf), SyscallSucceeds());\n+ ASSERT_THAT(fallocate(fd.get(), 0, 0, 100), SyscallSucceeds());\n+ ASSERT_THAT(fstat(fd.get(), &buf), SyscallSucceeds());\n+ EXPECT_EQ(buf.st_size, 100);\n+ }\n+ auto fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Open(JoinPath(dir.path(), absl::StrCat(\"foo_\", page_multiple + 1)),\n+ O_CREAT | O_RDWR, 0777));\n+ // Grow to beyond tmpfs size bytes after exhausting the size.\n+ ASSERT_THAT(fallocate(fd.get(), 0, 0, kPageSize),\n+ SyscallFailsWithErrno(ENOSPC));\n+}\n} // namespace\n} // namespace testing\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/posix_error.h",
"new_path": "test/util/posix_error.h",
"diff": "@@ -231,8 +231,8 @@ template <typename PosixErrorOrType>\nclass IsPosixErrorOkAndHoldsMatcherImpl\n: public ::testing::MatcherInterface<PosixErrorOrType> {\npublic:\n- using ValueType = typename std::remove_reference<decltype(\n- std::declval<PosixErrorOrType>().ValueOrDie())>::type;\n+ using ValueType = typename std::remove_reference<\n+ decltype(std::declval<PosixErrorOrType>().ValueOrDie())>::type;\ntemplate <typename InnerMatcher>\nexplicit IsPosixErrorOkAndHoldsMatcherImpl(InnerMatcher&& inner_matcher)\n@@ -435,7 +435,7 @@ IsPosixErrorOkAndHolds(InnerMatcher&& inner_matcher) {\n#define RETURN_IF_ERRNO(s) \\\ndo { \\\nif (!s.ok()) { \\\n- return s; \\\n+ return s.error(); \\\n} \\\n} while (false);\n"
}
] | Go | Apache License 2.0 | google/gvisor | Created tests for mounting tmpfs with size option enabled.
Tests include:
Allocating memory below and above the range for a single file.
Allocating memory above the range for multiple files.
PiperOrigin-RevId: 438918539 |
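The behaviour these new tests exercise can be reproduced outside gtest with a short program. This is a sketch only: it assumes Linux tmpfs semantics (the tests are skipped on gVisor until tmpfs supports the size option), needs root/CAP_SYS_ADMIN to mount, and "/tmp/tmpfs-size" is a hypothetical mount point.

```go
// Illustrative sketch: mount a tmpfs with an explicit size, then show that
// fallocate within the (page-rounded) limit succeeds and going past it fails
// with ENOSPC.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	const dir = "/tmp/tmpfs-size" // hypothetical mount point
	const pageSize = 4096

	if err := os.MkdirAll(dir, 0o755); err != nil {
		panic(err)
	}
	// size= is rounded up to a whole number of pages, so size=2048 still
	// allows a single 4096-byte file, mirroring TmpfsSizeRoundUpSinglePageSize.
	if err := unix.Mount("none", dir, "tmpfs", 0, fmt.Sprintf("size=%d", pageSize/2)); err != nil {
		panic(err)
	}
	defer unix.Unmount(dir, 0)

	f, err := os.Create(dir + "/foo")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	fmt.Println("within limit:", unix.Fallocate(int(f.Fd()), 0, 0, pageSize))   // expect <nil>
	fmt.Println("past limit:  ", unix.Fallocate(int(f.Fd()), 0, 0, pageSize+1)) // expect ENOSPC
}
```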