author | date | timezone | hash | message | mods | language | license | repo | original_message |
---|---|---|---|---|---|---|---|---|---|
259,992 | 22.11.2022 13:37:55 | 28,800 | 03e92949e540d686d96300f5ee3a61dbdaebc338 | Force sendfile(2) to copy data in test | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/sendfile.cc",
"new_path": "test/syscalls/linux/sendfile.cc",
"diff": "@@ -471,7 +471,7 @@ TEST(SendFileTest, SendToNotARegularFile) {\nASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));\n// Receive an error since a directory is not a regular file.\n- EXPECT_THAT(sendfile(outf.get(), inf.get(), nullptr, 0),\n+ EXPECT_THAT(sendfile(outf.get(), inf.get(), nullptr, 1),\nSyscallFailsWithErrno(EINVAL));\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Force sendfile(2) to copy data in test
PiperOrigin-RevId: 490324920 |
259,891 | 22.11.2022 16:17:29 | 28,800 | 5b7274a1fc081f038613583e82d17fdb68af6dc3 | gro: use packet size to infer MSS
Linux GRO simply uses the size of the first packet in a flow as the MSS. So
identically sized packets are coalesced until an oddly-sized one comes along.
See net/core/gro.c:dev_gro_receive and net/ipv4/tcp_offload.c:tcp_gro_receive.
Also fixed clobbering of the groPacket.idx field, which caused memory
corruption. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/gro.go",
"new_path": "pkg/tcpip/stack/gro.go",
"diff": "@@ -85,6 +85,8 @@ func (gb *groBucket) insert(pkt PacketBufferPtr, ipHdr header.IPv4, tcpHdr heade\nep: ep,\nipHdr: ipHdr,\ntcpHdr: tcpHdr,\n+ initialLength: ipHdr.TotalLength(),\n+ idx: groPkt.idx,\n}\ngb.count++\ngb.packets.PushBack(groPkt)\n@@ -99,7 +101,7 @@ func (gb *groBucket) removeOldest() PacketBufferPtr {\ngb.count--\ngb.allocIdxs[gb.count] = pkt.idx\nret := pkt.pkt\n- *pkt = groPacket{}\n+ pkt.reset()\nreturn ret\n}\n@@ -109,7 +111,7 @@ func (gb *groBucket) removeOne(pkt *groPacket) {\ngb.packets.Remove(pkt)\ngb.count--\ngb.allocIdxs[gb.count] = pkt.idx\n- *pkt = groPacket{}\n+ pkt.reset()\n}\n// findGROPacket returns the groPkt that matches ipHdr and tcpHdr, or nil if\n@@ -187,11 +189,24 @@ type groPacket struct {\n// ep is the endpoint to which the packet will be sent after GRO.\nep NetworkEndpoint\n+ // initialLength is the length of the first packet in the flow. It is\n+ // used as a best-effort guess at MSS: senders will send MSS-sized\n+ // packets until they run out of data, so we coalesce as long as\n+ // packets are the same size.\n+ initialLength uint16\n+\n// idx is the groPacket's index in its bucket packetsPrealloc. It is\n// immutable.\nidx int\n}\n+// reset resets all mutable fields of the groPacket.\n+func (pk *groPacket) reset() {\n+ *pk = groPacket{\n+ idx: pk.idx,\n+ }\n+}\n+\n// payloadSize is the payload size of the coalesced packet, which does not\n// include the network or transport headers.\nfunc (pk *groPacket) payloadSize() uint16 {\n@@ -271,7 +286,7 @@ func (gd *groDispatcher) setInterval(interval time.Duration) {\n}\n// dispatch sends pkt up the stack after it undergoes GRO coalescing.\n-func (gd *groDispatcher) dispatch(pkt PacketBufferPtr, netProto tcpip.NetworkProtocolNumber, ep NetworkEndpoint, mtu uint32) {\n+func (gd *groDispatcher) dispatch(pkt PacketBufferPtr, netProto tcpip.NetworkProtocolNumber, ep NetworkEndpoint) {\n// If GRO is disabled simply pass the packet along.\nif gd.intervalNS.Load() == 0 {\nep.HandlePacket(pkt)\n@@ -375,15 +390,18 @@ func (gd *groDispatcher) dispatch(pkt PacketBufferPtr, netProto tcpip.NetworkPro\npkt = PacketBufferPtr{}\n}\n- // Flush if the packet isn't MSS-sized or if certain flags are set. The\n- // reason for checking MSS equality is:\n- // - If the packet is smaller than the MSS, this is likely the end of\n- // some message. Peers will send MSS-sized packets until they have\n+ // Flush if the packet isn't the same size as the previous packets or\n+ // if certain flags are set. The reason for checking size equality is:\n+ // - If the packet is smaller than the others, this is likely the end\n+ // of some message. Peers will send MSS-sized packets until they have\n// insufficient data to do so.\n- // - If the packet is larger than MSS, this packet is either malformed,\n- // a local GSO packet, or has already been handled by host GRO.\n- // TODO(b/256037250): Use MSS instead of MTU.\n- flush := uint32(ipHdr.TotalLength()) != mtu || header.TCPFlags(flags)&(header.TCPFlagUrg|header.TCPFlagPsh|header.TCPFlagRst|header.TCPFlagSyn|header.TCPFlagFin) != 0\n+ // - If the packet is larger than the others, this packet is either\n+ // malformed, a local GSO packet, or has already been handled by host\n+ // GRO.\n+ flush := header.TCPFlags(flags)&(header.TCPFlagUrg|header.TCPFlagPsh|header.TCPFlagRst|header.TCPFlagSyn|header.TCPFlagFin) != 0\n+ if groPkt != nil {\n+ flush = flush || ipHdr.TotalLength() != groPkt.initialLength\n+ }\nswitch {\ncase flush && groPkt != nil:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -739,7 +739,7 @@ func (n *nic) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt Pac\npkt.RXChecksumValidated = n.NetworkLinkEndpoint.Capabilities()&CapabilityRXChecksumOffload != 0\n- n.gro.dispatch(pkt, protocol, networkEndpoint, n.NetworkLinkEndpoint.MTU())\n+ n.gro.dispatch(pkt, protocol, networkEndpoint)\n}\nfunc (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr, incoming bool) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | gro: use packet size to infer MSS
Linux GRO simply uses the size of the first packet in a flow as the MSS. So
identically sized packets are coalesced until an oddly-sized one comes along.
See net/core/gro.c:dev_gro_receive and net/ipv4/tcp_offload.c:tcp_gro_receive.
Also fixed clobbering of the groPacket.idx field, which caused memory
corruption.
PiperOrigin-RevId: 490361840 |
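The change above replaces an MTU-based flush check with a first-packet-size heuristic. Below is a minimal, self-contained sketch of that heuristic; the `flow` type and its fields are illustrative placeholders, not gVisor's actual `groPacket`:

```go
// Sketch of the size-based GRO flush heuristic described above, under the
// assumption that a flow remembers the total length of its first packet and
// coalesces only while later packets match that length.
package main

import "fmt"

type flow struct {
	initialLength uint16 // length of the first packet seen for this flow
	started       bool
}

// shouldFlush reports whether a packet of the given total length should end
// coalescing. specialFlags stands in for URG/PSH/RST/SYN/FIN being set.
func (f *flow) shouldFlush(totalLength uint16, specialFlags bool) bool {
	if specialFlags {
		return true // control/push flags always flush
	}
	if !f.started {
		f.initialLength = totalLength // first packet sets the implied MSS
		f.started = true
		return false
	}
	// A differently sized packet is either the end of a message (smaller)
	// or GSO/host-GRO output (larger); stop coalescing either way.
	return totalLength != f.initialLength
}

func main() {
	f := &flow{}
	for _, sz := range []uint16{1448, 1448, 1448, 512} {
		fmt.Printf("len=%d flush=%v\n", sz, f.shouldFlush(sz, false))
	}
}
```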
259,975 | 28.11.2022 16:12:10 | 28,800 | ae731e0394f571af8eba6eaacf89a7930755d329 | Don't use other process's scratch buffer.
If we use the remote process's scratch buffer in process_vm_readv|writev
calls, we will get data races. Use the local task's scratch buffer always. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_process_vm.go",
"new_path": "pkg/sentry/syscalls/linux/sys_process_vm.go",
"diff": "@@ -26,10 +26,9 @@ import (\ntype vmReadWriteOp int\nconst (\n- localReader vmReadWriteOp = iota\n- localWriter\n- remoteReader\n- remoteWriter\n+ localReadLocalWrite vmReadWriteOp = iota\n+ remoteReadLocalWrite\n+ localReadRemoteWrite\n)\n// ProcessVMReadv implements process_vm_readv(2).\n@@ -76,17 +75,16 @@ func processVMRW(t *kernel.Task, args arch.SyscallArguments, isWrite bool) (uint\nisRemote := localProcess == remoteProcess\n// For the write case, we read from the local process and write to the remote process.\n+ op := localReadLocalWrite\nif isWrite {\n- op := localReader\nif isRemote {\n- op = remoteReader\n+ op = remoteReadLocalWrite\n}\nreturn doProcessVMReadWrite(localProcess, remoteProcess, lvec, rvec, liovcnt, riovcnt, op)\n}\n// For the read case, we read from the remote process and write to the local process.\n- op := localWriter\nif isRemote {\n- op = remoteWriter\n+ op = localReadRemoteWrite\n}\nreturn doProcessVMReadWrite(remoteProcess, localProcess, rvec, lvec, riovcnt, liovcnt, op)\n}\n@@ -113,7 +111,16 @@ func doProcessVMReadWrite(rProcess, wProcess *kernel.Task, rAddr, wAddr hostarch\n}\n}\n- buf := rCtx.CopyScratchBuffer(bufSize)\n+ var buf []byte\n+ // We need to copy the called task's scratch buffer so we don't get a data race. If we are\n+ // reading a remote process's memory, then we are on the writer's task goroutine, so use\n+ // the write context's scratch buffer.\n+ if op == remoteReadLocalWrite {\n+ buf = wCtx.CopyScratchBuffer(bufSize)\n+ } else {\n+ buf = rCtx.CopyScratchBuffer(bufSize)\n+ }\n+\nfor _, rIovec := range rIovecs {\nif len(wIovecs) <= 0 {\nbreak\n@@ -163,12 +170,12 @@ func doProcessVMReadWrite(rProcess, wProcess *kernel.Task, rAddr, wAddr hostarch\nvar err error\nswitch op {\n- case remoteReader:\n+ case remoteReadLocalWrite:\nerr = rCtx.WithTaskMutexLocked(doProcessVMReadWriteMaybeLocked)\n- case remoteWriter:\n+ case localReadRemoteWrite:\nerr = wCtx.WithTaskMutexLocked(doProcessVMReadWriteMaybeLocked)\n- case localReader, localWriter:\n+ case localReadLocalWrite:\n// in the case of local reads/writes, we don't have to lock the task mutex, because we are\n// running on the top of the task's goroutine already.\nerr = doProcessVMReadWriteMaybeLocked()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't use other process's scratch buffer.
If we use the remote process's scratch buffer in process_vm_readv|writev
calls, we will get data races. Use the local task's scratch buffer always.
PiperOrigin-RevId: 491468904 |
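The fix above hinges on the rule that a task's scratch buffer may only be touched by the goroutine currently running that task, so a cross-process copy must borrow the buffer of the task doing the work, never the remote task's. The sketch below illustrates the idea under that assumption; the `task` type and `copyBetween` helper are hypothetical stand-ins for the sentry's real types:

```go
// Hedged sketch: the caller always uses its own scratch buffer for the copy.
package main

import "fmt"

type task struct {
	name    string
	scratch []byte // owned by this task's goroutine only
}

// copyBetween copies n bytes from src's memory to dst's memory. caller is the
// task whose goroutine executes the call, so only caller.scratch is safe.
func copyBetween(caller, src, dst *task, n int) {
	buf := caller.scratch[:n] // never src.scratch or dst.scratch unless caller
	// Reading from src and writing to dst would happen here; the actual
	// memory accesses are elided in this sketch.
	fmt.Printf("%s copies %d bytes %s -> %s via its own buffer\n",
		caller.name, len(buf), src.name, dst.name)
}

func main() {
	local := &task{name: "local", scratch: make([]byte, 4096)}
	remote := &task{name: "remote", scratch: make([]byte, 4096)}
	// process_vm_readv: the local task reads from remote and writes to itself.
	copyBetween(local, remote, local, 1024)
}
```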
259,891 | 29.11.2022 12:50:18 | 28,800 | b112db0675d7b40fafdb28cbd6ef83472590a1c1 | conntrack: don't nest conntrack locks | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/conntrack.go",
"new_path": "pkg/tcpip/stack/conntrack.go",
"diff": "@@ -548,10 +548,8 @@ func (ct *ConnTrack) getConnAndUpdate(pkt PacketBufferPtr, skipChecksumValidatio\n}\n}\n- bktID := ct.bucket(tid)\n-\nct.mu.RLock()\n- bkt := &ct.buckets[bktID]\n+ bkt := &ct.buckets[ct.bucket(tid)]\nct.mu.RUnlock()\nnow := ct.clock.NowMonotonic()\n@@ -603,10 +601,8 @@ func (ct *ConnTrack) getConnAndUpdate(pkt PacketBufferPtr, skipChecksumValidatio\n}\nfunc (ct *ConnTrack) connForTID(tid tupleID) *tuple {\n- bktID := ct.bucket(tid)\n-\nct.mu.RLock()\n- bkt := &ct.buckets[bktID]\n+ bkt := &ct.buckets[ct.bucket(tid)]\nct.mu.RUnlock()\nreturn bkt.connForTID(tid, ct.clock.NowMonotonic())\n@@ -635,7 +631,7 @@ func (ct *ConnTrack) finalize(cn *conn) finalizeResult {\n{\ntid := cn.reply.tupleID\n- id := ct.bucket(tid)\n+ id := ct.bucketWithTableLength(tid, len(buckets))\nbkt := &buckets[id]\nbkt.mu.Lock()\n@@ -663,7 +659,7 @@ func (ct *ConnTrack) finalize(cn *conn) finalizeResult {\n// better.\ntid := cn.original.tupleID\n- id := ct.bucket(tid)\n+ id := ct.bucketWithTableLength(tid, len(buckets))\nbkt := &buckets[id]\nbkt.mu.Lock()\ndefer bkt.mu.Unlock()\n@@ -978,7 +974,12 @@ func (cn *conn) handlePacket(pkt PacketBufferPtr, hook Hook, rt *Route) bool {\n}\n// bucket gets the conntrack bucket for a tupleID.\n+// +checklocksread:ct.mu\nfunc (ct *ConnTrack) bucket(id tupleID) int {\n+ return ct.bucketWithTableLength(id, len(ct.buckets))\n+}\n+\n+func (ct *ConnTrack) bucketWithTableLength(id tupleID, tableLength int) int {\nh := jenkins.Sum32(ct.seed)\nh.Write([]byte(id.srcAddr))\nh.Write([]byte(id.dstAddr))\n@@ -991,9 +992,7 @@ func (ct *ConnTrack) bucket(id tupleID) int {\nh.Write([]byte(shortBuf))\nbinary.LittleEndian.PutUint16(shortBuf, uint16(id.netProto))\nh.Write([]byte(shortBuf))\n- ct.mu.RLock()\n- defer ct.mu.RUnlock()\n- return int(h.Sum32()) % len(ct.buckets)\n+ return int(h.Sum32()) % tableLength\n}\n// reapUnused deletes timed out entries from the conntrack map. The rules for\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables_test.go",
"new_path": "pkg/tcpip/stack/iptables_test.go",
"diff": "@@ -147,8 +147,10 @@ func TestNATedConnectionReap(t *testing.T) {\n}\nreplyTID := invertedReplyTID.reply()\n+ iptables.connections.mu.RLock()\noriginalBktID := iptables.connections.bucket(originalTID)\nreplyBktID := iptables.connections.bucket(replyTID)\n+ iptables.connections.mu.RUnlock()\n// This test depends on the original and reply tuples mapping to different\n// buckets.\n"
}
] | Go | Apache License 2.0 | google/gvisor | conntrack: don't nest conntrack locks
PiperOrigin-RevId: 491713226 |
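The refactor above avoids taking `ct.mu` inside the hash function by letting callers pass the table length they already read under their own lock. Below is a simplified, hypothetical sketch of that pattern using FNV instead of Jenkins hashing; the `table` type and function names are illustrative, not gVisor's:

```go
// Sketch: the hash function takes no locks; callers that already hold the
// table lock pass the length they read themselves, so locks never nest.
package main

import (
	"fmt"
	"hash/fnv"
	"sync"
)

type table struct {
	mu      sync.RWMutex
	buckets []int
}

// bucketWithLength hashes key into one of length buckets without locking.
func bucketWithLength(key string, length int) int {
	h := fnv.New32a()
	h.Write([]byte(key))
	return int(h.Sum32()) % length
}

// bucket is the convenience form for callers that already hold t.mu.
func (t *table) bucket(key string) int {
	return bucketWithLength(key, len(t.buckets))
}

func main() {
	t := &table{buckets: make([]int, 8)}
	t.mu.RLock()
	idx := t.bucket("flow") // safe: no second lock acquisition inside bucket
	t.mu.RUnlock()
	fmt.Println(idx)
}
```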
259,868 | 29.11.2022 13:03:52 | 28,800 | 1ceee8c31071beacf7d28757ffa43da302dc570e | connectioned: Change nested lock name to only have a single nested lock.
Now there's the unnamed one, and "higherID" which is the lock for the
endpoint that has a higher ID.
The lock dependency should consistently be "lower ID" -> "higher ID". | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/unix/transport/BUILD",
"new_path": "pkg/sentry/socket/unix/transport/BUILD",
"diff": "@@ -22,8 +22,7 @@ declare_mutex(\nname = \"endpoint_mutex\",\nout = \"endpoint_mutex.go\",\nnested_lock_names = [\n- \"e\",\n- \"ce\",\n+ \"higherID\",\n],\npackage = \"transport\",\nprefix = \"endpoint\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/unix/transport/connectioned.go",
"new_path": "pkg/sentry/socket/unix/transport/connectioned.go",
"diff": "@@ -288,27 +288,27 @@ func (e *connectionedEndpoint) BidirectionalConnect(ctx context.Context, ce Conn\n// Do a dance to safely acquire locks on both endpoints.\nif e.id < ce.ID() {\ne.Lock()\n- ce.NestedLock(endpointLockCe)\n+ ce.NestedLock(endpointLockHigherid)\n} else {\nce.Lock()\n- e.NestedLock(endpointLockE)\n+ e.NestedLock(endpointLockHigherid)\n}\n// Check connecting state.\nif ce.Connected() {\n- e.NestedUnlock(endpointLockE)\n+ e.NestedUnlock(endpointLockHigherid)\nce.Unlock()\nreturn syserr.ErrAlreadyConnected\n}\nif ce.ListeningLocked() {\n- e.NestedUnlock(endpointLockE)\n+ e.NestedUnlock(endpointLockHigherid)\nce.Unlock()\nreturn syserr.ErrInvalidEndpointState\n}\n// Check bound state.\nif !e.ListeningLocked() {\n- e.NestedUnlock(endpointLockE)\n+ e.NestedUnlock(endpointLockHigherid)\nce.Unlock()\nreturn syserr.ErrConnectionRefused\n}\n@@ -359,7 +359,7 @@ func (e *connectionedEndpoint) BidirectionalConnect(ctx context.Context, ce Conn\n}\n// Notify can deadlock if we are holding these locks.\n- e.NestedUnlock(endpointLockE)\n+ e.NestedUnlock(endpointLockHigherid)\nce.Unlock()\n// Notify on both ends.\n@@ -369,7 +369,7 @@ func (e *connectionedEndpoint) BidirectionalConnect(ctx context.Context, ce Conn\nreturn nil\ndefault:\n// Busy; return EAGAIN per spec.\n- e.NestedUnlock(endpointLockE)\n+ e.NestedUnlock(endpointLockHigherid)\nce.Unlock()\nne.Close(ctx)\nreturn syserr.ErrTryAgain\n"
}
] | Go | Apache License 2.0 | google/gvisor | connectioned: Change nested lock name to only have a single nested lock.
Now there's the unnamed one, and "higherID" which is the lock for the
endpoint that has a higher ID.
The lock dependency should consistently be "lower ID" -> "higher ID".
PiperOrigin-RevId: 491716600 |
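The "lower ID" -> "higher ID" ordering described above is the classic way to lock two peers without deadlocking: whichever endpoint initiates, the locks are always taken in the same global order. A small hedged sketch of that ordering with illustrative types rather than gVisor's `connectionedEndpoint`:

```go
// Sketch of consistent lock ordering by endpoint ID.
package main

import (
	"fmt"
	"sync"
)

type endpoint struct {
	id uint64
	mu sync.Mutex
}

// lockBoth acquires the endpoint with the lower ID first, so two goroutines
// connecting a<->b in opposite directions can never deadlock on each other.
func lockBoth(a, b *endpoint) (unlock func()) {
	first, second := a, b
	if b.id < a.id {
		first, second = b, a
	}
	first.mu.Lock()
	second.mu.Lock()
	return func() {
		second.mu.Unlock()
		first.mu.Unlock()
	}
}

func main() {
	x := &endpoint{id: 1}
	y := &endpoint{id: 2}
	unlock := lockBoth(y, x) // still acquires x (lower ID) before y
	defer unlock()
	fmt.Println("both endpoints locked in ID order")
}
```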
259,975 | 29.11.2022 13:04:39 | 28,800 | bfbb9fa4cce0c4ce599dd218927d40d25394f4ed | Disable process_vm_(read|write)v.
Syzkaller has found several issues with the two syscalls and a rework is
required. Disable tests and the syscall until issues can be fixed. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/linux64.go",
"new_path": "pkg/sentry/syscalls/linux/linux64.go",
"diff": "@@ -362,8 +362,8 @@ var AMD64 = &kernel.SyscallTable{\n307: syscalls.Supported(\"sendmmsg\", SendMMsg),\n308: syscalls.ErrorWithEvent(\"setns\", linuxerr.EOPNOTSUPP, \"Needs filesystem support\", []string{\"gvisor.dev/issue/140\"}), // TODO(b/29354995)\n309: syscalls.Supported(\"getcpu\", Getcpu),\n- 310: syscalls.Supported(\"process_vm_readv\", ProcessVMReadv),\n- 311: syscalls.Supported(\"process_vm_writev\", ProcessVMWritev),\n+ 310: syscalls.ErrorWithEvent(\"process_vm_readv\", linuxerr.ENOSYS, \"\", []string{\"gvisor.dev/issue/158\"}), // TODO(b/260724654)\n+ 311: syscalls.ErrorWithEvent(\"process_vm_writev\", linuxerr.ENOSYS, \"\", []string{\"gvisor.dev/issue/158\"}), // TODO(b/260724654)\n312: syscalls.CapError(\"kcmp\", linux.CAP_SYS_PTRACE, \"\", nil),\n313: syscalls.CapError(\"finit_module\", linux.CAP_SYS_MODULE, \"\", nil),\n314: syscalls.ErrorWithEvent(\"sched_setattr\", linuxerr.ENOSYS, \"gVisor does not implement a scheduler.\", []string{\"gvisor.dev/issue/264\"}), // TODO(b/118902272)\n@@ -685,8 +685,8 @@ var ARM64 = &kernel.SyscallTable{\n267: syscalls.Supported(\"syncfs\", Syncfs),\n268: syscalls.ErrorWithEvent(\"setns\", linuxerr.EOPNOTSUPP, \"Needs filesystem support\", []string{\"gvisor.dev/issue/140\"}), // TODO(b/29354995)\n269: syscalls.Supported(\"sendmmsg\", SendMMsg),\n- 270: syscalls.Supported(\"process_vm_readv\", ProcessVMReadv),\n- 271: syscalls.Supported(\"process_vm_writev\", ProcessVMWritev),\n+ 270: syscalls.ErrorWithEvent(\"process_vm_readv\", linuxerr.ENOSYS, \"\", []string{\"gvisor.dev/issue/158\"}), // TODO(b/260724654)\n+ 271: syscalls.ErrorWithEvent(\"process_vm_writev\", linuxerr.ENOSYS, \"\", []string{\"gvisor.dev/issue/158\"}), // TODO(b/260724654)\n272: syscalls.CapError(\"kcmp\", linux.CAP_SYS_PTRACE, \"\", nil),\n273: syscalls.CapError(\"finit_module\", linux.CAP_SYS_MODULE, \"\", nil),\n274: syscalls.ErrorWithEvent(\"sched_setattr\", linuxerr.ENOSYS, \"gVisor does not implement a scheduler.\", []string{\"gvisor.dev/issue/264\"}), // TODO(b/118902272)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -1084,8 +1084,3 @@ syscall_test(\nsize = \"small\",\ntest = \"//test/syscalls/linux:close_range_test\",\n)\n-\n-syscall_test(\n- size = \"small\",\n- test = \"//test/syscalls/linux:process_vm_read_write\",\n-)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Disable process_vm_(read|write)v.
Syzkaller has found several issues with the two syscalls and a rework is
required. Disable tests and the syscall until issues can be fixed.
PiperOrigin-RevId: 491716795 |
259,909 | 29.11.2022 15:09:37 | 28,800 | ece02b45b5b4f3c67a98f684cede3bbdd18344dd | Add a maximum to the total number of mounts allowed in a namespace.
The limit is the same as the default for /proc/sys/fs/mount-max.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/BUILD",
"new_path": "pkg/sentry/vfs/BUILD",
"diff": "@@ -151,6 +151,7 @@ go_library(\n\"//pkg/abi/linux\",\n\"//pkg/atomicbitops\",\n\"//pkg/bitmap\",\n+ \"//pkg/cleanup\",\n\"//pkg/context\",\n\"//pkg/errors/linuxerr\",\n\"//pkg/fd\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/mount.go",
"new_path": "pkg/sentry/vfs/mount.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/atomicbitops\"\n+ \"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n\"gvisor.dev/gvisor/pkg/refsvfs2\"\n@@ -46,6 +47,11 @@ const (\nChild\n// Unbindable represents the unbindable propagation type.\nUnbindable\n+\n+ // MountMax is the maximum number of mounts allowed. In Linux this can be\n+ // configured by the user at /proc/sys/fs/mount-max, but the default is\n+ // 100,000. We set the gVisor limit to 10,000.\n+ MountMax = 10000\n)\n// PropagationTypeFromLinux returns the PropagationType corresponding to a\n@@ -273,6 +279,9 @@ type MountNamespace struct {\n// VFS.PrepareDeleteDentry() and VFS.PrepareRemoveDentry() operate\n// correctly on unreferenced MountNamespaces.\nmountpoints map[*Dentry]uint32\n+\n+ // mounts is the total number of mounts in this mount namespace.\n+ mounts uint32\n}\n// NewMountNamespace returns a new mount namespace with a root filesystem\n@@ -349,11 +358,20 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr\nvfs.mountMu.Lock()\ndefer vfs.mountMu.Unlock()\ntree := vfs.preparePropagationTree(mnt, vd)\n+\n+ cleanup := cleanup.Make(func() {\n+ vfs.abortPropagationTree(ctx, tree) // +checklocksforce\n+ })\n+ defer cleanup.Clean()\n+ // Check if the new mount + all the propagation mounts puts us over the max.\n+ if uint32(len(tree)+1)+vd.mount.ns.mounts > MountMax {\n+ return linuxerr.ENOSPC\n+ }\nif err := vfs.connectMountAt(ctx, mnt, vd); err != nil {\n- vfs.abortPropagationTree(ctx, tree)\nreturn err\n}\nvfs.commitPropagationTree(ctx, tree)\n+ cleanup.Release()\nreturn nil\n}\n@@ -572,12 +590,21 @@ func (vfs *VirtualFilesystem) BindAt(ctx context.Context, creds *auth.Credential\nvfs.mergePeerGroup(sourceVd.mount, clone)\n}\n}\n+ cleanup := cleanup.Make(func() {\n+ // Checklocks doesn't work with anon functions.\n+ vfs.setPropagation(clone, Private) // +checklocksforce\n+ vfs.abortPropagationTree(ctx, tree) // +checklocksforce\n+ targetVd.DecRef(ctx)\n+ })\n+ defer cleanup.Clean()\n+ if uint32(1+len(tree))+targetVd.mount.ns.mounts > MountMax {\n+ return nil, linuxerr.ENOSPC\n+ }\nif err := vfs.connectMountAt(ctx, clone, targetVd); err != nil {\n- vfs.setPropagation(clone, Private)\n- vfs.abortPropagationTree(ctx, tree)\nreturn nil, err\n}\nvfs.commitPropagationTree(ctx, tree)\n+ cleanup.Release()\nreturn clone, nil\n}\n@@ -795,6 +822,7 @@ func (vfs *VirtualFilesystem) connectLocked(mnt *Mount, vd VirtualDentry, mntns\nvd.dentry.mounts.Add(1)\nmnt.ns = mntns\nmntns.mountpoints[vd.dentry]++\n+ mntns.mounts++\nvfs.mounts.insertSeqed(mnt)\nvfsmpmounts, ok := vfs.mountpoints[vd.dentry]\nif !ok {\n@@ -817,11 +845,18 @@ func (vfs *VirtualFilesystem) disconnectLocked(mnt *Mount) VirtualDentry {\nif vd.mount != nil {\npanic(\"VFS.disconnectLocked called on disconnected mount\")\n}\n+ if mnt.ns.mountpoints[vd.dentry] == 0 {\n+ panic(\"VFS.disconnectLocked called on dentry with zero mountpoints.\")\n+ }\n+ if mnt.ns.mounts == 0 {\n+ panic(\"VFS.disconnectLocked called on namespace with zero mounts.\")\n+ }\n}\nmnt.loadKey(VirtualDentry{})\ndelete(vd.mount.children, mnt)\nvd.dentry.mounts.Add(math.MaxUint32) // -1\nmnt.ns.mountpoints[vd.dentry]--\n+ mnt.ns.mounts--\nif mnt.ns.mountpoints[vd.dentry] == 0 {\ndelete(mnt.ns.mountpoints, vd.dentry)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/mount.cc",
"new_path": "test/syscalls/linux/mount.cc",
"diff": "#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"absl/strings/match.h\"\n+#include \"absl/strings/numbers.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\n@@ -822,6 +823,46 @@ TEST(MountTest, BindToSelf) {\nASSERT_TRUE(found);\n}\n+TEST(MountTest, MaxMounts) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+\n+ auto const parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ ASSERT_THAT(mount(\"\", parent.path().c_str(), \"tmpfs\", 0, \"\"),\n+ SyscallSucceeds());\n+ auto const dir =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(parent.path()));\n+ ASSERT_THAT(\n+ mount(dir.path().c_str(), dir.path().c_str(), nullptr, MS_BIND, nullptr),\n+ SyscallSucceeds());\n+ ASSERT_THAT(mount(\"\", dir.path().c_str(), \"\", MS_SHARED, \"\"),\n+ SyscallSucceeds());\n+\n+ // Each bind mount doubles the number of mounts in the peer group. The number\n+ // of binds we can do before failing is log2(max_mounts-num_current_mounts).\n+ int mount_max = 10000;\n+ bool mount_max_exists =\n+ ASSERT_NO_ERRNO_AND_VALUE(Exists(\"/proc/sys/fs/mount-max\"));\n+ if (mount_max_exists) {\n+ std::string mount_max_string;\n+ ASSERT_NO_ERRNO(GetContents(\"/proc/sys/fs/mount-max\", &mount_max_string));\n+ ASSERT_TRUE(absl::SimpleAtoi(mount_max_string, &mount_max));\n+ }\n+\n+ const std::vector<ProcMountInfoEntry> mounts =\n+ ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountInfoEntries());\n+ int num_binds = static_cast<int>(std::log2(mount_max - mounts.size()));\n+\n+ for (int i = 0; i < num_binds; i++) {\n+ ASSERT_THAT(mount(dir.path().c_str(), dir.path().c_str(), nullptr, MS_BIND,\n+ nullptr),\n+ SyscallSucceeds());\n+ }\n+ ASSERT_THAT(\n+ mount(dir.path().c_str(), dir.path().c_str(), nullptr, MS_BIND, nullptr),\n+ SyscallFailsWithErrno(ENOSPC));\n+ umount2(parent.path().c_str(), MNT_DETACH);\n+}\n+\n// Tests that it is possible to make a shared mount.\nTEST(MountTest, MakeShared) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add a maximum to the total number of mounts allowed in a namespace.
The limit is the same as the default for /proc/sys/fs/mount-max.
Reported-by: syzbot+ae4591a5d362a6701e40@syzkaller.appspotmail.com
PiperOrigin-RevId: 491748452 |
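The new `MountMax` constant is enforced by comparing the namespace's current mount count plus the incoming mounts (including propagated copies) against the limit. A minimal sketch of that check follows, with hypothetical names and without any locking or propagation-tree logic:

```go
// Sketch of a per-namespace mount-count limit check returning ENOSPC-style
// errors; mountMax mirrors the default /proc/sys/fs/mount-max behavior.
package main

import (
	"errors"
	"fmt"
)

const mountMax = 10000 // analogous to gVisor's 10,000 limit above

var errNoSpace = errors.New("ENOSPC: mount limit reached")

type mountNamespace struct {
	mounts uint32 // total mounts currently in this namespace
}

// addMounts admits n new mounts (the mount plus any propagated copies) only
// if the namespace stays at or below the limit.
func (ns *mountNamespace) addMounts(n uint32) error {
	if ns.mounts+n > mountMax {
		return errNoSpace
	}
	ns.mounts += n
	return nil
}

func main() {
	ns := &mountNamespace{mounts: 9999}
	fmt.Println(ns.addMounts(1)) // <nil>
	fmt.Println(ns.addMounts(2)) // ENOSPC: mount limit reached
}
```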
259,982 | 29.11.2022 17:23:05 | 28,800 | 50f04e5aac0dce17f67dfd92a5d0b34e230cfa81 | Adding a LogEmit in event.go.
This creates a single function that logs
a warning in case of an error. | [
{
"change_type": "MODIFY",
"old_path": "pkg/eventchannel/event.go",
"new_path": "pkg/eventchannel/event.go",
"diff": "@@ -54,6 +54,16 @@ func Emit(msg proto.Message) error {\nreturn err\n}\n+// LogEmit is a helper method that calls DefaultEmitter.Emit.\n+// It also logs a warning message when an error occurs.\n+func LogEmit(msg proto.Message) error {\n+ _, err := DefaultEmitter.Emit(msg)\n+ if err != nil {\n+ log.Warningf(\"unable to emit event: %s\", err)\n+ }\n+ return err\n+}\n+\n// AddEmitter is a helper method that calls DefaultEmitter.AddEmitter.\nfunc AddEmitter(e Emitter) {\nDefaultEmitter.AddEmitter(e)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Adding a LogEmit in event.go.
This creates a single function that logs
a warning in case of an error.
PiperOrigin-RevId: 491777874 |
259,985 | 30.11.2022 08:11:32 | 28,800 | 62ddad611979724c11372a5f49e2a986349279f2 | cgroupfs: Fix several races with task migration.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/pids.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/pids.go",
"diff": "@@ -161,14 +161,23 @@ func (c *pidsController) Leave(t *kernel.Task) {\n// PrepareMigrate implements controller.PrepareMigrate.\nfunc (c *pidsController) PrepareMigrate(t *kernel.Task, src controller) error {\n+ srcC := src.(*pidsController)\n+ srcC.mu.Lock()\n+ defer srcC.mu.Unlock()\n+\n+ if _, ok := srcC.pendingPool[t]; ok {\n+ // Migrating task isn't fully initialized, return transient failure.\n+ return linuxerr.EAGAIN\n+ }\n+\nreturn nil\n}\n// CommitMigrate implements controller.CommitMigrate.\n//\n-// Migrations can cause a cgroup to exceed its limit. Migration can only be\n-// called for tasks with committed charges, as it is not possible to migrate a\n-// task prior to Enter.\n+// Migrations can cause a cgroup to exceed its limit. CommitMigrate can only be\n+// called for tasks with committed charges, PrepareMigrate will deny migrations\n+// prior to Enter.\nfunc (c *pidsController) CommitMigrate(t *kernel.Task, src controller) {\n// Note: The charge is allowed to exceed max on migration. The charge may\n// not exceed max when incurred due to a fork/clone, which will call\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_cgroup.go",
"new_path": "pkg/sentry/kernel/task_cgroup.go",
"diff": "@@ -89,6 +89,7 @@ func (t *Task) enterCgroupIfNotYetLocked(c Cgroup) {\n// LeaveCgroups removes t out from all its cgroups.\nfunc (t *Task) LeaveCgroups() {\n+ t.tg.pidns.owner.mu.Lock() // Prevent migration.\nt.mu.Lock()\ncgs := t.cgroups\nt.cgroups = nil\n@@ -96,6 +97,8 @@ func (t *Task) LeaveCgroups() {\nc.Leave(t)\n}\nt.mu.Unlock()\n+ t.tg.pidns.owner.mu.Unlock()\n+\nfor c := range cgs {\nc.decRef()\n}\n@@ -229,33 +232,24 @@ func (t *Task) GenerateProcTaskCgroup(buf *bytes.Buffer) {\n}\n// +checklocks:t.mu\n-func (t *Task) chargeLocked(target *Task, ctl CgroupControllerType, res CgroupResourceType, value int64) (bool, uint32, error) {\n+func (t *Task) chargeLocked(target *Task, ctl CgroupControllerType, res CgroupResourceType, value int64) (bool, Cgroup, error) {\n// Due to the uniqueness of controllers on hierarchies, at most one cgroup\n// in t.cgroups will match.\nfor c := range t.cgroups {\nerr := c.Charge(target, c.Dentry, ctl, res, value)\n- return err == nil, c.HierarchyID(), err\n+ if err == nil {\n+ c.IncRef()\n}\n- return false, InvalidCgroupHierarchyID, nil\n+ return err == nil, c, err\n}\n-\n-// ChargeFor charges t's cgroup on behalf of some other task.\n-func (t *Task) ChargeFor(other *Task, ctl CgroupControllerType, res CgroupResourceType, value int64) (bool, uint32, error) {\n- t.mu.Lock()\n- defer t.mu.Unlock()\n- return t.chargeLocked(other, ctl, res, value)\n+ return false, Cgroup{}, nil\n}\n-// ChargeForOnHierarchy is like ChargeFor, but only charges a cgroup with the\n-// matching hierarhcyID. This can be useful when reversing a charge across\n-// potential hierachy changes.\n-func (t *Task) ChargeForOnHierarchy(other *Task, hierarhcyID uint32, ctl CgroupControllerType, res CgroupResourceType, value int64) (bool, uint32, error) {\n+// ChargeFor charges t's cgroup on behalf of some other task. Returns\n+// the cgroup that's charged if any. Returned cgroup has an extra ref\n+// that's transferred to the caller.\n+func (t *Task) ChargeFor(other *Task, ctl CgroupControllerType, res CgroupResourceType, value int64) (bool, Cgroup, error) {\nt.mu.Lock()\ndefer t.mu.Unlock()\n- for c := range t.cgroups {\n- if c.HierarchyID() == hierarhcyID {\nreturn t.chargeLocked(other, ctl, res, value)\n}\n- }\n- return false, InvalidCgroupHierarchyID, nil\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_start.go",
"new_path": "pkg/sentry/kernel/task_start.go",
"diff": "@@ -176,7 +176,11 @@ func (ts *TaskSet) newTask(ctx context.Context, cfg *TaskConfig) (*Task, error)\n// We don't construct t.blockingTimer until Task.run(); see that function\n// for justification.\n- var cu cleanup.Cleanup\n+ var (\n+ cg Cgroup\n+ charged bool\n+ cu cleanup.Cleanup\n+ )\ndefer cu.Clean()\n// Reserve cgroup PIDs controller charge. This is either commited when the\n@@ -187,22 +191,16 @@ func (ts *TaskSet) newTask(ctx context.Context, cfg *TaskConfig) (*Task, error)\n// we skip charging the pids controller, as non-userspace task creation\n// bypasses pid limits.\nif srcT != nil {\n- var (\n- charged bool\n- err error\n- hid uint32\n- )\n- if charged, hid, err = srcT.ChargeFor(t, CgroupControllerPIDs, CgroupResourcePID, 1); err != nil {\n+ var err error\n+ if charged, cg, err = srcT.ChargeFor(t, CgroupControllerPIDs, CgroupResourcePID, 1); err != nil {\nreturn nil, err\n}\nif charged {\ncu.Add(func() {\n- // Since ts.mu was dropped after the corresponding charge, the\n- // hierarchy referenced by hid may no longer exist. If so, this\n- // uncharge will be a no-op.\n- if _, _, err := srcT.ChargeForOnHierarchy(t, hid, CgroupControllerPIDs, CgroupResourcePID, -1); err != nil {\n+ if err := cg.Charge(t, cg.Dentry, CgroupControllerPIDs, CgroupResourcePID, -1); err != nil {\npanic(fmt.Sprintf(\"Failed to clean up PIDs charge on task creation failure: %v\", err))\n}\n+ cg.DecRef(ctx) // Ref from ChargeFor.\n})\n}\n}\n@@ -241,6 +239,11 @@ func (ts *TaskSet) newTask(ctx context.Context, cfg *TaskConfig) (*Task, error)\n// srcT may be nil, in which case we default to root cgroups.\nt.EnterInitialCgroups(srcT)\n+ cu.Release()\n+ if charged {\n+ cg.decRef() // Ref from ChargeFor.\n+ }\n+\nif tg.leader == nil {\n// New thread group.\ntg.leader = t\n@@ -272,7 +275,6 @@ func (ts *TaskSet) newTask(ctx context.Context, cfg *TaskConfig) (*Task, error)\n// other pieces to be initialized as the task is used the context.\nt.p = cfg.Kernel.Platform.NewContext(t.AsyncContext())\n- cu.Release()\nreturn t, nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | cgroupfs: Fix several races with task migration.
Reported-by: syzbot+0be09ce607731f085f73@syzkaller.appspotmail.com
PiperOrigin-RevId: 491920581 |
259,909 | 30.11.2022 10:25:01 | 28,800 | 3dca16ed354fe0dd88fbb42bdaa894c569fb4b8e | Separate propagation methods/constants into their own file.
mount.go is becoming bloated, we should break it into smaller files
where it makes sense. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/BUILD",
"new_path": "pkg/sentry/vfs/BUILD",
"diff": "@@ -140,6 +140,7 @@ go_library(\n\"options.go\",\n\"pathname.go\",\n\"permissions.go\",\n+ \"propagation.go\",\n\"resolving_path.go\",\n\"save_restore.go\",\n\"shared_list.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/mount.go",
"new_path": "pkg/sentry/vfs/mount.go",
"diff": "@@ -30,46 +30,10 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n)\n-// PropagationType is a propagation flavor as described in\n-// https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt. Child\n-// and Unbindable are currently unimplemented.\n-// TODO(b/249777195): Support MS_SLAVE and MS_UNBINDABLE propagation types.\n-type PropagationType int\n-\n-const (\n- // Unknown represents an invalid/unknown propagation type.\n- Unknown PropagationType = iota\n- // Shared represents the shared propagation type.\n- Shared\n- // Private represents the private propagation type.\n- Private\n- // Child represents the child propagation type (MS_SLAVE).\n- Child\n- // Unbindable represents the unbindable propagation type.\n- Unbindable\n-\n// MountMax is the maximum number of mounts allowed. In Linux this can be\n// configured by the user at /proc/sys/fs/mount-max, but the default is\n// 100,000. We set the gVisor limit to 10,000.\n- MountMax = 10000\n-)\n-\n-// PropagationTypeFromLinux returns the PropagationType corresponding to a\n-// linux mount flag, aka MS_SHARED.\n-func PropagationTypeFromLinux(propFlag uint64) PropagationType {\n- switch propFlag {\n- case linux.MS_SHARED:\n- return Shared\n- case linux.MS_PRIVATE:\n- return Private\n- case linux.MS_SLAVE:\n- return Child\n- case linux.MS_UNBINDABLE:\n- return Unbindable\n- default:\n- return Unknown\n- }\n-}\n+const MountMax = 10000\n// A Mount is a replacement of a Dentry (Mount.key.point) from one Filesystem\n// (Mount.key.parent.fs) with a Dentry (Mount.root) from another Filesystem\n@@ -196,60 +160,6 @@ func (mnt *Mount) generateOptionalTags() string {\nreturn optional\n}\n-// addPeer adds oth to mnt's peer group. Both will have the same groupID\n-// and sharedList. vfs.mountMu must be locked.\n-//\n-// +checklocks:vfs.mountMu\n-func (vfs *VirtualFilesystem) addPeer(mnt *Mount, oth *Mount) {\n- mnt.sharedList.PushBack(oth)\n- oth.sharedList = mnt.sharedList\n- oth.propType = mnt.propType\n- oth.groupID = mnt.groupID\n-}\n-\n-// mergePeerGroup merges oth and all its peers into mnt's peer group. Oth\n-// must have propagation type shared and vfs.mountMu must be locked.\n-//\n-// +checklocks:vfs.mountMu\n-func (vfs *VirtualFilesystem) mergePeerGroup(mnt *Mount, oth *Mount) {\n- peer := oth.sharedList.Front()\n- for peer != nil {\n- next := peer.sharedEntry.Next()\n- vfs.setPropagation(peer, Private)\n- vfs.addPeer(mnt, peer)\n- peer = next\n- }\n-}\n-\n-// setPropagation sets the propagation on mnt for a propagation type.\n-//\n-// +checklocks:vfs.mountMu\n-func (vfs *VirtualFilesystem) setPropagation(mnt *Mount, ptype PropagationType) error {\n- switch ptype {\n- case Shared:\n- id, err := vfs.allocateGroupID()\n- if err != nil {\n- return err\n- }\n- mnt.groupID = id\n- mnt.sharedList = &sharedList{}\n- mnt.sharedList.PushBack(mnt)\n- case Private:\n- if mnt.propType == Shared {\n- mnt.sharedList.Remove(mnt)\n- if mnt.sharedList.Empty() {\n- vfs.freeGroupID(mnt.groupID)\n- }\n- mnt.sharedList = nil\n- mnt.groupID = 0\n- }\n- default:\n- panic(fmt.Sprintf(\"unsupported propagation type: %v\", ptype))\n- }\n- mnt.propType = ptype\n- return nil\n-}\n-\n// A MountNamespace is a collection of Mounts.//\n// MountNamespaces are reference-counted. 
Unless otherwise specified, all\n// MountNamespace methods require that a reference is held.\n@@ -375,73 +285,6 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr\nreturn nil\n}\n-// preparePropagationTree returns a mapping of propagated mounts to their future\n-// mountpoints. The new mounts are clones of mnt and are added to mnt's peer\n-// group if vd.mount and mnt are shared. All the cloned mounts and new\n-// mountpoints in the tree have an extra reference taken.\n-//\n-// +checklocks:vfs.mountMu\n-// +checklocksalias:mnt.vfs.mountMu=vfs.mountMu\n-func (vfs *VirtualFilesystem) preparePropagationTree(mnt *Mount, vd VirtualDentry) map[*Mount]VirtualDentry {\n- tree := map[*Mount]VirtualDentry{}\n- if vd.mount.propType == Private {\n- return tree\n- }\n- if mnt.propType == Private {\n- vfs.setPropagation(mnt, Shared)\n- }\n- var newPeerGroup []*Mount\n- for peer := vd.mount.sharedList.Front(); peer != nil; peer = peer.sharedEntry.Next() {\n- if peer == vd.mount {\n- continue\n- }\n- peerVd := VirtualDentry{\n- mount: peer,\n- dentry: vd.dentry,\n- }\n- peerVd.IncRef()\n- clone := vfs.cloneMount(mnt, mnt.root, nil)\n- tree[clone] = peerVd\n- newPeerGroup = append(newPeerGroup, clone)\n- }\n- for _, newPeer := range newPeerGroup {\n- vfs.addPeer(mnt, newPeer)\n- }\n- return tree\n-}\n-\n-// commitPropagationTree attaches to mounts in tree to the mountpoints they\n-// are mapped to. If there is an error attaching a mount, the method panics.\n-//\n-// +checklocks:vfs.mountMu\n-func (vfs *VirtualFilesystem) commitPropagationTree(ctx context.Context, tree map[*Mount]VirtualDentry) {\n- // The peer mounts should have no way of being dead if we've reached this\n- // point so its safe to connect without checks.\n- vfs.mounts.seq.BeginWrite()\n- for mnt, vd := range tree {\n- vd.dentry.mu.Lock()\n- // If mnt isn't connected yet, skip connecting during propagation.\n- if mntns := vd.mount.ns; mntns != nil {\n- vfs.connectLocked(mnt, vd, mntns)\n- }\n- vd.dentry.mu.Unlock()\n- mnt.DecRef(ctx)\n- }\n- vfs.mounts.seq.EndWrite()\n-}\n-\n-// abortPropagationTree releases any references held by the mounts and\n-// mountpoints in the tree and removes the mounts from their peer groups.\n-//\n-// +checklocks:vfs.mountMu\n-func (vfs *VirtualFilesystem) abortPropagationTree(ctx context.Context, tree map[*Mount]VirtualDentry) {\n- for mnt, vd := range tree {\n- vd.DecRef(ctx)\n- vfs.setPropagation(mnt, Private)\n- mnt.DecRef(ctx)\n- }\n-}\n-\n// connectMountAtLocked attaches mnt at vd. 
It returns the new mountpoint of mnt\n// if no error occurred.\n//\n@@ -496,44 +339,6 @@ func (vfs *VirtualFilesystem) connectMountAt(ctx context.Context, mnt *Mount, vd\nreturn nil\n}\n-// SetMountPropagationAt changes the propagation type of the mount pointed to by\n-// pop.\n-func (vfs *VirtualFilesystem) SetMountPropagationAt(ctx context.Context, creds *auth.Credentials, pop *PathOperation, propType PropagationType) error {\n- vd, err := vfs.GetDentryAt(ctx, creds, pop, &GetDentryOptions{})\n- if err != nil {\n- return err\n- }\n- // See the similar defer in UmountAt for why this is in a closure.\n- defer func() {\n- vd.DecRef(ctx)\n- }()\n- if vd.dentry.isMounted() {\n- if realmnt := vfs.getMountAt(ctx, vd.mount, vd.dentry); realmnt != nil {\n- vd.mount.DecRef(ctx)\n- vd.mount = realmnt\n- }\n- } else if vd.dentry != vd.mount.root {\n- return linuxerr.EINVAL\n- }\n- vfs.SetMountPropagation(vd.mount, propType)\n- return nil\n-}\n-\n-// SetMountPropagation changes the propagation type of the mount.\n-func (vfs *VirtualFilesystem) SetMountPropagation(mnt *Mount, propType PropagationType) {\n- vfs.mountMu.Lock()\n- defer vfs.mountMu.Unlock()\n- if propType != mnt.propType {\n- switch propType {\n- case Shared, Private:\n- vfs.setPropagation(mnt, propType)\n- default:\n- panic(fmt.Sprintf(\"unsupported propagation type: %v\", propType))\n- }\n- }\n- mnt.propType = propType\n-}\n-\n// CloneMountAt returns a new mount with the same fs, specified root and\n// mount options. If mnt's propagation type is shared the new mount is\n// automatically made a peer of mnt. If mount options are nil, mnt's\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/vfs/propagation.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package vfs\n+\n+import (\n+ \"fmt\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n+)\n+\n+// PropagationType is a propagation flavor as described in\n+// https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt. Child\n+// and Unbindable are currently unimplemented.\n+// TODO(b/249777195): Support MS_SLAVE and MS_UNBINDABLE propagation types.\n+type PropagationType int\n+\n+const (\n+ // Unknown represents an invalid/unknown propagation type.\n+ Unknown PropagationType = iota\n+ // Shared represents the shared propagation type.\n+ Shared\n+ // Private represents the private propagation type.\n+ Private\n+ // Child represents the child propagation type (MS_SLAVE).\n+ Child\n+ // Unbindable represents the unbindable propagation type.\n+ Unbindable\n+)\n+\n+// PropagationTypeFromLinux returns the PropagationType corresponding to a\n+// linux mount flag, aka MS_SHARED.\n+func PropagationTypeFromLinux(propFlag uint64) PropagationType {\n+ switch propFlag {\n+ case linux.MS_SHARED:\n+ return Shared\n+ case linux.MS_PRIVATE:\n+ return Private\n+ case linux.MS_SLAVE:\n+ return Child\n+ case linux.MS_UNBINDABLE:\n+ return Unbindable\n+ default:\n+ return Unknown\n+ }\n+}\n+\n+// setPropagation sets the propagation on mnt for a propagation type.\n+//\n+// +checklocks:vfs.mountMu\n+func (vfs *VirtualFilesystem) setPropagation(mnt *Mount, ptype PropagationType) error {\n+ switch ptype {\n+ case Shared:\n+ id, err := vfs.allocateGroupID()\n+ if err != nil {\n+ return err\n+ }\n+ mnt.groupID = id\n+ mnt.sharedList = &sharedList{}\n+ mnt.sharedList.PushBack(mnt)\n+ case Private:\n+ if mnt.propType == Shared {\n+ mnt.sharedList.Remove(mnt)\n+ if mnt.sharedList.Empty() {\n+ vfs.freeGroupID(mnt.groupID)\n+ }\n+ mnt.sharedList = nil\n+ mnt.groupID = 0\n+ }\n+ default:\n+ panic(fmt.Sprintf(\"unsupported propagation type: %v\", ptype))\n+ }\n+ mnt.propType = ptype\n+ return nil\n+}\n+\n+// addPeer adds oth to mnt's peer group. Both will have the same groupID\n+// and sharedList. vfs.mountMu must be locked.\n+//\n+// +checklocks:vfs.mountMu\n+func (vfs *VirtualFilesystem) addPeer(mnt *Mount, oth *Mount) {\n+ mnt.sharedList.PushBack(oth)\n+ oth.sharedList = mnt.sharedList\n+ oth.propType = mnt.propType\n+ oth.groupID = mnt.groupID\n+}\n+\n+// mergePeerGroup merges oth and all its peers into mnt's peer group. 
Oth\n+// must have propagation type shared and vfs.mountMu must be locked.\n+//\n+// +checklocks:vfs.mountMu\n+func (vfs *VirtualFilesystem) mergePeerGroup(mnt *Mount, oth *Mount) {\n+ peer := oth.sharedList.Front()\n+ for peer != nil {\n+ next := peer.sharedEntry.Next()\n+ vfs.setPropagation(peer, Private)\n+ vfs.addPeer(mnt, peer)\n+ peer = next\n+ }\n+}\n+\n+// preparePropagationTree returns a mapping of propagated mounts to their future\n+// mountpoints. The new mounts are clones of mnt and are added to mnt's peer\n+// group if vd.mount and mnt are shared. All the cloned mounts and new\n+// mountpoints in the tree have an extra reference taken.\n+//\n+// +checklocks:vfs.mountMu\n+// +checklocksalias:mnt.vfs.mountMu=vfs.mountMu\n+func (vfs *VirtualFilesystem) preparePropagationTree(mnt *Mount, vd VirtualDentry) map[*Mount]VirtualDentry {\n+ tree := map[*Mount]VirtualDentry{}\n+ if vd.mount.propType == Private {\n+ return tree\n+ }\n+ if mnt.propType == Private {\n+ vfs.setPropagation(mnt, Shared)\n+ }\n+ var newPeerGroup []*Mount\n+ for peer := vd.mount.sharedList.Front(); peer != nil; peer = peer.sharedEntry.Next() {\n+ if peer == vd.mount {\n+ continue\n+ }\n+ peerVd := VirtualDentry{\n+ mount: peer,\n+ dentry: vd.dentry,\n+ }\n+ peerVd.IncRef()\n+ clone := vfs.cloneMount(mnt, mnt.root, nil)\n+ tree[clone] = peerVd\n+ newPeerGroup = append(newPeerGroup, clone)\n+ }\n+ for _, newPeer := range newPeerGroup {\n+ vfs.addPeer(mnt, newPeer)\n+ }\n+ return tree\n+}\n+\n+// commitPropagationTree attaches to mounts in tree to the mountpoints they\n+// are mapped to. If there is an error attaching a mount, the method panics.\n+//\n+// +checklocks:vfs.mountMu\n+func (vfs *VirtualFilesystem) commitPropagationTree(ctx context.Context, tree map[*Mount]VirtualDentry) {\n+ // The peer mounts should have no way of being dead if we've reached this\n+ // point so its safe to connect without checks.\n+ vfs.mounts.seq.BeginWrite()\n+ for mnt, vd := range tree {\n+ vd.dentry.mu.Lock()\n+ // If mnt isn't connected yet, skip connecting during propagation.\n+ if mntns := vd.mount.ns; mntns != nil {\n+ vfs.connectLocked(mnt, vd, mntns)\n+ }\n+ vd.dentry.mu.Unlock()\n+ mnt.DecRef(ctx)\n+ }\n+ vfs.mounts.seq.EndWrite()\n+}\n+\n+// abortPropagationTree releases any references held by the mounts and\n+// mountpoints in the tree and removes the mounts from their peer groups.\n+//\n+// +checklocks:vfs.mountMu\n+func (vfs *VirtualFilesystem) abortPropagationTree(ctx context.Context, tree map[*Mount]VirtualDentry) {\n+ for mnt, vd := range tree {\n+ vd.DecRef(ctx)\n+ vfs.setPropagation(mnt, Private)\n+ mnt.DecRef(ctx)\n+ }\n+}\n+\n+// SetMountPropagationAt changes the propagation type of the mount pointed to by\n+// pop.\n+func (vfs *VirtualFilesystem) SetMountPropagationAt(ctx context.Context, creds *auth.Credentials, pop *PathOperation, propType PropagationType) error {\n+ vd, err := vfs.GetDentryAt(ctx, creds, pop, &GetDentryOptions{})\n+ if err != nil {\n+ return err\n+ }\n+ // See the similar defer in UmountAt for why this is in a closure.\n+ defer func() {\n+ vd.DecRef(ctx)\n+ }()\n+ if vd.dentry.isMounted() {\n+ if realmnt := vfs.getMountAt(ctx, vd.mount, vd.dentry); realmnt != nil {\n+ vd.mount.DecRef(ctx)\n+ vd.mount = realmnt\n+ }\n+ } else if vd.dentry != vd.mount.root {\n+ return linuxerr.EINVAL\n+ }\n+ vfs.SetMountPropagation(vd.mount, propType)\n+ return nil\n+}\n+\n+// SetMountPropagation changes the propagation type of the mount.\n+func (vfs *VirtualFilesystem) SetMountPropagation(mnt *Mount, propType 
PropagationType) {\n+ vfs.mountMu.Lock()\n+ defer vfs.mountMu.Unlock()\n+ if propType != mnt.propType {\n+ switch propType {\n+ case Shared, Private:\n+ vfs.setPropagation(mnt, propType)\n+ default:\n+ panic(fmt.Sprintf(\"unsupported propagation type: %v\", propType))\n+ }\n+ }\n+ mnt.propType = propType\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Separate propagation methods/constants into their own file.
mount.go is becoming bloated, we should break it into smaller files
where it makes sense.
PiperOrigin-RevId: 491955373 |
259,909 | 30.11.2022 13:54:09 | 28,800 | 1823b16fccf7df2b77af8c53ca0f631096484447 | Clean up DecRefs in mount methods.
connectMountAt consumes the references held by the VirtualDentries.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/mount.go",
"new_path": "pkg/sentry/vfs/mount.go",
"diff": "@@ -275,6 +275,7 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr\ndefer cleanup.Clean()\n// Check if the new mount + all the propagation mounts puts us over the max.\nif uint32(len(tree)+1)+vd.mount.ns.mounts > MountMax {\n+ vd.DecRef(ctx)\nreturn linuxerr.ENOSPC\n}\nif err := vfs.connectMountAt(ctx, mnt, vd); err != nil {\n@@ -399,10 +400,10 @@ func (vfs *VirtualFilesystem) BindAt(ctx context.Context, creds *auth.Credential\n// Checklocks doesn't work with anon functions.\nvfs.setPropagation(clone, Private) // +checklocksforce\nvfs.abortPropagationTree(ctx, tree) // +checklocksforce\n- targetVd.DecRef(ctx)\n})\ndefer cleanup.Clean()\nif uint32(1+len(tree))+targetVd.mount.ns.mounts > MountMax {\n+ targetVd.DecRef(ctx)\nreturn nil, linuxerr.ENOSPC\n}\nif err := vfs.connectMountAt(ctx, clone, targetVd); err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Clean up DecRefs in mount methods.
connectMountAt consumes the references held by the VirtualDentries.
Reported-by: syzbot+b415395b0b7186903700@syzkaller.appspotmail.com
PiperOrigin-RevId: 492009947 |
259,868 | 30.11.2022 13:59:00 | 28,800 | ba73788f2ce73fcb79376a7795f20d03a3829bca | gvisor: Explicitly register C++ toolchains for cross-compilation.
Bazel >= 5 seems to no longer implicitly recognize coral_crosstool's
C++ toolchains when looking through the possible set of toolchains to use for
compiling C++ (`@bazel_tools//tools/cpp:toolchain_type`).
This change explicitly registers these toolchains within `WORKSPACE`. | [
{
"change_type": "MODIFY",
"old_path": "BUILD",
"new_path": "BUILD",
"diff": "@@ -145,6 +145,28 @@ go_path(\n],\n)\n+# CC toolchain targets for cross-compilation.\n+# Required to be explicitly specified in bazel >= 5.\n+toolchain(\n+ name = \"cc_toolchain_k8\",\n+ target_compatible_with = [\n+ \"@platforms//os:linux\",\n+ \"@platforms//cpu:x86_64\",\n+ ],\n+ toolchain = \"@crosstool//:cc-compiler-k8\",\n+ toolchain_type = \"@bazel_tools//tools/cpp:toolchain_type\",\n+)\n+\n+toolchain(\n+ name = \"cc_toolchain_aarch64\",\n+ target_compatible_with = [\n+ \"@platforms//os:linux\",\n+ \"@platforms//cpu:aarch64\",\n+ ],\n+ toolchain = \"@crosstool//:cc-compiler-aarch64\",\n+ toolchain_type = \"@bazel_tools//tools/cpp:toolchain_type\",\n+)\n+\n# gazelle is a set of build tools.\n#\n# To update the WORKSPACE from go.mod, use:\n"
},
{
"change_type": "MODIFY",
"old_path": "WORKSPACE",
"new_path": "WORKSPACE",
"diff": "@@ -137,6 +137,7 @@ http_archive(\nload(\"@coral_crosstool//:configure.bzl\", \"cc_crosstool\")\ncc_crosstool(name = \"crosstool\")\n+register_toolchains(\"//:cc_toolchain_k8\", \"//:cc_toolchain_aarch64\")\n# Load protobuf dependencies.\nhttp_archive(\n"
}
] | Go | Apache License 2.0 | google/gvisor | gvisor: Explicitly register C++ toolchains for cross-compilation.
Bazel >= 5 seems to no longer implicitly recognize coral_crosstool's
C++ toolchains when looking through the possible set of toolchains to use for
compiling C++ (`@bazel_tools//tools/cpp:toolchain_type`).
This change explicitly registers these toolchains within `WORKSPACE`.
PiperOrigin-RevId: 492011138 |
259,992 | 30.11.2022 15:14:37 | 28,800 | 4f2479ae58f18c25c83c58d7a83d5203cdd37142 | Add K8s to Falco+gVisor tutorial | [
{
"change_type": "MODIFY",
"old_path": "g3doc/user_guide/tutorials/falco.md",
"new_path": "g3doc/user_guide/tutorials/falco.md",
"diff": "## Installation\n+This section explains the steps required to install Falco+gVisor integration\n+depending your environment.\n+\n+### Docker\n+\nFirst, install [gVisor](/docs/user_guide/install/) and\n[Falco](https://falco.org/docs/getting-started/installation/) on the machine.\nRun `runsc --version` and check that `runsc version release-20220704.0` or newer\nis reported. Run `falco --version` and check that `Falco version` reports\n-`0.32.1` or higher.\n+`0.33.1` or higher.\nOnce both are installed, you can configure gVisor to connect to Falco whenever a\nnew sandbox is started. The first command below generates a configuration file\n@@ -19,27 +24,10 @@ Docker runtime pointing it to the configuration file we just generated:\n```shell\nfalco --gvisor-generate-config | sudo tee /etc/falco/pod-init.json\n-# Edit /etc/falco/pod-init.json, see note below.\nsudo runsc install --runtime=runsc-falco -- --pod-init-config=/etc/falco/pod-init.json\nsudo systemctl restart docker\n```\n-> **Note:** Between steps 1 and 2 above, edit the `pod-init.json` file to add\n-> `ignore_setup_error` to the sink options (this will be fixed in the next Falco\n-> release). The file will look like this:\n-\n-```json\n- \"sinks\" : [\n- {\n- \"config\" : {\n- \"endpoint\" : \"/tmp/gvisor.sock\"\n- },\n- \"name\" : \"remote\",\n- \"ignore_setup_error\": true <== ADD THIS LINE\n- }\n- ]\n-```\n-\ngVisor is now configured. Next, let's start Falco and tell it to enable gVisor\nmonitoring. You should use the same command line that you normally use to start\nFalco with these additional flags:\n@@ -68,6 +56,29 @@ command above, the configuration files are defined in\n`/etc/falco/faco_rules.yaml` and `/etc/falco/faco_rules.local.yaml` (where you\ncan add your own rules).\n+### Kubernetes\n+\n+If you are using Kubernetes, the steps above must be done on every node that has\n+gVisor enabled. Luckily, this can be done for you automatically using\n+[Falco's Helm chart](https://github.com/falcosecurity/charts/blob/master/falco/README.md).\n+You can find more details, like available options, in the\n+[*About gVisor*](https://github.com/falcosecurity/charts/blob/master/falco/README.md#about-gvisor)\n+section.\n+\n+Here is a quick example using\n+[GKE Sandbox](https://cloud.google.com/kubernetes-engine/docs/concepts/sandbox-pods),\n+which already pre-configures gVisor for you. You can use any version that is\n+equal or higher than 1.24.4-gke.1800:\n+\n+```shell\n+gcloud container clusters create my-cluster --release-channel=rapid --cluster-version=1.25\n+gcloud container node-pools create gvisor --sandbox=type=gvisor --cluster=my-cluster\n+gcloud container clusters get-credentials my-cluster\n+helm install falco-gvisor falcosecurity/falco \\\n+ -f https://raw.githubusercontent.com/falcosecurity/charts/master/falco/values-gvisor-gke.yaml \\\n+ --namespace falco-gvisor --create-namespace\n+```\n+\n## Triggering Falco Events\nLet's run something interesting inside a container to see a few rules trigger in\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add K8s to Falco+gVisor tutorial
PiperOrigin-RevId: 492030619 |
259,853 | 01.12.2022 12:53:29 | 28,800 | f59f942d4ff6efa22e6c7ffa7fea07d6ec43ab4c | buildkite: install docker runtimes only when it is required | [
{
"change_type": "MODIFY",
"old_path": ".buildkite/hooks/post-command",
"new_path": ".buildkite/hooks/post-command",
"diff": "@@ -80,6 +80,7 @@ done\nset -euo pipefail\n+if [[ \"${BUILDKITE_PIPELINE_INSTALL_RUNTIME:-}\" == \"true\" ]]; then\n# Remove all Docker runtimes that may be installed.\nnum_docker_runtimes=\"$(jq \\\n'(if has(\"runtimes\") then .runtimes else {} end) | length' \\\n@@ -90,6 +91,7 @@ if [[ \"$num_docker_runtimes\" -gt 0 ]]; then\nsudo mv /etc/docker/daemon.json.tmp /etc/docker/daemon.json\nbash -c \"$DOCKER_RELOAD_COMMAND\"\nfi\n+fi\n# Cleanup temporary directory where STAGED_BINARIES may have been extracted.\nif [[ -n \"${BUILDKITE_STAGED_BINARIES_DIRECTORY:-}\" ]]; then\n"
},
{
"change_type": "MODIFY",
"old_path": ".buildkite/hooks/pre-command",
"new_path": ".buildkite/hooks/pre-command",
"diff": "@@ -47,6 +47,7 @@ else\nexport DOCKER_RELOAD_COMMAND='sudo systemctl reload docker'\nfi\n+if [[ \"${BUILDKITE_PIPELINE_INSTALL_RUNTIME:-}\" == \"true\" ]]; then\n# Ensure Docker has experimental enabled, install runtimes.\nHAD_EXPERIMENTAL=\"$(docker version --format='{{.Server.Experimental}}')\"\nif [[ -n \"${STAGED_BINARIES:-}\" ]]; then\n@@ -72,6 +73,7 @@ else\n# just a SIGHUP.\nbash -c \"$DOCKER_RELOAD_COMMAND\"\nfi\n+fi\n# Helper for benchmarks, based on the branch.\nif test \"${BUILDKITE_BRANCH}\" = \"master\"; then\n"
},
{
"change_type": "MODIFY",
"old_path": ".buildkite/pipeline.yaml",
"new_path": ".buildkite/pipeline.yaml",
"diff": "@@ -12,6 +12,9 @@ _templates:\nplatform_specific_agents: &platform_specific_agents {}\nkvm_agents: &kvm_agents {kvm: \"true\"}\nubuntu_agents: &ubuntu_agents {os: \"ubuntu\"}\n+ docker: &docker\n+ env:\n+ BUILDKITE_PIPELINE_INSTALL_RUNTIME: true\nbenchmarks: &benchmarks\ntimeout_in_minutes: 120\nretry:\n@@ -25,6 +28,7 @@ _templates:\nBENCHMARKS_PROJECT: gvisor-benchmarks\nBENCHMARKS_TABLE: benchmarks\nBENCHMARKS_UPLOAD: true\n+ BUILDKITE_PIPELINE_INSTALL_RUNTIME: true\nagents:\n<<: *kvm_agents\n<<: *platform_specific_agents\n@@ -194,6 +198,7 @@ steps:\narch: \"arm64\"\n- <<: *common\n<<: *source_test\n+ <<: *docker\nlabel: \":test_tube: Container tests (cgroupv1)\"\ncommand: make container-tests\nagents:\n@@ -201,6 +206,7 @@ steps:\ncgroup: \"v1\"\narch: \"amd64\"\n- <<: *common\n+ <<: *docker\n# This variant is not really a source test, but we annotate it as such to\n# avoid running binary-only tests for all variants of cgroups. It is\n# sufficient to run cgroupv2 variants only for source changes.\n@@ -231,6 +237,7 @@ steps:\n# Integration tests.\n- <<: *common\n+ <<: *docker\n<<: *source_test\nlabel: \":docker: Docker tests (cgroupv1)\"\ncommand: make docker-tests\n@@ -239,6 +246,7 @@ steps:\narch: \"amd64\"\ncgroup: \"v1\"\n- <<: *common\n+ <<: *docker\n# See above: not truly a source test.\n<<: *source_test\nlabel: \":docker: Docker tests (cgroupv2)\"\n@@ -248,6 +256,7 @@ steps:\narch: \"amd64\"\ncgroup: \"v2\"\n- <<: *common\n+ <<: *docker\nlabel: \":goggles: Overlay tests\"\ncommand: make overlay-tests\nagents:\n@@ -255,6 +264,7 @@ steps:\n<<: *ubuntu_agents\narch: \"amd64\"\n- <<: *common\n+ <<: *docker\nlabel: \":safety_pin: Host network tests\"\ncommand: make hostnet-tests\nagents:\n@@ -262,6 +272,7 @@ steps:\n<<: *ubuntu_agents\narch: \"amd64\"\n- <<: *common\n+ <<: *docker\nlabel: \":satellite: gVisor GSO tests\"\ncommand: make swgso-tests\nagents:\n@@ -275,6 +286,7 @@ steps:\nagents:\narch: \"amd64\"\n- <<: *common\n+ <<: *docker\n<<: *source_test\nlabel: \":person_in_lotus_position: KVM tests\"\ncommand: make kvm-tests\n@@ -282,6 +294,7 @@ steps:\n<<: *kvm_agents\narch: \"amd64\"\n- <<: *common\n+ <<: *docker\nlabel: \":weight_lifter: Fsstress test\"\ncommand: make fsstress-test\nagents:\n@@ -289,6 +302,7 @@ steps:\n<<: *ubuntu_agents\narch: \"amd64\"\n- <<: *common\n+ <<: *docker\nlabel: \":docker: Containerd 1.3.9 tests (cgroupv1)\"\ncommand: make containerd-test-1.3.9\nagents:\n@@ -297,6 +311,7 @@ steps:\ncgroup: \"v1\"\narch: \"amd64\"\n- <<: *common\n+ <<: *docker\nlabel: \":docker: Containerd 1.4.3 tests (cgroupv1)\"\ncommand: make containerd-test-1.4.3\nagents:\n@@ -304,6 +319,7 @@ steps:\n<<: *ubuntu_agents\ncgroup: \"v1\"\n- <<: *common\n+ <<: *docker\n# See above: not truly a source test.\n<<: *source_test\nlabel: \":docker: Containerd 1.4.3 tests (cgroupv2)\"\n@@ -313,6 +329,7 @@ steps:\n<<: *ubuntu_agents\ncgroup: \"v2\"\n- <<: *common\n+ <<: *docker\nlabel: \":docker: Containerd 1.5.11 tests (cgroupv1)\"\ncommand: make containerd-test-1.5.11\nagents:\n@@ -320,6 +337,7 @@ steps:\n<<: *ubuntu_agents\ncgroup: \"v1\"\n- <<: *common\n+ <<: *docker\n# See above: not truly a source test.\n<<: *source_test\nlabel: \":docker: Containerd 1.5.11 tests (cgroupv2)\"\n@@ -328,6 +346,7 @@ steps:\n<<: *ubuntu_agents\ncgroup: \"v2\"\n- <<: *common\n+ <<: *docker\nlabel: \":docker: Containerd 1.6.2 tests (cgroupv1)\"\ncommand: make containerd-test-1.6.2\nagents:\n@@ -335,6 +354,7 @@ steps:\n<<: *ubuntu_agents\ncgroup: \"v1\"\n- <<: *common\n+ <<: *docker\n<<: 
*source_test\nlabel: \":docker: Containerd 1.6.2 tests (cgroupv2)\"\ncommand: make containerd-test-1.6.2\n@@ -379,6 +399,7 @@ steps:\n# Runtime tests.\n- <<: *common\n+ <<: *docker\nlabel: \":php: PHP runtime tests\"\ncommand: make RUNTIME_LOG_DIR=/tmp/$${BUILDKITE_JOB_ID} php8.1.1-runtime-tests\nparallelism: 10\n@@ -387,6 +408,7 @@ steps:\n<<: *ubuntu_agents\narch: \"amd64\"\n- <<: *common\n+ <<: *docker\nlabel: \":java: Java runtime tests\"\ncommand: make RUNTIME_LOG_DIR=/tmp/$${BUILDKITE_JOB_ID} java17-runtime-tests\nparallelism: 40\n@@ -395,6 +417,7 @@ steps:\n<<: *ubuntu_agents\narch: \"amd64\"\n- <<: *common\n+ <<: *docker\nlabel: \":golang: Go runtime tests\"\ncommand: make RUNTIME_LOG_DIR=/tmp/$${BUILDKITE_JOB_ID} go1.16-runtime-tests\nparallelism: 10\n@@ -403,6 +426,7 @@ steps:\n<<: *ubuntu_agents\narch: \"amd64\"\n- <<: *common\n+ <<: *docker\nlabel: \":node: NodeJS runtime tests\"\ncommand: make RUNTIME_LOG_DIR=/tmp/$${BUILDKITE_JOB_ID} nodejs16.13.2-runtime-tests\nparallelism: 10\n@@ -411,6 +435,7 @@ steps:\n<<: *ubuntu_agents\narch: \"amd64\"\n- <<: *common\n+ <<: *docker\nlabel: \":python: Python runtime tests\"\ncommand: make RUNTIME_LOG_DIR=/tmp/$${BUILDKITE_JOB_ID} python3.10.2-runtime-tests\nparallelism: 10\n@@ -421,6 +446,7 @@ steps:\n# Run basic benchmarks smoke tests (no upload).\n- <<: *common\n+ <<: *docker\nlabel: \":fire: Benchmarks smoke test\"\ncommand: make benchmark-platforms BENCHMARKS_TARGETS=test/benchmarks/base:startup_test BENCHMARKS_FILTER=BenchmarkStartupEmpty BENCHMARKS_OPTIONS=-test.benchtime=1ns\n# Use the opposite of the benchmarks filter.\n"
}
] | Go | Apache License 2.0 | google/gvisor | buildkite: install docker runtimes only when it is required
PiperOrigin-RevId: 492275948 |
259,907 | 01.12.2022 13:59:27 | 28,800 | 9c444c44e012d600b167f5ce77cf8e39f4082aec | Cleanup remaining comments for
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/lock/lock.go",
"new_path": "pkg/sentry/fsimpl/lock/lock.go",
"diff": "@@ -164,14 +164,6 @@ func (l *Locks) LockRegion(ctx context.Context, uid UniqueID, ownerPID int32, t\n}\n}\n-// LockRegionVFS1 is a wrapper around LockRegion for VFS1, which does not implement\n-// F_GETLK (and does not care about storing PIDs as a result).\n-//\n-// TODO(gvisor.dev/issue/1624): Delete.\n-func (l *Locks) LockRegionVFS1(ctx context.Context, uid UniqueID, t LockType, r LockRange, block bool) error {\n- return l.LockRegion(ctx, uid, 0 /* ownerPID */, t, r, block)\n-}\n-\n// Readiness always returns zero.\nfunc (l *Locks) Readiness(waiter.EventMask) waiter.EventMask {\nreturn 0\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/points.go",
"new_path": "pkg/sentry/syscalls/linux/points.go",
"diff": "@@ -691,7 +691,7 @@ func signalfdHelper(t *kernel.Task, fields seccheck.FieldSet, cxtData *pb.Contex\n}\nsigset := info.Args[1].Pointer()\nsigsetsize := info.Args[2].SizeT()\n- mask, err := CopyInSigSet(t, sigset, sigsetsize)\n+ mask, err := copyInSigSet(t, sigset, sigsetsize)\nif err == nil { // if NO error\np.Sigset = uint64(mask)\np.Sigset = uint64(mask)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sigset.go",
"new_path": "pkg/sentry/syscalls/linux/sigset.go",
"diff": "@@ -21,13 +21,9 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n)\n-// CopyInSigSet copies in a sigset_t, checks its size, and ensures that KILL and\n+// copyInSigSet copies in a sigset_t, checks its size, and ensures that KILL and\n// STOP are clear.\n-//\n-// TODO(gvisor.dev/issue/1624): This is only exported because\n-// syscalls/vfs2/signal.go depends on it. Once vfs1 is deleted and the vfs2\n-// syscalls are moved into this package, then they can be unexported.\n-func CopyInSigSet(t *kernel.Task, sigSetAddr hostarch.Addr, size uint) (linux.SignalSet, error) {\n+func copyInSigSet(t *kernel.Task, sigSetAddr hostarch.Addr, size uint) (linux.SignalSet, error) {\nif size != linux.SignalSetSize {\nreturn 0, linuxerr.EINVAL\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_signal.go",
"new_path": "pkg/sentry/syscalls/linux/sys_signal.go",
"diff": "@@ -295,7 +295,7 @@ func RtSigprocmask(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel\n}\noldmask := t.SignalMask()\nif setaddr != 0 {\n- mask, err := CopyInSigSet(t, setaddr, sigsetsize)\n+ mask, err := copyInSigSet(t, setaddr, sigsetsize)\nif err != nil {\nreturn 0, nil, err\n}\n@@ -365,7 +365,7 @@ func RtSigtimedwait(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne\ntimespec := args[2].Pointer()\nsigsetsize := args[3].SizeT()\n- mask, err := CopyInSigSet(t, sigset, sigsetsize)\n+ mask, err := copyInSigSet(t, sigset, sigsetsize)\nif err != nil {\nreturn 0, nil, err\n}\n@@ -517,7 +517,7 @@ func RestartSyscall(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne\n// sharedSignalfd is shared between the two calls.\nfunc sharedSignalfd(t *kernel.Task, fd int32, sigset hostarch.Addr, sigsetsize uint, flags int32) (uintptr, *kernel.SyscallControl, error) {\n// Copy in the signal mask.\n- mask, err := CopyInSigSet(t, sigset, sigsetsize)\n+ mask, err := copyInSigSet(t, sigset, sigsetsize)\nif err != nil {\nreturn 0, nil, err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runtimes/runner/lib/lib.go",
"new_path": "test/runtimes/runner/lib/lib.go",
"diff": "@@ -65,9 +65,6 @@ type Filter func(test string) bool\n// defered functions before exiting. It returns an exit code that should be\n// passed to os.Exit.\nfunc RunTests(lang, image string, filter Filter, batchSize int, timeout time.Duration, proctorSettings ProctorSettings) int {\n- // TODO(gvisor.dev/issue/1624): Remove those tests from all exclude lists\n- // that only fail with VFS1.\n-\n// Construct the shared docker instance.\nctx := context.Background()\nd := dockerutil.MakeContainer(ctx, testutil.DefaultLogger(lang))\n"
}
] | Go | Apache License 2.0 | google/gvisor | Cleanup remaining comments for #1624.
Fixes #1624
PiperOrigin-RevId: 492292937 |
259,982 | 01.12.2022 14:17:12 | 28,800 | 4e11daccf0be7328128e2c97b6cc77cfdd989020 | Adding more trace point integration tests for the following syscalls:
- signalfd
- signalfd4
- fcntl
- pipe
- pipe2
- timerfd_create
- timerfd_settime
- timerfd_gettime
- fork
- vfork
- inotify_init
- inotify_init1
- inotify_add_watch
- inotify_rm_watch
- clone
Updates | [
{
"change_type": "MODIFY",
"old_path": "test/trace/BUILD",
"new_path": "test/trace/BUILD",
"diff": "@@ -4,8 +4,11 @@ package(licenses = [\"notice\"])\ngo_test(\nname = \"trace_test\",\n- size = \"small\",\n- srcs = [\"trace_test.go\"],\n+ srcs = [\n+ \"trace_amd64_test.go\",\n+ \"trace_arm64_test.go\",\n+ \"trace_test.go\",\n+ ],\ndata = [\n\"//runsc\",\n\"//test/trace/workload\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/trace/trace_amd64_test.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+//go:build amd64\n+// +build amd64\n+\n+package trace\n+\n+import (\n+ \"fmt\"\n+ \"testing\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"google.golang.org/protobuf/proto\"\n+ pb \"gvisor.dev/gvisor/pkg/sentry/seccheck/points/points_go_proto\"\n+ \"gvisor.dev/gvisor/pkg/sentry/seccheck/sinks/remote/test\"\n+)\n+\n+func extraMatchers(t *testing.T, msgs []test.Message, matchers map[pb.MessageType]*checkers) {\n+ // Register functions that verify each available point specific to amd64 architecture.\n+ matchers[pb.MessageType_MESSAGE_SYSCALL_FORK] = &checkers{checker: checkSyscallFork}\n+}\n+\n+func checkSyscallSignalfdFlags(flags int32) error {\n+ if flags != 0 && flags != (unix.SFD_CLOEXEC|unix.SFD_NONBLOCK) {\n+ return fmt.Errorf(\"invalid flag got: %v\", flags)\n+ }\n+ return nil\n+}\n+\n+func checkSyscallFork(msg test.Message) error {\n+ p := pb.Fork{}\n+ if err := proto.Unmarshal(msg.Msg, &p); err != nil {\n+ return err\n+ }\n+ if err := checkContextData(p.ContextData); err != nil {\n+ return err\n+ }\n+ return nil\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/trace/trace_arm64_test.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+//go:build arm64\n+// +build arm64\n+\n+package trace\n+\n+import (\n+ \"fmt\"\n+ \"testing\"\n+\n+ \"golang.org/x/sys/unix\"\n+ pb \"gvisor.dev/gvisor/pkg/sentry/seccheck/points/points_go_proto\"\n+ \"gvisor.dev/gvisor/pkg/sentry/seccheck/sinks/remote/test\"\n+)\n+\n+func extraMatchers(t *testing.T, msgs []test.Message, matchers map[pb.MessageType]*checkers) {\n+ // Register functions that verify each available point specific to arm64 architecture.\n+ return\n+}\n+\n+func checkSyscallSignalfdFlags(flags int32) error {\n+ if flags != (unix.SFD_CLOEXEC | unix.SFD_NONBLOCK) {\n+ return fmt.Errorf(\"invalid flag got: %v\", flags)\n+ }\n+ return nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/trace/trace_test.go",
"new_path": "test/trace/trace_test.go",
"diff": "@@ -34,6 +34,11 @@ import (\nvar cutoffTime time.Time\n+type checkers struct {\n+ checker func(test.Message) error\n+ count int\n+}\n+\n// TestAll enabled all trace points in the system with all optional and context\n// fields enabled. Then it runs a workload that will trigger those points and\n// run some basic validation over the points generated.\n@@ -87,15 +92,15 @@ func TestAll(t *testing.T) {\n// Wait until the sandbox disconnects to ensure all points were gathered.\nserver.WaitForNoClients()\n- matchPoints(t, server.GetPoints())\n+\n+ matchers := matchPoints(t, server.GetPoints())\n+ extraMatchers(t, server.GetPoints(), matchers)\n+ validatePoints(t, server.GetPoints(), matchers)\n}\n-func matchPoints(t *testing.T, msgs []test.Message) {\n+func matchPoints(t *testing.T, msgs []test.Message) map[pb.MessageType]*checkers {\n// Register functions that verify each available point.\n- matchers := map[pb.MessageType]*struct {\n- checker func(test.Message) error\n- count int\n- }{\n+ matchers := map[pb.MessageType]*checkers{\npb.MessageType_MESSAGE_CONTAINER_START: {checker: checkContainerStart},\npb.MessageType_MESSAGE_SENTRY_CLONE: {checker: checkSentryClone},\npb.MessageType_MESSAGE_SENTRY_EXEC: {checker: checkSentryExec},\n@@ -116,13 +121,23 @@ func matchPoints(t *testing.T, msgs []test.Message) {\npb.MessageType_MESSAGE_SYSCALL_DUP: {checker: checkSyscallDup},\npb.MessageType_MESSAGE_SYSCALL_PRLIMIT64: {checker: checkSyscallPrlimit64},\npb.MessageType_MESSAGE_SYSCALL_EVENTFD: {checker: checkSyscallEventfd},\n+ pb.MessageType_MESSAGE_SYSCALL_SIGNALFD: {checker: checkSyscallSignalfd},\npb.MessageType_MESSAGE_SYSCALL_BIND: {checker: checkSyscallBind},\npb.MessageType_MESSAGE_SYSCALL_ACCEPT: {checker: checkSyscallAccept},\n-\n- // TODO(gvisor.dev/issue/4805): Add validation for these messages.\n- pb.MessageType_MESSAGE_SYSCALL_CLONE: {checker: checkTODO},\n- pb.MessageType_MESSAGE_SYSCALL_PIPE: {checker: checkTODO},\n+ pb.MessageType_MESSAGE_SYSCALL_FCNTL: {checker: checkSyscallFcntl},\n+ pb.MessageType_MESSAGE_SYSCALL_PIPE: {checker: checkSyscallPipe},\n+ pb.MessageType_MESSAGE_SYSCALL_TIMERFD_CREATE: {checker: checkSyscallTimerfdCreate},\n+ pb.MessageType_MESSAGE_SYSCALL_TIMERFD_SETTIME: {checker: checkSyscallTimerfdSettime},\n+ pb.MessageType_MESSAGE_SYSCALL_TIMERFD_GETTIME: {checker: checkSyscallTimerfdGettime},\n+ pb.MessageType_MESSAGE_SYSCALL_INOTIFY_INIT: {checker: checkSyscallInotifyInit},\n+ pb.MessageType_MESSAGE_SYSCALL_INOTIFY_ADD_WATCH: {checker: checkSyscallInotifyInitAddWatch},\n+ pb.MessageType_MESSAGE_SYSCALL_INOTIFY_RM_WATCH: {checker: checkSyscallInotifyInitRmWatch},\n+ pb.MessageType_MESSAGE_SYSCALL_CLONE: {checker: checkSyscallClone},\n+ }\n+ return matchers\n}\n+\n+func validatePoints(t *testing.T, msgs []test.Message, matchers map[pb.MessageType]*checkers) {\nfor _, msg := range msgs {\nt.Logf(\"Processing message type %v\", msg.MsgType)\nif handler := matchers[msg.MsgType]; handler == nil {\n@@ -147,7 +162,7 @@ func matchPoints(t *testing.T, msgs []test.Message) {\nfunc checkTimeNs(ns int64) error {\nif ns <= int64(cutoffTime.Nanosecond()) {\n- return fmt.Errorf(\"time should not be less than %d (%v), got: %d (%v)\", cutoffTime.Nanosecond(), cutoffTime, ns, time.Unix(0, ns))\n+ return fmt.Errorf(\"time: got: %d (%v), should not be less than %d (%v)\", ns, time.Unix(0, ns), cutoffTime.Nanosecond(), cutoffTime)\n}\nreturn nil\n}\n@@ -512,13 +527,13 @@ func checkSyscallSetresid(msg test.Message) error {\nreturn err\n}\nif p.GetRid() != 0 {\n- return 
fmt.Errorf(\" Invalid RID: %d\", p.Rid)\n+ return fmt.Errorf(\"invalid rid: %d\", p.Rid)\n}\nif p.GetEid() != 0 {\n- return fmt.Errorf(\" Invalid EID: %d\", p.Eid)\n+ return fmt.Errorf(\"invalid eid: %d\", p.Eid)\n}\nif p.GetSid() != 0 {\n- return fmt.Errorf(\" Invalid SID: %d\", p.Sid)\n+ return fmt.Errorf(\"invalid sid: %d\", p.Sid)\n}\nreturn nil\n@@ -551,10 +566,10 @@ func checkSyscallDup(msg test.Message) error {\nreturn err\n}\nif p.OldFd < 0 {\n- return fmt.Errorf(\"invalid FD: %d\", p.OldFd)\n+ return fmt.Errorf(\"invalid fd: %d\", p.OldFd)\n}\nif p.NewFd < 0 {\n- return fmt.Errorf(\"invalid FD: %d\", p.NewFd)\n+ return fmt.Errorf(\"invalid fd: %d\", p.NewFd)\n}\nif p.Flags != unix.O_CLOEXEC && p.Flags != 0 {\nreturn fmt.Errorf(\"invalid flag got: %v\", p.Flags)\n@@ -572,7 +587,7 @@ func checkSyscallPrlimit64(msg test.Message) error {\nreturn err\n}\nif p.Pid < 0 {\n- return fmt.Errorf(\"invalid PID: %d\", p.Pid)\n+ return fmt.Errorf(\"invalid pid: %d\", p.Pid)\n}\nreturn nil\n}\n@@ -586,10 +601,10 @@ func checkSyscallEventfd(msg test.Message) error {\nreturn err\n}\nif p.Val < 0 {\n- return fmt.Errorf(\"invalid PID: %d\", p.Val)\n+ return fmt.Errorf(\"invalid pid: %d\", p.Val)\n}\nif p.Flags != unix.EFD_NONBLOCK && p.Flags != 0 {\n- return fmt.Errorf(\"invalid Flag got: %d, \", p.Flags)\n+ return fmt.Errorf(\"invalid flag got: %d, \", p.Flags)\n}\nreturn nil\n@@ -604,10 +619,10 @@ func checkSyscallBind(msg test.Message) error {\nreturn err\n}\nif p.Fd < 0 {\n- return fmt.Errorf(\"invalid FD: %d\", p.Fd)\n+ return fmt.Errorf(\"invalid fd: %d\", p.Fd)\n}\nif p.FdPath == \" \" {\n- return fmt.Errorf(\"invalid Path: %v\", p.FdPath)\n+ return fmt.Errorf(\"invalid path: %v\", p.FdPath)\n}\nif len(p.Address) == 0 {\nreturn fmt.Errorf(\"invalid address: %d\", p.Address)\n@@ -624,10 +639,10 @@ func checkSyscallAccept(msg test.Message) error {\nreturn err\n}\nif p.Fd < 0 {\n- return fmt.Errorf(\"invalid FD: %d\", p.Fd)\n+ return fmt.Errorf(\"invalid fd: %d\", p.Fd)\n}\nif p.FdPath == \"\" {\n- return fmt.Errorf(\"invalid Path: %v\", p.FdPath)\n+ return fmt.Errorf(\"invalid path: %v\", p.FdPath)\n}\nif len(p.Address) != 0 {\nreturn fmt.Errorf(\"invalid address: %d, %v\", p.Address, p.Sysno)\n@@ -647,12 +662,203 @@ func checkSyscallChroot(msg test.Message) error {\nreturn err\n}\nif want := \"trace_test.abc\"; !strings.Contains(p.Pathname, want) {\n- return fmt.Errorf(\"wrong Pathname, want: %q, got: %q\", want, p.Pathname)\n+ return fmt.Errorf(\"wrong pathname, want: %q, got: %q\", want, p.Pathname)\n}\nreturn nil\n}\n-func checkTODO(_ test.Message) error {\n+func checkSyscallFcntl(msg test.Message) error {\n+ p := pb.Fcntl{}\n+ if err := proto.Unmarshal(msg.Msg, &p); err != nil {\n+ return err\n+ }\n+ if err := checkContextData(p.ContextData); err != nil {\n+ return err\n+ }\n+ if p.Fd < 0 {\n+ return fmt.Errorf(\"invalid fd: %d\", p.Fd)\n+ }\n+ if p.Cmd != unix.F_GETFL {\n+ return fmt.Errorf(\"invalid cmd: got: %v, want: F_GETFL\", p.Cmd)\n+ }\n+ return nil\n+}\n+\n+func checkSyscallPipe(msg test.Message) error {\n+ p := pb.Pipe{}\n+ if err := proto.Unmarshal(msg.Msg, &p); err != nil {\n+ return err\n+ }\n+ if err := checkContextData(p.ContextData); err != nil {\n+ return err\n+ }\n+ if p.Reader < 0 {\n+ return fmt.Errorf(\"invalid reader fd: %d\", p.Reader)\n+ }\n+ if p.Writer < 0 {\n+ return fmt.Errorf(\"invalid writer fd: %d\", p.Writer)\n+ }\n+ if p.Flags != unix.O_CLOEXEC && p.Flags != 0 {\n+ return fmt.Errorf(\"invalid flag got: %v\", p.Flags)\n+ }\n+ return nil\n+}\n+\n+func 
checkSyscallSignalfd(msg test.Message) error {\n+ p := pb.Signalfd{}\n+ if err := proto.Unmarshal(msg.Msg, &p); err != nil {\n+ return err\n+ }\n+ if err := checkContextData(p.ContextData); err != nil {\n+ return err\n+ }\n+ if p.Fd != -1 {\n+ return fmt.Errorf(\"invalid fd: %d\", p.Fd)\n+ }\n+ if p.Sigset != 0 && p.Sigset != uint64(unix.SIGILL) {\n+ return fmt.Errorf(\"invalid signal got: %v\", p.Sigset)\n+ }\n+ return checkSyscallSignalfdFlags(p.Flags)\n+}\n+\n+func checkSyscallTimerfdCreate(msg test.Message) error {\n+ p := pb.TimerfdCreate{}\n+ if err := proto.Unmarshal(msg.Msg, &p); err != nil {\n+ return err\n+ }\n+ if err := checkContextData(p.ContextData); err != nil {\n+ return err\n+ }\n+ if p.ClockId != unix.CLOCK_REALTIME {\n+ return fmt.Errorf(\"invalid clockid: %d\", p.ClockId)\n+ }\n+ if p.Flags != 0 {\n+ return fmt.Errorf(\"invalid flag got: %v\", p.Flags)\n+ }\n+ return nil\n+}\n+\n+func checkSyscallTimerfdSettime(msg test.Message) error {\n+ p := pb.TimerfdSetTime{}\n+ if err := proto.Unmarshal(msg.Msg, &p); err != nil {\n+ return err\n+ }\n+ if err := checkContextData(p.ContextData); err != nil {\n+ return err\n+ }\n+ if p.Fd < 0 {\n+ return fmt.Errorf(\"invalid clockid: %d\", p.Fd)\n+ }\n+ if p.FdPath == \"\" {\n+ return fmt.Errorf(\"invalid path: %q\", p.FdPath)\n+ }\n+ if p.Flags != unix.TFD_TIMER_ABSTIME {\n+ return fmt.Errorf(\"invalid flag got: %v\", p.Flags)\n+ }\n+ if p.OldValue != nil {\n+ return fmt.Errorf(\"invalid oldvalue: %v\", p.OldValue.String())\n+ }\n+ if p.NewValue == nil {\n+ return fmt.Errorf(\"invalid oldvalue: %v\", p.OldValue.String())\n+ }\n+ return nil\n+}\n+\n+func checkSyscallTimerfdGettime(msg test.Message) error {\n+ p := pb.TimerfdGetTime{}\n+ if err := proto.Unmarshal(msg.Msg, &p); err != nil {\n+ return err\n+ }\n+ if err := checkContextData(p.ContextData); err != nil {\n+ return err\n+ }\n+ if p.Fd < 0 {\n+ return fmt.Errorf(\"invalid clockid: %d\", p.Fd)\n+ }\n+ if p.FdPath == \"\" {\n+ return fmt.Errorf(\"invalid path: %q\", p.FdPath)\n+ }\n+ if p.CurValue == nil {\n+ return fmt.Errorf(\"invalid oldvalue: %v\", p.CurValue.String())\n+ }\n+ return nil\n+}\n+\n+func checkSyscallClone(msg test.Message) error {\n+ p := pb.Clone{}\n+ if err := proto.Unmarshal(msg.Msg, &p); err != nil {\n+ return err\n+ }\n+ if err := checkContextData(p.ContextData); err != nil {\n+ return err\n+ }\n+ // Flags used by default in system calls that use clone(2) in the underying.\n+ rawFlags := unix.CLONE_CHILD_CLEARTID | unix.CLONE_CHILD_SETTID | uint64(unix.SIGCHLD)\n+ // Flags used for clone(2) syscall in workload.cc\n+ cloneFlags := uint64(unix.SIGCHLD) | unix.CLONE_VFORK | unix.CLONE_FILES\n+ if p.Flags != uint64(rawFlags) && p.Flags != cloneFlags {\n+ return fmt.Errorf(\"invalid flag got: %v\", p.Flags)\n+ }\n+ if (p.Flags == uint64(rawFlags) && p.Stack != 0) || (p.Flags == cloneFlags && p.Stack == 0) {\n+ return fmt.Errorf(\"invalid stack got: %v\", p.Stack)\n+ }\n+ return nil\n+}\n+\n+func checkSyscallInotifyInit(msg test.Message) error {\n+ p := pb.InotifyInit{}\n+ if err := proto.Unmarshal(msg.Msg, &p); err != nil {\n+ return err\n+ }\n+ if err := checkContextData(p.ContextData); err != nil {\n+ return err\n+ }\n+ if !(p.Flags == 0 || p.Flags == unix.IN_NONBLOCK) {\n+ return fmt.Errorf(\"invalid flag got: %v\", p.Flags)\n+ }\n+ return nil\n+}\n+\n+func checkSyscallInotifyInitAddWatch(msg test.Message) error {\n+ p := pb.InotifyAddWatch{}\n+ if err := proto.Unmarshal(msg.Msg, &p); err != nil {\n+ return err\n+ }\n+ if err := 
checkContextData(p.ContextData); err != nil {\n+ return err\n+ }\n+ if p.Fd < 0 {\n+ return fmt.Errorf(\"invalid fd: %d\", p.Fd)\n+ }\n+ if p.FdPath == \"\" {\n+ return fmt.Errorf(\"invalid path: %v\", p.FdPath)\n+ }\n+ if want := \"timer_trace_test.abc\"; !strings.Contains(p.Pathname, want) {\n+ return fmt.Errorf(\"wrong pathname, got: %q, want: %q\", p.Pathname, want)\n+ }\n+ if want := unix.IN_NONBLOCK; want != int(p.Mask) {\n+ return fmt.Errorf(\"invalid mask: want: %v, got:%v\", want, p.Mask)\n+ }\n+ return nil\n+}\n+\n+func checkSyscallInotifyInitRmWatch(msg test.Message) error {\n+ p := pb.InotifyRmWatch{}\n+ if err := proto.Unmarshal(msg.Msg, &p); err != nil {\n+ return err\n+ }\n+ if err := checkContextData(p.ContextData); err != nil {\n+ return err\n+ }\n+ if p.Fd < 0 {\n+ return fmt.Errorf(\"invalid fd: %d\", p.Fd)\n+ }\n+ if p.FdPath == \"\" {\n+ return fmt.Errorf(\"invalid path: %q\", p.FdPath)\n+ }\n+ if p.Wd < 0 {\n+ return fmt.Errorf(\"invalid wd: %d\", p.Wd)\n+ }\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/trace/workload/BUILD",
"new_path": "test/trace/workload/BUILD",
"diff": "@@ -12,8 +12,10 @@ cc_binary(\ndeps = [\n\"//test/util:eventfd_util\",\n\"//test/util:file_descriptor\",\n+ \"//test/util:memory_util\",\n\"//test/util:multiprocess_util\",\n\"//test/util:posix_error\",\n+ \"//test/util:signal_util\",\n\"//test/util:test_util\",\n\"@com_google_absl//absl/cleanup\",\n\"@com_google_absl//absl/strings\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/trace/workload/workload.cc",
"new_path": "test/trace/workload/workload.cc",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+#include <bits/types/struct_itimerspec.h>\n#include <err.h>\n#include <fcntl.h>\n+#include <sched.h>\n+#include <stdlib.h>\n#include <sys/eventfd.h>\n+#include <sys/inotify.h>\n+#include <sys/mman.h>\n#include <sys/resource.h>\n+#include <sys/signalfd.h>\n#include <sys/socket.h>\n#include <sys/stat.h>\n+#include <sys/timerfd.h>\n#include <sys/types.h>\n#include <sys/un.h>\n#include <unistd.h>\n#include <csignal>\n+#include <cstdio>\n#include <iostream>\n#include <ostream>\n#include \"absl/time/clock.h\"\n#include \"test/util/eventfd_util.h\"\n#include \"test/util/file_descriptor.h\"\n+#include \"test/util/memory_util.h\"\n#include \"test/util/multiprocess_util.h\"\n#include \"test/util/posix_error.h\"\n#include \"test/util/test_util.h\"\n@@ -82,7 +91,6 @@ void runSocket() {\nif (pid < 0) {\n// Fork error.\nerr(1, \"fork\");\n-\n} else if (pid == 0) {\n// Child.\nclose(parent_sock); // ensure it's not mistakely used in child.\n@@ -458,6 +466,202 @@ void runAccept4() {\nclose(fd);\n}\n+void runSignalfd4() {\n+ sigset_t mask;\n+ sigemptyset(&mask);\n+ int res = signalfd(-1, &mask, SFD_CLOEXEC | SFD_NONBLOCK);\n+ if (res < 0) {\n+ err(1, \"signalfd4\");\n+ }\n+}\n+\n+void runFcntl() {\n+ const auto pathname = \"trace_test.abc\";\n+ static constexpr mode_t kDefaultDirMode = 0755;\n+ int path_or_error = mkdir(pathname, kDefaultDirMode);\n+ if (path_or_error != 0) {\n+ err(1, \"mkdir\");\n+ }\n+ int fd = open(pathname, O_DIRECTORY | O_RDONLY);\n+ if (fd < 0) {\n+ err(1, \"open\");\n+ }\n+ auto fd_closer = absl::MakeCleanup([fd] { close(fd); });\n+\n+ int res = fcntl(fd, F_GETFL);\n+ if (res < 0) {\n+ err(1, \"fcntl\");\n+ }\n+ rmdir(pathname);\n+}\n+\n+void runPipe() {\n+ int fd[2];\n+ int res = pipe(fd);\n+ if (res < 0) {\n+ err(1, \"pipe\");\n+ }\n+ close(fd[0]);\n+ close(fd[1]);\n+}\n+\n+void runPipe2() {\n+ int fd[2];\n+ int res = pipe2(fd, O_CLOEXEC);\n+ if (res < 0) {\n+ err(1, \"pipe2\");\n+ }\n+ close(fd[0]);\n+ close(fd[1]);\n+}\n+\n+void runTimerfdCreate() {\n+ int fd = timerfd_create(CLOCK_REALTIME, 0);\n+ if (fd < 0) {\n+ err(1, \"timerfd_create\");\n+ }\n+ close(fd);\n+}\n+\n+void runTimerfdSettime() {\n+ int fd = timerfd_create(CLOCK_REALTIME, 0);\n+ if (fd < 0) {\n+ err(1, \"timerfd_create\");\n+ }\n+ auto fd_closer = absl::MakeCleanup([fd] { close(fd); });\n+\n+ constexpr auto kInitial = absl::Milliseconds(10);\n+ constexpr auto kInterval = absl::Milliseconds(25);\n+ const itimerspec val = {absl::ToTimespec(kInitial),\n+ absl::ToTimespec(kInterval)};\n+ int res = timerfd_settime(fd, TFD_TIMER_ABSTIME, &val, 0);\n+ if (res < 0) {\n+ err(1, \"timerfd_settime\");\n+ }\n+}\n+\n+void runTimerfdGettime() {\n+ int fd = timerfd_create(CLOCK_REALTIME, 0);\n+ if (fd < 0) {\n+ err(1, \"timerfd_create\");\n+ }\n+ auto fd_closer = absl::MakeCleanup([fd] { close(fd); });\n+\n+ itimerspec val;\n+ int res = timerfd_gettime(fd, &val);\n+ if (res < 0) {\n+ err(1, \"timerfd_gettime\");\n+ }\n+}\n+// signalfd(2), fork(2), and vfork(2) system calls are not supported in arm\n+// architecture.\n+#ifdef __x86_64__\n+void runFork() {\n+ pid_t pid = syscall(__NR_fork);\n+ if (pid < 0) {\n+ err(1, \"fork\");\n+ } else if (pid == 0) {\n+ exit(0);\n+ }\n+ RetryEINTR(waitpid)(pid, nullptr, 0);\n+}\n+\n+void runVfork() {\n+ pid_t pid = vfork();\n+ if (pid < 0) {\n+ err(1, \"vfork\");\n+ } else if (pid == 0) {\n+ _exit(0);\n+ }\n+ RetryEINTR(waitpid)(pid, nullptr, 0);\n+}\n+\n+void runSignalfd() 
{\n+ sigset_t mask;\n+ sigemptyset(&mask);\n+ constexpr int kSizeofKernelSigset = 8;\n+ int res = syscall(__NR_signalfd, -1, &mask, kSizeofKernelSigset);\n+ if (res < 0) {\n+ err(1, \"signalfd\");\n+ }\n+}\n+#endif\n+\n+void runClone() {\n+ Mapping child_stack = ASSERT_NO_ERRNO_AND_VALUE(\n+ MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));\n+ int child_pid;\n+ child_pid = clone(\n+ +[](void*) { return 0; },\n+ reinterpret_cast<void*>(child_stack.addr() + kPageSize),\n+ SIGCHLD | CLONE_VFORK | CLONE_FILES, nullptr);\n+\n+ if (child_pid < 0) {\n+ err(1, \"clone\");\n+ }\n+ RetryEINTR(waitpid)(child_pid, nullptr, 0);\n+}\n+\n+void runInotifyInit() {\n+ int fd = inotify_init();\n+ if (fd < 0) {\n+ err(1, \"inotify_init\");\n+ }\n+ close(fd);\n+}\n+\n+void runInotifyInit1() {\n+ int fd = inotify_init1(IN_NONBLOCK);\n+ if (fd < 0) {\n+ err(1, \"inotify_init1\");\n+ }\n+ close(fd);\n+}\n+\n+void runInotifyAddWatch() {\n+ const auto pathname = \"timer_trace_test.abc\";\n+ static constexpr mode_t kDefaultDirMode = 0755;\n+ int path_or_error = mkdir(pathname, kDefaultDirMode);\n+ if (path_or_error != 0) {\n+ err(1, \"mkdir\");\n+ }\n+ int fd = inotify_init1(IN_NONBLOCK);\n+ if (fd < 0) {\n+ err(1, \"inotify_init1\");\n+ }\n+ auto fd_closer = absl::MakeCleanup([fd] { close(fd); });\n+\n+ int res = inotify_add_watch(fd, pathname, IN_NONBLOCK);\n+ if (res < 0) {\n+ err(1, \"inotify_add_watch\");\n+ }\n+ rmdir(pathname);\n+}\n+\n+void runInotifyRmWatch() {\n+ const auto pathname = \"timer_trace_test.abc\";\n+ static constexpr mode_t kDefaultDirMode = 0755;\n+ int path_or_error = mkdir(pathname, kDefaultDirMode);\n+ if (path_or_error != 0) {\n+ err(1, \"mkdir\");\n+ }\n+ int fd = inotify_init1(IN_NONBLOCK);\n+ if (fd < 0) {\n+ err(1, \"inotify_init1\");\n+ }\n+ auto fd_closer = absl::MakeCleanup([fd] { close(fd); });\n+\n+ int wd = inotify_add_watch(fd, pathname, IN_NONBLOCK);\n+ if (wd < 0) {\n+ err(1, \"inotify_add_watch\");\n+ }\n+ int res = inotify_rm_watch(fd, wd);\n+ if (res < 0) {\n+ err(1, \"inotify_rm_watch\");\n+ }\n+ rmdir(pathname);\n+}\n+\n} // namespace testing\n} // namespace gvisor\n@@ -481,6 +685,25 @@ int main(int argc, char** argv) {\n::gvisor::testing::runBind();\n::gvisor::testing::runAccept();\n::gvisor::testing::runAccept4();\n+ ::gvisor::testing::runSignalfd4();\n+ ::gvisor::testing::runFcntl();\n+ ::gvisor::testing::runPipe();\n+ ::gvisor::testing::runPipe2();\n+ ::gvisor::testing::runTimerfdCreate();\n+ ::gvisor::testing::runTimerfdSettime();\n+ ::gvisor::testing::runTimerfdGettime();\n+ ::gvisor::testing::runClone();\n+ ::gvisor::testing::runInotifyInit();\n+ ::gvisor::testing::runInotifyInit1();\n+ ::gvisor::testing::runInotifyAddWatch();\n+ ::gvisor::testing::runInotifyRmWatch();\n+// signalfd(2), fork(2), and vfork(2) system calls are not supported in arm\n+// architecture.\n+#ifdef __x86_64__\n+ ::gvisor::testing::runSignalfd();\n+ ::gvisor::testing::runFork();\n+ ::gvisor::testing::runVfork();\n+#endif\n// Run chroot at the end since it changes the root for all other tests.\n::gvisor::testing::runChroot();\nreturn 0;\n"
}
] | Go | Apache License 2.0 | google/gvisor | Adding more trace point integration tests for the following syscalls:
- signalfd
- signalfd4
- fcntl
- pipe
- pipe2
- timerfd_create
- timerfd_settime
- timerfd_gettime
- fork
- vfork
- inotify_init
- inotify_init1
- inotify_add_watch
- inotify_rm_watch
- clone
Updates #4805
PiperOrigin-RevId: 492297673 |
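The test above registers one checker function per trace-point message type, decodes each received protobuf payload, and counts how many messages every checker matched. Below is a minimal, dependency-free sketch of that dispatch-by-type pattern; the message type name and fields are illustrative placeholders, not gVisor's actual protos.

package main

import "fmt"

// message is a simplified stand-in for a decoded trace point.
type message struct {
	msgType string
	fd      int
}

// checker pairs a validation function with a hit counter, mirroring the
// matchers map built by the test above.
type checker struct {
	check func(message) error
	count int
}

func main() {
	matchers := map[string]*checker{
		"syscall/pipe": {check: func(m message) error {
			if m.fd < 0 {
				return fmt.Errorf("invalid fd: %d", m.fd)
			}
			return nil
		}},
	}

	msgs := []message{{msgType: "syscall/pipe", fd: 3}, {msgType: "syscall/unknown", fd: 0}}
	for _, m := range msgs {
		h, ok := matchers[m.msgType]
		if !ok {
			fmt.Printf("unexpected message type %q\n", m.msgType)
			continue
		}
		if err := h.check(m); err != nil {
			fmt.Printf("checker failed: %v\n", err)
			continue
		}
		h.count++
	}
	for t, h := range matchers {
		fmt.Printf("%s matched %d time(s)\n", t, h.count)
	}
}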
259,858 | 01.12.2022 15:36:40 | 28,800 | 175be3501193699bed06ab703832863802757650 | Fix lock violations.
The locks should not be rewritten. This is especially confusing with locking
seemingly respected in the first part of this function. Instead, just update
the relevant fields while holding appropriate locks. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/internal/network/endpoint.go",
"new_path": "pkg/tcpip/transport/internal/network/endpoint.go",
"diff": "@@ -118,10 +118,9 @@ type multicastMembership struct {\n// Init initializes the endpoint.\nfunc (e *Endpoint) Init(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, transProto tcpip.TransportProtocolNumber, ops *tcpip.SocketOptions, waiterQueue *waiter.Queue) {\ne.mu.Lock()\n- memberships := e.multicastMemberships\n- e.mu.Unlock()\n- if memberships != nil {\n- panic(fmt.Sprintf(\"endpoint is already initialized; got e.multicastMemberships = %#v, want = nil\", memberships))\n+ defer e.mu.Unlock()\n+ if e.multicastMemberships != nil {\n+ panic(fmt.Sprintf(\"endpoint is already initialized; got e.multicastMemberships = %#v, want = nil\", e.multicastMemberships))\n}\nswitch netProto {\n@@ -130,27 +129,24 @@ func (e *Endpoint) Init(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, tr\npanic(fmt.Sprintf(\"invalid protocol number = %d\", netProto))\n}\n- *e = Endpoint{\n- stack: s,\n- ops: ops,\n- netProto: netProto,\n- transProto: transProto,\n- waiterQueue: waiterQueue,\n-\n- info: stack.TransportEndpointInfo{\n+ e.stack = s\n+ e.ops = ops\n+ e.netProto = netProto\n+ e.transProto = transProto\n+ e.waiterQueue = waiterQueue\n+ e.infoMu.Lock()\n+ e.info = stack.TransportEndpointInfo{\nNetProto: netProto,\nTransProto: transProto,\n- },\n- effectiveNetProto: netProto,\n- ipv4TTL: tcpip.UseDefaultIPv4TTL,\n- ipv6HopLimit: tcpip.UseDefaultIPv6HopLimit,\n- // Linux defaults to TTL=1.\n- multicastTTL: 1,\n- multicastMemberships: make(map[multicastMembership]struct{}),\n}\n+ e.infoMu.Unlock()\n+ e.effectiveNetProto = netProto\n+ e.ipv4TTL = tcpip.UseDefaultIPv4TTL\n+ e.ipv6HopLimit = tcpip.UseDefaultIPv6HopLimit\n- e.mu.Lock()\n- defer e.mu.Unlock()\n+ // Linux defaults to TTL=1.\n+ e.multicastTTL = 1\n+ e.multicastMemberships = make(map[multicastMembership]struct{})\ne.setEndpointState(transport.DatagramEndpointStateInitial)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix lock violations.
The locks should not be rewritten. This is especially confusing with locking
seemingly respected in the first part of this function. Instead, just update
the relevant fields while holding appropriate locks.
PiperOrigin-RevId: 492317437 |
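The fix above replaces a wholesale *e = Endpoint{...} reassignment, which also overwrote the embedded mutex and fields guarded by other locks, with per-field assignment inside the critical section. A self-contained sketch of the same pattern, using a hypothetical Endpoint type rather than the netstack one:

package main

import "sync"

// Endpoint is a hypothetical stand-in for a type that embeds its own lock;
// the field names are illustrative only.
type Endpoint struct {
	mu    sync.Mutex
	ttl   int
	conns map[string]struct{}
}

// Init re-initializes the endpoint. Instead of `*e = Endpoint{...}`, which
// would also overwrite e.mu and any field protected by another lock, it
// assigns each field individually while holding the lock.
func (e *Endpoint) Init() {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.ttl = 1
	e.conns = make(map[string]struct{})
}

func main() {
	e := &Endpoint{}
	e.Init()
	_ = e
}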
259,909 | 01.12.2022 16:03:04 | 28,800 | 99cc6c2dea59ca51f7672359ed4321041fae02a4 | Display the proper root path in mountinfo. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"diff": "@@ -920,7 +920,7 @@ func (fs *filesystem) PrependPath(ctx context.Context, vfsroot, vd vfs.VirtualDe\nif mnt == vfsroot.Mount() && &d.vfsd == vfsroot.Dentry() {\nreturn vfs.PrependPathAtVFSRootError{}\n}\n- if &d.vfsd == mnt.Root() {\n+ if mnt != nil && &d.vfsd == mnt.Root() {\nreturn nil\n}\nif d.parent == nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/mount.go",
"new_path": "pkg/sentry/vfs/mount.go",
"diff": "@@ -1161,16 +1161,27 @@ func (vfs *VirtualFilesystem) GenerateProcMountInfo(ctx context.Context, taskRoo\nmount: mnt,\ndentry: mnt.root,\n}\n- path, err := vfs.PathnameReachable(ctx, taskRootDir, mntRootVD)\n+ pathFromRoot, err := vfs.PathnameReachable(ctx, taskRootDir, mntRootVD)\nif err != nil {\n// For some reason we didn't get a path. Log a warning\n// and run with empty path.\nctx.Warningf(\"VFS.GenerateProcMountInfo: error getting pathname for mount root %+v: %v\", mnt.root, err)\n- path = \"\"\n+ continue\n}\n- if path == \"\" {\n- // Either an error occurred, or path is not reachable\n- // from root.\n+ if pathFromRoot == \"\" {\n+ // The path is not reachable from root.\n+ continue\n+ }\n+ var pathFromFS string\n+ pathFromFS, err = vfs.PathnameInFilesystem(ctx, mntRootVD)\n+ if err != nil {\n+ // For some reason we didn't get a path. Log a warning\n+ // and run with empty path.\n+ ctx.Warningf(\"VFS.GenerateProcMountInfo: error getting pathname for mount root %+v: %v\", mnt.root, err)\n+ continue\n+ }\n+ if pathFromFS == \"\" {\n+ // The path is not reachable from root.\ncontinue\n}\n// Stat the mount root to get the major/minor device numbers.\n@@ -1208,13 +1219,10 @@ func (vfs *VirtualFilesystem) GenerateProcMountInfo(ctx context.Context, taskRoo\n// (4) Root: the pathname of the directory in the filesystem\n// which forms the root of this mount.\n- //\n- // NOTE(b/78135857): This will always be \"/\" until we implement\n- // bind mounts.\n- fmt.Fprintf(buf, \"/ \")\n+ fmt.Fprintf(buf, \"%s \", manglePath(pathFromFS))\n// (5) Mount point (relative to process root).\n- fmt.Fprintf(buf, \"%s \", manglePath(path))\n+ fmt.Fprintf(buf, \"%s \", manglePath(pathFromRoot))\n// (6) Mount options.\nopts := \"rw\"\n@@ -1241,7 +1249,7 @@ func (vfs *VirtualFilesystem) GenerateProcMountInfo(ctx context.Context, taskRoo\nfmt.Fprintf(buf, \"none \")\n// (11) Superblock options, and final newline.\n- fmt.Fprintf(buf, \"%s\\n\", superBlockOpts(path, mnt))\n+ fmt.Fprintf(buf, \"%s\\n\", superBlockOpts(pathFromRoot, mnt))\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/pathname.go",
"new_path": "pkg/sentry/vfs/pathname.go",
"diff": "@@ -133,6 +133,29 @@ loop:\nreturn b.String(), nil\n}\n+// PathnameInFilesystem returns an absolute path to vd relative to vd's\n+// Filesystem root. It also appends //deleted to for disowned entries. It is\n+// equivalent to Linux's dentry_path().\n+func (vfs *VirtualFilesystem) PathnameInFilesystem(ctx context.Context, vd VirtualDentry) (string, error) {\n+ b := getFSPathBuilder()\n+ defer putFSPathBuilder(b)\n+ if vd.dentry.IsDead() {\n+ b.PrependString(\"//deleted\")\n+ }\n+ if err := vd.mount.fs.impl.PrependPath(ctx, VirtualDentry{}, VirtualDentry{dentry: vd.dentry}, b); err != nil {\n+ // PrependPath returns an error if it encounters a filesystem root before\n+ // the provided vfsroot. We don't provide a vfsroot, so encountering this\n+ // error is expected and can be ignored.\n+ switch err.(type) {\n+ case PrependPathAtNonMountRootError:\n+ default:\n+ return \"\", err\n+ }\n+ }\n+ b.PrependByte('/')\n+ return b.String(), nil\n+}\n+\n// PathnameForGetcwd returns an absolute pathname to vd, consistent with\n// Linux's sys_getcwd().\nfunc (vfs *VirtualFilesystem) PathnameForGetcwd(ctx context.Context, vfsroot, vd VirtualDentry) (string, error) {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/mount.cc",
"new_path": "test/syscalls/linux/mount.cc",
"diff": "@@ -1397,6 +1397,26 @@ TEST(MountTest, BindParentToChild) {\nASSERT_EQ(opt2, opt3);\n}\n+TEST(MountTest, MountInfoHasRoot) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ auto const parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ auto const mount = ASSERT_NO_ERRNO_AND_VALUE(\n+ Mount(\"\", parent.path(), \"tmpfs\", 0, \"mode=0123\", 0));\n+ auto const child =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(parent.path()));\n+ auto const bind_mount = Mount(child.path(), child.path(), \"\", MS_BIND, \"\", 0);\n+ std::vector<ProcMountInfoEntry> mounts =\n+ ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountInfoEntries());\n+ for (const auto& e : mounts) {\n+ if (e.mount_point == child.path()) {\n+ ASSERT_EQ(e.root, JoinPath(\"/\", Basename(child.path())));\n+ }\n+ if (e.mount_point == parent.path()) {\n+ ASSERT_EQ(e.root, \"/\");\n+ }\n+ }\n+}\n+\n} // namespace\n} // namespace testing\n"
}
] | Go | Apache License 2.0 | google/gvisor | Display the proper root path in mountinfo.
PiperOrigin-RevId: 492323370 |
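The commit above makes field 4 (root) of /proc/[pid]/mountinfo report the mounted dentry's path within its filesystem instead of a hard-coded "/". A small sketch that prints the root and mount-point columns using the field order documented in proc(5); it only reads the file and assumes nothing about which mounts are present.

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// Prints the "root" (field 4) and "mount point" (field 5) columns of
// /proc/self/mountinfo, the two fields exercised by the test added above.
func main() {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer f.Close()

	s := bufio.NewScanner(f)
	for s.Scan() {
		fields := strings.Fields(s.Text())
		if len(fields) < 5 {
			continue
		}
		fmt.Printf("root=%s mount_point=%s\n", fields[3], fields[4])
	}
}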
259,985 | 02.12.2022 16:43:44 | 28,800 | d0ae59368d8a6c4e668301f0229d6329db52be18 | cgroupfs: Fix lock ordering between kernfs.Filesystem.mu and TaskSet.mu.
We can't DecRef a cgroup with TaskSet.mu held as it leads to circular
locking. Restructure task creation to drop cgroup refs outside the
TaskSet.mu critical section.
Reported-by:
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_start.go",
"new_path": "pkg/sentry/kernel/task_start.go",
"diff": "@@ -19,7 +19,6 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/atomicbitops\"\n- \"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n\"gvisor.dev/gvisor/pkg/hostarch\"\n@@ -178,10 +177,8 @@ func (ts *TaskSet) newTask(ctx context.Context, cfg *TaskConfig) (*Task, error)\nvar (\ncg Cgroup\n- charged bool\n- cu cleanup.Cleanup\n+ charged, committed bool\n)\n- defer cu.Clean()\n// Reserve cgroup PIDs controller charge. This is either commited when the\n// new task enters the cgroup below, or rolled back on failure.\n@@ -196,12 +193,16 @@ func (ts *TaskSet) newTask(ctx context.Context, cfg *TaskConfig) (*Task, error)\nreturn nil, err\n}\nif charged {\n- cu.Add(func() {\n+ defer func() {\n+ if !committed {\nif err := cg.Charge(t, cg.Dentry, CgroupControllerPIDs, CgroupResourcePID, -1); err != nil {\npanic(fmt.Sprintf(\"Failed to clean up PIDs charge on task creation failure: %v\", err))\n}\n- cg.DecRef(ctx) // Ref from ChargeFor.\n- })\n+ }\n+ // Ref from ChargeFor. Note that we need to drop this outside of\n+ // TaskSet.mu critical sections.\n+ cg.DecRef(ctx)\n+ }()\n}\n}\n@@ -238,11 +239,7 @@ func (ts *TaskSet) newTask(ctx context.Context, cfg *TaskConfig) (*Task, error)\n// srcT may be nil, in which case we default to root cgroups.\nt.EnterInitialCgroups(srcT)\n-\n- cu.Release()\n- if charged {\n- cg.decRef() // Ref from ChargeFor.\n- }\n+ committed = true\nif tg.leader == nil {\n// New thread group.\n"
}
] | Go | Apache License 2.0 | google/gvisor | cgroupfs: Fix lock ordering between kernfs.Filesystem.mu and TaskSet.mu.
We can't DecRef a cgroup with TaskSet.mu held as it leads to circular
locking. Restructure task creation to drop cgroup refs outside the
TaskSet.mu critical section.
Reported-by: syzbot+16a334ab1d6873db18f2@syzkaller.appspotmail.com
Reported-by: syzbot+fe1b962d430d1170e671@syzkaller.appspotmail.com
PiperOrigin-RevId: 492589548 |
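The restructuring above follows a common shape: mark the operation committed at the end of the locked section, roll back in a deferred function only if it never committed, and perform the reference drop after the lock is released so the release path cannot re-enter the lock. A condensed sketch with placeholder names (mu stands in for TaskSet.mu, release for the cgroup DecRef):

package main

import (
	"fmt"
	"sync"
)

var mu sync.Mutex // stand-in for a broad lock such as TaskSet.mu

// release is a stand-in for dropping a reference; per the fix above it must
// not run while mu is held.
func release() { fmt.Println("released ref") }

// newTask sketches the structure of the fix: the deferred rollback and the
// unconditional ref drop both run after the locked section has ended.
func newTask(fail bool) error {
	committed := false
	defer func() {
		if !committed {
			fmt.Println("rolling back charge")
		}
		release() // safe: mu is not held once the function returns
	}()

	mu.Lock()
	if fail {
		mu.Unlock()
		return fmt.Errorf("task creation failed")
	}
	// ... create the task while holding mu ...
	committed = true
	mu.Unlock()
	return nil
}

func main() {
	_ = newTask(false)
	_ = newTask(true)
}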
259,909 | 06.12.2022 14:19:00 | 28,800 | 6fb2a265a109554224f4f9d0708877521558a349 | Remove FUSE feature flag and enable by default. | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -162,7 +162,6 @@ dev: $(RUNTIME_BIN) ## Installs a set of local runtimes. Requires sudo.\n@$(call configure_noreload,$(RUNTIME),--net-raw)\n@$(call configure_noreload,$(RUNTIME)-d,--net-raw --debug --strace --log-packets)\n@$(call configure_noreload,$(RUNTIME)-p,--net-raw --profile)\n- @$(call configure_noreload,$(RUNTIME)-fuse-d,--net-raw --debug --strace --log-packets --fuse)\n@$(call configure_noreload,$(RUNTIME)-cgroup-d,--net-raw --debug --strace --log-packets --cgroupfs)\n@$(call configure_noreload,$(RUNTIME)-systemd-d,--net-raw --debug --strace --log-packets --systemd-cgroup)\n@$(call reload_docker)\n"
},
{
"change_type": "MODIFY",
"old_path": "images/syzkaller/default-gvisor-config.cfg",
"new_path": "images/syzkaller/default-gvisor-config.cfg",
"diff": "\"type\": \"gvisor\",\n\"vm\": {\n\"count\": 1,\n- \"runsc_args\": \"--debug --network none --platform ptrace --vfs2 --fuse -net-raw -watchdog-action=panic\"\n+ \"runsc_args\": \"--debug --network none --platform ptrace --vfs2 -net-raw -watchdog-action=panic\"\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/dev.go",
"new_path": "pkg/sentry/fsimpl/fuse/dev.go",
"diff": "@@ -36,10 +36,6 @@ type fuseDevice struct{}\n// Open implements vfs.Device.Open.\nfunc (fuseDevice) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {\n- if !kernel.FUSEEnabled {\n- return nil, linuxerr.ENOENT\n- }\n-\nvar fd DeviceFD\nif err := fd.vfsfd.Init(&fd, opts.Flags, mnt, vfsd, &vfs.FileDescriptionOptions{\nUseDentryMetadata: true,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -81,10 +81,6 @@ import (\n// TODO(gvisor.dev/issue/7911): Remove when 9P is deleted.\nvar LISAFSEnabled = false\n-// FUSEEnabled is set to true when FUSE is enabled. Added as a global to allow\n-// easy access everywhere. To be removed once FUSE is completed.\n-var FUSEEnabled = false\n-\n// userCounters is a set of user counters.\n//\n// +stateify savable\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/syslog.go",
"new_path": "pkg/sentry/kernel/syslog.go",
"diff": "@@ -106,10 +106,8 @@ func (s *syslog) Log() []byte {\ntime += rand.Float64() / 2\ns.msg = append(s.msg, []byte(fmt.Sprintf(format, time, \"Setting up VFS...\"))...)\n- if FUSEEnabled {\ntime += rand.Float64() / 2\ns.msg = append(s.msg, []byte(fmt.Sprintf(format, time, \"Setting up FUSE...\"))...)\n- }\ntime += rand.Float64() / 2\ns.msg = append(s.msg, []byte(fmt.Sprintf(format, time, \"Ready!\"))...)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -245,7 +245,6 @@ func New(args Args) (*Loader, error) {\nreturn nil, fmt.Errorf(\"setting up memory usage: %w\", err)\n}\n- kernel.FUSEEnabled = args.Conf.FUSE\nkernel.LISAFSEnabled = args.Conf.Lisafs\n// Make host FDs stable between invocations. Host FDs must map to the exact\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/vfs.go",
"new_path": "runsc/boot/vfs.go",
"diff": "@@ -122,11 +122,9 @@ func registerFilesystems(k *kernel.Kernel) error {\n}\n}\n- if kernel.FUSEEnabled {\nif err := fuse.Register(vfsObj); err != nil {\nreturn fmt.Errorf(\"registering fusedev: %w\", err)\n}\n- }\na, err := devtmpfs.NewAccessor(ctx, vfsObj, creds, devtmpfs.Name)\nif err != nil {\n@@ -149,11 +147,9 @@ func registerFilesystems(k *kernel.Kernel) error {\n}\n}\n- if kernel.FUSEEnabled {\nif err := fuse.CreateDevtmpfsFile(ctx, a); err != nil {\nreturn fmt.Errorf(\"creating fusedev devtmpfs files: %w\", err)\n}\n- }\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/config.go",
"new_path": "runsc/config/config.go",
"diff": "@@ -225,9 +225,6 @@ type Config struct {\n// Enable lisafs.\nLisafs bool `flag:\"lisafs\"`\n- // Enables FUSE usage.\n- FUSE bool `flag:\"fuse\"`\n-\n// Allows overriding of flags in OCI annotations.\nAllowFlagOverride bool `flag:\"allow-flag-override\"`\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/flags.go",
"new_path": "runsc/config/flags.go",
"diff": "@@ -85,7 +85,7 @@ func RegisterFlags(flagSet *flag.FlagSet) {\nflagSet.Var(hostFifoPtr(HostFifoNone), \"host-fifo\", \"controls permission to access host FIFOs (or named pipes). Values: none|open, default: none\")\nflagSet.Bool(\"vfs2\", true, \"DEPRECATED: this flag has no effect.\")\n- flagSet.Bool(\"fuse\", false, \"TEST ONLY; use while FUSE in VFSv2 is landing. This allows the use of the new experimental FUSE filesystem.\")\n+ flagSet.Bool(\"fuse\", true, \"DEPRECATED: this flag has no effect.\")\nflagSet.Bool(\"lisafs\", true, \"Enables lisafs protocol instead of 9P.\")\nflagSet.Bool(\"cgroupfs\", false, \"Automatically mount cgroupfs.\")\nflagSet.Bool(\"ignore-cgroups\", false, \"don't configure cgroups.\")\n"
},
{
"change_type": "MODIFY",
"old_path": "test/fuse/BUILD",
"new_path": "test/fuse/BUILD",
"diff": "@@ -3,81 +3,81 @@ load(\"//test/runner:defs.bzl\", \"syscall_test\")\npackage(licenses = [\"notice\"])\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:stat_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:statfs_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:open_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:release_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:mknod_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:symlink_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:readlink_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:mkdir_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:read_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:write_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:rmdir_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:readdir_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:create_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:unlink_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:setstat_test\",\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/fuse/linux:mount_test\",\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/defs.bzl",
"new_path": "test/runner/defs.bzl",
"diff": "@@ -69,7 +69,6 @@ def _syscall_test(\nfile_access = \"exclusive\",\noverlay = False,\nadd_host_communication = False,\n- fuse = False,\ncontainer = None,\none_sandbox = True,\n**kwargs):\n@@ -82,8 +81,6 @@ def _syscall_test(\nname += \"_shared\"\nif overlay:\nname += \"_overlay\"\n- if fuse:\n- name += \"_fuse\"\nif network != \"none\":\nname += \"_\" + network + \"net\"\n@@ -136,7 +133,6 @@ def _syscall_test(\n\"--file-access=\" + file_access,\n\"--overlay=\" + str(overlay),\n\"--add-host-communication=\" + str(add_host_communication),\n- \"--fuse=\" + str(fuse),\n\"--strace=\" + str(debug),\n\"--debug=\" + str(debug),\n\"--container=\" + str(container),\n@@ -169,7 +165,6 @@ def syscall_test(\nadd_host_communication = False,\nadd_hostinet = False,\none_sandbox = True,\n- fuse = False,\nallow_native = True,\ndebug = True,\ncontainer = None,\n@@ -184,7 +179,6 @@ def syscall_test(\nadd_host_communication: setup UDS and pipe external communication for tests.\nadd_hostinet: add a hostinet test.\none_sandbox: runs each unit test in a new sandbox instance.\n- fuse: enable FUSE support.\nallow_native: generate a native test variant.\ndebug: enable debug output.\ncontainer: Run the test in a container. If None, determined from other information.\n@@ -194,8 +188,7 @@ def syscall_test(\nif not tags:\ntags = []\n- if not fuse and allow_native:\n- # Generate a native test if fuse is not required and if it is allowed.\n+ if allow_native:\n_syscall_test(\ntest = test,\nplatform = \"native\",\n@@ -215,7 +208,6 @@ def syscall_test(\nuse_tmpfs = use_tmpfs,\nadd_host_communication = add_host_communication,\ntags = platform_tags + tags,\n- fuse = fuse,\ndebug = debug,\ncontainer = container,\none_sandbox = one_sandbox,\n@@ -230,7 +222,6 @@ def syscall_test(\nadd_host_communication = add_host_communication,\ntags = platforms.get(default_platform, []) + tags,\ndebug = debug,\n- fuse = fuse,\ncontainer = container,\none_sandbox = one_sandbox,\noverlay = True,\n@@ -245,7 +236,6 @@ def syscall_test(\nadd_host_communication = add_host_communication,\ntags = platforms.get(default_platform, []) + tags,\ndebug = debug,\n- fuse = fuse,\ncontainer = container,\none_sandbox = one_sandbox,\n**kwargs\n@@ -262,6 +252,5 @@ def syscall_test(\ncontainer = container,\none_sandbox = one_sandbox,\nfile_access = \"shared\",\n- fuse = fuse,\n**kwargs\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/main.go",
"new_path": "test/runner/main.go",
"diff": "@@ -51,7 +51,6 @@ var (\nuseTmpfs = flag.Bool(\"use-tmpfs\", false, \"mounts tmpfs for /tmp\")\nfileAccess = flag.String(\"file-access\", \"exclusive\", \"mounts root in exclusive or shared mode\")\noverlay = flag.Bool(\"overlay\", false, \"wrap filesystem mounts with writable tmpfs overlay\")\n- fuse = flag.Bool(\"fuse\", false, \"enable FUSE\")\ncontainer = flag.Bool(\"container\", false, \"run tests in their own namespaces (user ns, network ns, etc), pretending to be root. Implicitly enabled if network=host, or if using network namespaces\")\nsetupContainerPath = flag.String(\"setup-container\", \"\", \"path to setup_container binary (for use with --container)\")\ntrace = flag.Bool(\"trace\", false, \"enables all trace points\")\n@@ -206,9 +205,6 @@ func runRunsc(tc *gtest.TestCase, spec *specs.Spec) error {\nif *overlay {\nargs = append(args, \"-overlay2=all:/tmp\")\n}\n- if *fuse {\n- args = append(args, \"-fuse\")\n- }\nif *debug {\nargs = append(args, \"-debug\", \"-log-packets=true\")\n}\n@@ -432,17 +428,11 @@ func runTestCaseRunsc(testBin string, tc *gtest.TestCase, args []string, t *test\nconst (\nplatformVar = \"TEST_ON_GVISOR\"\nnetworkVar = \"GVISOR_NETWORK\"\n- fuseVar = \"FUSE_ENABLED\"\n)\nenv := append(os.Environ(), platformVar+\"=\"+*platform, networkVar+\"=\"+*network)\nif *platformSupport != \"\" {\nenv = append(env, fmt.Sprintf(\"%s=%s\", platformSupportEnvVar, *platformSupport))\n}\n- if *fuse {\n- env = append(env, fuseVar+\"=TRUE\")\n- } else {\n- env = append(env, fuseVar+\"=FALSE\")\n- }\n// Remove shard env variables so that the gunit binary does not try to\n// interpret them.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -131,7 +131,7 @@ syscall_test(\n)\nsyscall_test(\n- fuse = \"True\",\n+ allow_native = False,\ntest = \"//test/syscalls/linux:dev_test\",\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/dev.cc",
"new_path": "test/syscalls/linux/dev.cc",
"diff": "@@ -154,10 +154,6 @@ TEST(DevTest, TTYExists) {\n}\nTEST(DevTest, OpenDevFuse) {\n- // Note(gvisor.dev/issue/3076) This won't work in the sentry until the new\n- // device registration is complete.\n- SKIP_IF(IsRunningOnGvisor() || !IsFUSEEnabled());\n-\nASSERT_NO_ERRNO_AND_VALUE(Open(\"/dev/fuse\", O_RDONLY));\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/test_util.cc",
"new_path": "test/util/test_util.cc",
"diff": "@@ -41,7 +41,6 @@ namespace gvisor {\nnamespace testing {\nconstexpr char kGvisorNetwork[] = \"GVISOR_NETWORK\";\n-constexpr char kFuseEnabled[] = \"FUSE_ENABLED\";\nbool IsRunningOnGvisor() { return GvisorPlatform() != Platform::kNative; }\n@@ -59,11 +58,6 @@ bool IsRunningWithHostinet() {\nreturn env && strcmp(env, \"host\") == 0;\n}\n-bool IsFUSEEnabled() {\n- const char* env = getenv(kFuseEnabled);\n- return env && strcmp(env, \"TRUE\") == 0;\n-}\n-\n// Inline cpuid instruction. Preserve %ebx/%rbx register. In PIC compilations\n// %ebx contains the address of the global offset table. %rbx is occasionally\n// used to address stack variables in presence of dynamic allocas.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/test_util.h",
"new_path": "test/util/test_util.h",
"diff": "@@ -224,7 +224,6 @@ constexpr char kFuchsia[] = \"fuchsia\";\nbool IsRunningOnGvisor();\nconst std::string GvisorPlatform();\nbool IsRunningWithHostinet();\n-bool IsFUSEEnabled();\n#ifdef __linux__\nvoid SetupGvisorDeathTest();\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove FUSE feature flag and enable by default.
PiperOrigin-RevId: 493410677 |
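Besides enabling FUSE unconditionally, the change keeps --fuse around as a deprecated no-op so existing command lines do not break. A tiny sketch of that compatibility pattern with the standard flag package; the flag name below is made up for illustration.

package main

import (
	"flag"
	"fmt"
)

func main() {
	// The flag is retained so old invocations still parse, but its value is
	// ignored because the feature is now always on.
	deprecated := flag.Bool("enable-foo", true, "DEPRECATED: this flag has no effect.")
	flag.Parse()
	_ = *deprecated

	fmt.Println("feature foo is always enabled")
}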
259,909 | 07.12.2022 10:30:50 | 28,800 | f4fb010ef487086e733fb116dd98e4e87208b544 | Join subcontainer cgroup in cgroupfs instead of parent if it exists.
This is a no-op for most workloads; limits set on the parent affect
the child, and resource accounting of a child shows up in the parent.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -244,7 +244,7 @@ func New(conf *config.Config, args Args) (*Container, error) {\nif args.Spec.Linux.CgroupsPath == \"\" && !conf.TestOnlyAllowRunAsCurrentUserWithoutChroot {\nargs.Spec.Linux.CgroupsPath = \"/\" + args.ID\n}\n- var subCgroup, parentCgroup cgroup.Cgroup\n+ var subCgroup, parentCgroup, containerCgroup cgroup.Cgroup\nif !conf.IgnoreCgroups {\nvar err error\n@@ -254,13 +254,20 @@ func New(conf *config.Config, args Args) (*Container, error) {\nif err != nil {\nreturn nil, fmt.Errorf(\"cannot set up cgroup for root: %w\", err)\n}\n+ // Join the child cgroup when using cgroupfs. Joining non leaf-node\n+ // cgroups is illegal in Linux and will return EBUSY.\n+ if subCgroup != nil && !conf.SystemdCgroup {\n+ containerCgroup = subCgroup\n+ } else {\n+ containerCgroup = parentCgroup\n+ }\n}\nc.CompatCgroup = cgroup.CgroupJSON{Cgroup: subCgroup}\noverlayFilestoreFile, err := createOverlayFilestore(conf.GetOverlay2())\nif err != nil {\nreturn nil, err\n}\n- if err := runInCgroup(parentCgroup, func() error {\n+ if err := runInCgroup(containerCgroup, func() error {\nioFiles, specFile, err := c.createGoferProcess(args.Spec, conf, args.BundleDir, args.Attached)\nif err != nil {\nreturn fmt.Errorf(\"cannot create gofer process: %w\", err)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Join subcontainer cgroup in cgroupfs instead of parent if it exists.
This is a no-op for most workloads; limits set on the parent affect
the child, and resource accounting of a child shows up in the parent.
Fixes #8269
PiperOrigin-RevId: 493643530 |
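Per the comment added in the change above, joining a non-leaf cgroup is rejected by Linux with EBUSY, so with plain cgroupfs the sandbox joins the per-container (leaf) cgroup and only uses the parent when cgroups are systemd-managed. A condensed sketch of that selection logic using placeholder types instead of runsc's cgroup package:

package main

import "fmt"

// cgroup is a minimal stand-in for the runsc cgroup handle.
type cgroup interface{ Path() string }

type dirCgroup struct{ path string }

func (c dirCgroup) Path() string { return c.path }

// pick mirrors the selection in the change above: prefer the leaf (sub)
// cgroup under plain cgroupfs, fall back to the parent for systemd cgroups
// or when no sub cgroup exists.
func pick(sub, parent cgroup, systemd bool) cgroup {
	if sub != nil && !systemd {
		return sub
	}
	return parent
}

func main() {
	parent := dirCgroup{path: "/sys/fs/cgroup/sandbox"}
	sub := dirCgroup{path: "/sys/fs/cgroup/sandbox/container1"}
	fmt.Println(pick(sub, parent, false).Path()) // leaf cgroup
	fmt.Println(pick(sub, parent, true).Path())  // parent cgroup
}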
259,982 | 07.12.2022 11:08:39 | 28,800 | 4a1a26f9666786f269a56f0dda986c3d3f3c6f66 | Adding metrics for start container in multi-container mode. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/control/control.proto",
"new_path": "pkg/sentry/control/control.proto",
"diff": "@@ -40,6 +40,12 @@ message ControlConfig {\nrepeated Endpoint allowed_controls = 1;\n}\n+// ContainerStartedEvent is emitted when a container starts.\n+message ContainerStartedEvent {\n+ bool started = 1;\n+ string container_id = 2;\n+}\n+\n// ContainerExitEvent is emitted when a container's init task exits. Duplicate\n// exit events may be emitted for the same container.\nmessage ContainerExitEvent {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/control/lifecycle.go",
"new_path": "pkg/sentry/control/lifecycle.go",
"diff": "@@ -313,6 +313,10 @@ func (l *Lifecycle) StartContainer(args *StartContainerArgs, _ *uint32) error {\n// Start the newly created process.\nl.Kernel.StartProcess(tg)\nlog.Infof(\"Started the new container %v \", initArgs.ContainerID)\n+ eventchannel.LogEmit(&pb.ContainerStartedEvent{\n+ Started: true,\n+ ContainerId: initArgs.ContainerID,\n+ })\nif err := l.updateContainerState(initArgs.ContainerID, stateRunning); err != nil {\n// Sanity check: shouldn't fail to update the state at this point.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Adding metrics for start container in multi-container mode.
PiperOrigin-RevId: 493655449 |
259,982 | 07.12.2022 12:08:18 | 28,800 | a35ca06260a337280618da523919b1fd021ca749 | Adding metrics for exit_status of container in multi-container mode. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/control/lifecycle.go",
"new_path": "pkg/sentry/control/lifecycle.go",
"diff": "@@ -336,7 +336,7 @@ func (l *Lifecycle) reap(containerID string, tg *kernel.ThreadGroup) {\nif err := l.updateContainerState(containerID, stateStopped); err != nil {\npanic(err)\n}\n- eventchannel.Emit(&pb.ContainerExitEvent{\n+ eventchannel.LogEmit(&pb.ContainerExitEvent{\nContainerId: containerID,\nExitStatus: uint32(tg.ExitStatus()),\n})\n@@ -392,6 +392,10 @@ func (l *Lifecycle) GetExitStatus(args *ContainerArgs, status *uint32) error {\n}\n*status = uint32(c.tg.ExitStatus())\n+ eventchannel.LogEmit(&pb.ContainerExitEvent{\n+ ContainerId: args.ContainerID,\n+ ExitStatus: *status,\n+ })\nreturn nil\n}\n@@ -423,7 +427,7 @@ func (l *Lifecycle) Reap(args *ContainerArgs, _ *struct{}) error {\n// the actual stop is called. This may be a duplicate event, but is\n// necessary in case the reap goroutine transitions the container to the\n// stop state before the caller starts observing the event channel.\n- eventchannel.Emit(&pb.ContainerExitEvent{\n+ eventchannel.LogEmit(&pb.ContainerExitEvent{\nContainerId: args.ContainerID,\nExitStatus: uint32(c.tg.ExitStatus()),\n})\n"
}
] | Go | Apache License 2.0 | google/gvisor | Adding metrics for exit_status of container in multi-container mode.
PiperOrigin-RevId: 493671987 |
260,004 | 07.12.2022 14:47:42 | 28,800 | b314d966ccb2d052f3d5e9c4c949b7c1834813c4 | Wait for GRO loop to stop
...before returning from groDispatcher.close.
This is to make sure that when we return from cleanup functions, the
caller can safely assume that resources have been released. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/gro.go",
"new_path": "pkg/tcpip/stack/gro.go",
"diff": "@@ -223,6 +223,7 @@ type groDispatcher struct {\nstop chan struct{}\nbuckets [groNBuckets]groBucket\n+ wg sync.WaitGroup\n}\nfunc (gd *groDispatcher) init(interval time.Duration) {\n@@ -246,7 +247,11 @@ func (gd *groDispatcher) init(interval time.Duration) {\n// start spawns a goroutine that flushes the GRO periodically based on the\n// interval.\nfunc (gd *groDispatcher) start(interval time.Duration) {\n+ gd.wg.Add(1)\n+\ngo func(interval time.Duration) {\n+ defer gd.wg.Done()\n+\nvar ch <-chan time.Time\nif interval == 0 {\n// Never run.\n@@ -496,6 +501,7 @@ func (gd *groDispatcher) flushAll() {\n// close stops the GRO goroutine and releases any held packets.\nfunc (gd *groDispatcher) close() {\ngd.stop <- struct{}{}\n+ gd.wg.Wait()\nfor i := range gd.buckets {\nbucket := &gd.buckets[i]\n"
}
] | Go | Apache License 2.0 | google/gvisor | Wait for GRO loop to stop
...before returning from groDispatcher.close.
This is to make sure that when we return from cleanup functions, the
caller can safely assume that resources have been released.
PiperOrigin-RevId: 493714729 |
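The record above makes close() block until the background GRO flush goroutine has exited, using a sync.WaitGroup. The following is only a rough, self-contained sketch of that shutdown pattern; the dispatcher type, newDispatcher, and Close names are invented for illustration and are not the netstack API, and the real commit signals the stop channel with a send rather than closing it.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// dispatcher owns a background flush loop that must be fully stopped before
// its resources can be released.
type dispatcher struct {
	stop chan struct{}
	wg   sync.WaitGroup
}

func newDispatcher(interval time.Duration) *dispatcher {
	d := &dispatcher{stop: make(chan struct{})}
	d.wg.Add(1)
	go func() {
		defer d.wg.Done()
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// Periodic work (e.g. flushing coalesced packets) would go here.
			case <-d.stop:
				return
			}
		}
	}()
	return d
}

// Close signals the loop to exit and blocks until it has returned, so the
// caller can safely assume nothing else touches shared state afterwards.
func (d *dispatcher) Close() {
	close(d.stop)
	d.wg.Wait()
}

func main() {
	d := newDispatcher(10 * time.Millisecond)
	time.Sleep(30 * time.Millisecond)
	d.Close()
	fmt.Println("flush loop stopped")
}
```

The property the commit cares about is that Close returns only after wg.Wait, so cleanup code running after it cannot race with the loop.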
259,907 | 07.12.2022 15:55:25 | 28,800 | c6faaea7dc28b481123a141efd8dd83ae573f2c3 | Make tmpfs size accounting lock-free by using atomics.
This is likely to be a speed-up in the common case when contention between
threads is low. Atomic operations are generally faster in such situations. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"diff": "@@ -971,21 +971,22 @@ func (fs *filesystem) accountPagesPartial(pagesInc uint64) uint64 {\nreturn pagesInc\n}\n- // Need to acquire fs.pagesUsedMu for fs.pagesUsed.\n- fs.pagesUsedMu.Lock()\n- defer fs.pagesUsedMu.Unlock()\n- if fs.maxSizeInPages <= fs.pagesUsed {\n+ for {\n+ pagesUsed := fs.pagesUsed.Load()\n+ if fs.maxSizeInPages <= pagesUsed {\nreturn 0\n}\n- pagesFree := fs.maxSizeInPages - fs.pagesUsed\n+ pagesFree := fs.maxSizeInPages - pagesUsed\n+ toInc := pagesInc\nif pagesFree < pagesInc {\n- fs.pagesUsed += pagesFree\n- return pagesFree\n+ toInc = pagesFree\n}\n- fs.pagesUsed += pagesInc\n- return pagesInc\n+ if fs.pagesUsed.CompareAndSwap(pagesUsed, pagesUsed+toInc) {\n+ return toInc\n+ }\n+ }\n}\n// accountPages increases the pagesUsed in filesystem struct if tmpfs\n@@ -996,21 +997,22 @@ func (fs *filesystem) accountPages(pagesInc uint64) bool {\nreturn true // No accounting needed.\n}\n- // Need to acquire fs.pagesUsedMu for fs.pagesUsed.\n- fs.pagesUsedMu.Lock()\n- defer fs.pagesUsedMu.Unlock()\n- if fs.maxSizeInPages <= fs.pagesUsed {\n+ for {\n+ pagesUsed := fs.pagesUsed.Load()\n+ if fs.maxSizeInPages <= pagesUsed {\nreturn false\n}\n- pagesFree := fs.maxSizeInPages - fs.pagesUsed\n+ pagesFree := fs.maxSizeInPages - pagesUsed\nif pagesFree < pagesInc {\nreturn false\n}\n- fs.pagesUsed += pagesInc\n+ if fs.pagesUsed.CompareAndSwap(pagesUsed, pagesUsed+pagesInc) {\nreturn true\n}\n+ }\n+}\n// unaccountPages decreases the pagesUsed in filesystem struct if tmpfs\n// is mounted with size option.\n@@ -1018,11 +1020,14 @@ func (fs *filesystem) unaccountPages(pagesDec uint64) {\nif fs.maxSizeInPages == 0 || pagesDec == 0 {\nreturn\n}\n- // Need to acquire fs.pagesUsedMu for fs.pagesUsed.\n- fs.pagesUsedMu.Lock()\n- defer fs.pagesUsedMu.Unlock()\n- if fs.pagesUsed < pagesDec {\n- panic(fmt.Sprintf(\"Deallocating more pages than allocated: fs.pagesUsed = %d, pagesDec = %d\", fs.pagesUsed, pagesDec))\n+\n+ for {\n+ pagesUsed := fs.pagesUsed.Load()\n+ if pagesUsed < pagesDec {\n+ panic(fmt.Sprintf(\"Deallocating more pages than allocated: fs.pagesUsed = %d, pagesDec = %d\", pagesUsed, pagesDec))\n+ }\n+ if fs.pagesUsed.CompareAndSwap(pagesUsed, pagesUsed-pagesDec) {\n+ break\n+ }\n}\n- fs.pagesUsed -= pagesDec\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go",
"new_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go",
"diff": "@@ -92,10 +92,8 @@ type filesystem struct {\n// This field is immutable.\nmaxSizeInPages uint64\n- // pagesUsed is the pages used out of the tmpfs size.\n- // pagesUsed is protected by pagesUsedMu.\n- pagesUsedMu pagesUsedMutex `state:\"nosave\"`\n- pagesUsed uint64\n+ // pagesUsed is the number of pages used by this filesystem.\n+ pagesUsed atomicbitops.Uint64\n}\n// Name implements vfs.FilesystemType.Name.\n@@ -321,10 +319,9 @@ func (fs *filesystem) statFS() linux.Statfs {\nif fs.maxSizeInPages > 0 {\n// If size is set for tmpfs return set values.\nst.Blocks = fs.maxSizeInPages\n- fs.pagesUsedMu.Lock()\n- defer fs.pagesUsedMu.Unlock()\n- st.BlocksFree = fs.maxSizeInPages - fs.pagesUsed\n- st.BlocksAvailable = fs.maxSizeInPages - fs.pagesUsed\n+ pagesUsed := fs.pagesUsed.Load()\n+ st.BlocksFree = fs.maxSizeInPages - pagesUsed\n+ st.BlocksAvailable = fs.maxSizeInPages - pagesUsed\nreturn st\n}\n// In Linux, if tmpfs is mounted with no size option,\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make tmpfs size accounting lock-free by using atomics.
This is likely to be a speed-up in the common case when contention between
threads is low. Atomic operations are generally faster in such situations.
PiperOrigin-RevId: 493731992 |
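The tmpfs commit above replaces a mutex-protected page counter with compare-and-swap retry loops. Below is a minimal standalone sketch of that idiom using the standard library's sync/atomic instead of gVisor's atomicbitops package; accountPages and its parameters are illustrative names, not the sentry's actual signature.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// accountPages reserves inc pages against a fixed maximum without a lock.
// It retries whenever another goroutine updates the counter between the
// load and the compare-and-swap.
func accountPages(used *atomic.Uint64, maxPages, inc uint64) bool {
	for {
		cur := used.Load()
		if cur >= maxPages || maxPages-cur < inc {
			return false // Not enough room left; reject the allocation.
		}
		if used.CompareAndSwap(cur, cur+inc) {
			return true // Reservation succeeded.
		}
		// Lost a race; loop and retry with the fresh value.
	}
}

func main() {
	var used atomic.Uint64
	fmt.Println(accountPages(&used, 10, 4)) // true  (4/10 used)
	fmt.Println(accountPages(&used, 10, 4)) // true  (8/10 used)
	fmt.Println(accountPages(&used, 10, 4)) // false (would exceed the cap)
}
```

The loop only repeats when another goroutine wins the race, which is typically cheaper than taking a mutex when contention is low.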
259,853 | 07.12.2022 17:10:49 | 28,800 | 2e0cc62d827d8fa72c1abe3e0fc6615720753ce0 | tcpip/stack: use lockdep mutexes | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/BUILD",
"new_path": "pkg/tcpip/BUILD",
"diff": "@@ -64,6 +64,7 @@ deps_test(\n\"//pkg/state\",\n\"//pkg/state/wire\",\n\"//pkg/sync\",\n+ \"//pkg/sync/locking\",\n\"//pkg/waiter\",\n\"//pkg/xdp\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/BUILD",
"new_path": "pkg/tcpip/stack/BUILD",
"diff": "load(\"//tools:defs.bzl\", \"go_library\", \"go_test\", \"most_shards\")\nload(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\n+load(\"//pkg/sync/locking:locking.bzl\", \"declare_mutex\", \"declare_rwmutex\")\npackage(licenses = [\"notice\"])\n+declare_rwmutex(\n+ name = \"addressable_endpoint_state_mutex\",\n+ out = \"addressable_endpoint_state_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"addressableEndpointState\",\n+)\n+\n+declare_rwmutex(\n+ name = \"address_state_mutex\",\n+ out = \"address_state_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"addressState\",\n+)\n+\n+declare_rwmutex(\n+ name = \"route_mutex\",\n+ out = \"route_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"route\",\n+)\n+\n+declare_rwmutex(\n+ name = \"route_stack_mutex\",\n+ out = \"route_stack_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"routeStack\",\n+)\n+\n+declare_rwmutex(\n+ name = \"stack_mutex\",\n+ out = \"stack_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"stack\",\n+)\n+\n+declare_rwmutex(\n+ name = \"nic_mutex\",\n+ out = \"nic_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"nic\",\n+)\n+\n+declare_rwmutex(\n+ name = \"packet_eps_mutex\",\n+ out = \"packet_eps_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"packetEPs\",\n+)\n+\n+declare_rwmutex(\n+ name = \"packet_endpoint_list_mutex\",\n+ out = \"packet_endpoint_list_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"packetEndpointList\",\n+)\n+\n+declare_rwmutex(\n+ name = \"transport_endpoints_mutex\",\n+ out = \"transport_endpoints_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"transportEndpoints\",\n+)\n+\n+declare_rwmutex(\n+ name = \"endpoints_by_nic_mutex\",\n+ out = \"endpoints_by_nic_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"endpointsByNIC\",\n+)\n+\n+declare_rwmutex(\n+ name = \"multi_port_endpoint_mutex\",\n+ out = \"multi_port_endpoint_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"multiPortEndpoint\",\n+)\n+\n+declare_rwmutex(\n+ name = \"neighbor_entry_mutex\",\n+ out = \"neighbor_entry_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"neighborEntry\",\n+)\n+\n+declare_rwmutex(\n+ name = \"neighbor_cache_mutex\",\n+ out = \"neighbor_cache_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"neighborCache\",\n+)\n+\n+declare_rwmutex(\n+ name = \"conn_mutex\",\n+ out = \"conn_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"conn\",\n+)\n+\n+declare_rwmutex(\n+ name = \"state_conn_mutex\",\n+ out = \"state_conn_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"stateConn\",\n+)\n+\n+declare_rwmutex(\n+ name = \"bucket_mutex\",\n+ out = \"bucket_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"bucket\",\n+)\n+\n+declare_rwmutex(\n+ name = \"conn_track_mutex\",\n+ out = \"conn_track_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"connTrack\",\n+)\n+\n+declare_rwmutex(\n+ name = \"iptables_mutex\",\n+ out = \"iptables_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"ipTables\",\n+)\n+\n+declare_mutex(\n+ name = \"cleanup_endpoints_mutex\",\n+ out = \"cleanup_endpoints_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"cleanupEndpoints\",\n+)\n+\n+declare_mutex(\n+ name = \"packets_pending_link_resolution_mutex\",\n+ out = \"packets_pending_link_resolution_mutex.go\",\n+ package = \"stack\",\n+ prefix = \"packetsPendingLinkResolution\",\n+)\n+\ngo_template_instance(\nname = \"neighbor_entry_list\",\nout = \"neighbor_entry_list.go\",\n@@ -53,36 +194,56 @@ go_template_instance(\ngo_library(\nname = \"stack\",\nsrcs = [\n+ \"address_state_mutex.go\",\n\"addressable_endpoint_state.go\",\n+ 
\"addressable_endpoint_state_mutex.go\",\n+ \"bucket_mutex.go\",\n+ \"cleanup_endpoints_mutex.go\",\n+ \"conn_mutex.go\",\n+ \"conn_track_mutex.go\",\n\"conntrack.go\",\n+ \"endpoints_by_nic_mutex.go\",\n\"gro.go\",\n\"gro_packet_list.go\",\n\"headertype_string.go\",\n\"hook_string.go\",\n\"icmp_rate_limit.go\",\n\"iptables.go\",\n+ \"iptables_mutex.go\",\n\"iptables_targets.go\",\n\"iptables_types.go\",\n+ \"multi_port_endpoint_mutex.go\",\n\"neighbor_cache.go\",\n+ \"neighbor_cache_mutex.go\",\n\"neighbor_entry.go\",\n\"neighbor_entry_list.go\",\n+ \"neighbor_entry_mutex.go\",\n\"neighborstate_string.go\",\n\"nic.go\",\n+ \"nic_mutex.go\",\n\"nic_stats.go\",\n\"nud.go\",\n\"packet_buffer.go\",\n\"packet_buffer_list.go\",\n\"packet_buffer_refs.go\",\n\"packet_buffer_unsafe.go\",\n+ \"packet_endpoint_list_mutex.go\",\n+ \"packet_eps_mutex.go\",\n+ \"packets_pending_link_resolution_mutex.go\",\n\"pending_packets.go\",\n\"rand.go\",\n\"registration.go\",\n\"route.go\",\n+ \"route_mutex.go\",\n+ \"route_stack_mutex.go\",\n\"stack.go\",\n\"stack_global_state.go\",\n+ \"stack_mutex.go\",\n\"stack_options.go\",\n+ \"state_conn_mutex.go\",\n\"tcp.go\",\n\"transport_demuxer.go\",\n+ \"transport_endpoints_mutex.go\",\n\"tuple_list.go\",\n],\nvisibility = [\"//visibility:public\"],\n@@ -95,6 +256,7 @@ go_library(\n\"//pkg/refs\",\n\"//pkg/sleep\",\n\"//pkg/sync\",\n+ \"//pkg/sync/locking\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/checksum\",\n\"//pkg/tcpip/hash/jenkins\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/addressable_endpoint_state.go",
"new_path": "pkg/tcpip/stack/addressable_endpoint_state.go",
"diff": "@@ -17,7 +17,6 @@ package stack\nimport (\n\"fmt\"\n- \"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n)\n@@ -38,7 +37,7 @@ type AddressableEndpointState struct {\n//\n// AddressableEndpointState.mu\n// addressState.mu\n- mu sync.RWMutex\n+ mu addressableEndpointStateRWMutex\n// +checklocks:mu\nendpoints map[tcpip.Address]*addressState\n// +checklocks:mu\n@@ -701,7 +700,7 @@ type addressState struct {\n//\n// AddressableEndpointState.mu\n// addressState.mu\n- mu sync.RWMutex\n+ mu addressStateRWMutex\n// checklocks:mu\nrefs uint32\n// checklocks:mu\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/conntrack.go",
"new_path": "pkg/tcpip/stack/conntrack.go",
"diff": "@@ -139,7 +139,7 @@ type conn struct {\n// Holds a finalizeResult.\nfinalizeResult atomicbitops.Uint32\n- mu sync.RWMutex `state:\"nosave\"`\n+ mu connRWMutex `state:\"nosave\"`\n// sourceManip indicates the source manipulation type.\n//\n// +checklocks:mu\n@@ -149,7 +149,7 @@ type conn struct {\n// +checklocks:mu\ndestinationManip manipType\n- stateMu sync.RWMutex `state:\"nosave\"`\n+ stateMu stateConnRWMutex `state:\"nosave\"`\n// tcb is TCB control block. It is used to keep track of states\n// of tcp connection.\n//\n@@ -230,7 +230,7 @@ type ConnTrack struct {\nclock tcpip.Clock\nrand *rand.Rand\n- mu sync.RWMutex `state:\"nosave\"`\n+ mu connTrackRWMutex `state:\"nosave\"`\n// mu protects the buckets slice, but not buckets' contents. Only take\n// the write lock if you are modifying the slice or saving for S/R.\n//\n@@ -240,7 +240,7 @@ type ConnTrack struct {\n// +stateify savable\ntype bucket struct {\n- mu sync.RWMutex `state:\"nosave\"`\n+ mu bucketRWMutex `state:\"nosave\"`\n// +checklocks:mu\ntuples tupleList\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables_types.go",
"new_path": "pkg/tcpip/stack/iptables_types.go",
"diff": "@@ -18,7 +18,6 @@ import (\n\"fmt\"\n\"strings\"\n- \"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n)\n@@ -85,7 +84,7 @@ type IPTables struct {\nreaper tcpip.Timer\n- mu sync.RWMutex\n+ mu ipTablesRWMutex\n// v4Tables and v6tables map tableIDs to tables. They hold builtin\n// tables only, not user tables.\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/neighbor_cache.go",
"new_path": "pkg/tcpip/stack/neighbor_cache.go",
"diff": "@@ -17,7 +17,6 @@ package stack\nimport (\n\"fmt\"\n- \"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n)\n@@ -48,7 +47,7 @@ type neighborCache struct {\nlinkRes LinkAddressResolver\nmu struct {\n- sync.RWMutex\n+ neighborCacheRWMutex\ncache map[tcpip.Address]*neighborEntry\ndynamic struct {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/neighbor_entry.go",
"new_path": "pkg/tcpip/stack/neighbor_entry.go",
"diff": "@@ -16,7 +16,6 @@ package stack\nimport (\n\"fmt\"\n- \"sync\"\n\"time\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n@@ -97,7 +96,7 @@ type neighborEntry struct {\nnudState *NUDState\nmu struct {\n- sync.RWMutex\n+ neighborEntryRWMutex\nneigh NeighborEntry\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -19,7 +19,6 @@ import (\n\"reflect\"\n\"gvisor.dev/gvisor/pkg/atomicbitops\"\n- \"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n)\n@@ -59,7 +58,7 @@ type nic struct {\nlinkResQueue packetsPendingLinkResolution\n// mu protects annotated fields below.\n- mu sync.RWMutex\n+ mu nicRWMutex\n// +checklocks:mu\nspoofing bool\n@@ -68,7 +67,7 @@ type nic struct {\npromiscuous bool\n// packetEPsMu protects annotated fields below.\n- packetEPsMu sync.RWMutex\n+ packetEPsMu packetEPsRWMutex\n// eps is protected by the mutex, but the values contained in it are not.\n//\n@@ -90,7 +89,7 @@ func makeNICStats(global tcpip.NICStats) sharedStats {\n}\ntype packetEndpointList struct {\n- mu sync.RWMutex\n+ mu packetEndpointListRWMutex\n// eps is protected by mu, but the contained PacketEndpoint values are not.\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/pending_packets.go",
"new_path": "pkg/tcpip/stack/pending_packets.go",
"diff": "@@ -17,7 +17,6 @@ package stack\nimport (\n\"fmt\"\n- \"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n)\n@@ -40,7 +39,7 @@ type packetsPendingLinkResolution struct {\nnic *nic\nmu struct {\n- sync.Mutex\n+ packetsPendingLinkResolutionMutex\n// The packets to send once the resolver completes.\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/route.go",
"new_path": "pkg/tcpip/stack/route.go",
"diff": "@@ -17,7 +17,6 @@ package stack\nimport (\n\"fmt\"\n- \"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n)\n@@ -34,7 +33,7 @@ type Route struct {\nlocalAddressNIC *nic\n// mu protects annotated fields below.\n- mu sync.RWMutex\n+ mu routeRWMutex\n// localAddressEndpoint is the local address this route is associated with.\n// +checklocks:mu\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -32,7 +32,6 @@ import (\n\"gvisor.dev/gvisor/pkg/bufferv2\"\n\"gvisor.dev/gvisor/pkg/log\"\ncryptorand \"gvisor.dev/gvisor/pkg/rand\"\n- \"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/ports\"\n@@ -85,18 +84,18 @@ type Stack struct {\nstats tcpip.Stats\n// routeMu protects annotated fields below.\n- routeMu sync.RWMutex\n+ routeMu routeStackRWMutex\n// +checklocks:routeMu\nrouteTable []tcpip.Route\n- mu sync.RWMutex\n+ mu stackRWMutex\n// +checklocks:mu\nnics map[tcpip.NICID]*nic\ndefaultForwardingEnabled map[tcpip.NetworkProtocolNumber]struct{}\n// cleanupEndpointsMu protects cleanupEndpoints.\n- cleanupEndpointsMu sync.Mutex\n+ cleanupEndpointsMu cleanupEndpointsMutex\n// +checklocks:cleanupEndpointsMu\ncleanupEndpoints map[TransportEndpoint]struct{}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/transport_demuxer.go",
"new_path": "pkg/tcpip/stack/transport_demuxer.go",
"diff": "@@ -17,7 +17,6 @@ package stack\nimport (\n\"fmt\"\n- \"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/hash/jenkins\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n@@ -32,7 +31,7 @@ type protocolIDs struct {\n// transportEndpoints manages all endpoints of a given protocol. It has its own\n// mutex so as to reduce interference between protocols.\ntype transportEndpoints struct {\n- mu sync.RWMutex\n+ mu transportEndpointsRWMutex\n// +checklocks:mu\nendpoints map[TransportEndpointID]*endpointsByNIC\n// rawEndpoints contains endpoints for raw sockets, which receive all\n@@ -138,7 +137,7 @@ type endpointsByNIC struct {\n// seed is a random secret for a jenkins hash.\nseed uint32\n- mu sync.RWMutex\n+ mu endpointsByNICRWMutex\n// +checklocks:mu\nendpoints map[tcpip.NICID]*multiPortEndpoint\n}\n@@ -346,7 +345,7 @@ type multiPortEndpoint struct {\nflags ports.FlagCounter\n- mu sync.RWMutex `state:\"nosave\"`\n+ mu multiPortEndpointRWMutex `state:\"nosave\"`\n// endpoints stores the transport endpoints in the order in which they\n// were bound. This is required for UDP SO_REUSEADDR.\n//\n"
}
] | Go | Apache License 2.0 | google/gvisor | tcpip/stack: use lockdep mutexes
PiperOrigin-RevId: 493749454 |
259,907 | 08.12.2022 11:01:50 | 28,800 | d17af25336d3cd33f9a9406e557f4e4e521347c8 | Add ENAMETOOLONG checks in overlayfs.
Linux overlayfs uses the max of all layers' filename length limits. Do the same
in gVisor.
This is needed to get PHP runtime test ext/standard/tests/strings/007.phpt to
pass with overlayfs. | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -270,6 +270,7 @@ docker-tests: load-basic $(RUNTIME_BIN)\n@$(call install_runtime,$(RUNTIME)-fdlimit,--fdlimit=2000) # Used by TestRlimitNoFile.\n@$(call install_runtime,$(RUNTIME)-dcache,--fdlimit=2000 --dcache=100) # Used by TestDentryCacheLimit.\n@$(call install_runtime,$(RUNTIME)-host-uds,--host-uds=all) # Used by TestHostSocketConnect.\n+ @$(call install_runtime,$(RUNTIME)-overlay,--overlay2=root:/tmp) # Used by TestOverlay*.\n@$(call test_runtime,$(RUNTIME),$(INTEGRATION_TARGETS) //test/e2e:integration_runtime_test)\n.PHONY: docker-tests\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/overlay/filesystem.go",
"new_path": "pkg/sentry/fsimpl/overlay/filesystem.go",
"diff": "@@ -161,6 +161,9 @@ afterSymlink:\nrp.Advance()\nreturn d.parent, d.parent.topLookupLayer(), nil\n}\n+ if uint64(len(name)) > fs.maxFilenameLen {\n+ return nil, lookupLayerNone, linuxerr.ENAMETOOLONG\n+ }\nchild, topLookupLayer, err := fs.getChildLocked(ctx, d, name, ds)\nif err != nil {\nreturn nil, topLookupLayer, err\n@@ -504,6 +507,9 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, ct\nif name == \".\" || name == \"..\" {\nreturn linuxerr.EEXIST\n}\n+ if uint64(len(name)) > fs.maxFilenameLen {\n+ return linuxerr.ENAMETOOLONG\n+ }\nif parent.vfsd.IsDead() {\nreturn linuxerr.ENOENT\n}\n@@ -1090,6 +1096,9 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa\n}\nreturn linuxerr.EBUSY\n}\n+ if uint64(len(newName)) > fs.maxFilenameLen {\n+ return linuxerr.ENAMETOOLONG\n+ }\n// Do not check for newName length, since different filesystem\n// implementations impose different name limits. upperfs.RenameAt() will fail\n// appropriately if it has to.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/overlay/overlay.go",
"new_path": "pkg/sentry/fsimpl/overlay/overlay.go",
"diff": "@@ -125,6 +125,9 @@ type filesystem struct {\n// lastDirIno is the last inode number assigned to a directory. lastDirIno\n// is protected by dirInoCacheMu.\nlastDirIno uint64\n+\n+ // MaxFilenameLen is the maximum filename length allowed by the overlayfs.\n+ maxFilenameLen uint64\n}\n// +stateify savable\n@@ -264,9 +267,23 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\ndirDevMinor: dirDevMinor,\nlowerDevMinors: make(map[layerDevNumber]uint32),\ndirInoCache: make(map[layerDevNoAndIno]uint64),\n+ maxFilenameLen: linux.NAME_MAX,\n}\nfs.vfsfs.Init(vfsObj, &fstype, fs)\n+ // Configure max filename length. Similar to what Linux does in\n+ // fs/overlayfs/super.c:ovl_fill_super() -> ... -> ovl_check_namelen().\n+ if fsopts.UpperRoot.Ok() {\n+ if err := fs.updateMaxNameLen(ctx, creds, vfsObj, fs.opts.UpperRoot); err != nil {\n+ ctx.Debugf(\"overlay.FilesystemType.GetFilesystem: failed to StatFSAt on upper layer root: %v\", err)\n+ }\n+ }\n+ for _, lowerRoot := range fsopts.LowerRoots {\n+ if err := fs.updateMaxNameLen(ctx, creds, vfsObj, lowerRoot); err != nil {\n+ ctx.Debugf(\"overlay.FilesystemType.GetFilesystem: failed to StatFSAt on lower layer root: %v\", err)\n+ }\n+ }\n+\n// Construct the root dentry.\nroot := fs.newDentry()\nroot.refs = atomicbitops.FromInt64(1)\n@@ -368,6 +385,21 @@ func (fs *filesystem) Release(ctx context.Context) {\n}\n}\n+// updateMaxNameLen is analogous to fs/overlayfs/super.c:ovl_check_namelen().\n+func (fs *filesystem) updateMaxNameLen(ctx context.Context, creds *auth.Credentials, vfsObj *vfs.VirtualFilesystem, vd vfs.VirtualDentry) error {\n+ statfs, err := vfsObj.StatFSAt(ctx, creds, &vfs.PathOperation{\n+ Root: vd,\n+ Start: vd,\n+ })\n+ if err != nil {\n+ return err\n+ }\n+ if statfs.NameLength > fs.maxFilenameLen {\n+ fs.maxFilenameLen = statfs.NameLength\n+ }\n+ return nil\n+}\n+\nfunc (fs *filesystem) statFS(ctx context.Context) (linux.Statfs, error) {\n// Always statfs the root of the topmost layer. Compare Linux's\n// fs/overlayfs/super.c:ovl_statfs().\n"
},
{
"change_type": "MODIFY",
"old_path": "test/e2e/integration_runtime_test.go",
"new_path": "test/e2e/integration_runtime_test.go",
"diff": "@@ -24,6 +24,7 @@ package integration\nimport (\n\"context\"\n\"flag\"\n+ \"fmt\"\n\"io/ioutil\"\n\"net\"\n\"os\"\n@@ -178,3 +179,20 @@ func TestHostSocketConnect(t *testing.T) {\n}\nwg.Wait()\n}\n+\n+func TestOverlayNameTooLong(t *testing.T) {\n+ ctx := context.Background()\n+ d := dockerutil.MakeContainerWithRuntime(ctx, t, \"-overlay\")\n+ defer d.CleanUp(ctx)\n+\n+ opts := dockerutil.RunOpts{\n+ Image: \"basic/integrationtest\",\n+ WorkDir: \"/root\",\n+ }\n+ longName := strings.Repeat(\"a\", unix.NAME_MAX+1)\n+ if got, err := d.Run(ctx, opts, \"bash\", \"-c\", fmt.Sprintf(\"stat %s || true\", longName)); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ } else if want := \"File name too long\"; !strings.Contains(got, want) {\n+ t.Errorf(\"container output %q does not contain %q\", got, want)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add ENAMETOOLONG checks in overlayfs.
Linux overlayfs uses the max of all layers' filename length limits. Do the same
in gVisor.
This is needed to get PHP runtime test ext/standard/tests/strings/007.phpt to
pass with overlayfs.
PiperOrigin-RevId: 493942515 |
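The overlayfs commit above derives the mount's filename length limit from the maximum reported by each layer. As a hedged sketch of the same idea against host filesystems, the example below queries statfs via golang.org/x/sys/unix instead of the sentry's VFS StatFSAt; maxNameLen and the layer paths are hypothetical.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// maxNameLen returns the largest filename length limit reported by the
// filesystems backing the given layer roots, defaulting to NAME_MAX when a
// layer cannot be queried.
func maxNameLen(layerRoots []string) uint64 {
	maxLen := uint64(unix.NAME_MAX)
	for _, root := range layerRoots {
		var st unix.Statfs_t
		if err := unix.Statfs(root, &st); err != nil {
			continue // Best effort: skip layers that fail, like the logged errors in the commit.
		}
		if n := uint64(st.Namelen); n > maxLen {
			maxLen = n
		}
	}
	return maxLen
}

func main() {
	// Hypothetical upper and lower layer roots.
	fmt.Println(maxNameLen([]string{"/tmp/upper", "/tmp/lower"}))
}
```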
259,909 | 08.12.2022 14:13:36 | 28,800 | 2ff7a2750a771c24e87249e4c7bf8111bc00b7a0 | Only run sandbox in child cgroup if using cgroupv2
Joining a non-leaf node cgroup is only illegal in hierarchical
cgroup accounting. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -255,8 +255,8 @@ func New(conf *config.Config, args Args) (*Container, error) {\nreturn nil, fmt.Errorf(\"cannot set up cgroup for root: %w\", err)\n}\n// Join the child cgroup when using cgroupfs. Joining non leaf-node\n- // cgroups is illegal in Linux and will return EBUSY.\n- if subCgroup != nil && !conf.SystemdCgroup {\n+ // cgroups is illegal in cgroupsv2 and will return EBUSY.\n+ if subCgroup != nil && !conf.SystemdCgroup && cgroup.IsOnlyV2() {\ncontainerCgroup = subCgroup\n} else {\ncontainerCgroup = parentCgroup\n"
}
] | Go | Apache License 2.0 | google/gvisor | Only run sandbox in child cgroup if using cgroupv2
Joining a non-leaf node cgroup is only illegal in hierarchical
cgroup accounting.
PiperOrigin-RevId: 493992577 |
260,004 | 08.12.2022 14:14:05 | 28,800 | d947422655f16bae014d411a40f9519fafd48b29 | Don't prevent removing loopback in core netstack
Implement this check in netstack integration (in sentry) so that the
core netstack does not prevent an integrator from removing loopback. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/stack.go",
"new_path": "pkg/sentry/socket/netstack/stack.go",
"diff": "@@ -80,6 +80,17 @@ func (s *Stack) Interfaces() map[int32]inet.Interface {\n// RemoveInterface implements inet.Stack.RemoveInterface.\nfunc (s *Stack) RemoveInterface(idx int32) error {\nnic := tcpip.NICID(idx)\n+\n+ nicInfo, ok := s.Stack.NICInfo()[nic]\n+ if !ok {\n+ return syserr.ErrUnknownNICID.ToError()\n+ }\n+\n+ // Don't allow removing the loopback interface.\n+ if nicInfo.Flags.Loopback {\n+ return syserr.ErrNotSupported.ToError()\n+ }\n+\nreturn syserr.TranslateNetstackError(s.Stack.RemoveNIC(nic)).ToError()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -957,9 +957,6 @@ func (s *Stack) removeNICLocked(id tcpip.NICID) tcpip.Error {\nif !ok {\nreturn &tcpip.ErrUnknownNICID{}\n}\n- if nic.IsLoopback() {\n- return &tcpip.ErrNotSupported{}\n- }\ndelete(s.nics, id)\n// Remove routes in-place. n tracks the number of routes written.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack_test.go",
"new_path": "pkg/tcpip/stack/stack_test.go",
"diff": "@@ -968,17 +968,14 @@ func TestRemoveNIC(t *testing.T) {\nfor _, tt := range []struct {\nname string\nlinkep stack.LinkEndpoint\n- expectErr tcpip.Error\n}{\n{\nname: \"loopback\",\nlinkep: loopback.New(),\n- expectErr: &tcpip.ErrNotSupported{},\n},\n{\nname: \"channel\",\nlinkep: channel.New(0, defaultMTU, \"\"),\n- expectErr: nil,\n},\n} {\nt.Run(tt.name, func(t *testing.T) {\n@@ -1006,17 +1003,15 @@ func TestRemoveNIC(t *testing.T) {\n// Removing a NIC should remove it from NICInfo and e should be detached from\n// the NetworkDispatcher.\n- if got, want := s.RemoveNIC(nicID), tt.expectErr; got != want {\n- t.Fatalf(\"got s.RemoveNIC(%d) = %s, want %s\", nicID, got, want)\n+ if err := s.RemoveNIC(nicID); err != nil {\n+ t.Fatalf(\"s.RemoveNIC(%d): %s\", nicID, err)\n}\n- if tt.expectErr == nil {\nif nicInfo, ok := s.NICInfo()[nicID]; ok {\nt.Errorf(\"got unexpected NICInfo entry for deleted NIC %d = %+v\", nicID, nicInfo)\n}\nif e.isAttached() {\nt.Error(\"link endpoint for removed NIC still attached to a network dispatcher\")\n}\n- }\n})\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't prevent removing loopback in core netstack
Implement this check in netstack integration (in sentry) so that the
core netstack does not prevent an integrator from removing loopback.
PiperOrigin-RevId: 493992707 |
259,907 | 08.12.2022 14:35:23 | 28,800 | 110c3d76e4a60070be4e38204b23ce206bace87d | Remove remaining references to vfs2. | [
{
"change_type": "MODIFY",
"old_path": "images/syzkaller/default-gvisor-config.cfg",
"new_path": "images/syzkaller/default-gvisor-config.cfg",
"diff": "\"type\": \"gvisor\",\n\"vm\": {\n\"count\": 1,\n- \"runsc_args\": \"--debug --network none --platform ptrace --vfs2 -net-raw -watchdog-action=panic\"\n+ \"runsc_args\": \"--debug --network none --platform ptrace -net-raw -watchdog-action=panic\"\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/elf.go",
"new_path": "pkg/sentry/loader/elf.go",
"diff": "@@ -90,14 +90,8 @@ type elfInfo struct {\nsharedObject bool\n}\n-// fullReader interface extracts the ReadFull method from fsbridge.File so that\n-// client code does not need to define an entire fsbridge.File when only read\n-// functionality is needed.\n-//\n-// TODO(gvisor.dev/issue/1035): Once VFS2 ships, rewrite this to wrap\n-// vfs.FileDescription's PRead/Read instead.\ntype fullReader interface {\n- // ReadFull is the same as fsbridge.File.ReadFull.\n+ // ReadFull is the same as vfs.FileDescription.ReadFull.\nReadFull(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/vdso.go",
"new_path": "pkg/sentry/loader/vdso.go",
"diff": "@@ -55,6 +55,7 @@ type byteFullReader struct {\ndata []byte\n}\n+// ReadFull implements fullReader.ReadFull.\nfunc (b *byteFullReader) ReadFull(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) {\nif offset < 0 {\nreturn 0, linuxerr.EINVAL\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/unix/unix.go",
"new_path": "pkg/sentry/socket/unix/unix.go",
"diff": "@@ -417,12 +417,6 @@ func (*provider) Pair(t *kernel.Task, stype linux.SockType, protocol int) (*vfs.\nreturn s1, s2, nil\n}\n-// socketOpsCommon contains the socket operations common to VFS1 and VFS2.\n-//\n-// +stateify savable\n-type socketOpsCommon struct {\n-}\n-\nfunc (s *Socket) isPacket() bool {\nswitch s.stype {\ncase linux.SOCK_DGRAM, linux.SOCK_SEQPACKET:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/test/dockerutil/README.md",
"new_path": "pkg/test/dockerutil/README.md",
"diff": "@@ -35,8 +35,7 @@ use pprof profiles generated by `runsc debug`. The profiler will generate Block,\nCPU, Heap, Goroutine, and Mutex profiles. To generate profiles:\n* Install runsc with the `--profile` flag: `make configure RUNTIME=myrunsc\n- ARGS=\"--profile\"` Also add other flags with ARGS like `--platform=kvm` or\n- `--vfs2`.\n+ ARGS=\"--profile\"` Also add other flags with ARGS like `--platform=kvm`.\n* Restart docker: `sudo service docker restart`\nTo run and generate CPU profiles run:\n"
},
{
"change_type": "MODIFY",
"old_path": "test/fuse/README.md",
"new_path": "test/fuse/README.md",
"diff": "@@ -107,19 +107,16 @@ complete a command and when the server awaits the next instruction.\n## Running the tests\n-Based on syscall tests, FUSE tests generate targets only with vfs2 and fuse\n-enabled. The corresponding targets end in `_fuse`.\n-\nFor example, to run fuse test in `stat_test.cc`:\n```bash\n-$ bazel test //test/fuse:stat_test_runsc_ptrace_vfs2_fuse\n+$ bazel test //test/fuse:stat_test_runsc_ptrace\n```\nTest all targets tagged with fuse:\n```bash\n-$ bazel test --test_tag_filters=fuse //test/fuse/...\n+$ bazel test //test/fuse/...\n```\n## Writing a new FUSE test\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove remaining references to vfs2.
PiperOrigin-RevId: 493998299 |
259,985 | 08.12.2022 15:19:16 | 28,800 | a005752d88d1f5a5b629071e423e957e7a679ac9 | iouringfs: Pet the watchdog during ProcessSubmissions.
This loop can take a long time. Since we're blocking on IO outside
Task.Block, we need to periodically check for interrupts and
pet the watchdog.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/iouringfs/iouringfs.go",
"new_path": "pkg/sentry/fsimpl/iouringfs/iouringfs.go",
"diff": "@@ -337,6 +337,12 @@ func (fd *FileDescription) ProcessSubmissions(t *kernel.Task, toSubmit uint32, m\nsubmitted := uint32(0)\nfor toSubmit > submitted {\n+ // This loop can take a long time to process, so periodically check for\n+ // interrupts. This also pets the watchdog.\n+ if t.Interrupted() {\n+ return -1, linuxerr.EINTR\n+ }\n+\nif fetchRB {\nview, err = fd.ioRingsBuf.view(fd.ioRings.SizeBytes())\nif err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | iouringfs: Pet the watchdog during ProcessSubmissions.
This loop can take a long time. Since we're blocking on IO outside
Task.Block, we need to periodically check for interrupts and
pet the watchdog.
Reported-by: syzbot+3fd282d7e4e5c83fc196@syzkaller.appspotmail.com
PiperOrigin-RevId: 494009269 |
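The iouringfs fix above adds a per-iteration interrupt check so a long submission queue cannot run unchecked. The sketch below shows the general shape of that guard using context cancellation in place of kernel.Task.Interrupted; processSubmissions, errInterrupted, and the work slice are invented for the example.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errInterrupted = errors.New("interrupted")

// processSubmissions drains queued work but checks for cancellation on every
// iteration, so a long queue cannot block interrupt delivery (or, in the
// sentry's case, starve the watchdog).
func processSubmissions(ctx context.Context, work []func()) (int, error) {
	done := 0
	for _, w := range work {
		select {
		case <-ctx.Done():
			return done, errInterrupted
		default:
		}
		w()
		done++
	}
	return done, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Millisecond)
	defer cancel()

	work := make([]func(), 1000)
	for i := range work {
		work[i] = func() { time.Sleep(time.Millisecond) }
	}
	n, err := processSubmissions(ctx, work)
	fmt.Printf("processed %d items before stopping: %v\n", n, err)
}
```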
259,978 | 09.12.2022 11:23:10 | 28,800 | 1cbabd822597466845c815811dd4a98f22a7581d | Fix not setting AddressDispatcher on promotion
AddressDispatcher was only being set when adding a new address, so in
cases such as re-adding an expired address, the AddressDispatcher that
was passed in would be lost.
Added a regression test. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/addressable_endpoint_state.go",
"new_path": "pkg/tcpip/stack/addressable_endpoint_state.go",
"diff": "@@ -271,7 +271,6 @@ func (a *AddressableEndpointState) addAndAcquireAddressLocked(addr tcpip.Address\n// We never promote an address to temporary - it can only be added as such.\n// If we are actually adding a permanent address, it is promoted below.\naddrState.kind = Temporary\n- addrState.disp = properties.Disp\n}\n// At this point we have an address we are either promoting from an expired or\n@@ -295,6 +294,7 @@ func (a *AddressableEndpointState) addAndAcquireAddressLocked(addr tcpip.Address\nlifetimes := properties.Lifetimes\nlifetimes.sanitize()\naddrState.lifetimes = lifetimes\n+ addrState.disp = properties.Disp\nif attemptAddToPrimary {\nswitch properties.PEB {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/addressable_endpoint_state_test.go",
"new_path": "pkg/tcpip/stack/addressable_endpoint_state_test.go",
"diff": "@@ -60,3 +60,47 @@ func TestAddressableEndpointStateCleanup(t *testing.T) {\nt.Fatalf(\"got s.AcquireAssignedAddress(%s, false, NeverPrimaryEndpoint) = %s, want = nil\", addr.Address, ep.AddressWithPrefix())\n}\n}\n+\n+func TestAddressDispatcherExpiredToAssigned(t *testing.T) {\n+ var networkEp fakeNetworkEndpoint\n+ if err := networkEp.Enable(); err != nil {\n+ t.Fatalf(\"ep.Enable(): %s\", err)\n+ }\n+\n+ var s stack.AddressableEndpointState\n+ s.Init(&networkEp, stack.AddressableEndpointStateOptions{HiddenWhileDisabled: false})\n+\n+ addr := tcpip.AddressWithPrefix{\n+ Address: \"\\x01\",\n+ PrefixLen: 8,\n+ }\n+\n+ ep, err := s.AddAndAcquirePermanentAddress(addr, stack.AddressProperties{})\n+ if err != nil {\n+ t.Fatalf(\"s.AddAndAcquirePermanentAddress(%s, {}): %s\", addr, err)\n+ }\n+ defer ep.DecRef()\n+ if !ep.IncRef() {\n+ t.Fatalf(\"failed to increase ref count of address endpoint\")\n+ }\n+\n+ if err := s.RemovePermanentEndpoint(ep, stack.AddressRemovalManualAction); err != nil {\n+ ep.DecRef()\n+ t.Fatalf(\"s.RemovePermanentEndpoint(ep, stack.AddressRemovalManualAction): %s\", err)\n+ }\n+\n+ addrDisp := &addressDispatcher{\n+ changedCh: make(chan addressChangedEvent, 1),\n+ removedCh: make(chan stack.AddressRemovalReason, 1),\n+ addr: addr,\n+ }\n+ properties := stack.AddressProperties{Disp: addrDisp}\n+ readdedEp, err := s.AddAndAcquirePermanentAddress(addr, properties)\n+ if err != nil {\n+ t.Fatalf(\"s.AddAndAcquirePermanentAddress(%s, %+v): %s\", addr, properties, err)\n+ }\n+ defer readdedEp.DecRef()\n+ if err := addrDisp.expectChanged(stack.AddressLifetimes{}, stack.AddressAssigned); err != nil {\n+ t.Fatalf(\"expect to observe address added: %s\", err)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix not setting AddressDispatcher on promotion
AddressDispatcher was only being set when adding a new address, so in
cases such as re-adding an expired address, the AddressDispatcher that
was passed in would be lost.
Added a regression test.
PiperOrigin-RevId: 494220872 |
259,907 | 09.12.2022 14:49:12 | 28,800 | 368e8541469752b08312d25c56b654c626624410 | overlay2: Do not use O_TMPFILE to create unnamed temporary file.
Not all filesystems support O_TMPFILE. Instead manually create an unnamed
file by immediately unlinking the file after creation but keeping an FD on it.
Reported-by:
Proposed-by: Andrei Vagin | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -786,15 +786,19 @@ func createOverlayFilestore(overlay2 config.Overlay2) (*os.File, error) {\nif !fileInfo.IsDir() {\nreturn nil, fmt.Errorf(\"overlay2 flag should specify an existing directory\")\n}\n- // Create an unnamed temporary file in filestore directory using\n- // O_TMPFILE. This file will be deleted when the container exits.\n- // Also specify O_EXCL to prevent this file from being linked into the\n- // filesystem. See open(2) man page's section for O_TMPFILE for details.\n- unnamedTmpFD, err := unix.Open(overlay2.FilestoreDir, unix.O_TMPFILE|unix.O_RDWR|unix.O_EXCL, 0666)\n+ // Create an unnamed temporary file in filestore directory which will be\n+ // deleted when the last FD on it is closed. We don't use O_TMPFILE because\n+ // it is not supported on all filesystems. So we simulate it by creating a\n+ // named file and then immediately unlinking it while keeping an FD on it.\n+ // This file will be deleted when the container exits.\n+ filestoreFile, err := os.CreateTemp(overlay2.FilestoreDir, \"runsc-overlay-filestore-*\")\nif err != nil {\n- return nil, fmt.Errorf(\"failed to create an unnamed temporary file inside %q\", overlay2.FilestoreDir)\n+ return nil, fmt.Errorf(\"failed to create a temporary file inside %q: %v\", overlay2.FilestoreDir, err)\n}\n- return os.NewFile(uintptr(unnamedTmpFD), \"overlay-filestore\"), nil\n+ if err := unix.Unlink(filestoreFile.Name()); err != nil {\n+ return nil, fmt.Errorf(\"failed to unlink temporary file %q: %v\", filestoreFile.Name(), err)\n+ }\n+ return filestoreFile, nil\n}\n// saveLocked saves the container metadata to a file.\n"
}
] | Go | Apache License 2.0 | google/gvisor | overlay2: Do not use O_TMPFILE to create unnamed temporary file.
Not all filesystems support O_TMPFILE. Instead manually create an unnamed
file by immediately unlinking the file after creation but keeping an FD on it.
Reported-by: syzbot+3bf76e1ba1acde65c675@syzkaller.appspotmail.com
Proposed-by: Andrei Vagin <avagin@gmail.com>
PiperOrigin-RevId: 494269186 |
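The overlay2 change above simulates O_TMPFILE by creating a named temporary file and unlinking it immediately while keeping a descriptor open. Below is a small standalone illustration of that idiom; anonTempFile is a made-up helper, not the runsc function.

```go
package main

import (
	"fmt"
	"os"
)

// anonTempFile returns an open file with no name in the filesystem: the data
// lives only as long as at least one descriptor remains open.
func anonTempFile(dir string) (*os.File, error) {
	f, err := os.CreateTemp(dir, "anon-*")
	if err != nil {
		return nil, fmt.Errorf("create temp file in %q: %w", dir, err)
	}
	// Unlink immediately. The open descriptor keeps the inode alive, and the
	// kernel frees it once the last descriptor is closed.
	if err := os.Remove(f.Name()); err != nil {
		f.Close()
		return nil, fmt.Errorf("unlink %q: %w", f.Name(), err)
	}
	return f, nil
}

func main() {
	f, err := anonTempFile(os.TempDir())
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fmt.Fprintln(f, "scratch data with no directory entry")
	fmt.Println("anonymous temp file created and already unlinked")
}
```

Because the inode is only freed when the last descriptor closes, the file behaves like an unnamed scratch file and is reclaimed automatically when the process exits, without requiring filesystem support for O_TMPFILE.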
260,002 | 18.07.2022 14:07:45 | 25,200 | b4a05ce65b2257786b2e48300cdb07f425194196 | Add support for /proc/[pid]/limits | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/proc/task.go",
"new_path": "pkg/sentry/fsimpl/proc/task.go",
"diff": "@@ -64,6 +64,7 @@ func (fs *filesystem) newTaskInode(ctx context.Context, task *kernel.Task, pidns\n\"fdinfo\": fs.newFDInfoDirInode(ctx, task),\n\"gid_map\": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0644, &idMapData{task: task, gids: true}),\n\"io\": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0400, newIO(task, isThreadGroup)),\n+ \"limits\": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0444, &limitsData{task: task}),\n\"maps\": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0444, &mapsData{task: task}),\n\"mem\": fs.newMemInode(ctx, task, fs.NextIno(), 0400),\n\"mountinfo\": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0444, &mountInfoData{fs: fs, task: task}),\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/proc/task_files.go",
"new_path": "pkg/sentry/fsimpl/proc/task_files.go",
"diff": "@@ -529,6 +529,43 @@ func (fd *memFD) SetStat(context.Context, vfs.SetStatOptions) error {\n// Release implements vfs.FileDescriptionImpl.Release.\nfunc (fd *memFD) Release(context.Context) {}\n+// limitsData implements vfs.DynamicBytesSource for /proc/[pid]/limits.\n+//\n+// +stateify savable\n+type limitsData struct {\n+ kernfs.DynamicBytesFile\n+\n+ task *kernel.Task\n+}\n+\n+func (d *limitsData) Generate(ctx context.Context, buf *bytes.Buffer) error {\n+ // formatting matches the kernel output from linux/fs/proc/base.c:proc_pid_limits()\n+ fmt.Fprintf(buf, \"Limit Soft Limit Hard Limit Units \\n\")\n+ for _, lt := range limits.AllLimitTypes {\n+ fmt.Fprintf(buf, \"%-25s \", lt.Name())\n+\n+ l := d.task.Limits().Get(lt)\n+ if l.Cur == limits.Infinity {\n+ fmt.Fprintf(buf, \"%-20s \", \"unlimited\")\n+ } else {\n+ fmt.Fprintf(buf, \"%-20d \", l.Cur)\n+ }\n+\n+ if l.Max == limits.Infinity {\n+ fmt.Fprintf(buf, \"%-20s \", \"unlimited\")\n+ } else {\n+ fmt.Fprintf(buf, \"%-20d \", l.Max)\n+ }\n+\n+ if u := lt.Unit(); u != \"\" {\n+ fmt.Fprintf(buf, \"%-10s\", u)\n+ }\n+\n+ buf.WriteByte('\\n')\n+ }\n+ return nil\n+}\n+\n// mapsData implements vfs.DynamicBytesSource for /proc/[pid]/maps.\n//\n// +stateify savable\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/proc/tasks_test.go",
"new_path": "pkg/sentry/fsimpl/proc/tasks_test.go",
"diff": "@@ -78,6 +78,7 @@ var (\n\"fdinfo\": linux.DT_DIR,\n\"gid_map\": linux.DT_REG,\n\"io\": linux.DT_REG,\n+ \"limits\": linux.DT_REG,\n\"maps\": linux.DT_REG,\n\"mem\": linux.DT_REG,\n\"mountinfo\": linux.DT_REG,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/limits/limits.go",
"new_path": "pkg/sentry/limits/limits.go",
"diff": "@@ -43,6 +43,103 @@ const (\nRttime\n)\n+var AllLimitTypes = []LimitType{\n+ CPU,\n+ FileSize,\n+ Data,\n+ Stack,\n+ Core,\n+ Rss,\n+ ProcessCount,\n+ NumberOfFiles,\n+ MemoryLocked,\n+ AS,\n+ Locks,\n+ SignalsPending,\n+ MessageQueueBytes,\n+ Nice,\n+ RealTimePriority,\n+ Rttime,\n+}\n+\n+// Name returns the kernel name of the limit\n+func (lt LimitType) Name() string {\n+ switch lt {\n+ case CPU:\n+ return \"Max cpu time\"\n+ case FileSize:\n+ return \"Max file size\"\n+ case Data:\n+ return \"Max data size\"\n+ case Stack:\n+ return \"Max stack size\"\n+ case Core:\n+ return \"Max core file size\"\n+ case Rss:\n+ return \"Max resident set\"\n+ case ProcessCount:\n+ return \"Max processes\"\n+ case NumberOfFiles:\n+ return \"Max open files\"\n+ case MemoryLocked:\n+ return \"Max locked memory\"\n+ case AS:\n+ return \"Max address space\"\n+ case Locks:\n+ return \"Max file locks\"\n+ case SignalsPending:\n+ return \"Max pending signals\"\n+ case MessageQueueBytes:\n+ return \"Max msgqueue size\"\n+ case Nice:\n+ return \"Max nice priority\"\n+ case RealTimePriority:\n+ return \"Max realtime priority\"\n+ case Rttime:\n+ return \"Max realtime timeout\"\n+ }\n+ return \"unknown\"\n+}\n+\n+// Unit returns the unit string for a limit\n+func (lt LimitType) Unit() string {\n+ switch lt {\n+ case CPU:\n+ return \"seconds\"\n+ case FileSize:\n+ return \"bytes\"\n+ case Data:\n+ return \"bytes\"\n+ case Stack:\n+ return \"bytes\"\n+ case Core:\n+ return \"bytes\"\n+ case Rss:\n+ return \"bytes\"\n+ case ProcessCount:\n+ return \"processes\"\n+ case NumberOfFiles:\n+ return \"files\"\n+ case MemoryLocked:\n+ return \"bytes\"\n+ case AS:\n+ return \"bytes\"\n+ case Locks:\n+ return \"locks\"\n+ case SignalsPending:\n+ return \"signals\"\n+ case MessageQueueBytes:\n+ return \"bytes\"\n+ case Nice:\n+ return \"\"\n+ case RealTimePriority:\n+ return \"\"\n+ case Rttime:\n+ return \"us\"\n+ }\n+ return \"\"\n+}\n+\n// Infinity is a constant representing a resource with no limit.\nconst Infinity = ^uint64(0)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add support for /proc/[pid]/limits |
260,002 | 12.12.2022 17:13:27 | 28,800 | db0dfd6a3f9da26154ade878ceb265080b34af24 | Get limits only once instead of on each loop iteration | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/proc/task_files.go",
"new_path": "pkg/sentry/fsimpl/proc/task_files.go",
"diff": "@@ -539,12 +539,13 @@ type limitsData struct {\n}\nfunc (d *limitsData) Generate(ctx context.Context, buf *bytes.Buffer) error {\n+ taskLimits := d.task.Limits()\n// formatting matches the kernel output from linux/fs/proc/base.c:proc_pid_limits()\nfmt.Fprintf(buf, \"Limit Soft Limit Hard Limit Units \\n\")\nfor _, lt := range limits.AllLimitTypes {\nfmt.Fprintf(buf, \"%-25s \", lt.Name())\n- l := d.task.Limits().Get(lt)\n+ l := taskLimits.Get(lt)\nif l.Cur == limits.Infinity {\nfmt.Fprintf(buf, \"%-20s \", \"unlimited\")\n} else {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Get limits only once instead of on each loop iteration |
259,909 | 13.12.2022 14:39:48 | 28,800 | 70be2fc8a77217eb98c723b2d491d6e2ceff4579 | Remove unsetting route's cached neighbor entry in link res callback.
This does not change behavior, since expired or invalid entries
are ignored anyway. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/route.go",
"new_path": "pkg/tcpip/stack/route.go",
"diff": "@@ -406,9 +406,6 @@ func (r *Route) resolvedFields(afterResolve func(ResolvedFieldsResult)) (RouteIn\n}\nafterResolveFields := fields\nentry, ch, err := r.linkRes.neigh.entry(r.nextHop(), linkAddressResolutionRequestLocalAddr, func(lrr LinkResolutionResult) {\n- if lrr.Err != nil {\n- r.setCachedNeighborEntry(nil)\n- }\nif afterResolve != nil {\nif lrr.Err == nil {\nafterResolveFields.RemoteLinkAddress = lrr.LinkAddress\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove unsetting route's cached neighbor entry in link res callback.
This does not change behavior, since expired or invalid entries
are ignored anyway.
PiperOrigin-RevId: 495130325 |
259,950 | 02.12.2022 10:44:34 | -28,800 | 95ef37391962b608cdfca9d95d32be56095e0e12 | limit the number of negative children to avoid unbounded memory growth. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/BUILD",
"new_path": "pkg/sentry/fsimpl/gofer/BUILD",
"diff": "@@ -3,6 +3,18 @@ load(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\nlicenses([\"notice\"])\n+go_template_instance(\n+ name = \"string_list\",\n+ out = \"string_list.go\",\n+ package = \"gofer\",\n+ prefix = \"string\",\n+ template = \"//pkg/ilist:generic_list\",\n+ types = {\n+ \"Element\": \"*stringListElem\",\n+ \"Linker\": \"*stringListElem\",\n+ },\n+)\n+\ngo_template_instance(\nname = \"dentry_list\",\nout = \"dentry_list.go\",\n@@ -41,6 +53,7 @@ go_template_instance(\ngo_library(\nname = \"gofer\",\nsrcs = [\n+ \"string_list.go\",\n\"dentry_list.go\",\n\"directory.go\",\n\"filesystem.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/directory.go",
"new_path": "pkg/sentry/fsimpl/gofer/directory.go",
"diff": "@@ -70,6 +70,9 @@ func (d *dentry) cacheNewChildLocked(child *dentry, name string) {\nchild.name = name\nif d.children == nil {\nd.children = make(map[string]*dentry)\n+ } else if c, ok := d.children[name]; ok && c == nil {\n+ // This child will not be negative, decrease count of negativeChildren.\n+ d.negativeChildren--\n}\nd.children[name] = child\n}\n@@ -77,6 +80,7 @@ func (d *dentry) cacheNewChildLocked(child *dentry, name string) {\n// Preconditions:\n// - d.dirMu must be locked.\n// - d.isDir().\n+// - name is not already a negative entry.\nfunc (d *dentry) cacheNegativeLookupLocked(name string) {\n// Don't cache negative lookups if InteropModeShared is in effect (since\n// this makes remote lookup unavoidable), or if d.isSynthetic() (in which\n@@ -90,6 +94,26 @@ func (d *dentry) cacheNegativeLookupLocked(name string) {\nd.children = make(map[string]*dentry)\n}\nd.children[name] = nil\n+ d.negativeChildren++\n+\n+ if !d.negativeChildrenCache.isInited() {\n+ // Initializing cache with all negative children name at the first time\n+ // that negativeChildren increase upto max.\n+ if d.negativeChildren >= maxCachedNegativeChildren {\n+ d.negativeChildrenCache.init(maxCachedNegativeChildren)\n+ for childName, child := range d.children {\n+ if child == nil {\n+ d.negativeChildrenCache.add(childName)\n+ }\n+ }\n+ }\n+ } else if victim := d.negativeChildrenCache.add(name); victim != \"\" {\n+ // If victim is a negative entry in d.children, delete it.\n+ if child, ok := d.children[victim]; ok && child == nil {\n+ delete(d.children, victim)\n+ d.negativeChildren--\n+ }\n+ }\n}\ntype createSyntheticOpts struct {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"new_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"diff": "@@ -586,6 +586,7 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir\nif child, ok := parent.children[name]; ok && child == nil {\n// Delete the now-stale negative dentry.\ndelete(parent.children, name)\n+ parent.negativeChildren--\n}\nparent.touchCMtime()\nparent.clearDirentsLocked()\n@@ -1604,25 +1605,16 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa\nds = appendDentry(ds, replaced)\n}\noldParent.cacheNegativeLookupLocked(oldName)\n- // We don't use newParent.cacheNewChildLocked() since we don't want to mess\n- // with reference counts and queue oldParent for checkCachingLocked if the\n- // parent isn't actually changing.\n- if oldParent != newParent {\n- oldParent.decRefNoCaching()\n- newParent.IncRef()\n- ds = appendDentry(ds, newParent)\n- ds = appendDentry(ds, oldParent)\nif renamed.isSynthetic() {\noldParent.syntheticChildren--\nnewParent.syntheticChildren++\n}\n- renamed.parent = newParent\n- }\n- renamed.name = newName\n- if newParent.children == nil {\n- newParent.children = make(map[string]*dentry)\n+ newParent.cacheNewChildLocked(renamed, newName)\n+ oldParent.decRefNoCaching()\n+ if oldParent != newParent {\n+ ds = appendDentry(ds, newParent)\n+ ds = appendDentry(ds, oldParent)\n}\n- newParent.children[newName] = renamed\n// Update metadata.\nif renamed.cachedMetadataAuthoritative() {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"new_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"diff": "@@ -95,7 +95,45 @@ const (\ncacheRemoteRevalidating = \"remote_revalidating\"\n)\n-const defaultMaxCachedDentries = 1000\n+const (\n+ defaultMaxCachedDentries = 1000\n+ maxCachedNegativeChildren = 1000\n+)\n+\n+// stringFixedCache is a fixed sized cache, once initialized,\n+// its size never changes.\n+//\n+// +stateify savable\n+type stringFixedCache struct {\n+ // namesList stores negative names with fifo list.\n+ // name stored in namesList only means it used to be negative\n+ // at the moment you pushed it to the list.\n+ namesList stringList\n+ size uint64\n+}\n+\n+func (cache *stringFixedCache) isInited() bool {\n+ return cache.size != 0\n+}\n+\n+func (cache *stringFixedCache) init(size uint64) {\n+ elements := make([]stringListElem, size)\n+ for i := uint64(0); i < size; i++ {\n+ cache.namesList.PushFront(&elements[i])\n+ }\n+ cache.size = size\n+}\n+\n+// Update will push name to the front of the list,\n+// and pop the tail value.\n+func (cache *stringFixedCache) add(name string) string {\n+ tail := cache.namesList.Back()\n+ victimName := tail.str\n+ tail.str = name\n+ cache.namesList.Remove(tail)\n+ cache.namesList.PushFront(tail)\n+ return victimName\n+}\n// +stateify savable\ntype dentryCache struct {\n@@ -879,6 +917,13 @@ type dentry struct {\n// children is protected by dirMu.\nchildren map[string]*dentry\n+ // If this dentry represents a directory, negativeChildrenCache cache\n+ // names of negative children, negativeChildrenCache is protected by dirMu.\n+ negativeChildrenCache stringFixedCache\n+ // If this dentry represents a directory, negativeChildren is the number\n+ // of negative child, negativeChildren is protected by dirMu.\n+ negativeChildren int\n+\n// If this dentry represents a directory, syntheticChildren is the number\n// of child dentries for which dentry.isSynthetic() == true.\n// syntheticChildren is protected by dirMu.\n@@ -1016,6 +1061,13 @@ type dentry struct {\nwatches vfs.Watches\n}\n+// +stateify savable\n+type stringListElem struct {\n+ // str is the string that this elem represents.\n+ str string\n+ stringEntry\n+}\n+\n// +stateify savable\ntype dentryListElem struct {\n// d is the dentry that this elem represents.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/gofer_test.go",
"new_path": "pkg/sentry/fsimpl/gofer/gofer_test.go",
"diff": "@@ -65,3 +65,25 @@ func TestDestroyIdempotent(t *testing.T) {\nchild.checkCachingLocked(ctx, true /* renameMuWriteLocked */)\nchild.checkCachingLocked(ctx, true /* renameMuWriteLocked */)\n}\n+\n+func TestStringFixedCache(t *testing.T) {\n+ names := []string{\"a\", \"b\", \"c\"}\n+ cache := stringFixedCache{}\n+\n+ cache.init(uint64(len(names)))\n+ if inited := cache.isInited(); !inited {\n+ t.Fatalf(\"cache.isInited(): %v, want: true\", inited)\n+ }\n+ for _, s := range names {\n+ victim := cache.add(s)\n+ if victim != \"\" {\n+ t.Fatalf(\"cache.add(): %v, want: \\\"\\\"\", victim)\n+ }\n+ }\n+ for _, s := range names {\n+ victim := cache.add(\"something\")\n+ if victim != s {\n+ t.Fatalf(\"cache.add(): %v, want: %v\", victim, s)\n+ }\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | limit the number of negative children to avoid unbounded memory growth.
Signed-off-by: Tan Yifeng <yiftan@tencent.com> |
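The gofer commit above bounds cached negative dentries with a fixed-size FIFO of names whose add() returns the evicted victim. The following is a simplified, hedged sketch of that eviction scheme using a plain ring of strings instead of the generated intrusive list; fifoCache and the negative map are illustrative only.

```go
package main

import "fmt"

// fifoCache holds at most len(names) entries; adding a new name returns the
// evicted victim ("" while the cache is still filling up).
type fifoCache struct {
	names []string
	next  int
}

func newFIFOCache(size int) *fifoCache {
	return &fifoCache{names: make([]string, size)}
}

func (c *fifoCache) add(name string) string {
	victim := c.names[c.next]
	c.names[c.next] = name
	c.next = (c.next + 1) % len(c.names)
	return victim
}

func main() {
	cache := newFIFOCache(3)
	negative := map[string]bool{} // Simulated negative child entries.
	for _, name := range []string{"a", "b", "c", "d"} {
		negative[name] = true
		if victim := cache.add(name); victim != "" && negative[victim] {
			delete(negative, victim) // Keep the number of negative entries bounded.
		}
	}
	fmt.Println(negative) // "a" was evicted once the fourth name arrived.
}
```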
259,975 | 13.12.2022 20:23:39 | 28,800 | 2deb308638ae09906c67ee6a33c062158b6f6dd3 | Add initial portforward fileDescriptionReadWriter methods and tests.
fileDescriptionReadWriter implements io.ReadWriter for use in port
forwarding. Add initial implementation and tests. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/portforward/BUILD",
"diff": "+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"portforward\",\n+ srcs = [\n+ \"portforward.go\",\n+ \"portforward_fd_rw.go\",\n+ ],\n+ deps = [\n+ \"//pkg/context\",\n+ \"//pkg/errors/linuxerr\",\n+ \"//pkg/sentry/vfs\",\n+ \"//pkg/usermem\",\n+ \"//pkg/waiter\",\n+ ],\n+)\n+\n+go_test(\n+ name = \"portforward_test\",\n+ srcs = [\n+ \"portforward_fd_rw_test.go\",\n+ ],\n+ library = \":portforward\",\n+ deps = [\n+ \"//pkg/abi/linux\",\n+ \"//pkg/context\",\n+ \"//pkg/errors/linuxerr\",\n+ \"//pkg/sentry/contexttest\",\n+ \"//pkg/sentry/vfs\",\n+ \"//pkg/usermem\",\n+ \"//pkg/waiter\",\n+ ],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/portforward/portforward.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package portforward holds the infrastructure to support the port forward command.\n+package portforward\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/portforward/portforward_fd_rw.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package portforward\n+\n+import (\n+ \"io\"\n+\n+ \"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n+ \"gvisor.dev/gvisor/pkg/sentry/vfs\"\n+ \"gvisor.dev/gvisor/pkg/usermem\"\n+ \"gvisor.dev/gvisor/pkg/waiter\"\n+)\n+\n+// fileDescriptionReadWriter implements io.ReadWriter and allows reading and\n+// writing to a vfs.FileDescription.\n+type fileDescriptionReadWriter struct {\n+ // ctx is the context for the socket reader.\n+ ctx context.Context\n+\n+ // file is the file to read and write from.\n+ file *vfs.FileDescription\n+}\n+\n+// Read implements io.Reader.Read. It performs a blocking read on the fd.\n+func (r *fileDescriptionReadWriter) Read(buf []byte) (int, error) {\n+ var (\n+ notifyCh chan struct{}\n+ waitEntry waiter.Entry\n+ )\n+ n, err := r.file.Read(r.ctx, usermem.BytesIOSequence(buf), vfs.ReadOptions{})\n+ for linuxerr.Equals(linuxerr.ErrWouldBlock, err) {\n+ if notifyCh == nil {\n+ waitEntry, notifyCh = waiter.NewChannelEntry(waiter.ReadableEvents | waiter.WritableEvents | waiter.EventHUp | waiter.EventErr)\n+ // Register for when the endpoint is readable or disconnected.\n+ r.file.EventRegister(&waitEntry)\n+ defer r.file.EventUnregister(&waitEntry)\n+ }\n+ <-notifyCh\n+ n, err = r.file.Read(r.ctx, usermem.BytesIOSequence(buf), vfs.ReadOptions{})\n+ }\n+\n+ // host fd FileDescriptions use recvmsg which returns zero when the\n+ // peer has shutdown. When that happens return EOF.\n+ if n == 0 && err == nil {\n+ return 0, io.EOF\n+ }\n+ return int(n), err\n+}\n+\n+// Write implements io.Writer.Write. It performs a blocking write on the fd.\n+func (r *fileDescriptionReadWriter) Write(buf []byte) (int, error) {\n+ var notifyCh chan struct{}\n+ var waitEntry waiter.Entry\n+ n, err := r.file.Write(r.ctx, usermem.BytesIOSequence(buf), vfs.WriteOptions{})\n+ for linuxerr.Equals(linuxerr.ErrWouldBlock, err) {\n+ if notifyCh == nil {\n+ waitEntry, notifyCh = waiter.NewChannelEntry(waiter.WritableEvents | waiter.EventHUp | waiter.EventErr)\n+ // Register for when the endpoint is writable or disconnected.\n+ r.file.EventRegister(&waitEntry)\n+ defer r.file.EventUnregister(&waitEntry)\n+ }\n+ <-notifyCh\n+ n, err = r.file.Write(r.ctx, usermem.BytesIOSequence(buf), vfs.WriteOptions{})\n+ }\n+ return int(n), err\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/portforward/portforward_fd_rw_test.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package portforward\n+\n+import (\n+ \"bytes\"\n+ \"fmt\"\n+ \"io\"\n+ \"reflect\"\n+ \"sync\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n+ \"gvisor.dev/gvisor/pkg/sentry/contexttest\"\n+ \"gvisor.dev/gvisor/pkg/sentry/vfs\"\n+ \"gvisor.dev/gvisor/pkg/usermem\"\n+ \"gvisor.dev/gvisor/pkg/waiter\"\n+)\n+\n+// mockFileDescriptionRWImpl implements all vfs.FileDescriptionImpl methods used in\n+// fileDescriptionReaderWriter for a mockFileDescription.\n+type mockFileDescriptionRWImpl interface {\n+ Read(context.Context, usermem.IOSequence, vfs.ReadOptions) (int64, error)\n+ Write(context.Context, usermem.IOSequence, vfs.WriteOptions) (int64, error)\n+ EventRegister(*waiter.Entry) error\n+ EventUnregister(*waiter.Entry)\n+ Release(context.Context)\n+}\n+\n+// mockFileDescription implements vfs.FileDescriptionImpl for portforward tests.\n+type mockFileDescription struct {\n+ vfsfd vfs.FileDescription\n+ impl vfs.FileDescriptionImpl\n+ vfsObj *vfs.VirtualFilesystem\n+}\n+\n+// Read implements FileDescriptionImpl.Read.\n+func (m *mockFileDescription) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {\n+ return m.impl.Read(ctx, dst, opts)\n+}\n+\n+// Write implements vfs.FileDescriptionImpl.Write.\n+func (m *mockFileDescription) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {\n+ return m.impl.Write(ctx, src, opts)\n+}\n+\n+// EventRegister implements vfs.FileDescriptionImpl.EventRegister.\n+func (m *mockFileDescription) EventRegister(waitEntry *waiter.Entry) error {\n+ return m.impl.EventRegister(waitEntry)\n+}\n+\n+// EventUnregister implements vfs.FileDescriptionImpl.EventUnregister.\n+func (m *mockFileDescription) EventUnregister(waitEntry *waiter.Entry) {\n+ m.impl.EventUnregister(waitEntry)\n+}\n+\n+// Release implements vfs.FileDescriptionImpl.Release.\n+func (m *mockFileDescription) Release(ctx context.Context) { m.impl.Release(ctx) }\n+\n+func newMockFileDescription(ctx context.Context, fdImpl vfs.FileDescriptionImpl) (*vfs.FileDescription, error) {\n+ vfsObj := &vfs.VirtualFilesystem{}\n+ if err := vfsObj.Init(ctx); err != nil {\n+ return nil, fmt.Errorf(\"vfsObj.Init: %v\", err)\n+ }\n+ vd := vfsObj.NewAnonVirtualDentry(\"mock_app\")\n+ defer vd.DecRef(ctx)\n+ fd := mockFileDescription{\n+ impl: fdImpl,\n+ vfsObj: vfsObj,\n+ }\n+ fd.vfsfd.Init(fd.impl, linux.O_RDWR, vd.Mount(), vd.Dentry(), &vfs.FileDescriptionOptions{})\n+ fd.vfsObj = vfsObj\n+ return &fd.vfsfd, nil\n+}\n+\n+// readerWriter implements mockFileDescriptionRWImpl. 
On write, it appends given data to a buffer.\n+// On reads it pops the requested amount of data off the buffer.\n+type readerWriter struct {\n+ vfs.FileDescriptionDefaultImpl\n+ vfs.NoLockFD\n+ vfs.DentryMetadataFileDescriptionImpl\n+ buf bytes.Buffer\n+ released bool\n+}\n+\n+var _ vfs.FileDescriptionImpl = (*readerWriter)(nil)\n+\n+// Read implements vfs.FileDescriptionImpl.Read details for the parent mockFileDescription.\n+func (rw *readerWriter) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {\n+ if rw.released {\n+ return 0, nil\n+ }\n+ if rw.buf.Len() == 0 {\n+ return 0, io.EOF\n+ }\n+ buf := make([]byte, dst.NumBytes())\n+ _, err := rw.buf.Read(buf)\n+ if err != nil {\n+ return 0, err\n+ }\n+ n, err := dst.CopyOut(ctx, buf)\n+ return int64(n), err\n+}\n+\n+// Write implements vfs.FileDescriptionImpl.Write details for the parent mockFileDescription.\n+func (rw *readerWriter) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {\n+ buf := make([]byte, src.NumBytes())\n+ n, err := src.CopyIn(ctx, buf)\n+ if err != nil {\n+ return 0, err\n+ }\n+ n, err = rw.buf.Write(buf[:n])\n+ return int64(n), err\n+}\n+\n+// EventRegister implements vfs.FileDescriptionImpl.EventRegister details for the parent mockFileDescription.\n+func (rw *readerWriter) EventRegister(we *waiter.Entry) error { return fmt.Errorf(\"not implemented\") }\n+\n+// EventUnregister implements vfs.FileDescriptionImpl.Unregister details for the parent mockFileDescription.\n+func (rw *readerWriter) EventUnregister(we *waiter.Entry) { panic(\"not implemented\") }\n+\n+// Release implements vfs.FileDescriptionImpl.Release details for the parent mockFileDescription.\n+func (rw *readerWriter) Release(context.Context) { rw.released = true }\n+\n+// waiterRW implements mockFileDescriptionRWImpl. 
waiterRW works the same way as readerWriter above,\n+// but it interleaves blocks in between Read and Write calls.\n+type waiterRW struct {\n+ vfs.FileDescriptionDefaultImpl\n+ vfs.NoLockFD\n+ vfs.DentryMetadataFileDescriptionImpl\n+ buf bytes.Buffer\n+ waitMu sync.Mutex\n+ entries []*waiter.Entry\n+ shouldWait bool\n+ quit chan bool\n+ closed bool\n+}\n+\n+var _ vfs.FileDescriptionImpl = (*waiterRW)(nil)\n+\n+func newWaiterReaderWriter() *waiterRW {\n+ ret := &waiterRW{\n+ entries: []*waiter.Entry{},\n+ shouldWait: true,\n+ quit: make(chan bool),\n+ }\n+ go ret.doNotify()\n+ return ret\n+}\n+\n+// Read implements vfs.FileDescriptionImpl.Read details for the parent mockFileDescription.\n+func (w *waiterRW) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {\n+ w.waitMu.Lock()\n+ defer w.waitMu.Unlock()\n+ if w.closed {\n+ return 0, nil\n+ }\n+ if w.shouldWait {\n+ return 0, linuxerr.ErrWouldBlock\n+ }\n+ buf := make([]byte, dst.NumBytes())\n+ _, err := w.buf.Read(buf)\n+ if err != nil {\n+ return 0, err\n+ }\n+ n, err := dst.CopyOut(ctx, buf)\n+ w.shouldWait = true\n+ return int64(n), err\n+}\n+\n+// Write implements vfs.FileDescriptionImpl.Write details for the parent mockFileDescription.\n+func (w *waiterRW) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {\n+ w.waitMu.Lock()\n+ defer w.waitMu.Unlock()\n+ if w.closed {\n+ return 0, nil\n+ }\n+ if w.shouldWait {\n+ return 0, linuxerr.ErrWouldBlock\n+ }\n+ buf := make([]byte, src.NumBytes())\n+ n, err := src.CopyIn(ctx, buf)\n+ if err != nil {\n+ return 0, err\n+ }\n+ if int64(n) != src.NumBytes() {\n+ return 0, linuxerr.EFAULT\n+ }\n+ n, err = w.buf.Write(buf)\n+ w.shouldWait = true\n+ return int64(n), err\n+}\n+\n+// EventRegister implements vfs.FileDescriptionImpl.EventRegister details for the parent mockFileDescription.\n+func (w *waiterRW) EventRegister(we *waiter.Entry) error {\n+ w.waitMu.Lock()\n+ defer w.waitMu.Unlock()\n+ w.entries = append(w.entries, we)\n+ for _, e := range w.entries {\n+ if e == we {\n+ return nil\n+ }\n+ }\n+ w.entries = append(w.entries, we)\n+ return nil\n+}\n+\n+// EventUnregister implements vfs.FileDescriptionImpl.Unregister details for the parent mockFileDescription.\n+func (w *waiterRW) EventUnregister(we *waiter.Entry) {\n+ for i, e := range w.entries {\n+ if e == we {\n+ w.entries = append(w.entries[:i], w.entries[i+1:]...)\n+ }\n+ }\n+}\n+\n+// Release implements vfs.FileDescriptionImpl.Release details for the parent mockFileDescription.\n+func (w *waiterRW) Release(context.Context) {\n+ w.quit <- true\n+}\n+\n+func (w *waiterRW) doNotify() {\n+ for {\n+ w.waitMu.Lock()\n+ select {\n+ case <-w.quit:\n+ w.closed = true\n+ w.waitMu.Unlock()\n+ return\n+ default:\n+ w.shouldWait = false\n+ for _, we := range w.entries {\n+ we.NotifyEvent(waiter.ReadableEvents | waiter.WritableEvents)\n+ }\n+ w.waitMu.Unlock()\n+ time.Sleep(100 * time.Millisecond)\n+ }\n+ }\n+}\n+\n+func TestReaderWriter(t *testing.T) {\n+ ctx := contexttest.Context(t)\n+ for _, tc := range []struct {\n+ name string\n+ mockFDImpl vfs.FileDescriptionImpl\n+ }{\n+ {\n+ name: \"readerWriter\",\n+ mockFDImpl: &readerWriter{},\n+ },\n+ {\n+ name: \"waiter\",\n+ mockFDImpl: newWaiterReaderWriter(),\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ fd, err := newMockFileDescription(ctx, tc.mockFDImpl)\n+ if err != nil {\n+ tc.mockFDImpl.Release(ctx)\n+ t.Fatal(err)\n+ }\n+ readerWriter := fileDescriptionReadWriter{\n+ ctx: ctx,\n+ file: fd,\n+ }\n+ sendBytes := 
[]([]byte){\n+ []byte{'a', 'b', 'c'},\n+ []byte{'1', '2', '3'},\n+ []byte{'a', 'b', 'c', '1', '2', '3'},\n+ []byte{'y', 'o', 'u', 'a', 'n', 'd', 'm', 'e'},\n+ }\n+ for _, buf := range sendBytes {\n+ n, err := readerWriter.Write(buf)\n+ if err != nil {\n+ tc.mockFDImpl.Release(ctx)\n+ t.Fatalf(\"write failed: %v\", err)\n+ }\n+ if n != len(buf) {\n+ tc.mockFDImpl.Release(ctx)\n+ t.Fatalf(\"failed to write buf: %s\", string(buf))\n+ }\n+ }\n+ got := []byte{}\n+ buf := make([]byte, 4)\n+ for {\n+ _, err := readerWriter.Read(buf)\n+ if err == io.EOF {\n+ break\n+ }\n+ got = append(got, buf...)\n+ buf = buf[0:]\n+ }\n+\n+ tc.mockFDImpl.Release(ctx)\n+\n+ want := []byte{}\n+ for _, buf := range sendBytes {\n+ want = append(want, buf...)\n+ }\n+\n+ if !reflect.DeepEqual(got, want) {\n+ t.Fatalf(\"mismatch types: got: %q want: %q\", string(got), string(want))\n+ }\n+\n+ _, err = readerWriter.Read(buf[0:])\n+ if err != io.EOF {\n+ t.Fatalf(\"expected end of file: got: %v\", err)\n+ }\n+ })\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add initial portforward fileDescriptionReadWriter methods and tests.
fileDescriptionReadWriter implements io.ReadWriter for use in port
forwarding. Add initial implementation and tests.
PiperOrigin-RevId: 495196971 |
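The record above wraps a non-blocking vfs.FileDescription in a blocking io.ReadWriter by retrying reads and writes whenever a waiter channel fires, and by mapping a zero-byte successful read to io.EOF. Below is a minimal, self-contained sketch of that retry-until-notified pattern; nonBlockingFile, its ready channel, feed, and the errWouldBlock sentinel are stand-ins invented for illustration, not gVisor APIs.

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"sync"
	"time"
)

// errWouldBlock stands in for linuxerr.ErrWouldBlock.
var errWouldBlock = errors.New("operation would block")

// nonBlockingFile stands in for a vfs.FileDescription: reads fail with
// errWouldBlock until data is buffered, and a channel signals readiness.
type nonBlockingFile struct {
	mu    sync.Mutex
	data  []byte
	ready chan struct{}
}

func (f *nonBlockingFile) read(buf []byte) (int, error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	if len(f.data) == 0 {
		return 0, errWouldBlock
	}
	n := copy(buf, f.data)
	f.data = f.data[n:]
	return n, nil
}

// feed buffers data and notifies any blocked reader.
func (f *nonBlockingFile) feed(p []byte) {
	f.mu.Lock()
	f.data = append(f.data, p...)
	f.mu.Unlock()
	f.ready <- struct{}{}
}

// blockingRead mirrors the shape of fileDescriptionReadWriter.Read: retry
// the non-blocking read each time the readiness channel fires, and treat a
// zero-byte success as end of stream.
func blockingRead(f *nonBlockingFile, buf []byte) (int, error) {
	n, err := f.read(buf)
	for errors.Is(err, errWouldBlock) {
		<-f.ready
		n, err = f.read(buf)
	}
	if n == 0 && err == nil {
		return 0, io.EOF
	}
	return n, err
}

func main() {
	f := &nonBlockingFile{ready: make(chan struct{}, 1)}
	go func() {
		time.Sleep(10 * time.Millisecond)
		f.feed([]byte("hello"))
	}()

	buf := make([]byte, 8)
	n, err := blockingRead(f, buf)
	fmt.Println(n, err, string(buf[:n]))
}
```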
259,907 | 14.12.2022 09:57:37 | 28,800 | e3f77dddd7881dd8d00cd27d8c09001effea6aa9 | Deprecate 9P gofer protocol in runsc.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -75,12 +75,6 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip\"\n)\n-// LISAFSEnabled is set to true when lisafs protocol is enabled. Added as a\n-// global to allow easy access everywhere.\n-//\n-// TODO(gvisor.dev/issue/7911): Remove when 9P is deleted.\n-var LISAFSEnabled = false\n-\n// userCounters is a set of user counters.\n//\n// +stateify savable\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -245,8 +245,6 @@ func New(args Args) (*Loader, error) {\nreturn nil, fmt.Errorf(\"setting up memory usage: %w\", err)\n}\n- kernel.LISAFSEnabled = args.Conf.Lisafs\n-\n// Make host FDs stable between invocations. Host FDs must map to the exact\n// same number when the sandbox is restored. Otherwise the wrong FD will be\n// used.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/vfs.go",
"new_path": "runsc/boot/vfs.go",
"diff": "@@ -253,7 +253,7 @@ func compileMounts(spec *specs.Spec, conf *config.Config) []specs.Mount {\n}\n// goferMountData creates a slice of gofer mount data.\n-func goferMountData(fd int, fa config.FileAccessType, lisafs bool) []string {\n+func goferMountData(fd int, fa config.FileAccessType) []string {\nopts := []string{\n\"trans=fd\",\n\"rfdno=\" + strconv.Itoa(fd),\n@@ -262,9 +262,7 @@ func goferMountData(fd int, fa config.FileAccessType, lisafs bool) []string {\nif fa == config.FileAccessShared {\nopts = append(opts, \"cache=remote_revalidating\")\n}\n- if lisafs {\nopts = append(opts, \"lisafs=true\")\n- }\nreturn opts\n}\n@@ -404,7 +402,7 @@ func (c *containerMounter) mountAll(conf *config.Config, procArgs *kernel.Create\n// createMountNamespace creates the container's root mount and namespace.\nfunc (c *containerMounter) createMountNamespace(ctx context.Context, conf *config.Config, creds *auth.Credentials) (*vfs.MountNamespace, error) {\nfd := c.fds.remove()\n- data := goferMountData(fd, conf.FileAccess, conf.Lisafs)\n+ data := goferMountData(fd, conf.FileAccess)\n// We can't check for overlayfs here because sandbox is chroot'ed and gofer\n// can only send mount options for specs.Mounts (specs.Root is missing\n@@ -714,7 +712,7 @@ func (c *containerMounter) getMountNameAndOptions(conf *config.Config, m *mountA\n// but unlikely to be correct in this context.\nreturn \"\", nil, false, fmt.Errorf(\"gofer mount requires a connection FD\")\n}\n- data = goferMountData(m.fd, c.getMountAccessType(conf, m.mount), conf.Lisafs)\n+ data = goferMountData(m.fd, c.getMountAccessType(conf, m.mount))\ninternalData = gofer.InternalFilesystemOptions{\nUniqueID: m.mount.Destination,\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cli/main.go",
"new_path": "runsc/cli/main.go",
"diff": "@@ -228,7 +228,6 @@ func Main(version string) {\nlog.Infof(\"\\t\\tOverlay: Root=%t, SubMounts=%t, FilestoreDir=%q\", overlay2.RootMount, overlay2.SubMounts, overlay2.FilestoreDir)\nlog.Infof(\"\\t\\tNetwork: %v, logging: %t\", conf.Network, conf.LogPackets)\nlog.Infof(\"\\t\\tStrace: %t, max size: %d, syscalls: %s\", conf.Strace, conf.StraceLogSize, conf.StraceSyscalls)\n- log.Infof(\"\\t\\tLISAFS: %t\", conf.Lisafs)\nlog.Infof(\"\\t\\tDebug: %v\", conf.Debug)\nlog.Infof(\"\\t\\tSystemd: %v\", conf.SystemdCgroup)\nlog.Infof(\"***************************\")\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/BUILD",
"new_path": "runsc/cmd/BUILD",
"diff": "@@ -50,14 +50,12 @@ go_library(\n\"//pkg/coretag\",\n\"//pkg/coverage\",\n\"//pkg/log\",\n- \"//pkg/p9\",\n\"//pkg/sentry/control\",\n\"//pkg/sentry/kernel\",\n\"//pkg/sentry/kernel/auth\",\n\"//pkg/sentry/platform\",\n\"//pkg/state/pretty\",\n\"//pkg/state/statefile\",\n- \"//pkg/sync\",\n\"//pkg/unet\",\n\"//pkg/urpc\",\n\"//runsc/boot\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/gofer.go",
"new_path": "runsc/cmd/gofer.go",
"diff": "@@ -29,8 +29,6 @@ import (\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/log\"\n- \"gvisor.dev/gvisor/pkg/p9\"\n- \"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/unet\"\n\"gvisor.dev/gvisor/runsc/cmd/util\"\n\"gvisor.dev/gvisor/runsc/config\"\n@@ -235,10 +233,7 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...any) subcomm\nutil.Fatalf(\"installing seccomp filters: %v\", err)\n}\n- if conf.Lisafs {\n- return g.serveLisafs(spec, conf, root)\n- }\n- return g.serve9P(spec, conf, root)\n+ return g.serve(spec, conf, root)\n}\nfunc newSocket(ioFD int) *unet.Socket {\n@@ -249,7 +244,7 @@ func newSocket(ioFD int) *unet.Socket {\nreturn socket\n}\n-func (g *Gofer) serveLisafs(spec *specs.Spec, conf *config.Config, root string) subcommands.ExitStatus {\n+func (g *Gofer) serve(spec *specs.Spec, conf *config.Config, root string) subcommands.ExitStatus {\ntype connectionConfig struct {\nsock *unet.Socket\nmountPath string\n@@ -316,70 +311,6 @@ func (g *Gofer) serveLisafs(spec *specs.Spec, conf *config.Config, root string)\nreturn subcommands.ExitSuccess\n}\n-func (g *Gofer) serve9P(spec *specs.Spec, conf *config.Config, root string) subcommands.ExitStatus {\n- // Start with root mount, then add any other additional mount as needed.\n- overlay2 := conf.GetOverlay2()\n- ats := make([]p9.Attacher, 0, len(spec.Mounts)+1)\n- ap, err := fsgofer.NewAttachPoint(\"/\", fsgofer.Config{\n- ROMount: spec.Root.Readonly || overlay2.RootMount,\n- HostUDS: conf.GetHostUDS(),\n- HostFifo: conf.HostFifo,\n- })\n- if err != nil {\n- util.Fatalf(\"creating attach point: %v\", err)\n- }\n- ats = append(ats, ap)\n- log.Infof(\"Serving %q mapped to %q on FD %d (ro: %t)\", \"/\", root, g.ioFDs[0], spec.Root.Readonly)\n-\n- mountIdx := 1 // first one is the root\n- for _, m := range spec.Mounts {\n- if specutils.IsGoferMount(m) {\n- cfg := fsgofer.Config{\n- ROMount: isReadonlyMount(m.Options) || overlay2.SubMounts,\n- HostUDS: conf.GetHostUDS(),\n- HostFifo: conf.HostFifo,\n- }\n- ap, err := fsgofer.NewAttachPoint(m.Destination, cfg)\n- if err != nil {\n- util.Fatalf(\"creating attach point: %v\", err)\n- }\n- ats = append(ats, ap)\n-\n- if mountIdx >= len(g.ioFDs) {\n- util.Fatalf(\"no FD found for mount. Did you forget --io-fd? mount: %d, %v\", len(g.ioFDs), m)\n- }\n- log.Infof(\"Serving %q mapped on FD %d (ro: %t)\", m.Destination, g.ioFDs[mountIdx], cfg.ROMount)\n- mountIdx++\n- }\n- }\n- if mountIdx != len(g.ioFDs) {\n- util.Fatalf(\"too many FDs passed for mounts. mounts: %d, FDs: %d\", mountIdx, len(g.ioFDs))\n- }\n-\n- // Run the loops and wait for all to exit.\n- var wg sync.WaitGroup\n- for i, ioFD := range g.ioFDs {\n- wg.Add(1)\n- go func(ioFD int, at p9.Attacher) {\n- socket, err := unet.NewSocket(ioFD)\n- if err != nil {\n- util.Fatalf(\"creating server on FD %d: %v\", ioFD, err)\n- }\n- s := p9.NewServer(at)\n- if err := s.Handle(socket); err != nil {\n- util.Fatalf(\"P9 server returned error. Gofer is shutting down. FD: %d, err: %v\", ioFD, err)\n- }\n- wg.Done()\n- }(ioFD, ats[i])\n- }\n- wg.Wait()\n- log.Infof(\"All 9P servers exited.\")\n- if g.stopProfiling != nil {\n- g.stopProfiling()\n- }\n- return subcommands.ExitSuccess\n-}\n-\nfunc (g *Gofer) writeMounts(mounts []specs.Mount) error {\nbytes, err := json.Marshal(mounts)\nif err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/config.go",
"new_path": "runsc/config/config.go",
"diff": "@@ -222,9 +222,6 @@ type Config struct {\n// E.g. 0.2 CPU quota will result in 1, and 1.9 in 2.\nCPUNumFromQuota bool `flag:\"cpu-num-from-quota\"`\n- // Enable lisafs.\n- Lisafs bool `flag:\"lisafs\"`\n-\n// Allows overriding of flags in OCI annotations.\nAllowFlagOverride bool `flag:\"allow-flag-override\"`\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/flags.go",
"new_path": "runsc/config/flags.go",
"diff": "@@ -86,7 +86,7 @@ func RegisterFlags(flagSet *flag.FlagSet) {\nflagSet.Bool(\"vfs2\", true, \"DEPRECATED: this flag has no effect.\")\nflagSet.Bool(\"fuse\", true, \"DEPRECATED: this flag has no effect.\")\n- flagSet.Bool(\"lisafs\", true, \"Enables lisafs protocol instead of 9P.\")\n+ flagSet.Bool(\"lisafs\", true, \"DEPRECATED: this flag has no effect.\")\nflagSet.Bool(\"cgroupfs\", false, \"Automatically mount cgroupfs.\")\nflagSet.Bool(\"ignore-cgroups\", false, \"don't configure cgroups.\")\nflagSet.Int(\"fdlimit\", -1, \"Specifies a limit on the number of host file descriptors that can be open. Applies separately to the sentry and gofer. Note: each file in the sandbox holds more than one host FD open.\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Deprecate 9P gofer protocol in runsc.
Updates #7911
PiperOrigin-RevId: 495343206 |
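Note on the record above: the --lisafs flag stays registered so existing invocations keep parsing, but its value is no longer consulted anywhere. A small stand-alone sketch of that deprecation pattern with Go's flag package; the flag-set name and sample arguments here are made up for illustration.

```go
package main

import (
	"flag"
	"fmt"
)

func main() {
	fs := flag.NewFlagSet("runsc-sketch", flag.ContinueOnError)

	// The flag still parses so existing command lines do not break,
	// but nothing in the program reads it anymore.
	lisafs := fs.Bool("lisafs", true, "DEPRECATED: this flag has no effect.")

	if err := fs.Parse([]string{"--lisafs=false"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("parsed --lisafs =", *lisafs, "(value is ignored)")
}
```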
259,982 | 14.12.2022 12:44:07 | 28,800 | 1f8c4cb6bae3c45e3ec28449147e92689eeb2e86 | Adding container_start_duration metric for multi-container mode.
Including the time when a container start request is
received and the time it is completed in the
ContainerStartedEvent proto message. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/control/BUILD",
"new_path": "pkg/sentry/control/BUILD",
"diff": "@@ -6,6 +6,9 @@ proto_library(\nname = \"control\",\nsrcs = [\"control.proto\"],\nvisibility = [\"//visibility:public\"],\n+ deps = [\n+ \"@com_google_protobuf//:timestamp_proto\",\n+ ],\n)\ngo_library(\n@@ -50,6 +53,7 @@ go_library(\n\"//pkg/tcpip/link/sniffer\",\n\"//pkg/urpc\",\n\"//pkg/usermem\",\n+ \"@org_golang_google_protobuf//types/known/timestamppb\",\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/control/control.proto",
"new_path": "pkg/sentry/control/control.proto",
"diff": "@@ -16,6 +16,8 @@ syntax = \"proto3\";\npackage gvisor;\n+import \"google/protobuf/timestamp.proto\";\n+\n// ControlConfig configures the permission of controls.\nmessage ControlConfig {\n// Names for individual control URPC service objects.\n@@ -41,9 +43,13 @@ message ControlConfig {\n}\n// ContainerStartedEvent is emitted when a container starts.\n+// It also keeps a track of the time elapsed when a container\n+// start request is received and the container actually starts.\nmessage ContainerStartedEvent {\nbool started = 1;\nstring container_id = 2;\n+ google.protobuf.Timestamp request_received = 3;\n+ google.protobuf.Timestamp request_completed = 4;\n}\n// ContainerExitEvent is emitted when a container's init task exits. Duplicate\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/control/lifecycle.go",
"new_path": "pkg/sentry/control/lifecycle.go",
"diff": "@@ -17,7 +17,9 @@ package control\nimport (\n\"encoding/json\"\n\"fmt\"\n+ \"time\"\n+ \"google.golang.org/protobuf/types/known/timestamppb\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/eventchannel\"\n\"gvisor.dev/gvisor/pkg/fd\"\n@@ -183,6 +185,11 @@ func (l *Lifecycle) updateContainerState(containerID string, newState containerS\n// StartContainer will start a new container in the sandbox.\nfunc (l *Lifecycle) StartContainer(args *StartContainerArgs, _ *uint32) error {\n+ timeRequested := time.Now()\n+ timeRequestReceived := ×tamppb.Timestamp{\n+ Seconds: timeRequested.Unix(),\n+ Nanos: int32(timeRequested.Nanosecond()),\n+ }\nlog.Infof(\"StartContainer: %v\", args)\nif len(args.Files) != len(args.DonatedFDs) {\nreturn fmt.Errorf(\"FilePayload.Files and DonatedFDs must have same number of elements (%d != %d)\", len(args.Files), len(args.DonatedFDs))\n@@ -313,10 +320,6 @@ func (l *Lifecycle) StartContainer(args *StartContainerArgs, _ *uint32) error {\n// Start the newly created process.\nl.Kernel.StartProcess(tg)\nlog.Infof(\"Started the new container %v \", initArgs.ContainerID)\n- eventchannel.LogEmit(&pb.ContainerStartedEvent{\n- Started: true,\n- ContainerId: initArgs.ContainerID,\n- })\nif err := l.updateContainerState(initArgs.ContainerID, stateRunning); err != nil {\n// Sanity check: shouldn't fail to update the state at this point.\n@@ -324,6 +327,17 @@ func (l *Lifecycle) StartContainer(args *StartContainerArgs, _ *uint32) error {\n}\n+ timeRequestCompleted := time.Now()\n+ eventchannel.LogEmit(&pb.ContainerStartedEvent{\n+ Started: true,\n+ ContainerId: initArgs.ContainerID,\n+ RequestReceived: timeRequestReceived,\n+ RequestCompleted: ×tamppb.Timestamp{\n+ Seconds: timeRequestCompleted.Unix(),\n+ Nanos: int32(timeRequestCompleted.Nanosecond()),\n+ },\n+ })\n+\n// TODO(b/251490950): reap thread needs to synchronize with Save, so the\n// container state update doesn't race with state serialization.\ngo l.reap(initArgs.ContainerID, tg) // S/R-SAFE: see above.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Adding container_start_duration metric for container multi-container mode.
Including the time when a container start request is
received and the time it is completed in the
ContainerStartedEvent proto message.
PiperOrigin-RevId: 495390114 |
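The record above brackets container start with two wall-clock timestamps carried in the ContainerStartedEvent proto. A minimal sketch of the same bookkeeping, using timestamppb.New instead of filling Seconds/Nanos by hand as the commit does; the sleep is a stand-in for the real start work and the event proto itself is not reproduced here.

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	requestReceived := timestamppb.New(time.Now())

	// Stand-in for the actual container start work.
	time.Sleep(5 * time.Millisecond)

	requestCompleted := timestamppb.New(time.Now())

	// A consumer of the event can derive the start duration from the
	// two timestamps.
	d := requestCompleted.AsTime().Sub(requestReceived.AsTime())
	fmt.Printf("container start took %v\n", d)
}
```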
259,853 | 15.12.2022 10:13:44 | 28,800 | 14b5ff5a2f323dbb30c850d548cb3b1f6bbc8e41 | overlayfs: don't call SetStat and StatAt under dentry.mapsMu
This avoids lock order inversions with inodeMutex and
filesystemRWMutex. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/overlay/copy_up.go",
"new_path": "pkg/sentry/fsimpl/overlay/copy_up.go",
"diff": "@@ -144,8 +144,6 @@ func (d *dentry) copyUpMaybeSyntheticMountpointLocked(ctx context.Context, forSy\ncleanupUndoCopyUp()\nreturn err\n}\n- d.mapsMu.Lock()\n- defer d.mapsMu.Unlock()\nif d.wrappedMappable != nil {\n// We may have memory mappings of the file on the lower layer.\n// Switch to mapping the file on the upper layer instead.\n@@ -305,8 +303,8 @@ func (d *dentry) copyUpMaybeSyntheticMountpointLocked(ctx context.Context, forSy\n}\nif mmapOpts != nil && mmapOpts.Mappable != nil {\n- // Note that if mmapOpts != nil, then d.mapsMu is locked for writing\n- // (from the S_IFREG path above).\n+ d.mapsMu.Lock()\n+ defer d.mapsMu.Unlock()\n// Propagate mappings of d to the new Mappable. Remember which mappings\n// we added so we can remove them on failure.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/overlay/overlay.go",
"new_path": "pkg/sentry/fsimpl/overlay/overlay.go",
"diff": "@@ -533,6 +533,10 @@ type dentry struct {\n//\n// - isMappable is non-zero iff wrappedMappable is non-nil. isMappable is\n// accessed using atomic memory operations.\n+ //\n+ // - wrappedMappable is protected by mapsMu and dataMu. In addition,\n+ // it has to be immutable if copyMu is taken for write.\n+ // copyUpMaybeSyntheticMountpointLocked relies on this behavior.\nmapsMu mapsMutex `state:\"nosave\"`\nlowerMappings memmap.MappingSet\ndataMu dataRWMutex `state:\"nosave\"`\n"
}
] | Go | Apache License 2.0 | google/gvisor | overlayfs: don't call SetStat and StatAt under dentry.mapsMu
This allows to avoid lock order inversions with inodeMutex and
filesystemRWMutex.
PiperOrigin-RevId: 495629138 |
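The record above narrows the region guarded by dentry.mapsMu so that stat/setstat calls, which can take other filesystem locks, no longer run while it is held. A toy sketch of that narrow-the-critical-section idea; every type and method below is invented for illustration and only the lock layout is meant to carry over.

```go
package main

import (
	"fmt"
	"sync"
)

// dentry is a made-up stand-in; only the locking shape matters here.
type dentry struct {
	mapsMu sync.Mutex
	copyMu sync.RWMutex

	upperCreated  bool
	mappingsMoved bool
}

// statLower stands in for StatAt/SetStat, which may acquire unrelated
// filesystem locks and so must not be called while mapsMu is held.
func (d *dentry) statLower() {}

func (d *dentry) copyUp() {
	d.copyMu.Lock()
	defer d.copyMu.Unlock()

	d.statLower() // previously ran under mapsMu
	d.upperCreated = true

	// Only the mapping switch-over still needs mapsMu.
	d.mapsMu.Lock()
	d.mappingsMoved = true
	d.mapsMu.Unlock()
}

func main() {
	d := &dentry{}
	d.copyUp()
	fmt.Println("copied up:", d.upperCreated, "mappings moved:", d.mappingsMoved)
}
```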
260,004 | 16.12.2022 11:16:05 | 28,800 | a2ff07e535231f1f73a59830926b767ae65bac7d | Use struct to check mockMulticastGroupProtocol fields
Instead of passing each of the values as arguments to a function.
This prepares for a followup change which introduces a new field
to mockMulticastGroupProtocol that is unused in some places
(improving readability by not having to explicitly pass in a
function argument). | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/ip/generic_multicast_protocol_test.go",
"new_path": "pkg/tcpip/network/internal/ip/generic_multicast_protocol_test.go",
"diff": "@@ -172,17 +172,22 @@ func (m *mockMulticastGroupProtocol) ShouldPerformProtocol(groupAddress tcpip.Ad\nreturn groupAddress != m.skipProtocolAddress\n}\n-func (m *mockMulticastGroupProtocol) check(sendReportGroupAddresses []tcpip.Address, sendLeaveGroupAddresses []tcpip.Address) string {\n+type checkFields struct {\n+ sendReportGroupAddresses []tcpip.Address\n+ sendLeaveGroupAddresses []tcpip.Address\n+}\n+\n+func (m *mockMulticastGroupProtocol) check(fields checkFields) string {\nm.mu.Lock()\ndefer m.mu.Unlock()\nsendReportGroupAddrCount := make(map[tcpip.Address]int)\n- for _, a := range sendReportGroupAddresses {\n+ for _, a := range fields.sendReportGroupAddresses {\nsendReportGroupAddrCount[a] = 1\n}\nsendLeaveGroupAddrCount := make(map[tcpip.Address]int)\n- for _, a := range sendLeaveGroupAddresses {\n+ for _, a := range fields.sendLeaveGroupAddresses {\nsendLeaveGroupAddrCount[a] = 1\n}\n@@ -246,20 +251,20 @@ func TestJoinGroup(t *testing.T) {\n// a random interval between 0 and the maximum unsolicited report delay.\nmgp.joinGroup(test.addr)\nif test.shouldSendReports {\n- if diff := mgp.check([]tcpip.Address{test.addr} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{test.addr}}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n// Generic multicast protocol timers are expected to take the job mutex.\nclock.Advance(maxUnsolicitedReportDelay)\n- if diff := mgp.check([]tcpip.Address{test.addr} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{test.addr}}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n}\n// Should have no more messages to send.\nclock.Advance(time.Hour)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n})\n@@ -297,7 +302,7 @@ func TestLeaveGroup(t *testing.T) {\nmgp.joinGroup(test.addr)\nif test.shouldSendMessages {\n- if diff := mgp.check([]tcpip.Address{test.addr} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{test.addr}}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n}\n@@ -311,7 +316,7 @@ func TestLeaveGroup(t *testing.T) {\n}\n}\nif test.shouldSendMessages {\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, []tcpip.Address{test.addr} /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendLeaveGroupAddresses: []tcpip.Address{test.addr}}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n}\n@@ -320,7 +325,7 @@ func TestLeaveGroup(t *testing.T) {\n//\n// Generic multicast protocol timers are expected to take the job mutex.\nclock.Advance(time.Hour)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n})\n@@ -372,15 +377,15 @@ func TestHandleReport(t *testing.T) {\n})\nmgp.joinGroup(addr1)\n- if diff := 
mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr1}}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\nmgp.joinGroup(addr2)\n- if diff := mgp.check([]tcpip.Address{addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr2}}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\nmgp.joinGroup(addr3)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n@@ -390,14 +395,14 @@ func TestHandleReport(t *testing.T) {\nif len(test.expectReportsFor) != 0 {\n// Generic multicast protocol timers are expected to take the job mutex.\nclock.Advance(maxUnsolicitedReportDelay)\n- if diff := mgp.check(test.expectReportsFor /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: test.expectReportsFor}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n}\n// Should have no more messages to send.\nclock.Advance(time.Hour)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n})\n@@ -461,15 +466,15 @@ func TestHandleQuery(t *testing.T) {\n})\nmgp.joinGroup(addr1)\n- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr1}}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\nmgp.joinGroup(addr2)\n- if diff := mgp.check([]tcpip.Address{addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr2}}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\nmgp.joinGroup(addr3)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n@@ -477,20 +482,20 @@ func TestHandleQuery(t *testing.T) {\n// to some time within the new max response delay.\nmgp.handleQuery(test.queryAddr, test.maxDelay)\nclock.Advance(test.maxDelay)\n- if diff := mgp.check(test.expectQueriedReportsFor /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: test.expectQueriedReportsFor}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n// The groups that were not affected by the query should still send a\n// report after the max unsolicited report delay.\nclock.Advance(maxUnsolicitedReportDelay)\n- if diff := mgp.check(test.expectDelayedReportsFor /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" 
{\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: test.expectDelayedReportsFor}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n// Should have no more messages to send.\nclock.Advance(time.Hour)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n})\n@@ -513,14 +518,14 @@ func TestJoinCount(t *testing.T) {\nt.Fatalf(\"got mgp.isLocallyJoined(%s) = false, want = true\", addr1)\n}\n// Only the first join should trigger a report to be sent.\n- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr1}}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\nmgp.joinGroup(addr1)\nif !mgp.isLocallyJoined(addr1) {\nt.Errorf(\"got mgp.isLocallyJoined(%s) = false, want = true\", addr1)\n}\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\nif t.Failed() {\n@@ -535,7 +540,7 @@ func TestJoinCount(t *testing.T) {\nt.Errorf(\"got mgp.isLocallyJoined(%s) = false, want = true\", addr1)\n}\n// A leave report should only be sent once the join count reaches 0.\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\nif t.Failed() {\n@@ -549,7 +554,7 @@ func TestJoinCount(t *testing.T) {\nif mgp.isLocallyJoined(addr1) {\nt.Errorf(\"got mgp.isLocallyJoined(%s) = true, want = false\", addr1)\n}\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, []tcpip.Address{addr1} /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendLeaveGroupAddresses: []tcpip.Address{addr1}}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\nif t.Failed() {\n@@ -564,7 +569,7 @@ func TestJoinCount(t *testing.T) {\nif mgp.isLocallyJoined(addr1) {\nt.Errorf(\"got mgp.isLocallyJoined(%s) = true, want = false\", addr1)\n}\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n@@ -572,7 +577,7 @@ func TestJoinCount(t *testing.T) {\n//\n// Generic multicast protocol timers are expected to take the job mutex.\nclock.Advance(time.Hour)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n}\n@@ -588,27 +593,27 @@ func TestMakeAllNonMemberAndInitialize(t *testing.T) {\n})\nmgp.joinGroup(addr1)\n- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr1}}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol 
mismatch (-want +got):\\n%s\", diff)\n}\nmgp.joinGroup(addr2)\n- if diff := mgp.check([]tcpip.Address{addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr2}}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\nmgp.joinGroup(addr3)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n// Should send the leave reports for each but still consider them locally\n// joined.\nmgp.makeAllNonMember()\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, []tcpip.Address{addr1, addr2} /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendLeaveGroupAddresses: []tcpip.Address{addr1, addr2}}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n// Generic multicast protocol timers are expected to take the job mutex.\nclock.Advance(time.Hour)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\nfor _, group := range []tcpip.Address{addr1, addr2, addr3} {\n@@ -619,17 +624,17 @@ func TestMakeAllNonMemberAndInitialize(t *testing.T) {\n// Should send the initial set of unsolcited reports.\nmgp.initializeGroups()\n- if diff := mgp.check([]tcpip.Address{addr1, addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr1, addr2}}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\nclock.Advance(maxUnsolicitedReportDelay)\n- if diff := mgp.check([]tcpip.Address{addr1, addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr1, addr2}}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n// Should have no more messages to send.\nclock.Advance(time.Hour)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n}\n@@ -652,14 +657,14 @@ func TestGroupStateNonMember(t *testing.T) {\nif !mgp.isLocallyJoined(addr1) {\nt.Fatalf(\"got mgp.isLocallyJoined(%s) = false, want = true\", addr1)\n}\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\nmgp.joinGroup(addr2)\nif !mgp.isLocallyJoined(addr1) {\nt.Fatalf(\"got mgp.isLocallyJoined(%s) = false, want = true\", addr2)\n}\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n@@ -667,7 +672,7 @@ func TestGroupStateNonMember(t *testing.T) {\nmgp.handleQuery(addr1, 
time.Nanosecond)\n// Generic multicast protocol timers are expected to take the job mutex.\nclock.Advance(time.Nanosecond)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n@@ -678,12 +683,12 @@ func TestGroupStateNonMember(t *testing.T) {\nif mgp.isLocallyJoined(addr1) {\nt.Errorf(\"got mgp.isLocallyJoined(%s) = true, want = false\", addr2)\n}\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\nclock.Advance(time.Hour)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n}\n@@ -701,34 +706,34 @@ func TestQueuedPackets(t *testing.T) {\n// send the packet.\nmgp.setQueuePackets(true)\nmgp.joinGroup(addr1)\n- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr1}}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n// The delayed report timer should have been cancelled since we did not send\n// the initial report earlier.\nclock.Advance(time.Hour)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n// Mock being able to successfully send the report.\nmgp.setQueuePackets(false)\nmgp.sendQueuedReports()\n- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr1}}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n// The delayed report (sent after the initial report) should now be sent.\nclock.Advance(maxUnsolicitedReportDelay)\n- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr1}}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n// Should not have anything else to send (we should be idle).\nmgp.sendQueuedReports()\nclock.Advance(time.Hour)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n@@ -736,7 +741,7 @@ func TestQueuedPackets(t *testing.T) {\nmgp.setQueuePackets(true)\nmgp.handleQuery(addr1, time.Nanosecond)\nclock.Advance(time.Nanosecond)\n- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr1}}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want 
+got):\\n%s\", diff)\n}\n@@ -744,14 +749,14 @@ func TestQueuedPackets(t *testing.T) {\n// send.\nmgp.setQueuePackets(false)\nmgp.sendQueuedReports()\n- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr1}}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n// Should not have anything else to send.\nmgp.sendQueuedReports()\nclock.Advance(time.Hour)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n@@ -759,7 +764,7 @@ func TestQueuedPackets(t *testing.T) {\nmgp.setQueuePackets(true)\nmgp.handleQuery(addr1, time.Nanosecond)\nclock.Advance(time.Nanosecond)\n- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr1}}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n@@ -769,7 +774,7 @@ func TestQueuedPackets(t *testing.T) {\nmgp.handleReport(addr1)\nmgp.sendQueuedReports()\nclock.Advance(time.Hour)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n@@ -777,21 +782,21 @@ func TestQueuedPackets(t *testing.T) {\n// prevent a newly joined group's reports from being sent.\nmgp.setQueuePackets(true)\nmgp.joinGroup(addr2)\n- if diff := mgp.check([]tcpip.Address{addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{sendReportGroupAddresses: []tcpip.Address{addr2}}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\nmgp.handleReport(addr2)\n// Attempting to send queued reports while still unable to send reports should\n// not change the host state.\nmgp.sendQueuedReports()\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Fatalf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n// Should not have any packets queued.\nmgp.setQueuePackets(false)\nmgp.sendQueuedReports()\nclock.Advance(time.Hour)\n- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != \"\" {\n+ if diff := mgp.check(checkFields{}); diff != \"\" {\nt.Errorf(\"mockMulticastGroupProtocol mismatch (-want +got):\\n%s\", diff)\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use struct to check mockMulticastGroupProtocol fields
Instead of passing each of the values as arguments to a function.
This prepares for a followup change which introduces a new field
to mockMulticastGroupProtocol that is unused in some places
(improving readability by not having to explicitly pass in a
function argument).
PiperOrigin-RevId: 495918528 |
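The record above swaps two positional slice arguments for a checkFields parameter struct, so call sites name only what they expect instead of passing annotated nils. A tiny generic illustration of the same pattern, with plain strings standing in for tcpip.Address values:

```go
package main

import "fmt"

// checkFields mirrors the test helper's parameter struct.
type checkFields struct {
	sendReportGroupAddresses []string
	sendLeaveGroupAddresses  []string
}

func check(f checkFields) string {
	return fmt.Sprintf("reports=%v leaves=%v",
		f.sendReportGroupAddresses, f.sendLeaveGroupAddresses)
}

func main() {
	// Before: check([]string{"224.0.0.1"} /* reports */, nil /* leaves */)
	fmt.Println(check(checkFields{sendReportGroupAddresses: []string{"224.0.0.1"}}))

	// The zero value means "expect nothing", with no positional nils to decode.
	fmt.Println(check(checkFields{}))
}
```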
259,853 | 16.12.2022 14:56:56 | 28,800 | 41009e886e100ac2272dd20df83e16d398961fa9 | stack/conntrack: suppress the nested locking warning
reapEverything intentionally takes the second bucket lock. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/BUILD",
"new_path": "pkg/tcpip/stack/BUILD",
"diff": "@@ -112,6 +112,7 @@ declare_rwmutex(\ndeclare_rwmutex(\nname = \"bucket_mutex\",\nout = \"bucket_mutex.go\",\n+ nested_lock_names = [\"otherTuple\"],\npackage = \"stack\",\nprefix = \"bucket\",\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/conntrack.go",
"new_path": "pkg/tcpip/stack/conntrack.go",
"diff": "@@ -1097,9 +1097,9 @@ func (ct *ConnTrack) reapTupleLocked(reapingTuple *tuple, bktID int, bkt *bucket\nbkt.tuples.Remove(otherTuple)\n} else {\notherTupleBkt := &ct.buckets[otherTupleBktID]\n- otherTupleBkt.mu.Lock()\n+ otherTupleBkt.mu.NestedLock(bucketLockOthertuple)\notherTupleBkt.tuples.Remove(otherTuple)\n- otherTupleBkt.mu.Unlock()\n+ otherTupleBkt.mu.NestedUnlock(bucketLockOthertuple)\n}\nreturn true\n"
}
] | Go | Apache License 2.0 | google/gvisor | stack/conntrack: suppress the nested locking warning
reapEverything intentionally takes the second bucket lock.
PiperOrigin-RevId: 495967790 |
259,909 | 29.12.2022 13:52:11 | 28,800 | 85f261dd1eb299be5b82ce7808069c379275b87d | Pass the containerCgroup to the sandbox object instead of parent.
This is a bug reported by It affects sandboxes where
containerCgroup != parentCgroup. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -283,7 +283,7 @@ func New(conf *config.Config, args Args) (*Container, error) {\nUserLog: args.UserLog,\nIOFiles: ioFiles,\nMountsFile: specFile,\n- Cgroup: parentCgroup,\n+ Cgroup: containerCgroup,\nAttached: args.Attached,\nOverlayFilestoreFile: overlayFilestoreFile,\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Pass the containerCgroup to the sandbox object instead of parent.
This is a bug reported by #8311. It affects sandboxes where
containerCgroup != parentCgroup.
PiperOrigin-RevId: 498443910 |
259,858 | 04.01.2023 18:33:44 | 28,800 | 078109563976738c9ebe8d6e2b45a160fb3d5981 | Use arch_genrule for arch-specific VDSO data.
Note that the select_arch macro changed in order to allow for its use within
a rule below arch_genrule. The command line settings were being manipulated in
a way that did not correctly propagate the select_arch select constraints. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/vdsodata/BUILD",
"new_path": "pkg/sentry/loader/vdsodata/BUILD",
"diff": "-load(\"//tools:defs.bzl\", \"go_add_tags\", \"go_embed_data\", \"go_library\")\n+load(\"//tools:defs.bzl\", \"arch_genrule\", \"go_embed_data\", \"go_library\")\npackage(licenses = [\"notice\"])\n@@ -9,28 +9,17 @@ go_embed_data(\nvar = \"Binary\",\n)\n-[\n- # Generate multiple tagged files. Note that the contents of all files\n- # will be the same (i.e. vdso_arm64.go will contain the amd64 vdso), but\n- # the build tags will ensure only one is selected. When we generate the\n- # \"Go\" branch, we select all archiecture files from the relevant build.\n- # This is a hack around some limitations for \"out\" being a configurable\n- # attribute and selects for srcs. See also tools/go_branch.sh.\n- go_add_tags(\n- name = \"vdso_%s\" % arch,\n+arch_genrule(\n+ name = \"vdso_arch\",\nsrc = \":vdso_bin\",\n- out = \"vdso_%s.go\" % arch,\n- go_tags = [arch],\n+ template = \"vdso_%s.go\",\n)\n- for arch in (\"amd64\", \"arm64\")\n-]\ngo_library(\nname = \"vdsodata\",\nsrcs = [\n\"vdsodata.go\",\n- \":vdso_amd64\",\n- \":vdso_arm64\",\n+ \":vdso_arch\",\n],\nmarshal = False,\nstateify = False,\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/bazeldefs/BUILD",
"new_path": "tools/bazeldefs/BUILD",
"diff": "@@ -24,21 +24,13 @@ bzl_library(\n)\nconfig_setting(\n- name = \"linux_arm64_cross\",\n- values = {\n- \"cpu\": \"aarch64\",\n- \"host_cpu\": \"k8\",\n- },\n- visibility = [\"//visibility:private\"],\n+ name = \"amd64\",\n+ values = {\"cpu\": \"k8\"},\n)\nconfig_setting(\n- name = \"linux_amd64_cross\",\n- values = {\n- \"cpu\": \"k8\",\n- \"host_cpu\": \"aarch64\",\n- },\n- visibility = [\"//visibility:private\"],\n+ name = \"arm64\",\n+ values = {\"cpu\": \"aarch64\"},\n)\ngenrule(\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/bazeldefs/defs.bzl",
"new_path": "tools/bazeldefs/defs.bzl",
"diff": "@@ -23,8 +23,8 @@ def proto_library(name, has_services = None, **kwargs):\ndef select_arch(amd64 = \"amd64\", arm64 = \"arm64\", default = None, **kwargs):\nvalues = {\n- \"@bazel_tools//src/conditions:linux_x86_64\": amd64,\n- \"@bazel_tools//src/conditions:linux_aarch64\": arm64,\n+ \"//tools/bazeldefs:amd64\": amd64,\n+ \"//tools/bazeldefs:arm64\": arm64,\n}\nif default:\nvalues[\"//conditions:default\"] = default\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/defs.bzl",
"new_path": "tools/defs.bzl",
"diff": "@@ -10,7 +10,7 @@ load(\"//tools/go_marshal:defs.bzl\", \"go_marshal\", \"marshal_deps\", \"marshal_test_\nload(\"//tools/nogo:defs.bzl\", \"nogo_test\")\nload(\"//tools/bazeldefs:defs.bzl\", _BuildSettingInfo = \"BuildSettingInfo\", _amd64_config = \"amd64_config\", _arch_config = \"arch_config\", _arm64_config = \"arm64_config\", _bool_flag = \"bool_flag\", _bpf_program = \"bpf_program\", _build_test = \"build_test\", _bzl_library = \"bzl_library\", _coreutil = \"coreutil\", _default_net_util = \"default_net_util\", _more_shards = \"more_shards\", _most_shards = \"most_shards\", _proto_library = \"proto_library\", _select_arch = \"select_arch\", _select_system = \"select_system\", _short_path = \"short_path\", _transition_allowlist = \"transition_allowlist\", _version = \"version\")\nload(\"//tools/bazeldefs:cc.bzl\", _cc_binary = \"cc_binary\", _cc_flags_supplier = \"cc_flags_supplier\", _cc_grpc_library = \"cc_grpc_library\", _cc_library = \"cc_library\", _cc_proto_library = \"cc_proto_library\", _cc_test = \"cc_test\", _cc_toolchain = \"cc_toolchain\", _gbenchmark = \"gbenchmark\", _gbenchmark_internal = \"gbenchmark_internal\", _grpcpp = \"grpcpp\", _gtest = \"gtest\", _vdso_linker_option = \"vdso_linker_option\")\n-load(\"//tools/bazeldefs:go.bzl\", _gazelle = \"gazelle\", _go_binary = \"go_binary\", _go_embed_data = \"go_embed_data\", _go_grpc_and_proto_libraries = \"go_grpc_and_proto_libraries\", _go_library = \"go_library\", _go_path = \"go_path\", _go_proto_library = \"go_proto_library\", _go_rule = \"go_rule\", _go_test = \"go_test\", _gotsan_flag_values = \"gotsan_flag_values\", _gotsan_values = \"gotsan_values\", _select_goarch = \"select_goarch\", _select_goos = \"select_goos\")\n+load(\"//tools/bazeldefs:go.bzl\", _gazelle = \"gazelle\", _go_binary = \"go_binary\", _go_embed_data = \"go_embed_data\", _go_grpc_and_proto_libraries = \"go_grpc_and_proto_libraries\", _go_library = \"go_library\", _go_path = \"go_path\", _go_proto_library = \"go_proto_library\", _go_test = \"go_test\", _gotsan_flag_values = \"gotsan_flag_values\", _gotsan_values = \"gotsan_values\", _select_goarch = \"select_goarch\", _select_goos = \"select_goos\")\nload(\"//tools/bazeldefs:pkg.bzl\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\")\nload(\"//tools/bazeldefs:platforms.bzl\", _default_platform = \"default_platform\", _platform_capabilities = \"platform_capabilities\", _platforms = \"platforms\")\nload(\"//tools/bazeldefs:tags.bzl\", \"go_suffixes\")\n@@ -64,35 +64,6 @@ default_platform = _default_platform\nplatforms = _platforms\nplatform_capabilities = _platform_capabilities\n-def _go_add_tags(ctx):\n- \"\"\" Adds tags to the given source file. 
\"\"\"\n- output = ctx.outputs.out\n- runner = ctx.actions.declare_file(ctx.label.name + \".sh\")\n- lines = [\"#!/bin/bash\"]\n- lines += [\"echo '// +build %s' >> %s\" % (tag, output.path) for tag in ctx.attr.go_tags]\n- lines.append(\"echo '' >> %s\" % output.path)\n- lines += [\"cat %s >> %s\" % (f.path, output.path) for f in ctx.files.src]\n- lines.append(\"\")\n- ctx.actions.write(runner, \"\\n\".join(lines), is_executable = True)\n- ctx.actions.run(\n- inputs = ctx.files.src,\n- outputs = [output],\n- executable = runner,\n- )\n- return [DefaultInfo(\n- files = depset([output]),\n- )]\n-\n-go_add_tags = _go_rule(\n- rule,\n- implementation = _go_add_tags,\n- attrs = {\n- \"go_tags\": attr.string_list(doc = \"Go build tags to be added.\", mandatory = True),\n- \"src\": attr.label(doc = \"Source file.\", allow_single_file = True, mandatory = True),\n- \"out\": attr.output(doc = \"Output file.\", mandatory = True),\n- },\n-)\n-\ndef go_binary(name, nogo = True, pure = False, static = False, x_defs = None, **kwargs):\n\"\"\"Wraps the standard go_binary.\n"
},
{
"change_type": "MODIFY",
"old_path": "vdso/BUILD",
"new_path": "vdso/BUILD",
"diff": "@@ -36,10 +36,6 @@ genrule(\n# VDSO has no hooks to handle failures.\n\"-fno-stack-protector \" +\nvdso_linker_option +\n- select_arch(\n- amd64 = \"-m64 \",\n- arm64 = \"\",\n- ) +\n\"-shared \" +\n\"-nostdlib \" +\n\"-Wl,-soname=linux-vdso.so.1 \" +\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use arch_genrule for arch-specific VDSO data.
Note that the select_arch macro changed in order to allow for its use within
a rule below arch_genrule. The command line settings were being manipulated in
a way that did not correctly propagate the select_arch select constraints.
PiperOrigin-RevId: 499651425 |
260,004 | 05.01.2023 11:13:51 | 28,800 | 8ebc4d767a432c7a4a97e25e048a1380b2a2e442 | Accept no buffer space errors when forwarding
The outgoing device queue may be full when forwarding packets and those
errors should not result in a panic. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/ip/errors.go",
"new_path": "pkg/tcpip/network/internal/ip/errors.go",
"diff": "@@ -34,6 +34,14 @@ func (*ErrTTLExceeded) isForwardingError() {}\nfunc (*ErrTTLExceeded) String() string { return \"ttl exceeded\" }\n+// ErrOutgoingDeviceNoBufferSpace indicates that the outgoing device does not\n+// have enough space to hold a buffer.\n+type ErrOutgoingDeviceNoBufferSpace struct{}\n+\n+func (*ErrOutgoingDeviceNoBufferSpace) isForwardingError() {}\n+\n+func (*ErrOutgoingDeviceNoBufferSpace) String() string { return \"no device buffer space\" }\n+\n// ErrParameterProblem indicates the received packet had a problem with an IP\n// parameter.\ntype ErrParameterProblem struct{}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/ip/stats.go",
"new_path": "pkg/tcpip/network/internal/ip/stats.go",
"diff": "@@ -64,6 +64,10 @@ type MultiCounterIPForwardingStats struct {\n// were dropped due to insufficent buffer space in the pending packet queue.\nNoMulticastPendingQueueBufferSpace tcpip.MultiCounterStat\n+ // OutgoingDeviceNoBufferSpace is the number of packets that were dropped due\n+ // to insufficient space in the outgoing device.\n+ OutgoingDeviceNoBufferSpace tcpip.MultiCounterStat\n+\n// Errors is the number of IP packets received which could not be\n// successfully forwarded.\nErrors tcpip.MultiCounterStat\n@@ -82,6 +86,7 @@ func (m *MultiCounterIPForwardingStats) Init(a, b *tcpip.IPForwardingStats) {\nm.UnexpectedMulticastInputInterface.Init(a.UnexpectedMulticastInputInterface, b.UnexpectedMulticastInputInterface)\nm.UnknownOutputEndpoint.Init(a.UnknownOutputEndpoint, b.UnknownOutputEndpoint)\nm.NoMulticastPendingQueueBufferSpace.Init(a.NoMulticastPendingQueueBufferSpace, b.NoMulticastPendingQueueBufferSpace)\n+ m.OutgoingDeviceNoBufferSpace.Init(a.OutgoingDeviceNoBufferSpace, b.OutgoingDeviceNoBufferSpace)\n}\n// LINT.ThenChange(:MultiCounterIPForwardingStats, ../../../tcpip.go:IPForwardingStats)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -691,6 +691,8 @@ func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt stack.PacketBu\n// necessary and the bit is also set.\n_ = e.protocol.returnError(&icmpReasonFragmentationNeeded{}, pkt, false /* deliveredLocally */)\nreturn &ip.ErrMessageTooLong{}\n+ case *tcpip.ErrNoBufferSpace:\n+ return &ip.ErrOutgoingDeviceNoBufferSpace{}\ndefault:\nreturn &ip.ErrOther{Err: err}\n}\n@@ -1105,7 +1107,7 @@ func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt stack.PacketBufferPt\n// counters.\nfunc (e *endpoint) handleForwardingError(err ip.ForwardingError) {\nstats := e.stats.ip\n- switch err.(type) {\n+ switch err := err.(type) {\ncase nil:\nreturn\ncase *ip.ErrLinkLocalSourceAddress:\n@@ -1126,6 +1128,8 @@ func (e *endpoint) handleForwardingError(err ip.ForwardingError) {\nstats.Forwarding.UnexpectedMulticastInputInterface.Increment()\ncase *ip.ErrUnknownOutputEndpoint:\nstats.Forwarding.UnknownOutputEndpoint.Increment()\n+ case *ip.ErrOutgoingDeviceNoBufferSpace:\n+ stats.Forwarding.OutgoingDeviceNoBufferSpace.Increment()\ndefault:\npanic(fmt.Sprintf(\"unrecognized forwarding error: %s\", err))\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ipv6.go",
"new_path": "pkg/tcpip/network/ipv6/ipv6.go",
"diff": "@@ -1029,6 +1029,8 @@ func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt stack.PacketBu\n// outgoing link.\n_ = e.protocol.returnError(&icmpReasonPacketTooBig{}, pkt, false /* deliveredLocally */)\nreturn &ip.ErrMessageTooLong{}\n+ case *tcpip.ErrNoBufferSpace:\n+ return &ip.ErrOutgoingDeviceNoBufferSpace{}\ndefault:\nreturn &ip.ErrOther{Err: err}\n}\n@@ -1227,7 +1229,7 @@ func (e *endpoint) forwardMulticastPacketForOutgoingInterface(pkt stack.PacketBu\n// counters.\nfunc (e *endpoint) handleForwardingError(err ip.ForwardingError) {\nstats := e.stats.ip\n- switch err.(type) {\n+ switch err := err.(type) {\ncase nil:\nreturn\ncase *ip.ErrLinkLocalSourceAddress:\n@@ -1248,6 +1250,8 @@ func (e *endpoint) handleForwardingError(err ip.ForwardingError) {\nstats.Forwarding.UnexpectedMulticastInputInterface.Increment()\ncase *ip.ErrUnknownOutputEndpoint:\nstats.Forwarding.UnknownOutputEndpoint.Increment()\n+ case *ip.ErrOutgoingDeviceNoBufferSpace:\n+ stats.Forwarding.OutgoingDeviceNoBufferSpace.Increment()\ndefault:\npanic(fmt.Sprintf(\"unrecognized forwarding error: %s\", err))\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -1739,6 +1739,10 @@ type IPForwardingStats struct {\n// were dropped due to insufficent buffer space in the pending packet queue.\nNoMulticastPendingQueueBufferSpace *StatCounter\n+ // OutgoingDeviceNoBufferSpace is the number of packets that were dropped due\n+ // to insufficient space in the outgoing device.\n+ OutgoingDeviceNoBufferSpace *StatCounter\n+\n// Errors is the number of IP packets received which could not be\n// successfully forwarded.\nErrors *StatCounter\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tests/integration/forward_test.go",
"new_path": "pkg/tcpip/tests/integration/forward_test.go",
"diff": "@@ -349,6 +349,19 @@ func TestForwarding(t *testing.T) {\n}\n}\n+type fillableLinkEndpoint struct {\n+ *channel.Endpoint\n+ full bool\n+}\n+\n+func (e *fillableLinkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) {\n+ if e.full {\n+ return 0, &tcpip.ErrNoBufferSpace{}\n+ }\n+\n+ return e.Endpoint.WritePackets(pkts)\n+}\n+\nfunc TestUnicastForwarding(t *testing.T) {\nconst (\nnicID1 = 1\n@@ -362,6 +375,7 @@ func TestUnicastForwarding(t *testing.T) {\ntests := []struct {\nname string\n+ netProto tcpip.NetworkProtocolNumber\nsrcAddr, dstAddr tcpip.Address\nrx func(*channel.Endpoint, tcpip.Address, tcpip.Address)\nexpectForward bool\n@@ -369,6 +383,7 @@ func TestUnicastForwarding(t *testing.T) {\n}{\n{\nname: \"IPv4 link-local source\",\n+ netProto: ipv4.ProtocolNumber,\nsrcAddr: ipv4LinkLocalUnicastAddr,\ndstAddr: utils.RemoteIPv4Addr,\nrx: rxICMPv4EchoRequest,\n@@ -376,6 +391,7 @@ func TestUnicastForwarding(t *testing.T) {\n},\n{\nname: \"IPv4 link-local destination\",\n+ netProto: ipv4.ProtocolNumber,\nsrcAddr: utils.RemoteIPv4Addr,\ndstAddr: ipv4LinkLocalUnicastAddr,\nrx: rxICMPv4EchoRequest,\n@@ -383,6 +399,7 @@ func TestUnicastForwarding(t *testing.T) {\n},\n{\nname: \"IPv4 non-link-local unicast\",\n+ netProto: ipv4.ProtocolNumber,\nsrcAddr: utils.RemoteIPv4Addr,\ndstAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,\nrx: rxICMPv4EchoRequest,\n@@ -393,6 +410,7 @@ func TestUnicastForwarding(t *testing.T) {\n},\n{\nname: \"IPv6 link-local source\",\n+ netProto: ipv6.ProtocolNumber,\nsrcAddr: ipv6LinkLocalUnicastAddr,\ndstAddr: utils.RemoteIPv6Addr,\nrx: rxICMPv6EchoRequest,\n@@ -400,6 +418,7 @@ func TestUnicastForwarding(t *testing.T) {\n},\n{\nname: \"IPv6 link-local destination\",\n+ netProto: ipv6.ProtocolNumber,\nsrcAddr: utils.RemoteIPv6Addr,\ndstAddr: ipv6LinkLocalUnicastAddr,\nrx: rxICMPv6EchoRequest,\n@@ -407,6 +426,7 @@ func TestUnicastForwarding(t *testing.T) {\n},\n{\nname: \"IPv6 non-link-local unicast\",\n+ netProto: ipv6.ProtocolNumber,\nsrcAddr: utils.RemoteIPv6Addr,\ndstAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,\nrx: rxICMPv6EchoRequest,\n@@ -419,6 +439,8 @@ func TestUnicastForwarding(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n+ for _, full := range []bool{true, false} {\n+ t.Run(fmt.Sprintf(\"Full=%t\", full), func(t *testing.T) {\ns := stack.New(stack.Options{\nNetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},\nTransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},\n@@ -430,9 +452,9 @@ func TestUnicastForwarding(t *testing.T) {\nt.Fatalf(\"s.CreateNIC(%d, _): %s\", nicID1, err)\n}\n- e2 := channel.New(1, header.IPv6MinimumMTU, \"\")\n+ e2 := fillableLinkEndpoint{Endpoint: channel.New(1, header.IPv6MinimumMTU, \"\"), full: full}\ndefer e2.Close()\n- if err := s.CreateNIC(nicID2, e2); err != nil {\n+ if err := s.CreateNIC(nicID2, &e2); err != nil {\nt.Fatalf(\"s.CreateNIC(%d, _): %s\", nicID2, err)\n}\n@@ -471,17 +493,46 @@ func TestUnicastForwarding(t *testing.T) {\ntest.rx(e1, test.srcAddr, test.dstAddr)\n+ expectForward := test.expectForward && !full\np := e2.Read()\n- if (!p.IsNil()) != test.expectForward {\n- t.Fatalf(\"got e2.Read() = %#v, want = (_ == nil) = %t\", p, test.expectForward)\n+ if (!p.IsNil()) != expectForward {\n+ t.Fatalf(\"got e2.Read() = %#v, want = (_ == nil) = %t\", p, expectForward)\n}\n- if test.expectForward {\n+ if expectForward {\npayload := stack.PayloadSince(p.NetworkHeader())\ndefer 
payload.Release()\ntest.checker(t, payload)\np.DecRef()\n}\n+\n+ checkOutgoingDeviceNoBufferSpaceCounter := func(nicID tcpip.NICID, expectErr bool) {\n+ t.Helper()\n+\n+ expectCounter := uint64(0)\n+ if expectErr {\n+ expectCounter = 1\n+ }\n+\n+ netEP, err := s.GetNetworkEndpoint(nicID, test.netProto)\n+ if err != nil {\n+ t.Fatalf(\"s.GetNetworkEndpoint(%d, %d): %s\", nicID, test.netProto, err)\n+ }\n+\n+ stats := netEP.Stats()\n+ ipStats, ok := stats.(stack.IPNetworkEndpointStats)\n+ if !ok {\n+ t.Fatalf(\"%#v is not a %T\", stats, ipStats)\n+ }\n+\n+ if got := ipStats.IPStats().Forwarding.OutgoingDeviceNoBufferSpace.Value(); got != expectCounter {\n+ t.Errorf(\"got ipStats.IPStats().Forwarding.OutgoingDeviceNoBufferSpace.Value() = %d, want = %d\", got, expectCounter)\n+ }\n+ }\n+ checkOutgoingDeviceNoBufferSpaceCounter(nicID1, test.expectForward && full)\n+ checkOutgoingDeviceNoBufferSpaceCounter(nicID2, false)\n+ })\n+ }\n})\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Accept no buffer space errors when forwarding
The outgoing device queue may be full when forwarding packets; the resulting
errors should not cause a panic.
PiperOrigin-RevId: 499946528 |
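
The fix above hinges on the forwarding-error handler's type switch, which panics on any error it does not recognize. Below is a minimal, self-contained Go sketch of that pattern; the error types are illustrative stand-ins for the tcpip/ip types, not gVisor's actual definitions.

```go
package main

import "fmt"

type forwardingError interface{ isForwardingError() }

type errHostUnreachable struct{}

func (errHostUnreachable) isForwardingError() {}

type errNoBufferSpace struct{}

func (errNoBufferSpace) isForwardingError() {}

func handleForwardingError(err forwardingError) {
	switch err.(type) {
	case errHostUnreachable:
		fmt.Println("count: host unreachable")
	case errNoBufferSpace:
		// Without this case, the error would fall through to the panic
		// below, which is the crash the commit avoids.
		fmt.Println("count: outgoing device has no buffer space")
	default:
		panic(fmt.Sprintf("unrecognized forwarding error: %T", err))
	}
}

func main() {
	handleForwardingError(errNoBufferSpace{})
}
```
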
259,907 | 06.01.2023 11:16:39 | 28,800 | 3cbd2127f0828d45308d21d9382be7b99a4738fa | Add test to make sure overlay2 flag works for submounts with host filestore. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/shared_volume_test.go",
"new_path": "runsc/container/shared_volume_test.go",
"diff": "@@ -16,6 +16,7 @@ package container\nimport (\n\"bytes\"\n+ \"errors\"\n\"fmt\"\n\"io/ioutil\"\n\"os\"\n@@ -263,3 +264,53 @@ func TestSharedVolumeFile(t *testing.T) {\nt.Fatal(err.Error())\n}\n}\n+\n+// TestSharedVolumeOverlay tests that changes to a shared volume that is\n+// wrapped in an overlay are not visible externally.\n+func TestSharedVolumeOverlay(t *testing.T) {\n+ conf := testutil.TestConfig(t)\n+ conf.Overlay2 = config.Overlay2{\n+ RootMount: true,\n+ SubMounts: true,\n+ FilestoreDir: \"/tmp\",\n+ }\n+\n+ // File that will be used to check consistency inside/outside sandbox.\n+ // Note that TmpDir() is set up as a shared volume by NewSpecWithArgs(). So\n+ // changes inside TmpDir() should not be visible to the host.\n+ filename := filepath.Join(testutil.TmpDir(), \"file\")\n+\n+ // Create a file in TmpDir() inside the container.\n+ spec := testutil.NewSpecWithArgs(\"/bin/bash\", \"-c\", \"echo Hello > \"+filename+\"; test -f \"+filename)\n+ _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer cleanup()\n+\n+ // Create and start the container.\n+ args := Args{\n+ ID: testutil.RandomContainerID(),\n+ Spec: spec,\n+ BundleDir: bundleDir,\n+ }\n+ c, err := New(conf, args)\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer c.Destroy()\n+ if err := c.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+\n+ if ws, err := c.Wait(); err != nil {\n+ t.Errorf(\"failed to wait for container: %v\", err)\n+ } else if es := ws.ExitStatus(); es != 0 {\n+ t.Errorf(\"subcontainer exited with non-zero status %d\", es)\n+ }\n+\n+ // Ensure that the file does not exist on the host.\n+ if _, err := os.Stat(filename); !errors.Is(err, os.ErrNotExist) {\n+ t.Errorf(\"file exists on host, stat %q got error %v, wanted ErrNotExist\", filename, err)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add test to make sure overlay2 flag works for submounts with host filestore.
PiperOrigin-RevId: 500218219 |
260,004 | 06.01.2023 12:51:55 | 28,800 | 3d4743d96059f009bff7fa7900b48b7a40a1343e | Implement null timer
NullClock.AfterFunc should not return nil, as that violates the API
contract. Instead, return a timer that never fires. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/faketime/faketime.go",
"new_path": "pkg/tcpip/faketime/faketime.go",
"diff": "@@ -39,9 +39,22 @@ func (*NullClock) NowMonotonic() tcpip.MonotonicTime {\nreturn tcpip.MonotonicTime{}\n}\n+// nullTimer implements a timer that never fires.\n+type nullTimer struct{}\n+\n+var _ tcpip.Timer = (*nullTimer)(nil)\n+\n+// Stop implements tcpip.Timer.\n+func (*nullTimer) Stop() bool {\n+ return true\n+}\n+\n+// Reset implements tcpip.Timer.\n+func (*nullTimer) Reset(time.Duration) {}\n+\n// AfterFunc implements tcpip.Clock.AfterFunc.\nfunc (*NullClock) AfterFunc(time.Duration, func()) tcpip.Timer {\n- return nil\n+ return &nullTimer{}\n}\ntype notificationChannels struct {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement null timer
NullClock.AfterFunc should not return nil, as that violates the API
contract. Instead, return a timer that never fires.
PiperOrigin-RevId: 500240130 |
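
The change above is an instance of the null-object pattern: a clock that never fires must still return a usable timer so callers can invoke Stop or Reset without nil checks. A minimal sketch, with Timer and Clock as assumed stand-ins for the tcpip interfaces rather than the real ones:

```go
package main

import (
	"fmt"
	"time"
)

// Timer and Clock are illustrative stand-ins for the tcpip interfaces.
type Timer interface {
	Stop() bool
	Reset(d time.Duration)
}

type Clock interface {
	AfterFunc(d time.Duration, f func()) Timer
}

// nullTimer never fires; Stop and Reset are harmless no-ops.
type nullTimer struct{}

func (nullTimer) Stop() bool          { return true }
func (nullTimer) Reset(time.Duration) {}

// NullClock hands out a usable no-op timer instead of nil, so callers can
// unconditionally call Stop or Reset on the result of AfterFunc.
type NullClock struct{}

func (NullClock) AfterFunc(time.Duration, func()) Timer { return nullTimer{} }

func main() {
	var c Clock = NullClock{}
	t := c.AfterFunc(time.Second, func() { fmt.Println("never runs") })
	t.Reset(time.Minute) // would panic on a nil interface if AfterFunc returned nil
	fmt.Println("stopped:", t.Stop())
}
```
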
259,985 | 06.01.2023 15:07:11 | 28,800 | a17ad261d6d08ab4a6702cbf8b917b6fba9e8ca0 | stateify: Handle multi-name fields in struct declarations. | [
{
"change_type": "MODIFY",
"old_path": "pkg/state/tests/struct.go",
"new_path": "pkg/state/tests/struct.go",
"diff": "@@ -98,3 +98,10 @@ type system3 struct {\nv2 any\nv3 any\n}\n+\n+// +stateify savable\n+type multiName struct {\n+ _, b, c string\n+ x, y int64\n+ z int32\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/state/tests/struct_test.go",
"new_path": "pkg/state/tests/struct_test.go",
"diff": "@@ -98,3 +98,9 @@ func TestEmbeddedPointers(t *testing.T) {\nsystem{&ofv.inner, &ofv},\n})\n}\n+\n+func TestMultiNameFields(t *testing.T) {\n+ runTestCases(t, false, \"multi-name-field\", []any{\n+ multiName{b: \"foo\", c: \"bar\", x: 10, y: 20, z: -30},\n+ })\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/go_stateify/main.go",
"new_path": "tools/go_stateify/main.go",
"diff": "@@ -99,7 +99,7 @@ type scanFunctions struct {\n// skipped if nil.\n//\n// Fields tagged nosave are skipped.\n-func scanFields(ss *ast.StructType, prefix string, fn scanFunctions) {\n+func scanFields(ss *ast.StructType, fn scanFunctions) {\nif ss.Fields.List == nil {\n// No fields.\nreturn\n@@ -107,29 +107,35 @@ func scanFields(ss *ast.StructType, prefix string, fn scanFunctions) {\n// Scan all fields.\nfor _, field := range ss.Fields.List {\n- // Calculate the name.\n- name := \"\"\n- if field.Names != nil {\n- // It's a named field; override.\n- name = field.Names[0].Name\n- } else {\n+ if field.Names == nil {\n// Anonymous types can't be embedded, so we don't need\n// to worry about providing a useful name here.\n- name, _ = resolveTypeName(field.Type)\n+ name, _ := resolveTypeName(field.Type)\n+ scanField(name, field, fn)\n+ continue\n}\n+ // Iterate over potentially multiple fields defined on the same line.\n+ for _, nameI := range field.Names {\n+ name := nameI.Name\n// Skip _ fields.\nif name == \"_\" {\ncontinue\n}\n+ scanField(name, field, fn)\n+ }\n+ }\n+}\n+// scanField scans a single struct field with a resolved name.\n+func scanField(name string, field *ast.Field, fn scanFunctions) {\n// Is this a anonymous struct? If yes, then continue the\n// recursion with the given prefix. We don't pay attention to\n// any tags on the top-level struct field.\ntag := extractStateTag(field.Tag)\nif anon, ok := field.Type.(*ast.StructType); ok && tag == \"\" {\n- scanFields(anon, name+\".\", fn)\n- continue\n+ scanFields(anon, fn)\n+ return\n}\nswitch tag {\n@@ -159,7 +165,6 @@ func scanFields(ss *ast.StructType, prefix string, fn scanFunctions) {\n}\n}\n}\n-}\nfunc camelCased(name string) string {\nreturn strings.ToUpper(name[:1]) + name[1:]\n@@ -385,7 +390,7 @@ func main() {\n// Generate the fields method.\nfmt.Fprintf(outputFile, \"func (%s *%s) StateFields() []string {\\n\", recv, ts.Name.Name)\nfmt.Fprintf(outputFile, \" return []string{\\n\")\n- scanFields(x, \"\", scanFunctions{\n+ scanFields(x, scanFunctions{\nnormal: emitField,\nwait: emitField,\nvalue: emitFieldValue,\n@@ -414,9 +419,9 @@ func main() {\nfmt.Fprintf(outputFile, \"// +checklocksignore\\n\")\nfmt.Fprintf(outputFile, \"func (%s *%s) StateSave(stateSinkObject %sSink) {\\n\", recv, ts.Name.Name, statePrefix)\nfmt.Fprintf(outputFile, \" %s.beforeSave()\\n\", recv)\n- scanFields(x, \"\", scanFunctions{zerovalue: emitZeroCheck})\n- scanFields(x, \"\", scanFunctions{value: emitSaveValue})\n- scanFields(x, \"\", scanFunctions{normal: emitSave, wait: emitSave})\n+ scanFields(x, scanFunctions{zerovalue: emitZeroCheck})\n+ scanFields(x, scanFunctions{value: emitSaveValue})\n+ scanFields(x, scanFunctions{normal: emitSave, wait: emitSave})\nfmt.Fprintf(outputFile, \"}\\n\\n\")\n}\n@@ -436,8 +441,8 @@ func main() {\nif generateSaverLoader {\nfmt.Fprintf(outputFile, \"// +checklocksignore\\n\")\nfmt.Fprintf(outputFile, \"func (%s *%s) StateLoad(stateSourceObject %sSource) {\\n\", recv, ts.Name.Name, statePrefix)\n- scanFields(x, \"\", scanFunctions{normal: emitLoad, wait: emitLoadWait})\n- scanFields(x, \"\", scanFunctions{value: emitLoadValue})\n+ scanFields(x, scanFunctions{normal: emitLoad, wait: emitLoadWait})\n+ scanFields(x, scanFunctions{value: emitLoadValue})\nif hasAfterLoad {\n// The call to afterLoad is made conditionally, because when\n// AfterLoad is called, the object encodes a dependency on\n"
}
] | Go | Apache License 2.0 | google/gvisor | stateify: Handle multi-name fields in struct declarations.
PiperOrigin-RevId: 500268939 |
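
The generator change above matters because a single Go AST field can carry several names. A small, standalone go/ast sketch (not the stateify tool itself) showing why every entry of field.Names must be visited:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package p

type multiName struct {
	_, b, c string
	x, y    int64
	z       int32
}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		st, ok := n.(*ast.StructType)
		if !ok {
			return true
		}
		for _, field := range st.Fields.List {
			// A single *ast.Field may declare several names ("x, y int64"),
			// so every name must be visited, not just Names[0].
			for _, name := range field.Names {
				if name.Name == "_" {
					continue // blank fields are skipped by the generator
				}
				fmt.Println("saved field:", name.Name)
			}
		}
		return true
	})
}
```
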
259,909 | 09.01.2023 10:22:37 | 28,800 | a248c63cd5df307b32bb2a7526bb2648249c4cfb | Fix circular lock between filesystemRWMutex and taskSetRWMutex. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/seccheck.go",
"new_path": "pkg/sentry/kernel/seccheck.go",
"diff": "@@ -19,17 +19,44 @@ import (\npb \"gvisor.dev/gvisor/pkg/sentry/seccheck/points/points_go_proto\"\n)\n+func getTaskCurrentWorkingDirectory(t *Task) string {\n+ // Grab the filesystem context first since it needs tasks.mu to be locked.\n+ // It's safe to unlock and use the values obtained here as long as there's\n+ // no way to modify root and wd from a separate task.\n+ t.k.tasks.mu.RLock()\n+ root := t.FSContext().RootDirectory()\n+ wd := t.FSContext().WorkingDirectory()\n+ t.k.tasks.mu.RUnlock()\n+\n+ // Perform VFS operations outside of task mutex to avoid circular locking with\n+ // filesystem mutexes.\n+ var cwd string\n+ if root.Ok() {\n+ defer root.DecRef(t)\n+ if wd.Ok() {\n+ defer wd.DecRef(t)\n+ vfsObj := root.Mount().Filesystem().VirtualFilesystem()\n+ cwd, _ = vfsObj.PathnameWithDeleted(t, root, wd)\n+ }\n+ }\n+ return cwd\n+}\n+\n// LoadSeccheckData sets info from the task based on mask.\nfunc LoadSeccheckData(t *Task, mask seccheck.FieldMask, info *pb.ContextData) {\n+ var cwd string\n+ if mask.Contains(seccheck.FieldCtxtCwd) {\n+ cwd = getTaskCurrentWorkingDirectory(t)\n+ }\nt.k.tasks.mu.RLock()\ndefer t.k.tasks.mu.RUnlock()\n- LoadSeccheckDataLocked(t, mask, info)\n+ LoadSeccheckDataLocked(t, mask, info, cwd)\n}\n// LoadSeccheckDataLocked sets info from the task based on mask.\n//\n// Preconditions: The TaskSet mutex must be locked.\n-func LoadSeccheckDataLocked(t *Task, mask seccheck.FieldMask, info *pb.ContextData) {\n+func LoadSeccheckDataLocked(t *Task, mask seccheck.FieldMask, info *pb.ContextData, cwd string) {\nif mask.Contains(seccheck.FieldCtxtTime) {\ninfo.TimeNs = t.k.RealtimeClock().Now().Nanoseconds()\n}\n@@ -49,14 +76,7 @@ func LoadSeccheckDataLocked(t *Task, mask seccheck.FieldMask, info *pb.ContextDa\ninfo.ContainerId = t.tg.leader.ContainerID()\n}\nif mask.Contains(seccheck.FieldCtxtCwd) {\n- if root := t.FSContext().RootDirectory(); root.Ok() {\n- defer root.DecRef(t)\n- if wd := t.FSContext().WorkingDirectory(); wd.Ok() {\n- defer wd.DecRef(t)\n- vfsObj := root.Mount().Filesystem().VirtualFilesystem()\n- info.Cwd, _ = vfsObj.PathnameWithDeleted(t, root, wd)\n- }\n- }\n+ info.Cwd = cwd\n}\nif mask.Contains(seccheck.FieldCtxtProcessName) {\ninfo.ProcessName = t.Name()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_clone.go",
"new_path": "pkg/sentry/kernel/task_clone.go",
"diff": "@@ -310,7 +310,10 @@ func (t *Task) Clone(args *linux.CloneArgs) (ThreadID, *SyscallControl, error) {\nfunc getCloneSeccheckInfo(t, nt *Task, flags uint64) (seccheck.FieldSet, *pb.CloneInfo) {\nfields := seccheck.Global.GetFieldSet(seccheck.PointClone)\n-\n+ var cwd string\n+ if fields.Context.Contains(seccheck.FieldCtxtCwd) {\n+ cwd = getTaskCurrentWorkingDirectory(t)\n+ }\nt.k.tasks.mu.RLock()\ndefer t.k.tasks.mu.RUnlock()\ninfo := &pb.CloneInfo{\n@@ -322,7 +325,7 @@ func getCloneSeccheckInfo(t, nt *Task, flags uint64) (seccheck.FieldSet, *pb.Clo\nif !fields.Context.Empty() {\ninfo.ContextData = &pb.ContextData{}\n- LoadSeccheckDataLocked(t, fields.Context, info.ContextData)\n+ LoadSeccheckDataLocked(t, fields.Context, info.ContextData, cwd)\n}\nreturn fields, info\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_exit.go",
"new_path": "pkg/sentry/kernel/task_exit.go",
"diff": "@@ -748,7 +748,9 @@ func getExitNotifyParentSeccheckInfo(t *Task) (seccheck.FieldSet, *pb.ExitNotify\n}\nif !fields.Context.Empty() {\ninfo.ContextData = &pb.ContextData{}\n- LoadSeccheckDataLocked(t, fields.Context, info.ContextData)\n+ // cwd isn't used for notifyExit seccheck so it's ok to pass an empty\n+ // string.\n+ LoadSeccheckDataLocked(t, fields.Context, info.ContextData, \"\")\n}\nreturn fields, info\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/seccheck/metadata.go",
"new_path": "pkg/sentry/seccheck/metadata.go",
"diff": "@@ -245,7 +245,40 @@ func init() {\nregisterPoint(PointDesc{\nID: PointExitNotifyParent,\nName: \"sentry/exit_notify_parent\",\n- ContextFields: defaultContextFields,\n+ ContextFields: []FieldDesc{\n+ {\n+ ID: FieldCtxtTime,\n+ Name: \"time\",\n+ },\n+ {\n+ ID: FieldCtxtThreadID,\n+ Name: \"thread_id\",\n+ },\n+ {\n+ ID: FieldCtxtThreadStartTime,\n+ Name: \"task_start_time\",\n+ },\n+ {\n+ ID: FieldCtxtThreadGroupID,\n+ Name: \"group_id\",\n+ },\n+ {\n+ ID: FieldCtxtThreadGroupStartTime,\n+ Name: \"thread_group_start_time\",\n+ },\n+ {\n+ ID: FieldCtxtContainerID,\n+ Name: \"container_id\",\n+ },\n+ {\n+ ID: FieldCtxtCredentials,\n+ Name: \"credentials\",\n+ },\n+ {\n+ ID: FieldCtxtProcessName,\n+ Name: \"process_name\",\n+ },\n+ },\n})\nregisterPoint(PointDesc{\nID: PointTaskExit,\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix circular lock between filesystemRWMutex and taskSetRWMutex.
PiperOrigin-RevId: 500747195 |
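
The commit above breaks a potential lock-order inversion by resolving the working directory before taking the task-set lock. A simplified, hedged sketch of that restructuring; the mutexes and helper below are illustrative, not the sentry's real types:

```go
package main

import "sync"

var (
	tasksMu sync.RWMutex // analogue of the task-set lock
	fsMu    sync.RWMutex // analogue of a filesystem lock
)

// resolveCwd stands in for the VFS path resolution that takes fsMu.
func resolveCwd() string {
	fsMu.RLock()
	defer fsMu.RUnlock()
	return "/illustrative/cwd"
}

// Before the fix: the cwd was resolved while tasksMu was held, creating the
// ordering tasksMu -> fsMu. Any code path that takes fsMu -> tasksMu could
// then deadlock with this one.
func loadDataBefore() string {
	tasksMu.RLock()
	defer tasksMu.RUnlock()
	return resolveCwd()
}

// After the fix: the filesystem work happens first, and only the precomputed
// value is used under tasksMu, so the two locks are never nested.
func loadDataAfter() string {
	cwd := resolveCwd()
	tasksMu.RLock()
	defer tasksMu.RUnlock()
	return cwd
}

func main() {
	_ = loadDataBefore()
	_ = loadDataAfter()
}
```
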
259,985 | 09.01.2023 11:09:37 | 28,800 | ef96e9328ea5bf92aa24875a3c3537b8edf04d81 | Disable io_uring syscalls by default.
The current io_uring support is very limited and experimental. Disable
it by default, and add a flag to enable it for testing. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/iouring.go",
"new_path": "pkg/abi/linux/iouring.go",
"diff": "@@ -126,6 +126,7 @@ type IOUringParams struct {\n// See struct io_uring_cqe in include/uapi/linux/io_uring.h.\n//\n// +marshal\n+// +stateify savable\ntype IOUringCqe struct {\nUserData uint64\nRes int32\n@@ -136,6 +137,7 @@ type IOUringCqe struct {\n// See struct io_uring in io_uring/io_uring.c.\n//\n// +marshal\n+// +stateify savable\ntype IOUring struct {\n// Both head and tail should be cacheline aligned. And we assume that\n// cacheline size is 64 bytes.\n@@ -150,10 +152,14 @@ type IOUring struct {\n// See struct io_rings in io_uring/io_uring.c.\n//\n// +marshal\n+// +stateify savable\ntype IORings struct {\n- Sq, Cq IOUring\n- SqRingMask, CqRingMask uint32\n- SqRingEntries, CqRingEntries uint32\n+ Sq IOUring\n+ Cq IOUring\n+ SqRingMask uint32\n+ CqRingMask uint32\n+ SqRingEntries uint32\n+ CqRingEntries uint32\nsqDropped uint32\nsqFlags int32\ncqFlags uint32\n@@ -169,6 +175,7 @@ type IORings struct {\n// See include/uapi/linux/io_uring.h.\n//\n// +marshal\n+// +stateify savable\ntype IOUringSqe struct {\nOpcode uint8\nFlags uint8\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/iouringfs/iouringfs.go",
"new_path": "pkg/sentry/fsimpl/iouringfs/iouringfs.go",
"diff": "@@ -50,6 +50,8 @@ type FileDescription struct {\nvfs.DentryMetadataFileDescriptionImpl\nvfs.NoLockFD\n+ mfp pgalloc.MemoryFileProvider\n+\nrbmf ringsBufferFile\nsqemf sqEntriesFile\n@@ -62,9 +64,13 @@ type FileDescription struct {\nioRings linux.IORings\n- ioRingsBuf sharedBuffer\n- sqesBuf sharedBuffer\n- cqesBuf sharedBuffer\n+ ioRingsBuf sharedBuffer `state:\"nosave\"`\n+ sqesBuf sharedBuffer `state:\"nosave\"`\n+ cqesBuf sharedBuffer `state:\"nosave\"`\n+\n+ // remap indicates whether the shared buffers need to be remapped\n+ // due to a S/R. Protected by ProcessSubmissions critical section.\n+ remap bool\n}\nvar _ vfs.FileDescriptionImpl = (*FileDescription)(nil)\n@@ -117,7 +123,9 @@ func New(ctx context.Context, vfsObj *vfs.VirtualFilesystem, entries uint32, par\nnumSqEntries*uint32((*linux.IORingIndex)(nil).SizeBytes()))\nringsBufferSize = uint64(hostarch.Addr(ringsBufferSize).MustRoundUp())\n- rbfr, err := mfp.MemoryFile().Allocate(ringsBufferSize, pgalloc.AllocOpts{Kind: usage.Anonymous})\n+ mf := mfp.MemoryFile()\n+\n+ rbfr, err := mf.Allocate(ringsBufferSize, pgalloc.AllocOpts{Kind: usage.Anonymous})\nif err != nil {\nreturn nil, linuxerr.ENOMEM\n}\n@@ -125,18 +133,17 @@ func New(ctx context.Context, vfsObj *vfs.VirtualFilesystem, entries uint32, par\n// Allocate enough space to store the given number of submission queue entries.\nsqEntriesSize := uint64(numSqEntries * uint32((*linux.IOUringSqe)(nil).SizeBytes()))\nsqEntriesSize = uint64(hostarch.Addr(sqEntriesSize).MustRoundUp())\n- sqefr, err := mfp.MemoryFile().Allocate(sqEntriesSize, pgalloc.AllocOpts{Kind: usage.Anonymous})\n+ sqefr, err := mf.Allocate(sqEntriesSize, pgalloc.AllocOpts{Kind: usage.Anonymous})\nif err != nil {\nreturn nil, linuxerr.ENOMEM\n}\niouringfd := &FileDescription{\n+ mfp: mfp,\nrbmf: ringsBufferFile{\n- mf: mfp.MemoryFile(),\nfr: rbfr,\n},\nsqemf: sqEntriesFile{\n- mf: mfp.MemoryFile(),\nfr: sqefr,\n},\n// See ProcessSubmissions for why the capacity is 1.\n@@ -195,6 +202,10 @@ func New(ctx context.Context, vfsObj *vfs.VirtualFilesystem, entries uint32, par\nreturn nil, err\n}\niouringfd.ioRings.MarshalUnsafe(view)\n+\n+ buf := make([]byte, iouringfd.ioRings.SizeBytes())\n+ iouringfd.ioRings.MarshalUnsafe(buf)\n+\nif _, err := iouringfd.ioRingsBuf.writeback(iouringfd.ioRings.SizeBytes()); err != nil {\nreturn nil, err\n}\n@@ -203,16 +214,19 @@ func New(ctx context.Context, vfsObj *vfs.VirtualFilesystem, entries uint32, par\n}\n// Release implements vfs.FileDescriptionImpl.Release.\n-func (fd *FileDescription) Release(context.Context) {\n- fd.rbmf.mf.DecRef(fd.rbmf.fr)\n- fd.sqemf.mf.DecRef(fd.sqemf.fr)\n+func (fd *FileDescription) Release(ctx context.Context) {\n+ mf := pgalloc.MemoryFileProviderFromContext(ctx).MemoryFile()\n+ mf.DecRef(fd.rbmf.fr)\n+ mf.DecRef(fd.sqemf.fr)\n}\n// mapSharedBuffers caches internal mappings for the ring's shared memory\n// regions.\nfunc (fd *FileDescription) mapSharedBuffers() error {\n+ mf := fd.mfp.MemoryFile()\n+\n// Mapping for the IORings header struct.\n- rb, err := fd.rbmf.mf.MapInternal(fd.rbmf.fr, hostarch.ReadWrite)\n+ rb, err := mf.MapInternal(fd.rbmf.fr, hostarch.ReadWrite)\nif err != nil {\nreturn err\n}\n@@ -228,7 +242,7 @@ func (fd *FileDescription) mapSharedBuffers() error {\nfd.cqesBuf.init(cqes)\n// Mapping for the SQEs array.\n- sqes, err := fd.sqemf.mf.MapInternal(fd.sqemf.fr, hostarch.ReadWrite)\n+ sqes, err := mf.MapInternal(fd.sqemf.fr, hostarch.ReadWrite)\nif err != nil {\nreturn err\n}\n@@ -320,6 +334,14 @@ func (fd *FileDescription) 
ProcessSubmissions(t *kernel.Task, toSubmit uint32, m\n}\n}()\n+ // The rest of this function is a critical section with respect to\n+ // concurrent callers.\n+\n+ if fd.remap {\n+ fd.mapSharedBuffers()\n+ fd.remap = false\n+ }\n+\nvar err error\nvar sqe linux.IOUringSqe\n@@ -524,8 +546,9 @@ func (fd *FileDescription) updateCq(cqes *safemem.BlockSeq, cqe *linux.IOUringCq\n}\n// sqEntriesFile implements memmap.Mappable for SQ entries.\n+//\n+// +stateify savable\ntype sqEntriesFile struct {\n- mf *pgalloc.MemoryFile\nfr memmap.FileRange\n}\n@@ -553,7 +576,7 @@ func (sqemf *sqEntriesFile) Translate(ctx context.Context, required, optional me\nreturn []memmap.Translation{\n{\nSource: source,\n- File: sqemf.mf,\n+ File: pgalloc.MemoryFileProviderFromContext(ctx).MemoryFile(),\nOffset: sqemf.fr.Start + source.Start,\nPerms: at,\n},\n@@ -569,8 +592,9 @@ func (sqemf *sqEntriesFile) InvalidateUnsavable(ctx context.Context) error {\n}\n// ringBuffersFile implements memmap.Mappable for SQ and CQ ring buffers.\n+//\n+// +stateify savable\ntype ringsBufferFile struct {\n- mf *pgalloc.MemoryFile\nfr memmap.FileRange\n}\n@@ -598,7 +622,7 @@ func (rbmf *ringsBufferFile) Translate(ctx context.Context, required, optional m\nreturn []memmap.Translation{\n{\nSource: source,\n- File: rbmf.mf,\n+ File: pgalloc.MemoryFileProviderFromContext(ctx).MemoryFile(),\nOffset: rbmf.fr.Start + source.Start,\nPerms: at,\n},\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/iouringfs/iouringfs_state.go",
"new_path": "pkg/sentry/fsimpl/iouringfs/iouringfs_state.go",
"diff": "@@ -23,8 +23,7 @@ func (fd *FileDescription) beforeSave() {\n// afterLoad is invoked by stateify.\nfunc (fd *FileDescription) afterLoad() {\n+ // Remap shared buffers.\n+ fd.remap = true\nfd.runC = make(chan struct{}, 1)\n- // Wake up any potential sleepers from before the Save. The Pause for Save\n- // ensured there were no active tasks at save time.\n- fd.runC <- struct{}{}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -75,6 +75,10 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip\"\n)\n+// IOUringEnabled is set to true when IO_URING is enabled. Added as a global to\n+// allow easy access everywhere.\n+var IOUringEnabled = false\n+\n// userCounters is a set of user counters.\n//\n// +stateify savable\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_iouring.go",
"new_path": "pkg/sentry/syscalls/linux/sys_iouring.go",
"diff": "@@ -25,6 +25,10 @@ import (\n// IOUringSetup implements linux syscall io_uring_setup(2).\nfunc IOUringSetup(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n+ if !kernel.IOUringEnabled {\n+ return 0, nil, linuxerr.ENOSYS\n+ }\n+\nentries := uint32(args[0].Uint())\nparamsAddr := args[1].Pointer()\nvar params linux.IOUringParams\n@@ -77,6 +81,10 @@ func IOUringSetup(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.\n// IOUringEnter implements linux syscall io_uring_enter(2).\nfunc IOUringEnter(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n+ if !kernel.IOUringEnabled {\n+ return 0, nil, linuxerr.ENOSYS\n+ }\n+\nfd := int32(args[0].Int())\ntoSubmit := uint32(args[1].Uint())\nminComplete := uint32(args[2].Uint())\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -245,6 +245,8 @@ func New(args Args) (*Loader, error) {\nreturn nil, fmt.Errorf(\"setting up memory usage: %w\", err)\n}\n+ kernel.IOUringEnabled = args.Conf.IOUring\n+\n// Make host FDs stable between invocations. Host FDs must map to the exact\n// same number when the sandbox is restored. Otherwise the wrong FD will be\n// used.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cli/main.go",
"new_path": "runsc/cli/main.go",
"diff": "@@ -229,6 +229,7 @@ func Main(version string) {\nlog.Infof(\"\\t\\tOverlay: Root=%t, SubMounts=%t, FilestoreDir=%q\", overlay2.RootMount, overlay2.SubMounts, overlay2.FilestoreDir)\nlog.Infof(\"\\t\\tNetwork: %v, logging: %t\", conf.Network, conf.LogPackets)\nlog.Infof(\"\\t\\tStrace: %t, max size: %d, syscalls: %s\", conf.Strace, conf.StraceLogSize, conf.StraceSyscalls)\n+ log.Infof(\"\\t\\tIOURING: %t\", conf.IOUring)\nlog.Infof(\"\\t\\tDebug: %v\", conf.Debug)\nlog.Infof(\"\\t\\tSystemd: %v\", conf.SystemdCgroup)\nlog.Infof(\"***************************\")\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/config.go",
"new_path": "runsc/config/config.go",
"diff": "@@ -262,6 +262,10 @@ type Config struct {\n// used.\nDCache int `flag:\"dcache\"`\n+ // IOUring enables support for the IO_URING API calls to perform\n+ // asynchronous I/O operations.\n+ IOUring bool `flag:\"iouring\"`\n+\n// TestOnlyAllowRunAsCurrentUserWithoutChroot should only be used in\n// tests. It allows runsc to start the sandbox process as the current\n// user, and without chrooting the sandbox process. This can be\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/flags.go",
"new_path": "runsc/config/flags.go",
"diff": "@@ -94,6 +94,7 @@ func RegisterFlags(flagSet *flag.FlagSet) {\nflagSet.Bool(\"ignore-cgroups\", false, \"don't configure cgroups.\")\nflagSet.Int(\"fdlimit\", -1, \"Specifies a limit on the number of host file descriptors that can be open. Applies separately to the sentry and gofer. Note: each file in the sandbox holds more than one host FD open.\")\nflagSet.Int(\"dcache\", -1, \"Set the global dentry cache size. This acts as a coarse-grained control on the number of host FDs simultaneously open by the sentry. If negative, per-mount caches are used.\")\n+ flagSet.Bool(\"iouring\", false, \"TEST ONLY; Enables io_uring syscalls in the sentry. Support is experimental and very limited.\")\n// Flags that control sandbox runtime behavior: network related.\nflagSet.Var(networkTypePtr(NetworkSandbox), \"network\", \"specifies which network to use: sandbox (default), host, none. Using network inside the sandbox is more secure because it's isolated from the host network.\")\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/defs.bzl",
"new_path": "test/runner/defs.bzl",
"diff": "@@ -69,6 +69,7 @@ def _syscall_test(\nfile_access = \"exclusive\",\noverlay = False,\nadd_host_communication = False,\n+ iouring = False,\ncontainer = None,\none_sandbox = True,\n**kwargs):\n@@ -137,6 +138,7 @@ def _syscall_test(\n\"--debug=\" + str(debug),\n\"--container=\" + str(container),\n\"--one-sandbox=\" + str(one_sandbox),\n+ \"--iouring=\" + str(iouring),\n]\n# Trace points are platform agnostic, so enable them for ptrace only.\n@@ -165,6 +167,7 @@ def syscall_test(\nadd_host_communication = False,\nadd_hostinet = False,\none_sandbox = True,\n+ iouring = False,\nallow_native = True,\ndebug = True,\ncontainer = None,\n@@ -179,6 +182,7 @@ def syscall_test(\nadd_host_communication: setup UDS and pipe external communication for tests.\nadd_hostinet: add a hostinet test.\none_sandbox: runs each unit test in a new sandbox instance.\n+ iouring: enable IO_URING support.\nallow_native: generate a native test variant.\ndebug: enable debug output.\ncontainer: Run the test in a container. If None, determined from other information.\n@@ -195,6 +199,7 @@ def syscall_test(\nuse_tmpfs = False,\nadd_host_communication = add_host_communication,\ntags = tags,\n+ iouring = iouring,\ndebug = debug,\ncontainer = container,\none_sandbox = one_sandbox,\n@@ -208,6 +213,7 @@ def syscall_test(\nuse_tmpfs = use_tmpfs,\nadd_host_communication = add_host_communication,\ntags = platform_tags + tags,\n+ iouring = iouring,\ndebug = debug,\ncontainer = container,\none_sandbox = one_sandbox,\n@@ -222,6 +228,7 @@ def syscall_test(\nadd_host_communication = add_host_communication,\ntags = platforms.get(default_platform, []) + tags,\ndebug = debug,\n+ iouring = iouring,\ncontainer = container,\none_sandbox = one_sandbox,\noverlay = True,\n@@ -236,6 +243,7 @@ def syscall_test(\nadd_host_communication = add_host_communication,\ntags = platforms.get(default_platform, []) + tags,\ndebug = debug,\n+ iouring = iouring,\ncontainer = container,\none_sandbox = one_sandbox,\n**kwargs\n@@ -248,6 +256,7 @@ def syscall_test(\nuse_tmpfs = use_tmpfs,\nadd_host_communication = add_host_communication,\ntags = platforms.get(default_platform, []) + tags,\n+ iouring = iouring,\ndebug = debug,\ncontainer = container,\none_sandbox = one_sandbox,\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/main.go",
"new_path": "test/runner/main.go",
"diff": "@@ -56,6 +56,7 @@ var (\ntrace = flag.Bool(\"trace\", false, \"enables all trace points\")\naddUDSTree = flag.Bool(\"add-host-communication\", false, \"expose a tree of UDS and pipe utilities to test communication with the host\")\n+ ioUring = flag.Bool(\"iouring\", false, \"Enables IO_URING API for asynchronous I/O\")\n// TODO(gvisor.dev/issue/4572): properly support leak checking for runsc, and\n// set to true as the default for the test runner.\nleakCheck = flag.Bool(\"leak-check\", false, \"check for reference leaks\")\n@@ -198,6 +199,7 @@ func runRunsc(tc *gtest.TestCase, spec *specs.Spec) error {\n\"-TESTONLY-allow-packet-endpoint-write=true\",\n\"-net-raw=true\",\nfmt.Sprintf(\"-panic-signal=%d\", unix.SIGTERM),\n+ fmt.Sprintf(\"-iouring=%t\", *ioUring),\n\"-watchdog-action=panic\",\n\"-platform\", *platform,\n\"-file-access\", *fileAccess,\n@@ -428,11 +430,17 @@ func runTestCaseRunsc(testBin string, tc *gtest.TestCase, args []string, t *test\nconst (\nplatformVar = \"TEST_ON_GVISOR\"\nnetworkVar = \"GVISOR_NETWORK\"\n+ ioUringVar = \"IOURING_ENABLED\"\n)\nenv := append(os.Environ(), platformVar+\"=\"+*platform, networkVar+\"=\"+*network)\nif *platformSupport != \"\" {\nenv = append(env, fmt.Sprintf(\"%s=%s\", platformSupportEnvVar, *platformSupport))\n}\n+ if *ioUring {\n+ env = append(env, ioUringVar+\"=TRUE\")\n+ } else {\n+ env = append(env, ioUringVar+\"=FALSE\")\n+ }\n// Remove shard env variables so that the gunit binary does not try to\n// interpret them.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -260,6 +260,7 @@ syscall_test(\n)\nsyscall_test(\n+ iouring = True,\n# Temporarily added due to intermittent ENOMEM failures. See b/216213621.\ntags = [\"notap\"],\ntest = \"//test/syscalls/linux:iouring_test\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/io_uring.cc",
"new_path": "test/syscalls/linux/io_uring.cc",
"diff": "#include <sys/types.h>\n#include <unistd.h>\n+#include <cerrno>\n#include <cstddef>\n#include <cstdint>\n@@ -41,6 +42,24 @@ namespace testing {\nnamespace {\n+bool IOUringAvailable() {\n+ if (IsRunningOnGvisor()) {\n+ return true;\n+ }\n+\n+ // io_uring is relatively new and may not be available on all kernels. Probe\n+ // using an intentionally invalid call to io_uring_enter.\n+ errno = 0;\n+ int rc = syscall(__NR_io_uring_enter, -1, 1, 1, 0, nullptr);\n+ if (rc != -1) {\n+ // How did this succeed?\n+ std::cerr << \"Probe io_uring_enter(2) with invalid FD somehow succeeded...\"\n+ << std::endl;\n+ return false;\n+ }\n+ return errno != ENOSYS;\n+}\n+\n// IOVecContainsString checks that a tuple argument of (struct iovec *, int)\n// corresponding to an iovec array and its length, contains data that matches\n// the string length strlen and the string value str.\n@@ -74,12 +93,16 @@ MATCHER_P(IOVecContainsString, str, \"\") {\n// Testing that io_uring_setup(2) successfully returns a valid file descriptor.\nTEST(IOUringTest, ValidFD) {\n+ SKIP_IF(!IOUringAvailable());\n+\nIOUringParams params;\nFileDescriptor iouringfd = ASSERT_NO_ERRNO_AND_VALUE(NewIOUringFD(1, params));\n}\n// Testing that io_uring_setup(2) fails with EINVAL on non-zero params.\nTEST(IOUringTest, ParamsNonZeroResv) {\n+ SKIP_IF(!IOUringAvailable());\n+\nIOUringParams params;\nmemset(¶ms, 0, sizeof(params));\nparams.resv[1] = 1;\n@@ -87,6 +110,8 @@ TEST(IOUringTest, ParamsNonZeroResv) {\n}\nTEST(IOUringTest, ZeroCQEntries) {\n+ SKIP_IF(!IOUringAvailable());\n+\nIOUringParams params;\nparams.cq_entries = 0;\nparams.flags = IORING_SETUP_CQSIZE;\n@@ -94,6 +119,8 @@ TEST(IOUringTest, ZeroCQEntries) {\n}\nTEST(IOUringTest, ZeroCQEntriesLessThanSQEntries) {\n+ SKIP_IF(!IOUringAvailable());\n+\nIOUringParams params;\nparams.cq_entries = 16;\nparams.flags = IORING_SETUP_CQSIZE;\n@@ -102,17 +129,20 @@ TEST(IOUringTest, ZeroCQEntriesLessThanSQEntries) {\n// Testing that io_uring_setup(2) fails with EINVAL on unsupported flags.\nTEST(IOUringTest, UnsupportedFlags) {\n- if (IsRunningOnGvisor()) {\n+ // Gvisor only test, since linux supports all flags.\n+ SKIP_IF(!IsRunningOnGvisor());\n+\nIOUringParams params;\nmemset(¶ms, 0, sizeof(params));\nparams.flags |= IORING_SETUP_SQPOLL;\nASSERT_THAT(IOUringSetup(1, ¶ms), SyscallFailsWithErrno(EINVAL));\n}\n-}\n// Testing that both mmap and munmap calls succeed and subsequent access to\n// unmapped memory results in SIGSEGV.\nTEST(IOUringTest, MMapMUnMapWork) {\n+ SKIP_IF(!IOUringAvailable());\n+\nIOUringParams params;\nFileDescriptor iouringfd = ASSERT_NO_ERRNO_AND_VALUE(NewIOUringFD(1, params));\n@@ -140,6 +170,8 @@ TEST(IOUringTest, MMapMUnMapWork) {\n// Testing that both mmap fails with EINVAL when an invalid offset is passed.\nTEST(IOUringTest, MMapWrongOffset) {\n+ SKIP_IF(!IOUringAvailable());\n+\nIOUringParams params;\nFileDescriptor iouringfd = ASSERT_NO_ERRNO_AND_VALUE(NewIOUringFD(1, params));\n@@ -154,6 +186,8 @@ TEST(IOUringTest, MMapWrongOffset) {\n// Testing that mmap() handles all three IO_URING-specific offsets and that\n// returned addresses are page aligned.\nTEST(IOUringTest, MMapOffsets) {\n+ SKIP_IF(!IOUringAvailable());\n+\nIOUringParams params;\nFileDescriptor iouringfd = ASSERT_NO_ERRNO_AND_VALUE(NewIOUringFD(1, params));\n@@ -188,10 +222,10 @@ TEST(IOUringTest, MMapOffsets) {\n// Testing that IOUringParams are populated with correct values.\nTEST(IOUringTest, ReturnedParamsValues) {\n- if (IsRunningOnGvisor()) {\n+ 
SKIP_IF(!IsRunningOnGvisor());\n+\nIOUringParams params;\n- FileDescriptor iouringfd =\n- ASSERT_NO_ERRNO_AND_VALUE(NewIOUringFD(1, params));\n+ FileDescriptor iouringfd = ASSERT_NO_ERRNO_AND_VALUE(NewIOUringFD(1, params));\nEXPECT_EQ(params.sq_entries, 1);\nEXPECT_EQ(params.cq_entries, 2);\n@@ -215,10 +249,11 @@ TEST(IOUringTest, ReturnedParamsValues) {\n// gVisor should support IORING_FEAT_SINGLE_MMAP.\nEXPECT_NE((params.features & IORING_FEAT_SINGLE_MMAP), 0);\n}\n-}\n// Testing that offset of SQE indices array is cacheline aligned.\nTEST(IOUringTest, SqeIndexArrayCacheAligned) {\n+ SKIP_IF(!IOUringAvailable());\n+\nIOUringParams params;\nfor (uint32_t i = 1; i < 10; ++i) {\nFileDescriptor iouringfd =\n@@ -229,10 +264,15 @@ TEST(IOUringTest, SqeIndexArrayCacheAligned) {\n// Testing that io_uring_enter(2) successfully handles a single NOP operation.\nTEST(IOUringTest, SingleNOPTest) {\n+ SKIP_IF(!IOUringAvailable());\n+\nIOUringParams params;\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n+ ASSERT_EQ(params.sq_entries, 1);\n+ ASSERT_EQ(params.cq_entries, 2);\n+\nuint32_t sq_head = io_uring->load_sq_head();\nASSERT_EQ(sq_head, 0);\n@@ -263,6 +303,8 @@ TEST(IOUringTest, SingleNOPTest) {\n// Testing that io_uring_enter(2) successfully queueing NOP operations.\nTEST(IOUringTest, QueueingNOPTest) {\n+ SKIP_IF(!IOUringAvailable());\n+\nIOUringParams params;\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(4, params));\n@@ -323,6 +365,8 @@ TEST(IOUringTest, QueueingNOPTest) {\n// Testing that io_uring_enter(2) successfully multiple NOP operations.\nTEST(IOUringTest, MultipleNOPTest) {\n+ SKIP_IF(!IOUringAvailable());\n+\nIOUringParams params;\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(4, params));\n@@ -367,6 +411,8 @@ TEST(IOUringTest, MultipleNOPTest) {\n// Testing that io_uring_enter(2) successfully handles multiple threads\n// submitting NOP operations.\nTEST(IOUringTest, MultiThreadedNOPTest) {\n+ SKIP_IF(!IOUringAvailable());\n+\nIOUringParams params;\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(4, params));\n@@ -415,6 +461,8 @@ TEST(IOUringTest, MultiThreadedNOPTest) {\n// Testing that io_uring_enter(2) successfully consumes submission with an\n// invalid opcode and returned CQE contains EINVAL in its result field.\nTEST(IOUringTest, InvalidOpCodeTest) {\n+ SKIP_IF(!IOUringAvailable());\n+\nIOUringParams params;\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n@@ -450,6 +498,8 @@ TEST(IOUringTest, InvalidOpCodeTest) {\n// Tests that filling the shared memory region with garbage data doesn't cause a\n// kernel panic.\nTEST(IOUringTest, CorruptRingHeader) {\n+ SKIP_IF(!IOUringAvailable());\n+\nconst int kEntries = 64;\nIOUringParams params;\n@@ -493,6 +543,8 @@ TEST(IOUringTest, CorruptRingHeader) {\n// Testing that io_uring_enter(2) successfully consumes submission and SQE ring\n// buffers wrap around.\nTEST(IOUringTest, SQERingBuffersWrapAroundTest) {\n+ SKIP_IF(!IOUringAvailable());\n+\nIOUringParams params;\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(4, params));\n@@ -560,7 +612,8 @@ TEST(IOUringTest, SQERingBuffersWrapAroundTest) {\n// Testing that io_uring_enter(2) fails with EFAULT when non-null sigset_t has\n// been passed as we currently don't support replacing signal mask.\nTEST(IOUringTest, NonNullSigsetTest) {\n- if 
(IsRunningOnGvisor()) {\n+ SKIP_IF(!IsRunningOnGvisor());\n+\nIOUringParams params;\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n@@ -579,13 +632,14 @@ TEST(IOUringTest, NonNullSigsetTest) {\nEXPECT_THAT(io_uring->Enter(1, 1, IORING_ENTER_GETEVENTS, &non_null_sigset),\nSyscallFailsWithErrno(EFAULT));\n}\n-}\n// Testing that completion queue overflow counter is incremented when the\n// completion queue is not drained by the user and completion queue entries are\n// not overwritten.\nTEST(IOUringTest, OverflowCQTest) {\n- if (IsRunningOnGvisor()) {\n+ // Gvisor's completion queue overflow behaviour is different from Linux.\n+ SKIP_IF(!IsRunningOnGvisor());\n+\nIOUringParams params;\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(4, params));\n@@ -598,8 +652,7 @@ TEST(IOUringTest, OverflowCQTest) {\nIOUringSqe *sqe = io_uring->get_sqes();\nIOUringCqe *cqe = io_uring->get_cqes();\n- for (size_t submission_round = 0; submission_round < 2;\n- ++submission_round) {\n+ for (size_t submission_round = 0; submission_round < 2; ++submission_round) {\nfor (size_t i = 0; i < 4; ++i) {\nsqe[i].opcode = IORING_OP_NOP;\nsqe[i].user_data = 42 + i + submission_round;\n@@ -664,10 +717,11 @@ TEST(IOUringTest, OverflowCQTest) {\nuint32_t cq_overflow_counter = io_uring->load_cq_overflow();\nASSERT_EQ(cq_overflow_counter, 2);\n}\n-}\n// Testing that io_uring_enter(2) successfully handles single READV operation.\nTEST(IOUringTest, SingleREADVTest) {\n+ SKIP_IF(!IOUringAvailable());\n+\nstruct io_uring_params params;\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n@@ -737,6 +791,8 @@ TEST(IOUringTest, SingleREADVTest) {\n// Tests that IORING_OP_READV handles EOF on an empty file correctly.\nTEST(IOUringTest, ReadvEmptyFile) {\n+ SKIP_IF(!IOUringAvailable());\n+\nstruct io_uring_params params;\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n@@ -790,6 +846,8 @@ TEST(IOUringTest, ReadvEmptyFile) {\n// Testing that io_uring_enter(2) successfully handles three READV operations\n// from three different files submitted through a single invocation.\nTEST(IOUringTest, ThreeREADVSingleEnterTest) {\n+ SKIP_IF(!IOUringAvailable());\n+\nstruct io_uring_params params;\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(4, params));\n@@ -881,6 +939,8 @@ TEST(IOUringTest, ThreeREADVSingleEnterTest) {\n// Testing that io_uring_enter(2) successfully handles READV operation, which is\n// racing with deletion of the same file.\nTEST(IOUringTest, READVRaceWithDeleteTest) {\n+ SKIP_IF(!IOUringAvailable());\n+\nstruct io_uring_params params;\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(2, params));\n@@ -986,6 +1046,8 @@ TEST(IOUringTest, READVRaceWithDeleteTest) {\n// Testing that io_uring_enter(2) successfully handles single READV operation\n// with short read situation.\nTEST(IOUringTest, ShortReadREADVTest) {\n+ SKIP_IF(!IOUringAvailable());\n+\nstruct io_uring_params params;\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n@@ -1058,6 +1120,8 @@ TEST(IOUringTest, ShortReadREADVTest) {\n// Testing that io_uring_enter(2) successfully handles single READV operation\n// when there file does not have read permissions.\nTEST(IOUringTest, NoReadPermissionsREADVTest) {\n+ SKIP_IF(!IOUringAvailable());\n+\nstruct io_uring_params 
params;\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n@@ -1132,6 +1196,8 @@ class IOUringSqeFieldsTest : public ::testing::Test,\n// Testing that io_uring_enter(2) successfully handles single READV operation\n// and returns EINVAL error in the CQE when either ioprio or buf_index is set.\nTEST_P(IOUringSqeFieldsTest, READVWithInvalidSqeFieldValue) {\n+ SKIP_IF(!IOUringAvailable());\n+\nconst SqeFieldsUT p = GetParam();\nstruct io_uring_params params;\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/test_util.cc",
"new_path": "test/util/test_util.cc",
"diff": "@@ -41,6 +41,7 @@ namespace gvisor {\nnamespace testing {\nconstexpr char kGvisorNetwork[] = \"GVISOR_NETWORK\";\n+constexpr char kIOUringEnabled[] = \"IOURING_ENABLED\";\nbool IsRunningOnGvisor() { return GvisorPlatform() != Platform::kNative; }\n@@ -58,6 +59,11 @@ bool IsRunningWithHostinet() {\nreturn env && strcmp(env, \"host\") == 0;\n}\n+bool IsIOUringEnabled() {\n+ const char* env = getenv(kIOUringEnabled);\n+ return env && strcmp(env, \"TRUE\") == 0;\n+}\n+\n// Inline cpuid instruction. Preserve %ebx/%rbx register. In PIC compilations\n// %ebx contains the address of the global offset table. %rbx is occasionally\n// used to address stack variables in presence of dynamic allocas.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/test_util.h",
"new_path": "test/util/test_util.h",
"diff": "@@ -224,6 +224,7 @@ constexpr char kFuchsia[] = \"fuchsia\";\nbool IsRunningOnGvisor();\nconst std::string GvisorPlatform();\nbool IsRunningWithHostinet();\n+bool IsIOUringEnabled();\n#ifdef __linux__\nvoid SetupGvisorDeathTest();\n"
}
] | Go | Apache License 2.0 | google/gvisor | Disable io_uring syscalls by default.
The current io_uring support is very limited and experimental. Disable
it by default, and add a flag to enable it for testing.
PiperOrigin-RevId: 500760451 |
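
The gating described above follows a simple pattern: syscall entry points consult a package-level flag and report ENOSYS when the feature is off. A minimal sketch under that assumption (the real wiring from the --iouring flag is elided):

```go
package main

import (
	"fmt"
	"syscall"
)

// IOUringEnabled mirrors the package-level switch added in the commit; in
// runsc it is populated from the --iouring flag when the sandbox boots.
var IOUringEnabled = false

func ioUringSetup(entries uint32) (uintptr, error) {
	if !IOUringEnabled {
		// With the feature off, behave as if the syscall does not exist.
		return 0, syscall.ENOSYS
	}
	// ... the real setup path is elided in this sketch ...
	return 3, nil
}

func main() {
	if _, err := ioUringSetup(1); err != nil {
		fmt.Println("io_uring_setup:", err) // "function not implemented"
	}
	IOUringEnabled = true
	fd, _ := ioUringSetup(1)
	fmt.Println("io_uring fd:", fd)
}
```
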
259,975 | 10.01.2023 11:10:13 | 28,800 | 2db971f5266d473cba5bbfa9bc17c9f1aacc6869 | Reduce number of Tensorflow test cases for dashboard.
A full run of all tensorflow test cases can take upwards of an hour
with very little benefit gained for most workloads. Reduce the number
of cases for dashboard runs. | [
{
"change_type": "MODIFY",
"old_path": ".buildkite/pipeline.yaml",
"new_path": ".buildkite/pipeline.yaml",
"diff": "@@ -509,7 +509,7 @@ steps:\ncommand: make -i benchmark-platforms BENCHMARKS_SUITE=sysbench BENCHMARKS_TARGETS=test/benchmarks/base:sysbench_test\n- <<: *benchmarks\nlabel: \":tensorflow: TensorFlow benchmarks\"\n- command: make -i benchmark-platforms BENCHMARKS_SUITE=tensorflow BENCHMARKS_TARGETS=test/benchmarks/ml:tensorflow_test BENCHMARKS_OPTIONS=-test.benchtime=1ns\n+ command: make -i benchmark-platforms BENCHMARKS_SUITE=tensorflow BENCHMARKS_TARGETS=test/benchmarks/ml:tensorflow_test BENCHMARKS_FILTER=BenchmarkTensorflowDashboard\n- <<: *benchmarks\nlabel: \":gear: Syscall benchmarks\"\ncommand: make -i benchmark-platforms BENCHMARKS_SUITE=syscall BENCHMARKS_TARGETS=test/benchmarks/base:syscallbench_test\n"
},
{
"change_type": "MODIFY",
"old_path": "test/benchmarks/ml/tensorflow_test.go",
"new_path": "test/benchmarks/ml/tensorflow_test.go",
"diff": "@@ -23,32 +23,42 @@ import (\n\"gvisor.dev/gvisor/test/benchmarks/tools\"\n)\n+func BenchmarkTensorflowDashboard(b *testing.B) {\n+ workloads := map[string]string{\n+ \"ConvolutionalNetwork\": \"3_NeuralNetworks/convolutional_network.py\",\n+ \"LogisticRegression\": \"2_BasicModels/logistic_regression.py\",\n+ \"NeuralNetwork\": \"3_NeuralNetworks/neural_network.py\",\n+ }\n+ doTensorflowTest(b, workloads)\n+}\n+\n// BenchmarkTensorflow runs workloads from a TensorFlow tutorial.\n// See: https://github.com/aymericdamien/TensorFlow-Examples\nfunc BenchmarkTensorflow(b *testing.B) {\n- workloads := []struct {\n- name, file string\n- }{\n- {\"GradientDecisionTree\", \"2_BasicModels/gradient_boosted_decision_tree.py\"},\n- {\"Kmeans\", \"2_BasicModels/kmeans.py\"},\n- {\"LogisticRegression\", \"2_BasicModels/logistic_regression.py\"},\n- {\"NearestNeighbor\", \"2_BasicModels/nearest_neighbor.py\"},\n- {\"RandomForest\", \"2_BasicModels/random_forest.py\"},\n- {\"ConvolutionalNetwork\", \"3_NeuralNetworks/convolutional_network.py\"},\n- {\"MultilayerPerceptron\", \"3_NeuralNetworks/multilayer_perceptron.py\"},\n- {\"NeuralNetwork\", \"3_NeuralNetworks/neural_network.py\"},\n+ workloads := map[string]string{\n+ \"GradientDecisionTree\": \"2_BasicModels/gradient_boosted_decision_tree.py\",\n+ \"Kmeans\": \"2_BasicModels/kmeans.py\",\n+ \"LogisticRegression\": \"2_BasicModels/logistic_regression.py\",\n+ \"NearestNeighbor\": \"2_BasicModels/nearest_neighbor.py\",\n+ \"RandomForest\": \"2_BasicModels/random_forest.py\",\n+ \"ConvolutionalNetwork\": \"3_NeuralNetworks/convolutional_network.py\",\n+ \"MultilayerPerceptron\": \"3_NeuralNetworks/multilayer_perceptron.py\",\n+ \"NeuralNetwork\": \"3_NeuralNetworks/neural_network.py\",\n+ }\n+ doTensorflowTest(b, workloads)\n}\n+func doTensorflowTest(b *testing.B, workloads map[string]string) {\nmachine, err := harness.GetMachine()\nif err != nil {\nb.Fatalf(\"failed to get machine: %v\", err)\n}\ndefer machine.CleanUp()\n- for _, workload := range workloads {\n+ for name, file := range workloads {\nrunName, err := tools.ParametersToName(tools.Parameter{\nName: \"operation\",\n- Value: workload.name,\n+ Value: name,\n})\nif err != nil {\nb.Fatalf(\"Failed to parse param: %v\", err)\n@@ -73,7 +83,7 @@ func BenchmarkTensorflow(b *testing.B) {\nImage: \"benchmarks/tensorflow\",\nEnv: []string{\"PYTHONPATH=$PYTHONPATH:/TensorFlow-Examples/examples\"},\nWorkDir: \"/TensorFlow-Examples/examples\",\n- }, \"python\", workload.file); err != nil {\n+ }, \"python\", file); err != nil {\nb.Errorf(\"failed to run container: %v logs: %s\", err, out)\n}\nb.StopTimer()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Reduce number of Tensorflow test cases for dashboard.
A full run of all tensorflow test cases can take upwards of an hour
with very little benefit gained for most workloads. Reduce the number
of cases for dashboard runs.
PiperOrigin-RevId: 501043752 |
259,977 | 10.01.2023 12:02:36 | 28,800 | c4976f7d8164e8cb08a2bce445bbf7ed2e82dc91 | [packetimpact] Fix typo in comment | [
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/runner/main.go",
"new_path": "test/packetimpact/runner/main.go",
"diff": "@@ -503,7 +503,7 @@ func withSavedNetNS(f func() error) error {\nreturn err\n}\ndefer func() {\n- // Resotre the namespace when we return from f.\n+ // Restore the namespace when we return from f.\nif err := unix.Setns(saved, unix.CLONE_NEWNET); err != nil {\npanic(fmt.Sprintf(\"setns(%d, CLONE_NEWNET) = %s\", saved, err))\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | [packetimpact] Fix typo in comment
PiperOrigin-RevId: 501057728 |
259,907 | 10.01.2023 17:34:27 | 28,800 | 5bb418ecdb695e47cb1a86dba4422ca7670ccbb1 | Unmount procfs from sandbox process, irrespective of apply-caps flag.
Earlier, we only unmounted the procfs mount if the apply-caps flag was not set.
Directfs and hostinet set this flag to true. | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/boot.go",
"new_path": "runsc/cmd/boot.go",
"diff": "@@ -137,7 +137,7 @@ func (b *Boot) SetFlags(f *flag.FlagSet) {\nf.BoolVar(&b.setUpRoot, \"setup-root\", false, \"if true, set up an empty root for the process\")\nf.BoolVar(&b.pidns, \"pidns\", false, \"if true, the sandbox is in its own PID namespace\")\nf.IntVar(&b.cpuNum, \"cpu-num\", 0, \"number of CPUs to create inside the sandbox\")\n- f.IntVar(&b.procMountSyncFD, \"proc-mount-sync-fd\", -1, \"file descriptor that has to be closed when /proc isn't needed\")\n+ f.IntVar(&b.procMountSyncFD, \"proc-mount-sync-fd\", -1, \"file descriptor that has to be written to when /proc isn't needed anymore and can be unmounted\")\nf.Uint64Var(&b.totalMem, \"total-memory\", 0, \"sets the initial amount of total memory to report back to the container\")\nf.BoolVar(&b.attached, \"attached\", false, \"if attached is true, kills the sandbox process when the parent process terminates\")\nf.StringVar(&b.productName, \"product-name\", \"\", \"value to show in /sys/devices/virtual/dmi/id/product_name\")\n@@ -196,7 +196,7 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...any) subcomma\nutil.Fatalf(\"error setting up chroot: %v\", err)\n}\n- if !b.applyCaps && !conf.Rootless {\n+ if !conf.Rootless {\n// /proc is umounted from a forked process, because the\n// current one is going to re-execute itself without\n// capabilities.\n@@ -208,13 +208,16 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...any) subcomma\n}\nb.procMountSyncFD = int(w.Fd())\n- // Remove --apply-caps arg to call myself. It has already been done.\n- args := b.prepareArgs(\"setup-root\")\n-\n- // Clear FD_CLOEXEC.\n+ // Clear FD_CLOEXEC. Regardless of b.applyCaps, this process will be\n+ // re-executed. procMountSyncFD should remain open.\nif _, _, errno := unix.RawSyscall(unix.SYS_FCNTL, w.Fd(), unix.F_SETFD, 0); errno != 0 {\nutil.Fatalf(\"error clearing CLOEXEC: %v\", errno)\n}\n+\n+ if !b.applyCaps {\n+ // Remove --setup-root arg to call myself. It has already been done.\n+ args := b.prepareArgs(\"setup-root\")\n+\n// Note that we've already read the spec from the spec FD, and\n// we will read it again after the exec call. This works\n// because the ReadSpecFromFile function seeks to the beginning\n@@ -223,6 +226,7 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...any) subcomma\npanic(\"unreachable\")\n}\n}\n+ }\n// Get the spec from the specFD. We *must* keep this os.File alive past\n// the call setCapsAndCallSelf, otherwise the FD will be closed and the\n"
}
] | Go | Apache License 2.0 | google/gvisor | Unmount procfs from sandbox process, irrespective of apply-caps flag.
Earlier, we were only unmounting the procfs mount if the apply-caps flag was not set.
Directfs and hostinet set this flag to true.
PiperOrigin-RevId: 501136480 |
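The mechanism above hinges on keeping the proc-mount sync FD (the write end of a pipe) open while the sandbox process re-executes itself. Below is a minimal sketch of that one detail, assuming only what the diff shows: os.Pipe marks both ends close-on-exec, so FD_CLOEXEC has to be cleared with fcntl before the re-exec; the helper name is made up for illustration.

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// keepOpenAcrossExec clears FD_CLOEXEC on f so the descriptor survives a
// re-exec of the current process. os.Pipe marks both ends close-on-exec by
// default, which is why the diff issues this fcntl on the write end.
func keepOpenAcrossExec(f *os.File) error {
	if _, _, errno := unix.RawSyscall(unix.SYS_FCNTL, f.Fd(), unix.F_SETFD, 0); errno != 0 {
		return fmt.Errorf("clearing FD_CLOEXEC: %v", errno)
	}
	return nil
}

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	defer r.Close()
	if err := keepOpenAcrossExec(w); err != nil {
		panic(err)
	}
	// w.Fd() can now be forwarded to the re-executed process (the diff does
	// this via the --proc-mount-sync-fd flag); a write to it later signals
	// that /proc is no longer needed and can be unmounted.
	fmt.Println("proc mount sync FD:", w.Fd())
}
```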
259,853 | 11.01.2023 13:21:11 | 28,800 | ff8aa35e1c2c9e3ad33dc991f605baa2b551e2db | Dynamically determine the address space size | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/BUILD",
"new_path": "pkg/abi/linux/BUILD",
"diff": "@@ -56,6 +56,8 @@ go_library(\n\"linux.go\",\n\"membarrier.go\",\n\"mm.go\",\n+ \"mm_amd64.go\",\n+ \"mm_arm64.go\",\n\"mqueue.go\",\n\"msgqueue.go\",\n\"netdevice.go\",\n@@ -99,6 +101,7 @@ go_library(\n\"//pkg/hostarch\",\n\"//pkg/marshal\",\n\"//pkg/marshal/primitive\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/mm.go",
"new_path": "pkg/abi/linux/mm.go",
"diff": "package linux\n+import (\n+ \"fmt\"\n+\n+ \"golang.org/x/sys/unix\"\n+)\n+\n// Protections for mmap(2).\nconst (\nPROT_NONE = 0\n@@ -128,3 +134,26 @@ const (\nMPOL_MF_VALID = MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL\n)\n+\n+// TaskSize is the address space size.\n+var TaskSize = func() uintptr {\n+ pageSize := uintptr(unix.Getpagesize())\n+ for _, s := range feasibleTaskSizes {\n+ // mmap returns ENOMEM if addr is greater than TASK_SIZE,\n+ // otherwise it returns EINVAL, because addr isn't aligned to\n+ // the page size.\n+ _, _, errno := unix.RawSyscall6(\n+ unix.SYS_MMAP,\n+ s-pageSize-1,\n+ 512,\n+ uintptr(unix.PROT_NONE),\n+ uintptr(unix.MAP_ANONYMOUS|unix.MAP_PRIVATE|unix.MAP_FIXED), 0, 0)\n+ if errno == unix.EINVAL {\n+ return s\n+ }\n+ if errno != unix.ENOMEM {\n+ panic(fmt.Sprintf(\"mmap returned unexpected error: %d\", errno))\n+ }\n+ }\n+ panic(\"None of the address space sizes could be successfully mmaped\")\n+}()\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/abi/linux/mm_amd64.go",
"diff": "+// Copyright 2023 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+//go:build amd64\n+// +build amd64\n+\n+package linux\n+\n+// TASK_SIZE can be one of two values, corresponding to 4-level and 5-level\n+// paging.\n+//\n+// The array has to be sorted in decreasing order.\n+var feasibleTaskSizes = []uintptr{0xffffffffff000, 0x7ffffffff000}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/abi/linux/mm_arm64.go",
"diff": "+// Copyright 2023 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+//go:build arm64\n+// +build arm64\n+\n+package linux\n+\n+// Only 4K page size is supported on arm64. In this case, TASK_SIZE can\n+// be one of three values, corresponding to 3-level, 4-level and\n+// 5-level paging.\n+//\n+// The array has to be sorted in decreasing order.\n+var feasibleTaskSizes = []uintptr{1 << 52, 1 << 48, 1 << 39}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess.go",
"diff": "@@ -30,6 +30,14 @@ import (\n\"gvisor.dev/gvisor/pkg/sync\"\n)\n+var (\n+ // maximumUserAddress is the largest possible user address.\n+ maximumUserAddress = linux.TaskSize\n+\n+ // stubInitAddress is the initial attempt link address for the stub.\n+ stubInitAddress = linux.TaskSize\n+)\n+\n// Linux kernel errnos which \"should never be seen by user programs\", but will\n// be revealed to ptrace syscall exit tracing.\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess_amd64.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess_amd64.go",
"diff": "@@ -28,12 +28,6 @@ import (\n)\nconst (\n- // maximumUserAddress is the largest possible user address.\n- maximumUserAddress = 0x7ffffffff000\n-\n- // stubInitAddress is the initial attempt link address for the stub.\n- stubInitAddress = 0x7fffffff0000\n-\n// initRegsRipAdjustment is the size of the syscall instruction.\ninitRegsRipAdjustment = 2\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess_arm64.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess_arm64.go",
"diff": "@@ -28,13 +28,6 @@ import (\n)\nconst (\n- // maximumUserAddress is the largest possible user address.\n- maximumUserAddress = 0xfffffffff000\n-\n- // stubInitAddress is the initial attempt link address for the stub.\n- // Only support 48bits VA currently.\n- stubInitAddress = 0xffffffff0000\n-\n// initRegsRipAdjustment is the size of the svc instruction.\ninitRegsRipAdjustment = 4\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Dynamically determine the address space size
PiperOrigin-RevId: 501362170 |
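The address-space probe added in mm.go reads well as a standalone program. The sketch below is adapted from that diff rather than new logic; detectTaskSize and candidateTaskSizes are names introduced here, and the candidate values are the amd64 ones from mm_amd64.go. The trick is that mmap with MAP_FIXED at a deliberately unaligned address just under a candidate TASK_SIZE fails with EINVAL when the address itself is acceptable (only the alignment is wrong) and with ENOMEM when the address lies above the real limit.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// candidateTaskSizes must be sorted in decreasing order. These are the amd64
// values from the diff (5-level and 4-level paging); arm64 uses its own list.
var candidateTaskSizes = []uintptr{0xffffffffff000, 0x7ffffffff000}

// detectTaskSize probes for the kernel's TASK_SIZE (Linux only).
func detectTaskSize() (uintptr, error) {
	pageSize := uintptr(unix.Getpagesize())
	for _, s := range candidateTaskSizes {
		_, _, errno := unix.RawSyscall6(
			unix.SYS_MMAP,
			s-pageSize-1, // deliberately unaligned, just below the candidate limit
			512,
			uintptr(unix.PROT_NONE),
			uintptr(unix.MAP_ANONYMOUS|unix.MAP_PRIVATE|unix.MAP_FIXED),
			0, 0)
		if errno == unix.EINVAL {
			// The address was acceptable; only the alignment was wrong.
			return s, nil
		}
		if errno != unix.ENOMEM {
			return 0, fmt.Errorf("mmap returned unexpected error: %d", errno)
		}
	}
	return 0, fmt.Errorf("no candidate address space size was accepted")
}

func main() {
	size, err := detectTaskSize()
	if err != nil {
		panic(err)
	}
	fmt.Printf("detected TASK_SIZE: %#x\n", size)
}
```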
259,907 | 11.01.2023 13:30:01 | 28,800 | 69859a21f82c61762906465340795c484f3927d1 | Add a stress test for multicontainers to check for memory leaks.
The test runs 50 subcontainers that do a lot of filesystem work and checks that
after the containers exit, there is no increase in sandbox memory usage. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/multi_container_test.go",
"new_path": "runsc/container/multi_container_test.go",
"diff": "@@ -21,6 +21,7 @@ import (\n\"os\"\n\"path\"\n\"path/filepath\"\n+ \"reflect\"\n\"strings\"\n\"testing\"\n\"time\"\n@@ -2291,3 +2292,117 @@ func TestMultiContainerOverlayLeaks(t *testing.T) {\nt.Errorf(\"overlay filestore usage changed: old = %d, new = %d\", oldOverlayUsage, newOverlayUsage)\n}\n}\n+\n+// Test that spawning many subcontainers that do a lot of filesystem operations\n+// does not lead to memory leaks.\n+func TestMultiContainerMemoryLeakStress(t *testing.T) {\n+ conf := testutil.TestConfig(t)\n+ app, err := testutil.FindFile(\"test/cmd/test_app/test_app\")\n+ if err != nil {\n+ t.Fatal(\"error finding test_app:\", err)\n+ }\n+\n+ rootDir, cleanup, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer cleanup()\n+ conf.RootDir = rootDir\n+\n+ // Configure root overlay (backed by memory) so that containers can create\n+ // files in the root directory.\n+ conf.Overlay2 = config.Overlay2{\n+ RootMount: true,\n+ }\n+\n+ // Root container will just sleep.\n+ sleep := []string{\"sleep\", \"1000\"}\n+\n+ // Subcontainers will do a lot of filesystem work. Create a lot of them.\n+ createFsTree := []string{app, \"fsTreeCreate\", \"--depth=10\", \"--file-per-level=10\", \"--file-size=1048576\"}\n+ const warmupContainers = 5\n+ const stressContainers = 45\n+ cmds := make([][]string, 0, warmupContainers+stressContainers+1)\n+ cmds = append(cmds, sleep)\n+ for i := 0; i < warmupContainers+stressContainers; i++ {\n+ cmds = append(cmds, createFsTree)\n+ }\n+ testSpecs, ids := createSpecs(cmds...)\n+ // Make sure none of the root filesystems are read-only, otherwise we won't\n+ // be able to create the file.\n+ for _, s := range testSpecs {\n+ s.Root.Readonly = false\n+ }\n+\n+ // Start the root container.\n+ rootCont, cleanup, err := startContainers(conf, testSpecs[:1], ids[:1])\n+ if err != nil {\n+ t.Fatalf(\"error starting containers: %v\", err)\n+ }\n+ defer cleanup()\n+\n+ // Warm up the sandbox.\n+ warmUpContainers, cleanUp2, err := startContainers(conf, testSpecs[1:1+warmupContainers], ids[1:1+warmupContainers])\n+ if err != nil {\n+ t.Fatalf(\"error starting containers: %v\", err)\n+ }\n+ defer cleanUp2()\n+ // Wait for all warm up subcontainers to stop.\n+ for i, c := range warmUpContainers {\n+ // Wait for the sub-container to stop.\n+ if ws, err := c.Wait(); err != nil {\n+ t.Errorf(\"failed to wait for warm up subcontainer number %d: %v\", i, err)\n+ } else if es := ws.ExitStatus(); es != 0 {\n+ t.Errorf(\"warm up subcontainer number %d exited with non-zero status %d\", i, es)\n+ }\n+ }\n+\n+ // Give the reclaimer goroutine some time to reclaim.\n+ time.Sleep(3 * time.Second)\n+\n+ // Measure the memory usage after the warm up.\n+ oldUsage, err := rootCont[0].Sandbox.Usage(true /* Full */)\n+ if err != nil {\n+ t.Fatalf(\"sandbox.Usage failed: %v\", err)\n+ }\n+\n+ // Hammer the sandbox with sub containers.\n+ subConts, cleanup3, err := startContainers(conf, testSpecs[1+warmupContainers:1+warmupContainers+stressContainers], ids[1+warmupContainers:1+warmupContainers+stressContainers])\n+ if err != nil {\n+ t.Fatalf(\"error starting containers: %v\", err)\n+ }\n+ defer cleanup3()\n+ // Wait for all subcontainers to stop.\n+ for i, c := range subConts {\n+ if ws, err := c.Wait(); err != nil {\n+ t.Errorf(\"failed to wait for subcontainer number %d: %v\", i, err)\n+ } else if es := ws.ExitStatus(); es != 0 {\n+ t.Errorf(\"subcontainer number %d exited with non-zero status %d\", i, es)\n+ }\n+ }\n+\n+ // Give the 
reclaimer goroutine some time to reclaim.\n+ time.Sleep(3 * time.Second)\n+\n+ // Compare memory usage.\n+ newUsage, err := rootCont[0].Sandbox.Usage(true /* Full */)\n+ if err != nil {\n+ t.Fatalf(\"sandbox.Usage failed: %v\", err)\n+ }\n+ // Note that all fields of control.MemoryUsage are exported and uint64.\n+ oldUsageV := reflect.ValueOf(oldUsage)\n+ newUsageV := reflect.ValueOf(newUsage)\n+ numFields := oldUsageV.NumField()\n+ for i := 0; i < numFields; i++ {\n+ name := oldUsageV.Type().Field(i).Name\n+ oldVal := oldUsageV.Field(i).Interface().(uint64)\n+ newVal := newUsageV.Field(i).Interface().(uint64)\n+ if newVal <= oldVal {\n+ continue\n+ }\n+\n+ if ((newVal-oldVal)*100)/oldVal > 5 {\n+ t.Errorf(\"%s usage increased by more than 5%%: old=%d, new=%d\", name, oldVal, newVal)\n+ }\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add a stress test for multicontainers to check for memory leaks.
The test runs 50 subcontainers that do a lot of filesystem work and checks that
after the containers exit, there is no increase in sandbox memory usage.
PiperOrigin-RevId: 501364377 |
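The new test compares before/after sandbox usage with reflection, relying on every field of the usage struct being an exported uint64. A self-contained sketch of that comparison, using a made-up usage type in place of the real control.MemoryUsage:

```go
package main

import (
	"fmt"
	"reflect"
)

// usage stands in for a stats struct whose exported fields are all uint64,
// like the sandbox memory-usage struct compared in the test above.
type usage struct {
	Mapped uint64
	Anon   uint64
}

// grownFields returns the names of fields that increased by more than
// thresholdPercent between prev and curr. Both must be struct values of the
// same type with only uint64 fields. The oldVal == 0 guard is an addition
// here to avoid dividing by zero.
func grownFields(prev, curr any, thresholdPercent uint64) []string {
	prevV := reflect.ValueOf(prev)
	currV := reflect.ValueOf(curr)
	var grown []string
	for i := 0; i < prevV.NumField(); i++ {
		oldVal := prevV.Field(i).Interface().(uint64)
		newVal := currV.Field(i).Interface().(uint64)
		if newVal <= oldVal || oldVal == 0 {
			continue
		}
		if ((newVal-oldVal)*100)/oldVal > thresholdPercent {
			grown = append(grown, prevV.Type().Field(i).Name)
		}
	}
	return grown
}

func main() {
	before := usage{Mapped: 1000, Anon: 2000}
	after := usage{Mapped: 1010, Anon: 2500} // Mapped +1%, Anon +25%
	fmt.Println(grownFields(before, after, 5)) // prints [Anon]
}
```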
260,004 | 11.01.2023 17:22:35 | 28,800 | 4560252d1693c65b9e9106cfdb93208a40b90c6e | Coalesce records for reports on MGP disable
This change sends multiple records into a single IGMPv3/MLDv2 report
instead of a single record per report when disabling IGMPv3/MLDv2.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/ip/generic_multicast_protocol.go",
"new_path": "pkg/tcpip/network/internal/ip/generic_multicast_protocol.go",
"diff": "@@ -343,20 +343,17 @@ func (g *GenericMulticastProtocolState) MakeAllNonMemberLocked() {\n}\ng.cancelV2ReportTimers()\n+ var v2ReportBuilder MulticastGroupProtocolV2ReportBuilder\nvar handler func(tcpip.Address, *multicastGroupState)\nswitch g.mode {\ncase protocolModeV2:\n+ v2ReportBuilder = g.opts.Protocol.NewReportV2Builder()\nhandler = func(groupAddress tcpip.Address, _ *multicastGroupState) {\n// Send a report immediately to announce us leaving the group.\n- reportBuilder := g.opts.Protocol.NewReportV2Builder()\n- reportBuilder.AddRecord(\n+ v2ReportBuilder.AddRecord(\nMulticastGroupProtocolV2ReportRecordChangeToIncludeMode,\ngroupAddress,\n)\n- // Nothing meaningful we can do with the error here - this method may be\n- // called when an interface is being disabled when we expect sends to\n- // fail.\n- _, _ = reportBuilder.Send()\n}\ncase protocolModeV1Compatibility:\nhandler = g.transitionToNonMemberLocked\n@@ -380,6 +377,13 @@ func (g *GenericMulticastProtocolState) MakeAllNonMemberLocked() {\ng.memberships[groupAddress] = info\n}\n}\n+\n+ if v2ReportBuilder != nil {\n+ // Nothing meaningful we can do with the error here - this method may be\n+ // called when an interface is being disabled when we expect sends to\n+ // fail.\n+ _, _ = v2ReportBuilder.Send()\n+ }\n}\n// InitializeGroupsLocked initializes each group, as if they were newly joined\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/multicast_group_test.go",
"new_path": "pkg/tcpip/network/multicast_group_test.go",
"diff": "@@ -1359,14 +1359,15 @@ func TestMGPReportMessages(t *testing.T) {\nfunc TestMGPWithNICLifecycle(t *testing.T) {\ntype subTest struct {\nname string\n+ v1Compatibility bool\nenterVersion func(e *channel.Endpoint)\nvalidateReport func(*testing.T, stack.PacketBufferPtr, tcpip.Address)\n- validateLeave func(*testing.T, stack.PacketBufferPtr, tcpip.Address)\n+ validateLeave func(*testing.T, stack.PacketBufferPtr, []tcpip.Address)\ncheckStats func(*testing.T, *stack.Stack, uint64, uint64, uint64)\n- getAndCheckGroupAddress func(*testing.T, map[tcpip.Address]bool, stack.PacketBufferPtr) tcpip.Address\n+ getAndCheckGroupAddress func(*testing.T, map[tcpip.Address]bool, stack.PacketBufferPtr) []tcpip.Address\n}\n- getAndCheckIGMPv2GroupAddress := func(t *testing.T, seen map[tcpip.Address]bool, p stack.PacketBufferPtr) tcpip.Address {\n+ getAndCheckIGMPv2GroupAddress := func(t *testing.T, seen map[tcpip.Address]bool, p stack.PacketBufferPtr) []tcpip.Address {\nt.Helper()\npayload := stack.PayloadSince(p.NetworkHeader())\n@@ -1384,10 +1385,10 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nt.Fatalf(\"already saw packet for group %s\", addr)\n}\nseen[addr] = true\n- return addr\n+ return []tcpip.Address{addr}\n}\n- getAndCheckIGMPv3GroupAddress := func(t *testing.T, seen map[tcpip.Address]bool, p stack.PacketBufferPtr) tcpip.Address {\n+ getAndCheckIGMPv3GroupAddress := func(t *testing.T, seen map[tcpip.Address]bool, p stack.PacketBufferPtr) []tcpip.Address {\nt.Helper()\npayload := stack.PayloadSince(p.NetworkHeader())\n@@ -1398,9 +1399,15 @@ func TestMGPWithNICLifecycle(t *testing.T) {\n}\nreport := header.IGMPv3Report(ipv4.Payload())\nrecords := report.GroupAddressRecords()\n+ var addrs []tcpip.Address\n+ for {\nrecord, res := records.Next()\n- if res != header.IGMPv3ReportGroupAddressRecordIteratorNextOk {\n- t.Fatalf(\"got records.Next() = %d, want = %d\", res, header.IGMPv3ReportGroupAddressRecordIteratorNextOk)\n+ switch res {\n+ case header.IGMPv3ReportGroupAddressRecordIteratorNextOk:\n+ case header.IGMPv3ReportGroupAddressRecordIteratorNextDone:\n+ return addrs\n+ default:\n+ t.Fatalf(\"unhandled res = %d\", res)\n}\naddr := record.GroupAddress()\n@@ -1411,14 +1418,11 @@ func TestMGPWithNICLifecycle(t *testing.T) {\n}\nseen[addr] = true\n- if _, res := records.Next(); res != header.IGMPv3ReportGroupAddressRecordIteratorNextDone {\n- t.Errorf(\"got records.Next() = %d, want = %d\", res, header.IGMPv3ReportGroupAddressRecordIteratorNextDone)\n+ addrs = append(addrs, addr)\n}\n-\n- return addr\n}\n- getAndCheckMLDv1MulticastAddress := func(t *testing.T, seen map[tcpip.Address]bool, p stack.PacketBufferPtr) tcpip.Address {\n+ getAndCheckMLDv1MulticastAddress := func(t *testing.T, seen map[tcpip.Address]bool, p stack.PacketBufferPtr) []tcpip.Address {\nt.Helper()\npayload := stack.PayloadSince(p.NetworkHeader())\ndefer payload.Release()\n@@ -1461,10 +1465,10 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nt.Fatalf(\"already saw packet for group %s\", addr)\n}\nseen[addr] = true\n- return addr\n+ return []tcpip.Address{addr}\n}\n- getAndCheckMLDv2MulticastAddress := func(t *testing.T, seen map[tcpip.Address]bool, p stack.PacketBufferPtr) tcpip.Address {\n+ getAndCheckMLDv2MulticastAddress := func(t *testing.T, seen map[tcpip.Address]bool, p stack.PacketBufferPtr) []tcpip.Address {\nt.Helper()\npayload := stack.PayloadSince(p.NetworkHeader())\n@@ -1502,12 +1506,18 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nreport := header.MLDv2Report(icmpv6.MessageBody())\nrecords := 
report.MulticastAddressRecords()\n+ var addrs []tcpip.Address\n+ for {\nrecord, res := records.Next()\n- if res != header.MLDv2ReportMulticastAddressRecordIteratorNextOk {\n- t.Fatalf(\"got records.Next() = %d, want = %d\", res, header.MLDv2ReportMulticastAddressRecordIteratorNextOk)\n+ switch res {\n+ case header.MLDv2ReportMulticastAddressRecordIteratorNextOk:\n+ case header.MLDv2ReportMulticastAddressRecordIteratorNextDone:\n+ return addrs\n+ default:\n+ t.Fatalf(\"unhandled res = %d\", res)\n}\n- addr := record.MulticastAddress()\n+ addr := record.MulticastAddress()\ns, ok := seen[addr]\nif !ok {\nt.Fatalf(\"unexpectedly got a packet for group %s\", addr)\n@@ -1516,12 +1526,8 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nt.Fatalf(\"already saw packet for group %s\", addr)\n}\nseen[addr] = true\n-\n- if _, res := records.Next(); res != header.MLDv2ReportMulticastAddressRecordIteratorNextDone {\n- t.Errorf(\"got records.Next() = %d, want = %d\", res, header.MLDv2ReportMulticastAddressRecordIteratorNextDone)\n+ addrs = append(addrs, addr)\n}\n-\n- return addr\n}\ntests := []struct {\n@@ -1532,9 +1538,9 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nmaxUnsolicitedResponseDelay time.Duration\nsentReportStat func(*stack.Stack) *tcpip.StatCounter\nsentLeaveStat func(*stack.Stack) *tcpip.StatCounter\n- validateReport func(*testing.T, stack.PacketBufferPtr, tcpip.Address)\n+ validateReport func(*testing.T, stack.PacketBufferPtr, []tcpip.Address)\nvalidateLeave func(*testing.T, stack.PacketBufferPtr, tcpip.Address)\n- getAndCheckGroupAddress func(*testing.T, map[tcpip.Address]bool, stack.PacketBufferPtr) tcpip.Address\n+ getAndCheckGroupAddress func(*testing.T, map[tcpip.Address]bool, stack.PacketBufferPtr) []tcpip.Address\ncheckInitialGroups func(*testing.T, *channel.Endpoint, *stack.Stack, *faketime.ManualClock) uint64\ncheckStats func(*testing.T, *stack.Stack, uint64, uint64, uint64)\nsubTests []subTest\n@@ -1551,17 +1557,20 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nsentLeaveStat: func(s *stack.Stack) *tcpip.StatCounter {\nreturn s.Stats().IGMP.PacketsSent.LeaveGroup\n},\n- validateReport: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\n+ validateReport: func(t *testing.T, p stack.PacketBufferPtr, addrs []tcpip.Address) {\nt.Helper()\n- validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n- Records: []header.IGMPv3ReportGroupAddressRecordSerializer{\n- {\n+ var records []header.IGMPv3ReportGroupAddressRecordSerializer\n+ for _, addr := range addrs {\n+ records = append(records, header.IGMPv3ReportGroupAddressRecordSerializer{\nRecordType: header.IGMPv3ReportRecordChangeToExcludeMode,\nGroupAddress: addr,\nSources: nil,\n- },\n- },\n+ })\n+ }\n+\n+ validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n+ Records: records,\n})\n},\nvalidateLeave: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\n@@ -1582,6 +1591,7 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nsubTests: []subTest{\n{\nname: \"V2\",\n+ v1Compatibility: true,\nenterVersion: func(e *channel.Endpoint) {\n// V2 query for unrelated group.\ncreateAndInjectIGMPPacket(e, igmpMembershipQuery, 1, ipv4MulticastAddr3, 0 /* extraLength */)\n@@ -1591,16 +1601,17 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nvalidateIGMPPacket(t, p, addr, igmpv2MembershipReport, 0, addr)\n},\n- validateLeave: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\n+ validateLeave: func(t *testing.T, p stack.PacketBufferPtr, addrs []tcpip.Address) {\nt.Helper()\n- 
validateIGMPPacket(t, p, header.IPv4AllRoutersGroup, igmpLeaveGroup, 0, addr)\n+ validateIGMPPacket(t, p, header.IPv4AllRoutersGroup, igmpLeaveGroup, 0, addrs[0])\n},\ncheckStats: iptestutil.CheckIGMPv2Stats,\ngetAndCheckGroupAddress: getAndCheckIGMPv2GroupAddress,\n},\n{\nname: \"V3\",\n+ v1Compatibility: false,\nenterVersion: func(*channel.Endpoint) {},\nvalidateReport: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\nt.Helper()\n@@ -1615,17 +1626,19 @@ func TestMGPWithNICLifecycle(t *testing.T) {\n},\n})\n},\n- validateLeave: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\n+ validateLeave: func(t *testing.T, p stack.PacketBufferPtr, addrs []tcpip.Address) {\nt.Helper()\n- validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n- Records: []header.IGMPv3ReportGroupAddressRecordSerializer{\n- {\n+ var records []header.IGMPv3ReportGroupAddressRecordSerializer\n+ for _, addr := range addrs {\n+ records = append(records, header.IGMPv3ReportGroupAddressRecordSerializer{\nRecordType: header.IGMPv3ReportRecordChangeToIncludeMode,\nGroupAddress: addr,\nSources: nil,\n- },\n- },\n+ })\n+ }\n+ validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n+ Records: records,\n})\n},\ncheckStats: iptestutil.CheckIGMPv3Stats,\n@@ -1645,17 +1658,19 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nsentLeaveStat: func(s *stack.Stack) *tcpip.StatCounter {\nreturn s.Stats().ICMP.V6.PacketsSent.MulticastListenerDone\n},\n- validateReport: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\n+ validateReport: func(t *testing.T, p stack.PacketBufferPtr, addrs []tcpip.Address) {\nt.Helper()\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n+ var records []header.MLDv2ReportMulticastAddressRecordSerializer\n+ for _, addr := range addrs {\n+ records = append(records, header.MLDv2ReportMulticastAddressRecordSerializer{\nRecordType: header.MLDv2ReportRecordChangeToExcludeMode,\nMulticastAddress: addr,\nSources: nil,\n- },\n- },\n+ })\n+ }\n+ validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n+ Records: records,\n})\n},\nvalidateLeave: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\n@@ -1677,6 +1692,7 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nsubTests: []subTest{\n{\nname: \"V1\",\n+ v1Compatibility: true,\nenterVersion: func(e *channel.Endpoint) {\n// V1 query for unrelated group.\ncreateAndInjectMLDPacket(e, mldQuery, 0, ipv6MulticastAddr3, 0 /* extraLength */)\n@@ -1686,16 +1702,17 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nvalidateMLDPacket(t, p, addr, mldReport, 0, addr)\n},\n- validateLeave: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\n+ validateLeave: func(t *testing.T, p stack.PacketBufferPtr, addrs []tcpip.Address) {\nt.Helper()\n- validateMLDPacket(t, p, header.IPv6AllRoutersLinkLocalMulticastAddress, mldDone, 0, addr)\n+ validateMLDPacket(t, p, header.IPv6AllRoutersLinkLocalMulticastAddress, mldDone, 0, addrs[0])\n},\ncheckStats: iptestutil.CheckMLDv1Stats,\ngetAndCheckGroupAddress: getAndCheckMLDv1MulticastAddress,\n},\n{\nname: \"V2\",\n+ v1Compatibility: false,\nenterVersion: func(*channel.Endpoint) {},\nvalidateReport: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\nt.Helper()\n@@ -1710,17 +1727,19 @@ func TestMGPWithNICLifecycle(t *testing.T) {\n},\n})\n},\n- validateLeave: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\n+ validateLeave: func(t 
*testing.T, p stack.PacketBufferPtr, addrs []tcpip.Address) {\nt.Helper()\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n+ var records []header.MLDv2ReportMulticastAddressRecordSerializer\n+ for _, addr := range addrs {\n+ records = append(records, header.MLDv2ReportMulticastAddressRecordSerializer{\nRecordType: header.MLDv2ReportRecordChangeToIncludeMode,\nMulticastAddress: addr,\nSources: nil,\n- },\n- },\n+ })\n+ }\n+ validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n+ Records: records,\n})\n},\ncheckStats: iptestutil.CheckMLDv2Stats,\n@@ -1769,15 +1788,19 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nif err := s.DisableNIC(nicID); err != nil {\nt.Fatalf(\"DisableNIC(%d): %s\", nicID, err)\n}\n- leaveCounter += uint64(len(test.multicastAddrs))\n- subTest.checkStats(t, s, reportCounter, leaveCounter, reportV2Counter)\n{\n+ numMessages := 1\n+ if subTest.v1Compatibility {\n+ numMessages = len(test.multicastAddrs)\n+ }\n+ leaveCounter += uint64(numMessages)\n+ subTest.checkStats(t, s, reportCounter, leaveCounter, reportV2Counter)\nseen := make(map[tcpip.Address]bool)\nfor _, a := range test.multicastAddrs {\nseen[a] = false\n}\n- for i := range test.multicastAddrs {\n+ for i := 0; i < numMessages; i++ {\np := e.Read()\nif p.IsNil() {\nt.Fatalf(\"expected (%d-th) leave message to be sent\", i)\n@@ -1816,20 +1839,19 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nif t.Failed() {\nt.FailNow()\n}\n+ subTest.checkStats(t, s, reportCounter, leaveCounter, reportV2Counter)\n// Joining/leaving a group while disabled should not send any messages.\nif err := s.DisableNIC(nicID); err != nil {\nt.Fatalf(\"DisableNIC(%d): %s\", nicID, err)\n}\n- reportV2Counter += uint64(len(test.multicastAddrs))\n+ reportV2Counter++\nsubTest.checkStats(t, s, reportCounter, leaveCounter, reportV2Counter)\n- for i := range test.multicastAddrs {\nif p := e.Read(); p.IsNil() {\n- t.Fatalf(\"expected (%d-th) leave message to be sent\", i)\n+ t.Fatal(\"expected leave message to be sent\")\n} else {\np.DecRef()\n}\n- }\nfor _, a := range test.multicastAddrs {\nif err := s.LeaveGroup(test.protoNum, nicID, a); err != nil {\nt.Fatalf(\"LeaveGroup(%d, nic, %s): %s\", test.protoNum, a, err)\n@@ -1857,7 +1879,7 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nif p := e.Read(); p.IsNil() {\nt.Fatal(\"expected a report message to be sent\")\n} else {\n- test.validateReport(t, p, test.finalMulticastAddr)\n+ test.validateReport(t, p, []tcpip.Address{test.finalMulticastAddr})\np.DecRef()\n}\n@@ -1867,7 +1889,7 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nif p := e.Read(); p.IsNil() {\nt.Fatal(\"expected a report message to be sent\")\n} else {\n- test.validateReport(t, p, test.finalMulticastAddr)\n+ test.validateReport(t, p, []tcpip.Address{test.finalMulticastAddr})\np.DecRef()\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Coalesce records for reports on MGP disable
This change sends multiple records into a single IGMPv3/MLDv2 report
instead of a single record per report when disabling IGMPv3/MLDv2.
Updates #8346
PiperOrigin-RevId: 501419672 |
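The effect of this change is easiest to see with a toy stand-in for the report builder: previously each group produced its own report (one Send per group); now all change-to-include records accumulate on one builder that is sent once after the loop. The type below only mimics the AddRecord/Send shape of the real MulticastGroupProtocolV2ReportBuilder and is not the netstack interface.

```go
package main

import "fmt"

// reportBuilder is an illustrative stand-in: records are accumulated and
// emitted together in a single report when Send is called.
type reportBuilder struct {
	records []string
}

func (b *reportBuilder) AddRecord(groupAddress string) {
	b.records = append(b.records, groupAddress)
}

func (b *reportBuilder) Send() {
	fmt.Printf("send: 1 report, %d record(s): %v\n", len(b.records), b.records)
}

func main() {
	groups := []string{"ff02::fb", "ff02::1:3", "ff05::2"} // example multicast groups

	// Before the change: one report (one packet) per leaving group.
	for _, g := range groups {
		b := &reportBuilder{}
		b.AddRecord(g)
		b.Send()
	}

	// After the change: all change-to-include records go into one report.
	b := &reportBuilder{}
	for _, g := range groups {
		b.AddRecord(g)
	}
	b.Send()
}
```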
260,004 | 11.01.2023 17:56:15 | 28,800 | e49f78af9e55ed12a06a06d9bc23421e53e57a99 | Extract IGMPv3/MLDv2 report validation helpers
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/testutil/BUILD",
"new_path": "pkg/tcpip/network/internal/testutil/BUILD",
"diff": "@@ -4,6 +4,7 @@ package(licenses = [\"notice\"])\ngo_library(\nname = \"testutil\",\n+ testonly = True,\nsrcs = [\"testutil.go\"],\nvisibility = [\n\"//pkg/tcpip/network:__pkg__\",\n@@ -16,6 +17,7 @@ go_library(\ndeps = [\n\"//pkg/bufferv2\",\n\"//pkg/tcpip\",\n+ \"//pkg/tcpip/checker\",\n\"//pkg/tcpip/header\",\n\"//pkg/tcpip/stack\",\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/testutil/testutil.go",
"new_path": "pkg/tcpip/network/internal/testutil/testutil.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"gvisor.dev/gvisor/pkg/bufferv2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/checker\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n)\n@@ -183,3 +184,53 @@ func CheckMLDv2Stats(t *testing.T, s *stack.Stack, reports, leaves, reportsV2 ui\n// In MLDv2 tests, reports/leaves are just MLDv2 reports.\ncheckMLDStats(t, s, 0 /* reports */, 0 /* leaves */, reports+leaves+reportsV2)\n}\n+\n+// ValidateIGMPv3Report validates an IGMPv3 report.\n+func ValidateIGMPv3Report(t *testing.T, v *bufferv2.View, srcAddr tcpip.Address, addrs []tcpip.Address, recordType header.IGMPv3ReportRecordType) {\n+ t.Helper()\n+\n+ var records []header.IGMPv3ReportGroupAddressRecordSerializer\n+ for _, addr := range addrs {\n+ records = append(records, header.IGMPv3ReportGroupAddressRecordSerializer{\n+ RecordType: recordType,\n+ GroupAddress: addr,\n+ Sources: nil,\n+ })\n+ }\n+\n+ checker.IPv4(t, v,\n+ checker.SrcAddr(srcAddr),\n+ checker.DstAddr(header.IGMPv3RoutersAddress),\n+ checker.TTL(header.IGMPTTL),\n+ checker.IPv4RouterAlert(),\n+ checker.IGMPv3Report(header.IGMPv3ReportSerializer{\n+ Records: records,\n+ }),\n+ )\n+}\n+\n+// ValidateMLDv2Report validates an MLDv2 report.\n+func ValidateMLDv2Report(t *testing.T, v *bufferv2.View, srcAddr tcpip.Address, addrs []tcpip.Address, recordType header.MLDv2ReportRecordType) {\n+ t.Helper()\n+\n+ var records []header.MLDv2ReportMulticastAddressRecordSerializer\n+ for _, addr := range addrs {\n+ records = append(records, header.MLDv2ReportMulticastAddressRecordSerializer{\n+ RecordType: recordType,\n+ MulticastAddress: addr,\n+ Sources: nil,\n+ })\n+ }\n+\n+ checker.IPv6WithExtHdr(t, v,\n+ checker.IPv6ExtHdr(\n+ checker.IPv6HopByHopExtensionHeader(checker.IPv6RouterAlert(header.IPv6RouterAlertMLD)),\n+ ),\n+ checker.SrcAddr(srcAddr),\n+ checker.DstAddr(header.MLDv2RoutersAddress),\n+ checker.TTL(header.MLDHopLimit),\n+ checker.MLDv2Report(header.MLDv2ReportSerializer{\n+ Records: records,\n+ }),\n+ )\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/igmp_test.go",
"new_path": "pkg/tcpip/network/ipv4/igmp_test.go",
"diff": "@@ -72,22 +72,7 @@ func validateIgmpv3ReportPacket(t *testing.T, pkt stack.PacketBufferPtr, srcAddr\npayload := stack.PayloadSince(pkt.NetworkHeader())\ndefer payload.Release()\n- checker.IPv4(t, payload,\n- checker.SrcAddr(srcAddr),\n- checker.DstAddr(header.IGMPv3RoutersAddress),\n- // TTL for an IGMP message must be 1 as per RFC 2236 section 2.\n- checker.TTL(1),\n- checker.IPv4RouterAlert(),\n- checker.IGMPv3Report(header.IGMPv3ReportSerializer{\n- Records: []header.IGMPv3ReportGroupAddressRecordSerializer{\n- {\n- RecordType: header.IGMPv3ReportRecordChangeToExcludeMode,\n- GroupAddress: groupAddress,\n- Sources: nil,\n- },\n- },\n- }),\n- )\n+ iptestutil.ValidateIGMPv3Report(t, payload, srcAddr, []tcpip.Address{groupAddress}, header.IGMPv3ReportRecordChangeToExcludeMode)\n}\ntype igmpTestContext struct {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/mld_test.go",
"new_path": "pkg/tcpip/network/ipv6/mld_test.go",
"diff": "@@ -62,19 +62,11 @@ func validateMLDPacket(t *testing.T, v *bufferv2.View, localAddress, remoteAddre\n)\n}\n-func validateMLDv2ReportPacket(t *testing.T, v *bufferv2.View, localAddress tcpip.Address, report header.MLDv2ReportSerializer) {\n+func validateMLDv2ReportPacket(t *testing.T, v *bufferv2.View, localAddress tcpip.Address, groupAddress tcpip.Address, recordType header.MLDv2ReportRecordType) {\nt.Helper()\ndefer v.Release()\n- checker.IPv6WithExtHdr(t, v,\n- checker.IPv6ExtHdr(\n- checker.IPv6HopByHopExtensionHeader(checker.IPv6RouterAlert(header.IPv6RouterAlertMLD)),\n- ),\n- checker.SrcAddr(localAddress),\n- checker.DstAddr(header.MLDv2RoutersAddress),\n- checker.TTL(header.MLDHopLimit),\n- checker.MLDv2Report(report),\n- )\n+ iptestutil.ValidateMLDv2Report(t, v, localAddress, []tcpip.Address{groupAddress}, recordType)\n}\ntype mldTestContext struct {\n@@ -132,15 +124,7 @@ func TestIPv6JoinLeaveSolicitedNodeAddressPerformsMLD(t *testing.T) {\nrecordType = header.MLDv2ReportRecordChangeToIncludeMode\n}\n- validateMLDv2ReportPacket(t, v, localAddress, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n- RecordType: recordType,\n- MulticastAddress: groupAddress,\n- Sources: nil,\n- },\n- },\n- })\n+ validateMLDv2ReportPacket(t, v, localAddress, groupAddress, recordType)\n},\n},\n}\n@@ -366,15 +350,7 @@ func TestSendQueuedMLDReports(t *testing.T) {\nrecordType = header.MLDv2ReportRecordChangeToIncludeMode\n}\n- validateMLDv2ReportPacket(t, v, localAddress, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n- RecordType: recordType,\n- MulticastAddress: groupAddress,\n- Sources: nil,\n- },\n- },\n- })\n+ validateMLDv2ReportPacket(t, v, localAddress, groupAddress, recordType)\n},\ncheckStats: iptestutil.CheckMLDv2Stats,\ngetAndCheckGroupAddress: getAndCheckMLDv2MulticastAddress,\n@@ -869,15 +845,7 @@ func TestMLDSkipProtocol(t *testing.T) {\nv1Compatibility: false,\nvalidate: func(t *testing.T, v *bufferv2.View, localAddress tcpip.Address, groupAddress tcpip.Address) {\nt.Helper()\n- validateMLDv2ReportPacket(t, v, localAddress, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n- RecordType: header.MLDv2ReportRecordChangeToExcludeMode,\n- MulticastAddress: groupAddress,\n- Sources: nil,\n- },\n- },\n- })\n+ validateMLDv2ReportPacket(t, v, localAddress, groupAddress, header.MLDv2ReportRecordChangeToExcludeMode)\n},\n},\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/multicast_group_test.go",
"new_path": "pkg/tcpip/network/multicast_group_test.go",
"diff": "@@ -101,21 +101,11 @@ func validateMLDPacket(t *testing.T, p stack.PacketBufferPtr, remoteAddress tcpi\n)\n}\n-func validateMLDv2ReportPacket(t *testing.T, p stack.PacketBufferPtr, report header.MLDv2ReportSerializer) {\n+func validateMLDv2ReportPacket(t *testing.T, p stack.PacketBufferPtr, addrs []tcpip.Address, recordType header.MLDv2ReportRecordType) {\nt.Helper()\n-\npayload := stack.PayloadSince(p.NetworkHeader())\ndefer payload.Release()\n-\n- checker.IPv6WithExtHdr(t, payload,\n- checker.IPv6ExtHdr(\n- checker.IPv6HopByHopExtensionHeader(checker.IPv6RouterAlert(header.IPv6RouterAlertMLD)),\n- ),\n- checker.SrcAddr(linkLocalIPv6Addr1),\n- checker.DstAddr(header.MLDv2RoutersAddress),\n- checker.TTL(header.MLDHopLimit),\n- checker.MLDv2Report(report),\n- )\n+ iptestutil.ValidateMLDv2Report(t, payload, linkLocalIPv6Addr1, addrs, recordType)\n}\n// validateIGMPPacket checks that a passed PacketInfo is an IPv4 IGMP packet\n@@ -139,18 +129,12 @@ func validateIGMPPacket(t *testing.T, p stack.PacketBufferPtr, remoteAddress tcp\n)\n}\n-func validateIGMPv3ReportPacket(t *testing.T, p stack.PacketBufferPtr, report header.IGMPv3ReportSerializer) {\n+func validateIGMPv3ReportPacket(t *testing.T, p stack.PacketBufferPtr, addrs []tcpip.Address, recordType header.IGMPv3ReportRecordType) {\nt.Helper()\npayload := stack.PayloadSince(p.NetworkHeader())\ndefer payload.Release()\n- checker.IPv4(t, payload,\n- checker.SrcAddr(stackIPv4Addr),\n- checker.DstAddr(header.IGMPv3RoutersAddress),\n- checker.TTL(header.IGMPTTL),\n- checker.IPv4RouterAlert(),\n- checker.IGMPv3Report(report),\n- )\n+ iptestutil.ValidateIGMPv3Report(t, payload, stackIPv4Addr, addrs, recordType)\n}\ntype multicastTestContext struct {\n@@ -240,15 +224,9 @@ func checkInitialIPv6Groups(t *testing.T, e *channel.Endpoint, s *stack.Stack, c\nif p := e.Read(); p.IsNil() {\nt.Fatal(\"expected a report message to be sent\")\n} else {\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n- RecordType: header.MLDv2ReportRecordChangeToExcludeMode,\n- MulticastAddress: ipv6AddrSNMC,\n- Sources: nil,\n- },\n- },\n- })\n+ v := stack.PayloadSince(p.NetworkHeader())\n+ iptestutil.ValidateMLDv2Report(t, v, linkLocalIPv6Addr1, []tcpip.Address{ipv6AddrSNMC}, header.MLDv2ReportRecordChangeToExcludeMode)\n+ v.Release()\np.DecRef()\n}\n@@ -263,15 +241,9 @@ func checkInitialIPv6Groups(t *testing.T, e *channel.Endpoint, s *stack.Stack, c\nif p := e.Read(); p.IsNil() {\nt.Fatal(\"expected a report message to be sent\")\n} else {\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n- RecordType: header.MLDv2ReportRecordChangeToIncludeMode,\n- MulticastAddress: ipv6AddrSNMC,\n- Sources: nil,\n- },\n- },\n- })\n+ v := stack.PayloadSince(p.NetworkHeader())\n+ iptestutil.ValidateMLDv2Report(t, v, linkLocalIPv6Addr1, []tcpip.Address{ipv6AddrSNMC}, header.MLDv2ReportRecordChangeToIncludeMode)\n+ v.Release()\np.DecRef()\n}\n@@ -587,15 +559,7 @@ func TestMGPJoinGroup(t *testing.T) {\nvalidateReport: func(t *testing.T, p stack.PacketBufferPtr) {\nt.Helper()\n- validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n- Records: []header.IGMPv3ReportGroupAddressRecordSerializer{\n- {\n- RecordType: header.IGMPv3ReportRecordChangeToExcludeMode,\n- GroupAddress: ipv4MulticastAddr1,\n- Sources: nil,\n- },\n- },\n- })\n+ validateIGMPv3ReportPacket(t, p, []tcpip.Address{ipv4MulticastAddr1}, 
header.IGMPv3ReportRecordChangeToExcludeMode)\n},\ncheckStats: iptestutil.CheckIGMPv3Stats,\n},\n@@ -630,15 +594,7 @@ func TestMGPJoinGroup(t *testing.T) {\nvalidateReport: func(t *testing.T, p stack.PacketBufferPtr) {\nt.Helper()\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n- RecordType: header.MLDv2ReportRecordChangeToExcludeMode,\n- MulticastAddress: ipv6MulticastAddr1,\n- Sources: nil,\n- },\n- },\n- })\n+ validateMLDv2ReportPacket(t, p, []tcpip.Address{ipv6MulticastAddr1}, header.MLDv2ReportRecordChangeToExcludeMode)\n},\ncheckStats: iptestutil.CheckMLDv2Stats,\n},\n@@ -758,28 +714,12 @@ func TestMGPLeaveGroup(t *testing.T) {\nvalidateReport: func(t *testing.T, p stack.PacketBufferPtr) {\nt.Helper()\n- validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n- Records: []header.IGMPv3ReportGroupAddressRecordSerializer{\n- {\n- RecordType: header.IGMPv3ReportRecordChangeToExcludeMode,\n- GroupAddress: ipv4MulticastAddr1,\n- Sources: nil,\n- },\n- },\n- })\n+ validateIGMPv3ReportPacket(t, p, []tcpip.Address{ipv4MulticastAddr1}, header.IGMPv3ReportRecordChangeToExcludeMode)\n},\nvalidateLeave: func(t *testing.T, p stack.PacketBufferPtr) {\nt.Helper()\n- validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n- Records: []header.IGMPv3ReportGroupAddressRecordSerializer{\n- {\n- RecordType: header.IGMPv3ReportRecordChangeToIncludeMode,\n- GroupAddress: ipv4MulticastAddr1,\n- Sources: nil,\n- },\n- },\n- })\n+ validateIGMPv3ReportPacket(t, p, []tcpip.Address{ipv4MulticastAddr1}, header.IGMPv3ReportRecordChangeToIncludeMode)\n},\nleaveCount: 2,\ncheckStats: iptestutil.CheckIGMPv3Stats,\n@@ -818,28 +758,12 @@ func TestMGPLeaveGroup(t *testing.T) {\nvalidateReport: func(t *testing.T, p stack.PacketBufferPtr) {\nt.Helper()\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n- RecordType: header.MLDv2ReportRecordChangeToExcludeMode,\n- MulticastAddress: ipv6MulticastAddr1,\n- Sources: nil,\n- },\n- },\n- })\n+ validateMLDv2ReportPacket(t, p, []tcpip.Address{ipv6MulticastAddr1}, header.MLDv2ReportRecordChangeToExcludeMode)\n},\nvalidateLeave: func(t *testing.T, p stack.PacketBufferPtr) {\nt.Helper()\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n- RecordType: header.MLDv2ReportRecordChangeToIncludeMode,\n- MulticastAddress: ipv6MulticastAddr1,\n- Sources: nil,\n- },\n- },\n- })\n+ validateMLDv2ReportPacket(t, p, []tcpip.Address{ipv6MulticastAddr1}, header.MLDv2ReportRecordChangeToIncludeMode)\n},\nleaveCount: 2,\ncheckStats: iptestutil.CheckMLDv2Stats,\n@@ -965,15 +889,7 @@ func TestMGPQueryMessages(t *testing.T) {\nrecordType = header.IGMPv3ReportRecordModeIsExclude\n}\n- validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n- Records: []header.IGMPv3ReportGroupAddressRecordSerializer{\n- {\n- RecordType: recordType,\n- GroupAddress: ipv4MulticastAddr1,\n- Sources: nil,\n- },\n- },\n- })\n+ validateIGMPv3ReportPacket(t, p, []tcpip.Address{ipv4MulticastAddr1}, recordType)\n},\nrxQuery: func(e *channel.Endpoint, maxRespTime uint8, groupAddress tcpip.Address) {\ncreateAndInjectIGMPPacket(e, igmpMembershipQuery, maxRespTime, groupAddress, header.IGMPv3QueryMinimumSize-header.IGMPQueryMinimumSize /* extraLength */)\n@@ -1022,15 +938,7 @@ func TestMGPQueryMessages(t *testing.T) {\nrecordType = 
header.MLDv2ReportRecordModeIsExclude\n}\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n- RecordType: recordType,\n- MulticastAddress: ipv6MulticastAddr1,\n- Sources: nil,\n- },\n- },\n- })\n+ validateMLDv2ReportPacket(t, p, []tcpip.Address{ipv6MulticastAddr1}, recordType)\n},\nrxQuery: func(e *channel.Endpoint, maxRespTime uint8, groupAddress tcpip.Address) {\ncreateAndInjectMLDPacket(e, mldQuery, maxRespTime, groupAddress, header.MLDv2QueryMinimumSize-header.MLDMinimumSize /* extraLength */)\n@@ -1194,28 +1102,12 @@ func TestMGPReportMessages(t *testing.T) {\nvalidateReport: func(t *testing.T, p stack.PacketBufferPtr) {\nt.Helper()\n- validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n- Records: []header.IGMPv3ReportGroupAddressRecordSerializer{\n- {\n- RecordType: header.IGMPv3ReportRecordChangeToExcludeMode,\n- GroupAddress: ipv4MulticastAddr1,\n- Sources: nil,\n- },\n- },\n- })\n+ validateIGMPv3ReportPacket(t, p, []tcpip.Address{ipv4MulticastAddr1}, header.IGMPv3ReportRecordChangeToExcludeMode)\n},\nvalidateLeave: func(t *testing.T, p stack.PacketBufferPtr) {\nt.Helper()\n- validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n- Records: []header.IGMPv3ReportGroupAddressRecordSerializer{\n- {\n- RecordType: header.IGMPv3ReportRecordChangeToIncludeMode,\n- GroupAddress: ipv4MulticastAddr1,\n- Sources: nil,\n- },\n- },\n- })\n+ validateIGMPv3ReportPacket(t, p, []tcpip.Address{ipv4MulticastAddr1}, header.IGMPv3ReportRecordChangeToIncludeMode)\n},\nleaveCount: 2,\ncheckStats: iptestutil.CheckIGMPv3Stats,\n@@ -1252,28 +1144,12 @@ func TestMGPReportMessages(t *testing.T) {\nvalidateReport: func(t *testing.T, p stack.PacketBufferPtr) {\nt.Helper()\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n- RecordType: header.MLDv2ReportRecordChangeToExcludeMode,\n- MulticastAddress: ipv6MulticastAddr1,\n- Sources: nil,\n- },\n- },\n- })\n+ validateMLDv2ReportPacket(t, p, []tcpip.Address{ipv6MulticastAddr1}, header.MLDv2ReportRecordChangeToExcludeMode)\n},\nvalidateLeave: func(t *testing.T, p stack.PacketBufferPtr) {\nt.Helper()\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n- RecordType: header.MLDv2ReportRecordChangeToIncludeMode,\n- MulticastAddress: ipv6MulticastAddr1,\n- Sources: nil,\n- },\n- },\n- })\n+ validateMLDv2ReportPacket(t, p, []tcpip.Address{ipv6MulticastAddr1}, header.MLDv2ReportRecordChangeToIncludeMode)\n},\nleaveCount: 2,\ncheckStats: iptestutil.CheckMLDv2Stats,\n@@ -1560,31 +1436,12 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nvalidateReport: func(t *testing.T, p stack.PacketBufferPtr, addrs []tcpip.Address) {\nt.Helper()\n- var records []header.IGMPv3ReportGroupAddressRecordSerializer\n- for _, addr := range addrs {\n- records = append(records, header.IGMPv3ReportGroupAddressRecordSerializer{\n- RecordType: header.IGMPv3ReportRecordChangeToExcludeMode,\n- GroupAddress: addr,\n- Sources: nil,\n- })\n- }\n-\n- validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n- Records: records,\n- })\n+ validateIGMPv3ReportPacket(t, p, addrs, header.IGMPv3ReportRecordChangeToExcludeMode)\n},\nvalidateLeave: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\nt.Helper()\n- validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n- Records: 
[]header.IGMPv3ReportGroupAddressRecordSerializer{\n- {\n- RecordType: header.IGMPv3ReportRecordChangeToIncludeMode,\n- GroupAddress: addr,\n- Sources: nil,\n- },\n- },\n- })\n+ validateIGMPv3ReportPacket(t, p, []tcpip.Address{addr}, header.IGMPv3ReportRecordChangeToIncludeMode)\n},\ngetAndCheckGroupAddress: getAndCheckIGMPv3GroupAddress,\ncheckStats: iptestutil.CheckIGMPv3Stats,\n@@ -1616,30 +1473,12 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nvalidateReport: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\nt.Helper()\n- validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n- Records: []header.IGMPv3ReportGroupAddressRecordSerializer{\n- {\n- RecordType: header.IGMPv3ReportRecordChangeToExcludeMode,\n- GroupAddress: addr,\n- Sources: nil,\n- },\n- },\n- })\n+ validateIGMPv3ReportPacket(t, p, []tcpip.Address{addr}, header.IGMPv3ReportRecordChangeToExcludeMode)\n},\nvalidateLeave: func(t *testing.T, p stack.PacketBufferPtr, addrs []tcpip.Address) {\nt.Helper()\n- var records []header.IGMPv3ReportGroupAddressRecordSerializer\n- for _, addr := range addrs {\n- records = append(records, header.IGMPv3ReportGroupAddressRecordSerializer{\n- RecordType: header.IGMPv3ReportRecordChangeToIncludeMode,\n- GroupAddress: addr,\n- Sources: nil,\n- })\n- }\n- validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n- Records: records,\n- })\n+ validateIGMPv3ReportPacket(t, p, addrs, header.IGMPv3ReportRecordChangeToIncludeMode)\n},\ncheckStats: iptestutil.CheckIGMPv3Stats,\ngetAndCheckGroupAddress: getAndCheckIGMPv3GroupAddress,\n@@ -1660,31 +1499,12 @@ func TestMGPWithNICLifecycle(t *testing.T) {\n},\nvalidateReport: func(t *testing.T, p stack.PacketBufferPtr, addrs []tcpip.Address) {\nt.Helper()\n-\n- var records []header.MLDv2ReportMulticastAddressRecordSerializer\n- for _, addr := range addrs {\n- records = append(records, header.MLDv2ReportMulticastAddressRecordSerializer{\n- RecordType: header.MLDv2ReportRecordChangeToExcludeMode,\n- MulticastAddress: addr,\n- Sources: nil,\n- })\n- }\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: records,\n- })\n+ validateMLDv2ReportPacket(t, p, addrs, header.MLDv2ReportRecordChangeToExcludeMode)\n},\nvalidateLeave: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\nt.Helper()\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n- RecordType: header.MLDv2ReportRecordChangeToIncludeMode,\n- MulticastAddress: addr,\n- Sources: nil,\n- },\n- },\n- })\n+ validateMLDv2ReportPacket(t, p, []tcpip.Address{addr}, header.MLDv2ReportRecordChangeToIncludeMode)\n},\ngetAndCheckGroupAddress: getAndCheckMLDv2MulticastAddress,\ncheckInitialGroups: checkInitialIPv6Groups,\n@@ -1717,30 +1537,12 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nvalidateReport: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\nt.Helper()\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n- RecordType: header.MLDv2ReportRecordChangeToExcludeMode,\n- MulticastAddress: addr,\n- Sources: nil,\n- },\n- },\n- })\n+ validateMLDv2ReportPacket(t, p, []tcpip.Address{addr}, header.MLDv2ReportRecordChangeToExcludeMode)\n},\nvalidateLeave: func(t *testing.T, p stack.PacketBufferPtr, addrs []tcpip.Address) {\nt.Helper()\n- var records []header.MLDv2ReportMulticastAddressRecordSerializer\n- for _, addr := range addrs {\n- records = append(records, 
header.MLDv2ReportMulticastAddressRecordSerializer{\n- RecordType: header.MLDv2ReportRecordChangeToIncludeMode,\n- MulticastAddress: addr,\n- Sources: nil,\n- })\n- }\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: records,\n- })\n+ validateMLDv2ReportPacket(t, p, addrs, header.MLDv2ReportRecordChangeToIncludeMode)\n},\ncheckStats: iptestutil.CheckMLDv2Stats,\ngetAndCheckGroupAddress: getAndCheckMLDv2MulticastAddress,\n@@ -2009,15 +1811,7 @@ func TestMGPCoalescedQueryResponseRecords(t *testing.T) {\nvalidateReport: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\nt.Helper()\n- validateIGMPv3ReportPacket(t, p, header.IGMPv3ReportSerializer{\n- Records: []header.IGMPv3ReportGroupAddressRecordSerializer{\n- {\n- RecordType: header.IGMPv3ReportRecordChangeToExcludeMode,\n- GroupAddress: addr,\n- Sources: nil,\n- },\n- },\n- })\n+ validateIGMPv3ReportPacket(t, p, []tcpip.Address{addr}, header.IGMPv3ReportRecordChangeToExcludeMode)\n},\ncheckStats: func(t *testing.T, s *stack.Stack, reports uint64) {\nt.Helper()\n@@ -2080,15 +1874,7 @@ func TestMGPCoalescedQueryResponseRecords(t *testing.T) {\nvalidateReport: func(t *testing.T, p stack.PacketBufferPtr, addr tcpip.Address) {\nt.Helper()\n- validateMLDv2ReportPacket(t, p, header.MLDv2ReportSerializer{\n- Records: []header.MLDv2ReportMulticastAddressRecordSerializer{\n- {\n- RecordType: header.MLDv2ReportRecordChangeToExcludeMode,\n- MulticastAddress: addr,\n- Sources: nil,\n- },\n- },\n- })\n+ validateMLDv2ReportPacket(t, p, []tcpip.Address{addr}, header.MLDv2ReportRecordChangeToExcludeMode)\n},\ncheckStats: func(t *testing.T, s *stack.Stack, reports uint64) {\nt.Helper()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Extract IGMPv3/MLDv2 report validation helpers
Updates #8346
PiperOrigin-RevId: 501425348 |
259,927 | 12.01.2023 07:40:05 | 28,800 | 2b208ac832f9be1c1e5ebc01d9075b1b4dc3fecf | Set PktType for all Ethernet Packets
In preparation for improving ARP handling of gratuitous ARPs, set PktType in
regular inbound flow instead of only when handling packet sockets. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/ethernet/BUILD",
"new_path": "pkg/tcpip/link/ethernet/BUILD",
"diff": "@@ -26,5 +26,6 @@ go_test(\n\"//pkg/tcpip/header\",\n\"//pkg/tcpip/link/channel\",\n\"//pkg/tcpip/stack\",\n+ \"@com_github_google_go_cmp//cmp:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/ethernet/ethernet.go",
"new_path": "pkg/tcpip/link/ethernet/ethernet.go",
"diff": "@@ -64,10 +64,21 @@ func (e *Endpoint) DeliverNetworkPacket(_ tcpip.NetworkProtocolNumber, pkt stack\nif !ok {\nreturn\n}\n+ eth := header.Ethernet(hdr)\n+ dst := eth.DestinationAddress()\n+ if dst == header.EthernetBroadcastAddress {\n+ pkt.PktType = tcpip.PacketBroadcast\n+ } else if header.IsMulticastEthernetAddress(dst) {\n+ pkt.PktType = tcpip.PacketMulticast\n+ } else if dst == e.LinkAddress() {\n+ pkt.PktType = tcpip.PacketHost\n+ } else {\n+ pkt.PktType = tcpip.PacketOtherHost\n+ }\n// Note, there is no need to check the destination link address here since\n// the ethernet hardware filters frames based on their destination addresses.\n- e.Endpoint.DeliverNetworkPacket(header.Ethernet(hdr).Type() /* protocol */, pkt)\n+ e.Endpoint.DeliverNetworkPacket(eth.Type() /* protocol */, pkt)\n}\n// Capabilities implements stack.LinkEndpoint.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/ethernet/ethernet_test.go",
"new_path": "pkg/tcpip/link/ethernet/ethernet_test.go",
"diff": "@@ -19,6 +19,7 @@ import (\n\"os\"\n\"testing\"\n+ \"github.com/google/go-cmp/cmp\"\n\"gvisor.dev/gvisor/pkg/bufferv2\"\n\"gvisor.dev/gvisor/pkg/refs\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n@@ -30,47 +31,94 @@ import (\nvar _ stack.NetworkDispatcher = (*testNetworkDispatcher)(nil)\n+type deliveredPacket struct {\n+ protocol tcpip.NetworkProtocolNumber\n+ packet stack.PacketBufferPtr\n+}\n+\ntype testNetworkDispatcher struct {\n- networkPackets int\n+ networkPackets []deliveredPacket\n}\n-func (t *testNetworkDispatcher) DeliverNetworkPacket(tcpip.NetworkProtocolNumber, stack.PacketBufferPtr) {\n- t.networkPackets++\n+func (t *testNetworkDispatcher) DeliverNetworkPacket(proto tcpip.NetworkProtocolNumber, pb stack.PacketBufferPtr) {\n+ t.networkPackets = append(t.networkPackets, deliveredPacket{protocol: proto, packet: pb})\n}\n-func (*testNetworkDispatcher) DeliverLinkPacket(tcpip.NetworkProtocolNumber, stack.PacketBufferPtr, bool) {\n+func (*testNetworkDispatcher) DeliverLinkPacket(tcpip.NetworkProtocolNumber, stack.PacketBufferPtr) {\npanic(\"not implemented\")\n}\nfunc TestDeliverNetworkPacket(t *testing.T) {\n+\nconst (\nlinkAddr = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x06\")\n- otherLinkAddr1 = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x07\")\n- otherLinkAddr2 = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x08\")\n+ otherLinkAddr = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x07\")\n)\n+ for _, testCase := range []struct {\n+ name string\n+ dstAddr tcpip.LinkAddress\n+ pktType tcpip.PacketType\n+ }{\n+ {\n+ name: \"unicast\",\n+ dstAddr: linkAddr,\n+ pktType: tcpip.PacketHost,\n+ },\n+ {\n+ name: \"broadcast\",\n+ dstAddr: header.EthernetBroadcastAddress,\n+ pktType: tcpip.PacketBroadcast,\n+ },\n+ {\n+ name: \"multicast\",\n+ dstAddr: tcpip.LinkAddress(\"\\xFF\\x00\\x00\\x00\\x05\\x07\"),\n+ pktType: tcpip.PacketMulticast,\n+ },\n+ {\n+ name: \"other host\",\n+ dstAddr: tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x08\"),\n+ pktType: tcpip.PacketOtherHost,\n+ },\n+ } {\n+ t.Run(testCase.name, func(t *testing.T) {\n+\ne := ethernet.New(channel.New(0, 0, linkAddr))\nvar networkDispatcher testNetworkDispatcher\ne.Attach(&networkDispatcher)\n- if networkDispatcher.networkPackets != 0 {\n- t.Fatalf(\"got networkDispatcher.networkPackets = %d, want = 0\", networkDispatcher.networkPackets)\n+ if got, want := len(networkDispatcher.networkPackets), 0; got != want {\n+ t.Fatalf(\"got networkDispatcher.networkPackets = %d, want = %d\", got, want)\n}\n+ const networkProtocol = header.IPv4ProtocolNumber\n+\n// An ethernet frame with a destination link address that is not assigned to\n// our ethernet link endpoint should still be delivered to the network\n// dispatcher since the ethernet endpoint is not expected to filter frames.\neth := make([]byte, header.EthernetMinimumSize)\nheader.Ethernet(eth).Encode(&header.EthernetFields{\n- SrcAddr: otherLinkAddr1,\n- DstAddr: otherLinkAddr2,\n- Type: header.IPv4ProtocolNumber,\n+ SrcAddr: otherLinkAddr,\n+ DstAddr: testCase.dstAddr,\n+ Type: networkProtocol,\n})\np := stack.NewPacketBuffer(stack.PacketBufferOptions{Payload: bufferv2.MakeWithData(eth)})\ndefer p.DecRef()\ne.DeliverNetworkPacket(0, p)\n- if networkDispatcher.networkPackets != 1 {\n- t.Fatalf(\"got networkDispatcher.networkPackets = %d, want = 1\", networkDispatcher.networkPackets)\n+ if got, want := len(networkDispatcher.networkPackets), 1; got != want {\n+ t.Fatalf(\"got networkDispatcher.networkPackets = %d, want = %d\", got, want)\n+ }\n+ delivered := 
networkDispatcher.networkPackets[0]\n+ if diff := cmp.Diff(delivered.packet.LinkHeader().Slice(), eth); diff != \"\" {\n+ t.Errorf(\"LinkHeader mismatch (-want +got):\\n%s\", diff)\n+ }\n+ if got, want := delivered.protocol, networkProtocol; got != want {\n+ t.Errorf(\"got delivered.protocol = %d, want = %d\", got, want)\n+ }\n+ if got, want := delivered.packet.PktType, testCase.pktType; got != want {\n+ t.Errorf(\"got delivered.packet.PktType = %d, want = %d\", got, want)\n+ }\n+ })\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint_test.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint_test.go",
"diff": "@@ -137,7 +137,7 @@ func (c *context) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt\nc.ch <- packetInfo{protocol, pkt}\n}\n-func (c *context) DeliverLinkPacket(tcpip.NetworkProtocolNumber, stack.PacketBufferPtr, bool) {\n+func (c *context) DeliverLinkPacket(tcpip.NetworkProtocolNumber, stack.PacketBufferPtr) {\nc.t.Fatal(\"DeliverLinkPacket not implemented\")\n}\n@@ -575,7 +575,7 @@ func (d *fakeNetworkDispatcher) DeliverNetworkPacket(_ tcpip.NetworkProtocolNumb\nd.pkts = append(d.pkts, pkt)\n}\n-func (*fakeNetworkDispatcher) DeliverLinkPacket(tcpip.NetworkProtocolNumber, stack.PacketBufferPtr, bool) {\n+func (*fakeNetworkDispatcher) DeliverLinkPacket(tcpip.NetworkProtocolNumber, stack.PacketBufferPtr) {\npanic(\"not implemented\")\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/nested/nested.go",
"new_path": "pkg/tcpip/link/nested/nested.go",
"diff": "@@ -61,12 +61,12 @@ func (e *Endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pk\n}\n// DeliverLinkPacket implements stack.NetworkDispatcher.\n-func (e *Endpoint) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr, incoming bool) {\n+func (e *Endpoint) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {\ne.mu.RLock()\nd := e.dispatcher\ne.mu.RUnlock()\nif d != nil {\n- d.DeliverLinkPacket(protocol, pkt, incoming)\n+ d.DeliverLinkPacket(protocol, pkt)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/nested/nested_test.go",
"new_path": "pkg/tcpip/link/nested/nested_test.go",
"diff": "@@ -57,7 +57,7 @@ func (d *counterDispatcher) DeliverNetworkPacket(tcpip.NetworkProtocolNumber, st\nd.count++\n}\n-func (*counterDispatcher) DeliverLinkPacket(tcpip.NetworkProtocolNumber, stack.PacketBufferPtr, bool) {\n+func (*counterDispatcher) DeliverLinkPacket(tcpip.NetworkProtocolNumber, stack.PacketBufferPtr) {\npanic(\"not implemented\")\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/packetsocket/packetsocket.go",
"new_path": "pkg/tcpip/link/packetsocket/packetsocket.go",
"diff": "@@ -41,7 +41,7 @@ func New(lower stack.LinkEndpoint) stack.LinkEndpoint {\n// DeliverNetworkPacket implements stack.NetworkDispatcher.\nfunc (e *endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {\n- e.Endpoint.DeliverLinkPacket(protocol, pkt, true /* incoming */)\n+ e.Endpoint.DeliverLinkPacket(protocol, pkt)\ne.Endpoint.DeliverNetworkPacket(protocol, pkt)\n}\n@@ -49,7 +49,7 @@ func (e *endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pk\n// WritePackets implements stack.LinkEndpoint.\nfunc (e *endpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) {\nfor _, pkt := range pkts.AsSlice() {\n- e.Endpoint.DeliverLinkPacket(pkt.NetworkProtocolNumber, pkt, false /* incoming */)\n+ e.Endpoint.DeliverLinkPacket(pkt.NetworkProtocolNumber, pkt)\n}\nreturn e.Endpoint.WritePackets(pkts)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/packetsocket/packetsocket_test.go",
"new_path": "pkg/tcpip/link/packetsocket/packetsocket_test.go",
"diff": "@@ -59,7 +59,6 @@ var _ stack.NetworkDispatcher = (*testNetworkDispatcher)(nil)\ntype linkPacketInfo struct {\npkt stack.PacketBufferPtr\nprotocol tcpip.NetworkProtocolNumber\n- incoming bool\n}\ntype networkPacketInfo struct {\n@@ -99,11 +98,10 @@ func (t *testNetworkDispatcher) DeliverNetworkPacket(protocol tcpip.NetworkProto\nt.networkPacket = networkPacket\n}\n-func (t *testNetworkDispatcher) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr, incoming bool) {\n+func (t *testNetworkDispatcher) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {\nlinkPacket := linkPacketInfo{\npkt: pkt.IncRef(),\nprotocol: protocol,\n- incoming: incoming,\n}\nif t.linkPacket != (linkPacketInfo{}) {\n@@ -128,6 +126,7 @@ func TestPacketDispatch(t *testing.T) {\npkt.NetworkProtocolNumber = protocol\n{\n+ pkt.PktType = tcpip.PacketOutgoing\nvar pkts stack.PacketBufferList\npkts.PushBack(pkt)\nif n, err := ep.WritePackets(pkts); err != nil {\n@@ -139,18 +138,19 @@ func TestPacketDispatch(t *testing.T) {\nif want := (networkPacketInfo{}); d.networkPacket != want {\nt.Errorf(\"got d.networkPacket = %#v, want = %#v\", d.networkPacket, want)\n}\n- if want := (linkPacketInfo{pkt: pkt, protocol: protocol, incoming: false}); d.linkPacket != want {\n+ if want := (linkPacketInfo{pkt: pkt, protocol: protocol}); d.linkPacket != want {\nt.Errorf(\"got d.linkPacket = %#v, want = %#v\", d.linkPacket, want)\n}\n}\nd.reset()\n{\n+ pkt.PktType = tcpip.PacketHost\nnullEP.disp.DeliverNetworkPacket(protocol, pkt)\nif want := (networkPacketInfo{pkt: pkt, protocol: protocol}); d.networkPacket != want {\nt.Errorf(\"got d.networkPacket = %#v, want = %#v\", d.networkPacket, want)\n}\n- if want := (linkPacketInfo{pkt: pkt, protocol: protocol, incoming: true}); d.linkPacket != want {\n+ if want := (linkPacketInfo{pkt: pkt, protocol: protocol}); d.linkPacket != want {\nt.Errorf(\"got d.linkPacket = %#v, want = %#v\", d.linkPacket, want)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem_test.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem_test.go",
"diff": "@@ -154,7 +154,7 @@ func (c *testContext) DeliverNetworkPacket(proto tcpip.NetworkProtocolNumber, pk\nc.packetCh <- struct{}{}\n}\n-func (c *testContext) DeliverLinkPacket(tcpip.NetworkProtocolNumber, stack.PacketBufferPtr, bool) {\n+func (c *testContext) DeliverLinkPacket(tcpip.NetworkProtocolNumber, stack.PacketBufferPtr) {\nc.t.Fatal(\"DeliverLinkPacket not implemented\")\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/waitable/waitable.go",
"new_path": "pkg/tcpip/link/waitable/waitable.go",
"diff": "@@ -63,12 +63,12 @@ func (e *Endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pk\n}\n// DeliverLinkPacket implements stack.NetworkDispatcher.\n-func (e *Endpoint) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr, incoming bool) {\n+func (e *Endpoint) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {\nif !e.dispatchGate.Enter() {\nreturn\n}\n- e.dispatcher.DeliverLinkPacket(protocol, pkt, incoming)\n+ e.dispatcher.DeliverLinkPacket(protocol, pkt)\ne.dispatchGate.Leave()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/waitable/waitable_test.go",
"new_path": "pkg/tcpip/link/waitable/waitable_test.go",
"diff": "@@ -43,7 +43,7 @@ func (e *countedEndpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNum\ne.dispatchCount++\n}\n-func (*countedEndpoint) DeliverLinkPacket(tcpip.NetworkProtocolNumber, stack.PacketBufferPtr, bool) {\n+func (*countedEndpoint) DeliverLinkPacket(tcpip.NetworkProtocolNumber, stack.PacketBufferPtr) {\npanic(\"not implemented\")\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -386,6 +386,8 @@ func (n *nic) writePacket(pkt PacketBufferPtr) tcpip.Error {\n}\nfunc (n *nic) writeRawPacket(pkt PacketBufferPtr) tcpip.Error {\n+ // Always an outgoing packet.\n+ pkt.PktType = tcpip.PacketOutgoing\nif err := n.qDisc.WritePacket(pkt); err != nil {\nif _, ok := err.(*tcpip.ErrNoBufferSpace); ok {\nn.stats.txPacketsDroppedNoBufferSpace.Increment()\n@@ -738,7 +740,7 @@ func (n *nic) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt Pac\nn.gro.dispatch(pkt, protocol, networkEndpoint)\n}\n-func (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr, incoming bool) {\n+func (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr) {\n// Deliver to interested packet endpoints without holding NIC lock.\nvar packetEPPkt PacketBufferPtr\ndefer func() {\n@@ -764,11 +766,13 @@ func (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt Packet\n// populate it in the packet buffer we provide to packet endpoints as\n// packet endpoints inspect link headers.\npacketEPPkt.LinkHeader().Consume(len(pkt.LinkHeader().Slice()))\n-\n- if incoming {\n+ packetEPPkt.PktType = pkt.PktType\n+ // Assume the packet is for us if the packet type is unset.\n+ // The packet type is set to PacketOutgoing when sending packets so\n+ // this may only be unset for incoming packets where link endpoints\n+ // have not set it.\n+ if packetEPPkt.PktType == 0 {\npacketEPPkt.PktType = tcpip.PacketHost\n- } else {\n- packetEPPkt.PktType = tcpip.PacketOutgoing\n}\n}\n@@ -785,7 +789,7 @@ func (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt Packet\nn.packetEPsMu.Unlock()\n// On Linux, only ETH_P_ALL endpoints get outbound packets.\n- if incoming && protoEPsOK {\n+ if pkt.PktType != tcpip.PacketOutgoing && protoEPsOK {\nprotoEPs.forEach(deliverPacketEPs)\n}\nif anyEPsOK {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/registration.go",
"new_path": "pkg/tcpip/stack/registration.go",
"diff": "@@ -1034,7 +1034,7 @@ type NetworkDispatcher interface {\n// This method should be called with both incoming and outgoing packets.\n//\n// If the link-layer has a header, the packet's link header must be populated.\n- DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr, incoming bool)\n+ DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr)\n}\n// LinkEndpointCapabilities is the type associated with the capabilities\n"
}
] | Go | Apache License 2.0 | google/gvisor | Set PktType for all Ethernet Packets
In preparation for improving ARP handling of gratuitous ARPs, set PktType in
regular inbound flow instead of only when handling packet sockets.
PiperOrigin-RevId: 501562322 |
259,853 | 12.01.2023 12:30:25 | 28,800 | aade3a56b5a5b7ce2adf1c7035a6d10bc538ec9d | Don't register async handlers for signalfd descriptors
This is a missing part of cl/483861398. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/file_description.go",
"new_path": "pkg/sentry/vfs/file_description.go",
"diff": "@@ -276,6 +276,7 @@ func (fd *FileDescription) SetStatusFlags(ctx context.Context, creds *auth.Crede\n// TODO(gvisor.dev/issue/1035): FileDescriptionImpl.SetOAsync()?\nconst settableFlags = linux.O_APPEND | linux.O_ASYNC | linux.O_DIRECT | linux.O_NOATIME | linux.O_NONBLOCK\nfd.flagsMu.Lock()\n+ defer fd.flagsMu.Unlock()\nif fd.asyncHandler != nil {\n// Use fd.statusFlags instead of oldFlags, which may have become outdated,\n// to avoid double registering/unregistering.\n@@ -288,7 +289,6 @@ func (fd *FileDescription) SetStatusFlags(ctx context.Context, creds *auth.Crede\n}\n}\nfd.statusFlags.Store((oldFlags &^ settableFlags) | (flags & settableFlags))\n- fd.flagsMu.Unlock()\nreturn nil\n}\n@@ -944,7 +944,7 @@ func (fd *FileDescription) SetAsyncHandler(newHandler func() FileAsync) (FileAsy\nif fd.asyncHandler == nil {\nfd.asyncHandler = newHandler()\nif fd.statusFlags.RacyLoad()&linux.O_ASYNC != 0 {\n- if err := fd.asyncHandler.Register(fd); err != nil {\n+ if err := fd.impl.RegisterFileAsyncHandler(fd); err != nil {\nreturn nil, err\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/fcntl.cc",
"new_path": "test/syscalls/linux/fcntl.cc",
"diff": "@@ -1510,6 +1510,25 @@ TEST_F(FcntlSignalTest, SignalFD) {\nsyscall(SYS_tkill, tid, SIGIO);\n}\n+TEST_F(FcntlSignalTest, SignalFDSetSigAfterASYNC) {\n+ // Create the signalfd.\n+ sigset_t mask;\n+ sigemptyset(&mask);\n+ sigaddset(&mask, SIGIO);\n+ FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NewSignalFD(&mask, 0));\n+\n+ const auto signal_cleanup =\n+ ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGIO));\n+ ASSERT_THAT(fcntl(fd.get(), F_SETOWN, getpid()), SyscallSucceeds());\n+ int old_flags;\n+ ASSERT_THAT(old_flags = fcntl(fd.get(), F_GETFL), SyscallSucceeds());\n+ ASSERT_THAT(fcntl(fd.get(), F_SETFL, old_flags | O_ASYNC), SyscallSucceeds());\n+ ASSERT_THAT(fcntl(fd.get(), F_SETSIG, 0), SyscallSucceeds());\n+\n+ int tid = syscall(SYS_gettid);\n+ syscall(SYS_tkill, tid, SIGIO);\n+}\n+\nTEST_F(FcntlSignalTest, SetSigCustom) {\nconst auto signal_cleanup =\nASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR1));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't register async handlers for signalfd descriptors
This is a missing part of cl/483861398.
PiperOrigin-RevId: 501636729 |
259,927 | 12.01.2023 13:05:02 | 28,800 | a6fe4d1d8f82c444722aac6d3ef6e317f3f1459c | Handle gratuitous ARP as unsolicited reply
A gratuitous ARP does not imply two-way connectivity and must not move neighbor
state to reachable. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/arp/arp.go",
"new_path": "pkg/tcpip/network/arp/arp.go",
"diff": "@@ -230,12 +230,11 @@ func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) {\ne.dad.StopLocked(addr, &stack.DADDupAddrDetected{HolderLinkAddress: linkAddr})\ne.mu.Unlock()\n- // The solicited, override, and isRouter flags are not available for ARP;\n- // they are only available for IPv6 Neighbor Advertisements.\nswitch err := e.nic.HandleNeighborConfirmation(header.IPv4ProtocolNumber, addr, linkAddr, stack.ReachabilityConfirmationFlags{\n- // Solicited and unsolicited (also referred to as gratuitous) ARP Replies\n- // are handled equivalently to a solicited Neighbor Advertisement.\n- Solicited: true,\n+ // Only unicast ARP replies are considered solicited. Broadcast replies\n+ // are gratuitous ARP replies and should not move neighbor entries to the\n+ // reachable state.\n+ Solicited: pkt.PktType == tcpip.PacketHost,\n// If a different link address is received than the one cached, the entry\n// should always go to Stale.\nOverride: false,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/arp/arp_test.go",
"new_path": "pkg/tcpip/network/arp/arp_test.go",
"diff": "@@ -420,6 +420,100 @@ func TestDirectRequest(t *testing.T) {\n}\n}\n+func TestReplyPacketType(t *testing.T) {\n+ for _, testCase := range []struct {\n+ name string\n+ packetType tcpip.PacketType\n+ becomesReachable bool\n+ }{\n+ {\n+ name: \"unicast\",\n+ packetType: tcpip.PacketHost,\n+ becomesReachable: true,\n+ },\n+ {\n+ name: \"broadcast\",\n+ packetType: tcpip.PacketBroadcast,\n+ becomesReachable: false,\n+ },\n+ } {\n+ t.Run(testCase.name, func(t *testing.T) {\n+ c := makeTestContext(t, 1, 1)\n+ defer c.cleanup()\n+\n+ // Inject an incoming ARP request first.\n+ v := make([]byte, header.ARPSize)\n+ h := header.ARP(v)\n+ h.SetIPv4OverEthernet()\n+ h.SetOp(header.ARPRequest)\n+ if got, want := copy(h.HardwareAddressSender(), remoteLinkAddr), header.EthernetAddressSize; got != want {\n+ t.Fatalf(\"got copy(_, _) = %d, want = %d\", got, want)\n+ }\n+ if got, want := copy(h.ProtocolAddressSender(), remoteAddr), header.IPv4AddressSize; got != want {\n+ t.Fatalf(\"got copy(_, _) = %d, want = %d\", got, want)\n+ }\n+ if got, want := copy(h.ProtocolAddressTarget(), stackAddr), header.IPv4AddressSize; got != want {\n+ t.Fatalf(\"got copy(_, _) = %d, want = %d\", got, want)\n+ }\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Payload: bufferv2.MakeWithData(v),\n+ })\n+ pkt.PktType = tcpip.PacketBroadcast\n+ c.linkEP.InjectInbound(arp.ProtocolNumber, pkt)\n+ pkt.DecRef()\n+\n+ if got, ok := c.nudDisp.nextEvent(); ok {\n+ want := eventInfo{\n+ eventType: entryAdded,\n+ nicID: nicID,\n+ entry: stack.NeighborEntry{\n+ Addr: remoteAddr,\n+ LinkAddr: remoteLinkAddr,\n+ State: stack.Stale,\n+ },\n+ }\n+ if diff := cmp.Diff(want, got, cmp.AllowUnexported(eventInfo{}), cmpopts.IgnoreFields(stack.NeighborEntry{}, \"UpdatedAt\")); diff != \"\" {\n+ t.Errorf(\"got invalid event (-want +got):\\n%s\", diff)\n+ }\n+ } else {\n+ t.Fatal(\"event didn't arrive\")\n+ }\n+\n+ // Then inject replies with different packet types.\n+ h.SetIPv4OverEthernet()\n+ h.SetOp(header.ARPReply)\n+ pkt = stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Payload: bufferv2.MakeWithData(v),\n+ })\n+ pkt.PktType = testCase.packetType\n+ c.linkEP.InjectInbound(arp.ProtocolNumber, pkt)\n+ pkt.DecRef()\n+\n+ got, ok := c.nudDisp.nextEvent()\n+ // If the entry doesn't become reachable we're not supposed to see a new\n+ // event.\n+ if got, want := ok, testCase.becomesReachable; got != want {\n+ t.Errorf(\"got c.nudDisp.nextEvent() = %t, want %t\", got, want)\n+ }\n+ if ok {\n+ want := eventInfo{\n+ eventType: entryChanged,\n+ nicID: nicID,\n+ entry: stack.NeighborEntry{\n+ Addr: remoteAddr,\n+ LinkAddr: remoteLinkAddr,\n+ State: stack.Reachable,\n+ },\n+ }\n+ if diff := cmp.Diff(want, got, cmp.AllowUnexported(eventInfo{}), cmpopts.IgnoreFields(stack.NeighborEntry{}, \"UpdatedAt\")); diff != \"\" {\n+ t.Errorf(\"got invalid event (-want +got):\\n%s\", diff)\n+ }\n+ }\n+ })\n+ }\n+\n+}\n+\nvar _ stack.LinkEndpoint = (*testLinkEndpoint)(nil)\ntype testLinkEndpoint struct {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Handle gratuitous ARP as unsolicited reply
A gratuitous ARP does not imply two-way connectivity and must not move neighbor
state to reachable.
PiperOrigin-RevId: 501645481 |
260,004 | 13.01.2023 12:55:48 | 28,800 | 12e13426bec35d05b29d6b0cd136dab3e0ea821a | Test sending multiple queued IGMP reports
This change updates TestSendQueuedIGMPReports to join multiple groups
when performing the test.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/igmp_test.go",
"new_path": "pkg/tcpip/network/ipv4/igmp_test.go",
"diff": "@@ -41,8 +41,9 @@ const (\nvar (\nstackAddr = testutil.MustParse4(\"10.0.0.1\")\nremoteAddr = testutil.MustParse4(\"10.0.0.2\")\n- multicastAddr = testutil.MustParse4(\"224.0.0.3\")\n- unusedMulticastAddr = testutil.MustParse4(\"224.0.0.4\")\n+ multicastAddr1 = testutil.MustParse4(\"224.0.0.3\")\n+ multicastAddr2 = testutil.MustParse4(\"224.0.0.4\")\n+ unusedMulticastAddr = testutil.MustParse4(\"224.0.0.5\")\n)\n// validateIgmpPacket checks that a passed packet is an IPv4 IGMP packet sent\n@@ -91,9 +92,9 @@ func (ctx igmpTestContext) cleanup() {\nfunc newIGMPTestContext(t *testing.T, igmpEnabled bool) igmpTestContext {\nt.Helper()\n- // Create an endpoint of queue size 1, since no more than 1 packets are ever\n+ // Create an endpoint of queue size 2, since no more than 2 packets are ever\n// queued in the tests in this file.\n- e := channel.New(1, 1280, linkAddr)\n+ e := channel.New(2, 1280, linkAddr)\nclock := faketime.NewManualClock()\ns := stack.New(stack.Options{\nNetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocolWithOptions(ipv4.Options{\n@@ -163,8 +164,8 @@ func TestIGMPV1Present(t *testing.T) {\nt.Fatalf(\"AddProtocolAddress(%d, %+v, {}): %s\", nicID, protocolAddr, err)\n}\n- if err := s.JoinGroup(ipv4.ProtocolNumber, nicID, multicastAddr); err != nil {\n- t.Fatalf(\"JoinGroup(ipv4, nic, %s) = %s\", multicastAddr, err)\n+ if err := s.JoinGroup(ipv4.ProtocolNumber, nicID, multicastAddr1); err != nil {\n+ t.Fatalf(\"JoinGroup(ipv4, nic, %s) = %s\", multicastAddr1, err)\n}\n// This NIC will send an IGMPv3 report immediately, before this test can get\n@@ -177,7 +178,7 @@ func TestIGMPV1Present(t *testing.T) {\nif got := s.Stats().IGMP.PacketsSent.V3MembershipReport.Value(); got != 1 {\nt.Fatalf(\"got V3MembershipReport messages sent = %d, want = 1\", got)\n}\n- validateIgmpv3ReportPacket(t, p, stackAddr, multicastAddr)\n+ validateIgmpv3ReportPacket(t, p, stackAddr, multicastAddr1)\np.DecRef()\n}\nif t.Failed() {\n@@ -187,7 +188,7 @@ func TestIGMPV1Present(t *testing.T) {\n// Inject an IGMPv1 General Membership Query which is identical to a standard\n// membership query except the Max Response Time is set to 0, which will tell\n// the stack that this is a router using IGMPv1.\n- createAndInjectIGMPPacket(e, header.IGMPMembershipQuery, 0, defaultTTL, remoteAddr, stackAddr, multicastAddr, true /* hasRouterAlertOption */)\n+ createAndInjectIGMPPacket(e, header.IGMPMembershipQuery, 0, defaultTTL, remoteAddr, stackAddr, multicastAddr1, true /* hasRouterAlertOption */)\nif got := s.Stats().IGMP.PacketsReceived.MembershipQuery.Value(); got != 1 {\nt.Fatalf(\"got Membership Queries received = %d, want = 1\", got)\n}\n@@ -212,7 +213,7 @@ func TestIGMPV1Present(t *testing.T) {\nif got := s.Stats().IGMP.PacketsSent.V1MembershipReport.Value(); got != 1 {\nt.Fatalf(\"got V1MembershipReport messages sent = %d, want = 1\", got)\n}\n- validateIgmpPacket(t, p, header.IGMPv1MembershipReport, 0, stackAddr, multicastAddr, multicastAddr)\n+ validateIgmpPacket(t, p, header.IGMPv1MembershipReport, 0, stackAddr, multicastAddr1, multicastAddr1)\np.DecRef()\n}\n@@ -231,7 +232,7 @@ func TestIGMPV1Present(t *testing.T) {\nif got := s.Stats().IGMP.PacketsSent.V3MembershipReport.Value(); got != 2 {\nt.Fatalf(\"got V3MembershipReport messages sent = %d, want = 2\", got)\n}\n- validateIgmpv3ReportPacket(t, p, stackAddr, multicastAddr)\n+ validateIgmpv3ReportPacket(t, p, stackAddr, multicastAddr1)\np.DecRef()\n}\n}\n@@ -240,23 +241,27 @@ func TestSendQueuedIGMPReports(t *testing.T) {\ntests := 
[]struct {\nname string\nv2Compatibility bool\n- validate func(t *testing.T, pkt stack.PacketBufferPtr, localAddress tcpip.Address, groupAddress tcpip.Address)\n+ validate func(t *testing.T, e *channel.Endpoint, localAddress tcpip.Address, groupAddresses []tcpip.Address)\ncheckStats func(*testing.T, *stack.Stack, uint64, uint64, uint64)\n}{\n{\nname: \"V2 Compatibility\",\nv2Compatibility: true,\n- validate: func(t *testing.T, pkt stack.PacketBufferPtr, localAddress tcpip.Address, groupAddress tcpip.Address) {\n+ validate: func(t *testing.T, e *channel.Endpoint, localAddress tcpip.Address, groupAddresses []tcpip.Address) {\nt.Helper()\n- validateIgmpPacket(t, pkt, header.IGMPv2MembershipReport, 0 /* maxRespTime */, localAddress, groupAddress, groupAddress)\n+ iptestutil.ValidMultipleIGMPv2ReportLeaves(t, e, localAddress, groupAddresses, false /* leave */)\n},\ncheckStats: iptestutil.CheckIGMPv2Stats,\n},\n{\nname: \"V3\",\nv2Compatibility: false,\n- validate: validateIgmpv3ReportPacket,\n+ validate: func(t *testing.T, e *channel.Endpoint, localAddress tcpip.Address, groupAddresses []tcpip.Address) {\n+ t.Helper()\n+\n+ iptestutil.ValidateIGMPv3RecordsAcrossReports(t, e, localAddress, groupAddresses, header.IGMPv3ReportRecordChangeToExcludeMode)\n+ },\ncheckStats: iptestutil.CheckIGMPv3Stats,\n},\n}\n@@ -305,11 +310,14 @@ func TestSendQueuedIGMPReports(t *testing.T) {\nvar reportV2Counter uint64\ntest.checkStats(t, s, reportCounter, doneCounter, reportV2Counter)\n- // Joining a group without an assigned address should queue IGMP packets; none\n- // should be sent without an assigned address.\n+ // Joining groups without an assigned address should queue IGMP packets;\n+ // none should be sent without an assigned address.\n+ multicastAddrs := []tcpip.Address{multicastAddr1, multicastAddr2}\n+ for _, multicastAddr := range multicastAddrs {\nif err := s.JoinGroup(ipv4.ProtocolNumber, nicID, multicastAddr); err != nil {\nt.Fatalf(\"JoinGroup(%d, %d, %s): %s\", ipv4.ProtocolNumber, nicID, multicastAddr, err)\n}\n+ }\ntest.checkStats(t, s, reportCounter, doneCounter, reportV2Counter)\nif p := e.Read(); !p.IsNil() {\nt.Fatalf(\"got unexpected packet = %#v\", p)\n@@ -320,28 +328,19 @@ func TestSendQueuedIGMPReports(t *testing.T) {\nif err := s.AddProtocolAddress(nicID, protocolAddr, stack.AddressProperties{}); err != nil {\nt.Fatalf(\"AddProtocolAddress(%d, %+v, {}): %s\", nicID, protocolAddr, err)\n}\n- reportCounter++\n+\n+ // We expect two batches of reports to be sent (1 batch when the address\n+ // is assigned, and another after the maximum unsolicited report interval.\n+ for i := 0; i < 2; i++ {\n+ reportCounter += uint64(len(multicastAddrs))\ntest.checkStats(t, s, reportCounter, doneCounter, reportV2Counter)\n- if p := e.Read(); p.IsNil() {\n- t.Error(\"expected to send an IGMP membership report\")\n- } else {\n- test.validate(t, p, stackAddr, multicastAddr)\n- p.DecRef()\n- }\n+ test.validate(t, e, stackAddr, multicastAddrs)\n+\nif t.Failed() {\nt.FailNow()\n}\n+\nclock.Advance(ipv4.UnsolicitedReportIntervalMax)\n- reportCounter++\n- test.checkStats(t, s, reportCounter, doneCounter, reportV2Counter)\n- if p := e.Read(); p.IsNil() {\n- t.Error(\"expected to send an IGMP membership report\")\n- } else {\n- test.validate(t, p, stackAddr, multicastAddr)\n- p.DecRef()\n- }\n- if t.Failed() {\n- t.FailNow()\n}\n// Should have no more packets to send after the initial set of unsolicited\n"
}
] | Go | Apache License 2.0 | google/gvisor | Test sending multiple queued IGMP reports
This change updates TestSendQueuedIGMPReports to join multiple groups
when performing the test.
Updates #8346
PiperOrigin-RevId: 501918703 |
260,004 | 13.01.2023 13:58:05 | 28,800 | c76b03723e614707d105c3f3be8fcfe8250e2e74 | Coalesce records sent by the state changed timer
When the state changed timer fires, instead of sending a single record
per report, send as many records as possible per report message.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/ip/generic_multicast_protocol.go",
"new_path": "pkg/tcpip/network/internal/ip/generic_multicast_protocol.go",
"diff": "@@ -520,6 +520,7 @@ func (g *GenericMulticastProtocolState) sendV2ReportAndMaybeScheduleChangedTimer\ng.protocolMU.Lock()\ndefer g.protocolMU.Unlock()\n+ reportBuilder := g.opts.Protocol.NewReportV2Builder()\nnonEmptyReport := false\nfor groupAddress, info := range g.memberships {\nif info.transmissionLeft == 0 || !g.shouldPerformForGroup(groupAddress) {\n@@ -529,15 +530,11 @@ func (g *GenericMulticastProtocolState) sendV2ReportAndMaybeScheduleChangedTimer\ninfo.transmissionLeft--\nnonEmptyReport = true\n- reportBuilder := g.opts.Protocol.NewReportV2Builder()\nmode := MulticastGroupProtocolV2ReportRecordChangeToExcludeMode\nif info.deleteScheduled {\nmode = MulticastGroupProtocolV2ReportRecordChangeToIncludeMode\n}\nreportBuilder.AddRecord(mode, groupAddress)\n- // Nothing meaningful we can do with the error here. We will retry\n- // sending a state changed report again anyways.\n- _, _ = reportBuilder.Send()\nif info.deleteScheduled && info.transmissionLeft == 0 {\n// No more transmissions left so we can actually delete the\n@@ -548,6 +545,10 @@ func (g *GenericMulticastProtocolState) sendV2ReportAndMaybeScheduleChangedTimer\n}\n}\n+ // Nothing meaningful we can do with the error here. We will retry\n+ // sending a state changed report again anyways.\n+ _, _ = reportBuilder.Send()\n+\nif nonEmptyReport {\ng.stateChangedReportV2Timer.Reset(g.calculateDelayTimerDuration(g.opts.MaxUnsolicitedReportDelay))\n} else {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/igmp_test.go",
"new_path": "pkg/tcpip/network/ipv4/igmp_test.go",
"diff": "@@ -332,7 +332,16 @@ func TestSendQueuedIGMPReports(t *testing.T) {\n// We expect two batches of reports to be sent (1 batch when the address\n// is assigned, and another after the maximum unsolicited report interval.\nfor i := 0; i < 2; i++ {\n+ // IGMPv2 always sends a single message per group.\n+ //\n+ // IGMPv3 sends a single message per group when we first get an\n+ // address assigned, but later reports (sent by the state changed\n+ // timer) coalesce records for groups.\n+ if test.v2Compatibility || i == 0 {\nreportCounter += uint64(len(multicastAddrs))\n+ } else {\n+ reportCounter++\n+ }\ntest.checkStats(t, s, reportCounter, doneCounter, reportV2Counter)\ntest.validate(t, e, stackAddr, multicastAddrs)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/mld_test.go",
"new_path": "pkg/tcpip/network/ipv6/mld_test.go",
"diff": "@@ -405,9 +405,16 @@ func TestSendQueuedMLDReports(t *testing.T) {\n// link-local address is assigned, and another after the maximum\n// unsolicited report interval.\nfor i := 0; i < 2; i++ {\n- // We expect reports to be sent (one for globalMulticastAddr and another\n- // for linkLocalAddrSNMC).\n+ // MLDv1 always sends a single message per group.\n+ //\n+ // MLDv2 sends a single message per group when we first get an\n+ // IPv6 link-local address assigned, but later reports (sent by\n+ // the state changed timer) coalesce records for groups.\n+ if subTest.v1Compatibility || i == 0 {\nreportCounter += maxReports\n+ } else {\n+ reportCounter++\n+ }\nsubTest.checkStats(t, s, reportCounter, doneCounter, reportV2Counter)\nsubTest.validate(\n"
}
] | Go | Apache License 2.0 | google/gvisor | Coalesce records sent by the state changed timer
When the state changed timer fires, instead of sending a single record
per report, send as many records as possible per report message.
Updates #8346
PiperOrigin-RevId: 501932735 |
260,004 | 13.01.2023 14:36:00 | 28,800 | a8900d549bbc3ea0d4002204f2cd5c372ba467b6 | Coalesce records for reports on MGP enable
This change sends multiple records into a single IGMPv3/MLDv2 report
instead of a single record per report when enabling IGMPv3/MLDv2.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/ip/generic_multicast_protocol.go",
"new_path": "pkg/tcpip/network/internal/ip/generic_multicast_protocol.go",
"diff": "@@ -195,6 +195,8 @@ type MulticastGroupProtocolV2ReportBuilder interface {\n// Send sends the report.\n//\n+ // Does nothing if no records were added.\n+ //\n// It is invalid to use this builder after this method is called.\nSend() (sent bool, err tcpip.Error)\n}\n@@ -398,11 +400,42 @@ func (g *GenericMulticastProtocolState) InitializeGroupsLocked() {\nreturn\n}\n+ var v2ReportBuilder MulticastGroupProtocolV2ReportBuilder\n+ switch g.mode {\n+ case protocolModeV2:\n+ v2ReportBuilder = g.opts.Protocol.NewReportV2Builder()\n+ case protocolModeV1Compatibility:\n+ default:\n+ panic(fmt.Sprintf(\"unrecognized mode = %d\", g.mode))\n+ }\n+\n+ for groupAddress, info := range g.memberships {\n+ g.initializeNewMemberLocked(groupAddress, &info, v2ReportBuilder)\n+ g.memberships[groupAddress] = info\n+ }\n+\n+ if v2ReportBuilder == nil {\n+ return\n+ }\n+\n+ if sent, err := v2ReportBuilder.Send(); sent && err == nil {\n+ g.scheduleStateChangedTimer()\n+ } else {\n+ // Nothing meaningful we could do with the error here - the interface may\n+ // not yet have an address. This is okay because we would either schedule a\n+ // report to be sent later or we will be notified when an address is added,\n+ // at which point we will try to send messages again.\nfor groupAddress, info := range g.memberships {\n- g.initializeNewMemberLocked(groupAddress, &info)\n+ if !g.shouldPerformForGroup(groupAddress) {\n+ continue\n+ }\n+\n+ // Revert the transmissions count since we did not successfully send.\n+ info.transmissionLeft++\ng.memberships[groupAddress] = info\n}\n}\n+}\n// SendQueuedReportsLocked attempts to send reports for groups that failed to\n// send reports during their last attempt.\n@@ -482,7 +515,7 @@ func (g *GenericMulticastProtocolState) JoinGroupLocked(groupAddress tcpip.Addre\ninfo.clearQueriedIncludeSources()\ninfo.delayedReportJobFiresAt = time.Time{}\ninfo.lastToSendReport = false\n- g.initializeNewMemberLocked(groupAddress, &info)\n+ g.initializeNewMemberLocked(groupAddress, &info, nil /* callersV2ReportBuilder */)\ng.memberships[groupAddress] = info\n}\n@@ -494,7 +527,11 @@ func (g *GenericMulticastProtocolState) IsLocallyJoinedRLocked(groupAddress tcpi\nreturn ok && !info.deleteScheduled\n}\n-func (g *GenericMulticastProtocolState) sendV2ReportAndMaybeScheduleChangedTimer(groupAddress tcpip.Address, info *multicastGroupState, recordType MulticastGroupProtocolV2ReportRecordType) bool {\n+func (g *GenericMulticastProtocolState) sendV2ReportAndMaybeScheduleChangedTimer(\n+ groupAddress tcpip.Address,\n+ info *multicastGroupState,\n+ recordType MulticastGroupProtocolV2ReportRecordType,\n+) bool {\nif info.transmissionLeft == 0 {\nreturn false\n}\n@@ -510,7 +547,19 @@ func (g *GenericMulticastProtocolState) sendV2ReportAndMaybeScheduleChangedTimer\nsuccessfullySentAndHasMore = info.transmissionLeft != 0\n// Use the interface-wide state changed report for further transmissions.\n- if successfullySentAndHasMore && !g.stateChangedReportV2TimerSet {\n+ if successfullySentAndHasMore {\n+ g.scheduleStateChangedTimer()\n+ }\n+ }\n+\n+ return successfullySentAndHasMore\n+}\n+\n+func (g *GenericMulticastProtocolState) scheduleStateChangedTimer() {\n+ if g.stateChangedReportV2TimerSet {\n+ return\n+ }\n+\ndelay := g.calculateDelayTimerDuration(g.opts.MaxUnsolicitedReportDelay)\nif g.stateChangedReportV2Timer == nil {\n// TODO(https://issuetracker.google.com/264799098): Create timer on\n@@ -560,10 +609,6 @@ func (g *GenericMulticastProtocolState) 
sendV2ReportAndMaybeScheduleChangedTimer\n}\ng.stateChangedReportV2TimerSet = true\n}\n- }\n-\n- return successfullySentAndHasMore\n-}\n// LeaveGroupLocked handles leaving the group.\n//\n@@ -900,7 +945,7 @@ func (g *GenericMulticastProtocolState) HandleReportLocked(groupAddress tcpip.Ad\n// initializeNewMemberLocked initializes a new group membership.\n//\n// Precondition: g.protocolMU must be locked.\n-func (g *GenericMulticastProtocolState) initializeNewMemberLocked(groupAddress tcpip.Address, info *multicastGroupState) {\n+func (g *GenericMulticastProtocolState) initializeNewMemberLocked(groupAddress tcpip.Address, info *multicastGroupState, callersV2ReportBuilder MulticastGroupProtocolV2ReportBuilder) {\nif !g.shouldPerformForGroup(groupAddress) {\nreturn\n}\n@@ -910,7 +955,12 @@ func (g *GenericMulticastProtocolState) initializeNewMemberLocked(groupAddress t\nswitch g.mode {\ncase protocolModeV2:\ninfo.transmissionLeft = g.robustnessVariable\n+ if callersV2ReportBuilder == nil {\ng.sendV2ReportAndMaybeScheduleChangedTimer(groupAddress, info, MulticastGroupProtocolV2ReportRecordChangeToExcludeMode)\n+ } else {\n+ callersV2ReportBuilder.AddRecord(MulticastGroupProtocolV2ReportRecordChangeToExcludeMode, groupAddress)\n+ info.transmissionLeft--\n+ }\ncase protocolModeV1Compatibility:\ninfo.transmissionLeft = unsolicitedTransmissionCount\ng.maybeSendReportLocked(groupAddress, info)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/igmp.go",
"new_path": "pkg/tcpip/network/ipv4/igmp.go",
"diff": "@@ -174,6 +174,10 @@ func (b *igmpv3ReportBuilder) AddRecord(genericRecordType ip.MulticastGroupProto\n//\n// +checklocksread:b.igmp.ep.mu\nfunc (b *igmpv3ReportBuilder) Send() (sent bool, err tcpip.Error) {\n+ if len(b.records) == 0 {\n+ return false, err\n+ }\n+\noptions := header.IPv4OptionsSerializer{\n&header.IPv4SerializableRouterAlertOption{},\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/mld.go",
"new_path": "pkg/tcpip/network/ipv6/mld.go",
"diff": "@@ -134,6 +134,10 @@ func (b *mldv2ReportBuilder) AddRecord(genericRecordType ip.MulticastGroupProtoc\n// Send implements ip.MulticastGroupProtocolV2ReportBuilder.\nfunc (b *mldv2ReportBuilder) Send() (sent bool, err tcpip.Error) {\n+ if len(b.records) == 0 {\n+ return false, err\n+ }\n+\nextensionHeaders := header.IPv6ExtHdrSerializer{\nheader.IPv6SerializableHopByHopExtHdr{\n&header.IPv6RouterAlertOption{Value: header.IPv6RouterAlertMLD},\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/multicast_group_test.go",
"new_path": "pkg/tcpip/network/multicast_group_test.go",
"diff": "@@ -1434,7 +1434,7 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nif err := s.EnableNIC(nicID); err != nil {\nt.Fatalf(\"EnableNIC(%d): %s\", nicID, err)\n}\n- reportV2Counter += uint64(len(test.multicastAddrs))\n+ reportV2Counter++\nsubTest.checkStats(t, s, reportCounter, leaveCounter, reportV2Counter)\ntest.validateReport(t, e, test.multicastAddrs)\nif t.Failed() {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Coalesce records for reports on MGP enable
This change sends multiple records into a single IGMPv3/MLDv2 report
instead of a single record per report when enabling IGMPv3/MLDv2.
Updates #8346
PiperOrigin-RevId: 501941751 |
260,004 | 13.01.2023 16:54:06 | 28,800 | d3894e4481012ea83ee7d1c9b87cca2c1425fdf0 | Populate IGMPv3 report checksum
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/checker/checker.go",
"new_path": "pkg/tcpip/checker/checker.go",
"diff": "@@ -1650,6 +1650,9 @@ func IGMPv3Report(expectedRecords map[tcpip.Address]header.IGMPv3ReportRecordTyp\n}\nreport := header.IGMPv3Report(igmp)\n+ if got, want := report.Checksum(), header.IGMPCalculateChecksum(igmp); got != want {\n+ t.Errorf(\"got report.Checksum() = %d, want = %d\", got, want)\n+ }\nrecords := report.GroupAddressRecords()\nfor len(expectedRecords) != 0 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/igmp_test.go",
"new_path": "pkg/tcpip/header/igmp_test.go",
"diff": "@@ -336,6 +336,11 @@ func TestIGMPv3Report(t *testing.T) {\ntest.serializer.SerializeInto(b)\nreport := header.IGMPv3Report(b)\n+\n+ if got, want := report.Checksum(), header.IGMPCalculateChecksum(header.IGMP(report)); got != want {\n+ t.Errorf(\"got report.Checksum() = %d, want = %d\", got, want)\n+ }\n+\nexpectedRecords := test.serializer.Records\nrecords := report.GroupAddressRecords()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/igmpv3.go",
"new_path": "pkg/tcpip/header/igmpv3.go",
"diff": "@@ -309,12 +309,13 @@ func (s *IGMPv3ReportSerializer) SerializeInto(b []byte) {\nb[igmpv3ReportReserved1Offset] = 0\nbinary.BigEndian.PutUint16(b[igmpv3ReportReserved2Offset:], 0)\nbinary.BigEndian.PutUint16(b[igmpv3ReportNumberOfGroupAddressRecordsOffset:], uint16(len(s.Records)))\n- b = b[igmpv3ReportGroupAddressRecordsOffset:]\n+ recordsBytes := b[igmpv3ReportGroupAddressRecordsOffset:]\nfor _, record := range s.Records {\nlen := record.Length()\n- record.SerializeInto(b[:len])\n- b = b[len:]\n+ record.SerializeInto(recordsBytes[:len])\n+ recordsBytes = recordsBytes[len:]\n}\n+ binary.BigEndian.PutUint16(b[igmpChecksumOffset:], IGMPCalculateChecksum(b))\n}\n// IGMPv3ReportGroupAddressRecord is an IGMPv3 record.\n@@ -436,6 +437,11 @@ func (r IGMPv3ReportGroupAddressRecord) Sources() (AddressIterator, bool) {\n// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\ntype IGMPv3Report []byte\n+// Checksum returns the checksum.\n+func (i IGMPv3Report) Checksum() uint16 {\n+ return binary.BigEndian.Uint16(i[igmpChecksumOffset:])\n+}\n+\n// IGMPv3ReportGroupAddressRecordIterator is an iterator over IGMPv3 Multicast\n// Address Records.\ntype IGMPv3ReportGroupAddressRecordIterator struct {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Populate IGMPv3 report checksum
Updates #8346
PiperOrigin-RevId: 501969927 |
259,975 | 17.01.2023 14:40:48 | 28,800 | 1ae11b17a9d7eb8124971f147aeb3c916ab7cf9d | Implement hostinet port forward
Implement support for sandboxes using hostinet for their network stack. Included
is an implementation of a connection serving a sandboxed process by forwarding
to a socket on a local port. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/portforward/BUILD",
"new_path": "runsc/boot/portforward/BUILD",
"diff": "@@ -7,13 +7,19 @@ go_library(\nsrcs = [\n\"portforward.go\",\n\"portforward_fd_rw.go\",\n+ \"portforward_hostinet.go\",\n],\ndeps = [\n+ \"//pkg/cleanup\",\n\"//pkg/context\",\n\"//pkg/errors/linuxerr\",\n+ \"//pkg/fd\",\n+ \"//pkg/fdnotifier\",\n+ \"//pkg/log\",\n\"//pkg/sentry/vfs\",\n\"//pkg/usermem\",\n\"//pkg/waiter\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n@@ -21,8 +27,13 @@ go_test(\nname = \"portforward_test\",\nsrcs = [\n\"portforward_fd_rw_test.go\",\n+ \"portforward_hostinet_test.go\",\n],\nlibrary = \":portforward\",\n+ tags = [\n+ \"requires-net:ipv4\",\n+ \"requires-net:loopback\",\n+ ],\ndeps = [\n\"//pkg/abi/linux\",\n\"//pkg/context\",\n@@ -31,5 +42,6 @@ go_test(\n\"//pkg/sentry/vfs\",\n\"//pkg/usermem\",\n\"//pkg/waiter\",\n+ \"@org_golang_x_sync//errgroup:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/portforward/portforward.go",
"new_path": "runsc/boot/portforward/portforward.go",
"diff": "// Package portforward holds the infrastructure to support the port forward command.\npackage portforward\n+\n+import (\n+ \"gvisor.dev/gvisor/pkg/context\"\n+)\n+\n+// portForwardConn is a port forwarding connection. It is used to manage the\n+// lifecycle of the connection and clean it up if necessary.\n+type portForwardConn interface {\n+ // start starts the connection goroutines and returns.\n+ start(ctx context.Context) error\n+ // close closes and cleans up the connection.\n+ close(ctx context.Context) error\n+ // cleanup registers a callback for when the connection closes.\n+ cleanup(func())\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/portforward/portforward_fd_rw_test.go",
"new_path": "runsc/boot/portforward/portforward_fd_rw_test.go",
"diff": "@@ -103,15 +103,12 @@ var _ vfs.FileDescriptionImpl = (*readerWriter)(nil)\n// Read implements vfs.FileDescriptionImpl.Read details for the parent mockFileDescription.\nfunc (rw *readerWriter) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {\nif rw.released {\n- return 0, nil\n- }\n- if rw.buf.Len() == 0 {\nreturn 0, io.EOF\n}\nbuf := make([]byte, dst.NumBytes())\n_, err := rw.buf.Read(buf)\nif err != nil {\n- return 0, err\n+ return 0, nil\n}\nn, err := dst.CopyOut(ctx, buf)\nreturn int64(n), err\n@@ -135,7 +132,9 @@ func (rw *readerWriter) EventRegister(we *waiter.Entry) error { return fmt.Error\nfunc (rw *readerWriter) EventUnregister(we *waiter.Entry) { panic(\"not implemented\") }\n// Release implements vfs.FileDescriptionImpl.Release details for the parent mockFileDescription.\n-func (rw *readerWriter) Release(context.Context) { rw.released = true }\n+func (rw *readerWriter) Release(context.Context) {\n+ rw.released = true\n+}\n// waiterRW implements mockFileDescriptionRWImpl. waiterRW works the same way as readerWriter above,\n// but it interleaves blocks in between Read and Write calls.\n@@ -168,7 +167,7 @@ func (w *waiterRW) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.Re\nw.waitMu.Lock()\ndefer w.waitMu.Unlock()\nif w.closed {\n- return 0, nil\n+ return 0, io.EOF\n}\nif w.shouldWait {\nreturn 0, linuxerr.ErrWouldBlock\n@@ -298,10 +297,16 @@ func TestReaderWriter(t *testing.T) {\ngot := []byte{}\nbuf := make([]byte, 4)\nfor {\n- _, err := readerWriter.Read(buf)\n+ n, err := readerWriter.Read(buf)\nif err == io.EOF {\nbreak\n}\n+ if err != nil {\n+ t.Fatalf(\"read failed: %v\", err)\n+ }\n+ if n == 0 {\n+ break\n+ }\ngot = append(got, buf...)\nbuf = buf[0:]\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/portforward/portforward_hostinet.go",
"diff": "+// Copyright 2023 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package portforward\n+\n+import (\n+ \"fmt\"\n+ \"io\"\n+ \"sync\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/cleanup\"\n+ \"gvisor.dev/gvisor/pkg/context\"\n+ fileDescriptor \"gvisor.dev/gvisor/pkg/fd\"\n+ \"gvisor.dev/gvisor/pkg/fdnotifier\"\n+ \"gvisor.dev/gvisor/pkg/log\"\n+ \"gvisor.dev/gvisor/pkg/sentry/vfs\"\n+ \"gvisor.dev/gvisor/pkg/waiter\"\n+)\n+\n+var (\n+ localHost = [4]byte{127, 0, 0, 1}\n+)\n+\n+// localHostSocket allows reading and writing to a local host socket for hostinet.\n+type localHostSocket struct {\n+ // wq is the WaitQueue registered with fdnotifier for this fd.\n+ wq waiter.Queue\n+ // fd is the file descriptor for the socket.\n+ fd *fileDescriptor.FD\n+}\n+\n+// newLocalHostSocket creates a hostSocket for an FD and registers the fd for\n+// notifications.\n+func newLocalHostSocket() (*localHostSocket, error) {\n+ // NOTE: Options must match sandbox seccomp filters. See filter/config.go\n+ fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM|unix.SOCK_NONBLOCK|unix.SOCK_CLOEXEC, 0)\n+ if err != nil {\n+ return nil, err\n+ }\n+ s := localHostSocket{\n+ fd: fileDescriptor.New(fd),\n+ }\n+ if err := fdnotifier.AddFD(int32(s.fd.FD()), &s.wq); err != nil {\n+ return nil, err\n+ }\n+ return &s, nil\n+}\n+\n+// Connect performs a blocking connect on the socket to an ipv4 address.\n+func (s *localHostSocket) Connect(port uint16) error {\n+ sockAddr := &unix.SockaddrInet4{\n+ Addr: localHost,\n+ Port: int(port),\n+ }\n+\n+ if err := unix.Connect(s.fd.FD(), sockAddr); err != nil {\n+ if err != unix.EINPROGRESS {\n+ return err\n+ }\n+\n+ // Connect is in progress. Wait for the socket to be writable.\n+ mask := waiter.WritableEvents\n+ waitEntry, notifyCh := waiter.NewChannelEntry(mask)\n+ s.eventRegister(&waitEntry)\n+ defer s.eventUnregister(&waitEntry)\n+\n+ // Wait for connect to succeed.\n+ // Check the current socket state and if not ready, wait for the event.\n+ if fdnotifier.NonBlockingPoll(int32(s.fd.FD()), mask)&mask == 0 {\n+ <-notifyCh\n+ }\n+\n+ // Call getsockopt to get the connection result.\n+ val, err := unix.GetsockoptInt(s.fd.FD(), unix.SOL_SOCKET, unix.SO_ERROR)\n+ if err != nil {\n+ return nil\n+ }\n+ if val != 0 {\n+ return unix.Errno(val)\n+ }\n+ }\n+\n+ return nil\n+}\n+\n+// Read implements io.Reader.Read. It performs a blocking read on the fd.\n+func (s *localHostSocket) Read(buf []byte) (int, error) {\n+ var ch chan struct{}\n+ var e waiter.Entry\n+ n, err := s.fd.Read(buf)\n+ for err == unix.EWOULDBLOCK {\n+ if ch == nil {\n+ e, ch = waiter.NewChannelEntry(waiter.ReadableEvents | waiter.WritableEvents | waiter.EventHUp | waiter.EventErr)\n+ // Register for when the endpoint is writable or disconnected.\n+ s.eventRegister(&e)\n+ defer s.eventUnregister(&e)\n+ }\n+ <-ch\n+ n, err = s.fd.Read(buf)\n+ }\n+ return n, err\n+}\n+\n+// Write implements io.Writer.Write. 
It performs a blocking write on the fd.\n+func (s *localHostSocket) Write(buf []byte) (int, error) {\n+ var ch chan struct{}\n+ var e waiter.Entry\n+ n, err := s.fd.Write(buf)\n+ for err == unix.EWOULDBLOCK {\n+ if ch == nil {\n+ e, ch = waiter.NewChannelEntry(waiter.WritableEvents | waiter.EventHUp | waiter.EventErr)\n+ // Register for when the endpoint is writable or disconnected.\n+ s.eventRegister(&e)\n+ defer s.eventUnregister(&e)\n+ }\n+ <-ch\n+ n, err = s.fd.Write(buf)\n+ }\n+ return n, err\n+}\n+\n+func (s *localHostSocket) eventRegister(e *waiter.Entry) {\n+ s.wq.EventRegister(e)\n+ fdnotifier.UpdateFD(int32(s.fd.FD()))\n+}\n+\n+func (s *localHostSocket) eventUnregister(e *waiter.Entry) {\n+ s.wq.EventUnregister(e)\n+ fdnotifier.UpdateFD(int32(s.fd.FD()))\n+}\n+\n+// Close closes the host socket and removes it from notifications.\n+func (s *localHostSocket) Close() {\n+ fdnotifier.RemoveFD(int32(s.fd.FD()))\n+ s.fd.Close()\n+}\n+\n+// hostinetportForwardConn is a hostinet port forwarding connection.\n+type hostinetPortForwardConn struct {\n+ // cid is the container id that this connection is connecting to.\n+ cid string\n+\n+ // Socket is the host socket connected to the application.\n+ socket *localHostSocket\n+ // fd is the FileDescription for the imported host UDS fd.\n+ fd *vfs.FileDescription\n+\n+ // status holds the status of the connection.\n+ status struct {\n+ sync.Mutex\n+ // started indicates if the connection is started or not.\n+ started bool\n+ // closed indicates if the connection is closed or not.\n+ closed bool\n+ }\n+\n+ // toDone is closed when the copy to the application port is finished.\n+ toDone chan struct{}\n+\n+ // fromDone is closed when the copy from the application socket is finished.\n+ fromDone chan struct{}\n+\n+ // cu is called when the connection finishes.\n+ cu cleanup.Cleanup\n+}\n+\n+// newHostinetPortForward starts port forwarding to the given port in hostinet\n+// mode.\n+func newHostinetPortForward(ctx context.Context, cid string, fd *vfs.FileDescription, port uint16) (portForwardConn, error) {\n+ log.Debugf(\"Handling hostinet port forwarding request for %s on port %d\", cid, port)\n+ appSocket, err := newLocalHostSocket()\n+ if err != nil {\n+ return nil, fmt.Errorf(\"hostinet socket: %w\", err)\n+ }\n+\n+ cu := cleanup.Make(func() { appSocket.Close() })\n+ defer cu.Clean()\n+\n+ if err := appSocket.Connect(port); err != nil {\n+ return nil, fmt.Errorf(\"hostinet connect: %w\", err)\n+ }\n+\n+ pfConn := hostinetPortForwardConn{\n+ cid: cid,\n+ socket: appSocket,\n+ fd: fd,\n+ toDone: make(chan struct{}),\n+ fromDone: make(chan struct{}),\n+ cu: cleanup.Cleanup{},\n+ }\n+\n+ cu.Release()\n+ return &pfConn, nil\n+}\n+\n+// Start implements portForwardConn.start.\n+func (c *hostinetPortForwardConn) start(ctx context.Context) error {\n+ c.status.Lock()\n+ defer c.status.Unlock()\n+\n+ if c.status.closed {\n+ return fmt.Errorf(\"already closed\")\n+ }\n+ if c.status.started {\n+ return fmt.Errorf(\"already started\")\n+ }\n+\n+ log.Debugf(\"Start forwarding to/from container %q and localhost\", c.cid)\n+\n+ importedRW := &fileDescriptionReadWriter{\n+ file: c.fd,\n+ }\n+\n+ go func() {\n+ _, _ = io.Copy(c.socket, importedRW)\n+ // Indicate that this goroutine has completed.\n+ close(c.toDone)\n+ // Make sure to clean up when one half of the copy has finished.\n+ c.close(ctx)\n+ }()\n+ go func() {\n+ _, _ = io.Copy(importedRW, c.socket)\n+ // Indicate that this goroutine has completed.\n+ close(c.fromDone)\n+ // Make sure to clean up when one 
half of the copy has finished.\n+ c.close(ctx)\n+ }()\n+\n+ c.status.started = true\n+\n+ return nil\n+}\n+\n+// close implements portForwardConn.close.\n+func (c *hostinetPortForwardConn) close(ctx context.Context) error {\n+ c.status.Lock()\n+\n+ // This should be a no op if the connection is already closed.\n+ if c.status.closed {\n+ c.status.Unlock()\n+ return nil\n+ }\n+\n+ log.Debugf(\"Stopping forwarding to/from container %q and localhost...\", c.cid)\n+\n+ // Closing the FileDescription and endpoint should make all\n+ // goroutines exit.\n+ c.fd.DecRef(ctx)\n+ c.socket.Close()\n+\n+ // Wait for one goroutine to finish or for a save event.\n+ <-c.toDone\n+ log.Debugf(\"Stopped forwarding one-half of copy for %q\", c.cid)\n+\n+ // Wait on the other goroutine.\n+ <-c.fromDone\n+ log.Debugf(\"Stopped forwarding to/from container %q and localhost\", c.cid)\n+\n+ c.status.closed = true\n+\n+ c.status.Unlock()\n+\n+ // Call the cleanup object.\n+ c.cu.Clean()\n+\n+ return nil\n+}\n+\n+// cleanup implements portForwardConn.cleanup.\n+func (c *hostinetPortForwardConn) cleanup(f func()) {\n+ c.cu.Add(f)\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/portforward/portforward_hostinet_test.go",
"diff": "+// Copyright 2023 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package portforward\n+\n+import (\n+ \"bytes\"\n+ \"fmt\"\n+ \"io\"\n+ \"net\"\n+ \"reflect\"\n+ \"sync\"\n+ \"testing\"\n+\n+ \"golang.org/x/sync/errgroup\"\n+ \"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n+ \"gvisor.dev/gvisor/pkg/sentry/contexttest\"\n+ \"gvisor.dev/gvisor/pkg/sentry/vfs\"\n+ \"gvisor.dev/gvisor/pkg/usermem\"\n+ \"gvisor.dev/gvisor/pkg/waiter\"\n+)\n+\n+func TestLocalHostSocket(t *testing.T) {\n+\n+ clientData := append(\n+ []byte(\"do what must be done\\n\"),\n+ []byte(\"do not hesitate\\n\")...,\n+ )\n+\n+ serverData := append(\n+ []byte(\"commander cody...the time has come\\n\"),\n+ []byte(\"execute order 66\\n\")...,\n+ )\n+\n+ l, err := net.Listen(\"tcp\", \":0\")\n+ if err != nil {\n+ t.Fatalf(\"net.Listen failed: %v\", err)\n+ }\n+ defer l.Close()\n+\n+ port := l.Addr().(*net.TCPAddr).Port\n+\n+ var g errgroup.Group\n+\n+ g.Go(func() error {\n+ conn, err := l.Accept()\n+ if err != nil {\n+ return fmt.Errorf(\"could not accept connection: %v\", err)\n+ }\n+ defer conn.Close()\n+\n+ data := make([]byte, 1024)\n+ recLen, err := conn.Read(data)\n+ if err != nil {\n+ return fmt.Errorf(\"could not read data: %v\", err)\n+ }\n+\n+ if !reflect.DeepEqual(data[:recLen], clientData) {\n+ return fmt.Errorf(\"server mismatch data recieved: got: %s want: %s\", data[:recLen], clientData)\n+ }\n+\n+ sentLen, err := conn.Write(serverData)\n+ if err != nil {\n+ return fmt.Errorf(\"could not write data: %v\", err)\n+ }\n+\n+ if sentLen != len(serverData) {\n+ return fmt.Errorf(\"server mismatch data sent: got: %d want: %d\", sentLen, len(serverData))\n+ }\n+\n+ return nil\n+ })\n+\n+ g.Go(func() error {\n+ sock, err := newLocalHostSocket()\n+ if err != nil {\n+ return fmt.Errorf(\"could not create local host socket: %v\", err)\n+ }\n+ defer sock.Close()\n+ if err := sock.Connect(uint16(port)); err != nil {\n+ return fmt.Errorf(\"could not connect to local host socket: %v\", err)\n+ }\n+ for i := 0; i < len(clientData); {\n+ n, err := sock.Write(clientData[i:])\n+ if err != nil {\n+ return fmt.Errorf(\"could not write to local host socket: %v\", err)\n+ }\n+ i += n\n+ }\n+\n+ data := make([]byte, 1024)\n+ dataLen := 0\n+ for dataLen < len(serverData) {\n+ n, err := sock.Read(data[dataLen:])\n+ if err != nil {\n+ t.Fatalf(\"could not read from local host socket: %v\", err)\n+ }\n+ dataLen += n\n+ }\n+\n+ if !reflect.DeepEqual(data[:dataLen], serverData) {\n+ return fmt.Errorf(\"server mismatch data received: got: %s want: %s\", data[:dataLen], clientData)\n+ }\n+ return nil\n+ })\n+\n+ if err := g.Wait(); err != nil {\n+ t.Fatal(err)\n+ }\n+}\n+\n+func newMockSocketPair() (*mockEndpoint, *mockEndpoint) {\n+ client := &mockEndpoint{}\n+ server := &mockEndpoint{other: client}\n+ client.other = server\n+ return client, server\n+}\n+\n+type mockEndpoint struct {\n+ vfs.FileDescriptionDefaultImpl\n+ vfs.NoLockFD\n+ 
vfs.DentryMetadataFileDescriptionImpl\n+ other *mockEndpoint\n+ readBuf bytes.Buffer\n+ mu sync.Mutex\n+ released bool\n+ queue waiter.Queue\n+}\n+\n+var _ vfs.FileDescriptionImpl = (*mockEndpoint)(nil)\n+\n+// Read implements vfs.FileDescriptionImpl.Read details for the parent mockFileDescription.\n+func (s *mockEndpoint) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {\n+ s.mu.Lock()\n+ defer s.mu.Unlock()\n+ if s.released {\n+ return 0, io.EOF\n+ }\n+ if s.readBuf.Len() == 0 {\n+ return 0, linuxerr.ErrWouldBlock\n+ }\n+ buf := s.readBuf.Next(s.readBuf.Len())\n+ n, err := dst.CopyOut(ctx, buf)\n+ s.queue.Notify(waiter.WritableEvents)\n+ return int64(n), err\n+}\n+\n+// Write implements vfs.FileDescriptionImpl.Write details for the parent mockFileDescription.\n+func (s *mockEndpoint) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {\n+ return s.other.write(ctx, src, opts)\n+}\n+\n+func (s *mockEndpoint) write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {\n+ s.mu.Lock()\n+ defer s.mu.Unlock()\n+ if s.released {\n+ return 0, io.EOF\n+ }\n+ buf := make([]byte, src.NumBytes())\n+ n, err := src.CopyIn(ctx, buf)\n+ if err != nil {\n+ return 0, err\n+ }\n+ n, err = s.readBuf.Write(buf[:n])\n+ s.queue.Notify(waiter.ReadableEvents)\n+ return int64(n), err\n+}\n+\n+func (s *mockEndpoint) IsReadable() bool {\n+ s.mu.Lock()\n+ defer s.mu.Unlock()\n+ if s.released {\n+ return false\n+ }\n+ return s.readBuf.Len() > 0\n+}\n+\n+func (s *mockEndpoint) IsWritable() bool {\n+ return s.other.isWritable()\n+}\n+\n+func (s *mockEndpoint) isWritable() bool {\n+ s.mu.Lock()\n+ defer s.mu.Unlock()\n+ return !s.released\n+}\n+\n+// EventRegister implements vfs.FileDescriptionImpl.EventRegister details for the parent mockFileDescription.\n+func (s *mockEndpoint) EventRegister(we *waiter.Entry) error {\n+ s.mu.Lock()\n+ defer s.mu.Unlock()\n+ s.queue.EventRegister(we)\n+ return nil\n+}\n+\n+// EventUnregister implements vfs.FileDescriptionImpl.Unregister details for the parent mockFileDescription.\n+func (s *mockEndpoint) EventUnregister(we *waiter.Entry) {\n+ s.mu.Lock()\n+ defer s.mu.Unlock()\n+ s.queue.EventUnregister(we)\n+}\n+\n+// Release implements vfs.FileDescriptionImpl.Release details for the parent mockFileDescription.\n+func (s *mockEndpoint) Release(context.Context) {\n+ s.mu.Lock()\n+ defer s.mu.Unlock()\n+ s.queue.Notify(waiter.ReadableEvents)\n+ s.released = true\n+}\n+\n+var responses = map[string]string{\n+ \"PING\": \"PONG\",\n+ \"DING\": \"DONG\",\n+ \"TING\": \"TONG\",\n+}\n+\n+func TestHostinetPortForwardConn(t *testing.T) {\n+ ctx := contexttest.Context(t)\n+ clientSock, server := newMockSocketPair()\n+ defer server.Release(ctx)\n+ client, err := newMockFileDescription(ctx, clientSock)\n+ if err != nil {\n+ t.Fatalf(\"newMockFileDescription failed: %v\", err)\n+ }\n+ l, err := net.Listen(\"tcp\", \":0\")\n+ if err != nil {\n+ t.Fatalf(\"net.Listen failed: %v\", err)\n+ }\n+ defer l.Close()\n+ port := l.Addr().(*net.TCPAddr).Port\n+ portForwardConn, err := newHostinetPortForward(ctx, \"\", client, uint16(port))\n+ if err != nil {\n+ t.Fatalf(\"newHostinetPortForward failed: %v\", err)\n+ }\n+ if err := portForwardConn.start(ctx); err != nil {\n+ t.Fatalf(\"portForwardConn.start failed: %v\", err)\n+ }\n+ conn, err := l.Accept()\n+ if err != nil {\n+ t.Fatalf(\"l.Accept failed: %v\", err)\n+ }\n+ defer conn.Close()\n+ buf := make([]byte, 4)\n+ for req, resp := range responses 
{\n+\n+ for {\n+ if server.IsWritable() {\n+ break\n+ }\n+ }\n+ _, err := server.Write(ctx, usermem.BytesIOSequence([]byte(req)), vfs.WriteOptions{})\n+ if err != nil {\n+ t.Fatalf(\"file.Write failed: %v\", err)\n+ }\n+\n+ read := 0\n+ for {\n+ n, err := conn.Read([]byte(buf)[read:])\n+ if err != nil && !linuxerr.Equals(linuxerr.ErrWouldBlock, err) {\n+ t.Fatalf(\"conn.Write failed: %v\", err)\n+ }\n+ read += n\n+ if read >= len(resp) {\n+ break\n+ }\n+ }\n+\n+ if string(buf) != req {\n+ t.Fatalf(\"read mismatch: got: %s want: %s\", string(buf), req)\n+ }\n+\n+ written := 0\n+ for i := 0; i < 4; i++ {\n+ n, err := conn.Write([]byte(resp)[written:])\n+ if err != nil && !linuxerr.Equals(linuxerr.ErrWouldBlock, err) {\n+ t.Fatalf(\"conn.Write failed: %v\", err)\n+ }\n+ written += n\n+ if written >= len(resp) {\n+ break\n+ }\n+ }\n+\n+ for {\n+ if server.IsReadable() {\n+ break\n+ }\n+ }\n+\n+ _, err = server.Read(ctx, usermem.BytesIOSequence([]byte(buf[:4])), vfs.ReadOptions{})\n+ if err != nil && !linuxerr.Equals(linuxerr.ErrWouldBlock, err) {\n+ t.Fatalf(\"file.Read failed: %v\", err)\n+ }\n+\n+ if string(buf) != resp {\n+ t.Fatalf(\"write mismatch: got: %s want: %s\", string(buf), resp)\n+ }\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement hostinet port forward
Implement support for sandboxes using hostinet for their network stack. Included
is an implementation of a connection serving a sandboxed process, forwarding
to a socket on a local port.
PiperOrigin-RevId: 502692640 |
259,909 | 17.01.2023 15:05:17 | 28,800 | d3c86781abdff20e4d6675d93268f9befca028a4 | Fix flaky link resolution test.
monitorableEndpoint was flaking because of a race between the
pending packet from the failed write and the new write. We don't
actually need the new write to show that link resolution worked. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tests/integration/link_resolution_test.go",
"new_path": "pkg/tcpip/tests/integration/link_resolution_test.go",
"diff": "@@ -1828,7 +1828,7 @@ func TestUpdateCachedNeighborEntry(t *testing.T) {\n// Send packet to neighbor (start link resolution & resolve, then send\n// packet). Send twice to use cached address the second time.\nfor i := 0; i < 2; i++ {\n- writePacket(t, r)\n+ go writePacket(t, r)\nif err := host1NICMonitorable.waitForLinkAddress(utils.LinkAddr2, time.Second); err != nil {\nt.Fatalf(\"host1NIC.waitForLinkAddress(%s): %s\", utils.LinkAddr2, err)\n}\n@@ -1838,8 +1838,8 @@ func TestUpdateCachedNeighborEntry(t *testing.T) {\nhost1Stack.RemoveNeighbor(host1NICID, header.IPv4ProtocolNumber, neighborAddr)\nhost2Stack.DisableNIC(host2NICID)\n- // Send packet to neighbor that's no longer reachable (should fail).\n- writePacket(t, r)\n+ // Send a packet to the neighbor that's no longer reachable (should fail).\n+ go writePacket(t, r)\nif err := host1NICMonitorable.waitForLinkAddress(utils.LinkAddr2, time.Second); err == nil {\nt.Fatalf(\"got host1NIC.waitForLinkAddress(%s) = nil, want err\", utils.LinkAddr2)\n}\n@@ -1848,8 +1848,7 @@ func TestUpdateCachedNeighborEntry(t *testing.T) {\nhost2Stack.EnableNIC(host2NICID)\nhost2NICSettable.setLinkAddress(utils.LinkAddr3)\n- // Send packet to neighbor (start link resolution and then send packet).\n- writePacket(t, r)\n+ // Pending packet should eventually reach the new neighbor.\nif err := host1NICMonitorable.waitForLinkAddress(utils.LinkAddr3, 5*time.Second); err != nil {\nt.Fatalf(\"host1NIC.waitForLinkAddress(%s): %s\", utils.LinkAddr3, err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix flaky link resolution test.
monitorableEndpoint was flaking because of a race between the
pending packet from the failed write and the new write. We don't
actually need the new write to show that link resolution worked.
PiperOrigin-RevId: 502699348 |
259,909 | 17.01.2023 15:59:04 | 28,800 | 3c93bb10405f2d2e42ccfe8e65ae63c9857879ee | Defer kernfs openflag handling to inode implementations.
Some implementations handle more flags than others, so it doesn't
make sense to have one set of rules for all.
This change should functionally be a no-op. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go",
"new_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go",
"diff": "@@ -523,6 +523,8 @@ func (d *dir) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *auth.Crede\n// Open implements kernfs.Inode.Open.\nfunc (d *dir) Open(ctx context.Context, rp *vfs.ResolvingPath, kd *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {\n+ opts.Flags &= linux.O_ACCMODE | linux.O_CREAT | linux.O_EXCL | linux.O_TRUNC |\n+ linux.O_DIRECTORY | linux.O_NOFOLLOW | linux.O_NONBLOCK | linux.O_NOCTTY\nfd, err := kernfs.NewGenericDirectoryFD(rp.Mount(), kd, &d.OrderedChildren, &d.locks, &opts, kernfs.GenericDirectoryFDOptions{\nSeekEnd: kernfs.SeekEndStaticEntries,\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/devpts/devpts.go",
"new_path": "pkg/sentry/fsimpl/devpts/devpts.go",
"diff": "@@ -222,6 +222,8 @@ func (i *rootInode) masterClose(ctx context.Context, t *Terminal) {\n// Open implements kernfs.Inode.Open.\nfunc (i *rootInode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {\n+ opts.Flags &= linux.O_ACCMODE | linux.O_CREAT | linux.O_EXCL | linux.O_TRUNC |\n+ linux.O_DIRECTORY | linux.O_NOFOLLOW | linux.O_NONBLOCK | linux.O_NOCTTY\nfd, err := kernfs.NewGenericDirectoryFD(rp.Mount(), d, &i.OrderedChildren, &i.locks, &opts, kernfs.GenericDirectoryFDOptions{\nSeekEnd: kernfs.SeekEndStaticEntries,\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/fusefs.go",
"new_path": "pkg/sentry/fsimpl/fuse/fusefs.go",
"diff": "@@ -392,6 +392,9 @@ func (i *inode) CheckPermissions(ctx context.Context, creds *auth.Credentials, a\n// Open implements kernfs.Inode.Open.\nfunc (i *inode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {\n+ opts.Flags &= linux.O_ACCMODE | linux.O_CREAT | linux.O_EXCL | linux.O_TRUNC |\n+ linux.O_DIRECTORY | linux.O_NOFOLLOW | linux.O_NONBLOCK | linux.O_NOCTTY |\n+ linux.O_APPEND\nisDir := i.InodeAttrs.Mode().IsDir()\n// return error if specified to open directory but inode is not a directory.\nif !isDir && opts.Mode.IsDir() {\n@@ -520,6 +523,9 @@ func (*inode) IterDirents(ctx context.Context, mnt *vfs.Mount, callback vfs.Iter\n// NewFile implements kernfs.Inode.NewFile.\nfunc (i *inode) NewFile(ctx context.Context, name string, opts vfs.OpenOptions) (kernfs.Inode, error) {\n+ opts.Flags &= linux.O_ACCMODE | linux.O_CREAT | linux.O_EXCL | linux.O_TRUNC |\n+ linux.O_DIRECTORY | linux.O_NOFOLLOW | linux.O_NONBLOCK | linux.O_NOCTTY |\n+ linux.O_APPEND\nkernelTask := kernel.TaskFromContext(ctx)\nif kernelTask == nil {\nlog.Warningf(\"fusefs.Inode.NewFile: couldn't get kernel task from context\", i.nodeID)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/kernfs/filesystem.go",
"new_path": "pkg/sentry/fsimpl/kernfs/filesystem.go",
"diff": "@@ -479,12 +479,6 @@ func (fs *Filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\n// OpenAt implements vfs.FilesystemImpl.OpenAt.\nfunc (fs *Filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.OpenOptions) (*vfs.FileDescription, error) {\n- // Filter out flags that are not supported by kernfs. O_DIRECTORY and\n- // O_NOFOLLOW have no effect here (they're handled by VFS by setting\n- // appropriate bits in rp), but are returned by\n- // FileDescriptionImpl.StatusFlags().\n- opts.Flags &= linux.O_ACCMODE | linux.O_CREAT | linux.O_EXCL | linux.O_TRUNC |\n- linux.O_DIRECTORY | linux.O_NOFOLLOW | linux.O_NONBLOCK | linux.O_NOCTTY\nats := vfs.AccessTypesForOpenFlags(&opts)\n// Do not create new file.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/pipefs/pipefs.go",
"new_path": "pkg/sentry/fsimpl/pipefs/pipefs.go",
"diff": "@@ -158,6 +158,8 @@ func (i *inode) SetStat(ctx context.Context, vfsfs *vfs.Filesystem, creds *auth.\n// Open implements kernfs.Inode.Open.\nfunc (i *inode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {\n+ opts.Flags &= linux.O_ACCMODE | linux.O_CREAT | linux.O_EXCL | linux.O_TRUNC |\n+ linux.O_DIRECTORY | linux.O_NOFOLLOW | linux.O_NONBLOCK | linux.O_NOCTTY\nreturn i.pipe.Open(ctx, rp.Mount(), d.VFSDentry(), opts.Flags, &i.locks)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/sys/sys.go",
"new_path": "pkg/sentry/fsimpl/sys/sys.go",
"diff": "@@ -224,6 +224,8 @@ func (*dir) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.Set\n// Open implements kernfs.Inode.Open.\nfunc (d *dir) Open(ctx context.Context, rp *vfs.ResolvingPath, kd *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {\n+ opts.Flags &= linux.O_ACCMODE | linux.O_CREAT | linux.O_EXCL | linux.O_TRUNC |\n+ linux.O_DIRECTORY | linux.O_NOFOLLOW | linux.O_NONBLOCK | linux.O_NOCTTY\nfd, err := kernfs.NewGenericDirectoryFD(rp.Mount(), kd, &d.OrderedChildren, &d.locks, &opts, kernfs.GenericDirectoryFDOptions{\nSeekEnd: kernfs.SeekEndStaticEntries,\n})\n"
}
] | Go | Apache License 2.0 | google/gvisor | Defer kernfs openflag handling to inode implementations.
Some implementations handle more flags than others, so it doesn't
make sense to have one set of rules for all.
This change should functionally be a no-op.
PiperOrigin-RevId: 502712415 |
259,853 | 18.01.2023 07:32:42 | 28,800 | 2a56495dfaef6add3fa81e9cccd5623bb4c40d80 | test: check that we can change registers via ptrace | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/ptrace.go",
"new_path": "pkg/sentry/kernel/ptrace.go",
"diff": "@@ -1160,8 +1160,6 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data hostarch.Addr) error {\nreturn err\n}\n- t.p.PullFullState(t.MemoryManager().AddressSpace(), t.Arch())\n-\nar := ars.Head()\nn, err := target.Arch().PtraceGetRegSet(uintptr(addr), &usermem.IOReadWriter{\nCtx: t,\n@@ -1189,13 +1187,10 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data hostarch.Addr) error {\nreturn err\n}\n- mm := t.MemoryManager()\n- t.p.PullFullState(mm.AddressSpace(), t.Arch())\n-\nar := ars.Head()\nn, err := target.Arch().PtraceSetRegSet(uintptr(addr), &usermem.IOReadWriter{\nCtx: t,\n- IO: mm,\n+ IO: t.MemoryManager(),\nAddr: ar.Start,\nOpts: usermem.IOOpts{\nAddressSpaceActive: true,\n@@ -1204,7 +1199,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data hostarch.Addr) error {\nif err != nil {\nreturn err\n}\n- t.p.FullStateChanged()\n+ target.p.FullStateChanged()\nar.End -= hostarch.Addr(n)\nreturn t.CopyOutIovecs(data, hostarch.AddrRangeSeqOf(ar))\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/ptrace_amd64.go",
"new_path": "pkg/sentry/kernel/ptrace_amd64.go",
"diff": "@@ -73,6 +73,9 @@ func (t *Task) ptraceArch(target *Task, req int64, addr, data hostarch.Addr) err\nAddressSpaceActive: true,\n},\n})\n+ if err == nil {\n+ target.p.FullStateChanged()\n+ }\nreturn err\ncase linux.PTRACE_SETFPREGS:\n@@ -85,6 +88,9 @@ func (t *Task) ptraceArch(target *Task, req int64, addr, data hostarch.Addr) err\nAddressSpaceActive: true,\n},\n}, len(*s))\n+ if err == nil {\n+ target.p.FullStateChanged()\n+ }\nreturn err\ndefault:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_run.go",
"new_path": "pkg/sentry/kernel/task_run.go",
"diff": "@@ -246,6 +246,9 @@ func (app *runApp) execute(t *Task) taskRunState {\nif clearSinglestep {\nt.Arch().ClearSingleStep()\n}\n+ if t.hasTracer() {\n+ t.p.PullFullState(t.MemoryManager().AddressSpace(), t.Arch())\n+ }\nswitch err {\ncase nil:\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/ptrace.cc",
"new_path": "test/syscalls/linux/ptrace.cc",
"diff": "@@ -1400,6 +1400,93 @@ TEST(PtraceTest, GetRegSet) {\nEXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)\n<< \" status \" << status;\n}\n+#if defined(__x86_64__)\n+#define SYSNO_STR1(x) #x\n+#define SYSNO_STR(x) SYSNO_STR1(x)\n+\n+// Check that ptrace works properly when a target process is stopped on a\n+// system call that is handled via the fast path.\n+TEST(PtraceTest, ChangeRegSetInOptSyscall) {\n+ constexpr uint64_t kTestRet = 0x111;\n+ constexpr uint64_t kTestRbx1 = 0x333;\n+ constexpr uint64_t kTestRbx2 = 0x333;\n+ constexpr uint64_t kTestRdi = 0x555;\n+\n+ pid_t const child_pid = fork();\n+ if (child_pid == 0) {\n+ // In child process.\n+ uint64_t ret, rbx = 0, rdi = kTestRdi;\n+\n+ // Enable tracing.\n+ TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, 0, 0) == 0);\n+ MaybeSave();\n+\n+ // Use kill explicitly because we check the syscall argument register below.\n+ kill(getpid(), SIGSTOP);\n+\n+ // A tested syscall has to be triggered twice, because the first call\n+ // doesn't trigger the fast path.\n+ for (int i = 0; i < 2; i++) {\n+ if (i == 1) rbx = kTestRbx1;\n+ __asm__ __volatile__(\n+ \"movl $\" SYSNO_STR(SYS_getpid) \", %%eax\\n\"\n+ \"syscall\\n\"\n+ : \"=a\"(ret), \"=b\"(rbx)\n+ : \"b\"(rbx), \"D\"(rdi)\n+ : \"rcx\", \"r11\", \"memory\");\n+ }\n+\n+ TEST_CHECK(ret == kTestRet);\n+ TEST_CHECK(rbx == kTestRbx2);\n+\n+ _exit(0);\n+ }\n+ // In parent process.\n+ ASSERT_THAT(child_pid, SyscallSucceeds());\n+\n+ // Wait for the child to send itself SIGSTOP and enter signal-delivery-stop.\n+ int status;\n+ ASSERT_THAT(waitpid(child_pid, &status, 0),\n+ SyscallSucceedsWithValue(child_pid));\n+ EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)\n+ << \" status \" << status;\n+\n+ // Stop the child in the second getpid syscall.\n+ for (int i = 0; i < 2; i++) {\n+ ASSERT_THAT(ptrace(PTRACE_SYSEMU, child_pid, 0, 0), SyscallSucceeds());\n+ ASSERT_THAT(waitpid(child_pid, &status, 0),\n+ SyscallSucceedsWithValue(child_pid));\n+ }\n+\n+ // Get the general registers.\n+ struct user_regs_struct regs;\n+ struct iovec iov;\n+ iov.iov_base = ®s;\n+ iov.iov_len = sizeof(regs);\n+ EXPECT_THAT(ptrace(PTRACE_GETREGSET, child_pid, NT_PRSTATUS, &iov),\n+ SyscallSucceeds());\n+\n+ // Read exactly the full register set.\n+ EXPECT_EQ(iov.iov_len, sizeof(regs));\n+\n+ EXPECT_EQ(regs.rax, -ENOSYS);\n+ EXPECT_EQ(regs.orig_rax, SYS_getpid);\n+ EXPECT_EQ(regs.rdi, kTestRdi);\n+ EXPECT_EQ(regs.rbx, kTestRbx1);\n+\n+ regs.rbx = kTestRbx2;\n+ regs.rax = kTestRet;\n+ EXPECT_THAT(ptrace(PTRACE_SETREGSET, child_pid, NT_PRSTATUS, &iov),\n+ SyscallSucceeds());\n+\n+ ASSERT_THAT(ptrace(PTRACE_CONT, child_pid, 0, 0), SyscallSucceeds());\n+ ASSERT_THAT(waitpid(child_pid, &status, 0),\n+ SyscallSucceedsWithValue(child_pid));\n+ // Let's see that process exited normally.\n+ EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)\n+ << \" status \" << status;\n+}\n+#endif\nTEST(PtraceTest, AttachingConvertsGroupStopToPtraceStop) {\npid_t const child_pid = fork();\n"
}
] | Go | Apache License 2.0 | google/gvisor | test: check that we can change registers via ptrace
PiperOrigin-RevId: 502871694 |
259,985 | 19.01.2023 14:06:31 | 28,800 | 7d68beb26149dde9038c1bc727cca80dddb10810 | Allow construction of mount namespace from existing filesystem. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/mount.go",
"new_path": "pkg/sentry/vfs/mount.go",
"diff": "@@ -207,13 +207,20 @@ func (vfs *VirtualFilesystem) NewMountNamespace(ctx context.Context, creds *auth\nif err != nil {\nreturn nil, err\n}\n+ return vfs.NewMountNamespaceFrom(ctx, creds, fs, root, opts), nil\n+}\n+\n+// NewMountNamespaceFrom constructs a new mount namespace from an existing\n+// filesystem and its root dentry. This is similar to NewMountNamespace, but\n+// uses an existing filesystem instead of constructing a new one.\n+func (vfs *VirtualFilesystem) NewMountNamespaceFrom(ctx context.Context, creds *auth.Credentials, fs *Filesystem, root *Dentry, opts *MountOptions) *MountNamespace {\nmntns := &MountNamespace{\nOwner: creds.UserNamespace,\nmountpoints: make(map[*Dentry]uint32),\n}\nmntns.InitRefs()\nmntns.root = newMount(vfs, fs, root, mntns, opts)\n- return mntns, nil\n+ return mntns\n}\n// NewFilesystem creates a new filesystem object not yet associated with any\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow construction of mount namespace from existing filesystem.
PiperOrigin-RevId: 503260418 |
259,868 | 19.01.2023 14:38:04 | 28,800 | 5a6a36d22230d6fc8cbe27fc2cc93173319d3c9d | `runsc` metric server: Periodically add sandboxes that failed registration.
The metric server periodically scans the root directory and picks up all
the containers that requested instrumentation.
This change is part of a series of changes to support Prometheus-style metrics
in `runsc`. | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/metric_server.go",
"new_path": "runsc/cmd/metric_server.go",
"diff": "@@ -69,6 +69,7 @@ const (\ntype servedSandbox struct {\nrootContainerID container.FullID\nrootDir string\n+ metricServerAddr string\nextraLabels map[string]string\n// mu protects the fields below.\n@@ -108,7 +109,15 @@ func (s *servedSandbox) load() (*sandbox.Sandbox, *prometheus.Verifier, error) {\nif err != nil {\nreturn nil, nil, err\n}\n+ sandboxMetricAddr := strings.ReplaceAll(cont.Sandbox.MetricServerAddress, \"%RUNTIME_ROOT%\", s.rootDir)\n+ if sandboxMetricAddr == \"\" {\n+ return nil, nil, errors.New(\"sandbox did not request instrumentation\")\n+ }\n+ if sandboxMetricAddr != s.metricServerAddr {\n+ return nil, nil, fmt.Errorf(\"sandbox requested instrumentation by a metric server running at a different address (sandbox wants %q, this metric server serves %q)\", sandboxMetricAddr, s.metricServerAddr)\n+ }\n// Update label data as read from the state file.\n+ // Do not store empty labels.\nauthoritativeLabels := cont.Sandbox.PrometheusLabels()\nfor _, label := range []string{sandbox.SandboxIDLabel, sandbox.PodNameLabel, sandbox.NamespaceLabel} {\ns.extraLabels[label] = authoritativeLabels[label]\n@@ -166,6 +175,7 @@ func queryMetrics(ctx context.Context, sand *sandbox.Sandbox, verifier *promethe\n// MetricServer implements subcommands.Command for the \"metric-server\" command.\ntype MetricServer struct {\nrootDir string\n+ address string\nexporterPrefix string\nstartTime time.Time\nrand *rand.Rand\n@@ -188,6 +198,11 @@ type MetricServer struct {\n// sandboxes is the list of sandboxes we serve metrics for.\nsandboxes map[container.FullID]*servedSandbox\n+ // lastStateFileStat maps container full IDs to the last observed stat() of their state file.\n+ // This is used to monitor for sandboxes in the background. If a sandbox's state file matches this\n+ // info, we can assume that the last background scan already looked at it.\n+ lastStateFileStat map[container.FullID]os.FileInfo\n+\n// numSandboxes counts the number of sandboxes that have ever been registered on this server.\n// Used to distinguish between the case where this metrics serve has sat there doing nothing\n// because no sandbox ever registered against it (which is unexpected), vs the case where it has\n@@ -223,9 +238,40 @@ func (*MetricServer) Usage() string {\n// SetFlags implements subcommands.Command.SetFlags.\nfunc (m *MetricServer) SetFlags(f *flag.FlagSet) {}\n-// purgeSandboxesLocked removes sandboxes that are no longer running from m.sandboxes.\n+// sufficientlyEqualStats returns whether the given FileInfo's are sufficiently\n+// equal to assume the file they represent has not changed between the time\n+// each FileInfo was obtained.\n+func sufficientlyEqualStats(s1, s2 os.FileInfo) bool {\n+ if !s1.ModTime().Equal(s2.ModTime()) {\n+ return false\n+ }\n+ if s1.Size() != s2.Size() {\n+ return false\n+ }\n+ statT1, ok1 := s1.Sys().(*syscall.Stat_t)\n+ statT2, ok2 := s2.Sys().(*syscall.Stat_t)\n+ if ok1 != ok2 {\n+ return false\n+ }\n+ if ok1 && ok2 {\n+ if statT1.Dev != statT2.Dev {\n+ return false\n+ }\n+ if statT1.Ino != statT2.Ino {\n+ return false\n+ }\n+ }\n+ return true\n+}\n+\n+// refreshSandboxesLocked removes sandboxes that are no longer running from m.sandboxes, and\n+// adds sandboxes found in the root directory that do request instrumentation.\n// Preconditions: m.mu is locked.\n-func (m *MetricServer) purgeSandboxesLocked() {\n+func (m *MetricServer) refreshSandboxesLocked() {\n+ if m.shuttingDown {\n+ // Do nothing to avoid log spam.\n+ return\n+ }\nsandboxIDs, err := 
container.ListSandboxes(m.rootDir)\nif err != nil {\nlog.Warningf(\"Cannot list containers in root directory %s, it has likely gone away: %v.\", m.rootDir, err)\n@@ -247,6 +293,86 @@ func (m *MetricServer) purgeSandboxesLocked() {\ndelete(m.sandboxes, sandboxID)\n}\n}\n+ newSandboxIDs := make(map[container.FullID]bool, len(sandboxIDs))\n+ for _, sid := range sandboxIDs {\n+ if _, found := m.sandboxes[sid]; found {\n+ continue\n+ }\n+ newSandboxIDs[sid] = true\n+ }\n+ for sid := range m.lastStateFileStat {\n+ if _, found := newSandboxIDs[sid]; !found {\n+ delete(m.lastStateFileStat, sid)\n+ }\n+ }\n+ for sid := range newSandboxIDs {\n+ stateFile := container.StateFile{\n+ RootDir: m.rootDir,\n+ ID: sid,\n+ }\n+ stat, err := stateFile.Stat()\n+ if err != nil {\n+ log.Warningf(\"Failed to stat() container state file for sandbox %q: %v\", sid, err)\n+ continue\n+ }\n+ if existing, found := m.lastStateFileStat[sid]; found {\n+ // We already tried to stat this sandbox but decided not to pick it up.\n+ // Check if the state file changed since. If it didn't, we don't want to\n+ // try again.\n+ if sufficientlyEqualStats(existing, stat) {\n+ continue\n+ }\n+ log.Infof(\"State file for sandbox %q has changed since we last looked at it; will try to reload it.\", sid)\n+ delete(m.lastStateFileStat, sid)\n+ }\n+ // If we get here, we either haven't seen this sandbox before, or we saw it\n+ // and it has disappeared (which means it is new in this iteration), or we\n+ // saw it before but its state file changed. Either way, we want to try\n+ // loading it and see if it wants instrumentation.\n+ cont, err := container.Load(m.rootDir, sid, container.LoadOpts{\n+ Exact: true,\n+ SkipCheck: true,\n+ TryLock: container.TryAcquire,\n+ RootContainer: true,\n+ })\n+ if err != nil {\n+ if err == container.ErrStateFileLocked {\n+ // This error is OK and shouldn't generate log spam. The sandbox is probably in the middle\n+ // of being created.\n+ continue\n+ }\n+ log.Warningf(\"Cannot load state file for sandbox %q: %v\", sid, err)\n+ continue\n+ }\n+ // This is redundant with one of the checks performed below in servedSandbox.load(), but this\n+ // avoids log spam for the non-error case of sandboxes that didn't request instrumentation.\n+ sandboxMetricAddr := strings.ReplaceAll(cont.Sandbox.MetricServerAddress, \"%RUNTIME_ROOT%\", m.rootDir)\n+ if sandboxMetricAddr != m.address {\n+ m.lastStateFileStat[sid] = stat\n+ continue\n+ }\n+ m.numSandboxes++\n+ served := &servedSandbox{\n+ rootContainerID: sid,\n+ rootDir: m.rootDir,\n+ metricServerAddr: m.address,\n+ extraLabels: map[string]string{\n+ sandbox.SandboxIDLabel: sid.SandboxID,\n+ iterationIDLabel: fmt.Sprintf(\"%d\", m.rand.Uint64()),\n+ },\n+ }\n+ // Best-effort attempt to load the state file instantly.\n+ // This may legitimately fail if it is locked, e.g. 
during sandbox startup.\n+ // If it fails for any other reason, then the sandbox went away between the time we listed the\n+ // sandboxes and now, so just delete it.\n+ if _, _, err := served.load(); err != nil && err != container.ErrStateFileLocked {\n+ log.Warningf(\"Sandbox %q cannot be loaded, ignoring it: %v\", sid, err)\n+ m.lastStateFileStat[sid] = stat\n+ continue\n+ }\n+ m.sandboxes[sid] = served\n+ log.Infof(\"Registered new sandbox found in root directory: %q\", sid)\n+ }\n}\n// httpResult is returned by HTTP handlers.\n@@ -309,7 +435,7 @@ func (m *MetricServer) serveMetrics(w http.ResponseWriter, req *http.Request) ht\nctx, ctxCancel := context.WithTimeout(req.Context(), metricsExportTimeout)\ndefer ctxCancel()\nm.mu.Lock()\n- m.purgeSandboxesLocked()\n+ m.refreshSandboxesLocked()\nnumGoroutines := exportParallelGoroutines\nnumSandboxes := len(m.sandboxes)\n@@ -543,6 +669,23 @@ func logRequest(f func(w http.ResponseWriter, req *http.Request) httpResult) fun\n}\n}\n+// verify is one iteration of verifyLoop.\n+// It runs in a loop in the background which checks all sandboxes for liveness, tries to load\n+// their metadata if that hasn't been loaded yet, and tries to pick up new sandboxes that\n+// failed to register for whatever reason.\n+func (m *MetricServer) verify(ctx context.Context) {\n+ _, err := container.ListSandboxes(m.rootDir)\n+ m.mu.Lock()\n+ defer m.mu.Unlock()\n+ if err != nil {\n+ log.Warningf(\"Cannot list sandboxes in root directory %s, it has likely gone away: %v. Server shutting down.\", m.rootDir, err)\n+ m.shutdownLocked(ctx)\n+ return\n+ }\n+ m.refreshSandboxesLocked()\n+}\n+\n+// verifyLoop runs in the background and periodically calls verify.\nfunc (m *MetricServer) verifyLoop(ctx context.Context) {\nticker := time.NewTicker(verifyLoopInterval)\ndefer ticker.Stop()\n@@ -557,17 +700,7 @@ func (m *MetricServer) verifyLoop(ctx context.Context) {\nm.mu.Unlock()\nreturn\ncase <-ticker.C:\n- _, listErr := container.ListSandboxes(m.rootDir)\n- func() {\n- m.mu.Lock()\n- defer m.mu.Unlock()\n- if listErr != nil {\n- log.Warningf(\"Cannot list sandboxes in root directory %s, it has likely gone away: %v. Server shutting down.\", m.rootDir, listErr)\n- m.shutdownLocked(ctx)\n- return\n- }\n- m.purgeSandboxesLocked()\n- }()\n+ m.verify(ctx)\n}\n}\n}\n@@ -601,7 +734,9 @@ func (m *MetricServer) Execute(ctx context.Context, f *flag.FlagSet, args ...any\nlog.Infof(\"Metric server address replaced %RUNTIME_ROOT%: %q -> %q\", conf.MetricServer, newAddr)\nconf.MetricServer = newAddr\n}\n+ m.address = conf.MetricServer\nm.sandboxes = make(map[container.FullID]*servedSandbox)\n+ m.lastStateFileStat = make(map[container.FullID]os.FileInfo)\nvar listener net.Listener\nvar listenErr error\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/state_file.go",
"new_path": "runsc/container/state_file.go",
"diff": "@@ -374,6 +374,12 @@ func (s *StateFile) SaveLocked(v any) error {\nreturn nil\n}\n+// Stat returns the result of calling stat() on the state file.\n+// Doing so does not require locking.\n+func (s *StateFile) Stat() (os.FileInfo, error) {\n+ return os.Stat(s.statePath())\n+}\n+\nfunc (s *StateFile) load(v any, opts LoadOpts) error {\nif err := s.lock(opts.TryLock); err != nil {\nreturn err\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -140,6 +140,12 @@ type Sandbox struct {\n// This is a textproto string of metricpb.MetricRegistration.\nRegisteredMetrics string `json:\"registeredMetrics\"`\n+ // MetricServerAddress is the address of the metric server that this sandbox\n+ // intends to export metrics for.\n+ // Only populated if exporting metrics was requested when the sandbox was\n+ // created.\n+ MetricServerAddress string `json:\"metricServerAddress\"`\n+\n// child is set if a sandbox process is a child of the current process.\n//\n// This field isn't saved to json, because only a creator of sandbox\n@@ -215,6 +221,7 @@ func New(conf *config.Config, args *Args) (*Sandbox, error) {\n},\nUID: -1, // prevent usage before it's set.\nGID: -1, // prevent usage before it's set.\n+ MetricServerAddress: conf.MetricServer,\n}\nif args.Spec != nil && args.Spec.Annotations != nil {\ns.PodName = args.Spec.Annotations[podNameAnnotation]\n"
}
] | Go | Apache License 2.0 | google/gvisor | `runsc` metric server: Periodically add sandboxes that failed registration.
The metric server periodically scans the root directory and picks up all
the containers that requested instrumentation.
This change is part of a series of changes to support Prometheus-style metrics
in `runsc`.
PiperOrigin-RevId: 503268994 |
259,868 | 19.01.2023 14:38:22 | 28,800 | fa72d6e37df3c1c9e36fc297d735210f33c4280d | `runsc/sandbox`: Make `OverlayFileUsage` follow convention for urpc methods. | [
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -1240,14 +1240,12 @@ func (s *Sandbox) ChangeLogging(args control.LoggingArgs) error {\n// OverlayFileUsage returns the current usage (bytes) of the overlay filestore.\nfunc (s *Sandbox) OverlayFileUsage() (uint64, error) {\n- conn, err := s.sandboxConnect()\n- if err != nil {\n- return 0, err\n- }\n- defer conn.Close()\n-\n+ log.Debugf(\"Getting overlay file usage for sandbox %q\", s.ID)\nvar usage uint64\n- return usage, conn.Call(boot.CongMgrOverlayFileUsage, nil, &usage)\n+ if err := s.call(boot.CongMgrOverlayFileUsage, nil, &usage); err != nil {\n+ return 0, fmt.Errorf(\"getting overlay file usage for sandbox %q: %w\", s.ID, err)\n+ }\n+ return usage, nil\n}\n// DestroyContainer destroys the given container. If it is the root container,\n"
}
] | Go | Apache License 2.0 | google/gvisor | `runsc/sandbox`: Make `OverlayFileUsage` follow convention for urpc methods.
PiperOrigin-RevId: 503269077 |
259,992 | 19.01.2023 16:35:59 | 28,800 | 0e5d0cc13abce38832634d508cbd620b89364ac0 | Rename pselect to pselect6
The actual syscall name is pselect6. This name is used in Docker's
default seccomp profile. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/linux64.go",
"new_path": "pkg/sentry/syscalls/linux/linux64.go",
"diff": "@@ -322,7 +322,7 @@ var AMD64 = &kernel.SyscallTable{\n267: syscalls.Supported(\"readlinkat\", Readlinkat),\n268: syscalls.Supported(\"fchmodat\", Fchmodat),\n269: syscalls.Supported(\"faccessat\", Faccessat),\n- 270: syscalls.Supported(\"pselect\", Pselect),\n+ 270: syscalls.Supported(\"pselect6\", Pselect6),\n271: syscalls.Supported(\"ppoll\", Ppoll),\n272: syscalls.PartiallySupported(\"unshare\", Unshare, \"Mount, cgroup namespaces not supported. Network namespaces supported but must be empty.\", nil),\n273: syscalls.Supported(\"set_robust_list\", SetRobustList),\n@@ -503,7 +503,7 @@ var ARM64 = &kernel.SyscallTable{\n69: syscalls.SupportedPoint(\"preadv\", Preadv, PointPreadv),\n70: syscalls.SupportedPoint(\"pwritev\", Pwritev, PointPwritev),\n71: syscalls.Supported(\"sendfile\", Sendfile),\n- 72: syscalls.Supported(\"pselect\", Pselect),\n+ 72: syscalls.Supported(\"pselect6\", Pselect6),\n73: syscalls.Supported(\"ppoll\", Ppoll),\n74: syscalls.SupportedPoint(\"signalfd4\", Signalfd4, PointSignalfd4),\n75: syscalls.ErrorWithEvent(\"vmsplice\", linuxerr.ENOSYS, \"\", []string{\"gvisor.dev/issue/138\"}), // TODO(b/29354098)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_poll.go",
"new_path": "pkg/sentry/syscalls/linux/sys_poll.go",
"diff": "@@ -508,8 +508,8 @@ type sigSetWithSize struct {\nsizeofSigset uint64\n}\n-// Pselect implements linux syscall pselect(2).\n-func Pselect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n+// Pselect6 implements linux syscall pselect6(2).\n+func Pselect6(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\nnfds := int(args[0].Int()) // select(2) uses an int.\nreadFDs := args[1].Pointer()\nwriteFDs := args[2].Pointer()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Rename pselect to pselect6
The actual syscall name is pselect6. This name is used in Docker's
default seccomp profile.
PiperOrigin-RevId: 503297316 |
259,909 | 20.01.2023 14:02:59 | 28,800 | 492d7a98116b79127bf2fb5e6becbe6376b78854 | Decref target VirtualDentry outside the vfs mount lock during mount ops.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/mount.go",
"new_path": "pkg/sentry/vfs/mount.go",
"diff": "@@ -273,28 +273,32 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr\nreturn err\n}\nvfs.mountMu.Lock()\n- defer vfs.mountMu.Unlock()\ntree := vfs.preparePropagationTree(mnt, vd)\n-\ncleanup := cleanup.Make(func() {\nvfs.abortPropagationTree(ctx, tree) // +checklocksforce\n+ // We need to unlock mountMu first because DecRef takes a lock on the\n+ // filesystem mutex in some implementations, which can lead to circular\n+ // locking.\n+ vfs.mountMu.Unlock()\n+ vd.DecRef(ctx)\n})\ndefer cleanup.Clean()\n// Check if the new mount + all the propagation mounts puts us over the max.\nif uint32(len(tree)+1)+vd.mount.ns.mounts > MountMax {\n- vd.DecRef(ctx)\nreturn linuxerr.ENOSPC\n}\nif err := vfs.connectMountAt(ctx, mnt, vd); err != nil {\nreturn err\n}\nvfs.commitPropagationTree(ctx, tree)\n+ vfs.mountMu.Unlock()\ncleanup.Release()\nreturn nil\n}\n-// connectMountAtLocked attaches mnt at vd. It returns the new mountpoint of mnt\n-// if no error occurred.\n+// connectMountAtLocked attaches mnt at vd. If the method returns an error that\n+// is not nil, then it did not consume a reference on vd and the caller is\n+// responsible for calling DecRef.\n//\n// Preconditions:\n// - mnt must be disconnected.\n@@ -307,7 +311,6 @@ func (vfs *VirtualFilesystem) connectMountAt(ctx context.Context, mnt *Mount, vd\nfor {\nif vd.mount.umounted || vdDentry.dead {\nvdDentry.mu.Unlock()\n- vd.DecRef(ctx)\nreturn linuxerr.ENOENT\n}\n// vd might have been mounted over between vfs.GetDentryAt() and\n@@ -392,7 +395,6 @@ func (vfs *VirtualFilesystem) BindAt(ctx context.Context, creds *auth.Credential\n}\nvfs.mountMu.Lock()\n- defer vfs.mountMu.Unlock()\nclone := vfs.cloneMount(sourceVd.mount, sourceVd.dentry, nil)\ndefer clone.DecRef(ctx)\ntree := vfs.preparePropagationTree(clone, targetVd)\n@@ -407,16 +409,18 @@ func (vfs *VirtualFilesystem) BindAt(ctx context.Context, creds *auth.Credential\n// Checklocks doesn't work with anon functions.\nvfs.setPropagation(clone, Private) // +checklocksforce\nvfs.abortPropagationTree(ctx, tree) // +checklocksforce\n+ vfs.mountMu.Unlock()\n+ targetVd.DecRef(ctx)\n})\ndefer cleanup.Clean()\nif uint32(1+len(tree))+targetVd.mount.ns.mounts > MountMax {\n- targetVd.DecRef(ctx)\nreturn nil, linuxerr.ENOSPC\n}\nif err := vfs.connectMountAt(ctx, clone, targetVd); err != nil {\nreturn nil, err\n}\nvfs.commitPropagationTree(ctx, tree)\n+ vfs.mountMu.Unlock()\ncleanup.Release()\nreturn clone, nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Decref target VirtualDentry outside the vfs mount lock during mount ops.
Reported-by: syzbot+ad021a61205c51a59627@syzkaller.appspotmail.com
PiperOrigin-RevId: 503518033 |
259,907 | 20.01.2023 14:20:03 | 28,800 | a58df80df31b27d448ca430bdb31b456d51e2609 | Make directory open flags checks consistent in tmpfs and kernfs.
Overlayfs and goferfs seem to be up to date.
Updated syscall tests. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/kernfs/filesystem.go",
"new_path": "pkg/sentry/fsimpl/kernfs/filesystem.go",
"diff": "@@ -618,6 +618,19 @@ afterTrailingSymlink:\nif err := child.inode.CheckPermissions(ctx, rp.Credentials(), ats); err != nil {\nreturn nil, err\n}\n+ if child.isDir() {\n+ // Can't open directories with O_CREAT.\n+ if opts.Flags&linux.O_CREAT != 0 {\n+ return nil, linuxerr.EISDIR\n+ }\n+ // Can't open directories writably.\n+ if ats&vfs.MayWrite != 0 {\n+ return nil, linuxerr.EISDIR\n+ }\n+ if opts.Flags&linux.O_DIRECT != 0 {\n+ return nil, linuxerr.EINVAL\n+ }\n+ }\n// Open may block so we need to unlock fs.mu. IncRef child to prevent\n// its destruction while fs.mu is unlocked.\nchild.IncRef()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go",
"diff": "@@ -483,10 +483,17 @@ func (d *dentry) open(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.Open\n}\nreturn &fd.vfsfd, nil\ncase *directory:\n+ // Can't open directories with O_CREAT.\n+ if opts.Flags&linux.O_CREAT != 0 {\n+ return nil, linuxerr.EISDIR\n+ }\n// Can't open directories writably.\nif ats&vfs.MayWrite != 0 {\nreturn nil, linuxerr.EISDIR\n}\n+ if opts.Flags&linux.O_DIRECT != 0 {\n+ return nil, linuxerr.EINVAL\n+ }\nvar fd directoryFD\nfd.LockFD.Init(&d.inode.locks)\nif err := fd.vfsfd.Init(&fd, opts.Flags, rp.Mount(), &d.vfsd, &vfs.FileDescriptionOptions{AllowDirectIO: true}); err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/open.cc",
"new_path": "test/syscalls/linux/open.cc",
"diff": "@@ -99,17 +99,17 @@ TEST_F(OpenTest, OCreateDirectory) {\nauto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n// Normal case: existing directory.\n- ASSERT_THAT(open(dir.path().c_str(), O_RDWR | O_CREAT, 0666),\n+ ASSERT_THAT(open(dir.path().c_str(), O_RDONLY | O_CREAT, 0666),\nSyscallFailsWithErrno(EISDIR));\n// Trailing separator on existing directory.\n- ASSERT_THAT(open(dir.path().append(\"/\").c_str(), O_RDWR | O_CREAT, 0666),\n+ ASSERT_THAT(open(dir.path().append(\"/\").c_str(), O_RDONLY | O_CREAT, 0666),\nSyscallFailsWithErrno(EISDIR));\n// Trailing separator on non-existing directory.\nASSERT_THAT(open(JoinPath(dir.path(), \"non-existent\").append(\"/\").c_str(),\n- O_RDWR | O_CREAT, 0666),\n+ O_RDONLY | O_CREAT, 0666),\nSyscallFailsWithErrno(EISDIR));\n// \".\" special case.\n- ASSERT_THAT(open(JoinPath(dir.path(), \".\").c_str(), O_RDWR | O_CREAT, 0666),\n+ ASSERT_THAT(open(JoinPath(dir.path(), \".\").c_str(), O_RDONLY | O_CREAT, 0666),\nSyscallFailsWithErrno(EISDIR));\n}\n@@ -399,6 +399,11 @@ TEST_F(OpenTest, DirectoryWritableFails) {\nSyscallFailsWithErrno(EISDIR));\n}\n+TEST_F(OpenTest, DirectoryDirectFails) {\n+ ASSERT_THAT(open(GetAbsoluteTestTmpdir().c_str(), O_RDONLY | O_DIRECT),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\nTEST_F(OpenTest, FileNotDirectory) {\n// Create a file and try to open it with O_DIRECTORY.\nauto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make directory open flags checks consistent in tmpfs and kernfs.
Overlayfs and goferfs seem to be up to date.
Updated syscall tests.
PiperOrigin-RevId: 503521750 |
259,891 | 20.01.2023 14:44:11 | 28,800 | 226daf3879074a25c88dbbd972362e783b6e20cc | github: add quotes around shell variable | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/go.yml",
"new_path": ".github/workflows/go.yml",
"diff": "@@ -23,9 +23,9 @@ jobs:\n- id: setup\nrun: |\nif ! [[ -z \"${{ secrets.GO_TOKEN }}\" ]]; then\n- echo has_token=true >> $GITHUB_OUTPUT\n+ echo has_token=true >> \"$GITHUB_OUTPUT\"\nelse\n- echo has_token=false >> $GITHUB_OUTPUT\n+ echo has_token=false >> \"$GITHUB_OUTPUT\"\nfi\n- run: |\njq -nc '{\"state\": \"pending\", \"context\": \"go tests\"}' | \\\n"
}
] | Go | Apache License 2.0 | google/gvisor | github: add quotes around shell variable
PiperOrigin-RevId: 503526899 |
259,853 | 20.01.2023 17:08:27 | 28,800 | e08f204299dfcd6a93fde73375933cfa5f017740 | inet: each socket has to hold a reference to its network namespace
Otherwise a network namespace can be destroyed before its sockets.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -342,11 +342,11 @@ type commonEndpoint interface {\nSocketOptions() *tcpip.SocketOptions\n}\n-// Socket encapsulates all the state needed to represent a network stack\n+// sock encapsulates all the state needed to represent a network stack\n// endpoint in the kernel context.\n//\n// +stateify savable\n-type Socket struct {\n+type sock struct {\nvfsfd vfs.FileDescription\nvfs.FileDescriptionDefaultImpl\nvfs.DentryMetadataFileDescriptionImpl\n@@ -359,6 +359,8 @@ type Socket struct {\nskType linux.SockType\nprotocol int\n+ namespace *inet.Namespace\n+\n// readMu protects access to the below fields.\nreadMu sync.Mutex `state:\"nosave\"`\n@@ -379,7 +381,7 @@ type Socket struct {\nsockOptInq bool\n}\n-var _ = socket.Socket(&Socket{})\n+var _ = socket.Socket(&sock{})\n// New creates a new endpoint socket.\nfunc New(t *kernel.Task, family int, skType linux.SockType, protocol int, queue *waiter.Queue, endpoint tcpip.Endpoint) (*vfs.FileDescription, *syserr.Error) {\n@@ -391,12 +393,14 @@ func New(t *kernel.Task, family int, skType linux.SockType, protocol int, queue\nd := sockfs.NewDentry(t, mnt)\ndefer d.DecRef(t)\n- s := &Socket{\n+ namespace := t.NetworkNamespace()\n+ s := &sock{\nQueue: queue,\nfamily: family,\nEndpoint: endpoint,\nskType: skType,\nprotocol: protocol,\n+ namespace: namespace,\n}\ns.LockFD.Init(&vfs.FileLocks{})\nvfsfd := &s.vfsfd\n@@ -407,11 +411,12 @@ func New(t *kernel.Task, family int, skType linux.SockType, protocol int, queue\n}); err != nil {\nreturn nil, syserr.FromError(err)\n}\n+ namespace.IncRef()\nreturn vfsfd, nil\n}\n// Release implements vfs.FileDescriptionImpl.Release.\n-func (s *Socket) Release(ctx context.Context) {\n+func (s *sock) Release(ctx context.Context) {\nkernel.KernelFromContext(ctx).DeleteSocket(&s.vfsfd)\ne, ch := waiter.NewChannelEntry(waiter.EventHUp | waiter.EventErr)\ns.EventRegister(&e)\n@@ -421,10 +426,7 @@ func (s *Socket) Release(ctx context.Context) {\n// SO_LINGER option is valid only for TCP. 
For other socket types\n// return after endpoint close.\n- if family, skType, _ := s.Type(); skType != linux.SOCK_STREAM || (family != linux.AF_INET && family != linux.AF_INET6) {\n- return\n- }\n-\n+ if family, skType, _ := s.Type(); skType == linux.SOCK_STREAM && (family == linux.AF_INET || family == linux.AF_INET6) {\nv := s.Endpoint.SocketOptions().GetLinger()\n// The case for zero timeout is handled in tcp endpoint close function.\n// Close is blocked until either:\n@@ -438,14 +440,16 @@ func (s *Socket) Release(ctx context.Context) {\n_ = t.BlockWithDeadline(ch, true, deadline)\n}\n}\n+ s.namespace.DecRef()\n+}\n// Epollable implements FileDescriptionImpl.Epollable.\n-func (s *Socket) Epollable() bool {\n+func (s *sock) Epollable() bool {\nreturn true\n}\n// Read implements vfs.FileDescriptionImpl.\n-func (s *Socket) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {\n+func (s *sock) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {\n// All flags other than RWF_NOWAIT should be ignored.\n// TODO(gvisor.dev/issue/2601): Support RWF_NOWAIT.\nif opts.Flags != 0 {\n@@ -466,7 +470,7 @@ func (s *Socket) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.Read\n}\n// Write implements vfs.FileDescriptionImpl.\n-func (s *Socket) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {\n+func (s *sock) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {\n// All flags other than RWF_NOWAIT should be ignored.\n// TODO(gvisor.dev/issue/2601): Support RWF_NOWAIT.\nif opts.Flags != 0 {\n@@ -491,7 +495,7 @@ func (s *Socket) Write(ctx context.Context, src usermem.IOSequence, opts vfs.Wri\n// Accept implements the linux syscall accept(2) for sockets backed by\n// tcpip.Endpoint.\n-func (s *Socket) Accept(t *kernel.Task, peerRequested bool, flags int, blocking bool) (int32, linux.SockAddr, uint32, *syserr.Error) {\n+func (s *sock) Accept(t *kernel.Task, peerRequested bool, flags int, blocking bool) (int32, linux.SockAddr, uint32, *syserr.Error) {\n// Issue the accept request to get the new endpoint.\nvar peerAddr *tcpip.FullAddress\nif peerRequested {\n@@ -538,7 +542,7 @@ func (s *Socket) Accept(t *kernel.Task, peerRequested bool, flags int, blocking\n// GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by\n// tcpip.Endpoint.\n-func (s *Socket) GetSockOpt(t *kernel.Task, level, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {\n+func (s *sock) GetSockOpt(t *kernel.Task, level, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {\n// TODO(b/78348848): Unlike other socket options, SO_TIMESTAMP is\n// implemented specifically for netstack.Socket rather than\n// commonEndpoint. commonEndpoint should be extended to support socket\n@@ -574,7 +578,7 @@ func (s *Socket) GetSockOpt(t *kernel.Task, level, name int, outPtr hostarch.Add\n// SetSockOpt implements the linux syscall setsockopt(2) for sockets backed by\n// tcpip.Endpoint.\n-func (s *Socket) SetSockOpt(t *kernel.Task, level int, name int, optVal []byte) *syserr.Error {\n+func (s *sock) SetSockOpt(t *kernel.Task, level int, name int, optVal []byte) *syserr.Error {\n// TODO(b/78348848): Unlike other socket options, SO_TIMESTAMP is\n// implemented specifically for netstack.Socket rather than\n// commonEndpoint. 
commonEndpoint should be extended to support socket\n@@ -608,7 +612,7 @@ var sockAddrLinkSize = (*linux.SockAddrLink)(nil).SizeBytes()\n// minSockAddrLen returns the minimum length in bytes of a socket address for\n// the socket's family.\n-func (s *Socket) minSockAddrLen() int {\n+func (s *sock) minSockAddrLen() int {\nconst addressFamilySize = 2\nswitch s.family {\n@@ -627,12 +631,12 @@ func (s *Socket) minSockAddrLen() int {\n}\n}\n-func (s *Socket) isPacketBased() bool {\n+func (s *sock) isPacketBased() bool {\nreturn s.skType == linux.SOCK_DGRAM || s.skType == linux.SOCK_SEQPACKET || s.skType == linux.SOCK_RDM || s.skType == linux.SOCK_RAW\n}\n// Readiness returns a mask of ready events for socket s.\n-func (s *Socket) Readiness(mask waiter.EventMask) waiter.EventMask {\n+func (s *sock) Readiness(mask waiter.EventMask) waiter.EventMask {\nreturn s.Endpoint.Readiness(mask)\n}\n@@ -641,7 +645,7 @@ func (s *Socket) Readiness(mask waiter.EventMask) waiter.EventMask {\n//\n// If exact is true, then the specified address family must be an exact match\n// with the socket's family.\n-func (s *Socket) checkFamily(family uint16, exact bool) bool {\n+func (s *sock) checkFamily(family uint16, exact bool) bool {\nif family == uint16(s.family) {\nreturn true\n}\n@@ -660,7 +664,7 @@ func (s *Socket) checkFamily(family uint16, exact bool) bool {\n// represented by the empty string.\n//\n// TODO(gvisor.dev/issue/1556): remove this function.\n-func (s *Socket) mapFamily(addr tcpip.FullAddress, family uint16) tcpip.FullAddress {\n+func (s *sock) mapFamily(addr tcpip.FullAddress, family uint16) tcpip.FullAddress {\nif len(addr.Addr) == 0 && s.family == linux.AF_INET6 && family == linux.AF_INET {\naddr.Addr = \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xff\\x00\\x00\\x00\\x00\"\n}\n@@ -669,7 +673,7 @@ func (s *Socket) mapFamily(addr tcpip.FullAddress, family uint16) tcpip.FullAddr\n// Connect implements the linux syscall connect(2) for sockets backed by\n// tpcip.Endpoint.\n-func (s *Socket) Connect(t *kernel.Task, sockaddr []byte, blocking bool) *syserr.Error {\n+func (s *sock) Connect(t *kernel.Task, sockaddr []byte, blocking bool) *syserr.Error {\naddr, family, err := socket.AddressAndFamily(sockaddr)\nif err != nil {\nreturn err\n@@ -724,7 +728,7 @@ func (s *Socket) Connect(t *kernel.Task, sockaddr []byte, blocking bool) *syserr\n// Bind implements the linux syscall bind(2) for sockets backed by\n// tcpip.Endpoint.\n-func (s *Socket) Bind(_ *kernel.Task, sockaddr []byte) *syserr.Error {\n+func (s *sock) Bind(_ *kernel.Task, sockaddr []byte) *syserr.Error {\nif len(sockaddr) < 2 {\nreturn syserr.ErrInvalidArgument\n}\n@@ -784,13 +788,13 @@ func (s *Socket) Bind(_ *kernel.Task, sockaddr []byte) *syserr.Error {\n// Listen implements the linux syscall listen(2) for sockets backed by\n// tcpip.Endpoint.\n-func (s *Socket) Listen(_ *kernel.Task, backlog int) *syserr.Error {\n+func (s *sock) Listen(_ *kernel.Task, backlog int) *syserr.Error {\nreturn syserr.TranslateNetstackError(s.Endpoint.Listen(backlog))\n}\n// blockingAccept implements a blocking version of accept(2), that is, if no\n// connections are ready to be accept, it will block until one becomes ready.\n-func (s *Socket) blockingAccept(t *kernel.Task, peerAddr *tcpip.FullAddress) (tcpip.Endpoint, *waiter.Queue, *syserr.Error) {\n+func (s *sock) blockingAccept(t *kernel.Task, peerAddr *tcpip.FullAddress) (tcpip.Endpoint, *waiter.Queue, *syserr.Error) {\n// Register for notifications.\ne, ch := 
waiter.NewChannelEntry(waiter.ReadableEvents)\ns.EventRegister(&e)\n@@ -828,7 +832,7 @@ func ConvertShutdown(how int) (tcpip.ShutdownFlags, *syserr.Error) {\n// Shutdown implements the linux syscall shutdown(2) for sockets backed by\n// tcpip.Endpoint.\n-func (s *Socket) Shutdown(_ *kernel.Task, how int) *syserr.Error {\n+func (s *sock) Shutdown(_ *kernel.Task, how int) *syserr.Error {\nf, err := ConvertShutdown(how)\nif err != nil {\nreturn err\n@@ -2579,7 +2583,7 @@ func setSockOptIP(t *kernel.Task, s socket.Socket, ep commonEndpoint, name int,\n// GetSockName implements the linux syscall getsockname(2) for sockets backed by\n// tcpip.Endpoint.\n-func (s *Socket) GetSockName(*kernel.Task) (linux.SockAddr, uint32, *syserr.Error) {\n+func (s *sock) GetSockName(*kernel.Task) (linux.SockAddr, uint32, *syserr.Error) {\naddr, err := s.Endpoint.GetLocalAddress()\nif err != nil {\nreturn nil, 0, syserr.TranslateNetstackError(err)\n@@ -2591,7 +2595,7 @@ func (s *Socket) GetSockName(*kernel.Task) (linux.SockAddr, uint32, *syserr.Erro\n// GetPeerName implements the linux syscall getpeername(2) for sockets backed by\n// tcpip.Endpoint.\n-func (s *Socket) GetPeerName(*kernel.Task) (linux.SockAddr, uint32, *syserr.Error) {\n+func (s *sock) GetPeerName(*kernel.Task) (linux.SockAddr, uint32, *syserr.Error) {\naddr, err := s.Endpoint.GetRemoteAddress()\nif err != nil {\nreturn nil, 0, syserr.TranslateNetstackError(err)\n@@ -2601,7 +2605,7 @@ func (s *Socket) GetPeerName(*kernel.Task) (linux.SockAddr, uint32, *syserr.Erro\nreturn a, l, nil\n}\n-func (s *Socket) fillCmsgInq(cmsg *socket.ControlMessages) {\n+func (s *sock) fillCmsgInq(cmsg *socket.ControlMessages) {\nif !s.sockOptInq {\nreturn\n}\n@@ -2633,7 +2637,7 @@ func toLinuxPacketType(pktType tcpip.PacketType) uint8 {\n// nonBlockingRead issues a non-blocking read.\n//\n// TODO(b/78348848): Support timestamps for stream sockets.\n-func (s *Socket) nonBlockingRead(ctx context.Context, dst usermem.IOSequence, peek, trunc, senderRequested bool) (int, int, linux.SockAddr, uint32, socket.ControlMessages, *syserr.Error) {\n+func (s *sock) nonBlockingRead(ctx context.Context, dst usermem.IOSequence, peek, trunc, senderRequested bool) (int, int, linux.SockAddr, uint32, socket.ControlMessages, *syserr.Error) {\nisPacket := s.isPacketBased()\nreadOptions := tcpip.ReadOptions{\n@@ -2722,7 +2726,7 @@ func (s *Socket) nonBlockingRead(ctx context.Context, dst usermem.IOSequence, pe\nreturn res.Count, 0, nil, 0, cmsg, syserr.TranslateNetstackError(err)\n}\n-func (s *Socket) netstackToLinuxControlMessages(cm tcpip.ReceivableControlMessages) socket.ControlMessages {\n+func (s *sock) netstackToLinuxControlMessages(cm tcpip.ReceivableControlMessages) socket.ControlMessages {\nreadCM := socket.NewIPControlMessages(s.family, cm)\nreturn socket.ControlMessages{\nIP: socket.IPControlMessages{\n@@ -2748,7 +2752,7 @@ func (s *Socket) netstackToLinuxControlMessages(cm tcpip.ReceivableControlMessag\n}\n}\n-func (s *Socket) linuxToNetstackControlMessages(cm socket.ControlMessages) tcpip.SendableControlMessages {\n+func (s *sock) linuxToNetstackControlMessages(cm socket.ControlMessages) tcpip.SendableControlMessages {\nreturn tcpip.SendableControlMessages{\nHasTTL: cm.IP.HasTTL,\nTTL: uint8(cm.IP.TTL),\n@@ -2761,7 +2765,7 @@ func (s *Socket) linuxToNetstackControlMessages(cm socket.ControlMessages) tcpip\n// successfully writing packet data out to userspace.\n//\n// Precondition: s.readMu must be locked.\n-func (s *Socket) updateTimestamp(cm tcpip.ReceivableControlMessages) 
{\n+func (s *sock) updateTimestamp(cm tcpip.ReceivableControlMessages) {\n// Save the SIOCGSTAMP timestamp only if SO_TIMESTAMP is disabled.\nif !s.sockOptTimestamp {\ns.timestampValid = true\n@@ -2770,7 +2774,7 @@ func (s *Socket) updateTimestamp(cm tcpip.ReceivableControlMessages) {\n}\n// dequeueErr is analogous to net/core/skbuff.c:sock_dequeue_err_skb().\n-func (s *Socket) dequeueErr() *tcpip.SockError {\n+func (s *sock) dequeueErr() *tcpip.SockError {\nso := s.Endpoint.SocketOptions()\nerr := so.DequeueErr()\nif err == nil {\n@@ -2801,7 +2805,7 @@ func addrFamilyFromNetProto(net tcpip.NetworkProtocolNumber) int {\n// recvErr handles MSG_ERRQUEUE for recvmsg(2).\n// This is analogous to net/ipv4/ip_sockglue.c:ip_recv_error().\n-func (s *Socket) recvErr(t *kernel.Task, dst usermem.IOSequence) (int, int, linux.SockAddr, uint32, socket.ControlMessages, *syserr.Error) {\n+func (s *sock) recvErr(t *kernel.Task, dst usermem.IOSequence) (int, int, linux.SockAddr, uint32, socket.ControlMessages, *syserr.Error) {\nsockErr := s.dequeueErr()\nif sockErr == nil {\nreturn 0, 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n@@ -2827,7 +2831,7 @@ func (s *Socket) recvErr(t *kernel.Task, dst usermem.IOSequence) (int, int, linu\n// RecvMsg implements the linux syscall recvmsg(2) for sockets backed by\n// tcpip.Endpoint.\n-func (s *Socket) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, _ uint64) (n int, msgFlags int, senderAddr linux.SockAddr, senderAddrLen uint32, controlMessages socket.ControlMessages, err *syserr.Error) {\n+func (s *sock) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, _ uint64) (n int, msgFlags int, senderAddr linux.SockAddr, senderAddrLen uint32, controlMessages socket.ControlMessages, err *syserr.Error) {\nif flags&linux.MSG_ERRQUEUE != 0 {\nreturn s.recvErr(t, dst)\n}\n@@ -2899,7 +2903,7 @@ func (s *Socket) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, have\n// SendMsg implements the linux syscall sendmsg(2) for sockets backed by\n// tcpip.Endpoint.\n-func (s *Socket) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, haveDeadline bool, deadline ktime.Time, controlMessages socket.ControlMessages) (int, *syserr.Error) {\n+func (s *sock) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, haveDeadline bool, deadline ktime.Time, controlMessages socket.ControlMessages) (int, *syserr.Error) {\n// Reject Unix control messages.\nif !controlMessages.Unix.Empty() {\nreturn 0, syserr.ErrInvalidArgument\n@@ -2972,7 +2976,7 @@ func (s *Socket) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flag\n}\n// Ioctl implements vfs.FileDescriptionImpl.\n-func (s *Socket) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+func (s *sock) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArguments) (uintptr, error) {\nt := kernel.TaskFromContext(ctx)\nif t == nil {\npanic(\"ioctl(2) may only be called from a task goroutine\")\n@@ -3329,7 +3333,7 @@ func isICMPSocket(skType linux.SockType, skProto int) bool {\n// State implements socket.Socket.State. 
State translates the internal state\n// returned by netstack to values defined by Linux.\n-func (s *Socket) State() uint32 {\n+func (s *sock) State() uint32 {\nif s.family != linux.AF_INET && s.family != linux.AF_INET6 {\n// States not implemented for this socket's family.\nreturn 0\n@@ -3389,17 +3393,17 @@ func (s *Socket) State() uint32 {\n}\n// Type implements socket.Socket.Type.\n-func (s *Socket) Type() (family int, skType linux.SockType, protocol int) {\n+func (s *sock) Type() (family int, skType linux.SockType, protocol int) {\nreturn s.family, s.skType, s.protocol\n}\n// EventRegister implements waiter.Waitable.\n-func (s *Socket) EventRegister(e *waiter.Entry) error {\n+func (s *sock) EventRegister(e *waiter.Entry) error {\ns.Queue.EventRegister(e)\nreturn nil\n}\n// EventUnregister implements waiter.Waitable.EventUnregister.\n-func (s *Socket) EventUnregister(e *waiter.Entry) {\n+func (s *sock) EventUnregister(e *waiter.Entry) {\ns.Queue.EventUnregister(e)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack_state.go",
"new_path": "pkg/sentry/socket/netstack/netstack_state.go",
"diff": "@@ -18,13 +18,13 @@ import (\n\"time\"\n)\n-func (s *Socket) saveTimestamp() int64 {\n+func (s *sock) saveTimestamp() int64 {\ns.readMu.Lock()\ndefer s.readMu.Unlock()\nreturn s.timestamp.UnixNano()\n}\n-func (s *Socket) loadTimestamp(nsec int64) {\n+func (s *sock) loadTimestamp(nsec int64) {\ns.readMu.Lock()\ndefer s.readMu.Unlock()\ns.timestamp = time.Unix(0, nsec)\n"
}
] | Go | Apache License 2.0 | google/gvisor | inet: each socket has to hold a reference to its network namespace
Otherwise a network namespace can be destroyed before sockets.
Reported-by: syzbot+78dcf6a117cd41dcb84e@syzkaller.appspotmail.com
PiperOrigin-RevId: 503552997 |
259,907 | 22.01.2023 18:57:15 | 28,800 | fe562179fea178b4044a4a30fe60264e2a6fa869 | Handle absolute symlink target '/' correctly in VFS layer.
vfs.ResolvingPath.relpathPrepend() has a precondition which was being violated
when handling resolveAbsSymlinkError. This was only happening when
`rp.absSymlinkTarget = "/"` because it has no path components. Added check for
that and also added regression test.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/dentry_impl.go",
"new_path": "pkg/sentry/fsimpl/gofer/dentry_impl.go",
"diff": "@@ -201,7 +201,7 @@ func (d *dentry) getRemoteChild(ctx context.Context, name string) (*dentry, erro\n// - fs.renameMu must be locked.\n// - parent.dirMu must be locked.\n// - parent.isDir().\n-// - name is not \".\" or \"..\".\n+// - !rp.Done() && rp.Component() is not \".\" or \"..\".\n// - dentry at name must not already exist in dentry tree.\n//\n// Postcondition: The returned dentry is already cached appropriately.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/lisafs_dentry.go",
"new_path": "pkg/sentry/fsimpl/gofer/lisafs_dentry.go",
"diff": "@@ -241,7 +241,7 @@ func (d *lisafsDentry) getRemoteChild(ctx context.Context, name string) (*dentry\n// - fs.renameMu must be locked.\n// - parent.dirMu must be locked.\n// - parent.isDir().\n-// - name is not \".\" or \"..\".\n+// - !rp.Done().\n// - dentry at name must not already exist in dentry tree.\nfunc (d *lisafsDentry) getRemoteChildAndWalkPathLocked(ctx context.Context, rp *vfs.ResolvingPath, ds **[]*dentry) (*dentry, error) {\n// Walk as much of the path as possible in 1 RPC.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/resolving_path.go",
"new_path": "pkg/sentry/vfs/resolving_path.go",
"diff": "@@ -438,8 +438,10 @@ func (rp *ResolvingPath) handleError(ctx context.Context, err error) bool {\nrp.flags &^= rpflagsHaveMountRef | rpflagsHaveStartRef\n// Consume the path component that represented the symlink.\nrp.Advance()\n+ if rp.absSymlinkTarget.HasComponents() {\n// Prepend the symlink target to the relative path.\nrp.relpathPrepend(rp.absSymlinkTarget)\n+ }\n// Restart path resolution on the new Mount.\nrp.releaseErrorState(ctx)\nreturn true\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/symlink.cc",
"new_path": "test/syscalls/linux/symlink.cc",
"diff": "@@ -365,6 +365,15 @@ TEST(SymlinkTest, SymlinkAtEmptyPath) {\nSyscallFailsWithErrno(ENOENT));\n}\n+// NOTE(b/266111750): Regression test.\n+TEST(SymlinkTest, AbsoluteSymlinkDouble) {\n+ const std::string symlinkPath = NewTempAbsPath();\n+ EXPECT_THAT(symlink(\"/\", symlinkPath.c_str()), SyscallSucceeds());\n+ auto doubleSymlinkPath = symlinkPath + symlinkPath;\n+ EXPECT_THAT(mkdir(doubleSymlinkPath.c_str(), 0777),\n+ SyscallFailsWithErrno(EEXIST));\n+}\n+\nclass ParamSymlinkTest : public ::testing::TestWithParam<std::string> {};\n// Test that creating an existing symlink with creat will create the target.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Handle absolute symlink target '/' correctly in VFS layer.
vfs.ResolvingPath.relpathPrepend() has a precondition which was being violated
when handling resolveAbsSymlinkError. This was only happening when
`rp.absSymlinkTarget = "/"` because it has no path components. Added check for
that and also added regression test.
Reported-by: syzbot+48846f91b6252b56382f@syzkaller.appspotmail.com
PiperOrigin-RevId: 503862090 |
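For illustration only: a minimal, self-contained Go sketch — not the gVisor fspath/VFS code — of why an absolute symlink target of "/" trips the precondition described above. Splitting "/" yields zero path components, so there is nothing to prepend to the remaining path and the prepend step has to be guarded, as the fix does. The `components` helper below is hypothetical.

```go
package main

import (
	"fmt"
	"strings"
)

// components is a hypothetical stand-in for a VFS path parser: it returns
// the non-empty components of a POSIX path.
func components(p string) []string {
	var out []string
	for _, c := range strings.Split(p, "/") {
		if c != "" {
			out = append(out, c)
		}
	}
	return out
}

func main() {
	for _, target := range []string{"/", "/tmp/x", "relative/path"} {
		cs := components(target)
		// A target of "/" has no components, so a "prepend target to the
		// remaining path" step must be skipped for it.
		fmt.Printf("%q -> %d component(s): %v\n", target, len(cs), cs)
	}
}
```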
259,881 | 23.01.2023 11:28:20 | 28,800 | f8375fac104532e6a1d8a8c7600cfa8ae4faf2f9 | Remove useless use of fmt.Sprint | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_test.go",
"new_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_test.go",
"diff": "@@ -3604,7 +3604,7 @@ func TestDefaultTTL(t *testing.T) {\n{\"ipv4\", ipv4.ProtocolNumber, context.TestAddr},\n{\"ipv6\", ipv6.ProtocolNumber, context.TestV6Addr},\n} {\n- t.Run(fmt.Sprint(test.name), func(t *testing.T) {\n+ t.Run(test.name, func(t *testing.T) {\nc := context.New(t, 65535)\ndefer c.Cleanup()\n@@ -3668,7 +3668,7 @@ func TestSetTTL(t *testing.T) {\n{\"ipv4\", ipv4.ProtocolNumber, context.TestAddr, tcpip.IPv4TTLOption, tcpip.IPv6HopLimitOption},\n{\"ipv6\", ipv6.ProtocolNumber, context.TestV6Addr, tcpip.IPv6HopLimitOption, tcpip.IPv4TTLOption},\n} {\n- t.Run(fmt.Sprint(test.name), func(t *testing.T) {\n+ t.Run(test.name, func(t *testing.T) {\nfor _, wantTTL := range []uint8{1, 2, 50, 64, 128, 254, 255} {\nt.Run(fmt.Sprintf(\"TTL:%d\", wantTTL), func(t *testing.T) {\nc := context.New(t, 65535)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove useless use of fmt.Sprint
PiperOrigin-RevId: 504037414 |
259,858 | 23.01.2023 13:27:23 | 28,800 | 12a930a63e132d310bcb0db4c6490b8f7f156f3d | Move goid to dynamic facts render.
This removes the need for ongoing tags.
This change requires some minor updates to remove dependency cycles, since
the goid package is a base library used by many internals (log, sync, etc.). | [
{
"change_type": "MODIFY",
"old_path": "pkg/goid/BUILD",
"new_path": "pkg/goid/BUILD",
"diff": "-load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n+load(\"//tools:defs.bzl\", \"arch_genrule\", \"go_library\", \"go_test\", \"select_arch\")\n+load(\"//tools/nogo:defs.bzl\", \"nogo_facts\")\npackage(licenses = [\"notice\"])\n+nogo_facts(\n+ name = \"goid_impl\",\n+ srcs = [\"goid.go\"],\n+ output = \"goid_impl.s\",\n+ template = select_arch(\n+ amd64 = \"goid_amd64.s\",\n+ arm64 = \"goid_arm64.s\",\n+ ),\n+)\n+\n+arch_genrule(\n+ name = \"goid_impl_arch\",\n+ src = \":goid_impl\",\n+ template = \"goid_impl_%s.s\",\n+)\n+\ngo_library(\nname = \"goid\",\nsrcs = [\n\"goid.go\",\n- \"goid_amd64.s\",\n- \"goid_arm64.s\",\n+ \":goid_impl_arch\",\n],\nstateify = False,\nvisibility = [\"//visibility:public\"],\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/goid/goid.go",
"new_path": "pkg/goid/goid.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-//go:build go1.12 && !go1.21\n-// +build go1.12,!go1.21\n-\n-// Check type signatures when updating Go version.\n-\n// Package goid provides the Get function.\npackage goid\n-// Get returns the ID of the current goroutine.\n-func Get() int64 {\n- return getg().goid\n-}\n-\n-// Structs from Go runtime. These may change in the future and require\n-// updating. These structs are currently the same on both AMD64 and ARM64,\n-// but may diverge in the future.\n-\n-type stack struct {\n- lo uintptr\n- hi uintptr\n-}\n-\n-type gobuf struct {\n- sp uintptr\n- pc uintptr\n- g uintptr\n- ctxt uintptr\n- ret uint64\n- lr uintptr\n- bp uintptr\n-}\n+import (\n+ \"runtime\"\n+)\n-type g struct {\n- stack stack\n- stackguard0 uintptr\n- stackguard1 uintptr\n+// Dummy references for facts.\n+const _ = runtime.Compiler\n- _panic uintptr\n- _defer uintptr\n- m uintptr\n- sched gobuf\n- syscallsp uintptr\n- syscallpc uintptr\n- stktopsp uintptr\n- param uintptr\n- atomicstatus uint32\n- stackLock uint32\n- goid int64\n+// goid returns the current goid, it is defined in assembly.\n+func goid() int64\n- // More fields...\n- //\n- // We only use goid and the fields before it are only listed to\n- // calculate the correct offset.\n+// Get returns the ID of the current goroutine.\n+func Get() int64 {\n+ return goid()\n}\n-\n-// Defined in assembly. This can't use go:linkname since runtime.getg() isn't a\n-// real function, it's a compiler intrinsic.\n-func getg() *g\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/state/BUILD",
"new_path": "pkg/state/BUILD",
"diff": "@@ -79,7 +79,6 @@ go_library(\nstateify = False,\nvisibility = [\"//:sandbox\"],\ndeps = [\n- \"//pkg/log\",\n\"//pkg/state/wire\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/state/decode.go",
"new_path": "pkg/state/decode.go",
"diff": "@@ -21,7 +21,6 @@ import (\n\"math\"\n\"reflect\"\n- \"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/state/wire\"\n)\n@@ -660,9 +659,9 @@ func (ds *decodeState) Load(obj reflect.Value) {\nnumDeferred++\nif s, ok := encoded.(*wire.Struct); ok && s.TypeID != 0 {\ntyp := ds.types.LookupType(typeID(s.TypeID))\n- log.Warningf(\"unused deferred object: ID %d, type %v\", id, typ)\n+ Failf(\"unused deferred object: ID %d, type %v\", id, typ)\n} else {\n- log.Warningf(\"unused deferred object: ID %d, %#v\", id, encoded)\n+ Failf(\"unused deferred object: ID %d, %#v\", id, encoded)\n}\n}\nif numDeferred != 0 {\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/checkescape/BUILD",
"new_path": "tools/checkescape/BUILD",
"diff": "@@ -12,7 +12,6 @@ go_library(\nnogo = False,\nvisibility = [\"//tools/nogo:__subpackages__\"],\ndeps = [\n- \"//pkg/log\",\n\"//tools/nogo/flags\",\n\"@org_golang_x_tools//go/analysis:go_default_library\",\n\"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library\",\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/checkescape/checkescape.go",
"new_path": "tools/checkescape/checkescape.go",
"diff": "@@ -76,7 +76,6 @@ import (\n\"golang.org/x/tools/go/analysis\"\n\"golang.org/x/tools/go/analysis/passes/buildssa\"\n\"golang.org/x/tools/go/ssa\"\n- \"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/tools/nogo/flags\"\n)\n@@ -653,13 +652,10 @@ func findReasons(pass *analysis.Pass, fdecl *ast.FuncDecl) ([]EscapeReason, bool\n// run performs the analysis.\nfunc run(pass *analysis.Pass, binary io.Reader) (any, error) {\n- calls, callsErr := loadObjdump(binary)\n- if callsErr != nil {\n// Note that if this analysis fails, then we don't actually\n// fail the analyzer itself. We simply report every possible\n// escape. In most cases this will work just fine.\n- log.Warningf(\"unable to load objdump: %v\", callsErr)\n- }\n+ calls, callsErr := loadObjdump(binary)\nallEscapes := make(map[string][]Escapes)\nmergedEscapes := make(map[string]Escapes)\nlinePosition := func(inst, parent poser) LinePosition {\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/checklocks/BUILD",
"new_path": "tools/checklocks/BUILD",
"diff": "@@ -18,7 +18,6 @@ go_library(\n\"//tools/nogo:__subpackages__\",\n],\ndeps = [\n- \"//pkg/atomicbitops\",\n\"@org_golang_x_tools//go/analysis:go_default_library\",\n\"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library\",\n\"@org_golang_x_tools//go/ssa:go_default_library\",\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/checklocks/checklocks.go",
"new_path": "tools/checklocks/checklocks.go",
"diff": "// access to annotated fields.\n//\n// For detailed usage refer to README.md in the same directory.\n+//\n+// Note that this package uses the built-in atomics, in order to avoid the use\n+// of our own atomic package. This is because our own atomic package depends on\n+// our own sync package, which includes lock dependency analysis. This in turn\n+// requires goid, which introduces a dependency cycle. To avoid this, we simply\n+// use the simpler, built-in sync package.\n+//\n+// +checkalignedignore\npackage checklocks\nimport (\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/checklocks/state.go",
"new_path": "tools/checklocks/state.go",
"diff": "@@ -19,9 +19,9 @@ import (\n\"go/token\"\n\"go/types\"\n\"strings\"\n+ \"sync/atomic\"\n\"golang.org/x/tools/go/ssa\"\n- \"gvisor.dev/gvisor/pkg/atomicbitops\"\n)\n// lockInfo describes a held lock.\n@@ -52,12 +52,12 @@ type lockState struct {\n// refs indicates the number of references on this structure. If it's\n// greater than one, we will do copy-on-write.\n- refs *atomicbitops.Int32\n+ refs *int32\n}\n// newLockState makes a new lockState.\nfunc newLockState() *lockState {\n- refs := atomicbitops.FromInt32(1) // Not shared.\n+ refs := int32(1) // Not shared.\nreturn &lockState{\nlockedMutexes: make(map[string]lockInfo),\nused: make(map[ssa.Value]struct{}),\n@@ -73,7 +73,7 @@ func (l *lockState) fork() *lockState {\nif l == nil {\nreturn newLockState()\n}\n- l.refs.Add(1)\n+ atomic.AddInt32(l.refs, 1)\nreturn &lockState{\nlockedMutexes: l.lockedMutexes,\nused: make(map[ssa.Value]struct{}),\n@@ -85,7 +85,7 @@ func (l *lockState) fork() *lockState {\n// modify indicates that this state will be modified.\nfunc (l *lockState) modify() {\n- if l.refs.Load() > 1 {\n+ if atomic.LoadInt32(l.refs) > 1 {\n// Copy the lockedMutexes.\nlm := make(map[string]lockInfo)\nfor k, v := range l.lockedMutexes {\n@@ -109,8 +109,8 @@ func (l *lockState) modify() {\nl.defers = ds\n// Drop our reference.\n- l.refs.Add(-1)\n- newRefs := atomicbitops.FromInt32(1) // Not shared.\n+ atomic.AddInt32(l.refs, -1)\n+ newRefs := int32(1) // Not shared.\nl.refs = &newRefs\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/cli/cli.go",
"new_path": "tools/nogo/cli/cli.go",
"diff": "@@ -22,6 +22,7 @@ import (\n\"os\"\n\"path\"\n\"path/filepath\"\n+ \"regexp\"\n\"text/template\"\n\"github.com/google/subcommands\"\n@@ -199,6 +200,7 @@ type Bundle struct {\ncheckCommon\nRoot string\nPrefix string\n+ Filter string\n}\n// Name implements subcommands.Command.Name.\n@@ -227,12 +229,17 @@ func (b *Bundle) SetFlags(fs *flag.FlagSet) {\nb.setFlags(fs, \"bundle\")\nfs.StringVar(&b.Root, \"root\", \"\", \"root regular expression (for package discovery)\")\nfs.StringVar(&b.Prefix, \"prefix\", \"\", \"package prefix to apply (for complete names)\")\n+ fs.StringVar(&b.Filter, \"filter\", \".*\", \"Filter packages to analyze\")\n}\n// Execute implements subcommands.Command.Execute.\nfunc (b *Bundle) Execute(ctx context.Context, fs *flag.FlagSet, args ...any) subcommands.ExitStatus {\n// Perform the analysis.\nif err := b.execute(func() (check.FindingSet, facts.Serializer, error) {\n+ pathRegexp, err := regexp.Compile(b.Filter)\n+ if err != nil {\n+ return nil, nil, fmt.Errorf(\"invalid filter: %v\", err)\n+ }\n// Discover the correct common root.\nsrcRootPrefix, err := check.FindRoot(fs.Args(), b.Root)\nif err != nil {\n@@ -245,8 +252,10 @@ func (b *Bundle) Execute(ctx context.Context, fs *flag.FlagSet, args ...any) sub\nif b.Prefix != \"\" {\npath = b.Prefix + \"/\" + path // Subpackage.\n}\n+ if pathRegexp.MatchString(path) {\nsources[path] = append(sources[path], srcs...)\n}\n+ }\nreturn check.Bundle(sources)\n}); err != nil {\nreturn failure(\"%v\", err)\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/defs.bzl",
"new_path": "tools/nogo/defs.bzl",
"diff": "@@ -64,12 +64,9 @@ NogoStdlibInfo = provider(\n)\ndef _nogo_stdlib_impl(ctx):\n- # If this is disabled, return nothing.\n+ package_filter = \".*\"\nif not ctx.attr._nogo_full[BuildSettingInfo].value:\n- return [NogoStdlibInfo(\n- facts = None,\n- raw_findings = [],\n- )]\n+ package_filter = \"^runtime$\"\n# Build the configuration for the stdlib.\ngo_ctx, args, inputs, raw_findings = _nogo_config(ctx, deps = [])\n@@ -97,6 +94,7 @@ def _nogo_stdlib_impl(ctx):\n\"-findings=%s\" % findings_file.path,\n\"-facts=%s\" % facts_file.path,\n\"-root=.*?/src/\",\n+ \"-filter=%s\" % package_filter,\n] + [f.path for f in go_ctx.stdlib_srcs],\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Move goid to dynamic facts render.
This removes the need for ongoing tags.
This change requires some minor updates to remove dependency cycles, since
the goid package is a base library used by many internals (log, sync, etc.).
PiperOrigin-RevId: 504066914 |
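For illustration, a small usage sketch of the package touched in this record. It assumes the gvisor.dev/gvisor module is importable as-is and only shows that goid.Get() returns a per-goroutine ID; it says nothing about the generated assembly.

```go
package main

import (
	"fmt"
	"sync"

	"gvisor.dev/gvisor/pkg/goid"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// Each goroutine sees its own runtime goroutine ID.
			fmt.Printf("worker %d runs as goroutine %d\n", i, goid.Get())
		}(i)
	}
	wg.Wait()
	fmt.Println("main goroutine:", goid.Get())
}
```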
259,907 | 24.01.2023 11:14:25 | 28,800 | 22961a9e41a141b70d1ffa0fc02c00efe93801be | Make kernfs.walkParentDirLocked() start path traversal from passed dentry.
This is analogous to all other filesystem implementations and their
walkParentDirLocked() implementations. This is required because sometimes
walkParentDirLocked() is called after symlinks are followed. And the
traversal should continue from the parent of the symlink file. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/kernfs/filesystem.go",
"new_path": "pkg/sentry/fsimpl/kernfs/filesystem.go",
"diff": "@@ -180,8 +180,7 @@ func (fs *Filesystem) walkExistingLocked(ctx context.Context, rp *vfs.ResolvingP\n// - !rp.Done().\n//\n// Postconditions: Caller must call fs.processDeferredDecRefs*.\n-func (fs *Filesystem) walkParentDirLocked(ctx context.Context, rp *vfs.ResolvingPath) (*Dentry, error) {\n- d := rp.Start().Impl().(*Dentry)\n+func (fs *Filesystem) walkParentDirLocked(ctx context.Context, rp *vfs.ResolvingPath, d *Dentry) (*Dentry, error) {\nfor !rp.Final() {\nvar err error\nd, _, err = fs.stepExistingLocked(ctx, rp, d)\n@@ -342,7 +341,7 @@ func (fs *Filesystem) GetParentDentryAt(ctx context.Context, rp *vfs.ResolvingPa\nfs.mu.RLock()\ndefer fs.processDeferredDecRefs(ctx)\ndefer fs.mu.RUnlock()\n- d, err := fs.walkParentDirLocked(ctx, rp)\n+ d, err := fs.walkParentDirLocked(ctx, rp, rp.Start().Impl().(*Dentry))\nif err != nil {\nreturn nil, err\n}\n@@ -358,7 +357,7 @@ func (fs *Filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.\nfs.mu.Lock()\ndefer fs.processDeferredDecRefs(ctx)\ndefer fs.mu.Unlock()\n- parent, err := fs.walkParentDirLocked(ctx, rp)\n+ parent, err := fs.walkParentDirLocked(ctx, rp, rp.Start().Impl().(*Dentry))\nif err != nil {\nreturn err\n}\n@@ -405,7 +404,7 @@ func (fs *Filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\nfs.mu.Lock()\ndefer fs.processDeferredDecRefs(ctx)\ndefer fs.mu.Unlock()\n- parent, err := fs.walkParentDirLocked(ctx, rp)\n+ parent, err := fs.walkParentDirLocked(ctx, rp, rp.Start().Impl().(*Dentry))\nif err != nil {\nreturn err\n}\n@@ -442,7 +441,7 @@ func (fs *Filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\nfs.mu.Lock()\ndefer fs.processDeferredDecRefs(ctx)\ndefer fs.mu.Unlock()\n- parent, err := fs.walkParentDirLocked(ctx, rp)\n+ parent, err := fs.walkParentDirLocked(ctx, rp, rp.Start().Impl().(*Dentry))\nif err != nil {\nreturn err\n}\n@@ -499,7 +498,7 @@ func (fs *Filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\n// May create new file.\nmustCreate := opts.Flags&linux.O_EXCL != 0\n- d := rp.Start().Impl().(*Dentry)\n+ start := rp.Start().Impl().(*Dentry)\nfs.mu.Lock()\nunlocked := false\nunlock := func() {\n@@ -520,19 +519,19 @@ func (fs *Filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf\nif mustCreate {\nreturn nil, linuxerr.EEXIST\n}\n- if err := d.inode.CheckPermissions(ctx, rp.Credentials(), ats); err != nil {\n+ if err := start.inode.CheckPermissions(ctx, rp.Credentials(), ats); err != nil {\nreturn nil, err\n}\n// Open may block so we need to unlock fs.mu. 
IncRef d to prevent\n// its destruction while fs.mu is unlocked.\n- d.IncRef()\n+ start.IncRef()\nunlock()\n- fd, err := d.inode.Open(ctx, rp, d, opts)\n- d.DecRef(ctx)\n+ fd, err := start.inode.Open(ctx, rp, start, opts)\n+ start.DecRef(ctx)\nreturn fd, err\n}\nafterTrailingSymlink:\n- parent, err := fs.walkParentDirLocked(ctx, rp)\n+ parent, err := fs.walkParentDirLocked(ctx, rp, start)\nif err != nil {\nreturn nil, err\n}\n@@ -566,6 +565,7 @@ afterTrailingSymlink:\n// must be handled by the VFS layer.\nreturn nil, err\n}\n+ start = parent\ngoto afterTrailingSymlink\n}\nif linuxerr.Equals(linuxerr.ENOENT, err) {\n@@ -660,7 +660,7 @@ func (fs *Filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa\n// Resolve the destination directory first to verify that it's on this\n// Mount.\n- dstDir, err := fs.walkParentDirLocked(ctx, rp)\n+ dstDir, err := fs.walkParentDirLocked(ctx, rp, rp.Start().Impl().(*Dentry))\nif err != nil {\nreturn err\n}\n@@ -883,7 +883,7 @@ func (fs *Filesystem) SymlinkAt(ctx context.Context, rp *vfs.ResolvingPath, targ\nfs.mu.Lock()\ndefer fs.processDeferredDecRefs(ctx)\ndefer fs.mu.Unlock()\n- parent, err := fs.walkParentDirLocked(ctx, rp)\n+ parent, err := fs.walkParentDirLocked(ctx, rp, rp.Start().Impl().(*Dentry))\nif err != nil {\nreturn err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make kernfs.walkParentDirLocked() start path traversal from passed dentry.
This is analogous to all other filesystem implementations and their
walkParentDirLocked() implementations. This is required because sometimes
walkParentDirLocked() is called after symlinks are followed. And the
traversal should continue from the parent of the symlink file.
PiperOrigin-RevId: 504326573 |
259,881 | 24.01.2023 14:54:23 | 28,800 | 2e68fa3c15d47ef9e54f81e368e6189fcfd2e257 | Enable analysis of packages using type parameters
Upstream analyzers support type parameters now, so we no longer need to skip
analysis. | [
{
"change_type": "MODIFY",
"old_path": "tools/checkinfo/checkinfo.go",
"new_path": "tools/checkinfo/checkinfo.go",
"diff": "@@ -74,12 +74,16 @@ func walkObject(pass *analysis.Pass, obj types.Object) {\npass.ExportObjectFact(obj, &a)\npass.ExportObjectFact(obj, &s)\ncase *types.TypeName:\n- // Skip if just an alias, or if not underlying type. If it is\n- // not an alias, then it must be package-local.\n+ // Skip if just an alias, or if not underlying type, or if a\n+ // type parameter. If it is not an alias, then it must be\n+ // package-local.\ntyp := x.Type()\nif x.IsAlias() || typ == nil || typ.Underlying() == nil {\nbreak\n}\n+ if _, ok := typ.(*types.TypeParam); ok {\n+ break\n+ }\n// Add basic information.\na := Align(pass.TypesSizes.Alignof(typ))\ns := Size(pass.TypesSizes.Sizeof(typ))\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/check/check.go",
"new_path": "tools/nogo/check/check.go",
"diff": "@@ -365,6 +365,7 @@ func (i *importer) checkPackage(path string, srcs []string) (*types.Package, Fin\n}\ntypesInfo := &types.Info{\nTypes: make(map[ast.Expr]types.TypeAndValue),\n+ Instances: make(map[*ast.Ident]types.Instance),\nUses: make(map[*ast.Ident]types.Object),\nDefs: make(map[*ast.Ident]types.Object),\nImplicits: make(map[ast.Node]types.Object),\n@@ -726,12 +727,6 @@ func SplitPackages(srcs []string, srcRootPrefix string) map[string][]string {\ncontinue\n}\n- // Skip unsupported packages explicitly.\n- if _, ok := usesTypeParams[pkg]; ok {\n- log.Printf(\"WARNING: Skipping package %q: type param analysis not yet supported.\", pkg)\n- continue\n- }\n-\n// Add to the package.\nsources[pkg] = append(sources[pkg], filename)\n}\n@@ -739,16 +734,6 @@ func SplitPackages(srcs []string, srcRootPrefix string) map[string][]string {\nreturn sources\n}\n-// Go standard library packages using Go 1.18 type parameter features.\n-//\n-// As of writing, analysis tooling is not updated to support type parameters\n-// and will choke on these packages. We skip these packages entirely for now.\n-//\n-// TODO(b/201686256): remove once tooling can handle type parameters.\n-var usesTypeParams = map[string]struct{}{\n- \"sync/atomic\": {}, // https://go.dev/issue/50860\n-}\n-\n// Bundle checks a bundle of files (typically the standard library).\nfunc Bundle(sources map[string][]string) (FindingSet, facts.Serializer, error) {\n// Process all packages.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable analysis of packages using type parameters
Upstream analyzers support type parameters now, so we no longer need to skip
analysis.
PiperOrigin-RevId: 504383378 |
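The change above starts filling in types.Info.Instances so generic code can be analyzed. As a standalone sketch of what that map records — using only the standard go/types API (Go 1.18+), not nogo itself:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

func Max[T int | float64](a, b T) T {
	if a > b {
		return a
	}
	return b
}

var _ = Max(1, 2)
var _ = Max(1.5, 2.5)
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{
		// Instances records how each use of a generic function or type
		// was instantiated; analyzers need this map for generic code.
		Instances: make(map[*ast.Ident]types.Instance),
	}
	conf := types.Config{Importer: importer.Default()}
	if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
		panic(err)
	}
	for id, inst := range info.Instances {
		fmt.Printf("%s at %s instantiated as %v\n", id.Name, fset.Position(id.Pos()), inst.Type)
	}
}
```

Each use of the generic function shows up with its instantiated signature — one entry for the int call and one for the float64 call to Max.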
260,002 | 14.12.2022 11:39:47 | 28,800 | 90873a990380be6c1d64498dde1020f01fb69971 | Add simple unit test checking limits | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -2139,6 +2139,7 @@ cc_binary(\nlinkstatic = 1,\ndeps = [\n\"//test/util:capability_util\",\n+ \"//test/util:proc_util\",\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n\"//test/util:thread_util\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/rlimits.cc",
"new_path": "test/syscalls/linux/rlimits.cc",
"diff": "#include <unistd.h>\n#include <climits>\n+#include <algorithm>\n+#include <string>\n+#include <vector>\n+#include \"absl/strings/numbers.h\"\n+#include \"absl/strings/str_split.h\"\n#include \"test/util/capability_util.h\"\n+#include \"test/util/proc_util.h\"\n#include \"test/util/test_util.h\"\n#include \"test/util/thread_util.h\"\n@@ -31,6 +37,21 @@ namespace testing {\nnamespace {\n+PosixErrorOr<ProcLimitsEntry> GetProcLimitEntryByType(LimitType limit_type) {\n+ ASSIGN_OR_RETURN_ERRNO(std::string proc_self_limits,\n+ GetContents(\"/proc/self/limits\"));\n+ ASSIGN_OR_RETURN_ERRNO(auto entries, ParseProcLimits(proc_self_limits));\n+ auto it =\n+ std::find_if(entries.begin(), entries.end(),\n+ [limit_type](const ProcLimitsEntry& v) {\n+ return v.limit_type == limit_type;\n+ });\n+ if (it == entries.end()) {\n+ return PosixError(ENOENT, \"limit type not found\");\n+ }\n+ return *it;\n+}\n+\nTEST(RlimitTest, SetRlimitHigher) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_RESOURCE)));\n@@ -43,6 +64,12 @@ TEST(RlimitTest, SetRlimitHigher) {\nrl.rlim_max--;\nASSERT_THAT(setrlimit(RLIMIT_NOFILE, &rl), SyscallSucceeds());\n+ // Now verify we can read the changed values via /proc/self/limits\n+ const ProcLimitsEntry limit_entry =\n+ ASSERT_NO_ERRNO_AND_VALUE(GetProcLimitEntryByType(LimitType::NumberOfFiles));\n+ EXPECT_EQ(rl.rlim_cur, limit_entry.cur_limit);\n+ EXPECT_EQ(rl.rlim_max, limit_entry.max_limit);\n+\nrl.rlim_max++;\nEXPECT_THAT(setrlimit(RLIMIT_NOFILE, &rl), SyscallSucceeds());\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/proc_util.cc",
"new_path": "test/util/proc_util.cc",
"diff": "@@ -331,5 +331,158 @@ bool IsTHPDisabled() {\nreturn StackTHPDisabled(maps.ValueOrDie());\n}\n+PosixErrorOr<ProcLimitsEntry> ParseProcLimitsLine(absl::string_view line) {\n+ ProcLimitsEntry limits_entry = {};\n+\n+ std::vector<std::string> parts =\n+ absl::StrSplit(line.substr(25), ' ', absl::SkipWhitespace());\n+\n+ // should have 3 parts (soft, hard, units)\n+ // the name is ignored since the whole line is space separated and\n+ // the name has spaces in but not a consistent number of spaces per\n+ // name (e.g. 'Max cpu time' vs 'Max processes')\n+ // however, since units are optional, ignore them as well\n+ if (parts.size() < 2 || parts.size() > 3) {\n+ return PosixError(EINVAL, absl::StrCat(\"Invalid line: \", line));\n+ }\n+\n+ // parse the limit type\n+ auto limitType = line.substr(0,25);\n+ if (absl::StrContains(limitType, \"cpu time\")) {\n+ limits_entry.limit_type = LimitType::CPU;\n+ } else if (absl::StrContains(limitType, \"file size\")) {\n+ limits_entry.limit_type = LimitType::FileSize;\n+ } else if (absl::StrContains(limitType, \"data size\")) {\n+ limits_entry.limit_type = LimitType::Data;\n+ } else if (absl::StrContains(limitType, \"stack size\")) {\n+ limits_entry.limit_type = LimitType::Stack;\n+ } else if (absl::StrContains(limitType, \"core file size\")) {\n+ limits_entry.limit_type = LimitType::Core;\n+ } else if (absl::StrContains(limitType, \"resident set\")) {\n+ limits_entry.limit_type = LimitType::RSS;\n+ } else if (absl::StrContains(limitType, \"processes\")) {\n+ limits_entry.limit_type = LimitType::ProcessCount;\n+ } else if (absl::StrContains(limitType, \"open files\")) {\n+ limits_entry.limit_type = LimitType::NumberOfFiles;\n+ } else if (absl::StrContains(limitType, \"locked memory\")) {\n+ limits_entry.limit_type = LimitType::MemoryLocked;\n+ } else if (absl::StrContains(limitType, \"address space\")) {\n+ limits_entry.limit_type = LimitType::AS;\n+ } else if (absl::StrContains(limitType, \"file locks\")) {\n+ limits_entry.limit_type = LimitType::Locks;\n+ } else if (absl::StrContains(limitType, \"pending signals\")) {\n+ limits_entry.limit_type = LimitType::SignalsPending;\n+ } else if (absl::StrContains(limitType, \"msgqueue size\")) {\n+ limits_entry.limit_type = LimitType::MessageQueueBytes;\n+ } else if (absl::StrContains(limitType, \"nice priority\")) {\n+ limits_entry.limit_type = LimitType::Nice;\n+ } else if (absl::StrContains(limitType, \"realtime priority\")) {\n+ limits_entry.limit_type = LimitType::RealTimePriority;\n+ } else if (absl::StrContains(limitType, \"realtime timeout\")) {\n+ limits_entry.limit_type = LimitType::Rttime;\n+ } else {\n+ return PosixError(EINVAL, absl::StrCat(\"Invalid limit type: \", limitType));\n+ }\n+\n+ // parse soft limit\n+ if (parts[0] == \"unlimited\") {\n+ limits_entry.cur_limit = ~0ULL;\n+ } else {\n+ ASSIGN_OR_RETURN_ERRNO(limits_entry.cur_limit, Atoi<uint64_t>(parts[0]));\n+ }\n+\n+ // parse hard limit\n+ if (parts[1] == \"unlimited\") {\n+ limits_entry.max_limit = ~0ULL;\n+ } else {\n+ ASSIGN_OR_RETURN_ERRNO(limits_entry.max_limit, Atoi<uint64_t>(parts[1]));\n+ }\n+\n+ // ignore units\n+\n+ return limits_entry;\n+}\n+\n+PosixErrorOr<std::vector<ProcLimitsEntry>> ParseProcLimits(\n+ absl::string_view contents) {\n+ std::vector<ProcLimitsEntry> entries;\n+ std::vector<std::string> lines = absl::StrSplit(contents, '\\n', absl::SkipEmpty());\n+ // skip first line (headers)\n+ for (size_t i = 1U; i < lines.size(); ++i) {\n+ std::cout << \"line: \" << lines[i] << std::endl;\n+ 
ASSIGN_OR_RETURN_ERRNO(auto entry, ParseProcLimitsLine(lines[i]));\n+ entries.push_back(entry);\n+ }\n+ return entries;\n+}\n+\n+std::ostream& operator<<(std::ostream& os, const ProcLimitsEntry& entry) {\n+ std::string str = \"Max \";\n+\n+ switch (entry.limit_type) {\n+ case LimitType::CPU: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"cpu time\")); break;\n+ case LimitType::FileSize: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"file size\")); break;\n+ case LimitType::Data: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"data size\")); break;\n+ case LimitType::Stack: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"stack size\")); break;\n+ case LimitType::Core: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"core file size\")); break;\n+ case LimitType::RSS: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"resident set\")); break;\n+ case LimitType::ProcessCount: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"processes\")); break;\n+ case LimitType::NumberOfFiles: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"open files\")); break;\n+ case LimitType::MemoryLocked: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"locked memory\")); break;\n+ case LimitType::AS: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"address space\")); break;\n+ case LimitType::Locks: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"file locks\")); break;\n+ case LimitType::SignalsPending: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"pending signals\")); break;\n+ case LimitType::MessageQueueBytes: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"msgqueue size\")); break;\n+ case LimitType::Nice: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"nice priority\")); break;\n+ case LimitType::RealTimePriority: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"realtime priority\")); break;\n+ case LimitType::Rttime: absl::StrAppend(&str, absl::StrFormat(\"%-25s \", \"realtime timeout\")); break;\n+ }\n+\n+ if (entry.cur_limit == ~0ULL) {\n+ absl::StrAppend(&str, absl::StrFormat(\"%-20s \", \"unlimited\"));\n+ } else {\n+ absl::StrAppend(&str, absl::StrFormat(\"%-20d \", entry.cur_limit));\n+ }\n+\n+ if (entry.max_limit == ~0ULL) {\n+ absl::StrAppend(&str, absl::StrFormat(\"%-20s \", \"unlimited\"));\n+ } else {\n+ absl::StrAppend(&str, absl::StrFormat(\"%-20d \", entry.max_limit));\n+ }\n+\n+ switch (entry.limit_type) {\n+ case LimitType::CPU: absl::StrAppend(&str, absl::StrFormat(\"%-10s\", \"seconds\")); break;\n+ case LimitType::FileSize: absl::StrAppend(&str, absl::StrFormat(\"%-10s \", \"bytes\")); break;\n+ case LimitType::Data: absl::StrAppend(&str, absl::StrFormat(\"%-10s \", \"bytes\")); break;\n+ case LimitType::Stack: absl::StrAppend(&str, absl::StrFormat(\"%-10s \", \"bytes\")); break;\n+ case LimitType::Core: absl::StrAppend(&str, absl::StrFormat(\"%-10s \", \"bytes\")); break;\n+ case LimitType::RSS: absl::StrAppend(&str, absl::StrFormat(\"%-10s \", \"bytes\")); break;\n+ case LimitType::ProcessCount: absl::StrAppend(&str, absl::StrFormat(\"%-10s \", \"processes\")); break;\n+ case LimitType::NumberOfFiles: absl::StrAppend(&str, absl::StrFormat(\"%-10s \", \"files\")); break;\n+ case LimitType::MemoryLocked: absl::StrAppend(&str, absl::StrFormat(\"%-10s \", \"bytes\")); break;\n+ case LimitType::AS: absl::StrAppend(&str, absl::StrFormat(\"%-10s \", \"bytes\")); break;\n+ case LimitType::Locks: absl::StrAppend(&str, absl::StrFormat(\"%-10s \", \"locks\")); break;\n+ case LimitType::SignalsPending: absl::StrAppend(&str, absl::StrFormat(\"%-10s 
\", \"signals\")); break;\n+ case LimitType::MessageQueueBytes: absl::StrAppend(&str, absl::StrFormat(\"%-10s \", \"bytes\")); break;\n+ case LimitType::Nice: absl::StrAppend(&str, absl::StrFormat(\"%-10s \", \"\")); break;\n+ case LimitType::RealTimePriority: absl::StrAppend(&str, absl::StrFormat(\"%-10s \", \"\")); break;\n+ case LimitType::Rttime: absl::StrAppend(&str, absl::StrFormat(\"%-10s \", \"us\")); break;\n+ }\n+\n+ os << str;\n+ return os;\n+}\n+\n+std::ostream& operator<<(std::ostream& os, const std::vector<ProcLimitsEntry>& vec) {\n+ os << \"Limit Soft Limit Hard Limit Units \\n\";\n+ for (unsigned int i = 0; i < vec.size(); i++) {\n+ os << vec[i];\n+ if (i != vec.size() - 1) {\n+ os << \"\\n\";\n+ }\n+ }\n+ return os;\n+}\n+\n} // namespace testing\n} // namespace gvisor\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/proc_util.h",
"new_path": "test/util/proc_util.h",
"diff": "@@ -195,6 +195,49 @@ MATCHER_P(ContainsMappings, mappings,\nreturn all_present;\n}\n+// LimitType is an rlimit type\n+enum class LimitType {\n+ CPU,\n+ FileSize,\n+ Data,\n+ Stack,\n+ Core,\n+ RSS,\n+ ProcessCount,\n+ NumberOfFiles,\n+ MemoryLocked,\n+ AS,\n+ Locks,\n+ SignalsPending,\n+ MessageQueueBytes,\n+ Nice,\n+ RealTimePriority,\n+ Rttime,\n+};\n+\n+// ProcLimitsEntry contains the data from a single line in /proc/xxx/limits.\n+struct ProcLimitsEntry {\n+ LimitType limit_type;\n+ uint64_t cur_limit;\n+ uint64_t max_limit;\n+};\n+\n+// Parses a single line from /proc/xxx/limits\n+PosixErrorOr<ProcLimitsEntry> ParseProcLimitsLine(absl::string_view line);\n+\n+// Parses an entire /proc/xxx/limits file into lines\n+PosixErrorOr<std::vector<ProcLimitsEntry>> ParseProcLimits(absl::string_view contents);\n+\n+// Printer for ProcLimitsEntry.\n+std::ostream& operator<<(std::ostream& os, const ProcLimitsEntry& entry);\n+\n+// Printer for std::vector<ProcLimitsEntry>.\n+std::ostream& operator<<(std::ostream& os, const std::vector<ProcLimitsEntry>& vec);\n+\n+// GMock printer for std::vector<ProcLimitsEntry>.\n+inline void PrintTo(const std::vector<ProcLimitsEntry>& vec, std::ostream* os) {\n+ *os << vec;\n+}\n} // namespace testing\n} // namespace gvisor\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add simple unit test checking limits |
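A rough Go equivalent of the /proc/self/limits parsing done by the C++ helper above; the fixed 25-character name column, the space-separated soft/hard/units fields, and the "unlimited" sentinel are assumptions carried over from that test code rather than from kernel documentation.

```go
package main

import (
	"fmt"
	"math"
	"os"
	"strconv"
	"strings"
)

// parseValue converts a soft/hard limit field, mapping "unlimited" to the
// maximum uint64 value (the ~0ULL convention used in the test above).
func parseValue(s string) (uint64, error) {
	if s == "unlimited" {
		return math.MaxUint64, nil
	}
	return strconv.ParseUint(s, 10, 64)
}

func main() {
	data, err := os.ReadFile("/proc/self/limits")
	if err != nil {
		panic(err)
	}
	lines := strings.Split(strings.TrimRight(string(data), "\n"), "\n")
	for _, line := range lines[1:] { // skip the header row
		if len(line) < 26 {
			continue
		}
		name := strings.TrimSpace(line[:25]) // fixed-width name column (assumed)
		fields := strings.Fields(line[25:])  // soft, hard, optional units
		if len(fields) < 2 {
			continue
		}
		soft, err1 := parseValue(fields[0])
		hard, err2 := parseValue(fields[1])
		if err1 != nil || err2 != nil {
			continue
		}
		fmt.Printf("%-25s soft=%d hard=%d\n", name, soft, hard)
	}
}
```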
259,853 | 24.01.2023 17:12:43 | 28,800 | aeabb7852781e764655feeca8f67c46be14b0bc0 | Allow PullFullState to return an error. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_clone.go",
"new_path": "pkg/sentry/kernel/task_clone.go",
"diff": "@@ -65,7 +65,12 @@ func (t *Task) Clone(args *linux.CloneArgs) (ThreadID, *SyscallControl, error) {\n// Pull task registers and FPU state, a cloned task will inherit the\n// state of the current task.\n- t.p.PullFullState(t.MemoryManager().AddressSpace(), t.Arch())\n+ if err := t.p.PullFullState(t.MemoryManager().AddressSpace(), t.Arch()); err != nil {\n+ t.Warningf(\"Unable to pull a full state: %v\", err)\n+ t.forceSignal(linux.SIGILL, true /* unconditional */)\n+ t.SendSignal(SignalInfoPriv(linux.SIGILL))\n+ return 0, nil, linuxerr.EFAULT\n+ }\n// \"If CLONE_NEWUSER is specified along with other CLONE_NEW* flags in a\n// single clone(2) or unshare(2) call, the user namespace is guaranteed to\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_run.go",
"new_path": "pkg/sentry/kernel/task_run.go",
"diff": "@@ -247,7 +247,10 @@ func (app *runApp) execute(t *Task) taskRunState {\nt.Arch().ClearSingleStep()\n}\nif t.hasTracer() {\n- t.p.PullFullState(t.MemoryManager().AddressSpace(), t.Arch())\n+ if e := t.p.PullFullState(t.MemoryManager().AddressSpace(), t.Arch()); e != nil {\n+ t.Warningf(\"Unable to pull a full state: %v\", e)\n+ err = e\n+ }\n}\nswitch err {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_signals.go",
"new_path": "pkg/sentry/kernel/task_signals.go",
"diff": "@@ -644,8 +644,9 @@ func (t *Task) SetSavedSignalMask(mask linux.SignalSet) {\n}\n// SignalStack returns the task-private signal stack.\n+//\n+// By precondition, a full state has to be pulled.\nfunc (t *Task) SignalStack() linux.SignalStack {\n- t.p.PullFullState(t.MemoryManager().AddressSpace(), t.Arch())\nalt := t.signalStack\nif t.onSignalStack(alt) {\nalt.Flags |= linux.SS_ONSTACK\n@@ -653,6 +654,34 @@ func (t *Task) SignalStack() linux.SignalStack {\nreturn alt\n}\n+// SigaltStack implements the sigaltstack syscall.\n+func (t *Task) SigaltStack(setaddr hostarch.Addr, oldaddr hostarch.Addr) (*SyscallControl, error) {\n+ if err := t.p.PullFullState(t.MemoryManager().AddressSpace(), t.Arch()); err != nil {\n+ t.PrepareGroupExit(linux.WaitStatusTerminationSignal(linux.SIGILL))\n+ return CtrlDoExit, linuxerr.EFAULT\n+ }\n+\n+ alt := t.SignalStack()\n+ if oldaddr != 0 {\n+ if _, err := alt.CopyOut(t, oldaddr); err != nil {\n+ return nil, err\n+ }\n+ }\n+ if setaddr != 0 {\n+ if _, err := alt.CopyIn(t, setaddr); err != nil {\n+ return nil, err\n+ }\n+ // The signal stack cannot be changed if the task is currently\n+ // on the stack. This is enforced at the lowest level because\n+ // these semantics apply to changing the signal stack via a\n+ // ucontext during a signal handler.\n+ if !t.SetSignalStack(alt) {\n+ return nil, linuxerr.EPERM\n+ }\n+ }\n+ return nil, nil\n+}\n+\n// onSignalStack returns true if the task is executing on the given signal stack.\nfunc (t *Task) onSignalStack(alt linux.SignalStack) bool {\nsp := hostarch.Addr(t.Arch().Stack())\n@@ -1018,7 +1047,10 @@ func (*runInterrupt) execute(t *Task) taskRunState {\n// Are there signals pending?\nif info := t.dequeueSignalLocked(linux.SignalSet(t.signalMask.RacyLoad())); info != nil {\n- t.p.PullFullState(t.MemoryManager().AddressSpace(), t.Arch())\n+ if err := t.p.PullFullState(t.MemoryManager().AddressSpace(), t.Arch()); err != nil {\n+ t.PrepareGroupExit(linux.WaitStatusTerminationSignal(linux.SIGILL))\n+ return (*runExit)(nil)\n+ }\nif linux.SignalSetOf(linux.Signal(info.Signo))&StopSignals != 0 {\n// Indicate that we've dequeued a stop signal before unlocking the\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/context.go",
"new_path": "pkg/sentry/platform/kvm/context.go",
"diff": "@@ -125,7 +125,7 @@ func (c *context) Release() {}\nfunc (c *context) FullStateChanged() {}\n// PullFullState implements platform.Context.PullFullState.\n-func (c *context) PullFullState(as platform.AddressSpace, ac *arch.Context64) {}\n+func (c *context) PullFullState(as platform.AddressSpace, ac *arch.Context64) error { return nil }\n// PrepareSleep implements platform.Context.platform.Context.\nfunc (*context) PrepareSleep() {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/platform.go",
"new_path": "pkg/sentry/platform/platform.go",
"diff": "@@ -221,7 +221,7 @@ type Context interface {\n// PullFullState() to load all registers and FPU state.\n//\n// Preconditions: The caller must be running on the task goroutine.\n- PullFullState(as AddressSpace, ac *arch.Context64)\n+ PullFullState(as AddressSpace, ac *arch.Context64) error\n// FullStateChanged() indicates that a thread state has been changed by\n// the Sentry. This happens in case of the rt_sigreturn, execve, etc.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/ptrace.go",
"new_path": "pkg/sentry/platform/ptrace/ptrace.go",
"diff": "@@ -197,7 +197,7 @@ func (c *context) Release() {}\nfunc (c *context) FullStateChanged() {}\n// PullFullState implements platform.Context.PullFullState.\n-func (c *context) PullFullState(as platform.AddressSpace, ac *arch.Context64) {}\n+func (c *context) PullFullState(as platform.AddressSpace, ac *arch.Context64) error { return nil }\n// PrepareSleep implements platform.Context.platform.PrepareSleep.\nfunc (*context) PrepareSleep() {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_signal.go",
"new_path": "pkg/sentry/syscalls/linux/sys_signal.go",
"diff": "@@ -323,26 +323,8 @@ func Sigaltstack(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S\nsetaddr := args[0].Pointer()\noldaddr := args[1].Pointer()\n- alt := t.SignalStack()\n- if oldaddr != 0 {\n- if _, err := alt.CopyOut(t, oldaddr); err != nil {\n- return 0, nil, err\n- }\n- }\n- if setaddr != 0 {\n- if _, err := alt.CopyIn(t, setaddr); err != nil {\n- return 0, nil, err\n- }\n- // The signal stack cannot be changed if the task is currently\n- // on the stack. This is enforced at the lowest level because\n- // these semantics apply to changing the signal stack via a\n- // ucontext during a signal handler.\n- if !t.SetSignalStack(alt) {\n- return 0, nil, linuxerr.EPERM\n- }\n- }\n-\n- return 0, nil, nil\n+ ctrl, err := t.SigaltStack(setaddr, oldaddr)\n+ return 0, ctrl, err\n}\n// Pause implements linux syscall pause(2).\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow PullFullState to return an error.
PiperOrigin-RevId: 504415294 |
259,992 | 25.01.2023 13:03:13 | 28,800 | b4c64d11f166496a11717f035ff51b8dc7dca6a0 | Log seccomp from spec
Only log if the OCI seccomp flag is enabled; otherwise the seccomp field
is ignored. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -293,7 +293,7 @@ func (cm *containerManager) StartSubcontainer(args *StartArgs, _ *struct{}) erro\n}\n// All validation passed, logs the spec for debugging.\n- specutils.LogSpec(args.Spec)\n+ specutils.LogSpecDebug(args.Spec, args.Conf.OCISeccomp)\ngoferFiles := args.Files\nvar stdios []*fd.FD\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/boot.go",
"new_path": "runsc/cmd/boot.go",
"diff": "@@ -236,7 +236,7 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...any) subcomma\nif err != nil {\nutil.Fatalf(\"reading spec: %v\", err)\n}\n- specutils.LogSpec(spec)\n+ specutils.LogSpecDebug(spec, conf.OCISeccomp)\nif b.applyCaps {\ncaps := spec.Process.Capabilities\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/checkpoint.go",
"new_path": "runsc/cmd/checkpoint.go",
"diff": "@@ -123,7 +123,7 @@ func (c *Checkpoint) Execute(_ context.Context, f *flag.FlagSet, args ...any) su\nutil.Fatalf(\"reading spec: %v\", err)\n}\n- specutils.LogSpec(spec)\n+ specutils.LogSpecDebug(spec, conf.OCISeccomp)\nif cont.ConsoleSocket != \"\" {\nlog.Warningf(\"ignoring console socket since it cannot be restored\")\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/create.go",
"new_path": "runsc/cmd/create.go",
"diff": "@@ -96,7 +96,7 @@ func (c *Create) Execute(_ context.Context, f *flag.FlagSet, args ...any) subcom\nif err != nil {\nreturn util.Errorf(\"reading spec: %v\", err)\n}\n- specutils.LogSpec(spec)\n+ specutils.LogSpecDebug(spec, conf.OCISeccomp)\n// Create the container. A new sandbox will be created for the\n// container unless the metadata specifies that it should be run in an\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/do.go",
"new_path": "runsc/cmd/do.go",
"diff": "@@ -403,7 +403,7 @@ func calculatePeerIP(ip string) (string, error) {\n}\nfunc startContainerAndWait(spec *specs.Spec, conf *config.Config, cid string, waitStatus *unix.WaitStatus) subcommands.ExitStatus {\n- specutils.LogSpec(spec)\n+ specutils.LogSpecDebug(spec, conf.OCISeccomp)\nout, err := json.Marshal(spec)\nif err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/gofer.go",
"new_path": "runsc/cmd/gofer.go",
"diff": "@@ -205,7 +205,7 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...any) subcomm\n}\n}()\n- specutils.LogSpec(spec)\n+ specutils.LogSpecDebug(spec, conf.OCISeccomp)\n// fsgofer should run with a umask of 0, because we want to preserve file\n// modes exactly as sent by the sandbox, which will have applied its own umask.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/restore.go",
"new_path": "runsc/cmd/restore.go",
"diff": "@@ -93,7 +93,7 @@ func (r *Restore) Execute(_ context.Context, f *flag.FlagSet, args ...any) subco\nif err != nil {\nreturn util.Errorf(\"reading spec: %v\", err)\n}\n- specutils.LogSpec(spec)\n+ specutils.LogSpecDebug(spec, conf.OCISeccomp)\nif r.imagePath == \"\" {\nreturn util.Errorf(\"image-path flag must be provided\")\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/run.go",
"new_path": "runsc/cmd/run.go",
"diff": "@@ -87,7 +87,7 @@ func (r *Run) Execute(_ context.Context, f *flag.FlagSet, args ...any) subcomman\nif err != nil {\nreturn util.Errorf(\"reading spec: %v\", err)\n}\n- specutils.LogSpec(spec)\n+ specutils.LogSpecDebug(spec, conf.OCISeccomp)\nrunArgs := container.Args{\nID: id,\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/specutils/specutils.go",
"new_path": "runsc/specutils/specutils.go",
"diff": "@@ -47,8 +47,8 @@ var ExePath = \"/proc/self/exe\"\n// Version is the supported spec version.\nvar Version = specs.Version\n-// LogSpec logs the spec in a human-friendly way.\n-func LogSpec(orig *specs.Spec) {\n+// LogSpecDebug writes the spec in a human-friendly format to the debug log.\n+func LogSpecDebug(orig *specs.Spec, logSeccomp bool) {\nif !log.IsLogging(log.Debug) {\nreturn\n}\n@@ -59,7 +59,9 @@ func LogSpec(orig *specs.Spec) {\nspec.Process.Capabilities = nil\n}\nif spec.Linux != nil {\n+ if !logSeccomp {\nspec.Linux.Seccomp = nil\n+ }\nspec.Linux.MaskedPaths = nil\nspec.Linux.ReadonlyPaths = nil\nif spec.Linux.Resources != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/main.go",
"new_path": "test/runner/main.go",
"diff": "@@ -190,7 +190,7 @@ func runRunsc(tc *gtest.TestCase, spec *specs.Spec) error {\nname := tc.FullName()\nid := testutil.RandomContainerID()\nlog.Infof(\"Running test %q in container %q\", name, id)\n- specutils.LogSpec(spec)\n+ specutils.LogSpecDebug(spec, false)\nargs := []string{\n\"-root\", rootDir,\n"
}
] | Go | Apache License 2.0 | google/gvisor | Log seccomp from spec
Only log if the OCI seccomp flag is enabled; otherwise the seccomp field
is ignored.
PiperOrigin-RevId: 504640124 |
259,891 | 25.01.2023 14:27:59 | 28,800 | cef82030f6e5af29daeac1a09efd176bbf4e9db8 | github: update actions/setup-go to newest version | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/go.yml",
"new_path": ".github/workflows/go.yml",
"diff": "@@ -43,7 +43,7 @@ jobs:\nif: github.event_name == 'pull_request' || steps.setup.outputs.has_token != 'true'\nwith:\nfetch-depth: 0\n- - uses: actions/setup-go@v2\n+ - uses: actions/setup-go@v3.5.0\nwith:\ngo-version: 1.19\n- run: tools/go_branch.sh\n"
}
] | Go | Apache License 2.0 | google/gvisor | github: update actions/setup-go to newest version
PiperOrigin-RevId: 504662268 |
259,891 | 25.01.2023 16:35:09 | 28,800 | 2b5f0e1b8ed24c05b12297a3e9be9b5d83c65147 | github: update actions/checkout to newest version | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/go.yml",
"new_path": ".github/workflows/go.yml",
"diff": "@@ -34,12 +34,12 @@ jobs:\n-H \"Authorization: token ${{ secrets.GITHUB_TOKEN }}\" \\\n\"${{ github.event.pull_request.statuses_url }}\"\nif: github.event_name == 'pull_request'\n- - uses: actions/checkout@v2\n+ - uses: actions/checkout@v3.3.0\nif: github.event_name == 'push' && steps.setup.outputs.has_token == 'true'\nwith:\nfetch-depth: 0\ntoken: '${{ secrets.GO_TOKEN }}'\n- - uses: actions/checkout@v2\n+ - uses: actions/checkout@v3.3.0\nif: github.event_name == 'pull_request' || steps.setup.outputs.has_token != 'true'\nwith:\nfetch-depth: 0\n"
}
] | Go | Apache License 2.0 | google/gvisor | github: update actions/checkout to newest version
PiperOrigin-RevId: 504693188 |
259,881 | 26.01.2023 13:25:06 | 28,800 | 0c38f72156b0a74be293579c165477ced9fe60d5 | Move procid to dynamic facts render
Based on cl/504066914, this generates the proper procid offsets at build time.
Package facts needs an update to properly skip underscore imports, which don't
have facts for some reason (because they are unnamed?).
Drop use of runtime.getprocid given that this generated code is pretty solid. | [
{
"change_type": "MODIFY",
"old_path": "pkg/procid/BUILD",
"new_path": "pkg/procid/BUILD",
"diff": "-load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n+load(\"//tools:defs.bzl\", \"arch_genrule\", \"go_library\", \"go_test\", \"select_arch\")\n+load(\"//tools/nogo:defs.bzl\", \"nogo_facts\")\npackage(licenses = [\"notice\"])\n+nogo_facts(\n+ name = \"procid_impl\",\n+ srcs = [\"procid.go\"],\n+ output = \"procid_impl.s\",\n+ template = select_arch(\n+ amd64 = \"procid_amd64.s\",\n+ arm64 = \"procid_arm64.s\",\n+ ),\n+)\n+\n+arch_genrule(\n+ name = \"procid_impl_arch\",\n+ src = \":procid_impl\",\n+ template = \"procid_impl_%s.s\",\n+)\n+\ngo_library(\nname = \"procid\",\nsrcs = [\n\"procid.go\",\n- \"procid_amd64.s\",\n- \"procid_arm64.s\",\n+ \":procid_impl_arch\",\n],\nvisibility = [\"//visibility:public\"],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/procid/procid.go",
"new_path": "pkg/procid/procid.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-//go:build go1.1\n-// +build go1.1\n-\n// Package procid provides a way to get the current system thread identifier.\npackage procid\n+import (\n+ \"runtime\"\n+)\n+\n+// Dummy references for facts.\n+const _ = runtime.Compiler\n+\n// Current returns the current system thread identifier.\n//\n// Precondition: This should only be called with the runtime OS thread locked.\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/facts/facts.go",
"new_path": "tools/nogo/facts/facts.go",
"diff": "@@ -230,6 +230,13 @@ func (b *Bundle) Package(pkg *types.Package) (*Package, error) {\nreturn facts, nil\n}\n+ if b.reader == nil {\n+ // Nothing available.\n+ //\n+ // N.B. some bundles contain only cached packages.\n+ return nil, nil\n+ }\n+\n// Find based on the reader.\nfor _, f := range b.reader.File {\nif f.Name != pkg.Path() {\n@@ -368,6 +375,9 @@ func Resolve(pkg *types.Package, localFacts *Package, allFacts *Bundle, allFactN\nif err != nil {\nreturn nil, err\n}\n+ if importFacts == nil {\n+ continue\n+ }\nr.walkScope(append(names, \"import\", importPkg.Name()), importPkg.Scope(), importFacts, allFactNames)\n}\nreturn r, nil\n"
}
] | Go | Apache License 2.0 | google/gvisor | Move procid to dynamic facts render
Based on cl/504066914, this generates the proper procid offsets at build time.
Package facts needs an update to properly skip underscore imports, which don't
have facts for some reason (because they are unnamed?).
Drop use of runtime.getprocid given that this generated code is pretty solid.
PiperOrigin-RevId: 504926257 |
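For illustration, a usage sketch of procid. It assumes the gvisor.dev/gvisor module can be imported directly; the requirement to lock the OS thread first comes from the package comment shown in the diff.

```go
package main

import (
	"fmt"
	"runtime"

	"gvisor.dev/gvisor/pkg/procid"
)

func main() {
	// Per the package doc, Current should only be called with the
	// runtime OS thread locked, so the returned TID stays stable.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	fmt.Println("current system thread id:", procid.Current())
}
```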
259,909 | 26.01.2023 13:29:01 | 28,800 | f0d5892907c89ed28444c4ea9c06beaff14792f8 | Fix flaky creat_test. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/fusefs.go",
"new_path": "pkg/sentry/fsimpl/fuse/fusefs.go",
"diff": "@@ -524,8 +524,7 @@ func (*inode) IterDirents(ctx context.Context, mnt *vfs.Mount, callback vfs.Iter\n// NewFile implements kernfs.Inode.NewFile.\nfunc (i *inode) NewFile(ctx context.Context, name string, opts vfs.OpenOptions) (kernfs.Inode, error) {\nopts.Flags &= linux.O_ACCMODE | linux.O_CREAT | linux.O_EXCL | linux.O_TRUNC |\n- linux.O_DIRECTORY | linux.O_NOFOLLOW | linux.O_NONBLOCK | linux.O_NOCTTY |\n- linux.O_APPEND\n+ linux.O_DIRECTORY | linux.O_NOFOLLOW | linux.O_NONBLOCK | linux.O_NOCTTY\nkernelTask := kernel.TaskFromContext(ctx)\nif kernelTask == nil {\nlog.Warningf(\"fusefs.Inode.NewFile: couldn't get kernel task from context\", i.nodeID)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix flaky creat_test.
PiperOrigin-RevId: 504927279 |
259,907 | 26.01.2023 22:54:18 | 28,800 | c4fe64c5ef18c99854abe262fbfb2a7100fd408d | Add dir= prefix in overlay2 flag's medium.
This is to make it clear that the host file will be created inside this
directory. This also makes it look cleaner when other medium options are added
later. | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -270,7 +270,7 @@ docker-tests: load-basic $(RUNTIME_BIN)\n@$(call install_runtime,$(RUNTIME)-fdlimit,--fdlimit=2000) # Used by TestRlimitNoFile.\n@$(call install_runtime,$(RUNTIME)-dcache,--fdlimit=2000 --dcache=100) # Used by TestDentryCacheLimit.\n@$(call install_runtime,$(RUNTIME)-host-uds,--host-uds=all) # Used by TestHostSocketConnect.\n- @$(call install_runtime,$(RUNTIME)-overlay,--overlay2=root:/tmp) # Used by TestOverlay*.\n+ @$(call install_runtime,$(RUNTIME)-overlay,--overlay2=root:dir=/tmp) # Used by TestOverlay*.\n@$(call test_runtime,$(RUNTIME),$(INTEGRATION_TARGETS) //test/e2e:integration_runtime_test)\n.PHONY: docker-tests\n"
},
{
"change_type": "MODIFY",
"old_path": "g3doc/user_guide/filesystem.md",
"new_path": "g3doc/user_guide/filesystem.md",
"diff": "@@ -47,9 +47,9 @@ up container memory usage. To circumvent this, you can have root mount's upper\nlayer (tmpfs) be backed by a host file, so all file data is stored on disk.\nThe newer `--overlay2` flag allows you to achieve these. You can specify\n-`--overlay2=root:/dir/path` in `runtimeArgs`. `/dir/path` can be any existing\n-directory inside which the tmpfs filestore file will be created. When the\n-container exits, this filestore file will be destroyed.\n+`--overlay2=root:dir=/dir/path` in `runtimeArgs`. `/dir/path` should be an\n+existing directory inside which the tmpfs filestore file will be created. When\n+the container exits, this filestore file will be destroyed.\n## Shared root filesystem\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cli/main.go",
"new_path": "runsc/cli/main.go",
"diff": "@@ -227,7 +227,7 @@ func Main(version string) {\nlog.Infof(\"\\t\\tPlatform: %v\", conf.Platform)\nlog.Infof(\"\\t\\tFileAccess: %v\", conf.FileAccess)\noverlay2 := conf.GetOverlay2()\n- log.Infof(\"\\t\\tOverlay: Root=%t, SubMounts=%t, FilestoreDir=%q\", overlay2.RootMount, overlay2.SubMounts, overlay2.FilestoreDir)\n+ log.Infof(\"\\t\\tOverlay: Root=%t, SubMounts=%t, Medium=%q\", overlay2.RootMount, overlay2.SubMounts, overlay2.Medium)\nlog.Infof(\"\\t\\tNetwork: %v, logging: %t\", conf.Network, conf.LogPackets)\nlog.Infof(\"\\t\\tStrace: %t, max size: %d, syscalls: %s\", conf.Strace, conf.StraceLogSize, conf.StraceSyscalls)\nlog.Infof(\"\\t\\tIOURING: %t\", conf.IOUring)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/config.go",
"new_path": "runsc/config/config.go",
"diff": "@@ -19,6 +19,7 @@ package config\nimport (\n\"fmt\"\n+ \"path/filepath\"\n\"strconv\"\n\"strings\"\n\"time\"\n@@ -346,8 +347,8 @@ func (c *Config) GetOverlay2() Overlay2 {\nif c.Overlay2.Enabled() {\npanic(fmt.Sprintf(\"Overlay2 cannot be set when --overlay=true\"))\n}\n- // Using deprecated flag, honor it to avoid breaking users.\n- return Overlay2{RootMount: true, SubMounts: true, FilestoreDir: \"\"}\n+ // Using a deprecated flag, honor it to avoid breaking users.\n+ return Overlay2{RootMount: true, SubMounts: true, Medium: \"memory\"}\n}\nreturn c.Overlay2\n}\n@@ -645,7 +646,7 @@ func (g HostFifo) AllowOpen() bool {\ntype Overlay2 struct {\nRootMount bool\nSubMounts bool\n- FilestoreDir string\n+ Medium string\n}\nfunc defaultOverlay2() *Overlay2 {\n@@ -673,11 +674,16 @@ func (o *Overlay2) Set(v string) error {\nreturn fmt.Errorf(\"unexpected mount specifier for --overlay2: %q\", mount)\n}\n- switch medium := vs[1]; medium {\n- case \"memory\":\n- o.FilestoreDir = \"\"\n+ o.Medium = vs[1]\n+ switch o.Medium {\n+ case \"memory\": // OK\ndefault:\n- o.FilestoreDir = medium\n+ if !strings.HasPrefix(o.Medium, \"dir=\") {\n+ return fmt.Errorf(\"unexpected medium specifier for --overlay2: %q\", o.Medium)\n+ }\n+ if hostFileDir := strings.TrimPrefix(o.Medium, \"dir=\"); !filepath.IsAbs(hostFileDir) {\n+ return fmt.Errorf(\"overlay host file directory should be an absolute path, got %q\", hostFileDir)\n+ }\n}\nreturn nil\n}\n@@ -702,17 +708,30 @@ func (o Overlay2) String() string {\npanic(\"invalid state of subMounts = true and rootMount = false\")\n}\n- res += \":\"\n- switch o.FilestoreDir {\n- case \"\":\n- res += \"memory\"\n- default:\n- res += o.FilestoreDir\n- }\n- return res\n+ return res + \":\" + o.Medium\n}\n-// Enabled returns true if overlay option is enabled for any mounts.\n+// Enabled returns true if the overlay option is enabled for any mounts.\nfunc (o *Overlay2) Enabled() bool {\nreturn o.RootMount || o.SubMounts\n}\n+\n+// IsBackedByHostFile indicates whether the overlay is backed by a host file.\n+func (o *Overlay2) IsBackedByHostFile() bool {\n+ return o.Enabled() && o.Medium != \"memory\"\n+}\n+\n+// HostFileDir indicates the directory in which the overlay-backing host file\n+// should be created.\n+//\n+// Precondition: o.IsBackedByHostFile() == true.\n+func (o *Overlay2) HostFileDir() string {\n+ if !strings.HasPrefix(o.Medium, \"dir=\") {\n+ panic(fmt.Sprintf(\"Overlay2.Medium = %q does not have dir= prefix when overlay is backed by a host file\", o.Medium))\n+ }\n+ hostFileDir := strings.TrimPrefix(o.Medium, \"dir=\")\n+ if !filepath.IsAbs(hostFileDir) {\n+ panic(fmt.Sprintf(\"overlay host file directory should be an absolute path, got %q\", hostFileDir))\n+ }\n+ return hostFileDir\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/config_test.go",
"new_path": "runsc/config/config_test.go",
"diff": "@@ -115,41 +115,59 @@ func TestToFlags(t *testing.T) {\nfunc TestInvalidFlags(t *testing.T) {\nfor _, tc := range []struct {\nname string\n+ value string\nerror string\n}{\n{\nname: \"file-access\",\n+ value: \"invalid\",\nerror: \"invalid file access type\",\n},\n{\nname: \"network\",\n+ value: \"invalid\",\nerror: \"invalid network type\",\n},\n{\nname: \"qdisc\",\n+ value: \"invalid\",\nerror: \"invalid qdisc\",\n},\n{\nname: \"watchdog-action\",\n+ value: \"invalid\",\nerror: \"invalid watchdog action\",\n},\n{\nname: \"ref-leak-mode\",\n+ value: \"invalid\",\nerror: \"invalid ref leak mode\",\n},\n{\nname: \"host-uds\",\n+ value: \"invalid\",\nerror: \"invalid host UDS\",\n},\n{\nname: \"host-fifo\",\n+ value: \"invalid\",\nerror: \"invalid host fifo\",\n},\n+ {\n+ name: \"overlay2\",\n+ value: \"root:/tmp\",\n+ error: \"unexpected medium specifier for --overlay2: \\\"/tmp\\\"\",\n+ },\n+ {\n+ name: \"overlay2\",\n+ value: \"root:dir=tmp\",\n+ error: \"overlay host file directory should be an absolute path, got \\\"tmp\\\"\",\n+ },\n} {\nt.Run(tc.name, func(t *testing.T) {\ntestFlags := flag.NewFlagSet(\"test\", flag.ContinueOnError)\nRegisterFlags(testFlags)\n- if err := testFlags.Lookup(tc.name).Value.Set(\"invalid\"); err == nil || !strings.Contains(err.Error(), tc.error) {\n+ if err := testFlags.Lookup(tc.name).Value.Set(tc.value); err == nil || !strings.Contains(err.Error(), tc.error) {\nt.Errorf(\"flag.Value.Set(invalid) wrong error reported: %v\", err)\n}\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/flags.go",
"new_path": "runsc/config/flags.go",
"diff": "@@ -83,7 +83,7 @@ func RegisterFlags(flagSet *flag.FlagSet) {\nflagSet.Var(fileAccessTypePtr(FileAccessExclusive), \"file-access\", \"specifies which filesystem validation to use for the root mount: exclusive (default), shared.\")\nflagSet.Var(fileAccessTypePtr(FileAccessShared), \"file-access-mounts\", \"specifies which filesystem validation to use for volumes other than the root mount: shared (default), exclusive.\")\nflagSet.Bool(\"overlay\", false, \"DEPRECATED: use --overlay2=all:memory to achieve the same effect\")\n- flagSet.Var(defaultOverlay2(), \"overlay2\", \"wrap mounts with overlayfs. Format is {mount}:{medium}, where 'mount' can be 'root' or 'all' and medium can be 'memory' or existing directory path in which filestore will be created. 'none' will turn overlay mode off.\")\n+ flagSet.Var(defaultOverlay2(), \"overlay2\", \"wrap mounts with overlayfs. Format is {mount}:{medium}, where 'mount' can be 'root' or 'all' and medium can be 'memory' or 'dir=/abs/dir/path' in which filestore will be created. 'none' will turn overlay mode off.\")\nflagSet.Bool(\"fsgofer-host-uds\", false, \"DEPRECATED: use host-uds=all\")\nflagSet.Var(hostUDSPtr(HostUDSNone), \"host-uds\", \"controls permission to access host Unix-domain sockets. Values: none|open|create|all, default: none\")\nflagSet.Var(hostFifoPtr(HostFifoNone), \"host-fifo\", \"controls permission to access host FIFOs (or named pipes). Values: none|open, default: none\")\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -776,12 +776,13 @@ func (c *Container) Destroy() error {\n}\nfunc createOverlayFilestore(overlay2 config.Overlay2) (*os.File, error) {\n- if overlay2.FilestoreDir == \"\" {\n+ if !overlay2.IsBackedByHostFile() {\nreturn nil, nil\n}\n- fileInfo, err := os.Stat(overlay2.FilestoreDir)\n+ filestoreDir := overlay2.HostFileDir()\n+ fileInfo, err := os.Stat(filestoreDir)\nif err != nil {\n- return nil, fmt.Errorf(\"failed to stat overlay filestore directory %q: %v\", overlay2.FilestoreDir, err)\n+ return nil, fmt.Errorf(\"failed to stat overlay filestore directory %q: %v\", filestoreDir, err)\n}\nif !fileInfo.IsDir() {\nreturn nil, fmt.Errorf(\"overlay2 flag should specify an existing directory\")\n@@ -791,9 +792,9 @@ func createOverlayFilestore(overlay2 config.Overlay2) (*os.File, error) {\n// it is not supported on all filesystems. So we simulate it by creating a\n// named file and then immediately unlinking it while keeping an FD on it.\n// This file will be deleted when the container exits.\n- filestoreFile, err := os.CreateTemp(overlay2.FilestoreDir, \"runsc-overlay-filestore-*\")\n+ filestoreFile, err := os.CreateTemp(filestoreDir, \"runsc-overlay-filestore-*\")\nif err != nil {\n- return nil, fmt.Errorf(\"failed to create a temporary file inside %q: %v\", overlay2.FilestoreDir, err)\n+ return nil, fmt.Errorf(\"failed to create a temporary file inside %q: %v\", filestoreDir, err)\n}\nif err := unix.Unlink(filestoreFile.Name()); err != nil {\nreturn nil, fmt.Errorf(\"failed to unlink temporary file %q: %v\", filestoreFile.Name(), err)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/multi_container_test.go",
"new_path": "runsc/container/multi_container_test.go",
"diff": "@@ -2233,7 +2233,7 @@ func TestMultiContainerOverlayLeaks(t *testing.T) {\n// Configure root overlay backed by a file from /tmp.\nconf.Overlay2 = config.Overlay2{\nRootMount: true,\n- FilestoreDir: \"/tmp\",\n+ Medium: \"dir=/tmp\",\n}\n// Root container will just sleep.\n@@ -2313,6 +2313,7 @@ func TestMultiContainerMemoryLeakStress(t *testing.T) {\n// files in the root directory.\nconf.Overlay2 = config.Overlay2{\nRootMount: true,\n+ Medium: \"memory\",\n}\n// Root container will just sleep.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/shared_volume_test.go",
"new_path": "runsc/container/shared_volume_test.go",
"diff": "@@ -272,7 +272,7 @@ func TestSharedVolumeOverlay(t *testing.T) {\nconf.Overlay2 = config.Overlay2{\nRootMount: true,\nSubMounts: true,\n- FilestoreDir: \"/tmp\",\n+ Medium: \"dir=/tmp\",\n}\n// File that will be used to check consistency inside/outside sandbox.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/main.go",
"new_path": "test/runner/main.go",
"diff": "@@ -206,7 +206,7 @@ func runRunsc(tc *gtest.TestCase, spec *specs.Spec) error {\n\"-file-access\", *fileAccess,\n}\nif *overlay {\n- args = append(args, \"-overlay2=all:/tmp\")\n+ args = append(args, \"-overlay2=all:dir=/tmp\")\n}\nif *debug {\nargs = append(args, \"-debug\", \"-log-packets=true\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add dir= prefix in overlay2 flag's medium.
This is to make it clear that the host file will be created inside this
directory. This also makes it look cleaner when other medium options are added
later.
PiperOrigin-RevId: 505033408 |
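As a hedged illustration of the flag medium described in the record above — a minimal sketch assuming the exported Overlay2 type, Set, IsBackedByHostFile, and HostFileDir behave as shown in the diff; the directory path, the "all" mount specifier's effect, and the program structure are illustrative assumptions, not part of the commit:

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/runsc/config"
)

func main() {
	var o config.Overlay2
	// "all:dir=/var/lib/runsc" wraps the root mount and submounts with an
	// overlay whose backing filestore is created under /var/lib/runsc.
	if err := o.Set("all:dir=/var/lib/runsc"); err != nil {
		panic(err)
	}
	fmt.Println(o.IsBackedByHostFile()) // true
	fmt.Println(o.HostFileDir())        // /var/lib/runsc
	// A relative path is rejected, mirroring the validation added above.
	fmt.Println(o.Set("all:dir=tmp")) // error: absolute path required
}
```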
259,881 | 27.01.2023 10:25:36 | 28,800 | 16c24b1cec398512458022db8758ade9ebfdfbc1 | Use unsafe.String/StringData to implement string functions
unsafe.String and unsafe.StringData provide compatibility guarantees that don't
require us to verify compatibility in each release.
For | [
{
"change_type": "MODIFY",
"old_path": "pkg/gohacks/BUILD",
"new_path": "pkg/gohacks/BUILD",
"diff": "@@ -8,6 +8,8 @@ go_library(\n\"gohacks_unsafe.go\",\n\"slice_go113_unsafe.go\",\n\"slice_go120_unsafe.go\",\n+ \"string_go113_unsafe.go\",\n+ \"string_go120_unsafe.go\",\n],\nstateify = False,\nvisibility = [\"//:sandbox\"],\n@@ -16,7 +18,10 @@ go_library(\ngo_test(\nname = \"gohacks_test\",\nsize = \"small\",\n- srcs = [\"gohacks_test.go\"],\n+ srcs = [\n+ \"gohacks_test.go\",\n+ \"string_test.go\",\n+ ],\nlibrary = \":gohacks\",\ndeps = [\"@org_golang_x_sys//unix:go_default_library\"],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/gohacks/gohacks_unsafe.go",
"new_path": "pkg/gohacks/gohacks_unsafe.go",
"diff": "@@ -30,14 +30,6 @@ import (\n\"unsafe\"\n)\n-// StringHeader is equivalent to reflect.StringHeader, but represents the\n-// pointer to the underlying array as unsafe.Pointer rather than uintptr,\n-// allowing StringHeaders to be directly converted to strings.\n-type StringHeader struct {\n- Data unsafe.Pointer\n- Len int\n-}\n-\n// Noescape hides a pointer from escape analysis. Noescape is the identity\n// function but escape analysis doesn't think the output depends on the input.\n// Noescape is inlined and currently compiles down to zero instructions.\n@@ -51,24 +43,6 @@ func Noescape(p unsafe.Pointer) unsafe.Pointer {\nreturn unsafe.Pointer(x ^ 0)\n}\n-// ImmutableBytesFromString is equivalent to []byte(s), except that it uses the\n-// same memory backing s instead of making a heap-allocated copy. This is only\n-// valid if the returned slice is never mutated.\n-func ImmutableBytesFromString(s string) []byte {\n- shdr := (*StringHeader)(unsafe.Pointer(&s))\n- return Slice((*byte)(shdr.Data), shdr.Len)\n-}\n-\n-// StringFromImmutableBytes is equivalent to string(bs), except that it uses\n-// the same memory backing bs instead of making a heap-allocated copy. This is\n-// only valid if bs is never mutated after StringFromImmutableBytes returns.\n-func StringFromImmutableBytes(bs []byte) string {\n- // This is cheaper than messing with StringHeader and SliceHeader, which as\n- // of this writing produces many dead stores of zeroes. Compare\n- // strings.Builder.String().\n- return *(*string)(unsafe.Pointer(&bs))\n-}\n-\n// Note that go:linkname silently doesn't work if the local name is exported,\n// necessitating an indirection for exported functions.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/gohacks/string_go113_unsafe.go",
"diff": "+// Copyright 2023 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+//go:build go1.13 && !go1.20\n+// +build go1.13,!go1.20\n+\n+// TODO(go.dev/issue/8422): Remove this file once Go 1.19 is no longer\n+// supported.\n+\n+package gohacks\n+\n+import (\n+ \"unsafe\"\n+)\n+\n+// stringHeader is equivalent to reflect.StringHeader, but represents the\n+// pointer to the underlying array as unsafe.Pointer rather than uintptr,\n+// allowing StringHeaders to be directly converted to strings.\n+type stringHeader struct {\n+ Data unsafe.Pointer\n+ Len int\n+}\n+\n+// ImmutableBytesFromString is equivalent to []byte(s), except that it uses the\n+// same memory backing s instead of making a heap-allocated copy. This is only\n+// valid if the returned slice is never mutated.\n+func ImmutableBytesFromString(s string) []byte {\n+ shdr := (*stringHeader)(unsafe.Pointer(&s))\n+ return Slice((*byte)(shdr.Data), shdr.Len)\n+}\n+\n+// StringFromImmutableBytes is equivalent to string(bs), except that it uses\n+// the same memory backing bs instead of making a heap-allocated copy. This is\n+// only valid if bs is never mutated after StringFromImmutableBytes returns.\n+func StringFromImmutableBytes(bs []byte) string {\n+ // This is cheaper than messing with StringHeader and SliceHeader, which as\n+ // of this writing produces many dead stores of zeroes. Compare\n+ // strings.Builder.String().\n+ return *(*string)(unsafe.Pointer(&bs))\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/gohacks/string_go120_unsafe.go",
"diff": "+// Copyright 2023 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+//go:build go1.20\n+\n+package gohacks\n+\n+import (\n+ \"unsafe\"\n+)\n+\n+// ImmutableBytesFromString is equivalent to []byte(s), except that it uses the\n+// same memory backing s instead of making a heap-allocated copy. This is only\n+// valid if the returned slice is never mutated.\n+func ImmutableBytesFromString(s string) []byte {\n+ b := unsafe.StringData(s)\n+ return unsafe.Slice(b, len(s))\n+}\n+\n+// StringFromImmutableBytes is equivalent to string(bs), except that it uses\n+// the same memory backing bs instead of making a heap-allocated copy. This is\n+// only valid if bs is never mutated after StringFromImmutableBytes returns.\n+func StringFromImmutableBytes(bs []byte) string {\n+ if len(bs) == 0 {\n+ return \"\"\n+ }\n+ return unsafe.String(&bs[0], len(bs))\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/gohacks/string_test.go",
"diff": "+// Copyright 2023 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package gohacks\n+\n+import (\n+ \"reflect\"\n+ \"testing\"\n+)\n+\n+func TestImmutableBytesFromString(t *testing.T) {\n+ tests := []struct {\n+ name string\n+ input string\n+ want []byte\n+ }{\n+ {\n+ name: \"abc\",\n+ input: \"abc\",\n+ want: []byte(\"abc\"),\n+ },\n+ {\n+ name: \"empty\",\n+ input: \"\",\n+ want: nil,\n+ },\n+ {\n+ name: \"subslice-empty\",\n+ input: \"abc\"[:0],\n+ want: []byte(\"\"),\n+ },\n+ }\n+\n+ for _, tc := range tests {\n+ t.Run(tc.name, func(t *testing.T) {\n+ got := ImmutableBytesFromString(tc.input)\n+ if !reflect.DeepEqual(got, tc.want) {\n+ t.Errorf(\"got contents %v (len %d cap %d) want %v (len %d cap %d)\", got, len(got), cap(got), tc.want, len(tc.want), cap(tc.want))\n+ }\n+ })\n+ }\n+}\n+\n+func TestStringFromImmutableBytes(t *testing.T) {\n+ tests := []struct {\n+ name string\n+ input []byte\n+ want string\n+ }{\n+ {\n+ name: \"abc\",\n+ input: []byte(\"abc\"),\n+ want: \"abc\",\n+ },\n+ {\n+ name: \"empty\",\n+ input: []byte{},\n+ want: \"\",\n+ },\n+ {\n+ name: \"nil\",\n+ input: nil,\n+ want: \"\",\n+ },\n+ }\n+\n+ for _, tc := range tests {\n+ t.Run(tc.name, func(t *testing.T) {\n+ got := StringFromImmutableBytes(tc.input)\n+ if got != tc.want {\n+ t.Errorf(\"got %q want %q\", got, tc.want)\n+ }\n+ })\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use unsafe.String/StringData to implement string functions
unsafe.String and unsafe.StringData provide compatibility guarantees that don't
require us to verify compatibility in each release.
For #8422.
PiperOrigin-RevId: 505150667 |
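A short hedged usage sketch of the zero-copy helpers in the record above; the aliasing contract is the point, and the example strings and program structure are illustrative assumptions rather than code from the commit:

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/gohacks"
)

func main() {
	// b aliases the memory backing the string; it must never be mutated.
	b := gohacks.ImmutableBytesFromString("hello")
	// s aliases b; b must not be mutated after this call either.
	s := gohacks.StringFromImmutableBytes(b)
	fmt.Println(len(b), s) // 5 hello
}
```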
259,881 | 27.01.2023 11:49:28 | 28,800 | ee3a8735b5682878543019cdeae459e850c01bf7 | Move runtime constants to dynamic facts render | [
{
"change_type": "MODIFY",
"old_path": "pkg/sync/BUILD",
"new_path": "pkg/sync/BUILD",
"diff": "@@ -28,6 +28,14 @@ arch_genrule(\ntemplate = \"runtime_spinning_impl_%s.s\",\n)\n+# Architecture-independent constants.\n+nogo_facts(\n+ name = \"runtime_constants_impl\",\n+ srcs = [\"runtime.go\"],\n+ output = \"runtime_constants_impl.go\",\n+ template = \"runtime_constants.tmpl\",\n+)\n+\ngo_library(\nname = \"sync\",\nsrcs = [\n@@ -49,6 +57,7 @@ go_library(\n\"rwmutex_unsafe.go\",\n\"seqcount.go\",\n\"sync.go\",\n+ \":runtime_constants_impl\",\n\":runtime_spinning_impl_arch\",\n],\nmarshal = False,\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sync/runtime_constants.tmpl",
"diff": "+// Copyright 2023 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package sync\n+\n+// Values for the reason argument to gopark, from Go's src/runtime/runtime2.go.\n+const (\n+ WaitReasonSelect uint8 = {{ .import.runtime.Constants.waitReasonSelect }}\n+ WaitReasonChanReceive uint8 = {{ .import.runtime.Constants.waitReasonChanReceive }}\n+ WaitReasonSemacquire uint8 = {{ .import.runtime.Constants.waitReasonSemacquire }}\n+)\n+\n+// Values for the traceEv argument to gopark, from Go's src/runtime/trace.go.\n+const (\n+ TraceEvGoBlockRecv byte = {{ .import.runtime.Constants.traceEvGoBlockRecv }}\n+ TraceEvGoBlockSelect byte = {{ .import.runtime.Constants.traceEvGoBlockSelect }}\n+ TraceEvGoBlockSync byte = {{ .import.runtime.Constants.traceEvGoBlockSync }}\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sync/runtime_unsafe.go",
"new_path": "pkg/sync/runtime_unsafe.go",
"diff": "@@ -78,20 +78,6 @@ func Goready(gp uintptr, traceskip int, wakep bool) {\n}\n}\n-// Values for the reason argument to gopark, from Go's src/runtime/runtime2.go.\n-const (\n- WaitReasonSelect uint8 = 9\n- WaitReasonChanReceive uint8 = 14\n- WaitReasonSemacquire uint8 = 18\n-)\n-\n-// Values for the traceEv argument to gopark, from Go's src/runtime/trace.go.\n-const (\n- TraceEvGoBlockRecv byte = 23\n- TraceEvGoBlockSelect byte = 24\n- TraceEvGoBlockSync byte = 25\n-)\n-\n// Rand32 returns a non-cryptographically-secure random uint32.\nfunc Rand32() uint32 {\nreturn fastrand()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Move runtime constants to dynamic facts render
PiperOrigin-RevId: 505172527 |
259,868 | 27.01.2023 15:54:28 | 28,800 | 4e75dc46503d679c7c5c0f1327e4b5c6df8f6b93 | `runsc metric-server`: Add per-sandbox start timestamp metric.
This synthetic metric contains the Unix timestamp that each sandbox was
started at.
This is useful for counter metrics, such that rates of change over time can
be properly computed on a per-sandbox basis. | [

{
"change_type": "MODIFY",
"old_path": "pkg/prometheus/prometheus.go",
"new_path": "pkg/prometheus/prometheus.go",
"diff": "@@ -222,7 +222,7 @@ type Data struct {\n// NewIntData returns a new Data struct with the given metric and value.\nfunc NewIntData(metric *Metric, val int64) *Data {\n- return &Data{Metric: metric, Number: &Number{Int: val}}\n+ return LabeledIntData(metric, nil, val)\n}\n// LabeledIntData returns a new Data struct with the given metric, labels, and value.\n@@ -232,7 +232,12 @@ func LabeledIntData(metric *Metric, labels map[string]string, val int64) *Data {\n// NewFloatData returns a new Data struct with the given metric and value.\nfunc NewFloatData(metric *Metric, val float64) *Data {\n- return &Data{Metric: metric, Number: &Number{Float: val}}\n+ return LabeledFloatData(metric, nil, val)\n+}\n+\n+// LabeledFloatData returns a new Data struct with the given metric, labels, and value.\n+func LabeledFloatData(metric *Metric, labels map[string]string, val float64) *Data {\n+ return &Data{Metric: metric, Labels: labels, Number: &Number{Float: val}}\n}\n// ExportOptions contains options that control how metric data is exported in Prometheus format.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/metric_server.go",
"new_path": "runsc/cmd/metric_server.go",
"diff": "@@ -475,6 +475,11 @@ var (\nType: prometheus.TypeGauge,\nHelp: \"Key-value pairs about per-sandbox metadata.\",\n}\n+ sandboxCreationMetric = prometheus.Metric{\n+ Name: \"sandbox_creation_time_seconds\",\n+ Type: prometheus.TypeGauge,\n+ Help: \"When the sandbox was created, as a unix timestamp in milliseconds.\",\n+ }\nnumRunningSandboxesMetric = prometheus.Metric{\nName: \"num_sandboxes_running\",\nType: prometheus.TypeGauge,\n@@ -595,12 +600,13 @@ func (m *MetricServer) serveMetrics(w http.ResponseWriter, req *http.Request) ht\ngo func(metricsMu *sync.Mutex, meta *metaMetrics, selfMetrics *prometheus.Snapshot) {\ndefer wg.Done()\nfor s := range loadedSandboxCh {\n- served, sand, verifier, err := s.served, s.sandbox, s.verifier, s.err\n+ served, sand, verifier, loadErr := s.served, s.sandbox, s.verifier, s.err\nisRunning := false\nvar snapshot *prometheus.Snapshot\n- if err == nil {\n+ sandboxErr := loadErr\n+ if loadErr == nil {\nqueryCtx, queryCtxCancel := context.WithTimeout(ctx, perSandboxTime)\n- snapshot, err = queryMetrics(queryCtx, sand, verifier)\n+ snapshot, sandboxErr = queryMetrics(queryCtx, sand, verifier)\nqueryCtxCancel()\nisRunning = sand.IsRunning()\n}\n@@ -608,25 +614,25 @@ func (m *MetricServer) serveMetrics(w http.ResponseWriter, req *http.Request) ht\nmetricsMu.Lock()\ndefer metricsMu.Unlock()\nselfMetrics.Add(prometheus.LabeledIntData(&sandboxPresenceMetric, served.extraLabels, 1))\n- selfMetrics.Add(prometheus.LabeledIntData(&sandboxMetadataMetric, served.labelsWithMetadata, 1))\nsandboxRunning := int64(0)\nif isRunning {\nsandboxRunning = 1\n+ meta.numRunningSandboxes++\n}\nselfMetrics.Add(prometheus.LabeledIntData(&sandboxRunningMetric, served.extraLabels, sandboxRunning))\n- if err != nil && !isRunning {\n- // The sandbox either hasn't started running yet, or it ran and has gone away between the\n- // start of the function and now. It is normal that metrics are not exported for this\n- // sandbox in this case, so do not report this as an error.\n- return\n+ if loadErr == nil {\n+ selfMetrics.Add(prometheus.LabeledIntData(&sandboxMetadataMetric, served.labelsWithMetadata, 1))\n+ selfMetrics.Add(prometheus.LabeledFloatData(&sandboxCreationMetric, served.extraLabels, float64(served.createdAt.Unix())+(float64(served.createdAt.Nanosecond())/1e9)))\n}\n- if err != nil {\n- meta.numRunningSandboxes++\n+ if sandboxErr != nil {\n+ // If the sandbox isn't running, it is normal that metrics are not exported for it, so\n+ // do not report this case as an error.\n+ if isRunning {\nmeta.numCannotExportSandboxes++\n- log.Warningf(\"Could not export metrics from sandbox %s: %v\", served.rootContainerID.SandboxID, err)\n+ log.Warningf(\"Could not export metrics from sandbox %s: %v\", served.rootContainerID.SandboxID, sandboxErr)\n+ }\nreturn\n}\n- meta.numRunningSandboxes++\nsnapshotCh <- snapshotAndOptions{\nsnapshot: snapshot,\noptions: prometheus.SnapshotExportOptions{\n"
}
] | Go | Apache License 2.0 | google/gvisor | `runsc metric-server`: Add per-sandbox start timestamp metric.
This synthetic metric contains the Unix timestamp that each sandbox was
started at.
This is useful for counter metrics, such that rates of change over time can
be properly computed on a per-sandbox basis.
PiperOrigin-RevId: 505228878 |
259,881 | 31.01.2023 01:00:42 | 28,800 | bcac7fc9adeaf5899edabafb7b947fe202cb1506 | Bump sync build tag
cl/504661697 removes this build tag once and for all, but it is blocked on
additional work needed to get it working with OSS bazel builds.
Until that is ready, bump the build tag to unblock testing Go tip (1.21). | [
{
"change_type": "MODIFY",
"old_path": "pkg/sync/runtime_unsafe.go",
"new_path": "pkg/sync/runtime_unsafe.go",
"diff": "// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n-//go:build go1.13 && !go1.21\n-// +build go1.13,!go1.21\n+//go:build go1.13 && !go1.22\n+// +build go1.13,!go1.22\n// //go:linkname directives type-checked by checklinkname. Any other\n// non-linkname assumptions outside the Go 1 compatibility guarantee should\n"
}
] | Go | Apache License 2.0 | google/gvisor | Bump sync build tag
cl/504661697 removes this build tag once and for all, but it is blocked on
additional work needed to get it working with OSS bazel builds.
Until that is ready, bump the build tag to unblock testing Go tip (1.21).
PiperOrigin-RevId: 505923005 |
259,909 | 31.01.2023 12:38:08 | 28,800 | 7058ea8d3067b109e1e7fe4e04f17b667252539f | Fix flakiness in FUSE syscall tests. | [
{
"change_type": "MODIFY",
"old_path": "test/runner/fuse/BUILD",
"new_path": "test/runner/fuse/BUILD",
"diff": "@@ -8,6 +8,7 @@ go_binary(\nvisibility = [\"//test/runner:__subpackages__\"],\ndeps = [\n\"//pkg/log\",\n+ \"//runsc/specutils\",\n\"@com_github_hanwen_go_fuse_v2//fs:go_default_library\",\n\"@com_github_hanwen_go_fuse_v2//fuse:go_default_library\",\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/fuse/fuse.go",
"new_path": "test/runner/fuse/fuse.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"github.com/hanwen/go-fuse/v2/fs\"\n\"github.com/hanwen/go-fuse/v2/fuse\"\n\"gvisor.dev/gvisor/pkg/log\"\n+ \"gvisor.dev/gvisor/runsc/specutils\"\n)\nfunc main() {\n@@ -44,7 +45,14 @@ func main() {\nserver.Unmount()\nserver.Wait()\n}()\n+ // TODO(b/267200022): Investigate why gofuse pollHack sometimes fails with\n+ // EINTR.\n+ if _, _, err := specutils.RetryEintr(func() (uintptr, uintptr, error) {\nif err := server.WaitMount(); err != nil {\n+ return 0, 0, err\n+ }\n+ return 0, 0, nil\n+ }); err != nil {\n// We don't shutdown the serve loop. If the mount does\n// not succeed, the loop won't work and exit.\nlog.Warningf(`Could not mount fuse submount \"/tmp\": %v`, err)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix flakiness in FUSE syscall tests.
PiperOrigin-RevId: 506082577 |
259,891 | 31.01.2023 22:24:59 | 0 | dae3aa6eee9efcfa547f59e40232181dd24e1053 | netstack: fix UDP forwarding refcount bug
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/forwarder.go",
"new_path": "pkg/tcpip/transport/udp/forwarder.go",
"diff": "@@ -47,7 +47,7 @@ func (f *Forwarder) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketB\nf.handler(&ForwarderRequest{\nstack: f.stack,\nid: id,\n- pkt: pkt,\n+ pkt: pkt.IncRef(),\n})\nreturn true\n"
}
] | Go | Apache License 2.0 | google/gvisor | netstack: fix UDP forwarding refcount bug
Fixes #8448. |
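A hedged sketch of the Forwarder usage this one-line fix affects: the callback receives a ForwarderRequest that, with the extra IncRef above, keeps the inbound packet alive for the request's lifetime. The wiring below (handler registration, endpoint handling, the package and function names) is a typical-usage assumption, not code from the commit:

```go
package forwarderexample

import (
	"gvisor.dev/gvisor/pkg/tcpip/stack"
	"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
	"gvisor.dev/gvisor/pkg/waiter"
)

// installUDPForwarder registers a handler that accepts every inbound UDP flow.
func installUDPForwarder(s *stack.Stack) {
	fwd := udp.NewForwarder(s, func(r *udp.ForwarderRequest) {
		var wq waiter.Queue
		// CreateEndpoint consumes the forwarded packet held by the request.
		ep, err := r.CreateEndpoint(&wq)
		if err != nil {
			return
		}
		// Hand ep off to application code; closed here only for brevity.
		ep.Close()
	})
	s.SetTransportProtocolHandler(udp.ProtocolNumber, fwd.HandlePacket)
}
```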
259,907 | 01.02.2023 14:31:31 | 28,800 | 6ca309ca15cbc2c3b1c6e891d8db9bf2a5a0616e | Consistently initialize IOUringParams in io_uring syscall tests.
Earlier we were using an uninitialized IOUringParams struct for io_uring, which
could contain garbage flag values and trigger unexpected io_uring behavior when
the test runs. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/io_uring.cc",
"new_path": "test/syscalls/linux/io_uring.cc",
"diff": "@@ -95,7 +95,7 @@ MATCHER_P(IOVecContainsString, str, \"\") {\nTEST(IOUringTest, ValidFD) {\nSKIP_IF(!IOUringAvailable());\n- IOUringParams params;\n+ IOUringParams params = {};\nFileDescriptor iouringfd = ASSERT_NO_ERRNO_AND_VALUE(NewIOUringFD(1, params));\n}\n@@ -103,7 +103,7 @@ TEST(IOUringTest, ValidFD) {\nTEST(IOUringTest, ParamsNonZeroResv) {\nSKIP_IF(!IOUringAvailable());\n- IOUringParams params;\n+ IOUringParams params = {};\nmemset(¶ms, 0, sizeof(params));\nparams.resv[1] = 1;\nASSERT_THAT(IOUringSetup(1, ¶ms), SyscallFailsWithErrno(EINVAL));\n@@ -112,7 +112,7 @@ TEST(IOUringTest, ParamsNonZeroResv) {\nTEST(IOUringTest, ZeroCQEntries) {\nSKIP_IF(!IOUringAvailable());\n- IOUringParams params;\n+ IOUringParams params = {};\nparams.cq_entries = 0;\nparams.flags = IORING_SETUP_CQSIZE;\nASSERT_THAT(IOUringSetup(1, ¶ms), SyscallFailsWithErrno(EINVAL));\n@@ -121,7 +121,7 @@ TEST(IOUringTest, ZeroCQEntries) {\nTEST(IOUringTest, ZeroCQEntriesLessThanSQEntries) {\nSKIP_IF(!IOUringAvailable());\n- IOUringParams params;\n+ IOUringParams params = {};\nparams.cq_entries = 16;\nparams.flags = IORING_SETUP_CQSIZE;\nASSERT_THAT(IOUringSetup(32, ¶ms), SyscallFailsWithErrno(EINVAL));\n@@ -132,7 +132,7 @@ TEST(IOUringTest, UnsupportedFlags) {\n// Gvisor only test, since linux supports all flags.\nSKIP_IF(!IsRunningOnGvisor());\n- IOUringParams params;\n+ IOUringParams params = {};\nmemset(¶ms, 0, sizeof(params));\nparams.flags |= IORING_SETUP_SQPOLL;\nASSERT_THAT(IOUringSetup(1, ¶ms), SyscallFailsWithErrno(EINVAL));\n@@ -143,7 +143,7 @@ TEST(IOUringTest, UnsupportedFlags) {\nTEST(IOUringTest, MMapMUnMapWork) {\nSKIP_IF(!IOUringAvailable());\n- IOUringParams params;\n+ IOUringParams params = {};\nFileDescriptor iouringfd = ASSERT_NO_ERRNO_AND_VALUE(NewIOUringFD(1, params));\nvoid *ptr = nullptr;\n@@ -172,7 +172,7 @@ TEST(IOUringTest, MMapMUnMapWork) {\nTEST(IOUringTest, MMapWrongOffset) {\nSKIP_IF(!IOUringAvailable());\n- IOUringParams params;\n+ IOUringParams params = {};\nFileDescriptor iouringfd = ASSERT_NO_ERRNO_AND_VALUE(NewIOUringFD(1, params));\nint sring_sz = params.sq_off.array + params.sq_entries * sizeof(unsigned);\n@@ -188,7 +188,7 @@ TEST(IOUringTest, MMapWrongOffset) {\nTEST(IOUringTest, MMapOffsets) {\nSKIP_IF(!IOUringAvailable());\n- IOUringParams params;\n+ IOUringParams params = {};\nFileDescriptor iouringfd = ASSERT_NO_ERRNO_AND_VALUE(NewIOUringFD(1, params));\nvoid *sq_ptr = nullptr;\n@@ -224,7 +224,7 @@ TEST(IOUringTest, MMapOffsets) {\nTEST(IOUringTest, ReturnedParamsValues) {\nSKIP_IF(!IsRunningOnGvisor());\n- IOUringParams params;\n+ IOUringParams params = {};\nFileDescriptor iouringfd = ASSERT_NO_ERRNO_AND_VALUE(NewIOUringFD(1, params));\nEXPECT_EQ(params.sq_entries, 1);\n@@ -254,7 +254,7 @@ TEST(IOUringTest, ReturnedParamsValues) {\nTEST(IOUringTest, SqeIndexArrayCacheAligned) {\nSKIP_IF(!IOUringAvailable());\n- IOUringParams params;\n+ IOUringParams params = {};\nfor (uint32_t i = 1; i < 10; ++i) {\nFileDescriptor iouringfd =\nASSERT_NO_ERRNO_AND_VALUE(NewIOUringFD(i, params));\n@@ -266,7 +266,7 @@ TEST(IOUringTest, SqeIndexArrayCacheAligned) {\nTEST(IOUringTest, SingleNOPTest) {\nSKIP_IF(!IOUringAvailable());\n- IOUringParams params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n@@ -305,7 +305,7 @@ TEST(IOUringTest, SingleNOPTest) {\nTEST(IOUringTest, QueueingNOPTest) {\nSKIP_IF(!IOUringAvailable());\n- IOUringParams params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> 
io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(4, params));\n@@ -367,7 +367,7 @@ TEST(IOUringTest, QueueingNOPTest) {\nTEST(IOUringTest, MultipleNOPTest) {\nSKIP_IF(!IOUringAvailable());\n- IOUringParams params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(4, params));\n@@ -413,7 +413,7 @@ TEST(IOUringTest, MultipleNOPTest) {\nTEST(IOUringTest, MultiThreadedNOPTest) {\nSKIP_IF(!IOUringAvailable());\n- IOUringParams params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(4, params));\n@@ -463,7 +463,7 @@ TEST(IOUringTest, MultiThreadedNOPTest) {\nTEST(IOUringTest, InvalidOpCodeTest) {\nSKIP_IF(!IOUringAvailable());\n- IOUringParams params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n@@ -502,7 +502,7 @@ TEST(IOUringTest, CorruptRingHeader) {\nconst int kEntries = 64;\n- IOUringParams params;\n+ IOUringParams params = {};\nFileDescriptor iouringfd =\nASSERT_NO_ERRNO_AND_VALUE(NewIOUringFD(kEntries, params));\n@@ -545,7 +545,7 @@ TEST(IOUringTest, CorruptRingHeader) {\nTEST(IOUringTest, SQERingBuffersWrapAroundTest) {\nSKIP_IF(!IOUringAvailable());\n- IOUringParams params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(4, params));\n@@ -614,7 +614,7 @@ TEST(IOUringTest, SQERingBuffersWrapAroundTest) {\nTEST(IOUringTest, NonNullSigsetTest) {\nSKIP_IF(!IsRunningOnGvisor());\n- IOUringParams params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n@@ -640,7 +640,7 @@ TEST(IOUringTest, OverflowCQTest) {\n// Gvisor's completion queue overflow behaviour is different from Linux.\nSKIP_IF(!IsRunningOnGvisor());\n- IOUringParams params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(4, params));\n@@ -722,7 +722,7 @@ TEST(IOUringTest, OverflowCQTest) {\nTEST(IOUringTest, SingleREADVTest) {\nSKIP_IF(!IOUringAvailable());\n- struct io_uring_params params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n@@ -793,7 +793,7 @@ TEST(IOUringTest, SingleREADVTest) {\nTEST(IOUringTest, ReadvEmptyFile) {\nSKIP_IF(!IOUringAvailable());\n- struct io_uring_params params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n@@ -848,7 +848,7 @@ TEST(IOUringTest, ReadvEmptyFile) {\nTEST(IOUringTest, ThreeREADVSingleEnterTest) {\nSKIP_IF(!IOUringAvailable());\n- struct io_uring_params params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(4, params));\n@@ -940,7 +940,7 @@ TEST(IOUringTest, ThreeREADVSingleEnterTest) {\nTEST(IOUringTest, ReadClosedFD) {\nSKIP_IF(!IOUringAvailable());\n- struct io_uring_params params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n@@ -999,7 +999,7 @@ TEST(IOUringTest, ReadClosedFD) {\nTEST(IOUringTest, ShortReadREADVTest) {\nSKIP_IF(!IOUringAvailable());\n- struct io_uring_params params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n@@ -1073,7 +1073,7 @@ 
TEST(IOUringTest, ShortReadREADVTest) {\nTEST(IOUringTest, NoReadPermissionsREADVTest) {\nSKIP_IF(!IOUringAvailable());\n- struct io_uring_params params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n@@ -1151,7 +1151,7 @@ TEST_P(IOUringSqeFieldsTest, READVWithInvalidSqeFieldValue) {\nconst SqeFieldsUT p = GetParam();\n- struct io_uring_params params;\n+ IOUringParams params = {};\nstd::unique_ptr<IOUring> io_uring =\nASSERT_NO_ERRNO_AND_VALUE(IOUring::InitIOUring(1, params));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Consistently initialize IOUringParams in io_uring syscall tests.
Earlier we were using an uninitialized IOUringParams struct for io_uring, which
could contain garbage flag values and trigger unexpected io_uring behavior when
the test runs.
PiperOrigin-RevId: 506424543 |
259,909 | 01.02.2023 15:53:47 | 28,800 | 9f8cbc08921359a47b28dc0e3c68d31db504a99e | Add more internal logging to fuse syscall tests | [
{
"change_type": "MODIFY",
"old_path": "test/runner/fuse/fuse.go",
"new_path": "test/runner/fuse/fuse.go",
"diff": "package main\nimport (\n+ golog \"log\"\n\"os\"\n\"os/exec\"\n@@ -32,8 +33,8 @@ func main() {\nlog.Warningf(\"could not create loopback root: %v\", err)\nos.Exit(1)\n}\n- opts := &fuse.MountOptions{DirectMount: true}\n- rawFS := fs.NewNodeFS(loopbackRoot, &fs.Options{})\n+ opts := &fuse.MountOptions{DirectMount: true, Debug: true}\n+ rawFS := fs.NewNodeFS(loopbackRoot, &fs.Options{Logger: golog.Default()})\nserver, err := fuse.NewServer(rawFS, \"/tmp\", opts)\nif err != nil {\nlog.Warningf(\"could not create fuse server: %v\", err)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add more internal logging to fuse syscall tests
PiperOrigin-RevId: 506445741 |
259,868 | 01.02.2023 17:55:08 | 28,800 | b932fb1fb6661d5b916fbf43c0c99c47703a2fdf | Add documentation on how to use the gVisor Prometheus metric server. | [
{
"change_type": "MODIFY",
"old_path": "g3doc/user_guide/BUILD",
"new_path": "g3doc/user_guide/BUILD",
"diff": "@@ -64,6 +64,14 @@ doc(\nweight = \"55\",\n)\n+doc(\n+ name = \"observability\",\n+ src = \"observability.md\",\n+ category = \"User Guide\",\n+ permalink = \"/docs/user_guide/observability/\",\n+ weight = \"56\",\n+)\n+\ndoc(\nname = \"checkpoint_restore\",\nsrc = \"checkpoint_restore.md\",\n"
},
{
"change_type": "MODIFY",
"old_path": "g3doc/user_guide/platforms.md",
"new_path": "g3doc/user_guide/platforms.md",
"diff": "[TOC]\n-This guide described how to change the\n+This guide describes how to change the\n[platform](../architecture_guide/platforms.md) used by `runsc`.\nConfiguring the platform provides significant performance benefits, but isn't\n"
},
{
"change_type": "MODIFY",
"old_path": "g3doc/user_guide/runtime_monitoring.md",
"new_path": "g3doc/user_guide/runtime_monitoring.md",
"diff": "# Runtime Monitoring\n-The runtime monitoring feature provides an interface to observe runtime behavior\n-of applications running inside gVisor. Although it can be used for many\n-purposes, it was built with the primary focus on threat detection. Out of the\n-box, gVisor comes with support to stream application actions (called trace\n-points) to an external process, that is used to validate the actions and alert\n-when abnormal behavior is detected. Trace points are available for all syscalls\n-and other important events in the system, e.g. container start. More trace\n-points can be easily added as needed. The trace points are sent to a process\n-running alongside the sandbox, which is isolated from the sandbox for security\n-reasons. Additionally, the monitoring process can be shared by many sandboxes.\n+The **Runtime Monitoring** feature provides an interface to observe runtime\n+behavior of applications running inside gVisor. Although it can be used for many\n+purposes, it was built with the primary focus on threat detection.\n+\n+**NOTE**: Runtime monitoring is about the ability to understand the behavior of\n+sandboxed workloads. This differs from\n+[observability of gVisor itself](observability.md).\n+\n+Out of the box, gVisor comes with support to stream application actions (called\n+trace points) to an external process, that is used to validate the actions and\n+alert when abnormal behavior is detected. Trace points are available for all\n+syscalls and other important events in the system, e.g. container start. More\n+trace points can be easily added as needed. The trace points are sent to a\n+process running alongside the sandbox, which is isolated from the sandbox for\n+security reasons. Additionally, the monitoring process can be shared by many\n+sandboxes.\nYou can use the following links to learn more:\n"
},
{
"change_type": "MODIFY",
"old_path": "website/BUILD",
"new_path": "website/BUILD",
"diff": "@@ -155,6 +155,7 @@ docs(\n\"//g3doc/user_guide:filesystem\",\n\"//g3doc/user_guide:install\",\n\"//g3doc/user_guide:networking\",\n+ \"//g3doc/user_guide:observability\",\n\"//g3doc/user_guide:platforms\",\n\"//g3doc/user_guide:production\",\n\"//g3doc/user_guide:runtime_monitoring\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add documentation on how to use the gVisor Prometheus metric server.
PiperOrigin-RevId: 506472933 |
259,909 | 02.02.2023 12:25:22 | 28,800 | fc69a401df98e5e6e90e337e2407c9662edb98bc | Enable more FUSE syscall tests.
Several syscall tests are working after refactoring inode operations.
This change also renames the use_fusefs test attribute to add_fusefs,
since that more accurately describes what the attribute does. | [
{
"change_type": "MODIFY",
"old_path": "test/runner/defs.bzl",
"new_path": "test/runner/defs.bzl",
"diff": "@@ -72,7 +72,7 @@ def _syscall_test(\niouring = False,\ncontainer = None,\none_sandbox = True,\n- use_fusefs = False,\n+ fusefs = False,\n**kwargs):\n# Prepend \"runsc\" to non-native platform names.\nfull_platform = platform if platform == \"native\" else \"runsc_\" + platform\n@@ -85,7 +85,7 @@ def _syscall_test(\nname += \"_overlay\"\nif network != \"none\":\nname += \"_\" + network + \"net\"\n- if use_fusefs:\n+ if fusefs:\nname += \"_fuse\"\n# Apply all tags.\n@@ -134,7 +134,7 @@ def _syscall_test(\n\"--platform-support=\" + platform_support,\n\"--network=\" + network,\n\"--use-tmpfs=\" + str(use_tmpfs),\n- \"--use-fusefs=\" + str(use_fusefs),\n+ \"--fusefs=\" + str(fusefs),\n\"--file-access=\" + file_access,\n\"--overlay=\" + str(overlay),\n\"--add-host-communication=\" + str(add_host_communication),\n@@ -167,7 +167,7 @@ def all_platforms():\ndef syscall_test(\ntest,\nuse_tmpfs = False,\n- use_fusefs = False,\n+ add_fusefs = False,\nadd_overlay = False,\nadd_host_communication = False,\nadd_hostinet = False,\n@@ -183,6 +183,7 @@ def syscall_test(\nArgs:\ntest: the test target.\nuse_tmpfs: use tmpfs in the defined tests.\n+ add_fusefs: add a fusefs test.\nadd_overlay: add an overlay test.\nadd_host_communication: setup UDS and pipe external communication for tests.\nadd_hostinet: add a hostinet test.\n@@ -268,12 +269,12 @@ def syscall_test(\nfile_access = \"shared\",\n**kwargs\n)\n- if use_fusefs:\n+ if add_fusefs:\n_syscall_test(\ntest = test,\nplatform = default_platform,\nuse_tmpfs = True,\n- use_fusefs = True,\n+ fusefs = True,\nadd_host_communication = add_host_communication,\ntags = platforms.get(default_platform, []) + tags,\ndebug = debug,\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/fuse/BUILD",
"new_path": "test/runner/fuse/BUILD",
"diff": "@@ -11,5 +11,6 @@ go_binary(\n\"//runsc/specutils\",\n\"@com_github_hanwen_go_fuse_v2//fs:go_default_library\",\n\"@com_github_hanwen_go_fuse_v2//fuse:go_default_library\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/fuse/fuse.go",
"new_path": "test/runner/fuse/fuse.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"github.com/hanwen/go-fuse/v2/fs\"\n\"github.com/hanwen/go-fuse/v2/fuse\"\n+ \"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/runsc/specutils\"\n)\n@@ -34,20 +35,20 @@ func main() {\nos.Exit(1)\n}\nopts := &fuse.MountOptions{DirectMount: true, Debug: true, Options: []string{\"default_permissions\"}}\n- rawFS := fs.NewNodeFS(loopbackRoot, &fs.Options{Logger: golog.Default()})\n+ rawFS := fs.NewNodeFS(loopbackRoot, &fs.Options{NullPermissions: true, Logger: golog.Default()})\nserver, err := fuse.NewServer(rawFS, \"/tmp\", opts)\nif err != nil {\nlog.Warningf(\"could not create fuse server: %v\", err)\nos.Exit(1)\n}\n+ // Clear umask so that it doesn't affect the mode bits twice.\n+ unix.Umask(0)\ngo server.Serve()\ndefer func() {\nserver.Unmount()\nserver.Wait()\n}()\n- // TODO(b/267200022): Investigate why gofuse pollHack sometimes fails with\n- // EINTR.\nif _, _, err := specutils.RetryEintr(func() (uintptr, uintptr, error) {\nif err := server.WaitMount(); err != nil {\nreturn 0, 0, err\n"
},
{
"change_type": "MODIFY",
"old_path": "test/runner/main.go",
"new_path": "test/runner/main.go",
"diff": "@@ -49,7 +49,7 @@ var (\nplatformSupport = flag.String(\"platform-support\", \"\", \"String passed to the test as GVISOR_PLATFORM_SUPPORT environment variable. Used to determine which syscall tests are expected to work with the current platform.\")\nnetwork = flag.String(\"network\", \"none\", \"network stack to run on (sandbox, host, none)\")\nuseTmpfs = flag.Bool(\"use-tmpfs\", false, \"mounts tmpfs for /tmp\")\n- useFUSEfs = flag.Bool(\"use-fusefs\", false, \"mounts a fusefs for /tmp\")\n+ fusefs = flag.Bool(\"fusefs\", false, \"mounts a fusefs for /tmp\")\nfileAccess = flag.String(\"file-access\", \"exclusive\", \"mounts root in exclusive or shared mode\")\noverlay = flag.Bool(\"overlay\", false, \"wrap filesystem mounts with writable tmpfs overlay\")\ncontainer = flag.Bool(\"container\", false, \"run tests in their own namespaces (user ns, network ns, etc), pretending to be root. Implicitly enabled if network=host, or if using network namespaces\")\n@@ -378,7 +378,7 @@ func runTestCaseRunsc(testBin string, tc *gtest.TestCase, args []string, t *test\nargs = tc.Args()\n}\nvar spec *specs.Spec\n- if *useFUSEfs {\n+ if *fusefs {\nfuseServer, err := testutil.FindFile(\"test/runner/fuse/fuse\")\nif err != nil {\nfatalf(\"cannot find fuse: %v\", err)\n@@ -433,7 +433,7 @@ func runTestCaseRunsc(testBin string, tc *gtest.TestCase, args []string, t *test\ntestTmpDir = \"/tmp\"\n}\n}\n- if *useFUSEfs {\n+ if *fusefs {\n// In fuse tests, the fuse server forwards all filesystem ops from /tmp\n// to /fuse.\nspec.Mounts = append(spec.Mounts, specs.Mount{\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -63,11 +63,13 @@ syscall_test(\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:chdir_test\",\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:chmod_test\",\n)\n@@ -126,9 +128,9 @@ syscall_test(\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:creat_test\",\n- use_fusefs = True,\n)\nsyscall_test(\n@@ -184,6 +186,7 @@ syscall_test(\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:fchdir_test\",\n)\n@@ -296,6 +299,7 @@ syscall_test(\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:lseek_test\",\n)\n@@ -321,14 +325,15 @@ syscall_test(\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:mkdir_test\",\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:mknod_test\",\n- use_fusefs = True,\n)\nsyscall_test(\n@@ -378,15 +383,16 @@ syscall_test(\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:open_create_test\",\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\nshard_count = more_shards,\ntest = \"//test/syscalls/linux:open_test\",\n- use_fusefs = True,\n)\nsyscall_test(\n@@ -441,16 +447,19 @@ syscall_test(\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:pread64_test\",\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:preadv_test\",\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:preadv2_test\",\n)\n@@ -506,11 +515,13 @@ syscall_test(\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:pwritev2_test\",\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:pwrite64_test\",\n)\n@@ -529,6 +540,7 @@ syscall_test(\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:read_test\",\n)\n@@ -546,6 +558,7 @@ syscall_test(\nsyscall_test(\nsize = \"medium\",\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:readv_test\",\n)\n@@ -914,16 +927,16 @@ syscall_test(\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:statfs_test\",\n- use_fusefs = True,\nuse_tmpfs = True, # Test specifically relies on TEST_TMPDIR to be tmpfs.\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:stat_test\",\n- use_fusefs = True,\n)\nsyscall_test(\n@@ -937,6 +950,7 @@ syscall_test(\n)\nsyscall_test(\n+ add_fusefs = True,\nadd_overlay = True,\ntest = \"//test/syscalls/linux:symlink_test\",\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable more FUSE syscall tests.
Several syscall tests are working after refactoring inode operations.
This change also renames the use_fusefs test attribute to add_fusefs,
since that more accurately describes what the attribute does.
PiperOrigin-RevId: 506692314 |