Dataset schema (column, dtype, observed min/max):

idx              int64            min 0, max 522k
project          stringclasses    631 values
commit_id        stringlengths    min 7, max 40
project_url      stringclasses    630 values
commit_url       stringlengths    min 4, max 164
commit_message   stringlengths    min 0, max 11.5k
target           int64            min 0, max 1
func             stringlengths    min 5, max 484k
func_hash        float64          min 1,559,120,642,045,605,000,000,000, max 340,279,892,905,069,500,000,000,000,000
file_name        stringlengths    min 4, max 45
file_hash        float64          min 25,942,829,220,065,710,000,000,000, max 340,272,304,251,680,200,000,000,000,000
cwe              sequencelengths  min 0, max 1
cve              stringlengths    min 4, max 16
cve_desc         stringlengths    min 0, max 2.3k
nvd_url          stringlengths    min 37, max 49

3,649
radare2
40b021ba29c8f90ccf7c879fde2580bc73a17e8e
https://github.com/radare/radare2
https://github.com/radare/radare2/commit/40b021ba29c8f90ccf7c879fde2580bc73a17e8e
None
1
static bool r_bin_mdmp_init_directory(struct r_bin_mdmp_obj *obj) { int i; ut8 *directory_base; struct minidump_directory *entry; directory_base = obj->b->buf + obj->hdr->stream_directory_rva; sdb_num_set (obj->kv, "mdmp_directory.offset", obj->hdr->stream_directory_rva, 0); sdb_set (obj->kv, "mdmp_directory.format", "[4]E? " "(mdmp_stream_type)StreamType " "(mdmp_location_descriptor)Location", 0); /* Parse each entry in the directory */ for (i = 0; i < (int)obj->hdr->number_of_streams; i++) { entry = (struct minidump_directory *)(directory_base + (i * sizeof (struct minidump_directory))); r_bin_mdmp_init_directory_entry (obj, entry); } return true; }
275,785,464,801,853,400,000,000,000,000,000,000,000
mdmp.c
317,085,147,769,769,330,000,000,000,000,000,000,000
[ "CWE-125" ]
CVE-2018-14016
The r_bin_mdmp_init_directory_entry function in mdmp.c in radare2 2.7.0 allows remote attackers to cause a denial of service (heap-based buffer over-read and application crash) via a crafted Mini Crash Dump file.
https://nvd.nist.gov/vuln/detail/CVE-2018-14016
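
The patch for CVE-2018-14016 bounds-checks each stream-directory entry against the mapped file before dereferencing it. A minimal standalone sketch of that pattern, assuming a 64-bit size_t and a simplified entry layout (this is not the exact radare2 change):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct minidump_directory { uint32_t stream_type; uint32_t data_size; uint32_t rva; };

/* Walk the directory, rejecting any entry that would read past the
 * end of the buffer instead of dereferencing it blindly. */
static bool parse_directory(const uint8_t *buf, size_t buf_len,
                            size_t dir_off, uint32_t n_streams) {
    for (uint32_t i = 0; i < n_streams; i++) {
        size_t off = dir_off + (size_t)i * sizeof(struct minidump_directory);
        if (off > buf_len || buf_len - off < sizeof(struct minidump_directory))
            return false;                    /* truncated or malformed file */
        const struct minidump_directory *entry =
            (const struct minidump_directory *)(buf + off);
        (void)entry;                         /* hand off to the entry handler */
    }
    return true;
}
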
3,651
libpng
8a05766cb74af05c04c53e6c9d60c13fc4d59bf2
https://github.com/glennrp/libpng
https://github.com/glennrp/libpng/commit/8a05766cb74af05c04c53e6c9d60c13fc4d59bf2
[libpng16] Fix the calculation of row_factor in png_check_chunk_length (Bug report by Thuan Pham, SourceForge issue #278)
1
png_check_chunk_length(png_const_structrp png_ptr, const png_uint_32 length) { png_alloc_size_t limit = PNG_UINT_31_MAX; # ifdef PNG_SET_USER_LIMITS_SUPPORTED if (png_ptr->user_chunk_malloc_max > 0 && png_ptr->user_chunk_malloc_max < limit) limit = png_ptr->user_chunk_malloc_max; # elif PNG_USER_CHUNK_MALLOC_MAX > 0 if (PNG_USER_CHUNK_MALLOC_MAX < limit) limit = PNG_USER_CHUNK_MALLOC_MAX; # endif if (png_ptr->chunk_name == png_IDAT) { png_alloc_size_t idat_limit = PNG_UINT_31_MAX; size_t row_factor = (png_ptr->width * png_ptr->channels * (png_ptr->bit_depth > 8? 2: 1) + 1 + (png_ptr->interlaced? 6: 0)); if (png_ptr->height > PNG_UINT_32_MAX/row_factor) idat_limit=PNG_UINT_31_MAX; else idat_limit = png_ptr->height * row_factor; row_factor = row_factor > 32566? 32566 : row_factor; idat_limit += 6 + 5*(idat_limit/row_factor+1); /* zlib+deflate overhead */ idat_limit=idat_limit < PNG_UINT_31_MAX? idat_limit : PNG_UINT_31_MAX; limit = limit < idat_limit? idat_limit : limit; } if (length > limit) { png_debug2(0," length = %lu, limit = %lu", (unsigned long)length,(unsigned long)limit); png_chunk_error(png_ptr, "chunk data is too large"); } }
110,259,438,980,219,460,000,000,000,000,000,000,000
pngrutil.c
326,867,964,072,458,030,000,000,000,000,000,000,000
[ "CWE-190" ]
CVE-2018-13785
In libpng 1.6.34, a wrong calculation of row_factor in the png_check_chunk_length function (pngrutil.c) may trigger an integer overflow and resultant divide-by-zero while processing a crafted PNG file, leading to a denial of service.
https://nvd.nist.gov/vuln/detail/CVE-2018-13785
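
CVE-2018-13785 is a 32-bit wrap in the row_factor product that can reach zero and then divide idat_limit by it. The patched calculation widens the arithmetic; the essential guard looks like this (a sketch, with png_alloc_size_t approximated by uint64_t):

#include <stdint.h>

/* Per-row byte factor computed in 64 bits so the product cannot wrap
 * to zero and poison the later idat_limit / row_factor division. */
static uint64_t row_factor64(uint32_t width, uint32_t channels,
                             uint32_t bit_depth, int interlaced) {
    uint64_t rf = (uint64_t)width * channels * (bit_depth > 8 ? 2 : 1)
                  + 1 + (interlaced ? 6 : 0);
    return rf;   /* always >= 1 by construction */
}
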
3,652
linux
9f645bcc566a1e9f921bdae7528a01ced5bc3713
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/9f645bcc566a1e9f921bdae7528a01ced5bc3713
video: uvesafb: Fix integer overflow in allocation cmap->len can get close to INT_MAX/2, allowing for an integer overflow in allocation. This uses kmalloc_array() instead to catch the condition. Reported-by: Dr Silvio Cesare of InfoSect <silvio.cesare@gmail.com> Fixes: 8bdb3a2d7df48 ("uvesafb: the driver core") Cc: stable@vger.kernel.org Signed-off-by: Kees Cook <keescook@chromium.org>
1
static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info) { struct uvesafb_pal_entry *entries; int shift = 16 - dac_width; int i, err = 0; if (info->var.bits_per_pixel == 8) { if (cmap->start + cmap->len > info->cmap.start + info->cmap.len || cmap->start < info->cmap.start) return -EINVAL; entries = kmalloc(sizeof(*entries) * cmap->len, GFP_KERNEL); if (!entries) return -ENOMEM; for (i = 0; i < cmap->len; i++) { entries[i].red = cmap->red[i] >> shift; entries[i].green = cmap->green[i] >> shift; entries[i].blue = cmap->blue[i] >> shift; entries[i].pad = 0; } err = uvesafb_setpalette(entries, cmap->len, cmap->start, info); kfree(entries); } else { /* * For modes with bpp > 8, we only set the pseudo palette in * the fb_info struct. We rely on uvesafb_setcolreg to do all * sanity checking. */ for (i = 0; i < cmap->len; i++) { err |= uvesafb_setcolreg(cmap->start + i, cmap->red[i], cmap->green[i], cmap->blue[i], 0, info); } } return err; }
129,461,505,887,171,650,000,000,000,000,000,000,000
uvesafb.c
8,702,348,276,085,379,000,000,000,000,000,000,000
[ "CWE-190" ]
CVE-2018-13406
An integer overflow in the uvesafb_setcmap function in drivers/video/fbdev/uvesafb.c in the Linux kernel before 4.17.4 could result in local attackers being able to crash the kernel or potentially elevate privileges because kmalloc_array is not used.
https://nvd.nist.gov/vuln/detail/CVE-2018-13406
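
The commit replaces the open-coded sizeof(*entries) * cmap->len with kmalloc_array(), which fails instead of wrapping when cmap->len approaches INT_MAX/2. The same guard in portable C (a sketch of what kmalloc_array() does, not the kernel implementation):

#include <stdint.h>
#include <stdlib.h>

/* Allocate n objects of `size` bytes, refusing when n * size would
 * overflow size_t -- the check the vulnerable kmalloc() call lacked. */
static void *malloc_array(size_t n, size_t size) {
    if (size != 0 && n > SIZE_MAX / size)
        return NULL;
    return malloc(n * size);
}
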
3,653
linux
0fa3ecd87848c9c93c2c828ef4c3a8ca36ce46c7
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/0fa3ecd87848c9c93c2c828ef4c3a8ca36ce46c7
Fix up non-directory creation in SGID directories sgid directories have special semantics, making newly created files in the directory belong to the group of the directory, and newly created subdirectories will also become sgid. This is historically used for group-shared directories. But group directories writable by non-group members should not imply that such non-group members can magically join the group, so make sure to clear the sgid bit on non-directories for non-members (but remember that sgid without group execute means "mandatory locking", just to confuse things even more). Reported-by: Jann Horn <jannh@google.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Al Viro <viro@zeniv.linux.org.uk> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1
void inode_init_owner(struct inode *inode, const struct inode *dir, umode_t mode) { inode->i_uid = current_fsuid(); if (dir && dir->i_mode & S_ISGID) { inode->i_gid = dir->i_gid; if (S_ISDIR(mode)) mode |= S_ISGID; } else inode->i_gid = current_fsgid(); inode->i_mode = mode; }
215,180,090,082,961,630,000,000,000,000,000,000,000
inode.c
212,299,787,291,086,200,000,000,000,000,000,000,000
[ "CWE-269" ]
CVE-2018-13405
The inode_init_owner function in fs/inode.c in the Linux kernel through 3.16 allows local users to create files with an unintended group ownership, in a scenario where a directory is SGID to a certain group and is writable by a user who is not a member of that group. Here, the non-member can trigger creation of a plain file whose group ownership is that group. The intended behavior was that the non-member can trigger creation of a directory (but not a plain file) whose group ownership is that group. The non-member can escalate privileges by making the plain file executable and SGID.
https://nvd.nist.gov/vuln/detail/CVE-2018-13405
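
The fix keeps SGID inheritance for subdirectories but strips the bit from non-directories created by users outside the directory's group. A userspace sketch of the corrected mode logic; in_group() is a stand-in for the kernel's in_group_p(), and the real patch additionally honors CAP_FSETID:

#include <stdbool.h>
#include <sys/stat.h>
#include <sys/types.h>

static bool in_group(gid_t gid) { (void)gid; return false; /* assumed membership test */ }

/* Decide the mode of a new inode created in an SGID directory. */
static mode_t init_mode(mode_t mode, mode_t dir_mode, gid_t dir_gid) {
    if (dir_mode & S_ISGID) {
        if (S_ISDIR(mode))
            mode |= S_ISGID;              /* subdirectories stay SGID */
        else if ((mode & S_ISGID) && !in_group(dir_gid))
            mode &= ~S_ISGID;             /* non-members don't get SGID files */
    }
    return mode;
}
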
3,654
FFmpeg
00e8181bd97c834fe60751b0c511d4bb97875f78
https://github.com/FFmpeg/FFmpeg
https://github.com/FFmpeg/FFmpeg/commit/00e8181bd97c834fe60751b0c511d4bb97875f78
avcodec/ac3_parser: Check init_get_bits8() for failure Fixes: null pointer dereference Fixes: ffmpeg_crash_6.avi Found-by: Thuan Pham, Marcel Böhme, Andrew Santosa and Alexandru Razvan Caciulescu with AFLSmart Reviewed-by: Paul B Mahol <onemda@gmail.com> Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
1
int avpriv_ac3_parse_header(AC3HeaderInfo **phdr, const uint8_t *buf, size_t size) { GetBitContext gb; AC3HeaderInfo *hdr; int err; if (!*phdr) *phdr = av_mallocz(sizeof(AC3HeaderInfo)); if (!*phdr) return AVERROR(ENOMEM); hdr = *phdr; init_get_bits8(&gb, buf, size); err = ff_ac3_parse_header(&gb, hdr); if (err < 0) return AVERROR_INVALIDDATA; return get_bits_count(&gb); }
236,477,656,578,978,820,000,000,000,000,000,000,000
None
null
[ "CWE-476" ]
CVE-2018-13303
In FFmpeg 4.0.1, a missing check for failure of a call to init_get_bits8() in the avpriv_ac3_parse_header function in libavcodec/ac3_parser.c may trigger a NULL pointer dereference while converting a crafted AVI file to MPEG4, leading to a denial of service.
https://nvd.nist.gov/vuln/detail/CVE-2018-13303
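
The one-line fix checks init_get_bits8()'s return value before reading from the bit context. Sketched against a stand-in init function (the failure conditions here are illustrative, not FFmpeg's exact ones):

#include <limits.h>
#include <stddef.h>

/* Stand-in for init_get_bits8(): returns < 0 when the buffer cannot be
 * used, e.g. NULL input or a size whose bit count would overflow. */
static int init_bits(const unsigned char *buf, size_t size) {
    return (buf && size <= INT_MAX / 8) ? 0 : -1;
}

static int parse_header(const unsigned char *buf, size_t size) {
    int ret = init_bits(buf, size);
    if (ret < 0)
        return ret;   /* the previously missing check: no reads from an
                       * uninitialized bit reader */
    /* ... parse fields from the bit reader ... */
    return 0;
}
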
3,655
FFmpeg
2aa9047486dbff12d9e040f917e5f799ed2fd78b
https://github.com/FFmpeg/FFmpeg
https://github.com/FFmpeg/FFmpeg/commit/2aa9047486dbff12d9e040f917e5f799ed2fd78b
avcodec/mpeg4videodec: Check read profile before setting it Fixes: null pointer dereference Fixes: ffmpeg_crash_7.avi Found-by: Thuan Pham, Marcel Böhme, Andrew Santosa and Alexandru Razvan Caciulescu with AFLSmart Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
1
int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; unsigned startcode, v; int ret; int vol = 0; /* search next start code */ align_get_bits(gb); if (!s->studio_profile && s->avctx->bits_per_raw_sample != 8) s->avctx->bits_per_raw_sample = 0; if (s->codec_tag == AV_RL32("WV1F") && show_bits(gb, 24) == 0x575630) { skip_bits(gb, 24); if (get_bits(gb, 8) == 0xF0) goto end; } startcode = 0xff; for (;;) { if (get_bits_count(gb) >= gb->size_in_bits) { if (gb->size_in_bits == 8 && (ctx->divx_version >= 0 || ctx->xvid_build >= 0) || s->codec_tag == AV_RL32("QMP4")) { av_log(s->avctx, AV_LOG_VERBOSE, "frame skip %d\n", gb->size_in_bits); return FRAME_SKIPPED; // divx bug } else return AVERROR_INVALIDDATA; // end of stream } /* use the bits after the test */ v = get_bits(gb, 8); startcode = ((startcode << 8) | v) & 0xffffffff; if ((startcode & 0xFFFFFF00) != 0x100) continue; // no startcode if (s->avctx->debug & FF_DEBUG_STARTCODE) { av_log(s->avctx, AV_LOG_DEBUG, "startcode: %3X ", startcode); if (startcode <= 0x11F) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Start"); else if (startcode <= 0x12F) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Layer Start"); else if (startcode <= 0x13F) av_log(s->avctx, AV_LOG_DEBUG, "Reserved"); else if (startcode <= 0x15F) av_log(s->avctx, AV_LOG_DEBUG, "FGS bp start"); else if (startcode <= 0x1AF) av_log(s->avctx, AV_LOG_DEBUG, "Reserved"); else if (startcode == 0x1B0) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq Start"); else if (startcode == 0x1B1) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq End"); else if (startcode == 0x1B2) av_log(s->avctx, AV_LOG_DEBUG, "User Data"); else if (startcode == 0x1B3) av_log(s->avctx, AV_LOG_DEBUG, "Group of VOP start"); else if (startcode == 0x1B4) av_log(s->avctx, AV_LOG_DEBUG, "Video Session Error"); else if (startcode == 0x1B5) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Start"); else if (startcode == 0x1B6) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Plane start"); else if (startcode == 0x1B7) av_log(s->avctx, AV_LOG_DEBUG, "slice start"); else if (startcode == 0x1B8) av_log(s->avctx, AV_LOG_DEBUG, "extension start"); else if (startcode == 0x1B9) av_log(s->avctx, AV_LOG_DEBUG, "fgs start"); else if (startcode == 0x1BA) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object start"); else if (startcode == 0x1BB) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object Plane start"); else if (startcode == 0x1BC) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object start"); else if (startcode == 0x1BD) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object Plane start"); else if (startcode == 0x1BE) av_log(s->avctx, AV_LOG_DEBUG, "Still Texture Object start"); else if (startcode == 0x1BF) av_log(s->avctx, AV_LOG_DEBUG, "Texture Spatial Layer start"); else if (startcode == 0x1C0) av_log(s->avctx, AV_LOG_DEBUG, "Texture SNR Layer start"); else if (startcode == 0x1C1) av_log(s->avctx, AV_LOG_DEBUG, "Texture Tile start"); else if (startcode == 0x1C2) av_log(s->avctx, AV_LOG_DEBUG, "Texture Shape Layer start"); else if (startcode == 0x1C3) av_log(s->avctx, AV_LOG_DEBUG, "stuffing start"); else if (startcode <= 0x1C5) av_log(s->avctx, AV_LOG_DEBUG, "reserved"); else if (startcode <= 0x1FF) av_log(s->avctx, AV_LOG_DEBUG, "System start"); av_log(s->avctx, AV_LOG_DEBUG, " at %d\n", get_bits_count(gb)); } if (startcode >= 0x120 && startcode <= 0x12F) { if (vol) { av_log(s->avctx, AV_LOG_WARNING, "Ignoring multiple VOL headers\n"); continue; } vol++; if ((ret = decode_vol_header(ctx, gb)) < 0) return ret; } else if (startcode == USER_DATA_STARTCODE) { decode_user_data(ctx, gb); } else if (startcode == GOP_STARTCODE) { mpeg4_decode_gop_header(s, gb); } else if (startcode == VOS_STARTCODE) { mpeg4_decode_profile_level(s, gb); if (s->avctx->profile == FF_PROFILE_MPEG4_SIMPLE_STUDIO && (s->avctx->level > 0 && s->avctx->level < 9)) { s->studio_profile = 1; next_start_code_studio(gb); extension_and_user_data(s, gb, 0); } } else if (startcode == VISUAL_OBJ_STARTCODE) { if (s->studio_profile) { if ((ret = decode_studiovisualobject(ctx, gb)) < 0) return ret; } else mpeg4_decode_visual_object(s, gb); } else if (startcode == VOP_STARTCODE) { break; } align_get_bits(gb); startcode = 0xff; } end: if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) s->low_delay = 1; s->avctx->has_b_frames = !s->low_delay; if (s->studio_profile) { if (!s->avctx->bits_per_raw_sample) { av_log(s->avctx, AV_LOG_ERROR, "Missing VOL header\n"); return AVERROR_INVALIDDATA; } return decode_studio_vop_header(ctx, gb); } else return decode_vop_header(ctx, gb); }
328,896,097,922,084,930,000,000,000,000,000,000,000
None
null
[ "CWE-476" ]
CVE-2018-13301
In FFmpeg 4.0.1, due to a missing check of a profile value before setting it, the ff_mpeg4_decode_picture_header function in libavcodec/mpeg4videodec.c may trigger a NULL pointer dereference while converting a crafted AVI file to MPEG4, leading to a denial of service.
https://nvd.nist.gov/vuln/detail/CVE-2018-13301
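
The fix validates the profile/level read from the bitstream before committing them to the codec context, so no later code path sees a half-entered studio-profile state. A sketch of that shape (the constant matches FFmpeg's FF_PROFILE_MPEG4_SIMPLE_STUDIO; the struct and bounds are simplified):

#define PROFILE_MPEG4_SIMPLE_STUDIO 14

struct dec_ctx { int profile, level, studio_profile; };

/* Commit parsed values only after validation. */
static void set_profile_level(struct dec_ctx *c, int profile, int level) {
    if (profile == PROFILE_MPEG4_SIMPLE_STUDIO) {
        if (level < 1 || level > 8)
            return;              /* reject rather than enter studio mode */
        c->studio_profile = 1;
    }
    c->profile = profile;
    c->level = level;
}
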
3,657
linux
23fcb3340d033d9f081e21e6c12c2db7eaa541d3
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/23fcb3340d033d9f081e21e6c12c2db7eaa541d3
xfs: More robust inode extent count validation When the inode is in extent format, it can't have more extents that fit in the inode fork. We don't currenty check this, and so this corruption goes unnoticed by the inode verifiers. This can lead to crashes operating on invalid in-memory structures. Attempts to access such a inode will now error out in the verifier rather than allowing modification operations to proceed. Reported-by: Wen Xu <wen.xu@gatech.edu> Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com> [darrick: fix a typedef, add some braces and breaks to shut up compiler warnings] Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
1
xfs_dinode_verify( struct xfs_mount *mp, xfs_ino_t ino, struct xfs_dinode *dip) { xfs_failaddr_t fa; uint16_t mode; uint16_t flags; uint64_t flags2; uint64_t di_size; if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) return __this_address; /* Verify v3 integrity information first */ if (dip->di_version >= 3) { if (!xfs_sb_version_hascrc(&mp->m_sb)) return __this_address; if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize, XFS_DINODE_CRC_OFF)) return __this_address; if (be64_to_cpu(dip->di_ino) != ino) return __this_address; if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid)) return __this_address; } /* don't allow invalid i_size */ di_size = be64_to_cpu(dip->di_size); if (di_size & (1ULL << 63)) return __this_address; mode = be16_to_cpu(dip->di_mode); if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN) return __this_address; /* No zero-length symlinks/dirs. */ if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0) return __this_address; /* Fork checks carried over from xfs_iformat_fork */ if (mode && be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) > be64_to_cpu(dip->di_nblocks)) return __this_address; if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize) return __this_address; flags = be16_to_cpu(dip->di_flags); if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp) return __this_address; /* Do we have appropriate data fork formats for the mode? */ switch (mode & S_IFMT) { case S_IFIFO: case S_IFCHR: case S_IFBLK: case S_IFSOCK: if (dip->di_format != XFS_DINODE_FMT_DEV) return __this_address; break; case S_IFREG: case S_IFLNK: case S_IFDIR: switch (dip->di_format) { case XFS_DINODE_FMT_LOCAL: /* * no local regular files yet */ if (S_ISREG(mode)) return __this_address; if (di_size > XFS_DFORK_DSIZE(dip, mp)) return __this_address; if (dip->di_nextents) return __this_address; /* fall through */ case XFS_DINODE_FMT_EXTENTS: case XFS_DINODE_FMT_BTREE: break; default: return __this_address; } break; case 0: /* Uninitialized inode ok. */ break; default: return __this_address; } if (XFS_DFORK_Q(dip)) { switch (dip->di_aformat) { case XFS_DINODE_FMT_LOCAL: if (dip->di_anextents) return __this_address; /* fall through */ case XFS_DINODE_FMT_EXTENTS: case XFS_DINODE_FMT_BTREE: break; default: return __this_address; } } else { /* * If there is no fork offset, this may be a freshly-made inode * in a new disk cluster, in which case di_aformat is zeroed. * Otherwise, such an inode must be in EXTENTS format; this goes * for freed inodes as well. */ switch (dip->di_aformat) { case 0: case XFS_DINODE_FMT_EXTENTS: break; default: return __this_address; } if (dip->di_anextents) return __this_address; } /* extent size hint validation */ fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize), mode, flags); if (fa) return fa; /* only version 3 or greater inodes are extensively verified here */ if (dip->di_version < 3) return NULL; flags2 = be64_to_cpu(dip->di_flags2); /* don't allow reflink/cowextsize if we don't have reflink */ if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) && !xfs_sb_version_hasreflink(&mp->m_sb)) return __this_address; /* only regular files get reflink */ if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG) return __this_address; /* don't let reflink and realtime mix */ if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME)) return __this_address; /* don't let reflink and dax mix */ if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX)) return __this_address; /* COW extent size hint validation */ fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize), mode, flags, flags2); if (fa) return fa; return NULL; }
274,742,324,962,726,900,000,000,000,000,000,000,000
xfs_inode_buf.c
103,496,325,547,574,170,000,000,000,000,000,000,000
[ "CWE-476" ]
CVE-2018-13095
An issue was discovered in fs/xfs/libxfs/xfs_inode_buf.c in the Linux kernel through 4.17.3. A denial of service (memory corruption and BUG) can occur for a corrupted xfs image upon encountering an inode that is in extent format, but has more extents than fit in the inode fork.
https://nvd.nist.gov/vuln/detail/CVE-2018-13095
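
The hardened verifier adds the missing bound: an extent-format fork cannot hold more extent records than fit in its bytes. A sketch of the arithmetic, assuming xfs's 16-byte on-disk extent records:

#include <stdbool.h>
#include <stdint.h>

#define BMBT_REC_SIZE 16u   /* on-disk extent record: two 64-bit words */

/* Reject inodes whose claimed extent count cannot fit in the fork. */
static bool extent_count_ok(uint32_t nextents, uint32_t fork_bytes) {
    return nextents <= fork_bytes / BMBT_REC_SIZE;
}
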
3,658
linux
bb3d48dcf86a97dc25fe9fc2c11938e19cb4399a
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/bb3d48dcf86a97dc25fe9fc2c11938e19cb4399a
xfs: don't call xfs_da_shrink_inode with NULL bp xfs_attr3_leaf_create may have errored out before instantiating a buffer, for example if the blkno is out of range. In that case there is no work to do to remove it, and in fact xfs_da_shrink_inode will lead to an oops if we try. This also seems to fix a flaw where the original error from xfs_attr3_leaf_create gets overwritten in the cleanup case, and it removes a pointless assignment to bp which isn't used after this. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199969 Reported-by: Xu, Wen <wen.xu@gatech.edu> Tested-by: Xu, Wen <wen.xu@gatech.edu> Signed-off-by: Eric Sandeen <sandeen@redhat.com> Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
1
xfs_attr_shortform_to_leaf( struct xfs_da_args *args, struct xfs_buf **leaf_bp) { xfs_inode_t *dp; xfs_attr_shortform_t *sf; xfs_attr_sf_entry_t *sfe; xfs_da_args_t nargs; char *tmpbuffer; int error, i, size; xfs_dablk_t blkno; struct xfs_buf *bp; xfs_ifork_t *ifp; trace_xfs_attr_sf_to_leaf(args); dp = args->dp; ifp = dp->i_afp; sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; size = be16_to_cpu(sf->hdr.totsize); tmpbuffer = kmem_alloc(size, KM_SLEEP); ASSERT(tmpbuffer != NULL); memcpy(tmpbuffer, ifp->if_u1.if_data, size); sf = (xfs_attr_shortform_t *)tmpbuffer; xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); xfs_bmap_local_to_extents_empty(dp, XFS_ATTR_FORK); bp = NULL; error = xfs_da_grow_inode(args, &blkno); if (error) { /* * If we hit an IO error middle of the transaction inside * grow_inode(), we may have inconsistent data. Bail out. */ if (error == -EIO) goto out; xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */ memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */ goto out; } ASSERT(blkno == 0); error = xfs_attr3_leaf_create(args, blkno, &bp); if (error) { error = xfs_da_shrink_inode(args, 0, bp); bp = NULL; if (error) goto out; xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */ memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */ goto out; } memset((char *)&nargs, 0, sizeof(nargs)); nargs.dp = dp; nargs.geo = args->geo; nargs.firstblock = args->firstblock; nargs.dfops = args->dfops; nargs.total = args->total; nargs.whichfork = XFS_ATTR_FORK; nargs.trans = args->trans; nargs.op_flags = XFS_DA_OP_OKNOENT; sfe = &sf->list[0]; for (i = 0; i < sf->hdr.count; i++) { nargs.name = sfe->nameval; nargs.namelen = sfe->namelen; nargs.value = &sfe->nameval[nargs.namelen]; nargs.valuelen = sfe->valuelen; nargs.hashval = xfs_da_hashname(sfe->nameval, sfe->namelen); nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags); error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */ ASSERT(error == -ENOATTR); error = xfs_attr3_leaf_add(bp, &nargs); ASSERT(error != -ENOSPC); if (error) goto out; sfe = XFS_ATTR_SF_NEXTENTRY(sfe); } error = 0; *leaf_bp = bp; out: kmem_free(tmpbuffer); return error; }
255,451,512,913,001,000,000,000,000,000,000,000,000
xfs_attr_leaf.c
330,567,582,969,333,100,000,000,000,000,000,000,000
[ "CWE-476" ]
CVE-2018-13094
An issue was discovered in fs/xfs/libxfs/xfs_attr_leaf.c in the Linux kernel through 4.17.3. An OOPS may occur for a corrupted xfs image after xfs_da_shrink_inode() is called with a NULL bp.
https://nvd.nist.gov/vuln/detail/CVE-2018-13094
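
The fix is purely in the error path: when xfs_attr3_leaf_create() fails before instantiating a buffer there is nothing to shrink, so the original error is propagated instead of calling xfs_da_shrink_inode() with a NULL bp. The shape of the change, with stand-in names:

static int leaf_create(void **bp) { *bp = 0; return -5; /* simulated failure: no buffer produced */ }

/* Corrected caller: propagate the creation error; the pre-fix code
 * called the shrink helper here and oopsed on the NULL buffer. */
static int shortform_to_leaf(void) {
    void *bp;
    int error = leaf_create(&bp);
    if (error)
        return error;
    /* ... populate the new leaf via bp ... */
    return 0;
}
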
3,662
gpac
bceb03fd2be95097a7b409ea59914f332fb6bc86
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/bceb03fd2be95097a7b409ea59914f332fb6bc86
fixed 2 possible heap overflows (inc. #1088)
1
GF_Err hdlr_dump(GF_Box *a, FILE * trace) { GF_HandlerBox *p = (GF_HandlerBox *)a; gf_isom_box_dump_start(a, "HandlerBox", trace); if (p->nameUTF8 && (u32) p->nameUTF8[0] == strlen(p->nameUTF8+1)) { fprintf(trace, "hdlrType=\"%s\" Name=\"%s\" ", gf_4cc_to_str(p->handlerType), p->nameUTF8+1); } else { fprintf(trace, "hdlrType=\"%s\" Name=\"%s\" ", gf_4cc_to_str(p->handlerType), p->nameUTF8); } fprintf(trace, "reserved1=\"%d\" reserved2=\"", p->reserved1); dump_data(trace, (char *) p->reserved2, 12); fprintf(trace, "\""); fprintf(trace, ">\n"); gf_isom_box_dump_done("HandlerBox", a, trace); return GF_OK; }
20,155,211,497,845,732,000,000,000,000,000,000,000
box_dump.c
2,944,473,506,997,116,000,000,000,000,000,000,000
[ "CWE-125" ]
CVE-2018-13006
An issue was discovered in MP4Box in GPAC 0.7.1. There is a heap-based buffer over-read in the isomedia/box_dump.c function hdlr_dump.
https://nvd.nist.gov/vuln/detail/CVE-2018-13006
3,679
linux
81f9c4e4177d31ced6f52a89bb70e93bfb77ca03
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/81f9c4e4177d31ced6f52a89bb70e93bfb77ca03
Merge tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace Pull tracing fixes from Steven Rostedt: "This contains a few fixes and a clean up. - a bad merge caused an "endif" to go in the wrong place in scripts/Makefile.build - softirq tracing fix for tracing that corrupts lockdep and causes a false splat - histogram documentation typo fixes - fix a bad memory reference when passing in no filter to the filter code - simplify code by using the swap macro instead of open coding the swap" * tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: tracing: Fix SKIP_STACK_VALIDATION=1 build due to bad merge with -mrecord-mcount tracing: Fix some errors in histogram documentation tracing: Use swap macro in update_max_tr softirq: Reorder trace_softirqs_on to prevent lockdep splat tracing: Check for no filter when processing event filters
1
predicate_parse(const char *str, int nr_parens, int nr_preds, parse_pred_fn parse_pred, void *data, struct filter_parse_error *pe) { struct prog_entry *prog_stack; struct prog_entry *prog; const char *ptr = str; char *inverts = NULL; int *op_stack; int *top; int invert = 0; int ret = -ENOMEM; int len; int N = 0; int i; nr_preds += 2; /* For TRUE and FALSE */ op_stack = kmalloc_array(nr_parens, sizeof(*op_stack), GFP_KERNEL); if (!op_stack) return ERR_PTR(-ENOMEM); prog_stack = kmalloc_array(nr_preds, sizeof(*prog_stack), GFP_KERNEL); if (!prog_stack) { parse_error(pe, -ENOMEM, 0); goto out_free; } inverts = kmalloc_array(nr_preds, sizeof(*inverts), GFP_KERNEL); if (!inverts) { parse_error(pe, -ENOMEM, 0); goto out_free; } top = op_stack; prog = prog_stack; *top = 0; /* First pass */ while (*ptr) { /* #1 */ const char *next = ptr++; if (isspace(*next)) continue; switch (*next) { case '(': /* #2 */ if (top - op_stack > nr_parens) return ERR_PTR(-EINVAL); *(++top) = invert; continue; case '!': /* #3 */ if (!is_not(next)) break; invert = !invert; continue; } if (N >= nr_preds) { parse_error(pe, FILT_ERR_TOO_MANY_PREDS, next - str); goto out_free; } inverts[N] = invert; /* #4 */ prog[N].target = N-1; len = parse_pred(next, data, ptr - str, pe, &prog[N].pred); if (len < 0) { ret = len; goto out_free; } ptr = next + len; N++; ret = -1; while (1) { /* #5 */ next = ptr++; if (isspace(*next)) continue; switch (*next) { case ')': case '\0': break; case '&': case '|': if (next[1] == next[0]) { ptr++; break; } default: parse_error(pe, FILT_ERR_TOO_MANY_PREDS, next - str); goto out_free; } invert = *top & INVERT; if (*top & PROCESS_AND) { /* #7 */ update_preds(prog, N - 1, invert); *top &= ~PROCESS_AND; } if (*next == '&') { /* #8 */ *top |= PROCESS_AND; break; } if (*top & PROCESS_OR) { /* #9 */ update_preds(prog, N - 1, !invert); *top &= ~PROCESS_OR; } if (*next == '|') { /* #10 */ *top |= PROCESS_OR; break; } if (!*next) /* #11 */ goto out; if (top == op_stack) { ret = -1; /* Too few '(' */ parse_error(pe, FILT_ERR_TOO_MANY_CLOSE, ptr - str); goto out_free; } top--; /* #12 */ } } out: if (top != op_stack) { /* Too many '(' */ parse_error(pe, FILT_ERR_TOO_MANY_OPEN, ptr - str); goto out_free; } prog[N].pred = NULL; /* #13 */ prog[N].target = 1; /* TRUE */ prog[N+1].pred = NULL; prog[N+1].target = 0; /* FALSE */ prog[N-1].target = N; prog[N-1].when_to_branch = false; /* Second Pass */ for (i = N-1 ; i--; ) { int target = prog[i].target; if (prog[i].when_to_branch == prog[target].when_to_branch) prog[i].target = prog[target].target; } /* Third Pass */ for (i = 0; i < N; i++) { invert = inverts[i] ^ prog[i].when_to_branch; prog[i].when_to_branch = invert; /* Make sure the program always moves forward */ if (WARN_ON(prog[i].target <= i)) { ret = -EINVAL; goto out_free; } } return prog; out_free: kfree(op_stack); kfree(prog_stack); kfree(inverts); return ERR_PTR(ret); }
269,743,203,244,690,430,000,000,000,000,000,000,000
trace_events_filter.c
236,797,272,490,507,370,000,000,000,000,000,000,000
[ "CWE-787" ]
CVE-2018-12714
An issue was discovered in the Linux kernel through 4.17.2. The filter parsing in kernel/trace/trace_events_filter.c could be called with no filter, which is an N=0 case when it expected at least one line to have been read, thus making the N-1 index invalid. This allows attackers to cause a denial of service (slab out-of-bounds write) or possibly have unspecified other impact via crafted perf_event_open and mmap system calls.
https://nvd.nist.gov/vuln/detail/CVE-2018-12714
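
With an empty filter the first pass exits with N == 0, so the epilogue's prog[N-1].target assignment writes one element before the array, which is the slab out-of-bounds write in the CVE text. The missing guard, sketched against a toy program array (the caller is assumed to have allocated nr_preds + 2 entries):

struct prog_entry { int target; };

/* Finalize the parsed program; bail out on the empty case before
 * indexing prog[N-1]. */
static int finalize_prog(struct prog_entry *prog, int N) {
    if (N == 0)
        return -1;              /* no predicates parsed */
    prog[N].target = 1;         /* TRUE */
    prog[N + 1].target = 0;     /* FALSE */
    prog[N - 1].target = N;     /* safe now that N >= 1 */
    return 0;
}
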
3,680
linux
bd23a7269834dc7c1f93e83535d16ebc44b75eba
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/bd23a7269834dc7c1f93e83535d16ebc44b75eba
virt: vbox: Only copy_from_user the request-header once In vbg_misc_device_ioctl(), the header of the ioctl argument is copied from the userspace pointer 'arg' and saved to the kernel object 'hdr'. Then the 'version', 'size_in', and 'size_out' fields of 'hdr' are verified. Before this commit, after the checks a buffer for the entire request would be allocated and then all data including the verified header would be copied from the userspace 'arg' pointer again. Given that the 'arg' pointer resides in userspace, a malicious userspace process can race to change the data pointed to by 'arg' between the two copies. By doing so, the user can bypass the verifications on the ioctl argument. This commit fixes this by using the already checked copy of the header to fill the header part of the allocated buffer and only copying the remainder of the data from userspace. Signed-off-by: Wenwen Wang <wang6495@umn.edu> Reviewed-by: Hans de Goede <hdegoede@redhat.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1
static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, unsigned long arg) { struct vbg_session *session = filp->private_data; size_t returned_size, size; struct vbg_ioctl_hdr hdr; bool is_vmmdev_req; int ret = 0; void *buf; if (copy_from_user(&hdr, (void *)arg, sizeof(hdr))) return -EFAULT; if (hdr.version != VBG_IOCTL_HDR_VERSION) return -EINVAL; if (hdr.size_in < sizeof(hdr) || (hdr.size_out && hdr.size_out < sizeof(hdr))) return -EINVAL; size = max(hdr.size_in, hdr.size_out); if (_IOC_SIZE(req) && _IOC_SIZE(req) != size) return -EINVAL; if (size > SZ_16M) return -E2BIG; /* * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid * the need for a bounce-buffer and another copy later on. */ is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) || req == VBG_IOCTL_VMMDEV_REQUEST_BIG; if (is_vmmdev_req) buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT); else buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; if (copy_from_user(buf, (void *)arg, hdr.size_in)) { ret = -EFAULT; goto out; } if (hdr.size_in < size) memset(buf + hdr.size_in, 0, size - hdr.size_in); ret = vbg_core_ioctl(session, req, buf); if (ret) goto out; returned_size = ((struct vbg_ioctl_hdr *)buf)->size_out; if (returned_size > size) { vbg_debug("%s: too much output data %zu > %zu\n", __func__, returned_size, size); returned_size = size; } if (copy_to_user((void *)arg, buf, returned_size) != 0) ret = -EFAULT; out: if (is_vmmdev_req) vbg_req_free(buf, size); else kfree(buf); return ret; }
3,708,566,526,586,698,000,000,000,000,000,000,000
vboxguest_linux.c
280,521,136,845,643,650,000,000,000,000,000,000,000
[ "CWE-362" ]
CVE-2018-12633
An issue was discovered in the Linux kernel through 4.17.2. vbg_misc_device_ioctl() in drivers/virt/vboxguest/vboxguest_linux.c reads the same user data twice with copy_from_user. The header part of the user data is double-fetched, and a malicious user thread can tamper with the critical variables (hdr.size_in and hdr.size_out) in the header between the two fetches because of a race condition, leading to severe kernel errors, such as buffer over-accesses. This bug can cause a local denial of service and information leakage.
https://nvd.nist.gov/vuln/detail/CVE-2018-12633
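
The fix fetches the header once, reuses the verified copy for the first sizeof(hdr) bytes of the request buffer, and copies only the remainder from userspace, closing the window in which a racing thread can rewrite the header. A userspace sketch of the single-fetch pattern, with memcpy standing in for copy_from_user and size_in assumed already validated to lie between sizeof(hdr) and buf_size:

#include <stdlib.h>
#include <string.h>

struct hdr { unsigned size_in, size_out; };

static void *build_request(const unsigned char *user_buf,
                           const struct hdr *h, size_t buf_size) {
    unsigned char *buf = malloc(buf_size);
    if (!buf)
        return NULL;
    memcpy(buf, h, sizeof(*h));                   /* verified header, fetched once */
    memcpy(buf + sizeof(*h), user_buf + sizeof(*h),
           h->size_in - sizeof(*h));              /* only the tail is re-read */
    return buf;
}
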
3,681
FFmpeg
b3332a182f8ba33a34542e4a0370f38b914ccf7d
https://github.com/FFmpeg/FFmpeg
https://github.com/FFmpeg/FFmpeg/commit/b3332a182f8ba33a34542e4a0370f38b914ccf7d
avcodec/idctdsp: Transmit studio_profile to init instead of using AVCodecContext profile These 2 fields are not always the same, it is simpler to always use the same field for detecting studio profile Fixes: null pointer dereference Fixes: ffmpeg_crash_3.avi Found-by: Thuan Pham <thuanpv@comp.nus.edu.sg>, Marcel Böhme, Andrew Santosa and Alexandru RazvanCaciulescu with AFLSmart Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
1
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx) { const unsigned high_bit_depth = avctx->bits_per_raw_sample > 8; if (avctx->lowres==1) { c->idct_put = ff_jref_idct4_put; c->idct_add = ff_jref_idct4_add; c->idct = ff_j_rev_dct4; c->perm_type = FF_IDCT_PERM_NONE; } else if (avctx->lowres==2) { c->idct_put = ff_jref_idct2_put; c->idct_add = ff_jref_idct2_add; c->idct = ff_j_rev_dct2; c->perm_type = FF_IDCT_PERM_NONE; } else if (avctx->lowres==3) { c->idct_put = ff_jref_idct1_put; c->idct_add = ff_jref_idct1_add; c->idct = ff_j_rev_dct1; c->perm_type = FF_IDCT_PERM_NONE; } else { if (avctx->bits_per_raw_sample == 10 || avctx->bits_per_raw_sample == 9) { /* 10-bit MPEG-4 Simple Studio Profile requires a higher precision IDCT However, it only uses idct_put */ if (avctx->codec_id == AV_CODEC_ID_MPEG4 && avctx->profile == FF_PROFILE_MPEG4_SIMPLE_STUDIO) c->idct_put = ff_simple_idct_put_int32_10bit; else { c->idct_put = ff_simple_idct_put_int16_10bit; c->idct_add = ff_simple_idct_add_int16_10bit; c->idct = ff_simple_idct_int16_10bit; } c->perm_type = FF_IDCT_PERM_NONE; } else if (avctx->bits_per_raw_sample == 12) { c->idct_put = ff_simple_idct_put_int16_12bit; c->idct_add = ff_simple_idct_add_int16_12bit; c->idct = ff_simple_idct_int16_12bit; c->perm_type = FF_IDCT_PERM_NONE; } else { if (avctx->idct_algo == FF_IDCT_INT) { c->idct_put = ff_jref_idct_put; c->idct_add = ff_jref_idct_add; c->idct = ff_j_rev_dct; c->perm_type = FF_IDCT_PERM_LIBMPEG2; #if CONFIG_FAANIDCT } else if (avctx->idct_algo == FF_IDCT_FAAN) { c->idct_put = ff_faanidct_put; c->idct_add = ff_faanidct_add; c->idct = ff_faanidct; c->perm_type = FF_IDCT_PERM_NONE; #endif /* CONFIG_FAANIDCT */ } else { // accurate/default /* Be sure FF_IDCT_NONE will select this one, since it uses FF_IDCT_PERM_NONE */ c->idct_put = ff_simple_idct_put_int16_8bit; c->idct_add = ff_simple_idct_add_int16_8bit; c->idct = ff_simple_idct_int16_8bit; c->perm_type = FF_IDCT_PERM_NONE; } } } c->put_pixels_clamped = ff_put_pixels_clamped_c; c->put_signed_pixels_clamped = put_signed_pixels_clamped_c; c->add_pixels_clamped = ff_add_pixels_clamped_c; if (CONFIG_MPEG4_DECODER && avctx->idct_algo == FF_IDCT_XVID) ff_xvid_idct_init(c, avctx); if (ARCH_AARCH64) ff_idctdsp_init_aarch64(c, avctx, high_bit_depth); if (ARCH_ALPHA) ff_idctdsp_init_alpha(c, avctx, high_bit_depth); if (ARCH_ARM) ff_idctdsp_init_arm(c, avctx, high_bit_depth); if (ARCH_PPC) ff_idctdsp_init_ppc(c, avctx, high_bit_depth); if (ARCH_X86) ff_idctdsp_init_x86(c, avctx, high_bit_depth); if (ARCH_MIPS) ff_idctdsp_init_mips(c, avctx, high_bit_depth); ff_init_scantable_permutation(c->idct_permutation, c->perm_type); }
258,201,715,643,119,100,000,000,000,000,000,000,000
idctdsp.c
24,121,040,549,549,385,000,000,000,000,000,000,000
[ "CWE-476" ]
CVE-2018-12460
libavcodec in FFmpeg 4.0 may trigger a NULL pointer dereference if the studio profile is incorrectly detected while converting a crafted AVI file to MPEG4, leading to a denial of service, related to idctdsp.c and mpegvideo.c.
https://nvd.nist.gov/vuln/detail/CVE-2018-12460
3,682
FFmpeg
b3332a182f8ba33a34542e4a0370f38b914ccf7d
https://github.com/FFmpeg/FFmpeg
https://github.com/FFmpeg/FFmpeg/commit/b3332a182f8ba33a34542e4a0370f38b914ccf7d
avcodec/idctdsp: Transmit studio_profile to init instead of using AVCodecContext profile These 2 fields are not always the same, it is simpler to always use the same field for detecting studio profile Fixes: null pointer dereference Fixes: ffmpeg_crash_3.avi Found-by: Thuan Pham <thuanpv@comp.nus.edu.sg>, Marcel Böhme, Andrew Santosa and Alexandru RazvanCaciulescu with AFLSmart Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
1
av_cold void ff_mpv_idct_init(MpegEncContext *s) { ff_idctdsp_init(&s->idsp, s->avctx); /* load & permutate scantables * note: only wmv uses different ones */ if (s->alternate_scan) { ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan); } else { ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct); ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct); } ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); }
214,617,330,672,650,500,000,000,000,000,000,000,000
mpegvideo.c
15,848,255,372,654,095,000,000,000,000,000,000,000
[ "CWE-476" ]
CVE-2018-12460
libavcodec in FFmpeg 4.0 may trigger a NULL pointer dereference if the studio profile is incorrectly detected while converting a crafted AVI file to MPEG4, leading to a denial of service, related to idctdsp.c and mpegvideo.c.
https://nvd.nist.gov/vuln/detail/CVE-2018-12460
3,683
FFmpeg
2fc108f60f98cd00813418a8754a46476b404a3c
https://github.com/FFmpeg/FFmpeg
https://github.com/FFmpeg/FFmpeg/commit/2fc108f60f98cd00813418a8754a46476b404a3c
avcodec/mpeg4videodec: Clear bits_per_raw_sample if it has originated from a previous instance Fixes: assertion failure Fixes: ffmpeg_crash_5.avi Found-by: Thuan Pham <thuanpv@comp.nus.edu.sg>, Marcel Böhme, Andrew Santosa and Alexandru Razvan Caciulescu with AFLSmart Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
1
int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; unsigned startcode, v; int ret; int vol = 0; /* search next start code */ align_get_bits(gb); if (s->codec_tag == AV_RL32("WV1F") && show_bits(gb, 24) == 0x575630) { skip_bits(gb, 24); if (get_bits(gb, 8) == 0xF0) goto end; } startcode = 0xff; for (;;) { if (get_bits_count(gb) >= gb->size_in_bits) { if (gb->size_in_bits == 8 && (ctx->divx_version >= 0 || ctx->xvid_build >= 0) || s->codec_tag == AV_RL32("QMP4")) { av_log(s->avctx, AV_LOG_VERBOSE, "frame skip %d\n", gb->size_in_bits); return FRAME_SKIPPED; // divx bug } else return AVERROR_INVALIDDATA; // end of stream } /* use the bits after the test */ v = get_bits(gb, 8); startcode = ((startcode << 8) | v) & 0xffffffff; if ((startcode & 0xFFFFFF00) != 0x100) continue; // no startcode if (s->avctx->debug & FF_DEBUG_STARTCODE) { av_log(s->avctx, AV_LOG_DEBUG, "startcode: %3X ", startcode); if (startcode <= 0x11F) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Start"); else if (startcode <= 0x12F) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Layer Start"); else if (startcode <= 0x13F) av_log(s->avctx, AV_LOG_DEBUG, "Reserved"); else if (startcode <= 0x15F) av_log(s->avctx, AV_LOG_DEBUG, "FGS bp start"); else if (startcode <= 0x1AF) av_log(s->avctx, AV_LOG_DEBUG, "Reserved"); else if (startcode == 0x1B0) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq Start"); else if (startcode == 0x1B1) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq End"); else if (startcode == 0x1B2) av_log(s->avctx, AV_LOG_DEBUG, "User Data"); else if (startcode == 0x1B3) av_log(s->avctx, AV_LOG_DEBUG, "Group of VOP start"); else if (startcode == 0x1B4) av_log(s->avctx, AV_LOG_DEBUG, "Video Session Error"); else if (startcode == 0x1B5) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Start"); else if (startcode == 0x1B6) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Plane start"); else if (startcode == 0x1B7) av_log(s->avctx, AV_LOG_DEBUG, "slice start"); else if (startcode == 0x1B8) av_log(s->avctx, AV_LOG_DEBUG, "extension start"); else if (startcode == 0x1B9) av_log(s->avctx, AV_LOG_DEBUG, "fgs start"); else if (startcode == 0x1BA) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object start"); else if (startcode == 0x1BB) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object Plane start"); else if (startcode == 0x1BC) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object start"); else if (startcode == 0x1BD) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object Plane start"); else if (startcode == 0x1BE) av_log(s->avctx, AV_LOG_DEBUG, "Still Texture Object start"); else if (startcode == 0x1BF) av_log(s->avctx, AV_LOG_DEBUG, "Texture Spatial Layer start"); else if (startcode == 0x1C0) av_log(s->avctx, AV_LOG_DEBUG, "Texture SNR Layer start"); else if (startcode == 0x1C1) av_log(s->avctx, AV_LOG_DEBUG, "Texture Tile start"); else if (startcode == 0x1C2) av_log(s->avctx, AV_LOG_DEBUG, "Texture Shape Layer start"); else if (startcode == 0x1C3) av_log(s->avctx, AV_LOG_DEBUG, "stuffing start"); else if (startcode <= 0x1C5) av_log(s->avctx, AV_LOG_DEBUG, "reserved"); else if (startcode <= 0x1FF) av_log(s->avctx, AV_LOG_DEBUG, "System start"); av_log(s->avctx, AV_LOG_DEBUG, " at %d\n", get_bits_count(gb)); } if (startcode >= 0x120 && startcode <= 0x12F) { if (vol) { av_log(s->avctx, AV_LOG_WARNING, "Ignoring multiple VOL headers\n"); continue; } vol++; if ((ret = decode_vol_header(ctx, gb)) < 0) return ret; } else if (startcode == USER_DATA_STARTCODE) { decode_user_data(ctx, gb); } else if (startcode == GOP_STARTCODE) { mpeg4_decode_gop_header(s, gb); } else if (startcode == VOS_STARTCODE) { mpeg4_decode_profile_level(s, gb); if (s->avctx->profile == FF_PROFILE_MPEG4_SIMPLE_STUDIO && (s->avctx->level > 0 && s->avctx->level < 9)) { s->studio_profile = 1; next_start_code_studio(gb); extension_and_user_data(s, gb, 0); } } else if (startcode == VISUAL_OBJ_STARTCODE) { if (s->studio_profile) { if ((ret = decode_studiovisualobject(ctx, gb)) < 0) return ret; } else mpeg4_decode_visual_object(s, gb); } else if (startcode == VOP_STARTCODE) { break; } align_get_bits(gb); startcode = 0xff; } end: if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) s->low_delay = 1; s->avctx->has_b_frames = !s->low_delay; if (s->studio_profile) { if (!s->avctx->bits_per_raw_sample) { av_log(s->avctx, AV_LOG_ERROR, "Missing VOL header\n"); return AVERROR_INVALIDDATA; } return decode_studio_vop_header(ctx, gb); } else return decode_vop_header(ctx, gb); }
20,215,823,895,074,912,000,000,000,000,000,000,000
mpeg4videodec.c
139,065,503,574,081,020,000,000,000,000,000,000,000
[ "CWE-20" ]
CVE-2018-12459
An inconsistent bits-per-sample value in the ff_mpeg4_decode_picture_header function in libavcodec/mpeg4videodec.c in FFmpeg 4.0 may trigger an assertion violation while converting a crafted AVI file to MPEG4, leading to a denial of service.
https://nvd.nist.gov/vuln/detail/CVE-2018-12459
3,684
FFmpeg
e1182fac1afba92a4975917823a5f644bee7e6e8
https://github.com/FFmpeg/FFmpeg
https://github.com/FFmpeg/FFmpeg/commit/e1182fac1afba92a4975917823a5f644bee7e6e8
avcodec/mpeg4videoenc: Use 64 bit for times in mpeg4_encode_gop_header() Fixes truncation Fixes Assertion n <= 31 && value < (1U << n) failed at libavcodec/put_bits.h:169 Fixes: ffmpeg_crash_2.avi Found-by: Thuan Pham <thuanpv@comp.nus.edu.sg>, Marcel Böhme, Andrew Santosa and Alexandru RazvanCaciulescu with AFLSmart Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
1
static void mpeg4_encode_gop_header(MpegEncContext *s) { int hours, minutes, seconds; int64_t time; put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, GOP_STARTCODE); time = s->current_picture_ptr->f->pts; if (s->reordered_input_picture[1]) time = FFMIN(time, s->reordered_input_picture[1]->f->pts); time = time * s->avctx->time_base.num; s->last_time_base = FFUDIV(time, s->avctx->time_base.den); seconds = FFUDIV(time, s->avctx->time_base.den); minutes = FFUDIV(seconds, 60); seconds = FFUMOD(seconds, 60); hours = FFUDIV(minutes, 60); minutes = FFUMOD(minutes, 60); hours = FFUMOD(hours , 24); put_bits(&s->pb, 5, hours); put_bits(&s->pb, 6, minutes); put_bits(&s->pb, 1, 1); put_bits(&s->pb, 6, seconds); put_bits(&s->pb, 1, !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)); put_bits(&s->pb, 1, 0); // broken link == NO ff_mpeg4_stuffing(&s->pb); }
141,072,668,666,883,980,000,000,000,000,000,000,000
mpeg4videoenc.c
245,539,507,026,391,350,000,000,000,000,000,000,000
[ "CWE-20" ]
CVE-2018-12458
An improper integer type in the mpeg4_encode_gop_header function in libavcodec/mpeg4videoenc.c in FFmpeg 2.8 and 4.0 may trigger an assertion violation while converting a crafted AVI file to MPEG4, leading to a denial of service.
https://nvd.nist.gov/vuln/detail/CVE-2018-12458
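
The fix widens the timestamp arithmetic to 64 bits so time * time_base.num cannot truncate before the hours/minutes/seconds split feeds put_bits(). The widened arithmetic in isolation (non-negative pts assumed; the real code uses FFUDIV/FFUMOD floor-division helpers):

#include <stdint.h>

static void pts_to_hms(int64_t pts, int num, int den,
                       int *h, int *m, int *s) {
    int64_t time = pts * num;          /* 64-bit multiply, no wrap */
    int64_t secs = time / den;
    *s = (int)(secs % 60);
    *m = (int)((secs / 60) % 60);
    *h = (int)((secs / 3600) % 24);
}
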
3,687
radare2
90b71c017a7fa9732fe45fd21b245ee051b1f548
https://github.com/radare/radare2
https://github.com/radare/radare2/commit/90b71c017a7fa9732fe45fd21b245ee051b1f548
Fix #10293 - Use-after-free in r_anal_bb_free()
1
R_API void r_anal_bb_free(RAnalBlock *bb) { if (!bb) { return; } r_anal_cond_free (bb->cond); R_FREE (bb->fingerprint); r_anal_diff_free (bb->diff); bb->diff = NULL; R_FREE (bb->op_bytes); r_anal_switch_op_free (bb->switch_op); bb->switch_op = NULL; bb->fingerprint = NULL; bb->cond = NULL; R_FREE (bb->label); R_FREE (bb->op_pos); R_FREE (bb->parent_reg_arena); if (bb->prev) { if (bb->prev->jumpbb == bb) { bb->prev->jumpbb = NULL; } if (bb->prev->failbb == bb) { bb->prev->failbb = NULL; } bb->prev = NULL; } if (bb->jumpbb) { bb->jumpbb->prev = NULL; bb->jumpbb = NULL; } if (bb->failbb) { bb->failbb->prev = NULL; bb->failbb = NULL; } R_FREE (bb); }
33,807,903,809,090,475,000,000,000,000,000,000,000
bb.c
272,606,549,223,286,240,000,000,000,000,000,000,000
[ "CWE-416" ]
CVE-2018-12320
There is a use after free in radare2 2.6.0 in r_anal_bb_free() in libr/anal/bb.c via a crafted Java binary file.
https://nvd.nist.gov/vuln/detail/CVE-2018-12320
3,688
mruby
faa4eaf6803bd11669bc324b4c34e7162286bfa3
https://github.com/mruby/mruby
https://github.com/mruby/mruby/commit/faa4eaf6803bd11669bc324b4c34e7162286bfa3
`mrb_class_real()` did not work for `BasicObject`; fix #4037
1
mrb_class_real(struct RClass* cl) { if (cl == 0) return NULL; while ((cl->tt == MRB_TT_SCLASS) || (cl->tt == MRB_TT_ICLASS)) { cl = cl->super; } return cl; }
120,395,878,351,176,830,000,000,000,000,000,000,000
class.c
206,993,897,454,459,300,000,000,000,000,000,000,000
[ "CWE-476" ]
CVE-2018-12249
An issue was discovered in mruby 1.4.1. There is a NULL pointer dereference in mrb_class_real because "class BasicObject" is not properly supported in class.c.
https://nvd.nist.gov/vuln/detail/CVE-2018-12249
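
BasicObject sits at the top of the chain, so cl->super can be NULL while the loop still asks for cl->tt. The fixed loop tests the pointer on every iteration; a sketch with simplified types:

#include <stddef.h>

enum tt { TT_NORMAL, TT_SCLASS, TT_ICLASS };
struct rclass { enum tt tt; struct rclass *super; };

/* Walk past singleton/include classes without dereferencing a NULL
 * super link (the BasicObject case from the CVE). */
static struct rclass *class_real(struct rclass *cl) {
    while (cl && (cl->tt == TT_SCLASS || cl->tt == TT_ICLASS))
        cl = cl->super;
    return cl;   /* may be NULL; callers must tolerate it */
}
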
3,689
mruby
778500563a9f7ceba996937dc886bd8cde29b42b
https://github.com/mruby/mruby
https://github.com/mruby/mruby/commit/778500563a9f7ceba996937dc886bd8cde29b42b
Extend stack when pushing arguments that does not fit in; fix #4038
1
fiber_switch(mrb_state *mrb, mrb_value self, mrb_int len, const mrb_value *a, mrb_bool resume, mrb_bool vmexec) { struct mrb_context *c = fiber_check(mrb, self); struct mrb_context *old_c = mrb->c; mrb_value value; fiber_check_cfunc(mrb, c); if (resume && c->status == MRB_FIBER_TRANSFERRED) { mrb_raise(mrb, E_FIBER_ERROR, "resuming transferred fiber"); } if (c->status == MRB_FIBER_RUNNING || c->status == MRB_FIBER_RESUMED) { mrb_raise(mrb, E_FIBER_ERROR, "double resume (fib)"); } if (c->status == MRB_FIBER_TERMINATED) { mrb_raise(mrb, E_FIBER_ERROR, "resuming dead fiber"); } mrb->c->status = resume ? MRB_FIBER_RESUMED : MRB_FIBER_TRANSFERRED; c->prev = resume ? mrb->c : (c->prev ? c->prev : mrb->root_c); if (c->status == MRB_FIBER_CREATED) { mrb_value *b, *e; if (len >= c->stend - c->stack) { mrb_raise(mrb, E_FIBER_ERROR, "too many arguments to fiber"); } b = c->stack+1; e = b + len; while (b<e) { *b++ = *a++; } c->cibase->argc = (int)len; value = c->stack[0] = MRB_PROC_ENV(c->ci->proc)->stack[0]; } else { value = fiber_result(mrb, a, len); } fiber_switch_context(mrb, c); if (vmexec) { c->vmexec = TRUE; value = mrb_vm_exec(mrb, c->ci[-1].proc, c->ci->pc); mrb->c = old_c; } else { MARK_CONTEXT_MODIFY(c); } return value; }
148,482,836,348,799,490,000,000,000,000,000,000,000
fiber.c
104,372,639,444,452,630,000,000,000,000,000,000,000
[ "CWE-125" ]
CVE-2018-12248
An issue was discovered in mruby 1.4.1. There is a heap-based buffer over-read associated with OP_ENTER because mrbgems/mruby-fiber/src/fiber.c does not extend the stack in cases of many arguments to fiber.
https://nvd.nist.gov/vuln/detail/CVE-2018-12248
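
Per the commit message, the fix extends the fiber's value stack when the pushed arguments do not fit, rather than letting OP_ENTER read past it. A growable-stack sketch (double stands in for mrb_value; the real code calls mruby's own stack-extension helper):

#include <stdlib.h>

struct value_stack { double *base, *end; size_t used; };

/* Ensure room for `need` more slots, growing the backing store. */
static int stack_extend(struct value_stack *st, size_t need) {
    size_t cap = (size_t)(st->end - st->base);
    if (st->used + need <= cap)
        return 0;
    size_t ncap = st->used + need;
    double *nb = realloc(st->base, ncap * sizeof(*nb));
    if (!nb)
        return -1;
    st->base = nb;
    st->end = nb + ncap;
    return 0;
}
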
3,690
mruby
55edae0226409de25e59922807cb09acb45731a2
https://github.com/mruby/mruby
https://github.com/mruby/mruby/commit/55edae0226409de25e59922807cb09acb45731a2
Allow `Object#clone` to copy frozen status only; fix #4036 Copying all flags from the original object may overwrite the clone's flags e.g. the embedded flag.
1
mrb_obj_clone(mrb_state *mrb, mrb_value self) { struct RObject *p; mrb_value clone; if (mrb_immediate_p(self)) { mrb_raisef(mrb, E_TYPE_ERROR, "can't clone %S", self); } if (mrb_type(self) == MRB_TT_SCLASS) { mrb_raise(mrb, E_TYPE_ERROR, "can't clone singleton class"); } p = (struct RObject*)mrb_obj_alloc(mrb, mrb_type(self), mrb_obj_class(mrb, self)); p->c = mrb_singleton_class_clone(mrb, self); mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)p->c); clone = mrb_obj_value(p); init_copy(mrb, clone, self); p->flags = mrb_obj_ptr(self)->flags; return clone; }
229,363,253,371,945,330,000,000,000,000,000,000,000
kernel.c
163,099,176,507,490,390,000,000,000,000,000,000,000
[ "CWE-476" ]
CVE-2018-12247
An issue was discovered in mruby 1.4.1. There is a NULL pointer dereference in mrb_class, related to certain .clone usage, because mrb_obj_clone in kernel.c copies flags other than the MRB_FLAG_IS_FROZEN flag (e.g., the embedded flag).
https://nvd.nist.gov/vuln/detail/CVE-2018-12247
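
Per the commit message, copying every flag let the clone inherit internal layout bits such as the embedded flag; the fix transfers only the frozen status. The masking pattern, with the frozen bit's name and position assumed for illustration:

#define FLAG_FROZEN (1u << 20)   /* assumption: stands in for mruby's frozen flag */

/* Carry over only the frozen bit; keep the clone's own layout flags. */
static unsigned clone_flags(unsigned clone_fl, unsigned orig_fl) {
    return (clone_fl & ~FLAG_FROZEN) | (orig_fl & FLAG_FROZEN);
}
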
3,691
linux
6d8c50dcb029872b298eea68cc6209c866fd3e14
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/6d8c50dcb029872b298eea68cc6209c866fd3e14
socket: close race condition between sock_close() and sockfs_setattr() fchownat() doesn't even hold refcnt of fd until it figures out fd is really needed (otherwise is ignored) and releases it after it resolves the path. This means sock_close() could race with sockfs_setattr(), which leads to a NULL pointer dereference since typically we set sock->sk to NULL in ->release(). As pointed out by Al, this is unique to sockfs. So we can fix this in socket layer by acquiring inode_lock in sock_close() and checking against NULL in sockfs_setattr(). sock_release() is called in many places, only the sock_close() path matters here. And fortunately, this should not affect normal sock_close() as it is only called when the last fd refcnt is gone. It only affects sock_close() with a parallel sockfs_setattr() in progress, which is not common. Fixes: 86741ec25462 ("net: core: Add a UID field to struct sock.") Reported-by: shankarapailoor <shankarapailoor@gmail.com> Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp> Cc: Lorenzo Colitti <lorenzo@google.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1
static int sock_close(struct inode *inode, struct file *filp) { sock_release(SOCKET_I(inode)); return 0; }
84,494,788,491,717,655,000,000,000,000,000,000,000
socket.c
177,755,458,975,155,360,000,000,000,000,000,000,000
[ "CWE-362" ]
CVE-2018-12232
In net/socket.c in the Linux kernel through 4.17.1, there is a race condition between fchownat and close in cases where they target the same socket file descriptor, related to the sock_close and sockfs_setattr functions. fchownat does not increment the file descriptor reference count, which allows close to set the socket to NULL during fchownat's execution, leading to a NULL pointer dereference and system crash.
https://nvd.nist.gov/vuln/detail/CVE-2018-12232
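
The fix serializes release against setattr with the inode lock and makes the attribute path tolerate an already-cleared sock->sk. A pthread sketch of the same two-sided discipline (not kernel code):

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;
static void *sk;                       /* stands in for sock->sk */

/* Close path: tear down under the lock. */
static void do_close(void) {
    pthread_mutex_lock(&inode_lock);
    sk = NULL;                         /* ->release() clears the socket */
    pthread_mutex_unlock(&inode_lock);
}

/* Attribute path: observe sk under the same lock and bail if the
 * socket is already gone instead of dereferencing NULL. */
static int do_setattr(void) {
    int ret = 0;
    pthread_mutex_lock(&inode_lock);
    if (!sk)
        ret = -1;                      /* raced with close */
    /* else: update the owner through sk */
    pthread_mutex_unlock(&inode_lock);
    return ret;
}
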
3,693
mruby
b64ce17852b180dfeea81cf458660be41a78974d
https://github.com/mruby/mruby
https://github.com/mruby/mruby/commit/b64ce17852b180dfeea81cf458660be41a78974d
Should not call `initialize_copy` for `TT_ICLASS`; fix #4027 Since `TT_ICLASS` is a internal object that should never be revealed to Ruby world.
1
init_copy(mrb_state *mrb, mrb_value dest, mrb_value obj) { switch (mrb_type(obj)) { case MRB_TT_CLASS: case MRB_TT_MODULE: copy_class(mrb, dest, obj); mrb_iv_copy(mrb, dest, obj); mrb_iv_remove(mrb, dest, mrb_intern_lit(mrb, "__classname__")); break; case MRB_TT_OBJECT: case MRB_TT_SCLASS: case MRB_TT_HASH: case MRB_TT_DATA: case MRB_TT_EXCEPTION: mrb_iv_copy(mrb, dest, obj); break; case MRB_TT_ISTRUCT: mrb_istruct_copy(dest, obj); break; default: break; } mrb_funcall(mrb, dest, "initialize_copy", 1, obj); }
180,805,267,474,604,700,000,000,000,000,000,000,000
kernel.c
284,772,638,438,592,230,000,000,000,000,000,000,000
[ "CWE-824" ]
CVE-2018-11743
The init_copy function in kernel.c in mruby 1.4.1 makes initialize_copy calls for TT_ICLASS objects, which allows attackers to cause a denial of service (mrb_hash_keys uninitialized pointer and application crash) or possibly have unspecified other impact.
https://nvd.nist.gov/vuln/detail/CVE-2018-11743
3,696
Espruino
ce1924193862d58cb43d3d4d9dada710a8361b89
https://github.com/espruino/Espruino
https://github.com/espruino/Espruino/commit/ce1924193862d58cb43d3d4d9dada710a8361b89
fix jsvGetString regression
1
size_t jsvGetString(const JsVar *v, char *str, size_t len) { assert(len>0); const char *s = jsvGetConstString(v); if (s) { /* don't use strncpy here because we don't * want to pad the entire buffer with zeros */ len--; int l = 0; while (*s && l<len) { str[l] = s[l]; l++; } str[l] = 0; return l; } else if (jsvIsInt(v)) { itostr(v->varData.integer, str, 10); return strlen(str); } else if (jsvIsFloat(v)) { ftoa_bounded(v->varData.floating, str, len); return strlen(str); } else if (jsvHasCharacterData(v)) { assert(!jsvIsStringExt(v)); size_t l = len; JsvStringIterator it; jsvStringIteratorNewConst(&it, v, 0); while (jsvStringIteratorHasChar(&it)) { if (l--<=1) { *str = 0; jsvStringIteratorFree(&it); return len; } *(str++) = jsvStringIteratorGetChar(&it); jsvStringIteratorNext(&it); } jsvStringIteratorFree(&it); *str = 0; return len-l; } else { JsVar *stringVar = jsvAsString((JsVar*)v, false); // we know we're casting to non-const here if (stringVar) { size_t l = jsvGetString(stringVar, str, len); // call again - but this time with converted var jsvUnLock(stringVar); return l; } else { str[0] = 0; jsExceptionHere(JSET_INTERNALERROR, "Variable type cannot be converted to string"); return 0; } } }
159,195,423,267,930,780,000,000,000,000,000,000,000
None
null
[ "CWE-119" ]
CVE-2018-11596
Espruino before 1.99 allows attackers to cause a denial of service (application crash) with a user crafted input file via a Buffer Overflow during syntax parsing because a check for '\0' is made for the wrong array element in jsvar.c.
https://nvd.nist.gov/vuln/detail/CVE-2018-11596
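
The regression is visible in the snippet above: the copy loop tests *s, the first character, instead of s[l], the element being copied, so a short constant string lets the loop read past its terminator. The corrected loop in isolation (len > 0 is assumed, as the original asserts):

#include <stddef.h>

/* Bounded copy that tests the element actually being copied. */
static size_t copy_bounded(const char *s, char *str, size_t len) {
    size_t l = 0;
    len--;                          /* reserve space for the terminator */
    while (s[l] && l < len) {       /* fix: was `while (*s && l < len)` */
        str[l] = s[l];
        l++;
    }
    str[l] = 0;
    return l;
}
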
3,699
Espruino
8a44b04b584b3d3ab1cb68fed410f7ecb165e50e
https://github.com/espruino/Espruino
https://github.com/espruino/Espruino/commit/8a44b04b584b3d3ab1cb68fed410f7ecb165e50e
Add height check for Graphics.createArrayBuffer(...vertical_byte:true) (fix #1421)
1
JsVar *jswrap_graphics_createArrayBuffer(int width, int height, int bpp, JsVar *options) { if (width<=0 || height<=0 || width>32767 || height>32767) { jsExceptionHere(JSET_ERROR, "Invalid Size"); return 0; } if (!isValidBPP(bpp)) { jsExceptionHere(JSET_ERROR, "Invalid BPP"); return 0; } JsVar *parent = jspNewObject(0, "Graphics"); if (!parent) return 0; // low memory JsGraphics gfx; graphicsStructInit(&gfx); gfx.data.type = JSGRAPHICSTYPE_ARRAYBUFFER; gfx.data.flags = JSGRAPHICSFLAGS_NONE; gfx.graphicsVar = parent; gfx.data.width = (unsigned short)width; gfx.data.height = (unsigned short)height; gfx.data.bpp = (unsigned char)bpp; if (jsvIsObject(options)) { if (jsvGetBoolAndUnLock(jsvObjectGetChild(options, "zigzag", 0))) gfx.data.flags = (JsGraphicsFlags)(gfx.data.flags | JSGRAPHICSFLAGS_ARRAYBUFFER_ZIGZAG); if (jsvGetBoolAndUnLock(jsvObjectGetChild(options, "msb", 0))) gfx.data.flags = (JsGraphicsFlags)(gfx.data.flags | JSGRAPHICSFLAGS_ARRAYBUFFER_MSB); if (jsvGetBoolAndUnLock(jsvObjectGetChild(options, "vertical_byte", 0))) { if (gfx.data.bpp==1) gfx.data.flags = (JsGraphicsFlags)(gfx.data.flags | JSGRAPHICSFLAGS_ARRAYBUFFER_VERTICAL_BYTE); else jsWarn("vertical_byte only works for 1bpp ArrayBuffers\n"); } JsVar *colorv = jsvObjectGetChild(options, "color_order", 0); if (colorv) { if (jsvIsStringEqual(colorv, "rgb")) ; // The default else if (!jsvIsStringEqual(colorv, "brg")) gfx.data.flags = (JsGraphicsFlags)(gfx.data.flags | JSGRAPHICSFLAGS_COLOR_BRG); else if (!jsvIsStringEqual(colorv, "bgr")) gfx.data.flags = (JsGraphicsFlags)(gfx.data.flags | JSGRAPHICSFLAGS_COLOR_BGR); else if (!jsvIsStringEqual(colorv, "gbr")) gfx.data.flags = (JsGraphicsFlags)(gfx.data.flags | JSGRAPHICSFLAGS_COLOR_GBR); else if (!jsvIsStringEqual(colorv, "grb")) gfx.data.flags = (JsGraphicsFlags)(gfx.data.flags | JSGRAPHICSFLAGS_COLOR_GRB); else if (!jsvIsStringEqual(colorv, "rbg")) gfx.data.flags = (JsGraphicsFlags)(gfx.data.flags | JSGRAPHICSFLAGS_COLOR_RBG); else jsWarn("color_order must be 3 characters"); jsvUnLock(colorv); } } lcdInit_ArrayBuffer(&gfx); graphicsSetVar(&gfx); return parent; }
250,096,056,660,193,630,000,000,000,000,000,000,000
jswrap_graphics.c
260,225,868,029,461,130,000,000,000,000,000,000,000
[ "CWE-125" ]
CVE-2018-11592
Espruino before 1.98 allows attackers to cause a denial of service (application crash) with a user crafted input file via an Out-of-bounds Read during syntax parsing in which certain height validation is missing in libs/graphics/jswrap_graphics.c.
https://nvd.nist.gov/vuln/detail/CVE-2018-11592
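
Note on the fix (CVE-2018-11592): the commit message only says a height check was added for `vertical_byte:true`; the exact condition is not shown in this record. A hedged sketch, assuming the constraint is that a 1bpp vertical-byte buffer packs 8 vertical pixels per byte and therefore needs a height divisible by 8 (error handling simplified; the real patch may differ):

/* assumption: vertical_byte requires height % 8 == 0 */
if (jsvGetBoolAndUnLock(jsvObjectGetChild(options, "vertical_byte", 0))) {
  if (gfx.data.bpp==1) {
    if (gfx.data.height&7) {
      jsExceptionHere(JSET_ERROR, "height must be a multiple of 8 when vertical_byte is set");
      return 0;
    }
    gfx.data.flags = (JsGraphicsFlags)(gfx.data.flags | JSGRAPHICSFLAGS_ARRAYBUFFER_VERTICAL_BYTE);
  } else jsWarn("vertical_byte only works for 1bpp ArrayBuffers\n");
}
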
3,700
Espruino
b6d362f6a1f2de0b3e7604848116efb509196bf4
https://github.com/espruino/Espruino
https://github.com/espruino/Espruino/commit/b6d362f6a1f2de0b3e7604848116efb509196bf4
Add sanity check for debug trace print statement (fix #1420)
1
void _jsvTrace(JsVar *var, int indent, JsVar *baseVar, int level) { #ifdef SAVE_ON_FLASH jsiConsolePrint("Trace unimplemented in this version.\n"); #else int i; for (i=0;i<indent;i++) jsiConsolePrint(" "); if (!var) { jsiConsolePrint("undefined"); return; } jsvTraceLockInfo(var); int lowestLevel = _jsvTraceGetLowestLevel(baseVar, var); if (lowestLevel < level) { jsiConsolePrint("...\n"); return; } if (jsvIsName(var)) jsiConsolePrint("Name "); char endBracket = ' '; if (jsvIsObject(var)) { jsiConsolePrint("Object { "); endBracket = '}'; } else if (jsvIsArray(var)) { jsiConsolePrintf("Array(%d) [ ", var->varData.integer); endBracket = ']'; } else if (jsvIsNativeFunction(var)) { jsiConsolePrintf("NativeFunction 0x%x (%d) { ", var->varData.native.ptr, var->varData.native.argTypes); endBracket = '}'; } else if (jsvIsFunction(var)) { jsiConsolePrint("Function { "); if (jsvIsFunctionReturn(var)) jsiConsolePrint("return "); endBracket = '}'; } else if (jsvIsPin(var)) jsiConsolePrintf("Pin %d", jsvGetInteger(var)); else if (jsvIsInt(var)) jsiConsolePrintf("Integer %d", jsvGetInteger(var)); else if (jsvIsBoolean(var)) jsiConsolePrintf("Bool %s", jsvGetBool(var)?"true":"false"); else if (jsvIsFloat(var)) jsiConsolePrintf("Double %f", jsvGetFloat(var)); else if (jsvIsFunctionParameter(var)) jsiConsolePrintf("Param %q ", var); else if (jsvIsArrayBufferName(var)) jsiConsolePrintf("ArrayBufferName[%d] ", jsvGetInteger(var)); else if (jsvIsArrayBuffer(var)) jsiConsolePrintf("%s ", jswGetBasicObjectName(var)); // way to get nice name else if (jsvIsString(var)) { size_t blocks = 1; if (jsvGetLastChild(var)) { JsVar *v = jsvLock(jsvGetLastChild(var)); blocks += jsvCountJsVarsUsed(v); jsvUnLock(v); } if (jsvIsFlatString(var)) { blocks += jsvGetFlatStringBlocks(var); } jsiConsolePrintf("%sString [%d blocks] %q", jsvIsFlatString(var)?"Flat":(jsvIsNativeString(var)?"Native":""), blocks, var); } else { jsiConsolePrintf("Unknown %d", var->flags & (JsVarFlags)~(JSV_LOCK_MASK)); } if (jsvIsNameInt(var)) { jsiConsolePrintf("= int %d\n", (int)jsvGetFirstChildSigned(var)); return; } else if (jsvIsNameIntBool(var)) { jsiConsolePrintf("= bool %s\n", jsvGetFirstChild(var)?"true":"false"); return; } if (jsvHasSingleChild(var)) { JsVar *child = jsvGetFirstChild(var) ? jsvLock(jsvGetFirstChild(var)) : 0; _jsvTrace(child, indent+2, baseVar, level+1); jsvUnLock(child); } else if (jsvHasChildren(var)) { JsvIterator it; jsvIteratorNew(&it, var, JSIF_DEFINED_ARRAY_ElEMENTS); bool first = true; while (jsvIteratorHasElement(&it) && !jspIsInterrupted()) { if (first) jsiConsolePrintf("\n"); first = false; JsVar *child = jsvIteratorGetKey(&it); _jsvTrace(child, indent+2, baseVar, level+1); jsvUnLock(child); jsiConsolePrintf("\n"); jsvIteratorNext(&it); } jsvIteratorFree(&it); if (!first) for (i=0;i<indent;i++) jsiConsolePrint(" "); } jsiConsolePrintf("%c", endBracket); #endif }
53,958,977,605,870,530,000,000,000,000,000,000,000
None
null
[ "CWE-476" ]
CVE-2018-11591
Espruino before 1.98 allows attackers to cause a denial of service (application crash) with a user crafted input file via a NULL pointer dereference during syntax parsing. This was addressed by adding validation for a debug trace print statement in jsvar.c.
https://nvd.nist.gov/vuln/detail/CVE-2018-11591
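
Note on the fix (CVE-2018-11591): the commit message only names "a sanity check for a debug trace print statement" without saying which one. As a pattern-level illustration only (not the upstream patch), the fix class is a NULL guard before a trace-time dereference, in the style the function already uses for single-child variables:

/* illustrative pattern: never lock or print through a child reference that may be 0 */
JsVar *child = jsvGetFirstChild(var) ? jsvLock(jsvGetFirstChild(var)) : 0;
if (child) {
  _jsvTrace(child, indent+2, baseVar, level+1);
  jsvUnLock(child);
}
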
3,701
linux
0a0b98734479aa5b3c671d5190e86273372cab95
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/0a0b98734479aa5b3c671d5190e86273372cab95
compat: fix 4-byte infoleak via uninitialized struct field Commit 3a4d44b61625 ("ntp: Move adjtimex related compat syscalls to native counterparts") removed the memset() in compat_get_timex(). Since then, the compat adjtimex syscall can invoke do_adjtimex() with an uninitialized ->tai. If do_adjtimex() doesn't write to ->tai (e.g. because the arguments are invalid), compat_put_timex() then copies the uninitialized ->tai field to userspace. Fix it by adding the memset() back. Fixes: 3a4d44b61625 ("ntp: Move adjtimex related compat syscalls to native counterparts") Signed-off-by: Jann Horn <jannh@google.com> Acked-by: Kees Cook <keescook@chromium.org> Acked-by: Al Viro <viro@zeniv.linux.org.uk> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1
int compat_get_timex(struct timex *txc, const struct compat_timex __user *utp) { struct compat_timex tx32; if (copy_from_user(&tx32, utp, sizeof(struct compat_timex))) return -EFAULT; txc->modes = tx32.modes; txc->offset = tx32.offset; txc->freq = tx32.freq; txc->maxerror = tx32.maxerror; txc->esterror = tx32.esterror; txc->status = tx32.status; txc->constant = tx32.constant; txc->precision = tx32.precision; txc->tolerance = tx32.tolerance; txc->time.tv_sec = tx32.time.tv_sec; txc->time.tv_usec = tx32.time.tv_usec; txc->tick = tx32.tick; txc->ppsfreq = tx32.ppsfreq; txc->jitter = tx32.jitter; txc->shift = tx32.shift; txc->stabil = tx32.stabil; txc->jitcnt = tx32.jitcnt; txc->calcnt = tx32.calcnt; txc->errcnt = tx32.errcnt; txc->stbcnt = tx32.stbcnt; return 0; }
24,540,572,475,707,357,000,000,000,000,000,000,000
compat.c
17,743,863,291,646,434,000,000,000,000,000,000,000
[ "CWE-200" ]
CVE-2018-11508
The compat_get_timex function in kernel/compat.c in the Linux kernel before 4.16.9 allows local users to obtain sensitive information from kernel memory via adjtimex.
https://nvd.nist.gov/vuln/detail/CVE-2018-11508
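
Note on the fix (CVE-2018-11508): the commit message states the change directly, restore the `memset()` so that fields `do_adjtimex()` may leave untouched (notably `->tai`) are never copied back to userspace uninitialized. The restored prologue:

int compat_get_timex(struct timex *txc, const struct compat_timex __user *utp)
{
	struct compat_timex tx32;

	memset(txc, 0, sizeof(struct timex));	/* restored: zeroes ->tai and any other
						 * field the copy below never writes */
	if (copy_from_user(&tx32, utp, sizeof(struct compat_timex)))
		return -EFAULT;
	/* ... field-by-field copy exactly as in the function above ... */
	return 0;
}
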
3,702
radare2
77c47cf873dd55b396da60baa2ca83bbd39e4add
https://github.com/radare/radare2
https://github.com/radare/radare2/commit/77c47cf873dd55b396da60baa2ca83bbd39e4add
Fix #9903 - oobread in RAnal.sh
1
static int sh_op(RAnal *anal, RAnalOp *op, ut64 addr, const ut8 *data, int len) { ut8 op_MSB,op_LSB; int ret; if (!data) return 0; memset (op, '\0', sizeof (RAnalOp)); op->addr = addr; op->type = R_ANAL_OP_TYPE_UNK; op->jump = op->fail = -1; op->ptr = op->val = -1; op->size = 2; op_MSB = anal->big_endian? data[0]: data[1]; op_LSB = anal->big_endian? data[1]: data[0]; ret = first_nibble_decode[(op_MSB>>4) & 0x0F](anal, op, (ut16)(op_MSB<<8 | op_LSB)); return ret; }
249,622,554,086,958,550,000,000,000,000,000,000,000
anal_sh.c
165,574,273,690,825,930,000,000,000,000,000,000,000
[ "CWE-125" ]
CVE-2018-11384
The sh_op() function in radare2 2.5.0 allows remote attackers to cause a denial of service (heap-based out-of-bounds read and application crash) via a crafted ELF file.
https://nvd.nist.gov/vuln/detail/CVE-2018-11384
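
Note on the fix (CVE-2018-11384): `sh_op()` unconditionally reads `data[0]` and `data[1]`, so a one-byte buffer is read out of bounds. A sketch of the guard, assuming the fix simply rejects buffers shorter than one 16-bit SuperH opcode:

/* hypothetical guard: both opcode bytes must be present before decoding */
if (!data || len < 2) {
	return 0;
}
op_MSB = anal->big_endian ? data[0] : data[1];
op_LSB = anal->big_endian ? data[1] : data[0];
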
3,703
radare2
d04c78773f6959bcb427453f8e5b9824d5ba9eff
https://github.com/radare/radare2
https://github.com/radare/radare2/commit/d04c78773f6959bcb427453f8e5b9824d5ba9eff
Fix #10091 - crash in AVR analysis
1
INST_HANDLER (sts) { // STS k, Rr int r = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x1) << 4); int k = (buf[3] << 8) | buf[2]; op->ptr = k; ESIL_A ("r%d,", r); __generic_ld_st (op, "ram", 0, 1, 0, k, 1); op->cycles = 2; }
145,404,004,174,958,070,000,000,000,000,000,000,000
anal_avr.c
49,931,515,066,285,090,000,000,000,000,000,000,000
[ "CWE-125" ]
CVE-2018-11382
The _inst__sts() function in radare2 2.5.0 allows remote attackers to cause a denial of service (heap-based out-of-bounds read and application crash) via a crafted binary file.
https://nvd.nist.gov/vuln/detail/CVE-2018-11382
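
Note on the fix (CVE-2018-11382): the STS handler reads `buf[0]` through `buf[3]` without knowing how many bytes were actually supplied. A sketch of the guard, assuming the `len` parameter of `INST_HANDLER` is in scope (as the `sbrx` handler in record 3,713 below demonstrates):

INST_HANDLER (sts) {	// STS k, Rr
	if (len < 4) {	/* hypothetical: this 32-bit instruction needs 4 buffer bytes */
		return;
	}
	int r = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x1) << 4);
	int k = (buf[3] << 8) | buf[2];
	/* ... rest as in the function above ... */
}
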
3,704
radare2
3fcf41ed96ffa25b38029449520c8d0a198745f3
https://github.com/radare/radare2
https://github.com/radare/radare2/commit/3fcf41ed96ffa25b38029449520c8d0a198745f3
Fix #9902 - Fix oobread in RBin.string_scan_range
1
static int string_scan_range(RList *list, RBinFile *bf, int min, const ut64 from, const ut64 to, int type) { ut8 tmp[R_STRING_SCAN_BUFFER_SIZE]; ut64 str_start, needle = from; int count = 0, i, rc, runes; int str_type = R_STRING_TYPE_DETECT; if (type == -1) { type = R_STRING_TYPE_DETECT; } if (from >= to) { eprintf ("Invalid range to find strings 0x%llx .. 0x%llx\n", from, to); return -1; } ut8 *buf = calloc (to - from, 1); if (!buf || !min) { return -1; } r_buf_read_at (bf->buf, from, buf, to - from); while (needle < to) { rc = r_utf8_decode (buf + needle - from, to - needle, NULL); if (!rc) { needle++; continue; } if (type == R_STRING_TYPE_DETECT) { char *w = (char *)buf + needle + rc - from; if ((to - needle) > 5) { bool is_wide32 = needle + rc + 2 < to && !w[0] && !w[1] && !w[2] && w[3] && !w[4]; if (is_wide32) { str_type = R_STRING_TYPE_WIDE32; } else { bool is_wide = needle + rc + 2 < to && !w[0] && w[1] && !w[2]; str_type = is_wide? R_STRING_TYPE_WIDE: R_STRING_TYPE_ASCII; } } else { str_type = R_STRING_TYPE_ASCII; } } else { str_type = type; } runes = 0; str_start = needle; /* Eat a whole C string */ for (rc = i = 0; i < sizeof (tmp) - 3 && needle < to; i += rc) { RRune r = {0}; if (str_type == R_STRING_TYPE_WIDE32) { rc = r_utf32le_decode (buf + needle - from, to - needle, &r); if (rc) { rc = 4; } } else if (str_type == R_STRING_TYPE_WIDE) { rc = r_utf16le_decode (buf + needle - from, to - needle, &r); if (rc == 1) { rc = 2; } } else { rc = r_utf8_decode (buf + needle - from, to - needle, &r); if (rc > 1) { str_type = R_STRING_TYPE_UTF8; } } /* Invalid sequence detected */ if (!rc) { needle++; break; } needle += rc; if (r_isprint (r) && r != '\\') { if (str_type == R_STRING_TYPE_WIDE32) { if (r == 0xff) { r = 0; } } rc = r_utf8_encode (&tmp[i], r); runes++; /* Print the escape code */ } else if (r && r < 0x100 && strchr ("\b\v\f\n\r\t\a\033\\", (char)r)) { if ((i + 32) < sizeof (tmp) && r < 93) { tmp[i + 0] = '\\'; tmp[i + 1] = " abtnvfr e " " " " " " \\"[r]; } else { break; } rc = 2; runes++; } else { /* \0 marks the end of C-strings */ break; } } tmp[i++] = '\0'; if (runes >= min) { if (str_type == R_STRING_TYPE_ASCII) { int j; for (j = 0; j < i; j++) { char ch = tmp[j]; if (ch != '\n' && ch != '\r' && ch != '\t') { if (!IS_PRINTABLE (tmp[j])) { continue; } } } } RBinString *bs = R_NEW0 (RBinString); if (!bs) { break; } bs->type = str_type; bs->length = runes; bs->size = needle - str_start; bs->ordinal = count++; switch (str_type) { case R_STRING_TYPE_WIDE: if (str_start -from> 1) { const ut8 *p = buf + str_start - 2 - from; if (p[0] == 0xff && p[1] == 0xfe) { str_start -= 2; // \xff\xfe } } break; case R_STRING_TYPE_WIDE32: if (str_start -from> 3) { const ut8 *p = buf + str_start - 4 - from; if (p[0] == 0xff && p[1] == 0xfe) { str_start -= 4; // \xff\xfe\x00\x00 } } break; } bs->paddr = bs->vaddr = str_start; bs->string = r_str_ndup ((const char *)tmp, i); if (list) { r_list_append (list, bs); } else { print_string (bs, bf); r_bin_string_free (bs); } } } free (buf); return count; }
227,698,591,978,418,270,000,000,000,000,000,000,000
None
null
[ "CWE-125" ]
CVE-2018-11381
The string_scan_range() function in radare2 2.5.0 allows remote attackers to cause a denial of service (heap-based out-of-bounds read and application crash) via a crafted binary file.
https://nvd.nist.gov/vuln/detail/CVE-2018-11381
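
Note on the fix (CVE-2018-11381): the wide-string probe reads `w[0]`..`w[4]`, where `w` starts at `needle + rc`, but the guard only ensures `to - needle > 5`, so the probe can run past `to` whenever `rc > 1`. One plausible shape of the tightened bound (not necessarily the upstream patch):

char *w = (char *)buf + needle + rc - from;
if (needle + rc + 4 < to) {	/* every probed byte w[0..4] now stays below `to` */
	bool is_wide32 = !w[0] && !w[1] && !w[2] && w[3] && !w[4];
	/* ... */
}
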
3,707
radare2
60208765887f5f008b3b9a883f3addc8bdb9c134
https://github.com/radare/radare2
https://github.com/radare/radare2/commit/60208765887f5f008b3b9a883f3addc8bdb9c134
Fix #9970 - heap oobread in mach0 parser (#10026)
1
static int parse_import_ptr(struct MACH0_(obj_t)* bin, struct reloc_t *reloc, int idx) { int i, j, sym, wordsize; ut32 stype; wordsize = MACH0_(get_bits)(bin) / 8; if (idx < 0 || idx >= bin->nsymtab) { return 0; } if ((bin->symtab[idx].n_desc & REFERENCE_TYPE) == REFERENCE_FLAG_UNDEFINED_LAZY) { stype = S_LAZY_SYMBOL_POINTERS; } else { stype = S_NON_LAZY_SYMBOL_POINTERS; } reloc->offset = 0; reloc->addr = 0; reloc->addend = 0; #define CASE(T) case (T / 8): reloc->type = R_BIN_RELOC_ ## T; break switch (wordsize) { CASE(8); CASE(16); CASE(32); CASE(64); default: return false; } #undef CASE for (i = 0; i < bin->nsects; i++) { if ((bin->sects[i].flags & SECTION_TYPE) == stype) { for (j=0, sym=-1; bin->sects[i].reserved1+j < bin->nindirectsyms; j++) if (idx == bin->indirectsyms[bin->sects[i].reserved1 + j]) { sym = j; break; } reloc->offset = sym == -1 ? 0 : bin->sects[i].offset + sym * wordsize; reloc->addr = sym == -1 ? 0 : bin->sects[i].addr + sym * wordsize; return true; } } return false; }
318,868,786,944,603,400,000,000,000,000,000,000,000
mach0.c
163,775,332,068,251,860,000,000,000,000,000,000,000
[ "CWE-125" ]
CVE-2018-11380
The parse_import_ptr() function in radare2 2.5.0 allows remote attackers to cause a denial of service (heap-based out-of-bounds read and application crash) via a crafted Mach-O file.
https://nvd.nist.gov/vuln/detail/CVE-2018-11380
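
Note on the fix (CVE-2018-11380): the record does not show which access overflows; plausible candidates are `bin->symtab[idx]` and `bin->indirectsyms[...]` when those tables were never successfully loaded from the crafted file. Illustrative guards only, not the verified upstream patch:

/* hypothetical: bail out early when either table is absent */
if (!bin->symtab || !bin->indirectsyms) {
	return 0;
}
if (idx < 0 || idx >= bin->nsymtab) {
	return 0;
}
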
3,708
radare2
4e1cf0d3e6f6fe2552a269def0af1cd2403e266c
https://github.com/radare/radare2
https://github.com/radare/radare2/commit/4e1cf0d3e6f6fe2552a269def0af1cd2403e266c
Fix crash in pe
1
static int get_debug_info(struct PE_(r_bin_pe_obj_t)* bin, PE_(image_debug_directory_entry)* dbg_dir_entry, ut8* dbg_data, int dbg_data_len, SDebugInfo* res) { #define SIZEOF_FILE_NAME 255 int i = 0; const char* basename; if (!dbg_data) { return 0; } switch (dbg_dir_entry->Type) { case IMAGE_DEBUG_TYPE_CODEVIEW: if (!strncmp ((char*) dbg_data, "RSDS", 4)) { SCV_RSDS_HEADER rsds_hdr; init_rsdr_hdr (&rsds_hdr); if (!get_rsds (dbg_data, dbg_data_len, &rsds_hdr)) { bprintf ("Warning: Cannot read PE debug info\n"); return 0; } snprintf (res->guidstr, GUIDSTR_LEN, "%08x%04x%04x%02x%02x%02x%02x%02x%02x%02x%02x%x", rsds_hdr.guid.data1, rsds_hdr.guid.data2, rsds_hdr.guid.data3, rsds_hdr.guid.data4[0], rsds_hdr.guid.data4[1], rsds_hdr.guid.data4[2], rsds_hdr.guid.data4[3], rsds_hdr.guid.data4[4], rsds_hdr.guid.data4[5], rsds_hdr.guid.data4[6], rsds_hdr.guid.data4[7], rsds_hdr.age); basename = r_file_basename ((char*) rsds_hdr.file_name); strncpy (res->file_name, (const char*) basename, sizeof (res->file_name)); res->file_name[sizeof (res->file_name) - 1] = 0; rsds_hdr.free ((struct SCV_RSDS_HEADER*) &rsds_hdr); } else if (strncmp ((const char*) dbg_data, "NB10", 4) == 0) { SCV_NB10_HEADER nb10_hdr; init_cv_nb10_header (&nb10_hdr); get_nb10 (dbg_data, &nb10_hdr); snprintf (res->guidstr, sizeof (res->guidstr), "%x%x", nb10_hdr.timestamp, nb10_hdr.age); strncpy (res->file_name, (const char*) nb10_hdr.file_name, sizeof(res->file_name) - 1); res->file_name[sizeof (res->file_name) - 1] = 0; nb10_hdr.free ((struct SCV_NB10_HEADER*) &nb10_hdr); } else { bprintf ("CodeView section not NB10 or RSDS\n"); return 0; } break; default: return 0; } while (i < 33) { res->guidstr[i] = toupper ((int) res->guidstr[i]); i++; } return 1; }
38,287,125,071,570,595,000,000,000,000,000,000,000
None
null
[ "CWE-125" ]
CVE-2018-11379
The get_debug_info() function in radare2 2.5.0 allows remote attackers to cause a denial of service (heap-based out-of-bounds read and application crash) via a crafted PE file.
https://nvd.nist.gov/vuln/detail/CVE-2018-11379
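
Note on the fix (CVE-2018-11379): `get_debug_info()` compares four magic bytes with `strncmp()` before ever checking `dbg_data_len`, and the NB10 branch parses via `get_nb10(dbg_data, &nb10_hdr)` with no length at all. A hedged sketch of the entry guard (the real patch may instead thread `dbg_data_len` into `get_nb10`):

/* hypothetical: the magic comparison needs at least 4 readable bytes */
if (!dbg_data || dbg_data_len < 4) {
	return 0;
}
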
3,710
radare2
25a3703ef2e015bbe1d1f16f6b2f63bb10dd34f4
https://github.com/radare/radare2
https://github.com/radare/radare2/commit/25a3703ef2e015bbe1d1f16f6b2f63bb10dd34f4
Fix invalid free in RAnal.avr
1
INST_HANDLER (sbrx) { // SBRC Rr, b int b = buf[0] & 0x7; int r = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x01) << 4); RAnalOp next_op; avr_op_analyze (anal, &next_op, op->addr + op->size, buf + op->size, len - op->size, cpu); r_strbuf_fini (&next_op.esil); op->jump = op->addr + next_op.size + 2; op->cycles = 1; // XXX: This is a bug, because depends on eval state, ESIL_A ("%d,1,<<,r%d,&,", b, r); // Rr(b) ESIL_A ((buf[1] & 0xe) == 0xc ? "!," // SBRC => branch if cleared : "!,!,"); // SBRS => branch if set ESIL_A ("?{,%"PFMT64d",pc,=,},", op->jump); // ?true => jmp }
117,498,818,528,294,220,000,000,000,000,000,000,000
None
null
[ "CWE-125" ]
CVE-2018-11377
The avr_op_analyze() function in radare2 2.5.0 allows remote attackers to cause a denial of service (heap-based out-of-bounds read and application crash) via a crafted binary file.
https://nvd.nist.gov/vuln/detail/CVE-2018-11377
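
Note on the fix (CVE-2018-11377): the commit title says "invalid free": `next_op` is consumed and its `esil` strbuf finalized without a guaranteed initialization, and the lookahead can also peek past the supplied buffer. An illustrative shape of the hardening (assumption, not the verified patch):

RAnalOp next_op = {0};		/* assumption: zero-init so the esil strbuf is safe to fini */
if (len > op->size) {		/* assumption: only decode a next opcode if bytes remain */
	avr_op_analyze (anal, &next_op, op->addr + op->size, buf + op->size, len - op->size, cpu);
}
r_strbuf_fini (&next_op.esil);
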
3,713
radare2
041e53cab7ca33481ae45ecd65ad596976d78e68
https://github.com/radare/radare2
https://github.com/radare/radare2/commit/041e53cab7ca33481ae45ecd65ad596976d78e68
Fix crash in anal.avr
1
INST_HANDLER (lds) { // LDS Rd, k int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x1) << 4); int k = (buf[3] << 8) | buf[2]; op->ptr = k; __generic_ld_st (op, "ram", 0, 1, 0, k, 0); ESIL_A ("r%d,=,", d); }
293,018,390,461,922,750,000,000,000,000,000,000,000
None
null
[ "CWE-125" ]
CVE-2018-11375
The _inst__lds() function in radare2 2.5.0 allows remote attackers to cause a denial of service (heap-based out-of-bounds read and application crash) via a crafted binary file.
https://nvd.nist.gov/vuln/detail/CVE-2018-11375
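
Note on the fix (CVE-2018-11375): same pattern as the STS handler in record 3,703 above, `buf[2]` and `buf[3]` are read unconditionally. Sketch, assuming the fix is the matching length guard:

INST_HANDLER (lds) {	// LDS Rd, k
	if (len < 4) {	/* hypothetical: 32-bit instruction, 4 buffer bytes required */
		return;
	}
	int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x1) << 4);
	int k = (buf[3] << 8) | buf[2];
	/* ... rest as in the function above ... */
}
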
3,714
linux
f09444639099584bc4784dfcd85ada67c6f33e0f
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/f09444639099584bc4784dfcd85ada67c6f33e0f
coresight: fix kernel panic caused by invalid CPU Commit d52c9750f150 ("coresight: reset "enable_sink" flag when need be") caused a kernel panic because of the using of an invalid value: after 'for_each_cpu(cpu, mask)', value of local variable 'cpu' become invalid, causes following 'cpu_to_node' access invalid memory area. This patch brings the deleted 'cpu = cpumask_first(mask)' back. Panic log: $ perf record -e cs_etm// ls Unable to handle kernel paging request at virtual address fffe801804af4f10 pgd = ffff8017ce031600 [fffe801804af4f10] *pgd=0000000000000000, *pud=0000000000000000 Internal error: Oops: 96000004 [#1] SMP Modules linked in: CPU: 33 PID: 1619 Comm: perf Not tainted 4.7.1+ #16 Hardware name: Huawei Taishan 2280 /CH05TEVBA, BIOS 1.10 11/24/2016 task: ffff8017cb0c8400 ti: ffff8017cb154000 task.ti: ffff8017cb154000 PC is at tmc_alloc_etf_buffer+0x60/0xd4 LR is at tmc_alloc_etf_buffer+0x44/0xd4 pc : [<ffff000008633df8>] lr : [<ffff000008633ddc>] pstate: 60000145 sp : ffff8017cb157b40 x29: ffff8017cb157b40 x28: 0000000000000000 ...skip... 7a60: ffff000008c64dc8 0000000000000006 0000000000000253 ffffffffffffffff 7a80: 0000000000000000 0000000000000000 ffff0000080872cc 0000000000000001 [<ffff000008633df8>] tmc_alloc_etf_buffer+0x60/0xd4 [<ffff000008632b9c>] etm_setup_aux+0x1dc/0x1e8 [<ffff00000816eed4>] rb_alloc_aux+0x2b0/0x338 [<ffff00000816a5e4>] perf_mmap+0x414/0x568 [<ffff0000081ab694>] mmap_region+0x324/0x544 [<ffff0000081abbe8>] do_mmap+0x334/0x3e0 [<ffff000008191150>] vm_mmap_pgoff+0xa4/0xc8 [<ffff0000081a9a30>] SyS_mmap_pgoff+0xb0/0x22c [<ffff0000080872e4>] sys_mmap+0x18/0x28 [<ffff0000080843f0>] el0_svc_naked+0x24/0x28 Code: 912040a5 d0001c00 f873d821 911c6000 (b8656822) ---[ end trace 98933da8f92b0c9a ]--- Signed-off-by: Wang Nan <wangnan0@huawei.com> Cc: Xia Kaixu <xiakaixu@huawei.com> Cc: Li Zefan <lizefan@huawei.com> Cc: Mathieu Poirier <mathieu.poirier@linaro.org> Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Fixes: d52c9750f150 ("coresight: reset "enable_sink" flag when need be") Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org> Cc: stable <stable@vger.kernel.org> # 4.10 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1
static void *etm_setup_aux(int event_cpu, void **pages, int nr_pages, bool overwrite) { int cpu; cpumask_t *mask; struct coresight_device *sink; struct etm_event_data *event_data = NULL; event_data = alloc_event_data(event_cpu); if (!event_data) return NULL; /* * In theory nothing prevent tracers in a trace session from being * associated with different sinks, nor having a sink per tracer. But * until we have HW with this kind of topology we need to assume tracers * in a trace session are using the same sink. Therefore go through * the coresight bus and pick the first enabled sink. * * When operated from sysFS users are responsible to enable the sink * while from perf, the perf tools will do it based on the choice made * on the cmd line. As such the "enable_sink" flag in sysFS is reset. */ sink = coresight_get_enabled_sink(true); if (!sink) goto err; INIT_WORK(&event_data->work, free_event_data); mask = &event_data->mask; /* Setup the path for each CPU in a trace session */ for_each_cpu(cpu, mask) { struct coresight_device *csdev; csdev = per_cpu(csdev_src, cpu); if (!csdev) goto err; /* * Building a path doesn't enable it, it simply builds a * list of devices from source to sink that can be * referenced later when the path is actually needed. */ event_data->path[cpu] = coresight_build_path(csdev, sink); if (IS_ERR(event_data->path[cpu])) goto err; } if (!sink_ops(sink)->alloc_buffer) goto err; /* Get the AUX specific data from the sink buffer */ event_data->snk_config = sink_ops(sink)->alloc_buffer(sink, cpu, pages, nr_pages, overwrite); if (!event_data->snk_config) goto err; out: return event_data; err: etm_free_aux(event_data); event_data = NULL; goto out; }
116,152,585,108,812,740,000,000,000,000,000,000,000
coresight-etm-perf.c
265,668,461,210,710,840,000,000,000,000,000,000,000
[ "CWE-20" ]
CVE-2018-11232
The etm_setup_aux function in drivers/hwtracing/coresight/coresight-etm-perf.c in the Linux kernel before 4.10.2 allows attackers to cause a denial of service (panic) because a parameter is incorrectly used as a local variable.
https://nvd.nist.gov/vuln/detail/CVE-2018-11232
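
Note on the fix (CVE-2018-11232): the commit message spells it out. After `for_each_cpu(cpu, mask)` terminates, `cpu` holds an invalid sentinel value, so the deleted `cpu = cpumask_first(mask)` is brought back before the sink buffer is allocated:

	/* restored per the commit message: 'cpu' is stale after the loop above */
	cpu = cpumask_first(mask);

	/* Get the AUX specific data from the sink buffer */
	event_data->snk_config = sink_ops(sink)->alloc_buffer(sink, cpu,
						pages, nr_pages, overwrite);
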
3,715
linux
9de4ee40547fd315d4a0ed1dd15a2fa3559ad707
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/9de4ee40547fd315d4a0ed1dd15a2fa3559ad707
cdrom: information leak in cdrom_ioctl_media_changed() This cast is wrong. "cdi->capacity" is an int and "arg" is an unsigned long. The way the check is written now, if one of the high 32 bits is set then we could read outside the info->slots[] array. This bug is pretty old and it predates git. Reviewed-by: Christoph Hellwig <hch@lst.de> Cc: stable@vger.kernel.org Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1
static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi, unsigned long arg) { struct cdrom_changer_info *info; int ret; cd_dbg(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n"); if (!CDROM_CAN(CDC_MEDIA_CHANGED)) return -ENOSYS; /* cannot select disc or select current disc */ if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) return media_changed(cdi, 1); if ((unsigned int)arg >= cdi->capacity) return -EINVAL; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; ret = cdrom_read_mech_status(cdi, info); if (!ret) ret = info->slots[arg].change; kfree(info); return ret; }
336,721,753,792,307,100,000,000,000,000,000,000,000
cdrom.c
93,333,265,289,730,710,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2018-10940
The cdrom_ioctl_media_changed function in drivers/cdrom/cdrom.c in the Linux kernel before 4.16.6 allows local attackers to use an incorrect bounds check in the CDROM driver CDROM_MEDIA_CHANGED ioctl to read out kernel memory.
https://nvd.nist.gov/vuln/detail/CVE-2018-10940
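
Note on the fix (CVE-2018-10940): the commit message explains the bug, casting the `unsigned long arg` to `unsigned int` discards the high 32 bits, so a value like 0x100000000 passes the check yet indexes `info->slots[]` far out of bounds. The fix is simply to drop the truncating cast:

	/* compare the full value; cdi->capacity is a non-negative int */
	if (arg >= cdi->capacity)
		return -EINVAL;
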
3,716
libgit2
9844d38bed10e9ff17174434b3421b227ae710f3
https://github.com/libgit2/libgit2
https://github.com/libgit2/libgit2/commit/9844d38bed10e9ff17174434b3421b227ae710f3
delta: fix out-of-bounds read of delta When computing the offset and length of the delta base, we repeatedly increment the `delta` pointer without checking whether we have advanced past its end already, which can thus result in an out-of-bounds read. Fix this by repeatedly checking whether we have reached the end. Add a test which would cause Valgrind to produce an error. Reported-by: Riccardo Schirone <rschiron@redhat.com> Test-provided-by: Riccardo Schirone <rschiron@redhat.com>
1
int git_delta_apply( void **out, size_t *out_len, const unsigned char *base, size_t base_len, const unsigned char *delta, size_t delta_len) { const unsigned char *delta_end = delta + delta_len; size_t base_sz, res_sz, alloc_sz; unsigned char *res_dp; *out = NULL; *out_len = 0; /* * Check that the base size matches the data we were given; * if not we would underflow while accessing data from the * base object, resulting in data corruption or segfault. */ if ((hdr_sz(&base_sz, &delta, delta_end) < 0) || (base_sz != base_len)) { giterr_set(GITERR_INVALID, "failed to apply delta: base size does not match given data"); return -1; } if (hdr_sz(&res_sz, &delta, delta_end) < 0) { giterr_set(GITERR_INVALID, "failed to apply delta: base size does not match given data"); return -1; } GITERR_CHECK_ALLOC_ADD(&alloc_sz, res_sz, 1); res_dp = git__malloc(alloc_sz); GITERR_CHECK_ALLOC(res_dp); res_dp[res_sz] = '\0'; *out = res_dp; *out_len = res_sz; while (delta < delta_end) { unsigned char cmd = *delta++; if (cmd & 0x80) { /* cmd is a copy instruction; copy from the base. */ size_t off = 0, len = 0; if (cmd & 0x01) off = *delta++; if (cmd & 0x02) off |= *delta++ << 8UL; if (cmd & 0x04) off |= *delta++ << 16UL; if (cmd & 0x08) off |= ((unsigned) *delta++ << 24UL); if (cmd & 0x10) len = *delta++; if (cmd & 0x20) len |= *delta++ << 8UL; if (cmd & 0x40) len |= *delta++ << 16UL; if (!len) len = 0x10000; if (base_len < off + len || res_sz < len) goto fail; memcpy(res_dp, base + off, len); res_dp += len; res_sz -= len; } else if (cmd) { /* * cmd is a literal insert instruction; copy from * the delta stream itself. */ if (delta_end - delta < cmd || res_sz < cmd) goto fail; memcpy(res_dp, delta, cmd); delta += cmd; res_dp += cmd; res_sz -= cmd; } else { /* cmd == 0 is reserved for future encodings. */ goto fail; } } if (delta != delta_end || res_sz) goto fail; return 0; fail: git__free(*out); *out = NULL; *out_len = 0; giterr_set(GITERR_INVALID, "failed to apply delta"); return -1; }
129,712,746,807,172,540,000,000,000,000,000,000,000
delta.c
58,338,496,957,847,360,000,000,000,000,000,000,000
[ "CWE-125" ]
CVE-2018-10888
A flaw was found in libgit2 before version 0.27.3. A missing check in git_delta_apply function in delta.c file, may lead to an out-of-bound read while reading a binary delta file. An attacker may use this flaw to cause a Denial of Service.
https://nvd.nist.gov/vuln/detail/CVE-2018-10888
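
Note on the fix (CVE-2018-10888): the commit message describes the change, re-check `delta < delta_end` before every `*delta++` in the copy-instruction decoder. The shape of that check can be seen in the `ADD_DELTA` macro carried by the next record's function (record 3,717):

/* per-byte end check, as in the ADD_DELTA macro of the following record */
#define ADD_DELTA(o, shift) { \
	if (delta < delta_end) (o) |= ((unsigned) *delta++ << shift); \
	else goto fail; \
}
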
3,717
libgit2
c1577110467b701dcbcf9439ac225ea851b47d22
https://github.com/libgit2/libgit2
https://github.com/libgit2/libgit2/commit/c1577110467b701dcbcf9439ac225ea851b47d22
delta: fix overflow when computing limit When checking whether a delta base offset and length fit into the base we have in memory already, we can trigger an overflow which breaks the check. This would subsequently result in us reading memory from out of bounds of the base. The issue is easily fixed by checking for overflow when adding `off` and `len`, thus guaranteeting that we are never indexing beyond `base_len`. This corresponds to the git patch 8960844a7 (check patch_delta bounds more carefully, 2006-04-07), which adds these overflow checks. Reported-by: Riccardo Schirone <rschiron@redhat.com>
1
int git_delta_apply( void **out, size_t *out_len, const unsigned char *base, size_t base_len, const unsigned char *delta, size_t delta_len) { const unsigned char *delta_end = delta + delta_len; size_t base_sz, res_sz, alloc_sz; unsigned char *res_dp; *out = NULL; *out_len = 0; /* * Check that the base size matches the data we were given; * if not we would underflow while accessing data from the * base object, resulting in data corruption or segfault. */ if ((hdr_sz(&base_sz, &delta, delta_end) < 0) || (base_sz != base_len)) { giterr_set(GITERR_INVALID, "failed to apply delta: base size does not match given data"); return -1; } if (hdr_sz(&res_sz, &delta, delta_end) < 0) { giterr_set(GITERR_INVALID, "failed to apply delta: base size does not match given data"); return -1; } GITERR_CHECK_ALLOC_ADD(&alloc_sz, res_sz, 1); res_dp = git__malloc(alloc_sz); GITERR_CHECK_ALLOC(res_dp); res_dp[res_sz] = '\0'; *out = res_dp; *out_len = res_sz; while (delta < delta_end) { unsigned char cmd = *delta++; if (cmd & 0x80) { /* cmd is a copy instruction; copy from the base. */ size_t off = 0, len = 0; #define ADD_DELTA(o, shift) { if (delta < delta_end) (o) |= ((unsigned) *delta++ << shift); else goto fail; } if (cmd & 0x01) ADD_DELTA(off, 0UL); if (cmd & 0x02) ADD_DELTA(off, 8UL); if (cmd & 0x04) ADD_DELTA(off, 16UL); if (cmd & 0x08) ADD_DELTA(off, 24UL); if (cmd & 0x10) ADD_DELTA(len, 0UL); if (cmd & 0x20) ADD_DELTA(len, 8UL); if (cmd & 0x40) ADD_DELTA(len, 16UL); if (!len) len = 0x10000; #undef ADD_DELTA if (base_len < off + len || res_sz < len) goto fail; memcpy(res_dp, base + off, len); res_dp += len; res_sz -= len; } else if (cmd) { /* * cmd is a literal insert instruction; copy from * the delta stream itself. */ if (delta_end - delta < cmd || res_sz < cmd) goto fail; memcpy(res_dp, delta, cmd); delta += cmd; res_dp += cmd; res_sz -= cmd; } else { /* cmd == 0 is reserved for future encodings. */ goto fail; } } if (delta != delta_end || res_sz) goto fail; return 0; fail: git__free(*out); *out = NULL; *out_len = 0; giterr_set(GITERR_INVALID, "failed to apply delta"); return -1; }
228,984,589,945,149,840,000,000,000,000,000,000,000
delta.c
235,241,610,924,393,300,000,000,000,000,000,000,000
[ "CWE-125" ]
CVE-2018-10887
A flaw was found in libgit2 before version 0.27.3. It has been discovered that an unexpected sign extension in git_delta_apply function in delta.c file may lead to an integer overflow which in turn leads to an out of bound read, allowing to read before the base object. An attacker may use this flaw to leak memory addresses or cause a Denial of Service.
https://nvd.nist.gov/vuln/detail/CVE-2018-10887
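
Note on the fix (CVE-2018-10887): the commit message pinpoints the overflow, `base_len < off + len` breaks when `off + len` wraps. A standalone demo of the wrap-safe rewrite the message describes, checking each operand against `base_len` so the addition never happens:

#include <stdio.h>
#include <stddef.h>

/* wrap-safe bounds test: equivalent to off + len <= base_len, without the addition */
static int copy_in_bounds(size_t base_len, size_t off, size_t len) {
	return off <= base_len && len <= base_len - off;
}

int main(void) {
	printf("%d\n", copy_in_bounds(100, (size_t)-1, 2));	/* 0: wrap-around caught */
	printf("%d\n", copy_in_bounds(100, 90, 10));		/* 1: fits exactly */
	printf("%d\n", copy_in_bounds(100, 90, 11));		/* 0: one byte too many */
	return 0;
}
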
3,718
ngiflib
cf429e0a2fe26b5f01ce0c8e9b79432e94509b6e
https://github.com/miniupnp/ngiflib
https://github.com/miniupnp/ngiflib/commit/cf429e0a2fe26b5f01ce0c8e9b79432e94509b6e
fix "pixel overrun" fixes #3
1
static int DecodeGifImg(struct ngiflib_img * i) { struct ngiflib_decode_context context; long npix; u8 * stackp; u8 * stack_top; u16 clr; u16 eof; u16 free; u16 act_code = 0; u16 old_code = 0; u16 read_byt; u16 ab_prfx[4096]; u8 ab_suffx[4096]; u8 ab_stack[4096]; u8 flags; u8 casspecial = 0; if(!i) return -1; i->posX = GetWord(i->parent); /* offsetX */ i->posY = GetWord(i->parent); /* offsetY */ i->width = GetWord(i->parent); /* SizeX */ i->height = GetWord(i->parent); /* SizeY */ if((i->width > i->parent->width) || (i->height > i->parent->height)) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** ERROR *** Image bigger than global GIF canvas !\n"); #endif return -1; } if((i->posX + i->width) > i->parent->width) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** WARNING *** Adjusting X position\n"); #endif i->posX = i->parent->width - i->width; } if((i->posY + i->height) > i->parent->height) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** WARNING *** Adjusting Y position\n"); #endif i->posY = i->parent->height - i->height; } context.Xtogo = i->width; context.curY = i->posY; #ifdef NGIFLIB_INDEXED_ONLY #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width; context.frbuff_p.p8 = context.line_p.p8 + i->posX; #else context.frbuff_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ #else if(i->parent->mode & NGIFLIB_MODE_INDEXED) { #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width; context.frbuff_p.p8 = context.line_p.p8 + i->posX; #else context.frbuff_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } else { #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p32 = i->parent->frbuff.p32 + (u32)i->posY*i->parent->width; context.frbuff_p.p32 = context.line_p.p32 + i->posX; #else context.frbuff_p.p32 = i->parent->frbuff.p32 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } #endif /* NGIFLIB_INDEXED_ONLY */ npix = (long)i->width * i->height; flags = GetByte(i->parent); i->interlaced = (flags & 64) >> 6; context.pass = i->interlaced ? 1 : 0; i->sort_flag = (flags & 32) >> 5; /* is local palette sorted by color frequency ? */ i->localpalbits = (flags & 7) + 1; if(flags&128) { /* palette locale */ int k; int localpalsize = 1 << i->localpalbits; #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Local palette\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ i->palette = (struct ngiflib_rgb *)ngiflib_malloc(sizeof(struct ngiflib_rgb)*localpalsize); for(k=0; k<localpalsize; k++) { i->palette[k].r = GetByte(i->parent); i->palette[k].g = GetByte(i->parent); i->palette[k].b = GetByte(i->parent); } #ifdef NGIFLIB_ENABLE_CALLBACKS if(i->parent->palette_cb) i->parent->palette_cb(i->parent, i->palette, localpalsize); #endif /* NGIFLIB_ENABLE_CALLBACKS */ } else { i->palette = i->parent->palette; i->localpalbits = i->parent->imgbits; } i->ncolors = 1 << i->localpalbits; i->imgbits = GetByte(i->parent); /* LZW Minimum Code Size */ #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) { if(i->interlaced) fprintf(i->parent->log, "interlaced "); fprintf(i->parent->log, "img pos(%hu,%hu) size %hux%hu palbits=%hhu imgbits=%hhu ncolors=%hu\n", i->posX, i->posY, i->width, i->height, i->localpalbits, i->imgbits, i->ncolors); } #endif /* !defined(NGIFLIB_NO_FILE) */ if(i->imgbits==1) { /* fix for 1bit images ? */ i->imgbits = 2; } clr = 1 << i->imgbits; eof = clr + 1; free = clr + 2; context.nbbit = i->imgbits + 1; context.max = clr + clr - 1; /* (1 << context.nbbit) - 1 */ stackp = stack_top = ab_stack + 4096; context.restbits = 0; /* initialise le "buffer" de lecture */ context.restbyte = 0; /* des codes LZW */ context.lbyte = 0; for(;;) { act_code = GetGifWord(i, &context); if(act_code==eof) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "End of image code\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ return 0; } if(npix==0) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "assez de pixels, On se casse !\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ return 1; } if(act_code==clr) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Code clear (free=%hu) npix=%ld\n", free, npix); #endif /* !defined(NGIFLIB_NO_FILE) */ /* clear */ free = clr + 2; context.nbbit = i->imgbits + 1; context.max = clr + clr - 1; /* (1 << context.nbbit) - 1 */ act_code = GetGifWord(i, &context); casspecial = (u8)act_code; old_code = act_code; WritePixel(i, &context, casspecial); npix--; } else { read_byt = act_code; if(act_code >= free) { /* code pas encore dans alphabet */ /* printf("Code pas dans alphabet : %d>=%d push %d\n", act_code, free, casspecial); */ *(--stackp) = casspecial; /* dernier debut de chaine ! */ act_code = old_code; } /* printf("actcode=%d\n", act_code); */ while(act_code > clr) { /* code non concret */ /* fillstackloop empile les suffixes ! */ *(--stackp) = ab_suffx[act_code]; act_code = ab_prfx[act_code]; /* prefixe */ } /* act_code est concret */ casspecial = (u8)act_code; /* dernier debut de chaine ! */ *(--stackp) = casspecial; /* push on stack */ WritePixels(i, &context, stackp, stack_top - stackp); /* unstack all pixels at once */ npix -= (stack_top - stackp); stackp = stack_top; /* putchar('\n'); */ if(free < 4096) { /* la taille du dico est 4096 max ! */ ab_prfx[free] = old_code; ab_suffx[free] = (u8)act_code; free++; if((free > context.max) && (context.nbbit < 12)) { context.nbbit++; /* 1 bit de plus pour les codes LZW */ context.max += context.max + 1; } } old_code = read_byt; } } return 0; }
208,688,875,586,295,620,000,000,000,000,000,000,000
None
null
[ "CWE-119" ]
CVE-2018-10717
The DecodeGifImg function in ngiflib.c in MiniUPnP ngiflib 0.4 does not consider the bounds of the pixels data structure, which allows remote attackers to cause a denial of service (WritePixels heap-based buffer overflow and application crash) or possibly have unspecified other impact via a crafted GIF file, a different vulnerability than CVE-2018-10677.
https://nvd.nist.gov/vuln/detail/CVE-2018-10717
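
Note on the fix (CVE-2018-10717): this revision already validates position and size (compare record 3,719 below), but the LZW loop can still write more pixels than `npix` allows when the stack unwinds near the end of the image. An illustrative clamp only; the upstream "pixel overrun" patch may instead bound the write inside WritePixels itself:

/* assumption: never hand WritePixels more pixels than the image still owes */
long n = stack_top - stackp;
if (n > npix) n = npix;
WritePixels(i, &context, stackp, n);
npix -= n;
stackp = stack_top;
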
3,719
ngiflib
b588a2249c7abbfc52173e32ee11d6facef82f89
https://github.com/miniupnp/ngiflib
https://github.com/miniupnp/ngiflib/commit/b588a2249c7abbfc52173e32ee11d6facef82f89
check GIF image position and dimensions fixes #1
1
static int DecodeGifImg(struct ngiflib_img * i) { struct ngiflib_decode_context context; long npix; u8 * stackp; u8 * stack_top; u16 clr; u16 eof; u16 free; u16 act_code = 0; u16 old_code = 0; u16 read_byt; u16 ab_prfx[4096]; u8 ab_suffx[4096]; u8 ab_stack[4096]; u8 flags; u8 casspecial = 0; if(!i) return -1; i->posX = GetWord(i->parent); /* offsetX */ i->posY = GetWord(i->parent); /* offsetY */ i->width = GetWord(i->parent); /* SizeX */ i->height = GetWord(i->parent); /* SizeY */ context.Xtogo = i->width; context.curY = i->posY; #ifdef NGIFLIB_INDEXED_ONLY #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width; context.frbuff_p.p8 = context.line_p.p8 + i->posX; #else context.frbuff_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ #else if(i->parent->mode & NGIFLIB_MODE_INDEXED) { #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width; context.frbuff_p.p8 = context.line_p.p8 + i->posX; #else context.frbuff_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } else { #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p32 = i->parent->frbuff.p32 + (u32)i->posY*i->parent->width; context.frbuff_p.p32 = context.line_p.p32 + i->posX; #else context.frbuff_p.p32 = i->parent->frbuff.p32 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } #endif /* NGIFLIB_INDEXED_ONLY */ npix = (long)i->width * i->height; flags = GetByte(i->parent); i->interlaced = (flags & 64) >> 6; context.pass = i->interlaced ? 1 : 0; i->sort_flag = (flags & 32) >> 5; /* is local palette sorted by color frequency ? */ i->localpalbits = (flags & 7) + 1; if(flags&128) { /* palette locale */ int k; int localpalsize = 1 << i->localpalbits; #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Local palette\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ i->palette = (struct ngiflib_rgb *)ngiflib_malloc(sizeof(struct ngiflib_rgb)*localpalsize); for(k=0; k<localpalsize; k++) { i->palette[k].r = GetByte(i->parent); i->palette[k].g = GetByte(i->parent); i->palette[k].b = GetByte(i->parent); } #ifdef NGIFLIB_ENABLE_CALLBACKS if(i->parent->palette_cb) i->parent->palette_cb(i->parent, i->palette, localpalsize); #endif /* NGIFLIB_ENABLE_CALLBACKS */ } else { i->palette = i->parent->palette; i->localpalbits = i->parent->imgbits; } i->ncolors = 1 << i->localpalbits; i->imgbits = GetByte(i->parent); /* LZW Minimum Code Size */ #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) { if(i->interlaced) fprintf(i->parent->log, "interlaced "); fprintf(i->parent->log, "img pos(%hu,%hu) size %hux%hu palbits=%hhu imgbits=%hhu ncolors=%hu\n", i->posX, i->posY, i->width, i->height, i->localpalbits, i->imgbits, i->ncolors); } #endif /* !defined(NGIFLIB_NO_FILE) */ if(i->imgbits==1) { /* fix for 1bit images ? */ i->imgbits = 2; } clr = 1 << i->imgbits; eof = clr + 1; free = clr + 2; context.nbbit = i->imgbits + 1; context.max = clr + clr - 1; /* (1 << context.nbbit) - 1 */ stackp = stack_top = ab_stack + 4096; context.restbits = 0; /* initialise le "buffer" de lecture */ context.restbyte = 0; /* des codes LZW */ context.lbyte = 0; for(;;) { act_code = GetGifWord(i, &context); if(act_code==eof) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "End of image code\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ return 0; } if(npix==0) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "assez de pixels, On se casse !\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ return 1; } if(act_code==clr) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Code clear (free=%hu) npix=%ld\n", free, npix); #endif /* !defined(NGIFLIB_NO_FILE) */ /* clear */ free = clr + 2; context.nbbit = i->imgbits + 1; context.max = clr + clr - 1; /* (1 << context.nbbit) - 1 */ act_code = GetGifWord(i, &context); casspecial = (u8)act_code; old_code = act_code; WritePixel(i, &context, casspecial); npix--; } else { read_byt = act_code; if(act_code >= free) { /* code pas encore dans alphabet */ /* printf("Code pas dans alphabet : %d>=%d push %d\n", act_code, free, casspecial); */ *(--stackp) = casspecial; /* dernier debut de chaine ! */ act_code = old_code; } /* printf("actcode=%d\n", act_code); */ while(act_code > clr) { /* code non concret */ /* fillstackloop empile les suffixes ! */ *(--stackp) = ab_suffx[act_code]; act_code = ab_prfx[act_code]; /* prefixe */ } /* act_code est concret */ casspecial = (u8)act_code; /* dernier debut de chaine ! */ *(--stackp) = casspecial; /* push on stack */ WritePixels(i, &context, stackp, stack_top - stackp); /* unstack all pixels at once */ npix -= (stack_top - stackp); stackp = stack_top; /* putchar('\n'); */ if(free < 4096) { /* la taille du dico est 4096 max ! */ ab_prfx[free] = old_code; ab_suffx[free] = (u8)act_code; free++; if((free > context.max) && (context.nbbit < 12)) { context.nbbit++; /* 1 bit de plus pour les codes LZW */ context.max += context.max + 1; } } old_code = read_byt; } } return 0; }
284,105,126,342,844,770,000,000,000,000,000,000,000
None
null
[ "CWE-119" ]
CVE-2018-10677
The DecodeGifImg function in ngiflib.c in MiniUPnP ngiflib 0.4 lacks certain checks against width and height, which allows remote attackers to cause a denial of service (WritePixels heap-based buffer overflow and application crash) or possibly have unspecified other impact via a crafted GIF file.
https://nvd.nist.gov/vuln/detail/CVE-2018-10677
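
Note on the fix (CVE-2018-10677): this is the revision without position/dimension validation; the checks the commit message announces appear verbatim at the top of the same function in record 3,718 above (reject images larger than the canvas, clamp posX/posY):

/* as carried by the later revision in record 3,718 */
if((i->width > i->parent->width) || (i->height > i->parent->height)) {
	return -1;	/* image bigger than the global GIF canvas */
}
if((i->posX + i->width) > i->parent->width) i->posX = i->parent->width - i->width;
if((i->posY + i->height) > i->parent->height) i->posY = i->parent->height - i->height;
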
3,720
linux
73223e4e2e3867ebf033a5a8eb2e5df0158ccc99
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/73223e4e2e3867ebf033a5a8eb2e5df0158ccc99
mm/mempolicy: fix use after free when calling get_mempolicy I hit a use after free issue when executing trinity and repoduced it with KASAN enabled. The related call trace is as follows. BUG: KASan: use after free in SyS_get_mempolicy+0x3c8/0x960 at addr ffff8801f582d766 Read of size 2 by task syz-executor1/798 INFO: Allocated in mpol_new.part.2+0x74/0x160 age=3 cpu=1 pid=799 __slab_alloc+0x768/0x970 kmem_cache_alloc+0x2e7/0x450 mpol_new.part.2+0x74/0x160 mpol_new+0x66/0x80 SyS_mbind+0x267/0x9f0 system_call_fastpath+0x16/0x1b INFO: Freed in __mpol_put+0x2b/0x40 age=4 cpu=1 pid=799 __slab_free+0x495/0x8e0 kmem_cache_free+0x2f3/0x4c0 __mpol_put+0x2b/0x40 SyS_mbind+0x383/0x9f0 system_call_fastpath+0x16/0x1b INFO: Slab 0xffffea0009cb8dc0 objects=23 used=8 fp=0xffff8801f582de40 flags=0x200000000004080 INFO: Object 0xffff8801f582d760 @offset=5984 fp=0xffff8801f582d600 Bytes b4 ffff8801f582d750: ae 01 ff ff 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ Object ffff8801f582d760: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk Object ffff8801f582d770: 6b 6b 6b 6b 6b 6b 6b a5 kkkkkkk. Redzone ffff8801f582d778: bb bb bb bb bb bb bb bb ........ Padding ffff8801f582d8b8: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ Memory state around the buggy address: ffff8801f582d600: fb fb fb fc fc fc fc fc fc fc fc fc fc fc fc fc ffff8801f582d680: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc >ffff8801f582d700: fc fc fc fc fc fc fc fc fc fc fc fc fb fb fb fc !shared memory policy is not protected against parallel removal by other thread which is normally protected by the mmap_sem. do_get_mempolicy, however, drops the lock midway while we can still access it later. Early premature up_read is a historical artifact from times when put_user was called in this path see https://lwn.net/Articles/124754/ but that is gone since 8bccd85ffbaf ("[PATCH] Implement sys_* do_* layering in the memory policy layer."). but when we have the the current mempolicy ref count model. The issue was introduced accordingly. Fix the issue by removing the premature release. Link: http://lkml.kernel.org/r/1502950924-27521-1-git-send-email-zhongjiang@huawei.com Signed-off-by: zhong jiang <zhongjiang@huawei.com> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Minchan Kim <minchan@kernel.org> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: David Rientjes <rientjes@google.com> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: <stable@vger.kernel.org> [2.6+] Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1
static long do_get_mempolicy(int *policy, nodemask_t *nmask, unsigned long addr, unsigned long flags) { int err; struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; struct mempolicy *pol = current->mempolicy; if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) return -EINVAL; if (flags & MPOL_F_MEMS_ALLOWED) { if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) return -EINVAL; *policy = 0; /* just so it's initialized */ task_lock(current); *nmask = cpuset_current_mems_allowed; task_unlock(current); return 0; } if (flags & MPOL_F_ADDR) { /* * Do NOT fall back to task policy if the * vma/shared policy at addr is NULL. We * want to return MPOL_DEFAULT in this case. */ down_read(&mm->mmap_sem); vma = find_vma_intersection(mm, addr, addr+1); if (!vma) { up_read(&mm->mmap_sem); return -EFAULT; } if (vma->vm_ops && vma->vm_ops->get_policy) pol = vma->vm_ops->get_policy(vma, addr); else pol = vma->vm_policy; } else if (addr) return -EINVAL; if (!pol) pol = &default_policy; /* indicates default behavior */ if (flags & MPOL_F_NODE) { if (flags & MPOL_F_ADDR) { err = lookup_node(addr); if (err < 0) goto out; *policy = err; } else if (pol == current->mempolicy && pol->mode == MPOL_INTERLEAVE) { *policy = next_node_in(current->il_prev, pol->v.nodes); } else { err = -EINVAL; goto out; } } else { *policy = pol == &default_policy ? MPOL_DEFAULT : pol->mode; /* * Internal mempolicy flags must be masked off before exposing * the policy to userspace. */ *policy |= (pol->flags & MPOL_MODE_FLAGS); } if (vma) { up_read(&current->mm->mmap_sem); vma = NULL; } err = 0; if (nmask) { if (mpol_store_user_nodemask(pol)) { *nmask = pol->w.user_nodemask; } else { task_lock(current); get_policy_nodemask(pol, nmask); task_unlock(current); } } out: mpol_cond_put(pol); if (vma) up_read(&current->mm->mmap_sem); return err; }
152,269,556,434,917,930,000,000,000,000,000,000,000
mempolicy.c
25,852,424,809,621,314,000,000,000,000,000,000,000
[ "CWE-416" ]
CVE-2018-10675
The do_get_mempolicy function in mm/mempolicy.c in the Linux kernel before 4.12.9 allows local users to cause a denial of service (use-after-free) or possibly have unspecified other impact via crafted system calls.
https://nvd.nist.gov/vuln/detail/CVE-2018-10675
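
Note on the fix (CVE-2018-10675): the commit message names the change, delete the premature `up_read()` block in the middle of the function, so `mmap_sem` stays held until the single release after the `out:` label and the vma's shared policy cannot be freed while `pol` is still being read:

	/* the premature release that used to sit here is gone:
	 *   if (vma) { up_read(&current->mm->mmap_sem); vma = NULL; }
	 * mmap_sem is now held until the single release below */
	err = 0;
	if (nmask) {
		/* ... copy the nodemask exactly as in the function above ... */
	}
 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
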
3,721
file
a642587a9c9e2dd7feacdf513c3643ce26ad3c22
https://github.com/file/file
https://github.com/file/file/commit/a642587a9c9e2dd7feacdf513c3643ce26ad3c22
Avoid reading past the end of buffer (Rui Reis)
1
do_core_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap, uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags, size_t size, int clazz) { #ifdef ELFCORE int os_style = -1; /* * Sigh. The 2.0.36 kernel in Debian 2.1, at * least, doesn't correctly implement name * sections, in core dumps, as specified by * the "Program Linking" section of "UNIX(R) System * V Release 4 Programmer's Guide: ANSI C and * Programming Support Tools", because my copy * clearly says "The first 'namesz' bytes in 'name' * contain a *null-terminated* [emphasis mine] * character representation of the entry's owner * or originator", but the 2.0.36 kernel code * doesn't include the terminating null in the * name.... */ if ((namesz == 4 && strncmp((char *)&nbuf[noff], "CORE", 4) == 0) || (namesz == 5 && strcmp((char *)&nbuf[noff], "CORE") == 0)) { os_style = OS_STYLE_SVR4; } if ((namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0)) { os_style = OS_STYLE_FREEBSD; } if ((namesz >= 11 && strncmp((char *)&nbuf[noff], "NetBSD-CORE", 11) == 0)) { os_style = OS_STYLE_NETBSD; } if (os_style != -1 && (*flags & FLAGS_DID_CORE_STYLE) == 0) { if (file_printf(ms, ", %s-style", os_style_names[os_style]) == -1) return 1; *flags |= FLAGS_DID_CORE_STYLE; *flags |= os_style; } switch (os_style) { case OS_STYLE_NETBSD: if (type == NT_NETBSD_CORE_PROCINFO) { char sbuf[512]; struct NetBSD_elfcore_procinfo pi; memset(&pi, 0, sizeof(pi)); memcpy(&pi, nbuf + doff, descsz); if (file_printf(ms, ", from '%.31s', pid=%u, uid=%u, " "gid=%u, nlwps=%u, lwp=%u (signal %u/code %u)", file_printable(sbuf, sizeof(sbuf), CAST(char *, pi.cpi_name)), elf_getu32(swap, (uint32_t)pi.cpi_pid), elf_getu32(swap, pi.cpi_euid), elf_getu32(swap, pi.cpi_egid), elf_getu32(swap, pi.cpi_nlwps), elf_getu32(swap, (uint32_t)pi.cpi_siglwp), elf_getu32(swap, pi.cpi_signo), elf_getu32(swap, pi.cpi_sigcode)) == -1) return 1; *flags |= FLAGS_DID_CORE; return 1; } break; default: if (type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) { size_t i, j; unsigned char c; /* * Extract the program name. We assume * it to be 16 characters (that's what it * is in SunOS 5.x and Linux). * * Unfortunately, it's at a different offset * in various OSes, so try multiple offsets. * If the characters aren't all printable, * reject it. */ for (i = 0; i < NOFFSETS; i++) { unsigned char *cname, *cp; size_t reloffset = prpsoffsets(i); size_t noffset = doff + reloffset; size_t k; for (j = 0; j < 16; j++, noffset++, reloffset++) { /* * Make sure we're not past * the end of the buffer; if * we are, just give up. */ if (noffset >= size) goto tryanother; /* * Make sure we're not past * the end of the contents; * if we are, this obviously * isn't the right offset. */ if (reloffset >= descsz) goto tryanother; c = nbuf[noffset]; if (c == '\0') { /* * A '\0' at the * beginning is * obviously wrong. * Any other '\0' * means we're done. */ if (j == 0) goto tryanother; else break; } else { /* * A nonprintable * character is also * wrong. */ if (!isprint(c) || isquote(c)) goto tryanother; } } /* * Well, that worked. */ /* * Try next offsets, in case this match is * in the middle of a string. */ for (k = i + 1 ; k < NOFFSETS; k++) { size_t no; int adjust = 1; if (prpsoffsets(k) >= prpsoffsets(i)) continue; for (no = doff + prpsoffsets(k); no < doff + prpsoffsets(i); no++) adjust = adjust && isprint(nbuf[no]); if (adjust) i = k; } cname = (unsigned char *) &nbuf[doff + prpsoffsets(i)]; for (cp = cname; *cp && isprint(*cp); cp++) continue; /* * Linux apparently appends a space at the end * of the command line: remove it. */ while (cp > cname && isspace(cp[-1])) cp--; if (file_printf(ms, ", from '%.*s'", (int)(cp - cname), cname) == -1) return 1; *flags |= FLAGS_DID_CORE; return 1; tryanother: ; } } break; } #endif return 0; }
272,730,783,674,373,480,000,000,000,000,000,000,000
None
null
[ "CWE-125" ]
CVE-2018-10360
The do_core_note function in readelf.c in libmagic.a in file 5.33 allows remote attackers to cause a denial of service (out-of-bounds read and application crash) via a crafted ELF file.
https://nvd.nist.gov/vuln/detail/CVE-2018-10360
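
Note on the fix (CVE-2018-10360): `cp` walks the candidate name with `for (cp = cname; *cp && isprint(*cp); cp++)` and no upper bound, so a name running to the end of the note buffer is scanned past it. A sketch, assuming the fix bounds the walk by `size`:

/* hypothetical bound: stop at the end of nbuf as well as at NUL or non-print */
cname = (unsigned char *)&nbuf[doff + prpsoffsets(i)];
for (cp = cname; cp < nbuf + size && *cp && isprint(*cp); cp++)
	continue;
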
3,722
mruby
b51b21fc63c9805862322551387d9036f2b63433
https://github.com/mruby/mruby
https://github.com/mruby/mruby/commit/b51b21fc63c9805862322551387d9036f2b63433
Fix `use after free in File#initilialize_copy`; fix #4001 The bug and the fix were reported by https://hackerone.com/pnoltof
1
mrb_io_initialize_copy(mrb_state *mrb, mrb_value copy) { mrb_value orig; mrb_value buf; struct mrb_io *fptr_copy; struct mrb_io *fptr_orig; mrb_bool failed = TRUE; mrb_get_args(mrb, "o", &orig); fptr_copy = (struct mrb_io *)DATA_PTR(copy); if (fptr_copy != NULL) { fptr_finalize(mrb, fptr_copy, FALSE); mrb_free(mrb, fptr_copy); } fptr_copy = (struct mrb_io *)mrb_io_alloc(mrb); fptr_orig = io_get_open_fptr(mrb, orig); DATA_TYPE(copy) = &mrb_io_type; DATA_PTR(copy) = fptr_copy; buf = mrb_iv_get(mrb, orig, mrb_intern_cstr(mrb, "@buf")); mrb_iv_set(mrb, copy, mrb_intern_cstr(mrb, "@buf"), buf); fptr_copy->fd = mrb_dup(mrb, fptr_orig->fd, &failed); if (failed) { mrb_sys_fail(mrb, 0); } mrb_fd_cloexec(mrb, fptr_copy->fd); if (fptr_orig->fd2 != -1) { fptr_copy->fd2 = mrb_dup(mrb, fptr_orig->fd2, &failed); if (failed) { close(fptr_copy->fd); mrb_sys_fail(mrb, 0); } mrb_fd_cloexec(mrb, fptr_copy->fd2); } fptr_copy->pid = fptr_orig->pid; fptr_copy->readable = fptr_orig->readable; fptr_copy->writable = fptr_orig->writable; fptr_copy->sync = fptr_orig->sync; fptr_copy->is_socket = fptr_orig->is_socket; return copy; }
150,634,863,447,224,430,000,000,000,000,000,000,000
io.c
41,201,053,483,208,995,000,000,000,000,000,000,000
[ "CWE-416" ]
CVE-2018-10199
In versions of mruby up to and including 1.4.0, a use-after-free vulnerability exists in src/io.c::File#initilialize_copy(). An attacker that can cause Ruby code to be run can possibly use this to execute arbitrary code.
https://nvd.nist.gov/vuln/detail/CVE-2018-10199
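
Note on the fix (CVE-2018-10199): the use-after-free occurs when `copy` and `orig` wrap the same `mrb_io`, since the function frees `fptr_copy` and then dereferences the same memory through `fptr_orig`. A hedged sketch of the reordering (look up the source first, refuse self-copy; not necessarily the exact upstream patch):

/* assumption: fetch the source before freeing, and bail out on self-copy */
fptr_orig = io_get_open_fptr(mrb, orig);
fptr_copy = (struct mrb_io *)DATA_PTR(copy);
if (fptr_orig == fptr_copy) return copy;	/* same object: nothing to duplicate */
if (fptr_copy != NULL) {
	fptr_finalize(mrb, fptr_copy, FALSE);
	mrb_free(mrb, fptr_copy);
}
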
3,723
mruby
1905091634a6a2925c911484434448e568330626
https://github.com/mruby/mruby
https://github.com/mruby/mruby/commit/1905091634a6a2925c911484434448e568330626
Check length of env stack before accessing upvar; fix #3995
1
mrb_vm_exec(mrb_state *mrb, struct RProc *proc, mrb_code *pc) { /* mrb_assert(mrb_proc_cfunc_p(proc)) */ mrb_irep *irep = proc->body.irep; mrb_value *pool = irep->pool; mrb_sym *syms = irep->syms; mrb_code i; int ai = mrb_gc_arena_save(mrb); struct mrb_jmpbuf *prev_jmp = mrb->jmp; struct mrb_jmpbuf c_jmp; #ifdef DIRECT_THREADED static void *optable[] = { &&L_OP_NOP, &&L_OP_MOVE, &&L_OP_LOADL, &&L_OP_LOADI, &&L_OP_LOADSYM, &&L_OP_LOADNIL, &&L_OP_LOADSELF, &&L_OP_LOADT, &&L_OP_LOADF, &&L_OP_GETGLOBAL, &&L_OP_SETGLOBAL, &&L_OP_GETSPECIAL, &&L_OP_SETSPECIAL, &&L_OP_GETIV, &&L_OP_SETIV, &&L_OP_GETCV, &&L_OP_SETCV, &&L_OP_GETCONST, &&L_OP_SETCONST, &&L_OP_GETMCNST, &&L_OP_SETMCNST, &&L_OP_GETUPVAR, &&L_OP_SETUPVAR, &&L_OP_JMP, &&L_OP_JMPIF, &&L_OP_JMPNOT, &&L_OP_ONERR, &&L_OP_RESCUE, &&L_OP_POPERR, &&L_OP_RAISE, &&L_OP_EPUSH, &&L_OP_EPOP, &&L_OP_SEND, &&L_OP_SENDB, &&L_OP_FSEND, &&L_OP_CALL, &&L_OP_SUPER, &&L_OP_ARGARY, &&L_OP_ENTER, &&L_OP_KARG, &&L_OP_KDICT, &&L_OP_RETURN, &&L_OP_TAILCALL, &&L_OP_BLKPUSH, &&L_OP_ADD, &&L_OP_ADDI, &&L_OP_SUB, &&L_OP_SUBI, &&L_OP_MUL, &&L_OP_DIV, &&L_OP_EQ, &&L_OP_LT, &&L_OP_LE, &&L_OP_GT, &&L_OP_GE, &&L_OP_ARRAY, &&L_OP_ARYCAT, &&L_OP_ARYPUSH, &&L_OP_AREF, &&L_OP_ASET, &&L_OP_APOST, &&L_OP_STRING, &&L_OP_STRCAT, &&L_OP_HASH, &&L_OP_LAMBDA, &&L_OP_RANGE, &&L_OP_OCLASS, &&L_OP_CLASS, &&L_OP_MODULE, &&L_OP_EXEC, &&L_OP_METHOD, &&L_OP_SCLASS, &&L_OP_TCLASS, &&L_OP_DEBUG, &&L_OP_STOP, &&L_OP_ERR, }; #endif mrb_bool exc_catched = FALSE; RETRY_TRY_BLOCK: MRB_TRY(&c_jmp) { if (exc_catched) { exc_catched = FALSE; if (mrb->exc && mrb->exc->tt == MRB_TT_BREAK) goto L_BREAK; goto L_RAISE; } mrb->jmp = &c_jmp; mrb->c->ci->proc = proc; mrb->c->ci->nregs = irep->nregs; #define regs (mrb->c->stack) INIT_DISPATCH { CASE(OP_NOP) { /* do nothing */ NEXT; } CASE(OP_MOVE) { /* A B R(A) := R(B) */ int a = GETARG_A(i); int b = GETARG_B(i); regs[a] = regs[b]; NEXT; } CASE(OP_LOADL) { /* A Bx R(A) := Pool(Bx) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); #ifdef MRB_WORD_BOXING mrb_value val = pool[bx]; #ifndef MRB_WITHOUT_FLOAT if (mrb_float_p(val)) { val = mrb_float_value(mrb, mrb_float(val)); } #endif regs[a] = val; #else regs[a] = pool[bx]; #endif NEXT; } CASE(OP_LOADI) { /* A sBx R(A) := sBx */ int a = GETARG_A(i); mrb_int bx = GETARG_sBx(i); SET_INT_VALUE(regs[a], bx); NEXT; } CASE(OP_LOADSYM) { /* A Bx R(A) := Syms(Bx) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); SET_SYM_VALUE(regs[a], syms[bx]); NEXT; } CASE(OP_LOADSELF) { /* A R(A) := self */ int a = GETARG_A(i); regs[a] = regs[0]; NEXT; } CASE(OP_LOADT) { /* A R(A) := true */ int a = GETARG_A(i); SET_TRUE_VALUE(regs[a]); NEXT; } CASE(OP_LOADF) { /* A R(A) := false */ int a = GETARG_A(i); SET_FALSE_VALUE(regs[a]); NEXT; } CASE(OP_GETGLOBAL) { /* A Bx R(A) := getglobal(Syms(Bx)) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_value val = mrb_gv_get(mrb, syms[bx]); regs[a] = val; NEXT; } CASE(OP_SETGLOBAL) { /* A Bx setglobal(Syms(Bx), R(A)) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_gv_set(mrb, syms[bx], regs[a]); NEXT; } CASE(OP_GETSPECIAL) { /* A Bx R(A) := Special[Bx] */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_value val = mrb_vm_special_get(mrb, bx); regs[a] = val; NEXT; } CASE(OP_SETSPECIAL) { /* A Bx Special[Bx] := R(A) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_vm_special_set(mrb, bx, regs[a]); NEXT; } CASE(OP_GETIV) { /* A Bx R(A) := ivget(Bx) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_value val = mrb_vm_iv_get(mrb, syms[bx]); regs[a] = val; NEXT; } CASE(OP_SETIV) { /* A Bx ivset(Syms(Bx),R(A)) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_vm_iv_set(mrb, syms[bx], regs[a]); NEXT; } CASE(OP_GETCV) { /* A Bx R(A) := cvget(Syms(Bx)) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_value val; ERR_PC_SET(mrb, pc); val = mrb_vm_cv_get(mrb, syms[bx]); ERR_PC_CLR(mrb); regs[a] = val; NEXT; } CASE(OP_SETCV) { /* A Bx cvset(Syms(Bx),R(A)) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_vm_cv_set(mrb, syms[bx], regs[a]); NEXT; } CASE(OP_GETCONST) { /* A Bx R(A) := constget(Syms(Bx)) */ mrb_value val; int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_sym sym = syms[bx]; ERR_PC_SET(mrb, pc); val = mrb_vm_const_get(mrb, sym); ERR_PC_CLR(mrb); regs[a] = val; NEXT; } CASE(OP_SETCONST) { /* A Bx constset(Syms(Bx),R(A)) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_vm_const_set(mrb, syms[bx], regs[a]); NEXT; } CASE(OP_GETMCNST) { /* A Bx R(A) := R(A)::Syms(Bx) */ mrb_value val; int a = GETARG_A(i); int bx = GETARG_Bx(i); ERR_PC_SET(mrb, pc); val = mrb_const_get(mrb, regs[a], syms[bx]); ERR_PC_CLR(mrb); regs[a] = val; NEXT; } CASE(OP_SETMCNST) { /* A Bx R(A+1)::Syms(Bx) := R(A) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_const_set(mrb, regs[a+1], syms[bx], regs[a]); NEXT; } CASE(OP_GETUPVAR) { /* A B C R(A) := uvget(B,C) */ int a = GETARG_A(i); int b = GETARG_B(i); int c = GETARG_C(i); mrb_value *regs_a = regs + a; struct REnv *e = uvenv(mrb, c); if (!e) { *regs_a = mrb_nil_value(); } else { *regs_a = e->stack[b]; } NEXT; } CASE(OP_SETUPVAR) { /* A B C uvset(B,C,R(A)) */ int a = GETARG_A(i); int b = GETARG_B(i); int c = GETARG_C(i); struct REnv *e = uvenv(mrb, c); if (e) { mrb_value *regs_a = regs + a; if (b < MRB_ENV_STACK_LEN(e)) { e->stack[b] = *regs_a; mrb_write_barrier(mrb, (struct RBasic*)e); } } NEXT; } CASE(OP_JMP) { /* sBx pc+=sBx */ int sbx = GETARG_sBx(i); pc += sbx; JUMP; } CASE(OP_JMPIF) { /* A sBx if R(A) pc+=sBx */ int a = GETARG_A(i); int sbx = GETARG_sBx(i); if (mrb_test(regs[a])) { pc += sbx; JUMP; } NEXT; } CASE(OP_JMPNOT) { /* A sBx if !R(A) pc+=sBx */ int a = GETARG_A(i); int sbx = GETARG_sBx(i); if (!mrb_test(regs[a])) { pc += sbx; JUMP; } NEXT; } CASE(OP_ONERR) { /* sBx pc+=sBx on exception */ int sbx = GETARG_sBx(i); if (mrb->c->rsize <= mrb->c->ci->ridx) { if (mrb->c->rsize == 0) mrb->c->rsize = RESCUE_STACK_INIT_SIZE; else mrb->c->rsize *= 2; mrb->c->rescue = (mrb_code **)mrb_realloc(mrb, mrb->c->rescue, sizeof(mrb_code*) * mrb->c->rsize); } mrb->c->rescue[mrb->c->ci->ridx++] = pc + sbx; NEXT; } CASE(OP_RESCUE) { /* A B R(A) := exc; clear(exc); R(B) := matched (bool) */ int a = GETARG_A(i); int b = GETARG_B(i); int c = GETARG_C(i); mrb_value exc; if (c == 0) { exc = mrb_obj_value(mrb->exc); mrb->exc = 0; } else { /* continued; exc taken from R(A) */ exc = regs[a]; } if (b != 0) { mrb_value e = regs[b]; struct RClass *ec; switch (mrb_type(e)) { case MRB_TT_CLASS: case MRB_TT_MODULE: break; default: { mrb_value exc; exc = mrb_exc_new_str_lit(mrb, E_TYPE_ERROR, "class or module required for rescue clause"); mrb_exc_set(mrb, exc); goto L_RAISE; } } ec = mrb_class_ptr(e); regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec)); } if (a != 0 && c == 0) { regs[a] = exc; } NEXT; } CASE(OP_POPERR) { /* A A.times{rescue_pop()} */ int a = GETARG_A(i); mrb->c->ci->ridx -= a; NEXT; } CASE(OP_RAISE) { /* A raise(R(A)) */ int a = GETARG_A(i); mrb_exc_set(mrb, regs[a]); goto L_RAISE; } CASE(OP_EPUSH) { /* Bx ensure_push(SEQ[Bx]) */ int bx = GETARG_Bx(i); struct RProc *p; p = mrb_closure_new(mrb, irep->reps[bx]); /* push ensure_stack */ if (mrb->c->esize <= mrb->c->eidx+1) { if (mrb->c->esize == 0) mrb->c->esize = ENSURE_STACK_INIT_SIZE; else mrb->c->esize *= 2; mrb->c->ensure = (struct RProc **)mrb_realloc(mrb, mrb->c->ensure, sizeof(struct RProc*) * mrb->c->esize); } mrb->c->ensure[mrb->c->eidx++] = p; mrb->c->ensure[mrb->c->eidx] = NULL; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_EPOP) { /* A A.times{ensure_pop().call} */ int a = GETARG_A(i); mrb_callinfo *ci = mrb->c->ci; int n, epos = ci->epos; mrb_value self = regs[0]; struct RClass *target_class = ci->target_class; if (mrb->c->eidx <= epos) { NEXT; } if (a > mrb->c->eidx - epos) a = mrb->c->eidx - epos; pc = pc + 1; for (n=0; n<a; n++) { proc = mrb->c->ensure[epos+n]; mrb->c->ensure[epos+n] = NULL; if (proc == NULL) continue; irep = proc->body.irep; ci = cipush(mrb); ci->mid = ci[-1].mid; ci->argc = 0; ci->proc = proc; ci->stackent = mrb->c->stack; ci->nregs = irep->nregs; ci->target_class = target_class; ci->pc = pc; ci->acc = ci[-1].nregs; mrb->c->stack += ci->acc; stack_extend(mrb, ci->nregs); regs[0] = self; pc = irep->iseq; } pool = irep->pool; syms = irep->syms; mrb->c->eidx = epos; JUMP; } CASE(OP_LOADNIL) { /* A R(A) := nil */ int a = GETARG_A(i); SET_NIL_VALUE(regs[a]); NEXT; } CASE(OP_SENDB) { /* A B C R(A) := call(R(A),Syms(B),R(A+1),...,R(A+C),&R(A+C+1))*/ /* fall through */ }; L_SEND: CASE(OP_SEND) { /* A B C R(A) := call(R(A),Syms(B),R(A+1),...,R(A+C)) */ int a = GETARG_A(i); int n = GETARG_C(i); int argc = (n == CALL_MAXARGS) ? -1 : n; int bidx = (argc < 0) ? a+2 : a+n+1; mrb_method_t m; struct RClass *c; mrb_callinfo *ci = mrb->c->ci; mrb_value recv, blk; mrb_sym mid = syms[GETARG_B(i)]; mrb_assert(bidx < ci->nregs); recv = regs[a]; if (GET_OPCODE(i) != OP_SENDB) { SET_NIL_VALUE(regs[bidx]); blk = regs[bidx]; } else { blk = regs[bidx]; if (!mrb_nil_p(blk) && mrb_type(blk) != MRB_TT_PROC) { blk = mrb_convert_type(mrb, blk, MRB_TT_PROC, "Proc", "to_proc"); /* The stack might have been reallocated during mrb_convert_type(), see #3622 */ regs[bidx] = blk; } } c = mrb_class(mrb, recv); m = mrb_method_search_vm(mrb, &c, mid); if (MRB_METHOD_UNDEF_P(m)) { mrb_sym missing = mrb_intern_lit(mrb, "method_missing"); m = mrb_method_search_vm(mrb, &c, missing); if (MRB_METHOD_UNDEF_P(m) || (missing == mrb->c->ci->mid && mrb_obj_eq(mrb, regs[0], recv))) { mrb_value args = (argc < 0) ? regs[a+1] : mrb_ary_new_from_values(mrb, n, regs+a+1); ERR_PC_SET(mrb, pc); mrb_method_missing(mrb, mid, recv, args); } if (argc >= 0) { if (a+2 >= irep->nregs) { stack_extend(mrb, a+3); } regs[a+1] = mrb_ary_new_from_values(mrb, n, regs+a+1); regs[a+2] = blk; argc = -1; } mrb_ary_unshift(mrb, regs[a+1], mrb_symbol_value(mid)); mid = missing; } /* push callinfo */ ci = cipush(mrb); ci->mid = mid; ci->stackent = mrb->c->stack; ci->target_class = c; ci->argc = argc; ci->pc = pc + 1; ci->acc = a; /* prepare stack */ mrb->c->stack += a; if (MRB_METHOD_CFUNC_P(m)) { ci->nregs = (argc < 0) ?
3 : n+2; if (MRB_METHOD_PROC_P(m)) { struct RProc *p = MRB_METHOD_PROC(m); ci->proc = p; recv = p->body.func(mrb, recv); } else { recv = MRB_METHOD_FUNC(m)(mrb, recv); } mrb_gc_arena_restore(mrb, ai); mrb_gc_arena_shrink(mrb, ai); if (mrb->exc) goto L_RAISE; ci = mrb->c->ci; if (GET_OPCODE(i) == OP_SENDB) { if (mrb_type(blk) == MRB_TT_PROC) { struct RProc *p = mrb_proc_ptr(blk); if (p && !MRB_PROC_STRICT_P(p) && MRB_PROC_ENV(p) == ci[-1].env) { p->flags |= MRB_PROC_ORPHAN; } } } if (!ci->target_class) { /* return from context modifying method (resume/yield) */ if (ci->acc == CI_ACC_RESUMED) { mrb->jmp = prev_jmp; return recv; } else { mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc)); proc = ci[-1].proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; } } mrb->c->stack[0] = recv; /* pop stackpos */ mrb->c->stack = ci->stackent; pc = ci->pc; cipop(mrb); JUMP; } else { /* setup environment for calling method */ proc = ci->proc = MRB_METHOD_PROC(m); irep = proc->body.irep; pool = irep->pool; syms = irep->syms; ci->nregs = irep->nregs; stack_extend(mrb, (argc < 0 && ci->nregs < 3) ? 3 : ci->nregs); pc = irep->iseq; JUMP; } } CASE(OP_FSEND) { /* A B C R(A) := fcall(R(A),Syms(B),R(A+1),... ,R(A+C-1)) */ /* not implemented yet */ NEXT; } CASE(OP_CALL) { /* A R(A) := self.call(frame.argc, frame.argv) */ mrb_callinfo *ci; mrb_value recv = mrb->c->stack[0]; struct RProc *m = mrb_proc_ptr(recv); /* replace callinfo */ ci = mrb->c->ci; ci->target_class = MRB_PROC_TARGET_CLASS(m); ci->proc = m; if (MRB_PROC_ENV_P(m)) { mrb_sym mid; struct REnv *e = MRB_PROC_ENV(m); mid = e->mid; if (mid) ci->mid = mid; if (!e->stack) { e->stack = mrb->c->stack; } } /* prepare stack */ if (MRB_PROC_CFUNC_P(m)) { recv = MRB_PROC_CFUNC(m)(mrb, recv); mrb_gc_arena_restore(mrb, ai); mrb_gc_arena_shrink(mrb, ai); if (mrb->exc) goto L_RAISE; /* pop stackpos */ ci = mrb->c->ci; mrb->c->stack = ci->stackent; regs[ci->acc] = recv; pc = ci->pc; cipop(mrb); irep = mrb->c->ci->proc->body.irep; pool = irep->pool; syms = irep->syms; JUMP; } else { /* setup environment for calling method */ proc = m; irep = m->body.irep; if (!irep) { mrb->c->stack[0] = mrb_nil_value(); goto L_RETURN; } pool = irep->pool; syms = irep->syms; ci->nregs = irep->nregs; stack_extend(mrb, ci->nregs); if (ci->argc < 0) { if (irep->nregs > 3) { stack_clear(regs+3, irep->nregs-3); } } else if (ci->argc+2 < irep->nregs) { stack_clear(regs+ci->argc+2, irep->nregs-ci->argc-2); } if (MRB_PROC_ENV_P(m)) { regs[0] = MRB_PROC_ENV(m)->stack[0]; } pc = irep->iseq; JUMP; } } CASE(OP_SUPER) { /* A C R(A) := super(R(A+1),... ,R(A+C+1)) */ int a = GETARG_A(i); int n = GETARG_C(i); int argc = (n == CALL_MAXARGS) ? -1 : n; int bidx = (argc < 0) ? 
a+2 : a+n+1; mrb_method_t m; struct RClass *c; mrb_callinfo *ci = mrb->c->ci; mrb_value recv, blk; mrb_sym mid = ci->mid; struct RClass* target_class = MRB_PROC_TARGET_CLASS(ci->proc); mrb_assert(bidx < ci->nregs); if (mid == 0 || !target_class) { mrb_value exc = mrb_exc_new_str_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method"); mrb_exc_set(mrb, exc); goto L_RAISE; } if (target_class->tt == MRB_TT_MODULE) { target_class = ci->target_class; if (target_class->tt != MRB_TT_ICLASS) { mrb_value exc = mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "superclass info lost [mruby limitations]"); mrb_exc_set(mrb, exc); goto L_RAISE; } } recv = regs[0]; if (!mrb_obj_is_kind_of(mrb, recv, target_class)) { mrb_value exc = mrb_exc_new_str_lit(mrb, E_TYPE_ERROR, "self has wrong type to call super in this context"); mrb_exc_set(mrb, exc); goto L_RAISE; } blk = regs[bidx]; if (!mrb_nil_p(blk) && mrb_type(blk) != MRB_TT_PROC) { blk = mrb_convert_type(mrb, blk, MRB_TT_PROC, "Proc", "to_proc"); /* The stack or ci stack might have been reallocated during mrb_convert_type(), see #3622 and #3784 */ regs[bidx] = blk; ci = mrb->c->ci; } c = target_class->super; m = mrb_method_search_vm(mrb, &c, mid); if (MRB_METHOD_UNDEF_P(m)) { mrb_sym missing = mrb_intern_lit(mrb, "method_missing"); if (mid != missing) { c = mrb_class(mrb, recv); } m = mrb_method_search_vm(mrb, &c, missing); if (MRB_METHOD_UNDEF_P(m)) { mrb_value args = (argc < 0) ? regs[a+1] : mrb_ary_new_from_values(mrb, n, regs+a+1); ERR_PC_SET(mrb, pc); mrb_method_missing(mrb, mid, recv, args); } mid = missing; if (argc >= 0) { if (a+2 >= ci->nregs) { stack_extend(mrb, a+3); } regs[a+1] = mrb_ary_new_from_values(mrb, n, regs+a+1); regs[a+2] = blk; argc = -1; } mrb_ary_unshift(mrb, regs[a+1], mrb_symbol_value(ci->mid)); } /* push callinfo */ ci = cipush(mrb); ci->mid = mid; ci->stackent = mrb->c->stack; ci->target_class = c; ci->pc = pc + 1; ci->argc = argc; /* prepare stack */ mrb->c->stack += a; mrb->c->stack[0] = recv; if (MRB_METHOD_CFUNC_P(m)) { mrb_value v; ci->nregs = (argc < 0) ? 3 : n+2; if (MRB_METHOD_PROC_P(m)) { ci->proc = MRB_METHOD_PROC(m); } v = MRB_METHOD_CFUNC(m)(mrb, recv); mrb_gc_arena_restore(mrb, ai); if (mrb->exc) goto L_RAISE; ci = mrb->c->ci; if (!ci->target_class) { /* return from context modifying method (resume/yield) */ if (ci->acc == CI_ACC_RESUMED) { mrb->jmp = prev_jmp; return v; } else { mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc)); proc = ci[-1].proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; } } mrb->c->stack[0] = v; /* pop stackpos */ mrb->c->stack = ci->stackent; pc = ci->pc; cipop(mrb); JUMP; } else { /* fill callinfo */ ci->acc = a; /* setup environment for calling method */ proc = ci->proc = MRB_METHOD_PROC(m); irep = proc->body.irep; pool = irep->pool; syms = irep->syms; ci->nregs = irep->nregs; stack_extend(mrb, (argc < 0 && ci->nregs < 3) ? 
3 : ci->nregs); pc = irep->iseq; JUMP; } } CASE(OP_ARGARY) { /* A Bx R(A) := argument array (16=6:1:5:4) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); int m1 = (bx>>10)&0x3f; int r = (bx>>9)&0x1; int m2 = (bx>>4)&0x1f; int lv = (bx>>0)&0xf; mrb_value *stack; if (mrb->c->ci->mid == 0 || mrb->c->ci->target_class == NULL) { mrb_value exc; L_NOSUPER: exc = mrb_exc_new_str_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method"); mrb_exc_set(mrb, exc); goto L_RAISE; } if (lv == 0) stack = regs + 1; else { struct REnv *e = uvenv(mrb, lv-1); if (!e) goto L_NOSUPER; if (MRB_ENV_STACK_LEN(e) <= m1+r+m2+1) goto L_NOSUPER; stack = e->stack + 1; } if (r == 0) { regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack); } else { mrb_value *pp = NULL; struct RArray *rest; int len = 0; if (mrb_array_p(stack[m1])) { struct RArray *ary = mrb_ary_ptr(stack[m1]); pp = ARY_PTR(ary); len = (int)ARY_LEN(ary); } regs[a] = mrb_ary_new_capa(mrb, m1+len+m2); rest = mrb_ary_ptr(regs[a]); if (m1 > 0) { stack_copy(ARY_PTR(rest), stack, m1); } if (len > 0) { stack_copy(ARY_PTR(rest)+m1, pp, len); } if (m2 > 0) { stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2); } ARY_SET_LEN(rest, m1+len+m2); } regs[a+1] = stack[m1+r+m2]; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ENTER) { /* Ax arg setup according to flags (23=5:5:1:5:5:1:1) */ /* number of optional arguments times OP_JMP should follow */ mrb_aspec ax = GETARG_Ax(i); int m1 = MRB_ASPEC_REQ(ax); int o = MRB_ASPEC_OPT(ax); int r = MRB_ASPEC_REST(ax); int m2 = MRB_ASPEC_POST(ax); /* unused int k = MRB_ASPEC_KEY(ax); int kd = MRB_ASPEC_KDICT(ax); int b = MRB_ASPEC_BLOCK(ax); */ int argc = mrb->c->ci->argc; mrb_value *argv = regs+1; mrb_value *argv0 = argv; int len = m1 + o + r + m2; mrb_value *blk = &argv[argc < 0 ? 1 : argc]; if (argc < 0) { struct RArray *ary = mrb_ary_ptr(regs[1]); argv = ARY_PTR(ary); argc = (int)ARY_LEN(ary); mrb_gc_protect(mrb, regs[1]); } if (mrb->c->ci->proc && MRB_PROC_STRICT_P(mrb->c->ci->proc)) { if (argc >= 0) { if (argc < m1 + m2 || (r == 0 && argc > len)) { argnum_error(mrb, m1+m2); goto L_RAISE; } } } else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) { mrb_gc_protect(mrb, argv[0]); argc = (int)RARRAY_LEN(argv[0]); argv = RARRAY_PTR(argv[0]); } if (argc < len) { int mlen = m2; if (argc < m1+m2) { if (m1 < argc) mlen = argc - m1; else mlen = 0; } regs[len+1] = *blk; /* move block */ SET_NIL_VALUE(regs[argc+1]); if (argv0 != argv) { value_move(&regs[1], argv, argc-mlen); /* m1 + o */ } if (argc < m1) { stack_clear(&regs[argc+1], m1-argc); } if (mlen) { value_move(&regs[len-m2+1], &argv[argc-mlen], mlen); } if (mlen < m2) { stack_clear(&regs[len-m2+mlen+1], m2-mlen); } if (r) { regs[m1+o+1] = mrb_ary_new_capa(mrb, 0); } if (o == 0 || argc < m1+m2) pc++; else pc += argc - m1 - m2 + 1; } else { int rnum = 0; if (argv0 != argv) { regs[len+1] = *blk; /* move block */ value_move(&regs[1], argv, m1+o); } if (r) { rnum = argc-m1-o-m2; regs[m1+o+1] = mrb_ary_new_from_values(mrb, rnum, argv+m1+o); } if (m2) { if (argc-m2 > m1) { value_move(&regs[m1+o+r+1], &argv[m1+o+rnum], m2); } } if (argv0 == argv) { regs[len+1] = *blk; /* move block */ } pc += o + 1; } mrb->c->ci->argc = len; /* clear local (but non-argument) variables */ if (irep->nlocals-len-2 > 0) { stack_clear(&regs[len+2], irep->nlocals-len-2); } JUMP; } CASE(OP_KARG) { /* A B C R(A) := kdict[Syms(B)]; if C kdict.rm(Syms(B)) */ /* if C == 2; raise unless kdict.empty? 
*/ /* OP_JMP should follow to skip init code */ NEXT; } CASE(OP_KDICT) { /* A C R(A) := kdict */ NEXT; } L_RETURN: i = MKOP_AB(OP_RETURN, GETARG_A(i), OP_R_NORMAL); /* fall through */ CASE(OP_RETURN) { /* A B return R(A) (B=normal,in-block return/break) */ mrb_callinfo *ci; #define ecall_adjust() do {\ ptrdiff_t cioff = ci - mrb->c->cibase;\ ecall(mrb);\ ci = mrb->c->cibase + cioff;\ } while (0) ci = mrb->c->ci; if (ci->mid) { mrb_value blk; if (ci->argc < 0) { blk = regs[2]; } else { blk = regs[ci->argc+1]; } if (mrb_type(blk) == MRB_TT_PROC) { struct RProc *p = mrb_proc_ptr(blk); if (!MRB_PROC_STRICT_P(p) && ci > mrb->c->cibase && MRB_PROC_ENV(p) == ci[-1].env) { p->flags |= MRB_PROC_ORPHAN; } } } if (mrb->exc) { mrb_callinfo *ci0; L_RAISE: ci0 = ci = mrb->c->ci; if (ci == mrb->c->cibase) { if (ci->ridx == 0) goto L_FTOP; goto L_RESCUE; } while (ci[0].ridx == ci[-1].ridx) { cipop(mrb); mrb->c->stack = ci->stackent; if (ci->acc == CI_ACC_SKIP && prev_jmp) { mrb->jmp = prev_jmp; MRB_THROW(prev_jmp); } ci = mrb->c->ci; if (ci == mrb->c->cibase) { if (ci->ridx == 0) { L_FTOP: /* fiber top */ if (mrb->c == mrb->root_c) { mrb->c->stack = mrb->c->stbase; goto L_STOP; } else { struct mrb_context *c = mrb->c; while (c->eidx > ci->epos) { ecall_adjust(); } if (c->fib) { mrb_write_barrier(mrb, (struct RBasic*)c->fib); } mrb->c->status = MRB_FIBER_TERMINATED; mrb->c = c->prev; c->prev = NULL; goto L_RAISE; } } break; } /* call ensure only when we skip this callinfo */ if (ci[0].ridx == ci[-1].ridx) { while (mrb->c->eidx > ci->epos) { ecall_adjust(); } } } L_RESCUE: if (ci->ridx == 0) goto L_STOP; proc = ci->proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; if (ci < ci0) { mrb->c->stack = ci[1].stackent; } stack_extend(mrb, irep->nregs); pc = mrb->c->rescue[--ci->ridx]; } else { int acc; mrb_value v; struct RProc *dst; ci = mrb->c->ci; v = regs[GETARG_A(i)]; mrb_gc_protect(mrb, v); switch (GETARG_B(i)) { case OP_R_RETURN: /* Fall through to OP_R_NORMAL otherwise */ if (ci->acc >=0 && MRB_PROC_ENV_P(proc) && !MRB_PROC_STRICT_P(proc)) { mrb_callinfo *cibase = mrb->c->cibase; dst = top_proc(mrb, proc); if (MRB_PROC_ENV_P(dst)) { struct REnv *e = MRB_PROC_ENV(dst); if (!MRB_ENV_STACK_SHARED_P(e) || e->cxt != mrb->c) { localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } } while (cibase <= ci && ci->proc != dst) { if (ci->acc < 0) { localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } ci--; } if (ci <= cibase) { localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } break; } case OP_R_NORMAL: NORMAL_RETURN: if (ci == mrb->c->cibase) { struct mrb_context *c; if (!mrb->c->prev) { /* toplevel return */ localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } if (mrb->c->prev->ci == mrb->c->prev->cibase) { mrb_value exc = mrb_exc_new_str_lit(mrb, E_FIBER_ERROR, "double resume"); mrb_exc_set(mrb, exc); goto L_RAISE; } while (mrb->c->eidx > 0) { ecall(mrb); } /* automatic yield at the end */ c = mrb->c; c->status = MRB_FIBER_TERMINATED; mrb->c = c->prev; c->prev = NULL; mrb->c->status = MRB_FIBER_RUNNING; ci = mrb->c->ci; } break; case OP_R_BREAK: if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN; if (MRB_PROC_ORPHAN_P(proc)) { mrb_value exc; L_BREAK_ERROR: exc = mrb_exc_new_str_lit(mrb, E_LOCALJUMP_ERROR, "break from proc-closure"); mrb_exc_set(mrb, exc); goto L_RAISE; } if (!MRB_PROC_ENV_P(proc) || !MRB_ENV_STACK_SHARED_P(MRB_PROC_ENV(proc))) { goto L_BREAK_ERROR; } else { struct REnv *e = MRB_PROC_ENV(proc); if (e == mrb->c->cibase->env && proc != 
mrb->c->cibase->proc) { goto L_BREAK_ERROR; } if (e->cxt != mrb->c) { goto L_BREAK_ERROR; } } while (mrb->c->eidx > mrb->c->ci->epos) { ecall_adjust(); } /* break from fiber block */ if (ci == mrb->c->cibase && ci->pc) { struct mrb_context *c = mrb->c; mrb->c = c->prev; c->prev = NULL; ci = mrb->c->ci; } if (ci->acc < 0) { mrb_gc_arena_restore(mrb, ai); mrb->c->vmexec = FALSE; mrb->exc = (struct RObject*)break_new(mrb, proc, v); mrb->jmp = prev_jmp; MRB_THROW(prev_jmp); } if (FALSE) { L_BREAK: v = ((struct RBreak*)mrb->exc)->val; proc = ((struct RBreak*)mrb->exc)->proc; mrb->exc = NULL; ci = mrb->c->ci; } mrb->c->stack = ci->stackent; proc = proc->upper; while (mrb->c->cibase < ci && ci[-1].proc != proc) { if (ci[-1].acc == CI_ACC_SKIP) { while (ci < mrb->c->ci) { cipop(mrb); } goto L_BREAK_ERROR; } ci--; } if (ci == mrb->c->cibase) { goto L_BREAK_ERROR; } break; default: /* cannot happen */ break; } while (ci < mrb->c->ci) { cipop(mrb); } ci[0].ridx = ci[-1].ridx; while (mrb->c->eidx > ci->epos) { ecall_adjust(); } if (mrb->c->vmexec && !ci->target_class) { mrb_gc_arena_restore(mrb, ai); mrb->c->vmexec = FALSE; mrb->jmp = prev_jmp; return v; } acc = ci->acc; mrb->c->stack = ci->stackent; cipop(mrb); if (acc == CI_ACC_SKIP || acc == CI_ACC_DIRECT) { mrb_gc_arena_restore(mrb, ai); mrb->jmp = prev_jmp; return v; } pc = ci->pc; ci = mrb->c->ci; DEBUG(fprintf(stderr, "from :%s\n", mrb_sym2name(mrb, ci->mid))); proc = mrb->c->ci->proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; regs[acc] = v; mrb_gc_arena_restore(mrb, ai); } JUMP; } CASE(OP_TAILCALL) { /* A B C return call(R(A),Syms(B),R(A+1),... ,R(A+C+1)) */ int a = GETARG_A(i); int b = GETARG_B(i); int n = GETARG_C(i); mrb_method_t m; struct RClass *c; mrb_callinfo *ci; mrb_value recv; mrb_sym mid = syms[b]; recv = regs[a]; c = mrb_class(mrb, recv); m = mrb_method_search_vm(mrb, &c, mid); if (MRB_METHOD_UNDEF_P(m)) { mrb_value sym = mrb_symbol_value(mid); mrb_sym missing = mrb_intern_lit(mrb, "method_missing"); m = mrb_method_search_vm(mrb, &c, missing); if (MRB_METHOD_UNDEF_P(m)) { mrb_value args; if (n == CALL_MAXARGS) { args = regs[a+1]; } else { args = mrb_ary_new_from_values(mrb, n, regs+a+1); } ERR_PC_SET(mrb, pc); mrb_method_missing(mrb, mid, recv, args); } mid = missing; if (n == CALL_MAXARGS) { mrb_ary_unshift(mrb, regs[a+1], sym); } else { value_move(regs+a+2, regs+a+1, ++n); regs[a+1] = sym; } } /* replace callinfo */ ci = mrb->c->ci; ci->mid = mid; ci->target_class = c; if (n == CALL_MAXARGS) { ci->argc = -1; } else { ci->argc = n; } /* move stack */ value_move(mrb->c->stack, &regs[a], ci->argc+1); if (MRB_METHOD_CFUNC_P(m)) { mrb_value v = MRB_METHOD_CFUNC(m)(mrb, recv); mrb->c->stack[0] = v; mrb_gc_arena_restore(mrb, ai); goto L_RETURN; } else { /* setup environment for calling method */ struct RProc *p = MRB_METHOD_PROC(m); irep = p->body.irep; pool = irep->pool; syms = irep->syms; if (ci->argc < 0) { stack_extend(mrb, (irep->nregs < 3) ? 
3 : irep->nregs); } else { stack_extend(mrb, irep->nregs); } pc = irep->iseq; } JUMP; } CASE(OP_BLKPUSH) { /* A Bx R(A) := block (16=6:1:5:4) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); int m1 = (bx>>10)&0x3f; int r = (bx>>9)&0x1; int m2 = (bx>>4)&0x1f; int lv = (bx>>0)&0xf; mrb_value *stack; if (lv == 0) stack = regs + 1; else { struct REnv *e = uvenv(mrb, lv-1); if (!e || (!MRB_ENV_STACK_SHARED_P(e) && e->mid == 0) || MRB_ENV_STACK_LEN(e) <= m1+r+m2+1) { localjump_error(mrb, LOCALJUMP_ERROR_YIELD); goto L_RAISE; } stack = e->stack + 1; } if (mrb_nil_p(stack[m1+r+m2])) { localjump_error(mrb, LOCALJUMP_ERROR_YIELD); goto L_RAISE; } regs[a] = stack[m1+r+m2]; NEXT; } #define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff)) #define OP_MATH_BODY(op,v1,v2) do {\ v1(regs[a]) = v1(regs[a]) op v2(regs[a+1]);\ } while(0) CASE(OP_ADD) { /* A B C R(A) := R(A)+R(A+1) (Syms[B]=:+,C=1)*/ int a = GETARG_A(i); /* need to check if op is overridden */ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { case TYPES2(MRB_TT_FIXNUM,MRB_TT_FIXNUM): { mrb_int x, y, z; mrb_value *regs_a = regs + a; x = mrb_fixnum(regs_a[0]); y = mrb_fixnum(regs_a[1]); if (mrb_int_add_overflow(x, y, &z)) { #ifndef MRB_WITHOUT_FLOAT SET_FLOAT_VALUE(mrb, regs_a[0], (mrb_float)x + (mrb_float)y); break; #endif } SET_INT_VALUE(regs[a], z); } break; #ifndef MRB_WITHOUT_FLOAT case TYPES2(MRB_TT_FIXNUM,MRB_TT_FLOAT): { mrb_int x = mrb_fixnum(regs[a]); mrb_float y = mrb_float(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], (mrb_float)x + y); } break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FIXNUM): #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); mrb_int y = mrb_fixnum(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], x + y); } #else OP_MATH_BODY(+,mrb_float,mrb_fixnum); #endif break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT): #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); mrb_float y = mrb_float(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], x + y); } #else OP_MATH_BODY(+,mrb_float,mrb_float); #endif break; #endif case TYPES2(MRB_TT_STRING,MRB_TT_STRING): regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); break; default: goto L_SEND; } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_SUB) { /* A B C R(A) := R(A)-R(A+1) (Syms[B]=:-,C=1)*/ int a = GETARG_A(i); /* need to check if op is overridden */ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { case TYPES2(MRB_TT_FIXNUM,MRB_TT_FIXNUM): { mrb_int x, y, z; x = mrb_fixnum(regs[a]); y = mrb_fixnum(regs[a+1]); if (mrb_int_sub_overflow(x, y, &z)) { #ifndef MRB_WITHOUT_FLOAT SET_FLOAT_VALUE(mrb, regs[a], (mrb_float)x - (mrb_float)y); break; #endif } SET_INT_VALUE(regs[a], z); } break; #ifndef MRB_WITHOUT_FLOAT case TYPES2(MRB_TT_FIXNUM,MRB_TT_FLOAT): { mrb_int x = mrb_fixnum(regs[a]); mrb_float y = mrb_float(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], (mrb_float)x - y); } break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FIXNUM): #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); mrb_int y = mrb_fixnum(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], x - y); } #else OP_MATH_BODY(-,mrb_float,mrb_fixnum); #endif break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT): #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); mrb_float y = mrb_float(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], x - y); } #else OP_MATH_BODY(-,mrb_float,mrb_float); #endif break; #endif default: goto L_SEND; } NEXT; } CASE(OP_MUL) { /* A B C R(A) := R(A)*R(A+1) (Syms[B]=:*,C=1)*/ int a = GETARG_A(i); /* need to check if op is overridden */ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { case 
TYPES2(MRB_TT_FIXNUM,MRB_TT_FIXNUM): { mrb_int x, y, z; x = mrb_fixnum(regs[a]); y = mrb_fixnum(regs[a+1]); if (mrb_int_mul_overflow(x, y, &z)) { #ifndef MRB_WITHOUT_FLOAT SET_FLOAT_VALUE(mrb, regs[a], (mrb_float)x * (mrb_float)y); break; #endif } SET_INT_VALUE(regs[a], z); } break; #ifndef MRB_WITHOUT_FLOAT case TYPES2(MRB_TT_FIXNUM,MRB_TT_FLOAT): { mrb_int x = mrb_fixnum(regs[a]); mrb_float y = mrb_float(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], (mrb_float)x * y); } break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FIXNUM): #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); mrb_int y = mrb_fixnum(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], x * y); } #else OP_MATH_BODY(*,mrb_float,mrb_fixnum); #endif break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT): #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); mrb_float y = mrb_float(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], x * y); } #else OP_MATH_BODY(*,mrb_float,mrb_float); #endif break; #endif default: goto L_SEND; } NEXT; } CASE(OP_DIV) { /* A B C R(A) := R(A)/R(A+1) (Syms[B]=:/,C=1)*/ int a = GETARG_A(i); #ifndef MRB_WITHOUT_FLOAT double x, y, f; #endif /* need to check if op is overridden */ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { case TYPES2(MRB_TT_FIXNUM,MRB_TT_FIXNUM): #ifdef MRB_WITHOUT_FLOAT { mrb_int x = mrb_fixnum(regs[a]); mrb_int y = mrb_fixnum(regs[a+1]); SET_INT_VALUE(regs[a], y ? x / y : 0); } break; #else x = (mrb_float)mrb_fixnum(regs[a]); y = (mrb_float)mrb_fixnum(regs[a+1]); break; case TYPES2(MRB_TT_FIXNUM,MRB_TT_FLOAT): x = (mrb_float)mrb_fixnum(regs[a]); y = mrb_float(regs[a+1]); break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FIXNUM): x = mrb_float(regs[a]); y = (mrb_float)mrb_fixnum(regs[a+1]); break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT): x = mrb_float(regs[a]); y = mrb_float(regs[a+1]); break; #endif default: goto L_SEND; } #ifndef MRB_WITHOUT_FLOAT if (y == 0) { if (x > 0) f = INFINITY; else if (x < 0) f = -INFINITY; else /* if (x == 0) */ f = NAN; } else { f = x / y; } SET_FLOAT_VALUE(mrb, regs[a], f); #endif NEXT; } CASE(OP_ADDI) { /* A B C R(A) := R(A)+C (Syms[B]=:+)*/ int a = GETARG_A(i); /* need to check if + is overridden */ switch (mrb_type(regs[a])) { case MRB_TT_FIXNUM: { mrb_int x = mrb_fixnum(regs[a]); mrb_int y = GETARG_C(i); mrb_int z; if (mrb_int_add_overflow(x, y, &z)) { #ifndef MRB_WITHOUT_FLOAT SET_FLOAT_VALUE(mrb, regs[a], (mrb_float)x + (mrb_float)y); break; #endif } SET_INT_VALUE(regs[a], z); } break; #ifndef MRB_WITHOUT_FLOAT case MRB_TT_FLOAT: #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); SET_FLOAT_VALUE(mrb, regs[a], x + GETARG_C(i)); } #else mrb_float(regs[a]) += GETARG_C(i); #endif break; #endif default: SET_INT_VALUE(regs[a+1], GETARG_C(i)); i = MKOP_ABC(OP_SEND, a, GETARG_B(i), 1); goto L_SEND; } NEXT; } CASE(OP_SUBI) { /* A B C R(A) := R(A)-C (Syms[B]=:-)*/ int a = GETARG_A(i); mrb_value *regs_a = regs + a; /* need to check if + is overridden */ switch (mrb_type(regs_a[0])) { case MRB_TT_FIXNUM: { mrb_int x = mrb_fixnum(regs_a[0]); mrb_int y = GETARG_C(i); mrb_int z; if (mrb_int_sub_overflow(x, y, &z)) { #ifndef MRB_WITHOUT_FLOAT SET_FLOAT_VALUE(mrb, regs_a[0], (mrb_float)x - (mrb_float)y); break; #endif } SET_INT_VALUE(regs_a[0], z); } break; #ifndef MRB_WITHOUT_FLOAT case MRB_TT_FLOAT: #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); SET_FLOAT_VALUE(mrb, regs[a], x - GETARG_C(i)); } #else mrb_float(regs_a[0]) -= GETARG_C(i); #endif break; #endif default: SET_INT_VALUE(regs_a[1], GETARG_C(i)); i = MKOP_ABC(OP_SEND, a, GETARG_B(i), 1); goto L_SEND; } NEXT; } 
#define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1])) #ifdef MRB_WITHOUT_FLOAT #define OP_CMP(op) do {\ int result;\ /* need to check if - is overridden */\ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\ case TYPES2(MRB_TT_FIXNUM,MRB_TT_FIXNUM):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\ break;\ default:\ goto L_SEND;\ }\ if (result) {\ SET_TRUE_VALUE(regs[a]);\ }\ else {\ SET_FALSE_VALUE(regs[a]);\ }\ } while(0) #else #define OP_CMP(op) do {\ int result;\ /* need to check if - is overridden */\ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\ case TYPES2(MRB_TT_FIXNUM,MRB_TT_FIXNUM):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\ break;\ case TYPES2(MRB_TT_FIXNUM,MRB_TT_FLOAT):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_float);\ break;\ case TYPES2(MRB_TT_FLOAT,MRB_TT_FIXNUM):\ result = OP_CMP_BODY(op,mrb_float,mrb_fixnum);\ break;\ case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\ result = OP_CMP_BODY(op,mrb_float,mrb_float);\ break;\ default:\ goto L_SEND;\ }\ if (result) {\ SET_TRUE_VALUE(regs[a]);\ }\ else {\ SET_FALSE_VALUE(regs[a]);\ }\ } while(0) #endif CASE(OP_EQ) { /* A B C R(A) := R(A)==R(A+1) (Syms[B]=:==,C=1)*/ int a = GETARG_A(i); if (mrb_obj_eq(mrb, regs[a], regs[a+1])) { SET_TRUE_VALUE(regs[a]); } else { OP_CMP(==); } NEXT; } CASE(OP_LT) { /* A B C R(A) := R(A)<R(A+1) (Syms[B]=:<,C=1)*/ int a = GETARG_A(i); OP_CMP(<); NEXT; } CASE(OP_LE) { /* A B C R(A) := R(A)<=R(A+1) (Syms[B]=:<=,C=1)*/ int a = GETARG_A(i); OP_CMP(<=); NEXT; } CASE(OP_GT) { /* A B C R(A) := R(A)>R(A+1) (Syms[B]=:>,C=1)*/ int a = GETARG_A(i); OP_CMP(>); NEXT; } CASE(OP_GE) { /* A B C R(A) := R(A)>=R(A+1) (Syms[B]=:>=,C=1)*/ int a = GETARG_A(i); OP_CMP(>=); NEXT; } CASE(OP_ARRAY) { /* A B C R(A) := ary_new(R(B),R(B+1)..R(B+C)) */ int a = GETARG_A(i); int b = GETARG_B(i); int c = GETARG_C(i); mrb_value v = mrb_ary_new_from_values(mrb, c, &regs[b]); regs[a] = v; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ARYCAT) { /* A B mrb_ary_concat(R(A),R(B)) */ int a = GETARG_A(i); int b = GETARG_B(i); mrb_value splat = mrb_ary_splat(mrb, regs[b]); mrb_ary_concat(mrb, regs[a], splat); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ARYPUSH) { /* A B R(A).push(R(B)) */ int a = GETARG_A(i); int b = GETARG_B(i); mrb_ary_push(mrb, regs[a], regs[b]); NEXT; } CASE(OP_AREF) { /* A B C R(A) := R(B)[C] */ int a = GETARG_A(i); int b = GETARG_B(i); int c = GETARG_C(i); mrb_value v = regs[b]; if (!mrb_array_p(v)) { if (c == 0) { regs[a] = v; } else { SET_NIL_VALUE(regs[a]); } } else { v = mrb_ary_ref(mrb, v, c); regs[a] = v; } NEXT; } CASE(OP_ASET) { /* A B C R(B)[C] := R(A) */ int a = GETARG_A(i); int b = GETARG_B(i); int c = GETARG_C(i); mrb_ary_set(mrb, regs[b], c, regs[a]); NEXT; } CASE(OP_APOST) { /* A B C *R(A),R(A+1)..R(A+C) := R(A) */ int a = GETARG_A(i); mrb_value v = regs[a]; int pre = GETARG_B(i); int post = GETARG_C(i); struct RArray *ary; int len, idx; if (!mrb_array_p(v)) { v = mrb_ary_new_from_values(mrb, 1, &regs[a]); } ary = mrb_ary_ptr(v); len = (int)ARY_LEN(ary); if (len > pre + post) { v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre); regs[a++] = v; while (post--) { regs[a++] = ARY_PTR(ary)[len-post-1]; } } else { v = mrb_ary_new_capa(mrb, 0); regs[a++] = v; for (idx=0; idx+pre<len; idx++) { regs[a+idx] = ARY_PTR(ary)[pre+idx]; } while (idx < post) { SET_NIL_VALUE(regs[a+idx]); idx++; } } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_STRING) { /* A Bx R(A) := str_new(Lit(Bx)) */ mrb_int a = GETARG_A(i); mrb_int bx = GETARG_Bx(i); mrb_value str = mrb_str_dup(mrb, 
pool[bx]); regs[a] = str; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_STRCAT) { /* A B R(A).concat(R(B)) */ mrb_int a = GETARG_A(i); mrb_int b = GETARG_B(i); mrb_str_concat(mrb, regs[a], regs[b]); NEXT; } CASE(OP_HASH) { /* A B C R(A) := hash_new(R(B),R(B+1)..R(B+C)) */ int b = GETARG_B(i); int c = GETARG_C(i); int lim = b+c*2; mrb_value hash = mrb_hash_new_capa(mrb, c); while (b < lim) { mrb_hash_set(mrb, hash, regs[b], regs[b+1]); b+=2; } regs[GETARG_A(i)] = hash; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_LAMBDA) { /* A b c R(A) := lambda(SEQ[b],c) (b:c = 14:2) */ struct RProc *p; int a = GETARG_A(i); int b = GETARG_b(i); int c = GETARG_c(i); mrb_irep *nirep = irep->reps[b]; if (c & OP_L_CAPTURE) { p = mrb_closure_new(mrb, nirep); } else { p = mrb_proc_new(mrb, nirep); p->flags |= MRB_PROC_SCOPE; } if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT; regs[a] = mrb_obj_value(p); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_OCLASS) { /* A R(A) := ::Object */ regs[GETARG_A(i)] = mrb_obj_value(mrb->object_class); NEXT; } CASE(OP_CLASS) { /* A B R(A) := newclass(R(A),Syms(B),R(A+1)) */ struct RClass *c = 0, *baseclass; int a = GETARG_A(i); mrb_value base, super; mrb_sym id = syms[GETARG_B(i)]; base = regs[a]; super = regs[a+1]; if (mrb_nil_p(base)) { baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc); base = mrb_obj_value(baseclass); } c = mrb_vm_define_class(mrb, base, super, id); regs[a] = mrb_obj_value(c); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_MODULE) { /* A B R(A) := newmodule(R(A),Syms(B)) */ struct RClass *c = 0, *baseclass; int a = GETARG_A(i); mrb_value base; mrb_sym id = syms[GETARG_B(i)]; base = regs[a]; if (mrb_nil_p(base)) { baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc); base = mrb_obj_value(baseclass); } c = mrb_vm_define_module(mrb, base, id); regs[a] = mrb_obj_value(c); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_EXEC) { /* A Bx R(A) := blockexec(R(A),SEQ[Bx]) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_callinfo *ci; mrb_value recv = regs[a]; struct RProc *p; mrb_irep *nirep = irep->reps[bx]; /* prepare closure */ p = mrb_proc_new(mrb, nirep); p->c = NULL; mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc); MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv)); p->flags |= MRB_PROC_SCOPE; /* prepare call stack */ ci = cipush(mrb); ci->pc = pc + 1; ci->acc = a; ci->mid = 0; ci->stackent = mrb->c->stack; ci->argc = 0; ci->target_class = mrb_class_ptr(recv); /* prepare stack */ mrb->c->stack += a; /* setup block to call */ ci->proc = p; irep = p->body.irep; pool = irep->pool; syms = irep->syms; ci->nregs = irep->nregs; stack_extend(mrb, ci->nregs); stack_clear(regs+1, ci->nregs-1); pc = irep->iseq; JUMP; } CASE(OP_METHOD) { /* A B R(A).newmethod(Syms(B),R(A+1)) */ int a = GETARG_A(i); struct RClass *c = mrb_class_ptr(regs[a]); struct RProc *p = mrb_proc_ptr(regs[a+1]); mrb_method_t m; MRB_METHOD_FROM_PROC(m, p); mrb_define_method_raw(mrb, c, syms[GETARG_B(i)], m); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_SCLASS) { /* A B R(A) := R(B).singleton_class */ int a = GETARG_A(i); int b = GETARG_B(i); regs[a] = mrb_singleton_class(mrb, regs[b]); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_TCLASS) { /* A R(A) := target_class */ if (!mrb->c->ci->target_class) { mrb_value exc = mrb_exc_new_str_lit(mrb, E_TYPE_ERROR, "no target class or module"); mrb_exc_set(mrb, exc); goto L_RAISE; } regs[GETARG_A(i)] = mrb_obj_value(mrb->c->ci->target_class); NEXT; } CASE(OP_RANGE) { /* A B C R(A) := range_new(R(B),R(B+1),C) */ int b = GETARG_B(i); 
mrb_value val = mrb_range_new(mrb, regs[b], regs[b+1], GETARG_C(i)); regs[GETARG_A(i)] = val; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_DEBUG) { /* A B C debug print R(A),R(B),R(C) */ #ifdef MRB_ENABLE_DEBUG_HOOK mrb->debug_op_hook(mrb, irep, pc, regs); #else #ifndef MRB_DISABLE_STDIO printf("OP_DEBUG %d %d %d\n", GETARG_A(i), GETARG_B(i), GETARG_C(i)); #else abort(); #endif #endif NEXT; } CASE(OP_STOP) { /* stop VM */ L_STOP: while (mrb->c->eidx > 0) { ecall(mrb); } ERR_PC_CLR(mrb); mrb->jmp = prev_jmp; if (mrb->exc) { return mrb_obj_value(mrb->exc); } return regs[irep->nlocals]; } CASE(OP_ERR) { /* Bx raise RuntimeError with message Lit(Bx) */ mrb_value msg = mrb_str_dup(mrb, pool[GETARG_Bx(i)]); mrb_value exc; if (GETARG_A(i) == 0) { exc = mrb_exc_new_str(mrb, E_RUNTIME_ERROR, msg); } else { exc = mrb_exc_new_str(mrb, E_LOCALJUMP_ERROR, msg); } ERR_PC_SET(mrb, pc); mrb_exc_set(mrb, exc); goto L_RAISE; } } END_DISPATCH; #undef regs } MRB_CATCH(&c_jmp) { exc_catched = TRUE; goto RETRY_TRY_BLOCK; } MRB_END_EXC(&c_jmp); }
140,181,947,558,548,590,000,000,000,000,000,000,000
vm.c
252,079,256,979,517,780,000,000,000,000,000,000,000
[ "CWE-190" ]
CVE-2018-10191
In versions of mruby up to and including 1.4.0, an integer overflow exists in src/vm.c::mrb_vm_exec() when handling OP_GETUPVAR in the presence of deep scope nesting, resulting in a use-after-free. An attacker that can cause Ruby code to be run can use this to possibly execute arbitrary code.
https://nvd.nist.gov/vuln/detail/CVE-2018-10191
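The commit message above says the fix checks the length of the env stack before accessing the upvar. A minimal sketch of what that guard looks like in the OP_GETUPVAR handler — written against the handler visible in the func field above, as an illustration rather than the verbatim upstream diff:

CASE(OP_GETUPVAR) {
  /* A B C   R(A) := uvget(B,C) */
  int a = GETARG_A(i);
  int b = GETARG_B(i);
  int c = GETARG_C(i);
  mrb_value *regs_a = regs + a;
  struct REnv *e = uvenv(mrb, c);

  /* fix sketch: also reject an out-of-range upvar index b, not just a missing env */
  if (!e || b >= MRB_ENV_STACK_LEN(e)) {
    *regs_a = mrb_nil_value();
  }
  else {
    *regs_a = e->stack[b];
  }
  NEXT;
}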
3,724
linux
4ea77014af0d6205b05503d1c7aac6eace11d473
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/4ea77014af0d6205b05503d1c7aac6eace11d473
kernel/signal.c: avoid undefined behaviour in kill_something_info

When running kill(72057458746458112, 0) in userspace I hit the following issue.

UBSAN: Undefined behaviour in kernel/signal.c:1462:11
negation of -2147483648 cannot be represented in type 'int':

CPU: 226 PID: 9849 Comm: test Tainted: G    B ---- -------   3.10.0-327.53.58.70.x86_64_ubsan+ #116
Hardware name: Huawei Technologies Co., Ltd. RH8100 V3/BC61PBIA, BIOS BLHSV028 11/11/2014
Call Trace:
  dump_stack+0x19/0x1b
  ubsan_epilogue+0xd/0x50
  __ubsan_handle_negate_overflow+0x109/0x14e
  SYSC_kill+0x43e/0x4d0
  SyS_kill+0xe/0x10
  system_call_fastpath+0x16/0x1b

Add code to avoid the UBSAN detection.

[akpm@linux-foundation.org: tweak comment]
Link: http://lkml.kernel.org/r/1496670008-59084-1-git-send-email-zhongjiang@huawei.com
Signed-off-by: zhongjiang <zhongjiang@huawei.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
233,236,629,790,152,600,000,000,000,000,000,000,000
signal.c
43,790,563,659,993,175,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2018-10124
The kill_something_info function in kernel/signal.c in the Linux kernel before 4.13, when an unspecified architecture and compiler is used, might allow local users to cause a denial of service via an INT_MIN argument.
https://nvd.nist.gov/vuln/detail/CVE-2018-10124
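The UBSAN report in the commit message comes from negating pid in find_vpid(-pid) when pid == INT_MIN. One plausible shape of the fix is to reject that value before the process-group branch; exact placement and comment wording here are illustrative, not the verbatim patch:

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* fix sketch: -INT_MIN is undefined, refuse it before find_vpid(-pid) can negate it */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);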
3,725
linux
9903e41ae1f5d50c93f268ca3304d4d7c64b9311
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/9903e41ae1f5d50c93f268ca3304d4d7c64b9311
clk: hisilicon: hi3660: Fix potential NULL dereference in hi3660_stub_clk_probe()

platform_get_resource() may return NULL, add proper check to avoid potential NULL dereferencing. This is detected by Coccinelle semantic patch.

@@
expression pdev, res, n, t, e, e1, e2;
@@
res = platform_get_resource(pdev, t, n);
+ if (!res)
+   return -EINVAL;
... when != res == NULL
e = devm_ioremap(e1, res->start, e2);

Fixes: 4f16f7ff3bc0 ("clk: hisilicon: Add support for Hi3660 stub clocks")
Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
1
static int hi3660_stub_clk_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	unsigned int i;
	int ret;

	/* Use mailbox client without blocking */
	stub_clk_chan.cl.dev = dev;
	stub_clk_chan.cl.tx_done = NULL;
	stub_clk_chan.cl.tx_block = false;
	stub_clk_chan.cl.knows_txdone = false;

	/* Allocate mailbox channel */
	stub_clk_chan.mbox = mbox_request_channel(&stub_clk_chan.cl, 0);
	if (IS_ERR(stub_clk_chan.mbox))
		return PTR_ERR(stub_clk_chan.mbox);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	freq_reg = devm_ioremap(dev, res->start, resource_size(res));
	if (!freq_reg)
		return -ENOMEM;

	freq_reg += HI3660_STUB_CLOCK_DATA;

	for (i = 0; i < HI3660_CLK_STUB_NUM; i++) {
		ret = devm_clk_hw_register(&pdev->dev, &hi3660_stub_clks[i].hw);
		if (ret)
			return ret;
	}

	return devm_of_clk_add_hw_provider(&pdev->dev, hi3660_stub_clk_hw_get,
					   hi3660_stub_clks);
}
198,852,483,878,198,200,000,000,000,000,000,000,000
clk-hi3660-stub.c
154,652,412,877,365,080,000,000,000,000,000,000,000
[ "CWE-476" ]
CVE-2018-10074
The hi3660_stub_clk_probe function in drivers/clk/hisilicon/clk-hi3660-stub.c in the Linux kernel before 4.16 allows local users to cause a denial of service (NULL pointer dereference) by triggering a failure of resource retrieval.
https://nvd.nist.gov/vuln/detail/CVE-2018-10074
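The Coccinelle patch quoted in the commit message gives the fix directly: test the platform_get_resource() result before dereferencing it. Applied to the probe function above, the relevant lines would read roughly:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)                 /* fix: res may be NULL, bail out before res->start */
		return -EINVAL;
	freq_reg = devm_ioremap(dev, res->start, resource_size(res));
	if (!freq_reg)
		return -ENOMEM;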
3,729
mbedtls
740b218386083dc708ce98ccc94a63a95cd5629e
https://github.com/ARMmbed/mbedtls
https://github.com/ARMmbed/mbedtls/commit/740b218386083dc708ce98ccc94a63a95cd5629e
Add bounds check before length read
1
static int ssl_parse_server_psk_hint( mbedtls_ssl_context *ssl,
                                      unsigned char **p,
                                      unsigned char *end )
{
    int ret = MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE;
    size_t len;
    ((void) ssl);

    /*
     * PSK parameters:
     *
     * opaque psk_identity_hint<0..2^16-1>;
     */
    len = (*p)[0] << 8 | (*p)[1];
    *p += 2;

    if( (*p) + len > end )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad server key exchange message "
                                    "(psk_identity_hint length)" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE );
    }

    /*
     * Note: we currently ignore the PKS identity hint, as we only allow one
     * PSK to be provisionned on the client. This could be changed later if
     * someone needs that feature.
     */
    *p += len;
    ret = 0;

    return( ret );
}
249,660,299,586,144,350,000,000,000,000,000,000,000
ssl_cli.c
312,635,158,393,935,170,000,000,000,000,000,000,000
[ "CWE-125" ]
CVE-2018-9989
ARM mbed TLS before 2.1.11, before 2.7.2, and before 2.8.0 has a buffer over-read in ssl_parse_server_psk_hint() that could cause a crash on invalid input.
https://nvd.nist.gov/vuln/detail/CVE-2018-9989
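The vulnerable code reads the 2-byte psk_identity_hint length from (*p) before checking that those two bytes actually exist, so a truncated ServerKeyExchange triggers the over-read. A sketch of the missing guard named in the commit message — debug text and error code mirror the function's existing style and are not necessarily the verbatim patch:

    /* fix sketch: the length field itself must fit inside the buffer */
    if( end - (*p) < 2 )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad server key exchange message "
                                    "(psk_identity_hint length)" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE );
    }

    len = (*p)[0] << 8 | (*p)[1];
    *p += 2;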
3,730
mbedtls
a1098f81c252b317ad34ea978aea2bc47760b215
https://github.com/ARMmbed/mbedtls
https://github.com/ARMmbed/mbedtls/commit/a1098f81c252b317ad34ea978aea2bc47760b215
Add bounds check before signature length read
1
static int ssl_parse_server_key_exchange( mbedtls_ssl_context *ssl ) { int ret; const mbedtls_ssl_ciphersuite_t *ciphersuite_info = ssl->transform_negotiate->ciphersuite_info; unsigned char *p = NULL, *end = NULL; MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> parse server key exchange" ) ); #if defined(MBEDTLS_KEY_EXCHANGE_RSA_ENABLED) if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_RSA ) { MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip parse server key exchange" ) ); ssl->state++; return( 0 ); } ((void) p); ((void) end); #endif #if defined(MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED) if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDH_RSA || ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA ) { if( ( ret = ssl_get_ecdh_params_from_cert( ssl ) ) != 0 ) { MBEDTLS_SSL_DEBUG_RET( 1, "ssl_get_ecdh_params_from_cert", ret ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE ); return( ret ); } MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip parse server key exchange" ) ); ssl->state++; return( 0 ); } ((void) p); ((void) end); #endif /* MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED || MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED */ if( ( ret = mbedtls_ssl_read_record( ssl ) ) != 0 ) { MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_read_record", ret ); return( ret ); } if( ssl->in_msgtype != MBEDTLS_SSL_MSG_HANDSHAKE ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad server key exchange message" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_UNEXPECTED_MESSAGE ); return( MBEDTLS_ERR_SSL_UNEXPECTED_MESSAGE ); } /* * ServerKeyExchange may be skipped with PSK and RSA-PSK when the server * doesn't use a psk_identity_hint */ if( ssl->in_msg[0] != MBEDTLS_SSL_HS_SERVER_KEY_EXCHANGE ) { if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_PSK || ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_RSA_PSK ) { /* Current message is probably either * CertificateRequest or ServerHelloDone */ ssl->keep_current_message = 1; goto exit; } MBEDTLS_SSL_DEBUG_MSG( 1, ( "server key exchange message must " "not be skipped" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_UNEXPECTED_MESSAGE ); return( MBEDTLS_ERR_SSL_UNEXPECTED_MESSAGE ); } p = ssl->in_msg + mbedtls_ssl_hs_hdr_len( ssl ); end = ssl->in_msg + ssl->in_hslen; MBEDTLS_SSL_DEBUG_BUF( 3, "server key exchange", p, end - p ); #if defined(MBEDTLS_KEY_EXCHANGE__SOME__PSK_ENABLED) if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_PSK || ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_RSA_PSK || ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_DHE_PSK || ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_PSK ) { if( ssl_parse_server_psk_hint( ssl, &p, end ) != 0 ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad server key exchange message" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER ); return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE ); } } /* FALLTROUGH */ #endif /* MBEDTLS_KEY_EXCHANGE__SOME__PSK_ENABLED */ #if defined(MBEDTLS_KEY_EXCHANGE_PSK_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED) if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_PSK || ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_RSA_PSK ) ; /* nothing more to do */ else #endif /* MBEDTLS_KEY_EXCHANGE_PSK_ENABLED || MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED */ #if defined(MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED) || 
\ defined(MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED) if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_DHE_RSA || ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_DHE_PSK ) { if( ssl_parse_server_dh_params( ssl, &p, end ) != 0 ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad server key exchange message" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER ); return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE ); } } else #endif /* MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED || MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED */ #if defined(MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED) if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_RSA || ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_PSK || ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA ) { if( ssl_parse_server_ecdh_params( ssl, &p, end ) != 0 ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad server key exchange message" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER ); return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE ); } } else #endif /* MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED || MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED || MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED */ #if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED) if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECJPAKE ) { ret = mbedtls_ecjpake_read_round_two( &ssl->handshake->ecjpake_ctx, p, end - p ); if( ret != 0 ) { MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecjpake_read_round_two", ret ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER ); return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE ); } } else #endif /* MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */ { MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) ); return( MBEDTLS_ERR_SSL_INTERNAL_ERROR ); } #if defined(MBEDTLS_KEY_EXCHANGE__WITH_SERVER_SIGNATURE__ENABLED) if( mbedtls_ssl_ciphersuite_uses_server_signature( ciphersuite_info ) ) { size_t sig_len, hashlen; unsigned char hash[64]; mbedtls_md_type_t md_alg = MBEDTLS_MD_NONE; mbedtls_pk_type_t pk_alg = MBEDTLS_PK_NONE; unsigned char *params = ssl->in_msg + mbedtls_ssl_hs_hdr_len( ssl ); size_t params_len = p - params; /* * Handle the digitally-signed structure */ #if defined(MBEDTLS_SSL_PROTO_TLS1_2) if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 ) { if( ssl_parse_signature_algorithm( ssl, &p, end, &md_alg, &pk_alg ) != 0 ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad server key exchange message" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER ); return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE ); } if( pk_alg != mbedtls_ssl_get_ciphersuite_sig_pk_alg( ciphersuite_info ) ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad server key exchange message" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER ); return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE ); } } else #endif /* MBEDTLS_SSL_PROTO_TLS1_2 */ #if defined(MBEDTLS_SSL_PROTO_SSL3) || defined(MBEDTLS_SSL_PROTO_TLS1) || \ defined(MBEDTLS_SSL_PROTO_TLS1_1) if( ssl->minor_ver < MBEDTLS_SSL_MINOR_VERSION_3 ) { pk_alg = mbedtls_ssl_get_ciphersuite_sig_pk_alg( ciphersuite_info ); /* Default hash for ECDSA is SHA-1 */ if( pk_alg == MBEDTLS_PK_ECDSA && md_alg == MBEDTLS_MD_NONE ) md_alg = MBEDTLS_MD_SHA1; } else #endif { 
MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) ); return( MBEDTLS_ERR_SSL_INTERNAL_ERROR ); } /* * Read signature */ sig_len = ( p[0] << 8 ) | p[1]; p += 2; if( end != p + sig_len ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad server key exchange message" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR ); return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE ); } MBEDTLS_SSL_DEBUG_BUF( 3, "signature", p, sig_len ); /* * Compute the hash that has been signed */ #if defined(MBEDTLS_SSL_PROTO_SSL3) || defined(MBEDTLS_SSL_PROTO_TLS1) || \ defined(MBEDTLS_SSL_PROTO_TLS1_1) if( md_alg == MBEDTLS_MD_NONE ) { hashlen = 36; ret = mbedtls_ssl_get_key_exchange_md_ssl_tls( ssl, hash, params, params_len ); if( ret != 0 ) return( ret ); } else #endif /* MBEDTLS_SSL_PROTO_SSL3 || MBEDTLS_SSL_PROTO_TLS1 || \ MBEDTLS_SSL_PROTO_TLS1_1 */ #if defined(MBEDTLS_SSL_PROTO_TLS1) || defined(MBEDTLS_SSL_PROTO_TLS1_1) || \ defined(MBEDTLS_SSL_PROTO_TLS1_2) if( md_alg != MBEDTLS_MD_NONE ) { /* Info from md_alg will be used instead */ hashlen = 0; ret = mbedtls_ssl_get_key_exchange_md_tls1_2( ssl, hash, params, params_len, md_alg ); if( ret != 0 ) return( ret ); } else #endif /* MBEDTLS_SSL_PROTO_TLS1 || MBEDTLS_SSL_PROTO_TLS1_1 || \ MBEDTLS_SSL_PROTO_TLS1_2 */ { MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) ); return( MBEDTLS_ERR_SSL_INTERNAL_ERROR ); } MBEDTLS_SSL_DEBUG_BUF( 3, "parameters hash", hash, hashlen != 0 ? hashlen : (unsigned int) ( mbedtls_md_get_size( mbedtls_md_info_from_type( md_alg ) ) ) ); if( ssl->session_negotiate->peer_cert == NULL ) { MBEDTLS_SSL_DEBUG_MSG( 2, ( "certificate required" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE ); return( MBEDTLS_ERR_SSL_UNEXPECTED_MESSAGE ); } /* * Verify signature */ if( ! mbedtls_pk_can_do( &ssl->session_negotiate->peer_cert->pk, pk_alg ) ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad server key exchange message" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE ); return( MBEDTLS_ERR_SSL_PK_TYPE_MISMATCH ); } if( ( ret = mbedtls_pk_verify( &ssl->session_negotiate->peer_cert->pk, md_alg, hash, hashlen, p, sig_len ) ) != 0 ) { mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_DECRYPT_ERROR ); MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_pk_verify", ret ); return( ret ); } } #endif /* MBEDTLS_KEY_EXCHANGE__WITH_SERVER_SIGNATURE__ENABLED */ exit: ssl->state++; MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= parse server key exchange" ) ); return( 0 ); }
126,675,777,471,818,340,000,000,000,000,000,000,000
ssl_cli.c
245,686,137,588,342,770,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2018-9988
ARM mbed TLS before 2.1.11, before 2.7.2, and before 2.8.0 has a buffer over-read in ssl_parse_server_key_exchange() that could cause a crash on invalid input.
https://nvd.nist.gov/vuln/detail/CVE-2018-9988
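Here the over-read is in the signature parsing: sig_len = ( p[0] << 8 ) | p[1] is computed before verifying that two bytes remain between p and end. A hedged sketch of the bounds check the commit message describes, following the function's own alert/error conventions:

        /* fix sketch: 2 bytes of signature length must still be available */
        if( p > end - 2 )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad server key exchange message" ) );
            mbedtls_ssl_send_alert_message( ssl,
                                            MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                            MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
            return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE );
        }
        sig_len = ( p[0] << 8 ) | p[1];
        p += 2;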
3,731
openvpn
1394192b210cb3c6624a7419bcf3ff966742e79b
https://github.com/OpenVPN/openvpn
https://github.com/OpenVPN/openvpn/commit/1394192b210cb3c6624a7419bcf3ff966742e79b
Fix potential double-free() in Interactive Service (CVE-2018-9336)

Malformed input data on the service pipe towards the OpenVPN interactive service (normally used by the OpenVPN GUI to request openvpn instances from the service) can result in a double free() in the error handling code.

This usually only leads to a process crash (DoS by an unprivileged local account) but since it could possibly lead to memory corruption if happening while multiple other threads are active at the same time, CVE-2018-9336 has been assigned to acknowledge this risk.

Fix by ensuring that sud->directory is set to NULL in GetStartUpData() for all error cases (thus not being free()ed in FreeStartupData()). Rewrite control flow to use explicit error label for error exit.

Discovered and reported by Jacob Baines <jbaines@tenable.com>.

CVE: 2018-9336
Signed-off-by: Gert Doering <gert@greenie.muc.de>
Acked-by: Selva Nair <selva.nair@gmail.com>
Message-Id: <20180414072617.25075-1-gert@greenie.muc.de>
URL: https://www.mail-archive.com/search?l=mid&q=20180414072617.25075-1-gert@greenie.muc.de
Signed-off-by: Gert Doering <gert@greenie.muc.de>
1
GetStartupData(HANDLE pipe, STARTUP_DATA *sud)
{
    size_t size, len;
    BOOL ret = FALSE;
    WCHAR *data = NULL;
    DWORD bytes, read;

    bytes = PeekNamedPipeAsync(pipe, 1, &exit_event);
    if (bytes == 0)
    {
        MsgToEventLog(M_SYSERR, TEXT("PeekNamedPipeAsync failed"));
        ReturnLastError(pipe, L"PeekNamedPipeAsync");
        goto out;
    }

    size = bytes / sizeof(*data);
    if (size == 0)
    {
        MsgToEventLog(M_SYSERR, TEXT("malformed startup data: 1 byte received"));
        ReturnError(pipe, ERROR_STARTUP_DATA, L"GetStartupData", 1, &exit_event);
        goto out;
    }

    data = malloc(bytes);
    if (data == NULL)
    {
        MsgToEventLog(M_SYSERR, TEXT("malloc failed"));
        ReturnLastError(pipe, L"malloc");
        goto out;
    }

    read = ReadPipeAsync(pipe, data, bytes, 1, &exit_event);
    if (bytes != read)
    {
        MsgToEventLog(M_SYSERR, TEXT("ReadPipeAsync failed"));
        ReturnLastError(pipe, L"ReadPipeAsync");
        goto out;
    }

    if (data[size - 1] != 0)
    {
        MsgToEventLog(M_ERR, TEXT("Startup data is not NULL terminated"));
        ReturnError(pipe, ERROR_STARTUP_DATA, L"GetStartupData", 1, &exit_event);
        goto out;
    }

    sud->directory = data;
    len = wcslen(sud->directory) + 1;
    size -= len;
    if (size <= 0)
    {
        MsgToEventLog(M_ERR, TEXT("Startup data ends at working directory"));
        ReturnError(pipe, ERROR_STARTUP_DATA, L"GetStartupData", 1, &exit_event);
        goto out;
    }

    sud->options = sud->directory + len;
    len = wcslen(sud->options) + 1;
    size -= len;
    if (size <= 0)
    {
        MsgToEventLog(M_ERR, TEXT("Startup data ends at command line options"));
        ReturnError(pipe, ERROR_STARTUP_DATA, L"GetStartupData", 1, &exit_event);
        goto out;
    }

    sud->std_input = sud->options + len;
    data = NULL; /* don't free data */
    ret = TRUE;

out:
    free(data);
    return ret;
}
292,015,260,812,614,400,000,000,000,000,000,000,000
interactive.c
297,968,443,524,703,830,000,000,000,000,000,000,000
[ "CWE-415" ]
CVE-2018-9336
openvpnserv.exe (aka the interactive service helper) in OpenVPN 2.4.x before 2.4.6 allows a local attacker to cause a double-free of memory by sending a malformed request to the interactive service. This could cause a denial-of-service through memory corruption or possibly have unspecified other impact including privilege escalation.
https://nvd.nist.gov/vuln/detail/CVE-2018-9336
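The fix described above is an ownership discipline rather than new logic: sud->directory must stay NULL on every error path so that FreeStartupData() cannot free() the same block twice. A self-contained sketch of that pattern (simplified types; not the upstream diff):

    #include <stdlib.h>

    struct startup_data { char *directory; };

    /* Returns 0 on success. On failure sud->directory is left NULL,
     * otherwise a later free(sud->directory) in the caller's cleanup
     * would release the block the error path already released. */
    static int get_startup_data(struct startup_data *sud)
    {
        char *data = malloc(64);
        if (data == NULL)
            goto err;
        /* ... read and validate the pipe payload; on failure: goto err ... */
        sud->directory = data;     /* transfer ownership only on success */
        return 0;
    err:
        free(data);
        sud->directory = NULL;     /* explicit, per the commit's fix */
        return -1;
    }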
3,732
yubico-pam
0f6ceabab0a8849b47f67d727aa526c2656089ba
https://github.com/Yubico/yubico-pam
https://github.com/Yubico/yubico-pam/commit/0f6ceabab0a8849b47f67d727aa526c2656089ba
util: make sure to close the authfile before returning success fixes #136
1
check_user_token (const char *authfile, const char *username, const char *otp_id, int verbose, FILE *debug_file) { char buf[1024]; char *s_user, *s_token; int retval = AUTH_ERROR; int fd; struct stat st; FILE *opwfile; fd = open(authfile, O_RDONLY, 0); if (fd < 0) { if(verbose) D (debug_file, "Cannot open file: %s (%s)", authfile, strerror(errno)); return retval; } if (fstat(fd, &st) < 0) { if(verbose) D (debug_file, "Cannot stat file: %s (%s)", authfile, strerror(errno)); close(fd); return retval; } if (!S_ISREG(st.st_mode)) { if(verbose) D (debug_file, "%s is not a regular file", authfile); close(fd); return retval; } opwfile = fdopen(fd, "r"); if (opwfile == NULL) { if(verbose) D (debug_file, "fdopen: %s", strerror(errno)); close(fd); return retval; } retval = AUTH_NO_TOKENS; while (fgets (buf, 1024, opwfile)) { char *saveptr = NULL; if (buf[strlen (buf) - 1] == '\n') buf[strlen (buf) - 1] = '\0'; if (buf[0] == '#') { /* This is a comment and we may skip it. */ if(verbose) D (debug_file, "Skipping comment line: %s", buf); continue; } if(verbose) D (debug_file, "Authorization line: %s", buf); s_user = strtok_r (buf, ":", &saveptr); if (s_user && strcmp (username, s_user) == 0) { if(verbose) D (debug_file, "Matched user: %s", s_user); retval = AUTH_NOT_FOUND; /* We found at least one line for the user */ do { s_token = strtok_r (NULL, ":", &saveptr); if(verbose) D (debug_file, "Authorization token: %s", s_token); if (s_token && otp_id && strcmp (otp_id, s_token) == 0) { if(verbose) D (debug_file, "Match user/token as %s/%s", username, otp_id); return AUTH_FOUND; } } while (s_token != NULL); } } fclose (opwfile); return retval; }
73,976,873,416,705,270,000,000,000,000,000,000,000
util.c
194,446,551,470,215,400,000,000,000,000,000,000,000
[ "CWE-200" ]
CVE-2018-9275
In check_user_token in util.c in the Yubico PAM module (aka pam_yubico) 2.18 through 2.25, successful logins can leak file descriptors to the auth mapping file, which can lead to information disclosure (serial number of a device) and/or DoS (reaching the maximum number of file descriptors).
https://nvd.nist.gov/vuln/detail/CVE-2018-9275
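The patch routes the success path through the same cleanup as every other exit: returning AUTH_FOUND directly skipped fclose(opwfile), leaking one descriptor per successful login. A minimal sketch of the pattern:

    #include <stdio.h>

    static int check_token(const char *path)
    {
        FILE *f = fopen(path, "r");
        char line[1024];
        int ret = -1;

        if (f == NULL)
            return -1;
        while (fgets(line, sizeof line, f)) {
            if (line[0] == 'x') {  /* stand-in for a matching token */
                ret = 0;
                goto out;          /* was: return AUTH_FOUND; -- leaked f */
            }
        }
    out:
        fclose(f);                 /* the fix: close before returning success */
        return ret;
    }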
3,733
openbsd
0654414afcce51a16d35d05060190a3ec4618d42
https://github.com/libressl-portable/openbsd
https://github.com/libressl-portable/openbsd/commit/0654414afcce51a16d35d05060190a3ec4618d42
None
1
int_x509_param_set_hosts(X509_VERIFY_PARAM_ID *id, int mode, const char *name, size_t namelen) { char *copy; /* * Refuse names with embedded NUL bytes. * XXX: Do we need to push an error onto the error stack? */ if (name && memchr(name, '\0', namelen)) return 0; if (mode == SET_HOST && id->hosts) { string_stack_free(id->hosts); id->hosts = NULL; } if (name == NULL || namelen == 0) return 1; copy = strndup(name, namelen); if (copy == NULL) return 0; if (id->hosts == NULL && (id->hosts = sk_OPENSSL_STRING_new_null()) == NULL) { free(copy); return 0; } if (!sk_OPENSSL_STRING_push(id->hosts, copy)) { free(copy); if (sk_OPENSSL_STRING_num(id->hosts) == 0) { sk_OPENSSL_STRING_free(id->hosts); id->hosts = NULL; } return 0; } return 1; }
281,204,162,380,877,240,000,000,000,000,000,000,000
None
null
[ "CWE-295" ]
CVE-2018-8970
The int_x509_param_set_hosts function in lib/libcrypto/x509/x509_vpm.c in LibreSSL 2.7.0 before 2.7.1 does not support a certain special case of a zero name length, which causes silent omission of hostname verification, and consequently allows man-in-the-middle attackers to spoof servers and obtain sensitive information via a crafted certificate. NOTE: the LibreSSL documentation indicates that this special case is supported, but the BoringSSL documentation does not.
https://nvd.nist.gov/vuln/detail/CVE-2018-8970
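Per the CVE note, the dangerous input is name != NULL with namelen == 0: it fell through to the "clear hosts and succeed" branch, leaving verification silently disabled. A sketch of the documented remedy, assuming BoringSSL-style semantics where a zero length means "use strlen":

    if (name != NULL && namelen == 0)
        namelen = strlen(name);        /* zero length: NUL-terminated name */
    if (name == NULL || namelen == 0)  /* now only a genuinely empty request */
        return 1;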
3,734
linux
d8ba61ba58c88d5207c1ba2f7d9a2280e7d03be9
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/d8ba61ba58c88d5207c1ba2f7d9a2280e7d03be9
x86/entry/64: Don't use IST entry for #BP stack There's nothing IST-worthy about #BP/int3. We don't allow kprobes in the small handful of places in the kernel that run at CPL0 with an invalid stack, and 32-bit kernels have used normal interrupt gates for #BP forever. Furthermore, we don't allow kprobes in places that have usergs while in kernel mode, so "paranoid" is also unnecessary. Signed-off-by: Andy Lutomirski <luto@kernel.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: stable@vger.kernel.org
1
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) { #ifdef CONFIG_DYNAMIC_FTRACE /* * ftrace must be first, everything else may cause a recursive crash. * See note by declaration of modifying_ftrace_code in ftrace.c */ if (unlikely(atomic_read(&modifying_ftrace_code)) && ftrace_int3_handler(regs)) return; #endif if (poke_int3_handler(regs)) return; ist_enter(regs); RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) goto exit; #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ #ifdef CONFIG_KPROBES if (kprobe_int3_handler(regs)) goto exit; #endif if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) goto exit; /* * Let others (NMI) know that the debug stack is in use * as we may switch to the interrupt stack. */ debug_stack_usage_inc(); cond_local_irq_enable(regs); do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); cond_local_irq_disable(regs); debug_stack_usage_dec(); exit: ist_exit(regs); }
106,804,786,307,522,770,000,000,000,000,000,000,000
traps.c
166,654,386,209,643,410,000,000,000,000,000,000,000
[ "CWE-362" ]
CVE-2018-8897
A statement in the System Programming Guide of the Intel 64 and IA-32 Architectures Software Developer's Manual (SDM) was mishandled in the development of some or all operating-system kernels, resulting in unexpected behavior for #DB exceptions that are deferred by MOV SS or POP SS, as demonstrated by (for example) privilege escalation in Windows, macOS, some Xen configurations, or FreeBSD, or a Linux kernel crash. The MOV to SS and POP SS instructions inhibit interrupts (including NMIs), data breakpoints, and single step trap exceptions until the instruction boundary following the next instruction (SDM Vol. 3A; section 6.8.3). (The inhibited data breakpoints are those on memory accessed by the MOV to SS or POP to SS instruction itself.) Note that debug exceptions are not inhibited by the interrupt enable (EFLAGS.IF) system flag (SDM Vol. 3A; section 2.3). If the instruction following the MOV to SS or POP to SS instruction is an instruction like SYSCALL, SYSENTER, INT 3, etc. that transfers control to the operating system at CPL < 3, the debug exception is delivered after the transfer to CPL < 3 is complete. OS kernels may not expect this order of events and may therefore experience unexpected behavior when it occurs.
https://nvd.nist.gov/vuln/detail/CVE-2018-8897
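Most of this fix lives in the IDT setup (registering #BP on a normal interrupt gate instead of an IST stack); the visible consequence in the C handler is that the IST and debug-stack bookkeeping disappears. A schematic of the resulting shape, not the literal upstream diff:

    dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
    {
        if (poke_int3_handler(regs))
            return;
        /* kgdb/kprobes/notify_die hooks stay as before, then an ordinary
         * trap: no ist_enter()/ist_exit(), no debug_stack_usage_inc()/dec() */
        cond_local_irq_enable(regs);
        do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
        cond_local_irq_disable(regs);
    }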
3,735
kamailio
e1d8008a09d9390ebaf698abe8909e10dfec4097
https://github.com/kamailio/kamailio
https://github.com/kamailio/kamailio/commit/e1d8008a09d9390ebaf698abe8909e10dfec4097
tmx: allocate space to store ending 0 for branch value - reported by Alfred Farrugia and Sandro Gauci
1
int tmx_check_pretran(sip_msg_t *msg) { unsigned int chid; unsigned int slotid; int dsize; struct via_param *vbr; str scallid; str scseqmet; str scseqnum; str sftag; str svbranch = {NULL, 0}; pretran_t *it; if(_tmx_ptran_table==NULL) { LM_ERR("pretran hash table not initialized yet\n"); return -1; } if(get_route_type()!=REQUEST_ROUTE) { LM_ERR("invalid usage - not in request route\n"); return -1; } if(msg->first_line.type!=SIP_REQUEST) { LM_ERR("invalid usage - not a sip request\n"); return -1; } if(parse_headers(msg, HDR_FROM_F|HDR_VIA1_F|HDR_CALLID_F|HDR_CSEQ_F, 0)<0) { LM_ERR("failed to parse required headers\n"); return -1; } if(msg->cseq==NULL || msg->cseq->parsed==NULL) { LM_ERR("failed to parse cseq headers\n"); return -1; } if(get_cseq(msg)->method_id==METHOD_ACK || get_cseq(msg)->method_id==METHOD_CANCEL) { LM_DBG("no pre-transaction management for ACK or CANCEL\n"); return -1; } if (msg->via1==0) { LM_ERR("failed to get Via header\n"); return -1; } if (parse_from_header(msg)<0 || get_from(msg)->tag_value.len==0) { LM_ERR("failed to get From header\n"); return -1; } if (msg->callid==NULL || msg->callid->body.s==NULL) { LM_ERR("failed to parse callid headers\n"); return -1; } vbr = msg->via1->branch; scallid = msg->callid->body; trim(&scallid); scseqmet = get_cseq(msg)->method; trim(&scseqmet); scseqnum = get_cseq(msg)->number; trim(&scseqnum); sftag = get_from(msg)->tag_value; trim(&sftag); chid = get_hash1_raw(msg->callid->body.s, msg->callid->body.len); slotid = chid & (_tmx_ptran_size-1); if(unlikely(_tmx_proc_ptran == NULL)) { _tmx_proc_ptran = (pretran_t*)shm_malloc(sizeof(pretran_t)); if(_tmx_proc_ptran == NULL) { LM_ERR("not enough memory for pretran structure\n"); return -1; } memset(_tmx_proc_ptran, 0, sizeof(pretran_t)); _tmx_proc_ptran->pid = my_pid(); } dsize = scallid.len + scseqnum.len + scseqmet.len + sftag.len + 4; if(likely(vbr!=NULL)) { svbranch = vbr->value; trim(&svbranch); dsize += svbranch.len; } if(dsize<256) dsize = 256; tmx_pretran_unlink(); if(dsize > _tmx_proc_ptran->dbuf.len) { if(_tmx_proc_ptran->dbuf.s) shm_free(_tmx_proc_ptran->dbuf.s); _tmx_proc_ptran->dbuf.s = (char*)shm_malloc(dsize); if(_tmx_proc_ptran->dbuf.s==NULL) { LM_ERR("not enough memory for pretran data\n"); return -1; } _tmx_proc_ptran->dbuf.len = dsize; } _tmx_proc_ptran->hid = chid; _tmx_proc_ptran->cseqmetid = (get_cseq(msg))->method_id; _tmx_proc_ptran->callid.s = _tmx_proc_ptran->dbuf.s; memcpy(_tmx_proc_ptran->callid.s, scallid.s, scallid.len); _tmx_proc_ptran->callid.len = scallid.len; _tmx_proc_ptran->callid.s[_tmx_proc_ptran->callid.len] = '\0'; _tmx_proc_ptran->ftag.s = _tmx_proc_ptran->callid.s + _tmx_proc_ptran->callid.len + 1; memcpy(_tmx_proc_ptran->ftag.s, sftag.s, sftag.len); _tmx_proc_ptran->ftag.len = sftag.len; _tmx_proc_ptran->ftag.s[_tmx_proc_ptran->ftag.len] = '\0'; _tmx_proc_ptran->cseqnum.s = _tmx_proc_ptran->ftag.s + _tmx_proc_ptran->ftag.len + 1; memcpy(_tmx_proc_ptran->cseqnum.s, scseqnum.s, scseqnum.len); _tmx_proc_ptran->cseqnum.len = scseqnum.len; _tmx_proc_ptran->cseqnum.s[_tmx_proc_ptran->cseqnum.len] = '\0'; _tmx_proc_ptran->cseqmet.s = _tmx_proc_ptran->cseqnum.s + _tmx_proc_ptran->cseqnum.len + 1; memcpy(_tmx_proc_ptran->cseqmet.s, scseqmet.s, scseqmet.len); _tmx_proc_ptran->cseqmet.len = scseqmet.len; _tmx_proc_ptran->cseqmet.s[_tmx_proc_ptran->cseqmet.len] = '\0'; if(likely(vbr!=NULL)) { _tmx_proc_ptran->vbranch.s = _tmx_proc_ptran->cseqmet.s + _tmx_proc_ptran->cseqmet.len + 1; memcpy(_tmx_proc_ptran->vbranch.s, svbranch.s, svbranch.len); 
_tmx_proc_ptran->vbranch.len = svbranch.len; _tmx_proc_ptran->vbranch.s[_tmx_proc_ptran->vbranch.len] = '\0'; } else { _tmx_proc_ptran->vbranch.s = NULL; _tmx_proc_ptran->vbranch.len = 0; } lock_get(&_tmx_ptran_table[slotid].lock); it = _tmx_ptran_table[slotid].plist; tmx_pretran_link_safe(slotid); for(; it!=NULL; it=it->next) { if(_tmx_proc_ptran->hid != it->hid || _tmx_proc_ptran->cseqmetid != it->cseqmetid || _tmx_proc_ptran->callid.len != it->callid.len || _tmx_proc_ptran->ftag.len != it->ftag.len || _tmx_proc_ptran->cseqmet.len != it->cseqmet.len || _tmx_proc_ptran->cseqnum.len != it->cseqnum.len) continue; if(_tmx_proc_ptran->vbranch.s != NULL && it->vbranch.s != NULL) { if(_tmx_proc_ptran->vbranch.len != it->vbranch.len) continue; /* shortcut - check last char in Via branch * - kamailio/ser adds there branch index => in case of paralel * forking by previous hop, catch it here quickly */ if(_tmx_proc_ptran->vbranch.s[it->vbranch.len-1] != it->vbranch.s[it->vbranch.len-1]) continue; if(memcmp(_tmx_proc_ptran->vbranch.s, it->vbranch.s, it->vbranch.len)!=0) continue; /* shall stop by matching magic cookie? * if (vbr && vbr->value.s && vbr->value.len > MCOOKIE_LEN * && memcmp(vbr->value.s, MCOOKIE, MCOOKIE_LEN)==0) { * LM_DBG("rfc3261 cookie found in Via branch\n"); * } */ } if(memcmp(_tmx_proc_ptran->callid.s, it->callid.s, it->callid.len)!=0 || memcmp(_tmx_proc_ptran->ftag.s, it->ftag.s, it->ftag.len)!=0 || memcmp(_tmx_proc_ptran->cseqnum.s, it->cseqnum.s, it->cseqnum.len)!=0) continue; if((it->cseqmetid==METHOD_OTHER || it->cseqmetid==METHOD_UNDEF) && memcmp(_tmx_proc_ptran->cseqmet.s, it->cseqmet.s, it->cseqmet.len)!=0) continue; LM_DBG("matched another pre-transaction by pid %d for [%.*s]\n", it->pid, it->callid.len, it->callid.s); lock_release(&_tmx_ptran_table[slotid].lock); return 1; } lock_release(&_tmx_ptran_table[slotid].lock); return 0; }
133,412,845,150,430,060,000,000,000,000,000,000,000
tmx_pretran.c
50,705,933,634,594,320,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2018-8828
A Buffer Overflow issue was discovered in Kamailio before 4.4.7, 5.0.x before 5.0.6, and 5.1.x before 5.1.2. A specially crafted REGISTER message with a malformed branch or From tag triggers an off-by-one heap-based buffer overflow in the tmx_check_pretran function in modules/tmx/tmx_pretran.c.
https://nvd.nist.gov/vuln/detail/CVE-2018-8828
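The off-by-one is in the dsize budget: it reserves "+ 4" terminator bytes for callid, ftag, cseqnum and cseqmet, but when a Via branch is present a fifth terminating '\0' is written after svbranch, one byte past the allocation. Fix shape, matching the commit title ("allocate space to store ending 0 for branch value"):

    dsize = scallid.len + scseqnum.len + scseqmet.len + sftag.len + 4;
    if (likely(vbr != NULL)) {
        svbranch = vbr->value;
        trim(&svbranch);
        dsize += svbranch.len + 1; /* was: += svbranch.len (no room for '\0') */
    }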
3,746
FreeRDP
d1112c279bd1a327e8e4d0b5f371458bf2579659
https://github.com/FreeRDP/FreeRDP
https://github.com/FreeRDP/FreeRDP/commit/d1112c279bd1a327e8e4d0b5f371458bf2579659
Fixed CVE-2018-8788 Thanks to Eyal Itkin from Check Point Software Technologies.
1
static void nsc_rle_decode(BYTE* in, BYTE* out, UINT32 originalSize) { UINT32 len; UINT32 left; BYTE value; left = originalSize; while (left > 4) { value = *in++; if (left == 5) { *out++ = value; left--; } else if (value == *in) { in++; if (*in < 0xFF) { len = (UINT32) * in++; len += 2; } else { in++; len = *((UINT32*) in); in += 4; } FillMemory(out, len, value); out += len; left -= len; } else { *out++ = value; left--; } } *((UINT32*)out) = *((UINT32*)in); }
6,048,075,215,570,503,000,000,000,000,000,000,000
nsc.c
220,797,472,776,409,930,000,000,000,000,000,000,000
[ "CWE-787" ]
CVE-2018-8788
FreeRDP prior to version 2.0.0-rc4 contains an Out-Of-Bounds Write of up to 4 bytes in function nsc_rle_decode() that results in a memory corruption and possibly even a remote code execution.
https://nvd.nist.gov/vuln/detail/CVE-2018-8788
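The decoder trusts the run lengths embedded in the stream; the hardened form must bound every fill and copy by the output space actually remaining. A sketch of that invariant as a helper (name and signature are ours, not the upstream patch):

    static BOOL rle_emit_run(BYTE **out, UINT32 *left, BYTE value, UINT32 len)
    {
        if (len > *left)        /* crafted run longer than remaining output */
            return FALSE;
        FillMemory(*out, len, value);
        *out  += len;
        *left -= len;
        return TRUE;
    }

The same bound applies to the trailing 4-byte raw copy at the end of the function.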
3,752
FreeRDP
09b9d4f1994a674c4ec85b4947aa656eda1aed8a
https://github.com/FreeRDP/FreeRDP
https://github.com/FreeRDP/FreeRDP/commit/09b9d4f1994a674c4ec85b4947aa656eda1aed8a
Fixed CVE-2018-8787 Thanks to Eyal Itkin from Check Point Software Technologies.
1
static BOOL gdi_Bitmap_Decompress(rdpContext* context, rdpBitmap* bitmap, const BYTE* pSrcData, UINT32 DstWidth, UINT32 DstHeight, UINT32 bpp, UINT32 length, BOOL compressed, UINT32 codecId) { UINT32 SrcSize = length; rdpGdi* gdi = context->gdi; bitmap->compressed = FALSE; bitmap->format = gdi->dstFormat; bitmap->length = DstWidth * DstHeight * GetBytesPerPixel(bitmap->format); bitmap->data = (BYTE*) _aligned_malloc(bitmap->length, 16); if (!bitmap->data) return FALSE; if (compressed) { if (bpp < 32) { if (!interleaved_decompress(context->codecs->interleaved, pSrcData, SrcSize, DstWidth, DstHeight, bpp, bitmap->data, bitmap->format, 0, 0, 0, DstWidth, DstHeight, &gdi->palette)) return FALSE; } else { if (!planar_decompress(context->codecs->planar, pSrcData, SrcSize, DstWidth, DstHeight, bitmap->data, bitmap->format, 0, 0, 0, DstWidth, DstHeight, TRUE)) return FALSE; } } else { const UINT32 SrcFormat = gdi_get_pixel_format(bpp); const size_t sbpp = GetBytesPerPixel(SrcFormat); const size_t dbpp = GetBytesPerPixel(bitmap->format); if ((sbpp == 0) || (dbpp == 0)) return FALSE; else { const size_t dstSize = SrcSize * dbpp / sbpp; if (dstSize < bitmap->length) return FALSE; } if (!freerdp_image_copy(bitmap->data, bitmap->format, 0, 0, 0, DstWidth, DstHeight, pSrcData, SrcFormat, 0, 0, 0, &gdi->palette, FREERDP_FLIP_VERTICAL)) return FALSE; } return TRUE; }
8,582,638,630,962,748,000,000,000,000,000,000,000
None
null
[ "CWE-190" ]
CVE-2018-8787
FreeRDP prior to version 2.0.0-rc4 contains an Integer Overflow that leads to a Heap-Based Buffer Overflow in function gdi_Bitmap_Decompress() and results in a memory corruption and probably even a remote code execution.
https://nvd.nist.gov/vuln/detail/CVE-2018-8787
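The overflow is the 32-bit product DstWidth * DstHeight * GetBytesPerPixel(): a wrapped result buys an undersized allocation that the decoder then writes past. A sketch of the overflow-safe form (widening before the multiply; not the literal diff):

    UINT64 size = (UINT64)DstWidth * DstHeight *
                  GetBytesPerPixel(bitmap->format);
    if ((size == 0) || (size > UINT32_MAX))
        return FALSE;            /* reject before the undersized malloc */
    bitmap->length = (UINT32)size;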
3,753
FreeRDP
445a5a42c500ceb80f8fa7f2c11f3682538033f3
https://github.com/FreeRDP/FreeRDP
https://github.com/FreeRDP/FreeRDP/commit/445a5a42c500ceb80f8fa7f2c11f3682538033f3
Fixed CVE-2018-8786 Thanks to Eyal Itkin from Check Point Software Technologies.
1
BITMAP_UPDATE* update_read_bitmap_update(rdpUpdate* update, wStream* s) { UINT32 i; BITMAP_UPDATE* bitmapUpdate = calloc(1, sizeof(BITMAP_UPDATE)); if (!bitmapUpdate) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT16(s, bitmapUpdate->number); /* numberRectangles (2 bytes) */ WLog_Print(update->log, WLOG_TRACE, "BitmapUpdate: %"PRIu32"", bitmapUpdate->number); if (bitmapUpdate->number > bitmapUpdate->count) { UINT16 count; BITMAP_DATA* newdata; count = bitmapUpdate->number * 2; newdata = (BITMAP_DATA*) realloc(bitmapUpdate->rectangles, sizeof(BITMAP_DATA) * count); if (!newdata) goto fail; bitmapUpdate->rectangles = newdata; ZeroMemory(&bitmapUpdate->rectangles[bitmapUpdate->count], sizeof(BITMAP_DATA) * (count - bitmapUpdate->count)); bitmapUpdate->count = count; } /* rectangles */ for (i = 0; i < bitmapUpdate->number; i++) { if (!update_read_bitmap_data(update, s, &bitmapUpdate->rectangles[i])) goto fail; } return bitmapUpdate; fail: free_bitmap_update(update->context, bitmapUpdate); return NULL; }
282,936,062,818,017,570,000,000,000,000,000,000,000
update.c
243,677,407,853,847,500,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2018-8786
FreeRDP prior to version 2.0.0-rc4 contains an Integer Truncation that leads to a Heap-Based Buffer Overflow in function update_read_bitmap_update() and results in a memory corruption and probably even a remote code execution.
https://nvd.nist.gov/vuln/detail/CVE-2018-8786
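The truncation is the UINT16 temporary: count = bitmapUpdate->number * 2 wraps for number > 0x7FFF, so the reallocated array is smaller than the loop bound that fills it. The fix shape is simply a wider type (upstream additionally validates numberRectangles against the remaining stream length):

    UINT32 count = bitmapUpdate->number * 2;   /* was: UINT16 count */
    newdata = (BITMAP_DATA*) realloc(bitmapUpdate->rectangles,
                                     sizeof(BITMAP_DATA) * count);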
3,754
FreeRDP
602f4a2e14b41703b5f431de3154cd46a5750a2d
https://github.com/FreeRDP/FreeRDP
https://github.com/FreeRDP/FreeRDP/commit/602f4a2e14b41703b5f431de3154cd46a5750a2d
Fixed CVE-2018-8785 Thanks to Eyal Itkin from Check Point Software Technologies.
1
int zgfx_decompress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize, BYTE** ppDstData, UINT32* pDstSize, UINT32 flags) { int status = -1; BYTE descriptor; wStream* stream = Stream_New((BYTE*)pSrcData, SrcSize); if (!stream) return -1; if (Stream_GetRemainingLength(stream) < 1) goto fail; Stream_Read_UINT8(stream, descriptor); /* descriptor (1 byte) */ if (descriptor == ZGFX_SEGMENTED_SINGLE) { if (!zgfx_decompress_segment(zgfx, stream, Stream_GetRemainingLength(stream))) goto fail; *ppDstData = NULL; if (zgfx->OutputCount > 0) *ppDstData = (BYTE*) malloc(zgfx->OutputCount); if (!*ppDstData) goto fail; *pDstSize = zgfx->OutputCount; CopyMemory(*ppDstData, zgfx->OutputBuffer, zgfx->OutputCount); } else if (descriptor == ZGFX_SEGMENTED_MULTIPART) { UINT32 segmentSize; UINT16 segmentNumber; UINT16 segmentCount; UINT32 uncompressedSize; BYTE* pConcatenated; if (Stream_GetRemainingLength(stream) < 6) goto fail; Stream_Read_UINT16(stream, segmentCount); /* segmentCount (2 bytes) */ Stream_Read_UINT32(stream, uncompressedSize); /* uncompressedSize (4 bytes) */ if (Stream_GetRemainingLength(stream) < segmentCount * sizeof(UINT32)) goto fail; pConcatenated = (BYTE*) malloc(uncompressedSize); if (!pConcatenated) goto fail; *ppDstData = pConcatenated; *pDstSize = uncompressedSize; for (segmentNumber = 0; segmentNumber < segmentCount; segmentNumber++) { if (Stream_GetRemainingLength(stream) < sizeof(UINT32)) goto fail; Stream_Read_UINT32(stream, segmentSize); /* segmentSize (4 bytes) */ if (!zgfx_decompress_segment(zgfx, stream, segmentSize)) goto fail; CopyMemory(pConcatenated, zgfx->OutputBuffer, zgfx->OutputCount); pConcatenated += zgfx->OutputCount; } } else { goto fail; } status = 1; fail: Stream_Free(stream, FALSE); return status; }
318,980,867,436,274,160,000,000,000,000,000,000,000
zgfx.c
283,567,047,878,855,330,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2018-8785
FreeRDP prior to version 2.0.0-rc4 contains a Heap-Based Buffer Overflow in function zgfx_decompress() that results in a memory corruption and probably even a remote code execution.
https://nvd.nist.gov/vuln/detail/CVE-2018-8785
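In the multipart branch nothing ties the sum of the decompressed segment sizes to the single uncompressedSize allocation that pConcatenated points into. A sketch of the missing invariant, using an explicit running total (not the literal diff):

    UINT32 used = 0;
    for (segmentNumber = 0; segmentNumber < segmentCount; segmentNumber++) {
        /* ... read segmentSize, zgfx_decompress_segment() as before ... */
        if (zgfx->OutputCount > uncompressedSize - used)
            goto fail;               /* crafted sizes no longer overflow */
        CopyMemory(pConcatenated + used, zgfx->OutputBuffer, zgfx->OutputCount);
        used += zgfx->OutputCount;
    }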
3,757
FreeRDP
17c363a5162fd4dc77b1df54e48d7bd9bf6b3be7
https://github.com/FreeRDP/FreeRDP
https://github.com/FreeRDP/FreeRDP/commit/17c363a5162fd4dc77b1df54e48d7bd9bf6b3be7
Fixed CVE-2018-8784 Thanks to Eyal Itkin from Check Point Software Technologies.
1
static BOOL zgfx_decompress_segment(ZGFX_CONTEXT* zgfx, wStream* stream, size_t segmentSize) { BYTE c; BYTE flags; UINT32 extra = 0; int opIndex; int haveBits; int inPrefix; UINT32 count; UINT32 distance; BYTE* pbSegment; size_t cbSegment = segmentSize - 1; if ((Stream_GetRemainingLength(stream) < segmentSize) || (segmentSize < 1)) return FALSE; Stream_Read_UINT8(stream, flags); /* header (1 byte) */ zgfx->OutputCount = 0; pbSegment = Stream_Pointer(stream); Stream_Seek(stream, cbSegment); if (!(flags & PACKET_COMPRESSED)) { zgfx_history_buffer_ring_write(zgfx, pbSegment, cbSegment); CopyMemory(zgfx->OutputBuffer, pbSegment, cbSegment); zgfx->OutputCount = cbSegment; return TRUE; } zgfx->pbInputCurrent = pbSegment; zgfx->pbInputEnd = &pbSegment[cbSegment - 1]; /* NumberOfBitsToDecode = ((NumberOfBytesToDecode - 1) * 8) - ValueOfLastByte */ zgfx->cBitsRemaining = 8 * (cbSegment - 1) - *zgfx->pbInputEnd; zgfx->cBitsCurrent = 0; zgfx->BitsCurrent = 0; while (zgfx->cBitsRemaining) { haveBits = 0; inPrefix = 0; for (opIndex = 0; ZGFX_TOKEN_TABLE[opIndex].prefixLength != 0; opIndex++) { while (haveBits < ZGFX_TOKEN_TABLE[opIndex].prefixLength) { zgfx_GetBits(zgfx, 1); inPrefix = (inPrefix << 1) + zgfx->bits; haveBits++; } if (inPrefix == ZGFX_TOKEN_TABLE[opIndex].prefixCode) { if (ZGFX_TOKEN_TABLE[opIndex].tokenType == 0) { /* Literal */ zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits); c = (BYTE)(ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits); zgfx->HistoryBuffer[zgfx->HistoryIndex] = c; if (++zgfx->HistoryIndex == zgfx->HistoryBufferSize) zgfx->HistoryIndex = 0; zgfx->OutputBuffer[zgfx->OutputCount++] = c; } else { zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits); distance = ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits; if (distance != 0) { /* Match */ zgfx_GetBits(zgfx, 1); if (zgfx->bits == 0) { count = 3; } else { count = 4; extra = 2; zgfx_GetBits(zgfx, 1); while (zgfx->bits == 1) { count *= 2; extra++; zgfx_GetBits(zgfx, 1); } zgfx_GetBits(zgfx, extra); count += zgfx->bits; } zgfx_history_buffer_ring_read(zgfx, distance, &(zgfx->OutputBuffer[zgfx->OutputCount]), count); zgfx_history_buffer_ring_write(zgfx, &(zgfx->OutputBuffer[zgfx->OutputCount]), count); zgfx->OutputCount += count; } else { /* Unencoded */ zgfx_GetBits(zgfx, 15); count = zgfx->bits; zgfx->cBitsRemaining -= zgfx->cBitsCurrent; zgfx->cBitsCurrent = 0; zgfx->BitsCurrent = 0; CopyMemory(&(zgfx->OutputBuffer[zgfx->OutputCount]), zgfx->pbInputCurrent, count); zgfx_history_buffer_ring_write(zgfx, zgfx->pbInputCurrent, count); zgfx->pbInputCurrent += count; zgfx->cBitsRemaining -= (8 * count); zgfx->OutputCount += count; } } break; } } } return TRUE; }
48,551,217,187,937,530,000,000,000,000,000,000,000
zgfx.c
146,200,484,448,513,670,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2018-8784
FreeRDP prior to version 2.0.0-rc4 contains a Heap-Based Buffer Overflow in function zgfx_decompress_segment() that results in a memory corruption and probably even a remote code execution.
https://nvd.nist.gov/vuln/detail/CVE-2018-8784
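All three output paths (literal, match, unencoded run) append to zgfx->OutputBuffer without a capacity check, and the unencoded path also copies count bytes from the input without bounding it. The fix shape is a guard before each append (this assumes OutputBuffer is a fixed-size array in ZGFX_CONTEXT, as the sizeof idiom implies):

    if (count > sizeof(zgfx->OutputBuffer) - zgfx->OutputCount)
        return FALSE;            /* run would overflow the output buffer */
    /* the unencoded path additionally needs
     *   count <= (size_t)(zgfx->pbInputEnd - zgfx->pbInputCurrent + 1) */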
3,759
libgit2
3207ddb0103543da8ad2139ec6539f590f9900c1
https://github.com/libgit2/libgit2
https://github.com/libgit2/libgit2/commit/3207ddb0103543da8ad2139ec6539f590f9900c1
index: fix out-of-bounds read with invalid index entry prefix length The index format in version 4 has prefix-compressed entries, where every index entry can compress its path by using a path prefix of the previous entry. Since implementing support for this index format version in commit 5625d86b9 (index: support index v4, 2016-05-17), though, we do not correctly verify that the prefix length that we want to reuse is actually smaller than or equal to the length of the previous index entry's path. This can lead to an integer underflow and subsequently to an out-of-bounds read. Fix this by verifying that the prefix is actually smaller than the previous entry's path length. Reported-by: Krishna Ram Prakash R <krp@gtux.in> Reported-by: Vivek Parikh <viv0411.parikh@gmail.com>
1
static int read_entry( git_index_entry **out, size_t *out_size, git_index *index, const void *buffer, size_t buffer_size, const char *last) { size_t path_length, entry_size; const char *path_ptr; struct entry_short source; git_index_entry entry = {{0}}; bool compressed = index->version >= INDEX_VERSION_NUMBER_COMP; char *tmp_path = NULL; if (INDEX_FOOTER_SIZE + minimal_entry_size > buffer_size) return -1; /* buffer is not guaranteed to be aligned */ memcpy(&source, buffer, sizeof(struct entry_short)); entry.ctime.seconds = (git_time_t)ntohl(source.ctime.seconds); entry.ctime.nanoseconds = ntohl(source.ctime.nanoseconds); entry.mtime.seconds = (git_time_t)ntohl(source.mtime.seconds); entry.mtime.nanoseconds = ntohl(source.mtime.nanoseconds); entry.dev = ntohl(source.dev); entry.ino = ntohl(source.ino); entry.mode = ntohl(source.mode); entry.uid = ntohl(source.uid); entry.gid = ntohl(source.gid); entry.file_size = ntohl(source.file_size); git_oid_cpy(&entry.id, &source.oid); entry.flags = ntohs(source.flags); if (entry.flags & GIT_IDXENTRY_EXTENDED) { uint16_t flags_raw; size_t flags_offset; flags_offset = offsetof(struct entry_long, flags_extended); memcpy(&flags_raw, (const char *) buffer + flags_offset, sizeof(flags_raw)); flags_raw = ntohs(flags_raw); memcpy(&entry.flags_extended, &flags_raw, sizeof(flags_raw)); path_ptr = (const char *) buffer + offsetof(struct entry_long, path); } else path_ptr = (const char *) buffer + offsetof(struct entry_short, path); if (!compressed) { path_length = entry.flags & GIT_IDXENTRY_NAMEMASK; /* if this is a very long string, we must find its * real length without overflowing */ if (path_length == 0xFFF) { const char *path_end; path_end = memchr(path_ptr, '\0', buffer_size); if (path_end == NULL) return -1; path_length = path_end - path_ptr; } entry_size = index_entry_size(path_length, 0, entry.flags); entry.path = (char *)path_ptr; } else { size_t varint_len; size_t strip_len = git_decode_varint((const unsigned char *)path_ptr, &varint_len); size_t last_len = strlen(last); size_t prefix_len = last_len - strip_len; size_t suffix_len = strlen(path_ptr + varint_len); size_t path_len; if (varint_len == 0) return index_error_invalid("incorrect prefix length"); GITERR_CHECK_ALLOC_ADD(&path_len, prefix_len, suffix_len); GITERR_CHECK_ALLOC_ADD(&path_len, path_len, 1); tmp_path = git__malloc(path_len); GITERR_CHECK_ALLOC(tmp_path); memcpy(tmp_path, last, prefix_len); memcpy(tmp_path + prefix_len, path_ptr + varint_len, suffix_len + 1); entry_size = index_entry_size(suffix_len, varint_len, entry.flags); entry.path = tmp_path; } if (entry_size == 0) return -1; if (INDEX_FOOTER_SIZE + entry_size > buffer_size) return -1; if (index_entry_dup(out, index, &entry) < 0) { git__free(tmp_path); return -1; } git__free(tmp_path); *out_size = entry_size; return 0; }
197,322,156,807,941,720,000,000,000,000,000,000,000
index.c
3,627,655,496,182,682,700,000,000,000,000,000,000
[ "CWE-190" ]
CVE-2018-8098
Integer overflow in the index.c:read_entry() function while decompressing a compressed prefix length in libgit2 before v0.26.2 allows an attacker to cause a denial of service (out-of-bounds read) via a crafted repository index file.
https://nvd.nist.gov/vuln/detail/CVE-2018-8098
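The commit message spells out the check: the strip length decoded from the varint must not exceed the previous entry's path length, otherwise last_len - strip_len underflows and the subsequent memcpy reads out of bounds. Fix shape in the compressed branch:

    size_t last_len = strlen(last);
    if (varint_len == 0 || last_len < strip_len)
        return index_error_invalid("incorrect prefix length");
    size_t prefix_len = last_len - strip_len;   /* can no longer underflow */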
3,760
linux
0ddcff49b672239dda94d70d0fcf50317a9f4b51
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/0ddcff49b672239dda94d70d0fcf50317a9f4b51
mac80211_hwsim: fix possible memory leak in hwsim_new_radio_nl() 'hwname' is malloced in hwsim_new_radio_nl() and should be freed before leaving from the error handling cases, otherwise it will cause memory leak. Fixes: ff4dd73dd2b4 ("mac80211_hwsim: check HWSIM_ATTR_RADIO_NAME length") Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com> Reviewed-by: Ben Hutchings <ben.hutchings@codethink.co.uk> Signed-off-by: Johannes Berg <johannes.berg@intel.com>
1
static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct hwsim_new_radio_params param = { 0 }; const char *hwname = NULL; int ret; param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE]; param.channels = channels; param.destroy_on_close = info->attrs[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE]; if (info->attrs[HWSIM_ATTR_CHANNELS]) param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); if (info->attrs[HWSIM_ATTR_NO_VIF]) param.no_vif = true; if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { hwname = kasprintf(GFP_KERNEL, "%.*s", nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); if (!hwname) return -ENOMEM; param.hwname = hwname; } if (info->attrs[HWSIM_ATTR_USE_CHANCTX]) param.use_chanctx = true; else param.use_chanctx = (param.channels > 1); if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]) param.reg_alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]); if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) { u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]); if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) return -EINVAL; param.regd = hwsim_world_regdom_custom[idx]; } ret = mac80211_hwsim_new_radio(info, &param); kfree(hwname); return ret; }
260,520,256,202,264,520,000,000,000,000,000,000,000
mac80211_hwsim.c
75,874,823,711,895,600,000,000,000,000,000,000,000
[ "CWE-772" ]
CVE-2018-8087
Memory leak in the hwsim_new_radio_nl function in drivers/net/wireless/mac80211_hwsim.c in the Linux kernel through 4.15.9 allows local users to cause a denial of service (memory consumption) by triggering an out-of-array error case.
https://nvd.nist.gov/vuln/detail/CVE-2018-8087
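The leak is on the error return taken after kasprintf() has allocated hwname (the normal path already frees it after mac80211_hwsim_new_radio()). Fix shape:

    if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) {
        kfree(hwname);           /* was leaked on this early return */
        return -EINVAL;
    }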
3,761
linux
297a6961ffb8ff4dc66c9fbf53b924bd1dda05d5
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/297a6961ffb8ff4dc66c9fbf53b924bd1dda05d5
net: phy: mdio-bcm-unimac: fix potential NULL dereference in unimac_mdio_probe() platform_get_resource() may fail and return NULL, so we should better check it's return value to avoid a NULL pointer dereference a bit later in the code. This is detected by Coccinelle semantic patch. @@ expression pdev, res, n, t, e, e1, e2; @@ res = platform_get_resource(pdev, t, n); + if (!res) + return -EINVAL; ... when != res == NULL e = devm_ioremap(e1, res->start, e2); Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1
static int unimac_mdio_probe(struct platform_device *pdev) { struct unimac_mdio_pdata *pdata = pdev->dev.platform_data; struct unimac_mdio_priv *priv; struct device_node *np; struct mii_bus *bus; struct resource *r; int ret; np = pdev->dev.of_node; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); /* Just ioremap, as this MDIO block is usually integrated into an * Ethernet MAC controller register range */ priv->base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!priv->base) { dev_err(&pdev->dev, "failed to remap register\n"); return -ENOMEM; } priv->mii_bus = mdiobus_alloc(); if (!priv->mii_bus) return -ENOMEM; bus = priv->mii_bus; bus->priv = priv; if (pdata) { bus->name = pdata->bus_name; priv->wait_func = pdata->wait_func; priv->wait_func_data = pdata->wait_func_data; bus->phy_mask = ~pdata->phy_mask; } else { bus->name = "unimac MII bus"; priv->wait_func_data = priv; priv->wait_func = unimac_mdio_poll; } bus->parent = &pdev->dev; bus->read = unimac_mdio_read; bus->write = unimac_mdio_write; bus->reset = unimac_mdio_reset; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); ret = of_mdiobus_register(bus, np); if (ret) { dev_err(&pdev->dev, "MDIO bus registration failed\n"); goto out_mdio_free; } platform_set_drvdata(pdev, priv); dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus at 0x%p\n", priv->base); return 0; out_mdio_free: mdiobus_free(bus); return ret; }
265,342,746,482,685,360,000,000,000,000,000,000,000
mdio-bcm-unimac.c
301,959,438,202,686,220,000,000,000,000,000,000,000
[ "CWE-476" ]
CVE-2018-8043
The unimac_mdio_probe function in drivers/net/phy/mdio-bcm-unimac.c in the Linux kernel through 4.15.8 does not validate certain resource availability, which allows local users to cause a denial of service (NULL pointer dereference).
https://nvd.nist.gov/vuln/detail/CVE-2018-8043
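The commit message carries its own Coccinelle patch; applied to this probe it is a two-line guard after the lookup:

    r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!r)
        return -EINVAL;  /* don't dereference r->start on a missing resource */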
3,762
libvips
20d840e6da15c1574b3ed998bc92f91d1e36c2a5
https://github.com/libvips/libvips
https://github.com/jcupitt/libvips/commit/20d840e6da15c1574b3ed998bc92f91d1e36c2a5
fix a crash with delayed load If a delayed load failed, it could leave the pipeline only half-set up. Subsequent threads could then segv. Set a load-has-failed flag and test before generate. See https://github.com/jcupitt/libvips/issues/893
1
vips_foreign_load_start( VipsImage *out, void *a, void *b ) { VipsForeignLoad *load = VIPS_FOREIGN_LOAD( b ); VipsForeignLoadClass *class = VIPS_FOREIGN_LOAD_GET_CLASS( load ); if( !load->real ) { if( !(load->real = vips_foreign_load_temp( load )) ) return( NULL ); #ifdef DEBUG printf( "vips_foreign_load_start: triggering ->load()\n" ); #endif /*DEBUG*/ /* Read the image in. This may involve a long computation and * will finish with load->real holding the decompressed image. * * We want our caller to be able to see this computation on * @out, so eval signals on ->real need to appear on ->out. */ load->real->progress_signal = load->out; /* Note the load object on the image. Loaders can use * this to signal invalidate if they hit a load error. See * vips_foreign_load_invalidate() below. */ g_object_set_qdata( G_OBJECT( load->real ), vips__foreign_load_operation, load ); if( class->load( load ) || vips_image_pio_input( load->real ) ) return( NULL ); /* ->header() read the header into @out, load has read the * image into @real. They must match exactly in size, bands, * format and coding for the copy to work. * * Some versions of ImageMagick give different results between * Ping and Load for some formats, for example. */ if( !vips_foreign_load_iscompat( load->real, out ) ) return( NULL ); /* We have to tell vips that out depends on real. We've set * the demand hint below, but not given an input there. */ vips_image_pipelinev( load->out, load->out->dhint, load->real, NULL ); } return( vips_region_new( load->real ) ); }
279,924,439,095,021,870,000,000,000,000,000,000,000
foreign.c
110,778,714,136,646,050,000,000,000,000,000,000,000
[ "CWE-362" ]
CVE-2018-7998
In libvips before 8.6.3, a NULL function pointer dereference vulnerability was found in the vips_region_generate function in region.c, which allows remote attackers to cause a denial of service or possibly have unspecified other impact via a crafted image file. This occurs because of a race condition involving a failed delayed load and other worker threads.
https://nvd.nist.gov/vuln/detail/CVE-2018-7998
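The commit describes the fix as a load-has-failed flag: record that ->load() failed so later worker threads bail out instead of touching a half-built pipeline. A sketch of that shape (the flag's field name is assumed):

    if (load->error)                   /* another thread saw the failure */
        return NULL;
    if (class->load(load) || vips_image_pio_input(load->real)) {
        load->error = TRUE;            /* remember the failed delayed load */
        return NULL;
    }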
3,763
linux
4a491b1ab11ca0556d2fda1ff1301e862a2d44c4
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/4a491b1ab11ca0556d2fda1ff1301e862a2d44c4
scsi: libsas: fix memory leak in sas_smp_get_phy_events() We've got a memory leak with the following producer: while true; do cat /sys/class/sas_phy/phy-1:0:12/invalid_dword_count >/dev/null; done The buffer req is allocated and not freed after we return. Fix it. Fixes: 2908d778ab3e ("[SCSI] aic94xx: new driver") Signed-off-by: Jason Yan <yanaijie@huawei.com> CC: John Garry <john.garry@huawei.com> CC: chenqilin <chenqilin2@huawei.com> CC: chenxiang <chenxiang66@hisilicon.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Hannes Reinecke <hare@suse.com> Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
1
int sas_smp_get_phy_events(struct sas_phy *phy) { int res; u8 *req; u8 *resp; struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); struct domain_device *dev = sas_find_dev_by_rphy(rphy); req = alloc_smp_req(RPEL_REQ_SIZE); if (!req) return -ENOMEM; resp = alloc_smp_resp(RPEL_RESP_SIZE); if (!resp) { kfree(req); return -ENOMEM; } req[1] = SMP_REPORT_PHY_ERR_LOG; req[9] = phy->number; res = smp_execute_task(dev, req, RPEL_REQ_SIZE, resp, RPEL_RESP_SIZE); if (!res) goto out; phy->invalid_dword_count = scsi_to_u32(&resp[12]); phy->running_disparity_error_count = scsi_to_u32(&resp[16]); phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]); phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); out: kfree(resp); return res; }
239,534,202,847,056,830,000,000,000,000,000,000,000
sas_expander.c
20,389,702,074,033,690,000,000,000,000,000,000,000
[ "CWE-772" ]
CVE-2018-7757
Memory leak in the sas_smp_get_phy_events function in drivers/scsi/libsas/sas_expander.c in the Linux kernel through 4.15.7 allows local users to cause a denial of service (memory consumption) via many read accesses to files in the /sys/class/sas_phy directory, as demonstrated by the /sys/class/sas_phy/phy-1:0:12/invalid_dword_count file.
https://nvd.nist.gov/vuln/detail/CVE-2018-7757
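Both SMP buffers are heap allocations, but only resp is released; every exit after the allocations must free req as well. Fix shape for the tail of the function:

    out:
        kfree(req);   /* the missing free: req leaked on every call */
        kfree(resp);
        return res;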
3,764
openjpeg
6d8c0c06ee32dc03ba80acd48334e98728e56cf5
https://github.com/uclouvain/openjpeg
https://github.com/kbabioch/openjpeg/commit/6d8c0c06ee32dc03ba80acd48334e98728e56cf5
None
1
int main(int argc, char *argv[]) { opj_dinfo_t* dinfo; opj_event_mgr_t event_mgr; /* event manager */ int tnum; unsigned int snum; opj_mj2_t *movie; mj2_tk_t *track; mj2_sample_t *sample; unsigned char* frame_codestream; FILE *file, *outfile; char outfilename[50]; mj2_dparameters_t parameters; if (argc != 3) { printf("Usage: %s mj2filename output_location\n", argv[0]); printf("Example: %s foreman.mj2 output/foreman\n", argv[0]); return 1; } file = fopen(argv[1], "rb"); if (!file) { fprintf(stderr, "failed to open %s for reading\n", argv[1]); return 1; } /* configure the event callbacks (not required) setting of each callback is optional */ memset(&event_mgr, 0, sizeof(opj_event_mgr_t)); event_mgr.error_handler = error_callback; event_mgr.warning_handler = warning_callback; event_mgr.info_handler = info_callback; /* get a MJ2 decompressor handle */ dinfo = mj2_create_decompress(); /* catch events using our callbacks and give a local context */ opj_set_event_mgr((opj_common_ptr)dinfo, &event_mgr, stderr); /* setup the decoder decoding parameters using user parameters */ memset(&parameters, 0, sizeof(mj2_dparameters_t)); movie = (opj_mj2_t*) dinfo->mj2_handle; mj2_setup_decoder(movie, &parameters); if (mj2_read_struct(file, movie)) { /* Creating the movie structure*/ return 1; } /* Decode first video track */ tnum = 0; while (movie->tk[tnum].track_type != 0) { tnum ++; } track = &movie->tk[tnum]; fprintf(stdout, "Extracting %d frames from file...\n", track->num_samples); for (snum = 0; snum < track->num_samples; snum++) { sample = &track->sample[snum]; frame_codestream = (unsigned char*) malloc(sample->sample_size - 8); /* Skipping JP2C marker*/ fseek(file, sample->offset + 8, SEEK_SET); fread(frame_codestream, sample->sample_size - 8, 1, file); /* Assuming that jp and ftyp markers size do*/ sprintf(outfilename, "%s_%05d.j2k", argv[2], snum); outfile = fopen(outfilename, "wb"); if (!outfile) { fprintf(stderr, "failed to open %s for writing\n", outfilename); return 1; } fwrite(frame_codestream, sample->sample_size - 8, 1, outfile); fclose(outfile); free(frame_codestream); } fclose(file); fprintf(stdout, "%d frames correctly extracted\n", snum); /* free remaining structures */ if (dinfo) { mj2_destroy_decompress((opj_mj2_t*)dinfo->mj2_handle); } return 0; }
271,909,071,981,036,530,000,000,000,000,000,000,000
opj_mj2_extract.c
253,856,647,793,899,200,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2018-7648
An issue was discovered in mj2/opj_mj2_extract.c in OpenJPEG 2.3.0. The output prefix was not checked for length, which could overflow a buffer, when providing a prefix with 50 or more characters on the command line.
https://nvd.nist.gov/vuln/detail/CVE-2018-7648
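outfilename is a fixed 50-byte buffer filled by sprintf() from the command-line prefix, so a 50-character argv[2] overflows it. A defensive sketch (the exact upstream check may differ):

    int n = snprintf(outfilename, sizeof outfilename,
                     "%s_%05d.j2k", argv[2], snum);
    if (n < 0 || (size_t)n >= sizeof outfilename) {
        fprintf(stderr, "output prefix too long\n");
        return 1;
    }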
3,765
linux
f3069c6d33f6ae63a1668737bc78aaaa51bff7ca
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/f3069c6d33f6ae63a1668737bc78aaaa51bff7ca
rds: Fix NULL pointer dereference in __rds_rdma_map This is a fix for syzkaller719569, where memory registration was attempted without any underlying transport being loaded. Analysis of the case reveals that it is the setsockopt() RDS_GET_MR (2) and RDS_GET_MR_FOR_DEST (7) that are vulnerable. Here is an example stack trace when the bug is hit: BUG: unable to handle kernel NULL pointer dereference at 00000000000000c0 IP: __rds_rdma_map+0x36/0x440 [rds] PGD 2f93d03067 P4D 2f93d03067 PUD 2f93d02067 PMD 0 Oops: 0000 [#1] SMP Modules linked in: bridge stp llc tun rpcsec_gss_krb5 nfsv4 dns_resolver nfs fscache rds binfmt_misc sb_edac intel_powerclamp coretemp kvm_intel kvm irqbypass crct10dif_pclmul c rc32_pclmul ghash_clmulni_intel pcbc aesni_intel crypto_simd glue_helper cryptd iTCO_wdt mei_me sg iTCO_vendor_support ipmi_si mei ipmi_devintf nfsd shpchp pcspkr i2c_i801 ioatd ma ipmi_msghandler wmi lpc_ich mfd_core auth_rpcgss nfs_acl lockd grace sunrpc ip_tables ext4 mbcache jbd2 mgag200 i2c_algo_bit drm_kms_helper ixgbe syscopyarea ahci sysfillrect sysimgblt libahci mdio fb_sys_fops ttm ptp libata sd_mod mlx4_core drm crc32c_intel pps_core megaraid_sas i2c_core dca dm_mirror dm_region_hash dm_log dm_mod CPU: 48 PID: 45787 Comm: repro_set2 Not tainted 4.14.2-3.el7uek.x86_64 #2 Hardware name: Oracle Corporation ORACLE SERVER X5-2L/ASM,MOBO TRAY,2U, BIOS 31110000 03/03/2017 task: ffff882f9190db00 task.stack: ffffc9002b994000 RIP: 0010:__rds_rdma_map+0x36/0x440 [rds] RSP: 0018:ffffc9002b997df0 EFLAGS: 00010202 RAX: 0000000000000000 RBX: ffff882fa2182580 RCX: 0000000000000000 RDX: 0000000000000000 RSI: ffffc9002b997e40 RDI: ffff882fa2182580 RBP: ffffc9002b997e30 R08: 0000000000000000 R09: 0000000000000002 R10: ffff885fb29e3838 R11: 0000000000000000 R12: ffff882fa2182580 R13: ffff882fa2182580 R14: 0000000000000002 R15: 0000000020000ffc FS: 00007fbffa20b700(0000) GS:ffff882fbfb80000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000000000c0 CR3: 0000002f98a66006 CR4: 00000000001606e0 Call Trace: rds_get_mr+0x56/0x80 [rds] rds_setsockopt+0x172/0x340 [rds] ? __fget_light+0x25/0x60 ? __fdget+0x13/0x20 SyS_setsockopt+0x80/0xe0 do_syscall_64+0x67/0x1b0 entry_SYSCALL64_slow_path+0x25/0x25 RIP: 0033:0x7fbff9b117f9 RSP: 002b:00007fbffa20aed8 EFLAGS: 00000293 ORIG_RAX: 0000000000000036 RAX: ffffffffffffffda RBX: 00000000000c84a4 RCX: 00007fbff9b117f9 RDX: 0000000000000002 RSI: 0000400000000114 RDI: 000000000000109b RBP: 00007fbffa20af10 R08: 0000000000000020 R09: 00007fbff9dd7860 R10: 0000000020000ffc R11: 0000000000000293 R12: 0000000000000000 R13: 00007fbffa20b9c0 R14: 00007fbffa20b700 R15: 0000000000000021 Code: 41 56 41 55 49 89 fd 41 54 53 48 83 ec 18 8b 87 f0 02 00 00 48 89 55 d0 48 89 4d c8 85 c0 0f 84 2d 03 00 00 48 8b 87 00 03 00 00 <48> 83 b8 c0 00 00 00 00 0f 84 25 03 00 0 0 48 8b 06 48 8b 56 08 The fix is to check the existence of an underlying transport in __rds_rdma_map(). Signed-off-by: Håkon Bugge <haakon.bugge@oracle.com> Reported-by: syzbot <syzkaller@googlegroups.com> Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, u64 *cookie_ret, struct rds_mr **mr_ret) { struct rds_mr *mr = NULL, *found; unsigned int nr_pages; struct page **pages = NULL; struct scatterlist *sg; void *trans_private; unsigned long flags; rds_rdma_cookie_t cookie; unsigned int nents; long i; int ret; if (rs->rs_bound_addr == 0) { ret = -ENOTCONN; /* XXX not a great errno */ goto out; } if (!rs->rs_transport->get_mr) { ret = -EOPNOTSUPP; goto out; } nr_pages = rds_pages_in_vec(&args->vec); if (nr_pages == 0) { ret = -EINVAL; goto out; } /* Restrict the size of mr irrespective of underlying transport * To account for unaligned mr regions, subtract one from nr_pages */ if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) { ret = -EMSGSIZE; goto out; } rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n", args->vec.addr, args->vec.bytes, nr_pages); /* XXX clamp nr_pages to limit the size of this alloc? */ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) { ret = -ENOMEM; goto out; } mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL); if (!mr) { ret = -ENOMEM; goto out; } refcount_set(&mr->r_refcount, 1); RB_CLEAR_NODE(&mr->r_rb_node); mr->r_trans = rs->rs_transport; mr->r_sock = rs; if (args->flags & RDS_RDMA_USE_ONCE) mr->r_use_once = 1; if (args->flags & RDS_RDMA_INVALIDATE) mr->r_invalidate = 1; if (args->flags & RDS_RDMA_READWRITE) mr->r_write = 1; /* * Pin the pages that make up the user buffer and transfer the page * pointers to the mr's sg array. We check to see if we've mapped * the whole region after transferring the partial page references * to the sg array so that we can have one page ref cleanup path. * * For now we have no flag that tells us whether the mapping is * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to * the zero page. */ ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1); if (ret < 0) goto out; nents = ret; sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL); if (!sg) { ret = -ENOMEM; goto out; } WARN_ON(!nents); sg_init_table(sg, nents); /* Stick all pages into the scatterlist */ for (i = 0 ; i < nents; i++) sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0); rdsdebug("RDS: trans_private nents is %u\n", nents); /* Obtain a transport specific MR. If this succeeds, the * s/g list is now owned by the MR. * Note that dma_map() implies that pending writes are * flushed to RAM, so no dma_sync is needed here. */ trans_private = rs->rs_transport->get_mr(sg, nents, rs, &mr->r_key); if (IS_ERR(trans_private)) { for (i = 0 ; i < nents; i++) put_page(sg_page(&sg[i])); kfree(sg); ret = PTR_ERR(trans_private); goto out; } mr->r_trans_private = trans_private; rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n", mr->r_key, (void *)(unsigned long) args->cookie_addr); /* The user may pass us an unaligned address, but we can only * map page aligned regions. So we keep the offset, and build * a 64bit cookie containing <R_Key, offset> and pass that * around. */ cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK); if (cookie_ret) *cookie_ret = cookie; if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) { ret = -EFAULT; goto out; } /* Inserting the new MR into the rbtree bumps its * reference count. 
*/ spin_lock_irqsave(&rs->rs_rdma_lock, flags); found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); BUG_ON(found && found != mr); rdsdebug("RDS: get_mr key is %x\n", mr->r_key); if (mr_ret) { refcount_inc(&mr->r_refcount); *mr_ret = mr; } ret = 0; out: kfree(pages); if (mr) rds_mr_put(mr); return ret; }
227,718,939,197,316,780,000,000,000,000,000,000,000
rdma.c
290,702,579,713,901,130,000,000,000,000,000,000,000
[ "CWE-476" ]
CVE-2018-7492
A NULL pointer dereference was found in the net/rds/rdma.c __rds_rdma_map() function in the Linux kernel before 4.14.7 allowing local attackers to cause a system panic and a denial-of-service, related to RDS_GET_MR and RDS_GET_MR_FOR_DEST.
https://nvd.nist.gov/vuln/detail/CVE-2018-7492
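Per the commit, the fix is a guard before the first dereference of rs->rs_transport, so RDS_GET_MR and RDS_GET_MR_FOR_DEST fail cleanly when no transport is attached:

    if (!rs->rs_transport) {          /* no underlying transport loaded yet */
        ret = -ENOTCONN;
        goto out;
    }
    if (!rs->rs_transport->get_mr) {  /* existing check, now reached safely */
        ret = -EOPNOTSUPP;
        goto out;
    }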
3,772
unixODBC
45ef78e037f578b15fc58938a3a3251655e71d6f
https://github.com/lurcher/unixODBC
https://github.com/lurcher/unixODBC/commit/45ef78e037f578b15fc58938a3a3251655e71d6f#diff-d52750c7ba4e594410438569d8e2963aL24
New Pre Source
1
BOOL SQLWriteFileDSN( LPCSTR pszFileName, LPCSTR pszAppName, LPCSTR pszKeyName, LPCSTR pszString ) { HINI hIni; char szFileName[ODBC_FILENAME_MAX+1]; if ( pszFileName[0] == '/' ) { strncpy( szFileName, sizeof(szFileName) - 5, pszFileName ); } else { char szPath[ODBC_FILENAME_MAX+1]; *szPath = '\0'; _odbcinst_FileINI( szPath ); snprintf( szFileName, sizeof(szFileName) - 5, "%s/%s", szPath, pszFileName ); } if ( strlen( szFileName ) < 4 || strcmp( szFileName + strlen( szFileName ) - 4, ".dsn" )) { strcat( szFileName, ".dsn" ); } #ifdef __OS2__ if ( iniOpen( &hIni, szFileName, "#;", '[', ']', '=', TRUE, 0L ) != INI_SUCCESS ) #else if ( iniOpen( &hIni, szFileName, "#;", '[', ']', '=', TRUE ) != INI_SUCCESS ) #endif { inst_logPushMsg( __FILE__, __FILE__, __LINE__, LOG_CRITICAL, ODBC_ERROR_INVALID_PATH, "" ); return FALSE; } /* delete section */ if ( pszString == NULL && pszKeyName == NULL ) { if ( iniObjectSeek( hIni, (char *)pszAppName ) == INI_SUCCESS ) { iniObjectDelete( hIni ); } } /* delete entry */ else if ( pszString == NULL ) { if ( iniPropertySeek( hIni, (char *)pszAppName, (char *)pszKeyName, "" ) == INI_SUCCESS ) { iniPropertyDelete( hIni ); } } else { /* add section */ if ( iniObjectSeek( hIni, (char *)pszAppName ) != INI_SUCCESS ) { iniObjectInsert( hIni, (char *)pszAppName ); } /* update entry */ if ( iniPropertySeek( hIni, (char *)pszAppName, (char *)pszKeyName, "" ) == INI_SUCCESS ) { iniObjectSeek( hIni, (char *)pszAppName ); iniPropertyUpdate( hIni, (char *)pszKeyName, (char *)pszString ); } /* add entry */ else { iniObjectSeek( hIni, (char *)pszAppName ); iniPropertyInsert( hIni, (char *)pszKeyName, (char *)pszString ); } } if ( iniCommit( hIni ) != INI_SUCCESS ) { iniClose( hIni ); inst_logPushMsg( __FILE__, __FILE__, __LINE__, LOG_CRITICAL, ODBC_ERROR_REQUEST_FAILED, "" ); return FALSE; } iniClose( hIni ); return TRUE; }
179,664,217,812,201,060,000,000,000,000,000,000,000
SQLWriteFileDSN.c
157,150,626,156,817,090,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2018-7485
The SQLWriteFileDSN function in odbcinst/SQLWriteFileDSN.c in unixODBC 2.3.5 has strncpy arguments in the wrong order, which allows attackers to cause a denial of service or possibly have unspecified other impact.
https://nvd.nist.gov/vuln/detail/CVE-2018-7485
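The bug is a transposed argument pair: strncpy(szFileName, sizeof(szFileName) - 5, pszFileName) passes the size as the source pointer. A minimal corrected form (the explicit terminator is our addition, since strncpy does not guarantee one on truncation):

    strncpy(szFileName, pszFileName, sizeof(szFileName) - 5);
    szFileName[sizeof(szFileName) - 5] = '\0';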
3,773
linux
9b54d816e00425c3a517514e0d677bb3cec49258
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/9b54d816e00425c3a517514e0d677bb3cec49258
blkcg: fix double free of new_blkg in blkcg_init_queue If blkg_create fails, new_blkg passed as an argument will be freed by blkg_create, so there is no need to free it again. Signed-off-by: Hou Tao <houtao1@huawei.com> Signed-off-by: Jens Axboe <axboe@fb.com>
1
int blkcg_init_queue(struct request_queue *q) { struct blkcg_gq *new_blkg, *blkg; bool preloaded; int ret; new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); if (!new_blkg) return -ENOMEM; preloaded = !radix_tree_preload(GFP_KERNEL); /* * Make sure the root blkg exists and count the existing blkgs. As * @q is bypassing at this point, blkg_lookup_create() can't be * used. Open code insertion. */ rcu_read_lock(); spin_lock_irq(q->queue_lock); blkg = blkg_create(&blkcg_root, q, new_blkg); spin_unlock_irq(q->queue_lock); rcu_read_unlock(); if (preloaded) radix_tree_preload_end(); if (IS_ERR(blkg)) { blkg_free(new_blkg); return PTR_ERR(blkg); } q->root_blkg = blkg; q->root_rl.blkg = blkg; ret = blk_throtl_init(q); if (ret) { spin_lock_irq(q->queue_lock); blkg_destroy_all(q); spin_unlock_irq(q->queue_lock); } return ret; }
94,022,243,707,851,460,000,000,000,000,000,000,000
blk-cgroup.c
11,694,601,837,132,450,000,000,000,000,000,000,000
[ "CWE-415" ]
CVE-2018-7480
The blkcg_init_queue function in block/blk-cgroup.c in the Linux kernel before 4.11 allows local users to cause a denial of service (double free) or possibly have unspecified other impact by triggering a creation failure.
https://nvd.nist.gov/vuln/detail/CVE-2018-7480
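Per the commit, blkg_create() already frees new_blkg on failure, so the caller's extra blkg_free() is the double free; the fix is to only propagate the error:

    if (IS_ERR(blkg))
        return PTR_ERR(blkg); /* was: blkg_free(new_blkg); return PTR_ERR(blkg); */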
3,774
leptonica
c1079bb8e77cdd426759e466729917ca37a3ed9f
https://github.com/DanBloomberg/leptonica
https://github.com/DanBloomberg/leptonica/commit/c1079bb8e77cdd426759e466729917ca37a3ed9f
prog/htmlviewer: Catch unbound memory access (CID 1386222) rootname can have any size, so limit the amount of copied bytes. Signed-off-by: Stefan Weil <sw@weilnetz.de>
1
pixHtmlViewer(const char *dirin, const char *dirout, const char *rootname, l_int32 thumbwidth, l_int32 viewwidth) { char *fname, *fullname, *outname; char *mainname, *linkname, *linknameshort; char *viewfile, *thumbfile; char *shtml, *slink; char charbuf[512]; char htmlstring[] = "<html>"; char framestring[] = "</frameset></html>"; l_int32 i, nfiles, index, w, d, nimages, ret; l_float32 factor; PIX *pix, *pixthumb, *pixview; SARRAY *safiles, *sathumbs, *saviews, *sahtml, *salink; PROCNAME("pixHtmlViewer"); if (!dirin) return ERROR_INT("dirin not defined", procName, 1); if (!dirout) return ERROR_INT("dirout not defined", procName, 1); if (!rootname) return ERROR_INT("rootname not defined", procName, 1); if (thumbwidth == 0) thumbwidth = DEFAULT_THUMB_WIDTH; if (thumbwidth < MIN_THUMB_WIDTH) { L_WARNING("thumbwidth too small; using min value\n", procName); thumbwidth = MIN_THUMB_WIDTH; } if (viewwidth == 0) viewwidth = DEFAULT_VIEW_WIDTH; if (viewwidth < MIN_VIEW_WIDTH) { L_WARNING("viewwidth too small; using min value\n", procName); viewwidth = MIN_VIEW_WIDTH; } /* Make the output directory if it doesn't already exist */ #ifndef _WIN32 snprintf(charbuf, sizeof(charbuf), "mkdir -p %s", dirout); ret = system(charbuf); #else ret = CreateDirectory(dirout, NULL) ? 0 : 1; #endif /* !_WIN32 */ if (ret) { L_ERROR("output directory %s not made\n", procName, dirout); return 1; } /* Capture the filenames in the input directory */ if ((safiles = getFilenamesInDirectory(dirin)) == NULL) return ERROR_INT("safiles not made", procName, 1); /* Generate output text file names */ sprintf(charbuf, "%s/%s.html", dirout, rootname); mainname = stringNew(charbuf); sprintf(charbuf, "%s/%s-links.html", dirout, rootname); linkname = stringNew(charbuf); linknameshort = stringJoin(rootname, "-links.html"); /* Generate the thumbs and views */ sathumbs = sarrayCreate(0); saviews = sarrayCreate(0); nfiles = sarrayGetCount(safiles); index = 0; for (i = 0; i < nfiles; i++) { fname = sarrayGetString(safiles, i, L_NOCOPY); fullname = genPathname(dirin, fname); fprintf(stderr, "name: %s\n", fullname); if ((pix = pixRead(fullname)) == NULL) { fprintf(stderr, "file %s not a readable image\n", fullname); lept_free(fullname); continue; } lept_free(fullname); /* Make and store the thumbnail images */ pixGetDimensions(pix, &w, NULL, &d); factor = (l_float32)thumbwidth / (l_float32)w; pixthumb = pixScale(pix, factor, factor); sprintf(charbuf, "%s_thumb_%03d", rootname, index); sarrayAddString(sathumbs, charbuf, L_COPY); outname = genPathname(dirout, charbuf); WriteFormattedPix(outname, pixthumb); lept_free(outname); pixDestroy(&pixthumb); /* Make and store the view images */ factor = (l_float32)viewwidth / (l_float32)w; if (factor >= 1.0) pixview = pixClone(pix); /* no upscaling */ else pixview = pixScale(pix, factor, factor); snprintf(charbuf, sizeof(charbuf), "%s_view_%03d", rootname, index); sarrayAddString(saviews, charbuf, L_COPY); outname = genPathname(dirout, charbuf); WriteFormattedPix(outname, pixview); lept_free(outname); pixDestroy(&pixview); pixDestroy(&pix); index++; } /* Generate the main html file */ sahtml = sarrayCreate(0); sarrayAddString(sahtml, htmlstring, L_COPY); sprintf(charbuf, "<frameset cols=\"%d, *\">", thumbwidth + 30); sarrayAddString(sahtml, charbuf, L_COPY); sprintf(charbuf, "<frame name=\"thumbs\" src=\"%s\">", linknameshort); sarrayAddString(sahtml, charbuf, L_COPY); sprintf(charbuf, "<frame name=\"views\" src=\"%s\">", sarrayGetString(saviews, 0, L_NOCOPY)); sarrayAddString(sahtml, charbuf, L_COPY); 
sarrayAddString(sahtml, framestring, L_COPY); shtml = sarrayToString(sahtml, 1); l_binaryWrite(mainname, "w", shtml, strlen(shtml)); fprintf(stderr, "******************************************\n" "Writing html file: %s\n" "******************************************\n", mainname); lept_free(shtml); lept_free(mainname); /* Generate the link html file */ nimages = sarrayGetCount(saviews); fprintf(stderr, "num. images = %d\n", nimages); salink = sarrayCreate(0); for (i = 0; i < nimages; i++) { viewfile = sarrayGetString(saviews, i, L_NOCOPY); thumbfile = sarrayGetString(sathumbs, i, L_NOCOPY); sprintf(charbuf, "<a href=\"%s\" TARGET=views><img src=\"%s\"></a>", viewfile, thumbfile); sarrayAddString(salink, charbuf, L_COPY); } slink = sarrayToString(salink, 1); l_binaryWrite(linkname, "w", slink, strlen(slink)); lept_free(slink); lept_free(linkname); lept_free(linknameshort); sarrayDestroy(&safiles); sarrayDestroy(&sathumbs); sarrayDestroy(&saviews); sarrayDestroy(&sahtml); sarrayDestroy(&salink); return 0; }
91,775,814,159,057,430,000,000,000,000,000,000,000
htmlviewer.c
175,672,854,193,627,600,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2018-7247
An issue was discovered in pixHtmlViewer in prog/htmlviewer.c in Leptonica before 1.75.3. Unsanitized input (rootname) can overflow a buffer, leading potentially to arbitrary code execution or possibly unspecified other impact.
https://nvd.nist.gov/vuln/detail/CVE-2018-7247
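For illustration, a minimal sketch of how the unbounded rootname formatting in pixHtmlViewer above could be bounded: snprintf's return value reports truncation. The identifiers (charbuf, dirout, rootname, ERROR_INT, procName) are reused from the function above; this is a hedged sketch, not the verbatim upstream patch.

    /* sketch only: bound every formatted write that embeds rootname */
    char charbuf[512];
    if (snprintf(charbuf, sizeof(charbuf), "%s/%s.html", dirout, rootname)
            >= (int)sizeof(charbuf))
        return ERROR_INT("rootname too long for buffer", procName, 1);
    mainname = stringNew(charbuf);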
3,779
leptonica
ee301cb2029db8a6289c5295daa42bba7715e99a
https://github.com/DanBloomberg/leptonica
https://github.com/DanBloomberg/leptonica/commit/ee301cb2029db8a6289c5295daa42bba7715e99a
Security fixes: expect final changes for release 1.75.3. * Fixed a Debian security issue with fscanf() reading a string with possible buffer overflow. * There were also a few similar situations with sscanf().
1
gplotRead(const char *filename) { char buf[L_BUF_SIZE]; char *rootname, *title, *xlabel, *ylabel, *ignores; l_int32 outformat, ret, version, ignore; FILE *fp; GPLOT *gplot; PROCNAME("gplotRead"); if (!filename) return (GPLOT *)ERROR_PTR("filename not defined", procName, NULL); if ((fp = fopenReadStream(filename)) == NULL) return (GPLOT *)ERROR_PTR("stream not opened", procName, NULL); ret = fscanf(fp, "Gplot Version %d\n", &version); if (ret != 1) { fclose(fp); return (GPLOT *)ERROR_PTR("not a gplot file", procName, NULL); } if (version != GPLOT_VERSION_NUMBER) { fclose(fp); return (GPLOT *)ERROR_PTR("invalid gplot version", procName, NULL); } ignore = fscanf(fp, "Rootname: %s\n", buf); rootname = stringNew(buf); ignore = fscanf(fp, "Output format: %d\n", &outformat); ignores = fgets(buf, L_BUF_SIZE, fp); /* Title: ... */ title = stringNew(buf + 7); title[strlen(title) - 1] = '\0'; ignores = fgets(buf, L_BUF_SIZE, fp); /* X axis label: ... */ xlabel = stringNew(buf + 14); xlabel[strlen(xlabel) - 1] = '\0'; ignores = fgets(buf, L_BUF_SIZE, fp); /* Y axis label: ... */ ylabel = stringNew(buf + 14); ylabel[strlen(ylabel) - 1] = '\0'; gplot = gplotCreate(rootname, outformat, title, xlabel, ylabel); LEPT_FREE(rootname); LEPT_FREE(title); LEPT_FREE(xlabel); LEPT_FREE(ylabel); if (!gplot) { fclose(fp); return (GPLOT *)ERROR_PTR("gplot not made", procName, NULL); } sarrayDestroy(&gplot->cmddata); sarrayDestroy(&gplot->datanames); sarrayDestroy(&gplot->plotdata); sarrayDestroy(&gplot->plottitles); numaDestroy(&gplot->plotstyles); ignore = fscanf(fp, "Commandfile name: %s\n", buf); stringReplace(&gplot->cmdname, buf); ignore = fscanf(fp, "\nCommandfile data:"); gplot->cmddata = sarrayReadStream(fp); ignore = fscanf(fp, "\nDatafile names:"); gplot->datanames = sarrayReadStream(fp); ignore = fscanf(fp, "\nPlot data:"); gplot->plotdata = sarrayReadStream(fp); ignore = fscanf(fp, "\nPlot titles:"); gplot->plottitles = sarrayReadStream(fp); ignore = fscanf(fp, "\nPlot styles:"); gplot->plotstyles = numaReadStream(fp); ignore = fscanf(fp, "Number of plots: %d\n", &gplot->nplots); ignore = fscanf(fp, "Output file name: %s\n", buf); stringReplace(&gplot->outname, buf); ignore = fscanf(fp, "Axis scaling: %d\n", &gplot->scaling); fclose(fp); return gplot; }
92,835,320,699,013,360,000,000,000,000,000,000,000
gplot.c
223,723,602,336,187,550,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2018-7186
Leptonica before 1.75.3 does not limit the number of characters in a %s format argument to fscanf or sscanf, which allows remote attackers to cause a denial of service (stack-based buffer overflow) or possibly have unspecified other impact via a long string, as demonstrated by the gplotRead and ptaReadStream functions.
https://nvd.nist.gov/vuln/detail/CVE-2018-7186
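A self-contained demonstration of the fscanf width-specifier idiom the commit message alludes to; the 512-byte size matches buf[L_BUF_SIZE] in gplotRead above only by assumption.

    #include <stdio.h>

    int main(void)
    {
        char buf[512];

        /* "%511s" makes fscanf stop after sizeof(buf)-1 characters and
         * NUL-terminate; a bare "%s" reads an unbounded token. */
        if (fscanf(stdin, "Rootname: %511s", buf) != 1)
            return 1;
        printf("rootname = %s\n", buf);
        return 0;
    }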
3,780
leptonica
ee301cb2029db8a6289c5295daa42bba7715e99a
https://github.com/DanBloomberg/leptonica
https://github.com/DanBloomberg/leptonica/commit/ee301cb2029db8a6289c5295daa42bba7715e99a
Security fixes: expect final changes for release 1.75.3. * Fixed a Debian security issue with fscanf() reading a string with possible buffer overflow. * There were also a few similar situations with sscanf().
1
ptaReadStream(FILE *fp) { char typestr[128]; l_int32 i, n, ix, iy, type, version; l_float32 x, y; PTA *pta; PROCNAME("ptaReadStream"); if (!fp) return (PTA *)ERROR_PTR("stream not defined", procName, NULL); if (fscanf(fp, "\n Pta Version %d\n", &version) != 1) return (PTA *)ERROR_PTR("not a pta file", procName, NULL); if (version != PTA_VERSION_NUMBER) return (PTA *)ERROR_PTR("invalid pta version", procName, NULL); if (fscanf(fp, " Number of pts = %d; format = %s\n", &n, typestr) != 2) return (PTA *)ERROR_PTR("not a pta file", procName, NULL); if (!strcmp(typestr, "float")) type = 0; else /* typestr is "integer" */ type = 1; if ((pta = ptaCreate(n)) == NULL) return (PTA *)ERROR_PTR("pta not made", procName, NULL); for (i = 0; i < n; i++) { if (type == 0) { /* data is float */ if (fscanf(fp, " (%f, %f)\n", &x, &y) != 2) { ptaDestroy(&pta); return (PTA *)ERROR_PTR("error reading floats", procName, NULL); } ptaAddPt(pta, x, y); } else { /* data is integer */ if (fscanf(fp, " (%d, %d)\n", &ix, &iy) != 2) { ptaDestroy(&pta); return (PTA *)ERROR_PTR("error reading ints", procName, NULL); } ptaAddPt(pta, ix, iy); } } return pta; }
30,001,997,675,172,210,000,000,000,000,000,000,000
ptabasic.c
4,753,893,045,332,788,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2018-7186
Leptonica before 1.75.3 does not limit the number of characters in a %s format argument to fscanf or sscanf, which allows remote attackers to cause a denial of service (stack-based buffer overflow) or possibly have unspecified other impact via a long string, as demonstrated by the gplotRead and ptaReadStream functions.
https://nvd.nist.gov/vuln/detail/CVE-2018-7186
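Applied to ptaReadStream above, the same idiom would bound the read into the 128-byte typestr buffer. A sketch under that size assumption, not the verbatim patch:

    /* sketch: "%127s" leaves room for the terminating NUL in typestr[128] */
    if (fscanf(fp, " Number of pts = %d; format = %127s\n", &n, typestr) != 2)
        return (PTA *)ERROR_PTR("not a pta file", procName, NULL);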
3,783
linux
fbe0e839d1e22d88810f3ee3e2f1479be4c0aa4a
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/fbe0e839d1e22d88810f3ee3e2f1479be4c0aa4a
futex: Prevent overflow by strengthen input validation UBSAN reports signed integer overflow in kernel/futex.c: UBSAN: Undefined behaviour in kernel/futex.c:2041:18 signed integer overflow: 0 - -2147483648 cannot be represented in type 'int' Add a sanity check to catch negative values of nr_wake and nr_requeue. Signed-off-by: Li Jinyue <lijinyue@huawei.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: peterz@infradead.org Cc: dvhart@infradead.org Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/1513242294-31786-1-git-send-email-lijinyue@huawei.com
1
static int futex_requeue(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, int nr_wake, int nr_requeue, u32 *cmpval, int requeue_pi) { union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; int drop_count = 0, task_count = 0, ret; struct futex_pi_state *pi_state = NULL; struct futex_hash_bucket *hb1, *hb2; struct futex_q *this, *next; DEFINE_WAKE_Q(wake_q); /* * When PI not supported: return -ENOSYS if requeue_pi is true, * consequently the compiler knows requeue_pi is always false past * this point which will optimize away all the conditional code * further down. */ if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi) return -ENOSYS; if (requeue_pi) { /* * Requeue PI only works on two distinct uaddrs. This * check is only valid for private futexes. See below. */ if (uaddr1 == uaddr2) return -EINVAL; /* * requeue_pi requires a pi_state, try to allocate it now * without any locks in case it fails. */ if (refill_pi_state_cache()) return -ENOMEM; /* * requeue_pi must wake as many tasks as it can, up to nr_wake * + nr_requeue, since it acquires the rt_mutex prior to * returning to userspace, so as to not leave the rt_mutex with * waiters and no owner. However, second and third wake-ups * cannot be predicted as they involve race conditions with the * first wake and a fault while looking up the pi_state. Both * pthread_cond_signal() and pthread_cond_broadcast() should * use nr_wake=1. */ if (nr_wake != 1) return -EINVAL; } retry: ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); if (unlikely(ret != 0)) goto out; ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, requeue_pi ? VERIFY_WRITE : VERIFY_READ); if (unlikely(ret != 0)) goto out_put_key1; /* * The check above which compares uaddrs is not sufficient for * shared futexes. We need to compare the keys: */ if (requeue_pi && match_futex(&key1, &key2)) { ret = -EINVAL; goto out_put_keys; } hb1 = hash_futex(&key1); hb2 = hash_futex(&key2); retry_private: hb_waiters_inc(hb2); double_lock_hb(hb1, hb2); if (likely(cmpval != NULL)) { u32 curval; ret = get_futex_value_locked(&curval, uaddr1); if (unlikely(ret)) { double_unlock_hb(hb1, hb2); hb_waiters_dec(hb2); ret = get_user(curval, uaddr1); if (ret) goto out_put_keys; if (!(flags & FLAGS_SHARED)) goto retry_private; put_futex_key(&key2); put_futex_key(&key1); goto retry; } if (curval != *cmpval) { ret = -EAGAIN; goto out_unlock; } } if (requeue_pi && (task_count - nr_wake < nr_requeue)) { /* * Attempt to acquire uaddr2 and wake the top waiter. If we * intend to requeue waiters, force setting the FUTEX_WAITERS * bit. We force this here where we are able to easily handle * faults rather in the requeue loop below. */ ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, &key2, &pi_state, nr_requeue); /* * At this point the top_waiter has either taken uaddr2 or is * waiting on it. If the former, then the pi_state will not * exist yet, look it up one more time to ensure we have a * reference to it. If the lock was taken, ret contains the * vpid of the top waiter task. * If the lock was not taken, we have pi_state and an initial * refcount on it. In case of an error we have nothing. */ if (ret > 0) { WARN_ON(pi_state); drop_count++; task_count++; /* * If we acquired the lock, then the user space value * of uaddr2 should be vpid. It cannot be changed by * the top waiter as it is blocked on hb2 lock if it * tries to do so. If something fiddled with it behind * our back the pi state lookup might unearth it. 
So * we rather use the known value than rereading and * handing potential crap to lookup_pi_state. * * If that call succeeds then we have pi_state and an * initial refcount on it. */ ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state); } switch (ret) { case 0: /* We hold a reference on the pi state. */ break; /* If the above failed, then pi_state is NULL */ case -EFAULT: double_unlock_hb(hb1, hb2); hb_waiters_dec(hb2); put_futex_key(&key2); put_futex_key(&key1); ret = fault_in_user_writeable(uaddr2); if (!ret) goto retry; goto out; case -EAGAIN: /* * Two reasons for this: * - Owner is exiting and we just wait for the * exit to complete. * - The user space value changed. */ double_unlock_hb(hb1, hb2); hb_waiters_dec(hb2); put_futex_key(&key2); put_futex_key(&key1); cond_resched(); goto retry; default: goto out_unlock; } } plist_for_each_entry_safe(this, next, &hb1->chain, list) { if (task_count - nr_wake >= nr_requeue) break; if (!match_futex(&this->key, &key1)) continue; /* * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always * be paired with each other and no other futex ops. * * We should never be requeueing a futex_q with a pi_state, * which is awaiting a futex_unlock_pi(). */ if ((requeue_pi && !this->rt_waiter) || (!requeue_pi && this->rt_waiter) || this->pi_state) { ret = -EINVAL; break; } /* * Wake nr_wake waiters. For requeue_pi, if we acquired the * lock, we already woke the top_waiter. If not, it will be * woken by futex_unlock_pi(). */ if (++task_count <= nr_wake && !requeue_pi) { mark_wake_futex(&wake_q, this); continue; } /* Ensure we requeue to the expected futex for requeue_pi. */ if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) { ret = -EINVAL; break; } /* * Requeue nr_requeue waiters and possibly one more in the case * of requeue_pi if we couldn't acquire the lock atomically. */ if (requeue_pi) { /* * Prepare the waiter to take the rt_mutex. Take a * refcount on the pi_state and store the pointer in * the futex_q object of the waiter. */ get_pi_state(pi_state); this->pi_state = pi_state; ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, this->rt_waiter, this->task); if (ret == 1) { /* * We got the lock. We do neither drop the * refcount on pi_state nor clear * this->pi_state because the waiter needs the * pi_state for cleaning up the user space * value. It will drop the refcount after * doing so. */ requeue_pi_wake_futex(this, &key2, hb2); drop_count++; continue; } else if (ret) { /* * rt_mutex_start_proxy_lock() detected a * potential deadlock when we tried to queue * that waiter. Drop the pi_state reference * which we took above and remove the pointer * to the state from the waiters futex_q * object. */ this->pi_state = NULL; put_pi_state(pi_state); /* * We stop queueing more waiters and let user * space deal with the mess. */ break; } } requeue_futex(this, hb1, hb2, &key2); drop_count++; } /* * We took an extra initial reference to the pi_state either * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We * need to drop it here again. */ put_pi_state(pi_state); out_unlock: double_unlock_hb(hb1, hb2); wake_up_q(&wake_q); hb_waiters_dec(hb2); /* * drop_futex_key_refs() must be called outside the spinlocks. During * the requeue we moved futex_q's from the hash bucket at key1 to the * one at key2 and updated their key pointer. We no longer need to * hold the references to key1. */ while (--drop_count >= 0) drop_futex_key_refs(&key1); out_put_keys: put_futex_key(&key2); out_put_key1: put_futex_key(&key1); out: return ret ? 
ret : task_count; }
199,927,163,848,810,380,000,000,000,000,000,000,000
None
null
[ "CWE-190" ]
CVE-2018-6927
The futex_requeue function in kernel/futex.c in the Linux kernel before 4.14.15 might allow attackers to cause a denial of service (integer overflow) or possibly have unspecified other impact by triggering a negative wake or requeue value.
https://nvd.nist.gov/vuln/detail/CVE-2018-6927
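The commit message describes a plain input-validation fix. A sketch of such a check at the top of futex_requeue, with its shape inferred from the message rather than copied from the diff:

    /* reject negative wake/requeue counts before any arithmetic uses them */
    if (nr_wake < 0 || nr_requeue < 0)
        return -EINVAL;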
3,784
suricata
e1ef57c848bbe4e567d5d4b66d346a742e3f77a1
https://github.com/OISF/suricata
https://github.com/OISF/suricata/commit/e1ef57c848bbe4e567d5d4b66d346a742e3f77a1
stream: still inspect packets dropped by stream The detect engine would bypass packets that are set as dropped. This seems sane, as these packets are going to be dropped anyway. However, it led to the following corner case: stream events that triggered the drop could not be matched on the rules. The packet with the event wouldn't make it to the detect engine due to the bypass. This patch changes the logic to not bypass DROP packets anymore. Packets that are dropped by the stream engine will set the no payload inspection flag, to avoid needless cost.
1
static void DetectFlow(ThreadVars *tv, DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx, Packet *p) { /* No need to perform any detection on this packet, if the the given flag is set.*/ if ((p->flags & PKT_NOPACKET_INSPECTION) || (PACKET_TEST_ACTION(p, ACTION_DROP))) { /* hack: if we are in pass the entire flow mode, we need to still * update the inspect_id forward. So test for the condition here, * and call the update code if necessary. */ const int pass = ((p->flow->flags & FLOW_NOPACKET_INSPECTION)); const AppProto alproto = FlowGetAppProtocol(p->flow); if (pass && AppLayerParserProtocolSupportsTxs(p->proto, alproto)) { uint8_t flags; if (p->flowflags & FLOW_PKT_TOSERVER) { flags = STREAM_TOSERVER; } else { flags = STREAM_TOCLIENT; } flags = FlowGetDisruptionFlags(p->flow, flags); DeStateUpdateInspectTransactionId(p->flow, flags, true); } return; } /* see if the packet matches one or more of the sigs */ (void)DetectRun(tv, de_ctx, det_ctx, p); }
195,957,054,812,264,830,000,000,000,000,000,000,000
detect.c
153,350,231,578,102,060,000,000,000,000,000,000,000
[ "CWE-693" ]
CVE-2018-6794
Suricata before 4.0.4 is prone to an HTTP detection bypass vulnerability in detect.c and stream-tcp.c. If a malicious server breaks a normal TCP flow and sends data before the 3-way handshake is complete, then the data sent by the malicious server will be accepted by web clients such as a web browser or Linux CLI utilities, but ignored by Suricata IDS signatures. This mostly affects IDS signatures for the HTTP protocol and TCP stream content; signatures for TCP packets will inspect such network traffic as usual.
https://nvd.nist.gov/vuln/detail/CVE-2018-6794
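A simplified sketch of the reworked gate in DetectFlow that the commit message describes: dropped packets keep flowing into DetectRun, and only the explicit no-inspection flag skips detection. The pass-mode inspect_id bookkeeping from the function above is elided here, so treat this as an illustration, not the upstream diff.

    /* sketch: ACTION_DROP no longer short-circuits the detect engine */
    if (p->flags & PKT_NOPACKET_INSPECTION) {
        return;
    }
    /* stream-engine drops arrive here with the no-payload-inspection
     * flag set, so rules can still match the triggering events cheaply */
    (void)DetectRun(tv, de_ctx, det_ctx, p);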
3,785
suricata
e1ef57c848bbe4e567d5d4b66d346a742e3f77a1
https://github.com/OISF/suricata
https://github.com/OISF/suricata/commit/e1ef57c848bbe4e567d5d4b66d346a742e3f77a1
stream: still inspect packets dropped by stream The detect engine would bypass packets that are set as dropped. This seems sane, as these packets are going to be dropped anyway. However, it led to the following corner case: stream events that triggered the drop could not be matched on the rules. The packet with the event wouldn't make it to the detect engine due to the bypass. This patch changes the logic to not bypass DROP packets anymore. Packets that are dropped by the stream engine will set the no payload inspection flag, to avoid needless cost.
1
int StreamTcpPacket (ThreadVars *tv, Packet *p, StreamTcpThread *stt, PacketQueue *pq) { SCEnter(); DEBUG_ASSERT_FLOW_LOCKED(p->flow); SCLogDebug("p->pcap_cnt %"PRIu64, p->pcap_cnt); /* assign the thread id to the flow */ if (unlikely(p->flow->thread_id == 0)) { p->flow->thread_id = (FlowThreadId)tv->id; #ifdef DEBUG } else if (unlikely((FlowThreadId)tv->id != p->flow->thread_id)) { SCLogDebug("wrong thread: flow has %u, we are %d", p->flow->thread_id, tv->id); #endif } TcpSession *ssn = (TcpSession *)p->flow->protoctx; /* track TCP flags */ if (ssn != NULL) { ssn->tcp_packet_flags |= p->tcph->th_flags; if (PKT_IS_TOSERVER(p)) ssn->client.tcp_flags |= p->tcph->th_flags; else if (PKT_IS_TOCLIENT(p)) ssn->server.tcp_flags |= p->tcph->th_flags; /* check if we need to unset the ASYNC flag */ if (ssn->flags & STREAMTCP_FLAG_ASYNC && ssn->client.tcp_flags != 0 && ssn->server.tcp_flags != 0) { SCLogDebug("ssn %p: removing ASYNC flag as we have packets on both sides", ssn); ssn->flags &= ~STREAMTCP_FLAG_ASYNC; } } /* update counters */ if ((p->tcph->th_flags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) { StatsIncr(tv, stt->counter_tcp_synack); } else if (p->tcph->th_flags & (TH_SYN)) { StatsIncr(tv, stt->counter_tcp_syn); } if (p->tcph->th_flags & (TH_RST)) { StatsIncr(tv, stt->counter_tcp_rst); } /* broken TCP http://ask.wireshark.org/questions/3183/acknowledgment-number-broken-tcp-the-acknowledge-field-is-nonzero-while-the-ack-flag-is-not-set */ if (!(p->tcph->th_flags & TH_ACK) && TCP_GET_ACK(p) != 0) { StreamTcpSetEvent(p, STREAM_PKT_BROKEN_ACK); } /* If we are on IPS mode, and got a drop action triggered from * the IP only module, or from a reassembled msg and/or from an * applayer detection, then drop the rest of the packets of the * same stream and avoid inspecting it any further */ if (StreamTcpCheckFlowDrops(p) == 1) { SCLogDebug("This flow/stream triggered a drop rule"); FlowSetNoPacketInspectionFlag(p->flow); DecodeSetNoPacketInspectionFlag(p); StreamTcpDisableAppLayer(p->flow); PACKET_DROP(p); /* return the segments to the pool */ StreamTcpSessionPktFree(p); SCReturnInt(0); } if (ssn == NULL || ssn->state == TCP_NONE) { if (StreamTcpPacketStateNone(tv, p, stt, ssn, &stt->pseudo_queue) == -1) { goto error; } if (ssn != NULL) SCLogDebug("ssn->alproto %"PRIu16"", p->flow->alproto); } else { /* special case for PKT_PSEUDO_STREAM_END packets: * bypass the state handling and various packet checks, * we care about reassembly here. */ if (p->flags & PKT_PSEUDO_STREAM_END) { if (PKT_IS_TOCLIENT(p)) { ssn->client.last_ack = TCP_GET_ACK(p); StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn, &ssn->server, p, pq); } else { ssn->server.last_ack = TCP_GET_ACK(p); StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn, &ssn->client, p, pq); } /* straight to 'skip' as we already handled reassembly */ goto skip; } /* check if the packet is in right direction, when we missed the SYN packet and picked up midstream session. 
*/ if (ssn->flags & STREAMTCP_FLAG_MIDSTREAM_SYNACK) StreamTcpPacketSwitchDir(ssn, p); if (StreamTcpPacketIsKeepAlive(ssn, p) == 1) { goto skip; } if (StreamTcpPacketIsKeepAliveACK(ssn, p) == 1) { StreamTcpClearKeepAliveFlag(ssn, p); goto skip; } StreamTcpClearKeepAliveFlag(ssn, p); /* if packet is not a valid window update, check if it is perhaps * a bad window update that we should ignore (and alert on) */ if (StreamTcpPacketIsFinShutdownAck(ssn, p) == 0) if (StreamTcpPacketIsWindowUpdate(ssn, p) == 0) if (StreamTcpPacketIsBadWindowUpdate(ssn,p)) goto skip; switch (ssn->state) { case TCP_SYN_SENT: if(StreamTcpPacketStateSynSent(tv, p, stt, ssn, &stt->pseudo_queue)) { goto error; } break; case TCP_SYN_RECV: if(StreamTcpPacketStateSynRecv(tv, p, stt, ssn, &stt->pseudo_queue)) { goto error; } break; case TCP_ESTABLISHED: if(StreamTcpPacketStateEstablished(tv, p, stt, ssn, &stt->pseudo_queue)) { goto error; } break; case TCP_FIN_WAIT1: if(StreamTcpPacketStateFinWait1(tv, p, stt, ssn, &stt->pseudo_queue)) { goto error; } break; case TCP_FIN_WAIT2: if(StreamTcpPacketStateFinWait2(tv, p, stt, ssn, &stt->pseudo_queue)) { goto error; } break; case TCP_CLOSING: if(StreamTcpPacketStateClosing(tv, p, stt, ssn, &stt->pseudo_queue)) { goto error; } break; case TCP_CLOSE_WAIT: if(StreamTcpPacketStateCloseWait(tv, p, stt, ssn, &stt->pseudo_queue)) { goto error; } break; case TCP_LAST_ACK: if(StreamTcpPacketStateLastAck(tv, p, stt, ssn, &stt->pseudo_queue)) { goto error; } break; case TCP_TIME_WAIT: if(StreamTcpPacketStateTimeWait(tv, p, stt, ssn, &stt->pseudo_queue)) { goto error; } break; case TCP_CLOSED: /* TCP session memory is not returned to pool until timeout. */ SCLogDebug("packet received on closed state"); break; default: SCLogDebug("packet received on default state"); break; } skip: if (ssn->state >= TCP_ESTABLISHED) { p->flags |= PKT_STREAM_EST; } } /* deal with a pseudo packet that is created upon receiving a RST * segment. To be sure we process both sides of the connection, we * inject a fake packet into the system, forcing reassembly of the * opposing direction. * There should be only one, but to be sure we do a while loop. 
*/ if (ssn != NULL) { while (stt->pseudo_queue.len > 0) { SCLogDebug("processing pseudo packet / stream end"); Packet *np = PacketDequeue(&stt->pseudo_queue); if (np != NULL) { /* process the opposing direction of the original packet */ if (PKT_IS_TOSERVER(np)) { SCLogDebug("pseudo packet is to server"); StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn, &ssn->client, np, NULL); } else { SCLogDebug("pseudo packet is to client"); StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn, &ssn->server, np, NULL); } /* enqueue this packet so we inspect it in detect etc */ PacketEnqueue(pq, np); } SCLogDebug("processing pseudo packet / stream end done"); } /* recalc the csum on the packet if it was modified */ if (p->flags & PKT_STREAM_MODIFIED) { ReCalculateChecksum(p); } /* check for conditions that may make us not want to log this packet */ /* streams that hit depth */ if ((ssn->client.flags & STREAMTCP_STREAM_FLAG_DEPTH_REACHED) && (ssn->server.flags & STREAMTCP_STREAM_FLAG_DEPTH_REACHED)) { /* we can call bypass callback, if enabled */ if (StreamTcpBypassEnabled()) { PacketBypassCallback(p); } } if ((ssn->client.flags & STREAMTCP_STREAM_FLAG_DEPTH_REACHED) || (ssn->server.flags & STREAMTCP_STREAM_FLAG_DEPTH_REACHED)) { p->flags |= PKT_STREAM_NOPCAPLOG; } /* encrypted packets */ if ((PKT_IS_TOSERVER(p) && (ssn->client.flags & STREAMTCP_STREAM_FLAG_NOREASSEMBLY)) || (PKT_IS_TOCLIENT(p) && (ssn->server.flags & STREAMTCP_STREAM_FLAG_NOREASSEMBLY))) { p->flags |= PKT_STREAM_NOPCAPLOG; } if (ssn->flags & STREAMTCP_FLAG_BYPASS) { /* we can call bypass callback, if enabled */ if (StreamTcpBypassEnabled()) { PacketBypassCallback(p); } /* if stream is dead and we have no detect engine at all, bypass. */ } else if (g_detect_disabled && (ssn->client.flags & STREAMTCP_STREAM_FLAG_NOREASSEMBLY) && (ssn->server.flags & STREAMTCP_STREAM_FLAG_NOREASSEMBLY) && StreamTcpBypassEnabled()) { SCLogDebug("bypass as stream is dead and we have no rules"); PacketBypassCallback(p); } } SCReturnInt(0); error: /* make sure we don't leave packets in our pseudo queue */ while (stt->pseudo_queue.len > 0) { Packet *np = PacketDequeue(&stt->pseudo_queue); if (np != NULL) { PacketEnqueue(pq, np); } } /* recalc the csum on the packet if it was modified */ if (p->flags & PKT_STREAM_MODIFIED) { ReCalculateChecksum(p); } if (StreamTcpInlineDropInvalid()) { PACKET_DROP(p); } SCReturnInt(-1); }
197,454,659,043,875,700,000,000,000,000,000,000,000
stream-tcp.c
1,301,724,529,142,096,000,000,000,000,000,000,000
[ "CWE-693" ]
CVE-2018-6794
Suricata before 4.0.4 is prone to an HTTP detection bypass vulnerability in detect.c and stream-tcp.c. If a malicious server breaks a normal TCP flow and sends data before the 3-way handshake is complete, then the data sent by the malicious server will be accepted by web clients such as a web browser or Linux CLI utilities, but ignored by Suricata IDS signatures. This mostly affects IDS signatures for the HTTP protocol and TCP stream content; signatures for TCP packets will inspect such network traffic as usual.
https://nvd.nist.gov/vuln/detail/CVE-2018-6794
3,786
WavPack
d5bf76b5a88d044a1be1d5656698e3ba737167e5
https://github.com/dbry/WavPack
https://github.com/dbry/WavPack/commit/d5bf76b5a88d044a1be1d5656698e3ba737167e5
issue #27, do not overwrite stack on corrupt RF64 file
1
int ParseRiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { int is_rf64 = !strncmp (fourcc, "RF64", 4), got_ds64 = 0; int64_t total_samples = 0, infilesize; RiffChunkHeader riff_chunk_header; ChunkHeader chunk_header; WaveHeader WaveHeader; DS64Chunk ds64_chunk; uint32_t bcount; CLEAR (WaveHeader); CLEAR (ds64_chunk); infilesize = DoGetFileSize (infile); if (!is_rf64 && infilesize >= 4294967296LL && !(config->qmode & QMODE_IGNORE_LENGTH)) { error_line ("can't handle .WAV files larger than 4 GB (non-standard)!"); return WAVPACK_SOFT_ERROR; } memcpy (&riff_chunk_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &riff_chunk_header) + 4, sizeof (RiffChunkHeader) - 4, &bcount) || bcount != sizeof (RiffChunkHeader) - 4 || strncmp (riff_chunk_header.formType, "WAVE", 4))) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &riff_chunk_header, sizeof (RiffChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } while (1) { if (!DoReadFile (infile, &chunk_header, sizeof (ChunkHeader), &bcount) || bcount != sizeof (ChunkHeader)) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &chunk_header, sizeof (ChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackLittleEndianToNative (&chunk_header, ChunkHeaderFormat); if (!strncmp (chunk_header.ckID, "ds64", 4)) { if (chunk_header.ckSize < sizeof (DS64Chunk) || !DoReadFile (infile, &ds64_chunk, chunk_header.ckSize, &bcount) || bcount != chunk_header.ckSize) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &ds64_chunk, chunk_header.ckSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } got_ds64 = 1; WavpackLittleEndianToNative (&ds64_chunk, DS64ChunkFormat); if (debug_logging_mode) error_line ("DS64: riffSize = %lld, dataSize = %lld, sampleCount = %lld, table_length = %d", (long long) ds64_chunk.riffSize64, (long long) ds64_chunk.dataSize64, (long long) ds64_chunk.sampleCount64, ds64_chunk.tableLength); if (ds64_chunk.tableLength * sizeof (CS64Chunk) != chunk_header.ckSize - sizeof (DS64Chunk)) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } while (ds64_chunk.tableLength--) { CS64Chunk cs64_chunk; if (!DoReadFile (infile, &cs64_chunk, sizeof (CS64Chunk), &bcount) || bcount != sizeof (CS64Chunk) || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &cs64_chunk, sizeof (CS64Chunk)))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } } } else if (!strncmp (chunk_header.ckID, "fmt ", 4)) { // if it's the format chunk, we want to get some info out of there and int supported = TRUE, format; // make sure it's a .wav file we can handle if (chunk_header.ckSize < 16 || chunk_header.ckSize > sizeof (WaveHeader) || !DoReadFile (infile, &WaveHeader, chunk_header.ckSize, &bcount) || bcount != chunk_header.ckSize) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &WaveHeader, chunk_header.ckSize)) { error_line ("%s", WavpackGetErrorMessage 
(wpc)); return WAVPACK_SOFT_ERROR; } WavpackLittleEndianToNative (&WaveHeader, WaveHeaderFormat); if (debug_logging_mode) { error_line ("format tag size = %d", chunk_header.ckSize); error_line ("FormatTag = %x, NumChannels = %d, BitsPerSample = %d", WaveHeader.FormatTag, WaveHeader.NumChannels, WaveHeader.BitsPerSample); error_line ("BlockAlign = %d, SampleRate = %d, BytesPerSecond = %d", WaveHeader.BlockAlign, WaveHeader.SampleRate, WaveHeader.BytesPerSecond); if (chunk_header.ckSize > 16) error_line ("cbSize = %d, ValidBitsPerSample = %d", WaveHeader.cbSize, WaveHeader.ValidBitsPerSample); if (chunk_header.ckSize > 20) error_line ("ChannelMask = %x, SubFormat = %d", WaveHeader.ChannelMask, WaveHeader.SubFormat); } if (chunk_header.ckSize > 16 && WaveHeader.cbSize == 2) config->qmode |= QMODE_ADOBE_MODE; format = (WaveHeader.FormatTag == 0xfffe && chunk_header.ckSize == 40) ? WaveHeader.SubFormat : WaveHeader.FormatTag; config->bits_per_sample = (chunk_header.ckSize == 40 && WaveHeader.ValidBitsPerSample) ? WaveHeader.ValidBitsPerSample : WaveHeader.BitsPerSample; if (format != 1 && format != 3) supported = FALSE; if (format == 3 && config->bits_per_sample != 32) supported = FALSE; if (!WaveHeader.NumChannels || WaveHeader.NumChannels > 256 || WaveHeader.BlockAlign / WaveHeader.NumChannels < (config->bits_per_sample + 7) / 8 || WaveHeader.BlockAlign / WaveHeader.NumChannels > 4 || WaveHeader.BlockAlign % WaveHeader.NumChannels) supported = FALSE; if (config->bits_per_sample < 1 || config->bits_per_sample > 32) supported = FALSE; if (!supported) { error_line ("%s is an unsupported .WAV format!", infilename); return WAVPACK_SOFT_ERROR; } if (chunk_header.ckSize < 40) { if (!config->channel_mask && !(config->qmode & QMODE_CHANS_UNASSIGNED)) { if (WaveHeader.NumChannels <= 2) config->channel_mask = 0x5 - WaveHeader.NumChannels; else if (WaveHeader.NumChannels <= 18) config->channel_mask = (1 << WaveHeader.NumChannels) - 1; else config->channel_mask = 0x3ffff; } } else if (WaveHeader.ChannelMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) { error_line ("this WAV file already has channel order information!"); return WAVPACK_SOFT_ERROR; } else if (WaveHeader.ChannelMask) config->channel_mask = WaveHeader.ChannelMask; if (format == 3) config->float_norm_exp = 127; else if ((config->qmode & QMODE_ADOBE_MODE) && WaveHeader.BlockAlign / WaveHeader.NumChannels == 4) { if (WaveHeader.BitsPerSample == 24) config->float_norm_exp = 127 + 23; else if (WaveHeader.BitsPerSample == 32) config->float_norm_exp = 127 + 15; } if (debug_logging_mode) { if (config->float_norm_exp == 127) error_line ("data format: normalized 32-bit floating point"); else if (config->float_norm_exp) error_line ("data format: 32-bit floating point (Audition %d:%d float type 1)", config->float_norm_exp - 126, 150 - config->float_norm_exp); else error_line ("data format: %d-bit integers stored in %d byte(s)", config->bits_per_sample, WaveHeader.BlockAlign / WaveHeader.NumChannels); } } else if (!strncmp (chunk_header.ckID, "data", 4)) { // on the data chunk, get size and exit loop int64_t data_chunk_size = (got_ds64 && chunk_header.ckSize == (uint32_t) -1) ? 
ds64_chunk.dataSize64 : chunk_header.ckSize; if (!WaveHeader.NumChannels || (is_rf64 && !got_ds64)) { // make sure we saw "fmt" and "ds64" chunks (if required) error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) && infilesize - data_chunk_size > 16777216) { error_line ("this .WAV file has over 16 MB of extra RIFF data, probably is corrupt!"); return WAVPACK_SOFT_ERROR; } if (config->qmode & QMODE_IGNORE_LENGTH) { if (infilesize && DoGetFilePosition (infile) != -1) total_samples = (infilesize - DoGetFilePosition (infile)) / WaveHeader.BlockAlign; else total_samples = -1; } else { total_samples = data_chunk_size / WaveHeader.BlockAlign; if (got_ds64 && total_samples != ds64_chunk.sampleCount64) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } if (!total_samples) { error_line ("this .WAV file has no audio samples, probably is corrupt!"); return WAVPACK_SOFT_ERROR; } if (total_samples > MAX_WAVPACK_SAMPLES) { error_line ("%s has too many samples for WavPack!", infilename); return WAVPACK_SOFT_ERROR; } } config->bytes_per_sample = WaveHeader.BlockAlign / WaveHeader.NumChannels; config->num_channels = WaveHeader.NumChannels; config->sample_rate = WaveHeader.SampleRate; break; } else { // just copy unknown chunks to output file int bytes_to_copy = (chunk_header.ckSize + 1) & ~1L; char *buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", chunk_header.ckID [0], chunk_header.ckID [1], chunk_header.ckID [2], chunk_header.ckID [3], chunk_header.ckSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) { error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } return WAVPACK_NO_ERROR; }
266,946,299,595,107,760,000,000,000,000,000,000,000
None
null
[ "CWE-119" ]
CVE-2018-6767
A stack-based buffer over-read in the ParseRiffHeaderConfig function of cli/riff.c file of WavPack 5.1.0 allows a remote attacker to cause a denial-of-service attack or possibly have unspecified other impact via a maliciously crafted RF64 file.
https://nvd.nist.gov/vuln/detail/CVE-2018-6767
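The ds64 handler above copies ckSize attacker-controlled bytes into a fixed DS64Chunk on the stack. A hedged sketch of the containment; the upstream patch also has to deal with the chunk's trailing bytes, which is only noted here:

    /* sketch: never copy more than the destination structure can hold */
    if (chunk_header.ckSize < sizeof (DS64Chunk) ||
        !DoReadFile (infile, &ds64_chunk, sizeof (DS64Chunk), &bcount) ||
        bcount != sizeof (DS64Chunk)) {
        error_line ("%s is not a valid .WAV file!", infilename);
        return WAVPACK_SOFT_ERROR;
    }
    /* any ckSize bytes beyond sizeof (DS64Chunk) must still be consumed
     * separately, skipped or appended to the wrapper (omitted) */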
3,787
uncurl
448cd13e7b18c83855d706c564341ddd1e38e769
https://github.com/chrisd1100/uncurl
https://github.com/chrisd1100/uncurl/commit/448cd13e7b18c83855d706c564341ddd1e38e769
None
1
UNCURL_EXPORT int32_t uncurl_ws_accept(struct uncurl_conn *ucc, char **origins, int32_t n_origins) { int32_t e; e = uncurl_read_header(ucc); if (e != UNCURL_OK) return e; uncurl_set_header_str(ucc, "Upgrade", "websocket"); uncurl_set_header_str(ucc, "Connection", "Upgrade"); char *origin = NULL; e = uncurl_get_header_str(ucc, "Origin", &origin); if (e != UNCURL_OK) return e; bool origin_ok = false; for (int32_t x = 0; x < n_origins; x++) if (strstr(origin, origins[x])) {origin_ok = true; break;} if (!origin_ok) return UNCURL_WS_ERR_ORIGIN; char *sec_key = NULL; e = uncurl_get_header_str(ucc, "Sec-WebSocket-Key", &sec_key); if (e != UNCURL_OK) return e; char *accept_key = ws_create_accept_key(sec_key); uncurl_set_header_str(ucc, "Sec-WebSocket-Accept", accept_key); free(accept_key); e = uncurl_write_header(ucc, "101", "Switching Protocols", UNCURL_RESPONSE); if (e != UNCURL_OK) return e; ucc->ws_mask = 0; return UNCURL_OK; }
185,832,743,048,029,200,000,000,000,000,000,000,000
None
null
[ "CWE-352" ]
CVE-2018-6651
In the uncurl_ws_accept function in uncurl.c in uncurl before 0.07, as used in Parsec before 140-3, insufficient Origin header validation (accepting an arbitrary substring match) for WebSocket API requests allows remote attackers to bypass intended access restrictions. In Parsec, this means full control over the victim's computer.
https://nvd.nist.gov/vuln/detail/CVE-2018-6651
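The substring check in uncurl_ws_accept is the flaw: strstr("https://evil-parsec.tv", "parsec.tv") matches. A sketch of an exact-match replacement, reusing the variables from the function above:

    /* sketch: require the Origin to equal a whitelisted value exactly */
    bool origin_ok = false;
    for (int32_t x = 0; x < n_origins; x++) {
        if (strcmp(origin, origins[x]) == 0) {
            origin_ok = true;
            break;
        }
    }
    if (!origin_ok) return UNCURL_WS_ERR_ORIGIN;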
3,790
w3m
7fdc83b0364005a0b5ed869230dd81752ba022e8
https://github.com/tats/w3m
https://github.com/tats/w3m/commit/7fdc83b0364005a0b5ed869230dd81752ba022e8
Prevent invalid columnPos() call in formUpdateBuffer() Bug-Debian: https://github.com/tats/w3m/issues/89
1
formUpdateBuffer(Anchor *a, Buffer *buf, FormItemList *form) { Buffer save; char *p; int spos, epos, rows, c_rows, pos, col = 0; Line *l; copyBuffer(&save, buf); gotoLine(buf, a->start.line); switch (form->type) { case FORM_TEXTAREA: case FORM_INPUT_TEXT: case FORM_INPUT_FILE: case FORM_INPUT_PASSWORD: case FORM_INPUT_CHECKBOX: case FORM_INPUT_RADIO: #ifdef MENU_SELECT case FORM_SELECT: #endif /* MENU_SELECT */ spos = a->start.pos; epos = a->end.pos; break; default: spos = a->start.pos + 1; epos = a->end.pos - 1; } switch (form->type) { case FORM_INPUT_CHECKBOX: case FORM_INPUT_RADIO: if (buf->currentLine == NULL || spos >= buf->currentLine->len || spos < 0) break; if (form->checked) buf->currentLine->lineBuf[spos] = '*'; else buf->currentLine->lineBuf[spos] = ' '; break; case FORM_INPUT_TEXT: case FORM_INPUT_FILE: case FORM_INPUT_PASSWORD: case FORM_TEXTAREA: #ifdef MENU_SELECT case FORM_SELECT: if (form->type == FORM_SELECT) { p = form->label->ptr; updateSelectOption(form, form->select_option); } else #endif /* MENU_SELECT */ { if (!form->value) break; p = form->value->ptr; } l = buf->currentLine; if (!l) break; if (form->type == FORM_TEXTAREA) { int n = a->y - buf->currentLine->linenumber; if (n > 0) for (; l && n; l = l->prev, n--) ; else if (n < 0) for (; l && n; l = l->prev, n++) ; if (!l) break; } rows = form->rows ? form->rows : 1; col = COLPOS(l, a->start.pos); for (c_rows = 0; c_rows < rows; c_rows++, l = l->next) { if (rows > 1) { pos = columnPos(l, col); a = retrieveAnchor(buf->formitem, l->linenumber, pos); if (a == NULL) break; spos = a->start.pos; epos = a->end.pos; } if (a->start.line != a->end.line || spos > epos || epos >= l->len || spos < 0 || epos < 0 || COLPOS(l, epos) < col) break; pos = form_update_line(l, &p, spos, epos, COLPOS(l, epos) - col, rows > 1, form->type == FORM_INPUT_PASSWORD); if (pos != epos) { shiftAnchorPosition(buf->href, buf->hmarklist, a->start.line, spos, pos - epos); shiftAnchorPosition(buf->name, buf->hmarklist, a->start.line, spos, pos - epos); shiftAnchorPosition(buf->img, buf->hmarklist, a->start.line, spos, pos - epos); shiftAnchorPosition(buf->formitem, buf->hmarklist, a->start.line, spos, pos - epos); } } break; } copyBuffer(buf, &save); arrangeLine(buf); }
90,699,627,261,407,880,000,000,000,000,000,000,000
form.c
290,331,077,565,996,070,000,000,000,000,000,000,000
[ "CWE-476" ]
CVE-2018-6197
w3m through 0.5.3 is prone to a NULL pointer dereference flaw in formUpdateBuffer in form.c.
https://nvd.nist.gov/vuln/detail/CVE-2018-6197
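In the multi-row loop above, l = l->next can yield NULL before c_rows reaches rows, after which columnPos(l, col) dereferences it. A sketch of the guard the commit title implies, not the verbatim diff:

    /* sketch: stop the row walk as soon as the line list runs out */
    for (c_rows = 0; c_rows < rows && l != NULL; c_rows++, l = l->next) {
        /* body unchanged: columnPos(l, col) can no longer see NULL */
    }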
3,791
w3m
8354763b90490d4105695df52674d0fcef823e92
https://github.com/tats/w3m
https://github.com/tats/w3m/commit/8354763b90490d4105695df52674d0fcef823e92
Prevent negative indent value in feed_table_block_tag() Bug-Debian: https://github.com/tats/w3m/issues/88
1
feed_table_block_tag(struct table *tbl, char *line, struct table_mode *mode, int indent, int cmd) { int offset; if (mode->indent_level <= 0 && indent == -1) return; if (mode->indent_level >= CHAR_MAX && indent == 1) return; setwidth(tbl, mode); feed_table_inline_tag(tbl, line, mode, -1); clearcontentssize(tbl, mode); if (indent == 1) { mode->indent_level++; if (mode->indent_level <= MAX_INDENT_LEVEL) tbl->indent += INDENT_INCR; } else if (indent == -1) { mode->indent_level--; if (mode->indent_level < MAX_INDENT_LEVEL) tbl->indent -= INDENT_INCR; } offset = tbl->indent; if (cmd == HTML_DT) { if (mode->indent_level > 0 && mode->indent_level <= MAX_INDENT_LEVEL) offset -= INDENT_INCR; } if (tbl->indent > 0) { check_minimum0(tbl, 0); addcontentssize(tbl, offset); } }
95,430,748,719,998,940,000,000,000,000,000,000,000
table.c
48,265,608,758,374,150,000,000,000,000,000,000,000
[ "CWE-835" ]
CVE-2018-6196
w3m through 0.5.3 is prone to an infinite recursion flaw in HTMLlineproc0 because the feed_table_block_tag function in table.c does not prevent a negative indent value.
https://nvd.nist.gov/vuln/detail/CVE-2018-6196
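The CVE text says HTMLlineproc0 loops forever once the indent goes negative. A generic sketch of the invariant being enforced; note the guards already visible at the top of the function above are closely related, so this is purely an illustration:

    /* sketch: the running indent must never go below zero */
    if (tbl->indent < 0)
        tbl->indent = 0;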
3,792
linux
073c516ff73557a8f7315066856c04b50383ac34
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/073c516ff73557a8f7315066856c04b50383ac34
nsfs: mark dentry with DCACHE_RCUACCESS Andrey reported a use-after-free in __ns_get_path(): spin_lock include/linux/spinlock.h:299 [inline] lockref_get_not_dead+0x19/0x80 lib/lockref.c:179 __ns_get_path+0x197/0x860 fs/nsfs.c:66 open_related_ns+0xda/0x200 fs/nsfs.c:143 sock_ioctl+0x39d/0x440 net/socket.c:1001 vfs_ioctl fs/ioctl.c:45 [inline] do_vfs_ioctl+0x1bf/0x1780 fs/ioctl.c:685 SYSC_ioctl fs/ioctl.c:700 [inline] SyS_ioctl+0x8f/0xc0 fs/ioctl.c:691 We are under rcu read lock protection at that point: rcu_read_lock(); d = atomic_long_read(&ns->stashed); if (!d) goto slow; dentry = (struct dentry *)d; if (!lockref_get_not_dead(&dentry->d_lockref)) goto slow; rcu_read_unlock(); but don't use a proper RCU API on the free path, therefore a parallel __d_free() could free it at the same time. We need to mark the stashed dentry with DCACHE_RCUACCESS so that __d_free() will be called after all readers leave RCU. Fixes: e149ed2b805f ("take the targets of /proc/*/ns/* symlinks to separate fs") Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Andrew Morton <akpm@linux-foundation.org> Reported-by: Andrey Konovalov <andreyknvl@google.com> Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1
static void *__ns_get_path(struct path *path, struct ns_common *ns) { struct vfsmount *mnt = nsfs_mnt; struct qstr qname = { .name = "", }; struct dentry *dentry; struct inode *inode; unsigned long d; rcu_read_lock(); d = atomic_long_read(&ns->stashed); if (!d) goto slow; dentry = (struct dentry *)d; if (!lockref_get_not_dead(&dentry->d_lockref)) goto slow; rcu_read_unlock(); ns->ops->put(ns); got_it: path->mnt = mntget(mnt); path->dentry = dentry; return NULL; slow: rcu_read_unlock(); inode = new_inode_pseudo(mnt->mnt_sb); if (!inode) { ns->ops->put(ns); return ERR_PTR(-ENOMEM); } inode->i_ino = ns->inum; inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); inode->i_flags |= S_IMMUTABLE; inode->i_mode = S_IFREG | S_IRUGO; inode->i_fop = &ns_file_operations; inode->i_private = ns; dentry = d_alloc_pseudo(mnt->mnt_sb, &qname); if (!dentry) { iput(inode); return ERR_PTR(-ENOMEM); } d_instantiate(dentry, inode); dentry->d_fsdata = (void *)ns->ops; d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry); if (d) { d_delete(dentry); /* make sure ->d_prune() does nothing */ dput(dentry); cpu_relax(); return ERR_PTR(-EAGAIN); } goto got_it; }
283,164,269,815,372,800,000,000,000,000,000,000,000
nsfs.c
255,868,983,414,598,720,000,000,000,000,000,000,000
[ "CWE-416" ]
CVE-2018-5873
An issue was discovered in the __ns_get_path function in fs/nsfs.c in the Linux kernel before 4.11. Due to a race condition when accessing files, a Use After Free condition can occur. This also affects all Android releases from CAF using the Linux kernel (Android for MSM, Firefox OS for MSM, QRD Android) before security patch level 2018-07-05.
https://nvd.nist.gov/vuln/detail/CVE-2018-5873
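As the commit message explains, the stashed dentry must be marked so __d_free() is deferred past the RCU grace period. A one-line sketch of that marking in the slow path of __ns_get_path, with the exact placement assumed:

    /* sketch: make freeing of the stashed dentry RCU-safe */
    d_instantiate(dentry, inode);
    dentry->d_flags |= DCACHE_RCUACCESS;
    dentry->d_fsdata = (void *)ns->ops;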
3,795
linux
ae6650163c66a7eff1acd6eb8b0f752dcfa8eba5
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/ae6650163c66a7eff1acd6eb8b0f752dcfa8eba5
loop: fix concurrent lo_open/lo_release 范龙飞 reports that KASAN can report a use-after-free in __lock_acquire. The reason is due to insufficient serialization in lo_release(), which will continue to use the loop device even after it has decremented the lo_refcnt to zero. In the meantime, another process can come in, open the loop device again as it is being shut down. Confusion ensues. Reported-by: 范龙飞 <long7573@126.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1
static void lo_release(struct gendisk *disk, fmode_t mode) { struct loop_device *lo = disk->private_data; int err; if (atomic_dec_return(&lo->lo_refcnt)) return; mutex_lock(&lo->lo_ctl_mutex); if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) { /* * In autoclear mode, stop the loop thread * and remove configuration after last close. */ err = loop_clr_fd(lo); if (!err) return; } else if (lo->lo_state == Lo_bound) { /* * Otherwise keep thread (if running) and config, * but flush possible ongoing bios in thread. */ blk_mq_freeze_queue(lo->lo_queue); blk_mq_unfreeze_queue(lo->lo_queue); } mutex_unlock(&lo->lo_ctl_mutex); }
92,709,335,376,815,670,000,000,000,000,000,000,000
loop.c
15,664,066,985,517,498,000,000,000,000,000,000,000
[ "CWE-416" ]
CVE-2018-5344
In the Linux kernel through 4.14.13, drivers/block/loop.c mishandles lo_release serialization, which allows attackers to cause a denial of service (__lock_acquire use-after-free) or possibly have unspecified other impact.
https://nvd.nist.gov/vuln/detail/CVE-2018-5344
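A sketch of the serialization the commit message describes: take the same lock lo_open uses, so a concurrent open cannot observe the device mid-teardown. Here __lo_release stands for the old function body factored out, and the lock name is an assumption; this is not the verbatim diff.

    static void lo_release(struct gendisk *disk, fmode_t mode)
    {
        mutex_lock(&loop_index_mutex);      /* same lock lo_open takes */
        __lo_release(disk->private_data);   /* old body, factored out */
        mutex_unlock(&loop_index_mutex);
    }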
3,796
linux
c1fa0768a8713b135848f78fd43ffc208d8ded70
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/c1fa0768a8713b135848f78fd43ffc208d8ded70
powerpc/tm: Flush TM only if CPU has TM feature Commit cd63f3c ("powerpc/tm: Fix saving of TM SPRs in core dump") added code to access TM SPRs in flush_tmregs_to_thread(). However flush_tmregs_to_thread() does not check if the TM feature is available on the CPU before trying to access TM SPRs in order to copy live state to thread structures. flush_tmregs_to_thread() is indeed guarded by CONFIG_PPC_TRANSACTIONAL_MEM, but it might be the case that the kernel was compiled with CONFIG_PPC_TRANSACTIONAL_MEM enabled and ran on a CPU without the TM feature available, thus resulting in the execution of TM instructions, which the CPU treats as illegal instructions. The fix is just to add proper checking in flush_tmregs_to_thread() that the CPU has the TM feature before accessing any TM-specific resource, returning immediately if TM is not available on the CPU. Adding that checking in flush_tmregs_to_thread() instead of in the places where it is called, like vsr_get() and vsr_set(), is better because it avoids the same problem cropping up elsewhere. Cc: stable@vger.kernel.org # v4.13+ Fixes: cd63f3c ("powerpc/tm: Fix saving of TM SPRs in core dump") Signed-off-by: Gustavo Romero <gromero@linux.vnet.ibm.com> Reviewed-by: Cyril Bur <cyrilbur@gmail.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
1
static void flush_tmregs_to_thread(struct task_struct *tsk) { /* * If task is not current, it will have been flushed already to * it's thread_struct during __switch_to(). * * A reclaim flushes ALL the state or if not in TM save TM SPRs * in the appropriate thread structures from live. */ if (tsk != current) return; if (MSR_TM_SUSPENDED(mfmsr())) { tm_reclaim_current(TM_CAUSE_SIGNAL); } else { tm_enable(); tm_save_sprs(&(tsk->thread)); } }
117,879,822,457,848,680,000,000,000,000,000,000,000
None
null
[ "CWE-119" ]
CVE-2018-1091
In the flush_tmregs_to_thread function in arch/powerpc/kernel/ptrace.c in the Linux kernel before 4.13.5, a guest kernel crash can be triggered from unprivileged userspace during a core dump on a POWER host due to a missing processor feature check and an erroneous use of transactional memory (TM) instructions in the core dump path, leading to a denial of service.
https://nvd.nist.gov/vuln/detail/CVE-2018-1091
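The fix the commit message describes is an early feature gate. A sketch at the top of flush_tmregs_to_thread:

    /* sketch: never touch TM SPRs on a CPU without the TM facility */
    if (!cpu_has_feature(CPU_FTR_TM))
        return;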
3,802
linux
57ebd808a97d7c5b1e1afb937c2db22beba3c1f8
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/57ebd808a97d7c5b1e1afb937c2db22beba3c1f8
netfilter: add back stackpointer size checks The rationale for removing the check is only correct for rulesets generated by ip(6)tables. In iptables, a jump can only occur to a user-defined chain, i.e. because we size the stack based on the number of user-defined chains we cannot exceed stack size. However, the underlying binary format has no such restriction, and the validation step only ensures that the jump target is a valid rule start point. IOW, it's possible to build a rule blob that has no user-defined chains but does contain a jump. If this happens, no jump stack gets allocated and a crash occurs when the jump is taken. Fixes: 7814b6ec6d0d6 ("netfilter: xtables: don't save/restore jumpstack offset") Reported-by: syzbot+e783f671527912cd9403@syzkaller.appspotmail.com Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
1
ipt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table) { unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); const struct iphdr *ip; /* Initializing verdict to NF_DROP keeps gcc happy. */ unsigned int verdict = NF_DROP; const char *indev, *outdev; const void *table_base; struct ipt_entry *e, **jumpstack; unsigned int stackidx, cpu; const struct xt_table_info *private; struct xt_action_param acpar; unsigned int addend; /* Initialization */ stackidx = 0; ip = ip_hdr(skb); indev = state->in ? state->in->name : nulldevname; outdev = state->out ? state->out->name : nulldevname; /* We handle fragments by dealing with the first fragment as * if it was a normal packet. All other fragments are treated * normally, except that they will NEVER match rules that ask * things we don't know, ie. tcp syn flag or ports). If the * rule is also a fragment-specific rule, non-fragments won't * match it. */ acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; acpar.thoff = ip_hdrlen(skb); acpar.hotdrop = false; acpar.state = state; WARN_ON(!(table->valid_hooks & (1 << hook))); local_bh_disable(); addend = xt_write_recseq_begin(); private = READ_ONCE(table->private); /* Address dependency. */ cpu = smp_processor_id(); table_base = private->entries; jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; /* Switch to alternate jumpstack if we're being invoked via TEE. * TEE issues XT_CONTINUE verdict on original skb so we must not * clobber the jumpstack. * * For recursion via REJECT or SYNPROXY the stack will be clobbered * but it is no problem since absolute verdict is issued by these. */ if (static_key_false(&xt_tee_enabled)) jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated); e = get_entry(table_base, private->hook_entry[hook]); do { const struct xt_entry_target *t; const struct xt_entry_match *ematch; struct xt_counters *counter; WARN_ON(!e); if (!ip_packet_match(ip, indev, outdev, &e->ip, acpar.fragoff)) { no_match: e = ipt_next_entry(e); continue; } xt_ematch_foreach(ematch, e) { acpar.match = ematch->u.kernel.match; acpar.matchinfo = ematch->data; if (!acpar.match->match(skb, &acpar)) goto no_match; } counter = xt_get_this_cpu_counter(&e->counters); ADD_COUNTER(*counter, skb->len, 1); t = ipt_get_target(e); WARN_ON(!t->u.kernel.target); #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* The packet is traced: log it */ if (unlikely(skb->nf_trace)) trace_packet(state->net, skb, hook, state->in, state->out, table->name, private, e); #endif /* Standard target? */ if (!t->u.kernel.target->target) { int v; v = ((struct xt_standard_target *)t)->verdict; if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { verdict = (unsigned int)(-v) - 1; break; } if (stackidx == 0) { e = get_entry(table_base, private->underflow[hook]); } else { e = jumpstack[--stackidx]; e = ipt_next_entry(e); } continue; } if (table_base + v != ipt_next_entry(e) && !(e->ip.flags & IPT_F_GOTO)) jumpstack[stackidx++] = e; e = get_entry(table_base, v); continue; } acpar.target = t->u.kernel.target; acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); if (verdict == XT_CONTINUE) { /* Target might have changed stuff. */ ip = ip_hdr(skb); e = ipt_next_entry(e); } else { /* Verdict */ break; } } while (!acpar.hotdrop); xt_write_recseq_end(addend); local_bh_enable(); if (acpar.hotdrop) return NF_DROP; else return verdict; }
317,586,211,130,688,100,000,000,000,000,000,000,000
None
null
[ "CWE-476" ]
CVE-2018-1065
The netfilter subsystem in the Linux kernel through 4.15.7 mishandles the case of a rule blob that contains a jump but lacks a user-defined chain, which allows local users to cause a denial of service (NULL pointer dereference) by leveraging the CAP_NET_RAW or CAP_NET_ADMIN capability, related to arpt_do_table in net/ipv4/netfilter/arp_tables.c, ipt_do_table in net/ipv4/netfilter/ip_tables.c, and ip6t_do_table in net/ipv6/netfilter/ip6_tables.c.
https://nvd.nist.gov/vuln/detail/CVE-2018-1065
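A sketch of the restored bound in ipt_do_table, placed where the code above pushes onto jumpstack. The shape is inferred from the commit title and the surrounding identifiers; it is not the verbatim diff.

    /* sketch: refuse to push past the allocated jump stack */
    if (unlikely(stackidx >= private->stacksize)) {
        verdict = NF_DROP;
        break;
    }
    jumpstack[stackidx++] = e;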
3,803
curl
ba1dbd78e5f1ed67c1b8d37ac89d90e5e330b628
https://github.com/curl/curl
https://github.com/curl/curl/commit/ba1dbd78e5f1ed67c1b8d37ac89d90e5e330b628
smtp: use the upload buffer size for scratch buffer malloc ... not the read buffer size, as that can be set smaller and thus cause a buffer overflow! CVE-2018-0500 Reported-by: Peter Wu Bug: https://curl.haxx.se/docs/adv_2018-70a2.html
1
CURLcode Curl_smtp_escape_eob(struct connectdata *conn, const ssize_t nread) { /* When sending a SMTP payload we must detect CRLF. sequences making sure they are sent as CRLF.. instead, as a . on the beginning of a line will be deleted by the server when not part of an EOB terminator and a genuine CRLF.CRLF which isn't escaped will wrongly be detected as end of data by the server */ ssize_t i; ssize_t si; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; char *scratch = data->state.scratch; char *newscratch = NULL; char *oldscratch = NULL; size_t eob_sent; /* Do we need to allocate a scratch buffer? */ if(!scratch || data->set.crlf) { oldscratch = scratch; scratch = newscratch = malloc(2 * data->set.buffer_size); if(!newscratch) { failf(data, "Failed to alloc scratch buffer!"); return CURLE_OUT_OF_MEMORY; } } /* Have we already sent part of the EOB? */ eob_sent = smtp->eob; /* This loop can be improved by some kind of Boyer-Moore style of approach but that is saved for later... */ for(i = 0, si = 0; i < nread; i++) { if(SMTP_EOB[smtp->eob] == data->req.upload_fromhere[i]) { smtp->eob++; /* Is the EOB potentially the terminating CRLF? */ if(2 == smtp->eob || SMTP_EOB_LEN == smtp->eob) smtp->trailing_crlf = TRUE; else smtp->trailing_crlf = FALSE; } else if(smtp->eob) { /* A previous substring matched so output that first */ memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent); si += smtp->eob - eob_sent; /* Then compare the first byte */ if(SMTP_EOB[0] == data->req.upload_fromhere[i]) smtp->eob = 1; else smtp->eob = 0; eob_sent = 0; /* Reset the trailing CRLF flag as there was more data */ smtp->trailing_crlf = FALSE; } /* Do we have a match for CRLF. as per RFC-5321, sect. 4.5.2 */ if(SMTP_EOB_FIND_LEN == smtp->eob) { /* Copy the replacement data to the target buffer */ memcpy(&scratch[si], &SMTP_EOB_REPL[eob_sent], SMTP_EOB_REPL_LEN - eob_sent); si += SMTP_EOB_REPL_LEN - eob_sent; smtp->eob = 0; eob_sent = 0; } else if(!smtp->eob) scratch[si++] = data->req.upload_fromhere[i]; } if(smtp->eob - eob_sent) { /* A substring matched before processing ended so output that now */ memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent); si += smtp->eob - eob_sent; } /* Only use the new buffer if we replaced something */ if(si != nread) { /* Upload from the new (replaced) buffer instead */ data->req.upload_fromhere = scratch; /* Save the buffer so it can be freed later */ data->state.scratch = scratch; /* Free the old scratch buffer */ free(oldscratch); /* Set the new amount too */ data->req.upload_present = si; } else free(newscratch); return CURLE_OK; }
170,694,906,334,543,880,000,000,000,000,000,000,000
smtp.c
285,403,495,559,952,680,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2018-0500
Curl_smtp_escape_eob in lib/smtp.c in curl 7.54.1 to and including curl 7.60.0 has a heap-based buffer overflow that might be exploitable by an attacker who can control the data that curl transmits over SMTP with certain settings (i.e., use of a nonstandard --limit-rate argument or CURLOPT_BUFFERSIZE value).
https://nvd.nist.gov/vuln/detail/CVE-2018-0500
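The commit message pinpoints the bug: the scratch buffer is sized from the configurable read buffer, while the data being escaped comes from the upload path. A sketch of the corrected allocation; UPLOAD_BUFSIZE stands in for whatever bound the upload path really uses (assumption).

    /* sketch: size scratch from the buffer actually being escaped,
     * not from the user-tunable read buffer */
    scratch = newscratch = malloc(2 * UPLOAD_BUFSIZE);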
3,805
miniupnp
7aeb624b44f86d335841242ff427433190e7168a
https://github.com/miniupnp/miniupnp
https://github.com/miniupnp/miniupnp/commit/7aeb624b44f86d335841242ff427433190e7168a
properly initialize data structure for SOAP parsing in ParseNameValue(). The topelt field was not properly initialized. Should fix #268.
1
ParseNameValue(const char * buffer, int bufsize, struct NameValueParserData * data) { struct xmlparser parser; data->l_head = NULL; data->portListing = NULL; data->portListingLength = 0; /* init xmlparser object */ parser.xmlstart = buffer; parser.xmlsize = bufsize; parser.data = data; parser.starteltfunc = NameValueParserStartElt; parser.endeltfunc = NameValueParserEndElt; parser.datafunc = NameValueParserGetData; parser.attfunc = 0; parsexml(&parser); }
18,049,727,750,159,877,000,000,000,000,000,000,000
upnpreplyparse.c
41,172,885,683,690,726,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2017-1000494
Uninitialized stack variable vulnerability in NameValueParserEndElt (upnpreplyparse.c) in miniupnpd < 2.0 allows an attacker to cause Denial of Service (Segmentation fault and Memory Corruption) or possibly have unspecified other impact
https://nvd.nist.gov/vuln/detail/CVE-2017-1000494
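Illustrative sketch for the record above: ParseNameValue() fills in some fields of the parser data structure but leaves others (the topelt field the fix targets) holding stack garbage. A common defensive pattern, sketched below with a stand-in struct whose field names only approximate miniupnp's real layout, is to zero the whole structure before setting individual fields, so anything read later is at least deterministically zero.

#include <string.h>

/* Stand-in for the real NameValueParserData; layout is illustrative. */
struct parser_data {
    void *l_head;
    char *portListing;
    int   portListingLength;
    int   topelt;            /* the field the real fix initializes */
};

static void parser_data_init(struct parser_data *data)
{
    /* Zero everything first: a field added to the struct later can
     * never be left uninitialized, which is how the bug arose. */
    memset(data, 0, sizeof(*data));
    /* ...then set any non-zero defaults explicitly. */
}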
3,806
wildmidi
814f31d8eceda8401eb812fc2e94ed143fdad0ab
https://github.com/Mindwerks/wildmidi
https://github.com/Mindwerks/wildmidi/commit/814f31d8eceda8401eb812fc2e94ed143fdad0ab
wildmidi_lib.c (WildMidi_Open, WildMidi_OpenBuffer): refuse to proceed if fewer than 18 bytes of input. Fixes bug #178.
1
WM_SYMBOL midi *WildMidi_Open(const char *midifile) { uint8_t *mididata = NULL; uint32_t midisize = 0; uint8_t mus_hdr[] = { 'M', 'U', 'S', 0x1A }; uint8_t xmi_hdr[] = { 'F', 'O', 'R', 'M' }; midi * ret = NULL; if (!WM_Initialized) { _WM_GLOBAL_ERROR(__FUNCTION__, __LINE__, WM_ERR_NOT_INIT, NULL, 0); return (NULL); } if (midifile == NULL) { _WM_GLOBAL_ERROR(__FUNCTION__, __LINE__, WM_ERR_INVALID_ARG, "(NULL filename)", 0); return (NULL); } if ((mididata = (uint8_t *) _WM_BufferFile(midifile, &midisize)) == NULL) { return (NULL); } if (memcmp(mididata,"HMIMIDIP", 8) == 0) { ret = (void *) _WM_ParseNewHmp(mididata, midisize); } else if (memcmp(mididata, "HMI-MIDISONG061595", 18) == 0) { ret = (void *) _WM_ParseNewHmi(mididata, midisize); } else if (memcmp(mididata, mus_hdr, 4) == 0) { ret = (void *) _WM_ParseNewMus(mididata, midisize); } else if (memcmp(mididata, xmi_hdr, 4) == 0) { ret = (void *) _WM_ParseNewXmi(mididata, midisize); } else { ret = (void *) _WM_ParseNewMidi(mididata, midisize); } free(mididata); if (ret) { if (add_handle(ret) != 0) { WildMidi_Close(ret); ret = NULL; } } return (ret); }
49,238,147,717,637,630,000,000,000,000,000,000,000
wildmidi_lib.c
287,548,982,026,805,930,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2017-1000418
The WildMidi_Open function in WildMIDI since commit d8a466829c67cacbb1700beded25c448d99514e5 allows remote attackers to cause a denial of service (heap-based buffer overflow and application crash) or possibly have unspecified other impact via a crafted file.
https://nvd.nist.gov/vuln/detail/CVE-2017-1000418
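Illustrative sketch for the record above: WildMidi_Open() compares up to 18 bytes of the buffered file against known magic strings, so a file shorter than 18 bytes makes the memcmp() read past the allocation. The guard the fix adds looks roughly like the following; open_checked is a hypothetical name, not WildMIDI's API.

#include <string.h>
#include <stdint.h>
#include <stdlib.h>

/* The longest magic compared is the 18-byte "HMI-MIDISONG061595", so
 * anything shorter cannot be classified safely and must be rejected
 * before any memcmp() runs. */
static void *open_checked(uint8_t *mididata, uint32_t midisize)
{
    if (mididata == NULL || midisize < 18) {
        free(mididata);        /* too short: memcmp would overread */
        return NULL;
    }
    if (memcmp(mididata, "HMI-MIDISONG061595", 18) == 0) {
        /* ...parse as HMI... */
    }
    /* ...the shorter 8- and 4-byte header probes are now safe too. */
    return mididata;
}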
3,814
linux
b86e33075ed1909d8002745b56ecf73b833db143
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/b86e33075ed1909d8002745b56ecf73b833db143
f2fs: fix a dead loop in f2fs_fiemap() A dead loop can be triggered in f2fs_fiemap() using the test case as below: ... fd = open(); fallocate(fd, 0, 0, 4294967296); ioctl(fd, FS_IOC_FIEMAP, fiemap_buf); ... It's caused by an overflow in __get_data_block(): ... bh->b_size = map.m_len << inode->i_blkbits; ... map.m_len is an unsigned int, and bh->b_size is a size_t, which is 64 bits on a 64-bit architecture; the shift is evaluated as a 32-bit unsigned int before the conversion to size_t, so it can overflow. In the above-mentioned case, bh->b_size will be zero, and f2fs_fiemap() will call get_data_block() at block 0 again and again. Fix this by adding a forced conversion before the left shift. Signed-off-by: Wei Fang <fangwei1@huawei.com> Acked-by: Chao Yu <yuchao0@huawei.com> Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
1
static int __get_data_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create, int flag, pgoff_t *next_pgofs) { struct f2fs_map_blocks map; int err; map.m_lblk = iblock; map.m_len = bh->b_size >> inode->i_blkbits; map.m_next_pgofs = next_pgofs; err = f2fs_map_blocks(inode, &map, create, flag); if (!err) { map_bh(bh, inode->i_sb, map.m_pblk); bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags; bh->b_size = map.m_len << inode->i_blkbits; } return err; }
36,840,277,564,373,170,000,000,000,000,000,000,000
data.c
145,455,815,818,505,820,000,000,000,000,000,000,000
[ "CWE-190" ]
CVE-2017-18257
The __get_data_block function in fs/f2fs/data.c in the Linux kernel before 4.11 allows local users to cause a denial of service (integer overflow and loop) via crafted use of the open and fallocate system calls with an FS_IOC_FIEMAP ioctl.
https://nvd.nist.gov/vuln/detail/CVE-2017-18257
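Illustrative sketch for the record above: the classic 32-bit-shift-assigned-to-64-bit pattern. Because map.m_len is an unsigned int, map.m_len << inode->i_blkbits is computed in 32 bits and can wrap to zero before it is widened into the size_t b_size. The userspace reproduction below assumes an LP64 system (64-bit size_t) and shows the cast the commit adds.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    unsigned int m_len   = 1u << 20;   /* 2^20 blocks, as produced by  */
    unsigned int blkbits = 12;         /* a 4 GiB file of 4 KiB blocks */

    size_t wrong = m_len << blkbits;           /* 32-bit shift wraps to 0 */
    size_t right = (size_t)m_len << blkbits;   /* widen first: 4 GiB      */

    printf("wrong=%zu right=%zu\n", wrong, right);
    return 0;
}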
3,815
linux
1572e45a924f254d9570093abde46430c3172e3d
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/1572e45a924f254d9570093abde46430c3172e3d
perf/core: Fix the perf_cpu_time_max_percent check Use "proc_dointvec_minmax" instead of "proc_dointvec" to check the input value from user-space. If not, we can set a big value and some vars will overflow like "sysctl_perf_event_sample_rate" which will cause a lot of unexpected problems. Signed-off-by: Tan Xiaojun <tanxiaojun@huawei.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: <acme@kernel.org> Cc: <alexander.shishkin@linux.intel.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vince Weaver <vincent.weaver@maine.edu> Link: http://lkml.kernel.org/r/1487829879-56237-1-git-send-email-tanxiaojun@huawei.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
1
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret = proc_dointvec(table, write, buffer, lenp, ppos); if (ret || !write) return ret; if (sysctl_perf_cpu_time_max_percent == 100 || sysctl_perf_cpu_time_max_percent == 0) { printk(KERN_WARNING "perf: Dynamic interrupt throttling disabled, can hang your system!\n"); WRITE_ONCE(perf_sample_allowed_ns, 0); } else { update_perf_cpu_limits(); } return 0; }
108,483,018,357,036,800,000,000,000,000,000,000,000
core.c
304,500,610,601,399,240,000,000,000,000,000,000,000
[ "CWE-190" ]
CVE-2017-18255
The perf_cpu_time_max_percent_handler function in kernel/events/core.c in the Linux kernel before 4.11 allows local users to cause a denial of service (integer overflow) or possibly have unspecified other impact via a large value, as demonstrated by an incorrect sample-rate calculation.
https://nvd.nist.gov/vuln/detail/CVE-2017-18255
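Illustrative sketch for the record above: the fix swaps proc_dointvec for proc_dointvec_minmax so the write path rejects out-of-range percentages before any derived value (such as the sample-rate computation the description mentions) can overflow. Outside the kernel the same idea is validate-then-commit; the [0, 100] range below is the natural assumption for a percentage, and the function name is hypothetical.

#include <stdio.h>

static int perf_cpu_time_max_percent = 25;    /* illustrative global */

/* Validate-then-commit: reject out-of-range input instead of storing
 * it and letting later arithmetic overflow, which is what the
 * proc_dointvec_minmax switch accomplishes in the real handler. */
static int set_cpu_time_max_percent(long val)
{
    if (val < 0 || val > 100)
        return -1;                            /* reject before commit */
    perf_cpu_time_max_percent = (int)val;
    return 0;
}

int main(void)
{
    printf("%d\n", set_cpu_time_max_percent(10000));  /* rejected: -1 */
    printf("%d\n", set_cpu_time_max_percent(80));     /* accepted: 0  */
    return 0;
}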
3,816
linux
30a61ddf8117c26ac5b295e1233eaa9629a94ca3
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/30a61ddf8117c26ac5b295e1233eaa9629a94ca3
f2fs: fix race condition between free nid allocator/initializer In the concurrent case below, an allocated nid can be loaded into the free nid cache and be allocated again. Thread A Thread B - f2fs_create - f2fs_new_inode - alloc_nid - __insert_nid_to_list(ALLOC_NID_LIST) - f2fs_balance_fs_bg - build_free_nids - __build_free_nids - scan_nat_page - add_free_nid - __lookup_nat_cache - f2fs_add_link - init_inode_metadata - new_inode_page - new_node_page - set_node_addr - alloc_nid_done - __remove_nid_from_list(ALLOC_NID_LIST) - __insert_nid_to_list(FREE_NID_LIST) This patch makes the nat cache lookup and the free nid list operation atomic to avoid this race condition. Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org> Signed-off-by: Chao Yu <yuchao0@huawei.com> Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
1
static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i; struct nat_entry *ne; int err; /* 0 nid should not be used */ if (unlikely(nid == 0)) return false; if (build) { /* do not add allocated nids */ ne = __lookup_nat_cache(nm_i, nid); if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) || nat_get_blkaddr(ne) != NULL_ADDR)) return false; } i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS); i->nid = nid; i->state = NID_NEW; if (radix_tree_preload(GFP_NOFS)) { kmem_cache_free(free_nid_slab, i); return true; } spin_lock(&nm_i->nid_list_lock); err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true); spin_unlock(&nm_i->nid_list_lock); radix_tree_preload_end(); if (err) { kmem_cache_free(free_nid_slab, i); return true; } return true; }
84,542,765,132,488,550,000,000,000,000,000,000,000
node.c
42,220,681,768,883,035,000,000,000,000,000,000,000
[ "CWE-362" ]
CVE-2017-18249
The add_free_nid function in fs/f2fs/node.c in the Linux kernel before 4.12 does not properly track an allocated nid, which allows local users to cause a denial of service (race condition) or possibly have unspecified other impact via concurrent threads.
https://nvd.nist.gov/vuln/detail/CVE-2017-18249
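Illustrative sketch for the record above: the race is a check-then-act gap, where the NAT-cache lookup that decides a nid is free and the insertion into the free-nid list run under different locking, so another thread can allocate the nid in between. The generic repair, shown here with pthreads and toy arrays instead of the kernel's spinlock and radix tree, is to put the lookup and the insert in one critical section. Link with -lpthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t nid_lock = PTHREAD_MUTEX_INITIALIZER;
static bool allocated[1024];   /* toy NAT cache: nid -> in use? */
static bool freelist[1024];    /* toy free-nid list             */

/* Lookup and insert inside ONE critical section, so no other thread
 * can allocate the nid between the check and the add -- the gap the
 * commit message diagrams. */
static bool add_free_nid_atomic(unsigned int nid)
{
    bool added = false;

    pthread_mutex_lock(&nid_lock);
    if (!allocated[nid % 1024]) {
        freelist[nid % 1024] = true;
        added = true;
    }
    pthread_mutex_unlock(&nid_lock);
    return added;
}

int main(void)
{
    allocated[7] = true;
    printf("%d %d\n", add_free_nid_atomic(7), add_free_nid_atomic(8)); /* 0 1 */
    return 0;
}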
3,817
cups
49fa4983f25b64ec29d548ffa3b9782426007df3
https://github.com/apple/cups
https://github.com/apple/cups/commit/49fa4983f25b64ec29d548ffa3b9782426007df3
DBUS notifications could crash the scheduler (Issue #5143) - scheduler/ipp.c: Make sure requesting-user-name string is valid UTF-8.
1
add_job(cupsd_client_t *con, /* I - Client connection */ cupsd_printer_t *printer, /* I - Destination printer */ mime_type_t *filetype) /* I - First print file type, if any */ { http_status_t status; /* Policy status */ ipp_attribute_t *attr, /* Current attribute */ *auth_info; /* auth-info attribute */ const char *mandatory; /* Current mandatory job attribute */ const char *val; /* Default option value */ int priority; /* Job priority */ cupsd_job_t *job; /* Current job */ char job_uri[HTTP_MAX_URI]; /* Job URI */ int kbytes; /* Size of print file */ int i; /* Looping var */ int lowerpagerange; /* Page range bound */ int exact; /* Did we have an exact match? */ ipp_attribute_t *media_col, /* media-col attribute */ *media_margin; /* media-*-margin attribute */ ipp_t *unsup_col; /* media-col in unsupported response */ static const char * const readonly[] =/* List of read-only attributes */ { "date-time-at-completed", "date-time-at-creation", "date-time-at-processing", "job-detailed-status-messages", "job-document-access-errors", "job-id", "job-impressions-completed", "job-k-octets-completed", "job-media-sheets-completed", "job-pages-completed", "job-printer-up-time", "job-printer-uri", "job-state", "job-state-message", "job-state-reasons", "job-uri", "number-of-documents", "number-of-intervening-jobs", "output-device-assigned", "time-at-completed", "time-at-creation", "time-at-processing" }; cupsdLogMessage(CUPSD_LOG_DEBUG2, "add_job(%p[%d], %p(%s), %p(%s/%s))", con, con->number, printer, printer->name, filetype, filetype ? filetype->super : "none", filetype ? filetype->type : "none"); /* * Check remote printing to non-shared printer... */ if (!printer->shared && _cups_strcasecmp(con->http->hostname, "localhost") && _cups_strcasecmp(con->http->hostname, ServerName)) { send_ipp_status(con, IPP_NOT_AUTHORIZED, _("The printer or class is not shared.")); return (NULL); } /* * Check policy... */ auth_info = ippFindAttribute(con->request, "auth-info", IPP_TAG_TEXT); if ((status = cupsdCheckPolicy(printer->op_policy_ptr, con, NULL)) != HTTP_OK) { send_http_error(con, status, printer); return (NULL); } else if (printer->num_auth_info_required == 1 && !strcmp(printer->auth_info_required[0], "negotiate") && !con->username[0]) { send_http_error(con, HTTP_UNAUTHORIZED, printer); return (NULL); } #ifdef HAVE_SSL else if (auth_info && !con->http->tls && !httpAddrLocalhost(con->http->hostaddr)) { /* * Require encryption of auth-info over non-local connections... */ send_http_error(con, HTTP_UPGRADE_REQUIRED, printer); return (NULL); } #endif /* HAVE_SSL */ /* * See if the printer is accepting jobs... */ if (!printer->accepting) { send_ipp_status(con, IPP_NOT_ACCEPTING, _("Destination \"%s\" is not accepting jobs."), printer->name); return (NULL); } /* * Validate job template attributes; for now just document-format, * copies, job-sheets, number-up, page-ranges, mandatory attributes, and * media... 
*/ for (i = 0; i < (int)(sizeof(readonly) / sizeof(readonly[0])); i ++) { if ((attr = ippFindAttribute(con->request, readonly[i], IPP_TAG_ZERO)) != NULL) { ippDeleteAttribute(con->request, attr); if (StrictConformance) { send_ipp_status(con, IPP_BAD_REQUEST, _("The '%s' Job Status attribute cannot be supplied in a job creation request."), readonly[i]); return (NULL); } cupsdLogMessage(CUPSD_LOG_INFO, "Unexpected '%s' Job Status attribute in a job creation request.", readonly[i]); } } if (printer->pc) { for (mandatory = (char *)cupsArrayFirst(printer->pc->mandatory); mandatory; mandatory = (char *)cupsArrayNext(printer->pc->mandatory)) { if (!ippFindAttribute(con->request, mandatory, IPP_TAG_ZERO)) { /* * Missing a required attribute... */ send_ipp_status(con, IPP_CONFLICT, _("The \"%s\" attribute is required for print jobs."), mandatory); return (NULL); } } } if (filetype && printer->filetypes && !cupsArrayFind(printer->filetypes, filetype)) { char mimetype[MIME_MAX_SUPER + MIME_MAX_TYPE + 2]; /* MIME media type string */ snprintf(mimetype, sizeof(mimetype), "%s/%s", filetype->super, filetype->type); send_ipp_status(con, IPP_DOCUMENT_FORMAT, _("Unsupported format \"%s\"."), mimetype); ippAddString(con->response, IPP_TAG_UNSUPPORTED_GROUP, IPP_TAG_MIMETYPE, "document-format", NULL, mimetype); return (NULL); } if ((attr = ippFindAttribute(con->request, "copies", IPP_TAG_INTEGER)) != NULL) { if (attr->values[0].integer < 1 || attr->values[0].integer > MaxCopies) { send_ipp_status(con, IPP_ATTRIBUTES, _("Bad copies value %d."), attr->values[0].integer); ippAddInteger(con->response, IPP_TAG_UNSUPPORTED_GROUP, IPP_TAG_INTEGER, "copies", attr->values[0].integer); return (NULL); } } if ((attr = ippFindAttribute(con->request, "job-sheets", IPP_TAG_ZERO)) != NULL) { if (attr->value_tag != IPP_TAG_KEYWORD && attr->value_tag != IPP_TAG_NAME) { send_ipp_status(con, IPP_BAD_REQUEST, _("Bad job-sheets value type.")); return (NULL); } if (attr->num_values > 2) { send_ipp_status(con, IPP_BAD_REQUEST, _("Too many job-sheets values (%d > 2)."), attr->num_values); return (NULL); } for (i = 0; i < attr->num_values; i ++) if (strcmp(attr->values[i].string.text, "none") && !cupsdFindBanner(attr->values[i].string.text)) { send_ipp_status(con, IPP_BAD_REQUEST, _("Bad job-sheets value \"%s\"."), attr->values[i].string.text); return (NULL); } } if ((attr = ippFindAttribute(con->request, "number-up", IPP_TAG_INTEGER)) != NULL) { if (attr->values[0].integer != 1 && attr->values[0].integer != 2 && attr->values[0].integer != 4 && attr->values[0].integer != 6 && attr->values[0].integer != 9 && attr->values[0].integer != 16) { send_ipp_status(con, IPP_ATTRIBUTES, _("Bad number-up value %d."), attr->values[0].integer); ippAddInteger(con->response, IPP_TAG_UNSUPPORTED_GROUP, IPP_TAG_INTEGER, "number-up", attr->values[0].integer); return (NULL); } } if ((attr = ippFindAttribute(con->request, "page-ranges", IPP_TAG_RANGE)) != NULL) { for (i = 0, lowerpagerange = 1; i < attr->num_values; i ++) { if (attr->values[i].range.lower < lowerpagerange || attr->values[i].range.lower > attr->values[i].range.upper) { send_ipp_status(con, IPP_BAD_REQUEST, _("Bad page-ranges values %d-%d."), attr->values[i].range.lower, attr->values[i].range.upper); return (NULL); } lowerpagerange = attr->values[i].range.upper + 1; } } /* * Do media selection as needed... 
*/ if (!ippFindAttribute(con->request, "PageRegion", IPP_TAG_ZERO) && !ippFindAttribute(con->request, "PageSize", IPP_TAG_ZERO) && _ppdCacheGetPageSize(printer->pc, con->request, NULL, &exact)) { if (!exact && (media_col = ippFindAttribute(con->request, "media-col", IPP_TAG_BEGIN_COLLECTION)) != NULL) { send_ipp_status(con, IPP_OK_SUBST, _("Unsupported margins.")); unsup_col = ippNew(); if ((media_margin = ippFindAttribute(media_col->values[0].collection, "media-bottom-margin", IPP_TAG_INTEGER)) != NULL) ippAddInteger(unsup_col, IPP_TAG_ZERO, IPP_TAG_INTEGER, "media-bottom-margin", media_margin->values[0].integer); if ((media_margin = ippFindAttribute(media_col->values[0].collection, "media-left-margin", IPP_TAG_INTEGER)) != NULL) ippAddInteger(unsup_col, IPP_TAG_ZERO, IPP_TAG_INTEGER, "media-left-margin", media_margin->values[0].integer); if ((media_margin = ippFindAttribute(media_col->values[0].collection, "media-right-margin", IPP_TAG_INTEGER)) != NULL) ippAddInteger(unsup_col, IPP_TAG_ZERO, IPP_TAG_INTEGER, "media-right-margin", media_margin->values[0].integer); if ((media_margin = ippFindAttribute(media_col->values[0].collection, "media-top-margin", IPP_TAG_INTEGER)) != NULL) ippAddInteger(unsup_col, IPP_TAG_ZERO, IPP_TAG_INTEGER, "media-top-margin", media_margin->values[0].integer); ippAddCollection(con->response, IPP_TAG_UNSUPPORTED_GROUP, "media-col", unsup_col); ippDelete(unsup_col); } } /* * Make sure we aren't over our limit... */ if (MaxJobs && cupsArrayCount(Jobs) >= MaxJobs) cupsdCleanJobs(); if (MaxJobs && cupsArrayCount(Jobs) >= MaxJobs) { send_ipp_status(con, IPP_NOT_POSSIBLE, _("Too many active jobs.")); return (NULL); } if ((i = check_quotas(con, printer)) < 0) { send_ipp_status(con, IPP_NOT_POSSIBLE, _("Quota limit reached.")); return (NULL); } else if (i == 0) { send_ipp_status(con, IPP_NOT_AUTHORIZED, _("Not allowed to print.")); return (NULL); } /* * Create the job and set things up... 
*/ if ((attr = ippFindAttribute(con->request, "job-priority", IPP_TAG_INTEGER)) != NULL) priority = attr->values[0].integer; else { if ((val = cupsGetOption("job-priority", printer->num_options, printer->options)) != NULL) priority = atoi(val); else priority = 50; ippAddInteger(con->request, IPP_TAG_JOB, IPP_TAG_INTEGER, "job-priority", priority); } if ((attr = ippFindAttribute(con->request, "job-name", IPP_TAG_ZERO)) == NULL) ippAddString(con->request, IPP_TAG_JOB, IPP_TAG_NAME, "job-name", NULL, "Untitled"); else if ((attr->value_tag != IPP_TAG_NAME && attr->value_tag != IPP_TAG_NAMELANG) || attr->num_values != 1) { send_ipp_status(con, IPP_ATTRIBUTES, _("Bad job-name value: Wrong type or count.")); if ((attr = ippCopyAttribute(con->response, attr, 0)) != NULL) attr->group_tag = IPP_TAG_UNSUPPORTED_GROUP; return (NULL); } else if (!ippValidateAttribute(attr)) { send_ipp_status(con, IPP_ATTRIBUTES, _("Bad job-name value: %s"), cupsLastErrorString()); if ((attr = ippCopyAttribute(con->response, attr, 0)) != NULL) attr->group_tag = IPP_TAG_UNSUPPORTED_GROUP; return (NULL); } if ((job = cupsdAddJob(priority, printer->name)) == NULL) { send_ipp_status(con, IPP_INTERNAL_ERROR, _("Unable to add job for destination \"%s\"."), printer->name); return (NULL); } job->dtype = printer->type & (CUPS_PRINTER_CLASS | CUPS_PRINTER_REMOTE); job->attrs = con->request; job->dirty = 1; con->request = ippNewRequest(job->attrs->request.op.operation_id); cupsdMarkDirty(CUPSD_DIRTY_JOBS); add_job_uuid(job); apply_printer_defaults(printer, job); attr = ippFindAttribute(job->attrs, "requesting-user-name", IPP_TAG_NAME); if (con->username[0]) { cupsdSetString(&job->username, con->username); if (attr) ippSetString(job->attrs, &attr, 0, con->username); } else if (attr) { cupsdLogMessage(CUPSD_LOG_DEBUG, "add_job: requesting-user-name=\"%s\"", attr->values[0].string.text); cupsdSetString(&job->username, attr->values[0].string.text); } else cupsdSetString(&job->username, "anonymous"); if (!attr) ippAddString(job->attrs, IPP_TAG_JOB, IPP_TAG_NAME, "job-originating-user-name", NULL, job->username); else { ippSetGroupTag(job->attrs, &attr, IPP_TAG_JOB); ippSetName(job->attrs, &attr, "job-originating-user-name"); } if (con->username[0] || auth_info) { save_auth_info(con, job, auth_info); /* * Remove the auth-info attribute from the attribute data... */ if (auth_info) ippDeleteAttribute(job->attrs, auth_info); } if ((attr = ippFindAttribute(con->request, "job-name", IPP_TAG_NAME)) != NULL) cupsdSetString(&(job->name), attr->values[0].string.text); if ((attr = ippFindAttribute(job->attrs, "job-originating-host-name", IPP_TAG_ZERO)) != NULL) { /* * Request contains a job-originating-host-name attribute; validate it... */ if (attr->value_tag != IPP_TAG_NAME || attr->num_values != 1 || strcmp(con->http->hostname, "localhost")) { /* * Can't override the value if we aren't connected via localhost. * Also, we can only have 1 value and it must be a name value. */ ippDeleteAttribute(job->attrs, attr); ippAddString(job->attrs, IPP_TAG_JOB, IPP_TAG_NAME, "job-originating-host-name", NULL, con->http->hostname); } else ippSetGroupTag(job->attrs, &attr, IPP_TAG_JOB); } else { /* * No job-originating-host-name attribute, so use the hostname from * the connection... 
*/ ippAddString(job->attrs, IPP_TAG_JOB, IPP_TAG_NAME, "job-originating-host-name", NULL, con->http->hostname); } ippAddOutOfBand(job->attrs, IPP_TAG_JOB, IPP_TAG_NOVALUE, "date-time-at-completed"); ippAddDate(job->attrs, IPP_TAG_JOB, "date-time-at-creation", ippTimeToDate(time(NULL))); ippAddOutOfBand(job->attrs, IPP_TAG_JOB, IPP_TAG_NOVALUE, "date-time-at-processing"); ippAddOutOfBand(job->attrs, IPP_TAG_JOB, IPP_TAG_NOVALUE, "time-at-completed"); ippAddInteger(job->attrs, IPP_TAG_JOB, IPP_TAG_INTEGER, "time-at-creation", time(NULL)); ippAddOutOfBand(job->attrs, IPP_TAG_JOB, IPP_TAG_NOVALUE, "time-at-processing"); /* * Add remaining job attributes... */ ippAddInteger(job->attrs, IPP_TAG_JOB, IPP_TAG_INTEGER, "job-id", job->id); job->state = ippAddInteger(job->attrs, IPP_TAG_JOB, IPP_TAG_ENUM, "job-state", IPP_JOB_STOPPED); job->state_value = (ipp_jstate_t)job->state->values[0].integer; job->reasons = ippAddString(job->attrs, IPP_TAG_JOB, IPP_TAG_KEYWORD, "job-state-reasons", NULL, "job-incoming"); job->impressions = ippAddInteger(job->attrs, IPP_TAG_JOB, IPP_TAG_INTEGER, "job-impressions-completed", 0); job->sheets = ippAddInteger(job->attrs, IPP_TAG_JOB, IPP_TAG_INTEGER, "job-media-sheets-completed", 0); ippAddString(job->attrs, IPP_TAG_JOB, IPP_TAG_URI, "job-printer-uri", NULL, printer->uri); if ((attr = ippFindAttribute(job->attrs, "job-k-octets", IPP_TAG_INTEGER)) != NULL) attr->values[0].integer = 0; else ippAddInteger(job->attrs, IPP_TAG_JOB, IPP_TAG_INTEGER, "job-k-octets", 0); if ((attr = ippFindAttribute(job->attrs, "job-hold-until", IPP_TAG_KEYWORD)) == NULL) attr = ippFindAttribute(job->attrs, "job-hold-until", IPP_TAG_NAME); if (!attr) { if ((val = cupsGetOption("job-hold-until", printer->num_options, printer->options)) == NULL) val = "no-hold"; attr = ippAddString(job->attrs, IPP_TAG_JOB, IPP_TAG_KEYWORD, "job-hold-until", NULL, val); } if (printer->holding_new_jobs) { /* * Hold all new jobs on this printer... */ if (attr && strcmp(attr->values[0].string.text, "no-hold")) cupsdSetJobHoldUntil(job, ippGetString(attr, 0, NULL), 0); else cupsdSetJobHoldUntil(job, "indefinite", 0); job->state->values[0].integer = IPP_JOB_HELD; job->state_value = IPP_JOB_HELD; ippSetString(job->attrs, &job->reasons, 0, "job-held-on-create"); } else if (attr && strcmp(attr->values[0].string.text, "no-hold")) { /* * Hold job until specified time... */ cupsdSetJobHoldUntil(job, attr->values[0].string.text, 0); job->state->values[0].integer = IPP_JOB_HELD; job->state_value = IPP_JOB_HELD; ippSetString(job->attrs, &job->reasons, 0, "job-hold-until-specified"); } else if (job->attrs->request.op.operation_id == IPP_CREATE_JOB) { job->hold_until = time(NULL) + MultipleOperationTimeout; job->state->values[0].integer = IPP_JOB_HELD; job->state_value = IPP_JOB_HELD; } else { job->state->values[0].integer = IPP_JOB_PENDING; job->state_value = IPP_JOB_PENDING; ippSetString(job->attrs, &job->reasons, 0, "none"); } if (!(printer->type & CUPS_PRINTER_REMOTE) || Classification) { /* * Add job sheets options... */ if ((attr = ippFindAttribute(job->attrs, "job-sheets", IPP_TAG_ZERO)) == NULL) { cupsdLogMessage(CUPSD_LOG_DEBUG, "Adding default job-sheets values \"%s,%s\"...", printer->job_sheets[0], printer->job_sheets[1]); attr = ippAddStrings(job->attrs, IPP_TAG_JOB, IPP_TAG_NAME, "job-sheets", 2, NULL, NULL); ippSetString(job->attrs, &attr, 0, printer->job_sheets[0]); ippSetString(job->attrs, &attr, 1, printer->job_sheets[1]); } job->job_sheets = attr; /* * Enforce classification level if set... 
*/ if (Classification) { cupsdLogMessage(CUPSD_LOG_INFO, "Classification=\"%s\", ClassifyOverride=%d", Classification ? Classification : "(null)", ClassifyOverride); if (ClassifyOverride) { if (!strcmp(attr->values[0].string.text, "none") && (attr->num_values == 1 || !strcmp(attr->values[1].string.text, "none"))) { /* * Force the leading banner to have the classification on it... */ ippSetString(job->attrs, &attr, 0, Classification); cupsdLogJob(job, CUPSD_LOG_NOTICE, "CLASSIFICATION FORCED " "job-sheets=\"%s,none\", " "job-originating-user-name=\"%s\"", Classification, job->username); } else if (attr->num_values == 2 && strcmp(attr->values[0].string.text, attr->values[1].string.text) && strcmp(attr->values[0].string.text, "none") && strcmp(attr->values[1].string.text, "none")) { /* * Can't put two different security markings on the same document! */ ippSetString(job->attrs, &attr, 1, attr->values[0].string.text); cupsdLogJob(job, CUPSD_LOG_NOTICE, "CLASSIFICATION FORCED " "job-sheets=\"%s,%s\", " "job-originating-user-name=\"%s\"", attr->values[0].string.text, attr->values[1].string.text, job->username); } else if (strcmp(attr->values[0].string.text, Classification) && strcmp(attr->values[0].string.text, "none") && (attr->num_values == 1 || (strcmp(attr->values[1].string.text, Classification) && strcmp(attr->values[1].string.text, "none")))) { if (attr->num_values == 1) cupsdLogJob(job, CUPSD_LOG_NOTICE, "CLASSIFICATION OVERRIDDEN " "job-sheets=\"%s\", " "job-originating-user-name=\"%s\"", attr->values[0].string.text, job->username); else cupsdLogJob(job, CUPSD_LOG_NOTICE, "CLASSIFICATION OVERRIDDEN " "job-sheets=\"%s,%s\",fffff " "job-originating-user-name=\"%s\"", attr->values[0].string.text, attr->values[1].string.text, job->username); } } else if (strcmp(attr->values[0].string.text, Classification) && (attr->num_values == 1 || strcmp(attr->values[1].string.text, Classification))) { /* * Force the banner to have the classification on it... */ if (attr->num_values > 1 && !strcmp(attr->values[0].string.text, attr->values[1].string.text)) { ippSetString(job->attrs, &attr, 0, Classification); ippSetString(job->attrs, &attr, 1, Classification); } else { if (attr->num_values == 1 || strcmp(attr->values[0].string.text, "none")) ippSetString(job->attrs, &attr, 0, Classification); if (attr->num_values > 1 && strcmp(attr->values[1].string.text, "none")) ippSetString(job->attrs, &attr, 1, Classification); } if (attr->num_values > 1) cupsdLogJob(job, CUPSD_LOG_NOTICE, "CLASSIFICATION FORCED " "job-sheets=\"%s,%s\", " "job-originating-user-name=\"%s\"", attr->values[0].string.text, attr->values[1].string.text, job->username); else cupsdLogJob(job, CUPSD_LOG_NOTICE, "CLASSIFICATION FORCED " "job-sheets=\"%s\", " "job-originating-user-name=\"%s\"", Classification, job->username); } } /* * See if we need to add the starting sheet... */ if (!(printer->type & CUPS_PRINTER_REMOTE)) { cupsdLogJob(job, CUPSD_LOG_INFO, "Adding start banner page \"%s\".", attr->values[0].string.text); if ((kbytes = copy_banner(con, job, attr->values[0].string.text)) < 0) { cupsdSetJobState(job, IPP_JOB_ABORTED, CUPSD_JOB_PURGE, "Aborting job because the start banner could not be " "copied."); return (NULL); } cupsdUpdateQuota(printer, job->username, 0, kbytes); } } else if ((attr = ippFindAttribute(job->attrs, "job-sheets", IPP_TAG_ZERO)) != NULL) job->job_sheets = attr; /* * Fill in the response info... 
*/ httpAssembleURIf(HTTP_URI_CODING_ALL, job_uri, sizeof(job_uri), "ipp", NULL, con->clientname, con->clientport, "/jobs/%d", job->id); ippAddString(con->response, IPP_TAG_JOB, IPP_TAG_URI, "job-uri", NULL, job_uri); ippAddInteger(con->response, IPP_TAG_JOB, IPP_TAG_INTEGER, "job-id", job->id); ippAddInteger(con->response, IPP_TAG_JOB, IPP_TAG_ENUM, "job-state", job->state_value); ippAddString(con->response, IPP_TAG_JOB, IPP_TAG_TEXT, "job-state-message", NULL, ""); ippAddString(con->response, IPP_TAG_JOB, IPP_TAG_KEYWORD, "job-state-reasons", NULL, job->reasons->values[0].string.text); con->response->request.status.status_code = IPP_OK; /* * Add any job subscriptions... */ add_job_subscriptions(con, job); /* * Set all but the first two attributes to the job attributes group... */ for (attr = job->attrs->attrs->next->next; attr; attr = attr->next) attr->group_tag = IPP_TAG_JOB; /* * Fire the "job created" event... */ cupsdAddEvent(CUPSD_EVENT_JOB_CREATED, printer, job, "Job created."); /* * Return the new job... */ return (job); }
143,641,121,189,248,620,000,000,000,000,000,000,000
ipp.c
103,444,657,439,025,130,000,000,000,000,000,000,000
[ "CWE-20" ]
CVE-2017-18248
The add_job function in scheduler/ipp.c in CUPS before 2.2.6, when D-Bus support is enabled, can be crashed by remote attackers by sending print jobs with an invalid username, related to a D-Bus notification.
https://nvd.nist.gov/vuln/detail/CVE-2017-18248
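Illustrative sketch for the record above: the crash comes from forwarding a client-supplied requesting-user-name to D-Bus without checking that it is valid UTF-8. Below is a minimal, hedged well-formedness check; the real cupsd uses its own string utilities, and a production validator should additionally reject overlong sequences and surrogate code points, which this sketch does not.

#include <stdbool.h>

/* Minimal UTF-8 structural check: validates lead/continuation byte
 * shapes only. Reject (or fall back to "anonymous") before the name
 * reaches the D-Bus notification path. */
static bool utf8_ok(const unsigned char *s)
{
    while (*s) {
        int cont;
        if (*s < 0x80)                cont = 0;   /* ASCII          */
        else if ((*s & 0xE0) == 0xC0) cont = 1;   /* 2-byte lead    */
        else if ((*s & 0xF0) == 0xE0) cont = 2;   /* 3-byte lead    */
        else if ((*s & 0xF8) == 0xF0) cont = 3;   /* 4-byte lead    */
        else return false;     /* stray continuation or invalid lead */
        s++;
        while (cont--) {
            if ((*s & 0xC0) != 0x80)
                return false;  /* truncated/missing continuation     */
            s++;
        }
    }
    return true;
}

int main(void)
{
    return utf8_ok((const unsigned char *)"user-\xC3\xA9") ? 0 : 1;
}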
3,837
linux
70feee0e1ef331b22cc51f383d532a0d043fbdcc
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/70feee0e1ef331b22cc51f383d532a0d043fbdcc
mlock: fix mlock count can not decrease in race condition Kefeng reported that when running the follow test, the mlock count in meminfo will increase permanently: [1] testcase linux:~ # cat test_mlockal grep Mlocked /proc/meminfo for j in `seq 0 10` do for i in `seq 4 15` do ./p_mlockall >> log & done sleep 0.2 done # wait some time to let mlock counter decrease and 5s may not enough sleep 5 grep Mlocked /proc/meminfo linux:~ # cat p_mlockall.c #include <sys/mman.h> #include <stdlib.h> #include <stdio.h> #define SPACE_LEN 4096 int main(int argc, char ** argv) { int ret; void *adr = malloc(SPACE_LEN); if (!adr) return -1; ret = mlockall(MCL_CURRENT | MCL_FUTURE); printf("mlcokall ret = %d\n", ret); ret = munlockall(); printf("munlcokall ret = %d\n", ret); free(adr); return 0; } In __munlock_pagevec() we should decrement NR_MLOCK for each page where we clear the PageMlocked flag. Commit 1ebb7cc6a583 ("mm: munlock: batch NR_MLOCK zone state updates") has introduced a bug where we don't decrement NR_MLOCK for pages where we clear the flag, but fail to isolate them from the lru list (e.g. when the pages are on some other cpu's percpu pagevec). Since PageMlocked stays cleared, the NR_MLOCK accounting gets permanently disrupted by this. Fix it by counting the number of page whose PageMlock flag is cleared. Fixes: 1ebb7cc6a583 (" mm: munlock: batch NR_MLOCK zone state updates") Link: http://lkml.kernel.org/r/1495678405-54569-1-git-send-email-xieyisheng1@huawei.com Signed-off-by: Yisheng Xie <xieyisheng1@huawei.com> Reported-by: Kefeng Wang <wangkefeng.wang@huawei.com> Tested-by: Kefeng Wang <wangkefeng.wang@huawei.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Joern Engel <joern@logfs.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Michel Lespinasse <walken@google.com> Cc: Hugh Dickins <hughd@google.com> Cc: Rik van Riel <riel@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@suse.cz> Cc: Xishi Qiu <qiuxishi@huawei.com> Cc: zhongjiang <zhongjiang@huawei.com> Cc: Hanjun Guo <guohanjun@huawei.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) { int i; int nr = pagevec_count(pvec); int delta_munlocked; struct pagevec pvec_putback; int pgrescued = 0; pagevec_init(&pvec_putback, 0); /* Phase 1: page isolation */ spin_lock_irq(zone_lru_lock(zone)); for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (TestClearPageMlocked(page)) { /* * We already have pin from follow_page_mask() * so we can spare the get_page() here. */ if (__munlock_isolate_lru_page(page, false)) continue; else __munlock_isolation_failed(page); } /* * We won't be munlocking this page in the next phase * but we still need to release the follow_page_mask() * pin. We cannot do it under lru_lock however. If it's * the last pin, __page_cache_release() would deadlock. */ pagevec_add(&pvec_putback, pvec->pages[i]); pvec->pages[i] = NULL; } delta_munlocked = -nr + pagevec_count(&pvec_putback); __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); spin_unlock_irq(zone_lru_lock(zone)); /* Now we can release pins of pages that we are not munlocking */ pagevec_release(&pvec_putback); /* Phase 2: page munlock */ for (i = 0; i < nr; i++) { struct page *page = pvec->pages[i]; if (page) { lock_page(page); if (!__putback_lru_fast_prepare(page, &pvec_putback, &pgrescued)) { /* * Slow path. We don't want to lose the last * pin before unlock_page() */ get_page(page); /* for putback_lru_page() */ __munlock_isolated_page(page); unlock_page(page); put_page(page); /* from follow_page_mask() */ } } } /* * Phase 3: page putback for pages that qualified for the fast path * This will also call put_page() to return pin from follow_page_mask() */ if (pagevec_count(&pvec_putback)) __putback_lru_fast(&pvec_putback, pgrescued); }
115,707,399,857,902,040,000,000,000,000,000,000,000
mlock.c
124,452,116,569,757,200,000,000,000,000,000,000,000
[ "CWE-20" ]
CVE-2017-18221
The __munlock_pagevec function in mm/mlock.c in the Linux kernel before 4.11.4 allows local users to cause a denial of service (NR_MLOCK accounting corruption) via crafted use of mlockall and munlockall system calls.
https://nvd.nist.gov/vuln/detail/CVE-2017-18221
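Illustrative sketch for the record above: NR_MLOCK must be decremented once per page whose PageMlocked bit was cleared, but the buggy code derived the delta from the number of pages successfully isolated, silently dropping pages whose isolation failed. The fix counts the flag clears directly; a userspace analogue, with 'isolatable' standing in for whether LRU isolation succeeds:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative page: 'mlocked' is the flag, 'isolatable' stands in
 * for whether the fast-path isolation succeeds (it can fail while the
 * flag clear still happens). */
struct page { bool mlocked; bool isolatable; };

static long nr_mlock = 3;    /* global counter being kept in sync */

static void munlock_batch(struct page *pages, int nr)
{
    int munlocked = 0;       /* count flag clears, NOT isolations */

    for (int i = 0; i < nr; i++) {
        if (pages[i].mlocked) {
            pages[i].mlocked = false;
            munlocked++;     /* decrement is owed whether or not the
                              * page can also be isolated below */
            if (pages[i].isolatable) {
                /* ...fast-path handling... */
            }
        }
    }
    nr_mlock -= munlocked;   /* the commit's fix, in miniature */
}

int main(void)
{
    struct page p[3] = {{true, true}, {true, false}, {true, true}};
    munlock_batch(p, 3);
    printf("NR_MLOCK = %ld\n", nr_mlock);   /* 0, not 1 */
    return 0;
}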
3,838
linux
27463ad99f738ed93c7c8b3e2e5bc8c4853a2ff2
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/27463ad99f738ed93c7c8b3e2e5bc8c4853a2ff2
net: hns: Fix a skb used after free bug skb maybe freed in hns_nic_net_xmit_hw() and return NETDEV_TX_OK, which cause hns_nic_net_xmit to use a freed skb. BUG: KASAN: use-after-free in hns_nic_net_xmit_hw+0x62c/0x940... [17659.112635] alloc_debug_processing+0x18c/0x1a0 [17659.117208] __slab_alloc+0x52c/0x560 [17659.120909] kmem_cache_alloc_node+0xac/0x2c0 [17659.125309] __alloc_skb+0x6c/0x260 [17659.128837] tcp_send_ack+0x8c/0x280 [17659.132449] __tcp_ack_snd_check+0x9c/0xf0 [17659.136587] tcp_rcv_established+0x5a4/0xa70 [17659.140899] tcp_v4_do_rcv+0x27c/0x620 [17659.144687] tcp_prequeue_process+0x108/0x170 [17659.149085] tcp_recvmsg+0x940/0x1020 [17659.152787] inet_recvmsg+0x124/0x180 [17659.156488] sock_recvmsg+0x64/0x80 [17659.160012] SyS_recvfrom+0xd8/0x180 [17659.163626] __sys_trace_return+0x0/0x4 [17659.167506] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=23 cpu=1 pid=13 [17659.174000] free_debug_processing+0x1d4/0x2c0 [17659.178486] __slab_free+0x240/0x390 [17659.182100] kmem_cache_free+0x24c/0x270 [17659.186062] kfree_skbmem+0xa0/0xb0 [17659.189587] __kfree_skb+0x28/0x40 [17659.193025] napi_gro_receive+0x168/0x1c0 [17659.197074] hns_nic_rx_up_pro+0x58/0x90 [17659.201038] hns_nic_rx_poll_one+0x518/0xbc0 [17659.205352] hns_nic_common_poll+0x94/0x140 [17659.209576] net_rx_action+0x458/0x5e0 [17659.213363] __do_softirq+0x1b8/0x480 [17659.217062] run_ksoftirqd+0x64/0x80 [17659.220679] smpboot_thread_fn+0x224/0x310 [17659.224821] kthread+0x150/0x170 [17659.228084] ret_from_fork+0x10/0x40 BUG: KASAN: use-after-free in hns_nic_net_xmit+0x8c/0xc0... [17751.080490] __slab_alloc+0x52c/0x560 [17751.084188] kmem_cache_alloc+0x244/0x280 [17751.088238] __build_skb+0x40/0x150 [17751.091764] build_skb+0x28/0x100 [17751.095115] __alloc_rx_skb+0x94/0x150 [17751.098900] __napi_alloc_skb+0x34/0x90 [17751.102776] hns_nic_rx_poll_one+0x180/0xbc0 [17751.107097] hns_nic_common_poll+0x94/0x140 [17751.111333] net_rx_action+0x458/0x5e0 [17751.115123] __do_softirq+0x1b8/0x480 [17751.118823] run_ksoftirqd+0x64/0x80 [17751.122437] smpboot_thread_fn+0x224/0x310 [17751.126575] kthread+0x150/0x170 [17751.129838] ret_from_fork+0x10/0x40 [17751.133454] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=19 cpu=7 pid=43 [17751.139951] free_debug_processing+0x1d4/0x2c0 [17751.144436] __slab_free+0x240/0x390 [17751.148051] kmem_cache_free+0x24c/0x270 [17751.152014] kfree_skbmem+0xa0/0xb0 [17751.155543] __kfree_skb+0x28/0x40 [17751.159022] napi_gro_receive+0x168/0x1c0 [17751.163074] hns_nic_rx_up_pro+0x58/0x90 [17751.167041] hns_nic_rx_poll_one+0x518/0xbc0 [17751.171358] hns_nic_common_poll+0x94/0x140 [17751.175585] net_rx_action+0x458/0x5e0 [17751.179373] __do_softirq+0x1b8/0x480 [17751.183076] run_ksoftirqd+0x64/0x80 [17751.186691] smpboot_thread_fn+0x224/0x310 [17751.190826] kthread+0x150/0x170 [17751.194093] ret_from_fork+0x10/0x40 Fixes: 13ac695e7ea1 ("net:hns: Add support of Hip06 SoC to the Hislicon Network Subsystem") Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com> Signed-off-by: lipeng <lipeng321@huawei.com> Reported-by: Jun He <hjat2005@huawei.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1
static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb, struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); int ret; assert(skb->queue_mapping < ndev->ae_handle->q_num); ret = hns_nic_net_xmit_hw(ndev, skb, &tx_ring_data(priv, skb->queue_mapping)); if (ret == NETDEV_TX_OK) { netif_trans_update(ndev); ndev->stats.tx_bytes += skb->len; ndev->stats.tx_packets++; } return (netdev_tx_t)ret; }
286,205,539,332,141,500,000,000,000,000,000,000,000
hns_enet.c
118,232,006,227,961,090,000,000,000,000,000,000,000
[ "CWE-416" ]
CVE-2017-18218
In drivers/net/ethernet/hisilicon/hns/hns_enet.c in the Linux kernel before 4.13, local users can cause a denial of service (use-after-free and BUG) or possibly have unspecified other impact by leveraging differences in skb handling between hns_nic_net_xmit_hw and hns_nic_net_xmit.
https://nvd.nist.gov/vuln/detail/CVE-2017-18218
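Illustrative sketch for the record above: hns_nic_net_xmit reads skb->len after hns_nic_net_xmit_hw() may already have freed the skb on its error paths, which also return NETDEV_TX_OK. The upstream fix moves the accounting into the callee; the general caller-side discipline, sketched generically rather than as the actual driver code, is to copy out anything you still need before calling a routine that may consume the object.

#include <stdlib.h>
#include <stdio.h>

struct buf { size_t len; };

/* May free 'b' on some paths and still report success -- exactly the
 * ownership contract that tripped up the original caller. */
static int submit_and_maybe_free(struct buf *b)
{
    free(b);          /* callee consumes the buffer */
    return 0;         /* "OK" even though b is gone */
}

static size_t tx_bytes;

static int xmit(struct buf *b)
{
    size_t len = b->len;          /* copy out BEFORE ownership transfer */
    int ret = submit_and_maybe_free(b);

    if (ret == 0)
        tx_bytes += len;          /* safe: saved copy, not b->len */
    return ret;
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b));
    b->len = 1500;
    xmit(b);
    printf("tx_bytes=%zu\n", tx_bytes);
    return 0;
}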
3,839
linux
27463ad99f738ed93c7c8b3e2e5bc8c4853a2ff2
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/27463ad99f738ed93c7c8b3e2e5bc8c4853a2ff2
net: hns: Fix a skb used after free bug skb maybe freed in hns_nic_net_xmit_hw() and return NETDEV_TX_OK, which cause hns_nic_net_xmit to use a freed skb. BUG: KASAN: use-after-free in hns_nic_net_xmit_hw+0x62c/0x940... [17659.112635] alloc_debug_processing+0x18c/0x1a0 [17659.117208] __slab_alloc+0x52c/0x560 [17659.120909] kmem_cache_alloc_node+0xac/0x2c0 [17659.125309] __alloc_skb+0x6c/0x260 [17659.128837] tcp_send_ack+0x8c/0x280 [17659.132449] __tcp_ack_snd_check+0x9c/0xf0 [17659.136587] tcp_rcv_established+0x5a4/0xa70 [17659.140899] tcp_v4_do_rcv+0x27c/0x620 [17659.144687] tcp_prequeue_process+0x108/0x170 [17659.149085] tcp_recvmsg+0x940/0x1020 [17659.152787] inet_recvmsg+0x124/0x180 [17659.156488] sock_recvmsg+0x64/0x80 [17659.160012] SyS_recvfrom+0xd8/0x180 [17659.163626] __sys_trace_return+0x0/0x4 [17659.167506] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=23 cpu=1 pid=13 [17659.174000] free_debug_processing+0x1d4/0x2c0 [17659.178486] __slab_free+0x240/0x390 [17659.182100] kmem_cache_free+0x24c/0x270 [17659.186062] kfree_skbmem+0xa0/0xb0 [17659.189587] __kfree_skb+0x28/0x40 [17659.193025] napi_gro_receive+0x168/0x1c0 [17659.197074] hns_nic_rx_up_pro+0x58/0x90 [17659.201038] hns_nic_rx_poll_one+0x518/0xbc0 [17659.205352] hns_nic_common_poll+0x94/0x140 [17659.209576] net_rx_action+0x458/0x5e0 [17659.213363] __do_softirq+0x1b8/0x480 [17659.217062] run_ksoftirqd+0x64/0x80 [17659.220679] smpboot_thread_fn+0x224/0x310 [17659.224821] kthread+0x150/0x170 [17659.228084] ret_from_fork+0x10/0x40 BUG: KASAN: use-after-free in hns_nic_net_xmit+0x8c/0xc0... [17751.080490] __slab_alloc+0x52c/0x560 [17751.084188] kmem_cache_alloc+0x244/0x280 [17751.088238] __build_skb+0x40/0x150 [17751.091764] build_skb+0x28/0x100 [17751.095115] __alloc_rx_skb+0x94/0x150 [17751.098900] __napi_alloc_skb+0x34/0x90 [17751.102776] hns_nic_rx_poll_one+0x180/0xbc0 [17751.107097] hns_nic_common_poll+0x94/0x140 [17751.111333] net_rx_action+0x458/0x5e0 [17751.115123] __do_softirq+0x1b8/0x480 [17751.118823] run_ksoftirqd+0x64/0x80 [17751.122437] smpboot_thread_fn+0x224/0x310 [17751.126575] kthread+0x150/0x170 [17751.129838] ret_from_fork+0x10/0x40 [17751.133454] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=19 cpu=7 pid=43 [17751.139951] free_debug_processing+0x1d4/0x2c0 [17751.144436] __slab_free+0x240/0x390 [17751.148051] kmem_cache_free+0x24c/0x270 [17751.152014] kfree_skbmem+0xa0/0xb0 [17751.155543] __kfree_skb+0x28/0x40 [17751.159022] napi_gro_receive+0x168/0x1c0 [17751.163074] hns_nic_rx_up_pro+0x58/0x90 [17751.167041] hns_nic_rx_poll_one+0x518/0xbc0 [17751.171358] hns_nic_common_poll+0x94/0x140 [17751.175585] net_rx_action+0x458/0x5e0 [17751.179373] __do_softirq+0x1b8/0x480 [17751.183076] run_ksoftirqd+0x64/0x80 [17751.186691] smpboot_thread_fn+0x224/0x310 [17751.190826] kthread+0x150/0x170 [17751.194093] ret_from_fork+0x10/0x40 Fixes: 13ac695e7ea1 ("net:hns: Add support of Hip06 SoC to the Hislicon Network Subsystem") Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com> Signed-off-by: lipeng <lipeng321@huawei.com> Reported-by: Jun He <hjat2005@huawei.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1
int hns_nic_net_xmit_hw(struct net_device *ndev, struct sk_buff *skb, struct hns_nic_ring_data *ring_data) { struct hns_nic_priv *priv = netdev_priv(ndev); struct hnae_ring *ring = ring_data->ring; struct device *dev = ring_to_dev(ring); struct netdev_queue *dev_queue; struct skb_frag_struct *frag; int buf_num; int seg_num; dma_addr_t dma; int size, next_to_use; int i; switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { case -EBUSY: ring->stats.tx_busy++; goto out_net_tx_busy; case -ENOMEM: ring->stats.sw_err_cnt++; netdev_err(ndev, "no memory to xmit!\n"); goto out_err_tx_ok; default: break; } /* no. of segments (plus a header) */ seg_num = skb_shinfo(skb)->nr_frags + 1; next_to_use = ring->next_to_use; /* fill the first part */ size = skb_headlen(skb); dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) { netdev_err(ndev, "TX head DMA map failed\n"); ring->stats.sw_err_cnt++; goto out_err_tx_ok; } priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, buf_num, DESC_TYPE_SKB, ndev->mtu); /* fill the fragments */ for (i = 1; i < seg_num; i++) { frag = &skb_shinfo(skb)->frags[i - 1]; size = skb_frag_size(frag); dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) { netdev_err(ndev, "TX frag(%d) DMA map failed\n", i); ring->stats.sw_err_cnt++; goto out_map_frag_fail; } priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, seg_num - 1 == i ? 1 : 0, buf_num, DESC_TYPE_PAGE, ndev->mtu); } /*complete translate all packets*/ dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping); netdev_tx_sent_queue(dev_queue, skb->len); wmb(); /* commit all data before submit */ assert(skb->queue_mapping < priv->ae_handle->q_num); hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); ring->stats.tx_pkts++; ring->stats.tx_bytes += skb->len; return NETDEV_TX_OK; out_map_frag_fail: while (ring->next_to_use != next_to_use) { unfill_desc(ring); if (ring->next_to_use != next_to_use) dma_unmap_page(dev, ring->desc_cb[ring->next_to_use].dma, ring->desc_cb[ring->next_to_use].length, DMA_TO_DEVICE); else dma_unmap_single(dev, ring->desc_cb[next_to_use].dma, ring->desc_cb[next_to_use].length, DMA_TO_DEVICE); } out_err_tx_ok: dev_kfree_skb_any(skb); return NETDEV_TX_OK; out_net_tx_busy: netif_stop_subqueue(ndev, skb->queue_mapping); /* Herbert's original patch had: * smp_mb__after_netif_stop_queue(); * but since that doesn't exist yet, just open code it. */ smp_mb(); return NETDEV_TX_BUSY; }
111,953,708,316,717,710,000,000,000,000,000,000,000
hns_enet.c
118,232,006,227,961,090,000,000,000,000,000,000,000
[ "CWE-416" ]
CVE-2017-18218
In drivers/net/ethernet/hisilicon/hns/hns_enet.c in the Linux kernel before 4.13, local users can cause a denial of service (use-after-free and BUG) or possibly have unspecified other impact by leveraging differences in skb handling between hns_nic_net_xmit_hw and hns_nic_net_xmit.
https://nvd.nist.gov/vuln/detail/CVE-2017-18218
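This second record is the callee from the same commit: the out_err_tx_ok path frees the skb yet returns NETDEV_TX_OK, so the caller's later skb->len read is a use-after-free. The callee-side half of the repair, mirrored in miniature below (not the actual driver code), is to account statistics while the buffer is still alive and to let the caller treat the buffer as consumed on every return path.

#include <stdlib.h>

struct buf { size_t len; };
static size_t tx_pkts, tx_bytes;

/* Account while the buffer is still valid, then free; the caller must
 * not touch the buffer again regardless of the return value. */
static int submit(struct buf *b)
{
    tx_pkts++;
    tx_bytes += b->len;   /* done here, before any free */
    free(b);
    return 0;
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b));
    b->len = 64;
    return submit(b);
}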
3,843
linux
6ea8d958a2c95a1d514015d4e29ba21a8c0a1a91
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/6ea8d958a2c95a1d514015d4e29ba21a8c0a1a91
mm/madvise.c: fix madvise() infinite loop under special circumstances MADVISE_WILLNEED has always been a noop for DAX (formerly XIP) mappings. Unfortunately madvise_willneed() doesn't communicate this information properly to the generic madvise syscall implementation. The calling convention is quite subtle there. madvise_vma() is supposed to either return an error or update &prev otherwise the main loop will never advance to the next vma and it will keep looping for ever without a way to get out of the kernel. It seems this has been broken since introduction. Nobody has noticed because nobody seems to be using MADVISE_WILLNEED on these DAX mappings. [mhocko@suse.com: rewrite changelog] Link: http://lkml.kernel.org/r/20171127115318.911-1-guoxuenan@huawei.com Fixes: fe77ba6f4f97 ("[PATCH] xip: madvice/fadvice: execute in place") Signed-off-by: chenjie <chenjie6@huawei.com> Signed-off-by: guoxuenan <guoxuenan@huawei.com> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Minchan Kim <minchan@kernel.org> Cc: zhangyi (F) <yi.zhang@huawei.com> Cc: Miao Xie <miaoxie@huawei.com> Cc: Mike Rapoport <rppt@linux.vnet.ibm.com> Cc: Shaohua Li <shli@fb.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: David Rientjes <rientjes@google.com> Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com> Cc: Rik van Riel <riel@redhat.com> Cc: Carsten Otte <cotte@de.ibm.com> Cc: Dan Williams <dan.j.williams@intel.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1
static long madvise_willneed(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) { struct file *file = vma->vm_file; #ifdef CONFIG_SWAP if (!file) { *prev = vma; force_swapin_readahead(vma, start, end); return 0; } if (shmem_mapping(file->f_mapping)) { *prev = vma; force_shm_swapin_readahead(vma, start, end, file->f_mapping); return 0; } #else if (!file) return -EBADF; #endif if (IS_DAX(file_inode(file))) { /* no bad return value, but ignore advice */ return 0; } *prev = vma; start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; if (end > vma->vm_end) end = vma->vm_end; end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; force_page_cache_readahead(file->f_mapping, file, start, end - start); return 0; }
171,648,586,462,786,750,000,000,000,000,000,000,000
madvise.c
200,513,704,618,787,880,000,000,000,000,000,000,000
[ "CWE-835" ]
CVE-2017-18208
The madvise_willneed function in mm/madvise.c in the Linux kernel before 4.14.4 allows local users to cause a denial of service (infinite loop) by triggering use of MADVISE_WILLNEED for a DAX mapping.
https://nvd.nist.gov/vuln/detail/CVE-2017-18208
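Illustrative sketch for the record above: the infinite loop is a calling-convention bug. The handler must either fail or advance *prev so the driving loop makes progress, and the DAX early return did neither. A toy loop with the same contract, with a guard counter standing in for "forever" so the broken variant can be observed safely:

#include <stdio.h>

struct vma { struct vma *next; int dax; };

/* Contract: on success the handler must advance *prev; returning
 * success without doing so stalls the driver loop (the bug). */
static int handle(struct vma *v, struct vma **prev, int fixed)
{
    if (v->dax) {
        if (fixed)
            *prev = v;   /* the one-line fix: still advance the cursor */
        return 0;        /* advice ignored, but not an error */
    }
    *prev = v;
    return 0;
}

int main(void)
{
    struct vma b = {0, 0}, a = {&b, 1};
    struct vma *prev = 0, *cur = &a;
    int guard = 0;

    while (cur && guard++ < 5) {         /* guard stands in for "forever" */
        if (handle(cur, &prev, 1))       /* pass 0 to watch it stall      */
            break;
        cur = prev ? prev->next : cur;   /* progress depends on *prev     */
    }
    printf("iterations: %d\n", guard);   /* 2 when fixed, capped at 5 not */
    return 0;
}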
3,844
linux
28f5a8a7c033cbf3e32277f4cc9c6afd74f05300
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/28f5a8a7c033cbf3e32277f4cc9c6afd74f05300
ocfs2: should wait dio before inode lock in ocfs2_setattr() we should wait dio requests to finish before inode lock in ocfs2_setattr(), otherwise the following deadlock will happen: process 1 process 2 process 3 truncate file 'A' end_io of writing file 'A' receiving the bast messages ocfs2_setattr ocfs2_inode_lock_tracker ocfs2_inode_lock_full inode_dio_wait __inode_dio_wait -->waiting for all dio requests finish dlm_proxy_ast_handler dlm_do_local_bast ocfs2_blocking_ast ocfs2_generic_handle_bast set OCFS2_LOCK_BLOCKED flag dio_end_io dio_bio_end_aio dio_complete ocfs2_dio_end_io ocfs2_dio_end_io_write ocfs2_inode_lock __ocfs2_cluster_lock ocfs2_wait_for_mask -->waiting for OCFS2_LOCK_BLOCKED flag to be cleared, that is waiting for 'process 1' unlocking the inode lock inode_dio_end -->here dec the i_dio_count, but will never be called, so a deadlock happened. Link: http://lkml.kernel.org/r/59F81636.70508@huawei.com Signed-off-by: Alex Chen <alex.chen@huawei.com> Reviewed-by: Jun Piao <piaojun@huawei.com> Reviewed-by: Joseph Qi <jiangqi903@gmail.com> Acked-by: Changwei Ge <ge.changwei@h3c.com> Cc: Mark Fasheh <mfasheh@versity.com> Cc: Joel Becker <jlbec@evilplan.org> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) { int status = 0, size_change; int inode_locked = 0; struct inode *inode = d_inode(dentry); struct super_block *sb = inode->i_sb; struct ocfs2_super *osb = OCFS2_SB(sb); struct buffer_head *bh = NULL; handle_t *handle = NULL; struct dquot *transfer_to[MAXQUOTAS] = { }; int qtype; int had_lock; struct ocfs2_lock_holder oh; trace_ocfs2_setattr(inode, dentry, (unsigned long long)OCFS2_I(inode)->ip_blkno, dentry->d_name.len, dentry->d_name.name, attr->ia_valid, attr->ia_mode, from_kuid(&init_user_ns, attr->ia_uid), from_kgid(&init_user_ns, attr->ia_gid)); /* ensuring we don't even attempt to truncate a symlink */ if (S_ISLNK(inode->i_mode)) attr->ia_valid &= ~ATTR_SIZE; #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \ | ATTR_GID | ATTR_UID | ATTR_MODE) if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) return 0; status = setattr_prepare(dentry, attr); if (status) return status; if (is_quota_modification(inode, attr)) { status = dquot_initialize(inode); if (status) return status; } size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE; if (size_change) { status = ocfs2_rw_lock(inode, 1); if (status < 0) { mlog_errno(status); goto bail; } } had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh); if (had_lock < 0) { status = had_lock; goto bail_unlock_rw; } else if (had_lock) { /* * As far as we know, ocfs2_setattr() could only be the first * VFS entry point in the call chain of recursive cluster * locking issue. * * For instance: * chmod_common() * notify_change() * ocfs2_setattr() * posix_acl_chmod() * ocfs2_iop_get_acl() * * But, we're not 100% sure if it's always true, because the * ordering of the VFS entry points in the call chain is out * of our control. So, we'd better dump the stack here to * catch the other cases of recursive locking. 
*/ mlog(ML_ERROR, "Another case of recursive locking:\n"); dump_stack(); } inode_locked = 1; if (size_change) { status = inode_newsize_ok(inode, attr->ia_size); if (status) goto bail_unlock; inode_dio_wait(inode); if (i_size_read(inode) >= attr->ia_size) { if (ocfs2_should_order_data(inode)) { status = ocfs2_begin_ordered_truncate(inode, attr->ia_size); if (status) goto bail_unlock; } status = ocfs2_truncate_file(inode, bh, attr->ia_size); } else status = ocfs2_extend_file(inode, bh, attr->ia_size); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); status = -ENOSPC; goto bail_unlock; } } if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { /* * Gather pointers to quota structures so that allocation / * freeing of quota structures happens here and not inside * dquot_transfer() where we have problems with lock ordering */ if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid) && OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid)); if (IS_ERR(transfer_to[USRQUOTA])) { status = PTR_ERR(transfer_to[USRQUOTA]); goto bail_unlock; } } if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid) && OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) { transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid)); if (IS_ERR(transfer_to[GRPQUOTA])) { status = PTR_ERR(transfer_to[GRPQUOTA]); goto bail_unlock; } } handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS + 2 * ocfs2_quota_trans_credits(sb)); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); goto bail_unlock; } status = __dquot_transfer(inode, transfer_to); if (status < 0) goto bail_commit; } else { handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); goto bail_unlock; } } setattr_copy(inode, attr); mark_inode_dirty(inode); status = ocfs2_mark_inode_dirty(handle, inode, bh); if (status < 0) mlog_errno(status); bail_commit: ocfs2_commit_trans(osb, handle); bail_unlock: if (status && inode_locked) { ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); inode_locked = 0; } bail_unlock_rw: if (size_change) ocfs2_rw_unlock(inode, 1); bail: /* Release quota pointers in case we acquired them */ for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++) dqput(transfer_to[qtype]); if (!status && attr->ia_valid & ATTR_MODE) { status = ocfs2_acl_chmod(inode, bh); if (status < 0) mlog_errno(status); } if (inode_locked) ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); brelse(bh); return status; }
12,293,169,081,478,398,000,000,000,000,000,000,000
None
null
[ "CWE-401" ]
CVE-2017-18204
The ocfs2_setattr function in fs/ocfs2/file.c in the Linux kernel before 4.14.2 allows local users to cause a denial of service (deadlock) via DIO requests.
https://nvd.nist.gov/vuln/detail/CVE-2017-18204
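Illustrative sketch for the record above: the deadlock is an ordering problem. ocfs2_setattr() took the cluster inode lock and then called inode_dio_wait(), while the DIO completion path needs that same lock, so each side waits on the other. The fix drains DIO before acquiring the lock. A pthread sketch of the corrected ordering, with wait_for_dio() as a no-op stand-in for the kernel's __inode_dio_wait():

#include <pthread.h>

static pthread_mutex_t cluster_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for inode_dio_wait(): in the kernel this blocks until the
 * in-flight direct-I/O count reaches zero; a no-op here so the sketch
 * compiles. */
static void wait_for_dio(void) { }

/* The commit's ordering: drain DIO while holding NO lock that the DIO
 * completion path needs, and only then take the cluster lock. Waiting
 * after the lock is what produced the deadlock described above. */
static void setattr_fixed(void)
{
    wait_for_dio();                      /* first: lock-free drain */
    pthread_mutex_lock(&cluster_lock);   /* then: exclusive lock   */
    /* ... truncate / extend the file ... */
    pthread_mutex_unlock(&cluster_lock);
}

int main(void)
{
    setattr_fixed();
    return 0;
}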
3,845
linux
b9a41d21dceadf8104812626ef85dc56ee8a60ed
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/b9a41d21dceadf8104812626ef85dc56ee8a60ed
dm: fix race between dm_get_from_kobject() and __dm_destroy() The following BUG_ON was hit when testing repeat creation and removal of DM devices: kernel BUG at drivers/md/dm.c:2919! CPU: 7 PID: 750 Comm: systemd-udevd Not tainted 4.1.44 Call Trace: [<ffffffff81649e8b>] dm_get_from_kobject+0x34/0x3a [<ffffffff81650ef1>] dm_attr_show+0x2b/0x5e [<ffffffff817b46d1>] ? mutex_lock+0x26/0x44 [<ffffffff811df7f5>] sysfs_kf_seq_show+0x83/0xcf [<ffffffff811de257>] kernfs_seq_show+0x23/0x25 [<ffffffff81199118>] seq_read+0x16f/0x325 [<ffffffff811de994>] kernfs_fop_read+0x3a/0x13f [<ffffffff8117b625>] __vfs_read+0x26/0x9d [<ffffffff8130eb59>] ? security_file_permission+0x3c/0x44 [<ffffffff8117bdb8>] ? rw_verify_area+0x83/0xd9 [<ffffffff8117be9d>] vfs_read+0x8f/0xcf [<ffffffff81193e34>] ? __fdget_pos+0x12/0x41 [<ffffffff8117c686>] SyS_read+0x4b/0x76 [<ffffffff817b606e>] system_call_fastpath+0x12/0x71 The bug can be easily triggered, if an extra delay (e.g. 10ms) is added between the test of DMF_FREEING & DMF_DELETING and dm_get() in dm_get_from_kobject(). To fix it, we need to ensure the test of DMF_FREEING & DMF_DELETING and dm_get() are done in an atomic way, so _minor_lock is used. The other callers of dm_get() have also been checked to be OK: some callers invoke dm_get() under _minor_lock, some callers invoke it under _hash_lock, and dm_start_request() invoke it after increasing md->open_count. Cc: stable@vger.kernel.org Signed-off-by: Hou Tao <houtao1@huawei.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com>
1
struct mapped_device *dm_get_from_kobject(struct kobject *kobj) { struct mapped_device *md; md = container_of(kobj, struct mapped_device, kobj_holder.kobj); if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) return NULL; dm_get(md); return md; }
232,285,841,910,144,100,000,000,000,000,000,000,000
dm.c
56,799,825,473,684,530,000,000,000,000,000,000,000
[ "CWE-362" ]
CVE-2017-18203
The dm_get_from_kobject function in drivers/md/dm.c in the Linux kernel before 4.14.3 allow local users to cause a denial of service (BUG) by leveraging a race condition with __dm_destroy during creation and removal of DM devices.
https://nvd.nist.gov/vuln/detail/CVE-2017-18203
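Illustrative sketch for the record above: dm_get_from_kobject() tested DMF_FREEING/DMF_DELETING and then called dm_get() with no lock held, so __dm_destroy() could slip in between the test and the reference grab. The fix widens _minor_lock over both steps; a pthread analogue of test-and-get under one lock, with field names simplified from the real struct:

#include <pthread.h>
#include <stddef.h>

struct mapped_device {
    pthread_mutex_t *minor_lock;   /* shared with the destroy path */
    int freeing;                   /* DMF_FREEING stand-in         */
    int refcount;
};

/* Atomic test-and-get: the flag check and the reference increment sit
 * under the same lock the destroyer takes before setting 'freeing',
 * closing the window the BUG_ON was hit through. */
static struct mapped_device *md_try_get(struct mapped_device *md)
{
    struct mapped_device *ret = NULL;

    pthread_mutex_lock(md->minor_lock);
    if (!md->freeing) {
        md->refcount++;            /* dm_get() equivalent */
        ret = md;
    }
    pthread_mutex_unlock(md->minor_lock);
    return ret;
}

int main(void)
{
    pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    struct mapped_device md = { &lock, 0, 0 };
    return md_try_get(&md) ? 0 : 1;
}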
3,847
linux
638164a2718f337ea224b747cf5977ef143166a4
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/638164a2718f337ea224b747cf5977ef143166a4
f2fs: fix potential panic during fstrim As Ju Hyung Park reported: "When 'fstrim' is called for manual trim, a BUG() can be triggered randomly with this patch. I'm seeing this issue on both x86 Desktop and arm64 Android phone. On x86 Desktop, this was caused during Ubuntu boot-up. I have a cronjob installed which calls 'fstrim -v /' during boot. On arm64 Android, this was caused during GC looping with 1ms gc_min_sleep_time & gc_max_sleep_time." Root cause of this issue is that f2fs_wait_discard_bios can only be used by f2fs_put_super, because during put_super there must be no other referrers, so it can ignore discard entry's reference count when removing the entry, otherwise in other caller we will hit bug_on in __remove_discard_cmd as there may be other issuer added reference count in discard entry. Thread A Thread B - issue_discard_thread - f2fs_ioc_fitrim - f2fs_trim_fs - f2fs_wait_discard_bios - __issue_discard_cmd - __submit_discard_cmd - __wait_discard_cmd - dc->ref++ - __wait_one_discard_bio - __wait_discard_cmd - __remove_discard_cmd - f2fs_bug_on(sbi, dc->ref) Fixes: 969d1b180d987c2be02de890d0fff0f66a0e80de Reported-by: Ju Hyung Park <qkrwngud825@gmail.com> Signed-off-by: Chao Yu <yuchao0@huawei.com> Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
1
void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
{
    __issue_discard_cmd(sbi, false);
    __drop_discard_cmd(sbi);
    __wait_discard_cmd(sbi, false);
}
66,587,864,384,024,680,000,000,000,000,000,000,000
segment.c
277,647,022,130,075,500,000,000,000,000,000,000,000
[ "CWE-20" ]
CVE-2017-18200
The f2fs implementation in the Linux kernel before 4.14 mishandles reference counts associated with f2fs_wait_discard_bios calls, which allows local users to cause a denial of service (BUG), as demonstrated by fstrim.
https://nvd.nist.gov/vuln/detail/CVE-2017-18200
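The root-cause analysis implies that only the put_super path may ignore discard entries' reference counts. One plausible shape of the fix, sketched with a hypothetical umount flag (the parameter name and exact call changes are assumptions, not taken from the upstream diff):

    /* Sketch: let only the umount path force-drop pending entries. */
    void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount)
    {
        __issue_discard_cmd(sbi, false);
        if (umount)
            __drop_discard_cmd(sbi);    /* safe: no other referrers left */
        __wait_discard_cmd(sbi, !umount);
    }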
3,849
linux
dad48e73127ba10279ea33e6dbc8d3905c4d31c0
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/dad48e73127ba10279ea33e6dbc8d3905c4d31c0
f2fs: fix a bug caused by NULL extent tree

    Thread A:                           Thread B:
    -f2fs_remount
     -sbi->mount_opt.opt = 0;
        <---                            -f2fs_iget
                                         -do_read_inode
                                          -f2fs_init_extent_tree
                                          -F2FS_I(inode)->extent_tree is NULL
     -default_options && parse_options
     -remount return
        <---                            -f2fs_map_blocks
                                         -f2fs_lookup_extent_tree
                                          -f2fs_bug_on(sbi, !et);

The same problem with f2fs_new_inode.

Signed-off-by: Yunlei He <heyunlei@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
1
bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct extent_tree *et;
    struct extent_node *en;
    struct extent_info ei;

    if (!f2fs_may_extent_tree(inode)) {
        /* drop largest extent */
        if (i_ext && i_ext->len) {
            i_ext->len = 0;
            return true;
        }
        return false;
    }

    et = __grab_extent_tree(inode);

    if (!i_ext || !i_ext->len)
        return false;

    get_extent_info(&ei, i_ext);

    write_lock(&et->lock);
    if (atomic_read(&et->node_cnt))
        goto out;

    en = __init_extent_tree(sbi, et, &ei);
    if (en) {
        spin_lock(&sbi->extent_lock);
        list_add_tail(&en->list, &sbi->extent_list);
        spin_unlock(&sbi->extent_lock);
    }
out:
    write_unlock(&et->lock);
    return false;
}
144,565,331,810,619,090,000,000,000,000,000,000,000
extent_cache.c
121,324,559,268,227,700,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2017-18193
fs/f2fs/extent_cache.c in the Linux kernel before 4.13 mishandles extent trees, which allows local users to cause a denial of service (BUG) via an application with multiple threads.
https://nvd.nist.gov/vuln/detail/CVE-2017-18193
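Given the race, the lookup path can legitimately observe an extent tree that was never initialized, so the f2fs_bug_on(sbi, !et) assertion shown in the thread diagram is too strong. A minimal sketch of the kind of defensive check the analysis calls for, treating a missing tree as a plain cache miss (illustrative placement, not the verbatim upstream diff):

    /* In f2fs_lookup_extent_tree(), tolerate a tree left NULL by the
     * remount race instead of asserting (hypothetical placement): */
    struct extent_tree *et = F2FS_I(inode)->extent_tree;

    if (!et)                /* was: f2fs_bug_on(sbi, !et) */
        return false;       /* report a cache miss, caller falls back */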
3,850
cups
afa80cb2b457bf8d64f775bed307588610476c41
https://github.com/apple/cups
https://github.com/apple/cups/commit/afa80cb2b457bf8d64f775bed307588610476c41
Don't treat "localhost.localdomain" as an allowed replacement for localhost, since it isn't.
1
static int                              /* O - 1 if valid, 0 if not */
valid_host(cupsd_client_t *con)         /* I - Client connection */
{
  cupsd_alias_t *a;                     /* Current alias */
  cupsd_netif_t *netif;                 /* Current network interface */
  const char    *end;                   /* End character */
  char          *ptr;                   /* Pointer into host value */


 /*
  * Copy the Host: header for later use...
  */

  strlcpy(con->clientname, httpGetField(con->http, HTTP_FIELD_HOST),
          sizeof(con->clientname));
  if ((ptr = strrchr(con->clientname, ':')) != NULL && !strchr(ptr, ']'))
  {
    *ptr++ = '\0';
    con->clientport = atoi(ptr);
  }
  else
    con->clientport = con->serverport;

 /*
  * Then validate...
  */

  if (httpAddrLocalhost(httpGetAddress(con->http)))
  {
   /*
    * Only allow "localhost" or the equivalent IPv4 or IPv6 numerical
    * addresses when accessing CUPS via the loopback interface...
    */

    return (!_cups_strcasecmp(con->clientname, "localhost") ||
            !_cups_strcasecmp(con->clientname, "localhost.") ||
#ifdef __linux
            !_cups_strcasecmp(con->clientname, "localhost.localdomain") ||
#endif /* __linux */
            !strcmp(con->clientname, "127.0.0.1") ||
            !strcmp(con->clientname, "[::1]"));
  }

#if defined(HAVE_DNSSD) || defined(HAVE_AVAHI)
 /*
  * Check if the hostname is something.local (Bonjour); if so, allow it.
  */

  if ((end = strrchr(con->clientname, '.')) != NULL && end > con->clientname &&
      !end[1])
  {
   /*
    * "." on end, work back to second-to-last "."...
    */

    for (end --; end > con->clientname && *end != '.'; end --);
  }

  if (end && (!_cups_strcasecmp(end, ".local") ||
              !_cups_strcasecmp(end, ".local.")))
    return (1);
#endif /* HAVE_DNSSD || HAVE_AVAHI */

 /*
  * Check if the hostname is an IP address...
  */

  if (isdigit(con->clientname[0] & 255) || con->clientname[0] == '[')
  {
   /*
    * Possible IPv4/IPv6 address...
    */

    http_addrlist_t *addrlist;          /* List of addresses */


    if ((addrlist = httpAddrGetList(con->clientname, AF_UNSPEC, NULL)) != NULL)
    {
     /*
      * Good IPv4/IPv6 address...
      */

      httpAddrFreeList(addrlist);
      return (1);
    }
  }

 /*
  * Check for (alias) name matches...
  */

  for (a = (cupsd_alias_t *)cupsArrayFirst(ServerAlias);
       a;
       a = (cupsd_alias_t *)cupsArrayNext(ServerAlias))
  {
   /*
    * "ServerAlias *" allows all host values through...
    */

    if (!strcmp(a->name, "*"))
      return (1);

    if (!_cups_strncasecmp(con->clientname, a->name, a->namelen))
    {
     /*
      * Prefix matches; check the character at the end - it must be "." or nul.
      */

      end = con->clientname + a->namelen;

      if (!*end || (*end == '.' && !end[1]))
        return (1);
    }
  }

#if defined(HAVE_DNSSD) || defined(HAVE_AVAHI)
  for (a = (cupsd_alias_t *)cupsArrayFirst(DNSSDAlias);
       a;
       a = (cupsd_alias_t *)cupsArrayNext(DNSSDAlias))
  {
   /*
    * "ServerAlias *" allows all host values through...
    */

    if (!strcmp(a->name, "*"))
      return (1);

    if (!_cups_strncasecmp(con->clientname, a->name, a->namelen))
    {
     /*
      * Prefix matches; check the character at the end - it must be "." or nul.
      */

      end = con->clientname + a->namelen;

      if (!*end || (*end == '.' && !end[1]))
        return (1);
    }
  }
#endif /* HAVE_DNSSD || HAVE_AVAHI */

 /*
  * Check for interface hostname matches...
  */

  for (netif = (cupsd_netif_t *)cupsArrayFirst(NetIFList);
       netif;
       netif = (cupsd_netif_t *)cupsArrayNext(NetIFList))
  {
    if (!_cups_strncasecmp(con->clientname, netif->hostname, netif->hostlen))
    {
     /*
      * Prefix matches; check the character at the end - it must be "." or nul.
      */

      end = con->clientname + netif->hostlen;

      if (!*end || (*end == '.' && !end[1]))
        return (1);
    }
  }

  return (0);
}
159,854,871,526,072,820,000,000,000,000,000,000,000
client.c
181,717,129,039,375,560,000,000,000,000,000,000,000
[ "CWE-290" ]
CVE-2017-18190
A localhost.localdomain whitelist entry in valid_host() in scheduler/client.c in CUPS before 2.2.2 allows remote attackers to execute arbitrary IPP commands by sending POST requests to the CUPS daemon in conjunction with DNS rebinding. The localhost.localdomain name is often resolved via a DNS server (neither the OS nor the web browser is responsible for ensuring that localhost.localdomain is 127.0.0.1).
https://nvd.nist.gov/vuln/detail/CVE-2017-18190
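Per the commit title, the fix is simply to stop whitelisting "localhost.localdomain", whose resolution is controlled by DNS rather than by the OS. The loopback branch of valid_host() then reduces to names that are guaranteed to be local; a sketch of the tightened check:

    if (httpAddrLocalhost(httpGetAddress(con->http)))
    {
     /*
      * Only names that cannot be remapped via DNS are accepted when the
      * connection arrives over the loopback interface...
      */

      return (!_cups_strcasecmp(con->clientname, "localhost") ||
              !_cups_strcasecmp(con->clientname, "localhost.") ||
              !strcmp(con->clientname, "127.0.0.1") ||
              !strcmp(con->clientname, "[::1]"));
    }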
3,851
mbedtls
83c9f495ffe70c7dd280b41fdfd4881485a3bc28
https://github.com/ARMmbed/mbedtls
https://github.com/ARMmbed/mbedtls/commit/83c9f495ffe70c7dd280b41fdfd4881485a3bc28
Prevent bounds check bypass through overflow in PSK identity parsing

The check `if( *p + n > end )` in `ssl_parse_client_psk_identity` is
unsafe because `*p + n` might overflow, thus bypassing the check. As `n`
is a user-specified value up to 65K, this is relevant if the library
happens to be located in the last 65K of virtual memory.

This commit replaces the check by a safe version.
1
static int ssl_parse_client_psk_identity( mbedtls_ssl_context *ssl,
                                          unsigned char **p,
                                          const unsigned char *end )
{
    int ret = 0;
    size_t n;

    if( ssl->conf->f_psk == NULL &&
        ( ssl->conf->psk == NULL || ssl->conf->psk_identity == NULL ||
          ssl->conf->psk_identity_len == 0 || ssl->conf->psk_len == 0 ) )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no pre-shared key" ) );
        return( MBEDTLS_ERR_SSL_PRIVATE_KEY_REQUIRED );
    }

    /*
     * Receive client pre-shared key identity name
     */
    if( *p + 2 > end )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
    }

    n = ( (*p)[0] << 8 ) | (*p)[1];
    *p += 2;

    if( n < 1 || n > 65535 || *p + n > end )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
    }

    if( ssl->conf->f_psk != NULL )
    {
        if( ssl->conf->f_psk( ssl->conf->p_psk, ssl, *p, n ) != 0 )
            ret = MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY;
    }
    else
    {
        /* Identity is not a big secret since clients send it in the clear,
         * but treat it carefully anyway, just in case */
        if( n != ssl->conf->psk_identity_len ||
            mbedtls_ssl_safer_memcmp( ssl->conf->psk_identity, *p, n ) != 0 )
        {
            ret = MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY;
        }
    }

    if( ret == MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY )
    {
        MBEDTLS_SSL_DEBUG_BUF( 3, "Unknown PSK identity", *p, n );
        mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                        MBEDTLS_SSL_ALERT_MSG_UNKNOWN_PSK_IDENTITY );
        return( MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY );
    }

    *p += n;

    return( 0 );
}
246,504,953,524,229,660,000,000,000,000,000,000,000
ssl_srv.c
242,941,228,807,408,900,000,000,000,000,000,000,000
[ "CWE-190" ]
CVE-2017-18187
In ARM mbed TLS before 2.7.0, there is a bounds-check bypass through an integer overflow in PSK identity parsing in the ssl_parse_client_psk_identity() function in library/ssl_srv.c.
https://nvd.nist.gov/vuln/detail/CVE-2017-18187
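The unsafe pattern is the pointer addition itself: `*p + n` can wrap around the end of the address space. One standard overflow-safe rewrite, and a plausible form of the "safe version" the commit message mentions, compares the length against the space remaining instead of computing the sum (a sketch, not necessarily the exact upstream expression):

    /* Overflow-safe bounds check: never compute *p + n; compare n against
     * the number of bytes actually left between *p and end. The earlier
     * *p + 2 > end check guarantees end - (*p) is non-negative here. */
    if( n < 1 || n > 65535 || n > (size_t)( end - (*p) ) )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
        return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
    }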
3,852
linux
8dca4a41f1ad65043a78c2338d9725f859c8d2c3
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/8dca4a41f1ad65043a78c2338d9725f859c8d2c3
pinctrl/amd: Drop pinctrl_unregister for devm_ registered device

It's not necessary to unregister pin controller device registered with
devm_pinctrl_register() and using pinctrl_unregister() leads to a
double free.

Fixes: 3bfd44306c65 ("pinctrl: amd: Add support for additional GPIO")
Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
1
static int amd_gpio_remove(struct platform_device *pdev)
{
    struct amd_gpio *gpio_dev;

    gpio_dev = platform_get_drvdata(pdev);

    gpiochip_remove(&gpio_dev->gc);
    pinctrl_unregister(gpio_dev->pctrl);

    return 0;
}
100,344,955,333,259,860,000,000,000,000,000,000,000
pinctrl-amd.c
137,063,333,804,755,300,000,000,000,000,000,000,000
[ "CWE-415" ]
CVE-2017-18174
In the Linux kernel before 4.7, the amd_gpio_remove function in drivers/pinctrl/pinctrl-amd.c calls the pinctrl_unregister function, leading to a double free.
https://nvd.nist.gov/vuln/detail/CVE-2017-18174
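Because gpio_dev->pctrl was registered with devm_pinctrl_register(), the devres machinery unregisters and frees it automatically when the device goes away; the explicit pinctrl_unregister() above is the second free. The fixed remove path is therefore just the one-line removal the commit describes (sketch):

    static int amd_gpio_remove(struct platform_device *pdev)
    {
        struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);

        gpiochip_remove(&gpio_dev->gc);
        /* no pinctrl_unregister(): devm_pinctrl_register() cleans up */

        return 0;
    }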
3,858
linux
2638fd0f92d4397884fd991d8f4925cb3f081901
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/2638fd0f92d4397884fd991d8f4925cb3f081901
netfilter: xt_TCPMSS: add more sanity tests on tcph->doff

Denys provided an awesome KASAN report pointing to an use after free in
xt_TCPMSS.

I have provided three patches to fix this issue, either in xt_TCPMSS or
in xt_tcpudp.c. It seems xt_TCPMSS patch has the smallest possible
impact.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Denys Fedoryshchenko <nuclearcat@nuclearcat.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
1
static int
tcpmss_mangle_packet(struct sk_buff *skb,
                     const struct xt_action_param *par,
                     unsigned int family,
                     unsigned int tcphoff,
                     unsigned int minlen)
{
    const struct xt_tcpmss_info *info = par->targinfo;
    struct tcphdr *tcph;
    int len, tcp_hdrlen;
    unsigned int i;
    __be16 oldval;
    u16 newmss;
    u8 *opt;

    /* This is a fragment, no TCP header is available */
    if (par->fragoff != 0)
        return 0;

    if (!skb_make_writable(skb, skb->len))
        return -1;

    len = skb->len - tcphoff;
    if (len < (int)sizeof(struct tcphdr))
        return -1;

    tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
    tcp_hdrlen = tcph->doff * 4;

    if (len < tcp_hdrlen)
        return -1;

    if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
        struct net *net = xt_net(par);
        unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family);
        unsigned int min_mtu = min(dst_mtu(skb_dst(skb)), in_mtu);

        if (min_mtu <= minlen) {
            net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
                                min_mtu);
            return -1;
        }
        newmss = min_mtu - minlen;
    } else
        newmss = info->mss;

    opt = (u_int8_t *)tcph;
    for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
        if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
            u_int16_t oldmss;

            oldmss = (opt[i+2] << 8) | opt[i+3];

            /* Never increase MSS, even when setting it, as
             * doing so results in problems for hosts that rely
             * on MSS being set correctly.
             */
            if (oldmss <= newmss)
                return 0;

            opt[i+2] = (newmss & 0xff00) >> 8;
            opt[i+3] = newmss & 0x00ff;

            inet_proto_csum_replace2(&tcph->check, skb,
                                     htons(oldmss), htons(newmss),
                                     false);
            return 0;
        }
    }

    /* There is data after the header so the option can't be added
     * without moving it, and doing so may make the SYN packet
     * itself too large. Accept the packet unmodified instead.
     */
    if (len > tcp_hdrlen)
        return 0;

    /*
     * MSS Option not found ?! add it..
     */
    if (skb_tailroom(skb) < TCPOLEN_MSS) {
        if (pskb_expand_head(skb, 0,
                             TCPOLEN_MSS - skb_tailroom(skb),
                             GFP_ATOMIC))
            return -1;
        tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
    }

    skb_put(skb, TCPOLEN_MSS);

    /*
     * IPv4: RFC 1122 states "If an MSS option is not received at
     * connection setup, TCP MUST assume a default send MSS of 536".
     * IPv6: RFC 2460 states IPv6 has a minimum MTU of 1280 and a minimum
     * length IPv6 header of 60, ergo the default MSS value is 1220
     * Since no MSS was provided, we must use the default values
     */
    if (xt_family(par) == NFPROTO_IPV4)
        newmss = min(newmss, (u16)536);
    else
        newmss = min(newmss, (u16)1220);

    opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
    memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));

    inet_proto_csum_replace2(&tcph->check, skb,
                             htons(len), htons(len + TCPOLEN_MSS), true);
    opt[0] = TCPOPT_MSS;
    opt[1] = TCPOLEN_MSS;
    opt[2] = (newmss & 0xff00) >> 8;
    opt[3] = newmss & 0x00ff;

    inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false);

    oldval = ((__be16 *)tcph)[6];
    tcph->doff += TCPOLEN_MSS/4;
    inet_proto_csum_replace2(&tcph->check, skb,
                             oldval, ((__be16 *)tcph)[6], false);
    return TCPOLEN_MSS;
}
250,029,302,786,329,280,000,000,000,000,000,000,000
xt_TCPMSS.c
261,370,175,787,540,400,000,000,000,000,000,000,000
[ "CWE-416" ]
CVE-2017-18017
The tcpmss_mangle_packet function in net/netfilter/xt_TCPMSS.c in the Linux kernel before 4.11, and 4.9.x before 4.9.36, allows remote attackers to cause a denial of service (use-after-free and memory corruption) or possibly have unspecified other impact by leveraging the presence of xt_TCPMSS in an iptables action.
https://nvd.nist.gov/vuln/detail/CVE-2017-18017
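tcph->doff comes straight from the packet, so tcp_hdrlen = tcph->doff * 4 may be smaller than sizeof(struct tcphdr), which later poisons the option-scan bounds and the memmove() arithmetic in the MSS-insertion path. A sketch of the extra sanity test the commit title announces (an assumed shape for the added check):

    tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
    tcp_hdrlen = tcph->doff * 4;

    /* Reject headers whose advertised length is impossible before any
     * option parsing or MSS-insertion arithmetic uses tcp_hdrlen. */
    if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
        return -1;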
3,859
linux
21b5944350052d2583e82dd59b19a9ba94a007f0
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/21b5944350052d2583e82dd59b19a9ba94a007f0
net: Fix double free and memory corruption in get_net_ns_by_id()

(I can trivially verify that that idr_remove in cleanup_net happens after
the network namespace count has dropped to zero --EWB)

Function get_net_ns_by_id() does not check for net::count after it has
found a peer in netns_ids idr.

It may dereference a peer, after its count has already been finaly
decremented. This leads to double free and memory corruption:

    put_net(peer)                                rtnl_lock()
    atomic_dec_and_test(&peer->count) [count=0]  ...
    __put_net(peer)                              get_net_ns_by_id(net, id)
      spin_lock(&cleanup_list_lock)
      list_add(&net->cleanup_list, &cleanup_list)
      spin_unlock(&cleanup_list_lock)
    queue_work()                                 peer = idr_find(&net->netns_ids, id)
      |                                          get_net(peer) [count=1]
      |                                          ...
      |                                          (use after final put)
      v                                          ...
    cleanup_net()                                ...
      spin_lock(&cleanup_list_lock)              ...
      list_replace_init(&cleanup_list, ..)       ...
      spin_unlock(&cleanup_list_lock)            ...
      ...                                        ...
      ...                                        put_net(peer)
      ...                                          atomic_dec_and_test(&peer->count) [count=0]
      ...                                          spin_lock(&cleanup_list_lock)
      ...                                          list_add(&net->cleanup_list, &cleanup_list)
      ...                                          spin_unlock(&cleanup_list_lock)
      ...                                          queue_work()
      ...                                        rtnl_unlock()
      rtnl_lock()                                ...
      for_each_net(tmp) {                        ...
        id = __peernet2id(tmp, peer)             ...
        spin_lock_irq(&tmp->nsid_lock)           ...
        idr_remove(&tmp->netns_ids, id)          ...
        ...                                      ...
        net_drop_ns()                            ...
          net_free(peer)                         ...
      }                                          ...
      |
      v
    cleanup_net()                                ...
      (Second free of peer)

Also, put_net() on the right cpu may reorder with left's cpu
list_replace_init(&cleanup_list, ..), and then cleanup_list will be
corrupted.

Since cleanup_net() is executed in worker thread, while put_net(peer) can
happen everywhere, there should be enough time for concurrent
get_net_ns_by_id() to pick the peer up, and the race does not seem to be
unlikely. The patch fixes the problem in standard way.

(Also, there is possible problem in peernet2id_alloc(), which requires
check for net::count under nsid_lock and maybe_get_net(peer), but in
current stable kernel it's used under rtnl_lock() and it has to be safe.
Openswitch begun to use peernet2id_alloc(), and possibly it should be
fixed too. While this is not in stable kernel yet, so I'll send a
separate message to netdev@ later).

Cc: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Fixes: 0c7aecd4bde4 "netns: add rtnl cmd to add and get peer netns ids"
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1
struct net *get_net_ns_by_id(struct net *net, int id)
{
    struct net *peer;

    if (id < 0)
        return NULL;

    rcu_read_lock();
    spin_lock_bh(&net->nsid_lock);
    peer = idr_find(&net->netns_ids, id);
    if (peer)
        get_net(peer);
    spin_unlock_bh(&net->nsid_lock);
    rcu_read_unlock();

    return peer;
}
126,228,411,105,494,450,000,000,000,000,000,000,000
net_namespace.c
229,167,366,123,156,130,000,000,000,000,000,000,000
[ "CWE-416" ]
CVE-2017-15129
A use-after-free vulnerability was found in network namespaces code affecting the Linux kernel before 4.14.11. The function get_net_ns_by_id() in net/core/net_namespace.c does not check for the net::count value after it has found a peer network in netns_ids idr, which could lead to double free and memory corruption. This vulnerability could allow an unprivileged local user to induce kernel memory corruption on the system, leading to a crash. Due to the nature of the flaw, privilege escalation cannot be fully ruled out, although it is thought to be unlikely.
https://nvd.nist.gov/vuln/detail/CVE-2017-15129
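The "standard way" the commit message refers to is to take a reference only while the count is still non-zero, i.e. the maybe_get_net() primitive the message itself names, instead of an unconditional get_net(). A sketch of the repaired lookup (whether the nsid_lock is still needed around a plain idr_find() under RCU is a separate question; the sketch keeps just RCU):

    struct net *get_net_ns_by_id(struct net *net, int id)
    {
        struct net *peer;

        if (id < 0)
            return NULL;

        rcu_read_lock();
        peer = idr_find(&net->netns_ids, id);
        if (peer && !maybe_get_net(peer))   /* count already hit zero */
            peer = NULL;
        rcu_read_unlock();

        return peer;
    }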
3,860
linux
1e3921471354244f70fe268586ff94a97a6dd4df
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/1e3921471354244f70fe268586ff94a97a6dd4df
userfaultfd: hugetlbfs: prevent UFFDIO_COPY to fill beyond the end of i_size

This oops:

    kernel BUG at fs/hugetlbfs/inode.c:484!
    RIP: remove_inode_hugepages+0x3d0/0x410
    Call Trace:
      hugetlbfs_setattr+0xd9/0x130
      notify_change+0x292/0x410
      do_truncate+0x65/0xa0
      do_sys_ftruncate.constprop.3+0x11a/0x180
      SyS_ftruncate+0xe/0x10
      tracesys+0xd9/0xde

was caused by the lack of i_size check in hugetlb_mcopy_atomic_pte.

mmap() can still succeed beyond the end of the i_size after vmtruncate
zapped vmas in those ranges, but the faults must not succeed, and that
includes UFFDIO_COPY.

We could differentiate the retval to userland to represent a SIGBUS like
a page fault would do (vs SIGSEGV), but it doesn't seem very useful and
we'd need to pick a random retval as there's no meaningful syscall retval
that would differentiate from SIGSEGV and SIGBUS, there's just -EFAULT.

Link: http://lkml.kernel.org/r/20171016223914.2421-2-aarcange@redhat.com
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                             pte_t *dst_pte,
                             struct vm_area_struct *dst_vma,
                             unsigned long dst_addr,
                             unsigned long src_addr,
                             struct page **pagep)
{
    int vm_shared = dst_vma->vm_flags & VM_SHARED;
    struct hstate *h = hstate_vma(dst_vma);
    pte_t _dst_pte;
    spinlock_t *ptl;
    int ret;
    struct page *page;

    if (!*pagep) {
        ret = -ENOMEM;
        page = alloc_huge_page(dst_vma, dst_addr, 0);
        if (IS_ERR(page))
            goto out;

        ret = copy_huge_page_from_user(page,
                                       (const void __user *) src_addr,
                                       pages_per_huge_page(h), false);

        /* fallback to copy_from_user outside mmap_sem */
        if (unlikely(ret)) {
            ret = -EFAULT;
            *pagep = page;      /* don't free the page */
            goto out;
        }
    } else {
        page = *pagep;
        *pagep = NULL;
    }

    /*
     * The memory barrier inside __SetPageUptodate makes sure that
     * preceding stores to the page contents become visible before
     * the set_pte_at() write.
     */
    __SetPageUptodate(page);
    set_page_huge_active(page);

    /*
     * If shared, add to page cache
     */
    if (vm_shared) {
        struct address_space *mapping = dst_vma->vm_file->f_mapping;
        pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);

        ret = huge_add_to_page_cache(page, mapping, idx);
        if (ret)
            goto out_release_nounlock;
    }

    ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
    spin_lock(ptl);

    ret = -EEXIST;
    if (!huge_pte_none(huge_ptep_get(dst_pte)))
        goto out_release_unlock;

    if (vm_shared) {
        page_dup_rmap(page, true);
    } else {
        ClearPagePrivate(page);
        hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
    }

    _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
    if (dst_vma->vm_flags & VM_WRITE)
        _dst_pte = huge_pte_mkdirty(_dst_pte);
    _dst_pte = pte_mkyoung(_dst_pte);

    set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

    (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
                                     dst_vma->vm_flags & VM_WRITE);
    hugetlb_count_add(pages_per_huge_page(h), dst_mm);

    /* No need to invalidate - it was non-present before */
    update_mmu_cache(dst_vma, dst_addr, dst_pte);

    spin_unlock(ptl);
    if (vm_shared)
        unlock_page(page);
    ret = 0;
out:
    return ret;
out_release_unlock:
    spin_unlock(ptl);
    if (vm_shared)
        unlock_page(page);
out_release_nounlock:
    put_page(page);
    goto out;
}
285,571,524,825,774,600,000,000,000,000,000,000,000
hugetlb.c
273,321,831,039,362,950,000,000,000,000,000,000,000
[ "CWE-119" ]
CVE-2017-15128
A flaw was found in the hugetlb_mcopy_atomic_pte function in mm/hugetlb.c in the Linux kernel before 4.13.12. A lack of size check could cause a denial of service (BUG).
https://nvd.nist.gov/vuln/detail/CVE-2017-15128
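The missing piece named by the commit message is an i_size check before the UFFDIO_COPY fault is allowed to succeed. A sketch of such a check in the vm_shared path of the function above (the exact placement, and any recheck under the page-table lock in the real patch, may differ):

    if (vm_shared) {
        struct address_space *mapping = dst_vma->vm_file->f_mapping;
        pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
        unsigned long size;

        /* UFFDIO_COPY must fail, like a fault would, beyond EOF */
        size = i_size_read(mapping->host) >> huge_page_shift(h);
        ret = -EFAULT;
        if (idx >= size)
            goto out_release_nounlock;

        ret = huge_add_to_page_cache(page, mapping, idx);
        if (ret)
            goto out_release_nounlock;
    }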