idx | project | commit_id | project_url | commit_url | commit_message | target | func | func_hash | file_name | file_hash | cwe | cve | cve_desc | nvd_url |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1,646 | openssl | b15f8769644b00ef7283521593360b7b2135cb63 | https://github.com/openssl/openssl | https://github.com/openssl/openssl/commit/b15f8769644b00ef7283521593360b7b2135cb63 | ECDH downgrade bug fix.
Fix bug where an OpenSSL client would accept a handshake using an
ephemeral ECDH ciphersuite with the server key exchange message omitted.
Thanks to Karthikeyan Bhargavan for reporting this issue.
CVE-2014-3572
Reviewed-by: Matt Caswell <matt@openssl.org> | 1 | int ssl3_get_key_exchange(SSL *s)
{
#ifndef OPENSSL_NO_RSA
unsigned char *q,md_buf[EVP_MAX_MD_SIZE*2];
#endif
EVP_MD_CTX md_ctx;
unsigned char *param,*p;
int al,j,ok;
long i,param_len,n,alg_k,alg_a;
EVP_PKEY *pkey=NULL;
const EVP_MD *md = NULL;
#ifndef OPENSSL_NO_RSA
RSA *rsa=NULL;
#endif
#ifndef OPENSSL_NO_DH
DH *dh=NULL;
#endif
#ifndef OPENSSL_NO_ECDH
EC_KEY *ecdh = NULL;
BN_CTX *bn_ctx = NULL;
EC_POINT *srvr_ecpoint = NULL;
int curve_nid = 0;
int encoded_pt_len = 0;
#endif
/* use same message size as in ssl3_get_certificate_request()
* as ServerKeyExchange message may be skipped */
n=s->method->ssl_get_message(s,
SSL3_ST_CR_KEY_EXCH_A,
SSL3_ST_CR_KEY_EXCH_B,
-1,
s->max_cert_list,
&ok);
if (!ok) return((int)n);
if (s->s3->tmp.message_type != SSL3_MT_SERVER_KEY_EXCHANGE)
{
#ifndef OPENSSL_NO_PSK
/* In plain PSK ciphersuite, ServerKeyExchange can be
omitted if no identity hint is sent. Set
session->sess_cert anyway to avoid problems
later.*/
if (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)
{
s->session->sess_cert=ssl_sess_cert_new();
if (s->ctx->psk_identity_hint)
OPENSSL_free(s->ctx->psk_identity_hint);
s->ctx->psk_identity_hint = NULL;
}
#endif
s->s3->tmp.reuse_message=1;
return(1);
}
param=p=(unsigned char *)s->init_msg;
if (s->session->sess_cert != NULL)
{
#ifndef OPENSSL_NO_RSA
if (s->session->sess_cert->peer_rsa_tmp != NULL)
{
RSA_free(s->session->sess_cert->peer_rsa_tmp);
s->session->sess_cert->peer_rsa_tmp=NULL;
}
#endif
#ifndef OPENSSL_NO_DH
if (s->session->sess_cert->peer_dh_tmp)
{
DH_free(s->session->sess_cert->peer_dh_tmp);
s->session->sess_cert->peer_dh_tmp=NULL;
}
#endif
#ifndef OPENSSL_NO_ECDH
if (s->session->sess_cert->peer_ecdh_tmp)
{
EC_KEY_free(s->session->sess_cert->peer_ecdh_tmp);
s->session->sess_cert->peer_ecdh_tmp=NULL;
}
#endif
}
else
{
s->session->sess_cert=ssl_sess_cert_new();
}
/* Total length of the parameters including the length prefix */
param_len=0;
alg_k=s->s3->tmp.new_cipher->algorithm_mkey;
alg_a=s->s3->tmp.new_cipher->algorithm_auth;
EVP_MD_CTX_init(&md_ctx);
al=SSL_AD_DECODE_ERROR;
#ifndef OPENSSL_NO_PSK
if (alg_k & SSL_kPSK)
{
char tmp_id_hint[PSK_MAX_IDENTITY_LEN+1];
param_len = 2;
if (param_len > n)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
n2s(p,i);
/* Store PSK identity hint for later use, hint is used
* in ssl3_send_client_key_exchange. Assume that the
* maximum length of a PSK identity hint can be as
* long as the maximum length of a PSK identity. */
if (i > PSK_MAX_IDENTITY_LEN)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto f_err;
}
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_BAD_PSK_IDENTITY_HINT_LENGTH);
goto f_err;
}
param_len += i;
/* If received PSK identity hint contains NULL
* characters, the hint is truncated from the first
* NULL. p may not be ending with NULL, so create a
* NULL-terminated string. */
memcpy(tmp_id_hint, p, i);
memset(tmp_id_hint+i, 0, PSK_MAX_IDENTITY_LEN+1-i);
if (s->ctx->psk_identity_hint != NULL)
OPENSSL_free(s->ctx->psk_identity_hint);
s->ctx->psk_identity_hint = BUF_strdup(tmp_id_hint);
if (s->ctx->psk_identity_hint == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto f_err;
}
p+=i;
n-=param_len;
}
else
#endif /* !OPENSSL_NO_PSK */
#ifndef OPENSSL_NO_SRP
if (alg_k & SSL_kSRP)
{
param_len = 2;
if (param_len > n)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_N_LENGTH);
goto f_err;
}
param_len += i;
if (!(s->srp_ctx.N=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
if (2 > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
param_len += 2;
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_G_LENGTH);
goto f_err;
}
param_len += i;
if (!(s->srp_ctx.g=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
if (1 > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
param_len += 1;
i = (unsigned int)(p[0]);
p++;
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_S_LENGTH);
goto f_err;
}
param_len += i;
if (!(s->srp_ctx.s=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
if (2 > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
param_len += 2;
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_B_LENGTH);
goto f_err;
}
param_len += i;
if (!(s->srp_ctx.B=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
n-=param_len;
if (!srp_verify_server_param(s, &al))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_PARAMETERS);
goto f_err;
}
/* We must check if there is a certificate */
#ifndef OPENSSL_NO_RSA
if (alg_a & SSL_aRSA)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509);
#else
if (0)
;
#endif
#ifndef OPENSSL_NO_DSA
else if (alg_a & SSL_aDSS)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_DSA_SIGN].x509);
#endif
}
else
#endif /* !OPENSSL_NO_SRP */
#ifndef OPENSSL_NO_RSA
if (alg_k & SSL_kRSA)
{
if ((rsa=RSA_new()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
param_len = 2;
if (param_len > n)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_RSA_MODULUS_LENGTH);
goto f_err;
}
param_len += i;
if (!(rsa->n=BN_bin2bn(p,i,rsa->n)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
if (2 > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
param_len += 2;
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_RSA_E_LENGTH);
goto f_err;
}
param_len += i;
if (!(rsa->e=BN_bin2bn(p,i,rsa->e)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
n-=param_len;
/* this should be because we are using an export cipher */
if (alg_a & SSL_aRSA)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509);
else
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
s->session->sess_cert->peer_rsa_tmp=rsa;
rsa=NULL;
}
#else /* OPENSSL_NO_RSA */
if (0)
;
#endif
#ifndef OPENSSL_NO_DH
else if (alg_k & SSL_kDHE)
{
if ((dh=DH_new()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_DH_LIB);
goto err;
}
param_len = 2;
if (param_len > n)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_P_LENGTH);
goto f_err;
}
param_len += i;
if (!(dh->p=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
if (2 > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
param_len += 2;
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_G_LENGTH);
goto f_err;
}
param_len += i;
if (!(dh->g=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
if (2 > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
param_len += 2;
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_PUB_KEY_LENGTH);
goto f_err;
}
param_len += i;
if (!(dh->pub_key=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
n-=param_len;
if (!ssl_security(s, SSL_SECOP_TMP_DH,
DH_security_bits(dh), 0, dh))
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_DH_KEY_TOO_SMALL);
goto f_err;
}
#ifndef OPENSSL_NO_RSA
if (alg_a & SSL_aRSA)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509);
#else
if (0)
;
#endif
#ifndef OPENSSL_NO_DSA
else if (alg_a & SSL_aDSS)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_DSA_SIGN].x509);
#endif
/* else anonymous DH, so no certificate or pkey. */
s->session->sess_cert->peer_dh_tmp=dh;
dh=NULL;
}
else if ((alg_k & SSL_kDHr) || (alg_k & SSL_kDHd))
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_TRIED_TO_USE_UNSUPPORTED_CIPHER);
goto f_err;
}
#endif /* !OPENSSL_NO_DH */
#ifndef OPENSSL_NO_ECDH
else if (alg_k & SSL_kECDHE)
{
EC_GROUP *ngroup;
const EC_GROUP *group;
if ((ecdh=EC_KEY_new()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
/* Extract elliptic curve parameters and the
* server's ephemeral ECDH public key.
* Keep accumulating lengths of various components in
* param_len and make sure it never exceeds n.
*/
/* XXX: For now we only support named (not generic) curves
* and the ECParameters in this case is just three bytes. We
* also need one byte for the length of the encoded point
*/
param_len=4;
if (param_len > n)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
/* Check curve is one of our preferences, if not server has
* sent an invalid curve. ECParameters is 3 bytes.
*/
if (!tls1_check_curve(s, p, 3))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_WRONG_CURVE);
goto f_err;
}
if ((curve_nid = tls1_ec_curve_id2nid(*(p + 2))) == 0)
{
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS);
goto f_err;
}
ngroup = EC_GROUP_new_by_curve_name(curve_nid);
if (ngroup == NULL)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_EC_LIB);
goto err;
}
if (EC_KEY_set_group(ecdh, ngroup) == 0)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_EC_LIB);
goto err;
}
EC_GROUP_free(ngroup);
group = EC_KEY_get0_group(ecdh);
if (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher) &&
(EC_GROUP_get_degree(group) > 163))
{
al=SSL_AD_EXPORT_RESTRICTION;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_ECGROUP_TOO_LARGE_FOR_CIPHER);
goto f_err;
}
p+=3;
/* Next, get the encoded ECPoint */
if (((srvr_ecpoint = EC_POINT_new(group)) == NULL) ||
((bn_ctx = BN_CTX_new()) == NULL))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
encoded_pt_len = *p; /* length of encoded point */
p+=1;
if ((encoded_pt_len > n - param_len) ||
(EC_POINT_oct2point(group, srvr_ecpoint,
p, encoded_pt_len, bn_ctx) == 0))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_ECPOINT);
goto f_err;
}
param_len += encoded_pt_len;
n-=param_len;
p+=encoded_pt_len;
/* The ECC/TLS specification does not mention
* the use of DSA to sign ECParameters in the server
* key exchange message. We do support RSA and ECDSA.
*/
if (0) ;
#ifndef OPENSSL_NO_RSA
else if (alg_a & SSL_aRSA)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509);
#endif
#ifndef OPENSSL_NO_ECDSA
else if (alg_a & SSL_aECDSA)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_ECC].x509);
#endif
/* else anonymous ECDH, so no certificate or pkey. */
EC_KEY_set_public_key(ecdh, srvr_ecpoint);
s->session->sess_cert->peer_ecdh_tmp=ecdh;
ecdh=NULL;
BN_CTX_free(bn_ctx);
bn_ctx = NULL;
EC_POINT_free(srvr_ecpoint);
srvr_ecpoint = NULL;
}
else if (alg_k)
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_UNEXPECTED_MESSAGE);
goto f_err;
}
#endif /* !OPENSSL_NO_ECDH */
/* p points to the next byte, there are 'n' bytes left */
/* if it was signed, check the signature */
if (pkey != NULL)
{
if (SSL_USE_SIGALGS(s))
{
int rv;
if (2 > n)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
rv = tls12_check_peer_sigalg(&md, s, p, pkey);
if (rv == -1)
goto err;
else if (rv == 0)
{
goto f_err;
}
#ifdef SSL_DEBUG
fprintf(stderr, "USING TLSv1.2 HASH %s\n", EVP_MD_name(md));
#endif
p += 2;
n -= 2;
}
else
md = EVP_sha1();
if (2 > n)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
n2s(p,i);
n-=2;
j=EVP_PKEY_size(pkey);
/* Check signature length. If n is 0 then signature is empty */
if ((i != n) || (n > j) || (n <= 0))
{
/* wrong packet length */
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_WRONG_SIGNATURE_LENGTH);
goto f_err;
}
#ifndef OPENSSL_NO_RSA
if (pkey->type == EVP_PKEY_RSA && !SSL_USE_SIGALGS(s))
{
int num;
unsigned int size;
j=0;
q=md_buf;
for (num=2; num > 0; num--)
{
EVP_MD_CTX_set_flags(&md_ctx,
EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
EVP_DigestInit_ex(&md_ctx,(num == 2)
?s->ctx->md5:s->ctx->sha1, NULL);
EVP_DigestUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE);
EVP_DigestUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE);
EVP_DigestUpdate(&md_ctx,param,param_len);
EVP_DigestFinal_ex(&md_ctx,q,&size);
q+=size;
j+=size;
}
i=RSA_verify(NID_md5_sha1, md_buf, j, p, n,
pkey->pkey.rsa);
if (i < 0)
{
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_RSA_DECRYPT);
goto f_err;
}
if (i == 0)
{
/* bad signature */
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SIGNATURE);
goto f_err;
}
}
else
#endif
{
EVP_VerifyInit_ex(&md_ctx, md, NULL);
EVP_VerifyUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE);
EVP_VerifyUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE);
EVP_VerifyUpdate(&md_ctx,param,param_len);
if (EVP_VerifyFinal(&md_ctx,p,(int)n,pkey) <= 0)
{
/* bad signature */
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SIGNATURE);
goto f_err;
}
}
}
else
{
/* aNULL, aSRP or kPSK do not need public keys */
if (!(alg_a & (SSL_aNULL|SSL_aSRP)) && !(alg_k & SSL_kPSK))
{
/* Might be wrong key type, check it */
if (ssl3_check_cert_and_algorithm(s))
/* Otherwise this shouldn't happen */
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
/* still data left over */
if (n != 0)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_EXTRA_DATA_IN_MESSAGE);
goto f_err;
}
}
EVP_PKEY_free(pkey);
EVP_MD_CTX_cleanup(&md_ctx);
return(1);
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
err:
EVP_PKEY_free(pkey);
#ifndef OPENSSL_NO_RSA
if (rsa != NULL)
RSA_free(rsa);
#endif
#ifndef OPENSSL_NO_DH
if (dh != NULL)
DH_free(dh);
#endif
#ifndef OPENSSL_NO_ECDH
BN_CTX_free(bn_ctx);
EC_POINT_free(srvr_ecpoint);
if (ecdh != NULL)
EC_KEY_free(ecdh);
#endif
EVP_MD_CTX_cleanup(&md_ctx);
return(-1);
}
| 301,554,715,472,437,100,000,000,000,000,000,000,000 | None | null | [
"CWE-310"
] | CVE-2014-3572 | The ssl3_get_key_exchange function in s3_clnt.c in OpenSSL before 0.9.8zd, 1.0.0 before 1.0.0p, and 1.0.1 before 1.0.1k allows remote SSL servers to conduct ECDHE-to-ECDH downgrade attacks and trigger a loss of forward secrecy by omitting the ServerKeyExchange message. | https://nvd.nist.gov/vuln/detail/CVE-2014-3572 |
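The row above contains the pre-fix ssl3_get_key_exchange(): when ServerKeyExchange is absent, the function sets reuse_message and returns success without checking whether the negotiated ciphersuite actually requires that message, which is what enables the ECDHE-to-ECDH downgrade. Below is a minimal standalone sketch of the kind of guard the commit message describes; the flag values and wrapper function are hypothetical stand-ins, not OpenSSL's API.

```c
/* Standalone sketch (not OpenSSL code): reject a skipped ServerKeyExchange
 * for ciphersuites that require an ephemeral key exchange.  Flag values are
 * invented for illustration. */
#include <stdio.h>

#define SSL_kRSA   0x01u   /* hypothetical flag values */
#define SSL_kDHE   0x02u
#define SSL_kECDHE 0x04u
#define SSL_kPSK   0x08u

/* Return 1 if a handshake that omits ServerKeyExchange may proceed. */
static int skipped_server_kex_ok(unsigned alg_k)
{
    /* Ephemeral (EC)DH ciphersuites must send ServerKeyExchange; accepting
     * its absence is exactly the downgrade described in CVE-2014-3572. */
    if (alg_k & (SSL_kDHE | SSL_kECDHE))
        return 0;
    return 1;
}

int main(void)
{
    printf("ECDHE, no ServerKeyExchange: %s\n",
           skipped_server_kex_ok(SSL_kECDHE) ? "accepted" : "rejected");
    printf("plain PSK, no ServerKeyExchange: %s\n",
           skipped_server_kex_ok(SSL_kPSK) ? "accepted" : "rejected");
    return 0;
}
```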
1,656 | nbd | 741495cb08503fd32a9d22648e63b64390c601f4 | https://github.com/yoe/nbd | https://github.com/yoe/nbd/commit/741495cb08503fd32a9d22648e63b64390c601f4 | nbd-server: handle modern-style negotiation in a child process
Previously, the modern style negotiation was carried out in the root
server (listener) process before forking the actual client handler. This
made it possible for a malfunctioning or evil client to terminate the
root process simply by querying a non-existent export or aborting in the
middle of the negotiation process (caused SIGPIPE in the server).
This commit moves the negotiation process to the child to keep the root
process up and running no matter what happens during the negotiation.
See http://sourceforge.net/mailarchive/message.php?msg_id=30410146
Signed-off-by: Tuomas Räsänen <tuomasjjrasanen@tjjr.fi> | 1 | void serveloop(GArray* servers) {
struct sockaddr_storage addrin;
socklen_t addrinlen=sizeof(addrin);
int i;
int max;
fd_set mset;
fd_set rset;
/*
* Set up the master fd_set. The set of descriptors we need
* to select() for never changes anyway and it buys us a *lot*
* of time to only build this once. However, if we ever choose
* to not fork() for clients anymore, we may have to revisit
* this.
*/
max=0;
FD_ZERO(&mset);
for(i=0;i<servers->len;i++) {
int sock;
if((sock=(g_array_index(servers, SERVER, i)).socket) >= 0) {
FD_SET(sock, &mset);
max=sock>max?sock:max;
}
}
for(i=0;i<modernsocks->len;i++) {
int sock = g_array_index(modernsocks, int, i);
FD_SET(sock, &mset);
max=sock>max?sock:max;
}
for(;;) {
/* SIGHUP causes the root server process to reconfigure
* itself and add new export servers for each newly
* found export configuration group, i.e. spawn new
* server processes for each previously non-existent
* export. This does not alter old runtime configuration
* but just appends new exports. */
if (is_sighup_caught) {
int n;
GError *gerror = NULL;
msg(LOG_INFO, "reconfiguration request received");
is_sighup_caught = 0; /* Reset to allow catching
* it again. */
n = append_new_servers(servers, &gerror);
if (n == -1)
msg(LOG_ERR, "failed to append new servers: %s",
gerror->message);
for (i = servers->len - n; i < servers->len; ++i) {
const SERVER server = g_array_index(servers,
SERVER, i);
if (server.socket >= 0) {
FD_SET(server.socket, &mset);
max = server.socket > max ? server.socket : max;
}
msg(LOG_INFO, "reconfigured new server: %s",
server.servename);
}
}
memcpy(&rset, &mset, sizeof(fd_set));
if(select(max+1, &rset, NULL, NULL, NULL)>0) {
int net;
DEBUG("accept, ");
for(i=0; i < modernsocks->len; i++) {
int sock = g_array_index(modernsocks, int, i);
if(!FD_ISSET(sock, &rset)) {
continue;
}
CLIENT *client;
if((net=accept(sock, (struct sockaddr *) &addrin, &addrinlen)) < 0) {
err_nonfatal("accept: %m");
continue;
}
client = negotiate(net, NULL, servers, NEG_INIT | NEG_MODERN);
if(!client) {
close(net);
continue;
}
handle_connection(servers, net, client->server, client);
}
for(i=0; i < servers->len; i++) {
SERVER *serve;
serve=&(g_array_index(servers, SERVER, i));
if(serve->socket < 0) {
continue;
}
if(FD_ISSET(serve->socket, &rset)) {
if ((net=accept(serve->socket, (struct sockaddr *) &addrin, &addrinlen)) < 0) {
err_nonfatal("accept: %m");
continue;
}
handle_connection(servers, net, serve, NULL);
}
}
}
}
}
| 143,212,735,324,855,570,000,000,000,000,000,000,000 | nbd-server.c | 174,715,315,005,731,700,000,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2013-7441 | The modern style negotiation in Network Block Device (nbd-server) 2.9.22 through 3.3 allows remote attackers to cause a denial of service (root process termination) by (1) closing the connection during negotiation or (2) specifying a name for a non-existent export. | https://nvd.nist.gov/vuln/detail/CVE-2013-7441 |
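The commit message above moves modern-style negotiation out of the root listener process, while the serveloop() shown in the row still runs negotiate() in the parent before handle_connection(). The following is a rough standalone illustration of the fork-first pattern with a toy protocol; the helper and port are placeholders, not nbd-server's actual code.

```c
/* Toy accept loop illustrating "fork before negotiating": a client that
 * misbehaves during the handshake can only kill its own child process,
 * never the listener. */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

static void serve_client(int fd)
{
    /* Negotiation + request handling would happen here, in the child. */
    write(fd, "hello\n", 6);
    close(fd);
}

int main(void)
{
    signal(SIGCHLD, SIG_IGN);              /* reap children automatically   */
    signal(SIGPIPE, SIG_IGN);              /* a dead peer must not kill us  */

    int ls = socket(AF_INET, SOCK_STREAM, 0);
    struct sockaddr_in a = { .sin_family = AF_INET,
                             .sin_port = htons(10809),
                             .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
    if (ls < 0 || bind(ls, (struct sockaddr *)&a, sizeof a) < 0 ||
        listen(ls, 16) < 0) {
        perror("listen");
        return 1;
    }
    for (;;) {
        int c = accept(ls, NULL, NULL);
        if (c < 0)
            continue;
        if (fork() == 0) {                 /* child: do all per-client work */
            close(ls);
            serve_client(c);
            _exit(0);
        }
        close(c);                          /* parent: straight back to accept() */
    }
}
```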
1,657 | linux | 128394eff343fc6d2f32172f03e24829539c5835 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/128394eff343fc6d2f32172f03e24829539c5835 | sg_write()/bsg_write() is not fit to be called under KERNEL_DS
Both damn things interpret userland pointers embedded into the payload;
worse, they are actually traversing those. Leaving aside the bad
API design, this is very much _not_ safe to call with KERNEL_DS.
Bail out early if that happens.
Cc: stable@vger.kernel.org
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> | 1 | bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
struct bsg_device *bd = file->private_data;
ssize_t bytes_written;
int ret;
dprintk("%s: write %Zd bytes\n", bd->name, count);
bsg_set_block(bd, file);
bytes_written = 0;
ret = __bsg_write(bd, buf, count, &bytes_written,
file->f_mode & FMODE_WRITE);
*ppos = bytes_written;
/*
* return bytes written on non-fatal errors
*/
if (!bytes_written || err_block_err(ret))
bytes_written = ret;
dprintk("%s: returning %Zd\n", bd->name, bytes_written);
return bytes_written;
}
| 107,643,057,290,525,440,000,000,000,000,000,000,000 | bsg.c | 6,609,762,736,419,196,000,000,000,000,000,000,000 | [
"CWE-416"
] | CVE-2016-10088 | The sg implementation in the Linux kernel through 4.9 does not properly restrict write operations in situations where the KERNEL_DS option is set, which allows local users to read or write to arbitrary kernel memory locations or cause a denial of service (use-after-free) by leveraging access to a /dev/sg device, related to block/bsg.c and drivers/scsi/sg.c. NOTE: this vulnerability exists because of an incomplete fix for CVE-2016-9576. | https://nvd.nist.gov/vuln/detail/CVE-2016-10088 |
1,658 | linux | 128394eff343fc6d2f32172f03e24829539c5835 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/128394eff343fc6d2f32172f03e24829539c5835 | sg_write()/bsg_write() is not fit to be called under KERNEL_DS
Both damn things interpret userland pointers embedded into the payload;
worse, they are actually traversing those. Leaving aside the bad
API design, this is very much _not_ safe to call with KERNEL_DS.
Bail out early if that happens.
Cc: stable@vger.kernel.org
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> | 1 | sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
int mxsize, cmd_size, k;
int input_size, blocking;
unsigned char opcode;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
struct sg_header old_hdr;
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_write: count=%d\n", (int) count));
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!((filp->f_flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device)))
return -ENXIO;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT; /* protects following copy_from_user()s + get_user()s */
if (count < SZ_SG_HEADER)
return -EIO;
if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
return -EFAULT;
blocking = !(filp->f_flags & O_NONBLOCK);
if (old_hdr.reply_len < 0)
return sg_new_write(sfp, filp, buf, count,
blocking, 0, 0, NULL);
if (count < (SZ_SG_HEADER + 6))
return -EIO; /* The minimum scsi command length is 6 bytes. */
if (!(srp = sg_add_request(sfp))) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
"sg_write: queue full\n"));
return -EDOM;
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so only this write() effected */
} else {
cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
input_size = count - cmd_size;
mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
mxsize -= SZ_SG_HEADER;
input_size -= SZ_SG_HEADER;
if (input_size < 0) {
sg_remove_request(sfp, srp);
return -EIO; /* User did not pass enough bytes for this command. */
}
hp = &srp->header;
hp->interface_id = '\0'; /* indicator of old interface tunnelled */
hp->cmd_len = (unsigned char) cmd_size;
hp->iovec_count = 0;
hp->mx_sb_len = 0;
if (input_size > 0)
hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
else
hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
hp->dxfer_len = mxsize;
if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
(hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
hp->dxferp = (char __user *)buf + cmd_size;
else
hp->dxferp = NULL;
hp->sbp = NULL;
hp->timeout = old_hdr.reply_len; /* structure abuse ... */
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
if (__copy_from_user(cmnd, buf, cmd_size))
return -EFAULT;
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
* but is is possible that the app intended SG_DXFER_TO_DEV, because there
* is a non-zero input_size, so emit a warning.
*/
if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
static char cmd[TASK_COMM_LEN];
if (strcmp(current->comm, cmd)) {
printk_ratelimited(KERN_WARNING
"sg_write: data in/out %d/%d bytes "
"for SCSI command 0x%x-- guessing "
"data in;\n program %s not setting "
"count and/or reply_len properly\n",
old_hdr.reply_len - (int)SZ_SG_HEADER,
input_size, (unsigned int) cmnd[0],
current->comm);
strcpy(cmd, current->comm);
}
}
k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
return (k < 0) ? k : count;
}
| 55,314,091,797,718,160,000,000,000,000,000,000,000 | sg.c | 199,371,262,027,085,440,000,000,000,000,000,000,000 | [
"CWE-416"
] | CVE-2016-10088 | The sg implementation in the Linux kernel through 4.9 does not properly restrict write operations in situations where the KERNEL_DS option is set, which allows local users to read or write to arbitrary kernel memory locations or cause a denial of service (use-after-free) by leveraging access to a /dev/sg device, related to block/bsg.c and drivers/scsi/sg.c. NOTE: this vulnerability exists because of an incomplete fix for CVE-2016-9576. | https://nvd.nist.gov/vuln/detail/CVE-2016-10088 |
1,659 | linux | 79dc7e3f1cd323be4c81aa1a94faa1b3ed987fb2 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/79dc7e3f1cd323be4c81aa1a94faa1b3ed987fb2 | net: handle no dst on skb in icmp6_send
Andrey reported the following while fuzzing the kernel with syzkaller:
kasan: CONFIG_KASAN_INLINE enabled
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN
Modules linked in:
CPU: 0 PID: 3859 Comm: a.out Not tainted 4.9.0-rc6+ #429
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
task: ffff8800666d4200 task.stack: ffff880067348000
RIP: 0010:[<ffffffff833617ec>] [<ffffffff833617ec>]
icmp6_send+0x5fc/0x1e30 net/ipv6/icmp.c:451
RSP: 0018:ffff88006734f2c0 EFLAGS: 00010206
RAX: ffff8800666d4200 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: dffffc0000000000 RDI: 0000000000000018
RBP: ffff88006734f630 R08: ffff880064138418 R09: 0000000000000003
R10: dffffc0000000000 R11: 0000000000000005 R12: 0000000000000000
R13: ffffffff84e7e200 R14: ffff880064138484 R15: ffff8800641383c0
FS: 00007fb3887a07c0(0000) GS:ffff88006cc00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000020000000 CR3: 000000006b040000 CR4: 00000000000006f0
Stack:
ffff8800666d4200 ffff8800666d49f8 ffff8800666d4200 ffffffff84c02460
ffff8800666d4a1a 1ffff1000ccdaa2f ffff88006734f498 0000000000000046
ffff88006734f440 ffffffff832f4269 ffff880064ba7456 0000000000000000
Call Trace:
[<ffffffff83364ddc>] icmpv6_param_prob+0x2c/0x40 net/ipv6/icmp.c:557
[< inline >] ip6_tlvopt_unknown net/ipv6/exthdrs.c:88
[<ffffffff83394405>] ip6_parse_tlv+0x555/0x670 net/ipv6/exthdrs.c:157
[<ffffffff8339a759>] ipv6_parse_hopopts+0x199/0x460 net/ipv6/exthdrs.c:663
[<ffffffff832ee773>] ipv6_rcv+0xfa3/0x1dc0 net/ipv6/ip6_input.c:191
...
icmp6_send / icmpv6_send is invoked for both rx and tx paths. In both
cases the dst->dev should be preferred for determining the L3 domain
if the dst has been set on the skb. Fallback to the skb->dev if it has
not. This covers the case reported here where icmp6_send is invoked on
Rx before the route lookup.
Fixes: 5d41ce29e ("net: icmp6_send should use dst dev to determine L3 domain")
Reported-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: David Ahern <dsa@cumulusnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct in6_addr *force_saddr)
{
struct net *net = dev_net(skb->dev);
struct inet6_dev *idev = NULL;
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct sock *sk;
struct ipv6_pinfo *np;
const struct in6_addr *saddr = NULL;
struct dst_entry *dst;
struct icmp6hdr tmp_hdr;
struct flowi6 fl6;
struct icmpv6_msg msg;
struct sockcm_cookie sockc_unused = {0};
struct ipcm6_cookie ipc6;
int iif = 0;
int addr_type = 0;
int len;
int err = 0;
u32 mark = IP6_REPLY_MARK(net, skb->mark);
if ((u8 *)hdr < skb->head ||
(skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
return;
/*
* Make sure we respect the rules
* i.e. RFC 1885 2.4(e)
* Rule (e.1) is enforced by not using icmp6_send
* in any code that processes icmp errors.
*/
addr_type = ipv6_addr_type(&hdr->daddr);
if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) ||
ipv6_chk_acast_addr_src(net, skb->dev, &hdr->daddr))
saddr = &hdr->daddr;
/*
* Dest addr check
*/
if (addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST) {
if (type != ICMPV6_PKT_TOOBIG &&
!(type == ICMPV6_PARAMPROB &&
code == ICMPV6_UNK_OPTION &&
(opt_unrec(skb, info))))
return;
saddr = NULL;
}
addr_type = ipv6_addr_type(&hdr->saddr);
/*
* Source addr check
*/
if (__ipv6_addr_needs_scope_id(addr_type))
iif = skb->dev->ifindex;
else
iif = l3mdev_master_ifindex(skb_dst(skb)->dev);
/*
* Must not send error if the source does not uniquely
* identify a single node (RFC2463 Section 2.4).
* We check unspecified / multicast addresses here,
* and anycast addresses will be checked later.
*/
if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
net_dbg_ratelimited("icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\n",
&hdr->saddr, &hdr->daddr);
return;
}
/*
* Never answer to a ICMP packet.
*/
if (is_ineligible(skb)) {
net_dbg_ratelimited("icmp6_send: no reply to icmp error [%pI6c > %pI6c]\n",
&hdr->saddr, &hdr->daddr);
return;
}
mip6_addr_swap(skb);
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_ICMPV6;
fl6.daddr = hdr->saddr;
if (force_saddr)
saddr = force_saddr;
if (saddr)
fl6.saddr = *saddr;
fl6.flowi6_mark = mark;
fl6.flowi6_oif = iif;
fl6.fl6_icmp_type = type;
fl6.fl6_icmp_code = code;
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
sk = icmpv6_xmit_lock(net);
if (!sk)
return;
sk->sk_mark = mark;
np = inet6_sk(sk);
if (!icmpv6_xrlim_allow(sk, type, &fl6))
goto out;
tmp_hdr.icmp6_type = type;
tmp_hdr.icmp6_code = code;
tmp_hdr.icmp6_cksum = 0;
tmp_hdr.icmp6_pointer = htonl(info);
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
ipc6.tclass = np->tclass;
fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
dst = icmpv6_route_lookup(net, skb, sk, &fl6);
if (IS_ERR(dst))
goto out;
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
ipc6.dontfrag = np->dontfrag;
ipc6.opt = NULL;
msg.skb = skb;
msg.offset = skb_network_offset(skb);
msg.type = type;
len = skb->len - msg.offset;
len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
if (len < 0) {
net_dbg_ratelimited("icmp: len problem [%pI6c > %pI6c]\n",
&hdr->saddr, &hdr->daddr);
goto out_dst_release;
}
rcu_read_lock();
idev = __in6_dev_get(skb->dev);
err = ip6_append_data(sk, icmpv6_getfrag, &msg,
len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr),
&ipc6, &fl6, (struct rt6_info *)dst,
MSG_DONTWAIT, &sockc_unused);
if (err) {
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
} else {
err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
len + sizeof(struct icmp6hdr));
}
rcu_read_unlock();
out_dst_release:
dst_release(dst);
out:
icmpv6_xmit_unlock(sk);
}
| 148,411,612,446,306,800,000,000,000,000,000,000,000 | icmp.c | 303,659,297,633,054,450,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2016-9919 | The icmp6_send function in net/ipv6/icmp.c in the Linux kernel through 4.8.12 omits a certain check of the dst data structure, which allows remote attackers to cause a denial of service (panic) via a fragmented IPv6 packet. | https://nvd.nist.gov/vuln/detail/CVE-2016-9919 |
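The crash in the row above comes from `l3mdev_master_ifindex(skb_dst(skb)->dev)` when the skb has no dst attached yet (the Rx path before route lookup). The commit message's fix prefers the dst device when one is present and falls back to skb->dev otherwise. Below is a standalone model of that selection logic; the struct layout is invented for illustration and is not the kernel's.

```c
/* Simplified model of the device-selection fix: prefer the dst's device if
 * a dst is attached, otherwise fall back to the receiving device. */
#include <stdio.h>
#include <stddef.h>

struct net_device { int ifindex; };
struct dst_entry  { struct net_device *dev; };
struct sk_buff    { struct net_device *dev; struct dst_entry *dst; };

static int l3_domain_ifindex(const struct sk_buff *skb)
{
    const struct dst_entry *dst = skb->dst;          /* may be NULL on Rx */
    const struct net_device *dev = dst ? dst->dev : skb->dev;
    return dev->ifindex;
}

int main(void)
{
    struct net_device rx = { .ifindex = 2 }, out = { .ifindex = 5 };
    struct dst_entry d = { .dev = &out };
    struct sk_buff no_route = { .dev = &rx, .dst = NULL };  /* old code crashed here */
    struct sk_buff routed   = { .dev = &rx, .dst = &d };

    printf("no dst  -> ifindex %d\n", l3_domain_ifindex(&no_route));
    printf("dst set -> ifindex %d\n", l3_domain_ifindex(&routed));
    return 0;
}
```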
1,660 | libgsf | 95a8351a75758cf10b3bf6abae0b6b461f90d9e5 | https://github.com/GNOME/libgsf | https://github.com/GNOME/libgsf/commit/95a8351a75758cf10b3bf6abae0b6b461f90d9e5 | tar: fix crash on broken tar file. | 1 | tar_directory_for_file (GsfInfileTar *dir, const char *name, gboolean last)
{
const char *s = name;
while (1) {
const char *s0 = s;
char *dirname;
/* Find a directory component, if any. */
while (1) {
if (*s == 0) {
if (last && s != s0)
break;
else
return dir;
}
/* This is deliberately slash-only. */
if (*s == '/')
break;
s++;
}
dirname = g_strndup (s0, s - s0);
while (*s == '/')
s++;
if (strcmp (dirname, ".") != 0) {
GsfInput *subdir =
gsf_infile_child_by_name (GSF_INFILE (dir),
dirname);
if (subdir) {
/* Undo the ref. */
g_object_unref (subdir);
dir = GSF_INFILE_TAR (subdir);
} else
dir = tar_create_dir (dir, dirname);
}
g_free (dirname);
}
}
| 54,320,595,055,581,590,000,000,000,000,000,000,000 | gsf-infile-tar.c | 239,316,636,556,709,400,000,000,000,000,000,000,000 | [
"CWE-476"
] | CVE-2016-9888 | An error within the "tar_directory_for_file()" function (gsf-infile-tar.c) in GNOME Structured File Library before 1.14.41 can be exploited to trigger a Null pointer dereference and subsequently cause a crash via a crafted TAR file. | https://nvd.nist.gov/vuln/detail/CVE-2016-9888 |
1,661 | linux | 92964c79b357efd980812c4de5c1fd2ec8bb5520 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/92964c79b357efd980812c4de5c1fd2ec8bb5520 | netlink: Fix dump skb leak/double free
When we free cb->skb after a dump, we do it after releasing the
lock. This means that a new dump could have started in the time
being and we'll end up freeing their skb instead of ours.
This patch saves the skb and module before we unlock so we free
the right memory.
Fixes: 16b304f3404f ("netlink: Eliminate kmalloc in netlink dump operation.")
Reported-by: Baozeng Ding <sploving1@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static int netlink_dump(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
struct netlink_callback *cb;
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
int len, err = -ENOBUFS;
int alloc_min_size;
int alloc_size;
mutex_lock(nlk->cb_mutex);
if (!nlk->cb_running) {
err = -EINVAL;
goto errout_skb;
}
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
goto errout_skb;
/* NLMSG_GOODSIZE is small to avoid high order allocations being
* required, but it makes sense to _attempt_ a 16K bytes allocation
* to reduce number of system calls on dump operations, if user
* ever provided a big enough buffer.
*/
cb = &nlk->cb;
alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
if (alloc_min_size < nlk->max_recvmsg_len) {
alloc_size = nlk->max_recvmsg_len;
skb = alloc_skb(alloc_size, GFP_KERNEL |
__GFP_NOWARN | __GFP_NORETRY);
}
if (!skb) {
alloc_size = alloc_min_size;
skb = alloc_skb(alloc_size, GFP_KERNEL);
}
if (!skb)
goto errout_skb;
/* Trim skb to allocated size. User is expected to provide buffer as
* large as max(min_dump_alloc, 16KiB (mac_recvmsg_len capped at
* netlink_recvmsg())). dump will pack as many smaller messages as
* could fit within the allocated skb. skb is typically allocated
* with larger space than required (could be as much as near 2x the
* requested size with align to next power of 2 approach). Allowing
* dump to use the excess space makes it difficult for a user to have a
* reasonable static buffer based on the expected largest dump of a
* single netdev. The outcome is MSG_TRUNC error.
*/
skb_reserve(skb, skb_tailroom(skb) - alloc_size);
netlink_skb_set_owner_r(skb, sk);
len = cb->dump(skb, cb);
if (len > 0) {
mutex_unlock(nlk->cb_mutex);
if (sk_filter(sk, skb))
kfree_skb(skb);
else
__netlink_sendskb(sk, skb);
return 0;
}
nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
if (!nlh)
goto errout_skb;
nl_dump_check_consistent(cb, nlh);
memcpy(nlmsg_data(nlh), &len, sizeof(len));
if (sk_filter(sk, skb))
kfree_skb(skb);
else
__netlink_sendskb(sk, skb);
if (cb->done)
cb->done(cb);
nlk->cb_running = false;
mutex_unlock(nlk->cb_mutex);
module_put(cb->module);
consume_skb(cb->skb);
return 0;
errout_skb:
mutex_unlock(nlk->cb_mutex);
kfree_skb(skb);
return err;
}
| 196,001,105,598,532,200,000,000,000,000,000,000,000 | af_netlink.c | 181,510,954,759,435,270,000,000,000,000,000,000,000 | [
"CWE-415"
] | CVE-2016-9806 | Race condition in the netlink_dump function in net/netlink/af_netlink.c in the Linux kernel before 4.6.3 allows local users to cause a denial of service (double free) or possibly have unspecified other impact via a crafted application that makes sendmsg system calls, leading to a free operation associated with a new dump that started earlier than anticipated. | https://nvd.nist.gov/vuln/detail/CVE-2016-9806 |
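In the row above, netlink_dump() drops cb_mutex and only afterwards calls module_put(cb->module) and consume_skb(cb->skb); a new dump started in that gap may have replaced those fields, so the wrong skb gets freed. The commit message's remedy is to save the pointers to locals while still holding the lock. The sketch below shows that ownership-transfer pattern generically with pthreads; the toy structure is not the kernel code.

```c
/* Take ownership of shared resources *inside* the critical section and
 * release them from locals afterwards, so a concurrent restart cannot have
 * its resources freed out from under it. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dump_state {
    pthread_mutex_t lock;
    char *buf;                      /* owned by the current dump, may be swapped */
};

static void finish_dump(struct dump_state *st)
{
    pthread_mutex_lock(&st->lock);
    char *buf = st->buf;            /* detach under the lock ...               */
    st->buf = NULL;
    pthread_mutex_unlock(&st->lock);

    free(buf);                      /* ... free the detached copy, never st->buf */
}

int main(void)
{
    struct dump_state st = { PTHREAD_MUTEX_INITIALIZER, malloc(16) };
    finish_dump(&st);
    printf("freed the buffer we owned, not whatever replaced it\n");
    return 0;
}
```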
1,662 | linux | 3aa02cb664c5fb1042958c8d1aa8c35055a2ebc4 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/3aa02cb664c5fb1042958c8d1aa8c35055a2ebc4 | ALSA: pcm : Call kill_fasync() in stream lock
Currently kill_fasync() is called outside the stream lock in
snd_pcm_period_elapsed(). This is potentially racy, since the stream
may get released even during the irq handler is running. Although
snd_pcm_release_substream() calls snd_pcm_drop(), this doesn't
guarantee that the irq handler finishes, thus the kill_fasync() call
outside the stream spin lock may be invoked after the substream is
detached, as recently reported by KASAN.
As a quick workaround, move kill_fasync() call inside the stream
lock. The fasync is rarely used interface, so this shouldn't have a
big impact from the performance POV.
Ideally, we should implement some sync mechanism for the proper finish
of stream and irq handler. But this oneliner should suffice for most
cases, so far.
Reported-by: Baozeng Ding <sploving1@gmail.com>
Signed-off-by: Takashi Iwai <tiwai@suse.de> | 1 | void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
unsigned long flags;
if (PCM_RUNTIME_CHECK(substream))
return;
runtime = substream->runtime;
snd_pcm_stream_lock_irqsave(substream, flags);
if (!snd_pcm_running(substream) ||
snd_pcm_update_hw_ptr0(substream, 1) < 0)
goto _end;
#ifdef CONFIG_SND_PCM_TIMER
if (substream->timer_running)
snd_timer_interrupt(substream->timer, 1);
#endif
_end:
snd_pcm_stream_unlock_irqrestore(substream, flags);
kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
}
| 161,224,995,451,594,300,000,000,000,000,000,000,000 | pcm_lib.c | 125,569,751,360,658,480,000,000,000,000,000,000,000 | [
"CWE-416"
] | CVE-2016-9794 | Race condition in the snd_pcm_period_elapsed function in sound/core/pcm_lib.c in the ALSA subsystem in the Linux kernel before 4.7 allows local users to cause a denial of service (use-after-free) or possibly have unspecified other impact via a crafted SNDRV_PCM_TRIGGER_START command. | https://nvd.nist.gov/vuln/detail/CVE-2016-9794 |
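The function in the row above calls kill_fasync() after snd_pcm_stream_unlock_irqrestore(), so the substream and its runtime->fasync state can be released between the unlock and the notification. The one-line fix described in the commit message moves that call inside the stream lock. Below is a minimal pthread model of the same lock-scope change; the structure is invented for illustration.

```c
/* Minimal model: notify while the object is still protected by the lock,
 * instead of after unlocking when it may already have been torn down. */
#include <pthread.h>
#include <stdio.h>

struct stream {
    pthread_mutex_t lock;
    void (*notify)(void);           /* cleared by the release path under lock */
};

static void period_elapsed(struct stream *s)
{
    pthread_mutex_lock(&s->lock);
    /* ... update the hardware pointer ... */
    if (s->notify)
        s->notify();                /* fixed placement: called under the lock */
    pthread_mutex_unlock(&s->lock);
    /* racy placement: the notification used to happen here */
}

static void ping(void) { puts("period elapsed"); }

int main(void)
{
    struct stream s = { PTHREAD_MUTEX_INITIALIZER, ping };
    period_elapsed(&s);
    return 0;
}
```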
1,663 | linux | b98b0bc8c431e3ceb4b26b0dfc8db509518fb290 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/b98b0bc8c431e3ceb4b26b0dfc8db509518fb290 | net: avoid signed overflows for SO_{SND|RCV}BUFFORCE
CAP_NET_ADMIN users should not be allowed to set negative
sk_sndbuf or sk_rcvbuf values, as it can lead to various memory
corruptions, crashes, OOM...
Note that before commit 82981930125a ("net: cleanups in
sock_setsockopt()"), the bug was even more serious, since SO_SNDBUF
and SO_RCVBUF were vulnerable.
This needs to be backported to all known linux kernels.
Again, many thanks to syzkaller team for discovering this gem.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | int sock_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int val;
int valbool;
struct linger ling;
int ret = 0;
/*
* Options without arguments
*/
if (optname == SO_BINDTODEVICE)
return sock_setbindtodevice(sk, optval, optlen);
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
valbool = val ? 1 : 0;
lock_sock(sk);
switch (optname) {
case SO_DEBUG:
if (val && !capable(CAP_NET_ADMIN))
ret = -EACCES;
else
sock_valbool_flag(sk, SOCK_DBG, valbool);
break;
case SO_REUSEADDR:
sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
break;
case SO_REUSEPORT:
sk->sk_reuseport = valbool;
break;
case SO_TYPE:
case SO_PROTOCOL:
case SO_DOMAIN:
case SO_ERROR:
ret = -ENOPROTOOPT;
break;
case SO_DONTROUTE:
sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
break;
case SO_BROADCAST:
sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
break;
case SO_SNDBUF:
/* Don't error on this BSD doesn't and if you think
* about it this is right. Otherwise apps have to
* play 'guess the biggest size' games. RCVBUF/SNDBUF
* are treated in BSD as hints
*/
val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
/* Wake up sending tasks if we upped the value. */
sk->sk_write_space(sk);
break;
case SO_SNDBUFFORCE:
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
goto set_sndbuf;
case SO_RCVBUF:
/* Don't error on this BSD doesn't and if you think
* about it this is right. Otherwise apps have to
* play 'guess the biggest size' games. RCVBUF/SNDBUF
* are treated in BSD as hints
*/
val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
/*
* We double it on the way in to account for
* "struct sk_buff" etc. overhead. Applications
* assume that the SO_RCVBUF setting they make will
* allow that much actual data to be received on that
* socket.
*
* Applications are unaware that "struct sk_buff" and
* other overheads allocate from the receive buffer
* during socket buffer allocation.
*
* And after considering the possible alternatives,
* returning the value we actually used in getsockopt
* is the most desirable behavior.
*/
sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
break;
case SO_RCVBUFFORCE:
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
goto set_rcvbuf;
case SO_KEEPALIVE:
#ifdef CONFIG_INET
if (sk->sk_protocol == IPPROTO_TCP &&
sk->sk_type == SOCK_STREAM)
tcp_set_keepalive(sk, valbool);
#endif
sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
break;
case SO_OOBINLINE:
sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
break;
case SO_NO_CHECK:
sk->sk_no_check_tx = valbool;
break;
case SO_PRIORITY:
if ((val >= 0 && val <= 6) ||
ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk->sk_priority = val;
else
ret = -EPERM;
break;
case SO_LINGER:
if (optlen < sizeof(ling)) {
ret = -EINVAL; /* 1003.1g */
break;
}
if (copy_from_user(&ling, optval, sizeof(ling))) {
ret = -EFAULT;
break;
}
if (!ling.l_onoff)
sock_reset_flag(sk, SOCK_LINGER);
else {
#if (BITS_PER_LONG == 32)
if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
else
#endif
sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
sock_set_flag(sk, SOCK_LINGER);
}
break;
case SO_BSDCOMPAT:
sock_warn_obsolete_bsdism("setsockopt");
break;
case SO_PASSCRED:
if (valbool)
set_bit(SOCK_PASSCRED, &sock->flags);
else
clear_bit(SOCK_PASSCRED, &sock->flags);
break;
case SO_TIMESTAMP:
case SO_TIMESTAMPNS:
if (valbool) {
if (optname == SO_TIMESTAMP)
sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
else
sock_set_flag(sk, SOCK_RCVTSTAMPNS);
sock_set_flag(sk, SOCK_RCVTSTAMP);
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
} else {
sock_reset_flag(sk, SOCK_RCVTSTAMP);
sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
}
break;
case SO_TIMESTAMPING:
if (val & ~SOF_TIMESTAMPING_MASK) {
ret = -EINVAL;
break;
}
if (val & SOF_TIMESTAMPING_OPT_ID &&
!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
if (sk->sk_protocol == IPPROTO_TCP &&
sk->sk_type == SOCK_STREAM) {
if ((1 << sk->sk_state) &
(TCPF_CLOSE | TCPF_LISTEN)) {
ret = -EINVAL;
break;
}
sk->sk_tskey = tcp_sk(sk)->snd_una;
} else {
sk->sk_tskey = 0;
}
}
sk->sk_tsflags = val;
if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
sock_enable_timestamp(sk,
SOCK_TIMESTAMPING_RX_SOFTWARE);
else
sock_disable_timestamp(sk,
(1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
break;
case SO_RCVLOWAT:
if (val < 0)
val = INT_MAX;
sk->sk_rcvlowat = val ? : 1;
break;
case SO_RCVTIMEO:
ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
break;
case SO_SNDTIMEO:
ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
break;
case SO_ATTACH_FILTER:
ret = -EINVAL;
if (optlen == sizeof(struct sock_fprog)) {
struct sock_fprog fprog;
ret = -EFAULT;
if (copy_from_user(&fprog, optval, sizeof(fprog)))
break;
ret = sk_attach_filter(&fprog, sk);
}
break;
case SO_ATTACH_BPF:
ret = -EINVAL;
if (optlen == sizeof(u32)) {
u32 ufd;
ret = -EFAULT;
if (copy_from_user(&ufd, optval, sizeof(ufd)))
break;
ret = sk_attach_bpf(ufd, sk);
}
break;
case SO_ATTACH_REUSEPORT_CBPF:
ret = -EINVAL;
if (optlen == sizeof(struct sock_fprog)) {
struct sock_fprog fprog;
ret = -EFAULT;
if (copy_from_user(&fprog, optval, sizeof(fprog)))
break;
ret = sk_reuseport_attach_filter(&fprog, sk);
}
break;
case SO_ATTACH_REUSEPORT_EBPF:
ret = -EINVAL;
if (optlen == sizeof(u32)) {
u32 ufd;
ret = -EFAULT;
if (copy_from_user(&ufd, optval, sizeof(ufd)))
break;
ret = sk_reuseport_attach_bpf(ufd, sk);
}
break;
case SO_DETACH_FILTER:
ret = sk_detach_filter(sk);
break;
case SO_LOCK_FILTER:
if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
ret = -EPERM;
else
sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
break;
case SO_PASSSEC:
if (valbool)
set_bit(SOCK_PASSSEC, &sock->flags);
else
clear_bit(SOCK_PASSSEC, &sock->flags);
break;
case SO_MARK:
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
ret = -EPERM;
else
sk->sk_mark = val;
break;
case SO_RXQ_OVFL:
sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
break;
case SO_WIFI_STATUS:
sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
break;
case SO_PEEK_OFF:
if (sock->ops->set_peek_off)
ret = sock->ops->set_peek_off(sk, val);
else
ret = -EOPNOTSUPP;
break;
case SO_NOFCS:
sock_valbool_flag(sk, SOCK_NOFCS, valbool);
break;
case SO_SELECT_ERR_QUEUE:
sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
break;
#ifdef CONFIG_NET_RX_BUSY_POLL
case SO_BUSY_POLL:
/* allow unprivileged users to decrease the value */
if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
ret = -EPERM;
else {
if (val < 0)
ret = -EINVAL;
else
sk->sk_ll_usec = val;
}
break;
#endif
case SO_MAX_PACING_RATE:
sk->sk_max_pacing_rate = val;
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
sk->sk_max_pacing_rate);
break;
case SO_INCOMING_CPU:
sk->sk_incoming_cpu = val;
break;
case SO_CNX_ADVICE:
if (val == 1)
dst_negative_advice(sk);
break;
default:
ret = -ENOPROTOOPT;
break;
}
release_sock(sk);
return ret;
}
| 66,744,804,628,888,285,000,000,000,000,000,000,000 | sock.c | 37,494,279,310,679,760,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2016-9793 | The sock_setsockopt function in net/core/sock.c in the Linux kernel before 4.8.14 mishandles negative values of sk_sndbuf and sk_rcvbuf, which allows local users to cause a denial of service (memory corruption and system crash) or possibly have unspecified other impact by leveraging the CAP_NET_ADMIN capability for a crafted setsockopt system call with the (1) SO_SNDBUFFORCE or (2) SO_RCVBUFFORCE option. | https://nvd.nist.gov/vuln/detail/CVE-2016-9793 |
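In the SO_SNDBUFFORCE/SO_RCVBUFFORCE branches of the row above, the user-supplied val bypasses the min_t clamp against sysctl_wmem_max/rmem_max, so a negative value reaches `max_t(u32, val * 2, SOCK_MIN_SNDBUF)` and wraps into a huge unsigned buffer size. The small program below reproduces that wrap-around and shows the kind of clamp the fix adds; the SOCK_MIN_SNDBUF constant here is an illustrative placeholder, not the kernel's value.

```c
/* Demonstrates why a negative SO_SNDBUFFORCE value is dangerous: doubling it
 * and converting to u32 wraps to a near-4 GiB "buffer size".  The fix simply
 * zeroes negative values before the doubling. */
#include <stdio.h>
#include <stdint.h>

#define SOCK_MIN_SNDBUF 4608u      /* illustrative placeholder value */

static uint32_t old_sndbuf(int val)
{
    uint32_t doubled = (uint32_t)(val * 2);            /* wraps for val < 0 */
    return doubled > SOCK_MIN_SNDBUF ? doubled : SOCK_MIN_SNDBUF;
}

static uint32_t fixed_sndbuf(int val)
{
    if (val < 0)                                       /* the essence of the fix */
        val = 0;
    uint32_t doubled = (uint32_t)(val * 2);
    return doubled > SOCK_MIN_SNDBUF ? doubled : SOCK_MIN_SNDBUF;
}

int main(void)
{
    int val = -1;
    printf("old:   sk_sndbuf = %u\n", old_sndbuf(val));    /* 4294967294 */
    printf("fixed: sk_sndbuf = %u\n", fixed_sndbuf(val));  /* SOCK_MIN_SNDBUF */
    return 0;
}
```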
1,664 | linux | 81cdb259fb6d8c1c4ecfeea389ff5a73c07f5755 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/81cdb259fb6d8c1c4ecfeea389ff5a73c07f5755 | KVM: x86: fix out-of-bounds accesses of rtc_eoi map
KVM was using arrays of size KVM_MAX_VCPUS with vcpu_id, but ID can be
bigger that the maximal number of VCPUs, resulting in out-of-bounds
access.
Found by syzkaller:
BUG: KASAN: slab-out-of-bounds in __apic_accept_irq+0xb33/0xb50 at addr [...]
Write of size 1 by task a.out/27101
CPU: 1 PID: 27101 Comm: a.out Not tainted 4.9.0-rc5+ #49
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
[...]
Call Trace:
[...] __apic_accept_irq+0xb33/0xb50 arch/x86/kvm/lapic.c:905
[...] kvm_apic_set_irq+0x10e/0x180 arch/x86/kvm/lapic.c:495
[...] kvm_irq_delivery_to_apic+0x732/0xc10 arch/x86/kvm/irq_comm.c:86
[...] ioapic_service+0x41d/0x760 arch/x86/kvm/ioapic.c:360
[...] ioapic_set_irq+0x275/0x6c0 arch/x86/kvm/ioapic.c:222
[...] kvm_ioapic_inject_all arch/x86/kvm/ioapic.c:235
[...] kvm_set_ioapic+0x223/0x310 arch/x86/kvm/ioapic.c:670
[...] kvm_vm_ioctl_set_irqchip arch/x86/kvm/x86.c:3668
[...] kvm_arch_vm_ioctl+0x1a08/0x23c0 arch/x86/kvm/x86.c:3999
[...] kvm_vm_ioctl+0x1fa/0x1a70 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3099
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Cc: stable@vger.kernel.org
Fixes: af1bae5497b9 ("KVM: x86: bump KVM_MAX_VCPU_ID to 1023")
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com> | 1 | static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
ioapic->rtc_status.pending_eoi = 0;
bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS);
}
| 131,623,410,041,848,010,000,000,000,000,000,000,000 | ioapic.c | 261,284,608,400,710,700,000,000,000,000,000,000,000 | [
"CWE-125"
] | CVE-2016-9777 | KVM in the Linux kernel before 4.8.12, when I/O APIC is enabled, does not properly restrict the VCPU index, which allows guest OS users to gain host OS privileges or cause a denial of service (out-of-bounds array access and host OS crash) via a crafted interrupt request, related to arch/x86/kvm/ioapic.c and arch/x86/kvm/ioapic.h. | https://nvd.nist.gov/vuln/detail/CVE-2016-9777 |
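The bitmap in the row above is sized for KVM_MAX_VCPUS entries but is indexed by vcpu_id, which the commit message notes can exceed the number of VCPUs (up to KVM_MAX_VCPU_ID). Sizing the map by the maximum index rather than the maximum count is the fix. The following standalone snippet illustrates that sizing rule; the constants are illustrative, not the kernel's exact values.

```c
/* Arrays indexed by an identifier must be sized by the identifier's maximum
 * value, not by how many objects can exist at once. */
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

#define MAX_VCPUS    288            /* how many VCPUs may run (illustrative)  */
#define MAX_VCPU_ID 1023            /* how large a VCPU id may be             */

static bool eoi_pending_old[MAX_VCPUS + 1];      /* indexed by id: too small  */
static bool eoi_pending_new[MAX_VCPU_ID + 1];    /* indexed by id: correct    */

static bool mark(bool *map, size_t len, unsigned id)
{
    if (id >= len)                  /* with the old sizing this rejects id 1000;
                                     * without any check it would corrupt
                                     * whatever follows the array */
        return false;
    map[id] = true;
    return true;
}

int main(void)
{
    unsigned vcpu_id = 1000;
    printf("old map: %s\n", mark(eoi_pending_old, MAX_VCPUS + 1, vcpu_id)
                                ? "ok" : "out of range");
    printf("new map: %s\n", mark(eoi_pending_new, MAX_VCPU_ID + 1, vcpu_id)
                                ? "ok" : "out of range");
    return 0;
}
```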
1,667 | linux | 9b57da0630c9fd36ed7a20fc0f98dc82cc0777fa | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/9b57da0630c9fd36ed7a20fc0f98dc82cc0777fa | netfilter: ipv6: nf_defrag: drop mangled skb on ream error
Dmitry Vyukov reported GPF in network stack that Andrey traced down to
negative nh offset in nf_ct_frag6_queue().
Problem is that all network headers before fragment header are pulled.
Normal ipv6 reassembly will drop the skb when errors occur further down
the line.
netfilter doesn't do this, and instead passed the original fragment
along. That was also fine back when netfilter ipv6 defrag worked with
cloned fragments, as the original, pristine fragment was passed on.
So we either have to undo the pull op, or discard such fragments.
Since they're malformed after all (e.g. overlapping fragment) it seems
preferrable to just drop them.
Same for temporary errors -- it doesn't make sense to accept (and
perhaps forward!) only some fragments of same datagram.
Fixes: 029f7f3b8701cc7ac ("netfilter: ipv6: nf_defrag: avoid/free clone operations")
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Debugged-by: Andrey Konovalov <andreyknvl@google.com>
Diagnosed-by: Eric Dumazet <Eric Dumazet <edumazet@google.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> | 1 | int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
{
struct net_device *dev = skb->dev;
int fhoff, nhoff, ret;
struct frag_hdr *fhdr;
struct frag_queue *fq;
struct ipv6hdr *hdr;
u8 prevhdr;
/* Jumbo payload inhibits frag. header */
if (ipv6_hdr(skb)->payload_len == 0) {
pr_debug("payload len = 0\n");
return -EINVAL;
}
if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
return -EINVAL;
if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
return -ENOMEM;
skb_set_transport_header(skb, fhoff);
hdr = ipv6_hdr(skb);
fhdr = (struct frag_hdr *)skb_transport_header(skb);
fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
if (fq == NULL) {
pr_debug("Can't find and can't create new queue\n");
return -ENOMEM;
}
spin_lock_bh(&fq->q.lock);
if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) {
ret = -EINVAL;
goto out_unlock;
}
/* after queue has assumed skb ownership, only 0 or -EINPROGRESS
* must be returned.
*/
ret = -EINPROGRESS;
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len &&
nf_ct_frag6_reasm(fq, skb, dev))
ret = 0;
out_unlock:
spin_unlock_bh(&fq->q.lock);
inet_frag_put(&fq->q, &nf_frags);
return ret;
}
| 162,173,779,084,684,320,000,000,000,000,000,000,000 | nf_conntrack_reasm.c | 248,081,613,101,425,030,000,000,000,000,000,000,000 | [
"CWE-787"
] | CVE-2016-9755 | The netfilter subsystem in the Linux kernel before 4.9 mishandles IPv6 reassembly, which allows local users to cause a denial of service (integer overflow, out-of-bounds write, and GPF) or possibly have unspecified other impact via a crafted application that makes socket, connect, and writev system calls, related to net/ipv6/netfilter/nf_conntrack_reasm.c and net/ipv6/netfilter/nf_defrag_ipv6_hooks.c. | https://nvd.nist.gov/vuln/detail/CVE-2016-9755 |
1,668 | linux | 9b57da0630c9fd36ed7a20fc0f98dc82cc0777fa | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/9b57da0630c9fd36ed7a20fc0f98dc82cc0777fa | netfilter: ipv6: nf_defrag: drop mangled skb on ream error
Dmitry Vyukov reported GPF in network stack that Andrey traced down to
negative nh offset in nf_ct_frag6_queue().
Problem is that all network headers before fragment header are pulled.
Normal ipv6 reassembly will drop the skb when errors occur further down
the line.
netfilter doesn't do this, and instead passed the original fragment
along. That was also fine back when netfilter ipv6 defrag worked with
cloned fragments, as the original, pristine fragment was passed on.
So we either have to undo the pull op, or discard such fragments.
Since they're malformed after all (e.g. overlapping fragment) it seems
preferrable to just drop them.
Same for temporary errors -- it doesn't make sense to accept (and
perhaps forward!) only some fragments of same datagram.
Fixes: 029f7f3b8701cc7ac ("netfilter: ipv6: nf_defrag: avoid/free clone operations")
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Debugged-by: Andrey Konovalov <andreyknvl@google.com>
Diagnosed-by: Eric Dumazet <Eric Dumazet <edumazet@google.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> | 1 | static unsigned int ipv6_defrag(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
int err;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* Previously seen (loopback)? */
if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
return NF_ACCEPT;
#endif
err = nf_ct_frag6_gather(state->net, skb,
nf_ct6_defrag_user(state->hook, skb));
/* queued */
if (err == -EINPROGRESS)
return NF_STOLEN;
return NF_ACCEPT;
}
| 58,843,208,546,829,070,000,000,000,000,000,000,000 | nf_defrag_ipv6_hooks.c | 263,801,013,687,032,600,000,000,000,000,000,000,000 | [
"CWE-787"
] | CVE-2016-9755 | The netfilter subsystem in the Linux kernel before 4.9 mishandles IPv6 reassembly, which allows local users to cause a denial of service (integer overflow, out-of-bounds write, and GPF) or possibly have unspecified other impact via a crafted application that makes socket, connect, and writev system calls, related to net/ipv6/netfilter/nf_conntrack_reasm.c and net/ipv6/netfilter/nf_defrag_ipv6_hooks.c. | https://nvd.nist.gov/vuln/detail/CVE-2016-9755 |
1,675 | linux | a0ac402cfcdc904f9772e1762b3fda112dcc56a0 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/a0ac402cfcdc904f9772e1762b3fda112dcc56a0 | Don't feed anything but regular iovec's to blk_rq_map_user_iov
In theory we could map other things, but there's a reason that function
is called "user_iov". Using anything else (like splice can do) just
confuses it.
Reported-and-tested-by: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | 1 | int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data,
const struct iov_iter *iter, gfp_t gfp_mask)
{
bool copy = false;
unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
struct bio *bio = NULL;
struct iov_iter i;
int ret;
if (map_data)
copy = true;
else if (iov_iter_alignment(iter) & align)
copy = true;
else if (queue_virt_boundary(q))
copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
i = *iter;
do {
ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
if (ret)
goto unmap_rq;
if (!bio)
bio = rq->bio;
} while (iov_iter_count(&i));
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
return 0;
unmap_rq:
__blk_rq_unmap_user(bio);
rq->bio = NULL;
return -EINVAL;
}
| 26,597,385,003,267,720,000,000,000,000,000,000,000 | blk-map.c | 40,067,109,018,686,807,000,000,000,000,000,000,000 | [
"CWE-416"
] | CVE-2016-9576 | The blk_rq_map_user_iov function in block/blk-map.c in the Linux kernel before 4.8.14 does not properly restrict the type of iterator, which allows local users to read or write to arbitrary kernel memory locations or cause a denial of service (use-after-free) by leveraging access to a /dev/sg device. | https://nvd.nist.gov/vuln/detail/CVE-2016-9576 |
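Per the commit message, the guard is simply to refuse any iterator that is not a plain user iovec before trying to map it. A hedged sketch of the entry check, assuming the iter_is_iovec() helper from linux/uio.h:

    struct bio *bio = NULL;
    struct iov_iter i;
    int ret;

    if (!iter_is_iovec(iter))
        goto fail;

    /* ... unchanged alignment/copy decisions and mapping loop ... */

fail:
    rq->bio = NULL;
    return -EINVAL;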
1,678 | libtiff | ae9365db1b271b62b35ce018eac8799b1d5e8a53 | https://github.com/vadz/libtiff | https://github.com/vadz/libtiff/commit/ae9365db1b271b62b35ce018eac8799b1d5e8a53 | * tools/tiffcrop.c: fix out-of-bound read of up to 3 bytes in
readContigTilesIntoBuffer(). Reported as MSVR 35092 by Axel Souchet
& Vishal Chauhan from the MSRC Vulnerabilities & Mitigations team. | 1 | static int readContigTilesIntoBuffer (TIFF* in, uint8* buf,
uint32 imagelength,
uint32 imagewidth,
uint32 tw, uint32 tl,
tsample_t spp, uint16 bps)
{
int status = 1;
tsample_t sample = 0;
tsample_t count = spp;
uint32 row, col, trow;
uint32 nrow, ncol;
uint32 dst_rowsize, shift_width;
uint32 bytes_per_sample, bytes_per_pixel;
uint32 trailing_bits, prev_trailing_bits;
uint32 tile_rowsize = TIFFTileRowSize(in);
uint32 src_offset, dst_offset;
uint32 row_offset, col_offset;
uint8 *bufp = (uint8*) buf;
unsigned char *src = NULL;
unsigned char *dst = NULL;
tsize_t tbytes = 0, tile_buffsize = 0;
tsize_t tilesize = TIFFTileSize(in);
unsigned char *tilebuf = NULL;
bytes_per_sample = (bps + 7) / 8;
bytes_per_pixel = ((bps * spp) + 7) / 8;
if ((bps % 8) == 0)
shift_width = 0;
else
{
if (bytes_per_pixel < (bytes_per_sample + 1))
shift_width = bytes_per_pixel;
else
shift_width = bytes_per_sample + 1;
}
tile_buffsize = tilesize;
if (tilesize == 0 || tile_rowsize == 0)
{
TIFFError("readContigTilesIntoBuffer", "Tile size or tile rowsize is zero");
exit(-1);
}
if (tilesize < (tsize_t)(tl * tile_rowsize))
{
#ifdef DEBUG2
TIFFError("readContigTilesIntoBuffer",
"Tilesize %lu is too small, using alternate calculation %u",
tilesize, tl * tile_rowsize);
#endif
tile_buffsize = tl * tile_rowsize;
if (tl != (tile_buffsize / tile_rowsize))
{
TIFFError("readContigTilesIntoBuffer", "Integer overflow when calculating buffer size.");
exit(-1);
}
}
tilebuf = _TIFFmalloc(tile_buffsize);
if (tilebuf == 0)
return 0;
dst_rowsize = ((imagewidth * bps * spp) + 7) / 8;
for (row = 0; row < imagelength; row += tl)
{
nrow = (row + tl > imagelength) ? imagelength - row : tl;
for (col = 0; col < imagewidth; col += tw)
{
tbytes = TIFFReadTile(in, tilebuf, col, row, 0, 0);
if (tbytes < tilesize && !ignore)
{
TIFFError(TIFFFileName(in),
"Error, can't read tile at row %lu col %lu, Read %lu bytes of %lu",
(unsigned long) col, (unsigned long) row, (unsigned long)tbytes,
(unsigned long)tilesize);
status = 0;
_TIFFfree(tilebuf);
return status;
}
row_offset = row * dst_rowsize;
col_offset = ((col * bps * spp) + 7)/ 8;
bufp = buf + row_offset + col_offset;
if (col + tw > imagewidth)
ncol = imagewidth - col;
else
ncol = tw;
/* Each tile scanline will start on a byte boundary but it
* has to be merged into the scanline for the entire
* image buffer and the previous segment may not have
* ended on a byte boundary
*/
/* Optimization for common bit depths, all samples */
if (((bps % 8) == 0) && (count == spp))
{
for (trow = 0; trow < nrow; trow++)
{
src_offset = trow * tile_rowsize;
_TIFFmemcpy (bufp, tilebuf + src_offset, (ncol * spp * bps) / 8);
bufp += (imagewidth * bps * spp) / 8;
}
}
else
{
/* Bit depths not a multiple of 8 and/or extract fewer than spp samples */
prev_trailing_bits = trailing_bits = 0;
trailing_bits = (ncol * bps * spp) % 8;
/* for (trow = 0; tl < nrow; trow++) */
for (trow = 0; trow < nrow; trow++)
{
src_offset = trow * tile_rowsize;
src = tilebuf + src_offset;
dst_offset = (row + trow) * dst_rowsize;
dst = buf + dst_offset + col_offset;
switch (shift_width)
{
case 0: if (extractContigSamplesBytes (src, dst, ncol, sample,
spp, bps, count, 0, ncol))
{
TIFFError("readContigTilesIntoBuffer",
"Unable to extract row %d from tile %lu",
row, (unsigned long)TIFFCurrentTile(in));
return 1;
}
break;
case 1: if (bps == 1)
{
if (extractContigSamplesShifted8bits (src, dst, ncol,
sample, spp,
bps, count,
0, ncol,
prev_trailing_bits))
{
TIFFError("readContigTilesIntoBuffer",
"Unable to extract row %d from tile %lu",
row, (unsigned long)TIFFCurrentTile(in));
return 1;
}
break;
}
else
if (extractContigSamplesShifted16bits (src, dst, ncol,
sample, spp,
bps, count,
0, ncol,
prev_trailing_bits))
{
TIFFError("readContigTilesIntoBuffer",
"Unable to extract row %d from tile %lu",
row, (unsigned long)TIFFCurrentTile(in));
return 1;
}
break;
case 2: if (extractContigSamplesShifted24bits (src, dst, ncol,
sample, spp,
bps, count,
0, ncol,
prev_trailing_bits))
{
TIFFError("readContigTilesIntoBuffer",
"Unable to extract row %d from tile %lu",
row, (unsigned long)TIFFCurrentTile(in));
return 1;
}
break;
case 3:
case 4:
case 5: if (extractContigSamplesShifted32bits (src, dst, ncol,
sample, spp,
bps, count,
0, ncol,
prev_trailing_bits))
{
TIFFError("readContigTilesIntoBuffer",
"Unable to extract row %d from tile %lu",
row, (unsigned long)TIFFCurrentTile(in));
return 1;
}
break;
default: TIFFError("readContigTilesIntoBuffer", "Unsupported bit depth %d", bps);
return 1;
}
}
prev_trailing_bits += trailing_bits;
/* if (prev_trailing_bits > 7) */
/* prev_trailing_bits-= 8; */
}
}
}
_TIFFfree(tilebuf);
return status;
}
| 246,301,533,722,000,940,000,000,000,000,000,000,000 | None | null | [
"CWE-125"
] | CVE-2016-9539 | tools/tiffcrop.c in libtiff 4.0.6 has an out-of-bounds read in readContigTilesIntoBuffer(). Reported as MSVR 35092. | https://nvd.nist.gov/vuln/detail/CVE-2016-9539 |
1,680 | libtiff | 43c0b81a818640429317c80fea1e66771e85024b | https://github.com/vadz/libtiff | https://github.com/vadz/libtiff/commit/43c0b81a818640429317c80fea1e66771e85024b#diff-c8b4b355f9b5c06d585b23138e1c185f | * tools/tiffcp.c: fix read of undefined variable in case of missing
required tags. Found on test case of MSVR 35100.
* tools/tiffcrop.c: fix read of undefined buffer in
readContigStripsIntoBuffer() due to uint16 overflow. Probably not a
security issue but I can be wrong. Reported as MSVR 35100 by Axel
Souchet from the MSRC Vulnerabilities & Mitigations team. | 1 | static int readContigStripsIntoBuffer (TIFF* in, uint8* buf)
{
uint8* bufp = buf;
int32 bytes_read = 0;
uint16 strip, nstrips = TIFFNumberOfStrips(in);
uint32 stripsize = TIFFStripSize(in);
uint32 rows = 0;
uint32 rps = TIFFGetFieldDefaulted(in, TIFFTAG_ROWSPERSTRIP, &rps);
tsize_t scanline_size = TIFFScanlineSize(in);
if (scanline_size == 0) {
TIFFError("", "TIFF scanline size is zero!");
return 0;
}
for (strip = 0; strip < nstrips; strip++) {
bytes_read = TIFFReadEncodedStrip (in, strip, bufp, -1);
rows = bytes_read / scanline_size;
if ((strip < (nstrips - 1)) && (bytes_read != (int32)stripsize))
TIFFError("", "Strip %d: read %lu bytes, strip size %lu",
(int)strip + 1, (unsigned long) bytes_read,
(unsigned long)stripsize);
if (bytes_read < 0 && !ignore) {
TIFFError("", "Error reading strip %lu after %lu rows",
(unsigned long) strip, (unsigned long)rows);
return 0;
}
bufp += bytes_read;
}
return 1;
} /* end readContigStripsIntoBuffer */
| 271,787,766,249,009,200,000,000,000,000,000,000,000 | None | null | [
"CWE-190"
] | CVE-2016-9538 | tools/tiffcrop.c in libtiff 4.0.6 reads an undefined buffer in readContigStripsIntoBuffer() because of a uint16 integer overflow. Reported as MSVR 35100. | https://nvd.nist.gov/vuln/detail/CVE-2016-9538 |
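The uint16 overflow is in the strip counter: TIFFNumberOfStrips() returns a 32-bit count, so a crafted file with more than 65535 strips truncates nstrips, later strips are never read, and the caller consumes uninitialized buffer contents. A sketch of the obvious widening, using libtiff's own types:

    uint32  strip;
    uint32  nstrips = TIFFNumberOfStrips(in);   /* tstrip_t is 32 bits wide */

    for (strip = 0; strip < nstrips; strip++) {
        bytes_read = TIFFReadEncodedStrip(in, strip, bufp, -1);
        /* error handling as in the original */
        bufp += bytes_read;
    }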
1,704 | w3m | d43527cfa0dbb3ccefec4a6f7b32c1434739aa29 | https://github.com/tats/w3m | https://github.com/tats/w3m/commit/d43527cfa0dbb3ccefec4a6f7b32c1434739aa29 | Merge pull request #27 from kcwu/fix-strgrow
Fix potential heap buffer corruption due to Strgrow | 1 | Strgrow(Str x)
{
char *old = x->ptr;
int newlen;
newlen = x->length * 6 / 5;
if (newlen == x->length)
newlen += 2;
x->ptr = GC_MALLOC_ATOMIC(newlen);
x->area_size = newlen;
bcopy((void *)old, (void *)x->ptr, x->length);
GC_free(old);
}
| 127,289,135,144,358,380,000,000,000,000,000,000,000 | None | null | [
"CWE-119"
] | CVE-2016-9442 | An issue was discovered in the Tatsuya Kinoshita w3m fork before 0.5.3-31. w3m allows remote attackers to cause memory corruption in certain conditions via a crafted HTML page. | https://nvd.nist.gov/vuln/detail/CVE-2016-9442 |
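Strgrow() computes the new capacity from x->length, so for short strings newlen can come out smaller than the x->length + 2 bytes a caller needs to append one more character plus the terminator, and the next write runs past the allocation. A hedged sketch of one hardening consistent with the commit subject; the actual upstream patch may differ in detail:

static void
Strgrow(Str x)
{
    char *old = x->ptr;
    int newlen;

    newlen = x->length * 6 / 5;
    if (newlen < x->length + 2)
        newlen = x->length + 2; /* always leave room for one char + NUL */
    x->ptr = GC_MALLOC_ATOMIC(newlen);
    x->area_size = newlen;
    bcopy((void *)old, (void *)x->ptr, x->length);
    GC_free(old);
}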
1,706 | linux | 93362fa47fe98b62e4a34ab408c4a418432e7939 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/93362fa47fe98b62e4a34ab408c4a418432e7939 | sysctl: Drop reference added by grab_header in proc_sys_readdir
Fixes CVE-2016-9191: proc_sys_readdir doesn't drop the reference
added by grab_header when returning from the !dir_emit_dots path.
It can cause any path that calls unregister_sysctl_table to
wait forever.
The calltrace of CVE-2016-9191:
[ 5535.960522] Call Trace:
[ 5535.963265] [<ffffffff817cdaaf>] schedule+0x3f/0xa0
[ 5535.968817] [<ffffffff817d33fb>] schedule_timeout+0x3db/0x6f0
[ 5535.975346] [<ffffffff817cf055>] ? wait_for_completion+0x45/0x130
[ 5535.982256] [<ffffffff817cf0d3>] wait_for_completion+0xc3/0x130
[ 5535.988972] [<ffffffff810d1fd0>] ? wake_up_q+0x80/0x80
[ 5535.994804] [<ffffffff8130de64>] drop_sysctl_table+0xc4/0xe0
[ 5536.001227] [<ffffffff8130de17>] drop_sysctl_table+0x77/0xe0
[ 5536.007648] [<ffffffff8130decd>] unregister_sysctl_table+0x4d/0xa0
[ 5536.014654] [<ffffffff8130deff>] unregister_sysctl_table+0x7f/0xa0
[ 5536.021657] [<ffffffff810f57f5>] unregister_sched_domain_sysctl+0x15/0x40
[ 5536.029344] [<ffffffff810d7704>] partition_sched_domains+0x44/0x450
[ 5536.036447] [<ffffffff817d0761>] ? __mutex_unlock_slowpath+0x111/0x1f0
[ 5536.043844] [<ffffffff81167684>] rebuild_sched_domains_locked+0x64/0xb0
[ 5536.051336] [<ffffffff8116789d>] update_flag+0x11d/0x210
[ 5536.057373] [<ffffffff817cf61f>] ? mutex_lock_nested+0x2df/0x450
[ 5536.064186] [<ffffffff81167acb>] ? cpuset_css_offline+0x1b/0x60
[ 5536.070899] [<ffffffff810fce3d>] ? trace_hardirqs_on+0xd/0x10
[ 5536.077420] [<ffffffff817cf61f>] ? mutex_lock_nested+0x2df/0x450
[ 5536.084234] [<ffffffff8115a9f5>] ? css_killed_work_fn+0x25/0x220
[ 5536.091049] [<ffffffff81167ae5>] cpuset_css_offline+0x35/0x60
[ 5536.097571] [<ffffffff8115aa2c>] css_killed_work_fn+0x5c/0x220
[ 5536.104207] [<ffffffff810bc83f>] process_one_work+0x1df/0x710
[ 5536.110736] [<ffffffff810bc7c0>] ? process_one_work+0x160/0x710
[ 5536.117461] [<ffffffff810bce9b>] worker_thread+0x12b/0x4a0
[ 5536.123697] [<ffffffff810bcd70>] ? process_one_work+0x710/0x710
[ 5536.130426] [<ffffffff810c3f7e>] kthread+0xfe/0x120
[ 5536.135991] [<ffffffff817d4baf>] ret_from_fork+0x1f/0x40
[ 5536.142041] [<ffffffff810c3e80>] ? kthread_create_on_node+0x230/0x230
One cgroup maintainer mentioned that "cgroup is trying to offline
a cpuset css, which takes place under cgroup_mutex. The offlining
ends up trying to drain active usages of a sysctl table which apparently
is not happening."
The real reason is that proc_sys_readdir doesn't drop the reference
added by grab_header when returning from the !dir_emit_dots path. So this cpuset
offline path will wait here forever.
See here for details: http://www.openwall.com/lists/oss-security/2016/11/04/13
Fixes: f0c3b5093add ("[readdir] convert procfs")
Cc: stable@vger.kernel.org
Reported-by: CAI Qian <caiqian@redhat.com>
Tested-by: Yang Shukui <yangshukui@huawei.com>
Signed-off-by: Zhou Chengming <zhouchengming1@huawei.com>
Acked-by: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com> | 1 | static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
{
struct ctl_table_header *head = grab_header(file_inode(file));
struct ctl_table_header *h = NULL;
struct ctl_table *entry;
struct ctl_dir *ctl_dir;
unsigned long pos;
if (IS_ERR(head))
return PTR_ERR(head);
ctl_dir = container_of(head, struct ctl_dir, header);
if (!dir_emit_dots(file, ctx))
return 0;
pos = 2;
for (first_entry(ctl_dir, &h, &entry); h; next_entry(&h, &entry)) {
if (!scan(h, entry, &pos, file, ctx)) {
sysctl_head_finish(h);
break;
}
}
sysctl_head_finish(head);
return 0;
}
| 225,602,089,609,046,580,000,000,000,000,000,000,000 | proc_sysctl.c | 174,833,638,602,164,460,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2016-9191 | The cgroup offline implementation in the Linux kernel through 4.8.11 mishandles certain drain operations, which allows local users to cause a denial of service (system hang) by leveraging access to a container environment for executing a crafted application, as demonstrated by trinity. | https://nvd.nist.gov/vuln/detail/CVE-2016-9191 |
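As the commit message says, the only change needed is to drop the reference taken by grab_header() on the early-return path as well; a minimal sketch of the fixed control flow:

    if (!dir_emit_dots(file, ctx))
        goto out;

    pos = 2;

    for (first_entry(ctl_dir, &h, &entry); h; next_entry(&h, &entry)) {
        if (!scan(h, entry, &pos, file, ctx)) {
            sysctl_head_finish(h);
            break;
        }
    }
out:
    sysctl_head_finish(head);
    return 0;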
1,710 | linux | 9590232bb4f4cc824f3425a6e1349afbe6d6d2b7 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/9590232bb4f4cc824f3425a6e1349afbe6d6d2b7 | staging/android/ion : fix a race condition in the ion driver
There is a use-after-free problem in the ion driver.
This is caused by a race condition in the ion_ioctl()
function.
A handle has ref count of 1 and two tasks on different
cpus calls ION_IOC_FREE simultaneously.
cpu 0                                   cpu 1
-------------------------------------------------------
ion_handle_get_by_id()
(ref == 2)
                                        ion_handle_get_by_id()
                                        (ref == 3)
ion_free()
(ref == 2)
ion_handle_put()
(ref == 1)
                                        ion_free()
                                        (ref == 0 so ion_handle_destroy() is
                                        called
                                        and the handle is freed.)
ion_handle_put() is called and it
decreases the slub's next free pointer
The problem is detected as an unaligned access in the
spin lock functions since it uses load exclusive
instruction. In some cases it corrupts the slub's
free pointer which causes a mis-aligned access to the
next free pointer.(kmalloc returns a pointer like
ffffc0745b4580aa). And it causes lots of other
hard-to-debug problems.
This symptom is caused since the first member in the
ion_handle structure is the reference count and the
ion driver decrements the reference after it has been
freed.
To fix this problem client->lock mutex is extended
to protect all the codes that uses the handle.
Signed-off-by: Eun Taik Lee <eun.taik.lee@samsung.com>
Reviewed-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 1 | static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct ion_client *client = filp->private_data;
struct ion_device *dev = client->dev;
struct ion_handle *cleanup_handle = NULL;
int ret = 0;
unsigned int dir;
union {
struct ion_fd_data fd;
struct ion_allocation_data allocation;
struct ion_handle_data handle;
struct ion_custom_data custom;
} data;
dir = ion_ioctl_dir(cmd);
if (_IOC_SIZE(cmd) > sizeof(data))
return -EINVAL;
if (dir & _IOC_WRITE)
if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
return -EFAULT;
switch (cmd) {
case ION_IOC_ALLOC:
{
struct ion_handle *handle;
handle = ion_alloc(client, data.allocation.len,
data.allocation.align,
data.allocation.heap_id_mask,
data.allocation.flags);
if (IS_ERR(handle))
return PTR_ERR(handle);
data.allocation.handle = handle->id;
cleanup_handle = handle;
break;
}
case ION_IOC_FREE:
{
struct ion_handle *handle;
handle = ion_handle_get_by_id(client, data.handle.handle);
if (IS_ERR(handle))
return PTR_ERR(handle);
ion_free(client, handle);
ion_handle_put(handle);
break;
}
case ION_IOC_SHARE:
case ION_IOC_MAP:
{
struct ion_handle *handle;
handle = ion_handle_get_by_id(client, data.handle.handle);
if (IS_ERR(handle))
return PTR_ERR(handle);
data.fd.fd = ion_share_dma_buf_fd(client, handle);
ion_handle_put(handle);
if (data.fd.fd < 0)
ret = data.fd.fd;
break;
}
case ION_IOC_IMPORT:
{
struct ion_handle *handle;
handle = ion_import_dma_buf_fd(client, data.fd.fd);
if (IS_ERR(handle))
ret = PTR_ERR(handle);
else
data.handle.handle = handle->id;
break;
}
case ION_IOC_SYNC:
{
ret = ion_sync_for_device(client, data.fd.fd);
break;
}
case ION_IOC_CUSTOM:
{
if (!dev->custom_ioctl)
return -ENOTTY;
ret = dev->custom_ioctl(client, data.custom.cmd,
data.custom.arg);
break;
}
default:
return -ENOTTY;
}
if (dir & _IOC_READ) {
if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
if (cleanup_handle)
ion_free(client, cleanup_handle);
return -EFAULT;
}
}
return ret;
}
| 33,118,785,611,393,110,000,000,000,000,000,000,000 | None | null | [
"CWE-416"
] | CVE-2016-9120 | Race condition in the ion_ioctl function in drivers/staging/android/ion/ion.c in the Linux kernel before 4.6 allows local users to gain privileges or cause a denial of service (use-after-free) by calling ION_IOC_FREE on two CPUs at the same time. | https://nvd.nist.gov/vuln/detail/CVE-2016-9120 |
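The race is closed by holding client->lock across the lookup, free and put in ION_IOC_FREE, so a concurrent free cannot drop the last reference in between. A hedged sketch of that case, assuming _nolock variants of the existing helpers (the helper names here are illustrative):

    case ION_IOC_FREE:
    {
        struct ion_handle *handle;

        mutex_lock(&client->lock);
        handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
        if (IS_ERR(handle)) {
            mutex_unlock(&client->lock);
            return PTR_ERR(handle);
        }
        ion_free_nolock(client, handle);
        ion_handle_put_nolock(handle);
        mutex_unlock(&client->lock);
        break;
    }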
1,711 | linux | 05692d7005a364add85c6e25a6c4447ce08f913a | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/05692d7005a364add85c6e25a6c4447ce08f913a | vfio/pci: Fix integer overflows, bitmask check
The VFIO_DEVICE_SET_IRQS ioctl did not sufficiently sanitize
user-supplied integers, potentially allowing memory corruption. This
patch adds appropriate integer overflow checks, checks the range bounds
for VFIO_IRQ_SET_DATA_NONE, and also verifies that only a single element
in the VFIO_IRQ_SET_DATA_TYPE_MASK bitmask is set.
VFIO_IRQ_SET_ACTION_TYPE_MASK is already correctly checked later in
vfio_pci_set_irqs_ioctl().
Furthermore, a kzalloc is changed to a kcalloc because the use of a
kzalloc with an integer multiplication allowed an integer overflow
condition to be reached without this patch. kcalloc checks for overflow
and should prevent a similar occurrence.
Signed-off-by: Vlad Tsyrklevich <vlad@tsyrklevich.net>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com> | 1 | static long vfio_pci_ioctl(void *device_data,
unsigned int cmd, unsigned long arg)
{
struct vfio_pci_device *vdev = device_data;
unsigned long minsz;
if (cmd == VFIO_DEVICE_GET_INFO) {
struct vfio_device_info info;
minsz = offsetofend(struct vfio_device_info, num_irqs);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
info.flags = VFIO_DEVICE_FLAGS_PCI;
if (vdev->reset_works)
info.flags |= VFIO_DEVICE_FLAGS_RESET;
info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct pci_dev *pdev = vdev->pdev;
struct vfio_region_info info;
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
int i, ret;
minsz = offsetofend(struct vfio_region_info, offset);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
switch (info.index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = pdev->cfg_size;
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
break;
case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = pci_resource_len(pdev, info.index);
if (!info.size) {
info.flags = 0;
break;
}
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
if (vdev->bar_mmap_supported[info.index]) {
info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
if (info.index == vdev->msix_bar) {
ret = msix_sparse_mmap_cap(vdev, &caps);
if (ret)
return ret;
}
}
break;
case VFIO_PCI_ROM_REGION_INDEX:
{
void __iomem *io;
size_t size;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.flags = 0;
/* Report the BAR size, not the ROM size */
info.size = pci_resource_len(pdev, info.index);
if (!info.size) {
/* Shadow ROMs appear as PCI option ROMs */
if (pdev->resource[PCI_ROM_RESOURCE].flags &
IORESOURCE_ROM_SHADOW)
info.size = 0x20000;
else
break;
}
/* Is it really there? */
io = pci_map_rom(pdev, &size);
if (!io || !size) {
info.size = 0;
break;
}
pci_unmap_rom(pdev, io);
info.flags = VFIO_REGION_INFO_FLAG_READ;
break;
}
case VFIO_PCI_VGA_REGION_INDEX:
if (!vdev->has_vga)
return -EINVAL;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = 0xc0000;
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
break;
default:
if (info.index >=
VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
i = info.index - VFIO_PCI_NUM_REGIONS;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = vdev->region[i].size;
info.flags = vdev->region[i].flags;
ret = region_type_cap(vdev, &caps,
vdev->region[i].type,
vdev->region[i].subtype);
if (ret)
return ret;
}
if (caps.size) {
info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
if (info.argsz < sizeof(info) + caps.size) {
info.argsz = sizeof(info) + caps.size;
info.cap_offset = 0;
} else {
vfio_info_cap_shift(&caps, sizeof(info));
if (copy_to_user((void __user *)arg +
sizeof(info), caps.buf,
caps.size)) {
kfree(caps.buf);
return -EFAULT;
}
info.cap_offset = sizeof(info);
}
kfree(caps.buf);
}
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
struct vfio_irq_info info;
minsz = offsetofend(struct vfio_irq_info, count);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
return -EINVAL;
switch (info.index) {
case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
case VFIO_PCI_REQ_IRQ_INDEX:
break;
case VFIO_PCI_ERR_IRQ_INDEX:
if (pci_is_pcie(vdev->pdev))
break;
/* pass thru to return error */
default:
return -EINVAL;
}
info.flags = VFIO_IRQ_INFO_EVENTFD;
info.count = vfio_pci_get_irq_count(vdev, info.index);
if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
info.flags |= (VFIO_IRQ_INFO_MASKABLE |
VFIO_IRQ_INFO_AUTOMASKED);
else
info.flags |= VFIO_IRQ_INFO_NORESIZE;
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
u8 *data = NULL;
int ret = 0;
minsz = offsetofend(struct vfio_irq_set, count);
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
VFIO_IRQ_SET_ACTION_TYPE_MASK))
return -EINVAL;
if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
size_t size;
int max = vfio_pci_get_irq_count(vdev, hdr.index);
if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
size = sizeof(uint8_t);
else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
size = sizeof(int32_t);
else
return -EINVAL;
if (hdr.argsz - minsz < hdr.count * size ||
hdr.start >= max || hdr.start + hdr.count > max)
return -EINVAL;
data = memdup_user((void __user *)(arg + minsz),
hdr.count * size);
if (IS_ERR(data))
return PTR_ERR(data);
}
mutex_lock(&vdev->igate);
ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
hdr.start, hdr.count, data);
mutex_unlock(&vdev->igate);
kfree(data);
return ret;
} else if (cmd == VFIO_DEVICE_RESET) {
return vdev->reset_works ?
pci_try_reset_function(vdev->pdev) : -EINVAL;
} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
struct vfio_pci_hot_reset_info hdr;
struct vfio_pci_fill_info fill = { 0 };
struct vfio_pci_dependent_device *devices = NULL;
bool slot = false;
int ret = 0;
minsz = offsetofend(struct vfio_pci_hot_reset_info, count);
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
if (hdr.argsz < minsz)
return -EINVAL;
hdr.flags = 0;
/* Can we do a slot or bus reset or neither? */
if (!pci_probe_reset_slot(vdev->pdev->slot))
slot = true;
else if (pci_probe_reset_bus(vdev->pdev->bus))
return -ENODEV;
/* How many devices are affected? */
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_count_devs,
&fill.max, slot);
if (ret)
return ret;
WARN_ON(!fill.max); /* Should always be at least one */
/*
* If there's enough space, fill it now, otherwise return
* -ENOSPC and the number of devices affected.
*/
if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
ret = -ENOSPC;
hdr.count = fill.max;
goto reset_info_exit;
}
devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
if (!devices)
return -ENOMEM;
fill.devices = devices;
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_fill_devs,
&fill, slot);
/*
* If a device was removed between counting and filling,
* we may come up short of fill.max. If a device was
* added, we'll have a return of -EAGAIN above.
*/
if (!ret)
hdr.count = fill.cur;
reset_info_exit:
if (copy_to_user((void __user *)arg, &hdr, minsz))
ret = -EFAULT;
if (!ret) {
if (copy_to_user((void __user *)(arg + minsz), devices,
hdr.count * sizeof(*devices)))
ret = -EFAULT;
}
kfree(devices);
return ret;
} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
struct vfio_pci_hot_reset hdr;
int32_t *group_fds;
struct vfio_pci_group_entry *groups;
struct vfio_pci_group_info info;
bool slot = false;
int i, count = 0, ret = 0;
minsz = offsetofend(struct vfio_pci_hot_reset, count);
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
if (hdr.argsz < minsz || hdr.flags)
return -EINVAL;
/* Can we do a slot or bus reset or neither? */
if (!pci_probe_reset_slot(vdev->pdev->slot))
slot = true;
else if (pci_probe_reset_bus(vdev->pdev->bus))
return -ENODEV;
/*
* We can't let userspace give us an arbitrarily large
* buffer to copy, so verify how many we think there
* could be. Note groups can have multiple devices so
* one group per device is the max.
*/
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_count_devs,
&count, slot);
if (ret)
return ret;
/* Somewhere between 1 and count is OK */
if (!hdr.count || hdr.count > count)
return -EINVAL;
group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
if (!group_fds || !groups) {
kfree(group_fds);
kfree(groups);
return -ENOMEM;
}
if (copy_from_user(group_fds, (void __user *)(arg + minsz),
hdr.count * sizeof(*group_fds))) {
kfree(group_fds);
kfree(groups);
return -EFAULT;
}
/*
* For each group_fd, get the group through the vfio external
* user interface and store the group and iommu ID. This
* ensures the group is held across the reset.
*/
for (i = 0; i < hdr.count; i++) {
struct vfio_group *group;
struct fd f = fdget(group_fds[i]);
if (!f.file) {
ret = -EBADF;
break;
}
group = vfio_group_get_external_user(f.file);
fdput(f);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
break;
}
groups[i].group = group;
groups[i].id = vfio_external_user_iommu_id(group);
}
kfree(group_fds);
/* release reference to groups on error */
if (ret)
goto hot_reset_release;
info.count = hdr.count;
info.groups = groups;
/*
* Test whether all the affected devices are contained
* by the set of groups provided by the user.
*/
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_validate_devs,
&info, slot);
if (!ret)
/* User has access, do the reset */
ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
pci_try_reset_bus(vdev->pdev->bus);
hot_reset_release:
for (i--; i >= 0; i--)
vfio_group_put_external_user(groups[i].group);
kfree(groups);
return ret;
}
return -ENOTTY;
}
| 14,003,092,725,819,900,000,000,000,000,000,000,000 | vfio_pci.c | 50,249,264,138,078,830,000,000,000,000,000,000,000 | [
"CWE-190"
] | CVE-2016-9083 | drivers/vfio/pci/vfio_pci.c in the Linux kernel through 4.8.11 allows local users to bypass integer overflow checks, and cause a denial of service (memory corruption) or have unspecified other impact, by leveraging access to a vfio PCI device file for a VFIO_DEVICE_SET_IRQS ioctl call, aka a "state machine confusion bug." | https://nvd.nist.gov/vuln/detail/CVE-2016-9083 |
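Condensed, the hardening described in the commit message is three extra checks in the VFIO_DEVICE_SET_IRQS branch: reject hdr.start + hdr.count wrap-around, apply the range check to the DATA_NONE case too, and derive the element size from exactly one data-type bit. A hedged sketch of that branch:

    } else if (cmd == VFIO_DEVICE_SET_IRQS) {
        struct vfio_irq_set hdr;
        size_t size;
        u8 *data = NULL;
        int max, ret = 0;

        minsz = offsetofend(struct vfio_irq_set, count);

        if (copy_from_user(&hdr, (void __user *)arg, minsz))
            return -EFAULT;

        if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
            hdr.count >= (U32_MAX - hdr.start) ||
            hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
                          VFIO_IRQ_SET_ACTION_TYPE_MASK))
            return -EINVAL;

        max = vfio_pci_get_irq_count(vdev, hdr.index);
        if (hdr.start >= max || hdr.start + hdr.count > max)
            return -EINVAL;

        switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
        case VFIO_IRQ_SET_DATA_NONE:
            size = 0;
            break;
        case VFIO_IRQ_SET_DATA_BOOL:
            size = sizeof(uint8_t);
            break;
        case VFIO_IRQ_SET_DATA_EVENTFD:
            size = sizeof(int32_t);
            break;
        default:    /* zero or more than one type bit set */
            return -EINVAL;
        }

        if (size) {
            if (hdr.argsz - minsz < hdr.count * size)
                return -EINVAL;
            data = memdup_user((void __user *)(arg + minsz),
                               hdr.count * size);
            if (IS_ERR(data))
                return PTR_ERR(data);
        }

        /* vfio_pci_set_irqs_ioctl() under igate, as before */
    }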
1,712 | openssh-portable | ec165c392ca54317dbe3064a8c200de6531e89ad | https://github.com/openssh/openssh-portable | https://github.com/openssh/openssh-portable/commit/ec165c392ca54317dbe3064a8c200de6531e89ad | upstream commit
Unregister the KEXINIT handler after message has been
received. Otherwise an unauthenticated peer can repeat the KEXINIT and cause
allocation of up to 128MB -- until the connection is closed. Reported by
shilei-c at 360.cn
Upstream-ID: 43649ae12a27ef94290db16d1a98294588b75c05 | 1 | kex_input_kexinit(int type, u_int32_t seq, void *ctxt)
{
struct ssh *ssh = ctxt;
struct kex *kex = ssh->kex;
const u_char *ptr;
u_int i;
size_t dlen;
int r;
debug("SSH2_MSG_KEXINIT received");
if (kex == NULL)
return SSH_ERR_INVALID_ARGUMENT;
ptr = sshpkt_ptr(ssh, &dlen);
if ((r = sshbuf_put(kex->peer, ptr, dlen)) != 0)
return r;
/* discard packet */
for (i = 0; i < KEX_COOKIE_LEN; i++)
if ((r = sshpkt_get_u8(ssh, NULL)) != 0)
return r;
for (i = 0; i < PROPOSAL_MAX; i++)
if ((r = sshpkt_get_string(ssh, NULL, NULL)) != 0)
return r;
/*
* XXX RFC4253 sec 7: "each side MAY guess" - currently no supported
* KEX method has the server move first, but a server might be using
* a custom method or one that we otherwise don't support. We should
* be prepared to remember first_kex_follows here so we can eat a
* packet later.
* XXX2 - RFC4253 is kind of ambiguous on what first_kex_follows means
* for cases where the server *doesn't* go first. I guess we should
* ignore it when it is set for these cases, which is what we do now.
*/
if ((r = sshpkt_get_u8(ssh, NULL)) != 0 || /* first_kex_follows */
(r = sshpkt_get_u32(ssh, NULL)) != 0 || /* reserved */
(r = sshpkt_get_end(ssh)) != 0)
return r;
if (!(kex->flags & KEX_INIT_SENT))
if ((r = kex_send_kexinit(ssh)) != 0)
return r;
if ((r = kex_choose_conf(ssh)) != 0)
return r;
if (kex->kex_type < KEX_MAX && kex->kex[kex->kex_type] != NULL)
return (kex->kex[kex->kex_type])(ssh);
return SSH_ERR_INTERNAL_ERROR;
}
| 339,023,894,075,827,400,000,000,000,000,000,000,000 | kex.c | 228,822,789,731,076,200,000,000,000,000,000,000,000 | [
"CWE-399"
] | CVE-2016-8858 | The kex_input_kexinit function in kex.c in OpenSSH 6.x and 7.x through 7.3 allows remote attackers to cause a denial of service (memory consumption) by sending many duplicate KEXINIT requests. NOTE: a third party reports that "OpenSSH upstream does not consider this as a security issue." | https://nvd.nist.gov/vuln/detail/CVE-2016-8858 |
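The fix the commit message describes is a single call near the top of the handler: once a KEXINIT has been consumed, deregister the handler so the peer cannot replay the message and force another proposal buffer to be allocated. Sketch, using OpenSSH's dispatch API:

    debug("SSH2_MSG_KEXINIT received");
    if (kex == NULL)
        return SSH_ERR_INVALID_ARGUMENT;

    /* no further KEXINIT is expected until this exchange completes */
    ssh_dispatch_set(ssh, SSH2_MSG_KEXINIT, NULL);

    ptr = sshpkt_ptr(ssh, &dlen);
    if ((r = sshbuf_put(kex->peer, ptr, dlen)) != 0)
        return r;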
1,715 | linux | ded89912156b1a47d940a0c954c43afbabd0c42c | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/ded89912156b1a47d940a0c954c43afbabd0c42c | brcmfmac: avoid potential stack overflow in brcmf_cfg80211_start_ap()
User-space can choose to omit NL80211_ATTR_SSID and only provide raw
IE TLV data. When doing so it can provide SSID IE with length exceeding
the allowed size. The driver further processes this IE copying it
into a local variable without checking the length. Hence stack can be
corrupted and used as exploit.
Cc: stable@vger.kernel.org # v4.7
Reported-by: Daxing Guo <freener.gdx@gmail.com>
Reviewed-by: Hante Meuleman <hante.meuleman@broadcom.com>
Reviewed-by: Pieter-Paul Giesberts <pieter-paul.giesberts@broadcom.com>
Reviewed-by: Franky Lin <franky.lin@broadcom.com>
Signed-off-by: Arend van Spriel <arend.vanspriel@broadcom.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org> | 1 | brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_ap_settings *settings)
{
s32 ie_offset;
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
const struct brcmf_tlv *ssid_ie;
const struct brcmf_tlv *country_ie;
struct brcmf_ssid_le ssid_le;
s32 err = -EPERM;
const struct brcmf_tlv *rsn_ie;
const struct brcmf_vs_tlv *wpa_ie;
struct brcmf_join_params join_params;
enum nl80211_iftype dev_role;
struct brcmf_fil_bss_enable_le bss_enable;
u16 chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
bool mbss;
int is_11d;
brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
settings->chandef.chan->hw_value,
settings->chandef.center_freq1, settings->chandef.width,
settings->beacon_interval, settings->dtim_period);
brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n",
settings->ssid, settings->ssid_len, settings->auth_type,
settings->inactivity_timeout);
dev_role = ifp->vif->wdev.iftype;
mbss = ifp->vif->mbss;
/* store current 11d setting */
brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, &ifp->vif->is_11d);
country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
settings->beacon.tail_len,
WLAN_EID_COUNTRY);
is_11d = country_ie ? 1 : 0;
memset(&ssid_le, 0, sizeof(ssid_le));
if (settings->ssid == NULL || settings->ssid_len == 0) {
ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
ssid_ie = brcmf_parse_tlvs(
(u8 *)&settings->beacon.head[ie_offset],
settings->beacon.head_len - ie_offset,
WLAN_EID_SSID);
if (!ssid_ie)
return -EINVAL;
memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
ssid_le.SSID_len = cpu_to_le32(ssid_ie->len);
brcmf_dbg(TRACE, "SSID is (%s) in Head\n", ssid_le.SSID);
} else {
memcpy(ssid_le.SSID, settings->ssid, settings->ssid_len);
ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len);
}
if (!mbss) {
brcmf_set_mpc(ifp, 0);
brcmf_configure_arp_nd_offload(ifp, false);
}
/* find the RSN_IE */
rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
settings->beacon.tail_len, WLAN_EID_RSN);
/* find the WPA_IE */
wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
settings->beacon.tail_len);
if ((wpa_ie != NULL || rsn_ie != NULL)) {
brcmf_dbg(TRACE, "WPA(2) IE is found\n");
if (wpa_ie != NULL) {
/* WPA IE */
err = brcmf_configure_wpaie(ifp, wpa_ie, false);
if (err < 0)
goto exit;
} else {
struct brcmf_vs_tlv *tmp_ie;
tmp_ie = (struct brcmf_vs_tlv *)rsn_ie;
/* RSN IE */
err = brcmf_configure_wpaie(ifp, tmp_ie, true);
if (err < 0)
goto exit;
}
} else {
brcmf_dbg(TRACE, "No WPA(2) IEs found\n");
brcmf_configure_opensecurity(ifp);
}
brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
/* Parameters shared by all radio interfaces */
if (!mbss) {
if (is_11d != ifp->vif->is_11d) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
is_11d);
if (err < 0) {
brcmf_err("Regulatory Set Error, %d\n", err);
goto exit;
}
}
if (settings->beacon_interval) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
settings->beacon_interval);
if (err < 0) {
brcmf_err("Beacon Interval Set Error, %d\n",
err);
goto exit;
}
}
if (settings->dtim_period) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_DTIMPRD,
settings->dtim_period);
if (err < 0) {
brcmf_err("DTIM Interval Set Error, %d\n", err);
goto exit;
}
}
if ((dev_role == NL80211_IFTYPE_AP) &&
((ifp->ifidx == 0) ||
!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB))) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
if (err < 0) {
brcmf_err("BRCMF_C_DOWN error %d\n", err);
goto exit;
}
brcmf_fil_iovar_int_set(ifp, "apsta", 0);
}
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
if (err < 0) {
brcmf_err("SET INFRA error %d\n", err);
goto exit;
}
} else if (WARN_ON(is_11d != ifp->vif->is_11d)) {
/* Multiple-BSS should use same 11d configuration */
err = -EINVAL;
goto exit;
}
/* Interface specific setup */
if (dev_role == NL80211_IFTYPE_AP) {
if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss))
brcmf_fil_iovar_int_set(ifp, "mbss", 1);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
if (err < 0) {
brcmf_err("setting AP mode failed %d\n", err);
goto exit;
}
if (!mbss) {
/* Firmware 10.x requires setting channel after enabling
* AP and before bringing interface up.
*/
err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
if (err < 0) {
brcmf_err("Set Channel failed: chspec=%d, %d\n",
chanspec, err);
goto exit;
}
}
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
if (err < 0) {
brcmf_err("BRCMF_C_UP error (%d)\n", err);
goto exit;
}
/* On DOWN the firmware removes the WEP keys, reconfigure
* them if they were set.
*/
brcmf_cfg80211_reconfigure_wep(ifp);
memset(&join_params, 0, sizeof(join_params));
/* join parameters starts with ssid */
memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
/* create softap */
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
&join_params, sizeof(join_params));
if (err < 0) {
brcmf_err("SET SSID error (%d)\n", err);
goto exit;
}
if (settings->hidden_ssid) {
err = brcmf_fil_iovar_int_set(ifp, "closednet", 1);
if (err) {
brcmf_err("closednet error (%d)\n", err);
goto exit;
}
}
brcmf_dbg(TRACE, "AP mode configuration complete\n");
} else if (dev_role == NL80211_IFTYPE_P2P_GO) {
err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
if (err < 0) {
brcmf_err("Set Channel failed: chspec=%d, %d\n",
chanspec, err);
goto exit;
}
err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le,
sizeof(ssid_le));
if (err < 0) {
brcmf_err("setting ssid failed %d\n", err);
goto exit;
}
bss_enable.bsscfgidx = cpu_to_le32(ifp->bsscfgidx);
bss_enable.enable = cpu_to_le32(1);
err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
sizeof(bss_enable));
if (err < 0) {
brcmf_err("bss_enable config failed %d\n", err);
goto exit;
}
brcmf_dbg(TRACE, "GO mode configuration complete\n");
} else {
WARN_ON(1);
}
set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
brcmf_net_setcarrier(ifp, true);
exit:
if ((err) && (!mbss)) {
brcmf_set_mpc(ifp, 1);
brcmf_configure_arp_nd_offload(ifp, true);
}
return err;
}
| 107,596,959,318,646,240,000,000,000,000,000,000,000 | cfg80211.c | 239,830,303,826,032,670,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2016-8658 | Stack-based buffer overflow in the brcmf_cfg80211_start_ap function in drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c in the Linux kernel before 4.7.5 allows local users to cause a denial of service (system crash) or possibly have unspecified other impact via a long SSID Information Element in a command to a Netlink socket. | https://nvd.nist.gov/vuln/detail/CVE-2016-8658 |
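The stack overflow is the unchecked memcpy of ssid_ie->data into the fixed 32-byte ssid_le.SSID; bounding the IE length closes it. Minimal sketch, using IEEE80211_MAX_SSID_LEN from linux/ieee80211.h:

        ssid_ie = brcmf_parse_tlvs(
                (u8 *)&settings->beacon.head[ie_offset],
                settings->beacon.head_len - ie_offset,
                WLAN_EID_SSID);
        if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN)
            return -EINVAL;

        memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
        ssid_le.SSID_len = cpu_to_le32(ssid_ie->len);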
1,716 | linux | 84ac7260236a49c79eede91617700174c2c19b0c | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/84ac7260236a49c79eede91617700174c2c19b0c | packet: fix race condition in packet_set_ring
When packet_set_ring creates a ring buffer it will initialize a
struct timer_list if the packet version is TPACKET_V3. This value
can then be raced by a different thread calling setsockopt to
set the version to TPACKET_V1 before packet_set_ring has finished.
This leads to a use-after-free on a function pointer in the
struct timer_list when the socket is closed as the previously
initialized timer will not be deleted.
The bug is fixed by taking lock_sock(sk) in packet_setsockopt when
changing the packet version while also taking the lock at the start
of packet_set_ring.
Fixes: f6fb8f100b80 ("af-packet: TPACKET_V3 flexible buffer implementation.")
Signed-off-by: Philip Pettersson <philip.pettersson@gmail.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring)
{
struct pgv *pg_vec = NULL;
struct packet_sock *po = pkt_sk(sk);
int was_running, order = 0;
struct packet_ring_buffer *rb;
struct sk_buff_head *rb_queue;
__be16 num;
int err = -EINVAL;
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;
/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
net_warn_ratelimited("Tx-ring is not supported.\n");
goto out;
}
rb = tx_ring ? &po->tx_ring : &po->rx_ring;
rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
err = -EBUSY;
if (!closing) {
if (atomic_read(&po->mapped))
goto out;
if (packet_read_pending(rb))
goto out;
}
if (req->tp_block_nr) {
/* Sanity tests and some calculations */
err = -EBUSY;
if (unlikely(rb->pg_vec))
goto out;
switch (po->tp_version) {
case TPACKET_V1:
po->tp_hdrlen = TPACKET_HDRLEN;
break;
case TPACKET_V2:
po->tp_hdrlen = TPACKET2_HDRLEN;
break;
case TPACKET_V3:
po->tp_hdrlen = TPACKET3_HDRLEN;
break;
}
err = -EINVAL;
if (unlikely((int)req->tp_block_size <= 0))
goto out;
if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
goto out;
if (po->tp_version >= TPACKET_V3 &&
(int)(req->tp_block_size -
BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
goto out;
if (unlikely(req->tp_frame_size < po->tp_hdrlen +
po->tp_reserve))
goto out;
if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
goto out;
rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
if (unlikely(rb->frames_per_block == 0))
goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
goto out;
err = -ENOMEM;
order = get_order(req->tp_block_size);
pg_vec = alloc_pg_vec(req, order);
if (unlikely(!pg_vec))
goto out;
switch (po->tp_version) {
case TPACKET_V3:
/* Transmit path is not supported. We checked
* it above but just being paranoid
*/
if (!tx_ring)
init_prb_bdqc(po, rb, pg_vec, req_u);
break;
default:
break;
}
}
/* Done */
else {
err = -EINVAL;
if (unlikely(req->tp_frame_nr))
goto out;
}
lock_sock(sk);
/* Detach socket from network */
spin_lock(&po->bind_lock);
was_running = po->running;
num = po->num;
if (was_running) {
po->num = 0;
__unregister_prot_hook(sk, false);
}
spin_unlock(&po->bind_lock);
synchronize_net();
err = -EBUSY;
mutex_lock(&po->pg_vec_lock);
if (closing || atomic_read(&po->mapped) == 0) {
err = 0;
spin_lock_bh(&rb_queue->lock);
swap(rb->pg_vec, pg_vec);
rb->frame_max = (req->tp_frame_nr - 1);
rb->head = 0;
rb->frame_size = req->tp_frame_size;
spin_unlock_bh(&rb_queue->lock);
swap(rb->pg_vec_order, order);
swap(rb->pg_vec_len, req->tp_block_nr);
rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
po->prot_hook.func = (po->rx_ring.pg_vec) ?
tpacket_rcv : packet_rcv;
skb_queue_purge(rb_queue);
if (atomic_read(&po->mapped))
pr_err("packet_mmap: vma is busy: %d\n",
atomic_read(&po->mapped));
}
mutex_unlock(&po->pg_vec_lock);
spin_lock(&po->bind_lock);
if (was_running) {
po->num = num;
register_prot_hook(sk);
}
spin_unlock(&po->bind_lock);
if (closing && (po->tp_version > TPACKET_V2)) {
/* Because we don't support block-based V3 on tx-ring */
if (!tx_ring)
prb_shutdown_retire_blk_timer(po, rb_queue);
}
release_sock(sk);
if (pg_vec)
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
return err;
}
| 180,651,518,307,462,070,000,000,000,000,000,000,000 | af_packet.c | 4,675,655,767,715,536,000,000,000,000,000,000,000 | [
"CWE-416"
] | CVE-2016-8655 | Race condition in net/packet/af_packet.c in the Linux kernel through 4.8.12 allows local users to gain privileges or cause a denial of service (use-after-free) by leveraging the CAP_NET_RAW capability to change a socket version, related to the packet_set_ring and packet_setsockopt functions. | https://nvd.nist.gov/vuln/detail/CVE-2016-8655 |
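In packet_set_ring() the corresponding change is to take lock_sock(sk) before po->tp_version is consulted and the ring/timer state is built, instead of only around the later rebind, and to release it once at the exit. A hedged outline of the reshuffle (details elided):

    rb = tx_ring ? &po->tx_ring : &po->rx_ring;
    rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

    lock_sock(sk);  /* serializes against PACKET_VERSION changes */

    err = -EBUSY;
    /* existing sanity checks, allocation, swap and timer teardown run
     * here with the socket lock held; the old mid-function
     * lock_sock()/release_sock() pair is removed */
out:
    release_sock(sk);
    return err;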
1,717 | linux | 84ac7260236a49c79eede91617700174c2c19b0c | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/84ac7260236a49c79eede91617700174c2c19b0c | packet: fix race condition in packet_set_ring
When packet_set_ring creates a ring buffer it will initialize a
struct timer_list if the packet version is TPACKET_V3. This value
can then be raced by a different thread calling setsockopt to
set the version to TPACKET_V1 before packet_set_ring has finished.
This leads to a use-after-free on a function pointer in the
struct timer_list when the socket is closed as the previously
initialized timer will not be deleted.
The bug is fixed by taking lock_sock(sk) in packet_setsockopt when
changing the packet version while also taking the lock at the start
of packet_set_ring.
Fixes: f6fb8f100b80 ("af-packet: TPACKET_V3 flexible buffer implementation.")
Signed-off-by: Philip Pettersson <philip.pettersson@gmail.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
int ret;
if (level != SOL_PACKET)
return -ENOPROTOOPT;
switch (optname) {
case PACKET_ADD_MEMBERSHIP:
case PACKET_DROP_MEMBERSHIP:
{
struct packet_mreq_max mreq;
int len = optlen;
memset(&mreq, 0, sizeof(mreq));
if (len < sizeof(struct packet_mreq))
return -EINVAL;
if (len > sizeof(mreq))
len = sizeof(mreq);
if (copy_from_user(&mreq, optval, len))
return -EFAULT;
if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
return -EINVAL;
if (optname == PACKET_ADD_MEMBERSHIP)
ret = packet_mc_add(sk, &mreq);
else
ret = packet_mc_drop(sk, &mreq);
return ret;
}
case PACKET_RX_RING:
case PACKET_TX_RING:
{
union tpacket_req_u req_u;
int len;
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
len = sizeof(req_u.req);
break;
case TPACKET_V3:
default:
len = sizeof(req_u.req3);
break;
}
if (optlen < len)
return -EINVAL;
if (copy_from_user(&req_u.req, optval, len))
return -EFAULT;
return packet_set_ring(sk, &req_u, 0,
optname == PACKET_TX_RING);
}
case PACKET_COPY_THRESH:
{
int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
pkt_sk(sk)->copy_thresh = val;
return 0;
}
case PACKET_VERSION:
{
int val;
if (optlen != sizeof(val))
return -EINVAL;
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
switch (val) {
case TPACKET_V1:
case TPACKET_V2:
case TPACKET_V3:
po->tp_version = val;
return 0;
default:
return -EINVAL;
}
}
case PACKET_RESERVE:
{
unsigned int val;
if (optlen != sizeof(val))
return -EINVAL;
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->tp_reserve = val;
return 0;
}
case PACKET_LOSS:
{
unsigned int val;
if (optlen != sizeof(val))
return -EINVAL;
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->tp_loss = !!val;
return 0;
}
case PACKET_AUXDATA:
{
int val;
if (optlen < sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->auxdata = !!val;
return 0;
}
case PACKET_ORIGDEV:
{
int val;
if (optlen < sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->origdev = !!val;
return 0;
}
case PACKET_VNET_HDR:
{
int val;
if (sock->type != SOCK_RAW)
return -EINVAL;
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
return -EBUSY;
if (optlen < sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->has_vnet_hdr = !!val;
return 0;
}
case PACKET_TIMESTAMP:
{
int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->tp_tstamp = val;
return 0;
}
case PACKET_FANOUT:
{
int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
return fanout_add(sk, val & 0xffff, val >> 16);
}
case PACKET_FANOUT_DATA:
{
if (!po->fanout)
return -EINVAL;
return fanout_set_data(po, optval, optlen);
}
case PACKET_TX_HAS_OFF:
{
unsigned int val;
if (optlen != sizeof(val))
return -EINVAL;
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->tp_tx_has_off = !!val;
return 0;
}
case PACKET_QDISC_BYPASS:
{
int val;
if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
return 0;
}
default:
return -ENOPROTOOPT;
}
}
| 235,008,048,746,926,100,000,000,000,000,000,000,000 | af_packet.c | 4,675,655,767,715,536,000,000,000,000,000,000,000 | [
"CWE-416"
] | CVE-2016-8655 | Race condition in net/packet/af_packet.c in the Linux kernel through 4.8.12 allows local users to gain privileges or cause a denial of service (use-after-free) by leveraging the CAP_NET_RAW capability to change a socket version, related to the packet_set_ring and packet_setsockopt functions. | https://nvd.nist.gov/vuln/detail/CVE-2016-8655 |
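On the setsockopt side, the same fix moves the ring check and the tp_version write under lock_sock(sk), so the version can no longer change between packet_set_ring()'s validation and its timer setup. Sketch of the PACKET_VERSION case:

    case PACKET_VERSION:
    {
        int val;

        if (optlen != sizeof(val))
            return -EINVAL;
        if (copy_from_user(&val, optval, sizeof(val)))
            return -EFAULT;
        switch (val) {
        case TPACKET_V1:
        case TPACKET_V2:
        case TPACKET_V3:
            break;
        default:
            return -EINVAL;
        }
        lock_sock(sk);
        if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
            ret = -EBUSY;
        } else {
            po->tp_version = val;
            ret = 0;
        }
        release_sock(sk);
        return ret;
    }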
1,718 | linux | 4afa5f9617927453ac04b24b584f6c718dfb4f45 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/4afa5f9617927453ac04b24b584f6c718dfb4f45 | crypto: algif_hash - Only export and import on sockets with data
The hash_accept call fails to work on sockets that have not received
any data. For some algorithm implementations it may cause crashes.
This patch fixes this by ensuring that we only export and import on
sockets that have received data.
Cc: stable@vger.kernel.org
Reported-by: Harsh Jain <harshjain.prof@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Stephan Mueller <smueller@chronox.de> | 1 | static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
struct ahash_request *req = &ctx->req;
char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
struct sock *sk2;
struct alg_sock *ask2;
struct hash_ctx *ctx2;
int err;
err = crypto_ahash_export(req, state);
if (err)
return err;
err = af_alg_accept(ask->parent, newsock);
if (err)
return err;
sk2 = newsock->sk;
ask2 = alg_sk(sk2);
ctx2 = ask2->private;
ctx2->more = 1;
err = crypto_ahash_import(&ctx2->req, state);
if (err) {
sock_orphan(sk2);
sock_put(sk2);
}
return err;
}
| 855,501,647,450,820,300,000,000,000,000,000,000 | algif_hash.c | 274,659,569,534,280,800,000,000,000,000,000,000,000 | [
"CWE-476"
] | CVE-2016-8646 | The hash_accept function in crypto/algif_hash.c in the Linux kernel before 4.3.6 allows local users to cause a denial of service (OOPS) by attempting to trigger use of in-kernel hash algorithms for a socket that has received zero bytes of data. | https://nvd.nist.gov/vuln/detail/CVE-2016-8646 |
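Following the commit message, hash_accept() should only export and import state when the source socket has actually been fed data; a hedged sketch of that guard (the "?: 1" keeps the on-stack buffer non-zero for transforms that report a zero statesize):

    char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1];
    bool more;
    int err;

    lock_sock(sk);
    more = ctx->more;
    err = more ? crypto_ahash_export(req, state) : 0;
    release_sock(sk);

    if (err)
        return err;

    err = af_alg_accept(ask->parent, newsock);
    if (err)
        return err;

    sk2 = newsock->sk;
    ask2 = alg_sk(sk2);
    ctx2 = ask2->private;
    ctx2->more = more;

    if (more) {
        err = crypto_ahash_import(&ctx2->req, state);
        if (err) {
            sock_orphan(sk2);
            sock_put(sk2);
        }
    }
    return err;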
1,719 | linux | ac6e780070e30e4c35bd395acfe9191e6268bdd3 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/ac6e780070e30e4c35bd395acfe9191e6268bdd3 | tcp: take care of truncations done by sk_filter()
With syzkaller help, Marco Grassi found a bug in TCP stack,
crashing in tcp_collapse()
Root cause is that sk_filter() can truncate the incoming skb,
but TCP stack was not really expecting this to happen.
It probably was expecting a simple DROP or ACCEPT behavior.
We first need to make sure no part of TCP header could be removed.
Then we need to adjust TCP_SKB_CB(skb)->end_seq
Many thanks to syzkaller team and Marco for giving us a reproducer.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Marco Grassi <marco.gra@gmail.com>
Reported-by: Vladis Dronov <vdronov@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | int tcp_v4_rcv(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
const struct iphdr *iph;
const struct tcphdr *th;
bool refcounted;
struct sock *sk;
int ret;
if (skb->pkt_type != PACKET_HOST)
goto discard_it;
/* Count it even if it's bad */
__TCP_INC_STATS(net, TCP_MIB_INSEGS);
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
goto discard_it;
th = (const struct tcphdr *)skb->data;
if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
goto bad_packet;
if (!pskb_may_pull(skb, th->doff * 4))
goto discard_it;
/* An explanation is required here, I think.
* Packet length and doff are validated by header prediction,
* provided case of th->doff==0 is eliminated.
* So, we defer the checks. */
if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
goto csum_error;
th = (const struct tcphdr *)skb->data;
iph = ip_hdr(skb);
/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
* barrier() makes sure compiler wont play fool^Waliasing games.
*/
memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
sizeof(struct inet_skb_parm));
barrier();
TCP_SKB_CB(skb)->seq = ntohl(th->seq);
TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
skb->len - th->doff * 4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
TCP_SKB_CB(skb)->tcp_tw_isn = 0;
TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
TCP_SKB_CB(skb)->sacked = 0;
lookup:
sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
th->dest, &refcounted);
if (!sk)
goto no_tcp_socket;
process:
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
if (sk->sk_state == TCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
struct sock *nsk;
sk = req->rsk_listener;
if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
sk_drops_add(sk, skb);
reqsk_put(req);
goto discard_it;
}
if (unlikely(sk->sk_state != TCP_LISTEN)) {
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;
}
/* We own a reference on the listener, increase it again
* as we might lose it too soon.
*/
sock_hold(sk);
refcounted = true;
nsk = tcp_check_req(sk, skb, req, false);
if (!nsk) {
reqsk_put(req);
goto discard_and_relse;
}
if (nsk == sk) {
reqsk_put(req);
} else if (tcp_child_process(sk, nsk, skb)) {
tcp_v4_send_reset(nsk, skb);
goto discard_and_relse;
} else {
sock_put(sk);
return 0;
}
}
if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
goto discard_and_relse;
}
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
if (tcp_v4_inbound_md5_hash(sk, skb))
goto discard_and_relse;
nf_reset(skb);
if (sk_filter(sk, skb))
goto discard_and_relse;
skb->dev = NULL;
if (sk->sk_state == TCP_LISTEN) {
ret = tcp_v4_do_rcv(sk, skb);
goto put_and_return;
}
sk_incoming_cpu_update(sk);
bh_lock_sock_nested(sk);
tcp_segs_in(tcp_sk(sk), skb);
ret = 0;
if (!sock_owned_by_user(sk)) {
if (!tcp_prequeue(sk, skb))
ret = tcp_v4_do_rcv(sk, skb);
} else if (tcp_add_backlog(sk, skb)) {
goto discard_and_relse;
}
bh_unlock_sock(sk);
put_and_return:
if (refcounted)
sock_put(sk);
return ret;
no_tcp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
if (tcp_checksum_complete(skb)) {
csum_error:
__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
__TCP_INC_STATS(net, TCP_MIB_INERRS);
} else {
tcp_v4_send_reset(NULL, skb);
}
discard_it:
/* Discard frame. */
kfree_skb(skb);
return 0;
discard_and_relse:
sk_drops_add(sk, skb);
if (refcounted)
sock_put(sk);
goto discard_it;
do_time_wait:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
inet_twsk_put(inet_twsk(sk));
goto discard_it;
}
if (tcp_checksum_complete(skb)) {
inet_twsk_put(inet_twsk(sk));
goto csum_error;
}
switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
case TCP_TW_SYN: {
struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
&tcp_hashinfo, skb,
__tcp_hdrlen(th),
iph->saddr, th->source,
iph->daddr, th->dest,
inet_iif(skb));
if (sk2) {
inet_twsk_deschedule_put(inet_twsk(sk));
sk = sk2;
refcounted = false;
goto process;
}
/* Fall through to ACK */
}
case TCP_TW_ACK:
tcp_v4_timewait_ack(sk, skb);
break;
case TCP_TW_RST:
tcp_v4_send_reset(sk, skb);
inet_twsk_deschedule_put(inet_twsk(sk));
goto discard_it;
case TCP_TW_SUCCESS:;
}
goto discard_it;
}
| 284,014,189,874,385,800,000,000,000,000,000,000,000 | tcp_ipv4.c | 172,691,742,001,660,820,000,000,000,000,000,000,000 | [
"CWE-284"
] | CVE-2016-8645 | The TCP stack in the Linux kernel before 4.8.10 mishandles skb truncation, which allows local users to cause a denial of service (system crash) via a crafted application that makes sendto system calls, related to net/ipv4/tcp_ipv4.c and net/ipv6/tcp_ipv6.c. | https://nvd.nist.gov/vuln/detail/CVE-2016-8645 |
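The repair is to run the socket filter through a TCP-aware wrapper that never trims into the TCP header and then to re-derive the header pointers and the sequence bookkeeping from the possibly shortened skb. A hedged sketch, assuming sk_filter_trim_cap() is available:

int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
    struct tcphdr *th = (struct tcphdr *)skb->data;

    /* cap the trim so at most the payload, never the header, is cut */
    return sk_filter_trim_cap(sk, skb, th->doff * 4);
}

    /* in tcp_v4_rcv(), replacing the bare sk_filter() call: */
    if (tcp_filter(sk, skb))
        goto discard_and_relse;
    th = (const struct tcphdr *)skb->data;
    iph = ip_hdr(skb);
    /* the payload may have shrunk, so recompute the sequence range */
    TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                               skb->len - th->doff * 4;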
1,724 | linux | d9092f52d7e61dd1557f2db2400ddb430e85937e | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/d9092f52d7e61dd1557f2db2400ddb430e85937e | kvm: x86: Check memopp before dereference (CVE-2016-8630)
Commit 41061cdb98 ("KVM: emulate: do not initialize memopp") removes a
check for non-NULL under incorrect assumptions. An undefined instruction
with a ModR/M byte with Mod=0 and R/M-5 (e.g. 0xc7 0x15) will attempt
to dereference a null pointer here.
Fixes: 41061cdb98a0bec464278b4db8e894a3121671f5
Message-Id: <1477592752-126650-2-git-send-email-osh@google.com>
Signed-off-by: Owen Hofmann <osh@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | 1 | int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
ctxt->_eip = ctxt->eip;
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {
rc = __do_insn_fetch_bytes(ctxt, 1);
if (rc != X86EMUL_CONTINUE)
return rc;
}
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
case X86EMUL_MODE_PROT32:
def_op_bytes = def_ad_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86EMUL_MODE_PROT64:
def_op_bytes = 4;
def_ad_bytes = 8;
break;
#endif
default:
return EMULATION_FAILED;
}
ctxt->op_bytes = def_op_bytes;
ctxt->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
for (;;) {
switch (ctxt->b = insn_fetch(u8, ctxt)) {
case 0x66: /* operand-size override */
op_prefix = true;
/* switch between 2/4 bytes */
ctxt->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
has_seg_override = true;
ctxt->seg_override = (ctxt->b >> 3) & 3;
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
has_seg_override = true;
ctxt->seg_override = ctxt->b & 7;
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
ctxt->rex_prefix = ctxt->b;
continue;
case 0xf0: /* LOCK */
ctxt->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
case 0xf3: /* REP/REPE/REPZ */
ctxt->rep_prefix = ctxt->b;
break;
default:
goto done_prefixes;
}
/* Any legacy prefix after a REX prefix nullifies its effect. */
ctxt->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
if (ctxt->rex_prefix & 8)
ctxt->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
if (ctxt->b == 0x0f) {
ctxt->opcode_len = 2;
ctxt->b = insn_fetch(u8, ctxt);
opcode = twobyte_table[ctxt->b];
/* 0F_38 opcode map */
if (ctxt->b == 0x38) {
ctxt->opcode_len = 3;
ctxt->b = insn_fetch(u8, ctxt);
opcode = opcode_map_0f_38[ctxt->b];
}
}
ctxt->d = opcode.flags;
if (ctxt->d & ModRM)
ctxt->modrm = insn_fetch(u8, ctxt);
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
(mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
ctxt->d = NotImpl;
}
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
goffset = (ctxt->modrm >> 3) & 7;
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
else
opcode = opcode.u.gdual->mod012[goffset];
break;
case RMExt:
goffset = ctxt->modrm & 7;
opcode = opcode.u.group[goffset];
break;
case Prefix:
if (ctxt->rep_prefix && op_prefix)
return EMULATION_FAILED;
simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
}
break;
case Escape:
if (ctxt->modrm > 0xbf)
opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
else
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
break;
case InstrDual:
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.idual->mod3;
else
opcode = opcode.u.idual->mod012;
break;
case ModeDual:
if (ctxt->mode == X86EMUL_MODE_PROT64)
opcode = opcode.u.mdual->mode64;
else
opcode = opcode.u.mdual->mode32;
break;
default:
return EMULATION_FAILED;
}
ctxt->d &= ~(u64)GroupMask;
ctxt->d |= opcode.flags;
}
/* Unrecognised? */
if (ctxt->d == 0)
return EMULATION_FAILED;
ctxt->execute = opcode.u.execute;
if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
(NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
No16))) {
/*
* These are copied unconditionally here, and checked unconditionally
* in x86_emulate_insn.
*/
ctxt->check_perm = opcode.check_perm;
ctxt->intercept = opcode.intercept;
if (ctxt->d & NotImpl)
return EMULATION_FAILED;
if (mode == X86EMUL_MODE_PROT64) {
if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
ctxt->op_bytes = 8;
else if (ctxt->d & NearBranch)
ctxt->op_bytes = 8;
}
if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
else
ctxt->op_bytes = 4;
}
if ((ctxt->d & No16) && ctxt->op_bytes == 2)
ctxt->op_bytes = 4;
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
else if (ctxt->d & Mmx)
ctxt->op_bytes = 8;
}
/* ModRM and SIB bytes. */
if (ctxt->d & ModRM) {
rc = decode_modrm(ctxt, &ctxt->memop);
if (!has_seg_override) {
has_seg_override = true;
ctxt->seg_override = ctxt->modrm_seg;
}
} else if (ctxt->d & MemAbs)
rc = decode_abs(ctxt, &ctxt->memop);
if (rc != X86EMUL_CONTINUE)
goto done;
if (!has_seg_override)
ctxt->seg_override = VCPU_SREG_DS;
ctxt->memop.addr.mem.seg = ctxt->seg_override;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
if (ctxt->rip_relative)
ctxt->memopp->addr.mem.ea = address_mask(ctxt,
ctxt->memopp->addr.mem.ea + ctxt->_eip);
done:
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
| 204,947,235,715,738,400,000,000,000,000,000,000,000 | emulate.c | 229,450,093,523,585,650,000,000,000,000,000,000,000 | [
"CWE-284"
] | CVE-2016-8630 | The x86_decode_insn function in arch/x86/kvm/emulate.c in the Linux kernel before 4.8.7, when KVM is enabled, allows local users to cause a denial of service (host OS crash) via a certain use of a ModR/M byte in an undefined instruction. | https://nvd.nist.gov/vuln/detail/CVE-2016-8630 |
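The commit message above points at the rip-relative adjustment near the end of x86_decode_insn(): when decoding bails out early, ctxt->memopp can still be NULL even though ctxt->rip_relative is set. A minimal sketch of the restored guard, reusing only names already visible in the function above:

	/* ctxt->memopp is only set when an operand actually bound to
	 * ctxt->memop; an undefined opcode with Mod=0, R/M=5 sets
	 * rip_relative without ever doing so, hence the NULL check. */
	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
				ctxt->memopp->addr.mem.ea + ctxt->_eip);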
1,725 | linux | c58d6c93680f28ac58984af61d0a7ebf4319c241 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/c58d6c93680f28ac58984af61d0a7ebf4319c241 | netfilter: nfnetlink: correctly validate length of batch messages
If nlh->nlmsg_len is zero then an infinite loop is triggered because
'skb_pull(skb, msglen);' pulls zero bytes.
The calculation in nlmsg_len() underflows if 'nlh->nlmsg_len <
NLMSG_HDRLEN' which bypasses the length validation and will later
trigger an out-of-bound read.
If the length validation does fail then the malformed batch message is
copied back to userspace. However, we cannot do this because the
nlh->nlmsg_len can be invalid. This leads to an out-of-bounds read in
netlink_ack:
[ 41.455421] ==================================================================
[ 41.456431] BUG: KASAN: slab-out-of-bounds in memcpy+0x1d/0x40 at addr ffff880119e79340
[ 41.456431] Read of size 4294967280 by task a.out/987
[ 41.456431] =============================================================================
[ 41.456431] BUG kmalloc-512 (Not tainted): kasan: bad access detected
[ 41.456431] -----------------------------------------------------------------------------
...
[ 41.456431] Bytes b4 ffff880119e79310: 00 00 00 00 d5 03 00 00 b0 fb fe ff 00 00 00 00 ................
[ 41.456431] Object ffff880119e79320: 20 00 00 00 10 00 05 00 00 00 00 00 00 00 00 00 ...............
[ 41.456431] Object ffff880119e79330: 14 00 0a 00 01 03 fc 40 45 56 11 22 33 10 00 05 .......@EV."3...
[ 41.456431] Object ffff880119e79340: f0 ff ff ff 88 99 aa bb 00 14 00 0a 00 06 fe fb ................
^^ start of batch nlmsg with
nlmsg_len=4294967280
...
[ 41.456431] Memory state around the buggy address:
[ 41.456431] ffff880119e79400: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 41.456431] ffff880119e79480: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 41.456431] >ffff880119e79500: 00 00 00 00 fc fc fc fc fc fc fc fc fc fc fc fc
[ 41.456431] ^
[ 41.456431] ffff880119e79580: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[ 41.456431] ffff880119e79600: fc fc fc fc fc fc fc fc fc fc fb fb fb fb fb fb
[ 41.456431] ==================================================================
Fix this with better validation of nlh->nlmsg_len and by setting
NFNL_BATCH_FAILURE if any batch message fails length validation.
CAP_NET_ADMIN is required to trigger the bugs.
Fixes: 9ea2aa8b7dba ("netfilter: nfnetlink: validate nfnetlink header from batch")
Signed-off-by: Phil Turnbull <phil.turnbull@oracle.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> | 1 | static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
u_int16_t subsys_id)
{
struct sk_buff *oskb = skb;
struct net *net = sock_net(skb->sk);
const struct nfnetlink_subsystem *ss;
const struct nfnl_callback *nc;
static LIST_HEAD(err_list);
u32 status;
int err;
if (subsys_id >= NFNL_SUBSYS_COUNT)
return netlink_ack(skb, nlh, -EINVAL);
replay:
status = 0;
skb = netlink_skb_clone(oskb, GFP_KERNEL);
if (!skb)
return netlink_ack(oskb, nlh, -ENOMEM);
nfnl_lock(subsys_id);
ss = nfnl_dereference_protected(subsys_id);
if (!ss) {
#ifdef CONFIG_MODULES
nfnl_unlock(subsys_id);
request_module("nfnetlink-subsys-%d", subsys_id);
nfnl_lock(subsys_id);
ss = nfnl_dereference_protected(subsys_id);
if (!ss)
#endif
{
nfnl_unlock(subsys_id);
netlink_ack(oskb, nlh, -EOPNOTSUPP);
return kfree_skb(skb);
}
}
if (!ss->commit || !ss->abort) {
nfnl_unlock(subsys_id);
netlink_ack(oskb, nlh, -EOPNOTSUPP);
return kfree_skb(skb);
}
while (skb->len >= nlmsg_total_size(0)) {
int msglen, type;
nlh = nlmsg_hdr(skb);
err = 0;
if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
skb->len < nlh->nlmsg_len) {
err = -EINVAL;
goto ack;
}
/* Only requests are handled by the kernel */
if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
err = -EINVAL;
goto ack;
}
type = nlh->nlmsg_type;
if (type == NFNL_MSG_BATCH_BEGIN) {
/* Malformed: Batch begin twice */
nfnl_err_reset(&err_list);
status |= NFNL_BATCH_FAILURE;
goto done;
} else if (type == NFNL_MSG_BATCH_END) {
status |= NFNL_BATCH_DONE;
goto done;
} else if (type < NLMSG_MIN_TYPE) {
err = -EINVAL;
goto ack;
}
/* We only accept a batch with messages for the same
* subsystem.
*/
if (NFNL_SUBSYS_ID(type) != subsys_id) {
err = -EINVAL;
goto ack;
}
nc = nfnetlink_find_client(type, ss);
if (!nc) {
err = -EINVAL;
goto ack;
}
{
int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
struct nlattr *attr = (void *)nlh + min_len;
int attrlen = nlh->nlmsg_len - min_len;
err = nla_parse(cda, ss->cb[cb_id].attr_count,
attr, attrlen, ss->cb[cb_id].policy);
if (err < 0)
goto ack;
if (nc->call_batch) {
err = nc->call_batch(net, net->nfnl, skb, nlh,
(const struct nlattr **)cda);
}
/* The lock was released to autoload some module, we
* have to abort and start from scratch using the
* original skb.
*/
if (err == -EAGAIN) {
status |= NFNL_BATCH_REPLAY;
goto next;
}
}
ack:
if (nlh->nlmsg_flags & NLM_F_ACK || err) {
/* Errors are delivered once the full batch has been
* processed, this avoids that the same error is
* reported several times when replaying the batch.
*/
if (nfnl_err_add(&err_list, nlh, err) < 0) {
/* We failed to enqueue an error, reset the
* list of errors and send OOM to userspace
* pointing to the batch header.
*/
nfnl_err_reset(&err_list);
netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM);
status |= NFNL_BATCH_FAILURE;
goto done;
}
/* We don't stop processing the batch on errors, thus,
* userspace gets all the errors that the batch
* triggers.
*/
if (err)
status |= NFNL_BATCH_FAILURE;
}
next:
msglen = NLMSG_ALIGN(nlh->nlmsg_len);
if (msglen > skb->len)
msglen = skb->len;
skb_pull(skb, msglen);
}
done:
if (status & NFNL_BATCH_REPLAY) {
ss->abort(net, oskb);
nfnl_err_reset(&err_list);
nfnl_unlock(subsys_id);
kfree_skb(skb);
goto replay;
} else if (status == NFNL_BATCH_DONE) {
ss->commit(net, oskb);
} else {
ss->abort(net, oskb);
}
nfnl_err_deliver(&err_list, oskb);
nfnl_unlock(subsys_id);
kfree_skb(skb);
}
| 284,681,399,279,689,300,000,000,000,000,000,000,000 | nfnetlink.c | 266,157,007,856,312,750,000,000,000,000,000,000,000 | [
"CWE-125"
] | CVE-2016-7917 | The nfnetlink_rcv_batch function in net/netfilter/nfnetlink.c in the Linux kernel before 4.5 does not check whether a batch message's length field is large enough, which allows local users to obtain sensitive information from kernel memory or cause a denial of service (infinite loop or out-of-bounds read) by leveraging the CAP_NET_ADMIN capability. | https://nvd.nist.gov/vuln/detail/CVE-2016-7917 |
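The validation the commit message above calls for has to run before nlmsg_len() is ever evaluated, and it must abort the batch rather than ack a header that cannot be trusted. A sketch of that check as it would sit at the top of the message loop, using only names already present in the function above:

		/* A header shorter than NLMSG_HDRLEN makes nlmsg_len()
		 * underflow, and netlink_ack() must never copy from such a
		 * header, so give up on the whole batch instead of acking. */
		if (nlh->nlmsg_len < NLMSG_HDRLEN ||
		    skb->len < nlh->nlmsg_len ||
		    nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		}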
1,726 | linux | 50220dead1650609206efe91f0cc116132d59b3f | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/50220dead1650609206efe91f0cc116132d59b3f | HID: core: prevent out-of-bound readings
Plugging a Logitech DJ receiver with KASAN activated raises a bunch of
out-of-bounds reads.
The fields are allocated up to MAX_USAGE, meaning that potentially, we do
not have enough fields to fit the incoming values.
Add checks and silence KASAN.
Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz> | 1 | static void hid_input_field(struct hid_device *hid, struct hid_field *field,
__u8 *data, int interrupt)
{
unsigned n;
unsigned count = field->report_count;
unsigned offset = field->report_offset;
unsigned size = field->report_size;
__s32 min = field->logical_minimum;
__s32 max = field->logical_maximum;
__s32 *value;
value = kmalloc(sizeof(__s32) * count, GFP_ATOMIC);
if (!value)
return;
for (n = 0; n < count; n++) {
value[n] = min < 0 ?
snto32(hid_field_extract(hid, data, offset + n * size,
size), size) :
hid_field_extract(hid, data, offset + n * size, size);
/* Ignore report if ErrorRollOver */
if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
value[n] >= min && value[n] <= max &&
field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
goto exit;
}
for (n = 0; n < count; n++) {
if (HID_MAIN_ITEM_VARIABLE & field->flags) {
hid_process_event(hid, field, &field->usage[n], value[n], interrupt);
continue;
}
if (field->value[n] >= min && field->value[n] <= max
&& field->usage[field->value[n] - min].hid
&& search(value, field->value[n], count))
hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
if (value[n] >= min && value[n] <= max
&& field->usage[value[n] - min].hid
&& search(field->value, value[n], count))
hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
}
memcpy(field->value, value, count * sizeof(__s32));
exit:
kfree(value);
}
| 188,242,288,363,672,830,000,000,000,000,000,000,000 | hid-core.c | 315,690,708,720,987,600,000,000,000,000,000,000,000 | [
"CWE-125"
] | CVE-2016-7915 | The hid_input_field function in drivers/hid/hid-core.c in the Linux kernel before 4.6 allows physically proximate attackers to obtain sensitive information from kernel memory or cause a denial of service (out-of-bounds read) by connecting a device, as demonstrated by a Logitech DJ receiver. | https://nvd.nist.gov/vuln/detail/CVE-2016-7915 |
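The checks the commit message above refers to bound every index into field->usage[] by the number of usages actually allocated. A sketch of the extra condition in the first loop, assuming the field->maxusage member that hid-core sizes the array with (the two lookups in the second loop need the same bound):

		/* value[n] - min indexes field->usage[], which only holds
		 * field->maxusage entries, so bound it before dereferencing. */
		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
		    value[n] >= min && value[n] <= max &&
		    value[n] - min < field->maxusage &&
		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
			goto exit;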
1,727 | linux | 8d4a2ec1e0b41b0cf9a0c5cd4511da7f8e4f3de2 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/8d4a2ec1e0b41b0cf9a0c5cd4511da7f8e4f3de2 | assoc_array: don't call compare_object() on a node
Changes since V1: fixed the description and added KASan warning.
In assoc_array_insert_into_terminal_node(), we call the
compare_object() method on all non-empty slots, even when they're
not leaves, passing a pointer to an unexpected structure to
compare_object(). Currently it causes an out-of-bound read access
in keyring_compare_object detected by KASan (see below). The issue
is easily reproduced with keyutils testsuite.
Only call compare_object() when the slot is a leaf.
KASan warning:
==================================================================
BUG: KASAN: slab-out-of-bounds in keyring_compare_object+0x213/0x240 at addr ffff880060a6f838
Read of size 8 by task keyctl/1655
=============================================================================
BUG kmalloc-192 (Not tainted): kasan: bad access detected
-----------------------------------------------------------------------------
Disabling lock debugging due to kernel taint
INFO: Allocated in assoc_array_insert+0xfd0/0x3a60 age=69 cpu=1 pid=1647
___slab_alloc+0x563/0x5c0
__slab_alloc+0x51/0x90
kmem_cache_alloc_trace+0x263/0x300
assoc_array_insert+0xfd0/0x3a60
__key_link_begin+0xfc/0x270
key_create_or_update+0x459/0xaf0
SyS_add_key+0x1ba/0x350
entry_SYSCALL_64_fastpath+0x12/0x76
INFO: Slab 0xffffea0001829b80 objects=16 used=8 fp=0xffff880060a6f550 flags=0x3fff8000004080
INFO: Object 0xffff880060a6f740 @offset=5952 fp=0xffff880060a6e5d1
Bytes b4 ffff880060a6f730: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f740: d1 e5 a6 60 00 88 ff ff 0e 00 00 00 00 00 00 00 ...`............
Object ffff880060a6f750: 02 cf 8e 60 00 88 ff ff 02 c0 8e 60 00 88 ff ff ...`.......`....
Object ffff880060a6f760: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f770: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f780: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f790: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f7a0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f7b0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f7c0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f7d0: 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f7e0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f7f0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
CPU: 0 PID: 1655 Comm: keyctl Tainted: G B 4.5.0-rc4-kasan+ #291
Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
0000000000000000 000000001b2800b4 ffff880060a179e0 ffffffff81b60491
ffff88006c802900 ffff880060a6f740 ffff880060a17a10 ffffffff815e2969
ffff88006c802900 ffffea0001829b80 ffff880060a6f740 ffff880060a6e650
Call Trace:
[<ffffffff81b60491>] dump_stack+0x85/0xc4
[<ffffffff815e2969>] print_trailer+0xf9/0x150
[<ffffffff815e9454>] object_err+0x34/0x40
[<ffffffff815ebe50>] kasan_report_error+0x230/0x550
[<ffffffff819949be>] ? keyring_get_key_chunk+0x13e/0x210
[<ffffffff815ec62d>] __asan_report_load_n_noabort+0x5d/0x70
[<ffffffff81994cc3>] ? keyring_compare_object+0x213/0x240
[<ffffffff81994cc3>] keyring_compare_object+0x213/0x240
[<ffffffff81bc238c>] assoc_array_insert+0x86c/0x3a60
[<ffffffff81bc1b20>] ? assoc_array_cancel_edit+0x70/0x70
[<ffffffff8199797d>] ? __key_link_begin+0x20d/0x270
[<ffffffff8199786c>] __key_link_begin+0xfc/0x270
[<ffffffff81993389>] key_create_or_update+0x459/0xaf0
[<ffffffff8128ce0d>] ? trace_hardirqs_on+0xd/0x10
[<ffffffff81992f30>] ? key_type_lookup+0xc0/0xc0
[<ffffffff8199e19d>] ? lookup_user_key+0x13d/0xcd0
[<ffffffff81534763>] ? memdup_user+0x53/0x80
[<ffffffff819983ea>] SyS_add_key+0x1ba/0x350
[<ffffffff81998230>] ? key_get_type_from_user.constprop.6+0xa0/0xa0
[<ffffffff828bcf4e>] ? retint_user+0x18/0x23
[<ffffffff8128cc7e>] ? trace_hardirqs_on_caller+0x3fe/0x580
[<ffffffff81004017>] ? trace_hardirqs_on_thunk+0x17/0x19
[<ffffffff828bc432>] entry_SYSCALL_64_fastpath+0x12/0x76
Memory state around the buggy address:
ffff880060a6f700: fc fc fc fc fc fc fc fc 00 00 00 00 00 00 00 00
ffff880060a6f780: 00 00 00 00 00 00 00 00 00 00 00 fc fc fc fc fc
>ffff880060a6f800: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
^
ffff880060a6f880: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
ffff880060a6f900: fc fc fc fc fc fc 00 00 00 00 00 00 00 00 00 00
==================================================================
Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
Signed-off-by: David Howells <dhowells@redhat.com>
cc: stable@vger.kernel.org | 1 | static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
const struct assoc_array_ops *ops,
const void *index_key,
struct assoc_array_walk_result *result)
{
struct assoc_array_shortcut *shortcut, *new_s0;
struct assoc_array_node *node, *new_n0, *new_n1, *side;
struct assoc_array_ptr *ptr;
unsigned long dissimilarity, base_seg, blank;
size_t keylen;
bool have_meta;
int level, diff;
int slot, next_slot, free_slot, i, j;
node = result->terminal_node.node;
level = result->terminal_node.level;
edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = result->terminal_node.slot;
pr_devel("-->%s()\n", __func__);
/* We arrived at a node which doesn't have an onward node or shortcut
* pointer that we have to follow. This means that (a) the leaf we
* want must go here (either by insertion or replacement) or (b) we
* need to split this node and insert in one of the fragments.
*/
free_slot = -1;
/* Firstly, we have to check the leaves in this node to see if there's
* a matching one we should replace in place.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
if (!ptr) {
free_slot = i;
continue;
}
if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
pr_devel("replace in slot %d\n", i);
edit->leaf_p = &node->slots[i];
edit->dead_leaf = node->slots[i];
pr_devel("<--%s() = ok [replace]\n", __func__);
return true;
}
}
/* If there is a free slot in this node then we can just insert the
* leaf here.
*/
if (free_slot >= 0) {
pr_devel("insert in free slot %d\n", free_slot);
edit->leaf_p = &node->slots[free_slot];
edit->adjust_count_on = node;
pr_devel("<--%s() = ok [insert]\n", __func__);
return true;
}
/* The node has no spare slots - so we're either going to have to split
* it or insert another node before it.
*
* Whatever, we're going to need at least two new nodes - so allocate
* those now. We may also need a new shortcut, but we deal with that
* when we need it.
*/
new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n0)
return false;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n1)
return false;
edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
/* We need to find out how similar the leaves are. */
pr_devel("no spare slots\n");
have_meta = false;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
if (assoc_array_ptr_is_meta(ptr)) {
edit->segment_cache[i] = 0xff;
have_meta = true;
continue;
}
base_seg = ops->get_object_key_chunk(
assoc_array_ptr_to_leaf(ptr), level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
}
if (have_meta) {
pr_devel("have meta\n");
goto split_node;
}
/* The node contains only leaves */
dissimilarity = 0;
base_seg = edit->segment_cache[0];
for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++)
dissimilarity |= edit->segment_cache[i] ^ base_seg;
pr_devel("only leaves; dissimilarity=%lx\n", dissimilarity);
if ((dissimilarity & ASSOC_ARRAY_FAN_MASK) == 0) {
/* The old leaves all cluster in the same slot. We will need
* to insert a shortcut if the new node wants to cluster with them.
*/
if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
goto all_leaves_cluster_together;
/* Otherwise we can just insert a new node ahead of the old
* one.
*/
goto present_leaves_cluster_but_not_new_leaf;
}
split_node:
pr_devel("split node\n");
/* We need to split the current node; we know that the node doesn't
* simply contain a full set of leaves that cluster together (it
* contains meta pointers and/or non-clustering leaves).
*
* We need to expel at least two leaves out of a set consisting of the
* leaves in the node and the new leaf.
*
* We need a new node (n0) to replace the current one and a new node to
* take the expelled nodes (n1).
*/
edit->set[0].to = assoc_array_node_to_ptr(new_n0);
new_n0->back_pointer = node->back_pointer;
new_n0->parent_slot = node->parent_slot;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = -1; /* Need to calculate this */
do_split_node:
pr_devel("do_split_node\n");
new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
new_n1->nr_leaves_on_branch = 0;
/* Begin by finding two matching leaves. There have to be at least two
* that match - even if there are meta pointers - because any leaf that
* would match a slot with a meta pointer in it must be somewhere
* behind that meta pointer and cannot be here. Further, given N
* remaining leaf slots, we now have N+1 leaves to go in them.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
slot = edit->segment_cache[i];
if (slot != 0xff)
for (j = i + 1; j < ASSOC_ARRAY_FAN_OUT + 1; j++)
if (edit->segment_cache[j] == slot)
goto found_slot_for_multiple_occupancy;
}
found_slot_for_multiple_occupancy:
pr_devel("same slot: %x %x [%02x]\n", i, j, slot);
BUG_ON(i >= ASSOC_ARRAY_FAN_OUT);
BUG_ON(j >= ASSOC_ARRAY_FAN_OUT + 1);
BUG_ON(slot >= ASSOC_ARRAY_FAN_OUT);
new_n1->parent_slot = slot;
/* Metadata pointers cannot change slot */
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
if (assoc_array_ptr_is_meta(node->slots[i]))
new_n0->slots[i] = node->slots[i];
else
new_n0->slots[i] = NULL;
BUG_ON(new_n0->slots[slot] != NULL);
new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1);
/* Filter the leaf pointers between the new nodes */
free_slot = -1;
next_slot = 0;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
if (assoc_array_ptr_is_meta(node->slots[i]))
continue;
if (edit->segment_cache[i] == slot) {
new_n1->slots[next_slot++] = node->slots[i];
new_n1->nr_leaves_on_branch++;
} else {
do {
free_slot++;
} while (new_n0->slots[free_slot] != NULL);
new_n0->slots[free_slot] = node->slots[i];
}
}
pr_devel("filtered: f=%x n=%x\n", free_slot, next_slot);
if (edit->segment_cache[ASSOC_ARRAY_FAN_OUT] != slot) {
do {
free_slot++;
} while (new_n0->slots[free_slot] != NULL);
edit->leaf_p = &new_n0->slots[free_slot];
edit->adjust_count_on = new_n0;
} else {
edit->leaf_p = &new_n1->slots[next_slot++];
edit->adjust_count_on = new_n1;
}
BUG_ON(next_slot <= 1);
edit->set_backpointers_to = assoc_array_node_to_ptr(new_n0);
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
if (edit->segment_cache[i] == 0xff) {
ptr = node->slots[i];
BUG_ON(assoc_array_ptr_is_leaf(ptr));
if (assoc_array_ptr_is_node(ptr)) {
side = assoc_array_ptr_to_node(ptr);
edit->set_backpointers[i] = &side->back_pointer;
} else {
shortcut = assoc_array_ptr_to_shortcut(ptr);
edit->set_backpointers[i] = &shortcut->back_pointer;
}
}
}
ptr = node->back_pointer;
if (!ptr)
edit->set[0].ptr = &edit->array->root;
else if (assoc_array_ptr_is_node(ptr))
edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot];
else
edit->set[0].ptr = &assoc_array_ptr_to_shortcut(ptr)->next_node;
edit->excised_meta[0] = assoc_array_node_to_ptr(node);
pr_devel("<--%s() = ok [split node]\n", __func__);
return true;
present_leaves_cluster_but_not_new_leaf:
/* All the old leaves cluster in the same slot, but the new leaf wants
* to go into a different slot, so we create a new node to hold the new
* leaf and a pointer to a new node holding all the old leaves.
*/
pr_devel("present leaves cluster but not new leaf\n");
new_n0->back_pointer = node->back_pointer;
new_n0->parent_slot = node->parent_slot;
new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = edit->segment_cache[0];
new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
edit->adjust_count_on = new_n0;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
new_n1->slots[i] = node->slots[i];
new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
edit->set[0].to = assoc_array_node_to_ptr(new_n0);
edit->excised_meta[0] = assoc_array_node_to_ptr(node);
pr_devel("<--%s() = ok [insert node before]\n", __func__);
return true;
all_leaves_cluster_together:
/* All the leaves, new and old, want to cluster together in this node
* in the same slot, so we have to replace this node with a shortcut to
* skip over the identical parts of the key and then place a pair of
* nodes, one inside the other, at the end of the shortcut and
* distribute the keys between them.
*
* Firstly we need to work out where the leaves start diverging as a
* bit position into their keys so that we know how big the shortcut
* needs to be.
*
* We only need to make a single pass of N of the N+1 leaves because if
* any keys differ between themselves at bit X then at least one of
* them must also differ with the base key at bit X or before.
*/
pr_devel("all leaves cluster together\n");
diff = INT_MAX;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]),
index_key);
if (x < diff) {
BUG_ON(x < 0);
diff = x;
}
}
BUG_ON(diff == INT_MAX);
BUG_ON(diff < level + ASSOC_ARRAY_LEVEL_STEP);
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
keylen * sizeof(unsigned long), GFP_KERNEL);
if (!new_s0)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
new_s0->back_pointer = node->back_pointer;
new_s0->parent_slot = node->parent_slot;
new_s0->next_node = assoc_array_node_to_ptr(new_n0);
new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
new_n0->parent_slot = 0;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = -1; /* Need to calculate this */
new_s0->skip_to_level = level = diff & ~ASSOC_ARRAY_LEVEL_STEP_MASK;
pr_devel("skip_to_level = %d [diff %d]\n", level, diff);
BUG_ON(level <= 0);
for (i = 0; i < keylen; i++)
new_s0->index_key[i] =
ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
new_s0->index_key[keylen - 1] &= ~blank;
/* This now reduces to a node splitting exercise for which we'll need
* to regenerate the disparity table.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
base_seg = ops->get_object_key_chunk(assoc_array_ptr_to_leaf(ptr),
level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
}
base_seg = ops->get_key_chunk(index_key, level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = base_seg & ASSOC_ARRAY_FAN_MASK;
goto do_split_node;
}
| 107,205,147,767,459,400,000,000,000,000,000,000,000 | assoc_array.c | 22,582,493,732,100,015,000,000,000,000,000,000,000 | [
"CWE-125"
] | CVE-2016-7914 | The assoc_array_insert_into_terminal_node function in lib/assoc_array.c in the Linux kernel before 4.5.3 does not check whether a slot is a leaf, which allows local users to obtain sensitive information from kernel memory or cause a denial of service (invalid pointer dereference and out-of-bounds read) via an application that uses associative-array data structures, as demonstrated by the keyutils test suite. | https://nvd.nist.gov/vuln/detail/CVE-2016-7914 |
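The one-line change described above lands in the first loop of assoc_array_insert_into_terminal_node(): a slot may hold a node or shortcut pointer, and only genuine leaves may be handed to compare_object(). A sketch using the assoc_array_ptr_is_leaf() predicate already used later in the function:

		/* Meta pointers (nodes, shortcuts) are not objects; comparing
		 * them as if they were reads past the real structure. */
		if (assoc_array_ptr_is_leaf(ptr) &&
		    ops->compare_object(assoc_array_ptr_to_leaf(ptr),
					index_key)) {
			pr_devel("replace in slot %d\n", i);
			edit->leaf_p = &node->slots[i];
			edit->dead_leaf = node->slots[i];
			pr_devel("<--%s() = ok [replace]\n", __func__);
			return true;
		}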
1,728 | linux | 8dfbcc4351a0b6d2f2d77f367552f48ffefafe18 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/8dfbcc4351a0b6d2f2d77f367552f48ffefafe18 | [media] xc2028: avoid use after free
If struct xc2028_config is passed without a firmware name,
the following trouble may happen:
[11009.907205] xc2028 5-0061: type set to XCeive xc2028/xc3028 tuner
[11009.907491] ==================================================================
[11009.907750] BUG: KASAN: use-after-free in strcmp+0x96/0xb0 at addr ffff8803bd78ab40
[11009.907992] Read of size 1 by task modprobe/28992
[11009.907994] =============================================================================
[11009.907997] BUG kmalloc-16 (Tainted: G W ): kasan: bad access detected
[11009.907999] -----------------------------------------------------------------------------
[11009.908008] INFO: Allocated in xhci_urb_enqueue+0x214/0x14c0 [xhci_hcd] age=0 cpu=3 pid=28992
[11009.908012] ___slab_alloc+0x581/0x5b0
[11009.908014] __slab_alloc+0x51/0x90
[11009.908017] __kmalloc+0x27b/0x350
[11009.908022] xhci_urb_enqueue+0x214/0x14c0 [xhci_hcd]
[11009.908026] usb_hcd_submit_urb+0x1e8/0x1c60
[11009.908029] usb_submit_urb+0xb0e/0x1200
[11009.908032] usb_serial_generic_write_start+0xb6/0x4c0
[11009.908035] usb_serial_generic_write+0x92/0xc0
[11009.908039] usb_console_write+0x38a/0x560
[11009.908045] call_console_drivers.constprop.14+0x1ee/0x2c0
[11009.908051] console_unlock+0x40d/0x900
[11009.908056] vprintk_emit+0x4b4/0x830
[11009.908061] vprintk_default+0x1f/0x30
[11009.908064] printk+0x99/0xb5
[11009.908067] kasan_report_error+0x10a/0x550
[11009.908070] __asan_report_load1_noabort+0x43/0x50
[11009.908074] INFO: Freed in xc2028_set_config+0x90/0x630 [tuner_xc2028] age=1 cpu=3 pid=28992
[11009.908077] __slab_free+0x2ec/0x460
[11009.908080] kfree+0x266/0x280
[11009.908083] xc2028_set_config+0x90/0x630 [tuner_xc2028]
[11009.908086] xc2028_attach+0x310/0x8a0 [tuner_xc2028]
[11009.908090] em28xx_attach_xc3028.constprop.7+0x1f9/0x30d [em28xx_dvb]
[11009.908094] em28xx_dvb_init.part.3+0x8e4/0x5cf4 [em28xx_dvb]
[11009.908098] em28xx_dvb_init+0x81/0x8a [em28xx_dvb]
[11009.908101] em28xx_register_extension+0xd9/0x190 [em28xx]
[11009.908105] em28xx_dvb_register+0x10/0x1000 [em28xx_dvb]
[11009.908108] do_one_initcall+0x141/0x300
[11009.908111] do_init_module+0x1d0/0x5ad
[11009.908114] load_module+0x6666/0x9ba0
[11009.908117] SyS_finit_module+0x108/0x130
[11009.908120] entry_SYSCALL_64_fastpath+0x16/0x76
[11009.908123] INFO: Slab 0xffffea000ef5e280 objects=25 used=25 fp=0x (null) flags=0x2ffff8000004080
[11009.908126] INFO: Object 0xffff8803bd78ab40 @offset=2880 fp=0x0000000000000001
[11009.908130] Bytes b4 ffff8803bd78ab30: 01 00 00 00 2a 07 00 00 9d 28 00 00 01 00 00 00 ....*....(......
[11009.908133] Object ffff8803bd78ab40: 01 00 00 00 00 00 00 00 b0 1d c3 6a 00 88 ff ff ...........j....
[11009.908137] CPU: 3 PID: 28992 Comm: modprobe Tainted: G B W 4.5.0-rc1+ #43
[11009.908140] Hardware name: /NUC5i7RYB, BIOS RYBDWi35.86A.0350.2015.0812.1722 08/12/2015
[11009.908142] ffff8803bd78a000 ffff8802c273f1b8 ffffffff81932007 ffff8803c6407a80
[11009.908148] ffff8802c273f1e8 ffffffff81556759 ffff8803c6407a80 ffffea000ef5e280
[11009.908153] ffff8803bd78ab40 dffffc0000000000 ffff8802c273f210 ffffffff8155ccb4
[11009.908158] Call Trace:
[11009.908162] [<ffffffff81932007>] dump_stack+0x4b/0x64
[11009.908165] [<ffffffff81556759>] print_trailer+0xf9/0x150
[11009.908168] [<ffffffff8155ccb4>] object_err+0x34/0x40
[11009.908171] [<ffffffff8155f260>] kasan_report_error+0x230/0x550
[11009.908175] [<ffffffff81237d71>] ? trace_hardirqs_off_caller+0x21/0x290
[11009.908179] [<ffffffff8155e926>] ? kasan_unpoison_shadow+0x36/0x50
[11009.908182] [<ffffffff8155f5c3>] __asan_report_load1_noabort+0x43/0x50
[11009.908185] [<ffffffff8155ea00>] ? __asan_register_globals+0x50/0xa0
[11009.908189] [<ffffffff8194cea6>] ? strcmp+0x96/0xb0
[11009.908192] [<ffffffff8194cea6>] strcmp+0x96/0xb0
[11009.908196] [<ffffffffa13ba4ac>] xc2028_set_config+0x15c/0x630 [tuner_xc2028]
[11009.908200] [<ffffffffa13bac90>] xc2028_attach+0x310/0x8a0 [tuner_xc2028]
[11009.908203] [<ffffffff8155ea78>] ? memset+0x28/0x30
[11009.908206] [<ffffffffa13ba980>] ? xc2028_set_config+0x630/0x630 [tuner_xc2028]
[11009.908211] [<ffffffffa157a59a>] em28xx_attach_xc3028.constprop.7+0x1f9/0x30d [em28xx_dvb]
[11009.908215] [<ffffffffa157aa2a>] ? em28xx_dvb_init.part.3+0x37c/0x5cf4 [em28xx_dvb]
[11009.908219] [<ffffffffa157a3a1>] ? hauppauge_hvr930c_init+0x487/0x487 [em28xx_dvb]
[11009.908222] [<ffffffffa01795ac>] ? lgdt330x_attach+0x1cc/0x370 [lgdt330x]
[11009.908226] [<ffffffffa01793e0>] ? i2c_read_demod_bytes.isra.2+0x210/0x210 [lgdt330x]
[11009.908230] [<ffffffff812e87d0>] ? ref_module.part.15+0x10/0x10
[11009.908233] [<ffffffff812e56e0>] ? module_assert_mutex_or_preempt+0x80/0x80
[11009.908238] [<ffffffffa157af92>] em28xx_dvb_init.part.3+0x8e4/0x5cf4 [em28xx_dvb]
[11009.908242] [<ffffffffa157a6ae>] ? em28xx_attach_xc3028.constprop.7+0x30d/0x30d [em28xx_dvb]
[11009.908245] [<ffffffff8195222d>] ? string+0x14d/0x1f0
[11009.908249] [<ffffffff8195381f>] ? symbol_string+0xff/0x1a0
[11009.908253] [<ffffffff81953720>] ? uuid_string+0x6f0/0x6f0
[11009.908257] [<ffffffff811a775e>] ? __kernel_text_address+0x7e/0xa0
[11009.908260] [<ffffffff8104b02f>] ? print_context_stack+0x7f/0xf0
[11009.908264] [<ffffffff812e9846>] ? __module_address+0xb6/0x360
[11009.908268] [<ffffffff8137fdc9>] ? is_ftrace_trampoline+0x99/0xe0
[11009.908271] [<ffffffff811a775e>] ? __kernel_text_address+0x7e/0xa0
[11009.908275] [<ffffffff81240a70>] ? debug_check_no_locks_freed+0x290/0x290
[11009.908278] [<ffffffff8104a24b>] ? dump_trace+0x11b/0x300
[11009.908282] [<ffffffffa13e8143>] ? em28xx_register_extension+0x23/0x190 [em28xx]
[11009.908285] [<ffffffff81237d71>] ? trace_hardirqs_off_caller+0x21/0x290
[11009.908289] [<ffffffff8123ff56>] ? trace_hardirqs_on_caller+0x16/0x590
[11009.908292] [<ffffffff812404dd>] ? trace_hardirqs_on+0xd/0x10
[11009.908296] [<ffffffffa13e8143>] ? em28xx_register_extension+0x23/0x190 [em28xx]
[11009.908299] [<ffffffff822dcbb0>] ? mutex_trylock+0x400/0x400
[11009.908302] [<ffffffff810021a1>] ? do_one_initcall+0x131/0x300
[11009.908306] [<ffffffff81296dc7>] ? call_rcu_sched+0x17/0x20
[11009.908309] [<ffffffff8159e708>] ? put_object+0x48/0x70
[11009.908314] [<ffffffffa1579f11>] em28xx_dvb_init+0x81/0x8a [em28xx_dvb]
[11009.908317] [<ffffffffa13e81f9>] em28xx_register_extension+0xd9/0x190 [em28xx]
[11009.908320] [<ffffffffa0150000>] ? 0xffffffffa0150000
[11009.908324] [<ffffffffa0150010>] em28xx_dvb_register+0x10/0x1000 [em28xx_dvb]
[11009.908327] [<ffffffff810021b1>] do_one_initcall+0x141/0x300
[11009.908330] [<ffffffff81002070>] ? try_to_run_init_process+0x40/0x40
[11009.908333] [<ffffffff8123ff56>] ? trace_hardirqs_on_caller+0x16/0x590
[11009.908337] [<ffffffff8155e926>] ? kasan_unpoison_shadow+0x36/0x50
[11009.908340] [<ffffffff8155e926>] ? kasan_unpoison_shadow+0x36/0x50
[11009.908343] [<ffffffff8155e926>] ? kasan_unpoison_shadow+0x36/0x50
[11009.908346] [<ffffffff8155ea37>] ? __asan_register_globals+0x87/0xa0
[11009.908350] [<ffffffff8144da7b>] do_init_module+0x1d0/0x5ad
[11009.908353] [<ffffffff812f2626>] load_module+0x6666/0x9ba0
[11009.908356] [<ffffffff812e9c90>] ? symbol_put_addr+0x50/0x50
[11009.908361] [<ffffffffa1580037>] ? em28xx_dvb_init.part.3+0x5989/0x5cf4 [em28xx_dvb]
[11009.908366] [<ffffffff812ebfc0>] ? module_frob_arch_sections+0x20/0x20
[11009.908369] [<ffffffff815bc940>] ? open_exec+0x50/0x50
[11009.908374] [<ffffffff811671bb>] ? ns_capable+0x5b/0xd0
[11009.908377] [<ffffffff812f5e58>] SyS_finit_module+0x108/0x130
[11009.908379] [<ffffffff812f5d50>] ? SyS_init_module+0x1f0/0x1f0
[11009.908383] [<ffffffff81004044>] ? lockdep_sys_exit_thunk+0x12/0x14
[11009.908394] [<ffffffff822e6936>] entry_SYSCALL_64_fastpath+0x16/0x76
[11009.908396] Memory state around the buggy address:
[11009.908398] ffff8803bd78aa00: 00 00 fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[11009.908401] ffff8803bd78aa80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[11009.908403] >ffff8803bd78ab00: fc fc fc fc fc fc fc fc 00 00 fc fc fc fc fc fc
[11009.908405] ^
[11009.908407] ffff8803bd78ab80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[11009.908409] ffff8803bd78ac00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[11009.908411] ==================================================================
In order to avoid it, let's set the cached value of the firmware
name to NULL after freeing it. While here, return an error if
the memory allocation fails.
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 1 | static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
{
struct xc2028_data *priv = fe->tuner_priv;
struct xc2028_ctrl *p = priv_cfg;
int rc = 0;
tuner_dbg("%s called\n", __func__);
mutex_lock(&priv->lock);
/*
* Copy the config data.
* For the firmware name, keep a local copy of the string,
* in order to avoid troubles during device release.
*/
kfree(priv->ctrl.fname);
memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
if (p->fname) {
priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
if (priv->ctrl.fname == NULL)
rc = -ENOMEM;
}
/*
* If firmware name changed, frees firmware. As free_firmware will
* reset the status to NO_FIRMWARE, this forces a new request_firmware
*/
if (!firmware_name[0] && p->fname &&
priv->fname && strcmp(p->fname, priv->fname))
free_firmware(priv);
if (priv->ctrl.max_len < 9)
priv->ctrl.max_len = 13;
if (priv->state == XC2028_NO_FIRMWARE) {
if (!firmware_name[0])
priv->fname = priv->ctrl.fname;
else
priv->fname = firmware_name;
rc = request_firmware_nowait(THIS_MODULE, 1,
priv->fname,
priv->i2c_props.adap->dev.parent,
GFP_KERNEL,
fe, load_firmware_cb);
if (rc < 0) {
tuner_err("Failed to request firmware %s\n",
priv->fname);
priv->state = XC2028_NODEV;
} else
priv->state = XC2028_WAITING_FIRMWARE;
}
mutex_unlock(&priv->lock);
return rc;
}
| 55,587,506,258,633,540,000,000,000,000,000,000,000 | tuner-xc2028.c | 144,392,756,984,583,100,000,000,000,000,000,000,000 | [
"CWE-416"
] | CVE-2016-7913 | The xc2028_set_config function in drivers/media/tuners/tuner-xc2028.c in the Linux kernel before 4.6 allows local users to gain privileges or cause a denial of service (use-after-free) via vectors involving omission of the firmware name from a certain data structure. | https://nvd.nist.gov/vuln/detail/CVE-2016-7913 |
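The pattern the commit message above asks for — never leave a cached copy of the firmware name dangling, and fail cleanly when duplication fails — can be sketched against the function shown. This is the general shape only, not the verbatim upstream diff (which reworks the function more thoroughly), and the unlock label is hypothetical:

	/* Drop every reference to the old name before freeing it: priv->fname
	 * may still alias priv->ctrl.fname from an earlier call. */
	if (priv->fname == priv->ctrl.fname)
		priv->fname = NULL;
	kfree(priv->ctrl.fname);
	memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
	if (p->fname) {
		priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
		if (priv->ctrl.fname == NULL) {
			rc = -ENOMEM;
			goto unlock;	/* hypothetical label placed just before
					 * the existing mutex_unlock(&priv->lock) */
		}
	}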
1,729 | linux | 38740a5b87d53ceb89eb2c970150f6e94e00373a | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/38740a5b87d53ceb89eb2c970150f6e94e00373a | usb: gadget: f_fs: Fix use-after-free
When using asynchronous read or write operations on the USB endpoints the
issuer of the IO request is notified by calling the ki_complete() callback
of the submitted kiocb when the URB has been completed.
Calling this ki_complete() callback will free kiocb. Make sure that the
structure is no longer accessed beyond that point, otherwise undefined
behaviour might occur.
Fixes: 2e4c7553cd6f ("usb: gadget: f_fs: add aio support")
Cc: <stable@vger.kernel.org> # v3.15+
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com> | 1 | static void ffs_user_copy_worker(struct work_struct *work)
{
struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
work);
int ret = io_data->req->status ? io_data->req->status :
io_data->req->actual;
if (io_data->read && ret > 0) {
use_mm(io_data->mm);
ret = copy_to_iter(io_data->buf, ret, &io_data->data);
if (iov_iter_count(&io_data->data))
ret = -EFAULT;
unuse_mm(io_data->mm);
}
io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
if (io_data->ffs->ffs_eventfd &&
!(io_data->kiocb->ki_flags & IOCB_EVENTFD))
eventfd_signal(io_data->ffs->ffs_eventfd, 1);
usb_ep_free_request(io_data->ep, io_data->req);
io_data->kiocb->private = NULL;
if (io_data->read)
kfree(io_data->to_free);
kfree(io_data->buf);
kfree(io_data);
}
| 325,805,134,765,406,900,000,000,000,000,000,000,000 | f_fs.c | 210,013,898,798,065,750,000,000,000,000,000,000,000 | [
"CWE-416"
] | CVE-2016-7912 | Use-after-free vulnerability in the ffs_user_copy_worker function in drivers/usb/gadget/function/f_fs.c in the Linux kernel before 4.5.3 allows local users to gain privileges by accessing an I/O data structure after a certain callback call. | https://nvd.nist.gov/vuln/detail/CVE-2016-7912 |
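The rule stated above — nothing may touch the kiocb once ki_complete() has freed it — means caching whatever is still needed before the completion call and dropping every later access. A sketch of the worker with that ordering, reusing the names from the function above:

static void ffs_user_copy_worker(struct work_struct *work)
{
	struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
						   work);
	int ret = io_data->req->status ? io_data->req->status :
					 io_data->req->actual;
	/* Snapshot the flag now: ki_complete() may free the kiocb. */
	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;

	if (io_data->read && ret > 0) {
		use_mm(io_data->mm);
		ret = copy_to_iter(io_data->buf, ret, &io_data->data);
		if (iov_iter_count(&io_data->data))
			ret = -EFAULT;
		unuse_mm(io_data->mm);
	}

	io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);

	/* No io_data->kiocb dereference past this point. */
	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
		eventfd_signal(io_data->ffs->ffs_eventfd, 1);

	usb_ep_free_request(io_data->ep, io_data->req);
	if (io_data->read)
		kfree(io_data->to_free);
	kfree(io_data->buf);
	kfree(io_data);
}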
1,730 | linux | 8ba8682107ee2ca3347354e018865d8e1967c5f4 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/8ba8682107ee2ca3347354e018865d8e1967c5f4 | block: fix use-after-free in sys_ioprio_get()
get_task_ioprio() accesses the task->io_context without holding the task
lock and thus can race with exit_io_context(), leading to a
use-after-free. The reproducer below hits this within a few seconds on
my 4-core QEMU VM:
#define _GNU_SOURCE
#include <assert.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/wait.h>
int main(int argc, char **argv)
{
pid_t pid, child;
long nproc, i;
/* ioprio_set(IOPRIO_WHO_PROCESS, 0, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); */
syscall(SYS_ioprio_set, 1, 0, 0x6000);
nproc = sysconf(_SC_NPROCESSORS_ONLN);
for (i = 0; i < nproc; i++) {
pid = fork();
assert(pid != -1);
if (pid == 0) {
for (;;) {
pid = fork();
assert(pid != -1);
if (pid == 0) {
_exit(0);
} else {
child = wait(NULL);
assert(child == pid);
}
}
}
pid = fork();
assert(pid != -1);
if (pid == 0) {
for (;;) {
/* ioprio_get(IOPRIO_WHO_PGRP, 0); */
syscall(SYS_ioprio_get, 2, 0);
}
}
}
for (;;) {
/* ioprio_get(IOPRIO_WHO_PGRP, 0); */
syscall(SYS_ioprio_get, 2, 0);
}
return 0;
}
This gets us KASAN dumps like this:
[ 35.526914] ==================================================================
[ 35.530009] BUG: KASAN: out-of-bounds in get_task_ioprio+0x7b/0x90 at addr ffff880066f34e6c
[ 35.530009] Read of size 2 by task ioprio-gpf/363
[ 35.530009] =============================================================================
[ 35.530009] BUG blkdev_ioc (Not tainted): kasan: bad access detected
[ 35.530009] -----------------------------------------------------------------------------
[ 35.530009] Disabling lock debugging due to kernel taint
[ 35.530009] INFO: Allocated in create_task_io_context+0x2b/0x370 age=0 cpu=0 pid=360
[ 35.530009] ___slab_alloc+0x55d/0x5a0
[ 35.530009] __slab_alloc.isra.20+0x2b/0x40
[ 35.530009] kmem_cache_alloc_node+0x84/0x200
[ 35.530009] create_task_io_context+0x2b/0x370
[ 35.530009] get_task_io_context+0x92/0xb0
[ 35.530009] copy_process.part.8+0x5029/0x5660
[ 35.530009] _do_fork+0x155/0x7e0
[ 35.530009] SyS_clone+0x19/0x20
[ 35.530009] do_syscall_64+0x195/0x3a0
[ 35.530009] return_from_SYSCALL_64+0x0/0x6a
[ 35.530009] INFO: Freed in put_io_context+0xe7/0x120 age=0 cpu=0 pid=1060
[ 35.530009] __slab_free+0x27b/0x3d0
[ 35.530009] kmem_cache_free+0x1fb/0x220
[ 35.530009] put_io_context+0xe7/0x120
[ 35.530009] put_io_context_active+0x238/0x380
[ 35.530009] exit_io_context+0x66/0x80
[ 35.530009] do_exit+0x158e/0x2b90
[ 35.530009] do_group_exit+0xe5/0x2b0
[ 35.530009] SyS_exit_group+0x1d/0x20
[ 35.530009] entry_SYSCALL_64_fastpath+0x1a/0xa4
[ 35.530009] INFO: Slab 0xffffea00019bcd00 objects=20 used=4 fp=0xffff880066f34ff0 flags=0x1fffe0000004080
[ 35.530009] INFO: Object 0xffff880066f34e58 @offset=3672 fp=0x0000000000000001
[ 35.530009] ==================================================================
Fix it by grabbing the task lock while we poke at the io_context.
Cc: stable@vger.kernel.org
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com> | 1 | static int get_task_ioprio(struct task_struct *p)
{
int ret;
ret = security_task_getioprio(p);
if (ret)
goto out;
ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
if (p->io_context)
ret = p->io_context->ioprio;
out:
return ret;
}
| 258,186,631,754,045,350,000,000,000,000,000,000,000 | ioprio.c | 20,495,164,370,427,705,000,000,000,000,000,000,000 | [
"CWE-416"
] | CVE-2016-7911 | Race condition in the get_task_ioprio function in block/ioprio.c in the Linux kernel before 4.6.6 allows local users to gain privileges or cause a denial of service (use-after-free) via a crafted ioprio_get system call. | https://nvd.nist.gov/vuln/detail/CVE-2016-7911 |
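The fix described above brackets the io_context access with the task lock so it cannot race with exit_io_context(); a sketch of the locked read inside get_task_ioprio():

	ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
	task_lock(p);		/* serialises against exit_io_context() */
	if (p->io_context)
		ret = p->io_context->ioprio;
	task_unlock(p);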
1,731 | linux | 77da160530dd1dc94f6ae15a981f24e5f0021e84 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/77da160530dd1dc94f6ae15a981f24e5f0021e84 | block: fix use-after-free in seq file
I got a KASAN report of use-after-free:
==================================================================
BUG: KASAN: use-after-free in klist_iter_exit+0x61/0x70 at addr ffff8800b6581508
Read of size 8 by task trinity-c1/315
=============================================================================
BUG kmalloc-32 (Not tainted): kasan: bad access detected
-----------------------------------------------------------------------------
Disabling lock debugging due to kernel taint
INFO: Allocated in disk_seqf_start+0x66/0x110 age=144 cpu=1 pid=315
___slab_alloc+0x4f1/0x520
__slab_alloc.isra.58+0x56/0x80
kmem_cache_alloc_trace+0x260/0x2a0
disk_seqf_start+0x66/0x110
traverse+0x176/0x860
seq_read+0x7e3/0x11a0
proc_reg_read+0xbc/0x180
do_loop_readv_writev+0x134/0x210
do_readv_writev+0x565/0x660
vfs_readv+0x67/0xa0
do_preadv+0x126/0x170
SyS_preadv+0xc/0x10
do_syscall_64+0x1a1/0x460
return_from_SYSCALL_64+0x0/0x6a
INFO: Freed in disk_seqf_stop+0x42/0x50 age=160 cpu=1 pid=315
__slab_free+0x17a/0x2c0
kfree+0x20a/0x220
disk_seqf_stop+0x42/0x50
traverse+0x3b5/0x860
seq_read+0x7e3/0x11a0
proc_reg_read+0xbc/0x180
do_loop_readv_writev+0x134/0x210
do_readv_writev+0x565/0x660
vfs_readv+0x67/0xa0
do_preadv+0x126/0x170
SyS_preadv+0xc/0x10
do_syscall_64+0x1a1/0x460
return_from_SYSCALL_64+0x0/0x6a
CPU: 1 PID: 315 Comm: trinity-c1 Tainted: G B 4.7.0+ #62
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014
ffffea0002d96000 ffff880119b9f918 ffffffff81d6ce81 ffff88011a804480
ffff8800b6581500 ffff880119b9f948 ffffffff8146c7bd ffff88011a804480
ffffea0002d96000 ffff8800b6581500 fffffffffffffff4 ffff880119b9f970
Call Trace:
[<ffffffff81d6ce81>] dump_stack+0x65/0x84
[<ffffffff8146c7bd>] print_trailer+0x10d/0x1a0
[<ffffffff814704ff>] object_err+0x2f/0x40
[<ffffffff814754d1>] kasan_report_error+0x221/0x520
[<ffffffff8147590e>] __asan_report_load8_noabort+0x3e/0x40
[<ffffffff83888161>] klist_iter_exit+0x61/0x70
[<ffffffff82404389>] class_dev_iter_exit+0x9/0x10
[<ffffffff81d2e8ea>] disk_seqf_stop+0x3a/0x50
[<ffffffff8151f812>] seq_read+0x4b2/0x11a0
[<ffffffff815f8fdc>] proc_reg_read+0xbc/0x180
[<ffffffff814b24e4>] do_loop_readv_writev+0x134/0x210
[<ffffffff814b4c45>] do_readv_writev+0x565/0x660
[<ffffffff814b8a17>] vfs_readv+0x67/0xa0
[<ffffffff814b8de6>] do_preadv+0x126/0x170
[<ffffffff814b92ec>] SyS_preadv+0xc/0x10
This problem can occur in the following situation:
open()
- pread()
- .seq_start()
- iter = kmalloc() // succeeds
- seqf->private = iter
- .seq_stop()
- kfree(seqf->private)
- pread()
- .seq_start()
- iter = kmalloc() // fails
- .seq_stop()
- class_dev_iter_exit(seqf->private) // boom! old pointer
As the comment in disk_seqf_stop() says, stop is called even if start
failed, so we need to reinitialise the private pointer to NULL when seq
iteration stops.
An alternative would be to set the private pointer to NULL when the
kmalloc() in disk_seqf_start() fails.
Cc: stable@vger.kernel.org
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@fb.com> | 1 | static void disk_seqf_stop(struct seq_file *seqf, void *v)
{
struct class_dev_iter *iter = seqf->private;
/* stop is called even after start failed :-( */
if (iter) {
class_dev_iter_exit(iter);
kfree(iter);
}
}
| 299,775,501,717,424,660,000,000,000,000,000,000,000 | genhd.c | 241,520,307,378,534,750,000,000,000,000,000,000,000 | [
"CWE-416"
] | CVE-2016-7910 | Use-after-free vulnerability in the disk_seqf_stop function in block/genhd.c in the Linux kernel before 4.7.1 allows local users to gain privileges by leveraging the execution of a certain stop operation even if the corresponding start operation had failed. | https://nvd.nist.gov/vuln/detail/CVE-2016-7910 |
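As the analysis above says, the remedy is to forget the freed iterator, so a later ->stop after a failed ->start cannot hand the stale pointer back to class_dev_iter_exit(); a sketch:

static void disk_seqf_stop(struct seq_file *seqf, void *v)
{
	struct class_dev_iter *iter = seqf->private;

	/* stop is called even after start failed :-( */
	if (iter) {
		class_dev_iter_exit(iter);
		kfree(iter);
		seqf->private = NULL;	/* don't leave a dangling pointer behind */
	}
}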
1,732 | libgd | 40bec0f38f50e8510f5bb71a82f516d46facde03 | https://github.com/libgd/libgd | https://github.com/libgd/libgd/commit/40bec0f38f50e8510f5bb71a82f516d46facde03 | Merge branch 'pull-request/296' | 1 | BGD_DECLARE(void) gdImageWebpCtx (gdImagePtr im, gdIOCtx * outfile, int quality)
{
uint8_t *argb;
int x, y;
uint8_t *p;
uint8_t *out;
size_t out_size;
if (im == NULL) {
return;
}
if (!gdImageTrueColor(im)) {
gd_error("Paletter image not supported by webp");
return;
}
if (quality == -1) {
quality = 80;
}
argb = (uint8_t *)gdMalloc(gdImageSX(im) * 4 * gdImageSY(im));
if (!argb) {
return;
}
p = argb;
for (y = 0; y < gdImageSY(im); y++) {
for (x = 0; x < gdImageSX(im); x++) {
register int c;
register char a;
c = im->tpixels[y][x];
a = gdTrueColorGetAlpha(c);
if (a == 127) {
a = 0;
} else {
a = 255 - ((a << 1) + (a >> 6));
}
*(p++) = gdTrueColorGetRed(c);
*(p++) = gdTrueColorGetGreen(c);
*(p++) = gdTrueColorGetBlue(c);
*(p++) = a;
}
}
out_size = WebPEncodeRGBA(argb, gdImageSX(im), gdImageSY(im), gdImageSX(im) * 4, quality, &out);
if (out_size == 0) {
gd_error("gd-webp encoding failed");
goto freeargb;
}
gdPutBuf(out, out_size, outfile);
free(out);
freeargb:
gdFree(argb);
}
| 134,338,295,145,600,240,000,000,000,000,000,000,000 | None | null | [
"CWE-190"
] | CVE-2016-7568 | Integer overflow in the gdImageWebpCtx function in gd_webp.c in the GD Graphics Library (aka libgd) through 2.2.3, as used in PHP through 7.0.11, allows remote attackers to cause a denial of service (heap-based buffer overflow) or possibly have unspecified other impact via crafted imagewebp and imagedestroy calls. | https://nvd.nist.gov/vuln/detail/CVE-2016-7568 |
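The NVD text above pins the problem on gdImageSX(im) * 4 * gdImageSY(im) wrapping before gdMalloc() sizes the buffer. A sketch of the kind of guard that closes this, assuming the overflow2() multiplication-overflow helper that libgd uses elsewhere in its tree:

	/* Refuse dimensions whose RGBA buffer size cannot be represented:
	 * neither width * 4 nor (width * 4) * height may overflow int. */
	if (overflow2(gdImageSX(im), 4) ||
	    overflow2(gdImageSX(im) * 4, gdImageSY(im))) {
		return;
	}

	argb = (uint8_t *)gdMalloc(gdImageSX(im) * 4 * gdImageSY(im));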
1,733 | php-src | c4cca4c20e75359c9a13a1f9a36cb7b4e9601d29 | https://github.com/php/php-src | https://github.com/php/php-src/commit/c4cca4c20e75359c9a13a1f9a36cb7b4e9601d29?w=1 | Fix bug #73065: Out-Of-Bounds Read in php_wddx_push_element of wddx.c | 1 | static void php_wddx_push_element(void *user_data, const XML_Char *name, const XML_Char **atts)
{
st_entry ent;
wddx_stack *stack = (wddx_stack *)user_data;
if (!strcmp(name, EL_PACKET)) {
int i;
if (atts) for (i=0; atts[i]; i++) {
if (!strcmp(atts[i], EL_VERSION)) {
/* nothing for now */
}
}
} else if (!strcmp(name, EL_STRING)) {
ent.type = ST_STRING;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_STRING;
Z_STRVAL_P(ent.data) = STR_EMPTY_ALLOC();
Z_STRLEN_P(ent.data) = 0;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_BINARY)) {
ent.type = ST_BINARY;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_STRING;
Z_STRVAL_P(ent.data) = STR_EMPTY_ALLOC();
Z_STRLEN_P(ent.data) = 0;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_CHAR)) {
int i;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_CHAR_CODE) && atts[++i] && atts[i][0]) {
char tmp_buf[2];
snprintf(tmp_buf, sizeof(tmp_buf), "%c", (char)strtol(atts[i], NULL, 16));
php_wddx_process_data(user_data, tmp_buf, strlen(tmp_buf));
break;
}
}
} else if (!strcmp(name, EL_NUMBER)) {
ent.type = ST_NUMBER;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_LONG;
Z_LVAL_P(ent.data) = 0;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_BOOLEAN)) {
int i;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_VALUE) && atts[++i] && atts[i][0]) {
ent.type = ST_BOOLEAN;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_BOOL;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
php_wddx_process_data(user_data, atts[i], strlen(atts[i]));
break;
}
}
} else if (!strcmp(name, EL_NULL)) {
ent.type = ST_NULL;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
ZVAL_NULL(ent.data);
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_ARRAY)) {
ent.type = ST_ARRAY;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
array_init(ent.data);
INIT_PZVAL(ent.data);
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_STRUCT)) {
ent.type = ST_STRUCT;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
array_init(ent.data);
INIT_PZVAL(ent.data);
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_VAR)) {
int i;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_NAME) && atts[++i] && atts[i][0]) {
if (stack->varname) efree(stack->varname);
stack->varname = estrdup(atts[i]);
break;
}
}
} else if (!strcmp(name, EL_RECORDSET)) {
int i;
ent.type = ST_RECORDSET;
SET_STACK_VARNAME;
MAKE_STD_ZVAL(ent.data);
array_init(ent.data);
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], "fieldNames") && atts[++i] && atts[i][0]) {
zval *tmp;
char *key;
char *p1, *p2, *endp;
endp = (char *)atts[i] + strlen(atts[i]);
p1 = (char *)atts[i];
while ((p2 = php_memnstr(p1, ",", sizeof(",")-1, endp)) != NULL) {
key = estrndup(p1, p2 - p1);
MAKE_STD_ZVAL(tmp);
array_init(tmp);
add_assoc_zval_ex(ent.data, key, p2 - p1 + 1, tmp);
p1 = p2 + sizeof(",")-1;
efree(key);
}
if (p1 <= endp) {
MAKE_STD_ZVAL(tmp);
array_init(tmp);
add_assoc_zval_ex(ent.data, p1, endp - p1 + 1, tmp);
}
break;
}
}
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_FIELD)) {
int i;
st_entry ent;
ent.type = ST_FIELD;
ent.varname = NULL;
ent.data = NULL;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_NAME) && atts[++i] && atts[i][0]) {
st_entry *recordset;
zval **field;
if (wddx_stack_top(stack, (void**)&recordset) == SUCCESS &&
recordset->type == ST_RECORDSET &&
zend_hash_find(Z_ARRVAL_P(recordset->data), (char*)atts[i], strlen(atts[i])+1, (void**)&field) == SUCCESS) {
ent.data = *field;
}
break;
}
}
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_DATETIME)) {
ent.type = ST_DATETIME;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_LONG;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
}
}
| 138,332,882,986,529,270,000,000,000,000,000,000,000 | None | null | [
"CWE-119"
] | CVE-2016-7418 | The php_wddx_push_element function in ext/wddx/wddx.c in PHP before 5.6.26 and 7.x before 7.0.11 allows remote attackers to cause a denial of service (invalid pointer access and out-of-bounds read) or possibly have unspecified other impact via an incorrect boolean element in a wddxPacket XML document, leading to mishandling in a wddx_deserialize call. | https://nvd.nist.gov/vuln/detail/CVE-2016-7418 |
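The flaw described in this record boils down to a start-tag handler that fills in a stack entry's value only on one branch, while a later consumer dereferences it unconditionally. A minimal, self-contained C sketch of that bug class plus the defensive check — the struct entry / entry_as_bool names are illustrative, not PHP's actual types:

#include <stdio.h>
#include <string.h>

/* Hypothetical stack entry, loosely modelled on the record above: the
 * "boolean" start-tag handler only fills in `data` when a value
 * attribute is present, so `data` can legitimately be NULL later. */
struct entry {
    int   type;
    char *data;          /* may be NULL if the element carried no value */
};

/* The unsafe consumer would call strcmp(e->data, "true") directly and
 * crash (or read out of bounds) on the NULL/uninitialized case. */
static int entry_as_bool(const struct entry *e)
{
    if (e == NULL || e->data == NULL)   /* the missing defensive check */
        return 0;                       /* treat a missing value as false */
    return strcmp(e->data, "true") == 0;
}

int main(void)
{
    struct entry incomplete = { .type = 1, .data = NULL };
    printf("%d\n", entry_as_bool(&incomplete));   /* prints 0, no crash */
    return 0;
}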
1,737 | php-src | 6d55ba265637d6adf0ba7e9c9ef11187d1ec2f5b | https://github.com/php/php-src | https://github.com/php/php-src/commit/6d55ba265637d6adf0ba7e9c9ef11187d1ec2f5b?w=1 | Fix bug #73007: add locale length check | 1 | PHP_FUNCTION( msgfmt_format_message )
{
zval *args;
UChar *spattern = NULL;
int spattern_len = 0;
char *pattern = NULL;
int pattern_len = 0;
const char *slocale = NULL;
int slocale_len = 0;
MessageFormatter_object mf = {0};
MessageFormatter_object *mfo = &mf;
/* Parse parameters. */
if( zend_parse_method_parameters( ZEND_NUM_ARGS() TSRMLS_CC, getThis(), "ssa",
&slocale, &slocale_len, &pattern, &pattern_len, &args ) == FAILURE )
{
intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR,
"msgfmt_format_message: unable to parse input params", 0 TSRMLS_CC );
RETURN_FALSE;
}
msgformat_data_init(&mfo->mf_data TSRMLS_CC);
if(pattern && pattern_len) {
intl_convert_utf8_to_utf16(&spattern, &spattern_len, pattern, pattern_len, &INTL_DATA_ERROR_CODE(mfo));
if( U_FAILURE(INTL_DATA_ERROR_CODE((mfo))) )
{
intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR,
"msgfmt_format_message: error converting pattern to UTF-16", 0 TSRMLS_CC );
RETURN_FALSE;
}
} else {
spattern_len = 0;
spattern = NULL;
}
if(slocale_len == 0) {
slocale = intl_locale_get_default(TSRMLS_C);
}
#ifdef MSG_FORMAT_QUOTE_APOS
if(msgformat_fix_quotes(&spattern, &spattern_len, &INTL_DATA_ERROR_CODE(mfo)) != SUCCESS) {
intl_error_set( NULL, U_INVALID_FORMAT_ERROR,
"msgfmt_format_message: error converting pattern to quote-friendly format", 0 TSRMLS_CC );
RETURN_FALSE;
}
#endif
/* Create an ICU message formatter. */
MSG_FORMAT_OBJECT(mfo) = umsg_open(spattern, spattern_len, slocale, NULL, &INTL_DATA_ERROR_CODE(mfo));
if(spattern && spattern_len) {
efree(spattern);
}
INTL_METHOD_CHECK_STATUS(mfo, "Creating message formatter failed");
msgfmt_do_format(mfo, args, return_value TSRMLS_CC);
/* drop the temporary formatter */
msgformat_data_free(&mfo->mf_data TSRMLS_CC);
}
| 138,237,320,751,157,490,000,000,000,000,000,000,000 | None | null | [
"CWE-119"
] | CVE-2016-7416 | ext/intl/msgformat/msgformat_format.c in PHP before 5.6.26 and 7.x before 7.0.11 does not properly restrict the locale length provided to the Locale class in the ICU library, which allows remote attackers to cause a denial of service (application crash) or possibly have unspecified other impact via a MessageFormatter::formatMessage call with a long first argument. | https://nvd.nist.gov/vuln/detail/CVE-2016-7416 |
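This record concerns a locale string whose length was never checked before being handed to a consumer with a fixed internal capacity. A small C sketch of the guard, assuming a cap in the spirit of ICU's ULOC_FULLNAME_CAPACITY; the exact constant and check used by the real fix may differ:

#include <stdio.h>
#include <string.h>

#define MAX_LOCALE_LEN 156   /* assumed cap, roughly ICU's full-name capacity */

/* Copy an untrusted locale id into a fixed buffer only after checking
 * its length; the unpatched path passed arbitrary-length ids straight
 * through to a consumer with a fixed internal buffer. */
static int set_locale(char *dst, const char *src)
{
    size_t n = strlen(src);
    if (n > MAX_LOCALE_LEN)
        return -1;                 /* reject instead of truncating silently */
    memcpy(dst, src, n + 1);       /* fits: n + NUL <= MAX_LOCALE_LEN + 1 */
    return 0;
}

int main(void)
{
    char buf[MAX_LOCALE_LEN + 1];
    char huge[4096];
    memset(huge, 'a', sizeof huge - 1);
    huge[sizeof huge - 1] = '\0';
    printf("%d\n", set_locale(buf, "en_US"));   /* 0  */
    printf("%d\n", set_locale(buf, huge));      /* -1 */
    return 0;
}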
1,739 | php-src | 28f80baf3c53e267c9ce46a2a0fadbb981585132 | https://github.com/php/php-src | https://github.com/php/php-src/commit/28f80baf3c53e267c9ce46a2a0fadbb981585132?w=1 | Fix bug #72293 - Heap overflow in mysqlnd related to BIT fields | 1 | php_mysqlnd_rowp_read_text_protocol_aux(MYSQLND_MEMORY_POOL_CHUNK * row_buffer, zval ** fields,
unsigned int field_count, const MYSQLND_FIELD * fields_metadata,
zend_bool as_int_or_float, zend_bool copy_data, MYSQLND_STATS * stats TSRMLS_DC)
{
unsigned int i;
zend_bool last_field_was_string = FALSE;
zval **current_field, **end_field, **start_field;
zend_uchar * p = row_buffer->ptr;
size_t data_size = row_buffer->app;
zend_uchar * bit_area = (zend_uchar*) row_buffer->ptr + data_size + 1; /* we allocate from here */
DBG_ENTER("php_mysqlnd_rowp_read_text_protocol_aux");
if (!fields) {
DBG_RETURN(FAIL);
}
end_field = (start_field = fields) + field_count;
for (i = 0, current_field = start_field; current_field < end_field; current_field++, i++) {
DBG_INF("Directly creating zval");
MAKE_STD_ZVAL(*current_field);
if (!*current_field) {
DBG_RETURN(FAIL);
}
}
for (i = 0, current_field = start_field; current_field < end_field; current_field++, i++) {
/* Don't reverse the order. It is significant!*/
zend_uchar *this_field_len_pos = p;
/* php_mysqlnd_net_field_length() call should be after *this_field_len_pos = p; */
unsigned long len = php_mysqlnd_net_field_length(&p);
if (copy_data == FALSE && current_field > start_field && last_field_was_string) {
/*
Normal queries:
We have to put \0 now to the end of the previous field, if it was
a string. IS_NULL doesn't matter. Because we have already read our
length, then we can overwrite it in the row buffer.
This statement terminates the previous field, not the current one.
NULL_LENGTH is encoded in one byte, so we can stick a \0 there.
Any string's length is encoded in at least one byte, so we can stick
a \0 there.
*/
*this_field_len_pos = '\0';
}
/* NULL or NOT NULL, this is the question! */
if (len == MYSQLND_NULL_LENGTH) {
ZVAL_NULL(*current_field);
last_field_was_string = FALSE;
} else {
#if defined(MYSQLND_STRING_TO_INT_CONVERSION)
struct st_mysqlnd_perm_bind perm_bind =
mysqlnd_ps_fetch_functions[fields_metadata[i].type];
#endif
if (MYSQLND_G(collect_statistics)) {
enum_mysqlnd_collected_stats statistic;
switch (fields_metadata[i].type) {
case MYSQL_TYPE_DECIMAL: statistic = STAT_TEXT_TYPE_FETCHED_DECIMAL; break;
case MYSQL_TYPE_TINY: statistic = STAT_TEXT_TYPE_FETCHED_INT8; break;
case MYSQL_TYPE_SHORT: statistic = STAT_TEXT_TYPE_FETCHED_INT16; break;
case MYSQL_TYPE_LONG: statistic = STAT_TEXT_TYPE_FETCHED_INT32; break;
case MYSQL_TYPE_FLOAT: statistic = STAT_TEXT_TYPE_FETCHED_FLOAT; break;
case MYSQL_TYPE_DOUBLE: statistic = STAT_TEXT_TYPE_FETCHED_DOUBLE; break;
case MYSQL_TYPE_NULL: statistic = STAT_TEXT_TYPE_FETCHED_NULL; break;
case MYSQL_TYPE_TIMESTAMP: statistic = STAT_TEXT_TYPE_FETCHED_TIMESTAMP; break;
case MYSQL_TYPE_LONGLONG: statistic = STAT_TEXT_TYPE_FETCHED_INT64; break;
case MYSQL_TYPE_INT24: statistic = STAT_TEXT_TYPE_FETCHED_INT24; break;
case MYSQL_TYPE_DATE: statistic = STAT_TEXT_TYPE_FETCHED_DATE; break;
case MYSQL_TYPE_TIME: statistic = STAT_TEXT_TYPE_FETCHED_TIME; break;
case MYSQL_TYPE_DATETIME: statistic = STAT_TEXT_TYPE_FETCHED_DATETIME; break;
case MYSQL_TYPE_YEAR: statistic = STAT_TEXT_TYPE_FETCHED_YEAR; break;
case MYSQL_TYPE_NEWDATE: statistic = STAT_TEXT_TYPE_FETCHED_DATE; break;
case MYSQL_TYPE_VARCHAR: statistic = STAT_TEXT_TYPE_FETCHED_STRING; break;
case MYSQL_TYPE_BIT: statistic = STAT_TEXT_TYPE_FETCHED_BIT; break;
case MYSQL_TYPE_NEWDECIMAL: statistic = STAT_TEXT_TYPE_FETCHED_DECIMAL; break;
case MYSQL_TYPE_ENUM: statistic = STAT_TEXT_TYPE_FETCHED_ENUM; break;
case MYSQL_TYPE_SET: statistic = STAT_TEXT_TYPE_FETCHED_SET; break;
case MYSQL_TYPE_JSON: statistic = STAT_TEXT_TYPE_FETCHED_JSON; break;
case MYSQL_TYPE_TINY_BLOB: statistic = STAT_TEXT_TYPE_FETCHED_BLOB; break;
case MYSQL_TYPE_MEDIUM_BLOB:statistic = STAT_TEXT_TYPE_FETCHED_BLOB; break;
case MYSQL_TYPE_LONG_BLOB: statistic = STAT_TEXT_TYPE_FETCHED_BLOB; break;
case MYSQL_TYPE_BLOB: statistic = STAT_TEXT_TYPE_FETCHED_BLOB; break;
case MYSQL_TYPE_VAR_STRING: statistic = STAT_TEXT_TYPE_FETCHED_STRING; break;
case MYSQL_TYPE_STRING: statistic = STAT_TEXT_TYPE_FETCHED_STRING; break;
case MYSQL_TYPE_GEOMETRY: statistic = STAT_TEXT_TYPE_FETCHED_GEOMETRY; break;
default: statistic = STAT_TEXT_TYPE_FETCHED_OTHER; break;
}
MYSQLND_INC_CONN_STATISTIC_W_VALUE2(stats, statistic, 1, STAT_BYTES_RECEIVED_PURE_DATA_TEXT, len);
}
#ifdef MYSQLND_STRING_TO_INT_CONVERSION
if (as_int_or_float && perm_bind.php_type == IS_LONG) {
zend_uchar save = *(p + len);
/* We have to make it ASCIIZ temporarily */
*(p + len) = '\0';
if (perm_bind.pack_len < SIZEOF_LONG) {
/* direct conversion */
int64_t v =
#ifndef PHP_WIN32
atoll((char *) p);
#else
_atoi64((char *) p);
#endif
ZVAL_LONG(*current_field, (long) v); /* the cast is safe */
} else {
uint64_t v =
#ifndef PHP_WIN32
(uint64_t) atoll((char *) p);
#else
(uint64_t) _atoi64((char *) p);
#endif
zend_bool uns = fields_metadata[i].flags & UNSIGNED_FLAG? TRUE:FALSE;
/* We have to make it ASCIIZ temporarily */
#if SIZEOF_LONG==8
if (uns == TRUE && v > 9223372036854775807L)
#elif SIZEOF_LONG==4
if ((uns == TRUE && v > L64(2147483647)) ||
(uns == FALSE && (( L64(2147483647) < (int64_t) v) ||
(L64(-2147483648) > (int64_t) v))))
#else
#error Need fix for this architecture
#endif /* SIZEOF */
{
ZVAL_STRINGL(*current_field, (char *)p, len, 0);
} else {
ZVAL_LONG(*current_field, (long) v); /* the cast is safe */
}
}
*(p + len) = save;
} else if (as_int_or_float && perm_bind.php_type == IS_DOUBLE) {
zend_uchar save = *(p + len);
/* We have to make it ASCIIZ temporarily */
*(p + len) = '\0';
ZVAL_DOUBLE(*current_field, atof((char *) p));
*(p + len) = save;
} else
#endif /* MYSQLND_STRING_TO_INT_CONVERSION */
if (fields_metadata[i].type == MYSQL_TYPE_BIT) {
/*
BIT fields are specially handled. As they come as bit mask, we have
to convert it to human-readable representation. As the bits take
less space in the protocol than the numbers they represent, we don't
have enough space in the packet buffer to overwrite inside.
Thus, a bit more space is pre-allocated at the end of the buffer,
see php_mysqlnd_rowp_read(). And we add the strings at the end.
Definitely not nice, _hackish_ :(, but works.
*/
zend_uchar *start = bit_area;
ps_fetch_from_1_to_8_bytes(*current_field, &(fields_metadata[i]), 0, &p, len TSRMLS_CC);
/*
We have advanced in ps_fetch_from_1_to_8_bytes. We should go back because
later in this function there will be an advancement.
*/
p -= len;
if (Z_TYPE_PP(current_field) == IS_LONG) {
bit_area += 1 + sprintf((char *)start, "%ld", Z_LVAL_PP(current_field));
ZVAL_STRINGL(*current_field, (char *) start, bit_area - start - 1, copy_data);
} else if (Z_TYPE_PP(current_field) == IS_STRING){
memcpy(bit_area, Z_STRVAL_PP(current_field), Z_STRLEN_PP(current_field));
bit_area += Z_STRLEN_PP(current_field);
*bit_area++ = '\0';
zval_dtor(*current_field);
ZVAL_STRINGL(*current_field, (char *) start, bit_area - start - 1, copy_data);
}
} else {
ZVAL_STRINGL(*current_field, (char *)p, len, copy_data);
}
p += len;
last_field_was_string = TRUE;
}
}
if (copy_data == FALSE && last_field_was_string) {
/* Normal queries: The buffer has one more byte at the end, because we need it */
row_buffer->ptr[data_size] = '\0';
}
DBG_RETURN(PASS);
}
| 190,243,804,462,372,060,000,000,000,000,000,000,000 | None | null | [
"CWE-119"
] | CVE-2016-7412 | ext/mysqlnd/mysqlnd_wireprotocol.c in PHP before 5.6.26 and 7.x before 7.0.11 does not verify that a BIT field has the UNSIGNED_FLAG flag, which allows remote MySQL servers to cause a denial of service (heap-based buffer overflow) or possibly have unspecified other impact via crafted field metadata. | https://nvd.nist.gov/vuln/detail/CVE-2016-7412 |
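The overflow in this record comes from sizing a scratch area under the assumption that a field is unsigned, while untrusted metadata controls whether that assumption holds. A hedged C sketch of a safer pattern — reserve worst-case space for a 64-bit value's decimal form and render with a bounded snprintf; the names and sizes here are illustrative, not mysqlnd's:

#include <inttypes.h>
#include <stdio.h>

/* Worst case for a 64-bit value rendered in decimal:
 * '-' + 20 digits + NUL = 22 bytes. */
#define I64_TEXT_MAX 22

static size_t render_value(char *dst, size_t dst_len,
                           int64_t value, int is_unsigned)
{
    int n;
    if (is_unsigned)
        n = snprintf(dst, dst_len, "%" PRIu64, (uint64_t)value);
    else
        n = snprintf(dst, dst_len, "%" PRId64, value);
    /* snprintf never writes past dst_len and reports truncation via n */
    return (n < 0 || (size_t)n >= dst_len) ? 0 : (size_t)n;
}

int main(void)
{
    char area[I64_TEXT_MAX];
    printf("%zu\n", render_value(area, sizeof area, -1, 0));  /* prints 2 ("-1") */
    return 0;
}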
1,744 | libarchive | 6e06b1c89dd0d16f74894eac4cfc1327a06ee4a0 | https://github.com/libarchive/libarchive | https://github.com/libarchive/libarchive/commit/6e06b1c89dd0d16f74894eac4cfc1327a06ee4a0 | Fix a potential crash issue discovered by Alexander Cherepanov:
It seems bsdtar automatically handles stacked compression. This is a
nice feature but it could be problematic when it's completely
unlimited. Most clearly it's illustrated with quines:
$ curl -sRO http://www.maximumcompression.com/selfgz.gz
$ (ulimit -v 10000000 && bsdtar -tvf selfgz.gz)
bsdtar: Error opening archive: Can't allocate data for gzip decompression
Without ulimit, bsdtar will eat all available memory. This could also
be a problem for other applications using libarchive. | 1 | choose_filters(struct archive_read *a)
{
int number_bidders, i, bid, best_bid;
struct archive_read_filter_bidder *bidder, *best_bidder;
struct archive_read_filter *filter;
ssize_t avail;
int r;
for (;;) {
number_bidders = sizeof(a->bidders) / sizeof(a->bidders[0]);
best_bid = 0;
best_bidder = NULL;
bidder = a->bidders;
for (i = 0; i < number_bidders; i++, bidder++) {
if (bidder->bid != NULL) {
bid = (bidder->bid)(bidder, a->filter);
if (bid > best_bid) {
best_bid = bid;
best_bidder = bidder;
}
}
}
/* If no bidder, we're done. */
if (best_bidder == NULL) {
/* Verify the filter by asking it for some data. */
__archive_read_filter_ahead(a->filter, 1, &avail);
if (avail < 0) {
__archive_read_close_filters(a);
__archive_read_free_filters(a);
return (ARCHIVE_FATAL);
}
a->archive.compression_name = a->filter->name;
a->archive.compression_code = a->filter->code;
return (ARCHIVE_OK);
}
filter
= (struct archive_read_filter *)calloc(1, sizeof(*filter));
if (filter == NULL)
return (ARCHIVE_FATAL);
filter->bidder = best_bidder;
filter->archive = a;
filter->upstream = a->filter;
a->filter = filter;
r = (best_bidder->init)(a->filter);
if (r != ARCHIVE_OK) {
__archive_read_close_filters(a);
__archive_read_free_filters(a);
return (ARCHIVE_FATAL);
}
}
}
| 187,470,736,725,719,080,000,000,000,000,000,000,000 | None | null | [
"CWE-399"
] | CVE-2016-7166 | libarchive before 3.2.0 does not limit the number of recursive decompressions, which allows remote attackers to cause a denial of service (memory consumption and application crash) via a crafted gzip file. | https://nvd.nist.gov/vuln/detail/CVE-2016-7166 |
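The commit message above describes unbounded stacking of decompression filters. A minimal sketch of the mitigation — cap the depth of the probe/push loop. The limit of 25 and the some_filter_bids/push_filter helpers are assumptions for illustration, not libarchive's real API:

#include <stdio.h>

#define MAX_FILTER_DEPTH 25

struct stream { int layers; };

/* Stand-ins for the real bid/init machinery. */
static int some_filter_bids(const struct stream *s) { return s->layers < 1000; }
static int push_filter(struct stream *s) { s->layers++; return 0; }

/* Keep wrapping the stream while some decompressor recognizes it, but
 * stop after a fixed number of layers so a self-extracting "quine"
 * archive cannot exhaust memory. */
static int choose_filters_capped(struct stream *s)
{
    while (some_filter_bids(s)) {
        if (s->layers >= MAX_FILTER_DEPTH)
            return -1;                /* too many nested compressors */
        if (push_filter(s) != 0)
            return -1;
    }
    return 0;
}

int main(void)
{
    struct stream s = { 0 };
    printf("%d (layers=%d)\n", choose_filters_capped(&s), s.layers);
    return 0;
}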
1,745 | openjpeg | c16bc057ba3f125051c9966cf1f5b68a05681de4 | https://github.com/uclouvain/openjpeg | https://github.com/uclouvain/openjpeg/commit/c16bc057ba3f125051c9966cf1f5b68a05681de4 | Fix an integer overflow issue (#809)
Prevent an integer overflow issue in function opj_pi_create_decode of
pi.c. | 1 | opj_pi_iterator_t *opj_pi_create_decode(opj_image_t *p_image,
opj_cp_t *p_cp,
OPJ_UINT32 p_tile_no)
{
/* loop */
OPJ_UINT32 pino;
OPJ_UINT32 compno, resno;
	/* to store w, h, dx and dy for all components and resolutions */
OPJ_UINT32 * l_tmp_data;
OPJ_UINT32 ** l_tmp_ptr;
	/* encoding parameters to set */
OPJ_UINT32 l_max_res;
OPJ_UINT32 l_max_prec;
OPJ_INT32 l_tx0,l_tx1,l_ty0,l_ty1;
OPJ_UINT32 l_dx_min,l_dy_min;
OPJ_UINT32 l_bound;
OPJ_UINT32 l_step_p , l_step_c , l_step_r , l_step_l ;
OPJ_UINT32 l_data_stride;
/* pointers */
opj_pi_iterator_t *l_pi = 00;
opj_tcp_t *l_tcp = 00;
const opj_tccp_t *l_tccp = 00;
opj_pi_comp_t *l_current_comp = 00;
opj_image_comp_t * l_img_comp = 00;
opj_pi_iterator_t * l_current_pi = 00;
OPJ_UINT32 * l_encoding_value_ptr = 00;
/* preconditions in debug */
assert(p_cp != 00);
assert(p_image != 00);
assert(p_tile_no < p_cp->tw * p_cp->th);
/* initializations */
l_tcp = &p_cp->tcps[p_tile_no];
l_bound = l_tcp->numpocs+1;
l_data_stride = 4 * OPJ_J2K_MAXRLVLS;
l_tmp_data = (OPJ_UINT32*)opj_malloc(
l_data_stride * p_image->numcomps * sizeof(OPJ_UINT32));
if
(! l_tmp_data)
{
return 00;
}
l_tmp_ptr = (OPJ_UINT32**)opj_malloc(
p_image->numcomps * sizeof(OPJ_UINT32 *));
if
(! l_tmp_ptr)
{
opj_free(l_tmp_data);
return 00;
}
/* memory allocation for pi */
l_pi = opj_pi_create(p_image, p_cp, p_tile_no);
if (!l_pi) {
opj_free(l_tmp_data);
opj_free(l_tmp_ptr);
return 00;
}
l_encoding_value_ptr = l_tmp_data;
/* update pointer array */
for
(compno = 0; compno < p_image->numcomps; ++compno)
{
l_tmp_ptr[compno] = l_encoding_value_ptr;
l_encoding_value_ptr += l_data_stride;
}
/* get encoding parameters */
opj_get_all_encoding_parameters(p_image,p_cp,p_tile_no,&l_tx0,&l_tx1,&l_ty0,&l_ty1,&l_dx_min,&l_dy_min,&l_max_prec,&l_max_res,l_tmp_ptr);
/* step calculations */
l_step_p = 1;
l_step_c = l_max_prec * l_step_p;
l_step_r = p_image->numcomps * l_step_c;
l_step_l = l_max_res * l_step_r;
/* set values for first packet iterator */
l_current_pi = l_pi;
/* memory allocation for include */
l_current_pi->include = (OPJ_INT16*) opj_calloc((l_tcp->numlayers +1) * l_step_l, sizeof(OPJ_INT16));
if
(!l_current_pi->include)
{
opj_free(l_tmp_data);
opj_free(l_tmp_ptr);
opj_pi_destroy(l_pi, l_bound);
return 00;
}
/* special treatment for the first packet iterator */
l_current_comp = l_current_pi->comps;
l_img_comp = p_image->comps;
l_tccp = l_tcp->tccps;
l_current_pi->tx0 = l_tx0;
l_current_pi->ty0 = l_ty0;
l_current_pi->tx1 = l_tx1;
l_current_pi->ty1 = l_ty1;
/*l_current_pi->dx = l_img_comp->dx;*/
/*l_current_pi->dy = l_img_comp->dy;*/
l_current_pi->step_p = l_step_p;
l_current_pi->step_c = l_step_c;
l_current_pi->step_r = l_step_r;
l_current_pi->step_l = l_step_l;
/* allocation for components and number of components has already been calculated by opj_pi_create */
for
(compno = 0; compno < l_current_pi->numcomps; ++compno)
{
opj_pi_resolution_t *l_res = l_current_comp->resolutions;
l_encoding_value_ptr = l_tmp_ptr[compno];
l_current_comp->dx = l_img_comp->dx;
l_current_comp->dy = l_img_comp->dy;
/* resolutions have already been initialized */
for
(resno = 0; resno < l_current_comp->numresolutions; resno++)
{
l_res->pdx = *(l_encoding_value_ptr++);
l_res->pdy = *(l_encoding_value_ptr++);
l_res->pw = *(l_encoding_value_ptr++);
l_res->ph = *(l_encoding_value_ptr++);
++l_res;
}
++l_current_comp;
++l_img_comp;
++l_tccp;
}
++l_current_pi;
for (pino = 1 ; pino<l_bound ; ++pino )
{
l_current_comp = l_current_pi->comps;
l_img_comp = p_image->comps;
l_tccp = l_tcp->tccps;
l_current_pi->tx0 = l_tx0;
l_current_pi->ty0 = l_ty0;
l_current_pi->tx1 = l_tx1;
l_current_pi->ty1 = l_ty1;
/*l_current_pi->dx = l_dx_min;*/
/*l_current_pi->dy = l_dy_min;*/
l_current_pi->step_p = l_step_p;
l_current_pi->step_c = l_step_c;
l_current_pi->step_r = l_step_r;
l_current_pi->step_l = l_step_l;
/* allocation for components and number of components has already been calculated by opj_pi_create */
for
(compno = 0; compno < l_current_pi->numcomps; ++compno)
{
opj_pi_resolution_t *l_res = l_current_comp->resolutions;
l_encoding_value_ptr = l_tmp_ptr[compno];
l_current_comp->dx = l_img_comp->dx;
l_current_comp->dy = l_img_comp->dy;
/* resolutions have already been initialized */
for
(resno = 0; resno < l_current_comp->numresolutions; resno++)
{
l_res->pdx = *(l_encoding_value_ptr++);
l_res->pdy = *(l_encoding_value_ptr++);
l_res->pw = *(l_encoding_value_ptr++);
l_res->ph = *(l_encoding_value_ptr++);
++l_res;
}
++l_current_comp;
++l_img_comp;
++l_tccp;
}
/* special treatment*/
l_current_pi->include = (l_current_pi-1)->include;
++l_current_pi;
}
opj_free(l_tmp_data);
l_tmp_data = 00;
opj_free(l_tmp_ptr);
l_tmp_ptr = 00;
if
(l_tcp->POC)
{
opj_pi_update_decode_poc (l_pi,l_tcp,l_max_prec,l_max_res);
}
else
{
opj_pi_update_decode_not_poc(l_pi,l_tcp,l_max_prec,l_max_res);
}
return l_pi;
}
| 331,884,484,169,803,600,000,000,000,000,000,000,000 | pi.c | 85,117,321,979,618,870,000,000,000,000,000,000,000 | [
"CWE-125"
] | CVE-2016-7163 | Integer overflow in the opj_pi_create_decode function in pi.c in OpenJPEG allows remote attackers to execute arbitrary code via a crafted JP2 file, which triggers an out-of-bounds read or write. | https://nvd.nist.gov/vuln/detail/CVE-2016-7163 |
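This record is an undersized allocation caused by an unchecked multiplication of untrusted counts. A small C sketch of an overflow-checked allocator; this is a generic pattern, not OpenJPEG's opj_malloc:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Overflow-checked allocation of `count` elements of `elem_size` bytes.
 * The unpatched code multiplied untrusted counts straight into the
 * allocator, so the product could wrap and yield an undersized buffer. */
static void *alloc_array(size_t count, size_t elem_size)
{
    if (elem_size != 0 && count > SIZE_MAX / elem_size)
        return NULL;                       /* would overflow size_t */
    return malloc(count * elem_size);      /* product is now known safe */
}

int main(void)
{
    void *ok  = alloc_array(16, sizeof(uint32_t));
    void *bad = alloc_array(SIZE_MAX / 2, 8);   /* rejected, returns NULL */
    printf("%p %p\n", ok, bad);
    free(ok);
    return 0;
}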
1,746 | curl | curl-7_50_2~32 | https://github.com/curl/curl | https://github.com/curl/curl/commit/curl-7_50_2~32 | nss: refuse previously loaded certificate from file
... when we are not asked to use a certificate from file | 1 | static SECStatus SelectClientCert(void *arg, PRFileDesc *sock,
struct CERTDistNamesStr *caNames,
struct CERTCertificateStr **pRetCert,
struct SECKEYPrivateKeyStr **pRetKey)
{
struct ssl_connect_data *connssl = (struct ssl_connect_data *)arg;
struct Curl_easy *data = connssl->data;
const char *nickname = connssl->client_nickname;
if(connssl->obj_clicert) {
/* use the cert/key provided by PEM reader */
static const char pem_slotname[] = "PEM Token #1";
SECItem cert_der = { 0, NULL, 0 };
void *proto_win = SSL_RevealPinArg(sock);
struct CERTCertificateStr *cert;
struct SECKEYPrivateKeyStr *key;
PK11SlotInfo *slot = PK11_FindSlotByName(pem_slotname);
if(NULL == slot) {
failf(data, "NSS: PK11 slot not found: %s", pem_slotname);
return SECFailure;
}
if(PK11_ReadRawAttribute(PK11_TypeGeneric, connssl->obj_clicert, CKA_VALUE,
&cert_der) != SECSuccess) {
failf(data, "NSS: CKA_VALUE not found in PK11 generic object");
PK11_FreeSlot(slot);
return SECFailure;
}
cert = PK11_FindCertFromDERCertItem(slot, &cert_der, proto_win);
SECITEM_FreeItem(&cert_der, PR_FALSE);
if(NULL == cert) {
failf(data, "NSS: client certificate from file not found");
PK11_FreeSlot(slot);
return SECFailure;
}
key = PK11_FindPrivateKeyFromCert(slot, cert, NULL);
PK11_FreeSlot(slot);
if(NULL == key) {
failf(data, "NSS: private key from file not found");
CERT_DestroyCertificate(cert);
return SECFailure;
}
infof(data, "NSS: client certificate from file\n");
display_cert_info(data, cert);
*pRetCert = cert;
*pRetKey = key;
return SECSuccess;
}
/* use the default NSS hook */
if(SECSuccess != NSS_GetClientAuthData((void *)nickname, sock, caNames,
pRetCert, pRetKey)
|| NULL == *pRetCert) {
if(NULL == nickname)
failf(data, "NSS: client certificate not found (nickname not "
"specified)");
else
failf(data, "NSS: client certificate not found: %s", nickname);
return SECFailure;
}
/* get certificate nickname if any */
nickname = (*pRetCert)->nickname;
if(NULL == nickname)
nickname = "[unknown]";
if(NULL == *pRetKey) {
failf(data, "NSS: private key not found for certificate: %s", nickname);
return SECFailure;
}
infof(data, "NSS: using client certificate: %s\n", nickname);
display_cert_info(data, *pRetCert);
return SECSuccess;
}
| 183,626,236,930,701,920,000,000,000,000,000,000,000 | nss.c | 12,471,031,817,769,896,000,000,000,000,000,000,000 | [
"CWE-287"
] | CVE-2016-7141 | curl and libcurl before 7.50.2, when built with NSS and the libnsspem.so library is available at runtime, allow remote attackers to hijack the authentication of a TLS connection by leveraging reuse of a previously loaded client certificate from file for a connection for which no certificate has been set, a different vulnerability than CVE-2016-5420. | https://nvd.nist.gov/vuln/detail/CVE-2016-7141 |
1,750 | php-src | 698a691724c0a949295991e5df091ce16f899e02 | https://github.com/php/php-src | https://github.com/php/php-src/commit/698a691724c0a949295991e5df091ce16f899e02?w=1 | Fix bug #72750: wddx_deserialize null dereference | 1 | static void php_wddx_pop_element(void *user_data, const XML_Char *name)
{
st_entry *ent1, *ent2;
wddx_stack *stack = (wddx_stack *)user_data;
HashTable *target_hash;
zend_class_entry **pce;
zval *obj;
zval *tmp;
TSRMLS_FETCH();
/* OBJECTS_FIXME */
if (stack->top == 0) {
return;
}
if (!strcmp(name, EL_STRING) || !strcmp(name, EL_NUMBER) ||
!strcmp(name, EL_BOOLEAN) || !strcmp(name, EL_NULL) ||
!strcmp(name, EL_ARRAY) || !strcmp(name, EL_STRUCT) ||
!strcmp(name, EL_RECORDSET) || !strcmp(name, EL_BINARY) ||
!strcmp(name, EL_DATETIME)) {
wddx_stack_top(stack, (void**)&ent1);
if (!ent1->data) {
if (stack->top > 1) {
stack->top--;
} else {
stack->done = 1;
}
efree(ent1);
return;
}
if (!strcmp(name, EL_BINARY)) {
int new_len=0;
unsigned char *new_str;
new_str = php_base64_decode(Z_STRVAL_P(ent1->data), Z_STRLEN_P(ent1->data), &new_len);
STR_FREE(Z_STRVAL_P(ent1->data));
Z_STRVAL_P(ent1->data) = new_str;
Z_STRLEN_P(ent1->data) = new_len;
}
/* Call __wakeup() method on the object. */
if (Z_TYPE_P(ent1->data) == IS_OBJECT) {
zval *fname, *retval = NULL;
MAKE_STD_ZVAL(fname);
ZVAL_STRING(fname, "__wakeup", 1);
call_user_function_ex(NULL, &ent1->data, fname, &retval, 0, 0, 0, NULL TSRMLS_CC);
zval_dtor(fname);
FREE_ZVAL(fname);
if (retval) {
zval_ptr_dtor(&retval);
}
}
if (stack->top > 1) {
stack->top--;
wddx_stack_top(stack, (void**)&ent2);
/* if non-existent field */
if (ent2->type == ST_FIELD && ent2->data == NULL) {
zval_ptr_dtor(&ent1->data);
efree(ent1);
return;
}
if (Z_TYPE_P(ent2->data) == IS_ARRAY || Z_TYPE_P(ent2->data) == IS_OBJECT) {
target_hash = HASH_OF(ent2->data);
if (ent1->varname) {
if (!strcmp(ent1->varname, PHP_CLASS_NAME_VAR) &&
Z_TYPE_P(ent1->data) == IS_STRING && Z_STRLEN_P(ent1->data) &&
ent2->type == ST_STRUCT && Z_TYPE_P(ent2->data) == IS_ARRAY) {
zend_bool incomplete_class = 0;
zend_str_tolower(Z_STRVAL_P(ent1->data), Z_STRLEN_P(ent1->data));
if (zend_hash_find(EG(class_table), Z_STRVAL_P(ent1->data),
Z_STRLEN_P(ent1->data)+1, (void **) &pce)==FAILURE) {
incomplete_class = 1;
pce = &PHP_IC_ENTRY;
}
/* Initialize target object */
MAKE_STD_ZVAL(obj);
object_init_ex(obj, *pce);
/* Merge current hashtable with object's default properties */
zend_hash_merge(Z_OBJPROP_P(obj),
Z_ARRVAL_P(ent2->data),
(void (*)(void *)) zval_add_ref,
(void *) &tmp, sizeof(zval *), 0);
if (incomplete_class) {
php_store_class_name(obj, Z_STRVAL_P(ent1->data), Z_STRLEN_P(ent1->data));
}
/* Clean up old array entry */
zval_ptr_dtor(&ent2->data);
/* Set stack entry to point to the newly created object */
ent2->data = obj;
/* Clean up class name var entry */
zval_ptr_dtor(&ent1->data);
} else if (Z_TYPE_P(ent2->data) == IS_OBJECT) {
zend_class_entry *old_scope = EG(scope);
EG(scope) = Z_OBJCE_P(ent2->data);
Z_DELREF_P(ent1->data);
add_property_zval(ent2->data, ent1->varname, ent1->data);
EG(scope) = old_scope;
} else {
zend_symtable_update(target_hash, ent1->varname, strlen(ent1->varname)+1, &ent1->data, sizeof(zval *), NULL);
}
efree(ent1->varname);
} else {
zend_hash_next_index_insert(target_hash, &ent1->data, sizeof(zval *), NULL);
}
}
efree(ent1);
} else {
stack->done = 1;
}
} else if (!strcmp(name, EL_VAR) && stack->varname) {
efree(stack->varname);
stack->varname = NULL;
} else if (!strcmp(name, EL_FIELD)) {
st_entry *ent;
wddx_stack_top(stack, (void **)&ent);
efree(ent);
stack->top--;
}
}
| 289,440,307,254,825,080,000,000,000,000,000,000,000 | None | null | [
"CWE-476"
] | CVE-2016-7130 | The php_wddx_pop_element function in ext/wddx/wddx.c in PHP before 5.6.25 and 7.x before 7.0.10 allows remote attackers to cause a denial of service (NULL pointer dereference and application crash) or possibly have unspecified other impact via an invalid base64 binary value, as demonstrated by a wddx_deserialize call that mishandles a binary element in a wddxPacket XML document. | https://nvd.nist.gov/vuln/detail/CVE-2016-7130 |
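The NULL dereference in this record happens because a decoder's failure return was stored and later dereferenced without a check. A self-contained sketch with a hypothetical decode_blob that returns NULL on bad input, mirroring the missing-check pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical decoder that, like many real ones, returns NULL on
 * malformed input. The unpatched handler stored the result unchecked. */
static unsigned char *decode_blob(const char *in, size_t in_len, size_t *out_len)
{
    if (in_len == 0 || (in_len % 4) != 0)   /* stand-in for "invalid input" */
        return NULL;
    unsigned char *out = malloc(in_len);
    if (out != NULL) {
        memcpy(out, in, in_len);            /* trivial stand-in "decoding" */
        *out_len = in_len;
    }
    return out;
}

static int handle_binary(const char *in, size_t in_len)
{
    size_t out_len = 0;
    unsigned char *out = decode_blob(in, in_len, &out_len);
    if (out == NULL)                 /* the missing check: bail out cleanly */
        return -1;
    /* ... use out/out_len ... */
    free(out);
    return 0;
}

int main(void)
{
    printf("%d %d\n", handle_binary("abcd", 4), handle_binary("abc", 3));
    return 0;
}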
1,751 | php-src | 426aeb2808955ee3d3f52e0cfb102834cdb836a5 | https://github.com/php/php-src | https://github.com/php/php-src/commit/426aeb2808955ee3d3f52e0cfb102834cdb836a5?w=1 | Fix bug #72749: wddx_deserialize allows illegal memory access | 1 | static void php_wddx_process_data(void *user_data, const XML_Char *s, int len)
{
st_entry *ent;
wddx_stack *stack = (wddx_stack *)user_data;
TSRMLS_FETCH();
if (!wddx_stack_is_empty(stack) && !stack->done) {
wddx_stack_top(stack, (void**)&ent);
switch (ent->type) {
case ST_STRING:
if (Z_STRLEN_P(ent->data) == 0) {
STR_FREE(Z_STRVAL_P(ent->data));
Z_STRVAL_P(ent->data) = estrndup(s, len);
Z_STRLEN_P(ent->data) = len;
} else {
Z_STRVAL_P(ent->data) = erealloc(Z_STRVAL_P(ent->data), Z_STRLEN_P(ent->data) + len + 1);
memcpy(Z_STRVAL_P(ent->data) + Z_STRLEN_P(ent->data), s, len);
Z_STRLEN_P(ent->data) += len;
Z_STRVAL_P(ent->data)[Z_STRLEN_P(ent->data)] = '\0';
}
break;
case ST_BINARY:
if (Z_STRLEN_P(ent->data) == 0) {
STR_FREE(Z_STRVAL_P(ent->data));
Z_STRVAL_P(ent->data) = estrndup(s, len + 1);
} else {
Z_STRVAL_P(ent->data) = erealloc(Z_STRVAL_P(ent->data), Z_STRLEN_P(ent->data) + len + 1);
memcpy(Z_STRVAL_P(ent->data) + Z_STRLEN_P(ent->data), s, len);
}
Z_STRLEN_P(ent->data) += len;
Z_STRVAL_P(ent->data)[Z_STRLEN_P(ent->data)] = '\0';
break;
case ST_NUMBER:
Z_TYPE_P(ent->data) = IS_STRING;
Z_STRLEN_P(ent->data) = len;
Z_STRVAL_P(ent->data) = estrndup(s, len);
convert_scalar_to_number(ent->data TSRMLS_CC);
break;
case ST_BOOLEAN:
if(!ent->data) {
break;
}
if (!strcmp(s, "true")) {
Z_LVAL_P(ent->data) = 1;
} else if (!strcmp(s, "false")) {
Z_LVAL_P(ent->data) = 0;
} else {
zval_ptr_dtor(&ent->data);
if (ent->varname) {
efree(ent->varname);
ent->varname = NULL;
}
ent->data = NULL;
}
break;
case ST_DATETIME: {
char *tmp;
tmp = emalloc(len + 1);
memcpy(tmp, s, len);
tmp[len] = '\0';
Z_LVAL_P(ent->data) = php_parse_date(tmp, NULL);
/* date out of range < 1969 or > 2038 */
if (Z_LVAL_P(ent->data) == -1) {
Z_TYPE_P(ent->data) = IS_STRING;
Z_STRLEN_P(ent->data) = len;
Z_STRVAL_P(ent->data) = estrndup(s, len);
}
efree(tmp);
}
break;
default:
break;
}
}
}
| 189,109,834,750,109,700,000,000,000,000,000,000,000 | None | null | [
"CWE-20"
] | CVE-2016-7129 | The php_wddx_process_data function in ext/wddx/wddx.c in PHP before 5.6.25 and 7.x before 7.0.10 allows remote attackers to cause a denial of service (segmentation fault) or possibly have unspecified other impact via an invalid ISO 8601 time value, as demonstrated by a wddx_deserialize call that mishandles a dateTime element in a wddxPacket XML document. | https://nvd.nist.gov/vuln/detail/CVE-2016-7129 |
1,752 | php-src | 1bd103df00f49cf4d4ade2cfe3f456ac058a4eae | https://github.com/php/php-src | https://github.com/php/php-src/commit/1bd103df00f49cf4d4ade2cfe3f456ac058a4eae?w=1 | Fix bug #72730 - imagegammacorrect allows arbitrary write access | 1 | PHP_FUNCTION(imagegammacorrect)
{
zval *IM;
gdImagePtr im;
int i;
double input, output;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rdd", &IM, &input, &output) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
if (gdImageTrueColor(im)) {
int x, y, c;
for (y = 0; y < gdImageSY(im); y++) {
for (x = 0; x < gdImageSX(im); x++) {
c = gdImageGetPixel(im, x, y);
gdImageSetPixel(im, x, y,
gdTrueColorAlpha(
(int) ((pow((pow((gdTrueColorGetRed(c) / 255.0), input)), 1.0 / output) * 255) + .5),
(int) ((pow((pow((gdTrueColorGetGreen(c) / 255.0), input)), 1.0 / output) * 255) + .5),
(int) ((pow((pow((gdTrueColorGetBlue(c) / 255.0), input)), 1.0 / output) * 255) + .5),
gdTrueColorGetAlpha(c)
)
);
}
}
RETURN_TRUE;
}
for (i = 0; i < gdImageColorsTotal(im); i++) {
im->red[i] = (int)((pow((pow((im->red[i] / 255.0), input)), 1.0 / output) * 255) + .5);
im->green[i] = (int)((pow((pow((im->green[i] / 255.0), input)), 1.0 / output) * 255) + .5);
im->blue[i] = (int)((pow((pow((im->blue[i] / 255.0), input)), 1.0 / output) * 255) + .5);
}
RETURN_TRUE;
}
| 139,742,999,262,680,890,000,000,000,000,000,000,000 | gd.c | 187,233,488,818,011,500,000,000,000,000,000,000,000 | [
"CWE-787"
] | CVE-2016-7127 | The imagegammacorrect function in ext/gd/gd.c in PHP before 5.6.25 and 7.x before 7.0.10 does not properly validate gamma values, which allows remote attackers to cause a denial of service (out-of-bounds write) or possibly have unspecified other impact by providing different signs for the second and third arguments. | https://nvd.nist.gov/vuln/detail/CVE-2016-7127 |
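The out-of-bounds write in this record stems from gamma parameters that were never validated before feeding pow(). A sketch of the guard; note that !(x > 0.0) also rejects NaN, which a plain x <= 0.0 would let through. The clamping shown is an extra belt-and-braces step, not necessarily what the real fix does:

#include <math.h>
#include <stdio.h>

static int gamma_correct_channel(double in_gamma, double out_gamma,
                                 int channel /* 0..255 */, int *result)
{
    if (!(in_gamma > 0.0) || !(out_gamma > 0.0))
        return -1;                   /* reject zero, negative and NaN gamma */
    double v = pow(pow(channel / 255.0, in_gamma), 1.0 / out_gamma) * 255.0 + 0.5;
    if (v < 0.0)   v = 0.0;          /* clamp instead of trusting the cast */
    if (v > 255.0) v = 255.0;
    *result = (int)v;
    return 0;
}

int main(void)
{
    int r;
    printf("%d\n", gamma_correct_channel(1.0, 2.2, 128, &r));   /* 0  */
    printf("%d\n", gamma_correct_channel(-1.0, 2.2, 128, &r));  /* -1 */
    return 0;
}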
1,753 | php-src | b6f13a5ef9d6280cf984826a5de012a32c396cd4 | https://github.com/php/php-src | https://github.com/php/php-src/commit/b6f13a5ef9d6280cf984826a5de012a32c396cd4?w=1 | None | 1 | PHP_FUNCTION(imagetruecolortopalette)
{
zval *IM;
zend_bool dither;
long ncolors;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rbl", &IM, &dither, &ncolors) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
if (ncolors <= 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Number of colors has to be greater than zero");
RETURN_FALSE;
}
gdImageTrueColorToPalette(im, dither, ncolors);
RETURN_TRUE;
}
| 181,878,688,249,174,300,000,000,000,000,000,000,000 | None | null | [
"CWE-787"
] | CVE-2016-7126 | The imagetruecolortopalette function in ext/gd/gd.c in PHP before 5.6.25 and 7.x before 7.0.10 does not properly validate the number of colors, which allows remote attackers to cause a denial of service (select_colors allocation error and out-of-bounds write) or possibly have unspecified other impact via a large value in the third argument. | https://nvd.nist.gov/vuln/detail/CVE-2016-7126 |
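The flaw in this record is a missing upper bound on a requested palette size: the visible code rejects values <= 0 but not oversized counts. A trivial sketch of two-sided validation, assuming the usual 256-color palette limit:

#include <stdio.h>

#define MAX_PALETTE_COLORS 256

static int validate_ncolors(long ncolors)
{
    return (ncolors >= 1 && ncolors <= MAX_PALETTE_COLORS) ? 0 : -1;
}

int main(void)
{
    printf("%d %d %d\n",
           validate_ncolors(16),      /*  0 */
           validate_ncolors(0),       /* -1 */
           validate_ncolors(100000)); /* -1 */
    return 0;
}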
1,758 | linux | 34b88a68f26a75e4fded796f1a49c40f82234b7d | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/34b88a68f26a75e4fded796f1a49c40f82234b7d | net: Fix use after free in the recvmmsg exit path
The syzkaller fuzzer hit the following use-after-free:
Call Trace:
[<ffffffff8175ea0e>] __asan_report_load8_noabort+0x3e/0x40 mm/kasan/report.c:295
[<ffffffff851cc31a>] __sys_recvmmsg+0x6fa/0x7f0 net/socket.c:2261
[< inline >] SYSC_recvmmsg net/socket.c:2281
[<ffffffff851cc57f>] SyS_recvmmsg+0x16f/0x180 net/socket.c:2270
[<ffffffff86332bb6>] entry_SYSCALL_64_fastpath+0x16/0x7a
arch/x86/entry/entry_64.S:185
And, as Dmitry rightly assessed, that is because we can drop the
reference and then touch it when the underlying recvmsg calls return
some packets and then hit an error, which will make recvmmsg to set
sock->sk->sk_err, oops, fix it.
Reported-and-Tested-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Kostya Serebryany <kcc@google.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Fixes: a2e2725541fa ("net: Introduce recvmmsg socket syscall")
http://lkml.kernel.org/r/20160122211644.GC2470@redhat.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
unsigned int flags, struct timespec *timeout)
{
int fput_needed, err, datagrams;
struct socket *sock;
struct mmsghdr __user *entry;
struct compat_mmsghdr __user *compat_entry;
struct msghdr msg_sys;
struct timespec end_time;
if (timeout &&
poll_select_set_timeout(&end_time, timeout->tv_sec,
timeout->tv_nsec))
return -EINVAL;
datagrams = 0;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
return err;
err = sock_error(sock->sk);
if (err)
goto out_put;
entry = mmsg;
compat_entry = (struct compat_mmsghdr __user *)mmsg;
while (datagrams < vlen) {
/*
* No need to ask LSM for more than the first datagram.
*/
if (MSG_CMSG_COMPAT & flags) {
err = ___sys_recvmsg(sock, (struct user_msghdr __user *)compat_entry,
&msg_sys, flags & ~MSG_WAITFORONE,
datagrams);
if (err < 0)
break;
err = __put_user(err, &compat_entry->msg_len);
++compat_entry;
} else {
err = ___sys_recvmsg(sock,
(struct user_msghdr __user *)entry,
&msg_sys, flags & ~MSG_WAITFORONE,
datagrams);
if (err < 0)
break;
err = put_user(err, &entry->msg_len);
++entry;
}
if (err)
break;
++datagrams;
/* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
if (flags & MSG_WAITFORONE)
flags |= MSG_DONTWAIT;
if (timeout) {
ktime_get_ts(timeout);
*timeout = timespec_sub(end_time, *timeout);
if (timeout->tv_sec < 0) {
timeout->tv_sec = timeout->tv_nsec = 0;
break;
}
/* Timeout, return less than vlen datagrams */
if (timeout->tv_nsec == 0 && timeout->tv_sec == 0)
break;
}
/* Out of band data, return right away */
if (msg_sys.msg_flags & MSG_OOB)
break;
cond_resched();
}
out_put:
fput_light(sock->file, fput_needed);
if (err == 0)
return datagrams;
if (datagrams != 0) {
/*
* We may return less entries than requested (vlen) if the
* sock is non block and there aren't enough datagrams...
*/
if (err != -EAGAIN) {
/*
* ... or if recvmsg returns an error after we
* received some datagrams, where we record the
* error to return on the next call or if the
* app asks about it using getsockopt(SO_ERROR).
*/
sock->sk->sk_err = -err;
}
return datagrams;
}
return err;
}
| 32,465,179,856,289,110,000,000,000,000,000,000,000 | socket.c | 323,406,668,974,979,600,000,000,000,000,000,000,000 | [
"CWE-19"
] | CVE-2016-7117 | Use-after-free vulnerability in the __sys_recvmmsg function in net/socket.c in the Linux kernel before 4.5.2 allows remote attackers to execute arbitrary code via vectors involving a recvmmsg system call that is mishandled during error processing. | https://nvd.nist.gov/vuln/detail/CVE-2016-7117 |
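The use-after-free in this record is an ordering bug: an error code was written into an object after the reference keeping it alive had been dropped. A minimal refcounting sketch of the safe ordering, using a generic struct conn rather than the kernel's socket types:

#include <stdio.h>
#include <stdlib.h>

/* In the vulnerable ordering the status was stored *after* the reference
 * was dropped, i.e. "conn_put(c); c->last_err = err;". The safe ordering
 * records the status first. */
struct conn {
    int refs;
    int last_err;
};

static void conn_get(struct conn *c) { c->refs++; }
static void conn_put(struct conn *c)
{
    if (--c->refs == 0)
        free(c);
}

static int do_work(struct conn *c, int err)
{
    conn_get(c);
    /* ... receive datagrams ... */
    if (err != 0)
        c->last_err = err;   /* record while we still hold our reference */
    conn_put(c);             /* only now is it safe to drop the reference */
    return err;
}

int main(void)
{
    struct conn *c = calloc(1, sizeof *c);
    if (c == NULL)
        return 1;
    c->refs = 1;
    do_work(c, -11);
    printf("%d\n", c->last_err);
    conn_put(c);
    return 0;
}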
1,759 | MAC-Telnet | b69d11727d4f0f8cf719c79e3fb700f55ca03e9a | https://github.com/haakonnessjoen/MAC-Telnet | https://github.com/haakonnessjoen/MAC-Telnet/commit/b69d11727d4f0f8cf719c79e3fb700f55ca03e9a | Merge pull request #20 from eyalitki/master
2nd round security fixes from eyalitki | 1 | static int handle_packet(unsigned char *data, int data_len) {
struct mt_mactelnet_hdr pkthdr;
/* Minimal size checks (pings are not supported here) */
if (data_len < MT_HEADER_LEN){
return -1;
}
parse_packet(data, &pkthdr);
/* We only care about packets with correct sessionkey */
if (pkthdr.seskey != sessionkey) {
return -1;
}
/* Handle data packets */
if (pkthdr.ptype == MT_PTYPE_DATA) {
struct mt_packet odata;
struct mt_mactelnet_control_hdr cpkt;
int success = 0;
/* Always transmit ACKNOWLEDGE packets in response to DATA packets */
init_packet(&odata, MT_PTYPE_ACK, srcmac, dstmac, sessionkey, pkthdr.counter + (data_len - MT_HEADER_LEN));
send_udp(&odata, 0);
/* Accept first packet, and all packets greater than incounter, and if counter has
wrapped around. */
if (pkthdr.counter > incounter || (incounter - pkthdr.counter) > 65535) {
incounter = pkthdr.counter;
} else {
/* Ignore double or old packets */
return -1;
}
/* Parse controlpacket data */
success = parse_control_packet(data + MT_HEADER_LEN, data_len - MT_HEADER_LEN, &cpkt);
while (success) {
/* If we receive pass_salt, transmit auth data back */
if (cpkt.cptype == MT_CPTYPE_PASSSALT) {
memcpy(pass_salt, cpkt.data, cpkt.length);
send_auth(username, password);
}
/* If the (remaining) data did not have a control-packet magic byte sequence,
the data is raw terminal data to be outputted to the terminal. */
else if (cpkt.cptype == MT_CPTYPE_PLAINDATA) {
fwrite((const void *)cpkt.data, 1, cpkt.length, stdout);
}
/* END_AUTH means that the user/password negotiation is done, and after this point
terminal data may arrive, so we set up the terminal to raw mode. */
else if (cpkt.cptype == MT_CPTYPE_END_AUTH) {
/* we have entered "terminal mode" */
terminal_mode = 1;
if (is_a_tty) {
/* stop input buffering at all levels. Give full control of terminal to RouterOS */
raw_term();
setvbuf(stdin, (char*)NULL, _IONBF, 0);
/* Add resize signal handler */
signal(SIGWINCH, sig_winch);
}
}
/* Parse next controlpacket */
success = parse_control_packet(NULL, 0, &cpkt);
}
}
else if (pkthdr.ptype == MT_PTYPE_ACK) {
/* Handled elsewhere */
}
/* The server wants to terminate the connection, we have to oblige */
else if (pkthdr.ptype == MT_PTYPE_END) {
struct mt_packet odata;
/* Acknowledge the disconnection by sending a END packet in return */
init_packet(&odata, MT_PTYPE_END, srcmac, dstmac, pkthdr.seskey, 0);
send_udp(&odata, 0);
if (!quiet_mode) {
fprintf(stderr, _("Connection closed.\n"));
}
/* exit */
running = 0;
} else {
fprintf(stderr, _("Unhandeled packet type: %d received from server %s\n"), pkthdr.ptype, ether_ntoa((struct ether_addr *)dstmac));
return -1;
}
return pkthdr.ptype;
}
| 321,429,272,328,439,870,000,000,000,000,000,000,000 | None | null | [
"CWE-119"
] | CVE-2016-7115 | Buffer overflow in the handle_packet function in mactelnet.c in the client in MAC-Telnet 0.4.3 and earlier allows remote TELNET servers to execute arbitrary code via a long string in an MT_CPTYPE_PASSSALT control packet. | https://nvd.nist.gov/vuln/detail/CVE-2016-7115 |
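The overflow in this record is a memcpy whose length comes straight from a peer-controlled packet field. A sketch of the bounded copy; the 16-byte salt size is an assumption for illustration:

#include <stdio.h>
#include <string.h>

#define SALT_LEN 16

/* A control packet carries a length field chosen by the peer; copying
 * that many bytes into a fixed-size buffer without a bound check is the
 * classic overflow here. */
static int copy_salt(unsigned char *dst,
                     const unsigned char *src, size_t src_len)
{
    if (src_len > SALT_LEN)
        return -1;                  /* reject an oversized field */
    memcpy(dst, src, src_len);
    if (src_len < SALT_LEN)
        memset(dst + src_len, 0, SALT_LEN - src_len);
    return 0;
}

int main(void)
{
    unsigned char salt[SALT_LEN];
    unsigned char pkt[64] = { 0 };
    printf("%d %d\n", copy_salt(salt, pkt, 16), copy_salt(salt, pkt, 64));
    return 0;
}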
1,790 | linux | 10eec60ce79187686e052092e5383c99b4420a20 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/10eec60ce79187686e052092e5383c99b4420a20 | vfs: ioctl: prevent double-fetch in dedupe ioctl
This prevents a double-fetch from user space that can lead to to an
undersized allocation and heap overflow.
Fixes: 54dbc1517237 ("vfs: hoist the btrfs deduplication ioctl to the vfs")
Signed-off-by: Scott Bauer <sbauer@plzdonthack.me>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | 1 | static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
{
struct file_dedupe_range __user *argp = arg;
struct file_dedupe_range *same = NULL;
int ret;
unsigned long size;
u16 count;
if (get_user(count, &argp->dest_count)) {
ret = -EFAULT;
goto out;
}
size = offsetof(struct file_dedupe_range __user, info[count]);
same = memdup_user(argp, size);
if (IS_ERR(same)) {
ret = PTR_ERR(same);
same = NULL;
goto out;
}
ret = vfs_dedupe_file_range(file, same);
if (ret)
goto out;
ret = copy_to_user(argp, same, size);
if (ret)
ret = -EFAULT;
out:
kfree(same);
return ret;
}
| 177,427,685,205,348,800,000,000,000,000,000,000,000 | ioctl.c | 238,069,660,354,633,780,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2016-6516 | Race condition in the ioctl_file_dedupe_range function in fs/ioctl.c in the Linux kernel through 4.7 allows local users to cause a denial of service (heap-based buffer overflow) or possibly gain privileges by changing a certain count value, aka a "double fetch" vulnerability. | https://nvd.nist.gov/vuln/detail/CVE-2016-6516 |
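The double-fetch in this record reads a user-supplied count once to size an allocation and then copies the whole structure again, so the count can change in between. A userspace C sketch of the robust pattern — pin the re-fetched header field to the value the allocation was based on. This mirrors the idea of the upstream fix, not its exact code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct request {
    uint16_t count;
    uint32_t info[];               /* `count` entries follow */
};

static struct request *fetch_request(const void *src)
{
    uint16_t count;
    memcpy(&count, src, sizeof count);              /* first fetch */
    size_t size = sizeof(struct request) + (size_t)count * sizeof(uint32_t);
    struct request *req = malloc(size);
    if (req == NULL)
        return NULL;
    memcpy(req, src, size);                         /* second fetch */
    req->count = count;     /* pin the value the allocation was based on */
    return req;
}

int main(void)
{
    unsigned char buf[sizeof(struct request) + 4 * sizeof(uint32_t)] = { 0 };
    uint16_t four = 4;
    memcpy(buf, &four, sizeof four);
    struct request *r = fetch_request(buf);
    printf("%u\n", r ? r->count : 0);
    free(r);
    return 0;
}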
1,791 | openssh-portable | fcd135c9df440bcd2d5870405ad3311743d78d97 | https://github.com/openssh/openssh-portable | https://github.com/openssh/openssh-portable/commit/fcd135c9df440bcd2d5870405ad3311743d78d97 | upstream commit
Skip passwords longer than 1k in length so clients can't
easily DoS sshd by sending very long passwords, causing it to spend CPU
hashing them. feedback djm@, ok markus@.
Brought to our attention by tomas.kuthan at oracle.com, shilei-c at
360.cn and coredump at autistici.org
Upstream-ID: d0af7d4a2190b63ba1d38eec502bc4be0be9e333 | 1 | auth_password(Authctxt *authctxt, const char *password)
{
struct passwd * pw = authctxt->pw;
int result, ok = authctxt->valid;
#if defined(USE_SHADOW) && defined(HAS_SHADOW_EXPIRE)
static int expire_checked = 0;
#endif
#ifndef HAVE_CYGWIN
if (pw->pw_uid == 0 && options.permit_root_login != PERMIT_YES)
ok = 0;
#endif
if (*password == '\0' && options.permit_empty_passwd == 0)
return 0;
#ifdef KRB5
if (options.kerberos_authentication == 1) {
int ret = auth_krb5_password(authctxt, password);
if (ret == 1 || ret == 0)
return ret && ok;
/* Fall back to ordinary passwd authentication. */
}
#endif
#ifdef HAVE_CYGWIN
{
HANDLE hToken = cygwin_logon_user(pw, password);
if (hToken == INVALID_HANDLE_VALUE)
return 0;
cygwin_set_impersonation_token(hToken);
return ok;
}
#endif
#ifdef USE_PAM
if (options.use_pam)
return (sshpam_auth_passwd(authctxt, password) && ok);
#endif
#if defined(USE_SHADOW) && defined(HAS_SHADOW_EXPIRE)
if (!expire_checked) {
expire_checked = 1;
if (auth_shadow_pwexpired(authctxt))
authctxt->force_pwchange = 1;
}
#endif
result = sys_auth_passwd(authctxt, password);
if (authctxt->force_pwchange)
disable_forwarding();
return (result && ok);
}
| 319,587,561,961,095,600,000,000,000,000,000,000,000 | None | null | [
"CWE-20"
] | CVE-2016-6515 | The auth_password function in auth-passwd.c in sshd in OpenSSH before 7.3 does not limit password lengths for password authentication, which allows remote attackers to cause a denial of service (crypt CPU consumption) via a long string. | https://nvd.nist.gov/vuln/detail/CVE-2016-6515 |
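The denial of service in this record comes from hashing arbitrarily long passwords. A tiny sketch of the guard; the 1024-byte cap follows the "1k" wording in the commit message, and expensive_hash_check is a stand-in for the real crypt call:

#include <stdio.h>
#include <string.h>

#define MAX_PASSWORD_LEN 1024

static int expensive_hash_check(const char *password) { return password[0] != '\0'; }

static int check_password(const char *password)
{
    if (strlen(password) > MAX_PASSWORD_LEN)
        return 0;                   /* refuse without burning CPU on crypt */
    return expensive_hash_check(password);
}

int main(void)
{
    printf("%d\n", check_password("hunter2"));
    return 0;
}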
1,792 | ImageMagick | dd84447b63a71fa8c3f47071b09454efc667767b | https://github.com/ImageMagick/ImageMagick | https://github.com/ImageMagick/ImageMagick/commit/dd84447b63a71fa8c3f47071b09454efc667767b | Prevent buffer overflow (bug report from Ibrahim el-sayed) | 1 | static MagickBooleanType Get8BIMProperty(const Image *image,const char *key,
ExceptionInfo *exception)
{
char
*attribute,
format[MagickPathExtent],
name[MagickPathExtent],
*resource;
const StringInfo
*profile;
const unsigned char
*info;
long
start,
stop;
MagickBooleanType
status;
register ssize_t
i;
size_t
length;
ssize_t
count,
id,
sub_number;
/*
There are no newlines in path names, so it's safe as terminator.
*/
profile=GetImageProfile(image,"8bim");
if (profile == (StringInfo *) NULL)
return(MagickFalse);
count=(ssize_t) sscanf(key,"8BIM:%ld,%ld:%1024[^\n]\n%1024[^\n]",&start,&stop,
name,format);
if ((count != 2) && (count != 3) && (count != 4))
return(MagickFalse);
if (count < 4)
(void) CopyMagickString(format,"SVG",MagickPathExtent);
if (count < 3)
*name='\0';
sub_number=1;
if (*name == '#')
sub_number=(ssize_t) StringToLong(&name[1]);
sub_number=MagickMax(sub_number,1L);
resource=(char *) NULL;
status=MagickFalse;
length=GetStringInfoLength(profile);
info=GetStringInfoDatum(profile);
while ((length > 0) && (status == MagickFalse))
{
if (ReadPropertyByte(&info,&length) != (unsigned char) '8')
continue;
if (ReadPropertyByte(&info,&length) != (unsigned char) 'B')
continue;
if (ReadPropertyByte(&info,&length) != (unsigned char) 'I')
continue;
if (ReadPropertyByte(&info,&length) != (unsigned char) 'M')
continue;
id=(ssize_t) ReadPropertyMSBShort(&info,&length);
if (id < (ssize_t) start)
continue;
if (id > (ssize_t) stop)
continue;
if (resource != (char *) NULL)
resource=DestroyString(resource);
count=(ssize_t) ReadPropertyByte(&info,&length);
if ((count != 0) && ((size_t) count <= length))
{
resource=(char *) NULL;
if (~((size_t) count) >= (MagickPathExtent-1))
resource=(char *) AcquireQuantumMemory((size_t) count+
MagickPathExtent,sizeof(*resource));
if (resource != (char *) NULL)
{
for (i=0; i < (ssize_t) count; i++)
resource[i]=(char) ReadPropertyByte(&info,&length);
resource[count]='\0';
}
}
if ((count & 0x01) == 0)
(void) ReadPropertyByte(&info,&length);
count=(ssize_t) ReadPropertyMSBLong(&info,&length);
if ((*name != '\0') && (*name != '#'))
if ((resource == (char *) NULL) || (LocaleCompare(name,resource) != 0))
{
/*
No name match, scroll forward and try next.
*/
info+=count;
length-=MagickMin(count,(ssize_t) length);
continue;
}
if ((*name == '#') && (sub_number != 1))
{
/*
No numbered match, scroll forward and try next.
*/
sub_number--;
info+=count;
length-=MagickMin(count,(ssize_t) length);
continue;
}
/*
We have the resource of interest.
*/
attribute=(char *) NULL;
if (~((size_t) count) >= (MagickPathExtent-1))
attribute=(char *) AcquireQuantumMemory((size_t) count+MagickPathExtent,
sizeof(*attribute));
if (attribute != (char *) NULL)
{
(void) CopyMagickMemory(attribute,(char *) info,(size_t) count);
attribute[count]='\0';
info+=count;
length-=MagickMin(count,(ssize_t) length);
if ((id <= 1999) || (id >= 2999))
(void) SetImageProperty((Image *) image,key,(const char *)
attribute,exception);
else
{
char
*path;
if (LocaleCompare(format,"svg") == 0)
path=TraceSVGClippath((unsigned char *) attribute,(size_t) count,
image->columns,image->rows);
else
path=TracePSClippath((unsigned char *) attribute,(size_t) count);
(void) SetImageProperty((Image *) image,key,(const char *) path,
exception);
path=DestroyString(path);
}
attribute=DestroyString(attribute);
status=MagickTrue;
}
}
if (resource != (char *) NULL)
resource=DestroyString(resource);
return(status);
}
| 80,477,753,582,958,770,000,000,000,000,000,000,000 | property.c | 46,131,985,438,510,870,000,000,000,000,000,000,000 | [
"CWE-125"
] | CVE-2016-6491 | Buffer overflow in the Get8BIMProperty function in MagickCore/property.c in ImageMagick before 6.9.5-4 and 7.x before 7.0.2-6 allows remote attackers to cause a denial of service (out-of-bounds read, memory leak, and crash) via a crafted image. | https://nvd.nist.gov/vuln/detail/CVE-2016-6491 |
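The out-of-bounds read in this record is a length field taken from an untrusted profile blob and trusted beyond the bytes actually remaining. A generic TLV-parsing sketch of the bounds check; the framing here is illustrative, not ImageMagick's 8BIM layout:

#include <stdio.h>

/* Every length read from the untrusted blob must be checked against the
 * bytes actually remaining before it is used to copy or to advance. */
static int parse_blob(const unsigned char *p, size_t remaining)
{
    while (remaining >= 2) {
        size_t len = ((size_t)p[0] << 8) | p[1];   /* big-endian length */
        p += 2;
        remaining -= 2;
        if (len > remaining)
            return -1;              /* declared length exceeds the buffer */
        /* ... consume exactly len payload bytes here ... */
        p += len;
        remaining -= len;
    }
    return 0;
}

int main(void)
{
    const unsigned char good[] = { 0x00, 0x02, 'h', 'i' };
    const unsigned char bad[]  = { 0xff, 0xff, 'h', 'i' };
    printf("%d %d\n", parse_blob(good, sizeof good), parse_blob(bad, sizeof bad));
    return 0;
}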
1,795 | collectd | b589096f907052b3a4da2b9ccc9b0e2e888dfc18 | https://github.com/collectd/collectd | https://github.com/collectd/collectd/commit/b589096f907052b3a4da2b9ccc9b0e2e888dfc18 | network plugin: Fix heap overflow in parse_packet().
Emilien Gaspar has identified a heap overflow in parse_packet(), the
function used by the network plugin to parse incoming network packets.
This is a vulnerability in collectd, though the scope is not clear at
this point. At the very least specially crafted network packets can be
used to crash the daemon. We can't rule out a potential remote code
execution though.
Fixes: CVE-2016-6254 | 1 | static int parse_packet (sockent_t *se, /* {{{ */
void *buffer, size_t buffer_size, int flags,
const char *username)
{
int status;
value_list_t vl = VALUE_LIST_INIT;
notification_t n;
#if HAVE_LIBGCRYPT
int packet_was_signed = (flags & PP_SIGNED);
int packet_was_encrypted = (flags & PP_ENCRYPTED);
int printed_ignore_warning = 0;
#endif /* HAVE_LIBGCRYPT */
memset (&vl, '\0', sizeof (vl));
memset (&n, '\0', sizeof (n));
status = 0;
while ((status == 0) && (0 < buffer_size)
&& ((unsigned int) buffer_size > sizeof (part_header_t)))
{
uint16_t pkg_length;
uint16_t pkg_type;
memcpy ((void *) &pkg_type,
(void *) buffer,
sizeof (pkg_type));
memcpy ((void *) &pkg_length,
(void *) (buffer + sizeof (pkg_type)),
sizeof (pkg_length));
pkg_length = ntohs (pkg_length);
pkg_type = ntohs (pkg_type);
if (pkg_length > buffer_size)
break;
/* Ensure that this loop terminates eventually */
if (pkg_length < (2 * sizeof (uint16_t)))
break;
if (pkg_type == TYPE_ENCR_AES256)
{
status = parse_part_encr_aes256 (se,
&buffer, &buffer_size, flags);
if (status != 0)
{
ERROR ("network plugin: Decrypting AES256 "
"part failed "
"with status %i.", status);
break;
}
}
#if HAVE_LIBGCRYPT
else if ((se->data.server.security_level == SECURITY_LEVEL_ENCRYPT)
&& (packet_was_encrypted == 0))
{
if (printed_ignore_warning == 0)
{
INFO ("network plugin: Unencrypted packet or "
"part has been ignored.");
printed_ignore_warning = 1;
}
buffer = ((char *) buffer) + pkg_length;
continue;
}
#endif /* HAVE_LIBGCRYPT */
else if (pkg_type == TYPE_SIGN_SHA256)
{
status = parse_part_sign_sha256 (se,
&buffer, &buffer_size, flags);
if (status != 0)
{
ERROR ("network plugin: Verifying HMAC-SHA-256 "
"signature failed "
"with status %i.", status);
break;
}
}
#if HAVE_LIBGCRYPT
else if ((se->data.server.security_level == SECURITY_LEVEL_SIGN)
&& (packet_was_encrypted == 0)
&& (packet_was_signed == 0))
{
if (printed_ignore_warning == 0)
{
INFO ("network plugin: Unsigned packet or "
"part has been ignored.");
printed_ignore_warning = 1;
}
buffer = ((char *) buffer) + pkg_length;
continue;
}
#endif /* HAVE_LIBGCRYPT */
else if (pkg_type == TYPE_VALUES)
{
status = parse_part_values (&buffer, &buffer_size,
&vl.values, &vl.values_len);
if (status != 0)
break;
network_dispatch_values (&vl, username);
sfree (vl.values);
}
else if (pkg_type == TYPE_TIME)
{
uint64_t tmp = 0;
status = parse_part_number (&buffer, &buffer_size,
&tmp);
if (status == 0)
{
vl.time = TIME_T_TO_CDTIME_T (tmp);
n.time = TIME_T_TO_CDTIME_T (tmp);
}
}
else if (pkg_type == TYPE_TIME_HR)
{
uint64_t tmp = 0;
status = parse_part_number (&buffer, &buffer_size,
&tmp);
if (status == 0)
{
vl.time = (cdtime_t) tmp;
n.time = (cdtime_t) tmp;
}
}
else if (pkg_type == TYPE_INTERVAL)
{
uint64_t tmp = 0;
status = parse_part_number (&buffer, &buffer_size,
&tmp);
if (status == 0)
vl.interval = TIME_T_TO_CDTIME_T (tmp);
}
else if (pkg_type == TYPE_INTERVAL_HR)
{
uint64_t tmp = 0;
status = parse_part_number (&buffer, &buffer_size,
&tmp);
if (status == 0)
vl.interval = (cdtime_t) tmp;
}
else if (pkg_type == TYPE_HOST)
{
status = parse_part_string (&buffer, &buffer_size,
vl.host, sizeof (vl.host));
if (status == 0)
sstrncpy (n.host, vl.host, sizeof (n.host));
}
else if (pkg_type == TYPE_PLUGIN)
{
status = parse_part_string (&buffer, &buffer_size,
vl.plugin, sizeof (vl.plugin));
if (status == 0)
sstrncpy (n.plugin, vl.plugin,
sizeof (n.plugin));
}
else if (pkg_type == TYPE_PLUGIN_INSTANCE)
{
status = parse_part_string (&buffer, &buffer_size,
vl.plugin_instance,
sizeof (vl.plugin_instance));
if (status == 0)
sstrncpy (n.plugin_instance,
vl.plugin_instance,
sizeof (n.plugin_instance));
}
else if (pkg_type == TYPE_TYPE)
{
status = parse_part_string (&buffer, &buffer_size,
vl.type, sizeof (vl.type));
if (status == 0)
sstrncpy (n.type, vl.type, sizeof (n.type));
}
else if (pkg_type == TYPE_TYPE_INSTANCE)
{
status = parse_part_string (&buffer, &buffer_size,
vl.type_instance,
sizeof (vl.type_instance));
if (status == 0)
sstrncpy (n.type_instance, vl.type_instance,
sizeof (n.type_instance));
}
else if (pkg_type == TYPE_MESSAGE)
{
status = parse_part_string (&buffer, &buffer_size,
n.message, sizeof (n.message));
if (status != 0)
{
/* do nothing */
}
else if ((n.severity != NOTIF_FAILURE)
&& (n.severity != NOTIF_WARNING)
&& (n.severity != NOTIF_OKAY))
{
INFO ("network plugin: "
"Ignoring notification with "
"unknown severity %i.",
n.severity);
}
else if (n.time <= 0)
{
INFO ("network plugin: "
"Ignoring notification with "
"time == 0.");
}
else if (strlen (n.message) <= 0)
{
INFO ("network plugin: "
"Ignoring notification with "
"an empty message.");
}
else
{
network_dispatch_notification (&n);
}
}
else if (pkg_type == TYPE_SEVERITY)
{
uint64_t tmp = 0;
status = parse_part_number (&buffer, &buffer_size,
&tmp);
if (status == 0)
n.severity = (int) tmp;
}
else
{
DEBUG ("network plugin: parse_packet: Unknown part"
" type: 0x%04hx", pkg_type);
buffer = ((char *) buffer) + pkg_length;
}
} /* while (buffer_size > sizeof (part_header_t)) */
if (status == 0 && buffer_size > 0)
WARNING ("network plugin: parse_packet: Received truncated "
"packet, try increasing `MaxPacketSize'");
return (status);
} /* }}} int parse_packet */
| 268,201,944,099,933,340,000,000,000,000,000,000,000 | network.c | 301,004,326,594,565,760,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2016-6254 | Heap-based buffer overflow in the parse_packet function in network.c in collectd before 5.4.3 and 5.x before 5.5.2 allows remote attackers to cause a denial of service (daemon crash) or possibly execute arbitrary code via a crafted network packet. | https://nvd.nist.gov/vuln/detail/CVE-2016-6254 |
1,796 | libarchive | 3014e198 | https://github.com/libarchive/libarchive | https://github.com/libarchive/libarchive/commit/3014e198 | Issue 711: Be more careful about verifying filename lengths when writing ISO9660 archives
* Don't cast size_t to int, since this can lead to overflow
on machines where sizeof(int) < sizeof(size_t)
* Check a + b > limit by writing it as
a > limit || b > limit || a + b > limit
to avoid problems when a + b wraps around. | 1 | isoent_gen_joliet_identifier(struct archive_write *a, struct isoent *isoent,
struct idr *idr)
{
struct iso9660 *iso9660;
struct isoent *np;
unsigned char *p;
size_t l;
int r;
int ffmax, parent_len;
static const struct archive_rb_tree_ops rb_ops = {
isoent_cmp_node_joliet, isoent_cmp_key_joliet
};
if (isoent->children.cnt == 0)
return (0);
iso9660 = a->format_data;
if (iso9660->opt.joliet == OPT_JOLIET_LONGNAME)
ffmax = 206;
else
ffmax = 128;
r = idr_start(a, idr, isoent->children.cnt, ffmax, 6, 2, &rb_ops);
if (r < 0)
return (r);
parent_len = 1;
for (np = isoent; np->parent != np; np = np->parent)
parent_len += np->mb_len + 1;
for (np = isoent->children.first; np != NULL; np = np->chnext) {
unsigned char *dot;
int ext_off, noff, weight;
size_t lt;
if ((int)(l = np->file->basename_utf16.length) > ffmax)
l = ffmax;
p = malloc((l+1)*2);
if (p == NULL) {
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory");
return (ARCHIVE_FATAL);
}
memcpy(p, np->file->basename_utf16.s, l);
p[l] = 0;
p[l+1] = 0;
np->identifier = (char *)p;
lt = l;
dot = p + l;
weight = 0;
while (lt > 0) {
if (!joliet_allowed_char(p[0], p[1]))
archive_be16enc(p, 0x005F); /* '_' */
else if (p[0] == 0 && p[1] == 0x2E) /* '.' */
dot = p;
p += 2;
lt -= 2;
}
ext_off = (int)(dot - (unsigned char *)np->identifier);
np->ext_off = ext_off;
np->ext_len = (int)l - ext_off;
np->id_len = (int)l;
/*
* Get a length of MBS of a full-pathname.
*/
if ((int)np->file->basename_utf16.length > ffmax) {
if (archive_strncpy_l(&iso9660->mbs,
(const char *)np->identifier, l,
iso9660->sconv_from_utf16be) != 0 &&
errno == ENOMEM) {
archive_set_error(&a->archive, errno,
"No memory");
return (ARCHIVE_FATAL);
}
np->mb_len = (int)iso9660->mbs.length;
if (np->mb_len != (int)np->file->basename.length)
weight = np->mb_len;
} else
np->mb_len = (int)np->file->basename.length;
/* If a length of full-pathname is longer than 240 bytes,
* it violates Joliet extensions regulation. */
if (parent_len + np->mb_len > 240) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"The regulation of Joliet extensions;"
" A length of a full-pathname of `%s' is "
"longer than 240 bytes, (p=%d, b=%d)",
archive_entry_pathname(np->file->entry),
(int)parent_len, (int)np->mb_len);
return (ARCHIVE_FATAL);
}
/* Make an offset of the number which is used to be set
* hexadecimal number to avoid duplicate identifier. */
if ((int)l == ffmax)
noff = ext_off - 6;
else if ((int)l == ffmax-2)
noff = ext_off - 4;
else if ((int)l == ffmax-4)
noff = ext_off - 2;
else
noff = ext_off;
/* Register entry to the identifier resolver. */
idr_register(idr, np, weight, noff);
}
/* Resolve duplicate identifier with Joliet Volume. */
idr_resolve(idr, idr_set_num_beutf16);
return (ARCHIVE_OK);
}
| 98,797,913,130,701,970,000,000,000,000,000,000,000 | archive_write_set_format_iso9660.c | 22,023,816,518,742,655,000,000,000,000,000,000,000 | [
"CWE-190"
] | CVE-2016-6250 | Integer overflow in the ISO9660 writer in libarchive before 3.2.1 allows remote attackers to cause a denial of service (application crash) or execute arbitrary code via vectors related to verifying filename lengths when writing an ISO9660 archive, which trigger a buffer overflow. | https://nvd.nist.gov/vuln/detail/CVE-2016-6250 |
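The commit message above already states the safe comparison; here it is as a compilable helper, using the equivalent b > limit - a rewriting so the sum can never wrap:

#include <stddef.h>
#include <stdio.h>

/* Instead of testing a + b > limit (which can wrap around), test each
 * operand against the limit with the subtraction arranged so that it
 * cannot overflow. */
static int exceeds_limit(size_t a, size_t b, size_t limit)
{
    return a > limit || b > limit - a;   /* same truth value as a + b > limit */
}

int main(void)
{
    printf("%d\n", exceeds_limit(100, 100, 240));        /* 0 */
    printf("%d\n", exceeds_limit((size_t)-1, 2, 240));   /* 1, no wraparound */
    return 0;
}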
1,804 | linux | 9409e22acdfc9153f88d9b1ed2bd2a5b34d2d3ca | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/9409e22acdfc9153f88d9b1ed2bd2a5b34d2d3ca | vfs: rename: check backing inode being equal
If a file is renamed to a hardlink of itself POSIX specifies that rename(2)
should do nothing and return success.
This condition is checked in vfs_rename(). However it won't detect hard
links on overlayfs where these are given separate inodes on the overlayfs
layer.
Overlayfs itself detects this condition and returns success without doing
anything, but then vfs_rename() will proceed as if this was a successful
rename (detach_mounts(), d_move()).
The correct thing to do is to detect this condition before even calling
into overlayfs. This patch does this by calling vfs_select_inode() to get
the underlying inodes.
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
Cc: <stable@vger.kernel.org> # v4.2+ | 1 | int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
struct inode **delegated_inode, unsigned int flags)
{
int error;
bool is_dir = d_is_dir(old_dentry);
const unsigned char *old_name;
struct inode *source = old_dentry->d_inode;
struct inode *target = new_dentry->d_inode;
bool new_is_dir = false;
unsigned max_links = new_dir->i_sb->s_max_links;
if (source == target)
return 0;
error = may_delete(old_dir, old_dentry, is_dir);
if (error)
return error;
if (!target) {
error = may_create(new_dir, new_dentry);
} else {
new_is_dir = d_is_dir(new_dentry);
if (!(flags & RENAME_EXCHANGE))
error = may_delete(new_dir, new_dentry, is_dir);
else
error = may_delete(new_dir, new_dentry, new_is_dir);
}
if (error)
return error;
if (!old_dir->i_op->rename && !old_dir->i_op->rename2)
return -EPERM;
if (flags && !old_dir->i_op->rename2)
return -EINVAL;
/*
* If we are going to change the parent - check write permissions,
* we'll need to flip '..'.
*/
if (new_dir != old_dir) {
if (is_dir) {
error = inode_permission(source, MAY_WRITE);
if (error)
return error;
}
if ((flags & RENAME_EXCHANGE) && new_is_dir) {
error = inode_permission(target, MAY_WRITE);
if (error)
return error;
}
}
error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry,
flags);
if (error)
return error;
old_name = fsnotify_oldname_init(old_dentry->d_name.name);
dget(new_dentry);
if (!is_dir || (flags & RENAME_EXCHANGE))
lock_two_nondirectories(source, target);
else if (target)
inode_lock(target);
error = -EBUSY;
if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry))
goto out;
if (max_links && new_dir != old_dir) {
error = -EMLINK;
if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links)
goto out;
if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir &&
old_dir->i_nlink >= max_links)
goto out;
}
if (is_dir && !(flags & RENAME_EXCHANGE) && target)
shrink_dcache_parent(new_dentry);
if (!is_dir) {
error = try_break_deleg(source, delegated_inode);
if (error)
goto out;
}
if (target && !new_is_dir) {
error = try_break_deleg(target, delegated_inode);
if (error)
goto out;
}
if (!old_dir->i_op->rename2) {
error = old_dir->i_op->rename(old_dir, old_dentry,
new_dir, new_dentry);
} else {
WARN_ON(old_dir->i_op->rename != NULL);
error = old_dir->i_op->rename2(old_dir, old_dentry,
new_dir, new_dentry, flags);
}
if (error)
goto out;
if (!(flags & RENAME_EXCHANGE) && target) {
if (is_dir)
target->i_flags |= S_DEAD;
dont_mount(new_dentry);
detach_mounts(new_dentry);
}
if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) {
if (!(flags & RENAME_EXCHANGE))
d_move(old_dentry, new_dentry);
else
d_exchange(old_dentry, new_dentry);
}
out:
if (!is_dir || (flags & RENAME_EXCHANGE))
unlock_two_nondirectories(source, target);
else if (target)
inode_unlock(target);
dput(new_dentry);
if (!error) {
fsnotify_move(old_dir, new_dir, old_name, is_dir,
!(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
if (flags & RENAME_EXCHANGE) {
fsnotify_move(new_dir, old_dir, old_dentry->d_name.name,
new_is_dir, NULL, new_dentry);
}
}
fsnotify_oldname_free(old_name);
return error;
}
| 125,343,890,223,537,840,000,000,000,000,000,000,000 | namei.c | 194,268,518,459,072,760,000,000,000,000,000,000,000 | [
"CWE-284"
] | CVE-2016-6198 | The filesystem layer in the Linux kernel before 4.5.5 proceeds with post-rename operations after an OverlayFS file is renamed to a self-hardlink, which allows local users to cause a denial of service (system crash) via a rename system call, related to fs/namei.c and fs/open.c. | https://nvd.nist.gov/vuln/detail/CVE-2016-6198 |
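The vfs_rename entry above turns on the POSIX rule that renaming a file onto one of its own hard links must succeed without doing anything. The short POSIX C demo below only illustrates that rule as observable from user space; the file names are made up and this is not the overlayfs fix itself.
#include <stdio.h>
#include <unistd.h>
int main(void)
{
    FILE *f = fopen("demo_a", "w");          /* create demo_a              */
    if (!f) { perror("fopen"); return 1; }
    fputs("payload\n", f);
    fclose(f);
    if (link("demo_a", "demo_b") != 0) {     /* demo_b: hard link to it    */
        perror("link");
        return 1;
    }
    /* POSIX: old and new resolve to the same inode -> succeed, do nothing. */
    if (rename("demo_a", "demo_b") != 0)
        perror("rename");
    else
        puts("rename succeeded and both names still exist");
    unlink("demo_a");
    unlink("demo_b");
    return 0;
}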
1,807 | linux | 30a46a4647fd1df9cf52e43bf467f0d9265096ca | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/30a46a4647fd1df9cf52e43bf467f0d9265096ca | apparmor: fix oops, validate buffer size in apparmor_setprocattr()
When proc_pid_attr_write() was changed to use memdup_user apparmor's
(interface violating) assumption that the setprocattr buffer was always
a single page was violated.
The size test is not strictly speaking needed as proc_pid_attr_write()
will reject anything larger, but for the sake of robustness we can keep
it in.
SMACK and SELinux look safe to me, but somebody else should probably
have a look just in case.
Based on original patch from Vegard Nossum <vegard.nossum@oracle.com>
modified for the case that apparmor provides null termination.
Fixes: bb646cdb12e75d82258c2f2e7746d5952d3e321a
Reported-by: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: John Johansen <john.johansen@canonical.com>
Cc: Paul Moore <paul@paul-moore.com>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Cc: Eric Paris <eparis@parisplace.org>
Cc: Casey Schaufler <casey@schaufler-ca.com>
Cc: stable@kernel.org
Signed-off-by: John Johansen <john.johansen@canonical.com>
Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: James Morris <james.l.morris@oracle.com> | 1 | static int apparmor_setprocattr(struct task_struct *task, char *name,
void *value, size_t size)
{
struct common_audit_data sa;
struct apparmor_audit_data aad = {0,};
char *command, *args = value;
size_t arg_size;
int error;
if (size == 0)
return -EINVAL;
/* args points to a PAGE_SIZE buffer, AppArmor requires that
* the buffer must be null terminated or have size <= PAGE_SIZE -1
* so that AppArmor can null terminate them
*/
if (args[size - 1] != '\0') {
if (size == PAGE_SIZE)
return -EINVAL;
args[size] = '\0';
}
/* task can only write its own attributes */
if (current != task)
return -EACCES;
args = value;
args = strim(args);
command = strsep(&args, " ");
if (!args)
return -EINVAL;
args = skip_spaces(args);
if (!*args)
return -EINVAL;
arg_size = size - (args - (char *) value);
if (strcmp(name, "current") == 0) {
if (strcmp(command, "changehat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
!AA_DO_TEST);
} else if (strcmp(command, "permhat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
AA_DO_TEST);
} else if (strcmp(command, "changeprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
!AA_DO_TEST);
} else if (strcmp(command, "permprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
AA_DO_TEST);
} else
goto fail;
} else if (strcmp(name, "exec") == 0) {
if (strcmp(command, "exec") == 0)
error = aa_setprocattr_changeprofile(args, AA_ONEXEC,
!AA_DO_TEST);
else
goto fail;
} else
/* only support the "current" and "exec" process attributes */
return -EINVAL;
if (!error)
error = size;
return error;
fail:
sa.type = LSM_AUDIT_DATA_NONE;
sa.aad = &aad;
aad.profile = aa_current_profile();
aad.op = OP_SETPROCATTR;
aad.info = name;
aad.error = -EINVAL;
aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
return -EINVAL;
}
| 94,069,181,006,806,240,000,000,000,000,000,000,000 | lsm.c | 292,105,851,096,993,260,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2016-6187 | The apparmor_setprocattr function in security/apparmor/lsm.c in the Linux kernel before 4.6.5 does not validate the buffer size, which allows local users to gain privileges by triggering an AppArmor setprocattr hook. | https://nvd.nist.gov/vuln/detail/CVE-2016-6187 |
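The apparmor oops comes from planting a terminating NUL one byte past a buffer that is exactly size bytes long. Below is a hedged userspace sketch of the safer shape: reject bad sizes, then terminate inside a private copy allocated with room for the NUL. The helper name and the ATTR_MAX cap are invented, not the kernel patch.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define ATTR_MAX 4096   /* stand-in for the one-page limit */
/* Return a NUL-terminated private copy of an untrusted buffer,
 * or NULL if the size is unacceptable.  Caller frees the result. */
static char *dup_terminated(const char *buf, size_t size)
{
    char *copy;
    if (size == 0 || size > ATTR_MAX)
        return NULL;
    copy = malloc(size + 1);        /* always room for the NUL          */
    if (!copy)
        return NULL;
    memcpy(copy, buf, size);
    copy[size] = '\0';              /* terminate inside our own buffer  */
    return copy;
}
int main(void)
{
    const char raw[] = { 'c', 'h', 'a', 'n', 'g', 'e', 'h', 'a', 't' }; /* no NUL */
    char *args = dup_terminated(raw, sizeof(raw));
    if (args) {
        printf("command: %s\n", args);
        free(args);
    }
    return 0;
}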
1,808 | linux | 096cdc6f52225835ff503f987a0d68ef770bb78e | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/096cdc6f52225835ff503f987a0d68ef770bb78e | platform/chrome: cros_ec_dev - double fetch bug in ioctl
We verify "u_cmd.outsize" and "u_cmd.insize" but we need to make sure
that those values have not changed between the two copy_from_user()
calls. Otherwise it could lead to a buffer overflow.
Additionally, cros_ec_cmd_xfer() can set s_cmd->insize to a lower value.
We should use the new smaller value so we don't copy too much data to
the user.
Reported-by: Pengfei Wang <wpengfeinudt@gmail.com>
Fixes: a841178445bb ('mfd: cros_ec: Use a zero-length array for command data')
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Tested-by: Gwendal Grignou <gwendal@chromium.org>
Cc: <stable@vger.kernel.org> # v4.2+
Signed-off-by: Olof Johansson <olof@lixom.net> | 1 | static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
{
long ret;
struct cros_ec_command u_cmd;
struct cros_ec_command *s_cmd;
if (copy_from_user(&u_cmd, arg, sizeof(u_cmd)))
return -EFAULT;
if ((u_cmd.outsize > EC_MAX_MSG_BYTES) ||
(u_cmd.insize > EC_MAX_MSG_BYTES))
return -EINVAL;
s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
GFP_KERNEL);
if (!s_cmd)
return -ENOMEM;
if (copy_from_user(s_cmd, arg, sizeof(*s_cmd) + u_cmd.outsize)) {
ret = -EFAULT;
goto exit;
}
s_cmd->command += ec->cmd_offset;
ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;
if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
ret = -EFAULT;
exit:
kfree(s_cmd);
return ret;
}
| 163,172,745,704,404,350,000,000,000,000,000,000,000 | cros_ec_dev.c | 103,010,045,391,424,050,000,000,000,000,000,000,000 | [
"CWE-362"
] | CVE-2016-6156 | Race condition in the ec_device_ioctl_xcmd function in drivers/platform/chrome/cros_ec_dev.c in the Linux kernel before 4.7 allows local users to cause a denial of service (out-of-bounds array access) by changing a certain size value, aka a "double fetch" vulnerability. | https://nvd.nist.gov/vuln/detail/CVE-2016-6156 |
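Entry 1,808 is a textbook double fetch: the sizes are read once for validation and the whole structure is read again for use, so a concurrent writer can change them in between. The userspace analogue below sketches one common mitigation, comparing the second fetch against the first and rejecting any change; the struct, the limits, and the deterministically simulated race are all invented for illustration.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
struct cmd_hdr { uint32_t outsize; uint32_t insize; };
#define MAX_MSG_BYTES 256
/* "User" memory that another thread could scribble on at any time. */
static unsigned char userbuf[sizeof(struct cmd_hdr) + MAX_MSG_BYTES];
static int handle_cmd(void)
{
    struct cmd_hdr first, *full;
    uint32_t tampered = 4096;
    memcpy(&first, userbuf, sizeof(first));            /* fetch #1 */
    if (first.outsize > MAX_MSG_BYTES || first.insize > MAX_MSG_BYTES)
        return -1;
    /* Simulate the attacker winning the race between the two fetches. */
    memcpy(userbuf, &tampered, sizeof(tampered));      /* flips outsize */
    full = malloc(sizeof(*full) + first.outsize);
    if (!full)
        return -1;
    memcpy(full, userbuf, sizeof(*full) + first.outsize); /* fetch #2 */
    /* Mitigation: the values used later must match the validated ones. */
    if (full->outsize != first.outsize || full->insize != first.insize) {
        free(full);
        return -2;                                     /* reject the race */
    }
    /* ... safe to use full->outsize / full->insize here ... */
    free(full);
    return 0;
}
int main(void)
{
    struct cmd_hdr h = { .outsize = 16, .insize = 16 };
    memcpy(userbuf, &h, sizeof(h));
    printf("handle_cmd() = %d\n", handle_cmd());       /* -2: change detected */
    return 0;
}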
1,810 | linux | 532c34b5fbf1687df63b3fcd5b2846312ac943c6 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/532c34b5fbf1687df63b3fcd5b2846312ac943c6 | s390/sclp_ctl: fix potential information leak with /dev/sclp
The sclp_ctl_ioctl_sccb function uses two copy_from_user calls to
retrieve the sclp request from user space. The first copy_from_user
fetches the length of the request which is stored in the first two
bytes of the request. The second copy_from_user gets the complete
sclp request, but this copies the length field a second time.
A malicious user may have changed the length in the meantime.
Reported-by: Pengfei Wang <wpengfeinudt@gmail.com>
Reviewed-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com> | 1 | static int sclp_ctl_ioctl_sccb(void __user *user_area)
{
struct sclp_ctl_sccb ctl_sccb;
struct sccb_header *sccb;
int rc;
if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
return -EFAULT;
if (!sclp_ctl_cmdw_supported(ctl_sccb.cmdw))
return -EOPNOTSUPP;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
rc = -EFAULT;
goto out_free;
}
if (sccb->length > PAGE_SIZE || sccb->length < 8)
return -EINVAL;
if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
rc = -EFAULT;
goto out_free;
}
rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
if (rc)
goto out_free;
if (copy_to_user(u64_to_uptr(ctl_sccb.sccb), sccb, sccb->length))
rc = -EFAULT;
out_free:
free_page((unsigned long) sccb);
return rc;
}
| 210,420,001,045,995,120,000,000,000,000,000,000,000 | sclp_ctl.c | 33,795,353,629,654,793,000,000,000,000,000,000,000 | [
"CWE-362"
] | CVE-2016-6130 | Race condition in the sclp_ctl_ioctl_sccb function in drivers/s390/char/sclp_ctl.c in the Linux kernel before 4.6 allows local users to obtain sensitive information from kernel memory by changing a certain length value, aka a "double fetch" vulnerability. | https://nvd.nist.gov/vuln/detail/CVE-2016-6130 |
1,811 | libarchive | 3ad08e01b4d253c66ae56414886089684155af22 | https://github.com/libarchive/libarchive | https://github.com/libarchive/libarchive/commit/3ad08e01b4d253c66ae56414886089684155af22 | Issue 717: Fix integer overflow when computing location of volume descriptor
The multiplication here defaulted to 'int' but calculations
of file positions should always use int64_t. A simple cast
suffices to fix this since the base location is always 32 bits
for ISO, so multiplying by the sector size will never overflow
a 64-bit integer. | 1 | choose_volume(struct archive_read *a, struct iso9660 *iso9660)
{
struct file_info *file;
int64_t skipsize;
struct vd *vd;
const void *block;
char seenJoliet;
vd = &(iso9660->primary);
if (!iso9660->opt_support_joliet)
iso9660->seenJoliet = 0;
if (iso9660->seenJoliet &&
vd->location > iso9660->joliet.location)
/* This condition is unlikely; by way of caution. */
vd = &(iso9660->joliet);
skipsize = LOGICAL_BLOCK_SIZE * vd->location;
skipsize = __archive_read_consume(a, skipsize);
if (skipsize < 0)
return ((int)skipsize);
iso9660->current_position = skipsize;
block = __archive_read_ahead(a, vd->size, NULL);
if (block == NULL) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Failed to read full block when scanning "
"ISO9660 directory list");
return (ARCHIVE_FATAL);
}
/*
* While reading Root Directory, flag seenJoliet must be zero to
* avoid converting special name 0x00(Current Directory) and
* next byte to UCS2.
*/
seenJoliet = iso9660->seenJoliet;/* Save flag. */
iso9660->seenJoliet = 0;
file = parse_file_info(a, NULL, block);
if (file == NULL)
return (ARCHIVE_FATAL);
iso9660->seenJoliet = seenJoliet;
/*
* If the iso image has both RockRidge and Joliet, we preferentially
* use RockRidge Extensions rather than Joliet ones.
*/
if (vd == &(iso9660->primary) && iso9660->seenRockridge
&& iso9660->seenJoliet)
iso9660->seenJoliet = 0;
if (vd == &(iso9660->primary) && !iso9660->seenRockridge
&& iso9660->seenJoliet) {
/* Switch reading data from primary to joliet. */
vd = &(iso9660->joliet);
skipsize = LOGICAL_BLOCK_SIZE * vd->location;
skipsize -= iso9660->current_position;
skipsize = __archive_read_consume(a, skipsize);
if (skipsize < 0)
return ((int)skipsize);
iso9660->current_position += skipsize;
block = __archive_read_ahead(a, vd->size, NULL);
if (block == NULL) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Failed to read full block when scanning "
"ISO9660 directory list");
return (ARCHIVE_FATAL);
}
iso9660->seenJoliet = 0;
file = parse_file_info(a, NULL, block);
if (file == NULL)
return (ARCHIVE_FATAL);
iso9660->seenJoliet = seenJoliet;
}
/* Store the root directory in the pending list. */
if (add_entry(a, iso9660, file) != ARCHIVE_OK)
return (ARCHIVE_FATAL);
if (iso9660->seenRockridge) {
a->archive.archive_format = ARCHIVE_FORMAT_ISO9660_ROCKRIDGE;
a->archive.archive_format_name =
"ISO9660 with Rockridge extensions";
}
return (ARCHIVE_OK);
}
| 79,794,590,550,957,910,000,000,000,000,000,000,000 | archive_read_support_format_iso9660.c | 161,757,426,250,598,000,000,000,000,000,000,000,000 | [
"CWE-190"
] | CVE-2016-5844 | Integer overflow in the ISO parser in libarchive before 3.2.1 allows remote attackers to cause a denial of service (application crash) via a crafted ISO file. | https://nvd.nist.gov/vuln/detail/CVE-2016-5844 |
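The libarchive reader fix above is essentially a widening cast: compute the file offset in 64 bits instead of letting the product wrap in 32. The sketch below shows the difference on the usual ABIs where unsigned int is 32 bits; the 2048-byte logical block size matches ISO9660, the rest of the names are illustrative.
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#define LOGICAL_BLOCK_SIZE 2048
int main(void)
{
    uint32_t location = 3000000U;      /* block number read from the image   */
    /* Both operands are 32-bit: the product wraps before the widening cast. */
    int64_t wrong = (int64_t)(location * LOGICAL_BLOCK_SIZE);
    /* Widen first: the multiplication itself happens in 64 bits. */
    int64_t right = (int64_t)location * LOGICAL_BLOCK_SIZE;
    printf("wrapped : %" PRId64 "\n", wrong);  /* truncated, bogus offset     */
    printf("correct : %" PRId64 "\n", right);  /* 6144000000                  */
    return 0;
}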
1,812 | linux | 93a2001bdfd5376c3dc2158653034c20392d15c5 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/93a2001bdfd5376c3dc2158653034c20392d15c5 | HID: hiddev: validate num_values for HIDIOCGUSAGES, HIDIOCSUSAGES commands
This patch validates the num_values parameter from userland during the
HIDIOCGUSAGES and HIDIOCSUSAGES commands. Previously, if the report id was set
to HID_REPORT_ID_UNKNOWN, we would fail to validate the num_values parameter
leading to a heap overflow.
Cc: stable@vger.kernel.org
Signed-off-by: Scott Bauer <sbauer@plzdonthack.me>
Signed-off-by: Jiri Kosina <jkosina@suse.cz> | 1 | static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, void __user *user_arg)
{
struct hid_device *hid = hiddev->hid;
struct hiddev_report_info rinfo;
struct hiddev_usage_ref_multi *uref_multi = NULL;
struct hiddev_usage_ref *uref;
struct hid_report *report;
struct hid_field *field;
int i;
uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL);
if (!uref_multi)
return -ENOMEM;
uref = &uref_multi->uref;
if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
if (copy_from_user(uref_multi, user_arg,
sizeof(*uref_multi)))
goto fault;
} else {
if (copy_from_user(uref, user_arg, sizeof(*uref)))
goto fault;
}
switch (cmd) {
case HIDIOCGUCODE:
rinfo.report_type = uref->report_type;
rinfo.report_id = uref->report_id;
if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
goto inval;
if (uref->field_index >= report->maxfield)
goto inval;
field = report->field[uref->field_index];
if (uref->usage_index >= field->maxusage)
goto inval;
uref->usage_code = field->usage[uref->usage_index].hid;
if (copy_to_user(user_arg, uref, sizeof(*uref)))
goto fault;
goto goodreturn;
default:
if (cmd != HIDIOCGUSAGE &&
cmd != HIDIOCGUSAGES &&
uref->report_type == HID_REPORT_TYPE_INPUT)
goto inval;
if (uref->report_id == HID_REPORT_ID_UNKNOWN) {
field = hiddev_lookup_usage(hid, uref);
if (field == NULL)
goto inval;
} else {
rinfo.report_type = uref->report_type;
rinfo.report_id = uref->report_id;
if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
goto inval;
if (uref->field_index >= report->maxfield)
goto inval;
field = report->field[uref->field_index];
if (cmd == HIDIOCGCOLLECTIONINDEX) {
if (uref->usage_index >= field->maxusage)
goto inval;
} else if (uref->usage_index >= field->report_count)
goto inval;
else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
(uref_multi->num_values > HID_MAX_MULTI_USAGES ||
uref->usage_index + uref_multi->num_values > field->report_count))
goto inval;
}
switch (cmd) {
case HIDIOCGUSAGE:
uref->value = field->value[uref->usage_index];
if (copy_to_user(user_arg, uref, sizeof(*uref)))
goto fault;
goto goodreturn;
case HIDIOCSUSAGE:
field->value[uref->usage_index] = uref->value;
goto goodreturn;
case HIDIOCGCOLLECTIONINDEX:
i = field->usage[uref->usage_index].collection_index;
kfree(uref_multi);
return i;
case HIDIOCGUSAGES:
for (i = 0; i < uref_multi->num_values; i++)
uref_multi->values[i] =
field->value[uref->usage_index + i];
if (copy_to_user(user_arg, uref_multi,
sizeof(*uref_multi)))
goto fault;
goto goodreturn;
case HIDIOCSUSAGES:
for (i = 0; i < uref_multi->num_values; i++)
field->value[uref->usage_index + i] =
uref_multi->values[i];
goto goodreturn;
}
goodreturn:
kfree(uref_multi);
return 0;
fault:
kfree(uref_multi);
return -EFAULT;
inval:
kfree(uref_multi);
return -EINVAL;
}
}
| 48,668,317,957,594,940,000,000,000,000,000,000,000 | hiddev.c | 214,175,292,621,757,830,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2016-5829 | Multiple heap-based buffer overflows in the hiddev_ioctl_usage function in drivers/hid/usbhid/hiddev.c in the Linux kernel through 4.6.3 allow local users to cause a denial of service or possibly have unspecified other impact via a crafted (1) HIDIOCGUSAGES or (2) HIDIOCSUSAGES ioctl call. | https://nvd.nist.gov/vuln/detail/CVE-2016-5829 |
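The hiddev fix is about refusing a start index plus element count that would run off the end of the backing array before any element is touched. A minimal sketch of that bound check over an ordinary array follows; the structure and REPORT_COUNT limit are invented, and the overflow-proof subtraction form of the comparison is the point.
#include <stdint.h>
#include <stdio.h>
#define REPORT_COUNT 32u      /* capacity of the values[] array */
static int32_t values[REPORT_COUNT];
/* Copy `count` values starting at `index` into `out`, or fail. */
static int get_usages(uint32_t index, uint32_t count, int32_t *out)
{
    uint32_t i;
    /* Reject before touching the array; the subtraction form cannot wrap. */
    if (index >= REPORT_COUNT || count > REPORT_COUNT - index)
        return -1;
    for (i = 0; i < count; i++)
        out[i] = values[index + i];
    return 0;
}
int main(void)
{
    int32_t out[REPORT_COUNT];
    printf("%d\n", get_usages(4, 8, out));            /*  0: in bounds     */
    printf("%d\n", get_usages(30, 8, out));           /* -1: runs off end  */
    printf("%d\n", get_usages(1, 0xFFFFFFFFu, out));  /* -1: huge count    */
    return 0;
}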
1,813 | php-src | a44c89e8af7c2410f4bfc5e097be2a5d0639a60c | https://github.com/php/php-src | http://github.com/php/php-src/commit/a44c89e8af7c2410f4bfc5e097be2a5d0639a60c?w=1 | Fix bug #72340: Double Free Courruption in wddx_deserialize | 1 | static void php_wddx_process_data(void *user_data, const XML_Char *s, int len)
{
st_entry *ent;
wddx_stack *stack = (wddx_stack *)user_data;
TSRMLS_FETCH();
if (!wddx_stack_is_empty(stack) && !stack->done) {
wddx_stack_top(stack, (void**)&ent);
switch (ent->type) {
case ST_STRING:
if (Z_STRLEN_P(ent->data) == 0) {
STR_FREE(Z_STRVAL_P(ent->data));
Z_STRVAL_P(ent->data) = estrndup(s, len);
Z_STRLEN_P(ent->data) = len;
} else {
Z_STRVAL_P(ent->data) = erealloc(Z_STRVAL_P(ent->data), Z_STRLEN_P(ent->data) + len + 1);
memcpy(Z_STRVAL_P(ent->data) + Z_STRLEN_P(ent->data), s, len);
Z_STRLEN_P(ent->data) += len;
Z_STRVAL_P(ent->data)[Z_STRLEN_P(ent->data)] = '\0';
}
break;
case ST_BINARY:
if (Z_STRLEN_P(ent->data) == 0) {
STR_FREE(Z_STRVAL_P(ent->data));
Z_STRVAL_P(ent->data) = estrndup(s, len + 1);
} else {
Z_STRVAL_P(ent->data) = erealloc(Z_STRVAL_P(ent->data), Z_STRLEN_P(ent->data) + len + 1);
memcpy(Z_STRVAL_P(ent->data) + Z_STRLEN_P(ent->data), s, len);
}
Z_STRLEN_P(ent->data) += len;
Z_STRVAL_P(ent->data)[Z_STRLEN_P(ent->data)] = '\0';
break;
case ST_NUMBER:
Z_TYPE_P(ent->data) = IS_STRING;
Z_STRLEN_P(ent->data) = len;
Z_STRVAL_P(ent->data) = estrndup(s, len);
convert_scalar_to_number(ent->data TSRMLS_CC);
break;
case ST_BOOLEAN:
if (!strcmp(s, "true")) {
Z_LVAL_P(ent->data) = 1;
} else if (!strcmp(s, "false")) {
Z_LVAL_P(ent->data) = 0;
} else {
zval_ptr_dtor(&ent->data);
if (ent->varname) {
efree(ent->varname);
}
ent->data = NULL;
}
break;
case ST_DATETIME: {
char *tmp;
tmp = emalloc(len + 1);
memcpy(tmp, s, len);
tmp[len] = '\0';
Z_LVAL_P(ent->data) = php_parse_date(tmp, NULL);
/* date out of range < 1969 or > 2038 */
if (Z_LVAL_P(ent->data) == -1) {
Z_TYPE_P(ent->data) = IS_STRING;
Z_STRLEN_P(ent->data) = len;
Z_STRVAL_P(ent->data) = estrndup(s, len);
}
efree(tmp);
}
break;
default:
break;
}
}
}
| 248,167,914,886,720,230,000,000,000,000,000,000,000 | None | null | [
"CWE-415"
] | CVE-2016-5772 | Double free vulnerability in the php_wddx_process_data function in wddx.c in the WDDX extension in PHP before 5.5.37, 5.6.x before 5.6.23, and 7.x before 7.0.8 allows remote attackers to cause a denial of service (application crash) or possibly execute arbitrary code via crafted XML data that is mishandled in a wddx_deserialize call. | https://nvd.nist.gov/vuln/detail/CVE-2016-5772 |
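CVE-2016-5772 is a double free: two cleanup paths end up releasing the same allocation. The small C illustration below shows the defensive habit that prevents this class of bug, clearing the pointer the moment ownership is given up so any later cleanup pass finds nothing left to free; the names are invented and this is not the PHP patch itself.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
struct entry {
    char *data;           /* owned buffer, or NULL */
};
/* Release the buffer exactly once, no matter how often this is called. */
static void entry_reset(struct entry *e)
{
    free(e->data);        /* free(NULL) is a defined no-op */
    e->data = NULL;
}
int main(void)
{
    struct entry e = { 0 };
    e.data = malloc(sizeof("deserialized payload"));
    if (!e.data)
        return 1;
    strcpy(e.data, "deserialized payload");
    entry_reset(&e);      /* error path drops the value...             */
    entry_reset(&e);      /* ...and the final cleanup is now harmless  */
    printf("data is %s\n", e.data ? e.data : "(null)");
    return 0;
}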
1,813 | php-src | a44c89e8af7c2410f4bfc5e097be2a5d0639a60c | https://github.com/php/php-src | http://github.com/php/php-src/commit/a44c89e8af7c2410f4bfc5e097be2a5d0639a60c?w=1 | Fix bug #72340: Double Free Corruption in wddx_deserialize | 1 | static void php_wddx_process_data(void *user_data, const XML_Char *s, int len)
{
REGISTER_SPL_STD_CLASS_EX(ArrayObject, spl_array_object_new, spl_funcs_ArrayObject);
REGISTER_SPL_IMPLEMENTS(ArrayObject, Aggregate);
REGISTER_SPL_IMPLEMENTS(ArrayObject, ArrayAccess);
REGISTER_SPL_IMPLEMENTS(ArrayObject, Serializable);
REGISTER_SPL_IMPLEMENTS(ArrayObject, Countable);
memcpy(&spl_handler_ArrayObject, zend_get_std_object_handlers(), sizeof(zend_object_handlers));
spl_handler_ArrayObject.clone_obj = spl_array_object_clone;
spl_handler_ArrayObject.read_dimension = spl_array_read_dimension;
spl_handler_ArrayObject.write_dimension = spl_array_write_dimension;
spl_handler_ArrayObject.unset_dimension = spl_array_unset_dimension;
spl_handler_ArrayObject.has_dimension = spl_array_has_dimension;
spl_handler_ArrayObject.count_elements = spl_array_object_count_elements;
spl_handler_ArrayObject.get_properties = spl_array_get_properties;
spl_handler_ArrayObject.get_debug_info = spl_array_get_debug_info;
spl_handler_ArrayObject.read_property = spl_array_read_property;
spl_handler_ArrayObject.write_property = spl_array_write_property;
spl_handler_ArrayObject.get_property_ptr_ptr = spl_array_get_property_ptr_ptr;
spl_handler_ArrayObject.has_property = spl_array_has_property;
spl_handler_ArrayObject.unset_property = spl_array_unset_property;
spl_handler_ArrayObject.compare_objects = spl_array_compare_objects;
REGISTER_SPL_STD_CLASS_EX(ArrayIterator, spl_array_object_new, spl_funcs_ArrayIterator);
REGISTER_SPL_IMPLEMENTS(ArrayIterator, Iterator);
REGISTER_SPL_IMPLEMENTS(ArrayIterator, ArrayAccess);
REGISTER_SPL_IMPLEMENTS(ArrayIterator, SeekableIterator);
REGISTER_SPL_IMPLEMENTS(ArrayIterator, Serializable);
REGISTER_SPL_IMPLEMENTS(ArrayIterator, Countable);
memcpy(&spl_handler_ArrayIterator, &spl_handler_ArrayObject, sizeof(zend_object_handlers));
spl_ce_ArrayIterator->get_iterator = spl_array_get_iterator;
REGISTER_SPL_SUB_CLASS_EX(RecursiveArrayIterator, ArrayIterator, spl_array_object_new, spl_funcs_RecursiveArrayIterator);
REGISTER_SPL_IMPLEMENTS(RecursiveArrayIterator, RecursiveIterator);
spl_ce_RecursiveArrayIterator->get_iterator = spl_array_get_iterator;
REGISTER_SPL_CLASS_CONST_LONG(ArrayObject, "STD_PROP_LIST", SPL_ARRAY_STD_PROP_LIST);
REGISTER_SPL_CLASS_CONST_LONG(ArrayObject, "ARRAY_AS_PROPS", SPL_ARRAY_ARRAY_AS_PROPS);
REGISTER_SPL_CLASS_CONST_LONG(ArrayIterator, "STD_PROP_LIST", SPL_ARRAY_STD_PROP_LIST);
REGISTER_SPL_CLASS_CONST_LONG(ArrayIterator, "ARRAY_AS_PROPS", SPL_ARRAY_ARRAY_AS_PROPS);
REGISTER_SPL_CLASS_CONST_LONG(RecursiveArrayIterator, "CHILD_ARRAYS_ONLY", SPL_ARRAY_CHILD_ARRAYS_ONLY);
return SUCCESS;
}
| 253,420,511,984,728,500,000,000,000,000,000,000,000 | spl_array.c | 252,840,016,822,366,870,000,000,000,000,000,000,000 | [
"CWE-416"
] | CVE-2016-5771 | spl_array.c in the SPL extension in PHP before 5.5.37 and 5.6.x before 5.6.23 improperly interacts with the unserialize implementation and garbage collection, which allows remote attackers to execute arbitrary code or cause a denial of service (use-after-free and application crash) via crafted serialized data. | https://nvd.nist.gov/vuln/detail/CVE-2016-5771 |
1,849 | php-src | 7245bff300d3fa8bacbef7897ff080a6f1c23eba | https://github.com/php/php-src | http://github.com/php/php-src/commit/7245bff300d3fa8bacbef7897ff080a6f1c23eba?w=1 | Fix bug #72262 - do not overflow int | 1 | SPL_METHOD(SplFileObject, fread)
{
spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
long length = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &length) == FAILURE) {
return;
}
if (length <= 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Length parameter must be greater than 0");
RETURN_FALSE;
}
Z_STRVAL_P(return_value) = emalloc(length + 1);
Z_STRLEN_P(return_value) = php_stream_read(intern->u.file.stream, Z_STRVAL_P(return_value), length);
/* needed because recv/read/gzread doesnt put a null at the end*/
Z_STRVAL_P(return_value)[Z_STRLEN_P(return_value)] = 0;
Z_TYPE_P(return_value) = IS_STRING;
}
| 244,608,472,582,836,800,000,000,000,000,000,000,000 | spl_directory.c | 188,841,539,753,934,600,000,000,000,000,000,000,000 | [
"CWE-190"
] | CVE-2016-5770 | Integer overflow in the SplFileObject::fread function in spl_directory.c in the SPL extension in PHP before 5.5.37 and 5.6.x before 5.6.23 allows remote attackers to cause a denial of service or possibly have unspecified other impact via a large integer argument, a related issue to CVE-2016-5096. | https://nvd.nist.gov/vuln/detail/CVE-2016-5770 |
1,895 | php-src | 5b597a2e5b28e2d5a52fc1be13f425f08f47cb62 | https://github.com/php/php-src | http://github.com/php/php-src/commit/5b597a2e5b28e2d5a52fc1be13f425f08f47cb62?w=1 | Fix bug #72402: _php_mb_regex_ereg_replace_exec - double free | 1 | static void _php_mb_regex_ereg_replace_exec(INTERNAL_FUNCTION_PARAMETERS, OnigOptionType options, int is_callable)
{
zval **arg_pattern_zval;
char *arg_pattern;
int arg_pattern_len;
char *replace;
int replace_len;
zend_fcall_info arg_replace_fci;
zend_fcall_info_cache arg_replace_fci_cache;
char *string;
int string_len;
char *p;
php_mb_regex_t *re;
OnigSyntaxType *syntax;
OnigRegion *regs = NULL;
smart_str out_buf = { 0 };
smart_str eval_buf = { 0 };
smart_str *pbuf;
int i, err, eval, n;
OnigUChar *pos;
OnigUChar *string_lim;
char *description = NULL;
char pat_buf[2];
const mbfl_encoding *enc;
{
const char *current_enc_name;
current_enc_name = _php_mb_regex_mbctype2name(MBREX(current_mbctype));
if (current_enc_name == NULL ||
(enc = mbfl_name2encoding(current_enc_name)) == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unknown error");
RETURN_FALSE;
}
}
eval = 0;
{
char *option_str = NULL;
int option_str_len = 0;
if (!is_callable) {
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Zss|s",
&arg_pattern_zval,
&replace, &replace_len,
&string, &string_len,
&option_str, &option_str_len) == FAILURE) {
RETURN_FALSE;
}
} else {
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Zfs|s",
&arg_pattern_zval,
&arg_replace_fci, &arg_replace_fci_cache,
&string, &string_len,
&option_str, &option_str_len) == FAILURE) {
RETURN_FALSE;
}
}
if (option_str != NULL) {
_php_mb_regex_init_options(option_str, option_str_len, &options, &syntax, &eval);
} else {
options |= MBREX(regex_default_options);
syntax = MBREX(regex_default_syntax);
}
}
if (Z_TYPE_PP(arg_pattern_zval) == IS_STRING) {
arg_pattern = Z_STRVAL_PP(arg_pattern_zval);
arg_pattern_len = Z_STRLEN_PP(arg_pattern_zval);
} else {
/* FIXME: this code is not multibyte aware! */
convert_to_long_ex(arg_pattern_zval);
pat_buf[0] = (char)Z_LVAL_PP(arg_pattern_zval);
pat_buf[1] = '\0';
arg_pattern = pat_buf;
arg_pattern_len = 1;
}
/* create regex pattern buffer */
re = php_mbregex_compile_pattern(arg_pattern, arg_pattern_len, options, MBREX(current_mbctype), syntax TSRMLS_CC);
if (re == NULL) {
RETURN_FALSE;
}
if (eval || is_callable) {
pbuf = &eval_buf;
description = zend_make_compiled_string_description("mbregex replace" TSRMLS_CC);
} else {
pbuf = &out_buf;
description = NULL;
}
if (is_callable) {
if (eval) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Option 'e' cannot be used with replacement callback");
RETURN_FALSE;
}
}
/* do the actual work */
err = 0;
pos = (OnigUChar *)string;
string_lim = (OnigUChar*)(string + string_len);
regs = onig_region_new();
while (err >= 0) {
err = onig_search(re, (OnigUChar *)string, (OnigUChar *)string_lim, pos, (OnigUChar *)string_lim, regs, 0);
if (err <= -2) {
OnigUChar err_str[ONIG_MAX_ERROR_MESSAGE_LEN];
onig_error_code_to_str(err_str, err);
php_error_docref(NULL TSRMLS_CC, E_WARNING, "mbregex search failure in php_mbereg_replace_exec(): %s", err_str);
break;
}
if (err >= 0) {
#if moriyoshi_0
if (regs->beg[0] == regs->end[0]) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty regular expression");
break;
}
#endif
/* copy the part of the string before the match */
smart_str_appendl(&out_buf, pos, (size_t)((OnigUChar *)(string + regs->beg[0]) - pos));
if (!is_callable) {
/* copy replacement and backrefs */
i = 0;
p = replace;
while (i < replace_len) {
int fwd = (int) php_mb_mbchar_bytes_ex(p, enc);
n = -1;
if ((replace_len - i) >= 2 && fwd == 1 &&
p[0] == '\\' && p[1] >= '0' && p[1] <= '9') {
n = p[1] - '0';
}
if (n >= 0 && n < regs->num_regs) {
if (regs->beg[n] >= 0 && regs->beg[n] < regs->end[n] && regs->end[n] <= string_len) {
smart_str_appendl(pbuf, string + regs->beg[n], regs->end[n] - regs->beg[n]);
}
p += 2;
i += 2;
} else {
smart_str_appendl(pbuf, p, fwd);
p += fwd;
i += fwd;
}
}
}
if (eval) {
zval v;
/* null terminate buffer */
smart_str_0(&eval_buf);
/* do eval */
if (zend_eval_stringl(eval_buf.c, eval_buf.len, &v, description TSRMLS_CC) == FAILURE) {
efree(description);
php_error_docref(NULL TSRMLS_CC,E_ERROR, "Failed evaluating code: %s%s", PHP_EOL, eval_buf.c);
/* zend_error() does not return in this case */
}
/* result of eval */
convert_to_string(&v);
smart_str_appendl(&out_buf, Z_STRVAL(v), Z_STRLEN(v));
/* Clean up */
eval_buf.len = 0;
zval_dtor(&v);
} else if (is_callable) {
zval *retval_ptr;
zval **args[1];
zval *subpats;
int i;
MAKE_STD_ZVAL(subpats);
array_init(subpats);
for (i = 0; i < regs->num_regs; i++) {
add_next_index_stringl(subpats, string + regs->beg[i], regs->end[i] - regs->beg[i], 1);
}
args[0] = &subpats;
/* null terminate buffer */
smart_str_0(&eval_buf);
arg_replace_fci.param_count = 1;
arg_replace_fci.params = args;
arg_replace_fci.retval_ptr_ptr = &retval_ptr;
if (zend_call_function(&arg_replace_fci, &arg_replace_fci_cache TSRMLS_CC) == SUCCESS && arg_replace_fci.retval_ptr_ptr) {
convert_to_string_ex(&retval_ptr);
smart_str_appendl(&out_buf, Z_STRVAL_P(retval_ptr), Z_STRLEN_P(retval_ptr));
eval_buf.len = 0;
zval_ptr_dtor(&retval_ptr);
} else {
efree(description);
if (!EG(exception)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to call custom replacement function");
}
}
zval_ptr_dtor(&subpats);
}
n = regs->end[0];
if ((pos - (OnigUChar *)string) < n) {
pos = (OnigUChar *)string + n;
} else {
if (pos < string_lim) {
smart_str_appendl(&out_buf, pos, 1);
}
pos++;
}
} else { /* nomatch */
/* stick that last bit of string on our output */
if (string_lim - pos > 0) {
smart_str_appendl(&out_buf, pos, string_lim - pos);
}
}
onig_region_free(regs, 0);
}
if (description) {
efree(description);
}
if (regs != NULL) {
onig_region_free(regs, 1);
}
smart_str_free(&eval_buf);
if (err <= -2) {
smart_str_free(&out_buf);
RETVAL_FALSE;
} else {
smart_str_appendc(&out_buf, '\0');
RETVAL_STRINGL((char *)out_buf.c, out_buf.len - 1, 0);
}
}
| 273,115,938,524,087,840,000,000,000,000,000,000,000 | php_mbregex.c | 70,273,313,402,533,685,000,000,000,000,000,000,000 | [
"CWE-415"
] | CVE-2016-5768 | Double free vulnerability in the _php_mb_regex_ereg_replace_exec function in php_mbregex.c in the mbstring extension in PHP before 5.5.37, 5.6.x before 5.6.23, and 7.x before 7.0.8 allows remote attackers to execute arbitrary code or cause a denial of service (application crash) by leveraging a callback exception. | https://nvd.nist.gov/vuln/detail/CVE-2016-5768 |
1,903 | php-src | c395c6e5d7e8df37a21265ff76e48fe75ceb5ae6 | https://github.com/php/php-src | http://github.com/php/php-src/commit/c395c6e5d7e8df37a21265ff76e48fe75ceb5ae6?w=1 | Fixed bug #72446 - Integer Overflow in gdImagePaletteToTrueColor() resulting in heap overflow | 1 | gdImagePtr gdImageCreate (int sx, int sy)
{
int i;
gdImagePtr im;
if (overflow2(sx, sy)) {
return NULL;
}
if (overflow2(sizeof(unsigned char *), sy)) {
return NULL;
}
im = (gdImage *) gdCalloc(1, sizeof(gdImage));
/* Row-major ever since gd 1.3 */
im->pixels = (unsigned char **) gdMalloc(sizeof(unsigned char *) * sy);
im->AA_opacity = (unsigned char **) gdMalloc(sizeof(unsigned char *) * sy);
im->polyInts = 0;
im->polyAllocated = 0;
im->brush = 0;
im->tile = 0;
im->style = 0;
for (i = 0; i < sy; i++) {
/* Row-major ever since gd 1.3 */
im->pixels[i] = (unsigned char *) gdCalloc(sx, sizeof(unsigned char));
im->AA_opacity[i] = (unsigned char *) gdCalloc(sx, sizeof(unsigned char));
}
im->sx = sx;
im->sy = sy;
im->colorsTotal = 0;
im->transparent = (-1);
im->interlace = 0;
im->thick = 1;
im->AA = 0;
im->AA_polygon = 0;
for (i = 0; i < gdMaxColors; i++) {
im->open[i] = 1;
im->red[i] = 0;
im->green[i] = 0;
im->blue[i] = 0;
}
im->trueColor = 0;
im->tpixels = 0;
im->cx1 = 0;
im->cy1 = 0;
im->cx2 = im->sx - 1;
im->cy2 = im->sy - 1;
im->interpolation = NULL;
im->interpolation_id = GD_BILINEAR_FIXED;
return im;
}
| 115,776,571,225,885,950,000,000,000,000,000,000,000 | gd.c | 46,573,213,927,881,730,000,000,000,000,000,000,000 | [
"CWE-190"
] | CVE-2016-5767 | Integer overflow in the gdImageCreate function in gd.c in the GD Graphics Library (aka libgd) before 2.0.34RC1, as used in PHP before 5.5.37, 5.6.x before 5.6.23, and 7.x before 7.0.8, allows remote attackers to cause a denial of service (heap-based buffer overflow and application crash) or possibly have unspecified other impact via a crafted image dimensions. | https://nvd.nist.gov/vuln/detail/CVE-2016-5767 |
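Both libgd entries here guard allocations whose sizes are products of attacker-controlled dimensions. Below is a hedged sketch of an overflow2-style guard written with division, so the potentially wrapping multiply is never evaluated; the helper is a standalone illustration, not libgd's overflow2 itself.
#include <limits.h>
#include <stdio.h>
/* Nonzero when a * b would not fit in a non-negative int. */
static int mul_would_overflow(int a, int b)
{
    if (a <= 0 || b <= 0)
        return 1;                 /* treat bogus dimensions as overflow */
    return a > INT_MAX / b;       /* divide instead of multiplying      */
}
int main(void)
{
    printf("%d\n", mul_would_overflow(1024, 768));     /* 0: fine        */
    printf("%d\n", mul_would_overflow(65536, 65536));  /* 1: would wrap  */
    return 0;
}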
1,907 | php-src | 7722455726bec8c53458a32851d2a87982cf0eac | https://github.com/php/php-src | http://github.com/php/php-src/commit/7722455726bec8c53458a32851d2a87982cf0eac?w=1 | Fixed #72339 Integer Overflow in _gd2GetHeader() resulting in heap overflow | 1 | static int _gd2GetHeader(gdIOCtxPtr in, int *sx, int *sy, int *cs, int *vers, int *fmt, int *ncx, int *ncy, t_chunk_info ** chunkIdx)
{
int i;
int ch;
char id[5];
t_chunk_info *cidx;
int sidx;
int nc;
GD2_DBG(php_gd_error("Reading gd2 header info"));
for (i = 0; i < 4; i++) {
ch = gdGetC(in);
if (ch == EOF) {
goto fail1;
}
id[i] = ch;
}
id[4] = 0;
GD2_DBG(php_gd_error("Got file code: %s", id));
/* Equiv. of 'magick'. */
if (strcmp(id, GD2_ID) != 0) {
GD2_DBG(php_gd_error("Not a valid gd2 file"));
goto fail1;
}
/* Version */
if (gdGetWord(vers, in) != 1) {
goto fail1;
}
GD2_DBG(php_gd_error("Version: %d", *vers));
if ((*vers != 1) && (*vers != 2)) {
GD2_DBG(php_gd_error("Bad version: %d", *vers));
goto fail1;
}
/* Image Size */
if (!gdGetWord(sx, in)) {
GD2_DBG(php_gd_error("Could not get x-size"));
goto fail1;
}
if (!gdGetWord(sy, in)) {
GD2_DBG(php_gd_error("Could not get y-size"));
goto fail1;
}
GD2_DBG(php_gd_error("Image is %dx%d", *sx, *sy));
/* Chunk Size (pixels, not bytes!) */
if (gdGetWord(cs, in) != 1) {
goto fail1;
}
GD2_DBG(php_gd_error("ChunkSize: %d", *cs));
if ((*cs < GD2_CHUNKSIZE_MIN) || (*cs > GD2_CHUNKSIZE_MAX)) {
GD2_DBG(php_gd_error("Bad chunk size: %d", *cs));
goto fail1;
}
/* Data Format */
if (gdGetWord(fmt, in) != 1) {
goto fail1;
}
GD2_DBG(php_gd_error("Format: %d", *fmt));
if ((*fmt != GD2_FMT_RAW) && (*fmt != GD2_FMT_COMPRESSED) && (*fmt != GD2_FMT_TRUECOLOR_RAW) && (*fmt != GD2_FMT_TRUECOLOR_COMPRESSED)) {
GD2_DBG(php_gd_error("Bad data format: %d", *fmt));
goto fail1;
}
/* # of chunks wide */
if (gdGetWord(ncx, in) != 1) {
goto fail1;
}
GD2_DBG(php_gd_error("%d Chunks Wide", *ncx));
/* # of chunks high */
if (gdGetWord(ncy, in) != 1) {
goto fail1;
}
GD2_DBG(php_gd_error("%d Chunks vertically", *ncy));
if (gd2_compressed(*fmt)) {
nc = (*ncx) * (*ncy);
GD2_DBG(php_gd_error("Reading %d chunk index entries", nc));
sidx = sizeof(t_chunk_info) * nc;
if (sidx <= 0) {
goto fail1;
}
cidx = gdCalloc(sidx, 1);
for (i = 0; i < nc; i++) {
if (gdGetInt(&cidx[i].offset, in) != 1) {
gdFree(cidx);
goto fail1;
}
if (gdGetInt(&cidx[i].size, in) != 1) {
gdFree(cidx);
goto fail1;
}
if (cidx[i].offset < 0 || cidx[i].size < 0) {
gdFree(cidx);
goto fail1;
}
}
*chunkIdx = cidx;
}
GD2_DBG(php_gd_error("gd2 header complete"));
return 1;
fail1:
return 0;
}
| 108,321,245,757,292,700,000,000,000,000,000,000,000 | gd_gd2.c | 21,651,511,292,434,190,000,000,000,000,000,000,000 | [
"CWE-190"
] | CVE-2016-5766 | Integer overflow in the _gd2GetHeader function in gd_gd2.c in the GD Graphics Library (aka libgd) before 2.2.3, as used in PHP before 5.5.37, 5.6.x before 5.6.23, and 7.x before 7.0.8, allows remote attackers to cause a denial of service (heap-based buffer overflow and application crash) or possibly have unspecified other impact via crafted chunk dimensions in an image. | https://nvd.nist.gov/vuln/detail/CVE-2016-5766 |
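_gd2GetHeader computes sizeof(t_chunk_info) * nc in a plain int before allocating. The sketch below shows a safer shape: keep the count unsigned, cap it explicitly, and hand count and element size to calloc separately so the library performs the multiplication (modern C libraries make calloc fail when that product overflows). The chunk struct and MAX_CHUNKS cap are placeholders, not the libgd fix.
#include <stdio.h>
#include <stdlib.h>
struct chunk_info { int offset; int size; };
#define MAX_CHUNKS 100000u        /* arbitrary sanity cap for a header field */
static struct chunk_info *alloc_chunk_index(size_t nc)
{
    if (nc == 0 || nc > MAX_CHUNKS)
        return NULL;                          /* reject absurd counts early  */
    /* Pass count and size separately; calloc checks the product for us. */
    return calloc(nc, sizeof(struct chunk_info));
}
int main(void)
{
    struct chunk_info *idx = alloc_chunk_index(64 * 64);
    printf("%s\n", idx ? "allocated 4096 chunk slots" : "rejected");
    free(idx);
    return 0;
}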
1,908 | linux | 9bf292bfca94694a721449e3fd752493856710f6 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/9bf292bfca94694a721449e3fd752493856710f6 | misc: mic: Fix for double fetch security bug in VOP driver
The MIC VOP driver does two successive reads from user space to read a
variable length data structure. Kernel memory corruption can result if
the data structure changes between the two reads. This patch disallows
the chance of this happening.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=116651
Reported by: Pengfei Wang <wpengfeinudt@gmail.com>
Reviewed-by: Sudeep Dutt <sudeep.dutt@intel.com>
Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 1 | static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
struct vop_vdev *vdev = f->private_data;
struct vop_info *vi = vdev->vi;
void __user *argp = (void __user *)arg;
int ret;
switch (cmd) {
case MIC_VIRTIO_ADD_DEVICE:
{
struct mic_device_desc dd, *dd_config;
if (copy_from_user(&dd, argp, sizeof(dd)))
return -EFAULT;
if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
dd.num_vq > MIC_MAX_VRINGS)
return -EINVAL;
dd_config = kzalloc(mic_desc_size(&dd), GFP_KERNEL);
if (!dd_config)
return -ENOMEM;
if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
ret = -EFAULT;
goto free_ret;
}
mutex_lock(&vdev->vdev_mutex);
mutex_lock(&vi->vop_mutex);
ret = vop_virtio_add_device(vdev, dd_config);
if (ret)
goto unlock_ret;
list_add_tail(&vdev->list, &vi->vdev_list);
unlock_ret:
mutex_unlock(&vi->vop_mutex);
mutex_unlock(&vdev->vdev_mutex);
free_ret:
kfree(dd_config);
return ret;
}
case MIC_VIRTIO_COPY_DESC:
{
struct mic_copy_desc copy;
mutex_lock(&vdev->vdev_mutex);
ret = vop_vdev_inited(vdev);
if (ret)
goto _unlock_ret;
if (copy_from_user(&copy, argp, sizeof(copy))) {
ret = -EFAULT;
goto _unlock_ret;
}
ret = vop_virtio_copy_desc(vdev, &copy);
if (ret < 0)
goto _unlock_ret;
if (copy_to_user(
&((struct mic_copy_desc __user *)argp)->out_len,
&copy.out_len, sizeof(copy.out_len)))
ret = -EFAULT;
_unlock_ret:
mutex_unlock(&vdev->vdev_mutex);
return ret;
}
case MIC_VIRTIO_CONFIG_CHANGE:
{
void *buf;
mutex_lock(&vdev->vdev_mutex);
ret = vop_vdev_inited(vdev);
if (ret)
goto __unlock_ret;
buf = kzalloc(vdev->dd->config_len, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto __unlock_ret;
}
if (copy_from_user(buf, argp, vdev->dd->config_len)) {
ret = -EFAULT;
goto done;
}
ret = vop_virtio_config_change(vdev, buf);
done:
kfree(buf);
__unlock_ret:
mutex_unlock(&vdev->vdev_mutex);
return ret;
}
default:
return -ENOIOCTLCMD;
};
return 0;
}
| 42,020,854,998,596,270,000,000,000,000,000,000,000 | None | null | [
"CWE-119"
] | CVE-2016-5728 | Race condition in the vop_ioctl function in drivers/misc/mic/vop/vop_vringh.c in the MIC VOP driver in the Linux kernel before 4.6.1 allows local users to obtain sensitive information from kernel memory or cause a denial of service (memory corruption and system crash) by changing a certain header, aka a "double fetch" vulnerability. | https://nvd.nist.gov/vuln/detail/CVE-2016-5728 |
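The MIC VOP entry has the same double-fetch shape as entry 1,808, so here is the other common mitigation: keep the header from the first read and fetch only the variable-length tail the second time, so the length fields that were validated are never re-read from shared memory. The types, sizes, and names below are invented for a userspace analogue.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
struct desc_hdr { uint8_t type; uint8_t num_vq; uint16_t config_len; };
#define MAX_DESC_BYTES 512
/* "User" memory: header followed by config_len payload bytes. */
static unsigned char userbuf[sizeof(struct desc_hdr) + MAX_DESC_BYTES];
static unsigned char *read_descriptor(size_t *total_out)
{
    struct desc_hdr hdr;
    size_t total;
    unsigned char *buf;
    memcpy(&hdr, userbuf, sizeof(hdr));              /* the only header fetch  */
    if (hdr.config_len > MAX_DESC_BYTES)
        return NULL;
    total = sizeof(hdr) + hdr.config_len;
    buf = malloc(total);
    if (!buf)
        return NULL;
    memcpy(buf, &hdr, sizeof(hdr));                  /* reuse validated header */
    memcpy(buf + sizeof(hdr), userbuf + sizeof(hdr), /* fetch only the tail    */
           hdr.config_len);
    *total_out = total;
    return buf;
}
int main(void)
{
    struct desc_hdr hdr = { .type = 3, .num_vq = 2, .config_len = 16 };
    size_t total = 0;
    unsigned char *d;
    memcpy(userbuf, &hdr, sizeof(hdr));
    d = read_descriptor(&total);
    printf("descriptor bytes: %zu\n", total);        /* sizeof(hdr) + 16, from trusted sizes */
    free(d);
    return 0;
}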
1,909 | linux | 75ff39ccc1bd5d3c455b6822ab09e533c551f758 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/75ff39ccc1bd5d3c455b6822ab09e533c551f758 | tcp: make challenge acks less predictable
Yue Cao claims that current host rate limiting of challenge ACKS
(RFC 5961) could leak enough information to allow a patient attacker
to hijack TCP sessions. He will soon provide details in an academic
paper.
This patch increases the default limit from 100 to 1000, and adds
some randomization so that the attacker can no longer hijack
sessions without spending a considerable amount of probes.
Based on initial analysis and patch from Linus.
Note that we also have per socket rate limiting, so it is tempting
to remove the host limit in the future.
v2: randomize the count of challenge acks per second, not the period.
Fixes: 282f23c6ee34 ("tcp: implement RFC 5961 3.2")
Reported-by: Yue Cao <ycao009@ucr.edu>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
{
/* unprotected vars, we dont care of overwrites */
static u32 challenge_timestamp;
static unsigned int challenge_count;
struct tcp_sock *tp = tcp_sk(sk);
u32 now;
/* First check our per-socket dupack rate limit. */
if (tcp_oow_rate_limited(sock_net(sk), skb,
LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
&tp->last_oow_ack_time))
return;
/* Then check the check host-wide RFC 5961 rate limit. */
now = jiffies / HZ;
if (now != challenge_timestamp) {
challenge_timestamp = now;
challenge_count = 0;
}
if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
tcp_send_ack(sk);
}
}
| 320,924,418,498,212,000,000,000,000,000,000,000,000 | tcp_input.c | 33,022,065,887,978,390,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2016-5696 | net/ipv4/tcp_input.c in the Linux kernel before 4.7 does not properly determine the rate of challenge ACK segments, which makes it easier for remote attackers to hijack TCP sessions via a blind in-window attack. | https://nvd.nist.gov/vuln/detail/CVE-2016-5696 |
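The TCP hardening described above keeps a host-wide per-second budget of challenge ACKs but randomizes how many are allowed each second, so an off-path attacker can no longer count responses to infer connection state. The userspace sketch below uses time() and rand() as stand-ins for jiffies and the kernel PRNG; only the limit of 1000 is taken from the commit message, everything else is illustrative.
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define CHALLENGE_ACK_LIMIT 1000
/* Return nonzero if one more challenge ACK may be sent right now. */
static int challenge_ack_allowed(void)
{
    static time_t window;
    static unsigned int budget;
    time_t now = time(NULL);
    if (now != window) {
        window = now;
        /* Half the limit plus random slack: the exact per-second count
         * is unpredictable, which is the point of the mitigation. */
        budget = CHALLENGE_ACK_LIMIT / 2 +
                 (unsigned int)(rand() % (CHALLENGE_ACK_LIMIT / 2));
    }
    if (budget == 0)
        return 0;
    budget--;
    return 1;
}
int main(void)
{
    unsigned int sent = 0, i;
    srand((unsigned int)time(NULL));
    for (i = 0; i < 2000; i++)
        if (challenge_ack_allowed())
            sent++;
    printf("allowed %u of 2000 requests this second\n", sent);
    return 0;
}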
1,910 | ImageMagick | 5511ef530576ed18fd636baa3bb4eda3d667665d | https://github.com/ImageMagick/ImageMagick | https://github.com/ImageMagick/ImageMagick/commit/5511ef530576ed18fd636baa3bb4eda3d667665d | Add additional checks to DCM reader to prevent data-driven faults (bug report from Hanno Böck) | 1 | static Image *ReadDCMImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
char
explicit_vr[MagickPathExtent],
implicit_vr[MagickPathExtent],
magick[MagickPathExtent],
photometric[MagickPathExtent];
DCMStreamInfo
*stream_info;
Image
*image;
int
*bluemap,
datum,
*greenmap,
*graymap,
index,
*redmap;
MagickBooleanType
explicit_file,
explicit_retry,
polarity,
sequence,
use_explicit;
MagickOffsetType
offset;
Quantum
*scale;
register ssize_t
i,
x;
register Quantum
*q;
register unsigned char
*p;
size_t
bits_allocated,
bytes_per_pixel,
colors,
depth,
height,
length,
mask,
max_value,
number_scenes,
quantum,
samples_per_pixel,
signed_data,
significant_bits,
status,
width,
window_width;
ssize_t
count,
rescale_intercept,
rescale_slope,
scene,
window_center,
y;
unsigned char
*data;
unsigned short
group,
element;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image->depth=8UL;
image->endian=LSBEndian;
/*
Read DCM preamble.
*/
stream_info=(DCMStreamInfo *) AcquireMagickMemory(sizeof(*stream_info));
if (stream_info == (DCMStreamInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) ResetMagickMemory(stream_info,0,sizeof(*stream_info));
count=ReadBlob(image,128,(unsigned char *) magick);
if (count != 128)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
count=ReadBlob(image,4,(unsigned char *) magick);
if ((count != 4) || (LocaleNCompare(magick,"DICM",4) != 0))
{
offset=SeekBlob(image,0L,SEEK_SET);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
/*
Read DCM Medical image.
*/
(void) CopyMagickString(photometric,"MONOCHROME1 ",MagickPathExtent);
bits_allocated=8;
bytes_per_pixel=1;
polarity=MagickFalse;
data=(unsigned char *) NULL;
depth=8;
element=0;
explicit_vr[2]='\0';
explicit_file=MagickFalse;
colors=0;
redmap=(int *) NULL;
greenmap=(int *) NULL;
bluemap=(int *) NULL;
graymap=(int *) NULL;
height=0;
max_value=255UL;
mask=0xffff;
number_scenes=1;
rescale_intercept=0;
rescale_slope=1;
samples_per_pixel=1;
scale=(Quantum *) NULL;
sequence=MagickFalse;
signed_data=(~0UL);
significant_bits=0;
use_explicit=MagickFalse;
explicit_retry = MagickFalse;
width=0;
window_center=0;
window_width=0;
for (group=0; (group != 0x7FE0) || (element != 0x0010) ||
(sequence != MagickFalse); )
{
/*
Read a group.
*/
image->offset=(ssize_t) TellBlob(image);
group=ReadBlobLSBShort(image);
element=ReadBlobLSBShort(image);
if ((group != 0x0002) && (image->endian == MSBEndian))
{
group=(unsigned short) ((group << 8) | ((group >> 8) & 0xFF));
element=(unsigned short) ((element << 8) | ((element >> 8) & 0xFF));
}
quantum=0;
/*
Find corresponding VR for this group and element.
*/
for (i=0; dicom_info[i].group < 0xffff; i++)
if ((group == dicom_info[i].group) && (element == dicom_info[i].element))
break;
(void) CopyMagickString(implicit_vr,dicom_info[i].vr,MagickPathExtent);
count=ReadBlob(image,2,(unsigned char *) explicit_vr);
if (count != 2)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Check for "explicitness", but meta-file headers always explicit.
*/
if ((explicit_file == MagickFalse) && (group != 0x0002))
explicit_file=(isupper((unsigned char) *explicit_vr) != MagickFalse) &&
(isupper((unsigned char) *(explicit_vr+1)) != MagickFalse) ?
MagickTrue : MagickFalse;
use_explicit=((group == 0x0002) && (explicit_retry == MagickFalse)) ||
(explicit_file != MagickFalse) ? MagickTrue : MagickFalse;
if ((use_explicit != MagickFalse) && (strncmp(implicit_vr,"xs",2) == 0))
(void) CopyMagickString(implicit_vr,explicit_vr,MagickPathExtent);
if ((use_explicit == MagickFalse) || (strncmp(implicit_vr,"!!",2) == 0))
{
offset=SeekBlob(image,(MagickOffsetType) -2,SEEK_CUR);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
quantum=4;
}
else
{
/*
Assume explicit type.
*/
quantum=2;
if ((strncmp(explicit_vr,"OB",2) == 0) ||
(strncmp(explicit_vr,"UN",2) == 0) ||
(strncmp(explicit_vr,"OW",2) == 0) ||
(strncmp(explicit_vr,"SQ",2) == 0))
{
(void) ReadBlobLSBShort(image);
quantum=4;
}
}
datum=0;
if (quantum == 4)
{
if (group == 0x0002)
datum=ReadBlobLSBSignedLong(image);
else
datum=ReadBlobSignedLong(image);
}
else
if (quantum == 2)
{
if (group == 0x0002)
datum=ReadBlobLSBSignedShort(image);
else
datum=ReadBlobSignedShort(image);
}
quantum=0;
length=1;
if (datum != 0)
{
if ((strncmp(implicit_vr,"SS",2) == 0) ||
(strncmp(implicit_vr,"US",2) == 0))
quantum=2;
else
if ((strncmp(implicit_vr,"UL",2) == 0) ||
(strncmp(implicit_vr,"SL",2) == 0) ||
(strncmp(implicit_vr,"FL",2) == 0))
quantum=4;
else
if (strncmp(implicit_vr,"FD",2) != 0)
quantum=1;
else
quantum=8;
if (datum != ~0)
length=(size_t) datum/quantum;
else
{
/*
Sequence and item of undefined length.
*/
quantum=0;
length=0;
}
}
if (image_info->verbose != MagickFalse)
{
/*
Display Dicom info.
*/
if (use_explicit == MagickFalse)
explicit_vr[0]='\0';
for (i=0; dicom_info[i].description != (char *) NULL; i++)
if ((group == dicom_info[i].group) &&
(element == dicom_info[i].element))
break;
(void) FormatLocaleFile(stdout,"0x%04lX %4ld %s-%s (0x%04lx,0x%04lx)",
(unsigned long) image->offset,(long) length,implicit_vr,explicit_vr,
(unsigned long) group,(unsigned long) element);
if (dicom_info[i].description != (char *) NULL)
(void) FormatLocaleFile(stdout," %s",dicom_info[i].description);
(void) FormatLocaleFile(stdout,": ");
}
if ((sequence == MagickFalse) && (group == 0x7FE0) && (element == 0x0010))
{
if (image_info->verbose != MagickFalse)
(void) FormatLocaleFile(stdout,"\n");
break;
}
/*
Allocate space and read an array.
*/
data=(unsigned char *) NULL;
if ((length == 1) && (quantum == 1))
datum=ReadBlobByte(image);
else
if ((length == 1) && (quantum == 2))
{
if (group == 0x0002)
datum=ReadBlobLSBSignedShort(image);
else
datum=ReadBlobSignedShort(image);
}
else
if ((length == 1) && (quantum == 4))
{
if (group == 0x0002)
datum=ReadBlobLSBSignedLong(image);
else
datum=ReadBlobSignedLong(image);
}
else
if ((quantum != 0) && (length != 0))
{
if (~length >= 1)
data=(unsigned char *) AcquireQuantumMemory(length+1,quantum*
sizeof(*data));
if (data == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) quantum*length,data);
if (count != (ssize_t) (quantum*length))
{
if (image_info->verbose != MagickFalse)
(void) FormatLocaleFile(stdout,"count=%d quantum=%d "
"length=%d group=%d\n",(int) count,(int) quantum,(int)
length,(int) group);
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
}
data[length*quantum]='\0';
}
else
if ((unsigned int) datum == 0xFFFFFFFFU)
{
sequence=MagickTrue;
continue;
}
if ((unsigned int) ((group << 16) | element) == 0xFFFEE0DD)
{
if (data != (unsigned char *) NULL)
data=(unsigned char *) RelinquishMagickMemory(data);
sequence=MagickFalse;
continue;
}
if (sequence != MagickFalse)
{
if (data != (unsigned char *) NULL)
data=(unsigned char *) RelinquishMagickMemory(data);
continue;
}
switch (group)
{
case 0x0002:
{
switch (element)
{
case 0x0010:
{
char
transfer_syntax[MagickPathExtent];
/*
Transfer Syntax.
*/
if ((datum == 0) && (explicit_retry == MagickFalse))
{
explicit_retry=MagickTrue;
(void) SeekBlob(image,(MagickOffsetType) 0,SEEK_SET);
group=0;
element=0;
if (image_info->verbose != MagickFalse)
(void) FormatLocaleFile(stdout,
"Corrupted image - trying explicit format\n");
break;
}
*transfer_syntax='\0';
if (data != (unsigned char *) NULL)
(void) CopyMagickString(transfer_syntax,(char *) data,
MagickPathExtent);
if (image_info->verbose != MagickFalse)
(void) FormatLocaleFile(stdout,"transfer_syntax=%s\n",
(const char *) transfer_syntax);
if (strncmp(transfer_syntax,"1.2.840.10008.1.2",17) == 0)
{
int
count,
subtype,
type;
type=1;
subtype=0;
if (strlen(transfer_syntax) > 17)
{
count=sscanf(transfer_syntax+17,".%d.%d",&type,&subtype);
if (count < 1)
ThrowReaderException(CorruptImageError,
"ImproperImageHeader");
}
switch (type)
{
case 1:
{
image->endian=LSBEndian;
break;
}
case 2:
{
image->endian=MSBEndian;
break;
}
case 4:
{
if ((subtype >= 80) && (subtype <= 81))
image->compression=JPEGCompression;
else
if ((subtype >= 90) && (subtype <= 93))
image->compression=JPEG2000Compression;
else
image->compression=JPEGCompression;
break;
}
case 5:
{
image->compression=RLECompression;
break;
}
}
}
break;
}
default:
break;
}
break;
}
case 0x0028:
{
switch (element)
{
case 0x0002:
{
/*
Samples per pixel.
*/
samples_per_pixel=(size_t) datum;
break;
}
case 0x0004:
{
/*
Photometric interpretation.
*/
for (i=0; i < (ssize_t) MagickMin(length,MagickPathExtent-1); i++)
photometric[i]=(char) data[i];
photometric[i]='\0';
polarity=LocaleCompare(photometric,"MONOCHROME1 ") == 0 ?
MagickTrue : MagickFalse;
break;
}
case 0x0006:
{
/*
Planar configuration.
*/
if (datum == 1)
image->interlace=PlaneInterlace;
break;
}
case 0x0008:
{
/*
Number of frames.
*/
number_scenes=StringToUnsignedLong((char *) data);
break;
}
case 0x0010:
{
/*
Image rows.
*/
height=(size_t) datum;
break;
}
case 0x0011:
{
/*
Image columns.
*/
width=(size_t) datum;
break;
}
case 0x0100:
{
/*
Bits allocated.
*/
bits_allocated=(size_t) datum;
bytes_per_pixel=1;
if (datum > 8)
bytes_per_pixel=2;
depth=bits_allocated;
if (depth > 32)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
max_value=(1UL << bits_allocated)-1;
break;
}
case 0x0101:
{
/*
Bits stored.
*/
significant_bits=(size_t) datum;
bytes_per_pixel=1;
if (significant_bits > 8)
bytes_per_pixel=2;
depth=significant_bits;
if (depth > 32)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
max_value=(1UL << significant_bits)-1;
mask=(size_t) GetQuantumRange(significant_bits);
break;
}
case 0x0102:
{
/*
High bit.
*/
break;
}
case 0x0103:
{
/*
Pixel representation.
*/
signed_data=(size_t) datum;
break;
}
case 0x1050:
{
/*
Visible pixel range: center.
*/
if (data != (unsigned char *) NULL)
window_center=(ssize_t) StringToLong((char *) data);
break;
}
case 0x1051:
{
/*
Visible pixel range: width.
*/
if (data != (unsigned char *) NULL)
window_width=StringToUnsignedLong((char *) data);
break;
}
case 0x1052:
{
/*
Rescale intercept
*/
if (data != (unsigned char *) NULL)
rescale_intercept=(ssize_t) StringToLong((char *) data);
break;
}
case 0x1053:
{
/*
Rescale slope
*/
if (data != (unsigned char *) NULL)
rescale_slope=(ssize_t) StringToLong((char *) data);
break;
}
case 0x1200:
case 0x3006:
{
/*
Populate graymap.
*/
if (data == (unsigned char *) NULL)
break;
colors=(size_t) (length/bytes_per_pixel);
datum=(int) colors;
graymap=(int *) AcquireQuantumMemory((size_t) colors,
sizeof(*graymap));
if (graymap == (int *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) colors; i++)
if (bytes_per_pixel == 1)
graymap[i]=(int) data[i];
else
graymap[i]=(int) ((short *) data)[i];
break;
}
case 0x1201:
{
unsigned short
index;
/*
Populate redmap.
*/
if (data == (unsigned char *) NULL)
break;
colors=(size_t) (length/2);
datum=(int) colors;
redmap=(int *) AcquireQuantumMemory((size_t) colors,
sizeof(*redmap));
if (redmap == (int *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
p=data;
for (i=0; i < (ssize_t) colors; i++)
{
if (image->endian == MSBEndian)
index=(unsigned short) ((*p << 8) | *(p+1));
else
index=(unsigned short) (*p | (*(p+1) << 8));
redmap[i]=(int) index;
p+=2;
}
break;
}
case 0x1202:
{
unsigned short
index;
/*
Populate greenmap.
*/
if (data == (unsigned char *) NULL)
break;
colors=(size_t) (length/2);
datum=(int) colors;
greenmap=(int *) AcquireQuantumMemory((size_t) colors,
sizeof(*greenmap));
if (greenmap == (int *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
p=data;
for (i=0; i < (ssize_t) colors; i++)
{
if (image->endian == MSBEndian)
index=(unsigned short) ((*p << 8) | *(p+1));
else
index=(unsigned short) (*p | (*(p+1) << 8));
greenmap[i]=(int) index;
p+=2;
}
break;
}
case 0x1203:
{
unsigned short
index;
/*
Populate bluemap.
*/
if (data == (unsigned char *) NULL)
break;
colors=(size_t) (length/2);
datum=(int) colors;
bluemap=(int *) AcquireQuantumMemory((size_t) colors,
sizeof(*bluemap));
if (bluemap == (int *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
p=data;
for (i=0; i < (ssize_t) colors; i++)
{
if (image->endian == MSBEndian)
index=(unsigned short) ((*p << 8) | *(p+1));
else
index=(unsigned short) (*p | (*(p+1) << 8));
bluemap[i]=(int) index;
p+=2;
}
break;
}
default:
break;
}
break;
}
case 0x2050:
{
switch (element)
{
case 0x0020:
{
if ((data != (unsigned char *) NULL) &&
(strncmp((char *) data,"INVERSE",7) == 0))
polarity=MagickTrue;
break;
}
default:
break;
}
break;
}
default:
break;
}
if (data != (unsigned char *) NULL)
{
char
*attribute;
for (i=0; dicom_info[i].description != (char *) NULL; i++)
if ((group == dicom_info[i].group) &&
(element == dicom_info[i].element))
break;
if (dicom_info[i].description != (char *) NULL)
{
attribute=AcquireString("dcm:");
(void) ConcatenateString(&attribute,dicom_info[i].description);
for (i=0; i < (ssize_t) MagickMax(length,4); i++)
if (isprint((int) data[i]) == MagickFalse)
break;
if ((i == (ssize_t) length) || (length > 4))
{
(void) SubstituteString(&attribute," ","");
(void) SetImageProperty(image,attribute,(char *) data,exception);
}
attribute=DestroyString(attribute);
}
}
if (image_info->verbose != MagickFalse)
{
if (data == (unsigned char *) NULL)
(void) FormatLocaleFile(stdout,"%d\n",datum);
else
{
/*
Display group data.
*/
for (i=0; i < (ssize_t) MagickMax(length,4); i++)
if (isprint((int) data[i]) == MagickFalse)
break;
if ((i != (ssize_t) length) && (length <= 4))
{
ssize_t
j;
datum=0;
for (j=(ssize_t) length-1; j >= 0; j--)
datum=(256*datum+data[j]);
(void) FormatLocaleFile(stdout,"%d",datum);
}
else
for (i=0; i < (ssize_t) length; i++)
if (isprint((int) data[i]) != MagickFalse)
(void) FormatLocaleFile(stdout,"%c",data[i]);
else
(void) FormatLocaleFile(stdout,"%c",'.');
(void) FormatLocaleFile(stdout,"\n");
}
}
if (data != (unsigned char *) NULL)
data=(unsigned char *) RelinquishMagickMemory(data);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
}
if ((width == 0) || (height == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
image->columns=(size_t) width;
image->rows=(size_t) height;
if (signed_data == 0xffff)
signed_data=(size_t) (significant_bits == 16 ? 1 : 0);
if ((image->compression == JPEGCompression) ||
(image->compression == JPEG2000Compression))
{
Image
*images;
ImageInfo
*read_info;
int
c;
size_t
length;
unsigned int
tag;
/*
Read offset table.
*/
for (i=0; i < (ssize_t) stream_info->remaining; i++)
(void) ReadBlobByte(image);
tag=(ReadBlobLSBShort(image) << 16) | ReadBlobLSBShort(image);
(void) tag;
length=(size_t) ReadBlobLSBLong(image);
stream_info->offset_count=length >> 2;
if (stream_info->offset_count != 0)
{
MagickOffsetType
offset;
stream_info->offsets=(ssize_t *) AcquireQuantumMemory(
stream_info->offset_count,sizeof(*stream_info->offsets));
if (stream_info->offsets == (ssize_t *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) stream_info->offset_count; i++)
stream_info->offsets[i]=(ssize_t) ReadBlobLSBSignedLong(image);
offset=TellBlob(image);
for (i=0; i < (ssize_t) stream_info->offset_count; i++)
stream_info->offsets[i]+=offset;
}
/*
Handle non-native image formats.
*/
read_info=CloneImageInfo(image_info);
SetImageInfoBlob(read_info,(void *) NULL,0);
images=NewImageList();
for (scene=0; scene < (ssize_t) number_scenes; scene++)
{
char
filename[MagickPathExtent];
const char
*property;
FILE
*file;
Image
*jpeg_image;
int
unique_file;
unsigned int
tag;
tag=(ReadBlobLSBShort(image) << 16) | ReadBlobLSBShort(image);
length=(size_t) ReadBlobLSBLong(image);
if (tag == 0xFFFEE0DD)
break; /* sequence delimiter tag */
if (tag != 0xFFFEE000)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
file=(FILE *) NULL;
unique_file=AcquireUniqueFileResource(filename);
if (unique_file != -1)
file=fdopen(unique_file,"wb");
if (file == (FILE *) NULL)
{
(void) RelinquishUniqueFileResource(filename);
ThrowFileException(exception,FileOpenError,
"UnableToCreateTemporaryFile",filename);
break;
}
for ( ; length != 0; length--)
{
c=ReadBlobByte(image);
if (c == EOF)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
(void) fputc(c,file);
}
(void) fclose(file);
(void) FormatLocaleString(read_info->filename,MagickPathExtent,
"jpeg:%s",filename);
if (image->compression == JPEG2000Compression)
(void) FormatLocaleString(read_info->filename,MagickPathExtent,
"j2k:%s",filename);
jpeg_image=ReadImage(read_info,exception);
if (jpeg_image != (Image *) NULL)
{
ResetImagePropertyIterator(image);
property=GetNextImageProperty(image);
while (property != (const char *) NULL)
{
(void) SetImageProperty(jpeg_image,property,
GetImageProperty(image,property,exception),exception);
property=GetNextImageProperty(image);
}
AppendImageToList(&images,jpeg_image);
}
(void) RelinquishUniqueFileResource(filename);
}
read_info=DestroyImageInfo(read_info);
image=DestroyImage(image);
return(GetFirstImageInList(images));
}
if (depth != (1UL*MAGICKCORE_QUANTUM_DEPTH))
{
QuantumAny
range;
size_t
length;
/*
Compute pixel scaling table.
*/
length=(size_t) (GetQuantumRange(depth)+1);
scale=(Quantum *) AcquireQuantumMemory(length,sizeof(*scale));
if (scale == (Quantum *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
range=GetQuantumRange(depth);
for (i=0; i < (ssize_t) (GetQuantumRange(depth)+1); i++)
scale[i]=ScaleAnyToQuantum((size_t) i,range);
}
if (image->compression == RLECompression)
{
size_t
length;
unsigned int
tag;
/*
Read RLE offset table.
*/
for (i=0; i < (ssize_t) stream_info->remaining; i++)
(void) ReadBlobByte(image);
tag=(ReadBlobLSBShort(image) << 16) | ReadBlobLSBShort(image);
(void) tag;
length=(size_t) ReadBlobLSBLong(image);
stream_info->offset_count=length >> 2;
if (stream_info->offset_count != 0)
{
MagickOffsetType
offset;
stream_info->offsets=(ssize_t *) AcquireQuantumMemory(
stream_info->offset_count,sizeof(*stream_info->offsets));
if (stream_info->offsets == (ssize_t *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) stream_info->offset_count; i++)
stream_info->offsets[i]=(ssize_t) ReadBlobLSBSignedLong(image);
offset=TellBlob(image);
for (i=0; i < (ssize_t) stream_info->offset_count; i++)
stream_info->offsets[i]+=offset;
}
}
for (scene=0; scene < (ssize_t) number_scenes; scene++)
{
if (image_info->ping != MagickFalse)
break;
image->columns=(size_t) width;
image->rows=(size_t) height;
image->depth=depth;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
break;
image->colorspace=RGBColorspace;
if ((image->colormap == (PixelInfo *) NULL) && (samples_per_pixel == 1))
{
size_t
one;
one=1;
if (colors == 0)
colors=one << depth;
if (AcquireImageColormap(image,one << depth,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (redmap != (int *) NULL)
for (i=0; i < (ssize_t) colors; i++)
{
index=redmap[i];
if ((scale != (Quantum *) NULL) && (index <= (int) max_value))
index=(int) scale[index];
image->colormap[i].red=(MagickRealType) index;
}
if (greenmap != (int *) NULL)
for (i=0; i < (ssize_t) colors; i++)
{
index=greenmap[i];
if ((scale != (Quantum *) NULL) && (index <= (int) max_value))
index=(int) scale[index];
image->colormap[i].green=(MagickRealType) index;
}
if (bluemap != (int *) NULL)
for (i=0; i < (ssize_t) colors; i++)
{
index=bluemap[i];
if ((scale != (Quantum *) NULL) && (index <= (int) max_value))
index=(int) scale[index];
image->colormap[i].blue=(MagickRealType) index;
}
if (graymap != (int *) NULL)
for (i=0; i < (ssize_t) colors; i++)
{
index=graymap[i];
if ((scale != (Quantum *) NULL) && (index <= (int) max_value))
index=(int) scale[index];
image->colormap[i].red=(MagickRealType) index;
image->colormap[i].green=(MagickRealType) index;
image->colormap[i].blue=(MagickRealType) index;
}
}
if (image->compression == RLECompression)
{
unsigned int
tag;
/*
Read RLE segment table.
*/
for (i=0; i < (ssize_t) stream_info->remaining; i++)
(void) ReadBlobByte(image);
tag=(ReadBlobLSBShort(image) << 16) | ReadBlobLSBShort(image);
stream_info->remaining=(size_t) ReadBlobLSBLong(image);
if ((tag != 0xFFFEE000) || (stream_info->remaining <= 64) ||
(EOFBlob(image) != MagickFalse))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
stream_info->count=0;
stream_info->segment_count=ReadBlobLSBLong(image);
if (stream_info->segment_count > 1)
{
bytes_per_pixel=1;
depth=8;
}
for (i=0; i < 15; i++)
stream_info->segments[i]=(ssize_t) ReadBlobLSBSignedLong(image);
stream_info->remaining-=64;
}
if ((samples_per_pixel > 1) && (image->interlace == PlaneInterlace))
{
/*
Convert Planar RGB DCM Medical image to pixel packets.
*/
for (i=0; i < (ssize_t) samples_per_pixel; i++)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
switch ((int) i)
{
case 0:
{
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
ReadDCMByte(stream_info,image)),q);
break;
}
case 1:
{
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
ReadDCMByte(stream_info,image)),q);
break;
}
case 2:
{
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
ReadDCMByte(stream_info,image)),q);
break;
}
case 3:
{
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
ReadDCMByte(stream_info,image)),q);
break;
}
default:
break;
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
}
else
{
const char
*option;
int
byte;
PixelPacket
pixel;
/*
Convert DCM Medical image to pixel packets.
*/
byte=0;
i=0;
if ((window_center != 0) && (window_width == 0))
window_width=(size_t) window_center;
option=GetImageOption(image_info,"dcm:display-range");
if (option != (const char *) NULL)
{
if (LocaleCompare(option,"reset") == 0)
window_width=0;
}
(void) ResetMagickMemory(&pixel,0,sizeof(pixel));
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (samples_per_pixel == 1)
{
int
pixel_value;
if (bytes_per_pixel == 1)
pixel_value=polarity != MagickFalse ?
((int) max_value-ReadDCMByte(stream_info,image)) :
ReadDCMByte(stream_info,image);
else
if ((bits_allocated != 12) || (significant_bits != 12))
{
if (signed_data)
pixel_value=ReadDCMSignedShort(stream_info,image);
else
pixel_value=ReadDCMShort(stream_info,image);
if (polarity != MagickFalse)
pixel_value=(int)max_value-pixel_value;
}
else
{
if ((i & 0x01) != 0)
pixel_value=(ReadDCMByte(stream_info,image) << 8) |
byte;
else
{
pixel_value=ReadDCMSignedShort(stream_info,image);
byte=(int) (pixel_value & 0x0f);
pixel_value>>=4;
}
i++;
}
index=(pixel_value*rescale_slope)+rescale_intercept;
if (window_width == 0)
{
if (signed_data == 1)
index-=32767;
}
else
{
ssize_t
window_max,
window_min;
window_min=(ssize_t) ceil((double) window_center-
(window_width-1.0)/2.0-0.5);
window_max=(ssize_t) floor((double) window_center+
(window_width-1.0)/2.0+0.5);
if ((ssize_t)index <= window_min)
index=0;
else
if ((ssize_t)index > window_max)
index=(int) max_value;
else
index=(int) (max_value*(((index-window_center-
0.5)/(window_width-1))+0.5));
}
index&=mask;
index=(int) ConstrainColormapIndex(image,(size_t) index,
exception);
SetPixelIndex(image,(Quantum) index,q);
pixel.red=(unsigned int) image->colormap[index].red;
pixel.green=(unsigned int) image->colormap[index].green;
pixel.blue=(unsigned int) image->colormap[index].blue;
}
else
{
if (bytes_per_pixel == 1)
{
pixel.red=(unsigned int) ReadDCMByte(stream_info,image);
pixel.green=(unsigned int) ReadDCMByte(stream_info,image);
pixel.blue=(unsigned int) ReadDCMByte(stream_info,image);
}
else
{
pixel.red=ReadDCMShort(stream_info,image);
pixel.green=ReadDCMShort(stream_info,image);
pixel.blue=ReadDCMShort(stream_info,image);
}
pixel.red&=mask;
pixel.green&=mask;
pixel.blue&=mask;
if (scale != (Quantum *) NULL)
{
pixel.red=scale[pixel.red];
pixel.green=scale[pixel.green];
pixel.blue=scale[pixel.blue];
}
}
SetPixelRed(image,(Quantum) pixel.red,q);
SetPixelGreen(image,(Quantum) pixel.green,q);
SetPixelBlue(image,(Quantum) pixel.blue,q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if (stream_info->segment_count > 1)
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (samples_per_pixel == 1)
{
int
pixel_value;
if (bytes_per_pixel == 1)
pixel_value=polarity != MagickFalse ?
((int) max_value-ReadDCMByte(stream_info,image)) :
ReadDCMByte(stream_info,image);
else
if ((bits_allocated != 12) || (significant_bits != 12))
{
pixel_value=(int) (polarity != MagickFalse ?
(max_value-ReadDCMShort(stream_info,image)) :
ReadDCMShort(stream_info,image));
if (signed_data == 1)
pixel_value=((signed short) pixel_value);
}
else
{
if ((i & 0x01) != 0)
pixel_value=(ReadDCMByte(stream_info,image) << 8) |
byte;
else
{
pixel_value=ReadDCMShort(stream_info,image);
byte=(int) (pixel_value & 0x0f);
pixel_value>>=4;
}
i++;
}
index=(pixel_value*rescale_slope)+rescale_intercept;
if (window_width == 0)
{
if (signed_data == 1)
index-=32767;
}
else
{
ssize_t
window_max,
window_min;
window_min=(ssize_t) ceil((double) window_center-
(window_width-1.0)/2.0-0.5);
window_max=(ssize_t) floor((double) window_center+
(window_width-1.0)/2.0+0.5);
if ((ssize_t)index <= window_min)
index=0;
else
if ((ssize_t)index > window_max)
index=(int) max_value;
else
index=(int) (max_value*(((index-window_center-
0.5)/(window_width-1))+0.5));
}
index&=mask;
index=(int) ConstrainColormapIndex(image,(size_t) index,
exception);
SetPixelIndex(image,(Quantum) (((size_t)
GetPixelIndex(image,q)) | (((size_t) index) << 8)),q);
pixel.red=(unsigned int) image->colormap[index].red;
pixel.green=(unsigned int) image->colormap[index].green;
pixel.blue=(unsigned int) image->colormap[index].blue;
}
else
{
if (bytes_per_pixel == 1)
{
pixel.red=(unsigned int) ReadDCMByte(stream_info,image);
pixel.green=(unsigned int) ReadDCMByte(stream_info,image);
pixel.blue=(unsigned int) ReadDCMByte(stream_info,image);
}
else
{
pixel.red=ReadDCMShort(stream_info,image);
pixel.green=ReadDCMShort(stream_info,image);
pixel.blue=ReadDCMShort(stream_info,image);
}
pixel.red&=mask;
pixel.green&=mask;
pixel.blue&=mask;
if (scale != (Quantum *) NULL)
{
pixel.red=scale[pixel.red];
pixel.green=scale[pixel.green];
pixel.blue=scale[pixel.blue];
}
}
SetPixelRed(image,(Quantum) (((size_t) GetPixelRed(image,q)) |
(((size_t) pixel.red) << 8)),q);
SetPixelGreen(image,(Quantum) (((size_t) GetPixelGreen(image,q)) |
(((size_t) pixel.green) << 8)),q);
SetPixelBlue(image,(Quantum) (((size_t) GetPixelBlue(image,q)) |
(((size_t) pixel.blue) << 8)),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
if (SetImageGray(image,exception) != MagickFalse)
(void) SetImageColorspace(image,GRAYColorspace,exception);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (scene < (ssize_t) (number_scenes-1))
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
}
/*
Free resources.
*/
if (stream_info->offsets != (ssize_t *) NULL)
stream_info->offsets=(ssize_t *)
RelinquishMagickMemory(stream_info->offsets);
stream_info=(DCMStreamInfo *) RelinquishMagickMemory(stream_info);
if (scale != (Quantum *) NULL)
scale=(Quantum *) RelinquishMagickMemory(scale);
if (graymap != (int *) NULL)
graymap=(int *) RelinquishMagickMemory(graymap);
if (bluemap != (int *) NULL)
bluemap=(int *) RelinquishMagickMemory(bluemap);
if (greenmap != (int *) NULL)
greenmap=(int *) RelinquishMagickMemory(greenmap);
if (redmap != (int *) NULL)
redmap=(int *) RelinquishMagickMemory(redmap);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
| 40,219,157,448,116,547,000,000,000,000,000,000,000 | dcm.c | 282,395,254,250,022,830,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2016-5689 | The DCM reader in ImageMagick before 6.9.4-5 and 7.x before 7.0.1-7 allows remote attackers to have unspecified impact by leveraging lack of NULL pointer checks. | https://nvd.nist.gov/vuln/detail/CVE-2016-5689 |
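The advisory above comes down to missing NULL-pointer guards before element payloads are dereferenced; in the function body, for instance, the 0x0008 (number of frames) case calls StringToUnsignedLong on data without the data != (unsigned char *) NULL test that the 0x1050 and 0x1051 cases perform. The standalone sketch below only illustrates that guard pattern; parse_number_of_frames is a hypothetical helper, not part of ImageMagick, and this is not the upstream patch.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper mirroring the reader's use of an element payload:
   dereference it only when the element actually carried data. */
static unsigned long parse_number_of_frames(const unsigned char *data)
{
  if (data == NULL)
    return 1;                                   /* default to a single frame */
  return strtoul((const char *) data, NULL, 10);
}

int main(void)
{
  printf("%lu\n", parse_number_of_frames(NULL));                        /* 1 */
  printf("%lu\n", parse_number_of_frames((const unsigned char *) "7")); /* 7 */
  return 0;
}

The same guard applies to every tag handler in the reader that treats data as a C string.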
1,914 | linux | aa93d1fee85c890a34f2510a310e55ee76a27848 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/aa93d1fee85c890a34f2510a310e55ee76a27848 | media: fix airspy usb probe error path
Fix a memory leak on probe error of the airspy usb device driver.
The problem is triggered when more than 64 usb devices register with
v4l2 of type VFL_TYPE_SDR or VFL_TYPE_SUBDEV.
The memory leak is caused by the probe function of the airspy driver
mishandling errors and not freeing the corresponding control structures
when an error occurs registering the device with the v4l2 core.
A badusb device can emulate 64 of these devices, and then through
continual emulated connect/disconnect of the 65th device, cause the
kernel to run out of RAM and crash, thus causing a local DoS
vulnerability.
Fixes CVE-2016-5400
Signed-off-by: James Patrick-Evans <james@jmp-e.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Cc: stable@vger.kernel.org # 3.17+
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | 1 | static int airspy_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct airspy *s;
int ret;
u8 u8tmp, buf[BUF_SIZE];
s = kzalloc(sizeof(struct airspy), GFP_KERNEL);
if (s == NULL) {
dev_err(&intf->dev, "Could not allocate memory for state\n");
return -ENOMEM;
}
mutex_init(&s->v4l2_lock);
mutex_init(&s->vb_queue_lock);
spin_lock_init(&s->queued_bufs_lock);
INIT_LIST_HEAD(&s->queued_bufs);
s->dev = &intf->dev;
s->udev = interface_to_usbdev(intf);
s->f_adc = bands[0].rangelow;
s->f_rf = bands_rf[0].rangelow;
s->pixelformat = formats[0].pixelformat;
s->buffersize = formats[0].buffersize;
/* Detect device */
ret = airspy_ctrl_msg(s, CMD_BOARD_ID_READ, 0, 0, &u8tmp, 1);
if (ret == 0)
ret = airspy_ctrl_msg(s, CMD_VERSION_STRING_READ, 0, 0,
buf, BUF_SIZE);
if (ret) {
dev_err(s->dev, "Could not detect board\n");
goto err_free_mem;
}
buf[BUF_SIZE - 1] = '\0';
dev_info(s->dev, "Board ID: %02x\n", u8tmp);
dev_info(s->dev, "Firmware version: %s\n", buf);
/* Init videobuf2 queue structure */
s->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
s->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
s->vb_queue.drv_priv = s;
s->vb_queue.buf_struct_size = sizeof(struct airspy_frame_buf);
s->vb_queue.ops = &airspy_vb2_ops;
s->vb_queue.mem_ops = &vb2_vmalloc_memops;
s->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
ret = vb2_queue_init(&s->vb_queue);
if (ret) {
dev_err(s->dev, "Could not initialize vb2 queue\n");
goto err_free_mem;
}
/* Init video_device structure */
s->vdev = airspy_template;
s->vdev.queue = &s->vb_queue;
s->vdev.queue->lock = &s->vb_queue_lock;
video_set_drvdata(&s->vdev, s);
/* Register the v4l2_device structure */
s->v4l2_dev.release = airspy_video_release;
ret = v4l2_device_register(&intf->dev, &s->v4l2_dev);
if (ret) {
dev_err(s->dev, "Failed to register v4l2-device (%d)\n", ret);
goto err_free_mem;
}
/* Register controls */
v4l2_ctrl_handler_init(&s->hdl, 5);
s->lna_gain_auto = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_LNA_GAIN_AUTO, 0, 1, 1, 0);
s->lna_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_LNA_GAIN, 0, 14, 1, 8);
v4l2_ctrl_auto_cluster(2, &s->lna_gain_auto, 0, false);
s->mixer_gain_auto = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO, 0, 1, 1, 0);
s->mixer_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_MIXER_GAIN, 0, 15, 1, 8);
v4l2_ctrl_auto_cluster(2, &s->mixer_gain_auto, 0, false);
s->if_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_IF_GAIN, 0, 15, 1, 0);
if (s->hdl.error) {
ret = s->hdl.error;
dev_err(s->dev, "Could not initialize controls\n");
goto err_free_controls;
}
v4l2_ctrl_handler_setup(&s->hdl);
s->v4l2_dev.ctrl_handler = &s->hdl;
s->vdev.v4l2_dev = &s->v4l2_dev;
s->vdev.lock = &s->v4l2_lock;
ret = video_register_device(&s->vdev, VFL_TYPE_SDR, -1);
if (ret) {
dev_err(s->dev, "Failed to register as video device (%d)\n",
ret);
goto err_unregister_v4l2_dev;
}
dev_info(s->dev, "Registered as %s\n",
video_device_node_name(&s->vdev));
dev_notice(s->dev, "SDR API is still slightly experimental and functionality changes may follow\n");
return 0;
err_free_controls:
v4l2_ctrl_handler_free(&s->hdl);
err_unregister_v4l2_dev:
v4l2_device_unregister(&s->v4l2_dev);
err_free_mem:
kfree(s);
return ret;
}
| 201,482,838,855,407,340,000,000,000,000,000,000,000 | airspy.c | 271,094,106,681,489,230,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2016-5400 | Memory leak in the airspy_probe function in drivers/media/usb/airspy/airspy.c in the airspy USB driver in the Linux kernel before 4.7 allows local users to cause a denial of service (memory consumption) via a crafted USB device that emulates many VFL_TYPE_SDR or VFL_TYPE_SUBDEV devices and performs many connect and disconnect operations. | https://nvd.nist.gov/vuln/detail/CVE-2016-5400 |
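The leak described above comes from the ordering of the unwind labels in airspy_probe: when video_register_device fails, the jump to err_unregister_v4l2_dev bypasses v4l2_ctrl_handler_free(&s->hdl), so the control handler allocated a few lines earlier is never released. The toy program below models only the idiom the fix relies on, releasing everything acquired before the failure point in reverse order; all names here are stand-ins, not the real V4L2 or USB API.

#include <stdio.h>
#include <stdlib.h>

static int register_device(void)   { return -1; }          /* pretend this step fails */
static void free_controls(void)    { puts("controls freed"); }
static void unregister_v4l2(void)  { puts("v4l2 device unregistered"); }

static int probe(void)
{
  void *state = malloc(64);
  if (state == NULL)
    return -1;

  /* ... v4l2 device registered, control handler initialised ... */

  if (register_device() != 0)
    goto err_free_controls;       /* must release the handler before the device */

  return 0;                       /* on success, state lives for the device's lifetime */

err_free_controls:
  free_controls();                /* the step the buggy error path skipped */
  unregister_v4l2();
  free(state);
  return -1;
}

int main(void) { return probe() == 0 ? 0 : 1; }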
1,919 | wireshark | 2c13e97d656c1c0ac4d76eb9d307664aae0e0cf7 | https://github.com/wireshark/wireshark | https://github.com/wireshark/wireshark/commit/2c13e97d656c1c0ac4d76eb9d307664aae0e0cf7 | The WTAP_ENCAP_ETHERNET dissector needs to be passed a struct eth_phdr.
We now require that. Make it so.
Bug: 12440
Change-Id: Iffee520976b013800699bde3c6092a3e86be0d76
Reviewed-on: https://code.wireshark.org/review/15424
Reviewed-by: Guy Harris <guy@alum.mit.edu> | 1 | dissect_pktap(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree)
{
proto_tree *pktap_tree = NULL;
proto_item *ti = NULL;
tvbuff_t *next_tvb;
int offset = 0;
guint32 pkt_len, rectype, dlt;
col_set_str(pinfo->cinfo, COL_PROTOCOL, "PKTAP");
col_clear(pinfo->cinfo, COL_INFO);
pkt_len = tvb_get_letohl(tvb, offset);
col_add_fstr(pinfo->cinfo, COL_INFO, "PKTAP, %u byte header", pkt_len);
/* Dissect the packet */
ti = proto_tree_add_item(tree, proto_pktap, tvb, offset, pkt_len, ENC_NA);
pktap_tree = proto_item_add_subtree(ti, ett_pktap);
proto_tree_add_item(pktap_tree, hf_pktap_hdrlen, tvb, offset, 4,
ENC_LITTLE_ENDIAN);
if (pkt_len < MIN_PKTAP_HDR_LEN) {
proto_tree_add_expert(tree, pinfo, &ei_pktap_hdrlen_too_short,
tvb, offset, 4);
return;
}
offset += 4;
proto_tree_add_item(pktap_tree, hf_pktap_rectype, tvb, offset, 4,
ENC_LITTLE_ENDIAN);
rectype = tvb_get_letohl(tvb, offset);
offset += 4;
proto_tree_add_item(pktap_tree, hf_pktap_dlt, tvb, offset, 4,
ENC_LITTLE_ENDIAN);
dlt = tvb_get_letohl(tvb, offset);
offset += 4;
proto_tree_add_item(pktap_tree, hf_pktap_ifname, tvb, offset, 24,
ENC_ASCII|ENC_NA);
offset += 24;
proto_tree_add_item(pktap_tree, hf_pktap_flags, tvb, offset, 4,
ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(pktap_tree, hf_pktap_pfamily, tvb, offset, 4,
ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(pktap_tree, hf_pktap_llhdrlen, tvb, offset, 4,
ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(pktap_tree, hf_pktap_lltrlrlen, tvb, offset, 4,
ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(pktap_tree, hf_pktap_pid, tvb, offset, 4,
ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(pktap_tree, hf_pktap_cmdname, tvb, offset, 20,
ENC_UTF_8|ENC_NA);
offset += 20;
proto_tree_add_item(pktap_tree, hf_pktap_svc_class, tvb, offset, 4,
ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(pktap_tree, hf_pktap_iftype, tvb, offset, 2,
ENC_LITTLE_ENDIAN);
offset += 2;
proto_tree_add_item(pktap_tree, hf_pktap_ifunit, tvb, offset, 2,
ENC_LITTLE_ENDIAN);
offset += 2;
proto_tree_add_item(pktap_tree, hf_pktap_epid, tvb, offset, 4,
ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(pktap_tree, hf_pktap_ecmdname, tvb, offset, 20,
ENC_UTF_8|ENC_NA);
/*offset += 20;*/
if (rectype == PKT_REC_PACKET) {
next_tvb = tvb_new_subset_remaining(tvb, pkt_len);
dissector_try_uint(wtap_encap_dissector_table,
wtap_pcap_encap_to_wtap_encap(dlt), next_tvb, pinfo, tree);
}
}
| 60,167,683,222,163,160,000,000,000,000,000,000,000 | packet-pktap.c | 74,355,000,406,750,550,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2016-5358 | epan/dissectors/packet-pktap.c in the Ethernet dissector in Wireshark 2.x before 2.0.4 mishandles the packet-header data type, which allows remote attackers to cause a denial of service (application crash) via a crafted packet. | https://nvd.nist.gov/vuln/detail/CVE-2016-5358 |
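The commit message for this record explains the crash: dissect_pktap hands the payload to the WTAP_ENCAP_ETHERNET dissector through dissector_try_uint, which supplies no data pointer, while that dissector expects a struct eth_phdr pseudo-header. A sketch of the shape of the fix, supplying an Ethernet pseudo-header through the data parameter, follows; it is illustrative rather than the verbatim patch.

/* Sketch only: give the Ethernet dissector the pseudo-header it expects
   instead of leaving the data pointer unset. */
if (rectype == PKT_REC_PACKET) {
    struct eth_phdr eth;

    eth.fcs_len = -1;     /* FCS presence unknown for pktap captures */
    next_tvb = tvb_new_subset_remaining(tvb, pkt_len);
    dissector_try_uint_new(wtap_encap_dissector_table,
        wtap_pcap_encap_to_wtap_encap(dlt), next_tvb, pinfo, tree,
        TRUE, &eth);
}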
1,925 | wireshark | a66628e425db725df1ac52a3c573a03357060ddd | https://github.com/wireshark/wireshark | https://github.com/wireshark/wireshark/commit/a66628e425db725df1ac52a3c573a03357060ddd | Don't treat the packet length as unsigned.
The scanf family of functions is as annoyingly bad at handling unsigned
numbers as strtoul() is - both of them are perfectly willing to accept a
value beginning with a negative sign as an unsigned value. When using
strtoul(), you can compensate for this by explicitly checking for a '-'
as the first character of the string, but you can't do that with
sscanf().
So revert to having pkt_len be signed, and scanning it with %d, but
check for a negative value and fail if we see a negative value.
Bug: 12395
Change-Id: I43b458a73b0934e9a5c2c89d34eac5a8f21a7455
Reviewed-on: https://code.wireshark.org/review/15223
Reviewed-by: Guy Harris <guy@alum.mit.edu> | 1 | parse_cosine_packet(FILE_T fh, struct wtap_pkthdr *phdr, Buffer *buf,
char *line, int *err, gchar **err_info)
{
union wtap_pseudo_header *pseudo_header = &phdr->pseudo_header;
int num_items_scanned;
int yy, mm, dd, hr, min, sec, csec;
guint pkt_len;
int pro, off, pri, rm, error;
guint code1, code2;
char if_name[COSINE_MAX_IF_NAME_LEN] = "", direction[6] = "";
struct tm tm;
guint8 *pd;
int i, hex_lines, n, caplen = 0;
if (sscanf(line, "%4d-%2d-%2d,%2d:%2d:%2d.%9d:",
&yy, &mm, &dd, &hr, &min, &sec, &csec) == 7) {
/* appears to be output to a control blade */
num_items_scanned = sscanf(line,
"%4d-%2d-%2d,%2d:%2d:%2d.%9d: %5s (%127[A-Za-z0-9/:]), Length:%9u, Pro:%9d, Off:%9d, Pri:%9d, RM:%9d, Err:%9d [%8x, %8x]",
&yy, &mm, &dd, &hr, &min, &sec, &csec,
direction, if_name, &pkt_len,
&pro, &off, &pri, &rm, &error,
&code1, &code2);
if (num_items_scanned != 17) {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("cosine: purported control blade line doesn't have code values");
return FALSE;
}
} else {
/* appears to be output to PE */
num_items_scanned = sscanf(line,
"%5s (%127[A-Za-z0-9/:]), Length:%9u, Pro:%9d, Off:%9d, Pri:%9d, RM:%9d, Err:%9d [%8x, %8x]",
direction, if_name, &pkt_len,
&pro, &off, &pri, &rm, &error,
&code1, &code2);
if (num_items_scanned != 10) {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("cosine: header line is neither control blade nor PE output");
return FALSE;
}
yy = mm = dd = hr = min = sec = csec = 0;
}
if (pkt_len > WTAP_MAX_PACKET_SIZE) {
/*
* Probably a corrupt capture file; don't blow up trying
* to allocate space for an immensely-large packet.
*/
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup_printf("cosine: File has %u-byte packet, bigger than maximum of %u",
pkt_len, WTAP_MAX_PACKET_SIZE);
return FALSE;
}
phdr->rec_type = REC_TYPE_PACKET;
phdr->presence_flags = WTAP_HAS_TS|WTAP_HAS_CAP_LEN;
tm.tm_year = yy - 1900;
tm.tm_mon = mm - 1;
tm.tm_mday = dd;
tm.tm_hour = hr;
tm.tm_min = min;
tm.tm_sec = sec;
tm.tm_isdst = -1;
phdr->ts.secs = mktime(&tm);
phdr->ts.nsecs = csec * 10000000;
phdr->len = pkt_len;
/* XXX need to handle other encapsulations like Cisco HDLC,
Frame Relay and ATM */
if (strncmp(if_name, "TEST:", 5) == 0) {
pseudo_header->cosine.encap = COSINE_ENCAP_TEST;
} else if (strncmp(if_name, "PPoATM:", 7) == 0) {
pseudo_header->cosine.encap = COSINE_ENCAP_PPoATM;
} else if (strncmp(if_name, "PPoFR:", 6) == 0) {
pseudo_header->cosine.encap = COSINE_ENCAP_PPoFR;
} else if (strncmp(if_name, "ATM:", 4) == 0) {
pseudo_header->cosine.encap = COSINE_ENCAP_ATM;
} else if (strncmp(if_name, "FR:", 3) == 0) {
pseudo_header->cosine.encap = COSINE_ENCAP_FR;
} else if (strncmp(if_name, "HDLC:", 5) == 0) {
pseudo_header->cosine.encap = COSINE_ENCAP_HDLC;
} else if (strncmp(if_name, "PPP:", 4) == 0) {
pseudo_header->cosine.encap = COSINE_ENCAP_PPP;
} else if (strncmp(if_name, "ETH:", 4) == 0) {
pseudo_header->cosine.encap = COSINE_ENCAP_ETH;
} else {
pseudo_header->cosine.encap = COSINE_ENCAP_UNKNOWN;
}
if (strncmp(direction, "l2-tx", 5) == 0) {
pseudo_header->cosine.direction = COSINE_DIR_TX;
} else if (strncmp(direction, "l2-rx", 5) == 0) {
pseudo_header->cosine.direction = COSINE_DIR_RX;
}
g_strlcpy(pseudo_header->cosine.if_name, if_name,
COSINE_MAX_IF_NAME_LEN);
pseudo_header->cosine.pro = pro;
pseudo_header->cosine.off = off;
pseudo_header->cosine.pri = pri;
pseudo_header->cosine.rm = rm;
pseudo_header->cosine.err = error;
/* Make sure we have enough room for the packet */
ws_buffer_assure_space(buf, pkt_len);
pd = ws_buffer_start_ptr(buf);
/* Calculate the number of hex dump lines, each
* containing 16 bytes of data */
hex_lines = pkt_len / 16 + ((pkt_len % 16) ? 1 : 0);
for (i = 0; i < hex_lines; i++) {
if (file_gets(line, COSINE_LINE_LENGTH, fh) == NULL) {
*err = file_error(fh, err_info);
if (*err == 0) {
*err = WTAP_ERR_SHORT_READ;
}
return FALSE;
}
if (empty_line(line)) {
break;
}
if ((n = parse_single_hex_dump_line(line, pd, i*16)) == -1) {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("cosine: hex dump line doesn't have 16 numbers");
return FALSE;
}
caplen += n;
}
phdr->caplen = caplen;
return TRUE;
}
| 775,128,632,240,788,800,000,000,000,000,000,000 | cosine.c | 260,698,406,841,229,670,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2016-5356 | wiretap/cosine.c in the CoSine file parser in Wireshark 1.12.x before 1.12.12 and 2.x before 2.0.4 mishandles sscanf unsigned-integer processing, which allows remote attackers to cause a denial of service (application crash) via a crafted file. | https://nvd.nist.gov/vuln/detail/CVE-2016-5356 |
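The commit message above is about scanf semantics rather than the CoSine format itself: a "%u" conversion does not reject a leading minus sign, so a crafted "Length:" field can smuggle a negative number through an unsigned variable. The standalone program below just demonstrates that behaviour and the remedy the commit describes, scanning into a signed int and rejecting negative values.

#include <stdio.h>

int main(void)
{
  unsigned int u = 0;
  int s = 0;

  /* "%u" accepts a leading '-'; the value is negated and wraps, so the
     caller sees a huge unsigned number instead of a parse error. */
  sscanf("-1", "%u", &u);
  printf("scanned as unsigned: %u\n", u);        /* typically 4294967295 */

  /* The remedy from the commit message: scan as signed, then reject < 0. */
  if (sscanf("-1", "%d", &s) == 1 && s < 0)
    printf("scanned as signed: %d, rejected\n", s);

  return 0;
}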
1,926 | wireshark | 3270dfac43da861c714df76513456b46765ff47f | https://github.com/wireshark/wireshark | https://github.com/wireshark/wireshark/commit/3270dfac43da861c714df76513456b46765ff47f | Don't treat the packet length as unsigned.
The scanf family of functions are as annoyingly bad at handling unsigned
numbers as strtoul() is - both of them are perfectly willing to accept a
value beginning with a negative sign as an unsigned value. When using
strtoul(), you can compensate for this by explicitly checking for a '-'
as the first character of the string, but you can't do that with
sscanf().
So revert to having pkt_len be signed, and scanning it with %d, but
check for a negative value and fail if we see a negative value.
Bug: 12394
Change-Id: I4b19b95f2e1ffc96dac5c91bff6698c246f52007
Reviewed-on: https://code.wireshark.org/review/15230
Reviewed-by: Guy Harris <guy@alum.mit.edu> | 1 | parse_toshiba_packet(FILE_T fh, struct wtap_pkthdr *phdr, Buffer *buf,
int *err, gchar **err_info)
{
union wtap_pseudo_header *pseudo_header = &phdr->pseudo_header;
char line[TOSHIBA_LINE_LENGTH];
int num_items_scanned;
guint pkt_len;
int pktnum, hr, min, sec, csec;
char channel[10], direction[10];
int i, hex_lines;
guint8 *pd;
/* Our file pointer should be on the line containing the
* summary information for a packet. Read in that line and
* extract the useful information
*/
if (file_gets(line, TOSHIBA_LINE_LENGTH, fh) == NULL) {
*err = file_error(fh, err_info);
if (*err == 0) {
*err = WTAP_ERR_SHORT_READ;
}
return FALSE;
}
/* Find text in line after "[No.". Limit the length of the
* two strings since we have fixed buffers for channel[] and
* direction[] */
num_items_scanned = sscanf(line, "%9d] %2d:%2d:%2d.%9d %9s %9s",
&pktnum, &hr, &min, &sec, &csec, channel, direction);
if (num_items_scanned != 7) {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("toshiba: record header isn't valid");
return FALSE;
}
/* Scan lines until we find the OFFSET line. In a "telnet" trace,
* this will be the next line. But if you save your telnet session
* to a file from within a Windows-based telnet client, it may
* put in line breaks at 80 columns (or however big your "telnet" box
* is). CRT (a Windows telnet app from VanDyke) does this.
* Here we assume that 80 columns will be the minimum size, and that
* the OFFSET line is not broken in the middle. It's the previous
* line that is normally long and can thus be broken at column 80.
*/
do {
if (file_gets(line, TOSHIBA_LINE_LENGTH, fh) == NULL) {
*err = file_error(fh, err_info);
if (*err == 0) {
*err = WTAP_ERR_SHORT_READ;
}
return FALSE;
}
/* Check for "OFFSET 0001-0203" at beginning of line */
line[16] = '\0';
} while (strcmp(line, "OFFSET 0001-0203") != 0);
num_items_scanned = sscanf(line+64, "LEN=%9u", &pkt_len);
if (num_items_scanned != 1) {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("toshiba: OFFSET line doesn't have valid LEN item");
return FALSE;
}
if (pkt_len > WTAP_MAX_PACKET_SIZE) {
/*
* Probably a corrupt capture file; don't blow up trying
* to allocate space for an immensely-large packet.
*/
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup_printf("toshiba: File has %u-byte packet, bigger than maximum of %u",
pkt_len, WTAP_MAX_PACKET_SIZE);
return FALSE;
}
phdr->rec_type = REC_TYPE_PACKET;
phdr->presence_flags = WTAP_HAS_TS|WTAP_HAS_CAP_LEN;
phdr->ts.secs = hr * 3600 + min * 60 + sec;
phdr->ts.nsecs = csec * 10000000;
phdr->caplen = pkt_len;
phdr->len = pkt_len;
switch (channel[0]) {
case 'B':
phdr->pkt_encap = WTAP_ENCAP_ISDN;
pseudo_header->isdn.uton = (direction[0] == 'T');
pseudo_header->isdn.channel = (guint8)
strtol(&channel[1], NULL, 10);
break;
case 'D':
phdr->pkt_encap = WTAP_ENCAP_ISDN;
pseudo_header->isdn.uton = (direction[0] == 'T');
pseudo_header->isdn.channel = 0;
break;
default:
phdr->pkt_encap = WTAP_ENCAP_ETHERNET;
/* XXX - is there an FCS in the frame? */
pseudo_header->eth.fcs_len = -1;
break;
}
/* Make sure we have enough room for the packet */
ws_buffer_assure_space(buf, pkt_len);
pd = ws_buffer_start_ptr(buf);
/* Calculate the number of hex dump lines, each
* containing 16 bytes of data */
hex_lines = pkt_len / 16 + ((pkt_len % 16) ? 1 : 0);
for (i = 0; i < hex_lines; i++) {
if (file_gets(line, TOSHIBA_LINE_LENGTH, fh) == NULL) {
*err = file_error(fh, err_info);
if (*err == 0) {
*err = WTAP_ERR_SHORT_READ;
}
return FALSE;
}
if (!parse_single_hex_dump_line(line, pd, i * 16)) {
*err = WTAP_ERR_BAD_FILE;
*err_info = g_strdup("toshiba: hex dump not valid");
return FALSE;
}
}
return TRUE;
}
| 46,587,021,802,752,220,000,000,000,000,000,000,000 | toshiba.c | 331,447,668,869,948,600,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2016-5355 | wiretap/toshiba.c in the Toshiba file parser in Wireshark 1.12.x before 1.12.12 and 2.x before 2.0.4 mishandles sscanf unsigned-integer processing, which allows remote attackers to cause a denial of service (application crash) via a crafted file. | https://nvd.nist.gov/vuln/detail/CVE-2016-5355 |
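The same sscanf pitfall applies to the LEN= field parsed by parse_toshiba_packet above. For code that can use strtoul() instead of sscanf(), the commit message points out the complementary workaround: reject a leading '-' by hand before converting. parse_len below is a hypothetical helper written only to show that check; it is not part of the Toshiba reader.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper: strtoul() itself accepts a leading '-' and returns
   the wrapped value, so refuse the sign explicitly before converting. */
static int parse_len(const char *s, unsigned long *out)
{
  char *end;

  while (*s == ' ' || *s == '\t')
    s++;                          /* skip the whitespace strtoul() would skip */
  if (*s == '-')
    return -1;                    /* a negative packet length is malformed */
  *out = strtoul(s, &end, 10);
  return (end == s) ? -1 : 0;     /* no digits at all is also malformed */
}

int main(void)
{
  unsigned long len = 0;
  printf("\"-32\"  -> %d\n", parse_len("-32", &len));
  printf("\"1514\" -> %d (len=%lu)\n", parse_len("1514", &len), len);
  return 0;
}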
1,930 | wireshark | 7d7190695ce2ff269fdffb04e87139995cde21f4 | https://github.com/wireshark/wireshark | https://github.com/wireshark/wireshark/commit/7d7190695ce2ff269fdffb04e87139995cde21f4 | UMTS_FP: fix handling reserved C/T value
The spec puts the reserved value at 0xf but our internal table has 'unknown' at
0; since all the other values seem to be offset-by-one, just take the modulus
0xf to avoid running off the end of the table.
Bug: 12191
Change-Id: I83c8fb66797bbdee52a2246fb1eea6e37cbc7eb0
Reviewed-on: https://code.wireshark.org/review/15722
Reviewed-by: Evan Huus <eapache@gmail.com>
Petri-Dish: Evan Huus <eapache@gmail.com>
Tested-by: Petri Dish Buildbot <buildbot-no-reply@wireshark.org>
Reviewed-by: Michael Mann <mmann78@netscape.net> | 1 | fp_set_per_packet_inf_from_conv(umts_fp_conversation_info_t *p_conv_data,
tvbuff_t *tvb, packet_info *pinfo,
proto_tree *tree _U_)
{
fp_info *fpi;
guint8 tfi, c_t;
int offset = 0, i=0, j=0, num_tbs, chan, tb_size, tb_bit_off;
gboolean is_control_frame;
umts_mac_info *macinf;
rlc_info *rlcinf;
guint8 fake_lchid=0;
gint *cur_val=NULL;
fpi = wmem_new0(wmem_file_scope(), fp_info);
p_add_proto_data(wmem_file_scope(), pinfo, proto_fp, 0, fpi);
fpi->iface_type = p_conv_data->iface_type;
fpi->division = p_conv_data->division;
fpi->release = 7; /* Set values greater then the checks performed */
fpi->release_year = 2006;
fpi->release_month = 12;
fpi->channel = p_conv_data->channel;
fpi->dch_crc_present = p_conv_data->dch_crc_present;
/*fpi->paging_indications;*/
fpi->link_type = FP_Link_Ethernet;
#if 0
/*Only do this the first run, signals that we need to reset the RLC fragtable*/
if (!pinfo->fd->flags.visited && p_conv_data->reset_frag ) {
fpi->reset_frag = p_conv_data->reset_frag;
p_conv_data->reset_frag = FALSE;
}
#endif
/* remember 'lower' UDP layer port information so we can later
* differentiate 'lower' UDP layer from 'user data' UDP layer */
fpi->srcport = pinfo->srcport;
fpi->destport = pinfo->destport;
fpi->com_context_id = p_conv_data->com_context_id;
if (pinfo->link_dir == P2P_DIR_UL) {
fpi->is_uplink = TRUE;
} else {
fpi->is_uplink = FALSE;
}
is_control_frame = tvb_get_guint8(tvb, offset) & 0x01;
switch (fpi->channel) {
case CHANNEL_HSDSCH: /* HS-DSCH - High Speed Downlink Shared Channel */
fpi->hsdsch_entity = p_conv_data->hsdsch_entity;
macinf = wmem_new0(wmem_file_scope(), umts_mac_info);
fpi->hsdsch_macflowd_id = p_conv_data->hsdsch_macdflow_id;
macinf->content[0] = hsdsch_macdflow_id_mac_content_map[p_conv_data->hsdsch_macdflow_id]; /*MAC_CONTENT_PS_DTCH;*/
macinf->lchid[0] = p_conv_data->hsdsch_macdflow_id;
/*macinf->content[0] = lchId_type_table[p_conv_data->edch_lchId[0]];*/
p_add_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0, macinf);
rlcinf = wmem_new0(wmem_file_scope(), rlc_info);
/*Figure out RLC_MODE based on MACd-flow-ID, basically MACd-flow-ID = 0 then it's SRB0 == UM else AM*/
rlcinf->mode[0] = hsdsch_macdflow_id_rlc_map[p_conv_data->hsdsch_macdflow_id];
if (fpi->hsdsch_entity == hs /*&& !rlc_is_ciphered(pinfo)*/) {
for (i=0; i<MAX_NUM_HSDHSCH_MACDFLOW; i++) {
/*Figure out if this channel is multiplexed (signaled from RRC)*/
if ((cur_val=(gint *)g_tree_lookup(hsdsch_muxed_flows, GINT_TO_POINTER((gint)p_conv_data->hrnti))) != NULL) {
j = 1 << i;
fpi->hsdhsch_macfdlow_is_mux[i] = j & *cur_val;
} else {
fpi->hsdhsch_macfdlow_is_mux[i] = FALSE;
}
}
}
/* Make configurable ?(available in NBAP?) */
/* urnti[MAX_RLC_CHANS] */
/*
switch (p_conv_data->rlc_mode) {
case FP_RLC_TM:
rlcinf->mode[0] = RLC_TM;
break;
case FP_RLC_UM:
rlcinf->mode[0] = RLC_UM;
break;
case FP_RLC_AM:
rlcinf->mode[0] = RLC_AM;
break;
case FP_RLC_MODE_UNKNOWN:
default:
rlcinf->mode[0] = RLC_UNKNOWN_MODE;
break;
}*/
/* rbid[MAX_RLC_CHANS] */
/* For RLC re-assembly to work we urnti signaled from NBAP */
rlcinf->urnti[0] = fpi->com_context_id;
rlcinf->li_size[0] = RLC_LI_7BITS;
rlcinf->ciphered[0] = FALSE;
rlcinf->deciphered[0] = FALSE;
p_add_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0, rlcinf);
return fpi;
case CHANNEL_EDCH:
/*Most configuration is now done in the actual dissecting function*/
macinf = wmem_new0(wmem_file_scope(), umts_mac_info);
rlcinf = wmem_new0(wmem_file_scope(), rlc_info);
fpi->no_ddi_entries = p_conv_data->no_ddi_entries;
for (i=0; i<fpi->no_ddi_entries; i++) {
fpi->edch_ddi[i] = p_conv_data->edch_ddi[i]; /*Set the DDI value*/
fpi->edch_macd_pdu_size[i] = p_conv_data->edch_macd_pdu_size[i]; /*Set the size*/
fpi->edch_lchId[i] = p_conv_data->edch_lchId[i]; /*Set the channel id for this entry*/
/*macinf->content[i] = lchId_type_table[p_conv_data->edch_lchId[i]]; */ /*Set the proper Content type for the mac layer.*/
/* rlcinf->mode[i] = lchId_rlc_map[p_conv_data->edch_lchId[i]];*/ /* Set RLC mode by lchid to RLC_MODE map in nbap.h */
}
fpi->edch_type = p_conv_data->edch_type;
/* macinf = wmem_new0(wmem_file_scope(), umts_mac_info);
macinf->content[0] = MAC_CONTENT_PS_DTCH;*/
p_add_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0, macinf);
/* For RLC re-assembly to work we need a urnti signaled from NBAP */
rlcinf->urnti[0] = fpi->com_context_id;
/* rlcinf->mode[0] = RLC_AM;*/
rlcinf->li_size[0] = RLC_LI_7BITS;
rlcinf->ciphered[0] = FALSE;
rlcinf->deciphered[0] = FALSE;
p_add_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0, rlcinf);
return fpi;
case CHANNEL_PCH:
fpi->paging_indications = p_conv_data->paging_indications;
fpi->num_chans = p_conv_data->num_dch_in_flow;
/* Set offset to point to first TFI
*/
if (is_control_frame) {
/* control frame, we're done */
return fpi;
}
/* Set offset to TFI */
offset = 3;
break;
case CHANNEL_DCH:
fpi->num_chans = p_conv_data->num_dch_in_flow;
if (is_control_frame) {
/* control frame, we're done */
return fpi;
}
rlcinf = wmem_new0(wmem_file_scope(), rlc_info);
macinf = wmem_new0(wmem_file_scope(), umts_mac_info);
offset = 2; /*To correctly read the tfi*/
fakes = 5; /* Reset fake counter. */
for (chan=0; chan < fpi->num_chans; chan++) { /*Iterate over the what channels*/
/*Iterate over the transport blocks*/
/*tfi = tvb_get_guint8(tvb, offset);*/
/*TFI is 5 bits according to 3GPP TS 25.321, paragraph 6.2.4.4*/
tfi = tvb_get_bits8(tvb, 3+offset*8, 5);
/*Figure out the number of tbs and size*/
num_tbs = (fpi->is_uplink) ? p_conv_data->fp_dch_channel_info[chan].ul_chan_num_tbs[tfi] : p_conv_data->fp_dch_channel_info[chan].dl_chan_num_tbs[tfi];
tb_size= (fpi->is_uplink) ? p_conv_data->fp_dch_channel_info[i].ul_chan_tf_size[tfi] : p_conv_data->fp_dch_channel_info[i].dl_chan_tf_size[tfi];
/*TODO: This stuff has to be reworked!*/
/*Generates a fake logical channel id for non multiplexed channel*/
if ( p_conv_data->dchs_in_flow_list[chan] != 31 && (p_conv_data->dchs_in_flow_list[chan] == 24 &&
tb_size != 340) ) {
fake_lchid = make_fake_lchid(pinfo, p_conv_data->dchs_in_flow_list[chan]);
}
tb_bit_off = (2+p_conv_data->num_dch_in_flow)*8; /*Point to the C/T of first TB*/
/*Set configuration for individual blocks*/
for (j=0; j < num_tbs && j+chan < MAX_MAC_FRAMES; j++) {
/*Set transport channel id (useful for debugging)*/
macinf->trchid[j+chan] = p_conv_data->dchs_in_flow_list[chan];
/*Transport Channel m31 and 24 might be multiplexed!*/
if ( p_conv_data->dchs_in_flow_list[chan] == 31 || p_conv_data->dchs_in_flow_list[chan] == 24) {
/****** MUST FIGURE OUT IF THIS IS REALLY MULTIPLEXED OR NOT*******/
/*If Trchid == 31 and only on TB, we have no multiplexing*/
if (0/*p_conv_data->dchs_in_flow_list[chan] == 31 && num_tbs == 1*/) {
macinf->ctmux[j+chan] = FALSE;/*Set TRUE if this channel is multiplexed (ie. C/T flag exists)*/
macinf->lchid[j+chan] = 1;
macinf->content[j+chan] = lchId_type_table[1]; /*Base MAC content on logical channel id (Table is in packet-nbap.h)*/
rlcinf->mode[j+chan] = lchId_rlc_map[1]; /*Based RLC mode on logical channel id*/
}
/*Indicate we don't have multiplexing.*/
else if (p_conv_data->dchs_in_flow_list[chan] == 24 && tb_size != 340) {
macinf->ctmux[j+chan] = FALSE;/*Set TRUE if this channel is multiplexed (ie. C/T flag exists)*/
/*g_warning("settin this for %d", pinfo->num);*/
macinf->lchid[j+chan] = fake_lchid;
macinf->fake_chid[j+chan] = TRUE;
macinf->content[j+chan] = MAC_CONTENT_PS_DTCH; /*lchId_type_table[fake_lchid];*/ /*Base MAC content on logical channel id (Table is in packet-nbap.h)*/
rlcinf->mode[j+chan] = RLC_AM;/*lchId_rlc_map[fake_lchid];*/ /*Based RLC mode on logical channel id*/
}
/*We have multiplexing*/
else {
macinf->ctmux[j+chan] = TRUE;/*Set TRUE if this channel is multiplexed (ie. C/T flag exists)*/
/* Peek at C/T, different RLC params for different logical channels */
/*C/T is 4 bits according to 3GPP TS 25.321, paragraph 9.2.1, from MAC header (not FP)*/
c_t = tvb_get_bits8(tvb, tb_bit_off/*(2+p_conv_data->num_dch_in_flow)*8*/, 4); /* c_t = tvb_get_guint8(tvb, offset);*/
macinf->lchid[j+chan] = c_t+1;
macinf->content[j+chan] = lchId_type_table[c_t+1]; /*Base MAC content on logical channel id (Table is in packet-nbap.h)*/
rlcinf->mode[j+chan] = lchId_rlc_map[c_t+1]; /*Based RLC mode on logical channel id*/
}
} else {
fake_lchid = make_fake_lchid(pinfo, p_conv_data->dchs_in_flow_list[chan]);
macinf->ctmux[j+chan] = FALSE;/*Set TRUE if this channel is multiplexed (ie. C/T flag exists)*/
/*macinf->content[j+chan] = MAC_CONTENT_CS_DTCH;*/
macinf->content[j+chan] = lchId_type_table[fake_lchid];
rlcinf->mode[j+chan] = lchId_rlc_map[fake_lchid];
/*Generate virtual logical channel id*/
/************************/
/*TODO: Once proper lchid is always set, this has to be removed*/
macinf->fake_chid[j+chan] = TRUE;
macinf->lchid[j+chan] = fake_lchid; /*make_fake_lchid(pinfo, p_conv_data->dchs_in_flow_list[chan]);*/
/************************/
}
/*** Set rlc info ***/
rlcinf->urnti[j+chan] = p_conv_data->com_context_id;
rlcinf->li_size[j+chan] = RLC_LI_7BITS;
#if 0
/*If this entry exists, SECURITY_MODE is completed (signaled by RRC)*/
if ( rrc_ciph_inf && g_tree_lookup(rrc_ciph_inf, GINT_TO_POINTER((gint)p_conv_data->com_context_id)) != NULL ) {
rlcinf->ciphered[j+chan] = TRUE;
} else {
rlcinf->ciphered[j+chan] = FALSE;
}
#endif
rlcinf->ciphered[j+chan] = FALSE;
rlcinf->deciphered[j+chan] = FALSE;
rlcinf->rbid[j+chan] = macinf->lchid[j+chan];
/*Step over this TB and it's C/T flag.*/
tb_bit_off += tb_size+4;
}
offset++;
}
p_add_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0, macinf);
p_add_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0, rlcinf);
/* Set offset to point to first TFI
* the Number of TFI's = number of DCH's in the flow
*/
offset = 2;
break;
case CHANNEL_FACH_FDD:
fpi->num_chans = p_conv_data->num_dch_in_flow;
if (is_control_frame) {
/* control frame, we're done */
return fpi;
}
/* Set offset to point to first TFI
* the Number of TFI's = number of DCH's in the flow
*/
offset = 2;
/* Set MAC data */
macinf = wmem_new0(wmem_file_scope(), umts_mac_info);
macinf->ctmux[0] = 1;
macinf->content[0] = MAC_CONTENT_DCCH;
p_add_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0, macinf);
/* Set RLC data */
rlcinf = wmem_new0(wmem_file_scope(), rlc_info);
/* Make configurable ?(available in NBAP?) */
/* For RLC re-assembly to work we need to fake urnti */
rlcinf->urnti[0] = fpi->channel;
rlcinf->mode[0] = RLC_AM;
/* rbid[MAX_RLC_CHANS] */
rlcinf->li_size[0] = RLC_LI_7BITS;
rlcinf->ciphered[0] = FALSE;
rlcinf->deciphered[0] = FALSE;
p_add_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0, rlcinf);
break;
case CHANNEL_RACH_FDD:
fpi->num_chans = p_conv_data->num_dch_in_flow;
if (is_control_frame) {
/* control frame, we're done */
return fpi;
}
/* Set offset to point to first TFI
* the Number of TFI's = number of DCH's in the flow
*/
offset = 2;
/* set MAC data */
macinf = wmem_new0(wmem_file_scope(), umts_mac_info);
rlcinf = wmem_new0(wmem_file_scope(), rlc_info);
for ( chan = 0; chan < fpi->num_chans; chan++ ) {
macinf->ctmux[chan] = 1;
macinf->content[chan] = MAC_CONTENT_DCCH;
rlcinf->urnti[chan] = fpi->com_context_id; /*Note that MAC probably will change this*/
}
p_add_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0, macinf);
p_add_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0, rlcinf);
break;
case CHANNEL_HSDSCH_COMMON:
rlcinf = wmem_new0(wmem_file_scope(), rlc_info);
macinf = wmem_new0(wmem_file_scope(), umts_mac_info);
p_add_proto_data(wmem_file_scope(), pinfo, proto_umts_mac, 0, macinf);
p_add_proto_data(wmem_file_scope(), pinfo, proto_rlc, 0, rlcinf);
break;
default:
expert_add_info(pinfo, NULL, &ei_fp_transport_channel_type_unknown);
return NULL;
}
/* Peek at the packet as the per packet info seems not to take the tfi into account */
for (i=0; i<fpi->num_chans; i++) {
tfi = tvb_get_guint8(tvb, offset);
/*TFI is 5 bits according to 3GPP TS 25.321, paragraph 6.2.4.4*/
/*tfi = tvb_get_bits8(tvb, offset*8, 5);*/
if (pinfo->link_dir == P2P_DIR_UL) {
fpi->chan_tf_size[i] = p_conv_data->fp_dch_channel_info[i].ul_chan_tf_size[tfi];
fpi->chan_num_tbs[i] = p_conv_data->fp_dch_channel_info[i].ul_chan_num_tbs[tfi];
} else {
fpi->chan_tf_size[i] = p_conv_data->fp_dch_channel_info[i].dl_chan_tf_size[tfi];
fpi->chan_num_tbs[i] = p_conv_data->fp_dch_channel_info[i].dl_chan_num_tbs[tfi];
}
offset++;
}
return fpi;
}
| 193,271,014,369,532,400,000,000,000,000,000,000,000 | packet-umts_fp.c | 301,117,539,260,110,400,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2016-5353 | epan/dissectors/packet-umts_fp.c in the UMTS FP dissector in Wireshark 1.12.x before 1.12.12 and 2.x before 2.0.4 mishandles the reserved C/T value, which allows remote attackers to cause a denial of service (application crash) via a crafted packet. | https://nvd.nist.gov/vuln/detail/CVE-2016-5353 |
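The off-by-one described in the commit message sits in the multiplexed branch of fp_set_per_packet_inf_from_conv above: the 4-bit C/T field can carry the reserved value 0xf, and the lookup tables are indexed with c_t+1, so a reserved value indexes one slot past the end of lchId_type_table and lchId_rlc_map. One way to realise the modulus trick the message describes is sketched below; the exact mapping chosen for the reserved value is a detail of the real patch, so treat this as illustrative.

/* C/T is 4 bits and 0xf is reserved; the tables start with 'unknown' at
   index 0 and are otherwise indexed by C/T + 1, so fold the value back
   into range instead of running off the end of the table. */
c_t = (tvb_get_bits8(tvb, tb_bit_off, 4) + 1) % 0xf;
macinf->lchid[j+chan]   = c_t;
macinf->content[j+chan] = lchId_type_table[c_t];
rlcinf->mode[j+chan]    = lchId_rlc_map[c_t];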
1,931 | wireshark | b6d838eebf4456192360654092e5587c5207f185 | https://github.com/wireshark/wireshark | https://github.com/wireshark/wireshark/commit/b6d838eebf4456192360654092e5587c5207f185 | Sanity check eapol_len in AirPDcapDecryptWPABroadcastKey
Bug: 12175
Change-Id: Iaf977ba48f8668bf8095800a115ff9a3472dd893
Reviewed-on: https://code.wireshark.org/review/15326
Petri-Dish: Michael Mann <mmann78@netscape.net>
Tested-by: Petri Dish Buildbot <buildbot-no-reply@wireshark.org>
Reviewed-by: Alexis La Goutte <alexis.lagoutte@gmail.com>
Reviewed-by: Peter Wu <peter@lekensteyn.nl>
Tested-by: Peter Wu <peter@lekensteyn.nl> | 1 | AirPDcapDecryptWPABroadcastKey(const EAPOL_RSN_KEY *pEAPKey, guint8 *decryption_key, PAIRPDCAP_SEC_ASSOCIATION sa, guint eapol_len)
{
guint8 key_version;
guint8 *key_data;
guint8 *szEncryptedKey;
guint16 key_bytes_len = 0; /* Length of the total key data field */
guint16 key_len; /* Actual group key length */
static AIRPDCAP_KEY_ITEM dummy_key; /* needed in case AirPDcapRsnaMng() wants the key structure */
AIRPDCAP_SEC_ASSOCIATION *tmp_sa;
/* We skip verifying the MIC of the key. If we were implementing a WPA supplicant we'd want to verify, but for a sniffer it's not needed. */
/* Preparation for decrypting the group key - determine group key data length */
/* depending on whether the pairwise key is TKIP or AES encryption key */
key_version = AIRPDCAP_EAP_KEY_DESCR_VER(pEAPKey->key_information[1]);
if (key_version == AIRPDCAP_WPA_KEY_VER_NOT_CCMP){
/* TKIP */
key_bytes_len = pntoh16(pEAPKey->key_length);
}else if (key_version == AIRPDCAP_WPA_KEY_VER_AES_CCMP){
/* AES */
key_bytes_len = pntoh16(pEAPKey->key_data_len);
/* AES keys must be at least 128 bits = 16 bytes. */
if (key_bytes_len < 16) {
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
}
if (key_bytes_len < GROUP_KEY_MIN_LEN || key_bytes_len > eapol_len - sizeof(EAPOL_RSN_KEY)) {
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
/* Encrypted key is in the information element field of the EAPOL key packet */
key_data = (guint8 *)pEAPKey + sizeof(EAPOL_RSN_KEY);
szEncryptedKey = (guint8 *)g_memdup(key_data, key_bytes_len);
DEBUG_DUMP("Encrypted Broadcast key:", szEncryptedKey, key_bytes_len);
DEBUG_DUMP("KeyIV:", pEAPKey->key_iv, 16);
DEBUG_DUMP("decryption_key:", decryption_key, 16);
/* We are rekeying, save old sa */
tmp_sa=(AIRPDCAP_SEC_ASSOCIATION *)g_malloc(sizeof(AIRPDCAP_SEC_ASSOCIATION));
memcpy(tmp_sa, sa, sizeof(AIRPDCAP_SEC_ASSOCIATION));
sa->next=tmp_sa;
/* As we have no concept of the prior association request at this point, we need to deduce the */
/* group key cipher from the length of the key bytes. In WPA this is straightforward as the */
/* keybytes just contain the GTK, and the GTK is only in the group handshake, NOT the M3. */
/* In WPA2 its a little more tricky as the M3 keybytes contain an RSN_IE, but the group handshake */
/* does not. Also there are other (variable length) items in the keybytes which we need to account */
/* for to determine the true key length, and thus the group cipher. */
if (key_version == AIRPDCAP_WPA_KEY_VER_NOT_CCMP){
guint8 new_key[32];
guint8 dummy[256];
/* TKIP key */
/* Per 802.11i, Draft 3.0 spec, section 8.5.2, p. 97, line 4-8, */
/* group key is decrypted using RC4. Concatenate the IV with the 16 byte EK (PTK+16) to get the decryption key */
rc4_state_struct rc4_state;
/* The WPA group key just contains the GTK bytes so deducing the type is straightforward */
/* Note - WPA M3 doesn't contain a group key so we'll only be here for the group handshake */
sa->wpa.key_ver = (key_bytes_len >=TKIP_GROUP_KEY_LEN)?AIRPDCAP_WPA_KEY_VER_NOT_CCMP:AIRPDCAP_WPA_KEY_VER_AES_CCMP;
/* Build the full decryption key based on the IV and part of the pairwise key */
memcpy(new_key, pEAPKey->key_iv, 16);
memcpy(new_key+16, decryption_key, 16);
DEBUG_DUMP("FullDecrKey:", new_key, 32);
crypt_rc4_init(&rc4_state, new_key, sizeof(new_key));
/* Do dummy 256 iterations of the RC4 algorithm (per 802.11i, Draft 3.0, p. 97 line 6) */
crypt_rc4(&rc4_state, dummy, 256);
crypt_rc4(&rc4_state, szEncryptedKey, key_bytes_len);
} else if (key_version == AIRPDCAP_WPA_KEY_VER_AES_CCMP){
/* AES CCMP key */
guint8 key_found;
guint8 key_length;
guint16 key_index;
guint8 *decrypted_data;
/* Unwrap the key; the result is key_bytes_len in length */
decrypted_data = AES_unwrap(decryption_key, 16, szEncryptedKey, key_bytes_len);
/* With WPA2 what we get after Broadcast Key decryption is an actual RSN structure.
The key itself is stored as a GTK KDE
WPA2 IE (1 byte) id = 0xdd, length (1 byte), GTK OUI (4 bytes), key index (1 byte) and 1 reserved byte. Thus we have to
pass pointer to the actual key with 8 bytes offset */
key_found = FALSE;
key_index = 0;
/* Parse Key data until we found GTK KDE */
/* GTK KDE = 00-0F-AC 01 */
while(key_index < (key_bytes_len - 6) && !key_found){
guint8 rsn_id;
guint32 type;
/* Get RSN ID */
rsn_id = decrypted_data[key_index];
type = ((decrypted_data[key_index + 2] << 24) +
(decrypted_data[key_index + 3] << 16) +
(decrypted_data[key_index + 4] << 8) +
(decrypted_data[key_index + 5]));
if (rsn_id == 0xdd && type == 0x000fac01) {
key_found = TRUE;
} else {
key_index += decrypted_data[key_index+1]+2;
}
}
if (key_found){
key_length = decrypted_data[key_index+1] - 6;
if (key_index+8 >= key_bytes_len ||
key_length > key_bytes_len - key_index - 8) {
g_free(decrypted_data);
g_free(szEncryptedKey);
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
/* Skip over the GTK header info, and don't copy past the end of the encrypted data */
memcpy(szEncryptedKey, decrypted_data+key_index+8, key_length);
} else {
g_free(decrypted_data);
g_free(szEncryptedKey);
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
if (key_length == TKIP_GROUP_KEY_LEN)
sa->wpa.key_ver = AIRPDCAP_WPA_KEY_VER_NOT_CCMP;
else
sa->wpa.key_ver = AIRPDCAP_WPA_KEY_VER_AES_CCMP;
g_free(decrypted_data);
}
key_len = (sa->wpa.key_ver==AIRPDCAP_WPA_KEY_VER_NOT_CCMP)?TKIP_GROUP_KEY_LEN:CCMP_GROUP_KEY_LEN;
if (key_len > key_bytes_len) {
/* the key required for this protocol is longer than the key that we just calculated */
g_free(szEncryptedKey);
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
/* Decrypted key is now in szEncryptedKey with len of key_len */
DEBUG_DUMP("Broadcast key:", szEncryptedKey, key_len);
/* Load the proper key material info into the SA */
sa->key = &dummy_key; /* we just need key to be not null because it is checked in AirPDcapRsnaMng(). The WPA key materials are actually in the .wpa structure */
sa->validKey = TRUE;
/* Since this is a GTK and its size is only 32 bytes (vs. the 64 byte size of a PTK), we fake it and put it in at a 32-byte offset so the */
/* AirPDcapRsnaMng() function will extract the right piece of the GTK for decryption. (The first 16 bytes of the GTK are used for decryption.) */
memset(sa->wpa.ptk, 0, sizeof(sa->wpa.ptk));
memcpy(sa->wpa.ptk+32, szEncryptedKey, key_len);
g_free(szEncryptedKey);
return AIRPDCAP_RET_SUCCESS_HANDSHAKE;
}
| 249,501,140,671,775,100,000,000,000,000,000,000,000 | airpdcap.c | 135,102,669,786,164,510,000,000,000,000,000,000,000 | [
"CWE-125"
] | CVE-2016-5352 | epan/crypt/airpdcap.c in the IEEE 802.11 dissector in Wireshark 2.x before 2.0.4 mishandles certain length values, which allows remote attackers to cause a denial of service (application crash) via a crafted packet. | https://nvd.nist.gov/vuln/detail/CVE-2016-5352 |
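The sanity check named in the commit message follows the general rule that the CWE-125 (out-of-bounds read) classification points at: a length taken from the frame itself, here the key_data_len field, must be clamped against the bytes actually present after the EAPOL_RSN_KEY header before it drives any copy or parse of the key data. The toy below shows only that clamp; HDR_LEN and extract_key_data are stand-ins, not the real EAPOL layout or the Wireshark code.

#include <stdio.h>
#include <string.h>

#define HDR_LEN 8u   /* stand-in for the fixed header preceding the key data */

/* Hypothetical helper: never trust a length field carried in the frame. */
static int extract_key_data(const unsigned char *frame, size_t frame_len,
                            size_t claimed_len, unsigned char *out,
                            size_t out_len)
{
  if (frame_len < HDR_LEN)
    return -1;                          /* not even room for the header */
  if (claimed_len > frame_len - HDR_LEN)
    return -1;                          /* length field exceeds the payload */
  if (claimed_len > out_len)
    return -1;                          /* would overflow the destination */
  memcpy(out, frame + HDR_LEN, claimed_len);
  return 0;
}

int main(void)
{
  unsigned char frame[16] = {0};
  unsigned char key[64];
  printf("%d\n", extract_key_data(frame, sizeof frame, 200, key, sizeof key)); /* -1 */
  printf("%d\n", extract_key_data(frame, sizeof frame, 8, key, sizeof key));   /* 0  */
  return 0;
}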
1,932 | wireshark | 9b0b20b8d5f8c9f7839d58ff6c5900f7e19283b4 | https://github.com/wireshark/wireshark | https://github.com/wireshark/wireshark/commit/9b0b20b8d5f8c9f7839d58ff6c5900f7e19283b4 | Make sure the EAPOL body is big enough for an EAPOL_RSN_KEY.
A pointer to an EAPOL_RSN_KEY is set on the packet presuming the
whole EAPOL_RSN_KEY is there. That's not always the case for
fuzzed/malicious captures.
Bug: 11585
Change-Id: Ib94b8aceef444c7820e43b969596efdb8dbecccd
Reviewed-on: https://code.wireshark.org/review/15540
Reviewed-by: Michael Mann <mmann78@netscape.net>
Petri-Dish: Michael Mann <mmann78@netscape.net>
Tested-by: Petri Dish Buildbot <buildbot-no-reply@wireshark.org>
Reviewed-by: Anders Broman <a.broman58@gmail.com> | 1 | static INT AirPDcapScanForKeys(
PAIRPDCAP_CONTEXT ctx,
const guint8 *data,
const guint mac_header_len,
const guint tot_len,
AIRPDCAP_SEC_ASSOCIATION_ID id
)
{
const UCHAR *addr;
guint bodyLength;
PAIRPDCAP_SEC_ASSOCIATION sta_sa;
PAIRPDCAP_SEC_ASSOCIATION sa;
guint offset = 0;
const guint8 dot1x_header[] = {
0xAA, /* DSAP=SNAP */
0xAA, /* SSAP=SNAP */
0x03, /* Control field=Unnumbered frame */
0x00, 0x00, 0x00, /* Org. code=encaps. Ethernet */
0x88, 0x8E /* Type: 802.1X authentication */
};
const guint8 bt_dot1x_header[] = {
0xAA, /* DSAP=SNAP */
0xAA, /* SSAP=SNAP */
0x03, /* Control field=Unnumbered frame */
0x00, 0x19, 0x58, /* Org. code=Bluetooth SIG */
0x00, 0x03 /* Type: Bluetooth Security */
};
const guint8 tdls_header[] = {
0xAA, /* DSAP=SNAP */
0xAA, /* SSAP=SNAP */
0x03, /* Control field=Unnumbered frame */
0x00, 0x00, 0x00, /* Org. code=encaps. Ethernet */
0x89, 0x0D, /* Type: 802.11 - Fast Roaming Remote Request */
0x02, /* Payload Type: TDLS */
0X0C /* Action Category: TDLS */
};
const EAPOL_RSN_KEY *pEAPKey;
#ifdef _DEBUG
#define MSGBUF_LEN 255
CHAR msgbuf[MSGBUF_LEN];
#endif
AIRPDCAP_DEBUG_TRACE_START("AirPDcapScanForKeys");
/* cache offset in the packet data */
offset = mac_header_len;
/* check if the packet has an LLC header and the packet is 802.1X authentication (IEEE 802.1X-2004, pg. 24) */
if (memcmp(data+offset, dot1x_header, 8) == 0 || memcmp(data+offset, bt_dot1x_header, 8) == 0) {
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Authentication: EAPOL packet", AIRPDCAP_DEBUG_LEVEL_3);
/* skip LLC header */
offset+=8;
/* check if the packet is a EAPOL-Key (0x03) (IEEE 802.1X-2004, pg. 25) */
if (data[offset+1]!=3) {
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Not EAPOL-Key", AIRPDCAP_DEBUG_LEVEL_3);
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
/* get and check the body length (IEEE 802.1X-2004, pg. 25) */
bodyLength=pntoh16(data+offset+2);
if ((tot_len-offset-4) < bodyLength) { /* Only check if frame is long enough for eapol header, ignore tailing garbage, see bug 9065 */
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "EAPOL body too short", AIRPDCAP_DEBUG_LEVEL_3);
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
/* skip EAPOL MPDU and go to the first byte of the body */
offset+=4;
pEAPKey = (const EAPOL_RSN_KEY *) (data+offset);
/* check if the key descriptor type is valid (IEEE 802.1X-2004, pg. 27) */
if (/*pEAPKey->type!=0x1 &&*/ /* RC4 Key Descriptor Type (deprecated) */
pEAPKey->type != AIRPDCAP_RSN_WPA2_KEY_DESCRIPTOR && /* IEEE 802.11 Key Descriptor Type (WPA2) */
pEAPKey->type != AIRPDCAP_RSN_WPA_KEY_DESCRIPTOR) /* 254 = RSN_KEY_DESCRIPTOR - WPA, */
{
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Not valid key descriptor type", AIRPDCAP_DEBUG_LEVEL_3);
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
/* start with descriptor body */
offset+=1;
/* search for a cached Security Association for current BSSID and AP */
sa = AirPDcapGetSaPtr(ctx, &id);
if (sa == NULL){
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "No SA for BSSID found", AIRPDCAP_DEBUG_LEVEL_3);
return AIRPDCAP_RET_REQ_DATA;
}
/* It could be a Pairwise Key exchange, check */
if (AirPDcapRsna4WHandshake(ctx, data, sa, offset, tot_len) == AIRPDCAP_RET_SUCCESS_HANDSHAKE)
return AIRPDCAP_RET_SUCCESS_HANDSHAKE;
if (mac_header_len + GROUP_KEY_PAYLOAD_LEN_MIN > tot_len) {
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Message too short for Group Key", AIRPDCAP_DEBUG_LEVEL_3);
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
/* Verify the bitfields: Key = 0(groupwise) Mic = 1 Ack = 1 Secure = 1 */
if (AIRPDCAP_EAP_KEY(data[offset+1])!=0 ||
AIRPDCAP_EAP_ACK(data[offset+1])!=1 ||
AIRPDCAP_EAP_MIC(data[offset]) != 1 ||
AIRPDCAP_EAP_SEC(data[offset]) != 1){
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Key bitfields not correct for Group Key", AIRPDCAP_DEBUG_LEVEL_3);
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
/* force STA address to be the broadcast MAC so we create an SA for the groupkey */
memcpy(id.sta, broadcast_mac, AIRPDCAP_MAC_LEN);
/* get the Security Association structure for the broadcast MAC and AP */
sa = AirPDcapGetSaPtr(ctx, &id);
if (sa == NULL){
return AIRPDCAP_RET_REQ_DATA;
}
/* Get the SA for the STA, since we need its pairwise key to decrpyt the group key */
/* get STA address */
if ( (addr=AirPDcapGetStaAddress((const AIRPDCAP_MAC_FRAME_ADDR4 *)(data))) != NULL) {
memcpy(id.sta, addr, AIRPDCAP_MAC_LEN);
#ifdef _DEBUG
g_snprintf(msgbuf, MSGBUF_LEN, "ST_MAC: %2X.%2X.%2X.%2X.%2X.%2X\t", id.sta[0],id.sta[1],id.sta[2],id.sta[3],id.sta[4],id.sta[5]);
#endif
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", msgbuf, AIRPDCAP_DEBUG_LEVEL_3);
} else {
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "SA not found", AIRPDCAP_DEBUG_LEVEL_5);
return AIRPDCAP_RET_REQ_DATA;
}
sta_sa = AirPDcapGetSaPtr(ctx, &id);
if (sta_sa == NULL){
return AIRPDCAP_RET_REQ_DATA;
}
/* Try to extract the group key and install it in the SA */
return (AirPDcapDecryptWPABroadcastKey(pEAPKey, sta_sa->wpa.ptk+16, sa, tot_len-offset+1));
} else if (memcmp(data+offset, tdls_header, 10) == 0) {
const guint8 *initiator, *responder;
guint8 action;
guint status, offset_rsne = 0, offset_fte = 0, offset_link = 0, offset_timeout = 0;
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Authentication: TDLS Action Frame", AIRPDCAP_DEBUG_LEVEL_3);
/* skip LLC header */
offset+=10;
/* check if the packet is a TDLS response or confirm */
action = data[offset];
if (action!=1 && action!=2) {
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Not Response nor confirm", AIRPDCAP_DEBUG_LEVEL_3);
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
/* check status */
offset++;
status=pntoh16(data+offset);
if (status!=0) {
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "TDLS setup not successfull", AIRPDCAP_DEBUG_LEVEL_3);
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
/* skip Token + capabilities */
offset+=5;
/* search for RSN, Fast BSS Transition, Link Identifier and Timeout Interval IEs */
while(offset < (tot_len - 2)) {
if (data[offset] == 48) {
offset_rsne = offset;
} else if (data[offset] == 55) {
offset_fte = offset;
} else if (data[offset] == 56) {
offset_timeout = offset;
} else if (data[offset] == 101) {
offset_link = offset;
}
if (tot_len < offset + data[offset + 1] + 2) {
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
offset += data[offset + 1] + 2;
}
if (offset_rsne == 0 || offset_fte == 0 ||
offset_timeout == 0 || offset_link == 0)
{
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Cannot Find all necessary IEs", AIRPDCAP_DEBUG_LEVEL_3);
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Found RSNE/Fast BSS/Timeout Interval/Link IEs", AIRPDCAP_DEBUG_LEVEL_3);
/* Will create a Security Association between 2 STA. Need to get both MAC address */
initiator = &data[offset_link + 8];
responder = &data[offset_link + 14];
if (memcmp(initiator, responder, AIRPDCAP_MAC_LEN) < 0) {
memcpy(id.sta, initiator, AIRPDCAP_MAC_LEN);
memcpy(id.bssid, responder, AIRPDCAP_MAC_LEN);
} else {
memcpy(id.sta, responder, AIRPDCAP_MAC_LEN);
memcpy(id.bssid, initiator, AIRPDCAP_MAC_LEN);
}
sa = AirPDcapGetSaPtr(ctx, &id);
if (sa == NULL){
return AIRPDCAP_RET_REQ_DATA;
}
if (sa->validKey) {
if (memcmp(sa->wpa.nonce, data + offset_fte + 52, AIRPDCAP_WPA_NONCE_LEN) == 0) {
/* Already have valid key for this SA, no need to redo key derivation */
return AIRPDCAP_RET_SUCCESS_HANDSHAKE;
} else {
/* We are opening a new session with the same two STA, save previous sa */
AIRPDCAP_SEC_ASSOCIATION *tmp_sa = g_new(AIRPDCAP_SEC_ASSOCIATION, 1);
memcpy(tmp_sa, sa, sizeof(AIRPDCAP_SEC_ASSOCIATION));
sa->next=tmp_sa;
sa->validKey = FALSE;
}
}
if (AirPDcapTDLSDeriveKey(sa, data, offset_rsne, offset_fte, offset_timeout, offset_link, action)
== AIRPDCAP_RET_SUCCESS) {
AIRPDCAP_DEBUG_TRACE_END("AirPDcapScanForKeys");
return AIRPDCAP_RET_SUCCESS_HANDSHAKE;
}
} else {
AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Skipping: not an EAPOL packet", AIRPDCAP_DEBUG_LEVEL_3);
}
AIRPDCAP_DEBUG_TRACE_END("AirPDcapScanForKeys");
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
| 240,174,644,621,715,030,000,000,000,000,000,000,000 | airpdcap.c | 178,616,951,686,308,540,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2016-5351 | epan/crypt/airpdcap.c in the IEEE 802.11 dissector in Wireshark 1.12.x before 1.12.12 and 2.x before 2.0.4 mishandles the lack of an EAPOL_RSN_KEY, which allows remote attackers to cause a denial of service (application crash) via a crafted packet. | https://nvd.nist.gov/vuln/detail/CVE-2016-5351 |
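Editor's note: the commit message above states the fix — confirm the EAPOL body is large enough before pointing an EAPOL_RSN_KEY at it. A minimal sketch of that check, assuming a simplified stand-in struct (the real layout has more fields); this is not the actual Wireshark patch:

```c
#include <stdint.h>
#include <stddef.h>

/* Hypothetical, simplified stand-in for the real structure. */
struct eapol_rsn_key {
    uint8_t type;
    uint8_t key_information[2];
    uint8_t key_length[2];
    uint8_t replay_counter[8];
    uint8_t key_nonce[32];
    /* ... remaining fixed-size fields elided ... */
};

/* Only map the struct onto the packet if the remaining bytes can hold it. */
static const struct eapol_rsn_key *
get_eapol_key(const uint8_t *data, size_t tot_len, size_t offset)
{
    if (offset > tot_len || tot_len - offset < sizeof(struct eapol_rsn_key))
        return NULL;                  /* too short: reject instead of casting */
    return (const struct eapol_rsn_key *)(data + offset);
}
```

The point is that the cast happens only after the size check, so later field reads cannot run past the captured bytes.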
1,935 | linux | 4116def2337991b39919f3b448326e21c40e0dbb | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/4116def2337991b39919f3b448326e21c40e0dbb | rds: fix an infoleak in rds_inc_info_copy
The last field "flags" of object "minfo" is not initialized.
Copying this object out may leak kernel stack data.
Assign 0 to it to avoid the leak.
Signed-off-by: Kangjie Lu <kjlu@gatech.edu>
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | void rds_inc_info_copy(struct rds_incoming *inc,
struct rds_info_iterator *iter,
__be32 saddr, __be32 daddr, int flip)
{
struct rds_info_message minfo;
minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
minfo.len = be32_to_cpu(inc->i_hdr.h_len);
if (flip) {
minfo.laddr = daddr;
minfo.faddr = saddr;
minfo.lport = inc->i_hdr.h_dport;
minfo.fport = inc->i_hdr.h_sport;
} else {
minfo.laddr = saddr;
minfo.faddr = daddr;
minfo.lport = inc->i_hdr.h_sport;
minfo.fport = inc->i_hdr.h_dport;
}
rds_info_copy(iter, &minfo, sizeof(minfo));
}
| 333,555,213,106,388,200,000,000,000,000,000,000,000 | recv.c | 196,002,549,294,029,070,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2016-5244 | The rds_inc_info_copy function in net/rds/recv.c in the Linux kernel through 4.6.3 does not initialize a certain structure member, which allows remote attackers to obtain sensitive information from kernel stack memory by reading an RDS message. | https://nvd.nist.gov/vuln/detail/CVE-2016-5244 |
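Editor's note: per the commit message the fix is simply to write 0 into the one field that was left uninitialized before the structure is copied out. A standalone sketch of the pattern, with a stand-in struct and a plain memcpy in place of the kernel's rds_info_copy():

```c
#include <string.h>
#include <stdint.h>

struct msg_info {              /* stand-in for the real rds_info_message */
    uint64_t seq;
    uint32_t len;
    uint32_t laddr, faddr;
    uint16_t lport, fport;
    uint8_t  flags;            /* the field the commit says was left uninitialized */
};

static void fill_info(struct msg_info *out)
{
    struct msg_info minfo;

    minfo.seq   = 42;
    minfo.len   = 128;
    minfo.laddr = minfo.faddr = 0;
    minfo.lport = minfo.fport = 0;
    minfo.flags = 0;           /* every field that reaches userspace is now defined */

    memcpy(out, &minfo, sizeof(minfo));   /* kernel code would use rds_info_copy() */
}
```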
1,936 | linux | 5d2be1422e02ccd697ccfcd45c85b4a26e6178e2 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/5d2be1422e02ccd697ccfcd45c85b4a26e6178e2 | tipc: fix an infoleak in tipc_nl_compat_link_dump
link_info.str is a char array of size 60. Memory after the NULL
byte is not initialized. Sending the whole object out can cause
a leak.
Signed-off-by: Kangjie Lu <kjlu@gatech.edu>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
struct nlattr **attrs)
{
struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
struct tipc_link_info link_info;
int err;
if (!attrs[TIPC_NLA_LINK])
return -EINVAL;
err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
NULL);
if (err)
return err;
link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
&link_info, sizeof(link_info));
}
| 22,483,029,912,805,243,000,000,000,000,000,000,000 | netlink_compat.c | 119,355,885,410,724,360,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2016-5243 | The tipc_nl_compat_link_dump function in net/tipc/netlink_compat.c in the Linux kernel through 4.6.3 does not properly copy a certain string, which allows local users to obtain sensitive information from kernel stack memory by reading a Netlink message. | https://nvd.nist.gov/vuln/detail/CVE-2016-5243 |
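Editor's note: the commit message explains that bytes after the terminating NUL in the 60-byte str array were never initialized. A sketch of zeroing the whole on-stack object and using a bounded, NUL-terminated copy; the struct and function names are stand-ins, not the kernel API:

```c
#include <string.h>
#include <stdint.h>

#define LINK_NAME_MAX 60

struct link_info {            /* stand-in for the real tipc_link_info */
    uint32_t dest;
    uint32_t up;
    char     str[LINK_NAME_MAX];
};

static void fill_link_info(struct link_info *out, const char *name,
                           uint32_t dest, uint32_t up)
{
    struct link_info li;

    memset(&li, 0, sizeof(li));          /* no stale stack bytes after the NUL */
    li.dest = dest;
    li.up   = up;
    strncpy(li.str, name, sizeof(li.str) - 1);   /* bounded and NUL-terminated */

    memcpy(out, &li, sizeof(li));        /* kernel code copies this out via netlink */
}
```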
1,940 | libimobiledevice | df1f5c4d70d0c19ad40072f5246ca457e7f9849e | https://github.com/libimobiledevice/libimobiledevice | https://github.com/libimobiledevice/libimobiledevice/commit/df1f5c4d70d0c19ad40072f5246ca457e7f9849e | common: [security fix] Make sure sockets only listen locally | 1 | int socket_create(uint16_t port)
{
int sfd = -1;
int yes = 1;
#ifdef WIN32
WSADATA wsa_data;
if (!wsa_init) {
if (WSAStartup(MAKEWORD(2,2), &wsa_data) != ERROR_SUCCESS) {
fprintf(stderr, "WSAStartup failed!\n");
ExitProcess(-1);
}
wsa_init = 1;
}
#endif
struct sockaddr_in saddr;
if (0 > (sfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP))) {
perror("socket()");
return -1;
}
if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void*)&yes, sizeof(int)) == -1) {
perror("setsockopt()");
socket_close(sfd);
return -1;
}
memset((void *) &saddr, 0, sizeof(saddr));
saddr.sin_family = AF_INET;
saddr.sin_addr.s_addr = htonl(INADDR_ANY);
saddr.sin_port = htons(port);
if (0 > bind(sfd, (struct sockaddr *) &saddr, sizeof(saddr))) {
perror("bind()");
socket_close(sfd);
return -1;
}
if (listen(sfd, 1) == -1) {
perror("listen()");
socket_close(sfd);
return -1;
}
return sfd;
}
| 174,992,331,678,357,600,000,000,000,000,000,000,000 | socket.c | 5,143,455,029,250,239,000,000,000,000,000,000,000 | [
"CWE-284"
] | CVE-2016-5104 | The socket_create function in common/socket.c in libimobiledevice and libusbmuxd allows remote attackers to bypass intended access restrictions and communicate with services on iOS devices by connecting to an IPv4 TCP socket. | https://nvd.nist.gov/vuln/detail/CVE-2016-5104 |
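Editor's note: the one-line commit message says the socket should only listen locally. A plain POSIX sketch that binds to the loopback address instead of INADDR_ANY; it mirrors the idea, not necessarily the exact libimobiledevice change:

```c
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Create a TCP listener reachable only from the local host. */
static int create_local_listener(uint16_t port)
{
    struct sockaddr_in saddr;
    int yes = 1;
    int sfd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);

    if (sfd < 0)
        return -1;
    setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes));

    memset(&saddr, 0, sizeof(saddr));
    saddr.sin_family      = AF_INET;
    saddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);  /* 127.0.0.1, not INADDR_ANY */
    saddr.sin_port        = htons(port);

    if (bind(sfd, (struct sockaddr *)&saddr, sizeof(saddr)) < 0 ||
        listen(sfd, 1) < 0) {
        close(sfd);
        return -1;
    }
    return sfd;
}
```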
1,941 | php-src | abd159cce48f3e34f08e4751c568e09677d5ec9c | https://github.com/php/php-src | https://github.com/php/php-src/commit/abd159cce48f3e34f08e4751c568e09677d5ec9c?w=1 | Fix bug #72114 - int/size_t confusion in fread | 1 | PHPAPI PHP_FUNCTION(fread)
{
zval *arg1;
long len;
php_stream *stream;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rl", &arg1, &len) == FAILURE) {
RETURN_FALSE;
}
PHP_STREAM_TO_ZVAL(stream, &arg1);
if (len <= 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Length parameter must be greater than 0");
RETURN_FALSE;
}
Z_STRVAL_P(return_value) = emalloc(len + 1);
Z_STRLEN_P(return_value) = php_stream_read(stream, Z_STRVAL_P(return_value), len);
/* needed because recv/read/gzread doesnt put a null at the end*/
Z_STRVAL_P(return_value)[Z_STRLEN_P(return_value)] = 0;
Z_TYPE_P(return_value) = IS_STRING;
}
| 267,862,534,182,931,460,000,000,000,000,000,000,000 | file.c | 61,201,000,865,043,630,000,000,000,000,000,000,000 | [
"CWE-190"
] | CVE-2016-5096 | Integer overflow in the fread function in ext/standard/file.c in PHP before 5.5.36 and 5.6.x before 5.6.22 allows remote attackers to cause a denial of service or possibly have unspecified other impact via a large integer in the second argument. | https://nvd.nist.gov/vuln/detail/CVE-2016-5096 |
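Editor's note: the commit title points at int/size_t confusion — len is a signed long, but the resulting string length lands in an int-sized field, so values above INT_MAX wrap. A hedged sketch of the range check involved, using plain stdio instead of PHP's stream and allocator APIs (read_len and its signature are invented for illustration):

```c
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Read up to len bytes into a freshly allocated, NUL-terminated buffer.
 * Reject requests whose final length could not be stored in an int. */
static char *read_len(FILE *fp, long len, int *out_len)
{
    char *buf;
    size_t got;

    if (len <= 0 || len > INT_MAX - 1)   /* range check before any allocation */
        return NULL;

    buf = malloc((size_t)len + 1);
    if (!buf)
        return NULL;

    got = fread(buf, 1, (size_t)len, fp);
    buf[got] = '\0';
    *out_len = (int)got;
    return buf;
}
```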
1,949 | php-src | 0da8b8b801f9276359262f1ef8274c7812d3dfda | https://github.com/php/php-src | https://github.com/php/php-src/commit/0da8b8b801f9276359262f1ef8274c7812d3dfda?w=1 | Fix bug #72135 - don't create strings with lengths outside int range | 1 | static void php_html_entities(INTERNAL_FUNCTION_PARAMETERS, int all)
{
char *str, *hint_charset = NULL;
int str_len, hint_charset_len = 0;
size_t new_len;
long flags = ENT_COMPAT;
char *replaced;
zend_bool double_encode = 1;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|ls!b", &str, &str_len, &flags, &hint_charset, &hint_charset_len, &double_encode) == FAILURE) {
return;
}
replaced = php_escape_html_entities_ex(str, str_len, &new_len, all, (int) flags, hint_charset, double_encode TSRMLS_CC);
RETVAL_STRINGL(replaced, (int)new_len, 0);
}
| 304,507,025,606,054,900,000,000,000,000,000,000,000 | html.c | 185,037,864,091,738,220,000,000,000,000,000,000,000 | [
"CWE-190"
] | CVE-2016-5094 | Integer overflow in the php_html_entities function in ext/standard/html.c in PHP before 5.5.36 and 5.6.x before 5.6.22 allows remote attackers to cause a denial of service or possibly have unspecified other impact by triggering a large output string from the htmlspecialchars function. | https://nvd.nist.gov/vuln/detail/CVE-2016-5094 |
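Editor's note: same class of bug as the previous record — a size_t result length stored into an int. A tiny sketch of refusing lengths outside int range before constructing the value; set_string_len is a hypothetical helper, not a PHP engine function:

```c
#include <limits.h>
#include <stddef.h>

/* Store a size_t length into an int-typed field only when it fits.
 * Returns 0 on success, -1 when the value would wrap. */
static int set_string_len(int *dst, size_t new_len)
{
    if (new_len > (size_t)INT_MAX)
        return -1;          /* refuse to create a string with a wrapped length */
    *dst = (int)new_len;
    return 0;
}
```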
1,964 | php-src | 97eff7eb57fc2320c267a949cffd622c38712484 | https://github.com/php/php-src | https://github.com/php/php-src/commit/97eff7eb57fc2320c267a949cffd622c38712484?w=1 | Fix bug #72241: get_icu_value_internal out-of-bounds read | 1 | PHP_FUNCTION(locale_accept_from_http)
{
UEnumeration *available;
char *http_accept = NULL;
int http_accept_len;
UErrorCode status = 0;
int len;
char resultLocale[INTL_MAX_LOCALE_LEN+1];
UAcceptResult outResult;
if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &http_accept, &http_accept_len) == FAILURE)
{
intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR,
"locale_accept_from_http: unable to parse input parameters", 0 TSRMLS_CC );
RETURN_FALSE;
}
available = ures_openAvailableLocales(NULL, &status);
INTL_CHECK_STATUS(status, "locale_accept_from_http: failed to retrieve locale list");
len = uloc_acceptLanguageFromHTTP(resultLocale, INTL_MAX_LOCALE_LEN,
&outResult, http_accept, available, &status);
uenum_close(available);
INTL_CHECK_STATUS(status, "locale_accept_from_http: failed to find acceptable locale");
if (len < 0 || outResult == ULOC_ACCEPT_FAILED) {
RETURN_FALSE;
}
RETURN_STRINGL(resultLocale, len, 1);
}
| 229,704,843,578,137,100,000,000,000,000,000,000,000 | locale_methods.c | 304,415,109,677,498,050,000,000,000,000,000,000,000 | [
"CWE-125"
] | CVE-2016-5093 | The get_icu_value_internal function in ext/intl/locale/locale_methods.c in PHP before 5.5.36, 5.6.x before 5.6.22, and 7.x before 7.0.7 does not ensure the presence of a '\0' character, which allows remote attackers to cause a denial of service (out-of-bounds read) or possibly have unspecified other impact via a crafted locale_get_primary_language call. | https://nvd.nist.gov/vuln/detail/CVE-2016-5093 |
1,973 | php-src | 97eff7eb57fc2320c267a949cffd622c38712484 | https://github.com/php/php-src | https://github.com/php/php-src/commit/97eff7eb57fc2320c267a949cffd622c38712484?w=1 | Fix bug #72241: get_icu_value_internal out-of-bounds read | 1 | static char* get_icu_value_internal( const char* loc_name , char* tag_name, int* result , int fromParseLocale)
{
char* tag_value = NULL;
int32_t tag_value_len = 512;
int singletonPos = 0;
char* mod_loc_name = NULL;
int grOffset = 0;
int32_t buflen = 512;
UErrorCode status = U_ZERO_ERROR;
if( strcmp(tag_name, LOC_CANONICALIZE_TAG) != 0 ){
/* Handle grandfathered languages */
grOffset = findOffset( LOC_GRANDFATHERED , loc_name );
if( grOffset >= 0 ){
if( strcmp(tag_name , LOC_LANG_TAG)==0 ){
return estrdup(loc_name);
} else {
/* Since Grandfathered , no value , do nothing , retutn NULL */
return NULL;
}
}
if( fromParseLocale==1 ){
/* Handle singletons */
if( strcmp(tag_name , LOC_LANG_TAG)==0 ){
if( strlen(loc_name)>1 && (isIDPrefix(loc_name) == 1) ){
return estrdup(loc_name);
}
}
singletonPos = getSingletonPos( loc_name );
if( singletonPos == 0){
/* singleton at start of script, region , variant etc.
* or invalid singleton at start of language */
return NULL;
} else if( singletonPos > 0 ){
/* singleton at some position except at start
* strip off the singleton and rest of the loc_name */
mod_loc_name = estrndup ( loc_name , singletonPos-1);
}
} /* end of if fromParse */
} /* end of if != LOC_CANONICAL_TAG */
if( mod_loc_name == NULL){
mod_loc_name = estrdup(loc_name );
}
/* Proceed to ICU */
do{
tag_value = erealloc( tag_value , buflen );
tag_value_len = buflen;
if( strcmp(tag_name , LOC_SCRIPT_TAG)==0 ){
buflen = uloc_getScript ( mod_loc_name ,tag_value , tag_value_len , &status);
}
if( strcmp(tag_name , LOC_LANG_TAG )==0 ){
buflen = uloc_getLanguage ( mod_loc_name ,tag_value , tag_value_len , &status);
}
if( strcmp(tag_name , LOC_REGION_TAG)==0 ){
buflen = uloc_getCountry ( mod_loc_name ,tag_value , tag_value_len , &status);
}
if( strcmp(tag_name , LOC_VARIANT_TAG)==0 ){
buflen = uloc_getVariant ( mod_loc_name ,tag_value , tag_value_len , &status);
}
if( strcmp(tag_name , LOC_CANONICALIZE_TAG)==0 ){
buflen = uloc_canonicalize ( mod_loc_name ,tag_value , tag_value_len , &status);
}
if( U_FAILURE( status ) ) {
if( status == U_BUFFER_OVERFLOW_ERROR ) {
status = U_ZERO_ERROR;
continue;
}
/* Error in retriving data */
*result = 0;
if( tag_value ){
efree( tag_value );
}
if( mod_loc_name ){
efree( mod_loc_name);
}
return NULL;
}
} while( buflen > tag_value_len );
if( buflen ==0 ){
/* No value found */
*result = -1;
if( tag_value ){
efree( tag_value );
}
if( mod_loc_name ){
efree( mod_loc_name);
}
return NULL;
} else {
*result = 1;
}
if( mod_loc_name ){
efree( mod_loc_name);
}
return tag_value;
}
| 258,636,481,049,794,200,000,000,000,000,000,000,000 | locale_methods.c | 118,305,450,747,253,400,000,000,000,000,000,000,000 | [
"CWE-125"
] | CVE-2016-5093 | The get_icu_value_internal function in ext/intl/locale/locale_methods.c in PHP before 5.5.36, 5.6.x before 5.6.22, and 7.x before 7.0.7 does not ensure the presence of a '\0' character, which allows remote attackers to cause a denial of service (out-of-bounds read) or possibly have unspecified other impact via a crafted locale_get_primary_language call. | https://nvd.nist.gov/vuln/detail/CVE-2016-5093 |
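Editor's note: the two php/intl records above carry the same commit title, and the CVE text says the core problem is a missing '\0' guarantee on the value obtained from ICU. A generic sketch of clamping a library-reported length and forcing NUL termination before treating the buffer as a C string; the capacity and names are illustrative only:

```c
#include <stddef.h>

#define VALUE_BUF_LEN 157    /* illustrative capacity only */

/* 'reported' is a length returned by a library call that filled 'buf'.
 * Clamp it to the buffer and guarantee a terminating NUL before any
 * strlen()/strcpy()-style use. Returns the usable length. */
static size_t terminate_value(char *buf, size_t buf_size, long reported)
{
    size_t len;

    if (buf_size == 0)
        return 0;
    len = reported > 0 ? (size_t)reported : 0;
    if (len >= buf_size)
        len = buf_size - 1;      /* never index past the buffer */

    buf[len] = '\0';             /* ensure the '\0' the CVE says was missing */
    return len;
}
```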
1,991 | linux | 45e093ae2830cd1264677d47ff9a95a71f5d9f9c | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/45e093ae2830cd1264677d47ff9a95a71f5d9f9c | tipc: check nl sock before parsing nested attributes
Make sure the socket for which the user is listing publications exists
before parsing the socket netlink attributes.
Prior to this patch a call without any socket caused a NULL pointer
dereference in tipc_nl_publ_dump().
Tested-and-reported-by: Baozeng Ding <sploving1@gmail.com>
Signed-off-by: Richard Alpe <richard.alpe@ericsson.com>
Acked-by: Jon Maloy <jon.maloy@ericsson.cm>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int err;
u32 tsk_portid = cb->args[0];
u32 last_publ = cb->args[1];
u32 done = cb->args[2];
struct net *net = sock_net(skb->sk);
struct tipc_sock *tsk;
if (!tsk_portid) {
struct nlattr **attrs;
struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
err = tipc_nlmsg_parse(cb->nlh, &attrs);
if (err)
return err;
err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
attrs[TIPC_NLA_SOCK],
tipc_nl_sock_policy);
if (err)
return err;
if (!sock[TIPC_NLA_SOCK_REF])
return -EINVAL;
tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
}
if (done)
return 0;
tsk = tipc_sk_lookup(net, tsk_portid);
if (!tsk)
return -EINVAL;
lock_sock(&tsk->sk);
err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
if (!err)
done = 1;
release_sock(&tsk->sk);
sock_put(&tsk->sk);
cb->args[0] = tsk_portid;
cb->args[1] = last_publ;
cb->args[2] = done;
return skb->len;
}
| 109,108,179,952,697,830,000,000,000,000,000,000,000 | socket.c | 147,330,032,818,011,570,000,000,000,000,000,000,000 | [
"CWE-703"
] | CVE-2016-4951 | The tipc_nl_publ_dump function in net/tipc/socket.c in the Linux kernel through 4.6 does not verify socket existence, which allows local users to cause a denial of service (NULL pointer dereference and system crash) or possibly have unspecified other impact via a dumpit operation. | https://nvd.nist.gov/vuln/detail/CVE-2016-4951 |
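Editor's note: the commit message says the fix is to verify the socket attribute exists before parsing its nested attributes. A self-contained sketch of that NULL check with stand-in types (struct attr and parse_nested only model the netlink API, they are not it):

```c
#include <stddef.h>

struct attr { const void *data; int len; };   /* stand-in for struct nlattr */

/* Trivial stand-in for nla_parse_nested(): the real function walks the
 * nested attribute stream; here it only models "needs a non-NULL container". */
static int parse_nested(struct attr *out, int max, const struct attr *container)
{
    (void)out; (void)max; (void)container;
    return 0;
}

/* Verify the attribute exists before parsing it, as the commit describes. */
static int dump_sock_publications(struct attr *const attrs[], int sock_idx)
{
    struct attr nested[8];

    if (attrs[sock_idx] == NULL)
        return -1;                 /* missing attribute: reject, don't parse */

    return parse_nested(nested, 8, attrs[sock_idx]);
}
```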
1,992 | linux | 99d825822eade8d827a1817357cbf3f889a552d6 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/99d825822eade8d827a1817357cbf3f889a552d6 | get_rock_ridge_filename(): handle malformed NM entries
Payloads of NM entries are not supposed to contain NUL. When we run
into such, only the part prior to the first NUL goes into the
concatenation (i.e. the directory entry name being encoded by a bunch
of NM entries). We do stop when the amount collected so far + the
claimed amount in the current NM entry exceed 254. So far, so good,
but what we return as the total length is the sum of *claimed*
sizes, not the actual amount collected. And that can grow pretty
large - not unlimited, since you'd need to put CE entries in
between to be able to get more than the maximum that could be
contained in one isofs directory entry / continuation chunk and
we stop once we've encountered 32 CEs, but you can get about 8Kb
easily. And that's what will be passed to the readdir callback as the
name length. 8Kb __copy_to_user() from a buffer allocated by
__get_free_page()
Cc: stable@vger.kernel.org # 0.98pl6+ (yes, really)
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> | 1 | int get_rock_ridge_filename(struct iso_directory_record *de,
char *retname, struct inode *inode)
{
struct rock_state rs;
struct rock_ridge *rr;
int sig;
int retnamlen = 0;
int truncate = 0;
int ret = 0;
if (!ISOFS_SB(inode->i_sb)->s_rock)
return 0;
*retname = 0;
init_rock_state(&rs, inode);
setup_rock_ridge(de, inode, &rs);
repeat:
while (rs.len > 2) { /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *)rs.chr;
/*
* Ignore rock ridge info if rr->len is out of range, but
* don't return -EIO because that would make the file
* invisible.
*/
if (rr->len < 3)
goto out; /* Something got screwed up here */
sig = isonum_721(rs.chr);
if (rock_check_overflow(&rs, sig))
goto eio;
rs.chr += rr->len;
rs.len -= rr->len;
/*
* As above, just ignore the rock ridge info if rr->len
* is bogus.
*/
if (rs.len < 0)
goto out; /* Something got screwed up here */
switch (sig) {
case SIG('R', 'R'):
if ((rr->u.RR.flags[0] & RR_NM) == 0)
goto out;
break;
case SIG('S', 'P'):
if (check_sp(rr, inode))
goto out;
break;
case SIG('C', 'E'):
rs.cont_extent = isonum_733(rr->u.CE.extent);
rs.cont_offset = isonum_733(rr->u.CE.offset);
rs.cont_size = isonum_733(rr->u.CE.size);
break;
case SIG('N', 'M'):
if (truncate)
break;
if (rr->len < 5)
break;
/*
* If the flags are 2 or 4, this indicates '.' or '..'.
* We don't want to do anything with this, because it
* screws up the code that calls us. We don't really
* care anyways, since we can just use the non-RR
* name.
*/
if (rr->u.NM.flags & 6)
break;
if (rr->u.NM.flags & ~1) {
printk("Unsupported NM flag settings (%d)\n",
rr->u.NM.flags);
break;
}
if ((strlen(retname) + rr->len - 5) >= 254) {
truncate = 1;
break;
}
strncat(retname, rr->u.NM.name, rr->len - 5);
retnamlen += rr->len - 5;
break;
case SIG('R', 'E'):
kfree(rs.buffer);
return -1;
default:
break;
}
}
ret = rock_continue(&rs);
if (ret == 0)
goto repeat;
if (ret == 1)
return retnamlen; /* If 0, this file did not have a NM field */
out:
kfree(rs.buffer);
return ret;
eio:
ret = -EIO;
goto out;
}
| 93,758,422,006,208,340,000,000,000,000,000,000,000 | rock.c | 282,024,907,067,004,600,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2016-4913 | The get_rock_ridge_filename function in fs/isofs/rock.c in the Linux kernel before 4.5.5 mishandles NM (aka alternate name) entries containing \0 characters, which allows local users to obtain sensitive information from kernel memory or possibly have unspecified other impact via a crafted isofs filesystem. | https://nvd.nist.gov/vuln/detail/CVE-2016-4913 |
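Editor's note: the commit message pins the bug on returning the sum of claimed NM sizes while strncat() actually stops at an embedded NUL, so the reported name length can exceed what was collected. A simplified sketch that reports only the bytes really appended; it assumes name points at a NUL-terminated buffer of NAME_MAX_LEN + 1 bytes and is not the isofs code:

```c
#include <stddef.h>
#include <string.h>

#define NAME_MAX_LEN 254

/* Append one NM-style piece (which may contain an embedded NUL) to 'name'
 * and return the number of bytes actually appended, not the claimed length. */
static size_t append_piece(char *name, const char *piece, size_t claimed)
{
    size_t before = strlen(name);
    size_t room   = NAME_MAX_LEN - before;
    size_t want   = claimed < room ? claimed : room;

    strncat(name, piece, want);      /* stops early at an embedded '\0' */
    return strlen(name) - before;    /* report what really landed in 'name' */
}
```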
1,995 | libarchive | fd7e0c02 | https://github.com/libarchive/libarchive | https://github.com/libarchive/libarchive/commit/fd7e0c02 | Reject cpio symlinks that exceed 1MB | 1 | archive_read_format_cpio_read_header(struct archive_read *a,
struct archive_entry *entry)
{
struct cpio *cpio;
const void *h;
struct archive_string_conv *sconv;
size_t namelength;
size_t name_pad;
int r;
cpio = (struct cpio *)(a->format->data);
sconv = cpio->opt_sconv;
if (sconv == NULL) {
if (!cpio->init_default_conversion) {
cpio->sconv_default =
archive_string_default_conversion_for_read(
&(a->archive));
cpio->init_default_conversion = 1;
}
sconv = cpio->sconv_default;
}
r = (cpio->read_header(a, cpio, entry, &namelength, &name_pad));
if (r < ARCHIVE_WARN)
return (r);
/* Read name from buffer. */
h = __archive_read_ahead(a, namelength + name_pad, NULL);
if (h == NULL)
return (ARCHIVE_FATAL);
if (archive_entry_copy_pathname_l(entry,
(const char *)h, namelength, sconv) != 0) {
if (errno == ENOMEM) {
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory for Pathname");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Pathname can't be converted from %s to current locale.",
archive_string_conversion_charset_name(sconv));
r = ARCHIVE_WARN;
}
cpio->entry_offset = 0;
__archive_read_consume(a, namelength + name_pad);
/* If this is a symlink, read the link contents. */
if (archive_entry_filetype(entry) == AE_IFLNK) {
h = __archive_read_ahead(a,
(size_t)cpio->entry_bytes_remaining, NULL);
if (h == NULL)
return (ARCHIVE_FATAL);
if (archive_entry_copy_symlink_l(entry, (const char *)h,
(size_t)cpio->entry_bytes_remaining, sconv) != 0) {
if (errno == ENOMEM) {
archive_set_error(&a->archive, ENOMEM,
"Can't allocate memory for Linkname");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Linkname can't be converted from %s to "
"current locale.",
archive_string_conversion_charset_name(sconv));
r = ARCHIVE_WARN;
}
__archive_read_consume(a, cpio->entry_bytes_remaining);
cpio->entry_bytes_remaining = 0;
}
/* XXX TODO: If the full mode is 0160200, then this is a Solaris
* ACL description for the following entry. Read this body
* and parse it as a Solaris-style ACL, then read the next
* header. XXX */
/* Compare name to "TRAILER!!!" to test for end-of-archive. */
if (namelength == 11 && strcmp((const char *)h, "TRAILER!!!") == 0) {
/* TODO: Store file location of start of block. */
archive_clear_error(&a->archive);
return (ARCHIVE_EOF);
}
/* Detect and record hardlinks to previously-extracted entries. */
if (record_hardlink(a, cpio, entry) != ARCHIVE_OK) {
return (ARCHIVE_FATAL);
}
return (r);
}
| 116,133,945,221,878,200,000,000,000,000,000,000,000 | archive_read_support_format_cpio.c | 177,814,792,440,499,570,000,000,000,000,000,000,000 | [
"CWE-20"
] | CVE-2016-4809 | The archive_read_format_cpio_read_header function in archive_read_support_format_cpio.c in libarchive before 3.2.1 allows remote attackers to cause a denial of service (application crash) via a CPIO archive with a large symlink. | https://nvd.nist.gov/vuln/detail/CVE-2016-4809 |
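Editor's note: the commit title is essentially the whole fix — refuse symlink targets larger than 1 MB before buffering them. A trivial sketch of that cap with illustrative names:

```c
#include <stdint.h>

#define SYMLINK_LIMIT (1024 * 1024)   /* 1 MB cap, as the commit title describes */

/* Return 0 if a symlink target of 'size' bytes is acceptable, -1 otherwise. */
static int check_symlink_size(int64_t size)
{
    if (size < 0 || size > SYMLINK_LIMIT)
        return -1;   /* refuse to read-ahead an absurdly large link target */
    return 0;
}
```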
1,996 | linux | 1f461dcdd296eecedaffffc6bae2bfa90bd7eb89 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/1f461dcdd296eecedaffffc6bae2bfa90bd7eb89 | ppp: take reference on channels netns
Let channels hold a reference on their network namespace.
Some channel types, like ppp_async and ppp_synctty, can have their
userspace controller running in a different namespace. Therefore they
can't rely on that controller to keep their netns from being removed
from under them.
==================================================================
BUG: KASAN: use-after-free in ppp_unregister_channel+0x372/0x3a0 at
addr ffff880064e217e0
Read of size 8 by task syz-executor/11581
=============================================================================
BUG net_namespace (Not tainted): kasan: bad access detected
-----------------------------------------------------------------------------
Disabling lock debugging due to kernel taint
INFO: Allocated in copy_net_ns+0x6b/0x1a0 age=92569 cpu=3 pid=6906
[< none >] ___slab_alloc+0x4c7/0x500 kernel/mm/slub.c:2440
[< none >] __slab_alloc+0x4c/0x90 kernel/mm/slub.c:2469
[< inline >] slab_alloc_node kernel/mm/slub.c:2532
[< inline >] slab_alloc kernel/mm/slub.c:2574
[< none >] kmem_cache_alloc+0x23a/0x2b0 kernel/mm/slub.c:2579
[< inline >] kmem_cache_zalloc kernel/include/linux/slab.h:597
[< inline >] net_alloc kernel/net/core/net_namespace.c:325
[< none >] copy_net_ns+0x6b/0x1a0 kernel/net/core/net_namespace.c:360
[< none >] create_new_namespaces+0x2f6/0x610 kernel/kernel/nsproxy.c:95
[< none >] copy_namespaces+0x297/0x320 kernel/kernel/nsproxy.c:150
[< none >] copy_process.part.35+0x1bf4/0x5760 kernel/kernel/fork.c:1451
[< inline >] copy_process kernel/kernel/fork.c:1274
[< none >] _do_fork+0x1bc/0xcb0 kernel/kernel/fork.c:1723
[< inline >] SYSC_clone kernel/kernel/fork.c:1832
[< none >] SyS_clone+0x37/0x50 kernel/kernel/fork.c:1826
[< none >] entry_SYSCALL_64_fastpath+0x16/0x7a kernel/arch/x86/entry/entry_64.S:185
INFO: Freed in net_drop_ns+0x67/0x80 age=575 cpu=2 pid=2631
[< none >] __slab_free+0x1fc/0x320 kernel/mm/slub.c:2650
[< inline >] slab_free kernel/mm/slub.c:2805
[< none >] kmem_cache_free+0x2a0/0x330 kernel/mm/slub.c:2814
[< inline >] net_free kernel/net/core/net_namespace.c:341
[< none >] net_drop_ns+0x67/0x80 kernel/net/core/net_namespace.c:348
[< none >] cleanup_net+0x4e5/0x600 kernel/net/core/net_namespace.c:448
[< none >] process_one_work+0x794/0x1440 kernel/kernel/workqueue.c:2036
[< none >] worker_thread+0xdb/0xfc0 kernel/kernel/workqueue.c:2170
[< none >] kthread+0x23f/0x2d0 kernel/drivers/block/aoe/aoecmd.c:1303
[< none >] ret_from_fork+0x3f/0x70 kernel/arch/x86/entry/entry_64.S:468
INFO: Slab 0xffffea0001938800 objects=3 used=0 fp=0xffff880064e20000
flags=0x5fffc0000004080
INFO: Object 0xffff880064e20000 @offset=0 fp=0xffff880064e24200
CPU: 1 PID: 11581 Comm: syz-executor Tainted: G B 4.4.0+
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
rel-1.8.2-0-g33fbe13 by qemu-project.org 04/01/2014
00000000ffffffff ffff8800662c7790 ffffffff8292049d ffff88003e36a300
ffff880064e20000 ffff880064e20000 ffff8800662c77c0 ffffffff816f2054
ffff88003e36a300 ffffea0001938800 ffff880064e20000 0000000000000000
Call Trace:
[< inline >] __dump_stack kernel/lib/dump_stack.c:15
[<ffffffff8292049d>] dump_stack+0x6f/0xa2 kernel/lib/dump_stack.c:50
[<ffffffff816f2054>] print_trailer+0xf4/0x150 kernel/mm/slub.c:654
[<ffffffff816f875f>] object_err+0x2f/0x40 kernel/mm/slub.c:661
[< inline >] print_address_description kernel/mm/kasan/report.c:138
[<ffffffff816fb0c5>] kasan_report_error+0x215/0x530 kernel/mm/kasan/report.c:236
[< inline >] kasan_report kernel/mm/kasan/report.c:259
[<ffffffff816fb4de>] __asan_report_load8_noabort+0x3e/0x40 kernel/mm/kasan/report.c:280
[< inline >] ? ppp_pernet kernel/include/linux/compiler.h:218
[<ffffffff83ad71b2>] ? ppp_unregister_channel+0x372/0x3a0 kernel/drivers/net/ppp/ppp_generic.c:2392
[< inline >] ppp_pernet kernel/include/linux/compiler.h:218
[<ffffffff83ad71b2>] ppp_unregister_channel+0x372/0x3a0 kernel/drivers/net/ppp/ppp_generic.c:2392
[< inline >] ? ppp_pernet kernel/drivers/net/ppp/ppp_generic.c:293
[<ffffffff83ad6f26>] ? ppp_unregister_channel+0xe6/0x3a0 kernel/drivers/net/ppp/ppp_generic.c:2392
[<ffffffff83ae18f3>] ppp_asynctty_close+0xa3/0x130 kernel/drivers/net/ppp/ppp_async.c:241
[<ffffffff83ae1850>] ? async_lcp_peek+0x5b0/0x5b0 kernel/drivers/net/ppp/ppp_async.c:1000
[<ffffffff82c33239>] tty_ldisc_close.isra.1+0x99/0xe0 kernel/drivers/tty/tty_ldisc.c:478
[<ffffffff82c332c0>] tty_ldisc_kill+0x40/0x170 kernel/drivers/tty/tty_ldisc.c:744
[<ffffffff82c34943>] tty_ldisc_release+0x1b3/0x260 kernel/drivers/tty/tty_ldisc.c:772
[<ffffffff82c1ef21>] tty_release+0xac1/0x13e0 kernel/drivers/tty/tty_io.c:1901
[<ffffffff82c1e460>] ? release_tty+0x320/0x320 kernel/drivers/tty/tty_io.c:1688
[<ffffffff8174de36>] __fput+0x236/0x780 kernel/fs/file_table.c:208
[<ffffffff8174e405>] ____fput+0x15/0x20 kernel/fs/file_table.c:244
[<ffffffff813595ab>] task_work_run+0x16b/0x200 kernel/kernel/task_work.c:115
[< inline >] exit_task_work kernel/include/linux/task_work.h:21
[<ffffffff81307105>] do_exit+0x8b5/0x2c60 kernel/kernel/exit.c:750
[<ffffffff813fdd20>] ? debug_check_no_locks_freed+0x290/0x290 kernel/kernel/locking/lockdep.c:4123
[<ffffffff81306850>] ? mm_update_next_owner+0x6f0/0x6f0 kernel/kernel/exit.c:357
[<ffffffff813215e6>] ? __dequeue_signal+0x136/0x470 kernel/kernel/signal.c:550
[<ffffffff8132067b>] ? recalc_sigpending_tsk+0x13b/0x180 kernel/kernel/signal.c:145
[<ffffffff81309628>] do_group_exit+0x108/0x330 kernel/kernel/exit.c:880
[<ffffffff8132b9d4>] get_signal+0x5e4/0x14f0 kernel/kernel/signal.c:2307
[< inline >] ? kretprobe_table_lock kernel/kernel/kprobes.c:1113
[<ffffffff8151d355>] ? kprobe_flush_task+0xb5/0x450 kernel/kernel/kprobes.c:1158
[<ffffffff8115f7d3>] do_signal+0x83/0x1c90 kernel/arch/x86/kernel/signal.c:712
[<ffffffff8151d2a0>] ? recycle_rp_inst+0x310/0x310 kernel/include/linux/list.h:655
[<ffffffff8115f750>] ? setup_sigcontext+0x780/0x780 kernel/arch/x86/kernel/signal.c:165
[<ffffffff81380864>] ? finish_task_switch+0x424/0x5f0 kernel/kernel/sched/core.c:2692
[< inline >] ? finish_lock_switch kernel/kernel/sched/sched.h:1099
[<ffffffff81380560>] ? finish_task_switch+0x120/0x5f0 kernel/kernel/sched/core.c:2678
[< inline >] ? context_switch kernel/kernel/sched/core.c:2807
[<ffffffff85d794e9>] ? __schedule+0x919/0x1bd0 kernel/kernel/sched/core.c:3283
[<ffffffff81003901>] exit_to_usermode_loop+0xf1/0x1a0 kernel/arch/x86/entry/common.c:247
[< inline >] prepare_exit_to_usermode kernel/arch/x86/entry/common.c:282
[<ffffffff810062ef>] syscall_return_slowpath+0x19f/0x210 kernel/arch/x86/entry/common.c:344
[<ffffffff85d88022>] int_ret_from_sys_call+0x25/0x9f kernel/arch/x86/entry/entry_64.S:281
Memory state around the buggy address:
ffff880064e21680: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff880064e21700: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>ffff880064e21780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff880064e21800: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff880064e21880: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
==================================================================
Fixes: 273ec51dd7ce ("net: ppp_generic - introduce net-namespace functionality v2")
Reported-by: Baozeng Ding <sploving1@gmail.com>
Signed-off-by: Guillaume Nault <g.nault@alphalink.fr>
Reviewed-by: Cyrill Gorcunov <gorcunov@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
{
struct channel *pch;
struct ppp_net *pn;
pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
if (!pch)
return -ENOMEM;
pn = ppp_pernet(net);
pch->ppp = NULL;
pch->chan = chan;
pch->chan_net = net;
chan->ppp = pch;
init_ppp_file(&pch->file, CHANNEL);
pch->file.hdrlen = chan->hdrlen;
#ifdef CONFIG_PPP_MULTILINK
pch->lastseq = -1;
#endif /* CONFIG_PPP_MULTILINK */
init_rwsem(&pch->chan_sem);
spin_lock_init(&pch->downl);
rwlock_init(&pch->upl);
spin_lock_bh(&pn->all_channels_lock);
pch->file.index = ++pn->last_channel_index;
list_add(&pch->list, &pn->new_channels);
atomic_inc(&channel_count);
spin_unlock_bh(&pn->all_channels_lock);
return 0;
}
| 200,851,677,246,187,520,000,000,000,000,000,000,000 | ppp_generic.c | 33,074,513,998,217,310,000,000,000,000,000,000,000 | [
"CWE-416"
] | CVE-2016-4805 | Use-after-free vulnerability in drivers/net/ppp/ppp_generic.c in the Linux kernel before 4.5.2 allows local users to cause a denial of service (memory corruption and system crash, or spinlock) or possibly have unspecified other impact by removing a network namespace, related to the ppp_register_net_channel and ppp_unregister_channel functions. | https://nvd.nist.gov/vuln/detail/CVE-2016-4805 |
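Editor's note: the commit message says channels must hold a reference on their network namespace so it cannot be freed underneath them (get_net()/put_net() in the kernel). As a standalone illustration of the pattern only, here is a minimal refcount sketch with hypothetical types:

```c
#include <stdlib.h>

struct ns {                 /* stand-in for a refcounted namespace object */
    int refcount;
};

struct channel {
    struct ns *chan_net;    /* the namespace this channel lives in */
};

static struct ns *ns_get(struct ns *n) { n->refcount++; return n; }

static void ns_put(struct ns *n)
{
    if (--n->refcount == 0)
        free(n);            /* freed only when the last user drops it */
}

/* Registration takes a reference (kernel code would call get_net()). */
static void channel_register(struct channel *ch, struct ns *n)
{
    ch->chan_net = ns_get(n);
}

/* Unregistration drops it (kernel code would call put_net()). */
static void channel_unregister(struct channel *ch)
{
    ns_put(ch->chan_net);
    ch->chan_net = NULL;
}
```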
1,998 | dosfstools | e8eff147e9da1185f9afd5b25948153a3b97cf52 | https://github.com/dosfstools/dosfstools | https://github.com/dosfstools/dosfstools/commit/e8eff147e9da1185f9afd5b25948153a3b97cf52 | read_boot(): Handle excessive FAT size specifications
The variable used for storing the FAT size (in bytes) was an unsigned
int. Since the size in sectors read from the BPB was not sufficiently
checked, this could end up being zero after multiplying it with the
sector size while some offsets still stayed excessive. Ultimately it
would cause segfaults when accessing FAT entries for which no memory
was allocated.
Make it more robust by changing the types used to store FAT size to
off_t and by aborting if there is no room for data clusters. Additionally
check that FAT size is not specified as zero.
Fixes #25 and fixes #26.
Reported-by: Hanno Böck
Signed-off-by: Andreas Bombe <aeb@debian.org> | 1 | void read_boot(DOS_FS * fs)
{
struct boot_sector b;
unsigned total_sectors;
unsigned short logical_sector_size, sectors;
unsigned fat_length;
unsigned total_fat_entries;
off_t data_size;
fs_read(0, sizeof(b), &b);
logical_sector_size = GET_UNALIGNED_W(b.sector_size);
if (!logical_sector_size)
die("Logical sector size is zero.");
/* This was moved up because it's the first thing that will fail */
/* if the platform needs special handling of unaligned multibyte accesses */
/* but such handling isn't being provided. See GET_UNALIGNED_W() above. */
if (logical_sector_size & (SECTOR_SIZE - 1))
die("Logical sector size (%d bytes) is not a multiple of the physical "
"sector size.", logical_sector_size);
fs->cluster_size = b.cluster_size * logical_sector_size;
if (!fs->cluster_size)
die("Cluster size is zero.");
if (b.fats != 2 && b.fats != 1)
die("Currently, only 1 or 2 FATs are supported, not %d.\n", b.fats);
fs->nfats = b.fats;
sectors = GET_UNALIGNED_W(b.sectors);
total_sectors = sectors ? sectors : le32toh(b.total_sect);
if (verbose)
printf("Checking we can access the last sector of the filesystem\n");
/* Can't access last odd sector anyway, so round down */
fs_test((off_t)((total_sectors & ~1) - 1) * logical_sector_size,
logical_sector_size);
fat_length = le16toh(b.fat_length) ?
le16toh(b.fat_length) : le32toh(b.fat32_length);
fs->fat_start = (off_t)le16toh(b.reserved) * logical_sector_size;
fs->root_start = ((off_t)le16toh(b.reserved) + b.fats * fat_length) *
logical_sector_size;
fs->root_entries = GET_UNALIGNED_W(b.dir_entries);
fs->data_start = fs->root_start + ROUND_TO_MULTIPLE(fs->root_entries <<
MSDOS_DIR_BITS,
logical_sector_size);
data_size = (off_t)total_sectors * logical_sector_size - fs->data_start;
fs->data_clusters = data_size / fs->cluster_size;
fs->root_cluster = 0; /* indicates standard, pre-FAT32 root dir */
fs->fsinfo_start = 0; /* no FSINFO structure */
fs->free_clusters = -1; /* unknown */
if (!b.fat_length && b.fat32_length) {
fs->fat_bits = 32;
fs->root_cluster = le32toh(b.root_cluster);
if (!fs->root_cluster && fs->root_entries)
/* M$ hasn't specified this, but it looks reasonable: If
* root_cluster is 0 but there is a separate root dir
* (root_entries != 0), we handle the root dir the old way. Give a
* warning, but convertig to a root dir in a cluster chain seems
* to complex for now... */
printf("Warning: FAT32 root dir not in cluster chain! "
"Compatibility mode...\n");
else if (!fs->root_cluster && !fs->root_entries)
die("No root directory!");
else if (fs->root_cluster && fs->root_entries)
printf("Warning: FAT32 root dir is in a cluster chain, but "
"a separate root dir\n"
" area is defined. Cannot fix this easily.\n");
if (fs->data_clusters < FAT16_THRESHOLD)
printf("Warning: Filesystem is FAT32 according to fat_length "
"and fat32_length fields,\n"
" but has only %lu clusters, less than the required "
"minimum of %d.\n"
" This may lead to problems on some systems.\n",
(unsigned long)fs->data_clusters, FAT16_THRESHOLD);
check_fat_state_bit(fs, &b);
fs->backupboot_start = le16toh(b.backup_boot) * logical_sector_size;
check_backup_boot(fs, &b, logical_sector_size);
read_fsinfo(fs, &b, logical_sector_size);
} else if (!atari_format) {
/* On real MS-DOS, a 16 bit FAT is used whenever there would be too
* much clusers otherwise. */
fs->fat_bits = (fs->data_clusters >= FAT12_THRESHOLD) ? 16 : 12;
if (fs->data_clusters >= FAT16_THRESHOLD)
die("Too many clusters (%lu) for FAT16 filesystem.", fs->data_clusters);
check_fat_state_bit(fs, &b);
} else {
/* On Atari, things are more difficult: GEMDOS always uses 12bit FATs
* on floppies, and always 16 bit on harddisks. */
fs->fat_bits = 16; /* assume 16 bit FAT for now */
/* If more clusters than fat entries in 16-bit fat, we assume
* it's a real MSDOS FS with 12-bit fat. */
if (fs->data_clusters + 2 > fat_length * logical_sector_size * 8 / 16 ||
/* if it has one of the usual floppy sizes -> 12bit FAT */
(total_sectors == 720 || total_sectors == 1440 ||
total_sectors == 2880))
fs->fat_bits = 12;
}
/* On FAT32, the high 4 bits of a FAT entry are reserved */
fs->eff_fat_bits = (fs->fat_bits == 32) ? 28 : fs->fat_bits;
fs->fat_size = fat_length * logical_sector_size;
fs->label = calloc(12, sizeof(uint8_t));
if (fs->fat_bits == 12 || fs->fat_bits == 16) {
struct boot_sector_16 *b16 = (struct boot_sector_16 *)&b;
if (b16->extended_sig == 0x29)
memmove(fs->label, b16->label, 11);
else
fs->label = NULL;
} else if (fs->fat_bits == 32) {
if (b.extended_sig == 0x29)
memmove(fs->label, &b.label, 11);
else
fs->label = NULL;
}
total_fat_entries = (uint64_t)fs->fat_size * 8 / fs->fat_bits;
if (fs->data_clusters > total_fat_entries - 2)
die("Filesystem has %u clusters but only space for %u FAT entries.",
fs->data_clusters, total_fat_entries - 2);
if (!fs->root_entries && !fs->root_cluster)
die("Root directory has zero size.");
if (fs->root_entries & (MSDOS_DPS - 1))
die("Root directory (%d entries) doesn't span an integral number of "
"sectors.", fs->root_entries);
if (logical_sector_size & (SECTOR_SIZE - 1))
die("Logical sector size (%d bytes) is not a multiple of the physical "
"sector size.", logical_sector_size);
#if 0 /* linux kernel doesn't check that either */
/* ++roman: On Atari, these two fields are often left uninitialized */
if (!atari_format && (!b.secs_track || !b.heads))
die("Invalid disk format in boot sector.");
#endif
if (verbose)
dump_boot(fs, &b, logical_sector_size);
}
| 186,977,352,067,923,180,000,000,000,000,000,000,000 | boot.c | 197,145,767,489,226,020,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2016-4804 | The read_boot function in boot.c in dosfstools before 4.0 allows attackers to cause a denial of service (crash) via a crafted filesystem, which triggers a heap-based buffer overflow in the (1) read_fat function or an out-of-bounds heap read in (2) get_fat function. | https://nvd.nist.gov/vuln/detail/CVE-2016-4804 |
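Editor's note: the commit message lists the hardening steps — store the FAT size in an off_t, reject a FAT length of zero, and abort when no room is left for data clusters. A condensed sketch of those checks; die() and the parameters are illustrative, not the dosfstools interfaces:

```c
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

static void die(const char *msg) { fprintf(stderr, "%s\n", msg); exit(1); }

/* Widen to off_t before multiplying, and refuse a zero-length FAT. */
static off_t checked_fat_size(unsigned fat_length_sectors, unsigned sector_size)
{
    if (fat_length_sectors == 0)
        die("FAT size is zero.");
    return (off_t)fat_length_sectors * sector_size;
}

/* Abort when the data area would be empty or negative instead of
 * carrying a nonsensical value into later cluster arithmetic. */
static off_t checked_data_size(off_t total_bytes, off_t data_start)
{
    off_t data_size = total_bytes - data_start;

    if (data_size <= 0)
        die("No space left for data clusters.");
    return data_size;
}
```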
2,001 | linux | 79e48650320e6fba48369fccf13fd045315b19b8 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/79e48650320e6fba48369fccf13fd045315b19b8 | net: fix a kernel infoleak in x25 module
The stack object "dte_facilities" is allocated in x25_rx_call_request()
and is supposed to be initialized in x25_negotiate_facilities().
However, 5 fields (8 bytes in total) are not initialized. This
object is then copied to userland via copy_to_user, so an infoleak
occurs.
Signed-off-by: Kangjie Lu <kjlu@gatech.edu>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
struct x25_facilities *new, struct x25_dte_facilities *dte)
{
struct x25_sock *x25 = x25_sk(sk);
struct x25_facilities *ours = &x25->facilities;
struct x25_facilities theirs;
int len;
memset(&theirs, 0, sizeof(theirs));
memcpy(new, ours, sizeof(*new));
len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
if (len < 0)
return len;
/*
* They want reverse charging, we won't accept it.
*/
if ((theirs.reverse & 0x01 ) && (ours->reverse & 0x01)) {
SOCK_DEBUG(sk, "X.25: rejecting reverse charging request\n");
return -1;
}
new->reverse = theirs.reverse;
if (theirs.throughput) {
int theirs_in = theirs.throughput & 0x0f;
int theirs_out = theirs.throughput & 0xf0;
int ours_in = ours->throughput & 0x0f;
int ours_out = ours->throughput & 0xf0;
if (!ours_in || theirs_in < ours_in) {
SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n");
new->throughput = (new->throughput & 0xf0) | theirs_in;
}
if (!ours_out || theirs_out < ours_out) {
SOCK_DEBUG(sk,
"X.25: outbound throughput negotiated\n");
new->throughput = (new->throughput & 0x0f) | theirs_out;
}
}
if (theirs.pacsize_in && theirs.pacsize_out) {
if (theirs.pacsize_in < ours->pacsize_in) {
SOCK_DEBUG(sk, "X.25: packet size inwards negotiated down\n");
new->pacsize_in = theirs.pacsize_in;
}
if (theirs.pacsize_out < ours->pacsize_out) {
SOCK_DEBUG(sk, "X.25: packet size outwards negotiated down\n");
new->pacsize_out = theirs.pacsize_out;
}
}
if (theirs.winsize_in && theirs.winsize_out) {
if (theirs.winsize_in < ours->winsize_in) {
SOCK_DEBUG(sk, "X.25: window size inwards negotiated down\n");
new->winsize_in = theirs.winsize_in;
}
if (theirs.winsize_out < ours->winsize_out) {
SOCK_DEBUG(sk, "X.25: window size outwards negotiated down\n");
new->winsize_out = theirs.winsize_out;
}
}
return len;
}
| 288,890,073,918,554,100,000,000,000,000,000,000,000 | x25_facilities.c | 175,536,494,937,069,540,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2016-4580 | The x25_negotiate_facilities function in net/x25/x25_facilities.c in the Linux kernel before 4.5.5 does not properly initialize a certain data structure, which allows attackers to obtain sensitive information from kernel stack memory via an X.25 Call Request. | https://nvd.nist.gov/vuln/detail/CVE-2016-4580 |
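Editor's note: the commit message says 8 bytes of the caller-provided dte structure were never written before the data later reached userland. A sketch of clearing the whole output structure at the top of the negotiation routine; the struct layout shown is a stand-in, not the real x25_dte_facilities:

```c
#include <string.h>
#include <stdint.h>

struct dte_facilities {         /* stand-in; the real struct has more fields */
    uint16_t delay_cumul;
    uint16_t delay_target;
    uint8_t  calling_len;
    uint8_t  called_len;
    uint8_t  calling_ae[20];
    uint8_t  called_ae[20];
};

static int negotiate(struct dte_facilities *dte /* , other inputs ... */)
{
    /* Clear every byte up front so fields the parser never touches
     * (and any padding) cannot leak old stack contents later. */
    memset(dte, 0, sizeof(*dte));

    /* ... parse the incoming facilities and fill in what is present ... */
    return 0;
}
```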
2,002 | linux | e4ec8cc8039a7063e24204299b462bd1383184a5 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/e4ec8cc8039a7063e24204299b462bd1383184a5 | ALSA: timer: Fix leak in events via snd_timer_user_tinterrupt
The stack object “r1” has a total size of 32 bytes. Its fields
“event” and “val” are each followed by 4 bytes of padding. These 8
padding bytes are sent to userspace without being initialized.
Signed-off-by: Kangjie Lu <kjlu@gatech.edu>
Signed-off-by: Takashi Iwai <tiwai@suse.de> | 1 | static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread *r, r1;
struct timespec tstamp;
int prev, append = 0;
memset(&tstamp, 0, sizeof(tstamp));
spin_lock(&tu->qlock);
if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) |
(1 << SNDRV_TIMER_EVENT_TICK))) == 0) {
spin_unlock(&tu->qlock);
return;
}
if (tu->last_resolution != resolution || ticks > 0) {
if (timer_tstamp_monotonic)
ktime_get_ts(&tstamp);
else
getnstimeofday(&tstamp);
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
tu->last_resolution != resolution) {
r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
r1.tstamp = tstamp;
r1.val = resolution;
snd_timer_user_append_to_tqueue(tu, &r1);
tu->last_resolution = resolution;
append++;
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0)
goto __wake;
if (ticks == 0)
goto __wake;
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->tqueue[prev];
if (r->event == SNDRV_TIMER_EVENT_TICK) {
r->tstamp = tstamp;
r->val += ticks;
append++;
goto __wake;
}
}
r1.event = SNDRV_TIMER_EVENT_TICK;
r1.tstamp = tstamp;
r1.val = ticks;
snd_timer_user_append_to_tqueue(tu, &r1);
append++;
__wake:
spin_unlock(&tu->qlock);
if (append == 0)
return;
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
| 124,304,214,797,495,180,000,000,000,000,000,000,000 | timer.c | 102,836,771,800,390,000,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2016-4578 | sound/core/timer.c in the Linux kernel through 4.6 does not initialize certain r1 data structures, which allows local users to obtain sensitive information from kernel stack memory via crafted use of the ALSA timer interface, related to the (1) snd_timer_user_ccallback and (2) snd_timer_user_tinterrupt functions. | https://nvd.nist.gov/vuln/detail/CVE-2016-4578 |
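Editor's note: this record and the next one (snd_timer_user_params) share the same root cause per their commit messages — the on-stack snd_timer_tread has padding after event and val that is never written, yet the whole structure is queued for userspace. A sketch of zeroing the object before filling it; the struct here is a stand-in whose layout merely reproduces the padding situation:

```c
#include <string.h>
#include <stdint.h>

struct tread {                  /* stand-in for struct snd_timer_tread */
    int      event;             /* 4 bytes of padding can follow this field */
    int64_t  tstamp_sec;
    int64_t  tstamp_nsec;
    uint32_t val;               /* and 4 more after this one */
};

static void queue_event(struct tread *slot, int event, uint32_t val)
{
    struct tread r;

    memset(&r, 0, sizeof(r));   /* padding bytes are now zero, not stale stack data */
    r.event = event;
    r.val   = val;

    *slot = r;                  /* the driver appends this to the user-visible queue */
}
```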
2,003 | linux | cec8f96e49d9be372fdb0c3836dcf31ec71e457e | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/cec8f96e49d9be372fdb0c3836dcf31ec71e457e | ALSA: timer: Fix leak in SNDRV_TIMER_IOCTL_PARAMS
The stack object “tread” has a total size of 32 bytes. Its fields
“event” and “val” are each followed by 4 bytes of padding. These 8
padding bytes are sent to userspace without being initialized.
Signed-off-by: Kangjie Lu <kjlu@gatech.edu>
Signed-off-by: Takashi Iwai <tiwai@suse.de> | 1 | static int snd_timer_user_params(struct file *file,
struct snd_timer_params __user *_params)
{
struct snd_timer_user *tu;
struct snd_timer_params params;
struct snd_timer *t;
struct snd_timer_read *tr;
struct snd_timer_tread *ttr;
int err;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
if (copy_from_user(¶ms, _params, sizeof(params)))
return -EFAULT;
if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) {
err = -EINVAL;
goto _end;
}
if (params.queue_size > 0 &&
(params.queue_size < 32 || params.queue_size > 1024)) {
err = -EINVAL;
goto _end;
}
if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)|
(1<<SNDRV_TIMER_EVENT_TICK)|
(1<<SNDRV_TIMER_EVENT_START)|
(1<<SNDRV_TIMER_EVENT_STOP)|
(1<<SNDRV_TIMER_EVENT_CONTINUE)|
(1<<SNDRV_TIMER_EVENT_PAUSE)|
(1<<SNDRV_TIMER_EVENT_SUSPEND)|
(1<<SNDRV_TIMER_EVENT_RESUME)|
(1<<SNDRV_TIMER_EVENT_MSTART)|
(1<<SNDRV_TIMER_EVENT_MSTOP)|
(1<<SNDRV_TIMER_EVENT_MCONTINUE)|
(1<<SNDRV_TIMER_EVENT_MPAUSE)|
(1<<SNDRV_TIMER_EVENT_MSUSPEND)|
(1<<SNDRV_TIMER_EVENT_MRESUME))) {
err = -EINVAL;
goto _end;
}
snd_timer_stop(tu->timeri);
spin_lock_irq(&t->lock);
tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO|
SNDRV_TIMER_IFLG_EXCLUSIVE|
SNDRV_TIMER_IFLG_EARLY_EVENT);
if (params.flags & SNDRV_TIMER_PSFLG_AUTO)
tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO;
if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE;
if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT;
spin_unlock_irq(&t->lock);
if (params.queue_size > 0 &&
(unsigned int)tu->queue_size != params.queue_size) {
if (tu->tread) {
ttr = kmalloc(params.queue_size * sizeof(*ttr),
GFP_KERNEL);
if (ttr) {
kfree(tu->tqueue);
tu->queue_size = params.queue_size;
tu->tqueue = ttr;
}
} else {
tr = kmalloc(params.queue_size * sizeof(*tr),
GFP_KERNEL);
if (tr) {
kfree(tu->queue);
tu->queue_size = params.queue_size;
tu->queue = tr;
}
}
}
tu->qhead = tu->qtail = tu->qused = 0;
if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
if (tu->tread) {
struct snd_timer_tread tread;
tread.event = SNDRV_TIMER_EVENT_EARLY;
tread.tstamp.tv_sec = 0;
tread.tstamp.tv_nsec = 0;
tread.val = 0;
snd_timer_user_append_to_tqueue(tu, &tread);
} else {
struct snd_timer_read *r = &tu->queue[0];
r->resolution = 0;
r->ticks = 0;
tu->qused++;
tu->qtail++;
}
}
tu->filter = params.filter;
tu->ticks = params.ticks;
err = 0;
_end:
if (copy_to_user(_params, ¶ms, sizeof(params)))
return -EFAULT;
return err;
}
| 239,787,913,206,909,500,000,000,000,000,000,000,000 | timer.c | 67,637,640,137,585,530,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2016-4569 | The snd_timer_user_params function in sound/core/timer.c in the Linux kernel through 4.6 does not initialize a certain data structure, which allows local users to obtain sensitive information from kernel stack memory via crafted use of the ALSA timer interface. | https://nvd.nist.gov/vuln/detail/CVE-2016-4569 |
2,010 | ImageMagick | 726812fa2fa7ce16bcf58f6e115f65427a1c0950 | https://github.com/ImageMagick/ImageMagick | https://github.com/ImageMagick/ImageMagick/commit/726812fa2fa7ce16bcf58f6e115f65427a1c0950 | Prevent buffer overflow in magick/draw.c | 1 | static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
DrawInfo
*clone_info;
double
length,
maximum_length,
offset,
scale,
total_length;
MagickStatusType
status;
PrimitiveInfo
*dash_polygon;
register ssize_t
i;
register double
dx,
dy;
size_t
number_vertices;
ssize_t
j,
n;
assert(draw_info != (const DrawInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
number_vertices=(size_t) i;
dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(2UL*number_vertices+1UL),sizeof(*dash_polygon));
if (dash_polygon == (PrimitiveInfo *) NULL)
return(MagickFalse);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->miterlimit=0;
dash_polygon[0]=primitive_info[0];
scale=ExpandAffine(&draw_info->affine);
length=scale*(draw_info->dash_pattern[0]-0.5);
offset=draw_info->dash_offset != 0.0 ? scale*draw_info->dash_offset : 0.0;
j=1;
for (n=0; offset > 0.0; j=0)
{
if (draw_info->dash_pattern[n] <= 0.0)
break;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
if (offset > length)
{
offset-=length;
n++;
length=scale*(draw_info->dash_pattern[n]+0.5);
continue;
}
if (offset < length)
{
length-=offset;
offset=0.0;
break;
}
offset=0.0;
n++;
}
status=MagickTrue;
maximum_length=0.0;
total_length=0.0;
for (i=1; (i < number_vertices) && (length >= 0.0); i++)
{
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot((double) dx,dy);
if (length == 0.0)
{
n++;
if (draw_info->dash_pattern[n] == 0.0)
n=0;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
}
for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
{
total_length+=length;
if ((n & 0x01) != 0)
{
dash_polygon[0]=primitive_info[0];
dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length/maximum_length);
dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length/maximum_length);
j=1;
}
else
{
if ((j+1) > (ssize_t) (2*number_vertices))
break;
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length/maximum_length);
dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length/maximum_length);
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
}
n++;
if (draw_info->dash_pattern[n] == 0.0)
n=0;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
}
length-=(maximum_length-total_length);
if ((n & 0x01) != 0)
continue;
dash_polygon[j]=primitive_info[i];
dash_polygon[j].coordinates=1;
j++;
}
if ((total_length <= maximum_length) && ((n & 0x01) == 0) && (j > 1))
{
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x+=MagickEpsilon;
dash_polygon[j].point.y+=MagickEpsilon;
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
}
dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
return(status != 0 ? MagickTrue : MagickFalse);
}
| 278,418,562,021,844,360,000,000,000,000,000,000,000 | None | null | [
"CWE-119"
] | CVE-2016-4564 | The DrawImage function in MagickCore/draw.c in ImageMagick before 6.9.4-0 and 7.x before 7.0.1-2 makes an incorrect function call in attempting to locate the next token, which allows remote attackers to cause a denial of service (buffer overflow and application crash) or possibly have unspecified other impact via a crafted file. | https://nvd.nist.gov/vuln/detail/CVE-2016-4564 |
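
In the DrawDashPolygon record above, dash_polygon is allocated with room for 2*number_vertices+1 entries, yet only one of the paths that appends to it re-checks the index before writing. As a generic illustration of the defensive pattern (not ImageMagick's actual fix), the standalone C program below refuses to append once a caller-supplied capacity is reached; every identifier in it is hypothetical.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct { double x, y; } Point;

/* Append one point only if the array still has room. The caller passes the
   capacity it allocated, so an overrun becomes a visible failure instead of
   an out-of-bounds write. */
static bool append_point(Point *items, size_t capacity, size_t *count, Point p)
{
  if (*count >= capacity)
    return false;
  items[(*count)++] = p;
  return true;
}

int main(void)
{
  Point buffer[4];
  size_t used = 0;

  for (int i = 0; i < 10; i++)
    if (!append_point(buffer, 4, &used, (Point) { (double) i, (double) i }))
      fprintf(stderr, "append %d rejected: buffer full\n", i);
  printf("stored %zu of 10 points\n", used);
  return 0;
}
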
2,011 | ImageMagick | 726812fa2fa7ce16bcf58f6e115f65427a1c0950 | https://github.com/ImageMagick/ImageMagick | https://github.com/ImageMagick/ImageMagick/commit/726812fa2fa7ce16bcf58f6e115f65427a1c0950 | Prevent buffer overflow in magick/draw.c | 1 | MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
DrawInfo
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
double
angle,
factor,
primitive_extent;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
length,
number_points,
number_stops;
ssize_t
j,
k,
n;
StopInfo
*stops;
/*
Ensure the annotation info is valid.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (*draw_info->primitive != '@')
primitive=AcquireString(draw_info->primitive);
else
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"MVG",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(
sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=6553;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
GetNextToken(q,&q,extent,keyword);
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,(char **) NULL);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,(char **) NULL);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,(char **) NULL);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,(char **) NULL);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,(char **) NULL);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,(char **) NULL);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("clip-path",keyword) == 0)
{
/*
Create clip mask.
*/
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->clip_mask,token);
(void) DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
status=MagickFalse;
else
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
              GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
status=MagickFalse;
else
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (status == MagickFalse)
{
ImageInfo
*pattern_info;
pattern_info=AcquireImageInfo();
(void) CopyMagickString(pattern_info->filename,token,
MagickPathExtent);
graphic_context[n]->fill_pattern=ReadImage(pattern_info,
exception);
CatchException(exception);
pattern_info=DestroyImageInfo(pattern_info);
}
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->fill.alpha=(double) QuantumRange*
factor*StringToDouble(token,(char **) NULL);
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
status=MagickFalse;
else
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *)
RelinquishMagickMemory(graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,(char **) NULL);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
status=MagickFalse;
else
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
status=MagickFalse;
else
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
status=MagickFalse;
else
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
status=MagickFalse;
else
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
(char **) NULL);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("line",keyword) == 0)
primitive_type=LinePrimitive;
else
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->alpha=ClampToQuantum(QuantumRange*(1.0-((1.0-
QuantumScale*graphic_context[n]->alpha)*factor*
StringToDouble(token,(char **) NULL))));
graphic_context[n]->fill.alpha=(double) graphic_context[n]->alpha;
graphic_context[n]->stroke.alpha=(double) graphic_context[n]->alpha;
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
break;
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
n=0;
break;
}
if (graphic_context[n]->clip_mask != (char *) NULL)
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
(void) SetImageMask(image,ReadPixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("pattern",token) == 0)
break;
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("clip-path",token) == 0)
{
char
name[MagickPathExtent];
GetNextToken(q,&q,extent,token);
(void) FormatLocaleString(name,MagickPathExtent,"%s",token);
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) SetImageArtifact(image,name,token);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,(char **) NULL);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,(char **) NULL);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,(char **) NULL);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,(char **) NULL);
if (LocaleCompare(type,"radial") == 0)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
pattern_bounds;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
pattern_bounds.x=(ssize_t) ceil(StringToDouble(token,
(char **) NULL)-0.5);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
pattern_bounds.y=(ssize_t) ceil(StringToDouble(token,
(char **) NULL)-0.5);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
pattern_bounds.width=(size_t) floor(StringToDouble(token,
(char **) NULL)+0.5);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
pattern_bounds.height=(size_t) floor(StringToDouble(token,
(char **) NULL)+0.5);
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double)pattern_bounds.width,
(double)pattern_bounds.height,(double)pattern_bounds.x,
(double)pattern_bounds.y);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
break;
}
if (LocaleCompare("defs",token) == 0)
break;
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,(char **) NULL);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,(char **) NULL);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,(char **) NULL);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,(char **) NULL);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,(char **) NULL);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
GetNextToken(q,&q,extent,token);
(void) QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
GetNextToken(q,&q,extent,token);
stops[number_stops-1].offset=StringToDouble(token,(char **) NULL);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (status == MagickFalse)
{
ImageInfo
*pattern_info;
pattern_info=AcquireImageInfo();
(void) CopyMagickString(pattern_info->filename,token,
MagickPathExtent);
graphic_context[n]->stroke_pattern=ReadImage(pattern_info,
exception);
CatchException(exception);
pattern_info=DestroyImageInfo(pattern_info);
}
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=
StringToLong(token) != 0 ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2UL*x+1UL),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
for (j=0; j < x; j++)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
(char **) NULL);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,
(char **) NULL);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
status=MagickFalse;
else
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
status=MagickFalse;
else
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->stroke.alpha=(double) QuantumRange*
factor*StringToDouble(token,(char **) NULL);
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_width=StringToDouble(token,
(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
status=MagickFalse;
else
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
status=MagickFalse;
else
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=
StringToLong(token) != 0 ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,(char **) NULL);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
(char **) NULL)-0.5);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
(char **) NULL)-0.5);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,(char **) NULL)+0.5);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,(char **) NULL)+0.5);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((affine.sx != 1.0) || (affine.rx != 0.0) || (affine.ry != 0.0) ||
(affine.sy != 1.0) || (affine.tx != 0.0) || (affine.ty != 0.0))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",
(int) (q-p),p);
continue;
}
/*
Parse the primitive attributes.
*/
i=0;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,(char **) NULL);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,(char **) NULL);
GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
i++;
if (i < (ssize_t) number_points)
continue;
number_points<<=1;
primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info,
(size_t) number_points,sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
break;
}
}
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].text=(char *) NULL;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
length=primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
length*=5;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
length*=5;
length+=2*((size_t) ceil((double) MagickPI*radius))+6*BezierQuantum+360;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates > 107)
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
length=BezierQuantum*primitive_info[j].coordinates;
break;
}
case PathPrimitive:
{
char
*s,
*t;
GetNextToken(q,&q,extent,token);
length=1;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
length++;
}
length=length*BezierQuantum/2;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
length=2*((size_t) ceil((double) MagickPI*radius))+6*BezierQuantum+360;
break;
}
default:
break;
}
if ((size_t) (i+length) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=length+1;
primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info,
(size_t) number_points,sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
}
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
TraceRoundRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
TraceArc(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
TraceEllipse(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceCircle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
break;
case PolygonPrimitive:
{
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
TraceBezier(primitive_info+j,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
i=(ssize_t) (j+TracePath(primitive_info+j,token));
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
status=MagickFalse;
else
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
GetNextToken(q,&q,extent,token);
primitive_info[j].text=AcquireString(token);
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
primitive_info[j].text=AcquireString(token);
break;
}
}
if (primitive_info == (PrimitiveInfo *) NULL)
break;
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
if (primitive_info->text != (char *) NULL)
primitive_info->text=(char *) RelinquishMagickMemory(
primitive_info->text);
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
| 214,572,375,985,539,270,000,000,000,000,000,000,000 | None | null | [
"CWE-119"
] | CVE-2016-4564 | The DrawImage function in MagickCore/draw.c in ImageMagick before 6.9.4-0 and 7.x before 7.0.1-2 makes an incorrect function call in attempting to locate the next token, which allows remote attackers to cause a denial of service (buffer overflow and application crash) or possibly have unspecified other impact via a crafted file. | https://nvd.nist.gov/vuln/detail/CVE-2016-4564 |
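
The CVE text for the DrawImage record above attributes the overflow to an incorrect function call made while locating the next token of the drawing primitive string. Independent of how ImageMagick's own GetNextToken behaves, the usual defence when repeatedly extracting tokens into a fixed buffer is to bound every copy by the destination's extent and always NUL-terminate. The helper below is a deliberately generic, hypothetical sketch of that idea; none of its names come from ImageMagick, and extent is assumed to be at least 1.

#include <ctype.h>
#include <stdio.h>

/* Copy the next whitespace- or comma-delimited token from *cursor into dst,
   writing at most extent bytes including the terminating NUL (extent >= 1),
   and advance *cursor past the token. Over-long tokens are truncated. */
static void next_token(const char **cursor, char *dst, size_t extent)
{
  const char *p = *cursor;
  size_t n = 0;

  while ((*p != '\0') && (isspace((unsigned char) *p) || (*p == ',')))
    p++;
  while ((*p != '\0') && (isspace((unsigned char) *p) == 0) && (*p != ','))
    {
      if (n < (extent-1))
        dst[n++] = *p;
      p++;
    }
  dst[n] = '\0';
  *cursor = p;
}

int main(void)
{
  const char *primitive = "stroke-width 2.5, fill red";
  char token[8];

  while (*primitive != '\0')
    {
      next_token(&primitive, token, sizeof(token));
      if (*token != '\0')
        printf("token: %s\n", token);
    }
  return 0;
}
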
2,014 | ImageMagick | 726812fa2fa7ce16bcf58f6e115f65427a1c0950 | https://github.com/ImageMagick/ImageMagick | https://github.com/ImageMagick/ImageMagick/commit/726812fa2fa7ce16bcf58f6e115f65427a1c0950 | Prevent buffer overflow in magick/draw.c | 1 | static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info)
{
typedef struct _LineSegment
{
double
p,
q;
} LineSegment;
LineSegment
dx,
dy,
inverse_slope,
slope,
theta;
MagickBooleanType
closed_path;
double
delta_theta,
dot_product,
mid,
miterlimit;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*path_p,
*path_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
register ssize_t
i;
size_t
arc_segments,
max_strokes,
number_vertices;
ssize_t
j,
n,
p,
q;
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
max_strokes=2*number_vertices+6*BezierQuantum+360;
path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
sizeof(*path_p));
path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
sizeof(*path_q));
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL) ||
(polygon_primitive == (PrimitiveInfo *) NULL))
return((PrimitiveInfo *) NULL);
(void) CopyMagickMemory(polygon_primitive,primitive_info,(size_t)
number_vertices*sizeof(*polygon_primitive));
closed_path=
(primitive_info[number_vertices-1].point.x == primitive_info[0].point.x) &&
(primitive_info[number_vertices-1].point.y == primitive_info[0].point.y) ?
MagickTrue : MagickFalse;
if ((draw_info->linejoin == RoundJoin) ||
((draw_info->linejoin == MiterJoin) && (closed_path != MagickFalse)))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
dx.p=0.0;
dy.p=0.0;
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
n=(ssize_t) number_vertices-1L;
slope.p=DrawEpsilonReciprocal(dx.p)*dy.p;
inverse_slope.p=(-1.0*DrawEpsilonReciprocal(slope.p));
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*
mid*mid);
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
TraceSquareLinecap(polygon_primitive,number_vertices,mid);
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
p=0;
q=0;
path_q[p++]=box_q[0];
path_p[q++]=box_p[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
dot_product=dx.q*dx.q+dy.q*dy.q;
if (dot_product < 0.25)
continue;
slope.q=DrawEpsilonReciprocal(dx.q)*dy.q;
inverse_slope.q=(-1.0*DrawEpsilonReciprocal(slope.q));
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
if (q >= (ssize_t) (max_strokes-6*BezierQuantum-360))
{
max_strokes+=6*BezierQuantum+360;
path_p=(PointInfo *) ResizeQuantumMemory(path_p,(size_t) max_strokes,
sizeof(*path_p));
path_q=(PointInfo *) ResizeQuantumMemory(path_q,(size_t) max_strokes,
sizeof(*path_q));
if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL))
{
polygon_primitive=(PrimitiveInfo *)
RelinquishMagickMemory(polygon_primitive);
return((PrimitiveInfo *) NULL);
}
}
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_p[p++]=box_p[4];
else
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
path_q[q++]=box_q[4];
path_p[p++]=box_p[4];
}
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_p[p++]=box_p[4];
else
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=(double) (2.0*MagickPI);
arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
(2.0*sqrt((double) (1.0/mid)))));
path_q[q].x=box_q[1].x;
path_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
path_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
path_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
path_q[q++]=box_q[2];
break;
}
default:
break;
}
else
switch (draw_info->linejoin)
{
case BevelJoin:
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_q[q++]=box_q[4];
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
path_q[q++]=box_q[4];
path_p[p++]=box_p[4];
}
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_q[q++]=box_q[4];
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=(double) (2.0*MagickPI);
arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
(2.0*sqrt((double) (1.0/mid)))));
path_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
path_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
path_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
path_p[p++]=box_p[2];
break;
}
default:
break;
}
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
path_p[p++]=box_p[1];
path_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon != (PrimitiveInfo *) NULL)
{
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=path_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
}
path_p=(PointInfo *) RelinquishMagickMemory(path_p);
path_q=(PointInfo *) RelinquishMagickMemory(path_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
| 315,690,379,769,147,360,000,000,000,000,000,000,000 | None | null | [
"CWE-119"
] | CVE-2016-4564 | The DrawImage function in MagickCore/draw.c in ImageMagick before 6.9.4-0 and 7.x before 7.0.1-2 makes an incorrect function call in attempting to locate the next token, which allows remote attackers to cause a denial of service (buffer overflow and application crash) or possibly have unspecified other impact via a crafted file. | https://nvd.nist.gov/vuln/detail/CVE-2016-4564 |
2,020 | linux | 8358b02bf67d3a5d8a825070e1aa73f25fb2e4c7 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/8358b02bf67d3a5d8a825070e1aa73f25fb2e4c7 | bpf: fix double-fdput in replace_map_fd_with_map_ptr()
When bpf(BPF_PROG_LOAD, ...) was invoked with a BPF program whose bytecode
references a non-map file descriptor as a map file descriptor, the error
handling code called fdput() twice instead of once (in __bpf_map_get() and
in replace_map_fd_with_map_ptr()). If the file descriptor table of the
current task is shared, this causes f_count to be decremented too much,
allowing the struct file to be freed while it is still in use
(use-after-free). This can be exploited to gain root privileges by an
unprivileged user.
This bug was introduced in
commit 0246e64d9a5f ("bpf: handle pseudo BPF_LD_IMM64 insn"), but is only
exploitable since
commit 1be7f75d1668 ("bpf: enable non-root eBPF programs") because
previously, CAP_SYS_ADMIN was required to reach the vulnerable code.
(posted publicly according to request by maintainer)
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static int replace_map_fd_with_map_ptr(struct verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i, j;
for (i = 0; i < insn_cnt; i++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX &&
(BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
verbose("BPF_LDX uses reserved fields\n");
return -EINVAL;
}
if (BPF_CLASS(insn->code) == BPF_STX &&
((BPF_MODE(insn->code) != BPF_MEM &&
BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
verbose("BPF_STX uses reserved fields\n");
return -EINVAL;
}
if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
struct bpf_map *map;
struct fd f;
if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
insn[1].off != 0) {
verbose("invalid bpf_ld_imm64 insn\n");
return -EINVAL;
}
if (insn->src_reg == 0)
/* valid generic load 64-bit imm */
goto next_insn;
if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
verbose("unrecognized bpf_ld_imm64 insn\n");
return -EINVAL;
}
f = fdget(insn->imm);
map = __bpf_map_get(f);
if (IS_ERR(map)) {
verbose("fd %d is not pointing to valid bpf_map\n",
insn->imm);
fdput(f);
return PTR_ERR(map);
}
/* store map pointer inside BPF_LD_IMM64 instruction */
insn[0].imm = (u32) (unsigned long) map;
insn[1].imm = ((u64) (unsigned long) map) >> 32;
/* check whether we recorded this map already */
for (j = 0; j < env->used_map_cnt; j++)
if (env->used_maps[j] == map) {
fdput(f);
goto next_insn;
}
if (env->used_map_cnt >= MAX_USED_MAPS) {
fdput(f);
return -E2BIG;
}
/* remember this map */
env->used_maps[env->used_map_cnt++] = map;
/* hold the map. If the program is rejected by verifier,
* the map will be released by release_maps() or it
* will be used by the valid program until it's unloaded
* and all maps are released in free_bpf_prog_info()
*/
bpf_map_inc(map, false);
fdput(f);
next_insn:
insn++;
i++;
}
}
/* now all pseudo BPF_LD_IMM64 instructions load valid
* 'struct bpf_map *' into a register instead of user map_fd.
* These pointers will be used later by verifier to validate map access.
*/
return 0;
}
| 273,118,594,432,448,770,000,000,000,000,000,000,000 | verifier.c | 191,264,986,849,802,150,000,000,000,000,000,000,000 | [
"CWE-703"
] | CVE-2016-4557 | The replace_map_fd_with_map_ptr function in kernel/bpf/verifier.c in the Linux kernel before 4.5.5 does not properly maintain an fd data structure, which allows local users to gain privileges or cause a denial of service (use-after-free) via crafted BPF instructions that reference an incorrect file descriptor. | https://nvd.nist.gov/vuln/detail/CVE-2016-4557 |
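
The commit message above describes the defect precisely: when __bpf_map_get() rejected the file, both the callee and replace_map_fd_with_map_ptr() dropped the same struct fd reference, so the file's refcount fell twice for a single fdget(). Rather than restate kernel internals, the standalone program below illustrates the ownership rule with an invented reference-counted handle: a failing lookup leaves the caller's reference untouched, so the caller releases it exactly once on every path. All names here are hypothetical.

#include <stdio.h>

struct handle {
  int refs;
};

static void handle_get(struct handle *h) { h->refs++; }

static void handle_put(struct handle *h)
{
  h->refs--;
  if (h->refs < 0)
    fprintf(stderr, "refcount underflow: this would be a use-after-free\n");
}

/* A lookup that can fail. It deliberately does NOT drop the caller's
 * reference on failure; that keeps a single owner and a single put. */
static int lookup_map(struct handle *h, int want_valid)
{
  (void) h;                      /* a real lookup would inspect the handle */
  return want_valid ? 0 : -1;
}

static void use_map(struct handle *h, int want_valid)
{
  handle_get(h);                 /* analogous to fdget() */
  if (lookup_map(h, want_valid) != 0) {
    handle_put(h);               /* the only put on the error path */
    return;
  }
  /* ... use the map ... */
  handle_put(h);                 /* the only put on the success path */
}

int main(void)
{
  struct handle h = { .refs = 1 };

  use_map(&h, 1);
  use_map(&h, 0);
  printf("final refcount: %d (expected 1)\n", h.refs);
  return 0;
}

If lookup_map() also called handle_put() on its failure path, the error branch in use_map() would drop the reference a second time, which is the shape of the double fdput() the patch removes.
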
2,021 | linux | 5f8e44741f9f216e33736ea4ec65ca9ac03036e6 | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/5f8e44741f9f216e33736ea4ec65ca9ac03036e6 | net: fix infoleak in rtnetlink
The stack object "map" has a total size of 32 bytes. Its last 4
bytes are padding generated by the compiler. These padding bytes are
not initialized and sent out via "nla_put".
Signed-off-by: Kangjie Lu <kjlu@gatech.edu>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
struct rtnl_link_ifmap map = {
.mem_start = dev->mem_start,
.mem_end = dev->mem_end,
.base_addr = dev->base_addr,
.irq = dev->irq,
.dma = dev->dma,
.port = dev->if_port,
};
if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
return -EMSGSIZE;
return 0;
}
| 88,210,912,937,139,420,000,000,000,000,000,000,000 | rtnetlink.c | 304,302,998,298,579,250,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2016-4486 | The rtnl_fill_link_ifmap function in net/core/rtnetlink.c in the Linux kernel before 4.5.5 does not initialize a certain data structure, which allows local users to obtain sensitive information from kernel stack memory by reading a Netlink message. | https://nvd.nist.gov/vuln/detail/CVE-2016-4486 |
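
The commit message above pins the leak on the trailing padding of struct rtnl_link_ifmap: a designated initializer sets every named member, but C leaves the padding bytes unspecified, so nla_put() can ship stale stack contents. The small user-space program below only illustrates that the padding exists and shows the usual memset() remedy; its struct is an invented stand-in with a similar layout (on a typical 64-bit ABI it carries 4 padding bytes), not the kernel's definition.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented stand-in: three 8-byte members followed by 4 bytes of smaller
   members, so on most 64-bit ABIs the compiler pads the total size back up
   to a multiple of 8. */
struct ifmap_like {
  uint64_t mem_start;
  uint64_t mem_end;
  uint64_t base_addr;
  uint16_t irq;
  uint8_t  dma;
  uint8_t  port;
};

int main(void)
{
  size_t named = 3 * sizeof(uint64_t) + sizeof(uint16_t) + 2 * sizeof(uint8_t);
  struct ifmap_like map;

  printf("sizeof(struct) = %zu, named members = %zu, padding = %zu bytes\n",
         sizeof(map), named, sizeof(map) - named);

  /* Field-by-field assignment (or a designated initializer) is not
     guaranteed to touch the padding bytes; zeroing the whole object first
     makes every byte that is later copied out deterministically zero. */
  memset(&map, 0, sizeof(map));
  map.mem_start = 1;
  map.irq = 5;
  map.dma = 2;
  map.port = 3;
  return 0;
}
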
2,022 | linux | b8670c09f37bdf2847cc44f36511a53afc6161fd | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/b8670c09f37bdf2847cc44f36511a53afc6161fd | net: fix infoleak in llc
The stack object "info" has a total size of 12 bytes. Its last byte
is padding which is not initialized and leaked via "put_cmsg".
Signed-off-by: Kangjie Lu <kjlu@gatech.edu>
Signed-off-by: David S. Miller <davem@davemloft.net> | 1 | static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
{
struct llc_sock *llc = llc_sk(skb->sk);
if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
struct llc_pktinfo info;
info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
llc_pdu_decode_dsap(skb, &info.lpi_sap);
llc_pdu_decode_da(skb, info.lpi_mac);
put_cmsg(msg, SOL_LLC, LLC_OPT_PKTINFO, sizeof(info), &info);
}
}
| 265,988,475,058,809,480,000,000,000,000,000,000,000 | af_llc.c | 114,091,511,487,822,400,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2016-4485 | The llc_cmsg_rcv function in net/llc/af_llc.c in the Linux kernel before 4.5.5 does not initialize a certain data structure, which allows attackers to obtain sensitive information from kernel stack memory by reading a message. | https://nvd.nist.gov/vuln/detail/CVE-2016-4485 |
2,023 | linux | 681fef8380eb818c0b845fca5d2ab1dcbab114ee | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/681fef8380eb818c0b845fca5d2ab1dcbab114ee | USB: usbfs: fix potential infoleak in devio
The stack object "ci" has a total size of 8 bytes. Its last 3 bytes
are padding bytes which are not initialized and leaked to userland
via "copy_to_user".
Signed-off-by: Kangjie Lu <kjlu@gatech.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 1 | static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_connectinfo ci = {
.devnum = ps->dev->devnum,
.slow = ps->dev->speed == USB_SPEED_LOW
};
if (copy_to_user(arg, &ci, sizeof(ci)))
return -EFAULT;
return 0;
}
| 94,713,218,675,784,140,000,000,000,000,000,000,000 | devio.c | 150,665,955,136,899,330,000,000,000,000,000,000,000 | [
"CWE-200"
] | CVE-2016-4482 | The proc_connectinfo function in drivers/usb/core/devio.c in the Linux kernel through 4.6 does not initialize a certain data structure, which allows local users to obtain sensitive information from kernel stack memory via a crafted USBDEVFS_CONNECTINFO ioctl call. | https://nvd.nist.gov/vuln/detail/CVE-2016-4482 |
2,024 | atheme | 87580d767868360d2fed503980129504da84b63e | https://github.com/atheme/atheme | https://github.com/atheme/atheme/commit/87580d767868360d2fed503980129504da84b63e | Do not copy more bytes than were allocated | 1 | void xmlrpc_char_encode(char *outbuffer, const char *s1)
{
long unsigned int i;
unsigned char c;
char buf2[15];
mowgli_string_t *s = mowgli_string_create();
*buf2 = '\0';
*outbuffer = '\0';
if ((!(s1) || (*(s1) == '\0')))
{
return;
}
for (i = 0; s1[i] != '\0'; i++)
{
c = s1[i];
if (c > 127)
{
snprintf(buf2, sizeof buf2, "&#%d;", c);
s->append(s, buf2, strlen(buf2));
}
else if (c == '&')
{
s->append(s, "&amp;", 5);
}
else if (c == '<')
{
s->append(s, "&lt;", 4);
}
else if (c == '>')
{
s->append(s, "&gt;", 4);
}
else if (c == '"')
{
s->append(s, "&quot;", 6);
}
else
{
s->append_char(s, c);
}
}
memcpy(outbuffer, s->str, XMLRPC_BUFSIZE);
}
| 43,536,472,075,997,620,000,000,000,000,000,000,000 | xmlrpclib.c | 209,647,623,324,171,000,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2016-4478 | Buffer overflow in the xmlrpc_char_encode function in modules/transport/xmlrpc/xmlrpclib.c in Atheme before 7.2.7 allows remote attackers to cause a denial of service via vectors related to XMLRPC response encoding. | https://nvd.nist.gov/vuln/detail/CVE-2016-4478 |
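In the record above (CVE-2016-4478), the final memcpy() copies a fixed XMLRPC_BUFSIZE bytes out of the mowgli string buffer no matter how few bytes were actually appended, so it can read past the end of that allocation. A sketch of a bounded, NUL-terminated copy matching the commit title; using s->pos as the accumulated length is an assumption about mowgli_string_t and is not taken from the upstream patch:

    size_t outlen = s->pos;            /* assumed: number of bytes appended so far */

    if (outlen >= XMLRPC_BUFSIZE)
        outlen = XMLRPC_BUFSIZE - 1;   /* never copy more than the output buffer holds */
    memcpy(outbuffer, s->str, outlen);
    outbuffer[outlen] = '\0';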
2,025 | linux | 38327424b40bcebe2de92d07312c89360ac9229a | https://github.com/torvalds/linux | https://github.com/torvalds/linux/commit/38327424b40bcebe2de92d07312c89360ac9229a | KEYS: potential uninitialized variable
If __key_link_begin() failed then "edit" would be uninitialized. I've
added a check to fix that.
This allows a random user to crash the kernel, though it's quite
difficult to achieve. There are three ways it can be done as the user
would have to cause an error to occur in __key_link():
(1) Cause the kernel to run out of memory. In practice, this is difficult
to achieve without ENOMEM cropping up elsewhere and aborting the
attempt.
(2) Revoke the destination keyring between the keyring ID being looked up
and it being tested for revocation. In practice, this is difficult to
time correctly because the KEYCTL_REJECT function can only be used
from the request-key upcall process. Further, users can only make use
of what's in /sbin/request-key.conf, though this does include a
rejection debugging test - which means that the destination keyring
has to be the caller's session keyring in practice.
(3) Have just enough key quota available to create a key, a new session
keyring for the upcall and a link in the session keyring, but not then
sufficient quota to create a link in the nominated destination keyring
so that it fails with EDQUOT.
The bug can be triggered using option (3) above using something like the
following:
echo 80 >/proc/sys/kernel/keys/root_maxbytes
keyctl request2 user debug:fred negate @t
The above sets the quota to something much lower (80) to make the bug
easier to trigger, but this is dependent on the system. Note also that
the name of the keyring created contains a random number that may be
between 1 and 10 characters in size, so may throw the test off by
changing the amount of quota used.
Assuming the failure occurs, something like the following will be seen:
kfree_debugcheck: out of range ptr 6b6b6b6b6b6b6b68h
------------[ cut here ]------------
kernel BUG at ../mm/slab.c:2821!
...
RIP: 0010:[<ffffffff811600f9>] kfree_debugcheck+0x20/0x25
RSP: 0018:ffff8804014a7de8 EFLAGS: 00010092
RAX: 0000000000000034 RBX: 6b6b6b6b6b6b6b68 RCX: 0000000000000000
RDX: 0000000000040001 RSI: 00000000000000f6 RDI: 0000000000000300
RBP: ffff8804014a7df0 R08: 0000000000000001 R09: 0000000000000000
R10: ffff8804014a7e68 R11: 0000000000000054 R12: 0000000000000202
R13: ffffffff81318a66 R14: 0000000000000000 R15: 0000000000000001
...
Call Trace:
kfree+0xde/0x1bc
assoc_array_cancel_edit+0x1f/0x36
__key_link_end+0x55/0x63
key_reject_and_link+0x124/0x155
keyctl_reject_key+0xb6/0xe0
keyctl_negate_key+0x10/0x12
SyS_keyctl+0x9f/0xe7
do_syscall_64+0x63/0x13a
entry_SYSCALL64_slow_path+0x25/0x25
Fixes: f70e2e06196a ('KEYS: Do preallocation for __key_link()')
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: David Howells <dhowells@redhat.com>
cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | 1 | int key_reject_and_link(struct key *key,
unsigned timeout,
unsigned error,
struct key *keyring,
struct key *authkey)
{
struct assoc_array_edit *edit;
struct timespec now;
int ret, awaken, link_ret = 0;
key_check(key);
key_check(keyring);
awaken = 0;
ret = -EBUSY;
if (keyring) {
if (keyring->restrict_link)
return -EPERM;
link_ret = __key_link_begin(keyring, &key->index_key, &edit);
}
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
key->reject_error = -error;
smp_wmb();
set_bit(KEY_FLAG_NEGATIVE, &key->flags);
set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
now = current_kernel_time();
key->expiry = now.tv_sec + timeout;
key_schedule_gc(key->expiry + key_gc_delay);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
ret = 0;
/* and link it into the destination keyring */
if (keyring && link_ret == 0)
__key_link(key, &edit);
/* disable the authorisation key */
if (authkey)
key_revoke(authkey);
}
mutex_unlock(&key_construction_mutex);
if (keyring)
__key_link_end(keyring, &key->index_key, edit);
/* wake up anyone waiting for a key to be constructed */
if (awaken)
wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
return ret == 0 ? link_ret : ret;
}
| 256,806,618,522,082,730,000,000,000,000,000,000,000 | key.c | 16,508,077,423,529,737,000,000,000,000,000,000,000 | [
"CWE-703"
] | CVE-2016-4470 | The key_reject_and_link function in security/keys/key.c in the Linux kernel through 4.6.3 does not ensure that a certain data structure is initialized, which allows local users to cause a denial of service (system crash) via vectors involving a crafted keyctl request2 command. | https://nvd.nist.gov/vuln/detail/CVE-2016-4470 |
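Per the commit message above (CVE-2016-4470), when __key_link_begin() fails, link_ret is non-zero and edit is never written, yet __key_link_end() is still called with that uninitialized pointer. A minimal sketch of the kind of check described, guarding the teardown on the begin call having succeeded (an illustration of the described fix, not the verbatim upstream patch); declaring edit as "struct assoc_array_edit *edit = NULL;" would be a complementary defensive step:

    if (keyring) {
        /* Only undo the edit if __key_link_begin() actually created one. */
        if (link_ret == 0)
            __key_link_end(keyring, &key->index_key, edit);
    }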
2,066 | iperf | 91f2fa59e8ed80dfbf400add0164ee0e508e412a | https://github.com/esnet/iperf | https://github.com/esnet/iperf/commit/91f2fa59e8ed80dfbf400add0164ee0e508e412a | Fix a buffer overflow / heap corruption issue that could occur if a
malformed JSON string was passed on the control channel. This issue,
present in the cJSON library, was already fixed upstream, so was
addressed here in iperf3 by importing a newer version of cJSON (plus
local ESnet modifications).
Discovered and reported by Dave McDaniel, Cisco Talos.
Based on a patch by @dopheide-esnet, with input from @DaveGamble.
Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001,
CVE-2016-4303
(cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40)
Signed-off-by: Bruce A. Mah <bmah@es.net> | 1 | static const char *parse_string( cJSON *item, const char *str )
{
const char *ptr = str + 1;
char *ptr2;
char *out;
int len = 0;
unsigned uc, uc2;
if ( *str != '\"' ) {
/* Not a string! */
ep = str;
return 0;
}
/* Skip escaped quotes. */
while ( *ptr != '\"' && *ptr && ++len )
if ( *ptr++ == '\\' )
ptr++;
if ( ! ( out = (char*) cJSON_malloc( len + 1 ) ) )
return 0;
ptr = str + 1;
ptr2 = out;
while ( *ptr != '\"' && *ptr ) {
if ( *ptr != '\\' )
*ptr2++ = *ptr++;
else {
ptr++;
switch ( *ptr ) {
case 'b': *ptr2++ ='\b'; break;
case 'f': *ptr2++ ='\f'; break;
case 'n': *ptr2++ ='\n'; break;
case 'r': *ptr2++ ='\r'; break;
case 't': *ptr2++ ='\t'; break;
case 'u':
/* Transcode utf16 to utf8. */
/* Get the unicode char. */
sscanf( ptr + 1,"%4x", &uc );
ptr += 4;
/* Check for invalid. */
if ( ( uc >= 0xDC00 && uc <= 0xDFFF ) || uc == 0 )
break;
/* UTF16 surrogate pairs. */
if ( uc >= 0xD800 && uc <= 0xDBFF ) {
if ( ptr[1] != '\\' || ptr[2] != 'u' )
/* Missing second-half of surrogate. */
break;
sscanf( ptr + 3, "%4x", &uc2 );
ptr += 6;
if ( uc2 < 0xDC00 || uc2 > 0xDFFF )
/* Invalid second-half of surrogate. */
break;
uc = 0x10000 | ( ( uc & 0x3FF ) << 10 ) | ( uc2 & 0x3FF );
}
len = 4;
if ( uc < 0x80 )
len = 1;
else if ( uc < 0x800 )
len = 2;
else if ( uc < 0x10000 )
len = 3;
ptr2 += len;
switch ( len ) {
case 4: *--ptr2 = ( ( uc | 0x80) & 0xBF ); uc >>= 6;
case 3: *--ptr2 = ( ( uc | 0x80) & 0xBF ); uc >>= 6;
case 2: *--ptr2 = ( ( uc | 0x80) & 0xBF ); uc >>= 6;
case 1: *--ptr2 = ( uc | firstByteMark[len] );
}
ptr2 += len;
break;
default: *ptr2++ = *ptr; break;
}
++ptr;
}
}
*ptr2 = 0;
if ( *ptr == '\"' )
++ptr;
item->valuestring = out;
item->type = cJSON_String;
return ptr;
}
| 202,681,723,794,535,540,000,000,000,000,000,000,000 | cjson.c | 211,459,362,401,629,130,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2016-4303 | The parse_string function in cjson.c in the cJSON library mishandles UTF8/16 strings, which allows remote attackers to cause a denial of service (crash) or execute arbitrary code via a non-hex character in a JSON string, which triggers a heap-based buffer overflow. | https://nvd.nist.gov/vuln/detail/CVE-2016-4303 |
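In parse_string() above (CVE-2016-4303), the \uXXXX handling trusts sscanf("%4x") and then advances ptr by 4 or 6 unconditionally, so a non-hex character or a truncated escape leaves uc undefined and lets the pointer and output bookkeeping run past the bytes allocated for out. Newer cJSON validates the four hex digits explicitly before using them; a sketch of that style of helper (names and structure illustrative, not the exact imported code):

    /* Parse exactly four hex digits; return 0 on any invalid digit so the
     * caller can take the existing "invalid codepoint" bail-out path. */
    static unsigned parse_hex4(const char *str)
    {
        unsigned h = 0;
        int i;

        for (i = 0; i < 4; i++) {
            char c = str[i];

            h <<= 4;
            if (c >= '0' && c <= '9')
                h += (unsigned)(c - '0');
            else if (c >= 'a' && c <= 'f')
                h += (unsigned)(c - 'a' + 10);
            else if (c >= 'A' && c <= 'F')
                h += (unsigned)(c - 'A' + 10);
            else
                return 0;
        }
        return h;
    }

The caller would also need to confirm that four input characters exist before reading them and that ptr2 stays within the len + 1 bytes allocated for out.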
2,077 | libarchive | 05caadc7eedbef471ac9610809ba683f0c698700 | https://github.com/libarchive/libarchive | https://github.com/libarchive/libarchive/commit/05caadc7eedbef471ac9610809ba683f0c698700 | Issue 719: Fix for TALOS-CAN-154
A RAR file with an invalid zero dictionary size was not being
rejected, leading to a zero-sized allocation for the dictionary
storage which was then overwritten during the dictionary initialization.
Thanks to the Open Source and Threat Intelligence project at Cisco for
reporting this. | 1 | parse_codes(struct archive_read *a)
{
int i, j, val, n, r;
unsigned char bitlengths[MAX_SYMBOLS], zerocount, ppmd_flags;
unsigned int maxorder;
struct huffman_code precode;
struct rar *rar = (struct rar *)(a->format->data);
struct rar_br *br = &(rar->br);
free_codes(a);
/* Skip to the next byte */
rar_br_consume_unalined_bits(br);
/* PPMd block flag */
if (!rar_br_read_ahead(a, br, 1))
goto truncated_data;
if ((rar->is_ppmd_block = rar_br_bits(br, 1)) != 0)
{
rar_br_consume(br, 1);
if (!rar_br_read_ahead(a, br, 7))
goto truncated_data;
ppmd_flags = rar_br_bits(br, 7);
rar_br_consume(br, 7);
/* Memory is allocated in MB */
if (ppmd_flags & 0x20)
{
if (!rar_br_read_ahead(a, br, 8))
goto truncated_data;
rar->dictionary_size = (rar_br_bits(br, 8) + 1) << 20;
rar_br_consume(br, 8);
}
if (ppmd_flags & 0x40)
{
if (!rar_br_read_ahead(a, br, 8))
goto truncated_data;
rar->ppmd_escape = rar->ppmd7_context.InitEsc = rar_br_bits(br, 8);
rar_br_consume(br, 8);
}
else
rar->ppmd_escape = 2;
if (ppmd_flags & 0x20)
{
maxorder = (ppmd_flags & 0x1F) + 1;
if(maxorder > 16)
maxorder = 16 + (maxorder - 16) * 3;
if (maxorder == 1)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated RAR file data");
return (ARCHIVE_FATAL);
}
/* Make sure ppmd7_contest is freed before Ppmd7_Construct
* because reading a broken file cause this abnormal sequence. */
__archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context, &g_szalloc);
rar->bytein.a = a;
rar->bytein.Read = &ppmd_read;
__archive_ppmd7_functions.PpmdRAR_RangeDec_CreateVTable(&rar->range_dec);
rar->range_dec.Stream = &rar->bytein;
__archive_ppmd7_functions.Ppmd7_Construct(&rar->ppmd7_context);
if (!__archive_ppmd7_functions.Ppmd7_Alloc(&rar->ppmd7_context,
rar->dictionary_size, &g_szalloc))
{
archive_set_error(&a->archive, ENOMEM,
"Out of memory");
return (ARCHIVE_FATAL);
}
if (!__archive_ppmd7_functions.PpmdRAR_RangeDec_Init(&rar->range_dec))
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Unable to initialize PPMd range decoder");
return (ARCHIVE_FATAL);
}
__archive_ppmd7_functions.Ppmd7_Init(&rar->ppmd7_context, maxorder);
rar->ppmd_valid = 1;
}
else
{
if (!rar->ppmd_valid) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid PPMd sequence");
return (ARCHIVE_FATAL);
}
if (!__archive_ppmd7_functions.PpmdRAR_RangeDec_Init(&rar->range_dec))
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Unable to initialize PPMd range decoder");
return (ARCHIVE_FATAL);
}
}
}
else
{
rar_br_consume(br, 1);
/* Keep existing table flag */
if (!rar_br_read_ahead(a, br, 1))
goto truncated_data;
if (!rar_br_bits(br, 1))
memset(rar->lengthtable, 0, sizeof(rar->lengthtable));
rar_br_consume(br, 1);
memset(&bitlengths, 0, sizeof(bitlengths));
for (i = 0; i < MAX_SYMBOLS;)
{
if (!rar_br_read_ahead(a, br, 4))
goto truncated_data;
bitlengths[i++] = rar_br_bits(br, 4);
rar_br_consume(br, 4);
if (bitlengths[i-1] == 0xF)
{
if (!rar_br_read_ahead(a, br, 4))
goto truncated_data;
zerocount = rar_br_bits(br, 4);
rar_br_consume(br, 4);
if (zerocount)
{
i--;
for (j = 0; j < zerocount + 2 && i < MAX_SYMBOLS; j++)
bitlengths[i++] = 0;
}
}
}
memset(&precode, 0, sizeof(precode));
r = create_code(a, &precode, bitlengths, MAX_SYMBOLS, MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK) {
free(precode.tree);
free(precode.table);
return (r);
}
for (i = 0; i < HUFFMAN_TABLE_SIZE;)
{
if ((val = read_next_symbol(a, &precode)) < 0) {
free(precode.tree);
free(precode.table);
return (ARCHIVE_FATAL);
}
if (val < 16)
{
rar->lengthtable[i] = (rar->lengthtable[i] + val) & 0xF;
i++;
}
else if (val < 18)
{
if (i == 0)
{
free(precode.tree);
free(precode.table);
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Internal error extracting RAR file.");
return (ARCHIVE_FATAL);
}
if(val == 16) {
if (!rar_br_read_ahead(a, br, 3)) {
free(precode.tree);
free(precode.table);
goto truncated_data;
}
n = rar_br_bits(br, 3) + 3;
rar_br_consume(br, 3);
} else {
if (!rar_br_read_ahead(a, br, 7)) {
free(precode.tree);
free(precode.table);
goto truncated_data;
}
n = rar_br_bits(br, 7) + 11;
rar_br_consume(br, 7);
}
for (j = 0; j < n && i < HUFFMAN_TABLE_SIZE; j++)
{
rar->lengthtable[i] = rar->lengthtable[i-1];
i++;
}
}
else
{
if(val == 18) {
if (!rar_br_read_ahead(a, br, 3)) {
free(precode.tree);
free(precode.table);
goto truncated_data;
}
n = rar_br_bits(br, 3) + 3;
rar_br_consume(br, 3);
} else {
if (!rar_br_read_ahead(a, br, 7)) {
free(precode.tree);
free(precode.table);
goto truncated_data;
}
n = rar_br_bits(br, 7) + 11;
rar_br_consume(br, 7);
}
for(j = 0; j < n && i < HUFFMAN_TABLE_SIZE; j++)
rar->lengthtable[i++] = 0;
}
}
free(precode.tree);
free(precode.table);
r = create_code(a, &rar->maincode, &rar->lengthtable[0], MAINCODE_SIZE,
MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK)
return (r);
r = create_code(a, &rar->offsetcode, &rar->lengthtable[MAINCODE_SIZE],
OFFSETCODE_SIZE, MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK)
return (r);
r = create_code(a, &rar->lowoffsetcode,
&rar->lengthtable[MAINCODE_SIZE + OFFSETCODE_SIZE],
LOWOFFSETCODE_SIZE, MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK)
return (r);
r = create_code(a, &rar->lengthcode,
&rar->lengthtable[MAINCODE_SIZE + OFFSETCODE_SIZE +
LOWOFFSETCODE_SIZE], LENGTHCODE_SIZE, MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK)
return (r);
}
if (!rar->dictionary_size || !rar->lzss.window)
{
/* Seems as though dictionary sizes are not used. Even so, minimize
* memory usage as much as possible.
*/
void *new_window;
unsigned int new_size;
if (rar->unp_size >= DICTIONARY_MAX_SIZE)
new_size = DICTIONARY_MAX_SIZE;
else
new_size = rar_fls((unsigned int)rar->unp_size) << 1;
new_window = realloc(rar->lzss.window, new_size);
if (new_window == NULL) {
archive_set_error(&a->archive, ENOMEM,
"Unable to allocate memory for uncompressed data.");
return (ARCHIVE_FATAL);
}
rar->lzss.window = (unsigned char *)new_window;
rar->dictionary_size = new_size;
memset(rar->lzss.window, 0, rar->dictionary_size);
rar->lzss.mask = rar->dictionary_size - 1;
}
rar->start_new_table = 0;
return (ARCHIVE_OK);
truncated_data:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated RAR file data");
rar->valid = 0;
return (ARCHIVE_FATAL);
}
| 325,675,593,910,182,270,000,000,000,000,000,000,000 | archive_read_support_format_rar.c | 97,898,541,607,838,170,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2016-4302 | Heap-based buffer overflow in the parse_codes function in archive_read_support_format_rar.c in libarchive before 3.2.1 allows remote attackers to execute arbitrary code via a RAR file with a zero-sized dictionary. | https://nvd.nist.gov/vuln/detail/CVE-2016-4302 |
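For the record above (CVE-2016-4302), the commit message states that an invalid zero dictionary size was accepted; it reaches the window sizing at the end of parse_codes() (for example, rar_fls() of a zero unp_size shifts to 0), producing a zero-sized allocation that is then written during dictionary initialization. A minimal sketch of the kind of guard the fix description implies, rejecting a zero-sized dictionary before it is used (error text illustrative, not quoted from the upstream patch):

    if (rar->unp_size >= DICTIONARY_MAX_SIZE)
        new_size = DICTIONARY_MAX_SIZE;
    else
        new_size = rar_fls((unsigned int)rar->unp_size) << 1;
    if (new_size == 0)
    {
        /* Refuse the zero-sized dictionary instead of allocating an
         * empty window and writing into it during initialization. */
        archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
            "Zero-sized RAR dictionary is invalid");
        return (ARCHIVE_FATAL);
    }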
2,078 | libarchive | a550daeecf6bc689ade371349892ea17b5b97c77 | https://github.com/libarchive/libarchive | https://github.com/libarchive/libarchive/commit/a550daeecf6bc689ade371349892ea17b5b97c77 | Fix libarchive/archive_read_support_format_mtree.c:1388:11: error: array subscript is above array bounds | 1 | parse_device(dev_t *pdev, struct archive *a, char *val)
{
#define MAX_PACK_ARGS 3
unsigned long numbers[MAX_PACK_ARGS];
char *p, *dev;
int argc;
pack_t *pack;
dev_t result;
const char *error = NULL;
memset(pdev, 0, sizeof(*pdev));
if ((dev = strchr(val, ',')) != NULL) {
/*
* Device's major/minor are given in a specified format.
* Decode and pack it accordingly.
*/
*dev++ = '\0';
if ((pack = pack_find(val)) == NULL) {
archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
"Unknown format `%s'", val);
return ARCHIVE_WARN;
}
argc = 0;
while ((p = la_strsep(&dev, ",")) != NULL) {
if (*p == '\0') {
archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
"Missing number");
return ARCHIVE_WARN;
}
numbers[argc++] = (unsigned long)mtree_atol(&p);
if (argc > MAX_PACK_ARGS) {
archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
"Too many arguments");
return ARCHIVE_WARN;
}
}
if (argc < 2) {
archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
"Not enough arguments");
return ARCHIVE_WARN;
}
result = (*pack)(argc, numbers, &error);
if (error != NULL) {
archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
"%s", error);
return ARCHIVE_WARN;
}
} else {
/* file system raw value. */
result = (dev_t)mtree_atol(&val);
}
*pdev = result;
return ARCHIVE_OK;
#undef MAX_PACK_ARGS
}
| 140,177,069,250,601,300,000,000,000,000,000,000,000 | archive_read_support_format_mtree.c | 289,329,355,082,132,950,000,000,000,000,000,000,000 | [
"CWE-119"
] | CVE-2016-4301 | Stack-based buffer overflow in the parse_device function in archive_read_support_format_mtree.c in libarchive before 3.2.1 allows remote attackers to execute arbitrary code via a crafted mtree file. | https://nvd.nist.gov/vuln/detail/CVE-2016-4301 |
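In parse_device() above (CVE-2016-4301), numbers[argc++] stores into the 3-element stack array before argc is compared against MAX_PACK_ARGS, so a fourth comma-separated value writes one slot past the end of numbers[]. A sketch of the reordered bounds check, testing the index before the store (illustrative; the upstream fix may differ in detail):

    while ((p = la_strsep(&dev, ",")) != NULL) {
        if (*p == '\0') {
            archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
                "Missing number");
            return ARCHIVE_WARN;
        }
        /* Reject the extra argument before writing past numbers[2]. */
        if (argc >= MAX_PACK_ARGS) {
            archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
                "Too many arguments");
            return ARCHIVE_WARN;
        }
        numbers[argc++] = (unsigned long)mtree_atol(&p);
    }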