/*
* llc_c_ev.c - Connection component state transition event qualifiers
*
* A 'state' consists of a number of possible event matching functions,
* the actions associated with each being executed when that event is
* matched; a 'state machine' accepts events in a serial fashion from an
* event queue. Each event is passed to each successive event matching
* function until a match is made (the event matching function returns
* success, or '0') or the list of event matching functions is exhausted.
* If a match is made, the actions associated with the event are executed
* and the state is changed to that event's transition state. Before some
* events are recognized, even after a match has been made, a certain
* number of 'event qualifier' functions must also be executed. If these
* all execute successfully, then the event is finally executed.
*
* These event functions must return 0 for success, to show a matched
* event, or 1 if the event does not match. Event qualifier functions
* must return 0 for success or a non-zero value for failure. Each function
* is simply responsible for verifying one single thing and returning
* either success or failure.
*
* All of the following event functions are described in the IEEE 802.2 LLC
* protocol standard, except for two functions that we added; those are
* explained in their comments below.
*
* Copyright (c) 1997 by Procom Technology, Inc.
* 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program can be redistributed or modified under the terms of the
* GNU General Public License as published by the Free Software Foundation.
* This program is distributed without any warranty or implied warranty
* of merchantability or fitness for a particular purpose.
*
* See the GNU General Public License for more details.
*/
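/*
* Illustrative sketch of the dispatch described above (the type and helper
* names here are hypothetical, not the actual connection-component
* implementation): each state owns a transition table, and each transition
* carries an event matcher, a NULL-terminated qualifier list and an action
* list.
*
*	struct llc_trans_sketch {
*		int (*ev)(struct sock *sk, struct sk_buff *skb);
*		u8 next_state;
*		int (**qualifiers)(struct sock *sk, struct sk_buff *skb);
*		int (**actions)(struct sock *sk, struct sk_buff *skb);
*	};
*
*	static void llc_dispatch_sketch(struct sock *sk, struct sk_buff *skb,
*					struct llc_trans_sketch *t)
*	{
*		for (; t->ev; t++) {
*			if (t->ev(sk, skb))
*				continue;	// 1 means "no match", try the next one
*			if (run_all(t->qualifiers, sk, skb))
*				break;		// a qualifier failed, drop the event
*			run_all(t->actions, sk, skb);
*			llc_sk(sk)->state = t->next_state;
*			break;
*		}
*	}
*
* where run_all() is a hypothetical helper that walks a NULL-terminated
* function-pointer list and returns non-zero as soon as one entry fails.
*/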
#include <linux/netdevice.h>
#include <net/llc_conn.h>
#include <net/llc_sap.h>
#include <net/sock.h>
#include <net/llc_c_ac.h>
#include <net/llc_c_ev.h>
#include <net/llc_pdu.h>
#if 1
#define dprintk(args...) printk(KERN_DEBUG args)
#else
#define dprintk(args...)
#endif
/**
* llc_util_ns_inside_rx_window - check if sequence number is in rx window
* @ns: sequence number of received pdu.
* @vr: sequence number which receiver expects to receive.
* @rw: receive window size of receiver.
*
* Checks if sequence number of received PDU is in range of receive
* window. Returns 0 for success, 1 otherwise
*/
static u16 llc_util_ns_inside_rx_window(u8 ns, u8 vr, u8 rw)
{
return !llc_circular_between(vr, ns,
(vr + rw - 1) % LLC_2_SEQ_NBR_MODULO);
}
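/*
* Worked example (assuming the usual modulus of 128 for LLC sequence
* numbers): with vr == 126 and rw == 4 the upper edge of the window is
* (126 + 4 - 1) % 128 == 1, so N(S) values 126, 127, 0 and 1 make this
* function return 0 (inside the window), while any other N(S) makes it
* return 1.
*/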
/**
* llc_util_nr_inside_tx_window - check if sequence number is in tx window
* @sk: current connection.
* @nr: N(R) of received PDU.
*
* This routine checks if N(R) of the received PDU is in range of the
* transmit window; in other words, whether the received PDU acknowledges
* outstanding PDUs that are in the transmit window. Returns 0 for success,
* 1 otherwise.
*/
static u16 llc_util_nr_inside_tx_window(struct sock *sk, u8 nr)
{
u8 nr1, nr2;
struct sk_buff *skb;
struct llc_pdu_sn *pdu;
struct llc_sock *llc = llc_sk(sk);
int rc = 0;
if (llc->dev->flags & IFF_LOOPBACK)
goto out;
rc = 1;
if (skb_queue_empty(&llc->pdu_unack_q))
goto out;
skb = skb_peek(&llc->pdu_unack_q);
pdu = llc_pdu_sn_hdr(skb);
nr1 = LLC_I_GET_NS(pdu);
skb = skb_peek_tail(&llc->pdu_unack_q);
pdu = llc_pdu_sn_hdr(skb);
nr2 = LLC_I_GET_NS(pdu);
rc = !llc_circular_between(nr1, nr, (nr2 + 1) % LLC_2_SEQ_NBR_MODULO);
out:
return rc;
}
int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb)
{
const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
return ev->prim == LLC_CONN_PRIM &&
ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1;
}
int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb)
{
const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
return ev->prim == LLC_DATA_PRIM &&
ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1;
}
int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb)
{
const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
return ev->prim == LLC_DISC_PRIM &&
ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1;
}
int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb)
{
const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
return ev->prim == LLC_RESET_PRIM &&
ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1;
}
int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb)
{
const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
return ev->type == LLC_CONN_EV_TYPE_SIMPLE &&
ev->prim_type == LLC_CONN_EV_LOCAL_BUSY_DETECTED ? 0 : 1;
}
int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb)
{
const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
return ev->type == LLC_CONN_EV_TYPE_SIMPLE &&
ev->prim_type == LLC_CONN_EV_LOCAL_BUSY_CLEARED ? 0 : 1;
}
int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb)
{
return 1;
}
int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) &&
LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_DISC ? 0 : 1;
}
int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) &&
LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_DM ? 0 : 1;
}
int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) &&
LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_FRMR ? 0 : 1;
}
int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return llc_conn_space(sk, skb) &&
LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
LLC_I_PF_IS_0(pdu) &&
LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1;
}
int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return llc_conn_space(sk, skb) &&
LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
LLC_I_PF_IS_1(pdu) &&
LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1;
}
int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
const u8 vr = llc_sk(sk)->vR;
const u8 ns = LLC_I_GET_NS(pdu);
return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
LLC_I_PF_IS_0(pdu) && ns != vr &&
!llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
}
int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
const u8 vr = llc_sk(sk)->vR;
const u8 ns = LLC_I_GET_NS(pdu);
return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
LLC_I_PF_IS_1(pdu) && ns != vr &&
!llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
}
int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
const u8 vr = llc_sk(sk)->vR;
const u8 ns = LLC_I_GET_NS(pdu);
const u16 rc = LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
ns != vr &&
llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
if (!rc)
dprintk("%s: matched, state=%d, ns=%d, vr=%d\n",
__func__, llc_sk(sk)->state, ns, vr);
return rc;
}
int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return llc_conn_space(sk, skb) &&
LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
LLC_I_PF_IS_0(pdu) &&
LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1;
}
int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
LLC_I_PF_IS_1(pdu) &&
LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1;
}
int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return llc_conn_space(sk, skb) &&
LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1;
}
int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
const u8 vr = llc_sk(sk)->vR;
const u8 ns = LLC_I_GET_NS(pdu);
return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
LLC_I_PF_IS_0(pdu) && ns != vr &&
!llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
}
int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
const u8 vr = llc_sk(sk)->vR;
const u8 ns = LLC_I_GET_NS(pdu);
return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
LLC_I_PF_IS_1(pdu) && ns != vr &&
!llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
}
int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
const u8 vr = llc_sk(sk)->vR;
const u8 ns = LLC_I_GET_NS(pdu);
return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && ns != vr &&
!llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
}
int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
const u8 vr = llc_sk(sk)->vR;
const u8 ns = LLC_I_GET_NS(pdu);
const u16 rc = LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
ns != vr &&
llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
if (!rc)
dprintk("%s: matched, state=%d, ns=%d, vr=%d\n",
__func__, llc_sk(sk)->state, ns, vr);
return rc;
}
int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
LLC_S_PF_IS_0(pdu) &&
LLC_S_PDU_CMD(pdu) == LLC_2_PDU_CMD_REJ ? 0 : 1;
}
int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
LLC_S_PF_IS_1(pdu) &&
LLC_S_PDU_CMD(pdu) == LLC_2_PDU_CMD_REJ ? 0 : 1;
}
int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
LLC_S_PF_IS_0(pdu) &&
LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_REJ ? 0 : 1;
}
int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
LLC_S_PF_IS_1(pdu) &&
LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_REJ ? 0 : 1;
}
int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_REJ ? 0 : 1;
}
int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
LLC_S_PF_IS_0(pdu) &&
LLC_S_PDU_CMD(pdu) == LLC_2_PDU_CMD_RNR ? 0 : 1;
}
int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
LLC_S_PF_IS_1(pdu) &&
LLC_S_PDU_CMD(pdu) == LLC_2_PDU_CMD_RNR ? 0 : 1;
}
int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
LLC_S_PF_IS_0(pdu) &&
LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_RNR ? 0 : 1;
}
int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
LLC_S_PF_IS_1(pdu) &&
LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_RNR ? 0 : 1;
}
int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
LLC_S_PF_IS_0(pdu) &&
LLC_S_PDU_CMD(pdu) == LLC_2_PDU_CMD_RR ? 0 : 1;
}
int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
LLC_S_PF_IS_1(pdu) &&
LLC_S_PDU_CMD(pdu) == LLC_2_PDU_CMD_RR ? 0 : 1;
}
int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return llc_conn_space(sk, skb) &&
LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
LLC_S_PF_IS_0(pdu) &&
LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_RR ? 0 : 1;
}
int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
return llc_conn_space(sk, skb) &&
LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
LLC_S_PF_IS_1(pdu) &&
LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_RR ? 0 : 1;
}
int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb)
{
const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) &&
LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_SABME ? 0 : 1;
}
int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
{
struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) &&
LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_UA ? 0 : 1;
}
int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
{
u16 rc = 1;
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
if (LLC_PDU_IS_CMD(pdu)) {
if (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) {
if (LLC_I_PF_IS_1(pdu))
rc = 0;
} else if (LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PF_IS_1(pdu))
rc = 0;
}
return rc;
}
int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb)
{
u16 rc = 1;
const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
if (LLC_PDU_IS_CMD(pdu)) {
if (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu))
rc = 0;
else if (LLC_PDU_TYPE_IS_U(pdu))
switch (LLC_U_PDU_CMD(pdu)) {
case LLC_2_PDU_CMD_SABME:
case LLC_2_PDU_CMD_DISC:
rc = 0;
break;
}
}
return rc;
}
int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
{
u16 rc = 1;
const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
if (LLC_PDU_IS_RSP(pdu)) {
if (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu))
rc = 0;
else if (LLC_PDU_TYPE_IS_U(pdu))
switch (LLC_U_PDU_RSP(pdu)) {
case LLC_2_PDU_RSP_UA:
case LLC_2_PDU_RSP_DM:
case LLC_2_PDU_RSP_FRMR:
rc = 0;
break;
}
}
return rc;
}
int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
struct sk_buff *skb)
{
u16 rc = 1;
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
const u8 vs = llc_sk(sk)->vS;
const u8 nr = LLC_I_GET_NR(pdu);
if (LLC_PDU_IS_CMD(pdu) &&
(LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) &&
nr != vs && llc_util_nr_inside_tx_window(sk, nr)) {
dprintk("%s: matched, state=%d, vs=%d, nr=%d\n",
__func__, llc_sk(sk)->state, vs, nr);
rc = 0;
}
return rc;
}
int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
struct sk_buff *skb)
{
u16 rc = 1;
const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
const u8 vs = llc_sk(sk)->vS;
const u8 nr = LLC_I_GET_NR(pdu);
if (LLC_PDU_IS_RSP(pdu) &&
(LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) &&
nr != vs && llc_util_nr_inside_tx_window(sk, nr)) {
rc = 0;
dprintk("%s: matched, state=%d, vs=%d, nr=%d\n",
__func__, llc_sk(sk)->state, vs, nr);
}
return rc;
}
int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb)
{
return 0;
}
int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb)
{
const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
return ev->type != LLC_CONN_EV_TYPE_P_TMR;
}
int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb)
{
const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
return ev->type != LLC_CONN_EV_TYPE_ACK_TMR;
}
int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb)
{
const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
return ev->type != LLC_CONN_EV_TYPE_REJ_TMR;
}
int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb)
{
const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
return ev->type != LLC_CONN_EV_TYPE_BUSY_TMR;
}
int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb)
{
return 1;
}
int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb)
{
const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
return ev->type == LLC_CONN_EV_TYPE_SIMPLE &&
ev->prim_type == LLC_CONN_EV_TX_BUFF_FULL ? 0 : 1;
}
/* Event qualifier functions
*
* These functions simply verify the value of a state flag associated with
* the connection and return either 0 for success or a non-zero value for
* failure; they confirm that the matched event really is the type we expect.
*/
int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb)
{
return llc_sk(sk)->data_flag != 1;
}
int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb)
{
return llc_sk(sk)->data_flag;
}
int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb)
{
return llc_sk(sk)->data_flag != 2;
}
int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb)
{
return llc_sk(sk)->p_flag != 1;
}
/**
* llc_conn_ev_qlfy_last_frame_eq_1 - checks if frame is last in tx window
* @sk: current connection structure.
* @skb: current event.
*
* This function determines whether the frame being sent is the last frame
* of the transmit window: if it is, the function returns zero, otherwise it
* returns one. It is used for sending the last frame of the transmit window
* as an I-format command with the P bit set to one. Returns 0 if the frame
* is the last frame, 1 otherwise.
*/
int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb)
{
return !(skb_queue_len(&llc_sk(sk)->pdu_unack_q) + 1 == llc_sk(sk)->k);
}
/**
* llc_conn_ev_qlfy_last_frame_eq_0 - checks if frame isn't last in tx window
* @sk: current connection structure.
* @skb: current event.
*
* This function determines whether the frame being sent is not the last
* frame of the transmit window: if it is not, the function returns zero,
* otherwise it returns one. Returns 0 if the frame isn't the last frame,
* 1 otherwise.
*/
int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb)
{
return skb_queue_len(&llc_sk(sk)->pdu_unack_q) + 1 == llc_sk(sk)->k;
}
int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb)
{
return llc_sk(sk)->p_flag;
}
int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb)
{
u8 f_bit;
llc_pdu_decode_pf_bit(skb, &f_bit);
return llc_sk(sk)->p_flag == f_bit ? 0 : 1;
}
int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb)
{
return llc_sk(sk)->remote_busy_flag;
}
int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb)
{
return !llc_sk(sk)->remote_busy_flag;
}
int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb)
{
return !(llc_sk(sk)->retry_count < llc_sk(sk)->n2);
}
int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb)
{
return !(llc_sk(sk)->retry_count >= llc_sk(sk)->n2);
}
int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb)
{
return !llc_sk(sk)->s_flag;
}
int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb)
{
return llc_sk(sk)->s_flag;
}
int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb)
{
return !llc_sk(sk)->cause_flag;
}
int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb)
{
return llc_sk(sk)->cause_flag;
}
int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb)
{
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
ev->status = LLC_STATUS_CONN;
return 0;
}
int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb)
{
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
ev->status = LLC_STATUS_DISC;
return 0;
}
int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb)
{
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
ev->status = LLC_STATUS_FAILED;
return 0;
}
int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
struct sk_buff *skb)
{
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
ev->status = LLC_STATUS_REMOTE_BUSY;
return 0;
}
int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb)
{
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
ev->status = LLC_STATUS_REFUSE;
return 0;
}
int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb)
{
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
ev->status = LLC_STATUS_CONFLICT;
return 0;
}
int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb)
{
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
ev->status = LLC_STATUS_RESET_DONE;
return 0;
}
/*******************************************************************************
* File Name: cps.h
*
* Version 1.0
*
* Description:
* Contains the function prototypes and constants available to the example
* project.
*
********************************************************************************
* Copyright 2015, Cypress Semiconductor Corporation. All rights reserved.
* You may use this file only in accordance with the license, terms, conditions,
* disclaimers, and limitations in the end user license agreement accompanying
* the software package with which this file was provided.
*******************************************************************************/
#include <project.h>
/***************************************
* Data Struct Definition
***************************************/
CYBLE_CYPACKED typedef struct
{
uint16 flags; /* Mandatory */
int16 instantaneousPower; /* Mandatory, Unit is in watts with a resolution of 1 */
uint32 accumulatedTorque; /* Unit is in newton meters with a resolution of 1/32, send only low 2 bytes */
uint32 cumulativeWheelRevolutions; /* When present, these fields are always present as a pair */
uint16 lastWheelEventTime; /* Unit is in seconds with a resolution of 1/2048 */
uint32 accumulatedEnergy; /* Unit is in kilojoules with a resolution of 1, send only low 2 bytes */
}CYBLE_CYPACKED_ATTR CYBLE_CPS_POWER_MEASURE_T;
CYBLE_CYPACKED typedef struct
{
uint8 flags; /* Mandatory */
uint16 cumulativeCrankRevolutions;
uint16 lastCrankEventTime; /* Unit is in seconds with a resolution of 1/1024 */
}CYBLE_CYPACKED_ATTR CYBLE_CPS_POWER_VECTOP_T;
/***************************************
* Constants
***************************************/
#define CYBLE_CPS_CP_RESP_LENGTH (0u)
#define CYBLE_CPS_CP_RESP_OP_CODES (1u)
#define CYBLE_CPS_CP_RESP_REQUEST_OP_CODE (2u)
#define CYBLE_CPS_CP_RESP_VALUE (3u)
#define CYBLE_CPS_CP_RESP_PARAMETER (4u)
#define CPS_POWER_MEASURE_DATA_MAX_SIZE (35u)
#define CPS_POWER_VECTOR_DATA_MAX_SIZE (12u)
#define CPS_SIMULATION_DISABLE (0u)
#define CPS_NOTIFICATION_MEASURE_ENABLE (1u)
#define CPS_NOTIFICATION_VECTOR_ENABLE (2u)
#define CPS_INDICATION_ENABLE (4u)
#define CPS_BROADCAST_ENABLE (8u)
#define CPS_SIM_TORQUE_INIT (0xFDC0u) /* Start value for rollover simulation */
#define CPS_SIM_TORQUE_INCREMENT (32u*10u) /* Value by which the torque is incremented - 10 Nm */
#define CPS_WHEEL_EVENT_TIME_PER_SEC (2048u) /* Unit is in seconds with a resolution of 1/2048 */
#define CPS_SIM_WHEEL_EVENT_TIME_INIT (63000u) /* Start value for rollover simulation */
#define CPS_SIM_WHEEL_EVENT_TIME_INCREMENT (2048u) /* Value by which the wheel event time is incremented - 1 sec */
#define CPS_SEC_IN_HOUR (3600u) /* To convert speed to km per hour */
#define CPS_SIM_CUMULATIVE_WHEEL_REVOLUTION_INIT (1000u) /* Start value for Cumulative Wheel Revolution */
#define CPS_SIM_CUMULATIVE_WHEEL_REVOLUTION_INCREMENT (8u) /* Value by which the cumulative wheel revolutions are incremented */
#define CPS_WHEEL_CIRCUMFERENCE (0.0021f) /* km */
#define CPS_SIM_ACCUMULATED_ENERGY_INIT (65532u) /* Start value for Accumulated Energy Value kJ */
#define CPS_SIM_ACCUMULATED_ENERGY_INCREMENT (2u) /* Value by which the energy is incremented - 2 kJ */
#define CPS_SIM_POWER_INIT (200u) /* Start value for instantaneous power in W */
#define CPS_CRANK_EVENT_TIME_PER_SEC (1024u) /* Unit is in seconds with a resolution of 1/1024 */
#define CPS_SIM_CRANK_EVENT_TIME_INIT (9300u) /* Start value last crank event time */
#define CPS_SIM_CRANK_EVENT_TIME_INCREMENT (1024u) /* Value by which the crank event time is incremented - 1 sec */
#define CPS_SIM_CUMULATIVE_CRANK_REVOLUTION_INIT (65470u) /* Start value for Cumulative Crank Revolution */
#define CPS_SIM_CUMULATIVE_CRANK_REVOLUTION_INCREMENT (60u) /* Value by which the cumulative crank revolutions are incremented */
/***************************************
* Function Prototypes
***************************************/
void CpsCallback(uint32 event, void *eventParam);
void CpsInit(void);
void SimulateCyclingPower(void);
/***************************************
* External data references
***************************************/
extern uint16 powerSimulation;
extern CYBLE_CPS_POWER_MEASURE_T powerMeasure;
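/* Illustrative sketch (not declared by this header): packing powerMeasure
* into the little-endian byte stream that is notified to the client. The
* field order and the "send only low 2 bytes" rule follow the comments on
* CYBLE_CPS_POWER_MEASURE_T above; the flag mask used here is a placeholder
* and must match the flag bits actually set by the application.
*
*	uint8 pdu[CPS_POWER_MEASURE_DATA_MAX_SIZE];
*	uint8 len = 0u;
*
*	pdu[len++] = (uint8) (powerMeasure.flags);
*	pdu[len++] = (uint8) (powerMeasure.flags >> 8u);
*	pdu[len++] = (uint8) (powerMeasure.instantaneousPower);
*	pdu[len++] = (uint8) (powerMeasure.instantaneousPower >> 8u);
*	if(powerMeasure.flags & TORQUE_PRESENT_FLAG) // placeholder mask
*	{
*	    pdu[len++] = (uint8) (powerMeasure.accumulatedTorque);       // low 2 bytes only
*	    pdu[len++] = (uint8) (powerMeasure.accumulatedTorque >> 8u);
*	}
*
* The remaining optional fields follow the same pattern, and the buffer of
* length len is then handed to the BLE stack's notification call.
*/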
/* [] END OF FILE */
#if defined(HAVE_BYTESWAP_H) && HAVE_BYTESWAP_H
#include_next <byteswap.h>
#else
#include <inttypes.h>
/* minimal version defining only the macros we need in our codebase */
static inline uint16_t __bswap_16(uint16_t u)
{
return (uint16_t) __builtin_bswap16(u);
}
static inline uint32_t __bswap_32(uint32_t u)
{
return (uint32_t) __builtin_bswap32(u);
}
static inline uint64_t __bswap_64(uint64_t u)
{
return (uint64_t) __builtin_bswap64(u);
}
#endif
/*
Simple DirectMedia Layer
Copyright (C) 1997-2016 Sam Lantinga <slouken@libsdl.org>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include "../../SDL_internal.h"
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>
#include "SDL_thread.h"
#include "SDL_sysmutex_c.h"
struct SDL_cond
{
pthread_cond_t cond;
};
/* Create a condition variable */
SDL_cond *
SDL_CreateCond(void)
{
SDL_cond *cond;
cond = (SDL_cond *) SDL_malloc(sizeof(SDL_cond));
if (cond) {
if (pthread_cond_init(&cond->cond, NULL) != 0) {
SDL_SetError("pthread_cond_init() failed");
SDL_free(cond);
cond = NULL;
}
}
return (cond);
}
/* Destroy a condition variable */
void
SDL_DestroyCond(SDL_cond * cond)
{
if (cond) {
pthread_cond_destroy(&cond->cond);
SDL_free(cond);
}
}
/* Restart one of the threads that are waiting on the condition variable */
int
SDL_CondSignal(SDL_cond * cond)
{
int retval;
if (!cond) {
return SDL_SetError("Passed a NULL condition variable");
}
retval = 0;
if (pthread_cond_signal(&cond->cond) != 0) {
return SDL_SetError("pthread_cond_signal() failed");
}
return retval;
}
/* Restart all threads that are waiting on the condition variable */
int
SDL_CondBroadcast(SDL_cond * cond)
{
int retval;
if (!cond) {
return SDL_SetError("Passed a NULL condition variable");
}
retval = 0;
if (pthread_cond_broadcast(&cond->cond) != 0) {
return SDL_SetError("pthread_cond_broadcast() failed");
}
return retval;
}
int
SDL_CondWaitTimeout(SDL_cond * cond, SDL_mutex * mutex, Uint32 ms)
{
int retval;
#ifndef HAVE_CLOCK_GETTIME
struct timeval delta;
#endif
struct timespec abstime;
if (!cond) {
return SDL_SetError("Passed a NULL condition variable");
}
#ifdef HAVE_CLOCK_GETTIME
clock_gettime(CLOCK_REALTIME, &abstime);
abstime.tv_nsec += (ms % 1000) * 1000000;
abstime.tv_sec += ms / 1000;
#else
gettimeofday(&delta, NULL);
abstime.tv_sec = delta.tv_sec + (ms / 1000);
abstime.tv_nsec = (delta.tv_usec + (ms % 1000) * 1000) * 1000;
#endif
if (abstime.tv_nsec >= 1000000000) {
abstime.tv_sec += 1;
abstime.tv_nsec -= 1000000000;
}
tryagain:
retval = pthread_cond_timedwait(&cond->cond, &mutex->id, &abstime);
switch (retval) {
case EINTR:
goto tryagain;
break;
case ETIMEDOUT:
retval = SDL_MUTEX_TIMEDOUT;
break;
case 0:
break;
default:
retval = SDL_SetError("pthread_cond_timedwait() failed");
}
return retval;
}
/* Wait on the condition variable, unlocking the provided mutex.
The mutex must be locked before entering this function!
*/
int
SDL_CondWait(SDL_cond * cond, SDL_mutex * mutex)
{
if (!cond) {
return SDL_SetError("Passed a NULL condition variable");
} else if (pthread_cond_wait(&cond->cond, &mutex->id) != 0) {
return SDL_SetError("pthread_cond_wait() failed");
}
return 0;
}
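/* Illustrative usage sketch (not part of this file): a consumer waiting up
   to one second for a flag guarded by an SDL mutex. SDL_CreateMutex(),
   SDL_LockMutex() and SDL_UnlockMutex() come from the public SDL API; the
   'ready' flag stands in for application state set by another thread.

   SDL_mutex *lock = SDL_CreateMutex();
   SDL_cond *cond = SDL_CreateCond();
   int ready = 0;

   SDL_LockMutex(lock);
   while (!ready) {
       if (SDL_CondWaitTimeout(cond, lock, 1000) == SDL_MUTEX_TIMEDOUT) {
           break;
       }
   }
   SDL_UnlockMutex(lock);

   The break gives up after one second; a signalling thread would set 'ready'
   under the same mutex and call SDL_CondSignal(cond).
*/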
/* vi: set ts=4 sw=4 expandtab: */
/*
* Copyright 2007-2008 Peter Hutterer
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Peter Hutterer, University of South Australia, NICTA
*/
#ifdef HAVE_DIX_CONFIG_H
#include <dix-config.h>
#endif
#ifndef SETCPTR_H
#define SETCPTR_H 1
int SProcXISetClientPointer(ClientPtr /* client */ );
int ProcXISetClientPointer(ClientPtr /* client */ );
#endif /* SETCPTR_H */
/* gopt.h PUBLIC DOMAIN 2015 t.gopt@purposeful.co.uk */
/* <http://www.purposeful.co.uk/software/gopt> */
/*
I, Tom Vajzovic, am the author of this software and its documentation.
I permanently abandon all intellectual property rights in them, including
copyright, trademarks, design rights, database right, patents, and the right
to be identified as the author.
I am fairly certain that the software does what the documentation says it
does, but I do not guarantee that it does, or that it does what you think it
should. I do not guarantee that it will not have undesirable side effects.
You are free to use, modify and distribute this software as you please, but
you do so at your own risk. If you do not pass on this warning then you may
be responsible for any problems encountered by those who obtain the software
through you.
*/
#ifndef GOPT_H_INCLUDED
#define GOPT_H_INCLUDED
#ifdef __cplusplus
extern "C" {
#endif
struct option
{
/* input: */
char short_name;
const char *long_name;
/* input/output: */
unsigned int flags;
/* output: */
unsigned int count;
char *argument;
};
/* values for flags: */
#define GOPT_ARGUMENT_OPTIONAL 0x000u /* option may or may not have an option-argument */
#define GOPT_ARGUMENT_FORBIDDEN 0x001u /* option must not have an option-argument */
#define GOPT_ARGUMENT_REQUIRED 0x002u /* option must have an option-argument */
#define GOPT_ARGUMENT_NO_HYPHEN 0x004u /* option-argument may not start with hyphen/minus */
#define GOPT_REPEATABLE 0x008u /* option may be specified more than once */
#define GOPT_SEEN_SHORT_WITHOUT 0x010u /* short form of option was present without an option-argument in argv */
#define GOPT_SEEN_SHORT_WITH 0x020u /* short form of option was present with an option-argument in argv */
#define GOPT_SEEN_LONG_WITHOUT 0x040u /* long form of option was present without an option-argument in argv */
#define GOPT_SEEN_LONG_WITH 0x080u /* long form of option was present with an option-argument in argv */
#define GOPT_LAST 0x100u /* this is the last element of the array */
int gopt (char **argv, struct option *options);
/*
The function gopt() takes an argument vector argv, which has the same
form as the second argument to main(). It removes from the vector all
options and option-arguments, leaving only the program name, the
operands and a null pointer at the end. It returns the updated
argument count argc. It does not need to know the initial value of
argc because the input vector must be null-terminated.
It also takes an array of struct option, the elements of which are
used for specifying the options which are to be recognized and for
returning details of which options were present in the argument
vector.
The members of struct option are used as follows:
On input, short_name is the single character that follows "-" to make
a short option, or zero if there is no short form of this option.
On input, long_name is the name of the long option excluding the "--",
or NULL if there is no long form of this option.
On return short_name and long_name are unaltered.
count may be uninitialized on input. On return it is set to the
number of times the option was specified. If the option is not
present, this is zero. If an option has both a short and long name,
this is the sum of counts for both forms.
argument may be uninitialized on input. On return it is set to the
last option-argument specified to this option, or NULL if no
option-argument was specified with the last occurrence of this
option, or the option was never specified.
flags is the bitwise-or of certain constants. On input flags must
contain exactly one of:
GOPT_ARGUMENT_FORBIDDEN if the option does not take an option-argument
GOPT_ARGUMENT_REQUIRED if it always requires an option-argument
GOPT_ARGUMENT_OPTIONAL if it may or may not take an option-argument
If flags contains GOPT_ARGUMENT_REQUIRED, it may also contain
GOPT_ARGUMENT_NO_HYPHEN. If the option argument is not allowed to
start with a '-', but the next member of the argument vector does
start with one where an option argument was expected, then this flag
causes it to be interpreted as another option, leaving the argument
to the preceding option NULL. The application can then detect the
NULL and give an error message like "missing option argument" which
may be preferable to treating the next program argument as an option
argument and then giving a less helpful message when the option
argument is found to be invalid.
flags may also optionally contain GOPT_REPEATABLE. This is ignored
by the option parsing code, but may be used when checking the value
returned in count, where if this flag is specified then the
application will presumably report an error if count is greater than
one.
On return, the initial value of flags is still present, and
additionally GOPT_SEEN_SHORT_WITH and/or GOPT_SEEN_SHORT_WITHOUT are
set if the short name was used at least once with or without an
option-argument, respectively, and GOPT_SEEN_LONG_WITH and/or
GOPT_SEEN_LONG_WITHOUT are set if the long name was used.
On input there must be zero or more elements in the options array
initialized as above. These must be followed by one element where
flags is exactly GOPT_LAST. All other members of this element may be
uninitialized on input. On return the count member of this element
will be set to the total number of unrecognized options present.
The short_name member will be set to the first unrecognized short
option, or zero if there were none. The long_name will be set to
point to the start of the name of the first unrecognized long option,
or NULL if there were none. For completeness the argument member
will be set to the option argument of the last unrecognized option
(the same as with other options) but this is probably useless to the
application.
*/
void gopt_errors (const char *argv0, const struct option *options);
/*
The function gopt_errors() examines the array of struct option
after it has been filled out by gopt(). If the options specified
were invalid it prints an error message starting with argv0 and
terminates the program.
*/
#ifdef __cplusplus
}
#endif
#endif /* GOPT_H_INCLUDED */
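/* Illustrative usage sketch (assumes a program taking a repeatable
   "-v"/"--verbose" flag and an "-o FILE"/"--output FILE" option; this example
   is not part of the header itself):

   #include <stdio.h>
   #include "gopt.h"

   int main (int argc, char **argv)
   {
     struct option options[3];

     options[0].long_name  = "verbose";
     options[0].short_name = 'v';
     options[0].flags      = GOPT_ARGUMENT_FORBIDDEN | GOPT_REPEATABLE;

     options[1].long_name  = "output";
     options[1].short_name = 'o';
     options[1].flags      = GOPT_ARGUMENT_REQUIRED;

     options[2].flags      = GOPT_LAST;

     argc = gopt (argv, options);
     gopt_errors (argv[0], options);

     if (options[1].count)
       printf ("output file: %s\n", options[1].argument);
     printf ("verbosity %u, %d operand(s)\n", options[0].count, argc - 1);
     return 0;
   }
*/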
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <math.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
volatile uint16_t *rx_cntr;
volatile float *rx_data;
int sock_thread = -1;
uint32_t rate_thread = 5;
uint32_t size_thread = 6000;
void *read_handler(void *arg);
int main(int argc, char *argv[])
{
int fd, sock_server, sock_client;
struct sched_param param;
pthread_attr_t attr;
pthread_t thread;
volatile void *cfg, *sts;
volatile uint32_t *rx_freq, *rx_size, *tx_phase[2];
volatile int16_t *tx_level[2];
volatile uint8_t *rst, *gpio;
struct sockaddr_in addr;
uint32_t command, freq, rate, size, i;
int32_t value, corr;
int64_t start, stop;
int yes = 1;
if((fd = open("/dev/mem", O_RDWR)) < 0)
{
perror("open");
return EXIT_FAILURE;
}
sts = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0x40000000);
cfg = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0x40001000);
rx_data = mmap(NULL, 2*sysconf(_SC_PAGESIZE), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0x40002000);
rx_freq = mmap(NULL, 32*sysconf(_SC_PAGESIZE), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0x40020000);
rx_cntr = ((uint16_t *)(sts + 12));
rst = ((uint8_t *)(cfg + 0));
gpio = ((uint8_t *)(cfg + 1));
rx_size = ((uint32_t *)(cfg + 4));
tx_phase[0] = ((uint32_t *)(cfg + 8));
tx_phase[1] = ((uint32_t *)(cfg + 12));
tx_level[0] = ((int16_t *)(cfg + 16));
tx_level[1] = ((int16_t *)(cfg + 18));
*tx_phase[0] = 0;
*tx_phase[1] = 0;
*tx_level[0] = 32766;
*tx_level[1] = 0;
*rx_size = 25000 - 1;
start = 10000;
stop = 60000000;
size = 6000;
rate = 5;
corr = 0;
*rst &= ~3;
*rst |= 4;
*gpio = 0;
if((sock_server = socket(AF_INET, SOCK_STREAM, 0)) < 0)
{
perror("socket");
return EXIT_FAILURE;
}
setsockopt(sock_server, SOL_SOCKET, SO_REUSEADDR, (void *)&yes , sizeof(yes));
/* setup listening address */
memset(&addr, 0, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = htonl(INADDR_ANY);
addr.sin_port = htons(1001);
if(bind(sock_server, (struct sockaddr *)&addr, sizeof(addr)) < 0)
{
perror("bind");
return EXIT_FAILURE;
}
listen(sock_server, 1024);
pthread_attr_init(&attr);
pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
param.sched_priority = 99;
pthread_attr_setschedparam(&attr, &param);
while(1)
{
if((sock_client = accept(sock_server, NULL, NULL)) < 0)
{
perror("accept");
return EXIT_FAILURE;
}
while(1)
{
if(recv(sock_client, (char *)&command, 4, MSG_WAITALL) <= 0) break;
value = command & 0xfffffff;
switch(command >> 28)
{
case 0:
/* set start */
if(value < 0 || value > 62500000) continue;
start = value;
break;
case 1:
/* set stop */
if(value < 0 || value > 62500000) continue;
stop = value;
break;
case 2:
/* set size */
if(value < 1 || value > 32768) continue;
size = value;
break;
case 3:
/* set rate */
if(value < 5 || value > 50000) continue;
rate = value;
*rx_size = 2500 * (rate + 5) - 1;
break;
case 4:
/* set correction */
if(value < -100000 || value > 100000) continue;
corr = value;
break;
case 5:
/* set phase */
if(value < 0 || value > 360) continue;
*tx_phase[0] = (uint32_t)floor(value / 360.0 * (1<<30) + 0.5);
break;
case 6:
/* set phase */
if(value < 0 || value > 360) continue;
*tx_phase[1] = (uint32_t)floor(value / 360.0 * (1<<30) + 0.5);
break;
case 7:
/* set level */
if(value < -32766 || value > 32766) continue;
*tx_level[0] = value;
break;
case 8:
/* set level */
if(value < -32766 || value > 32766) continue;
*tx_level[1] = value;
break;
case 9:
/* set gpio */
if(value < 0 || value > 255) continue;
*gpio = value;
break;
case 10:
/* sweep */
*rst &= ~3;
*rst |= 4;
*rst &= ~4;
*rst |= 2;
rate_thread = rate;
size_thread = size;
sock_thread = sock_client;
if(pthread_create(&thread, &attr, read_handler, NULL) < 0)
{
perror("pthread_create");
return EXIT_FAILURE;
}
pthread_detach(thread);
for(i = 0; i < size; ++i)
{
freq = start + (stop - start) * i / (size - 1);
freq *= (1.0 + 1.0e-9 * corr);
*rx_freq = (uint32_t)floor(freq / 125.0e6 * (1<<30) + 0.5);
}
*rst |= 1;
break;
case 11:
/* cancel */
*rst &= ~3;
*rst |= 4;
sock_thread = -1;
break;
}
}
*rst &= ~3;
*rst |= 4;
*gpio = 0;
sock_thread = -1;
close(sock_client);
}
close(sock_server);
return EXIT_SUCCESS;
}
void *read_handler(void *arg)
{
uint32_t i, j, cntr;
uint32_t rate = rate_thread;
uint32_t size = size_thread;
float buffer[4];
i = 0;
cntr = 0;
while(cntr < size * (rate + 5))
{
if(sock_thread < 0) break;
if(*rx_cntr < 4)
{
usleep(1000);
continue;
}
if(i < 5)
{
for(j = 0; j < 4; ++j)
{
buffer[j] = *rx_data;
}
memset(buffer, 0, 16);
}
else
{
for(j = 0; j < 4; ++j)
{
buffer[j] += *rx_data;
}
}
++i;
++cntr;
if(i < rate + 5) continue;
i = 0;
for(j = 0; j < 4; ++j)
{
buffer[j] /= rate;
}
if(send(sock_thread, buffer, 16, MSG_NOSIGNAL) < 0) break;
}
return NULL;
}
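/* Illustrative client-side sketch (assumptions: 'sock' is a TCP socket
   already connected to port 1001 of this server, and client and server share
   the same little-endian byte order). Each command is a 32-bit word whose top
   4 bits select one of the cases in the switch above and whose low 28 bits
   carry the value; command 10 starts a sweep, after which the server streams
   one 16-byte record of four averaged floats per frequency point.

   uint32_t cmd;
   float point[4];
   uint32_t i, size = 600;

   cmd = (2u << 28) | size;
   send(sock, &cmd, 4, 0);
   cmd = 10u << 28;
   send(sock, &cmd, 4, 0);
   for(i = 0; i < size; ++i)
   {
     if(recv(sock, point, 16, MSG_WAITALL) <= 0) break;
   }
*/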
/**
* C++ program copying a Cube object.
*
* @author
* Wade Fagen-Ulmschneider <waf@illinois.edu>
*/
#include "../Cube.h"
using uiuc::Cube;
int main() {
Cube c;
Cube myCube = c;
return 0;
}
/*
Copyright (c) 2008-2018, Troy D. Hanson http://troydhanson.github.com/uthash/
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* a dynamic array implementation using macros
*/
#ifndef UTARRAY_H
#define UTARRAY_H
#define UTARRAY_VERSION 2.1.0
#include <stddef.h> /* size_t */
#include <string.h> /* memset, etc */
#include <stdlib.h> /* exit */
#ifdef __GNUC__
#define UTARRAY_UNUSED __attribute__((__unused__))
#else
#define UTARRAY_UNUSED
#endif
#ifdef oom
#error "The name of macro 'oom' has been changed to 'utarray_oom'. Please update your code."
#define utarray_oom() oom()
#endif
#ifndef utarray_oom
#define utarray_oom() exit(-1)
#endif
typedef void (ctor_f)(void *dst, const void *src);
typedef void (dtor_f)(void *elt);
typedef void (init_f)(void *elt);
typedef struct {
size_t sz;
init_f *init;
ctor_f *copy;
dtor_f *dtor;
} UT_icd;
typedef struct {
unsigned i,n;/* i: index of next available slot, n: num slots */
UT_icd icd; /* initializer, copy and destructor functions */
char *d; /* n slots of size icd->sz*/
} UT_array;
#define utarray_init(a,_icd) do { \
memset(a,0,sizeof(UT_array)); \
(a)->icd = *(_icd); \
} while(0)
#define utarray_done(a) do { \
if ((a)->n) { \
if ((a)->icd.dtor) { \
unsigned _ut_i; \
for(_ut_i=0; _ut_i < (a)->i; _ut_i++) { \
(a)->icd.dtor(utarray_eltptr(a,_ut_i)); \
} \
} \
free((a)->d); \
} \
(a)->n=0; \
} while(0)
#define utarray_new(a,_icd) do { \
(a) = (UT_array*)malloc(sizeof(UT_array)); \
if ((a) == NULL) { \
utarray_oom(); \
} \
utarray_init(a,_icd); \
} while(0)
#define utarray_free(a) do { \
utarray_done(a); \
free(a); \
} while(0)
#define utarray_reserve(a,by) do { \
if (((a)->i+(by)) > (a)->n) { \
char *utarray_tmp; \
while (((a)->i+(by)) > (a)->n) { (a)->n = ((a)->n ? (2*(a)->n) : 8); } \
utarray_tmp=(char*)realloc((a)->d, (a)->n*(a)->icd.sz); \
if (utarray_tmp == NULL) { \
utarray_oom(); \
} \
(a)->d=utarray_tmp; \
} \
} while(0)
#define utarray_push_back(a,p) do { \
utarray_reserve(a,1); \
if ((a)->icd.copy) { (a)->icd.copy( _utarray_eltptr(a,(a)->i++), p); } \
else { memcpy(_utarray_eltptr(a,(a)->i++), p, (a)->icd.sz); }; \
} while(0)
#define utarray_pop_back(a) do { \
if ((a)->icd.dtor) { (a)->icd.dtor( _utarray_eltptr(a,--((a)->i))); } \
else { (a)->i--; } \
} while(0)
#define utarray_extend_back(a) do { \
utarray_reserve(a,1); \
if ((a)->icd.init) { (a)->icd.init(_utarray_eltptr(a,(a)->i)); } \
else { memset(_utarray_eltptr(a,(a)->i),0,(a)->icd.sz); } \
(a)->i++; \
} while(0)
#define utarray_len(a) ((a)->i)
#define utarray_eltptr(a,j) (((j) < (a)->i) ? _utarray_eltptr(a,j) : NULL)
#define _utarray_eltptr(a,j) ((void*)((a)->d + ((a)->icd.sz * (j))))
#define utarray_insert(a,p,j) do { \
if ((j) > (a)->i) utarray_resize(a,j); \
utarray_reserve(a,1); \
if ((j) < (a)->i) { \
memmove( _utarray_eltptr(a,(j)+1), _utarray_eltptr(a,j), \
((a)->i - (j))*((a)->icd.sz)); \
} \
if ((a)->icd.copy) { (a)->icd.copy( _utarray_eltptr(a,j), p); } \
else { memcpy(_utarray_eltptr(a,j), p, (a)->icd.sz); }; \
(a)->i++; \
} while(0)
#define utarray_inserta(a,w,j) do { \
if (utarray_len(w) == 0) break; \
if ((j) > (a)->i) utarray_resize(a,j); \
utarray_reserve(a,utarray_len(w)); \
if ((j) < (a)->i) { \
memmove(_utarray_eltptr(a,(j)+utarray_len(w)), \
_utarray_eltptr(a,j), \
((a)->i - (j))*((a)->icd.sz)); \
} \
if ((a)->icd.copy) { \
unsigned _ut_i; \
for(_ut_i=0;_ut_i<(w)->i;_ut_i++) { \
(a)->icd.copy(_utarray_eltptr(a, (j) + _ut_i), _utarray_eltptr(w, _ut_i)); \
} \
} else { \
memcpy(_utarray_eltptr(a,j), _utarray_eltptr(w,0), \
utarray_len(w)*((a)->icd.sz)); \
} \
(a)->i += utarray_len(w); \
} while(0)
#define utarray_resize(dst,num) do { \
unsigned _ut_i; \
if ((dst)->i > (unsigned)(num)) { \
if ((dst)->icd.dtor) { \
for (_ut_i = (num); _ut_i < (dst)->i; ++_ut_i) { \
(dst)->icd.dtor(_utarray_eltptr(dst, _ut_i)); \
} \
} \
} else if ((dst)->i < (unsigned)(num)) { \
utarray_reserve(dst, (num) - (dst)->i); \
if ((dst)->icd.init) { \
for (_ut_i = (dst)->i; _ut_i < (unsigned)(num); ++_ut_i) { \
(dst)->icd.init(_utarray_eltptr(dst, _ut_i)); \
} \
} else { \
memset(_utarray_eltptr(dst, (dst)->i), 0, (dst)->icd.sz*((num) - (dst)->i)); \
} \
} \
(dst)->i = (num); \
} while(0)
#define utarray_concat(dst,src) do { \
utarray_inserta(dst, src, utarray_len(dst)); \
} while(0)
#define utarray_erase(a,pos,len) do { \
if ((a)->icd.dtor) { \
unsigned _ut_i; \
for (_ut_i = 0; _ut_i < (len); _ut_i++) { \
(a)->icd.dtor(utarray_eltptr(a, (pos) + _ut_i)); \
} \
} \
if ((a)->i > ((pos) + (len))) { \
memmove(_utarray_eltptr(a, pos), _utarray_eltptr(a, (pos) + (len)), \
((a)->i - ((pos) + (len))) * (a)->icd.sz); \
} \
(a)->i -= (len); \
} while(0)
#define utarray_renew(a,u) do { \
if (a) utarray_clear(a); \
else utarray_new(a, u); \
} while(0)
#define utarray_clear(a) do { \
if ((a)->i > 0) { \
if ((a)->icd.dtor) { \
unsigned _ut_i; \
for(_ut_i=0; _ut_i < (a)->i; _ut_i++) { \
(a)->icd.dtor(_utarray_eltptr(a, _ut_i)); \
} \
} \
(a)->i = 0; \
} \
} while(0)
#define utarray_sort(a,cmp) do { \
qsort((a)->d, (a)->i, (a)->icd.sz, cmp); \
} while(0)
#define utarray_find(a,v,cmp) bsearch((v),(a)->d,(a)->i,(a)->icd.sz,cmp)
#define utarray_front(a) (((a)->i) ? (_utarray_eltptr(a,0)) : NULL)
#define utarray_next(a,e) (((e)==NULL) ? utarray_front(a) : (((a)->i != utarray_eltidx(a,e)+1) ? _utarray_eltptr(a,utarray_eltidx(a,e)+1) : NULL))
#define utarray_prev(a,e) (((e)==NULL) ? utarray_back(a) : ((utarray_eltidx(a,e) != 0) ? _utarray_eltptr(a,utarray_eltidx(a,e)-1) : NULL))
#define utarray_back(a) (((a)->i) ? (_utarray_eltptr(a,(a)->i-1)) : NULL)
#define utarray_eltidx(a,e) (((char*)(e) - (a)->d) / (a)->icd.sz)
/* last we pre-define a few icd for common utarrays of ints and strings */
static void utarray_str_cpy(void *dst, const void *src) {
char **_src = (char**)src, **_dst = (char**)dst;
*_dst = (*_src == NULL) ? NULL : strdup(*_src);
}
static void utarray_str_dtor(void *elt) {
char **eltc = (char**)elt;
if (*eltc != NULL) free(*eltc);
}
static const UT_icd ut_str_icd UTARRAY_UNUSED = {sizeof(char*),NULL,utarray_str_cpy,utarray_str_dtor};
static const UT_icd ut_int_icd UTARRAY_UNUSED = {sizeof(int),NULL,NULL,NULL};
static const UT_icd ut_ptr_icd UTARRAY_UNUSED = {sizeof(void*),NULL,NULL,NULL};
#endif /* UTARRAY_H */
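/* Illustrative usage sketch (a minimal exercise of the macros above, not part
* of the original header): build an array of ints, iterate it, then free it.
*
*	UT_array *nums;
*	int v, *p;
*
*	utarray_new(nums, &ut_int_icd);
*	for (v = 0; v < 10; v++) utarray_push_back(nums, &v);
*	for (p = (int *)utarray_front(nums); p != NULL;
*	     p = (int *)utarray_next(nums, p))
*		printf("%d\n", *p);
*	utarray_free(nums);
*/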
/*****************************************************************************
* Copyright (c) 2019, Nations Technologies Inc.
*
* All rights reserved.
* ****************************************************************************
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the disclaimer below.
*
* Nations' name may not be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* DISCLAIMER: THIS SOFTWARE IS PROVIDED BY NATIONS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
* DISCLAIMED. IN NO EVENT SHALL NATIONS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ****************************************************************************/
/**
* @file n32g45x_xfmc.h
* @author Nations Solution Team
* @version v1.0.0
*
* @copyright Copyright (c) 2019, Nations Technologies Inc. All rights reserved.
*/
#ifndef __N32G45X_XFMC_H__
#define __N32G45X_XFMC_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "n32g45x.h"
/** @addtogroup N32G45X_StdPeriph_Driver
* @{
*/
/** @addtogroup XFMC
* @{
*/
/** @addtogroup XFMC_Exported_Types
* @{
*/
/**
* @brief Timing parameters For NOR/SRAM Banks
*/
typedef struct
{
uint32_t AddrSetTime; /*!< Defines the number of HCLK cycles to configure
the duration of the address setup time.
This parameter can be a value between 0 and 0xF.
@note: It is not used with synchronous NOR Flash memories. */
uint32_t AddrHoldTime; /*!< Defines the number of HCLK cycles to configure
the duration of the address hold time.
This parameter can be a value between 0 and 0xF.
@note: It is not used with synchronous NOR Flash memories.*/
uint32_t
DataSetTime; /*!< Defines the number of HCLK cycles to configure
the duration of the data setup time.
This parameter can be a value between 0 and 0xFF.
@note: It is used for SRAMs, ROMs and asynchronous multiplexed NOR Flash memories. */
uint32_t BusRecoveryCycle; /*!< Defines the number of HCLK cycles to configure
the duration of the bus turnaround.
This parameter can be a value between 0 and 0xF.
@note: It is only used for multiplexed NOR Flash memories. */
uint32_t
ClkDiv; /*!< Defines the period of CLK clock output signal, expressed in number of HCLK cycles.
This parameter can be a value between 1 and 0xF.
@note: This parameter is not used for asynchronous NOR Flash, SRAM or ROM accesses. */
uint32_t DataLatency; /*!< Defines the number of memory clock cycles to issue
to the memory before getting the first data.
The value of this parameter depends on the memory type as shown below:
- It must be set to 0 in case of a CRAM
- It is don't care in asynchronous NOR, SRAM or ROM accesses
- It may assume a value between 0 and 0xF in NOR Flash memories
with synchronous burst mode enable */
uint32_t AccMode; /*!< Specifies the asynchronous access mode.
This parameter can be a value of @ref XFMC_Access_Mode */
} XFMC_NorSramTimingInitType;
/**
* @brief XFMC NOR/SRAM Init structure definition
*/
typedef struct
{
uint32_t Bank; /*!< Specifies the NOR/SRAM memory bank that will be used.
This parameter can be a value of @ref XFMC_NORSRAM_Bank */
uint32_t DataAddrMux; /*!< Specifies whether the address and data values are
multiplexed on the databus or not.
This parameter can be a value of @ref XFMC_Data_Address_Bus_Multiplexing */
uint32_t MemType; /*!< Specifies the type of external memory attached to
the corresponding memory bank.
This parameter can be a value of @ref XFMC_Memory_Type */
uint32_t MemDataWidth; /*!< Specifies the external memory device width.
This parameter can be a value of @ref XFMC_Data_Width */
uint32_t BurstAccMode; /*!< Enables or disables the burst access mode for Flash memory,
valid only with synchronous burst Flash memories.
This parameter can be a value of @ref XFMC_Burst_Access_Mode */
uint32_t AsynchroWait; /*!< Enables or disables wait signal during asynchronous transfers,
valid only with asynchronous Flash memories.
This parameter can be a value of @ref AsynchroWait */
uint32_t WaitSigPolarity; /*!< Specifies the wait signal polarity, valid only when accessing
the Flash memory in burst mode.
This parameter can be a value of @ref XFMC_Wait_Signal_Polarity */
uint32_t WrapMode; /*!< Enables or disables the Wrapped burst access mode for Flash
memory, valid only when accessing Flash memories in burst mode.
This parameter can be a value of @ref XFMC_Wrap_Mode */
uint32_t WaitSigConfig; /*!< Specifies if the wait signal is asserted by the memory one
clock cycle before the wait state or during the wait state,
valid only when accessing memories in burst mode.
This parameter can be a value of @ref XFMC_Wait_Timing */
uint32_t WriteEnable; /*!< Enables or disables the write operation in the selected bank by the XFMC.
This parameter can be a value of @ref XFMC_Write_Operation */
uint32_t WaitSigEnable; /*!< Enables or disables the wait-state insertion via wait
signal, valid for Flash memory access in burst mode.
This parameter can be a value of @ref XFMC_Wait_Signal */
uint32_t ExtModeEnable; /*!< Enables or disables the extended mode.
This parameter can be a value of @ref XFMC_Extended_Mode */
uint32_t WriteBurstEnable; /*!< Enables or disables the write burst operation.
This parameter can be a value of @ref XFMC_Write_Burst */
XFMC_NorSramTimingInitType*
RWTimingStruct; /*!< Timing Parameters for write and read access if the ExtendedMode is not used*/
XFMC_NorSramTimingInitType* WTimingStruct; /*!< Timing Parameters for write access if the ExtendedMode is used*/
} XFMC_NorSramInitTpye;
/**
* @brief Timing parameters For XFMC NAND and PCCARD Banks
*/
typedef struct
{
uint32_t SetTime; /*!< Defines the number of HCLK cycles to setup address before
the command assertion for NAND-Flash read or write access
to common/Attribute or I/O memory space (depending on
the memory space timing to be configured).
This parameter can be a value between 0 and 0xFF.*/
uint32_t WaitSetTime; /*!< Defines the minimum number of HCLK cycles to assert the
command for NAND-Flash read or write access to
common/Attribute or I/O memory space (depending on the
memory space timing to be configured).
This parameter can be a number between 0x00 and 0xFF */
uint32_t HoldSetTime; /*!< Defines the number of HCLK clock cycles to hold address
(and data for write access) after the command deassertion
for NAND-Flash read or write access to common/Attribute
or I/O memory space (depending on the memory space timing
to be configured).
This parameter can be a number between 0x00 and 0xFF */
uint32_t HiZSetTime; /*!< Defines the number of HCLK clock cycles during which the
databus is kept in HiZ after the start of a NAND-Flash
write access to common/Attribute or I/O memory space (depending
on the memory space timing to be configured).
This parameter can be a number between 0x00 and 0xFF */
} XFMC_NandTimingInitType;
/**
* @brief XFMC NAND Init structure definition
*/
typedef struct
{
uint32_t Bank; /*!< Specifies the NAND memory bank that will be used.
This parameter can be a value of @ref XFMC_NAND_Bank */
uint32_t WaitFeatureEnable; /*!< Enables or disables the Wait feature for the NAND Memory Bank.
This parameter can be any value of @ref XFMC_Wait_feature */
uint32_t MemDataWidth; /*!< Specifies the external memory device width.
This parameter can be any value of @ref XFMC_Data_Width */
uint32_t EccEnable; /*!< Enables or disables the ECC computation.
This parameter can be any value of @ref XFMC_Ecc */
uint32_t EccPageSize; /*!< Defines the page size for the extended ECC.
This parameter can be any value of @ref XFMC_ECC_Page_Size */
uint32_t TCLRSetTime; /*!< Defines the number of HCLK cycles to configure the
delay between CLE low and RE low.
This parameter can be a value between 0 and 0xFF. */
uint32_t TARSetTime; /*!< Defines the number of HCLK cycles to configure the
delay between ALE low and RE low.
This parameter can be a number between 0x0 and 0xFF */
XFMC_NandTimingInitType* CommSpaceTimingStruct; /*!< XFMC Common Space Timing */
XFMC_NandTimingInitType* AttrSpaceTimingStruct; /*!< XFMC Attribute Space Timing */
} XFMC_NandInitType;
/**
* @brief XFMC PCCARD Init structure definition
*/
typedef struct
{
uint32_t WaitFeatureEnable; /*!< Enables or disables the Wait feature for the Memory Bank.
This parameter can be any value of @ref XFMC_Wait_feature */
uint32_t TCLRSetTime; /*!< Defines the number of HCLK cycles to configure the
delay between CLE low and RE low.
This parameter can be a value between 0 and 0xFF. */
uint32_t TARSetTime; /*!< Defines the number of HCLK cycles to configure the
delay between ALE low and RE low.
This parameter can be a number between 0x0 and 0xFF */
XFMC_NandTimingInitType* CommSpaceTimingStruct; /*!< XFMC Common Space Timing */
XFMC_NandTimingInitType* AttrSpaceTimingStruct; /*!< XFMC Attribute Space Timing */
XFMC_NandTimingInitType* XFMC_IOSpaceTimingStruct; /*!< XFMC IO Space Timing */
} XFMC_PCCARDInitType;
/**
* @}
*/
/** @addtogroup XFMC_Exported_Constants
* @{
*/
/** @addtogroup XFMC_NORSRAM_Bank
* @{
*/
#define XFMC_BANK1_NORSRAM1 ((uint32_t)0x00000000)
#define XFMC_BANK1_NORSRAM2 ((uint32_t)0x00000002)
/**
* @}
*/
/** @addtogroup XFMC_NAND_Bank
* @{
*/
#define XFMC_BANK2_NAND ((uint32_t)0x00000010)
#define XFMC_BANK3_NAND ((uint32_t)0x00000100)
/**
* @}
*/
#define IS_XFMC_NORSRAM_BANK(BANK) (((BANK) == XFMC_BANK1_NORSRAM1) || ((BANK) == XFMC_BANK1_NORSRAM2))
#define IS_XFMC_NAND_BANK(BANK) (((BANK) == XFMC_BANK2_NAND) || ((BANK) == XFMC_BANK3_NAND))
#define IS_XFMC_GETFLAG_BANK(BANK) (((BANK) == XFMC_BANK2_NAND) || ((BANK) == XFMC_BANK3_NAND))
#define IS_XFMC_IT_BANK(BANK) (((BANK) == XFMC_BANK2_NAND) || ((BANK) == XFMC_BANK3_NAND))
/** @addtogroup NOR_SRAM_Controller
* @{
*/
/** @addtogroup XFMC_Data_Address_Bus_Multiplexing
* @{
*/
#define XFMC_DATA_ADDR_MUX_DISABLE ((uint32_t)0x00000000)
#define XFMC_DATA_ADDR_MUX_ENABLE ((uint32_t)0x00000002)
#define IS_XFMC_MUX(MUX) (((MUX) == XFMC_DATA_ADDR_MUX_DISABLE) || ((MUX) == XFMC_DATA_ADDR_MUX_ENABLE))
/**
* @}
*/
/** @addtogroup XFMC_Memory_Type
* @{
*/
#define XFMC_MEM_TYPE_SRAM ((uint32_t)0x00000000)
#define XFMC_MEM_TYPE_PSRAM ((uint32_t)0x00000004)
#define XFMC_MEM_TYPE_NOR ((uint32_t)0x00000008)
#define IS_XFMC_MEMORY(MEMORY) \
(((MEMORY) == XFMC_MEM_TYPE_SRAM) || ((MEMORY) == XFMC_MEM_TYPE_PSRAM) || ((MEMORY) == XFMC_MEM_TYPE_NOR))
/**
* @}
*/
/** @addtogroup XFMC_Data_Width
* @{
*/
#define XFMC_MEM_DATA_WIDTH_8B ((uint32_t)0x00000000)
#define XFMC_MEM_DATA_WIDTH_16B ((uint32_t)0x00000010)
#define IS_XFMC_MEMORY_WIDTH(WIDTH) (((WIDTH) == XFMC_MEM_DATA_WIDTH_8B) || ((WIDTH) == XFMC_MEM_DATA_WIDTH_16B))
/**
* @}
*/
/** @addtogroup XFMC_Burst_Access_Mode
* @{
*/
#define XFMC_BURST_ACC_MODE_DISABLE ((uint32_t)0x00000000)
#define XFMC_BURST_ACC_MODE_ENABLE ((uint32_t)0x00000100)
#define IS_XFMC_BURSTMODE(STATE) (((STATE) == XFMC_BURST_ACC_MODE_DISABLE) || ((STATE) == XFMC_BURST_ACC_MODE_ENABLE))
/**
* @}
*/
/** @addtogroup AsynchroWait
* @{
*/
#define XFMC_ASYNCHRO_WAIT_DISABLE ((uint32_t)0x00000000)
#define XFMC_ASYNCHRO_WAIT_ENABLE ((uint32_t)0x00008000)
#define IS_XFMC_ASYNWAIT(STATE) (((STATE) == XFMC_ASYNCHRO_WAIT_DISABLE) || ((STATE) == XFMC_ASYNCHRO_WAIT_ENABLE))
/**
* @}
*/
/** @addtogroup XFMC_Wait_Signal_Polarity
* @{
*/
#define XFMC_WAIT_SIGNAL_POLARITY_LOW ((uint32_t)0x00000000)
#define XFMC_WAIT_SIGNAL_POLARITY_HIGH ((uint32_t)0x00000200)
#define IS_XFMC_WAIT_POLARITY(POLARITY) \
(((POLARITY) == XFMC_WAIT_SIGNAL_POLARITY_LOW) || ((POLARITY) == XFMC_WAIT_SIGNAL_POLARITY_HIGH))
/**
* @}
*/
/** @addtogroup XFMC_Wrap_Mode
* @{
*/
#define XFMC_WRAP_MODE_DISABLE ((uint32_t)0x00000000)
#define XFMC_WRAP_MODE_ENABLE ((uint32_t)0x00000400)
#define IS_XFMC_WRAP_MODE(MODE) (((MODE) == XFMC_WRAP_MODE_DISABLE) || ((MODE) == XFMC_WRAP_MODE_ENABLE))
/**
* @}
*/
/** @addtogroup XFMC_Wait_Timing
* @{
*/
#define XFMC_WAIT_SIG_ACTIVE_BEFORE_WAIT_STATE ((uint32_t)0x00000000)
#define XFMC_WAIT_SIG_ACTIVE_DURING_WAIT_STATE ((uint32_t)0x00000800)
#define IS_XFMC_WAIT_SIGNAL_ACTIVE(ACTIVE) \
(((ACTIVE) == XFMC_WAIT_SIG_ACTIVE_BEFORE_WAIT_STATE) || ((ACTIVE) == XFMC_WAIT_SIG_ACTIVE_DURING_WAIT_STATE))
/**
* @}
*/
/** @addtogroup XFMC_Write_Operation
* @{
*/
#define XFMC_WRITE_DISABLE ((uint32_t)0x00000000)
#define XFMC_WRITE_ENABLE ((uint32_t)0x00001000)
#define IS_XFMC_WRITE_OPERATION(OPERATION) (((OPERATION) == XFMC_WRITE_DISABLE) || ((OPERATION) == XFMC_WRITE_ENABLE))
/**
* @}
*/
/** @addtogroup XFMC_Wait_Signal
* @{
*/
#define XFMC_WAIT_SIGNAL_DISABLE ((uint32_t)0x00000000)
#define XFMC_WAIT_SIGNAL_ENABLE ((uint32_t)0x00002000)
#define IS_XFMC_WAITE_SIGNAL(SIGNAL) (((SIGNAL) == XFMC_WAIT_SIGNAL_DISABLE) || ((SIGNAL) == XFMC_WAIT_SIGNAL_ENABLE))
/**
* @}
*/
/** @addtogroup XFMC_Extended_Mode
* @{
*/
#define XFMC_EXTENDED_DISABLE ((uint32_t)0x00000000)
#define XFMC_EXTENDED_ENABLE ((uint32_t)0x00004000)
#define IS_XFMC_EXTENDED_MODE(MODE) (((MODE) == XFMC_EXTENDED_DISABLE) || ((MODE) == XFMC_EXTENDED_ENABLE))
/**
* @}
*/
/** @addtogroup XFMC_Write_Burst
* @{
*/
#define XFMC_WRITE_BURST_DISABLE ((uint32_t)0x00000000)
#define XFMC_WRITE_BURST_ENABLE ((uint32_t)0x00080000)
#define IS_XFMC_WRITE_BURST(BURST) (((BURST) == XFMC_WRITE_BURST_DISABLE) || ((BURST) == XFMC_WRITE_BURST_ENABLE))
/**
* @}
*/
/** @addtogroup XFMC_Address_Setup_Time
* @{
*/
#define IS_XFMC_ADDRESS_SETUP_TIME(TIME) ((TIME) <= 0xF)
/**
* @}
*/
/** @addtogroup XFMC_Address_Hold_Time
* @{
*/
#define IS_XFMC_ADDRESS_HOLD_TIME(TIME) ((TIME) <= 0xF)
/**
* @}
*/
/** @addtogroup XFMC_Data_Setup_Time
* @{
*/
#define IS_XFMC_DATASETUP_TIME(TIME) (((TIME) > 0) && ((TIME) <= 0xFF))
/**
* @}
*/
/** @addtogroup XFMC_Bus_Turn_around_Duration
* @{
*/
#define IS_XFMC_TURNAROUND_TIME(TIME) ((TIME) <= 0xF)
/**
* @}
*/
/** @addtogroup XFMC_CLK_Division
* @{
*/
#define IS_XFMC_CLK_DIV(DIV) ((DIV) <= 0xF)
/**
* @}
*/
/** @addtogroup XFMC_Data_Latency
* @{
*/
#define IS_XFMC_DATA_LATENCY(LATENCY) ((LATENCY) <= 0xF)
/**
* @}
*/
/** @addtogroup XFMC_Access_Mode
* @{
*/
#define XFMC_ACC_MODE_A ((uint32_t)0x00000000)
#define XFMC_ACC_MODE_B ((uint32_t)0x10000000)
#define XFMC_ACC_MODE_C ((uint32_t)0x20000000)
#define XFMC_ACC_MODE_D ((uint32_t)0x30000000)
#define IS_XFMC_ACCESS_MODE(MODE) \
(((MODE) == XFMC_ACC_MODE_A) || ((MODE) == XFMC_ACC_MODE_B) || ((MODE) == XFMC_ACC_MODE_C) \
|| ((MODE) == XFMC_ACC_MODE_D))
/**
* @}
*/
/**
* @}
*/
/** @addtogroup NAND_PCCARD_Controller
* @{
*/
/** @addtogroup XFMC_Wait_feature
* @{
*/
#define XFMC_WAIT_FEATURE_DISABLE ((uint32_t)0x00000000)
#define XFMC_WAIT_FEATURE_ENABLE ((uint32_t)0x00000002)
#define IS_XFMC_WAIT_FEATURE(FEATURE) \
(((FEATURE) == XFMC_WAIT_FEATURE_DISABLE) || ((FEATURE) == XFMC_WAIT_FEATURE_ENABLE))
/**
* @}
*/
/** @addtogroup XFMC_Ecc
* @{
*/
#define XFMC_ECC_DISABLE ((uint32_t)0x00000000)
#define XFMC_ECC_ENABLE ((uint32_t)0x00000040)
#define IS_XFMC_ECC_STATE(STATE) (((STATE) == XFMC_ECC_DISABLE) || ((STATE) == XFMC_ECC_ENABLE))
/**
* @}
*/
/** @addtogroup XFMC_ECC_Page_Size
* @{
*/
#define XFMC_ECC_PAGE_SIZE_256BYTES ((uint32_t)0x00000000)
#define XFMC_ECC_PAGE_SIZE_512BYTES ((uint32_t)0x00020000)
#define XFMC_ECC_PAGE_SIZE_1024BYTES ((uint32_t)0x00040000)
#define XFMC_ECC_PAGE_SIZE_2048BYTES ((uint32_t)0x00060000)
#define XFMC_ECC_PAGE_SIZE_4096BYTES ((uint32_t)0x00080000)
#define XFMC_ECC_PAGE_SIZE_8192BYTES ((uint32_t)0x000A0000)
#define IS_XFMC_ECCPAGE_SIZE(SIZE) \
(((SIZE) == XFMC_ECC_PAGE_SIZE_256BYTES) || ((SIZE) == XFMC_ECC_PAGE_SIZE_512BYTES) \
|| ((SIZE) == XFMC_ECC_PAGE_SIZE_1024BYTES) || ((SIZE) == XFMC_ECC_PAGE_SIZE_2048BYTES) \
|| ((SIZE) == XFMC_ECC_PAGE_SIZE_4096BYTES) || ((SIZE) == XFMC_ECC_PAGE_SIZE_8192BYTES))
/**
* @}
*/
/** @addtogroup XFMC_TCLR_Setup_Time
* @{
*/
#define IS_XFMC_TCLR_TIME(TIME) ((TIME) <= 0xFF)
/**
* @}
*/
/** @addtogroup XFMC_TAR_Setup_Time
* @{
*/
#define IS_XFMC_TAR_TIME(TIME) ((TIME) <= 0xFF)
/**
* @}
*/
/** @addtogroup XFMC_Setup_Time
* @{
*/
#define IS_XFMC_SETUP_TIME(TIME) ((TIME) <= 0xFF)
/**
* @}
*/
/** @addtogroup XFMC_Wait_Setup_Time
* @{
*/
#define IS_XFMC_WAIT_TIME(TIME) ((TIME) <= 0xFF)
/**
* @}
*/
/** @addtogroup XFMC_Hold_Setup_Time
* @{
*/
#define IS_XFMC_HOLD_TIME(TIME) ((TIME) <= 0xFF)
/**
* @}
*/
/** @addtogroup XFMC_HiZ_Setup_Time
* @{
*/
#define IS_XFMC_HIZ_TIME(TIME) ((TIME) <= 0xFF)
/**
* @}
*/
/**
* @}
*/
/** @addtogroup XFMC_Flags
* @{
*/
#define XFMC_FLAG_FEMPT ((uint32_t)0x00000040)
#define IS_XFMC_GET_FLAG(FLAG)  ((FLAG) == XFMC_FLAG_FEMPT)
#define IS_XFMC_CLEAR_FLAG(FLAG) ((((FLAG) & (uint32_t)0xFFFFFFF8) == 0x00000000) && ((FLAG) != 0x00000000))
/**
* @}
*/
/**
* @}
*/
/** @addtogroup XFMC_Exported_Macros
* @{
*/
/**
* @}
*/
/** @addtogroup XFMC_Exported_Functions
* @{
*/
void XFMC_DeInitNorSram(uint32_t Bank);
void XFMC_DeInitNand(uint32_t Bank);
void XFMC_PCCARDDeInit(void);
void XFMC_InitNorSram(XFMC_NorSramInitTpye* XFMC_NORSRAMInitStruct);
void XFMC_InitNand(XFMC_NandInitType* XFMC_NANDInitStruct);
void XFMC_PCCARDInit(XFMC_PCCARDInitType* XFMC_PCCARDInitStruct);
void XFMC_InitNorSramStruct(XFMC_NorSramInitTpye* XFMC_NORSRAMInitStruct);
void XFMC_InitNandStruct(XFMC_NandInitType* XFMC_NANDInitStruct);
// void XFMC_PCCARDStructInit(XFMC_PCCARDInitType* XFMC_PCCARDInitStruct);
void XFMC_EnableNorSram(uint32_t Bank, FunctionalState Cmd);
void XFMC_EnableNand(uint32_t Bank, FunctionalState Cmd);
// void XFMC_PCCARDCmd(FunctionalState Cmd);
void XFMC_EnableNandEcc(uint32_t Bank, FunctionalState Cmd);
uint32_t XFMC_GetEcc(uint32_t Bank);
FlagStatus XFMC_GetFlag(uint32_t Bank, uint32_t XFMC_FLAG);
void XFMC_ClrFlag(uint32_t Bank, uint32_t XFMC_FLAG);
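/*
 * Usage sketch (illustrative only, not part of the original header). Timing
 * values and the remaining init fields depend on the external memory
 * datasheet; ENABLE is assumed to be the usual FunctionalState value and
 * XFMC_InitNorSramStruct() is assumed to fill default field values:
 *
 *     XFMC_NorSramTimingInitType timing;        // fill from memory datasheet
 *     XFMC_NorSramInitTpye init;
 *     XFMC_InitNorSramStruct(&init);            // load default field values
 *     init.WriteEnable    = XFMC_WRITE_ENABLE;
 *     init.RWTimingStruct = &timing;
 *     XFMC_InitNorSram(&init);
 *     XFMC_EnableNorSram(XFMC_BANK1_NORSRAM1, ENABLE);
 */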
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /*__N32G45X_XFMC_H__ */
/**
* @}
*/
/**
* @}
*/
| {
"language": "C"
} |
/*
This file is part of CanFestival, a library implementing CanOpen Stack.
Copyright (C): Edouard TISSERANT and Francis DUPIN
See COPYING file for copyrights details.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __data_h__
#define __data_h__
#ifdef __cplusplus
extern "C" {
#endif
/* Declaring the CO_Data type first lets us include all necessary headers;
   struct struct_CO_Data can then be defined later.
*/
typedef struct struct_CO_Data CO_Data;
#include "applicfg.h"
#include "def.h"
#include "can.h"
#include "objdictdef.h"
#include "objacces.h"
#include "sdo.h"
#include "pdo.h"
#include "states.h"
#include "lifegrd.h"
#include "sync.h"
#include "nmtSlave.h"
#include "nmtMaster.h"
#include "emcy.h"
#ifdef CO_ENABLE_LSS
#include "lss.h"
#endif
/**
* @ingroup od
 * @brief This structure contains all the information necessary to define a CANopen node
*/
struct struct_CO_Data {
/* Object dictionary */
UNS8 *bDeviceNodeId;
const indextable *objdict;
s_PDO_status *PDO_status;
const quick_index *firstIndex;
const quick_index *lastIndex;
const UNS16 *ObjdictSize;
const UNS8 *iam_a_slave;
valueRangeTest_t valueRangeTest;
/* SDO */
s_transfer transfers[SDO_MAX_SIMULTANEOUS_TRANSFERTS];
/* s_sdo_parameter *sdo_parameters; */
/* State machine */
e_nodeState nodeState;
s_state_communication CurrentCommunicationState;
initialisation_t initialisation;
preOperational_t preOperational;
operational_t operational;
stopped_t stopped;
void (*NMT_Slave_Node_Reset_Callback)(CO_Data*);
void (*NMT_Slave_Communications_Reset_Callback)(CO_Data*);
/* NMT-heartbeat */
UNS8 *ConsumerHeartbeatCount;
UNS32 *ConsumerHeartbeatEntries;
TIMER_HANDLE *ConsumerHeartBeatTimers;
UNS16 *ProducerHeartBeatTime;
TIMER_HANDLE ProducerHeartBeatTimer;
heartbeatError_t heartbeatError;
e_nodeState NMTable[NMT_MAX_NODE_ID];
/* SYNC */
TIMER_HANDLE syncTimer;
UNS32 *COB_ID_Sync;
UNS32 *Sync_Cycle_Period;
/*UNS32 *Sync_window_length;;*/
post_sync_t post_sync;
post_TPDO_t post_TPDO;
post_SlaveBootup_t post_SlaveBootup;
/* General */
UNS8 toggle;
CAN_PORT canHandle;
scanIndexOD_t scanIndexOD;
storeODSubIndex_t storeODSubIndex;
/* DCF concise */
const indextable* dcf_odentry;
UNS8* dcf_cursor;
UNS32 dcf_entries_count;
UNS8 dcf_request;
/* EMCY */
e_errorState error_state;
UNS8 error_history_size;
UNS8* error_number;
UNS32* error_first_element;
UNS8* error_register;
UNS32* error_cobid;
s_errors error_data[EMCY_MAX_ERRORS];
post_emcy_t post_emcy;
#ifdef CO_ENABLE_LSS
/* LSS */
lss_transfer_t lss_transfer;
lss_StoreConfiguration_t lss_StoreConfiguration;
#endif
};
#define NMTable_Initializer Unknown_state,
#define s_transfer_Initializer {\
0, /* nodeId */\
  0, /* whoami */\
SDO_RESET, /* state */\
0, /* toggle */\
0, /* abortCode */\
0, /* index */\
0, /* subIndex */\
0, /* count */\
0, /* offset */\
  {0}, /* data (static use, so that the whole table is initialized to 0) */\
0, /* dataType */\
-1, /* timer */\
NULL /* Callback */\
},
#define ERROR_DATA_INITIALIZER \
{\
0, /* errCode */\
0, /* errRegMask */\
0 /* active */\
},
#ifdef CO_ENABLE_LSS
#ifdef CO_ENABLE_LSS_FS
#define lss_fs_Initializer \
,0, /* IDNumber */\
128, /* BitChecked */\
0, /* LSSSub */\
0, /* LSSNext */\
0, /* LSSPos */\
LSS_FS_RESET, /* FastScan_SM */\
-1, /* timerFS */\
{{0,0,0,0},{0,0,0,0}} /* lss_fs_transfer */
#else
#define lss_fs_Initializer
#endif
#define lss_Initializer {\
LSS_RESET, /* state */\
0, /* command */\
LSS_WAITING_MODE, /* mode */\
0, /* dat1 */\
0, /* dat2 */\
0, /* NodeID */\
0, /* addr_sel_match */\
0, /* addr_ident_match */\
"none", /* BaudRate */\
0, /* SwitchDelay */\
SDELAY_OFF, /* SwitchDelayState */\
NULL, /* canHandle_t */\
-1, /* TimerMSG */\
-1, /* TimerSDELAY */\
NULL, /* Callback */\
0 /* LSSanswer */\
lss_fs_Initializer /*FastScan service initialization */\
},\
NULL /* _lss_StoreConfiguration*/
#else
#define lss_Initializer
#endif
/* A macro to initialize the data in client app.*/
/* CO_Data structure */
#define CANOPEN_NODE_DATA_INITIALIZER(NODE_PREFIX) {\
/* Object dictionary*/\
& NODE_PREFIX ## _bDeviceNodeId, /* bDeviceNodeId */\
NODE_PREFIX ## _objdict, /* objdict */\
NODE_PREFIX ## _PDO_status, /* PDO_status */\
& NODE_PREFIX ## _firstIndex, /* firstIndex */\
& NODE_PREFIX ## _lastIndex, /* lastIndex */\
& NODE_PREFIX ## _ObjdictSize, /* ObjdictSize */\
& NODE_PREFIX ## _iam_a_slave, /* iam_a_slave */\
NODE_PREFIX ## _valueRangeTest, /* valueRangeTest */\
\
/* SDO, structure s_transfer */\
{\
REPEAT_SDO_MAX_SIMULTANEOUS_TRANSFERTS_TIMES(s_transfer_Initializer)\
},\
\
/* State machine*/\
Unknown_state, /* nodeState */\
/* structure s_state_communication */\
{\
0, /* csBoot_Up */\
0, /* csSDO */\
0, /* csEmergency */\
0, /* csSYNC */\
0, /* csHeartbeat */\
0, /* csPDO */\
0 /* csLSS */\
},\
_initialisation, /* initialisation */\
_preOperational, /* preOperational */\
_operational, /* operational */\
_stopped, /* stopped */\
NULL, /* NMT node reset callback */\
NULL, /* NMT communications reset callback */\
\
/* NMT-heartbeat */\
& NODE_PREFIX ## _highestSubIndex_obj1016, /* ConsumerHeartbeatCount */\
NODE_PREFIX ## _obj1016, /* ConsumerHeartbeatEntries */\
NODE_PREFIX ## _heartBeatTimers, /* ConsumerHeartBeatTimers */\
& NODE_PREFIX ## _obj1017, /* ProducerHeartBeatTime */\
TIMER_NONE, /* ProducerHeartBeatTimer */\
_heartbeatError, /* heartbeatError */\
\
{REPEAT_NMT_MAX_NODE_ID_TIMES(NMTable_Initializer)},\
/* is well initialized at "Unknown_state". Is it ok ? (FD)*/\
\
/* SYNC */\
TIMER_NONE, /* syncTimer */\
& NODE_PREFIX ## _obj1005, /* COB_ID_Sync */\
& NODE_PREFIX ## _obj1006, /* Sync_Cycle_Period */\
/*& NODE_PREFIX ## _obj1007, */ /* Sync_window_length */\
_post_sync, /* post_sync */\
_post_TPDO, /* post_TPDO */\
_post_SlaveBootup, /* post_SlaveBootup */\
\
/* General */\
0, /* toggle */\
  NULL, /* canHandle */\
NODE_PREFIX ## _scanIndexOD, /* scanIndexOD */\
_storeODSubIndex, /* storeODSubIndex */\
/* DCF concise */\
NULL, /*dcf_odentry*/\
NULL, /*dcf_cursor*/\
1, /*dcf_entries_count*/\
0, /* dcf_request*/\
\
/* EMCY */\
Error_free, /* error_state */\
sizeof(NODE_PREFIX ## _obj1003) / sizeof(NODE_PREFIX ## _obj1003[0]), /* error_history_size */\
& NODE_PREFIX ## _highestSubIndex_obj1003, /* error_number */\
& NODE_PREFIX ## _obj1003[0], /* error_first_element */\
& NODE_PREFIX ## _obj1001, /* error_register */\
& NODE_PREFIX ## _obj1014, /* error_cobid */\
/* error_data: structure s_errors */\
{\
REPEAT_EMCY_MAX_ERRORS_TIMES(ERROR_DATA_INITIALIZER)\
},\
_post_emcy, /* post_emcy */\
/* LSS */\
lss_Initializer\
}
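/* Usage sketch (illustrative, not part of the original header): the object
 * dictionary generator emits the NODE_PREFIX-prefixed symbols referenced
 * above, so an application typically instantiates its node data as
 *
 *     CO_Data MasterNode_Data = CANOPEN_NODE_DATA_INITIALIZER(MasterNode);
 *
 * where "MasterNode" stands for whatever prefix was given to the generator.
 */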
#ifdef __cplusplus
};
#endif
#endif /* __data_h__ */
| {
"language": "C"
} |
/*
Copyright (C) 2014, The University of Texas at Austin
This file is part of libflame and is available under the 3-Clause
BSD license, which can be found in the LICENSE file at the top-level
directory, or at http://opensource.org/licenses/BSD-3-Clause
*/
#include "FLAME.h"
FLA_Error FLA_QR_UT_piv_colnorm( FLA_Obj alpha, FLA_Obj A, FLA_Obj b )
{
FLA_Obj AL, AR, A0, a1, A2;
FLA_Obj bT, b0,
bB, beta1,
b2;
FLA_Obj val2_a1, val2_a1_real;
  // A and b must have matching dimensions.
if ( FLA_Check_error_level() >= FLA_MIN_ERROR_CHECKING )
FLA_QR_UT_piv_colnorm_check( alpha, A, b );
FLA_Obj_create( FLA_Obj_datatype( A ), 1, 1, 0, 0, &val2_a1 );
FLA_Obj_create( FLA_Obj_datatype( b ), 1, 1, 0, 0, &val2_a1_real );
FLA_Part_1x2( A, &AL, &AR, 0, FLA_LEFT );
FLA_Part_2x1( b, &bT,
&bB, 0, FLA_TOP );
while ( FLA_Obj_width( AL ) < FLA_Obj_width( A ) ){
FLA_Repart_1x2_to_1x3( AL, /**/ AR, &A0, /**/ &a1, &A2,
1, FLA_RIGHT );
FLA_Repart_2x1_to_3x1( bT, &b0,
/* ** */ /* ** */
&beta1,
bB, &b2, 1, FLA_BOTTOM );
/*------------------------------------------------------------*/
    // Using a dot product here is risky when the entries of a1 are close
    // to the underflow/overflow limits. The matrix should be properly
    // scaled before using QR_UT_piv.
FLA_Dot( a1, a1, val2_a1 );
FLA_Obj_extract_real_part( val2_a1, val2_a1_real );
FLA_Axpy( alpha, val2_a1_real, beta1 );
/*------------------------------------------------------------*/
FLA_Cont_with_1x3_to_1x2( &AL, /**/ &AR, A0, a1, /**/ A2,
FLA_LEFT );
FLA_Cont_with_3x1_to_2x1( &bT, b0,
beta1,
/* ** */ /* ** */
&bB, b2, FLA_TOP );
}
FLA_Obj_free( &val2_a1 );
FLA_Obj_free( &val2_a1_real );
return FLA_SUCCESS;
}
| {
"language": "C"
} |
#include "pair.h"
#define DEBUG 0
static int predict_length(VALUE tokens)
{
int i, l, result;
for (i = 0, result = 0; i < RARRAY_LEN(tokens); i++) {
VALUE t = rb_ary_entry(tokens, i);
l = (int) RSTRING_LEN(t) - 1;
if (l > 0) result += l;
}
return result;
}
PairArray *PairArray_new(VALUE tokens)
{
int i, j, k, len = predict_length(tokens);
PairArray *pair_array = ALLOC(PairArray);
Pair *pairs = ALLOC_N(Pair, len);
MEMZERO(pairs, Pair, len);
pair_array->pairs = pairs;
pair_array->len = len;
for (i = 0, k = 0; i < RARRAY_LEN(tokens); i++) {
VALUE t = rb_ary_entry(tokens, i);
char *string = RSTRING_PTR(t);
for (j = 0; j < RSTRING_LEN(t) - 1; j++) {
pairs[k].fst = string[j];
pairs[k].snd = string[j + 1];
pairs[k].status = PAIR_ACTIVE;
k++;
}
}
return pair_array;
}
void pair_array_reactivate(PairArray *self)
{
int i;
for (i = 0; i < self->len; i++) {
self->pairs[i].status = PAIR_ACTIVE;
}
}
double pair_array_match(PairArray *self, PairArray *other)
{
int i, j, matches = 0;
int sum = self->len + other->len;
if (sum == 0) return 1.0;
for (i = 0; i < self->len; i++) {
for (j = 0; j < other->len; j++) {
#if DEBUG
pair_print(self->pairs[i]);
putc(' ', stdout);
pair_print(other->pairs[j]);
printf(" -> %d\n", pair_equal(self->pairs[i], other->pairs[j]));
#endif
if (pair_equal(self->pairs[i], other->pairs[j])) {
matches++;
other->pairs[j].status = PAIR_INACTIVE;
break;
}
}
}
return ((double) (2 * matches)) / sum;
}
void pair_print(Pair pair)
{
printf("%c%c (%d)", pair.fst, pair.snd, pair.status);
}
void pair_array_destroy(PairArray *pair_array)
{
if (pair_array->pairs) {
xfree(pair_array->pairs);
}
xfree(pair_array);
}
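/*
 * Usage sketch (illustrative, not part of the original file): given two Ruby
 * arrays of token strings, the pair arrays yield a Dice-style bigram
 * similarity in [0.0, 1.0]:
 *
 *     PairArray *a = PairArray_new(tokens_a);
 *     PairArray *b = PairArray_new(tokens_b);
 *     double score = pair_array_match(a, b); // 2 * matches / (|a| + |b|)
 *     pair_array_reactivate(b);              // clear PAIR_INACTIVE marks before reuse
 *     pair_array_destroy(a);
 *     pair_array_destroy(b);
 */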
| {
"language": "C"
} |
/*******************************************************************************
* gui/common/Emoticons.h *
* *
* Copyright (C) 2010, Retroshare Team <retroshare.project@gmail.com> *
* *
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU Affero General Public License as *
* published by the Free Software Foundation, either version 3 of the *
* License, or (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU Affero General Public License for more details. *
* *
* You should have received a copy of the GNU Affero General Public License *
* along with this program. If not, see <https://www.gnu.org/licenses/>. *
* *
*******************************************************************************/
#ifndef _EMOTICONS_H
#define _EMOTICONS_H
#include <QVector>
class QWidget;
class QString;
class Emoticons
{
public:
static void load();
static void showSmileyWidget(QWidget *parent, QWidget *button, const char *slotAddMethod, bool above);
static void showStickerWidget(QWidget *parent, QWidget *button, const char *slotAddMethod, bool above);
static QString importedStickerPath();
private:
static void loadToolTips(QWidget *container);
static void loadSmiley();
static void refreshStickerTabs(QVector<QString>& stickerTabs, QString foldername);
static void refreshStickerTabs(QVector<QString>& stickerTabs);
};
#endif
| {
"language": "C"
} |
predicate fzn_network_flow(array[int,1..2] of int: arc,
array[int] of int: balance,
array[int] of var int: flow) =
let { int: source_node = 1;
int: sink_node = 2;
set of int: ARCS = index_set_1of2(arc);
set of int: NODES = index_set(balance);
} in
forall (i in NODES) (
sum (j in ARCS where i == arc[j,source_node]) (flow[j]) -
sum (j in ARCS where i == arc[j,sink_node]) (flow[j])
= balance[i]
);
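% Example (sketch, not part of the original library file): a three-node
% network with arcs 1->2, 2->3 and 1->3 shipping two units from node 1 to
% node 3 could be posted directly as
%
%   constraint fzn_network_flow(array2d(1..3, 1..2, [1,2, 2,3, 1,3]),
%                               [2, 0, -2], flow);
%
% although models normally reach this predicate through the network_flow
% global constraint.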
| {
"language": "C"
} |
/* BEGIN CSTYLED */
/*
** $Id: lcorolib.c,v 1.5.1.1 2013/04/12 18:48:47 roberto Exp $
** Coroutine Library
** See Copyright Notice in lua.h
*/
#define lcorolib_c
#define LUA_LIB
#include <sys/lua/lua.h>
#include <sys/lua/lauxlib.h>
#include <sys/lua/lualib.h>
static int auxresume (lua_State *L, lua_State *co, int narg) {
int status;
if (!lua_checkstack(co, narg)) {
lua_pushliteral(L, "too many arguments to resume");
return -1; /* error flag */
}
if (lua_status(co) == LUA_OK && lua_gettop(co) == 0) {
lua_pushliteral(L, "cannot resume dead coroutine");
return -1; /* error flag */
}
lua_xmove(L, co, narg);
status = lua_resume(co, L, narg);
if (status == LUA_OK || status == LUA_YIELD) {
int nres = lua_gettop(co);
if (!lua_checkstack(L, nres + 1)) {
lua_pop(co, nres); /* remove results anyway */
lua_pushliteral(L, "too many results to resume");
return -1; /* error flag */
}
lua_xmove(co, L, nres); /* move yielded values */
return nres;
}
else {
lua_xmove(co, L, 1); /* move error message */
return -1; /* error flag */
}
}
static int luaB_coresume (lua_State *L) {
lua_State *co = lua_tothread(L, 1);
int r;
luaL_argcheck(L, co, 1, "coroutine expected");
r = auxresume(L, co, lua_gettop(L) - 1);
if (r < 0) {
lua_pushboolean(L, 0);
lua_insert(L, -2);
return 2; /* return false + error message */
}
else {
lua_pushboolean(L, 1);
lua_insert(L, -(r + 1));
return r + 1; /* return true + 'resume' returns */
}
}
static int luaB_auxwrap (lua_State *L) {
lua_State *co = lua_tothread(L, lua_upvalueindex(1));
int r = auxresume(L, co, lua_gettop(L));
if (r < 0) {
if (lua_isstring(L, -1)) { /* error object is a string? */
luaL_where(L, 1); /* add extra info */
lua_insert(L, -2);
lua_concat(L, 2);
}
return lua_error(L); /* propagate error */
}
return r;
}
static int luaB_cocreate (lua_State *L) {
lua_State *NL;
luaL_checktype(L, 1, LUA_TFUNCTION);
NL = lua_newthread(L);
lua_pushvalue(L, 1); /* move function to top */
lua_xmove(L, NL, 1); /* move function from L to NL */
return 1;
}
static int luaB_cowrap (lua_State *L) {
luaB_cocreate(L);
lua_pushcclosure(L, luaB_auxwrap, 1);
return 1;
}
static int luaB_yield (lua_State *L) {
return lua_yield(L, lua_gettop(L));
}
static int luaB_costatus (lua_State *L) {
lua_State *co = lua_tothread(L, 1);
luaL_argcheck(L, co, 1, "coroutine expected");
if (L == co) lua_pushliteral(L, "running");
else {
switch (lua_status(co)) {
case LUA_YIELD:
lua_pushliteral(L, "suspended");
break;
case LUA_OK: {
lua_Debug ar;
if (lua_getstack(co, 0, &ar) > 0) /* does it have frames? */
lua_pushliteral(L, "normal"); /* it is running */
else if (lua_gettop(co) == 0)
lua_pushliteral(L, "dead");
else
lua_pushliteral(L, "suspended"); /* initial state */
break;
}
default: /* some error occurred */
lua_pushliteral(L, "dead");
break;
}
}
return 1;
}
static int luaB_corunning (lua_State *L) {
int ismain = lua_pushthread(L);
lua_pushboolean(L, ismain);
return 2;
}
static const luaL_Reg co_funcs[] = {
{"create", luaB_cocreate},
{"resume", luaB_coresume},
{"running", luaB_corunning},
{"status", luaB_costatus},
{"wrap", luaB_cowrap},
{"yield", luaB_yield},
{NULL, NULL}
};
LUAMOD_API int luaopen_coroutine (lua_State *L) {
luaL_newlib(L, co_funcs);
return 1;
}
#if defined(_KERNEL)
EXPORT_SYMBOL(luaopen_coroutine);
#endif
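/*
** Usage sketch (host-application side; not part of this file). When a custom
** lua_State is built without luaL_openlibs(), the coroutine library can be
** loaded explicitly through the standard Lua 5.2 API:
**
**   lua_State *L = luaL_newstate();
**   luaL_requiref(L, LUA_COLIBNAME, luaopen_coroutine, 1);
**   lua_pop(L, 1);    remove the library table left on the stack
*/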
/* END CSTYLED */
| {
"language": "C"
} |
/*
* Functions related to setting various queue properties from drivers
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include "blk.h"
unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);
unsigned long blk_max_pfn;
/**
* blk_queue_prep_rq - set a prepare_request function for queue
* @q: queue
* @pfn: prepare_request function
*
* It's possible for a queue to register a prepare_request callback which
* is invoked before the request is handed to the request_fn. The goal of
* the function is to prepare a request for I/O, it can be used to build a
* cdb from the request data for instance.
*
*/
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
/**
* blk_queue_unprep_rq - set an unprepare_request function for queue
* @q: queue
* @ufn: unprepare_request function
*
* It's possible for a queue to register an unprepare_request callback
* which is invoked before the request is finally completed. The goal
* of the function is to deallocate any data that was allocated in the
* prepare_request callback.
*
*/
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);
/**
* blk_queue_merge_bvec - set a merge_bvec function for queue
* @q: queue
* @mbfn: merge_bvec_fn
*
* Usually queues have static limitations on the max sectors or segments that
* we can put in a request. Stacking drivers may have some settings that
* are dynamic, and thus we have to query the queue whether it is ok to
* add a new bio_vec to a bio at a given offset or not. If the block device
* has such limitations, it needs to register a merge_bvec_fn to control
* the size of bio's sent to it. Note that a block device *must* allow a
* single page to be added to an empty bio. The block device driver may want
* to use the bio_split() function to deal with these bio's. By default
* no merge_bvec_fn is defined for a queue, and only the fixed limits are
* honored.
*/
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
/**
* blk_set_default_limits - reset limits to default values
* @lim: the queue_limits structure to reset
*
* Description:
* Returns a queue_limit struct to its default state.
*/
void blk_set_default_limits(struct queue_limits *lim)
{
lim->max_segments = BLK_MAX_SEGMENTS;
lim->max_integrity_segments = 0;
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
lim->max_write_same_sectors = 0;
lim->max_discard_sectors = 0;
lim->discard_granularity = 0;
lim->discard_alignment = 0;
lim->discard_misaligned = 0;
lim->discard_zeroes_data = 0;
lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
lim->alignment_offset = 0;
lim->io_opt = 0;
lim->misaligned = 0;
lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);
/**
* blk_set_stacking_limits - set default limits for stacking devices
* @lim: the queue_limits structure to reset
*
* Description:
* Returns a queue_limit struct to its default state. Should be used
* by stacking drivers like DM that have no internal limits.
*/
void blk_set_stacking_limits(struct queue_limits *lim)
{
blk_set_default_limits(lim);
/* Inherit limits from component devices */
lim->discard_zeroes_data = 1;
lim->max_segments = USHRT_MAX;
lim->max_hw_sectors = UINT_MAX;
lim->max_segment_size = UINT_MAX;
lim->max_sectors = UINT_MAX;
lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
/**
* blk_queue_make_request - define an alternate make_request function for a device
* @q: the request queue for the device to be affected
* @mfn: the alternate make_request function
*
* Description:
* The normal way for &struct bios to be passed to a device
* driver is for them to be collected into requests on a request
* queue, and then to allow the device driver to select requests
* off that queue when it is ready. This works well for many block
* devices. However some block devices (typically virtual devices
* such as md or lvm) do not benefit from the processing on the
* request queue, and are served best by having the requests passed
* directly to them. This can be achieved by providing a function
* to blk_queue_make_request().
*
* Caveat:
* The driver that does this *must* be able to deal appropriately
* with buffers in "highmemory". This can be accomplished by either calling
* __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
* blk_queue_bounce() to create a buffer in normal memory.
**/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
/*
* set defaults
*/
q->nr_requests = BLKDEV_MAX_RQ;
q->make_request_fn = mfn;
blk_queue_dma_alignment(q, 511);
blk_queue_congestion_threshold(q);
q->nr_batching = BLK_BATCH_REQ;
blk_set_default_limits(&q->limits);
/*
* by default assume old behaviour and bounce for any highmem page
*/
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
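/*
 * Usage sketch (illustrative; not from this file): a bio-based virtual
 * driver typically pairs blk_alloc_queue() with blk_queue_make_request()
 * during initialization:
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_make_request(q, my_make_request);
 *
 * where my_make_request() is a driver-supplied function with the
 * make_request_fn signature.
 */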
/**
* blk_queue_bounce_limit - set bounce buffer limit for queue
* @q: the request queue for the device
* @dma_mask: the maximum address the device can handle
*
* Description:
* Different hardware can have different requirements as to what pages
* it can do I/O directly to. A low level driver can call
* blk_queue_bounce_limit to have lower memory pages allocated as bounce
* buffers for doing I/O to pages residing above @dma_mask.
**/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
int dma = 0;
q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
/*
* Assume anything <= 4GB can be handled by IOMMU. Actually
* some IOMMUs can handle everything, but I don't know of a
* way to test this here.
*/
if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
dma = 1;
q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
if (b_pfn < blk_max_low_pfn)
dma = 1;
q->limits.bounce_pfn = b_pfn;
#endif
if (dma) {
init_emergency_isa_pool();
q->bounce_gfp = GFP_NOIO | GFP_DMA;
q->limits.bounce_pfn = b_pfn;
}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
/**
* blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
* @limits: the queue limits
* @max_hw_sectors: max hardware sectors in the usual 512b unit
*
* Description:
* Enables a low level driver to set a hard upper limit,
* max_hw_sectors, on the size of requests. max_hw_sectors is set by
* the device driver based upon the combined capabilities of I/O
* controller and storage device.
*
* max_sectors is a soft limit imposed by the block layer for
* filesystem type requests. This value can be overridden on a
* per-device basis in /sys/block/<device>/queue/max_sectors_kb.
* The soft limit can not exceed max_hw_sectors.
**/
void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_hw_sectors);
}
limits->max_hw_sectors = max_hw_sectors;
limits->max_sectors = min_t(unsigned int, max_hw_sectors,
BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);
/**
* blk_queue_max_hw_sectors - set max sectors for a request for this queue
* @q: the request queue for the device
* @max_hw_sectors: max hardware sectors in the usual 512b unit
*
* Description:
* See description for blk_limits_max_hw_sectors().
**/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
/**
* blk_queue_max_discard_sectors - set max sectors for a single discard
* @q: the request queue for the device
* @max_discard_sectors: maximum number of sectors to discard
**/
void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors)
{
q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);
/**
* blk_queue_max_write_same_sectors - set max sectors for a single write same
* @q: the request queue for the device
* @max_write_same_sectors: maximum number of sectors to write per command
**/
void blk_queue_max_write_same_sectors(struct request_queue *q,
unsigned int max_write_same_sectors)
{
q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
/**
* blk_queue_max_segments - set max hw segments for a request for this queue
* @q: the request queue for the device
* @max_segments: max number of segments
*
* Description:
* Enables a low level driver to set an upper limit on the number of
* hw data segments in a request.
**/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
if (!max_segments) {
max_segments = 1;
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_segments);
}
q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);
/**
* blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
* @q: the request queue for the device
* @max_size: max size of segment in bytes
*
* Description:
* Enables a low level driver to set an upper limit on the size of a
* coalesced segment
**/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
if (max_size < PAGE_CACHE_SIZE) {
max_size = PAGE_CACHE_SIZE;
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_size);
}
q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
/**
* blk_queue_logical_block_size - set logical block size for the queue
* @q: the request queue for the device
* @size: the logical block size, in bytes
*
* Description:
* This should be set to the lowest possible block size that the
* storage device can address. The default of 512 covers most
* hardware.
**/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
q->limits.logical_block_size = size;
if (q->limits.physical_block_size < size)
q->limits.physical_block_size = size;
if (q->limits.io_min < q->limits.physical_block_size)
q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
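/*
 * Example (sketch, not from this file): a driver for a 4096-byte sector
 * device would typically call
 *
 *	blk_queue_logical_block_size(q, 4096);
 *
 * during queue setup; as shown above, this also raises physical_block_size
 * and io_min to at least the logical block size.
 */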
/**
* blk_queue_physical_block_size - set physical block size for the queue
* @q: the request queue for the device
* @size: the physical block size, in bytes
*
* Description:
* This should be set to the lowest possible sector size that the
* hardware can operate on without reverting to read-modify-write
* operations.
*/
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
q->limits.physical_block_size = size;
if (q->limits.physical_block_size < q->limits.logical_block_size)
q->limits.physical_block_size = q->limits.logical_block_size;
if (q->limits.io_min < q->limits.physical_block_size)
q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
/**
* blk_queue_alignment_offset - set physical block alignment offset
* @q: the request queue for the device
* @offset: alignment offset in bytes
*
* Description:
* Some devices are naturally misaligned to compensate for things like
* the legacy DOS partition table 63-sector offset. Low-level drivers
* should call this function for devices whose first sector is not
* naturally aligned.
*/
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
q->limits.alignment_offset =
offset & (q->limits.physical_block_size - 1);
q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
/**
* blk_limits_io_min - set minimum request size for a device
* @limits: the queue limits
* @min: smallest I/O size in bytes
*
* Description:
* Some devices have an internal block size bigger than the reported
* hardware sector size. This function can be used to signal the
* smallest I/O the device can perform without incurring a performance
* penalty.
*/
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
limits->io_min = min;
if (limits->io_min < limits->logical_block_size)
limits->io_min = limits->logical_block_size;
if (limits->io_min < limits->physical_block_size)
limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);
/**
* blk_queue_io_min - set minimum request size for the queue
* @q: the request queue for the device
* @min: smallest I/O size in bytes
*
* Description:
* Storage devices may report a granularity or preferred minimum I/O
* size which is the smallest request the device can perform without
* incurring a performance penalty. For disk drives this is often the
* physical block size. For RAID arrays it is often the stripe chunk
* size. A properly aligned multiple of minimum_io_size is the
* preferred request size for workloads where a high number of I/O
* operations is desired.
*/
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);
/**
* blk_limits_io_opt - set optimal request size for a device
* @limits: the queue limits
 * @opt: optimal request size in bytes
*
* Description:
* Storage devices may report an optimal I/O size, which is the
* device's preferred unit for sustained I/O. This is rarely reported
* for disk drives. For RAID arrays it is usually the stripe width or
* the internal track size. A properly aligned multiple of
* optimal_io_size is the preferred request size for workloads where
* sustained throughput is desired.
*/
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);
/**
* blk_queue_io_opt - set optimal request size for the queue
* @q: the request queue for the device
* @opt: optimal request size in bytes
*
* Description:
* Storage devices may report an optimal I/O size, which is the
* device's preferred unit for sustained I/O. This is rarely reported
* for disk drives. For RAID arrays it is usually the stripe width or
* the internal track size. A properly aligned multiple of
* optimal_io_size is the preferred request size for workloads where
* sustained throughput is desired.
*/
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
/**
* blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
* @t: the stacking driver (top)
* @b: the underlying device (bottom)
**/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);
/**
* blk_stack_limits - adjust queue_limits for stacked devices
* @t: the stacking driver limits (top device)
* @b: the underlying queue limits (bottom, component device)
* @start: first data sector within component device
*
* Description:
* This function is used by stacking drivers like MD and DM to ensure
* that all component devices have compatible block sizes and
* alignments. The stacking driver must provide a queue_limits
* struct (top) and then iteratively call the stacking function for
* all component (bottom) devices. The stacking function will
* attempt to combine the values and ensure proper alignment.
*
* Returns 0 if the top and bottom queue_limits are compatible. The
* top device's block sizes and alignment offsets may be adjusted to
* ensure alignment with the bottom device. If no compatible sizes
* and alignments exist, -1 is returned and the resulting top
* queue_limits will have the misaligned flag set to indicate that
* the alignment_offset is undefined.
*/
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t start)
{
unsigned int top, bottom, alignment, ret = 0;
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t->max_write_same_sectors = min(t->max_write_same_sectors,
b->max_write_same_sectors);
t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
b->seg_boundary_mask);
t->max_segments = min_not_zero(t->max_segments, b->max_segments);
t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
b->max_integrity_segments);
t->max_segment_size = min_not_zero(t->max_segment_size,
b->max_segment_size);
t->misaligned |= b->misaligned;
alignment = queue_limit_alignment_offset(b, start);
/* Bottom device has different alignment. Check that it is
* compatible with the current top alignment.
*/
if (t->alignment_offset != alignment) {
top = max(t->physical_block_size, t->io_min)
+ t->alignment_offset;
bottom = max(b->physical_block_size, b->io_min) + alignment;
/* Verify that top and bottom intervals line up */
if (max(top, bottom) % min(top, bottom)) {
t->misaligned = 1;
ret = -1;
}
}
t->logical_block_size = max(t->logical_block_size,
b->logical_block_size);
t->physical_block_size = max(t->physical_block_size,
b->physical_block_size);
t->io_min = max(t->io_min, b->io_min);
t->io_opt = lcm(t->io_opt, b->io_opt);
t->cluster &= b->cluster;
t->discard_zeroes_data &= b->discard_zeroes_data;
/* Physical block size a multiple of the logical block size? */
if (t->physical_block_size & (t->logical_block_size - 1)) {
t->physical_block_size = t->logical_block_size;
t->misaligned = 1;
ret = -1;
}
/* Minimum I/O a multiple of the physical block size? */
if (t->io_min & (t->physical_block_size - 1)) {
t->io_min = t->physical_block_size;
t->misaligned = 1;
ret = -1;
}
/* Optimal I/O a multiple of the physical block size? */
if (t->io_opt & (t->physical_block_size - 1)) {
t->io_opt = 0;
t->misaligned = 1;
ret = -1;
}
/* Find lowest common alignment_offset */
t->alignment_offset = lcm(t->alignment_offset, alignment)
% max(t->physical_block_size, t->io_min);
/* Verify that new alignment_offset is on a logical block boundary */
if (t->alignment_offset & (t->logical_block_size - 1)) {
t->misaligned = 1;
ret = -1;
}
/* Discard alignment and granularity */
if (b->discard_granularity) {
alignment = queue_limit_discard_alignment(b, start);
if (t->discard_granularity != 0 &&
t->discard_alignment != alignment) {
top = t->discard_granularity + t->discard_alignment;
bottom = b->discard_granularity + alignment;
/* Verify that top and bottom intervals line up */
if ((max(top, bottom) % min(top, bottom)) != 0)
t->discard_misaligned = 1;
}
t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
b->max_discard_sectors);
t->discard_granularity = max(t->discard_granularity,
b->discard_granularity);
t->discard_alignment = lcm(t->discard_alignment, alignment) %
t->discard_granularity;
}
return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
/**
* bdev_stack_limits - adjust queue limits for stacked drivers
* @t: the stacking driver limits (top device)
* @bdev: the component block_device (bottom)
* @start: first data sector within component device
*
* Description:
* Merges queue limits for a top device and a block_device. Returns
* 0 if alignment didn't change. Returns -1 if adding the bottom
* device caused misalignment.
*/
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
sector_t start)
{
struct request_queue *bq = bdev_get_queue(bdev);
start += get_start_sect(bdev);
return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);
/**
* disk_stack_limits - adjust queue limits for stacked drivers
* @disk: MD/DM gendisk (top)
* @bdev: the underlying block device (bottom)
* @offset: offset to beginning of data within component device
*
* Description:
* Merges the limits for a top level gendisk and a bottom level
* block_device.
*/
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset)
{
struct request_queue *t = disk->queue;
if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
disk_name(disk, 0, top);
bdevname(bdev, bottom);
printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
top, bottom);
}
}
EXPORT_SYMBOL(disk_stack_limits);
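/*
 * Example (sketch; field names follow the MD driver and are illustrative
 * only): a stacking driver usually calls this while iterating over its
 * component devices, e.g.
 *
 *	disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9);
 */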
/**
* blk_queue_dma_pad - set pad mask
* @q: the request queue for the device
* @mask: pad mask
*
* Set dma pad mask.
*
* Appending pad buffer to a request modifies the last entry of a
* scatter list such that it includes the pad buffer.
**/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);
/**
* blk_queue_update_dma_pad - update pad mask
* @q: the request queue for the device
* @mask: pad mask
*
* Update dma pad mask.
*
* Appending pad buffer to a request modifies the last entry of a
* scatter list such that it includes the pad buffer.
**/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
if (mask > q->dma_pad_mask)
q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);
/**
* blk_queue_dma_drain - Set up a drain buffer for excess dma.
* @q: the request queue for the device
* @dma_drain_needed: fn which returns non-zero if drain is necessary
* @buf: physically contiguous buffer
* @size: size of the buffer in bytes
*
* Some devices have excess DMA problems and can't simply discard (or
* zero fill) the unwanted piece of the transfer. They have to have a
* real area of memory to transfer it into. The use case for this is
* ATAPI devices in DMA mode. If the packet command causes a transfer
* bigger than the transfer size some HBAs will lock up if there
* aren't DMA elements to contain the excess transfer. What this API
* does is adjust the queue so that the buf is always appended
* silently to the scatterlist.
*
* Note: This routine adjusts max_hw_segments to make room for appending
* the drain buffer. If you call blk_queue_max_segments() after calling
* this routine, you must set the limit to one fewer than your device
* can support otherwise there won't be room for the drain buffer.
*/
int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size)
{
if (queue_max_segments(q) < 2)
return -EINVAL;
/* make room for appending the drain */
blk_queue_max_segments(q, queue_max_segments(q) - 1);
q->dma_drain_needed = dma_drain_needed;
q->dma_drain_buffer = buf;
q->dma_drain_size = size;
return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
/**
* blk_queue_segment_boundary - set boundary rules for segment merging
* @q: the request queue for the device
* @mask: the memory boundary mask
**/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
if (mask < PAGE_CACHE_SIZE - 1) {
mask = PAGE_CACHE_SIZE - 1;
printk(KERN_INFO "%s: set to minimum %lx\n",
__func__, mask);
}
q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);
/**
* blk_queue_dma_alignment - set dma length and memory alignment
* @q: the request queue for the device
* @mask: alignment mask
*
* description:
* set required memory and length alignment for direct dma transactions.
* this is used when building direct io requests for the queue.
*
**/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);
/**
* blk_queue_update_dma_alignment - update dma length and memory alignment
* @q: the request queue for the device
* @mask: alignment mask
*
* description:
* update required memory and length alignment for direct dma transactions.
* If the requested alignment is larger than the current alignment, then
* the current queue alignment is updated to the new value, otherwise it
* is left alone. The design of this is to allow multiple objects
* (driver, device, transport etc) to set their respective
* alignments without having them interfere.
*
**/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
BUG_ON(mask > PAGE_SIZE);
if (mask > q->dma_alignment)
q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
/**
* blk_queue_flush - configure queue's cache flush capability
* @q: the request queue for the device
* @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
*
* Tell block layer cache flush capability of @q. If it supports
* flushing, REQ_FLUSH should be set. If it supports bypassing
* write cache for individual writes, REQ_FUA should be set.
*/
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
flush &= ~REQ_FUA;
q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
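/*
 * Example (sketch, not from this file): a driver with a volatile write cache
 * and FUA support would advertise both from its probe path:
 *
 *	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 *
 * while a write-through device simply leaves the flush flags at zero.
 */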
void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
q->flush_not_queueable = !queueable;
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
static int __init blk_settings_init(void)
{
blk_max_low_pfn = max_low_pfn - 1;
blk_max_pfn = max_pfn - 1;
return 0;
}
subsys_initcall(blk_settings_init);
| {
"language": "C"
} |
/******************************************************************************
The MIT License(MIT)
Embedded Template Library.
https://github.com/ETLCPP/etl
https://www.etlcpp.com
Copyright(c) 2014 jwellbelove
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files(the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions :
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
******************************************************************************/
#include "UnitTest++/UnitTest++.h"
#include <iterator>
#include <string>
#include <vector>
#include <stdint.h>
#include "etl/hash.h"
namespace
{
SUITE(test_hash)
{
//*************************************************************************
TEST(test_hash_bool)
{
size_t hash = etl::hash<bool>()(false);
CHECK_EQUAL(0U, hash);
hash = etl::hash<bool>()(true);
CHECK_EQUAL(1U, hash);
}
//*************************************************************************
TEST(test_hash_char)
{
size_t hash = etl::hash<char>()((char)(0x5A));
CHECK_EQUAL(0x5AU, hash);
}
//*************************************************************************
TEST(test_hash_signed_char)
{
size_t hash = etl::hash<signed char>()((signed char)(0x5A));
CHECK_EQUAL(0x5AU, hash);
}
//*************************************************************************
TEST(test_hash_unsigned_char)
{
size_t hash = etl::hash<unsigned char>()((unsigned char)(0x5A));
CHECK_EQUAL(0x5AU, hash);
}
//*************************************************************************
TEST(test_hash_short)
{
size_t hash = etl::hash<short>()((short)(0x5AA5));
CHECK_EQUAL(0x5AA5U, hash);
}
//*************************************************************************
TEST(test_hash_unsigned_short)
{
size_t hash = etl::hash<unsigned short>()((unsigned short)(0x5AA5));
CHECK_EQUAL(0x5AA5U, hash);
}
//*************************************************************************
TEST(test_hash_int)
{
size_t hash = etl::hash<int>()((int)(0x5AA555AA));
CHECK_EQUAL(0x5AA555AAU, hash);
}
//*************************************************************************
TEST(test_hash_unsigned_int)
{
size_t hash = etl::hash<unsigned int>()((unsigned int)(0x5AA555AA));
CHECK_EQUAL(0x5AA555AAU, hash);
}
//*************************************************************************
TEST(test_hash_long)
{
size_t hash = etl::hash<long>()((long)(0x5AA555AA));
CHECK_EQUAL(0x5AA555AAU, hash);
}
//*************************************************************************
TEST(test_hash_unsigned_long)
{
size_t hash = etl::hash<unsigned long>()((unsigned long)(0x5AA555AA));
CHECK_EQUAL(0x5AA555AAU, hash);
}
//*************************************************************************
TEST(test_hash_long_long)
{
size_t hash = etl::hash<long long>()((long long)(0x5AA555AA3CC333CC));
if (ETL_PLATFORM_32BIT)
{
CHECK_EQUAL(0xEC6A8D69U, hash);
}
if (ETL_PLATFORM_64BIT)
{
CHECK_EQUAL(0x5AA555AA3CC333CCU, hash);
}
}
//*************************************************************************
TEST(test_hash_unsigned_long_long)
{
size_t hash = etl::hash<unsigned long long>()((unsigned long long)(0x5AA555AA3CC333CC));
if (ETL_PLATFORM_32BIT)
{
CHECK_EQUAL(0xEC6A8D69U, hash);
}
if (ETL_PLATFORM_64BIT)
{
CHECK_EQUAL(0x5AA555AA3CC333CCU, hash);
}
}
//*************************************************************************
TEST(test_hash_float)
{
size_t hash = etl::hash<float>()((float)(1.2345));
if (ETL_PLATFORM_32BIT)
{
CHECK_EQUAL(0X3F9E0419U, hash);
}
if (ETL_PLATFORM_64BIT)
{
CHECK_EQUAL(9821047038287739023U, hash);
}
}
//*************************************************************************
TEST(test_hash_double)
{
size_t hash = etl::hash<double>()((double)(1.2345));
if (ETL_PLATFORM_32BIT)
{
CHECK_EQUAL(0x86FBF224U, hash);
}
if (ETL_PLATFORM_64BIT)
{
CHECK_EQUAL(0x3FF3C083126E978DU, hash);
}
}
//*************************************************************************
TEST(test_hash_pointer)
{
int i;
size_t hash = etl::hash<int*>()(&i);
CHECK_EQUAL(size_t(&i), hash);
}
//*************************************************************************
TEST(test_hash_const_pointer)
{
int i;
size_t hash = etl::hash<const int*>()(&i);
CHECK_EQUAL(size_t(&i), hash);
}
//*************************************************************************
TEST(test_hash_const_pointer_const)
{
int i;
const int * const pi = &i;
size_t hash = etl::hash<const int *>()(pi);
CHECK_EQUAL(size_t(&i), hash);
}
};
}
| {
"language": "C"
} |
/* mbed Microcontroller Library
* Copyright (c) 2006-2013 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MBED_SERIAL_API_H
#define MBED_SERIAL_API_H
#include "device.h"
#include "buffer.h"
#include "dma_api.h"
#if DEVICE_SERIAL
#define SERIAL_EVENT_TX_SHIFT (2)
#define SERIAL_EVENT_RX_SHIFT (8)
#define SERIAL_EVENT_TX_MASK (0x00FC)
#define SERIAL_EVENT_RX_MASK (0x3F00)
#define SERIAL_EVENT_ERROR (1 << 1)
/**
* @defgroup SerialTXEvents Serial TX Events Macros
*
* @{
*/
#define SERIAL_EVENT_TX_COMPLETE (1 << (SERIAL_EVENT_TX_SHIFT + 0))
#define SERIAL_EVENT_TX_ALL (SERIAL_EVENT_TX_COMPLETE)
/**@}*/
/**
* @defgroup SerialRXEvents Serial RX Events Macros
*
* @{
*/
#define SERIAL_EVENT_RX_COMPLETE (1 << (SERIAL_EVENT_RX_SHIFT + 0))
#define SERIAL_EVENT_RX_OVERRUN_ERROR (1 << (SERIAL_EVENT_RX_SHIFT + 1))
#define SERIAL_EVENT_RX_FRAMING_ERROR (1 << (SERIAL_EVENT_RX_SHIFT + 2))
#define SERIAL_EVENT_RX_PARITY_ERROR (1 << (SERIAL_EVENT_RX_SHIFT + 3))
#define SERIAL_EVENT_RX_OVERFLOW (1 << (SERIAL_EVENT_RX_SHIFT + 4))
#define SERIAL_EVENT_RX_CHARACTER_MATCH (1 << (SERIAL_EVENT_RX_SHIFT + 5))
#define SERIAL_EVENT_RX_ALL (SERIAL_EVENT_RX_OVERFLOW | SERIAL_EVENT_RX_PARITY_ERROR | \
SERIAL_EVENT_RX_FRAMING_ERROR | SERIAL_EVENT_RX_OVERRUN_ERROR | \
SERIAL_EVENT_RX_COMPLETE | SERIAL_EVENT_RX_CHARACTER_MATCH)
/**@}*/
#define SERIAL_RESERVED_CHAR_MATCH (255)
typedef enum {
ParityNone = 0,
ParityOdd = 1,
ParityEven = 2,
ParityForced1 = 3,
ParityForced0 = 4
} SerialParity;
typedef enum {
RxIrq,
TxIrq
} SerialIrq;
typedef enum {
FlowControlNone,
FlowControlRTS,
FlowControlCTS,
FlowControlRTSCTS
} FlowControl;
typedef void (*uart_irq_handler)(uint32_t id, SerialIrq event);
#if DEVICE_SERIAL_ASYNCH
/** Asynch serial hal structure
*/
typedef struct {
struct serial_s serial; /**< Target specific serial structure */
struct buffer_s tx_buff; /**< Tx buffer */
struct buffer_s rx_buff; /**< Rx buffer */
uint8_t char_match; /**< Character to be matched */
uint8_t char_found; /**< State of the matched character */
} serial_t;
#else
/** Non-asynch serial hal structure
*/
typedef struct serial_s serial_t;
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
* \defgroup GeneralSerial Serial Configuration Functions
* @{
*/
/** Initialize the serial peripheral. It sets the default parameters for the
 * serial peripheral and configures the specified pins.
*
* @param obj The serial object
* @param tx The TX pin
* @param rx The RX pin
*/
void serial_init(serial_t *obj, PinName tx, PinName rx);
/** Release the serial peripheral. Not currently invoked; it requires further
 * resource management.
*
* @param obj The serial object
*/
void serial_free(serial_t *obj);
/** Configure the baud rate
*
* @param obj The serial object
* @param baudrate The baud rate to be configured
*/
void serial_baud(serial_t *obj, int baudrate);
/** Configure the format. Set the number of bits, parity and the number of stop bits
*
* @param obj The serial object
* @param data_bits The number of data bits
* @param parity The parity
* @param stop_bits The number of stop bits
*/
void serial_format(serial_t *obj, int data_bits, SerialParity parity, int stop_bits);
/** The serial interrupt handler registration.
*
* @param obj The serial object
 * @param handler The interrupt handler which will be invoked when the interrupt fires.
* @param id The SerialBase object
*/
void serial_irq_handler(serial_t *obj, uart_irq_handler handler, uint32_t id);
/** Configure serial interrupt. This function is used for the word-by-word approach
*
* @param obj The serial object
* @param irq The serial IRQ type (RX or TX)
* @param enable Set to non-zero to enable events, or zero to disable them
*/
void serial_irq_set(serial_t *obj, SerialIrq irq, uint32_t enable);
/** Get character. This is a blocking call, waiting for a character
*
* @param obj The serial object
*/
int serial_getc(serial_t *obj);
/** Put a character. This is a blocking call, waiting for a peripheral to be available
* for writing
*
* @param obj The serial object
* @param c The character to be sent
*/
void serial_putc(serial_t *obj, int c);
/** Check if the serial peripheral is readable
*
* @param obj The serial object
* @return Non-zero value if a character can be read, 0 if nothing to read.
*/
int serial_readable(serial_t *obj);
/** Check if the serial peripheral is writable
*
* @param obj The serial object
* @return Non-zero value if a character can be written, 0 otherwise.
*/
int serial_writable(serial_t *obj);
/** Clear the serial peripheral
*
* @param obj The serial object
*/
void serial_clear(serial_t *obj);
/** Set the break
*
* @param obj The serial object
*/
void serial_break_set(serial_t *obj);
/** Clear the break
*
* @param obj The serial object
*/
void serial_break_clear(serial_t *obj);
/** Configure the TX pin for UART function.
*
* @param tx The pin used for TX
*/
void serial_pinout_tx(PinName tx);
/** Configure the serial for the flow control. It sets flow control in the hardware
* if a serial peripheral supports it, otherwise software emulation is used.
*
* @param obj The serial object
* @param type The type of the flow control. Look at the available FlowControl types.
 * @param rxflow The RX flow control pin (RTS)
 * @param txflow The TX flow control pin (CTS)
*/
void serial_set_flow_control(serial_t *obj, FlowControl type, PinName rxflow, PinName txflow);
#if DEVICE_SERIAL_ASYNCH
/**@}*/
/**
* \defgroup AsynchSerial Asynchronous Serial Hardware Abstraction Layer
* @{
*/
/** Begin asynchronous TX transfer. The used buffer is specified in the serial object,
* tx_buff
*
* @param obj The serial object
* @param tx The buffer for sending
* @param tx_length The number of words to transmit
* @param tx_width The bit width of buffer word
* @param handler The serial handler
* @param event The logical OR of events to be registered
* @param hint A suggestion for how to use DMA with this transfer
 * @return The number of data words transferred, or 0 otherwise
*/
int serial_tx_asynch(serial_t *obj, const void *tx, size_t tx_length, uint8_t tx_width, uint32_t handler, uint32_t event, DMAUsage hint);
/** Begin asynchronous RX transfer (enable interrupt for data collecting)
* The used buffer is specified in the serial object - rx_buff
*
* @param obj The serial object
 * @param rx The buffer for receiving
 * @param rx_length The number of words to receive
 * @param rx_width The bit width of buffer word
 * @param handler The serial handler
 * @param event The logical OR of events to be registered
* @param char_match A character in range 0-254 to be matched
* @param hint A suggestion for how to use DMA with this transfer
*/
void serial_rx_asynch(serial_t *obj, void *rx, size_t rx_length, uint8_t rx_width, uint32_t handler, uint32_t event, uint8_t char_match, DMAUsage hint);
/** Attempts to determine if the serial peripheral is already in use for TX
*
* @param obj The serial object
 * @return Non-zero if the TX transaction is ongoing, 0 otherwise
*/
uint8_t serial_tx_active(serial_t *obj);
/** Attempts to determine if the serial peripheral is already in use for RX
*
* @param obj The serial object
* @return Non-zero if the RX transaction is ongoing, 0 otherwise
*/
uint8_t serial_rx_active(serial_t *obj);
/** The asynchronous TX and RX handler.
*
* @param obj The serial object
 * @return Event flags if a transfer termination condition was met, or 0 otherwise
*/
int serial_irq_handler_asynch(serial_t *obj);
/** Abort the ongoing TX transaction. It disables the enabled interrupt for TX and
 * flushes the TX hardware buffer if a TX FIFO is used
*
* @param obj The serial object
*/
void serial_tx_abort_asynch(serial_t *obj);
/** Abort the ongoing RX transaction. It disables the enabled interrupt for RX and
 * flushes the RX hardware buffer if an RX FIFO is used
*
* @param obj The serial object
*/
void serial_rx_abort_asynch(serial_t *obj);
/**@}*/
#endif
#ifdef __cplusplus
}
#endif
#endif
#endif
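/*
 * Illustrative sketch (not part of the mbed HAL): a minimal blocking echo
 * loop built from the synchronous API declared above. The pin names USBTX
 * and USBRX are assumptions -- they are target-specific aliases that may not
 * exist on every board. Wrapped in #if 0 so it cannot affect a real build.
 */
#if 0
static void serial_echo_example(void)
{
    serial_t uart;
    serial_init(&uart, USBTX, USBRX);        /* assumed target pins */
    serial_baud(&uart, 115200);
    serial_format(&uart, 8, ParityNone, 1);  /* 8N1 */
    for (;;) {
        if (serial_readable(&uart)) {
            int c = serial_getc(&uart);      /* blocking read */
            serial_putc(&uart, c);           /* echo back */
        }
    }
}
#endif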
| {
"language": "C"
} |
/*******************************************************************************
*
* Copyright 2016-2018 snickerbockers
* snickerbockers@washemu.org
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include <string.h>
#include <stdlib.h>
#include "memory.h"
void memory_init(struct Memory *mem) {
memory_clear(mem);
}
void memory_cleanup(struct Memory *mem) {
}
void memory_clear(struct Memory *mem) {
memset(mem->mem, 0, sizeof(mem->mem[0]) * MEMORY_SIZE);
}
struct memory_interface ram_intf = {
.readdouble = memory_read_double,
.readfloat = memory_read_float,
.read32 = memory_read_32,
.read16 = memory_read_16,
.read8 = memory_read_8,
.writedouble = memory_write_double,
.writefloat = memory_write_float,
.write32 = memory_write_32,
.write16 = memory_write_16,
.write8 = memory_write_8
};
| {
"language": "C"
} |
/*
* File I/O binding example.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "duktape.h"
static int fileio_readfile(duk_context *ctx) {
const char *filename = duk_to_string(ctx, 0);
FILE *f = NULL;
long len;
void *buf;
size_t got;
if (!filename) {
goto error;
}
f = fopen(filename, "rb");
if (!f) {
goto error;
}
if (fseek(f, 0, SEEK_END) != 0) {
goto error;
}
len = ftell(f);
if (fseek(f, 0, SEEK_SET) != 0) {
goto error;
}
buf = duk_push_fixed_buffer(ctx, (size_t) len);
got = fread(buf, 1, len, f);
if (got != (size_t) len) {
goto error;
}
fclose(f);
f = NULL;
return 1;
error:
if (f) {
fclose(f);
}
return DUK_RET_ERROR;
}
static duk_function_list_entry fileio_funcs[] = {
{ "readfile", fileio_readfile, 1 },
{ NULL, NULL, 0 }
};
void fileio_register(duk_context *ctx) {
/* Set global 'FileIo'. */
duk_push_global_object(ctx);
duk_push_object(ctx);
duk_put_function_list(ctx, -1, fileio_funcs);
duk_put_prop_string(ctx, -2, "FileIo");
duk_pop(ctx);
}
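/*
 * Illustrative sketch (not from the Duktape distribution): registering the
 * binding and invoking it from script. duk_create_heap_default(),
 * duk_peval_string(), duk_get_int() and duk_destroy_heap() are standard
 * Duktape API calls; "test.txt" is a placeholder filename.
 */
#if 0
static void fileio_example(void) {
	duk_context *ctx = duk_create_heap_default();
	if (!ctx) {
		return;
	}
	fileio_register(ctx);
	/* FileIo.readfile() pushes a fixed buffer with the file contents,
	 * or throws (DUK_RET_ERROR) on failure. */
	if (duk_peval_string(ctx, "FileIo.readfile('test.txt').length") == 0) {
		printf("file length: %ld\n", (long) duk_get_int(ctx, -1));
	}
	duk_pop(ctx);
	duk_destroy_heap(ctx);
}
#endif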
| {
"language": "C"
} |
/*-----------------------------------------------------------------
LOG
GEM - Graphics Environment for Multimedia
Color blob tracker
Copyright (c) 1997-1999 Mark Danks. mark@danks.org
Copyright (c) Günther Geiger. geiger@epy.co.at
Copyright (c) 2001-2002 IOhannes m zmoelnig. forum::für::umläute. IEM. zmoelnig@iem.kug.ac.at
Copyright (c) 2002 James Tittle & Chris Clepper
For information on usage and redistribution, and for a DISCLAIMER OF ALL
WARRANTIES, see the file, "GEM.LICENSE.TERMS" in this distribution.
-----------------------------------------------------------------*/
#ifndef INCLUDE_PIX_OPENCV_FLOODFILL_H_
#define INCLUDE_PIX_OPENCV_FLOODFILL_H_
#ifndef _EiC
#include "cv.h"
#endif
#include "Base/GemPixObj.h"
#define MAX_COMPONENTS 10
/*-----------------------------------------------------------------
-------------------------------------------------------------------
CLASS
pix_opencv_floodfill
Color blob tracker
KEYWORDS
pix
DESCRIPTION
-----------------------------------------------------------------*/
class GEM_EXTERN pix_opencv_floodfill : public GemPixObj
{
CPPEXTERN_HEADER(pix_opencv_floodfill, GemPixObj)
public:
//////////
// Constructor
pix_opencv_floodfill();
protected:
//////////
// Destructor
virtual ~pix_opencv_floodfill();
//////////
// Do the processing
virtual void processRGBAImage(imageStruct &image);
virtual void processRGBImage(imageStruct &image);
virtual void processYUVImage(imageStruct &image);
virtual void processGrayImage(imageStruct &image);
void colorMess(float color);
void fillcolorMess(float index, float r, float g, float b);
void connectivityMess(float connectivity);
void markMess(float px, float py);
void deleteMess(float index);
void clearMess(void);
void updiffMess(float updiff);
void lodiffMess(float lodiff);
int comp_xsize;
int comp_ysize;
t_outlet *m_dataout;
t_atom x_list[5];
int x_up;
int x_lo;
int x_connectivity;
int x_color;
private:
//////////
// Static member functions
static void colorMessCallback(void *data, float color);
static void fillcolorMessCallback(void *data, float index, float r, float g, float b);
static void connectivityMessCallback(void *data, float connectivity);
static void markMessCallback(void *data, float px, float py);
static void deleteMessCallback(void *data, float index);
static void clearMessCallback(void *data);
static void updiffMessCallback(void *data, float updiff);
static void lodiffMessCallback(void *data, float lodiff);
// Internal Open CV data
// tracked components
int x_xcomp[MAX_COMPONENTS];
int x_ycomp[MAX_COMPONENTS];
// fill color
int x_r[MAX_COMPONENTS];
int x_g[MAX_COMPONENTS];
int x_b[MAX_COMPONENTS];
IplImage *rgba, *rgb, *grey;
};
#endif // for header file
| {
"language": "C"
} |
/*
* Copyright © 2005 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without
* fee, provided that the above copyright notice appear in all copies
* and that both that copyright notice and this permission notice
* appear in supporting documentation, and that the name of
* Red Hat, Inc. not be used in advertising or publicity pertaining to
* distribution of the software without specific, written prior
* permission. Red Hat, Inc. makes no representations about the
* suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* RED HAT, INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL RED HAT, INC. BE LIABLE FOR ANY SPECIAL,
* INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
* IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Author: Carl D. Worth <cworth@cworth.org>
*/
#include "cairo-test.h"
#define SIZE 30
/* At one point, an optimization was proposed for cairo in which a
* curve_to would be optimized as a line_to. The initial (buggy)
* implementation verified that the slopes of several segments of the
* spline's control polygon were identical, but left open the
* possibility of an anti-parallel slope for one segment.
*
* For example, given a spline with collinear control points (A,B,C,D)
* positioned as follows:
*
* C--A--B--D
*
* The code verified identical slopes for AB, CD, and AD. The missing
* check for the BC segment allowed it to be anti-parallel to the
* others as above, and hence invalid to replace this spline with the
* AD line segment.
*/
static cairo_test_status_t
draw (cairo_t *cr, int width, int height)
{
cairo_set_source_rgb (cr, 1.0, 1.0, 1.0); /* white */
cairo_paint (cr);
cairo_set_line_width (cr, 1.0);
cairo_set_line_cap (cr, CAIRO_LINE_CAP_BUTT);
cairo_set_line_join (cr, CAIRO_LINE_JOIN_BEVEL);
cairo_set_source_rgb (cr, 0.0, 0.0, 0.0); /* black */
cairo_translate (cr, 0, 1.0);
/* The CABD spline as described above. We ensure that the spline
* folds over on itself outside the bounds of the image to avoid
* the reference image having the curved portion of that fold,
* (which would just be harder to match in all the backends than
* we really want). */
cairo_move_to (cr,
10.5, 0.5);
cairo_curve_to (cr,
11.5, 0.5,
-25.0, 0.5,
31.0, 0.5);
cairo_stroke (cr);
cairo_translate (cr, 0, 2.0);
/* A reflected version: DBAC */
cairo_move_to (cr,
19.5, 0.5);
cairo_curve_to (cr,
18.5, 0.5,
55.0, 0.5,
-1.0, 0.5);
cairo_stroke (cr);
return CAIRO_TEST_SUCCESS;
}
CAIRO_TEST (curve_to_as_line_to,
"Test optimization treating curve_to as line_to",
"path", /* keywords */
NULL, /* requirements */
30,
5,
NULL, draw)
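/*
 * Illustrative sketch (not cairo code): a direction-aware collinearity test
 * for the control polygon A,B,C,D. Replacing the spline with the line AD is
 * only safe if every segment of the control polygon is parallel to AD *and*
 * points in the same direction; the cross product catches non-parallel
 * segments and the dot product catches the anti-parallel BC case described
 * in the comment above.
 */
#if 0
static int
spline_is_forward_line (double ax, double ay, double bx, double by,
			double cx, double cy, double dx, double dy)
{
    double vx = dx - ax, vy = dy - ay;	/* reference direction AD */
    double seg[3][2] = {
	{ bx - ax, by - ay },		/* AB */
	{ cx - bx, cy - by },		/* BC: the segment the buggy check missed */
	{ dx - cx, dy - cy },		/* CD */
    };
    int i;
    for (i = 0; i < 3; i++) {
	double cross = seg[i][0] * vy - seg[i][1] * vx;	/* parallel to AD? */
	double dot   = seg[i][0] * vx + seg[i][1] * vy;	/* same direction? */
	if (cross != 0.0 || dot < 0.0)
	    return 0;
    }
    return 1;
}
#endif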
| {
"language": "C"
} |
/*
* Copyright 2009 Intel Corporation
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without
* fee, provided that the above copyright notice appear in all copies
* and that both that copyright notice and this permission notice
* appear in supporting documentation, and that the name of
* Intel not be used in advertising or publicity pertaining to
* distribution of the software without specific, written prior
* permission. Intel makes no representations about the
* suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* INTEL CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY SPECIAL,
* INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
* IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Author: Chris Wilson <chris@chris-wilson.co.uk>
*/
#include "cairo-test.h"
static cairo_test_status_t
draw (cairo_t *cr, int width, int height)
{
cairo_surface_t *region[5];
const char *text = "Cairo";
int i;
cairo_set_source_rgb (cr, 1, 1, 1);
cairo_paint (cr);
cairo_set_source_rgb (cr, 0, 0, 0);
for (i = 0; i < 5; i++) {
cairo_t *cr_region;
cairo_text_extents_t extents;
char buf[2] = { text[i], '\0' };
region[i] = cairo_surface_create_for_rectangle (cairo_get_target (cr),
20 * i, 0, 20, 20);
cr_region = cairo_create (region[i]);
cairo_surface_destroy (region[i]);
cairo_select_font_face (cr_region, "@cairo:",
CAIRO_FONT_WEIGHT_NORMAL,
CAIRO_FONT_SLANT_NORMAL);
cairo_set_font_size (cr_region, 20);
cairo_text_extents (cr_region, buf, &extents);
cairo_move_to (cr_region,
10 - (extents.width/2 + extents.x_bearing),
10 - (extents.height/2 + extents.y_bearing));
cairo_show_text (cr_region, buf);
region[i] = cairo_surface_reference (cairo_get_target (cr_region));
cairo_destroy (cr_region);
}
for (i = 0; i < 5; i++) {
cairo_set_source_surface (cr, region[5-i-1], 20 * i, 20);
cairo_paint (cr);
}
for (i = 0; i < 5; i++) {
cairo_set_source_surface (cr, region[5-i-1], 20 * i, 40);
cairo_paint_with_alpha (cr, .5);
}
for (i = 0; i < 5; i++)
cairo_surface_destroy (region[i]);
return CAIRO_TEST_SUCCESS;
}
CAIRO_TEST (subsurface,
"Tests clipping of both source and destination using subsurfaces",
"subsurface", /* keywords */
NULL, /* requirements */
100, 60,
NULL, draw)
| {
"language": "C"
} |
/*
* Configuration for Xilinx ZynqMP
* (C) Copyright 2014 - 2015 Xilinx, Inc.
* Michal Simek <michal.simek@xilinx.com>
*
* Based on Configuration for Versatile Express
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef __XILINX_ZYNQMP_H
#define __XILINX_ZYNQMP_H
#define CONFIG_REMAKE_ELF
/* #define CONFIG_ARMV8_SWITCH_TO_EL1 */
/* Generic Interrupt Controller Definitions */
#define CONFIG_GICV2
#define GICD_BASE 0xF9010000
#define GICC_BASE 0xF9020000
#define CONFIG_SYS_ALT_MEMTEST
#define CONFIG_SYS_MEMTEST_SCRATCH 0xfffc0000
#ifndef CONFIG_NR_DRAM_BANKS
# define CONFIG_NR_DRAM_BANKS 2
#endif
#define CONFIG_SYS_MEMTEST_START 0
#define CONFIG_SYS_MEMTEST_END 1000
#define CONFIG_SYS_INIT_SP_ADDR CONFIG_SYS_TEXT_BASE
/* Generic Timer Definitions - setup in EL3. Setup by ATF for other cases */
#if !defined(COUNTER_FREQUENCY)
# define COUNTER_FREQUENCY 100000000
#endif
/* Size of malloc() pool */
#define CONFIG_SYS_MALLOC_LEN (CONFIG_ENV_SIZE + 0x2000000)
/* Serial setup */
#define CONFIG_ARM_DCC
#define CONFIG_CPU_ARMV8
#define CONFIG_ZYNQ_SERIAL
#define CONFIG_CONS_INDEX 0
#define CONFIG_SYS_BAUDRATE_TABLE \
{ 4800, 9600, 19200, 38400, 57600, 115200 }
/* Command line configuration */
#define CONFIG_MP
/* BOOTP options */
#define CONFIG_BOOTP_BOOTFILESIZE
#define CONFIG_BOOTP_BOOTPATH
#define CONFIG_BOOTP_GATEWAY
#define CONFIG_BOOTP_HOSTNAME
#define CONFIG_BOOTP_MAY_FAIL
#define CONFIG_BOOTP_DNS
#define CONFIG_BOOTP_PXE
#define CONFIG_BOOTP_SUBNETMASK
/* Diff from config_distro_defaults.h */
#define CONFIG_SUPPORT_RAW_INITRD
#if !defined(CONFIG_SPL_BUILD)
#define CONFIG_ENV_VARS_UBOOT_CONFIG
#endif
#define CONFIG_AUTO_COMPLETE
#if defined(CONFIG_MMC_SDHCI_ZYNQ)
# define CONFIG_SUPPORT_EMMC_BOOT
# ifndef CONFIG_ZYNQ_SDHCI_MAX_FREQ
# define CONFIG_ZYNQ_SDHCI_MAX_FREQ 200000000
# endif
# define CONFIG_ENV_IS_IN_FAT
# define FAT_ENV_DEVICE_AND_PART "0:auto"
# define FAT_ENV_FILE "uboot.env"
# define FAT_ENV_INTERFACE "mmc"
#endif
#ifdef CONFIG_NAND_ARASAN
# define CONFIG_CMD_NAND_LOCK_UNLOCK
# define CONFIG_SYS_MAX_NAND_DEVICE 1
# define CONFIG_SYS_NAND_SELF_INIT
# define CONFIG_SYS_NAND_ONFI_DETECTION
# define CONFIG_MTD_DEVICE
#endif
/* Miscellaneous configurable options */
#define CONFIG_SYS_LOAD_ADDR 0x8000000
#if defined(CONFIG_ZYNQMP_USB)
#define CONFIG_SYS_USB_XHCI_MAX_ROOT_PORTS 2
#define CONFIG_SYS_DFU_DATA_BUF_SIZE 0x1800000
#define DFU_DEFAULT_POLL_TIMEOUT 300
#define CONFIG_USB_CABLE_CHECK
#define CONFIG_CMD_THOR_DOWNLOAD
#define CONFIG_USB_FUNCTION_THOR
#define CONFIG_THOR_RESET_OFF
#define DFU_ALT_INFO_RAM \
"dfu_ram_info=" \
"setenv dfu_alt_info " \
"Image ram $kernel_addr $kernel_size\\\\;" \
"system.dtb ram $fdt_addr $fdt_size\0" \
"dfu_ram=run dfu_ram_info && dfu 0 ram 0\0" \
"thor_ram=run dfu_ram_info && thordown 0 ram 0\0"
#define DFU_ALT_INFO \
DFU_ALT_INFO_RAM
#ifndef CONFIG_SPL_BUILD
# define CONFIG_USB_FUNCTION_FASTBOOT
# define CONFIG_CMD_FASTBOOT
# define CONFIG_ANDROID_BOOT_IMAGE
# define CONFIG_FASTBOOT_BUF_ADDR 0x100000
# define CONFIG_FASTBOOT_BUF_SIZE 0x6000000
# define CONFIG_FASTBOOT_FLASH
# ifdef CONFIG_MMC_SDHCI_ZYNQ
# define CONFIG_FASTBOOT_FLASH_MMC_DEV 0
# endif
# define CONFIG_RANDOM_UUID
# define PARTS_DEFAULT \
"partitions=uuid_disk=${uuid_gpt_disk};" \
"name=""boot"",size=16M,uuid=${uuid_gpt_boot};" \
"name=""Linux"",size=-M,uuid=${uuid_gpt_Linux}\0"
#endif
#endif
#if !defined(DFU_ALT_INFO)
# define DFU_ALT_INFO
#endif
#if !defined(PARTS_DEFAULT)
# define PARTS_DEFAULT
#endif
/* Do not preserve environment */
#if !defined(CONFIG_ENV_IS_IN_FAT)
#define CONFIG_ENV_IS_NOWHERE 1
#endif
#define CONFIG_ENV_SIZE 0x8000
/* Monitor Command Prompt */
/* Console I/O Buffer Size */
#define CONFIG_SYS_CBSIZE 2048
#define CONFIG_SYS_PBSIZE (CONFIG_SYS_CBSIZE + \
sizeof(CONFIG_SYS_PROMPT) + 16)
#define CONFIG_SYS_BARGSIZE CONFIG_SYS_CBSIZE
#define CONFIG_SYS_LONGHELP
#define CONFIG_CMDLINE_EDITING
#define CONFIG_SYS_MAXARGS 64
/* Ethernet driver */
#if defined(CONFIG_ZYNQ_GEM)
# define CONFIG_NET_MULTI
# define CONFIG_MII
# define CONFIG_SYS_FAULT_ECHO_LINK_DOWN
# define CONFIG_PHY_MARVELL
# define CONFIG_PHY_NATSEMI
# define CONFIG_PHY_TI
# define CONFIG_PHY_GIGE
# define CONFIG_PHY_VITESSE
# define CONFIG_PHY_REALTEK
# define PHY_ANEG_TIMEOUT 20000
#endif
/* I2C */
#if defined(CONFIG_SYS_I2C_ZYNQ)
# define CONFIG_SYS_I2C
# define CONFIG_SYS_I2C_ZYNQ_SPEED 100000
# define CONFIG_SYS_I2C_ZYNQ_SLAVE 0
#endif
/* EEPROM */
#ifdef CONFIG_ZYNQMP_EEPROM
# define CONFIG_SYS_I2C_EEPROM_ADDR_LEN 2
# define CONFIG_SYS_I2C_EEPROM_ADDR 0x54
# define CONFIG_SYS_EEPROM_PAGE_WRITE_BITS 4
# define CONFIG_SYS_EEPROM_PAGE_WRITE_DELAY_MS 5
# define CONFIG_SYS_EEPROM_SIZE (64 * 1024)
#endif
#ifdef CONFIG_SATA_CEVA
#define CONFIG_LIBATA
#define CONFIG_SCSI_AHCI
#define CONFIG_SYS_SCSI_MAX_SCSI_ID 2
#define CONFIG_SYS_SCSI_MAX_LUN 1
#define CONFIG_SYS_SCSI_MAX_DEVICE (CONFIG_SYS_SCSI_MAX_SCSI_ID * \
CONFIG_SYS_SCSI_MAX_LUN)
#define CONFIG_SCSI
#endif
#define CONFIG_SYS_BOOTM_LEN (60 * 1024 * 1024)
#define CONFIG_BOARD_EARLY_INIT_R
#define CONFIG_CLOCKS
#define ENV_MEM_LAYOUT_SETTINGS \
"fdt_high=10000000\0" \
"initrd_high=10000000\0" \
"fdt_addr_r=0x40000000\0" \
"pxefile_addr_r=0x10000000\0" \
"kernel_addr_r=0x18000000\0" \
"scriptaddr=0x02000000\0" \
"ramdisk_addr_r=0x02100000\0" \
#if defined(CONFIG_MMC_SDHCI_ZYNQ)
# define BOOT_TARGET_DEVICES_MMC(func) func(MMC, mmc, 0) func(MMC, mmc, 1)
#else
# define BOOT_TARGET_DEVICES_MMC(func)
#endif
#if defined(CONFIG_SATA_CEVA)
# define BOOT_TARGET_DEVICES_SCSI(func) func(SCSI, scsi, 0)
#else
# define BOOT_TARGET_DEVICES_SCSI(func)
#endif
#if defined(CONFIG_ZYNQMP_USB)
# define BOOT_TARGET_DEVICES_USB(func) func(USB, usb, 0) func(USB, usb, 1)
#else
# define BOOT_TARGET_DEVICES_USB(func)
#endif
#define BOOT_TARGET_DEVICES(func) \
BOOT_TARGET_DEVICES_MMC(func) \
BOOT_TARGET_DEVICES_USB(func) \
BOOT_TARGET_DEVICES_SCSI(func) \
func(PXE, pxe, na) \
func(DHCP, dhcp, na)
#include <config_distro_bootcmd.h>
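/*
 * Illustrative note (an assumption about usage, not U-Boot code):
 * BOOT_TARGET_DEVICES is an X-macro list; config_distro_bootcmd.h supplies
 * the real 'func' macros that turn each func(DEVICE, device, instance) entry
 * into boot_targets entries and bootcmd_* environment variables. With a
 * hypothetical demonstration macro the list would expand roughly as:
 *
 *   #define DEMO(dev, l, i) #l #i " "
 *   BOOT_TARGET_DEVICES(DEMO)  ->  "mmc" "0" " " "mmc" "1" " " ... "dhcp" "na" " "
 *
 * i.e. the device/instance pairs define the default boot_targets ordering.
 */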
/* Initial environment variables */
#ifndef CONFIG_EXTRA_ENV_SETTINGS
#define CONFIG_EXTRA_ENV_SETTINGS \
ENV_MEM_LAYOUT_SETTINGS \
BOOTENV \
DFU_ALT_INFO
#endif
/* SPL can't handle all huge variables - define just DFU */
#if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_DFU_SUPPORT)
#undef CONFIG_EXTRA_ENV_SETTINGS
# define CONFIG_EXTRA_ENV_SETTINGS \
"dfu_alt_info_ram=uboot.bin ram 0x8000000 0x1000000;" \
"atf-uboot.ub ram 0x10000000 0x1000000;" \
"Image ram 0x80000 0x3f80000;" \
"system.dtb ram 0x4000000 0x100000\0" \
"dfu_bufsiz=0x1000\0"
#endif
#define CONFIG_SPL_TEXT_BASE 0xfffc0000
#define CONFIG_SPL_STACK 0xfffffffc
#define CONFIG_SPL_MAX_SIZE 0x40000
/* Just random location in OCM */
#define CONFIG_SPL_BSS_START_ADDR 0x0
#define CONFIG_SPL_BSS_MAX_SIZE 0x80000
#define CONFIG_SPL_FRAMEWORK
/* u-boot is like dtb */
#define CONFIG_SPL_FS_LOAD_ARGS_NAME "u-boot.bin"
#define CONFIG_SYS_SPL_ARGS_ADDR 0x8000000
/* ATF is my kernel image */
#define CONFIG_SPL_FS_LOAD_KERNEL_NAME "atf-uboot.ub"
/* FIT load address for RAM boot */
#define CONFIG_SPL_LOAD_FIT_ADDRESS 0x10000000
/* MMC support */
#ifdef CONFIG_MMC_SDHCI_ZYNQ
# define CONFIG_SYS_MMCSD_FS_BOOT_PARTITION 1
# define CONFIG_SYS_MMCSD_RAW_MODE_ARGS_SECTOR 0 /* unused */
# define CONFIG_SYS_MMCSD_RAW_MODE_ARGS_SECTORS 0 /* unused */
# define CONFIG_SYS_MMCSD_RAW_MODE_KERNEL_SECTOR 0 /* unused */
# define CONFIG_SPL_FS_LOAD_PAYLOAD_NAME "u-boot.img"
#endif
#if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_DFU_SUPPORT)
# undef CONFIG_CMD_BOOTD
# define CONFIG_SPL_ENV_SUPPORT
# define CONFIG_SPL_HASH_SUPPORT
# define CONFIG_ENV_MAX_ENTRIES 10
# define CONFIG_SYS_SPL_MALLOC_START 0x20000000
# define CONFIG_SYS_SPL_MALLOC_SIZE 0x100000
#ifdef CONFIG_SPL_SYS_MALLOC_SIMPLE
# error "Disable CONFIG_SPL_SYS_MALLOC_SIMPLE. Full malloc needs to be used"
#endif
#endif
#define CONFIG_BOARD_EARLY_INIT_F
#endif /* __XILINX_ZYNQMP_H */
| {
"language": "C"
} |
/**
\file ADM_audioStreamConstantChunk
\brief Base class
(C) Mean 2008
GPL-v2
*/
#include "ADM_default.h"
#include "ADM_audioStreamConstantChunk.h"
#include "DIA_working.h"
#include "ADM_vidMisc.h"
/**
\fn ADM_audioStreamConstantChunk
\brief constructor
*/
ADM_audioStreamConstantChunk::ADM_audioStreamConstantChunk(WAVHeader *header,ADM_audioAccess *access)
: ADM_audioStream(header,access)
{
//
chunkSize=header->blockalign;
if(!chunkSize)
{
ADM_warning("[ADM_audioStreamConstantChunk] Blockalign is null expect problems\n");
chunkSize=8192; // dummy value
}
ADM_info("[ADM_audioStreamConstantChunk] Chunk size %" PRIu32"\n",chunkSize);
ADM_info("[ADM_audioStreamConstantChunk] Byterate %" PRIu32"\n",header->byterate);
// Compute sample per chunk from wavHeader...
float f;
f=chunkSize;
f/=header->byterate; // F is in seconds
f*=header->frequency; // in sample
samplesPerChunk=(uint32_t)f;
ADM_info("[ADM_audioStreamConstantChunk] About %" PRIu32" samples per chunk\n",samplesPerChunk);
//samplesPerChunk=16;
// If hinted..., compute the duration ourselves
if(access->isCBR()==true && access->canSeekOffset()==true)
{
// We can compute the duration from the length
float size=access->getLength();
size/=header->byterate; // Result is in second
size*=1000;
size*=1000; // s->us
durationInUs=(uint64_t)size;
ADM_info("Computed duration %s\n",ADM_us2plain(durationInUs));
return;
}
// Time based
durationInUs=access->getDurationInUs();
}
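/*
    Worked example (illustrative numbers, not taken from a real stream): for
    16-bit stereo PCM at 44100 Hz, byterate = 176400 and blockalign = 4, so
    f = 4 / 176400 * 44100 = 1, i.e. one sample per chunk. For a stream with
    blockalign = 417 and byterate = 16000 at 44100 Hz, f = 417 / 16000 * 44100,
    which is roughly 1149 samples per chunk.
*/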
/**
\fn ADM_audioStream
\brief destructor
*/
ADM_audioStreamConstantChunk::~ADM_audioStreamConstantChunk()
{
}
/**
\fn getPacket
*/
uint8_t ADM_audioStreamConstantChunk::getPacket(uint8_t *buffer,uint32_t *size, uint32_t sizeMax,uint32_t *nbSample,uint64_t *dts)
{
*size=0;
*nbSample=0;
if(sizeMax>=chunkSize)
{
uint32_t mSize;
uint64_t mDts;
if(!access->getPacket(buffer,&mSize,sizeMax,&mDts))
{
ADM_warning("Cant get packet\n");
return 0;
}
ADM_info("Got packet : chunk=%d size=%d dts=%s\n",chunkSize,mSize,ADM_us2plain(mDts));
if(!*size)
*dts=mDts;
*size+=mSize;
*nbSample+=samplesPerChunk;
if(mSize!=chunkSize)
{
ADM_warning("Expected chunk of size =%d, got %d\n",chunkSize,mSize);
}
buffer+=mSize;
sizeMax-=mSize;
}
if(!*size) return 0;
return 1;
}
| {
"language": "C"
} |
/* w_gammal.c -- long double version of w_gamma.c.
* Conversion to long double by Ulrich Drepper,
* Cygnus Support, drepper@cygnus.com.
*/
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#if defined(LIBM_SCCS) && !defined(lint)
static char rcsid[] = "$NetBSD: $";
#endif
/* long double gammal(double x)
* Return the Gamma function of x.
*/
#include <math.h>
#include "math_private.h"
#ifdef __STDC__
long double __tgammal(long double x)
#else
long double __tgammal(x)
long double x;
#endif
{
long double y;
int local_signgam;
y = __ieee754_gammal_r(x,&local_signgam);
if (local_signgam < 0) y = -y;
#ifdef _IEEE_LIBM
return y;
#else
if(_LIB_VERSION == _IEEE_) return y;
if(!__finitel(y)&&__finitel(x)) {
if(x==0.0)
return __kernel_standard(x,x,250); /* tgamma pole */
else if(__floorl(x)==x&&x<0.0)
return __kernel_standard(x,x,241); /* tgamma domain */
else
return __kernel_standard(x,x,240); /* tgamma overflow */
}
return y;
#endif
}
weak_alias (__tgammal, tgammal)
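/*
 * Illustrative sketch (not part of this libm source): tgammal() extends the
 * factorial, so tgammal(5.0L) == 24.0L, while x == 0.0 hits the pole case
 * (error code 250 above) and negative integers hit the domain case (241).
 */
#if 0
#include <stdio.h>
#include <math.h>
int main(void)
{
	printf("%Lf\n", tgammal(5.0L));	/* 24.000000 */
	printf("%Lf\n", tgammal(0.5L));	/* sqrt(pi), about 1.772454 */
	return 0;
}
#endif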
| {
"language": "C"
} |
/* ______ ___ ___
* /\ _ \ /\_ \ /\_ \
* \ \ \L\ \\//\ \ \//\ \ __ __ _ __ ___
* \ \ __ \ \ \ \ \ \ \ /'__`\ /'_ `\/\`'__\/ __`\
* \ \ \/\ \ \_\ \_ \_\ \_/\ __//\ \L\ \ \ \//\ \L\ \
* \ \_\ \_\/\____\/\____\ \____\ \____ \ \_\\ \____/
* \/_/\/_/\/____/\/____/\/____/\/___L\ \/_/ \/___/
* /\____/
* \_/__/
*
* FLI/FLC routines.
*
* By Shawn Hargreaves.
*
* See readme.txt for copyright information.
*/
#ifndef ALLEGRO_FLI_H
#define ALLEGRO_FLI_H
#include "base.h"
#include "palette.h"
#ifdef __cplusplus
extern "C" {
#endif
struct BITMAP;
#define FLI_OK 0 /* FLI player return values */
#define FLI_EOF -1
#define FLI_ERROR -2
#define FLI_NOT_OPEN -3
AL_FUNC(int, play_fli, (AL_CONST char *filename, struct BITMAP *bmp, int loop, AL_METHOD(int, callback, (void))));
AL_FUNC(int, play_memory_fli, (void *fli_data, struct BITMAP *bmp, int loop, AL_METHOD(int, callback, (void))));
AL_FUNC(int, open_fli, (AL_CONST char *filename));
AL_FUNC(int, open_memory_fli, (void *fli_data));
AL_FUNC(void, close_fli, (void));
AL_FUNC(int, next_fli_frame, (int loop));
AL_FUNC(void, reset_fli_variables, (void));
AL_VAR(struct BITMAP *, fli_bitmap); /* current frame of the FLI */
AL_VAR(PALETTE, fli_palette); /* current FLI palette */
AL_VAR(int, fli_bmp_dirty_from); /* what part of fli_bitmap is dirty */
AL_VAR(int, fli_bmp_dirty_to);
AL_VAR(int, fli_pal_dirty_from); /* what part of fli_palette is dirty */
AL_VAR(int, fli_pal_dirty_to);
AL_VAR(int, fli_frame); /* current frame number */
AL_VAR(volatile int, fli_timer); /* for timing FLI playback */
#ifdef __cplusplus
}
#endif
#endif /* ifndef ALLEGRO_FLI_H */
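/*
 * Illustrative sketch (not part of Allegro): a minimal manual playback loop
 * using the low-level API declared above. It assumes allegro.h has been
 * included and that a graphics mode and the timer system are already set up;
 * "anim.fli" is a placeholder filename and the rest() pacing is a crude
 * stand-in for waiting on fli_timer.
 */
#if 0
static void play_fli_manually(void)
{
   if (open_fli("anim.fli") != FLI_OK)
      return;
   while (next_fli_frame(0) == FLI_OK) {
      /* fli_bitmap and fli_palette are refreshed by next_fli_frame() */
      set_palette(fli_palette);
      blit(fli_bitmap, screen, 0, 0, 0, 0, fli_bitmap->w, fli_bitmap->h);
      rest(1000 / 70);   /* FLI ticks are nominally 1/70th of a second */
   }
   close_fli();
}
#endif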
| {
"language": "C"
} |
/*
+----------------------------------------------------------------------+
| PHP Version 7 |
+----------------------------------------------------------------------+
| Copyright (c) 1997-2017 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Authors: Felipe Pena <felipe@php.net> |
| Authors: Joe Watkins <joe.watkins@live.co.uk> |
| Authors: Bob Weinand <bwoebi@php.net> |
+----------------------------------------------------------------------+
*/
#include "phpdbg.h"
#include "phpdbg_cmd.h"
#include "phpdbg_utils.h"
#include "phpdbg_set.h"
#include "phpdbg_prompt.h"
#include "phpdbg_io.h"
ZEND_EXTERN_MODULE_GLOBALS(phpdbg)
static inline const char *phpdbg_command_name(const phpdbg_command_t *command, char *buffer) {
size_t pos = 0;
if (command->parent) {
memcpy(&buffer[pos], command->parent->name, command->parent->name_len);
pos += command->parent->name_len;
memcpy(&buffer[pos], " ", sizeof(" ")-1);
pos += (sizeof(" ")-1);
}
memcpy(&buffer[pos], command->name, command->name_len);
pos += command->name_len;
buffer[pos] = 0;
return buffer;
}
PHPDBG_API const char *phpdbg_get_param_type(const phpdbg_param_t *param) /* {{{ */
{
switch (param->type) {
case STACK_PARAM:
return "stack";
case EMPTY_PARAM:
return "empty";
case ADDR_PARAM:
return "address";
case NUMERIC_PARAM:
return "numeric";
case METHOD_PARAM:
return "method";
case NUMERIC_FUNCTION_PARAM:
return "function opline";
case NUMERIC_METHOD_PARAM:
return "method opline";
case FILE_PARAM:
return "file or file opline";
case STR_PARAM:
return "string";
default: /* this is bad */
return "unknown";
}
}
PHPDBG_API void phpdbg_clear_param(phpdbg_param_t *param) /* {{{ */
{
if (param) {
switch (param->type) {
case FILE_PARAM:
efree(param->file.name);
break;
case METHOD_PARAM:
efree(param->method.class);
efree(param->method.name);
break;
case STR_PARAM:
efree(param->str);
break;
default:
break;
}
}
} /* }}} */
PHPDBG_API char* phpdbg_param_tostring(const phpdbg_param_t *param, char **pointer) /* {{{ */
{
switch (param->type) {
case STR_PARAM:
ZEND_IGNORE_VALUE(asprintf(pointer, "%s", param->str));
break;
case ADDR_PARAM:
ZEND_IGNORE_VALUE(asprintf(pointer, ZEND_ULONG_FMT, param->addr));
break;
case NUMERIC_PARAM:
ZEND_IGNORE_VALUE(asprintf(pointer, "%li", param->num));
break;
case METHOD_PARAM:
ZEND_IGNORE_VALUE(asprintf(pointer, "%s::%s", param->method.class, param->method.name));
break;
case FILE_PARAM:
if (param->num) {
ZEND_IGNORE_VALUE(asprintf(pointer, "%s:%lu#%lu", param->file.name, param->file.line, param->num));
} else {
ZEND_IGNORE_VALUE(asprintf(pointer, "%s:%lu", param->file.name, param->file.line));
}
break;
case NUMERIC_FUNCTION_PARAM:
ZEND_IGNORE_VALUE(asprintf(pointer, "%s#%lu", param->str, param->num));
break;
case NUMERIC_METHOD_PARAM:
ZEND_IGNORE_VALUE(asprintf(pointer, "%s::%s#%lu", param->method.class, param->method.name, param->num));
break;
default:
*pointer = strdup("unknown");
}
return *pointer;
} /* }}} */
PHPDBG_API void phpdbg_copy_param(const phpdbg_param_t* src, phpdbg_param_t* dest) /* {{{ */
{
switch ((dest->type = src->type)) {
case STACK_PARAM:
/* nope */
break;
case STR_PARAM:
dest->str = estrndup(src->str, src->len);
dest->len = src->len;
break;
case OP_PARAM:
dest->str = estrndup(src->str, src->len);
dest->len = src->len;
break;
case ADDR_PARAM:
dest->addr = src->addr;
break;
case NUMERIC_PARAM:
dest->num = src->num;
break;
case METHOD_PARAM:
dest->method.class = estrdup(src->method.class);
dest->method.name = estrdup(src->method.name);
break;
case NUMERIC_FILE_PARAM:
case FILE_PARAM:
dest->file.name = estrdup(src->file.name);
dest->file.line = src->file.line;
if (src->num)
dest->num = src->num;
break;
case NUMERIC_FUNCTION_PARAM:
dest->str = estrndup(src->str, src->len);
dest->num = src->num;
dest->len = src->len;
break;
case NUMERIC_METHOD_PARAM:
dest->method.class = estrdup(src->method.class);
dest->method.name = estrdup(src->method.name);
dest->num = src->num;
break;
case EMPTY_PARAM: { /* do nothing */ } break;
default: {
/* not yet */
}
}
} /* }}} */
PHPDBG_API zend_ulong phpdbg_hash_param(const phpdbg_param_t *param) /* {{{ */
{
zend_ulong hash = param->type;
switch (param->type) {
case STACK_PARAM:
/* nope */
break;
case STR_PARAM:
hash += zend_inline_hash_func(param->str, param->len);
break;
case METHOD_PARAM:
hash += zend_inline_hash_func(param->method.class, strlen(param->method.class));
hash += zend_inline_hash_func(param->method.name, strlen(param->method.name));
break;
case FILE_PARAM:
hash += zend_inline_hash_func(param->file.name, strlen(param->file.name));
hash += param->file.line;
if (param->num)
hash += param->num;
break;
case ADDR_PARAM:
hash += param->addr;
break;
case NUMERIC_PARAM:
hash += param->num;
break;
case NUMERIC_FUNCTION_PARAM:
hash += zend_inline_hash_func(param->str, param->len);
hash += param->num;
break;
case NUMERIC_METHOD_PARAM:
hash += zend_inline_hash_func(param->method.class, strlen(param->method.class));
hash += zend_inline_hash_func(param->method.name, strlen(param->method.name));
if (param->num)
hash+= param->num;
break;
case EMPTY_PARAM: { /* do nothing */ } break;
default: {
/* not yet */
}
}
return hash;
} /* }}} */
PHPDBG_API zend_bool phpdbg_match_param(const phpdbg_param_t *l, const phpdbg_param_t *r) /* {{{ */
{
if (l && r) {
if (l->type == r->type) {
switch (l->type) {
case STACK_PARAM:
/* nope, or yep */
return 1;
break;
case NUMERIC_FUNCTION_PARAM:
if (l->num != r->num) {
break;
}
/* break intentionally omitted */
case STR_PARAM:
return (l->len == r->len) &&
(memcmp(l->str, r->str, l->len) == SUCCESS);
case NUMERIC_PARAM:
return (l->num == r->num);
case ADDR_PARAM:
return (l->addr == r->addr);
case FILE_PARAM: {
if (l->file.line == r->file.line) {
size_t lengths[2] = {
strlen(l->file.name), strlen(r->file.name)};
if (lengths[0] == lengths[1]) {
if ((!l->num && !r->num) || (l->num == r->num)) {
return (memcmp(
l->file.name, r->file.name, lengths[0]) == SUCCESS);
}
}
}
} break;
case NUMERIC_METHOD_PARAM:
if (l->num != r->num) {
break;
}
/* break intentionally omitted */
case METHOD_PARAM: {
size_t lengths[2] = {
strlen(l->method.class), strlen(r->method.class)};
if (lengths[0] == lengths[1]) {
if (memcmp(l->method.class, r->method.class, lengths[0]) == SUCCESS) {
lengths[0] = strlen(l->method.name);
lengths[1] = strlen(r->method.name);
if (lengths[0] == lengths[1]) {
return (memcmp(
l->method.name, r->method.name, lengths[0]) == SUCCESS);
}
}
}
} break;
case EMPTY_PARAM:
return 1;
default: {
/* not yet */
}
}
}
}
return 0;
} /* }}} */
/* {{{ */
PHPDBG_API void phpdbg_param_debug(const phpdbg_param_t *param, const char *msg) {
if (param && param->type) {
switch (param->type) {
case STR_PARAM:
fprintf(stderr, "%s STR_PARAM(%s=%zu)\n", msg, param->str, param->len);
break;
case ADDR_PARAM:
fprintf(stderr, "%s ADDR_PARAM(" ZEND_ULONG_FMT ")\n", msg, param->addr);
break;
case NUMERIC_FILE_PARAM:
fprintf(stderr, "%s NUMERIC_FILE_PARAM(%s:#%lu)\n", msg, param->file.name, param->file.line);
break;
case FILE_PARAM:
fprintf(stderr, "%s FILE_PARAM(%s:%lu)\n", msg, param->file.name, param->file.line);
break;
case METHOD_PARAM:
fprintf(stderr, "%s METHOD_PARAM(%s::%s)\n", msg, param->method.class, param->method.name);
break;
case NUMERIC_METHOD_PARAM:
fprintf(stderr, "%s NUMERIC_METHOD_PARAM(%s::%s)\n", msg, param->method.class, param->method.name);
break;
case NUMERIC_FUNCTION_PARAM:
fprintf(stderr, "%s NUMERIC_FUNCTION_PARAM(%s::%ld)\n", msg, param->str, param->num);
break;
case NUMERIC_PARAM:
fprintf(stderr, "%s NUMERIC_PARAM(%ld)\n", msg, param->num);
break;
case COND_PARAM:
fprintf(stderr, "%s COND_PARAM(%s=%zu)\n", msg, param->str, param->len);
break;
case OP_PARAM:
fprintf(stderr, "%s OP_PARAM(%s=%zu)\n", msg, param->str, param->len);
break;
default: {
/* not yet */
}
}
}
} /* }}} */
/* {{{ */
PHPDBG_API void phpdbg_stack_free(phpdbg_param_t *stack) {
if (stack && stack->next) {
phpdbg_param_t *remove = stack->next;
while (remove) {
phpdbg_param_t *next = NULL;
if (remove->next)
next = remove->next;
switch (remove->type) {
case NUMERIC_METHOD_PARAM:
case METHOD_PARAM:
if (remove->method.class) {
efree(remove->method.class);
}
if (remove->method.name) {
efree(remove->method.name);
}
break;
case NUMERIC_FUNCTION_PARAM:
case STR_PARAM:
case OP_PARAM:
case EVAL_PARAM:
case SHELL_PARAM:
case COND_PARAM:
case RUN_PARAM:
if (remove->str) {
efree(remove->str);
}
break;
case NUMERIC_FILE_PARAM:
case FILE_PARAM:
if (remove->file.name) {
efree(remove->file.name);
}
break;
default: {
/* nothing */
}
}
free(remove);
remove = NULL;
if (next)
remove = next;
else break;
}
}
stack->next = NULL;
} /* }}} */
/* {{{ */
PHPDBG_API void phpdbg_stack_push(phpdbg_param_t *stack, phpdbg_param_t *param) {
phpdbg_param_t *next = calloc(1, sizeof(phpdbg_param_t));
if (!next) {
return;
}
*(next) = *(param);
next->next = NULL;
if (stack->top == NULL) {
stack->top = next;
next->top = NULL;
stack->next = next;
} else {
stack->top->next = next;
next->top = stack->top;
stack->top = next;
}
stack->len++;
} /* }}} */
/* {{{ */
PHPDBG_API void phpdbg_stack_separate(phpdbg_param_t *param) {
phpdbg_param_t *stack = calloc(1, sizeof(phpdbg_param_t));
stack->type = STACK_PARAM;
stack->next = param->next;
param->next = stack;
stack->top = param->top;
} /* }}} */
PHPDBG_API int phpdbg_stack_verify(const phpdbg_command_t *command, phpdbg_param_t **stack) {
if (command) {
char buffer[128] = {0,};
const phpdbg_param_t *top = (stack != NULL) ? *stack : NULL;
const char *arg = command->args;
size_t least = 0L,
received = 0L,
current = 0L;
zend_bool optional = 0;
/* check for arg spec */
if (!(arg) || !(*arg)) {
if (!top || top->type == STACK_PARAM) {
return SUCCESS;
}
phpdbg_error("command", "type=\"toomanyargs\" command=\"%s\" expected=\"0\"", "The command \"%s\" expected no arguments",
phpdbg_command_name(command, buffer));
return FAILURE;
}
least = 0L;
/* count least amount of arguments */
while (arg && *arg) {
if (arg[0] == '|') {
break;
}
least++;
arg++;
}
arg = command->args;
#define verify_arg(e, a, t) if (!(a)) { \
if (!optional) { \
phpdbg_error("command", "type=\"noarg\" command=\"%s\" expected=\"%s\" num=\"%lu\"", "The command \"%s\" expected %s and got nothing at parameter %lu", \
phpdbg_command_name(command, buffer), \
(e), \
current); \
return FAILURE;\
} \
} else if ((a)->type != (t)) { \
phpdbg_error("command", "type=\"wrongarg\" command=\"%s\" expected=\"%s\" got=\"%s\" num=\"%lu\"", "The command \"%s\" expected %s and got %s at parameter %lu", \
phpdbg_command_name(command, buffer), \
(e),\
phpdbg_get_param_type((a)), \
current); \
return FAILURE; \
}
while (arg && *arg) {
if (top && top->type == STACK_PARAM) {
break;
}
current++;
switch (*arg) {
case '|': {
current--;
optional = 1;
arg++;
} continue;
case 'i': verify_arg("raw input", top, STR_PARAM); break;
case 's': verify_arg("string", top, STR_PARAM); break;
case 'n': verify_arg("number", top, NUMERIC_PARAM); break;
case 'm': verify_arg("method", top, METHOD_PARAM); break;
case 'a': verify_arg("address", top, ADDR_PARAM); break;
case 'f': verify_arg("file:line", top, FILE_PARAM); break;
case 'c': verify_arg("condition", top, COND_PARAM); break;
case 'o': verify_arg("opcode", top, OP_PARAM); break;
case 'b': verify_arg("boolean", top, NUMERIC_PARAM); break;
case '*': { /* do nothing */ } break;
}
if (top) {
top = top->next;
} else {
break;
}
received++;
arg++;
}
#undef verify_arg
if ((received < least)) {
phpdbg_error("command", "type=\"toofewargs\" command=\"%s\" expected=\"%d\" argtypes=\"%s\" got=\"%d\"", "The command \"%s\" expected at least %lu arguments (%s) and received %lu",
phpdbg_command_name(command, buffer),
least,
command->args,
received);
return FAILURE;
}
}
return SUCCESS;
}
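/*
 * Illustrative note (derived from the verify_arg cases above, not an official
 * grammar): a command's args string lists one letter per expected parameter
 * ('s' string, 'n' number, 'm' method, 'f' file:line, ...) with '|' marking
 * the start of the optional ones. An args spec of "s|n", for example, accepts
 * a required string optionally followed by a number: a lone string verifies,
 * a string plus a number verifies, and a leading number fails with a
 * "wrongarg" error at parameter 1.
 */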
/* {{{ */
PHPDBG_API const phpdbg_command_t *phpdbg_stack_resolve(const phpdbg_command_t *commands, const phpdbg_command_t *parent, phpdbg_param_t **top) {
const phpdbg_command_t *command = commands;
phpdbg_param_t *name = *top;
const phpdbg_command_t *matched[3] = {NULL, NULL, NULL};
ulong matches = 0L;
while (command && command->name && command->handler) {
if (name->len == 1 || command->name_len >= name->len) {
/* match single letter alias */
if (command->alias && (name->len == 1)) {
if (command->alias == (*name->str)) {
matched[matches] = command;
matches++;
}
} else {
/* match full, case insensitive, command name */
if (strncasecmp(command->name, name->str, name->len) == SUCCESS) {
if (matches < 3) {
/* only allow abbreviating commands that can be aliased */
if ((name->len != command->name_len && command->alias) || name->len == command->name_len) {
matched[matches] = command;
matches++;
}
/* exact match */
if (name->len == command->name_len) {
break;
}
} else {
break;
}
}
}
}
command++;
}
switch (matches) {
case 0:
if (parent) {
phpdbg_error("command", "type=\"notfound\" command=\"%s\" subcommand=\"%s\"", "The command \"%s %s\" could not be found", parent->name, name->str);
} else {
phpdbg_error("command", "type=\"notfound\" command=\"%s\"", "The command \"%s\" could not be found", name->str);
}
return parent;
case 1:
(*top) = (*top)->next;
command = matched[0];
break;
default: {
char *list = NULL;
uint32_t it = 0;
size_t pos = 0;
while (it < matches) {
if (!list) {
list = emalloc(matched[it]->name_len + 1 + (it + 1 < matches ? sizeof(", ") - 1 : 0));
} else {
list = erealloc(list, (pos + matched[it]->name_len) + 1 + (it + 1 < matches ? sizeof(", ") - 1 : 0));
}
memcpy(&list[pos], matched[it]->name, matched[it]->name_len);
pos += matched[it]->name_len;
if ((it + 1) < matches) {
memcpy(&list[pos], ", ", sizeof(", ") - 1);
pos += (sizeof(", ") - 1);
}
list[pos] = 0;
it++;
}
/* ", " separated matches */
phpdbg_error("command", "type=\"ambiguous\" command=\"%s\" matches=\"%lu\" matched=\"%s\"", "The command \"%s\" is ambigious, matching %lu commands (%s)", name->str, matches, list);
efree(list);
return NULL;
}
}
if (command->subs && (*top) && ((*top)->type == STR_PARAM)) {
return phpdbg_stack_resolve(command->subs, command, top);
} else {
return command;
}
return NULL;
} /* }}} */
static int phpdbg_internal_stack_execute(phpdbg_param_t *stack, zend_bool allow_async_unsafe) {
const phpdbg_command_t *handler = NULL;
phpdbg_param_t *top = (phpdbg_param_t *) stack->next;
switch (top->type) {
case EVAL_PARAM:
phpdbg_activate_err_buf(0);
phpdbg_free_err_buf();
return PHPDBG_COMMAND_HANDLER(ev)(top);
case RUN_PARAM:
			if (!allow_async_unsafe) {
				phpdbg_error("signalsegv", "command=\"run\"", "run command is disallowed during hard interrupt");
				return FAILURE;
			}
phpdbg_activate_err_buf(0);
phpdbg_free_err_buf();
return PHPDBG_COMMAND_HANDLER(run)(top);
case SHELL_PARAM:
if (!allow_async_unsafe) {
phpdbg_error("signalsegv", "command=\"sh\"", "sh command is disallowed during hard interrupt");
return FAILURE;
}
phpdbg_activate_err_buf(0);
phpdbg_free_err_buf();
return PHPDBG_COMMAND_HANDLER(sh)(top);
case STR_PARAM: {
handler = phpdbg_stack_resolve(phpdbg_prompt_commands, NULL, &top);
if (handler) {
if (!allow_async_unsafe && !(handler->flags & PHPDBG_ASYNC_SAFE)) {
phpdbg_error("signalsegv", "command=\"%s\"", "%s command is disallowed during hard interrupt", handler->name);
return FAILURE;
}
if (phpdbg_stack_verify(handler, &top) == SUCCESS) {
phpdbg_activate_err_buf(0);
phpdbg_free_err_buf();
return handler->handler(top);
}
}
} return FAILURE;
default:
phpdbg_error("command", "type=\"invalidcommand\"", "The first parameter makes no sense !");
return FAILURE;
}
return SUCCESS;
} /* }}} */
/* {{{ */
PHPDBG_API int phpdbg_stack_execute(phpdbg_param_t *stack, zend_bool allow_async_unsafe) {
phpdbg_param_t *top = stack;
if (stack->type != STACK_PARAM) {
phpdbg_error("command", "type=\"nostack\"", "The passed argument was not a stack !");
return FAILURE;
}
if (!stack->len) {
phpdbg_error("command", "type=\"emptystack\"", "The stack contains nothing !");
return FAILURE;
}
do {
if (top->type == STACK_PARAM) {
int result;
if ((result = phpdbg_internal_stack_execute(top, allow_async_unsafe)) != SUCCESS) {
return result;
}
}
} while ((top = top->next));
return SUCCESS;
} /* }}} */
PHPDBG_API char *phpdbg_read_input(char *buffered) /* {{{ */
{
char buf[PHPDBG_MAX_CMD];
char *cmd = NULL;
char *buffer = NULL;
if ((PHPDBG_G(flags) & (PHPDBG_IS_STOPPING | PHPDBG_IS_RUNNING)) != PHPDBG_IS_STOPPING) {
if ((PHPDBG_G(flags) & PHPDBG_IS_REMOTE) && (buffered == NULL) && !phpdbg_active_sigsafe_mem()) {
fflush(PHPDBG_G(io)[PHPDBG_STDOUT].ptr);
}
if (buffered == NULL) {
#define USE_LIB_STAR (defined(HAVE_LIBREADLINE) || defined(HAVE_LIBEDIT))
/* note: EOF makes readline write prompt again in local console mode - and ignored if compiled without readline */
#if USE_LIB_STAR
if ((PHPDBG_G(flags) & PHPDBG_IS_REMOTE) || !isatty(PHPDBG_G(io)[PHPDBG_STDIN].fd))
#endif
{
phpdbg_write("prompt", "", "%s", phpdbg_get_prompt());
phpdbg_consume_stdin_line(cmd = buf);
}
#if USE_LIB_STAR
else {
cmd = readline(phpdbg_get_prompt());
PHPDBG_G(last_was_newline) = 1;
if (!cmd) {
PHPDBG_G(flags) |= PHPDBG_IS_QUITTING | PHPDBG_IS_DISCONNECTED;
zend_bailout();
}
add_history(cmd);
}
#endif
} else {
cmd = buffered;
}
buffer = estrdup(cmd);
#if USE_LIB_STAR
if (!buffered && cmd && !(PHPDBG_G(flags) & PHPDBG_IS_REMOTE) && isatty(PHPDBG_G(io)[PHPDBG_STDIN].fd)) {
free(cmd);
}
#endif
}
if (buffer && isspace(*buffer)) {
char *trimmed = buffer;
while (isspace(*trimmed))
trimmed++;
trimmed = estrdup(trimmed);
efree(buffer);
buffer = trimmed;
}
if (buffer && strlen(buffer)) {
if (PHPDBG_G(buffer)) {
free(PHPDBG_G(buffer));
}
PHPDBG_G(buffer) = strdup(buffer);
} else if (PHPDBG_G(buffer)) {
if (buffer) {
efree(buffer);
}
buffer = estrdup(PHPDBG_G(buffer));
}
return buffer;
} /* }}} */
PHPDBG_API void phpdbg_destroy_input(char **input) /*{{{ */
{
efree(*input);
} /* }}} */
PHPDBG_API int phpdbg_ask_user_permission(const char *question) {
if (!(PHPDBG_G(flags) & PHPDBG_WRITE_XML)) {
char buf[PHPDBG_MAX_CMD];
phpdbg_out("%s", question);
phpdbg_out(" (type y or n): ");
while (1) {
phpdbg_consume_stdin_line(buf);
if (buf[1] == '\n' && (buf[0] == 'y' || buf[0] == 'n')) {
if (buf[0] == 'y') {
return SUCCESS;
}
return FAILURE;
}
phpdbg_out("Please enter either y (yes) or n (no): ");
}
}
return SUCCESS;
}
| {
"language": "C"
} |
/*
* Copyright (c) 2014 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __OPA_SMI_H_
#define __OPA_SMI_H_
#include <rdma/ib_smi.h>
#include <rdma/opa_smi.h>
#include "smi.h"
enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
int port_num, int phys_port_cnt);
int opa_smi_get_fwd_port(struct opa_smp *smp);
extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
bool is_switch, int port_num);
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
* via process_mad
*/
static inline enum smi_action opa_smi_check_local_smp(struct opa_smp *smp,
struct ib_device *device)
{
/* C14-9:3 -- We're at the end of the DR segment of path */
/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */
return (device->process_mad &&
!opa_get_smp_direction(smp) &&
(smp->hop_ptr == smp->hop_cnt + 1)) ?
IB_SMI_HANDLE : IB_SMI_DISCARD;
}
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
* via process_mad
*/
static inline enum smi_action opa_smi_check_local_returning_smp(struct opa_smp *smp,
struct ib_device *device)
{
/* C14-13:3 -- We're at the end of the DR segment of path */
/* C14-13:4 -- Hop Pointer == 0 -> give to SM */
return (device->process_mad &&
opa_get_smp_direction(smp) &&
!smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD;
}
#endif /* __OPA_SMI_H_ */
| {
"language": "C"
} |
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_http.h>
#include <nginx.h>
static ngx_int_t ngx_http_header_filter_init(ngx_conf_t *cf);
static ngx_int_t ngx_http_header_filter(ngx_http_request_t *r);
static ngx_http_module_t ngx_http_header_filter_module_ctx = {
NULL, /* preconfiguration */
ngx_http_header_filter_init, /* postconfiguration */
NULL, /* create main configuration */
NULL, /* init main configuration */
NULL, /* create server configuration */
NULL, /* merge server configuration */
NULL, /* create location configuration */
NULL, /* merge location configuration */
};
ngx_module_t ngx_http_header_filter_module = {
NGX_MODULE_V1,
&ngx_http_header_filter_module_ctx, /* module context */
NULL, /* module directives */
NGX_HTTP_MODULE, /* module type */
NULL, /* init master */
NULL, /* init module */
NULL, /* init process */
NULL, /* init thread */
NULL, /* exit thread */
NULL, /* exit process */
NULL, /* exit master */
NGX_MODULE_V1_PADDING
};
static char ngx_http_server_string[] = "Server: nginx" CRLF;
static char ngx_http_server_full_string[] = "Server: " NGINX_VER CRLF;
static ngx_str_t ngx_http_status_lines[] = {
ngx_string("200 OK"),
ngx_string("201 Created"),
ngx_string("202 Accepted"),
ngx_null_string, /* "203 Non-Authoritative Information" */
ngx_string("204 No Content"),
ngx_null_string, /* "205 Reset Content" */
ngx_string("206 Partial Content"),
/* ngx_null_string, */ /* "207 Multi-Status" */
#define NGX_HTTP_LAST_2XX 207
#define NGX_HTTP_OFF_3XX (NGX_HTTP_LAST_2XX - 200)
/* ngx_null_string, */ /* "300 Multiple Choices" */
ngx_string("301 Moved Permanently"),
ngx_string("302 Moved Temporarily"),
ngx_string("303 See Other"),
ngx_string("304 Not Modified"),
ngx_null_string, /* "305 Use Proxy" */
ngx_null_string, /* "306 unused" */
ngx_string("307 Temporary Redirect"),
#define NGX_HTTP_LAST_3XX 308
#define NGX_HTTP_OFF_4XX (NGX_HTTP_LAST_3XX - 301 + NGX_HTTP_OFF_3XX)
ngx_string("400 Bad Request"),
ngx_string("401 Unauthorized"),
ngx_string("402 Payment Required"),
ngx_string("403 Forbidden"),
ngx_string("404 Not Found"),
ngx_string("405 Not Allowed"),
ngx_string("406 Not Acceptable"),
ngx_null_string, /* "407 Proxy Authentication Required" */
ngx_string("408 Request Time-out"),
ngx_string("409 Conflict"),
ngx_string("410 Gone"),
ngx_string("411 Length Required"),
ngx_string("412 Precondition Failed"),
ngx_string("413 Request Entity Too Large"),
ngx_null_string, /* "414 Request-URI Too Large", but we never send it
* because we treat such requests as the HTTP/0.9
* requests and send only a body without a header
*/
ngx_string("415 Unsupported Media Type"),
ngx_string("416 Requested Range Not Satisfiable"),
/* ngx_null_string, */ /* "417 Expectation Failed" */
/* ngx_null_string, */ /* "418 unused" */
/* ngx_null_string, */ /* "419 unused" */
/* ngx_null_string, */ /* "420 unused" */
/* ngx_null_string, */ /* "421 unused" */
/* ngx_null_string, */ /* "422 Unprocessable Entity" */
/* ngx_null_string, */ /* "423 Locked" */
/* ngx_null_string, */ /* "424 Failed Dependency" */
#define NGX_HTTP_LAST_4XX 417
#define NGX_HTTP_OFF_5XX (NGX_HTTP_LAST_4XX - 400 + NGX_HTTP_OFF_4XX)
ngx_string("500 Internal Server Error"),
ngx_string("501 Method Not Implemented"),
ngx_string("502 Bad Gateway"),
ngx_string("503 Service Temporarily Unavailable"),
ngx_string("504 Gateway Time-out"),
ngx_null_string, /* "505 HTTP Version Not Supported" */
ngx_null_string, /* "506 Variant Also Negotiates" */
ngx_string("507 Insufficient Storage"),
/* ngx_null_string, */ /* "508 unused" */
/* ngx_null_string, */ /* "509 unused" */
/* ngx_null_string, */ /* "510 Not Extended" */
#define NGX_HTTP_LAST_5XX 508
};
ngx_http_header_out_t ngx_http_headers_out[] = {
{ ngx_string("Server"), offsetof(ngx_http_headers_out_t, server) },
{ ngx_string("Date"), offsetof(ngx_http_headers_out_t, date) },
{ ngx_string("Content-Length"),
offsetof(ngx_http_headers_out_t, content_length) },
{ ngx_string("Content-Encoding"),
offsetof(ngx_http_headers_out_t, content_encoding) },
{ ngx_string("Location"), offsetof(ngx_http_headers_out_t, location) },
{ ngx_string("Last-Modified"),
offsetof(ngx_http_headers_out_t, last_modified) },
{ ngx_string("Accept-Ranges"),
offsetof(ngx_http_headers_out_t, accept_ranges) },
{ ngx_string("Expires"), offsetof(ngx_http_headers_out_t, expires) },
{ ngx_string("Cache-Control"),
offsetof(ngx_http_headers_out_t, cache_control) },
{ ngx_string("ETag"), offsetof(ngx_http_headers_out_t, etag) },
{ ngx_null_string, 0 }
};
static ngx_int_t
ngx_http_header_filter(ngx_http_request_t *r)
{
u_char *p;
size_t len;
ngx_str_t host, *status_line;
ngx_buf_t *b;
ngx_uint_t status, i, port;
ngx_chain_t out;
ngx_list_part_t *part;
ngx_table_elt_t *header;
ngx_connection_t *c;
ngx_http_core_loc_conf_t *clcf;
ngx_http_core_srv_conf_t *cscf;
struct sockaddr_in *sin;
#if (NGX_HAVE_INET6)
struct sockaddr_in6 *sin6;
#endif
u_char addr[NGX_SOCKADDR_STRLEN];
if (r->header_sent) {
return NGX_OK;
}
r->header_sent = 1;
if (r != r->main) {
return NGX_OK;
}
if (r->http_version < NGX_HTTP_VERSION_10) {
return NGX_OK;
}
if (r->method == NGX_HTTP_HEAD) {
r->header_only = 1;
}
if (r->headers_out.last_modified_time != -1) {
if (r->headers_out.status != NGX_HTTP_OK
&& r->headers_out.status != NGX_HTTP_PARTIAL_CONTENT
&& r->headers_out.status != NGX_HTTP_NOT_MODIFIED)
{
r->headers_out.last_modified_time = -1;
r->headers_out.last_modified = NULL;
}
}
len = sizeof("HTTP/1.x ") - 1 + sizeof(CRLF) - 1
/* the end of the header */
+ sizeof(CRLF) - 1;
/* status line */
if (r->headers_out.status_line.len) {
len += r->headers_out.status_line.len;
status_line = &r->headers_out.status_line;
#if (NGX_SUPPRESS_WARN)
status = 0;
#endif
} else {
status = r->headers_out.status;
if (status >= NGX_HTTP_OK
&& status < NGX_HTTP_LAST_2XX)
{
/* 2XX */
if (status == NGX_HTTP_NO_CONTENT) {
r->header_only = 1;
ngx_str_null(&r->headers_out.content_type);
r->headers_out.last_modified_time = -1;
r->headers_out.last_modified = NULL;
r->headers_out.content_length = NULL;
r->headers_out.content_length_n = -1;
}
status -= NGX_HTTP_OK;
status_line = &ngx_http_status_lines[status];
len += ngx_http_status_lines[status].len;
} else if (status >= NGX_HTTP_MOVED_PERMANENTLY
&& status < NGX_HTTP_LAST_3XX)
{
/* 3XX */
if (status == NGX_HTTP_NOT_MODIFIED) {
r->header_only = 1;
}
status = status - NGX_HTTP_MOVED_PERMANENTLY + NGX_HTTP_OFF_3XX;
status_line = &ngx_http_status_lines[status];
len += ngx_http_status_lines[status].len;
} else if (status >= NGX_HTTP_BAD_REQUEST
&& status < NGX_HTTP_LAST_4XX)
{
/* 4XX */
status = status - NGX_HTTP_BAD_REQUEST
+ NGX_HTTP_OFF_4XX;
status_line = &ngx_http_status_lines[status];
len += ngx_http_status_lines[status].len;
} else if (status >= NGX_HTTP_INTERNAL_SERVER_ERROR
&& status < NGX_HTTP_LAST_5XX)
{
/* 5XX */
status = status - NGX_HTTP_INTERNAL_SERVER_ERROR
+ NGX_HTTP_OFF_5XX;
status_line = &ngx_http_status_lines[status];
len += ngx_http_status_lines[status].len;
} else {
len += NGX_INT_T_LEN;
status_line = NULL;
}
}
clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);
if (r->headers_out.server == NULL) {
len += clcf->server_tokens ? sizeof(ngx_http_server_full_string) - 1:
sizeof(ngx_http_server_string) - 1;
}
if (r->headers_out.date == NULL) {
len += sizeof("Date: Mon, 28 Sep 1970 06:00:00 GMT" CRLF) - 1;
}
if (r->headers_out.content_type.len) {
len += sizeof("Content-Type: ") - 1
+ r->headers_out.content_type.len + 2;
if (r->headers_out.content_type_len == r->headers_out.content_type.len
&& r->headers_out.charset.len)
{
len += sizeof("; charset=") - 1 + r->headers_out.charset.len;
}
}
if (r->headers_out.content_length == NULL
&& r->headers_out.content_length_n >= 0)
{
len += sizeof("Content-Length: ") - 1 + NGX_OFF_T_LEN + 2;
}
if (r->headers_out.last_modified == NULL
&& r->headers_out.last_modified_time != -1)
{
len += sizeof("Last-Modified: Mon, 28 Sep 1970 06:00:00 GMT" CRLF) - 1;
}
c = r->connection;
if (r->headers_out.location
&& r->headers_out.location->value.len
&& r->headers_out.location->value.data[0] == '/')
{
r->headers_out.location->hash = 0;
if (clcf->server_name_in_redirect) {
cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module);
host = cscf->server_name;
} else if (r->headers_in.server.len) {
host = r->headers_in.server;
} else {
host.len = NGX_SOCKADDR_STRLEN;
host.data = addr;
if (ngx_connection_local_sockaddr(c, &host, 0) != NGX_OK) {
return NGX_ERROR;
}
}
switch (c->local_sockaddr->sa_family) {
#if (NGX_HAVE_INET6)
case AF_INET6:
sin6 = (struct sockaddr_in6 *) c->local_sockaddr;
port = ntohs(sin6->sin6_port);
break;
#endif
#if (NGX_HAVE_UNIX_DOMAIN)
case AF_UNIX:
port = 0;
break;
#endif
default: /* AF_INET */
sin = (struct sockaddr_in *) c->local_sockaddr;
port = ntohs(sin->sin_port);
break;
}
len += sizeof("Location: https://") - 1
+ host.len
+ r->headers_out.location->value.len + 2;
if (clcf->port_in_redirect) {
#if (NGX_HTTP_SSL)
if (c->ssl)
port = (port == 443) ? 0 : port;
else
#endif
port = (port == 80) ? 0 : port;
} else {
port = 0;
}
if (port) {
len += sizeof(":65535") - 1;
}
} else {
ngx_str_null(&host);
port = 0;
}
if (r->chunked) {
len += sizeof("Transfer-Encoding: chunked" CRLF) - 1;
}
if (r->keepalive) {
len += sizeof("Connection: keep-alive" CRLF) - 1;
/*
* MSIE and Opera ignore the "Keep-Alive: timeout=<N>" header.
* MSIE keeps the connection alive for about 60-65 seconds.
* Opera keeps the connection alive very long.
* Mozilla keeps the connection alive for N plus about 1-10 seconds.
* Konqueror keeps the connection alive for about N seconds.
*/
if (clcf->keepalive_header) {
len += sizeof("Keep-Alive: timeout=") - 1 + NGX_TIME_T_LEN + 2;
}
} else {
len += sizeof("Connection: closed" CRLF) - 1;
}
#if (NGX_HTTP_GZIP)
if (r->gzip_vary) {
if (clcf->gzip_vary) {
len += sizeof("Vary: Accept-Encoding" CRLF) - 1;
} else {
r->gzip_vary = 0;
}
}
#endif
part = &r->headers_out.headers.part;
header = part->elts;
for (i = 0; /* void */; i++) {
if (i >= part->nelts) {
if (part->next == NULL) {
break;
}
part = part->next;
header = part->elts;
i = 0;
}
if (header[i].hash == 0) {
continue;
}
len += header[i].key.len + sizeof(": ") - 1 + header[i].value.len
+ sizeof(CRLF) - 1;
}
b = ngx_create_temp_buf(r->pool, len);
if (b == NULL) {
return NGX_ERROR;
}
/* "HTTP/1.x " */
b->last = ngx_cpymem(b->last, "HTTP/1.1 ", sizeof("HTTP/1.x ") - 1);
/* status line */
if (status_line) {
b->last = ngx_copy(b->last, status_line->data, status_line->len);
} else {
b->last = ngx_sprintf(b->last, "%ui", status);
}
*b->last++ = CR; *b->last++ = LF;
if (r->headers_out.server == NULL) {
if (clcf->server_tokens) {
p = (u_char *) ngx_http_server_full_string;
len = sizeof(ngx_http_server_full_string) - 1;
} else {
p = (u_char *) ngx_http_server_string;
len = sizeof(ngx_http_server_string) - 1;
}
b->last = ngx_cpymem(b->last, p, len);
}
if (r->headers_out.date == NULL) {
b->last = ngx_cpymem(b->last, "Date: ", sizeof("Date: ") - 1);
b->last = ngx_cpymem(b->last, ngx_cached_http_time.data,
ngx_cached_http_time.len);
*b->last++ = CR; *b->last++ = LF;
}
if (r->headers_out.content_type.len) {
b->last = ngx_cpymem(b->last, "Content-Type: ",
sizeof("Content-Type: ") - 1);
p = b->last;
b->last = ngx_copy(b->last, r->headers_out.content_type.data,
r->headers_out.content_type.len);
if (r->headers_out.content_type_len == r->headers_out.content_type.len
&& r->headers_out.charset.len)
{
b->last = ngx_cpymem(b->last, "; charset=",
sizeof("; charset=") - 1);
b->last = ngx_copy(b->last, r->headers_out.charset.data,
r->headers_out.charset.len);
/* update r->headers_out.content_type for possible logging */
r->headers_out.content_type.len = b->last - p;
r->headers_out.content_type.data = p;
}
*b->last++ = CR; *b->last++ = LF;
}
if (r->headers_out.content_length == NULL
&& r->headers_out.content_length_n >= 0)
{
b->last = ngx_sprintf(b->last, "Content-Length: %O" CRLF,
r->headers_out.content_length_n);
}
if (r->headers_out.last_modified == NULL
&& r->headers_out.last_modified_time != -1)
{
b->last = ngx_cpymem(b->last, "Last-Modified: ",
sizeof("Last-Modified: ") - 1);
b->last = ngx_http_time(b->last, r->headers_out.last_modified_time);
*b->last++ = CR; *b->last++ = LF;
}
if (host.data) {
p = b->last + sizeof("Location: ") - 1;
b->last = ngx_cpymem(b->last, "Location: http",
sizeof("Location: http") - 1);
#if (NGX_HTTP_SSL)
if (c->ssl) {
*b->last++ ='s';
}
#endif
*b->last++ = ':'; *b->last++ = '/'; *b->last++ = '/';
b->last = ngx_copy(b->last, host.data, host.len);
if (port) {
b->last = ngx_sprintf(b->last, ":%ui", port);
}
b->last = ngx_copy(b->last, r->headers_out.location->value.data,
r->headers_out.location->value.len);
/* update r->headers_out.location->value for possible logging */
r->headers_out.location->value.len = b->last - p;
r->headers_out.location->value.data = p;
ngx_str_set(&r->headers_out.location->key, "Location");
*b->last++ = CR; *b->last++ = LF;
}
if (r->chunked) {
b->last = ngx_cpymem(b->last, "Transfer-Encoding: chunked" CRLF,
sizeof("Transfer-Encoding: chunked" CRLF) - 1);
}
if (r->keepalive) {
b->last = ngx_cpymem(b->last, "Connection: keep-alive" CRLF,
sizeof("Connection: keep-alive" CRLF) - 1);
if (clcf->keepalive_header) {
b->last = ngx_sprintf(b->last, "Keep-Alive: timeout=%T" CRLF,
clcf->keepalive_header);
}
} else {
b->last = ngx_cpymem(b->last, "Connection: close" CRLF,
sizeof("Connection: close" CRLF) - 1);
}
#if (NGX_HTTP_GZIP)
if (r->gzip_vary) {
b->last = ngx_cpymem(b->last, "Vary: Accept-Encoding" CRLF,
sizeof("Vary: Accept-Encoding" CRLF) - 1);
}
#endif
part = &r->headers_out.headers.part;
header = part->elts;
for (i = 0; /* void */; i++) {
if (i >= part->nelts) {
if (part->next == NULL) {
break;
}
part = part->next;
header = part->elts;
i = 0;
}
if (header[i].hash == 0) {
continue;
}
b->last = ngx_copy(b->last, header[i].key.data, header[i].key.len);
*b->last++ = ':'; *b->last++ = ' ';
b->last = ngx_copy(b->last, header[i].value.data, header[i].value.len);
*b->last++ = CR; *b->last++ = LF;
}
ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
"%*s", (size_t) (b->last - b->pos), b->pos);
/* the end of HTTP header */
*b->last++ = CR; *b->last++ = LF;
r->header_size = b->last - b->pos;
if (r->header_only) {
b->last_buf = 1;
}
out.buf = b;
out.next = NULL;
return ngx_http_write_filter(r, &out);
}
static ngx_int_t
ngx_http_header_filter_init(ngx_conf_t *cf) /* called at the postconfiguration stage */
{
    ngx_http_top_header_filter = ngx_http_header_filter; /* install this filter at the head of the header filter chain */
return NGX_OK;
}
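A small standalone worked example of the ngx_http_status_lines indexing scheme used above: the NGX_HTTP_OFF_* offsets collapse the gap between status classes so the array stays dense (constants copied from the module, the rest is illustrative):

#include <stdio.h>

#define NGX_HTTP_LAST_2XX  207
#define NGX_HTTP_OFF_3XX   (NGX_HTTP_LAST_2XX - 200)                    /* 7  */
#define NGX_HTTP_LAST_3XX  308
#define NGX_HTTP_OFF_4XX   (NGX_HTTP_LAST_3XX - 301 + NGX_HTTP_OFF_3XX) /* 14 */

int main(void)
{
    unsigned status = 404;

    /* same arithmetic as the 4XX branch of ngx_http_header_filter() */
    unsigned idx = status - 400 + NGX_HTTP_OFF_4XX;

    /* 404 - 400 + 14 = 18, which is "404 Not Found" in the table above */
    printf("status %u -> ngx_http_status_lines[%u]\n", status, idx);
    return 0;
}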
/*
Copyright (C) 1996-1997 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
$Id: sv_sys_unix.c,v 1.11 2006-12-30 11:24:54 disconn3ct Exp $
*/
#include <sys/types.h>
#include "qwsvdef.h"
#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__)
#include <sys/stat.h>
#include <unistd.h>
#include <sys/time.h>
#include <errno.h>
#else
#include <sys/dir.h>
#endif
cvar_t sys_nostdout = {"sys_nostdout","0"};
cvar_t sys_extrasleep = {"sys_extrasleep","0"};
qbool stdin_ready;
int do_stdin = 1;
/*
===============================================================================
REQUIRED SYS FUNCTIONS
===============================================================================
*/
/*
============
Sys_mkdir
============
*/
void Sys_mkdir (const char *path)
{
if (mkdir (path, 0777) != -1)
return;
if (errno != EEXIST)
Sys_Error ("mkdir %s: %s",path, strerror(errno));
}
/*
================
Sys_DoubleTime
================
*/
double Sys_DoubleTime (void)
{
struct timeval tp;
struct timezone tzp;
static int secbase;
gettimeofday(&tp, &tzp);
if (!secbase)
{
secbase = tp.tv_sec;
return tp.tv_usec/1000000.0;
}
return (tp.tv_sec - secbase) + tp.tv_usec/1000000.0;
}
/*
================
Sys_Error
================
*/
void Sys_Error (char *error, ...)
{
va_list argptr;
char string[1024];
va_start (argptr ,error);
vsnprintf (string, sizeof(string), error, argptr);
va_end (argptr);
printf ("Fatal error: %s\n",string);
exit (1);
}
/*
================
Sys_Printf
================
*/
void Sys_Printf (char *fmt, ...)
{
va_list argptr;
static char text[2048];
unsigned char *p;
va_start (argptr, fmt);
vsnprintf (text, sizeof(text), fmt, argptr);
va_end (argptr);
if (sys_nostdout.value)
return;
for (p = (unsigned char *)text; *p; p++) {
*p &= 0x7f;
if ((*p > 128 || *p < 32) && *p != 10 && *p != 13 && *p != 9)
printf("[%02x]", *p);
else
putc(*p, stdout);
}
fflush(stdout);
}
/*
================
Sys_Quit
================
*/
void Sys_Quit (void)
{
exit (0); // appkit isn't running
}
/*
=============
Sys_Init
Quake calls this so the system can register variables before host_hunklevel
is marked
=============
*/
void Sys_Init (void)
{
Cvar_Register (&sys_nostdout);
Cvar_Register (&sys_extrasleep);
}
/*
=============
main
=============
*/
int main (int argc, char *argv[])
{
double time, oldtime, newtime;
Host_Init (argc, argv, 16*1024*1024);
//
// main loop
//
oldtime = Sys_DoubleTime () - 0.1;
while (1)
{
// select on the net socket and stdin
NET_Sleep (10);
// find time passed since last cycle
newtime = Sys_DoubleTime ();
time = newtime - oldtime;
oldtime = newtime;
Host_Frame (time);
		// extrasleep is just a way to deliberately simulate a degraded connection
if (sys_extrasleep.value)
usleep (sys_extrasleep.value);
}
}
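The secbase trick in Sys_DoubleTime() above keeps the returned value small so a double retains microsecond resolution even after long uptimes; a standalone sketch of the same pattern (assumes POSIX gettimeofday, as the file itself does):

#include <stdio.h>
#include <sys/time.h>

static double uptime_seconds (void)
{
	static long secbase;
	struct timeval tp;

	gettimeofday (&tp, NULL);

	// rebase on the first call so the result stays near zero instead of
	// ~1.7e9 seconds, which would waste most of the double's precision
	if (!secbase)
	{
		secbase = tp.tv_sec;
		return tp.tv_usec / 1000000.0;
	}
	return (tp.tv_sec - secbase) + tp.tv_usec / 1000000.0;
}

int main (void)
{
	printf ("%f\n", uptime_seconds ());
	return 0;
}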
/*
* Device access for Dialog DA9055 PMICs.
*
* Copyright(c) 2012 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <dchen@diasemi.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/input.h>
#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/mfd/core.h>
#include <linux/mfd/da9055/core.h>
#include <linux/mfd/da9055/pdata.h>
#include <linux/mfd/da9055/reg.h>
#define DA9055_IRQ_NONKEY_MASK 0x01
#define DA9055_IRQ_ALM_MASK 0x02
#define DA9055_IRQ_TICK_MASK 0x04
#define DA9055_IRQ_ADC_MASK 0x08
#define DA9055_IRQ_BUCK_ILIM_MASK 0x08
static bool da9055_register_readable(struct device *dev, unsigned int reg)
{
switch (reg) {
case DA9055_REG_STATUS_A:
case DA9055_REG_STATUS_B:
case DA9055_REG_EVENT_A:
case DA9055_REG_EVENT_B:
case DA9055_REG_EVENT_C:
case DA9055_REG_IRQ_MASK_A:
case DA9055_REG_IRQ_MASK_B:
case DA9055_REG_IRQ_MASK_C:
case DA9055_REG_CONTROL_A:
case DA9055_REG_CONTROL_B:
case DA9055_REG_CONTROL_C:
case DA9055_REG_CONTROL_D:
case DA9055_REG_CONTROL_E:
case DA9055_REG_ADC_MAN:
case DA9055_REG_ADC_CONT:
case DA9055_REG_VSYS_MON:
case DA9055_REG_ADC_RES_L:
case DA9055_REG_ADC_RES_H:
case DA9055_REG_VSYS_RES:
case DA9055_REG_ADCIN1_RES:
case DA9055_REG_ADCIN2_RES:
case DA9055_REG_ADCIN3_RES:
case DA9055_REG_COUNT_S:
case DA9055_REG_COUNT_MI:
case DA9055_REG_COUNT_H:
case DA9055_REG_COUNT_D:
case DA9055_REG_COUNT_MO:
case DA9055_REG_COUNT_Y:
case DA9055_REG_ALARM_H:
case DA9055_REG_ALARM_D:
case DA9055_REG_ALARM_MI:
case DA9055_REG_ALARM_MO:
case DA9055_REG_ALARM_Y:
case DA9055_REG_GPIO0_1:
case DA9055_REG_GPIO2:
case DA9055_REG_GPIO_MODE0_2:
case DA9055_REG_BCORE_CONT:
case DA9055_REG_BMEM_CONT:
case DA9055_REG_LDO1_CONT:
case DA9055_REG_LDO2_CONT:
case DA9055_REG_LDO3_CONT:
case DA9055_REG_LDO4_CONT:
case DA9055_REG_LDO5_CONT:
case DA9055_REG_LDO6_CONT:
case DA9055_REG_BUCK_LIM:
case DA9055_REG_BCORE_MODE:
case DA9055_REG_VBCORE_A:
case DA9055_REG_VBMEM_A:
case DA9055_REG_VLDO1_A:
case DA9055_REG_VLDO2_A:
case DA9055_REG_VLDO3_A:
case DA9055_REG_VLDO4_A:
case DA9055_REG_VLDO5_A:
case DA9055_REG_VLDO6_A:
case DA9055_REG_VBCORE_B:
case DA9055_REG_VBMEM_B:
case DA9055_REG_VLDO1_B:
case DA9055_REG_VLDO2_B:
case DA9055_REG_VLDO3_B:
case DA9055_REG_VLDO4_B:
case DA9055_REG_VLDO5_B:
case DA9055_REG_VLDO6_B:
return true;
default:
return false;
}
}
static bool da9055_register_writeable(struct device *dev, unsigned int reg)
{
switch (reg) {
case DA9055_REG_STATUS_A:
case DA9055_REG_STATUS_B:
case DA9055_REG_EVENT_A:
case DA9055_REG_EVENT_B:
case DA9055_REG_EVENT_C:
case DA9055_REG_IRQ_MASK_A:
case DA9055_REG_IRQ_MASK_B:
case DA9055_REG_IRQ_MASK_C:
case DA9055_REG_CONTROL_A:
case DA9055_REG_CONTROL_B:
case DA9055_REG_CONTROL_C:
case DA9055_REG_CONTROL_D:
case DA9055_REG_CONTROL_E:
case DA9055_REG_ADC_MAN:
case DA9055_REG_ADC_CONT:
case DA9055_REG_VSYS_MON:
case DA9055_REG_ADC_RES_L:
case DA9055_REG_ADC_RES_H:
case DA9055_REG_VSYS_RES:
case DA9055_REG_ADCIN1_RES:
case DA9055_REG_ADCIN2_RES:
case DA9055_REG_ADCIN3_RES:
case DA9055_REG_COUNT_S:
case DA9055_REG_COUNT_MI:
case DA9055_REG_COUNT_H:
case DA9055_REG_COUNT_D:
case DA9055_REG_COUNT_MO:
case DA9055_REG_COUNT_Y:
case DA9055_REG_ALARM_H:
case DA9055_REG_ALARM_D:
case DA9055_REG_ALARM_MI:
case DA9055_REG_ALARM_MO:
case DA9055_REG_ALARM_Y:
case DA9055_REG_GPIO0_1:
case DA9055_REG_GPIO2:
case DA9055_REG_GPIO_MODE0_2:
case DA9055_REG_BCORE_CONT:
case DA9055_REG_BMEM_CONT:
case DA9055_REG_LDO1_CONT:
case DA9055_REG_LDO2_CONT:
case DA9055_REG_LDO3_CONT:
case DA9055_REG_LDO4_CONT:
case DA9055_REG_LDO5_CONT:
case DA9055_REG_LDO6_CONT:
case DA9055_REG_BUCK_LIM:
case DA9055_REG_BCORE_MODE:
case DA9055_REG_VBCORE_A:
case DA9055_REG_VBMEM_A:
case DA9055_REG_VLDO1_A:
case DA9055_REG_VLDO2_A:
case DA9055_REG_VLDO3_A:
case DA9055_REG_VLDO4_A:
case DA9055_REG_VLDO5_A:
case DA9055_REG_VLDO6_A:
case DA9055_REG_VBCORE_B:
case DA9055_REG_VBMEM_B:
case DA9055_REG_VLDO1_B:
case DA9055_REG_VLDO2_B:
case DA9055_REG_VLDO3_B:
case DA9055_REG_VLDO4_B:
case DA9055_REG_VLDO5_B:
case DA9055_REG_VLDO6_B:
return true;
default:
return false;
}
}
static bool da9055_register_volatile(struct device *dev, unsigned int reg)
{
switch (reg) {
case DA9055_REG_STATUS_A:
case DA9055_REG_STATUS_B:
case DA9055_REG_EVENT_A:
case DA9055_REG_EVENT_B:
case DA9055_REG_EVENT_C:
case DA9055_REG_CONTROL_A:
case DA9055_REG_CONTROL_E:
case DA9055_REG_ADC_MAN:
case DA9055_REG_ADC_RES_L:
case DA9055_REG_ADC_RES_H:
case DA9055_REG_VSYS_RES:
case DA9055_REG_ADCIN1_RES:
case DA9055_REG_ADCIN2_RES:
case DA9055_REG_ADCIN3_RES:
case DA9055_REG_COUNT_S:
case DA9055_REG_COUNT_MI:
case DA9055_REG_COUNT_H:
case DA9055_REG_COUNT_D:
case DA9055_REG_COUNT_MO:
case DA9055_REG_COUNT_Y:
case DA9055_REG_ALARM_MI:
case DA9055_REG_BCORE_CONT:
case DA9055_REG_BMEM_CONT:
case DA9055_REG_LDO1_CONT:
case DA9055_REG_LDO2_CONT:
case DA9055_REG_LDO3_CONT:
case DA9055_REG_LDO4_CONT:
case DA9055_REG_LDO5_CONT:
case DA9055_REG_LDO6_CONT:
return true;
default:
return false;
}
}
static const struct regmap_irq da9055_irqs[] = {
[DA9055_IRQ_NONKEY] = {
.reg_offset = 0,
.mask = DA9055_IRQ_NONKEY_MASK,
},
[DA9055_IRQ_ALARM] = {
.reg_offset = 0,
.mask = DA9055_IRQ_ALM_MASK,
},
[DA9055_IRQ_TICK] = {
.reg_offset = 0,
.mask = DA9055_IRQ_TICK_MASK,
},
[DA9055_IRQ_HWMON] = {
.reg_offset = 0,
.mask = DA9055_IRQ_ADC_MASK,
},
[DA9055_IRQ_REGULATOR] = {
.reg_offset = 1,
.mask = DA9055_IRQ_BUCK_ILIM_MASK,
},
};
const struct regmap_config da9055_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
.max_register = DA9055_MAX_REGISTER_CNT,
.readable_reg = da9055_register_readable,
.writeable_reg = da9055_register_writeable,
.volatile_reg = da9055_register_volatile,
};
EXPORT_SYMBOL_GPL(da9055_regmap_config);
static struct resource da9055_onkey_resource = {
.name = "ONKEY",
.start = DA9055_IRQ_NONKEY,
.end = DA9055_IRQ_NONKEY,
.flags = IORESOURCE_IRQ,
};
static struct resource da9055_rtc_resource[] = {
{
.name = "ALM",
.start = DA9055_IRQ_ALARM,
.end = DA9055_IRQ_ALARM,
.flags = IORESOURCE_IRQ,
},
{
.name = "TICK",
.start = DA9055_IRQ_TICK,
.end = DA9055_IRQ_TICK,
.flags = IORESOURCE_IRQ,
},
};
static struct resource da9055_hwmon_resource = {
.name = "HWMON",
.start = DA9055_IRQ_HWMON,
.end = DA9055_IRQ_HWMON,
.flags = IORESOURCE_IRQ,
};
static struct resource da9055_ld05_6_resource = {
.name = "REGULATOR",
.start = DA9055_IRQ_REGULATOR,
.end = DA9055_IRQ_REGULATOR,
.flags = IORESOURCE_IRQ,
};
static const struct mfd_cell da9055_devs[] = {
{
.of_compatible = "dlg,da9055-gpio",
.name = "da9055-gpio",
},
{
.of_compatible = "dlg,da9055-regulator",
.name = "da9055-regulator",
.id = 1,
},
{
.of_compatible = "dlg,da9055-regulator",
.name = "da9055-regulator",
.id = 2,
},
{
.of_compatible = "dlg,da9055-regulator",
.name = "da9055-regulator",
.id = 3,
},
{
.of_compatible = "dlg,da9055-regulator",
.name = "da9055-regulator",
.id = 4,
},
{
.of_compatible = "dlg,da9055-regulator",
.name = "da9055-regulator",
.id = 5,
},
{
.of_compatible = "dlg,da9055-regulator",
.name = "da9055-regulator",
.id = 6,
},
{
.of_compatible = "dlg,da9055-regulator",
.name = "da9055-regulator",
.id = 7,
.resources = &da9055_ld05_6_resource,
.num_resources = 1,
},
{
.of_compatible = "dlg,da9055-regulator",
.name = "da9055-regulator",
.resources = &da9055_ld05_6_resource,
.num_resources = 1,
.id = 8,
},
{
.of_compatible = "dlg,da9055-onkey",
.name = "da9055-onkey",
.resources = &da9055_onkey_resource,
.num_resources = 1,
},
{
.of_compatible = "dlg,da9055-rtc",
.name = "da9055-rtc",
.resources = da9055_rtc_resource,
.num_resources = ARRAY_SIZE(da9055_rtc_resource),
},
{
.of_compatible = "dlg,da9055-hwmon",
.name = "da9055-hwmon",
.resources = &da9055_hwmon_resource,
.num_resources = 1,
},
{
.of_compatible = "dlg,da9055-watchdog",
.name = "da9055-watchdog",
},
};
static const struct regmap_irq_chip da9055_regmap_irq_chip = {
.name = "da9055_irq",
.status_base = DA9055_REG_EVENT_A,
.mask_base = DA9055_REG_IRQ_MASK_A,
.ack_base = DA9055_REG_EVENT_A,
.num_regs = 3,
.irqs = da9055_irqs,
.num_irqs = ARRAY_SIZE(da9055_irqs),
};
int da9055_device_init(struct da9055 *da9055)
{
struct da9055_pdata *pdata = dev_get_platdata(da9055->dev);
int ret;
uint8_t clear_events[3] = {0xFF, 0xFF, 0xFF};
if (pdata && pdata->init != NULL)
pdata->init(da9055);
if (!pdata || !pdata->irq_base)
da9055->irq_base = -1;
else
da9055->irq_base = pdata->irq_base;
ret = da9055_group_write(da9055, DA9055_REG_EVENT_A, 3, clear_events);
if (ret < 0)
return ret;
ret = regmap_add_irq_chip(da9055->regmap, da9055->chip_irq,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
da9055->irq_base, &da9055_regmap_irq_chip,
&da9055->irq_data);
if (ret < 0)
return ret;
da9055->irq_base = regmap_irq_chip_get_base(da9055->irq_data);
ret = mfd_add_devices(da9055->dev, -1,
da9055_devs, ARRAY_SIZE(da9055_devs),
NULL, da9055->irq_base, NULL);
if (ret)
goto err;
return 0;
err:
mfd_remove_devices(da9055->dev);
return ret;
}
void da9055_device_exit(struct da9055 *da9055)
{
regmap_del_irq_chip(da9055->chip_irq, da9055->irq_data);
mfd_remove_devices(da9055->dev);
}
MODULE_DESCRIPTION("Core support for the DA9055 PMIC");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
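For context, a hedged sketch of how a bus driver could attach da9055_regmap_config and hand off to da9055_device_init(); it follows the pattern of the real DA9055 I2C shim, but the function below is illustrative only and additionally assumes <linux/i2c.h> and <linux/regmap.h>:

/* Hypothetical fragment, not the actual probe. */
static int da9055_i2c_probe_sketch(struct i2c_client *client)
{
	struct da9055 *da9055;

	da9055 = devm_kzalloc(&client->dev, sizeof(*da9055), GFP_KERNEL);
	if (!da9055)
		return -ENOMEM;

	/* The readable/writeable/volatile callbacks defined above decide
	 * which registers the regcache may serve without bus traffic. */
	da9055->regmap = devm_regmap_init_i2c(client, &da9055_regmap_config);
	if (IS_ERR(da9055->regmap))
		return PTR_ERR(da9055->regmap);

	da9055->dev = &client->dev;
	da9055->chip_irq = client->irq;
	i2c_set_clientdata(client, da9055);

	return da9055_device_init(da9055);
}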
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <sound/control.h>
#include <sound/q6adm-v2.h>
#include "msm-ds2-dap-config.h"
#include "msm-pcm-routing-v2.h"
#include <sound/q6core.h>
#ifdef CONFIG_DOLBY_DS2
/* ramp up/down for 30ms */
#define DOLBY_SOFT_VOLUME_PERIOD 40
/* Step value 0ms or 0us */
#define DOLBY_SOFT_VOLUME_STEP 1000
#define DOLBY_ADDITIONAL_RAMP_WAIT 10
#define SOFT_VOLUME_PARAM_SIZE 3
#define PARAM_PAYLOAD_SIZE 3
enum {
DOLBY_SOFT_VOLUME_CURVE_LINEAR = 0,
DOLBY_SOFT_VOLUME_CURVE_EXP,
DOLBY_SOFT_VOLUME_CURVE_LOG,
};
#define VOLUME_ZERO_GAIN 0x0
#define VOLUME_UNITY_GAIN 0x2000
/* Wait time for module enable/disable */
#define DOLBY_MODULE_ENABLE_PERIOD 50
/* DOLBY device definitions end */
enum {
DOLBY_OFF_CACHE = 0,
DOLBY_SPEAKER_CACHE,
DOLBY_HEADPHONE_CACHE,
DOLBY_HDMI_CACHE,
DOLBY_WFD_CACHE,
DOLBY_FM_CACHE,
DOLBY_MAX_CACHE,
};
enum {
DAP_SOFT_BYPASS = 0,
DAP_HARD_BYPASS,
};
enum {
MODULE_DISABLE = 0,
MODULE_ENABLE,
};
/* dolby param ids to/from dsp */
static uint32_t ds2_dap_params_id[MAX_DS2_PARAMS] = {
DOLBY_PARAM_ID_VDHE, DOLBY_PARAM_ID_VSPE, DOLBY_PARAM_ID_DSSF,
DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLE,
DOLBY_PARAM_ID_DVMC, DOLBY_PARAM_ID_DVME, DOLBY_PARAM_ID_IENB,
DOLBY_PARAM_ID_IEBF, DOLBY_PARAM_ID_IEON, DOLBY_PARAM_ID_DEON,
DOLBY_PARAM_ID_NGON, DOLBY_PARAM_ID_GEON, DOLBY_PARAM_ID_GENB,
DOLBY_PARAM_ID_GEBF, DOLBY_PARAM_ID_AONB, DOLBY_PARAM_ID_AOBF,
DOLBY_PARAM_ID_AOBG, DOLBY_PARAM_ID_AOON, DOLBY_PARAM_ID_ARNB,
DOLBY_PARAM_ID_ARBF, DOLBY_PARAM_ID_PLB, DOLBY_PARAM_ID_PLMD,
DOLBY_PARAM_ID_DHSB, DOLBY_PARAM_ID_DHRG, DOLBY_PARAM_ID_DSSB,
DOLBY_PARAM_ID_DSSA, DOLBY_PARAM_ID_DVLA, DOLBY_PARAM_ID_IEBT,
DOLBY_PARAM_ID_IEA, DOLBY_PARAM_ID_DEA, DOLBY_PARAM_ID_DED,
DOLBY_PARAM_ID_GEBG, DOLBY_PARAM_ID_AOCC, DOLBY_PARAM_ID_ARBI,
DOLBY_PARAM_ID_ARBL, DOLBY_PARAM_ID_ARBH, DOLBY_PARAM_ID_AROD,
DOLBY_PARAM_ID_ARTP, DOLBY_PARAM_ID_VMON, DOLBY_PARAM_ID_VMB,
DOLBY_PARAM_ID_VCNB, DOLBY_PARAM_ID_VCBF, DOLBY_PARAM_ID_PREG,
DOLBY_PARAM_ID_VEN, DOLBY_PARAM_ID_PSTG, DOLBY_PARAM_ID_INIT_ENDP,
};
/* modified state: 0x00000000 - not updated
 * > 0x00000000 && < 0x00010000 - updated and not committed to DSP
 * 0x00010001 - updated and committed to DSP
 * > 0x00010001 - committed value modified again
 */
/* param offset */
static uint32_t ds2_dap_params_offset[MAX_DS2_PARAMS] = {
DOLBY_PARAM_VDHE_OFFSET, DOLBY_PARAM_VSPE_OFFSET,
DOLBY_PARAM_DSSF_OFFSET, DOLBY_PARAM_DVLI_OFFSET,
DOLBY_PARAM_DVLO_OFFSET, DOLBY_PARAM_DVLE_OFFSET,
DOLBY_PARAM_DVMC_OFFSET, DOLBY_PARAM_DVME_OFFSET,
DOLBY_PARAM_IENB_OFFSET, DOLBY_PARAM_IEBF_OFFSET,
DOLBY_PARAM_IEON_OFFSET, DOLBY_PARAM_DEON_OFFSET,
DOLBY_PARAM_NGON_OFFSET, DOLBY_PARAM_GEON_OFFSET,
DOLBY_PARAM_GENB_OFFSET, DOLBY_PARAM_GEBF_OFFSET,
DOLBY_PARAM_AONB_OFFSET, DOLBY_PARAM_AOBF_OFFSET,
DOLBY_PARAM_AOBG_OFFSET, DOLBY_PARAM_AOON_OFFSET,
DOLBY_PARAM_ARNB_OFFSET, DOLBY_PARAM_ARBF_OFFSET,
DOLBY_PARAM_PLB_OFFSET, DOLBY_PARAM_PLMD_OFFSET,
DOLBY_PARAM_DHSB_OFFSET, DOLBY_PARAM_DHRG_OFFSET,
DOLBY_PARAM_DSSB_OFFSET, DOLBY_PARAM_DSSA_OFFSET,
DOLBY_PARAM_DVLA_OFFSET, DOLBY_PARAM_IEBT_OFFSET,
DOLBY_PARAM_IEA_OFFSET, DOLBY_PARAM_DEA_OFFSET,
DOLBY_PARAM_DED_OFFSET, DOLBY_PARAM_GEBG_OFFSET,
DOLBY_PARAM_AOCC_OFFSET, DOLBY_PARAM_ARBI_OFFSET,
DOLBY_PARAM_ARBL_OFFSET, DOLBY_PARAM_ARBH_OFFSET,
DOLBY_PARAM_AROD_OFFSET, DOLBY_PARAM_ARTP_OFFSET,
DOLBY_PARAM_VMON_OFFSET, DOLBY_PARAM_VMB_OFFSET,
DOLBY_PARAM_VCNB_OFFSET, DOLBY_PARAM_VCBF_OFFSET,
DOLBY_PARAM_PREG_OFFSET, DOLBY_PARAM_VEN_OFFSET,
DOLBY_PARAM_PSTG_OFFSET, DOLBY_PARAM_INT_ENDP_OFFSET,
};
/* param_length */
static uint32_t ds2_dap_params_length[MAX_DS2_PARAMS] = {
DOLBY_PARAM_VDHE_LENGTH, DOLBY_PARAM_VSPE_LENGTH,
DOLBY_PARAM_DSSF_LENGTH, DOLBY_PARAM_DVLI_LENGTH,
DOLBY_PARAM_DVLO_LENGTH, DOLBY_PARAM_DVLE_LENGTH,
DOLBY_PARAM_DVMC_LENGTH, DOLBY_PARAM_DVME_LENGTH,
DOLBY_PARAM_IENB_LENGTH, DOLBY_PARAM_IEBF_LENGTH,
DOLBY_PARAM_IEON_LENGTH, DOLBY_PARAM_DEON_LENGTH,
DOLBY_PARAM_NGON_LENGTH, DOLBY_PARAM_GEON_LENGTH,
DOLBY_PARAM_GENB_LENGTH, DOLBY_PARAM_GEBF_LENGTH,
DOLBY_PARAM_AONB_LENGTH, DOLBY_PARAM_AOBF_LENGTH,
DOLBY_PARAM_AOBG_LENGTH, DOLBY_PARAM_AOON_LENGTH,
DOLBY_PARAM_ARNB_LENGTH, DOLBY_PARAM_ARBF_LENGTH,
DOLBY_PARAM_PLB_LENGTH, DOLBY_PARAM_PLMD_LENGTH,
DOLBY_PARAM_DHSB_LENGTH, DOLBY_PARAM_DHRG_LENGTH,
DOLBY_PARAM_DSSB_LENGTH, DOLBY_PARAM_DSSA_LENGTH,
DOLBY_PARAM_DVLA_LENGTH, DOLBY_PARAM_IEBT_LENGTH,
DOLBY_PARAM_IEA_LENGTH, DOLBY_PARAM_DEA_LENGTH,
DOLBY_PARAM_DED_LENGTH, DOLBY_PARAM_GEBG_LENGTH,
DOLBY_PARAM_AOCC_LENGTH, DOLBY_PARAM_ARBI_LENGTH,
DOLBY_PARAM_ARBL_LENGTH, DOLBY_PARAM_ARBH_LENGTH,
DOLBY_PARAM_AROD_LENGTH, DOLBY_PARAM_ARTP_LENGTH,
DOLBY_PARAM_VMON_LENGTH, DOLBY_PARAM_VMB_LENGTH,
DOLBY_PARAM_VCNB_LENGTH, DOLBY_PARAM_VCBF_LENGTH,
DOLBY_PARAM_PREG_LENGTH, DOLBY_PARAM_VEN_LENGTH,
DOLBY_PARAM_PSTG_LENGTH, DOLBY_PARAM_INT_ENDP_LENGTH,
};
struct ds2_dap_params_s {
int32_t params_val[TOTAL_LENGTH_DS2_PARAM];
int32_t dap_params_modified[MAX_DS2_PARAMS];
};
struct audio_rx_cal_data {
char aud_proc_data[AUD_PROC_BLOCK_SIZE];
int32_t aud_proc_size;
char aud_vol_data[AUD_VOL_BLOCK_SIZE];
int32_t aud_vol_size;
};
static struct ds2_dap_params_s ds2_dap_params[DOLBY_MAX_CACHE];
struct ds2_device_mapping {
int32_t device_id; /* audio_out_... */
int port_id; /* afe port. constant for a target variant. routing-v2*/
/*Only one Dolby COPP for a specific port*/
int copp_idx; /* idx for the copp port on which ds2 is active */
int cache_dev; /* idx to a shared parameter array dependent on device*/
uint32_t stream_ref_count;
bool active;
void *cal_data;
};
static struct ds2_device_mapping dev_map[DS2_DEVICES_ALL];
struct ds2_dap_params_states_s {
bool use_cache;
bool dap_bypass;
bool dap_bypass_type;
bool node_opened;
int32_t device;
bool custom_stereo_onoff;
};
static struct ds2_dap_params_states_s ds2_dap_params_states = {true, false,
false, DEVICE_NONE};
static int all_supported_devices = EARPIECE|SPEAKER|WIRED_HEADSET|
WIRED_HEADPHONE|BLUETOOTH_SCO|AUX_DIGITAL|
ANLG_DOCK_HEADSET|DGTL_DOCK_HEADSET|
REMOTE_SUBMIX|ANC_HEADSET|ANC_HEADPHONE|
PROXY|FM|FM_TX|DEVICE_NONE|
BLUETOOTH_SCO_HEADSET|BLUETOOTH_SCO_CARKIT;
static void msm_ds2_dap_check_and_update_ramp_wait(int port_id, int copp_idx,
int *ramp_wait)
{
int32_t *update_params_value = NULL;
uint32_t params_length = SOFT_VOLUME_PARAM_SIZE * sizeof(uint32_t);
uint32_t param_payload_len = PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
int rc = 0;
update_params_value = kzalloc(params_length, GFP_KERNEL);
if (!update_params_value) {
pr_err("%s: params memory alloc failed\n", __func__);
goto end;
}
rc = adm_get_params(port_id, copp_idx,
AUDPROC_MODULE_ID_VOL_CTRL,
AUDPROC_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS,
params_length + param_payload_len,
(char *) update_params_value);
if (rc == 0) {
pr_debug("%s: params_value [0x%x, 0x%x, 0x%x]\n",
__func__, update_params_value[0],
update_params_value[1],
update_params_value[2]);
*ramp_wait = update_params_value[0];
}
end:
kfree(update_params_value);
/*
* No error returned as we do not need to error out from dap on/dap
* bypass. The default ramp parameter will be used to wait during
* ramp down.
*/
return;
}
static int msm_ds2_dap_set_vspe_vdhe(int dev_map_idx,
bool is_custom_stereo_enabled)
{
int32_t *update_params_value = NULL;
int32_t *param_val = NULL;
int idx, i, j, rc = 0, cdev;
uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
2 * DOLBY_PARAM_PAYLOAD_SIZE) *
sizeof(uint32_t);
if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
rc = -EINVAL;
goto end;
}
if (dev_map[dev_map_idx].port_id == DOLBY_INVALID_PORT_ID) {
pr_err("%s: Invalid port id\n", __func__);
rc = -EINVAL;
goto end;
}
if ((dev_map[dev_map_idx].copp_idx < 0) ||
(dev_map[dev_map_idx].copp_idx >= MAX_COPPS_PER_PORT)) {
pr_err("%s: Invalid copp_idx\n", __func__);
rc = -EINVAL;
goto end;
}
if ((dev_map[dev_map_idx].port_id != SLIMBUS_0_RX) &&
(dev_map[dev_map_idx].port_id != RT_PROXY_PORT_001_RX)) {
pr_debug("%s:No Custom stereo for port:0x%x\n",
__func__, dev_map[dev_map_idx].port_id);
goto end;
}
update_params_value = kzalloc(params_length, GFP_KERNEL);
if (!update_params_value) {
pr_err("%s: params memory alloc failed\n", __func__);
rc = -ENOMEM;
goto end;
}
params_length = 0;
param_val = update_params_value;
cdev = dev_map[dev_map_idx].cache_dev;
/* for VDHE and VSPE DAP params at index 0 and 1 in table */
for (i = 0; i < 2; i++) {
*update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
*update_params_value++ = ds2_dap_params_id[i];
*update_params_value++ = ds2_dap_params_length[i] *
sizeof(uint32_t);
idx = ds2_dap_params_offset[i];
for (j = 0; j < ds2_dap_params_length[i]; j++) {
if (is_custom_stereo_enabled)
*update_params_value++ = 0;
else
*update_params_value++ =
ds2_dap_params[cdev].params_val[idx+j];
}
params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
ds2_dap_params_length[i]) *
sizeof(uint32_t);
}
pr_debug("%s: valid param length: %d\n", __func__, params_length);
if (params_length) {
rc = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id,
dev_map[dev_map_idx].copp_idx,
(char *)param_val,
params_length);
if (rc) {
pr_err("%s: send vdhe/vspe params failed with rc=%d\n",
__func__, rc);
rc = -EINVAL;
goto end;
}
}
end:
kfree(param_val);
return rc;
}
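/*
 * Illustrative note (not part of the original driver): each parameter sent
 * through adm_dolby_dap_send_params() above is packed as consecutive 32-bit
 * words - [module id][param id][payload size in bytes][payload words...] -
 * so a hypothetical one-word parameter occupies DOLBY_PARAM_PAYLOAD_SIZE
 * header words plus one payload word, which is exactly how the
 * params_length accumulation in these functions counts it.
 */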
int qti_set_custom_stereo_on(int port_id, int copp_idx,
bool is_custom_stereo_on)
{
uint16_t op_FL_ip_FL_weight;
uint16_t op_FL_ip_FR_weight;
uint16_t op_FR_ip_FL_weight;
uint16_t op_FR_ip_FR_weight;
int32_t *update_params_value32 = NULL, rc = 0;
int32_t *param_val = NULL;
int16_t *update_params_value16 = 0;
uint32_t params_length_bytes = CUSTOM_STEREO_PAYLOAD_SIZE *
sizeof(uint32_t);
uint32_t avail_length = params_length_bytes;
if ((port_id != SLIMBUS_0_RX) &&
(port_id != RT_PROXY_PORT_001_RX)) {
pr_debug("%s:No Custom stereo for port:0x%x\n",
__func__, port_id);
goto skip_send_cmd;
}
pr_debug("%s: port 0x%x, copp_idx %d, is_custom_stereo_on %d\n",
__func__, port_id, copp_idx, is_custom_stereo_on);
if (is_custom_stereo_on) {
op_FL_ip_FL_weight =
Q14_GAIN_ZERO_POINT_FIVE;
op_FL_ip_FR_weight =
Q14_GAIN_ZERO_POINT_FIVE;
op_FR_ip_FL_weight =
Q14_GAIN_ZERO_POINT_FIVE;
op_FR_ip_FR_weight =
Q14_GAIN_ZERO_POINT_FIVE;
} else {
op_FL_ip_FL_weight = Q14_GAIN_UNITY;
op_FL_ip_FR_weight = 0;
op_FR_ip_FL_weight = 0;
op_FR_ip_FR_weight = Q14_GAIN_UNITY;
}
update_params_value32 = kzalloc(params_length_bytes, GFP_KERNEL);
if (!update_params_value32) {
pr_err("%s, params memory alloc failed\n", __func__);
rc = -ENOMEM;
goto skip_send_cmd;
}
param_val = update_params_value32;
if (avail_length < 2 * sizeof(uint32_t))
goto skip_send_cmd;
*update_params_value32++ = MTMX_MODULE_ID_DEFAULT_CHMIXER;
*update_params_value32++ = DEFAULT_CHMIXER_PARAM_ID_COEFF;
avail_length = avail_length - (2 * sizeof(uint32_t));
update_params_value16 = (int16_t *)update_params_value32;
if (avail_length < 10 * sizeof(uint16_t))
goto skip_send_cmd;
*update_params_value16++ = CUSTOM_STEREO_CMD_PARAM_SIZE;
/* for alignment only*/
*update_params_value16++ = 0;
/* index is 32-bit param in little endian*/
*update_params_value16++ = CUSTOM_STEREO_INDEX_PARAM;
*update_params_value16++ = 0;
/* for stereo mixing num out ch*/
*update_params_value16++ = CUSTOM_STEREO_NUM_OUT_CH;
/* for stereo mixing num in ch*/
*update_params_value16++ = CUSTOM_STEREO_NUM_IN_CH;
/* Out ch map FL/FR*/
*update_params_value16++ = PCM_CHANNEL_FL;
*update_params_value16++ = PCM_CHANNEL_FR;
/* In ch map FL/FR*/
*update_params_value16++ = PCM_CHANNEL_FL;
*update_params_value16++ = PCM_CHANNEL_FR;
avail_length = avail_length - (10 * sizeof(uint16_t));
	/*
	 * Weighting coefficients; mixing will be done according to
	 * these coefficients.
	 */
if (avail_length < 4 * sizeof(uint16_t))
goto skip_send_cmd;
*update_params_value16++ = op_FL_ip_FL_weight;
*update_params_value16++ = op_FL_ip_FR_weight;
*update_params_value16++ = op_FR_ip_FL_weight;
*update_params_value16++ = op_FR_ip_FR_weight;
avail_length = avail_length - (4 * sizeof(uint16_t));
if (params_length_bytes != 0) {
rc = adm_dolby_dap_send_params(port_id, copp_idx,
(char *)param_val,
params_length_bytes);
if (rc) {
pr_err("%s: send params failed rc=%d\n", __func__, rc);
rc = -EINVAL;
goto skip_send_cmd;
}
}
kfree(param_val);
return 0;
skip_send_cmd:
pr_err("%s: insufficient memory, send cmd failed\n",
__func__);
kfree(param_val);
return rc;
}
static int dap_set_custom_stereo_onoff(int dev_map_idx,
bool is_custom_stereo_enabled)
{
int32_t *update_params_value = NULL, rc = 0;
int32_t *param_val = NULL;
uint32_t params_length_bytes = (TOTAL_LENGTH_DOLBY_PARAM +
DOLBY_PARAM_PAYLOAD_SIZE) * sizeof(uint32_t);
if ((dev_map[dev_map_idx].port_id != SLIMBUS_0_RX) &&
(dev_map[dev_map_idx].port_id != RT_PROXY_PORT_001_RX)) {
pr_debug("%s:No Custom stereo for port:0x%x\n",
__func__, dev_map[dev_map_idx].port_id);
goto end;
}
if ((dev_map[dev_map_idx].copp_idx < 0) ||
(dev_map[dev_map_idx].copp_idx >= MAX_COPPS_PER_PORT)) {
rc = -EINVAL;
goto end;
}
/* DAP custom stereo */
msm_ds2_dap_set_vspe_vdhe(dev_map_idx,
is_custom_stereo_enabled);
update_params_value = kzalloc(params_length_bytes, GFP_KERNEL);
if (!update_params_value) {
pr_err("%s: params memory alloc failed\n", __func__);
rc = -ENOMEM;
goto end;
}
params_length_bytes = 0;
param_val = update_params_value;
*update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
*update_params_value++ = DOLBY_ENABLE_CUSTOM_STEREO;
*update_params_value++ = sizeof(uint32_t);
if (is_custom_stereo_enabled)
*update_params_value++ = 1;
else
*update_params_value++ = 0;
params_length_bytes += (DOLBY_PARAM_PAYLOAD_SIZE + 1) *
sizeof(uint32_t);
pr_debug("%s: valid param length: %d\n", __func__, params_length_bytes);
if (params_length_bytes) {
rc = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id,
dev_map[dev_map_idx].copp_idx,
(char *)param_val,
params_length_bytes);
if (rc) {
pr_err("%s: custom stereo param failed with rc=%d\n",
__func__, rc);
rc = -EINVAL;
goto end;
}
}
end:
kfree(param_val);
return rc;
}
static int set_custom_stereo_onoff(int dev_map_idx,
bool is_custom_stereo_enabled)
{
int rc = 0;
pr_debug("%s: map index %d, custom stereo %d\n", __func__, dev_map_idx,
is_custom_stereo_enabled);
if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
rc = -EINVAL;
goto end;
}
if (dev_map[dev_map_idx].port_id == DOLBY_INVALID_PORT_ID) {
pr_err("%s: invalid port id\n", __func__);
rc = -EINVAL;
goto end;
}
if ((dev_map[dev_map_idx].copp_idx < 0) ||
(dev_map[dev_map_idx].copp_idx >= MAX_COPPS_PER_PORT)) {
pr_err("%s: invalid copp idx\n", __func__);
rc = -EINVAL;
goto end;
}
if (ds2_dap_params_states.dap_bypass == true &&
ds2_dap_params_states.dap_bypass_type == DAP_HARD_BYPASS) {
rc = qti_set_custom_stereo_on(dev_map[dev_map_idx].port_id,
dev_map[dev_map_idx].copp_idx,
is_custom_stereo_enabled);
if (rc < 0) {
pr_err("%s:qti_set_custom_stereo_on_copp failed C.S %d",
__func__, is_custom_stereo_enabled);
}
goto end;
}
if (ds2_dap_params_states.dap_bypass == false) {
rc = dap_set_custom_stereo_onoff(dev_map_idx,
is_custom_stereo_enabled);
if (rc < 0) {
pr_err("%s:qti_set_custom_stereo_on_copp failed C.S %d",
__func__, is_custom_stereo_enabled);
}
goto end;
}
end:
return rc;
}
static int msm_ds2_dap_alloc_and_store_cal_data(int dev_map_idx, int path,
int perf_mode)
{
int rc = 0;
struct audio_rx_cal_data *aud_cal_data;
pr_debug("%s: path %d, perf_mode %d, dev_map_idx %d\n",
__func__, path, perf_mode, dev_map_idx);
if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
rc = -EINVAL;
goto end;
}
aud_cal_data = kzalloc(sizeof(struct audio_rx_cal_data), GFP_KERNEL);
if (!aud_cal_data) {
pr_err("%s, param memory alloc failed\n", __func__);
rc = -ENOMEM;
goto end;
}
rc = adm_store_cal_data(dev_map[dev_map_idx].port_id,
dev_map[dev_map_idx].copp_idx, path, perf_mode,
ADM_AUDPROC_CAL, aud_cal_data->aud_proc_data,
&aud_cal_data->aud_proc_size);
if (rc < 0) {
pr_err("%s: store cal data err %d\n", __func__, rc);
kfree(aud_cal_data);
goto end;
}
rc = adm_store_cal_data(dev_map[dev_map_idx].port_id,
dev_map[dev_map_idx].copp_idx, path, perf_mode,
ADM_AUDVOL_CAL, aud_cal_data->aud_vol_data,
&aud_cal_data->aud_vol_size);
if (rc < 0) {
pr_err("%s: store cal data err %d\n", __func__, rc);
kfree(aud_cal_data);
goto end;
}
dev_map[dev_map_idx].cal_data = (void *)aud_cal_data;
end:
pr_debug("%s: ret %d\n", __func__, rc);
return rc;
}
static int msm_ds2_dap_free_cal_data(int dev_map_idx)
{
int rc = 0;
struct audio_rx_cal_data *aud_cal_data;
pr_debug("%s: dev_map_idx %d\n", __func__, dev_map_idx);
if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
rc = -EINVAL;
goto end;
}
aud_cal_data = (struct audio_rx_cal_data *)
dev_map[dev_map_idx].cal_data;
kfree(aud_cal_data);
dev_map[dev_map_idx].cal_data = NULL;
end:
return rc;
}
static int msm_ds2_dap_send_cal_data(int dev_map_idx)
{
int rc = 0;
struct audio_rx_cal_data *aud_cal_data = NULL;
pr_debug("%s: devmap index %d\n", __func__, dev_map_idx);
if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
rc = -EINVAL;
goto end;
}
if (dev_map[dev_map_idx].cal_data == NULL) {
pr_err("%s: No valid calibration data stored for idx %d\n",
__func__, dev_map_idx);
rc = -EINVAL;
goto end;
}
/* send aud proc cal */
aud_cal_data = (struct audio_rx_cal_data *)
dev_map[dev_map_idx].cal_data;
rc = adm_send_calibration(dev_map[dev_map_idx].port_id,
dev_map[dev_map_idx].copp_idx,
ADM_PATH_PLAYBACK, 0,
ADM_AUDPROC_CAL,
aud_cal_data->aud_proc_data,
aud_cal_data->aud_proc_size);
if (rc < 0) {
pr_err("%s: adm_send_calibration failed %d\n", __func__, rc);
goto end;
}
/* send aud volume cal*/
rc = adm_send_calibration(dev_map[dev_map_idx].port_id,
dev_map[dev_map_idx].copp_idx,
ADM_PATH_PLAYBACK, 0,
ADM_AUDVOL_CAL,
aud_cal_data->aud_vol_data,
aud_cal_data->aud_vol_size);
if (rc < 0)
pr_err("%s: adm_send_calibration failed %d\n", __func__, rc);
end:
pr_debug("%s: return %d\n", __func__, rc);
return rc;
}
static inline int msm_ds2_dap_can_enable_module(int32_t module_id)
{
if (module_id == MTMX_MODULE_ID_DEFAULT_CHMIXER ||
module_id == AUDPROC_MODULE_ID_RESAMPLER ||
module_id == AUDPROC_MODULE_ID_VOL_CTRL) {
return false;
}
return true;
}
static int msm_ds2_dap_init_modules_in_topology(int dev_map_idx)
{
int rc = 0, i = 0, port_id, copp_idx;
	/* Account for 32 bit integer allocation */
int32_t param_sz = (ADM_GET_TOPO_MODULE_LIST_LENGTH / sizeof(uint32_t));
int32_t *update_param_val = NULL;
if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
rc = -EINVAL;
goto end;
}
port_id = dev_map[dev_map_idx].port_id;
copp_idx = dev_map[dev_map_idx].copp_idx;
pr_debug("%s: port_id 0x%x copp_idx %d\n", __func__, port_id, copp_idx);
update_param_val = kzalloc(ADM_GET_TOPO_MODULE_LIST_LENGTH, GFP_KERNEL);
if (!update_param_val) {
pr_err("%s, param memory alloc failed\n", __func__);
rc = -ENOMEM;
goto end;
}
if (!ds2_dap_params_states.dap_bypass) {
/* get modules from dsp */
rc = adm_get_pp_topo_module_list(port_id, copp_idx,
ADM_GET_TOPO_MODULE_LIST_LENGTH,
(char *)update_param_val);
if (rc < 0) {
pr_err("%s:topo list port %d, err %d,copp_idx %d\n",
__func__, port_id, copp_idx, rc);
goto end;
}
if (update_param_val[0] > (param_sz - 1)) {
pr_err("%s:max modules exp/ret [%d: %d]\n",
__func__, (param_sz - 1),
update_param_val[0]);
rc = -EINVAL;
goto end;
}
/* Turn off modules */
for (i = 1; i < update_param_val[0]; i++) {
if (!msm_ds2_dap_can_enable_module(
update_param_val[i]) ||
(update_param_val[i] == DS2_MODULE_ID)) {
pr_debug("%s: Do not enable/disable %d\n",
__func__, update_param_val[i]);
continue;
}
pr_debug("%s: param disable %d\n",
__func__, update_param_val[i]);
adm_param_enable(port_id, copp_idx, update_param_val[i],
MODULE_DISABLE);
}
} else {
msm_ds2_dap_send_cal_data(dev_map_idx);
}
adm_param_enable(port_id, copp_idx, DS2_MODULE_ID,
!ds2_dap_params_states.dap_bypass);
end:
kfree(update_param_val);
return rc;
}
static bool msm_ds2_dap_check_is_param_modified(int32_t *dap_params_modified,
int32_t idx, int32_t commit)
{
if ((dap_params_modified[idx] == 0) ||
(commit &&
((dap_params_modified[idx] & 0x00010000) &&
((dap_params_modified[idx] & 0x0000FFFF) <= 1)))) {
pr_debug("%s: not modified at idx %d\n", __func__, idx);
return false;
}
pr_debug("%s: modified at idx %d\n", __func__, idx);
return true;
}
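/*
 * Illustrative note (not part of the original driver), expanding on the
 * "modified state" comment near the top of this file. With commit != 0 the
 * check above treats, for example (hypothetical values):
 *   0x00000000 - never set                       -> not modified
 *   0x00000001 - set once, not yet sent to DSP   -> modified
 *   0x00010001 - set once and committed          -> not modified
 *   0x00010002 - changed again after the commit  -> modified
 * i.e. the low 16 bits count updates and bit 16 records that a commit to
 * the DSP has happened.
 */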
static int msm_ds2_dap_map_device_to_dolby_cache_devices(int32_t device_id)
{
int32_t cache_dev = -1;
switch (device_id) {
case DEVICE_NONE:
cache_dev = DOLBY_OFF_CACHE;
break;
case EARPIECE:
case SPEAKER:
cache_dev = DOLBY_SPEAKER_CACHE;
break;
case WIRED_HEADSET:
case WIRED_HEADPHONE:
case ANLG_DOCK_HEADSET:
case DGTL_DOCK_HEADSET:
case ANC_HEADSET:
case ANC_HEADPHONE:
case BLUETOOTH_SCO:
case BLUETOOTH_SCO_HEADSET:
case BLUETOOTH_SCO_CARKIT:
cache_dev = DOLBY_HEADPHONE_CACHE;
break;
case FM:
case FM_TX:
cache_dev = DOLBY_FM_CACHE;
break;
case AUX_DIGITAL:
cache_dev = DOLBY_HDMI_CACHE;
break;
case PROXY:
case REMOTE_SUBMIX:
cache_dev = DOLBY_WFD_CACHE;
break;
default:
pr_err("%s: invalid cache device\n", __func__);
}
pr_debug("%s: cache device %d\n", __func__, cache_dev);
return cache_dev;
}
static int msm_ds2_dap_update_num_devices(struct dolby_param_data *dolby_data,
int32_t *num_device, int32_t *dev_arr,
int32_t array_size)
{
int32_t idx = 0;
int supported_devices = 0;
if (!array_size) {
pr_err("%s: array size zero\n", __func__);
return -EINVAL;
}
if (dolby_data->device_id == DEVICE_OUT_ALL ||
dolby_data->device_id == DEVICE_OUT_DEFAULT)
supported_devices = all_supported_devices;
else
supported_devices = dolby_data->device_id;
if ((idx < array_size) && (supported_devices & EARPIECE))
dev_arr[idx++] = EARPIECE;
if ((idx < array_size) && (supported_devices & SPEAKER))
dev_arr[idx++] = SPEAKER;
if ((idx < array_size) && (supported_devices & WIRED_HEADSET))
dev_arr[idx++] = WIRED_HEADSET;
if ((idx < array_size) && (supported_devices & WIRED_HEADPHONE))
dev_arr[idx++] = WIRED_HEADPHONE;
if ((idx < array_size) && (supported_devices & BLUETOOTH_SCO))
dev_arr[idx++] = BLUETOOTH_SCO;
if ((idx < array_size) && (supported_devices & BLUETOOTH_SCO_CARKIT))
dev_arr[idx++] = BLUETOOTH_SCO_CARKIT;
if ((idx < array_size) && (supported_devices & BLUETOOTH_SCO_HEADSET))
dev_arr[idx++] = BLUETOOTH_SCO_HEADSET;
if ((idx < array_size) && (supported_devices & AUX_DIGITAL))
dev_arr[idx++] = AUX_DIGITAL;
if ((idx < array_size) && (supported_devices & ANLG_DOCK_HEADSET))
dev_arr[idx++] = ANLG_DOCK_HEADSET;
if ((idx < array_size) && (supported_devices & DGTL_DOCK_HEADSET))
dev_arr[idx++] = DGTL_DOCK_HEADSET;
if ((idx < array_size) && (supported_devices & REMOTE_SUBMIX))
dev_arr[idx++] = REMOTE_SUBMIX;
if ((idx < array_size) && (supported_devices & ANC_HEADSET))
dev_arr[idx++] = ANC_HEADSET;
if ((idx < array_size) && (supported_devices & ANC_HEADPHONE))
dev_arr[idx++] = ANC_HEADPHONE;
if ((idx < array_size) && (supported_devices & PROXY))
dev_arr[idx++] = PROXY;
if ((idx < array_size) && (supported_devices & FM))
dev_arr[idx++] = FM;
if ((idx < array_size) && (supported_devices & FM_TX))
dev_arr[idx++] = FM_TX;
	/* check DEVICE_NONE separately */
if ((idx < array_size) && (supported_devices == DEVICE_NONE))
dev_arr[idx++] = DEVICE_NONE;
pr_debug("%s: dev id 0x%x, idx %d\n", __func__,
supported_devices, idx);
*num_device = idx;
return 0;
}
static int msm_ds2_dap_get_port_id(
int32_t device_id, int32_t be_id)
{
struct msm_pcm_routing_bdai_data bedais;
int port_id = DOLBY_INVALID_PORT_ID;
int port_type = 0;
if (be_id < 0) {
port_id = -1;
goto end;
}
msm_pcm_routing_get_bedai_info(be_id, &bedais);
pr_debug("%s: be port_id %d\n", __func__, bedais.port_id);
port_id = bedais.port_id;
port_type = afe_get_port_type(bedais.port_id);
if (port_type != MSM_AFE_PORT_TYPE_RX)
port_id = DOLBY_INVALID_PORT_ID;
end:
pr_debug("%s: device_id 0x%x, be_id %d, port_id %d\n",
__func__, device_id, be_id, port_id);
return port_id;
}
static int msm_ds2_dap_update_dev_map_port_id(int32_t device_id, int port_id)
{
int i;
for (i = 0; i < DS2_DEVICES_ALL; i++) {
if (dev_map[i].device_id == device_id)
dev_map[i].port_id = port_id;
}
pr_debug("%s: port_id %d, device_id 0x%x\n",
__func__, port_id, device_id);
return 0;
}
static int msm_ds2_dap_handle_bypass_wait(int port_id, int copp_idx,
int wait_time)
{
int ret = 0;
adm_set_wait_parameters(port_id, copp_idx);
msm_pcm_routing_release_lock();
ret = adm_wait_timeout(port_id, copp_idx, wait_time);
msm_pcm_routing_acquire_lock();
/* Reset the parameters if wait has timed out */
if (ret == 0)
adm_reset_wait_parameters(port_id, copp_idx);
return ret;
}
static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data)
{
int rc = 0, i = 0, j = 0;
	/* Account for 32 bit integer allocation */
int32_t param_sz = (ADM_GET_TOPO_MODULE_LIST_LENGTH / sizeof(uint32_t));
int32_t *mod_list = NULL;
int port_id = 0, copp_idx = -1;
bool cs_onoff = ds2_dap_params_states.custom_stereo_onoff;
int ramp_wait = DOLBY_SOFT_VOLUME_PERIOD;
pr_debug("%s: bypass type %d bypass %d custom stereo %d\n", __func__,
ds2_dap_params_states.dap_bypass_type,
ds2_dap_params_states.dap_bypass,
ds2_dap_params_states.custom_stereo_onoff);
mod_list = kzalloc(ADM_GET_TOPO_MODULE_LIST_LENGTH, GFP_KERNEL);
if (!mod_list) {
pr_err("%s: param memory alloc failed\n", __func__);
rc = -ENOMEM;
goto end;
}
for (i = 0; i < DS2_DEVICES_ALL; i++) {
pr_debug("%s: active dev %d\n", __func__, dev_map[i].active);
if (dev_map[i].active) {
port_id = dev_map[i].port_id;
copp_idx = dev_map[i].copp_idx;
if (port_id == DOLBY_INVALID_PORT_ID) {
pr_err("%s: invalid port\n", __func__);
rc = 0;
goto end;
}
if ((copp_idx < 0) ||
(copp_idx >= MAX_COPPS_PER_PORT)) {
pr_err("%s: Invalid copp_idx\n", __func__);
rc = 0;
goto end;
}
			/* get modules from dsp */
rc = adm_get_pp_topo_module_list(port_id, copp_idx,
ADM_GET_TOPO_MODULE_LIST_LENGTH,
(char *)mod_list);
if (rc < 0) {
pr_err("%s:adm get topo list port %d",
__func__, port_id);
pr_err("copp_idx %d, err %d\n",
copp_idx, rc);
goto end;
}
if (mod_list[0] > (param_sz - 1)) {
pr_err("%s:max modules exp/ret [%d: %d]\n",
__func__, (param_sz - 1),
mod_list[0]);
rc = -EINVAL;
goto end;
}
/*
* get ramp parameters
* check for change in ramp parameters
* update ramp wait
*/
msm_ds2_dap_check_and_update_ramp_wait(port_id,
copp_idx,
&ramp_wait);
/* Mute before switching modules */
rc = adm_set_volume(port_id, copp_idx,
VOLUME_ZERO_GAIN);
if (rc < 0) {
/*
* Not Fatal can continue bypass operations.
* Do not need to block playback
*/
pr_info("%s :Set volume port_id %d",
__func__, port_id);
pr_info("copp_idx %d, error %d\n",
copp_idx, rc);
}
rc = msm_ds2_dap_handle_bypass_wait(port_id, copp_idx,
(ramp_wait +
DOLBY_ADDITIONAL_RAMP_WAIT));
if (rc == -EINTR) {
				pr_info("%s:bypass interrupted-ignore,port %d",
__func__, port_id);
pr_info("copp_idx %d\n", copp_idx);
rc = 0;
continue;
}
/* if dap bypass is set */
if (ds2_dap_params_states.dap_bypass) {
/* Turn off dap module */
adm_param_enable(port_id, copp_idx,
DS2_MODULE_ID, MODULE_DISABLE);
/*
* If custom stereo is on at the time of bypass,
* switch off custom stereo on dap and turn on
* custom stereo on qti channel mixer.
*/
if (cs_onoff) {
rc = dap_set_custom_stereo_onoff(i,
!cs_onoff);
if (rc < 0) {
pr_info("%s:D_CS i %d,rc %d\n",
__func__, i, rc);
}
rc = qti_set_custom_stereo_on(port_id,
copp_idx,
cs_onoff);
if (rc < 0) {
pr_info("%s:Q_CS port id 0x%x",
__func__, port_id);
pr_info("copp idx %d, rc %d\n",
copp_idx, rc);
}
}
/* Add adm api to resend calibration on port */
rc = msm_ds2_dap_send_cal_data(i);
if (rc < 0) {
/*
* Not fatal,continue bypass operations.
* Do not need to block playback
*/
pr_info("%s:send cal err %d index %d\n",
__func__, rc, i);
}
} else {
/* Turn off qti modules */
for (j = 1; j < mod_list[0]; j++) {
if (!msm_ds2_dap_can_enable_module(
mod_list[j]) ||
mod_list[j] ==
DS2_MODULE_ID)
continue;
pr_debug("%s: param disable %d\n",
__func__, mod_list[j]);
adm_param_enable(port_id, copp_idx,
mod_list[j],
MODULE_DISABLE);
}
/* Enable DAP modules */
pr_debug("%s:DS2 param enable\n", __func__);
adm_param_enable(port_id, copp_idx,
DS2_MODULE_ID, MODULE_ENABLE);
				/*
				 * If custom stereo is on at the time of dap on,
				 * switch off custom stereo on the qti channel
				 * mixer and turn on custom stereo on DAP.
				 */
if (cs_onoff) {
rc = qti_set_custom_stereo_on(port_id,
copp_idx,
!cs_onoff);
if (rc < 0) {
pr_info("%s:Q_CS port_id 0x%x",
__func__, port_id);
pr_info("copp_idx %d rc %d\n",
copp_idx, rc);
}
rc = dap_set_custom_stereo_onoff(i,
cs_onoff);
if (rc < 0) {
pr_info("%s:D_CS i %d,rc %d\n",
__func__, i, rc);
}
}
}
rc = msm_ds2_dap_handle_bypass_wait(port_id, copp_idx,
DOLBY_MODULE_ENABLE_PERIOD);
if (rc == -EINTR) {
				pr_info("%s:bypass interrupted port_id %d copp_idx %d\n",
__func__, port_id, copp_idx);
/* Interrupted ignore bypass */
rc = 0;
continue;
}
/* set volume to unity gain after module on/off */
rc = adm_set_volume(port_id, copp_idx,
VOLUME_UNITY_GAIN);
if (rc < 0) {
/*
* Not Fatal can continue bypass operations.
* Do not need to block playback
*/
pr_info("%s: Set vol port %d copp %d, rc %d\n",
__func__, port_id, copp_idx, rc);
rc = 0;
}
}
}
end:
kfree(mod_list);
pr_debug("%s:return rc=%d\n", __func__, rc);
return rc;
}
static int msm_ds2_dap_send_end_point(int dev_map_idx, int endp_idx)
{
int rc = 0;
int32_t *update_params_value = NULL, *params_value = NULL;
uint32_t params_length = (DOLBY_PARAM_INT_ENDP_LENGTH +
DOLBY_PARAM_PAYLOAD_SIZE) * sizeof(uint32_t);
int cache_device = 0;
struct ds2_dap_params_s *ds2_ap_params_obj = NULL;
int32_t *modified_param = NULL;
if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
rc = -EINVAL;
goto end;
}
cache_device = dev_map[dev_map_idx].cache_dev;
ds2_ap_params_obj = &ds2_dap_params[cache_device];
pr_debug("%s: cache dev %d, dev_map_idx %d\n", __func__,
cache_device, dev_map_idx);
pr_debug("%s: endp - %p %p\n", __func__,
&ds2_dap_params[cache_device], ds2_ap_params_obj);
params_value = kzalloc(params_length, GFP_KERNEL);
if (!params_value) {
pr_err("%s: params memory alloc failed\n", __func__);
rc = -ENOMEM;
goto end;
}
if (dev_map[dev_map_idx].port_id == DOLBY_INVALID_PORT_ID) {
pr_err("%s: invalid port\n", __func__);
rc = -EINVAL;
goto end;
}
if ((dev_map[dev_map_idx].copp_idx < 0) ||
(dev_map[dev_map_idx].copp_idx >= MAX_COPPS_PER_PORT)) {
pr_err("%s: Invalid copp_idx\n", __func__);
rc = -EINVAL;
goto end;
}
update_params_value = params_value;
*update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
*update_params_value++ = DOLBY_PARAM_ID_INIT_ENDP;
*update_params_value++ = DOLBY_PARAM_INT_ENDP_LENGTH * sizeof(uint32_t);
*update_params_value++ = ds2_ap_params_obj->params_val[
ds2_dap_params_offset[endp_idx]];
pr_debug("%s: off %d, length %d\n", __func__,
ds2_dap_params_offset[endp_idx],
ds2_dap_params_length[endp_idx]);
pr_debug("%s: param 0x%x, param val %d\n", __func__,
ds2_dap_params_id[endp_idx], ds2_ap_params_obj->
params_val[ds2_dap_params_offset[endp_idx]]);
rc = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id,
dev_map[dev_map_idx].copp_idx,
(char *)params_value, params_length);
if (rc) {
pr_err("%s: send dolby params failed rc %d\n", __func__, rc);
rc = -EINVAL;
}
modified_param = ds2_ap_params_obj->dap_params_modified;
if (modified_param == NULL) {
pr_err("%s: modified param structure invalid\n",
__func__);
rc = -EINVAL;
goto end;
}
if (msm_ds2_dap_check_is_param_modified(modified_param, endp_idx, 0))
ds2_ap_params_obj->dap_params_modified[endp_idx] = 0x00010001;
end:
kfree(params_value);
return rc;
}
static int msm_ds2_dap_send_cached_params(int dev_map_idx,
int commit)
{
int32_t *update_params_value = NULL, *params_value = NULL;
uint32_t idx, i, j, ret = 0;
uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
(MAX_DS2_PARAMS - 1) *
DOLBY_PARAM_PAYLOAD_SIZE) *
sizeof(uint32_t);
int cache_device = 0;
struct ds2_dap_params_s *ds2_ap_params_obj = NULL;
int32_t *modified_param = NULL;
if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
ret = -EINVAL;
goto end;
}
cache_device = dev_map[dev_map_idx].cache_dev;
	/* Use the off-profile cache (device 0) only for soft bypass */
if (ds2_dap_params_states.dap_bypass_type == DAP_SOFT_BYPASS &&
ds2_dap_params_states.dap_bypass == true) {
pr_debug("%s: use bypass cache 0\n", __func__);
cache_device = dev_map[0].cache_dev;
}
ds2_ap_params_obj = &ds2_dap_params[cache_device];
pr_debug("%s: cached param - %p %p, cache_device %d\n", __func__,
&ds2_dap_params[cache_device], ds2_ap_params_obj,
cache_device);
params_value = kzalloc(params_length, GFP_KERNEL);
if (!params_value) {
pr_err("%s: params memory alloc failed\n", __func__);
ret = -ENOMEM;
goto end;
}
if (dev_map[dev_map_idx].port_id == DOLBY_INVALID_PORT_ID) {
pr_err("%s: invalid port id\n", __func__);
ret = -EINVAL;
goto end;
}
if ((dev_map[dev_map_idx].copp_idx < 0) ||
(dev_map[dev_map_idx].copp_idx >= MAX_COPPS_PER_PORT)) {
pr_err("%s: Invalid copp_idx\n", __func__);
ret = -EINVAL;
goto end;
}
update_params_value = params_value;
params_length = 0;
for (i = 0; i < (MAX_DS2_PARAMS-1); i++) {
/*get the pointer to the param modified array in the cache*/
modified_param = ds2_ap_params_obj->dap_params_modified;
if (modified_param == NULL) {
pr_err("%s: modified param structure invalid\n",
__func__);
ret = -EINVAL;
goto end;
}
if (!msm_ds2_dap_check_is_param_modified(modified_param, i,
commit))
continue;
*update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
*update_params_value++ = ds2_dap_params_id[i];
*update_params_value++ = ds2_dap_params_length[i] *
sizeof(uint32_t);
idx = ds2_dap_params_offset[i];
for (j = 0; j < ds2_dap_params_length[i]; j++) {
*update_params_value++ =
ds2_ap_params_obj->params_val[idx+j];
pr_debug("%s: id 0x%x,val %d\n", __func__,
ds2_dap_params_id[i],
ds2_ap_params_obj->params_val[idx+j]);
}
params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
ds2_dap_params_length[i]) * sizeof(uint32_t);
}
pr_debug("%s: valid param length: %d\n", __func__, params_length);
if (params_length) {
ret = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id,
dev_map[dev_map_idx].copp_idx,
(char *)params_value,
params_length);
if (ret) {
pr_err("%s: send dolby params failed ret %d\n",
__func__, ret);
ret = -EINVAL;
goto end;
}
for (i = 0; i < MAX_DS2_PARAMS-1; i++) {
/*get pointer to the param modified array in the cache*/
modified_param = ds2_ap_params_obj->dap_params_modified;
if (modified_param == NULL) {
pr_err("%s: modified param struct invalid\n",
__func__);
ret = -EINVAL;
goto end;
}
if (!msm_ds2_dap_check_is_param_modified(
modified_param, i, commit))
continue;
ds2_ap_params_obj->dap_params_modified[i] = 0x00010001;
}
}
end:
kfree(params_value);
return ret;
}
static int msm_ds2_dap_commit_params(struct dolby_param_data *dolby_data,
int commit)
{
int ret = 0, i, idx;
struct ds2_dap_params_s *ds2_ap_params_obj = NULL;
int32_t *modified_param = NULL;
/* Do not commit params if in hard bypass */
if (ds2_dap_params_states.dap_bypass_type == DAP_HARD_BYPASS &&
ds2_dap_params_states.dap_bypass == true) {
pr_debug("%s: called in bypass", __func__);
ret = -EINVAL;
goto end;
}
for (idx = 0; idx < MAX_DS2_PARAMS; idx++) {
if (DOLBY_PARAM_ID_INIT_ENDP == ds2_dap_params_id[idx])
break;
}
if (idx >= MAX_DS2_PARAMS || idx < 0) {
pr_err("%s: index of DS2 Param not found idx %d\n",
__func__, idx);
ret = -EINVAL;
goto end;
}
pr_debug("%s: found endp - idx %d 0x%x\n", __func__, idx,
ds2_dap_params_id[idx]);
for (i = 0; i < DS2_DEVICES_ALL; i++) {
pr_debug("%s:dev[0x%x,0x%x],i:%d,active:%d,bypass:%d,type:%d\n",
__func__, dolby_data->device_id, dev_map[i].device_id,
i, dev_map[i].active, ds2_dap_params_states.dap_bypass,
ds2_dap_params_states.dap_bypass_type);
if (((dev_map[i].device_id & ds2_dap_params_states.device) ||
((ds2_dap_params_states.dap_bypass_type ==
DAP_SOFT_BYPASS) &&
(ds2_dap_params_states.dap_bypass == true))) &&
(dev_map[i].active == true)) {
/*get ptr to the cache storing the params for device*/
if ((ds2_dap_params_states.dap_bypass_type ==
DAP_SOFT_BYPASS) &&
(ds2_dap_params_states.dap_bypass == true))
ds2_ap_params_obj =
&ds2_dap_params[dev_map[0].cache_dev];
else
ds2_ap_params_obj =
&ds2_dap_params[dev_map[i].cache_dev];
/*get the pointer to the param modified array in cache*/
modified_param = ds2_ap_params_obj->dap_params_modified;
if (modified_param == NULL) {
pr_err("%s: modified_param NULL\n", __func__);
ret = -EINVAL;
goto end;
}
/*
* Send the endp param if use cache is set
* or if param is modified
*/
if (!commit || msm_ds2_dap_check_is_param_modified(
modified_param, idx, commit)) {
msm_ds2_dap_send_end_point(i, idx);
commit = 0;
}
ret = msm_ds2_dap_send_cached_params(i, commit);
if (ret < 0) {
pr_err("%s: send cached param %d\n",
__func__, ret);
goto end;
}
}
}
end:
return ret;
}
static int msm_ds2_dap_handle_commands(u32 cmd, void *arg)
{
int ret = 0, port_id = 0;
struct dolby_param_data *dolby_data = (struct dolby_param_data *)arg;
pr_debug("%s: param_id %d,be_id %d,device_id 0x%x,length %d,data %d\n",
__func__, dolby_data->param_id, dolby_data->be_id,
dolby_data->device_id, dolby_data->length, dolby_data->data[0]);
switch (dolby_data->param_id) {
case DAP_CMD_COMMIT_ALL:
msm_ds2_dap_commit_params(dolby_data, 0);
break;
case DAP_CMD_COMMIT_CHANGED:
msm_ds2_dap_commit_params(dolby_data, 1);
break;
case DAP_CMD_USE_CACHE_FOR_INIT:
ds2_dap_params_states.use_cache = dolby_data->data[0];
break;
case DAP_CMD_SET_BYPASS:
pr_debug("%s: bypass %d bypass type %d, data %d\n", __func__,
ds2_dap_params_states.dap_bypass,
ds2_dap_params_states.dap_bypass_type,
dolby_data->data[0]);
/* Do not perform bypass operation if bypass state is same*/
if (ds2_dap_params_states.dap_bypass == dolby_data->data[0])
break;
ds2_dap_params_states.dap_bypass = dolby_data->data[0];
/* hard bypass */
if (ds2_dap_params_states.dap_bypass_type == DAP_HARD_BYPASS)
msm_ds2_dap_handle_bypass(dolby_data);
/* soft bypass */
msm_ds2_dap_commit_params(dolby_data, 0);
break;
case DAP_CMD_SET_BYPASS_TYPE:
if (dolby_data->data[0] == true)
ds2_dap_params_states.dap_bypass_type =
DAP_HARD_BYPASS;
else
ds2_dap_params_states.dap_bypass_type =
DAP_SOFT_BYPASS;
pr_debug("%s: bypass type %d", __func__,
ds2_dap_params_states.dap_bypass_type);
break;
case DAP_CMD_SET_ACTIVE_DEVICE:
pr_debug("%s: DAP_CMD_SET_ACTIVE_DEVICE length %d\n",
__func__, dolby_data->length);
		/* TODO: need to handle multiple instances */
ds2_dap_params_states.device |= dolby_data->device_id;
port_id = msm_ds2_dap_get_port_id(
dolby_data->device_id,
dolby_data->be_id);
pr_debug("%s: device id 0x%x all_dev 0x%x port_id %d\n",
__func__, dolby_data->device_id,
ds2_dap_params_states.device, port_id);
msm_ds2_dap_update_dev_map_port_id(dolby_data->device_id,
port_id);
if (port_id == DOLBY_INVALID_PORT_ID) {
pr_err("%s: invalid port id %d\n", __func__, port_id);
ret = -EINVAL;
goto end;
}
break;
}
end:
return ret;
}
static int msm_ds2_dap_set_param(u32 cmd, void *arg)
{
int rc = 0, idx, i, j, off, port_id = 0, cdev = 0;
int32_t num_device = 0;
int32_t dev_arr[DS2_DSP_SUPPORTED_ENDP_DEVICE] = {0};
struct dolby_param_data *dolby_data = (struct dolby_param_data *)arg;
rc = msm_ds2_dap_update_num_devices(dolby_data, &num_device, dev_arr,
DS2_DSP_SUPPORTED_ENDP_DEVICE);
if (num_device == 0 || rc < 0) {
pr_err("%s: num devices 0\n", __func__);
rc = -EINVAL;
goto end;
}
for (i = 0; i < num_device; i++) {
port_id = msm_ds2_dap_get_port_id(dev_arr[i],
dolby_data->be_id);
if (port_id != DOLBY_INVALID_PORT_ID)
msm_ds2_dap_update_dev_map_port_id(dev_arr[i], port_id);
cdev = msm_ds2_dap_map_device_to_dolby_cache_devices(
dev_arr[i]);
if (cdev < 0 || cdev >= DOLBY_MAX_CACHE) {
pr_err("%s: Invalide cache device %d for device 0x%x\n",
__func__, cdev, dev_arr[i]);
rc = -EINVAL;
goto end;
}
pr_debug("%s:port:%d,be:%d,dev:0x%x,cdev:%d,param:0x%x,len:%d\n"
, __func__, port_id, dolby_data->be_id, dev_arr[i],
cdev, dolby_data->param_id, dolby_data->length);
for (idx = 0; idx < MAX_DS2_PARAMS; idx++) {
/*paramid from user space*/
if (dolby_data->param_id == ds2_dap_params_id[idx])
break;
}
if (idx > MAX_DS2_PARAMS-1) {
pr_err("%s: invalid param id 0x%x at idx %d\n",
__func__, dolby_data->param_id, idx);
rc = -EINVAL;
goto end;
}
/* cache the parameters */
ds2_dap_params[cdev].dap_params_modified[idx] += 1;
for (j = 0; j < dolby_data->length; j++) {
off = ds2_dap_params_offset[idx];
ds2_dap_params[cdev].params_val[off + j] =
dolby_data->data[j];
pr_debug("%s:off %d,val[i/p:o/p]-[%d / %d]\n",
__func__, off, dolby_data->data[j],
ds2_dap_params[cdev].
params_val[off + j]);
}
}
end:
return rc;
}
static int msm_ds2_dap_get_param(u32 cmd, void *arg)
{
int rc = 0, i, port_id = 0, copp_idx = -1;
struct dolby_param_data *dolby_data = (struct dolby_param_data *)arg;
int32_t *update_params_value = NULL, *params_value = NULL;
uint32_t params_length = DOLBY_MAX_LENGTH_INDIVIDUAL_PARAM *
sizeof(uint32_t);
uint32_t param_payload_len =
DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
/* Return error on get param in soft or hard bypass */
if (ds2_dap_params_states.dap_bypass == true) {
pr_err("%s: called in bypass_type %d bypass %d\n", __func__,
ds2_dap_params_states.dap_bypass_type,
ds2_dap_params_states.dap_bypass);
rc = -EINVAL;
goto end;
}
for (i = 0; i < DS2_DEVICES_ALL; i++) {
if ((dev_map[i].active) &&
(dev_map[i].device_id & dolby_data->device_id)) {
port_id = dev_map[i].port_id;
copp_idx = dev_map[i].copp_idx;
break;
}
}
if (port_id == DOLBY_INVALID_PORT_ID) {
pr_err("%s: Invalid port\n", __func__);
rc = -EINVAL;
goto end;
}
if ((copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) {
pr_err("%s: Invalid copp_idx\n", __func__);
rc = -EINVAL;
goto end;
}
pr_debug("%s: port_id 0x%x, copp_idx %d, dev_map[i].device_id %x\n",
__func__, port_id, copp_idx, dev_map[i].device_id);
params_value = kzalloc(params_length, GFP_KERNEL);
if (!params_value) {
pr_err("%s: params memory alloc failed\n", __func__);
rc = -ENOMEM;
goto end;
}
if (dolby_data->param_id == DOLBY_PARAM_ID_VER) {
rc = adm_get_params(port_id, copp_idx,
DOLBY_BUNDLE_MODULE_ID,
DOLBY_PARAM_ID_VER,
params_length + param_payload_len,
(char *)params_value);
} else {
for (i = 0; i < MAX_DS2_PARAMS; i++)
if (ds2_dap_params_id[i] ==
dolby_data->param_id)
break;
if (i > MAX_DS2_PARAMS-1) {
pr_err("%s: invalid param id 0x%x at id %d\n", __func__,
dolby_data->param_id, i);
rc = -EINVAL;
goto end;
} else {
params_length = (ds2_dap_params_length[i] +
DOLBY_PARAM_PAYLOAD_SIZE) *
sizeof(uint32_t);
rc = adm_get_params(port_id, copp_idx,
DOLBY_BUNDLE_MODULE_ID,
ds2_dap_params_id[i],
params_length +
param_payload_len,
(char *)params_value);
}
}
if (rc) {
pr_err("%s: get parameters failed rc %d\n", __func__, rc);
rc = -EINVAL;
goto end;
}
update_params_value = params_value;
if (copy_to_user((void *)dolby_data->data,
&update_params_value[DOLBY_PARAM_PAYLOAD_SIZE],
(dolby_data->length * sizeof(uint32_t)))) {
pr_err("%s: error getting param\n", __func__);
rc = -EFAULT;
goto end;
}
end:
kfree(params_value);
return rc;
}
static int msm_ds2_dap_param_visualizer_control_get(u32 cmd, void *arg)
{
int32_t *visualizer_data = NULL;
int i = 0, ret = 0, port_id = -1, cache_dev = -1, copp_idx = -1;
int32_t *update_visualizer_data = NULL;
struct dolby_param_data *dolby_data = (struct dolby_param_data *)arg;
uint32_t offset, length, params_length;
uint32_t param_payload_len =
DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
for (i = 0; i < DS2_DEVICES_ALL; i++) {
if ((dev_map[i].active)) {
port_id = dev_map[i].port_id;
cache_dev = dev_map[i].cache_dev;
copp_idx = dev_map[i].copp_idx;
break;
}
}
if (port_id == DOLBY_INVALID_PORT_ID ||
(copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) {
ret = 0;
dolby_data->length = 0;
pr_err("%s: no device active\n", __func__);
goto end;
}
length = ds2_dap_params[cache_dev].params_val[DOLBY_PARAM_VCNB_OFFSET];
params_length = (2*length + DOLBY_VIS_PARAM_HEADER_SIZE) *
sizeof(uint32_t);
visualizer_data = kzalloc(params_length, GFP_KERNEL);
if (!visualizer_data) {
pr_err("%s: params memory alloc failed\n", __func__);
ret = -ENOMEM;
dolby_data->length = 0;
goto end;
}
memset(visualizer_data, 0x0, params_length);
/* Return error on get param in soft or hard bypass */
if (ds2_dap_params_states.dap_bypass == true) {
pr_debug("%s: visualizer called in bypass, return 0\n",
__func__);
ret = 0;
dolby_data->length = 0;
goto end;
}
offset = 0;
params_length = length * sizeof(uint32_t);
ret = adm_get_params(port_id, copp_idx,
DOLBY_BUNDLE_MODULE_ID,
DOLBY_PARAM_ID_VCBG,
params_length + param_payload_len,
(((char *)(visualizer_data)) + offset));
if (ret) {
pr_err("%s: get parameters failed ret %d\n", __func__, ret);
ret = -EINVAL;
dolby_data->length = 0;
goto end;
}
offset = length * sizeof(uint32_t);
ret = adm_get_params(port_id, copp_idx,
DOLBY_BUNDLE_MODULE_ID,
DOLBY_PARAM_ID_VCBE,
params_length + param_payload_len,
(((char *)(visualizer_data)) + offset));
if (ret) {
pr_err("%s: get parameters failed ret %d\n", __func__, ret);
ret = -EINVAL;
dolby_data->length = 0;
goto end;
}
update_visualizer_data = visualizer_data;
dolby_data->length = 2 * length;
if (copy_to_user((void *)dolby_data->data,
(void *)update_visualizer_data,
(dolby_data->length * sizeof(uint32_t)))) {
pr_err("%s: copy to user failed for data\n", __func__);
dolby_data->length = 0;
ret = -EFAULT;
goto end;
}
end:
kfree(visualizer_data);
return ret;
}
int msm_ds2_dap_set_security_control(u32 cmd, void *arg)
{
struct dolby_param_license *dolby_license =
((struct dolby_param_license *)arg);
pr_debug("%s: dmid %d license key %d\n", __func__,
dolby_license->dmid, dolby_license->license_key);
core_set_dolby_manufacturer_id(dolby_license->dmid);
core_set_license(dolby_license->license_key, DOLBY_DS1_LICENSE_ID);
return 0;
}
int msm_ds2_dap_update_port_parameters(struct snd_hwdep *hw, struct file *file,
bool open)
{
int i = 0, dev_id = 0;
pr_debug("%s: open %d\n", __func__, open);
ds2_dap_params_states.node_opened = open;
ds2_dap_params_states.dap_bypass = true;
ds2_dap_params_states.dap_bypass_type = 0;
ds2_dap_params_states.use_cache = 0;
ds2_dap_params_states.device = 0;
ds2_dap_params_states.custom_stereo_onoff = 0;
for (i = 0; i < DS2_DEVICES_ALL; i++) {
if (i == 0)
dev_map[i].device_id = 0;
else {
dev_id = (1 << (i-1));
if (all_supported_devices & dev_id)
dev_map[i].device_id = dev_id;
else
continue;
}
dev_map[i].cache_dev =
msm_ds2_dap_map_device_to_dolby_cache_devices(
dev_map[i].device_id);
if (dev_map[i].cache_dev < 0 ||
dev_map[i].cache_dev >= DOLBY_MAX_CACHE)
pr_err("%s: Invalide cache device %d for device 0x%x\n",
__func__,
dev_map[i].cache_dev,
dev_map[i].device_id);
dev_map[i].port_id = -1;
dev_map[i].active = false;
dev_map[i].stream_ref_count = 0;
dev_map[i].cal_data = NULL;
dev_map[i].copp_idx = -1;
pr_debug("%s: device_id 0x%x, cache_dev %d act %d\n", __func__,
dev_map[i].device_id, dev_map[i].cache_dev,
dev_map[i].active);
}
return 0;
}
int msm_ds2_dap_ioctl_shared(struct snd_hwdep *hw, struct file *file,
u32 cmd, void *arg)
{
int ret = 0;
pr_debug("%s: cmd: 0x%x\n", __func__, cmd);
switch (cmd) {
case SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM:
ret = msm_ds2_dap_set_param(cmd, arg);
break;
case SNDRV_DEVDEP_DAP_IOCTL_GET_PARAM:
ret = msm_ds2_dap_get_param(cmd, arg);
break;
case SNDRV_DEVDEP_DAP_IOCTL_DAP_COMMAND:
ret = msm_ds2_dap_handle_commands(cmd, arg);
break;
case SNDRV_DEVDEP_DAP_IOCTL_DAP_LICENSE:
ret = msm_ds2_dap_set_security_control(cmd, arg);
break;
case SNDRV_DEVDEP_DAP_IOCTL_GET_VISUALIZER:
ret = msm_ds2_dap_param_visualizer_control_get(cmd, arg);
break;
default:
pr_err("%s: called with invalid control 0x%x\n", __func__, cmd);
ret = -EINVAL;
}
return ret;
}
int msm_ds2_dap_ioctl(struct snd_hwdep *hw, struct file *file,
u32 cmd, void *arg)
{
int ret = 0;
pr_debug("%s: cmd: 0x%x\n", __func__, cmd);
if (!arg) {
pr_err("%s: Invalid params event status\n", __func__);
ret = -EINVAL;
goto end;
}
switch (cmd) {
case SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM:
case SNDRV_DEVDEP_DAP_IOCTL_DAP_COMMAND: {
struct dolby_param_data dolby_data;
if (copy_from_user((void *)&dolby_data, (void *)arg,
sizeof(struct dolby_param_data))) {
pr_err("%s: Copy from user failed\n", __func__);
ret = -EFAULT;
goto end;
}
ret = msm_ds2_dap_ioctl_shared(hw, file, cmd, &dolby_data);
break;
}
case SNDRV_DEVDEP_DAP_IOCTL_DAP_LICENSE: {
struct dolby_param_license dolby_license;
if (copy_from_user((void *)&dolby_license, (void *)arg,
sizeof(struct dolby_param_license))) {
pr_err("%s: Copy from user failed\n", __func__);
ret = -EFAULT;
goto end;
}
ret = msm_ds2_dap_ioctl_shared(hw, file, cmd, &dolby_license);
break;
}
case SNDRV_DEVDEP_DAP_IOCTL_GET_PARAM:
case SNDRV_DEVDEP_DAP_IOCTL_GET_VISUALIZER: {
struct dolby_param_data dolby_data;
if (copy_from_user((void *)&dolby_data, (void *)arg,
sizeof(struct dolby_param_data))) {
pr_err("%s: Copy from user failed\n", __func__);
ret = -EFAULT;
goto end;
}
ret = msm_ds2_dap_ioctl_shared(hw, file, cmd, &dolby_data);
if (ret < 0)
pr_err("%s: ioctl cmd %d returned err %d\n",
__func__, cmd, ret);
if (copy_to_user((void *)arg, &dolby_data,
sizeof(struct dolby_param_data))) {
pr_err("%s: Copy to user failed\n", __func__);
ret = -EFAULT;
goto end;
}
break;
}
default:
pr_err("%s: called with invalid control 0x%x\n", __func__, cmd);
ret = -EINVAL;
}
end:
return ret;
}
#ifdef CONFIG_COMPAT
int msm_ds2_dap_compat_ioctl(struct snd_hwdep *hw, struct file *file,
u32 cmd, void *arg)
{
int ret = 0;
pr_debug("%s: cmd: 0x%x\n", __func__, cmd);
switch (cmd) {
case SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM32:
cmd = SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM;
goto handle_set_ioctl;
case SNDRV_DEVDEP_DAP_IOCTL_DAP_COMMAND32:
cmd = SNDRV_DEVDEP_DAP_IOCTL_DAP_COMMAND;
handle_set_ioctl:
{
struct dolby_param_data32 dolby_data32;
struct dolby_param_data dolby_data;
memset(&dolby_data32, 0, sizeof(dolby_data32));
memset(&dolby_data, 0, sizeof(dolby_data));
if (copy_from_user(&dolby_data32, (void *)arg,
sizeof(struct dolby_param_data32))) {
pr_err("%s: Copy from user failed\n", __func__);
ret = -EFAULT;
goto end;
}
dolby_data.version = dolby_data32.version;
dolby_data.device_id = dolby_data32.device_id;
dolby_data.be_id = dolby_data32.be_id;
dolby_data.param_id = dolby_data32.param_id;
dolby_data.length = dolby_data32.length;
dolby_data.data = compat_ptr(dolby_data32.data);
ret = msm_ds2_dap_ioctl_shared(hw, file, cmd, &dolby_data);
break;
}
case SNDRV_DEVDEP_DAP_IOCTL_GET_PARAM32:
cmd = SNDRV_DEVDEP_DAP_IOCTL_GET_PARAM;
goto handle_get_ioctl;
case SNDRV_DEVDEP_DAP_IOCTL_GET_VISUALIZER32:
cmd = SNDRV_DEVDEP_DAP_IOCTL_GET_VISUALIZER;
handle_get_ioctl:
{
struct dolby_param_data32 dolby_data32;
struct dolby_param_data dolby_data;
memset(&dolby_data32, 0, sizeof(dolby_data32));
memset(&dolby_data, 0, sizeof(dolby_data));
if (copy_from_user(&dolby_data32, (void *)arg,
sizeof(struct dolby_param_data32))) {
pr_err("%s: Copy from user failed\n", __func__);
ret = -EFAULT;
goto end;
}
dolby_data.version = dolby_data32.version;
dolby_data.device_id = dolby_data32.device_id;
dolby_data.be_id = dolby_data32.be_id;
dolby_data.param_id = dolby_data32.param_id;
dolby_data.length = dolby_data32.length;
dolby_data.data = compat_ptr(dolby_data32.data);
ret = msm_ds2_dap_ioctl_shared(hw, file, cmd, &dolby_data);
if (ret < 0)
pr_err("%s: ioctl cmd %d, returned err %d\n",
__func__, cmd, ret);
dolby_data32.length = dolby_data.length;
if (copy_to_user((void *)arg, &dolby_data32,
sizeof(struct dolby_param_data32))) {
pr_err("%s: Copy to user failed\n", __func__);
ret = -EFAULT;
goto end;
}
break;
}
case SNDRV_DEVDEP_DAP_IOCTL_DAP_LICENSE32: {
struct dolby_param_license32 dolby_license32;
struct dolby_param_license dolby_license;
cmd = SNDRV_DEVDEP_DAP_IOCTL_DAP_LICENSE;
if (copy_from_user((void *)&dolby_license32, (void *)arg,
sizeof(struct dolby_param_license32))) {
pr_err("%s: Copy from user failed\n", __func__);
ret = -EFAULT;
goto end;
}
dolby_license.dmid = dolby_license32.dmid;
dolby_license.license_key = dolby_license32.license_key;
ret = msm_ds2_dap_ioctl_shared(hw, file, cmd, &dolby_license);
break;
}
default:
pr_err("%s: called with invalid control 0x%x\n",
__func__, cmd);
ret = -EINVAL;
}
end:
return ret;
}
#endif
int msm_ds2_dap_init(int port_id, int copp_idx, int channels,
bool is_custom_stereo_on)
{
int ret = 0, idx = -1, i;
struct dolby_param_data dolby_data;
struct audproc_softvolume_params softvol = {
.period = DOLBY_SOFT_VOLUME_PERIOD,
.step = DOLBY_SOFT_VOLUME_STEP,
.rampingcurve = DOLBY_SOFT_VOLUME_CURVE_EXP,
};
pr_debug("%s: port id %d, copp_idx %d\n", __func__, port_id, copp_idx);
if (port_id != DOLBY_INVALID_PORT_ID) {
for (i = 0; i < DS2_DEVICES_ALL; i++) {
if ((dev_map[i].port_id == port_id) &&
/* device part of active device */
(dev_map[i].device_id &
ds2_dap_params_states.device)) {
idx = i;
/* Give priority to headset in case of
combo device */
if (dev_map[i].device_id == SPEAKER)
continue;
else
break;
}
}
if (idx < 0) {
pr_err("%s: invalid index for port %d\n",
__func__, port_id);
ret = -EINVAL;
goto end;
}
pr_debug("%s:index %d, dev[0x%x,0x%x]\n", __func__, idx,
dev_map[idx].device_id, ds2_dap_params_states.device);
dev_map[idx].active = true;
dev_map[idx].copp_idx = copp_idx;
dolby_data.param_id = DOLBY_COMMIT_ALL_TO_DSP;
dolby_data.length = 0;
dolby_data.data = NULL;
dolby_data.device_id = dev_map[idx].device_id;
pr_debug("%s: idx %d, active %d, dev id 0x%x, ref count %d\n",
__func__, idx, dev_map[idx].active,
dev_map[idx].device_id,
dev_map[idx].stream_ref_count);
if (dev_map[idx].stream_ref_count == 0) {
/*perform next 3 func only if hard bypass enabled*/
if (ds2_dap_params_states.dap_bypass_type ==
DAP_HARD_BYPASS) {
ret = msm_ds2_dap_alloc_and_store_cal_data(idx,
ADM_PATH_PLAYBACK, 0);
if (ret < 0) {
pr_err("%s: Failed to alloc and store cal data for idx %d, device %d, copp_idx %d",
__func__,
idx, dev_map[idx].device_id,
dev_map[idx].copp_idx);
dev_map[idx].active = false;
dev_map[idx].copp_idx = -1;
goto end;
}
ret = adm_set_softvolume(port_id, copp_idx,
&softvol);
if (ret < 0) {
pr_err("%s: Soft volume ret error %d\n",
__func__, ret);
dev_map[idx].active = false;
dev_map[idx].copp_idx = -1;
goto end;
}
ret = msm_ds2_dap_init_modules_in_topology(
idx);
if (ret < 0) {
pr_err("%s: Failed to init modules in topolofy for idx %d, device %d, copp_idx %d\n",
__func__, idx,
dev_map[idx].device_id,
dev_map[idx].copp_idx);
dev_map[idx].active = false;
dev_map[idx].copp_idx = -1;
goto end;
}
}
ret = msm_ds2_dap_commit_params(&dolby_data, 0);
if (ret < 0) {
pr_debug("%s: commit params ret %d\n",
__func__, ret);
ret = 0;
}
}
dev_map[idx].stream_ref_count++;
if (is_custom_stereo_on) {
ds2_dap_params_states.custom_stereo_onoff =
is_custom_stereo_on;
set_custom_stereo_onoff(idx,
is_custom_stereo_on);
}
}
end:
return ret;
}
void msm_ds2_dap_deinit(int port_id)
{
/*
	 * Get the active port corresponding to the active device
* Check if this is same as incoming port
* Set it to invalid
*/
int idx = -1, i;
pr_debug("%s: port_id %d\n", __func__, port_id);
if (port_id != DOLBY_INVALID_PORT_ID) {
for (i = 0; i < DS2_DEVICES_ALL; i++) {
/* Active port */
if ((dev_map[i].port_id == port_id) &&
/* device part of active device */
(dev_map[i].device_id &
ds2_dap_params_states.device) &&
/*
* Need this check to avoid race condition of
* active device being set and playback
* instance opened
*/
/* active device*/
dev_map[i].active) {
idx = i;
if (dev_map[i].device_id == SPEAKER)
continue;
else
break;
}
}
if (idx < 0) {
pr_err("%s: invalid index for port %d\n",
__func__, port_id);
return;
}
pr_debug("%s:index %d, dev [0x%x, 0x%x]\n", __func__, idx,
dev_map[idx].device_id, ds2_dap_params_states.device);
dev_map[idx].stream_ref_count--;
if (dev_map[idx].stream_ref_count == 0) {
/*perform next func only if hard bypass enabled*/
if (ds2_dap_params_states.dap_bypass_type ==
DAP_HARD_BYPASS) {
msm_ds2_dap_free_cal_data(idx);
}
ds2_dap_params_states.device &= ~dev_map[idx].device_id;
dev_map[idx].active = false;
dev_map[idx].copp_idx = -1;
}
pr_debug("%s:idx %d, active %d, dev id 0x%x ref count %d\n",
__func__, idx, dev_map[idx].active,
dev_map[idx].device_id, dev_map[idx].stream_ref_count);
}
}
int msm_ds2_dap_set_custom_stereo_onoff(int port_id, int copp_idx,
bool is_custom_stereo_enabled)
{
int idx = -1, rc = 0, i;
pr_debug("%s: port_id %d\n", __func__, port_id);
if (port_id != DOLBY_INVALID_PORT_ID) {
for (i = 0; i < DS2_DEVICES_ALL; i++) {
if ((dev_map[i].port_id == port_id) &&
/* device part of active device */
(dev_map[i].device_id &
ds2_dap_params_states.device)) {
idx = i;
if (dev_map[i].device_id == SPEAKER)
continue;
else
break;
}
}
if (idx < 0) {
pr_err("%s: invalid index for port %d\n",
__func__, port_id);
return rc;
}
ds2_dap_params_states.custom_stereo_onoff =
is_custom_stereo_enabled;
rc = set_custom_stereo_onoff(idx,
is_custom_stereo_enabled);
if (rc < 0) {
pr_err("%s: Custom stereo err %d on port %d\n",
__func__, rc, port_id);
}
}
return rc;
}
#else
static int msm_ds2_dap_alloc_and_store_cal_data(int dev_map_idx, int path,
int perf_mode)
{
return 0;
}
static int msm_ds2_dap_free_cal_data(int dev_map_idx)
{
return 0;
}
static int msm_ds2_dap_send_cal_data(int dev_map_idx)
{
return 0;
}
static int msm_ds2_dap_can_enable_module(int32_t module_id)
{
return 0;
}
static int msm_ds2_dap_init_modules_in_topology(int dev_map_idx)
{
return 0;
}
static bool msm_ds2_dap_check_is_param_modified(int32_t *dap_params_modified,
int32_t idx, int32_t commit)
{
return false;
}
static int msm_ds2_dap_map_device_to_dolby_cache_devices(int32_t device_id)
{
return 0;
}
static int msm_ds2_dap_update_num_devices(struct dolby_param_data *dolby_data,
int32_t *num_device, int32_t *dev_arr,
int32_t array_size)
{
return 0;
}
static int msm_ds2_dap_commit_params(struct dolby_param_data *dolby_data,
int commit)
{
return 0;
}
static int msm_ds2_dap_handle_commands(u32 cmd, void *arg)
{
return 0;
}
static int msm_ds2_dap_set_param(u32 cmd, void *arg)
{
return 0;
}
static int msm_ds2_dap_get_param(u32 cmd, void *arg)
{
return 0;
}
static int msm_ds2_dap_send_end_point(int dev_map_idx, int endp_idx)
{
return 0;
}
static int msm_ds2_dap_send_cached_params(int dev_map_idx,
int commit)
{
return 0;
}
static int msm_ds2_dap_set_vspe_vdhe(int dev_map_idx,
bool is_custom_stereo_enabled)
{
return 0;
}
static int msm_ds2_dap_param_visualizer_control_get(
u32 cmd, void *arg,
struct msm_pcm_routing_bdai_data *bedais)
{
return 0;
}
static int msm_ds2_dap_set_security_control(u32 cmd, void *arg)
{
	return 0;
}
static int msm_ds2_dap_update_dev_map_port_id(int32_t device_id, int port_id)
{
return 0;
}
static int32_t msm_ds2_dap_get_port_id(
int32_t device_id, int32_t be_id)
{
return 0;
}
static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data)
{
return 0;
}
static int msm_ds2_dap_handle_bypass_wait(int port_id, int copp_idx,
int wait_time)
{
return 0;
}
static int dap_set_custom_stereo_onoff(int dev_map_idx,
bool is_custom_stereo_enabled)
{
return 0;
}
int qti_set_custom_stereo_on(int port_id, int copp_idx,
bool is_custom_stereo_on)
{
return 0;
}
int set_custom_stereo_onoff(int dev_map_idx,
bool is_custom_stereo_enabled)
{
return 0;
}
int msm_ds2_dap_ioctl_shared(struct snd_hwdep *hw, struct file *file,
u32 cmd, void *arg)
{
return 0;
}
#endif /*CONFIG_DOLBY_DS2*/
/*
* Copyright © 2006 Jinghua Luo
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without
* fee, provided that the above copyright notice appear in all copies
* and that both that copyright notice and this permission notice
* appear in supporting documentation, and that the name of
* Red Hat, Inc. not be used in advertising or publicity pertaining to
* distribution of the software without specific, written prior
* permission. Red Hat, Inc. makes no representations about the
* suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* JINGHUA LUO DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL RED HAT, INC. BE LIABLE FOR ANY SPECIAL,
* INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
* IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Author: Jinghua Luo <sunmoon1997@gmail.com>
* Derived from:
* text-antialias-none.c,
* ft-font-create-for-ft-face.c.
* Original Author: Carl D. Worth <cworth@cworth.org>
*/
#include "cairo-test.h"
#include <cairo-ft.h>
#define WIDTH 40
#define HEIGHT 30
#define TEXT_SIZE 12
static cairo_status_t
create_scaled_font (cairo_t * cr,
cairo_scaled_font_t **out)
{
FcPattern *pattern, *resolved;
FcResult result;
cairo_font_face_t *font_face;
cairo_scaled_font_t *scaled_font;
cairo_font_options_t *font_options;
cairo_matrix_t font_matrix, ctm;
cairo_status_t status;
double pixel_size;
font_options = cairo_font_options_create ();
cairo_get_font_options (cr, font_options);
pattern = FcPatternCreate ();
if (pattern == NULL)
return CAIRO_STATUS_NO_MEMORY;
FcPatternAddString (pattern, FC_FAMILY, (FcChar8 *) CAIRO_TEST_FONT_FAMILY " Sans");
FcPatternAddDouble (pattern, FC_SIZE, TEXT_SIZE);
FcConfigSubstitute (NULL, pattern, FcMatchPattern);
cairo_ft_font_options_substitute (font_options, pattern);
FcDefaultSubstitute (pattern);
resolved = FcFontMatch (NULL, pattern, &result);
if (resolved == NULL) {
FcPatternDestroy (pattern);
return CAIRO_STATUS_NO_MEMORY;
}
/* turn antialiasing off */
FcPatternDel (resolved, FC_ANTIALIAS);
FcPatternAddBool (resolved, FC_ANTIALIAS, FcFalse);
FcPatternGetDouble (resolved, FC_PIXEL_SIZE, 0, &pixel_size);
font_face = cairo_ft_font_face_create_for_pattern (resolved);
cairo_matrix_init_identity (&font_matrix);
cairo_matrix_scale (&font_matrix, pixel_size, pixel_size);
cairo_get_matrix (cr, &ctm);
scaled_font = cairo_scaled_font_create (font_face,
&font_matrix,
&ctm,
font_options);
cairo_font_options_destroy (font_options);
cairo_font_face_destroy (font_face);
FcPatternDestroy (pattern);
FcPatternDestroy (resolved);
status = cairo_scaled_font_status (scaled_font);
if (status) {
cairo_scaled_font_destroy (scaled_font);
return status;
}
*out = scaled_font;
return CAIRO_STATUS_SUCCESS;
}
static cairo_test_status_t
draw (cairo_t *cr, int width, int height)
{
cairo_text_extents_t extents;
cairo_scaled_font_t *scaled_font;
cairo_status_t status;
const char black[] = "black", blue[] = "blue";
/* We draw in the default black, so paint white first. */
cairo_save (cr);
cairo_set_source_rgb (cr, 1.0, 1.0, 1.0); /* white */
cairo_paint (cr);
cairo_restore (cr);
status = create_scaled_font (cr, &scaled_font);
if (status) {
return cairo_test_status_from_status (cairo_test_get_context (cr),
status);
}
cairo_set_scaled_font (cr, scaled_font);
cairo_set_source_rgb (cr, 0, 0, 0); /* black */
cairo_text_extents (cr, black, &extents);
cairo_move_to (cr, -extents.x_bearing, -extents.y_bearing);
cairo_show_text (cr, black);
cairo_translate (cr, 0, -extents.y_bearing + 1);
cairo_set_source_rgb (cr, 0, 0, 1); /* blue */
cairo_text_extents (cr, blue, &extents);
cairo_move_to (cr, -extents.x_bearing, -extents.y_bearing);
cairo_show_text (cr, blue);
cairo_scaled_font_destroy (scaled_font);
return CAIRO_TEST_SUCCESS;
}
CAIRO_TEST (ft_text_antialias_none,
"Tests text rendering with no antialiasing",
"ft, text", /* keywords */
"target=raster", /* requirements */
WIDTH, HEIGHT,
NULL, draw)
/*
* pata_sil680.c - SIL680 PATA for new ATA layer
* (C) 2005 Red Hat Inc
*
* based upon
*
* linux/drivers/ide/pci/siimage.c Version 1.07 Nov 30, 2003
*
* Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2003 Red Hat <alan@redhat.com>
*
* May be copied or modified under the terms of the GNU General Public License
*
* Documentation publicly available.
*
* If you have strange problems with nVidia chipset systems please
* see the SI support documentation and update your system BIOS
* if necessary
*
* TODO
* If we know all our devices are LBA28 (or LBA28 sized) we could use
* the command fifo mode.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_sil680"
#define DRV_VERSION "0.4.9"
#define SIL680_MMIO_BAR 5
/**
* sil680_selreg - return register base
* @ap: ATA interface
* @r: config offset
*
* Turn a config register offset into the right address in PCI space
* to access the control register in question.
*
* Thankfully this is a configuration operation so isn't performance
 *	critical.
*/
static unsigned long sil680_selreg(struct ata_port *ap, int r)
{
unsigned long base = 0xA0 + r;
base += (ap->port_no << 4);
return base;
}
/**
* sil680_seldev - return register base
* @ap: ATA interface
 *	@adev: ATA device
 *	@r: config offset
*
* Turn a config register offset into the right address in PCI space
* to access the control register in question including accounting for
* the unit shift.
*/
static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev, int r)
{
unsigned long base = 0xA0 + r;
base += (ap->port_no << 4);
base |= adev->devno ? 2 : 0;
return base;
}
/**
* sil680_cable_detect - cable detection
* @ap: ATA port
*
* Perform cable detection. The SIL680 stores this in PCI config
* space for us.
*/
static int sil680_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long addr = sil680_selreg(ap, 0);
u8 ata66;
pci_read_config_byte(pdev, addr, &ata66);
if (ata66 & 1)
return ATA_CBL_PATA80;
else
return ATA_CBL_PATA40;
}
/**
* sil680_set_piomode - set PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the SIL680 registers for PIO mode. Note that the task speed
* registers are shared between the devices so we must pick the lowest
* mode for command work.
*/
static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const u16 speed_p[5] = {
0x328A, 0x2283, 0x1104, 0x10C3, 0x10C1
};
static const u16 speed_t[5] = {
0x328A, 0x2283, 0x1281, 0x10C3, 0x10C1
};
unsigned long tfaddr = sil680_selreg(ap, 0x02);
unsigned long addr = sil680_seldev(ap, adev, 0x04);
unsigned long addr_mask = 0x80 + 4 * ap->port_no;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int pio = adev->pio_mode - XFER_PIO_0;
int lowest_pio = pio;
int port_shift = 4 * adev->devno;
u16 reg;
u8 mode;
struct ata_device *pair = ata_dev_pair(adev);
if (pair != NULL && adev->pio_mode > pair->pio_mode)
lowest_pio = pair->pio_mode - XFER_PIO_0;
pci_write_config_word(pdev, addr, speed_p[pio]);
pci_write_config_word(pdev, tfaddr, speed_t[lowest_pio]);
	pci_read_config_word(pdev, tfaddr-2, &reg);
pci_read_config_byte(pdev, addr_mask, &mode);
reg &= ~0x0200; /* Clear IORDY */
mode &= ~(3 << port_shift); /* Clear IORDY and DMA bits */
if (ata_pio_need_iordy(adev)) {
reg |= 0x0200; /* Enable IORDY */
mode |= 1 << port_shift;
}
pci_write_config_word(pdev, tfaddr-2, reg);
pci_write_config_byte(pdev, addr_mask, mode);
}
/**
* sil680_set_dmamode - set DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the MWDMA/UDMA modes for the sil680 chipset.
*
* The MWDMA mode values are pulled from a lookup table
* while the chipset uses mode number for UDMA.
*/
static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u8 ultra_table[2][7] = {
{ 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01, 0xFF }, /* 100MHz */
		{ 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }, /* 133MHz */
};
static const u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long ma = sil680_seldev(ap, adev, 0x08);
unsigned long ua = sil680_seldev(ap, adev, 0x0C);
unsigned long addr_mask = 0x80 + 4 * ap->port_no;
int port_shift = adev->devno * 4;
u8 scsc, mode;
u16 multi, ultra;
pci_read_config_byte(pdev, 0x8A, &scsc);
pci_read_config_byte(pdev, addr_mask, &mode);
pci_read_config_word(pdev, ma, &multi);
pci_read_config_word(pdev, ua, &ultra);
/* Mask timing bits */
ultra &= ~0x3F;
mode &= ~(0x03 << port_shift);
/* Extract scsc */
scsc = (scsc & 0x30) ? 1 : 0;
if (adev->dma_mode >= XFER_UDMA_0) {
multi = 0x10C1;
ultra |= ultra_table[scsc][adev->dma_mode - XFER_UDMA_0];
mode |= (0x03 << port_shift);
} else {
multi = dma_table[adev->dma_mode - XFER_MW_DMA_0];
mode |= (0x02 << port_shift);
}
pci_write_config_byte(pdev, addr_mask, mode);
pci_write_config_word(pdev, ma, multi);
pci_write_config_word(pdev, ua, ultra);
}
/**
* sil680_sff_exec_command - issue ATA command to host controller
* @ap: port to which command is being issued
* @tf: ATA taskfile register set
*
* Issues ATA command, with proper synchronization with interrupt
* handler / other threads. Use our MMIO space for PCI posting to avoid
* a hideously slow cycle all the way to the device.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static void sil680_sff_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf)
{
DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
iowrite8(tf->command, ap->ioaddr.command_addr);
ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
static bool sil680_sff_irq_check(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long addr = sil680_selreg(ap, 1);
u8 val;
pci_read_config_byte(pdev, addr, &val);
return val & 0x08;
}
static struct scsi_host_template sil680_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations sil680_port_ops = {
.inherits = &ata_bmdma32_port_ops,
.sff_exec_command = sil680_sff_exec_command,
.sff_irq_check = sil680_sff_irq_check,
.cable_detect = sil680_cable_detect,
.set_piomode = sil680_set_piomode,
.set_dmamode = sil680_set_dmamode,
};
/**
* sil680_init_chip - chip setup
 *	@pdev: PCI device
 *	@try_mmio: set on return to indicate whether MMIO access should be tried
*
* Perform all the chip setup which must be done both when the device
* is powered up on boot and when we resume in case we resumed from RAM.
* Returns the final clock settings.
*/
static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
{
u8 tmpbyte = 0;
/* FIXME: double check */
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
pdev->revision ? 1 : 255);
pci_write_config_byte(pdev, 0x80, 0x00);
pci_write_config_byte(pdev, 0x84, 0x00);
pci_read_config_byte(pdev, 0x8A, &tmpbyte);
dev_dbg(&pdev->dev, "sil680: BA5_EN = %d clock = %02X\n",
tmpbyte & 1, tmpbyte & 0x30);
*try_mmio = 0;
#ifdef CONFIG_PPC
if (machine_is(cell))
*try_mmio = (tmpbyte & 1) || pci_resource_start(pdev, 5);
#endif
switch (tmpbyte & 0x30) {
case 0x00:
/* 133 clock attempt to force it on */
pci_write_config_byte(pdev, 0x8A, tmpbyte|0x10);
break;
case 0x30:
/* if clocking is disabled */
/* 133 clock attempt to force it on */
pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20);
break;
case 0x10:
/* 133 already */
break;
case 0x20:
/* BIOS set PCI x2 clocking */
break;
}
pci_read_config_byte(pdev, 0x8A, &tmpbyte);
dev_dbg(&pdev->dev, "sil680: BA5_EN = %d clock = %02X\n",
tmpbyte & 1, tmpbyte & 0x30);
pci_write_config_byte(pdev, 0xA1, 0x72);
pci_write_config_word(pdev, 0xA2, 0x328A);
pci_write_config_dword(pdev, 0xA4, 0x62DD62DD);
pci_write_config_dword(pdev, 0xA8, 0x43924392);
pci_write_config_dword(pdev, 0xAC, 0x40094009);
pci_write_config_byte(pdev, 0xB1, 0x72);
pci_write_config_word(pdev, 0xB2, 0x328A);
pci_write_config_dword(pdev, 0xB4, 0x62DD62DD);
pci_write_config_dword(pdev, 0xB8, 0x43924392);
pci_write_config_dword(pdev, 0xBC, 0x40094009);
switch (tmpbyte & 0x30) {
case 0x00:
printk(KERN_INFO "sil680: 100MHz clock.\n");
break;
case 0x10:
printk(KERN_INFO "sil680: 133MHz clock.\n");
break;
case 0x20:
printk(KERN_INFO "sil680: Using PCI clock.\n");
break;
/* This last case is _NOT_ ok */
case 0x30:
printk(KERN_ERR "sil680: Clock disabled ?\n");
}
return tmpbyte & 0x30;
}
static int __devinit sil680_init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &sil680_port_ops
};
static const struct ata_port_info info_slow = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &sil680_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
struct ata_host *host;
void __iomem *mmio_base;
int rc, try_mmio;
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
switch (sil680_init_chip(pdev, &try_mmio)) {
case 0:
ppi[0] = &info_slow;
break;
case 0x30:
return -ENODEV;
}
if (!try_mmio)
goto use_ioports;
/* Try to acquire MMIO resources and fallback to PIO if
* that fails
*/
rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
if (rc)
goto use_ioports;
/* Allocate host and set it up */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host)
return -ENOMEM;
host->iomap = pcim_iomap_table(pdev);
/* Setup DMA masks */
rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
return rc;
rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
return rc;
pci_set_master(pdev);
/* Get MMIO base and initialize port addresses */
mmio_base = host->iomap[SIL680_MMIO_BAR];
host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
ata_sff_std_ports(&host->ports[0]->ioaddr);
host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
ata_sff_std_ports(&host->ports[1]->ioaddr);
/* Register & activate */
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sil680_sht);
use_ioports:
return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0);
}
#ifdef CONFIG_PM
static int sil680_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
int try_mmio, rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
sil680_init_chip(pdev, &try_mmio);
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id sil680[] = {
{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },
{ },
};
static struct pci_driver sil680_pci_driver = {
.name = DRV_NAME,
.id_table = sil680,
.probe = sil680_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = sil680_reinit_one,
#endif
};
static int __init sil680_init(void)
{
return pci_register_driver(&sil680_pci_driver);
}
static void __exit sil680_exit(void)
{
pci_unregister_driver(&sil680_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for SI680 PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil680);
MODULE_VERSION(DRV_VERSION);
module_init(sil680_init);
module_exit(sil680_exit);
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_TYPES_H__
#define __DAL_TYPES_H__
#include "signal_types.h"
#include "dc_types.h"
struct dal_logger;
struct dc_bios;
enum dce_version {
DCE_VERSION_UNKNOWN = (-1),
DCE_VERSION_8_0,
DCE_VERSION_8_1,
DCE_VERSION_8_3,
DCE_VERSION_10_0,
DCE_VERSION_11_0,
DCE_VERSION_11_2,
DCE_VERSION_11_22,
DCE_VERSION_12_0,
DCE_VERSION_MAX,
DCN_VERSION_1_0,
DCN_VERSION_MAX
};
#endif /* __DAL_TYPES_H__ */
/*
** The OpenGL Extension Wrangler Library
** Copyright (C) 2002-2008, Milan Ikits <milan ikits[]ieee org>
** Copyright (C) 2002-2008, Marcelo E. Magallon <mmagallo[]debian org>
** Copyright (C) 2002, Lev Povalahev
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are met:
**
** * Redistributions of source code must retain the above copyright notice,
** this list of conditions and the following disclaimer.
** * Redistributions in binary form must reproduce the above copyright notice,
** this list of conditions and the following disclaimer in the documentation
** and/or other materials provided with the distribution.
** * The name of the author may be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
** AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
** IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
** ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
** LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
** CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
** SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
** INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
** CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
** ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
** THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
** Copyright (c) 2007 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a
** copy of this software and/or associated documentation files (the
** "Materials"), to deal in the Materials without restriction, including
** without limitation the rights to use, copy, modify, merge, publish,
** distribute, sublicense, and/or sell copies of the Materials, and to
** permit persons to whom the Materials are furnished to do so, subject to
** the following conditions:
**
** The above copyright notice and this permission notice shall be included
** in all copies or substantial portions of the Materials.
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
*/
#ifndef __wglew_h__
#define __wglew_h__
#define __WGLEW_H__
#ifdef __wglext_h_
#error wglext.h included before wglew.h
#endif
#define __wglext_h_
#if !defined(WINAPI)
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN 1
# endif
#include <windows.h>
# undef WIN32_LEAN_AND_MEAN
#endif
/*
* GLEW_STATIC needs to be set when using the static version.
* GLEW_BUILD is set when building the DLL version.
*/
#ifdef GLEW_STATIC
# define GLEWAPI extern
#else
# ifdef GLEW_BUILD
# define GLEWAPI extern __declspec(dllexport)
# else
# define GLEWAPI extern __declspec(dllimport)
# endif
#endif
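/*
 * Illustrative note (not part of the original header): a minimal sketch of
 * how the static build is typically consumed, assuming the header is
 * installed as <GL/wglew.h> (the include path is an assumption about the
 * install layout). Defining GLEW_STATIC before inclusion makes GLEWAPI
 * expand to a plain `extern` instead of a dllimport declaration:
 *
 *   #define GLEW_STATIC
 *   #include <GL/wglew.h>
 */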
#ifdef __cplusplus
extern "C" {
#endif
/* -------------------------- WGL_3DFX_multisample ------------------------- */
#ifndef WGL_3DFX_multisample
#define WGL_3DFX_multisample 1
#define WGL_SAMPLE_BUFFERS_3DFX 0x2060
#define WGL_SAMPLES_3DFX 0x2061
#define WGLEW_3DFX_multisample WGLEW_GET_VAR(__WGLEW_3DFX_multisample)
#endif /* WGL_3DFX_multisample */
/* ------------------------- WGL_3DL_stereo_control ------------------------ */
#ifndef WGL_3DL_stereo_control
#define WGL_3DL_stereo_control 1
#define WGL_STEREO_EMITTER_ENABLE_3DL 0x2055
#define WGL_STEREO_EMITTER_DISABLE_3DL 0x2056
#define WGL_STEREO_POLARITY_NORMAL_3DL 0x2057
#define WGL_STEREO_POLARITY_INVERT_3DL 0x2058
typedef BOOL (WINAPI * PFNWGLSETSTEREOEMITTERSTATE3DLPROC) (HDC hDC, UINT uState);
#define wglSetStereoEmitterState3DL WGLEW_GET_FUN(__wglewSetStereoEmitterState3DL)
#define WGLEW_3DL_stereo_control WGLEW_GET_VAR(__WGLEW_3DL_stereo_control)
#endif /* WGL_3DL_stereo_control */
/* ------------------------ WGL_AMD_gpu_association ------------------------ */
#ifndef WGL_AMD_gpu_association
#define WGL_AMD_gpu_association 1
#define WGL_GPU_VENDOR_AMD 0x1F00
#define WGL_GPU_RENDERER_STRING_AMD 0x1F01
#define WGL_GPU_OPENGL_VERSION_STRING_AMD 0x1F02
#define WGL_GPU_FASTEST_TARGET_GPUS_AMD 0x21A2
#define WGL_GPU_RAM_AMD 0x21A3
#define WGL_GPU_CLOCK_AMD 0x21A4
#define WGL_GPU_NUM_PIPES_AMD 0x21A5
#define WGL_GPU_NUM_SIMD_AMD 0x21A6
#define WGL_GPU_NUM_RB_AMD 0x21A7
#define WGL_GPU_NUM_SPI_AMD 0x21A8
typedef VOID (WINAPI * PFNWGLBLITCONTEXTFRAMEBUFFERAMDPROC) (HGLRC dstCtx, GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
typedef HGLRC (WINAPI * PFNWGLCREATEASSOCIATEDCONTEXTAMDPROC) (UINT id);
typedef HGLRC (WINAPI * PFNWGLCREATEASSOCIATEDCONTEXTATTRIBSAMDPROC) (UINT id, HGLRC hShareContext, const int* attribList);
typedef BOOL (WINAPI * PFNWGLDELETEASSOCIATEDCONTEXTAMDPROC) (HGLRC hglrc);
typedef UINT (WINAPI * PFNWGLGETCONTEXTGPUIDAMDPROC) (HGLRC hglrc);
typedef HGLRC (WINAPI * PFNWGLGETCURRENTASSOCIATEDCONTEXTAMDPROC) (void);
typedef UINT (WINAPI * PFNWGLGETGPUIDSAMDPROC) (UINT maxCount, UINT* ids);
typedef INT (WINAPI * PFNWGLGETGPUINFOAMDPROC) (UINT id, INT property, GLenum dataType, UINT size, void* data);
typedef BOOL (WINAPI * PFNWGLMAKEASSOCIATEDCONTEXTCURRENTAMDPROC) (HGLRC hglrc);
#define wglBlitContextFramebufferAMD WGLEW_GET_FUN(__wglewBlitContextFramebufferAMD)
#define wglCreateAssociatedContextAMD WGLEW_GET_FUN(__wglewCreateAssociatedContextAMD)
#define wglCreateAssociatedContextAttribsAMD WGLEW_GET_FUN(__wglewCreateAssociatedContextAttribsAMD)
#define wglDeleteAssociatedContextAMD WGLEW_GET_FUN(__wglewDeleteAssociatedContextAMD)
#define wglGetContextGPUIDAMD WGLEW_GET_FUN(__wglewGetContextGPUIDAMD)
#define wglGetCurrentAssociatedContextAMD WGLEW_GET_FUN(__wglewGetCurrentAssociatedContextAMD)
#define wglGetGPUIDsAMD WGLEW_GET_FUN(__wglewGetGPUIDsAMD)
#define wglGetGPUInfoAMD WGLEW_GET_FUN(__wglewGetGPUInfoAMD)
#define wglMakeAssociatedContextCurrentAMD WGLEW_GET_FUN(__wglewMakeAssociatedContextCurrentAMD)
#define WGLEW_AMD_gpu_association WGLEW_GET_VAR(__WGLEW_AMD_gpu_association)
#endif /* WGL_AMD_gpu_association */
/* ------------------------- WGL_ARB_buffer_region ------------------------- */
#ifndef WGL_ARB_buffer_region
#define WGL_ARB_buffer_region 1
#define WGL_FRONT_COLOR_BUFFER_BIT_ARB 0x00000001
#define WGL_BACK_COLOR_BUFFER_BIT_ARB 0x00000002
#define WGL_DEPTH_BUFFER_BIT_ARB 0x00000004
#define WGL_STENCIL_BUFFER_BIT_ARB 0x00000008
typedef HANDLE (WINAPI * PFNWGLCREATEBUFFERREGIONARBPROC) (HDC hDC, int iLayerPlane, UINT uType);
typedef VOID (WINAPI * PFNWGLDELETEBUFFERREGIONARBPROC) (HANDLE hRegion);
typedef BOOL (WINAPI * PFNWGLRESTOREBUFFERREGIONARBPROC) (HANDLE hRegion, int x, int y, int width, int height, int xSrc, int ySrc);
typedef BOOL (WINAPI * PFNWGLSAVEBUFFERREGIONARBPROC) (HANDLE hRegion, int x, int y, int width, int height);
#define wglCreateBufferRegionARB WGLEW_GET_FUN(__wglewCreateBufferRegionARB)
#define wglDeleteBufferRegionARB WGLEW_GET_FUN(__wglewDeleteBufferRegionARB)
#define wglRestoreBufferRegionARB WGLEW_GET_FUN(__wglewRestoreBufferRegionARB)
#define wglSaveBufferRegionARB WGLEW_GET_FUN(__wglewSaveBufferRegionARB)
#define WGLEW_ARB_buffer_region WGLEW_GET_VAR(__WGLEW_ARB_buffer_region)
#endif /* WGL_ARB_buffer_region */
/* ------------------------- WGL_ARB_create_context ------------------------ */
#ifndef WGL_ARB_create_context
#define WGL_ARB_create_context 1
#define WGL_CONTEXT_DEBUG_BIT_ARB 0x0001
#define WGL_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB 0x0002
#define WGL_CONTEXT_MAJOR_VERSION_ARB 0x2091
#define WGL_CONTEXT_MINOR_VERSION_ARB 0x2092
#define WGL_CONTEXT_LAYER_PLANE_ARB 0x2093
#define WGL_CONTEXT_FLAGS_ARB 0x2094
#define ERROR_INVALID_VERSION_ARB 0x2095
#define ERROR_INVALID_PROFILE_ARB 0x2096
typedef HGLRC (WINAPI * PFNWGLCREATECONTEXTATTRIBSARBPROC) (HDC hDC, HGLRC hShareContext, const int* attribList);
#define wglCreateContextAttribsARB WGLEW_GET_FUN(__wglewCreateContextAttribsARB)
#define WGLEW_ARB_create_context WGLEW_GET_VAR(__WGLEW_ARB_create_context)
#endif /* WGL_ARB_create_context */
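/*
 * Illustrative sketch (not part of the original header): one way the
 * attributes above might be used to request a specific OpenGL version via
 * wglCreateContextAttribsARB. It assumes `hdc` is a valid device context
 * and that the function pointer has already been resolved through a
 * temporary legacy context; the version numbers are only an example and
 * error handling is omitted.
 *
 *   const int attribs[] = {
 *       WGL_CONTEXT_MAJOR_VERSION_ARB, 3,
 *       WGL_CONTEXT_MINOR_VERSION_ARB, 3,
 *       WGL_CONTEXT_FLAGS_ARB, 0,
 *       0
 *   };
 *   HGLRC ctx = wglCreateContextAttribsARB(hdc, NULL, attribs);
 *   if (ctx)
 *       wglMakeCurrent(hdc, ctx);
 */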
/* --------------------- WGL_ARB_create_context_profile -------------------- */
#ifndef WGL_ARB_create_context_profile
#define WGL_ARB_create_context_profile 1
#define WGL_CONTEXT_CORE_PROFILE_BIT_ARB 0x00000001
#define WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB 0x00000002
#define WGL_CONTEXT_PROFILE_MASK_ARB 0x9126
#define WGLEW_ARB_create_context_profile WGLEW_GET_VAR(__WGLEW_ARB_create_context_profile)
#endif /* WGL_ARB_create_context_profile */
/* ------------------- WGL_ARB_create_context_robustness ------------------- */
#ifndef WGL_ARB_create_context_robustness
#define WGL_ARB_create_context_robustness 1
#define WGL_CONTEXT_ROBUST_ACCESS_BIT_ARB 0x00000004
#define WGL_LOSE_CONTEXT_ON_RESET_ARB 0x8252
#define WGL_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB 0x8256
#define WGL_NO_RESET_NOTIFICATION_ARB 0x8261
#define WGLEW_ARB_create_context_robustness WGLEW_GET_VAR(__WGLEW_ARB_create_context_robustness)
#endif /* WGL_ARB_create_context_robustness */
/* ----------------------- WGL_ARB_extensions_string ----------------------- */
#ifndef WGL_ARB_extensions_string
#define WGL_ARB_extensions_string 1
typedef const char* (WINAPI * PFNWGLGETEXTENSIONSSTRINGARBPROC) (HDC hdc);
#define wglGetExtensionsStringARB WGLEW_GET_FUN(__wglewGetExtensionsStringARB)
#define WGLEW_ARB_extensions_string WGLEW_GET_VAR(__WGLEW_ARB_extensions_string)
#endif /* WGL_ARB_extensions_string */
/* ------------------------ WGL_ARB_framebuffer_sRGB ----------------------- */
#ifndef WGL_ARB_framebuffer_sRGB
#define WGL_ARB_framebuffer_sRGB 1
#define WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB 0x20A9
#define WGLEW_ARB_framebuffer_sRGB WGLEW_GET_VAR(__WGLEW_ARB_framebuffer_sRGB)
#endif /* WGL_ARB_framebuffer_sRGB */
/* ----------------------- WGL_ARB_make_current_read ----------------------- */
#ifndef WGL_ARB_make_current_read
#define WGL_ARB_make_current_read 1
#define ERROR_INVALID_PIXEL_TYPE_ARB 0x2043
#define ERROR_INCOMPATIBLE_DEVICE_CONTEXTS_ARB 0x2054
typedef HDC (WINAPI * PFNWGLGETCURRENTREADDCARBPROC) (VOID);
typedef BOOL (WINAPI * PFNWGLMAKECONTEXTCURRENTARBPROC) (HDC hDrawDC, HDC hReadDC, HGLRC hglrc);
#define wglGetCurrentReadDCARB WGLEW_GET_FUN(__wglewGetCurrentReadDCARB)
#define wglMakeContextCurrentARB WGLEW_GET_FUN(__wglewMakeContextCurrentARB)
#define WGLEW_ARB_make_current_read WGLEW_GET_VAR(__WGLEW_ARB_make_current_read)
#endif /* WGL_ARB_make_current_read */
/* -------------------------- WGL_ARB_multisample -------------------------- */
#ifndef WGL_ARB_multisample
#define WGL_ARB_multisample 1
#define WGL_SAMPLE_BUFFERS_ARB 0x2041
#define WGL_SAMPLES_ARB 0x2042
#define WGLEW_ARB_multisample WGLEW_GET_VAR(__WGLEW_ARB_multisample)
#endif /* WGL_ARB_multisample */
/* ---------------------------- WGL_ARB_pbuffer ---------------------------- */
#ifndef WGL_ARB_pbuffer
#define WGL_ARB_pbuffer 1
#define WGL_DRAW_TO_PBUFFER_ARB 0x202D
#define WGL_MAX_PBUFFER_PIXELS_ARB 0x202E
#define WGL_MAX_PBUFFER_WIDTH_ARB 0x202F
#define WGL_MAX_PBUFFER_HEIGHT_ARB 0x2030
#define WGL_PBUFFER_LARGEST_ARB 0x2033
#define WGL_PBUFFER_WIDTH_ARB 0x2034
#define WGL_PBUFFER_HEIGHT_ARB 0x2035
#define WGL_PBUFFER_LOST_ARB 0x2036
DECLARE_HANDLE(HPBUFFERARB);
typedef HPBUFFERARB (WINAPI * PFNWGLCREATEPBUFFERARBPROC) (HDC hDC, int iPixelFormat, int iWidth, int iHeight, const int* piAttribList);
typedef BOOL (WINAPI * PFNWGLDESTROYPBUFFERARBPROC) (HPBUFFERARB hPbuffer);
typedef HDC (WINAPI * PFNWGLGETPBUFFERDCARBPROC) (HPBUFFERARB hPbuffer);
typedef BOOL (WINAPI * PFNWGLQUERYPBUFFERARBPROC) (HPBUFFERARB hPbuffer, int iAttribute, int* piValue);
typedef int (WINAPI * PFNWGLRELEASEPBUFFERDCARBPROC) (HPBUFFERARB hPbuffer, HDC hDC);
#define wglCreatePbufferARB WGLEW_GET_FUN(__wglewCreatePbufferARB)
#define wglDestroyPbufferARB WGLEW_GET_FUN(__wglewDestroyPbufferARB)
#define wglGetPbufferDCARB WGLEW_GET_FUN(__wglewGetPbufferDCARB)
#define wglQueryPbufferARB WGLEW_GET_FUN(__wglewQueryPbufferARB)
#define wglReleasePbufferDCARB WGLEW_GET_FUN(__wglewReleasePbufferDCARB)
#define WGLEW_ARB_pbuffer WGLEW_GET_VAR(__WGLEW_ARB_pbuffer)
#endif /* WGL_ARB_pbuffer */
/* -------------------------- WGL_ARB_pixel_format ------------------------- */
#ifndef WGL_ARB_pixel_format
#define WGL_ARB_pixel_format 1
#define WGL_NUMBER_PIXEL_FORMATS_ARB 0x2000
#define WGL_DRAW_TO_WINDOW_ARB 0x2001
#define WGL_DRAW_TO_BITMAP_ARB 0x2002
#define WGL_ACCELERATION_ARB 0x2003
#define WGL_NEED_PALETTE_ARB 0x2004
#define WGL_NEED_SYSTEM_PALETTE_ARB 0x2005
#define WGL_SWAP_LAYER_BUFFERS_ARB 0x2006
#define WGL_SWAP_METHOD_ARB 0x2007
#define WGL_NUMBER_OVERLAYS_ARB 0x2008
#define WGL_NUMBER_UNDERLAYS_ARB 0x2009
#define WGL_TRANSPARENT_ARB 0x200A
#define WGL_SHARE_DEPTH_ARB 0x200C
#define WGL_SHARE_STENCIL_ARB 0x200D
#define WGL_SHARE_ACCUM_ARB 0x200E
#define WGL_SUPPORT_GDI_ARB 0x200F
#define WGL_SUPPORT_OPENGL_ARB 0x2010
#define WGL_DOUBLE_BUFFER_ARB 0x2011
#define WGL_STEREO_ARB 0x2012
#define WGL_PIXEL_TYPE_ARB 0x2013
#define WGL_COLOR_BITS_ARB 0x2014
#define WGL_RED_BITS_ARB 0x2015
#define WGL_RED_SHIFT_ARB 0x2016
#define WGL_GREEN_BITS_ARB 0x2017
#define WGL_GREEN_SHIFT_ARB 0x2018
#define WGL_BLUE_BITS_ARB 0x2019
#define WGL_BLUE_SHIFT_ARB 0x201A
#define WGL_ALPHA_BITS_ARB 0x201B
#define WGL_ALPHA_SHIFT_ARB 0x201C
#define WGL_ACCUM_BITS_ARB 0x201D
#define WGL_ACCUM_RED_BITS_ARB 0x201E
#define WGL_ACCUM_GREEN_BITS_ARB 0x201F
#define WGL_ACCUM_BLUE_BITS_ARB 0x2020
#define WGL_ACCUM_ALPHA_BITS_ARB 0x2021
#define WGL_DEPTH_BITS_ARB 0x2022
#define WGL_STENCIL_BITS_ARB 0x2023
#define WGL_AUX_BUFFERS_ARB 0x2024
#define WGL_NO_ACCELERATION_ARB 0x2025
#define WGL_GENERIC_ACCELERATION_ARB 0x2026
#define WGL_FULL_ACCELERATION_ARB 0x2027
#define WGL_SWAP_EXCHANGE_ARB 0x2028
#define WGL_SWAP_COPY_ARB 0x2029
#define WGL_SWAP_UNDEFINED_ARB 0x202A
#define WGL_TYPE_RGBA_ARB 0x202B
#define WGL_TYPE_COLORINDEX_ARB 0x202C
#define WGL_TRANSPARENT_RED_VALUE_ARB 0x2037
#define WGL_TRANSPARENT_GREEN_VALUE_ARB 0x2038
#define WGL_TRANSPARENT_BLUE_VALUE_ARB 0x2039
#define WGL_TRANSPARENT_ALPHA_VALUE_ARB 0x203A
#define WGL_TRANSPARENT_INDEX_VALUE_ARB 0x203B
typedef BOOL (WINAPI * PFNWGLCHOOSEPIXELFORMATARBPROC) (HDC hdc, const int* piAttribIList, const FLOAT *pfAttribFList, UINT nMaxFormats, int *piFormats, UINT *nNumFormats);
typedef BOOL (WINAPI * PFNWGLGETPIXELFORMATATTRIBFVARBPROC) (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, const int* piAttributes, FLOAT *pfValues);
typedef BOOL (WINAPI * PFNWGLGETPIXELFORMATATTRIBIVARBPROC) (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, const int* piAttributes, int *piValues);
#define wglChoosePixelFormatARB WGLEW_GET_FUN(__wglewChoosePixelFormatARB)
#define wglGetPixelFormatAttribfvARB WGLEW_GET_FUN(__wglewGetPixelFormatAttribfvARB)
#define wglGetPixelFormatAttribivARB WGLEW_GET_FUN(__wglewGetPixelFormatAttribivARB)
#define WGLEW_ARB_pixel_format WGLEW_GET_VAR(__WGLEW_ARB_pixel_format)
#endif /* WGL_ARB_pixel_format */
/* ----------------------- WGL_ARB_pixel_format_float ---------------------- */
#ifndef WGL_ARB_pixel_format_float
#define WGL_ARB_pixel_format_float 1
#define WGL_TYPE_RGBA_FLOAT_ARB 0x21A0
#define WGLEW_ARB_pixel_format_float WGLEW_GET_VAR(__WGLEW_ARB_pixel_format_float)
#endif /* WGL_ARB_pixel_format_float */
/* ------------------------- WGL_ARB_render_texture ------------------------ */
#ifndef WGL_ARB_render_texture
#define WGL_ARB_render_texture 1
#define WGL_BIND_TO_TEXTURE_RGB_ARB 0x2070
#define WGL_BIND_TO_TEXTURE_RGBA_ARB 0x2071
#define WGL_TEXTURE_FORMAT_ARB 0x2072
#define WGL_TEXTURE_TARGET_ARB 0x2073
#define WGL_MIPMAP_TEXTURE_ARB 0x2074
#define WGL_TEXTURE_RGB_ARB 0x2075
#define WGL_TEXTURE_RGBA_ARB 0x2076
#define WGL_NO_TEXTURE_ARB 0x2077
#define WGL_TEXTURE_CUBE_MAP_ARB 0x2078
#define WGL_TEXTURE_1D_ARB 0x2079
#define WGL_TEXTURE_2D_ARB 0x207A
#define WGL_MIPMAP_LEVEL_ARB 0x207B
#define WGL_CUBE_MAP_FACE_ARB 0x207C
#define WGL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB 0x207D
#define WGL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB 0x207E
#define WGL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB 0x207F
#define WGL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB 0x2080
#define WGL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB 0x2081
#define WGL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB 0x2082
#define WGL_FRONT_LEFT_ARB 0x2083
#define WGL_FRONT_RIGHT_ARB 0x2084
#define WGL_BACK_LEFT_ARB 0x2085
#define WGL_BACK_RIGHT_ARB 0x2086
#define WGL_AUX0_ARB 0x2087
#define WGL_AUX1_ARB 0x2088
#define WGL_AUX2_ARB 0x2089
#define WGL_AUX3_ARB 0x208A
#define WGL_AUX4_ARB 0x208B
#define WGL_AUX5_ARB 0x208C
#define WGL_AUX6_ARB 0x208D
#define WGL_AUX7_ARB 0x208E
#define WGL_AUX8_ARB 0x208F
#define WGL_AUX9_ARB 0x2090
typedef BOOL (WINAPI * PFNWGLBINDTEXIMAGEARBPROC) (HPBUFFERARB hPbuffer, int iBuffer);
typedef BOOL (WINAPI * PFNWGLRELEASETEXIMAGEARBPROC) (HPBUFFERARB hPbuffer, int iBuffer);
typedef BOOL (WINAPI * PFNWGLSETPBUFFERATTRIBARBPROC) (HPBUFFERARB hPbuffer, const int* piAttribList);
#define wglBindTexImageARB WGLEW_GET_FUN(__wglewBindTexImageARB)
#define wglReleaseTexImageARB WGLEW_GET_FUN(__wglewReleaseTexImageARB)
#define wglSetPbufferAttribARB WGLEW_GET_FUN(__wglewSetPbufferAttribARB)
#define WGLEW_ARB_render_texture WGLEW_GET_VAR(__WGLEW_ARB_render_texture)
#endif /* WGL_ARB_render_texture */
/* ----------------------- WGL_ATI_pixel_format_float ---------------------- */
#ifndef WGL_ATI_pixel_format_float
#define WGL_ATI_pixel_format_float 1
#define WGL_TYPE_RGBA_FLOAT_ATI 0x21A0
#define GL_RGBA_FLOAT_MODE_ATI 0x8820
#define GL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI 0x8835
#define WGLEW_ATI_pixel_format_float WGLEW_GET_VAR(__WGLEW_ATI_pixel_format_float)
#endif /* WGL_ATI_pixel_format_float */
/* -------------------- WGL_ATI_render_texture_rectangle ------------------- */
#ifndef WGL_ATI_render_texture_rectangle
#define WGL_ATI_render_texture_rectangle 1
#define WGL_TEXTURE_RECTANGLE_ATI 0x21A5
#define WGLEW_ATI_render_texture_rectangle WGLEW_GET_VAR(__WGLEW_ATI_render_texture_rectangle)
#endif /* WGL_ATI_render_texture_rectangle */
/* ------------------- WGL_EXT_create_context_es2_profile ------------------ */
#ifndef WGL_EXT_create_context_es2_profile
#define WGL_EXT_create_context_es2_profile 1
#define WGL_CONTEXT_ES2_PROFILE_BIT_EXT 0x00000004
#define WGLEW_EXT_create_context_es2_profile WGLEW_GET_VAR(__WGLEW_EXT_create_context_es2_profile)
#endif /* WGL_EXT_create_context_es2_profile */
/* -------------------------- WGL_EXT_depth_float -------------------------- */
#ifndef WGL_EXT_depth_float
#define WGL_EXT_depth_float 1
#define WGL_DEPTH_FLOAT_EXT 0x2040
#define WGLEW_EXT_depth_float WGLEW_GET_VAR(__WGLEW_EXT_depth_float)
#endif /* WGL_EXT_depth_float */
/* ---------------------- WGL_EXT_display_color_table ---------------------- */
#ifndef WGL_EXT_display_color_table
#define WGL_EXT_display_color_table 1
typedef GLboolean (WINAPI * PFNWGLBINDDISPLAYCOLORTABLEEXTPROC) (GLushort id);
typedef GLboolean (WINAPI * PFNWGLCREATEDISPLAYCOLORTABLEEXTPROC) (GLushort id);
typedef void (WINAPI * PFNWGLDESTROYDISPLAYCOLORTABLEEXTPROC) (GLushort id);
typedef GLboolean (WINAPI * PFNWGLLOADDISPLAYCOLORTABLEEXTPROC) (GLushort* table, GLuint length);
#define wglBindDisplayColorTableEXT WGLEW_GET_FUN(__wglewBindDisplayColorTableEXT)
#define wglCreateDisplayColorTableEXT WGLEW_GET_FUN(__wglewCreateDisplayColorTableEXT)
#define wglDestroyDisplayColorTableEXT WGLEW_GET_FUN(__wglewDestroyDisplayColorTableEXT)
#define wglLoadDisplayColorTableEXT WGLEW_GET_FUN(__wglewLoadDisplayColorTableEXT)
#define WGLEW_EXT_display_color_table WGLEW_GET_VAR(__WGLEW_EXT_display_color_table)
#endif /* WGL_EXT_display_color_table */
/* ----------------------- WGL_EXT_extensions_string ----------------------- */
#ifndef WGL_EXT_extensions_string
#define WGL_EXT_extensions_string 1
typedef const char* (WINAPI * PFNWGLGETEXTENSIONSSTRINGEXTPROC) (void);
#define wglGetExtensionsStringEXT WGLEW_GET_FUN(__wglewGetExtensionsStringEXT)
#define WGLEW_EXT_extensions_string WGLEW_GET_VAR(__WGLEW_EXT_extensions_string)
#endif /* WGL_EXT_extensions_string */
/* ------------------------ WGL_EXT_framebuffer_sRGB ----------------------- */
#ifndef WGL_EXT_framebuffer_sRGB
#define WGL_EXT_framebuffer_sRGB 1
#define WGL_FRAMEBUFFER_SRGB_CAPABLE_EXT 0x20A9
#define WGLEW_EXT_framebuffer_sRGB WGLEW_GET_VAR(__WGLEW_EXT_framebuffer_sRGB)
#endif /* WGL_EXT_framebuffer_sRGB */
/* ----------------------- WGL_EXT_make_current_read ----------------------- */
#ifndef WGL_EXT_make_current_read
#define WGL_EXT_make_current_read 1
#define ERROR_INVALID_PIXEL_TYPE_EXT 0x2043
typedef HDC (WINAPI * PFNWGLGETCURRENTREADDCEXTPROC) (VOID);
typedef BOOL (WINAPI * PFNWGLMAKECONTEXTCURRENTEXTPROC) (HDC hDrawDC, HDC hReadDC, HGLRC hglrc);
#define wglGetCurrentReadDCEXT WGLEW_GET_FUN(__wglewGetCurrentReadDCEXT)
#define wglMakeContextCurrentEXT WGLEW_GET_FUN(__wglewMakeContextCurrentEXT)
#define WGLEW_EXT_make_current_read WGLEW_GET_VAR(__WGLEW_EXT_make_current_read)
#endif /* WGL_EXT_make_current_read */
/* -------------------------- WGL_EXT_multisample -------------------------- */
#ifndef WGL_EXT_multisample
#define WGL_EXT_multisample 1
#define WGL_SAMPLE_BUFFERS_EXT 0x2041
#define WGL_SAMPLES_EXT 0x2042
#define WGLEW_EXT_multisample WGLEW_GET_VAR(__WGLEW_EXT_multisample)
#endif /* WGL_EXT_multisample */
/* ---------------------------- WGL_EXT_pbuffer ---------------------------- */
#ifndef WGL_EXT_pbuffer
#define WGL_EXT_pbuffer 1
#define WGL_DRAW_TO_PBUFFER_EXT 0x202D
#define WGL_MAX_PBUFFER_PIXELS_EXT 0x202E
#define WGL_MAX_PBUFFER_WIDTH_EXT 0x202F
#define WGL_MAX_PBUFFER_HEIGHT_EXT 0x2030
#define WGL_OPTIMAL_PBUFFER_WIDTH_EXT 0x2031
#define WGL_OPTIMAL_PBUFFER_HEIGHT_EXT 0x2032
#define WGL_PBUFFER_LARGEST_EXT 0x2033
#define WGL_PBUFFER_WIDTH_EXT 0x2034
#define WGL_PBUFFER_HEIGHT_EXT 0x2035
DECLARE_HANDLE(HPBUFFEREXT);
typedef HPBUFFEREXT (WINAPI * PFNWGLCREATEPBUFFEREXTPROC) (HDC hDC, int iPixelFormat, int iWidth, int iHeight, const int* piAttribList);
typedef BOOL (WINAPI * PFNWGLDESTROYPBUFFEREXTPROC) (HPBUFFEREXT hPbuffer);
typedef HDC (WINAPI * PFNWGLGETPBUFFERDCEXTPROC) (HPBUFFEREXT hPbuffer);
typedef BOOL (WINAPI * PFNWGLQUERYPBUFFEREXTPROC) (HPBUFFEREXT hPbuffer, int iAttribute, int* piValue);
typedef int (WINAPI * PFNWGLRELEASEPBUFFERDCEXTPROC) (HPBUFFEREXT hPbuffer, HDC hDC);
#define wglCreatePbufferEXT WGLEW_GET_FUN(__wglewCreatePbufferEXT)
#define wglDestroyPbufferEXT WGLEW_GET_FUN(__wglewDestroyPbufferEXT)
#define wglGetPbufferDCEXT WGLEW_GET_FUN(__wglewGetPbufferDCEXT)
#define wglQueryPbufferEXT WGLEW_GET_FUN(__wglewQueryPbufferEXT)
#define wglReleasePbufferDCEXT WGLEW_GET_FUN(__wglewReleasePbufferDCEXT)
#define WGLEW_EXT_pbuffer WGLEW_GET_VAR(__WGLEW_EXT_pbuffer)
#endif /* WGL_EXT_pbuffer */
/* -------------------------- WGL_EXT_pixel_format ------------------------- */
#ifndef WGL_EXT_pixel_format
#define WGL_EXT_pixel_format 1
#define WGL_NUMBER_PIXEL_FORMATS_EXT 0x2000
#define WGL_DRAW_TO_WINDOW_EXT 0x2001
#define WGL_DRAW_TO_BITMAP_EXT 0x2002
#define WGL_ACCELERATION_EXT 0x2003
#define WGL_NEED_PALETTE_EXT 0x2004
#define WGL_NEED_SYSTEM_PALETTE_EXT 0x2005
#define WGL_SWAP_LAYER_BUFFERS_EXT 0x2006
#define WGL_SWAP_METHOD_EXT 0x2007
#define WGL_NUMBER_OVERLAYS_EXT 0x2008
#define WGL_NUMBER_UNDERLAYS_EXT 0x2009
#define WGL_TRANSPARENT_EXT 0x200A
#define WGL_TRANSPARENT_VALUE_EXT 0x200B
#define WGL_SHARE_DEPTH_EXT 0x200C
#define WGL_SHARE_STENCIL_EXT 0x200D
#define WGL_SHARE_ACCUM_EXT 0x200E
#define WGL_SUPPORT_GDI_EXT 0x200F
#define WGL_SUPPORT_OPENGL_EXT 0x2010
#define WGL_DOUBLE_BUFFER_EXT 0x2011
#define WGL_STEREO_EXT 0x2012
#define WGL_PIXEL_TYPE_EXT 0x2013
#define WGL_COLOR_BITS_EXT 0x2014
#define WGL_RED_BITS_EXT 0x2015
#define WGL_RED_SHIFT_EXT 0x2016
#define WGL_GREEN_BITS_EXT 0x2017
#define WGL_GREEN_SHIFT_EXT 0x2018
#define WGL_BLUE_BITS_EXT 0x2019
#define WGL_BLUE_SHIFT_EXT 0x201A
#define WGL_ALPHA_BITS_EXT 0x201B
#define WGL_ALPHA_SHIFT_EXT 0x201C
#define WGL_ACCUM_BITS_EXT 0x201D
#define WGL_ACCUM_RED_BITS_EXT 0x201E
#define WGL_ACCUM_GREEN_BITS_EXT 0x201F
#define WGL_ACCUM_BLUE_BITS_EXT 0x2020
#define WGL_ACCUM_ALPHA_BITS_EXT 0x2021
#define WGL_DEPTH_BITS_EXT 0x2022
#define WGL_STENCIL_BITS_EXT 0x2023
#define WGL_AUX_BUFFERS_EXT 0x2024
#define WGL_NO_ACCELERATION_EXT 0x2025
#define WGL_GENERIC_ACCELERATION_EXT 0x2026
#define WGL_FULL_ACCELERATION_EXT 0x2027
#define WGL_SWAP_EXCHANGE_EXT 0x2028
#define WGL_SWAP_COPY_EXT 0x2029
#define WGL_SWAP_UNDEFINED_EXT 0x202A
#define WGL_TYPE_RGBA_EXT 0x202B
#define WGL_TYPE_COLORINDEX_EXT 0x202C
typedef BOOL (WINAPI * PFNWGLCHOOSEPIXELFORMATEXTPROC) (HDC hdc, const int* piAttribIList, const FLOAT *pfAttribFList, UINT nMaxFormats, int *piFormats, UINT *nNumFormats);
typedef BOOL (WINAPI * PFNWGLGETPIXELFORMATATTRIBFVEXTPROC) (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, int* piAttributes, FLOAT *pfValues);
typedef BOOL (WINAPI * PFNWGLGETPIXELFORMATATTRIBIVEXTPROC) (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, int* piAttributes, int *piValues);
#define wglChoosePixelFormatEXT WGLEW_GET_FUN(__wglewChoosePixelFormatEXT)
#define wglGetPixelFormatAttribfvEXT WGLEW_GET_FUN(__wglewGetPixelFormatAttribfvEXT)
#define wglGetPixelFormatAttribivEXT WGLEW_GET_FUN(__wglewGetPixelFormatAttribivEXT)
#define WGLEW_EXT_pixel_format WGLEW_GET_VAR(__WGLEW_EXT_pixel_format)
#endif /* WGL_EXT_pixel_format */
/* ------------------- WGL_EXT_pixel_format_packed_float ------------------- */
#ifndef WGL_EXT_pixel_format_packed_float
#define WGL_EXT_pixel_format_packed_float 1
#define WGL_TYPE_RGBA_UNSIGNED_FLOAT_EXT 0x20A8
#define WGLEW_EXT_pixel_format_packed_float WGLEW_GET_VAR(__WGLEW_EXT_pixel_format_packed_float)
#endif /* WGL_EXT_pixel_format_packed_float */
/* -------------------------- WGL_EXT_swap_control ------------------------- */
#ifndef WGL_EXT_swap_control
#define WGL_EXT_swap_control 1
typedef int (WINAPI * PFNWGLGETSWAPINTERVALEXTPROC) (void);
typedef BOOL (WINAPI * PFNWGLSWAPINTERVALEXTPROC) (int interval);
#define wglGetSwapIntervalEXT WGLEW_GET_FUN(__wglewGetSwapIntervalEXT)
#define wglSwapIntervalEXT WGLEW_GET_FUN(__wglewSwapIntervalEXT)
#define WGLEW_EXT_swap_control WGLEW_GET_VAR(__WGLEW_EXT_swap_control)
#endif /* WGL_EXT_swap_control */
/* ----------------------- WGL_EXT_swap_control_tear ----------------------- */
#ifndef WGL_EXT_swap_control_tear
#define WGL_EXT_swap_control_tear 1
#define WGLEW_EXT_swap_control_tear WGLEW_GET_VAR(__WGLEW_EXT_swap_control_tear)
#endif /* WGL_EXT_swap_control_tear */
/* --------------------- WGL_I3D_digital_video_control --------------------- */
#ifndef WGL_I3D_digital_video_control
#define WGL_I3D_digital_video_control 1
#define WGL_DIGITAL_VIDEO_CURSOR_ALPHA_FRAMEBUFFER_I3D 0x2050
#define WGL_DIGITAL_VIDEO_CURSOR_ALPHA_VALUE_I3D 0x2051
#define WGL_DIGITAL_VIDEO_CURSOR_INCLUDED_I3D 0x2052
#define WGL_DIGITAL_VIDEO_GAMMA_CORRECTED_I3D 0x2053
typedef BOOL (WINAPI * PFNWGLGETDIGITALVIDEOPARAMETERSI3DPROC) (HDC hDC, int iAttribute, int* piValue);
typedef BOOL (WINAPI * PFNWGLSETDIGITALVIDEOPARAMETERSI3DPROC) (HDC hDC, int iAttribute, const int* piValue);
#define wglGetDigitalVideoParametersI3D WGLEW_GET_FUN(__wglewGetDigitalVideoParametersI3D)
#define wglSetDigitalVideoParametersI3D WGLEW_GET_FUN(__wglewSetDigitalVideoParametersI3D)
#define WGLEW_I3D_digital_video_control WGLEW_GET_VAR(__WGLEW_I3D_digital_video_control)
#endif /* WGL_I3D_digital_video_control */
/* ----------------------------- WGL_I3D_gamma ----------------------------- */
#ifndef WGL_I3D_gamma
#define WGL_I3D_gamma 1
#define WGL_GAMMA_TABLE_SIZE_I3D 0x204E
#define WGL_GAMMA_EXCLUDE_DESKTOP_I3D 0x204F
typedef BOOL (WINAPI * PFNWGLGETGAMMATABLEI3DPROC) (HDC hDC, int iEntries, USHORT* puRed, USHORT *puGreen, USHORT *puBlue);
typedef BOOL (WINAPI * PFNWGLGETGAMMATABLEPARAMETERSI3DPROC) (HDC hDC, int iAttribute, int* piValue);
typedef BOOL (WINAPI * PFNWGLSETGAMMATABLEI3DPROC) (HDC hDC, int iEntries, const USHORT* puRed, const USHORT *puGreen, const USHORT *puBlue);
typedef BOOL (WINAPI * PFNWGLSETGAMMATABLEPARAMETERSI3DPROC) (HDC hDC, int iAttribute, const int* piValue);
#define wglGetGammaTableI3D WGLEW_GET_FUN(__wglewGetGammaTableI3D)
#define wglGetGammaTableParametersI3D WGLEW_GET_FUN(__wglewGetGammaTableParametersI3D)
#define wglSetGammaTableI3D WGLEW_GET_FUN(__wglewSetGammaTableI3D)
#define wglSetGammaTableParametersI3D WGLEW_GET_FUN(__wglewSetGammaTableParametersI3D)
#define WGLEW_I3D_gamma WGLEW_GET_VAR(__WGLEW_I3D_gamma)
#endif /* WGL_I3D_gamma */
/* ---------------------------- WGL_I3D_genlock ---------------------------- */
#ifndef WGL_I3D_genlock
#define WGL_I3D_genlock 1
#define WGL_GENLOCK_SOURCE_MULTIVIEW_I3D 0x2044
#define WGL_GENLOCK_SOURCE_EXTERNAL_SYNC_I3D 0x2045
#define WGL_GENLOCK_SOURCE_EXTERNAL_FIELD_I3D 0x2046
#define WGL_GENLOCK_SOURCE_EXTERNAL_TTL_I3D 0x2047
#define WGL_GENLOCK_SOURCE_DIGITAL_SYNC_I3D 0x2048
#define WGL_GENLOCK_SOURCE_DIGITAL_FIELD_I3D 0x2049
#define WGL_GENLOCK_SOURCE_EDGE_FALLING_I3D 0x204A
#define WGL_GENLOCK_SOURCE_EDGE_RISING_I3D 0x204B
#define WGL_GENLOCK_SOURCE_EDGE_BOTH_I3D 0x204C
typedef BOOL (WINAPI * PFNWGLDISABLEGENLOCKI3DPROC) (HDC hDC);
typedef BOOL (WINAPI * PFNWGLENABLEGENLOCKI3DPROC) (HDC hDC);
typedef BOOL (WINAPI * PFNWGLGENLOCKSAMPLERATEI3DPROC) (HDC hDC, UINT uRate);
typedef BOOL (WINAPI * PFNWGLGENLOCKSOURCEDELAYI3DPROC) (HDC hDC, UINT uDelay);
typedef BOOL (WINAPI * PFNWGLGENLOCKSOURCEEDGEI3DPROC) (HDC hDC, UINT uEdge);
typedef BOOL (WINAPI * PFNWGLGENLOCKSOURCEI3DPROC) (HDC hDC, UINT uSource);
typedef BOOL (WINAPI * PFNWGLGETGENLOCKSAMPLERATEI3DPROC) (HDC hDC, UINT* uRate);
typedef BOOL (WINAPI * PFNWGLGETGENLOCKSOURCEDELAYI3DPROC) (HDC hDC, UINT* uDelay);
typedef BOOL (WINAPI * PFNWGLGETGENLOCKSOURCEEDGEI3DPROC) (HDC hDC, UINT* uEdge);
typedef BOOL (WINAPI * PFNWGLGETGENLOCKSOURCEI3DPROC) (HDC hDC, UINT* uSource);
typedef BOOL (WINAPI * PFNWGLISENABLEDGENLOCKI3DPROC) (HDC hDC, BOOL* pFlag);
typedef BOOL (WINAPI * PFNWGLQUERYGENLOCKMAXSOURCEDELAYI3DPROC) (HDC hDC, UINT* uMaxLineDelay, UINT *uMaxPixelDelay);
#define wglDisableGenlockI3D WGLEW_GET_FUN(__wglewDisableGenlockI3D)
#define wglEnableGenlockI3D WGLEW_GET_FUN(__wglewEnableGenlockI3D)
#define wglGenlockSampleRateI3D WGLEW_GET_FUN(__wglewGenlockSampleRateI3D)
#define wglGenlockSourceDelayI3D WGLEW_GET_FUN(__wglewGenlockSourceDelayI3D)
#define wglGenlockSourceEdgeI3D WGLEW_GET_FUN(__wglewGenlockSourceEdgeI3D)
#define wglGenlockSourceI3D WGLEW_GET_FUN(__wglewGenlockSourceI3D)
#define wglGetGenlockSampleRateI3D WGLEW_GET_FUN(__wglewGetGenlockSampleRateI3D)
#define wglGetGenlockSourceDelayI3D WGLEW_GET_FUN(__wglewGetGenlockSourceDelayI3D)
#define wglGetGenlockSourceEdgeI3D WGLEW_GET_FUN(__wglewGetGenlockSourceEdgeI3D)
#define wglGetGenlockSourceI3D WGLEW_GET_FUN(__wglewGetGenlockSourceI3D)
#define wglIsEnabledGenlockI3D WGLEW_GET_FUN(__wglewIsEnabledGenlockI3D)
#define wglQueryGenlockMaxSourceDelayI3D WGLEW_GET_FUN(__wglewQueryGenlockMaxSourceDelayI3D)
#define WGLEW_I3D_genlock WGLEW_GET_VAR(__WGLEW_I3D_genlock)
#endif /* WGL_I3D_genlock */
/* -------------------------- WGL_I3D_image_buffer ------------------------- */
#ifndef WGL_I3D_image_buffer
#define WGL_I3D_image_buffer 1
#define WGL_IMAGE_BUFFER_MIN_ACCESS_I3D 0x00000001
#define WGL_IMAGE_BUFFER_LOCK_I3D 0x00000002
typedef BOOL (WINAPI * PFNWGLASSOCIATEIMAGEBUFFEREVENTSI3DPROC) (HDC hdc, HANDLE* pEvent, LPVOID *pAddress, DWORD *pSize, UINT count);
typedef LPVOID (WINAPI * PFNWGLCREATEIMAGEBUFFERI3DPROC) (HDC hDC, DWORD dwSize, UINT uFlags);
typedef BOOL (WINAPI * PFNWGLDESTROYIMAGEBUFFERI3DPROC) (HDC hDC, LPVOID pAddress);
typedef BOOL (WINAPI * PFNWGLRELEASEIMAGEBUFFEREVENTSI3DPROC) (HDC hdc, LPVOID* pAddress, UINT count);
#define wglAssociateImageBufferEventsI3D WGLEW_GET_FUN(__wglewAssociateImageBufferEventsI3D)
#define wglCreateImageBufferI3D WGLEW_GET_FUN(__wglewCreateImageBufferI3D)
#define wglDestroyImageBufferI3D WGLEW_GET_FUN(__wglewDestroyImageBufferI3D)
#define wglReleaseImageBufferEventsI3D WGLEW_GET_FUN(__wglewReleaseImageBufferEventsI3D)
#define WGLEW_I3D_image_buffer WGLEW_GET_VAR(__WGLEW_I3D_image_buffer)
#endif /* WGL_I3D_image_buffer */
/* ------------------------ WGL_I3D_swap_frame_lock ------------------------ */
#ifndef WGL_I3D_swap_frame_lock
#define WGL_I3D_swap_frame_lock 1
typedef BOOL (WINAPI * PFNWGLDISABLEFRAMELOCKI3DPROC) (VOID);
typedef BOOL (WINAPI * PFNWGLENABLEFRAMELOCKI3DPROC) (VOID);
typedef BOOL (WINAPI * PFNWGLISENABLEDFRAMELOCKI3DPROC) (BOOL* pFlag);
typedef BOOL (WINAPI * PFNWGLQUERYFRAMELOCKMASTERI3DPROC) (BOOL* pFlag);
#define wglDisableFrameLockI3D WGLEW_GET_FUN(__wglewDisableFrameLockI3D)
#define wglEnableFrameLockI3D WGLEW_GET_FUN(__wglewEnableFrameLockI3D)
#define wglIsEnabledFrameLockI3D WGLEW_GET_FUN(__wglewIsEnabledFrameLockI3D)
#define wglQueryFrameLockMasterI3D WGLEW_GET_FUN(__wglewQueryFrameLockMasterI3D)
#define WGLEW_I3D_swap_frame_lock WGLEW_GET_VAR(__WGLEW_I3D_swap_frame_lock)
#endif /* WGL_I3D_swap_frame_lock */
/* ------------------------ WGL_I3D_swap_frame_usage ----------------------- */
#ifndef WGL_I3D_swap_frame_usage
#define WGL_I3D_swap_frame_usage 1
typedef BOOL (WINAPI * PFNWGLBEGINFRAMETRACKINGI3DPROC) (void);
typedef BOOL (WINAPI * PFNWGLENDFRAMETRACKINGI3DPROC) (void);
typedef BOOL (WINAPI * PFNWGLGETFRAMEUSAGEI3DPROC) (float* pUsage);
typedef BOOL (WINAPI * PFNWGLQUERYFRAMETRACKINGI3DPROC) (DWORD* pFrameCount, DWORD *pMissedFrames, float *pLastMissedUsage);
#define wglBeginFrameTrackingI3D WGLEW_GET_FUN(__wglewBeginFrameTrackingI3D)
#define wglEndFrameTrackingI3D WGLEW_GET_FUN(__wglewEndFrameTrackingI3D)
#define wglGetFrameUsageI3D WGLEW_GET_FUN(__wglewGetFrameUsageI3D)
#define wglQueryFrameTrackingI3D WGLEW_GET_FUN(__wglewQueryFrameTrackingI3D)
#define WGLEW_I3D_swap_frame_usage WGLEW_GET_VAR(__WGLEW_I3D_swap_frame_usage)
#endif /* WGL_I3D_swap_frame_usage */
/* --------------------------- WGL_NV_DX_interop --------------------------- */
#ifndef WGL_NV_DX_interop
#define WGL_NV_DX_interop 1
#define WGL_ACCESS_READ_ONLY_NV 0x0000
#define WGL_ACCESS_READ_WRITE_NV 0x0001
#define WGL_ACCESS_WRITE_DISCARD_NV 0x0002
typedef BOOL (WINAPI * PFNWGLDXCLOSEDEVICENVPROC) (HANDLE hDevice);
typedef BOOL (WINAPI * PFNWGLDXLOCKOBJECTSNVPROC) (HANDLE hDevice, GLint count, HANDLE* hObjects);
typedef BOOL (WINAPI * PFNWGLDXOBJECTACCESSNVPROC) (HANDLE hObject, GLenum access);
typedef HANDLE (WINAPI * PFNWGLDXOPENDEVICENVPROC) (void* dxDevice);
typedef HANDLE (WINAPI * PFNWGLDXREGISTEROBJECTNVPROC) (HANDLE hDevice, void* dxObject, GLuint name, GLenum type, GLenum access);
typedef BOOL (WINAPI * PFNWGLDXSETRESOURCESHAREHANDLENVPROC) (void* dxObject, HANDLE shareHandle);
typedef BOOL (WINAPI * PFNWGLDXUNLOCKOBJECTSNVPROC) (HANDLE hDevice, GLint count, HANDLE* hObjects);
typedef BOOL (WINAPI * PFNWGLDXUNREGISTEROBJECTNVPROC) (HANDLE hDevice, HANDLE hObject);
#define wglDXCloseDeviceNV WGLEW_GET_FUN(__wglewDXCloseDeviceNV)
#define wglDXLockObjectsNV WGLEW_GET_FUN(__wglewDXLockObjectsNV)
#define wglDXObjectAccessNV WGLEW_GET_FUN(__wglewDXObjectAccessNV)
#define wglDXOpenDeviceNV WGLEW_GET_FUN(__wglewDXOpenDeviceNV)
#define wglDXRegisterObjectNV WGLEW_GET_FUN(__wglewDXRegisterObjectNV)
#define wglDXSetResourceShareHandleNV WGLEW_GET_FUN(__wglewDXSetResourceShareHandleNV)
#define wglDXUnlockObjectsNV WGLEW_GET_FUN(__wglewDXUnlockObjectsNV)
#define wglDXUnregisterObjectNV WGLEW_GET_FUN(__wglewDXUnregisterObjectNV)
#define WGLEW_NV_DX_interop WGLEW_GET_VAR(__WGLEW_NV_DX_interop)
#endif /* WGL_NV_DX_interop */
/* --------------------------- WGL_NV_DX_interop2 -------------------------- */
#ifndef WGL_NV_DX_interop2
#define WGL_NV_DX_interop2 1
#define WGLEW_NV_DX_interop2 WGLEW_GET_VAR(__WGLEW_NV_DX_interop2)
#endif /* WGL_NV_DX_interop2 */
/* --------------------------- WGL_NV_copy_image --------------------------- */
#ifndef WGL_NV_copy_image
#define WGL_NV_copy_image 1
typedef BOOL (WINAPI * PFNWGLCOPYIMAGESUBDATANVPROC) (HGLRC hSrcRC, GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, HGLRC hDstRC, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei width, GLsizei height, GLsizei depth);
#define wglCopyImageSubDataNV WGLEW_GET_FUN(__wglewCopyImageSubDataNV)
#define WGLEW_NV_copy_image WGLEW_GET_VAR(__WGLEW_NV_copy_image)
#endif /* WGL_NV_copy_image */
/* -------------------------- WGL_NV_float_buffer -------------------------- */
#ifndef WGL_NV_float_buffer
#define WGL_NV_float_buffer 1
#define WGL_FLOAT_COMPONENTS_NV 0x20B0
#define WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_R_NV 0x20B1
#define WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RG_NV 0x20B2
#define WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGB_NV 0x20B3
#define WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGBA_NV 0x20B4
#define WGL_TEXTURE_FLOAT_R_NV 0x20B5
#define WGL_TEXTURE_FLOAT_RG_NV 0x20B6
#define WGL_TEXTURE_FLOAT_RGB_NV 0x20B7
#define WGL_TEXTURE_FLOAT_RGBA_NV 0x20B8
#define WGLEW_NV_float_buffer WGLEW_GET_VAR(__WGLEW_NV_float_buffer)
#endif /* WGL_NV_float_buffer */
/* -------------------------- WGL_NV_gpu_affinity -------------------------- */
#ifndef WGL_NV_gpu_affinity
#define WGL_NV_gpu_affinity 1
#define WGL_ERROR_INCOMPATIBLE_AFFINITY_MASKS_NV 0x20D0
#define WGL_ERROR_MISSING_AFFINITY_MASK_NV 0x20D1
DECLARE_HANDLE(HGPUNV);
typedef struct _GPU_DEVICE {
DWORD cb;
CHAR DeviceName[32];
CHAR DeviceString[128];
DWORD Flags;
RECT rcVirtualScreen;
} GPU_DEVICE, *PGPU_DEVICE;
typedef HDC (WINAPI * PFNWGLCREATEAFFINITYDCNVPROC) (const HGPUNV *phGpuList);
typedef BOOL (WINAPI * PFNWGLDELETEDCNVPROC) (HDC hdc);
typedef BOOL (WINAPI * PFNWGLENUMGPUDEVICESNVPROC) (HGPUNV hGpu, UINT iDeviceIndex, PGPU_DEVICE lpGpuDevice);
typedef BOOL (WINAPI * PFNWGLENUMGPUSFROMAFFINITYDCNVPROC) (HDC hAffinityDC, UINT iGpuIndex, HGPUNV *hGpu);
typedef BOOL (WINAPI * PFNWGLENUMGPUSNVPROC) (UINT iGpuIndex, HGPUNV *phGpu);
#define wglCreateAffinityDCNV WGLEW_GET_FUN(__wglewCreateAffinityDCNV)
#define wglDeleteDCNV WGLEW_GET_FUN(__wglewDeleteDCNV)
#define wglEnumGpuDevicesNV WGLEW_GET_FUN(__wglewEnumGpuDevicesNV)
#define wglEnumGpusFromAffinityDCNV WGLEW_GET_FUN(__wglewEnumGpusFromAffinityDCNV)
#define wglEnumGpusNV WGLEW_GET_FUN(__wglewEnumGpusNV)
#define WGLEW_NV_gpu_affinity WGLEW_GET_VAR(__WGLEW_NV_gpu_affinity)
#endif /* WGL_NV_gpu_affinity */
/* ---------------------- WGL_NV_multisample_coverage ---------------------- */
#ifndef WGL_NV_multisample_coverage
#define WGL_NV_multisample_coverage 1
#define WGL_COVERAGE_SAMPLES_NV 0x2042
#define WGL_COLOR_SAMPLES_NV 0x20B9
#define WGLEW_NV_multisample_coverage WGLEW_GET_VAR(__WGLEW_NV_multisample_coverage)
#endif /* WGL_NV_multisample_coverage */
/* -------------------------- WGL_NV_present_video ------------------------- */
#ifndef WGL_NV_present_video
#define WGL_NV_present_video 1
#define WGL_NUM_VIDEO_SLOTS_NV 0x20F0
DECLARE_HANDLE(HVIDEOOUTPUTDEVICENV);
typedef BOOL (WINAPI * PFNWGLBINDVIDEODEVICENVPROC) (HDC hDc, unsigned int uVideoSlot, HVIDEOOUTPUTDEVICENV hVideoDevice, const int* piAttribList);
typedef int (WINAPI * PFNWGLENUMERATEVIDEODEVICESNVPROC) (HDC hDc, HVIDEOOUTPUTDEVICENV* phDeviceList);
typedef BOOL (WINAPI * PFNWGLQUERYCURRENTCONTEXTNVPROC) (int iAttribute, int* piValue);
#define wglBindVideoDeviceNV WGLEW_GET_FUN(__wglewBindVideoDeviceNV)
#define wglEnumerateVideoDevicesNV WGLEW_GET_FUN(__wglewEnumerateVideoDevicesNV)
#define wglQueryCurrentContextNV WGLEW_GET_FUN(__wglewQueryCurrentContextNV)
#define WGLEW_NV_present_video WGLEW_GET_VAR(__WGLEW_NV_present_video)
#endif /* WGL_NV_present_video */
/* ---------------------- WGL_NV_render_depth_texture ---------------------- */
#ifndef WGL_NV_render_depth_texture
#define WGL_NV_render_depth_texture 1
#define WGL_NO_TEXTURE_ARB 0x2077
#define WGL_BIND_TO_TEXTURE_DEPTH_NV 0x20A3
#define WGL_BIND_TO_TEXTURE_RECTANGLE_DEPTH_NV 0x20A4
#define WGL_DEPTH_TEXTURE_FORMAT_NV 0x20A5
#define WGL_TEXTURE_DEPTH_COMPONENT_NV 0x20A6
#define WGL_DEPTH_COMPONENT_NV 0x20A7
#define WGLEW_NV_render_depth_texture WGLEW_GET_VAR(__WGLEW_NV_render_depth_texture)
#endif /* WGL_NV_render_depth_texture */
/* -------------------- WGL_NV_render_texture_rectangle -------------------- */
#ifndef WGL_NV_render_texture_rectangle
#define WGL_NV_render_texture_rectangle 1
#define WGL_BIND_TO_TEXTURE_RECTANGLE_RGB_NV 0x20A0
#define WGL_BIND_TO_TEXTURE_RECTANGLE_RGBA_NV 0x20A1
#define WGL_TEXTURE_RECTANGLE_NV 0x20A2
#define WGLEW_NV_render_texture_rectangle WGLEW_GET_VAR(__WGLEW_NV_render_texture_rectangle)
#endif /* WGL_NV_render_texture_rectangle */
/* --------------------------- WGL_NV_swap_group --------------------------- */
#ifndef WGL_NV_swap_group
#define WGL_NV_swap_group 1
typedef BOOL (WINAPI * PFNWGLBINDSWAPBARRIERNVPROC) (GLuint group, GLuint barrier);
typedef BOOL (WINAPI * PFNWGLJOINSWAPGROUPNVPROC) (HDC hDC, GLuint group);
typedef BOOL (WINAPI * PFNWGLQUERYFRAMECOUNTNVPROC) (HDC hDC, GLuint* count);
typedef BOOL (WINAPI * PFNWGLQUERYMAXSWAPGROUPSNVPROC) (HDC hDC, GLuint* maxGroups, GLuint *maxBarriers);
typedef BOOL (WINAPI * PFNWGLQUERYSWAPGROUPNVPROC) (HDC hDC, GLuint* group, GLuint *barrier);
typedef BOOL (WINAPI * PFNWGLRESETFRAMECOUNTNVPROC) (HDC hDC);
#define wglBindSwapBarrierNV WGLEW_GET_FUN(__wglewBindSwapBarrierNV)
#define wglJoinSwapGroupNV WGLEW_GET_FUN(__wglewJoinSwapGroupNV)
#define wglQueryFrameCountNV WGLEW_GET_FUN(__wglewQueryFrameCountNV)
#define wglQueryMaxSwapGroupsNV WGLEW_GET_FUN(__wglewQueryMaxSwapGroupsNV)
#define wglQuerySwapGroupNV WGLEW_GET_FUN(__wglewQuerySwapGroupNV)
#define wglResetFrameCountNV WGLEW_GET_FUN(__wglewResetFrameCountNV)
#define WGLEW_NV_swap_group WGLEW_GET_VAR(__WGLEW_NV_swap_group)
#endif /* WGL_NV_swap_group */
/* ----------------------- WGL_NV_vertex_array_range ----------------------- */
#ifndef WGL_NV_vertex_array_range
#define WGL_NV_vertex_array_range 1
typedef void * (WINAPI * PFNWGLALLOCATEMEMORYNVPROC) (GLsizei size, GLfloat readFrequency, GLfloat writeFrequency, GLfloat priority);
typedef void (WINAPI * PFNWGLFREEMEMORYNVPROC) (void *pointer);
#define wglAllocateMemoryNV WGLEW_GET_FUN(__wglewAllocateMemoryNV)
#define wglFreeMemoryNV WGLEW_GET_FUN(__wglewFreeMemoryNV)
#define WGLEW_NV_vertex_array_range WGLEW_GET_VAR(__WGLEW_NV_vertex_array_range)
#endif /* WGL_NV_vertex_array_range */
/* -------------------------- WGL_NV_video_capture ------------------------- */
#ifndef WGL_NV_video_capture
#define WGL_NV_video_capture 1
#define WGL_UNIQUE_ID_NV 0x20CE
#define WGL_NUM_VIDEO_CAPTURE_SLOTS_NV 0x20CF
DECLARE_HANDLE(HVIDEOINPUTDEVICENV);
typedef BOOL (WINAPI * PFNWGLBINDVIDEOCAPTUREDEVICENVPROC) (UINT uVideoSlot, HVIDEOINPUTDEVICENV hDevice);
typedef UINT (WINAPI * PFNWGLENUMERATEVIDEOCAPTUREDEVICESNVPROC) (HDC hDc, HVIDEOINPUTDEVICENV* phDeviceList);
typedef BOOL (WINAPI * PFNWGLLOCKVIDEOCAPTUREDEVICENVPROC) (HDC hDc, HVIDEOINPUTDEVICENV hDevice);
typedef BOOL (WINAPI * PFNWGLQUERYVIDEOCAPTUREDEVICENVPROC) (HDC hDc, HVIDEOINPUTDEVICENV hDevice, int iAttribute, int* piValue);
typedef BOOL (WINAPI * PFNWGLRELEASEVIDEOCAPTUREDEVICENVPROC) (HDC hDc, HVIDEOINPUTDEVICENV hDevice);
#define wglBindVideoCaptureDeviceNV WGLEW_GET_FUN(__wglewBindVideoCaptureDeviceNV)
#define wglEnumerateVideoCaptureDevicesNV WGLEW_GET_FUN(__wglewEnumerateVideoCaptureDevicesNV)
#define wglLockVideoCaptureDeviceNV WGLEW_GET_FUN(__wglewLockVideoCaptureDeviceNV)
#define wglQueryVideoCaptureDeviceNV WGLEW_GET_FUN(__wglewQueryVideoCaptureDeviceNV)
#define wglReleaseVideoCaptureDeviceNV WGLEW_GET_FUN(__wglewReleaseVideoCaptureDeviceNV)
#define WGLEW_NV_video_capture WGLEW_GET_VAR(__WGLEW_NV_video_capture)
#endif /* WGL_NV_video_capture */
/* -------------------------- WGL_NV_video_output -------------------------- */
#ifndef WGL_NV_video_output
#define WGL_NV_video_output 1
#define WGL_BIND_TO_VIDEO_RGB_NV 0x20C0
#define WGL_BIND_TO_VIDEO_RGBA_NV 0x20C1
#define WGL_BIND_TO_VIDEO_RGB_AND_DEPTH_NV 0x20C2
#define WGL_VIDEO_OUT_COLOR_NV 0x20C3
#define WGL_VIDEO_OUT_ALPHA_NV 0x20C4
#define WGL_VIDEO_OUT_DEPTH_NV 0x20C5
#define WGL_VIDEO_OUT_COLOR_AND_ALPHA_NV 0x20C6
#define WGL_VIDEO_OUT_COLOR_AND_DEPTH_NV 0x20C7
#define WGL_VIDEO_OUT_FRAME 0x20C8
#define WGL_VIDEO_OUT_FIELD_1 0x20C9
#define WGL_VIDEO_OUT_FIELD_2 0x20CA
#define WGL_VIDEO_OUT_STACKED_FIELDS_1_2 0x20CB
#define WGL_VIDEO_OUT_STACKED_FIELDS_2_1 0x20CC
DECLARE_HANDLE(HPVIDEODEV);
typedef BOOL (WINAPI * PFNWGLBINDVIDEOIMAGENVPROC) (HPVIDEODEV hVideoDevice, HPBUFFERARB hPbuffer, int iVideoBuffer);
typedef BOOL (WINAPI * PFNWGLGETVIDEODEVICENVPROC) (HDC hDC, int numDevices, HPVIDEODEV* hVideoDevice);
typedef BOOL (WINAPI * PFNWGLGETVIDEOINFONVPROC) (HPVIDEODEV hpVideoDevice, unsigned long* pulCounterOutputPbuffer, unsigned long *pulCounterOutputVideo);
typedef BOOL (WINAPI * PFNWGLRELEASEVIDEODEVICENVPROC) (HPVIDEODEV hVideoDevice);
typedef BOOL (WINAPI * PFNWGLRELEASEVIDEOIMAGENVPROC) (HPBUFFERARB hPbuffer, int iVideoBuffer);
typedef BOOL (WINAPI * PFNWGLSENDPBUFFERTOVIDEONVPROC) (HPBUFFERARB hPbuffer, int iBufferType, unsigned long* pulCounterPbuffer, BOOL bBlock);
#define wglBindVideoImageNV WGLEW_GET_FUN(__wglewBindVideoImageNV)
#define wglGetVideoDeviceNV WGLEW_GET_FUN(__wglewGetVideoDeviceNV)
#define wglGetVideoInfoNV WGLEW_GET_FUN(__wglewGetVideoInfoNV)
#define wglReleaseVideoDeviceNV WGLEW_GET_FUN(__wglewReleaseVideoDeviceNV)
#define wglReleaseVideoImageNV WGLEW_GET_FUN(__wglewReleaseVideoImageNV)
#define wglSendPbufferToVideoNV WGLEW_GET_FUN(__wglewSendPbufferToVideoNV)
#define WGLEW_NV_video_output WGLEW_GET_VAR(__WGLEW_NV_video_output)
#endif /* WGL_NV_video_output */
/* -------------------------- WGL_OML_sync_control ------------------------- */
#ifndef WGL_OML_sync_control
#define WGL_OML_sync_control 1
typedef BOOL (WINAPI * PFNWGLGETMSCRATEOMLPROC) (HDC hdc, INT32* numerator, INT32 *denominator);
typedef BOOL (WINAPI * PFNWGLGETSYNCVALUESOMLPROC) (HDC hdc, INT64* ust, INT64 *msc, INT64 *sbc);
typedef INT64 (WINAPI * PFNWGLSWAPBUFFERSMSCOMLPROC) (HDC hdc, INT64 target_msc, INT64 divisor, INT64 remainder);
typedef INT64 (WINAPI * PFNWGLSWAPLAYERBUFFERSMSCOMLPROC) (HDC hdc, INT fuPlanes, INT64 target_msc, INT64 divisor, INT64 remainder);
typedef BOOL (WINAPI * PFNWGLWAITFORMSCOMLPROC) (HDC hdc, INT64 target_msc, INT64 divisor, INT64 remainder, INT64* ust, INT64 *msc, INT64 *sbc);
typedef BOOL (WINAPI * PFNWGLWAITFORSBCOMLPROC) (HDC hdc, INT64 target_sbc, INT64* ust, INT64 *msc, INT64 *sbc);
#define wglGetMscRateOML WGLEW_GET_FUN(__wglewGetMscRateOML)
#define wglGetSyncValuesOML WGLEW_GET_FUN(__wglewGetSyncValuesOML)
#define wglSwapBuffersMscOML WGLEW_GET_FUN(__wglewSwapBuffersMscOML)
#define wglSwapLayerBuffersMscOML WGLEW_GET_FUN(__wglewSwapLayerBuffersMscOML)
#define wglWaitForMscOML WGLEW_GET_FUN(__wglewWaitForMscOML)
#define wglWaitForSbcOML WGLEW_GET_FUN(__wglewWaitForSbcOML)
#define WGLEW_OML_sync_control WGLEW_GET_VAR(__WGLEW_OML_sync_control)
#endif /* WGL_OML_sync_control */
/* ------------------------------------------------------------------------- */
#ifdef GLEW_MX
#define WGLEW_FUN_EXPORT
#define WGLEW_VAR_EXPORT
#else
#define WGLEW_FUN_EXPORT GLEW_FUN_EXPORT
#define WGLEW_VAR_EXPORT GLEW_VAR_EXPORT
#endif /* GLEW_MX */
#ifdef GLEW_MX
struct WGLEWContextStruct
{
#endif /* GLEW_MX */
WGLEW_FUN_EXPORT PFNWGLSETSTEREOEMITTERSTATE3DLPROC __wglewSetStereoEmitterState3DL;
WGLEW_FUN_EXPORT PFNWGLBLITCONTEXTFRAMEBUFFERAMDPROC __wglewBlitContextFramebufferAMD;
WGLEW_FUN_EXPORT PFNWGLCREATEASSOCIATEDCONTEXTAMDPROC __wglewCreateAssociatedContextAMD;
WGLEW_FUN_EXPORT PFNWGLCREATEASSOCIATEDCONTEXTATTRIBSAMDPROC __wglewCreateAssociatedContextAttribsAMD;
WGLEW_FUN_EXPORT PFNWGLDELETEASSOCIATEDCONTEXTAMDPROC __wglewDeleteAssociatedContextAMD;
WGLEW_FUN_EXPORT PFNWGLGETCONTEXTGPUIDAMDPROC __wglewGetContextGPUIDAMD;
WGLEW_FUN_EXPORT PFNWGLGETCURRENTASSOCIATEDCONTEXTAMDPROC __wglewGetCurrentAssociatedContextAMD;
WGLEW_FUN_EXPORT PFNWGLGETGPUIDSAMDPROC __wglewGetGPUIDsAMD;
WGLEW_FUN_EXPORT PFNWGLGETGPUINFOAMDPROC __wglewGetGPUInfoAMD;
WGLEW_FUN_EXPORT PFNWGLMAKEASSOCIATEDCONTEXTCURRENTAMDPROC __wglewMakeAssociatedContextCurrentAMD;
WGLEW_FUN_EXPORT PFNWGLCREATEBUFFERREGIONARBPROC __wglewCreateBufferRegionARB;
WGLEW_FUN_EXPORT PFNWGLDELETEBUFFERREGIONARBPROC __wglewDeleteBufferRegionARB;
WGLEW_FUN_EXPORT PFNWGLRESTOREBUFFERREGIONARBPROC __wglewRestoreBufferRegionARB;
WGLEW_FUN_EXPORT PFNWGLSAVEBUFFERREGIONARBPROC __wglewSaveBufferRegionARB;
WGLEW_FUN_EXPORT PFNWGLCREATECONTEXTATTRIBSARBPROC __wglewCreateContextAttribsARB;
WGLEW_FUN_EXPORT PFNWGLGETEXTENSIONSSTRINGARBPROC __wglewGetExtensionsStringARB;
WGLEW_FUN_EXPORT PFNWGLGETCURRENTREADDCARBPROC __wglewGetCurrentReadDCARB;
WGLEW_FUN_EXPORT PFNWGLMAKECONTEXTCURRENTARBPROC __wglewMakeContextCurrentARB;
WGLEW_FUN_EXPORT PFNWGLCREATEPBUFFERARBPROC __wglewCreatePbufferARB;
WGLEW_FUN_EXPORT PFNWGLDESTROYPBUFFERARBPROC __wglewDestroyPbufferARB;
WGLEW_FUN_EXPORT PFNWGLGETPBUFFERDCARBPROC __wglewGetPbufferDCARB;
WGLEW_FUN_EXPORT PFNWGLQUERYPBUFFERARBPROC __wglewQueryPbufferARB;
WGLEW_FUN_EXPORT PFNWGLRELEASEPBUFFERDCARBPROC __wglewReleasePbufferDCARB;
WGLEW_FUN_EXPORT PFNWGLCHOOSEPIXELFORMATARBPROC __wglewChoosePixelFormatARB;
WGLEW_FUN_EXPORT PFNWGLGETPIXELFORMATATTRIBFVARBPROC __wglewGetPixelFormatAttribfvARB;
WGLEW_FUN_EXPORT PFNWGLGETPIXELFORMATATTRIBIVARBPROC __wglewGetPixelFormatAttribivARB;
WGLEW_FUN_EXPORT PFNWGLBINDTEXIMAGEARBPROC __wglewBindTexImageARB;
WGLEW_FUN_EXPORT PFNWGLRELEASETEXIMAGEARBPROC __wglewReleaseTexImageARB;
WGLEW_FUN_EXPORT PFNWGLSETPBUFFERATTRIBARBPROC __wglewSetPbufferAttribARB;
WGLEW_FUN_EXPORT PFNWGLBINDDISPLAYCOLORTABLEEXTPROC __wglewBindDisplayColorTableEXT;
WGLEW_FUN_EXPORT PFNWGLCREATEDISPLAYCOLORTABLEEXTPROC __wglewCreateDisplayColorTableEXT;
WGLEW_FUN_EXPORT PFNWGLDESTROYDISPLAYCOLORTABLEEXTPROC __wglewDestroyDisplayColorTableEXT;
WGLEW_FUN_EXPORT PFNWGLLOADDISPLAYCOLORTABLEEXTPROC __wglewLoadDisplayColorTableEXT;
WGLEW_FUN_EXPORT PFNWGLGETEXTENSIONSSTRINGEXTPROC __wglewGetExtensionsStringEXT;
WGLEW_FUN_EXPORT PFNWGLGETCURRENTREADDCEXTPROC __wglewGetCurrentReadDCEXT;
WGLEW_FUN_EXPORT PFNWGLMAKECONTEXTCURRENTEXTPROC __wglewMakeContextCurrentEXT;
WGLEW_FUN_EXPORT PFNWGLCREATEPBUFFEREXTPROC __wglewCreatePbufferEXT;
WGLEW_FUN_EXPORT PFNWGLDESTROYPBUFFEREXTPROC __wglewDestroyPbufferEXT;
WGLEW_FUN_EXPORT PFNWGLGETPBUFFERDCEXTPROC __wglewGetPbufferDCEXT;
WGLEW_FUN_EXPORT PFNWGLQUERYPBUFFEREXTPROC __wglewQueryPbufferEXT;
WGLEW_FUN_EXPORT PFNWGLRELEASEPBUFFERDCEXTPROC __wglewReleasePbufferDCEXT;
WGLEW_FUN_EXPORT PFNWGLCHOOSEPIXELFORMATEXTPROC __wglewChoosePixelFormatEXT;
WGLEW_FUN_EXPORT PFNWGLGETPIXELFORMATATTRIBFVEXTPROC __wglewGetPixelFormatAttribfvEXT;
WGLEW_FUN_EXPORT PFNWGLGETPIXELFORMATATTRIBIVEXTPROC __wglewGetPixelFormatAttribivEXT;
WGLEW_FUN_EXPORT PFNWGLGETSWAPINTERVALEXTPROC __wglewGetSwapIntervalEXT;
WGLEW_FUN_EXPORT PFNWGLSWAPINTERVALEXTPROC __wglewSwapIntervalEXT;
WGLEW_FUN_EXPORT PFNWGLGETDIGITALVIDEOPARAMETERSI3DPROC __wglewGetDigitalVideoParametersI3D;
WGLEW_FUN_EXPORT PFNWGLSETDIGITALVIDEOPARAMETERSI3DPROC __wglewSetDigitalVideoParametersI3D;
WGLEW_FUN_EXPORT PFNWGLGETGAMMATABLEI3DPROC __wglewGetGammaTableI3D;
WGLEW_FUN_EXPORT PFNWGLGETGAMMATABLEPARAMETERSI3DPROC __wglewGetGammaTableParametersI3D;
WGLEW_FUN_EXPORT PFNWGLSETGAMMATABLEI3DPROC __wglewSetGammaTableI3D;
WGLEW_FUN_EXPORT PFNWGLSETGAMMATABLEPARAMETERSI3DPROC __wglewSetGammaTableParametersI3D;
WGLEW_FUN_EXPORT PFNWGLDISABLEGENLOCKI3DPROC __wglewDisableGenlockI3D;
WGLEW_FUN_EXPORT PFNWGLENABLEGENLOCKI3DPROC __wglewEnableGenlockI3D;
WGLEW_FUN_EXPORT PFNWGLGENLOCKSAMPLERATEI3DPROC __wglewGenlockSampleRateI3D;
WGLEW_FUN_EXPORT PFNWGLGENLOCKSOURCEDELAYI3DPROC __wglewGenlockSourceDelayI3D;
WGLEW_FUN_EXPORT PFNWGLGENLOCKSOURCEEDGEI3DPROC __wglewGenlockSourceEdgeI3D;
WGLEW_FUN_EXPORT PFNWGLGENLOCKSOURCEI3DPROC __wglewGenlockSourceI3D;
WGLEW_FUN_EXPORT PFNWGLGETGENLOCKSAMPLERATEI3DPROC __wglewGetGenlockSampleRateI3D;
WGLEW_FUN_EXPORT PFNWGLGETGENLOCKSOURCEDELAYI3DPROC __wglewGetGenlockSourceDelayI3D;
WGLEW_FUN_EXPORT PFNWGLGETGENLOCKSOURCEEDGEI3DPROC __wglewGetGenlockSourceEdgeI3D;
WGLEW_FUN_EXPORT PFNWGLGETGENLOCKSOURCEI3DPROC __wglewGetGenlockSourceI3D;
WGLEW_FUN_EXPORT PFNWGLISENABLEDGENLOCKI3DPROC __wglewIsEnabledGenlockI3D;
WGLEW_FUN_EXPORT PFNWGLQUERYGENLOCKMAXSOURCEDELAYI3DPROC __wglewQueryGenlockMaxSourceDelayI3D;
WGLEW_FUN_EXPORT PFNWGLASSOCIATEIMAGEBUFFEREVENTSI3DPROC __wglewAssociateImageBufferEventsI3D;
WGLEW_FUN_EXPORT PFNWGLCREATEIMAGEBUFFERI3DPROC __wglewCreateImageBufferI3D;
WGLEW_FUN_EXPORT PFNWGLDESTROYIMAGEBUFFERI3DPROC __wglewDestroyImageBufferI3D;
WGLEW_FUN_EXPORT PFNWGLRELEASEIMAGEBUFFEREVENTSI3DPROC __wglewReleaseImageBufferEventsI3D;
WGLEW_FUN_EXPORT PFNWGLDISABLEFRAMELOCKI3DPROC __wglewDisableFrameLockI3D;
WGLEW_FUN_EXPORT PFNWGLENABLEFRAMELOCKI3DPROC __wglewEnableFrameLockI3D;
WGLEW_FUN_EXPORT PFNWGLISENABLEDFRAMELOCKI3DPROC __wglewIsEnabledFrameLockI3D;
WGLEW_FUN_EXPORT PFNWGLQUERYFRAMELOCKMASTERI3DPROC __wglewQueryFrameLockMasterI3D;
WGLEW_FUN_EXPORT PFNWGLBEGINFRAMETRACKINGI3DPROC __wglewBeginFrameTrackingI3D;
WGLEW_FUN_EXPORT PFNWGLENDFRAMETRACKINGI3DPROC __wglewEndFrameTrackingI3D;
WGLEW_FUN_EXPORT PFNWGLGETFRAMEUSAGEI3DPROC __wglewGetFrameUsageI3D;
WGLEW_FUN_EXPORT PFNWGLQUERYFRAMETRACKINGI3DPROC __wglewQueryFrameTrackingI3D;
WGLEW_FUN_EXPORT PFNWGLDXCLOSEDEVICENVPROC __wglewDXCloseDeviceNV;
WGLEW_FUN_EXPORT PFNWGLDXLOCKOBJECTSNVPROC __wglewDXLockObjectsNV;
WGLEW_FUN_EXPORT PFNWGLDXOBJECTACCESSNVPROC __wglewDXObjectAccessNV;
WGLEW_FUN_EXPORT PFNWGLDXOPENDEVICENVPROC __wglewDXOpenDeviceNV;
WGLEW_FUN_EXPORT PFNWGLDXREGISTEROBJECTNVPROC __wglewDXRegisterObjectNV;
WGLEW_FUN_EXPORT PFNWGLDXSETRESOURCESHAREHANDLENVPROC __wglewDXSetResourceShareHandleNV;
WGLEW_FUN_EXPORT PFNWGLDXUNLOCKOBJECTSNVPROC __wglewDXUnlockObjectsNV;
WGLEW_FUN_EXPORT PFNWGLDXUNREGISTEROBJECTNVPROC __wglewDXUnregisterObjectNV;
WGLEW_FUN_EXPORT PFNWGLCOPYIMAGESUBDATANVPROC __wglewCopyImageSubDataNV;
WGLEW_FUN_EXPORT PFNWGLCREATEAFFINITYDCNVPROC __wglewCreateAffinityDCNV;
WGLEW_FUN_EXPORT PFNWGLDELETEDCNVPROC __wglewDeleteDCNV;
WGLEW_FUN_EXPORT PFNWGLENUMGPUDEVICESNVPROC __wglewEnumGpuDevicesNV;
WGLEW_FUN_EXPORT PFNWGLENUMGPUSFROMAFFINITYDCNVPROC __wglewEnumGpusFromAffinityDCNV;
WGLEW_FUN_EXPORT PFNWGLENUMGPUSNVPROC __wglewEnumGpusNV;
WGLEW_FUN_EXPORT PFNWGLBINDVIDEODEVICENVPROC __wglewBindVideoDeviceNV;
WGLEW_FUN_EXPORT PFNWGLENUMERATEVIDEODEVICESNVPROC __wglewEnumerateVideoDevicesNV;
WGLEW_FUN_EXPORT PFNWGLQUERYCURRENTCONTEXTNVPROC __wglewQueryCurrentContextNV;
WGLEW_FUN_EXPORT PFNWGLBINDSWAPBARRIERNVPROC __wglewBindSwapBarrierNV;
WGLEW_FUN_EXPORT PFNWGLJOINSWAPGROUPNVPROC __wglewJoinSwapGroupNV;
WGLEW_FUN_EXPORT PFNWGLQUERYFRAMECOUNTNVPROC __wglewQueryFrameCountNV;
WGLEW_FUN_EXPORT PFNWGLQUERYMAXSWAPGROUPSNVPROC __wglewQueryMaxSwapGroupsNV;
WGLEW_FUN_EXPORT PFNWGLQUERYSWAPGROUPNVPROC __wglewQuerySwapGroupNV;
WGLEW_FUN_EXPORT PFNWGLRESETFRAMECOUNTNVPROC __wglewResetFrameCountNV;
WGLEW_FUN_EXPORT PFNWGLALLOCATEMEMORYNVPROC __wglewAllocateMemoryNV;
WGLEW_FUN_EXPORT PFNWGLFREEMEMORYNVPROC __wglewFreeMemoryNV;
WGLEW_FUN_EXPORT PFNWGLBINDVIDEOCAPTUREDEVICENVPROC __wglewBindVideoCaptureDeviceNV;
WGLEW_FUN_EXPORT PFNWGLENUMERATEVIDEOCAPTUREDEVICESNVPROC __wglewEnumerateVideoCaptureDevicesNV;
WGLEW_FUN_EXPORT PFNWGLLOCKVIDEOCAPTUREDEVICENVPROC __wglewLockVideoCaptureDeviceNV;
WGLEW_FUN_EXPORT PFNWGLQUERYVIDEOCAPTUREDEVICENVPROC __wglewQueryVideoCaptureDeviceNV;
WGLEW_FUN_EXPORT PFNWGLRELEASEVIDEOCAPTUREDEVICENVPROC __wglewReleaseVideoCaptureDeviceNV;
WGLEW_FUN_EXPORT PFNWGLBINDVIDEOIMAGENVPROC __wglewBindVideoImageNV;
WGLEW_FUN_EXPORT PFNWGLGETVIDEODEVICENVPROC __wglewGetVideoDeviceNV;
WGLEW_FUN_EXPORT PFNWGLGETVIDEOINFONVPROC __wglewGetVideoInfoNV;
WGLEW_FUN_EXPORT PFNWGLRELEASEVIDEODEVICENVPROC __wglewReleaseVideoDeviceNV;
WGLEW_FUN_EXPORT PFNWGLRELEASEVIDEOIMAGENVPROC __wglewReleaseVideoImageNV;
WGLEW_FUN_EXPORT PFNWGLSENDPBUFFERTOVIDEONVPROC __wglewSendPbufferToVideoNV;
WGLEW_FUN_EXPORT PFNWGLGETMSCRATEOMLPROC __wglewGetMscRateOML;
WGLEW_FUN_EXPORT PFNWGLGETSYNCVALUESOMLPROC __wglewGetSyncValuesOML;
WGLEW_FUN_EXPORT PFNWGLSWAPBUFFERSMSCOMLPROC __wglewSwapBuffersMscOML;
WGLEW_FUN_EXPORT PFNWGLSWAPLAYERBUFFERSMSCOMLPROC __wglewSwapLayerBuffersMscOML;
WGLEW_FUN_EXPORT PFNWGLWAITFORMSCOMLPROC __wglewWaitForMscOML;
WGLEW_FUN_EXPORT PFNWGLWAITFORSBCOMLPROC __wglewWaitForSbcOML;
WGLEW_VAR_EXPORT GLboolean __WGLEW_3DFX_multisample;
WGLEW_VAR_EXPORT GLboolean __WGLEW_3DL_stereo_control;
WGLEW_VAR_EXPORT GLboolean __WGLEW_AMD_gpu_association;
WGLEW_VAR_EXPORT GLboolean __WGLEW_ARB_buffer_region;
WGLEW_VAR_EXPORT GLboolean __WGLEW_ARB_create_context;
WGLEW_VAR_EXPORT GLboolean __WGLEW_ARB_create_context_profile;
WGLEW_VAR_EXPORT GLboolean __WGLEW_ARB_create_context_robustness;
WGLEW_VAR_EXPORT GLboolean __WGLEW_ARB_extensions_string;
WGLEW_VAR_EXPORT GLboolean __WGLEW_ARB_framebuffer_sRGB;
WGLEW_VAR_EXPORT GLboolean __WGLEW_ARB_make_current_read;
WGLEW_VAR_EXPORT GLboolean __WGLEW_ARB_multisample;
WGLEW_VAR_EXPORT GLboolean __WGLEW_ARB_pbuffer;
WGLEW_VAR_EXPORT GLboolean __WGLEW_ARB_pixel_format;
WGLEW_VAR_EXPORT GLboolean __WGLEW_ARB_pixel_format_float;
WGLEW_VAR_EXPORT GLboolean __WGLEW_ARB_render_texture;
WGLEW_VAR_EXPORT GLboolean __WGLEW_ATI_pixel_format_float;
WGLEW_VAR_EXPORT GLboolean __WGLEW_ATI_render_texture_rectangle;
WGLEW_VAR_EXPORT GLboolean __WGLEW_EXT_create_context_es2_profile;
WGLEW_VAR_EXPORT GLboolean __WGLEW_EXT_depth_float;
WGLEW_VAR_EXPORT GLboolean __WGLEW_EXT_display_color_table;
WGLEW_VAR_EXPORT GLboolean __WGLEW_EXT_extensions_string;
WGLEW_VAR_EXPORT GLboolean __WGLEW_EXT_framebuffer_sRGB;
WGLEW_VAR_EXPORT GLboolean __WGLEW_EXT_make_current_read;
WGLEW_VAR_EXPORT GLboolean __WGLEW_EXT_multisample;
WGLEW_VAR_EXPORT GLboolean __WGLEW_EXT_pbuffer;
WGLEW_VAR_EXPORT GLboolean __WGLEW_EXT_pixel_format;
WGLEW_VAR_EXPORT GLboolean __WGLEW_EXT_pixel_format_packed_float;
WGLEW_VAR_EXPORT GLboolean __WGLEW_EXT_swap_control;
WGLEW_VAR_EXPORT GLboolean __WGLEW_EXT_swap_control_tear;
WGLEW_VAR_EXPORT GLboolean __WGLEW_I3D_digital_video_control;
WGLEW_VAR_EXPORT GLboolean __WGLEW_I3D_gamma;
WGLEW_VAR_EXPORT GLboolean __WGLEW_I3D_genlock;
WGLEW_VAR_EXPORT GLboolean __WGLEW_I3D_image_buffer;
WGLEW_VAR_EXPORT GLboolean __WGLEW_I3D_swap_frame_lock;
WGLEW_VAR_EXPORT GLboolean __WGLEW_I3D_swap_frame_usage;
WGLEW_VAR_EXPORT GLboolean __WGLEW_NV_DX_interop;
WGLEW_VAR_EXPORT GLboolean __WGLEW_NV_DX_interop2;
WGLEW_VAR_EXPORT GLboolean __WGLEW_NV_copy_image;
WGLEW_VAR_EXPORT GLboolean __WGLEW_NV_float_buffer;
WGLEW_VAR_EXPORT GLboolean __WGLEW_NV_gpu_affinity;
WGLEW_VAR_EXPORT GLboolean __WGLEW_NV_multisample_coverage;
WGLEW_VAR_EXPORT GLboolean __WGLEW_NV_present_video;
WGLEW_VAR_EXPORT GLboolean __WGLEW_NV_render_depth_texture;
WGLEW_VAR_EXPORT GLboolean __WGLEW_NV_render_texture_rectangle;
WGLEW_VAR_EXPORT GLboolean __WGLEW_NV_swap_group;
WGLEW_VAR_EXPORT GLboolean __WGLEW_NV_vertex_array_range;
WGLEW_VAR_EXPORT GLboolean __WGLEW_NV_video_capture;
WGLEW_VAR_EXPORT GLboolean __WGLEW_NV_video_output;
WGLEW_VAR_EXPORT GLboolean __WGLEW_OML_sync_control;
#ifdef GLEW_MX
}; /* WGLEWContextStruct */
#endif /* GLEW_MX */
/* ------------------------------------------------------------------------- */
#ifdef GLEW_MX
typedef struct WGLEWContextStruct WGLEWContext;
GLEWAPI GLenum GLEWAPIENTRY wglewContextInit (WGLEWContext *ctx);
GLEWAPI GLboolean GLEWAPIENTRY wglewContextIsSupported (const WGLEWContext *ctx, const char *name);
#define wglewInit() wglewContextInit(wglewGetContext())
#define wglewIsSupported(x) wglewContextIsSupported(wglewGetContext(), x)
#define WGLEW_GET_VAR(x) (*(const GLboolean*)&(wglewGetContext()->x))
#define WGLEW_GET_FUN(x) wglewGetContext()->x
#else /* GLEW_MX */
#define WGLEW_GET_VAR(x) (*(const GLboolean*)&x)
#define WGLEW_GET_FUN(x) x
GLEWAPI GLboolean GLEWAPIENTRY wglewIsSupported (const char *name);
#endif /* GLEW_MX */
GLEWAPI GLboolean GLEWAPIENTRY wglewGetExtension (const char *name);
#ifdef __cplusplus
}
#endif
#undef GLEWAPI
#endif /* __wglew_h__ */
/*
for tracking IP/port ranges
*/
#include "ranges.h"
#include "templ-port.h"
#include <assert.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#define BUCKET_COUNT 16
#define REGRESS(x) if (!(x)) return (fprintf(stderr, "regression failed %s:%u\n", __FILE__, __LINE__)|1)
/***************************************************************************
***************************************************************************/
int
rangelist_is_contains(const struct RangeList *task, unsigned number)
{
unsigned i;
for (i=0; i<task->count; i++) {
struct Range *range = &task->list[i];
if (range->begin <= number && number <= range->end)
return 1;
}
return 0;
}
/***************************************************************************
 * Remove the entry at 'index' from the range list, shifting the
 * entries that follow it down by one.
***************************************************************************/
static void
todo_remove_at(struct RangeList *task, unsigned index)
{
/* shift the entries after 'index' down by one; there are
 * (count - index - 1) of them, so copying (count - index) entries
 * would read one element past the end of the list */
memmove(&task->list[index],
&task->list[index+1],
(task->count - index - 1) * sizeof(task->list[index])
);
task->count--;
}
/***************************************************************************
* Test if two ranges overlap
***************************************************************************/
static int
range_is_overlap(struct Range lhs, struct Range rhs)
{
if (lhs.begin < rhs.begin) {
if (lhs.end == 0xFFFFFFFF || lhs.end + 1 >= rhs.begin)
return 1;
}
if (lhs.begin >= rhs.begin) {
if (lhs.end <= rhs.end)
return 1;
}
if (rhs.begin < lhs.begin) {
if (rhs.end == 0xFFFFFFFF || rhs.end + 1 >= lhs.begin)
return 1;
}
if (rhs.begin >= lhs.begin) {
if (rhs.end <= lhs.end)
return 1;
}
return 0;
}
/***************************************************************************
* Combine two ranges, such as when they overlap.
***************************************************************************/
static void
range_combine(struct Range *lhs, struct Range rhs)
{
if (lhs->begin > rhs.begin)
lhs->begin = rhs.begin;
if (lhs->end < rhs.end)
lhs->end = rhs.end;
}
/***************************************************************************
* Add the IPv4 range to our list of ranges.
***************************************************************************/
void
rangelist_add_range(struct RangeList *task, unsigned begin, unsigned end)
{
unsigned i;
struct Range range;
range.begin = begin;
range.end = end;
/* auto-expand the list if necessary */
if (task->count + 1 >= task->max) {
size_t new_max = (size_t)task->max * 2 + 1;
struct Range *new_list;
if (new_max >= SIZE_MAX/sizeof(*new_list))
exit(1); /* integer overflow */
new_list = (struct Range *)malloc(sizeof(*new_list) * new_max);
if (new_list == NULL)
exit(1); /* out of memory */
memcpy(new_list, task->list, task->count * sizeof(*new_list));
if (task->list)
free(task->list);
task->list = new_list;
task->max = (unsigned)new_max;
}
/* See if the range overlaps any existing range already in the
* list */
for (i = 0; i < task->count; i++) {
if (range_is_overlap(task->list[i], range)) {
range_combine(&range, task->list[i]);
todo_remove_at(task, i);
rangelist_add_range(task, range.begin, range.end);
return;
}
}
/* Find a spot to insert in sorted order */
for (i = 0; i < task->count; i++) {
if (range.begin < task->list[i].begin) {
memmove(task->list+i+1, task->list+i, (task->count - i) * sizeof(task->list[0]));
break;
}
}
/* Add to end of list */
task->list[i].begin = begin;
task->list[i].end = end;
task->count++;
}
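/* Illustrative usage sketch (not part of the original file): adding
 * overlapping or adjacent ranges is expected to merge them into a single
 * sorted entry. Assumes a zero-initialized RangeList as declared in
 * ranges.h.
 *
 *     struct RangeList targets = {0};
 *     rangelist_add_range(&targets, 10, 20);
 *     rangelist_add_range(&targets, 15, 30);   // overlaps  -> merged to [10,30]
 *     rangelist_add_range(&targets, 31, 40);   // adjacent  -> merged to [10,40]
 *     rangelist_add_range(&targets, 100, 200); // disjoint  -> second entry
 *     // targets.count == 2
 */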
/***************************************************************************
***************************************************************************/
void
rangelist_remove_all(struct RangeList *tasks)
{
if (tasks->list) {
free(tasks->list);
memset(tasks, 0, sizeof(*tasks));
}
}
/***************************************************************************
***************************************************************************/
void
rangelist_remove_range(struct RangeList *task, unsigned begin, unsigned end)
{
unsigned i;
struct Range x;
x.begin = begin;
x.end = end;
/* See if the range overlaps any existing range already in the
* list */
for (i = 0; i < task->count; i++) {
if (!range_is_overlap(task->list[i], x))
continue;
/* If the removal-range wholly covers the range, delete
* it completely */
if (begin <= task->list[i].begin && end >= task->list[i].end) {
todo_remove_at(task, i);
i--;
continue;
}
/* If the removal-range bisects the target-range, truncate
* the lower end and add a new high-end */
if (begin > task->list[i].begin && end < task->list[i].end) {
struct Range newrange;
newrange.begin = end+1;
newrange.end = task->list[i].end;
task->list[i].end = begin-1;
rangelist_add_range(task, newrange.begin, newrange.end);
i--;
continue;
}
/* If overlap on the lower side */
if (end >= task->list[i].begin && end < task->list[i].end) {
task->list[i].begin = end+1;
}
/* If overlap on the upper side */
if (begin > task->list[i].begin && begin <= task->list[i].end) {
task->list[i].end = begin-1;
}
//assert(!"impossible");
}
}
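/* Illustrative sketch (hypothetical, not from the original source):
 * removing a chunk from the middle of a range splits it in two,
 * matching the "bisects" case handled above.
 *
 *     struct RangeList targets = {0};
 *     rangelist_add_range(&targets, 0, 100);
 *     rangelist_remove_range(&targets, 40, 60);
 *     // targets now holds [0,39] and [61,100]; targets.count == 2
 */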
static void
rangelist_add_range2(struct RangeList *task, struct Range range)
{
rangelist_add_range(task, range.begin, range.end);
}
void
rangelist_remove_range2(struct RangeList *task, struct Range range)
{
rangelist_remove_range(task, range.begin, range.end);
}
/***************************************************************************
* Parse an IPv4 address from a line of text, moving the offset forward
* to the first non-IPv4 character
***************************************************************************/
static int
parse_ipv4(const char *line, unsigned *inout_offset, unsigned max, unsigned *ipv4)
{
unsigned offset = *inout_offset;
unsigned result = 0;
unsigned i;
for (i=0; i<4; i++) {
unsigned x = 0;
unsigned digits = 0;
if (offset >= max)
return -4;
if (!isdigit(line[offset]&0xFF))
return -1;
/* clear leading zeros */
while (offset < max && line[offset] == '0')
offset++;
/* parse maximum of 3 digits */
while (offset < max && isdigit(line[offset]&0xFF)) {
x = x * 10 + (line[offset] - '0');
offset++;
if (++digits > 3)
return -2;
}
if (x > 255)
return -5;
result = result * 256 + (x & 0xFF);
if (i == 3)
break;
if (line[offset] != '.')
return -3;
offset++; /* skip dot */
}
*inout_offset = offset;
*ipv4 = result;
return 0; /* parse ok */
}
/****************************************************************************
* Parse from text an IPv4 address range. This can be in one of several
* formats:
* - '192.168.1.1" - a single address
* - '192.168.1.0/24" - a CIDR spec
* - '192.168.1.0-192.168.1.255' - a range
* @param line
* Part of a line of text, probably read from a commandline or conf
* file.
* @param inout_offset
* On input, the offset from the start of the line where the address
* starts. On output, the offset of the first character after the
* range, or equal to 'max' if the line prematurely ended.
* @param max
* The maximum length of the line.
* @return
* The first and last address of the range, inclusive.
****************************************************************************/
struct Range
range_parse_ipv4(const char *line, unsigned *inout_offset, unsigned max)
{
unsigned offset;
struct Range result;
static const struct Range badrange = {0xFFFFFFFF, 0};
int err;
if (line == NULL)
return badrange;
if (inout_offset == NULL) {
inout_offset = &offset;
offset = 0;
max = (unsigned)strlen(line);
} else
offset = *inout_offset;
/* trim whitespace */
while (offset < max && isspace(line[offset]&0xFF))
offset++;
/* get the first IP address */
err = parse_ipv4(line, &offset, max, &result.begin);
if (err) {
return badrange;
}
result.end = result.begin;
/* trim whitespace */
while (offset < max && isspace(line[offset]&0xFF))
offset++;
/* If only one IP address was given, return it as a single-address range */
if (offset >= max)
goto end;
/*
* Handle CIDR address of the form "10.0.0.0/8"
*/
if (line[offset] == '/') {
uint64_t prefix = 0;
uint64_t mask = 0;
unsigned digits = 0;
/* skip slash */
offset++;
if (!isdigit(line[offset]&0xFF)) {
return badrange;
}
/* strip leading zeroes */
while (offset<max && line[offset] == '0')
offset++;
/* parse decimal integer */
while (offset<max && isdigit(line[offset]&0xFF)) {
prefix = prefix * 10 + (line[offset++] - '0');
if (++digits > 2)
return badrange;
}
if (prefix > 32)
return badrange;
/* Create the mask from the prefix */
mask = 0xFFFFFFFF00000000ULL >> prefix;
        /* Mask off any host bits set in the starting address
         * TODO: print a warning when bits are discarded */
result.begin &= mask;
/* Set all suffix bits to 1, so that 192.168.1.0/24 has
* an ending address of 192.168.1.255. */
result.end = result.begin | (unsigned)~mask;
goto end;
}
/*
* Handle a dashed range like "10.0.0.100-10.0.0.200"
*/
if (offset<max && line[offset] == '-') {
unsigned ip;
offset++;
err = parse_ipv4(line, &offset, max, &ip);
if (err)
return badrange;
if (ip < result.begin) {
result.begin = 0xFFFFFFFF;
result.end = 0x00000000;
fprintf(stderr, "err: ending addr %u.%u.%u.%u cannot come before starting addr %u.%u.%u.%u\n",
((ip>>24)&0xFF), ((ip>>16)&0xFF), ((ip>>8)&0xFF), ((ip>>0)&0xFF),
((result.begin>>24)&0xFF), ((result.begin>>16)&0xFF), ((result.begin>>8)&0xFF), ((result.begin>>0)&0xFF)
);
} else
result.end = ip;
goto end;
}
end:
*inout_offset = offset;
return result;
}
/***************************************************************************
***************************************************************************/
uint64_t
rangelist_exclude( struct RangeList *targets,
const struct RangeList *excludes)
{
uint64_t count = 0;
unsigned i;
for (i=0; i<excludes->count; i++) {
struct Range range = excludes->list[i];
count += range.end - range.begin + 1;
rangelist_remove_range(targets, range.begin, range.end);
}
return count;
}
/***************************************************************************
***************************************************************************/
uint64_t
rangelist_count(const struct RangeList *targets)
{
unsigned i;
uint64_t result = 0;
for (i=0; i<targets->count; i++) {
result += (uint64_t)targets->list[i].end - (uint64_t)targets->list[i].begin + 1UL;
}
return result;
}
/***************************************************************************
 * Gets the indexed port/address.
 *
 * Note that this requires a search of all the ranges. Currently, this is
 * done by a linear search of the ranges. This needs to change, because
* once we start adding in a lot of "exclude ranges", the address space
* will get fragmented, and the linear search will take too long.
***************************************************************************/
unsigned
rangelist_pick(const struct RangeList *targets, uint64_t index)
{
unsigned i;
for (i=0; i<targets->count; i++) {
uint64_t range = (uint64_t)targets->list[i].end - (uint64_t)targets->list[i].begin + 1UL;
if (index < range)
return (unsigned)(targets->list[i].begin + index);
else
index -= range;
}
assert(!"end of list");
return 0;
}
/***************************************************************************
 * The normal "pick" function is a linear search, which is slow when there
 * are a lot of ranges. Therefore, "pick2" builds a table of cumulative
 * offsets so that a binary search can be used instead, which is a lot
 * faster. We choose a binary search because it's the most cache-efficient,
 * having the least overhead to fit within the cache.
***************************************************************************/
unsigned *
rangelist_pick2_create(struct RangeList *targets)
{
unsigned *picker;
unsigned i;
unsigned total = 0;
if (((size_t)targets->count) >= (size_t)(SIZE_MAX/sizeof(*picker)))
exit(1); /* integer overflow */
else
picker = (unsigned *)malloc(targets->count * sizeof(*picker));
if (picker == NULL)
exit(1); /* out of memory */
for (i=0; i<targets->count; i++) {
picker[i] = total;
total += targets->list[i].end - targets->list[i].begin + 1;
}
return picker;
}
/***************************************************************************
***************************************************************************/
void
rangelist_pick2_destroy(unsigned *picker)
{
if (picker)
free(picker);
}
/***************************************************************************
***************************************************************************/
unsigned
rangelist_pick2(const struct RangeList *targets, uint64_t index, const unsigned *picker)
{
unsigned maxmax = targets->count;
unsigned min = 0;
unsigned max = targets->count;
unsigned mid;
for (;;) {
mid = min + (max-min)/2;
if (index < picker[mid]) {
max = mid;
continue;
        } else { /* index >= picker[mid] */
if (mid + 1 == maxmax)
break;
else if (index < picker[mid+1])
break;
else
min = mid+1;
}
}
return (unsigned)(targets->list[mid].begin + (index - picker[mid]));
}
/***************************************************************************
* Provide my own rand() simply to avoid static-analysis warning me that
* 'rand()' is unrandom, when in fact we want the non-random properties of
* rand() for regression testing.
***************************************************************************/
static unsigned
r_rand(unsigned *seed)
{
static const unsigned a = 214013;
static const unsigned c = 2531011;
*seed = (*seed) * a + c;
return (*seed)>>16 & 0x7fff;
}
/***************************************************************************
***************************************************************************/
static int
regress_pick2(void)
{
unsigned i;
unsigned seed = 0;
/*
* Run 100 randomized regression tests
*/
for (i=0; i<100; i++) {
unsigned j;
unsigned num_targets;
unsigned begin = 0;
unsigned end;
struct RangeList targets[1];
struct RangeList duplicate[1];
unsigned *picker;
unsigned range;
/* Create a new target list */
memset(targets, 0, sizeof(targets[0]));
/* fill the target list with random ranges */
num_targets = r_rand(&seed)%5 + 1;
for (j=0; j<num_targets; j++) {
begin += r_rand(&seed)%10;
end = begin + r_rand(&seed)%10;
rangelist_add_range(targets, begin, end);
}
range = (unsigned)rangelist_count(targets);
/* Create a "picker" */
picker = rangelist_pick2_create(targets);
/* Duplicate the targetlist using the picker */
memset(duplicate, 0, sizeof(duplicate[0]));
for (j=0; j<range; j++) {
unsigned x;
x = rangelist_pick2(targets, j, picker);
rangelist_add_range(duplicate, x, x);
}
        /* at this point, the two range lists should be identical */
REGRESS(targets->count == duplicate->count);
REGRESS(memcmp(targets->list, duplicate->list, targets->count*sizeof(targets->list[0])) == 0);
rangelist_remove_all(targets);
rangelist_remove_all(duplicate);
rangelist_pick2_destroy(picker);
}
return 0;
}
/***************************************************************************
 * This returns a character pointer to where parsing ended so that the
 * caller can handle multiple specifications on the same line
***************************************************************************/
const char *
rangelist_parse_ports(struct RangeList *ports, const char *string, unsigned *is_error)
{
char *p = (char*)string;
*is_error = 0;
while (*p) {
unsigned port;
unsigned end;
unsigned proto_offset = 0;
/* skip whitespace */
while (*p && isspace(*p & 0xFF))
p++;
/* end at comment */
if (*p == 0 || *p == '#')
break;
        /* special processing. Nmap allows ports to be prefixed with a
         * character to clarify TCP, UDP, or SCTP */
if (isalpha(*p&0xFF) && p[1] == ':') {
switch (*p) {
case 'T': case 't':
proto_offset = 0;
break;
case 'U': case 'u':
proto_offset = Templ_UDP;
break;
case 'S': case 's':
proto_offset = Templ_SCTP;
break;
case 'I': case 'i':
proto_offset = Templ_ICMP_echo;
break;
default:
                fprintf(stderr, "bad port character = %c\n", p[0]);
*is_error = 1;
return p;
}
p += 2;
}
if (!isdigit(p[0] & 0xFF))
break;
port = strtoul(p, &p, 0);
end = port;
if (*p == '-') {
p++;
end = strtoul(p, &p, 0);
}
if (port > 0xFFFF || end > 0xFFFF || end < port) {
fprintf(stderr, "bad ports: %u-%u\n", port, end);
*is_error = 2;
return p;
} else {
rangelist_add_range(ports, port+proto_offset, end+proto_offset);
}
if (*p == ',')
p++;
else
break;
}
return p;
}
/***************************************************************************
* Called during "make regress" to run a regression test over this module.
***************************************************************************/
int
ranges_selftest(void)
{
struct Range r;
struct RangeList task[1];
REGRESS(regress_pick2() == 0);
memset(task, 0, sizeof(task[0]));
#define ERROR() fprintf(stderr, "selftest: failed %s:%u\n", __FILE__, __LINE__);
/* test for the /0 CIDR block, since we'll be using that a lot to scan the entire
* Internet */
r = range_parse_ipv4("0.0.0.0/0", 0, 0);
REGRESS(r.begin == 0 && r.end == 0xFFFFFFFF);
r = range_parse_ipv4("0.0.0./0", 0, 0);
REGRESS(r.begin > r.end);
r = range_parse_ipv4("75.748.86.91", 0, 0);
REGRESS(r.begin > r.end);
r = range_parse_ipv4("23.75.345.200", 0, 0);
REGRESS(r.begin > r.end);
r = range_parse_ipv4("192.1083.0.1", 0, 0);
REGRESS(r.begin > r.end);
r = range_parse_ipv4("192.168.1.3", 0, 0);
if (r.begin != 0xc0a80103 || r.end != 0xc0a80103) {
fprintf(stderr, "r.begin = 0x%08x r.end = 0x%08x\n", r.begin, r.end);
ERROR();
return 1;
}
r = range_parse_ipv4("10.0.0.20-10.0.0.30", 0, 0);
if (r.begin != 0x0A000000+20 || r.end != 0x0A000000+30) {
fprintf(stderr, "r.begin = 0x%08x r.end = 0x%08x\n", r.begin, r.end);
ERROR();
return 1;
}
r = range_parse_ipv4("10.0.1.2/16", 0, 0);
if (r.begin != 0x0A000000 || r.end != 0x0A00FFFF) {
fprintf(stderr, "r.begin = 0x%08x r.end = 0x%08x\n", r.begin, r.end);
ERROR();
return 1;
}
rangelist_add_range2(task, range_parse_ipv4("10.0.0.0/24", 0, 0));
rangelist_add_range2(task, range_parse_ipv4("10.0.1.10-10.0.1.19", 0, 0));
rangelist_add_range2(task, range_parse_ipv4("10.0.1.20-10.0.1.30", 0, 0));
rangelist_add_range2(task, range_parse_ipv4("10.0.0.0-10.0.1.12", 0, 0));
if (task->count != 1) {
fprintf(stderr, "count = %u\n", task->count);
ERROR();
return 1;
}
if (task->list[0].begin != 0x0a000000 || task->list[0].end != 0x0a000100+30) {
fprintf(stderr, "r.begin = 0x%08x r.end = 0x%08x\n", task->list[0].begin, task->list[0].end);
ERROR();
return 1;
}
/*
* Test removal
*/
memset(task, 0, sizeof(task[0]));
rangelist_add_range2(task, range_parse_ipv4("10.0.0.0/8", 0, 0));
/* These removals shouldn't change anything */
rangelist_remove_range2(task, range_parse_ipv4("9.255.255.255", 0, 0));
rangelist_remove_range2(task, range_parse_ipv4("11.0.0.0/16", 0, 0));
rangelist_remove_range2(task, range_parse_ipv4("192.168.0.0/16", 0, 0));
if (task->count != 1
|| task->list->begin != 0x0a000000
|| task->list->end != 0x0aFFFFFF) {
ERROR();
return 1;
}
/* These removals should remove a bit from the edges */
rangelist_remove_range2(task, range_parse_ipv4("1.0.0.0-10.0.0.0", 0, 0));
rangelist_remove_range2(task, range_parse_ipv4("10.255.255.255-11.0.0.0", 0, 0));
if (task->count != 1
|| task->list->begin != 0x0a000001
|| task->list->end != 0x0aFFFFFE) {
ERROR();
return 1;
}
/* remove things from the middle */
rangelist_remove_range2(task, range_parse_ipv4("10.10.0.0/16", 0, 0));
rangelist_remove_range2(task, range_parse_ipv4("10.20.0.0/16", 0, 0));
if (task->count != 3) {
ERROR();
return 1;
}
rangelist_remove_range2(task, range_parse_ipv4("10.12.0.0/16", 0, 0));
if (task->count != 4) {
ERROR();
return 1;
}
rangelist_remove_range2(task, range_parse_ipv4("10.10.10.10-10.12.12.12", 0, 0));
if (task->count != 3) {
ERROR();
return 1;
}
/* test ports */
{
unsigned is_error = 0;
memset(task, 0, sizeof(task[0]));
rangelist_parse_ports(task, "80,1000-2000,1234,4444", &is_error);
if (task->count != 3 || is_error) {
ERROR();
return 1;
}
if (task->list[0].begin != 80 || task->list[0].end != 80 ||
task->list[1].begin != 1000 || task->list[1].end != 2000 ||
task->list[2].begin != 4444 || task->list[2].end != 4444) {
ERROR();
return 1;
}
}
return 0;
}
| {
"language": "C"
} |
/*
* Author: Garrett Barboza <garrett.barboza@kapricasecurity.com>
*
* Copyright (c) 2014 Kaprica Security, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#include "libcgc.h"
#include <malloc.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
void *realloc(void *ptr, size_t size)
{
    /* realloc(NULL, size) is equivalent to malloc(size) */
    if (ptr == NULL)
        return malloc(size);
    /* realloc(ptr, 0) frees the block and returns NULL */
    if (size == 0) {
        free(ptr);
        return NULL;
    }
    void *new = malloc(size);
    if (new == NULL)
        return NULL;
    /* Recover the allocator's block header, which sits HEADER_PADDING
     * bytes before the user pointer, to learn the old usable size. */
    struct blk_t *blk = (struct blk_t *)((intptr_t)ptr - HEADER_PADDING);
    /* Copy the smaller of the old and new sizes, then release the old block. */
    if (size < blk->size - HEADER_PADDING)
        memcpy(new, ptr, size);
    else
        memcpy(new, ptr, blk->size - HEADER_PADDING);
    free(ptr);
    return new;
}
| {
"language": "C"
} |
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright(c) 2015-18 Intel Corporation.
*/
/*
* This file defines data structures used in Machine Driver for Intel
* platforms with HDA Codecs.
*/
#ifndef __SKL_HDA_DSP_COMMON_H
#define __SKL_HDA_DSP_COMMON_H
#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/jack.h>
#include <sound/hda_codec.h>
#include "../../codecs/hdac_hda.h"
#include "hda_dsp_common.h"
#define HDA_DSP_MAX_BE_DAI_LINKS 7
struct skl_hda_hdmi_pcm {
struct list_head head;
struct snd_soc_dai *codec_dai;
struct snd_soc_jack hdmi_jack;
int device;
};
struct skl_hda_private {
struct list_head hdmi_pcm_list;
int pcm_count;
int dai_index;
const char *platform_name;
bool common_hdmi_codec_drv;
bool idisp_codec;
};
extern struct snd_soc_dai_link skl_hda_be_dai_links[HDA_DSP_MAX_BE_DAI_LINKS];
int skl_hda_hdmi_jack_init(struct snd_soc_card *card);
int skl_hda_hdmi_add_pcm(struct snd_soc_card *card, int device);
/*
* Search card topology and register HDMI PCM related controls
* to codec driver.
*/
static inline int skl_hda_hdmi_build_controls(struct snd_soc_card *card)
{
struct skl_hda_private *ctx = snd_soc_card_get_drvdata(card);
struct snd_soc_component *component;
struct skl_hda_hdmi_pcm *pcm;
/* HDMI disabled, do not create controls */
if (list_empty(&ctx->hdmi_pcm_list))
return 0;
pcm = list_first_entry(&ctx->hdmi_pcm_list, struct skl_hda_hdmi_pcm,
head);
component = pcm->codec_dai->component;
if (!component)
return -EINVAL;
return hda_dsp_hdmi_build_controls(card, component);
}
#endif /* __SKL_HDA_DSP_COMMON_H */
| {
"language": "C"
} |
/**
* Marlin 3D Printer Firmware
* Copyright (c) 2020 MarlinFirmware [https://github.com/MarlinFirmware/Marlin]
*
* Based on Sprinter and grbl.
* Copyright (c) 2011 Camiel Gubbels / Erik van der Zalm
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
*/
#pragma once
/**
* Sethi 3D_1 pin assignments - www.sethi3d.com.br
*/
/**
* Rev B 26 DEC 2016
*
* added pointer to a current Arduino IDE extension
* this assumes that this board uses the Sanguino pin map
*/
/**
* A useable Arduino IDE extension (board manager) can be found at
* https://github.com/Lauszus/Sanguino
*
* This extension has been tested on Arduino 1.6.12 & 1.8.0
*
* Here's the JSON path:
* https://raw.githubusercontent.com/Lauszus/Sanguino/master/package_lauszus_sanguino_index.json
*
* When installing select 1.0.2
*
* Installation instructions can be found at https://learn.sparkfun.com/pages/CustomBoardsArduino
* Just use the above JSON URL instead of Sparkfun's JSON.
*
* Once installed select the Sanguino board and then select the CPU.
*
*/
#if !defined(__AVR_ATmega644P__) && !defined(__AVR_ATmega644__) && !defined(__AVR_ATmega1284P__)
#error "Oops! Select 'Sanguino' in 'Tools > Boards' and 'ATmega644', 'ATmega644P', or 'ATmega1284P' in 'Tools > Processor.'"
#endif
#define BOARD_INFO_NAME "Sethi 3D_1"
#ifndef GEN7_VERSION
#define GEN7_VERSION 12 // v1.x
#endif
//
// Limit Switches
//
#define X_STOP_PIN 2
#define Y_STOP_PIN 0
#define Z_MIN_PIN 1
#define Z_MAX_PIN 0
//
// Steppers
//
#define X_STEP_PIN 19
#define X_DIR_PIN 18
#define X_ENABLE_PIN 24
#define Y_STEP_PIN 23
#define Y_DIR_PIN 22
#define Y_ENABLE_PIN 24
#define Z_STEP_PIN 26
#define Z_DIR_PIN 25
#define Z_ENABLE_PIN 24
#define E0_STEP_PIN 28
#define E0_DIR_PIN 27
#define E0_ENABLE_PIN 24
//
// Temperature Sensors
//
#define TEMP_0_PIN 1 // Analog Input
#define TEMP_BED_PIN 2 // Analog Input
//
// Heaters / Fans
//
#define HEATER_0_PIN 4
#define HEATER_BED_PIN 3
#ifndef FAN_PIN
#if GEN7_VERSION >= 13
// Gen7 v1.3 removed the fan pin
#define FAN_PIN -1
#else
#define FAN_PIN 31
#endif
#endif
//
// Misc. Functions
//
#define PS_ON_PIN 15
// All these generations of Gen7 supply thermistor power
// via PS_ON, so ignore bad thermistor readings
//#define BOGUS_TEMPERATURE_GRACE_PERIOD 2000
// our pin for debugging.
#define DEBUG_PIN 0
// our RS485 pins
#define TX_ENABLE_PIN 12
#define RX_ENABLE_PIN 13
| {
"language": "C"
} |
/****************************************************************************
**
** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
** Contact: http://www.qt-project.org/legal
**
** This file is part of Qt Creator.
**
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and Digia. For licensing terms and
** conditions see http://qt.digia.com/licensing. For further information
** use the contact form at http://qt.digia.com/contact-us.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 2.1 as published by the Free Software
** Foundation and appearing in the file LICENSE.LGPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU Lesser General Public License version 2.1 requirements
** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** In addition, as a special exception, Digia gives you certain additional
** rights. These rights are described in the Digia Qt LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
****************************************************************************/
#ifndef APPMAINWINDOW_H
#define APPMAINWINDOW_H
#include "utils_global.h"
#include <QMainWindow>
namespace Utils {
class QTCREATOR_UTILS_EXPORT AppMainWindow : public QMainWindow
{
Q_OBJECT
public:
AppMainWindow();
void raiseWindow();
signals:
void deviceChange();
#ifdef Q_OS_WIN
protected:
virtual bool winEvent(MSG *message, long *result);
virtual bool event(QEvent *event);
#endif
private:
const int m_deviceEventId;
};
} // Utils
#endif // APPMAINWINDOW_H
| {
"language": "C"
} |
/*
* INTC device simulation in PKUnity SoC
*
* Copyright (C) 2010-2012 Guan Xuetao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation, or any later version.
* See the COPYING file in the top-level directory.
*/
#include "hw/sysbus.h"
#undef DEBUG_PUV3
#include "hw/unicore32/puv3.h"
typedef struct {
SysBusDevice busdev;
MemoryRegion iomem;
qemu_irq parent_irq;
uint32_t reg_ICMR;
uint32_t reg_ICPR;
} PUV3INTCState;
/* Update interrupt status after enabled or pending bits have been changed. */
static void puv3_intc_update(PUV3INTCState *s)
{
if (s->reg_ICMR & s->reg_ICPR) {
qemu_irq_raise(s->parent_irq);
} else {
qemu_irq_lower(s->parent_irq);
}
}
/* Process a change in an external INTC input. */
static void puv3_intc_handler(void *opaque, int irq, int level)
{
PUV3INTCState *s = opaque;
DPRINTF("irq 0x%x, level 0x%x\n", irq, level);
if (level) {
s->reg_ICPR |= (1 << irq);
} else {
s->reg_ICPR &= ~(1 << irq);
}
puv3_intc_update(s);
}
static uint64_t puv3_intc_read(void *opaque, hwaddr offset,
unsigned size)
{
PUV3INTCState *s = opaque;
uint32_t ret = 0;
switch (offset) {
case 0x04: /* INTC_ICMR */
ret = s->reg_ICMR;
break;
case 0x0c: /* INTC_ICIP */
ret = s->reg_ICPR; /* the same value with ICPR */
break;
default:
DPRINTF("Bad offset %x\n", (int)offset);
}
DPRINTF("offset 0x%x, value 0x%x\n", offset, ret);
return ret;
}
static void puv3_intc_write(void *opaque, hwaddr offset,
uint64_t value, unsigned size)
{
PUV3INTCState *s = opaque;
DPRINTF("offset 0x%x, value 0x%x\n", offset, value);
switch (offset) {
case 0x00: /* INTC_ICLR */
case 0x14: /* INTC_ICCR */
break;
case 0x04: /* INTC_ICMR */
s->reg_ICMR = value;
break;
default:
DPRINTF("Bad offset 0x%x\n", (int)offset);
return;
}
puv3_intc_update(s);
}
static const MemoryRegionOps puv3_intc_ops = {
.read = puv3_intc_read,
.write = puv3_intc_write,
.impl = {
.min_access_size = 4,
.max_access_size = 4,
},
.endianness = DEVICE_NATIVE_ENDIAN,
};
static int puv3_intc_init(SysBusDevice *dev)
{
PUV3INTCState *s = FROM_SYSBUS(PUV3INTCState, dev);
qdev_init_gpio_in(&s->busdev.qdev, puv3_intc_handler, PUV3_IRQS_NR);
sysbus_init_irq(&s->busdev, &s->parent_irq);
s->reg_ICMR = 0;
s->reg_ICPR = 0;
memory_region_init_io(&s->iomem, &puv3_intc_ops, s, "puv3_intc",
PUV3_REGS_OFFSET);
sysbus_init_mmio(dev, &s->iomem);
return 0;
}
static void puv3_intc_class_init(ObjectClass *klass, void *data)
{
SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);
sdc->init = puv3_intc_init;
}
static const TypeInfo puv3_intc_info = {
.name = "puv3_intc",
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(PUV3INTCState),
.class_init = puv3_intc_class_init,
};
static void puv3_intc_register_type(void)
{
type_register_static(&puv3_intc_info);
}
type_init(puv3_intc_register_type)
| {
"language": "C"
} |
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_C62X_HW_DATA_H_
#define ADF_C62X_HW_DATA_H_
/* PCIe configuration space */
#define ADF_C62X_SRAM_BAR 0
#define ADF_C62X_PMISC_BAR 1
#define ADF_C62X_ETR_BAR 2
#define ADF_C62X_RX_RINGS_OFFSET 8
#define ADF_C62X_TX_RINGS_MASK 0xFF
#define ADF_C62X_MAX_ACCELERATORS 5
#define ADF_C62X_MAX_ACCELENGINES 10
#define ADF_C62X_ACCELERATORS_REG_OFFSET 16
#define ADF_C62X_ACCELERATORS_MASK 0x1F
#define ADF_C62X_ACCELENGINES_MASK 0x3FF
#define ADF_C62X_ETR_MAX_BANKS 16
#define ADF_C62X_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
#define ADF_C62X_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
#define ADF_C62X_SMIA0_MASK 0xFFFF
#define ADF_C62X_SMIA1_MASK 0x1
/* Error detection and correction */
#define ADF_C62X_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
#define ADF_C62X_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
#define ADF_C62X_ENABLE_AE_ECC_ERR BIT(28)
#define ADF_C62X_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
#define ADF_C62X_UERRSSMSH(i) (i * 0x4000 + 0x18)
#define ADF_C62X_CERRSSMSH(i) (i * 0x4000 + 0x10)
#define ADF_C62X_ERRSSMSH_EN BIT(3)
#define ADF_C62X_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
#define ADF_C62X_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
/* Firmware Binary */
#define ADF_C62X_FW "qat_c62x.bin"
#define ADF_C62X_MMP "qat_c62x_mmp.bin"
void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data);
#endif
| {
"language": "C"
} |
#ifndef _STD_HEADER_INCLUDED_
#define _STD_HEADER_INCLUDED_
// basic types
typedef unsigned char byte_;
typedef unsigned short word_;
typedef unsigned int dword_;
#ifdef UNICODE
typedef wchar_t char_;
#else
typedef char char_;
#endif
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif
#if defined(linux) || defined(__linux)
#endif
#endif
| {
"language": "C"
} |
/*
* Copyright (C) 2020 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
*/
#include "shared/source/os_interface/linux/drm_neo.h"
#include "opencl/source/dll/linux/devices/device_ids.h"
#include "test.h"
#include <array>
using namespace NEO;
TEST(RklDeviceIdTest, supportedDeviceId) {
std::array<DeviceDescriptor, 6> expectedDescriptors = {{{DEVICE_ID_4C80, &RKL_HW_CONFIG::hwInfo, &RKL_HW_CONFIG::setupHardwareInfo, GTTYPE_GT1},
{DEVICE_ID_4C8A, &RKL_HW_CONFIG::hwInfo, &RKL_HW_CONFIG::setupHardwareInfo, GTTYPE_GT1},
{DEVICE_ID_4C8B, &RKL_HW_CONFIG::hwInfo, &RKL_HW_CONFIG::setupHardwareInfo, GTTYPE_GT1},
{DEVICE_ID_4C8C, &RKL_HW_CONFIG::hwInfo, &RKL_HW_CONFIG::setupHardwareInfo, GTTYPE_GT0_5},
{DEVICE_ID_4C90, &RKL_HW_CONFIG::hwInfo, &RKL_HW_CONFIG::setupHardwareInfo, GTTYPE_GT1},
{DEVICE_ID_4C9A, &RKL_HW_CONFIG::hwInfo, &RKL_HW_CONFIG::setupHardwareInfo, GTTYPE_GT1}}};
auto compareStructs = [](const DeviceDescriptor *first, const DeviceDescriptor *second) {
return first->deviceId == second->deviceId && first->pHwInfo == second->pHwInfo &&
first->setupHardwareInfo == second->setupHardwareInfo && first->eGtType == second->eGtType;
};
size_t startIndex = 0;
while (!compareStructs(&expectedDescriptors[0], &deviceDescriptorTable[startIndex]) &&
deviceDescriptorTable[startIndex].deviceId != 0) {
startIndex++;
};
EXPECT_NE(0u, deviceDescriptorTable[startIndex].deviceId);
for (auto &expected : expectedDescriptors) {
EXPECT_TRUE(compareStructs(&expected, &deviceDescriptorTable[startIndex]));
startIndex++;
}
}
| {
"language": "C"
} |
/******************************** -*- C -*- ****************************
*
* Platform-independent layer (i386 version)
*
***********************************************************************/
/***********************************************************************
*
* Copyright 2000,2001,2002,2003,2006,2010 Free Software Foundation, Inc.
*
* This file is part of GNU lightning.
*
* GNU lightning is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 3, or (at your option)
* any later version.
*
* GNU lightning is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
* License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with GNU lightning; see the file COPYING.LESSER; if not, write to the
* Free Software Foundation, 59 Temple Place - Suite 330, Boston,
* MA 02111-1307, USA.
*
* Authors:
* Matthew Flatt
* Paolo Bonzini
* Paulo Cesar Pereira de Andrade
***********************************************************************/
#ifndef __lightning_core_h
#define __lightning_core_h
static const jit_gpr_t
jit_arg_reg_order[] = {
_RDI, _RSI, _RDX, _RCX, _R8, _R9
};
#define JIT_REXTMP _R12
/* Number or integer argument registers */
#define JIT_ARG_MAX 6
/* Number of float argument registers */
#define JIT_FP_ARG_MAX 8
#define JIT_R_NUM 3
static const jit_gpr_t
jit_r_order[JIT_R_NUM] = {
_RAX, _R10, _R11
};
#define JIT_R(i) jit_r_order[i]
#define JIT_V_NUM 3
static const jit_gpr_t
jit_v_order[JIT_V_NUM] = {
_RBX, _R13, _R14
};
#define JIT_V(i) jit_v_order[i]
#define jit_allocai(n) x86_allocai(_jit, n)
__jit_inline int
x86_allocai(jit_state_t _jit, int length)
{
assert(length >= 0);
_jitl.alloca_offset += length;
if (_jitl.alloca_offset + _jitl.stack_length > *_jitl.stack)
*_jitl.stack += (length + 15) & ~15;
return (-_jitl.alloca_offset);
}
#define jit_movi_p(r0, i0) x86_movi_p(_jit, r0, i0)
__jit_inline jit_insn *
x86_movi_p(jit_state_t _jit, jit_gpr_t r0, void *i0)
{
MOVQir((long)i0, r0);
return (_jit->x.pc);
}
#define jit_movi_l(r0, i0) x86_movi_l(_jit, r0, i0)
#define jit_movi_ul(r0, i0) x86_movi_l(_jit, r0, i0)
/* ensure proper zero/sign extension */
#define jit_movi_i(r0, i0) x86_movi_l(_jit, r0, (long)(int)i0)
#define jit_movi_ui(r0, i0) x86_movi_l(_jit, r0, (_ul)(_ui)i0)
__jit_inline void
x86_movi_l(jit_state_t _jit, jit_gpr_t r0, long i0)
{
if (i0) {
if (jit_can_zero_extend_int_p(i0))
MOVLir(i0, r0);
else
MOVQir(i0, r0);
}
else
XORQrr(r0, r0);
}
/* Return address is 8 bytes, plus 5 registers = 40 bytes, total = 48 bytes. */
#define jit_prolog(n) x86_prolog(_jit, n)
__jit_inline void
x86_prolog(jit_state_t _jit, int n)
{
/* offset of stack arguments */
_jitl.framesize = 48;
/* offsets of arguments */
_jitl.nextarg_getfp = _jitl.nextarg_geti = 0;
/* stack frame */
PUSHQr(_RBX);
PUSHQr(_R12);
PUSHQr(_R13);
PUSHQr(_R14);
PUSHQr(JIT_FP);
MOVQrr(JIT_SP, JIT_FP);
/* Adjust stack only once for alloca and stack arguments */
_REXQrr(_NOREG, JIT_SP);
_O(0x81);
_Mrm(_b11, X86_SUB, _rA(JIT_SP));
_jit_I(0);
_jitl.stack = ((int *)_jit->x.pc) - 1;
_jitl.alloca_offset = _jitl.stack_offset = _jitl.stack_length = 0;
_jitl.float_offset = 0;
}
#define jit_ret() x86_ret(_jit)
__jit_inline void
x86_ret(jit_state_t _jit)
{
LEAVE_();
POPQr(_R14);
POPQr(_R13);
POPQr(_R12);
POPQr(_RBX);
RET_();
}
#define jit_calli(i0) x86_calli(_jit, i0)
__jit_inline jit_insn *
x86_calli(jit_state_t _jit, void *i0)
{
MOVQir((long)i0, JIT_REXTMP);
_jitl.label = _jit->x.pc;
CALLsr(JIT_REXTMP);
return (_jitl.label);
}
#define jit_callr(r0) x86_callr(_jit, r0)
__jit_inline void
x86_callr(jit_state_t _jit, jit_gpr_t r0)
{
CALLsr(r0);
}
#define jit_patch_calli(call, label) x86_patch_calli(_jit, call, label)
__jit_inline void
x86_patch_calli(jit_state_t _jit, jit_insn *call, jit_insn *label)
{
jit_patch_movi(call, label);
}
#define jit_prepare_i(ni) x86_prepare_i(_jit, ni)
__jit_inline void
x86_prepare_i(jit_state_t _jit, int count)
{
assert(count >= 0 &&
_jitl.stack_offset == 0 &&
_jitl.nextarg_puti == 0 &&
_jitl.nextarg_putfp == 0 &&
_jitl.fprssize == 0);
/* offset of right to left integer argument */
_jitl.nextarg_puti = count;
/* update stack offset and check if need to patch stack adjustment */
if (_jitl.nextarg_puti > JIT_ARG_MAX) {
_jitl.stack_offset = (_jitl.nextarg_puti - JIT_ARG_MAX) << 3;
if (jit_push_pop_p())
_jitl.stack_length = _jitl.stack_offset;
else if (_jitl.stack_length < _jitl.stack_offset) {
_jitl.stack_length = _jitl.stack_offset;
*_jitl.stack = (_jitl.alloca_offset +
_jitl.stack_length + 15) & ~15;
}
}
}
#define jit_patch_at(jump, label) x86_patch_at(_jit, jump, label)
__jit_inline void
x86_patch_at(jit_state_t _jit, jit_insn *jump, jit_insn *label)
{
if (_jitl.long_jumps)
jit_patch_abs_long_at(jump - 3, label);
else
jit_patch_rel_int_at(jump, label);
}
/* ALU */
#define jit_negr_l(r0, r1) x86_negr_l(_jit, r0, r1)
__jit_inline void
x86_negr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
if (r0 == r1)
NEGQr(r0);
else {
XORLrr(r0, r0);
SUBQrr(r1, r0);
}
}
#define jit_notr_l(r0, r1) x86_notr_l(_jit, r0, r1)
__jit_inline void
x86_notr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
jit_movr_l(r0, r1);
NOTQr(r0);
}
#define jit_addi_l(r0, r1, i0) x86_addi_l(_jit, r0, r1, i0)
__jit_inline void
x86_addi_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (i0 == 0)
jit_movr_l(r0, r1);
else if (i0 == 1) {
jit_movr_l(r0, r1);
INCQr(r0);
}
else if (i0 == -1) {
jit_movr_l(r0, r1);
DECQr(r0);
}
else if (jit_can_sign_extend_int_p(i0)) {
if (r0 == r1)
ADDQir(i0, r0);
else
LEAQmr(i0, r1, _NOREG, _SCL1, r0);
}
else if (r0 != r1) {
MOVQir(i0, r0);
ADDQrr(r1, r0);
}
else {
MOVQir(i0, JIT_REXTMP);
ADDQrr(JIT_REXTMP, r0);
}
}
#define jit_addr_l(r0, r1, r2) x86_addr_l(_jit, r0, r1, r2)
__jit_inline void
x86_addr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r0 == r1)
ADDQrr(r2, r0);
else if (r0 == r2)
ADDQrr(r1, r0);
else
LEAQmr(0, r1, r2, _SCL1, r0);
}
#define jit_subi_l(r0, r1, i0) x86_subi_l(_jit, r0, r1, i0)
__jit_inline void
x86_subi_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (i0 == 0)
jit_movr_l(r0, r1);
else if (i0 == 1) {
jit_movr_l(r0, r1);
DECQr(r0);
}
else if (i0 == -1) {
jit_movr_l(r0, r1);
INCQr(r0);
}
else if (jit_can_sign_extend_int_p(i0)) {
if (r0 == r1)
SUBQir(i0, r0);
else
LEAQmr(-i0, r1, _NOREG, _SCL1, r0);
}
else if (r0 != r1) {
MOVQir(-i0, r0);
ADDQrr(r1, r0);
}
else {
MOVQir(i0, JIT_REXTMP);
SUBQrr(JIT_REXTMP, r0);
}
}
#define jit_subr_l(r0, r1, r2) x86_subr_l(_jit, r0, r1, r2)
__jit_inline void
x86_subr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r1 == r2)
XORQrr(r0, r0);
else if (r0 == r2) {
SUBQrr(r1, r0);
NEGQr(r0);
}
else {
jit_movr_l(r0, r1);
SUBQrr(r2, r0);
}
}
/* o Immediates are sign extended
* o CF (C)arry (F)lag is set when interpreting it as unsigned addition
* o OF (O)verflow (F)lag is set when interpreting it as signed addition
*/
/* Commutative */
#define jit_addci_ul(r0, r1, i0) x86_addci_ul(_jit, r0, r1, i0)
__jit_inline void
x86_addci_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, unsigned long i0)
{
if (jit_can_sign_extend_int_p(i0)) {
jit_movr_l(r0, r1);
ADDQir(i0, r0);
}
else if (r0 == r1) {
MOVQir(i0, JIT_REXTMP);
ADDQrr(JIT_REXTMP, r0);
}
else {
MOVQir(i0, r0);
ADDQrr(r1, r0);
}
}
#define jit_addcr_ul(r0, r1, r2) x86_addcr_ul(_jit, r0, r1, r2)
__jit_inline void
x86_addcr_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r0 == r2)
ADDQrr(r1, r0);
else if (r0 == r1)
ADDQrr(r2, r0);
else {
MOVQrr(r1, r0);
ADDQrr(r2, r0);
}
}
#define jit_addxi_ul(r0, r1, i0) x86_addxi_ul(_jit, r0, r1, i0)
__jit_inline void
x86_addxi_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, unsigned long i0)
{
if (jit_can_sign_extend_int_p(i0)) {
jit_movr_l(r0, r1);
ADCQir(i0, r0);
}
else if (r0 == r1) {
MOVQir(i0, JIT_REXTMP);
ADCQrr(JIT_REXTMP, r0);
}
else {
MOVQir(i0, r0);
ADCQrr(r1, r0);
}
}
#define jit_addxr_ul(r0, r1, r2) x86_addxr_ul(_jit, r0, r1, r2)
__jit_inline void
x86_addxr_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r0 == r2)
ADCQrr(r1, r0);
else if (r0 == r1)
ADCQrr(r2, r0);
else {
MOVQrr(r1, r0);
ADCQrr(r2, r0);
}
}
/* Non commutative */
#define jit_subci_ul(r0, r1, i0) x86_subci_ul(_jit, r0, r1, i0)
__jit_inline void
x86_subci_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, unsigned long i0)
{
jit_movr_l(r0, r1);
if (jit_can_sign_extend_int_p(i0))
SUBQir(i0, r0);
else {
MOVQir(i0, JIT_REXTMP);
SUBQrr(JIT_REXTMP, r0);
}
}
#define jit_subcr_ul(r0, r1, r2) x86_subcr_ul(_jit, r0, r1, r2)
__jit_inline void
x86_subcr_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r0 == r2 && r0 != r1) {
MOVQrr(r0, JIT_REXTMP);
MOVQrr(r1, r0);
SUBQrr(JIT_REXTMP, r0);
}
else {
jit_movr_l(r0, r1);
SUBQrr(r2, r0);
}
}
#define jit_subxi_ul(r0, r1, i0) x86_subxi_ul(_jit, r0, r1, i0)
__jit_inline void
x86_subxi_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, unsigned long i0)
{
jit_movr_l(r0, r1);
if (jit_can_sign_extend_int_p(i0))
SBBQir(i0, r0);
else {
MOVQir(i0, JIT_REXTMP);
SBBQrr(JIT_REXTMP, r0);
}
}
#define jit_subxr_ul(r0, r1, r2) x86_subxr_ul(_jit, r0, r1, r2)
__jit_inline void
x86_subxr_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r0 == r2 && r0 != r1) {
MOVQrr(r0, JIT_REXTMP);
MOVQrr(r1, r0);
SBBQrr(JIT_REXTMP, r0);
}
else {
jit_movr_l(r0, r1);
SBBQrr(r2, r0);
}
}
#define jit_andi_l(r0, r1, i0) x86_andi_l(_jit, r0, r1, i0)
__jit_inline void
x86_andi_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (i0 == 0)
XORQrr(r0, r0);
else if (i0 == -1)
jit_movr_l(r0, r1);
else if (r0 == r1) {
if (jit_can_sign_extend_int_p(i0))
ANDQir(i0, r0);
else {
MOVQir(i0, JIT_REXTMP);
ANDQrr(JIT_REXTMP, r0);
}
}
else {
MOVQir(i0, r0);
ANDQrr(r1, r0);
}
}
#define jit_andr_l(r0, r1, r2) x86_andr_l(_jit, r0, r1, r2)
__jit_inline void
x86_andr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r1 == r2)
jit_movr_l(r0, r1);
else if (r0 == r1)
ANDQrr(r2, r0);
else if (r0 == r2)
ANDQrr(r1, r0);
else {
MOVQrr(r1, r0);
ANDQrr(r2, r0);
}
}
#define jit_ori_l(r0, r1, i0) x86_ori_l(_jit, r0, r1, i0)
__jit_inline void
x86_ori_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (i0 == 0)
jit_movr_l(r0, r1);
else if (i0 == -1)
MOVQir(-1, r0);
else if (r0 == r1) {
if (jit_can_sign_extend_char_p(i0))
ORBir(i0, r0);
else if (jit_can_sign_extend_int_p(i0))
ORQir(i0, r0);
else {
MOVQir(i0, JIT_REXTMP);
ORQrr(JIT_REXTMP, r0);
}
}
else {
MOVQir(i0, r0);
ORQrr(r1, r0);
}
}
#define jit_orr_l(r0, r1, r2) x86_orr_l(_jit, r0, r1, r2)
__jit_inline void
x86_orr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r1 == r2)
jit_movr_l(r0, r1);
else if (r0 == r1)
ORQrr(r2, r0);
else if (r0 == r2)
ORQrr(r1, r0);
else {
MOVQrr(r1, r0);
ORQrr(r2, r0);
}
}
#define jit_xori_l(r0, r1, i0) x86_xori_l(_jit, r0, r1, i0)
__jit_inline void
x86_xori_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (i0 == 0)
jit_movr_l(r0, r1);
else if (i0 == -1) {
jit_movr_l(r0, r1);
NOTQr(r0);
}
else {
if (jit_can_sign_extend_char_p(i0)) {
jit_movr_l(r0, r1);
XORBir(i0, r0);
}
else if (jit_can_sign_extend_int_p(i0)) {
jit_movr_l(r0, r1);
XORQir(i0, r0);
}
else {
if (r0 == r1) {
MOVQir(i0, JIT_REXTMP);
XORQrr(JIT_REXTMP, r0);
}
else {
MOVQir(i0, r0);
XORQrr(r1, r0);
}
}
}
}
#define jit_xorr_l(r0, r1, r2) x86_xorr_l(_jit, r0, r1, r2)
__jit_inline void
x86_xorr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r1 == r2)
XORLrr(r0, r0);
else if (r0 == r1)
XORQrr(r2, r0);
else if (r0 == r2)
XORQrr(r1, r0);
else {
MOVQrr(r1, r0);
XORQrr(r2, r0);
}
}
#define jit_muli_l(r0, r1, i0) x86_muli_l(_jit, r0, r1, i0)
#define jit_muli_ul(r0, r1, i0) x86_muli_l(_jit, r0, r1, i0)
__jit_inline void
x86_muli_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
switch (i0) {
case 0:
XORLrr(r0, r0);
break;
case 1:
jit_movr_l(r0, r1);
break;
case -1:
jit_negr_l(r0, r1);
break;
case 2:
LEAQmr(0, _NOREG, r1, _SCL2, r0);
break;
case 4:
LEAQmr(0, _NOREG, r1, _SCL4, r0);
break;
case 8:
LEAQmr(0, _NOREG, r1, _SCL8, r0);
break;
default:
if (i0 > 0 && !(i0 & (i0 - 1))) {
jit_movr_l(r0, r1);
SHLQir(ffsl(i0) - 1, r0);
}
else if (jit_can_sign_extend_int_p(i0))
IMULQirr(i0, r1, r0);
else if (r0 == r1) {
MOVQir(i0, JIT_REXTMP);
IMULQrr(JIT_REXTMP, r0);
}
else {
MOVQir(i0, r0);
IMULQrr(r1, r0);
}
break;
}
}
#define jit_mulr_l(r0, r1, r2) x86_mulr_l(_jit, r0, r1, r2)
#define jit_mulr_ul(r0, r1, r2) x86_mulr_l(_jit, r0, r1, r2)
__jit_inline void
x86_mulr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r0 == r1)
IMULQrr(r2, r0);
else if (r0 == r2)
IMULQrr(r1, r0);
else {
MOVQrr(r1, r0);
IMULQrr(r2, r0);
}
}
/* Instruction format is:
* imul reg64/mem64
* and the result is stored in %rdx:%rax
* %rax = low 64 bits
* %rdx = high 64 bits
*/
#define jit_muli_l_(r0, i0) x86_muli_l_(_jit, r0, i0)
__jit_inline void
x86_muli_l_(jit_state_t _jit, jit_gpr_t r0, long i0)
{
if (r0 == _RAX) {
jit_movi_l(_RDX, i0);
IMULQr(_RDX);
}
else {
jit_movi_l(_RAX, i0);
IMULQr(r0);
}
}
#define jit_hmuli_l(r0, r1, i0) x86_hmuli_l(_jit, r0, r1, i0)
__jit_inline void
x86_hmuli_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (r0 == _RDX) {
MOVQrr(_RAX, JIT_REXTMP);
jit_muli_l_(r1, i0);
MOVQrr(JIT_REXTMP, _RAX);
}
else if (r0 == _RAX) {
MOVQrr(_RDX, JIT_REXTMP);
jit_muli_l_(r1, i0);
MOVQrr(_RDX, _RAX);
MOVQrr(JIT_REXTMP, _RDX);
}
else {
MOVQrr(_RDX, JIT_REXTMP);
jit_pushr_l(_RAX);
jit_muli_l_(r1, i0);
MOVQrr(_RDX, r0);
jit_popr_l(_RAX);
MOVQrr(JIT_REXTMP, _RDX);
}
}
#define jit_mulr_l_(r0, r1) x86_mulr_l_(_jit, r0, r1)
__jit_inline void
x86_mulr_l_(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
if (r1 == _RAX)
IMULQr(r0);
else if (r0 == _RAX)
IMULQr(r1);
else {
MOVQrr(r1, _RAX);
IMULQr(r0);
}
}
#define jit_hmulr_l(r0, r1, r2) x86_hmulr_l(_jit, r0, r1, r2)
__jit_inline void
x86_hmulr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r0 == _RDX) {
MOVQrr(_RAX, JIT_REXTMP);
jit_mulr_l_(r1, r2);
MOVQrr(JIT_REXTMP, _RAX);
}
else if (r0 == _RAX) {
MOVQrr(_RDX, JIT_REXTMP);
jit_mulr_l_(r1, r2);
MOVQrr(_RDX, _RAX);
MOVQrr(JIT_REXTMP, _RDX);
}
else {
MOVQrr(_RDX, JIT_REXTMP);
jit_pushr_l(_RAX);
jit_mulr_l_(r1, r2);
MOVQrr(_RDX, r0);
jit_popr_l(_RAX);
MOVQrr(JIT_REXTMP, _RDX);
}
}
/* Instruction format is:
* mul reg64/mem64
* and the result is stored in %rdx:%rax
* %rax = low 64 bits
* %rdx = high 64 bits
*/
#define jit_muli_ul_(r0, i0) x86_muli_ul_(_jit, r0, i0)
__jit_inline void
x86_muli_ul_(jit_state_t _jit, jit_gpr_t r0, unsigned long i0)
{
if (r0 == _RAX) {
jit_movi_ul(_RDX, i0);
MULQr(_RDX);
}
else {
jit_movi_ul(_RAX, i0);
MULQr(r0);
}
}
#define jit_hmuli_ul(r0, r1, i0) x86_hmuli_ul(_jit, r0, r1, i0)
__jit_inline void
x86_hmuli_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, unsigned long i0)
{
if (r0 == _RDX) {
MOVQrr(_RAX, JIT_REXTMP);
jit_muli_ul_(r1, i0);
MOVQrr(JIT_REXTMP, _RAX);
}
else if (r0 == _RAX) {
MOVQrr(_RDX, JIT_REXTMP);
jit_muli_ul_(r1, i0);
MOVQrr(_RDX, _RAX);
MOVQrr(JIT_REXTMP, _RDX);
}
else {
MOVQrr(_RDX, JIT_REXTMP);
jit_pushr_l(_RAX);
jit_muli_ul_(r1, i0);
MOVQrr(_RDX, r0);
jit_popr_l(_RAX);
MOVQrr(JIT_REXTMP, _RDX);
}
}
#define jit_mulr_ul_(r0, r1) x86_mulr_ul_(_jit, r0, r1)
__jit_inline void
x86_mulr_ul_(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
if (r1 == _RAX)
MULQr(r0);
else if (r0 == _RAX)
MULQr(r1);
else {
MOVQrr(r1, _RAX);
MULQr(r0);
}
}
#define jit_hmulr_ul(r0, r1, r2) x86_hmulr_ul(_jit, r0, r1, r2)
__jit_inline void
x86_hmulr_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r0 == _RDX) {
MOVQrr(_RAX, JIT_REXTMP);
jit_mulr_ul_(r1, r2);
MOVQrr(JIT_REXTMP, _RAX);
}
else if (r0 == _RAX) {
MOVQrr(_RDX, JIT_REXTMP);
jit_mulr_ul_(r1, r2);
MOVQrr(_RDX, _RAX);
MOVQrr(JIT_REXTMP, _RDX);
}
else {
MOVQrr(_RDX, JIT_REXTMP);
jit_pushr_l(_RAX);
jit_mulr_ul_(r1, r2);
MOVQrr(_RDX, r0);
jit_popr_l(_RAX);
MOVQrr(JIT_REXTMP, _RDX);
}
}
#define jit_divi_l_(r0, r1, i0, i1, i2) x86_divi_l_(_jit, r0, r1, i0, i1, i2)
__jit_inline void
x86_divi_l_(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0,
int sign, int divide)
{
jit_gpr_t div;
if (divide) {
switch (i0) {
case 1:
jit_movr_l(r0, r1);
return;
case -1:
if (sign) {
jit_negr_l(r0, r1);
return;
}
break;
default:
if (i0 > 0 && !(i0 & (i0 - 1))) {
jit_movr_l(r0, r1);
_ROTSHIQir(sign ? X86_SAR : X86_SHR, ffsl(i0) - 1, r0);
return;
}
break;
}
}
else if (i0 == 1 || (sign && i0 == -1)) {
XORLrr(r0, r0);
return;
}
else if (!sign && i0 > 0 && !(i0 & (i0 - 1))) {
if (jit_can_sign_extend_int_p(i0)) {
jit_movr_l(r0, r1);
ANDQir(i0 - 1, r0);
}
else if (r0 != r1) {
MOVQir(i0 - 1, r0);
ANDQrr(r1, r0);
}
else {
MOVQir(i0 - 1, JIT_REXTMP);
ANDQrr(JIT_REXTMP, r0);
}
return;
}
if (r0 == _RAX) {
jit_pushr_l(_RDX);
div = JIT_REXTMP;
}
else if (r0 == _RDX) {
jit_pushr_l(_RAX);
div = JIT_REXTMP;
}
else if (r0 == r1) {
jit_pushr_l(_RDX);
jit_pushr_l(_RAX);
div = JIT_REXTMP;
}
else {
jit_pushr_l(_RDX);
MOVQrr(_RAX, JIT_REXTMP);
div = r0;
}
MOVQir(i0, div);
jit_movr_l(_RAX, r1);
if (sign) {
CQO_();
IDIVQr(div);
}
else {
XORQrr(_RDX, _RDX);
DIVQr(div);
}
if (r0 != _RAX) {
if (divide)
MOVQrr(_RAX, r0);
if (div == JIT_REXTMP)
jit_popr_l(_RAX);
else
MOVQrr(JIT_REXTMP, _RAX);
}
if (r0 != _RDX) {
if (!divide)
MOVQrr(_RDX, r0);
jit_popr_l(_RDX);
}
}
#define jit_divr_l_(r0, r1, r2, i0, i1) x86_divr_l_(_jit, r0, r1, r2, i0, i1)
__jit_inline void
x86_divr_l_(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2,
int sign, int divide)
{
jit_gpr_t div;
if (r0 != _RDX)
jit_pushr_l(_RDX);
if (r0 != _RAX)
jit_pushr_l(_RAX);
if (r2 == _RAX) {
if (r0 == _RAX || r0 == _RDX) {
div = JIT_REXTMP;
MOVQrr(_RAX, div);
if (r1 != _RAX)
MOVQrr(r1, _RAX);
}
else {
if (r0 == r1)
jit_xchgr_l(_RAX, r0);
else {
if (r0 != _RAX)
MOVQrr(_RAX, r0);
if (r1 != _RAX)
MOVQrr(r1, _RAX);
}
div = r0;
}
}
else if (r2 == _RDX) {
if (r0 == _RAX || r0 == _RDX) {
div = JIT_REXTMP;
MOVQrr(_RDX, div);
if (r1 != _RAX)
MOVQrr(r1, _RAX);
}
else {
if (r1 != _RAX)
MOVQrr(r1, _RAX);
MOVQrr(_RDX, r0);
div = r0;
}
}
else {
if (r1 != _RAX)
MOVQrr(r1, _RAX);
div = r2;
}
if (sign) {
CQO_();
IDIVQr(div);
}
else {
XORLrr(_RDX, _RDX);
DIVQr(div);
}
if (r0 != _RAX) {
if (divide)
MOVQrr(_RAX, r0);
jit_popr_l(_RAX);
}
if (r0 != _RDX) {
if (!divide)
MOVQrr(_RDX, r0);
jit_popr_l(_RDX);
}
}
#define jit_divi_l(r0, r1, i0) x86_divi_l(_jit, r0, r1, i0)
__jit_inline void
x86_divi_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
jit_divi_l_(r0, r1, i0, 1, 1);
}
#define jit_divr_l(r0, r1, r2) x86_divr_l(_jit, r0, r1, r2)
__jit_inline void
x86_divr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
jit_divr_l_(r0, r1, r2, 1, 1);
}
#define jit_divi_ul(r0, r1, i0) x86_divi_ul(_jit, r0, r1, i0)
__jit_inline void
x86_divi_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, unsigned long i0)
{
jit_divi_l_(r0, r1, i0, 0, 1);
}
#define jit_divr_ul(r0, r1, r2) x86_divr_ul(_jit, r0, r1, r2)
__jit_inline void
x86_divr_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
jit_divr_l_(r0, r1, r2, 0, 1);
}
#define jit_modi_l(r0, r1, i0) x86_modi_l(_jit, r0, r1, i0)
__jit_inline void
x86_modi_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
jit_divi_l_(r0, r1, i0, 1, 0);
}
#define jit_modr_l(r0, r1, r2) x86_modr_l(_jit, r0, r1, r2)
__jit_inline void
x86_modr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
jit_divr_l_(r0, r1, r2, 1, 0);
}
#define jit_modi_ul(r0, r1, i0) x86_modi_ul(_jit, r0, r1, i0)
__jit_inline void
x86_modi_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, unsigned long i0)
{
jit_divi_l_(r0, r1, i0, 0, 0);
}
#define jit_modr_ul(r0, r1, r2) x86_modr_ul(_jit, r0, r1, r2)
__jit_inline void
x86_modr_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
jit_divr_l_(r0, r1, r2, 0, 0);
}
/* Instruction format is:
* <shift> %r0 %r1
* %r0 <shift>= %r1
* only %cl can be used as %r1
*/
#define jit_shift64(r0, r1, r2, i0) x86_shift64(_jit, r0, r1, r2, i0)
__jit_inline void
x86_shift64(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2,
x86_rotsh_t cc)
{
if (r0 == _RCX) {
MOVQrr(r1, JIT_REXTMP);
if (r2 != _RCX)
MOVBrr(r2, _RCX);
_ROTSHIQrr(cc, _RCX, JIT_REXTMP);
MOVQrr(JIT_REXTMP, _RCX);
}
else if (r2 != _RCX) {
MOVQrr(_RCX, JIT_REXTMP);
MOVBrr(r2, _RCX);
jit_movr_l(r0, r1);
_ROTSHIQrr(cc, _RCX, r0);
MOVQrr(JIT_REXTMP, _RCX);
}
else {
jit_movr_l(r0, r1);
_ROTSHIQrr(cc, _RCX, r0);
}
}
#define jit_lshi_l(r0, r1, i0) x86_lshi_l(_jit, r0, r1, i0)
__jit_inline void
x86_lshi_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, unsigned char i0)
{
if (i0 == 0)
jit_movr_l(r0, r1);
else if (i0 <= 3)
LEAQmr(0, _NOREG, r1, i0 == 1 ? _SCL2 : i0 == 2 ? _SCL4 : _SCL8, r0);
else {
jit_movr_l(r0, r1);
SHLQir(i0, r0);
}
}
#define jit_lshr_l(r0, r1, r2) x86_lshr_l(_jit, r0, r1, r2)
__jit_inline void
x86_lshr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
jit_shift64(r0, r1, r2, X86_SHL);
}
#define jit_rshi_l(r0, r1, i0) x86_rshi_l(_jit, r0, r1, i0)
__jit_inline void
x86_rshi_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, unsigned char i0)
{
jit_movr_l(r0, r1);
if (i0)
SARQir(i0, r0);
}
#define jit_rshr_l(r0, r1, r2) x86_rshr_l(_jit, r0, r1, r2)
__jit_inline void
x86_rshr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
jit_shift64(r0, r1, r2, X86_SAR);
}
#define jit_rshi_ul(r0, r1, i0) x86_rshi_ul(_jit, r0, r1, i0)
__jit_inline void
x86_rshi_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, unsigned char i0)
{
jit_movr_l(r0, r1);
if (i0)
SHRQir(i0, r0);
}
#define jit_rshr_ul(r0, r1, r2) x86_rshr_ul(_jit, r0, r1, r2)
__jit_inline void
x86_rshr_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
jit_shift64(r0, r1, r2, X86_SHR);
}
/* Boolean */
#define jit_cmp_ri64(r0, r1, i0, i1) x86_cmp_ri64(_jit, r0, r1, i0, i1)
__jit_inline void
x86_cmp_ri64(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0, x86_cc_t cc)
{
int same = r0 == r1;
if (!same)
/* XORLrr is cheaper */
XORLrr(r0, r0);
if (jit_can_sign_extend_int_p(i0))
CMPQir(i0, r1);
else {
MOVQir(i0, JIT_REXTMP);
CMPQrr(JIT_REXTMP, r1);
}
if (same)
/* MOVLir is cheaper */
MOVLir(0, r0);
SETCCir(cc, r0);
}
#define jit_test_r64(r0, r1, i0) x86_test_r64(_jit, r0, r1, i0)
__jit_inline void
x86_test_r64(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, x86_cc_t cc)
{
int same = r0 == r1;
if (!same)
XORLrr(r0, r0);
TESTQrr(r1, r1);
if (same)
MOVLir(0, r0);
SETCCir(cc, r0);
}
#define jit_cmp_rr64(r0, r1, r2, i0) x86_cmp_rr64(_jit, r0, r1, r2, i0)
__jit_inline void
x86_cmp_rr64(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2,
x86_cc_t cc)
{
int same = r0 == r1 || r0 == r2;
if (!same)
XORLrr(r0, r0);
CMPQrr(r2, r1);
if (same)
MOVLir(0, r0);
SETCCir(cc, r0);
}
#define jit_lti_l(r0, r1, i0) x86_lti_l(_jit, r0, r1, i0)
__jit_inline void
x86_lti_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (i0)
jit_cmp_ri64(r0, r1, i0, X86_CC_L);
else
jit_test_r64(r0, r1, X86_CC_S);
}
#define jit_ltr_l(r0, r1, r2) x86_ltr_l(_jit, r0, r1, r2)
__jit_inline void
x86_ltr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
jit_cmp_rr64(r0, r1, r2, X86_CC_L);
}
#define jit_lei_l(r0, r1, i0) x86_lei_l(_jit, r0, r1, i0)
__jit_inline void
x86_lei_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
jit_cmp_ri64(r0, r1, i0, X86_CC_LE);
}
#define jit_ler_l(r0, r1, r2) x86_ler_l(_jit, r0, r1, r2)
__jit_inline void
x86_ler_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r1 == r2)
MOVLir(1, r0);
else
jit_cmp_rr64(r0, r1, r2, X86_CC_LE);
}
#define jit_eqi_l(r0, r1, i0) x86_eqi_l(_jit, r0, r1, i0)
__jit_inline void
x86_eqi_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (i0)
jit_cmp_ri64(r0, r1, i0, X86_CC_E);
else
jit_test_r64(r0, r1, X86_CC_E);
}
#define jit_eqr_l(r0, r1, r2) x86_eqr_l(_jit, r0, r1, r2)
__jit_inline void
x86_eqr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r1 == r2)
MOVLir(1, r0);
else
jit_cmp_rr64(r0, r1, r2, X86_CC_E);
}
#define jit_gei_l(r0, r1, i0) x86_gei_l(_jit, r0, r1, i0)
__jit_inline void
x86_gei_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (i0)
jit_cmp_ri64(r0, r1, i0, X86_CC_GE);
else
jit_test_r64(r0, r1, X86_CC_NS);
}
#define jit_ger_l(r0, r1, r2) x86_ger_l(_jit, r0, r1, r2)
__jit_inline void
x86_ger_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r1 == r2)
MOVLir(1, r0);
else
jit_cmp_rr64(r0, r1, r2, X86_CC_GE);
}
#define jit_gti_l(r0, r1, i0) x86_gti_l(_jit, r0, r1, i0)
__jit_inline void
x86_gti_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
jit_cmp_ri64(r0, r1, i0, X86_CC_G);
}
#define jit_gtr_l(r0, r1, r2) x86_gtr_l(_jit, r0, r1, r2)
__jit_inline void
x86_gtr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
jit_cmp_rr64(r0, r1, r2, X86_CC_G);
}
#define jit_nei_l(r0, r1, i0) x86_nei_l(_jit, r0, r1, i0)
__jit_inline void
x86_nei_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (i0)
jit_cmp_ri64(r0, r1, i0, X86_CC_NE);
else
jit_test_r64(r0, r1, X86_CC_NE);
}
#define jit_ner_l(r0, r1, r2) x86_ner_l(_jit, r0, r1, r2)
__jit_inline void
x86_ner_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r1 == r2)
XORLrr(r0, r0);
else
jit_cmp_rr64(r0, r1, r2, X86_CC_NE);
}
#define jit_lti_ul(r0, r1, i0) x86_lti_ul(_jit, r0, r1, i0)
__jit_inline void
x86_lti_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, unsigned long i0)
{
jit_cmp_ri64(r0, r1, i0, X86_CC_B);
}
#define jit_ltr_ul(r0, r1, r2) x86_ltr_ul(_jit, r0, r1, r2)
__jit_inline void
x86_ltr_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
jit_cmp_rr64(r0, r1, r2, X86_CC_B);
}
#define jit_lei_ul(r0, r1, i0) x86_lei_ul(_jit, r0, r1, i0)
__jit_inline void
x86_lei_ul(jit_state_t _jit,
jit_gpr_t r0, jit_gpr_t r1, unsigned long i0)
{
if (i0)
jit_cmp_ri64(r0, r1, i0, X86_CC_BE);
else
jit_test_r64(r0, r1, X86_CC_E);
}
#define jit_ler_ul(r0, r1, r2) x86_ler_ul(_jit, r0, r1, r2)
__jit_inline void
x86_ler_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r1 == r2)
MOVLir(1, r0);
else
jit_cmp_rr64(r0, r1, r2, X86_CC_BE);
}
#define jit_gei_ul(r0, r1, i0) x86_gei_ul(_jit, r0, r1, i0)
__jit_inline void
x86_gei_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, unsigned long i0)
{
if (i0)
jit_cmp_ri64(r0, r1, i0, X86_CC_AE);
else
jit_test_r64(r0, r1, X86_CC_NB);
}
#define jit_ger_ul(r0, r1, r2) x86_ger_ul(_jit, r0, r1, r2)
__jit_inline void
x86_ger_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
if (r1 == r2)
MOVLir(1, r0);
else
jit_cmp_rr64(r0, r1, r2, X86_CC_AE);
}
#define jit_gti_ul(r0, r1, i0) x86_gti_ul(_jit, r0, r1, i0)
__jit_inline void
x86_gti_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, unsigned long i0)
{
if (i0)
jit_cmp_ri64(r0, r1, i0, X86_CC_A);
else
jit_test_r64(r0, r1, X86_CC_NE);
}
#define jit_gtr_ul(r0, r1, r2) x86_gtr_ul(_jit, r0, r1, r2)
__jit_inline void
x86_gtr_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
jit_cmp_rr64(r0, r1, r2, X86_CC_A);
}
/* Jump */
#define jit_bcmp_ri64(i0, r0, i1, i2) x86_bcmp_ri64(_jit, i0, r0, i1, i2)
__jit_inline void
x86_bcmp_ri64(jit_state_t _jit,
jit_insn *label, jit_gpr_t r0, long i0, x86_cc_t cc)
{
if (jit_can_sign_extend_int_p(i0))
CMPQir(i0, r0);
else {
MOVQir(i0, JIT_REXTMP);
CMPQrr(JIT_REXTMP, r0);
}
JCCim(cc, label);
}
#define jit_btest_r64(i0, r0, i1) x86_btest_r64(_jit, i0, r0, i1)
__jit_inline void
x86_btest_r64(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, x86_cc_t cc)
{
TESTQrr(r0, r0);
JCCim(cc, label);
}
#define jit_bcmp_rr64(i0, r0, r1, i1) x86_bcmp_rr64(_jit, i0, r0, r1, i1)
__jit_inline void
x86_bcmp_rr64(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1,
x86_cc_t cc)
{
CMPQrr(r1, r0);
JCCim(cc, label);
}
#define jit_blti_l(label, r0, i0) x86_blti_l(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_blti_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, long i0)
{
if (i0)
jit_bcmp_ri64(label, r0, i0, X86_CC_L);
else
jit_btest_r64(label, r0, X86_CC_S);
return (_jit->x.pc);
}
#define jit_bltr_l(label, r0, r1) x86_bltr_l(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_bltr_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
jit_bcmp_rr64(label, r0, r1, X86_CC_L);
return (_jit->x.pc);
}
#define jit_blei_l(label, r0, i0) x86_blei_l(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_blei_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, long i0)
{
jit_bcmp_ri64(label, r0, i0, X86_CC_LE);
return (_jit->x.pc);
}
#define jit_bler_l(label, r0, r1) x86_bler_l(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_bler_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
if (r0 == r1)
JMPm(label);
else
jit_bcmp_rr64(label, r0, r1, X86_CC_LE);
return (_jit->x.pc);
}
#define jit_beqi_l(label, r0, i0) x86_beqi_l(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_beqi_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, long i0)
{
if (i0)
jit_bcmp_ri64(label, r0, i0, X86_CC_E);
else
jit_btest_r64(label, r0, X86_CC_E);
return (_jit->x.pc);
}
#define jit_beqr_l(label, r0, r1) x86_beqr_l(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_beqr_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
if (r0 == r1)
JMPm(label);
else
jit_bcmp_rr64(label, r0, r1, X86_CC_E);
return (_jit->x.pc);
}
#define jit_bgei_l(label, r0, i0) x86_bgei_l(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_bgei_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, long i0)
{
if (i0)
jit_bcmp_ri64(label, r0, i0, X86_CC_GE);
else
jit_btest_r64(label, r0, X86_CC_NS);
return (_jit->x.pc);
}
#define jit_bger_l(label, r0, r1) x86_bger_l(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_bger_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
if (r0 == r1)
JMPm(label);
else
jit_bcmp_rr64(label, r0, r1, X86_CC_GE);
return (_jit->x.pc);
}
#define jit_bgti_l(label, r0, i0) x86_bgti_l(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_bgti_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, long i0)
{
jit_bcmp_ri64(label, r0, i0, X86_CC_G);
return (_jit->x.pc);
}
#define jit_bgtr_l(label, r0, r1) x86_bgtr_l(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_bgtr_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
jit_bcmp_rr64(label, r0, r1, X86_CC_G);
return (_jit->x.pc);
}
#define jit_bnei_l(label, r0, i0) x86_bnei_l(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_bnei_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, long i0)
{
if (i0)
jit_bcmp_ri64(label, r0, i0, X86_CC_NE);
else
jit_btest_r64(label, r0, X86_CC_NE);
return (_jit->x.pc);
}
#define jit_bner_l(label, r0, r1) x86_bner_l(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_bner_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
/* need to return a patchable address even if r0 == r1 */
jit_bcmp_rr64(label, r0, r1, X86_CC_NE);
return (_jit->x.pc);
}
#define jit_blti_ul(label, r0, i0) x86_blti_ul(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_blti_ul(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, unsigned long i0)
{
/* need to return a patchable address even if i0 == 0 */
jit_bcmp_ri64(label, r0, i0, X86_CC_B);
return (_jit->x.pc);
}
#define jit_bltr_ul(label, r0, r1) x86_bltr_ul(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_bltr_ul(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
jit_bcmp_rr64(label, r0, r1, X86_CC_B);
return (_jit->x.pc);
}
#define jit_blei_ul(label, r0, i0) x86_blei_ul(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_blei_ul(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, unsigned long i0)
{
/* need to return a patchable address even if i0 == 0 */
jit_bcmp_ri64(label, r0, i0, X86_CC_BE);
return (_jit->x.pc);
}
#define jit_bler_ul(label, r0, r1) x86_bler_ul(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_bler_ul(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
if (r0 == r1)
JMPm(label);
else
jit_bcmp_rr64(label, r0, r1, X86_CC_BE);
return (_jit->x.pc);
}
#define jit_bgei_ul(label, r0, i0) x86_bgei_ul(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_bgei_ul(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, unsigned long i0)
{
if (i0 == 0)
JMPm(label);
else
jit_bcmp_ri64(label, r0, i0, X86_CC_AE);
return (_jit->x.pc);
}
#define jit_bger_ul(label, r0, r1) x86_bger_ul(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_bger_ul(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
if (r0 == r1)
JMPm(label);
else
jit_bcmp_rr64(label, r0, r1, X86_CC_AE);
return (_jit->x.pc);
}
#define jit_bgti_ul(label, r0, i0) x86_bgti_ul(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_bgti_ul(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, unsigned long i0)
{
if (i0)
jit_bcmp_ri64(label, r0, i0, X86_CC_A);
else
jit_btest_r64(label, r0, X86_CC_NE);
return (_jit->x.pc);
}
#define jit_bgtr_ul(label, r0, r1) x86_bgtr_ul(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_bgtr_ul(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
jit_bcmp_rr64(label, r0, r1, X86_CC_A);
return (_jit->x.pc);
}
#define jit_boaddi_l(label, r0, i0) x86_boaddi_l(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_boaddi_l(jit_state_t _jit,
jit_insn *label, jit_gpr_t r0, long i0)
{
if (jit_can_sign_extend_int_p(i0))
ADDQir(i0, r0);
else {
MOVQir(i0, JIT_REXTMP);
ADDQrr(JIT_REXTMP, r0);
}
JOm(label);
return (_jit->x.pc);
}
#define jit_boaddr_l(label, r0, r1) x86_boaddr_l(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_boaddr_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
ADDQrr(r1, r0);
JOm(label);
return (_jit->x.pc);
}
#define jit_bosubi_l(label, r0, i0) x86_bosubi_l(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_bosubi_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, long i0)
{
if (jit_can_sign_extend_int_p(i0))
SUBQir(i0, r0);
else {
MOVQir(i0, JIT_REXTMP);
SUBQrr(JIT_REXTMP, r0);
}
JOm(label);
return (_jit->x.pc);
}
#define jit_bosubr_l(label, r0, r1) x86_bosubr_l(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_bosubr_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
SUBQrr(r1, r0);
JOm(label);
return (_jit->x.pc);
}
#define jit_boaddi_ul(label, r0, i0) x86_boaddi_ul(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_boaddi_ul(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, unsigned long i0)
{
if (jit_can_sign_extend_int_p(i0))
ADDQir(i0, r0);
else {
MOVQir(i0, JIT_REXTMP);
ADDQrr(JIT_REXTMP, r0);
}
JCm(label);
return (_jit->x.pc);
}
#define jit_boaddr_ul(label, r0, r1) x86_boaddr_ul(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_boaddr_ul(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
ADDQrr(r1, r0);
JCm(label);
return (_jit->x.pc);
}
#define jit_bosubi_ul(label, r0, i0) x86_bosubi_ul(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_bosubi_ul(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, unsigned long i0)
{
if (jit_can_sign_extend_int_p(i0))
SUBQir(i0, r0);
else {
MOVQir(i0, JIT_REXTMP);
SUBQrr(JIT_REXTMP, r0);
}
JCm(label);
return (_jit->x.pc);
}
#define jit_bosubr_ul(label, r0, r1) x86_bosubr_ul(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_bosubr_ul(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
SUBQrr(r1, r0);
JCm(label);
return (_jit->x.pc);
}
#define jit_bmsi_l(label, r0, i0) x86_bmsi_l(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_bmsi_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, long i0)
{
if (jit_can_zero_extend_char_p(i0))
TESTBir(i0, r0);
else if (jit_can_zero_extend_short_p(i0))
TESTWir(i0, r0);
else if (jit_can_sign_extend_int_p(i0))
TESTLir(i0, r0);
else {
MOVQir(i0, JIT_REXTMP);
TESTQrr(JIT_REXTMP, r0);
}
JNZm(label);
return (_jit->x.pc);
}
#define jit_bmsr_l(label, r0, r1) x86_bmsr_l(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_bmsr_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
TESTQrr(r1, r0);
JNZm(label);
return (_jit->x.pc);
}
#define jit_bmci_l(label, r0, i0) x86_bmci_l(_jit, label, r0, i0)
__jit_inline jit_insn *
x86_bmci_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, long i0)
{
if (jit_can_zero_extend_char_p(i0))
TESTBir(i0, r0);
else if (jit_can_zero_extend_short_p(i0))
TESTWir(i0, r0);
else if (jit_can_zero_extend_int_p(i0))
TESTLir(i0, r0);
else if (jit_can_sign_extend_int_p(i0))
TESTQir(i0, r0);
else {
MOVQir(i0, JIT_REXTMP);
TESTQrr(JIT_REXTMP, r0);
}
JZm(label);
return (_jit->x.pc);
}
#define jit_bmcr_l(label, r0, r1) x86_bmcr_l(_jit, label, r0, r1)
__jit_inline jit_insn *
x86_bmcr_l(jit_state_t _jit, jit_insn *label, jit_gpr_t r0, jit_gpr_t r1)
{
TESTQrr(r1, r0);
JZm(label);
return (_jit->x.pc);
}
/* Memory */
#define jit_ntoh_ul(r0, r1) x86_ntoh_ul(_jit, r0, r1)
__jit_inline void
x86_ntoh_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
jit_movr_l(r0, r1);
BSWAPQr(r0);
}
#define jit_ldr_c(r0, r1) x86_ldr_c(_jit, r0, r1)
__jit_inline void
x86_ldr_c(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
MOVSBQmr(0, r1, _NOREG, _SCL1, r0);
}
#define jit_ldxr_c(r0, r1, r2) x86_ldxr_c(_jit, r0, r1, r2)
__jit_inline void
x86_ldxr_c(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
MOVSBQmr(0, r1, r2, _SCL1, r0);
}
#define jit_ldr_s(r0, r1) x86_ldr_s(_jit, r0, r1)
__jit_inline void
x86_ldr_s(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
MOVSWQmr(0, r1, _NOREG, _SCL1, r0);
}
#define jit_ldxr_s(r0, r1, r2) x86_ldxr_s(_jit, r0, r1, r2)
__jit_inline void
x86_ldxr_s(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
MOVSWQmr(0, r1, r2, _SCL1, r0);
}
#define jit_ldi_c(r0, i0) x86_ldi_c(_jit, r0, i0)
__jit_inline void
x86_ldi_c(jit_state_t _jit, jit_gpr_t r0, void *i0)
{
if (jit_can_sign_extend_int_p((long)i0))
MOVSBQmr((long)i0, _NOREG, _NOREG, _SCL1, r0);
else {
MOVQir((long)i0, JIT_REXTMP);
jit_ldr_c(r0, JIT_REXTMP);
}
}
#define jit_ldxi_c(r0, r1, i0) x86_ldxi_c(_jit, r0, r1, i0)
__jit_inline void
x86_ldxi_c(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (jit_can_sign_extend_int_p(i0))
MOVSBQmr(i0, r1, _NOREG, _SCL1, r0);
else {
MOVQir(i0, JIT_REXTMP);
jit_ldxr_c(r0, r1, JIT_REXTMP);
}
}
#define jit_ldi_uc(r0, i0) x86_ldi_uc(_jit, r0, i0)
__jit_inline void
x86_ldi_uc(jit_state_t _jit, jit_gpr_t r0, void *i0)
{
if (jit_can_sign_extend_int_p((long)i0))
MOVZBLmr((long)i0, _NOREG, _NOREG, _SCL1, r0);
else {
MOVQir((long)i0, JIT_REXTMP);
jit_ldr_uc(r0, JIT_REXTMP);
}
}
#define jit_ldxi_uc(r0, r1, i0) x86_ldxi_uc(_jit, r0, r1, i0)
__jit_inline void
x86_ldxi_uc(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (jit_can_sign_extend_int_p(i0))
MOVZBLmr(i0, r1, _NOREG, _SCL1, r0);
else {
MOVQir(i0, JIT_REXTMP);
jit_ldxr_uc(r0, r1, JIT_REXTMP);
}
}
#define jit_str_c(r0, r1) x86_str_c(_jit, r0, r1)
__jit_inline void
x86_str_c(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
MOVBrm(r1, 0, r0, _NOREG, _SCL1);
}
#define jit_sti_c(i0, r0) x86_sti_c(_jit, i0, r0)
__jit_inline void
x86_sti_c(jit_state_t _jit, void *i0, jit_gpr_t r0)
{
if (jit_can_sign_extend_int_p((long)i0))
MOVBrm(r0, (long)i0, _NOREG, _NOREG, _SCL1);
else {
MOVQir((long)i0, JIT_REXTMP);
jit_str_c(JIT_REXTMP, r0);
}
}
#define jit_stxr_c(r0, r1, r2) x86_stxr_c(_jit, r0, r1, r2)
__jit_inline void
x86_stxr_c(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
MOVBrm(r2, 0, r0, r1, _SCL1);
}
#define jit_stxi_c(i0, r0, r1) x86_stxi_c(_jit, i0, r0, r1)
__jit_inline void
x86_stxi_c(jit_state_t _jit, long i0, jit_gpr_t r0, jit_gpr_t r1)
{
if (jit_can_sign_extend_int_p(i0))
MOVBrm(r1, i0, r0, _NOREG, _SCL1);
else {
MOVQir(i0, JIT_REXTMP);
jit_stxr_c(JIT_REXTMP, r0, r1);
}
}
#define jit_ldi_s(r0, i0) x86_ldi_s(_jit, r0, i0)
__jit_inline void
x86_ldi_s(jit_state_t _jit, jit_gpr_t r0, void *i0)
{
if (jit_can_sign_extend_int_p((long)i0))
MOVSWQmr((long)i0, _NOREG, _NOREG, _SCL1, r0);
else {
MOVQir((long)i0, JIT_REXTMP);
jit_ldr_s(r0, JIT_REXTMP);
}
}
#define jit_ldxi_s(r0, r1, i0) x86_ldxi_s(_jit, r0, r1, i0)
__jit_inline void
x86_ldxi_s(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (jit_can_sign_extend_int_p(i0))
MOVSWQmr(i0, r1, _NOREG, _SCL1, r0);
else {
MOVQir(i0, JIT_REXTMP);
jit_ldxr_s(r0, r1, JIT_REXTMP);
}
}
#define jit_ldi_us(r0, i0) x86_ldi_us(_jit, r0, i0)
__jit_inline void
x86_ldi_us(jit_state_t _jit, jit_gpr_t r0, void *i0)
{
if (jit_can_sign_extend_int_p((long)i0))
MOVZWLmr((long)i0, _NOREG, _NOREG, _SCL1, r0);
else {
MOVQir((long)i0, JIT_REXTMP);
jit_ldr_us(r0, JIT_REXTMP);
}
}
#define jit_ldxi_us(r0, r1, i0) x86_ldxi_us(_jit, r0, r1, i0)
__jit_inline void
x86_ldxi_us(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (jit_can_sign_extend_int_p(i0))
MOVZWLmr(i0, r1, _NOREG, _SCL1, r0);
else {
MOVQir(i0, JIT_REXTMP);
jit_ldxr_us(r0, r1, JIT_REXTMP);
}
}
#define jit_sti_s(i0, r0) x86_sti_s(_jit, i0, r0)
__jit_inline void
x86_sti_s(jit_state_t _jit, void *i0, jit_gpr_t r0)
{
if (jit_can_sign_extend_int_p((long)i0))
MOVWrm(r0, (long)i0, _NOREG, _NOREG, _SCL1);
else {
MOVQir((long)i0, JIT_REXTMP);
jit_str_s(JIT_REXTMP, r0);
}
}
#define jit_stxi_s(i0, r0, r1) x86_stxi_s(_jit, i0, r0, r1)
__jit_inline void
x86_stxi_s(jit_state_t _jit, long i0, jit_gpr_t r0, jit_gpr_t r1)
{
if (jit_can_sign_extend_int_p(i0))
MOVWrm(r1, i0, r0, _NOREG, _SCL1);
else {
MOVQir(i0, JIT_REXTMP);
jit_stxr_s(JIT_REXTMP, r0, r1);
}
}
#define jit_ldr_i(r0, r1) x86_ldr_i(_jit, r0, r1)
__jit_inline void
x86_ldr_i(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
MOVSLQmr(0, r1, _NOREG, _SCL1, r0);
}
#define jit_ldi_i(r0, i0) x86_ldi_i(_jit, r0, i0)
__jit_inline void
x86_ldi_i(jit_state_t _jit, jit_gpr_t r0, void *i0)
{
if (jit_can_sign_extend_int_p((long)i0))
MOVSLQmr((long)i0, _NOREG, _NOREG, _SCL1, r0);
else {
MOVQir((long)i0, JIT_REXTMP);
jit_ldr_i(r0, JIT_REXTMP);
}
}
#define jit_ldxr_i(r0, r1, r2) x86_ldxr_i(_jit, r0, r1, r2)
__jit_inline void
x86_ldxr_i(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
MOVSLQmr(0, r1, r2, _SCL1, r0);
}
#define jit_ldxi_i(r0, r1, i0) x86_ldxi_i(_jit, r0, r1, i0)
__jit_inline void
x86_ldxi_i(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (jit_can_sign_extend_int_p(i0))
MOVSLQmr(i0, r1, _NOREG, _SCL1, r0);
else {
MOVQir(i0, JIT_REXTMP);
jit_ldxr_i(r0, r1, JIT_REXTMP);
}
}
#define jit_ldr_ui(r0, r1) x86_ldr_ui(_jit, r0, r1)
__jit_inline void
x86_ldr_ui(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
MOVLmr(0, r1, _NOREG, _SCL1, r0);
}
#define jit_ldi_ui(r0, i0) x86_ldi_ui(_jit, r0, i0)
__jit_inline void
x86_ldi_ui(jit_state_t _jit, jit_gpr_t r0, void *i0)
{
if (jit_can_sign_extend_int_p((long)i0))
MOVLmr((long)i0, _NOREG, _NOREG, _SCL1, r0);
else {
MOVQir((long)i0, JIT_REXTMP);
jit_ldr_ui(r0, JIT_REXTMP);
}
}
#define jit_ldxr_ui(r0, r1, r2) x86_ldxr_ui(_jit, r0, r1, r2)
__jit_inline void
x86_ldxr_ui(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
MOVLmr(0, r1, r2, _SCL1, r0);
}
#define jit_ldxi_ui(r0, r1, i0) x86_ldxi_ui(_jit, r0, r1, i0)
__jit_inline void
x86_ldxi_ui(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (jit_can_sign_extend_int_p(i0))
MOVLmr(i0, r1, _NOREG, _SCL1, r0);
else {
MOVQir(i0, JIT_REXTMP);
jit_ldxr_ui(r0, r1, JIT_REXTMP);
}
}
#define jit_sti_i(i0, r0) x86_sti_i(_jit, i0, r0)
__jit_inline void
x86_sti_i(jit_state_t _jit, void *i0, jit_gpr_t r0)
{
if (jit_can_sign_extend_int_p((long)i0))
MOVLrm(r0, (long)i0, _NOREG, _NOREG, _SCL1);
else {
MOVQir((long)i0, JIT_REXTMP);
jit_str_i(JIT_REXTMP, r0);
}
}
#define jit_stxi_i(i0, r0, r1) x86_stxi_i(_jit, i0, r0, r1)
__jit_inline void
x86_stxi_i(jit_state_t _jit, long i0, jit_gpr_t r0, jit_gpr_t r1)
{
if (jit_can_sign_extend_int_p(i0))
MOVLrm(r1, i0, r0, _NOREG, _SCL1);
else {
MOVQir(i0, JIT_REXTMP);
jit_stxr_i(JIT_REXTMP, r0, r1);
}
}
#define jit_ldr_l(r0, r1) x86_ldr_l(_jit, r0, r1)
__jit_inline void
x86_ldr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
MOVQmr(0, r1, _NOREG, _SCL1, r0);
}
#define jit_ldi_l(r0, i0) x86_ldi_l(_jit, r0, i0)
__jit_inline void
x86_ldi_l(jit_state_t _jit, jit_gpr_t r0, void *i0)
{
if (jit_can_sign_extend_int_p((long)i0))
MOVQmr((long)i0, _NOREG, _NOREG, _SCL1, r0);
else {
MOVQir((long)i0, JIT_REXTMP);
jit_ldr_l(r0, JIT_REXTMP);
}
}
#define jit_ldxr_l(r0, r1, r2) x86_ldxr_l(_jit, r0, r1, r2)
__jit_inline void
x86_ldxr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
MOVQmr(0, r1, r2, _SCL1, r0);
}
#define jit_ldxi_l(r0, r1, i0) x86_ldxi_l(_jit, r0, r1, i0)
#define jit_ldxi_ul(r0, r1, i0) x86_ldxi_l(_jit, r0, r1, i0)
#define jit_ldxi_p(r0, r1, i0) x86_ldxi_l(_jit, r0, r1, i0)
__jit_inline void
x86_ldxi_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, long i0)
{
if (jit_can_sign_extend_int_p(i0))
MOVQmr(i0, r1, _NOREG, _SCL1, r0);
else {
MOVQir(i0, JIT_REXTMP);
jit_ldxr_l(r0, r1, JIT_REXTMP);
}
}
#define jit_str_l(r0, r1) x86_str_l(_jit, r0, r1)
__jit_inline void
x86_str_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
MOVQrm(r1, 0, r0, _NOREG, _SCL1);
}
#define jit_sti_l(i0, r0) x86_sti_l(_jit, i0, r0)
__jit_inline void
x86_sti_l(jit_state_t _jit, void *i0, jit_gpr_t r0)
{
if (jit_can_sign_extend_int_p((long)i0))
MOVQrm(r0, (long)i0, _NOREG, _NOREG, _SCL1);
else {
MOVQir((long)i0, JIT_REXTMP);
jit_str_l(JIT_REXTMP, r0);
}
}
#define jit_stxr_l(r0, r1, r2) x86_stxr_l(_jit, r0, r1, r2)
__jit_inline void
x86_stxr_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1, jit_gpr_t r2)
{
MOVQrm(r2, 0, r0, r1, _SCL1);
}
#define jit_stxi_l(i0, r0, r1) x86_stxi_l(_jit, i0, r0, r1)
__jit_inline void
x86_stxi_l(jit_state_t _jit, long i0, jit_gpr_t r0, jit_gpr_t r1)
{
if (jit_can_sign_extend_int_p(i0))
MOVQrm(r1, i0, r0, _NOREG, _SCL1);
else {
MOVQir(i0, JIT_REXTMP);
jit_stxr_l(JIT_REXTMP, r0, r1);
}
}
#define jit_extr_c_l(r0, r1) x86_extr_c_l(_jit, r0, r1)
__jit_inline void
x86_extr_c_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
MOVSBQrr(r1, r0);
}
#define jit_extr_c_ul(r0, r1) x86_extr_c_ul(_jit, r0, r1)
__jit_inline void
x86_extr_c_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
MOVZBQrr(r1, r0);
}
#define jit_extr_s_l(r0, r1) x86_extr_s_l(_jit, r0, r1)
__jit_inline void
x86_extr_s_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
MOVSWQrr(r1, r0);
}
#define jit_extr_s_ul(r0, r1) x86_extr_s_ul(_jit, r0, r1)
__jit_inline void
x86_extr_s_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
MOVZWQrr(r1, r0);
}
#define jit_extr_i_l(r0, r1) x86_extr_i_l(_jit, r0, r1)
__jit_inline void
x86_extr_i_l(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
MOVSLQrr(r1, r0);
}
#define jit_extr_i_ul(r0, r1) x86_extr_i_ul(_jit, r0, r1)
__jit_inline void
x86_extr_i_ul(jit_state_t _jit, jit_gpr_t r0, jit_gpr_t r1)
{
MOVLrr(r1, r0);
}
#define jit_finish(i0) x86_finish(_jit, i0)
__jit_inline jit_insn *
x86_finish(jit_state_t _jit, void *i0)
{
assert(_jitl.stack_offset == 0 &&
_jitl.nextarg_puti == 0 &&
_jitl.nextarg_putfp == 0);
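	/* System V AMD64 ABI: before a call to a variadic function, %al must
	 * hold the number of vector registers used to pass arguments;
	 * fprssize presumably tracks how many floating-point arguments were
	 * passed in registers for the pending call. */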
if (_jitl.fprssize) {
MOVBir(_jitl.fprssize, _RAX);
_jitl.fprssize = 0;
}
else
MOVBir(0, _RAX);
jit_calli(i0);
if (jit_push_pop_p() && _jitl.stack_length) {
jit_addi_l(JIT_SP, JIT_SP, _jitl.stack_length);
_jitl.stack_length = 0;
}
return (_jitl.label);
}
#define jit_finishr(rs) x86_finishr(_jit, rs)
__jit_inline void
x86_finishr(jit_state_t _jit, jit_gpr_t r0)
{
assert(_jitl.stack_offset == 0 &&
_jitl.nextarg_puti == 0 &&
_jitl.nextarg_putfp == 0);
if (r0 == _RAX) {
/* clobbered with # of fp registers (for varargs) */
MOVQrr(_RAX, JIT_REXTMP);
r0 = JIT_REXTMP;
}
if (_jitl.fprssize) {
MOVBir(_jitl.fprssize, _RAX);
_jitl.fprssize = 0;
}
else
MOVBir(0, _RAX);
jit_callr(r0);
if (jit_push_pop_p() && _jitl.stack_length) {
jit_addi_l(JIT_SP, JIT_SP, _jitl.stack_length);
_jitl.stack_length = 0;
}
}
#define jit_pusharg_i(r0) x86_pusharg_i(_jit, r0)
#define jit_pusharg_l(r0) x86_pusharg_i(_jit, r0)
__jit_inline void
x86_pusharg_i(jit_state_t _jit, jit_gpr_t r0)
{
assert(_jitl.nextarg_puti > 0);
if (--_jitl.nextarg_puti >= JIT_ARG_MAX) {
_jitl.stack_offset -= sizeof(long);
assert(_jitl.stack_offset >= 0);
if (jit_push_pop_p()) {
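			/* Presumably keeps the outgoing argument area 16-byte
			 * aligned, as the System V AMD64 ABI requires at the
			 * call site: pad rounds stack_length up to the next
			 * multiple of 16 before this argument is stored. */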
int pad = -_jitl.stack_length & 15;
if (pad) {
jit_subi_l(JIT_SP, JIT_SP, pad + sizeof(long));
_jitl.stack_length += pad;
jit_str_l(JIT_SP, r0);
}
else
jit_pushr_l(r0);
}
else
jit_stxi_l(_jitl.stack_offset, JIT_SP, r0);
}
else
jit_movr_l(jit_arg_reg_order[_jitl.nextarg_puti], r0);
}
#define jit_retval_l(r0) x86_retval_l(_jit, r0)
__jit_inline void
x86_retval_l(jit_state_t _jit, jit_gpr_t r0)
{
jit_movr_l(r0, _RAX);
}
#define jit_arg_i() x86_arg_i(_jit)
#define jit_arg_c() x86_arg_i(_jit)
#define jit_arg_uc() x86_arg_i(_jit)
#define jit_arg_s() x86_arg_i(_jit)
#define jit_arg_us() x86_arg_i(_jit)
#define jit_arg_ui() x86_arg_i(_jit)
#define jit_arg_l() x86_arg_i(_jit)
#define jit_arg_ul() x86_arg_i(_jit)
#define jit_arg_p() x86_arg_i(_jit)
__jit_inline int
x86_arg_i(jit_state_t _jit)
{
int ofs;
if (_jitl.nextarg_geti < JIT_ARG_MAX) {
ofs = _jitl.nextarg_geti;
++_jitl.nextarg_geti;
}
else {
ofs = _jitl.framesize;
_jitl.framesize += sizeof(long);
}
return (ofs);
}
#define jit_getarg_c(r0, ofs) x86_getarg_c(_jit, r0, ofs)
__jit_inline void
x86_getarg_c(jit_state_t _jit, jit_gpr_t r0, int ofs)
{
if (ofs < JIT_ARG_MAX)
jit_extr_c_l(r0, jit_arg_reg_order[ofs]);
else
jit_ldxi_c(r0, JIT_FP, ofs);
}
#define jit_getarg_uc(r0, ofs) x86_getarg_uc(_jit, r0, ofs)
__jit_inline void
x86_getarg_uc(jit_state_t _jit, jit_gpr_t r0, int ofs)
{
if (ofs < JIT_ARG_MAX)
jit_extr_c_ul(r0, jit_arg_reg_order[ofs]);
else
jit_ldxi_uc(r0, JIT_FP, ofs);
}
#define jit_getarg_s(r0, ofs) x86_getarg_s(_jit, r0, ofs)
__jit_inline void
x86_getarg_s(jit_state_t _jit, jit_gpr_t r0, int ofs)
{
if (ofs < JIT_ARG_MAX)
jit_extr_s_l(r0, jit_arg_reg_order[ofs]);
else
jit_ldxi_s(r0, JIT_FP, ofs);
}
#define jit_getarg_us(r0, ofs) x86_getarg_us(_jit, r0, ofs)
__jit_inline void
x86_getarg_us(jit_state_t _jit, jit_gpr_t r0, int ofs)
{
if (ofs < JIT_ARG_MAX)
jit_extr_s_ul(r0, jit_arg_reg_order[ofs]);
else
jit_ldxi_us(r0, JIT_FP, ofs);
}
#define jit_getarg_i(r0, ofs) x86_getarg_i(_jit, r0, ofs)
__jit_inline void
x86_getarg_i(jit_state_t _jit, jit_gpr_t r0, int ofs)
{
if (ofs < JIT_ARG_MAX)
jit_movr_l(r0, jit_arg_reg_order[ofs]);
else
jit_ldxi_i(r0, JIT_FP, ofs);
}
#define jit_getarg_ui(r0, ofs) x86_getarg_ui(_jit, r0, ofs)
__jit_inline void
x86_getarg_ui(jit_state_t _jit, jit_gpr_t r0, int ofs)
{
if (ofs < JIT_ARG_MAX)
jit_movr_ul(r0, jit_arg_reg_order[ofs]);
else
jit_ldxi_ui(r0, JIT_FP, ofs);
}
#define jit_getarg_l(r0, ofs) x86_getarg_l(_jit, r0, ofs)
__jit_inline void
x86_getarg_l(jit_state_t _jit, jit_gpr_t r0, int ofs)
{
if (ofs < JIT_ARG_MAX)
jit_movr_l(r0, jit_arg_reg_order[ofs]);
else
jit_ldxi_l(r0, JIT_FP, ofs);
}
#define jit_getarg_ul(r0, ofs) x86_getarg_ul(_jit, r0, ofs)
__jit_inline void
x86_getarg_ul(jit_state_t _jit, jit_gpr_t r0, int ofs)
{
if (ofs < JIT_ARG_MAX)
jit_movr_ul(r0, jit_arg_reg_order[ofs]);
else
jit_ldxi_ul(r0, JIT_FP, ofs);
}
#define jit_getarg_p(r0, ofs) x86_getarg_p(_jit, r0, ofs)
__jit_inline void
x86_getarg_p(jit_state_t _jit, jit_gpr_t r0, int ofs)
{
if (ofs < JIT_ARG_MAX)
jit_movr_p(r0, jit_arg_reg_order[ofs]);
else
jit_ldxi_p(r0, JIT_FP, ofs);
}
#endif /* __lightning_core_h */
| {
"language": "C"
} |
/*
* Marvell Wireless LAN device driver: 802.11h
*
* Copyright (C) 2013, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "main.h"
#include "fw.h"
/* This function appends 11h info to a buffer while joining an
* infrastructure BSS
*/
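/* The TLVs are appended in this order: a power-capability TLV, a local
 * power-constraint TLV, and a supported-channels IE carried in a pass-through
 * TLV, with one (first_channel, num_channels = 1) pair per channel of the
 * current band.
 */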
static void
mwifiex_11h_process_infra_join(struct mwifiex_private *priv, u8 **buffer,
struct mwifiex_bssdescriptor *bss_desc)
{
struct mwifiex_ie_types_header *ie_header;
struct mwifiex_ie_types_pwr_capability *cap;
struct mwifiex_ie_types_local_pwr_constraint *constraint;
struct ieee80211_supported_band *sband;
u8 radio_type;
int i;
if (!buffer || !(*buffer))
return;
radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
sband = priv->wdev->wiphy->bands[radio_type];
cap = (struct mwifiex_ie_types_pwr_capability *)*buffer;
cap->header.type = cpu_to_le16(WLAN_EID_PWR_CAPABILITY);
cap->header.len = cpu_to_le16(2);
cap->min_pwr = 0;
cap->max_pwr = 0;
*buffer += sizeof(*cap);
constraint = (struct mwifiex_ie_types_local_pwr_constraint *)*buffer;
constraint->header.type = cpu_to_le16(WLAN_EID_PWR_CONSTRAINT);
constraint->header.len = cpu_to_le16(2);
constraint->chan = bss_desc->channel;
constraint->constraint = bss_desc->local_constraint;
*buffer += sizeof(*constraint);
ie_header = (struct mwifiex_ie_types_header *)*buffer;
ie_header->type = cpu_to_le16(TLV_TYPE_PASSTHROUGH);
ie_header->len = cpu_to_le16(2 * sband->n_channels + 2);
*buffer += sizeof(*ie_header);
*(*buffer)++ = WLAN_EID_SUPPORTED_CHANNELS;
*(*buffer)++ = 2 * sband->n_channels;
for (i = 0; i < sband->n_channels; i++) {
*(*buffer)++ = ieee80211_frequency_to_channel(
sband->channels[i].center_freq);
*(*buffer)++ = 1; /* one channel in the subband */
}
}
/* Enable or disable the 11h extensions in the firmware */
static int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag)
{
u32 enable = flag;
return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
HostCmd_ACT_GEN_SET, DOT11H_I, &enable);
}
/* This function processes the TLV buffer for a pending BSS Join command.
*
* Activate 11h functionality in the firmware if the spectrum management
* capability bit is found in the network we are joining. Also, necessary
* TLVs are set based on requested network's 11h capability.
*/
void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
struct mwifiex_bssdescriptor *bss_desc)
{
if (bss_desc->sensed_11h) {
/* Activate 11h functions in firmware, turns on capability
* bit
*/
mwifiex_11h_activate(priv, true);
bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_SPECTRUM_MGMT;
mwifiex_11h_process_infra_join(priv, buffer, bss_desc);
} else {
/* Deactivate 11h functions in the firmware */
mwifiex_11h_activate(priv, false);
bss_desc->cap_info_bitmap &= ~WLAN_CAPABILITY_SPECTRUM_MGMT;
}
}
| {
"language": "C"
} |
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
#include <linux/pm_runtime.h>
#define RADEON_WAIT_IDLE_TIMEOUT 200
/**
* radeon_driver_irq_handler_kms - irq handler for KMS
*
 * @irq: interrupt line number
 * @arg: pointer to the drm device (struct drm_device *)
*
* This is the irq handler for the radeon KMS driver (all asics).
* radeon_irq_process is a macro that points to the per-asic
* irq handler callback.
*/
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
struct radeon_device *rdev = dev->dev_private;
irqreturn_t ret;
ret = radeon_irq_process(rdev);
if (ret == IRQ_HANDLED)
pm_runtime_mark_last_busy(dev->dev);
return ret;
}
/*
* Handle hotplug events outside the interrupt handler proper.
*/
/**
* radeon_hotplug_work_func - display hotplug work handler
*
* @work: work struct
*
* This is the hot plug event work handler (all asics).
* The work gets scheduled from the irq handler if there
* was a hot plug interrupt. It walks the connector table
* and calls the hotplug handler for each one, then sends
* a drm hotplug event to alert userspace.
*/
static void radeon_hotplug_work_func(struct work_struct *work)
{
struct radeon_device *rdev = container_of(work, struct radeon_device,
hotplug_work);
struct drm_device *dev = rdev->ddev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
mutex_lock(&mode_config->mutex);
if (mode_config->num_connector) {
list_for_each_entry(connector, &mode_config->connector_list, head)
radeon_connector_hotplug(connector);
}
mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
/**
* radeon_driver_irq_preinstall_kms - drm irq preinstall callback
*
* @dev: drm dev pointer
*
* Gets the hw ready to enable irqs (all asics).
* This function disables all interrupt sources on the GPU.
*/
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
unsigned i;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
atomic_set(&rdev->irq.ring_int[i], 0);
rdev->irq.dpm_thermal = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
atomic_set(&rdev->irq.pflip[i], 0);
rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/* Clear bits */
radeon_irq_process(rdev);
}
/**
 * radeon_driver_irq_postinstall_kms - drm irq postinstall callback
*
* @dev: drm dev pointer
*
* Handles stuff to be done after enabling irqs (all asics).
* Returns 0 on success.
*/
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
dev->max_vblank_count = 0x001fffff;
return 0;
}
/**
* radeon_driver_irq_uninstall_kms - drm irq uninstall callback
*
* @dev: drm dev pointer
*
* This function disables all interrupt sources on the GPU (all asics).
*/
void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
unsigned i;
if (rdev == NULL) {
return;
}
spin_lock_irqsave(&rdev->irq.lock, irqflags);
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
atomic_set(&rdev->irq.ring_int[i], 0);
rdev->irq.dpm_thermal = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
atomic_set(&rdev->irq.pflip[i], 0);
rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
/**
* radeon_msi_ok - asic specific msi checks
*
* @rdev: radeon device pointer
*
* Handles asic specific MSI checks to determine if
* MSIs should be enabled on a particular chip (all asics).
* Returns true if MSIs should be enabled, false if MSIs
* should not be enabled.
*/
static bool radeon_msi_ok(struct radeon_device *rdev)
{
/* RV370/RV380 was first asic with MSI support */
if (rdev->family < CHIP_RV380)
return false;
/* MSIs don't work on AGP */
if (rdev->flags & RADEON_IS_AGP)
return false;
/*
* Older chips have a HW limitation, they can only generate 40 bits
* of address for "64-bit" MSIs which breaks on some platforms, notably
* IBM POWER servers, so we limit them
*/
if (rdev->family < CHIP_BONAIRE) {
dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
rdev->pdev->no_64bit_msi = 1;
}
/* force MSI on */
if (radeon_msi == 1)
return true;
else if (radeon_msi == 0)
return false;
/* Quirks */
/* HP RS690 only seems to work with MSIs. */
if ((rdev->pdev->device == 0x791f) &&
(rdev->pdev->subsystem_vendor == 0x103c) &&
(rdev->pdev->subsystem_device == 0x30c2))
return true;
/* Dell RS690 only seems to work with MSIs. */
if ((rdev->pdev->device == 0x791f) &&
(rdev->pdev->subsystem_vendor == 0x1028) &&
(rdev->pdev->subsystem_device == 0x01fc))
return true;
/* Dell RS690 only seems to work with MSIs. */
if ((rdev->pdev->device == 0x791f) &&
(rdev->pdev->subsystem_vendor == 0x1028) &&
(rdev->pdev->subsystem_device == 0x01fd))
return true;
/* Gateway RS690 only seems to work with MSIs. */
if ((rdev->pdev->device == 0x791f) &&
(rdev->pdev->subsystem_vendor == 0x107b) &&
(rdev->pdev->subsystem_device == 0x0185))
return true;
/* try and enable MSIs by default on all RS690s */
if (rdev->family == CHIP_RS690)
return true;
/* RV515 seems to have MSI issues where it loses
* MSI rearms occasionally. This leads to lockups and freezes.
* disable it by default.
*/
if (rdev->family == CHIP_RV515)
return false;
if (rdev->flags & RADEON_IS_IGP) {
/* APUs work fine with MSIs */
if (rdev->family >= CHIP_PALM)
return true;
/* lots of IGPs have problems with MSIs */
return false;
}
return true;
}
/**
* radeon_irq_kms_init - init driver interrupt info
*
* @rdev: radeon device pointer
*
* Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
* Returns 0 for success, error for failure.
*/
int radeon_irq_kms_init(struct radeon_device *rdev)
{
int r = 0;
spin_lock_init(&rdev->irq.lock);
r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
return r;
}
/* enable msi */
rdev->msi_enabled = 0;
if (radeon_msi_ok(rdev)) {
int ret = pci_enable_msi(rdev->pdev);
if (!ret) {
rdev->msi_enabled = 1;
dev_info(rdev->dev, "radeon: using MSI.\n");
}
}
INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
rdev->irq.installed = true;
r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
if (r) {
rdev->irq.installed = false;
flush_work(&rdev->hotplug_work);
return r;
}
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
/**
* radeon_irq_kms_fini - tear down driver interrupt info
*
* @rdev: radeon device pointer
*
* Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
*/
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
drm_vblank_cleanup(rdev->ddev);
if (rdev->irq.installed) {
drm_irq_uninstall(rdev->ddev);
rdev->irq.installed = false;
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
flush_work(&rdev->hotplug_work);
}
}
/**
* radeon_irq_kms_sw_irq_get - enable software interrupt
*
* @rdev: radeon device pointer
* @ring: ring whose interrupt you want to enable
*
* Enables the software interrupt for a specific ring (all asics).
* The software interrupt is generally used to signal a fence on
* a particular ring.
*/
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
if (!rdev->ddev->irq_enabled)
return;
if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
}
/**
* radeon_irq_kms_sw_irq_get_delayed - enable software interrupt
*
* @rdev: radeon device pointer
* @ring: ring whose interrupt you want to enable
*
 * Takes a reference on the software interrupt for a specific ring (all asics)
 * without touching the hardware. Returns true when the reference count goes
 * from zero to one, meaning the caller is responsible for programming the
 * interrupt registers itself at a later, safe point.
*/
bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring)
{
return atomic_inc_return(&rdev->irq.ring_int[ring]) == 1;
}
/**
* radeon_irq_kms_sw_irq_put - disable software interrupt
*
* @rdev: radeon device pointer
* @ring: ring whose interrupt you want to disable
*
* Disables the software interrupt for a specific ring (all asics).
* The software interrupt is generally used to signal a fence on
* a particular ring.
*/
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
if (!rdev->ddev->irq_enabled)
return;
if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
}
/**
* radeon_irq_kms_pflip_irq_get - enable pageflip interrupt
*
* @rdev: radeon device pointer
* @crtc: crtc whose interrupt you want to enable
*
* Enables the pageflip interrupt for a specific crtc (all asics).
* For pageflips we use the vblank interrupt source.
*/
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
{
unsigned long irqflags;
if (crtc < 0 || crtc >= rdev->num_crtc)
return;
if (!rdev->ddev->irq_enabled)
return;
if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
}
/**
* radeon_irq_kms_pflip_irq_put - disable pageflip interrupt
*
* @rdev: radeon device pointer
* @crtc: crtc whose interrupt you want to disable
*
* Disables the pageflip interrupt for a specific crtc (all asics).
* For pageflips we use the vblank interrupt source.
*/
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
{
unsigned long irqflags;
if (crtc < 0 || crtc >= rdev->num_crtc)
return;
if (!rdev->ddev->irq_enabled)
return;
if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
}
/**
* radeon_irq_kms_enable_afmt - enable audio format change interrupt
*
* @rdev: radeon device pointer
* @block: afmt block whose interrupt you want to enable
*
* Enables the afmt change interrupt for a specific afmt block (all asics).
*/
void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
{
unsigned long irqflags;
if (!rdev->ddev->irq_enabled)
return;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.afmt[block] = true;
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
/**
* radeon_irq_kms_disable_afmt - disable audio format change interrupt
*
* @rdev: radeon device pointer
* @block: afmt block whose interrupt you want to disable
*
* Disables the afmt change interrupt for a specific afmt block (all asics).
*/
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
{
unsigned long irqflags;
if (!rdev->ddev->irq_enabled)
return;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.afmt[block] = false;
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
/**
* radeon_irq_kms_enable_hpd - enable hotplug detect interrupt
*
* @rdev: radeon device pointer
* @hpd_mask: mask of hpd pins you want to enable.
*
* Enables the hotplug detect interrupt for a specific hpd pin (all asics).
*/
void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
unsigned long irqflags;
int i;
if (!rdev->ddev->irq_enabled)
return;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
/**
* radeon_irq_kms_disable_hpd - disable hotplug detect interrupt
*
* @rdev: radeon device pointer
* @hpd_mask: mask of hpd pins you want to disable.
*
* Disables the hotplug detect interrupt for a specific hpd pin (all asics).
*/
void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
unsigned long irqflags;
int i;
if (!rdev->ddev->irq_enabled)
return;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
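	/* The hpd[] entries are booleans, so a logical (not bitwise) NOT of
	 * each mask bit clears exactly the requested pins and leaves the
	 * others untouched. */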
for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
| {
"language": "C"
} |
// Autogenerated by gameplay-luagen
#ifndef LUA_BUNDLE_H_
#define LUA_BUNDLE_H_
namespace gameplay
{
void luaRegister_Bundle();
}
#endif
| {
"language": "C"
} |
// Copyright (c) 2000 Max-Planck-Institute Saarbruecken (Germany).
// All rights reserved.
//
// This file is part of CGAL (www.cgal.org).
//
// $URL$
// $Id$
// SPDX-License-Identifier: GPL-3.0-or-later OR LicenseRef-Commercial
//
//
// Author(s) : Susan Hert <hert@mpi-sb.mpg.de>
#ifndef CGAL_TURN_REVERSER_H
#define CGAL_TURN_REVERSER_H
#include <CGAL/license/Partition_2.h>
namespace CGAL {
template <class Point_2, class TurnPredicate>
class Turn_reverser
{
public:
Turn_reverser() {}
Turn_reverser( const TurnPredicate& t ): turn(t) {}
bool operator() (const Point_2& p1,
const Point_2& p2,
const Point_2& p3) const
{ return turn(p2, p1, p3); }
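  // Swapping the first two points negates the orientation of the triple,
  // so wrapping e.g. a left-turn predicate in Turn_reverser yields a
  // right-turn predicate (and vice versa).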
private:
TurnPredicate turn;
};
}
#endif // CGAL_TURN_REVERSER_H
| {
"language": "C"
} |
#include <ccan/crypto/sha256/sha256.h>
/* Include the C files directly. */
#include <ccan/crypto/sha256/sha256.c>
#include <ccan/tap/tap.h>
int main(void)
{
struct sha256 h, expected;
static const char zeroes[1000];
size_t i;
plan_tests(63);
/* Test different alignments. */
sha256(&expected, zeroes, sizeof(zeroes) - 64);
for (i = 1; i < 64; i++) {
sha256(&h, zeroes + i, sizeof(zeroes) - 64);
ok1(memcmp(&h, &expected, sizeof(h)) == 0);
}
/* This exits depending on whether all tests passed */
return exit_status();
}
| {
"language": "C"
} |
// Software License Agreement (BSD License)
//
// Copyright (c) 2010-2016, Deusty, LLC
// All rights reserved.
//
// Redistribution and use of this software in source and binary forms,
// with or without modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Neither the name of Deusty nor the names of its contributors may be used
// to endorse or promote products derived from this software without specific
// prior written permission of Deusty, LLC.
// Disable legacy macros
#ifndef DD_LEGACY_MACROS
#define DD_LEGACY_MACROS 0
#endif
#import "DDLog.h"
/**
* The constant/variable/method responsible for controlling the current log level.
**/
#ifndef LOG_LEVEL_DEF
#define LOG_LEVEL_DEF ddLogLevel
#endif
/**
* Whether async should be used by log messages, excluding error messages that are always sent sync.
**/
#ifndef LOG_ASYNC_ENABLED
#define LOG_ASYNC_ENABLED YES
#endif
/**
* These are the two macros that all other macros below compile into.
* These big multiline macros makes all the other macros easier to read.
**/
#define LOG_MACRO(isAsynchronous, lvl, flg, ctx, atag, fnct, frmt, ...) \
[DDLog log : isAsynchronous \
level : lvl \
flag : flg \
context : ctx \
file : __FILE__ \
function : fnct \
line : __LINE__ \
tag : atag \
format : (frmt), ## __VA_ARGS__]
#define LOG_MACRO_TO_DDLOG(ddlog, isAsynchronous, lvl, flg, ctx, atag, fnct, frmt, ...) \
[ddlog log : isAsynchronous \
level : lvl \
flag : flg \
context : ctx \
file : __FILE__ \
function : fnct \
line : __LINE__ \
tag : atag \
format : (frmt), ## __VA_ARGS__]
/**
 * Define versions of the macros that only execute if the log level is above the threshold.
* The compiled versions essentially look like this:
*
* if (logFlagForThisLogMsg & ddLogLevel) { execute log message }
*
* When LOG_LEVEL_DEF is defined as ddLogLevel.
*
* As shown further below, Lumberjack actually uses a bitmask as opposed to primitive log levels.
* This allows for a great amount of flexibility and some pretty advanced fine grained logging techniques.
*
* Note that when compiler optimizations are enabled (as they are for your release builds),
* the log messages above your logging threshold will automatically be compiled out.
*
* (If the compiler sees LOG_LEVEL_DEF/ddLogLevel declared as a constant, the compiler simply checks to see
* if the 'if' statement would execute, and if not it strips it from the binary.)
*
* We also define shorthand versions for asynchronous and synchronous logging.
**/
#define LOG_MAYBE(async, lvl, flg, ctx, tag, fnct, frmt, ...) \
do { if(lvl & flg) LOG_MACRO(async, lvl, flg, ctx, tag, fnct, frmt, ##__VA_ARGS__); } while(0)
#define LOG_MAYBE_TO_DDLOG(ddlog, async, lvl, flg, ctx, tag, fnct, frmt, ...) \
do { if(lvl & flg) LOG_MACRO_TO_DDLOG(ddlog, async, lvl, flg, ctx, tag, fnct, frmt, ##__VA_ARGS__); } while(0)
/**
* Ready to use log macros with no context or tag.
**/
#define DDLogError(frmt, ...) LOG_MAYBE(NO, LOG_LEVEL_DEF, DDLogFlagError, 0, nil, __PRETTY_FUNCTION__, frmt, ##__VA_ARGS__)
#define DDLogWarn(frmt, ...) LOG_MAYBE(LOG_ASYNC_ENABLED, LOG_LEVEL_DEF, DDLogFlagWarning, 0, nil, __PRETTY_FUNCTION__, frmt, ##__VA_ARGS__)
#define DDLogInfo(frmt, ...) LOG_MAYBE(LOG_ASYNC_ENABLED, LOG_LEVEL_DEF, DDLogFlagInfo, 0, nil, __PRETTY_FUNCTION__, frmt, ##__VA_ARGS__)
#define DDLogDebug(frmt, ...) LOG_MAYBE(LOG_ASYNC_ENABLED, LOG_LEVEL_DEF, DDLogFlagDebug, 0, nil, __PRETTY_FUNCTION__, frmt, ##__VA_ARGS__)
#define DDLogVerbose(frmt, ...) LOG_MAYBE(LOG_ASYNC_ENABLED, LOG_LEVEL_DEF, DDLogFlagVerbose, 0, nil, __PRETTY_FUNCTION__, frmt, ##__VA_ARGS__)
#define DDLogErrorToDDLog(ddlog, frmt, ...) LOG_MAYBE_TO_DDLOG(ddlog, NO, LOG_LEVEL_DEF, DDLogFlagError, 0, nil, __PRETTY_FUNCTION__, frmt, ##__VA_ARGS__)
#define DDLogWarnToDDLog(ddlog, frmt, ...) LOG_MAYBE_TO_DDLOG(ddlog, LOG_ASYNC_ENABLED, LOG_LEVEL_DEF, DDLogFlagWarning, 0, nil, __PRETTY_FUNCTION__, frmt, ##__VA_ARGS__)
#define DDLogInfoToDDLog(ddlog, frmt, ...) LOG_MAYBE_TO_DDLOG(ddlog, LOG_ASYNC_ENABLED, LOG_LEVEL_DEF, DDLogFlagInfo, 0, nil, __PRETTY_FUNCTION__, frmt, ##__VA_ARGS__)
#define DDLogDebugToDDLog(ddlog, frmt, ...) LOG_MAYBE_TO_DDLOG(ddlog, LOG_ASYNC_ENABLED, LOG_LEVEL_DEF, DDLogFlagDebug, 0, nil, __PRETTY_FUNCTION__, frmt, ##__VA_ARGS__)
#define DDLogVerboseToDDLog(ddlog, frmt, ...) LOG_MAYBE_TO_DDLOG(ddlog, LOG_ASYNC_ENABLED, LOG_LEVEL_DEF, DDLogFlagVerbose, 0, nil, __PRETTY_FUNCTION__, frmt, ##__VA_ARGS__)
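/**
 * Example (illustrative): with LOG_LEVEL_DEF resolving to DDLogLevelWarning,
 *
 *   DDLogInfo(@"loaded %lu items", (unsigned long)count);
 *
 * expands to a check of (DDLogLevelWarning & DDLogFlagInfo), which is zero,
 * so the statement is skipped entirely, while DDLogError(@"...") still fires
 * (synchronously, since error messages are always sent sync).
 **/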
| {
"language": "C"
} |
/*
* jdatadst.c
*
* This file was part of the Independent JPEG Group's software:
* Copyright (C) 1994-1996, Thomas G. Lane.
* Modified 2009-2012 by Guido Vollbeding.
* libjpeg-turbo Modifications:
* Copyright (C) 2013, 2016, D. R. Commander.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*
* This file contains compression data destination routines for the case of
* emitting JPEG data to memory or to a file (or any stdio stream).
* While these routines are sufficient for most applications,
* some will want to use a different destination manager.
* IMPORTANT: we assume that fwrite() will correctly transcribe an array of
* JOCTETs into 8-bit-wide elements on external storage. If char is wider
* than 8 bits on your machine, you may need to do some tweaking.
*/
/* this is not a core library module, so it doesn't define JPEG_INTERNALS */
#include "jinclude.h"
#include "jpeglib.h"
#include "jerror.h"
#include "jpegint.h"
#ifndef HAVE_STDLIB_H /* <stdlib.h> should declare malloc(),free() */
extern void *malloc (size_t size);
extern void free (void *ptr);
#endif
/* Expanded data destination object for stdio output */
typedef struct {
struct jpeg_destination_mgr pub; /* public fields */
FILE *outfile; /* target stream */
JOCTET *buffer; /* start of buffer */
} my_destination_mgr;
typedef my_destination_mgr *my_dest_ptr;
#define OUTPUT_BUF_SIZE 4096 /* choose an efficiently fwrite'able size */
#if JPEG_LIB_VERSION >= 80 || defined(MEM_SRCDST_SUPPORTED)
/* Expanded data destination object for memory output */
typedef struct {
struct jpeg_destination_mgr pub; /* public fields */
unsigned char **outbuffer; /* target buffer */
unsigned long *outsize;
unsigned char *newbuffer; /* newly allocated buffer */
JOCTET *buffer; /* start of buffer */
size_t bufsize;
} my_mem_destination_mgr;
typedef my_mem_destination_mgr *my_mem_dest_ptr;
#endif
/*
* Initialize destination --- called by jpeg_start_compress
* before any data is actually written.
*/
METHODDEF(void)
init_destination (j_compress_ptr cinfo)
{
my_dest_ptr dest = (my_dest_ptr) cinfo->dest;
/* Allocate the output buffer --- it will be released when done with image */
dest->buffer = (JOCTET *)
(*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE,
OUTPUT_BUF_SIZE * sizeof(JOCTET));
dest->pub.next_output_byte = dest->buffer;
dest->pub.free_in_buffer = OUTPUT_BUF_SIZE;
}
#if JPEG_LIB_VERSION >= 80 || defined(MEM_SRCDST_SUPPORTED)
METHODDEF(void)
init_mem_destination (j_compress_ptr cinfo)
{
/* no work necessary here */
}
#endif
/*
* Empty the output buffer --- called whenever buffer fills up.
*
* In typical applications, this should write the entire output buffer
* (ignoring the current state of next_output_byte & free_in_buffer),
* reset the pointer & count to the start of the buffer, and return TRUE
* indicating that the buffer has been dumped.
*
* In applications that need to be able to suspend compression due to output
* overrun, a FALSE return indicates that the buffer cannot be emptied now.
* In this situation, the compressor will return to its caller (possibly with
* an indication that it has not accepted all the supplied scanlines). The
* application should resume compression after it has made more room in the
* output buffer. Note that there are substantial restrictions on the use of
* suspension --- see the documentation.
*
* When suspending, the compressor will back up to a convenient restart point
* (typically the start of the current MCU). next_output_byte & free_in_buffer
* indicate where the restart point will be if the current call returns FALSE.
* Data beyond this point will be regenerated after resumption, so do not
* write it out when emptying the buffer externally.
*/
METHODDEF(boolean)
empty_output_buffer (j_compress_ptr cinfo)
{
my_dest_ptr dest = (my_dest_ptr) cinfo->dest;
if (JFWRITE(dest->outfile, dest->buffer, OUTPUT_BUF_SIZE) !=
(size_t) OUTPUT_BUF_SIZE)
ERREXIT(cinfo, JERR_FILE_WRITE);
dest->pub.next_output_byte = dest->buffer;
dest->pub.free_in_buffer = OUTPUT_BUF_SIZE;
return TRUE;
}
#if JPEG_LIB_VERSION >= 80 || defined(MEM_SRCDST_SUPPORTED)
METHODDEF(boolean)
empty_mem_output_buffer (j_compress_ptr cinfo)
{
size_t nextsize;
JOCTET *nextbuffer;
my_mem_dest_ptr dest = (my_mem_dest_ptr) cinfo->dest;
/* Try to allocate new buffer with double size */
nextsize = dest->bufsize * 2;
nextbuffer = (JOCTET *) malloc(nextsize);
if (nextbuffer == NULL)
ERREXIT1(cinfo, JERR_OUT_OF_MEMORY, 10);
MEMCOPY(nextbuffer, dest->buffer, dest->bufsize);
free(dest->newbuffer);
dest->newbuffer = nextbuffer;
dest->pub.next_output_byte = nextbuffer + dest->bufsize;
dest->pub.free_in_buffer = dest->bufsize;
dest->buffer = nextbuffer;
dest->bufsize = nextsize;
return TRUE;
}
#endif
/*
* Terminate destination --- called by jpeg_finish_compress
* after all data has been written. Usually needs to flush buffer.
*
* NB: *not* called by jpeg_abort or jpeg_destroy; surrounding
* application must deal with any cleanup that should happen even
* for error exit.
*/
METHODDEF(void)
term_destination (j_compress_ptr cinfo)
{
my_dest_ptr dest = (my_dest_ptr) cinfo->dest;
size_t datacount = OUTPUT_BUF_SIZE - dest->pub.free_in_buffer;
/* Write any data remaining in the buffer */
if (datacount > 0) {
if (JFWRITE(dest->outfile, dest->buffer, datacount) != datacount)
ERREXIT(cinfo, JERR_FILE_WRITE);
}
fflush(dest->outfile);
/* Make sure we wrote the output file OK */
if (ferror(dest->outfile))
ERREXIT(cinfo, JERR_FILE_WRITE);
}
#if JPEG_LIB_VERSION >= 80 || defined(MEM_SRCDST_SUPPORTED)
METHODDEF(void)
term_mem_destination (j_compress_ptr cinfo)
{
my_mem_dest_ptr dest = (my_mem_dest_ptr) cinfo->dest;
*dest->outbuffer = dest->buffer;
*dest->outsize = (unsigned long)(dest->bufsize - dest->pub.free_in_buffer);
}
#endif
/*
* Prepare for output to a stdio stream.
* The caller must have already opened the stream, and is responsible
* for closing it after finishing compression.
*/
GLOBAL(void)
jpeg_stdio_dest (j_compress_ptr cinfo, FILE *outfile)
{
my_dest_ptr dest;
/* The destination object is made permanent so that multiple JPEG images
* can be written to the same file without re-executing jpeg_stdio_dest.
*/
if (cinfo->dest == NULL) { /* first time for this JPEG object? */
cinfo->dest = (struct jpeg_destination_mgr *)
(*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT,
sizeof(my_destination_mgr));
} else if (cinfo->dest->init_destination != init_destination) {
/* It is unsafe to reuse the existing destination manager unless it was
* created by this function. Otherwise, there is no guarantee that the
* opaque structure is the right size. Note that we could just create a
* new structure, but the old structure would not be freed until
* jpeg_destroy_compress() was called.
*/
ERREXIT(cinfo, JERR_BUFFER_SIZE);
}
dest = (my_dest_ptr) cinfo->dest;
dest->pub.init_destination = init_destination;
dest->pub.empty_output_buffer = empty_output_buffer;
dest->pub.term_destination = term_destination;
dest->outfile = outfile;
}
#if JPEG_LIB_VERSION >= 80 || defined(MEM_SRCDST_SUPPORTED)
/*
* Prepare for output to a memory buffer.
 * The caller may supply its own initial buffer with appropriate size.
* Otherwise, or when the actual data output exceeds the given size,
* the library adapts the buffer size as necessary.
* The standard library functions malloc/free are used for allocating
* larger memory, so the buffer is available to the application after
* finishing compression, and then the application is responsible for
* freeing the requested memory.
* Note: An initial buffer supplied by the caller is expected to be
* managed by the application. The library does not free such buffer
* when allocating a larger buffer.
*/
GLOBAL(void)
jpeg_mem_dest_internal (j_compress_ptr cinfo,
unsigned char **outbuffer, unsigned long *outsize, int pool_id)
{
my_mem_dest_ptr dest;
if (outbuffer == NULL || outsize == NULL) /* sanity check */
ERREXIT(cinfo, JERR_BUFFER_SIZE);
/* The destination object is made permanent so that multiple JPEG images
* can be written to the same buffer without re-executing jpeg_mem_dest.
*/
if (cinfo->dest == NULL) { /* first time for this JPEG object? */
cinfo->dest = (struct jpeg_destination_mgr *)
(*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, pool_id,
sizeof(my_mem_destination_mgr));
} else if (cinfo->dest->init_destination != init_mem_destination) {
/* It is unsafe to reuse the existing destination manager unless it was
* created by this function.
*/
ERREXIT(cinfo, JERR_BUFFER_SIZE);
}
dest = (my_mem_dest_ptr) cinfo->dest;
dest->pub.init_destination = init_mem_destination;
dest->pub.empty_output_buffer = empty_mem_output_buffer;
dest->pub.term_destination = term_mem_destination;
dest->outbuffer = outbuffer;
dest->outsize = outsize;
dest->newbuffer = NULL;
if (*outbuffer == NULL || *outsize == 0) {
/* Allocate initial buffer */
dest->newbuffer = *outbuffer = (unsigned char *) malloc(OUTPUT_BUF_SIZE);
if (dest->newbuffer == NULL)
ERREXIT1(cinfo, JERR_OUT_OF_MEMORY, 10);
*outsize = OUTPUT_BUF_SIZE;
}
dest->pub.next_output_byte = dest->buffer = *outbuffer;
dest->pub.free_in_buffer = dest->bufsize = *outsize;
}
GLOBAL(void)
jpeg_mem_dest (j_compress_ptr cinfo,
unsigned char **outbuffer, unsigned long *outsize)
{
/* The destination object is made permanent so that multiple JPEG images
* can be written to the same file without re-executing jpeg_stdio_dest.
*/
jpeg_mem_dest_internal(cinfo, outbuffer, outsize, JPOOL_PERMANENT);
}
#endif
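#if JPEG_LIB_VERSION >= 80 || defined(MEM_SRCDST_SUPPORTED)
/*
 * Usage sketch (illustrative only, not part of the original file): compressing
 * an in-memory RGB image into a library-managed buffer with jpeg_mem_dest().
 * The helper name and the row layout (tightly packed RGB, width*3 bytes per
 * row) are assumptions made for the example; the jpeglib calls are the
 * standard compression sequence and rely on the headers already included at
 * the top of this file. The caller owns and must free() *jpeg_out afterwards.
 */
unsigned long
example_compress_rgb_to_mem (const unsigned char *rgb, int width, int height,
                             unsigned char **jpeg_out)
{
  struct jpeg_compress_struct cinfo;
  struct jpeg_error_mgr jerr;
  unsigned long jpeg_size = 0;

  cinfo.err = jpeg_std_error(&jerr);
  jpeg_create_compress(&cinfo);

  *jpeg_out = NULL;                /* let the destination manager allocate */
  jpeg_mem_dest(&cinfo, jpeg_out, &jpeg_size);

  cinfo.image_width = (JDIMENSION) width;
  cinfo.image_height = (JDIMENSION) height;
  cinfo.input_components = 3;
  cinfo.in_color_space = JCS_RGB;
  jpeg_set_defaults(&cinfo);

  jpeg_start_compress(&cinfo, TRUE);
  while (cinfo.next_scanline < cinfo.image_height) {
    JSAMPROW row = (JSAMPROW) (rgb + (size_t) cinfo.next_scanline * width * 3);
    jpeg_write_scanlines(&cinfo, &row, 1);
  }
  jpeg_finish_compress(&cinfo);
  jpeg_destroy_compress(&cinfo);

  return jpeg_size;                /* number of bytes written to *jpeg_out */
}
#endif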
| {
"language": "C"
} |
/*
* This file is part of cparser.
* Copyright (C) 2012 Matthias Braun <matze@braunis.de>
*/
#include "warning.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include "adt/strutil.h"
#include "adt/util.h"
#include "diagnostic.h"
#include "help.h"
static warning_switch_t warning[] = {
#define ERR WARN_STATE_ON | WARN_STATE_ERROR
#define ON WARN_STATE_ON
#define OFF WARN_STATE_NONE
#define M(warning, state, option, explanation) [warning] = { state, option, explanation },
WARNINGS(M)
#undef M
#undef OFF
#undef ON
#undef ERR
};
warning_switch_t const *get_warn_switch(warning_t const w)
{
assert((size_t)w < ARRAY_SIZE(warning));
assert(warning[w].name);
return &warning[w];
}
void print_warning_opt_help(void)
{
/* TODO: write explanations */
for (warning_switch_t* i = warning; i != endof(warning); ++i) {
char buf[256];
snprintf(buf, sizeof(buf), "-W%s", i->name);
help_simple(buf, i->explanation);
}
}
#define SET(y) (void)(warning[y].state = (warning[y].state & ~off) | on)
static void warn_parentheses(warn_state_t const on, warn_state_t const off)
{
SET(WARN_PARENTHESES_ASSIGNMENT);
SET(WARN_PARENTHESES_COMPARISON);
SET(WARN_PARENTHESES_ELSE);
SET(WARN_PARENTHESES_LOGICAL);
SET(WARN_PARENTHESES_SHIFT);
}
static void warn_unused(warn_state_t const on, warn_state_t const off)
{
SET(WARN_UNUSED_FUNCTION);
SET(WARN_UNUSED_LABEL);
SET(WARN_UNUSED_PARAMETER);
SET(WARN_UNUSED_VALUE);
SET(WARN_UNUSED_VARIABLE);
}
void set_warning_opt(const char *const opt)
{
/* Process prefixes: -W[no-][error=] */
char const *s = opt;
char const *rest;
bool const no = (rest = strstart(s, "no-")) ? s = rest, true : false;
bool const error = (rest = strstart(s, "error=")) ? s = rest, true : false;
warn_state_t on = WARN_STATE_NONE;
warn_state_t off = WARN_STATE_NONE;
if (!no || !error)
on |= WARN_STATE_ON;
if (error) {
on |= WARN_STATE_ERROR;
off |= WARN_STATE_NO_ERROR;
}
if (no) {
warn_state_t const tmp = on;
on = off;
off = tmp;
}
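	/* Resulting masks for the four prefix combinations (illustrative; ON,
	 * ERROR and NO_ERROR abbreviate the WARN_STATE_* flags):
	 *   -Wfoo          -> on = ON          off = 0
	 *   -Werror=foo    -> on = ON|ERROR    off = NO_ERROR
	 *   -Wno-foo       -> on = 0           off = ON
	 *   -Wno-error=foo -> on = NO_ERROR    off = ERROR
	 */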
for (warning_switch_t* i = warning; i != endof(warning); ++i) {
if (streq(i->name, s)) {
i->state = (i->state & ~off) | on;
return;
}
}
if (s[0] == '\0') { // -W is an alias for -Wextra
goto extra;
}
#define OPTX(x) else if (streq(s, x))
OPTX("all") {
/* Note: this switched on a lot more warnings than gcc's -Wall */
SET(WARN_ADDRESS);
SET(WARN_ATTRIBUTE);
SET(WARN_CHAR_SUBSCRIPTS);
SET(WARN_CHAR_CTYPE);
SET(WARN_COMMENT);
SET(WARN_EMPTY_STATEMENT);
SET(WARN_FORMAT);
SET(WARN_IMPLICIT_FUNCTION_DECLARATION);
SET(WARN_IMPLICIT_INT);
SET(WARN_MAIN);
SET(WARN_MISSING_PROTOTYPES);
SET(WARN_MISSING_VARIABLE_DECLARATIONS);
SET(WARN_NONNULL);
SET(WARN_OTHER);
SET(WARN_POINTER_ARITH);
SET(WARN_REDUNDANT_DECLS);
SET(WARN_RETURN_TYPE);
SET(WARN_SHADOW_LOCAL);
SET(WARN_SIGN_COMPARE);
SET(WARN_STRICT_PROTOTYPES);
SET(WARN_SWITCH_ENUM);
SET(WARN_UNINITIALIZED);
SET(WARN_UNKNOWN_PRAGMAS);
SET(WARN_UNREACHABLE_CODE);
warn_parentheses(on, off);
warn_unused(on, off);
}
OPTX("comments") {
SET(WARN_COMMENT);
}
OPTX("extra") {
extra:
/* TODO */
// TODO SET(function_end_without_return);
SET(WARN_EMPTY_STATEMENT);
// TODO SET(incomplete_aggregate_init);
// TODO SET(missing_field_initializers);
// TODO SET(pointless_comparison);
SET(WARN_SHADOW);
SET(WARN_UNUSED_PARAMETER);
SET(WARN_UNUSED_VALUE);
}
OPTX("implicit") {
SET(WARN_IMPLICIT_FUNCTION_DECLARATION);
SET(WARN_IMPLICIT_INT);
}
OPTX("parentheses") {
warn_parentheses(on, off);
}
OPTX("unused") {
warn_unused(on, off);
}
#undef SET
#undef OPTX
else if (streq(opt /* sic */, "error-implicit-function-declaration")) {
/* GCC legacy: This way it only can be activated. */
warning[WARN_IMPLICIT_FUNCTION_DECLARATION].state = WARN_STATE_ON | WARN_STATE_ERROR;
} else {
warningf(WARN_UNKNOWN_WARNING_OPTION, NULL, "ignoring unknown option '%hs%hs'", "-W", opt);
}
}
void disable_all_warnings(void)
{
for (warning_switch_t* i = warning; i != endof(warning); ++i) {
if (i != &warning[WARN_ERROR] &&
i != &warning[WARN_FATAL_ERRORS]) {
i->state &= ~WARN_STATE_ON;
}
}
}
| {
"language": "C"
} |
/*
//
// BEGIN SONGBIRD GPL
//
// This file is part of the Songbird web player.
//
// Copyright(c) 2005-2008 POTI, Inc.
// http://songbirdnest.com
//
// This file may be licensed under the terms of the
// GNU General Public License Version 2 (the "GPL").
//
// Software distributed under the License is distributed
// on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
// express or implied. See the GPL for the specific language
// governing rights and limitations.
//
// You should have received a copy of the GPL along with this
// program. If not, go to http://www.gnu.org/licenses/gpl.html
// or write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
//
// END SONGBIRD GPL
//
*/
#ifndef __SB_LOCALDATABASELOGGINGSERVICE_H__
#define __SB_LOCALDATABASELOGGINGSERVICE_H__
#endif /* __SB_LOCALDATABASELOGGINGSERVICE_H__ */
| {
"language": "C"
} |
/******************************************************************************
*
* Copyright(c) 2013 - 2017 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#include <drv_types.h>
extern void sdhci_bus_scan(void);
#ifndef ANDROID_2X
extern int sdhci_device_attached(void);
#endif
/*
* Return:
* 0: power on successfully
* others: power on failed
*/
int platform_wifi_power_on(void)
{
int ret = 0;
#ifdef CONFIG_RTL8188E
rtw_wifi_gpio_wlan_ctrl(WLAN_POWER_ON);
#endif /* CONFIG_RTL8188E */
/* Pull up pwd pin, make wifi leave power down mode. */
rtw_wifi_gpio_init();
rtw_wifi_gpio_wlan_ctrl(WLAN_PWDN_ON);
#if (MP_DRIVER == 1) && (defined(CONFIG_RTL8723A) || defined(CONFIG_RTL8723B))
/* Pull up BT reset pin. */
rtw_wifi_gpio_wlan_ctrl(WLAN_BT_PWDN_ON);
#endif
rtw_mdelay_os(5);
sdhci_bus_scan();
#ifdef CONFIG_RTL8723B
/* YJ,test,130305 */
rtw_mdelay_os(1000);
#endif
#ifdef ANDROID_2X
rtw_mdelay_os(200);
#else /* !ANDROID_2X */
if (1) {
int i = 0;
for (i = 0; i <= 50; i++) {
msleep(10);
if (sdhci_device_attached())
break;
printk("%s delay times:%d\n", __func__, i);
}
}
#endif /* !ANDROID_2X */
return ret;
}
void platform_wifi_power_off(void)
{
/* Pull down pwd pin, make wifi enter power down mode. */
rtw_wifi_gpio_wlan_ctrl(WLAN_PWDN_OFF);
rtw_mdelay_os(5);
rtw_wifi_gpio_deinit();
#ifdef CONFIG_RTL8188E
rtw_wifi_gpio_wlan_ctrl(WLAN_POWER_OFF);
#endif /* CONFIG_RTL8188E */
#ifdef CONFIG_WOWLAN
if (mmc_host)
mmc_host->pm_flags &= ~MMC_PM_KEEP_POWER;
#endif /* CONFIG_WOWLAN */
}
| {
"language": "C"
} |
/* For information on usage and redistribution, and for a DISCLAIMER OF ALL
* WARRANTIES, see the file, "LICENSE.txt," in this distribution.
iemlib2 written by Thomas Musil, Copyright (c) IEM KUG Graz Austria 2000 - 2006 */
#include "m_pd.h"
#include "iemlib.h"
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
/* ------------------------ wrap ----------------- */
/* -- leave the fractional part of a float message -- */
typedef struct _wrap
{
t_object x_obj;
t_float x_f;
} t_wrap;
static t_class *wrap_class;
static void wrap_bang(t_wrap *x)
{
outlet_float(x->x_obj.ob_outlet, x->x_f);
}
static void wrap_float(t_wrap *x, t_floatarg f)
{
int i=(int)f;
if(f > 0.0)
x->x_f = f - (t_float)i;
else
x->x_f = f - (t_float)(i - 1);
wrap_bang(x);
}
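/* Worked examples (illustrative) of the wrapping above:
 *    3.25 ->  0.25   (3.25 - (int)3.25)
 *   -1.25 ->  0.75   (-1.25 - ((int)-1.25 - 1) = -1.25 + 2)
 * Note that non-positive exact integers take the second branch, so e.g.
 * 0.0 and -2.0 both map to 1.0 rather than 0.0.
 */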
static void wrap_list(t_wrap *x, t_symbol *s, int argc, t_atom *argv)
{
if((argc > 0) && (IS_A_FLOAT(argv, 0)))
wrap_float(x, atom_getfloat(argv));
}
static void *wrap_new(void)
{
t_wrap *x = (t_wrap *)pd_new(wrap_class);
outlet_new(&x->x_obj, &s_float);
x->x_f = 0.0;
return (x);
}
void wrap_setup(void)
{
wrap_class = class_new(gensym("wrap"), (t_newmethod)wrap_new, 0,
sizeof(t_wrap), 0, 0);
class_addbang(wrap_class, (t_method)wrap_bang);
class_addfloat(wrap_class, (t_method)wrap_float);
class_addlist(wrap_class, (t_method)wrap_list);
// class_sethelpsymbol(wrap_class, gensym("iemhelp/help-wrap"));
}
| {
"language": "C"
} |
/**
* @file
* This is the IPv4 layer implementation for incoming and outgoing IP traffic.
*
* @see ip_frag.c
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#if LWIP_IPV4
#include "lwip/ip.h"
#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/ip4_frag.h"
#include "lwip/inet_chksum.h"
#include "lwip/netif.h"
#include "lwip/icmp.h"
#include "lwip/igmp.h"
#include "lwip/raw.h"
#include "lwip/udp.h"
#include "lwip/priv/tcp_priv.h"
#include "lwip/autoip.h"
#include "lwip/stats.h"
#include "lwip/prot/dhcp.h"
#include <string.h>
#ifdef LWIP_HOOK_FILENAME
#include LWIP_HOOK_FILENAME
#endif
/** Set this to 0 in the rare case of wanting to call an extra function to
* generate the IP checksum (in contrast to calculating it on-the-fly). */
#ifndef LWIP_INLINE_IP_CHKSUM
#if LWIP_CHECKSUM_CTRL_PER_NETIF
#define LWIP_INLINE_IP_CHKSUM 0
#else /* LWIP_CHECKSUM_CTRL_PER_NETIF */
#define LWIP_INLINE_IP_CHKSUM 1
#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */
#endif
#if LWIP_INLINE_IP_CHKSUM && CHECKSUM_GEN_IP
#define CHECKSUM_GEN_IP_INLINE 1
#else
#define CHECKSUM_GEN_IP_INLINE 0
#endif
#if LWIP_DHCP || defined(LWIP_IP_ACCEPT_UDP_PORT)
#define IP_ACCEPT_LINK_LAYER_ADDRESSING 1
/** Some defines for DHCP to let link-layer-addressed packets through while the
* netif is down.
* To use this in your own application/protocol, define LWIP_IP_ACCEPT_UDP_PORT(port)
* to return 1 if the port is accepted and 0 if the port is not accepted.
*/
#if LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT)
/* accept DHCP client port and custom port */
#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (((port) == PP_NTOHS(DHCP_CLIENT_PORT)) \
|| (LWIP_IP_ACCEPT_UDP_PORT(port)))
#elif defined(LWIP_IP_ACCEPT_UDP_PORT) /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */
/* accept custom port only */
#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (LWIP_IP_ACCEPT_UDP_PORT(port))
#else /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */
/* accept DHCP client port only */
#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) ((port) == PP_NTOHS(DHCP_CLIENT_PORT))
#endif /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */
#else /* LWIP_DHCP */
#define IP_ACCEPT_LINK_LAYER_ADDRESSING 0
#endif /* LWIP_DHCP */
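/* Example (illustrative): with LWIP_DHCP enabled and
 *   #define LWIP_IP_ACCEPT_UDP_PORT(port) ((port) == PP_NTOHS(12345))
 * the accept test above expands to
 *   ((port) == PP_NTOHS(DHCP_CLIENT_PORT)) || ((port) == PP_NTOHS(12345))
 * so both DHCP replies and datagrams to UDP port 12345 are let through while
 * the netif is still unconfigured.
 */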
/** The IP header ID of the next outgoing IP packet */
static u16_t ip_id;
#if LWIP_MULTICAST_TX_OPTIONS
/** The default netif used for multicast */
static struct netif *ip4_default_multicast_netif;
/**
* @ingroup ip4
* Set a default netif for IPv4 multicast. */
void
ip4_set_default_multicast_netif(struct netif *default_multicast_netif)
{
ip4_default_multicast_netif = default_multicast_netif;
}
#endif /* LWIP_MULTICAST_TX_OPTIONS */
#ifdef LWIP_HOOK_IP4_ROUTE_SRC
/**
* Source based IPv4 routing must be fully implemented in
 * LWIP_HOOK_IP4_ROUTE_SRC(). This function only provides the parameters.
*/
struct netif *
ip4_route_src(const ip4_addr_t *dest, const ip4_addr_t *src)
{
if (src != NULL)
{
/* when src==NULL, the hook is called from ip4_route(dest) */
struct netif *netif = LWIP_HOOK_IP4_ROUTE_SRC(dest, src);
if (netif != NULL)
{
return netif;
}
}
return ip4_route(dest);
}
#endif /* LWIP_HOOK_IP4_ROUTE_SRC */
/**
* Finds the appropriate network interface for a given IP address. It
* searches the list of network interfaces linearly. A match is found
* if the masked IP address of the network interface equals the masked
* IP address given to the function.
*
* @param dest the destination IP address for which to find the route
* @return the netif on which to send to reach dest
*/
struct netif *
ip4_route(const ip4_addr_t *dest)
{
struct netif *netif;
#if LWIP_MULTICAST_TX_OPTIONS
/* Use administratively selected interface for multicast by default */
if (ip4_addr_ismulticast(dest) && ip4_default_multicast_netif)
{
return ip4_default_multicast_netif;
}
#endif /* LWIP_MULTICAST_TX_OPTIONS */
/* iterate through netifs */
for (netif = netif_list; netif != NULL; netif = netif->next)
{
/* is the netif up, does it have a link and a valid address? */
if (netif_is_up(netif) && netif_is_link_up(netif) && !ip4_addr_isany_val(*netif_ip4_addr(netif)))
{
/* network mask matches? */
if (ip4_addr_netcmp(dest, netif_ip4_addr(netif), netif_ip4_netmask(netif)))
{
/* return netif on which to forward IP packet */
return netif;
}
/* gateway matches on a non broadcast interface? (i.e. peer in a point to point interface) */
if (((netif->flags & NETIF_FLAG_BROADCAST) == 0) && ip4_addr_cmp(dest, netif_ip4_gw(netif)))
{
/* return netif on which to forward IP packet */
return netif;
}
}
}
#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF
/* loopif is disabled, loopback traffic is passed through any netif */
if (ip4_addr_isloopback(dest))
{
/* don't check for link on loopback traffic */
if (netif_default != NULL && netif_is_up(netif_default))
{
return netif_default;
}
/* default netif is not up, just use any netif for loopback traffic */
for (netif = netif_list; netif != NULL; netif = netif->next)
{
if (netif_is_up(netif))
{
return netif;
}
}
return NULL;
}
#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */
#ifdef LWIP_HOOK_IP4_ROUTE_SRC
netif = LWIP_HOOK_IP4_ROUTE_SRC(dest, NULL);
if (netif != NULL)
{
return netif;
}
#elif defined(LWIP_HOOK_IP4_ROUTE)
netif = LWIP_HOOK_IP4_ROUTE(dest);
if (netif != NULL)
{
return netif;
}
#endif
if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default) ||
ip4_addr_isany_val(*netif_ip4_addr(netif_default)))
{
/* No matching netif found and default netif is not usable.
If this is not good enough for you, use LWIP_HOOK_IP4_ROUTE() */
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_route: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest)));
IP_STATS_INC(ip.rterr);
MIB2_STATS_INC(mib2.ipoutnoroutes);
return NULL;
}
return netif_default;
}
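/* Worked example (illustrative): with a netif configured as 192.168.1.5/24,
 * a destination of 192.168.1.99 satisfies ip4_addr_netcmp() and that netif is
 * returned; a destination outside every configured subnet (and not matching a
 * point-to-point gateway) falls through to netif_default, provided it is up,
 * has a link and a valid address. */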
#if IP_FORWARD
/**
* Determine whether an IP address is in a reserved set of addresses
* that may not be forwarded, or whether datagrams to that destination
* may be forwarded.
* @param p the packet to forward
* @return 1: can forward 0: discard
*/
static int
ip4_canforward(struct pbuf *p)
{
u32_t addr = lwip_htonl(ip4_addr_get_u32(ip4_current_dest_addr()));
if (p->flags & PBUF_FLAG_LLBCAST)
{
/* don't route link-layer broadcasts */
return 0;
}
if ((p->flags & PBUF_FLAG_LLMCAST) && !IP_MULTICAST(addr))
{
/* don't route link-layer multicasts unless the destination address is an IP
multicast address */
return 0;
}
if (IP_EXPERIMENTAL(addr))
{
return 0;
}
if (IP_CLASSA(addr))
{
u32_t net = addr & IP_CLASSA_NET;
if ((net == 0) || (net == ((u32_t)IP_LOOPBACKNET << IP_CLASSA_NSHIFT)))
{
/* don't route loopback packets */
return 0;
}
}
return 1;
}
/**
* Forwards an IP packet. It finds an appropriate route for the
* packet, decrements the TTL value of the packet, adjusts the
* checksum and outputs the packet on the appropriate interface.
*
* @param p the packet to forward (p->payload points to IP header)
* @param iphdr the IP header of the input packet
* @param inp the netif on which this packet was received
*/
static void
ip4_forward(struct pbuf *p, struct ip_hdr *iphdr, struct netif *inp)
{
struct netif *netif;
PERF_START;
LWIP_UNUSED_ARG(inp);
if (!ip4_canforward(p))
{
goto return_noroute;
}
/* RFC3927 2.7: do not forward link-local addresses */
if (ip4_addr_islinklocal(ip4_current_dest_addr()))
{
LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not forwarding LLA %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()),
ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr())));
goto return_noroute;
}
/* Find network interface where to forward this IP packet to. */
netif = ip4_route_src(ip4_current_dest_addr(), ip4_current_src_addr());
if (netif == NULL)
{
LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: no forwarding route for %"U16_F".%"U16_F".%"U16_F".%"U16_F" found\n",
ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()),
ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr())));
/* @todo: send ICMP_DUR_NET? */
goto return_noroute;
}
#if !IP_FORWARD_ALLOW_TX_ON_RX_NETIF
/* Do not forward packets onto the same network interface on which
* they arrived. */
if (netif == inp)
{
LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not bouncing packets back on incoming interface.\n"));
goto return_noroute;
}
#endif /* IP_FORWARD_ALLOW_TX_ON_RX_NETIF */
/* decrement TTL */
IPH_TTL_SET(iphdr, IPH_TTL(iphdr) - 1);
/* send ICMP if TTL == 0 */
if (IPH_TTL(iphdr) == 0)
{
MIB2_STATS_INC(mib2.ipinhdrerrors);
#if LWIP_ICMP
/* Don't send ICMP messages in response to ICMP messages */
if (IPH_PROTO(iphdr) != IP_PROTO_ICMP)
{
icmp_time_exceeded(p, ICMP_TE_TTL);
}
#endif /* LWIP_ICMP */
return;
}
/* Incrementally update the IP checksum. */
if (IPH_CHKSUM(iphdr) >= PP_HTONS(0xffffU - 0x100))
{
IPH_CHKSUM_SET(iphdr, IPH_CHKSUM(iphdr) + PP_HTONS(0x100) + 1);
}
else
{
IPH_CHKSUM_SET(iphdr, IPH_CHKSUM(iphdr) + PP_HTONS(0x100));
}
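/* The TTL shares a 16-bit header word with the protocol field and occupies
 * its high-order byte, so decrementing it lowers that word by 0x100. The
 * adjustment above is the usual RFC 1624 style incremental update: add 0x100
 * back into the stored checksum, with the first branch folding in the
 * end-around carry when the addition would overflow. */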
LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: forwarding packet to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()),
ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr())));
IP_STATS_INC(ip.fw);
MIB2_STATS_INC(mib2.ipforwdatagrams);
IP_STATS_INC(ip.xmit);
PERF_STOP("ip4_forward");
/* don't fragment if interface has mtu set to 0 [loopif] */
if (netif->mtu && (p->tot_len > netif->mtu))
{
if ((IPH_OFFSET(iphdr) & PP_NTOHS(IP_DF)) == 0)
{
#if IP_FRAG
ip4_frag(p, netif, ip4_current_dest_addr());
#else /* IP_FRAG */
/* @todo: send ICMP Destination Unreachable code 13 "Communication administratively prohibited"? */
#endif /* IP_FRAG */
}
else
{
#if LWIP_ICMP
/* send ICMP Destination Unreachable code 4: "Fragmentation Needed and DF Set" */
icmp_dest_unreach(p, ICMP_DUR_FRAG);
#endif /* LWIP_ICMP */
}
return;
}
/* transmit pbuf on chosen interface */
netif->output(netif, p, ip4_current_dest_addr());
return;
return_noroute:
MIB2_STATS_INC(mib2.ipoutnoroutes);
}
#endif /* IP_FORWARD */
/**
* This function is called by the network interface device driver when
* an IP packet is received. The function does the basic checks of the
 * IP header, such as the packet size being at least as large as the header
* size etc. If the packet was not destined for us, the packet is
* forwarded (using ip_forward). The IP checksum is always checked.
*
* Finally, the packet is sent to the upper layer protocol input function.
*
* @param p the received IP packet (p->payload points to IP header)
* @param inp the netif on which this packet was received
* @return ERR_OK if the packet was processed (could return ERR_* if it wasn't
* processed, but currently always returns ERR_OK)
*/
err_t
ip4_input(struct pbuf *p, struct netif *inp)
{
struct ip_hdr *iphdr;
struct netif *netif;
u16_t iphdr_hlen;
u16_t iphdr_len;
#if IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP
int check_ip_src = 1;
#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP */
IP_STATS_INC(ip.recv);
MIB2_STATS_INC(mib2.ipinreceives);
/* identify the IP header */
iphdr = (struct ip_hdr *)p->payload;
if (IPH_V(iphdr) != 4)
{
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IP packet dropped due to bad version number %"U16_F"\n", (u16_t)IPH_V(iphdr)));
ip4_debug_print(p);
pbuf_free(p);
IP_STATS_INC(ip.err);
IP_STATS_INC(ip.drop);
MIB2_STATS_INC(mib2.ipinhdrerrors);
return ERR_OK;
}
#ifdef LWIP_HOOK_IP4_INPUT
if (LWIP_HOOK_IP4_INPUT(p, inp))
{
/* the packet has been eaten */
return ERR_OK;
}
#endif
/* obtain IP header length in number of 32-bit words */
iphdr_hlen = IPH_HL(iphdr);
/* calculate IP header length in bytes */
iphdr_hlen *= 4;
/* obtain ip length in bytes */
iphdr_len = lwip_ntohs(IPH_LEN(iphdr));
/* Trim pbuf. This is especially required for packets < 60 bytes. */
if (iphdr_len < p->tot_len)
{
pbuf_realloc(p, iphdr_len);
}
/* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */
if ((iphdr_hlen > p->len) || (iphdr_len > p->tot_len) || (iphdr_hlen < IP_HLEN))
{
if (iphdr_hlen < IP_HLEN)
{
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
("ip4_input: short IP header (%"U16_F" bytes) received, IP packet dropped\n", iphdr_hlen));
}
if (iphdr_hlen > p->len)
{
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
("IP header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n",
iphdr_hlen, p->len));
}
if (iphdr_len > p->tot_len)
{
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
("IP (len %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n",
iphdr_len, p->tot_len));
}
/* free (drop) packet pbufs */
pbuf_free(p);
IP_STATS_INC(ip.lenerr);
IP_STATS_INC(ip.drop);
MIB2_STATS_INC(mib2.ipindiscards);
return ERR_OK;
}
/* verify checksum */
#if CHECKSUM_CHECK_IP
IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_IP)
{
if (inet_chksum(iphdr, iphdr_hlen) != 0)
{
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
("Checksum (0x%"X16_F") failed, IP packet dropped.\n", inet_chksum(iphdr, iphdr_hlen)));
ip4_debug_print(p);
pbuf_free(p);
IP_STATS_INC(ip.chkerr);
IP_STATS_INC(ip.drop);
MIB2_STATS_INC(mib2.ipinhdrerrors);
return ERR_OK;
}
}
#endif
/* copy IP addresses to aligned ip_addr_t */
ip_addr_copy_from_ip4(ip_data.current_iphdr_dest, iphdr->dest);
ip_addr_copy_from_ip4(ip_data.current_iphdr_src, iphdr->src);
/* match packet against an interface, i.e. is this packet for us? */
if (ip4_addr_ismulticast(ip4_current_dest_addr()))
{
#if LWIP_IGMP
if ((inp->flags & NETIF_FLAG_IGMP) && (igmp_lookfor_group(inp, ip4_current_dest_addr())))
{
/* IGMP snooping switches need 0.0.0.0 to be allowed as source address (RFC 4541) */
ip4_addr_t allsystems;
IP4_ADDR(&allsystems, 224, 0, 0, 1);
if (ip4_addr_cmp(ip4_current_dest_addr(), &allsystems) &&
ip4_addr_isany(ip4_current_src_addr()))
{
check_ip_src = 0;
}
netif = inp;
}
else
{
netif = NULL;
}
#else /* LWIP_IGMP */
if ((netif_is_up(inp)) && (!ip4_addr_isany_val(*netif_ip4_addr(inp))))
{
netif = inp;
}
else
{
netif = NULL;
}
#endif /* LWIP_IGMP */
}
else
{
/* start trying with inp. if that's not acceptable, start walking the
list of configured netifs.
'first' is used as a boolean to mark whether we started walking the list */
int first = 1;
netif = inp;
do
{
LWIP_DEBUGF(IP_DEBUG, ("ip_input: iphdr->dest 0x%"X32_F" netif->ip_addr 0x%"X32_F" (0x%"X32_F", 0x%"X32_F", 0x%"X32_F")\n",
ip4_addr_get_u32(&iphdr->dest), ip4_addr_get_u32(netif_ip4_addr(netif)),
ip4_addr_get_u32(&iphdr->dest) & ip4_addr_get_u32(netif_ip4_netmask(netif)),
ip4_addr_get_u32(netif_ip4_addr(netif)) & ip4_addr_get_u32(netif_ip4_netmask(netif)),
ip4_addr_get_u32(&iphdr->dest) & ~ip4_addr_get_u32(netif_ip4_netmask(netif))));
/* interface is up and configured? */
if ((netif_is_up(netif)) && (!ip4_addr_isany_val(*netif_ip4_addr(netif))))
{
/* unicast to this interface address? */
if (ip4_addr_cmp(ip4_current_dest_addr(), netif_ip4_addr(netif)) ||
/* or broadcast on this interface network address? */
ip4_addr_isbroadcast(ip4_current_dest_addr(), netif)
#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF
|| (ip4_addr_get_u32(ip4_current_dest_addr()) == PP_HTONL(IPADDR_LOOPBACK))
#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */
)
{
LWIP_DEBUGF(IP_DEBUG, ("ip4_input: packet accepted on interface %c%c\n",
netif->name[0], netif->name[1]));
/* break out of for loop */
break;
}
#if LWIP_AUTOIP
/* connections to link-local addresses must persist after changing
the netif's address (RFC3927 ch. 1.9) */
if (autoip_accept_packet(netif, ip4_current_dest_addr()))
{
LWIP_DEBUGF(IP_DEBUG, ("ip4_input: LLA packet accepted on interface %c%c\n",
netif->name[0], netif->name[1]));
/* break out of for loop */
break;
}
#endif /* LWIP_AUTOIP */
}
if (first)
{
#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF
/* Packets sent to the loopback address must not be accepted on an
* interface that does not have the loopback address assigned to it,
* unless a non-loopback interface is used for loopback traffic. */
if (ip4_addr_isloopback(ip4_current_dest_addr()))
{
netif = NULL;
break;
}
#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */
first = 0;
netif = netif_list;
}
else
{
netif = netif->next;
}
if (netif == inp)
{
netif = netif->next;
}
}
while (netif != NULL);
}
#if IP_ACCEPT_LINK_LAYER_ADDRESSING
/* Pass DHCP messages regardless of destination address. DHCP traffic is addressed
* using link layer addressing (such as Ethernet MAC) so we must not filter on IP.
 * (see RFC 1542 section 3.1.1, as referred to by RFC 2131).
*
* If you want to accept private broadcast communication while a netif is down,
* define LWIP_IP_ACCEPT_UDP_PORT(dst_port), e.g.:
*
* #define LWIP_IP_ACCEPT_UDP_PORT(dst_port) ((dst_port) == PP_NTOHS(12345))
*/
if (netif == NULL)
{
/* UDP datagram addressed to the DHCP client port (or an accepted custom port)? */
if (IPH_PROTO(iphdr) == IP_PROTO_UDP)
{
struct udp_hdr *udphdr = (struct udp_hdr *)((u8_t *)iphdr + iphdr_hlen);
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: UDP packet to DHCP client port %"U16_F"\n",
lwip_ntohs(udphdr->dest)));
if (IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(udphdr->dest))
{
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: DHCP packet accepted.\n"));
netif = inp;
check_ip_src = 0;
}
}
}
#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */
/* broadcast or multicast packet source address? Compliant with RFC 1122: 3.2.1.3 */
#if LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING
if (check_ip_src
#if IP_ACCEPT_LINK_LAYER_ADDRESSING
/* DHCP servers need 0.0.0.0 to be allowed as source address (RFC 1122: 3.2.1.3/a) */
&& !ip4_addr_isany_val(*ip4_current_src_addr())
#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */
)
#endif /* LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING */
{
if ((ip4_addr_isbroadcast(ip4_current_src_addr(), inp)) ||
(ip4_addr_ismulticast(ip4_current_src_addr())))
{
/* packet source is not valid */
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("ip4_input: packet source is not valid.\n"));
/* free (drop) packet pbufs */
pbuf_free(p);
IP_STATS_INC(ip.drop);
MIB2_STATS_INC(mib2.ipinaddrerrors);
MIB2_STATS_INC(mib2.ipindiscards);
return ERR_OK;
}
}
/* packet not for us? */
if (netif == NULL)
{
/* packet not for us, route or discard */
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: packet not for us.\n"));
#if IP_FORWARD
/* non-broadcast packet? */
if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), inp))
{
/* try to forward IP packet on (other) interfaces */
ip4_forward(p, iphdr, inp);
}
else
#endif /* IP_FORWARD */
{
IP_STATS_INC(ip.drop);
MIB2_STATS_INC(mib2.ipinaddrerrors);
MIB2_STATS_INC(mib2.ipindiscards);
}
pbuf_free(p);
return ERR_OK;
}
/* packet consists of multiple fragments? */
if ((IPH_OFFSET(iphdr) & PP_HTONS(IP_OFFMASK | IP_MF)) != 0)
{
#if IP_REASSEMBLY /* packet fragment reassembly code present? */
LWIP_DEBUGF(IP_DEBUG, ("IP packet is a fragment (id=0x%04"X16_F" tot_len=%"U16_F" len=%"U16_F" MF=%"U16_F" offset=%"U16_F"), calling ip4_reass()\n",
lwip_ntohs(IPH_ID(iphdr)), p->tot_len, lwip_ntohs(IPH_LEN(iphdr)), (u16_t)!!(IPH_OFFSET(iphdr) & PP_HTONS(IP_MF)), (u16_t)((lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK) * 8)));
/* reassemble the packet*/
p = ip4_reass(p);
/* packet not fully reassembled yet? */
if (p == NULL)
{
return ERR_OK;
}
iphdr = (struct ip_hdr *)p->payload;
#else /* IP_REASSEMBLY == 0, no packet fragment reassembly code present */
pbuf_free(p);
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since it was fragmented (0x%"X16_F") (while IP_REASSEMBLY == 0).\n",
lwip_ntohs(IPH_OFFSET(iphdr))));
IP_STATS_INC(ip.opterr);
IP_STATS_INC(ip.drop);
/* unsupported protocol feature */
MIB2_STATS_INC(mib2.ipinunknownprotos);
return ERR_OK;
#endif /* IP_REASSEMBLY */
}
#if IP_OPTIONS_ALLOWED == 0 /* no support for IP options in the IP header? */
#if LWIP_IGMP
/* there is an extra "router alert" option in IGMP messages which we allow for but do not police */
if ((iphdr_hlen > IP_HLEN) && (IPH_PROTO(iphdr) != IP_PROTO_IGMP))
{
#else
if (iphdr_hlen > IP_HLEN)
{
#endif /* LWIP_IGMP */
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since there were IP options (while IP_OPTIONS_ALLOWED == 0).\n"));
pbuf_free(p);
IP_STATS_INC(ip.opterr);
IP_STATS_INC(ip.drop);
/* unsupported protocol feature */
MIB2_STATS_INC(mib2.ipinunknownprotos);
return ERR_OK;
}
#endif /* IP_OPTIONS_ALLOWED == 0 */
/* send to upper layers */
LWIP_DEBUGF(IP_DEBUG, ("ip4_input: \n"));
ip4_debug_print(p);
LWIP_DEBUGF(IP_DEBUG, ("ip4_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len));
ip_data.current_netif = netif;
ip_data.current_input_netif = inp;
ip_data.current_ip4_header = iphdr;
ip_data.current_ip_header_tot_len = IPH_HL(iphdr) * 4;
#if LWIP_RAW
/* raw input did not eat the packet? */
if (raw_input(p, inp) == 0)
#endif /* LWIP_RAW */
{
pbuf_header(p, -(s16_t)iphdr_hlen); /* Move to payload, no check necessary. */
switch (IPH_PROTO(iphdr))
{
#if LWIP_UDP
case IP_PROTO_UDP:
#if LWIP_UDPLITE
case IP_PROTO_UDPLITE:
#endif /* LWIP_UDPLITE */
MIB2_STATS_INC(mib2.ipindelivers);
udp_input(p, inp);
break;
#endif /* LWIP_UDP */
#if LWIP_TCP
case IP_PROTO_TCP:
MIB2_STATS_INC(mib2.ipindelivers);
tcp_input(p, inp);
break;
#endif /* LWIP_TCP */
#if LWIP_ICMP
case IP_PROTO_ICMP:
MIB2_STATS_INC(mib2.ipindelivers);
icmp_input(p, inp);
break;
#endif /* LWIP_ICMP */
#if LWIP_IGMP
case IP_PROTO_IGMP:
igmp_input(p, inp, ip4_current_dest_addr());
break;
#endif /* LWIP_IGMP */
default:
#if LWIP_ICMP
/* send ICMP destination protocol unreachable unless it was a broadcast */
if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) &&
!ip4_addr_ismulticast(ip4_current_dest_addr()))
{
pbuf_header_force(p, iphdr_hlen); /* Move to ip header, no check necessary. */
p->payload = iphdr;
icmp_dest_unreach(p, ICMP_DUR_PROTO);
}
#endif /* LWIP_ICMP */
pbuf_free(p);
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Unsupported transport protocol %"U16_F"\n", (u16_t)IPH_PROTO(iphdr)));
IP_STATS_INC(ip.proterr);
IP_STATS_INC(ip.drop);
MIB2_STATS_INC(mib2.ipinunknownprotos);
}
}
/* @todo: this is not really necessary... */
ip_data.current_netif = NULL;
ip_data.current_input_netif = NULL;
ip_data.current_ip4_header = NULL;
ip_data.current_ip_header_tot_len = 0;
ip4_addr_set_any(ip4_current_src_addr());
ip4_addr_set_any(ip4_current_dest_addr());
return ERR_OK;
}
/**
* Sends an IP packet on a network interface. This function constructs
* the IP header and calculates the IP header checksum. If the source
* IP address is NULL, the IP address of the outgoing network
* interface is filled in as source address.
* If the destination IP address is LWIP_IP_HDRINCL, p is assumed to already
* include an IP header and p->payload points to it instead of the data.
*
* @param p the packet to send (p->payload points to the data, e.g. next
protocol header; if dest == LWIP_IP_HDRINCL, p already includes an
IP header and p->payload points to that IP header)
* @param src the source IP address to send from (if src == IP4_ADDR_ANY, the
* IP address of the netif used to send is used as source address)
* @param dest the destination IP address to send the packet to
* @param ttl the TTL value to be set in the IP header
* @param tos the TOS value to be set in the IP header
* @param proto the PROTOCOL to be set in the IP header
* @param netif the netif on which to send this packet
* @return ERR_OK if the packet was sent OK
* ERR_BUF if p doesn't have enough space for IP/LINK headers
* returns errors returned by netif->output
*
* @note ip_id: RFC791 "some host may be able to simply use
* unique identifiers independent of destination"
*/
err_t
ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest,
u8_t ttl, u8_t tos,
u8_t proto, struct netif *netif)
{
#if IP_OPTIONS_SEND
return ip4_output_if_opt(p, src, dest, ttl, tos, proto, netif, NULL, 0);
}
/**
* Same as ip_output_if() but with the possibility to include IP options:
*
 * @param ip_options pointer to the IP options, copied into the IP header
 * @param optlen length of ip_options
*/
err_t
ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest,
u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options,
u16_t optlen)
{
#endif /* IP_OPTIONS_SEND */
const ip4_addr_t *src_used = src;
if (dest != LWIP_IP_HDRINCL)
{
if (ip4_addr_isany(src))
{
src_used = netif_ip4_addr(netif);
}
}
#if IP_OPTIONS_SEND
return ip4_output_if_opt_src(p, src_used, dest, ttl, tos, proto, netif,
ip_options, optlen);
#else /* IP_OPTIONS_SEND */
return ip4_output_if_src(p, src_used, dest, ttl, tos, proto, netif);
#endif /* IP_OPTIONS_SEND */
}
/**
* Same as ip_output_if() but 'src' address is not replaced by netif address
* when it is 'any'.
*/
err_t
ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest,
u8_t ttl, u8_t tos,
u8_t proto, struct netif *netif)
{
#if IP_OPTIONS_SEND
return ip4_output_if_opt_src(p, src, dest, ttl, tos, proto, netif, NULL, 0);
}
/**
* Same as ip_output_if_opt() but 'src' address is not replaced by netif address
* when it is 'any'.
*/
err_t
ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest,
u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options,
u16_t optlen)
{
#endif /* IP_OPTIONS_SEND */
struct ip_hdr *iphdr;
ip4_addr_t dest_addr;
#if CHECKSUM_GEN_IP_INLINE
u32_t chk_sum = 0;
#endif /* CHECKSUM_GEN_IP_INLINE */
LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p);
MIB2_STATS_INC(mib2.ipoutrequests);
/* Should the IP header be generated or is it already included in p? */
if (dest != LWIP_IP_HDRINCL)
{
u16_t ip_hlen = IP_HLEN;
#if IP_OPTIONS_SEND
u16_t optlen_aligned = 0;
if (optlen != 0)
{
#if CHECKSUM_GEN_IP_INLINE
int i;
#endif /* CHECKSUM_GEN_IP_INLINE */
/* round up to a multiple of 4 */
optlen_aligned = ((optlen + 3) & ~3);
ip_hlen += optlen_aligned;
/* First write in the IP options */
if (pbuf_header(p, optlen_aligned))
{
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: not enough room for IP options in pbuf\n"));
IP_STATS_INC(ip.err);
MIB2_STATS_INC(mib2.ipoutdiscards);
return ERR_BUF;
}
MEMCPY(p->payload, ip_options, optlen);
if (optlen < optlen_aligned)
{
/* zero the remaining bytes */
memset(((char *)p->payload) + optlen, 0, optlen_aligned - optlen);
}
#if CHECKSUM_GEN_IP_INLINE
for (i = 0; i < optlen_aligned / 2; i++)
{
chk_sum += ((u16_t *)p->payload)[i];
}
#endif /* CHECKSUM_GEN_IP_INLINE */
}
#endif /* IP_OPTIONS_SEND */
/* generate IP header */
if (pbuf_header(p, IP_HLEN))
{
LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: not enough room for IP header in pbuf\n"));
IP_STATS_INC(ip.err);
MIB2_STATS_INC(mib2.ipoutdiscards);
return ERR_BUF;
}
iphdr = (struct ip_hdr *)p->payload;
LWIP_ASSERT("check that first pbuf can hold struct ip_hdr",
(p->len >= sizeof(struct ip_hdr)));
IPH_TTL_SET(iphdr, ttl);
IPH_PROTO_SET(iphdr, proto);
#if CHECKSUM_GEN_IP_INLINE
chk_sum += PP_NTOHS(proto | (ttl << 8));
#endif /* CHECKSUM_GEN_IP_INLINE */
/* dest cannot be NULL here */
ip4_addr_copy(iphdr->dest, *dest);
#if CHECKSUM_GEN_IP_INLINE
chk_sum += ip4_addr_get_u32(&iphdr->dest) & 0xFFFF;
chk_sum += ip4_addr_get_u32(&iphdr->dest) >> 16;
#endif /* CHECKSUM_GEN_IP_INLINE */
IPH_VHL_SET(iphdr, 4, ip_hlen / 4);
IPH_TOS_SET(iphdr, tos);
#if CHECKSUM_GEN_IP_INLINE
chk_sum += PP_NTOHS(tos | (iphdr->_v_hl << 8));
#endif /* CHECKSUM_GEN_IP_INLINE */
IPH_LEN_SET(iphdr, lwip_htons(p->tot_len));
#if CHECKSUM_GEN_IP_INLINE
chk_sum += iphdr->_len;
#endif /* CHECKSUM_GEN_IP_INLINE */
IPH_OFFSET_SET(iphdr, 0);
IPH_ID_SET(iphdr, lwip_htons(ip_id));
#if CHECKSUM_GEN_IP_INLINE
chk_sum += iphdr->_id;
#endif /* CHECKSUM_GEN_IP_INLINE */
++ip_id;
if (src == NULL)
{
ip4_addr_copy(iphdr->src, *IP4_ADDR_ANY4);
}
else
{
/* src cannot be NULL here */
ip4_addr_copy(iphdr->src, *src);
}
#if CHECKSUM_GEN_IP_INLINE
chk_sum += ip4_addr_get_u32(&iphdr->src) & 0xFFFF;
chk_sum += ip4_addr_get_u32(&iphdr->src) >> 16;
chk_sum = (chk_sum >> 16) + (chk_sum & 0xFFFF);
chk_sum = (chk_sum >> 16) + chk_sum;
chk_sum = ~chk_sum;
IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP)
{
iphdr->_chksum = (u16_t)chk_sum; /* network order */
}
#if LWIP_CHECKSUM_CTRL_PER_NETIF
else
{
IPH_CHKSUM_SET(iphdr, 0);
}
#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF*/
#else /* CHECKSUM_GEN_IP_INLINE */
IPH_CHKSUM_SET(iphdr, 0);
#if CHECKSUM_GEN_IP
IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP)
{
IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, ip_hlen));
}
#endif /* CHECKSUM_GEN_IP */
#endif /* CHECKSUM_GEN_IP_INLINE */
}
else
{
/* IP header already included in p */
iphdr = (struct ip_hdr *)p->payload;
ip4_addr_copy(dest_addr, iphdr->dest);
dest = &dest_addr;
}
IP_STATS_INC(ip.xmit);
LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num));
ip4_debug_print(p);
#if ENABLE_LOOPBACK
if (ip4_addr_cmp(dest, netif_ip4_addr(netif))
#if !LWIP_HAVE_LOOPIF
|| ip4_addr_isloopback(dest)
#endif /* !LWIP_HAVE_LOOPIF */
)
{
/* Packet to self, enqueue it for loopback */
LWIP_DEBUGF(IP_DEBUG, ("netif_loop_output()"));
return netif_loop_output(netif, p);
}
#if LWIP_MULTICAST_TX_OPTIONS
if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0)
{
netif_loop_output(netif, p);
}
#endif /* LWIP_MULTICAST_TX_OPTIONS */
#endif /* ENABLE_LOOPBACK */
#if IP_FRAG
/* don't fragment if interface has mtu set to 0 [loopif] */
if (netif->mtu && (p->tot_len > netif->mtu))
{
return ip4_frag(p, netif, dest);
}
#endif /* IP_FRAG */
LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: call netif->output()\n"));
return netif->output(netif, p, dest);
}
/**
* Simple interface to ip_output_if. It finds the outgoing network
* interface and calls upon ip_output_if to do the actual work.
*
* @param p the packet to send (p->payload points to the data, e.g. next
protocol header; if dest == LWIP_IP_HDRINCL, p already includes an
IP header and p->payload points to that IP header)
* @param src the source IP address to send from (if src == IP4_ADDR_ANY, the
* IP address of the netif used to send is used as source address)
* @param dest the destination IP address to send the packet to
* @param ttl the TTL value to be set in the IP header
* @param tos the TOS value to be set in the IP header
* @param proto the PROTOCOL to be set in the IP header
*
* @return ERR_RTE if no route is found
* see ip_output_if() for more return values
*/
err_t
ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest,
u8_t ttl, u8_t tos, u8_t proto)
{
struct netif *netif;
LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p);
if ((netif = ip4_route_src(dest, src)) == NULL)
{
LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest)));
IP_STATS_INC(ip.rterr);
return ERR_RTE;
}
return ip4_output_if(p, src, dest, ttl, tos, proto, netif);
}
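/* Illustrative call (a sketch, not from the original sources): send a pbuf
 * carrying a UDP payload, letting the stack pick the route and source address:
 *
 *   err_t err = ip4_output(p, IP4_ADDR_ANY4, &dest_ip, 64, 0, IP_PROTO_UDP);
 *
 * Passing IP4_ADDR_ANY4 as the source lets ip4_output_if() substitute the
 * address of the netif chosen by ip4_route_src(). */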
#if LWIP_NETIF_HWADDRHINT
/** Like ip_output, but takes an addr_hint pointer that is passed on to netif->addr_hint
* before calling ip_output_if.
*
* @param p the packet to send (p->payload points to the data, e.g. next
protocol header; if dest == LWIP_IP_HDRINCL, p already includes an
IP header and p->payload points to that IP header)
* @param src the source IP address to send from (if src == IP4_ADDR_ANY, the
* IP address of the netif used to send is used as source address)
* @param dest the destination IP address to send the packet to
* @param ttl the TTL value to be set in the IP header
* @param tos the TOS value to be set in the IP header
* @param proto the PROTOCOL to be set in the IP header
* @param addr_hint address hint pointer set to netif->addr_hint before
* calling ip_output_if()
*
* @return ERR_RTE if no route is found
* see ip_output_if() for more return values
*/
err_t
ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest,
u8_t ttl, u8_t tos, u8_t proto, u8_t *addr_hint)
{
struct netif *netif;
err_t err;
LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p);
if ((netif = ip4_route_src(dest, src)) == NULL)
{
LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest)));
IP_STATS_INC(ip.rterr);
return ERR_RTE;
}
NETIF_SET_HWADDRHINT(netif, addr_hint);
err = ip4_output_if(p, src, dest, ttl, tos, proto, netif);
NETIF_SET_HWADDRHINT(netif, NULL);
return err;
}
#endif /* LWIP_NETIF_HWADDRHINT*/
#if IP_DEBUG
/* Print an IP header by using LWIP_DEBUGF
* @param p an IP packet, p->payload pointing to the IP header
*/
void
ip4_debug_print(struct pbuf *p)
{
struct ip_hdr *iphdr = (struct ip_hdr *)p->payload;
LWIP_DEBUGF(IP_DEBUG, ("IP header:\n"));
LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
LWIP_DEBUGF(IP_DEBUG, ("|%2"S16_F" |%2"S16_F" | 0x%02"X16_F" | %5"U16_F" | (v, hl, tos, len)\n",
(u16_t)IPH_V(iphdr),
(u16_t)IPH_HL(iphdr),
(u16_t)IPH_TOS(iphdr),
lwip_ntohs(IPH_LEN(iphdr))));
LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
LWIP_DEBUGF(IP_DEBUG, ("| %5"U16_F" |%"U16_F"%"U16_F"%"U16_F"| %4"U16_F" | (id, flags, offset)\n",
lwip_ntohs(IPH_ID(iphdr)),
(u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 15 & 1),
(u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 14 & 1),
(u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 13 & 1),
(u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK)));
LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | 0x%04"X16_F" | (ttl, proto, chksum)\n",
(u16_t)IPH_TTL(iphdr),
(u16_t)IPH_PROTO(iphdr),
lwip_ntohs(IPH_CHKSUM(iphdr))));
LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (src)\n",
ip4_addr1_16(&iphdr->src),
ip4_addr2_16(&iphdr->src),
ip4_addr3_16(&iphdr->src),
ip4_addr4_16(&iphdr->src)));
LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (dest)\n",
ip4_addr1_16(&iphdr->dest),
ip4_addr2_16(&iphdr->dest),
ip4_addr3_16(&iphdr->dest),
ip4_addr4_16(&iphdr->dest)));
LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
}
#endif /* IP_DEBUG */
#endif /* LWIP_IPV4 */
| {
"language": "C"
} |
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -1619,6 +1619,59 @@ static struct board_info __initdata boar
.has_ehci0 = 1,
};
+static struct sprom_fixup __initdata vr3025u_fixups[] = {
+ { .offset = 97, .value = 0xfeb3 },
+ { .offset = 98, .value = 0x1618 },
+ { .offset = 99, .value = 0xfab0 },
+ { .offset = 113, .value = 0xfed1 },
+ { .offset = 114, .value = 0x1609 },
+ { .offset = 115, .value = 0xfad9 },
+};
+
+static struct board_info __initdata board_VR3025u = {
+ .name = "96368M-1541N",
+ .expected_cpu_id = 0x6368,
+
+ .has_pci = 1,
+ .use_fallback_sprom = 1,
+ .has_ohci0 = 1,
+ .has_ehci0 = 1,
+
+ .has_enetsw = 1,
+ .enetsw = {
+ .used_ports = {
+ [0] = {
+ .used = 1,
+ .phy_id = 1,
+ .name = "port1",
+ },
+ [1] = {
+ .used = 1,
+ .phy_id = 2,
+ .name = "port2",
+ },
+ [2] = {
+ .used = 1,
+ .phy_id = 3,
+ .name = "port3",
+ },
+ [3] = {
+ .used = 1,
+ .phy_id = 4,
+ .name = "port4",
+ },
+ },
+ },
+
+ .fallback_sprom = {
+ .type = SPROM_BCM43222,
+ .pci_bus = 0,
+ .pci_dev = 1,
+ .board_fixups = vr3025u_fixups,
+ .num_board_fixups = ARRAY_SIZE(vr3025u_fixups),
+ },
+};
+
static struct sprom_fixup __initdata wap5813n_fixups[] = {
{ .offset = 97, .value = 0xfeed },
{ .offset = 98, .value = 0x15d1 },
@@ -1889,6 +1942,7 @@ static const struct board_info __initcon
#ifdef CONFIG_BCM63XX_CPU_6368
&board_96368mvwg,
&board_96368mvngr,
+ &board_VR3025u,
&board_WAP5813n,
#endif
#ifdef CONFIG_BCM63XX_CPU_63268
@@ -1982,6 +2036,7 @@ static struct of_device_id const bcm963x
#ifdef CONFIG_BCM63XX_CPU_6368
{ .compatible = "brcm,bcm96368mvngr", .data = &board_96368mvngr, },
{ .compatible = "brcm,bcm96368mvwg", .data = &board_96368mvwg, },
+ { .compatible = "comtrend,vr-3025u", .data = &board_VR3025u, },
{ .compatible = "comtrend,wap-5813n", .data = &board_WAP5813n, },
#endif
#ifdef CONFIG_BCM63XX_CPU_63268
| {
"language": "C"
} |
/*
* Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
* Copyright (C) 2008 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#ifndef NumberPrototype_h
#define NumberPrototype_h
#include "NumberObject.h"
namespace JSC {
class NumberPrototype : public NumberObject {
public:
NumberPrototype(ExecState*, JSGlobalObject*, Structure*, Structure* functionStructure);
};
} // namespace JSC
#endif // NumberPrototype_h
| {
"language": "C"
} |
/* System dependent definitions for run-time dynamic loading.
Copyright (C) 1996, 1997, 1999, 2000, 2001, 2004
Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#ifndef _DLFCN_H
# error "Never use <bits/dlfcn.h> directly; include <dlfcn.h> instead."
#endif
/* The MODE argument to `dlopen' contains one of the following: */
#define RTLD_LAZY 0x0001 /* Lazy function call binding. */
#define RTLD_NOW 0x0002 /* Immediate function call binding. */
#define RTLD_BINDING_MASK 0x3 /* Mask of binding time value. */
#define RTLD_NOLOAD 0x00008 /* Do not load the object. */
#define RTLD_DEEPBIND 0x00010 /* Use deep binding. */
/* If the following bit is set in the MODE argument to `dlopen',
the symbols of the loaded object and its dependencies are made
visible as if the object were linked directly into the program. */
#define RTLD_GLOBAL 0x0004
/* Unix98 demands the following flag which is the inverse to RTLD_GLOBAL.
The implementation does this by default and so we can define the
value to zero. */
#define RTLD_LOCAL 0
/* Do not delete object when closed. */
#define RTLD_NODELETE 0x01000
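/* Illustrative use of the mode flags above (a sketch, not part of this
   header):

     void *handle = dlopen ("libm.so.6", RTLD_NOW | RTLD_GLOBAL);
     double (*cosine) (double) = (double (*) (double)) dlsym (handle, "cos");
     ...
     dlclose (handle);
*/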
#if 0 /*def __USE_GNU*/
/* To support profiling of shared objects it is a good idea to call
the function found using `dlsym' using the following macro since
these calls do not use the PLT. But this would mean the dynamic
loader has no chance to find out when the function is called. The
macro applies the necessary magic so that profiling is possible.
Rewrite
foo = (*fctp) (arg1, arg2);
into
foo = DL_CALL_FCT (fctp, (arg1, arg2));
*/
# define DL_CALL_FCT(fctp, args) \
(_dl_mcount_wrapper_check ((void *) (fctp)), (*(fctp)) args)
__BEGIN_DECLS
/* This function calls the profiling functions. */
extern void _dl_mcount_wrapper_check (void *__selfpc) __THROW;
__END_DECLS
#endif
| {
"language": "C"
} |
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its
* associated regulations, and may be subject to export or import
* regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include <bdk.h>
#include "libbdk-arch/bdk-csrs-gpio.h"
#include "libbdk-hal/bdk-gpio.h"
/* This code is an optional part of the BDK. It is only linked in
if BDK_REQUIRE() needs it */
BDK_REQUIRE_DEFINE(GPIO);
/**
* Initialize a single GPIO as either an input or output. If it is
* an output, also set its output value.
*
* @param gpio GPIO to initialize
* @param is_output Non zero if this GPIO should be an output
* @param output_value
* Value of the GPIO if it should be an output. Not used if the
* GPIO isn't an output.
*
 * @return Zero on success, negative on failure
*/
int bdk_gpio_initialize(bdk_node_t node, int gpio, int is_output, int output_value)
{
if ((gpio >= 0) && (gpio < bdk_gpio_get_num()))
{
int gpio_group = gpio >> 6;
int gpio_index = gpio & 63;
if (output_value)
bdk_gpio_set(node, gpio_group, 1ull << gpio_index);
else
bdk_gpio_clear(node, gpio_group, 1ull << gpio_index);
BDK_CSR_DEFINE(cfg, BDK_GPIO_BIT_CFGX(gpio));
cfg.u = 0;
cfg.s.tx_oe = !!is_output;
BDK_CSR_WRITE(node, BDK_GPIO_BIT_CFGX(gpio), cfg.u);
}
else
{
bdk_error("bdk_gpio_initialize: Illegal GPIO\n");
return -1;
}
return 0;
}
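/* Illustrative usage sketch (assumes the caller already has a valid
 * bdk_node_t and that GPIO 5 exists on the part):
 *
 *   bdk_gpio_initialize(node, 5, 1, 1);              // GPIO5 as output, driven high
 *   bdk_gpio_clear(node, 0, 1ull << 5);              // later drive it low
 *   int level = (bdk_gpio_read(node, 0) >> 5) & 1;   // sample the pin state
 */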
/**
* GPIO Read Data
*
* @param node Node GPIO block is on
* @param gpio_block GPIO block to access. Each block contains up to 64 GPIOs
*
* @return Status of the GPIO pins for the given block
*/
uint64_t bdk_gpio_read(bdk_node_t node, int gpio_block)
{
bdk_gpio_rx_dat_t gpio_rx_dat;
switch (gpio_block)
{
case 0:
gpio_rx_dat.u = BDK_CSR_READ(node, BDK_GPIO_RX_DAT);
break;
case 1:
gpio_rx_dat.u = BDK_CSR_READ(node, BDK_GPIO_RX1_DAT);
break;
default:
bdk_error("GPIO block %d not supported\n", gpio_block);
gpio_rx_dat.u = 0;
break;
}
return gpio_rx_dat.s.dat;
}
/**
* GPIO Clear pin
*
* @param node Node GPIO block is on
* @param gpio_block GPIO block to access. Each block contains up to 64 GPIOs
* @param clear_mask Bit mask to indicate which bits to drive to '0'.
*/
void bdk_gpio_clear(bdk_node_t node, int gpio_block, uint64_t clear_mask)
{
switch (gpio_block)
{
case 0:
BDK_CSR_WRITE(node, BDK_GPIO_TX_CLR, clear_mask);
break;
case 1:
BDK_CSR_WRITE(node, BDK_GPIO_TX1_CLR, clear_mask);
break;
default:
bdk_error("GPIO block %d not supported\n", gpio_block);
break;
}
}
/**
* GPIO Set pin
*
* @param node Node GPIO block is on
* @param gpio_block GPIO block to access. Each block contains up to 64 GPIOs
* @param set_mask Bit mask to indicate which bits to drive to '1'.
*/
void bdk_gpio_set(bdk_node_t node, int gpio_block, uint64_t set_mask)
{
switch (gpio_block)
{
case 0:
BDK_CSR_WRITE(node, BDK_GPIO_TX_SET, set_mask);
break;
case 1:
BDK_CSR_WRITE(node, BDK_GPIO_TX1_SET, set_mask);
break;
default:
bdk_error("GPIO block %d not supported\n", gpio_block);
break;
}
}
/** GPIO Select pin
*
* @param node CPU node
* @param gpio GPIO number
* @param pin Pin number
*/
void bdk_gpio_select_pin(bdk_node_t node, int gpio, int pin)
{
if ((gpio < 0) || (gpio >= bdk_gpio_get_num()))
{
bdk_warn("bdk_gpio_select_pin: Illegal GPIO %d\n", gpio);
return;
}
BDK_CSR_MODIFY(c, node, BDK_GPIO_BIT_CFGX(gpio), c.s.pin_sel = pin);
}
/**
* Return the number of GPIO pins on this chip
*
* @return Number of GPIO pins
*/
int bdk_gpio_get_num(void)
{
if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
return 51;
else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
return 48;
else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
return 80;
else if (CAVIUM_IS_MODEL(CAVIUM_CN93XX))
return 96;
else
{
bdk_error("bdk_gpio_get_num(): Unsupported chip");
return 0;
}
}
| {
"language": "C"
} |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */
#include <ctype.h>
#include <stdio.h> /* for (FILE *) used by json_writer */
#include <string.h>
#include <unistd.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <bpf/btf.h>
#include <bpf/bpf.h>
#include "json_writer.h"
#include "main.h"
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
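/* Example (illustrative): BITS_ROUNDUP_BYTES(13) = (13 >> 3) + !!(13 & 7)
 * = 1 + 1 = 2, i.e. a 13-bit field needs two bytes of backing storage.
 */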
static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
__u8 bit_offset, const void *data);
static int btf_dump_func(const struct btf *btf, char *func_sig,
const struct btf_type *func_proto,
const struct btf_type *func, int pos, int size);
static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
const struct btf_type *func_proto,
__u32 prog_id)
{
struct bpf_prog_info_linear *prog_info = NULL;
const struct btf_type *func_type;
const char *prog_name = NULL;
struct bpf_func_info *finfo;
struct btf *prog_btf = NULL;
struct bpf_prog_info *info;
int prog_fd, func_sig_len;
char prog_str[1024];
/* Get the ptr's func_proto */
func_sig_len = btf_dump_func(d->btf, prog_str, func_proto, NULL, 0,
sizeof(prog_str));
if (func_sig_len == -1)
return -1;
if (!prog_id)
goto print;
/* Get the bpf_prog's name. Obtain from func_info. */
prog_fd = bpf_prog_get_fd_by_id(prog_id);
if (prog_fd == -1)
goto print;
prog_info = bpf_program__get_prog_info_linear(prog_fd,
1UL << BPF_PROG_INFO_FUNC_INFO);
close(prog_fd);
if (IS_ERR(prog_info)) {
prog_info = NULL;
goto print;
}
info = &prog_info->info;
if (!info->btf_id || !info->nr_func_info ||
btf__get_from_id(info->btf_id, &prog_btf))
goto print;
finfo = u64_to_ptr(info->func_info);
func_type = btf__type_by_id(prog_btf, finfo->type_id);
if (!func_type || !btf_is_func(func_type))
goto print;
prog_name = btf__name_by_offset(prog_btf, func_type->name_off);
print:
if (!prog_id)
snprintf(&prog_str[func_sig_len],
sizeof(prog_str) - func_sig_len, " 0");
else if (prog_name)
snprintf(&prog_str[func_sig_len],
sizeof(prog_str) - func_sig_len,
" %s/prog_id:%u", prog_name, prog_id);
else
snprintf(&prog_str[func_sig_len],
sizeof(prog_str) - func_sig_len,
" <unknown_prog_name>/prog_id:%u", prog_id);
prog_str[sizeof(prog_str) - 1] = '\0';
jsonw_string(d->jw, prog_str);
btf__free(prog_btf);
free(prog_info);
return 0;
}
static void btf_dumper_ptr(const struct btf_dumper *d,
const struct btf_type *t,
const void *data)
{
unsigned long value = *(unsigned long *)data;
const struct btf_type *ptr_type;
__s32 ptr_type_id;
if (!d->prog_id_as_func_ptr || value > UINT32_MAX)
goto print_ptr_value;
ptr_type_id = btf__resolve_type(d->btf, t->type);
if (ptr_type_id < 0)
goto print_ptr_value;
ptr_type = btf__type_by_id(d->btf, ptr_type_id);
if (!ptr_type || !btf_is_func_proto(ptr_type))
goto print_ptr_value;
if (!dump_prog_id_as_func_ptr(d, ptr_type, value))
return;
print_ptr_value:
if (d->is_plain_text)
jsonw_printf(d->jw, "%p", (void *)value);
else
jsonw_printf(d->jw, "%lu", value);
}
static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
__u8 bit_offset, const void *data)
{
int actual_type_id;
actual_type_id = btf__resolve_type(d->btf, type_id);
if (actual_type_id < 0)
return actual_type_id;
return btf_dumper_do_type(d, actual_type_id, bit_offset, data);
}
static int btf_dumper_enum(const struct btf_dumper *d,
const struct btf_type *t,
const void *data)
{
const struct btf_enum *enums = btf_enum(t);
__s64 value;
__u16 i;
switch (t->size) {
case 8:
value = *(__s64 *)data;
break;
case 4:
value = *(__s32 *)data;
break;
case 2:
value = *(__s16 *)data;
break;
case 1:
value = *(__s8 *)data;
break;
default:
return -EINVAL;
}
for (i = 0; i < btf_vlen(t); i++) {
if (value == enums[i].val) {
jsonw_string(d->jw,
btf__name_by_offset(d->btf,
enums[i].name_off));
return 0;
}
}
jsonw_int(d->jw, value);
return 0;
}
static bool is_str_array(const struct btf *btf, const struct btf_array *arr,
const char *s)
{
const struct btf_type *elem_type;
const char *end_s;
if (!arr->nelems)
return false;
elem_type = btf__type_by_id(btf, arr->type);
/* Not skipping typedef. typedef to char does not count as
* a string now.
*/
while (elem_type && btf_is_mod(elem_type))
elem_type = btf__type_by_id(btf, elem_type->type);
if (!elem_type || !btf_is_int(elem_type) || elem_type->size != 1)
return false;
if (btf_int_encoding(elem_type) != BTF_INT_CHAR &&
strcmp("char", btf__name_by_offset(btf, elem_type->name_off)))
return false;
end_s = s + arr->nelems;
while (s < end_s) {
if (!*s)
return true;
if (*s <= 0x1f || *s >= 0x7f)
return false;
s++;
}
/* '\0' is not found */
return false;
}
static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
const void *data)
{
const struct btf_type *t = btf__type_by_id(d->btf, type_id);
struct btf_array *arr = (struct btf_array *)(t + 1);
long long elem_size;
int ret = 0;
__u32 i;
if (is_str_array(d->btf, arr, data)) {
jsonw_string(d->jw, data);
return 0;
}
elem_size = btf__resolve_size(d->btf, arr->type);
if (elem_size < 0)
return elem_size;
jsonw_start_array(d->jw);
for (i = 0; i < arr->nelems; i++) {
ret = btf_dumper_do_type(d, arr->type, 0,
data + i * elem_size);
if (ret)
break;
}
jsonw_end_array(d->jw);
return ret;
}
static void btf_int128_print(json_writer_t *jw, const void *data,
bool is_plain_text)
{
/* data points to a __int128 number.
* Suppose
* int128_num = *(__int128 *)data;
* The below formulas shows what upper_num and lower_num represents:
* upper_num = int128_num >> 64;
* lower_num = int128_num & 0xffffffffFFFFFFFFULL;
*/
__u64 upper_num, lower_num;
#ifdef __BIG_ENDIAN_BITFIELD
upper_num = *(__u64 *)data;
lower_num = *(__u64 *)(data + 8);
#else
upper_num = *(__u64 *)(data + 8);
lower_num = *(__u64 *)data;
#endif
if (is_plain_text) {
if (upper_num == 0)
jsonw_printf(jw, "0x%llx", lower_num);
else
jsonw_printf(jw, "0x%llx%016llx", upper_num, lower_num);
} else {
if (upper_num == 0)
jsonw_printf(jw, "\"0x%llx\"", lower_num);
else
jsonw_printf(jw, "\"0x%llx%016llx\"", upper_num, lower_num);
}
}
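/* Shift the 128-bit value in print_num left and then right so that only the
 * bits of interest remain, handling the endian-dependent layout of the two
 * 64-bit halves.
 */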
static void btf_int128_shift(__u64 *print_num, __u16 left_shift_bits,
__u16 right_shift_bits)
{
__u64 upper_num, lower_num;
#ifdef __BIG_ENDIAN_BITFIELD
upper_num = print_num[0];
lower_num = print_num[1];
#else
upper_num = print_num[1];
lower_num = print_num[0];
#endif
/* shake out un-needed bits by shift/or operations */
if (left_shift_bits >= 64) {
upper_num = lower_num << (left_shift_bits - 64);
lower_num = 0;
} else {
upper_num = (upper_num << left_shift_bits) |
(lower_num >> (64 - left_shift_bits));
lower_num = lower_num << left_shift_bits;
}
if (right_shift_bits >= 64) {
lower_num = upper_num >> (right_shift_bits - 64);
upper_num = 0;
} else {
lower_num = (lower_num >> right_shift_bits) |
(upper_num << (64 - right_shift_bits));
upper_num = upper_num >> right_shift_bits;
}
#ifdef __BIG_ENDIAN_BITFIELD
print_num[0] = upper_num;
print_num[1] = lower_num;
#else
print_num[0] = lower_num;
print_num[1] = upper_num;
#endif
}
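/* Print a bitfield of nr_bits bits starting bit_offset bits into *data: copy
 * the bytes covering the field into a local 128-bit buffer, shift away the
 * surrounding bits, then print the result in hex.
 */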
static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
{
int left_shift_bits, right_shift_bits;
__u64 print_num[2] = {};
int bytes_to_copy;
int bits_to_copy;
bits_to_copy = bit_offset + nr_bits;
bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
memcpy(print_num, data, bytes_to_copy);
#if defined(__BIG_ENDIAN_BITFIELD)
left_shift_bits = bit_offset;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
left_shift_bits = 128 - bits_to_copy;
#else
#error neither big nor little endian
#endif
right_shift_bits = 128 - nr_bits;
btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
btf_int128_print(jw, print_num, is_plain_text);
}
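/* Handle an integer that does not start on a byte boundary or whose width is
 * not a whole number of bytes: fold BTF_INT_OFFSET() into the member's bit
 * offset and dump it as a bitfield.
 */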
static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
{
int nr_bits = BTF_INT_BITS(int_type);
int total_bits_offset;
/* bit_offset is at most 7.
 * BTF_INT_OFFSET() cannot exceed 128 bits.
 */
total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
btf_dumper_bitfield(nr_bits, bit_offset, data, jw,
is_plain_text);
}
static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
{
__u32 *int_type;
__u32 nr_bits;
int_type = (__u32 *)(t + 1);
nr_bits = BTF_INT_BITS(*int_type);
/* if this is a bitfield */
if (bit_offset || BTF_INT_OFFSET(*int_type) ||
BITS_PER_BYTE_MASKED(nr_bits)) {
btf_dumper_int_bits(*int_type, bit_offset, data, jw,
is_plain_text);
return 0;
}
if (nr_bits == 128) {
btf_int128_print(jw, data, is_plain_text);
return 0;
}
switch (BTF_INT_ENCODING(*int_type)) {
case 0:
if (BTF_INT_BITS(*int_type) == 64)
jsonw_printf(jw, "%llu", *(__u64 *)data);
else if (BTF_INT_BITS(*int_type) == 32)
jsonw_printf(jw, "%u", *(__u32 *)data);
else if (BTF_INT_BITS(*int_type) == 16)
jsonw_printf(jw, "%hu", *(__u16 *)data);
else if (BTF_INT_BITS(*int_type) == 8)
jsonw_printf(jw, "%hhu", *(__u8 *)data);
else
btf_dumper_int_bits(*int_type, bit_offset, data, jw,
is_plain_text);
break;
case BTF_INT_SIGNED:
if (BTF_INT_BITS(*int_type) == 64)
jsonw_printf(jw, "%lld", *(long long *)data);
else if (BTF_INT_BITS(*int_type) == 32)
jsonw_printf(jw, "%d", *(int *)data);
else if (BTF_INT_BITS(*int_type) == 16)
jsonw_printf(jw, "%hd", *(short *)data);
else if (BTF_INT_BITS(*int_type) == 8)
jsonw_printf(jw, "%hhd", *(char *)data);
else
btf_dumper_int_bits(*int_type, bit_offset, data, jw,
is_plain_text);
break;
case BTF_INT_CHAR:
if (isprint(*(char *)data))
jsonw_printf(jw, "\"%c\"", *(char *)data);
else
if (is_plain_text)
jsonw_printf(jw, "0x%hhx", *(char *)data);
else
jsonw_printf(jw, "\"\\u00%02hhx\"",
*(char *)data);
break;
case BTF_INT_BOOL:
jsonw_bool(jw, *(int *)data);
break;
default:
/* shouldn't happen */
return -EINVAL;
}
return 0;
}
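/* Dump a struct or union as a JSON object. When the kind_flag is set, each
 * member's offset field encodes both a bitfield size and a bit offset, so
 * such members are printed through btf_dumper_bitfield().
 */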
static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
const void *data)
{
const struct btf_type *t;
struct btf_member *m;
const void *data_off;
int kind_flag;
int ret = 0;
int i, vlen;
t = btf__type_by_id(d->btf, type_id);
if (!t)
return -EINVAL;
kind_flag = BTF_INFO_KFLAG(t->info);
vlen = BTF_INFO_VLEN(t->info);
jsonw_start_object(d->jw);
m = (struct btf_member *)(t + 1);
for (i = 0; i < vlen; i++) {
__u32 bit_offset = m[i].offset;
__u32 bitfield_size = 0;
if (kind_flag) {
bitfield_size = BTF_MEMBER_BITFIELD_SIZE(bit_offset);
bit_offset = BTF_MEMBER_BIT_OFFSET(bit_offset);
}
jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off));
data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
if (bitfield_size) {
btf_dumper_bitfield(bitfield_size,
BITS_PER_BYTE_MASKED(bit_offset),
data_off, d->jw, d->is_plain_text);
} else {
ret = btf_dumper_do_type(d, m[i].type,
BITS_PER_BYTE_MASKED(bit_offset),
data_off);
if (ret)
break;
}
}
jsonw_end_object(d->jw);
return ret;
}
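/* Dump a BTF variable as a single-entry JSON object keyed by its name. */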
static int btf_dumper_var(const struct btf_dumper *d, __u32 type_id,
__u8 bit_offset, const void *data)
{
const struct btf_type *t = btf__type_by_id(d->btf, type_id);
int ret;
jsonw_start_object(d->jw);
jsonw_name(d->jw, btf__name_by_offset(d->btf, t->name_off));
ret = btf_dumper_do_type(d, t->type, bit_offset, data);
jsonw_end_object(d->jw);
return ret;
}
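/* Dump a data section as an object named after the section, containing an
 * array with each contained variable dumped at its recorded offset.
 */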
static int btf_dumper_datasec(const struct btf_dumper *d, __u32 type_id,
const void *data)
{
struct btf_var_secinfo *vsi;
const struct btf_type *t;
int ret = 0, i, vlen;
t = btf__type_by_id(d->btf, type_id);
if (!t)
return -EINVAL;
vlen = BTF_INFO_VLEN(t->info);
vsi = (struct btf_var_secinfo *)(t + 1);
jsonw_start_object(d->jw);
jsonw_name(d->jw, btf__name_by_offset(d->btf, t->name_off));
jsonw_start_array(d->jw);
for (i = 0; i < vlen; i++) {
ret = btf_dumper_do_type(d, vsi[i].type, 0, data + vsi[i].offset);
if (ret)
break;
}
jsonw_end_array(d->jw);
jsonw_end_object(d->jw);
return ret;
}
static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
__u8 bit_offset, const void *data)
{
const struct btf_type *t = btf__type_by_id(d->btf, type_id);
switch (BTF_INFO_KIND(t->info)) {
case BTF_KIND_INT:
return btf_dumper_int(t, bit_offset, data, d->jw,
d->is_plain_text);
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
return btf_dumper_struct(d, type_id, data);
case BTF_KIND_ARRAY:
return btf_dumper_array(d, type_id, data);
case BTF_KIND_ENUM:
return btf_dumper_enum(d, t, data);
case BTF_KIND_PTR:
btf_dumper_ptr(d, t, data);
return 0;
case BTF_KIND_UNKN:
jsonw_printf(d->jw, "(unknown)");
return 0;
case BTF_KIND_FWD:
/* a map key or value can't be a forward declaration */
jsonw_printf(d->jw, "(fwd-kind-invalid)");
return -EINVAL;
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
return btf_dumper_modifier(d, type_id, bit_offset, data);
case BTF_KIND_VAR:
return btf_dumper_var(d, type_id, bit_offset, data);
case BTF_KIND_DATASEC:
return btf_dumper_datasec(d, type_id, data);
default:
jsonw_printf(d->jw, "(unsupported-kind");
return -EINVAL;
}
}
int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
const void *data)
{
return btf_dumper_do_type(d, type_id, 0, data);
}
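/* Helpers for building a C-style type or function signature in func_sig:
 * both append at offset 'pos' and make the enclosing function return -1 when
 * the buffer would overflow or a nested type cannot be printed.
 */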
#define BTF_PRINT_ARG(...) \
do { \
pos += snprintf(func_sig + pos, size - pos, \
__VA_ARGS__); \
if (pos >= size) \
return -1; \
} while (0)
#define BTF_PRINT_TYPE(type) \
do { \
pos = __btf_dumper_type_only(btf, type, func_sig, \
pos, size); \
if (pos == -1) \
return -1; \
} while (0)
static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
char *func_sig, int pos, int size)
{
const struct btf_type *proto_type;
const struct btf_array *array;
const struct btf_var *var;
const struct btf_type *t;
if (!type_id) {
BTF_PRINT_ARG("void ");
return pos;
}
t = btf__type_by_id(btf, type_id);
switch (BTF_INFO_KIND(t->info)) {
case BTF_KIND_INT:
case BTF_KIND_TYPEDEF:
BTF_PRINT_ARG("%s ", btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_STRUCT:
BTF_PRINT_ARG("struct %s ",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_UNION:
BTF_PRINT_ARG("union %s ",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_ENUM:
BTF_PRINT_ARG("enum %s ",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_ARRAY:
array = (struct btf_array *)(t + 1);
BTF_PRINT_TYPE(array->type);
BTF_PRINT_ARG("[%d]", array->nelems);
break;
case BTF_KIND_PTR:
BTF_PRINT_TYPE(t->type);
BTF_PRINT_ARG("* ");
break;
case BTF_KIND_FWD:
BTF_PRINT_ARG("%s %s ",
BTF_INFO_KFLAG(t->info) ? "union" : "struct",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_VOLATILE:
BTF_PRINT_ARG("volatile ");
BTF_PRINT_TYPE(t->type);
break;
case BTF_KIND_CONST:
BTF_PRINT_ARG("const ");
BTF_PRINT_TYPE(t->type);
break;
case BTF_KIND_RESTRICT:
BTF_PRINT_ARG("restrict ");
BTF_PRINT_TYPE(t->type);
break;
case BTF_KIND_FUNC_PROTO:
pos = btf_dump_func(btf, func_sig, t, NULL, pos, size);
if (pos == -1)
return -1;
break;
case BTF_KIND_FUNC:
proto_type = btf__type_by_id(btf, t->type);
pos = btf_dump_func(btf, func_sig, proto_type, t, pos, size);
if (pos == -1)
return -1;
break;
case BTF_KIND_VAR:
var = (struct btf_var *)(t + 1);
if (var->linkage == BTF_VAR_STATIC)
BTF_PRINT_ARG("static ");
BTF_PRINT_TYPE(t->type);
BTF_PRINT_ARG(" %s",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_DATASEC:
BTF_PRINT_ARG("section (\"%s\") ",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_UNKN:
default:
return -1;
}
return pos;
}
static int btf_dump_func(const struct btf *btf, char *func_sig,
const struct btf_type *func_proto,
const struct btf_type *func, int pos, int size)
{
int i, vlen;
BTF_PRINT_TYPE(func_proto->type);
if (func)
BTF_PRINT_ARG("%s(", btf__name_by_offset(btf, func->name_off));
else
BTF_PRINT_ARG("(");
vlen = BTF_INFO_VLEN(func_proto->info);
for (i = 0; i < vlen; i++) {
struct btf_param *arg = &((struct btf_param *)(func_proto + 1))[i];
if (i)
BTF_PRINT_ARG(", ");
if (arg->type) {
BTF_PRINT_TYPE(arg->type);
if (arg->name_off)
BTF_PRINT_ARG("%s",
btf__name_by_offset(btf, arg->name_off));
else if (pos && func_sig[pos - 1] == ' ')
/* Remove unnecessary space for
* FUNC_PROTO that does not have
* arg->name_off
*/
func_sig[--pos] = '\0';
} else {
BTF_PRINT_ARG("...");
}
}
BTF_PRINT_ARG(")");
return pos;
}
void btf_dumper_type_only(const struct btf *btf, __u32 type_id, char *func_sig,
int size)
{
int err;
func_sig[0] = '\0';
if (!btf)
return;
err = __btf_dumper_type_only(btf, type_id, func_sig, 0, size);
if (err < 0)
func_sig[0] = '\0';
}
static const char *ltrim(const char *s)
{
while (isspace(*s))
s++;
return s;
}
void btf_dump_linfo_plain(const struct btf *btf,
const struct bpf_line_info *linfo,
const char *prefix, bool linum)
{
const char *line = btf__name_by_offset(btf, linfo->line_off);
if (!line)
return;
line = ltrim(line);
if (!prefix)
prefix = "";
if (linum) {
const char *file = btf__name_by_offset(btf, linfo->file_name_off);
/* More forgiving on file because linum option is
* expected to provide more info than the already
* available src line.
*/
if (!file)
file = "";
printf("%s%s [file:%s line_num:%u line_col:%u]\n",
prefix, line, file,
BPF_LINE_INFO_LINE_NUM(linfo->line_col),
BPF_LINE_INFO_LINE_COL(linfo->line_col));
} else {
printf("%s%s\n", prefix, line);
}
}
void btf_dump_linfo_json(const struct btf *btf,
const struct bpf_line_info *linfo, bool linum)
{
const char *line = btf__name_by_offset(btf, linfo->line_off);
if (line)
jsonw_string_field(json_wtr, "src", ltrim(line));
if (linum) {
const char *file = btf__name_by_offset(btf, linfo->file_name_off);
if (file)
jsonw_string_field(json_wtr, "file", file);
if (BPF_LINE_INFO_LINE_NUM(linfo->line_col))
jsonw_int_field(json_wtr, "line_num",
BPF_LINE_INFO_LINE_NUM(linfo->line_col));
if (BPF_LINE_INFO_LINE_COL(linfo->line_col))
jsonw_int_field(json_wtr, "line_col",
BPF_LINE_INFO_LINE_COL(linfo->line_col));
}
}
| {
"language": "C"
} |
/******************************************************************************
* Project: libgeotiff
* Purpose: GeoTIFF Projection Method codes.
* Author: Frank Warmerdam, warmerdam@pobox.com
*
******************************************************************************
* Copyright (c) 2005, Frank Warmerdam <warmerdam@pobox.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
******************************************************************************
*
* $Log$
* Revision 1.3 2005/03/04 03:59:11 fwarmerdam
* Added header.
*
*/
/* C database for Geotiff include files. */
/* the macro ValuePair() must be defined */
/* by the enclosing include file */
/*
* Revised 12 Jul 1995 NDR -- changed South Oriented to a code
* Revised 28 Sep 1995 NDR -- Added Rev. 1.0 aliases.
*/
ValuePair(CT_TransverseMercator, 1)
ValuePair(CT_TransvMercator_Modified_Alaska, 2)
ValuePair(CT_ObliqueMercator, 3)
ValuePair(CT_ObliqueMercator_Laborde, 4)
ValuePair(CT_ObliqueMercator_Rosenmund, 5)
ValuePair(CT_ObliqueMercator_Spherical, 6) /* not advisable */
ValuePair(CT_Mercator, 7)
ValuePair(CT_LambertConfConic_2SP, 8)
ValuePair(CT_LambertConfConic,CT_LambertConfConic_2SP) /* Alias */
ValuePair(CT_LambertConfConic_1SP, 9)
ValuePair(CT_LambertConfConic_Helmert,CT_LambertConfConic_1SP) /* alias */
ValuePair(CT_LambertAzimEqualArea, 10)
ValuePair(CT_AlbersEqualArea, 11)
ValuePair(CT_AzimuthalEquidistant, 12)
ValuePair(CT_EquidistantConic, 13)
ValuePair(CT_Stereographic, 14)
ValuePair(CT_PolarStereographic, 15)
ValuePair(CT_ObliqueStereographic, 16) /* not advisable */
ValuePair(CT_Equirectangular, 17)
ValuePair(CT_CassiniSoldner, 18)
ValuePair(CT_Gnomonic, 19)
ValuePair(CT_MillerCylindrical, 20)
ValuePair(CT_Orthographic, 21)
ValuePair(CT_Polyconic, 22)
ValuePair(CT_Robinson, 23)
ValuePair(CT_Sinusoidal, 24)
ValuePair(CT_VanDerGrinten, 25)
ValuePair(CT_NewZealandMapGrid, 26)
/* Added for 1.0 */
ValuePair(CT_TransvMercator_SouthOrientated, 27)
/* Added Feb 2005 */
ValuePair(CT_CylindricalEqualArea, 28)
/* Aliases */
ValuePair(CT_SouthOrientedGaussConformal,CT_TransvMercator_SouthOrientated)
ValuePair(CT_AlaskaConformal, CT_TransvMercator_Modified_Alaska)
ValuePair(CT_TransvEquidistCylindrical, CT_CassiniSoldner)
ValuePair(CT_ObliqueMercator_Hotine, CT_ObliqueMercator)
ValuePair(CT_SwissObliqueCylindrical, CT_ObliqueMercator_Rosenmund)
ValuePair(CT_GaussBoaga, CT_TransverseMercator)
ValuePair(CT_GaussKruger, CT_TransverseMercator)
ValuePair(CT_TransvMercator_SouthOriented, CT_TransvMercator_SouthOrientated)
| {
"language": "C"
} |
/**
* This code is released under a BSD License.
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "simdcomp.h"
#ifdef _MSC_VER
# include <windows.h>
__int64 freq;
typedef __int64 time_snap_t;
static time_snap_t time_snap(void)
{
__int64 now;
QueryPerformanceCounter((LARGE_INTEGER *)&now);
return (__int64)((now*1000000)/freq);
}
# define TIME_SNAP_FMT "%I64d"
#else
# define time_snap clock
# define TIME_SNAP_FMT "%lu"
typedef clock_t time_snap_t;
#endif
void benchmarkSelect() {
uint32_t buffer[128];
uint32_t backbuffer[128];
uint32_t initial = 33;
uint32_t b;
time_snap_t S1, S2, S3;
int i;
printf("benchmarking select \n");
/* this test creates delta encoded buffers with different bit widths, then
 * retrieves each value with the fast select function and checks it against
 * a full unpack */
for (b = 0; b <= 32; b++) {
uint32_t prev = initial;
uint32_t out[128];
/* initialize the buffer */
for (i = 0; i < 128; i++) {
buffer[i] = ((uint32_t)(1655765 * i )) ;
if(b < 32) buffer[i] %= (1<<b);
}
for (i = 0; i < 128; i++) {
buffer[i] = buffer[i] + prev;
prev = buffer[i];
}
for (i = 1; i < 128; i++) {
if(buffer[i] < buffer[i-1] )
buffer[i] = buffer[i-1];
}
assert(simdmaxbitsd1(initial, buffer)<=b);
for (i = 0; i < 128; i++) {
out[i] = 0; /* memset would do too */
}
/* delta-encode to 'b' bits */
simdpackwithoutmaskd1(initial, buffer, (__m128i *)out, b);
S1 = time_snap();
for (i = 0; i < 128 * 10; i++) {
uint32_t valretrieved = simdselectd1(initial, (__m128i *)out, b, (uint32_t)i % 128);
assert(valretrieved == buffer[i%128]);
}
S2 = time_snap();
for (i = 0; i < 128 * 10; i++) {
simdunpackd1(initial, (__m128i *)out, backbuffer, b);
assert(backbuffer[i % 128] == buffer[i % 128]);
}
S3 = time_snap();
printf("bit width = %d, fast select function time = " TIME_SNAP_FMT ", naive time = " TIME_SNAP_FMT " \n", b, (S2-S1), (S3-S2));
}
}
int uint32_cmp(const void *a, const void *b)
{
const uint32_t *ia = (const uint32_t *)a;
const uint32_t *ib = (const uint32_t *)b;
if(*ia < *ib)
return -1;
else if (*ia > *ib)
return 1;
return 0;
}
/* adapted from wikipedia */
int binary_search(uint32_t * A, uint32_t key, int imin, int imax)
{
int imid;
imax --;
while(imin + 1 < imax) {
imid = imin + ((imax - imin) / 2);
if (A[imid] > key) {
imax = imid;
} else if (A[imid] < key) {
imin = imid;
} else {
return imid;
}
}
return imax;
}
/* adapted from wikipedia */
int lower_bound(uint32_t * A, uint32_t key, int imin, int imax)
{
int imid;
imax --;
while(imin + 1 < imax) {
imid = imin + ((imax - imin) / 2);
if (A[imid] >= key) {
imax = imid;
} else if (A[imid] < key) {
imin = imid;
}
}
if(A[imin] >= key) return imin;
return imax;
}
void benchmarkSearch() {
uint32_t buffer[128];
uint32_t backbuffer[128];
uint32_t out[128];
uint32_t result, initial = 0;
uint32_t b, i;
time_snap_t S1, S2, S3, S4;
printf("benchmarking search \n");
/* this test creates delta encoded buffers with different bits, then
* performs lower bound searches for each key */
for (b = 0; b <= 32; b++) {
uint32_t prev = initial;
/* initialize the buffer */
for (i = 0; i < 128; i++) {
buffer[i] = ((uint32_t)rand()) ;
if(b < 32) buffer[i] %= (1<<b);
}
qsort(buffer,128, sizeof(uint32_t), uint32_cmp);
for (i = 0; i < 128; i++) {
buffer[i] = buffer[i] + prev;
prev = buffer[i];
}
for (i = 1; i < 128; i++) {
if(buffer[i] < buffer[i-1] )
buffer[i] = buffer[i-1];
}
assert(simdmaxbitsd1(initial, buffer)<=b);
for (i = 0; i < 128; i++) {
out[i] = 0; /* memset would do too */
}
/* delta-encode to 'b' bits */
simdpackwithoutmaskd1(initial, buffer, (__m128i *)out, b);
simdunpackd1(initial, (__m128i *)out, backbuffer, b);
for (i = 0; i < 128; i++) {
assert(buffer[i] == backbuffer[i]);
}
S1 = time_snap();
for (i = 0; i < 128 * 10; i++) {
int pos;
uint32_t pseudorandomkey = buffer[i%128];
__m128i vecinitial = _mm_set1_epi32(initial);
pos = simdsearchd1(&vecinitial, (__m128i *)out, b,
pseudorandomkey, &result);
if((result < pseudorandomkey) || (buffer[pos] != result)) {
printf("bug A.\n");
} else if (pos > 0) {
if(buffer[pos-1] >= pseudorandomkey)
printf("bug B.\n");
}
}
S2 = time_snap();
for (i = 0; i < 128 * 10; i++) {
int pos;
uint32_t pseudorandomkey = buffer[i%128];
simdunpackd1(initial, (__m128i *)out, backbuffer, b);
pos = lower_bound(backbuffer, pseudorandomkey, 0, 128);
result = backbuffer[pos];
if((result < pseudorandomkey) || (buffer[pos] != result)) {
printf("bug C.\n");
} else if (pos > 0) {
if(buffer[pos-1] >= pseudorandomkey)
printf("bug D.\n");
}
}
S3 = time_snap();
for (i = 0; i < 128 * 10; i++) {
int pos;
uint32_t pseudorandomkey = buffer[i%128];
pos = simdsearchwithlengthd1(initial, (__m128i *)out, b, 128,
pseudorandomkey, &result);
if((result < pseudorandomkey) || (buffer[pos] != result)) {
printf("bug A.\n");
} else if (pos > 0) {
if(buffer[pos-1] >= pseudorandomkey)
printf("bug B.\n");
}
}
S4 = time_snap();
printf("bit width = %d, fast search function time = " TIME_SNAP_FMT ", naive time = " TIME_SNAP_FMT " , fast with length time = " TIME_SNAP_FMT " \n", b, (S2-S1), (S3-S2), (S4-S3) );
}
}
int main() {
#ifdef _MSC_VER
QueryPerformanceFrequency((LARGE_INTEGER *)&freq);
#endif
benchmarkSearch();
benchmarkSelect();
return 0;
}
| {
"language": "C"
} |
/* libFLAC - Free Lossless Audio Codec library
* Copyright (C) 2000,2001,2002,2003 Josh Coalson
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifndef FLAC__FILE_DECODER_H
#define FLAC__FILE_DECODER_H
#include "export.h"
#include "seekable_stream_decoder.h"
#ifdef __cplusplus
extern "C" {
#endif
/** \file include/FLAC/file_decoder.h
*
* \brief
* This module contains the functions which implement the file
* decoder.
*
* See the detailed documentation in the
* \link flac_file_decoder file decoder \endlink module.
*/
/** \defgroup flac_file_decoder FLAC/file_decoder.h: file decoder interface
* \ingroup flac_decoder
*
* \brief
* This module contains the functions which implement the file
* decoder.
*
* The basic usage of this decoder is as follows:
* - The program creates an instance of a decoder using
* FLAC__file_decoder_new().
* - The program overrides the default settings and sets callbacks for
* writing, error reporting, and metadata reporting using
* FLAC__file_decoder_set_*() functions.
* - The program initializes the instance to validate the settings and
* prepare for decoding using FLAC__file_decoder_init().
* - The program calls the FLAC__file_decoder_process_*() functions
* to decode data, which subsequently calls the callbacks.
* - The program finishes the decoding with FLAC__file_decoder_finish(),
* which flushes the input and output and resets the decoder to the
* uninitialized state.
* - The instance may be used again or deleted with
* FLAC__file_decoder_delete().
*
* The file decoder is a trivial wrapper around the
* \link flac_seekable_stream_decoder seekable stream decoder \endlink
 * meant to simplify the process of decoding from a standard file. The
* file decoder supplies all but the Write/Metadata/Error callbacks.
* The user needs only to provide the path to the file and the file
* decoder handles the rest.
*
* Like the seekable stream decoder, seeking is exposed through the
* FLAC__file_decoder_seek_absolute() method. At any point after the file
* decoder has been initialized, the user can call this function to seek to
* an exact sample within the file. Subsequently, the first time the write
* callback is called it will be passed a (possibly partial) block starting
* at that sample.
*
* The file decoder also inherits MD5 signature checking from the seekable
* stream decoder. If this is turned on before initialization,
* FLAC__file_decoder_finish() will report when the decoded MD5 signature
* does not match the one stored in the STREAMINFO block. MD5 checking is
* automatically turned off if there is no signature in the STREAMINFO
* block or when a seek is attempted.
*
* Make sure to read the detailed descriptions of the
* \link flac_seekable_stream_decoder seekable stream decoder module \endlink
* and \link flac_stream_decoder stream decoder module \endlink
* since the file decoder inherits much of its behavior from them.
*
* \note
* The "set" functions may only be called when the decoder is in the
* state FLAC__FILE_DECODER_UNINITIALIZED, i.e. after
* FLAC__file_decoder_new() or FLAC__file_decoder_finish(), but
* before FLAC__file_decoder_init(). If this is the case they will
* return \c true, otherwise \c false.
*
* \note
* FLAC__file_decoder_finish() resets all settings to the constructor
* defaults, including the callbacks.
*
* \{
*/
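/* A minimal usage sketch following the steps above (illustrative only; the
 * callback bodies, identifier names, and the "input.flac" path are
 * placeholders, not part of the FLAC API):
 *
 * \code
 * static FLAC__StreamDecoderWriteStatus
 * my_write(const FLAC__FileDecoder *decoder, const FLAC__Frame *frame,
 *          const FLAC__int32 * const buffer[], void *client_data)
 * {
 *     // consume the decoded samples in buffer here
 *     return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE;
 * }
 * static void my_metadata(const FLAC__FileDecoder *d, const FLAC__StreamMetadata *m, void *c) {}
 * static void my_error(const FLAC__FileDecoder *d, FLAC__StreamDecoderErrorStatus s, void *c) {}
 *
 * FLAC__FileDecoder *decoder = FLAC__file_decoder_new();
 * FLAC__file_decoder_set_filename(decoder, "input.flac");
 * FLAC__file_decoder_set_write_callback(decoder, my_write);
 * FLAC__file_decoder_set_metadata_callback(decoder, my_metadata);
 * FLAC__file_decoder_set_error_callback(decoder, my_error);
 * if (FLAC__file_decoder_init(decoder) == FLAC__FILE_DECODER_OK) {
 *     FLAC__file_decoder_process_until_end_of_file(decoder);
 *     FLAC__file_decoder_finish(decoder);
 * }
 * FLAC__file_decoder_delete(decoder);
 * \endcode
 */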
/** State values for a FLAC__FileDecoder
*
* The decoder's state can be obtained by calling FLAC__file_decoder_get_state().
*/
typedef enum {
FLAC__FILE_DECODER_OK = 0,
/**< The decoder is in the normal OK state. */
FLAC__FILE_DECODER_END_OF_FILE,
/**< The decoder has reached the end of the file. */
FLAC__FILE_DECODER_ERROR_OPENING_FILE,
/**< An error occurred opening the input file. */
FLAC__FILE_DECODER_MEMORY_ALLOCATION_ERROR,
/**< An error occurred allocating memory. */
FLAC__FILE_DECODER_SEEK_ERROR,
/**< An error occurred while seeking. */
FLAC__FILE_DECODER_SEEKABLE_STREAM_DECODER_ERROR,
/**< An error occurred in the underlying seekable stream decoder. */
FLAC__FILE_DECODER_ALREADY_INITIALIZED,
/**< FLAC__file_decoder_init() was called when the decoder was already
* initialized, usually because FLAC__file_decoder_finish() was not
* called.
*/
FLAC__FILE_DECODER_INVALID_CALLBACK,
/**< FLAC__file_decoder_init() was called without all callbacks
* being set.
*/
FLAC__FILE_DECODER_UNINITIALIZED
/**< The decoder is in the uninitialized state. */
} FLAC__FileDecoderState;
/** Maps a FLAC__FileDecoderState to a C string.
*
* Using a FLAC__FileDecoderState as the index to this array
* will give the string equivalent. The contents should not be modified.
*/
extern FLAC_API const char * const FLAC__FileDecoderStateString[];
/***********************************************************************
*
* class FLAC__FileDecoder : public FLAC__StreamDecoder
*
***********************************************************************/
struct FLAC__FileDecoderProtected;
struct FLAC__FileDecoderPrivate;
/** The opaque structure definition for the file decoder type. See the
* \link flac_file_decoder file decoder module \endlink for a detailed
* description.
*/
typedef struct {
struct FLAC__FileDecoderProtected *protected_; /* avoid the C++ keyword 'protected' */
struct FLAC__FileDecoderPrivate *private_; /* avoid the C++ keyword 'private' */
} FLAC__FileDecoder;
/** Signature for the write callback.
* See FLAC__file_decoder_set_write_callback()
* and FLAC__SeekableStreamDecoderWriteCallback for more info.
*
* \param decoder The decoder instance calling the callback.
* \param frame The description of the decoded frame.
* \param buffer An array of pointers to decoded channels of data.
* \param client_data The callee's client data set through
* FLAC__file_decoder_set_client_data().
* \retval FLAC__StreamDecoderWriteStatus
* The callee's return status.
*/
typedef FLAC__StreamDecoderWriteStatus (*FLAC__FileDecoderWriteCallback)(const FLAC__FileDecoder *decoder, const FLAC__Frame *frame, const FLAC__int32 * const buffer[], void *client_data);
/** Signature for the metadata callback.
* See FLAC__file_decoder_set_metadata_callback()
* and FLAC__SeekableStreamDecoderMetadataCallback for more info.
*
* \param decoder The decoder instance calling the callback.
* \param metadata The decoded metadata block.
* \param client_data The callee's client data set through
* FLAC__file_decoder_set_client_data().
*/
typedef void (*FLAC__FileDecoderMetadataCallback)(const FLAC__FileDecoder *decoder, const FLAC__StreamMetadata *metadata, void *client_data);
/** Signature for the error callback.
* See FLAC__file_decoder_set_error_callback()
* and FLAC__SeekableStreamDecoderErrorCallback for more info.
*
* \param decoder The decoder instance calling the callback.
* \param status The error encountered by the decoder.
* \param client_data The callee's client data set through
* FLAC__file_decoder_set_client_data().
*/
typedef void (*FLAC__FileDecoderErrorCallback)(const FLAC__FileDecoder *decoder, FLAC__StreamDecoderErrorStatus status, void *client_data);
/***********************************************************************
*
* Class constructor/destructor
*
***********************************************************************/
/** Create a new file decoder instance. The instance is created with
* default settings; see the individual FLAC__file_decoder_set_*()
* functions for each setting's default.
*
* \retval FLAC__FileDecoder*
* \c NULL if there was an error allocating memory, else the new instance.
*/
FLAC_API FLAC__FileDecoder *FLAC__file_decoder_new();
/** Free a decoder instance. Deletes the object pointed to by \a decoder.
*
* \param decoder A pointer to an existing decoder.
* \assert
* \code decoder != NULL \endcode
*/
FLAC_API void FLAC__file_decoder_delete(FLAC__FileDecoder *decoder);
/***********************************************************************
*
* Public class method prototypes
*
***********************************************************************/
/** Set the "MD5 signature checking" flag.
* This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_set_md5_checking().
*
* \default \c false
* \param decoder A decoder instance to set.
* \param value See above.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__bool
* \c false if the decoder is already initialized, else \c true.
*/
FLAC_API FLAC__bool FLAC__file_decoder_set_md5_checking(FLAC__FileDecoder *decoder, FLAC__bool value);
/** Set the input file name to decode.
*
* \default \c "-"
* \param decoder A decoder instance to set.
* \param value The input file name, or "-" for \c stdin.
* \assert
* \code decoder != NULL \endcode
* \code value != NULL \endcode
* \retval FLAC__bool
* \c false if the decoder is already initialized, or there was a memory
* allocation error, else \c true.
*/
FLAC_API FLAC__bool FLAC__file_decoder_set_filename(FLAC__FileDecoder *decoder, const char *value);
/** Set the write callback.
* This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_set_write_callback().
*
* \note
* The callback is mandatory and must be set before initialization.
*
* \default \c NULL
* \param decoder A decoder instance to set.
* \param value See above.
* \assert
* \code decoder != NULL \endcode
* \code value != NULL \endcode
* \retval FLAC__bool
* \c false if the decoder is already initialized, else \c true.
*/
FLAC_API FLAC__bool FLAC__file_decoder_set_write_callback(FLAC__FileDecoder *decoder, FLAC__FileDecoderWriteCallback value);
/** Set the metadata callback.
* This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_set_metadata_callback().
*
* \note
* The callback is mandatory and must be set before initialization.
*
* \default \c NULL
* \param decoder A decoder instance to set.
* \param value See above.
* \assert
* \code decoder != NULL \endcode
* \code value != NULL \endcode
* \retval FLAC__bool
* \c false if the decoder is already initialized, else \c true.
*/
FLAC_API FLAC__bool FLAC__file_decoder_set_metadata_callback(FLAC__FileDecoder *decoder, FLAC__FileDecoderMetadataCallback value);
/** Set the error callback.
* This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_set_error_callback().
*
* \note
* The callback is mandatory and must be set before initialization.
*
* \default \c NULL
* \param decoder A decoder instance to set.
* \param value See above.
* \assert
* \code decoder != NULL \endcode
* \code value != NULL \endcode
* \retval FLAC__bool
* \c false if the decoder is already initialized, else \c true.
*/
FLAC_API FLAC__bool FLAC__file_decoder_set_error_callback(FLAC__FileDecoder *decoder, FLAC__FileDecoderErrorCallback value);
/** Set the client data to be passed back to callbacks.
* This value will be supplied to callbacks in their \a client_data
* argument.
*
* \default \c NULL
* \param decoder A decoder instance to set.
* \param value See above.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__bool
* \c false if the decoder is already initialized, else \c true.
*/
FLAC_API FLAC__bool FLAC__file_decoder_set_client_data(FLAC__FileDecoder *decoder, void *value);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_set_metadata_respond().
*
* \default By default, only the \c STREAMINFO block is returned via the
* metadata callback.
* \param decoder A decoder instance to set.
* \param type See above.
* \assert
* \code decoder != NULL \endcode
* \a type is valid
* \retval FLAC__bool
* \c false if the decoder is already initialized, else \c true.
*/
FLAC_API FLAC__bool FLAC__file_decoder_set_metadata_respond(FLAC__FileDecoder *decoder, FLAC__MetadataType type);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_set_metadata_respond_application().
*
* \default By default, only the \c STREAMINFO block is returned via the
* metadata callback.
* \param decoder A decoder instance to set.
* \param id See above.
* \assert
* \code decoder != NULL \endcode
* \code id != NULL \endcode
* \retval FLAC__bool
* \c false if the decoder is already initialized, else \c true.
*/
FLAC_API FLAC__bool FLAC__file_decoder_set_metadata_respond_application(FLAC__FileDecoder *decoder, const FLAC__byte id[4]);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_set_metadata_respond_all().
*
* \default By default, only the \c STREAMINFO block is returned via the
* metadata callback.
* \param decoder A decoder instance to set.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__bool
* \c false if the decoder is already initialized, else \c true.
*/
FLAC_API FLAC__bool FLAC__file_decoder_set_metadata_respond_all(FLAC__FileDecoder *decoder);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_set_metadata_ignore().
*
* \default By default, only the \c STREAMINFO block is returned via the
* metadata callback.
* \param decoder A decoder instance to set.
* \param type See above.
* \assert
* \code decoder != NULL \endcode
* \a type is valid
* \retval FLAC__bool
* \c false if the decoder is already initialized, else \c true.
*/
FLAC_API FLAC__bool FLAC__file_decoder_set_metadata_ignore(FLAC__FileDecoder *decoder, FLAC__MetadataType type);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_set_metadata_ignore_application().
*
* \default By default, only the \c STREAMINFO block is returned via the
* metadata callback.
* \param decoder A decoder instance to set.
* \param id See above.
* \assert
* \code decoder != NULL \endcode
* \code id != NULL \endcode
* \retval FLAC__bool
* \c false if the decoder is already initialized, else \c true.
*/
FLAC_API FLAC__bool FLAC__file_decoder_set_metadata_ignore_application(FLAC__FileDecoder *decoder, const FLAC__byte id[4]);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_set_metadata_ignore_all().
*
* \default By default, only the \c STREAMINFO block is returned via the
* metadata callback.
* \param decoder A decoder instance to set.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__bool
* \c false if the decoder is already initialized, else \c true.
*/
FLAC_API FLAC__bool FLAC__file_decoder_set_metadata_ignore_all(FLAC__FileDecoder *decoder);
/** Get the current decoder state.
*
* \param decoder A decoder instance to query.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__FileDecoderState
* The current decoder state.
*/
FLAC_API FLAC__FileDecoderState FLAC__file_decoder_get_state(const FLAC__FileDecoder *decoder);
/** Get the state of the underlying seekable stream decoder.
* Useful when the file decoder state is
* \c FLAC__FILE_DECODER_SEEKABLE_STREAM_DECODER_ERROR.
*
* \param decoder A decoder instance to query.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__SeekableStreamDecoderState
* The seekable stream decoder state.
*/
FLAC_API FLAC__SeekableStreamDecoderState FLAC__file_decoder_get_seekable_stream_decoder_state(const FLAC__FileDecoder *decoder);
/** Get the state of the underlying stream decoder.
* Useful when the file decoder state is
* \c FLAC__FILE_DECODER_SEEKABLE_STREAM_DECODER_ERROR and the seekable stream
* decoder state is \c FLAC__SEEKABLE_STREAM_DECODER_STREAM_DECODER_ERROR.
*
* \param decoder A decoder instance to query.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__StreamDecoderState
 * The stream decoder state.
*/
FLAC_API FLAC__StreamDecoderState FLAC__file_decoder_get_stream_decoder_state(const FLAC__FileDecoder *decoder);
/** Get the current decoder state as a C string.
* This version automatically resolves
* \c FLAC__FILE_DECODER_SEEKABLE_STREAM_DECODER_ERROR by getting the
* seekable stream decoder's state.
*
* \param decoder A decoder instance to query.
* \assert
* \code decoder != NULL \endcode
* \retval const char *
* The decoder state as a C string. Do not modify the contents.
*/
FLAC_API const char *FLAC__file_decoder_get_resolved_state_string(const FLAC__FileDecoder *decoder);
/** Get the "MD5 signature checking" flag.
* This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_get_md5_checking().
*
* \param decoder A decoder instance to query.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__bool
* See above.
*/
FLAC_API FLAC__bool FLAC__file_decoder_get_md5_checking(const FLAC__FileDecoder *decoder);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_get_channels().
*
* \param decoder A decoder instance to query.
* \assert
* \code decoder != NULL \endcode
* \retval unsigned
* See above.
*/
FLAC_API unsigned FLAC__file_decoder_get_channels(const FLAC__FileDecoder *decoder);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_get_channel_assignment().
*
* \param decoder A decoder instance to query.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__ChannelAssignment
* See above.
*/
FLAC_API FLAC__ChannelAssignment FLAC__file_decoder_get_channel_assignment(const FLAC__FileDecoder *decoder);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_get_bits_per_sample().
*
* \param decoder A decoder instance to query.
* \assert
* \code decoder != NULL \endcode
* \retval unsigned
* See above.
*/
FLAC_API unsigned FLAC__file_decoder_get_bits_per_sample(const FLAC__FileDecoder *decoder);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_get_sample_rate().
*
* \param decoder A decoder instance to query.
* \assert
* \code decoder != NULL \endcode
* \retval unsigned
* See above.
*/
FLAC_API unsigned FLAC__file_decoder_get_sample_rate(const FLAC__FileDecoder *decoder);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_get_blocksize().
*
* \param decoder A decoder instance to query.
* \assert
* \code decoder != NULL \endcode
* \retval unsigned
* See above.
*/
FLAC_API unsigned FLAC__file_decoder_get_blocksize(const FLAC__FileDecoder *decoder);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_get_decode_position().
*
* \param decoder A decoder instance to query.
* \param position Address at which to return the desired position.
* \assert
* \code decoder != NULL \endcode
* \code position != NULL \endcode
* \retval FLAC__bool
* \c true if successful, \c false if there was an error from
* the 'tell' callback.
*/
FLAC_API FLAC__bool FLAC__file_decoder_get_decode_position(const FLAC__FileDecoder *decoder, FLAC__uint64 *position);
/** Initialize the decoder instance.
* Should be called after FLAC__file_decoder_new() and
* FLAC__file_decoder_set_*() but before any of the
* FLAC__file_decoder_process_*() functions. Will set and return
* the decoder state, which will be FLAC__FILE_DECODER_OK if
* initialization succeeded.
*
* \param decoder An uninitialized decoder instance.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__FileDecoderState
* \c FLAC__FILE_DECODER_OK if initialization was successful; see
* FLAC__FileDecoderState for the meanings of other return values.
*/
FLAC_API FLAC__FileDecoderState FLAC__file_decoder_init(FLAC__FileDecoder *decoder);
/** Finish the decoding process.
* Flushes the decoding buffer, releases resources, resets the decoder
* settings to their defaults, and returns the decoder state to
* FLAC__FILE_DECODER_UNINITIALIZED.
*
* In the event of a prematurely-terminated decode, it is not strictly
* necessary to call this immediately before FLAC__file_decoder_delete()
* but it is good practice to match every FLAC__file_decoder_init() with
* a FLAC__file_decoder_finish().
*
 * \param decoder An initialized decoder instance.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__bool
* \c false if MD5 checking is on AND a STREAMINFO block was available
* AND the MD5 signature in the STREAMINFO block was non-zero AND the
* signature does not match the one computed by the decoder; else
* \c true.
*/
FLAC_API FLAC__bool FLAC__file_decoder_finish(FLAC__FileDecoder *decoder);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_process_single().
*
* \param decoder A decoder instance.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__bool
* See above.
*/
FLAC_API FLAC__bool FLAC__file_decoder_process_single(FLAC__FileDecoder *decoder);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_process_until_end_of_metadata().
*
* \param decoder A decoder instance.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__bool
* See above.
*/
FLAC_API FLAC__bool FLAC__file_decoder_process_until_end_of_metadata(FLAC__FileDecoder *decoder);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_process_until_end_of_stream().
*
* \param decoder A decoder instance.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__bool
* See above.
*/
FLAC_API FLAC__bool FLAC__file_decoder_process_until_end_of_file(FLAC__FileDecoder *decoder);
/** This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_process_remaining_frames().
*
* \param decoder A decoder instance.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__bool
* See above.
*/
FLAC_API FLAC__bool FLAC__file_decoder_process_remaining_frames(FLAC__FileDecoder *decoder);
/** Flush the input and seek to an absolute sample.
* This is inherited from FLAC__SeekableStreamDecoder; see
* FLAC__seekable_stream_decoder_seek_absolute().
*
* \param decoder A decoder instance.
* \param sample The target sample number to seek to.
* \assert
* \code decoder != NULL \endcode
* \retval FLAC__bool
* \c true if successful, else \c false.
*/
FLAC_API FLAC__bool FLAC__file_decoder_seek_absolute(FLAC__FileDecoder *decoder, FLAC__uint64 sample);
/* \} */
#ifdef __cplusplus
}
#endif
#endif
| {
"language": "C"
} |
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2015 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __tof_h__
#define __tof_h__
#include "fw/api/tof.h"
struct iwl_mvm_tof_data {
struct iwl_tof_config_cmd tof_cfg;
struct iwl_tof_range_req_cmd range_req;
struct iwl_tof_range_req_ext_cmd range_req_ext;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct iwl_tof_responder_config_cmd responder_cfg;
#endif
struct iwl_tof_range_rsp_ntfy range_resp;
u8 last_abort_id;
u16 active_range_request;
};
void iwl_mvm_tof_init(struct iwl_mvm *mvm);
void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
#endif
#endif /* __tof_h__ */
| {
"language": "C"
} |
/* Graphite polyhedral representation.
Copyright (C) 2009, 2010 Free Software Foundation, Inc.
Contributed by Sebastian Pop <sebastian.pop@amd.com> and
Tobias Grosser <grosser@fim.uni-passau.de>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_GRAPHITE_POLY_H
#define GCC_GRAPHITE_POLY_H
typedef struct poly_dr *poly_dr_p;
DEF_VEC_P(poly_dr_p);
DEF_VEC_ALLOC_P (poly_dr_p, heap);
typedef struct poly_bb *poly_bb_p;
DEF_VEC_P(poly_bb_p);
DEF_VEC_ALLOC_P (poly_bb_p, heap);
typedef struct scop *scop_p;
DEF_VEC_P(scop_p);
DEF_VEC_ALLOC_P (scop_p, heap);
typedef ppl_dimension_type graphite_dim_t;
static inline graphite_dim_t pbb_dim_iter_domain (const struct poly_bb *);
static inline graphite_dim_t pbb_nb_params (const struct poly_bb *);
static inline graphite_dim_t scop_nb_params (scop_p);
/* A data reference can write or read some memory or we
just know it may write some memory. */
enum poly_dr_type
{
PDR_READ,
/* PDR_MAY_READs are represented using PDR_READS. This does not
limit the expressiveness. */
PDR_WRITE,
PDR_MAY_WRITE
};
struct poly_dr
{
/* An identifier for this PDR. */
int id;
/* The number of data refs identical to this one in the PBB. */
int nb_refs;
/* A pointer to compiler's data reference description. */
void *compiler_dr;
/* A pointer to the PBB that contains this data reference. */
poly_bb_p pbb;
enum poly_dr_type type;
/* The access polyhedron contains the polyhedral space this data
reference will access.
The polyhedron contains these dimensions:
- The alias set (a):
Every memory access is classified in at least one alias set.
- The subscripts (s_0, ..., s_n):
The memory is accessed using zero or more subscript dimensions.
- The iteration domain (variables and parameters)
Do not hardcode the dimensions. Use the following accessor functions:
- pdr_alias_set_dim
- pdr_subscript_dim
- pdr_iterator_dim
- pdr_parameter_dim
Example:
| int A[1335][123];
| int *p = malloc ();
|
| k = ...
| for i
| {
| if (unknown_function ())
| p = A;
| ... = p[?][?];
| for j
| A[i][j+k] = m;
| }
The data access A[i][j+k] in alias set "5" is described like this:
| i j k a s0 s1 1
| 0 0 0 1 0 0 -5 = 0
|-1 0 0 0 1 0 0 = 0
| 0 -1 -1 0 0 1 0 = 0
| 0 0 0 0 1 0 0 >= 0 # The last four lines describe the
| 0 0 0 0 0 1 0 >= 0 # array size.
| 0 0 0 0 -1 0 1335 >= 0
| 0 0 0 0 0 -1 123 >= 0
The pointer "*p" in alias set "5" and "7" is described as a union of
polyhedra:
| i k a s0 1
| 0 0 1 0 -5 = 0
| 0 0 0 1 0 >= 0
"or"
| i k a s0 1
| 0 0 1 0 -7 = 0
| 0 0 0 1 0 >= 0
"*p" accesses all of the object allocated with 'malloc'.
The scalar data access "m" is represented as an array with zero subscript
dimensions.
| i j k a 1
| 0 0 0 -1 15 = 0
The difference between the graphite internal format for access data and
the OpenScop format is in the order of columns.
Instead of having:
| i j k a s0 s1 1
| 0 0 0 1 0 0 -5 = 0
|-1 0 0 0 1 0 0 = 0
| 0 -1 -1 0 0 1 0 = 0
| 0 0 0 0 1 0 0 >= 0 # The last four lines describe the
| 0 0 0 0 0 1 0 >= 0 # array size.
| 0 0 0 0 -1 0 1335 >= 0
| 0 0 0 0 0 -1 123 >= 0
In OpenScop we have:
| a s0 s1 i j k 1
| 1 0 0 0 0 0 -5 = 0
| 0 1 0 -1 0 0 0 = 0
| 0 0 1 0 -1 -1 0 = 0
| 0 1 0 0 0 0 0 >= 0 # The last four lines describe the
| 0 0 1 0 0 0 0 >= 0 # array size.
| 0 -1 0 0 0 0 1335 >= 0
| 0 0 -1 0 0 0 123 >= 0
The OpenScop access function is printed as follows:
| 1 # The number of disjunct components in a union of access functions.
| R C O I L P # Described below.
| a s0 s1 i j k 1
| 1 0 0 0 0 0 -5 = 0
| 0 1 0 -1 0 0 0 = 0
| 0 0 1 0 -1 -1 0 = 0
| 0 1 0 0 0 0 0 >= 0 # The last four lines describe the
| 0 0 1 0 0 0 0 >= 0 # array size.
| 0 -1 0 0 0 0 1335 >= 0
| 0 0 -1 0 0 0 123 >= 0
Where:
- R: Number of rows.
- C: Number of columns.
- O: Number of output dimensions = alias set + number of subscripts.
- I: Number of input dimensions (iterators).
- L: Number of local (existentially quantified) dimensions.
- P: Number of parameters.
In the example, the vector "R C O I L P" is "7 7 3 2 0 1". */
ppl_Pointset_Powerset_C_Polyhedron_t accesses;
/* Data reference's base object set number; we must ensure two PDRs are in the
same base object set before dependency checking. */
int dr_base_object_set;
/* The number of subscripts. */
graphite_dim_t nb_subscripts;
};
#define PDR_ID(PDR) (PDR->id)
#define PDR_NB_REFS(PDR) (PDR->nb_refs)
#define PDR_CDR(PDR) (PDR->compiler_dr)
#define PDR_PBB(PDR) (PDR->pbb)
#define PDR_TYPE(PDR) (PDR->type)
#define PDR_ACCESSES(PDR) (PDR->accesses)
#define PDR_BASE_OBJECT_SET(PDR) (PDR->dr_base_object_set)
#define PDR_NB_SUBSCRIPTS(PDR) (PDR->nb_subscripts)
void new_poly_dr (poly_bb_p, int, ppl_Pointset_Powerset_C_Polyhedron_t,
enum poly_dr_type, void *, graphite_dim_t);
void free_poly_dr (poly_dr_p);
void debug_pdr (poly_dr_p, int);
void print_pdr (FILE *, poly_dr_p, int);
static inline scop_p pdr_scop (poly_dr_p pdr);
/* The dimension of the PDR_ACCESSES polyhedron of PDR. */
static inline ppl_dimension_type
pdr_dim (poly_dr_p pdr)
{
ppl_dimension_type dim;
ppl_Pointset_Powerset_C_Polyhedron_space_dimension (PDR_ACCESSES (pdr),
&dim);
return dim;
}
/* The dimension of the iteration domain of the scop of PDR. */
static inline ppl_dimension_type
pdr_dim_iter_domain (poly_dr_p pdr)
{
return pbb_dim_iter_domain (PDR_PBB (pdr));
}
/* The number of parameters of the scop of PDR. */
static inline ppl_dimension_type
pdr_nb_params (poly_dr_p pdr)
{
return scop_nb_params (pdr_scop (pdr));
}
/* The dimension of the alias set in PDR. */
static inline ppl_dimension_type
pdr_alias_set_dim (poly_dr_p pdr)
{
poly_bb_p pbb = PDR_PBB (pdr);
return pbb_dim_iter_domain (pbb) + pbb_nb_params (pbb);
}
/* The dimension in PDR containing subscript S. */
static inline ppl_dimension_type
pdr_subscript_dim (poly_dr_p pdr, graphite_dim_t s)
{
poly_bb_p pbb = PDR_PBB (pdr);
return pbb_dim_iter_domain (pbb) + pbb_nb_params (pbb) + 1 + s;
}
/* The dimension in PDR containing the loop iterator ITER. */
static inline ppl_dimension_type
pdr_iterator_dim (poly_dr_p pdr ATTRIBUTE_UNUSED, graphite_dim_t iter)
{
return iter;
}
/* The dimension in PDR containing parameter PARAM. */
static inline ppl_dimension_type
pdr_parameter_dim (poly_dr_p pdr, graphite_dim_t param)
{
poly_bb_p pbb = PDR_PBB (pdr);
return pbb_dim_iter_domain (pbb) + param;
}
/* Returns true when PDR is a "read". */
static inline bool
pdr_read_p (poly_dr_p pdr)
{
return PDR_TYPE (pdr) == PDR_READ;
}
/* Returns true when PDR is a "write". */
static inline bool
pdr_write_p (poly_dr_p pdr)
{
return PDR_TYPE (pdr) == PDR_WRITE;
}
/* Returns true when PDR is a "may write". */
static inline bool
pdr_may_write_p (poly_dr_p pdr)
{
return PDR_TYPE (pdr) == PDR_MAY_WRITE;
}
/* Return true when PDR1 and PDR2 are similar data accesses: they have
the same base array, and the same access functions. */
static inline bool
same_pdr_p (poly_dr_p pdr1, poly_dr_p pdr2)
{
return PDR_NB_SUBSCRIPTS (pdr1) == PDR_NB_SUBSCRIPTS (pdr2)
&& PDR_BASE_OBJECT_SET (pdr1) == PDR_BASE_OBJECT_SET (pdr2);
}
typedef struct poly_scattering *poly_scattering_p;
struct poly_scattering
{
/* The scattering function containing the transformations: the
layout of this polyhedron is: T|I|G with T the transform
scattering, I the iteration domain, G the context parameters. */
ppl_Polyhedron_t scattering;
/* The number of local variables. */
int nb_local_variables;
/* The number of scattering dimensions. */
int nb_scattering;
};
/* POLY_BB represents a blackbox in the polyhedral model. */
struct poly_bb
{
/* Pointer to a basic block or a statement in the compiler. */
void *black_box;
/* Pointer to the SCOP containing this PBB. */
scop_p scop;
/* The iteration domain of this bb. The layout of this polyhedron
is I|G with I the iteration domain, G the context parameters.
Example:
for (i = a - 7*b + 8; i <= 3*a + 13*b + 20; i++)
for (j = 2; j <= 2*i + 5; j++)
for (k = 0; k <= 5; k++)
S (i,j,k)
Loop iterators: i, j, k
Parameters: a, b
| i >= a - 7b + 8
| i <= 3a + 13b + 20
| j >= 2
| j <= 2i + 5
| k >= 0
| k <= 5
The number of variables in the DOMAIN may change and is not
related to the number of loops in the original code. */
ppl_Pointset_Powerset_C_Polyhedron_t domain;
/* The data references we access. */
VEC (poly_dr_p, heap) *drs;
/* The original scattering. */
poly_scattering_p original;
/* The transformed scattering. */
poly_scattering_p transformed;
/* A copy of the transformed scattering. */
poly_scattering_p saved;
/* True when the PDR duplicates have already been removed. */
bool pdr_duplicates_removed;
/* True when this PBB contains only a reduction statement. */
bool is_reduction;
};
#define PBB_BLACK_BOX(PBB) ((gimple_bb_p) PBB->black_box)
#define PBB_SCOP(PBB) (PBB->scop)
#define PBB_DOMAIN(PBB) (PBB->domain)
#define PBB_DRS(PBB) (PBB->drs)
#define PBB_ORIGINAL(PBB) (PBB->original)
#define PBB_ORIGINAL_SCATTERING(PBB) (PBB->original->scattering)
#define PBB_TRANSFORMED(PBB) (PBB->transformed)
#define PBB_TRANSFORMED_SCATTERING(PBB) (PBB->transformed->scattering)
#define PBB_SAVED(PBB) (PBB->saved)
#define PBB_NB_LOCAL_VARIABLES(PBB) (PBB->transformed->nb_local_variables)
#define PBB_NB_SCATTERING_TRANSFORM(PBB) (PBB->transformed->nb_scattering)
#define PBB_PDR_DUPLICATES_REMOVED(PBB) (PBB->pdr_duplicates_removed)
#define PBB_IS_REDUCTION(PBB) (PBB->is_reduction)
extern poly_bb_p new_poly_bb (scop_p, void *);
extern void free_poly_bb (poly_bb_p);
extern void debug_loop_vec (poly_bb_p);
extern void schedule_to_scattering (poly_bb_p, int);
extern void print_pbb_domain (FILE *, poly_bb_p, int);
extern void print_pbb (FILE *, poly_bb_p, int);
extern void print_scop_context (FILE *, scop_p, int);
extern void print_scop (FILE *, scop_p, int);
extern void print_cloog (FILE *, scop_p, int);
extern void debug_pbb_domain (poly_bb_p, int);
extern void debug_pbb (poly_bb_p, int);
extern void print_pdrs (FILE *, poly_bb_p, int);
extern void debug_pdrs (poly_bb_p, int);
extern void debug_scop_context (scop_p, int);
extern void debug_scop (scop_p, int);
extern void debug_cloog (scop_p, int);
extern void print_scop_params (FILE *, scop_p, int);
extern void debug_scop_params (scop_p, int);
extern void print_iteration_domain (FILE *, poly_bb_p, int);
extern void print_iteration_domains (FILE *, scop_p, int);
extern void debug_iteration_domain (poly_bb_p, int);
extern void debug_iteration_domains (scop_p, int);
extern int scop_do_interchange (scop_p);
extern int scop_do_strip_mine (scop_p, int);
extern bool scop_do_block (scop_p);
extern bool flatten_all_loops (scop_p);
extern void pbb_number_of_iterations_at_time (poly_bb_p, graphite_dim_t, mpz_t);
extern void pbb_remove_duplicate_pdrs (poly_bb_p);
/* Return the number of write data references in PBB. */
static inline int
number_of_write_pdrs (poly_bb_p pbb)
{
int res = 0;
int i;
poly_dr_p pdr;
for (i = 0; VEC_iterate (poly_dr_p, PBB_DRS (pbb), i, pdr); i++)
if (PDR_TYPE (pdr) == PDR_WRITE)
res++;
return res;
}
/* Returns a gimple_bb from BB. */
static inline gimple_bb_p
gbb_from_bb (basic_block bb)
{
return (gimple_bb_p) bb->aux;
}
/* The poly_bb of the BB. */
static inline poly_bb_p
pbb_from_bb (basic_block bb)
{
return GBB_PBB (gbb_from_bb (bb));
}
/* The basic block of the PBB. */
static inline basic_block
pbb_bb (poly_bb_p pbb)
{
return GBB_BB (PBB_BLACK_BOX (pbb));
}
/* The index of the PBB. */
static inline int
pbb_index (poly_bb_p pbb)
{
return pbb_bb (pbb)->index;
}
/* The loop of the PBB. */
static inline loop_p
pbb_loop (poly_bb_p pbb)
{
return gbb_loop (PBB_BLACK_BOX (pbb));
}
/* The scop that contains the PDR. */
static inline scop_p
pdr_scop (poly_dr_p pdr)
{
return PBB_SCOP (PDR_PBB (pdr));
}
/* Set black box of PBB to BLACKBOX. */
static inline void
pbb_set_black_box (poly_bb_p pbb, void *black_box)
{
pbb->black_box = black_box;
}
/* The number of loops around PBB: the dimension of the iteration
domain. */
static inline graphite_dim_t
pbb_dim_iter_domain (const struct poly_bb *pbb)
{
scop_p scop = PBB_SCOP (pbb);
ppl_dimension_type dim;
ppl_Pointset_Powerset_C_Polyhedron_space_dimension (PBB_DOMAIN (pbb), &dim);
return dim - scop_nb_params (scop);
}
/* The number of params defined in PBB. */
static inline graphite_dim_t
pbb_nb_params (const struct poly_bb *pbb)
{
scop_p scop = PBB_SCOP (pbb);
return scop_nb_params (scop);
}
/* The number of scattering dimensions in the original scattering
polyhedron of PBB: with the 2d + 1 encoding, the d loop iterators are
interleaved with d + 1 static sequence dimensions. */
static inline graphite_dim_t
pbb_nb_scattering_orig (const struct poly_bb *pbb)
{
return 2 * pbb_dim_iter_domain (pbb) + 1;
}
/* The number of scattering dimensions in PBB. */
static inline graphite_dim_t
pbb_nb_scattering_transform (const struct poly_bb *pbb)
{
return PBB_NB_SCATTERING_TRANSFORM (pbb);
}
/* The number of dynamic scattering dimensions in PBB. */
static inline graphite_dim_t
pbb_nb_dynamic_scattering_transform (const struct poly_bb *pbb)
{
/* This function requires the 2d + 1 scattering format to be
invariant during all transformations. */
gcc_assert (PBB_NB_SCATTERING_TRANSFORM (pbb) % 2);
return PBB_NB_SCATTERING_TRANSFORM (pbb) / 2;
}
/* Returns the number of local variables used in the transformed
scattering polyhedron of PBB. */
static inline graphite_dim_t
pbb_nb_local_vars (const struct poly_bb *pbb)
{
/* For now we do not have any local variables, as we do not do strip
mining for example. */
return PBB_NB_LOCAL_VARIABLES (pbb);
}
/* The dimension in the domain of PBB containing the iterator ITER. */
static inline ppl_dimension_type
pbb_iterator_dim (poly_bb_p pbb ATTRIBUTE_UNUSED, graphite_dim_t iter)
{
return iter;
}
/* The dimension in the domain of PBB containing the parameter PARAM. */
static inline ppl_dimension_type
pbb_parameter_dim (poly_bb_p pbb, graphite_dim_t param)
{
return param
+ pbb_dim_iter_domain (pbb);
}
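/* Illustration: for the example iteration domain given above in the
   comment of struct poly_bb (iterators i, j, k; parameters a, b), the
   domain polyhedron has the layout I|G, so

     pbb_iterator_dim (pbb, 0..2)  -> dimensions 0, 1, 2  (i, j, k)
     pbb_parameter_dim (pbb, 0..1) -> dimensions 3, 4     (a, b).  */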
/* The dimension in the original scattering polyhedron of PBB
containing the scattering iterator SCATTER. */
static inline ppl_dimension_type
psco_scattering_dim (poly_bb_p pbb ATTRIBUTE_UNUSED, graphite_dim_t scatter)
{
gcc_assert (scatter < pbb_nb_scattering_orig (pbb));
return scatter;
}
/* The dimension in the transformed scattering polyhedron of PBB
containing the scattering iterator SCATTER. */
static inline ppl_dimension_type
psct_scattering_dim (poly_bb_p pbb ATTRIBUTE_UNUSED, graphite_dim_t scatter)
{
gcc_assert (scatter <= pbb_nb_scattering_transform (pbb));
return scatter;
}
ppl_dimension_type psct_scattering_dim_for_loop_depth (poly_bb_p,
graphite_dim_t);
/* The dimension in the transformed scattering polyhedron of PBB of
the local variable LV. */
static inline ppl_dimension_type
psct_local_var_dim (poly_bb_p pbb, graphite_dim_t lv)
{
gcc_assert (lv <= pbb_nb_local_vars (pbb));
return lv + pbb_nb_scattering_transform (pbb);
}
/* The dimension in the original scattering polyhedron of PBB
containing the loop iterator ITER. */
static inline ppl_dimension_type
psco_iterator_dim (poly_bb_p pbb, graphite_dim_t iter)
{
gcc_assert (iter < pbb_dim_iter_domain (pbb));
return iter + pbb_nb_scattering_orig (pbb);
}
/* The dimension in the transformed scattering polyhedron of PBB
containing the loop iterator ITER. */
static inline ppl_dimension_type
psct_iterator_dim (poly_bb_p pbb, graphite_dim_t iter)
{
gcc_assert (iter < pbb_dim_iter_domain (pbb));
return iter
+ pbb_nb_scattering_transform (pbb)
+ pbb_nb_local_vars (pbb);
}
/* The dimension in the original scattering polyhedron of PBB
containing parameter PARAM. */
static inline ppl_dimension_type
psco_parameter_dim (poly_bb_p pbb, graphite_dim_t param)
{
gcc_assert (param < pbb_nb_params (pbb));
return param
+ pbb_nb_scattering_orig (pbb)
+ pbb_dim_iter_domain (pbb);
}
/* The dimension in the transformed scattering polyhedron of PBB
containing parameter PARAM. */
static inline ppl_dimension_type
psct_parameter_dim (poly_bb_p pbb, graphite_dim_t param)
{
gcc_assert (param < pbb_nb_params (pbb));
return param
+ pbb_nb_scattering_transform (pbb)
+ pbb_nb_local_vars (pbb)
+ pbb_dim_iter_domain (pbb);
}
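/* Illustration: the psco_*/psct_* accessors above encode the layout of
   the scattering polyhedra.  For the transformed scattering:

     [0, s)                scattering dimensions  (psct_scattering_dim)
     [s, s + lv)           local variables        (psct_local_var_dim)
     [s + lv, s + lv + d)  loop iterators         (psct_iterator_dim)
     [s + lv + d, ...)     parameters             (psct_parameter_dim)

   where s = pbb_nb_scattering_transform (pbb), lv = pbb_nb_local_vars (pbb)
   and d = pbb_dim_iter_domain (pbb).  The original scattering is the same
   without the local variables.  */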
/* The scattering dimension of PBB corresponding to the dynamic level
LEVEL. */
static inline ppl_dimension_type
psct_dynamic_dim (poly_bb_p pbb, graphite_dim_t level)
{
graphite_dim_t result = 1 + 2 * level;
gcc_assert (result < pbb_nb_scattering_transform (pbb));
return result;
}
/* The scattering dimension of PBB corresponding to the static
sequence of the loop level LEVEL. */
static inline ppl_dimension_type
psct_static_dim (poly_bb_p pbb, graphite_dim_t level)
{
graphite_dim_t result = 2 * level;
gcc_assert (result < pbb_nb_scattering_transform (pbb));
return result;
}
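/* Illustration of the "2d + 1" scattering encoding assumed by the two
   functions above: for a statement nested in two loops, the scattering
   dimensions are

     s0  i  s1  j  s2

   i.e. the even dimensions s0, s1, s2 are the static sequence numbers
   (psct_static_dim) and the odd dimensions are the dynamic loop
   iterators (psct_dynamic_dim), for a total of 2 * 2 + 1 = 5 dimensions
   as computed by pbb_nb_scattering_orig.  */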
/* Adds to the transformed scattering polyhedron of PBB a new local
variable and returns its index. */
static inline graphite_dim_t
psct_add_local_variable (poly_bb_p pbb)
{
graphite_dim_t nlv = pbb_nb_local_vars (pbb);
ppl_dimension_type lv_column = psct_local_var_dim (pbb, nlv);
ppl_insert_dimensions (PBB_TRANSFORMED_SCATTERING (pbb), lv_column, 1);
PBB_NB_LOCAL_VARIABLES (pbb) += 1;
return nlv;
}
/* Adds a dimension to the transformed scattering polyhedron of PBB at
INDEX. */
static inline void
psct_add_scattering_dimension (poly_bb_p pbb, ppl_dimension_type index)
{
gcc_assert (index < pbb_nb_scattering_transform (pbb));
ppl_insert_dimensions (PBB_TRANSFORMED_SCATTERING (pbb), index, 1);
PBB_NB_SCATTERING_TRANSFORM (pbb) += 1;
}
typedef struct lst *lst_p;
DEF_VEC_P(lst_p);
DEF_VEC_ALLOC_P (lst_p, heap);
/* Loops and Statements Tree. */
struct lst {
/* LOOP_P is true when an LST node is a loop. */
bool loop_p;
/* A pointer to the loop that contains this node. */
lst_p loop_father;
/* The sum of all the memory strides for an LST loop. */
mpz_t memory_strides;
/* Loop nodes contain a sequence SEQ of LST nodes, statements
contain a pointer to their polyhedral representation PBB. */
union {
poly_bb_p pbb;
VEC (lst_p, heap) *seq;
} node;
};
#define LST_LOOP_P(LST) ((LST)->loop_p)
#define LST_LOOP_FATHER(LST) ((LST)->loop_father)
#define LST_PBB(LST) ((LST)->node.pbb)
#define LST_SEQ(LST) ((LST)->node.seq)
#define LST_LOOP_MEMORY_STRIDES(LST) ((LST)->memory_strides)
void scop_to_lst (scop_p);
void print_lst (FILE *, lst_p, int);
void debug_lst (lst_p);
void dot_lst (lst_p);
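/* Illustration of an LST, reusing the loop nest from the struct poly_bb
   example above:

     for (i) for (j) for (k) S (i, j, k);

   is represented as a root loop node of depth -1 (the "fake" container
   loop), whose sequence holds the loop node for i (depth 0), which holds
   the loop node for j (depth 1), which holds the loop node for k
   (depth 2), which holds the statement node for S.  Each non-root node
   is the only element in its father's sequence, so its Dewey number
   is 0.  */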
/* Creates a new LST loop with SEQ. */
static inline lst_p
new_lst_loop (VEC (lst_p, heap) *seq)
{
lst_p lst = XNEW (struct lst);
int i;
lst_p l;
LST_LOOP_P (lst) = true;
LST_SEQ (lst) = seq;
LST_LOOP_FATHER (lst) = NULL;
mpz_init (LST_LOOP_MEMORY_STRIDES (lst));
mpz_set_si (LST_LOOP_MEMORY_STRIDES (lst), -1);
for (i = 0; VEC_iterate (lst_p, seq, i, l); i++)
LST_LOOP_FATHER (l) = lst;
return lst;
}
/* Creates a new LST statement with PBB. */
static inline lst_p
new_lst_stmt (poly_bb_p pbb)
{
lst_p lst = XNEW (struct lst);
LST_LOOP_P (lst) = false;
LST_PBB (lst) = pbb;
LST_LOOP_FATHER (lst) = NULL;
return lst;
}
/* Frees the memory used by LST. */
static inline void
free_lst (lst_p lst)
{
if (!lst)
return;
if (LST_LOOP_P (lst))
{
int i;
lst_p l;
for (i = 0; VEC_iterate (lst_p, LST_SEQ (lst), i, l); i++)
free_lst (l);
mpz_clear (LST_LOOP_MEMORY_STRIDES (lst));
VEC_free (lst_p, heap, LST_SEQ (lst));
}
free (lst);
}
/* Returns a copy of LST. */
static inline lst_p
copy_lst (lst_p lst)
{
if (!lst)
return NULL;
if (LST_LOOP_P (lst))
{
int i;
lst_p l;
VEC (lst_p, heap) *seq = VEC_alloc (lst_p, heap, 5);
for (i = 0; VEC_iterate (lst_p, LST_SEQ (lst), i, l); i++)
VEC_safe_push (lst_p, heap, seq, copy_lst (l));
return new_lst_loop (seq);
}
return new_lst_stmt (LST_PBB (lst));
}
/* Adds a new loop under the loop LST. */
static inline void
lst_add_loop_under_loop (lst_p lst)
{
VEC (lst_p, heap) *seq = VEC_alloc (lst_p, heap, 1);
lst_p l = new_lst_loop (LST_SEQ (lst));
gcc_assert (LST_LOOP_P (lst));
LST_LOOP_FATHER (l) = lst;
VEC_quick_push (lst_p, seq, l);
LST_SEQ (lst) = seq;
}
/* Returns the loop depth of LST. */
static inline int
lst_depth (lst_p lst)
{
if (!lst)
return -2;
/* The depth of the outermost "fake" loop is -1. This outermost
loop does not have a loop father and it is just a container, as
in the loop representation of GCC. */
if (!LST_LOOP_FATHER (lst))
return -1;
return lst_depth (LST_LOOP_FATHER (lst)) + 1;
}
/* Returns the Dewey number for LST. */
static inline int
lst_dewey_number (lst_p lst)
{
int i;
lst_p l;
if (!lst)
return -1;
if (!LST_LOOP_FATHER (lst))
return 0;
FOR_EACH_VEC_ELT (lst_p, LST_SEQ (LST_LOOP_FATHER (lst)), i, l)
if (l == lst)
return i;
return -1;
}
/* Returns the Dewey number of LST at depth DEPTH. */
static inline int
lst_dewey_number_at_depth (lst_p lst, int depth)
{
gcc_assert (lst && depth >= 0 && lst_depth (lst) >= depth);
if (lst_depth (lst) == depth)
return lst_dewey_number (lst);
return lst_dewey_number_at_depth (LST_LOOP_FATHER (lst), depth);
}
/* Returns the predecessor of LST in the sequence of its loop father.
Returns NULL if LST is the first statement in the sequence. */
static inline lst_p
lst_pred (lst_p lst)
{
int dewey;
lst_p father;
if (!lst || !LST_LOOP_FATHER (lst))
return NULL;
dewey = lst_dewey_number (lst);
if (dewey == 0)
return NULL;
father = LST_LOOP_FATHER (lst);
return VEC_index (lst_p, LST_SEQ (father), dewey - 1);
}
/* Returns the successor of LST in the sequence of its loop father.
Returns NULL if there is none. */
static inline lst_p
lst_succ (lst_p lst)
{
int dewey;
lst_p father;
if (!lst || !LST_LOOP_FATHER (lst))
return NULL;
dewey = lst_dewey_number (lst);
father = LST_LOOP_FATHER (lst);
if (VEC_length (lst_p, LST_SEQ (father)) == (unsigned) dewey + 1)
return NULL;
return VEC_index (lst_p, LST_SEQ (father), dewey + 1);
}
/* Return the LST node corresponding to PBB. */
static inline lst_p
lst_find_pbb (lst_p lst, poly_bb_p pbb)
{
int i;
lst_p l;
if (!lst)
return NULL;
if (!LST_LOOP_P (lst))
return (pbb == LST_PBB (lst)) ? lst : NULL;
for (i = 0; VEC_iterate (lst_p, LST_SEQ (lst), i, l); i++)
{
lst_p res = lst_find_pbb (l, pbb);
if (res)
return res;
}
return NULL;
}
/* Return the LST node corresponding to the loop around STMT at depth
LOOP_DEPTH. */
static inline lst_p
find_lst_loop (lst_p stmt, int loop_depth)
{
lst_p loop = LST_LOOP_FATHER (stmt);
gcc_assert (loop_depth >= 0);
while (loop_depth < lst_depth (loop))
loop = LST_LOOP_FATHER (loop);
return loop;
}
/* Return the first LST representing a PBB statement in LST. */
static inline lst_p
lst_find_first_pbb (lst_p lst)
{
int i;
lst_p l;
if (!lst)
return NULL;
if (!LST_LOOP_P (lst))
return lst;
for (i = 0; VEC_iterate (lst_p, LST_SEQ (lst), i, l); i++)
{
lst_p res = lst_find_first_pbb (l);
if (res)
return res;
}
return NULL;
}
/* Returns true when LST is a loop that does not contain
statements. */
static inline bool
lst_empty_p (lst_p lst)
{
return !lst_find_first_pbb (lst);
}
/* Return the last LST representing a PBB statement in LST. */
static inline lst_p
lst_find_last_pbb (lst_p lst)
{
int i;
lst_p l, res = NULL;
if (!lst)
return NULL;
if (!LST_LOOP_P (lst))
return lst;
for (i = 0; VEC_iterate (lst_p, LST_SEQ (lst), i, l); i++)
{
lst_p last = lst_find_last_pbb (l);
if (last)
res = last;
}
gcc_assert (res);
return res;
}
/* Returns true if LOOP contains LST, in other words, if LST is nested
in LOOP. */
static inline bool
lst_contains_p (lst_p loop, lst_p lst)
{
if (!loop || !lst || !LST_LOOP_P (loop))
return false;
if (loop == lst)
return true;
return lst_contains_p (loop, LST_LOOP_FATHER (lst));
}
/* Returns true if LOOP contains PBB, in other words, if PBB is nested
in LOOP. */
static inline bool
lst_contains_pbb (lst_p loop, poly_bb_p pbb)
{
return lst_find_pbb (loop, pbb) ? true : false;
}
/* Creates a loop nest of depth NB_LOOPS containing LST. */
static inline lst_p
lst_create_nest (int nb_loops, lst_p lst)
{
lst_p res, loop;
VEC (lst_p, heap) *seq;
if (nb_loops == 0)
return lst;
seq = VEC_alloc (lst_p, heap, 1);
loop = lst_create_nest (nb_loops - 1, lst);
VEC_quick_push (lst_p, seq, loop);
res = new_lst_loop (seq);
LST_LOOP_FATHER (loop) = res;
return res;
}
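/* Illustration: lst_create_nest (2, s) wraps the node s in two fresh
   loop nodes, producing loop (loop (s)); lst_create_nest (0, s) simply
   returns s.  */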
/* Removes LST from the sequence of statements of its loop father. */
static inline void
lst_remove_from_sequence (lst_p lst)
{
lst_p father = LST_LOOP_FATHER (lst);
int dewey = lst_dewey_number (lst);
gcc_assert (lst && father && dewey >= 0);
VEC_ordered_remove (lst_p, LST_SEQ (father), dewey);
LST_LOOP_FATHER (lst) = NULL;
}
/* Removes the loop LST and inlines its body into the father loop. */
static inline void
lst_remove_loop_and_inline_stmts_in_loop_father (lst_p lst)
{
lst_p l, father = LST_LOOP_FATHER (lst);
int i, dewey = lst_dewey_number (lst);
gcc_assert (lst && father && dewey >= 0);
VEC_ordered_remove (lst_p, LST_SEQ (father), dewey);
LST_LOOP_FATHER (lst) = NULL;
FOR_EACH_VEC_ELT (lst_p, LST_SEQ (lst), i, l)
{
VEC_safe_insert (lst_p, heap, LST_SEQ (father), dewey + i, l);
LST_LOOP_FATHER (l) = father;
}
}
/* Sets NITER to the upper bound approximation of the number of
iterations of loop LST. */
static inline void
lst_niter_for_loop (lst_p lst, mpz_t niter)
{
int depth = lst_depth (lst);
poly_bb_p pbb = LST_PBB (lst_find_first_pbb (lst));
gcc_assert (LST_LOOP_P (lst));
pbb_number_of_iterations_at_time (pbb, psct_dynamic_dim (pbb, depth), niter);
}
/* Updates the scattering of PBB to be at the DEWEY number in the loop
at depth LEVEL. */
static inline void
pbb_update_scattering (poly_bb_p pbb, graphite_dim_t level, int dewey)
{
ppl_Polyhedron_t ph = PBB_TRANSFORMED_SCATTERING (pbb);
ppl_dimension_type sched = psct_static_dim (pbb, level);
ppl_dimension_type ds[1];
ppl_Constraint_t new_cstr;
ppl_Linear_Expression_t expr;
ppl_dimension_type dim;
ppl_Polyhedron_space_dimension (ph, &dim);
ds[0] = sched;
ppl_Polyhedron_remove_space_dimensions (ph, ds, 1);
ppl_insert_dimensions (ph, sched, 1);
ppl_new_Linear_Expression_with_dimension (&expr, dim);
ppl_set_coef (expr, sched, -1);
ppl_set_inhomogeneous (expr, dewey);
ppl_new_Constraint (&new_cstr, expr, PPL_CONSTRAINT_TYPE_EQUAL);
ppl_delete_Linear_Expression (expr);
ppl_Polyhedron_add_constraint (ph, new_cstr);
ppl_delete_Constraint (new_cstr);
}
/* Updates the scattering of all the PBBs under LST to be at the DEWEY
number in the loop at depth LEVEL. */
static inline void
lst_update_scattering_under (lst_p lst, int level, int dewey)
{
int i;
lst_p l;
gcc_assert (lst && level >= 0 && dewey >= 0);
if (LST_LOOP_P (lst))
for (i = 0; VEC_iterate (lst_p, LST_SEQ (lst), i, l); i++)
lst_update_scattering_under (l, level, dewey);
else
pbb_update_scattering (LST_PBB (lst), level, dewey);
}
/* Updates all the scattering levels of all the PBBs under LST. */
static inline void
lst_update_scattering (lst_p lst)
{
int i;
lst_p l;
if (!lst)
return;
if (LST_LOOP_FATHER (lst))
{
lst_p father = LST_LOOP_FATHER (lst);
int dewey = lst_dewey_number (lst);
int level = lst_depth (lst);
gcc_assert (lst && father && dewey >= 0 && level >= 0);
for (i = dewey; VEC_iterate (lst_p, LST_SEQ (father), i, l); i++)
lst_update_scattering_under (l, level, i);
}
if (LST_LOOP_P (lst))
for (i = 0; VEC_iterate (lst_p, LST_SEQ (lst), i, l); i++)
lst_update_scattering (l);
}
/* Inserts LST1 before LST2 if BEFORE is true; inserts LST1 after LST2
if BEFORE is false. */
static inline void
lst_insert_in_sequence (lst_p lst1, lst_p lst2, bool before)
{
lst_p father;
int dewey;
/* Do not insert empty loops. */
if (!lst1 || lst_empty_p (lst1))
return;
father = LST_LOOP_FATHER (lst2);
dewey = lst_dewey_number (lst2);
gcc_assert (lst2 && father && dewey >= 0);
VEC_safe_insert (lst_p, heap, LST_SEQ (father), before ? dewey : dewey + 1,
lst1);
LST_LOOP_FATHER (lst1) = father;
}
/* Replaces LST1 with LST2. */
static inline void
lst_replace (lst_p lst1, lst_p lst2)
{
lst_p father;
int dewey;
if (!lst2 || lst_empty_p (lst2))
return;
father = LST_LOOP_FATHER (lst1);
dewey = lst_dewey_number (lst1);
LST_LOOP_FATHER (lst2) = father;
VEC_replace (lst_p, LST_SEQ (father), dewey, lst2);
}
/* Returns a copy of ROOT where LST has been replaced by a copy of the
LSTs A B C in this sequence. */
static inline lst_p
lst_substitute_3 (lst_p root, lst_p lst, lst_p a, lst_p b, lst_p c)
{
int i;
lst_p l;
VEC (lst_p, heap) *seq;
if (!root)
return NULL;
gcc_assert (lst && root != lst);
if (!LST_LOOP_P (root))
return new_lst_stmt (LST_PBB (root));
seq = VEC_alloc (lst_p, heap, 5);
for (i = 0; VEC_iterate (lst_p, LST_SEQ (root), i, l); i++)
if (l != lst)
VEC_safe_push (lst_p, heap, seq, lst_substitute_3 (l, lst, a, b, c));
else
{
if (!lst_empty_p (a))
VEC_safe_push (lst_p, heap, seq, copy_lst (a));
if (!lst_empty_p (b))
VEC_safe_push (lst_p, heap, seq, copy_lst (b));
if (!lst_empty_p (c))
VEC_safe_push (lst_p, heap, seq, copy_lst (c));
}
return new_lst_loop (seq);
}
/* Moves LST before LOOP if BEFORE is true, and after the LOOP if
BEFORE is false. */
static inline void
lst_distribute_lst (lst_p loop, lst_p lst, bool before)
{
int loop_depth = lst_depth (loop);
int depth = lst_depth (lst);
int nb_loops = depth - loop_depth;
gcc_assert (lst && loop && LST_LOOP_P (loop) && nb_loops > 0);
lst_remove_from_sequence (lst);
lst_insert_in_sequence (lst_create_nest (nb_loops, lst), loop, before);
}
/* Removes from LOOP all the statements before/after and including PBB
if BEFORE is true/false. Returns the negation of BEFORE when the
statement PBB has been found. */
static inline bool
lst_remove_all_before_including_pbb (lst_p loop, poly_bb_p pbb, bool before)
{
int i;
lst_p l;
if (!loop || !LST_LOOP_P (loop))
return before;
for (i = 0; VEC_iterate (lst_p, LST_SEQ (loop), i, l);)
if (LST_LOOP_P (l))
{
before = lst_remove_all_before_including_pbb (l, pbb, before);
if (VEC_length (lst_p, LST_SEQ (l)) == 0)
{
VEC_ordered_remove (lst_p, LST_SEQ (loop), i);
free_lst (l);
}
else
i++;
}
else
{
if (before)
{
if (LST_PBB (l) == pbb)
before = false;
VEC_ordered_remove (lst_p, LST_SEQ (loop), i);
free_lst (l);
}
else if (LST_PBB (l) == pbb)
{
before = true;
VEC_ordered_remove (lst_p, LST_SEQ (loop), i);
free_lst (l);
}
else
i++;
}
return before;
}
/* Removes from LOOP all the statements before/after and excluding PBB
if BEFORE is true/false; Returns the negation of BEFORE when the
statement PBB has been found. */
static inline bool
lst_remove_all_before_excluding_pbb (lst_p loop, poly_bb_p pbb, bool before)
{
int i;
lst_p l;
if (!loop || !LST_LOOP_P (loop))
return before;
for (i = 0; VEC_iterate (lst_p, LST_SEQ (loop), i, l);)
if (LST_LOOP_P (l))
{
before = lst_remove_all_before_excluding_pbb (l, pbb, before);
if (VEC_length (lst_p, LST_SEQ (l)) == 0)
{
VEC_ordered_remove (lst_p, LST_SEQ (loop), i);
free_lst (l);
continue;
}
i++;
}
else
{
if (before && LST_PBB (l) != pbb)
{
VEC_ordered_remove (lst_p, LST_SEQ (loop), i);
free_lst (l);
continue;
}
i++;
if (LST_PBB (l) == pbb)
before = before ? false : true;
}
return before;
}
/* A SCOP is a Static Control Part of the program, simple enough to be
represented in polyhedral form. */
struct scop
{
/* A SCOP is defined as a SESE region. */
void *region;
/* Number of parameters in SCoP. */
graphite_dim_t nb_params;
/* All the basic blocks in this scop that contain memory references
and that will be represented as statements in the polyhedral
representation. */
VEC (poly_bb_p, heap) *bbs;
/* Original, transformed and saved schedules. */
lst_p original_schedule, transformed_schedule, saved_schedule;
/* The context describes known restrictions concerning the parameters
and the relations between the parameters.
void f (int8_t a, uint16_t b) {
c = 2 a + b;
...
}
Here we can add these restrictions to the context:
-128 <= a <= 127
0 <= b <= 65,535
c = 2a + b */
ppl_Pointset_Powerset_C_Polyhedron_t context;
/* A hashtable of the data dependence relations for the original
scattering. */
htab_t original_pddrs;
/* True when the scop has been converted to its polyhedral
representation. */
bool poly_scop_p;
};
#define SCOP_BBS(S) (S->bbs)
#define SCOP_REGION(S) ((sese) S->region)
#define SCOP_CONTEXT(S) (S->context)
#define SCOP_ORIGINAL_PDDRS(S) (S->original_pddrs)
#define SCOP_ORIGINAL_SCHEDULE(S) (S->original_schedule)
#define SCOP_TRANSFORMED_SCHEDULE(S) (S->transformed_schedule)
#define SCOP_SAVED_SCHEDULE(S) (S->saved_schedule)
#define POLY_SCOP_P(S) (S->poly_scop_p)
extern scop_p new_scop (void *);
extern void free_scop (scop_p);
extern void free_scops (VEC (scop_p, heap) *);
extern void print_generated_program (FILE *, scop_p);
extern void debug_generated_program (scop_p);
extern void print_scattering_function (FILE *, poly_bb_p, int);
extern void print_scattering_functions (FILE *, scop_p, int);
extern void debug_scattering_function (poly_bb_p, int);
extern void debug_scattering_functions (scop_p, int);
extern int scop_max_loop_depth (scop_p);
extern int unify_scattering_dimensions (scop_p);
extern bool apply_poly_transforms (scop_p);
extern bool graphite_legal_transform (scop_p);
extern void cloog_checksum (scop_p);
/* Set the region of SCOP to REGION. */
static inline void
scop_set_region (scop_p scop, void *region)
{
scop->region = region;
}
/* Returns the number of parameters for SCOP. */
static inline graphite_dim_t
scop_nb_params (scop_p scop)
{
return scop->nb_params;
}
/* Set the number of params of SCOP to NB_PARAMS. */
static inline void
scop_set_nb_params (scop_p scop, graphite_dim_t nb_params)
{
scop->nb_params = nb_params;
}
/* Allocates a new empty poly_scattering structure. */
static inline poly_scattering_p
poly_scattering_new (void)
{
poly_scattering_p res = XNEW (struct poly_scattering);
res->scattering = NULL;
res->nb_local_variables = 0;
res->nb_scattering = 0;
return res;
}
/* Free a poly_scattering structure. */
static inline void
poly_scattering_free (poly_scattering_p s)
{
ppl_delete_Polyhedron (s->scattering);
free (s);
}
/* Copies S and returns a new scattering. */
static inline poly_scattering_p
poly_scattering_copy (poly_scattering_p s)
{
poly_scattering_p res = poly_scattering_new ();
ppl_new_C_Polyhedron_from_C_Polyhedron (&(res->scattering), s->scattering);
res->nb_local_variables = s->nb_local_variables;
res->nb_scattering = s->nb_scattering;
return res;
}
/* Saves the transformed scattering of PBB. */
static inline void
store_scattering_pbb (poly_bb_p pbb)
{
gcc_assert (PBB_TRANSFORMED (pbb));
if (PBB_SAVED (pbb))
poly_scattering_free (PBB_SAVED (pbb));
PBB_SAVED (pbb) = poly_scattering_copy (PBB_TRANSFORMED (pbb));
}
/* Stores the SCOP_TRANSFORMED_SCHEDULE to SCOP_SAVED_SCHEDULE. */
static inline void
store_lst_schedule (scop_p scop)
{
if (SCOP_SAVED_SCHEDULE (scop))
free_lst (SCOP_SAVED_SCHEDULE (scop));
SCOP_SAVED_SCHEDULE (scop) = copy_lst (SCOP_TRANSFORMED_SCHEDULE (scop));
}
/* Restores the SCOP_TRANSFORMED_SCHEDULE from SCOP_SAVED_SCHEDULE. */
static inline void
restore_lst_schedule (scop_p scop)
{
if (SCOP_TRANSFORMED_SCHEDULE (scop))
free_lst (SCOP_TRANSFORMED_SCHEDULE (scop));
SCOP_TRANSFORMED_SCHEDULE (scop) = copy_lst (SCOP_SAVED_SCHEDULE (scop));
}
/* Saves the scattering for all the pbbs in the SCOP. */
static inline void
store_scattering (scop_p scop)
{
int i;
poly_bb_p pbb;
for (i = 0; VEC_iterate (poly_bb_p, SCOP_BBS (scop), i, pbb); i++)
store_scattering_pbb (pbb);
store_lst_schedule (scop);
}
/* Restores the scattering of PBB. */
static inline void
restore_scattering_pbb (poly_bb_p pbb)
{
gcc_assert (PBB_SAVED (pbb));
poly_scattering_free (PBB_TRANSFORMED (pbb));
PBB_TRANSFORMED (pbb) = poly_scattering_copy (PBB_SAVED (pbb));
}
/* Restores the scattering for all the pbbs in the SCOP. */
static inline void
restore_scattering (scop_p scop)
{
int i;
poly_bb_p pbb;
for (i = 0; VEC_iterate (poly_bb_p, SCOP_BBS (scop), i, pbb); i++)
restore_scattering_pbb (pbb);
restore_lst_schedule (scop);
}
/* For a given PBB, build in RES the intersection of the scop context,
the iteration domain, and the original scattering when ORIGINAL_P is
true, or the transformed scattering otherwise. */
static inline void
combine_context_id_scat (ppl_Pointset_Powerset_C_Polyhedron_t *res,
poly_bb_p pbb, bool original_p)
{
ppl_Pointset_Powerset_C_Polyhedron_t context;
ppl_Pointset_Powerset_C_Polyhedron_t id;
ppl_new_Pointset_Powerset_C_Polyhedron_from_C_Polyhedron
(res, original_p ?
PBB_ORIGINAL_SCATTERING (pbb) : PBB_TRANSFORMED_SCATTERING (pbb));
ppl_new_Pointset_Powerset_C_Polyhedron_from_Pointset_Powerset_C_Polyhedron
(&context, SCOP_CONTEXT (PBB_SCOP (pbb)));
ppl_new_Pointset_Powerset_C_Polyhedron_from_Pointset_Powerset_C_Polyhedron
(&id, PBB_DOMAIN (pbb));
/* Extend the context and the iteration domain to the dimension of
the scattering: T|I|G. */
{
ppl_dimension_type gdim, tdim, idim;
ppl_Pointset_Powerset_C_Polyhedron_space_dimension (*res, &tdim);
ppl_Pointset_Powerset_C_Polyhedron_space_dimension (context, &gdim);
ppl_Pointset_Powerset_C_Polyhedron_space_dimension (id, &idim);
if (tdim > gdim)
ppl_insert_dimensions_pointset (context, 0, tdim - gdim);
if (tdim > idim)
ppl_insert_dimensions_pointset (id, 0, tdim - idim);
}
/* Add the context and the iteration domain to the result. */
ppl_Pointset_Powerset_C_Polyhedron_intersection_assign (*res, context);
ppl_Pointset_Powerset_C_Polyhedron_intersection_assign (*res, id);
ppl_delete_Pointset_Powerset_C_Polyhedron (context);
ppl_delete_Pointset_Powerset_C_Polyhedron (id);
}
#endif
| {
"language": "C"
} |
/*****************************************************************************
Copyright (c) 2016, 2017, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License, version 2.0, as published by the
Free Software Foundation.
This program is also distributed with certain software (including but not
limited to OpenSSL) that is licensed under separate terms, as designated in a
particular file or component or in included license documentation. The authors
of MySQL hereby grant you an additional permission to link the program and
your derivative works with the separately licensed software that they have
included with MySQL.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*****************************************************************************/
#ifndef _buf0types_h_
#define _buf0types_h_
#include "lot0types.h"
typedef byte buf_frame_t;
#endif // _buf0types_h_
| {
"language": "C"
} |
/*
* Copyright © 2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <string.h>
#include "main/compiler.h"
#include "ir.h"
#include "glsl_types.h"
#include "program/hash_table.h"
ir_rvalue *
ir_rvalue::clone(void *mem_ctx, struct hash_table *ht) const
{
/* The only possible instantiation is the generic error value. */
return error_value(mem_ctx);
}
/**
* Duplicate an IR variable
*
* \note
* This will probably be made \c virtual and moved to the base class
* eventually.
*/
ir_variable *
ir_variable::clone(void *mem_ctx, struct hash_table *ht) const
{
ir_variable *var = new(mem_ctx) ir_variable(this->type, this->name,
(ir_variable_mode) this->mode);
var->max_array_access = this->max_array_access;
var->read_only = this->read_only;
var->centroid = this->centroid;
var->invariant = this->invariant;
var->interpolation = this->interpolation;
var->location = this->location;
var->index = this->index;
var->uniform_block = this->uniform_block;
var->warn_extension = this->warn_extension;
var->origin_upper_left = this->origin_upper_left;
var->pixel_center_integer = this->pixel_center_integer;
var->explicit_location = this->explicit_location;
var->explicit_index = this->explicit_index;
var->has_initializer = this->has_initializer;
var->depth_layout = this->depth_layout;
var->num_state_slots = this->num_state_slots;
if (this->state_slots) {
/* FINISHME: This really wants to use something like talloc_reference, but
* FINISHME: ralloc doesn't have any similar function.
*/
var->state_slots = ralloc_array(var, ir_state_slot,
this->num_state_slots);
memcpy(var->state_slots, this->state_slots,
sizeof(this->state_slots[0]) * var->num_state_slots);
}
if (this->constant_value)
var->constant_value = this->constant_value->clone(mem_ctx, ht);
if (this->constant_initializer)
var->constant_initializer =
this->constant_initializer->clone(mem_ctx, ht);
if (ht) {
hash_table_insert(ht, var, (void *)const_cast<ir_variable *>(this));
}
return var;
}
ir_swizzle *
ir_swizzle::clone(void *mem_ctx, struct hash_table *ht) const
{
return new(mem_ctx) ir_swizzle(this->val->clone(mem_ctx, ht), this->mask);
}
ir_return *
ir_return::clone(void *mem_ctx, struct hash_table *ht) const
{
ir_rvalue *new_value = NULL;
if (this->value)
new_value = this->value->clone(mem_ctx, ht);
return new(mem_ctx) ir_return(new_value);
}
ir_discard *
ir_discard::clone(void *mem_ctx, struct hash_table *ht) const
{
ir_rvalue *new_condition = NULL;
if (this->condition != NULL)
new_condition = this->condition->clone(mem_ctx, ht);
return new(mem_ctx) ir_discard(new_condition);
}
ir_loop_jump *
ir_loop_jump::clone(void *mem_ctx, struct hash_table *ht) const
{
(void)ht;
return new(mem_ctx) ir_loop_jump(this->mode);
}
ir_if *
ir_if::clone(void *mem_ctx, struct hash_table *ht) const
{
ir_if *new_if = new(mem_ctx) ir_if(this->condition->clone(mem_ctx, ht));
foreach_iter(exec_list_iterator, iter, this->then_instructions) {
ir_instruction *ir = (ir_instruction *)iter.get();
new_if->then_instructions.push_tail(ir->clone(mem_ctx, ht));
}
foreach_iter(exec_list_iterator, iter, this->else_instructions) {
ir_instruction *ir = (ir_instruction *)iter.get();
new_if->else_instructions.push_tail(ir->clone(mem_ctx, ht));
}
return new_if;
}
ir_loop *
ir_loop::clone(void *mem_ctx, struct hash_table *ht) const
{
ir_loop *new_loop = new(mem_ctx) ir_loop();
if (this->from)
new_loop->from = this->from->clone(mem_ctx, ht);
if (this->to)
new_loop->to = this->to->clone(mem_ctx, ht);
if (this->increment)
new_loop->increment = this->increment->clone(mem_ctx, ht);
new_loop->counter = counter;
foreach_iter(exec_list_iterator, iter, this->body_instructions) {
ir_instruction *ir = (ir_instruction *)iter.get();
new_loop->body_instructions.push_tail(ir->clone(mem_ctx, ht));
}
new_loop->cmp = this->cmp;
return new_loop;
}
ir_call *
ir_call::clone(void *mem_ctx, struct hash_table *ht) const
{
ir_dereference_variable *new_return_ref = NULL;
if (this->return_deref != NULL)
new_return_ref = this->return_deref->clone(mem_ctx, ht);
exec_list new_parameters;
foreach_iter(exec_list_iterator, iter, this->actual_parameters) {
ir_instruction *ir = (ir_instruction *)iter.get();
new_parameters.push_tail(ir->clone(mem_ctx, ht));
}
return new(mem_ctx) ir_call(this->callee, new_return_ref, &new_parameters);
}
ir_expression *
ir_expression::clone(void *mem_ctx, struct hash_table *ht) const
{
ir_rvalue *op[Elements(this->operands)] = { NULL, };
unsigned int i;
for (i = 0; i < get_num_operands(); i++) {
op[i] = this->operands[i]->clone(mem_ctx, ht);
}
return new(mem_ctx) ir_expression(this->operation, this->type,
op[0], op[1], op[2], op[3]);
}
ir_dereference_variable *
ir_dereference_variable::clone(void *mem_ctx, struct hash_table *ht) const
{
ir_variable *new_var;
if (ht) {
new_var = (ir_variable *)hash_table_find(ht, this->var);
if (!new_var)
new_var = this->var;
} else {
new_var = this->var;
}
return new(mem_ctx) ir_dereference_variable(new_var);
}
ir_dereference_array *
ir_dereference_array::clone(void *mem_ctx, struct hash_table *ht) const
{
return new(mem_ctx) ir_dereference_array(this->array->clone(mem_ctx, ht),
this->array_index->clone(mem_ctx,
ht));
}
ir_dereference_record *
ir_dereference_record::clone(void *mem_ctx, struct hash_table *ht) const
{
return new(mem_ctx) ir_dereference_record(this->record->clone(mem_ctx, ht),
this->field);
}
ir_texture *
ir_texture::clone(void *mem_ctx, struct hash_table *ht) const
{
ir_texture *new_tex = new(mem_ctx) ir_texture(this->op);
new_tex->type = this->type;
new_tex->sampler = this->sampler->clone(mem_ctx, ht);
if (this->coordinate)
new_tex->coordinate = this->coordinate->clone(mem_ctx, ht);
if (this->projector)
new_tex->projector = this->projector->clone(mem_ctx, ht);
if (this->shadow_comparitor) {
new_tex->shadow_comparitor = this->shadow_comparitor->clone(mem_ctx, ht);
}
if (this->offset != NULL)
new_tex->offset = this->offset->clone(mem_ctx, ht);
switch (this->op) {
case ir_tex:
break;
case ir_txb:
new_tex->lod_info.bias = this->lod_info.bias->clone(mem_ctx, ht);
break;
case ir_txl:
case ir_txf:
case ir_txs:
new_tex->lod_info.lod = this->lod_info.lod->clone(mem_ctx, ht);
break;
case ir_txd:
new_tex->lod_info.grad.dPdx = this->lod_info.grad.dPdx->clone(mem_ctx, ht);
new_tex->lod_info.grad.dPdy = this->lod_info.grad.dPdy->clone(mem_ctx, ht);
break;
}
return new_tex;
}
ir_assignment *
ir_assignment::clone(void *mem_ctx, struct hash_table *ht) const
{
ir_rvalue *new_condition = NULL;
if (this->condition)
new_condition = this->condition->clone(mem_ctx, ht);
return new(mem_ctx) ir_assignment(this->lhs->clone(mem_ctx, ht),
this->rhs->clone(mem_ctx, ht),
new_condition,
this->write_mask);
}
ir_function *
ir_function::clone(void *mem_ctx, struct hash_table *ht) const
{
ir_function *copy = new(mem_ctx) ir_function(this->name);
foreach_list_const(node, &this->signatures) {
const ir_function_signature *const sig =
(const ir_function_signature *const) node;
ir_function_signature *sig_copy = sig->clone(mem_ctx, ht);
copy->add_signature(sig_copy);
if (ht != NULL)
hash_table_insert(ht, sig_copy,
(void *)const_cast<ir_function_signature *>(sig));
}
return copy;
}
ir_function_signature *
ir_function_signature::clone(void *mem_ctx, struct hash_table *ht) const
{
ir_function_signature *copy = this->clone_prototype(mem_ctx, ht);
copy->is_defined = this->is_defined;
/* Clone the instruction list.
*/
foreach_list_const(node, &this->body) {
const ir_instruction *const inst = (const ir_instruction *) node;
ir_instruction *const inst_copy = inst->clone(mem_ctx, ht);
copy->body.push_tail(inst_copy);
}
return copy;
}
ir_function_signature *
ir_function_signature::clone_prototype(void *mem_ctx, struct hash_table *ht) const
{
ir_function_signature *copy =
new(mem_ctx) ir_function_signature(this->return_type);
copy->is_defined = false;
copy->is_builtin = this->is_builtin;
copy->origin = this;
/* Clone the parameter list, but NOT the body.
*/
foreach_list_const(node, &this->parameters) {
const ir_variable *const param = (const ir_variable *) node;
assert(const_cast<ir_variable *>(param)->as_variable() != NULL);
ir_variable *const param_copy = param->clone(mem_ctx, ht);
copy->parameters.push_tail(param_copy);
}
return copy;
}
ir_constant *
ir_constant::clone(void *mem_ctx, struct hash_table *ht) const
{
(void)ht;
switch (this->type->base_type) {
case GLSL_TYPE_UINT:
case GLSL_TYPE_INT:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_BOOL:
return new(mem_ctx) ir_constant(this->type, &this->value);
case GLSL_TYPE_STRUCT: {
ir_constant *c = new(mem_ctx) ir_constant;
c->type = this->type;
for (exec_node *node = this->components.head
; !node->is_tail_sentinel()
; node = node->next) {
ir_constant *const orig = (ir_constant *) node;
c->components.push_tail(orig->clone(mem_ctx, NULL));
}
return c;
}
case GLSL_TYPE_ARRAY: {
ir_constant *c = new(mem_ctx) ir_constant;
c->type = this->type;
c->array_elements = ralloc_array(c, ir_constant *, this->type->length);
for (unsigned i = 0; i < this->type->length; i++) {
c->array_elements[i] = this->array_elements[i]->clone(mem_ctx, NULL);
}
return c;
}
default:
assert(!"Should not get here.");
return NULL;
}
}
class fixup_ir_call_visitor : public ir_hierarchical_visitor {
public:
fixup_ir_call_visitor(struct hash_table *ht)
{
this->ht = ht;
}
virtual ir_visitor_status visit_enter(ir_call *ir)
{
/* Try to find the function signature referenced by the ir_call in the
* table. If it is found, replace it with the value from the table.
*/
ir_function_signature *sig =
(ir_function_signature *) hash_table_find(this->ht, ir->callee);
if (sig != NULL)
ir->callee = sig;
/* Since this may be used before function call parameters are flattened,
* the children also need to be processed.
*/
return visit_continue;
}
private:
struct hash_table *ht;
};
static void
fixup_function_calls(struct hash_table *ht, exec_list *instructions)
{
fixup_ir_call_visitor v(ht);
v.run(instructions);
}
void
clone_ir_list(void *mem_ctx, exec_list *out, const exec_list *in)
{
struct hash_table *ht =
hash_table_ctor(0, hash_table_pointer_hash, hash_table_pointer_compare);
foreach_list_const(node, in) {
const ir_instruction *const original = (ir_instruction *) node;
ir_instruction *copy = original->clone(mem_ctx, ht);
out->push_tail(copy);
}
/* Make a pass over the cloned tree to fix up ir_call nodes to point to the
* cloned ir_function_signature nodes. This cannot be done automatically
* during cloning because the ir_call might be a forward reference (i.e.,
* the function signature that it references may not have been cloned yet).
*/
fixup_function_calls(ht, out);
hash_table_dtor(ht);
}
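/* Usage sketch (the names mem_ctx and original_instructions are
 * placeholders, not from this file): clone a list of IR instructions
 * into a new memory context.
 *
 *    exec_list copy;
 *    clone_ir_list(mem_ctx, &copy, &original_instructions);
 */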
| {
"language": "C"
} |
/*
** $Id: llimits.h,v 1.52 2003/02/20 19:33:23 roberto Exp $
** Limits, basic types, and some other `installation-dependent' definitions
** See Copyright Notice in lua.h
*/
#ifndef llimits_h
#define llimits_h
#include <limits.h>
#include <stddef.h>
#include "lua.h"
/*
** try to find number of bits in an integer
*/
#ifndef BITS_INT
/* avoid overflows in comparison */
#if INT_MAX-20 < 32760
#define BITS_INT 16
#else
#if INT_MAX > 2147483640L
/* machine has at least 32 bits */
#define BITS_INT 32
#else
#error "you must define BITS_INT with number of bits in an integer"
#endif
#endif
#endif
/*
** the following types define integer types for values that may not
** fit in a `small int' (16 bits), but may waste space in a
** `large long' (64 bits). The current definitions should work in
** any machine, but may not be optimal.
*/
/* an unsigned integer to hold hash values */
typedef unsigned int lu_hash;
/* its signed equivalent */
typedef int ls_hash;
/* an unsigned integer big enough to count the total memory used by Lua; */
/* it should be at least as large as size_t */
typedef unsigned long lu_mem;
#define MAX_LUMEM ULONG_MAX
/* an integer big enough to count the number of strings in use */
typedef long ls_nstr;
/* chars used as small naturals (so that `char' is reserved for characters) */
typedef unsigned char lu_byte;
#define MAX_SIZET ((size_t)(~(size_t)0)-2)
#define MAX_INT (INT_MAX-2) /* maximum value of an int (-2 for safety) */
/*
** conversion of pointer to integer
** this is for hashing only; there is no problem if the integer
** cannot hold the whole pointer value
*/
#define IntPoint(p) ((lu_hash)(p))
/* type to ensure maximum alignment */
#ifndef LUSER_ALIGNMENT_T
typedef union { double u; void *s; long l; } L_Umaxalign;
#else
typedef LUSER_ALIGNMENT_T L_Umaxalign;
#endif
/* result of `usual argument conversion' over lua_Number */
#ifndef LUA_UACNUMBER
typedef double l_uacNumber;
#else
typedef LUA_UACNUMBER l_uacNumber;
#endif
#ifndef lua_assert
#define lua_assert(c) /* empty */
#endif
#ifndef check_exp
#define check_exp(c,e) (e)
#endif
#ifndef UNUSED
#define UNUSED(x) ((void)(x)) /* to avoid warnings */
#endif
#ifndef cast
#define cast(t, exp) ((t)(exp))
#endif
/*
** type for virtual-machine instructions
** must be an unsigned with (at least) 4 bytes (see details in lopcodes.h)
*/
typedef unsigned long Instruction;
/* maximum depth for calls (unsigned short) */
#ifndef LUA_MAXCALLS
#define LUA_MAXCALLS 4096
#endif
/*
** maximum depth for C calls (unsigned short): Not too big, or may
** overflow the C stack...
*/
#ifndef LUA_MAXCCALLS
#define LUA_MAXCCALLS 200
#endif
/* maximum size for the C stack */
#ifndef LUA_MAXCSTACK
#define LUA_MAXCSTACK 2048
#endif
/* maximum stack for a Lua function */
#define MAXSTACK 250
/* maximum number of variables declared in a function */
#ifndef MAXVARS
#define MAXVARS 200 /* arbitrary limit (<MAXSTACK) */
#endif
/* maximum number of upvalues per function */
#ifndef MAXUPVALUES
#define MAXUPVALUES 32
#endif
/* maximum number of parameters in a function */
#ifndef MAXPARAMS
#define MAXPARAMS 100 /* arbitrary limit (<MAXLOCALS) */
#endif
/* minimum size for the string table (must be power of 2) */
#ifndef MINSTRTABSIZE
#define MINSTRTABSIZE 32
#endif
/* minimum size for string buffer */
#ifndef LUA_MINBUFFER
#define LUA_MINBUFFER 32
#endif
/*
** maximum number of syntactical nested non-terminals: Not too big,
** or may overflow the C stack...
*/
#ifndef LUA_MAXPARSERLEVEL
#define LUA_MAXPARSERLEVEL 200
#endif
#endif
| {
"language": "C"
} |
/*
* gedit-progress-info-bar.h
* This file is part of gedit
*
* Copyright (C) 2005 - Paolo Maggi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef GEDIT_PROGRESS_INFO_BAR_H
#define GEDIT_PROGRESS_INFO_BAR_H
#include <gtk/gtk.h>
G_BEGIN_DECLS
#define GEDIT_TYPE_PROGRESS_INFO_BAR (gedit_progress_info_bar_get_type ())
G_DECLARE_FINAL_TYPE (GeditProgressInfoBar, gedit_progress_info_bar, GEDIT, PROGRESS_INFO_BAR, GtkInfoBar)
GtkWidget *gedit_progress_info_bar_new (const gchar *icon_name,
const gchar *markup,
gboolean has_cancel);
void gedit_progress_info_bar_set_icon_name (GeditProgressInfoBar *bar,
const gchar *icon_name);
void gedit_progress_info_bar_set_markup (GeditProgressInfoBar *bar,
const gchar *markup);
void gedit_progress_info_bar_set_text (GeditProgressInfoBar *bar,
const gchar *text);
void gedit_progress_info_bar_set_fraction (GeditProgressInfoBar *bar,
gdouble fraction);
void gedit_progress_info_bar_pulse (GeditProgressInfoBar *bar);
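/* Usage sketch (illustrative only; the icon name and markup below are
 * placeholders, not values required by this API):
 *
 *   GtkWidget *bar = gedit_progress_info_bar_new ("document-save-symbolic",
 *                                                 "<b>Saving...</b>", TRUE);
 *   gedit_progress_info_bar_set_fraction (GEDIT_PROGRESS_INFO_BAR (bar), 0.5);
 */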
G_END_DECLS
#endif /* GEDIT_PROGRESS_INFO_BAR_H */
/* ex:set ts=8 noet: */
| {
"language": "C"
} |
/*
* WPA Supplicant - driver interaction with Ralink Wireless Client
* Copyright (c) 2003-2006, Jouni Malinen <j@w1.fi>
* Copyright (c) 2007, Snowpin Lee <snowpin_lee@ralinktech.com.tw>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Alternatively, this software may be distributed under the terms of BSD
* license.
*
* See README and COPYING for more details.
*
*/
#include "includes.h"
#include <sys/ioctl.h>
#include "wireless_copy.h"
#include "common.h"
#include "driver.h"
#include "l2_packet/l2_packet.h"
#include "eloop.h"
#include "ieee802_11_defs.h"
#include "priv_netlink.h"
#include "driver_ralink.h"
static void wpa_driver_ralink_scan_timeout(void *eloop_ctx, void *timeout_ctx);
#define MAX_SSID_LEN 32
struct wpa_driver_ralink_data {
void *ctx;
int ioctl_sock;
int event_sock;
char ifname[IFNAMSIZ + 1];
u8 *assoc_req_ies;
size_t assoc_req_ies_len;
u8 *assoc_resp_ies;
size_t assoc_resp_ies_len;
int no_of_pmkid;
struct ndis_pmkid_entry *pmkid;
int we_version_compiled;
int ap_scan;
int scanning_done;
u8 g_driver_down;
};
static int ralink_set_oid(struct wpa_driver_ralink_data *drv,
unsigned short oid, char *data, int len)
{
char *buf;
struct iwreq iwr;
buf = os_zalloc(len);
if (buf == NULL)
return -1;
os_memset(&iwr, 0, sizeof(iwr));
os_strlcpy(iwr.ifr_name, drv->ifname, IFNAMSIZ);
iwr.u.data.flags = oid;
iwr.u.data.flags |= OID_GET_SET_TOGGLE;
if (data)
os_memcpy(buf, data, len);
iwr.u.data.pointer = (caddr_t) buf;
iwr.u.data.length = len;
if (ioctl(drv->ioctl_sock, RT_PRIV_IOCTL, &iwr) < 0) {
wpa_printf(MSG_DEBUG, "%s: oid=0x%x len (%d) failed",
__func__, oid, len);
os_free(buf);
return -1;
}
os_free(buf);
return 0;
}
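/* ralink_set_oid() is the generic "set an OID" helper used throughout
 * this driver; a typical call is the one made later by
 * wpa_driver_ralink_set_pmkid():
 *
 *    ralink_set_oid(drv, OID_802_11_PMKID, (char *) p, len);
 */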
static int
ralink_get_new_driver_flag(struct wpa_driver_ralink_data *drv)
{
struct iwreq iwr;
UCHAR enabled = 0;
os_memset(&iwr, 0, sizeof(iwr));
os_strlcpy(iwr.ifr_name, drv->ifname, IFNAMSIZ);
iwr.u.data.pointer = (UCHAR*) &enabled;
iwr.u.data.flags = RT_OID_NEW_DRIVER;
if (ioctl(drv->ioctl_sock, RT_PRIV_IOCTL, &iwr) < 0) {
wpa_printf(MSG_DEBUG, "%s: failed", __func__);
return 0;
}
return (enabled == 1) ? 1 : 0;
}
static int wpa_driver_ralink_get_bssid(void *priv, u8 *bssid)
{
struct wpa_driver_ralink_data *drv = priv;
struct iwreq iwr;
int ret = 0;
if (drv->g_driver_down == 1)
return -1;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
os_memset(&iwr, 0, sizeof(iwr));
os_strlcpy(iwr.ifr_name, drv->ifname, IFNAMSIZ);
if (ioctl(drv->ioctl_sock, SIOCGIWAP, &iwr) < 0) {
perror("ioctl[SIOCGIWAP]");
ret = -1;
}
os_memcpy(bssid, iwr.u.ap_addr.sa_data, ETH_ALEN);
return ret;
}
static int wpa_driver_ralink_get_ssid(void *priv, u8 *ssid)
{
struct wpa_driver_ralink_data *drv = priv;
#if 0
struct wpa_supplicant *wpa_s = drv->ctx;
struct wpa_ssid *entry;
#endif
int ssid_len;
u8 bssid[ETH_ALEN];
u8 ssid_str[MAX_SSID_LEN];
struct iwreq iwr;
#if 0
int result = 0;
#endif
int ret = 0;
#if 0
BOOLEAN ieee8021x_mode = FALSE;
BOOLEAN ieee8021x_required_key = FALSE;
#endif
if (drv->g_driver_down == 1)
return -1;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
os_memset(&iwr, 0, sizeof(iwr));
os_strlcpy(iwr.ifr_name, drv->ifname, IFNAMSIZ);
iwr.u.essid.pointer = (caddr_t) ssid;
iwr.u.essid.length = 32;
if (ioctl(drv->ioctl_sock, SIOCGIWESSID, &iwr) < 0) {
perror("ioctl[SIOCGIWESSID]");
ret = -1;
} else
ret = iwr.u.essid.length;
if (ret <= 0)
return ret;
ssid_len = ret;
os_memset(ssid_str, 0, MAX_SSID_LEN);
os_memcpy(ssid_str, ssid, ssid_len);
if (drv->ap_scan == 0) {
/* Read BSSID form driver */
if (wpa_driver_ralink_get_bssid(priv, bssid) < 0) {
wpa_printf(MSG_WARNING, "Could not read BSSID from "
"driver.");
return ret;
}
#if 0
entry = wpa_s->conf->ssid;
while (entry) {
if (!entry->disabled && ssid_len == entry->ssid_len &&
os_memcmp(ssid_str, entry->ssid, ssid_len) == 0 &&
(!entry->bssid_set ||
os_memcmp(bssid, entry->bssid, ETH_ALEN) == 0)) {
/* match the config of driver */
result = 1;
break;
}
entry = entry->next;
}
if (result) {
wpa_printf(MSG_DEBUG, "Ready to set 802.1x mode and "
"ieee_required_keys parameters to driver");
/* set 802.1x mode and ieee_required_keys parameter */
if (entry->key_mgmt == WPA_KEY_MGMT_IEEE8021X_NO_WPA) {
if ((entry->eapol_flags & (EAPOL_FLAG_REQUIRE_KEY_UNICAST | EAPOL_FLAG_REQUIRE_KEY_BROADCAST)))
ieee8021x_required_key = TRUE;
ieee8021x_mode = TRUE;
}
if (ralink_set_oid(drv, OID_802_11_SET_IEEE8021X, (char *) &ieee8021x_mode, sizeof(BOOLEAN)) < 0)
{
wpa_printf(MSG_DEBUG, "RALINK: Failed to set OID_802_11_SET_IEEE8021X(%d)", (int) ieee8021x_mode);
}
else
{
wpa_printf(MSG_DEBUG, "ieee8021x_mode is %s", ieee8021x_mode ? "TRUE" : "FALSE");
}
if (ralink_set_oid(drv, OID_802_11_SET_IEEE8021X_REQUIRE_KEY, (char *) &ieee8021x_required_key, sizeof(BOOLEAN)) < 0)
{
wpa_printf(MSG_DEBUG, "ERROR: Failed to set OID_802_11_SET_IEEE8021X_REQUIRE_KEY(%d)", (int) ieee8021x_required_key);
}
else
{
wpa_printf(MSG_DEBUG, "ieee8021x_required_key is %s and eapol_flag(%d)", ieee8021x_required_key ? "TRUE" : "FALSE",
entry->eapol_flags);
}
}
#endif
}
return ret;
}
static int wpa_driver_ralink_set_ssid(struct wpa_driver_ralink_data *drv,
const u8 *ssid, size_t ssid_len)
{
NDIS_802_11_SSID *buf;
int ret = 0;
struct iwreq iwr;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
buf = os_zalloc(sizeof(NDIS_802_11_SSID));
if (buf == NULL)
return -1;
os_memset(buf, 0, sizeof(NDIS_802_11_SSID));
buf->SsidLength = ssid_len;
os_memcpy(buf->Ssid, ssid, ssid_len);
os_memset(&iwr, 0, sizeof(iwr));
os_strlcpy(iwr.ifr_name, drv->ifname, IFNAMSIZ);
iwr.u.data.flags = OID_802_11_SSID;
iwr.u.data.flags |= OID_GET_SET_TOGGLE;
iwr.u.data.pointer = (caddr_t) buf;
iwr.u.data.length = sizeof(NDIS_802_11_SSID);
if (ioctl(drv->ioctl_sock, RT_PRIV_IOCTL, &iwr) < 0) {
perror("ioctl[RT_PRIV_IOCTL] -- OID_802_11_SSID");
ret = -1;
}
os_free(buf);
return ret;
}
static void wpa_driver_ralink_event_pmkid(struct wpa_driver_ralink_data *drv,
const u8 *data, size_t data_len)
{
NDIS_802_11_PMKID_CANDIDATE_LIST *pmkid;
size_t i;
union wpa_event_data event;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
if (data_len < 8) {
wpa_printf(MSG_DEBUG, "RALINK: Too short PMKID Candidate List "
"Event (len=%lu)", (unsigned long) data_len);
return;
}
pmkid = (NDIS_802_11_PMKID_CANDIDATE_LIST *) data;
wpa_printf(MSG_DEBUG, "RALINK: PMKID Candidate List Event - Version %d"
" NumCandidates %d",
(int) pmkid->Version, (int) pmkid->NumCandidates);
if (pmkid->Version != 1) {
wpa_printf(MSG_DEBUG, "RALINK: Unsupported PMKID Candidate "
"List Version %d", (int) pmkid->Version);
return;
}
if (data_len < 8 + pmkid->NumCandidates * sizeof(PMKID_CANDIDATE)) {
wpa_printf(MSG_DEBUG, "RALINK: PMKID Candidate List "
"underflow");
return;
}
os_memset(&event, 0, sizeof(event));
for (i = 0; i < pmkid->NumCandidates; i++) {
PMKID_CANDIDATE *p = &pmkid->CandidateList[i];
wpa_printf(MSG_DEBUG, "RALINK: %lu: " MACSTR " Flags 0x%x",
(unsigned long) i, MAC2STR(p->BSSID),
(int) p->Flags);
os_memcpy(event.pmkid_candidate.bssid, p->BSSID, ETH_ALEN);
event.pmkid_candidate.index = i;
event.pmkid_candidate.preauth =
p->Flags & NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
wpa_supplicant_event(drv->ctx, EVENT_PMKID_CANDIDATE,
&event);
}
}
static int wpa_driver_ralink_set_pmkid(struct wpa_driver_ralink_data *drv)
{
int len, count, i, ret;
struct ndis_pmkid_entry *entry;
NDIS_802_11_PMKID *p;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
count = 0;
entry = drv->pmkid;
while (entry) {
count++;
if (count >= drv->no_of_pmkid)
break;
entry = entry->next;
}
len = 8 + count * sizeof(BSSID_INFO);
p = os_zalloc(len);
if (p == NULL)
return -1;
p->Length = len;
p->BSSIDInfoCount = count;
entry = drv->pmkid;
for (i = 0; i < count; i++) {
os_memcpy(&p->BSSIDInfo[i].BSSID, entry->bssid, ETH_ALEN);
os_memcpy(&p->BSSIDInfo[i].PMKID, entry->pmkid, 16);
entry = entry->next;
}
wpa_hexdump(MSG_MSGDUMP, "NDIS: OID_802_11_PMKID",
(const u8 *) p, len);
ret = ralink_set_oid(drv, OID_802_11_PMKID, (char *) p, len);
os_free(p);
return ret;
}
static int wpa_driver_ralink_add_pmkid(void *priv, const u8 *bssid,
const u8 *pmkid)
{
struct wpa_driver_ralink_data *drv = priv;
struct ndis_pmkid_entry *entry, *prev;
if (drv->g_driver_down == 1)
return -1;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
if (drv->no_of_pmkid == 0)
return 0;
prev = NULL;
entry = drv->pmkid;
while (entry) {
if (os_memcmp(entry->bssid, bssid, ETH_ALEN) == 0)
break;
prev = entry;
entry = entry->next;
}
if (entry) {
/* Replace existing entry for this BSSID and move it into the
* beginning of the list. */
os_memcpy(entry->pmkid, pmkid, 16);
if (prev) {
prev->next = entry->next;
entry->next = drv->pmkid;
drv->pmkid = entry;
}
} else {
entry = os_malloc(sizeof(*entry));
if (entry) {
os_memcpy(entry->bssid, bssid, ETH_ALEN);
os_memcpy(entry->pmkid, pmkid, 16);
entry->next = drv->pmkid;
drv->pmkid = entry;
}
}
return wpa_driver_ralink_set_pmkid(drv);
}
static int wpa_driver_ralink_remove_pmkid(void *priv, const u8 *bssid,
const u8 *pmkid)
{
struct wpa_driver_ralink_data *drv = priv;
struct ndis_pmkid_entry *entry, *prev;
if (drv->g_driver_down == 1)
return -1;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
if (drv->no_of_pmkid == 0)
return 0;
entry = drv->pmkid;
prev = NULL;
drv->pmkid = NULL;
while (entry) {
if (os_memcmp(entry->bssid, bssid, ETH_ALEN) == 0 &&
os_memcmp(entry->pmkid, pmkid, 16) == 0) {
if (prev)
prev->next = entry->next;
else
drv->pmkid = entry->next;
os_free(entry);
break;
}
prev = entry;
entry = entry->next;
}
return wpa_driver_ralink_set_pmkid(drv);
}
static int wpa_driver_ralink_flush_pmkid(void *priv)
{
struct wpa_driver_ralink_data *drv = priv;
NDIS_802_11_PMKID p;
struct ndis_pmkid_entry *pmkid, *prev;
if (drv->g_driver_down == 1)
return -1;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
if (drv->no_of_pmkid == 0)
return 0;
pmkid = drv->pmkid;
drv->pmkid = NULL;
while (pmkid) {
prev = pmkid;
pmkid = pmkid->next;
os_free(prev);
}
os_memset(&p, 0, sizeof(p));
p.Length = 8;
p.BSSIDInfoCount = 0;
wpa_hexdump(MSG_MSGDUMP, "NDIS: OID_802_11_PMKID (flush)",
(const u8 *) &p, 8);
return ralink_set_oid(drv, OID_802_11_PMKID, (char *) &p, 8);
}
static void
wpa_driver_ralink_event_wireless_custom(struct wpa_driver_ralink_data *drv,
void *ctx, char *custom)
{
union wpa_event_data data;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
wpa_printf(MSG_DEBUG, "Custom wireless event: '%s'", custom);
os_memset(&data, 0, sizeof(data));
/* Host AP driver */
if (os_strncmp(custom, "MLME-MICHAELMICFAILURE.indication", 33) == 0) {
/* receive a MICFAILURE report */
data.michael_mic_failure.unicast =
os_strstr(custom, " unicast") != NULL;
/* TODO: parse parameters(?) */
wpa_supplicant_event(ctx, EVENT_MICHAEL_MIC_FAILURE, &data);
} else if (os_strncmp(custom, "ASSOCINFO_ReqIEs=", 17) == 0) {
/* receive assoc. req. IEs */
char *spos;
int bytes;
spos = custom + 17;
/* get IE's length */
/*
* bytes = strlen(spos); ==> bug, bytes may less than original
* size by using this way to get size. snowpin 20070312
* if (!bytes)
* return;
*/
bytes = drv->assoc_req_ies_len;
data.assoc_info.req_ies = os_malloc(bytes);
if (data.assoc_info.req_ies == NULL)
return;
data.assoc_info.req_ies_len = bytes;
os_memcpy(data.assoc_info.req_ies, spos, bytes);
/* skip the '\0' byte */
spos += bytes + 1;
data.assoc_info.resp_ies = NULL;
data.assoc_info.resp_ies_len = 0;
if (os_strncmp(spos, " RespIEs=", 9) == 0) {
/* receive assoc. resp. IEs */
spos += 9;
/* get IE's length */
bytes = os_strlen(spos);
if (!bytes)
goto done;
data.assoc_info.resp_ies = os_malloc(bytes);
if (data.assoc_info.resp_ies == NULL)
goto done;
data.assoc_info.resp_ies_len = bytes;
os_memcpy(data.assoc_info.resp_ies, spos, bytes);
}
wpa_supplicant_event(ctx, EVENT_ASSOCINFO, &data);
/* free allocated memory */
done:
os_free(data.assoc_info.resp_ies);
os_free(data.assoc_info.req_ies);
}
}
static void
wpa_driver_ralink_event_wireless(struct wpa_driver_ralink_data *drv,
void *ctx, char *data, int len)
{
struct iw_event iwe_buf, *iwe = &iwe_buf;
char *pos, *end, *custom, *buf, *assoc_info_buf, *info_pos;
#if 0
BOOLEAN ieee8021x_required_key = FALSE;
#endif
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
assoc_info_buf = info_pos = NULL;
pos = data;
end = data + len;
while (pos + IW_EV_LCP_LEN <= end) {
/* Event data may be unaligned, so make a local, aligned copy
* before processing. */
os_memcpy(&iwe_buf, pos, IW_EV_LCP_LEN);
wpa_printf(MSG_DEBUG, "Wireless event: cmd=0x%x len=%d",
iwe->cmd, iwe->len);
if (iwe->len <= IW_EV_LCP_LEN)
return;
custom = pos + IW_EV_POINT_LEN;
if (drv->we_version_compiled > 18 && iwe->cmd == IWEVCUSTOM) {
/* WE-19 removed the pointer from struct iw_point */
char *dpos = (char *) &iwe_buf.u.data.length;
int dlen = dpos - (char *) &iwe_buf;
os_memcpy(dpos, pos + IW_EV_LCP_LEN,
sizeof(struct iw_event) - dlen);
} else {
os_memcpy(&iwe_buf, pos, sizeof(struct iw_event));
custom += IW_EV_POINT_OFF;
}
switch (iwe->cmd) {
case IWEVCUSTOM:
if (custom + iwe->u.data.length > end)
return;
buf = os_malloc(iwe->u.data.length + 1);
if (buf == NULL)
return;
os_memcpy(buf, custom, iwe->u.data.length);
buf[iwe->u.data.length] = '\0';
if (drv->ap_scan == 1) {
if ((iwe->u.data.flags == RT_ASSOC_EVENT_FLAG)
|| (iwe->u.data.flags ==
RT_REQIE_EVENT_FLAG) ||
(iwe->u.data.flags == RT_RESPIE_EVENT_FLAG)
|| (iwe->u.data.flags ==
RT_ASSOCINFO_EVENT_FLAG)) {
if (drv->scanning_done == 0) {
os_free(buf);
return;
}
}
}
if (iwe->u.data.flags == RT_ASSOC_EVENT_FLAG) {
wpa_printf(MSG_DEBUG, "Custom wireless event: "
"receive ASSOCIATED_EVENT !!!");
/* determine whether the dynamic-WEP is used or
* not */
#if 0
if (wpa_s && wpa_s->current_ssid &&
wpa_s->current_ssid->key_mgmt ==
WPA_KEY_MGMT_IEEE8021X_NO_WPA) {
if ((wpa_s->current_ssid->eapol_flags &
(EAPOL_FLAG_REQUIRE_KEY_UNICAST | EAPOL_FLAG_REQUIRE_KEY_BROADCAST))) {
//wpa_printf(MSG_DEBUG, "The current ssid - (%s), eapol_flag = %d.\n",
// wpa_ssid_txt(wpa_s->current_ssid->ssid, wpa_s->current_ssid->ssid_len),wpa_s->current_ssid->eapol_flags);
ieee8021x_required_key = TRUE;
}
if (ralink_set_oid(drv, OID_802_11_SET_IEEE8021X_REQUIRE_KEY, (char *) &ieee8021x_required_key, sizeof(BOOLEAN)) < 0)
{
wpa_printf(MSG_DEBUG, "ERROR: Failed to set OID_802_11_SET_IEEE8021X_REQUIRE_KEY(%d)",
(int) ieee8021x_required_key);
}
wpa_printf(MSG_DEBUG, "ieee8021x_required_key is %s and eapol_flag(%d).\n", ieee8021x_required_key ? "TRUE" : "FALSE",
wpa_s->current_ssid->eapol_flags);
}
#endif
wpa_supplicant_event(ctx, EVENT_ASSOC, NULL);
} else if (iwe->u.data.flags == RT_REQIE_EVENT_FLAG) {
wpa_printf(MSG_DEBUG, "Custom wireless event: "
"receive ReqIEs !!!");
drv->assoc_req_ies =
os_malloc(iwe->u.data.length);
if (drv->assoc_req_ies == NULL) {
os_free(buf);
return;
}
drv->assoc_req_ies_len = iwe->u.data.length;
os_memcpy(drv->assoc_req_ies, custom,
iwe->u.data.length);
} else if (iwe->u.data.flags == RT_RESPIE_EVENT_FLAG) {
wpa_printf(MSG_DEBUG, "Custom wireless event: "
"receive RespIEs !!!");
drv->assoc_resp_ies =
os_malloc(iwe->u.data.length);
if (drv->assoc_resp_ies == NULL) {
os_free(drv->assoc_req_ies);
drv->assoc_req_ies = NULL;
os_free(buf);
return;
}
drv->assoc_resp_ies_len = iwe->u.data.length;
os_memcpy(drv->assoc_resp_ies, custom,
iwe->u.data.length);
} else if (iwe->u.data.flags ==
RT_ASSOCINFO_EVENT_FLAG) {
wpa_printf(MSG_DEBUG, "Custom wireless event: "
"receive ASSOCINFO_EVENT !!!");
assoc_info_buf =
os_zalloc(drv->assoc_req_ies_len +
drv->assoc_resp_ies_len + 1);
if (assoc_info_buf == NULL) {
os_free(drv->assoc_req_ies);
drv->assoc_req_ies = NULL;
os_free(drv->assoc_resp_ies);
drv->assoc_resp_ies = NULL;
os_free(buf);
return;
}
if (drv->assoc_req_ies) {
os_memcpy(assoc_info_buf,
drv->assoc_req_ies,
drv->assoc_req_ies_len);
}
info_pos = assoc_info_buf +
drv->assoc_req_ies_len;
if (drv->assoc_resp_ies) {
os_memcpy(info_pos,
drv->assoc_resp_ies,
drv->assoc_resp_ies_len);
}
assoc_info_buf[drv->assoc_req_ies_len +
drv->assoc_resp_ies_len] = '\0';
wpa_driver_ralink_event_wireless_custom(
drv, ctx, assoc_info_buf);
os_free(drv->assoc_req_ies);
drv->assoc_req_ies = NULL;
os_free(drv->assoc_resp_ies);
drv->assoc_resp_ies = NULL;
os_free(assoc_info_buf);
} else if (iwe->u.data.flags == RT_DISASSOC_EVENT_FLAG)
{
wpa_printf(MSG_DEBUG, "Custom wireless event: "
"receive DISASSOCIATED_EVENT !!!");
wpa_supplicant_event(ctx, EVENT_DISASSOC,
NULL);
} else if (iwe->u.data.flags == RT_PMKIDCAND_FLAG) {
wpa_printf(MSG_DEBUG, "Custom wireless event: "
"receive PMKIDCAND_EVENT !!!");
wpa_driver_ralink_event_pmkid(
drv, (const u8 *) custom,
iwe->u.data.length);
} else if (iwe->u.data.flags == RT_INTERFACE_DOWN) {
drv->g_driver_down = 1;
eloop_terminate();
} else if (iwe->u.data.flags == RT_REPORT_AP_INFO) {
if (drv->ap_scan != 1) {
typedef struct PACKED {
UCHAR bssid[MAC_ADDR_LEN];
UCHAR ssid[MAX_LEN_OF_SSID];
INT ssid_len;
UCHAR wpa_ie[40];
INT wpa_ie_len;
UCHAR rsn_ie[40];
INT rsn_ie_len;
INT freq;
USHORT caps;
} *PAPINFO;
wpa_printf(MSG_DEBUG, "Custom wireless"
" event: receive "
"RT_REPORT_AP_INFO !!!");
//printf("iwe->u.data.length = %d\n", iwe->u.data.length);
//wpa_hexdump(MSG_DEBUG, "AP_Info: ", buf, iwe->u.data.length);
#if 0
wpa_s->num_scan_results = 1;
if (wpa_s->scan_results)
os_free(wpa_s->scan_results);
wpa_s->scan_results = os_malloc(sizeof(struct wpa_scan_result) + 1);
if (wpa_s->scan_results) {
PAPINFO pApInfo = (PAPINFO)buf;
os_memcpy(wpa_s->scan_results[0].bssid, pApInfo->bssid, ETH_ALEN);
os_memcpy(wpa_s->scan_results[0].ssid, pApInfo->ssid, pApInfo->ssid_len);
wpa_s->scan_results[0].ssid_len = pApInfo->ssid_len;
if (pApInfo->wpa_ie_len > 0) {
os_memcpy(wpa_s->scan_results[0].wpa_ie, pApInfo->wpa_ie, pApInfo->wpa_ie_len);
wpa_s->scan_results[0].wpa_ie_len = pApInfo->wpa_ie_len;
} else if (pApInfo->rsn_ie_len > 0) {
os_memcpy(wpa_s->scan_results[0].rsn_ie, pApInfo->rsn_ie, pApInfo->rsn_ie_len);
wpa_s->scan_results[0].rsn_ie_len = pApInfo->rsn_ie_len;
}
wpa_s->scan_results[0].caps = pApInfo->caps;
wpa_s->scan_results[0].freq = pApInfo->freq;
} else {
wpa_printf("wpa_s->scan_"
"results fail to "
"os_malloc!!\n");
}
#endif
}
} else {
wpa_driver_ralink_event_wireless_custom(
drv, ctx, buf);
}
os_free(buf);
break;
}
pos += iwe->len;
}
}
static void
wpa_driver_ralink_event_rtm_newlink(struct wpa_driver_ralink_data *drv,
void *ctx, struct nlmsghdr *h, int len)
{
struct ifinfomsg *ifi;
int attrlen, nlmsg_len, rta_len;
struct rtattr * attr;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
if (len < (int) sizeof(*ifi))
return;
ifi = NLMSG_DATA(h);
wpa_hexdump(MSG_DEBUG, "ifi: ", (u8 *) ifi, sizeof(struct ifinfomsg));
nlmsg_len = NLMSG_ALIGN(sizeof(struct ifinfomsg));
attrlen = h->nlmsg_len - nlmsg_len;
wpa_printf(MSG_DEBUG, "attrlen=%d", attrlen);
if (attrlen < 0)
return;
attr = (struct rtattr *) (((char *) ifi) + nlmsg_len);
wpa_hexdump(MSG_DEBUG, "attr1: ", (u8 *) attr, sizeof(struct rtattr));
rta_len = RTA_ALIGN(sizeof(struct rtattr));
wpa_hexdump(MSG_DEBUG, "attr2: ", (u8 *)attr,rta_len);
while (RTA_OK(attr, attrlen)) {
wpa_printf(MSG_DEBUG, "rta_type=%02x\n", attr->rta_type);
if (attr->rta_type == IFLA_WIRELESS) {
wpa_driver_ralink_event_wireless(
drv, ctx,
((char *) attr) + rta_len,
attr->rta_len - rta_len);
}
attr = RTA_NEXT(attr, attrlen);
wpa_hexdump(MSG_DEBUG, "attr3: ",
(u8 *) attr, sizeof(struct rtattr));
}
}
static void wpa_driver_ralink_event_receive(int sock, void *ctx,
void *sock_ctx)
{
char buf[8192];
int left;
struct sockaddr_nl from;
socklen_t fromlen;
struct nlmsghdr *h;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
fromlen = sizeof(from);
left = recvfrom(sock, buf, sizeof(buf), MSG_DONTWAIT,
(struct sockaddr *) &from, &fromlen);
if (left < 0) {
if (errno != EINTR && errno != EAGAIN)
perror("recvfrom(netlink)");
return;
}
h = (struct nlmsghdr *) buf;
wpa_hexdump(MSG_DEBUG, "h: ", (u8 *)h, h->nlmsg_len);
while (left >= (int) sizeof(*h)) {
int len, plen;
len = h->nlmsg_len;
plen = len - sizeof(*h);
if (len > left || plen < 0) {
wpa_printf(MSG_DEBUG, "Malformed netlink message: "
"len=%d left=%d plen=%d", len, left, plen);
break;
}
switch (h->nlmsg_type) {
case RTM_NEWLINK:
wpa_driver_ralink_event_rtm_newlink(ctx, sock_ctx, h,
plen);
break;
}
len = NLMSG_ALIGN(len);
left -= len;
h = (struct nlmsghdr *) ((char *) h + len);
}
if (left > 0) {
wpa_printf(MSG_DEBUG, "%d extra bytes in the end of netlink "
"message", left);
}
}
static int
ralink_get_we_version_compiled(struct wpa_driver_ralink_data *drv)
{
struct iwreq iwr;
UINT we_version_compiled = 0;
os_memset(&iwr, 0, sizeof(iwr));
os_strlcpy(iwr.ifr_name, drv->ifname, IFNAMSIZ);
iwr.u.data.pointer = (caddr_t) &we_version_compiled;
iwr.u.data.flags = RT_OID_WE_VERSION_COMPILED;
if (ioctl(drv->ioctl_sock, RT_PRIV_IOCTL, &iwr) < 0) {
wpa_printf(MSG_DEBUG, "%s: failed", __func__);
return -1;
}
drv->we_version_compiled = we_version_compiled;
return 0;
}
static int
ralink_set_iface_flags(void *priv, int dev_up)
{
struct wpa_driver_ralink_data *drv = priv;
struct ifreq ifr;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
if (drv->ioctl_sock < 0)
return -1;
os_memset(&ifr, 0, sizeof(ifr));
os_snprintf(ifr.ifr_name, IFNAMSIZ, "%s", drv->ifname);
if (ioctl(drv->ioctl_sock, SIOCGIFFLAGS, &ifr) != 0) {
perror("ioctl[SIOCGIFFLAGS]");
return -1;
}
if (dev_up)
ifr.ifr_flags |= IFF_UP;
else
ifr.ifr_flags &= ~IFF_UP;
if (ioctl(drv->ioctl_sock, SIOCSIFFLAGS, &ifr) != 0) {
perror("ioctl[SIOCSIFFLAGS]");
return -1;
}
return 0;
}
static void * wpa_driver_ralink_init(void *ctx, const char *ifname)
{
int s;
struct wpa_driver_ralink_data *drv;
struct ifreq ifr;
struct sockaddr_nl local;
UCHAR enable_wpa_supplicant = 0;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
/* open socket to kernel */
if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0) {
perror("socket");
return NULL;
}
/* do it */
os_strlcpy(ifr.ifr_name, ifname, IFNAMSIZ);
if (ioctl(s, SIOCGIFINDEX, &ifr) < 0) {
perror(ifr.ifr_name);
close(s);
return NULL;
}
drv = os_zalloc(sizeof(*drv));
if (drv == NULL)
return NULL;
drv->scanning_done = 1;
drv->ap_scan = 1; /* for now - let's assume ap_scan=1 is used */
drv->ctx = ctx;
os_strlcpy(drv->ifname, ifname, sizeof(drv->ifname));
drv->ioctl_sock = s;
drv->g_driver_down = 0;
s = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if (s < 0) {
perror("socket(PF_NETLINK,SOCK_RAW,NETLINK_ROUTE)");
close(drv->ioctl_sock);
os_free(drv);
return NULL;
}
os_memset(&local, 0, sizeof(local));
local.nl_family = AF_NETLINK;
local.nl_groups = RTMGRP_LINK;
if (bind(s, (struct sockaddr *) &local, sizeof(local)) < 0) {
perror("bind(netlink)");
close(s);
close(drv->ioctl_sock);
os_free(drv);
return NULL;
}
eloop_register_read_sock(s, wpa_driver_ralink_event_receive, drv, ctx);
drv->event_sock = s;
drv->no_of_pmkid = 4; /* number of PMKID entries the driver can cache */
ralink_set_iface_flags(drv, 1); /* mark up during setup */
ralink_get_we_version_compiled(drv);
wpa_driver_ralink_flush_pmkid(drv);
if (drv->ap_scan == 1)
enable_wpa_supplicant = 1;
else
enable_wpa_supplicant = 2;
/* trigger driver support wpa_supplicant */
if (ralink_set_oid(drv, RT_OID_WPA_SUPPLICANT_SUPPORT,
(PCHAR) &enable_wpa_supplicant, sizeof(UCHAR)) < 0)
{
wpa_printf(MSG_DEBUG, "RALINK: Failed to set "
"RT_OID_WPA_SUPPLICANT_SUPPORT(%d)",
(int) enable_wpa_supplicant);
wpa_printf(MSG_ERROR, "RALINK: Driver does not support "
"wpa_supplicant");
close(s);
close(drv->ioctl_sock);
os_free(drv);
return NULL;
}
if (drv->ap_scan == 1)
drv->scanning_done = 0;
return drv;
}
static void wpa_driver_ralink_deinit(void *priv)
{
struct wpa_driver_ralink_data *drv = priv;
UCHAR enable_wpa_supplicant;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
enable_wpa_supplicant = 0;
if (drv->g_driver_down == 0) {
/* trigger driver disable wpa_supplicant support */
if (ralink_set_oid(drv, RT_OID_WPA_SUPPLICANT_SUPPORT,
(char *) &enable_wpa_supplicant,
sizeof(BOOLEAN)) < 0) {
wpa_printf(MSG_DEBUG, "RALINK: Failed to set "
"RT_OID_WPA_SUPPLICANT_SUPPORT(%d)",
(int) enable_wpa_supplicant);
}
wpa_driver_ralink_flush_pmkid(drv);
sleep(1);
ralink_set_iface_flags(drv, 0);
}
eloop_cancel_timeout(wpa_driver_ralink_scan_timeout, drv, drv->ctx);
eloop_unregister_read_sock(drv->event_sock);
close(drv->event_sock);
close(drv->ioctl_sock);
os_free(drv);
}
static void wpa_driver_ralink_scan_timeout(void *eloop_ctx, void *timeout_ctx)
{
struct wpa_driver_ralink_data *drv = eloop_ctx;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
wpa_printf(MSG_DEBUG, "Scan timeout - try to get results");
wpa_supplicant_event(timeout_ctx, EVENT_SCAN_RESULTS, NULL);
drv->scanning_done = 1;
}
static int wpa_driver_ralink_scan(void *priv, const u8 *ssid, size_t ssid_len)
{
struct wpa_driver_ralink_data *drv = priv;
struct iwreq iwr;
int ret = 0;
if (drv->g_driver_down == 1)
return -1;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
if (ssid_len > IW_ESSID_MAX_SIZE) {
wpa_printf(MSG_DEBUG, "%s: too long SSID (%lu)",
__FUNCTION__, (unsigned long) ssid_len);
return -1;
}
/* wpa_driver_ralink_set_ssid(drv, ssid, ssid_len); */
os_memset(&iwr, 0, sizeof(iwr));
os_strlcpy(iwr.ifr_name, drv->ifname, IFNAMSIZ);
if (ioctl(drv->ioctl_sock, SIOCSIWSCAN, &iwr) < 0) {
perror("ioctl[SIOCSIWSCAN]");
ret = -1;
}
/* Not all drivers generate "scan completed" wireless event, so try to
* read results after a timeout. */
eloop_cancel_timeout(wpa_driver_ralink_scan_timeout, drv, drv->ctx);
eloop_register_timeout(4, 0, wpa_driver_ralink_scan_timeout, drv,
drv->ctx);
drv->scanning_done = 0;
return ret;
}
static int
wpa_driver_ralink_get_scan_results(void *priv,
struct wpa_scan_result *results,
size_t max_size)
{
struct wpa_driver_ralink_data *drv = priv;
UCHAR *buf = NULL;
NDIS_802_11_BSSID_LIST_EX *wsr;
NDIS_WLAN_BSSID_EX *wbi;
struct iwreq iwr;
int rv = 0;
size_t ap_num;
u8 *pos, *end;
if (drv->g_driver_down == 1)
return -1;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
os_memset(&iwr, 0, sizeof(iwr));
if (drv->we_version_compiled >= 17) {
buf = os_zalloc(8192);
iwr.u.data.length = 8192;
} else {
buf = os_zalloc(4096);
iwr.u.data.length = 4096;
}
if (buf == NULL)
return -1;
wsr = (NDIS_802_11_BSSID_LIST_EX *) buf;
wsr->NumberOfItems = 0;
os_strlcpy(iwr.ifr_name, drv->ifname, IFNAMSIZ);
iwr.u.data.pointer = (void *) buf;
iwr.u.data.flags = OID_802_11_BSSID_LIST;
if ((rv = ioctl(drv->ioctl_sock, RT_PRIV_IOCTL, &iwr)) < 0) {
wpa_printf(MSG_DEBUG, "ioctl fail: rv = %d", rv);
os_free(buf);
return -1;
}
os_memset(results, 0, max_size * sizeof(struct wpa_scan_result));
for (ap_num = 0, wbi = wsr->Bssid;
ap_num < wsr->NumberOfItems && ap_num < max_size; ++ap_num) {
os_memcpy(results[ap_num].bssid, &wbi->MacAddress, ETH_ALEN);
os_memcpy(results[ap_num].ssid, wbi->Ssid.Ssid,
wbi->Ssid.SsidLength);
results[ap_num].ssid_len = wbi->Ssid.SsidLength;
results[ap_num].freq = (wbi->Configuration.DSConfig / 1000);
/* get ie's */
wpa_hexdump(MSG_DEBUG, "RALINK: AP IEs",
(u8 *) wbi + sizeof(*wbi) - 1, wbi->IELength);
pos = (u8 *) wbi + sizeof(*wbi) - 1;
end = (u8 *) wbi + sizeof(*wbi) + wbi->IELength;
if (wbi->IELength < sizeof(NDIS_802_11_FIXED_IEs))
break;
pos += sizeof(NDIS_802_11_FIXED_IEs) - 2;
os_memcpy(&results[ap_num].caps, pos, 2);
pos += 2;
while (pos + 1 < end && pos + 2 + pos[1] <= end) {
u8 ielen = 2 + pos[1];
if (ielen > SSID_MAX_WPA_IE_LEN) {
pos += ielen;
continue;
}
if (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
pos[1] >= 4 &&
os_memcmp(pos + 2, "\x00\x50\xf2\x01", 4) == 0) {
os_memcpy(results[ap_num].wpa_ie, pos, ielen);
results[ap_num].wpa_ie_len = ielen;
} else if (pos[0] == WLAN_EID_RSN) {
os_memcpy(results[ap_num].rsn_ie, pos, ielen);
results[ap_num].rsn_ie_len = ielen;
}
pos += ielen;
}
wbi = (NDIS_WLAN_BSSID_EX *) ((u8 *) wbi + wbi->Length);
}
os_free(buf);
return ap_num;
}
static int ralink_set_auth_mode(struct wpa_driver_ralink_data *drv,
NDIS_802_11_AUTHENTICATION_MODE mode)
{
NDIS_802_11_AUTHENTICATION_MODE auth_mode = mode;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
if (ralink_set_oid(drv, OID_802_11_AUTHENTICATION_MODE,
(char *) &auth_mode, sizeof(auth_mode)) < 0) {
wpa_printf(MSG_DEBUG, "RALINK: Failed to set "
"OID_802_11_AUTHENTICATION_MODE (%d)",
(int) auth_mode);
return -1;
}
return 0;
}
static int wpa_driver_ralink_remove_key(struct wpa_driver_ralink_data *drv,
int key_idx, const u8 *addr,
const u8 *bssid, int pairwise)
{
NDIS_802_11_REMOVE_KEY rkey;
NDIS_802_11_KEY_INDEX _index;
int res, res2;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
os_memset(&rkey, 0, sizeof(rkey));
rkey.Length = sizeof(rkey);
rkey.KeyIndex = key_idx;
if (pairwise)
rkey.KeyIndex |= 1 << 30;
os_memcpy(rkey.BSSID, bssid, ETH_ALEN);
res = ralink_set_oid(drv, OID_802_11_REMOVE_KEY, (char *) &rkey,
sizeof(rkey));
/* AlbertY@20060210 removed it */
if (0 /* !pairwise */) {
res2 = ralink_set_oid(drv, OID_802_11_REMOVE_WEP,
(char *) &_index, sizeof(_index));
} else
res2 = 0;
if (res < 0 && res2 < 0)
return res;
return 0;
}
static int wpa_driver_ralink_add_wep(struct wpa_driver_ralink_data *drv,
int pairwise, int key_idx, int set_tx,
const u8 *key, size_t key_len)
{
NDIS_802_11_WEP *wep;
size_t len;
int res;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
len = 12 + key_len;
wep = os_zalloc(len);
if (wep == NULL)
return -1;
wep->Length = len;
wep->KeyIndex = key_idx;
if (set_tx)
wep->KeyIndex |= 0x80000000;
wep->KeyLength = key_len;
os_memcpy(wep->KeyMaterial, key, key_len);
wpa_hexdump_key(MSG_MSGDUMP, "RALINK: OID_802_11_ADD_WEP",
(const u8 *) wep, len);
res = ralink_set_oid(drv, OID_802_11_ADD_WEP, (char *) wep, len);
os_free(wep);
return res;
}
static int wpa_driver_ralink_set_key(void *priv, wpa_alg alg, const u8 *addr,
int key_idx, int set_tx,
const u8 *seq, size_t seq_len,
const u8 *key, size_t key_len)
{
struct wpa_driver_ralink_data *drv = priv;
size_t len, i;
NDIS_802_11_KEY *nkey;
int res, pairwise;
u8 bssid[ETH_ALEN];
if (drv->g_driver_down == 1)
return -1;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
if (addr == NULL || os_memcmp(addr, "\xff\xff\xff\xff\xff\xff",
ETH_ALEN) == 0) {
/* Group Key */
pairwise = 0;
wpa_driver_ralink_get_bssid(drv, bssid);
} else {
/* Pairwise Key */
pairwise = 1;
os_memcpy(bssid, addr, ETH_ALEN);
}
if (alg == WPA_ALG_NONE || key_len == 0) {
return wpa_driver_ralink_remove_key(drv, key_idx, addr, bssid,
pairwise);
}
if (alg == WPA_ALG_WEP) {
return wpa_driver_ralink_add_wep(drv, pairwise, key_idx,
set_tx, key, key_len);
}
len = 12 + 6 + 6 + 8 + key_len;
nkey = os_zalloc(len);
if (nkey == NULL)
return -1;
nkey->Length = len;
nkey->KeyIndex = key_idx;
if (set_tx)
nkey->KeyIndex |= 1 << 31;
if (pairwise)
nkey->KeyIndex |= 1 << 30;
if (seq && seq_len)
nkey->KeyIndex |= 1 << 29;
nkey->KeyLength = key_len;
os_memcpy(nkey->BSSID, bssid, ETH_ALEN);
if (seq && seq_len) {
for (i = 0; i < seq_len; i++)
nkey->KeyRSC |= seq[i] << (i * 8);
}
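/*
* Note (general background, not Ralink-specific documentation): the
* supplicant and this NDIS-style key blob use opposite ordering for the
* two 8-octet TKIP Michael MIC halves, which is why the MIC portions of
* a 32-octet TKIP key are swapped below while the 16-octet TK stays in
* place.
*/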
if (alg == WPA_ALG_TKIP && key_len == 32) {
os_memcpy(nkey->KeyMaterial, key, 16);
os_memcpy(nkey->KeyMaterial + 16, key + 24, 8);
os_memcpy(nkey->KeyMaterial + 24, key + 16, 8);
} else {
os_memcpy(nkey->KeyMaterial, key, key_len);
}
wpa_printf(MSG_DEBUG, "%s: alg=%d key_idx=%d set_tx=%d seq_len=%lu "
"key_len=%lu", __FUNCTION__, alg, key_idx, set_tx,
(unsigned long) seq_len, (unsigned long) key_len);
wpa_hexdump_key(MSG_MSGDUMP, "RALINK: OID_802_11_ADD_KEY",
(const u8 *) nkey, len);
res = ralink_set_oid(drv, OID_802_11_ADD_KEY, (char *) nkey, len);
os_free(nkey);
return res;
}
static int wpa_driver_ralink_disassociate(void *priv, const u8 *addr,
int reason_code)
{
struct wpa_driver_ralink_data *drv = priv;
if (drv->g_driver_down == 1)
return -1;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
if (ralink_set_oid(drv, OID_802_11_DISASSOCIATE, " ", 4) < 0) {
wpa_printf(MSG_DEBUG, "RALINK: Failed to set "
"OID_802_11_DISASSOCIATE");
}
return 0;
}
static int wpa_driver_ralink_deauthenticate(void *priv, const u8 *addr,
int reason_code)
{
struct wpa_driver_ralink_data *drv = priv;
wpa_printf(MSG_DEBUG, "g_driver_down = %d", drv->g_driver_down);
if (drv->g_driver_down == 1)
return -1;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
if (ralink_get_new_driver_flag(drv) == 0) {
return wpa_driver_ralink_disassociate(priv, addr, reason_code);
} else {
MLME_DEAUTH_REQ_STRUCT mlme;
os_memset(&mlme, 0, sizeof(MLME_DEAUTH_REQ_STRUCT));
mlme.Reason = reason_code;
os_memcpy(mlme.Addr, addr, MAC_ADDR_LEN);
return ralink_set_oid(drv, OID_802_11_DEAUTHENTICATION,
(char *) &mlme,
sizeof(MLME_DEAUTH_REQ_STRUCT));
}
}
static int
wpa_driver_ralink_associate(void *priv,
struct wpa_driver_associate_params *params)
{
struct wpa_driver_ralink_data *drv = priv;
NDIS_802_11_NETWORK_INFRASTRUCTURE mode;
NDIS_802_11_AUTHENTICATION_MODE auth_mode;
NDIS_802_11_WEP_STATUS encr;
BOOLEAN ieee8021xMode;
if (drv->g_driver_down == 1)
return -1;
wpa_printf(MSG_DEBUG, "%s", __FUNCTION__);
if (params->mode == IEEE80211_MODE_IBSS)
mode = Ndis802_11IBSS;
else
mode = Ndis802_11Infrastructure;
if (ralink_set_oid(drv, OID_802_11_INFRASTRUCTURE_MODE,
(char *) &mode, sizeof(mode)) < 0) {
wpa_printf(MSG_DEBUG, "RALINK: Failed to set "
"OID_802_11_INFRASTRUCTURE_MODE (%d)",
(int) mode);
/* Try to continue anyway */
}
if (params->wpa_ie == NULL || params->wpa_ie_len == 0) {
if (params->auth_alg & AUTH_ALG_SHARED_KEY) {
if (params->auth_alg & AUTH_ALG_OPEN_SYSTEM)
auth_mode = Ndis802_11AuthModeAutoSwitch;
else
auth_mode = Ndis802_11AuthModeShared;
} else
auth_mode = Ndis802_11AuthModeOpen;
} else if (params->wpa_ie[0] == WLAN_EID_RSN) {
if (params->key_mgmt_suite == KEY_MGMT_PSK)
auth_mode = Ndis802_11AuthModeWPA2PSK;
else
auth_mode = Ndis802_11AuthModeWPA2;
} else {
if (params->key_mgmt_suite == KEY_MGMT_WPA_NONE)
auth_mode = Ndis802_11AuthModeWPANone;
else if (params->key_mgmt_suite == KEY_MGMT_PSK)
auth_mode = Ndis802_11AuthModeWPAPSK;
else
auth_mode = Ndis802_11AuthModeWPA;
}
switch (params->pairwise_suite) {
case CIPHER_CCMP:
encr = Ndis802_11Encryption3Enabled;
break;
case CIPHER_TKIP:
encr = Ndis802_11Encryption2Enabled;
break;
case CIPHER_WEP40:
case CIPHER_WEP104:
encr = Ndis802_11Encryption1Enabled;
break;
case CIPHER_NONE:
if (params->group_suite == CIPHER_CCMP)
encr = Ndis802_11Encryption3Enabled;
else if (params->group_suite == CIPHER_TKIP)
encr = Ndis802_11Encryption2Enabled;
else
encr = Ndis802_11EncryptionDisabled;
break;
default:
encr = Ndis802_11EncryptionDisabled;
break;
}
ralink_set_auth_mode(drv, auth_mode);
/* notify driver that IEEE8021x mode is enabled */
if (params->key_mgmt_suite == KEY_MGMT_802_1X_NO_WPA)
ieee8021xMode = TRUE;
else
ieee8021xMode = FALSE;
if (ralink_set_oid(drv, OID_802_11_SET_IEEE8021X,
(char *) &ieee8021xMode, sizeof(BOOLEAN)) < 0) {
wpa_printf(MSG_DEBUG, "RALINK: Failed to set "
"OID_802_11_SET_IEEE8021X(%d)",
(int) ieee8021xMode);
}
if (ralink_set_oid(drv, OID_802_11_WEP_STATUS,
(char *) &encr, sizeof(encr)) < 0) {
wpa_printf(MSG_DEBUG, "RALINK: Failed to set "
"OID_802_11_WEP_STATUS(%d)",
(int) encr);
}
if ((ieee8021xMode == FALSE) &&
(encr == Ndis802_11Encryption1Enabled)) {
/* static WEP */
int enabled = 0;
if (ralink_set_oid(drv, OID_802_11_DROP_UNENCRYPTED,
(char *) &enabled, sizeof(enabled)) < 0) {
wpa_printf(MSG_DEBUG, "RALINK: Failed to set "
"OID_802_11_DROP_UNENCRYPTED(%d)",
(int) encr);
}
}
return wpa_driver_ralink_set_ssid(drv, params->ssid, params->ssid_len);
}
static int
wpa_driver_ralink_set_countermeasures(void *priv, int enabled)
{
struct wpa_driver_ralink_data *drv = priv;
if (drv->g_driver_down == 1)
return -1;
wpa_printf(MSG_DEBUG, "%s: enabled=%d", __FUNCTION__, enabled);
return ralink_set_oid(drv, OID_SET_COUNTERMEASURES, (char *) &enabled,
sizeof(int));
}
const struct wpa_driver_ops wpa_driver_ralink_ops = {
.name = "ralink",
.desc = "Ralink Wireless Client driver",
.get_bssid = wpa_driver_ralink_get_bssid,
.get_ssid = wpa_driver_ralink_get_ssid,
.set_key = wpa_driver_ralink_set_key,
.init = wpa_driver_ralink_init,
.deinit = wpa_driver_ralink_deinit,
.set_countermeasures = wpa_driver_ralink_set_countermeasures,
.scan = wpa_driver_ralink_scan,
.get_scan_results = wpa_driver_ralink_get_scan_results,
.deauthenticate = wpa_driver_ralink_deauthenticate,
.disassociate = wpa_driver_ralink_disassociate,
.associate = wpa_driver_ralink_associate,
.add_pmkid = wpa_driver_ralink_add_pmkid,
.remove_pmkid = wpa_driver_ralink_remove_pmkid,
.flush_pmkid = wpa_driver_ralink_flush_pmkid,
};
//
// Project:
//
// DX9 "Omnidirectional Shadow Mapping" Demo
//
// Effect Version:
//
// DirectX 9.0
//
// Shader Profile:
//
// Pixel Shaders 2.0 ( Pixel Shader Assembler )
//
// Vertex Shaders 2.0 ( Vertex Shader Assembler )
//
//
// Global Variables
//
#if HALF_PRECISION
#define PHG_FLOAT half
#define PHG_FLOAT2 vector<half, 2>
#define PHG_FLOAT3 vector<half, 3>
#define PHG_FLOAT4 vector<half, 4>
#define PHG_FLOAT4x4 matrix<half, 4, 4>
#else
#define PHG_FLOAT float
#define PHG_FLOAT2 vector<float, 2>
#define PHG_FLOAT3 vector<float, 3>
#define PHG_FLOAT4 vector<float, 4>
#define PHG_FLOAT4x4 matrix<float, 4, 4>
#endif
PHG_FLOAT4 vZero = { 0.0, 0.0, 0.0, 0.0 };
PHG_FLOAT4 vOne = { 1.0, 1.0, 1.0, 1.0 };
PHG_FLOAT fGeometryScale;
PHG_FLOAT4x4 mW; // World Matrix
PHG_FLOAT4x4 mWVP; // World * View * Projection Matrix
PHG_FLOAT4 vLightPosition; // Light Position
//
// Shaders
//
struct VS_OUTPUT
{
PHG_FLOAT4 vPosition : POSITION;
PHG_FLOAT3 vVertexToLight : TEXCOORD0;
};
// Default Vertex Shader
VS_OUTPUT DefaultVertexShader( PHG_FLOAT4 vPosition : POSITION )
{
VS_OUTPUT o;
o.vPosition = mul( vPosition, mWVP );
o.vVertexToLight = mul( vPosition, mW ).xyz * fGeometryScale - vLightPosition.xyz;
return o;
}
// Default Pixel Shader
PHG_FLOAT4 DefaultPixelShader ( PHG_FLOAT3 vVertexToLight : TEXCOORD0 ) : COLOR
{
#if PACKED_DEPTH
PHG_FLOAT fDepth = dot( vVertexToLight, vVertexToLight );
return PHG_FLOAT4( floor(fDepth) / 256.0, frac(fDepth), frac(fDepth), frac(fDepth) );
#else
return dot( vVertexToLight, vVertexToLight );
#endif
}
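// Note (illustrative): with PACKED_DEPTH the squared light-to-vertex
// distance d is stored as R = floor(d)/256 and G = B = A = frac(d), so a
// reader of this render target can reconstruct it as d ~= R*256 + G
// (the fractional part is duplicated in the remaining channels).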
// Default Technique
technique TDefault
{
pass p0
{
// Common States
// CullMode = CCW;
// AlphaBlendEnable = False;
// ZWriteEnable = True;
// Shaders
PixelShader = compile ps_2_0 DefaultPixelShader();
vertexshader = compile vs_2_0 DefaultVertexShader();
}
}
// End of file
/*
* max8997.c - Regulator driver for the Maxim 8997/8966
*
* Copyright (C) 2011 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@smasung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* This driver is based on max8998.c
*/
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/max8997.h>
#include <linux/mfd/max8997-private.h>
struct max8997_data {
struct device *dev;
struct max8997_dev *iodev;
int num_regulators;
struct regulator_dev **rdev;
int ramp_delay; /* in mV/us */
bool buck1_gpiodvs;
bool buck2_gpiodvs;
bool buck5_gpiodvs;
u8 buck1_vol[8];
u8 buck2_vol[8];
u8 buck5_vol[8];
int buck125_gpios[3];
int buck125_gpioindex;
bool ignore_gpiodvs_side_effect;
u8 saved_states[MAX8997_REG_MAX];
};
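/*
* Illustrative note: buck125_gpioindex holds the 3-bit DVS slot that is
* driven onto the SET1..SET3 GPIOs below (SET1 is the MSB). For example,
* index 5 (binary 101) drives SET1=1, SET2=0, SET3=1.
*/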
static inline void max8997_set_gpio(struct max8997_data *max8997)
{
int set3 = (max8997->buck125_gpioindex) & 0x1;
int set2 = ((max8997->buck125_gpioindex) >> 1) & 0x1;
int set1 = ((max8997->buck125_gpioindex) >> 2) & 0x1;
gpio_set_value(max8997->buck125_gpios[0], set1);
gpio_set_value(max8997->buck125_gpios[1], set2);
gpio_set_value(max8997->buck125_gpios[2], set3);
}
struct voltage_map_desc {
int min;
int max;
int step;
unsigned int n_bits;
};
/* Voltage maps in mV */
static const struct voltage_map_desc ldo_voltage_map_desc = {
.min = 800, .max = 3950, .step = 50, .n_bits = 6,
}; /* LDO1 ~ 18, 21 all */
static const struct voltage_map_desc buck1245_voltage_map_desc = {
.min = 650, .max = 2225, .step = 25, .n_bits = 6,
}; /* Buck1, 2, 4, 5 */
static const struct voltage_map_desc buck37_voltage_map_desc = {
.min = 750, .max = 3900, .step = 50, .n_bits = 6,
}; /* Buck3, 7 */
/* current map in mA */
static const struct voltage_map_desc charger_current_map_desc = {
.min = 200, .max = 950, .step = 50, .n_bits = 4,
};
static const struct voltage_map_desc topoff_current_map_desc = {
.min = 50, .max = 200, .step = 10, .n_bits = 4,
};
static const struct voltage_map_desc *reg_voltage_map[] = {
[MAX8997_LDO1] = &ldo_voltage_map_desc,
[MAX8997_LDO2] = &ldo_voltage_map_desc,
[MAX8997_LDO3] = &ldo_voltage_map_desc,
[MAX8997_LDO4] = &ldo_voltage_map_desc,
[MAX8997_LDO5] = &ldo_voltage_map_desc,
[MAX8997_LDO6] = &ldo_voltage_map_desc,
[MAX8997_LDO7] = &ldo_voltage_map_desc,
[MAX8997_LDO8] = &ldo_voltage_map_desc,
[MAX8997_LDO9] = &ldo_voltage_map_desc,
[MAX8997_LDO10] = &ldo_voltage_map_desc,
[MAX8997_LDO11] = &ldo_voltage_map_desc,
[MAX8997_LDO12] = &ldo_voltage_map_desc,
[MAX8997_LDO13] = &ldo_voltage_map_desc,
[MAX8997_LDO14] = &ldo_voltage_map_desc,
[MAX8997_LDO15] = &ldo_voltage_map_desc,
[MAX8997_LDO16] = &ldo_voltage_map_desc,
[MAX8997_LDO17] = &ldo_voltage_map_desc,
[MAX8997_LDO18] = &ldo_voltage_map_desc,
[MAX8997_LDO21] = &ldo_voltage_map_desc,
[MAX8997_BUCK1] = &buck1245_voltage_map_desc,
[MAX8997_BUCK2] = &buck1245_voltage_map_desc,
[MAX8997_BUCK3] = &buck37_voltage_map_desc,
[MAX8997_BUCK4] = &buck1245_voltage_map_desc,
[MAX8997_BUCK5] = &buck1245_voltage_map_desc,
[MAX8997_BUCK6] = NULL,
[MAX8997_BUCK7] = &buck37_voltage_map_desc,
[MAX8997_EN32KHZ_AP] = NULL,
[MAX8997_EN32KHZ_CP] = NULL,
[MAX8997_ENVICHG] = NULL,
[MAX8997_ESAFEOUT1] = NULL,
[MAX8997_ESAFEOUT2] = NULL,
[MAX8997_CHARGER_CV] = NULL,
[MAX8997_CHARGER] = &charger_current_map_desc,
[MAX8997_CHARGER_TOPOFF] = &topoff_current_map_desc,
};
static int max8997_list_voltage_safeout(struct regulator_dev *rdev,
unsigned int selector)
{
int rid = rdev_get_id(rdev);
if (rid == MAX8997_ESAFEOUT1 || rid == MAX8997_ESAFEOUT2) {
switch (selector) {
case 0:
return 4850000;
case 1:
return 4900000;
case 2:
return 4950000;
case 3:
return 3300000;
default:
return -EINVAL;
}
}
return -EINVAL;
}
static int max8997_list_voltage_charger_cv(struct regulator_dev *rdev,
unsigned int selector)
{
int rid = rdev_get_id(rdev);
if (rid != MAX8997_CHARGER_CV)
goto err;
switch (selector) {
case 0x00:
return 4200000;
case 0x01 ... 0x0E:
return 4000000 + 20000 * (selector - 0x01);
case 0x0F:
return 4350000;
default:
return -EINVAL;
}
err:
return -EINVAL;
}
static int max8997_list_voltage(struct regulator_dev *rdev,
unsigned int selector)
{
const struct voltage_map_desc *desc;
int rid = rdev_get_id(rdev);
int val;
if (rid >= ARRAY_SIZE(reg_voltage_map) ||
rid < 0)
return -EINVAL;
desc = reg_voltage_map[rid];
if (desc == NULL)
return -EINVAL;
val = desc->min + desc->step * selector;
if (val > desc->max)
return -EINVAL;
return val * 1000;
}
static int max8997_get_enable_register(struct regulator_dev *rdev,
int *reg, int *mask, int *pattern)
{
int rid = rdev_get_id(rdev);
switch (rid) {
case MAX8997_LDO1 ... MAX8997_LDO21:
*reg = MAX8997_REG_LDO1CTRL + (rid - MAX8997_LDO1);
*mask = 0xC0;
*pattern = 0xC0;
break;
case MAX8997_BUCK1:
*reg = MAX8997_REG_BUCK1CTRL;
*mask = 0x01;
*pattern = 0x01;
break;
case MAX8997_BUCK2:
*reg = MAX8997_REG_BUCK2CTRL;
*mask = 0x01;
*pattern = 0x01;
break;
case MAX8997_BUCK3:
*reg = MAX8997_REG_BUCK3CTRL;
*mask = 0x01;
*pattern = 0x01;
break;
case MAX8997_BUCK4:
*reg = MAX8997_REG_BUCK4CTRL;
*mask = 0x01;
*pattern = 0x01;
break;
case MAX8997_BUCK5:
*reg = MAX8997_REG_BUCK5CTRL;
*mask = 0x01;
*pattern = 0x01;
break;
case MAX8997_BUCK6:
*reg = MAX8997_REG_BUCK6CTRL;
*mask = 0x01;
*pattern = 0x01;
break;
case MAX8997_BUCK7:
*reg = MAX8997_REG_BUCK7CTRL;
*mask = 0x01;
*pattern = 0x01;
break;
case MAX8997_EN32KHZ_AP ... MAX8997_EN32KHZ_CP:
*reg = MAX8997_REG_MAINCON1;
*mask = 0x01 << (rid - MAX8997_EN32KHZ_AP);
*pattern = 0x01 << (rid - MAX8997_EN32KHZ_AP);
break;
case MAX8997_ENVICHG:
*reg = MAX8997_REG_MBCCTRL1;
*mask = 0x80;
*pattern = 0x80;
break;
case MAX8997_ESAFEOUT1 ... MAX8997_ESAFEOUT2:
*reg = MAX8997_REG_SAFEOUTCTRL;
*mask = 0x40 << (rid - MAX8997_ESAFEOUT1);
*pattern = 0x40 << (rid - MAX8997_ESAFEOUT1);
break;
case MAX8997_CHARGER:
*reg = MAX8997_REG_MBCCTRL2;
*mask = 0x40;
*pattern = 0x40;
break;
default:
/* Not controllable or not exists */
return -EINVAL;
}
return 0;
}
static int max8997_reg_is_enabled(struct regulator_dev *rdev)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8997->iodev->i2c;
int ret, reg, mask, pattern;
u8 val;
ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
if (ret == -EINVAL)
return 1; /* "not controllable" */
else if (ret)
return ret;
ret = max8997_read_reg(i2c, reg, &val);
if (ret)
return ret;
return (val & mask) == pattern;
}
static int max8997_reg_enable(struct regulator_dev *rdev)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8997->iodev->i2c;
int ret, reg, mask, pattern;
ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
if (ret)
return ret;
return max8997_update_reg(i2c, reg, pattern, mask);
}
static int max8997_reg_disable(struct regulator_dev *rdev)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8997->iodev->i2c;
int ret, reg, mask, pattern;
ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
if (ret)
return ret;
return max8997_update_reg(i2c, reg, ~pattern, mask);
}
static int max8997_get_voltage_register(struct regulator_dev *rdev,
int *_reg, int *_shift, int *_mask)
{
int rid = rdev_get_id(rdev);
int reg, shift = 0, mask = 0x3f;
switch (rid) {
case MAX8997_LDO1 ... MAX8997_LDO21:
reg = MAX8997_REG_LDO1CTRL + (rid - MAX8997_LDO1);
break;
case MAX8997_BUCK1:
reg = MAX8997_REG_BUCK1DVS1;
break;
case MAX8997_BUCK2:
reg = MAX8997_REG_BUCK2DVS1;
break;
case MAX8997_BUCK3:
reg = MAX8997_REG_BUCK3DVS;
break;
case MAX8997_BUCK4:
reg = MAX8997_REG_BUCK4DVS;
break;
case MAX8997_BUCK5:
reg = MAX8997_REG_BUCK5DVS1;
break;
case MAX8997_BUCK7:
reg = MAX8997_REG_BUCK7DVS;
break;
case MAX8997_ESAFEOUT1 ... MAX8997_ESAFEOUT2:
reg = MAX8997_REG_SAFEOUTCTRL;
shift = (rid == MAX8997_ESAFEOUT2) ? 2 : 0;
mask = 0x3;
break;
case MAX8997_CHARGER_CV:
reg = MAX8997_REG_MBCCTRL3;
shift = 0;
mask = 0xf;
break;
case MAX8997_CHARGER:
reg = MAX8997_REG_MBCCTRL4;
shift = 0;
mask = 0xf;
break;
case MAX8997_CHARGER_TOPOFF:
reg = MAX8997_REG_MBCCTRL5;
shift = 0;
mask = 0xf;
break;
default:
return -EINVAL;
}
*_reg = reg;
*_shift = shift;
*_mask = mask;
return 0;
}
static int max8997_get_voltage(struct regulator_dev *rdev)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8997->iodev->i2c;
int reg, shift, mask, ret;
int rid = rdev_get_id(rdev);
u8 val;
ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
if (ret)
return ret;
if ((rid == MAX8997_BUCK1 && max8997->buck1_gpiodvs) ||
(rid == MAX8997_BUCK2 && max8997->buck2_gpiodvs) ||
(rid == MAX8997_BUCK5 && max8997->buck5_gpiodvs))
reg += max8997->buck125_gpioindex;
ret = max8997_read_reg(i2c, reg, &val);
if (ret)
return ret;
val >>= shift;
val &= mask;
if (rdev->desc && rdev->desc->ops && rdev->desc->ops->list_voltage)
return rdev->desc->ops->list_voltage(rdev, val);
/*
* max8997_list_voltage returns value for any rdev with voltage_map,
* which works for "CHARGER" and "CHARGER TOPOFF" that do not have
* list_voltage ops (they are current regulators).
*/
return max8997_list_voltage(rdev, val);
}
static inline int max8997_get_voltage_proper_val(
const struct voltage_map_desc *desc,
int min_vol, int max_vol)
{
int i = 0;
if (desc == NULL)
return -EINVAL;
if (max_vol < desc->min || min_vol > desc->max)
return -EINVAL;
while (desc->min + desc->step * i < min_vol &&
desc->min + desc->step * i < desc->max)
i++;
if (desc->min + desc->step * i > max_vol)
return -EINVAL;
if (i >= (1 << desc->n_bits))
return -EINVAL;
return i;
}
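/*
* Worked example (illustrative): with the LDO map (min 800mV, step 50mV,
* 6 bits), a request for 1800mV..1850mV returns selector 20, because
* 800 + 50 * 20 = 1800mV is the lowest step inside the requested range.
*/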
static int max8997_set_voltage_charger_cv(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8997->iodev->i2c;
int rid = rdev_get_id(rdev);
int lb, ub;
int reg, shift = 0, mask, ret = 0;
u8 val = 0x0;
if (rid != MAX8997_CHARGER_CV)
return -EINVAL;
ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
if (ret)
return ret;
if (max_uV < 4000000 || min_uV > 4350000)
return -EINVAL;
if (min_uV <= 4000000) {
if (max_uV >= 4000000)
return -EINVAL;
else
val = 0x1;
} else if (min_uV <= 4200000 && max_uV >= 4200000)
val = 0x0;
else {
lb = (min_uV - 4000001) / 20000 + 2;
ub = (max_uV - 4000000) / 20000 + 1;
if (lb > ub)
return -EINVAL;
if (lb < 0xf)
val = lb;
else {
if (ub >= 0xf)
val = 0xf;
else
return -EINVAL;
}
}
*selector = val;
ret = max8997_update_reg(i2c, reg, val << shift, mask);
return ret;
}
/*
* For LDO1 ~ LDO21, BUCK1~5, BUCK7, CHARGER, CHARGER_TOPOFF
* BUCK1, 2, and 5 are available if they are not controlled by gpio
*/
static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8997->iodev->i2c;
int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
const struct voltage_map_desc *desc;
int rid = rdev_get_id(rdev);
int reg, shift = 0, mask, ret;
int i;
u8 org;
switch (rid) {
case MAX8997_LDO1 ... MAX8997_LDO21:
break;
case MAX8997_BUCK1 ... MAX8997_BUCK5:
break;
case MAX8997_BUCK6:
return -EINVAL;
case MAX8997_BUCK7:
break;
case MAX8997_CHARGER:
break;
case MAX8997_CHARGER_TOPOFF:
break;
default:
return -EINVAL;
}
desc = reg_voltage_map[rid];
i = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
if (i < 0)
return i;
ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
if (ret)
return ret;
max8997_read_reg(i2c, reg, &org);
org = (org & mask) >> shift;
ret = max8997_update_reg(i2c, reg, i << shift, mask << shift);
*selector = i;
if (rid == MAX8997_BUCK1 || rid == MAX8997_BUCK2 ||
rid == MAX8997_BUCK4 || rid == MAX8997_BUCK5) {
/* If the voltage is increasing */
if (org < i)
udelay(DIV_ROUND_UP(desc->step * (i - org),
max8997->ramp_delay));
}
return ret;
}
/*
* Assess the damage on the voltage setting of BUCK1,2,5 by the change.
*
* When GPIO-DVS mode is used for multiple bucks, changing the voltage value
* of one of the bucks may affect that of another buck, which is the side
* effect of the change (set_voltage). This function examines the GPIO-DVS
* configurations and checks whether such side-effect exists.
*/
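/*
* Illustrative example (not from the datasheet): if BUCK1 and BUCK2 both
* use GPIO-DVS and the shared index is currently 0, while the requested
* BUCK1 value is only programmed in DVS slot 3, then switching the shared
* index to 3 also moves BUCK2 from buck2_vol[0] to buck2_vol[3]; the
* absolute difference of those register values is the "side effect"
* accumulated in side_effect[3] below.
*/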
static int max8997_assess_side_effect(struct regulator_dev *rdev,
u8 new_val, int *best)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
int rid = rdev_get_id(rdev);
u8 *buckx_val[3];
bool buckx_gpiodvs[3];
int side_effect[8];
int min_side_effect = INT_MAX;
int i;
*best = -1;
switch (rid) {
case MAX8997_BUCK1:
rid = 0;
break;
case MAX8997_BUCK2:
rid = 1;
break;
case MAX8997_BUCK5:
rid = 2;
break;
default:
return -EINVAL;
}
buckx_val[0] = max8997->buck1_vol;
buckx_val[1] = max8997->buck2_vol;
buckx_val[2] = max8997->buck5_vol;
buckx_gpiodvs[0] = max8997->buck1_gpiodvs;
buckx_gpiodvs[1] = max8997->buck2_gpiodvs;
buckx_gpiodvs[2] = max8997->buck5_gpiodvs;
for (i = 0; i < 8; i++) {
int others;
if (new_val != (buckx_val[rid])[i]) {
side_effect[i] = -1;
continue;
}
side_effect[i] = 0;
for (others = 0; others < 3; others++) {
int diff;
if (others == rid)
continue;
if (buckx_gpiodvs[others] == false)
continue; /* Not affected */
diff = (buckx_val[others])[i] -
(buckx_val[others])[max8997->buck125_gpioindex];
if (diff > 0)
side_effect[i] += diff;
else if (diff < 0)
side_effect[i] -= diff;
}
if (side_effect[i] == 0) {
*best = i;
return 0; /* NO SIDE EFFECT! Use This! */
}
if (side_effect[i] < min_side_effect) {
min_side_effect = side_effect[i];
*best = i;
}
}
if (*best == -1)
return -EINVAL;
return side_effect[*best];
}
/*
* For Buck 1 ~ 5 and 7. If it is not controlled by GPIO, this calls
* max8997_set_voltage_ldobuck to do the job.
*/
static int max8997_set_voltage_buck(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
int rid = rdev_get_id(rdev);
const struct voltage_map_desc *desc;
int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
bool gpio_dvs_mode = false;
int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
if (rid < MAX8997_BUCK1 || rid > MAX8997_BUCK7)
return -EINVAL;
switch (rid) {
case MAX8997_BUCK1:
if (max8997->buck1_gpiodvs)
gpio_dvs_mode = true;
break;
case MAX8997_BUCK2:
if (max8997->buck2_gpiodvs)
gpio_dvs_mode = true;
break;
case MAX8997_BUCK5:
if (max8997->buck5_gpiodvs)
gpio_dvs_mode = true;
break;
}
if (!gpio_dvs_mode)
return max8997_set_voltage_ldobuck(rdev, min_uV, max_uV,
selector);
desc = reg_voltage_map[rid];
new_val = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
if (new_val < 0)
return new_val;
tmp_dmg = INT_MAX;
tmp_idx = -1;
tmp_val = -1;
do {
damage = max8997_assess_side_effect(rdev, new_val, &new_idx);
if (damage == 0)
goto out;
if (tmp_dmg > damage) {
tmp_idx = new_idx;
tmp_val = new_val;
tmp_dmg = damage;
}
new_val++;
} while (desc->min + desc->step * new_val <= desc->max);
new_idx = tmp_idx;
new_val = tmp_val;
if (max8997->ignore_gpiodvs_side_effect == false)
return -EINVAL;
dev_warn(&rdev->dev, "MAX8997 GPIO-DVS Side Effect Warning: GPIO SET:"
" %d -> %d\n", max8997->buck125_gpioindex, tmp_idx);
out:
if (new_idx < 0 || new_val < 0)
return -EINVAL;
max8997->buck125_gpioindex = new_idx;
max8997_set_gpio(max8997);
*selector = new_val;
return 0;
}
static const int safeoutvolt[] = {
3300000,
4850000,
4900000,
4950000,
};
/* For SAFEOUT1 and SAFEOUT2 */
static int max8997_set_voltage_safeout(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8997->iodev->i2c;
int rid = rdev_get_id(rdev);
int reg, shift = 0, mask, ret;
int i = 0;
u8 val;
if (rid != MAX8997_ESAFEOUT1 && rid != MAX8997_ESAFEOUT2)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(safeoutvolt); i++) {
if (min_uV <= safeoutvolt[i] &&
max_uV >= safeoutvolt[i])
break;
}
if (i >= ARRAY_SIZE(safeoutvolt))
return -EINVAL;
if (i == 0)
val = 0x3;
else
val = i - 1;
ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
if (ret)
return ret;
ret = max8997_update_reg(i2c, reg, val << shift, mask << shift);
*selector = val;
return ret;
}
static int max8997_reg_enable_suspend(struct regulator_dev *rdev)
{
return 0;
}
static int max8997_reg_disable_suspend(struct regulator_dev *rdev)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8997->iodev->i2c;
int ret, reg, mask, pattern;
int rid = rdev_get_id(rdev);
ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
if (ret)
return ret;
max8997_read_reg(i2c, reg, &max8997->saved_states[rid]);
if (rid == MAX8997_LDO1 ||
rid == MAX8997_LDO10 ||
rid == MAX8997_LDO21) {
dev_dbg(&rdev->dev, "Conditional Power-Off for %s\n",
rdev->desc->name);
return max8997_update_reg(i2c, reg, 0x40, mask);
}
dev_dbg(&rdev->dev, "Full Power-Off for %s (%xh -> %xh)\n",
rdev->desc->name, max8997->saved_states[rid] & mask,
(~pattern) & mask);
return max8997_update_reg(i2c, reg, ~pattern, mask);
}
static struct regulator_ops max8997_ldo_ops = {
.list_voltage = max8997_list_voltage,
.is_enabled = max8997_reg_is_enabled,
.enable = max8997_reg_enable,
.disable = max8997_reg_disable,
.get_voltage = max8997_get_voltage,
.set_voltage = max8997_set_voltage_ldobuck,
.set_suspend_enable = max8997_reg_enable_suspend,
.set_suspend_disable = max8997_reg_disable_suspend,
};
static struct regulator_ops max8997_buck_ops = {
.list_voltage = max8997_list_voltage,
.is_enabled = max8997_reg_is_enabled,
.enable = max8997_reg_enable,
.disable = max8997_reg_disable,
.get_voltage = max8997_get_voltage,
.set_voltage = max8997_set_voltage_buck,
.set_suspend_enable = max8997_reg_enable_suspend,
.set_suspend_disable = max8997_reg_disable_suspend,
};
static struct regulator_ops max8997_fixedvolt_ops = {
.list_voltage = max8997_list_voltage,
.is_enabled = max8997_reg_is_enabled,
.enable = max8997_reg_enable,
.disable = max8997_reg_disable,
.set_suspend_enable = max8997_reg_enable_suspend,
.set_suspend_disable = max8997_reg_disable_suspend,
};
static struct regulator_ops max8997_safeout_ops = {
.list_voltage = max8997_list_voltage_safeout,
.is_enabled = max8997_reg_is_enabled,
.enable = max8997_reg_enable,
.disable = max8997_reg_disable,
.get_voltage = max8997_get_voltage,
.set_voltage = max8997_set_voltage_safeout,
.set_suspend_enable = max8997_reg_enable_suspend,
.set_suspend_disable = max8997_reg_disable_suspend,
};
static struct regulator_ops max8997_fixedstate_ops = {
.list_voltage = max8997_list_voltage_charger_cv,
.get_voltage = max8997_get_voltage,
.set_voltage = max8997_set_voltage_charger_cv,
};
static int max8997_set_voltage_ldobuck_wrap(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
unsigned dummy;
return max8997_set_voltage_ldobuck(rdev, min_uV, max_uV, &dummy);
}
static struct regulator_ops max8997_charger_ops = {
.is_enabled = max8997_reg_is_enabled,
.enable = max8997_reg_enable,
.disable = max8997_reg_disable,
.get_current_limit = max8997_get_voltage,
.set_current_limit = max8997_set_voltage_ldobuck_wrap,
};
static struct regulator_ops max8997_charger_fixedstate_ops = {
.is_enabled = max8997_reg_is_enabled,
.get_current_limit = max8997_get_voltage,
.set_current_limit = max8997_set_voltage_ldobuck_wrap,
};
#define regulator_desc_ldo(num) { \
.name = "LDO"#num, \
.id = MAX8997_LDO##num, \
.ops = &max8997_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}
#define regulator_desc_buck(num) { \
.name = "BUCK"#num, \
.id = MAX8997_BUCK##num, \
.ops = &max8997_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}
static struct regulator_desc regulators[] = {
regulator_desc_ldo(1),
regulator_desc_ldo(2),
regulator_desc_ldo(3),
regulator_desc_ldo(4),
regulator_desc_ldo(5),
regulator_desc_ldo(6),
regulator_desc_ldo(7),
regulator_desc_ldo(8),
regulator_desc_ldo(9),
regulator_desc_ldo(10),
regulator_desc_ldo(11),
regulator_desc_ldo(12),
regulator_desc_ldo(13),
regulator_desc_ldo(14),
regulator_desc_ldo(15),
regulator_desc_ldo(16),
regulator_desc_ldo(17),
regulator_desc_ldo(18),
regulator_desc_ldo(21),
regulator_desc_buck(1),
regulator_desc_buck(2),
regulator_desc_buck(3),
regulator_desc_buck(4),
regulator_desc_buck(5),
{
.name = "BUCK6",
.id = MAX8997_BUCK6,
.ops = &max8997_fixedvolt_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
regulator_desc_buck(7),
{
.name = "EN32KHz_AP",
.id = MAX8997_EN32KHZ_AP,
.ops = &max8997_fixedvolt_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
}, {
.name = "EN32KHz_CP",
.id = MAX8997_EN32KHZ_CP,
.ops = &max8997_fixedvolt_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
}, {
.name = "ENVICHG",
.id = MAX8997_ENVICHG,
.ops = &max8997_fixedvolt_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
}, {
.name = "ESAFEOUT1",
.id = MAX8997_ESAFEOUT1,
.ops = &max8997_safeout_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
}, {
.name = "ESAFEOUT2",
.id = MAX8997_ESAFEOUT2,
.ops = &max8997_safeout_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
}, {
.name = "CHARGER_CV",
.id = MAX8997_CHARGER_CV,
.ops = &max8997_fixedstate_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
}, {
.name = "CHARGER",
.id = MAX8997_CHARGER,
.ops = &max8997_charger_ops,
.type = REGULATOR_CURRENT,
.owner = THIS_MODULE,
}, {
.name = "CHARGER_TOPOFF",
.id = MAX8997_CHARGER_TOPOFF,
.ops = &max8997_charger_fixedstate_ops,
.type = REGULATOR_CURRENT,
.owner = THIS_MODULE,
},
};
static __devinit int max8997_pmic_probe(struct platform_device *pdev)
{
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
struct regulator_dev **rdev;
struct max8997_data *max8997;
struct i2c_client *i2c;
int i, ret, size;
u8 max_buck1 = 0, max_buck2 = 0, max_buck5 = 0;
if (!pdata) {
dev_err(pdev->dev.parent, "No platform init data supplied.\n");
return -ENODEV;
}
max8997 = kzalloc(sizeof(struct max8997_data), GFP_KERNEL);
if (!max8997)
return -ENOMEM;
size = sizeof(struct regulator_dev *) * pdata->num_regulators;
max8997->rdev = kzalloc(size, GFP_KERNEL);
if (!max8997->rdev) {
kfree(max8997);
return -ENOMEM;
}
rdev = max8997->rdev;
max8997->dev = &pdev->dev;
max8997->iodev = iodev;
max8997->num_regulators = pdata->num_regulators;
platform_set_drvdata(pdev, max8997);
i2c = max8997->iodev->i2c;
max8997->buck125_gpioindex = pdata->buck125_default_idx;
max8997->buck1_gpiodvs = pdata->buck1_gpiodvs;
max8997->buck2_gpiodvs = pdata->buck2_gpiodvs;
max8997->buck5_gpiodvs = pdata->buck5_gpiodvs;
memcpy(max8997->buck125_gpios, pdata->buck125_gpios, sizeof(int) * 3);
max8997->ignore_gpiodvs_side_effect = pdata->ignore_gpiodvs_side_effect;
for (i = 0; i < 8; i++) {
max8997->buck1_vol[i] = ret =
max8997_get_voltage_proper_val(
&buck1245_voltage_map_desc,
pdata->buck1_voltage[i] / 1000,
pdata->buck1_voltage[i] / 1000 +
buck1245_voltage_map_desc.step);
if (ret < 0)
goto err_alloc;
max8997->buck2_vol[i] = ret =
max8997_get_voltage_proper_val(
&buck1245_voltage_map_desc,
pdata->buck2_voltage[i] / 1000,
pdata->buck2_voltage[i] / 1000 +
buck1245_voltage_map_desc.step);
if (ret < 0)
goto err_alloc;
max8997->buck5_vol[i] = ret =
max8997_get_voltage_proper_val(
&buck1245_voltage_map_desc,
pdata->buck5_voltage[i] / 1000,
pdata->buck5_voltage[i] / 1000 +
buck1245_voltage_map_desc.step);
if (ret < 0)
goto err_alloc;
if (max_buck1 < max8997->buck1_vol[i])
max_buck1 = max8997->buck1_vol[i];
if (max_buck2 < max8997->buck2_vol[i])
max_buck2 = max8997->buck2_vol[i];
if (max_buck5 < max8997->buck5_vol[i])
max_buck5 = max8997->buck5_vol[i];
}
/* For safety, set the maximum voltage before setting up the DVS registers */
for (i = 0; i < 8; i++) {
max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
max_buck1, 0x3f);
max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
max_buck2, 0x3f);
max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
max_buck5, 0x3f);
}
/*
* If none of buck 1, 2, and 5 uses GPIO-DVS settings, ignore them.
* If at least one of them does, set up the GPIOs.
*/
if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
pdata->buck5_gpiodvs) {
bool gpio1set = false, gpio2set = false;
if (!gpio_is_valid(pdata->buck125_gpios[0]) ||
!gpio_is_valid(pdata->buck125_gpios[1]) ||
!gpio_is_valid(pdata->buck125_gpios[2])) {
dev_err(&pdev->dev, "GPIO NOT VALID\n");
ret = -EINVAL;
goto err_alloc;
}
ret = gpio_request(pdata->buck125_gpios[0],
"MAX8997 SET1");
if (ret == -EBUSY)
dev_warn(&pdev->dev, "Duplicated gpio request"
" on SET1\n");
else if (ret)
goto err_alloc;
else
gpio1set = true;
ret = gpio_request(pdata->buck125_gpios[1],
"MAX8997 SET2");
if (ret == -EBUSY)
dev_warn(&pdev->dev, "Duplicated gpio request"
" on SET2\n");
else if (ret) {
if (gpio1set)
gpio_free(pdata->buck125_gpios[0]);
goto err_alloc;
} else
gpio2set = true;
ret = gpio_request(pdata->buck125_gpios[2],
"MAX8997 SET3");
if (ret == -EBUSY)
dev_warn(&pdev->dev, "Duplicated gpio request"
" on SET3\n");
else if (ret) {
if (gpio1set)
gpio_free(pdata->buck125_gpios[0]);
if (gpio2set)
gpio_free(pdata->buck125_gpios[1]);
goto err_alloc;
}
gpio_direction_output(pdata->buck125_gpios[0],
(max8997->buck125_gpioindex >> 2)
& 0x1); /* SET1 */
gpio_direction_output(pdata->buck125_gpios[1],
(max8997->buck125_gpioindex >> 1)
& 0x1); /* SET2 */
gpio_direction_output(pdata->buck125_gpios[2],
(max8997->buck125_gpioindex >> 0)
& 0x1); /* SET3 */
ret = 0;
}
/* Enable GPIO-DVS only for the bucks configured to use it */
max8997_update_reg(i2c, MAX8997_REG_BUCK1CTRL, (pdata->buck1_gpiodvs) ?
(1 << 1) : (0 << 1), 1 << 1);
max8997_update_reg(i2c, MAX8997_REG_BUCK2CTRL, (pdata->buck2_gpiodvs) ?
(1 << 1) : (0 << 1), 1 << 1);
max8997_update_reg(i2c, MAX8997_REG_BUCK5CTRL, (pdata->buck5_gpiodvs) ?
(1 << 1) : (0 << 1), 1 << 1);
/* Initialize all the DVS related BUCK registers */
for (i = 0; i < 8; i++) {
max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
max8997->buck1_vol[i],
0x3f);
max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
max8997->buck2_vol[i],
0x3f);
max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
max8997->buck5_vol[i],
0x3f);
}
/* Misc Settings */
max8997->ramp_delay = 10; /* set 10mV/us, which is the default */
max8997_write_reg(i2c, MAX8997_REG_BUCKRAMP, (0xf << 4) | 0x9);
for (i = 0; i < pdata->num_regulators; i++) {
const struct voltage_map_desc *desc;
int id = pdata->regulators[i].id;
desc = reg_voltage_map[id];
if (desc)
regulators[id].n_voltages =
(desc->max - desc->min) / desc->step + 1;
else if (id == MAX8997_ESAFEOUT1 || id == MAX8997_ESAFEOUT2)
regulators[id].n_voltages = 4;
else if (id == MAX8997_CHARGER_CV)
regulators[id].n_voltages = 16;
rdev[i] = regulator_register(&regulators[id], max8997->dev,
pdata->regulators[i].initdata, max8997, NULL);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(max8997->dev, "regulator init failed for %d\n",
id);
rdev[i] = NULL;
goto err;
}
}
return 0;
err:
for (i = 0; i < max8997->num_regulators; i++)
if (rdev[i])
regulator_unregister(rdev[i]);
err_alloc:
kfree(max8997->rdev);
kfree(max8997);
return ret;
}
static int __devexit max8997_pmic_remove(struct platform_device *pdev)
{
struct max8997_data *max8997 = platform_get_drvdata(pdev);
struct regulator_dev **rdev = max8997->rdev;
int i;
for (i = 0; i < max8997->num_regulators; i++)
if (rdev[i])
regulator_unregister(rdev[i]);
kfree(max8997->rdev);
kfree(max8997);
return 0;
}
static const struct platform_device_id max8997_pmic_id[] = {
{ "max8997-pmic", 0},
{ },
};
MODULE_DEVICE_TABLE(platform, max8997_pmic_id);
static struct platform_driver max8997_pmic_driver = {
.driver = {
.name = "max8997-pmic",
.owner = THIS_MODULE,
},
.probe = max8997_pmic_probe,
.remove = __devexit_p(max8997_pmic_remove),
.id_table = max8997_pmic_id,
};
static int __init max8997_pmic_init(void)
{
return platform_driver_register(&max8997_pmic_driver);
}
subsys_initcall(max8997_pmic_init);
static void __exit max8997_pmic_cleanup(void)
{
platform_driver_unregister(&max8997_pmic_driver);
}
module_exit(max8997_pmic_cleanup);
MODULE_DESCRIPTION("MAXIM 8997/8966 Regulator Driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_LICENSE("GPL");
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2007 Michael Sevakis
*
* LCD scrolling driver and scheduler
*
* Much collected and combined from the various Rockbox LCD drivers.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef __SCROLL_ENGINE_H__
#define __SCROLL_ENGINE_H__
#include <stdbool.h>
#include "config.h"
#include "file.h"
struct viewport;
struct scrollinfo;
extern void scroll_init(void) INIT_ATTR;
extern void lcd_bidir_scroll(int threshold);
extern void lcd_scroll_speed(int speed);
extern void lcd_scroll_delay(int ms);
extern void lcd_scroll_stop(void);
extern void lcd_scroll_stop_viewport(const struct viewport *vp);
extern void lcd_scroll_stop_viewport_rect(const struct viewport *vp, int x, int y, int width, int height);
extern bool lcd_scroll_now(struct scrollinfo *scroll);
#ifdef HAVE_REMOTE_LCD
extern void lcd_remote_scroll_speed(int speed);
extern void lcd_remote_scroll_delay(int ms);
extern void lcd_remote_scroll_stop(void);
extern void lcd_remote_scroll_stop_viewport(const struct viewport *vp);
extern void lcd_remote_scroll_stop_viewport_rect(const struct viewport *vp, int x, int y, int width, int height);
extern bool lcd_remote_scroll_now(struct scrollinfo *scroll);
#endif
/* internal usage, but in multiple drivers
* larger than the normal linebuffer since it holds the line a second
* time (+3 spaces) for non-bidir scrolling */
#define SCROLL_SPACING 3
#define SCROLL_LINE_SIZE (MAX_PATH + SCROLL_SPACING + 3*LCD_WIDTH/2 + 2)
struct scrollinfo
{
struct viewport* vp;
char linebuffer[(SCROLL_LINE_SIZE / 2) - SCROLL_SPACING];
const char *line;
/* rectangle for the line */
int x, y; /* relative to the viewport */
int width, height;
/* pixel to skip from the beginning of the string, increments as the text scrolls */
int offset;
/* scroll presently forward or backward? */
bool backward;
bool bidir;
long start_tick;
/* support for custom scrolling functions,
* must be called with ::line == NULL to indicate that the line
* stops scrolling or when the userdata pointer is going to be changed
* (the custom scroller can release the userdata then) */
void (*scroll_func)(struct scrollinfo *s);
void *userdata;
};
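/* A minimal sketch (not part of Rockbox) of a custom scroller hooked into
 * ::scroll_func above; the function name and the userdata handling are
 * illustrative assumptions. Per the contract described above, the engine
 * calls the function with ::line == NULL when the line stops scrolling or
 * before ::userdata is changed, which is the point to release the userdata.
 *
 *	static void example_scroller(struct scrollinfo *s)
 *	{
 *		if (s->line == NULL) {
 *			free(s->userdata);	// line stopped: release our state
 *			s->userdata = NULL;
 *			return;
 *		}
 *		// otherwise draw s->line at s->x/s->y using s->offset
 *	}
 */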
struct scroll_screen_info
{
struct scrollinfo * const scroll;
const int num_scroll; /* number of scrollable lines (also number of scroll structs) */
int lines; /* Number of currently scrolling lines */
long ticks; /* # of ticks between updates*/
long delay; /* ticks delay before start */
int bidir_limit; /* percent */
int step; /* pixels per scroll step */
#if defined(HAVE_REMOTE_LCD)
long last_scroll;
#endif
};
/** main lcd **/
#define LCD_SCROLLABLE_LINES ((LCD_HEIGHT+4)/5 < 32 ? (LCD_HEIGHT+4)/5 : 32)
extern struct scroll_screen_info lcd_scroll_info;
/** remote lcd **/
#ifdef HAVE_REMOTE_LCD
#define LCD_REMOTE_SCROLLABLE_LINES \
(((LCD_REMOTE_HEIGHT+4)/5 < 32) ? (LCD_REMOTE_HEIGHT+4)/5 : 32)
extern struct scroll_screen_info lcd_remote_scroll_info;
#endif
#endif /* __SCROLL_ENGINE_H__ */
| {
"language": "C"
} |
/*
* Copyright (c) 2005, Bull S.A.. All rights reserved.
* Created by: Sebastien Decugis
* Copyright (c) 2013 Cyril Hrubis <chrubis@suse.cz>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* This sample test aims to check the following assertions:
*
* If SA_SIGINFO is set in sa_flags and Real Time Signals extension is supported,
* sa_sigaction is used as the signal handling function.
*
* The steps are:
* -> test for RTS extension
* -> register a handler for SIGVTALRM with SA_SIGINFO, and a known function
* as sa_sigaction
* -> raise SIGVTALRM, and check the function has been called.
*
* The test fails if the function is not called
*/
#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include "posixtest.h"
#define WRITE(str) write(STDOUT_FILENO, str, sizeof(str) - 1)
static volatile sig_atomic_t called = 0;
static void handler(int sig, siginfo_t *info, void *context)
{
(void) sig;
(void) context;
if (info->si_signo != SIGVTALRM) {
WRITE("Wrong signal generated?\n");
_exit(PTS_FAIL);
}
called = 1;
}
int main(void)
{
int ret;
long rts;
struct sigaction sa;
/* Test the RTS extension */
rts = sysconf(_SC_REALTIME_SIGNALS);
if (rts < 0L) {
fprintf(stderr, "This test needs the RTS extension");
return PTS_UNTESTED;
}
/* Set the signal handler */
sa.sa_flags = SA_SIGINFO;
sa.sa_sigaction = handler;
ret = sigemptyset(&sa.sa_mask);
if (ret != 0) {
perror("Failed to empty signal set");
return PTS_UNRESOLVED;
}
/* Install the signal handler for SIGVTALRM */
ret = sigaction(SIGVTALRM, &sa, 0);
if (ret != 0) {
perror("Failed to set signal handler");
return PTS_UNTESTED;
}
if (called) {
fprintf(stderr,
"The signal handler has been called before signal was raised");
return PTS_FAIL;
}
ret = raise(SIGVTALRM);
if (ret != 0) {
perror("Failed to raise SIGVTALRM");
return PTS_UNRESOLVED;
}
if (!called) {
fprintf(stderr, "The sa_handler was not called");
return PTS_FAIL;
}
printf("Test PASSED\n");
return PTS_PASS;
}
| {
"language": "C"
} |
// © 2016 and later: Unicode, Inc. and others.
// License & terms of use: http://www.unicode.org/copyright.html
/*
*******************************************************************************
* Copyright (C) 2011-2016, International Business Machines Corporation and
* others. All Rights Reserved.
*******************************************************************************
*/
#ifndef __TZNAMES_H
#define __TZNAMES_H
/**
* \file
* \brief C++ API: TimeZoneNames
*/
#include "unicode/utypes.h"
#if !UCONFIG_NO_FORMATTING
#include "unicode/uloc.h"
#include "unicode/unistr.h"
U_CDECL_BEGIN
/**
* Constants for time zone display name types.
* @stable ICU 50
*/
typedef enum UTimeZoneNameType {
/**
* Unknown display name type.
* @stable ICU 50
*/
UTZNM_UNKNOWN = 0x00,
/**
* Long display name, such as "Eastern Time".
* @stable ICU 50
*/
UTZNM_LONG_GENERIC = 0x01,
/**
* Long display name for standard time, such as "Eastern Standard Time".
* @stable ICU 50
*/
UTZNM_LONG_STANDARD = 0x02,
/**
* Long display name for daylight saving time, such as "Eastern Daylight Time".
* @stable ICU 50
*/
UTZNM_LONG_DAYLIGHT = 0x04,
/**
* Short display name, such as "ET".
* @stable ICU 50
*/
UTZNM_SHORT_GENERIC = 0x08,
/**
* Short display name for standard time, such as "EST".
* @stable ICU 50
*/
UTZNM_SHORT_STANDARD = 0x10,
/**
* Short display name for daylight saving time, such as "EDT".
* @stable ICU 50
*/
UTZNM_SHORT_DAYLIGHT = 0x20,
/**
* Exemplar location name, such as "Los Angeles".
* @stable ICU 51
*/
UTZNM_EXEMPLAR_LOCATION = 0x40
} UTimeZoneNameType;
U_CDECL_END
U_NAMESPACE_BEGIN
class UVector;
struct MatchInfo;
/**
* <code>TimeZoneNames</code> is an abstract class representing the time zone display name data model defined
* by <a href="http://www.unicode.org/reports/tr35/">UTS#35 Unicode Locale Data Markup Language (LDML)</a>.
 * The model defines the meta zone, which is used for storing a set of display names. A meta zone can be shared
 * by multiple time zones. Also, a time zone may have multiple historic meta zone mappings.
* <p>
 * For example, people in the United States refer to the zone used by the eastern part of North America as "Eastern Time".
 * The tz database contains multiple time zones "America/New_York", "America/Detroit", "America/Montreal" and some
 * others that belong to "Eastern Time". However, assigning different display names to these time zones does not make
 * much sense for most people.
* <p>
* In <a href="http://cldr.unicode.org/">CLDR</a> (which uses LDML for representing locale data), the display name
* "Eastern Time" is stored as long generic display name of a meta zone identified by the ID "America_Eastern".
* Then, there is another table maintaining the historic mapping to meta zones for each time zone. The time zones in
* the above example ("America/New_York", "America/Detroit"...) are mapped to the meta zone "America_Eastern".
* <p>
 * Sometimes, a time zone was mapped to a different meta zone in the past. For example, "America/Indiana/Knox"
 * has moved back and forth between "Eastern Time" and "Central Time". Therefore, the time zone
 * to meta zone mapping data must be stored by date range.
*
* <p><b>Note:</b>
 * The methods in this class assume that time zone IDs are already canonicalized. For example, you may not get a proper
 * result returned by a method with time zone ID "America/Indiana/Indianapolis", because it's not a canonical time zone
 * ID (the canonical time zone ID for the time zone is "America/Indianapolis"). See
 * {@link TimeZone#getCanonicalID(const UnicodeString& id, UnicodeString& canonicalID, UErrorCode& status)} about ICU
 * canonical time zone IDs.
*
* <p>
* In CLDR, most of time zone display names except location names are provided through meta zones. But a time zone may
* have a specific name that is not shared with other time zones.
*
* For example, time zone "Europe/London" has English long name for standard time "Greenwich Mean Time", which is also
* shared with other time zones. However, the long name for daylight saving time is "British Summer Time", which is only
* used for "Europe/London".
*
* <p>
* {@link #getTimeZoneDisplayName} is designed for accessing a name only used by a single time zone.
 * But that does not necessarily mean that a subclass implementation uses the same model as CLDR. A subclass implementation
* may provide time zone names only through {@link #getTimeZoneDisplayName}, or only through {@link #getMetaZoneDisplayName},
* or both.
*
* <p>
* The default <code>TimeZoneNames</code> implementation returned by {@link #createInstance}
* uses the locale data imported from CLDR. In CLDR, set of meta zone IDs and mappings between zone IDs and meta zone
* IDs are shared by all locales. Therefore, the behavior of {@link #getAvailableMetaZoneIDs},
* {@link #getMetaZoneID}, and {@link #getReferenceZoneID} won't be changed no matter
* what locale is used for getting an instance of <code>TimeZoneNames</code>.
*
* @stable ICU 50
*/
class U_I18N_API TimeZoneNames : public UObject {
public:
/**
* Destructor.
* @stable ICU 50
*/
virtual ~TimeZoneNames();
/**
* Return true if the given TimeZoneNames objects are semantically equal.
* @param other the object to be compared with.
* @return Return TRUE if the given Format objects are semantically equal.
* @stable ICU 50
*/
virtual UBool operator==(const TimeZoneNames& other) const = 0;
/**
* Return true if the given TimeZoneNames objects are not semantically
* equal.
* @param other the object to be compared with.
* @return Return TRUE if the given Format objects are not semantically equal.
* @stable ICU 50
*/
UBool operator!=(const TimeZoneNames& other) const { return !operator==(other); }
/**
* Clone this object polymorphically. The caller is responsible
* for deleting the result when done.
* @return A copy of the object
* @stable ICU 50
*/
virtual TimeZoneNames* clone() const = 0;
/**
* Returns an instance of <code>TimeZoneNames</code> for the specified locale.
*
* @param locale The locale.
* @param status Receives the status.
* @return An instance of <code>TimeZoneNames</code>
* @stable ICU 50
*/
static TimeZoneNames* U_EXPORT2 createInstance(const Locale& locale, UErrorCode& status);
/**
* Returns an instance of <code>TimeZoneNames</code> containing only short specific
* zone names (SHORT_STANDARD and SHORT_DAYLIGHT),
* compatible with the IANA tz database's zone abbreviations (not localized).
* <br>
* Note: The input locale is used for resolving ambiguous names (e.g. "IST" is parsed
* as Israel Standard Time for Israel, while it is parsed as India Standard Time for
* all other regions). The zone names returned by this instance are not localized.
* @stable ICU 54
*/
static TimeZoneNames* U_EXPORT2 createTZDBInstance(const Locale& locale, UErrorCode& status);
/**
* Returns an enumeration of all available meta zone IDs.
* @param status Receives the status.
* @return an enumeration object, owned by the caller.
* @stable ICU 50
*/
virtual StringEnumeration* getAvailableMetaZoneIDs(UErrorCode& status) const = 0;
/**
* Returns an enumeration of all available meta zone IDs used by the given time zone.
 * @param tzID The canonical time zone ID.
* @param status Receives the status.
* @return an enumeration object, owned by the caller.
* @stable ICU 50
*/
virtual StringEnumeration* getAvailableMetaZoneIDs(const UnicodeString& tzID, UErrorCode& status) const = 0;
/**
* Returns the meta zone ID for the given canonical time zone ID at the given date.
* @param tzID The canonical time zone ID.
* @param date The date.
* @param mzID Receives the meta zone ID for the given time zone ID at the given date. If the time zone does not have a
* corresponding meta zone at the given date or the implementation does not support meta zones, "bogus" state
* is set.
* @return A reference to the result.
* @stable ICU 50
*/
virtual UnicodeString& getMetaZoneID(const UnicodeString& tzID, UDate date, UnicodeString& mzID) const = 0;
/**
* Returns the reference zone ID for the given meta zone ID for the region.
*
* Note: Each meta zone must have a reference zone associated with a special region "001" (world).
* Some meta zones may have region specific reference zone IDs other than the special region
* "001". When a meta zone does not have any region specific reference zone IDs, this method
 * returns the reference zone ID for the special region "001" (world).
*
* @param mzID The meta zone ID.
* @param region The region.
 * @param tzID Receives the reference zone ID ("golden zone" in the LDML specification) for the given meta zone ID for the
* region. If the meta zone is unknown or the implementation does not support meta zones, "bogus" state
* is set.
* @return A reference to the result.
* @stable ICU 50
*/
virtual UnicodeString& getReferenceZoneID(const UnicodeString& mzID, const char* region, UnicodeString& tzID) const = 0;
/**
* Returns the display name of the meta zone.
* @param mzID The meta zone ID.
* @param type The display name type. See {@link #UTimeZoneNameType}.
* @param name Receives the display name of the meta zone. When this object does not have a localized display name for the given
* meta zone with the specified type or the implementation does not provide any display names associated
* with meta zones, "bogus" state is set.
* @return A reference to the result.
* @stable ICU 50
*/
virtual UnicodeString& getMetaZoneDisplayName(const UnicodeString& mzID, UTimeZoneNameType type, UnicodeString& name) const = 0;
/**
* Returns the display name of the time zone. Unlike {@link #getDisplayName},
* this method does not get a name from a meta zone used by the time zone.
* @param tzID The canonical time zone ID.
* @param type The display name type. See {@link #UTimeZoneNameType}.
* @param name Receives the display name for the time zone. When this object does not have a localized display name for the given
* time zone with the specified type, "bogus" state is set.
* @return A reference to the result.
* @stable ICU 50
*/
virtual UnicodeString& getTimeZoneDisplayName(const UnicodeString& tzID, UTimeZoneNameType type, UnicodeString& name) const = 0;
/**
* Returns the exemplar location name for the given time zone. When this object does not have a localized location
 * name, the default implementation may still return a programmatically generated name with the logic described
* below.
* <ol>
* <li>Check if the ID contains "/". If not, return null.
* <li>Check if the ID does not start with "Etc/" or "SystemV/". If it does, return null.
* <li>Extract a substring after the last occurrence of "/".
* <li>Replace "_" with " ".
* </ol>
* For example, "New York" is returned for the time zone ID "America/New_York" when this object does not have the
* localized location name.
*
* @param tzID The canonical time zone ID
* @param name Receives the exemplar location name for the given time zone, or "bogus" state is set when a localized
* location name is not available and the fallback logic described above cannot extract location from the ID.
* @return A reference to the result.
* @stable ICU 50
*/
virtual UnicodeString& getExemplarLocationName(const UnicodeString& tzID, UnicodeString& name) const;
/**
* Returns the display name of the time zone at the given date.
* <p>
* <b>Note:</b> This method calls the subclass's {@link #getTimeZoneDisplayName} first. When the
* result is bogus, this method calls {@link #getMetaZoneID} to get the meta zone ID mapped from the
* time zone, then calls {@link #getMetaZoneDisplayName}.
*
* @param tzID The canonical time zone ID.
* @param type The display name type. See {@link #UTimeZoneNameType}.
* @param date The date.
* @param name Receives the display name for the time zone at the given date. When this object does not have a localized display
* name for the time zone with the specified type and date, "bogus" state is set.
* @return A reference to the result.
* @stable ICU 50
*/
virtual UnicodeString& getDisplayName(const UnicodeString& tzID, UTimeZoneNameType type, UDate date, UnicodeString& name) const;
/**
* @internal ICU internal only, for specific users only until proposed publicly.
*/
virtual void loadAllDisplayNames(UErrorCode& status);
/**
* @internal ICU internal only, for specific users only until proposed publicly.
*/
virtual void getDisplayNames(const UnicodeString& tzID, const UTimeZoneNameType types[], int32_t numTypes, UDate date, UnicodeString dest[], UErrorCode& status) const;
/**
* <code>MatchInfoCollection</code> represents a collection of time zone name matches used by
* {@link TimeZoneNames#find}.
* @internal
*/
class U_I18N_API MatchInfoCollection : public UMemory {
public:
/**
* Constructor.
* @internal
*/
MatchInfoCollection();
/**
* Destructor.
* @internal
*/
virtual ~MatchInfoCollection();
#ifndef U_HIDE_INTERNAL_API
/**
* Adds a zone match.
* @param nameType The name type.
* @param matchLength The match length.
* @param tzID The time zone ID.
* @param status Receives the status
* @internal
*/
void addZone(UTimeZoneNameType nameType, int32_t matchLength,
const UnicodeString& tzID, UErrorCode& status);
/**
 * Adds a meta zone match.
* @param nameType The name type.
* @param matchLength The match length.
* @param mzID The metazone ID.
* @param status Receives the status
* @internal
*/
void addMetaZone(UTimeZoneNameType nameType, int32_t matchLength,
const UnicodeString& mzID, UErrorCode& status);
/**
* Returns the number of entries available in this object.
* @return The number of entries.
* @internal
*/
int32_t size() const;
/**
* Returns the time zone name type of a match at the specified index.
* @param idx The index
* @return The time zone name type. If the specified idx is out of range,
* it returns UTZNM_UNKNOWN.
* @see UTimeZoneNameType
* @internal
*/
UTimeZoneNameType getNameTypeAt(int32_t idx) const;
/**
* Returns the match length of a match at the specified index.
* @param idx The index
* @return The match length. If the specified idx is out of range,
* it returns 0.
* @internal
*/
int32_t getMatchLengthAt(int32_t idx) const;
/**
* Gets the zone ID of a match at the specified index.
* @param idx The index
* @param tzID Receives the zone ID.
* @return TRUE if the zone ID was set to tzID.
* @internal
*/
UBool getTimeZoneIDAt(int32_t idx, UnicodeString& tzID) const;
/**
* Gets the metazone ID of a match at the specified index.
* @param idx The index
* @param mzID Receives the metazone ID
* @return TRUE if the meta zone ID was set to mzID.
* @internal
*/
UBool getMetaZoneIDAt(int32_t idx, UnicodeString& mzID) const;
#endif /* U_HIDE_INTERNAL_API */
private:
UVector* fMatches; // vector of MatchEntry
UVector* matches(UErrorCode& status);
};
/**
* Finds time zone name prefix matches for the input text at the
* given offset and returns a collection of the matches.
* @param text The text.
* @param start The starting offset within the text.
* @param types The set of name types represented by bitwise flags of UTimeZoneNameType enums,
* or UTZNM_UNKNOWN for all name types.
* @param status Receives the status.
* @return A collection of matches (owned by the caller), or NULL if no matches are found.
* @see UTimeZoneNameType
* @see MatchInfoCollection
* @internal
*/
virtual MatchInfoCollection* find(const UnicodeString& text, int32_t start, uint32_t types, UErrorCode& status) const = 0;
};
U_NAMESPACE_END
#endif
#endif
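// A minimal usage sketch (illustrative only, not part of this header): it uses
// only the factory and virtual methods declared above; the `date` value is
// assumed to be supplied by the caller.
//
//   UErrorCode status = U_ZERO_ERROR;
//   TimeZoneNames *tzn = TimeZoneNames::createInstance(Locale::getEnglish(), status);
//   if (U_SUCCESS(status)) {
//       UnicodeString name;
//       tzn->getDisplayName(UnicodeString("America/New_York"),
//                           UTZNM_LONG_STANDARD, date, name); // e.g. "Eastern Standard Time"
//       delete tzn;
//   }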
| {
"language": "C"
} |
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
#ifndef __LINUX_OVERFLOW_H
#define __LINUX_OVERFLOW_H
#include <linux/compiler.h>
/*
* In the fallback code below, we need to compute the minimum and
* maximum values representable in a given type. These macros may also
* be useful elsewhere, so we provide them outside the
* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
*
* It would seem more obvious to do something like
*
* #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
* #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
*
* Unfortunately, the middle expressions, strictly speaking, have
* undefined behaviour, and at least some versions of gcc warn about
* the type_max expression (but not if -fsanitize=undefined is in
* effect; in that case, the warning is deferred to runtime...).
*
* The slightly excessive casting in type_min is to make sure the
* macros also produce sensible values for the exotic type _Bool. [The
* overflow checkers only almost work for _Bool, but that's
* a-feature-not-a-bug, since people shouldn't be doing arithmetic on
* _Bools. Besides, the gcc builtins don't allow _Bool* as third
* argument.]
*
* Idea stolen from
* https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
* credit to Christian Biere.
*/
#define is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
/*
* For simplicity and code hygiene, the fallback code below insists on
* a, b and *d having the same type (similar to the min() and max()
* macros), whereas gcc's type-generic overflow checkers accept
* different types. Hence we don't just make check_add_overflow an
* alias for __builtin_add_overflow, but add type checks similar to
* below.
*/
#define check_add_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
__builtin_add_overflow(__a, __b, __d); \
})
#define check_sub_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
__builtin_sub_overflow(__a, __b, __d); \
})
#define check_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
__builtin_mul_overflow(__a, __b, __d); \
})
#else
/* Checking for unsigned overflow is relatively easy without causing UB. */
#define __unsigned_add_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = __a + __b; \
*__d < __a; \
})
#define __unsigned_sub_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = __a - __b; \
__a < __b; \
})
/*
* If one of a or b is a compile-time constant, this avoids a division.
*/
#define __unsigned_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = __a * __b; \
__builtin_constant_p(__b) ? \
__b > 0 && __a > type_max(typeof(__a)) / __b : \
__a > 0 && __b > type_max(typeof(__b)) / __a; \
})
/*
* For signed types, detecting overflow is much harder, especially if
* we want to avoid UB. But the interface of these macros is such that
* we must provide a result in *d, and in fact we must produce the
* result promised by gcc's builtins, which is simply the possibly
* wrapped-around value. Fortunately, we can just formally do the
* operations in the widest relevant unsigned type (u64) and then
* truncate the result - gcc is smart enough to generate the same code
* with and without the (u64) casts.
*/
/*
* Adding two signed integers can overflow only if they have the same
* sign, and overflow has happened iff the result has the opposite
* sign.
*/
#define __signed_add_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = (u64)__a + (u64)__b; \
(((~(__a ^ __b)) & (*__d ^ __a)) \
& type_min(typeof(__a))) != 0; \
})
/*
* Subtraction is similar, except that overflow can now happen only
* when the signs are opposite. In this case, overflow has happened if
* the result has the opposite sign of a.
*/
#define __signed_sub_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = (u64)__a - (u64)__b; \
((((__a ^ __b)) & (*__d ^ __a)) \
& type_min(typeof(__a))) != 0; \
})
/*
* Signed multiplication is rather hard. gcc always follows C99, so
* division is truncated towards 0. This means that we can write the
* overflow check like this:
*
* (a > 0 && (b > MAX/a || b < MIN/a)) ||
* (a < -1 && (b > MIN/a || b < MAX/a) ||
* (a == -1 && b == MIN)
*
* The redundant casts of -1 are to silence an annoying -Wtype-limits
* (included in -Wextra) warning: When the type is u8 or u16, the
* __b_c_e in check_mul_overflow obviously selects
* __unsigned_mul_overflow, but unfortunately gcc still parses this
* code and warns about the limited range of __b.
*/
#define __signed_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
typeof(a) __tmax = type_max(typeof(a)); \
typeof(a) __tmin = type_min(typeof(a)); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = (u64)__a * (u64)__b; \
(__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
(__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
(__b == (typeof(__b))-1 && __a == __tmin); \
})
#define check_add_overflow(a, b, d) \
__builtin_choose_expr(is_signed_type(typeof(a)), \
__signed_add_overflow(a, b, d), \
__unsigned_add_overflow(a, b, d))
#define check_sub_overflow(a, b, d) \
__builtin_choose_expr(is_signed_type(typeof(a)), \
__signed_sub_overflow(a, b, d), \
__unsigned_sub_overflow(a, b, d))
#define check_mul_overflow(a, b, d) \
__builtin_choose_expr(is_signed_type(typeof(a)), \
__signed_mul_overflow(a, b, d), \
__unsigned_mul_overflow(a, b, d))
#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
/**
* array_size() - Calculate size of 2-dimensional array.
*
* @a: dimension one
* @b: dimension two
*
* Calculates size of 2-dimensional array: @a * @b.
*
* Returns: number of bytes needed to represent the array or SIZE_MAX on
* overflow.
*/
static inline __must_check size_t array_size(size_t a, size_t b)
{
size_t bytes;
if (check_mul_overflow(a, b, &bytes))
return SIZE_MAX;
return bytes;
}
/**
* array3_size() - Calculate size of 3-dimensional array.
*
* @a: dimension one
* @b: dimension two
* @c: dimension three
*
* Calculates size of 3-dimensional array: @a * @b * @c.
*
* Returns: number of bytes needed to represent the array or SIZE_MAX on
* overflow.
*/
static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
{
size_t bytes;
if (check_mul_overflow(a, b, &bytes))
return SIZE_MAX;
if (check_mul_overflow(bytes, c, &bytes))
return SIZE_MAX;
return bytes;
}
static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c)
{
size_t bytes;
if (check_mul_overflow(n, size, &bytes))
return SIZE_MAX;
if (check_add_overflow(bytes, c, &bytes))
return SIZE_MAX;
return bytes;
}
/**
* struct_size() - Calculate size of structure with trailing array.
* @p: Pointer to the structure.
* @member: Name of the array member.
* @n: Number of elements in the array.
*
* Calculates size of memory needed for structure @p followed by an
* array of @n @member elements.
*
* Return: number of bytes needed or SIZE_MAX on overflow.
*/
#define struct_size(p, member, n) \
__ab_c_size(n, \
sizeof(*(p)->member) + __must_be_array((p)->member),\
sizeof(*(p)))
#endif /* __LINUX_OVERFLOW_H */
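/*
 * A minimal usage sketch (illustrative, not part of this header): the sample
 * struct and the kmalloc() call are assumptions for the example; the overflow
 * helpers themselves are the ones defined above.
 *
 *	struct sample {
 *		size_t count;
 *		u32 items[];			// trailing flexible array
 *	};
 *
 *	size_t bytes;
 *	if (check_mul_overflow(n, sizeof(u32), &bytes))
 *		return -EOVERFLOW;		// n * sizeof(u32) would wrap around
 *
 *	// or, for the whole allocation, let struct_size() saturate to SIZE_MAX:
 *	struct sample *p = kmalloc(struct_size(p, items, n), GFP_KERNEL);
 */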
| {
"language": "C"
} |
/***************************************************************************
urlutils.h
---------------
begin : Jun 2020
copyright : (C) 2020 by Ivan Ivanov
email : ivan@opengis.ch
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
#ifndef URLUTILS_H
#define URLUTILS_H
#include <QObject>
class UrlUtils : public QObject
{
Q_OBJECT
public:
explicit UrlUtils( QObject *parent = nullptr );
/**
* Checks whether the provided string is a relative url (has no protocol or starts with `file://`).
*/
static Q_INVOKABLE bool isRelativeOrFileUrl( const QString &url );
};
#endif // URLUTILS_H
| {
"language": "C"
} |
// SPDX-License-Identifier: GPL-2.0
/*
* Tahvo USB transceiver driver
*
* Copyright (C) 2005-2006 Nokia Corporation
*
* Parts copied from isp1301_omap.c.
* Copyright (C) 2004 Texas Instruments
* Copyright (C) 2004 David Brownell
*
* Original driver written by Juha Yrjölä, Tony Lindgren and Timo Teräs.
* Modified for Retu/Tahvo MFD by Aaro Koskinen.
*/
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/usb.h>
#include <linux/extcon-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb/otg.h>
#include <linux/mfd/retu.h>
#include <linux/usb/gadget.h>
#include <linux/platform_device.h>
#define DRIVER_NAME "tahvo-usb"
#define TAHVO_REG_IDSR 0x02
#define TAHVO_REG_USBR 0x06
#define USBR_SLAVE_CONTROL (1 << 8)
#define USBR_VPPVIO_SW (1 << 7)
#define USBR_SPEED (1 << 6)
#define USBR_REGOUT (1 << 5)
#define USBR_MASTER_SW2 (1 << 4)
#define USBR_MASTER_SW1 (1 << 3)
#define USBR_SLAVE_SW (1 << 2)
#define USBR_NSUSPEND (1 << 1)
#define USBR_SEMODE (1 << 0)
#define TAHVO_MODE_HOST 0
#define TAHVO_MODE_PERIPHERAL 1
struct tahvo_usb {
struct platform_device *pt_dev;
struct usb_phy phy;
int vbus_state;
struct mutex serialize;
struct clk *ick;
int irq;
int tahvo_mode;
struct extcon_dev *extcon;
};
static const unsigned int tahvo_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_NONE,
};
static ssize_t vbus_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct tahvo_usb *tu = dev_get_drvdata(device);
return sprintf(buf, "%s\n", tu->vbus_state ? "on" : "off");
}
static DEVICE_ATTR_RO(vbus);
static void check_vbus_state(struct tahvo_usb *tu)
{
struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
int reg, prev_state;
reg = retu_read(rdev, TAHVO_REG_IDSR);
if (reg & TAHVO_STAT_VBUS) {
switch (tu->phy.otg->state) {
case OTG_STATE_B_IDLE:
/* Enable the gadget driver */
if (tu->phy.otg->gadget)
usb_gadget_vbus_connect(tu->phy.otg->gadget);
tu->phy.otg->state = OTG_STATE_B_PERIPHERAL;
usb_phy_set_event(&tu->phy, USB_EVENT_ENUMERATED);
break;
case OTG_STATE_A_IDLE:
/*
* Session is now valid assuming the USB hub is driving
* Vbus.
*/
tu->phy.otg->state = OTG_STATE_A_HOST;
break;
default:
break;
}
dev_info(&tu->pt_dev->dev, "USB cable connected\n");
} else {
switch (tu->phy.otg->state) {
case OTG_STATE_B_PERIPHERAL:
if (tu->phy.otg->gadget)
usb_gadget_vbus_disconnect(tu->phy.otg->gadget);
tu->phy.otg->state = OTG_STATE_B_IDLE;
usb_phy_set_event(&tu->phy, USB_EVENT_NONE);
break;
case OTG_STATE_A_HOST:
tu->phy.otg->state = OTG_STATE_A_IDLE;
break;
default:
break;
}
dev_info(&tu->pt_dev->dev, "USB cable disconnected\n");
}
prev_state = tu->vbus_state;
tu->vbus_state = reg & TAHVO_STAT_VBUS;
if (prev_state != tu->vbus_state) {
extcon_set_state_sync(tu->extcon, EXTCON_USB, tu->vbus_state);
sysfs_notify(&tu->pt_dev->dev.kobj, NULL, "vbus_state");
}
}
static void tahvo_usb_become_host(struct tahvo_usb *tu)
{
struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
extcon_set_state_sync(tu->extcon, EXTCON_USB_HOST, true);
/* Power up the transceiver in USB host mode */
retu_write(rdev, TAHVO_REG_USBR, USBR_REGOUT | USBR_NSUSPEND |
USBR_MASTER_SW2 | USBR_MASTER_SW1);
tu->phy.otg->state = OTG_STATE_A_IDLE;
check_vbus_state(tu);
}
static void tahvo_usb_stop_host(struct tahvo_usb *tu)
{
tu->phy.otg->state = OTG_STATE_A_IDLE;
}
static void tahvo_usb_become_peripheral(struct tahvo_usb *tu)
{
struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
extcon_set_state_sync(tu->extcon, EXTCON_USB_HOST, false);
/* Power up transceiver and set it in USB peripheral mode */
retu_write(rdev, TAHVO_REG_USBR, USBR_SLAVE_CONTROL | USBR_REGOUT |
USBR_NSUSPEND | USBR_SLAVE_SW);
tu->phy.otg->state = OTG_STATE_B_IDLE;
check_vbus_state(tu);
}
static void tahvo_usb_stop_peripheral(struct tahvo_usb *tu)
{
if (tu->phy.otg->gadget)
usb_gadget_vbus_disconnect(tu->phy.otg->gadget);
tu->phy.otg->state = OTG_STATE_B_IDLE;
}
static void tahvo_usb_power_off(struct tahvo_usb *tu)
{
struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
/* Disable gadget controller if any */
if (tu->phy.otg->gadget)
usb_gadget_vbus_disconnect(tu->phy.otg->gadget);
/* Power off transceiver */
retu_write(rdev, TAHVO_REG_USBR, 0);
tu->phy.otg->state = OTG_STATE_UNDEFINED;
}
static int tahvo_usb_set_suspend(struct usb_phy *dev, int suspend)
{
struct tahvo_usb *tu = container_of(dev, struct tahvo_usb, phy);
struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
u16 w;
dev_dbg(&tu->pt_dev->dev, "%s\n", __func__);
w = retu_read(rdev, TAHVO_REG_USBR);
if (suspend)
w &= ~USBR_NSUSPEND;
else
w |= USBR_NSUSPEND;
retu_write(rdev, TAHVO_REG_USBR, w);
return 0;
}
static int tahvo_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
{
struct tahvo_usb *tu = container_of(otg->usb_phy, struct tahvo_usb,
phy);
dev_dbg(&tu->pt_dev->dev, "%s %p\n", __func__, host);
mutex_lock(&tu->serialize);
if (host == NULL) {
if (tu->tahvo_mode == TAHVO_MODE_HOST)
tahvo_usb_power_off(tu);
otg->host = NULL;
mutex_unlock(&tu->serialize);
return 0;
}
if (tu->tahvo_mode == TAHVO_MODE_HOST) {
otg->host = NULL;
tahvo_usb_become_host(tu);
}
otg->host = host;
mutex_unlock(&tu->serialize);
return 0;
}
static int tahvo_usb_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
struct tahvo_usb *tu = container_of(otg->usb_phy, struct tahvo_usb,
phy);
dev_dbg(&tu->pt_dev->dev, "%s %p\n", __func__, gadget);
mutex_lock(&tu->serialize);
if (!gadget) {
if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL)
tahvo_usb_power_off(tu);
tu->phy.otg->gadget = NULL;
mutex_unlock(&tu->serialize);
return 0;
}
tu->phy.otg->gadget = gadget;
if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL)
tahvo_usb_become_peripheral(tu);
mutex_unlock(&tu->serialize);
return 0;
}
static irqreturn_t tahvo_usb_vbus_interrupt(int irq, void *_tu)
{
struct tahvo_usb *tu = _tu;
mutex_lock(&tu->serialize);
check_vbus_state(tu);
mutex_unlock(&tu->serialize);
return IRQ_HANDLED;
}
static ssize_t otg_mode_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct tahvo_usb *tu = dev_get_drvdata(device);
switch (tu->tahvo_mode) {
case TAHVO_MODE_HOST:
return sprintf(buf, "host\n");
case TAHVO_MODE_PERIPHERAL:
return sprintf(buf, "peripheral\n");
}
return -EINVAL;
}
static ssize_t otg_mode_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct tahvo_usb *tu = dev_get_drvdata(device);
int r;
mutex_lock(&tu->serialize);
if (count >= 4 && strncmp(buf, "host", 4) == 0) {
if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL)
tahvo_usb_stop_peripheral(tu);
tu->tahvo_mode = TAHVO_MODE_HOST;
if (tu->phy.otg->host) {
dev_info(device, "HOST mode: host controller present\n");
tahvo_usb_become_host(tu);
} else {
dev_info(device, "HOST mode: no host controller, powering off\n");
tahvo_usb_power_off(tu);
}
r = strlen(buf);
} else if (count >= 10 && strncmp(buf, "peripheral", 10) == 0) {
if (tu->tahvo_mode == TAHVO_MODE_HOST)
tahvo_usb_stop_host(tu);
tu->tahvo_mode = TAHVO_MODE_PERIPHERAL;
if (tu->phy.otg->gadget) {
dev_info(device, "PERIPHERAL mode: gadget driver present\n");
tahvo_usb_become_peripheral(tu);
} else {
dev_info(device, "PERIPHERAL mode: no gadget driver, powering off\n");
tahvo_usb_power_off(tu);
}
r = strlen(buf);
} else {
r = -EINVAL;
}
mutex_unlock(&tu->serialize);
return r;
}
static DEVICE_ATTR_RW(otg_mode);
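/*
 * Illustrative note (the sysfs path below is an assumption based on the
 * driver name, not taken from this file): otg_mode_store() above accepts the
 * strings "host" and "peripheral", so the mode can be switched at runtime
 * with e.g.
 *
 *	echo peripheral > /sys/devices/.../tahvo-usb/otg_mode
 */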
static struct attribute *tahvo_attributes[] = {
&dev_attr_vbus.attr,
&dev_attr_otg_mode.attr,
NULL
};
static const struct attribute_group tahvo_attr_group = {
.attrs = tahvo_attributes,
};
static int tahvo_usb_probe(struct platform_device *pdev)
{
struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
struct tahvo_usb *tu;
int ret;
tu = devm_kzalloc(&pdev->dev, sizeof(*tu), GFP_KERNEL);
if (!tu)
return -ENOMEM;
tu->phy.otg = devm_kzalloc(&pdev->dev, sizeof(*tu->phy.otg),
GFP_KERNEL);
if (!tu->phy.otg)
return -ENOMEM;
tu->pt_dev = pdev;
/* Default mode */
#ifdef CONFIG_TAHVO_USB_HOST_BY_DEFAULT
tu->tahvo_mode = TAHVO_MODE_HOST;
#else
tu->tahvo_mode = TAHVO_MODE_PERIPHERAL;
#endif
mutex_init(&tu->serialize);
tu->ick = devm_clk_get(&pdev->dev, "usb_l4_ick");
if (!IS_ERR(tu->ick))
clk_enable(tu->ick);
/*
* Set initial state, so that we generate kevents only on state changes.
*/
tu->vbus_state = retu_read(rdev, TAHVO_REG_IDSR) & TAHVO_STAT_VBUS;
tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable);
if (IS_ERR(tu->extcon)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
ret = PTR_ERR(tu->extcon);
goto err_disable_clk;
}
ret = devm_extcon_dev_register(&pdev->dev, tu->extcon);
if (ret) {
dev_err(&pdev->dev, "could not register extcon device: %d\n",
ret);
goto err_disable_clk;
}
/* Set the initial cable state. */
extcon_set_state_sync(tu->extcon, EXTCON_USB_HOST,
tu->tahvo_mode == TAHVO_MODE_HOST);
extcon_set_state_sync(tu->extcon, EXTCON_USB, tu->vbus_state);
/* Create OTG interface */
tahvo_usb_power_off(tu);
tu->phy.dev = &pdev->dev;
tu->phy.otg->state = OTG_STATE_UNDEFINED;
tu->phy.label = DRIVER_NAME;
tu->phy.set_suspend = tahvo_usb_set_suspend;
tu->phy.otg->usb_phy = &tu->phy;
tu->phy.otg->set_host = tahvo_usb_set_host;
tu->phy.otg->set_peripheral = tahvo_usb_set_peripheral;
ret = usb_add_phy(&tu->phy, USB_PHY_TYPE_USB2);
if (ret < 0) {
dev_err(&pdev->dev, "cannot register USB transceiver: %d\n",
ret);
goto err_disable_clk;
}
dev_set_drvdata(&pdev->dev, tu);
tu->irq = platform_get_irq(pdev, 0);
ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt,
IRQF_ONESHOT,
"tahvo-vbus", tu);
if (ret) {
dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n",
ret);
goto err_remove_phy;
}
/* Attributes */
ret = sysfs_create_group(&pdev->dev.kobj, &tahvo_attr_group);
if (ret) {
dev_err(&pdev->dev, "cannot create sysfs group: %d\n", ret);
goto err_free_irq;
}
return 0;
err_free_irq:
free_irq(tu->irq, tu);
err_remove_phy:
usb_remove_phy(&tu->phy);
err_disable_clk:
if (!IS_ERR(tu->ick))
clk_disable(tu->ick);
return ret;
}
static int tahvo_usb_remove(struct platform_device *pdev)
{
struct tahvo_usb *tu = platform_get_drvdata(pdev);
sysfs_remove_group(&pdev->dev.kobj, &tahvo_attr_group);
free_irq(tu->irq, tu);
usb_remove_phy(&tu->phy);
if (!IS_ERR(tu->ick))
clk_disable(tu->ick);
return 0;
}
static struct platform_driver tahvo_usb_driver = {
.probe = tahvo_usb_probe,
.remove = tahvo_usb_remove,
.driver = {
.name = "tahvo-usb",
},
};
module_platform_driver(tahvo_usb_driver);
MODULE_DESCRIPTION("Tahvo USB transceiver driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Juha Yrjölä, Tony Lindgren, and Timo Teräs");
MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
| {
"language": "C"
} |
/*
* Probing flash chips with QINFO records.
* (C) 2008 Korolev Alexey <akorolev@infradead.org>
* (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/pfow.h>
#include <linux/mtd/qinfo.h>
static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr);
struct mtd_info *lpddr_probe(struct map_info *map);
static struct lpddr_private *lpddr_probe_chip(struct map_info *map);
static int lpddr_pfow_present(struct map_info *map,
struct lpddr_private *lpddr);
static struct qinfo_query_info qinfo_array[] = {
/* General device info */
{0, 0, "DevSizeShift", "Device size 2^n bytes"},
{0, 3, "BufSizeShift", "Program buffer size 2^n bytes"},
/* Erase block information */
{1, 1, "TotalBlocksNum", "Total number of blocks"},
{1, 2, "UniformBlockSizeShift", "Uniform block size 2^n bytes"},
/* Partition information */
{2, 1, "HWPartsNum", "Number of hardware partitions"},
/* Optional features */
{5, 1, "SuspEraseSupp", "Suspend erase supported"},
/* Operation typical time */
{10, 0, "SingleWordProgTime", "Single word program 2^n u-sec"},
{10, 1, "ProgBufferTime", "Program buffer write 2^n u-sec"},
{10, 2, "BlockEraseTime", "Block erase 2^n m-sec"},
{10, 3, "FullChipEraseTime", "Full chip erase 2^n m-sec"},
};
static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
{
int qinfo_lines = ARRAY_SIZE(qinfo_array);
int i;
int bankwidth = map_bankwidth(map) * 8;
int major, minor;
for (i = 0; i < qinfo_lines; i++) {
if (strcmp(id_str, qinfo_array[i].id_str) == 0) {
major = qinfo_array[i].major & ((1 << bankwidth) - 1);
minor = qinfo_array[i].minor & ((1 << bankwidth) - 1);
return minor | (major << bankwidth);
}
}
printk(KERN_ERR"%s qinfo id string is wrong! \n", map->name);
BUG();
return -1;
}
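/*
 * Worked example (illustrative): with a 2-byte bank width, bankwidth above is
 * 16, so the "ProgBufferTime" record (major 10, minor 1 in qinfo_array) packs
 * into the position 1 | (10 << 16) = 0x000A0001.
 */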
static uint16_t lpddr_info_query(struct map_info *map, char *id_str)
{
unsigned int dsr, val;
int bits_per_chip = map_bankwidth(map) * 8;
unsigned long adr = lpddr_get_qinforec_pos(map, id_str);
int attempts = 20;
/* Write a request for the PFOW record */
map_write(map, CMD(LPDDR_INFO_QUERY),
map->pfow_base + PFOW_COMMAND_CODE);
map_write(map, CMD(adr & ((1 << bits_per_chip) - 1)),
map->pfow_base + PFOW_COMMAND_ADDRESS_L);
map_write(map, CMD(adr >> bits_per_chip),
map->pfow_base + PFOW_COMMAND_ADDRESS_H);
map_write(map, CMD(LPDDR_START_EXECUTION),
map->pfow_base + PFOW_COMMAND_EXECUTE);
while ((attempts--) > 0) {
dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
if (dsr & DSR_READY_STATUS)
break;
udelay(10);
}
val = CMDVAL(map_read(map, map->pfow_base + PFOW_COMMAND_DATA));
return val;
}
static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
{
map_word pfow_val[4];
/* Check identification string */
pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
pfow_val[2] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_O);
pfow_val[3] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_W);
if (!map_word_equal(map, CMD('P'), pfow_val[0]))
goto out;
if (!map_word_equal(map, CMD('F'), pfow_val[1]))
goto out;
if (!map_word_equal(map, CMD('O'), pfow_val[2]))
goto out;
if (!map_word_equal(map, CMD('W'), pfow_val[3]))
goto out;
return 1; /* "PFOW" is found */
out:
printk(KERN_WARNING"%s: PFOW string at 0x%lx is not found \n",
map->name, map->pfow_base);
return 0;
}
static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
{
lpddr->qinfo = kzalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
if (!lpddr->qinfo)
return 0;
/* Get the ManuID */
lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
/* Get the DeviceID */
lpddr->DevId = CMDVAL(map_read(map, map->pfow_base + PFOW_DEVICE_ID));
/* read parameters from chip qinfo table */
lpddr->qinfo->DevSizeShift = lpddr_info_query(map, "DevSizeShift");
lpddr->qinfo->TotalBlocksNum = lpddr_info_query(map, "TotalBlocksNum");
lpddr->qinfo->BufSizeShift = lpddr_info_query(map, "BufSizeShift");
lpddr->qinfo->HWPartsNum = lpddr_info_query(map, "HWPartsNum");
lpddr->qinfo->UniformBlockSizeShift =
lpddr_info_query(map, "UniformBlockSizeShift");
lpddr->qinfo->SuspEraseSupp = lpddr_info_query(map, "SuspEraseSupp");
lpddr->qinfo->SingleWordProgTime =
lpddr_info_query(map, "SingleWordProgTime");
lpddr->qinfo->ProgBufferTime = lpddr_info_query(map, "ProgBufferTime");
lpddr->qinfo->BlockEraseTime = lpddr_info_query(map, "BlockEraseTime");
return 1;
}
static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
{
struct lpddr_private lpddr;
struct lpddr_private *retlpddr;
int numvirtchips;
if ((map->pfow_base + 0x1000) >= map->size) {
printk(KERN_NOTICE"%s Probe at base (0x%08lx) past the end of"
"the map(0x%08lx)\n", map->name,
(unsigned long)map->pfow_base, map->size - 1);
return NULL;
}
memset(&lpddr, 0, sizeof(struct lpddr_private));
if (!lpddr_pfow_present(map, &lpddr))
return NULL;
if (!lpddr_chip_setup(map, &lpddr))
return NULL;
/* Ok so we found a chip */
lpddr.chipshift = lpddr.qinfo->DevSizeShift;
lpddr.numchips = 1;
numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
retlpddr = kzalloc(sizeof(struct lpddr_private) +
numvirtchips * sizeof(struct flchip), GFP_KERNEL);
if (!retlpddr)
return NULL;
memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private));
retlpddr->numchips = numvirtchips;
retlpddr->chipshift = retlpddr->qinfo->DevSizeShift -
__ffs(retlpddr->qinfo->HWPartsNum);
return retlpddr;
}
struct mtd_info *lpddr_probe(struct map_info *map)
{
struct mtd_info *mtd = NULL;
struct lpddr_private *lpddr;
/* First probe the map to see if we can open PFOW here */
lpddr = lpddr_probe_chip(map);
if (!lpddr)
return NULL;
map->fldrv_priv = lpddr;
mtd = lpddr_cmdset(map);
if (mtd) {
if (mtd->size > map->size) {
printk(KERN_WARNING "Reducing visibility of %ldKiB chip"
"to %ldKiB\n", (unsigned long)mtd->size >> 10,
(unsigned long)map->size >> 10);
mtd->size = map->size;
}
return mtd;
}
kfree(lpddr->qinfo);
kfree(lpddr);
map->fldrv_priv = NULL;
return NULL;
}
static struct mtd_chip_driver lpddr_chipdrv = {
.probe = lpddr_probe,
.name = "qinfo_probe",
.module = THIS_MODULE
};
static int __init lpddr_probe_init(void)
{
register_mtd_chip_driver(&lpddr_chipdrv);
return 0;
}
static void __exit lpddr_probe_exit(void)
{
unregister_mtd_chip_driver(&lpddr_chipdrv);
}
module_init(lpddr_probe_init);
module_exit(lpddr_probe_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vasiliy Leonenko <vasiliy.leonenko@gmail.com>");
MODULE_DESCRIPTION("Driver to probe qinfo flash chips");
| {
"language": "C"
} |
/* Copyright (C) 1991-2019 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/*
* POSIX Standard: 2.10 Symbolic Constants <unistd.h>
*/
#ifndef _UNISTD_H
#define _UNISTD_H 1
#include <features.h>
__BEGIN_DECLS
/* These may be used to determine what facilities are present at compile time.
Their values can be obtained at run time from `sysconf'. */
#ifdef __USE_XOPEN2K8
/* POSIX Standard approved as ISO/IEC 9945-1 as of September 2008. */
# define _POSIX_VERSION 200809L
#elif defined __USE_XOPEN2K
/* POSIX Standard approved as ISO/IEC 9945-1 as of December 2001. */
# define _POSIX_VERSION 200112L
#elif defined __USE_POSIX199506
/* POSIX Standard approved as ISO/IEC 9945-1 as of June 1995. */
# define _POSIX_VERSION 199506L
#elif defined __USE_POSIX199309
/* POSIX Standard approved as ISO/IEC 9945-1 as of September 1993. */
# define _POSIX_VERSION 199309L
#else
/* POSIX Standard approved as ISO/IEC 9945-1 as of September 1990. */
# define _POSIX_VERSION 199009L
#endif
/* These are not #ifdef __USE_POSIX2 because they are
in the theoretically application-owned namespace. */
#ifdef __USE_XOPEN2K8
# define __POSIX2_THIS_VERSION 200809L
/* The utilities on GNU systems also correspond to this version. */
#elif defined __USE_XOPEN2K
/* The utilities on GNU systems also correspond to this version. */
# define __POSIX2_THIS_VERSION 200112L
#elif defined __USE_POSIX199506
/* The utilities on GNU systems also correspond to this version. */
# define __POSIX2_THIS_VERSION 199506L
#else
/* The utilities on GNU systems also correspond to this version. */
# define __POSIX2_THIS_VERSION 199209L
#endif
/* The utilities on GNU systems also correspond to this version. */
#define _POSIX2_VERSION __POSIX2_THIS_VERSION
/* This symbol was required until the 2001 edition of POSIX. */
#define _POSIX2_C_VERSION __POSIX2_THIS_VERSION
/* If defined, the implementation supports the
C Language Bindings Option. */
#define _POSIX2_C_BIND __POSIX2_THIS_VERSION
/* If defined, the implementation supports the
C Language Development Utilities Option. */
#define _POSIX2_C_DEV __POSIX2_THIS_VERSION
/* If defined, the implementation supports the
Software Development Utilities Option. */
#define _POSIX2_SW_DEV __POSIX2_THIS_VERSION
/* If defined, the implementation supports the
creation of locales with the localedef utility. */
#define _POSIX2_LOCALEDEF __POSIX2_THIS_VERSION
/* X/Open version number to which the library conforms. It is selectable. */
#ifdef __USE_XOPEN2K8
# define _XOPEN_VERSION 700
#elif defined __USE_XOPEN2K
# define _XOPEN_VERSION 600
#elif defined __USE_UNIX98
# define _XOPEN_VERSION 500
#else
# define _XOPEN_VERSION 4
#endif
/* Commands and utilities from XPG4 are available. */
#define _XOPEN_XCU_VERSION 4
/* We are compatible with the old published standards as well. */
#define _XOPEN_XPG2 1
#define _XOPEN_XPG3 1
#define _XOPEN_XPG4 1
/* The X/Open Unix extensions are available. */
#define _XOPEN_UNIX 1
/* The enhanced internationalization capabilities according to XPG4.2
are present. */
#define _XOPEN_ENH_I18N 1
/* The legacy interfaces are also available. */
#define _XOPEN_LEGACY 1
/* Get values of POSIX options:
If these symbols are defined, the corresponding features are
always available. If not, they may be available sometimes.
The current values can be obtained with `sysconf'.
_POSIX_JOB_CONTROL Job control is supported.
_POSIX_SAVED_IDS Processes have a saved set-user-ID
and a saved set-group-ID.
_POSIX_REALTIME_SIGNALS Real-time, queued signals are supported.
_POSIX_PRIORITY_SCHEDULING Priority scheduling is supported.
_POSIX_TIMERS POSIX.4 clocks and timers are supported.
_POSIX_ASYNCHRONOUS_IO Asynchronous I/O is supported.
_POSIX_PRIORITIZED_IO Prioritized asynchronous I/O is supported.
_POSIX_SYNCHRONIZED_IO Synchronizing file data is supported.
_POSIX_FSYNC The fsync function is present.
_POSIX_MAPPED_FILES Mapping of files to memory is supported.
_POSIX_MEMLOCK Locking of all memory is supported.
_POSIX_MEMLOCK_RANGE Locking of ranges of memory is supported.
_POSIX_MEMORY_PROTECTION Setting of memory protections is supported.
_POSIX_MESSAGE_PASSING POSIX.4 message queues are supported.
_POSIX_SEMAPHORES POSIX.4 counting semaphores are supported.
_POSIX_SHARED_MEMORY_OBJECTS POSIX.4 shared memory objects are supported.
_POSIX_THREADS POSIX.1c pthreads are supported.
_POSIX_THREAD_ATTR_STACKADDR Thread stack address attribute option supported.
_POSIX_THREAD_ATTR_STACKSIZE Thread stack size attribute option supported.
_POSIX_THREAD_SAFE_FUNCTIONS Thread-safe functions are supported.
_POSIX_THREAD_PRIORITY_SCHEDULING
POSIX.1c thread execution scheduling supported.
_POSIX_THREAD_PRIO_INHERIT Thread priority inheritance option supported.
_POSIX_THREAD_PRIO_PROTECT Thread priority protection option supported.
_POSIX_THREAD_PROCESS_SHARED Process-shared synchronization supported.
_POSIX_PII Protocol-independent interfaces are supported.
_POSIX_PII_XTI XTI protocol-indep. interfaces are supported.
_POSIX_PII_SOCKET Socket protocol-indep. interfaces are supported.
_POSIX_PII_INTERNET Internet family of protocols supported.
_POSIX_PII_INTERNET_STREAM Connection-mode Internet protocol supported.
_POSIX_PII_INTERNET_DGRAM Connectionless Internet protocol supported.
_POSIX_PII_OSI ISO/OSI family of protocols supported.
_POSIX_PII_OSI_COTS Connection-mode ISO/OSI service supported.
_POSIX_PII_OSI_CLTS Connectionless ISO/OSI service supported.
_POSIX_POLL Implementation supports `poll' function.
_POSIX_SELECT Implementation supports `select' and `pselect'.
_XOPEN_REALTIME X/Open realtime support is available.
_XOPEN_REALTIME_THREADS X/Open realtime thread support is available.
_XOPEN_SHM Shared memory interface according to XPG4.2.
_XBS5_ILP32_OFF32 Implementation provides environment with 32-bit
int, long, pointer, and off_t types.
_XBS5_ILP32_OFFBIG Implementation provides environment with 32-bit
int, long, and pointer and off_t with at least
64 bits.
_XBS5_LP64_OFF64 Implementation provides environment with 32-bit
int, and 64-bit long, pointer, and off_t types.
_XBS5_LPBIG_OFFBIG Implementation provides environment with at
least 32 bits int and long, pointer, and off_t
with at least 64 bits.
If any of these symbols is defined as -1, the corresponding option is not
true for any file. If any is defined as other than -1, the corresponding
option is true for all files. If a symbol is not defined at all, the value
for a specific file can be obtained from `pathconf' and `fpathconf'.
_POSIX_CHOWN_RESTRICTED Only the super user can use `chown' to change
the owner of a file. `chown' can only be used
to change the group ID of a file to a group of
which the calling process is a member.
_POSIX_NO_TRUNC Pathname components longer than
NAME_MAX generate an error.
_POSIX_VDISABLE If defined, if the value of an element of the
`c_cc' member of `struct termios' is
_POSIX_VDISABLE, no character will have the
effect associated with that element.
_POSIX_SYNC_IO Synchronous I/O may be performed.
_POSIX_ASYNC_IO Asynchronous I/O may be performed.
_POSIX_PRIO_IO Prioritized Asynchronous I/O may be performed.
Support for the Large File Support interface is not generally available.
If it is available the following constants are defined to one.
_LFS64_LARGEFILE Low-level I/O supports large files.
_LFS64_STDIO Standard I/O supports large files.
*/
#include <bits/posix_opt.h>
/* Get the environment definitions from Unix98. */
#if defined __USE_UNIX98 || defined __USE_XOPEN2K
# include <bits/environments.h>
#endif
/* Standard file descriptors. */
#define STDIN_FILENO 0 /* Standard input. */
#define STDOUT_FILENO 1 /* Standard output. */
#define STDERR_FILENO 2 /* Standard error output. */
/* All functions that are not declared anywhere else. */
#include <bits/types.h>
#ifndef __ssize_t_defined
typedef __ssize_t ssize_t;
# define __ssize_t_defined
#endif
#define __need_size_t
#define __need_NULL
#include <stddef.h>
#if defined __USE_XOPEN || defined __USE_XOPEN2K
/* The Single Unix specification says that some more types are
available here. */
# ifndef __gid_t_defined
typedef __gid_t gid_t;
# define __gid_t_defined
# endif
# ifndef __uid_t_defined
typedef __uid_t uid_t;
# define __uid_t_defined
# endif
# ifndef __off_t_defined
# ifndef __USE_FILE_OFFSET64
typedef __off_t off_t;
# else
typedef __off64_t off_t;
# endif
# define __off_t_defined
# endif
# if defined __USE_LARGEFILE64 && !defined __off64_t_defined
typedef __off64_t off64_t;
# define __off64_t_defined
# endif
# ifndef __useconds_t_defined
typedef __useconds_t useconds_t;
# define __useconds_t_defined
# endif
# ifndef __pid_t_defined
typedef __pid_t pid_t;
# define __pid_t_defined
# endif
#endif /* X/Open */
#if defined __USE_XOPEN_EXTENDED || defined __USE_XOPEN2K
# ifndef __intptr_t_defined
typedef __intptr_t intptr_t;
# define __intptr_t_defined
# endif
#endif
#if defined __USE_MISC || defined __USE_XOPEN
# ifndef __socklen_t_defined
typedef __socklen_t socklen_t;
# define __socklen_t_defined
# endif
#endif
/* Values for the second argument to access.
These may be OR'd together. */
#define R_OK 4 /* Test for read permission. */
#define W_OK 2 /* Test for write permission. */
#define X_OK 1 /* Test for execute permission. */
#define F_OK 0 /* Test for existence. */
/* Test for access to NAME using the real UID and real GID. */
extern int access (const char *__name, int __type) __THROW __nonnull ((1));
#ifdef __USE_GNU
/* Test for access to NAME using the effective UID and GID
(as normal file operations use). */
extern int euidaccess (const char *__name, int __type)
__THROW __nonnull ((1));
/* An alias for `euidaccess', used by some other systems. */
extern int eaccess (const char *__name, int __type)
__THROW __nonnull ((1));
#endif
#ifdef __USE_ATFILE
/* Test for access to FILE relative to the directory FD is open on.
If AT_EACCESS is set in FLAG, then use effective IDs like `eaccess',
otherwise use real IDs like `access'. */
extern int faccessat (int __fd, const char *__file, int __type, int __flag)
__THROW __nonnull ((2)) __wur;
#endif /* Use GNU. */
/* Values for the WHENCE argument to lseek. */
#ifndef _STDIO_H /* <stdio.h> has the same definitions. */
# define SEEK_SET 0 /* Seek from beginning of file. */
# define SEEK_CUR 1 /* Seek from current position. */
# define SEEK_END 2 /* Seek from end of file. */
# ifdef __USE_GNU
# define SEEK_DATA 3 /* Seek to next data. */
# define SEEK_HOLE 4 /* Seek to next hole. */
# endif
#endif
#if defined __USE_MISC && !defined L_SET
/* Old BSD names for the same constants; just for compatibility. */
# define L_SET SEEK_SET
# define L_INCR SEEK_CUR
# define L_XTND SEEK_END
#endif
/* Move FD's file position to OFFSET bytes from the
beginning of the file (if WHENCE is SEEK_SET),
the current position (if WHENCE is SEEK_CUR),
or the end of the file (if WHENCE is SEEK_END).
Return the new file position. */
#ifndef __USE_FILE_OFFSET64
extern __off_t lseek (int __fd, __off_t __offset, int __whence) __THROW;
#else
# ifdef __REDIRECT_NTH
extern __off64_t __REDIRECT_NTH (lseek,
(int __fd, __off64_t __offset, int __whence),
lseek64);
# else
# define lseek lseek64
# endif
#endif
#ifdef __USE_LARGEFILE64
extern __off64_t lseek64 (int __fd, __off64_t __offset, int __whence)
__THROW;
#endif
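/* Illustrative sketch (not part of this header): a common way to obtain a
   file's size is to seek to the end with SEEK_END and read back the
   resulting offset, then restore the old position.  The file name used
   below is only an example.

     #include <fcntl.h>
     #include <stdio.h>
     #include <unistd.h>

     int main (void)
     {
       int fd = open ("data.bin", O_RDONLY);    // hypothetical input file
       if (fd < 0)
         return 1;
       off_t old = lseek (fd, 0, SEEK_CUR);     // remember current position
       off_t size = lseek (fd, 0, SEEK_END);    // offset of end == file size
       lseek (fd, old, SEEK_SET);               // restore position
       printf ("%lld bytes\n", (long long) size);
       close (fd);
       return 0;
     }
*/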
/* Close the file descriptor FD.
This function is a cancellation point and therefore not marked with
__THROW. */
extern int close (int __fd);
/* Read NBYTES into BUF from FD. Return the
number read, -1 for errors or 0 for EOF.
This function is a cancellation point and therefore not marked with
__THROW. */
extern ssize_t read (int __fd, void *__buf, size_t __nbytes) __wur;
/* Write N bytes of BUF to FD. Return the number written, or -1.
This function is a cancellation point and therefore not marked with
__THROW. */
extern ssize_t write (int __fd, const void *__buf, size_t __n) __wur;
#if defined __USE_UNIX98 || defined __USE_XOPEN2K8
# ifndef __USE_FILE_OFFSET64
/* Read NBYTES into BUF from FD at the given position OFFSET without
changing the file pointer. Return the number read, -1 for errors
or 0 for EOF.
This function is a cancellation point and therefore not marked with
__THROW. */
extern ssize_t pread (int __fd, void *__buf, size_t __nbytes,
__off_t __offset) __wur;
/* Write N bytes of BUF to FD at the given position OFFSET without
changing the file pointer. Return the number written, or -1.
This function is a cancellation point and therefore not marked with
__THROW. */
extern ssize_t pwrite (int __fd, const void *__buf, size_t __n,
__off_t __offset) __wur;
# else
# ifdef __REDIRECT
extern ssize_t __REDIRECT (pread, (int __fd, void *__buf, size_t __nbytes,
__off64_t __offset),
pread64) __wur;
extern ssize_t __REDIRECT (pwrite, (int __fd, const void *__buf,
size_t __nbytes, __off64_t __offset),
pwrite64) __wur;
# else
# define pread pread64
# define pwrite pwrite64
# endif
# endif
# ifdef __USE_LARGEFILE64
/* Read NBYTES into BUF from FD at the given position OFFSET without
changing the file pointer. Return the number read, -1 for errors
or 0 for EOF. */
extern ssize_t pread64 (int __fd, void *__buf, size_t __nbytes,
__off64_t __offset) __wur;
/* Write N bytes of BUF to FD at the given position OFFSET without
changing the file pointer. Return the number written, or -1. */
extern ssize_t pwrite64 (int __fd, const void *__buf, size_t __n,
__off64_t __offset) __wur;
# endif
#endif
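/* Illustrative sketch (not part of this header): pread reads at an absolute
   offset without disturbing the descriptor's file position, which makes it
   convenient when several readers share one descriptor.  The file name is
   hypothetical.

     #include <fcntl.h>
     #include <unistd.h>

     int read_header (void)
     {
       char buf[16];
       int fd = open ("records.db", O_RDONLY);        // hypothetical file
       if (fd < 0)
         return -1;
       ssize_t n = pread (fd, buf, sizeof buf, 0);    // 16 bytes at offset 0
       // The file position is still 0 here; a later read() starts at the top.
       close (fd);
       return n == (ssize_t) sizeof buf ? 0 : -1;
     }
*/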
/* Create a one-way communication channel (pipe).
If successful, two file descriptors are stored in PIPEDES;
bytes written on PIPEDES[1] can be read from PIPEDES[0].
Returns 0 if successful, -1 if not. */
extern int pipe (int __pipedes[2]) __THROW __wur;
#ifdef __USE_GNU
/* Same as pipe but apply flags passed in FLAGS to the new file
descriptors. */
extern int pipe2 (int __pipedes[2], int __flags) __THROW __wur;
#endif
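/* Illustrative sketch (not part of this header): bytes written to
   PIPEDES[1] can be read back from PIPEDES[0], so a pipe can carry data
   between the two ends of a program (typically a parent and a forked child).

     #include <stdio.h>
     #include <unistd.h>

     int main (void)
     {
       int fds[2];
       char buf[32];
       if (pipe (fds) < 0)
         return 1;
       write (fds[1], "ping", 4);                   // write end
       ssize_t n = read (fds[0], buf, sizeof buf);  // read end
       printf ("got %zd bytes\n", n);
       close (fds[0]);
       close (fds[1]);
       return 0;
     }
*/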
/* Schedule an alarm. In SECONDS seconds, the process will get a SIGALRM.
If SECONDS is zero, any currently scheduled alarm will be cancelled.
The function returns the number of seconds remaining until the last
alarm scheduled would have signaled, or zero if there wasn't one.
There is no return value to indicate an error, but you can set `errno'
to 0 and check its value after calling `alarm', and this might tell you.
The signal may come late due to processor scheduling. */
extern unsigned int alarm (unsigned int __seconds) __THROW;
/* Make the process sleep for SECONDS seconds, or until a signal arrives
and is not ignored. The function returns the number of seconds less
than SECONDS which it actually slept (thus zero if it slept the full time).
If a signal handler does a `longjmp' or modifies the handling of the
SIGALRM signal while inside `sleep' call, the handling of the SIGALRM
signal afterwards is undefined. There is no return value to indicate
error, but if `sleep' returns SECONDS, it probably didn't work.
This function is a cancellation point and therefore not marked with
__THROW. */
extern unsigned int sleep (unsigned int __seconds);
#if (defined __USE_XOPEN_EXTENDED && !defined __USE_XOPEN2K8) \
|| defined __USE_MISC
/* Set an alarm to go off (generating a SIGALRM signal) in VALUE
microseconds. If INTERVAL is nonzero, when the alarm goes off, the
timer is reset to go off every INTERVAL microseconds thereafter.
Returns the number of microseconds remaining before the alarm. */
extern __useconds_t ualarm (__useconds_t __value, __useconds_t __interval)
__THROW;
/* Sleep USECONDS microseconds, or until a signal arrives that is not blocked
or ignored.
This function is a cancellation point and therefore not marked with
__THROW. */
extern int usleep (__useconds_t __useconds);
#endif
/* Suspend the process until a signal arrives.
This always returns -1 and sets `errno' to EINTR.
This function is a cancellation point and therefore not marked with
__THROW. */
extern int pause (void);
/* Change the owner and group of FILE. */
extern int chown (const char *__file, __uid_t __owner, __gid_t __group)
__THROW __nonnull ((1)) __wur;
#if defined __USE_XOPEN_EXTENDED || defined __USE_XOPEN2K8
/* Change the owner and group of the file that FD is open on. */
extern int fchown (int __fd, __uid_t __owner, __gid_t __group) __THROW __wur;
/* Change owner and group of FILE, if it is a symbolic
link the ownership of the symbolic link is changed. */
extern int lchown (const char *__file, __uid_t __owner, __gid_t __group)
__THROW __nonnull ((1)) __wur;
#endif /* Use X/Open Unix. */
#ifdef __USE_ATFILE
/* Change the owner and group of FILE relative to the directory FD is open
on. */
extern int fchownat (int __fd, const char *__file, __uid_t __owner,
__gid_t __group, int __flag)
__THROW __nonnull ((2)) __wur;
#endif /* Use GNU. */
/* Change the process's working directory to PATH. */
extern int chdir (const char *__path) __THROW __nonnull ((1)) __wur;
#if defined __USE_XOPEN_EXTENDED || defined __USE_XOPEN2K8
/* Change the process's working directory to the one FD is open on. */
extern int fchdir (int __fd) __THROW __wur;
#endif
/* Get the pathname of the current working directory,
and put it in SIZE bytes of BUF. Returns NULL if the
directory couldn't be determined or SIZE was too small.
If successful, returns BUF. In GNU, if BUF is NULL,
an array is allocated with `malloc'; the array is SIZE
bytes long, unless SIZE == 0, in which case it is as
big as necessary. */
extern char *getcwd (char *__buf, size_t __size) __THROW __wur;
#ifdef __USE_GNU
/* Return a malloc'd string containing the current directory name.
If the environment variable `PWD' is set, and its value is correct,
that value is used. */
extern char *get_current_dir_name (void) __THROW;
#endif
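/* Illustrative sketch (not part of this header): with the GNU extension
   described above, passing a null buffer and size 0 makes getcwd allocate
   a buffer of the right size with malloc, which the caller must free.

     #include <stdio.h>
     #include <stdlib.h>
     #include <unistd.h>

     int main (void)
     {
       char *cwd = getcwd (NULL, 0);   // GNU behaviour: malloc'd result
       if (cwd == NULL)
         return 1;
       printf ("%s\n", cwd);
       free (cwd);
       return 0;
     }
*/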
#if (defined __USE_XOPEN_EXTENDED && !defined __USE_XOPEN2K8) \
|| defined __USE_MISC
/* Put the absolute pathname of the current working directory in BUF.
If successful, return BUF. If not, put an error message in
BUF and return NULL. BUF should be at least PATH_MAX bytes long. */
extern char *getwd (char *__buf)
__THROW __nonnull ((1)) __attribute_deprecated__ __wur;
#endif
/* Duplicate FD, returning a new file descriptor on the same file. */
extern int dup (int __fd) __THROW __wur;
/* Duplicate FD to FD2, closing FD2 and making it open on the same file. */
extern int dup2 (int __fd, int __fd2) __THROW;
#ifdef __USE_GNU
/* Duplicate FD to FD2, closing FD2 and making it open on the same
file while setting flags according to FLAGS. */
extern int dup3 (int __fd, int __fd2, int __flags) __THROW;
#endif
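/* Illustrative sketch (not part of this header): dup2 is the usual way to
   redirect one of the standard descriptors, for example pointing stdout at a
   log file before exec'ing another program.  The file name is hypothetical.

     #include <fcntl.h>
     #include <unistd.h>

     int redirect_stdout (void)
     {
       int fd = open ("out.log", O_WRONLY | O_CREAT | O_TRUNC, 0644);
       if (fd < 0)
         return -1;
       if (dup2 (fd, STDOUT_FILENO) < 0)   // stdout now refers to out.log
         return -1;
       close (fd);                         // the duplicate keeps the file open
       return 0;
     }
*/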
/* NULL-terminated array of "NAME=VALUE" environment variables. */
extern char **__environ;
#ifdef __USE_GNU
extern char **environ;
#endif
/* Replace the current process, executing PATH with arguments ARGV and
environment ENVP. ARGV and ENVP are terminated by NULL pointers. */
extern int execve (const char *__path, char *const __argv[],
char *const __envp[]) __THROW __nonnull ((1, 2));
#ifdef __USE_XOPEN2K8
/* Execute the file FD refers to, overlaying the running program image.
ARGV and ENVP are passed to the new program, as for `execve'. */
extern int fexecve (int __fd, char *const __argv[], char *const __envp[])
__THROW __nonnull ((2));
#endif
/* Execute PATH with arguments ARGV and environment from `environ'. */
extern int execv (const char *__path, char *const __argv[])
__THROW __nonnull ((1, 2));
/* Execute PATH with all arguments after PATH until a NULL pointer,
and the argument after that for environment. */
extern int execle (const char *__path, const char *__arg, ...)
__THROW __nonnull ((1, 2));
/* Execute PATH with all arguments after PATH until
a NULL pointer and environment from `environ'. */
extern int execl (const char *__path, const char *__arg, ...)
__THROW __nonnull ((1, 2));
/* Execute FILE, searching in the `PATH' environment variable if it contains
no slashes, with arguments ARGV and environment from `environ'. */
extern int execvp (const char *__file, char *const __argv[])
__THROW __nonnull ((1, 2));
/* Execute FILE, searching in the `PATH' environment variable if
it contains no slashes, with all arguments after FILE until a
NULL pointer and environment from `environ'. */
extern int execlp (const char *__file, const char *__arg, ...)
__THROW __nonnull ((1, 2));
#ifdef __USE_GNU
/* Execute FILE, searching in the `PATH' environment variable if it contains
no slashes, with arguments ARGV and environment from `environ'. */
extern int execvpe (const char *__file, char *const __argv[],
char *const __envp[])
__THROW __nonnull ((1, 2));
#endif
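/* Illustrative sketch (not part of this header): execlp searches the PATH
   environment variable for the program, passes the remaining arguments up to
   the terminating null pointer as argv, and only returns on failure.

     #include <stdio.h>
     #include <unistd.h>

     int main (void)
     {
       execlp ("ls", "ls", "-l", (char *) NULL);   // argv[0] is "ls"
       perror ("execlp");                          // reached only on error
       return 1;
     }
*/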
#if defined __USE_MISC || defined __USE_XOPEN
/* Add INC to priority of the current process. */
extern int nice (int __inc) __THROW __wur;
#endif
/* Terminate program execution with the low-order 8 bits of STATUS. */
extern void _exit (int __status) __attribute__ ((__noreturn__));
/* Get the `_PC_*' symbols for the NAME argument to `pathconf' and `fpathconf';
the `_SC_*' symbols for the NAME argument to `sysconf';
and the `_CS_*' symbols for the NAME argument to `confstr'. */
#include <bits/confname.h>
/* Get file-specific configuration information about PATH. */
extern long int pathconf (const char *__path, int __name)
__THROW __nonnull ((1));
/* Get file-specific configuration about descriptor FD. */
extern long int fpathconf (int __fd, int __name) __THROW;
/* Get the value of the system variable NAME. */
extern long int sysconf (int __name) __THROW;
#ifdef __USE_POSIX2
/* Get the value of the string-valued system variable NAME. */
extern size_t confstr (int __name, char *__buf, size_t __len) __THROW;
#endif
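/* Illustrative sketch (not part of this header): sysconf and pathconf query
   run-time limits by name, using the _SC_* and _PC_* symbols mentioned
   above.

     #include <stdio.h>
     #include <unistd.h>

     int main (void)
     {
       long page = sysconf (_SC_PAGESIZE);            // bytes per page
       long name_max = pathconf ("/", _PC_NAME_MAX);  // longest name under "/"
       printf ("page size %ld, NAME_MAX %ld\n", page, name_max);
       return 0;
     }
*/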
/* Get the process ID of the calling process. */
extern __pid_t getpid (void) __THROW;
/* Get the process ID of the calling process's parent. */
extern __pid_t getppid (void) __THROW;
/* Get the process group ID of the calling process. */
extern __pid_t getpgrp (void) __THROW;
/* Get the process group ID of process PID. */
extern __pid_t __getpgid (__pid_t __pid) __THROW;
#if defined __USE_XOPEN_EXTENDED || defined __USE_XOPEN2K8
extern __pid_t getpgid (__pid_t __pid) __THROW;
#endif
/* Set the process group ID of the process matching PID to PGID.
If PID is zero, the current process's process group ID is set.
If PGID is zero, the process ID of the process is used. */
extern int setpgid (__pid_t __pid, __pid_t __pgid) __THROW;
#if defined __USE_MISC || defined __USE_XOPEN_EXTENDED
/* Both System V and BSD have `setpgrp' functions, but with different
calling conventions. The BSD function is the same as POSIX.1 `setpgid'
(above). The System V function takes no arguments and puts the calling
process in its own group like `setpgid (0, 0)'.
New programs should always use `setpgid' instead.
GNU provides the POSIX.1 function. */
/* Set the process group ID of the calling process to its own PID.
This is exactly the same as `setpgid (0, 0)'. */
extern int setpgrp (void) __THROW;
#endif /* Use misc or X/Open. */
/* Create a new session with the calling process as its leader.
The process group IDs of the session and the calling process
are set to the process ID of the calling process, which is returned. */
extern __pid_t setsid (void) __THROW;
#if defined __USE_XOPEN_EXTENDED || defined __USE_XOPEN2K8
/* Return the session ID of the given process. */
extern __pid_t getsid (__pid_t __pid) __THROW;
#endif
/* Get the real user ID of the calling process. */
extern __uid_t getuid (void) __THROW;
/* Get the effective user ID of the calling process. */
extern __uid_t geteuid (void) __THROW;
/* Get the real group ID of the calling process. */
extern __gid_t getgid (void) __THROW;
/* Get the effective group ID of the calling process. */
extern __gid_t getegid (void) __THROW;
/* If SIZE is zero, return the number of supplementary groups
the calling process is in. Otherwise, fill in the group IDs
of its supplementary groups in LIST and return the number written. */
extern int getgroups (int __size, __gid_t __list[]) __THROW __wur;
#ifdef __USE_GNU
/* Return nonzero iff the calling process is in group GID. */
extern int group_member (__gid_t __gid) __THROW;
#endif
/* Set the user ID of the calling process to UID.
If the calling process is the super-user, set the real
and effective user IDs, and the saved set-user-ID to UID;
if not, the effective user ID is set to UID. */
extern int setuid (__uid_t __uid) __THROW __wur;
#if defined __USE_MISC || defined __USE_XOPEN_EXTENDED
/* Set the real user ID of the calling process to RUID,
and the effective user ID of the calling process to EUID. */
extern int setreuid (__uid_t __ruid, __uid_t __euid) __THROW __wur;
#endif
#ifdef __USE_XOPEN2K
/* Set the effective user ID of the calling process to UID. */
extern int seteuid (__uid_t __uid) __THROW __wur;
#endif /* Use POSIX.1-2001. */
/* Set the group ID of the calling process to GID.
If the calling process is the super-user, set the real
and effective group IDs, and the saved set-group-ID to GID;
if not, the effective group ID is set to GID. */
extern int setgid (__gid_t __gid) __THROW __wur;
#if defined __USE_MISC || defined __USE_XOPEN_EXTENDED
/* Set the real group ID of the calling process to RGID,
and the effective group ID of the calling process to EGID. */
extern int setregid (__gid_t __rgid, __gid_t __egid) __THROW __wur;
#endif
#ifdef __USE_XOPEN2K
/* Set the effective group ID of the calling process to GID. */
extern int setegid (__gid_t __gid) __THROW __wur;
#endif /* Use POSIX.1-2001. */
#ifdef __USE_GNU
/* Fetch the real user ID, effective user ID, and saved-set user ID,
of the calling process. */
extern int getresuid (__uid_t *__ruid, __uid_t *__euid, __uid_t *__suid)
__THROW;
/* Fetch the real group ID, effective group ID, and saved-set group ID,
of the calling process. */
extern int getresgid (__gid_t *__rgid, __gid_t *__egid, __gid_t *__sgid)
__THROW;
/* Set the real user ID, effective user ID, and saved-set user ID,
of the calling process to RUID, EUID, and SUID, respectively. */
extern int setresuid (__uid_t __ruid, __uid_t __euid, __uid_t __suid)
__THROW __wur;
/* Set the real group ID, effective group ID, and saved-set group ID,
of the calling process to RGID, EGID, and SGID, respectively. */
extern int setresgid (__gid_t __rgid, __gid_t __egid, __gid_t __sgid)
__THROW __wur;
#endif
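/* Illustrative sketch (not part of this header): getresuid reports all
   three user IDs at once, which is handy for checking whether a set-user-ID
   program still holds saved privileges.

     #define _GNU_SOURCE
     #include <stdio.h>
     #include <unistd.h>

     int main (void)
     {
       uid_t ruid, euid, suid;
       if (getresuid (&ruid, &euid, &suid) < 0)
         return 1;
       printf ("real %u effective %u saved %u\n",
               (unsigned) ruid, (unsigned) euid, (unsigned) suid);
       return 0;
     }
*/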
/* Clone the calling process, creating an exact copy.
Return -1 for errors, 0 to the new process,
and the process ID of the new process to the old process. */
extern __pid_t fork (void) __THROWNL;
#if (defined __USE_XOPEN_EXTENDED && !defined __USE_XOPEN2K8) \
|| defined __USE_MISC
/* Clone the calling process, but without copying the whole address space.
The calling process is suspended until the new process exits or is
replaced by a call to `execve'. Return -1 for errors, 0 to the new process,
and the process ID of the new process to the old process. */
extern __pid_t vfork (void) __THROW;
#endif /* Use misc or XPG < 7. */
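/* Illustrative sketch (not part of this header): fork returns twice -- 0 in
   the child and the child's PID in the parent -- so the two sides are told
   apart by the return value.

     #include <stdio.h>
     #include <sys/wait.h>
     #include <unistd.h>

     int main (void)
     {
       pid_t pid = fork ();
       if (pid < 0)
         return 1;                       // error, still in the parent
       if (pid == 0)
         {
           printf ("child %d\n", (int) getpid ());
           _exit (0);                    // child side
         }
       waitpid (pid, NULL, 0);           // parent waits for the child
       return 0;
     }
*/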
/* Return the pathname of the terminal FD is open on, or NULL on errors.
The returned storage is good only until the next call to this function. */
extern char *ttyname (int __fd) __THROW;
/* Store at most BUFLEN characters of the pathname of the terminal FD is
open on in BUF. Return 0 on success, otherwise an error number. */
extern int ttyname_r (int __fd, char *__buf, size_t __buflen)
__THROW __nonnull ((2)) __wur;
/* Return 1 if FD is a valid descriptor associated
with a terminal, zero if not. */
extern int isatty (int __fd) __THROW;
#ifdef __USE_MISC
/* Return the index into the active-logins file (utmp) for
the controlling terminal. */
extern int ttyslot (void) __THROW;
#endif
/* Make a link to FROM named TO. */
extern int link (const char *__from, const char *__to)
__THROW __nonnull ((1, 2)) __wur;
#ifdef __USE_ATFILE
/* Like link but relative paths in TO and FROM are interpreted relative
to FROMFD and TOFD respectively. */
extern int linkat (int __fromfd, const char *__from, int __tofd,
const char *__to, int __flags)
__THROW __nonnull ((2, 4)) __wur;
#endif
#if defined __USE_XOPEN_EXTENDED || defined __USE_XOPEN2K
/* Make a symbolic link to FROM named TO. */
extern int symlink (const char *__from, const char *__to)
__THROW __nonnull ((1, 2)) __wur;
/* Read the contents of the symbolic link PATH into no more than
LEN bytes of BUF. The contents are not null-terminated.
Returns the number of characters read, or -1 for errors. */
extern ssize_t readlink (const char *__restrict __path,
char *__restrict __buf, size_t __len)
__THROW __nonnull ((1, 2)) __wur;
#endif /* Use POSIX.1-2001. */
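/* Illustrative sketch (not part of this header): because readlink does not
   null-terminate the result, the usual idiom reserves one byte for the
   terminator.  /proc/self/exe is a Linux-specific symbolic link used here
   only as an example target.

     #include <stdio.h>
     #include <unistd.h>

     int main (void)
     {
       char target[4096];
       ssize_t n = readlink ("/proc/self/exe", target, sizeof target - 1);
       if (n < 0)
         return 1;
       target[n] = '\0';                 // terminate by hand
       printf ("%s\n", target);
       return 0;
     }
*/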
#ifdef __USE_ATFILE
/* Like symlink but a relative path in TO is interpreted relative to TOFD. */
extern int symlinkat (const char *__from, int __tofd,
const char *__to) __THROW __nonnull ((1, 3)) __wur;
/* Like readlink but a relative PATH is interpreted relative to FD. */
extern ssize_t readlinkat (int __fd, const char *__restrict __path,
char *__restrict __buf, size_t __len)
__THROW __nonnull ((2, 3)) __wur;
#endif
/* Remove the link NAME. */
extern int unlink (const char *__name) __THROW __nonnull ((1));
#ifdef __USE_ATFILE
/* Remove the link NAME relative to FD. */
extern int unlinkat (int __fd, const char *__name, int __flag)
__THROW __nonnull ((2));
#endif
/* Remove the directory PATH. */
extern int rmdir (const char *__path) __THROW __nonnull ((1));
/* Return the foreground process group ID of FD. */
extern __pid_t tcgetpgrp (int __fd) __THROW;
/* Set the foreground process group ID of FD to PGRP_ID. */
extern int tcsetpgrp (int __fd, __pid_t __pgrp_id) __THROW;
/* Return the login name of the user.
This function is a possible cancellation point and therefore not
marked with __THROW. */
extern char *getlogin (void);
#ifdef __USE_POSIX199506
/* Return at most NAME_LEN characters of the login name of the user in NAME.
If it cannot be determined or some other error occurred, return the error
code. Otherwise return 0.
This function is a possible cancellation point and therefore not
marked with __THROW. */
extern int getlogin_r (char *__name, size_t __name_len) __nonnull ((1));
#endif
#ifdef __USE_MISC
/* Set the login name returned by `getlogin'. */
extern int setlogin (const char *__name) __THROW __nonnull ((1));
#endif
#ifdef __USE_POSIX2
/* Get definitions and prototypes for functions to process the
arguments in ARGV (ARGC of them, minus the program name) for
options given in OPTS. */
# include <bits/getopt_posix.h>
#endif
#if defined __USE_XOPEN_EXTENDED || defined __USE_XOPEN2K
/* Put the name of the current host in no more than LEN bytes of NAME.
The result is null-terminated if LEN is large enough for the full
name and the terminator. */
extern int gethostname (char *__name, size_t __len) __THROW __nonnull ((1));
#endif
#if defined __USE_MISC
/* Set the name of the current host to NAME, which is LEN bytes long.
This call is restricted to the super-user. */
extern int sethostname (const char *__name, size_t __len)
__THROW __nonnull ((1)) __wur;
/* Set the current machine's Internet number to ID.
This call is restricted to the super-user. */
extern int sethostid (long int __id) __THROW __wur;
/* Get and set the NIS (aka YP) domain name, if any.
Called just like `gethostname' and `sethostname'.
The NIS domain name is usually the empty string when not using NIS. */
extern int getdomainname (char *__name, size_t __len)
__THROW __nonnull ((1)) __wur;
extern int setdomainname (const char *__name, size_t __len)
__THROW __nonnull ((1)) __wur;
/* Revoke access permissions to all processes currently communicating
with the control terminal, and then send a SIGHUP signal to the process
group of the control terminal. */
extern int vhangup (void) __THROW;
/* Revoke the access of all descriptors currently open on FILE. */
extern int revoke (const char *__file) __THROW __nonnull ((1)) __wur;
/* Enable statistical profiling, writing samples of the PC into at most
SIZE bytes of SAMPLE_BUFFER; every processor clock tick while profiling
is enabled, the system examines the user PC and increments
SAMPLE_BUFFER[((PC - OFFSET) / 2) * SCALE / 65536]. If SCALE is zero,
disable profiling. Returns zero on success, -1 on error. */
extern int profil (unsigned short int *__sample_buffer, size_t __size,
size_t __offset, unsigned int __scale)
__THROW __nonnull ((1));
/* Turn accounting on if NAME is an existing file. The system will then write
a record for each process as it terminates, to this file. If NAME is NULL,
turn accounting off. This call is restricted to the super-user. */
extern int acct (const char *__name) __THROW;
/* Successive calls return the shells listed in `/etc/shells'. */
extern char *getusershell (void) __THROW;
extern void endusershell (void) __THROW; /* Discard cached info. */
extern void setusershell (void) __THROW; /* Rewind and re-read the file. */
/* Put the program in the background, and dissociate from the controlling
terminal. If NOCHDIR is zero, do `chdir ("/")'. If NOCLOSE is zero,
redirects stdin, stdout, and stderr to /dev/null. */
extern int daemon (int __nochdir, int __noclose) __THROW __wur;
#endif /* Use misc. */
#if defined __USE_MISC || (defined __USE_XOPEN && !defined __USE_XOPEN2K)
/* Make PATH be the root directory (the starting point for absolute paths).
This call is restricted to the super-user. */
extern int chroot (const char *__path) __THROW __nonnull ((1)) __wur;
/* Prompt with PROMPT and read a string from the terminal without echoing.
Uses /dev/tty if possible; otherwise stderr and stdin. */
extern char *getpass (const char *__prompt) __nonnull ((1));
#endif /* Use misc || X/Open. */
/* Make all changes done to FD actually appear on disk.
This function is a cancellation point and therefore not marked with
__THROW. */
extern int fsync (int __fd);
#ifdef __USE_GNU
/* Make all changes done to all files on the file system associated
with FD actually appear on disk. */
extern int syncfs (int __fd) __THROW;
#endif
#if defined __USE_MISC || defined __USE_XOPEN_EXTENDED
/* Return identifier for the current host. */
extern long int gethostid (void);
/* Make all changes done to all files actually appear on disk. */
extern void sync (void) __THROW;
# if defined __USE_MISC || !defined __USE_XOPEN2K
/* Return the number of bytes in a page. This is the system's page size,
which is not necessarily the same as the hardware page size. */
extern int getpagesize (void) __THROW __attribute__ ((__const__));
/* Return the maximum number of file descriptors
the current process could possibly have. */
extern int getdtablesize (void) __THROW;
# endif
#endif /* Use misc || X/Open Unix. */
#if defined __USE_XOPEN_EXTENDED || defined __USE_XOPEN2K8
/* Truncate FILE to LENGTH bytes. */
# ifndef __USE_FILE_OFFSET64
extern int truncate (const char *__file, __off_t __length)
__THROW __nonnull ((1)) __wur;
# else
# ifdef __REDIRECT_NTH
extern int __REDIRECT_NTH (truncate,
(const char *__file, __off64_t __length),
truncate64) __nonnull ((1)) __wur;
# else
# define truncate truncate64
# endif
# endif
# ifdef __USE_LARGEFILE64
extern int truncate64 (const char *__file, __off64_t __length)
__THROW __nonnull ((1)) __wur;
# endif
#endif /* Use X/Open Unix || POSIX 2008. */
#if defined __USE_POSIX199309 \
|| defined __USE_XOPEN_EXTENDED || defined __USE_XOPEN2K
/* Truncate the file FD is open on to LENGTH bytes. */
# ifndef __USE_FILE_OFFSET64
extern int ftruncate (int __fd, __off_t __length) __THROW __wur;
# else
# ifdef __REDIRECT_NTH
extern int __REDIRECT_NTH (ftruncate, (int __fd, __off64_t __length),
ftruncate64) __wur;
# else
# define ftruncate ftruncate64
# endif
# endif
# ifdef __USE_LARGEFILE64
extern int ftruncate64 (int __fd, __off64_t __length) __THROW __wur;
# endif
#endif /* Use POSIX.1b || X/Open Unix || XPG6. */
#if (defined __USE_XOPEN_EXTENDED && !defined __USE_XOPEN2K) \
|| defined __USE_MISC
/* Set the end of accessible data space (aka "the break") to ADDR.
Returns zero on success and -1 for errors (with errno set). */
extern int brk (void *__addr) __THROW __wur;
/* Increase or decrease the end of accessible data space by DELTA bytes.
If successful, returns the address the previous end of data space
(i.e. the beginning of the new space, if DELTA > 0);
returns (void *) -1 for errors (with errno set). */
extern void *sbrk (intptr_t __delta) __THROW;
#endif
#ifdef __USE_MISC
/* Invoke `system call' number SYSNO, passing it the remaining arguments.
This is completely system-dependent, and not often useful.
In Unix, `syscall' sets `errno' for all errors and most calls return -1
for errors; in many systems you cannot pass arguments or get return
values for all system calls (`pipe', `fork', and `getppid' typically
among them).
In Mach, all system calls take normal arguments and always return an
error code (zero for success). */
extern long int syscall (long int __sysno, ...) __THROW;
#endif /* Use misc. */
#if (defined __USE_MISC || defined __USE_XOPEN_EXTENDED) && !defined F_LOCK
/* NOTE: These declarations also appear in <fcntl.h>; be sure to keep both
files consistent. Some systems have them there and some here, and some
software depends on the macros being defined without including both. */
/* `lockf' is a simpler interface to the locking facilities of `fcntl'.
LEN is always relative to the current file position.
The CMD argument is one of the following.
This function is a cancellation point and therefore not marked with
__THROW. */
# define F_ULOCK 0 /* Unlock a previously locked region. */
# define F_LOCK 1 /* Lock a region for exclusive use. */
# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
# define F_TEST 3 /* Test a region for other processes' locks. */
# ifndef __USE_FILE_OFFSET64
extern int lockf (int __fd, int __cmd, __off_t __len) __wur;
# else
# ifdef __REDIRECT
extern int __REDIRECT (lockf, (int __fd, int __cmd, __off64_t __len),
lockf64) __wur;
# else
# define lockf lockf64
# endif
# endif
# ifdef __USE_LARGEFILE64
extern int lockf64 (int __fd, int __cmd, __off64_t __len) __wur;
# endif
#endif /* Use misc and F_LOCK not already defined. */
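/* Illustrative sketch (not part of this header): F_LOCK with a length of
   zero locks from the current file position to the end of the file, and
   F_ULOCK on the same range releases it.  The file name is hypothetical.

     #include <fcntl.h>
     #include <unistd.h>

     int with_lock (void)
     {
       int fd = open ("counter.txt", O_RDWR);   // hypothetical file
       if (fd < 0)
         return -1;
       if (lockf (fd, F_LOCK, 0) < 0)           // lock whole file, may block
         return -1;
       // ... read-modify-write the file here ...
       lockf (fd, F_ULOCK, 0);                  // release the lock
       close (fd);
       return 0;
     }
*/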
#ifdef __USE_GNU
/* Evaluate EXPRESSION, and repeat as long as it returns -1 with `errno'
set to EINTR. */
# define TEMP_FAILURE_RETRY(expression) \
(__extension__ \
({ long int __result; \
do __result = (long int) (expression); \
while (__result == -1L && errno == EINTR); \
__result; }))
/* Copy LENGTH bytes from INFD to OUTFD. */
ssize_t copy_file_range (int __infd, __off64_t *__pinoff,
int __outfd, __off64_t *__poutoff,
size_t __length, unsigned int __flags);
#endif /* __USE_GNU */
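/* Illustrative sketch (not part of this header): TEMP_FAILURE_RETRY wraps a
   single call and reissues it while the result is -1 with errno set to
   EINTR, so slow reads survive interrupting signals.

     #define _GNU_SOURCE
     #include <errno.h>
     #include <unistd.h>

     ssize_t read_retrying (int fd, void *buf, size_t len)
     {
       return TEMP_FAILURE_RETRY (read (fd, buf, len));   // retried on EINTR
     }
*/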
#if defined __USE_POSIX199309 || defined __USE_UNIX98
/* Synchronize at least the data part of a file with the underlying
media. */
extern int fdatasync (int __fildes);
#endif /* Use POSIX199309 */
#ifdef __USE_MISC
/* One-way hash PHRASE, returning a string suitable for storage in the
user database. SALT selects the one-way function to use, and
ensures that no two users' hashes are the same, even if they use
the same passphrase. The return value points to static storage
which will be overwritten by the next call to crypt. */
extern char *crypt (const char *__key, const char *__salt)
__THROW __nonnull ((1, 2));
#endif
#ifdef __USE_XOPEN
/* Swap pairs of bytes in the first N bytes of the area pointed to by
FROM and copy the result to TO. The value of TO must not be in the
range [FROM - N + 1, FROM - 1]. If N is odd the first byte in FROM
is without partner. */
extern void swab (const void *__restrict __from, void *__restrict __to,
ssize_t __n) __THROW __nonnull ((1, 2));
#endif
/* Prior to Issue 6, the Single Unix Specification required these
prototypes to appear in this header. They are also found in
<stdio.h>. */
#if defined __USE_XOPEN && !defined __USE_XOPEN2K
/* Return the name of the controlling terminal. */
extern char *ctermid (char *__s) __THROW;
/* Return the name of the current user. */
extern char *cuserid (char *__s);
#endif
/* Unix98 requires this function to be declared here. In other
standards it is in <pthread.h>. */
#if defined __USE_UNIX98 && !defined __USE_XOPEN2K
extern int pthread_atfork (void (*__prepare) (void),
void (*__parent) (void),
void (*__child) (void)) __THROW;
#endif
#ifdef __USE_MISC
/* Write LENGTH bytes of randomness starting at BUFFER. Return 0 on
success or -1 on error. */
int getentropy (void *__buffer, size_t __length) __wur;
#endif
/* Define some macros helping to catch buffer overflows. */
#if __USE_FORTIFY_LEVEL > 0 && defined __fortify_function
# include <bits/unistd.h>
#endif
__END_DECLS
#endif /* unistd.h */
#ifndef IRSSI_CORE_SERVERS_RECONNECT_H
#define IRSSI_CORE_SERVERS_RECONNECT_H
/* wait for half an hour before trying to reconnect to host where last
connection failed */
#define FAILED_RECONNECT_WAIT (60*30)
typedef struct {
int tag;
time_t next_connect;
SERVER_CONNECT_REC *conn;
} RECONNECT_REC;
extern GSList *reconnects;
void reconnect_save_status(SERVER_CONNECT_REC *conn, SERVER_REC *server);
void server_reconnect_destroy(RECONNECT_REC *rec);
void servers_reconnect_init(void);
void servers_reconnect_deinit(void);
#endif
/**
* @file
* ARP protocol definitions
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#ifndef LWIP_HDR_PROT_ETHARP_H
#define LWIP_HDR_PROT_ETHARP_H
#include "lwip/arch.h"
#include "lwip/prot/ethernet.h"
#include "lwip/ip4_addr.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifndef ETHARP_HWADDR_LEN
#define ETHARP_HWADDR_LEN ETH_HWADDR_LEN
#endif
#ifdef PACK_STRUCT_USE_INCLUDES
# include "arch/bpstruct.h"
#endif
PACK_STRUCT_BEGIN
/** the ARP message, see RFC 826 ("Packet format") */
struct etharp_hdr {
PACK_STRUCT_FIELD(u16_t hwtype);
PACK_STRUCT_FIELD(u16_t proto);
PACK_STRUCT_FLD_8(u8_t hwlen);
PACK_STRUCT_FLD_8(u8_t protolen);
PACK_STRUCT_FIELD(u16_t opcode);
PACK_STRUCT_FLD_S(struct eth_addr shwaddr);
PACK_STRUCT_FLD_S(struct ip4_addr2 sipaddr);
PACK_STRUCT_FLD_S(struct eth_addr dhwaddr);
PACK_STRUCT_FLD_S(struct ip4_addr2 dipaddr);
} PACK_STRUCT_STRUCT;
PACK_STRUCT_END
#ifdef PACK_STRUCT_USE_INCLUDES
# include "arch/epstruct.h"
#endif
#define SIZEOF_ETHARP_HDR 28
/* ARP hwtype values */
enum etharp_hwtype {
HWTYPE_ETHERNET = 1
/* others not used */
};
/* ARP message types (opcodes) */
enum etharp_opcode {
ARP_REQUEST = 1,
ARP_REPLY = 2
};
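/* Illustrative sketch (not part of this header): the 16-bit fields of an ARP
   request are stored in network byte order via lwip_htons(); ETHTYPE_IP is
   assumed to be available through the ethernet protocol header included
   above, and the addresses passed in are placeholders supplied by the
   caller.

     #include <string.h>
     #include "lwip/def.h"
     #include "lwip/prot/etharp.h"

     static void fill_arp_request(struct etharp_hdr *hdr,
                                  const struct eth_addr *src_mac,
                                  const struct ip4_addr2 *src_ip,
                                  const struct ip4_addr2 *dst_ip)
     {
       memset(hdr, 0, SIZEOF_ETHARP_HDR);
       hdr->hwtype   = lwip_htons(HWTYPE_ETHERNET);
       hdr->proto    = lwip_htons(ETHTYPE_IP);
       hdr->hwlen    = ETH_HWADDR_LEN;
       hdr->protolen = sizeof(struct ip4_addr2);
       hdr->opcode   = lwip_htons(ARP_REQUEST);
       hdr->shwaddr  = *src_mac;
       hdr->sipaddr  = *src_ip;
       hdr->dipaddr  = *dst_ip;          // dhwaddr stays all zero for a request
     }
*/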
#ifdef __cplusplus
}
#endif
#endif /* LWIP_HDR_PROT_ETHARP_H */
/*===---- __wmmintrin_aes.h - AES intrinsics -------------------------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __WMMINTRIN_H
#error "Never use <__wmmintrin_aes.h> directly; include <wmmintrin.h> instead."
#endif
#ifndef __WMMINTRIN_AES_H
#define __WMMINTRIN_AES_H
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("aes"), __min_vector_width__(128)))
/// Performs a single round of AES encryption (the forward cipher),
/// transforming the state value from the first source
/// operand using a 128-bit round key value contained in the second source
/// operand, and writes the result to the destination.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VAESENC </c> instruction.
///
/// \param __V
/// A 128-bit integer vector containing the state value.
/// \param __R
/// A 128-bit integer vector containing the round key value.
/// \returns A 128-bit integer vector containing the encrypted value.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_aesenc_si128(__m128i __V, __m128i __R)
{
return (__m128i)__builtin_ia32_aesenc128((__v2di)__V, (__v2di)__R);
}
/// Performs the final round of AES encryption (the forward cipher),
/// transforming the state value from the first source
/// operand using a 128-bit round key value contained in the second source
/// operand, and writes the result to the destination.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VAESENCLAST </c> instruction.
///
/// \param __V
/// A 128-bit integer vector containing the state value.
/// \param __R
/// A 128-bit integer vector containing the round key value.
/// \returns A 128-bit integer vector containing the encrypted value.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_aesenclast_si128(__m128i __V, __m128i __R)
{
return (__m128i)__builtin_ia32_aesenclast128((__v2di)__V, (__v2di)__R);
}
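/* Illustrative sketch (not part of this header): given an already expanded
   AES-128 key schedule (11 round keys), a block is encrypted by XOR-ing in
   the first round key, applying _mm_aesenc_si128 for rounds 1-9, and
   finishing with _mm_aesenclast_si128.  Build with AES support enabled
   (e.g. -maes).

     #include <emmintrin.h>
     #include <wmmintrin.h>

     static __m128i aes128_encrypt_block(__m128i block, const __m128i rk[11])
     {
       block = _mm_xor_si128(block, rk[0]);          // initial AddRoundKey
       for (int i = 1; i < 10; ++i)
         block = _mm_aesenc_si128(block, rk[i]);     // rounds 1..9
       return _mm_aesenclast_si128(block, rk[10]);   // final round
     }
*/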
/// Performs a single round of AES decryption using the Equivalent
/// Inverse Cipher, transforming the state value from the first source
/// operand using a 128-bit round key value contained in the second source
/// operand, and writes the result to the destination.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VAESDEC </c> instruction.
///
/// \param __V
/// A 128-bit integer vector containing the state value.
/// \param __R
/// A 128-bit integer vector containing the round key value.
/// \returns A 128-bit integer vector containing the decrypted value.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_aesdec_si128(__m128i __V, __m128i __R)
{
return (__m128i)__builtin_ia32_aesdec128((__v2di)__V, (__v2di)__R);
}
/// Performs the final round of AES decryption using the Equivalent
/// Inverse Cipher, transforming the state value from the first source
/// operand using a 128-bit round key value contained in the second source
/// operand, and writes the result to the destination.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VAESDECLAST </c> instruction.
///
/// \param __V
/// A 128-bit integer vector containing the state value.
/// \param __R
/// A 128-bit integer vector containing the round key value.
/// \returns A 128-bit integer vector containing the decrypted value.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_aesdeclast_si128(__m128i __V, __m128i __R)
{
return (__m128i)__builtin_ia32_aesdeclast128((__v2di)__V, (__v2di)__R);
}
/// Applies the AES InvMixColumns() transformation to an expanded key
/// contained in the source operand, and writes the result to the
/// destination.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VAESIMC </c> instruction.
///
/// \param __V
/// A 128-bit integer vector containing the expanded key.
/// \returns A 128-bit integer vector containing the transformed value.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_aesimc_si128(__m128i __V)
{
return (__m128i)__builtin_ia32_aesimc128((__v2di)__V);
}
/// Generates a round key for AES encryption, operating on 128-bit data
/// specified in the first source operand and using an 8-bit round constant
/// specified by the second source operand, and writes the result to the
/// destination.
///
/// \headerfile <x86intrin.h>
///
/// \code
/// __m128i _mm_aeskeygenassist_si128(__m128i C, const int R);
/// \endcode
///
/// This intrinsic corresponds to the <c> AESKEYGENASSIST </c> instruction.
///
/// \param C
/// A 128-bit integer vector that is used to generate the AES encryption key.
/// \param R
/// An 8-bit round constant used to generate the AES encryption key.
/// \returns A 128-bit round key for AES encryption.
#define _mm_aeskeygenassist_si128(C, R) \
(__m128i)__builtin_ia32_aeskeygenassist128((__v2di)(__m128i)(C), (int)(R))
#undef __DEFAULT_FN_ATTRS
#endif /* __WMMINTRIN_AES_H */
/**
******************************************************************************
* @file stm32f10x_cec.h
* @author MCD Application Team
* @version V3.5.0
* @date 11-March-2011
* @brief This file contains all the functions prototypes for the CEC firmware
* library.
******************************************************************************
* @attention
*
* THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
* WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE
* TIME. AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY
* DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING
* FROM THE CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE
* CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
*
* <h2><center>© COPYRIGHT 2011 STMicroelectronics</center></h2>
******************************************************************************
*/
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __STM32F10x_CEC_H
#define __STM32F10x_CEC_H
#ifdef __cplusplus
extern "C" {
#endif
/* Includes ------------------------------------------------------------------*/
#include "stm32f10x.h"
/** @addtogroup STM32F10x_StdPeriph_Driver
* @{
*/
/** @addtogroup CEC
* @{
*/
/** @defgroup CEC_Exported_Types
* @{
*/
/**
* @brief CEC Init structure definition
*/
typedef struct
{
uint16_t CEC_BitTimingMode; /*!< Configures the CEC Bit Timing Error Mode.
This parameter can be a value of @ref CEC_BitTiming_Mode */
uint16_t CEC_BitPeriodMode; /*!< Configures the CEC Bit Period Error Mode.
This parameter can be a value of @ref CEC_BitPeriod_Mode */
}CEC_InitTypeDef;
/**
* @}
*/
/** @defgroup CEC_Exported_Constants
* @{
*/
/** @defgroup CEC_BitTiming_Mode
* @{
*/
#define CEC_BitTimingStdMode ((uint16_t)0x00) /*!< Bit timing error Standard Mode */
#define CEC_BitTimingErrFreeMode CEC_CFGR_BTEM /*!< Bit timing error Free Mode */
#define IS_CEC_BIT_TIMING_ERROR_MODE(MODE) (((MODE) == CEC_BitTimingStdMode) || \
((MODE) == CEC_BitTimingErrFreeMode))
/**
* @}
*/
/** @defgroup CEC_BitPeriod_Mode
* @{
*/
#define CEC_BitPeriodStdMode ((uint16_t)0x00) /*!< Bit period error Standard Mode */
#define CEC_BitPeriodFlexibleMode CEC_CFGR_BPEM /*!< Bit period error Flexible Mode */
#define IS_CEC_BIT_PERIOD_ERROR_MODE(MODE) (((MODE) == CEC_BitPeriodStdMode) || \
((MODE) == CEC_BitPeriodFlexibleMode))
/**
* @}
*/
/** @defgroup CEC_interrupts_definition
* @{
*/
#define CEC_IT_TERR CEC_CSR_TERR
#define CEC_IT_TBTRF CEC_CSR_TBTRF
#define CEC_IT_RERR CEC_CSR_RERR
#define CEC_IT_RBTF CEC_CSR_RBTF
#define IS_CEC_GET_IT(IT) (((IT) == CEC_IT_TERR) || ((IT) == CEC_IT_TBTRF) || \
((IT) == CEC_IT_RERR) || ((IT) == CEC_IT_RBTF))
/**
* @}
*/
/** @defgroup CEC_Own_Address
* @{
*/
#define IS_CEC_ADDRESS(ADDRESS) ((ADDRESS) < 0x10)
/**
* @}
*/
/** @defgroup CEC_Prescaler
* @{
*/
#define IS_CEC_PRESCALER(PRESCALER) ((PRESCALER) <= 0x3FFF)
/**
* @}
*/
/** @defgroup CEC_flags_definition
* @{
*/
/**
* @brief ESR register flags
*/
#define CEC_FLAG_BTE ((uint32_t)0x10010000)
#define CEC_FLAG_BPE ((uint32_t)0x10020000)
#define CEC_FLAG_RBTFE ((uint32_t)0x10040000)
#define CEC_FLAG_SBE ((uint32_t)0x10080000)
#define CEC_FLAG_ACKE ((uint32_t)0x10100000)
#define CEC_FLAG_LINE ((uint32_t)0x10200000)
#define CEC_FLAG_TBTFE ((uint32_t)0x10400000)
/**
* @brief CSR register flags
*/
#define CEC_FLAG_TEOM ((uint32_t)0x00000002)
#define CEC_FLAG_TERR ((uint32_t)0x00000004)
#define CEC_FLAG_TBTRF ((uint32_t)0x00000008)
#define CEC_FLAG_RSOM ((uint32_t)0x00000010)
#define CEC_FLAG_REOM ((uint32_t)0x00000020)
#define CEC_FLAG_RERR ((uint32_t)0x00000040)
#define CEC_FLAG_RBTF ((uint32_t)0x00000080)
#define IS_CEC_CLEAR_FLAG(FLAG) ((((FLAG) & (uint32_t)0xFFFFFF03) == 0x00) && ((FLAG) != 0x00))
#define IS_CEC_GET_FLAG(FLAG) (((FLAG) == CEC_FLAG_BTE) || ((FLAG) == CEC_FLAG_BPE) || \
((FLAG) == CEC_FLAG_RBTFE) || ((FLAG)== CEC_FLAG_SBE) || \
((FLAG) == CEC_FLAG_ACKE) || ((FLAG) == CEC_FLAG_LINE) || \
((FLAG) == CEC_FLAG_TBTFE) || ((FLAG) == CEC_FLAG_TEOM) || \
((FLAG) == CEC_FLAG_TERR) || ((FLAG) == CEC_FLAG_TBTRF) || \
((FLAG) == CEC_FLAG_RSOM) || ((FLAG) == CEC_FLAG_REOM) || \
((FLAG) == CEC_FLAG_RERR) || ((FLAG) == CEC_FLAG_RBTF))
/**
* @}
*/
/**
* @}
*/
/** @defgroup CEC_Exported_Macros
* @{
*/
/**
* @}
*/
/** @defgroup CEC_Exported_Functions
* @{
*/
void CEC_DeInit(void);
void CEC_Init(CEC_InitTypeDef* CEC_InitStruct);
void CEC_Cmd(FunctionalState NewState);
void CEC_ITConfig(FunctionalState NewState);
void CEC_OwnAddressConfig(uint8_t CEC_OwnAddress);
void CEC_SetPrescaler(uint16_t CEC_Prescaler);
void CEC_SendDataByte(uint8_t Data);
uint8_t CEC_ReceiveDataByte(void);
void CEC_StartOfMessage(void);
void CEC_EndOfMessageCmd(FunctionalState NewState);
FlagStatus CEC_GetFlagStatus(uint32_t CEC_FLAG);
void CEC_ClearFlag(uint32_t CEC_FLAG);
ITStatus CEC_GetITStatus(uint8_t CEC_IT);
void CEC_ClearITPendingBit(uint16_t CEC_IT);
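/* Illustrative sketch (not part of this driver): a minimal bring-up in
   application code that includes "stm32f10x_cec.h", using only the
   prototypes above.  The own address passed below is a placeholder chosen
   by the application.

     CEC_InitTypeDef cec_init;

     CEC_DeInit();                                        // reset the peripheral
     cec_init.CEC_BitTimingMode = CEC_BitTimingStdMode;
     cec_init.CEC_BitPeriodMode = CEC_BitPeriodStdMode;
     CEC_Init(&cec_init);
     CEC_OwnAddressConfig(0x3);                           // placeholder logical address
     CEC_Cmd(ENABLE);                                     // enable the CEC peripheral
*/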
#ifdef __cplusplus
}
#endif
#endif /* __STM32F10x_CEC_H */
/**
* @}
*/
/**
* @}
*/
/**
* @}
*/
/******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE****/
/*
File: FixMath.h
Contains: Fixed Math Interfaces.
Version: Technology: Mac OS 8
Release: QuickTime 6.0.2
Copyright: (c) 1985-2001 by Apple Computer, Inc., all rights reserved
Bugs?: For bug reports, consult the following page on
the World Wide Web:
http://developer.apple.com/bugreporter/
*/
#ifndef __FIXMATH__
#define __FIXMATH__
#ifndef __MACTYPES__
#include "MacTypes.h"
#endif
#if PRAGMA_ONCE
#pragma once
#endif
#ifdef __cplusplus
extern "C" {
#endif
#if PRAGMA_IMPORT
#pragma import on
#endif
#if PRAGMA_STRUCT_ALIGN
#pragma options align=mac68k
#elif PRAGMA_STRUCT_PACKPUSH
#pragma pack(push, 2)
#elif PRAGMA_STRUCT_PACK
#pragma pack(2)
#endif
#define fixed1 ((Fixed) 0x00010000L)
#define fract1 ((Fract) 0x40000000L)
#define positiveInfinity ((long) 0x7FFFFFFFL)
#define negativeInfinity ((long) 0x80000000L)
/*
FixRatio, FixMul, and FixRound were previously in ToolUtils.h
*/
EXTERN_API( Fixed )
FixRatio (short numer,
short denom) ONEWORDINLINE(0xA869);
EXTERN_API( Fixed )
FixMul (Fixed a,
Fixed b) ONEWORDINLINE(0xA868);
EXTERN_API( short )
FixRound (Fixed x) ONEWORDINLINE(0xA86C);
EXTERN_API( Fract )
Fix2Frac (Fixed x) ONEWORDINLINE(0xA841);
EXTERN_API( long )
Fix2Long (Fixed x) ONEWORDINLINE(0xA840);
EXTERN_API( Fixed )
Long2Fix (long x) ONEWORDINLINE(0xA83F);
EXTERN_API( Fixed )
Frac2Fix (Fract x) ONEWORDINLINE(0xA842);
EXTERN_API( Fract )
FracMul (Fract x,
Fract y) ONEWORDINLINE(0xA84A);
EXTERN_API( Fixed )
FixDiv (Fixed x,
Fixed y) ONEWORDINLINE(0xA84D);
EXTERN_API( Fract )
FracDiv (Fract x,
Fract y) ONEWORDINLINE(0xA84B);
EXTERN_API( Fract )
FracSqrt (Fract x) ONEWORDINLINE(0xA849);
EXTERN_API( Fract )
FracSin (Fixed x) ONEWORDINLINE(0xA848);
EXTERN_API( Fract )
FracCos (Fixed x) ONEWORDINLINE(0xA847);
EXTERN_API( Fixed )
FixATan2 (long x,
long y) ONEWORDINLINE(0xA818);
/*
Frac2X, Fix2X, X2Fix, and X2Frac translate to and from
the floating point type "extended" (that's what the X is for).
On the original Mac this was 80-bits and the functions could be
accessed via A-Traps. When the 68881 co-processor was added,
it used 96-bit floating point types, so the A-Traps could not
be used. When PowerPC was added, it used 64-bit floating point
types, so yet another prototype was added.
*/
#if TARGET_CPU_68K
#if TARGET_RT_MAC_68881
#if CALL_NOT_IN_CARBON
EXTERN_API( long double )
Frac2X (Fract x);
EXTERN_API( long double )
Fix2X (Fixed x);
EXTERN_API( Fixed )
X2Fix (long double x);
EXTERN_API( Fract )
X2Frac (long double x);
#endif /* CALL_NOT_IN_CARBON */
#else
#if CALL_NOT_IN_CARBON
EXTERN_API( long double )
Frac2X (Fract x) ONEWORDINLINE(0xA845);
EXTERN_API( long double )
Fix2X (Fixed x) ONEWORDINLINE(0xA843);
EXTERN_API( Fixed )
X2Fix (long double x) ONEWORDINLINE(0xA844);
EXTERN_API( Fract )
X2Frac (long double x) ONEWORDINLINE(0xA846);
#endif /* CALL_NOT_IN_CARBON */
#endif /* TARGET_RT_MAC_68881 */
#else
EXTERN_API( double )
Frac2X (Fract x);
EXTERN_API( double )
Fix2X (Fixed x);
EXTERN_API( Fixed )
X2Fix (double x);
EXTERN_API( Fract )
X2Frac (double x);
#endif /* TARGET_CPU_68K */
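/* Illustrative sketch (not part of this header): Fixed values carry 16
   fractional bits (fixed1 == 1.0), so a floating-point computation can be
   routed through FixMul by converting with X2Fix and Fix2X.

     double scale_by_fixed(double value, double factor)
     {
         Fixed f = X2Fix(value);          // to 16.16 fixed point
         Fixed g = X2Fix(factor);
         return Fix2X(FixMul(f, g));      // multiply, then back to double
     }
*/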
/* QuickTime 3.0 makes these Wide routines available on other platforms*/
#if TARGET_CPU_PPC || !TARGET_OS_MAC || TARGET_CPU_X86
EXTERN_API_C( short )
WideCompare (const wide * target,
const wide * source);
EXTERN_API_C( wide *)
WideAdd (wide * target,
const wide * source);
EXTERN_API_C( wide *)
WideSubtract (wide * target,
const wide * source);
EXTERN_API_C( wide *)
WideNegate (wide * target);
EXTERN_API_C( wide *)
WideShift (wide * target,
long shift);
EXTERN_API_C( unsigned long )
WideSquareRoot (const wide * source);
EXTERN_API_C( wide *)
WideMultiply (long multiplicand,
long multiplier,
wide * target);
/* returns the quotient */
EXTERN_API_C( long )
WideDivide (const wide * dividend,
long divisor,
long * remainder);
/* quotient replaces dividend */
EXTERN_API_C( wide *)
WideWideDivide (wide * dividend,
long divisor,
long * remainder);
EXTERN_API_C( wide *)
WideBitShift (wide * src,
long shift);
#endif /* TARGET_CPU_PPC || !TARGET_OS_MAC || TARGET_CPU_X86 */
#if PRAGMA_STRUCT_ALIGN
#pragma options align=reset
#elif PRAGMA_STRUCT_PACKPUSH
#pragma pack(pop)
#elif PRAGMA_STRUCT_PACK
#pragma pack()
#endif
#ifdef PRAGMA_IMPORT_OFF
#pragma import off
#elif PRAGMA_IMPORT
#pragma import reset
#endif
#ifdef __cplusplus
}
#endif
#endif /* __FIXMATH__ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_cache.h>
#include <drm/radeon_drm.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
static const char radeon_family_name[][16] = {
"R100",
"RV100",
"RS100",
"RV200",
"RS200",
"R200",
"RV250",
"RS300",
"RV280",
"R300",
"R350",
"RV350",
"RV380",
"R420",
"R423",
"RV410",
"RS400",
"RS480",
"RS600",
"RS690",
"RS740",
"RV515",
"R520",
"RV530",
"RV560",
"RV570",
"R580",
"R600",
"RV610",
"RV630",
"RV670",
"RV620",
"RV635",
"RS780",
"RS880",
"RV770",
"RV730",
"RV710",
"RV740",
"CEDAR",
"REDWOOD",
"JUNIPER",
"CYPRESS",
"HEMLOCK",
"PALM",
"SUMO",
"SUMO2",
"BARTS",
"TURKS",
"CAICOS",
"CAYMAN",
"ARUBA",
"TAHITI",
"PITCAIRN",
"VERDE",
"OLAND",
"HAINAN",
"BONAIRE",
"KAVERI",
"KABINI",
"HAWAII",
"MULLINS",
"LAST",
};
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx_dgpu_power_cntl(void);
bool radeon_is_atpx_hybrid(void);
#else
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
struct radeon_px_quirk {
u32 chip_vendor;
u32 chip_device;
u32 subsys_vendor;
u32 subsys_device;
u32 px_quirk_flags;
};
static struct radeon_px_quirk radeon_px_quirk_list[] = {
/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
* https://bugzilla.kernel.org/show_bug.cgi?id=74551
*/
{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
* https://bugzilla.kernel.org/show_bug.cgi?id=51381
*/
{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
* https://bugzilla.kernel.org/show_bug.cgi?id=51381
*/
{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
* https://bugs.freedesktop.org/show_bug.cgi?id=101491
*/
{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
* https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
*/
{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
{ 0, 0, 0, 0, 0 },
};
bool radeon_is_px(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
if (rdev->flags & RADEON_IS_PX)
return true;
return false;
}
static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
{
struct radeon_px_quirk *p = radeon_px_quirk_list;
/* Apply PX quirks */
while (p && p->chip_device != 0) {
if (rdev->pdev->vendor == p->chip_vendor &&
rdev->pdev->device == p->chip_device &&
rdev->pdev->subsystem_vendor == p->subsys_vendor &&
rdev->pdev->subsystem_device == p->subsys_device) {
rdev->px_quirk_flags = p->px_quirk_flags;
break;
}
++p;
}
if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
rdev->flags &= ~RADEON_IS_PX;
/* disable PX if the system doesn't support dGPU power control or hybrid gfx */
if (!radeon_is_atpx_hybrid() &&
!radeon_has_atpx_dgpu_power_cntl())
rdev->flags &= ~RADEON_IS_PX;
}
/**
* radeon_program_register_sequence - program an array of registers.
*
* @rdev: radeon_device pointer
* @registers: pointer to the register array
* @array_size: size of the register array
*
* Programs an array of registers with AND and OR masks.
* This is a helper for setting golden registers.
*/
void radeon_program_register_sequence(struct radeon_device *rdev,
const u32 *registers,
const u32 array_size)
{
u32 tmp, reg, and_mask, or_mask;
int i;
if (array_size % 3)
return;
for (i = 0; i < array_size; i +=3) {
reg = registers[i + 0];
and_mask = registers[i + 1];
or_mask = registers[i + 2];
if (and_mask == 0xffffffff) {
tmp = or_mask;
} else {
tmp = RREG32(reg);
tmp &= ~and_mask;
tmp |= or_mask;
}
WREG32(reg, tmp);
}
}
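/*
 * Illustrative sketch (not part of this driver): callers pass triplets of
 * {register offset, and_mask, or_mask}; an and_mask of 0xffffffff simply
 * overwrites the register with or_mask, anything else is applied as a
 * read-modify-write.  The offsets below are placeholders, not real golden
 * settings.
 *
 *	static const u32 example_golden_regs[] = {
 *		0x1234, 0xffffffff, 0x00000001,
 *		0x5678, 0x0000000f, 0x00000002,
 *	};
 *
 *	radeon_program_register_sequence(rdev, example_golden_regs,
 *					 (u32)ARRAY_SIZE(example_golden_regs));
 */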
void radeon_pci_config_reset(struct radeon_device *rdev)
{
pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
/**
* radeon_surface_init - Clear GPU surface registers.
*
* @rdev: radeon_device pointer
*
* Clear GPU surface registers (r1xx-r5xx).
*/
void radeon_surface_init(struct radeon_device *rdev)
{
/* FIXME: check this out */
if (rdev->family < CHIP_R600) {
int i;
for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
if (rdev->surface_regs[i].bo)
radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
else
radeon_clear_surface_reg(rdev, i);
}
/* enable surfaces */
WREG32(RADEON_SURFACE_CNTL, 0);
}
}
/*
* GPU scratch registers helpers function.
*/
/**
* radeon_scratch_init - Init scratch register driver information.
*
* @rdev: radeon_device pointer
*
* Init CP scratch register driver information (r1xx-r5xx)
*/
void radeon_scratch_init(struct radeon_device *rdev)
{
int i;
/* FIXME: check this out */
if (rdev->family < CHIP_R300) {
rdev->scratch.num_reg = 5;
} else {
rdev->scratch.num_reg = 7;
}
rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
for (i = 0; i < rdev->scratch.num_reg; i++) {
rdev->scratch.free[i] = true;
rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
}
}
/**
* radeon_scratch_get - Allocate a scratch register
*
* @rdev: radeon_device pointer
* @reg: scratch register mmio offset
*
* Allocate a CP scratch register for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
int i;
for (i = 0; i < rdev->scratch.num_reg; i++) {
if (rdev->scratch.free[i]) {
rdev->scratch.free[i] = false;
*reg = rdev->scratch.reg[i];
return 0;
}
}
return -EINVAL;
}
/**
* radeon_scratch_free - Free a scratch register
*
* @rdev: radeon_device pointer
* @reg: scratch register mmio offset
*
* Free a CP scratch register allocated for use by the driver (all asics)
*/
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
int i;
for (i = 0; i < rdev->scratch.num_reg; i++) {
if (rdev->scratch.reg[i] == reg) {
rdev->scratch.free[i] = true;
return;
}
}
}
/*
* GPU doorbell aperture helpers function.
*/
/**
* radeon_doorbell_init - Init doorbell driver information.
*
* @rdev: radeon_device pointer
*
* Init doorbell driver information (CIK)
* Returns 0 on success, error on failure.
*/
static int radeon_doorbell_init(struct radeon_device *rdev)
{
/* doorbell bar mapping */
rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
if (rdev->doorbell.num_doorbells == 0)
return -EINVAL;
rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
if (rdev->doorbell.ptr == NULL) {
return -ENOMEM;
}
DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
return 0;
}
/**
* radeon_doorbell_fini - Tear down doorbell driver information.
*
* @rdev: radeon_device pointer
*
* Tear down doorbell driver information (CIK)
*/
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
iounmap(rdev->doorbell.ptr);
rdev->doorbell.ptr = NULL;
}
/**
* radeon_doorbell_get - Allocate a doorbell entry
*
* @rdev: radeon_device pointer
* @doorbell: doorbell index
*
* Allocate a doorbell for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
if (offset < rdev->doorbell.num_doorbells) {
__set_bit(offset, rdev->doorbell.used);
*doorbell = offset;
return 0;
} else {
return -EINVAL;
}
}
/**
* radeon_doorbell_free - Free a doorbell entry
*
* @rdev: radeon_device pointer
* @doorbell: doorbell index
*
* Free a doorbell allocated for use by the driver (all asics)
*/
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
if (doorbell < rdev->doorbell.num_doorbells)
__clear_bit(doorbell, rdev->doorbell.used);
}
/*
* radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
* in memory with the status of certain GPU events (fences, ring pointers,
* etc.).
*/
/**
* radeon_wb_disable - Disable Writeback
*
* @rdev: radeon_device pointer
*
* Disables Writeback (all asics). Used for suspend.
*/
void radeon_wb_disable(struct radeon_device *rdev)
{
rdev->wb.enabled = false;
}
/**
* radeon_wb_fini - Disable Writeback and free memory
*
* @rdev: radeon_device pointer
*
* Disables Writeback and frees the Writeback memory (all asics).
* Used at driver shutdown.
*/
void radeon_wb_fini(struct radeon_device *rdev)
{
radeon_wb_disable(rdev);
if (rdev->wb.wb_obj) {
if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
radeon_bo_kunmap(rdev->wb.wb_obj);
radeon_bo_unpin(rdev->wb.wb_obj);
radeon_bo_unreserve(rdev->wb.wb_obj);
}
radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
}
/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes writeback and allocates the writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
*/
int radeon_wb_init(struct radeon_device *rdev)
{
int r;
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
&rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
}
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
if (unlikely(r != 0)) {
radeon_wb_fini(rdev);
return r;
}
r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
&rdev->wb.gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->wb.wb_obj);
dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
radeon_wb_fini(rdev);
return r;
}
r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
radeon_wb_fini(rdev);
return r;
}
}
/* clear wb memory */
memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
/* disable event_write fences */
rdev->wb.use_event = false;
/* disabled via module param */
if (radeon_no_wb == 1) {
rdev->wb.enabled = false;
} else {
if (rdev->flags & RADEON_IS_AGP) {
/* often unreliable on AGP */
rdev->wb.enabled = false;
} else if (rdev->family < CHIP_R300) {
/* often unreliable on pre-r300 */
rdev->wb.enabled = false;
} else {
rdev->wb.enabled = true;
/* event_write fences are only available on r600+ */
if (rdev->family >= CHIP_R600) {
rdev->wb.use_event = true;
}
}
}
/* always use writeback/events on NI, APUs */
if (rdev->family >= CHIP_PALM) {
rdev->wb.enabled = true;
rdev->wb.use_event = true;
}
dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
return 0;
}
/**
* radeon_vram_location - try to find VRAM location
* @rdev: radeon device structure holding all necessary informations
* @mc: memory controller structure holding memory informations
* @base: base address at which to put VRAM
*
 * Function will try to place VRAM at the base address provided
 * as a parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the non-visible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function never fails; the worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if the VRAM size is smaller than the
 * aperture size (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
*
* FIXME: when reducing VRAM size align new size on power of 2.
*/
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
uint64_t limit = (uint64_t)radeon_vram_limit << 20;
mc->vram_start = base;
if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
mc->real_vram_size = mc->aper_size;
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
mc->real_vram_size = mc->aper_size;
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
if (limit && limit < mc->real_vram_size)
mc->real_vram_size = limit;
dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
}
/**
* radeon_gtt_location - try to find GTT location
* @rdev: radeon device structure holding all necessary informations
* @mc: memory controller structure holding memory informations
*
 * Function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function never fails.
*
* FIXME: when reducing GTT size align new size on power of 2.
*/
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_af, size_bf;
size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
size_bf = mc->vram_start & ~mc->gtt_base_align;
if (size_bf > size_af) {
if (mc->gtt_size > size_bf) {
dev_warn(rdev->dev, "limiting GTT\n");
mc->gtt_size = size_bf;
}
mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
} else {
if (mc->gtt_size > size_af) {
dev_warn(rdev->dev, "limiting GTT\n");
mc->gtt_size = size_af;
}
mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
}
mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
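/*
 * Worked example with illustrative numbers only: with vram_start = 0,
 * vram_end = 0x0FFFFFFF (256MB of VRAM), mc_mask = 0xFFFFFFFF and
 * gtt_base_align = 0, we get size_bf = 0 and size_af = 0xF0000000, so the
 * GTT is placed after VRAM at gtt_start = 0x10000000.  If the requested
 * gtt_size were larger than size_af it would be clamped to fit.
 */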
/*
* GPU helpers function.
*/
/**
 * radeon_device_is_virtual - check if we are running in a virtual environment
*
* Check if the asic has been passed through to a VM (all asics).
* Used at driver startup.
* Returns true if virtual or false if not.
*/
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
return false;
#endif
}
/**
* radeon_card_posted - check if the hw has already been initialized
*
* @rdev: radeon_device pointer
*
* Check if the asic has been initialized (all asics).
* Used at driver startup.
* Returns true if initialized or false if not.
*/
bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
/* for pass through, always force asic_init for CI */
if (rdev->family >= CHIP_BONAIRE &&
radeon_device_is_virtual())
return false;
/* required for EFI mode on macbook2,1 which uses an r5xx asic */
if (efi_enabled(EFI_BOOT) &&
(rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
(rdev->family < CHIP_R600))
return false;
if (ASIC_IS_NODCE(rdev))
goto check_memsize;
/* first check CRTCs */
if (ASIC_IS_DCE4(rdev)) {
reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
if (rdev->num_crtc >= 4) {
reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
}
if (rdev->num_crtc >= 6) {
reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
if (reg & EVERGREEN_CRTC_MASTER_EN)
return true;
} else if (ASIC_IS_AVIVO(rdev)) {
reg = RREG32(AVIVO_D1CRTC_CONTROL) |
RREG32(AVIVO_D2CRTC_CONTROL);
if (reg & AVIVO_CRTC_EN) {
return true;
}
} else {
reg = RREG32(RADEON_CRTC_GEN_CNTL) |
RREG32(RADEON_CRTC2_GEN_CNTL);
if (reg & RADEON_CRTC_EN) {
return true;
}
}
check_memsize:
/* then check MEM_SIZE, in case the crtcs are off */
if (rdev->family >= CHIP_R600)
reg = RREG32(R600_CONFIG_MEMSIZE);
else
reg = RREG32(RADEON_CONFIG_MEMSIZE);
if (reg)
return true;
return false;
}
/**
* radeon_update_bandwidth_info - update display bandwidth params
*
* @rdev: radeon_device pointer
*
* Used when sclk/mclk are switched or display modes are set.
 * The params are used to calculate display watermarks (all asics).
*/
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
fixed20_12 a;
u32 sclk = rdev->pm.current_sclk;
u32 mclk = rdev->pm.current_mclk;
/* sclk/mclk in Mhz */
a.full = dfixed_const(100);
rdev->pm.sclk.full = dfixed_const(sclk);
rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
rdev->pm.mclk.full = dfixed_const(mclk);
rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
if (rdev->flags & RADEON_IS_IGP) {
a.full = dfixed_const(16);
/* core_bandwidth = sclk(Mhz) * 16 */
rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
}
}
/**
* radeon_boot_test_post_card - check and possibly initialize the hw
*
* @rdev: radeon_device pointer
*
* Check if the asic is initialized and if not, attempt to initialize
* it (all asics).
* Returns true if initialized or false if not.
*/
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
if (radeon_card_posted(rdev))
return true;
if (rdev->bios) {
DRM_INFO("GPU not posted. posting now...\n");
if (rdev->is_atom_bios)
atom_asic_init(rdev->mode_info.atom_context);
else
radeon_combios_asic_init(rdev->ddev);
return true;
} else {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return false;
}
}
/**
* radeon_dummy_page_init - init dummy page used by the driver
*
* @rdev: radeon_device pointer
*
* Allocate the dummy page used by the driver (all asics).
* This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
*/
int radeon_dummy_page_init(struct radeon_device *rdev)
{
if (rdev->dummy_page.page)
return 0;
rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
if (rdev->dummy_page.page == NULL)
return -ENOMEM;
rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
__free_page(rdev->dummy_page.page);
rdev->dummy_page.page = NULL;
return -ENOMEM;
}
rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
RADEON_GART_PAGE_DUMMY);
return 0;
}
/**
* radeon_dummy_page_fini - free dummy page used by the driver
*
* @rdev: radeon_device pointer
*
* Frees the dummy page used by the driver (all asics).
*/
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
if (rdev->dummy_page.page == NULL)
return;
pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
__free_page(rdev->dummy_page.page);
rdev->dummy_page.page = NULL;
}
/* ATOM accessor methods */
/*
* ATOM is an interpreted byte code stored in tables in the vbios. The
* driver registers callbacks to access registers and the interpreter
* in the driver parses the tables and executes then to program specific
* actions (set display modes, asic init, etc.). See radeon_atombios.c,
* atombios.h, and atom.c
*/
/**
* cail_pll_read - read PLL register
*
* @info: atom card_info pointer
* @reg: PLL register offset
*
* Provides a PLL register accessor for the atom interpreter (r4xx+).
* Returns the value of the PLL register.
*/
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
uint32_t r;
r = rdev->pll_rreg(rdev, reg);
return r;
}
/**
* cail_pll_write - write PLL register
*
* @info: atom card_info pointer
* @reg: PLL register offset
* @val: value to write to the pll register
*
* Provides a PLL register accessor for the atom interpreter (r4xx+).
*/
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
rdev->pll_wreg(rdev, reg, val);
}
/**
* cail_mc_read - read MC (Memory Controller) register
*
* @info: atom card_info pointer
* @reg: MC register offset
*
* Provides an MC register accessor for the atom interpreter (r4xx+).
* Returns the value of the MC register.
*/
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
uint32_t r;
r = rdev->mc_rreg(rdev, reg);
return r;
}
/**
* cail_mc_write - write MC (Memory Controller) register
*
* @info: atom card_info pointer
* @reg: MC register offset
* @val: value to write to the pll register
*
* Provides a MC register accessor for the atom interpreter (r4xx+).
*/
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
rdev->mc_wreg(rdev, reg, val);
}
/**
* cail_reg_write - write MMIO register
*
* @info: atom card_info pointer
* @reg: MMIO register offset
* @val: value to write to the pll register
*
* Provides a MMIO register accessor for the atom interpreter (r4xx+).
*/
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
WREG32(reg*4, val);
}
/**
* cail_reg_read - read MMIO register
*
* @info: atom card_info pointer
* @reg: MMIO register offset
*
* Provides an MMIO register accessor for the atom interpreter (r4xx+).
* Returns the value of the MMIO register.
*/
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
uint32_t r;
r = RREG32(reg*4);
return r;
}
/**
* cail_ioreg_write - write IO register
*
* @info: atom card_info pointer
* @reg: IO register offset
* @val: value to write to the pll register
*
* Provides a IO register accessor for the atom interpreter (r4xx+).
*/
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
WREG32_IO(reg*4, val);
}
/**
* cail_ioreg_read - read IO register
*
* @info: atom card_info pointer
* @reg: IO register offset
*
* Provides an IO register accessor for the atom interpreter (r4xx+).
* Returns the value of the IO register.
*/
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
uint32_t r;
r = RREG32_IO(reg*4);
return r;
}
/**
* radeon_atombios_init - init the driver info and callbacks for atombios
*
* @rdev: radeon_device pointer
*
* Initializes the driver info and register access callbacks for the
* ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
* Called at driver startup.
*/
int radeon_atombios_init(struct radeon_device *rdev)
{
struct card_info *atom_card_info =
kzalloc(sizeof(struct card_info), GFP_KERNEL);
if (!atom_card_info)
return -ENOMEM;
rdev->mode_info.atom_card_info = atom_card_info;
atom_card_info->dev = rdev->ddev;
atom_card_info->reg_read = cail_reg_read;
atom_card_info->reg_write = cail_reg_write;
/* needed for iio ops */
if (rdev->rio_mem) {
atom_card_info->ioreg_read = cail_ioreg_read;
atom_card_info->ioreg_write = cail_ioreg_write;
} else {
DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
atom_card_info->ioreg_read = cail_reg_read;
atom_card_info->ioreg_write = cail_reg_write;
}
atom_card_info->mc_read = cail_mc_read;
atom_card_info->mc_write = cail_mc_write;
atom_card_info->pll_read = cail_pll_read;
atom_card_info->pll_write = cail_pll_write;
rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
if (!rdev->mode_info.atom_context) {
radeon_atombios_fini(rdev);
return -ENOMEM;
}
mutex_init(&rdev->mode_info.atom_context->mutex);
mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
return 0;
}
/**
* radeon_atombios_fini - free the driver info and callbacks for atombios
*
* @rdev: radeon_device pointer
*
* Frees the driver info and register access callbacks for the ATOM
* interpreter (r4xx+).
* Called at driver shutdown.
*/
void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
kfree(rdev->mode_info.atom_context->scratch);
}
kfree(rdev->mode_info.atom_context);
rdev->mode_info.atom_context = NULL;
kfree(rdev->mode_info.atom_card_info);
rdev->mode_info.atom_card_info = NULL;
}
/* COMBIOS */
/*
* COMBIOS is the bios format prior to ATOM. It provides
* command tables similar to ATOM, but doesn't have a unified
* parser. See radeon_combios.c
*/
/**
* radeon_combios_init - init the driver info for combios
*
* @rdev: radeon_device pointer
*
* Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
* Called at driver startup.
*/
int radeon_combios_init(struct radeon_device *rdev)
{
radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
return 0;
}
/**
* radeon_combios_fini - free the driver info for combios
*
* @rdev: radeon_device pointer
*
* Frees the driver info for combios (r1xx-r3xx).
* Called at driver shutdown.
*/
void radeon_combios_fini(struct radeon_device *rdev)
{
}
/* if we get transitioned to only one device, take VGA back */
/**
* radeon_vga_set_decode - enable/disable vga decode
*
* @cookie: radeon_device pointer
* @state: enable/disable vga decode
*
* Enable/disable vga decode (all asics).
* Returns VGA resource flags.
*/
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
struct radeon_device *rdev = cookie;
radeon_vga_set_state(rdev, state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
else
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
/**
* radeon_check_pot_argument - check that argument is a power of two
*
* @arg: value to check
*
* Validates that a certain argument is a power of two (all asics).
* Returns true if argument is valid.
*/
static bool radeon_check_pot_argument(int arg)
{
return (arg & (arg - 1)) == 0;
}
/**
 * radeon_gart_size_auto - determine a sensible default GART size
 *
 * @family: ASIC family name
 *
 * Returns a default GART size in MB according to the ASIC family.
 */
static int radeon_gart_size_auto(enum radeon_family family)
{
/* default to a larger gart size on newer asics */
if (family >= CHIP_TAHITI)
return 2048;
else if (family >= CHIP_RV770)
return 1024;
else
return 512;
}
/**
* radeon_check_arguments - validate module params
*
* @rdev: radeon_device pointer
*
* Validates certain module parameters and updates
* the associated values used by the driver (all asics).
*/
static void radeon_check_arguments(struct radeon_device *rdev)
{
/* vramlimit must be a power of two */
if (!radeon_check_pot_argument(radeon_vram_limit)) {
dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
radeon_vram_limit);
radeon_vram_limit = 0;
}
if (radeon_gart_size == -1) {
radeon_gart_size = radeon_gart_size_auto(rdev->family);
}
	/* gtt size must be a power of two and greater than or equal to 32M */
if (radeon_gart_size < 32) {
dev_warn(rdev->dev, "gart size (%d) too small\n",
radeon_gart_size);
radeon_gart_size = radeon_gart_size_auto(rdev->family);
} else if (!radeon_check_pot_argument(radeon_gart_size)) {
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
radeon_gart_size = radeon_gart_size_auto(rdev->family);
}
rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
/* AGP mode can only be -1, 1, 2, 4, 8 */
switch (radeon_agpmode) {
case -1:
case 0:
case 1:
case 2:
case 4:
case 8:
break;
default:
dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
radeon_agpmode = 0;
break;
}
if (!radeon_check_pot_argument(radeon_vm_size)) {
dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
radeon_vm_size);
radeon_vm_size = 4;
}
if (radeon_vm_size < 1) {
dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
radeon_vm_size);
radeon_vm_size = 4;
}
/*
* Max GPUVM size for Cayman, SI and CI are 40 bits.
*/
if (radeon_vm_size > 1024) {
dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
radeon_vm_size);
radeon_vm_size = 4;
}
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
if (radeon_vm_block_size == -1) {
/* Total bits covered by PD + PTs */
unsigned bits = ilog2(radeon_vm_size) + 18;
/* Make sure the PD is 4K in size up to 8GB address space.
Above that split equal between PD and PTs */
if (radeon_vm_size <= 8)
radeon_vm_block_size = bits - 9;
else
radeon_vm_block_size = (bits + 3) / 2;
} else if (radeon_vm_block_size < 9) {
dev_warn(rdev->dev, "VM page table size (%d) too small\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
if (radeon_vm_block_size > 24 ||
(radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
dev_warn(rdev->dev, "VM page table size (%d) too large\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
}
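/*
 * Worked example for the VM block size heuristic above (illustrative
 * numbers): with the default radeon_vm_size = 8 (GB), bits = ilog2(8) + 18
 * = 21 and, since radeon_vm_size <= 8, radeon_vm_block_size = 21 - 9 = 12,
 * leaving 9 bits for the page directory (a single 4K PD page).  For
 * radeon_vm_size = 64, bits = 24 and the split is (24 + 3) / 2 = 13 bits of
 * page table, with the remaining 11 bits in the page directory.
 */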
/**
* radeon_switcheroo_set_state - set switcheroo state
*
* @pdev: pci dev pointer
* @state: vga_switcheroo state
*
 * Callback for the switcheroo driver. Suspends or resumes the
 * asic before or after it is powered up using ACPI methods.
*/
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
pr_info("radeon: switched on\n");
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
radeon_resume_kms(dev, true, true);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
pr_info("radeon: switched off\n");
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
radeon_suspend_kms(dev, true, true, false);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
/**
* radeon_switcheroo_can_switch - see if switcheroo state can change
*
* @pdev: pci dev pointer
*
 * Callback for the switcheroo driver. Checks whether the switcheroo
 * state can be changed.
* Returns true if the state can be changed, false if not.
*/
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
/*
* FIXME: open_count is protected by drm_global_mutex but that would lead to
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
return dev->open_count == 0;
}
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
.set_gpu_state = radeon_switcheroo_set_state,
.reprobe = NULL,
.can_switch = radeon_switcheroo_can_switch,
};
/**
* radeon_device_init - initialize the driver
*
* @rdev: radeon_device pointer
 * @ddev: drm dev pointer
* @pdev: pci dev pointer
* @flags: driver flags
*
* Initializes the driver info and hw (all asics).
* Returns 0 for success or an error on failure.
* Called at driver startup.
*/
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
struct pci_dev *pdev,
uint32_t flags)
{
int r, i;
int dma_bits;
bool runtime = false;
rdev->shutdown = false;
rdev->dev = &pdev->dev;
rdev->ddev = ddev;
rdev->pdev = pdev;
rdev->flags = flags;
rdev->family = flags & RADEON_FAMILY_MASK;
rdev->is_atom_bios = false;
rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
rdev->mc.gtt_size = 512 * 1024 * 1024;
rdev->accel_working = false;
/* set up ring ids */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
rdev->ring[i].idx = i;
}
rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
/* mutex initialization are all done here so we
* can recall function without having locking issues */
mutex_init(&rdev->ring_lock);
mutex_init(&rdev->dc_hw_i2c_mutex);
atomic_set(&rdev->ih.lock, 0);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
mutex_init(&rdev->srbm_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
mutex_init(&rdev->mn_lock);
hash_init(rdev->mn_hash);
r = radeon_gem_init(rdev);
if (r)
return r;
radeon_check_arguments(rdev);
/* Adjust VM size here.
* Max GPUVM size for cayman+ is 40 bits.
*/
rdev->vm_manager.max_pfn = radeon_vm_size << 18;
/* Set asic functions */
r = radeon_asic_init(rdev);
if (r)
return r;
	/* all of the newer IGP chips have an internal gart.
* However some rs4xx report as AGP, so remove that here.
*/
if ((rdev->family >= CHIP_RS400) &&
(rdev->flags & RADEON_IS_IGP)) {
rdev->flags &= ~RADEON_IS_AGP;
}
if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
radeon_agp_disable(rdev);
}
/* Set the internal MC address mask
* This is the max address of the GPU's
* internal address space.
*/
if (rdev->family >= CHIP_CAYMAN)
rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
else if (rdev->family >= CHIP_CEDAR)
rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
else
rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits
* AGP - generally dma32 is safest
* PCI - dma32 for legacy pci gart, 40 bits on newer asics
*/
rdev->need_dma32 = false;
if (rdev->flags & RADEON_IS_AGP)
rdev->need_dma32 = true;
if ((rdev->flags & RADEON_IS_PCI) &&
(rdev->family <= CHIP_RS740))
rdev->need_dma32 = true;
#ifdef CONFIG_PPC64
if (rdev->family == CHIP_CEDAR)
rdev->need_dma32 = true;
#endif
dma_bits = rdev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
rdev->need_dma32 = true;
dma_bits = 32;
pr_warn("radeon: No suitable DMA available\n");
}
r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
pr_warn("radeon: No coherent DMA available\n");
}
rdev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
/* Registers mapping */
/* TODO: block userspace mapping of io register */
spin_lock_init(&rdev->mmio_idx_lock);
spin_lock_init(&rdev->smc_idx_lock);
spin_lock_init(&rdev->pll_idx_lock);
spin_lock_init(&rdev->mc_idx_lock);
spin_lock_init(&rdev->pcie_idx_lock);
spin_lock_init(&rdev->pciep_idx_lock);
spin_lock_init(&rdev->pif_idx_lock);
spin_lock_init(&rdev->cg_idx_lock);
spin_lock_init(&rdev->uvd_idx_lock);
spin_lock_init(&rdev->rcu_idx_lock);
spin_lock_init(&rdev->didt_idx_lock);
spin_lock_init(&rdev->end_idx_lock);
if (rdev->family >= CHIP_BONAIRE) {
rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
} else {
rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
}
rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
if (rdev->rmmio == NULL)
return -ENOMEM;
/* doorbell bar mapping */
if (rdev->family >= CHIP_BONAIRE)
radeon_doorbell_init(rdev);
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
break;
}
}
if (rdev->rio_mem == NULL)
DRM_ERROR("Unable to find PCI I/O BAR\n");
if (rdev->flags & RADEON_IS_PX)
radeon_device_handle_px_quirks(rdev);
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
if (rdev->flags & RADEON_IS_PX)
runtime = true;
if (!pci_is_thunderbolt_attached(rdev->pdev))
vga_switcheroo_register_client(rdev->pdev,
&radeon_switcheroo_ops, runtime);
if (runtime)
vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
r = radeon_init(rdev);
if (r)
goto failed;
r = radeon_gem_debugfs_init(rdev);
if (r) {
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
}
r = radeon_mst_debugfs_init(rdev);
if (r) {
DRM_ERROR("registering mst debugfs failed (%d).\n", r);
}
if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card, try again
* with fallback to PCI or PCIE GART
*/
radeon_asic_reset(rdev);
radeon_fini(rdev);
radeon_agp_disable(rdev);
r = radeon_init(rdev);
if (r)
goto failed;
}
r = radeon_ib_ring_tests(rdev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
/*
* Turks/Thames GPU will freeze whole laptop if DPM is not restarted
	 * after the CP ring has chewed at least one packet. Hence here we stop
* and restart DPM after the radeon_ib_ring_tests().
*/
if (rdev->pm.dpm_enabled &&
(rdev->pm.pm_method == PM_METHOD_DPM) &&
(rdev->family == CHIP_TURKS) &&
(rdev->flags & RADEON_IS_MOBILITY)) {
mutex_lock(&rdev->pm.mutex);
radeon_dpm_disable(rdev);
radeon_dpm_enable(rdev);
mutex_unlock(&rdev->pm.mutex);
}
if ((radeon_testing & 1)) {
if (rdev->accel_working)
radeon_test_moves(rdev);
else
DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
}
if ((radeon_testing & 2)) {
if (rdev->accel_working)
radeon_test_syncing(rdev);
else
DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
}
if (radeon_benchmarking) {
if (rdev->accel_working)
radeon_benchmark(rdev, radeon_benchmarking);
else
DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
}
return 0;
failed:
/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
if (radeon_is_px(ddev))
pm_runtime_put_noidle(ddev->dev);
if (runtime)
vga_switcheroo_fini_domain_pm_ops(rdev->dev);
return r;
}
/**
* radeon_device_fini - tear down the driver
*
* @rdev: radeon_device pointer
*
* Tear down the driver info (all asics).
* Called at driver shutdown.
*/
void radeon_device_fini(struct radeon_device *rdev)
{
DRM_INFO("radeon: finishing device.\n");
rdev->shutdown = true;
/* evict vram memory */
radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
if (!pci_is_thunderbolt_attached(rdev->pdev))
vga_switcheroo_unregister_client(rdev->pdev);
if (rdev->flags & RADEON_IS_PX)
vga_switcheroo_fini_domain_pm_ops(rdev->dev);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
if (rdev->rio_mem)
pci_iounmap(rdev->pdev, rdev->rio_mem);
rdev->rio_mem = NULL;
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
if (rdev->family >= CHIP_BONAIRE)
radeon_doorbell_fini(rdev);
}
/*
* Suspend & resume.
*/
/**
* radeon_suspend_kms - initiate device suspend
*
 * @dev: drm dev pointer
 * @suspend: true to put the device into a low power (D3hot) PCI state
 * @fbcon: suspend the fbdev console as well
 * @freeze: reset the asic instead of a full suspend (hibernation path)
*
* Puts the hw in the suspend state (all asics).
* Returns 0 for success or an error on failure.
* Called at driver suspend.
*/
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
bool fbcon, bool freeze)
{
struct radeon_device *rdev;
struct drm_crtc *crtc;
struct drm_connector *connector;
int i, r;
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
}
rdev = dev->dev_private;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
drm_kms_helper_poll_disable(dev);
drm_modeset_lock_all(dev);
/* turn off display hw */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
drm_modeset_unlock_all(dev);
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
struct radeon_bo *robj;
if (radeon_crtc->cursor_bo) {
struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
r = radeon_bo_reserve(robj, false);
if (r == 0) {
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
}
if (fb == NULL || fb->obj[0] == NULL) {
continue;
}
robj = gem_to_radeon_bo(fb->obj[0]);
/* don't unpin kernel fb objects */
if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
r = radeon_bo_reserve(robj, false);
if (r == 0) {
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
}
}
/* evict vram memory */
radeon_bo_evict_vram(rdev);
/* wait for gpu to finish processing current batch */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
r = radeon_fence_wait_empty(rdev, i);
if (r) {
/* delay GPU reset to resume */
radeon_fence_driver_force_completion(rdev, i);
}
}
radeon_save_bios_scratch_regs(rdev);
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
* using the CPU.
*/
radeon_bo_evict_vram(rdev);
radeon_agp_suspend(rdev);
pci_save_state(dev->pdev);
if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
rdev->asic->asic_reset(rdev, true);
pci_restore_state(dev->pdev);
} else if (suspend) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
}
if (fbcon) {
console_lock();
radeon_fbdev_set_suspend(rdev, 1);
console_unlock();
}
return 0;
}
/**
* radeon_resume_kms - initiate device resume
*
 * @dev: drm dev pointer
 * @resume: re-enable and power up the PCI device
 * @fbcon: resume the fbdev console as well
*
* Bring the hw back to operating state (all asics).
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *crtc;
int r;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
if (fbcon) {
console_lock();
}
if (resume) {
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
if (pci_enable_device(dev->pdev)) {
if (fbcon)
console_unlock();
return -1;
}
}
/* resume AGP if in use */
radeon_agp_resume(rdev);
radeon_resume(rdev);
r = radeon_ib_ring_tests(rdev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
/* do dpm late init */
r = radeon_pm_late_init(rdev);
if (r) {
rdev->pm.dpm_enabled = false;
DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
}
} else {
/* resume old pm late */
radeon_pm_resume(rdev);
}
radeon_restore_bios_scratch_regs(rdev);
/* pin cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
if (radeon_crtc->cursor_bo) {
struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
r = radeon_bo_reserve(robj, false);
if (r == 0) {
/* Only 27 bit offset for legacy cursor */
r = radeon_bo_pin_restricted(robj,
RADEON_GEM_DOMAIN_VRAM,
ASIC_IS_AVIVO(rdev) ?
0 : 1 << 27,
&radeon_crtc->cursor_addr);
if (r != 0)
DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
radeon_bo_unreserve(robj);
}
}
}
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
radeon_atom_disp_eng_pll_init(rdev);
/* turn on the BL */
if (rdev->mode_info.bl_encoder) {
u8 bl_level = radeon_get_backlight_level(rdev,
rdev->mode_info.bl_encoder);
radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
bl_level);
}
}
/* reset hpd state */
radeon_hpd_init(rdev);
/* blat the mode back in */
if (fbcon) {
drm_helper_resume_force_mode(dev);
/* turn on display hw */
drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
drm_modeset_unlock_all(dev);
}
drm_kms_helper_poll_enable(dev);
/* set the power state here in case we are a PX system or headless */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
radeon_pm_compute_clocks(rdev);
if (fbcon) {
radeon_fbdev_set_suspend(rdev, 0);
console_unlock();
}
return 0;
}
/**
* radeon_gpu_reset - reset the asic
*
* @rdev: radeon device pointer
*
 * Attempt to reset the GPU if it has hung (all asics).
* Returns 0 for success or an error on failure.
*/
int radeon_gpu_reset(struct radeon_device *rdev)
{
unsigned ring_sizes[RADEON_NUM_RINGS];
uint32_t *ring_data[RADEON_NUM_RINGS];
bool saved = false;
int i, r;
int resched;
down_write(&rdev->exclusive_lock);
if (!rdev->needs_reset) {
up_write(&rdev->exclusive_lock);
return 0;
}
atomic_inc(&rdev->gpu_reset_counter);
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
&ring_data[i]);
if (ring_sizes[i]) {
saved = true;
dev_info(rdev->dev, "Saved %d dwords of commands "
"on ring %d.\n", ring_sizes[i], i);
}
}
r = radeon_asic_reset(rdev);
if (!r) {
dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
radeon_resume(rdev);
}
radeon_restore_bios_scratch_regs(rdev);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!r && ring_data[i]) {
radeon_ring_restore(rdev, &rdev->ring[i],
ring_sizes[i], ring_data[i]);
} else {
radeon_fence_driver_force_completion(rdev, i);
kfree(ring_data[i]);
}
}
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
/* do dpm late init */
r = radeon_pm_late_init(rdev);
if (r) {
rdev->pm.dpm_enabled = false;
DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
}
} else {
/* resume old pm late */
radeon_pm_resume(rdev);
}
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
radeon_atom_disp_eng_pll_init(rdev);
/* turn on the BL */
if (rdev->mode_info.bl_encoder) {
u8 bl_level = radeon_get_backlight_level(rdev,
rdev->mode_info.bl_encoder);
radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
bl_level);
}
}
/* reset hpd state */
radeon_hpd_init(rdev);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
rdev->in_reset = true;
rdev->needs_reset = false;
downgrade_write(&rdev->exclusive_lock);
drm_helper_resume_force_mode(rdev->ddev);
/* set the power state here in case we are a PX system or headless */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
radeon_pm_compute_clocks(rdev);
if (!r) {
r = radeon_ib_ring_tests(rdev);
if (r && saved)
r = -EAGAIN;
} else {
/* bad news, how to tell it to userspace ? */
dev_info(rdev->dev, "GPU reset failed\n");
}
rdev->needs_reset = r == -EAGAIN;
rdev->in_reset = false;
up_read(&rdev->exclusive_lock);
return r;
}
/*
* Debugfs
*/
int radeon_debugfs_add_files(struct radeon_device *rdev,
struct drm_info_list *files,
unsigned nfiles)
{
unsigned i;
for (i = 0; i < rdev->debugfs_count; i++) {
if (rdev->debugfs[i].files == files) {
/* Already registered */
return 0;
}
}
i = rdev->debugfs_count + 1;
if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
DRM_ERROR("Reached maximum number of debugfs components.\n");
DRM_ERROR("Report so we increase "
"RADEON_DEBUGFS_MAX_COMPONENTS.\n");
return -EINVAL;
}
rdev->debugfs[rdev->debugfs_count].files = files;
rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
rdev->ddev->primary->debugfs_root,
rdev->ddev->primary);
#endif
return 0;
}
| {
"language": "C"
} |
#include "boards.h"
#include "uf2/configkeys.h"
__attribute__((used, section(".bootloaderConfig")))
const uint32_t bootloaderConfig[] =
{
/* CF2 START */
CFG_MAGIC0, CFG_MAGIC1, // magic
5, 100, // used entries, total entries
204, 0x100000, // FLASH_BYTES = 0x100000
205, 0x40000, // RAM_BYTES = 0x40000
208, (USB_DESC_VID << 16) | USB_DESC_UF2_PID, // BOOTLOADER_BOARD_ID = USB VID+PID, used for verification when updating bootloader via uf2
209, 0xada52840, // UF2_FAMILY = 0xada52840
210, 0x20, // PINS_PORT_SIZE = PA_32
0, 0, 0, 0, 0, 0, 0, 0
/* CF2 END */
};
| {
"language": "C"
} |
/* $NetBSD: make_malloc.c,v 1.10 2012/06/20 17:46:28 sjg Exp $ */
/*-
* Copyright (c) 2009 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef MAKE_NATIVE
#include <sys/cdefs.h>
__RCSID("$NetBSD: make_malloc.c,v 1.10 2012/06/20 17:46:28 sjg Exp $");
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "make.h"
#ifndef USE_EMALLOC
static void enomem(void) MAKE_ATTR_DEAD;
/*
* enomem --
* die when out of memory.
*/
static void
enomem(void)
{
(void)fprintf(stderr, "%s: %s.\n", progname, strerror(ENOMEM));
exit(2);
}
/*
* bmake_malloc --
* malloc, but die on error.
*/
void *
bmake_malloc(size_t len)
{
void *p;
if ((p = malloc(len)) == NULL)
enomem();
return(p);
}
/*
* bmake_strdup --
* strdup, but die on error.
*/
char *
bmake_strdup(const char *str)
{
size_t len;
char *p;
len = strlen(str) + 1;
if ((p = malloc(len)) == NULL)
enomem();
return memcpy(p, str, len);
}
/*
* bmake_strndup --
* strndup, but die on error.
*/
char *
bmake_strndup(const char *str, size_t max_len)
{
size_t len;
char *p;
if (str == NULL)
return NULL;
len = strlen(str);
if (len > max_len)
len = max_len;
p = bmake_malloc(len + 1);
memcpy(p, str, len);
p[len] = '\0';
return(p);
}
/*
* bmake_realloc --
* realloc, but die on error.
*/
void *
bmake_realloc(void *ptr, size_t size)
{
if ((ptr = realloc(ptr, size)) == NULL)
enomem();
return(ptr);
}
#endif
| {
"language": "C"
} |
/*
* Copyright (C) 2001-2003 Sistina Software (UK) Limited.
*
* This file is released under the GPL.
*/
#include "dm.h"
#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/log2.h>
#define DM_MSG_PREFIX "striped"
#define DM_IO_ERROR_THRESHOLD 15
struct stripe {
struct dm_dev *dev;
sector_t physical_start;
atomic_t error_count;
};
struct stripe_c {
uint32_t stripes;
int stripes_shift;
/* The size of this target / num. stripes */
sector_t stripe_width;
uint32_t chunk_size;
int chunk_size_shift;
/* Needed for handling events */
struct dm_target *ti;
/* Work struct used for triggering events*/
struct work_struct trigger_event;
struct stripe stripe[];
};
/*
* An event is triggered whenever a drive
* drops out of a stripe volume.
*/
static void trigger_event(struct work_struct *work)
{
struct stripe_c *sc = container_of(work, struct stripe_c,
trigger_event);
dm_table_event(sc->ti->table);
}
/*
* Parse a single <dev> <sector> pair
*/
static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
unsigned int stripe, char **argv)
{
unsigned long long start;
char dummy;
int ret;
if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1)
return -EINVAL;
ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
&sc->stripe[stripe].dev);
if (ret)
return ret;
sc->stripe[stripe].physical_start = start;
return 0;
}
/*
* Construct a striped mapping.
* <number of stripes> <chunk size> [<dev_path> <offset>]+
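 *
 * For example, with hypothetical device names, a two-stripe target with a
 * 128-sector chunk size could be set up with a table line such as:
 *
 *	0 1048576 striped 2 128 /dev/sda1 0 /dev/sdb1 0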
*/
static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct stripe_c *sc;
sector_t width, tmp_len;
uint32_t stripes;
uint32_t chunk_size;
int r;
unsigned int i;
if (argc < 2) {
ti->error = "Not enough arguments";
return -EINVAL;
}
if (kstrtouint(argv[0], 10, &stripes) || !stripes) {
ti->error = "Invalid stripe count";
return -EINVAL;
}
if (kstrtouint(argv[1], 10, &chunk_size) || !chunk_size) {
ti->error = "Invalid chunk_size";
return -EINVAL;
}
width = ti->len;
if (sector_div(width, stripes)) {
ti->error = "Target length not divisible by "
"number of stripes";
return -EINVAL;
}
tmp_len = width;
if (sector_div(tmp_len, chunk_size)) {
ti->error = "Target length not divisible by "
"chunk size";
return -EINVAL;
}
/*
* Do we have enough arguments for that many stripes ?
*/
if (argc != (2 + 2 * stripes)) {
ti->error = "Not enough destinations "
"specified";
return -EINVAL;
}
sc = kmalloc(struct_size(sc, stripe, stripes), GFP_KERNEL);
if (!sc) {
ti->error = "Memory allocation for striped context "
"failed";
return -ENOMEM;
}
INIT_WORK(&sc->trigger_event, trigger_event);
/* Set pointer to dm target; used in trigger_event */
sc->ti = ti;
sc->stripes = stripes;
sc->stripe_width = width;
if (stripes & (stripes - 1))
sc->stripes_shift = -1;
else
sc->stripes_shift = __ffs(stripes);
r = dm_set_target_max_io_len(ti, chunk_size);
if (r) {
kfree(sc);
return r;
}
ti->num_flush_bios = stripes;
ti->num_discard_bios = stripes;
ti->num_secure_erase_bios = stripes;
ti->num_write_same_bios = stripes;
ti->num_write_zeroes_bios = stripes;
sc->chunk_size = chunk_size;
if (chunk_size & (chunk_size - 1))
sc->chunk_size_shift = -1;
else
sc->chunk_size_shift = __ffs(chunk_size);
/*
* Get the stripe destinations.
*/
for (i = 0; i < stripes; i++) {
argv += 2;
r = get_stripe(ti, sc, i, argv);
if (r < 0) {
ti->error = "Couldn't parse stripe destination";
while (i--)
dm_put_device(ti, sc->stripe[i].dev);
kfree(sc);
return r;
}
atomic_set(&(sc->stripe[i].error_count), 0);
}
ti->private = sc;
return 0;
}
static void stripe_dtr(struct dm_target *ti)
{
unsigned int i;
struct stripe_c *sc = (struct stripe_c *) ti->private;
for (i = 0; i < sc->stripes; i++)
dm_put_device(ti, sc->stripe[i].dev);
flush_work(&sc->trigger_event);
kfree(sc);
}
static void stripe_map_sector(struct stripe_c *sc, sector_t sector,
uint32_t *stripe, sector_t *result)
{
sector_t chunk = dm_target_offset(sc->ti, sector);
sector_t chunk_offset;
if (sc->chunk_size_shift < 0)
chunk_offset = sector_div(chunk, sc->chunk_size);
else {
chunk_offset = chunk & (sc->chunk_size - 1);
chunk >>= sc->chunk_size_shift;
}
if (sc->stripes_shift < 0)
*stripe = sector_div(chunk, sc->stripes);
else {
*stripe = chunk & (sc->stripes - 1);
chunk >>= sc->stripes_shift;
}
if (sc->chunk_size_shift < 0)
chunk *= sc->chunk_size;
else
chunk <<= sc->chunk_size_shift;
*result = chunk + chunk_offset;
}
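/*
 * Worked example with illustrative numbers: for chunk_size = 128 sectors and
 * stripes = 2, a target-relative sector of 300 gives chunk = 2 and
 * chunk_offset = 44.  chunk % stripes selects stripe 0 and chunk becomes 1,
 * so *result = 1 * 128 + 44 = 172 on that stripe's device (the caller then
 * adds the per-stripe physical_start).
 */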
static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
uint32_t target_stripe, sector_t *result)
{
uint32_t stripe;
stripe_map_sector(sc, sector, &stripe, result);
if (stripe == target_stripe)
return;
/* round down */
sector = *result;
if (sc->chunk_size_shift < 0)
*result -= sector_div(sector, sc->chunk_size);
else
*result = sector & ~(sector_t)(sc->chunk_size - 1);
if (target_stripe < stripe)
*result += sc->chunk_size; /* next chunk */
}
static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
uint32_t target_stripe)
{
sector_t begin, end;
stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
target_stripe, &begin);
stripe_map_range_sector(sc, bio_end_sector(bio),
target_stripe, &end);
if (begin < end) {
bio_set_dev(bio, sc->stripe[target_stripe].dev->bdev);
bio->bi_iter.bi_sector = begin +
sc->stripe[target_stripe].physical_start;
bio->bi_iter.bi_size = to_bytes(end - begin);
return DM_MAPIO_REMAPPED;
} else {
/* The range doesn't map to the target stripe */
bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
}
static int stripe_map(struct dm_target *ti, struct bio *bio)
{
struct stripe_c *sc = ti->private;
uint32_t stripe;
unsigned target_bio_nr;
if (bio->bi_opf & REQ_PREFLUSH) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
BUG_ON(target_bio_nr >= sc->stripes);
bio_set_dev(bio, sc->stripe[target_bio_nr].dev->bdev);
return DM_MAPIO_REMAPPED;
}
if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
unlikely(bio_op(bio) == REQ_OP_SECURE_ERASE) ||
unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES) ||
unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
BUG_ON(target_bio_nr >= sc->stripes);
return stripe_map_range(sc, bio, target_bio_nr);
}
stripe_map_sector(sc, bio->bi_iter.bi_sector,
&stripe, &bio->bi_iter.bi_sector);
bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
bio_set_dev(bio, sc->stripe[stripe].dev->bdev);
return DM_MAPIO_REMAPPED;
}
#if IS_ENABLED(CONFIG_DAX_DRIVER)
static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn)
{
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
struct stripe_c *sc = ti->private;
struct dax_device *dax_dev;
struct block_device *bdev;
uint32_t stripe;
long ret;
stripe_map_sector(sc, sector, &stripe, &dev_sector);
dev_sector += sc->stripe[stripe].physical_start;
dax_dev = sc->stripe[stripe].dev->dax_dev;
bdev = sc->stripe[stripe].dev->bdev;
ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
if (ret)
return ret;
return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
struct stripe_c *sc = ti->private;
struct dax_device *dax_dev;
struct block_device *bdev;
uint32_t stripe;
stripe_map_sector(sc, sector, &stripe, &dev_sector);
dev_sector += sc->stripe[stripe].physical_start;
dax_dev = sc->stripe[stripe].dev->dax_dev;
bdev = sc->stripe[stripe].dev->bdev;
if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
return 0;
return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
struct stripe_c *sc = ti->private;
struct dax_device *dax_dev;
struct block_device *bdev;
uint32_t stripe;
stripe_map_sector(sc, sector, &stripe, &dev_sector);
dev_sector += sc->stripe[stripe].physical_start;
dax_dev = sc->stripe[stripe].dev->dax_dev;
bdev = sc->stripe[stripe].dev->bdev;
if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
return 0;
return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages)
{
int ret;
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
struct stripe_c *sc = ti->private;
struct dax_device *dax_dev;
struct block_device *bdev;
uint32_t stripe;
stripe_map_sector(sc, sector, &stripe, &dev_sector);
dev_sector += sc->stripe[stripe].physical_start;
dax_dev = sc->stripe[stripe].dev->dax_dev;
bdev = sc->stripe[stripe].dev->bdev;
ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
if (ret)
return ret;
return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}
#else
#define stripe_dax_direct_access NULL
#define stripe_dax_copy_from_iter NULL
#define stripe_dax_copy_to_iter NULL
#define stripe_dax_zero_page_range NULL
#endif
/*
* Stripe status:
*
* INFO
* #stripes [stripe_name <stripe_name>] [group word count]
* [error count 'A|D' <error count 'A|D'>]
*
* TABLE
* #stripes [stripe chunk size]
* [stripe_name physical_start <stripe_name physical_start>]
*
*/
static void stripe_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
struct stripe_c *sc = (struct stripe_c *) ti->private;
unsigned int sz = 0;
unsigned int i;
switch (type) {
case STATUSTYPE_INFO:
DMEMIT("%d ", sc->stripes);
for (i = 0; i < sc->stripes; i++) {
DMEMIT("%s ", sc->stripe[i].dev->name);
}
DMEMIT("1 ");
for (i = 0; i < sc->stripes; i++) {
DMEMIT("%c", atomic_read(&(sc->stripe[i].error_count)) ?
'D' : 'A');
}
break;
case STATUSTYPE_TABLE:
DMEMIT("%d %llu", sc->stripes,
(unsigned long long)sc->chunk_size);
for (i = 0; i < sc->stripes; i++)
DMEMIT(" %s %llu", sc->stripe[i].dev->name,
(unsigned long long)sc->stripe[i].physical_start);
break;
}
}
static int stripe_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{
unsigned i;
char major_minor[16];
struct stripe_c *sc = ti->private;
if (!*error)
return DM_ENDIO_DONE; /* I/O complete */
if (bio->bi_opf & REQ_RAHEAD)
return DM_ENDIO_DONE;
if (*error == BLK_STS_NOTSUPP)
return DM_ENDIO_DONE;
memset(major_minor, 0, sizeof(major_minor));
sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)));
/*
* Test to see which stripe drive triggered the event
* and increment error count for all stripes on that device.
* If the error count for a given device exceeds the threshold
* value we will no longer trigger any further events.
*/
for (i = 0; i < sc->stripes; i++)
if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
atomic_inc(&(sc->stripe[i].error_count));
if (atomic_read(&(sc->stripe[i].error_count)) <
DM_IO_ERROR_THRESHOLD)
schedule_work(&sc->trigger_event);
}
return DM_ENDIO_DONE;
}
static int stripe_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct stripe_c *sc = ti->private;
int ret = 0;
unsigned i = 0;
do {
ret = fn(ti, sc->stripe[i].dev,
sc->stripe[i].physical_start,
sc->stripe_width, data);
} while (!ret && ++i < sc->stripes);
return ret;
}
static void stripe_io_hints(struct dm_target *ti,
struct queue_limits *limits)
{
struct stripe_c *sc = ti->private;
unsigned chunk_size = sc->chunk_size << SECTOR_SHIFT;
blk_limits_io_min(limits, chunk_size);
blk_limits_io_opt(limits, chunk_size * sc->stripes);
}
static struct target_type stripe_target = {
.name = "striped",
.version = {1, 6, 0},
.features = DM_TARGET_PASSES_INTEGRITY,
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
.map = stripe_map,
.end_io = stripe_end_io,
.status = stripe_status,
.iterate_devices = stripe_iterate_devices,
.io_hints = stripe_io_hints,
.direct_access = stripe_dax_direct_access,
.dax_copy_from_iter = stripe_dax_copy_from_iter,
.dax_copy_to_iter = stripe_dax_copy_to_iter,
.dax_zero_page_range = stripe_dax_zero_page_range,
};
int __init dm_stripe_init(void)
{
int r;
r = dm_register_target(&stripe_target);
if (r < 0)
DMWARN("target registration failed");
return r;
}
void dm_stripe_exit(void)
{
dm_unregister_target(&stripe_target);
}
| {
"language": "C"
} |
/*
Code for time stepping with the Runge-Kutta method
Notes:
The general system is written as
Udot = F(t,U)
*/
#include <petsc/private/tsimpl.h> /*I "petscts.h" I*/
#include <petscdm.h>
#include <../src/ts/impls/explicit/rk/rk.h>
#include <../src/ts/impls/explicit/rk/mrk.h>
static TSRKType TSRKDefault = TSRK3BS;
static PetscBool TSRKRegisterAllCalled;
static PetscBool TSRKPackageInitialized;
static RKTableauLink RKTableauList;
/*MC
TSRK1FE - First order forward Euler scheme.
This method has one stage.
Options database:
. -ts_rk_type 1fe
Level: advanced
.seealso: TSRK, TSRKType, TSRKSetType()
M*/
/*MC
TSRK2A - Second order RK scheme.
This method has two stages.
Options database:
. -ts_rk_type 2a
Level: advanced
.seealso: TSRK, TSRKType, TSRKSetType()
M*/
/*MC
TSRK3 - Third order RK scheme.
This method has three stages.
Options database:
. -ts_rk_type 3
Level: advanced
.seealso: TSRK, TSRKType, TSRKSetType()
M*/
/*MC
TSRK3BS - Third order RK scheme of Bogacki-Shampine with 2nd order embedded method.
This method has four stages with the First Same As Last (FSAL) property.
Options database:
. -ts_rk_type 3bs
Level: advanced
References: https://doi.org/10.1016/0893-9659(89)90079-7
.seealso: TSRK, TSRKType, TSRKSetType()
M*/
/*MC
TSRK4 - Fourth order RK scheme.
This is the classical Runge-Kutta method with four stages.
Options database:
. -ts_rk_type 4
Level: advanced
.seealso: TSRK, TSRKType, TSRKSetType()
M*/
/*MC
TSRK5F - Fifth order Fehlberg RK scheme with a 4th order embedded method.
This method has six stages.
Options database:
. -ts_rk_type 5f
Level: advanced
.seealso: TSRK, TSRKType, TSRKSetType()
M*/
/*MC
TSRK5DP - Fifth order Dormand-Prince RK scheme with the 4th order embedded method.
This method has seven stages with the First Same As Last (FSAL) property.
Options database:
. -ts_rk_type 5dp
Level: advanced
References: https://doi.org/10.1016/0771-050X(80)90013-3
.seealso: TSRK, TSRKType, TSRKSetType()
M*/
/*MC
TSRK5BS - Fifth order Bogacki-Shampine RK scheme with 4th order embedded method.
This method has eight stages with the First Same As Last (FSAL) property.
Options database:
. -ts_rk_type 5bs
Level: advanced
References: https://doi.org/10.1016/0898-1221(96)00141-1
.seealso: TSRK, TSRKType, TSRKSetType()
M*/
/*MC
TSRK6VR - Sixth order robust Verner RK scheme with fifth order embedded method.
This method has nine stages with the First Same As Last (FSAL) property.
Options database:
. -ts_rk_type 6vr
Level: advanced
References: http://people.math.sfu.ca/~jverner/RKV65.IIIXb.Robust.00010102836.081204.CoeffsOnlyRAT
.seealso: TSRK, TSRKType, TSRKSetType()
M*/
/*MC
TSRK7VR - Seventh order robust Verner RK scheme with sixth order embedded method.
This method has ten stages with the First Same As Last (FSAL) property.
Options database:
. -ts_rk_type 7vr
Level: advanced
References: http://people.math.sfu.ca/~jverner/RKV76.IIa.Robust.000027015646.081206.CoeffsOnlyRAT
.seealso: TSRK, TSRKType, TSRKSetType()
M*/
/*MC
   TSRK8VR - Eighth order robust Verner RK scheme with seventh order embedded method.
This method has thirteen stages with the First Same As Last (FSAL) property.
Options database:
. -ts_rk_type 8vr
Level: advanced
References: http://people.math.sfu.ca/~jverner/RKV87.IIa.Robust.00000754677.081208.CoeffsOnlyRATandFLOAT
.seealso: TSRK, TSRKType, TSRKSetType()
M*/
/*@C
TSRKRegisterAll - Registers all of the Runge-Kutta explicit methods in TSRK
Not Collective, but should be called by all processes which will need the schemes to be registered
Level: advanced
.seealso: TSRKRegisterDestroy()
@*/
PetscErrorCode TSRKRegisterAll(void)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (TSRKRegisterAllCalled) PetscFunctionReturn(0);
TSRKRegisterAllCalled = PETSC_TRUE;
#define RC PetscRealConstant
{
const PetscReal
A[1][1] = {{0}},
b[1] = {RC(1.0)};
ierr = TSRKRegister(TSRK1FE,1,1,&A[0][0],b,NULL,NULL,0,NULL);CHKERRQ(ierr);
}
{
const PetscReal
A[2][2] = {{0,0},
{RC(1.0),0}},
b[2] = {RC(0.5),RC(0.5)},
bembed[2] = {RC(1.0),0};
ierr = TSRKRegister(TSRK2A,2,2,&A[0][0],b,NULL,bembed,0,NULL);CHKERRQ(ierr);
}
{
const PetscReal
A[3][3] = {{0,0,0},
{RC(2.0)/RC(3.0),0,0},
{RC(-1.0)/RC(3.0),RC(1.0),0}},
b[3] = {RC(0.25),RC(0.5),RC(0.25)};
ierr = TSRKRegister(TSRK3,3,3,&A[0][0],b,NULL,NULL,0,NULL);CHKERRQ(ierr);
}
{
const PetscReal
A[4][4] = {{0,0,0,0},
{RC(1.0)/RC(2.0),0,0,0},
{0,RC(3.0)/RC(4.0),0,0},
{RC(2.0)/RC(9.0),RC(1.0)/RC(3.0),RC(4.0)/RC(9.0),0}},
b[4] = {RC(2.0)/RC(9.0),RC(1.0)/RC(3.0),RC(4.0)/RC(9.0),0},
bembed[4] = {RC(7.0)/RC(24.0),RC(1.0)/RC(4.0),RC(1.0)/RC(3.0),RC(1.0)/RC(8.0)};
ierr = TSRKRegister(TSRK3BS,3,4,&A[0][0],b,NULL,bembed,0,NULL);CHKERRQ(ierr);
}
{
const PetscReal
A[4][4] = {{0,0,0,0},
{RC(0.5),0,0,0},
{0,RC(0.5),0,0},
{0,0,RC(1.0),0}},
b[4] = {RC(1.0)/RC(6.0),RC(1.0)/RC(3.0),RC(1.0)/RC(3.0),RC(1.0)/RC(6.0)};
ierr = TSRKRegister(TSRK4,4,4,&A[0][0],b,NULL,NULL,0,NULL);CHKERRQ(ierr);
}
{
const PetscReal
A[6][6] = {{0,0,0,0,0,0},
{RC(0.25),0,0,0,0,0},
{RC(3.0)/RC(32.0),RC(9.0)/RC(32.0),0,0,0,0},
{RC(1932.0)/RC(2197.0),RC(-7200.0)/RC(2197.0),RC(7296.0)/RC(2197.0),0,0,0},
{RC(439.0)/RC(216.0),RC(-8.0),RC(3680.0)/RC(513.0),RC(-845.0)/RC(4104.0),0,0},
{RC(-8.0)/RC(27.0),RC(2.0),RC(-3544.0)/RC(2565.0),RC(1859.0)/RC(4104.0),RC(-11.0)/RC(40.0),0}},
b[6] = {RC(16.0)/RC(135.0),0,RC(6656.0)/RC(12825.0),RC(28561.0)/RC(56430.0),RC(-9.0)/RC(50.0),RC(2.0)/RC(55.0)},
bembed[6] = {RC(25.0)/RC(216.0),0,RC(1408.0)/RC(2565.0),RC(2197.0)/RC(4104.0),RC(-1.0)/RC(5.0),0};
ierr = TSRKRegister(TSRK5F,5,6,&A[0][0],b,NULL,bembed,0,NULL);CHKERRQ(ierr);
}
{
const PetscReal
A[7][7] = {{0,0,0,0,0,0,0},
{RC(1.0)/RC(5.0),0,0,0,0,0,0},
{RC(3.0)/RC(40.0),RC(9.0)/RC(40.0),0,0,0,0,0},
{RC(44.0)/RC(45.0),RC(-56.0)/RC(15.0),RC(32.0)/RC(9.0),0,0,0,0},
{RC(19372.0)/RC(6561.0),RC(-25360.0)/RC(2187.0),RC(64448.0)/RC(6561.0),RC(-212.0)/RC(729.0),0,0,0},
{RC(9017.0)/RC(3168.0),RC(-355.0)/RC(33.0),RC(46732.0)/RC(5247.0),RC(49.0)/RC(176.0),RC(-5103.0)/RC(18656.0),0,0},
{RC(35.0)/RC(384.0),0,RC(500.0)/RC(1113.0),RC(125.0)/RC(192.0),RC(-2187.0)/RC(6784.0),RC(11.0)/RC(84.0),0}},
b[7] = {RC(35.0)/RC(384.0),0,RC(500.0)/RC(1113.0),RC(125.0)/RC(192.0),RC(-2187.0)/RC(6784.0),RC(11.0)/RC(84.0),0},
bembed[7] = {RC(5179.0)/RC(57600.0),0,RC(7571.0)/RC(16695.0),RC(393.0)/RC(640.0),RC(-92097.0)/RC(339200.0),RC(187.0)/RC(2100.0),RC(1.0)/RC(40.0)},
binterp[7][5] = {{RC(1.0),RC(-4034104133.0)/RC(1410260304.0),RC(105330401.0)/RC(33982176.0),RC(-13107642775.0)/RC(11282082432.0),RC(6542295.0)/RC(470086768.0)},
{0,0,0,0,0},
{0,RC(132343189600.0)/RC(32700410799.0),RC(-833316000.0)/RC(131326951.0),RC(91412856700.0)/RC(32700410799.0),RC(-523383600.0)/RC(10900136933.0)},
{0,RC(-115792950.0)/RC(29380423.0),RC(185270875.0)/RC(16991088.0),RC(-12653452475.0)/RC(1880347072.0),RC(98134425.0)/RC(235043384.0)},
{0,RC(70805911779.0)/RC(24914598704.0),RC(-4531260609.0)/RC(600351776.0),RC(988140236175.0)/RC(199316789632.0),RC(-14307999165.0)/RC(24914598704.0)},
{0,RC(-331320693.0)/RC(205662961.0),RC(31361737.0)/RC(7433601.0),RC(-2426908385.0)/RC(822651844.0),RC(97305120.0)/RC(205662961.0)},
{0,RC(44764047.0)/RC(29380423.0),RC(-1532549.0)/RC(353981.0),RC(90730570.0)/RC(29380423.0),RC(-8293050.0)/RC(29380423.0)}};
ierr = TSRKRegister(TSRK5DP,5,7,&A[0][0],b,NULL,bembed,5,binterp[0]);CHKERRQ(ierr);
}
{
const PetscReal
A[8][8] = {{0,0,0,0,0,0,0,0},
{RC(1.0)/RC(6.0),0,0,0,0,0,0,0},
{RC(2.0)/RC(27.0),RC(4.0)/RC(27.0),0,0,0,0,0,0},
{RC(183.0)/RC(1372.0),RC(-162.0)/RC(343.0),RC(1053.0)/RC(1372.0),0,0,0,0,0},
{RC(68.0)/RC(297.0),RC(-4.0)/RC(11.0),RC(42.0)/RC(143.0),RC(1960.0)/RC(3861.0),0,0,0,0},
{RC(597.0)/RC(22528.0),RC(81.0)/RC(352.0),RC(63099.0)/RC(585728.0),RC(58653.0)/RC(366080.0),RC(4617.0)/RC(20480.0),0,0,0},
{RC(174197.0)/RC(959244.0),RC(-30942.0)/RC(79937.0),RC(8152137.0)/RC(19744439.0),RC(666106.0)/RC(1039181.0),RC(-29421.0)/RC(29068.0),RC(482048.0)/RC(414219.0),0,0},
{RC(587.0)/RC(8064.0),0,RC(4440339.0)/RC(15491840.0),RC(24353.0)/RC(124800.0),RC(387.0)/RC(44800.0),RC(2152.0)/RC(5985.0),RC(7267.0)/RC(94080.0),0}},
b[8] = {RC(587.0)/RC(8064.0),0,RC(4440339.0)/RC(15491840.0),RC(24353.0)/RC(124800.0),RC(387.0)/RC(44800.0),RC(2152.0)/RC(5985.0),RC(7267.0)/RC(94080.0),0},
bembed[8] = {RC(2479.0)/RC(34992.0),0,RC(123.0)/RC(416.0),RC(612941.0)/RC(3411720.0),RC(43.0)/RC(1440.0),RC(2272.0)/RC(6561.0),RC(79937.0)/RC(1113912.0),RC(3293.0)/RC(556956.0)};
ierr = TSRKRegister(TSRK5BS,5,8,&A[0][0],b,NULL,bembed,0,NULL);CHKERRQ(ierr);
}
{
const PetscReal
A[9][9] = {{0,0,0,0,0,0,0,0,0},
{RC(1.8000000000000000000000000000000000000000e-01),0,0,0,0,0,0,0,0},
{RC(8.9506172839506172839506172839506172839506e-02),RC(7.7160493827160493827160493827160493827160e-02),0,0,0,0,0,0,0},
{RC(6.2500000000000000000000000000000000000000e-02),0,RC(1.8750000000000000000000000000000000000000e-01),0,0,0,0,0,0},
{RC(3.1651600000000000000000000000000000000000e-01),0,RC(-1.0449480000000000000000000000000000000000e+00),RC(1.2584320000000000000000000000000000000000e+00),0,0,0,0,0},
{RC(2.7232612736485626257225065566674305502508e-01),0,RC(-8.2513360323886639676113360323886639676113e-01),RC(1.0480917678812415654520917678812415654521e+00),RC(1.0471570799276856873679117969088177628396e-01),0,0,0,0},
{RC(-1.6699418599716514314329607278961797333198e-01),0,RC(6.3170850202429149797570850202429149797571e-01),RC(1.7461044552773876082146758838488161796432e-01),RC(-1.0665356459086066122525194734018680677781e+00),RC(1.2272108843537414965986394557823129251701e+00),0,0,0},
{RC(3.6423751686909581646423751686909581646424e-01),0,RC(-2.0404858299595141700404858299595141700405e-01),RC(-3.4883737816068643136312309244640071707741e-01),RC(3.2619323032856867443333608747142581729048e+00),RC(-2.7551020408163265306122448979591836734694e+00),RC(6.8181818181818181818181818181818181818182e-01),0,0},
{RC(7.6388888888888888888888888888888888888889e-02),0,0,RC(3.6940836940836940836940836940836940836941e-01),0,RC(2.4801587301587301587301587301587301587302e-01),RC(2.3674242424242424242424242424242424242424e-01),RC(6.9444444444444444444444444444444444444444e-02),0}},
b[9] = {RC(7.6388888888888888888888888888888888888889e-02),0,0,RC(3.6940836940836940836940836940836940836941e-01),0,RC(2.4801587301587301587301587301587301587302e-01),RC(2.3674242424242424242424242424242424242424e-01),RC(6.9444444444444444444444444444444444444444e-02),0},
bembed[9] = {RC(5.8700209643605870020964360587002096436059e-02),0,0,RC(4.8072562358276643990929705215419501133787e-01),RC(-8.5341242076919085578832094861228313083563e-01),RC(1.2046485260770975056689342403628117913832e+00),0,RC(-5.9242373072160306202859394348756050883710e-02),RC(1.6858043453788134639198468985703028256220e-01)};
ierr = TSRKRegister(TSRK6VR,6,9,&A[0][0],b,NULL,bembed,0,NULL);CHKERRQ(ierr);
}
{
const PetscReal
A[10][10] = {{0,0,0,0,0,0,0,0,0,0},
{RC(5.0000000000000000000000000000000000000000e-03),0,0,0,0,0,0,0,0,0},
{RC(-1.0767901234567901234567901234567901234568e+00),RC(1.1856790123456790123456790123456790123457e+00),0,0,0,0,0,0,0,0},
{RC(4.0833333333333333333333333333333333333333e-02),0,RC(1.2250000000000000000000000000000000000000e-01),0,0,0,0,0,0,0},
{RC(6.3607142857142857142857142857142857142857e-01),0,RC(-2.4444642857142857142857142857142857142857e+00),RC(2.2633928571428571428571428571428571428571e+00),0,0,0,0,0,0},
{RC(-2.5351211079349245229256383554660215487207e+00),0,RC(1.0299374654449267920438514460756024913612e+01),RC(-7.9513032885990579949493217458266876536482e+00),RC(7.9301148923100592201226014271115261823800e-01),0,0,0,0,0},
{RC(1.0018765812524632961969196583094999808207e+00),0,RC(-4.1665712824423798331313938005470971453189e+00),RC(3.8343432929128642412552665218251378665197e+00),RC(-5.0233333560710847547464330228611765612403e-01),RC(6.6768474388416077115385092269857695410259e-01),0,0,0,0},
{RC(2.7255018354630767130333963819175005717348e+01),0,RC(-4.2004617278410638355318645443909295369611e+01),RC(-1.0535713126619489917921081600546526103722e+01),RC(8.0495536711411937147983652158926826634202e+01),RC(-6.7343882271790513468549075963212975640927e+01),RC(1.3048657610777937463471187029566964762710e+01),0,0,0},
{RC(-3.0397378057114965146943658658755763226883e+00),0,RC(1.0138161410329801111857946190709700150441e+01),RC(-6.4293056748647215721462825629555298064437e+00),RC(-1.5864371483408276587115312853798610579467e+00),RC(1.8921781841968424410864308909131353365021e+00),RC(1.9699335407608869061292360163336442838006e-02),RC(5.4416989827933235465102724247952572977903e-03),0,0},
{RC(-1.4449518916777735137351003179355712360517e+00),0,RC(8.0318913859955919224117033223019560435041e+00),RC(-7.5831741663401346820798883023671588604984e+00),RC(3.5816169353190074211247685442452878696855e+00),RC(-2.4369722632199529411183809065693752383733e+00),RC(8.5158999992326179339689766032486142173390e-01),0,0,0}},
b[10] = {RC(4.7425837833706756083569172717574534698932e-02),0,0,RC(2.5622361659370562659961727458274623448160e-01),RC(2.6951376833074206619473817258075952886764e-01),RC(1.2686622409092782845989138364739173247882e-01),RC(2.4887225942060071622046449427647492767292e-01),RC(3.0744837408200631335304388479099184768645e-03),RC(4.8023809989496943308189063347143123323209e-02),0},
bembed[10] = {RC(4.7485247699299631037531273805727961552268e-02),0,0,RC(2.5599412588690633297154918245905393870497e-01),RC(2.7058478081067688722530891099268135732387e-01),RC(1.2505618684425992913638822323746917920448e-01),RC(2.5204468723743860507184043820197442562182e-01),0,0,RC(4.8834971521418614557381971303093137592592e-02)};
ierr = TSRKRegister(TSRK7VR,7,10,&A[0][0],b,NULL,bembed,0,NULL);CHKERRQ(ierr);
}
{
const PetscReal
A[13][13] = {{0,0,0,0,0,0,0,0,0,0,0,0,0},
{RC(2.5000000000000000000000000000000000000000e-01),0,0,0,0,0,0,0,0,0,0,0,0},
{RC(8.7400846504915232052686327594877411977046e-02),RC(2.5487604938654321753087950620345685135815e-02),0,0,0,0,0,0,0,0,0,0,0},
{RC(4.2333169291338582677165354330708661417323e-02),0,RC(1.2699950787401574803149606299212598425197e-01),0,0,0,0,0,0,0,0,0,0},
{RC(4.2609505888742261494881445237572274090942e-01),0,RC(-1.5987952846591523265427733230657181117089e+00),RC(1.5967002257717297115939588706899953707994e+00),0,0,0,0,0,0,0,0,0},
{RC(5.0719337296713929515090618138513639239329e-02),0,0,RC(2.5433377264600407582754714408877778031369e-01),RC(2.0394689005728199465736223777270858044698e-01),0,0,0,0,0,0,0,0},
{RC(-2.9000374717523110970388379285425896124091e-01),0,0,RC(1.3441873910260789889438681109414337003184e+00),RC(-2.8647779433614427309611103827036562829470e+00),RC(2.6775942995105948517211260646164815438695e+00),0,0,0,0,0,0,0},
{RC(9.8535011337993546469740402980727014284756e-02),0,0,0,RC(2.2192680630751384842024036498197387903583e-01),RC(-1.8140622911806994312690338288073952457474e-01),RC(1.0944411472562548236922614918038631254153e-02),0,0,0,0,0,0},
{RC(3.8711052545731144679444618165166373405645e-01),0,0,RC(-1.4424454974855277571256745553077927767173e+00),RC(2.9053981890699509317691346449233848441744e+00),RC(-1.8537710696301059290843332675811978025183e+00),RC(1.4003648098728154269497325109771241479223e-01),RC(5.7273940811495816575746774624447706488753e-01),0,0,0,0,0},
{RC(-1.6124403444439308100630016197913480595436e-01),0,0,RC(-1.7339602957358984083578404473962567894901e-01),RC(-1.3012892814065147406016812745172492529744e+00),RC(1.1379503751738617308558792131431003472124e+00),RC(-3.1747649663966880106923521138043024698980e-02),RC(9.3351293824933666439811064486056884856590e-01),RC(-8.3786318334733852703300855629616433201504e-02),0,0,0,0},
{RC(-1.9199444881589533281510804651483576073142e-02),0,0,RC(2.7330857265264284907942326254016124275617e-01),RC(-6.7534973206944372919691611210942380856240e-01),RC(3.4151849813846016071738489974728382711981e-01),RC(-6.7950064803375772478920516198524629391910e-02),RC(9.6591752247623878884265586491216376509746e-02),RC(1.3253082511182101180721038466545389951226e-01),RC(3.6854959360386113446906329951531666812946e-01),0,0,0},
{RC(6.0918774036452898676888412111588817784584e-01),0,0,RC(-2.2725690858980016768999800931413088399719e+00),RC(4.7578983426940290068155255881914785497547e+00),RC(-5.5161067066927584824294689667844248244842e+00),RC(2.9005963696801192709095818565946174378180e-01),RC(5.6914239633590368229109858454801849145630e-01),RC(7.9267957603321670271339916205893327579951e-01),RC(1.5473720453288822894126190771849898232047e-01),RC(1.6149708956621816247083215106334544434974e+00),0,0},
{RC(8.8735762208534719663211694051981022704884e-01),0,0,RC(-2.9754597821085367558513632804709301581977e+00),RC(5.6007170094881630597990392548350098923829e+00),RC(-5.9156074505366744680014930189941657351840e+00),RC(2.2029689156134927016879142540807638331238e-01),RC(1.0155097824462216666143271340902996997549e-01),RC(1.1514345647386055909780397752125850553556e+00),RC(1.9297101665271239396134361900805843653065e+00),0,0,0}},
b[13] = {RC(4.4729564666695714203015840429049382466467e-02),0,0,0,0,RC(1.5691033527708199813368698010726645409175e-01),RC(1.8460973408151637740702451873526277892035e-01),RC(2.2516380602086991042479419400350721970920e-01),RC(1.4794615651970234687005179885449141753736e-01),RC(7.6055542444955825269798361910336491012732e-02),RC(1.2277290235018619610824346315921437388535e-01),RC(4.1811958638991631583384842800871882376786e-02),0},
bembed[13] = {RC(4.5847111400495925878664730122010282095875e-02),0,0,0,0,RC(2.6231891404152387437443356584845803392392e-01),RC(1.9169372337852611904485738635688429008025e-01),RC(2.1709172327902618330978407422906448568196e-01),RC(1.2738189624833706796803169450656737867900e-01),RC(1.1510530385365326258240515750043192148894e-01),0,0,RC(4.0561327798437566841823391436583608050053e-02)};
ierr = TSRKRegister(TSRK8VR,8,13,&A[0][0],b,NULL,bembed,0,NULL);CHKERRQ(ierr);
}
#undef RC
PetscFunctionReturn(0);
}
/*@C
TSRKRegisterDestroy - Frees the list of schemes that were registered by TSRKRegister().
Not Collective
Level: advanced
.seealso: TSRKRegister(), TSRKRegisterAll()
@*/
PetscErrorCode TSRKRegisterDestroy(void)
{
PetscErrorCode ierr;
RKTableauLink link;
PetscFunctionBegin;
while ((link = RKTableauList)) {
RKTableau t = &link->tab;
RKTableauList = link->next;
ierr = PetscFree3(t->A,t->b,t->c);CHKERRQ(ierr);
ierr = PetscFree(t->bembed);CHKERRQ(ierr);
ierr = PetscFree(t->binterp);CHKERRQ(ierr);
ierr = PetscFree(t->name);CHKERRQ(ierr);
ierr = PetscFree(link);CHKERRQ(ierr);
}
TSRKRegisterAllCalled = PETSC_FALSE;
PetscFunctionReturn(0);
}
/*@C
TSRKInitializePackage - This function initializes everything in the TSRK package. It is called
from TSInitializePackage().
Level: developer
.seealso: PetscInitialize()
@*/
PetscErrorCode TSRKInitializePackage(void)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (TSRKPackageInitialized) PetscFunctionReturn(0);
TSRKPackageInitialized = PETSC_TRUE;
ierr = TSRKRegisterAll();CHKERRQ(ierr);
ierr = PetscRegisterFinalize(TSRKFinalizePackage);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@C
TSRKFinalizePackage - This function destroys everything in the TSRK package. It is
called from PetscFinalize().
Level: developer
.seealso: PetscFinalize()
@*/
PetscErrorCode TSRKFinalizePackage(void)
{
PetscErrorCode ierr;
PetscFunctionBegin;
TSRKPackageInitialized = PETSC_FALSE;
ierr = TSRKRegisterDestroy();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@C
TSRKRegister - register an RK scheme by providing the entries in the Butcher tableau and optionally embedded approximations and interpolation
Not Collective, but the same schemes should be registered on all processes on which they will be used
Input Parameters:
+ name - identifier for method
. order - approximation order of method
. s - number of stages, this is the dimension of the matrices below
. A - stage coefficients (dimension s*s, row-major)
. b - step completion table (dimension s; NULL to use last row of A)
. c - abscissa (dimension s; NULL to use row sums of A)
. bembed - completion table for embedded method (dimension s; NULL if not available)
. p - Order of the interpolation scheme, equal to the number of columns of binterp
- binterp - Coefficients of the interpolation formula (dimension s*p; NULL to reuse b with p=1)
Notes:
   Several RK methods are provided; this function is only needed to create new methods.
Level: advanced
.seealso: TSRK
@*/
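/*
   A minimal sketch (not part of the library) of how a new tableau could be registered
   with TSRKRegister(); the method name "mymidpoint" and the wrapper function are
   hypothetical. The tableau is the classical explicit midpoint method (order 2, 2 stages).

     static PetscErrorCode RegisterMidpointRK(void)
     {
       const PetscReal A[2][2] = {{0.0,0.0},
                                  {0.5,0.0}};
       const PetscReal b[2]    = {0.0,1.0};
       PetscErrorCode  ierr;

       PetscFunctionBegin;
       ierr = TSRKRegister("mymidpoint",2,2,&A[0][0],b,NULL,NULL,0,NULL);CHKERRQ(ierr);
       PetscFunctionReturn(0);
     }
*/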
PetscErrorCode TSRKRegister(TSRKType name,PetscInt order,PetscInt s,
const PetscReal A[],const PetscReal b[],const PetscReal c[],
const PetscReal bembed[],PetscInt p,const PetscReal binterp[])
{
PetscErrorCode ierr;
RKTableauLink link;
RKTableau t;
PetscInt i,j;
PetscFunctionBegin;
PetscValidCharPointer(name,1);
PetscValidRealPointer(A,4);
if (b) PetscValidRealPointer(b,5);
if (c) PetscValidRealPointer(c,6);
if (bembed) PetscValidRealPointer(bembed,7);
if (binterp || p > 1) PetscValidRealPointer(binterp,9);
ierr = TSRKInitializePackage();CHKERRQ(ierr);
ierr = PetscNew(&link);CHKERRQ(ierr);
t = &link->tab;
ierr = PetscStrallocpy(name,&t->name);CHKERRQ(ierr);
t->order = order;
t->s = s;
ierr = PetscMalloc3(s*s,&t->A,s,&t->b,s,&t->c);CHKERRQ(ierr);
ierr = PetscArraycpy(t->A,A,s*s);CHKERRQ(ierr);
if (b) { ierr = PetscArraycpy(t->b,b,s);CHKERRQ(ierr); }
else for (i=0; i<s; i++) t->b[i] = A[(s-1)*s+i];
if (c) { ierr = PetscArraycpy(t->c,c,s);CHKERRQ(ierr); }
else for (i=0; i<s; i++) for (j=0,t->c[i]=0; j<s; j++) t->c[i] += A[i*s+j];
t->FSAL = PETSC_TRUE;
for (i=0; i<s; i++) if (t->A[(s-1)*s+i] != t->b[i]) t->FSAL = PETSC_FALSE;
if (bembed) {
ierr = PetscMalloc1(s,&t->bembed);CHKERRQ(ierr);
ierr = PetscArraycpy(t->bembed,bembed,s);CHKERRQ(ierr);
}
if (!binterp) { p = 1; binterp = t->b; }
t->p = p;
ierr = PetscMalloc1(s*p,&t->binterp);CHKERRQ(ierr);
ierr = PetscArraycpy(t->binterp,binterp,s*p);CHKERRQ(ierr);
link->next = RKTableauList;
RKTableauList = link;
PetscFunctionReturn(0);
}
PetscErrorCode TSRKGetTableau_RK(TS ts, PetscInt *s, const PetscReal **A, const PetscReal **b, const PetscReal **c, const PetscReal **bembed,
PetscInt *p, const PetscReal **binterp, PetscBool *FSAL)
{
TS_RK *rk = (TS_RK*)ts->data;
RKTableau tab = rk->tableau;
PetscFunctionBegin;
if (s) *s = tab->s;
if (A) *A = tab->A;
if (b) *b = tab->b;
if (c) *c = tab->c;
if (bembed) *bembed = tab->bembed;
if (p) *p = tab->p;
if (binterp) *binterp = tab->binterp;
if (FSAL) *FSAL = tab->FSAL;
PetscFunctionReturn(0);
}
/*@C
TSRKGetTableau - Get info on the RK tableau
Not Collective
   Input Parameter:
. ts - timestepping context
Output Parameters:
+ s - number of stages, this is the dimension of the matrices below
. A - stage coefficients (dimension s*s, row-major)
. b - step completion table (dimension s)
. c - abscissa (dimension s)
. bembed - completion table for embedded method (dimension s; NULL if not available)
. p - Order of the interpolation scheme, equal to the number of columns of binterp
. binterp - Coefficients of the interpolation formula (dimension s*p)
-  FSAL - whether or not the scheme has the First Same As Last property
Level: developer
.seealso: TSRK
@*/
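/*
   A brief usage sketch (the surrounding caller is hypothetical): query the stage
   count and abscissae of the RK scheme currently set on a TS of type TSRK.

     PetscInt        s;
     const PetscReal *c;
     PetscErrorCode  ierr;
     ierr = TSRKGetTableau(ts,&s,NULL,NULL,&c,NULL,NULL,NULL,NULL);CHKERRQ(ierr);
*/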
PetscErrorCode TSRKGetTableau(TS ts, PetscInt *s, const PetscReal **A, const PetscReal **b, const PetscReal **c, const PetscReal **bembed,
PetscInt *p, const PetscReal **binterp, PetscBool *FSAL)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(ts,TS_CLASSID,1);
ierr = PetscUseMethod(ts,"TSRKGetTableau_C",(TS,PetscInt*,const PetscReal**,const PetscReal**,const PetscReal**,const PetscReal**,
PetscInt*,const PetscReal**,PetscBool*),(ts,s,A,b,c,bembed,p,binterp,FSAL));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*
 This is for the single-step RK method.
The step completion formula is
x1 = x0 + h b^T YdotRHS
This function can be called before or after ts->vec_sol has been updated.
Suppose we have a completion formula (b) and an embedded formula (be) of different order.
We can write
x1e = x0 + h be^T YdotRHS
= x1 - h b^T YdotRHS + h be^T YdotRHS
= x1 + h (be - b)^T YdotRHS
 so we can evaluate the method at a different order even after the step has been optimistically completed.
*/
static PetscErrorCode TSEvaluateStep_RK(TS ts,PetscInt order,Vec X,PetscBool *done)
{
TS_RK *rk = (TS_RK*)ts->data;
RKTableau tab = rk->tableau;
PetscScalar *w = rk->work;
PetscReal h;
PetscInt s = tab->s,j;
PetscErrorCode ierr;
PetscFunctionBegin;
switch (rk->status) {
case TS_STEP_INCOMPLETE:
case TS_STEP_PENDING:
h = ts->time_step; break;
case TS_STEP_COMPLETE:
h = ts->ptime - ts->ptime_prev; break;
default: SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_PLIB,"Invalid TSStepStatus");
}
if (order == tab->order) {
if (rk->status == TS_STEP_INCOMPLETE) {
ierr = VecCopy(ts->vec_sol,X);CHKERRQ(ierr);
for (j=0; j<s; j++) w[j] = h*tab->b[j]/rk->dtratio;
ierr = VecMAXPY(X,s,w,rk->YdotRHS);CHKERRQ(ierr);
} else {ierr = VecCopy(ts->vec_sol,X);CHKERRQ(ierr);}
PetscFunctionReturn(0);
} else if (order == tab->order-1) {
if (!tab->bembed) goto unavailable;
if (rk->status == TS_STEP_INCOMPLETE) { /*Complete with the embedded method (be)*/
ierr = VecCopy(ts->vec_sol,X);CHKERRQ(ierr);
for (j=0; j<s; j++) w[j] = h*tab->bembed[j];
ierr = VecMAXPY(X,s,w,rk->YdotRHS);CHKERRQ(ierr);
} else { /*Rollback and re-complete using (be-b) */
ierr = VecCopy(ts->vec_sol,X);CHKERRQ(ierr);
for (j=0; j<s; j++) w[j] = h*(tab->bembed[j] - tab->b[j]);
ierr = VecMAXPY(X,s,w,rk->YdotRHS);CHKERRQ(ierr);
}
if (done) *done = PETSC_TRUE;
PetscFunctionReturn(0);
}
unavailable:
if (done) *done = PETSC_FALSE;
else SETERRQ3(PetscObjectComm((PetscObject)ts),PETSC_ERR_SUP,"RK '%s' of order %D cannot evaluate step at order %D. Consider using -ts_adapt_type none or a different method that has an embedded estimate.",tab->name,tab->order,order);
PetscFunctionReturn(0);
}
static PetscErrorCode TSForwardCostIntegral_RK(TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
TS quadts = ts->quadraturets;
RKTableau tab = rk->tableau;
const PetscInt s = tab->s;
const PetscReal *b = tab->b,*c = tab->c;
Vec *Y = rk->Y;
PetscInt i;
PetscErrorCode ierr;
PetscFunctionBegin;
/* No need to backup quadts->vec_sol since it can be reverted in TSRollBack_RK */
for (i=s-1; i>=0; i--) {
/* Evolve quadrature TS solution to compute integrals */
ierr = TSComputeRHSFunction(quadts,rk->ptime+rk->time_step*c[i],Y[i],ts->vec_costintegrand);CHKERRQ(ierr);
ierr = VecAXPY(quadts->vec_sol,rk->time_step*b[i],ts->vec_costintegrand);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode TSAdjointCostIntegral_RK(TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
RKTableau tab = rk->tableau;
TS quadts = ts->quadraturets;
const PetscInt s = tab->s;
const PetscReal *b = tab->b,*c = tab->c;
Vec *Y = rk->Y;
PetscInt i;
PetscErrorCode ierr;
PetscFunctionBegin;
for (i=s-1; i>=0; i--) {
/* Evolve quadrature TS solution to compute integrals */
ierr = TSComputeRHSFunction(quadts,ts->ptime+ts->time_step*(1.0-c[i]),Y[i],ts->vec_costintegrand);CHKERRQ(ierr);
ierr = VecAXPY(quadts->vec_sol,-ts->time_step*b[i],ts->vec_costintegrand);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode TSRollBack_RK(TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
TS quadts = ts->quadraturets;
RKTableau tab = rk->tableau;
const PetscInt s = tab->s;
const PetscReal *b = tab->b,*c = tab->c;
PetscScalar *w = rk->work;
Vec *Y = rk->Y,*YdotRHS = rk->YdotRHS;
PetscInt j;
PetscReal h;
PetscErrorCode ierr;
PetscFunctionBegin;
switch (rk->status) {
case TS_STEP_INCOMPLETE:
case TS_STEP_PENDING:
h = ts->time_step; break;
case TS_STEP_COMPLETE:
h = ts->ptime - ts->ptime_prev; break;
default: SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_PLIB,"Invalid TSStepStatus");
}
for (j=0; j<s; j++) w[j] = -h*b[j];
ierr = VecMAXPY(ts->vec_sol,s,w,YdotRHS);CHKERRQ(ierr);
if (quadts && ts->costintegralfwd) {
for (j=0; j<s; j++) {
/* Revert the quadrature TS solution */
ierr = TSComputeRHSFunction(quadts,rk->ptime+h*c[j],Y[j],ts->vec_costintegrand);CHKERRQ(ierr);
ierr = VecAXPY(quadts->vec_sol,-h*b[j],ts->vec_costintegrand);CHKERRQ(ierr);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode TSForwardStep_RK(TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
RKTableau tab = rk->tableau;
Mat J,*MatsFwdSensipTemp = rk->MatsFwdSensipTemp;
const PetscInt s = tab->s;
const PetscReal *A = tab->A,*c = tab->c,*b = tab->b;
Vec *Y = rk->Y;
PetscInt i,j;
PetscReal stage_time,h = ts->time_step;
PetscBool zero;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCopy(ts->mat_sensip,rk->MatFwdSensip0,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
ierr = TSGetRHSJacobian(ts,&J,NULL,NULL,NULL);CHKERRQ(ierr);
for (i=0; i<s; i++) {
stage_time = ts->ptime + h*c[i];
zero = PETSC_FALSE;
if (b[i] == 0 && i == s-1) zero = PETSC_TRUE;
/* TLM Stage values */
if (!i) {
ierr = MatCopy(ts->mat_sensip,rk->MatsFwdStageSensip[i],SAME_NONZERO_PATTERN);CHKERRQ(ierr);
} else if (!zero) {
ierr = MatZeroEntries(rk->MatsFwdStageSensip[i]);CHKERRQ(ierr);
for (j=0; j<i; j++) {
ierr = MatAXPY(rk->MatsFwdStageSensip[i],h*A[i*s+j],MatsFwdSensipTemp[j],SAME_NONZERO_PATTERN);CHKERRQ(ierr);
}
ierr = MatAXPY(rk->MatsFwdStageSensip[i],1.,ts->mat_sensip,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
} else {
ierr = MatZeroEntries(rk->MatsFwdStageSensip[i]);CHKERRQ(ierr);
}
ierr = TSComputeRHSJacobian(ts,stage_time,Y[i],J,J);CHKERRQ(ierr);
ierr = MatMatMult(J,rk->MatsFwdStageSensip[i],MAT_REUSE_MATRIX,PETSC_DEFAULT,&MatsFwdSensipTemp[i]);CHKERRQ(ierr);
if (ts->Jacprhs) {
ierr = TSComputeRHSJacobianP(ts,stage_time,Y[i],ts->Jacprhs);CHKERRQ(ierr); /* get f_p */
if (ts->vecs_sensi2p) { /* TLM used for 2nd-order adjoint */
PetscScalar *xarr;
ierr = MatDenseGetColumn(MatsFwdSensipTemp[i],0,&xarr);CHKERRQ(ierr);
ierr = VecPlaceArray(rk->VecDeltaFwdSensipCol,xarr);CHKERRQ(ierr);
ierr = MatMultAdd(ts->Jacprhs,ts->vec_dir,rk->VecDeltaFwdSensipCol,rk->VecDeltaFwdSensipCol);CHKERRQ(ierr);
ierr = VecResetArray(rk->VecDeltaFwdSensipCol);CHKERRQ(ierr);
ierr = MatDenseRestoreColumn(MatsFwdSensipTemp[i],&xarr);CHKERRQ(ierr);
} else {
ierr = MatAXPY(MatsFwdSensipTemp[i],1.,ts->Jacprhs,SUBSET_NONZERO_PATTERN);CHKERRQ(ierr);
}
}
}
for (i=0; i<s; i++) {
ierr = MatAXPY(ts->mat_sensip,h*b[i],rk->MatsFwdSensipTemp[i],SAME_NONZERO_PATTERN);CHKERRQ(ierr);
}
rk->status = TS_STEP_COMPLETE;
PetscFunctionReturn(0);
}
static PetscErrorCode TSForwardGetStages_RK(TS ts,PetscInt *ns,Mat **stagesensip)
{
TS_RK *rk = (TS_RK*)ts->data;
RKTableau tab = rk->tableau;
PetscFunctionBegin;
if (ns) *ns = tab->s;
if (stagesensip) *stagesensip = rk->MatsFwdStageSensip;
PetscFunctionReturn(0);
}
static PetscErrorCode TSForwardSetUp_RK(TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
RKTableau tab = rk->tableau;
PetscInt i;
PetscErrorCode ierr;
PetscFunctionBegin;
/* backup sensitivity results for roll-backs */
ierr = MatDuplicate(ts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&rk->MatFwdSensip0);CHKERRQ(ierr);
ierr = PetscMalloc1(tab->s,&rk->MatsFwdStageSensip);CHKERRQ(ierr);
ierr = PetscMalloc1(tab->s,&rk->MatsFwdSensipTemp);CHKERRQ(ierr);
for (i=0; i<tab->s; i++) {
ierr = MatDuplicate(ts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&rk->MatsFwdStageSensip[i]);CHKERRQ(ierr);
ierr = MatDuplicate(ts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&rk->MatsFwdSensipTemp[i]);CHKERRQ(ierr);
}
ierr = VecDuplicate(ts->vec_sol,&rk->VecDeltaFwdSensipCol);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode TSForwardReset_RK(TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
RKTableau tab = rk->tableau;
PetscInt i;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatDestroy(&rk->MatFwdSensip0);CHKERRQ(ierr);
if (rk->MatsFwdStageSensip) {
for (i=0; i<tab->s; i++) {
ierr = MatDestroy(&rk->MatsFwdStageSensip[i]);CHKERRQ(ierr);
}
ierr = PetscFree(rk->MatsFwdStageSensip);CHKERRQ(ierr);
}
if (rk->MatsFwdSensipTemp) {
for (i=0; i<tab->s; i++) {
ierr = MatDestroy(&rk->MatsFwdSensipTemp[i]);CHKERRQ(ierr);
}
ierr = PetscFree(rk->MatsFwdSensipTemp);CHKERRQ(ierr);
}
ierr = VecDestroy(&rk->VecDeltaFwdSensipCol);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode TSStep_RK(TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
RKTableau tab = rk->tableau;
const PetscInt s = tab->s;
const PetscReal *A = tab->A,*c = tab->c;
PetscScalar *w = rk->work;
Vec *Y = rk->Y,*YdotRHS = rk->YdotRHS;
PetscBool FSAL = tab->FSAL;
TSAdapt adapt;
PetscInt i,j;
PetscInt rejections = 0;
PetscBool stageok,accept = PETSC_TRUE;
PetscReal next_time_step = ts->time_step;
PetscErrorCode ierr;
PetscFunctionBegin;
if (ts->steprollback || ts->steprestart) FSAL = PETSC_FALSE;
if (FSAL) { ierr = VecCopy(YdotRHS[s-1],YdotRHS[0]);CHKERRQ(ierr); }
rk->status = TS_STEP_INCOMPLETE;
while (!ts->reason && rk->status != TS_STEP_COMPLETE) {
PetscReal t = ts->ptime;
PetscReal h = ts->time_step;
for (i=0; i<s; i++) {
rk->stage_time = t + h*c[i];
ierr = TSPreStage(ts,rk->stage_time);CHKERRQ(ierr);
ierr = VecCopy(ts->vec_sol,Y[i]);CHKERRQ(ierr);
for (j=0; j<i; j++) w[j] = h*A[i*s+j];
ierr = VecMAXPY(Y[i],i,w,YdotRHS);CHKERRQ(ierr);
ierr = TSPostStage(ts,rk->stage_time,i,Y);CHKERRQ(ierr);
ierr = TSGetAdapt(ts,&adapt);CHKERRQ(ierr);
ierr = TSAdaptCheckStage(adapt,ts,rk->stage_time,Y[i],&stageok);CHKERRQ(ierr);
if (!stageok) goto reject_step;
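      /* With FSAL, YdotRHS[0] was copied from the last stage of the previous step before this loop, so the first-stage RHS need not be recomputed */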
if (FSAL && !i) continue;
ierr = TSComputeRHSFunction(ts,t+h*c[i],Y[i],YdotRHS[i]);CHKERRQ(ierr);
}
rk->status = TS_STEP_INCOMPLETE;
ierr = TSEvaluateStep(ts,tab->order,ts->vec_sol,NULL);CHKERRQ(ierr);
rk->status = TS_STEP_PENDING;
ierr = TSGetAdapt(ts,&adapt);CHKERRQ(ierr);
ierr = TSAdaptCandidatesClear(adapt);CHKERRQ(ierr);
ierr = TSAdaptCandidateAdd(adapt,tab->name,tab->order,1,tab->ccfl,(PetscReal)tab->s,PETSC_TRUE);CHKERRQ(ierr);
ierr = TSAdaptChoose(adapt,ts,ts->time_step,NULL,&next_time_step,&accept);CHKERRQ(ierr);
rk->status = accept ? TS_STEP_COMPLETE : TS_STEP_INCOMPLETE;
if (!accept) { /* Roll back the current step */
ierr = TSRollBack_RK(ts);CHKERRQ(ierr);
ts->time_step = next_time_step;
goto reject_step;
}
if (ts->costintegralfwd) { /* Save the info for the later use in cost integral evaluation */
rk->ptime = ts->ptime;
rk->time_step = ts->time_step;
}
ts->ptime += ts->time_step;
ts->time_step = next_time_step;
break;
reject_step:
ts->reject++; accept = PETSC_FALSE;
if (!ts->reason && ++rejections > ts->max_reject && ts->max_reject >= 0) {
ts->reason = TS_DIVERGED_STEP_REJECTED;
ierr = PetscInfo2(ts,"Step=%D, step rejections %D greater than current TS allowed, stopping solve\n",ts->steps,rejections);CHKERRQ(ierr);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode TSAdjointSetUp_RK(TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
RKTableau tab = rk->tableau;
PetscInt s = tab->s;
PetscErrorCode ierr;
PetscFunctionBegin;
if (ts->adjointsetupcalled++) PetscFunctionReturn(0);
ierr = VecDuplicateVecs(ts->vecs_sensi[0],s*ts->numcost,&rk->VecsDeltaLam);CHKERRQ(ierr);
ierr = VecDuplicateVecs(ts->vecs_sensi[0],ts->numcost,&rk->VecsSensiTemp);CHKERRQ(ierr);
if (ts->vecs_sensip) {
ierr = VecDuplicate(ts->vecs_sensip[0],&rk->VecDeltaMu);CHKERRQ(ierr);
}
if (ts->vecs_sensi2) {
ierr = VecDuplicateVecs(ts->vecs_sensi[0],s*ts->numcost,&rk->VecsDeltaLam2);CHKERRQ(ierr);
ierr = VecDuplicateVecs(ts->vecs_sensi2[0],ts->numcost,&rk->VecsSensi2Temp);CHKERRQ(ierr);
}
if (ts->vecs_sensi2p) {
ierr = VecDuplicate(ts->vecs_sensi2p[0],&rk->VecDeltaMu2);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*
Assumptions:
- TSStep_RK() always evaluates the step with b, not bembed.
*/
static PetscErrorCode TSAdjointStep_RK(TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
TS quadts = ts->quadraturets;
RKTableau tab = rk->tableau;
Mat J,Jpre,Jquad;
const PetscInt s = tab->s;
const PetscReal *A = tab->A,*b = tab->b,*c = tab->c;
PetscScalar *w = rk->work,*xarr;
Vec *Y = rk->Y,*VecsDeltaLam = rk->VecsDeltaLam,VecDeltaMu = rk->VecDeltaMu,*VecsSensiTemp = rk->VecsSensiTemp;
Vec *VecsDeltaLam2 = rk->VecsDeltaLam2,VecDeltaMu2 = rk->VecDeltaMu2,*VecsSensi2Temp = rk->VecsSensi2Temp;
Vec VecDRDUTransCol = ts->vec_drdu_col,VecDRDPTransCol = ts->vec_drdp_col;
PetscInt i,j,nadj;
PetscReal t = ts->ptime;
PetscReal h = ts->time_step;
PetscErrorCode ierr;
PetscFunctionBegin;
rk->status = TS_STEP_INCOMPLETE;
ierr = TSGetRHSJacobian(ts,&J,&Jpre,NULL,NULL);CHKERRQ(ierr);
if (quadts) {
ierr = TSGetRHSJacobian(quadts,&Jquad,NULL,NULL,NULL);CHKERRQ(ierr);
}
for (i=s-1; i>=0; i--) {
if (tab->FSAL && i == s-1) {
/* VecsDeltaLam[nadj*s+s-1] are initialized with zeros and the values never change.*/
continue;
}
rk->stage_time = t + h*(1.0-c[i]);
ierr = TSComputeSNESJacobian(ts,Y[i],J,Jpre);CHKERRQ(ierr);
if (quadts) {
ierr = TSComputeRHSJacobian(quadts,rk->stage_time,Y[i],Jquad,Jquad);CHKERRQ(ierr); /* get r_u^T */
}
if (ts->vecs_sensip) {
ierr = TSComputeRHSJacobianP(ts,rk->stage_time,Y[i],ts->Jacprhs);CHKERRQ(ierr); /* get f_p */
if (quadts) {
ierr = TSComputeRHSJacobianP(quadts,rk->stage_time,Y[i],quadts->Jacprhs);CHKERRQ(ierr); /* get f_p for the quadrature */
}
}
if (b[i]) {
for (j=i+1; j<s; j++) w[j-i-1] = A[j*s+i]/b[i]; /* coefficients for computing VecsSensiTemp */
} else {
for (j=i+1; j<s; j++) w[j-i-1] = A[j*s+i]; /* coefficients for computing VecsSensiTemp */
}
for (nadj=0; nadj<ts->numcost; nadj++) {
/* Stage values of lambda */
if (b[i]) {
/* lambda_{n+1} + \sum_{j=i+1}^s a_{ji}/b[i]*lambda_{s,j} */
        ierr = VecCopy(ts->vecs_sensi[nadj],VecsSensiTemp[nadj]);CHKERRQ(ierr); /* VecsDeltaLam is a Vec array of size s by numcost */
ierr = VecMAXPY(VecsSensiTemp[nadj],s-i-1,w,&VecsDeltaLam[nadj*s+i+1]);CHKERRQ(ierr);
ierr = MatMultTranspose(J,VecsSensiTemp[nadj],VecsDeltaLam[nadj*s+i]);CHKERRQ(ierr); /* VecsSensiTemp will be reused by 2nd-order adjoint */
ierr = VecScale(VecsDeltaLam[nadj*s+i],-h*b[i]);CHKERRQ(ierr);
if (quadts) {
ierr = MatDenseGetColumn(Jquad,nadj,&xarr);CHKERRQ(ierr);
ierr = VecPlaceArray(VecDRDUTransCol,xarr);CHKERRQ(ierr);
ierr = VecAXPY(VecsDeltaLam[nadj*s+i],-h*b[i],VecDRDUTransCol);CHKERRQ(ierr);
ierr = VecResetArray(VecDRDUTransCol);CHKERRQ(ierr);
ierr = MatDenseRestoreColumn(Jquad,&xarr);CHKERRQ(ierr);
}
} else {
/* \sum_{j=i+1}^s a_{ji}*lambda_{s,j} */
ierr = VecSet(VecsSensiTemp[nadj],0);CHKERRQ(ierr);
ierr = VecMAXPY(VecsSensiTemp[nadj],s-i-1,w,&VecsDeltaLam[nadj*s+i+1]);CHKERRQ(ierr);
ierr = MatMultTranspose(J,VecsSensiTemp[nadj],VecsDeltaLam[nadj*s+i]);CHKERRQ(ierr);
ierr = VecScale(VecsDeltaLam[nadj*s+i],-h);CHKERRQ(ierr);
}
/* Stage values of mu */
if (ts->vecs_sensip) {
ierr = MatMultTranspose(ts->Jacprhs,VecsSensiTemp[nadj],VecDeltaMu);CHKERRQ(ierr);
if (b[i]) {
ierr = VecScale(VecDeltaMu,-h*b[i]);CHKERRQ(ierr);
if (quadts) {
ierr = MatDenseGetColumn(quadts->Jacprhs,nadj,&xarr);CHKERRQ(ierr);
ierr = VecPlaceArray(VecDRDPTransCol,xarr);CHKERRQ(ierr);
ierr = VecAXPY(VecDeltaMu,-h*b[i],VecDRDPTransCol);CHKERRQ(ierr);
ierr = VecResetArray(VecDRDPTransCol);CHKERRQ(ierr);
ierr = MatDenseRestoreColumn(quadts->Jacprhs,&xarr);CHKERRQ(ierr);
}
} else {
ierr = VecScale(VecDeltaMu,-h);CHKERRQ(ierr);
}
ierr = VecAXPY(ts->vecs_sensip[nadj],1.,VecDeltaMu);CHKERRQ(ierr); /* update sensip for each stage */
}
}
if (ts->vecs_sensi2 && ts->forward_solve) { /* 2nd-order adjoint, TLM mode has to be turned on */
/* Get w1 at t_{n+1} from TLM matrix */
ierr = MatDenseGetColumn(rk->MatsFwdStageSensip[i],0,&xarr);CHKERRQ(ierr);
ierr = VecPlaceArray(ts->vec_sensip_col,xarr);CHKERRQ(ierr);
/* lambda_s^T F_UU w_1 */
ierr = TSComputeRHSHessianProductFunctionUU(ts,rk->stage_time,Y[i],VecsSensiTemp,ts->vec_sensip_col,ts->vecs_guu);CHKERRQ(ierr);
if (quadts) {
/* R_UU w_1 */
ierr = TSComputeRHSHessianProductFunctionUU(quadts,rk->stage_time,Y[i],NULL,ts->vec_sensip_col,ts->vecs_guu);CHKERRQ(ierr);
}
if (ts->vecs_sensip) {
/* lambda_s^T F_UP w_2 */
ierr = TSComputeRHSHessianProductFunctionUP(ts,rk->stage_time,Y[i],VecsSensiTemp,ts->vec_dir,ts->vecs_gup);CHKERRQ(ierr);
if (quadts) {
/* R_UP w_2 */
ierr = TSComputeRHSHessianProductFunctionUP(quadts,rk->stage_time,Y[i],NULL,ts->vec_sensip_col,ts->vecs_gup);CHKERRQ(ierr);
}
}
if (ts->vecs_sensi2p) {
/* lambda_s^T F_PU w_1 */
ierr = TSComputeRHSHessianProductFunctionPU(ts,rk->stage_time,Y[i],VecsSensiTemp,ts->vec_sensip_col,ts->vecs_gpu);CHKERRQ(ierr);
/* lambda_s^T F_PP w_2 */
ierr = TSComputeRHSHessianProductFunctionPP(ts,rk->stage_time,Y[i],VecsSensiTemp,ts->vec_dir,ts->vecs_gpp);CHKERRQ(ierr);
if (b[i] && quadts) {
/* R_PU w_1 */
ierr = TSComputeRHSHessianProductFunctionPU(quadts,rk->stage_time,Y[i],NULL,ts->vec_sensip_col,ts->vecs_gpu);CHKERRQ(ierr);
/* R_PP w_2 */
ierr = TSComputeRHSHessianProductFunctionPP(quadts,rk->stage_time,Y[i],NULL,ts->vec_dir,ts->vecs_gpp);CHKERRQ(ierr);
}
}
ierr = VecResetArray(ts->vec_sensip_col);CHKERRQ(ierr);
ierr = MatDenseRestoreColumn(rk->MatsFwdStageSensip[i],&xarr);CHKERRQ(ierr);
for (nadj=0; nadj<ts->numcost; nadj++) {
/* Stage values of lambda */
if (b[i]) {
          /* J_i^T*(Lambda_{n+1}+\sum_{j=i+1}^s a_{ji}/b_i*Lambda_{s,j}) */
ierr = VecCopy(ts->vecs_sensi2[nadj],VecsSensi2Temp[nadj]);CHKERRQ(ierr);
ierr = VecMAXPY(VecsSensi2Temp[nadj],s-i-1,w,&VecsDeltaLam2[nadj*s+i+1]);CHKERRQ(ierr);
ierr = MatMultTranspose(J,VecsSensi2Temp[nadj],VecsDeltaLam2[nadj*s+i]);CHKERRQ(ierr);
ierr = VecScale(VecsDeltaLam2[nadj*s+i],-h*b[i]);CHKERRQ(ierr);
ierr = VecAXPY(VecsDeltaLam2[nadj*s+i],-h*b[i],ts->vecs_guu[nadj]);CHKERRQ(ierr);
if (ts->vecs_sensip) {
ierr = VecAXPY(VecsDeltaLam2[nadj*s+i],-h*b[i],ts->vecs_gup[nadj]);CHKERRQ(ierr);
}
} else {
/* \sum_{j=i+1}^s a_{ji}*Lambda_{s,j} */
ierr = VecSet(VecsDeltaLam2[nadj*s+i],0);CHKERRQ(ierr);
ierr = VecMAXPY(VecsSensi2Temp[nadj],s-i-1,w,&VecsDeltaLam2[nadj*s+i+1]);CHKERRQ(ierr);
ierr = MatMultTranspose(J,VecsSensi2Temp[nadj],VecsDeltaLam2[nadj*s+i]);CHKERRQ(ierr);
ierr = VecScale(VecsDeltaLam2[nadj*s+i],-h);CHKERRQ(ierr);
ierr = VecAXPY(VecsDeltaLam2[nadj*s+i],-h,ts->vecs_guu[nadj]);CHKERRQ(ierr);
if (ts->vecs_sensip) {
ierr = VecAXPY(VecsDeltaLam2[nadj*s+i],-h,ts->vecs_gup[nadj]);CHKERRQ(ierr);
}
}
if (ts->vecs_sensi2p) { /* 2nd-order adjoint for parameters */
ierr = MatMultTranspose(ts->Jacprhs,VecsSensi2Temp[nadj],VecDeltaMu2);CHKERRQ(ierr);
if (b[i]) {
ierr = VecScale(VecDeltaMu2,-h*b[i]);CHKERRQ(ierr);
ierr = VecAXPY(VecDeltaMu2,-h*b[i],ts->vecs_gpu[nadj]);CHKERRQ(ierr);
ierr = VecAXPY(VecDeltaMu2,-h*b[i],ts->vecs_gpp[nadj]);CHKERRQ(ierr);
} else {
ierr = VecScale(VecDeltaMu2,-h);CHKERRQ(ierr);
ierr = VecAXPY(VecDeltaMu2,-h,ts->vecs_gpu[nadj]);CHKERRQ(ierr);
ierr = VecAXPY(VecDeltaMu2,-h,ts->vecs_gpp[nadj]);CHKERRQ(ierr);
}
ierr = VecAXPY(ts->vecs_sensi2p[nadj],1,VecDeltaMu2);CHKERRQ(ierr); /* update sensi2p for each stage */
}
}
}
}
for (j=0; j<s; j++) w[j] = 1.0;
for (nadj=0; nadj<ts->numcost; nadj++) { /* no need to do this for mu's */
ierr = VecMAXPY(ts->vecs_sensi[nadj],s,w,&VecsDeltaLam[nadj*s]);CHKERRQ(ierr);
if (ts->vecs_sensi2) {
ierr = VecMAXPY(ts->vecs_sensi2[nadj],s,w,&VecsDeltaLam2[nadj*s]);CHKERRQ(ierr);
}
}
rk->status = TS_STEP_COMPLETE;
PetscFunctionReturn(0);
}
static PetscErrorCode TSAdjointReset_RK(TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
RKTableau tab = rk->tableau;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecDestroyVecs(tab->s*ts->numcost,&rk->VecsDeltaLam);CHKERRQ(ierr);
ierr = VecDestroyVecs(ts->numcost,&rk->VecsSensiTemp);CHKERRQ(ierr);
ierr = VecDestroy(&rk->VecDeltaMu);CHKERRQ(ierr);
ierr = VecDestroyVecs(tab->s*ts->numcost,&rk->VecsDeltaLam2);CHKERRQ(ierr);
ierr = VecDestroy(&rk->VecDeltaMu2);CHKERRQ(ierr);
ierr = VecDestroyVecs(ts->numcost,&rk->VecsSensi2Temp);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode TSInterpolate_RK(TS ts,PetscReal itime,Vec X)
{
TS_RK *rk = (TS_RK*)ts->data;
PetscInt s = rk->tableau->s,p = rk->tableau->p,i,j;
PetscReal h;
PetscReal tt,t;
PetscScalar *b;
const PetscReal *B = rk->tableau->binterp;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!B) SETERRQ1(PetscObjectComm((PetscObject)ts),PETSC_ERR_SUP,"TSRK %s does not have an interpolation formula",rk->tableau->name);
switch (rk->status) {
case TS_STEP_INCOMPLETE:
case TS_STEP_PENDING:
h = ts->time_step;
t = (itime - ts->ptime)/h;
break;
case TS_STEP_COMPLETE:
h = ts->ptime - ts->ptime_prev;
t = (itime - ts->ptime)/h + 1; /* In the interval [0,1] */
break;
default: SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_PLIB,"Invalid TSStepStatus");
}
ierr = PetscMalloc1(s,&b);CHKERRQ(ierr);
for (i=0; i<s; i++) b[i] = 0;
for (j=0,tt=t; j<p; j++,tt*=t) {
for (i=0; i<s; i++) {
b[i] += h * B[i*p+j] * tt;
}
}
ierr = VecCopy(rk->Y[0],X);CHKERRQ(ierr);
ierr = VecMAXPY(X,s,b,rk->YdotRHS);CHKERRQ(ierr);
ierr = PetscFree(b);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*------------------------------------------------------------*/
static PetscErrorCode TSRKTableauReset(TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
RKTableau tab = rk->tableau;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!tab) PetscFunctionReturn(0);
ierr = PetscFree(rk->work);CHKERRQ(ierr);
ierr = VecDestroyVecs(tab->s,&rk->Y);CHKERRQ(ierr);
ierr = VecDestroyVecs(tab->s,&rk->YdotRHS);CHKERRQ(ierr);
ierr = VecDestroyVecs(tab->s*ts->numcost,&rk->VecsDeltaLam);CHKERRQ(ierr);
ierr = VecDestroyVecs(ts->numcost,&rk->VecsSensiTemp);CHKERRQ(ierr);
ierr = VecDestroy(&rk->VecDeltaMu);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode TSReset_RK(TS ts)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = TSRKTableauReset(ts);CHKERRQ(ierr);
if (ts->use_splitrhsfunction) {
ierr = PetscTryMethod(ts,"TSReset_RK_MultirateSplit_C",(TS),(ts));CHKERRQ(ierr);
} else {
ierr = PetscTryMethod(ts,"TSReset_RK_MultirateNonsplit_C",(TS),(ts));CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode DMCoarsenHook_TSRK(DM fine,DM coarse,void *ctx)
{
PetscFunctionBegin;
PetscFunctionReturn(0);
}
static PetscErrorCode DMRestrictHook_TSRK(DM fine,Mat restrct,Vec rscale,Mat inject,DM coarse,void *ctx)
{
PetscFunctionBegin;
PetscFunctionReturn(0);
}
static PetscErrorCode DMSubDomainHook_TSRK(DM dm,DM subdm,void *ctx)
{
PetscFunctionBegin;
PetscFunctionReturn(0);
}
static PetscErrorCode DMSubDomainRestrictHook_TSRK(DM dm,VecScatter gscat,VecScatter lscat,DM subdm,void *ctx)
{
PetscFunctionBegin;
PetscFunctionReturn(0);
}
static PetscErrorCode TSRKTableauSetUp(TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
RKTableau tab = rk->tableau;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscMalloc1(tab->s,&rk->work);CHKERRQ(ierr);
ierr = VecDuplicateVecs(ts->vec_sol,tab->s,&rk->Y);CHKERRQ(ierr);
ierr = VecDuplicateVecs(ts->vec_sol,tab->s,&rk->YdotRHS);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode TSSetUp_RK(TS ts)
{
TS quadts = ts->quadraturets;
PetscErrorCode ierr;
DM dm;
PetscFunctionBegin;
ierr = TSCheckImplicitTerm(ts);CHKERRQ(ierr);
ierr = TSRKTableauSetUp(ts);CHKERRQ(ierr);
if (quadts && ts->costintegralfwd) {
Mat Jquad;
ierr = TSGetRHSJacobian(quadts,&Jquad,NULL,NULL,NULL);CHKERRQ(ierr);
}
ierr = TSGetDM(ts,&dm);CHKERRQ(ierr);
ierr = DMCoarsenHookAdd(dm,DMCoarsenHook_TSRK,DMRestrictHook_TSRK,ts);CHKERRQ(ierr);
ierr = DMSubDomainHookAdd(dm,DMSubDomainHook_TSRK,DMSubDomainRestrictHook_TSRK,ts);CHKERRQ(ierr);
if (ts->use_splitrhsfunction) {
ierr = PetscTryMethod(ts,"TSSetUp_RK_MultirateSplit_C",(TS),(ts));CHKERRQ(ierr);
} else {
ierr = PetscTryMethod(ts,"TSSetUp_RK_MultirateNonsplit_C",(TS),(ts));CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode TSSetFromOptions_RK(PetscOptionItems *PetscOptionsObject,TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscOptionsHead(PetscOptionsObject,"RK ODE solver options");CHKERRQ(ierr);
{
RKTableauLink link;
PetscInt count,choice;
PetscBool flg,use_multirate = PETSC_FALSE;
const char **namelist;
for (link=RKTableauList,count=0; link; link=link->next,count++) ;
ierr = PetscMalloc1(count,(char***)&namelist);CHKERRQ(ierr);
for (link=RKTableauList,count=0; link; link=link->next,count++) namelist[count] = link->tab.name;
ierr = PetscOptionsBool("-ts_rk_multirate","Use interpolation-based multirate RK method","TSRKSetMultirate",rk->use_multirate,&use_multirate,&flg);CHKERRQ(ierr);
if (flg) {
ierr = TSRKSetMultirate(ts,use_multirate);CHKERRQ(ierr);
}
ierr = PetscOptionsEList("-ts_rk_type","Family of RK method","TSRKSetType",(const char*const*)namelist,count,rk->tableau->name,&choice,&flg);CHKERRQ(ierr);
if (flg) {ierr = TSRKSetType(ts,namelist[choice]);CHKERRQ(ierr);}
ierr = PetscFree(namelist);CHKERRQ(ierr);
}
ierr = PetscOptionsTail();CHKERRQ(ierr);
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)ts),NULL,"Multirate methods options","");CHKERRQ(ierr);
ierr = PetscOptionsInt("-ts_rk_dtratio","time step ratio between slow and fast","",rk->dtratio,&rk->dtratio,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode TSView_RK(TS ts,PetscViewer viewer)
{
TS_RK *rk = (TS_RK*)ts->data;
PetscBool iascii;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
if (iascii) {
RKTableau tab = rk->tableau;
TSRKType rktype;
const PetscReal *c;
PetscInt s;
char buf[512];
PetscBool FSAL;
ierr = TSRKGetType(ts,&rktype);CHKERRQ(ierr);
ierr = TSRKGetTableau(ts,&s,NULL,NULL,&c,NULL,NULL,NULL,&FSAL);CHKERRQ(ierr);
ierr = PetscViewerASCIIPrintf(viewer," RK type %s\n",rktype);CHKERRQ(ierr);
ierr = PetscViewerASCIIPrintf(viewer," Order: %D\n",tab->order);CHKERRQ(ierr);
ierr = PetscViewerASCIIPrintf(viewer," FSAL property: %s\n",FSAL ? "yes" : "no");CHKERRQ(ierr);
ierr = PetscFormatRealArray(buf,sizeof(buf),"% 8.6f",s,c);CHKERRQ(ierr);
ierr = PetscViewerASCIIPrintf(viewer," Abscissa c = %s\n",buf);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode TSLoad_RK(TS ts,PetscViewer viewer)
{
PetscErrorCode ierr;
TSAdapt adapt;
PetscFunctionBegin;
ierr = TSGetAdapt(ts,&adapt);CHKERRQ(ierr);
ierr = TSAdaptLoad(adapt,viewer);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@
TSRKGetOrder - Get the order of RK scheme
Not collective
Input Parameter:
. ts - timestepping context
Output Parameter:
. order - order of RK-scheme
Level: intermediate
.seealso: TSRKGetType()
@*/
PetscErrorCode TSRKGetOrder(TS ts,PetscInt *order)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(ts,TS_CLASSID,1);
PetscValidIntPointer(order,2);
ierr = PetscUseMethod(ts,"TSRKGetOrder_C",(TS,PetscInt*),(ts,order));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@C
TSRKSetType - Set the type of RK scheme
Logically collective
  Input Parameters:
+ ts - timestepping context
- rktype - type of RK-scheme
Options Database:
. -ts_rk_type - <1fe,2a,3,3bs,4,5f,5dp,5bs,6vr,7vr,8vr>
Level: intermediate
.seealso: TSRKGetType(), TSRK, TSRKType, TSRK1FE, TSRK2A, TSRK3, TSRK3BS, TSRK4, TSRK5F, TSRK5DP, TSRK5BS, TSRK6VR, TSRK7VR, TSRK8VR
@*/
PetscErrorCode TSRKSetType(TS ts,TSRKType rktype)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(ts,TS_CLASSID,1);
PetscValidCharPointer(rktype,2);
ierr = PetscTryMethod(ts,"TSRKSetType_C",(TS,TSRKType),(ts,rktype));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@C
TSRKGetType - Get the type of RK scheme
Not collective
Input Parameter:
. ts - timestepping context
Output Parameter:
. rktype - type of RK-scheme
Level: intermediate
.seealso: TSRKSetType()
@*/
PetscErrorCode TSRKGetType(TS ts,TSRKType *rktype)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(ts,TS_CLASSID,1);
ierr = PetscUseMethod(ts,"TSRKGetType_C",(TS,TSRKType*),(ts,rktype));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode TSRKGetOrder_RK(TS ts,PetscInt *order)
{
TS_RK *rk = (TS_RK*)ts->data;
PetscFunctionBegin;
*order = rk->tableau->order;
PetscFunctionReturn(0);
}
static PetscErrorCode TSRKGetType_RK(TS ts,TSRKType *rktype)
{
TS_RK *rk = (TS_RK*)ts->data;
PetscFunctionBegin;
*rktype = rk->tableau->name;
PetscFunctionReturn(0);
}
static PetscErrorCode TSRKSetType_RK(TS ts,TSRKType rktype)
{
TS_RK *rk = (TS_RK*)ts->data;
PetscErrorCode ierr;
PetscBool match;
RKTableauLink link;
PetscFunctionBegin;
if (rk->tableau) {
ierr = PetscStrcmp(rk->tableau->name,rktype,&match);CHKERRQ(ierr);
if (match) PetscFunctionReturn(0);
}
for (link = RKTableauList; link; link=link->next) {
ierr = PetscStrcmp(link->tab.name,rktype,&match);CHKERRQ(ierr);
if (match) {
if (ts->setupcalled) {ierr = TSRKTableauReset(ts);CHKERRQ(ierr);}
rk->tableau = &link->tab;
if (ts->setupcalled) {ierr = TSRKTableauSetUp(ts);CHKERRQ(ierr);}
ts->default_adapt_type = rk->tableau->bembed ? TSADAPTBASIC : TSADAPTNONE;
PetscFunctionReturn(0);
}
}
SETERRQ1(PetscObjectComm((PetscObject)ts),PETSC_ERR_ARG_UNKNOWN_TYPE,"Could not find '%s'",rktype);
PetscFunctionReturn(0);
}
static PetscErrorCode TSGetStages_RK(TS ts,PetscInt *ns,Vec **Y)
{
TS_RK *rk = (TS_RK*)ts->data;
PetscFunctionBegin;
if (ns) *ns = rk->tableau->s;
if (Y) *Y = rk->Y;
PetscFunctionReturn(0);
}
static PetscErrorCode TSDestroy_RK(TS ts)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = TSReset_RK(ts);CHKERRQ(ierr);
if (ts->dm) {
ierr = DMCoarsenHookRemove(ts->dm,DMCoarsenHook_TSRK,DMRestrictHook_TSRK,ts);CHKERRQ(ierr);
ierr = DMSubDomainHookRemove(ts->dm,DMSubDomainHook_TSRK,DMSubDomainRestrictHook_TSRK,ts);CHKERRQ(ierr);
}
ierr = PetscFree(ts->data);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)ts,"TSRKGetOrder_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)ts,"TSRKGetType_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)ts,"TSRKSetType_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)ts,"TSRKGetTableau_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)ts,"TSRKSetMultirate_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)ts,"TSRKGetMultirate_C",NULL);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*
This defines the nonlinear equation that is to be solved with SNES
We do not need to solve the equation; we just use SNES to approximate the Jacobian
*/
static PetscErrorCode SNESTSFormFunction_RK(SNES snes,Vec x,Vec y,TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
DM dm,dmsave;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = SNESGetDM(snes,&dm);CHKERRQ(ierr);
/* DM monkey-business allows user code to call TSGetDM() inside of functions evaluated on levels of FAS */
dmsave = ts->dm;
ts->dm = dm;
ierr = TSComputeRHSFunction(ts,rk->stage_time,x,y);CHKERRQ(ierr);
ts->dm = dmsave;
PetscFunctionReturn(0);
}
static PetscErrorCode SNESTSFormJacobian_RK(SNES snes,Vec x,Mat A,Mat B,TS ts)
{
TS_RK *rk = (TS_RK*)ts->data;
DM dm,dmsave;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = SNESGetDM(snes,&dm);CHKERRQ(ierr);
dmsave = ts->dm;
ts->dm = dm;
ierr = TSComputeRHSJacobian(ts,rk->stage_time,x,A,B);CHKERRQ(ierr);
ts->dm = dmsave;
PetscFunctionReturn(0);
}
/*@C
TSRKSetMultirate - Use the interpolation-based multirate RK method
Logically collective
  Input Parameters:
+ ts - timestepping context
- use_multirate - PETSC_TRUE enables the multirate RK method, sets the basic method to RK2A, and sets the ratio between the slow and fast step sizes to 2
Options Database:
. -ts_rk_multirate - <true,false>
Notes:
  The multirate method requires interpolation. The default interpolation works for 1st- and 2nd-order RK, but not for higher-order RK methods except TSRK5DP, which comes with the interpolation coefficients (binterp).
Level: intermediate
.seealso: TSRKGetMultirate()
@*/
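/*
   A hedged usage sketch (assumes an existing TS object "ts"): the multirate scheme
   can be enabled programmatically,

     ierr = TSRKSetMultirate(ts,PETSC_TRUE);CHKERRQ(ierr);

   or from the options database with -ts_rk_multirate, optionally combined with
   -ts_rk_dtratio to change the slow/fast step-size ratio.
*/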
PetscErrorCode TSRKSetMultirate(TS ts,PetscBool use_multirate)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscTryMethod(ts,"TSRKSetMultirate_C",(TS,PetscBool),(ts,use_multirate));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@C
   TSRKGetMultirate - Gets whether the interpolation-based multirate RK method is used
Not collective
Input Parameter:
. ts - timestepping context
Output Parameter:
. use_multirate - PETSC_TRUE if the multirate RK method is enabled, PETSC_FALSE otherwise
Level: intermediate
.seealso: TSRKSetMultirate()
@*/
PetscErrorCode TSRKGetMultirate(TS ts,PetscBool *use_multirate)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscUseMethod(ts,"TSRKGetMultirate_C",(TS,PetscBool*),(ts,use_multirate));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*MC
TSRK - ODE and DAE solver using Runge-Kutta schemes
The user should provide the right hand side of the equation
using TSSetRHSFunction().
Notes:
  The default is TSRK3BS; it can be changed with TSRKSetType() or -ts_rk_type
Level: beginner
.seealso: TSCreate(), TS, TSSetType(), TSRKSetType(), TSRKGetType(), TSRK2D, TSRK2E, TSRK3,
TSRK4, TSRK5, TSRKPRSSP2, TSRKBPR3, TSRKType, TSRKRegister(), TSRKSetMultirate(), TSRKGetMultirate()
M*/
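/*
   A minimal usage sketch (assumes a TS object "ts" with its RHS function already set
   and a PetscErrorCode variable "ierr"):

     ierr = TSSetType(ts,TSRK);CHKERRQ(ierr);
     ierr = TSRKSetType(ts,TSRK5DP);CHKERRQ(ierr);

   or, equivalently, from the command line: -ts_type rk -ts_rk_type 5dp
*/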
PETSC_EXTERN PetscErrorCode TSCreate_RK(TS ts)
{
TS_RK *rk;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = TSRKInitializePackage();CHKERRQ(ierr);
ts->ops->reset = TSReset_RK;
ts->ops->destroy = TSDestroy_RK;
ts->ops->view = TSView_RK;
ts->ops->load = TSLoad_RK;
ts->ops->setup = TSSetUp_RK;
ts->ops->interpolate = TSInterpolate_RK;
ts->ops->step = TSStep_RK;
ts->ops->evaluatestep = TSEvaluateStep_RK;
ts->ops->rollback = TSRollBack_RK;
ts->ops->setfromoptions = TSSetFromOptions_RK;
ts->ops->getstages = TSGetStages_RK;
ts->ops->snesfunction = SNESTSFormFunction_RK;
ts->ops->snesjacobian = SNESTSFormJacobian_RK;
ts->ops->adjointintegral = TSAdjointCostIntegral_RK;
ts->ops->adjointsetup = TSAdjointSetUp_RK;
ts->ops->adjointstep = TSAdjointStep_RK;
ts->ops->adjointreset = TSAdjointReset_RK;
ts->ops->forwardintegral = TSForwardCostIntegral_RK;
ts->ops->forwardsetup = TSForwardSetUp_RK;
ts->ops->forwardreset = TSForwardReset_RK;
ts->ops->forwardstep = TSForwardStep_RK;
ts->ops->forwardgetstages= TSForwardGetStages_RK;
ierr = PetscNewLog(ts,&rk);CHKERRQ(ierr);
ts->data = (void*)rk;
ierr = PetscObjectComposeFunction((PetscObject)ts,"TSRKGetOrder_C",TSRKGetOrder_RK);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)ts,"TSRKGetType_C",TSRKGetType_RK);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)ts,"TSRKSetType_C",TSRKSetType_RK);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)ts,"TSRKGetTableau_C",TSRKGetTableau_RK);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)ts,"TSRKSetMultirate_C",TSRKSetMultirate_RK);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)ts,"TSRKGetMultirate_C",TSRKGetMultirate_RK);CHKERRQ(ierr);
ierr = TSRKSetType(ts,TSRKDefault);CHKERRQ(ierr);
rk->dtratio = 1;
PetscFunctionReturn(0);
}
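/* End-to-end usage sketch for TSRK (hypothetical names: comm, u and the user
   callback RHSFunction are assumed to exist; error checking shortened):

       TS ts;
       ierr = TSCreate(comm, &ts);CHKERRQ(ierr);
       ierr = TSSetType(ts, TSRK);CHKERRQ(ierr);
       ierr = TSRKSetType(ts, TSRK5DP);CHKERRQ(ierr);
       ierr = TSSetRHSFunction(ts, NULL, RHSFunction, NULL);CHKERRQ(ierr);
       ierr = TSSetSolution(ts, u);CHKERRQ(ierr);
       ierr = TSSolve(ts, u);CHKERRQ(ierr);
       ierr = TSDestroy(&ts);CHKERRQ(ierr);

   Omitting TSRKSetType() keeps the default tableau, TSRK3BS; the type can
   also be selected at run time with -ts_rk_type. */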
| {
"language": "C"
} |
/* -----------------------------------------------------------------------------
*
* (c) The GHC Team, 1998-2009
*
* Rts settings.
*
* NOTE: assumes #include "ghcconfig.h"
*
* NB: THIS FILE IS INCLUDED IN NON-C CODE AND DATA! #defines only please.
*
* To understand the structure of the RTS headers, see the wiki:
* http://hackage.haskell.org/trac/ghc/wiki/Commentary/SourceTree/Includes
*
* ---------------------------------------------------------------------------*/
#ifndef RTS_CONFIG_H
#define RTS_CONFIG_H
#if defined(TICKY_TICKY) && defined(THREADED_RTS)
#error TICKY_TICKY is incompatible with THREADED_RTS
#endif
/*
* Whether the runtime system will use libbfd for debugging purposes.
*/
#if defined(DEBUG) && defined(HAVE_BFD_H) && defined(HAVE_LIBBFD) && !defined(_WIN32)
#define USING_LIBBFD 1
#endif
/* DEBUG implies TRACING and TICKY_TICKY
*/
#if defined(DEBUG)
#define TRACING
#define TICKY_TICKY
#endif
/* -----------------------------------------------------------------------------
Signals - supported on non-PAR versions of the runtime. See RtsSignals.h.
-------------------------------------------------------------------------- */
#define RTS_USER_SIGNALS 1
/* Profile spin locks */
#define PROF_SPIN
#endif /* RTS_CONFIG_H */
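/* Hypothetical example (not part of this header): modules can key optional
 * instrumentation off the derived macros rather than off DEBUG itself,
 * since DEBUG implies TRACING and TICKY_TICKY above. The fprintf-based
 * definition is only a sketch; a caller would need <stdio.h>. */
#if defined(TRACING)
#define EXAMPLE_TRACE(msg) fprintf(stderr, "trace: %s\n", (msg))
#else
#define EXAMPLE_TRACE(msg) ((void)0)
#endif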
| {
"language": "C"
} |
/* ----------------------------------------------------------------------------
* SAM Software Package License
* ----------------------------------------------------------------------------
* Copyright (c) 2012, Atmel Corporation
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following condition is met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the disclaimer below.
*
* Atmel's name may not be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
* DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ----------------------------------------------------------------------------
*/
#ifndef _SAM3U_PIOC_INSTANCE_
#define _SAM3U_PIOC_INSTANCE_
/* ========== Register definition for PIOC peripheral ========== */
#if (defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
#define REG_PIOC_PER (0x400E1000U) /**< \brief (PIOC) PIO Enable Register */
#define REG_PIOC_PDR (0x400E1004U) /**< \brief (PIOC) PIO Disable Register */
#define REG_PIOC_PSR (0x400E1008U) /**< \brief (PIOC) PIO Status Register */
#define REG_PIOC_OER (0x400E1010U) /**< \brief (PIOC) Output Enable Register */
#define REG_PIOC_ODR (0x400E1014U) /**< \brief (PIOC) Output Disable Register */
#define REG_PIOC_OSR (0x400E1018U) /**< \brief (PIOC) Output Status Register */
#define REG_PIOC_IFER (0x400E1020U) /**< \brief (PIOC) Glitch Input Filter Enable Register */
#define REG_PIOC_IFDR (0x400E1024U) /**< \brief (PIOC) Glitch Input Filter Disable Register */
#define REG_PIOC_IFSR (0x400E1028U) /**< \brief (PIOC) Glitch Input Filter Status Register */
#define REG_PIOC_SODR (0x400E1030U) /**< \brief (PIOC) Set Output Data Register */
#define REG_PIOC_CODR (0x400E1034U) /**< \brief (PIOC) Clear Output Data Register */
#define REG_PIOC_ODSR (0x400E1038U) /**< \brief (PIOC) Output Data Status Register */
#define REG_PIOC_PDSR (0x400E103CU) /**< \brief (PIOC) Pin Data Status Register */
#define REG_PIOC_IER (0x400E1040U) /**< \brief (PIOC) Interrupt Enable Register */
#define REG_PIOC_IDR (0x400E1044U) /**< \brief (PIOC) Interrupt Disable Register */
#define REG_PIOC_IMR (0x400E1048U) /**< \brief (PIOC) Interrupt Mask Register */
#define REG_PIOC_ISR (0x400E104CU) /**< \brief (PIOC) Interrupt Status Register */
#define REG_PIOC_MDER (0x400E1050U) /**< \brief (PIOC) Multi-driver Enable Register */
#define REG_PIOC_MDDR (0x400E1054U) /**< \brief (PIOC) Multi-driver Disable Register */
#define REG_PIOC_MDSR (0x400E1058U) /**< \brief (PIOC) Multi-driver Status Register */
#define REG_PIOC_PUDR (0x400E1060U) /**< \brief (PIOC) Pull-up Disable Register */
#define REG_PIOC_PUER (0x400E1064U) /**< \brief (PIOC) Pull-up Enable Register */
#define REG_PIOC_PUSR (0x400E1068U) /**< \brief (PIOC) Pad Pull-up Status Register */
#define REG_PIOC_ABSR (0x400E1070U) /**< \brief (PIOC) Peripheral AB Select Register */
#define REG_PIOC_SCIFSR (0x400E1080U) /**< \brief (PIOC) System Clock Glitch Input Filter Select Register */
#define REG_PIOC_DIFSR (0x400E1084U) /**< \brief (PIOC) Debouncing Input Filter Select Register */
#define REG_PIOC_IFDGSR (0x400E1088U) /**< \brief (PIOC) Glitch or Debouncing Input Filter Clock Selection Status Register */
#define REG_PIOC_SCDR (0x400E108CU) /**< \brief (PIOC) Slow Clock Divider Debouncing Register */
#define REG_PIOC_OWER (0x400E10A0U) /**< \brief (PIOC) Output Write Enable */
#define REG_PIOC_OWDR (0x400E10A4U) /**< \brief (PIOC) Output Write Disable */
#define REG_PIOC_OWSR (0x400E10A8U) /**< \brief (PIOC) Output Write Status Register */
#define REG_PIOC_AIMER (0x400E10B0U) /**< \brief (PIOC) Additional Interrupt Modes Enable Register */
#define REG_PIOC_AIMDR (0x400E10B4U) /**< \brief (PIOC) Additional Interrupt Modes Disables Register */
#define REG_PIOC_AIMMR (0x400E10B8U) /**< \brief (PIOC) Additional Interrupt Modes Mask Register */
#define REG_PIOC_ESR (0x400E10C0U) /**< \brief (PIOC) Edge Select Register */
#define REG_PIOC_LSR (0x400E10C4U) /**< \brief (PIOC) Level Select Register */
#define REG_PIOC_ELSR (0x400E10C8U) /**< \brief (PIOC) Edge/Level Status Register */
#define REG_PIOC_FELLSR (0x400E10D0U) /**< \brief (PIOC) Falling Edge/Low Level Select Register */
#define REG_PIOC_REHLSR (0x400E10D4U) /**< \brief (PIOC) Rising Edge/ High Level Select Register */
#define REG_PIOC_FRLHSR (0x400E10D8U) /**< \brief (PIOC) Fall/Rise - Low/High Status Register */
#define REG_PIOC_LOCKSR (0x400E10E0U) /**< \brief (PIOC) Lock Status */
#define REG_PIOC_WPMR (0x400E10E4U) /**< \brief (PIOC) Write Protect Mode Register */
#define REG_PIOC_WPSR (0x400E10E8U) /**< \brief (PIOC) Write Protect Status Register */
#else
#define REG_PIOC_PER (*(WoReg*)0x400E1000U) /**< \brief (PIOC) PIO Enable Register */
#define REG_PIOC_PDR (*(WoReg*)0x400E1004U) /**< \brief (PIOC) PIO Disable Register */
#define REG_PIOC_PSR (*(RoReg*)0x400E1008U) /**< \brief (PIOC) PIO Status Register */
#define REG_PIOC_OER (*(WoReg*)0x400E1010U) /**< \brief (PIOC) Output Enable Register */
#define REG_PIOC_ODR (*(WoReg*)0x400E1014U) /**< \brief (PIOC) Output Disable Register */
#define REG_PIOC_OSR (*(RoReg*)0x400E1018U) /**< \brief (PIOC) Output Status Register */
#define REG_PIOC_IFER (*(WoReg*)0x400E1020U) /**< \brief (PIOC) Glitch Input Filter Enable Register */
#define REG_PIOC_IFDR (*(WoReg*)0x400E1024U) /**< \brief (PIOC) Glitch Input Filter Disable Register */
#define REG_PIOC_IFSR (*(RoReg*)0x400E1028U) /**< \brief (PIOC) Glitch Input Filter Status Register */
#define REG_PIOC_SODR (*(WoReg*)0x400E1030U) /**< \brief (PIOC) Set Output Data Register */
#define REG_PIOC_CODR (*(WoReg*)0x400E1034U) /**< \brief (PIOC) Clear Output Data Register */
#define REG_PIOC_ODSR (*(RwReg*)0x400E1038U) /**< \brief (PIOC) Output Data Status Register */
#define REG_PIOC_PDSR (*(RoReg*)0x400E103CU) /**< \brief (PIOC) Pin Data Status Register */
#define REG_PIOC_IER (*(WoReg*)0x400E1040U) /**< \brief (PIOC) Interrupt Enable Register */
#define REG_PIOC_IDR (*(WoReg*)0x400E1044U) /**< \brief (PIOC) Interrupt Disable Register */
#define REG_PIOC_IMR (*(RoReg*)0x400E1048U) /**< \brief (PIOC) Interrupt Mask Register */
#define REG_PIOC_ISR (*(RoReg*)0x400E104CU) /**< \brief (PIOC) Interrupt Status Register */
#define REG_PIOC_MDER (*(WoReg*)0x400E1050U) /**< \brief (PIOC) Multi-driver Enable Register */
#define REG_PIOC_MDDR (*(WoReg*)0x400E1054U) /**< \brief (PIOC) Multi-driver Disable Register */
#define REG_PIOC_MDSR (*(RoReg*)0x400E1058U) /**< \brief (PIOC) Multi-driver Status Register */
#define REG_PIOC_PUDR (*(WoReg*)0x400E1060U) /**< \brief (PIOC) Pull-up Disable Register */
#define REG_PIOC_PUER (*(WoReg*)0x400E1064U) /**< \brief (PIOC) Pull-up Enable Register */
#define REG_PIOC_PUSR (*(RoReg*)0x400E1068U) /**< \brief (PIOC) Pad Pull-up Status Register */
#define REG_PIOC_ABSR (*(RwReg*)0x400E1070U) /**< \brief (PIOC) Peripheral AB Select Register */
#define REG_PIOC_SCIFSR (*(WoReg*)0x400E1080U) /**< \brief (PIOC) System Clock Glitch Input Filter Select Register */
#define REG_PIOC_DIFSR (*(WoReg*)0x400E1084U) /**< \brief (PIOC) Debouncing Input Filter Select Register */
#define REG_PIOC_IFDGSR (*(RoReg*)0x400E1088U) /**< \brief (PIOC) Glitch or Debouncing Input Filter Clock Selection Status Register */
#define REG_PIOC_SCDR (*(RwReg*)0x400E108CU) /**< \brief (PIOC) Slow Clock Divider Debouncing Register */
#define REG_PIOC_OWER (*(WoReg*)0x400E10A0U) /**< \brief (PIOC) Output Write Enable */
#define REG_PIOC_OWDR (*(WoReg*)0x400E10A4U) /**< \brief (PIOC) Output Write Disable */
#define REG_PIOC_OWSR (*(RoReg*)0x400E10A8U) /**< \brief (PIOC) Output Write Status Register */
#define REG_PIOC_AIMER (*(WoReg*)0x400E10B0U) /**< \brief (PIOC) Additional Interrupt Modes Enable Register */
#define REG_PIOC_AIMDR (*(WoReg*)0x400E10B4U) /**< \brief (PIOC) Additional Interrupt Modes Disables Register */
#define REG_PIOC_AIMMR (*(RoReg*)0x400E10B8U) /**< \brief (PIOC) Additional Interrupt Modes Mask Register */
#define REG_PIOC_ESR (*(WoReg*)0x400E10C0U) /**< \brief (PIOC) Edge Select Register */
#define REG_PIOC_LSR (*(WoReg*)0x400E10C4U) /**< \brief (PIOC) Level Select Register */
#define REG_PIOC_ELSR (*(RoReg*)0x400E10C8U) /**< \brief (PIOC) Edge/Level Status Register */
#define REG_PIOC_FELLSR (*(WoReg*)0x400E10D0U) /**< \brief (PIOC) Falling Edge/Low Level Select Register */
#define REG_PIOC_REHLSR (*(WoReg*)0x400E10D4U) /**< \brief (PIOC) Rising Edge/ High Level Select Register */
#define REG_PIOC_FRLHSR (*(RoReg*)0x400E10D8U) /**< \brief (PIOC) Fall/Rise - Low/High Status Register */
#define REG_PIOC_LOCKSR (*(RoReg*)0x400E10E0U) /**< \brief (PIOC) Lock Status */
#define REG_PIOC_WPMR (*(RwReg*)0x400E10E4U) /**< \brief (PIOC) Write Protect Mode Register */
#define REG_PIOC_WPSR (*(RoReg*)0x400E10E8U) /**< \brief (PIOC) Write Protect Status Register */
#endif /* (defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */
#endif /* _SAM3U_PIOC_INSTANCE_ */
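/* A minimal usage sketch (assumptions: a bare-metal SAM3U project where
 * WoReg/RoReg/RwReg are defined and the PIOC clock has already been enabled
 * via the PMC). PC3 is an arbitrary example pin, not mandated by this header. */
#if 0 /* illustrative only; not part of the register definitions */
static void pioc_example(void)
{
    const unsigned int pin = (1u << 3);   /* PC3 */
    REG_PIOC_PER  = pin;                  /* hand the pin to the PIO controller */
    REG_PIOC_OER  = pin;                  /* configure it as an output */
    REG_PIOC_SODR = pin;                  /* drive the output high */
    (void)(REG_PIOC_PDSR & pin);          /* read the pin level back */
}
#endif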
| {
"language": "C"
} |
/* Copyright (c) 1997-2002 Miller Puckette and others.
* For information on usage and redistribution, and for a DISCLAIMER OF ALL
* WARRANTIES, see the file, "LICENSE.txt," in this distribution. */
/*
Routines to read and write canvases to files:
canvas_savetofile() writes a root canvas to a "pd" file. (Reading "pd" files
is done simply by passing the contents to the pd message interpreter.)
Alternatively, the glist_read() and glist_write() routines read and write
"data" from and to files (reading reads into an existing canvas), using a
file format as in the dialog window for data.
*/
#include <stdlib.h>
#include <stdio.h>
#include "m_pd.h"
#include "g_canvas.h"
#include <string.h>
/* object to assist in saving state by abstractions */
static t_class *savestate_class;
typedef struct _savestate
{
t_object x_obj;
t_outlet *x_stateout;
t_outlet *x_bangout;
t_binbuf *x_savetobuf;
} t_savestate;
static void *savestate_new(void)
{
t_savestate *x = (t_savestate *)pd_new(savestate_class);
x->x_stateout = outlet_new(&x->x_obj, &s_list);
x->x_bangout = outlet_new(&x->x_obj, &s_bang);
x->x_savetobuf = 0;
return (x);
}
/* call this when the owning abstraction's parent patch is saved so we
can add state-restoring messages to binbuf */
static void savestate_doit(t_savestate *x, t_binbuf *b)
{
x->x_savetobuf = b;
outlet_bang(x->x_bangout);
x->x_savetobuf = 0;
}
/* called by abstraction in response to savestate_doit(); lists received
here are added to the parent patch's save buffer after the line that will
create the abstraction, addressed to "#A" which will be this patch after
it is recreated by reopening the parent patch, pasting, or "undo". */
static void savestate_list(t_savestate *x, t_symbol *s, int argc, t_atom *argv)
{
if (x->x_savetobuf)
{
binbuf_addv(x->x_savetobuf, "ss", gensym("#A"), gensym("saved"));
binbuf_add(x->x_savetobuf, argc, argv);
binbuf_addv(x->x_savetobuf, ";");
}
else pd_error(x, "savestate: ignoring message sent when not saving parent");
}
static void savestate_setup(void)
{
savestate_class = class_new(gensym("savestate"),
(t_newmethod)savestate_new, 0, sizeof(t_savestate), 0, 0);
class_addlist(savestate_class, savestate_list);
}
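/* Illustration (hypothetical patch contents, not generated by this file):
   after the line that creates the abstraction, the parent's save buffer
   gains one "#A saved ..." line per list the abstraction sent to its
   savestate object while the parent was being saved, e.g.:

       #X obj 50 50 myabstraction;
       #A saved 1 2 3;
       #A saved foo bar;

   When the parent is reopened (or the abstraction is recreated by paste or
   undo), those "saved" messages are delivered back out the savestate
   object's first outlet via canvas_saved() below. */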
void canvas_statesavers_doit(t_glist *x, t_binbuf *b)
{
t_gobj *g;
for (g = x->gl_list; g; g = g->g_next)
if (g->g_pd == savestate_class)
savestate_doit((t_savestate *)g, b);
else if (g->g_pd == canvas_class && !canvas_isabstraction((t_canvas *)g))
canvas_statesavers_doit((t_glist *)g, b);
}
void canvas_saved(t_glist *x, t_symbol *s, int argc, t_atom *argv)
{
t_gobj *g;
for (g = x->gl_list; g; g = g->g_next)
if (g->g_pd == savestate_class)
outlet_list(((t_savestate *)g)->x_stateout, 0, argc, argv);
else if (g->g_pd == canvas_class && !canvas_isabstraction((t_canvas *)g))
canvas_saved((t_glist *)g, s, argc, argv);
}
static t_class *declare_class;
void canvas_savedeclarationsto(t_canvas *x, t_binbuf *b);
/* the following routines read "scalars" from a file into a canvas. */
static int canvas_scanbinbuf(int natoms, t_atom *vec, int *p_indexout,
int *p_next)
{
int i;
int indexwas = *p_next;
*p_indexout = indexwas;
if (indexwas >= natoms)
return (0);
for (i = indexwas; i < natoms && vec[i].a_type != A_SEMI; i++)
;
if (i >= natoms)
*p_next = i;
else *p_next = i + 1;
return (i - indexwas);
}
int canvas_readscalar(t_glist *x, int natoms, t_atom *vec,
int *p_nextmsg, int selectit);
static void canvas_readerror(int natoms, t_atom *vec, int message,
int nline, char *s)
{
error("%s", s);
startpost("line was:");
postatom(nline, vec + message);
endpost();
}
/* fill in the contents of the scalar into the vector w. */
static void glist_readatoms(t_glist *x, int natoms, t_atom *vec,
int *p_nextmsg, t_symbol *templatesym, t_word *w, int argc, t_atom *argv)
{
int message, n, i;
t_template *template = template_findbyname(templatesym);
if (!template)
{
error("%s: no such template", templatesym->s_name);
*p_nextmsg = natoms;
return;
}
word_restore(w, template, argc, argv);
n = template->t_n;
for (i = 0; i < n; i++)
{
if (template->t_vec[i].ds_type == DT_ARRAY)
{
t_array *a = w[i].w_array;
int elemsize = a->a_elemsize, nitems = 0;
t_symbol *arraytemplatesym = template->t_vec[i].ds_arraytemplate;
t_template *arraytemplate =
template_findbyname(arraytemplatesym);
if (!arraytemplate)
{
error("%s: no such template", arraytemplatesym->s_name);
}
else while (1)
{
t_word *element;
int nline = canvas_scanbinbuf(natoms, vec, &message, p_nextmsg);
/* empty line terminates array */
if (!nline)
break;
array_resize(a, nitems + 1);
element = (t_word *)(((char *)a->a_vec) +
nitems * elemsize);
glist_readatoms(x, natoms, vec, p_nextmsg, arraytemplatesym,
element, nline, vec + message);
nitems++;
}
}
else if (template->t_vec[i].ds_type == DT_TEXT)
{
t_binbuf *z = binbuf_new();
int first = *p_nextmsg, last;
for (last = first; last < natoms && vec[last].a_type != A_SEMI;
last++);
binbuf_restore(z, last-first, vec+first);
binbuf_add(w[i].w_binbuf, binbuf_getnatom(z), binbuf_getvec(z));
binbuf_free(z);
last++;
if (last > natoms) last = natoms;
*p_nextmsg = last;
}
}
}
int canvas_readscalar(t_glist *x, int natoms, t_atom *vec,
int *p_nextmsg, int selectit)
{
int message, nline;
t_template *template;
t_symbol *templatesym;
t_scalar *sc;
int nextmsg = *p_nextmsg;
int wasvis = glist_isvisible(x);
if (nextmsg >= natoms || vec[nextmsg].a_type != A_SYMBOL)
{
if (nextmsg < natoms)
post("stopping early: type %d", vec[nextmsg].a_type);
*p_nextmsg = natoms;
return (0);
}
templatesym = canvas_makebindsym(vec[nextmsg].a_w.w_symbol);
*p_nextmsg = nextmsg + 1;
if (!(template = template_findbyname(templatesym)))
{
error("canvas_read: %s: no such template", templatesym->s_name);
*p_nextmsg = natoms;
return (0);
}
sc = scalar_new(x, templatesym);
if (!sc)
{
error("couldn't create scalar \"%s\"", templatesym->s_name);
*p_nextmsg = natoms;
return (0);
}
if (wasvis)
{
/* temporarily lie about vis flag while this is built */
glist_getcanvas(x)->gl_mapped = 0;
}
glist_add(x, &sc->sc_gobj);
nline = canvas_scanbinbuf(natoms, vec, &message, p_nextmsg);
glist_readatoms(x, natoms, vec, p_nextmsg, templatesym, sc->sc_vec,
nline, vec + message);
if (wasvis)
{
/* reset vis flag as before */
glist_getcanvas(x)->gl_mapped = 1;
gobj_vis(&sc->sc_gobj, x, 1);
}
if (selectit)
{
glist_select(x, &sc->sc_gobj);
}
return (1);
}
void glist_readfrombinbuf(t_glist *x, const t_binbuf *b, const char *filename, int selectem)
{
t_canvas *canvas = glist_getcanvas(x);
int natoms, nline, message, nextmsg = 0;
t_atom *vec;
natoms = binbuf_getnatom(b);
vec = binbuf_getvec(b);
/* check for file type */
nline = canvas_scanbinbuf(natoms, vec, &message, &nextmsg);
if (nline != 1 || vec[message].a_type != A_SYMBOL ||
    strcmp(vec[message].a_w.w_symbol->s_name, "data"))
{
pd_error(x, "%s: file apparently of wrong type", filename);
return;
}
/* read in templates and check for consistency */
while (1)
{
t_template *newtemplate, *existtemplate;
t_symbol *templatesym;
t_atom *templateargs = getbytes(0);
int ntemplateargs = 0, newnargs;
nline = canvas_scanbinbuf(natoms, vec, &message, &nextmsg);
if (nline < 2)
{
t_freebytes(templateargs, sizeof (*templateargs) * ntemplateargs);
break;
}
else if (nline > 2)
canvas_readerror(natoms, vec, message, nline,
"extra items ignored");
else if (vec[message].a_type != A_SYMBOL ||
strcmp(vec[message].a_w.w_symbol->s_name, "template") ||
vec[message + 1].a_type != A_SYMBOL)
{
canvas_readerror(natoms, vec, message, nline,
"bad template header");
continue;
}
templatesym = canvas_makebindsym(vec[message + 1].a_w.w_symbol);
while (1)
{
nline = canvas_scanbinbuf(natoms, vec, &message, &nextmsg);
if (nline != 2 && nline != 3)
break;
newnargs = ntemplateargs + nline;
templateargs = (t_atom *)t_resizebytes(templateargs,
sizeof(*templateargs) * ntemplateargs,
sizeof(*templateargs) * newnargs);
templateargs[ntemplateargs] = vec[message];
templateargs[ntemplateargs + 1] = vec[message + 1];
if (nline == 3)
templateargs[ntemplateargs + 2] = vec[message + 2];
ntemplateargs = newnargs;
}
if (!(existtemplate = template_findbyname(templatesym)))
{
error("%s: template not found in current patch",
templatesym->s_name);
t_freebytes(templateargs, sizeof (*templateargs) * ntemplateargs);
return;
}
newtemplate = template_new(templatesym, ntemplateargs, templateargs);
t_freebytes(templateargs, sizeof (*templateargs) * ntemplateargs);
if (!template_match(existtemplate, newtemplate))
{
error("%s: template doesn't match current one",
templatesym->s_name);
pd_free(&newtemplate->t_pdobj);
return;
}
pd_free(&newtemplate->t_pdobj);
}
while (nextmsg < natoms)
{
canvas_readscalar(x, natoms, vec, &nextmsg, selectem);
}
}
static void glist_doread(t_glist *x, t_symbol *filename, t_symbol *format,
int clearme)
{
t_binbuf *b = binbuf_new();
t_canvas *canvas = glist_getcanvas(x);
int wasvis = glist_isvisible(canvas);
int cr = 0;
if (!strcmp(format->s_name, "cr"))
cr = 1;
else if (*format->s_name)
error("qlist_read: unknown flag: %s", format->s_name);
if (binbuf_read_via_canvas(b, filename->s_name, canvas, cr))
{
pd_error(x, "read failed");
binbuf_free(b);
return;
}
if (wasvis)
canvas_vis(canvas, 0);
if (clearme)
glist_clear(x);
glist_readfrombinbuf(x, b, filename->s_name, 0);
if (wasvis)
canvas_vis(canvas, 1);
binbuf_free(b);
}
void glist_read(t_glist *x, t_symbol *filename, t_symbol *format)
{
glist_doread(x, filename, format, 1);
}
void glist_mergefile(t_glist *x, t_symbol *filename, t_symbol *format)
{
glist_doread(x, filename, format, 0);
}
/* read text from a "properties" window, called from a gfxstub set
up in scalar_properties(). We try to restore the object; if successful
we either copy the data from the new scalar to the old one in place
(if their templates match) or else delete the old scalar and put the new
thing in its place on the list. */
void canvas_dataproperties(t_canvas *x, t_scalar *sc, t_binbuf *b)
{
int ntotal, nnew, scindex;
t_gobj *y, *y2 = 0, *newone, *oldone = 0;
t_template *template;
glist_noselect(x);
for (y = x->gl_list, ntotal = 0, scindex = -1; y; y = y->g_next)
{
if (y == &sc->sc_gobj)
scindex = ntotal, oldone = y;
ntotal++;
}
if (scindex == -1)
{
error("data_properties: scalar disappeared");
return;
}
glist_readfrombinbuf(x, b, "properties dialog", 0);
newone = 0;
/* take the new object off the list */
if (ntotal)
{
for (y = x->gl_list, nnew = 1; (y2 = y->g_next);
y = y2, nnew++)
if (nnew == ntotal)
{
newone = y2;
gobj_vis(newone, x, 0);
y->g_next = y2->g_next;
break;
}
}
else gobj_vis((newone = x->gl_list), x, 0), x->gl_list = newone->g_next;
if (!newone)
error("couldn't update properties (perhaps a format problem?)");
else if (!oldone)
bug("data_properties: couldn't find old element");
else if (newone->g_pd == scalar_class && oldone->g_pd == scalar_class
&& ((t_scalar *)newone)->sc_template ==
((t_scalar *)oldone)->sc_template
&& (template = template_findbyname(((t_scalar *)newone)->sc_template)))
{
/* swap new one with old one; then delete new one */
int i;
for (i = 0; i < template->t_n; i++)
{
t_word w = ((t_scalar *)newone)->sc_vec[i];
((t_scalar *)newone)->sc_vec[i] = ((t_scalar *)oldone)->sc_vec[i];
((t_scalar *)oldone)->sc_vec[i] = w;
}
pd_free(&newone->g_pd);
if (glist_isvisible(x))
{
gobj_vis(oldone, x, 0);
gobj_vis(oldone, x, 1);
}
}
else
{
/* delete old one; put new one where the old one was on glist */
glist_delete(x, oldone);
if (scindex > 0)
{
for (y = x->gl_list, nnew = 1; y;
y = y->g_next, nnew++)
if (nnew == scindex || !y->g_next)
{
newone->g_next = y->g_next;
y->g_next = newone;
goto didit;
}
bug("data_properties: can't reinsert");
}
else newone->g_next = x->gl_list, x->gl_list = newone;
}
didit:
;
}
/* ----------- routines to write data to a binbuf ----------- */
void canvas_doaddtemplate(t_symbol *templatesym,
int *p_ntemplates, t_symbol ***p_templatevec)
{
int n = *p_ntemplates, i;
t_symbol **templatevec = *p_templatevec;
for (i = 0; i < n; i++)
if (templatevec[i] == templatesym)
return;
templatevec = (t_symbol **)t_resizebytes(templatevec,
n * sizeof(*templatevec), (n+1) * sizeof(*templatevec));
templatevec[n] = templatesym;
*p_templatevec = templatevec;
*p_ntemplates = n+1;
}
static void glist_writelist(t_gobj *y, t_binbuf *b);
void binbuf_savetext(t_binbuf *bfrom, t_binbuf *bto);
void canvas_writescalar(t_symbol *templatesym, t_word *w, t_binbuf *b,
int amarrayelement)
{
t_template *template = template_findbyname(templatesym);
t_atom *a = (t_atom *)t_getbytes(0);
int i, n = template?(template->t_n):0, natom = 0;
if (!amarrayelement)
{
t_atom templatename;
SETSYMBOL(&templatename, gensym(templatesym->s_name + 3));
binbuf_add(b, 1, &templatename);
}
if (!template)
bug("canvas_writescalar");
/* write the atoms (floats and symbols) */
for (i = 0; i < n; i++)
{
if (template->t_vec[i].ds_type == DT_FLOAT ||
template->t_vec[i].ds_type == DT_SYMBOL)
{
a = (t_atom *)t_resizebytes(a,
natom * sizeof(*a), (natom + 1) * sizeof (*a));
if (template->t_vec[i].ds_type == DT_FLOAT)
SETFLOAT(a + natom, w[i].w_float);
else SETSYMBOL(a + natom, w[i].w_symbol);
natom++;
}
}
/* array elements have to have at least something */
if (natom == 0 && amarrayelement)
SETSYMBOL(a + natom, &s_bang), natom++;
binbuf_add(b, natom, a);
binbuf_addsemi(b);
t_freebytes(a, natom * sizeof(*a));
for (i = 0; i < n; i++)
{
if (template->t_vec[i].ds_type == DT_ARRAY)
{
int j;
t_array *a = w[i].w_array;
int elemsize = a->a_elemsize, nitems = a->a_n;
t_symbol *arraytemplatesym = template->t_vec[i].ds_arraytemplate;
for (j = 0; j < nitems; j++)
canvas_writescalar(arraytemplatesym,
(t_word *)(((char *)a->a_vec) + elemsize * j), b, 1);
binbuf_addsemi(b);
}
else if (template->t_vec[i].ds_type == DT_TEXT)
binbuf_savetext(w[i].w_binbuf, b);
}
}
static void glist_writelist(t_gobj *y, t_binbuf *b)
{
for (; y; y = y->g_next)
{
if (pd_class(&y->g_pd) == scalar_class)
{
canvas_writescalar(((t_scalar *)y)->sc_template,
((t_scalar *)y)->sc_vec, b, 0);
}
}
}
/* ------------ routines to write out templates for data ------- */
static void canvas_addtemplatesforlist(t_gobj *y,
int *p_ntemplates, t_symbol ***p_templatevec);
static void canvas_addtemplatesforscalar(t_symbol *templatesym,
t_word *w, int *p_ntemplates, t_symbol ***p_templatevec)
{
t_dataslot *ds;
int i;
t_template *template = template_findbyname(templatesym);
canvas_doaddtemplate(templatesym, p_ntemplates, p_templatevec);
if (!template)
bug("canvas_addtemplatesforscalar");
else for (ds = template->t_vec, i = template->t_n; i--; ds++, w++)
{
if (ds->ds_type == DT_ARRAY)
{
int j;
t_array *a = w->w_array;
int elemsize = a->a_elemsize, nitems = a->a_n;
t_symbol *arraytemplatesym = ds->ds_arraytemplate;
canvas_doaddtemplate(arraytemplatesym, p_ntemplates, p_templatevec);
for (j = 0; j < nitems; j++)
canvas_addtemplatesforscalar(arraytemplatesym,
(t_word *)(((char *)a->a_vec) + elemsize * j),
p_ntemplates, p_templatevec);
}
}
}
static void canvas_addtemplatesforlist(t_gobj *y,
int *p_ntemplates, t_symbol ***p_templatevec)
{
for (; y; y = y->g_next)
{
if (pd_class(&y->g_pd) == scalar_class)
{
canvas_addtemplatesforscalar(((t_scalar *)y)->sc_template,
((t_scalar *)y)->sc_vec, p_ntemplates, p_templatevec);
}
}
}
/* write all "scalars" in a glist to a binbuf. */
t_binbuf *glist_writetobinbuf(t_glist *x, int wholething)
{
int i;
t_symbol **templatevec = getbytes(0);
int ntemplates = 0;
t_gobj *y;
t_binbuf *b = binbuf_new();
for (y = x->gl_list; y; y = y->g_next)
{
if ((pd_class(&y->g_pd) == scalar_class) &&
(wholething || glist_isselected(x, y)))
{
canvas_addtemplatesforscalar(((t_scalar *)y)->sc_template,
((t_scalar *)y)->sc_vec, &ntemplates, &templatevec);
}
}
binbuf_addv(b, "s;", gensym("data"));
for (i = 0; i < ntemplates; i++)
{
t_template *template = template_findbyname(templatevec[i]);
int j, m = template->t_n;
/* drop "pd-" prefix from template symbol to print it: */
binbuf_addv(b, "ss;", gensym("template"),
gensym(templatevec[i]->s_name + 3));
for (j = 0; j < m; j++)
{
t_symbol *type;
switch (template->t_vec[j].ds_type)
{
case DT_FLOAT: type = &s_float; break;
case DT_SYMBOL: type = &s_symbol; break;
case DT_ARRAY: type = gensym("array"); break;
case DT_TEXT: type = &s_list; break;
default: type = &s_float; bug("canvas_write");
}
if (template->t_vec[j].ds_type == DT_ARRAY)
binbuf_addv(b, "sss;", type, template->t_vec[j].ds_name,
gensym(template->t_vec[j].ds_arraytemplate->s_name + 3));
else binbuf_addv(b, "ss;", type, template->t_vec[j].ds_name);
}
binbuf_addsemi(b);
}
binbuf_addsemi(b);
/* now write out the objects themselves */
for (y = x->gl_list; y; y = y->g_next)
{
if ((pd_class(&y->g_pd) == scalar_class) &&
(wholething || glist_isselected(x, y)))
{
canvas_writescalar(((t_scalar *)y)->sc_template,
((t_scalar *)y)->sc_vec, b, 0);
}
}
t_freebytes(templatevec, ntemplates*sizeof(*templatevec));
return (b);
}
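/* Example of the binbuf contents produced here (hypothetical template
   "point" with two float fields and one scalar; the actual output depends
   on the templates and scalars in the glist):

       data;
       template point;
       float x;
       float y;
       ;
       ;
       point 10 20;

   glist_readfrombinbuf() above parses exactly this shape: a "data" line,
   one "template" block per template (each ended by an empty ";" line), a
   final ";" line, then one line per scalar. */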
static void glist_write(t_glist *x, t_symbol *filename, t_symbol *format)
{
int cr = 0;
t_binbuf *b;
char buf[MAXPDSTRING];
t_canvas *canvas = glist_getcanvas(x);
canvas_makefilename(canvas, filename->s_name, buf, MAXPDSTRING);
if (!strcmp(format->s_name, "cr"))
cr = 1;
else if (*format->s_name)
error("qlist_read: unknown flag: %s", format->s_name);
b = glist_writetobinbuf(x, 1);
if (b)
{
if (binbuf_write(b, buf, "", cr))
error("%s: write failed", filename->s_name);
binbuf_free(b);
}
}
/* ------ routines to save and restore canvases (patches) recursively. ----*/
typedef void (*t_zoomfn)(void *x, t_floatarg arg1);
/* save to a binbuf, called recursively; cf. canvas_savetofile() which
saves the document, and is only called on root canvases. */
static void canvas_saveto(t_canvas *x, t_binbuf *b)
{
t_gobj *y;
t_linetraverser t;
t_outconnect *oc;
/* subpatch */
if (x->gl_owner && !x->gl_env)
{
/* have to go to original binbuf to find out how we were named. */
t_binbuf *bz = binbuf_new();
t_symbol *patchsym;
binbuf_addbinbuf(bz, x->gl_obj.ob_binbuf);
patchsym = atom_getsymbolarg(1, binbuf_getnatom(bz), binbuf_getvec(bz));
binbuf_free(bz);
binbuf_addv(b, "ssiiiisi;", gensym("#N"), gensym("canvas"),
(int)(x->gl_screenx1),
(int)(x->gl_screeny1),
(int)(x->gl_screenx2 - x->gl_screenx1),
(int)(x->gl_screeny2 - x->gl_screeny1),
(patchsym != &s_ ? patchsym: gensym("(subpatch)")),
x->gl_mapped);
}
/* root or abstraction */
else
{
binbuf_addv(b, "ssiiiii;", gensym("#N"), gensym("canvas"),
(int)(x->gl_screenx1),
(int)(x->gl_screeny1),
(int)(x->gl_screenx2 - x->gl_screenx1),
(int)(x->gl_screeny2 - x->gl_screeny1),
(int)x->gl_font);
canvas_savedeclarationsto(x, b);
}
for (y = x->gl_list; y; y = y->g_next)
gobj_save(y, b);
linetraverser_start(&t, x);
while ((oc = linetraverser_next(&t)))
{
int srcno = canvas_getindex(x, &t.tr_ob->ob_g);
int sinkno = canvas_getindex(x, &t.tr_ob2->ob_g);
binbuf_addv(b, "ssiiii;", gensym("#X"), gensym("connect"),
srcno, t.tr_outno, sinkno, t.tr_inno);
}
/* unless everything is the default (as in ordinary subpatches)
print out a "coords" message to set up the coordinate systems */
if (x->gl_isgraph || x->gl_x1 || x->gl_y1 ||
x->gl_x2 != 1 || x->gl_y2 != 1 || x->gl_pixwidth || x->gl_pixheight)
{
if (x->gl_isgraph && x->gl_goprect)
/* if we have a graph-on-parent rectangle, we're new style.
The format is arranged so
that old versions of Pd can at least do something with it. */
binbuf_addv(b, "ssfffffffff;", gensym("#X"), gensym("coords"),
x->gl_x1, x->gl_y1,
x->gl_x2, x->gl_y2,
(t_float)x->gl_pixwidth, (t_float)x->gl_pixheight,
(t_float)((x->gl_hidetext)?2.:1.),
(t_float)x->gl_xmargin, (t_float)x->gl_ymargin);
/* otherwise write in 0.38-compatible form */
else binbuf_addv(b, "ssfffffff;", gensym("#X"), gensym("coords"),
x->gl_x1, x->gl_y1,
x->gl_x2, x->gl_y2,
(t_float)x->gl_pixwidth, (t_float)x->gl_pixheight,
(t_float)x->gl_isgraph);
}
}
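/* Illustration of the output (hypothetical coordinates and objects; the
   exact lines depend on the patch being saved):

       #N canvas 0 0 450 300 10;
       #X obj 50 50 osc~ 440;
       #X obj 50 90 dac~;
       #X connect 0 0 1 0;
       #X connect 0 0 1 1;

   Canvases with graph-on-parent or non-default coordinates additionally
   get a trailing "#X coords ..." line, as written above. */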
/* call this recursively to collect all the template names for
a canvas or for the selection. */
static void canvas_collecttemplatesfor(t_canvas *x, int *ntemplatesp,
t_symbol ***templatevecp, int wholething)
{
t_gobj *y;
for (y = x->gl_list; y; y = y->g_next)
{
if ((pd_class(&y->g_pd) == scalar_class) &&
(wholething || glist_isselected(x, y)))
canvas_addtemplatesforscalar(((t_scalar *)y)->sc_template,
((t_scalar *)y)->sc_vec, ntemplatesp, templatevecp);
else if ((pd_class(&y->g_pd) == canvas_class) &&
(wholething || glist_isselected(x, y)))
canvas_collecttemplatesfor((t_canvas *)y,
ntemplatesp, templatevecp, 1);
}
}
/* save the templates needed by a canvas to a binbuf. */
static void canvas_savetemplatesto(t_canvas *x, t_binbuf *b, int wholething)
{
t_symbol **templatevec = getbytes(0);
int i, ntemplates = 0;
canvas_collecttemplatesfor(x, &ntemplates, &templatevec, wholething);
for (i = 0; i < ntemplates; i++)
{
t_template *template = template_findbyname(templatevec[i]);
int j, m;
if (!template)
{
bug("canvas_savetemplatesto");
continue;
}
m = template->t_n;
/* drop "pd-" prefix from template symbol to print */
binbuf_addv(b, "sss", &s__N, gensym("struct"),
gensym(templatevec[i]->s_name + 3));
for (j = 0; j < m; j++)
{
t_symbol *type;
switch (template->t_vec[j].ds_type)
{
case DT_FLOAT: type = &s_float; break;
case DT_SYMBOL: type = &s_symbol; break;
case DT_ARRAY: type = gensym("array"); break;
case DT_TEXT: type = gensym("text"); break;
default: type = &s_float; bug("canvas_write");
}
if (template->t_vec[j].ds_type == DT_ARRAY)
binbuf_addv(b, "sss", type, template->t_vec[j].ds_name,
gensym(template->t_vec[j].ds_arraytemplate->s_name + 3));
else binbuf_addv(b, "ss", type, template->t_vec[j].ds_name);
}
binbuf_addsemi(b);
}
freebytes(templatevec, ntemplates * sizeof(*templatevec));
}
void canvas_reload(t_symbol *name, t_symbol *dir, t_glist *except);
/* save a "root" canvas to a file; cf. canvas_saveto() which saves the
body (and which is called recursively.) */
static void canvas_savetofile(t_canvas *x, t_symbol *filename, t_symbol *dir,
float fdestroy)
{
t_binbuf *b = binbuf_new();
canvas_savetemplatesto(x, b, 1);
canvas_saveto(x, b);
if (binbuf_write(b, filename->s_name, dir->s_name, 0)) sys_ouch();
else
{
/* if not an abstraction, reset title bar and directory */
if (!x->gl_owner)
{
canvas_rename(x, filename, dir);
/* update window list in case Save As changed the window name */
canvas_updatewindowlist();
}
post("saved to: %s/%s", dir->s_name, filename->s_name);
canvas_dirty(x, 0);
canvas_reload(filename, dir, x);
if (fdestroy != 0)
vmess(&x->gl_pd, gensym("menuclose"), "f", 1.);
}
binbuf_free(b);
}
static void canvas_menusaveas(t_canvas *x, float fdestroy)
{
t_canvas *x2 = canvas_getrootfor(x);
sys_vgui("pdtk_canvas_saveas .x%lx {%s} {%s} %d\n", x2,
x2->gl_name->s_name, canvas_getdir(x2)->s_name, (fdestroy != 0));
}
static void canvas_menusave(t_canvas *x, float fdestroy)
{
t_canvas *x2 = canvas_getrootfor(x);
const char *name = x2->gl_name->s_name;
if (*name && strncmp(name, "Untitled", 8)
    && (strlen(name) < 4 || (strcmp(name + strlen(name)-4, ".pat")
        && strcmp(name + strlen(name)-4, ".mxt"))))
{
canvas_savetofile(x2, x2->gl_name, canvas_getdir(x2), fdestroy);
}
else canvas_menusaveas(x2, fdestroy);
}
void g_readwrite_setup(void)
{
savestate_setup();
class_addmethod(canvas_class, (t_method)glist_write,
gensym("write"), A_SYMBOL, A_DEFSYM, A_NULL);
class_addmethod(canvas_class, (t_method)glist_read,
gensym("read"), A_SYMBOL, A_DEFSYM, A_NULL);
class_addmethod(canvas_class, (t_method)glist_mergefile,
gensym("mergefile"), A_SYMBOL, A_DEFSYM, A_NULL);
class_addmethod(canvas_class, (t_method)canvas_savetofile,
gensym("savetofile"), A_SYMBOL, A_SYMBOL, A_DEFFLOAT, 0);
class_addmethod(canvas_class, (t_method)canvas_saveto,
gensym("saveto"), A_CANT, 0);
class_addmethod(canvas_class, (t_method)canvas_saved,
gensym("saved"), A_GIMME, 0);
/* ------------------ from the menu ------------------------- */
class_addmethod(canvas_class, (t_method)canvas_menusave,
gensym("menusave"), A_DEFFLOAT, 0);
class_addmethod(canvas_class, (t_method)canvas_menusaveas,
gensym("menusaveas"), A_DEFFLOAT, 0);
}
void canvas_readwrite_for_class(t_class *c)
{
class_addmethod(c, (t_method)canvas_menusave,
gensym("menusave"), A_DEFFLOAT, 0);
class_addmethod(c, (t_method)canvas_menusaveas,
gensym("menusaveas"), A_DEFFLOAT, 0);
}
| {
"language": "C"
} |
/* $NetBSD: setpassent.c,v 1.4 2003/10/27 00:12:43 lukem Exp $ */
#include "nbtool_config.h"
#if !HAVE_SETPASSENT || !HAVE_DECL_SETPASSENT
#include <pwd.h>
int setpassent(int stayopen) {
setpwent();
return 1;
}
#endif
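/* Usage sketch (standard <pwd.h> API; hypothetical caller): keep the
 * password database open across repeated lookups, then walk it. Note that
 * this compatibility shim ignores the "stayopen" hint. */
#if 0 /* illustrative only */
#include <pwd.h>
#include <stdio.h>
static void list_users(void)
{
    struct passwd *pw;
    setpassent(1);                      /* request the database stay open */
    while ((pw = getpwent()) != NULL)   /* iterate all password entries */
        printf("%s\n", pw->pw_name);
    endpwent();
}
#endif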
| {
"language": "C"
} |
diff --git include/ruby/ruby.h include/ruby/ruby.h
index 60cfb1174e..dccfdc763a 100644
--- include/ruby/ruby.h
+++ include/ruby/ruby.h
@@ -551,27 +551,23 @@ static inline int rb_type(VALUE obj);
((type) == RUBY_T_FLOAT) ? RB_FLOAT_TYPE_P(obj) : \
(!RB_SPECIAL_CONST_P(obj) && RB_BUILTIN_TYPE(obj) == (type)))
-/* RB_GC_GUARD_PTR() is an intermediate macro, and has no effect by
- * itself. don't use it directly */
#ifdef __GNUC__
-#define RB_GC_GUARD_PTR(ptr) \
- __extension__ ({volatile VALUE *rb_gc_guarded_ptr = (ptr); rb_gc_guarded_ptr;})
-#else
-#ifdef _MSC_VER
+#define RB_GC_GUARD(v) \
+ (*__extension__ ({ \
+ volatile VALUE *rb_gc_guarded_ptr = &(v); \
+ __asm__("" : : "m"(rb_gc_guarded_ptr)); \
+ rb_gc_guarded_ptr; \
+ }))
+#elif defined _MSC_VER
#pragma optimize("", off)
static inline volatile VALUE *rb_gc_guarded_ptr(volatile VALUE *ptr) {return ptr;}
#pragma optimize("", on)
+#define RB_GC_GUARD(v) (*rb_gc_guarded_ptr(&(v)))
#else
volatile VALUE *rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val);
#define HAVE_RB_GC_GUARDED_PTR_VAL 1
#define RB_GC_GUARD(v) (*rb_gc_guarded_ptr_val(&(v),(v)))
#endif
-#define RB_GC_GUARD_PTR(ptr) rb_gc_guarded_ptr(ptr)
-#endif
-
-#ifndef RB_GC_GUARD
-#define RB_GC_GUARD(v) (*RB_GC_GUARD_PTR(&(v)))
-#endif
#ifdef __GNUC__
#define RB_UNUSED_VAR(x) x __attribute__ ((unused))
diff --git marshal.c marshal.c
index c56de4af8d..b7274bf3c4 100644
--- marshal.c
+++ marshal.c
@@ -1022,7 +1022,7 @@ VALUE
rb_marshal_dump_limited(VALUE obj, VALUE port, int limit)
{
struct dump_arg *arg;
- VALUE wrapper; /* used to avoid memory leak in case of exception */
+ volatile VALUE wrapper; /* used to avoid memory leak in case of exception */
wrapper = TypedData_Make_Struct(rb_cData, struct dump_arg, &dump_arg_data, arg);
arg->dest = 0;
@@ -1051,8 +1051,8 @@ rb_marshal_dump_limited(VALUE obj, VALUE port, int limit)
rb_io_write(arg->dest, arg->str);
rb_str_resize(arg->str, 0);
}
- clear_dump_arg(arg);
- RB_GC_GUARD(wrapper);
+ free_dump_arg(arg);
+ rb_gc_force_recycle(wrapper);
return port;
}
@@ -2044,7 +2044,7 @@ rb_marshal_load_with_proc(VALUE port, VALUE proc)
{
int major, minor, infection = 0;
VALUE v;
- VALUE wrapper; /* used to avoid memory leak in case of exception */
+ volatile VALUE wrapper; /* used to avoid memory leak in case of exception */
struct load_arg *arg;
v = rb_check_string_type(port);
@@ -2090,8 +2090,8 @@ rb_marshal_load_with_proc(VALUE port, VALUE proc)
if (!NIL_P(proc)) arg->proc = proc;
v = r_object(arg);
- clear_load_arg(arg);
- RB_GC_GUARD(wrapper);
+ free_load_arg(arg);
+ rb_gc_force_recycle(wrapper);
return v;
}
diff --git test/ruby/test_marshal.rb test/ruby/test_marshal.rb
index 6ac5c29991..dc2b8b30dc 100644
--- test/ruby/test_marshal.rb
+++ test/ruby/test_marshal.rb
@@ -645,6 +645,9 @@ def test_continuation
c = Bug9523.new
assert_raise_with_message(RuntimeError, /Marshal\.dump reentered at marshal_dump/) do
Marshal.dump(c)
+ GC.start
+ 1000.times {"x"*1000}
+ GC.start
c.cc.call
end
end
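/* Usage sketch for RB_GC_GUARD (hypothetical C-extension code, not part of
   this patch): the guard keeps a heap object reachable until after the last
   use of memory borrowed from it, which is what the asm-based definition
   above enforces even under aggressive optimization. */
#if 0 /* illustrative only */
#include "ruby.h"
static VALUE concat_example(VALUE self)
{
    VALUE str = rb_str_new_cstr("hello");
    const char *p = RSTRING_PTR(str);   /* borrowed pointer into str */
    /* ... work with p; str must stay alive even if otherwise unused ... */
    RB_GC_GUARD(str);                   /* placed after the last use of p */
    return self;
}
#endif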
| {
"language": "C"
} |